//===-- X86ISelLowering.cpp - X86 DAG Lowering Implementation ------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that X86 uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//
#include "X86ISelLowering.h"
#include "Utils/X86ShuffleDecode.h"
#include "X86CallingConv.h"
#include "X86FrameLowering.h"
#include "X86InstrBuilder.h"
#include "X86MachineFunctionInfo.h"
#include "X86TargetMachine.h"
#include "X86TargetObjectFile.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/VariadicFunction.h"
#include "llvm/CodeGen/IntrinsicLowering.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetOptions.h"
#include "X86IntrinsicsInfo.h"

using namespace llvm;
#define DEBUG_TYPE "x86-isel"

STATISTIC(NumTailCalls, "Number of tail calls");
static cl::opt<bool> ExperimentalVectorWideningLegalization(
    "x86-experimental-vector-widening-legalization", cl::init(false),
    cl::desc("Enable an experimental vector type legalization through widening "
             "rather than promotion."),
    cl::Hidden);

static cl::opt<bool> ExperimentalVectorShuffleLowering(
    "x86-experimental-vector-shuffle-lowering", cl::init(true),
    cl::desc("Enable an experimental vector shuffle lowering code path."),
    cl::Hidden);

static cl::opt<bool> ExperimentalVectorShuffleLegality(
    "x86-experimental-vector-shuffle-legality", cl::init(false),
    cl::desc("Enable experimental shuffle legality based on the experimental "
             "shuffle lowering. Should only be used with the experimental "
             "shuffle lowering."),
    cl::Hidden);

static cl::opt<int> ReciprocalEstimateRefinementSteps(
    "x86-recip-refinement-steps", cl::init(1),
    cl::desc("Specify the number of Newton-Raphson iterations applied to the "
             "result of the hardware reciprocal estimate instruction."),
    cl::NotHidden);
// Forward declarations.
static SDValue getMOVL(SelectionDAG &DAG, SDLoc dl, EVT VT, SDValue V1,
                       SDValue V2);
static SDValue ExtractSubVector(SDValue Vec, unsigned IdxVal,
                                SelectionDAG &DAG, SDLoc dl,
                                unsigned vectorWidth) {
  assert((vectorWidth == 128 || vectorWidth == 256) &&
         "Unsupported vector width");
  EVT VT = Vec.getValueType();
  EVT ElVT = VT.getVectorElementType();
  unsigned Factor = VT.getSizeInBits()/vectorWidth;
  EVT ResultVT = EVT::getVectorVT(*DAG.getContext(), ElVT,
                                  VT.getVectorNumElements()/Factor);

  // Extract from UNDEF is UNDEF.
  if (Vec.getOpcode() == ISD::UNDEF)
    return DAG.getUNDEF(ResultVT);

  // Extract the relevant vectorWidth bits.  Generate an EXTRACT_SUBVECTOR
  unsigned ElemsPerChunk = vectorWidth / ElVT.getSizeInBits();

  // This is the index of the first element of the vectorWidth-bit chunk
  // we want.
  unsigned NormalizedIdxVal = (((IdxVal * ElVT.getSizeInBits()) / vectorWidth)
                               * ElemsPerChunk);

  // If the input is a buildvector just emit a smaller one.
  if (Vec.getOpcode() == ISD::BUILD_VECTOR)
    return DAG.getNode(ISD::BUILD_VECTOR, dl, ResultVT,
                       makeArrayRef(Vec->op_begin() + NormalizedIdxVal,
                                    ElemsPerChunk));

  SDValue VecIdx = DAG.getIntPtrConstant(NormalizedIdxVal);
  return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, ResultVT, Vec, VecIdx);
}
/// Generate a DAG to grab 128-bits from a vector > 128 bits.  This
/// sets things up to match to an AVX VEXTRACTF128 / VEXTRACTI128
/// or AVX-512 VEXTRACTF32x4 / VEXTRACTI32x4
/// instructions or a simple subregister reference. Idx is an index in the
/// 128 bits we want.  It need not be aligned to a 128-bit boundary.  That makes
/// lowering EXTRACT_VECTOR_ELT operations easier.
static SDValue Extract128BitVector(SDValue Vec, unsigned IdxVal,
                                   SelectionDAG &DAG, SDLoc dl) {
  assert((Vec.getValueType().is256BitVector() ||
          Vec.getValueType().is512BitVector()) && "Unexpected vector size!");
  return ExtractSubVector(Vec, IdxVal, DAG, dl, 128);
}

/// Generate a DAG to grab 256-bits from a 512-bit vector.
static SDValue Extract256BitVector(SDValue Vec, unsigned IdxVal,
                                   SelectionDAG &DAG, SDLoc dl) {
  assert(Vec.getValueType().is512BitVector() && "Unexpected vector size!");
  return ExtractSubVector(Vec, IdxVal, DAG, dl, 256);
}
static SDValue InsertSubVector(SDValue Result, SDValue Vec,
                               unsigned IdxVal, SelectionDAG &DAG,
                               SDLoc dl, unsigned vectorWidth) {
  assert((vectorWidth == 128 || vectorWidth == 256) &&
         "Unsupported vector width");
  // Inserting UNDEF is Result
  if (Vec.getOpcode() == ISD::UNDEF)
    return Result;
  EVT VT = Vec.getValueType();
  EVT ElVT = VT.getVectorElementType();
  EVT ResultVT = Result.getValueType();

  // Insert the relevant vectorWidth bits.
  unsigned ElemsPerChunk = vectorWidth/ElVT.getSizeInBits();

  // This is the index of the first element of the vectorWidth-bit chunk
  // we want.
  unsigned NormalizedIdxVal = (((IdxVal * ElVT.getSizeInBits())/vectorWidth)
                               * ElemsPerChunk);

  SDValue VecIdx = DAG.getIntPtrConstant(NormalizedIdxVal);
  return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ResultVT, Result, Vec, VecIdx);
}
/// Generate a DAG to put 128-bits into a vector > 128 bits.  This
/// sets things up to match to an AVX VINSERTF128/VINSERTI128 or
/// AVX-512 VINSERTF32x4/VINSERTI32x4 instructions or a
/// simple superregister reference.  Idx is an index in the 128 bits
/// we want.  It need not be aligned to a 128-bit boundary.  That makes
/// lowering INSERT_VECTOR_ELT operations easier.
static SDValue Insert128BitVector(SDValue Result, SDValue Vec, unsigned IdxVal,
                                  SelectionDAG &DAG, SDLoc dl) {
  assert(Vec.getValueType().is128BitVector() && "Unexpected vector size!");
  return InsertSubVector(Result, Vec, IdxVal, DAG, dl, 128);
}

static SDValue Insert256BitVector(SDValue Result, SDValue Vec, unsigned IdxVal,
                                  SelectionDAG &DAG, SDLoc dl) {
  assert(Vec.getValueType().is256BitVector() && "Unexpected vector size!");
  return InsertSubVector(Result, Vec, IdxVal, DAG, dl, 256);
}
/// Concat two 128-bit vectors into a 256-bit vector using VINSERTF128
/// instructions. This is used because creating CONCAT_VECTOR nodes of
/// BUILD_VECTORS returns a larger BUILD_VECTOR while we're trying to lower
/// large BUILD_VECTORS.
static SDValue Concat128BitVectors(SDValue V1, SDValue V2, EVT VT,
                                   unsigned NumElems, SelectionDAG &DAG,
                                   SDLoc dl) {
  SDValue V = Insert128BitVector(DAG.getUNDEF(VT), V1, 0, DAG, dl);
  return Insert128BitVector(V, V2, NumElems/2, DAG, dl);
}

static SDValue Concat256BitVectors(SDValue V1, SDValue V2, EVT VT,
                                   unsigned NumElems, SelectionDAG &DAG,
                                   SDLoc dl) {
  SDValue V = Insert256BitVector(DAG.getUNDEF(VT), V1, 0, DAG, dl);
  return Insert256BitVector(V, V2, NumElems/2, DAG, dl);
}
205 X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
206 const X86Subtarget &STI)
207 : TargetLowering(TM), Subtarget(&STI) {
208 X86ScalarSSEf64 = Subtarget->hasSSE2();
209 X86ScalarSSEf32 = Subtarget->hasSSE1();
210 TD = getDataLayout();
212 // Set up the TargetLowering object.
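  // The scalar integer types with GPR register classes; several loops below
  // use this list to apply the same action to every integer width.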
213 static const MVT IntVTs[] = { MVT::i8, MVT::i16, MVT::i32, MVT::i64 };
215 // X86 is weird. It always uses i8 for shift amounts and setcc results.
216 setBooleanContents(ZeroOrOneBooleanContent);
217 // X86-SSE is even stranger. It uses -1 or 0 for vector masks.
218 setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
220 // For 64-bit, since we have so many registers, use the ILP scheduler.
221 // For 32-bit, use the register pressure specific scheduling.
222 // For Atom, always use ILP scheduling.
223 if (Subtarget->isAtom())
224 setSchedulingPreference(Sched::ILP);
225 else if (Subtarget->is64Bit())
226 setSchedulingPreference(Sched::ILP);
  else
    setSchedulingPreference(Sched::RegPressure);
229 const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
230 setStackPointerRegisterToSaveRestore(RegInfo->getStackRegister());
232 // Bypass expensive divides on Atom when compiling with O2.
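  // (addBypassSlowDiv emits a run-time check and falls back to a narrower,
  // cheaper divide when both operands happen to fit in the smaller width.)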
233 if (TM.getOptLevel() >= CodeGenOpt::Default) {
234 if (Subtarget->hasSlowDivide32())
235 addBypassSlowDiv(32, 8);
236 if (Subtarget->hasSlowDivide64() && Subtarget->is64Bit())
      addBypassSlowDiv(64, 16);
  }
240 if (Subtarget->isTargetKnownWindowsMSVC()) {
241 // Setup Windows compiler runtime calls.
242 setLibcallName(RTLIB::SDIV_I64, "_alldiv");
243 setLibcallName(RTLIB::UDIV_I64, "_aulldiv");
244 setLibcallName(RTLIB::SREM_I64, "_allrem");
245 setLibcallName(RTLIB::UREM_I64, "_aullrem");
246 setLibcallName(RTLIB::MUL_I64, "_allmul");
247 setLibcallCallingConv(RTLIB::SDIV_I64, CallingConv::X86_StdCall);
248 setLibcallCallingConv(RTLIB::UDIV_I64, CallingConv::X86_StdCall);
249 setLibcallCallingConv(RTLIB::SREM_I64, CallingConv::X86_StdCall);
250 setLibcallCallingConv(RTLIB::UREM_I64, CallingConv::X86_StdCall);
251 setLibcallCallingConv(RTLIB::MUL_I64, CallingConv::X86_StdCall);
253 // The _ftol2 runtime function has an unusual calling conv, which
254 // is modeled by a special pseudo-instruction.
255 setLibcallName(RTLIB::FPTOUINT_F64_I64, nullptr);
256 setLibcallName(RTLIB::FPTOUINT_F32_I64, nullptr);
257 setLibcallName(RTLIB::FPTOUINT_F64_I32, nullptr);
    setLibcallName(RTLIB::FPTOUINT_F32_I32, nullptr);
  }
261 if (Subtarget->isTargetDarwin()) {
262 // Darwin should use _setjmp/_longjmp instead of setjmp/longjmp.
263 setUseUnderscoreSetJmp(false);
264 setUseUnderscoreLongJmp(false);
265 } else if (Subtarget->isTargetWindowsGNU()) {
266 // MS runtime is weird: it exports _setjmp, but longjmp!
267 setUseUnderscoreSetJmp(true);
268 setUseUnderscoreLongJmp(false);
  } else {
    setUseUnderscoreSetJmp(true);
    setUseUnderscoreLongJmp(true);
  }
274 // Set up the register classes.
275 addRegisterClass(MVT::i8, &X86::GR8RegClass);
276 addRegisterClass(MVT::i16, &X86::GR16RegClass);
277 addRegisterClass(MVT::i32, &X86::GR32RegClass);
278 if (Subtarget->is64Bit())
279 addRegisterClass(MVT::i64, &X86::GR64RegClass);
281 for (MVT VT : MVT::integer_valuetypes())
282 setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
284 // We don't accept any truncstore of integer registers.
285 setTruncStoreAction(MVT::i64, MVT::i32, Expand);
286 setTruncStoreAction(MVT::i64, MVT::i16, Expand);
287 setTruncStoreAction(MVT::i64, MVT::i8 , Expand);
288 setTruncStoreAction(MVT::i32, MVT::i16, Expand);
289 setTruncStoreAction(MVT::i32, MVT::i8 , Expand);
290 setTruncStoreAction(MVT::i16, MVT::i8, Expand);
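  // A truncating FP store is expanded into an FP_ROUND of the value followed
  // by a regular store.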
292 setTruncStoreAction(MVT::f64, MVT::f32, Expand);
294 // SETOEQ and SETUNE require checking two conditions.
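  // (With ucomiss/ucomisd an unordered result sets ZF and PF together, so
  // OEQ needs "ZF set and PF clear" and UNE needs the inverse.)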
295 setCondCodeAction(ISD::SETOEQ, MVT::f32, Expand);
296 setCondCodeAction(ISD::SETOEQ, MVT::f64, Expand);
297 setCondCodeAction(ISD::SETOEQ, MVT::f80, Expand);
298 setCondCodeAction(ISD::SETUNE, MVT::f32, Expand);
299 setCondCodeAction(ISD::SETUNE, MVT::f64, Expand);
300 setCondCodeAction(ISD::SETUNE, MVT::f80, Expand);
  // Promote all UINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have this
  // operation.
304 setOperationAction(ISD::UINT_TO_FP , MVT::i1 , Promote);
305 setOperationAction(ISD::UINT_TO_FP , MVT::i8 , Promote);
306 setOperationAction(ISD::UINT_TO_FP , MVT::i16 , Promote);
308 if (Subtarget->is64Bit()) {
309 setOperationAction(ISD::UINT_TO_FP , MVT::i32 , Promote);
310 setOperationAction(ISD::UINT_TO_FP , MVT::i64 , Custom);
311 } else if (!TM.Options.UseSoftFloat) {
312 // We have an algorithm for SSE2->double, and we turn this into a
313 // 64-bit FILD followed by conditional FADD for other targets.
314 setOperationAction(ISD::UINT_TO_FP , MVT::i64 , Custom);
315 // We have an algorithm for SSE2, and we turn this into a 64-bit
316 // FILD for other targets.
    setOperationAction(ISD::UINT_TO_FP     , MVT::i32  , Custom);
  }

  // Promote i1/i8 SINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have
  // this operation.
322 setOperationAction(ISD::SINT_TO_FP , MVT::i1 , Promote);
323 setOperationAction(ISD::SINT_TO_FP , MVT::i8 , Promote);
325 if (!TM.Options.UseSoftFloat) {
326 // SSE has no i16 to fp conversion, only i32
327 if (X86ScalarSSEf32) {
328 setOperationAction(ISD::SINT_TO_FP , MVT::i16 , Promote);
329 // f32 and f64 cases are Legal, f80 case is not
      setOperationAction(ISD::SINT_TO_FP     , MVT::i32  , Custom);
    } else {
      setOperationAction(ISD::SINT_TO_FP     , MVT::i16  , Custom);
      setOperationAction(ISD::SINT_TO_FP     , MVT::i32  , Custom);
    }
  } else {
    setOperationAction(ISD::SINT_TO_FP     , MVT::i16  , Promote);
    setOperationAction(ISD::SINT_TO_FP     , MVT::i32  , Promote);
  }
340 // In 32-bit mode these are custom lowered. In 64-bit mode F32 and F64
341 // are Legal, f80 is custom lowered.
342 setOperationAction(ISD::FP_TO_SINT , MVT::i64 , Custom);
343 setOperationAction(ISD::SINT_TO_FP , MVT::i64 , Custom);
  // Promote i1/i8 FP_TO_SINT to larger FP_TO_SINTs, as X86 doesn't have
  // this operation.
347 setOperationAction(ISD::FP_TO_SINT , MVT::i1 , Promote);
348 setOperationAction(ISD::FP_TO_SINT , MVT::i8 , Promote);
350 if (X86ScalarSSEf32) {
351 setOperationAction(ISD::FP_TO_SINT , MVT::i16 , Promote);
352 // f32 and f64 cases are Legal, f80 case is not
    setOperationAction(ISD::FP_TO_SINT     , MVT::i32  , Custom);
  } else {
    setOperationAction(ISD::FP_TO_SINT     , MVT::i16  , Custom);
    setOperationAction(ISD::FP_TO_SINT     , MVT::i32  , Custom);
  }
  // Handle FP_TO_UINT by promoting the destination to a larger signed
  // conversion.
361 setOperationAction(ISD::FP_TO_UINT , MVT::i1 , Promote);
362 setOperationAction(ISD::FP_TO_UINT , MVT::i8 , Promote);
363 setOperationAction(ISD::FP_TO_UINT , MVT::i16 , Promote);
365 if (Subtarget->is64Bit()) {
366 setOperationAction(ISD::FP_TO_UINT , MVT::i64 , Expand);
367 setOperationAction(ISD::FP_TO_UINT , MVT::i32 , Promote);
368 } else if (!TM.Options.UseSoftFloat) {
369 // Since AVX is a superset of SSE3, only check for SSE here.
370 if (Subtarget->hasSSE1() && !Subtarget->hasSSE3())
371 // Expand FP_TO_UINT into a select.
372 // FIXME: We would like to use a Custom expander here eventually to do
373 // the optimal thing for SSE vs. the default expansion in the legalizer.
      setOperationAction(ISD::FP_TO_UINT     , MVT::i32  , Expand);
    else
      // With SSE3 we can use fisttpll to convert to a signed i64; without
      // SSE, we're stuck with a fistpll.
      setOperationAction(ISD::FP_TO_UINT     , MVT::i32  , Custom);
  }
381 if (isTargetFTOL()) {
382 // Use the _ftol2 runtime function, which has a pseudo-instruction
383 // to handle its weird calling convention.
    setOperationAction(ISD::FP_TO_UINT     , MVT::i64  , Custom);
  }
387 // TODO: when we have SSE, these could be more efficient, by using movd/movq.
388 if (!X86ScalarSSEf64) {
389 setOperationAction(ISD::BITCAST , MVT::f32 , Expand);
390 setOperationAction(ISD::BITCAST , MVT::i32 , Expand);
391 if (Subtarget->is64Bit()) {
392 setOperationAction(ISD::BITCAST , MVT::f64 , Expand);
393 // Without SSE, i64->f64 goes through memory.
      setOperationAction(ISD::BITCAST        , MVT::i64  , Expand);
    }
  }
398 // Scalar integer divide and remainder are lowered to use operations that
399 // produce two results, to match the available instructions. This exposes
400 // the two-result form to trivial CSE, which is able to combine x/y and x%y
401 // into a single instruction.
403 // Scalar integer multiply-high is also lowered to use two-result
404 // operations, to match the available instructions. However, plain multiply
405 // (low) operations are left as Legal, as there are single-result
406 // instructions for this in x86. Using the two-result multiply instructions
407 // when both high and low results are needed must be arranged by dagcombine.
  for (unsigned i = 0; i != array_lengthof(IntVTs); ++i) {
    MVT VT = IntVTs[i];
410 setOperationAction(ISD::MULHS, VT, Expand);
411 setOperationAction(ISD::MULHU, VT, Expand);
412 setOperationAction(ISD::SDIV, VT, Expand);
413 setOperationAction(ISD::UDIV, VT, Expand);
414 setOperationAction(ISD::SREM, VT, Expand);
415 setOperationAction(ISD::UREM, VT, Expand);
417 // Add/Sub overflow ops with MVT::Glues are lowered to EFLAGS dependences.
418 setOperationAction(ISD::ADDC, VT, Custom);
419 setOperationAction(ISD::ADDE, VT, Custom);
420 setOperationAction(ISD::SUBC, VT, Custom);
    setOperationAction(ISD::SUBE, VT, Custom);
  }
424 setOperationAction(ISD::BR_JT , MVT::Other, Expand);
425 setOperationAction(ISD::BRCOND , MVT::Other, Custom);
426 setOperationAction(ISD::BR_CC , MVT::f32, Expand);
427 setOperationAction(ISD::BR_CC , MVT::f64, Expand);
428 setOperationAction(ISD::BR_CC , MVT::f80, Expand);
429 setOperationAction(ISD::BR_CC , MVT::i8, Expand);
430 setOperationAction(ISD::BR_CC , MVT::i16, Expand);
431 setOperationAction(ISD::BR_CC , MVT::i32, Expand);
432 setOperationAction(ISD::BR_CC , MVT::i64, Expand);
433 setOperationAction(ISD::SELECT_CC , MVT::f32, Expand);
434 setOperationAction(ISD::SELECT_CC , MVT::f64, Expand);
435 setOperationAction(ISD::SELECT_CC , MVT::f80, Expand);
436 setOperationAction(ISD::SELECT_CC , MVT::i8, Expand);
437 setOperationAction(ISD::SELECT_CC , MVT::i16, Expand);
438 setOperationAction(ISD::SELECT_CC , MVT::i32, Expand);
439 setOperationAction(ISD::SELECT_CC , MVT::i64, Expand);
440 if (Subtarget->is64Bit())
441 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i32, Legal);
442 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16 , Legal);
443 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8 , Legal);
444 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1 , Expand);
445 setOperationAction(ISD::FP_ROUND_INREG , MVT::f32 , Expand);
446 setOperationAction(ISD::FREM , MVT::f32 , Expand);
447 setOperationAction(ISD::FREM , MVT::f64 , Expand);
448 setOperationAction(ISD::FREM , MVT::f80 , Expand);
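  // FLT_ROUNDS_ has to read the current rounding mode out of the FP control
  // word, so it needs a custom lowering.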
449 setOperationAction(ISD::FLT_ROUNDS_ , MVT::i32 , Custom);
  // Promote the i8 variants and force them on up to i32 which has a shorter
  // encoding.
453 setOperationAction(ISD::CTTZ , MVT::i8 , Promote);
454 AddPromotedToType (ISD::CTTZ , MVT::i8 , MVT::i32);
455 setOperationAction(ISD::CTTZ_ZERO_UNDEF , MVT::i8 , Promote);
456 AddPromotedToType (ISD::CTTZ_ZERO_UNDEF , MVT::i8 , MVT::i32);
457 if (Subtarget->hasBMI()) {
458 setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i16 , Expand);
459 setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32 , Expand);
460 if (Subtarget->is64Bit())
      setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i64, Expand);
  } else {
    setOperationAction(ISD::CTTZ           , MVT::i16  , Custom);
    setOperationAction(ISD::CTTZ           , MVT::i32  , Custom);
    if (Subtarget->is64Bit())
      setOperationAction(ISD::CTTZ         , MVT::i64  , Custom);
  }
469 if (Subtarget->hasLZCNT()) {
    // When promoting the i8 variants, force them to i32 for a shorter
    // encoding.
472 setOperationAction(ISD::CTLZ , MVT::i8 , Promote);
473 AddPromotedToType (ISD::CTLZ , MVT::i8 , MVT::i32);
474 setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i8 , Promote);
475 AddPromotedToType (ISD::CTLZ_ZERO_UNDEF, MVT::i8 , MVT::i32);
476 setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i16 , Expand);
477 setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32 , Expand);
478 if (Subtarget->is64Bit())
      setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i64, Expand);
  } else {
    setOperationAction(ISD::CTLZ           , MVT::i8   , Custom);
    setOperationAction(ISD::CTLZ           , MVT::i16  , Custom);
    setOperationAction(ISD::CTLZ           , MVT::i32  , Custom);
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i8   , Custom);
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i16  , Custom);
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32  , Custom);
    if (Subtarget->is64Bit()) {
      setOperationAction(ISD::CTLZ         , MVT::i64  , Custom);
      setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i64, Custom);
    }
  }
493 // Special handling for half-precision floating point conversions.
494 // If we don't have F16C support, then lower half float conversions
495 // into library calls.
496 if (TM.Options.UseSoftFloat || !Subtarget->hasF16C()) {
497 setOperationAction(ISD::FP16_TO_FP, MVT::f32, Expand);
    setOperationAction(ISD::FP_TO_FP16, MVT::f32, Expand);
  }

501 // There's never any support for operations beyond MVT::f32.
502 setOperationAction(ISD::FP16_TO_FP, MVT::f64, Expand);
503 setOperationAction(ISD::FP16_TO_FP, MVT::f80, Expand);
504 setOperationAction(ISD::FP_TO_FP16, MVT::f64, Expand);
505 setOperationAction(ISD::FP_TO_FP16, MVT::f80, Expand);
507 setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Expand);
508 setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Expand);
509 setLoadExtAction(ISD::EXTLOAD, MVT::f80, MVT::f16, Expand);
510 setTruncStoreAction(MVT::f32, MVT::f16, Expand);
511 setTruncStoreAction(MVT::f64, MVT::f16, Expand);
512 setTruncStoreAction(MVT::f80, MVT::f16, Expand);
514 if (Subtarget->hasPOPCNT()) {
    setOperationAction(ISD::CTPOP          , MVT::i8   , Promote);
  } else {
    setOperationAction(ISD::CTPOP          , MVT::i8   , Expand);
    setOperationAction(ISD::CTPOP          , MVT::i16  , Expand);
    setOperationAction(ISD::CTPOP          , MVT::i32  , Expand);
    if (Subtarget->is64Bit())
      setOperationAction(ISD::CTPOP        , MVT::i64  , Expand);
  }
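  // READCYCLECOUNTER is custom lowered so it can be mapped onto RDTSC.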
524 setOperationAction(ISD::READCYCLECOUNTER , MVT::i64 , Custom);
526 if (!Subtarget->hasMOVBE())
527 setOperationAction(ISD::BSWAP , MVT::i16 , Expand);
529 // These should be promoted to a larger select which is supported.
530 setOperationAction(ISD::SELECT , MVT::i1 , Promote);
531 // X86 wants to expand cmov itself.
532 setOperationAction(ISD::SELECT , MVT::i8 , Custom);
533 setOperationAction(ISD::SELECT , MVT::i16 , Custom);
534 setOperationAction(ISD::SELECT , MVT::i32 , Custom);
535 setOperationAction(ISD::SELECT , MVT::f32 , Custom);
536 setOperationAction(ISD::SELECT , MVT::f64 , Custom);
537 setOperationAction(ISD::SELECT , MVT::f80 , Custom);
538 setOperationAction(ISD::SETCC , MVT::i8 , Custom);
539 setOperationAction(ISD::SETCC , MVT::i16 , Custom);
540 setOperationAction(ISD::SETCC , MVT::i32 , Custom);
541 setOperationAction(ISD::SETCC , MVT::f32 , Custom);
542 setOperationAction(ISD::SETCC , MVT::f64 , Custom);
543 setOperationAction(ISD::SETCC , MVT::f80 , Custom);
544 if (Subtarget->is64Bit()) {
545 setOperationAction(ISD::SELECT , MVT::i64 , Custom);
    setOperationAction(ISD::SETCC           , MVT::i64  , Custom);
  }
548 setOperationAction(ISD::EH_RETURN , MVT::Other, Custom);
549 // NOTE: EH_SJLJ_SETJMP/_LONGJMP supported here is NOT intended to support
550 // SjLj exception handling but a light-weight setjmp/longjmp replacement to
551 // support continuation, user-level threading, and etc.. As a result, no
552 // other SjLj exception interfaces are implemented and please don't build
553 // your own exception handling based on them.
554 // LLVM/Clang supports zero-cost DWARF exception handling.
555 setOperationAction(ISD::EH_SJLJ_SETJMP, MVT::i32, Custom);
556 setOperationAction(ISD::EH_SJLJ_LONGJMP, MVT::Other, Custom);
559 setOperationAction(ISD::ConstantPool , MVT::i32 , Custom);
560 setOperationAction(ISD::JumpTable , MVT::i32 , Custom);
561 setOperationAction(ISD::GlobalAddress , MVT::i32 , Custom);
562 setOperationAction(ISD::GlobalTLSAddress, MVT::i32 , Custom);
563 if (Subtarget->is64Bit())
564 setOperationAction(ISD::GlobalTLSAddress, MVT::i64, Custom);
565 setOperationAction(ISD::ExternalSymbol , MVT::i32 , Custom);
566 setOperationAction(ISD::BlockAddress , MVT::i32 , Custom);
567 if (Subtarget->is64Bit()) {
568 setOperationAction(ISD::ConstantPool , MVT::i64 , Custom);
569 setOperationAction(ISD::JumpTable , MVT::i64 , Custom);
570 setOperationAction(ISD::GlobalAddress , MVT::i64 , Custom);
571 setOperationAction(ISD::ExternalSymbol, MVT::i64 , Custom);
572 setOperationAction(ISD::BlockAddress , MVT::i64 , Custom);
  // 64-bit add, sub, shl, sra, srl (iff 32-bit x86)
575 setOperationAction(ISD::SHL_PARTS , MVT::i32 , Custom);
576 setOperationAction(ISD::SRA_PARTS , MVT::i32 , Custom);
577 setOperationAction(ISD::SRL_PARTS , MVT::i32 , Custom);
578 if (Subtarget->is64Bit()) {
579 setOperationAction(ISD::SHL_PARTS , MVT::i64 , Custom);
580 setOperationAction(ISD::SRA_PARTS , MVT::i64 , Custom);
    setOperationAction(ISD::SRL_PARTS       , MVT::i64  , Custom);
  }
584 if (Subtarget->hasSSE1())
585 setOperationAction(ISD::PREFETCH , MVT::Other, Legal);
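  // Atomic fences are custom lowered so the right barrier (e.g. MFENCE) can
  // be chosen for the requested ordering.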
587 setOperationAction(ISD::ATOMIC_FENCE , MVT::Other, Custom);
589 // Expand certain atomics
  for (unsigned i = 0; i != array_lengthof(IntVTs); ++i) {
    MVT VT = IntVTs[i];
    setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, VT, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_SUB, VT, Custom);
    setOperationAction(ISD::ATOMIC_STORE, VT, Custom);
  }
597 if (Subtarget->hasCmpxchg16b()) {
    setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, MVT::i128, Custom);
  }
601 // FIXME - use subtarget debug flags
602 if (!Subtarget->isTargetDarwin() && !Subtarget->isTargetELF() &&
603 !Subtarget->isTargetCygMing() && !Subtarget->isTargetWin64()) {
    setOperationAction(ISD::EH_LABEL, MVT::Other, Expand);
  }
607 if (Subtarget->is64Bit()) {
608 setExceptionPointerRegister(X86::RAX);
    setExceptionSelectorRegister(X86::RDX);
  } else {
    setExceptionPointerRegister(X86::EAX);
    setExceptionSelectorRegister(X86::EDX);
  }
614 setOperationAction(ISD::FRAME_TO_ARGS_OFFSET, MVT::i32, Custom);
615 setOperationAction(ISD::FRAME_TO_ARGS_OFFSET, MVT::i64, Custom);
617 setOperationAction(ISD::INIT_TRAMPOLINE, MVT::Other, Custom);
618 setOperationAction(ISD::ADJUST_TRAMPOLINE, MVT::Other, Custom);
620 setOperationAction(ISD::TRAP, MVT::Other, Legal);
621 setOperationAction(ISD::DEBUGTRAP, MVT::Other, Legal);
623 // VASTART needs to be custom lowered to use the VarArgsFrameIndex
624 setOperationAction(ISD::VASTART , MVT::Other, Custom);
625 setOperationAction(ISD::VAEND , MVT::Other, Expand);
626 if (Subtarget->is64Bit() && !Subtarget->isTargetWin64()) {
627 // TargetInfo::X86_64ABIBuiltinVaList
628 setOperationAction(ISD::VAARG , MVT::Other, Custom);
    setOperationAction(ISD::VACOPY          , MVT::Other, Custom);
  } else {
    // TargetInfo::CharPtrBuiltinVaList
    setOperationAction(ISD::VAARG           , MVT::Other, Expand);
    setOperationAction(ISD::VACOPY          , MVT::Other, Expand);
  }
636 setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
637 setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
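  // Dynamic stack allocation may need target-specific handling (e.g. stack
  // probing), so it is custom lowered.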
639 setOperationAction(ISD::DYNAMIC_STACKALLOC, getPointerTy(), Custom);
641 if (!TM.Options.UseSoftFloat && X86ScalarSSEf64) {
642 // f32 and f64 use SSE.
643 // Set up the FP register classes.
644 addRegisterClass(MVT::f32, &X86::FR32RegClass);
645 addRegisterClass(MVT::f64, &X86::FR64RegClass);
647 // Use ANDPD to simulate FABS.
648 setOperationAction(ISD::FABS , MVT::f64, Custom);
649 setOperationAction(ISD::FABS , MVT::f32, Custom);
651 // Use XORP to simulate FNEG.
652 setOperationAction(ISD::FNEG , MVT::f64, Custom);
653 setOperationAction(ISD::FNEG , MVT::f32, Custom);
655 // Use ANDPD and ORPD to simulate FCOPYSIGN.
656 setOperationAction(ISD::FCOPYSIGN, MVT::f64, Custom);
657 setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);
659 // Lower this to FGETSIGNx86 plus an AND.
660 setOperationAction(ISD::FGETSIGN, MVT::i64, Custom);
661 setOperationAction(ISD::FGETSIGN, MVT::i32, Custom);
663 // We don't support sin/cos/fmod
664 setOperationAction(ISD::FSIN , MVT::f64, Expand);
665 setOperationAction(ISD::FCOS , MVT::f64, Expand);
666 setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
667 setOperationAction(ISD::FSIN , MVT::f32, Expand);
668 setOperationAction(ISD::FCOS , MVT::f32, Expand);
669 setOperationAction(ISD::FSINCOS, MVT::f32, Expand);
    // Expand FP immediates into loads from the stack, except for the special
    // cases we handle.
673 addLegalFPImmediate(APFloat(+0.0)); // xorpd
674 addLegalFPImmediate(APFloat(+0.0f)); // xorps
675 } else if (!TM.Options.UseSoftFloat && X86ScalarSSEf32) {
676 // Use SSE for f32, x87 for f64.
677 // Set up the FP register classes.
678 addRegisterClass(MVT::f32, &X86::FR32RegClass);
679 addRegisterClass(MVT::f64, &X86::RFP64RegClass);
681 // Use ANDPS to simulate FABS.
682 setOperationAction(ISD::FABS , MVT::f32, Custom);
684 // Use XORP to simulate FNEG.
685 setOperationAction(ISD::FNEG , MVT::f32, Custom);
687 setOperationAction(ISD::UNDEF, MVT::f64, Expand);
689 // Use ANDPS and ORPS to simulate FCOPYSIGN.
690 setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
691 setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);
693 // We don't support sin/cos/fmod
694 setOperationAction(ISD::FSIN , MVT::f32, Expand);
695 setOperationAction(ISD::FCOS , MVT::f32, Expand);
696 setOperationAction(ISD::FSINCOS, MVT::f32, Expand);
698 // Special cases we handle for FP constants.
699 addLegalFPImmediate(APFloat(+0.0f)); // xorps
700 addLegalFPImmediate(APFloat(+0.0)); // FLD0
701 addLegalFPImmediate(APFloat(+1.0)); // FLD1
702 addLegalFPImmediate(APFloat(-0.0)); // FLD0/FCHS
703 addLegalFPImmediate(APFloat(-1.0)); // FLD1/FCHS
705 if (!TM.Options.UnsafeFPMath) {
706 setOperationAction(ISD::FSIN , MVT::f64, Expand);
707 setOperationAction(ISD::FCOS , MVT::f64, Expand);
      setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
    }
710 } else if (!TM.Options.UseSoftFloat) {
711 // f32 and f64 in x87.
712 // Set up the FP register classes.
713 addRegisterClass(MVT::f64, &X86::RFP64RegClass);
714 addRegisterClass(MVT::f32, &X86::RFP32RegClass);
716 setOperationAction(ISD::UNDEF, MVT::f64, Expand);
717 setOperationAction(ISD::UNDEF, MVT::f32, Expand);
718 setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
719 setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);
721 if (!TM.Options.UnsafeFPMath) {
722 setOperationAction(ISD::FSIN , MVT::f64, Expand);
723 setOperationAction(ISD::FSIN , MVT::f32, Expand);
724 setOperationAction(ISD::FCOS , MVT::f64, Expand);
725 setOperationAction(ISD::FCOS , MVT::f32, Expand);
726 setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
      setOperationAction(ISD::FSINCOS, MVT::f32, Expand);
    }
729 addLegalFPImmediate(APFloat(+0.0)); // FLD0
730 addLegalFPImmediate(APFloat(+1.0)); // FLD1
731 addLegalFPImmediate(APFloat(-0.0)); // FLD0/FCHS
732 addLegalFPImmediate(APFloat(-1.0)); // FLD1/FCHS
733 addLegalFPImmediate(APFloat(+0.0f)); // FLD0
734 addLegalFPImmediate(APFloat(+1.0f)); // FLD1
735 addLegalFPImmediate(APFloat(-0.0f)); // FLD0/FCHS
    addLegalFPImmediate(APFloat(-1.0f)); // FLD1/FCHS
  }
739 // We don't support FMA.
740 setOperationAction(ISD::FMA, MVT::f64, Expand);
741 setOperationAction(ISD::FMA, MVT::f32, Expand);
743 // Long double always uses X87.
744 if (!TM.Options.UseSoftFloat) {
745 addRegisterClass(MVT::f80, &X86::RFP80RegClass);
746 setOperationAction(ISD::UNDEF, MVT::f80, Expand);
747 setOperationAction(ISD::FCOPYSIGN, MVT::f80, Expand);
749 APFloat TmpFlt = APFloat::getZero(APFloat::x87DoubleExtended);
    addLegalFPImmediate(TmpFlt);  // FLD0
    TmpFlt.changeSign();
    addLegalFPImmediate(TmpFlt);  // FLD0/FCHS
    bool ignored;
    APFloat TmpFlt2(+1.0);
    TmpFlt2.convert(APFloat::x87DoubleExtended, APFloat::rmNearestTiesToEven,
                    &ignored);
758 addLegalFPImmediate(TmpFlt2); // FLD1
759 TmpFlt2.changeSign();
760 addLegalFPImmediate(TmpFlt2); // FLD1/FCHS
763 if (!TM.Options.UnsafeFPMath) {
764 setOperationAction(ISD::FSIN , MVT::f80, Expand);
765 setOperationAction(ISD::FCOS , MVT::f80, Expand);
      setOperationAction(ISD::FSINCOS, MVT::f80, Expand);
    }
769 setOperationAction(ISD::FFLOOR, MVT::f80, Expand);
770 setOperationAction(ISD::FCEIL, MVT::f80, Expand);
771 setOperationAction(ISD::FTRUNC, MVT::f80, Expand);
772 setOperationAction(ISD::FRINT, MVT::f80, Expand);
773 setOperationAction(ISD::FNEARBYINT, MVT::f80, Expand);
    setOperationAction(ISD::FMA, MVT::f80, Expand);
  }
777 // Always use a library call for pow.
778 setOperationAction(ISD::FPOW , MVT::f32 , Expand);
779 setOperationAction(ISD::FPOW , MVT::f64 , Expand);
780 setOperationAction(ISD::FPOW , MVT::f80 , Expand);
782 setOperationAction(ISD::FLOG, MVT::f80, Expand);
783 setOperationAction(ISD::FLOG2, MVT::f80, Expand);
784 setOperationAction(ISD::FLOG10, MVT::f80, Expand);
785 setOperationAction(ISD::FEXP, MVT::f80, Expand);
786 setOperationAction(ISD::FEXP2, MVT::f80, Expand);
787 setOperationAction(ISD::FMINNUM, MVT::f80, Expand);
788 setOperationAction(ISD::FMAXNUM, MVT::f80, Expand);
790 // First set operation action for all vector types to either promote
791 // (for widening) or expand (for scalarization). Then we will selectively
792 // turn on ones that can be effectively codegen'd.
793 for (MVT VT : MVT::vector_valuetypes()) {
794 setOperationAction(ISD::ADD , VT, Expand);
795 setOperationAction(ISD::SUB , VT, Expand);
796 setOperationAction(ISD::FADD, VT, Expand);
797 setOperationAction(ISD::FNEG, VT, Expand);
798 setOperationAction(ISD::FSUB, VT, Expand);
799 setOperationAction(ISD::MUL , VT, Expand);
800 setOperationAction(ISD::FMUL, VT, Expand);
801 setOperationAction(ISD::SDIV, VT, Expand);
802 setOperationAction(ISD::UDIV, VT, Expand);
803 setOperationAction(ISD::FDIV, VT, Expand);
804 setOperationAction(ISD::SREM, VT, Expand);
805 setOperationAction(ISD::UREM, VT, Expand);
806 setOperationAction(ISD::LOAD, VT, Expand);
807 setOperationAction(ISD::VECTOR_SHUFFLE, VT, Expand);
808 setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT,Expand);
809 setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Expand);
810 setOperationAction(ISD::EXTRACT_SUBVECTOR, VT,Expand);
811 setOperationAction(ISD::INSERT_SUBVECTOR, VT,Expand);
812 setOperationAction(ISD::FABS, VT, Expand);
813 setOperationAction(ISD::FSIN, VT, Expand);
814 setOperationAction(ISD::FSINCOS, VT, Expand);
815 setOperationAction(ISD::FCOS, VT, Expand);
816 setOperationAction(ISD::FSINCOS, VT, Expand);
817 setOperationAction(ISD::FREM, VT, Expand);
818 setOperationAction(ISD::FMA, VT, Expand);
819 setOperationAction(ISD::FPOWI, VT, Expand);
820 setOperationAction(ISD::FSQRT, VT, Expand);
821 setOperationAction(ISD::FCOPYSIGN, VT, Expand);
822 setOperationAction(ISD::FFLOOR, VT, Expand);
823 setOperationAction(ISD::FCEIL, VT, Expand);
824 setOperationAction(ISD::FTRUNC, VT, Expand);
825 setOperationAction(ISD::FRINT, VT, Expand);
826 setOperationAction(ISD::FNEARBYINT, VT, Expand);
827 setOperationAction(ISD::SMUL_LOHI, VT, Expand);
828 setOperationAction(ISD::MULHS, VT, Expand);
829 setOperationAction(ISD::UMUL_LOHI, VT, Expand);
830 setOperationAction(ISD::MULHU, VT, Expand);
831 setOperationAction(ISD::SDIVREM, VT, Expand);
832 setOperationAction(ISD::UDIVREM, VT, Expand);
833 setOperationAction(ISD::FPOW, VT, Expand);
834 setOperationAction(ISD::CTPOP, VT, Expand);
835 setOperationAction(ISD::CTTZ, VT, Expand);
836 setOperationAction(ISD::CTTZ_ZERO_UNDEF, VT, Expand);
837 setOperationAction(ISD::CTLZ, VT, Expand);
838 setOperationAction(ISD::CTLZ_ZERO_UNDEF, VT, Expand);
839 setOperationAction(ISD::SHL, VT, Expand);
840 setOperationAction(ISD::SRA, VT, Expand);
841 setOperationAction(ISD::SRL, VT, Expand);
842 setOperationAction(ISD::ROTL, VT, Expand);
843 setOperationAction(ISD::ROTR, VT, Expand);
844 setOperationAction(ISD::BSWAP, VT, Expand);
845 setOperationAction(ISD::SETCC, VT, Expand);
846 setOperationAction(ISD::FLOG, VT, Expand);
847 setOperationAction(ISD::FLOG2, VT, Expand);
848 setOperationAction(ISD::FLOG10, VT, Expand);
849 setOperationAction(ISD::FEXP, VT, Expand);
850 setOperationAction(ISD::FEXP2, VT, Expand);
851 setOperationAction(ISD::FP_TO_UINT, VT, Expand);
852 setOperationAction(ISD::FP_TO_SINT, VT, Expand);
853 setOperationAction(ISD::UINT_TO_FP, VT, Expand);
854 setOperationAction(ISD::SINT_TO_FP, VT, Expand);
855 setOperationAction(ISD::SIGN_EXTEND_INREG, VT,Expand);
856 setOperationAction(ISD::TRUNCATE, VT, Expand);
857 setOperationAction(ISD::SIGN_EXTEND, VT, Expand);
858 setOperationAction(ISD::ZERO_EXTEND, VT, Expand);
859 setOperationAction(ISD::ANY_EXTEND, VT, Expand);
860 setOperationAction(ISD::VSELECT, VT, Expand);
861 setOperationAction(ISD::SELECT_CC, VT, Expand);
862 for (MVT InnerVT : MVT::vector_valuetypes()) {
863 setTruncStoreAction(InnerVT, VT, Expand);
865 setLoadExtAction(ISD::SEXTLOAD, InnerVT, VT, Expand);
866 setLoadExtAction(ISD::ZEXTLOAD, InnerVT, VT, Expand);
868 // N.b. ISD::EXTLOAD legality is basically ignored except for i1-like
869 // types, we have to deal with them whether we ask for Expansion or not.
      // Setting Expand causes its own optimisation problems though, so leave
      // them legal.
      if (VT.getVectorElementType() == MVT::i1)
        setLoadExtAction(ISD::EXTLOAD, InnerVT, VT, Expand);
    }
  }
877 // FIXME: In order to prevent SSE instructions being expanded to MMX ones
878 // with -msoft-float, disable use of MMX as well.
879 if (!TM.Options.UseSoftFloat && Subtarget->hasMMX()) {
880 addRegisterClass(MVT::x86mmx, &X86::VR64RegClass);
    // No operations on x86mmx supported, everything uses intrinsics.
  }
884 // MMX-sized vectors (other than x86mmx) are expected to be expanded
885 // into smaller operations.
886 setOperationAction(ISD::MULHS, MVT::v8i8, Expand);
887 setOperationAction(ISD::MULHS, MVT::v4i16, Expand);
888 setOperationAction(ISD::MULHS, MVT::v2i32, Expand);
889 setOperationAction(ISD::MULHS, MVT::v1i64, Expand);
890 setOperationAction(ISD::AND, MVT::v8i8, Expand);
891 setOperationAction(ISD::AND, MVT::v4i16, Expand);
892 setOperationAction(ISD::AND, MVT::v2i32, Expand);
893 setOperationAction(ISD::AND, MVT::v1i64, Expand);
894 setOperationAction(ISD::OR, MVT::v8i8, Expand);
895 setOperationAction(ISD::OR, MVT::v4i16, Expand);
896 setOperationAction(ISD::OR, MVT::v2i32, Expand);
897 setOperationAction(ISD::OR, MVT::v1i64, Expand);
898 setOperationAction(ISD::XOR, MVT::v8i8, Expand);
899 setOperationAction(ISD::XOR, MVT::v4i16, Expand);
900 setOperationAction(ISD::XOR, MVT::v2i32, Expand);
901 setOperationAction(ISD::XOR, MVT::v1i64, Expand);
902 setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v8i8, Expand);
903 setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i16, Expand);
904 setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v2i32, Expand);
905 setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v1i64, Expand);
906 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v1i64, Expand);
907 setOperationAction(ISD::SELECT, MVT::v8i8, Expand);
908 setOperationAction(ISD::SELECT, MVT::v4i16, Expand);
909 setOperationAction(ISD::SELECT, MVT::v2i32, Expand);
910 setOperationAction(ISD::SELECT, MVT::v1i64, Expand);
911 setOperationAction(ISD::BITCAST, MVT::v8i8, Expand);
912 setOperationAction(ISD::BITCAST, MVT::v4i16, Expand);
913 setOperationAction(ISD::BITCAST, MVT::v2i32, Expand);
914 setOperationAction(ISD::BITCAST, MVT::v1i64, Expand);
916 if (!TM.Options.UseSoftFloat && Subtarget->hasSSE1()) {
917 addRegisterClass(MVT::v4f32, &X86::VR128RegClass);
919 setOperationAction(ISD::FADD, MVT::v4f32, Legal);
920 setOperationAction(ISD::FSUB, MVT::v4f32, Legal);
921 setOperationAction(ISD::FMUL, MVT::v4f32, Legal);
922 setOperationAction(ISD::FDIV, MVT::v4f32, Legal);
923 setOperationAction(ISD::FSQRT, MVT::v4f32, Legal);
924 setOperationAction(ISD::FNEG, MVT::v4f32, Custom);
925 setOperationAction(ISD::FABS, MVT::v4f32, Custom);
926 setOperationAction(ISD::LOAD, MVT::v4f32, Legal);
927 setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom);
928 setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4f32, Custom);
929 setOperationAction(ISD::VSELECT, MVT::v4f32, Custom);
930 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Custom);
931 setOperationAction(ISD::SELECT, MVT::v4f32, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::v4i32, Custom);
  }
935 if (!TM.Options.UseSoftFloat && Subtarget->hasSSE2()) {
936 addRegisterClass(MVT::v2f64, &X86::VR128RegClass);
938 // FIXME: Unfortunately, -soft-float and -no-implicit-float mean XMM
939 // registers cannot be used even for integer operations.
940 addRegisterClass(MVT::v16i8, &X86::VR128RegClass);
941 addRegisterClass(MVT::v8i16, &X86::VR128RegClass);
942 addRegisterClass(MVT::v4i32, &X86::VR128RegClass);
943 addRegisterClass(MVT::v2i64, &X86::VR128RegClass);
945 setOperationAction(ISD::ADD, MVT::v16i8, Legal);
946 setOperationAction(ISD::ADD, MVT::v8i16, Legal);
947 setOperationAction(ISD::ADD, MVT::v4i32, Legal);
948 setOperationAction(ISD::ADD, MVT::v2i64, Legal);
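    // There is no full 32-bit or 64-bit element multiply in SSE2; these are
    // custom lowered using PMULUDQ-based sequences.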
949 setOperationAction(ISD::MUL, MVT::v4i32, Custom);
950 setOperationAction(ISD::MUL, MVT::v2i64, Custom);
951 setOperationAction(ISD::UMUL_LOHI, MVT::v4i32, Custom);
952 setOperationAction(ISD::SMUL_LOHI, MVT::v4i32, Custom);
953 setOperationAction(ISD::MULHU, MVT::v8i16, Legal);
954 setOperationAction(ISD::MULHS, MVT::v8i16, Legal);
955 setOperationAction(ISD::SUB, MVT::v16i8, Legal);
956 setOperationAction(ISD::SUB, MVT::v8i16, Legal);
957 setOperationAction(ISD::SUB, MVT::v4i32, Legal);
958 setOperationAction(ISD::SUB, MVT::v2i64, Legal);
959 setOperationAction(ISD::MUL, MVT::v8i16, Legal);
960 setOperationAction(ISD::FADD, MVT::v2f64, Legal);
961 setOperationAction(ISD::FSUB, MVT::v2f64, Legal);
962 setOperationAction(ISD::FMUL, MVT::v2f64, Legal);
963 setOperationAction(ISD::FDIV, MVT::v2f64, Legal);
964 setOperationAction(ISD::FSQRT, MVT::v2f64, Legal);
965 setOperationAction(ISD::FNEG, MVT::v2f64, Custom);
966 setOperationAction(ISD::FABS, MVT::v2f64, Custom);
968 setOperationAction(ISD::SETCC, MVT::v2i64, Custom);
969 setOperationAction(ISD::SETCC, MVT::v16i8, Custom);
970 setOperationAction(ISD::SETCC, MVT::v8i16, Custom);
971 setOperationAction(ISD::SETCC, MVT::v4i32, Custom);
973 setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v16i8, Custom);
974 setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v8i16, Custom);
975 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i16, Custom);
976 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i32, Custom);
977 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom);
979 // Only provide customized ctpop vector bit twiddling for vector types we
980 // know to perform better than using the popcnt instructions on each vector
981 // element. If popcnt isn't supported, always provide the custom version.
982 if (!Subtarget->hasPOPCNT()) {
983 setOperationAction(ISD::CTPOP, MVT::v4i32, Custom);
      setOperationAction(ISD::CTPOP, MVT::v2i64, Custom);
    }
987 // Custom lower build_vector, vector_shuffle, and extract_vector_elt.
988 for (int i = MVT::v16i8; i != MVT::v2i64; ++i) {
989 MVT VT = (MVT::SimpleValueType)i;
990 // Do not attempt to custom lower non-power-of-2 vectors
      if (!isPowerOf2_32(VT.getVectorNumElements()))
        continue;

      // Do not attempt to custom lower non-128-bit vectors
      if (!VT.is128BitVector())
        continue;

996 setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
997 setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
998 setOperationAction(ISD::VSELECT, VT, Custom);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
    }
1002 // We support custom legalizing of sext and anyext loads for specific
1003 // memory vector types which we can load as a scalar (or sequence of
1004 // scalars) and extend in-register to a legal 128-bit vector type. For sext
1005 // loads these must work with a single scalar load.
1006 for (MVT VT : MVT::integer_vector_valuetypes()) {
1007 setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v4i8, Custom);
1008 setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v4i16, Custom);
1009 setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v8i8, Custom);
1010 setLoadExtAction(ISD::EXTLOAD, VT, MVT::v2i8, Custom);
1011 setLoadExtAction(ISD::EXTLOAD, VT, MVT::v2i16, Custom);
1012 setLoadExtAction(ISD::EXTLOAD, VT, MVT::v2i32, Custom);
1013 setLoadExtAction(ISD::EXTLOAD, VT, MVT::v4i8, Custom);
1014 setLoadExtAction(ISD::EXTLOAD, VT, MVT::v4i16, Custom);
      setLoadExtAction(ISD::EXTLOAD, VT, MVT::v8i8, Custom);
    }
1018 setOperationAction(ISD::BUILD_VECTOR, MVT::v2f64, Custom);
1019 setOperationAction(ISD::BUILD_VECTOR, MVT::v2i64, Custom);
1020 setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2f64, Custom);
1021 setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2i64, Custom);
1022 setOperationAction(ISD::VSELECT, MVT::v2f64, Custom);
1023 setOperationAction(ISD::VSELECT, MVT::v2i64, Custom);
1024 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2f64, Custom);
1025 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Custom);
1027 if (Subtarget->is64Bit()) {
1028 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2i64, Custom);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i64, Custom);
    }
1032 // Promote v16i8, v8i16, v4i32 load, select, and, or, xor to v2i64.
1033 for (int i = MVT::v16i8; i != MVT::v2i64; ++i) {
1034 MVT VT = (MVT::SimpleValueType)i;
1036 // Do not attempt to promote non-128-bit vectors
      if (!VT.is128BitVector())
        continue;

1040 setOperationAction(ISD::AND, VT, Promote);
1041 AddPromotedToType (ISD::AND, VT, MVT::v2i64);
1042 setOperationAction(ISD::OR, VT, Promote);
1043 AddPromotedToType (ISD::OR, VT, MVT::v2i64);
1044 setOperationAction(ISD::XOR, VT, Promote);
1045 AddPromotedToType (ISD::XOR, VT, MVT::v2i64);
1046 setOperationAction(ISD::LOAD, VT, Promote);
1047 AddPromotedToType (ISD::LOAD, VT, MVT::v2i64);
1048 setOperationAction(ISD::SELECT, VT, Promote);
      AddPromotedToType (ISD::SELECT, VT, MVT::v2i64);
    }
1052 // Custom lower v2i64 and v2f64 selects.
1053 setOperationAction(ISD::LOAD, MVT::v2f64, Legal);
1054 setOperationAction(ISD::LOAD, MVT::v2i64, Legal);
1055 setOperationAction(ISD::SELECT, MVT::v2f64, Custom);
1056 setOperationAction(ISD::SELECT, MVT::v2i64, Custom);
1058 setOperationAction(ISD::FP_TO_SINT, MVT::v4i32, Legal);
1059 setOperationAction(ISD::SINT_TO_FP, MVT::v4i32, Legal);
1061 setOperationAction(ISD::UINT_TO_FP, MVT::v4i8, Custom);
1062 setOperationAction(ISD::UINT_TO_FP, MVT::v4i16, Custom);
    // As there is no 64-bit GPR available, we need to build a special custom
    // sequence to convert from v2i32 to v2f32.
1065 if (!Subtarget->is64Bit())
1066 setOperationAction(ISD::UINT_TO_FP, MVT::v2f32, Custom);
1068 setOperationAction(ISD::FP_EXTEND, MVT::v2f32, Custom);
1069 setOperationAction(ISD::FP_ROUND, MVT::v2f32, Custom);
1071 for (MVT VT : MVT::fp_vector_valuetypes())
1072 setLoadExtAction(ISD::EXTLOAD, VT, MVT::v2f32, Legal);
1074 setOperationAction(ISD::BITCAST, MVT::v2i32, Custom);
1075 setOperationAction(ISD::BITCAST, MVT::v4i16, Custom);
    setOperationAction(ISD::BITCAST, MVT::v8i8, Custom);
  }
1079 if (!TM.Options.UseSoftFloat && Subtarget->hasSSE41()) {
1080 setOperationAction(ISD::FFLOOR, MVT::f32, Legal);
1081 setOperationAction(ISD::FCEIL, MVT::f32, Legal);
1082 setOperationAction(ISD::FTRUNC, MVT::f32, Legal);
1083 setOperationAction(ISD::FRINT, MVT::f32, Legal);
1084 setOperationAction(ISD::FNEARBYINT, MVT::f32, Legal);
1085 setOperationAction(ISD::FFLOOR, MVT::f64, Legal);
1086 setOperationAction(ISD::FCEIL, MVT::f64, Legal);
1087 setOperationAction(ISD::FTRUNC, MVT::f64, Legal);
1088 setOperationAction(ISD::FRINT, MVT::f64, Legal);
1089 setOperationAction(ISD::FNEARBYINT, MVT::f64, Legal);
1091 setOperationAction(ISD::FFLOOR, MVT::v4f32, Legal);
1092 setOperationAction(ISD::FCEIL, MVT::v4f32, Legal);
1093 setOperationAction(ISD::FTRUNC, MVT::v4f32, Legal);
1094 setOperationAction(ISD::FRINT, MVT::v4f32, Legal);
1095 setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Legal);
1096 setOperationAction(ISD::FFLOOR, MVT::v2f64, Legal);
1097 setOperationAction(ISD::FCEIL, MVT::v2f64, Legal);
1098 setOperationAction(ISD::FTRUNC, MVT::v2f64, Legal);
1099 setOperationAction(ISD::FRINT, MVT::v2f64, Legal);
1100 setOperationAction(ISD::FNEARBYINT, MVT::v2f64, Legal);
1102 // FIXME: Do we need to handle scalar-to-vector here?
1103 setOperationAction(ISD::MUL, MVT::v4i32, Legal);
    // We directly match byte blends in the backend as they match the VSELECT
    // condition form.
1107 setOperationAction(ISD::VSELECT, MVT::v16i8, Legal);
1109 // SSE41 brings specific instructions for doing vector sign extend even in
1110 // cases where we don't have SRA.
1111 for (MVT VT : MVT::integer_vector_valuetypes()) {
1112 setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v2i8, Custom);
1113 setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v2i16, Custom);
      setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v2i32, Custom);
    }
1117 // SSE41 also has vector sign/zero extending loads, PMOV[SZ]X
1118 setLoadExtAction(ISD::SEXTLOAD, MVT::v8i16, MVT::v8i8, Legal);
1119 setLoadExtAction(ISD::SEXTLOAD, MVT::v4i32, MVT::v4i8, Legal);
1120 setLoadExtAction(ISD::SEXTLOAD, MVT::v2i64, MVT::v2i8, Legal);
1121 setLoadExtAction(ISD::SEXTLOAD, MVT::v4i32, MVT::v4i16, Legal);
1122 setLoadExtAction(ISD::SEXTLOAD, MVT::v2i64, MVT::v2i16, Legal);
1123 setLoadExtAction(ISD::SEXTLOAD, MVT::v2i64, MVT::v2i32, Legal);
1125 setLoadExtAction(ISD::ZEXTLOAD, MVT::v8i16, MVT::v8i8, Legal);
1126 setLoadExtAction(ISD::ZEXTLOAD, MVT::v4i32, MVT::v4i8, Legal);
1127 setLoadExtAction(ISD::ZEXTLOAD, MVT::v2i64, MVT::v2i8, Legal);
1128 setLoadExtAction(ISD::ZEXTLOAD, MVT::v4i32, MVT::v4i16, Legal);
1129 setLoadExtAction(ISD::ZEXTLOAD, MVT::v2i64, MVT::v2i16, Legal);
1130 setLoadExtAction(ISD::ZEXTLOAD, MVT::v2i64, MVT::v2i32, Legal);
    // i8 and i16 vectors are custom because the source register and source
    // memory operand types are not the same width.  f32 vectors are custom
    // since the immediate controlling the insert encodes additional
    // information.
1136 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v16i8, Custom);
1137 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i16, Custom);
1138 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i32, Custom);
1139 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom);
1141 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v16i8, Custom);
1142 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v8i16, Custom);
1143 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4i32, Custom);
1144 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Custom);
1146 // FIXME: these should be Legal, but that's only for the case where
1147 // the index is constant. For now custom expand to deal with that.
1148 if (Subtarget->is64Bit()) {
1149 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2i64, Custom);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i64, Custom);
    }
  }
1154 if (Subtarget->hasSSE2()) {
1155 setOperationAction(ISD::SRL, MVT::v8i16, Custom);
1156 setOperationAction(ISD::SRL, MVT::v16i8, Custom);
1158 setOperationAction(ISD::SHL, MVT::v8i16, Custom);
1159 setOperationAction(ISD::SHL, MVT::v16i8, Custom);
1161 setOperationAction(ISD::SRA, MVT::v8i16, Custom);
1162 setOperationAction(ISD::SRA, MVT::v16i8, Custom);
    // In the customized shift lowering, the legal cases in AVX2 will be
    // recognized.
1166 setOperationAction(ISD::SRL, MVT::v2i64, Custom);
1167 setOperationAction(ISD::SRL, MVT::v4i32, Custom);
1169 setOperationAction(ISD::SHL, MVT::v2i64, Custom);
1170 setOperationAction(ISD::SHL, MVT::v4i32, Custom);
    setOperationAction(ISD::SRA, MVT::v4i32, Custom);
  }
1175 if (!TM.Options.UseSoftFloat && Subtarget->hasFp256()) {
1176 addRegisterClass(MVT::v32i8, &X86::VR256RegClass);
1177 addRegisterClass(MVT::v16i16, &X86::VR256RegClass);
1178 addRegisterClass(MVT::v8i32, &X86::VR256RegClass);
1179 addRegisterClass(MVT::v8f32, &X86::VR256RegClass);
1180 addRegisterClass(MVT::v4i64, &X86::VR256RegClass);
1181 addRegisterClass(MVT::v4f64, &X86::VR256RegClass);
1183 setOperationAction(ISD::LOAD, MVT::v8f32, Legal);
1184 setOperationAction(ISD::LOAD, MVT::v4f64, Legal);
1185 setOperationAction(ISD::LOAD, MVT::v4i64, Legal);
1187 setOperationAction(ISD::FADD, MVT::v8f32, Legal);
1188 setOperationAction(ISD::FSUB, MVT::v8f32, Legal);
1189 setOperationAction(ISD::FMUL, MVT::v8f32, Legal);
1190 setOperationAction(ISD::FDIV, MVT::v8f32, Legal);
1191 setOperationAction(ISD::FSQRT, MVT::v8f32, Legal);
1192 setOperationAction(ISD::FFLOOR, MVT::v8f32, Legal);
1193 setOperationAction(ISD::FCEIL, MVT::v8f32, Legal);
1194 setOperationAction(ISD::FTRUNC, MVT::v8f32, Legal);
1195 setOperationAction(ISD::FRINT, MVT::v8f32, Legal);
1196 setOperationAction(ISD::FNEARBYINT, MVT::v8f32, Legal);
1197 setOperationAction(ISD::FNEG, MVT::v8f32, Custom);
1198 setOperationAction(ISD::FABS, MVT::v8f32, Custom);
1200 setOperationAction(ISD::FADD, MVT::v4f64, Legal);
1201 setOperationAction(ISD::FSUB, MVT::v4f64, Legal);
1202 setOperationAction(ISD::FMUL, MVT::v4f64, Legal);
1203 setOperationAction(ISD::FDIV, MVT::v4f64, Legal);
1204 setOperationAction(ISD::FSQRT, MVT::v4f64, Legal);
1205 setOperationAction(ISD::FFLOOR, MVT::v4f64, Legal);
1206 setOperationAction(ISD::FCEIL, MVT::v4f64, Legal);
1207 setOperationAction(ISD::FTRUNC, MVT::v4f64, Legal);
1208 setOperationAction(ISD::FRINT, MVT::v4f64, Legal);
1209 setOperationAction(ISD::FNEARBYINT, MVT::v4f64, Legal);
1210 setOperationAction(ISD::FNEG, MVT::v4f64, Custom);
1211 setOperationAction(ISD::FABS, MVT::v4f64, Custom);
1213 // (fp_to_int:v8i16 (v8f32 ..)) requires the result type to be promoted
1214 // even though v8i16 is a legal type.
1215 setOperationAction(ISD::FP_TO_SINT, MVT::v8i16, Promote);
1216 setOperationAction(ISD::FP_TO_UINT, MVT::v8i16, Promote);
1217 setOperationAction(ISD::FP_TO_SINT, MVT::v8i32, Legal);
1219 setOperationAction(ISD::SINT_TO_FP, MVT::v8i16, Promote);
1220 setOperationAction(ISD::SINT_TO_FP, MVT::v8i32, Legal);
1221 setOperationAction(ISD::FP_ROUND, MVT::v4f32, Legal);
1223 setOperationAction(ISD::UINT_TO_FP, MVT::v8i8, Custom);
1224 setOperationAction(ISD::UINT_TO_FP, MVT::v8i16, Custom);
1226 for (MVT VT : MVT::fp_vector_valuetypes())
1227 setLoadExtAction(ISD::EXTLOAD, VT, MVT::v4f32, Legal);
1229 setOperationAction(ISD::SRL, MVT::v16i16, Custom);
1230 setOperationAction(ISD::SRL, MVT::v32i8, Custom);
1232 setOperationAction(ISD::SHL, MVT::v16i16, Custom);
1233 setOperationAction(ISD::SHL, MVT::v32i8, Custom);
1235 setOperationAction(ISD::SRA, MVT::v16i16, Custom);
1236 setOperationAction(ISD::SRA, MVT::v32i8, Custom);
1238 setOperationAction(ISD::SETCC, MVT::v32i8, Custom);
1239 setOperationAction(ISD::SETCC, MVT::v16i16, Custom);
1240 setOperationAction(ISD::SETCC, MVT::v8i32, Custom);
1241 setOperationAction(ISD::SETCC, MVT::v4i64, Custom);
1243 setOperationAction(ISD::SELECT, MVT::v4f64, Custom);
1244 setOperationAction(ISD::SELECT, MVT::v4i64, Custom);
1245 setOperationAction(ISD::SELECT, MVT::v8f32, Custom);
1247 setOperationAction(ISD::SIGN_EXTEND, MVT::v4i64, Custom);
1248 setOperationAction(ISD::SIGN_EXTEND, MVT::v8i32, Custom);
1249 setOperationAction(ISD::SIGN_EXTEND, MVT::v16i16, Custom);
1250 setOperationAction(ISD::ZERO_EXTEND, MVT::v4i64, Custom);
1251 setOperationAction(ISD::ZERO_EXTEND, MVT::v8i32, Custom);
1252 setOperationAction(ISD::ZERO_EXTEND, MVT::v16i16, Custom);
1253 setOperationAction(ISD::ANY_EXTEND, MVT::v4i64, Custom);
1254 setOperationAction(ISD::ANY_EXTEND, MVT::v8i32, Custom);
1255 setOperationAction(ISD::ANY_EXTEND, MVT::v16i16, Custom);
1256 setOperationAction(ISD::TRUNCATE, MVT::v16i8, Custom);
1257 setOperationAction(ISD::TRUNCATE, MVT::v8i16, Custom);
1258 setOperationAction(ISD::TRUNCATE, MVT::v4i32, Custom);
1260 if (Subtarget->hasFMA() || Subtarget->hasFMA4()) {
1261 setOperationAction(ISD::FMA, MVT::v8f32, Legal);
1262 setOperationAction(ISD::FMA, MVT::v4f64, Legal);
1263 setOperationAction(ISD::FMA, MVT::v4f32, Legal);
1264 setOperationAction(ISD::FMA, MVT::v2f64, Legal);
1265 setOperationAction(ISD::FMA, MVT::f32, Legal);
      setOperationAction(ISD::FMA, MVT::f64, Legal);
    }
1269 if (Subtarget->hasInt256()) {
1270 setOperationAction(ISD::ADD, MVT::v4i64, Legal);
1271 setOperationAction(ISD::ADD, MVT::v8i32, Legal);
1272 setOperationAction(ISD::ADD, MVT::v16i16, Legal);
1273 setOperationAction(ISD::ADD, MVT::v32i8, Legal);
1275 setOperationAction(ISD::SUB, MVT::v4i64, Legal);
1276 setOperationAction(ISD::SUB, MVT::v8i32, Legal);
1277 setOperationAction(ISD::SUB, MVT::v16i16, Legal);
1278 setOperationAction(ISD::SUB, MVT::v32i8, Legal);
1280 setOperationAction(ISD::MUL, MVT::v4i64, Custom);
1281 setOperationAction(ISD::MUL, MVT::v8i32, Legal);
1282 setOperationAction(ISD::MUL, MVT::v16i16, Legal);
1283 // Don't lower v32i8 because there is no 128-bit byte mul
1285 setOperationAction(ISD::UMUL_LOHI, MVT::v8i32, Custom);
1286 setOperationAction(ISD::SMUL_LOHI, MVT::v8i32, Custom);
1287 setOperationAction(ISD::MULHU, MVT::v16i16, Legal);
1288 setOperationAction(ISD::MULHS, MVT::v16i16, Legal);
1290 // The custom lowering for UINT_TO_FP for v8i32 becomes interesting
1291 // when we have a 256bit-wide blend with immediate.
1292 setOperationAction(ISD::UINT_TO_FP, MVT::v8i32, Custom);
1294 // Only provide customized ctpop vector bit twiddling for vector types we
1295 // know to perform better than using the popcnt instructions on each
      // vector element. If popcnt isn't supported, always provide the custom
      // version.
1298 if (!Subtarget->hasPOPCNT())
1299 setOperationAction(ISD::CTPOP, MVT::v4i64, Custom);
1301 // Custom CTPOP always performs better on natively supported v8i32
1302 setOperationAction(ISD::CTPOP, MVT::v8i32, Custom);
1304 // AVX2 also has wider vector sign/zero extending loads, VPMOV[SZ]X
1305 setLoadExtAction(ISD::SEXTLOAD, MVT::v16i16, MVT::v16i8, Legal);
1306 setLoadExtAction(ISD::SEXTLOAD, MVT::v8i32, MVT::v8i8, Legal);
1307 setLoadExtAction(ISD::SEXTLOAD, MVT::v4i64, MVT::v4i8, Legal);
1308 setLoadExtAction(ISD::SEXTLOAD, MVT::v8i32, MVT::v8i16, Legal);
1309 setLoadExtAction(ISD::SEXTLOAD, MVT::v4i64, MVT::v4i16, Legal);
1310 setLoadExtAction(ISD::SEXTLOAD, MVT::v4i64, MVT::v4i32, Legal);
1312 setLoadExtAction(ISD::ZEXTLOAD, MVT::v16i16, MVT::v16i8, Legal);
1313 setLoadExtAction(ISD::ZEXTLOAD, MVT::v8i32, MVT::v8i8, Legal);
1314 setLoadExtAction(ISD::ZEXTLOAD, MVT::v4i64, MVT::v4i8, Legal);
1315 setLoadExtAction(ISD::ZEXTLOAD, MVT::v8i32, MVT::v8i16, Legal);
1316 setLoadExtAction(ISD::ZEXTLOAD, MVT::v4i64, MVT::v4i16, Legal);
1317 setLoadExtAction(ISD::ZEXTLOAD, MVT::v4i64, MVT::v4i32, Legal);
1319 setOperationAction(ISD::ADD, MVT::v4i64, Custom);
1320 setOperationAction(ISD::ADD, MVT::v8i32, Custom);
1321 setOperationAction(ISD::ADD, MVT::v16i16, Custom);
1322 setOperationAction(ISD::ADD, MVT::v32i8, Custom);
1324 setOperationAction(ISD::SUB, MVT::v4i64, Custom);
1325 setOperationAction(ISD::SUB, MVT::v8i32, Custom);
1326 setOperationAction(ISD::SUB, MVT::v16i16, Custom);
1327 setOperationAction(ISD::SUB, MVT::v32i8, Custom);
1329 setOperationAction(ISD::MUL, MVT::v4i64, Custom);
1330 setOperationAction(ISD::MUL, MVT::v8i32, Custom);
1331 setOperationAction(ISD::MUL, MVT::v16i16, Custom);
1332 // Don't lower v32i8 because there is no 128-bit byte mul
1335 // In the customized shift lowering, the legal cases in AVX2 will be recognized.
1337 setOperationAction(ISD::SRL, MVT::v4i64, Custom);
1338 setOperationAction(ISD::SRL, MVT::v8i32, Custom);
1340 setOperationAction(ISD::SHL, MVT::v4i64, Custom);
1341 setOperationAction(ISD::SHL, MVT::v8i32, Custom);
1343 setOperationAction(ISD::SRA, MVT::v8i32, Custom);
1345 // Custom lower several nodes for 256-bit types.
1346 for (MVT VT : MVT::vector_valuetypes()) {
1347 if (VT.getScalarSizeInBits() >= 32) {
1348 setOperationAction(ISD::MLOAD, VT, Legal);
1349 setOperationAction(ISD::MSTORE, VT, Legal);
1351 // Extract subvector is special because the value type
1352 // (result) is 128-bit but the source is 256-bit wide.
1353 if (VT.is128BitVector()) {
1354 setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
1356 // Do not attempt to custom lower other non-256-bit vectors
1357 if (!VT.is256BitVector())
1360 setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
1361 setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
1362 setOperationAction(ISD::VSELECT, VT, Custom);
1363 setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
1364 setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
1365 setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Custom);
1366 setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
1367 setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
1370 if (Subtarget->hasInt256())
1371 setOperationAction(ISD::VSELECT, MVT::v32i8, Legal);
1374 // Promote v32i8, v16i16, v8i32 select, and, or, xor to v4i64.
1375 for (int i = MVT::v32i8; i != MVT::v4i64; ++i) {
1376 MVT VT = (MVT::SimpleValueType)i;
1378 // Do not attempt to promote non-256-bit vectors
1379 if (!VT.is256BitVector())
1382 setOperationAction(ISD::AND, VT, Promote);
1383 AddPromotedToType (ISD::AND, VT, MVT::v4i64);
1384 setOperationAction(ISD::OR, VT, Promote);
1385 AddPromotedToType (ISD::OR, VT, MVT::v4i64);
1386 setOperationAction(ISD::XOR, VT, Promote);
1387 AddPromotedToType (ISD::XOR, VT, MVT::v4i64);
1388 setOperationAction(ISD::LOAD, VT, Promote);
1389 AddPromotedToType (ISD::LOAD, VT, MVT::v4i64);
1390 setOperationAction(ISD::SELECT, VT, Promote);
1391 AddPromotedToType (ISD::SELECT, VT, MVT::v4i64);
1395 if (!TM.Options.UseSoftFloat && Subtarget->hasAVX512()) {
1396 addRegisterClass(MVT::v16i32, &X86::VR512RegClass);
1397 addRegisterClass(MVT::v16f32, &X86::VR512RegClass);
1398 addRegisterClass(MVT::v8i64, &X86::VR512RegClass);
1399 addRegisterClass(MVT::v8f64, &X86::VR512RegClass);
1401 addRegisterClass(MVT::i1, &X86::VK1RegClass);
1402 addRegisterClass(MVT::v8i1, &X86::VK8RegClass);
1403 addRegisterClass(MVT::v16i1, &X86::VK16RegClass);
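// Scalar i1 and the vXi1 vector types live in the AVX-512 mask registers
// (k0-k7), modeled here by the VK* register classes.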
1405 for (MVT VT : MVT::fp_vector_valuetypes())
1406 setLoadExtAction(ISD::EXTLOAD, VT, MVT::v8f32, Legal);
1408 setOperationAction(ISD::BR_CC, MVT::i1, Expand);
1409 setOperationAction(ISD::SETCC, MVT::i1, Custom);
1410 setOperationAction(ISD::XOR, MVT::i1, Legal);
1411 setOperationAction(ISD::OR, MVT::i1, Legal);
1412 setOperationAction(ISD::AND, MVT::i1, Legal);
1413 setOperationAction(ISD::LOAD, MVT::v16f32, Legal);
1414 setOperationAction(ISD::LOAD, MVT::v8f64, Legal);
1415 setOperationAction(ISD::LOAD, MVT::v8i64, Legal);
1416 setOperationAction(ISD::LOAD, MVT::v16i32, Legal);
1417 setOperationAction(ISD::LOAD, MVT::v16i1, Legal);
1419 setOperationAction(ISD::FADD, MVT::v16f32, Legal);
1420 setOperationAction(ISD::FSUB, MVT::v16f32, Legal);
1421 setOperationAction(ISD::FMUL, MVT::v16f32, Legal);
1422 setOperationAction(ISD::FDIV, MVT::v16f32, Legal);
1423 setOperationAction(ISD::FSQRT, MVT::v16f32, Legal);
1424 setOperationAction(ISD::FNEG, MVT::v16f32, Custom);
1426 setOperationAction(ISD::FADD, MVT::v8f64, Legal);
1427 setOperationAction(ISD::FSUB, MVT::v8f64, Legal);
1428 setOperationAction(ISD::FMUL, MVT::v8f64, Legal);
1429 setOperationAction(ISD::FDIV, MVT::v8f64, Legal);
1430 setOperationAction(ISD::FSQRT, MVT::v8f64, Legal);
1431 setOperationAction(ISD::FNEG, MVT::v8f64, Custom);
1432 setOperationAction(ISD::FMA, MVT::v8f64, Legal);
1433 setOperationAction(ISD::FMA, MVT::v16f32, Legal);
1435 setOperationAction(ISD::FP_TO_SINT, MVT::i32, Legal);
1436 setOperationAction(ISD::FP_TO_UINT, MVT::i32, Legal);
1437 setOperationAction(ISD::SINT_TO_FP, MVT::i32, Legal);
1438 setOperationAction(ISD::UINT_TO_FP, MVT::i32, Legal);
1439 if (Subtarget->is64Bit()) {
1440 setOperationAction(ISD::FP_TO_UINT, MVT::i64, Legal);
1441 setOperationAction(ISD::FP_TO_SINT, MVT::i64, Legal);
1442 setOperationAction(ISD::SINT_TO_FP, MVT::i64, Legal);
1443 setOperationAction(ISD::UINT_TO_FP, MVT::i64, Legal);
1445 setOperationAction(ISD::FP_TO_SINT, MVT::v16i32, Legal);
1446 setOperationAction(ISD::FP_TO_UINT, MVT::v16i32, Legal);
1447 setOperationAction(ISD::FP_TO_UINT, MVT::v8i32, Legal);
1448 setOperationAction(ISD::FP_TO_UINT, MVT::v4i32, Legal);
1449 setOperationAction(ISD::SINT_TO_FP, MVT::v16i32, Legal);
1450 setOperationAction(ISD::SINT_TO_FP, MVT::v8i1, Custom);
1451 setOperationAction(ISD::SINT_TO_FP, MVT::v16i1, Custom);
1452 setOperationAction(ISD::SINT_TO_FP, MVT::v16i8, Promote);
1453 setOperationAction(ISD::SINT_TO_FP, MVT::v16i16, Promote);
1454 setOperationAction(ISD::UINT_TO_FP, MVT::v16i32, Legal);
1455 setOperationAction(ISD::UINT_TO_FP, MVT::v8i32, Legal);
1456 setOperationAction(ISD::UINT_TO_FP, MVT::v4i32, Legal);
1457 setOperationAction(ISD::FP_ROUND, MVT::v8f32, Legal);
1458 setOperationAction(ISD::FP_EXTEND, MVT::v8f32, Legal);
1460 setOperationAction(ISD::TRUNCATE, MVT::i1, Custom);
1461 setOperationAction(ISD::TRUNCATE, MVT::v16i8, Custom);
1462 setOperationAction(ISD::TRUNCATE, MVT::v8i32, Custom);
1463 setOperationAction(ISD::TRUNCATE, MVT::v8i1, Custom);
1464 setOperationAction(ISD::TRUNCATE, MVT::v16i1, Custom);
1465 setOperationAction(ISD::TRUNCATE, MVT::v16i16, Custom);
1466 setOperationAction(ISD::ZERO_EXTEND, MVT::v16i32, Custom);
1467 setOperationAction(ISD::ZERO_EXTEND, MVT::v8i64, Custom);
1468 setOperationAction(ISD::SIGN_EXTEND, MVT::v16i32, Custom);
1469 setOperationAction(ISD::SIGN_EXTEND, MVT::v8i64, Custom);
1470 setOperationAction(ISD::SIGN_EXTEND, MVT::v16i8, Custom);
1471 setOperationAction(ISD::SIGN_EXTEND, MVT::v8i16, Custom);
1472 setOperationAction(ISD::SIGN_EXTEND, MVT::v16i16, Custom);
1474 setOperationAction(ISD::FFLOOR, MVT::v16f32, Legal);
1475 setOperationAction(ISD::FFLOOR, MVT::v8f64, Legal);
1476 setOperationAction(ISD::FCEIL, MVT::v16f32, Legal);
1477 setOperationAction(ISD::FCEIL, MVT::v8f64, Legal);
1478 setOperationAction(ISD::FTRUNC, MVT::v16f32, Legal);
1479 setOperationAction(ISD::FTRUNC, MVT::v8f64, Legal);
1480 setOperationAction(ISD::FRINT, MVT::v16f32, Legal);
1481 setOperationAction(ISD::FRINT, MVT::v8f64, Legal);
1482 setOperationAction(ISD::FNEARBYINT, MVT::v16f32, Legal);
1483 setOperationAction(ISD::FNEARBYINT, MVT::v8f64, Legal);
1485 setOperationAction(ISD::CONCAT_VECTORS, MVT::v8f64, Custom);
1486 setOperationAction(ISD::CONCAT_VECTORS, MVT::v8i64, Custom);
1487 setOperationAction(ISD::CONCAT_VECTORS, MVT::v16f32, Custom);
1488 setOperationAction(ISD::CONCAT_VECTORS, MVT::v16i32, Custom);
1489 setOperationAction(ISD::CONCAT_VECTORS, MVT::v8i1, Custom);
1490 setOperationAction(ISD::CONCAT_VECTORS, MVT::v16i1, Legal);
1492 setOperationAction(ISD::SETCC, MVT::v16i1, Custom);
1493 setOperationAction(ISD::SETCC, MVT::v8i1, Custom);
1495 setOperationAction(ISD::MUL, MVT::v8i64, Custom);
1497 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v8i1, Custom);
1498 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v16i1, Custom);
1499 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v16i1, Custom);
1500 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i1, Custom);
1501 setOperationAction(ISD::BUILD_VECTOR, MVT::v8i1, Custom);
1502 setOperationAction(ISD::BUILD_VECTOR, MVT::v16i1, Custom);
1503 setOperationAction(ISD::SELECT, MVT::v8f64, Custom);
1504 setOperationAction(ISD::SELECT, MVT::v8i64, Custom);
1505 setOperationAction(ISD::SELECT, MVT::v16f32, Custom);
1507 setOperationAction(ISD::ADD, MVT::v8i64, Legal);
1508 setOperationAction(ISD::ADD, MVT::v16i32, Legal);
1510 setOperationAction(ISD::SUB, MVT::v8i64, Legal);
1511 setOperationAction(ISD::SUB, MVT::v16i32, Legal);
1513 setOperationAction(ISD::MUL, MVT::v16i32, Legal);
1515 setOperationAction(ISD::SRL, MVT::v8i64, Custom);
1516 setOperationAction(ISD::SRL, MVT::v16i32, Custom);
1518 setOperationAction(ISD::SHL, MVT::v8i64, Custom);
1519 setOperationAction(ISD::SHL, MVT::v16i32, Custom);
1521 setOperationAction(ISD::SRA, MVT::v8i64, Custom);
1522 setOperationAction(ISD::SRA, MVT::v16i32, Custom);
1524 setOperationAction(ISD::AND, MVT::v8i64, Legal);
1525 setOperationAction(ISD::OR, MVT::v8i64, Legal);
1526 setOperationAction(ISD::XOR, MVT::v8i64, Legal);
1527 setOperationAction(ISD::AND, MVT::v16i32, Legal);
1528 setOperationAction(ISD::OR, MVT::v16i32, Legal);
1529 setOperationAction(ISD::XOR, MVT::v16i32, Legal);
1531 if (Subtarget->hasCDI()) {
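// AVX-512 CD provides VPLZCNTD/VPLZCNTQ, so vector count-leading-zeros is
// natively supported on 512-bit integer vectors.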
1532 setOperationAction(ISD::CTLZ, MVT::v8i64, Legal);
1533 setOperationAction(ISD::CTLZ, MVT::v16i32, Legal);
1536 // Custom lower several nodes.
1537 for (MVT VT : MVT::vector_valuetypes()) {
1538 unsigned EltSize = VT.getVectorElementType().getSizeInBits();
1539 // Extract subvector is special because the value type
1540 // (result) is 256/128-bit but the source is 512-bit wide.
1541 if (VT.is128BitVector() || VT.is256BitVector()) {
1542 setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
1544 if (VT.getVectorElementType() == MVT::i1)
1545 setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Legal);
1547 // Do not attempt to custom lower other non-512-bit vectors
1548 if (!VT.is512BitVector())
1551 if (EltSize >= 32) {
1552 setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
1553 setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
1554 setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
1555 setOperationAction(ISD::VSELECT, VT, Legal);
1556 setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
1557 setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Custom);
1558 setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
1559 setOperationAction(ISD::MLOAD, VT, Legal);
1560 setOperationAction(ISD::MSTORE, VT, Legal);
1563 for (int i = MVT::v32i8; i != MVT::v8i64; ++i) {
1564 MVT VT = (MVT::SimpleValueType)i;
1566 // Do not attempt to promote non-512-bit vectors.
1567 if (!VT.is512BitVector())
1570 setOperationAction(ISD::SELECT, VT, Promote);
1571 AddPromotedToType (ISD::SELECT, VT, MVT::v8i64);
1575 if (!TM.Options.UseSoftFloat && Subtarget->hasBWI()) {
1576 addRegisterClass(MVT::v32i16, &X86::VR512RegClass);
1577 addRegisterClass(MVT::v64i8, &X86::VR512RegClass);
1579 addRegisterClass(MVT::v32i1, &X86::VK32RegClass);
1580 addRegisterClass(MVT::v64i1, &X86::VK64RegClass);
1582 setOperationAction(ISD::LOAD, MVT::v32i16, Legal);
1583 setOperationAction(ISD::LOAD, MVT::v64i8, Legal);
1584 setOperationAction(ISD::SETCC, MVT::v32i1, Custom);
1585 setOperationAction(ISD::SETCC, MVT::v64i1, Custom);
1586 setOperationAction(ISD::ADD, MVT::v32i16, Legal);
1587 setOperationAction(ISD::ADD, MVT::v64i8, Legal);
1588 setOperationAction(ISD::SUB, MVT::v32i16, Legal);
1589 setOperationAction(ISD::SUB, MVT::v64i8, Legal);
1590 setOperationAction(ISD::MUL, MVT::v32i16, Legal);
1592 for (int i = MVT::v32i8; i != MVT::v8i64; ++i) {
1593 const MVT VT = (MVT::SimpleValueType)i;
1595 const unsigned EltSize = VT.getVectorElementType().getSizeInBits();
1597 // Do not attempt to promote non-512-bit vectors.
1598 if (!VT.is512BitVector())
1602 setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
1603 setOperationAction(ISD::VSELECT, VT, Legal);
1608 if (!TM.Options.UseSoftFloat && Subtarget->hasVLX()) {
1609 addRegisterClass(MVT::v4i1, &X86::VK4RegClass);
1610 addRegisterClass(MVT::v2i1, &X86::VK2RegClass);
1612 setOperationAction(ISD::SETCC, MVT::v4i1, Custom);
1613 setOperationAction(ISD::SETCC, MVT::v2i1, Custom);
1614 setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v8i1, Legal);
1616 setOperationAction(ISD::AND, MVT::v8i32, Legal);
1617 setOperationAction(ISD::OR, MVT::v8i32, Legal);
1618 setOperationAction(ISD::XOR, MVT::v8i32, Legal);
1619 setOperationAction(ISD::AND, MVT::v4i32, Legal);
1620 setOperationAction(ISD::OR, MVT::v4i32, Legal);
1621 setOperationAction(ISD::XOR, MVT::v4i32, Legal);
1624 // SIGN_EXTEND_INREGs are evaluated by the extend type. Handle the expansion
1625 // of this type with custom code.
1626 for (MVT VT : MVT::vector_valuetypes())
1627 setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Custom);
1629 // We want to custom lower some of our intrinsics.
1630 setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
1631 setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);
1632 setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);
1633 if (!Subtarget->is64Bit())
1634 setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i64, Custom);
1636 // Only custom-lower 64-bit SADDO and friends on 64-bit because we don't
1637 // handle type legalization for these operations here.
1639 // FIXME: We really should do custom legalization for addition and
1640 // subtraction on x86-32 once PR3203 is fixed. We really can't do much better
1641 // than generic legalization for 64-bit multiplication-with-overflow, though.
1642 for (unsigned i = 0, e = 3+Subtarget->is64Bit(); i != e; ++i) {
1643 // Add/Sub/Mul with overflow operations are custom lowered.
1644 MVT VT = IntVTs[i];
1645 setOperationAction(ISD::SADDO, VT, Custom);
1646 setOperationAction(ISD::UADDO, VT, Custom);
1647 setOperationAction(ISD::SSUBO, VT, Custom);
1648 setOperationAction(ISD::USUBO, VT, Custom);
1649 setOperationAction(ISD::SMULO, VT, Custom);
1650 setOperationAction(ISD::UMULO, VT, Custom);
1654 if (!Subtarget->is64Bit()) {
1655 // These libcalls are not available in 32-bit.
1656 setLibcallName(RTLIB::SHL_I128, nullptr);
1657 setLibcallName(RTLIB::SRL_I128, nullptr);
1658 setLibcallName(RTLIB::SRA_I128, nullptr);
1661 // Combine sin / cos into one node or libcall if possible.
1662 if (Subtarget->hasSinCos()) {
1663 setLibcallName(RTLIB::SINCOS_F32, "sincosf");
1664 setLibcallName(RTLIB::SINCOS_F64, "sincos");
1665 if (Subtarget->isTargetDarwin()) {
1666 // For MacOSX, we don't want the normal expansion of a libcall to sincos.
1667 // We want to issue a libcall to __sincos_stret to avoid memory traffic.
1668 setOperationAction(ISD::FSINCOS, MVT::f64, Custom);
1669 setOperationAction(ISD::FSINCOS, MVT::f32, Custom);
1673 if (Subtarget->isTargetWin64()) {
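// The Win64 ABI has no native 128-bit integer type, so i128 division and
// remainder cannot be expanded the usual way and are custom lowered to
// library calls.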
1674 setOperationAction(ISD::SDIV, MVT::i128, Custom);
1675 setOperationAction(ISD::UDIV, MVT::i128, Custom);
1676 setOperationAction(ISD::SREM, MVT::i128, Custom);
1677 setOperationAction(ISD::UREM, MVT::i128, Custom);
1678 setOperationAction(ISD::SDIVREM, MVT::i128, Custom);
1679 setOperationAction(ISD::UDIVREM, MVT::i128, Custom);
1682 // We have target-specific dag combine patterns for the following nodes:
1683 setTargetDAGCombine(ISD::VECTOR_SHUFFLE);
1684 setTargetDAGCombine(ISD::EXTRACT_VECTOR_ELT);
1685 setTargetDAGCombine(ISD::BITCAST);
1686 setTargetDAGCombine(ISD::VSELECT);
1687 setTargetDAGCombine(ISD::SELECT);
1688 setTargetDAGCombine(ISD::SHL);
1689 setTargetDAGCombine(ISD::SRA);
1690 setTargetDAGCombine(ISD::SRL);
1691 setTargetDAGCombine(ISD::OR);
1692 setTargetDAGCombine(ISD::AND);
1693 setTargetDAGCombine(ISD::ADD);
1694 setTargetDAGCombine(ISD::FADD);
1695 setTargetDAGCombine(ISD::FSUB);
1696 setTargetDAGCombine(ISD::FMA);
1697 setTargetDAGCombine(ISD::SUB);
1698 setTargetDAGCombine(ISD::LOAD);
1699 setTargetDAGCombine(ISD::MLOAD);
1700 setTargetDAGCombine(ISD::STORE);
1701 setTargetDAGCombine(ISD::MSTORE);
1702 setTargetDAGCombine(ISD::ZERO_EXTEND);
1703 setTargetDAGCombine(ISD::ANY_EXTEND);
1704 setTargetDAGCombine(ISD::SIGN_EXTEND);
1705 setTargetDAGCombine(ISD::SIGN_EXTEND_INREG);
1706 setTargetDAGCombine(ISD::TRUNCATE);
1707 setTargetDAGCombine(ISD::SINT_TO_FP);
1708 setTargetDAGCombine(ISD::SETCC);
1709 setTargetDAGCombine(ISD::INTRINSIC_WO_CHAIN);
1710 setTargetDAGCombine(ISD::BUILD_VECTOR);
1711 setTargetDAGCombine(ISD::MUL);
1712 setTargetDAGCombine(ISD::XOR);
1714 computeRegisterProperties();
1716 // On Darwin, -Os means optimize for size without hurting performance,
1717 // so do not reduce the limit.
1718 MaxStoresPerMemset = 16; // For @llvm.memset -> sequence of stores
1719 MaxStoresPerMemsetOptSize = Subtarget->isTargetDarwin() ? 16 : 8;
1720 MaxStoresPerMemcpy = 8; // For @llvm.memcpy -> sequence of stores
1721 MaxStoresPerMemcpyOptSize = Subtarget->isTargetDarwin() ? 8 : 4;
1722 MaxStoresPerMemmove = 8; // For @llvm.memmove -> sequence of stores
1723 MaxStoresPerMemmoveOptSize = Subtarget->isTargetDarwin() ? 8 : 4;
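// These limits bound how many stores an inline expansion may use before the
// operation is lowered to a library call instead; e.g. with
// MaxStoresPerMemset == 16, a small @llvm.memset may be expanded into up to
// 16 stores rather than a call to memset.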
1724 setPrefLoopAlignment(4); // 2^4 bytes.
1726 // Predictable cmovs don't hurt on Atom because it's in-order.
1727 PredictableSelectIsExpensive = !Subtarget->isAtom();
1728 EnableExtLdPromotion = true;
1729 setPrefFunctionAlignment(4); // 2^4 bytes.
1731 verifyIntrinsicTables();
1734 // This has so far only been implemented for 64-bit MachO.
1735 bool X86TargetLowering::useLoadStackGuardNode() const {
1736 return Subtarget->isTargetMachO() && Subtarget->is64Bit();
1739 TargetLoweringBase::LegalizeTypeAction
1740 X86TargetLowering::getPreferredVectorAction(EVT VT) const {
1741 if (ExperimentalVectorWideningLegalization &&
1742 VT.getVectorNumElements() != 1 &&
1743 VT.getVectorElementType().getSimpleVT() != MVT::i1)
1744 return TypeWidenVector;
1746 return TargetLoweringBase::getPreferredVectorAction(VT);
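/// With AVX-512 a scalar SETCC produces an i1 result directly, and vector
/// compares use the vXi1 mask types chosen below from the element count and
/// the available mask-register features (BWI, VLX); otherwise the usual
/// i8 scalar / integer vector result types are used.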
1749 EVT X86TargetLowering::getSetCCResultType(LLVMContext &, EVT VT) const {
1751 return Subtarget->hasAVX512() ? MVT::i1 : MVT::i8;
1753 const unsigned NumElts = VT.getVectorNumElements();
1754 const EVT EltVT = VT.getVectorElementType();
1755 if (VT.is512BitVector()) {
1756 if (Subtarget->hasAVX512())
1757 if (EltVT == MVT::i32 || EltVT == MVT::i64 ||
1758 EltVT == MVT::f32 || EltVT == MVT::f64)
1760 case 8: return MVT::v8i1;
1761 case 16: return MVT::v16i1;
1763 if (Subtarget->hasBWI())
1764 if (EltVT == MVT::i8 || EltVT == MVT::i16)
1766 case 32: return MVT::v32i1;
1767 case 64: return MVT::v64i1;
1771 if (VT.is256BitVector() || VT.is128BitVector()) {
1772 if (Subtarget->hasVLX())
1773 if (EltVT == MVT::i32 || EltVT == MVT::i64 ||
1774 EltVT == MVT::f32 || EltVT == MVT::f64)
1776 case 2: return MVT::v2i1;
1777 case 4: return MVT::v4i1;
1778 case 8: return MVT::v8i1;
1780 if (Subtarget->hasBWI() && Subtarget->hasVLX())
1781 if (EltVT == MVT::i8 || EltVT == MVT::i16)
1783 case 8: return MVT::v8i1;
1784 case 16: return MVT::v16i1;
1785 case 32: return MVT::v32i1;
1789 return VT.changeVectorElementTypeToInteger();
1792 /// Helper for getByValTypeAlignment to determine
1793 /// the desired ByVal argument alignment.
1794 static void getMaxByValAlign(Type *Ty, unsigned &MaxAlign) {
1797 if (VectorType *VTy = dyn_cast<VectorType>(Ty)) {
1798 if (VTy->getBitWidth() == 128)
1800 } else if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
1801 unsigned EltAlign = 0;
1802 getMaxByValAlign(ATy->getElementType(), EltAlign);
1803 if (EltAlign > MaxAlign)
1804 MaxAlign = EltAlign;
1805 } else if (StructType *STy = dyn_cast<StructType>(Ty)) {
1806 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
1807 unsigned EltAlign = 0;
1808 getMaxByValAlign(STy->getElementType(i), EltAlign);
1809 if (EltAlign > MaxAlign)
1810 MaxAlign = EltAlign;
1817 /// Return the desired alignment for ByVal aggregate
1818 /// function arguments in the caller parameter area. For X86, aggregates
1819 /// that contain SSE vectors are placed at 16-byte boundaries while the rest
1820 /// are at 4-byte boundaries.
1821 unsigned X86TargetLowering::getByValTypeAlignment(Type *Ty) const {
1822 if (Subtarget->is64Bit()) {
1823 // Max of 8 and alignment of type.
1824 unsigned TyAlign = TD->getABITypeAlignment(Ty);
1831 if (Subtarget->hasSSE1())
1832 getMaxByValAlign(Ty, Align);
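// For example, on 32-bit x86 with SSE an aggregate containing a <4 x float>
// member is placed on a 16-byte boundary, while a plain struct of ints keeps
// the default 4-byte alignment.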
1836 /// Returns the target specific optimal type for load
1837 /// and store operations as a result of memset, memcpy, and memmove
1838 /// lowering. If DstAlign is zero, the destination can be assumed to satisfy
1839 /// any alignment constraint. Similarly, if SrcAlign is zero there is no need
1840 /// to check it against an alignment requirement,
1841 /// probably because the source does not need to be loaded. If 'IsMemset' is
1842 /// true, that means it's expanding a memset. If 'ZeroMemset' is true, that
1843 /// means it's a memset of zero. 'MemcpyStrSrc' indicates whether the memcpy
1844 /// source is constant so it does not need to be loaded.
1845 /// It returns EVT::Other if the type should be determined using generic
1846 /// target-independent logic.
1848 X86TargetLowering::getOptimalMemOpType(uint64_t Size,
1849 unsigned DstAlign, unsigned SrcAlign,
1850 bool IsMemset, bool ZeroMemset,
1852 MachineFunction &MF) const {
1853 const Function *F = MF.getFunction();
1854 if ((!IsMemset || ZeroMemset) &&
1855 !F->hasFnAttribute(Attribute::NoImplicitFloat)) {
1857 (Subtarget->isUnalignedMemAccessFast() ||
1858 ((DstAlign == 0 || DstAlign >= 16) &&
1859 (SrcAlign == 0 || SrcAlign >= 16)))) {
1861 if (Subtarget->hasInt256())
1863 if (Subtarget->hasFp256())
1866 if (Subtarget->hasSSE2())
1868 if (Subtarget->hasSSE1())
1870 } else if (!MemcpyStrSrc && Size >= 8 &&
1871 !Subtarget->is64Bit() &&
1872 Subtarget->hasSSE2()) {
1873 // Do not use f64 to lower memcpy if the source is a string constant. It's
1874 // better to use i32 to avoid the loads.
1878 if (Subtarget->is64Bit() && Size >= 8)
1883 bool X86TargetLowering::isSafeMemOpType(MVT VT) const {
1885 return X86ScalarSSEf32;
1886 else if (VT == MVT::f64)
1887 return X86ScalarSSEf64;
1892 X86TargetLowering::allowsMisalignedMemoryAccesses(EVT VT,
1897 *Fast = Subtarget->isUnalignedMemAccessFast();
1901 /// Return the entry encoding for a jump table in the
1902 /// current function. The returned value is a member of the
1903 /// MachineJumpTableInfo::JTEntryKind enum.
1904 unsigned X86TargetLowering::getJumpTableEncoding() const {
1905 // In GOT pic mode, each entry in the jump table is emitted as a @GOTOFF symbol.
1907 if (getTargetMachine().getRelocationModel() == Reloc::PIC_ &&
1908 Subtarget->isPICStyleGOT())
1909 return MachineJumpTableInfo::EK_Custom32;
1911 // Otherwise, use the normal jump table encoding heuristics.
1912 return TargetLowering::getJumpTableEncoding();
1916 X86TargetLowering::LowerCustomJumpTableEntry(const MachineJumpTableInfo *MJTI,
1917 const MachineBasicBlock *MBB,
1918 unsigned uid, MCContext &Ctx) const {
1919 assert(MBB->getParent()->getTarget().getRelocationModel() == Reloc::PIC_ &&
1920 Subtarget->isPICStyleGOT());
1921 // In 32-bit ELF systems, our jump table entries are formed with @GOTOFF references.
1923 return MCSymbolRefExpr::Create(MBB->getSymbol(),
1924 MCSymbolRefExpr::VK_GOTOFF, Ctx);
1927 /// Returns relocation base for the given PIC jumptable.
1928 SDValue X86TargetLowering::getPICJumpTableRelocBase(SDValue Table,
1929 SelectionDAG &DAG) const {
1930 if (!Subtarget->is64Bit())
1931 // This doesn't have SDLoc associated with it, but is not really the
1932 // same as a Register.
1933 return DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), getPointerTy());
1937 /// This returns the relocation base for the given PIC jumptable,
1938 /// the same as getPICJumpTableRelocBase, but as an MCExpr.
1939 const MCExpr *X86TargetLowering::
1940 getPICJumpTableRelocBaseExpr(const MachineFunction *MF, unsigned JTI,
1941 MCContext &Ctx) const {
1942 // X86-64 uses RIP relative addressing based on the jump table label.
1943 if (Subtarget->isPICStyleRIPRel())
1944 return TargetLowering::getPICJumpTableRelocBaseExpr(MF, JTI, Ctx);
1946 // Otherwise, the reference is relative to the PIC base.
1947 return MCSymbolRefExpr::Create(MF->getPICBaseSymbol(), Ctx);
1950 // FIXME: Why is this routine here? Move to RegInfo!
1951 std::pair<const TargetRegisterClass*, uint8_t>
1952 X86TargetLowering::findRepresentativeClass(MVT VT) const {
1953 const TargetRegisterClass *RRC = nullptr;
1955 switch (VT.SimpleTy) {
1957 return TargetLowering::findRepresentativeClass(VT);
1958 case MVT::i8: case MVT::i16: case MVT::i32: case MVT::i64:
1959 RRC = Subtarget->is64Bit() ? &X86::GR64RegClass : &X86::GR32RegClass;
1962 RRC = &X86::VR64RegClass;
1964 case MVT::f32: case MVT::f64:
1965 case MVT::v16i8: case MVT::v8i16: case MVT::v4i32: case MVT::v2i64:
1966 case MVT::v4f32: case MVT::v2f64:
1967 case MVT::v32i8: case MVT::v8i32: case MVT::v4i64: case MVT::v8f32:
1969 RRC = &X86::VR128RegClass;
1972 return std::make_pair(RRC, Cost);
1975 bool X86TargetLowering::getStackCookieLocation(unsigned &AddressSpace,
1976 unsigned &Offset) const {
1977 if (!Subtarget->isTargetLinux())
1980 if (Subtarget->is64Bit()) {
1981 // %fs:0x28, unless we're using a Kernel code model, in which case it's %gs:0x28.
1983 if (getTargetMachine().getCodeModel() == CodeModel::Kernel)
1995 bool X86TargetLowering::isNoopAddrSpaceCast(unsigned SrcAS,
1996 unsigned DestAS) const {
1997 assert(SrcAS != DestAS && "Expected different address spaces!");
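// x86 uses address spaces 256-258 for gs/fs/ss segment-relative pointers, so
// only casts between ordinary (< 256) address spaces are no-ops.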
1999 return SrcAS < 256 && DestAS < 256;
2002 //===----------------------------------------------------------------------===//
2003 // Return Value Calling Convention Implementation
2004 //===----------------------------------------------------------------------===//
2006 #include "X86GenCallingConv.inc"
2009 X86TargetLowering::CanLowerReturn(CallingConv::ID CallConv,
2010 MachineFunction &MF, bool isVarArg,
2011 const SmallVectorImpl<ISD::OutputArg> &Outs,
2012 LLVMContext &Context) const {
2013 SmallVector<CCValAssign, 16> RVLocs;
2014 CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context);
2015 return CCInfo.CheckReturn(Outs, RetCC_X86);
2018 const MCPhysReg *X86TargetLowering::getScratchRegisters(CallingConv::ID) const {
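// R11 is never used to pass arguments in the x86-64 calling conventions, so
// it is safe to hand out as a scratch register around calls.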
2019 static const MCPhysReg ScratchRegs[] = { X86::R11, 0 };
2024 X86TargetLowering::LowerReturn(SDValue Chain,
2025 CallingConv::ID CallConv, bool isVarArg,
2026 const SmallVectorImpl<ISD::OutputArg> &Outs,
2027 const SmallVectorImpl<SDValue> &OutVals,
2028 SDLoc dl, SelectionDAG &DAG) const {
2029 MachineFunction &MF = DAG.getMachineFunction();
2030 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
2032 SmallVector<CCValAssign, 16> RVLocs;
2033 CCState CCInfo(CallConv, isVarArg, MF, RVLocs, *DAG.getContext());
2034 CCInfo.AnalyzeReturn(Outs, RetCC_X86);
2037 SmallVector<SDValue, 6> RetOps;
2038 RetOps.push_back(Chain); // Operand #0 = Chain (updated below)
2039 // Operand #1 = Bytes To Pop
2040 RetOps.push_back(DAG.getTargetConstant(FuncInfo->getBytesToPopOnReturn(),
2043 // Copy the result values into the output registers.
2044 for (unsigned i = 0; i != RVLocs.size(); ++i) {
2045 CCValAssign &VA = RVLocs[i];
2046 assert(VA.isRegLoc() && "Can only return in registers!");
2047 SDValue ValToCopy = OutVals[i];
2048 EVT ValVT = ValToCopy.getValueType();
2050 // Promote values to the appropriate types.
2051 if (VA.getLocInfo() == CCValAssign::SExt)
2052 ValToCopy = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), ValToCopy);
2053 else if (VA.getLocInfo() == CCValAssign::ZExt)
2054 ValToCopy = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), ValToCopy);
2055 else if (VA.getLocInfo() == CCValAssign::AExt)
2056 ValToCopy = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), ValToCopy);
2057 else if (VA.getLocInfo() == CCValAssign::BCvt)
2058 ValToCopy = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), ValToCopy);
2060 assert(VA.getLocInfo() != CCValAssign::FPExt &&
2061 "Unexpected FP-extend for return value.");
2063 // If this is x86-64, and we disabled SSE, we can't return FP values,
2064 // or SSE or MMX vectors.
2065 if ((ValVT == MVT::f32 || ValVT == MVT::f64 ||
2066 VA.getLocReg() == X86::XMM0 || VA.getLocReg() == X86::XMM1) &&
2067 (Subtarget->is64Bit() && !Subtarget->hasSSE1())) {
2068 report_fatal_error("SSE register return with SSE disabled");
2070 // Likewise we can't return F64 values with SSE1 only. gcc does so, but
2071 // llvm-gcc has never done it right and no one has noticed, so this
2072 // should be OK for now.
2073 if (ValVT == MVT::f64 &&
2074 (Subtarget->is64Bit() && !Subtarget->hasSSE2()))
2075 report_fatal_error("SSE2 register return with SSE2 disabled");
2077 // Returns in ST0/ST1 are handled specially: these are pushed as operands to
2078 // the RET instruction and handled by the FP Stackifier.
2079 if (VA.getLocReg() == X86::FP0 ||
2080 VA.getLocReg() == X86::FP1) {
2081 // If this is a copy from an xmm register to ST(0), use an FPExtend to
2082 // change the value to the FP stack register class.
2083 if (isScalarFPTypeInSSEReg(VA.getValVT()))
2084 ValToCopy = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f80, ValToCopy);
2085 RetOps.push_back(ValToCopy);
2086 // Don't emit a copytoreg.
2090 // 64-bit vector (MMX) values are returned in XMM0 / XMM1 except for v1i64
2091 // which is returned in RAX / RDX.
2092 if (Subtarget->is64Bit()) {
2093 if (ValVT == MVT::x86mmx) {
2094 if (VA.getLocReg() == X86::XMM0 || VA.getLocReg() == X86::XMM1) {
2095 ValToCopy = DAG.getNode(ISD::BITCAST, dl, MVT::i64, ValToCopy);
2096 ValToCopy = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64,
2098 // If we don't have SSE2 available, convert to v4f32 so the generated
2099 // register is legal.
2100 if (!Subtarget->hasSSE2())
2101 ValToCopy = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, ValToCopy);
2106 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), ValToCopy, Flag);
2107 Flag = Chain.getValue(1);
2108 RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
2111 // The x86-64 ABIs require that for returning structs by value we copy
2112 // the sret argument into %rax/%eax (depending on ABI) for the return.
2113 // Win32 requires us to put the sret argument to %eax as well.
2114 // We saved the argument into a virtual register in the entry block,
2115 // so now we copy the value out and into %rax/%eax.
2117 // Checking Function.hasStructRetAttr() here is insufficient because the IR
2118 // may not have an explicit sret argument. If FuncInfo.CanLowerReturn is
2119 // false, then an sret argument may be implicitly inserted in the SelDAG. In
2120 // either case FuncInfo->setSRetReturnReg() will have been called.
2121 if (unsigned SRetReg = FuncInfo->getSRetReturnReg()) {
2122 assert((Subtarget->is64Bit() || Subtarget->isTargetKnownWindowsMSVC()) &&
2123 "No need for an sret register");
2124 SDValue Val = DAG.getCopyFromReg(Chain, dl, SRetReg, getPointerTy());
2127 = (Subtarget->is64Bit() && !Subtarget->isTarget64BitILP32()) ?
2128 X86::RAX : X86::EAX;
2129 Chain = DAG.getCopyToReg(Chain, dl, RetValReg, Val, Flag);
2130 Flag = Chain.getValue(1);
2132 // RAX/EAX now acts like a return value.
2133 RetOps.push_back(DAG.getRegister(RetValReg, getPointerTy()));
2136 RetOps[0] = Chain; // Update chain.
2138 // Add the flag if we have it.
2140 RetOps.push_back(Flag);
2142 return DAG.getNode(X86ISD::RET_FLAG, dl, MVT::Other, RetOps);
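/// Return true if the result of the given node is only consumed (possibly
/// through a CopyToReg or FP_EXTEND) by a RET_FLAG node, which allows the
/// operation to be folded into a tail call / return sequence.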
2145 bool X86TargetLowering::isUsedByReturnOnly(SDNode *N, SDValue &Chain) const {
2146 if (N->getNumValues() != 1)
2148 if (!N->hasNUsesOfValue(1, 0))
2151 SDValue TCChain = Chain;
2152 SDNode *Copy = *N->use_begin();
2153 if (Copy->getOpcode() == ISD::CopyToReg) {
2154 // If the copy has a glue operand, we conservatively assume it isn't safe to
2155 // perform a tail call.
2156 if (Copy->getOperand(Copy->getNumOperands()-1).getValueType() == MVT::Glue)
2158 TCChain = Copy->getOperand(0);
2159 } else if (Copy->getOpcode() != ISD::FP_EXTEND)
2162 bool HasRet = false;
2163 for (SDNode::use_iterator UI = Copy->use_begin(), UE = Copy->use_end();
2165 if (UI->getOpcode() != X86ISD::RET_FLAG)
2167 // If we are returning more than one value, we can definitely
2168 // not make a tail call; see PR19530.
2169 if (UI->getNumOperands() > 4)
2171 if (UI->getNumOperands() == 4 &&
2172 UI->getOperand(UI->getNumOperands()-1).getValueType() != MVT::Glue)
2185 X86TargetLowering::getTypeForExtArgOrReturn(LLVMContext &Context, EVT VT,
2186 ISD::NodeType ExtendKind) const {
2188 // TODO: Is this also valid on 32-bit?
2189 if (Subtarget->is64Bit() && VT == MVT::i1 && ExtendKind == ISD::ZERO_EXTEND)
2190 ReturnMVT = MVT::i8;
2192 ReturnMVT = MVT::i32;
2194 EVT MinVT = getRegisterType(Context, ReturnMVT);
2195 return VT.bitsLT(MinVT) ? MinVT : VT;
2198 /// Lower the result values of a call into the
2199 /// appropriate copies out of appropriate physical registers.
2202 X86TargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
2203 CallingConv::ID CallConv, bool isVarArg,
2204 const SmallVectorImpl<ISD::InputArg> &Ins,
2205 SDLoc dl, SelectionDAG &DAG,
2206 SmallVectorImpl<SDValue> &InVals) const {
2208 // Assign locations to each value returned by this call.
2209 SmallVector<CCValAssign, 16> RVLocs;
2210 bool Is64Bit = Subtarget->is64Bit();
2211 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
2213 CCInfo.AnalyzeCallResult(Ins, RetCC_X86);
2215 // Copy all of the result registers out of their specified physreg.
2216 for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
2217 CCValAssign &VA = RVLocs[i];
2218 EVT CopyVT = VA.getValVT();
2220 // If this is x86-64, and we disabled SSE, we can't return FP values
2221 if ((CopyVT == MVT::f32 || CopyVT == MVT::f64) &&
2222 ((Is64Bit || Ins[i].Flags.isInReg()) && !Subtarget->hasSSE1())) {
2223 report_fatal_error("SSE register return with SSE disabled");
2226 // If we prefer to use the value in xmm registers, copy it out as f80 and
2227 // use a truncate to move it from fp stack reg to xmm reg.
2228 if ((VA.getLocReg() == X86::FP0 || VA.getLocReg() == X86::FP1) &&
2229 isScalarFPTypeInSSEReg(VA.getValVT()))
2232 Chain = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(),
2233 CopyVT, InFlag).getValue(1);
2234 SDValue Val = Chain.getValue(0);
2236 if (CopyVT != VA.getValVT())
2237 Val = DAG.getNode(ISD::FP_ROUND, dl, VA.getValVT(), Val,
2238 // This truncation won't change the value.
2239 DAG.getIntPtrConstant(1));
2241 InFlag = Chain.getValue(2);
2242 InVals.push_back(Val);
2248 //===----------------------------------------------------------------------===//
2249 // C & StdCall & Fast Calling Convention implementation
2250 //===----------------------------------------------------------------------===//
2251 // The StdCall calling convention is the standard for many Windows API
2252 // routines. It differs from the C calling convention only slightly: the
2253 // callee cleans up the stack rather than the caller, and symbols are
2254 // decorated in some fancy way :) It doesn't support any vector arguments.
2255 // For info on fast calling convention see Fast Calling Convention (tail call)
2256 // implementation LowerX86_32FastCCCallTo.
2258 /// CallIsStructReturn - Determines whether a call uses struct return semantics.
2260 enum StructReturnType {
2265 static StructReturnType
2266 callIsStructReturn(const SmallVectorImpl<ISD::OutputArg> &Outs) {
2268 return NotStructReturn;
2270 const ISD::ArgFlagsTy &Flags = Outs[0].Flags;
2271 if (!Flags.isSRet())
2272 return NotStructReturn;
2273 if (Flags.isInReg())
2274 return RegStructReturn;
2275 return StackStructReturn;
2278 /// Determines whether a function uses struct return semantics.
2279 static StructReturnType
2280 argsAreStructReturn(const SmallVectorImpl<ISD::InputArg> &Ins) {
2282 return NotStructReturn;
2284 const ISD::ArgFlagsTy &Flags = Ins[0].Flags;
2285 if (!Flags.isSRet())
2286 return NotStructReturn;
2287 if (Flags.isInReg())
2288 return RegStructReturn;
2289 return StackStructReturn;
2292 /// Make a copy of an aggregate at address specified by "Src" to address
2293 /// "Dst" with size and alignment information specified by the specific
2294 /// parameter attribute. The copy will be passed as a byval function parameter.
2296 CreateCopyOfByValArgument(SDValue Src, SDValue Dst, SDValue Chain,
2297 ISD::ArgFlagsTy Flags, SelectionDAG &DAG,
2299 SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), MVT::i32);
2301 return DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode, Flags.getByValAlign(),
2302 /*isVolatile*/false, /*AlwaysInline=*/true,
2303 MachinePointerInfo(), MachinePointerInfo());
2306 /// Return true if the calling convention is one that
2307 /// supports tail call optimization.
2308 static bool IsTailCallConvention(CallingConv::ID CC) {
2309 return (CC == CallingConv::Fast || CC == CallingConv::GHC ||
2310 CC == CallingConv::HiPE);
2313 /// \brief Return true if the calling convention is a C calling convention.
2314 static bool IsCCallConvention(CallingConv::ID CC) {
2315 return (CC == CallingConv::C || CC == CallingConv::X86_64_Win64 ||
2316 CC == CallingConv::X86_64_SysV);
2319 bool X86TargetLowering::mayBeEmittedAsTailCall(CallInst *CI) const {
2320 if (!CI->isTailCall() || getTargetMachine().Options.DisableTailCalls)
2324 CallingConv::ID CalleeCC = CS.getCallingConv();
2325 if (!IsTailCallConvention(CalleeCC) && !IsCCallConvention(CalleeCC))
2331 /// Return true if the function is being made into
2332 /// a tailcall target by changing its ABI.
2333 static bool FuncIsMadeTailCallSafe(CallingConv::ID CC,
2334 bool GuaranteedTailCallOpt) {
2335 return GuaranteedTailCallOpt && IsTailCallConvention(CC);
2339 X86TargetLowering::LowerMemArgument(SDValue Chain,
2340 CallingConv::ID CallConv,
2341 const SmallVectorImpl<ISD::InputArg> &Ins,
2342 SDLoc dl, SelectionDAG &DAG,
2343 const CCValAssign &VA,
2344 MachineFrameInfo *MFI,
2346 // Create the nodes corresponding to a load from this parameter slot.
2347 ISD::ArgFlagsTy Flags = Ins[i].Flags;
2348 bool AlwaysUseMutable = FuncIsMadeTailCallSafe(
2349 CallConv, DAG.getTarget().Options.GuaranteedTailCallOpt);
2350 bool isImmutable = !AlwaysUseMutable && !Flags.isByVal();
2353 // If the value is passed by pointer, we have the address passed instead of the value itself.
2355 if (VA.getLocInfo() == CCValAssign::Indirect)
2356 ValVT = VA.getLocVT();
2358 ValVT = VA.getValVT();
2360 // FIXME: For now, all byval parameter objects are marked mutable. This can be
2361 // changed with more analysis.
2362 // In case of tail call optimization, mark all arguments mutable, since they
2363 // could be overwritten by the lowering of arguments in case of a tail call.
2364 if (Flags.isByVal()) {
2365 unsigned Bytes = Flags.getByValSize();
2366 if (Bytes == 0) Bytes = 1; // Don't create zero-sized stack objects.
2367 int FI = MFI->CreateFixedObject(Bytes, VA.getLocMemOffset(), isImmutable);
2368 return DAG.getFrameIndex(FI, getPointerTy());
2370 int FI = MFI->CreateFixedObject(ValVT.getSizeInBits()/8,
2371 VA.getLocMemOffset(), isImmutable);
2372 SDValue FIN = DAG.getFrameIndex(FI, getPointerTy());
2373 return DAG.getLoad(ValVT, dl, Chain, FIN,
2374 MachinePointerInfo::getFixedStack(FI),
2375 false, false, false, 0);
2379 // FIXME: Get this from tablegen.
2380 static ArrayRef<MCPhysReg> get64BitArgumentGPRs(CallingConv::ID CallConv,
2381 const X86Subtarget *Subtarget) {
2382 assert(Subtarget->is64Bit());
2384 if (Subtarget->isCallingConvWin64(CallConv)) {
2385 static const MCPhysReg GPR64ArgRegsWin64[] = {
2386 X86::RCX, X86::RDX, X86::R8, X86::R9
2388 return makeArrayRef(std::begin(GPR64ArgRegsWin64), std::end(GPR64ArgRegsWin64));
2391 static const MCPhysReg GPR64ArgRegs64Bit[] = {
2392 X86::RDI, X86::RSI, X86::RDX, X86::RCX, X86::R8, X86::R9
2394 return makeArrayRef(std::begin(GPR64ArgRegs64Bit), std::end(GPR64ArgRegs64Bit));
2397 // FIXME: Get this from tablegen.
2398 static ArrayRef<MCPhysReg> get64BitArgumentXMMs(MachineFunction &MF,
2399 CallingConv::ID CallConv,
2400 const X86Subtarget *Subtarget) {
2401 assert(Subtarget->is64Bit());
2402 if (Subtarget->isCallingConvWin64(CallConv)) {
2403 // The XMM registers which might contain var arg parameters are shadowed
2404 // in their paired GPR. So we only need to save the GPR to their home slots.
2406 // TODO: __vectorcall will change this.
2410 const Function *Fn = MF.getFunction();
2411 bool NoImplicitFloatOps = Fn->hasFnAttribute(Attribute::NoImplicitFloat);
2412 assert(!(MF.getTarget().Options.UseSoftFloat && NoImplicitFloatOps) &&
2413 "SSE register cannot be used when SSE is disabled!");
2414 if (MF.getTarget().Options.UseSoftFloat || NoImplicitFloatOps ||
2415 !Subtarget->hasSSE1())
2416 // Kernel mode asks for SSE to be disabled, so there are no XMM argument registers.
2420 static const MCPhysReg XMMArgRegs64Bit[] = {
2421 X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
2422 X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7
2424 return makeArrayRef(std::begin(XMMArgRegs64Bit), std::end(XMMArgRegs64Bit));
2428 X86TargetLowering::LowerFormalArguments(SDValue Chain,
2429 CallingConv::ID CallConv,
2431 const SmallVectorImpl<ISD::InputArg> &Ins,
2434 SmallVectorImpl<SDValue> &InVals)
2436 MachineFunction &MF = DAG.getMachineFunction();
2437 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
2439 const Function* Fn = MF.getFunction();
2440 if (Fn->hasExternalLinkage() &&
2441 Subtarget->isTargetCygMing() &&
2442 Fn->getName() == "main")
2443 FuncInfo->setForceFramePointer(true);
2445 MachineFrameInfo *MFI = MF.getFrameInfo();
2446 bool Is64Bit = Subtarget->is64Bit();
2447 bool IsWin64 = Subtarget->isCallingConvWin64(CallConv);
2449 assert(!(isVarArg && IsTailCallConvention(CallConv)) &&
2450 "Var args not supported with calling convention fastcc, ghc or hipe");
2452 // Assign locations to all of the incoming arguments.
2453 SmallVector<CCValAssign, 16> ArgLocs;
2454 CCState CCInfo(CallConv, isVarArg, MF, ArgLocs, *DAG.getContext());
2456 // Allocate shadow area for Win64
2458 CCInfo.AllocateStack(32, 8);
2460 CCInfo.AnalyzeFormalArguments(Ins, CC_X86);
2462 unsigned LastVal = ~0U;
2464 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
2465 CCValAssign &VA = ArgLocs[i];
2466 // TODO: If an arg is passed in two places (e.g. reg and stack), skip later occurrences.
2468 assert(VA.getValNo() != LastVal &&
2469 "Don't support value assigned to multiple locs yet");
2471 LastVal = VA.getValNo();
2473 if (VA.isRegLoc()) {
2474 EVT RegVT = VA.getLocVT();
2475 const TargetRegisterClass *RC;
2476 if (RegVT == MVT::i32)
2477 RC = &X86::GR32RegClass;
2478 else if (Is64Bit && RegVT == MVT::i64)
2479 RC = &X86::GR64RegClass;
2480 else if (RegVT == MVT::f32)
2481 RC = &X86::FR32RegClass;
2482 else if (RegVT == MVT::f64)
2483 RC = &X86::FR64RegClass;
2484 else if (RegVT.is512BitVector())
2485 RC = &X86::VR512RegClass;
2486 else if (RegVT.is256BitVector())
2487 RC = &X86::VR256RegClass;
2488 else if (RegVT.is128BitVector())
2489 RC = &X86::VR128RegClass;
2490 else if (RegVT == MVT::x86mmx)
2491 RC = &X86::VR64RegClass;
2492 else if (RegVT == MVT::i1)
2493 RC = &X86::VK1RegClass;
2494 else if (RegVT == MVT::v8i1)
2495 RC = &X86::VK8RegClass;
2496 else if (RegVT == MVT::v16i1)
2497 RC = &X86::VK16RegClass;
2498 else if (RegVT == MVT::v32i1)
2499 RC = &X86::VK32RegClass;
2500 else if (RegVT == MVT::v64i1)
2501 RC = &X86::VK64RegClass;
2503 llvm_unreachable("Unknown argument type!");
2505 unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);
2506 ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, RegVT);
2508 // If this is an 8 or 16-bit value, it is really passed promoted to 32
2509 // bits. Insert an assert[sz]ext to capture this, then truncate to the right size.
2511 if (VA.getLocInfo() == CCValAssign::SExt)
2512 ArgValue = DAG.getNode(ISD::AssertSext, dl, RegVT, ArgValue,
2513 DAG.getValueType(VA.getValVT()));
2514 else if (VA.getLocInfo() == CCValAssign::ZExt)
2515 ArgValue = DAG.getNode(ISD::AssertZext, dl, RegVT, ArgValue,
2516 DAG.getValueType(VA.getValVT()));
2517 else if (VA.getLocInfo() == CCValAssign::BCvt)
2518 ArgValue = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), ArgValue);
2520 if (VA.isExtInLoc()) {
2521 // Handle MMX values passed in XMM regs.
2522 if (RegVT.isVector())
2523 ArgValue = DAG.getNode(X86ISD::MOVDQ2Q, dl, VA.getValVT(), ArgValue);
2525 ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue);
2528 assert(VA.isMemLoc());
2529 ArgValue = LowerMemArgument(Chain, CallConv, Ins, dl, DAG, VA, MFI, i);
2532 // If the value is passed via a pointer, do a load.
2533 if (VA.getLocInfo() == CCValAssign::Indirect)
2534 ArgValue = DAG.getLoad(VA.getValVT(), dl, Chain, ArgValue,
2535 MachinePointerInfo(), false, false, false, 0);
2537 InVals.push_back(ArgValue);
2540 if (Subtarget->is64Bit() || Subtarget->isTargetKnownWindowsMSVC()) {
2541 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
2542 // The x86-64 ABIs require that for returning structs by value we copy
2543 // the sret argument into %rax/%eax (depending on ABI) for the return.
2544 // Win32 requires us to put the sret argument to %eax as well.
2545 // Save the argument into a virtual register so that we can access it
2546 // from the return points.
2547 if (Ins[i].Flags.isSRet()) {
2548 unsigned Reg = FuncInfo->getSRetReturnReg();
2550 MVT PtrTy = getPointerTy();
2551 Reg = MF.getRegInfo().createVirtualRegister(getRegClassFor(PtrTy));
2552 FuncInfo->setSRetReturnReg(Reg);
2554 SDValue Copy = DAG.getCopyToReg(DAG.getEntryNode(), dl, Reg, InVals[i]);
2555 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Copy, Chain);
2561 unsigned StackSize = CCInfo.getNextStackOffset();
2562 // Align stack specially for tail calls.
2563 if (FuncIsMadeTailCallSafe(CallConv,
2564 MF.getTarget().Options.GuaranteedTailCallOpt))
2565 StackSize = GetAlignedArgumentStackSize(StackSize, DAG);
2567 // If the function takes a variable number of arguments, make a frame index for
2568 // the start of the first vararg value... for expansion of llvm.va_start. We
2569 // can skip this if there are no va_start calls.
2570 if (MFI->hasVAStart() &&
2571 (Is64Bit || (CallConv != CallingConv::X86_FastCall &&
2572 CallConv != CallingConv::X86_ThisCall))) {
2573 FuncInfo->setVarArgsFrameIndex(
2574 MFI->CreateFixedObject(1, StackSize, true));
2577 // Figure out if XMM registers are in use.
2578 assert(!(MF.getTarget().Options.UseSoftFloat &&
2579 Fn->hasFnAttribute(Attribute::NoImplicitFloat)) &&
2580 "SSE register cannot be used when SSE is disabled!");
2582 // 64-bit calling conventions support varargs and register parameters, so we
2583 // have to do extra work to spill them in the prologue.
2584 if (Is64Bit && isVarArg && MFI->hasVAStart()) {
2585 // Find the first unallocated argument registers.
2586 ArrayRef<MCPhysReg> ArgGPRs = get64BitArgumentGPRs(CallConv, Subtarget);
2587 ArrayRef<MCPhysReg> ArgXMMs = get64BitArgumentXMMs(MF, CallConv, Subtarget);
2588 unsigned NumIntRegs =
2589 CCInfo.getFirstUnallocated(ArgGPRs.data(), ArgGPRs.size());
2590 unsigned NumXMMRegs =
2591 CCInfo.getFirstUnallocated(ArgXMMs.data(), ArgXMMs.size());
2592 assert(!(NumXMMRegs && !Subtarget->hasSSE1()) &&
2593 "SSE register cannot be used when SSE is disabled!");
2595 // Gather all the live in physical registers.
2596 SmallVector<SDValue, 6> LiveGPRs;
2597 SmallVector<SDValue, 8> LiveXMMRegs;
2599 for (MCPhysReg Reg : ArgGPRs.slice(NumIntRegs)) {
2600 unsigned GPR = MF.addLiveIn(Reg, &X86::GR64RegClass);
2602 DAG.getCopyFromReg(Chain, dl, GPR, MVT::i64));
2604 if (!ArgXMMs.empty()) {
2605 unsigned AL = MF.addLiveIn(X86::AL, &X86::GR8RegClass);
2606 ALVal = DAG.getCopyFromReg(Chain, dl, AL, MVT::i8);
2607 for (MCPhysReg Reg : ArgXMMs.slice(NumXMMRegs)) {
2608 unsigned XMMReg = MF.addLiveIn(Reg, &X86::VR128RegClass);
2609 LiveXMMRegs.push_back(
2610 DAG.getCopyFromReg(Chain, dl, XMMReg, MVT::v4f32));
2615 const TargetFrameLowering &TFI = *Subtarget->getFrameLowering();
2616 // Get to the caller-allocated home save location. Add 8 to account
2617 // for the return address.
2618 int HomeOffset = TFI.getOffsetOfLocalArea() + 8;
2619 FuncInfo->setRegSaveFrameIndex(
2620 MFI->CreateFixedObject(1, NumIntRegs * 8 + HomeOffset, false));
2621 // Fixup to set vararg frame on shadow area (4 x i64).
2623 FuncInfo->setVarArgsFrameIndex(FuncInfo->getRegSaveFrameIndex());
2625 // For X86-64, if there are vararg parameters that are passed via
2626 // registers, then we must store them to their spots on the stack so
2627 // they may be loaded by dereferencing the result of va_next.
2628 FuncInfo->setVarArgsGPOffset(NumIntRegs * 8);
2629 FuncInfo->setVarArgsFPOffset(ArgGPRs.size() * 8 + NumXMMRegs * 16);
2630 FuncInfo->setRegSaveFrameIndex(MFI->CreateStackObject(
2631 ArgGPRs.size() * 8 + ArgXMMs.size() * 16, 16, false));
2634 // Store the integer parameter registers.
2635 SmallVector<SDValue, 8> MemOps;
2636 SDValue RSFIN = DAG.getFrameIndex(FuncInfo->getRegSaveFrameIndex(),
2638 unsigned Offset = FuncInfo->getVarArgsGPOffset();
2639 for (SDValue Val : LiveGPRs) {
2640 SDValue FIN = DAG.getNode(ISD::ADD, dl, getPointerTy(), RSFIN,
2641 DAG.getIntPtrConstant(Offset));
2643 DAG.getStore(Val.getValue(1), dl, Val, FIN,
2644 MachinePointerInfo::getFixedStack(
2645 FuncInfo->getRegSaveFrameIndex(), Offset),
2647 MemOps.push_back(Store);
2651 if (!ArgXMMs.empty() && NumXMMRegs != ArgXMMs.size()) {
2652 // Now store the XMM (fp + vector) parameter registers.
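// On the x86-64 SysV ABI, %al carries the number of vector registers actually
// used by a varargs call; the VASTART_SAVE_XMM_REGS pseudo uses it to skip
// the XMM spills when no vector arguments were passed.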
2653 SmallVector<SDValue, 12> SaveXMMOps;
2654 SaveXMMOps.push_back(Chain);
2655 SaveXMMOps.push_back(ALVal);
2656 SaveXMMOps.push_back(DAG.getIntPtrConstant(
2657 FuncInfo->getRegSaveFrameIndex()));
2658 SaveXMMOps.push_back(DAG.getIntPtrConstant(
2659 FuncInfo->getVarArgsFPOffset()));
2660 SaveXMMOps.insert(SaveXMMOps.end(), LiveXMMRegs.begin(),
2662 MemOps.push_back(DAG.getNode(X86ISD::VASTART_SAVE_XMM_REGS, dl,
2663 MVT::Other, SaveXMMOps));
2666 if (!MemOps.empty())
2667 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);
2670 if (isVarArg && MFI->hasMustTailInVarArgFunc()) {
2671 // Find the largest legal vector type.
2672 MVT VecVT = MVT::Other;
2673 // FIXME: Only some x86_32 calling conventions support AVX512.
2674 if (Subtarget->hasAVX512() &&
2675 (Is64Bit || (CallConv == CallingConv::X86_VectorCall ||
2676 CallConv == CallingConv::Intel_OCL_BI)))
2677 VecVT = MVT::v16f32;
2678 else if (Subtarget->hasAVX())
2680 else if (Subtarget->hasSSE2())
2683 // We forward some GPRs and some vector types.
2684 SmallVector<MVT, 2> RegParmTypes;
2685 MVT IntVT = Is64Bit ? MVT::i64 : MVT::i32;
2686 RegParmTypes.push_back(IntVT);
2687 if (VecVT != MVT::Other)
2688 RegParmTypes.push_back(VecVT);
2690 // Compute the set of forwarded registers. The rest are scratch.
2691 SmallVectorImpl<ForwardedRegister> &Forwards =
2692 FuncInfo->getForwardedMustTailRegParms();
2693 CCInfo.analyzeMustTailForwardedRegisters(Forwards, RegParmTypes, CC_X86);
2695 // Conservatively forward AL on x86_64, since it might be used for varargs.
2696 if (Is64Bit && !CCInfo.isAllocated(X86::AL)) {
2697 unsigned ALVReg = MF.addLiveIn(X86::AL, &X86::GR8RegClass);
2698 Forwards.push_back(ForwardedRegister(ALVReg, X86::AL, MVT::i8));
2701 // Copy all forwards from physical to virtual registers.
2702 for (ForwardedRegister &F : Forwards) {
2703 // FIXME: Can we use a less constrained schedule?
2704 SDValue RegVal = DAG.getCopyFromReg(Chain, dl, F.VReg, F.VT);
2705 F.VReg = MF.getRegInfo().createVirtualRegister(getRegClassFor(F.VT));
2706 Chain = DAG.getCopyToReg(Chain, dl, F.VReg, RegVal);
2710 // Some CCs need callee pop.
2711 if (X86::isCalleePop(CallConv, Is64Bit, isVarArg,
2712 MF.getTarget().Options.GuaranteedTailCallOpt)) {
2713 FuncInfo->setBytesToPopOnReturn(StackSize); // Callee pops everything.
2715 FuncInfo->setBytesToPopOnReturn(0); // Callee pops nothing.
2716 // If this is an sret function, the return should pop the hidden pointer.
2717 if (!Is64Bit && !IsTailCallConvention(CallConv) &&
2718 !Subtarget->getTargetTriple().isOSMSVCRT() &&
2719 argsAreStructReturn(Ins) == StackStructReturn)
2720 FuncInfo->setBytesToPopOnReturn(4);
2724 // RegSaveFrameIndex is X86-64 only.
2725 FuncInfo->setRegSaveFrameIndex(0xAAAAAAA);
2726 if (CallConv == CallingConv::X86_FastCall ||
2727 CallConv == CallingConv::X86_ThisCall)
2728 // fastcc functions can't have varargs.
2729 FuncInfo->setVarArgsFrameIndex(0xAAAAAAA);
2732 FuncInfo->setArgumentStackSize(StackSize);
2738 X86TargetLowering::LowerMemOpCallTo(SDValue Chain,
2739 SDValue StackPtr, SDValue Arg,
2740 SDLoc dl, SelectionDAG &DAG,
2741 const CCValAssign &VA,
2742 ISD::ArgFlagsTy Flags) const {
2743 unsigned LocMemOffset = VA.getLocMemOffset();
2744 SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset);
2745 PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, PtrOff);
2746 if (Flags.isByVal())
2747 return CreateCopyOfByValArgument(Arg, PtrOff, Chain, Flags, DAG, dl);
2749 return DAG.getStore(Chain, dl, Arg, PtrOff,
2750 MachinePointerInfo::getStack(LocMemOffset),
2754 /// Emit a load of the return address if tail call
2755 /// optimization is performed and it is required.
2757 X86TargetLowering::EmitTailCallLoadRetAddr(SelectionDAG &DAG,
2758 SDValue &OutRetAddr, SDValue Chain,
2759 bool IsTailCall, bool Is64Bit,
2760 int FPDiff, SDLoc dl) const {
2761 // Adjust the Return address stack slot.
2762 EVT VT = getPointerTy();
2763 OutRetAddr = getReturnAddressFrameIndex(DAG);
2765 // Load the "old" Return address.
2766 OutRetAddr = DAG.getLoad(VT, dl, Chain, OutRetAddr, MachinePointerInfo(),
2767 false, false, false, 0);
2768 return SDValue(OutRetAddr.getNode(), 1);
2771 /// Emit a store of the return address if tail call
2772 /// optimization is performed and it is required (FPDiff!=0).
2773 static SDValue EmitTailCallStoreRetAddr(SelectionDAG &DAG, MachineFunction &MF,
2774 SDValue Chain, SDValue RetAddrFrIdx,
2775 EVT PtrVT, unsigned SlotSize,
2776 int FPDiff, SDLoc dl) {
2777 // Store the return address to the appropriate stack slot.
2778 if (!FPDiff) return Chain;
2779 // Calculate the new stack slot for the return address.
2780 int NewReturnAddrFI =
2781 MF.getFrameInfo()->CreateFixedObject(SlotSize, (int64_t)FPDiff - SlotSize,
2783 SDValue NewRetAddrFrIdx = DAG.getFrameIndex(NewReturnAddrFI, PtrVT);
2784 Chain = DAG.getStore(Chain, dl, RetAddrFrIdx, NewRetAddrFrIdx,
2785 MachinePointerInfo::getFixedStack(NewReturnAddrFI),
2791 X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
2792 SmallVectorImpl<SDValue> &InVals) const {
2793 SelectionDAG &DAG = CLI.DAG;
2795 SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
2796 SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
2797 SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
2798 SDValue Chain = CLI.Chain;
2799 SDValue Callee = CLI.Callee;
2800 CallingConv::ID CallConv = CLI.CallConv;
2801 bool &isTailCall = CLI.IsTailCall;
2802 bool isVarArg = CLI.IsVarArg;
2804 MachineFunction &MF = DAG.getMachineFunction();
2805 bool Is64Bit = Subtarget->is64Bit();
2806 bool IsWin64 = Subtarget->isCallingConvWin64(CallConv);
2807 StructReturnType SR = callIsStructReturn(Outs);
2808 bool IsSibcall = false;
2809 X86MachineFunctionInfo *X86Info = MF.getInfo<X86MachineFunctionInfo>();
2811 if (MF.getTarget().Options.DisableTailCalls)
2814 bool IsMustTail = CLI.CS && CLI.CS->isMustTailCall();
2816 // Force this to be a tail call. The verifier rules are enough to ensure
2817 // that we can lower this successfully without moving the return address around.
2820 } else if (isTailCall) {
2821 // Check if it's really possible to do a tail call.
2822 isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv,
2823 isVarArg, SR != NotStructReturn,
2824 MF.getFunction()->hasStructRetAttr(), CLI.RetTy,
2825 Outs, OutVals, Ins, DAG);
2827 // Sibcalls are automatically detected tailcalls which do not require any ABI changes.
2829 if (!MF.getTarget().Options.GuaranteedTailCallOpt && isTailCall)
2836 assert(!(isVarArg && IsTailCallConvention(CallConv)) &&
2837 "Var args not supported with calling convention fastcc, ghc or hipe");
2839 // Analyze operands of the call, assigning locations to each operand.
2840 SmallVector<CCValAssign, 16> ArgLocs;
2841 CCState CCInfo(CallConv, isVarArg, MF, ArgLocs, *DAG.getContext());
2843 // Allocate shadow area for Win64
2845 CCInfo.AllocateStack(32, 8);
2847 CCInfo.AnalyzeCallOperands(Outs, CC_X86);
2849 // Get a count of how many bytes are to be pushed on the stack.
2850 unsigned NumBytes = CCInfo.getNextStackOffset();
2852 // This is a sibcall. The memory operands are already available in the
2853 // caller's own stack.
2855 else if (MF.getTarget().Options.GuaranteedTailCallOpt &&
2856 IsTailCallConvention(CallConv))
2857 NumBytes = GetAlignedArgumentStackSize(NumBytes, DAG);
2860 if (isTailCall && !IsSibcall && !IsMustTail) {
2861 // Lower arguments at fp - stackoffset + fpdiff.
2862 unsigned NumBytesCallerPushed = X86Info->getBytesToPopOnReturn();
2864 FPDiff = NumBytesCallerPushed - NumBytes;
2866 // Record the delta by which the return address stack slot moves, but only
2867 // if this delta moves the slot further (is more negative) than the previous one.
2868 if (FPDiff < X86Info->getTCReturnAddrDelta())
2869 X86Info->setTCReturnAddrDelta(FPDiff);
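// For example, if this function was entered with 8 bytes of stack arguments
// (BytesToPopOnReturn == 8) and the tail-called callee needs 24 bytes, then
// FPDiff == -16 and the return address slot must move 16 bytes towards lower
// addresses; TCReturnAddrDelta keeps the most negative delta seen so far.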
2872 unsigned NumBytesToPush = NumBytes;
2873 unsigned NumBytesToPop = NumBytes;
2875 // If we have an inalloca argument, all stack space has already been allocated
2876 // for us and will be right at the top of the stack. We don't support multiple
2877 // arguments passed in memory when using inalloca.
2878 if (!Outs.empty() && Outs.back().Flags.isInAlloca()) {
2880 if (!ArgLocs.back().isMemLoc())
2881 report_fatal_error("cannot use inalloca attribute on a register "
2883 if (ArgLocs.back().getLocMemOffset() != 0)
2884 report_fatal_error("any parameter with the inalloca attribute must be "
2885 "the only memory argument");
2889 Chain = DAG.getCALLSEQ_START(
2890 Chain, DAG.getIntPtrConstant(NumBytesToPush, true), dl);
2892 SDValue RetAddrFrIdx;
2893 // Load return address for tail calls.
2894 if (isTailCall && FPDiff)
2895 Chain = EmitTailCallLoadRetAddr(DAG, RetAddrFrIdx, Chain, isTailCall,
2896 Is64Bit, FPDiff, dl);
2898 SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
2899 SmallVector<SDValue, 8> MemOpChains;
2902 // Walk the register/memloc assignments, inserting copies/loads. In the case
2903 // of tail call optimization, arguments are handled later.
2904 const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
2905 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
2906 // Skip inalloca arguments, they have already been written.
2907 ISD::ArgFlagsTy Flags = Outs[i].Flags;
2908 if (Flags.isInAlloca())
2911 CCValAssign &VA = ArgLocs[i];
2912 EVT RegVT = VA.getLocVT();
2913 SDValue Arg = OutVals[i];
2914 bool isByVal = Flags.isByVal();
2916 // Promote the value if needed.
2917 switch (VA.getLocInfo()) {
2918 default: llvm_unreachable("Unknown loc info!");
2919 case CCValAssign::Full: break;
2920 case CCValAssign::SExt:
2921 Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, RegVT, Arg);
2923 case CCValAssign::ZExt:
2924 Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, RegVT, Arg);
2926 case CCValAssign::AExt:
2927 if (RegVT.is128BitVector()) {
2928 // Special case: passing MMX values in XMM registers.
2929 Arg = DAG.getNode(ISD::BITCAST, dl, MVT::i64, Arg);
2930 Arg = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64, Arg);
2931 Arg = getMOVL(DAG, dl, MVT::v2i64, DAG.getUNDEF(MVT::v2i64), Arg);
2933 Arg = DAG.getNode(ISD::ANY_EXTEND, dl, RegVT, Arg);
2935 case CCValAssign::BCvt:
2936 Arg = DAG.getNode(ISD::BITCAST, dl, RegVT, Arg);
2938 case CCValAssign::Indirect: {
2939 // Store the argument.
2940 SDValue SpillSlot = DAG.CreateStackTemporary(VA.getValVT());
2941 int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex();
2942 Chain = DAG.getStore(Chain, dl, Arg, SpillSlot,
2943 MachinePointerInfo::getFixedStack(FI),
2950 if (VA.isRegLoc()) {
2951 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
2952 if (isVarArg && IsWin64) {
2953 // Win64 ABI requires argument XMM reg to be copied to the corresponding
2954 // shadow reg if callee is a varargs function.
2955 unsigned ShadowReg = 0;
2956 switch (VA.getLocReg()) {
2957 case X86::XMM0: ShadowReg = X86::RCX; break;
2958 case X86::XMM1: ShadowReg = X86::RDX; break;
2959 case X86::XMM2: ShadowReg = X86::R8; break;
2960 case X86::XMM3: ShadowReg = X86::R9; break;
2963 RegsToPass.push_back(std::make_pair(ShadowReg, Arg));
2965 } else if (!IsSibcall && (!isTailCall || isByVal)) {
2966 assert(VA.isMemLoc());
2967 if (!StackPtr.getNode())
2968 StackPtr = DAG.getCopyFromReg(Chain, dl, RegInfo->getStackRegister(),
2970 MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, Arg,
2971 dl, DAG, VA, Flags));
2975 if (!MemOpChains.empty())
2976 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
2978 if (Subtarget->isPICStyleGOT()) {
2979 // ELF / PIC requires GOT in the EBX register before function calls via PLT
2982 RegsToPass.push_back(std::make_pair(unsigned(X86::EBX),
2983 DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), getPointerTy())));
2985 // If we are tail calling and generating PIC/GOT style code load the
2986 // address of the callee into ECX. The value in ecx is used as target of
2987 // the tail jump. This is done to circumvent the ebx/callee-saved problem
2988 // for tail calls on PIC/GOT architectures. Normally we would just put the
2989 // address of GOT into ebx and then call target@PLT. But for tail calls
2990 // ebx would be restored (since ebx is callee saved) before jumping to the callee.
2993 // Note: The actual moving to ECX is done further down.
2994 GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee);
2995 if (G && !G->getGlobal()->hasHiddenVisibility() &&
2996 !G->getGlobal()->hasProtectedVisibility())
2997 Callee = LowerGlobalAddress(Callee, DAG);
2998 else if (isa<ExternalSymbolSDNode>(Callee))
2999 Callee = LowerExternalSymbol(Callee, DAG);
3003 if (Is64Bit && isVarArg && !IsWin64 && !IsMustTail) {
3004 // From AMD64 ABI document:
3005 // For calls that may call functions that use varargs or stdargs
3006 // (prototype-less calls or calls to functions containing ellipsis (...) in
3007 // the declaration) %al is used as hidden argument to specify the number
3008 // of SSE registers used. The contents of %al do not need to match exactly
3009 // the number of registers, but must be an upper bound on the number of SSE
3010 // registers used and is in the range 0 - 8 inclusive.
3012 // Count the number of XMM registers allocated.
3013 static const MCPhysReg XMMArgRegs[] = {
3014 X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
3015 X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7
3017 unsigned NumXMMRegs = CCInfo.getFirstUnallocated(XMMArgRegs, 8);
3018 assert((Subtarget->hasSSE1() || !NumXMMRegs)
3019 && "SSE registers cannot be used when SSE is disabled");
3021 RegsToPass.push_back(std::make_pair(unsigned(X86::AL),
3022 DAG.getConstant(NumXMMRegs, MVT::i8)));
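// For example, a varargs call like printf("%f\n", x) passes a single double
// in XMM0, so AL is set to 1; AL only needs to be an upper bound, so any
// value up to 8 would also be correct.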
3025 if (isVarArg && IsMustTail) {
3026 const auto &Forwards = X86Info->getForwardedMustTailRegParms();
3027 for (const auto &F : Forwards) {
3028 SDValue Val = DAG.getCopyFromReg(Chain, dl, F.VReg, F.VT);
3029 RegsToPass.push_back(std::make_pair(unsigned(F.PReg), Val));
3033 // For tail calls lower the arguments to the 'real' stack slots. Sibcalls
3034 // don't need this because the eligibility check rejects calls that require
3035 // shuffling arguments passed in memory.
3036 if (!IsSibcall && isTailCall) {
3037 // Force all the incoming stack arguments to be loaded from the stack
3038 // before any new outgoing arguments are stored to the stack, because the
3039 // outgoing stack slots may alias the incoming argument stack slots, and
3040 // the alias isn't otherwise explicit. This is slightly more conservative
3041 // than necessary, because it means that each store effectively depends
3042 // on every argument instead of just those arguments it would clobber.
3043 SDValue ArgChain = DAG.getStackArgumentTokenFactor(Chain);
3045 SmallVector<SDValue, 8> MemOpChains2;
3048 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
3049 CCValAssign &VA = ArgLocs[i];
3052 assert(VA.isMemLoc());
3053 SDValue Arg = OutVals[i];
3054 ISD::ArgFlagsTy Flags = Outs[i].Flags;
3055 // Skip inalloca arguments. They don't require any work.
3056 if (Flags.isInAlloca())
3058 // Create frame index.
3059 int32_t Offset = VA.getLocMemOffset()+FPDiff;
3060 uint32_t OpSize = (VA.getLocVT().getSizeInBits()+7)/8;
3061 FI = MF.getFrameInfo()->CreateFixedObject(OpSize, Offset, true);
3062 FIN = DAG.getFrameIndex(FI, getPointerTy());
3064 if (Flags.isByVal()) {
3065 // Copy relative to framepointer.
3066 SDValue Source = DAG.getIntPtrConstant(VA.getLocMemOffset());
3067 if (!StackPtr.getNode())
3068 StackPtr = DAG.getCopyFromReg(Chain, dl,
3069 RegInfo->getStackRegister(),
3071 Source = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, Source);
3073 MemOpChains2.push_back(CreateCopyOfByValArgument(Source, FIN,
3077 // Store relative to framepointer.
3078 MemOpChains2.push_back(
3079 DAG.getStore(ArgChain, dl, Arg, FIN,
3080 MachinePointerInfo::getFixedStack(FI),
3085 if (!MemOpChains2.empty())
3086 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains2);
3088 // Store the return address to the appropriate stack slot.
3089 Chain = EmitTailCallStoreRetAddr(DAG, MF, Chain, RetAddrFrIdx,
3090 getPointerTy(), RegInfo->getSlotSize(),
3094 // Build a sequence of copy-to-reg nodes chained together with token chain
3095 // and flag operands which copy the outgoing args into registers.
3097 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
3098 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
3099 RegsToPass[i].second, InFlag);
3100 InFlag = Chain.getValue(1);
3103 if (DAG.getTarget().getCodeModel() == CodeModel::Large) {
3104 assert(Is64Bit && "Large code model is only legal in 64-bit mode.");
3105 // In the 64-bit large code model, we have to make all calls
3106 // through a register, since the call instruction's 32-bit
3107 // pc-relative offset may not be large enough to hold the whole address.
3109 } else if (Callee->getOpcode() == ISD::GlobalAddress) {
3110 // If the callee is a GlobalAddress node (quite common, every direct call
3111 // is) turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
3113 GlobalAddressSDNode* G = cast<GlobalAddressSDNode>(Callee);
3115 // We should use extra load for direct calls to dllimported functions in
3117 const GlobalValue *GV = G->getGlobal();
3118 if (!GV->hasDLLImportStorageClass()) {
3119 unsigned char OpFlags = 0;
3120 bool ExtraLoad = false;
3121 unsigned WrapperKind = ISD::DELETED_NODE;
3123 // On ELF targets, in both X86-64 and X86-32 mode, direct calls to
3124 // external symbols must go through the PLT in PIC mode. If the symbol
3125 // has hidden or protected visibility, or if it is static or local, then
3126 // we don't need to use the PLT - we can directly call it.
3127 if (Subtarget->isTargetELF() &&
3128 DAG.getTarget().getRelocationModel() == Reloc::PIC_ &&
3129 GV->hasDefaultVisibility() && !GV->hasLocalLinkage()) {
3130 OpFlags = X86II::MO_PLT;
3131 } else if (Subtarget->isPICStyleStubAny() &&
3132 (GV->isDeclaration() || GV->isWeakForLinker()) &&
3133 (!Subtarget->getTargetTriple().isMacOSX() ||
3134 Subtarget->getTargetTriple().isMacOSXVersionLT(10, 5))) {
3135 // PC-relative references to external symbols should go through $stub,
3136 // unless we're building with the leopard linker or later, which
3137 // automatically synthesizes these stubs.
3138 OpFlags = X86II::MO_DARWIN_STUB;
3139 } else if (Subtarget->isPICStyleRIPRel() && isa<Function>(GV) &&
3140 cast<Function>(GV)->hasFnAttribute(Attribute::NonLazyBind)) {
3141 // If the function is marked as non-lazy, generate an indirect call
3142 // which loads from the GOT directly. This avoids runtime overhead
3143 // at the cost of eager binding (and one extra byte of encoding).
3144 OpFlags = X86II::MO_GOTPCREL;
3145 WrapperKind = X86ISD::WrapperRIP;
3149 Callee = DAG.getTargetGlobalAddress(GV, dl, getPointerTy(),
3150 G->getOffset(), OpFlags);
3152 // Add a wrapper if needed.
3153 if (WrapperKind != ISD::DELETED_NODE)
3154 Callee = DAG.getNode(X86ISD::WrapperRIP, dl, getPointerTy(), Callee);
3155 // Add extra indirection if needed.
3157 Callee = DAG.getLoad(getPointerTy(), dl, DAG.getEntryNode(), Callee,
3158 MachinePointerInfo::getGOT(),
3159 false, false, false, 0);
3161 } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
3162 unsigned char OpFlags = 0;
3164 // On ELF targets, in either X86-64 or X86-32 mode, direct calls to
3165 // external symbols should go through the PLT.
3166 if (Subtarget->isTargetELF() &&
3167 DAG.getTarget().getRelocationModel() == Reloc::PIC_) {
3168 OpFlags = X86II::MO_PLT;
3169 } else if (Subtarget->isPICStyleStubAny() &&
3170 (!Subtarget->getTargetTriple().isMacOSX() ||
3171 Subtarget->getTargetTriple().isMacOSXVersionLT(10, 5))) {
3172 // PC-relative references to external symbols should go through $stub,
3173 // unless we're building with the leopard linker or later, which
3174 // automatically synthesizes these stubs.
3175 OpFlags = X86II::MO_DARWIN_STUB;
3178 Callee = DAG.getTargetExternalSymbol(S->getSymbol(), getPointerTy(),
3180 } else if (Subtarget->isTarget64BitILP32() &&
3181 Callee->getValueType(0) == MVT::i32) {
3182 // Zero-extend the 32-bit Callee address into a 64-bit address according to the x32 ABI.
3183 Callee = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64, Callee);
3186 // Returns a chain & a flag for retval copy to use.
3187 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
3188 SmallVector<SDValue, 8> Ops;
3190 if (!IsSibcall && isTailCall) {
3191 Chain = DAG.getCALLSEQ_END(Chain,
3192 DAG.getIntPtrConstant(NumBytesToPop, true),
3193 DAG.getIntPtrConstant(0, true), InFlag, dl);
3194 InFlag = Chain.getValue(1);
3197 Ops.push_back(Chain);
3198 Ops.push_back(Callee);
3201 Ops.push_back(DAG.getConstant(FPDiff, MVT::i32));
3203 // Add argument registers to the end of the list so that they are known live into the call.
3205 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
3206 Ops.push_back(DAG.getRegister(RegsToPass[i].first,
3207 RegsToPass[i].second.getValueType()));
3209 // Add a register mask operand representing the call-preserved registers.
3210 const TargetRegisterInfo *TRI = Subtarget->getRegisterInfo();
3211 const uint32_t *Mask = TRI->getCallPreservedMask(CallConv);
3212 assert(Mask && "Missing call preserved mask for calling convention");
3213 Ops.push_back(DAG.getRegisterMask(Mask));
3215 if (InFlag.getNode())
3216 Ops.push_back(InFlag);
3220 //// If this is the first return lowered for this function, add the regs
3221 //// to the liveout set for the function.
3222 // This isn't right, although it's probably harmless on x86; liveouts
3223 // should be computed from returns not tail calls. Consider a void
3224 // function making a tail call to a function returning int.
3225 return DAG.getNode(X86ISD::TC_RETURN, dl, NodeTys, Ops);
3228 Chain = DAG.getNode(X86ISD::CALL, dl, NodeTys, Ops);
3229 InFlag = Chain.getValue(1);
3231 // Create the CALLSEQ_END node.
3232 unsigned NumBytesForCalleeToPop;
3233 if (X86::isCalleePop(CallConv, Is64Bit, isVarArg,
3234 DAG.getTarget().Options.GuaranteedTailCallOpt))
3235 NumBytesForCalleeToPop = NumBytes; // Callee pops everything
3236 else if (!Is64Bit && !IsTailCallConvention(CallConv) &&
3237 !Subtarget->getTargetTriple().isOSMSVCRT() &&
3238 SR == StackStructReturn)
3239 // If this is a call to a struct-return function, the callee
3240 // pops the hidden struct pointer, so we have to push it back.
3241 // This is common for Darwin/X86, Linux & Mingw32 targets.
3242 // For MSVC Win32 targets, the caller pops the hidden struct pointer.
3243 NumBytesForCalleeToPop = 4;
3245 NumBytesForCalleeToPop = 0; // Callee pops nothing.
3247 // Returns a flag for retval copy to use.
3249 Chain = DAG.getCALLSEQ_END(Chain,
3250 DAG.getIntPtrConstant(NumBytesToPop, true),
3251 DAG.getIntPtrConstant(NumBytesForCalleeToPop,
3254 InFlag = Chain.getValue(1);
3257 // Handle result values, copying them out of physregs into vregs that we return.
3259 return LowerCallResult(Chain, InFlag, CallConv, isVarArg,
3260 Ins, dl, DAG, InVals);
3263 //===----------------------------------------------------------------------===//
3264 // Fast Calling Convention (tail call) implementation
3265 //===----------------------------------------------------------------------===//
3267 // Like stdcall, the callee cleans up the arguments, except that ECX is
3268 // reserved for storing the tail-called function's address. Only 2 registers are
3269 // free for argument passing (inreg). Tail call optimization is performed when:
3271 // * tailcallopt is enabled
3272 // * caller/callee are fastcc
3273 // On X86_64 architecture with GOT-style position independent code only local
3274 // (within module) calls are supported at the moment.
3275 // To keep the stack aligned according to the platform ABI, the function
3276 // GetAlignedArgumentStackSize ensures that the argument delta is always a
3277 // multiple of the stack alignment. (Dynamic linkers need this - darwin's dyld for example.)
3278 // If a tail-called callee has more arguments than the caller, the caller
3279 // needs to make sure that there is room to move the RETADDR to. This is
3280 // achieved by reserving an area the size of the argument delta right after the
3281 // original RETADDR, but before the saved framepointer or the spilled registers
3282 // e.g. caller(arg1, arg2) calls callee(arg1, arg2,arg3,arg4)
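// An illustrative sketch of the caller's frame in that case (exact layout
// depends on frame lowering; shown here only to clarify the comment above):
//    arg1
//    arg2
//    RETADDR
//    [ new RETADDR move area, sized by the argument delta ]
//    (possible saved EBP)
//    callee-saved registers, locals ...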
3294 /// GetAlignedArgumentStackSize - Round the stack size up so that it is, e.g.,
3295 /// 16n + 12 bytes for a 16 byte alignment requirement.
3297 X86TargetLowering::GetAlignedArgumentStackSize(unsigned StackSize,
3298 SelectionDAG& DAG) const {
3299 const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
3300 const TargetFrameLowering &TFI = *Subtarget->getFrameLowering();
3301 unsigned StackAlignment = TFI.getStackAlignment();
3302 uint64_t AlignMask = StackAlignment - 1;
3303 int64_t Offset = StackSize;
3304 unsigned SlotSize = RegInfo->getSlotSize();
3305 if ( (Offset & AlignMask) <= (StackAlignment - SlotSize) ) {
3306 // Number smaller than 12 so just add the difference.
3307 Offset += ((StackAlignment - SlotSize) - (Offset & AlignMask));
3309 // Mask out lower bits, add stackalignment once plus the 12 bytes.
3310 Offset = ((~AlignMask) & Offset) + StackAlignment +
3311 (StackAlignment-SlotSize);
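// Worked example: StackSize = 20, StackAlignment = 16, SlotSize = 4 (32-bit).
// 20 & 15 == 4 <= 12, so Offset = 20 + (12 - 4) = 28 == 16 + 12; after the call
// pushes the 4-byte return address the stack is again 16-byte aligned.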
3316 /// MatchingStackOffset - Return true if the given stack call argument is
3317 /// already available in the same position (relatively) of the caller's
3318 /// incoming argument stack.
3320 bool MatchingStackOffset(SDValue Arg, unsigned Offset, ISD::ArgFlagsTy Flags,
3321 MachineFrameInfo *MFI, const MachineRegisterInfo *MRI,
3322 const X86InstrInfo *TII) {
3323 unsigned Bytes = Arg.getValueType().getSizeInBits() / 8;
3325 if (Arg.getOpcode() == ISD::CopyFromReg) {
3326 unsigned VR = cast<RegisterSDNode>(Arg.getOperand(1))->getReg();
3327 if (!TargetRegisterInfo::isVirtualRegister(VR))
3329 MachineInstr *Def = MRI->getVRegDef(VR);
3332 if (!Flags.isByVal()) {
3333 if (!TII->isLoadFromStackSlot(Def, FI))
3336 unsigned Opcode = Def->getOpcode();
3337 if ((Opcode == X86::LEA32r || Opcode == X86::LEA64r ||
3338 Opcode == X86::LEA64_32r) &&
3339 Def->getOperand(1).isFI()) {
3340 FI = Def->getOperand(1).getIndex();
3341 Bytes = Flags.getByValSize();
3345 } else if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Arg)) {
3346 if (Flags.isByVal())
3347 // ByVal argument is passed in as a pointer but it's now being
3348 // dereferenced. e.g.
3349 // define @foo(%struct.X* %A) {
3350 // tail call @bar(%struct.X* byval %A)
3353 SDValue Ptr = Ld->getBasePtr();
3354 FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(Ptr);
3357 FI = FINode->getIndex();
3358 } else if (Arg.getOpcode() == ISD::FrameIndex && Flags.isByVal()) {
3359 FrameIndexSDNode *FINode = cast<FrameIndexSDNode>(Arg);
3360 FI = FINode->getIndex();
3361 Bytes = Flags.getByValSize();
3365 assert(FI != INT_MAX);
3366 if (!MFI->isFixedObjectIndex(FI))
3368 return Offset == MFI->getObjectOffset(FI) && Bytes == MFI->getObjectSize(FI);
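// For example, in "int f(int x) { return g(x); }" compiled for 32-bit, the
// outgoing argument x can reuse the caller's own fixed incoming slot at the
// same offset and size, so the sibcall needs no store for it.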
3371 /// IsEligibleForTailCallOptimization - Check whether the call is eligible
3372 /// for tail call optimization. Targets which want to do tail call
3373 /// optimization should implement this function.
3375 X86TargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
3376 CallingConv::ID CalleeCC,
3378 bool isCalleeStructRet,
3379 bool isCallerStructRet,
3381 const SmallVectorImpl<ISD::OutputArg> &Outs,
3382 const SmallVectorImpl<SDValue> &OutVals,
3383 const SmallVectorImpl<ISD::InputArg> &Ins,
3384 SelectionDAG &DAG) const {
3385 if (!IsTailCallConvention(CalleeCC) && !IsCCallConvention(CalleeCC))
3388 // If -tailcallopt is specified, make fastcc functions tail-callable.
3389 const MachineFunction &MF = DAG.getMachineFunction();
3390 const Function *CallerF = MF.getFunction();
3392 // If the function return type is x86_fp80 and the callee return type is not,
3393 // then the FP_EXTEND of the call result is not a nop. It's not safe to
3394 // perform a tailcall optimization here.
3395 if (CallerF->getReturnType()->isX86_FP80Ty() && !RetTy->isX86_FP80Ty())
3398 CallingConv::ID CallerCC = CallerF->getCallingConv();
3399 bool CCMatch = CallerCC == CalleeCC;
3400 bool IsCalleeWin64 = Subtarget->isCallingConvWin64(CalleeCC);
3401 bool IsCallerWin64 = Subtarget->isCallingConvWin64(CallerCC);
3403 if (DAG.getTarget().Options.GuaranteedTailCallOpt) {
3404 if (IsTailCallConvention(CalleeCC) && CCMatch)
3409 // Look for obvious safe cases to perform tail call optimization that do not
3410 // require ABI changes. This is what gcc calls sibcall.
3412 // Can't do sibcall if stack needs to be dynamically re-aligned. PEI needs to
3413 // emit a special epilogue.
3414 const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
3415 if (RegInfo->needsStackRealignment(MF))
3418 // Also avoid sibcall optimization if either caller or callee uses struct
3419 // return semantics.
3420 if (isCalleeStructRet || isCallerStructRet)
3423 // An stdcall/thiscall caller is expected to clean up its arguments; the
3424 // callee isn't going to do that.
3425 // FIXME: this is more restrictive than needed. We could produce a tailcall
3426 // when the stack adjustment matches. For example, with a thiscall that takes
3427 // only one argument.
3428 if (!CCMatch && (CallerCC == CallingConv::X86_StdCall ||
3429 CallerCC == CallingConv::X86_ThisCall))
3432 // Do not sibcall optimize vararg calls unless all arguments are passed via registers.
3434 if (isVarArg && !Outs.empty()) {
3436 // Optimizing for varargs on Win64 is unlikely to be safe without
3437 // additional testing.
3438 if (IsCalleeWin64 || IsCallerWin64)
3441 SmallVector<CCValAssign, 16> ArgLocs;
3442 CCState CCInfo(CalleeCC, isVarArg, DAG.getMachineFunction(), ArgLocs,
3445 CCInfo.AnalyzeCallOperands(Outs, CC_X86);
3446 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i)
3447 if (!ArgLocs[i].isRegLoc())
3451 // If the call result is in ST0 / ST1, it needs to be popped off the x87
3452 // stack. Therefore, if it's not used by the call it is not safe to optimize
3453 // this into a sibcall.
3454 bool Unused = false;
3455 for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
3462 SmallVector<CCValAssign, 16> RVLocs;
3463 CCState CCInfo(CalleeCC, false, DAG.getMachineFunction(), RVLocs,
3465 CCInfo.AnalyzeCallResult(Ins, RetCC_X86);
3466 for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
3467 CCValAssign &VA = RVLocs[i];
3468 if (VA.getLocReg() == X86::FP0 || VA.getLocReg() == X86::FP1)
3473 // If the calling conventions do not match, then we'd better make sure the
3474 // results are returned in the same way as what the caller expects.
3476 SmallVector<CCValAssign, 16> RVLocs1;
3477 CCState CCInfo1(CalleeCC, false, DAG.getMachineFunction(), RVLocs1,
3479 CCInfo1.AnalyzeCallResult(Ins, RetCC_X86);
3481 SmallVector<CCValAssign, 16> RVLocs2;
3482 CCState CCInfo2(CallerCC, false, DAG.getMachineFunction(), RVLocs2,
3484 CCInfo2.AnalyzeCallResult(Ins, RetCC_X86);
3486 if (RVLocs1.size() != RVLocs2.size())
3488 for (unsigned i = 0, e = RVLocs1.size(); i != e; ++i) {
3489 if (RVLocs1[i].isRegLoc() != RVLocs2[i].isRegLoc())
3491 if (RVLocs1[i].getLocInfo() != RVLocs2[i].getLocInfo())
3493 if (RVLocs1[i].isRegLoc()) {
3494 if (RVLocs1[i].getLocReg() != RVLocs2[i].getLocReg())
3497 if (RVLocs1[i].getLocMemOffset() != RVLocs2[i].getLocMemOffset())
3503 // If the callee takes no arguments then go on to check the results of the call.
3505 if (!Outs.empty()) {
3506 // Check if stack adjustment is needed. For now, do not do this if any
3507 // argument is passed on the stack.
3508 SmallVector<CCValAssign, 16> ArgLocs;
3509 CCState CCInfo(CalleeCC, isVarArg, DAG.getMachineFunction(), ArgLocs,
3512 // Allocate shadow area for Win64
3514 CCInfo.AllocateStack(32, 8);
3516 CCInfo.AnalyzeCallOperands(Outs, CC_X86);
3517 if (CCInfo.getNextStackOffset()) {
3518 MachineFunction &MF = DAG.getMachineFunction();
3519 if (MF.getInfo<X86MachineFunctionInfo>()->getBytesToPopOnReturn())
3522 // Check if the arguments are already laid out in the right way as
3523 // the caller's fixed stack objects.
3524 MachineFrameInfo *MFI = MF.getFrameInfo();
3525 const MachineRegisterInfo *MRI = &MF.getRegInfo();
3526 const X86InstrInfo *TII = Subtarget->getInstrInfo();
3527 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
3528 CCValAssign &VA = ArgLocs[i];
3529 SDValue Arg = OutVals[i];
3530 ISD::ArgFlagsTy Flags = Outs[i].Flags;
3531 if (VA.getLocInfo() == CCValAssign::Indirect)
3533 if (!VA.isRegLoc()) {
3534 if (!MatchingStackOffset(Arg, VA.getLocMemOffset(), Flags,
3541 // If the tailcall address may be in a register, then make sure it's
3542 // possible to register allocate for it. In 32-bit, the call address can
3543 // only target EAX, EDX, or ECX since the tail call must be scheduled after
3544 // callee-saved registers are restored. These happen to be the same
3545 // registers used to pass 'inreg' arguments so watch out for those.
3546 if (!Subtarget->is64Bit() &&
3547 ((!isa<GlobalAddressSDNode>(Callee) &&
3548 !isa<ExternalSymbolSDNode>(Callee)) ||
3549 DAG.getTarget().getRelocationModel() == Reloc::PIC_)) {
3550 unsigned NumInRegs = 0;
3551 // In PIC we need an extra register to formulate the address computation for the callee.
3553 unsigned MaxInRegs =
3554 (DAG.getTarget().getRelocationModel() == Reloc::PIC_) ? 2 : 3;
3556 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
3557 CCValAssign &VA = ArgLocs[i];
3560 unsigned Reg = VA.getLocReg();
3563 case X86::EAX: case X86::EDX: case X86::ECX:
3564 if (++NumInRegs == MaxInRegs)
3576 X86TargetLowering::createFastISel(FunctionLoweringInfo &funcInfo,
3577 const TargetLibraryInfo *libInfo) const {
3578 return X86::createFastISel(funcInfo, libInfo);
3581 //===----------------------------------------------------------------------===//
3582 // Other Lowering Hooks
3583 //===----------------------------------------------------------------------===//
3585 static bool MayFoldLoad(SDValue Op) {
3586 return Op.hasOneUse() && ISD::isNormalLoad(Op.getNode());
3589 static bool MayFoldIntoStore(SDValue Op) {
3590 return Op.hasOneUse() && ISD::isNormalStore(*Op.getNode()->use_begin());
3593 static bool isTargetShuffle(unsigned Opcode) {
3595 default: return false;
3596 case X86ISD::BLENDI:
3597 case X86ISD::PSHUFB:
3598 case X86ISD::PSHUFD:
3599 case X86ISD::PSHUFHW:
3600 case X86ISD::PSHUFLW:
3602 case X86ISD::PALIGNR:
3603 case X86ISD::MOVLHPS:
3604 case X86ISD::MOVLHPD:
3605 case X86ISD::MOVHLPS:
3606 case X86ISD::MOVLPS:
3607 case X86ISD::MOVLPD:
3608 case X86ISD::MOVSHDUP:
3609 case X86ISD::MOVSLDUP:
3610 case X86ISD::MOVDDUP:
3613 case X86ISD::UNPCKL:
3614 case X86ISD::UNPCKH:
3615 case X86ISD::VPERMILPI:
3616 case X86ISD::VPERM2X128:
3617 case X86ISD::VPERMI:
3622 static SDValue getTargetShuffleNode(unsigned Opc, SDLoc dl, EVT VT,
3623 SDValue V1, SelectionDAG &DAG) {
3625 default: llvm_unreachable("Unknown x86 shuffle node");
3626 case X86ISD::MOVSHDUP:
3627 case X86ISD::MOVSLDUP:
3628 case X86ISD::MOVDDUP:
3629 return DAG.getNode(Opc, dl, VT, V1);
3633 static SDValue getTargetShuffleNode(unsigned Opc, SDLoc dl, EVT VT,
3634 SDValue V1, unsigned TargetMask,
3635 SelectionDAG &DAG) {
3637 default: llvm_unreachable("Unknown x86 shuffle node");
3638 case X86ISD::PSHUFD:
3639 case X86ISD::PSHUFHW:
3640 case X86ISD::PSHUFLW:
3641 case X86ISD::VPERMILPI:
3642 case X86ISD::VPERMI:
3643 return DAG.getNode(Opc, dl, VT, V1, DAG.getConstant(TargetMask, MVT::i8));
3647 static SDValue getTargetShuffleNode(unsigned Opc, SDLoc dl, EVT VT,
3648 SDValue V1, SDValue V2, unsigned TargetMask,
3649 SelectionDAG &DAG) {
3651 default: llvm_unreachable("Unknown x86 shuffle node");
3652 case X86ISD::PALIGNR:
3653 case X86ISD::VALIGN:
3655 case X86ISD::VPERM2X128:
3656 return DAG.getNode(Opc, dl, VT, V1, V2,
3657 DAG.getConstant(TargetMask, MVT::i8));
3661 static SDValue getTargetShuffleNode(unsigned Opc, SDLoc dl, EVT VT,
3662 SDValue V1, SDValue V2, SelectionDAG &DAG) {
3664 default: llvm_unreachable("Unknown x86 shuffle node");
3665 case X86ISD::MOVLHPS:
3666 case X86ISD::MOVLHPD:
3667 case X86ISD::MOVHLPS:
3668 case X86ISD::MOVLPS:
3669 case X86ISD::MOVLPD:
3672 case X86ISD::UNPCKL:
3673 case X86ISD::UNPCKH:
3674 return DAG.getNode(Opc, dl, VT, V1, V2);
3678 SDValue X86TargetLowering::getReturnAddressFrameIndex(SelectionDAG &DAG) const {
3679 MachineFunction &MF = DAG.getMachineFunction();
3680 const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
3681 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
3682 int ReturnAddrIndex = FuncInfo->getRAIndex();
3684 if (ReturnAddrIndex == 0) {
3685 // Set up a frame object for the return address.
3686 unsigned SlotSize = RegInfo->getSlotSize();
3687 ReturnAddrIndex = MF.getFrameInfo()->CreateFixedObject(SlotSize,
3690 FuncInfo->setRAIndex(ReturnAddrIndex);
3693 return DAG.getFrameIndex(ReturnAddrIndex, getPointerTy());
3696 bool X86::isOffsetSuitableForCodeModel(int64_t Offset, CodeModel::Model M,
3697 bool hasSymbolicDisplacement) {
3698 // Offset should fit into 32 bit immediate field.
3699 if (!isInt<32>(Offset))
3702 // If we don't have a symbolic displacement - we don't have any extra
3704 if (!hasSymbolicDisplacement)
3707 // FIXME: Some tweaks might be needed for medium code model.
3708 if (M != CodeModel::Small && M != CodeModel::Kernel)
3711 // For the small code model we assume that the last object lies within 16MB of
3712 // the end of the 31-bit address boundary. We may also accept pretty large
3713 // negative constants, knowing that all objects are in the positive half of the address space.
3714 if (M == CodeModel::Small && Offset < 16*1024*1024)
3717 // For the kernel code model we know that all objects reside in the negative half
3718 // of the 32-bit address space. We may not accept negative offsets, since they may
3719 // push the address out of range, but we may accept pretty large positive ones.
3720 if (M == CodeModel::Kernel && Offset >= 0)
3726 /// isCalleePop - Determines whether the callee is required to pop its
3727 /// own arguments. Callee pop is necessary to support tail calls.
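/// Callee-pop conventions (stdcall/fastcall/thiscall) pop their own arguments
/// in 32-bit mode; fastcc, GHC and HiPE callees only do so when
/// GuaranteedTailCallOpt (TailCallOpt) is in effect.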
3728 bool X86::isCalleePop(CallingConv::ID CallingConv,
3729 bool is64Bit, bool IsVarArg, bool TailCallOpt) {
3730 switch (CallingConv) {
3733 case CallingConv::X86_StdCall:
3734 case CallingConv::X86_FastCall:
3735 case CallingConv::X86_ThisCall:
3737 case CallingConv::Fast:
3738 case CallingConv::GHC:
3739 case CallingConv::HiPE:
3746 /// \brief Return true if the condition is an unsigned comparison operation.
3747 static bool isX86CCUnsigned(unsigned X86CC) {
3749 default: llvm_unreachable("Invalid integer condition!");
3750 case X86::COND_E: return true;
3751 case X86::COND_G: return false;
3752 case X86::COND_GE: return false;
3753 case X86::COND_L: return false;
3754 case X86::COND_LE: return false;
3755 case X86::COND_NE: return true;
3756 case X86::COND_B: return true;
3757 case X86::COND_A: return true;
3758 case X86::COND_BE: return true;
3759 case X86::COND_AE: return true;
3761 llvm_unreachable("covered switch fell through?!");
3764 /// TranslateX86CC - do a one-to-one translation of an ISD::CondCode to the X86
3765 /// specific condition code, returning the condition code and the LHS/RHS of the
3766 /// comparison to make.
3767 static unsigned TranslateX86CC(ISD::CondCode SetCCOpcode, bool isFP,
3768 SDValue &LHS, SDValue &RHS, SelectionDAG &DAG) {
3770 if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS)) {
3771 if (SetCCOpcode == ISD::SETGT && RHSC->isAllOnesValue()) {
3772 // X > -1 -> X == 0, jump !sign.
3773 RHS = DAG.getConstant(0, RHS.getValueType());
3774 return X86::COND_NS;
3776 if (SetCCOpcode == ISD::SETLT && RHSC->isNullValue()) {
3777 // X < 0 -> X == 0, jump on sign.
3780 if (SetCCOpcode == ISD::SETLT && RHSC->getZExtValue() == 1) {
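// X < 1 -> X <= 0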
3782 RHS = DAG.getConstant(0, RHS.getValueType());
3783 return X86::COND_LE;
3787 switch (SetCCOpcode) {
3788 default: llvm_unreachable("Invalid integer condition!");
3789 case ISD::SETEQ: return X86::COND_E;
3790 case ISD::SETGT: return X86::COND_G;
3791 case ISD::SETGE: return X86::COND_GE;
3792 case ISD::SETLT: return X86::COND_L;
3793 case ISD::SETLE: return X86::COND_LE;
3794 case ISD::SETNE: return X86::COND_NE;
3795 case ISD::SETULT: return X86::COND_B;
3796 case ISD::SETUGT: return X86::COND_A;
3797 case ISD::SETULE: return X86::COND_BE;
3798 case ISD::SETUGE: return X86::COND_AE;
3802 // First determine if it is required or is profitable to flip the operands.
3804 // If LHS is a foldable load, but RHS is not, flip the condition.
3805 if (ISD::isNON_EXTLoad(LHS.getNode()) &&
3806 !ISD::isNON_EXTLoad(RHS.getNode())) {
3807 SetCCOpcode = getSetCCSwappedOperands(SetCCOpcode);
3808 std::swap(LHS, RHS);
3811 switch (SetCCOpcode) {
3817 std::swap(LHS, RHS);
3821 // On a floating point condition, the flags are set as follows:
3822 //  ZF  PF  CF   op
3823 //   0 | 0 | 0 | X > Y
3824 //   0 | 0 | 1 | X < Y
3825 //   1 | 0 | 0 | X == Y
3826 //   1 | 1 | 1 | unordered
3827 switch (SetCCOpcode) {
3828 default: llvm_unreachable("Condcode should be pre-legalized away");
3830 case ISD::SETEQ: return X86::COND_E;
3831 case ISD::SETOLT: // flipped
3833 case ISD::SETGT: return X86::COND_A;
3834 case ISD::SETOLE: // flipped
3836 case ISD::SETGE: return X86::COND_AE;
3837 case ISD::SETUGT: // flipped
3839 case ISD::SETLT: return X86::COND_B;
3840 case ISD::SETUGE: // flipped
3842 case ISD::SETLE: return X86::COND_BE;
3844 case ISD::SETNE: return X86::COND_NE;
3845 case ISD::SETUO: return X86::COND_P;
3846 case ISD::SETO: return X86::COND_NP;
3848 case ISD::SETUNE: return X86::COND_INVALID;
3852 /// hasFPCMov - is there a floating point cmov for the specific X86 condition
3853 /// code. The current x86 ISA includes the following FP cmov instructions:
3854 /// fcmovb, fcmovbe, fcmove, fcmovu, fcmovae, fcmova, fcmovne, fcmovnu.
3855 static bool hasFPCMov(unsigned X86CC) {
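// Sketch of the body, reconstructed from the instruction list in the comment
// above: FP cmov is only available for the unsigned- and parity-based
// condition codes.
switch (X86CC) {
default:
return false;
case X86::COND_B: case X86::COND_BE: case X86::COND_E: case X86::COND_P:
case X86::COND_A: case X86::COND_AE: case X86::COND_NE: case X86::COND_NP:
return true;
}
}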
3871 /// isFPImmLegal - Returns true if the target can instruction select the
3872 /// specified FP immediate natively. If false, the legalizer will
3873 /// materialize the FP immediate as a load from a constant pool.
3874 bool X86TargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const {
3875 for (unsigned i = 0, e = LegalFPImmediates.size(); i != e; ++i) {
3876 if (Imm.bitwiseIsEqual(LegalFPImmediates[i]))
3882 bool X86TargetLowering::shouldReduceLoadWidth(SDNode *Load,
3883 ISD::LoadExtType ExtTy,
3885 // "ELF Handling for Thread-Local Storage" specifies that R_X86_64_GOTTPOFF
3886 // relocation target a movq or addq instruction: don't let the load shrink.
3887 SDValue BasePtr = cast<LoadSDNode>(Load)->getBasePtr();
3888 if (BasePtr.getOpcode() == X86ISD::WrapperRIP)
3889 if (const auto *GA = dyn_cast<GlobalAddressSDNode>(BasePtr.getOperand(0)))
3890 return GA->getTargetFlags() != X86II::MO_GOTTPOFF;
3894 /// \brief Returns true if it is beneficial to convert a load of a constant
3895 /// to just the constant itself.
3896 bool X86TargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm,
3898 assert(Ty->isIntegerTy());
3900 unsigned BitSize = Ty->getPrimitiveSizeInBits();
3901 if (BitSize == 0 || BitSize > 64)
3906 bool X86TargetLowering::isExtractSubvectorCheap(EVT ResVT,
3907 unsigned Index) const {
3908 if (!isOperationLegalOrCustom(ISD::EXTRACT_SUBVECTOR, ResVT))
3911 return (Index == 0 || Index == ResVT.getVectorNumElements());
3914 bool X86TargetLowering::isCheapToSpeculateCttz() const {
3915 // Speculate cttz only if we can directly use TZCNT.
3916 return Subtarget->hasBMI();
3919 bool X86TargetLowering::isCheapToSpeculateCtlz() const {
3920 // Speculate ctlz only if we can directly use LZCNT.
3921 return Subtarget->hasLZCNT();
3924 /// isUndefOrInRange - Return true if Val is undef or if its value falls within
3925 /// the specified half-open range [Low, Hi).
3926 static bool isUndefOrInRange(int Val, int Low, int Hi) {
3927 return (Val < 0) || (Val >= Low && Val < Hi);
3930 /// isUndefOrEqual - Val is either less than zero (undef) or equal to the
3931 /// specified value.
3932 static bool isUndefOrEqual(int Val, int CmpVal) {
3933 return (Val < 0 || Val == CmpVal);
3936 /// isSequentialOrUndefInRange - Return true if every element in Mask, beginning
3937 /// at position Pos and ending before Pos+Size, falls within the specified
3938 /// sequential range [Low, Low+Size) or is undef.
3939 static bool isSequentialOrUndefInRange(ArrayRef<int> Mask,
3940 unsigned Pos, unsigned Size, int Low) {
3941 for (unsigned i = Pos, e = Pos+Size; i != e; ++i, ++Low)
3942 if (!isUndefOrEqual(Mask[i], Low))
3947 /// isPSHUFDMask - Return true if the node specifies a shuffle of elements that
3948 /// is suitable for input to PSHUFD. That is, it doesn't reference the other
3949 /// operand - by default it matches against the first operand.
3950 static bool isPSHUFDMask(ArrayRef<int> Mask, MVT VT,
3951 bool TestSecondOperand = false) {
3952 if (VT != MVT::v4f32 && VT != MVT::v4i32 &&
3953 VT != MVT::v2f64 && VT != MVT::v2i64)
3956 unsigned NumElems = VT.getVectorNumElements();
3957 unsigned Lo = TestSecondOperand ? NumElems : 0;
3958 unsigned Hi = Lo + NumElems;
3960 for (unsigned i = 0; i < NumElems; ++i)
3961 if (!isUndefOrInRange(Mask[i], (int)Lo, (int)Hi))
3967 /// isPSHUFHWMask - Return true if the node specifies a shuffle of elements that
3968 /// is suitable for input to PSHUFHW.
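/// For example, <0, 1, 2, 3, 5, 4, 7, 6> keeps the low quadword in place and
/// only permutes the high one.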
3969 static bool isPSHUFHWMask(ArrayRef<int> Mask, MVT VT, bool HasInt256) {
3970 if (VT != MVT::v8i16 && (!HasInt256 || VT != MVT::v16i16))
3973 // Lower quadword copied in order or undef.
3974 if (!isSequentialOrUndefInRange(Mask, 0, 4, 0))
3977 // Upper quadword shuffled.
3978 for (unsigned i = 4; i != 8; ++i)
3979 if (!isUndefOrInRange(Mask[i], 4, 8))
3982 if (VT == MVT::v16i16) {
3983 // Lower quadword copied in order or undef.
3984 if (!isSequentialOrUndefInRange(Mask, 8, 4, 8))
3987 // Upper quadword shuffled.
3988 for (unsigned i = 12; i != 16; ++i)
3989 if (!isUndefOrInRange(Mask[i], 12, 16))
3996 /// isPSHUFLWMask - Return true if the node specifies a shuffle of elements that
3997 /// is suitable for input to PSHUFLW.
3998 static bool isPSHUFLWMask(ArrayRef<int> Mask, MVT VT, bool HasInt256) {
3999 if (VT != MVT::v8i16 && (!HasInt256 || VT != MVT::v16i16))
4002 // Upper quadword copied in order.
4003 if (!isSequentialOrUndefInRange(Mask, 4, 4, 4))
4006 // Lower quadword shuffled.
4007 for (unsigned i = 0; i != 4; ++i)
4008 if (!isUndefOrInRange(Mask[i], 0, 4))
4011 if (VT == MVT::v16i16) {
4012 // Upper quadword copied in order.
4013 if (!isSequentialOrUndefInRange(Mask, 12, 4, 12))
4016 // Lower quadword shuffled.
4017 for (unsigned i = 8; i != 12; ++i)
4018 if (!isUndefOrInRange(Mask[i], 8, 12))
4025 /// \brief Return true if the mask specifies a shuffle of elements that is
4026 /// suitable for input to an intralane (palignr) or interlane (valign) vector shuffle.
4028 static bool isAlignrMask(ArrayRef<int> Mask, MVT VT, bool InterLane) {
4029 unsigned NumElts = VT.getVectorNumElements();
4030 unsigned NumLanes = InterLane ? 1: VT.getSizeInBits()/128;
4031 unsigned NumLaneElts = NumElts/NumLanes;
4033 // Do not handle 64-bit element shuffles with palignr.
4034 if (NumLaneElts == 2)
4037 for (unsigned l = 0; l != NumElts; l+=NumLaneElts) {
4039 for (i = 0; i != NumLaneElts; ++i) {
4044 // Lane is all undef, go to next lane
4045 if (i == NumLaneElts)
4048 int Start = Mask[i+l];
4050 // Make sure it's in this lane in one of the sources
4051 if (!isUndefOrInRange(Start, l, l+NumLaneElts) &&
4052 !isUndefOrInRange(Start, l+NumElts, l+NumElts+NumLaneElts))
4055 // If not lane 0, then we must match lane 0
4056 if (l != 0 && Mask[i] >= 0 && !isUndefOrEqual(Start, Mask[i]+l))
4059 // Correct second source to be contiguous with first source
4060 if (Start >= (int)NumElts)
4061 Start -= NumElts - NumLaneElts;
4063 // Make sure we're shifting in the right direction.
4064 if (Start <= (int)(i+l))
4069 // Check the rest of the elements to see if they are consecutive.
4070 for (++i; i != NumLaneElts; ++i) {
4071 int Idx = Mask[i+l];
4073 // Make sure it's in this lane
4074 if (!isUndefOrInRange(Idx, l, l+NumLaneElts) &&
4075 !isUndefOrInRange(Idx, l+NumElts, l+NumElts+NumLaneElts))
4078 // If not lane 0, then we must match lane 0
4079 if (l != 0 && Mask[i] >= 0 && !isUndefOrEqual(Idx, Mask[i]+l))
4082 if (Idx >= (int)NumElts)
4083 Idx -= NumElts - NumLaneElts;
4085 if (!isUndefOrEqual(Idx, Start+i))
4094 /// \brief Return true if the node specifies a shuffle of elements that is
4095 /// suitable for input to PALIGNR.
4096 static bool isPALIGNRMask(ArrayRef<int> Mask, MVT VT,
4097 const X86Subtarget *Subtarget) {
4098 if ((VT.is128BitVector() && !Subtarget->hasSSSE3()) ||
4099 (VT.is256BitVector() && !Subtarget->hasInt256()) ||
4100 VT.is512BitVector())
4101 // FIXME: Add AVX512BW.
4104 return isAlignrMask(Mask, VT, false);
4107 /// \brief Return true if the node specifies a shuffle of elements that is
4108 /// suitable for input to VALIGN.
4109 static bool isVALIGNMask(ArrayRef<int> Mask, MVT VT,
4110 const X86Subtarget *Subtarget) {
4111 // FIXME: Add AVX512VL.
4112 if (!VT.is512BitVector() || !Subtarget->hasAVX512())
4114 return isAlignrMask(Mask, VT, true);
4117 /// CommuteVectorShuffleMask - Change values in a shuffle permute mask assuming
4118 /// the two vector operands have swapped position.
4119 static void CommuteVectorShuffleMask(SmallVectorImpl<int> &Mask,
4120 unsigned NumElems) {
4121 for (unsigned i = 0; i != NumElems; ++i) {
4125 else if (idx < (int)NumElems)
4126 Mask[i] = idx + NumElems;
4128 Mask[i] = idx - NumElems;
4132 /// isSHUFPMask - Return true if the specified VECTOR_SHUFFLE operand
4133 /// specifies a shuffle of elements that is suitable for input to 128/256-bit
4134 /// SHUFPS and SHUFPD. If Commuted is true, then it checks for the sources to be
4135 /// in the reverse order of what x86 shuffles want.
4136 static bool isSHUFPMask(ArrayRef<int> Mask, MVT VT, bool Commuted = false) {
4138 unsigned NumElems = VT.getVectorNumElements();
4139 unsigned NumLanes = VT.getSizeInBits()/128;
4140 unsigned NumLaneElems = NumElems/NumLanes;
4142 if (NumLaneElems != 2 && NumLaneElems != 4)
4145 unsigned EltSize = VT.getVectorElementType().getSizeInBits();
4146 bool symmetricMaskRequired =
4147 (VT.getSizeInBits() >= 256) && (EltSize == 32);
4149 // VSHUFPSY divides the resulting vector into 4 chunks.
4150 // The sources are also split into 4 chunks, and each destination
4151 // chunk must come from a different source chunk.
4153 // SRC1 => X7 X6 X5 X4 X3 X2 X1 X0
4154 // SRC2 => Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0
4156 // DST => Y7..Y4, Y7..Y4, X7..X4, X7..X4,
4157 // Y3..Y0, Y3..Y0, X3..X0, X3..X0
4159 // VSHUFPDY divides the resulting vector into 4 chunks.
4160 // The sources are also split into 4 chunks, and each destination
4161 // chunk must come from a different source chunk.
4163 // SRC1 => X3 X2 X1 X0
4164 // SRC2 => Y3 Y2 Y1 Y0
4166 // DST => Y3..Y2, X3..X2, Y1..Y0, X1..X0
4168 SmallVector<int, 4> MaskVal(NumLaneElems, -1);
4169 unsigned HalfLaneElems = NumLaneElems/2;
4170 for (unsigned l = 0; l != NumElems; l += NumLaneElems) {
4171 for (unsigned i = 0; i != NumLaneElems; ++i) {
4172 int Idx = Mask[i+l];
4173 unsigned RngStart = l + ((Commuted == (i<HalfLaneElems)) ? NumElems : 0);
4174 if (!isUndefOrInRange(Idx, RngStart, RngStart+NumLaneElems))
4176 // For VSHUFPSY, the mask of the second half must be the same as the
4177 // first but with the appropriate offsets. This works in the same way as
4178 // VPERMILPS works with masks.
4179 if (!symmetricMaskRequired || Idx < 0)
4181 if (MaskVal[i] < 0) {
4182 MaskVal[i] = Idx - l;
4185 if ((signed)(Idx - l) != MaskVal[i])
4193 /// isMOVHLPSMask - Return true if the specified VECTOR_SHUFFLE operand
4194 /// specifies a shuffle of elements that is suitable for input to MOVHLPS.
4195 static bool isMOVHLPSMask(ArrayRef<int> Mask, MVT VT) {
4196 if (!VT.is128BitVector())
4199 unsigned NumElems = VT.getVectorNumElements();
4204 // Expect element 0 == 6, element 1 == 7, element 2 == 2, element 3 == 3
4205 return isUndefOrEqual(Mask[0], 6) &&
4206 isUndefOrEqual(Mask[1], 7) &&
4207 isUndefOrEqual(Mask[2], 2) &&
4208 isUndefOrEqual(Mask[3], 3);
4211 /// isMOVHLPS_v_undef_Mask - Special case of isMOVHLPSMask for canonical form
4212 /// of vector_shuffle v, v, <2, 3, 2, 3>, i.e. vector_shuffle v, undef, <2, 3, 2, 3>.
4214 static bool isMOVHLPS_v_undef_Mask(ArrayRef<int> Mask, MVT VT) {
4215 if (!VT.is128BitVector())
4218 unsigned NumElems = VT.getVectorNumElements();
4223 return isUndefOrEqual(Mask[0], 2) &&
4224 isUndefOrEqual(Mask[1], 3) &&
4225 isUndefOrEqual(Mask[2], 2) &&
4226 isUndefOrEqual(Mask[3], 3);
4229 /// isMOVLPMask - Return true if the specified VECTOR_SHUFFLE operand
4230 /// specifies a shuffle of elements that is suitable for input to MOVLP{S|D}.
4231 static bool isMOVLPMask(ArrayRef<int> Mask, MVT VT) {
4232 if (!VT.is128BitVector())
4235 unsigned NumElems = VT.getVectorNumElements();
4237 if (NumElems != 2 && NumElems != 4)
4240 for (unsigned i = 0, e = NumElems/2; i != e; ++i)
4241 if (!isUndefOrEqual(Mask[i], i + NumElems))
4244 for (unsigned i = NumElems/2, e = NumElems; i != e; ++i)
4245 if (!isUndefOrEqual(Mask[i], i))
4251 /// isMOVLHPSMask - Return true if the specified VECTOR_SHUFFLE operand
4252 /// specifies a shuffle of elements that is suitable for input to MOVLHPS.
4253 static bool isMOVLHPSMask(ArrayRef<int> Mask, MVT VT) {
4254 if (!VT.is128BitVector())
4257 unsigned NumElems = VT.getVectorNumElements();
4259 if (NumElems != 2 && NumElems != 4)
4262 for (unsigned i = 0, e = NumElems/2; i != e; ++i)
4263 if (!isUndefOrEqual(Mask[i], i))
4266 for (unsigned i = 0, e = NumElems/2; i != e; ++i)
4267 if (!isUndefOrEqual(Mask[i + e], i + NumElems))
4273 /// isINSERTPSMask - Return true if the specified VECTOR_SHUFFLE operand
4274 /// specifies a shuffle of elements that is suitable for input to INSERTPS.
4275 /// i.e. all but one element come from the same vector.
4276 static bool isINSERTPSMask(ArrayRef<int> Mask, MVT VT) {
4277 // TODO: Deal with AVX's VINSERTPS
4278 if (!VT.is128BitVector() || (VT != MVT::v4f32 && VT != MVT::v4i32))
4281 unsigned CorrectPosV1 = 0;
4282 unsigned CorrectPosV2 = 0;
4283 for (int i = 0, e = (int)VT.getVectorNumElements(); i != e; ++i) {
4284 if (Mask[i] == -1) {
4292 else if (Mask[i] == i + 4)
4296 if (CorrectPosV1 == 3 || CorrectPosV2 == 3)
4297 // We have 3 elements (undefs count as elements from any vector) from one
4298 // vector, and one from another.
4305 // Some special combinations that can be optimized.
4308 SDValue Compact8x32ShuffleNode(ShuffleVectorSDNode *SVOp,
4309 SelectionDAG &DAG) {
4310 MVT VT = SVOp->getSimpleValueType(0);
4313 if (VT != MVT::v8i32 && VT != MVT::v8f32)
4316 ArrayRef<int> Mask = SVOp->getMask();
4318 // These are the special masks that may be optimized.
4319 static const int MaskToOptimizeEven[] = {0, 8, 2, 10, 4, 12, 6, 14};
4320 static const int MaskToOptimizeOdd[] = {1, 9, 3, 11, 5, 13, 7, 15};
4321 bool MatchEvenMask = true;
4322 bool MatchOddMask = true;
4323 for (int i=0; i<8; ++i) {
4324 if (!isUndefOrEqual(Mask[i], MaskToOptimizeEven[i]))
4325 MatchEvenMask = false;
4326 if (!isUndefOrEqual(Mask[i], MaskToOptimizeOdd[i]))
4327 MatchOddMask = false;
4330 if (!MatchEvenMask && !MatchOddMask)
4333 SDValue UndefNode = DAG.getNode(ISD::UNDEF, dl, VT);
4335 SDValue Op0 = SVOp->getOperand(0);
4336 SDValue Op1 = SVOp->getOperand(1);
4338 if (MatchEvenMask) {
4339 // Shift the second operand right to 32 bits.
4340 static const int ShiftRightMask[] = {-1, 0, -1, 2, -1, 4, -1, 6 };
4341 Op1 = DAG.getVectorShuffle(VT, dl, Op1, UndefNode, ShiftRightMask);
4343 // Shift the first operand left to 32 bits.
4344 static const int ShiftLeftMask[] = {1, -1, 3, -1, 5, -1, 7, -1 };
4345 Op0 = DAG.getVectorShuffle(VT, dl, Op0, UndefNode, ShiftLeftMask);
4347 static const int BlendMask[] = {0, 9, 2, 11, 4, 13, 6, 15};
4348 return DAG.getVectorShuffle(VT, dl, Op0, Op1, BlendMask);
4351 /// isUNPCKLMask - Return true if the specified VECTOR_SHUFFLE operand
4352 /// specifies a shuffle of elements that is suitable for input to UNPCKL.
4353 static bool isUNPCKLMask(ArrayRef<int> Mask, MVT VT,
4354 bool HasInt256, bool V2IsSplat = false) {
4356 assert(VT.getSizeInBits() >= 128 &&
4357 "Unsupported vector type for unpckl");
4359 unsigned NumElts = VT.getVectorNumElements();
4360 if (VT.is256BitVector() && NumElts != 4 && NumElts != 8 &&
4361 (!HasInt256 || (NumElts != 16 && NumElts != 32)))
4364 assert((!VT.is512BitVector() || VT.getScalarType().getSizeInBits() >= 32) &&
4365 "Unsupported vector type for unpckh");
4367 // AVX defines UNPCK* to operate independently on 128-bit lanes.
4368 unsigned NumLanes = VT.getSizeInBits()/128;
4369 unsigned NumLaneElts = NumElts/NumLanes;
4371 for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
4372 for (unsigned i = 0, j = l; i != NumLaneElts; i += 2, ++j) {
4373 int BitI = Mask[l+i];
4374 int BitI1 = Mask[l+i+1];
4375 if (!isUndefOrEqual(BitI, j))
4378 if (!isUndefOrEqual(BitI1, NumElts))
4381 if (!isUndefOrEqual(BitI1, j + NumElts))
4390 /// isUNPCKHMask - Return true if the specified VECTOR_SHUFFLE operand
4391 /// specifies a shuffle of elements that is suitable for input to UNPCKH.
4392 static bool isUNPCKHMask(ArrayRef<int> Mask, MVT VT,
4393 bool HasInt256, bool V2IsSplat = false) {
4394 assert(VT.getSizeInBits() >= 128 &&
4395 "Unsupported vector type for unpckh");
4397 unsigned NumElts = VT.getVectorNumElements();
4398 if (VT.is256BitVector() && NumElts != 4 && NumElts != 8 &&
4399 (!HasInt256 || (NumElts != 16 && NumElts != 32)))
4402 assert((!VT.is512BitVector() || VT.getScalarType().getSizeInBits() >= 32) &&
4403 "Unsupported vector type for unpckh");
4405 // AVX defines UNPCK* to operate independently on 128-bit lanes.
4406 unsigned NumLanes = VT.getSizeInBits()/128;
4407 unsigned NumLaneElts = NumElts/NumLanes;
4409 for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
4410 for (unsigned i = 0, j = l+NumLaneElts/2; i != NumLaneElts; i += 2, ++j) {
4411 int BitI = Mask[l+i];
4412 int BitI1 = Mask[l+i+1];
4413 if (!isUndefOrEqual(BitI, j))
4416 if (isUndefOrEqual(BitI1, NumElts))
4419 if (!isUndefOrEqual(BitI1, j+NumElts))
4427 /// isUNPCKL_v_undef_Mask - Special case of isUNPCKLMask for canonical form
4428 /// of vector_shuffle v, v, <0, 4, 1, 5>, i.e. vector_shuffle v, undef, <0, 0, 1, 1>.
4430 static bool isUNPCKL_v_undef_Mask(ArrayRef<int> Mask, MVT VT, bool HasInt256) {
4431 unsigned NumElts = VT.getVectorNumElements();
4432 bool Is256BitVec = VT.is256BitVector();
4434 if (VT.is512BitVector())
4436 assert((VT.is128BitVector() || VT.is256BitVector()) &&
4437 "Unsupported vector type for unpckh");
4439 if (Is256BitVec && NumElts != 4 && NumElts != 8 &&
4440 (!HasInt256 || (NumElts != 16 && NumElts != 32)))
4443 // For 256-bit i64/f64, use MOVDDUPY instead, so reject the matching pattern
4444 // FIXME: Need a better way to get rid of this, there's no latency difference
4445 // between UNPCKLPD and MOVDDUP; the latter should always be checked first and
4446 // the former later. We should also remove the "_undef" special mask.
4447 if (NumElts == 4 && Is256BitVec)
4450 // Handle 128 and 256-bit vector lengths. AVX defines UNPCK* to operate
4451 // independently on 128-bit lanes.
4452 unsigned NumLanes = VT.getSizeInBits()/128;
4453 unsigned NumLaneElts = NumElts/NumLanes;
4455 for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
4456 for (unsigned i = 0, j = l; i != NumLaneElts; i += 2, ++j) {
4457 int BitI = Mask[l+i];
4458 int BitI1 = Mask[l+i+1];
4460 if (!isUndefOrEqual(BitI, j))
4462 if (!isUndefOrEqual(BitI1, j))
4470 /// isUNPCKH_v_undef_Mask - Special case of isUNPCKHMask for canonical form
4471 /// of vector_shuffle v, v, <2, 6, 3, 7>, i.e. vector_shuffle v, undef, <2, 2, 3, 3>.
4473 static bool isUNPCKH_v_undef_Mask(ArrayRef<int> Mask, MVT VT, bool HasInt256) {
4474 unsigned NumElts = VT.getVectorNumElements();
4476 if (VT.is512BitVector())
4479 assert((VT.is128BitVector() || VT.is256BitVector()) &&
4480 "Unsupported vector type for unpckh");
4482 if (VT.is256BitVector() && NumElts != 4 && NumElts != 8 &&
4483 (!HasInt256 || (NumElts != 16 && NumElts != 32)))
4486 // Handle 128 and 256-bit vector lengths. AVX defines UNPCK* to operate
4487 // independently on 128-bit lanes.
4488 unsigned NumLanes = VT.getSizeInBits()/128;
4489 unsigned NumLaneElts = NumElts/NumLanes;
4491 for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
4492 for (unsigned i = 0, j = l+NumLaneElts/2; i != NumLaneElts; i += 2, ++j) {
4493 int BitI = Mask[l+i];
4494 int BitI1 = Mask[l+i+1];
4495 if (!isUndefOrEqual(BitI, j))
4497 if (!isUndefOrEqual(BitI1, j))
4504 // Match for INSERTI64x4/INSERTF64x4 instructions (src0[0], src1[0]) or
4505 // (src1[0], src0[1]); i.e. manipulation of 256-bit sub-vectors.
4506 static bool isINSERT64x4Mask(ArrayRef<int> Mask, MVT VT, unsigned int *Imm) {
4507 if (!VT.is512BitVector())
4510 unsigned NumElts = VT.getVectorNumElements();
4511 unsigned HalfSize = NumElts/2;
4512 if (isSequentialOrUndefInRange(Mask, 0, HalfSize, 0)) {
4513 if (isSequentialOrUndefInRange(Mask, HalfSize, HalfSize, NumElts)) {
4518 if (isSequentialOrUndefInRange(Mask, 0, HalfSize, NumElts)) {
4519 if (isSequentialOrUndefInRange(Mask, HalfSize, HalfSize, HalfSize)) {
4527 /// isMOVLMask - Return true if the specified VECTOR_SHUFFLE operand
4528 /// specifies a shuffle of elements that is suitable for input to MOVSS,
4529 /// MOVSD, and MOVD, i.e. setting the lowest element.
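/// For example, <4, 1, 2, 3> on a v4i32 shuffle takes element 0 from V2 and
/// leaves the remaining elements from V1 in place.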
4530 static bool isMOVLMask(ArrayRef<int> Mask, EVT VT) {
4531 if (VT.getVectorElementType().getSizeInBits() < 32)
4533 if (!VT.is128BitVector())
4536 unsigned NumElts = VT.getVectorNumElements();
4538 if (!isUndefOrEqual(Mask[0], NumElts))
4541 for (unsigned i = 1; i != NumElts; ++i)
4542 if (!isUndefOrEqual(Mask[i], i))
4548 /// isVPERM2X128Mask - Match 256-bit shuffles where the elements are considered
4549 /// as permutations between 128-bit chunks or halves. As an example, in this shuffle:
4551 /// vector_shuffle <4, 5, 6, 7, 12, 13, 14, 15>
4552 /// the first half comes from the second half of V1 and the second half from
4553 /// the second half of V2.
4554 static bool isVPERM2X128Mask(ArrayRef<int> Mask, MVT VT, bool HasFp256) {
4555 if (!HasFp256 || !VT.is256BitVector())
4558 // The shuffle result is divided into half A and half B. In total the two
4559 // sources have 4 halves, namely: C, D, E, F. The final values of A and
4560 // B must come from C, D, E or F.
4561 unsigned HalfSize = VT.getVectorNumElements()/2;
4562 bool MatchA = false, MatchB = false;
4564 // Check if A comes from one of C, D, E, F.
4565 for (unsigned Half = 0; Half != 4; ++Half) {
4566 if (isSequentialOrUndefInRange(Mask, 0, HalfSize, Half*HalfSize)) {
4572 // Check if B comes from one of C, D, E, F.
4573 for (unsigned Half = 0; Half != 4; ++Half) {
4574 if (isSequentialOrUndefInRange(Mask, HalfSize, HalfSize, Half*HalfSize)) {
4580 return MatchA && MatchB;
4583 /// getShuffleVPERM2X128Immediate - Return the appropriate immediate to shuffle
4584 /// the specified VECTOR_MASK mask with VPERM2F128/VPERM2I128 instructions.
4585 static unsigned getShuffleVPERM2X128Immediate(ShuffleVectorSDNode *SVOp) {
4586 MVT VT = SVOp->getSimpleValueType(0);
4588 unsigned HalfSize = VT.getVectorNumElements()/2;
4590 unsigned FstHalf = 0, SndHalf = 0;
4591 for (unsigned i = 0; i < HalfSize; ++i) {
4592 if (SVOp->getMaskElt(i) > 0) {
4593 FstHalf = SVOp->getMaskElt(i)/HalfSize;
4597 for (unsigned i = HalfSize; i < HalfSize*2; ++i) {
4598 if (SVOp->getMaskElt(i) > 0) {
4599 SndHalf = SVOp->getMaskElt(i)/HalfSize;
4604 return (FstHalf | (SndHalf << 4));
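// For example, with mask <4, 5, 6, 7, 12, 13, 14, 15> on v8i32, FstHalf == 1
// (high half of V1) and SndHalf == 3 (high half of V2), giving immediate 0x31.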
4607 // Symmetric in-lane mask. Each lane has 4 elements (for imm8)
4608 static bool isPermImmMask(ArrayRef<int> Mask, MVT VT, unsigned& Imm8) {
4609 unsigned EltSize = VT.getVectorElementType().getSizeInBits();
4613 unsigned NumElts = VT.getVectorNumElements();
4615 if (VT.is128BitVector() || (VT.is256BitVector() && EltSize == 64)) {
4616 for (unsigned i = 0; i != NumElts; ++i) {
4619 Imm8 |= Mask[i] << (i*2);
4624 unsigned LaneSize = 4;
4625 SmallVector<int, 4> MaskVal(LaneSize, -1);
4627 for (unsigned l = 0; l != NumElts; l += LaneSize) {
4628 for (unsigned i = 0; i != LaneSize; ++i) {
4629 if (!isUndefOrInRange(Mask[i+l], l, l+LaneSize))
4633 if (MaskVal[i] < 0) {
4634 MaskVal[i] = Mask[i+l] - l;
4635 Imm8 |= MaskVal[i] << (i*2);
4638 if (Mask[i+l] != (signed)(MaskVal[i]+l))
4645 /// isVPERMILPMask - Return true if the specified VECTOR_SHUFFLE operand
4646 /// specifies a shuffle of elements that is suitable for input to VPERMILPD*.
4647 /// Note that VPERMIL mask matching is different depending on whether the underlying
4648 /// type is 32 or 64. In the VPERMILPS the high half of the mask should point
4649 /// to the same elements of the low, but to the higher half of the source.
4650 /// In VPERMILPD the two lanes could be shuffled independently of each other
4651 /// with the same restriction that lanes can't be crossed. Also handles PSHUFDY.
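/// e.g. (illustrative): for v8f32, <1, 0, 3, 2, 5, 4, 7, 6> is a valid VPERMILPS
/// mask because the high lane repeats the low-lane pattern within its own lane.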
4652 static bool isVPERMILPMask(ArrayRef<int> Mask, MVT VT) {
4653 unsigned EltSize = VT.getVectorElementType().getSizeInBits();
4654 if (VT.getSizeInBits() < 256 || EltSize < 32)
4656 bool symmetricMaskRequired = (EltSize == 32);
4657 unsigned NumElts = VT.getVectorNumElements();
4659 unsigned NumLanes = VT.getSizeInBits()/128;
4660 unsigned LaneSize = NumElts/NumLanes;
4661 // 2 or 4 elements in one lane
4663 SmallVector<int, 4> ExpectedMaskVal(LaneSize, -1);
4664 for (unsigned l = 0; l != NumElts; l += LaneSize) {
4665 for (unsigned i = 0; i != LaneSize; ++i) {
4666 if (!isUndefOrInRange(Mask[i+l], l, l+LaneSize))
4668 if (symmetricMaskRequired) {
4669 if (ExpectedMaskVal[i] < 0 && Mask[i+l] >= 0) {
4670 ExpectedMaskVal[i] = Mask[i+l] - l;
4673 if (!isUndefOrEqual(Mask[i+l], ExpectedMaskVal[i]+l))
4681 /// isCommutedMOVLMask - Returns true if the shuffle mask is the reverse of what
4682 /// x86 movss wants. X86 movs requires the lowest element to be the lowest
4683 /// element of vector 2, with the other elements coming from vector 1 in order.
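/// e.g. (illustrative) v4i32 mask to match (the commuted form): <0, 5, 6, 7>.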
4684 static bool isCommutedMOVLMask(ArrayRef<int> Mask, MVT VT,
4685 bool V2IsSplat = false, bool V2IsUndef = false) {
4686 if (!VT.is128BitVector())
4689 unsigned NumOps = VT.getVectorNumElements();
4690 if (NumOps != 2 && NumOps != 4 && NumOps != 8 && NumOps != 16)
4693 if (!isUndefOrEqual(Mask[0], 0))
4696 for (unsigned i = 1; i != NumOps; ++i)
4697 if (!(isUndefOrEqual(Mask[i], i+NumOps) ||
4698 (V2IsUndef && isUndefOrInRange(Mask[i], NumOps, NumOps*2)) ||
4699 (V2IsSplat && isUndefOrEqual(Mask[i], NumOps))))
4705 /// isMOVSHDUPMask - Return true if the specified VECTOR_SHUFFLE operand
4706 /// specifies a shuffle of elements that is suitable for input to MOVSHDUP.
4707 /// Masks to match: <1, 1, 3, 3> or <1, 1, 3, 3, 5, 5, 7, 7>
4708 static bool isMOVSHDUPMask(ArrayRef<int> Mask, MVT VT,
4709 const X86Subtarget *Subtarget) {
4710 if (!Subtarget->hasSSE3())
4713 unsigned NumElems = VT.getVectorNumElements();
4715 if ((VT.is128BitVector() && NumElems != 4) ||
4716 (VT.is256BitVector() && NumElems != 8) ||
4717 (VT.is512BitVector() && NumElems != 16))
4720 // "i+1" is the value the indexed mask element must have
4721 for (unsigned i = 0; i != NumElems; i += 2)
4722 if (!isUndefOrEqual(Mask[i], i+1) ||
4723 !isUndefOrEqual(Mask[i+1], i+1))
4729 /// isMOVSLDUPMask - Return true if the specified VECTOR_SHUFFLE operand
4730 /// specifies a shuffle of elements that is suitable for input to MOVSLDUP.
4731 /// Masks to match: <0, 0, 2, 2> or <0, 0, 2, 2, 4, 4, 6, 6>
4732 static bool isMOVSLDUPMask(ArrayRef<int> Mask, MVT VT,
4733 const X86Subtarget *Subtarget) {
4734 if (!Subtarget->hasSSE3())
4737 unsigned NumElems = VT.getVectorNumElements();
4739 if ((VT.is128BitVector() && NumElems != 4) ||
4740 (VT.is256BitVector() && NumElems != 8) ||
4741 (VT.is512BitVector() && NumElems != 16))
4744 // "i" is the value the indexed mask element must have
4745 for (unsigned i = 0; i != NumElems; i += 2)
4746 if (!isUndefOrEqual(Mask[i], i) ||
4747 !isUndefOrEqual(Mask[i+1], i))
4753 /// isMOVDDUPYMask - Return true if the specified VECTOR_SHUFFLE operand
4754 /// specifies a shuffle of elements that is suitable for input to 256-bit
4755 /// version of MOVDDUP.
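/// e.g. (illustrative) mask to match for v4f64: <0, 0, 2, 2>.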
4756 static bool isMOVDDUPYMask(ArrayRef<int> Mask, MVT VT, bool HasFp256) {
4757 if (!HasFp256 || !VT.is256BitVector())
4760 unsigned NumElts = VT.getVectorNumElements();
4764 for (unsigned i = 0; i != NumElts/2; ++i)
4765 if (!isUndefOrEqual(Mask[i], 0))
4767 for (unsigned i = NumElts/2; i != NumElts; ++i)
4768 if (!isUndefOrEqual(Mask[i], NumElts/2))
4773 /// isMOVDDUPMask - Return true if the specified VECTOR_SHUFFLE operand
4774 /// specifies a shuffle of elements that is suitable for input to 128-bit
4775 /// version of MOVDDUP.
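/// e.g. (illustrative) mask to match for v2f64: <0, 0>.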
4776 static bool isMOVDDUPMask(ArrayRef<int> Mask, MVT VT) {
4777 if (!VT.is128BitVector())
4780 unsigned e = VT.getVectorNumElements() / 2;
4781 for (unsigned i = 0; i != e; ++i)
4782 if (!isUndefOrEqual(Mask[i], i))
4784 for (unsigned i = 0; i != e; ++i)
4785 if (!isUndefOrEqual(Mask[e+i], i))
4790 /// isVEXTRACTIndex - Return true if the specified
4791 /// EXTRACT_SUBVECTOR operand specifies a vector extract that is
4792 /// suitable for instruction that extract 128 or 256 bit vectors
4793 static bool isVEXTRACTIndex(SDNode *N, unsigned vecWidth) {
4794 assert((vecWidth == 128 || vecWidth == 256) && "Unexpected vector width");
4795 if (!isa<ConstantSDNode>(N->getOperand(1).getNode()))
4798 // The index should be aligned on a vecWidth-bit boundary.
4800 cast<ConstantSDNode>(N->getOperand(1).getNode())->getZExtValue();
4802 MVT VT = N->getSimpleValueType(0);
4803 unsigned ElSize = VT.getVectorElementType().getSizeInBits();
4804 bool Result = (Index * ElSize) % vecWidth == 0;
4809 /// isVINSERTIndex - Return true if the specified INSERT_SUBVECTOR
4810 /// operand specifies a subvector insert that is suitable for input to
4811 /// insertion of 128 or 256-bit subvectors
4812 static bool isVINSERTIndex(SDNode *N, unsigned vecWidth) {
4813 assert((vecWidth == 128 || vecWidth == 256) && "Unexpected vector width");
4814 if (!isa<ConstantSDNode>(N->getOperand(2).getNode()))
4816 // The index should be aligned on a vecWidth-bit boundary.
4818 cast<ConstantSDNode>(N->getOperand(2).getNode())->getZExtValue();
4820 MVT VT = N->getSimpleValueType(0);
4821 unsigned ElSize = VT.getVectorElementType().getSizeInBits();
4822 bool Result = (Index * ElSize) % vecWidth == 0;
4827 bool X86::isVINSERT128Index(SDNode *N) {
4828 return isVINSERTIndex(N, 128);
4831 bool X86::isVINSERT256Index(SDNode *N) {
4832 return isVINSERTIndex(N, 256);
4835 bool X86::isVEXTRACT128Index(SDNode *N) {
4836 return isVEXTRACTIndex(N, 128);
4839 bool X86::isVEXTRACT256Index(SDNode *N) {
4840 return isVEXTRACTIndex(N, 256);
4843 /// getShuffleSHUFImmediate - Return the appropriate immediate to shuffle
4844 /// the specified VECTOR_SHUFFLE mask with PSHUF* and SHUFP* instructions.
4845 /// Handles 128-bit and 256-bit.
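/// e.g. (illustrative): for a v4f32 mask <3, 2, 1, 0> the returned immediate is
/// 0x1B (two bits per element, element 0's source encoded in the low bits).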
4846 static unsigned getShuffleSHUFImmediate(ShuffleVectorSDNode *N) {
4847 MVT VT = N->getSimpleValueType(0);
4849 assert((VT.getSizeInBits() >= 128) &&
4850 "Unsupported vector type for PSHUF/SHUFP");
4852 // Handle 128 and 256-bit vector lengths. AVX defines PSHUF/SHUFP to operate
4853 // independently on 128-bit lanes.
4854 unsigned NumElts = VT.getVectorNumElements();
4855 unsigned NumLanes = VT.getSizeInBits()/128;
4856 unsigned NumLaneElts = NumElts/NumLanes;
4858 assert((NumLaneElts == 2 || NumLaneElts == 4 || NumLaneElts == 8) &&
4859 "Only supports 2, 4 or 8 elements per lane");
4861 unsigned Shift = (NumLaneElts >= 4) ? 1 : 0;
4863 for (unsigned i = 0; i != NumElts; ++i) {
4864 int Elt = N->getMaskElt(i);
4865 if (Elt < 0) continue;
4866 Elt &= NumLaneElts - 1;
4867 unsigned ShAmt = (i << Shift) % 8;
4868 Mask |= Elt << ShAmt;
4874 /// getShufflePSHUFHWImmediate - Return the appropriate immediate to shuffle
4875 /// the specified VECTOR_SHUFFLE mask with the PSHUFHW instruction.
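/// e.g. (illustrative): for a v8i16 mask <0, 1, 2, 3, 7, 6, 5, 4> the returned
/// immediate is 0x1B, reversing the four high words of the lane.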
4876 static unsigned getShufflePSHUFHWImmediate(ShuffleVectorSDNode *N) {
4877 MVT VT = N->getSimpleValueType(0);
4879 assert((VT == MVT::v8i16 || VT == MVT::v16i16) &&
4880 "Unsupported vector type for PSHUFHW");
4882 unsigned NumElts = VT.getVectorNumElements();
4885 for (unsigned l = 0; l != NumElts; l += 8) {
4886 // 8 nodes per lane, but we only care about the last 4.
4887 for (unsigned i = 0; i < 4; ++i) {
4888 int Elt = N->getMaskElt(l+i+4);
4889 if (Elt < 0) continue;
4890 Elt &= 0x3; // only 2-bits.
4891 Mask |= Elt << (i * 2);
4898 /// getShufflePSHUFLWImmediate - Return the appropriate immediate to shuffle
4899 /// the specified VECTOR_SHUFFLE mask with the PSHUFLW instruction.
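/// e.g. (illustrative): for a v8i16 mask <3, 2, 1, 0, 4, 5, 6, 7> the returned
/// immediate is 0x1B, reversing the four low words of the lane.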
4900 static unsigned getShufflePSHUFLWImmediate(ShuffleVectorSDNode *N) {
4901 MVT VT = N->getSimpleValueType(0);
4903 assert((VT == MVT::v8i16 || VT == MVT::v16i16) &&
4904 "Unsupported vector type for PSHUFHW");
4906 unsigned NumElts = VT.getVectorNumElements();
4909 for (unsigned l = 0; l != NumElts; l += 8) {
4910 // 8 nodes per lane, but we only care about the first 4.
4911 for (unsigned i = 0; i < 4; ++i) {
4912 int Elt = N->getMaskElt(l+i);
4913 if (Elt < 0) continue;
4914 Elt &= 0x3; // only 2-bits
4915 Mask |= Elt << (i * 2);
4922 /// \brief Return the appropriate immediate to shuffle the specified
4923 /// VECTOR_SHUFFLE mask with the PALIGNR (if InterLane is false) or with
4924 /// VALIGN (if InterLane is true) instructions.
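/// e.g. (illustrative): for a v8i16 mask <1, 2, 3, 4, 5, 6, 7, 8> the PALIGNR
/// immediate is (1 - 0) * 2 = 2 bytes, while for a v8i64 mask starting at
/// element 3 the VALIGN immediate is 3 (counted in elements, EltSize = 1).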
4925 static unsigned getShuffleAlignrImmediate(ShuffleVectorSDNode *SVOp,
4927 MVT VT = SVOp->getSimpleValueType(0);
4928 unsigned EltSize = InterLane ? 1 :
4929 VT.getVectorElementType().getSizeInBits() >> 3;
4931 unsigned NumElts = VT.getVectorNumElements();
4932 unsigned NumLanes = VT.is512BitVector() ? 1 : VT.getSizeInBits()/128;
4933 unsigned NumLaneElts = NumElts/NumLanes;
4937 for (i = 0; i != NumElts; ++i) {
4938 Val = SVOp->getMaskElt(i);
4942 if (Val >= (int)NumElts)
4943 Val -= NumElts - NumLaneElts;
4945 assert(Val - i > 0 && "PALIGNR imm should be positive");
4946 return (Val - i) * EltSize;
4949 /// \brief Return the appropriate immediate to shuffle the specified
4950 /// VECTOR_SHUFFLE mask with the PALIGNR instruction.
4951 static unsigned getShufflePALIGNRImmediate(ShuffleVectorSDNode *SVOp) {
4952 return getShuffleAlignrImmediate(SVOp, false);
4955 /// \brief Return the appropriate immediate to shuffle the specified
4956 /// VECTOR_SHUFFLE mask with the VALIGN instruction.
4957 static unsigned getShuffleVALIGNImmediate(ShuffleVectorSDNode *SVOp) {
4958 return getShuffleAlignrImmediate(SVOp, true);
4962 static unsigned getExtractVEXTRACTImmediate(SDNode *N, unsigned vecWidth) {
4963 assert((vecWidth == 128 || vecWidth == 256) && "Unsupported vector width");
4964 if (!isa<ConstantSDNode>(N->getOperand(1).getNode()))
4965 llvm_unreachable("Illegal extract subvector for VEXTRACT");
4968 cast<ConstantSDNode>(N->getOperand(1).getNode())->getZExtValue();
4970 MVT VecVT = N->getOperand(0).getSimpleValueType();
4971 MVT ElVT = VecVT.getVectorElementType();
4973 unsigned NumElemsPerChunk = vecWidth / ElVT.getSizeInBits();
4974 return Index / NumElemsPerChunk;
4977 static unsigned getInsertVINSERTImmediate(SDNode *N, unsigned vecWidth) {
4978 assert((vecWidth == 128 || vecWidth == 256) && "Unsupported vector width");
4979 if (!isa<ConstantSDNode>(N->getOperand(2).getNode()))
4980 llvm_unreachable("Illegal insert subvector for VINSERT");
4983 cast<ConstantSDNode>(N->getOperand(2).getNode())->getZExtValue();
4985 MVT VecVT = N->getSimpleValueType(0);
4986 MVT ElVT = VecVT.getVectorElementType();
4988 unsigned NumElemsPerChunk = vecWidth / ElVT.getSizeInBits();
4989 return Index / NumElemsPerChunk;
4992 /// getExtractVEXTRACT128Immediate - Return the appropriate immediate
4993 /// to extract the specified EXTRACT_SUBVECTOR index with VEXTRACTF128
4994 /// and VEXTRACTI128 instructions.
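/// e.g. (illustrative): extracting elements <4..7> of a v8f32 (Index = 4) gives
/// NumElemsPerChunk = 4 and an immediate of 1, selecting the upper 128-bit half.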
4995 unsigned X86::getExtractVEXTRACT128Immediate(SDNode *N) {
4996 return getExtractVEXTRACTImmediate(N, 128);
4999 /// getExtractVEXTRACT256Immediate - Return the appropriate immediate
5000 /// to extract the specified EXTRACT_SUBVECTOR index with VEXTRACTF64x4
5001 /// and VEXTRACTI64x4 instructions.
5002 unsigned X86::getExtractVEXTRACT256Immediate(SDNode *N) {
5003 return getExtractVEXTRACTImmediate(N, 256);
5006 /// getInsertVINSERT128Immediate - Return the appropriate immediate
5007 /// to insert at the specified INSERT_SUBVECTOR index with VINSERTF128
5008 /// and VINSERTI128 instructions.
5009 unsigned X86::getInsertVINSERT128Immediate(SDNode *N) {
5010 return getInsertVINSERTImmediate(N, 128);
5013 /// getInsertVINSERT256Immediate - Return the appropriate immediate
5014 /// to insert at the specified INSERT_SUBVECTOR index with VINSERTF64x4
5015 /// and VINSERTI64x4 instructions.
5016 unsigned X86::getInsertVINSERT256Immediate(SDNode *N) {
5017 return getInsertVINSERTImmediate(N, 256);
5020 /// isZero - Returns true if V is a constant integer zero
5021 static bool isZero(SDValue V) {
5022 ConstantSDNode *C = dyn_cast<ConstantSDNode>(V);
5023 return C && C->isNullValue();
5026 /// isZeroNode - Returns true if Elt is a constant zero or a floating-point constant +0.0.
5028 bool X86::isZeroNode(SDValue Elt) {
5031 if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Elt))
5032 return CFP->getValueAPF().isPosZero();
5036 /// ShouldXformToMOVHLPS - Return true if the node should be transformed to
5037 /// match movhlps. The lower half elements should come from upper half of
5038 /// V1 (and in order), and the upper half elements should come from the upper
5039 /// half of V2 (and in order).
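/// e.g. (illustrative) v4f32 mask to match: <2, 3, 6, 7>.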
5040 static bool ShouldXformToMOVHLPS(ArrayRef<int> Mask, MVT VT) {
5041 if (!VT.is128BitVector())
5043 if (VT.getVectorNumElements() != 4)
5045 for (unsigned i = 0, e = 2; i != e; ++i)
5046 if (!isUndefOrEqual(Mask[i], i+2))
5048 for (unsigned i = 2; i != 4; ++i)
5049 if (!isUndefOrEqual(Mask[i], i+4))
5054 /// isScalarLoadToVector - Returns true if the node is a scalar load that
5055 /// is promoted to a vector. It also returns the LoadSDNode by reference if
5057 static bool isScalarLoadToVector(SDNode *N, LoadSDNode **LD = nullptr) {
5058 if (N->getOpcode() != ISD::SCALAR_TO_VECTOR)
5060 N = N->getOperand(0).getNode();
5061 if (!ISD::isNON_EXTLoad(N))
5064 *LD = cast<LoadSDNode>(N);
5068 // Test whether the given value is a vector value which will be legalized
5070 static bool WillBeConstantPoolLoad(SDNode *N) {
5071 if (N->getOpcode() != ISD::BUILD_VECTOR)
5074 // Check for any non-constant elements.
5075 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
5076 switch (N->getOperand(i).getNode()->getOpcode()) {
5078 case ISD::ConstantFP:
5085 // Vectors of all-zeros and all-ones are materialized with special
5086 // instructions rather than being loaded.
5087 return !ISD::isBuildVectorAllZeros(N) &&
5088 !ISD::isBuildVectorAllOnes(N);
5091 /// ShouldXformToMOVLP{S|D} - Return true if the node should be transformed to
5092 /// match movlp{s|d}. The lower half elements should come from lower half of
5093 /// V1 (and in order), and the upper half elements should come from the upper
5094 /// half of V2 (and in order). And since V1 will become the source of the
5095 /// MOVLP, it must be either a vector load or a scalar load to vector.
5096 static bool ShouldXformToMOVLP(SDNode *V1, SDNode *V2,
5097 ArrayRef<int> Mask, MVT VT) {
5098 if (!VT.is128BitVector())
5101 if (!ISD::isNON_EXTLoad(V1) && !isScalarLoadToVector(V1))
5103 // If V2 is a vector load, don't do this transformation; we will try to use a
5104 // load-folding shufps op instead.
5105 if (ISD::isNON_EXTLoad(V2) || WillBeConstantPoolLoad(V2))
5108 unsigned NumElems = VT.getVectorNumElements();
5110 if (NumElems != 2 && NumElems != 4)
5112 for (unsigned i = 0, e = NumElems/2; i != e; ++i)
5113 if (!isUndefOrEqual(Mask[i], i))
5115 for (unsigned i = NumElems/2, e = NumElems; i != e; ++i)
5116 if (!isUndefOrEqual(Mask[i], i+NumElems))
5121 /// isZeroShuffle - Returns true if N is a VECTOR_SHUFFLE that can be resolved
5122 /// to a zero vector.
5123 /// FIXME: move to dag combiner / method on ShuffleVectorSDNode
5124 static bool isZeroShuffle(ShuffleVectorSDNode *N) {
5125 SDValue V1 = N->getOperand(0);
5126 SDValue V2 = N->getOperand(1);
5127 unsigned NumElems = N->getValueType(0).getVectorNumElements();
5128 for (unsigned i = 0; i != NumElems; ++i) {
5129 int Idx = N->getMaskElt(i);
5130 if (Idx >= (int)NumElems) {
5131 unsigned Opc = V2.getOpcode();
5132 if (Opc == ISD::UNDEF || ISD::isBuildVectorAllZeros(V2.getNode()))
5134 if (Opc != ISD::BUILD_VECTOR ||
5135 !X86::isZeroNode(V2.getOperand(Idx-NumElems)))
5137 } else if (Idx >= 0) {
5138 unsigned Opc = V1.getOpcode();
5139 if (Opc == ISD::UNDEF || ISD::isBuildVectorAllZeros(V1.getNode()))
5141 if (Opc != ISD::BUILD_VECTOR ||
5142 !X86::isZeroNode(V1.getOperand(Idx)))
5149 /// getZeroVector - Returns a vector of specified type with all zero elements.
5151 static SDValue getZeroVector(EVT VT, const X86Subtarget *Subtarget,
5152 SelectionDAG &DAG, SDLoc dl) {
5153 assert(VT.isVector() && "Expected a vector type");
5155 // Always build SSE zero vectors as <4 x i32> bitcasted
5156 // to their dest type. This ensures they get CSE'd.
5158 if (VT.is128BitVector()) { // SSE
5159 if (Subtarget->hasSSE2()) { // SSE2
5160 SDValue Cst = DAG.getConstant(0, MVT::i32);
5161 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Cst, Cst, Cst, Cst);
5163 SDValue Cst = DAG.getConstantFP(+0.0, MVT::f32);
5164 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4f32, Cst, Cst, Cst, Cst);
5166 } else if (VT.is256BitVector()) { // AVX
5167 if (Subtarget->hasInt256()) { // AVX2
5168 SDValue Cst = DAG.getConstant(0, MVT::i32);
5169 SDValue Ops[] = { Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst };
5170 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v8i32, Ops);
5172 // 256-bit logic and arithmetic instructions in AVX are all
5173 // floating-point, no support for integer ops. Emit fp zeroed vectors.
5174 SDValue Cst = DAG.getConstantFP(+0.0, MVT::f32);
5175 SDValue Ops[] = { Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst };
5176 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v8f32, Ops);
5178 } else if (VT.is512BitVector()) { // AVX-512
5179 SDValue Cst = DAG.getConstant(0, MVT::i32);
5180 SDValue Ops[] = { Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst,
5181 Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst };
5182 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v16i32, Ops);
5183 } else if (VT.getScalarType() == MVT::i1) {
5184 assert(VT.getVectorNumElements() <= 16 && "Unexpected vector type");
5185 SDValue Cst = DAG.getConstant(0, MVT::i1);
5186 SmallVector<SDValue, 16> Ops(VT.getVectorNumElements(), Cst);
5187 return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Ops);
5189 llvm_unreachable("Unexpected vector type");
5191 return DAG.getNode(ISD::BITCAST, dl, VT, Vec);
5194 /// getOnesVector - Returns a vector of specified type with all bits set.
5195 /// Always build ones vectors as <4 x i32> or <8 x i32>. For 256-bit types with
5196 /// no AVX2 support, use two <4 x i32> inserted into an <8 x i32> appropriately.
5197 /// Then bitcast to their original type, ensuring they get CSE'd.
5198 static SDValue getOnesVector(MVT VT, bool HasInt256, SelectionDAG &DAG,
5200 assert(VT.isVector() && "Expected a vector type");
5202 SDValue Cst = DAG.getConstant(~0U, MVT::i32);
5204 if (VT.is256BitVector()) {
5205 if (HasInt256) { // AVX2
5206 SDValue Ops[] = { Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst };
5207 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v8i32, Ops);
5209 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Cst, Cst, Cst, Cst);
5210 Vec = Concat128BitVectors(Vec, Vec, MVT::v8i32, 8, DAG, dl);
5212 } else if (VT.is128BitVector()) {
5213 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Cst, Cst, Cst, Cst);
5215 llvm_unreachable("Unexpected vector type");
5217 return DAG.getNode(ISD::BITCAST, dl, VT, Vec);
5220 /// NormalizeMask - V2 is a splat, modify the mask (if needed) so all elements
5221 /// that point to V2 point to its first element.
5222 static void NormalizeMask(SmallVectorImpl<int> &Mask, unsigned NumElems) {
5223 for (unsigned i = 0; i != NumElems; ++i) {
5224 if (Mask[i] > (int)NumElems) {
5230 /// getMOVLMask - Returns a vector_shuffle mask for an movs{s|d}, movd
5231 /// operation of specified width.
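/// e.g. (illustrative): for a 4-element type this builds the mask <4, 1, 2, 3>.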
5232 static SDValue getMOVL(SelectionDAG &DAG, SDLoc dl, EVT VT, SDValue V1,
5234 unsigned NumElems = VT.getVectorNumElements();
5235 SmallVector<int, 8> Mask;
5236 Mask.push_back(NumElems);
5237 for (unsigned i = 1; i != NumElems; ++i)
5239 return DAG.getVectorShuffle(VT, dl, V1, V2, &Mask[0]);
5242 /// getUnpackl - Returns a vector_shuffle node for an unpackl operation.
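/// e.g. (illustrative): for a 4-element type the mask is <0, 4, 1, 5>.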
5243 static SDValue getUnpackl(SelectionDAG &DAG, SDLoc dl, MVT VT, SDValue V1,
5245 unsigned NumElems = VT.getVectorNumElements();
5246 SmallVector<int, 8> Mask;
5247 for (unsigned i = 0, e = NumElems/2; i != e; ++i) {
5249 Mask.push_back(i + NumElems);
5251 return DAG.getVectorShuffle(VT, dl, V1, V2, &Mask[0]);
5254 /// getUnpackh - Returns a vector_shuffle node for an unpackh operation.
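/// e.g. (illustrative): for a 4-element type the mask is <2, 6, 3, 7>.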
5255 static SDValue getUnpackh(SelectionDAG &DAG, SDLoc dl, MVT VT, SDValue V1,
5257 unsigned NumElems = VT.getVectorNumElements();
5258 SmallVector<int, 8> Mask;
5259 for (unsigned i = 0, Half = NumElems/2; i != Half; ++i) {
5260 Mask.push_back(i + Half);
5261 Mask.push_back(i + NumElems + Half);
5263 return DAG.getVectorShuffle(VT, dl, V1, V2, &Mask[0]);
5266 // PromoteSplati8i16 - All i16 and i8 vector types can't be used directly by
5267 // a generic shuffle instruction because the target has no such instructions.
5268 // Generate shuffles which repeat i16 and i8 several times until they can be
5269 // represented by v4f32 and then be manipulated by target supported shuffles.
5270 static SDValue PromoteSplati8i16(SDValue V, SelectionDAG &DAG, int &EltNo) {
5271 MVT VT = V.getSimpleValueType();
5272 int NumElems = VT.getVectorNumElements();
5275 while (NumElems > 4) {
5276 if (EltNo < NumElems/2) {
5277 V = getUnpackl(DAG, dl, VT, V, V);
5279 V = getUnpackh(DAG, dl, VT, V, V);
5280 EltNo -= NumElems/2;
5287 /// getLegalSplat - Generate a legal splat with supported x86 shuffles
5288 static SDValue getLegalSplat(SelectionDAG &DAG, SDValue V, int EltNo) {
5289 MVT VT = V.getSimpleValueType();
5292 if (VT.is128BitVector()) {
5293 V = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, V);
5294 int SplatMask[4] = { EltNo, EltNo, EltNo, EltNo };
5295 V = DAG.getVectorShuffle(MVT::v4f32, dl, V, DAG.getUNDEF(MVT::v4f32),
5297 } else if (VT.is256BitVector()) {
5298 // To use VPERMILPS to splat scalars, the second half of indices must
5299 // refer to the higher part, which is a duplication of the lower one,
5300 // because VPERMILPS can only handle in-lane permutations.
5301 int SplatMask[8] = { EltNo, EltNo, EltNo, EltNo,
5302 EltNo+4, EltNo+4, EltNo+4, EltNo+4 };
5304 V = DAG.getNode(ISD::BITCAST, dl, MVT::v8f32, V);
5305 V = DAG.getVectorShuffle(MVT::v8f32, dl, V, DAG.getUNDEF(MVT::v8f32),
5308 llvm_unreachable("Vector size not supported");
5310 return DAG.getNode(ISD::BITCAST, dl, VT, V);
5313 /// PromoteSplat - Splat is promoted to target supported vector shuffles.
5314 static SDValue PromoteSplat(ShuffleVectorSDNode *SV, SelectionDAG &DAG) {
5315 MVT SrcVT = SV->getSimpleValueType(0);
5316 SDValue V1 = SV->getOperand(0);
5319 int EltNo = SV->getSplatIndex();
5320 int NumElems = SrcVT.getVectorNumElements();
5321 bool Is256BitVec = SrcVT.is256BitVector();
5323 assert(((SrcVT.is128BitVector() && NumElems > 4) || Is256BitVec) &&
5324 "Unknown how to promote splat for type");
5326 // Extract the 128-bit part containing the splat element and update
5327 // the splat element index when it refers to the higher register.
5329 V1 = Extract128BitVector(V1, EltNo, DAG, dl);
5330 if (EltNo >= NumElems/2)
5331 EltNo -= NumElems/2;
5334 // All i16 and i8 vector types can't be used directly by a generic shuffle
5335 // instruction because the target has no such instruction. Generate shuffles
5336 // which repeat i16 and i8 several times until they fit in i32, and then can
5337 // be manipulated by target supported shuffles.
5338 MVT EltVT = SrcVT.getVectorElementType();
5339 if (EltVT == MVT::i8 || EltVT == MVT::i16)
5340 V1 = PromoteSplati8i16(V1, DAG, EltNo);
5342 // Recreate the 256-bit vector and place the same 128-bit vector
5343 // into the low and high part. This is necessary because we want
5344 // to use VPERM* to shuffle the vectors
5346 V1 = DAG.getNode(ISD::CONCAT_VECTORS, dl, SrcVT, V1, V1);
5349 return getLegalSplat(DAG, V1, EltNo);
5352 /// getShuffleVectorZeroOrUndef - Return a vector_shuffle of the specified
5353 /// vector and a zero or undef vector. This produces a shuffle where the low
5354 /// element of V2 is swizzled into the zero/undef vector, landing at element
5355 /// Idx. This produces a shuffle mask like 4,1,2,3 (idx=0) or 0,1,2,4 (idx=3).
5356 static SDValue getShuffleVectorZeroOrUndef(SDValue V2, unsigned Idx,
5358 const X86Subtarget *Subtarget,
5359 SelectionDAG &DAG) {
5360 MVT VT = V2.getSimpleValueType();
5362 ? getZeroVector(VT, Subtarget, DAG, SDLoc(V2)) : DAG.getUNDEF(VT);
5363 unsigned NumElems = VT.getVectorNumElements();
5364 SmallVector<int, 16> MaskVec;
5365 for (unsigned i = 0; i != NumElems; ++i)
5366 // If this is the insertion idx, put the low elt of V2 here.
5367 MaskVec.push_back(i == Idx ? NumElems : i);
5368 return DAG.getVectorShuffle(VT, SDLoc(V2), V1, V2, &MaskVec[0]);
5371 /// getTargetShuffleMask - Calculates the shuffle mask corresponding to the
5372 /// target specific opcode. Returns true if the Mask could be calculated. Sets
5373 /// IsUnary to true if it only uses one source. Note that this will set IsUnary for
5374 /// shuffles which use a single input multiple times, and in those cases it will
5375 /// adjust the mask to only have indices within that single input.
5376 static bool getTargetShuffleMask(SDNode *N, MVT VT,
5377 SmallVectorImpl<int> &Mask, bool &IsUnary) {
5378 unsigned NumElems = VT.getVectorNumElements();
5382 bool IsFakeUnary = false;
5383 switch(N->getOpcode()) {
5384 case X86ISD::BLENDI:
5385 ImmN = N->getOperand(N->getNumOperands()-1);
5386 DecodeBLENDMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
5389 ImmN = N->getOperand(N->getNumOperands()-1);
5390 DecodeSHUFPMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
5391 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
5393 case X86ISD::UNPCKH:
5394 DecodeUNPCKHMask(VT, Mask);
5395 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
5397 case X86ISD::UNPCKL:
5398 DecodeUNPCKLMask(VT, Mask);
5399 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
5401 case X86ISD::MOVHLPS:
5402 DecodeMOVHLPSMask(NumElems, Mask);
5403 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
5405 case X86ISD::MOVLHPS:
5406 DecodeMOVLHPSMask(NumElems, Mask);
5407 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
5409 case X86ISD::PALIGNR:
5410 ImmN = N->getOperand(N->getNumOperands()-1);
5411 DecodePALIGNRMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
5413 case X86ISD::PSHUFD:
5414 case X86ISD::VPERMILPI:
5415 ImmN = N->getOperand(N->getNumOperands()-1);
5416 DecodePSHUFMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
5419 case X86ISD::PSHUFHW:
5420 ImmN = N->getOperand(N->getNumOperands()-1);
5421 DecodePSHUFHWMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
5424 case X86ISD::PSHUFLW:
5425 ImmN = N->getOperand(N->getNumOperands()-1);
5426 DecodePSHUFLWMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
5429 case X86ISD::PSHUFB: {
5431 SDValue MaskNode = N->getOperand(1);
5432 while (MaskNode->getOpcode() == ISD::BITCAST)
5433 MaskNode = MaskNode->getOperand(0);
5435 if (MaskNode->getOpcode() == ISD::BUILD_VECTOR) {
5436 // If we have a build-vector, then things are easy.
5437 EVT VT = MaskNode.getValueType();
5438 assert(VT.isVector() &&
5439 "Can't produce a non-vector with a build_vector!");
5440 if (!VT.isInteger())
5443 int NumBytesPerElement = VT.getVectorElementType().getSizeInBits() / 8;
5445 SmallVector<uint64_t, 32> RawMask;
5446 for (int i = 0, e = MaskNode->getNumOperands(); i < e; ++i) {
5447 SDValue Op = MaskNode->getOperand(i);
5448 if (Op->getOpcode() == ISD::UNDEF) {
5449 RawMask.push_back((uint64_t)SM_SentinelUndef);
5452 auto *CN = dyn_cast<ConstantSDNode>(Op.getNode());
5455 APInt MaskElement = CN->getAPIntValue();
5457 // We now have to decode the element which could be any integer size and
5458 // extract each byte of it.
5459 for (int j = 0; j < NumBytesPerElement; ++j) {
5460 // Note that this is x86 and so always little endian: the low byte is
5461 // the first byte of the mask.
5462 RawMask.push_back(MaskElement.getLoBits(8).getZExtValue());
5463 MaskElement = MaskElement.lshr(8);
5466 DecodePSHUFBMask(RawMask, Mask);
5470 auto *MaskLoad = dyn_cast<LoadSDNode>(MaskNode);
5474 SDValue Ptr = MaskLoad->getBasePtr();
5475 if (Ptr->getOpcode() == X86ISD::Wrapper)
5476 Ptr = Ptr->getOperand(0);
5478 auto *MaskCP = dyn_cast<ConstantPoolSDNode>(Ptr);
5479 if (!MaskCP || MaskCP->isMachineConstantPoolEntry())
5482 if (auto *C = dyn_cast<Constant>(MaskCP->getConstVal())) {
5483 DecodePSHUFBMask(C, Mask);
5491 case X86ISD::VPERMI:
5492 ImmN = N->getOperand(N->getNumOperands()-1);
5493 DecodeVPERMMask(cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
5498 DecodeScalarMoveMask(VT, /* IsLoad */ false, Mask);
5500 case X86ISD::VPERM2X128:
5501 ImmN = N->getOperand(N->getNumOperands()-1);
5502 DecodeVPERM2X128Mask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
5503 if (Mask.empty()) return false;
5505 case X86ISD::MOVSLDUP:
5506 DecodeMOVSLDUPMask(VT, Mask);
5509 case X86ISD::MOVSHDUP:
5510 DecodeMOVSHDUPMask(VT, Mask);
5513 case X86ISD::MOVDDUP:
5514 DecodeMOVDDUPMask(VT, Mask);
5517 case X86ISD::MOVLHPD:
5518 case X86ISD::MOVLPD:
5519 case X86ISD::MOVLPS:
5520 // Not yet implemented
5522 default: llvm_unreachable("unknown target shuffle node");
5525 // If we have a fake unary shuffle, the shuffle mask is spread across two
5526 // inputs that are actually the same node. Re-map the mask to always point
5527 // into the first input.
5530 if (M >= (int)Mask.size())
5536 /// getShuffleScalarElt - Returns the scalar element that will make up the ith
5537 /// element of the result of the vector shuffle.
5538 static SDValue getShuffleScalarElt(SDNode *N, unsigned Index, SelectionDAG &DAG,
5541 return SDValue(); // Limit search depth.
5543 SDValue V = SDValue(N, 0);
5544 EVT VT = V.getValueType();
5545 unsigned Opcode = V.getOpcode();
5547 // Recurse into ISD::VECTOR_SHUFFLE node to find scalars.
5548 if (const ShuffleVectorSDNode *SV = dyn_cast<ShuffleVectorSDNode>(N)) {
5549 int Elt = SV->getMaskElt(Index);
5552 return DAG.getUNDEF(VT.getVectorElementType());
5554 unsigned NumElems = VT.getVectorNumElements();
5555 SDValue NewV = (Elt < (int)NumElems) ? SV->getOperand(0)
5556 : SV->getOperand(1);
5557 return getShuffleScalarElt(NewV.getNode(), Elt % NumElems, DAG, Depth+1);
5560 // Recurse into target specific vector shuffles to find scalars.
5561 if (isTargetShuffle(Opcode)) {
5562 MVT ShufVT = V.getSimpleValueType();
5563 unsigned NumElems = ShufVT.getVectorNumElements();
5564 SmallVector<int, 16> ShuffleMask;
5567 if (!getTargetShuffleMask(N, ShufVT, ShuffleMask, IsUnary))
5570 int Elt = ShuffleMask[Index];
5572 return DAG.getUNDEF(ShufVT.getVectorElementType());
5574 SDValue NewV = (Elt < (int)NumElems) ? N->getOperand(0)
5576 return getShuffleScalarElt(NewV.getNode(), Elt % NumElems, DAG,
5580 // Actual nodes that may contain scalar elements
5581 if (Opcode == ISD::BITCAST) {
5582 V = V.getOperand(0);
5583 EVT SrcVT = V.getValueType();
5584 unsigned NumElems = VT.getVectorNumElements();
5586 if (!SrcVT.isVector() || SrcVT.getVectorNumElements() != NumElems)
5590 if (V.getOpcode() == ISD::SCALAR_TO_VECTOR)
5591 return (Index == 0) ? V.getOperand(0)
5592 : DAG.getUNDEF(VT.getVectorElementType());
5594 if (V.getOpcode() == ISD::BUILD_VECTOR)
5595 return V.getOperand(Index);
5600 /// getNumOfConsecutiveZeros - Return the number of elements of a vector
5601 /// shuffle operation which consecutively come from zero. The
5602 /// search can start in two different directions, from left or right.
5603 /// We count undefs as zeros until PreferredNum is reached.
5604 static unsigned getNumOfConsecutiveZeros(ShuffleVectorSDNode *SVOp,
5605 unsigned NumElems, bool ZerosFromLeft,
5607 unsigned PreferredNum = -1U) {
5608 unsigned NumZeros = 0;
5609 for (unsigned i = 0; i != NumElems; ++i) {
5610 unsigned Index = ZerosFromLeft ? i : NumElems - i - 1;
5611 SDValue Elt = getShuffleScalarElt(SVOp, Index, DAG, 0);
5615 if (X86::isZeroNode(Elt))
5617 else if (Elt.getOpcode() == ISD::UNDEF) // Undef as zero up to PreferredNum.
5618 NumZeros = std::min(NumZeros + 1, PreferredNum);
5626 /// isShuffleMaskConsecutive - Check if the shuffle mask indices [MaskI, MaskE)
5627 /// correspond consecutively to elements from one of the vector operands,
5628 /// starting from its index OpIdx. Also sets OpNum to the source vector operand used.
5630 bool isShuffleMaskConsecutive(ShuffleVectorSDNode *SVOp,
5631 unsigned MaskI, unsigned MaskE, unsigned OpIdx,
5632 unsigned NumElems, unsigned &OpNum) {
5633 bool SeenV1 = false;
5634 bool SeenV2 = false;
5636 for (unsigned i = MaskI; i != MaskE; ++i, ++OpIdx) {
5637 int Idx = SVOp->getMaskElt(i);
5638 // Ignore undef indices
5642 if (Idx < (int)NumElems)
5647 // Only accept consecutive elements from the same vector
5648 if ((Idx % NumElems != OpIdx) || (SeenV1 && SeenV2))
5652 OpNum = SeenV1 ? 0 : 1;
5656 /// isVectorShiftRight - Returns true if the shuffle can be implemented as a
5657 /// logical right shift of a vector.
5658 static bool isVectorShiftRight(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG,
5659 bool &isLeft, SDValue &ShVal, unsigned &ShAmt) {
5661 SVOp->getSimpleValueType(0).getVectorNumElements();
5662 unsigned NumZeros = getNumOfConsecutiveZeros(
5663 SVOp, NumElems, false /* check zeros from right */, DAG,
5664 SVOp->getMaskElt(0));
5670 // Considering the elements in the mask that are not consecutive zeros,
5671 // check if they consecutively come from only one of the source vectors.
5673 // V1 = {X, A, B, C} 0
5675 // vector_shuffle V1, V2 <1, 2, 3, X>
5677 if (!isShuffleMaskConsecutive(SVOp,
5678 0, // Mask Start Index
5679 NumElems-NumZeros, // Mask End Index(exclusive)
5680 NumZeros, // Where to start looking in the src vector
5681 NumElems, // Number of elements in vector
5682 OpSrc)) // Which source operand ?
5687 ShVal = SVOp->getOperand(OpSrc);
5691 /// isVectorShiftLeft - Returns true if the shuffle can be implemented as a
5692 /// logical left shift of a vector.
5693 static bool isVectorShiftLeft(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG,
5694 bool &isLeft, SDValue &ShVal, unsigned &ShAmt) {
5696 SVOp->getSimpleValueType(0).getVectorNumElements();
5697 unsigned NumZeros = getNumOfConsecutiveZeros(
5698 SVOp, NumElems, true /* check zeros from left */, DAG,
5699 NumElems - SVOp->getMaskElt(NumElems - 1) - 1);
5705 // Considering the elements in the mask that are not consecutive zeros,
5706 // check if they consecutively come from only one of the source vectors.
5708 // 0 { A, B, X, X } = V2
5710 // vector_shuffle V1, V2 <X, X, 4, 5>
5712 if (!isShuffleMaskConsecutive(SVOp,
5713 NumZeros, // Mask Start Index
5714 NumElems, // Mask End Index(exclusive)
5715 0, // Where to start looking in the src vector
5716 NumElems, // Number of elements in vector
5717 OpSrc)) // Which source operand ?
5722 ShVal = SVOp->getOperand(OpSrc);
5726 /// isVectorShift - Returns true if the shuffle can be implemented as a
5727 /// logical left or right shift of a vector.
5728 static bool isVectorShift(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG,
5729 bool &isLeft, SDValue &ShVal, unsigned &ShAmt) {
5730 // Although the logic below supports any bitwidth size, there are no
5731 // shift instructions which handle more than 128-bit vectors.
5732 if (!SVOp->getSimpleValueType(0).is128BitVector())
5735 if (isVectorShiftLeft(SVOp, DAG, isLeft, ShVal, ShAmt) ||
5736 isVectorShiftRight(SVOp, DAG, isLeft, ShVal, ShAmt))
5742 /// LowerBuildVectorv16i8 - Custom lower build_vector of v16i8.
5744 static SDValue LowerBuildVectorv16i8(SDValue Op, unsigned NonZeros,
5745 unsigned NumNonZero, unsigned NumZero,
5747 const X86Subtarget* Subtarget,
5748 const TargetLowering &TLI) {
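// Combine each pair of consecutive i8 elements into a single i16 element (the
// even element in the low byte, the odd element shifted into the high byte),
// insert it into a v8i16 at index i/2, and finally bitcast back to v16i8.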
5755 for (unsigned i = 0; i < 16; ++i) {
5756 bool ThisIsNonZero = (NonZeros & (1 << i)) != 0;
5757 if (ThisIsNonZero && First) {
5759 V = getZeroVector(MVT::v8i16, Subtarget, DAG, dl);
5761 V = DAG.getUNDEF(MVT::v8i16);
5766 SDValue ThisElt, LastElt;
5767 bool LastIsNonZero = (NonZeros & (1 << (i-1))) != 0;
5768 if (LastIsNonZero) {
5769 LastElt = DAG.getNode(ISD::ZERO_EXTEND, dl,
5770 MVT::i16, Op.getOperand(i-1));
5772 if (ThisIsNonZero) {
5773 ThisElt = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, Op.getOperand(i));
5774 ThisElt = DAG.getNode(ISD::SHL, dl, MVT::i16,
5775 ThisElt, DAG.getConstant(8, MVT::i8));
5777 ThisElt = DAG.getNode(ISD::OR, dl, MVT::i16, ThisElt, LastElt);
5781 if (ThisElt.getNode())
5782 V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v8i16, V, ThisElt,
5783 DAG.getIntPtrConstant(i/2));
5787 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, V);
5790 /// LowerBuildVectorv8i16 - Custom lower build_vector of v8i16.
5792 static SDValue LowerBuildVectorv8i16(SDValue Op, unsigned NonZeros,
5793 unsigned NumNonZero, unsigned NumZero,
5795 const X86Subtarget* Subtarget,
5796 const TargetLowering &TLI) {
5803 for (unsigned i = 0; i < 8; ++i) {
5804 bool isNonZero = (NonZeros & (1 << i)) != 0;
5808 V = getZeroVector(MVT::v8i16, Subtarget, DAG, dl);
5810 V = DAG.getUNDEF(MVT::v8i16);
5813 V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl,
5814 MVT::v8i16, V, Op.getOperand(i),
5815 DAG.getIntPtrConstant(i));
5822 /// LowerBuildVectorv4x32 - Custom lower build_vector of v4i32 or v4f32.
5823 static SDValue LowerBuildVectorv4x32(SDValue Op, SelectionDAG &DAG,
5824 const X86Subtarget *Subtarget,
5825 const TargetLowering &TLI) {
5826 // Find all zeroable elements.
5827 std::bitset<4> Zeroable;
5828 for (int i=0; i < 4; ++i) {
5829 SDValue Elt = Op->getOperand(i);
5830 Zeroable[i] = (Elt.getOpcode() == ISD::UNDEF || X86::isZeroNode(Elt));
5832 assert(Zeroable.size() - Zeroable.count() > 1 &&
5833 "We expect at least two non-zero elements!");
5835 // We only know how to deal with build_vector nodes where elements are either
5836 // zeroable or extract_vector_elt with constant index.
5837 SDValue FirstNonZero;
5838 unsigned FirstNonZeroIdx;
5839 for (unsigned i=0; i < 4; ++i) {
5842 SDValue Elt = Op->getOperand(i);
5843 if (Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
5844 !isa<ConstantSDNode>(Elt.getOperand(1)))
5846 // Make sure that this node is extracting from a 128-bit vector.
5847 MVT VT = Elt.getOperand(0).getSimpleValueType();
5848 if (!VT.is128BitVector())
5850 if (!FirstNonZero.getNode()) {
5852 FirstNonZeroIdx = i;
5856 assert(FirstNonZero.getNode() && "Unexpected build vector of all zeros!");
5857 SDValue V1 = FirstNonZero.getOperand(0);
5858 MVT VT = V1.getSimpleValueType();
5860 // See if this build_vector can be lowered as a blend with zero.
5862 unsigned EltMaskIdx, EltIdx;
5864 for (EltIdx = 0; EltIdx < 4; ++EltIdx) {
5865 if (Zeroable[EltIdx]) {
5866 // The zero vector will be on the right hand side.
5867 Mask[EltIdx] = EltIdx+4;
5871 Elt = Op->getOperand(EltIdx);
5872 // By construction, Elt is a EXTRACT_VECTOR_ELT with constant index.
5873 EltMaskIdx = cast<ConstantSDNode>(Elt.getOperand(1))->getZExtValue();
5874 if (Elt.getOperand(0) != V1 || EltMaskIdx != EltIdx)
5876 Mask[EltIdx] = EltIdx;
5880 // Let the shuffle legalizer deal with blend operations.
5881 SDValue VZero = getZeroVector(VT, Subtarget, DAG, SDLoc(Op));
5882 if (V1.getSimpleValueType() != VT)
5883 V1 = DAG.getNode(ISD::BITCAST, SDLoc(V1), VT, V1);
5884 return DAG.getVectorShuffle(VT, SDLoc(V1), V1, VZero, &Mask[0]);
5887 // See if we can lower this build_vector to a INSERTPS.
5888 if (!Subtarget->hasSSE41())
5891 SDValue V2 = Elt.getOperand(0);
5892 if (Elt == FirstNonZero && EltIdx == FirstNonZeroIdx)
5895 bool CanFold = true;
5896 for (unsigned i = EltIdx + 1; i < 4 && CanFold; ++i) {
5900 SDValue Current = Op->getOperand(i);
5901 SDValue SrcVector = Current->getOperand(0);
5904 CanFold = SrcVector == V1 &&
5905 cast<ConstantSDNode>(Current.getOperand(1))->getZExtValue() == i;
5911 assert(V1.getNode() && "Expected at least two non-zero elements!");
5912 if (V1.getSimpleValueType() != MVT::v4f32)
5913 V1 = DAG.getNode(ISD::BITCAST, SDLoc(V1), MVT::v4f32, V1);
5914 if (V2.getSimpleValueType() != MVT::v4f32)
5915 V2 = DAG.getNode(ISD::BITCAST, SDLoc(V2), MVT::v4f32, V2);
5917 // Ok, we can emit an INSERTPS instruction.
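// The INSERTPS immediate encodes the source element in bits [7:6], the
// destination element in bits [5:4], and the zero mask in bits [3:0].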
5918 unsigned ZMask = Zeroable.to_ulong();
5920 unsigned InsertPSMask = EltMaskIdx << 6 | EltIdx << 4 | ZMask;
5921 assert((InsertPSMask & ~0xFFu) == 0 && "Invalid mask!");
5922 SDValue Result = DAG.getNode(X86ISD::INSERTPS, SDLoc(Op), MVT::v4f32, V1, V2,
5923 DAG.getIntPtrConstant(InsertPSMask));
5924 return DAG.getNode(ISD::BITCAST, SDLoc(Op), VT, Result);
5927 /// Return a vector logical shift node.
5928 static SDValue getVShift(bool isLeft, EVT VT, SDValue SrcOp,
5929 unsigned NumBits, SelectionDAG &DAG,
5930 const TargetLowering &TLI, SDLoc dl) {
5931 assert(VT.is128BitVector() && "Unknown type for VShift");
5932 MVT ShVT = MVT::v2i64;
5933 unsigned Opc = isLeft ? X86ISD::VSHLDQ : X86ISD::VSRLDQ;
5934 SrcOp = DAG.getNode(ISD::BITCAST, dl, ShVT, SrcOp);
5935 MVT ScalarShiftTy = TLI.getScalarShiftAmountTy(SrcOp.getValueType());
5936 assert(NumBits % 8 == 0 && "Only support byte sized shifts");
5937 SDValue ShiftVal = DAG.getConstant(NumBits/8, ScalarShiftTy);
5938 return DAG.getNode(ISD::BITCAST, dl, VT,
5939 DAG.getNode(Opc, dl, ShVT, SrcOp, ShiftVal));
5943 LowerAsSplatVectorLoad(SDValue SrcOp, MVT VT, SDLoc dl, SelectionDAG &DAG) {
5945 // Check if the scalar load can be widened into a vector load. And if
5946 // the address is "base + cst" see if the cst can be "absorbed" into
5947 // the shuffle mask.
5948 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(SrcOp)) {
5949 SDValue Ptr = LD->getBasePtr();
5950 if (!ISD::isNormalLoad(LD) || LD->isVolatile())
5952 EVT PVT = LD->getValueType(0);
5953 if (PVT != MVT::i32 && PVT != MVT::f32)
5958 if (FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(Ptr)) {
5959 FI = FINode->getIndex();
5961 } else if (DAG.isBaseWithConstantOffset(Ptr) &&
5962 isa<FrameIndexSDNode>(Ptr.getOperand(0))) {
5963 FI = cast<FrameIndexSDNode>(Ptr.getOperand(0))->getIndex();
5964 Offset = Ptr.getConstantOperandVal(1);
5965 Ptr = Ptr.getOperand(0);
5970 // FIXME: 256-bit vector instructions don't require a strict alignment,
5971 // improve this code to support it better.
5972 unsigned RequiredAlign = VT.getSizeInBits()/8;
5973 SDValue Chain = LD->getChain();
5974 // Make sure the stack object alignment is at least 16 or 32.
5975 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
5976 if (DAG.InferPtrAlignment(Ptr) < RequiredAlign) {
5977 if (MFI->isFixedObjectIndex(FI)) {
5978 // Can't change the alignment. FIXME: It's possible to compute
5979 // the exact stack offset and reference FI + adjust offset instead.
5980 // If someone *really* cares about this. That's the way to implement it.
5983 MFI->setObjectAlignment(FI, RequiredAlign);
5987 // (Offset % 16 or 32) must be a multiple of 4. The address is then
5988 // Ptr + (Offset & ~15).
5991 if ((Offset % RequiredAlign) & 3)
5993 int64_t StartOffset = Offset & ~(RequiredAlign-1);
5995 Ptr = DAG.getNode(ISD::ADD, SDLoc(Ptr), Ptr.getValueType(),
5996 Ptr,DAG.getConstant(StartOffset, Ptr.getValueType()));
5998 int EltNo = (Offset - StartOffset) >> 2;
5999 unsigned NumElems = VT.getVectorNumElements();
6001 EVT NVT = EVT::getVectorVT(*DAG.getContext(), PVT, NumElems);
6002 SDValue V1 = DAG.getLoad(NVT, dl, Chain, Ptr,
6003 LD->getPointerInfo().getWithOffset(StartOffset),
6004 false, false, false, 0);
6006 SmallVector<int, 8> Mask(NumElems, EltNo);
6008 return DAG.getVectorShuffle(NVT, dl, V1, DAG.getUNDEF(NVT), &Mask[0]);
6014 /// Given the initializing elements 'Elts' of a vector of type 'VT', see if the
6015 /// elements can be replaced by a single large load which has the same value as
6016 /// a build_vector or insert_subvector whose loaded operands are 'Elts'.
6018 /// Example: <load i32 *a, load i32 *a+4, undef, undef> -> zextload a
6020 /// FIXME: we'd also like to handle the case where the last elements are zero
6021 /// rather than undef via VZEXT_LOAD, but we do not detect that case today.
6022 /// There's even a handy isZeroNode for that purpose.
6023 static SDValue EltsFromConsecutiveLoads(EVT VT, ArrayRef<SDValue> Elts,
6024 SDLoc &DL, SelectionDAG &DAG,
6025 bool isAfterLegalize) {
6026 unsigned NumElems = Elts.size();
6028 LoadSDNode *LDBase = nullptr;
6029 unsigned LastLoadedElt = -1U;
6031 // For each element in the initializer, see if we've found a load or an undef.
6032 // If we don't find an initial load element, or later load elements are
6033 // non-consecutive, bail out.
6034 for (unsigned i = 0; i < NumElems; ++i) {
6035 SDValue Elt = Elts[i];
6036 // Look through a bitcast.
6037 if (Elt.getNode() && Elt.getOpcode() == ISD::BITCAST)
6038 Elt = Elt.getOperand(0);
6039 if (!Elt.getNode() ||
6040 (Elt.getOpcode() != ISD::UNDEF && !ISD::isNON_EXTLoad(Elt.getNode())))
6043 if (Elt.getNode()->getOpcode() == ISD::UNDEF)
6045 LDBase = cast<LoadSDNode>(Elt.getNode());
6049 if (Elt.getOpcode() == ISD::UNDEF)
6052 LoadSDNode *LD = cast<LoadSDNode>(Elt);
6053 EVT LdVT = Elt.getValueType();
6054 // Each loaded element must be the correct fractional portion of the
6055 // requested vector load.
6056 if (LdVT.getSizeInBits() != VT.getSizeInBits() / NumElems)
6058 if (!DAG.isConsecutiveLoad(LD, LDBase, LdVT.getSizeInBits() / 8, i))
6063 // If we have found an entire vector of loads and undefs, then return a large
6064 // load of the entire vector width starting at the base pointer. If we found
6065 // consecutive loads for the low half, generate a vzext_load node.
6066 if (LastLoadedElt == NumElems - 1) {
6067 assert(LDBase && "Did not find base load for merging consecutive loads");
6068 EVT EltVT = LDBase->getValueType(0);
6069 // Ensure that the input vector size for the merged loads matches the
6070 // cumulative size of the input elements.
6071 if (VT.getSizeInBits() != EltVT.getSizeInBits() * NumElems)
6074 if (isAfterLegalize &&
6075 !DAG.getTargetLoweringInfo().isOperationLegal(ISD::LOAD, VT))
6078 SDValue NewLd = SDValue();
6080 NewLd = DAG.getLoad(VT, DL, LDBase->getChain(), LDBase->getBasePtr(),
6081 LDBase->getPointerInfo(), LDBase->isVolatile(),
6082 LDBase->isNonTemporal(), LDBase->isInvariant(),
6083 LDBase->getAlignment());
6085 if (LDBase->hasAnyUseOfValue(1)) {
6086 SDValue NewChain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other,
6088 SDValue(NewLd.getNode(), 1));
6089 DAG.ReplaceAllUsesOfValueWith(SDValue(LDBase, 1), NewChain);
6090 DAG.UpdateNodeOperands(NewChain.getNode(), SDValue(LDBase, 1),
6091 SDValue(NewLd.getNode(), 1));
6097 // TODO: The code below fires only for loading the low v2i32 / v2f32
6098 //of a v4i32 / v4f32. It's probably worth generalizing.
6099 EVT EltVT = VT.getVectorElementType();
6100 if (NumElems == 4 && LastLoadedElt == 1 && (EltVT.getSizeInBits() == 32) &&
6101 DAG.getTargetLoweringInfo().isTypeLegal(MVT::v2i64)) {
6102 SDVTList Tys = DAG.getVTList(MVT::v2i64, MVT::Other);
6103 SDValue Ops[] = { LDBase->getChain(), LDBase->getBasePtr() };
6105 DAG.getMemIntrinsicNode(X86ISD::VZEXT_LOAD, DL, Tys, Ops, MVT::i64,
6106 LDBase->getPointerInfo(),
6107 LDBase->getAlignment(),
6108 false/*isVolatile*/, true/*ReadMem*/,
6111 // Make sure the newly-created LOAD is in the same position as LDBase in
6112 // terms of dependency. We create a TokenFactor for LDBase and ResNode, and
6113 // update uses of LDBase's output chain to use the TokenFactor.
6114 if (LDBase->hasAnyUseOfValue(1)) {
6115 SDValue NewChain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other,
6116 SDValue(LDBase, 1), SDValue(ResNode.getNode(), 1));
6117 DAG.ReplaceAllUsesOfValueWith(SDValue(LDBase, 1), NewChain);
6118 DAG.UpdateNodeOperands(NewChain.getNode(), SDValue(LDBase, 1),
6119 SDValue(ResNode.getNode(), 1));
6122 return DAG.getNode(ISD::BITCAST, DL, VT, ResNode);
6127 /// LowerVectorBroadcast - Attempt to use the vbroadcast instruction
6128 /// to generate a splat value for the following cases:
6129 /// 1. A splat BUILD_VECTOR which uses a single scalar load, or a constant.
6130 /// 2. A splat shuffle which uses a scalar_to_vector node which comes from
6131 /// a scalar load, or a constant.
6132 /// The VBROADCAST node is returned when a pattern is found,
6133 /// or SDValue() otherwise.
6134 static SDValue LowerVectorBroadcast(SDValue Op, const X86Subtarget* Subtarget,
6135 SelectionDAG &DAG) {
6136 // VBROADCAST requires AVX.
6137 // TODO: Splats could be generated for non-AVX CPUs using SSE
6138 // instructions, but there's less potential gain for only 128-bit vectors.
6139 if (!Subtarget->hasAVX())
6142 MVT VT = Op.getSimpleValueType();
6145 assert((VT.is128BitVector() || VT.is256BitVector() || VT.is512BitVector()) &&
6146 "Unsupported vector type for broadcast.");
6151 switch (Op.getOpcode()) {
6153 // Unknown pattern found.
6156 case ISD::BUILD_VECTOR: {
6157 auto *BVOp = cast<BuildVectorSDNode>(Op.getNode());
6158 BitVector UndefElements;
6159 SDValue Splat = BVOp->getSplatValue(&UndefElements);
6161 // We need a splat of a single value to use broadcast, and it doesn't
6162 // make any sense if the value is only in one element of the vector.
6163 if (!Splat || (VT.getVectorNumElements() - UndefElements.count()) <= 1)
6167 ConstSplatVal = (Ld.getOpcode() == ISD::Constant ||
6168 Ld.getOpcode() == ISD::ConstantFP);
6170 // Make sure that all of the users of a non-constant load are from the
6171 // BUILD_VECTOR node.
6172 if (!ConstSplatVal && !BVOp->isOnlyUserOf(Ld.getNode()))
6177 case ISD::VECTOR_SHUFFLE: {
6178 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
6180 // Shuffles must have a splat mask where the first element is broadcasted.
6182 if ((!SVOp->isSplat()) || SVOp->getMaskElt(0) != 0)
6185 SDValue Sc = Op.getOperand(0);
6186 if (Sc.getOpcode() != ISD::SCALAR_TO_VECTOR &&
6187 Sc.getOpcode() != ISD::BUILD_VECTOR) {
6189 if (!Subtarget->hasInt256())
6192 // Use the register form of the broadcast instruction available on AVX2.
6193 if (VT.getSizeInBits() >= 256)
6194 Sc = Extract128BitVector(Sc, 0, DAG, dl);
6195 return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Sc);
6198 Ld = Sc.getOperand(0);
6199 ConstSplatVal = (Ld.getOpcode() == ISD::Constant ||
6200 Ld.getOpcode() == ISD::ConstantFP);
6202 // The scalar_to_vector node and the suspected
6203 // load node must have exactly one user.
6204 // Constants may have multiple users.
6206 // AVX-512 has register version of the broadcast
6207 bool hasRegVer = Subtarget->hasAVX512() && VT.is512BitVector() &&
6208 Ld.getValueType().getSizeInBits() >= 32;
6209 if (!ConstSplatVal && ((!Sc.hasOneUse() || !Ld.hasOneUse()) &&
6216 unsigned ScalarSize = Ld.getValueType().getSizeInBits();
6217 bool IsGE256 = (VT.getSizeInBits() >= 256);
6219 // When optimizing for size, generate up to 5 extra bytes for a broadcast
6220 // instruction to save 8 or more bytes of constant pool data.
6221 // TODO: If multiple splats are generated to load the same constant,
6222 // it may be detrimental to overall size. There needs to be a way to detect
6223 // that condition to know if this is truly a size win.
6224 const Function *F = DAG.getMachineFunction().getFunction();
6225 bool OptForSize = F->hasFnAttribute(Attribute::OptimizeForSize);
6227 // Handle broadcasting a single constant scalar from the constant pool
6229 // On Sandybridge (no AVX2), it is still better to load a constant vector
6230 // from the constant pool and not to broadcast it from a scalar.
6231 // But override that restriction when optimizing for size.
6232 // TODO: Check if splatting is recommended for other AVX-capable CPUs.
6233 if (ConstSplatVal && (Subtarget->hasAVX2() || OptForSize)) {
6234 EVT CVT = Ld.getValueType();
6235 assert(!CVT.isVector() && "Must not broadcast a vector type");
6237 // Splat f32, i32, v4f64, v4i64 in all cases with AVX2.
6238 // For size optimization, also splat v2f64 and v2i64, and for size opt
6239 // with AVX2, also splat i8 and i16.
6240 // With pattern matching, the VBROADCAST node may become a VMOVDDUP.
6241 if (ScalarSize == 32 || (IsGE256 && ScalarSize == 64) ||
6242 (OptForSize && (ScalarSize == 64 || Subtarget->hasAVX2()))) {
6243 const Constant *C = nullptr;
6244 if (ConstantSDNode *CI = dyn_cast<ConstantSDNode>(Ld))
6245 C = CI->getConstantIntValue();
6246 else if (ConstantFPSDNode *CF = dyn_cast<ConstantFPSDNode>(Ld))
6247 C = CF->getConstantFPValue();
6249 assert(C && "Invalid constant type");
6251 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
6252 SDValue CP = DAG.getConstantPool(C, TLI.getPointerTy());
6253 unsigned Alignment = cast<ConstantPoolSDNode>(CP)->getAlignment();
6254 Ld = DAG.getLoad(CVT, dl, DAG.getEntryNode(), CP,
6255 MachinePointerInfo::getConstantPool(),
6256 false, false, false, Alignment);
6258 return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld);
6262 bool IsLoad = ISD::isNormalLoad(Ld.getNode());
6264 // Handle AVX2 in-register broadcasts.
6265 if (!IsLoad && Subtarget->hasInt256() &&
6266 (ScalarSize == 32 || (IsGE256 && ScalarSize == 64)))
6267 return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld);
6269 // The scalar source must be a normal load.
6273 if (ScalarSize == 32 || (IsGE256 && ScalarSize == 64) ||
6274 (Subtarget->hasVLX() && ScalarSize == 64))
6275 return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld);
6277 // The integer check is needed for the 64-bit element into 128-bit vector case,
6278 // so it doesn't match double, since there is no vbroadcastsd xmm instruction.
6279 if (Subtarget->hasInt256() && Ld.getValueType().isInteger()) {
6280 if (ScalarSize == 8 || ScalarSize == 16 || ScalarSize == 64)
6281 return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld);
6284 // Unsupported broadcast.
6288 /// \brief For an EXTRACT_VECTOR_ELT with a constant index return the real
6289 /// underlying vector and index.
6291 /// Modifies \p ExtractedFromVec to the real vector and returns the real index.
6293 static int getUnderlyingExtractedFromVec(SDValue &ExtractedFromVec,
6295 int Idx = cast<ConstantSDNode>(ExtIdx)->getZExtValue();
6296 if (!isa<ShuffleVectorSDNode>(ExtractedFromVec))
6299 // For 256-bit vectors, LowerEXTRACT_VECTOR_ELT_SSE4 may have already
6301 // (extract_vector_elt (v8f32 %vreg1), Constant<6>)
6303 // (extract_vector_elt (vector_shuffle<2,u,u,u>
6304 // (extract_subvector (v8f32 %vreg0), Constant<4>),
6307 // In this case the vector is the extract_subvector expression and the index
6308 // is 2, as specified by the shuffle.
6309 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(ExtractedFromVec);
6310 SDValue ShuffleVec = SVOp->getOperand(0);
6311 MVT ShuffleVecVT = ShuffleVec.getSimpleValueType();
6312 assert(ShuffleVecVT.getVectorElementType() ==
6313 ExtractedFromVec.getSimpleValueType().getVectorElementType());
6315 int ShuffleIdx = SVOp->getMaskElt(Idx);
6316 if (isUndefOrInRange(ShuffleIdx, 0, ShuffleVecVT.getVectorNumElements())) {
6317 ExtractedFromVec = ShuffleVec;
6323 static SDValue buildFromShuffleMostly(SDValue Op, SelectionDAG &DAG) {
6324 MVT VT = Op.getSimpleValueType();
6326 // Skip if insert_vec_elt is not supported.
6327 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
6328 if (!TLI.isOperationLegalOrCustom(ISD::INSERT_VECTOR_ELT, VT))
6332 unsigned NumElems = Op.getNumOperands();
6336 SmallVector<unsigned, 4> InsertIndices;
6337 SmallVector<int, 8> Mask(NumElems, -1);
6339 for (unsigned i = 0; i != NumElems; ++i) {
6340 unsigned Opc = Op.getOperand(i).getOpcode();
6342 if (Opc == ISD::UNDEF)
6345 if (Opc != ISD::EXTRACT_VECTOR_ELT) {
6346 // Quit if more than 1 element needs inserting.
6347 if (InsertIndices.size() > 1)
6350 InsertIndices.push_back(i);
6354 SDValue ExtractedFromVec = Op.getOperand(i).getOperand(0);
6355 SDValue ExtIdx = Op.getOperand(i).getOperand(1);
6356 // Quit if non-constant index.
6357 if (!isa<ConstantSDNode>(ExtIdx))
6359 int Idx = getUnderlyingExtractedFromVec(ExtractedFromVec, ExtIdx);
6361 // Quit if extracted from vector of different type.
6362 if (ExtractedFromVec.getValueType() != VT)
6365 if (!VecIn1.getNode())
6366 VecIn1 = ExtractedFromVec;
6367 else if (VecIn1 != ExtractedFromVec) {
6368 if (!VecIn2.getNode())
6369 VecIn2 = ExtractedFromVec;
6370 else if (VecIn2 != ExtractedFromVec)
6371 // Quit if more than 2 vectors to shuffle
6375 if (ExtractedFromVec == VecIn1)
6377 else if (ExtractedFromVec == VecIn2)
6378 Mask[i] = Idx + NumElems;
6381 if (!VecIn1.getNode())
6384 VecIn2 = VecIn2.getNode() ? VecIn2 : DAG.getUNDEF(VT);
6385 SDValue NV = DAG.getVectorShuffle(VT, DL, VecIn1, VecIn2, &Mask[0]);
6386 for (unsigned i = 0, e = InsertIndices.size(); i != e; ++i) {
6387 unsigned Idx = InsertIndices[i];
6388 NV = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VT, NV, Op.getOperand(Idx),
6389 DAG.getIntPtrConstant(Idx));
6395 // Lower BUILD_VECTOR operation for v8i1 and v16i1 types.
6397 X86TargetLowering::LowerBUILD_VECTORvXi1(SDValue Op, SelectionDAG &DAG) const {
6399 MVT VT = Op.getSimpleValueType();
6400 assert((VT.getVectorElementType() == MVT::i1) && (VT.getSizeInBits() <= 16) &&
6401 "Unexpected type in LowerBUILD_VECTORvXi1!");
6404 if (ISD::isBuildVectorAllZeros(Op.getNode())) {
6405 SDValue Cst = DAG.getTargetConstant(0, MVT::i1);
6406 SmallVector<SDValue, 16> Ops(VT.getVectorNumElements(), Cst);
6407 return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Ops);
6410 if (ISD::isBuildVectorAllOnes(Op.getNode())) {
6411 SDValue Cst = DAG.getTargetConstant(1, MVT::i1);
6412 SmallVector<SDValue, 16> Ops(VT.getVectorNumElements(), Cst);
6413 return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Ops);
6416 bool AllContants = true;
6417 uint64_t Immediate = 0;
6418 int NonConstIdx = -1;
6419 bool IsSplat = true;
6420 unsigned NumNonConsts = 0;
6421 unsigned NumConsts = 0;
6422 for (unsigned idx = 0, e = Op.getNumOperands(); idx < e; ++idx) {
6423 SDValue In = Op.getOperand(idx);
6424 if (In.getOpcode() == ISD::UNDEF)
6426 if (!isa<ConstantSDNode>(In)) {
6427 AllContants = false;
6432 if (cast<ConstantSDNode>(In)->getZExtValue())
6433 Immediate |= (1ULL << idx);
6435 if (In != Op.getOperand(0))
6440 SDValue FullMask = DAG.getNode(ISD::BITCAST, dl, MVT::v16i1,
6441 DAG.getConstant(Immediate, MVT::i16));
6442 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, FullMask,
6443 DAG.getIntPtrConstant(0));
6446 if (NumNonConsts == 1 && NonConstIdx != 0) {
6449 SDValue VecAsImm = DAG.getConstant(Immediate,
6450 MVT::getIntegerVT(VT.getSizeInBits()));
6451 DstVec = DAG.getNode(ISD::BITCAST, dl, VT, VecAsImm);
6454 DstVec = DAG.getUNDEF(VT);
6455 return DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, DstVec,
6456 Op.getOperand(NonConstIdx),
6457 DAG.getIntPtrConstant(NonConstIdx));
6459 if (!IsSplat && (NonConstIdx != 0))
6460 llvm_unreachable("Unsupported BUILD_VECTOR operation");
6461 MVT SelectVT = (VT == MVT::v16i1)? MVT::i16 : MVT::i8;
6464 Select = DAG.getNode(ISD::SELECT, dl, SelectVT, Op.getOperand(0),
6465 DAG.getConstant(-1, SelectVT),
6466 DAG.getConstant(0, SelectVT));
6468 Select = DAG.getNode(ISD::SELECT, dl, SelectVT, Op.getOperand(0),
6469 DAG.getConstant((Immediate | 1), SelectVT),
6470 DAG.getConstant(Immediate, SelectVT));
6471 return DAG.getNode(ISD::BITCAST, dl, VT, Select);
6474 /// \brief Return true if \p N implements a horizontal binop and return the
6475 /// operands for the horizontal binop into V0 and V1.
6477 /// This is a helper function of PerformBUILD_VECTORCombine.
6478 /// This function checks that the build_vector \p N in input implements a
6479 /// horizontal operation. Parameter \p Opcode defines the kind of horizontal
6480 /// operation to match.
6481 /// For example, if \p Opcode is equal to ISD::ADD, then this function
6482 /// checks if \p N implements a horizontal arithmetic add; if instead \p Opcode
6483 /// is equal to ISD::SUB, then this function checks if this is a horizontal arithmetic sub.
6486 /// This function only analyzes elements of \p N whose indices are
6487 /// in range [BaseIdx, LastIdx).
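/// As an illustrative sketch, with \p Opcode equal to ISD::FADD, BaseIdx == 0
/// and LastIdx == 4, a v4f32 build_vector of the form
///   (build_vector (fadd (extract_vector_elt A, 0), (extract_vector_elt A, 1)),
///                 (fadd (extract_vector_elt A, 2), (extract_vector_elt A, 3)),
///                 (fadd (extract_vector_elt B, 0), (extract_vector_elt B, 1)),
///                 (fadd (extract_vector_elt B, 2), (extract_vector_elt B, 3)))
/// is recognized as a horizontal add with V0 == A and V1 == B, matching the
/// semantics of haddps A, B.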
6488 static bool isHorizontalBinOp(const BuildVectorSDNode *N, unsigned Opcode,
6490 unsigned BaseIdx, unsigned LastIdx,
6491 SDValue &V0, SDValue &V1) {
6492 EVT VT = N->getValueType(0);
6494 assert(BaseIdx * 2 <= LastIdx && "Invalid Indices in input!");
6495 assert(VT.isVector() && VT.getVectorNumElements() >= LastIdx &&
6496 "Invalid Vector in input!");
6498 bool IsCommutable = (Opcode == ISD::ADD || Opcode == ISD::FADD);
6499 bool CanFold = true;
6500 unsigned ExpectedVExtractIdx = BaseIdx;
6501 unsigned NumElts = LastIdx - BaseIdx;
6502 V0 = DAG.getUNDEF(VT);
6503 V1 = DAG.getUNDEF(VT);
6505 // Check if N implements a horizontal binop.
6506 for (unsigned i = 0, e = NumElts; i != e && CanFold; ++i) {
6507 SDValue Op = N->getOperand(i + BaseIdx);
6510 if (Op->getOpcode() == ISD::UNDEF) {
6511 // Update the expected vector extract index.
6512 if (i * 2 == NumElts)
6513 ExpectedVExtractIdx = BaseIdx;
6514 ExpectedVExtractIdx += 2;
6518 CanFold = Op->getOpcode() == Opcode && Op->hasOneUse();
6523 SDValue Op0 = Op.getOperand(0);
6524 SDValue Op1 = Op.getOperand(1);
6526 // Try to match the following pattern:
6527 // (BINOP (extract_vector_elt A, I), (extract_vector_elt A, I+1))
6528 CanFold = (Op0.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
6529 Op1.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
6530 Op0.getOperand(0) == Op1.getOperand(0) &&
6531 isa<ConstantSDNode>(Op0.getOperand(1)) &&
6532 isa<ConstantSDNode>(Op1.getOperand(1)));
6536 unsigned I0 = cast<ConstantSDNode>(Op0.getOperand(1))->getZExtValue();
6537 unsigned I1 = cast<ConstantSDNode>(Op1.getOperand(1))->getZExtValue();
6539 if (i * 2 < NumElts) {
6540 if (V0.getOpcode() == ISD::UNDEF)
6541 V0 = Op0.getOperand(0);
6543 if (V1.getOpcode() == ISD::UNDEF)
6544 V1 = Op0.getOperand(0);
6545 if (i * 2 == NumElts)
6546 ExpectedVExtractIdx = BaseIdx;
6549 SDValue Expected = (i * 2 < NumElts) ? V0 : V1;
6550 if (I0 == ExpectedVExtractIdx)
6551 CanFold = I1 == I0 + 1 && Op0.getOperand(0) == Expected;
6552 else if (IsCommutable && I1 == ExpectedVExtractIdx) {
6553 // Try to match the following dag sequence:
6554 // (BINOP (extract_vector_elt A, I+1), (extract_vector_elt A, I))
6555 CanFold = I0 == I1 + 1 && Op1.getOperand(0) == Expected;
6559 ExpectedVExtractIdx += 2;
6565 /// \brief Emit a sequence of two 128-bit horizontal add/sub followed by
6566 /// a concat_vector.
6568 /// This is a helper function of PerformBUILD_VECTORCombine.
6569 /// This function expects two 256-bit vectors called V0 and V1.
6570 /// At first, each vector is split into two separate 128-bit vectors.
6571 /// Then, the resulting 128-bit vectors are used to implement two
6572 /// horizontal binary operations.
6574 /// The kind of horizontal binary operation is defined by \p X86Opcode.
6576 /// \p Mode specifies how the 128-bit parts of V0 and V1 are passed in input to
6577 /// the two new horizontal binops.
6578 /// When Mode is set, the first horizontal binop dag node takes as input
6579 /// the lower 128 bits of V0 and the upper 128 bits of V0. The second
6580 /// horizontal binop dag node takes as input the lower 128 bits of V1
6581 /// and the upper 128 bits of V1.
6583 /// HADD V0_LO, V0_HI
6584 /// HADD V1_LO, V1_HI
6586 /// Otherwise, the first horizontal binop dag node takes as input the lower
6587 /// 128 bits of V0 and the lower 128 bits of V1, and the second horizontal binop
6588 /// dag node takes the upper 128 bits of V0 and the upper 128 bits of V1.
6590 /// HADD V0_LO, V1_LO
6591 /// HADD V0_HI, V1_HI
6593 /// If \p isUndefLO is set, then the algorithm propagates UNDEF to the lower
6594 /// 128-bits of the result. If \p isUndefHI is set, then UNDEF is propagated to
6595 /// the upper 128-bits of the result.
6596 static SDValue ExpandHorizontalBinOp(const SDValue &V0, const SDValue &V1,
6597 SDLoc DL, SelectionDAG &DAG,
6598 unsigned X86Opcode, bool Mode,
6599 bool isUndefLO, bool isUndefHI) {
6600 EVT VT = V0.getValueType();
6601 assert(VT.is256BitVector() && VT == V1.getValueType() &&
6602 "Invalid nodes in input!");
6604 unsigned NumElts = VT.getVectorNumElements();
6605 SDValue V0_LO = Extract128BitVector(V0, 0, DAG, DL);
6606 SDValue V0_HI = Extract128BitVector(V0, NumElts/2, DAG, DL);
6607 SDValue V1_LO = Extract128BitVector(V1, 0, DAG, DL);
6608 SDValue V1_HI = Extract128BitVector(V1, NumElts/2, DAG, DL);
6609 EVT NewVT = V0_LO.getValueType();
6611 SDValue LO = DAG.getUNDEF(NewVT);
6612 SDValue HI = DAG.getUNDEF(NewVT);
6615 // Don't emit a horizontal binop if the result is expected to be UNDEF.
6616 if (!isUndefLO && V0->getOpcode() != ISD::UNDEF)
6617 LO = DAG.getNode(X86Opcode, DL, NewVT, V0_LO, V0_HI);
6618 if (!isUndefHI && V1->getOpcode() != ISD::UNDEF)
6619 HI = DAG.getNode(X86Opcode, DL, NewVT, V1_LO, V1_HI);
6621 // Don't emit a horizontal binop if the result is expected to be UNDEF.
6622 if (!isUndefLO && (V0_LO->getOpcode() != ISD::UNDEF ||
6623 V1_LO->getOpcode() != ISD::UNDEF))
6624 LO = DAG.getNode(X86Opcode, DL, NewVT, V0_LO, V1_LO);
6626 if (!isUndefHI && (V0_HI->getOpcode() != ISD::UNDEF ||
6627 V1_HI->getOpcode() != ISD::UNDEF))
6628 HI = DAG.getNode(X86Opcode, DL, NewVT, V0_HI, V1_HI);
6631 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, LO, HI);
6634 /// \brief Try to fold a build_vector that performs an 'addsub' into the
6635 /// sequence of 'vadd + vsub + blendi'.
6636 static SDValue matchAddSub(const BuildVectorSDNode *BV, SelectionDAG &DAG,
6637 const X86Subtarget *Subtarget) {
6639 EVT VT = BV->getValueType(0);
6640 unsigned NumElts = VT.getVectorNumElements();
6641 SDValue InVec0 = DAG.getUNDEF(VT);
6642 SDValue InVec1 = DAG.getUNDEF(VT);
6644 assert((VT == MVT::v8f32 || VT == MVT::v4f64 || VT == MVT::v4f32 ||
6645 VT == MVT::v2f64) && "build_vector with an invalid type found!");
6647 // Odd-numbered elements in the input build vector are obtained from
6648 // adding two integer/float elements.
6649 // Even-numbered elements in the input build vector are obtained from
6650 // subtracting two integer/float elements.
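// For example (an illustrative v4f32 sketch), a build_vector of the form
//   ((fsub (extract_vector_elt A, 0), (extract_vector_elt B, 0)),
//    (fadd (extract_vector_elt A, 1), (extract_vector_elt B, 1)),
//    (fsub (extract_vector_elt A, 2), (extract_vector_elt B, 2)),
//    (fadd (extract_vector_elt A, 3), (extract_vector_elt B, 3)))
// is folded into (X86ISD::ADDSUB A, B), i.e. a single addsubps.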
6651 unsigned ExpectedOpcode = ISD::FSUB;
6652 unsigned NextExpectedOpcode = ISD::FADD;
6653 bool AddFound = false;
6654 bool SubFound = false;
6656 for (unsigned i = 0, e = NumElts; i != e; ++i) {
6657 SDValue Op = BV->getOperand(i);
6659 // Skip 'undef' values.
6660 unsigned Opcode = Op.getOpcode();
6661 if (Opcode == ISD::UNDEF) {
6662 std::swap(ExpectedOpcode, NextExpectedOpcode);
6666 // Early exit if we found an unexpected opcode.
6667 if (Opcode != ExpectedOpcode)
6670 SDValue Op0 = Op.getOperand(0);
6671 SDValue Op1 = Op.getOperand(1);
6673 // Try to match the following pattern:
6674 // (BINOP (extract_vector_elt A, i), (extract_vector_elt B, i))
6675 // Early exit if we cannot match that sequence.
6676 if (Op0.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
6677 Op1.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
6678 !isa<ConstantSDNode>(Op0.getOperand(1)) ||
6679 !isa<ConstantSDNode>(Op1.getOperand(1)) ||
6680 Op0.getOperand(1) != Op1.getOperand(1))
6683 unsigned I0 = cast<ConstantSDNode>(Op0.getOperand(1))->getZExtValue();
6687 // We found a valid add/sub node. Update the information accordingly.
6693 // Update InVec0 and InVec1.
6694 if (InVec0.getOpcode() == ISD::UNDEF)
6695 InVec0 = Op0.getOperand(0);
6696 if (InVec1.getOpcode() == ISD::UNDEF)
6697 InVec1 = Op1.getOperand(0);
6699 // Make sure that the operands of each add/sub node always
6700 // come from the same pair of vectors.
6701 if (InVec0 != Op0.getOperand(0)) {
6702 if (ExpectedOpcode == ISD::FSUB)
6705 // FADD is commutable. Try to commute the operands
6706 // and then test again.
6707 std::swap(Op0, Op1);
6708 if (InVec0 != Op0.getOperand(0))
6712 if (InVec1 != Op1.getOperand(0))
6715 // Update the pair of expected opcodes.
6716 std::swap(ExpectedOpcode, NextExpectedOpcode);
6719 // Don't try to fold this build_vector into an ADDSUB if the inputs are undef.
6720 if (AddFound && SubFound && InVec0.getOpcode() != ISD::UNDEF &&
6721 InVec1.getOpcode() != ISD::UNDEF)
6722 return DAG.getNode(X86ISD::ADDSUB, DL, VT, InVec0, InVec1);
6727 static SDValue PerformBUILD_VECTORCombine(SDNode *N, SelectionDAG &DAG,
6728 const X86Subtarget *Subtarget) {
6730 EVT VT = N->getValueType(0);
6731 unsigned NumElts = VT.getVectorNumElements();
6732 BuildVectorSDNode *BV = cast<BuildVectorSDNode>(N);
6733 SDValue InVec0, InVec1;
6735 // Try to match an ADDSUB.
6736 if ((Subtarget->hasSSE3() && (VT == MVT::v4f32 || VT == MVT::v2f64)) ||
6737 (Subtarget->hasAVX() && (VT == MVT::v8f32 || VT == MVT::v4f64))) {
6738 SDValue Value = matchAddSub(BV, DAG, Subtarget);
6739 if (Value.getNode())
6743 // Try to match horizontal ADD/SUB.
6744 unsigned NumUndefsLO = 0;
6745 unsigned NumUndefsHI = 0;
6746 unsigned Half = NumElts/2;
6748 // Count the number of UNDEF operands in the build_vector in input.
6749 for (unsigned i = 0, e = Half; i != e; ++i)
6750 if (BV->getOperand(i)->getOpcode() == ISD::UNDEF)
6753 for (unsigned i = Half, e = NumElts; i != e; ++i)
6754 if (BV->getOperand(i)->getOpcode() == ISD::UNDEF)
6757 // Early exit if this is either a build_vector of all UNDEFs or if all the
6758 // operands but one are UNDEF.
6759 if (NumUndefsLO + NumUndefsHI + 1 >= NumElts)
6762 if ((VT == MVT::v4f32 || VT == MVT::v2f64) && Subtarget->hasSSE3()) {
6763 // Try to match an SSE3 float HADD/HSUB.
6764 if (isHorizontalBinOp(BV, ISD::FADD, DAG, 0, NumElts, InVec0, InVec1))
6765 return DAG.getNode(X86ISD::FHADD, DL, VT, InVec0, InVec1);
6767 if (isHorizontalBinOp(BV, ISD::FSUB, DAG, 0, NumElts, InVec0, InVec1))
6768 return DAG.getNode(X86ISD::FHSUB, DL, VT, InVec0, InVec1);
6769 } else if ((VT == MVT::v4i32 || VT == MVT::v8i16) && Subtarget->hasSSSE3()) {
6770 // Try to match an SSSE3 integer HADD/HSUB.
6771 if (isHorizontalBinOp(BV, ISD::ADD, DAG, 0, NumElts, InVec0, InVec1))
6772 return DAG.getNode(X86ISD::HADD, DL, VT, InVec0, InVec1);
6774 if (isHorizontalBinOp(BV, ISD::SUB, DAG, 0, NumElts, InVec0, InVec1))
6775 return DAG.getNode(X86ISD::HSUB, DL, VT, InVec0, InVec1);
6778 if (!Subtarget->hasAVX())
6781 if ((VT == MVT::v8f32 || VT == MVT::v4f64)) {
6782 // Try to match an AVX horizontal add/sub of packed single/double
6783 // precision floating point values from 256-bit vectors.
6784 SDValue InVec2, InVec3;
6785 if (isHorizontalBinOp(BV, ISD::FADD, DAG, 0, Half, InVec0, InVec1) &&
6786 isHorizontalBinOp(BV, ISD::FADD, DAG, Half, NumElts, InVec2, InVec3) &&
6787 ((InVec0.getOpcode() == ISD::UNDEF ||
6788 InVec2.getOpcode() == ISD::UNDEF) || InVec0 == InVec2) &&
6789 ((InVec1.getOpcode() == ISD::UNDEF ||
6790 InVec3.getOpcode() == ISD::UNDEF) || InVec1 == InVec3))
6791 return DAG.getNode(X86ISD::FHADD, DL, VT, InVec0, InVec1);
6793 if (isHorizontalBinOp(BV, ISD::FSUB, DAG, 0, Half, InVec0, InVec1) &&
6794 isHorizontalBinOp(BV, ISD::FSUB, DAG, Half, NumElts, InVec2, InVec3) &&
6795 ((InVec0.getOpcode() == ISD::UNDEF ||
6796 InVec2.getOpcode() == ISD::UNDEF) || InVec0 == InVec2) &&
6797 ((InVec1.getOpcode() == ISD::UNDEF ||
6798 InVec3.getOpcode() == ISD::UNDEF) || InVec1 == InVec3))
6799 return DAG.getNode(X86ISD::FHSUB, DL, VT, InVec0, InVec1);
6800 } else if (VT == MVT::v8i32 || VT == MVT::v16i16) {
6801 // Try to match an AVX2 horizontal add/sub of signed integers.
6802 SDValue InVec2, InVec3;
6804 bool CanFold = true;
6806 if (isHorizontalBinOp(BV, ISD::ADD, DAG, 0, Half, InVec0, InVec1) &&
6807 isHorizontalBinOp(BV, ISD::ADD, DAG, Half, NumElts, InVec2, InVec3) &&
6808 ((InVec0.getOpcode() == ISD::UNDEF ||
6809 InVec2.getOpcode() == ISD::UNDEF) || InVec0 == InVec2) &&
6810 ((InVec1.getOpcode() == ISD::UNDEF ||
6811 InVec3.getOpcode() == ISD::UNDEF) || InVec1 == InVec3))
6812 X86Opcode = X86ISD::HADD;
6813 else if (isHorizontalBinOp(BV, ISD::SUB, DAG, 0, Half, InVec0, InVec1) &&
6814 isHorizontalBinOp(BV, ISD::SUB, DAG, Half, NumElts, InVec2, InVec3) &&
6815 ((InVec0.getOpcode() == ISD::UNDEF ||
6816 InVec2.getOpcode() == ISD::UNDEF) || InVec0 == InVec2) &&
6817 ((InVec1.getOpcode() == ISD::UNDEF ||
6818 InVec3.getOpcode() == ISD::UNDEF) || InVec1 == InVec3))
6819 X86Opcode = X86ISD::HSUB;
6824 // Fold this build_vector into a single horizontal add/sub.
6825 // Do this only if the target has AVX2.
6826 if (Subtarget->hasAVX2())
6827 return DAG.getNode(X86Opcode, DL, VT, InVec0, InVec1);
6829 // Do not try to expand this build_vector into a pair of horizontal
6830 // add/sub if we can emit a pair of scalar add/sub.
6831 if (NumUndefsLO + 1 == Half || NumUndefsHI + 1 == Half)
6834 // Convert this build_vector into a pair of horizontal binops followed by a concat_vectors node.
6836 bool isUndefLO = NumUndefsLO == Half;
6837 bool isUndefHI = NumUndefsHI == Half;
6838 return ExpandHorizontalBinOp(InVec0, InVec1, DL, DAG, X86Opcode, false,
6839 isUndefLO, isUndefHI);
6843 if ((VT == MVT::v8f32 || VT == MVT::v4f64 || VT == MVT::v8i32 ||
6844 VT == MVT::v16i16) && Subtarget->hasAVX()) {
6846 if (isHorizontalBinOp(BV, ISD::ADD, DAG, 0, NumElts, InVec0, InVec1))
6847 X86Opcode = X86ISD::HADD;
6848 else if (isHorizontalBinOp(BV, ISD::SUB, DAG, 0, NumElts, InVec0, InVec1))
6849 X86Opcode = X86ISD::HSUB;
6850 else if (isHorizontalBinOp(BV, ISD::FADD, DAG, 0, NumElts, InVec0, InVec1))
6851 X86Opcode = X86ISD::FHADD;
6852 else if (isHorizontalBinOp(BV, ISD::FSUB, DAG, 0, NumElts, InVec0, InVec1))
6853 X86Opcode = X86ISD::FHSUB;
6857 // Don't try to expand this build_vector into a pair of horizontal add/sub
6858 // if we can simply emit a pair of scalar add/sub.
6859 if (NumUndefsLO + 1 == Half || NumUndefsHI + 1 == Half)
6862 // Convert this build_vector into two horizontal add/sub followed by a concat_vectors node.
6864 bool isUndefLO = NumUndefsLO == Half;
6865 bool isUndefHI = NumUndefsHI == Half;
6866 return ExpandHorizontalBinOp(InVec0, InVec1, DL, DAG, X86Opcode, true,
6867 isUndefLO, isUndefHI);
6874 X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const {
6877 MVT VT = Op.getSimpleValueType();
6878 MVT ExtVT = VT.getVectorElementType();
6879 unsigned NumElems = Op.getNumOperands();
6881 // Generate vectors for predicate vectors.
6882 if (VT.getScalarType() == MVT::i1 && Subtarget->hasAVX512())
6883 return LowerBUILD_VECTORvXi1(Op, DAG);
6885 // Vectors containing all zeros can be matched by pxor and xorps later
6886 if (ISD::isBuildVectorAllZeros(Op.getNode())) {
6887 // Canonicalize this to <4 x i32> to 1) ensure the zero vectors are CSE'd
6888 // and 2) ensure that i64 scalars are eliminated on x86-32 hosts.
6889 if (VT == MVT::v4i32 || VT == MVT::v8i32 || VT == MVT::v16i32)
6892 return getZeroVector(VT, Subtarget, DAG, dl);
6895 // Vectors containing all ones can be matched by pcmpeqd on 128-bit width
6896 // vectors or broken into v4i32 operations on 256-bit vectors. AVX2 can use
6897 // vpcmpeqd on 256-bit vectors.
6898 if (Subtarget->hasSSE2() && ISD::isBuildVectorAllOnes(Op.getNode())) {
6899 if (VT == MVT::v4i32 || (VT == MVT::v8i32 && Subtarget->hasInt256()))
6902 if (!VT.is512BitVector())
6903 return getOnesVector(VT, Subtarget->hasInt256(), DAG, dl);
6906 SDValue Broadcast = LowerVectorBroadcast(Op, Subtarget, DAG);
6907 if (Broadcast.getNode())
6910 unsigned EVTBits = ExtVT.getSizeInBits();
6912 unsigned NumZero = 0;
6913 unsigned NumNonZero = 0;
6914 unsigned NonZeros = 0;
6915 bool IsAllConstants = true;
6916 SmallSet<SDValue, 8> Values;
6917 for (unsigned i = 0; i < NumElems; ++i) {
6918 SDValue Elt = Op.getOperand(i);
6919 if (Elt.getOpcode() == ISD::UNDEF)
6922 if (Elt.getOpcode() != ISD::Constant &&
6923 Elt.getOpcode() != ISD::ConstantFP)
6924 IsAllConstants = false;
6925 if (X86::isZeroNode(Elt))
6928 NonZeros |= (1 << i);
6933 // All undef vector. Return an UNDEF. All zero vectors were handled above.
6934 if (NumNonZero == 0)
6935 return DAG.getUNDEF(VT);
6937 // Special case for a single non-zero, non-undef element.
6938 if (NumNonZero == 1) {
6939 unsigned Idx = countTrailingZeros(NonZeros);
6940 SDValue Item = Op.getOperand(Idx);
6942 // If this is an insertion of an i64 value on x86-32, and if the top bits of
6943 // the value are obviously zero, truncate the value to i32 and do the
6944 // insertion that way. Only do this if the value is non-constant or if the
6945 // value is a constant being inserted into element 0. It is cheaper to do
6946 // a constant pool load than it is to do a movd + shuffle.
6947 if (ExtVT == MVT::i64 && !Subtarget->is64Bit() &&
6948 (!IsAllConstants || Idx == 0)) {
6949 if (DAG.MaskedValueIsZero(Item, APInt::getBitsSet(64, 32, 64))) {
6951 assert(VT == MVT::v2i64 && "Expected an SSE value type!");
6952 EVT VecVT = MVT::v4i32;
6953 unsigned VecElts = 4;
6955 // Truncate the value (which may itself be a constant) to i32, and
6956 // convert it to a vector with movd (S2V+shuffle to zero extend).
6957 Item = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Item);
6958 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VecVT, Item);
6960 // If using the new shuffle lowering, just directly insert this.
6961 if (ExperimentalVectorShuffleLowering)
6963 ISD::BITCAST, dl, VT,
6964 getShuffleVectorZeroOrUndef(Item, Idx * 2, true, Subtarget, DAG));
6966 Item = getShuffleVectorZeroOrUndef(Item, 0, true, Subtarget, DAG);
6968 // Now we have our 32-bit value zero extended in the low element of
6969 // a vector. If Idx != 0, swizzle it into place.
6971 SmallVector<int, 4> Mask;
6972 Mask.push_back(Idx);
6973 for (unsigned i = 1; i != VecElts; ++i)
6975 Item = DAG.getVectorShuffle(VecVT, dl, Item, DAG.getUNDEF(VecVT),
6978 return DAG.getNode(ISD::BITCAST, dl, VT, Item);
6982 // If we have a constant or non-constant insertion into the low element of
6983 // a vector, we can do this with SCALAR_TO_VECTOR + shuffle of zero into
6984 // the rest of the elements. This will be matched as movd/movq/movss/movsd
6985 // depending on what the source datatype is.
6988 return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Item);
6990 if (ExtVT == MVT::i32 || ExtVT == MVT::f32 || ExtVT == MVT::f64 ||
6991 (ExtVT == MVT::i64 && Subtarget->is64Bit())) {
6992 if (VT.is256BitVector() || VT.is512BitVector()) {
6993 SDValue ZeroVec = getZeroVector(VT, Subtarget, DAG, dl);
6994 return DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, ZeroVec,
6995 Item, DAG.getIntPtrConstant(0));
6997 assert(VT.is128BitVector() && "Expected an SSE value type!");
6998 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Item);
6999 // Turn it into a MOVL (i.e. movss, movsd, or movd) to a zero vector.
7000 return getShuffleVectorZeroOrUndef(Item, 0, true, Subtarget, DAG);
7003 if (ExtVT == MVT::i16 || ExtVT == MVT::i8) {
7004 Item = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, Item);
7005 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32, Item);
7006 if (VT.is256BitVector()) {
7007 SDValue ZeroVec = getZeroVector(MVT::v8i32, Subtarget, DAG, dl);
7008 Item = Insert128BitVector(ZeroVec, Item, 0, DAG, dl);
7010 assert(VT.is128BitVector() && "Expected an SSE value type!");
7011 Item = getShuffleVectorZeroOrUndef(Item, 0, true, Subtarget, DAG);
7013 return DAG.getNode(ISD::BITCAST, dl, VT, Item);
7017 // Is it a vector logical left shift?
7018 if (NumElems == 2 && Idx == 1 &&
7019 X86::isZeroNode(Op.getOperand(0)) &&
7020 !X86::isZeroNode(Op.getOperand(1))) {
7021 unsigned NumBits = VT.getSizeInBits();
7022 return getVShift(true, VT,
7023 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl,
7024 VT, Op.getOperand(1)),
7025 NumBits/2, DAG, *this, dl);
7028 if (IsAllConstants) // Otherwise, it's better to do a constpool load.
7031 // Otherwise, if this is a vector with i32 or f32 elements, and the element
7032 // is a non-constant being inserted into an element other than the low one,
7033 // we can't use a constant pool load. Instead, use SCALAR_TO_VECTOR (aka
7034 // movd/movss) to move this into the low element, then shuffle it into place.
7036 if (EVTBits == 32) {
7037 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Item);
7039 // If using the new shuffle lowering, just directly insert this.
7040 if (ExperimentalVectorShuffleLowering)
7041 return getShuffleVectorZeroOrUndef(Item, Idx, NumZero > 0, Subtarget, DAG);
7043 // Turn it into a shuffle of zero and zero-extended scalar to vector.
7044 Item = getShuffleVectorZeroOrUndef(Item, 0, NumZero > 0, Subtarget, DAG);
7045 SmallVector<int, 8> MaskVec;
7046 for (unsigned i = 0; i != NumElems; ++i)
7047 MaskVec.push_back(i == Idx ? 0 : 1);
7048 return DAG.getVectorShuffle(VT, dl, Item, DAG.getUNDEF(VT), &MaskVec[0]);
7052 // Splat is obviously ok. Let legalizer expand it to a shuffle.
7053 if (Values.size() == 1) {
7054 if (EVTBits == 32) {
7055 // Instead of a shuffle like this:
7056 // shuffle (scalar_to_vector (load (ptr + 4))), undef, <0, 0, 0, 0>
7057 // Check if it's possible to issue this instead.
7058 // shuffle (vload ptr), undef, <1, 1, 1, 1>
7059 unsigned Idx = countTrailingZeros(NonZeros);
7060 SDValue Item = Op.getOperand(Idx);
7061 if (Op.getNode()->isOnlyUserOf(Item.getNode()))
7062 return LowerAsSplatVectorLoad(Item, VT, dl, DAG);
7067 // A vector full of immediates; various special cases are already
7068 // handled, so this is best done with a single constant-pool load.
7072 // For AVX-length vectors, see if we can use a vector load to get all of the
7073 // elements, otherwise build the individual 128-bit pieces and use
7074 // shuffles to put them in place.
7075 if (VT.is256BitVector() || VT.is512BitVector()) {
7076 SmallVector<SDValue, 64> V(Op->op_begin(), Op->op_begin() + NumElems);
7078 // Check for a build vector of consecutive loads.
7079 if (SDValue LD = EltsFromConsecutiveLoads(VT, V, dl, DAG, false))
7082 EVT HVT = EVT::getVectorVT(*DAG.getContext(), ExtVT, NumElems/2);
7084 // Build both the lower and upper subvector.
7085 SDValue Lower = DAG.getNode(ISD::BUILD_VECTOR, dl, HVT,
7086 makeArrayRef(&V[0], NumElems/2));
7087 SDValue Upper = DAG.getNode(ISD::BUILD_VECTOR, dl, HVT,
7088 makeArrayRef(&V[NumElems / 2], NumElems/2));
7090 // Recreate the wider vector with the lower and upper part.
7091 if (VT.is256BitVector())
7092 return Concat128BitVectors(Lower, Upper, VT, NumElems, DAG, dl);
7093 return Concat256BitVectors(Lower, Upper, VT, NumElems, DAG, dl);
7096 // Let legalizer expand 2-wide build_vectors.
7097 if (EVTBits == 64) {
7098 if (NumNonZero == 1) {
7099 // One half is zero or undef.
7100 unsigned Idx = countTrailingZeros(NonZeros);
7101 SDValue V2 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT,
7102 Op.getOperand(Idx));
7103 return getShuffleVectorZeroOrUndef(V2, Idx, true, Subtarget, DAG);
7108 // If element VT is < 32 bits, convert it to inserts into a zero vector.
7109 if (EVTBits == 8 && NumElems == 16) {
7110 SDValue V = LowerBuildVectorv16i8(Op, NonZeros,NumNonZero,NumZero, DAG,
7112 if (V.getNode()) return V;
7115 if (EVTBits == 16 && NumElems == 8) {
7116 SDValue V = LowerBuildVectorv8i16(Op, NonZeros,NumNonZero,NumZero, DAG,
7118 if (V.getNode()) return V;
7121 // If element VT is == 32 bits and has 4 elems, try to generate an INSERTPS
7122 if (EVTBits == 32 && NumElems == 4) {
7123 SDValue V = LowerBuildVectorv4x32(Op, DAG, Subtarget, *this);
7128 // If element VT is == 32 bits, turn it into a number of shuffles.
7129 SmallVector<SDValue, 8> V(NumElems);
7130 if (NumElems == 4 && NumZero > 0) {
7131 for (unsigned i = 0; i < 4; ++i) {
7132 bool isZero = !(NonZeros & (1 << i));
7134 V[i] = getZeroVector(VT, Subtarget, DAG, dl);
7136 V[i] = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(i));
7139 for (unsigned i = 0; i < 2; ++i) {
7140 switch ((NonZeros & (0x3 << i*2)) >> (i*2)) {
7143 V[i] = V[i*2]; // Must be a zero vector.
7146 V[i] = getMOVL(DAG, dl, VT, V[i*2+1], V[i*2]);
7149 V[i] = getMOVL(DAG, dl, VT, V[i*2], V[i*2+1]);
7152 V[i] = getUnpackl(DAG, dl, VT, V[i*2], V[i*2+1]);
7157 bool Reverse1 = (NonZeros & 0x3) == 2;
7158 bool Reverse2 = ((NonZeros & (0x3 << 2)) >> 2) == 2;
7162 static_cast<int>(Reverse2 ? NumElems+1 : NumElems),
7163 static_cast<int>(Reverse2 ? NumElems : NumElems+1)
7165 return DAG.getVectorShuffle(VT, dl, V[0], V[1], &MaskVec[0]);
7168 if (Values.size() > 1 && VT.is128BitVector()) {
7169 // Check for a build vector of consecutive loads.
7170 for (unsigned i = 0; i < NumElems; ++i)
7171 V[i] = Op.getOperand(i);
7173 // Check for elements which are consecutive loads.
7174 SDValue LD = EltsFromConsecutiveLoads(VT, V, dl, DAG, false);
7178 // Check for a build vector from mostly shuffle plus few inserting.
7179 SDValue Sh = buildFromShuffleMostly(Op, DAG);
7183 // For SSE 4.1, use insertps to put the high elements into the low element.
7184 if (Subtarget->hasSSE41()) {
7186 if (Op.getOperand(0).getOpcode() != ISD::UNDEF)
7187 Result = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(0));
7189 Result = DAG.getUNDEF(VT);
7191 for (unsigned i = 1; i < NumElems; ++i) {
7192 if (Op.getOperand(i).getOpcode() == ISD::UNDEF) continue;
7193 Result = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Result,
7194 Op.getOperand(i), DAG.getIntPtrConstant(i));
7199 // Otherwise, expand into a number of unpckl*; start by extending each of
7200 // our (non-undef) elements to the full vector width with the element in the
7201 // bottom slot of the vector (which generates no code for SSE).
7202 for (unsigned i = 0; i < NumElems; ++i) {
7203 if (Op.getOperand(i).getOpcode() != ISD::UNDEF)
7204 V[i] = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(i));
7206 V[i] = DAG.getUNDEF(VT);
7209 // Next, we iteratively mix elements, e.g. for v4f32:
7210 // Step 1: unpcklps 0, 2 ==> X: <?, ?, 2, 0>
7211 // : unpcklps 1, 3 ==> Y: <?, ?, 3, 1>
7212 // Step 2: unpcklps X, Y ==> <3, 2, 1, 0>
7213 unsigned EltStride = NumElems >> 1;
7214 while (EltStride != 0) {
7215 for (unsigned i = 0; i < EltStride; ++i) {
7216 // If V[i+EltStride] is undef and this is the first round of mixing,
7217 // then it is safe to just drop this shuffle: V[i] is already in the
7218 // right place, the one element (since it's the first round) being
7219 // inserted as undef can be dropped. This isn't safe for successive
7220 // rounds because they will permute elements within both vectors.
7221 if (V[i+EltStride].getOpcode() == ISD::UNDEF &&
7222 EltStride == NumElems/2)
7225 V[i] = getUnpackl(DAG, dl, VT, V[i], V[i + EltStride]);
7234 // LowerAVXCONCAT_VECTORS - 256-bit AVX can use the vinsertf128 instruction
7235 // to create 256-bit vectors from two other 128-bit ones.
7236 static SDValue LowerAVXCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) {
7238 MVT ResVT = Op.getSimpleValueType();
7240 assert((ResVT.is256BitVector() ||
7241 ResVT.is512BitVector()) && "Value type must be 256-/512-bit wide");
7243 SDValue V1 = Op.getOperand(0);
7244 SDValue V2 = Op.getOperand(1);
7245 unsigned NumElems = ResVT.getVectorNumElements();
7246 if(ResVT.is256BitVector())
7247 return Concat128BitVectors(V1, V2, ResVT, NumElems, DAG, dl);
7249 if (Op.getNumOperands() == 4) {
7250 MVT HalfVT = MVT::getVectorVT(ResVT.getScalarType(),
7251 ResVT.getVectorNumElements()/2);
7252 SDValue V3 = Op.getOperand(2);
7253 SDValue V4 = Op.getOperand(3);
7254 return Concat256BitVectors(Concat128BitVectors(V1, V2, HalfVT, NumElems/2, DAG, dl),
7255 Concat128BitVectors(V3, V4, HalfVT, NumElems/2, DAG, dl), ResVT, NumElems, DAG, dl);
7257 return Concat256BitVectors(V1, V2, ResVT, NumElems, DAG, dl);
7260 static SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) {
7261 MVT LLVM_ATTRIBUTE_UNUSED VT = Op.getSimpleValueType();
7262 assert((VT.is256BitVector() && Op.getNumOperands() == 2) ||
7263 (VT.is512BitVector() && (Op.getNumOperands() == 2 ||
7264 Op.getNumOperands() == 4)));
7266 // AVX can use the vinsertf128 instruction to create 256-bit vectors
7267 // from two other 128-bit ones.
7269 // 512-bit vector may contain 2 256-bit vectors or 4 128-bit vectors
7270 return LowerAVXCONCAT_VECTORS(Op, DAG);
7274 //===----------------------------------------------------------------------===//
7275 // Vector shuffle lowering
7277 // This is an experimental code path for lowering vector shuffles on x86. It is
7278 // designed to handle arbitrary vector shuffles and blends, gracefully
7279 // degrading performance as necessary. It works hard to recognize idiomatic
7280 // shuffles and lower them to optimal instruction patterns without leaving
7281 // a framework that allows reasonably efficient handling of all vector shuffle patterns.
7283 //===----------------------------------------------------------------------===//
7285 /// \brief Tiny helper function to identify a no-op mask.
7287 /// This is a somewhat boring predicate function. It checks whether the mask
7288 /// array input, which is assumed to be a single-input shuffle mask of the kind
7289 /// used by the X86 shuffle instructions (not a fully general
7290 /// ShuffleVectorSDNode mask) requires any shuffles to occur. Both undef and an
7291 /// in-place shuffle are 'no-op's.
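/// For example, for a 4-element mask, <-1, 1, 2, -1> is a no-op because every
/// defined element stays in place, while <1, 0, 2, 3> is not.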
7292 static bool isNoopShuffleMask(ArrayRef<int> Mask) {
7293 for (int i = 0, Size = Mask.size(); i < Size; ++i)
7294 if (Mask[i] != -1 && Mask[i] != i)
7299 /// \brief Helper function to classify a mask as a single-input mask.
7301 /// This isn't a generic single-input test because in the vector shuffle
7302 /// lowering we canonicalize single inputs to be the first input operand. This
7303 /// means we can more quickly test for a single input by only checking whether
7304 /// an input from the second operand exists. We also assume that the size of
7305 /// mask corresponds to the size of the input vectors which isn't true in the
7306 /// fully general case.
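/// For example, with 4-element inputs the mask <0, -1, 3, 2> only references
/// the first input and is treated as single-input, while <0, 5, 3, 2> is not
/// because index 5 selects an element of the second input.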
7307 static bool isSingleInputShuffleMask(ArrayRef<int> Mask) {
7309 if (M >= (int)Mask.size())
7314 /// \brief Test whether there are elements crossing 128-bit lanes in this shuffle mask.
7317 /// X86 divides up its shuffles into in-lane and cross-lane shuffle operations
7318 /// and we routinely test for these.
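/// For example, for v8f32 the mask <0, 1, 2, 3, 4, 5, 6, 7> stays within its
/// 128-bit lanes, while <4, 1, 2, 3, 0, 5, 6, 7> is lane-crossing because
/// elements 0 and 4 trade places between the low and high lane.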
7319 static bool is128BitLaneCrossingShuffleMask(MVT VT, ArrayRef<int> Mask) {
7320 int LaneSize = 128 / VT.getScalarSizeInBits();
7321 int Size = Mask.size();
7322 for (int i = 0; i < Size; ++i)
7323 if (Mask[i] >= 0 && (Mask[i] % Size) / LaneSize != i / LaneSize)
7328 /// \brief Test whether a shuffle mask is equivalent within each 128-bit lane.
7330 /// This checks a shuffle mask to see if it is performing the same
7331 /// 128-bit lane-relative shuffle in each 128-bit lane. This trivially implies
7332 /// that it is also not lane-crossing. It may however involve a blend from the
7333 /// same lane of a second vector.
7335 /// The specific repeated shuffle mask is populated in \p RepeatedMask, as it is
7336 /// non-trivial to compute in the face of undef lanes. The representation is
7337 /// *not* suitable for use with existing 128-bit shuffles as it will contain
7338 /// entries from both V1 and V2 inputs to the wider mask.
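/// As an illustrative v8f32 sketch, the mask <0, 8, 1, 9, 4, 12, 5, 13>
/// performs the same lane-relative shuffle in both 128-bit lanes and yields a
/// \p RepeatedMask of <0, 8, 1, 9>, where the V2 entries are encoded relative
/// to the width of the original mask rather than the lane width.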
7340 is128BitLaneRepeatedShuffleMask(MVT VT, ArrayRef<int> Mask,
7341 SmallVectorImpl<int> &RepeatedMask) {
7342 int LaneSize = 128 / VT.getScalarSizeInBits();
7343 RepeatedMask.resize(LaneSize, -1);
7344 int Size = Mask.size();
7345 for (int i = 0; i < Size; ++i) {
7348 if ((Mask[i] % Size) / LaneSize != i / LaneSize)
7349 // This entry crosses lanes, so there is no way to model this shuffle.
7352 // Ok, handle the in-lane shuffles by detecting if and when they repeat.
7353 if (RepeatedMask[i % LaneSize] == -1)
7354 // This is the first non-undef entry in this slot of a 128-bit lane.
7355 RepeatedMask[i % LaneSize] =
7356 Mask[i] < Size ? Mask[i] % LaneSize : Mask[i] % LaneSize + Size;
7357 else if (RepeatedMask[i % LaneSize] + (i / LaneSize) * LaneSize != Mask[i])
7358 // Found a mismatch with the repeated mask.
7364 /// \brief Base case helper for testing a single mask element.
7365 static bool isShuffleEquivalentImpl(SDValue V1, SDValue V2,
7366 BuildVectorSDNode *BV1,
7367 BuildVectorSDNode *BV2, ArrayRef<int> Mask,
7369 int Size = Mask.size();
7370 if (Mask[i] != -1 && Mask[i] != Arg) {
7371 auto *MaskBV = Mask[i] < Size ? BV1 : BV2;
7372 auto *ArgsBV = Arg < Size ? BV1 : BV2;
7373 if (!MaskBV || !ArgsBV ||
7374 MaskBV->getOperand(Mask[i] % Size) != ArgsBV->getOperand(Arg % Size))
7380 /// \brief Recursive helper to peel off and test each mask element.
7381 template <typename... Ts>
7382 static bool isShuffleEquivalentImpl(SDValue V1, SDValue V2,
7383 BuildVectorSDNode *BV1,
7384 BuildVectorSDNode *BV2, ArrayRef<int> Mask,
7385 int i, int Arg, Ts... Args) {
7386 if (!isShuffleEquivalentImpl(V1, V2, BV1, BV2, Mask, i, Arg))
7389 return isShuffleEquivalentImpl(V1, V2, BV1, BV2, Mask, i + 1, Args...);
7392 /// \brief Checks whether a shuffle mask is equivalent to an explicit list of arguments.
7395 /// This is a fast way to test a shuffle mask against a fixed pattern:
7397 ///   if (isShuffleEquivalent(V1, V2, Mask, 3, 2, 1, 0)) { ... }
7399 /// It returns true if the mask is exactly as wide as the argument list, and
7400 /// each element of the mask is either -1 (signifying undef) or the value given
7401 /// in the argument.
7402 template <typename... Ts>
7403 static bool isShuffleEquivalent(SDValue V1, SDValue V2, ArrayRef<int> Mask,
7405 if (Mask.size() != sizeof...(Args))
7408 // If the values are build vectors, we can look through them to find
7409 // equivalent inputs that make the shuffles equivalent.
7410 auto *BV1 = dyn_cast<BuildVectorSDNode>(V1);
7411 auto *BV2 = dyn_cast<BuildVectorSDNode>(V2);
7413 // Recursively peel off arguments and test them against the mask.
7414 return isShuffleEquivalentImpl(V1, V2, BV1, BV2, Mask, 0, Args...);
7417 /// \brief Get a 4-lane 8-bit shuffle immediate for a mask.
7419 /// This helper function produces an 8-bit shuffle immediate corresponding to
7420 /// the ubiquitous shuffle encoding scheme used in x86 instructions for
7421 /// shuffling 4 lanes. It can be used with most of the PSHUF instructions (PSHUFD, PSHUFLW and PSHUFHW, for example).
7424 /// NB: We rely heavily on "undef" masks preserving the input lane.
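/// For example, the mask <3, 2, 1, 0> produces the immediate 0b00011011
/// (0x1B), while <-1, 1, -1, 3> keeps the undef lanes in place and produces
/// 0b11100100 (0xE4), the same selection an identity mask would give.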
7425 static SDValue getV4X86ShuffleImm8ForMask(ArrayRef<int> Mask,
7426 SelectionDAG &DAG) {
7427 assert(Mask.size() == 4 && "Only 4-lane shuffle masks");
7428 assert(Mask[0] >= -1 && Mask[0] < 4 && "Out of bound mask element!");
7429 assert(Mask[1] >= -1 && Mask[1] < 4 && "Out of bound mask element!");
7430 assert(Mask[2] >= -1 && Mask[2] < 4 && "Out of bound mask element!");
7431 assert(Mask[3] >= -1 && Mask[3] < 4 && "Out of bound mask element!");
7434 Imm |= (Mask[0] == -1 ? 0 : Mask[0]) << 0;
7435 Imm |= (Mask[1] == -1 ? 1 : Mask[1]) << 2;
7436 Imm |= (Mask[2] == -1 ? 2 : Mask[2]) << 4;
7437 Imm |= (Mask[3] == -1 ? 3 : Mask[3]) << 6;
7438 return DAG.getConstant(Imm, MVT::i8);
7441 /// \brief Try to emit a blend instruction for a shuffle using bit math.
7443 /// This is used as a fallback approach when first class blend instructions are
7444 /// unavailable. Currently it is only suitable for integer vectors, but could
7445 /// be generalized for floating point vectors if desirable.
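/// As a sketch for v4i32, the blend mask <0, 5, 2, 7> keeps elements 0 and 2
/// of V1 and elements 1 and 3 of V2, so it can be emitted as
///   (V1 & <-1, 0, -1, 0>) | (V2 & ~<-1, 0, -1, 0>)
/// using an AND, an ANDNP and an OR.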
7446 static SDValue lowerVectorShuffleAsBitBlend(SDLoc DL, MVT VT, SDValue V1,
7447 SDValue V2, ArrayRef<int> Mask,
7448 SelectionDAG &DAG) {
7449 assert(VT.isInteger() && "Only supports integer vector types!");
7450 MVT EltVT = VT.getScalarType();
7451 int NumEltBits = EltVT.getSizeInBits();
7452 SDValue Zero = DAG.getConstant(0, EltVT);
7453 SDValue AllOnes = DAG.getConstant(APInt::getAllOnesValue(NumEltBits), EltVT);
7454 SmallVector<SDValue, 16> MaskOps;
7455 for (int i = 0, Size = Mask.size(); i < Size; ++i) {
7456 if (Mask[i] != -1 && Mask[i] != i && Mask[i] != i + Size)
7457 return SDValue(); // Shuffled input!
7458 MaskOps.push_back(Mask[i] < Size ? AllOnes : Zero);
7461 SDValue V1Mask = DAG.getNode(ISD::BUILD_VECTOR, DL, VT, MaskOps);
7462 V1 = DAG.getNode(ISD::AND, DL, VT, V1, V1Mask);
7463 // We have to cast V2 around.
7464 MVT MaskVT = MVT::getVectorVT(MVT::i64, VT.getSizeInBits() / 64);
7465 V2 = DAG.getNode(ISD::BITCAST, DL, VT,
7466 DAG.getNode(X86ISD::ANDNP, DL, MaskVT,
7467 DAG.getNode(ISD::BITCAST, DL, MaskVT, V1Mask),
7468 DAG.getNode(ISD::BITCAST, DL, MaskVT, V2)));
7469 return DAG.getNode(ISD::OR, DL, VT, V1, V2);
7472 /// \brief Try to emit a blend instruction for a shuffle.
7474 /// This doesn't do any checks for the availability of instructions for blending
7475 /// these values. It relies on the availability of the X86ISD::BLENDI pattern to
7476 /// be matched in the backend with the type given. What it does check for is
7477 /// that the shuffle mask is in fact a blend.
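/// For example, the v4f32 mask <0, 5, 2, 7> takes elements 1 and 3 from V2 and
/// is emitted as (X86ISD::BLENDI V1, V2, 0b1010).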
7478 static SDValue lowerVectorShuffleAsBlend(SDLoc DL, MVT VT, SDValue V1,
7479 SDValue V2, ArrayRef<int> Mask,
7480 const X86Subtarget *Subtarget,
7481 SelectionDAG &DAG) {
7482 unsigned BlendMask = 0;
7483 for (int i = 0, Size = Mask.size(); i < Size; ++i) {
7484 if (Mask[i] >= Size) {
7485 if (Mask[i] != i + Size)
7486 return SDValue(); // Shuffled V2 input!
7487 BlendMask |= 1u << i;
7490 if (Mask[i] >= 0 && Mask[i] != i)
7491 return SDValue(); // Shuffled V1 input!
7493 switch (VT.SimpleTy) {
7498 return DAG.getNode(X86ISD::BLENDI, DL, VT, V1, V2,
7499 DAG.getConstant(BlendMask, MVT::i8));
7503 assert(Subtarget->hasAVX2() && "256-bit integer blends require AVX2!");
7507 // If we have AVX2 it is faster to use VPBLENDD when the shuffle fits into
7508 // that instruction.
7509 if (Subtarget->hasAVX2()) {
7510 // Scale the blend by the number of 32-bit dwords per element.
7511 int Scale = VT.getScalarSizeInBits() / 32;
7513 for (int i = 0, Size = Mask.size(); i < Size; ++i)
7514 if (Mask[i] >= Size)
7515 for (int j = 0; j < Scale; ++j)
7516 BlendMask |= 1u << (i * Scale + j);
7518 MVT BlendVT = VT.getSizeInBits() > 128 ? MVT::v8i32 : MVT::v4i32;
7519 V1 = DAG.getNode(ISD::BITCAST, DL, BlendVT, V1);
7520 V2 = DAG.getNode(ISD::BITCAST, DL, BlendVT, V2);
7521 return DAG.getNode(ISD::BITCAST, DL, VT,
7522 DAG.getNode(X86ISD::BLENDI, DL, BlendVT, V1, V2,
7523 DAG.getConstant(BlendMask, MVT::i8)));
7527 // For integer shuffles we need to expand the mask and cast the inputs to
7528 // v8i16s prior to blending.
7529 int Scale = 8 / VT.getVectorNumElements();
7531 for (int i = 0, Size = Mask.size(); i < Size; ++i)
7532 if (Mask[i] >= Size)
7533 for (int j = 0; j < Scale; ++j)
7534 BlendMask |= 1u << (i * Scale + j);
7536 V1 = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, V1);
7537 V2 = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, V2);
7538 return DAG.getNode(ISD::BITCAST, DL, VT,
7539 DAG.getNode(X86ISD::BLENDI, DL, MVT::v8i16, V1, V2,
7540 DAG.getConstant(BlendMask, MVT::i8)));
7544 assert(Subtarget->hasAVX2() && "256-bit integer blends require AVX2!");
7545 SmallVector<int, 8> RepeatedMask;
7546 if (is128BitLaneRepeatedShuffleMask(MVT::v16i16, Mask, RepeatedMask)) {
7547 // We can lower these with PBLENDW which is mirrored across 128-bit lanes.
7548 assert(RepeatedMask.size() == 8 && "Repeated mask size doesn't match!");
7550 for (int i = 0; i < 8; ++i)
7551 if (RepeatedMask[i] >= 16)
7552 BlendMask |= 1u << i;
7553 return DAG.getNode(X86ISD::BLENDI, DL, MVT::v16i16, V1, V2,
7554 DAG.getConstant(BlendMask, MVT::i8));
7560 // Scale the blend by the number of bytes per element.
7561 int Scale = VT.getScalarSizeInBits() / 8;
7563 // This form of blend is always done on bytes. Compute the byte vector type.
7565 MVT BlendVT = MVT::getVectorVT(MVT::i8, VT.getSizeInBits() / 8);
7567 // Compute the VSELECT mask. Note that VSELECT is really confusing in the
7568 // mix of LLVM's code generator and the x86 backend. We tell the code
7569 // generator that boolean values in the elements of an x86 vector register
7570 // are -1 for true and 0 for false. We then use the LLVM semantics of 'true'
7571 // mapping a select to operand #1, and 'false' mapping to operand #2. The
7572 // reality in x86 is that vector masks (pre-AVX-512) use only the high bit
7573 // of the element (the remaining are ignored) and 0 in that high bit would
7574 // mean operand #1 while 1 in the high bit would mean operand #2. So while
7575 // the LLVM model for boolean values in vector elements gets the relevant
7576 // bit set, it is set backwards and overconstrained relative to x86's actual model.
7578 SmallVector<SDValue, 32> VSELECTMask;
7579 for (int i = 0, Size = Mask.size(); i < Size; ++i)
7580 for (int j = 0; j < Scale; ++j)
7581 VSELECTMask.push_back(
7582 Mask[i] < 0 ? DAG.getUNDEF(MVT::i8)
7583 : DAG.getConstant(Mask[i] < Size ? -1 : 0, MVT::i8));
7585 V1 = DAG.getNode(ISD::BITCAST, DL, BlendVT, V1);
7586 V2 = DAG.getNode(ISD::BITCAST, DL, BlendVT, V2);
7588 ISD::BITCAST, DL, VT,
7589 DAG.getNode(ISD::VSELECT, DL, BlendVT,
7590 DAG.getNode(ISD::BUILD_VECTOR, DL, BlendVT, VSELECTMask),
7595 llvm_unreachable("Not a supported integer vector type!");
7599 /// \brief Try to lower as a blend of elements from two inputs followed by
7600 /// a single-input permutation.
7602 /// This matches the pattern where we can blend elements from two inputs and
7603 /// then reduce the shuffle to a single-input permutation.
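/// For example (a v4i32 sketch), the mask <1, 4, 3, 6> is not itself a blend,
/// but it can be rewritten as the blend <4, 1, 6, 3> of the two inputs
/// followed by the single-input permute <1, 0, 3, 2>.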
7604 static SDValue lowerVectorShuffleAsBlendAndPermute(SDLoc DL, MVT VT, SDValue V1,
7607 SelectionDAG &DAG) {
7608 // We build up the blend mask while checking whether a blend is a viable way
7609 // to reduce the shuffle.
7610 SmallVector<int, 32> BlendMask(Mask.size(), -1);
7611 SmallVector<int, 32> PermuteMask(Mask.size(), -1);
7613 for (int i = 0, Size = Mask.size(); i < Size; ++i) {
7617 assert(Mask[i] < Size * 2 && "Shuffle input is out of bounds.");
7619 if (BlendMask[Mask[i] % Size] == -1)
7620 BlendMask[Mask[i] % Size] = Mask[i];
7621 else if (BlendMask[Mask[i] % Size] != Mask[i])
7622 return SDValue(); // Can't blend in the needed input!
7624 PermuteMask[i] = Mask[i] % Size;
7627 SDValue V = DAG.getVectorShuffle(VT, DL, V1, V2, BlendMask);
7628 return DAG.getVectorShuffle(VT, DL, V, DAG.getUNDEF(VT), PermuteMask);
7631 /// \brief Generic routine to decompose a shuffle and blend into independent
7632 /// blends and permutes.
7634 /// This matches the extremely common pattern for handling combined
7635 /// shuffle+blend operations on newer X86 ISAs where we have very fast blend
7636 /// operations. It will try to pick the best arrangement of shuffles and blends.
7638 static SDValue lowerVectorShuffleAsDecomposedShuffleBlend(SDLoc DL, MVT VT,
7642 SelectionDAG &DAG) {
7643 // Shuffle the input elements into the desired positions in V1 and V2 and
7644 // blend them together.
7645 SmallVector<int, 32> V1Mask(Mask.size(), -1);
7646 SmallVector<int, 32> V2Mask(Mask.size(), -1);
7647 SmallVector<int, 32> BlendMask(Mask.size(), -1);
7648 for (int i = 0, Size = Mask.size(); i < Size; ++i)
7649 if (Mask[i] >= 0 && Mask[i] < Size) {
7650 V1Mask[i] = Mask[i];
7652 } else if (Mask[i] >= Size) {
7653 V2Mask[i] = Mask[i] - Size;
7654 BlendMask[i] = i + Size;
7657 // Try to lower with the simpler initial blend strategy unless one of the
7658 // input shuffles would be a no-op. We prefer to shuffle inputs as the
7659 // shuffle may be able to fold with a load or other benefit. However, when
7660 // we'll have to do 2x as many shuffles in order to achieve this, blending
7661 // first is a better strategy.
7662 if (!isNoopShuffleMask(V1Mask) && !isNoopShuffleMask(V2Mask))
7663 if (SDValue BlendPerm =
7664 lowerVectorShuffleAsBlendAndPermute(DL, VT, V1, V2, Mask, DAG))
7667 V1 = DAG.getVectorShuffle(VT, DL, V1, DAG.getUNDEF(VT), V1Mask);
7668 V2 = DAG.getVectorShuffle(VT, DL, V2, DAG.getUNDEF(VT), V2Mask);
7669 return DAG.getVectorShuffle(VT, DL, V1, V2, BlendMask);
7672 /// \brief Try to lower a vector shuffle as a byte rotation.
7674 /// SSSE3 has a generic PALIGNR instruction in x86 that will do an arbitrary
7675 /// byte-rotation of the concatenation of two vectors; pre-SSSE3 can use
7676 /// a PSRLDQ/PSLLDQ/POR pattern to get a similar effect. This routine will
7677 /// try to generically lower a vector shuffle through such a pattern. It
7678 /// does not check for the profitability of lowering either as PALIGNR or
7679 /// PSRLDQ/PSLLDQ/POR, only whether the mask is valid to lower in that form.
7680 /// This matches shuffle vectors that look like:
7682 /// v8i16 [11, 12, 13, 14, 15, 0, 1, 2]
7684 /// Essentially it concatenates V1 and V2, shifts right by some number of
7685 /// elements, and takes the low elements as the result. Note that while this is
7686 /// specified as a *right shift* because x86 is little-endian, it is a *left
7687 /// rotate* of the vector lanes.
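/// As an illustrative sketch, for the v8i16 mask shown above the rotation is 3
/// elements, so with SSSE3 this lowers to a single palignr of the two inputs
/// by 3 * 2 == 6 bytes; without SSSE3 the same result is built from a PSLLDQ
/// by 10 bytes, a PSRLDQ by 6 bytes and a POR.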
7688 static SDValue lowerVectorShuffleAsByteRotate(SDLoc DL, MVT VT, SDValue V1,
7691 const X86Subtarget *Subtarget,
7692 SelectionDAG &DAG) {
7693 assert(!isNoopShuffleMask(Mask) && "We shouldn't lower no-op shuffles!");
7695 int NumElts = Mask.size();
7696 int NumLanes = VT.getSizeInBits() / 128;
7697 int NumLaneElts = NumElts / NumLanes;
7699 // We need to detect various ways of spelling a rotation:
7700 // [11, 12, 13, 14, 15, 0, 1, 2]
7701 // [-1, 12, 13, 14, -1, -1, 1, -1]
7702 // [-1, -1, -1, -1, -1, -1, 1, 2]
7703 // [ 3, 4, 5, 6, 7, 8, 9, 10]
7704 // [-1, 4, 5, 6, -1, -1, 9, -1]
7705 // [-1, 4, 5, 6, -1, -1, -1, -1]
7708 for (int l = 0; l < NumElts; l += NumLaneElts) {
7709 for (int i = 0; i < NumLaneElts; ++i) {
7710 if (Mask[l + i] == -1)
7712 assert(Mask[l + i] >= 0 && "Only -1 is a valid negative mask element!");
7714 // Get the mod-Size index and lane correct it.
7715 int LaneIdx = (Mask[l + i] % NumElts) - l;
7716 // Make sure it was in this lane.
7717 if (LaneIdx < 0 || LaneIdx >= NumLaneElts)
7720 // Determine where a rotated vector would have started.
7721 int StartIdx = i - LaneIdx;
7723 // The identity rotation isn't interesting, stop.
7726 // If we found the tail of a vector the rotation must be the missing
7727 // front. If we found the head of a vector, it must be how much of the head.
7729 int CandidateRotation = StartIdx < 0 ? -StartIdx : NumLaneElts - StartIdx;
7732 Rotation = CandidateRotation;
7733 else if (Rotation != CandidateRotation)
7734 // The rotations don't match, so we can't match this mask.
7737 // Compute which value this mask is pointing at.
7738 SDValue MaskV = Mask[l + i] < NumElts ? V1 : V2;
7740 // Compute which of the two target values this index should be assigned
7741 // to. This reflects whether the high elements are remaining or the low
7742 // elements are remaining.
7743 SDValue &TargetV = StartIdx < 0 ? Hi : Lo;
7745 // Either set up this value if we've not encountered it before, or check
7746 // that it remains consistent.
7749 else if (TargetV != MaskV)
7750 // This may be a rotation, but it pulls from the inputs in some
7751 // unsupported interleaving.
7756 // Check that we successfully analyzed the mask, and normalize the results.
7757 assert(Rotation != 0 && "Failed to locate a viable rotation!");
7758 assert((Lo || Hi) && "Failed to find a rotated input vector!");
7764 // The actual rotate instruction rotates bytes, so we need to scale the
7765 // rotation based on how many bytes are in the vector lane.
7766 int Scale = 16 / NumLaneElts;
7768 // SSSE3 targets can use the palignr instruction.
7769 if (Subtarget->hasSSSE3()) {
7770 // Cast the inputs to i8 vector of correct length to match PALIGNR.
7771 MVT AlignVT = MVT::getVectorVT(MVT::i8, 16 * NumLanes);
7772 Lo = DAG.getNode(ISD::BITCAST, DL, AlignVT, Lo);
7773 Hi = DAG.getNode(ISD::BITCAST, DL, AlignVT, Hi);
7775 return DAG.getNode(ISD::BITCAST, DL, VT,
7776 DAG.getNode(X86ISD::PALIGNR, DL, AlignVT, Hi, Lo,
7777 DAG.getConstant(Rotation * Scale, MVT::i8)));
7780 assert(VT.getSizeInBits() == 128 &&
7781 "Rotate-based lowering only supports 128-bit lowering!");
7782 assert(Mask.size() <= 16 &&
7783 "Can shuffle at most 16 bytes in a 128-bit vector!");
7785 // Default SSE2 implementation
7786 int LoByteShift = 16 - Rotation * Scale;
7787 int HiByteShift = Rotation * Scale;
7789 // Cast the inputs to v2i64 to match PSLLDQ/PSRLDQ.
7790 Lo = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, Lo);
7791 Hi = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, Hi);
7793 SDValue LoShift = DAG.getNode(X86ISD::VSHLDQ, DL, MVT::v2i64, Lo,
7794 DAG.getConstant(LoByteShift, MVT::i8));
7795 SDValue HiShift = DAG.getNode(X86ISD::VSRLDQ, DL, MVT::v2i64, Hi,
7796 DAG.getConstant(HiByteShift, MVT::i8));
7797 return DAG.getNode(ISD::BITCAST, DL, VT,
7798 DAG.getNode(ISD::OR, DL, MVT::v2i64, LoShift, HiShift));
7801 /// \brief Compute whether each element of a shuffle is zeroable.
7803 /// A "zeroable" vector shuffle element is one which can be lowered to zero.
7804 /// Either it is an undef element in the shuffle mask, the element of the input
7805 /// referenced is undef, or the element of the input referenced is known to be
7806 /// zero. Many x86 shuffles can zero lanes cheaply and we often want to handle
7807 /// as many lanes with this technique as possible to simplify the remaining shuffle.
7809 static SmallBitVector computeZeroableShuffleElements(ArrayRef<int> Mask,
7810 SDValue V1, SDValue V2) {
7811 SmallBitVector Zeroable(Mask.size(), false);
7813 while (V1.getOpcode() == ISD::BITCAST)
7814 V1 = V1->getOperand(0);
7815 while (V2.getOpcode() == ISD::BITCAST)
7816 V2 = V2->getOperand(0);
7818 bool V1IsZero = ISD::isBuildVectorAllZeros(V1.getNode());
7819 bool V2IsZero = ISD::isBuildVectorAllZeros(V2.getNode());
7821 for (int i = 0, Size = Mask.size(); i < Size; ++i) {
7823 // Handle the easy cases.
7824 if (M < 0 || (M >= 0 && M < Size && V1IsZero) || (M >= Size && V2IsZero)) {
7829 // If this is an index into a build_vector node (which has the same number
7830 // of elements), dig out the input value and use it.
7831 SDValue V = M < Size ? V1 : V2;
7832 if (V.getOpcode() != ISD::BUILD_VECTOR || Size != (int)V.getNumOperands())
7835 SDValue Input = V.getOperand(M % Size);
7836 // The UNDEF opcode check really should be dead code here, but not quite
7837 // worth asserting on (it isn't invalid, just unexpected).
7838 if (Input.getOpcode() == ISD::UNDEF || X86::isZeroNode(Input))
7845 /// \brief Try to emit a bitmask instruction for a shuffle.
7847 /// This handles cases where we can model a blend exactly as a bitmask due to
7848 /// one of the inputs being zeroable.
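/// For example (a sketch), if V1 is a build_vector of all zeros, the v4i32
/// mask <0, 5, 2, 7> only lets elements 1 and 3 of V2 through, so the shuffle
/// can be emitted as (and V2, <0, -1, 0, -1>).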
7849 static SDValue lowerVectorShuffleAsBitMask(SDLoc DL, MVT VT, SDValue V1,
7850 SDValue V2, ArrayRef<int> Mask,
7851 SelectionDAG &DAG) {
7852 MVT EltVT = VT.getScalarType();
7853 int NumEltBits = EltVT.getSizeInBits();
7854 MVT IntEltVT = MVT::getIntegerVT(NumEltBits);
7855 SDValue Zero = DAG.getConstant(0, IntEltVT);
7856 SDValue AllOnes = DAG.getConstant(APInt::getAllOnesValue(NumEltBits), IntEltVT);
7857 if (EltVT.isFloatingPoint()) {
7858 Zero = DAG.getNode(ISD::BITCAST, DL, EltVT, Zero);
7859 AllOnes = DAG.getNode(ISD::BITCAST, DL, EltVT, AllOnes);
7861 SmallVector<SDValue, 16> VMaskOps(Mask.size(), Zero);
7862 SmallBitVector Zeroable = computeZeroableShuffleElements(Mask, V1, V2);
7864 for (int i = 0, Size = Mask.size(); i < Size; ++i) {
7867 if (Mask[i] % Size != i)
7868 return SDValue(); // Not a blend.
7870 V = Mask[i] < Size ? V1 : V2;
7871 else if (V != (Mask[i] < Size ? V1 : V2))
7872 return SDValue(); // Can only let one input through the mask.
7874 VMaskOps[i] = AllOnes;
7877 return SDValue(); // No non-zeroable elements!
7879 SDValue VMask = DAG.getNode(ISD::BUILD_VECTOR, DL, VT, VMaskOps);
7880 V = DAG.getNode(VT.isFloatingPoint()
7881 ? (unsigned) X86ISD::FAND : (unsigned) ISD::AND,
7886 /// \brief Try to lower a vector shuffle as a bit shift (shifts in zeros).
7888 /// Attempts to match a shuffle mask against the PSLL(W/D/Q/DQ) and
7889 /// PSRL(W/D/Q/DQ) SSE2 and AVX2 logical bit-shift instructions. The function
7890 /// matches elements from one of the input vectors shuffled to the left or
7891 /// right with zeroable elements 'shifted in'. It handles both the strictly
7892 /// bit-wise element shifts and the byte shift across an entire 128-bit double quad word.
7895 /// PSLL : (little-endian) left bit shift.
7896 /// [ zz, 0, zz, 2 ]
7897 /// [ -1, 4, zz, -1 ]
7898 /// PSRL : (little-endian) right bit shift.
7900 /// [ -1, -1, 7, zz]
7901 /// PSLLDQ : (little-endian) left byte shift
7902 /// [ zz, 0, 1, 2, 3, 4, 5, 6]
7903 /// [ zz, zz, -1, -1, 2, 3, 4, -1]
7904 /// [ zz, zz, zz, zz, zz, zz, -1, 1]
7905 /// PSRLDQ : (little-endian) right byte shift
7906 /// [ 5, 6, 7, zz, zz, zz, zz, zz]
7907 /// [ -1, 5, 6, 7, zz, zz, zz, zz]
7908 /// [ 1, 2, -1, -1, -1, -1, zz, zz]
7909 static SDValue lowerVectorShuffleAsShift(SDLoc DL, MVT VT, SDValue V1,
7910 SDValue V2, ArrayRef<int> Mask,
7911 SelectionDAG &DAG) {
7912 SmallBitVector Zeroable = computeZeroableShuffleElements(Mask, V1, V2);
7914 int Size = Mask.size();
7915 assert(Size == (int)VT.getVectorNumElements() && "Unexpected mask size");
7917 auto CheckZeros = [&](int Shift, int Scale, bool Left) {
7918 for (int i = 0; i < Size; i += Scale)
7919 for (int j = 0; j < Shift; ++j)
7920 if (!Zeroable[i + j + (Left ? 0 : (Scale - Shift))])
7926 auto MatchShift = [&](int Shift, int Scale, bool Left, SDValue V) {
7927 for (int i = 0; i != Size; i += Scale) {
7928 unsigned Pos = Left ? i + Shift : i;
7929 unsigned Low = Left ? i : i + Shift;
7930 unsigned Len = Scale - Shift;
7931 if (!isSequentialOrUndefInRange(Mask, Pos, Len,
7932 Low + (V == V1 ? 0 : Size)))
7936 int ShiftEltBits = VT.getScalarSizeInBits() * Scale;
7937 bool ByteShift = ShiftEltBits > 64;
7938 unsigned OpCode = Left ? (ByteShift ? X86ISD::VSHLDQ : X86ISD::VSHLI)
7939 : (ByteShift ? X86ISD::VSRLDQ : X86ISD::VSRLI);
7940 int ShiftAmt = Shift * VT.getScalarSizeInBits() / (ByteShift ? 8 : 1);
7942 // Normalize the scale for byte shifts to still produce an i64 element type.
7944 Scale = ByteShift ? Scale / 2 : Scale;
7946 // We need to round trip through the appropriate type for the shift.
7947 MVT ShiftSVT = MVT::getIntegerVT(VT.getScalarSizeInBits() * Scale);
7948 MVT ShiftVT = MVT::getVectorVT(ShiftSVT, Size / Scale);
7949 assert(DAG.getTargetLoweringInfo().isTypeLegal(ShiftVT) &&
7950 "Illegal integer vector type");
7951 V = DAG.getNode(ISD::BITCAST, DL, ShiftVT, V);
7953 V = DAG.getNode(OpCode, DL, ShiftVT, V, DAG.getConstant(ShiftAmt, MVT::i8));
7954 return DAG.getNode(ISD::BITCAST, DL, VT, V);
7957 // SSE/AVX supports logical shifts up to 64-bit integers - so we can just
7958 // keep doubling the size of the integer elements up to that. We can
7959 // then shift the elements of the integer vector by whole multiples of
7960 // their width within the elements of the larger integer vector. Test each
7961 // multiple to see if we can find a match with the moved element indices
7962 // and that the shifted in elements are all zeroable.
7963 for (int Scale = 2; Scale * VT.getScalarSizeInBits() <= 128; Scale *= 2)
7964 for (int Shift = 1; Shift != Scale; ++Shift)
7965 for (bool Left : {true, false})
7966 if (CheckZeros(Shift, Scale, Left))
7967 for (SDValue V : {V1, V2})
7968 if (SDValue Match = MatchShift(Shift, Scale, Left, V))
7975 /// \brief Lower a vector shuffle as a zero or any extension.
7977 /// Given a specific number of elements, element bit width, and extension
7978 /// stride, produce either a zero or any extension based on the available
7979 /// features of the subtarget.
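///
/// For example, with VT = v8i16 and Scale = 2, zero extending the low four
/// i16 elements to i32 is emitted as a single PMOVZX (X86ISD::VZEXT) node on
/// SSE4.1 targets, and as an unpack of the input against a zero vector
/// (PUNPCKLWD) on older targets.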
7980 static SDValue lowerVectorShuffleAsSpecificZeroOrAnyExtend(
7981 SDLoc DL, MVT VT, int Scale, bool AnyExt, SDValue InputV,
7982 const X86Subtarget *Subtarget, SelectionDAG &DAG) {
7983 assert(Scale > 1 && "Need a scale to extend.");
7984 int NumElements = VT.getVectorNumElements();
7985 int EltBits = VT.getScalarSizeInBits();
7986 assert((EltBits == 8 || EltBits == 16 || EltBits == 32) &&
7987 "Only 8, 16, and 32 bit elements can be extended.");
7988 assert(Scale * EltBits <= 64 && "Cannot zero extend past 64 bits.");
7990 // Found a valid zext mask! Try various lowering strategies based on the
7991 // input type and available ISA extensions.
7992 if (Subtarget->hasSSE41()) {
7993 MVT ExtVT = MVT::getVectorVT(MVT::getIntegerVT(EltBits * Scale),
7994 NumElements / Scale);
7995 return DAG.getNode(ISD::BITCAST, DL, VT,
7996 DAG.getNode(X86ISD::VZEXT, DL, ExtVT, InputV));
7999 // For any extends we can cheat for larger element sizes and use shuffle
8000 // instructions that can fold with a load and/or copy.
8001 if (AnyExt && EltBits == 32) {
8002 int PSHUFDMask[4] = {0, -1, 1, -1};
8004 ISD::BITCAST, DL, VT,
8005 DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32,
8006 DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, InputV),
8007 getV4X86ShuffleImm8ForMask(PSHUFDMask, DAG)));
8009 if (AnyExt && EltBits == 16 && Scale > 2) {
8010 int PSHUFDMask[4] = {0, -1, 0, -1};
8011 InputV = DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32,
8012 DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, InputV),
8013 getV4X86ShuffleImm8ForMask(PSHUFDMask, DAG));
8014 int PSHUFHWMask[4] = {1, -1, -1, -1};
8016 ISD::BITCAST, DL, VT,
8017 DAG.getNode(X86ISD::PSHUFHW, DL, MVT::v8i16,
8018 DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, InputV),
8019 getV4X86ShuffleImm8ForMask(PSHUFHWMask, DAG)));
8022 // If this would require more than 2 unpack instructions to expand, use
8023 // pshufb when available. We can only use more than 2 unpack instructions
8024 // when zero extending i8 elements which also makes it easier to use pshufb.
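// For example, when zero extending i8 elements to i64 (Scale == 8), the
// PSHUFB mask built below is [0, 0x80 x7, 1, 0x80 x7]: input bytes 0 and 1
// land at the start of each 64-bit lane and every other byte is zeroed.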
8025 if (Scale > 4 && EltBits == 8 && Subtarget->hasSSSE3()) {
8026 assert(NumElements == 16 && "Unexpected byte vector width!");
8027 SDValue PSHUFBMask[16];
8028 for (int i = 0; i < 16; ++i)
8030 DAG.getConstant((i % Scale == 0) ? i / Scale : 0x80, MVT::i8);
8031 InputV = DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, InputV);
8032 return DAG.getNode(ISD::BITCAST, DL, VT,
8033 DAG.getNode(X86ISD::PSHUFB, DL, MVT::v16i8, InputV,
8034 DAG.getNode(ISD::BUILD_VECTOR, DL,
8035 MVT::v16i8, PSHUFBMask)));
8038 // Otherwise emit a sequence of unpacks.
8040 MVT InputVT = MVT::getVectorVT(MVT::getIntegerVT(EltBits), NumElements);
8041 SDValue Ext = AnyExt ? DAG.getUNDEF(InputVT)
8042 : getZeroVector(InputVT, Subtarget, DAG, DL);
8043 InputV = DAG.getNode(ISD::BITCAST, DL, InputVT, InputV);
8044 InputV = DAG.getNode(X86ISD::UNPCKL, DL, InputVT, InputV, Ext);
8048 } while (Scale > 1);
8049 return DAG.getNode(ISD::BITCAST, DL, VT, InputV);
8052 /// \brief Try to lower a vector shuffle as a zero extension on any microarch.
8054 /// This routine will try to do everything in its power to cleverly lower
8055 /// a shuffle which happens to match the pattern of a zero extend. It doesn't
8056 /// check for the profitability of this lowering; it tries to aggressively
8057 /// match this pattern. It will use all of the micro-architectural details it
8058 /// can to emit an efficient lowering. It handles both blends with all-zero
8059 /// inputs to explicitly zero-extend and undef-lanes (sometimes undef due to
8060 /// masking out later).
8062 /// The reason we have dedicated lowering for zext-style shuffles is that they
8063 /// are both incredibly common and often quite performance sensitive.
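///
/// For example, a v4i32 shuffle with mask [0, Z, 1, Z], where the Z elements
/// are known zeroable (say, they come from a zero vector), is recognized with
/// Scale == 2 and lowered as a zero extension of the two low i32 elements of
/// the non-zero input into i64 elements.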
8064 static SDValue lowerVectorShuffleAsZeroOrAnyExtend(
8065 SDLoc DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
8066 const X86Subtarget *Subtarget, SelectionDAG &DAG) {
8067 SmallBitVector Zeroable = computeZeroableShuffleElements(Mask, V1, V2);
8069 int Bits = VT.getSizeInBits();
8070 int NumElements = VT.getVectorNumElements();
8071 assert(VT.getScalarSizeInBits() <= 32 &&
8072 "Exceeds 32-bit integer zero extension limit");
8073 assert((int)Mask.size() == NumElements && "Unexpected shuffle mask size");
8075 // Define a helper function to check a particular ext-scale and lower to it if valid.
8077 auto Lower = [&](int Scale) -> SDValue {
8080 for (int i = 0; i < NumElements; ++i) {
8082 continue; // Valid anywhere but doesn't tell us anything.
8083 if (i % Scale != 0) {
8084 // Each of the extended elements needs to be zeroable.
8088 // We no longer are in the anyext case.
8093 // Each of the base elements needs to be consecutive indices into the
8094 // same input vector.
8095 SDValue V = Mask[i] < NumElements ? V1 : V2;
8098 else if (InputV != V)
8099 return SDValue(); // Flip-flopping inputs.
8101 if (Mask[i] % NumElements != i / Scale)
8102 return SDValue(); // Non-consecutive strided elements.
8105 // If we fail to find an input, we have a zero-shuffle which should always
8106 // have already been handled.
8107 // FIXME: Maybe handle this here in case during blending we end up with one?
8111 return lowerVectorShuffleAsSpecificZeroOrAnyExtend(
8112 DL, VT, Scale, AnyExt, InputV, Subtarget, DAG);
8115 // The widest scale possible for extending is to a 64-bit integer.
8116 assert(Bits % 64 == 0 &&
8117 "The number of bits in a vector must be divisible by 64 on x86!");
8118 int NumExtElements = Bits / 64;
8120 // Each iteration, try extending the elements half as much, but into twice as many elements.
8122 for (; NumExtElements < NumElements; NumExtElements *= 2) {
8123 assert(NumElements % NumExtElements == 0 &&
8124 "The input vector size must be divisible by the extended size.");
8125 if (SDValue V = Lower(NumElements / NumExtElements))
8129 // General extends failed, but 128-bit vectors may be able to use MOVQ.
8133 // Returns one of the source operands if the shuffle can be reduced to a
8134 // MOVQ, copying the lower 64-bits and zero-extending to the upper 64-bits.
8135 auto CanZExtLowHalf = [&]() {
8136 for (int i = NumElements / 2; i != NumElements; ++i)
8139 if (isSequentialOrUndefInRange(Mask, 0, NumElements / 2, 0))
8141 if (isSequentialOrUndefInRange(Mask, 0, NumElements / 2, NumElements))
8146 if (SDValue V = CanZExtLowHalf()) {
8147 V = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, V);
8148 V = DAG.getNode(X86ISD::VZEXT_MOVL, DL, MVT::v2i64, V);
8149 return DAG.getNode(ISD::BITCAST, DL, VT, V);
8152 // No viable ext lowering found.
8156 /// \brief Try to get a scalar value for a specific element of a vector.
8158 /// Looks through BUILD_VECTOR and SCALAR_TO_VECTOR nodes to find a scalar.
8159 static SDValue getScalarValueForVectorElement(SDValue V, int Idx,
8160 SelectionDAG &DAG) {
8161 MVT VT = V.getSimpleValueType();
8162 MVT EltVT = VT.getVectorElementType();
8163 while (V.getOpcode() == ISD::BITCAST)
8164 V = V.getOperand(0);
8165 // If the bitcasts shift the element size, we can't extract an equivalent element from it.
8167 MVT NewVT = V.getSimpleValueType();
8168 if (!NewVT.isVector() || NewVT.getScalarSizeInBits() != VT.getScalarSizeInBits())
8171 if (V.getOpcode() == ISD::BUILD_VECTOR ||
8172 (Idx == 0 && V.getOpcode() == ISD::SCALAR_TO_VECTOR))
8173 return DAG.getNode(ISD::BITCAST, SDLoc(V), EltVT, V.getOperand(Idx));
8178 /// \brief Helper to test for a load that can be folded with x86 shuffles.
8180 /// This is particularly important because the set of instructions varies
8181 /// significantly based on whether the operand is a load or not.
8182 static bool isShuffleFoldableLoad(SDValue V) {
8183 while (V.getOpcode() == ISD::BITCAST)
8184 V = V.getOperand(0);
8186 return ISD::isNON_EXTLoad(V.getNode());
8189 /// \brief Try to lower insertion of a single element into a zero vector.
8191 /// This is a common pattern that we have especially efficient ways to lower
8192 /// across all subtarget feature sets.
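///
/// For example, a v4f32 shuffle with mask [4, 1, 2, 3] where V2 was built
/// from a scalar load can be lowered to a single MOVSS that inserts the
/// scalar into lane 0 of V1 (on SSE4.1 targets this case is instead left to
/// the generally faster blend lowering).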
8193 static SDValue lowerVectorShuffleAsElementInsertion(
8194 MVT VT, SDLoc DL, SDValue V1, SDValue V2, ArrayRef<int> Mask,
8195 const X86Subtarget *Subtarget, SelectionDAG &DAG) {
8196 SmallBitVector Zeroable = computeZeroableShuffleElements(Mask, V1, V2);
8198 MVT EltVT = VT.getVectorElementType();
8200 int V2Index = std::find_if(Mask.begin(), Mask.end(),
8201 [&Mask](int M) { return M >= (int)Mask.size(); }) -
8203 bool IsV1Zeroable = true;
8204 for (int i = 0, Size = Mask.size(); i < Size; ++i)
8205 if (i != V2Index && !Zeroable[i]) {
8206 IsV1Zeroable = false;
8210 // Check for a single input from a SCALAR_TO_VECTOR node.
8211 // FIXME: All of this should be canonicalized into INSERT_VECTOR_ELT and
8212 // all the smarts here sunk into that routine. However, the current
8213 // lowering of BUILD_VECTOR makes that nearly impossible until the old
8214 // vector shuffle lowering is dead.
8215 if (SDValue V2S = getScalarValueForVectorElement(
8216 V2, Mask[V2Index] - Mask.size(), DAG)) {
8217 // We need to zext the scalar if it is smaller than an i32.
8218 V2S = DAG.getNode(ISD::BITCAST, DL, EltVT, V2S);
8219 if (EltVT == MVT::i8 || EltVT == MVT::i16) {
8220 // Using zext to expand a narrow element won't work for non-zero insertions.
8225 // Zero-extend directly to i32.
8227 V2S = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i32, V2S);
8229 V2 = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, ExtVT, V2S);
8230 } else if (Mask[V2Index] != (int)Mask.size() || EltVT == MVT::i8 ||
8231 EltVT == MVT::i16) {
8232 // Either not inserting from the low element of the input or the input
8233 // element size is too small to use VZEXT_MOVL to clear the high bits.
8237 if (!IsV1Zeroable) {
8238 // If V1 can't be treated as a zero vector we have fewer options to lower
8239 // this. We can't support integer vectors or non-zero targets cheaply, and
8240 // the V1 elements can't be permuted in any way.
8241 assert(VT == ExtVT && "Cannot change extended type when non-zeroable!");
8242 if (!VT.isFloatingPoint() || V2Index != 0)
8244 SmallVector<int, 8> V1Mask(Mask.begin(), Mask.end());
8245 V1Mask[V2Index] = -1;
8246 if (!isNoopShuffleMask(V1Mask))
8248 // This is essentially a special case blend operation, but if we have
8249 // general purpose blend operations, they are always faster. Bail and let
8250 // the rest of the lowering handle these as blends.
8251 if (Subtarget->hasSSE41())
8254 // Otherwise, use MOVSD or MOVSS.
8255 assert((EltVT == MVT::f32 || EltVT == MVT::f64) &&
8256 "Only two types of floating point element types to handle!");
8257 return DAG.getNode(EltVT == MVT::f32 ? X86ISD::MOVSS : X86ISD::MOVSD, DL,
8261 // This lowering only works for the low element with floating point vectors.
8262 if (VT.isFloatingPoint() && V2Index != 0)
8265 V2 = DAG.getNode(X86ISD::VZEXT_MOVL, DL, ExtVT, V2);
8267 V2 = DAG.getNode(ISD::BITCAST, DL, VT, V2);
8270 // If we have 4 or fewer lanes we can cheaply shuffle the element into
8271 // the desired position. Otherwise it is more efficient to do a vector
8272 // shift left. We know that we can do a vector shift left because all
8273 // the inputs are zero.
8274 if (VT.isFloatingPoint() || VT.getVectorNumElements() <= 4) {
8275 SmallVector<int, 4> V2Shuffle(Mask.size(), 1);
8276 V2Shuffle[V2Index] = 0;
8277 V2 = DAG.getVectorShuffle(VT, DL, V2, DAG.getUNDEF(VT), V2Shuffle);
8279 V2 = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, V2);
8281 X86ISD::VSHLDQ, DL, MVT::v2i64, V2,
8283 V2Index * EltVT.getSizeInBits()/8,
8284 DAG.getTargetLoweringInfo().getScalarShiftAmountTy(MVT::v2i64)));
8285 V2 = DAG.getNode(ISD::BITCAST, DL, VT, V2);
8291 /// \brief Try to lower broadcast of a single element.
8293 /// For convenience, this code also bundles all of the subtarget feature set
8294 /// filtering. While a little annoying to re-dispatch on type here, there isn't
8295 /// a convenient way to factor it out.
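///
/// For example, a v8f32 splat of element 0 becomes a single VBROADCASTSS on
/// AVX. Integer broadcasts require AVX2, and without AVX2 the splatted value
/// must come from a foldable scalar load for a broadcast to be formed at all.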
8296 static SDValue lowerVectorShuffleAsBroadcast(MVT VT, SDLoc DL, SDValue V,
8298 const X86Subtarget *Subtarget,
8299 SelectionDAG &DAG) {
8300 if (!Subtarget->hasAVX())
8302 if (VT.isInteger() && !Subtarget->hasAVX2())
8305 // Check that the mask is a broadcast.
8306 int BroadcastIdx = -1;
8308 if (M >= 0 && BroadcastIdx == -1)
8310 else if (M >= 0 && M != BroadcastIdx)
8313 assert(BroadcastIdx < (int)Mask.size() && "We only expect to be called with "
8314 "a sorted mask where the broadcast "
8317 // Go up the chain of (vector) values to try and find a scalar load that
8318 // we can combine with the broadcast.
8320 switch (V.getOpcode()) {
8321 case ISD::CONCAT_VECTORS: {
8322 int OperandSize = Mask.size() / V.getNumOperands();
8323 V = V.getOperand(BroadcastIdx / OperandSize);
8324 BroadcastIdx %= OperandSize;
8328 case ISD::INSERT_SUBVECTOR: {
8329 SDValue VOuter = V.getOperand(0), VInner = V.getOperand(1);
8330 auto ConstantIdx = dyn_cast<ConstantSDNode>(V.getOperand(2));
8334 int BeginIdx = (int)ConstantIdx->getZExtValue();
8336 BeginIdx + (int)VInner.getValueType().getVectorNumElements();
8337 if (BroadcastIdx >= BeginIdx && BroadcastIdx < EndIdx) {
8338 BroadcastIdx -= BeginIdx;
8349 // Check if this is a broadcast of a scalar. We special case lowering
8350 // for scalars so that we can more effectively fold with loads.
8351 if (V.getOpcode() == ISD::BUILD_VECTOR ||
8352 (V.getOpcode() == ISD::SCALAR_TO_VECTOR && BroadcastIdx == 0)) {
8353 V = V.getOperand(BroadcastIdx);
8355 // If the scalar isn't a load we can't broadcast from it in AVX1, only with AVX2.
8357 if (!Subtarget->hasAVX2() && !isShuffleFoldableLoad(V))
8359 } else if (BroadcastIdx != 0 || !Subtarget->hasAVX2()) {
8360 // We can't broadcast from a vector register w/o AVX2, and we can only
8361 // broadcast from the zero-element of a vector register.
8365 return DAG.getNode(X86ISD::VBROADCAST, DL, VT, V);
8368 // Check for whether we can use INSERTPS to perform the shuffle. We only use
8369 // INSERTPS when the V1 elements are already in the correct locations
8370 // because otherwise we can just always use two SHUFPS instructions which
8371 // are much smaller to encode than a SHUFPS and an INSERTPS. We can also
8372 // perform INSERTPS if a single V1 element is out of place and all V2
8373 // elements are zeroable.
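// For example, the v4f32 mask [0, 1, 2, 5] inserts element 1 of V2 into lane 3
// of V1: V2SrcIndex = 1 and V2DstIndex = 3 below, giving an INSERTPS immediate
// of (1 << 6) | (3 << 4) = 0x70 (assuming no zeroable lanes, so ZMask == 0).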
8374 static SDValue lowerVectorShuffleAsInsertPS(SDValue Op, SDValue V1, SDValue V2,
8376 SelectionDAG &DAG) {
8377 assert(Op.getSimpleValueType() == MVT::v4f32 && "Bad shuffle type!");
8378 assert(V1.getSimpleValueType() == MVT::v4f32 && "Bad operand type!");
8379 assert(V2.getSimpleValueType() == MVT::v4f32 && "Bad operand type!");
8380 assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");
8382 SmallBitVector Zeroable = computeZeroableShuffleElements(Mask, V1, V2);
8385 int V1DstIndex = -1;
8386 int V2DstIndex = -1;
8387 bool V1UsedInPlace = false;
8389 for (int i = 0; i < 4; ++i) {
8390 // Synthesize a zero mask from the zeroable elements (includes undefs).
8396 // Flag if we use any V1 inputs in place.
8398 V1UsedInPlace = true;
8402 // We can only insert a single non-zeroable element.
8403 if (V1DstIndex != -1 || V2DstIndex != -1)
8407 // V1 input out of place for insertion.
8410 // V2 input for insertion.
8415 // Don't bother if we have no (non-zeroable) element for insertion.
8416 if (V1DstIndex == -1 && V2DstIndex == -1)
8419 // Determine element insertion src/dst indices. The src index is from the
8420 // start of the inserted vector, not the start of the concatenated vector.
8421 unsigned V2SrcIndex = 0;
8422 if (V1DstIndex != -1) {
8423 // If we have a V1 input out of place, we use V1 as the V2 element insertion
8424 // and don't use the original V2 at all.
8425 V2SrcIndex = Mask[V1DstIndex];
8426 V2DstIndex = V1DstIndex;
8429 V2SrcIndex = Mask[V2DstIndex] - 4;
8432 // If no V1 inputs are used in place, then the result is created only from
8433 // the zero mask and the V2 insertion - so remove V1 dependency.
8435 V1 = DAG.getUNDEF(MVT::v4f32);
8437 unsigned InsertPSMask = V2SrcIndex << 6 | V2DstIndex << 4 | ZMask;
8438 assert((InsertPSMask & ~0xFFu) == 0 && "Invalid mask!");
8440 // Insert the V2 element into the desired position.
8442 return DAG.getNode(X86ISD::INSERTPS, DL, MVT::v4f32, V1, V2,
8443 DAG.getConstant(InsertPSMask, MVT::i8));
8446 /// \brief Try to lower a shuffle as a permute of the inputs followed by an
8447 /// UNPCK instruction.
8449 /// This specifically targets cases where we end up alternating between
8450 /// the two inputs, and so can permute them into something that feeds a single
8451 /// UNPCK instruction. Note that this routine only targets integer vectors
8452 /// because for floating point vectors we have a generalized SHUFPS lowering
8453 /// strategy that handles everything that doesn't *exactly* match an unpack,
8454 /// making this clever lowering unnecessary.
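///
/// For example, the v4i32 mask [0, 5, 2, 7] is handled by permuting V1 to
/// [0, 2, u, u] and V2 to [1, 3, u, u] and then emitting a single PUNPCKLDQ
/// of the two permuted inputs.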
8455 static SDValue lowerVectorShuffleAsUnpack(MVT VT, SDLoc DL, SDValue V1,
8456 SDValue V2, ArrayRef<int> Mask,
8457 SelectionDAG &DAG) {
8458 assert(!VT.isFloatingPoint() &&
8459 "This routine only supports integer vectors.");
8460 assert(!isSingleInputShuffleMask(Mask) &&
8461 "This routine should only be used when blending two inputs.");
8462 assert(Mask.size() >= 2 && "Single element masks are invalid.");
8464 int Size = Mask.size();
8466 int NumLoInputs = std::count_if(Mask.begin(), Mask.end(), [Size](int M) {
8467 return M >= 0 && M % Size < Size / 2;
8469 int NumHiInputs = std::count_if(
8470 Mask.begin(), Mask.end(), [Size](int M) { return M % Size > Size / 2; });
8472 bool UnpackLo = NumLoInputs >= NumHiInputs;
8474 auto TryUnpack = [&](MVT UnpackVT, int Scale) {
8475 SmallVector<int, 32> V1Mask(Mask.size(), -1);
8476 SmallVector<int, 32> V2Mask(Mask.size(), -1);
8478 for (int i = 0; i < Size; ++i) {
8482 // Each element of the unpack contains Scale elements from this mask.
8483 int UnpackIdx = i / Scale;
8485 // We only handle the case where V1 feeds the first slots of the unpack.
8486 // We rely on canonicalization to ensure this is the case.
8487 if ((UnpackIdx % 2 == 0) != (Mask[i] < Size))
8490 // Setup the mask for this input. The indexing is tricky as we have to
8491 // handle the unpack stride.
8492 SmallVectorImpl<int> &VMask = (UnpackIdx % 2 == 0) ? V1Mask : V2Mask;
8493 VMask[(UnpackIdx / 2) * Scale + i % Scale + (UnpackLo ? 0 : Size / 2)] = Mask[i] % Size;
8497 // Shuffle the inputs into place.
8498 V1 = DAG.getVectorShuffle(VT, DL, V1, DAG.getUNDEF(VT), V1Mask);
8499 V2 = DAG.getVectorShuffle(VT, DL, V2, DAG.getUNDEF(VT), V2Mask);
8501 // Cast the inputs to the type we will use to unpack them.
8502 V1 = DAG.getNode(ISD::BITCAST, DL, UnpackVT, V1);
8503 V2 = DAG.getNode(ISD::BITCAST, DL, UnpackVT, V2);
8505 // Unpack the inputs and cast the result back to the desired type.
8506 return DAG.getNode(ISD::BITCAST, DL, VT,
8507 DAG.getNode(UnpackLo ? X86ISD::UNPCKL : X86ISD::UNPCKH,
8508 DL, UnpackVT, V1, V2));
8511 // We try each unpack from the largest to the smallest to try and find one
8512 // that fits this mask.
8513 int OrigNumElements = VT.getVectorNumElements();
8514 int OrigScalarSize = VT.getScalarSizeInBits();
8515 for (int ScalarSize = 64; ScalarSize >= OrigScalarSize; ScalarSize /= 2) {
8516 int Scale = ScalarSize / OrigScalarSize;
8517 int NumElements = OrigNumElements / Scale;
8518 MVT UnpackVT = MVT::getVectorVT(MVT::getIntegerVT(ScalarSize), NumElements);
8519 if (SDValue Unpack = TryUnpack(UnpackVT, Scale))
8526 /// \brief Handle lowering of 2-lane 64-bit floating point shuffles.
8528 /// This is the basis function for the 2-lane 64-bit shuffles as we have full
8529 /// support for floating point shuffles but not integer shuffles. These
8530 /// instructions will incur a domain crossing penalty on some chips though so
8531 /// it is better to avoid lowering through this for integer vectors where possible.
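///
/// For example, the single-input mask [1, 0] swaps the two doubles and is
/// emitted as a SHUFPD (VPERMILPD on AVX) with immediate 1, while the mask
/// [0, 0] matches the MOVDDUP low-duplicate instruction on SSE3 and later.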
8533 static SDValue lowerV2F64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
8534 const X86Subtarget *Subtarget,
8535 SelectionDAG &DAG) {
8537 assert(Op.getSimpleValueType() == MVT::v2f64 && "Bad shuffle type!");
8538 assert(V1.getSimpleValueType() == MVT::v2f64 && "Bad operand type!");
8539 assert(V2.getSimpleValueType() == MVT::v2f64 && "Bad operand type!");
8540 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
8541 ArrayRef<int> Mask = SVOp->getMask();
8542 assert(Mask.size() == 2 && "Unexpected mask size for v2 shuffle!");
8544 if (isSingleInputShuffleMask(Mask)) {
8545 // Use low duplicate instructions for masks that match their pattern.
8546 if (Subtarget->hasSSE3())
8547 if (isShuffleEquivalent(V1, V2, Mask, 0, 0))
8548 return DAG.getNode(X86ISD::MOVDDUP, DL, MVT::v2f64, V1);
8550 // Straight shuffle of a single input vector. Simulate this by using the
8551 // single input as both of the "inputs" to this instruction.
8552 unsigned SHUFPDMask = (Mask[0] == 1) | ((Mask[1] == 1) << 1);
8554 if (Subtarget->hasAVX()) {
8555 // If we have AVX, we can use VPERMILPS which will allow folding a load
8556 // into the shuffle.
8557 return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v2f64, V1,
8558 DAG.getConstant(SHUFPDMask, MVT::i8));
8561 return DAG.getNode(X86ISD::SHUFP, SDLoc(Op), MVT::v2f64, V1, V1,
8562 DAG.getConstant(SHUFPDMask, MVT::i8));
8564 assert(Mask[0] >= 0 && Mask[0] < 2 && "Non-canonicalized blend!");
8565 assert(Mask[1] >= 2 && "Non-canonicalized blend!");
8567 // If we have a single input, insert that into V1 if we can do so cheaply.
8568 if ((Mask[0] >= 2) + (Mask[1] >= 2) == 1) {
8569 if (SDValue Insertion = lowerVectorShuffleAsElementInsertion(
8570 MVT::v2f64, DL, V1, V2, Mask, Subtarget, DAG))
8572 // Try inverting the insertion since for v2 masks it is easy to do and we
8573 // can't reliably sort the mask one way or the other.
8574 int InverseMask[2] = {Mask[0] < 0 ? -1 : (Mask[0] ^ 2),
8575 Mask[1] < 0 ? -1 : (Mask[1] ^ 2)};
8576 if (SDValue Insertion = lowerVectorShuffleAsElementInsertion(
8577 MVT::v2f64, DL, V2, V1, InverseMask, Subtarget, DAG))
8581 // Try to use one of the special instruction patterns to handle two common
8582 // blend patterns if a zero-blend above didn't work.
8583 if (isShuffleEquivalent(V1, V2, Mask, 0, 3) || isShuffleEquivalent(V1, V2, Mask, 1, 3))
8584 if (SDValue V1S = getScalarValueForVectorElement(V1, Mask[0], DAG))
8585 // We can either use a special instruction to load over the low double or
8586 // to move just the low double.
8588 isShuffleFoldableLoad(V1S) ? X86ISD::MOVLPD : X86ISD::MOVSD,
8590 DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v2f64, V1S));
8592 if (Subtarget->hasSSE41())
8593 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v2f64, V1, V2, Mask,
8597 // Use dedicated unpack instructions for masks that match their pattern.
8598 if (isShuffleEquivalent(V1, V2, Mask, 0, 2))
8599 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v2f64, V1, V2);
8600 if (isShuffleEquivalent(V1, V2, Mask, 1, 3))
8601 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v2f64, V1, V2);
8603 unsigned SHUFPDMask = (Mask[0] == 1) | (((Mask[1] - 2) == 1) << 1);
8604 return DAG.getNode(X86ISD::SHUFP, SDLoc(Op), MVT::v2f64, V1, V2,
8605 DAG.getConstant(SHUFPDMask, MVT::i8));
8608 /// \brief Handle lowering of 2-lane 64-bit integer shuffles.
8610 /// Tries to lower a 2-lane 64-bit shuffle using shuffle operations provided by
8611 /// the integer unit to minimize domain crossing penalties. However, for blends
8612 /// it falls back to the floating point shuffle operation with appropriate bit casting.
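///
/// For example, the single-input mask [1, 0] stays entirely in the integer
/// domain: the input is bitcast to v4i32 and emitted as a PSHUFD with the
/// widened mask [2, 3, 0, 1] (immediate 0x4E).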
8614 static SDValue lowerV2I64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
8615 const X86Subtarget *Subtarget,
8616 SelectionDAG &DAG) {
8618 assert(Op.getSimpleValueType() == MVT::v2i64 && "Bad shuffle type!");
8619 assert(V1.getSimpleValueType() == MVT::v2i64 && "Bad operand type!");
8620 assert(V2.getSimpleValueType() == MVT::v2i64 && "Bad operand type!");
8621 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
8622 ArrayRef<int> Mask = SVOp->getMask();
8623 assert(Mask.size() == 2 && "Unexpected mask size for v2 shuffle!");
8625 if (isSingleInputShuffleMask(Mask)) {
8626 // Check for being able to broadcast a single element.
8627 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v2i64, DL, V1,
8628 Mask, Subtarget, DAG))
8631 // Straight shuffle of a single input vector. For everything from SSE2
8632 // onward this has a single fast instruction with no scary immediates.
8633 // We have to map the mask onto v4i32 lanes since this is actually emitted as a v4i32 (PSHUFD) shuffle instruction.
8634 V1 = DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, V1);
8635 int WidenedMask[4] = {
8636 std::max(Mask[0], 0) * 2, std::max(Mask[0], 0) * 2 + 1,
8637 std::max(Mask[1], 0) * 2, std::max(Mask[1], 0) * 2 + 1};
8639 ISD::BITCAST, DL, MVT::v2i64,
8640 DAG.getNode(X86ISD::PSHUFD, SDLoc(Op), MVT::v4i32, V1,
8641 getV4X86ShuffleImm8ForMask(WidenedMask, DAG)));
8643 assert(Mask[0] != -1 && "No undef lanes in multi-input v2 shuffles!");
8644 assert(Mask[1] != -1 && "No undef lanes in multi-input v2 shuffles!");
8645 assert(Mask[0] < 2 && "We sort V1 to be the first input.");
8646 assert(Mask[1] >= 2 && "We sort V2 to be the second input.");
8648 // If we have a blend of two PACKUS operations and the blend aligns with the
8649 // low and high halves, we can just merge the PACKUS operations. This is
8650 // particularly important as it lets us merge shuffles that this routine itself creates.
8652 auto GetPackNode = [](SDValue V) {
8653 while (V.getOpcode() == ISD::BITCAST)
8654 V = V.getOperand(0);
8656 return V.getOpcode() == X86ISD::PACKUS ? V : SDValue();
8658 if (SDValue V1Pack = GetPackNode(V1))
8659 if (SDValue V2Pack = GetPackNode(V2))
8660 return DAG.getNode(ISD::BITCAST, DL, MVT::v2i64,
8661 DAG.getNode(X86ISD::PACKUS, DL, MVT::v16i8,
8662 Mask[0] == 0 ? V1Pack.getOperand(0)
8663 : V1Pack.getOperand(1),
8664 Mask[1] == 2 ? V2Pack.getOperand(0)
8665 : V2Pack.getOperand(1)));
8667 // Try to use shift instructions.
8669 lowerVectorShuffleAsShift(DL, MVT::v2i64, V1, V2, Mask, DAG))
8672 // When loading a scalar and then shuffling it into a vector we can often do
8673 // the insertion cheaply.
8674 if (SDValue Insertion = lowerVectorShuffleAsElementInsertion(
8675 MVT::v2i64, DL, V1, V2, Mask, Subtarget, DAG))
8677 // Try inverting the insertion since for v2 masks it is easy to do and we
8678 // can't reliably sort the mask one way or the other.
8679 int InverseMask[2] = {Mask[0] ^ 2, Mask[1] ^ 2};
8680 if (SDValue Insertion = lowerVectorShuffleAsElementInsertion(
8681 MVT::v2i64, DL, V2, V1, InverseMask, Subtarget, DAG))
8684 // We have different paths for blend lowering, but they all must use the
8685 // *exact* same predicate.
8686 bool IsBlendSupported = Subtarget->hasSSE41();
8687 if (IsBlendSupported)
8688 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v2i64, V1, V2, Mask,
8692 // Use dedicated unpack instructions for masks that match their pattern.
8693 if (isShuffleEquivalent(V1, V2, Mask, 0, 2))
8694 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v2i64, V1, V2);
8695 if (isShuffleEquivalent(V1, V2, Mask, 1, 3))
8696 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v2i64, V1, V2);
8698 // Try to use byte rotation instructions.
8699 // It's more profitable for pre-SSSE3 to use shuffles/unpacks.
8700 if (Subtarget->hasSSSE3())
8701 if (SDValue Rotate = lowerVectorShuffleAsByteRotate(
8702 DL, MVT::v2i64, V1, V2, Mask, Subtarget, DAG))
8705 // If we have direct support for blends, we should lower by decomposing into
8706 // a permute. That will be faster than the domain cross.
8707 if (IsBlendSupported)
8708 return lowerVectorShuffleAsDecomposedShuffleBlend(DL, MVT::v2i64, V1, V2,
8711 // We implement this with SHUFPD which is pretty lame because it will likely
8712 // incur 2 cycles of stall for integer vectors on Nehalem and older chips.
8713 // However, all the alternatives are still more cycles and newer chips don't
8714 // have this problem. It would be really nice if x86 had better shuffles here.
8715 V1 = DAG.getNode(ISD::BITCAST, DL, MVT::v2f64, V1);
8716 V2 = DAG.getNode(ISD::BITCAST, DL, MVT::v2f64, V2);
8717 return DAG.getNode(ISD::BITCAST, DL, MVT::v2i64,
8718 DAG.getVectorShuffle(MVT::v2f64, DL, V1, V2, Mask));
8721 /// \brief Test whether this can be lowered with a single SHUFPS instruction.
8723 /// This is used to disable more specialized lowerings when the shufps lowering
8724 /// will happen to be efficient.
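///
/// For example, [0, 1, 6, 7] can be done with a single SHUFPS (low half from
/// the first input, high half from the second), whereas [0, 5, 2, 7] cannot
/// because its low half needs elements from both inputs.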
8725 static bool isSingleSHUFPSMask(ArrayRef<int> Mask) {
8726 // This routine only handles 128-bit shufps.
8727 assert(Mask.size() == 4 && "Unsupported mask size!");
8729 // To lower with a single SHUFPS we need to have the low half and high half
8730 // each requiring a single input.
8731 if (Mask[0] != -1 && Mask[1] != -1 && (Mask[0] < 4) != (Mask[1] < 4))
8733 if (Mask[2] != -1 && Mask[3] != -1 && (Mask[2] < 4) != (Mask[3] < 4))
8739 /// \brief Lower a vector shuffle using the SHUFPS instruction.
8741 /// This is a helper routine dedicated to lowering vector shuffles using SHUFPS.
8742 /// It makes no assumptions about whether this is the *best* lowering; it simply uses it.
8744 static SDValue lowerVectorShuffleWithSHUFPS(SDLoc DL, MVT VT,
8745 ArrayRef<int> Mask, SDValue V1,
8746 SDValue V2, SelectionDAG &DAG) {
8747 SDValue LowV = V1, HighV = V2;
8748 int NewMask[4] = {Mask[0], Mask[1], Mask[2], Mask[3]};
8751 std::count_if(Mask.begin(), Mask.end(), [](int M) { return M >= 4; });
8753 if (NumV2Elements == 1) {
8755 std::find_if(Mask.begin(), Mask.end(), [](int M) { return M >= 4; }) -
8758 // Compute the index adjacent to V2Index and in the same half by toggling the low bit.
8760 int V2AdjIndex = V2Index ^ 1;
8762 if (Mask[V2AdjIndex] == -1) {
8763 // Handles all the cases where we have a single V2 element and an undef.
8764 // This will only ever happen in the high lanes because we commute the
8765 // vector otherwise.
8767 std::swap(LowV, HighV);
8768 NewMask[V2Index] -= 4;
8770 // Handle the case where the V2 element ends up adjacent to a V1 element.
8771 // To make this work, blend them together as the first step.
8772 int V1Index = V2AdjIndex;
8773 int BlendMask[4] = {Mask[V2Index] - 4, 0, Mask[V1Index], 0};
8774 V2 = DAG.getNode(X86ISD::SHUFP, DL, VT, V2, V1,
8775 getV4X86ShuffleImm8ForMask(BlendMask, DAG));
8777 // Now proceed to reconstruct the final blend as we have the necessary
8778 // high or low half formed.
8785 NewMask[V1Index] = 2; // We put the V1 element in V2[2].
8786 NewMask[V2Index] = 0; // We shifted the V2 element into V2[0].
8788 } else if (NumV2Elements == 2) {
8789 if (Mask[0] < 4 && Mask[1] < 4) {
8790 // Handle the easy case where we have V1 in the low lanes and V2 in the high lanes.
8794 } else if (Mask[2] < 4 && Mask[3] < 4) {
8795 // We also handle the reversed case because this utility may get called
8796 // when we detect a SHUFPS pattern but can't easily commute the shuffle to
8797 // arrange things in the right direction.
8803 // We have a mixture of V1 and V2 in both low and high lanes. Rather than
8804 // trying to place elements directly, just blend them and set up the final
8805 // shuffle to place them.
8807 // The first two blend mask elements are for V1, the second two are for V2.
8809 int BlendMask[4] = {Mask[0] < 4 ? Mask[0] : Mask[1],
8810 Mask[2] < 4 ? Mask[2] : Mask[3],
8811 (Mask[0] >= 4 ? Mask[0] : Mask[1]) - 4,
8812 (Mask[2] >= 4 ? Mask[2] : Mask[3]) - 4};
8813 V1 = DAG.getNode(X86ISD::SHUFP, DL, VT, V1, V2,
8814 getV4X86ShuffleImm8ForMask(BlendMask, DAG));
8816 // Now we do a normal shuffle of V1 by giving V1 as both operands to the shuffle.
8819 NewMask[0] = Mask[0] < 4 ? 0 : 2;
8820 NewMask[1] = Mask[0] < 4 ? 2 : 0;
8821 NewMask[2] = Mask[2] < 4 ? 1 : 3;
8822 NewMask[3] = Mask[2] < 4 ? 3 : 1;
8825 return DAG.getNode(X86ISD::SHUFP, DL, VT, LowV, HighV,
8826 getV4X86ShuffleImm8ForMask(NewMask, DAG));
8829 /// \brief Lower 4-lane 32-bit floating point shuffles.
8831 /// Uses instructions exclusively from the floating point unit to minimize
8832 /// domain crossing penalties, as these are sufficient to implement all v4f32 shuffles.
8834 static SDValue lowerV4F32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
8835 const X86Subtarget *Subtarget,
8836 SelectionDAG &DAG) {
8838 assert(Op.getSimpleValueType() == MVT::v4f32 && "Bad shuffle type!");
8839 assert(V1.getSimpleValueType() == MVT::v4f32 && "Bad operand type!");
8840 assert(V2.getSimpleValueType() == MVT::v4f32 && "Bad operand type!");
8841 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
8842 ArrayRef<int> Mask = SVOp->getMask();
8843 assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");
8846 std::count_if(Mask.begin(), Mask.end(), [](int M) { return M >= 4; });
8848 if (NumV2Elements == 0) {
8849 // Check for being able to broadcast a single element.
8850 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v4f32, DL, V1,
8851 Mask, Subtarget, DAG))
8854 // Use even/odd duplicate instructions for masks that match their pattern.
8855 if (Subtarget->hasSSE3()) {
8856 if (isShuffleEquivalent(V1, V2, Mask, 0, 0, 2, 2))
8857 return DAG.getNode(X86ISD::MOVSLDUP, DL, MVT::v4f32, V1);
8858 if (isShuffleEquivalent(V1, V2, Mask, 1, 1, 3, 3))
8859 return DAG.getNode(X86ISD::MOVSHDUP, DL, MVT::v4f32, V1);
8862 if (Subtarget->hasAVX()) {
8863 // If we have AVX, we can use VPERMILPS which will allow folding a load
8864 // into the shuffle.
8865 return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v4f32, V1,
8866 getV4X86ShuffleImm8ForMask(Mask, DAG));
8869 // Otherwise, use a straight shuffle of a single input vector. We pass the
8870 // input vector to both operands to simulate this with a SHUFPS.
8871 return DAG.getNode(X86ISD::SHUFP, DL, MVT::v4f32, V1, V1,
8872 getV4X86ShuffleImm8ForMask(Mask, DAG));
8875 // There are special ways we can lower some single-element blends. However, we
8876 // have custom ways we can lower more complex single-element blends below that
8877 // we defer to if both this and BLENDPS fail to match, so restrict this to
8878 // when the V2 input is targeting element 0 of the mask -- that is the fast case here.
8880 if (NumV2Elements == 1 && Mask[0] >= 4)
8881 if (SDValue V = lowerVectorShuffleAsElementInsertion(MVT::v4f32, DL, V1, V2,
8882 Mask, Subtarget, DAG))
8885 if (Subtarget->hasSSE41()) {
8886 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v4f32, V1, V2, Mask,
8890 // Use INSERTPS if we can complete the shuffle efficiently.
8891 if (SDValue V = lowerVectorShuffleAsInsertPS(Op, V1, V2, Mask, DAG))
8894 if (!isSingleSHUFPSMask(Mask))
8895 if (SDValue BlendPerm = lowerVectorShuffleAsBlendAndPermute(
8896 DL, MVT::v4f32, V1, V2, Mask, DAG))
8900 // Use dedicated unpack instructions for masks that match their pattern.
8901 if (isShuffleEquivalent(V1, V2, Mask, 0, 4, 1, 5))
8902 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v4f32, V1, V2);
8903 if (isShuffleEquivalent(V1, V2, Mask, 2, 6, 3, 7))
8904 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v4f32, V1, V2);
8905 if (isShuffleEquivalent(V1, V2, Mask, 4, 0, 5, 1))
8906 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v4f32, V2, V1);
8907 if (isShuffleEquivalent(V1, V2, Mask, 6, 2, 7, 3))
8908 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v4f32, V2, V1);
8910 // Otherwise fall back to a SHUFPS lowering strategy.
8911 return lowerVectorShuffleWithSHUFPS(DL, MVT::v4f32, Mask, V1, V2, DAG);
8914 /// \brief Lower 4-lane i32 vector shuffles.
8916 /// We try to handle these with integer-domain shuffles where we can, but for
8917 /// blends we use the floating point domain blend instructions.
8918 static SDValue lowerV4I32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
8919 const X86Subtarget *Subtarget,
8920 SelectionDAG &DAG) {
8922 assert(Op.getSimpleValueType() == MVT::v4i32 && "Bad shuffle type!");
8923 assert(V1.getSimpleValueType() == MVT::v4i32 && "Bad operand type!");
8924 assert(V2.getSimpleValueType() == MVT::v4i32 && "Bad operand type!");
8925 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
8926 ArrayRef<int> Mask = SVOp->getMask();
8927 assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");
8929 // Whenever we can lower this as a zext, that instruction is strictly faster
8930 // than any alternative. It also allows us to fold memory operands into the
8931 // shuffle in many cases.
8932 if (SDValue ZExt = lowerVectorShuffleAsZeroOrAnyExtend(DL, MVT::v4i32, V1, V2,
8933 Mask, Subtarget, DAG))
8937 std::count_if(Mask.begin(), Mask.end(), [](int M) { return M >= 4; });
8939 if (NumV2Elements == 0) {
8940 // Check for being able to broadcast a single element.
8941 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v4i32, DL, V1,
8942 Mask, Subtarget, DAG))
8945 // Straight shuffle of a single input vector. For everything from SSE2
8946 // onward this has a single fast instruction with no scary immediates.
8947 // We coerce the shuffle pattern to be compatible with UNPCK instructions
8948 // but we aren't actually going to use the UNPCK instruction because doing
8949 // so prevents folding a load into this instruction or making a copy.
8950 const int UnpackLoMask[] = {0, 0, 1, 1};
8951 const int UnpackHiMask[] = {2, 2, 3, 3};
8952 if (isShuffleEquivalent(V1, V2, Mask, 0, 0, 1, 1))
8953 Mask = UnpackLoMask;
8954 else if (isShuffleEquivalent(V1, V2, Mask, 2, 2, 3, 3))
8955 Mask = UnpackHiMask;
8957 return DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32, V1,
8958 getV4X86ShuffleImm8ForMask(Mask, DAG));
8961 // Try to use shift instructions.
8963 lowerVectorShuffleAsShift(DL, MVT::v4i32, V1, V2, Mask, DAG))
8966 // There are special ways we can lower some single-element blends.
8967 if (NumV2Elements == 1)
8968 if (SDValue V = lowerVectorShuffleAsElementInsertion(MVT::v4i32, DL, V1, V2,
8969 Mask, Subtarget, DAG))
8972 // We have different paths for blend lowering, but they all must use the
8973 // *exact* same predicate.
8974 bool IsBlendSupported = Subtarget->hasSSE41();
8975 if (IsBlendSupported)
8976 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v4i32, V1, V2, Mask,
8980 if (SDValue Masked =
8981 lowerVectorShuffleAsBitMask(DL, MVT::v4i32, V1, V2, Mask, DAG))
8984 // Use dedicated unpack instructions for masks that match their pattern.
8985 if (isShuffleEquivalent(V1, V2, Mask, 0, 4, 1, 5))
8986 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v4i32, V1, V2);
8987 if (isShuffleEquivalent(V1, V2, Mask, 2, 6, 3, 7))
8988 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v4i32, V1, V2);
8989 if (isShuffleEquivalent(V1, V2, Mask, 4, 0, 5, 1))
8990 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v4i32, V2, V1);
8991 if (isShuffleEquivalent(V1, V2, Mask, 6, 2, 7, 3))
8992 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v4i32, V2, V1);
8994 // Try to use byte rotation instructions.
8995 // It's more profitable for pre-SSSE3 to use shuffles/unpacks.
8996 if (Subtarget->hasSSSE3())
8997 if (SDValue Rotate = lowerVectorShuffleAsByteRotate(
8998 DL, MVT::v4i32, V1, V2, Mask, Subtarget, DAG))
9001 // If we have direct support for blends, we should lower by decomposing into
9002 // a permute. That will be faster than the domain cross.
9003 if (IsBlendSupported)
9004 return lowerVectorShuffleAsDecomposedShuffleBlend(DL, MVT::v4i32, V1, V2,
9007 // Try to lower by permuting the inputs into an unpack instruction.
9008 if (SDValue Unpack =
9009 lowerVectorShuffleAsUnpack(MVT::v4i32, DL, V1, V2, Mask, DAG))
9012 // We implement this with SHUFPS because it can blend from two vectors.
9013 // Because we're going to eventually use SHUFPS, we use SHUFPS even to build
9014 // up the inputs, bypassing domain shift penalties that we would incur if we
9015 // directly used PSHUFD on Nehalem and older. For newer chips, this isn't an issue.
9017 return DAG.getNode(ISD::BITCAST, DL, MVT::v4i32,
9018 DAG.getVectorShuffle(
9020 DAG.getNode(ISD::BITCAST, DL, MVT::v4f32, V1),
9021 DAG.getNode(ISD::BITCAST, DL, MVT::v4f32, V2), Mask));
9024 /// \brief Lowering of single-input v8i16 shuffles is the cornerstone of SSE2
9025 /// shuffle lowering, and the most complex part.
9027 /// The lowering strategy is to try to form pairs of input lanes which are
9028 /// targeted at the same half of the final vector, and then use a dword shuffle
9029 /// to place them onto the right half, and finally unpack the paired lanes into
9030 /// their final position.
9032 /// The exact breakdown of how to form these dword pairs and align them on the
9033 /// correct sides is really tricky. See the comments within the function for
9034 /// more of the details.
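///
/// For example, for the single-input mask [2, 3, 6, 7, 0, 1, 4, 5] each word
/// pair already forms a dword, so no PSHUFLW/PSHUFHW is needed and a single
/// PSHUFD with dword mask [1, 3, 0, 2] places the pairs in their final
/// positions.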
9035 static SDValue lowerV8I16SingleInputVectorShuffle(
9036 SDLoc DL, SDValue V, MutableArrayRef<int> Mask,
9037 const X86Subtarget *Subtarget, SelectionDAG &DAG) {
9038 assert(V.getSimpleValueType() == MVT::v8i16 && "Bad input type!");
9039 MutableArrayRef<int> LoMask = Mask.slice(0, 4);
9040 MutableArrayRef<int> HiMask = Mask.slice(4, 4);
9042 SmallVector<int, 4> LoInputs;
9043 std::copy_if(LoMask.begin(), LoMask.end(), std::back_inserter(LoInputs),
9044 [](int M) { return M >= 0; });
9045 std::sort(LoInputs.begin(), LoInputs.end());
9046 LoInputs.erase(std::unique(LoInputs.begin(), LoInputs.end()), LoInputs.end());
9047 SmallVector<int, 4> HiInputs;
9048 std::copy_if(HiMask.begin(), HiMask.end(), std::back_inserter(HiInputs),
9049 [](int M) { return M >= 0; });
9050 std::sort(HiInputs.begin(), HiInputs.end());
9051 HiInputs.erase(std::unique(HiInputs.begin(), HiInputs.end()), HiInputs.end());
9053 std::lower_bound(LoInputs.begin(), LoInputs.end(), 4) - LoInputs.begin();
9054 int NumHToL = LoInputs.size() - NumLToL;
9056 std::lower_bound(HiInputs.begin(), HiInputs.end(), 4) - HiInputs.begin();
9057 int NumHToH = HiInputs.size() - NumLToH;
9058 MutableArrayRef<int> LToLInputs(LoInputs.data(), NumLToL);
9059 MutableArrayRef<int> LToHInputs(HiInputs.data(), NumLToH);
9060 MutableArrayRef<int> HToLInputs(LoInputs.data() + NumLToL, NumHToL);
9061 MutableArrayRef<int> HToHInputs(HiInputs.data() + NumLToH, NumHToH);
9063 // Check for being able to broadcast a single element.
9064 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v8i16, DL, V,
9065 Mask, Subtarget, DAG))
9068 // Try to use shift instructions.
9070 lowerVectorShuffleAsShift(DL, MVT::v8i16, V, V, Mask, DAG))
9073 // Use dedicated unpack instructions for masks that match their pattern.
9074 if (isShuffleEquivalent(V, V, Mask, 0, 0, 1, 1, 2, 2, 3, 3))
9075 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v8i16, V, V);
9076 if (isShuffleEquivalent(V, V, Mask, 4, 4, 5, 5, 6, 6, 7, 7))
9077 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v8i16, V, V);
9079 // Try to use byte rotation instructions.
9080 if (SDValue Rotate = lowerVectorShuffleAsByteRotate(
9081 DL, MVT::v8i16, V, V, Mask, Subtarget, DAG))
9084 // Simplify the 1-into-3 and 3-into-1 cases with a single pshufd. For all
9085 // such inputs we can swap two of the dwords across the half mark and end up
9086 // with <=2 inputs to each half in each half. Once there, we can fall through
9087 // to the generic code below. For example:
9089 // Input: [a, b, c, d, e, f, g, h] -PSHUFD[0,2,1,3]-> [a, b, e, f, c, d, g, h]
9090 // Mask: [0, 1, 2, 7, 4, 5, 6, 3] -----------------> [0, 1, 4, 7, 2, 3, 6, 5]
9092 // However in some very rare cases we have a 1-into-3 or 3-into-1 on one half
9093 // and an existing 2-into-2 on the other half. In this case we may have to
9094 // pre-shuffle the 2-into-2 half to avoid turning it into a 3-into-1 or
9095 // 1-into-3 which could cause us to cycle endlessly fixing each side in turn.
9096 // Fortunately, we don't have to handle anything but a 2-into-2 pattern
9097 // because any other situation (including a 3-into-1 or 1-into-3 in the other
9098 // half than the one we target for fixing) will be fixed when we re-enter this
9099 // path. We will also combine away any sequence of PSHUFD instructions that
9100 // result into a single instruction. Here is an example of the tricky case:
9102 // Input: [a, b, c, d, e, f, g, h] -PSHUFD[0,2,1,3]-> [a, b, e, f, c, d, g, h]
9103 // Mask: [3, 7, 1, 0, 2, 7, 3, 5] -THIS-IS-BAD!!!!-> [5, 7, 1, 0, 4, 7, 5, 3]
9105 // This now has a 1-into-3 in the high half! Instead, we do two shuffles:
9107 // Input: [a, b, c, d, e, f, g, h] PSHUFHW[0,2,1,3]-> [a, b, c, d, e, g, f, h]
9108 // Mask: [3, 7, 1, 0, 2, 7, 3, 5] -----------------> [3, 7, 1, 0, 2, 7, 3, 6]
9110 // Input: [a, b, c, d, e, g, f, h] -PSHUFD[0,2,1,3]-> [a, b, e, g, c, d, f, h]
9111 // Mask: [3, 7, 1, 0, 2, 7, 3, 6] -----------------> [5, 7, 1, 0, 4, 7, 5, 6]
9113 // The result is fine to be handled by the generic logic.
9114 auto balanceSides = [&](ArrayRef<int> AToAInputs, ArrayRef<int> BToAInputs,
9115 ArrayRef<int> BToBInputs, ArrayRef<int> AToBInputs,
9116 int AOffset, int BOffset) {
9117 assert((AToAInputs.size() == 3 || AToAInputs.size() == 1) &&
9118 "Must call this with A having 3 or 1 inputs from the A half.");
9119 assert((BToAInputs.size() == 1 || BToAInputs.size() == 3) &&
9120 "Must call this with B having 1 or 3 inputs from the B half.");
9121 assert(AToAInputs.size() + BToAInputs.size() == 4 &&
9122 "Must call this with either 3:1 or 1:3 inputs (summing to 4).");
9124 // Compute the index of dword with only one word among the three inputs in
9125 // a half by taking the sum of the half with three inputs and subtracting
9126 // the sum of the actual three inputs. The difference is the remaining slot.
9129 int &TripleDWord = AToAInputs.size() == 3 ? ADWord : BDWord;
9130 int &OneInputDWord = AToAInputs.size() == 3 ? BDWord : ADWord;
9131 int TripleInputOffset = AToAInputs.size() == 3 ? AOffset : BOffset;
9132 ArrayRef<int> TripleInputs = AToAInputs.size() == 3 ? AToAInputs : BToAInputs;
9133 int OneInput = AToAInputs.size() == 3 ? BToAInputs[0] : AToAInputs[0];
9134 int TripleInputSum = 0 + 1 + 2 + 3 + (4 * TripleInputOffset);
9135 int TripleNonInputIdx =
9136 TripleInputSum - std::accumulate(TripleInputs.begin(), TripleInputs.end(), 0);
9137 TripleDWord = TripleNonInputIdx / 2;
9139 // We use xor with one to compute the adjacent DWord to whichever one the OneInput word is in.
9141 OneInputDWord = (OneInput / 2) ^ 1;
9143 // Check for one tricky case: We're fixing a 3<-1 or a 1<-3 shuffle for AToA
9144 // and BToA inputs. If there is also such a problem with the BToB and AToB
9145 // inputs, we don't try to fix it necessarily -- we'll recurse and see it in
9146 // the next pass. However, if we have a 2<-2 in the BToB and AToB inputs, it
9147 // is essential that we don't *create* a 3<-1 as then we might oscillate.
9148 if (BToBInputs.size() == 2 && AToBInputs.size() == 2) {
9149 // Compute how many inputs will be flipped by swapping these DWords. We need
9151 // to balance this to ensure we don't form a 3-1 shuffle in the other half.
9153 int NumFlippedAToBInputs =
9154 std::count(AToBInputs.begin(), AToBInputs.end(), 2 * ADWord) +
9155 std::count(AToBInputs.begin(), AToBInputs.end(), 2 * ADWord + 1);
9156 int NumFlippedBToBInputs =
9157 std::count(BToBInputs.begin(), BToBInputs.end(), 2 * BDWord) +
9158 std::count(BToBInputs.begin(), BToBInputs.end(), 2 * BDWord + 1);
9159 if ((NumFlippedAToBInputs == 1 &&
9160 (NumFlippedBToBInputs == 0 || NumFlippedBToBInputs == 2)) ||
9161 (NumFlippedBToBInputs == 1 &&
9162 (NumFlippedAToBInputs == 0 || NumFlippedAToBInputs == 2))) {
9163 // We choose whether to fix the A half or B half based on whether that
9164 // half has zero flipped inputs. At zero, we may not be able to fix it
9165 // with that half. We also bias towards fixing the B half because that
9166 // will more commonly be the high half, and we have to bias one way.
9167 auto FixFlippedInputs = [&V, &DL, &Mask, &DAG](int PinnedIdx, int DWord,
9168 ArrayRef<int> Inputs) {
9169 int FixIdx = PinnedIdx ^ 1; // The adjacent slot to the pinned slot.
9170 bool IsFixIdxInput = std::find(Inputs.begin(), Inputs.end(),
9171 PinnedIdx ^ 1) != Inputs.end();
9172 // Determine whether the free index is in the flipped dword or the
9173 // unflipped dword based on where the pinned index is. We use this bit
9174 // in an xor to conditionally select the adjacent dword.
9175 int FixFreeIdx = 2 * (DWord ^ (PinnedIdx / 2 == DWord));
9176 bool IsFixFreeIdxInput = std::find(Inputs.begin(), Inputs.end(),
9177 FixFreeIdx) != Inputs.end();
9178 if (IsFixIdxInput == IsFixFreeIdxInput)
9180 IsFixFreeIdxInput = std::find(Inputs.begin(), Inputs.end(),
9181 FixFreeIdx) != Inputs.end();
9182 assert(IsFixIdxInput != IsFixFreeIdxInput &&
9183 "We need to be changing the number of flipped inputs!");
9184 int PSHUFHalfMask[] = {0, 1, 2, 3};
9185 std::swap(PSHUFHalfMask[FixFreeIdx % 4], PSHUFHalfMask[FixIdx % 4]);
9186 V = DAG.getNode(FixIdx < 4 ? X86ISD::PSHUFLW : X86ISD::PSHUFHW, DL,
9188 getV4X86ShuffleImm8ForMask(PSHUFHalfMask, DAG));
9191 if (M != -1 && M == FixIdx)
9193 else if (M != -1 && M == FixFreeIdx)
9196 if (NumFlippedBToBInputs != 0) {
9198 BToAInputs.size() == 3 ? TripleNonInputIdx : OneInput;
9199 FixFlippedInputs(BPinnedIdx, BDWord, BToBInputs);
9201 assert(NumFlippedAToBInputs != 0 && "Impossible given predicates!");
9203 AToAInputs.size() == 3 ? TripleNonInputIdx : OneInput;
9204 FixFlippedInputs(APinnedIdx, ADWord, AToBInputs);
9209 int PSHUFDMask[] = {0, 1, 2, 3};
9210 PSHUFDMask[ADWord] = BDWord;
9211 PSHUFDMask[BDWord] = ADWord;
9212 V = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16,
9213 DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32,
9214 DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, V),
9215 getV4X86ShuffleImm8ForMask(PSHUFDMask, DAG)));
9217 // Adjust the mask to match the new locations of A and B.
9219 if (M != -1 && M/2 == ADWord)
9220 M = 2 * BDWord + M % 2;
9221 else if (M != -1 && M/2 == BDWord)
9222 M = 2 * ADWord + M % 2;
9224 // Recurse back into this routine to re-compute state now that this isn't
9225 // a 3 and 1 problem.
9226 return DAG.getVectorShuffle(MVT::v8i16, DL, V, DAG.getUNDEF(MVT::v8i16),
9229 if ((NumLToL == 3 && NumHToL == 1) || (NumLToL == 1 && NumHToL == 3))
9230 return balanceSides(LToLInputs, HToLInputs, HToHInputs, LToHInputs, 0, 4);
9231 else if ((NumHToH == 3 && NumLToH == 1) || (NumHToH == 1 && NumLToH == 3))
9232 return balanceSides(HToHInputs, LToHInputs, LToLInputs, HToLInputs, 4, 0);
9234 // At this point there are at most two inputs to the low and high halves from
9235 // each half. That means the inputs can always be grouped into dwords and
9236 // those dwords can then be moved to the correct half with a dword shuffle.
9237 // We use at most one low and one high word shuffle to collect these paired
9238 // inputs into dwords, and finally a dword shuffle to place them.
9239 int PSHUFLMask[4] = {-1, -1, -1, -1};
9240 int PSHUFHMask[4] = {-1, -1, -1, -1};
9241 int PSHUFDMask[4] = {-1, -1, -1, -1};
9243 // First fix the masks for all the inputs that are staying in their
9244 // original halves. This will then dictate the targets of the cross-half shuffles.
9246 auto fixInPlaceInputs =
9247 [&PSHUFDMask](ArrayRef<int> InPlaceInputs, ArrayRef<int> IncomingInputs,
9248 MutableArrayRef<int> SourceHalfMask,
9249 MutableArrayRef<int> HalfMask, int HalfOffset) {
9250 if (InPlaceInputs.empty())
9252 if (InPlaceInputs.size() == 1) {
9253 SourceHalfMask[InPlaceInputs[0] - HalfOffset] =
9254 InPlaceInputs[0] - HalfOffset;
9255 PSHUFDMask[InPlaceInputs[0] / 2] = InPlaceInputs[0] / 2;
9258 if (IncomingInputs.empty()) {
9259 // Just fix all of the in place inputs.
9260 for (int Input : InPlaceInputs) {
9261 SourceHalfMask[Input - HalfOffset] = Input - HalfOffset;
9262 PSHUFDMask[Input / 2] = Input / 2;
9267 assert(InPlaceInputs.size() == 2 && "Cannot handle 3 or 4 inputs!");
9268 SourceHalfMask[InPlaceInputs[0] - HalfOffset] =
9269 InPlaceInputs[0] - HalfOffset;
9270 // Put the second input next to the first so that they are packed into
9271 // a dword. We find the adjacent index by toggling the low bit.
9272 int AdjIndex = InPlaceInputs[0] ^ 1;
9273 SourceHalfMask[AdjIndex - HalfOffset] = InPlaceInputs[1] - HalfOffset;
9274 std::replace(HalfMask.begin(), HalfMask.end(), InPlaceInputs[1], AdjIndex);
9275 PSHUFDMask[AdjIndex / 2] = AdjIndex / 2;
9277 fixInPlaceInputs(LToLInputs, HToLInputs, PSHUFLMask, LoMask, 0);
9278 fixInPlaceInputs(HToHInputs, LToHInputs, PSHUFHMask, HiMask, 4);
9280 // Now gather the cross-half inputs and place them into a free dword of
9281 // their target half.
9282 // FIXME: This operation could almost certainly be simplified dramatically to
9283 // look more like the 3-1 fixing operation.
9284 auto moveInputsToRightHalf = [&PSHUFDMask](
9285 MutableArrayRef<int> IncomingInputs, ArrayRef<int> ExistingInputs,
9286 MutableArrayRef<int> SourceHalfMask, MutableArrayRef<int> HalfMask,
9287 MutableArrayRef<int> FinalSourceHalfMask, int SourceOffset,
9289 auto isWordClobbered = [](ArrayRef<int> SourceHalfMask, int Word) {
9290 return SourceHalfMask[Word] != -1 && SourceHalfMask[Word] != Word;
9292 auto isDWordClobbered = [&isWordClobbered](ArrayRef<int> SourceHalfMask,
9294 int LowWord = Word & ~1;
9295 int HighWord = Word | 1;
9296 return isWordClobbered(SourceHalfMask, LowWord) ||
9297 isWordClobbered(SourceHalfMask, HighWord);
9300 if (IncomingInputs.empty())
9303 if (ExistingInputs.empty()) {
9304 // Map any dwords with inputs from them into the right half.
9305 for (int Input : IncomingInputs) {
9306 // If the source half mask maps over the inputs, turn those into
9307 // swaps and use the swapped lane.
9308 if (isWordClobbered(SourceHalfMask, Input - SourceOffset)) {
9309 if (SourceHalfMask[SourceHalfMask[Input - SourceOffset]] == -1) {
9310 SourceHalfMask[SourceHalfMask[Input - SourceOffset]] =
9311 Input - SourceOffset;
9312 // We have to swap the uses in our half mask in one sweep.
9313 for (int &M : HalfMask)
9314 if (M == SourceHalfMask[Input - SourceOffset] + SourceOffset)
9316 else if (M == Input)
9317 M = SourceHalfMask[Input - SourceOffset] + SourceOffset;
9319 assert(SourceHalfMask[SourceHalfMask[Input - SourceOffset]] ==
9320 Input - SourceOffset &&
9321 "Previous placement doesn't match!");
9323 // Note that this correctly re-maps both when we do a swap and when
9324 // we observe the other side of the swap above. We rely on that to
9325 // avoid swapping the members of the input list directly.
9326 Input = SourceHalfMask[Input - SourceOffset] + SourceOffset;
9329 // Map the input's dword into the correct half.
9330 if (PSHUFDMask[(Input - SourceOffset + DestOffset) / 2] == -1)
9331 PSHUFDMask[(Input - SourceOffset + DestOffset) / 2] = Input / 2;
9333 assert(PSHUFDMask[(Input - SourceOffset + DestOffset) / 2] ==
9335 "Previous placement doesn't match!");
9338 // And just directly shift any other-half mask elements to be same-half
9339 // as we will have mirrored the dword containing the element into the
9340 // same position within that half.
9341 for (int &M : HalfMask)
9342 if (M >= SourceOffset && M < SourceOffset + 4) {
9343 M = M - SourceOffset + DestOffset;
9344 assert(M >= 0 && "This should never wrap below zero!");
9349 // Ensure we have the input in a viable dword of its current half. This
9350 // is particularly tricky because the original position may be clobbered
9351 // by inputs being moved and *staying* in that half.
9352 if (IncomingInputs.size() == 1) {
9353 if (isWordClobbered(SourceHalfMask, IncomingInputs[0] - SourceOffset)) {
9354 int InputFixed = std::find(std::begin(SourceHalfMask),
9355 std::end(SourceHalfMask), -1) -
9356 std::begin(SourceHalfMask) + SourceOffset;
9357 SourceHalfMask[InputFixed - SourceOffset] =
9358 IncomingInputs[0] - SourceOffset;
9359 std::replace(HalfMask.begin(), HalfMask.end(), IncomingInputs[0],
9361 IncomingInputs[0] = InputFixed;
9363 } else if (IncomingInputs.size() == 2) {
9364 if (IncomingInputs[0] / 2 != IncomingInputs[1] / 2 ||
9365 isDWordClobbered(SourceHalfMask, IncomingInputs[0] - SourceOffset)) {
9366 // We have two non-adjacent or clobbered inputs we need to extract from
9367 // the source half. To do this, we need to map them into some adjacent
9368 // dword slot in the source mask.
9369 int InputsFixed[2] = {IncomingInputs[0] - SourceOffset,
9370 IncomingInputs[1] - SourceOffset};
9372 // If there is a free slot in the source half mask adjacent to one of
9373 // the inputs, place the other input in it. We use (Index XOR 1) to
9374 // compute an adjacent index.
9375 if (!isWordClobbered(SourceHalfMask, InputsFixed[0]) &&
9376 SourceHalfMask[InputsFixed[0] ^ 1] == -1) {
9377 SourceHalfMask[InputsFixed[0]] = InputsFixed[0];
9378 SourceHalfMask[InputsFixed[0] ^ 1] = InputsFixed[1];
9379 InputsFixed[1] = InputsFixed[0] ^ 1;
9380 } else if (!isWordClobbered(SourceHalfMask, InputsFixed[1]) &&
9381 SourceHalfMask[InputsFixed[1] ^ 1] == -1) {
9382 SourceHalfMask[InputsFixed[1]] = InputsFixed[1];
9383 SourceHalfMask[InputsFixed[1] ^ 1] = InputsFixed[0];
9384 InputsFixed[0] = InputsFixed[1] ^ 1;
9385 } else if (SourceHalfMask[2 * ((InputsFixed[0] / 2) ^ 1)] == -1 &&
9386 SourceHalfMask[2 * ((InputsFixed[0] / 2) ^ 1) + 1] == -1) {
9387 // The two inputs are in the same DWord but it is clobbered and the
9388 // adjacent DWord isn't used at all. Move both inputs to the free
9389 // slot.
9390 SourceHalfMask[2 * ((InputsFixed[0] / 2) ^ 1)] = InputsFixed[0];
9391 SourceHalfMask[2 * ((InputsFixed[0] / 2) ^ 1) + 1] = InputsFixed[1];
9392 InputsFixed[0] = 2 * ((InputsFixed[0] / 2) ^ 1);
9393 InputsFixed[1] = 2 * ((InputsFixed[0] / 2) ^ 1) + 1;
9394 } else {
9395 // The only way we hit this point is if there is no clobbering
9396 // (because there are no off-half inputs to this half) and there is no
9397 // free slot adjacent to one of the inputs. In this case, we have to
9398 // swap an input with a non-input.
9399 for (int i = 0; i < 4; ++i)
9400 assert((SourceHalfMask[i] == -1 || SourceHalfMask[i] == i) &&
9401 "We can't handle any clobbers here!");
9402 assert(InputsFixed[1] != (InputsFixed[0] ^ 1) &&
9403 "Cannot have adjacent inputs here!");
9405 SourceHalfMask[InputsFixed[0] ^ 1] = InputsFixed[1];
9406 SourceHalfMask[InputsFixed[1]] = InputsFixed[0] ^ 1;
9408 // We also have to update the final source mask in this case because
9409 // it may need to undo the above swap.
9410 for (int &M : FinalSourceHalfMask)
9411 if (M == (InputsFixed[0] ^ 1) + SourceOffset)
9412 M = InputsFixed[1] + SourceOffset;
9413 else if (M == InputsFixed[1] + SourceOffset)
9414 M = (InputsFixed[0] ^ 1) + SourceOffset;
9416 InputsFixed[1] = InputsFixed[0] ^ 1;
9419 // Point everything at the fixed inputs.
9420 for (int &M : HalfMask)
9421 if (M == IncomingInputs[0])
9422 M = InputsFixed[0] + SourceOffset;
9423 else if (M == IncomingInputs[1])
9424 M = InputsFixed[1] + SourceOffset;
9426 IncomingInputs[0] = InputsFixed[0] + SourceOffset;
9427 IncomingInputs[1] = InputsFixed[1] + SourceOffset;
9428 }
9429 } else {
9430 llvm_unreachable("Unhandled input size!");
9433 // Now hoist the DWord down to the right half.
9434 int FreeDWord = (PSHUFDMask[DestOffset / 2] == -1 ? 0 : 1) + DestOffset / 2;
9435 assert(PSHUFDMask[FreeDWord] == -1 && "DWord not free");
9436 PSHUFDMask[FreeDWord] = IncomingInputs[0] / 2;
9437 for (int &M : HalfMask)
9438 for (int Input : IncomingInputs)
9439 if (M == Input)
9440 M = FreeDWord * 2 + Input % 2;
9442 moveInputsToRightHalf(HToLInputs, LToLInputs, PSHUFHMask, LoMask, HiMask,
9443 /*SourceOffset*/ 4, /*DestOffset*/ 0);
9444 moveInputsToRightHalf(LToHInputs, HToHInputs, PSHUFLMask, HiMask, LoMask,
9445 /*SourceOffset*/ 0, /*DestOffset*/ 4);
9447 // Now enact all the shuffles we've computed to move the inputs into their
9448 // target half.
9449 if (!isNoopShuffleMask(PSHUFLMask))
9450 V = DAG.getNode(X86ISD::PSHUFLW, DL, MVT::v8i16, V,
9451 getV4X86ShuffleImm8ForMask(PSHUFLMask, DAG));
9452 if (!isNoopShuffleMask(PSHUFHMask))
9453 V = DAG.getNode(X86ISD::PSHUFHW, DL, MVT::v8i16, V,
9454 getV4X86ShuffleImm8ForMask(PSHUFHMask, DAG));
9455 if (!isNoopShuffleMask(PSHUFDMask))
9456 V = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16,
9457 DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32,
9458 DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, V),
9459 getV4X86ShuffleImm8ForMask(PSHUFDMask, DAG)));
9461 // At this point, each half should contain all its inputs, and we can then
9462 // just shuffle them into their final position.
9463 assert(std::count_if(LoMask.begin(), LoMask.end(),
9464 [](int M) { return M >= 4; }) == 0 &&
9465 "Failed to lift all the high half inputs to the low mask!");
9466 assert(std::count_if(HiMask.begin(), HiMask.end(),
9467 [](int M) { return M >= 0 && M < 4; }) == 0 &&
9468 "Failed to lift all the low half inputs to the high mask!");
9470 // Do a half shuffle for the low mask.
9471 if (!isNoopShuffleMask(LoMask))
9472 V = DAG.getNode(X86ISD::PSHUFLW, DL, MVT::v8i16, V,
9473 getV4X86ShuffleImm8ForMask(LoMask, DAG));
9475 // Do a half shuffle with the high mask after shifting its values down.
9476 for (int &M : HiMask)
9477 if (M >= 0)
9478 M -= 4;
9479 if (!isNoopShuffleMask(HiMask))
9480 V = DAG.getNode(X86ISD::PSHUFHW, DL, MVT::v8i16, V,
9481 getV4X86ShuffleImm8ForMask(HiMask, DAG));
9486 /// \brief Detect whether the mask pattern should be lowered through
9487 /// interleaving.
9489 /// This essentially tests whether viewing the mask as an interleaving of two
9490 /// sub-sequences reduces the cross-input traffic of a blend operation. If so,
9491 /// lowering it through interleaving is a significantly better strategy.
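/// For example, the two-input v8i16 mask <0, 8, 1, 9, 2, 10, 3, 11> has zero
/// interleaved crosses (every even result element comes from V1 and every odd
/// one from V2) but four split crosses, so interleaving is the cheaper choice
/// for it.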
9492 static bool shouldLowerAsInterleaving(ArrayRef<int> Mask) {
9493 int NumEvenInputs[2] = {0, 0};
9494 int NumOddInputs[2] = {0, 0};
9495 int NumLoInputs[2] = {0, 0};
9496 int NumHiInputs[2] = {0, 0};
9497 for (int i = 0, Size = Mask.size(); i < Size; ++i) {
9498 if (Mask[i] < 0)
9499 continue;
9501 int InputIdx = Mask[i] >= Size;
9503 if (i < Size / 2)
9504 ++NumLoInputs[InputIdx];
9505 else
9506 ++NumHiInputs[InputIdx];
9508 if (i % 2 == 0)
9509 ++NumEvenInputs[InputIdx];
9510 else
9511 ++NumOddInputs[InputIdx];
9514 // The minimum number of cross-input results for both the interleaved and
9515 // split cases. If interleaving results in fewer cross-input results, return
9516 // true.
9517 int InterleavedCrosses = std::min(NumEvenInputs[1] + NumOddInputs[0],
9518 NumEvenInputs[0] + NumOddInputs[1]);
9519 int SplitCrosses = std::min(NumLoInputs[1] + NumHiInputs[0],
9520 NumLoInputs[0] + NumHiInputs[1]);
9521 return InterleavedCrosses < SplitCrosses;
9524 /// \brief Helper to form a PSHUFB-based shuffle+blend.
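/// PSHUFB selects each destination byte with a per-byte control: a control
/// byte with its high bit set (the 0x80 used below) zeroes that byte. The
/// helper scales the incoming element mask down to byte granularity, builds
/// one control vector per input (routing the other input's bytes to zero),
/// and ORs the two shuffled results together when both inputs are used.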
9525 static SDValue lowerVectorShuffleAsPSHUFB(SDLoc DL, MVT VT, SDValue V1,
9526 SDValue V2, ArrayRef<int> Mask,
9527 SelectionDAG &DAG, bool &V1InUse,
9528 bool &V2InUse) {
9529 SmallBitVector Zeroable = computeZeroableShuffleElements(Mask, V1, V2);
9530 SDValue V1Mask[16];
9531 SDValue V2Mask[16];
9532 V1InUse = false;
9533 V2InUse = false;
9535 int Size = Mask.size();
9536 int Scale = 16 / Size;
9537 for (int i = 0; i < 16; ++i) {
9538 if (Mask[i / Scale] == -1) {
9539 V1Mask[i] = V2Mask[i] = DAG.getUNDEF(MVT::i8);
9540 } else {
9541 const int ZeroMask = 0x80;
9542 int V1Idx = Mask[i / Scale] < Size ? Mask[i / Scale] * Scale + i % Scale
9543 : ZeroMask;
9544 int V2Idx = Mask[i / Scale] < Size
9545 ? ZeroMask
9546 : (Mask[i / Scale] - Size) * Scale + i % Scale;
9547 if (Zeroable[i / Scale])
9548 V1Idx = V2Idx = ZeroMask;
9549 V1Mask[i] = DAG.getConstant(V1Idx, MVT::i8);
9550 V2Mask[i] = DAG.getConstant(V2Idx, MVT::i8);
9551 V1InUse |= (ZeroMask != V1Idx);
9552 V2InUse |= (ZeroMask != V2Idx);
9556 if (V1InUse)
9557 V1 = DAG.getNode(X86ISD::PSHUFB, DL, MVT::v16i8,
9558 DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, V1),
9559 DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v16i8, V1Mask));
9560 if (V2InUse)
9561 V2 = DAG.getNode(X86ISD::PSHUFB, DL, MVT::v16i8,
9562 DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, V2),
9563 DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v16i8, V2Mask));
9565 // If we need shuffled inputs from both, blend the two.
9566 SDValue V;
9567 if (V1InUse && V2InUse)
9568 V = DAG.getNode(ISD::OR, DL, MVT::v16i8, V1, V2);
9569 else
9570 V = V1InUse ? V1 : V2;
9572 // Cast the result back to the correct type.
9573 return DAG.getNode(ISD::BITCAST, DL, VT, V);
9576 /// \brief Generic lowering of 8-lane i16 shuffles.
9578 /// This handles both single-input shuffles and combined shuffle/blends with
9579 /// two inputs. The single input shuffles are immediately delegated to
9580 /// a dedicated lowering routine.
9582 /// The blends are lowered in one of three fundamental ways. If there are few
9583 /// enough inputs, it delegates to a basic UNPCK-based strategy. If the shuffle
9584 /// of the input is significantly cheaper when lowered as an interleaving of
9585 /// the two inputs, try to interleave them. Otherwise, blend the low and high
9586 /// halves of the inputs separately (making them have relatively few inputs)
9587 /// and then concatenate them.
9588 static SDValue lowerV8I16VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
9589 const X86Subtarget *Subtarget,
9590 SelectionDAG &DAG) {
9591 SDLoc DL(Op);
9592 assert(Op.getSimpleValueType() == MVT::v8i16 && "Bad shuffle type!");
9593 assert(V1.getSimpleValueType() == MVT::v8i16 && "Bad operand type!");
9594 assert(V2.getSimpleValueType() == MVT::v8i16 && "Bad operand type!");
9595 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
9596 ArrayRef<int> OrigMask = SVOp->getMask();
9597 int MaskStorage[8] = {OrigMask[0], OrigMask[1], OrigMask[2], OrigMask[3],
9598 OrigMask[4], OrigMask[5], OrigMask[6], OrigMask[7]};
9599 MutableArrayRef<int> Mask(MaskStorage);
9601 assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
9603 // Whenever we can lower this as a zext, that instruction is strictly faster
9604 // than any alternative.
9605 if (SDValue ZExt = lowerVectorShuffleAsZeroOrAnyExtend(
9606 DL, MVT::v8i16, V1, V2, OrigMask, Subtarget, DAG))
9609 auto isV1 = [](int M) { return M >= 0 && M < 8; };
9610 auto isV2 = [](int M) { return M >= 8; };
9612 int NumV1Inputs = std::count_if(Mask.begin(), Mask.end(), isV1);
9613 int NumV2Inputs = std::count_if(Mask.begin(), Mask.end(), isV2);
9615 if (NumV2Inputs == 0)
9616 return lowerV8I16SingleInputVectorShuffle(DL, V1, Mask, Subtarget, DAG);
9618 assert(NumV1Inputs > 0 && "All single-input shuffles should be canonicalized "
9619 "to be V1-input shuffles.");
9621 // Try to use shift instructions.
9622 if (SDValue Shift =
9623 lowerVectorShuffleAsShift(DL, MVT::v8i16, V1, V2, Mask, DAG))
9624 return Shift;
9626 // There are special ways we can lower some single-element blends.
9627 if (NumV2Inputs == 1)
9628 if (SDValue V = lowerVectorShuffleAsElementInsertion(MVT::v8i16, DL, V1, V2,
9629 Mask, Subtarget, DAG))
9632 // We have different paths for blend lowering, but they all must use the
9633 // *exact* same predicate.
9634 bool IsBlendSupported = Subtarget->hasSSE41();
9635 if (IsBlendSupported)
9636 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v8i16, V1, V2, Mask,
9640 if (SDValue Masked =
9641 lowerVectorShuffleAsBitMask(DL, MVT::v8i16, V1, V2, Mask, DAG))
9644 // Use dedicated unpack instructions for masks that match their pattern.
9645 if (isShuffleEquivalent(V1, V2, Mask, 0, 8, 1, 9, 2, 10, 3, 11))
9646 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v8i16, V1, V2);
9647 if (isShuffleEquivalent(V1, V2, Mask, 4, 12, 5, 13, 6, 14, 7, 15))
9648 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v8i16, V1, V2);
9650 // Try to use byte rotation instructions.
9651 if (SDValue Rotate = lowerVectorShuffleAsByteRotate(
9652 DL, MVT::v8i16, V1, V2, Mask, Subtarget, DAG))
9655 if (SDValue BitBlend =
9656 lowerVectorShuffleAsBitBlend(DL, MVT::v8i16, V1, V2, Mask, DAG))
9659 // Check whether an interleaving lowering is likely to be more efficient.
9660 // This isn't perfect but it is a strong heuristic that tends to work well on
9661 // the kinds of shuffles that show up in practice.
9663 // FIXME: Handle 1x, 2x, and 4x interleaving.
9664 if (shouldLowerAsInterleaving(Mask)) {
9665 // FIXME: Figure out whether we should pack these into the low or high
9666 // halves.
9668 int EMask[8], OMask[8];
9669 for (int i = 0; i < 4; ++i) {
9670 EMask[i] = Mask[2*i];
9671 OMask[i] = Mask[2*i + 1];
9672 EMask[i + 4] = -1;
9673 OMask[i + 4] = -1;
9676 SDValue Evens = DAG.getVectorShuffle(MVT::v8i16, DL, V1, V2, EMask);
9677 SDValue Odds = DAG.getVectorShuffle(MVT::v8i16, DL, V1, V2, OMask);
9679 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v8i16, Evens, Odds);
9682 // Try to lower by permuting the inputs into an unpack instruction unless we
9683 // have direct support for blending.
9684 if (!IsBlendSupported) {
9685 if (SDValue Unpack =
9686 lowerVectorShuffleAsUnpack(MVT::v8i16, DL, V1, V2, Mask, DAG))
9689 // If we can use PSHUFB, that will be better as it can both shuffle and set
9690 // up an efficient blend.
9691 if (Subtarget->hasSSSE3()) {
9692 bool V1InUse, V2InUse;
9693 return lowerVectorShuffleAsPSHUFB(DL, MVT::v8i16, V1, V2, Mask, DAG,
9698 // We can always bit-blend if we have to so the fallback strategy is to
9699 // decompose into single-input permutes and blends.
9700 return lowerVectorShuffleAsDecomposedShuffleBlend(DL, MVT::v8i16, V1, V2,
9704 /// \brief Check whether a compaction lowering can be done by dropping even
9705 /// elements and compute how many times even elements must be dropped.
9707 /// This handles shuffles which take every Nth element where N is a power of
9708 /// two. Example shuffle masks:
9710 /// N = 1: 0, 2, 4, 6, 8, 10, 12, 14, 0, 2, 4, 6, 8, 10, 12, 14
9711 /// N = 1: 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30
9712 /// N = 2: 0, 4, 8, 12, 0, 4, 8, 12, 0, 4, 8, 12, 0, 4, 8, 12
9713 /// N = 2: 0, 4, 8, 12, 16, 20, 24, 28, 0, 4, 8, 12, 16, 20, 24, 28
9714 /// N = 3: 0, 8, 0, 8, 0, 8, 0, 8, 0, 8, 0, 8, 0, 8, 0, 8
9715 /// N = 3: 0, 8, 16, 24, 0, 8, 16, 24, 0, 8, 16, 24, 0, 8, 16, 24
9717 /// Any of these lanes can of course be undef.
9719 /// This routine only supports N <= 3.
9720 /// FIXME: Evaluate whether either AVX or AVX-512 has any opportunities here
9723 /// \returns N above, or the number of times even elements must be dropped if
9724 /// there is such a number. Otherwise returns zero.
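/// For example, with the single-input v16i8 mask <0, 4, 8, 12, 0, 4, 8, 12,
/// 0, 4, 8, 12, 0, 4, 8, 12> the modulus is 16 and every defined element
/// satisfies Mask[i] == (i << 2) % 16, so this returns N = 2.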
9725 static int canLowerByDroppingEvenElements(ArrayRef<int> Mask) {
9726 // Figure out whether we're looping over two inputs or just one.
9727 bool IsSingleInput = isSingleInputShuffleMask(Mask);
9729 // The modulus for the shuffle vector entries is based on whether this is
9730 // a single input or not.
9731 int ShuffleModulus = Mask.size() * (IsSingleInput ? 1 : 2);
9732 assert(isPowerOf2_32((uint32_t)ShuffleModulus) &&
9733 "We should only be called with masks with a power-of-2 size!");
9735 uint64_t ModMask = (uint64_t)ShuffleModulus - 1;
9737 // We track whether the input is viable for all power-of-2 strides 2^1, 2^2,
9738 // and 2^3 simultaneously. This is because we may have ambiguity with
9739 // partially undef inputs.
9740 bool ViableForN[3] = {true, true, true};
9742 for (int i = 0, e = Mask.size(); i < e; ++i) {
9743 // Ignore undef lanes, we'll optimistically collapse them to the pattern we
9744 // want.
9745 if (Mask[i] == -1)
9746 continue;
9748 bool IsAnyViable = false;
9749 for (unsigned j = 0; j != array_lengthof(ViableForN); ++j)
9750 if (ViableForN[j]) {
9751 uint64_t N = j + 1;
9753 // The shuffle mask must be equal to (i * 2^N) % M.
9754 if ((uint64_t)Mask[i] == (((uint64_t)i << N) & ModMask))
9755 IsAnyViable = true;
9756 else
9757 ViableForN[j] = false;
9759 // Early exit if we exhaust the possible powers of two.
9760 if (!IsAnyViable)
9761 break;
9764 for (unsigned j = 0; j != array_lengthof(ViableForN); ++j)
9765 if (ViableForN[j])
9766 return j + 1;
9768 // Return 0 as there is no viable power of two.
9769 return 0;
9772 /// \brief Generic lowering of v16i8 shuffles.
9774 /// This is a hybrid strategy to lower v16i8 vectors. It first attempts to
9775 /// detect any complexity reducing interleaving. If that doesn't help, it uses
9776 /// UNPCK to spread the i8 elements across two i16-element vectors, and uses
9777 /// the existing lowering for v8i16 blends on each half, finally PACK-ing them
9778 /// back together.
9779 static SDValue lowerV16I8VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
9780 const X86Subtarget *Subtarget,
9781 SelectionDAG &DAG) {
9782 SDLoc DL(Op);
9783 assert(Op.getSimpleValueType() == MVT::v16i8 && "Bad shuffle type!");
9784 assert(V1.getSimpleValueType() == MVT::v16i8 && "Bad operand type!");
9785 assert(V2.getSimpleValueType() == MVT::v16i8 && "Bad operand type!");
9786 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
9787 ArrayRef<int> Mask = SVOp->getMask();
9788 assert(Mask.size() == 16 && "Unexpected mask size for v16 shuffle!");
9790 // Try to use shift instructions.
9791 if (SDValue Shift =
9792 lowerVectorShuffleAsShift(DL, MVT::v16i8, V1, V2, Mask, DAG))
9793 return Shift;
9795 // Try to use byte rotation instructions.
9796 if (SDValue Rotate = lowerVectorShuffleAsByteRotate(
9797 DL, MVT::v16i8, V1, V2, Mask, Subtarget, DAG))
9800 // Try to use a zext lowering.
9801 if (SDValue ZExt = lowerVectorShuffleAsZeroOrAnyExtend(
9802 DL, MVT::v16i8, V1, V2, Mask, Subtarget, DAG))
9805 int NumV2Elements =
9806 std::count_if(Mask.begin(), Mask.end(), [](int M) { return M >= 16; });
9808 // For single-input shuffles, there are some nicer lowering tricks we can use.
9809 if (NumV2Elements == 0) {
9810 // Check for being able to broadcast a single element.
9811 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v16i8, DL, V1,
9812 Mask, Subtarget, DAG))
9815 // Check whether we can widen this to an i16 shuffle by duplicating bytes.
9816 // Notably, this handles splat and partial-splat shuffles more efficiently.
9817 // However, it only makes sense if the pre-duplication shuffle simplifies
9818 // things significantly. Currently, this means we need to be able to
9819 // express the pre-duplication shuffle as an i16 shuffle.
9821 // FIXME: We should check for other patterns which can be widened into an
9822 // i16 shuffle as well.
9823 auto canWidenViaDuplication = [](ArrayRef<int> Mask) {
9824 for (int i = 0; i < 16; i += 2)
9825 if (Mask[i] != -1 && Mask[i + 1] != -1 && Mask[i] != Mask[i + 1])
9830 auto tryToWidenViaDuplication = [&]() -> SDValue {
9831 if (!canWidenViaDuplication(Mask))
9833 SmallVector<int, 4> LoInputs;
9834 std::copy_if(Mask.begin(), Mask.end(), std::back_inserter(LoInputs),
9835 [](int M) { return M >= 0 && M < 8; });
9836 std::sort(LoInputs.begin(), LoInputs.end());
9837 LoInputs.erase(std::unique(LoInputs.begin(), LoInputs.end()),
9839 SmallVector<int, 4> HiInputs;
9840 std::copy_if(Mask.begin(), Mask.end(), std::back_inserter(HiInputs),
9841 [](int M) { return M >= 8; });
9842 std::sort(HiInputs.begin(), HiInputs.end());
9843 HiInputs.erase(std::unique(HiInputs.begin(), HiInputs.end()),
9846 bool TargetLo = LoInputs.size() >= HiInputs.size();
9847 ArrayRef<int> InPlaceInputs = TargetLo ? LoInputs : HiInputs;
9848 ArrayRef<int> MovingInputs = TargetLo ? HiInputs : LoInputs;
9850 int PreDupI16Shuffle[] = {-1, -1, -1, -1, -1, -1, -1, -1};
9851 SmallDenseMap<int, int, 8> LaneMap;
9852 for (int I : InPlaceInputs) {
9853 PreDupI16Shuffle[I/2] = I/2;
9854 LaneMap[I] = I;
9856 int j = TargetLo ? 0 : 4, je = j + 4;
9857 for (int i = 0, ie = MovingInputs.size(); i < ie; ++i) {
9858 // Check if j is already a shuffle of this input. This happens when
9859 // there are two adjacent bytes after we move the low one.
9860 if (PreDupI16Shuffle[j] != MovingInputs[i] / 2) {
9861 // If we haven't yet mapped the input, search for a slot into which
9862 // it can be mapped.
9863 while (j < je && PreDupI16Shuffle[j] != -1)
9864 ++j;
9866 if (j == je)
9867 // We can't place the inputs into a single half with a simple i16 shuffle, so bail.
9868 return SDValue();
9870 // Map this input with the i16 shuffle.
9871 PreDupI16Shuffle[j] = MovingInputs[i] / 2;
9874 // Update the lane map based on the mapping we ended up with.
9875 LaneMap[MovingInputs[i]] = 2 * j + MovingInputs[i] % 2;
9877 V1 = DAG.getNode(
9878 ISD::BITCAST, DL, MVT::v16i8,
9879 DAG.getVectorShuffle(MVT::v8i16, DL,
9880 DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, V1),
9881 DAG.getUNDEF(MVT::v8i16), PreDupI16Shuffle));
9883 // Unpack the bytes to form the i16s that will be shuffled into place.
9884 V1 = DAG.getNode(TargetLo ? X86ISD::UNPCKL : X86ISD::UNPCKH, DL,
9885 MVT::v16i8, V1, V1);
9887 int PostDupI16Shuffle[8] = {-1, -1, -1, -1, -1, -1, -1, -1};
9888 for (int i = 0; i < 16; ++i)
9889 if (Mask[i] != -1) {
9890 int MappedMask = LaneMap[Mask[i]] - (TargetLo ? 0 : 8);
9891 assert(MappedMask < 8 && "Invalid v8 shuffle mask!");
9892 if (PostDupI16Shuffle[i / 2] == -1)
9893 PostDupI16Shuffle[i / 2] = MappedMask;
9894 else
9895 assert(PostDupI16Shuffle[i / 2] == MappedMask &&
9896 "Conflicting entries in the original shuffle!");
9897 }
9898 return DAG.getNode(
9899 ISD::BITCAST, DL, MVT::v16i8,
9900 DAG.getVectorShuffle(MVT::v8i16, DL,
9901 DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, V1),
9902 DAG.getUNDEF(MVT::v8i16), PostDupI16Shuffle));
9904 if (SDValue V = tryToWidenViaDuplication())
9908 // Use dedicated unpack instructions for masks that match their pattern.
9909 if (isShuffleEquivalent(V1, V2, Mask,
9910 0, 16, 1, 17, 2, 18, 3, 19,
9911 4, 20, 5, 21, 6, 22, 7, 23))
9912 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v16i8, V1, V2);
9913 if (isShuffleEquivalent(V1, V2, Mask,
9914 8, 24, 9, 25, 10, 26, 11, 27,
9915 12, 28, 13, 29, 14, 30, 15, 31))
9916 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v16i8, V1, V2);
9918 // Check for SSSE3 which lets us lower all v16i8 shuffles much more directly
9919 // with PSHUFB. It is important to do this before we attempt to generate any
9920 // blends but after all of the single-input lowerings. If the single input
9921 // lowerings can find an instruction sequence that is faster than a PSHUFB, we
9922 // want to preserve that and we can DAG combine any longer sequences into
9923 // a PSHUFB in the end. But once we start blending from multiple inputs,
9924 // the complexity of DAG combining bad patterns back into PSHUFB is too high,
9925 // and there are *very* few patterns that would actually be faster than the
9926 // PSHUFB approach because of its ability to zero lanes.
9928 // FIXME: The only exceptions to the above are blends which are exact
9929 // interleavings with direct instructions supporting them. We currently don't
9930 // handle those well here.
9931 if (Subtarget->hasSSSE3()) {
9932 bool V1InUse = false;
9933 bool V2InUse = false;
9935 SDValue PSHUFB = lowerVectorShuffleAsPSHUFB(DL, MVT::v16i8, V1, V2, Mask,
9936 DAG, V1InUse, V2InUse);
9938 // If both V1 and V2 are in use and we can use a direct blend or an unpack,
9939 // do so. This avoids using them to handle blends-with-zero which is
9940 // important as a single pshufb is significantly faster for that.
9941 if (V1InUse && V2InUse) {
9942 if (Subtarget->hasSSE41())
9943 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v16i8, V1, V2,
9944 Mask, Subtarget, DAG))
9947 // We can use an unpack to do the blending rather than an or in some
9948 // cases. Even though the or may be (very minorly) more efficient, we
9949 // prefer this lowering because there are common cases where part of
9950 // the complexity of the shuffles goes away when we do the final blend as
9951 // an unpack.
9952 // FIXME: It might be worth trying to detect if the unpack-feeding
9953 // shuffles will both be pshufb, in which case we shouldn't bother with
9954 // the unpack.
9955 if (SDValue Unpack =
9956 lowerVectorShuffleAsUnpack(MVT::v16i8, DL, V1, V2, Mask, DAG))
9963 // There are special ways we can lower some single-element blends.
9964 if (NumV2Elements == 1)
9965 if (SDValue V = lowerVectorShuffleAsElementInsertion(MVT::v16i8, DL, V1, V2,
9966 Mask, Subtarget, DAG))
9969 if (SDValue BitBlend =
9970 lowerVectorShuffleAsBitBlend(DL, MVT::v16i8, V1, V2, Mask, DAG))
9973 // Check whether a compaction lowering can be done. This handles shuffles
9974 // which take every Nth element for some even N. See the helper function for
9977 // We special case these as they can be particularly efficiently handled with
9978 // the PACKUSWB instruction on x86 and they show up in common patterns of
9979 // rearranging bytes to truncate wide elements.
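// For example, with NumEvenDrops == 1 both inputs are ANDed with a v8i16
// splat of 0x00FF (clearing the odd bytes) and a single PACKUSWB then folds
// the two masked vectors down to the surviving even bytes.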
9980 if (int NumEvenDrops = canLowerByDroppingEvenElements(Mask)) {
9981 // NumEvenDrops is the power of two stride of the elements. Another way of
9982 // thinking about it is that we need to drop the even elements this many
9983 // times to get the original input.
9984 bool IsSingleInput = isSingleInputShuffleMask(Mask);
9986 // First we need to zero all the dropped bytes.
9987 assert(NumEvenDrops <= 3 &&
9988 "No support for dropping even elements more than 3 times.");
9989 // We use the mask type to pick which bytes are preserved based on how many
9990 // elements are dropped.
9991 MVT MaskVTs[] = { MVT::v8i16, MVT::v4i32, MVT::v2i64 };
9992 SDValue ByteClearMask =
9993 DAG.getNode(ISD::BITCAST, DL, MVT::v16i8,
9994 DAG.getConstant(0xFF, MaskVTs[NumEvenDrops - 1]));
9995 V1 = DAG.getNode(ISD::AND, DL, MVT::v16i8, V1, ByteClearMask);
9997 V2 = DAG.getNode(ISD::AND, DL, MVT::v16i8, V2, ByteClearMask);
9999 // Now pack things back together.
10000 V1 = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, V1);
10001 V2 = IsSingleInput ? V1 : DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, V2);
10002 SDValue Result = DAG.getNode(X86ISD::PACKUS, DL, MVT::v16i8, V1, V2);
10003 for (int i = 1; i < NumEvenDrops; ++i) {
10004 Result = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, Result);
10005 Result = DAG.getNode(X86ISD::PACKUS, DL, MVT::v16i8, Result, Result);
10011 // Handle multi-input cases by blending single-input shuffles.
10012 if (NumV2Elements > 0)
10013 return lowerVectorShuffleAsDecomposedShuffleBlend(DL, MVT::v16i8, V1, V2,
10016 // The fallback path for single-input shuffles widens this into two v8i16
10017 // vectors with unpacks, shuffles those, and then pulls them back together
10018 // with a pack.
10019 SDValue V = V1;
10021 int LoBlendMask[8] = {-1, -1, -1, -1, -1, -1, -1, -1};
10022 int HiBlendMask[8] = {-1, -1, -1, -1, -1, -1, -1, -1};
10023 for (int i = 0; i < 16; ++i)
10025 (i < 8 ? LoBlendMask[i] : HiBlendMask[i % 8]) = Mask[i];
10027 SDValue Zero = getZeroVector(MVT::v8i16, Subtarget, DAG, DL);
10029 SDValue VLoHalf, VHiHalf;
10030 // Check if any of the odd lanes in the v16i8 are used. If not, we can mask
10031 // them out and avoid using UNPCK{L,H} to extract the elements of V as
10033 if (std::none_of(std::begin(LoBlendMask), std::end(LoBlendMask),
10034 [](int M) { return M >= 0 && M % 2 == 1; }) &&
10035 std::none_of(std::begin(HiBlendMask), std::end(HiBlendMask),
10036 [](int M) { return M >= 0 && M % 2 == 1; })) {
10037 // Use a mask to drop the high bytes.
10038 VLoHalf = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, V);
10039 VLoHalf = DAG.getNode(ISD::AND, DL, MVT::v8i16, VLoHalf,
10040 DAG.getConstant(0x00FF, MVT::v8i16));
10042 // This will be a single vector shuffle instead of a blend so nuke VHiHalf.
10043 VHiHalf = DAG.getUNDEF(MVT::v8i16);
10045 // Squash the masks to point directly into VLoHalf.
10046 for (int &M : LoBlendMask)
10049 for (int &M : HiBlendMask)
10053 // Otherwise just unpack the low half of V into VLoHalf and the high half into
10054 // VHiHalf so that we can blend them as i16s.
10055 VLoHalf = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16,
10056 DAG.getNode(X86ISD::UNPCKL, DL, MVT::v16i8, V, Zero));
10057 VHiHalf = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16,
10058 DAG.getNode(X86ISD::UNPCKH, DL, MVT::v16i8, V, Zero));
10061 SDValue LoV = DAG.getVectorShuffle(MVT::v8i16, DL, VLoHalf, VHiHalf, LoBlendMask);
10062 SDValue HiV = DAG.getVectorShuffle(MVT::v8i16, DL, VLoHalf, VHiHalf, HiBlendMask);
10064 return DAG.getNode(X86ISD::PACKUS, DL, MVT::v16i8, LoV, HiV);
10067 /// \brief Dispatching routine to lower various 128-bit x86 vector shuffles.
10069 /// This routine breaks down the specific type of 128-bit shuffle and
10070 /// dispatches to the lowering routines accordingly.
10071 static SDValue lower128BitVectorShuffle(SDValue Op, SDValue V1, SDValue V2,
10072 MVT VT, const X86Subtarget *Subtarget,
10073 SelectionDAG &DAG) {
10074 switch (VT.SimpleTy) {
10075 case MVT::v2i64:
10076 return lowerV2I64VectorShuffle(Op, V1, V2, Subtarget, DAG);
10077 case MVT::v2f64:
10078 return lowerV2F64VectorShuffle(Op, V1, V2, Subtarget, DAG);
10079 case MVT::v4i32:
10080 return lowerV4I32VectorShuffle(Op, V1, V2, Subtarget, DAG);
10081 case MVT::v4f32:
10082 return lowerV4F32VectorShuffle(Op, V1, V2, Subtarget, DAG);
10083 case MVT::v8i16:
10084 return lowerV8I16VectorShuffle(Op, V1, V2, Subtarget, DAG);
10085 case MVT::v16i8:
10086 return lowerV16I8VectorShuffle(Op, V1, V2, Subtarget, DAG);
10088 default:
10089 llvm_unreachable("Unimplemented!");
10093 /// \brief Helper function to test whether a shuffle mask could be
10094 /// simplified by widening the elements being shuffled.
10096 /// Appends the mask for wider elements in WidenedMask if valid. Otherwise
10097 /// leaves it in an unspecified state.
10099 /// NOTE: This must handle normal vector shuffle masks and *target* vector
10100 /// shuffle masks. The latter have the special property of a '-2' representing
10101 /// a zero-ed lane of a vector.
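/// For example, the v4 mask <0, 1, 6, 7> widens to the v2 mask <0, 3>, while
/// <0, 2, 4, 6> cannot be widened because its element pairs are not adjacent.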
10102 static bool canWidenShuffleElements(ArrayRef<int> Mask,
10103 SmallVectorImpl<int> &WidenedMask) {
10104 for (int i = 0, Size = Mask.size(); i < Size; i += 2) {
10105 // If both elements are undef, its trivial.
10106 if (Mask[i] == SM_SentinelUndef && Mask[i + 1] == SM_SentinelUndef) {
10107 WidenedMask.push_back(SM_SentinelUndef);
10111 // Check for an undef mask and a mask value properly aligned to fit with
10112 // a pair of values. If we find such a case, use the non-undef mask's value.
10113 if (Mask[i] == SM_SentinelUndef && Mask[i + 1] >= 0 && Mask[i + 1] % 2 == 1) {
10114 WidenedMask.push_back(Mask[i + 1] / 2);
10117 if (Mask[i + 1] == SM_SentinelUndef && Mask[i] >= 0 && Mask[i] % 2 == 0) {
10118 WidenedMask.push_back(Mask[i] / 2);
10122 // When zeroing, we need to spread the zeroing across both lanes to widen.
10123 if (Mask[i] == SM_SentinelZero || Mask[i + 1] == SM_SentinelZero) {
10124 if ((Mask[i] == SM_SentinelZero || Mask[i] == SM_SentinelUndef) &&
10125 (Mask[i + 1] == SM_SentinelZero || Mask[i + 1] == SM_SentinelUndef)) {
10126 WidenedMask.push_back(SM_SentinelZero);
10132 // Finally check if the two mask values are adjacent and aligned with
10134 if (Mask[i] != SM_SentinelUndef && Mask[i] % 2 == 0 && Mask[i] + 1 == Mask[i + 1]) {
10135 WidenedMask.push_back(Mask[i] / 2);
10139 // Otherwise we can't safely widen the elements used in this shuffle.
10142 assert(WidenedMask.size() == Mask.size() / 2 &&
10143 "Incorrect size of mask after widening the elements!");
10148 /// \brief Generic routine to split vector shuffle into half-sized shuffles.
10150 /// This routine just extracts two subvectors, shuffles them independently, and
10151 /// then concatenates them back together. This should work effectively with all
10152 /// AVX vector shuffle types.
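/// For example, a v8i32 shuffle is decomposed into two independent v4i32
/// half shuffles (one per output half) whose results are glued back together
/// with a CONCAT_VECTORS node.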
10153 static SDValue splitAndLowerVectorShuffle(SDLoc DL, MVT VT, SDValue V1,
10154 SDValue V2, ArrayRef<int> Mask,
10155 SelectionDAG &DAG) {
10156 assert(VT.getSizeInBits() >= 256 &&
10157 "Only for 256-bit or wider vector shuffles!");
10158 assert(V1.getSimpleValueType() == VT && "Bad operand type!");
10159 assert(V2.getSimpleValueType() == VT && "Bad operand type!");
10161 ArrayRef<int> LoMask = Mask.slice(0, Mask.size() / 2);
10162 ArrayRef<int> HiMask = Mask.slice(Mask.size() / 2);
10164 int NumElements = VT.getVectorNumElements();
10165 int SplitNumElements = NumElements / 2;
10166 MVT ScalarVT = VT.getScalarType();
10167 MVT SplitVT = MVT::getVectorVT(ScalarVT, NumElements / 2);
10169 // Rather than splitting build-vectors, just build two narrower build
10170 // vectors. This helps shuffling with splats and zeros.
10171 auto SplitVector = [&](SDValue V) {
10172 while (V.getOpcode() == ISD::BITCAST)
10173 V = V->getOperand(0);
10175 MVT OrigVT = V.getSimpleValueType();
10176 int OrigNumElements = OrigVT.getVectorNumElements();
10177 int OrigSplitNumElements = OrigNumElements / 2;
10178 MVT OrigScalarVT = OrigVT.getScalarType();
10179 MVT OrigSplitVT = MVT::getVectorVT(OrigScalarVT, OrigNumElements / 2);
10183 auto *BV = dyn_cast<BuildVectorSDNode>(V);
10185 LoV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, OrigSplitVT, V,
10186 DAG.getIntPtrConstant(0));
10187 HiV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, OrigSplitVT, V,
10188 DAG.getIntPtrConstant(OrigSplitNumElements));
10191 SmallVector<SDValue, 16> LoOps, HiOps;
10192 for (int i = 0; i < OrigSplitNumElements; ++i) {
10193 LoOps.push_back(BV->getOperand(i));
10194 HiOps.push_back(BV->getOperand(i + OrigSplitNumElements));
10196 LoV = DAG.getNode(ISD::BUILD_VECTOR, DL, OrigSplitVT, LoOps);
10197 HiV = DAG.getNode(ISD::BUILD_VECTOR, DL, OrigSplitVT, HiOps);
10199 return std::make_pair(DAG.getNode(ISD::BITCAST, DL, SplitVT, LoV),
10200 DAG.getNode(ISD::BITCAST, DL, SplitVT, HiV));
10203 SDValue LoV1, HiV1, LoV2, HiV2;
10204 std::tie(LoV1, HiV1) = SplitVector(V1);
10205 std::tie(LoV2, HiV2) = SplitVector(V2);
10207 // Now create two 4-way blends of these half-width vectors.
10208 auto HalfBlend = [&](ArrayRef<int> HalfMask) {
10209 bool UseLoV1 = false, UseHiV1 = false, UseLoV2 = false, UseHiV2 = false;
10210 SmallVector<int, 32> V1BlendMask, V2BlendMask, BlendMask;
10211 for (int i = 0; i < SplitNumElements; ++i) {
10212 int M = HalfMask[i];
10213 if (M >= NumElements) {
10214 if (M >= NumElements + SplitNumElements)
10218 V2BlendMask.push_back(M - NumElements);
10219 V1BlendMask.push_back(-1);
10220 BlendMask.push_back(SplitNumElements + i);
10221 } else if (M >= 0) {
10222 if (M >= SplitNumElements)
10226 V2BlendMask.push_back(-1);
10227 V1BlendMask.push_back(M);
10228 BlendMask.push_back(i);
10230 V2BlendMask.push_back(-1);
10231 V1BlendMask.push_back(-1);
10232 BlendMask.push_back(-1);
10236 // Because the lowering happens after all combining takes place, we need to
10237 // manually combine these blend masks as much as possible so that we create
10238 // a minimal number of high-level vector shuffle nodes.
10240 // First try just blending the halves of V1 or V2.
10241 if (!UseLoV1 && !UseHiV1 && !UseLoV2 && !UseHiV2)
10242 return DAG.getUNDEF(SplitVT);
10243 if (!UseLoV2 && !UseHiV2)
10244 return DAG.getVectorShuffle(SplitVT, DL, LoV1, HiV1, V1BlendMask);
10245 if (!UseLoV1 && !UseHiV1)
10246 return DAG.getVectorShuffle(SplitVT, DL, LoV2, HiV2, V2BlendMask);
10248 SDValue V1Blend, V2Blend;
10249 if (UseLoV1 && UseHiV1) {
10251 DAG.getVectorShuffle(SplitVT, DL, LoV1, HiV1, V1BlendMask);
10253 // We only use half of V1 so map the usage down into the final blend mask.
10254 V1Blend = UseLoV1 ? LoV1 : HiV1;
10255 for (int i = 0; i < SplitNumElements; ++i)
10256 if (BlendMask[i] >= 0 && BlendMask[i] < SplitNumElements)
10257 BlendMask[i] = V1BlendMask[i] - (UseLoV1 ? 0 : SplitNumElements);
10259 if (UseLoV2 && UseHiV2) {
10261 DAG.getVectorShuffle(SplitVT, DL, LoV2, HiV2, V2BlendMask);
10263 // We only use half of V2 so map the usage down into the final blend mask.
10264 V2Blend = UseLoV2 ? LoV2 : HiV2;
10265 for (int i = 0; i < SplitNumElements; ++i)
10266 if (BlendMask[i] >= SplitNumElements)
10267 BlendMask[i] = V2BlendMask[i] + (UseLoV2 ? SplitNumElements : 0);
10269 return DAG.getVectorShuffle(SplitVT, DL, V1Blend, V2Blend, BlendMask);
10271 SDValue Lo = HalfBlend(LoMask);
10272 SDValue Hi = HalfBlend(HiMask);
10273 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Lo, Hi);
10276 /// \brief Either split a vector in halves or decompose the shuffles and the
10277 /// blends.
10279 /// This is provided as a good fallback for many lowerings of non-single-input
10280 /// shuffles with more than one 128-bit lane. In those cases, we want to select
10281 /// between splitting the shuffle into 128-bit components and stitching those
10282 /// back together vs. extracting the single-input shuffles and blending those
10283 /// results.
10284 static SDValue lowerVectorShuffleAsSplitOrBlend(SDLoc DL, MVT VT, SDValue V1,
10285 SDValue V2, ArrayRef<int> Mask,
10286 SelectionDAG &DAG) {
10287 assert(!isSingleInputShuffleMask(Mask) && "This routine must not be used to "
10288 "lower single-input shuffles as it "
10289 "could then recurse on itself.");
10290 int Size = Mask.size();
10292 // If this can be modeled as a broadcast of two elements followed by a blend,
10293 // prefer that lowering. This is especially important because broadcasts can
10294 // often fold with memory operands.
10295 auto DoBothBroadcast = [&] {
10296 int V1BroadcastIdx = -1, V2BroadcastIdx = -1;
10297 for (int M : Mask)
10298 if (M >= Size) {
10299 if (V2BroadcastIdx == -1)
10300 V2BroadcastIdx = M - Size;
10301 else if (M - Size != V2BroadcastIdx)
10303 } else if (M >= 0) {
10304 if (V1BroadcastIdx == -1)
10305 V1BroadcastIdx = M;
10306 else if (M != V1BroadcastIdx)
10311 if (DoBothBroadcast())
10312 return lowerVectorShuffleAsDecomposedShuffleBlend(DL, VT, V1, V2, Mask,
10315 // If the inputs all stem from a single 128-bit lane of each input, then we
10316 // split them rather than blending because the split will decompose to
10317 // unusually few instructions.
10318 int LaneCount = VT.getSizeInBits() / 128;
10319 int LaneSize = Size / LaneCount;
10320 SmallBitVector LaneInputs[2];
10321 LaneInputs[0].resize(LaneCount, false);
10322 LaneInputs[1].resize(LaneCount, false);
10323 for (int i = 0; i < Size; ++i)
10324 if (Mask[i] >= 0)
10325 LaneInputs[Mask[i] / Size][(Mask[i] % Size) / LaneSize] = true;
10326 if (LaneInputs[0].count() <= 1 && LaneInputs[1].count() <= 1)
10327 return splitAndLowerVectorShuffle(DL, VT, V1, V2, Mask, DAG);
10329 // Otherwise, just fall back to decomposed shuffles and a blend. This requires
10330 // that the decomposed single-input shuffles don't end up here.
10331 return lowerVectorShuffleAsDecomposedShuffleBlend(DL, VT, V1, V2, Mask, DAG);
10334 /// \brief Lower a vector shuffle crossing multiple 128-bit lanes as
10335 /// a permutation and blend of those lanes.
10337 /// This essentially blends the out-of-lane inputs to each lane into the lane
10338 /// from a permuted copy of the vector. This lowering strategy results in four
10339 /// instructions in the worst case for a single-input cross lane shuffle which
10340 /// is lower than any other fully general cross-lane shuffle strategy I'm aware
10341 /// of. Special cases for each particular shuffle pattern should be handled
10342 /// prior to trying this lowering.
10343 static SDValue lowerVectorShuffleAsLanePermuteAndBlend(SDLoc DL, MVT VT,
10344 SDValue V1, SDValue V2,
10345 ArrayRef<int> Mask,
10346 SelectionDAG &DAG) {
10347 // FIXME: This should probably be generalized for 512-bit vectors as well.
10348 assert(VT.getSizeInBits() == 256 && "Only for 256-bit vector shuffles!");
10349 int LaneSize = Mask.size() / 2;
10351 // If there are only inputs from one 128-bit lane, splitting will in fact be
10352 // less expensive. The flags track whether the given lane contains an element
10353 // that crosses to another lane.
10354 bool LaneCrossing[2] = {false, false};
10355 for (int i = 0, Size = Mask.size(); i < Size; ++i)
10356 if (Mask[i] >= 0 && (Mask[i] % Size) / LaneSize != i / LaneSize)
10357 LaneCrossing[(Mask[i] % Size) / LaneSize] = true;
10358 if (!LaneCrossing[0] || !LaneCrossing[1])
10359 return splitAndLowerVectorShuffle(DL, VT, V1, V2, Mask, DAG);
10361 if (isSingleInputShuffleMask(Mask)) {
10362 SmallVector<int, 32> FlippedBlendMask;
10363 for (int i = 0, Size = Mask.size(); i < Size; ++i)
10364 FlippedBlendMask.push_back(
10365 Mask[i] < 0 ? -1 : (((Mask[i] % Size) / LaneSize == i / LaneSize)
10366 ? Mask[i]
10367 : Mask[i] % LaneSize +
10368 (i / LaneSize) * LaneSize + Size));
10370 // Flip the vector, and blend the results which should now be in-lane. The
10371 // VPERM2X128 mask uses the low 2 bits for the low source and bits 4 and
10372 // 5 for the high source. The value 3 selects the high half of source 2 and
10373 // the value 2 selects the low half of source 2. We only use source 2 to
10374 // allow folding it into a memory operand.
10375 unsigned PERMMask = 3 | 2 << 4;
10376 SDValue Flipped = DAG.getNode(X86ISD::VPERM2X128, DL, VT, DAG.getUNDEF(VT),
10377 V1, DAG.getConstant(PERMMask, MVT::i8));
10378 return DAG.getVectorShuffle(VT, DL, V1, Flipped, FlippedBlendMask);
10381 // This now reduces to two single-input shuffles of V1 and V2 which at worst
10382 // will be handled by the above logic and a blend of the results, much like
10383 // other patterns in AVX.
10384 return lowerVectorShuffleAsDecomposedShuffleBlend(DL, VT, V1, V2, Mask, DAG);
10387 /// \brief Handle lowering 2-lane 128-bit shuffles.
10388 static SDValue lowerV2X128VectorShuffle(SDLoc DL, MVT VT, SDValue V1,
10389 SDValue V2, ArrayRef<int> Mask,
10390 const X86Subtarget *Subtarget,
10391 SelectionDAG &DAG) {
10392 // Blends are faster and handle all the non-lane-crossing cases.
10393 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, VT, V1, V2, Mask,
10397 MVT SubVT = MVT::getVectorVT(VT.getVectorElementType(),
10398 VT.getVectorNumElements() / 2);
10399 // Check for patterns which can be matched with a single insert of a 128-bit
10400 // subvector.
10401 if (isShuffleEquivalent(V1, V2, Mask, 0, 1, 0, 1) ||
10402 isShuffleEquivalent(V1, V2, Mask, 0, 1, 4, 5)) {
10403 SDValue LoV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT, V1,
10404 DAG.getIntPtrConstant(0));
10405 SDValue HiV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT,
10406 Mask[2] < 4 ? V1 : V2, DAG.getIntPtrConstant(0));
10407 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, LoV, HiV);
10409 if (isShuffleEquivalent(V1, V2, Mask, 0, 1, 6, 7)) {
10410 SDValue LoV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT, V1,
10411 DAG.getIntPtrConstant(0));
10412 SDValue HiV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT, V2,
10413 DAG.getIntPtrConstant(2));
10414 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, LoV, HiV);
10417 // Otherwise form a 128-bit permutation.
10418 // FIXME: Detect zero-vector inputs and use the VPERM2X128 to zero that half.
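// The VPERM2X128 immediate selects each destination half from the four
// 128-bit halves of the concatenated sources: bits [1:0] pick the source
// half for the low destination lane and bits [5:4] pick it for the high
// lane, which is what Mask[0] / 2 and Mask[2] / 2 compute below.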
10419 unsigned PermMask = Mask[0] / 2 | (Mask[2] / 2) << 4;
10420 return DAG.getNode(X86ISD::VPERM2X128, DL, VT, V1, V2,
10421 DAG.getConstant(PermMask, MVT::i8));
10424 /// \brief Lower a vector shuffle by first fixing the 128-bit lanes and then
10425 /// shuffling each lane.
10427 /// This will only succeed when the result of fixing the 128-bit lanes results
10428 /// in a single-input non-lane-crossing shuffle with a repeating shuffle mask in
10429 /// each 128-bit lane. This handles many cases where we can quickly blend away
10430 /// the lane crosses early and then use simpler shuffles within each lane.
10432 /// FIXME: It might be worthwhile at some point to support this without
10433 /// requiring the 128-bit lane-relative shuffles to be repeating, but currently
10434 /// in x86 only floating point has interesting non-repeating shuffles, and even
10435 /// those are still *marginally* more expensive.
10436 static SDValue lowerVectorShuffleByMerging128BitLanes(
10437 SDLoc DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
10438 const X86Subtarget *Subtarget, SelectionDAG &DAG) {
10439 assert(!isSingleInputShuffleMask(Mask) &&
10440 "This is only useful with multiple inputs.");
10442 int Size = Mask.size();
10443 int LaneSize = 128 / VT.getScalarSizeInBits();
10444 int NumLanes = Size / LaneSize;
10445 assert(NumLanes > 1 && "Only handles 256-bit and wider shuffles.");
10447 // See if we can build a hypothetical 128-bit lane-fixing shuffle mask. Also
10448 // check whether the in-128-bit lane shuffles share a repeating pattern.
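// Lanes[j] records which source 128-bit lane must feed destination lane j,
// while InLaneMask accumulates the per-lane element pattern that every lane
// has to share for this strategy to apply.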
10449 SmallVector<int, 4> Lanes;
10450 Lanes.resize(NumLanes, -1);
10451 SmallVector<int, 4> InLaneMask;
10452 InLaneMask.resize(LaneSize, -1);
10453 for (int i = 0; i < Size; ++i) {
10454 if (Mask[i] < 0)
10455 continue;
10457 int j = i / LaneSize;
10459 if (Lanes[j] < 0) {
10460 // First entry we've seen for this lane.
10461 Lanes[j] = Mask[i] / LaneSize;
10462 } else if (Lanes[j] != Mask[i] / LaneSize) {
10463 // This doesn't match the lane selected previously!
10467 // Check that within each lane we have a consistent shuffle mask.
10468 int k = i % LaneSize;
10469 if (InLaneMask[k] < 0) {
10470 InLaneMask[k] = Mask[i] % LaneSize;
10471 } else if (InLaneMask[k] != Mask[i] % LaneSize) {
10472 // This doesn't fit a repeating in-lane mask.
10477 // First shuffle the lanes into place.
10478 MVT LaneVT = MVT::getVectorVT(VT.isFloatingPoint() ? MVT::f64 : MVT::i64,
10479 VT.getSizeInBits() / 64);
10480 SmallVector<int, 8> LaneMask;
10481 LaneMask.resize(NumLanes * 2, -1);
10482 for (int i = 0; i < NumLanes; ++i)
10483 if (Lanes[i] >= 0) {
10484 LaneMask[2 * i + 0] = 2*Lanes[i] + 0;
10485 LaneMask[2 * i + 1] = 2*Lanes[i] + 1;
10488 V1 = DAG.getNode(ISD::BITCAST, DL, LaneVT, V1);
10489 V2 = DAG.getNode(ISD::BITCAST, DL, LaneVT, V2);
10490 SDValue LaneShuffle = DAG.getVectorShuffle(LaneVT, DL, V1, V2, LaneMask);
10492 // Cast it back to the type we actually want.
10493 LaneShuffle = DAG.getNode(ISD::BITCAST, DL, VT, LaneShuffle);
10495 // Now do a simple shuffle that isn't lane crossing.
10496 SmallVector<int, 8> NewMask;
10497 NewMask.resize(Size, -1);
10498 for (int i = 0; i < Size; ++i)
10499 if (Mask[i] >= 0)
10500 NewMask[i] = (i / LaneSize) * LaneSize + Mask[i] % LaneSize;
10501 assert(!is128BitLaneCrossingShuffleMask(VT, NewMask) &&
10502 "Must not introduce lane crosses at this point!");
10504 return DAG.getVectorShuffle(VT, DL, LaneShuffle, DAG.getUNDEF(VT), NewMask);
10507 /// \brief Test whether the specified input (0 or 1) is in-place blended by the
10510 /// This returns true if the elements from a particular input are already in the
10511 /// slot required by the given mask and require no permutation.
10512 static bool isShuffleMaskInputInPlace(int Input, ArrayRef<int> Mask) {
10513 assert((Input == 0 || Input == 1) && "Only two inputs to shuffles.");
10514 int Size = Mask.size();
10515 for (int i = 0; i < Size; ++i)
10516 if (Mask[i] >= 0 && Mask[i] / Size == Input && Mask[i] % Size != i)
10522 /// \brief Handle lowering of 4-lane 64-bit floating point shuffles.
10524 /// Also ends up handling lowering of 4-lane 64-bit integer shuffles when AVX2
10525 /// isn't available.
10526 static SDValue lowerV4F64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
10527 const X86Subtarget *Subtarget,
10528 SelectionDAG &DAG) {
10529 SDLoc DL(Op);
10530 assert(V1.getSimpleValueType() == MVT::v4f64 && "Bad operand type!");
10531 assert(V2.getSimpleValueType() == MVT::v4f64 && "Bad operand type!");
10532 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
10533 ArrayRef<int> Mask = SVOp->getMask();
10534 assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");
10536 SmallVector<int, 4> WidenedMask;
10537 if (canWidenShuffleElements(Mask, WidenedMask))
10538 return lowerV2X128VectorShuffle(DL, MVT::v4f64, V1, V2, Mask, Subtarget,
10541 if (isSingleInputShuffleMask(Mask)) {
10542 // Check for being able to broadcast a single element.
10543 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v4f64, DL, V1,
10544 Mask, Subtarget, DAG))
10547 // Use low duplicate instructions for masks that match their pattern.
10548 if (isShuffleEquivalent(V1, V2, Mask, 0, 0, 2, 2))
10549 return DAG.getNode(X86ISD::MOVDDUP, DL, MVT::v4f64, V1);
10551 if (!is128BitLaneCrossingShuffleMask(MVT::v4f64, Mask)) {
10552 // Non-half-crossing single input shuffles can be lowered with an
10553 // interleaved permutation.
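// VPERMILPD takes one immediate bit per double: bit i selects the low (0) or
// high (1) element within that result element's own 128-bit lane. With no
// lane crossing, Mask[0..1] lie in {0, 1} and Mask[2..3] in {2, 3}, so the
// comparisons against 1 and 3 below extract exactly those bits.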
10554 unsigned VPERMILPMask = (Mask[0] == 1) | ((Mask[1] == 1) << 1) |
10555 ((Mask[2] == 3) << 2) | ((Mask[3] == 3) << 3);
10556 return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v4f64, V1,
10557 DAG.getConstant(VPERMILPMask, MVT::i8));
10560 // With AVX2 we have direct support for this permutation.
10561 if (Subtarget->hasAVX2())
10562 return DAG.getNode(X86ISD::VPERMI, DL, MVT::v4f64, V1,
10563 getV4X86ShuffleImm8ForMask(Mask, DAG));
10565 // Otherwise, fall back.
10566 return lowerVectorShuffleAsLanePermuteAndBlend(DL, MVT::v4f64, V1, V2, Mask,
10570 // X86 has dedicated unpack instructions that can handle specific blend
10571 // operations: UNPCKH and UNPCKL.
10572 if (isShuffleEquivalent(V1, V2, Mask, 0, 4, 2, 6))
10573 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v4f64, V1, V2);
10574 if (isShuffleEquivalent(V1, V2, Mask, 1, 5, 3, 7))
10575 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v4f64, V1, V2);
10576 if (isShuffleEquivalent(V1, V2, Mask, 4, 0, 6, 2))
10577 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v4f64, V2, V1);
10578 if (isShuffleEquivalent(V1, V2, Mask, 5, 1, 7, 3))
10579 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v4f64, V2, V1);
10581 // If we have a single input to the zero element, insert that into V1 if we
10582 // can do so cheaply.
10583 int NumV2Elements =
10584 std::count_if(Mask.begin(), Mask.end(), [](int M) { return M >= 4; });
10585 if (NumV2Elements == 1 && Mask[0] >= 4)
10586 if (SDValue Insertion = lowerVectorShuffleAsElementInsertion(
10587 MVT::v4f64, DL, V1, V2, Mask, Subtarget, DAG))
10590 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v4f64, V1, V2, Mask,
10594 // Check if the blend happens to exactly fit that of SHUFPD.
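// SHUFPD interleaves its sources (even result elements come from the first
// operand, odd ones from the second) and its immediate bit i picks the low
// or high double within the matching 128-bit lane, hence the ranges checked
// below.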
10595 if ((Mask[0] == -1 || Mask[0] < 2) &&
10596 (Mask[1] == -1 || (Mask[1] >= 4 && Mask[1] < 6)) &&
10597 (Mask[2] == -1 || (Mask[2] >= 2 && Mask[2] < 4)) &&
10598 (Mask[3] == -1 || Mask[3] >= 6)) {
10599 unsigned SHUFPDMask = (Mask[0] == 1) | ((Mask[1] == 5) << 1) |
10600 ((Mask[2] == 3) << 2) | ((Mask[3] == 7) << 3);
10601 return DAG.getNode(X86ISD::SHUFP, DL, MVT::v4f64, V1, V2,
10602 DAG.getConstant(SHUFPDMask, MVT::i8));
10604 if ((Mask[0] == -1 || (Mask[0] >= 4 && Mask[0] < 6)) &&
10605 (Mask[1] == -1 || Mask[1] < 2) &&
10606 (Mask[2] == -1 || Mask[2] >= 6) &&
10607 (Mask[3] == -1 || (Mask[3] >= 2 && Mask[3] < 4))) {
10608 unsigned SHUFPDMask = (Mask[0] == 5) | ((Mask[1] == 1) << 1) |
10609 ((Mask[2] == 7) << 2) | ((Mask[3] == 3) << 3);
10610 return DAG.getNode(X86ISD::SHUFP, DL, MVT::v4f64, V2, V1,
10611 DAG.getConstant(SHUFPDMask, MVT::i8));
10614 // Try to simplify this by merging 128-bit lanes to enable a lane-based
10615 // shuffle. However, if we have AVX2 and either input is already in place,
10616 // we will be able to shuffle the other input even across lanes in a single
10617 // instruction so skip this pattern.
10618 if (!(Subtarget->hasAVX2() && (isShuffleMaskInputInPlace(0, Mask) ||
10619 isShuffleMaskInputInPlace(1, Mask))))
10620 if (SDValue Result = lowerVectorShuffleByMerging128BitLanes(
10621 DL, MVT::v4f64, V1, V2, Mask, Subtarget, DAG))
10624 // If we have AVX2 then we always want to lower with a blend because at v4 we
10625 // can fully permute the elements.
10626 if (Subtarget->hasAVX2())
10627 return lowerVectorShuffleAsDecomposedShuffleBlend(DL, MVT::v4f64, V1, V2,
10630 // Otherwise fall back on generic lowering.
10631 return lowerVectorShuffleAsSplitOrBlend(DL, MVT::v4f64, V1, V2, Mask, DAG);
10634 /// \brief Handle lowering of 4-lane 64-bit integer shuffles.
10636 /// This routine is only called when we have AVX2 and thus a reasonable
10637 /// instruction set for v4i64 shuffling.
10638 static SDValue lowerV4I64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
10639 const X86Subtarget *Subtarget,
10640 SelectionDAG &DAG) {
10641 SDLoc DL(Op);
10642 assert(V1.getSimpleValueType() == MVT::v4i64 && "Bad operand type!");
10643 assert(V2.getSimpleValueType() == MVT::v4i64 && "Bad operand type!");
10644 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
10645 ArrayRef<int> Mask = SVOp->getMask();
10646 assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");
10647 assert(Subtarget->hasAVX2() && "We can only lower v4i64 with AVX2!");
10649 SmallVector<int, 4> WidenedMask;
10650 if (canWidenShuffleElements(Mask, WidenedMask))
10651 return lowerV2X128VectorShuffle(DL, MVT::v4i64, V1, V2, Mask, Subtarget,
10654 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v4i64, V1, V2, Mask,
10658 // Check for being able to broadcast a single element.
10659 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v4i64, DL, V1,
10660 Mask, Subtarget, DAG))
10663 // When the shuffle is mirrored between the 128-bit lanes of the unit, we can
10664 // use lower latency instructions that will operate on both 128-bit lanes.
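// For example, a repeated v2i64 mask of <1, 0> becomes the v8i32 PSHUFD mask
// <2, 3, 0, 1> applied to the bitcast input, swapping the two halves of each
// 128-bit lane.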
10665 SmallVector<int, 2> RepeatedMask;
10666 if (is128BitLaneRepeatedShuffleMask(MVT::v4i64, Mask, RepeatedMask)) {
10667 if (isSingleInputShuffleMask(Mask)) {
10668 int PSHUFDMask[] = {-1, -1, -1, -1};
10669 for (int i = 0; i < 2; ++i)
10670 if (RepeatedMask[i] >= 0) {
10671 PSHUFDMask[2 * i] = 2 * RepeatedMask[i];
10672 PSHUFDMask[2 * i + 1] = 2 * RepeatedMask[i] + 1;
10674 return DAG.getNode(
10675 ISD::BITCAST, DL, MVT::v4i64,
10676 DAG.getNode(X86ISD::PSHUFD, DL, MVT::v8i32,
10677 DAG.getNode(ISD::BITCAST, DL, MVT::v8i32, V1),
10678 getV4X86ShuffleImm8ForMask(PSHUFDMask, DAG)));
10682 // AVX2 provides a direct instruction for permuting a single input across
10684 if (isSingleInputShuffleMask(Mask))
10685 return DAG.getNode(X86ISD::VPERMI, DL, MVT::v4i64, V1,
10686 getV4X86ShuffleImm8ForMask(Mask, DAG));
10688 // Try to use shift instructions.
10689 if (SDValue Shift =
10690 lowerVectorShuffleAsShift(DL, MVT::v4i64, V1, V2, Mask, DAG))
10693 // Use dedicated unpack instructions for masks that match their pattern.
10694 if (isShuffleEquivalent(V1, V2, Mask, 0, 4, 2, 6))
10695 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v4i64, V1, V2);
10696 if (isShuffleEquivalent(V1, V2, Mask, 1, 5, 3, 7))
10697 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v4i64, V1, V2);
10698 if (isShuffleEquivalent(V1, V2, Mask, 4, 0, 6, 2))
10699 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v4i64, V2, V1);
10700 if (isShuffleEquivalent(V1, V2, Mask, 5, 1, 7, 3))
10701 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v4i64, V2, V1);
10703 // Try to simplify this by merging 128-bit lanes to enable a lane-based
10704 // shuffle. However, if we have AVX2 and either input is already in place,
10705 // we will be able to shuffle the other input even across lanes in a single
10706 // instruction so skip this pattern.
10707 if (!(Subtarget->hasAVX2() && (isShuffleMaskInputInPlace(0, Mask) ||
10708 isShuffleMaskInputInPlace(1, Mask))))
10709 if (SDValue Result = lowerVectorShuffleByMerging128BitLanes(
10710 DL, MVT::v4i64, V1, V2, Mask, Subtarget, DAG))
10713 // Otherwise fall back on generic blend lowering.
10714 return lowerVectorShuffleAsDecomposedShuffleBlend(DL, MVT::v4i64, V1, V2,
10718 /// \brief Handle lowering of 8-lane 32-bit floating point shuffles.
10720 /// Also ends up handling lowering of 8-lane 32-bit integer shuffles when AVX2
10721 /// isn't available.
10722 static SDValue lowerV8F32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
10723 const X86Subtarget *Subtarget,
10724 SelectionDAG &DAG) {
10725 SDLoc DL(Op);
10726 assert(V1.getSimpleValueType() == MVT::v8f32 && "Bad operand type!");
10727 assert(V2.getSimpleValueType() == MVT::v8f32 && "Bad operand type!");
10728 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
10729 ArrayRef<int> Mask = SVOp->getMask();
10730 assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
10732 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v8f32, V1, V2, Mask,
10736 // Check for being able to broadcast a single element.
10737 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v8f32, DL, V1,
10738 Mask, Subtarget, DAG))
10741 // If the shuffle mask is repeated in each 128-bit lane, we have many more
10742 // options to efficiently lower the shuffle.
10743 SmallVector<int, 4> RepeatedMask;
10744 if (is128BitLaneRepeatedShuffleMask(MVT::v8f32, Mask, RepeatedMask)) {
10745 assert(RepeatedMask.size() == 4 &&
10746 "Repeated masks must be half the mask width!");
10748 // Use even/odd duplicate instructions for masks that match their pattern.
10749 if (isShuffleEquivalent(V1, V2, Mask, 0, 0, 2, 2, 4, 4, 6, 6))
10750 return DAG.getNode(X86ISD::MOVSLDUP, DL, MVT::v8f32, V1);
10751 if (isShuffleEquivalent(V1, V2, Mask, 1, 1, 3, 3, 5, 5, 7, 7))
10752 return DAG.getNode(X86ISD::MOVSHDUP, DL, MVT::v8f32, V1);
10754 if (isSingleInputShuffleMask(Mask))
10755 return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v8f32, V1,
10756 getV4X86ShuffleImm8ForMask(RepeatedMask, DAG));
10758 // Use dedicated unpack instructions for masks that match their pattern.
10759 if (isShuffleEquivalent(V1, V2, Mask, 0, 8, 1, 9, 4, 12, 5, 13))
10760 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v8f32, V1, V2);
10761 if (isShuffleEquivalent(V1, V2, Mask, 2, 10, 3, 11, 6, 14, 7, 15))
10762 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v8f32, V1, V2);
10763 if (isShuffleEquivalent(V1, V2, Mask, 8, 0, 9, 1, 12, 4, 13, 5))
10764 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v8f32, V2, V1);
10765 if (isShuffleEquivalent(V1, V2, Mask, 10, 2, 11, 3, 14, 6, 15, 7))
10766 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v8f32, V2, V1);
10768 // Otherwise, fall back to a SHUFPS sequence. Here it is important that we
10769 // have already handled any direct blends. We also need to squash the
10770 // repeated mask into a simulated v4f32 mask.
10771 for (int i = 0; i < 4; ++i)
10772 if (RepeatedMask[i] >= 8)
10773 RepeatedMask[i] -= 4;
10774 return lowerVectorShuffleWithSHUFPS(DL, MVT::v8f32, RepeatedMask, V1, V2, DAG);
10777 // If we have a single input shuffle with different shuffle patterns in the
10778 // two 128-bit lanes use the variable mask to VPERMILPS.
10779 if (isSingleInputShuffleMask(Mask)) {
10780 SDValue VPermMask[8];
10781 for (int i = 0; i < 8; ++i)
10782 VPermMask[i] = Mask[i] < 0 ? DAG.getUNDEF(MVT::i32)
10783 : DAG.getConstant(Mask[i], MVT::i32);
10784 if (!is128BitLaneCrossingShuffleMask(MVT::v8f32, Mask))
10785 return DAG.getNode(
10786 X86ISD::VPERMILPV, DL, MVT::v8f32, V1,
10787 DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v8i32, VPermMask));
10789 if (Subtarget->hasAVX2())
10790 return DAG.getNode(X86ISD::VPERMV, DL, MVT::v8f32,
10791 DAG.getNode(ISD::BITCAST, DL, MVT::v8f32,
10792 DAG.getNode(ISD::BUILD_VECTOR, DL,
10793 MVT::v8i32, VPermMask)),
10796 // Otherwise, fall back.
10797 return lowerVectorShuffleAsLanePermuteAndBlend(DL, MVT::v8f32, V1, V2, Mask,
10801 // Try to simplify this by merging 128-bit lanes to enable a lane-based
10803 if (SDValue Result = lowerVectorShuffleByMerging128BitLanes(
10804 DL, MVT::v8f32, V1, V2, Mask, Subtarget, DAG))
10807 // If we have AVX2 then we always want to lower with a blend because at v8 we
10808 // can fully permute the elements.
10809 if (Subtarget->hasAVX2())
10810 return lowerVectorShuffleAsDecomposedShuffleBlend(DL, MVT::v8f32, V1, V2,
10813 // Otherwise fall back on generic lowering.
10814 return lowerVectorShuffleAsSplitOrBlend(DL, MVT::v8f32, V1, V2, Mask, DAG);
10815 }
10817 /// \brief Handle lowering of 8-lane 32-bit integer shuffles.
10819 /// This routine is only called when we have AVX2 and thus a reasonable
10820 /// instruction set for v8i32 shuffling.
10821 static SDValue lowerV8I32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
10822 const X86Subtarget *Subtarget,
10823 SelectionDAG &DAG) {
10824 SDLoc DL(Op);
10825 assert(V1.getSimpleValueType() == MVT::v8i32 && "Bad operand type!");
10826 assert(V2.getSimpleValueType() == MVT::v8i32 && "Bad operand type!");
10827 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
10828 ArrayRef<int> Mask = SVOp->getMask();
10829 assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
10830 assert(Subtarget->hasAVX2() && "We can only lower v8i32 with AVX2!");
10832 // Whenever we can lower this as a zext, that instruction is strictly faster
10833 // than any alternative. It also allows us to fold memory operands into the
10834 // shuffle in many cases.
10835 if (SDValue ZExt = lowerVectorShuffleAsZeroOrAnyExtend(DL, MVT::v8i32, V1, V2,
10836 Mask, Subtarget, DAG))
10837 return ZExt;
10839 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v8i32, V1, V2, Mask,
10840 Subtarget, DAG))
10841 return Blend;
10843 // Check for being able to broadcast a single element.
10844 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v8i32, DL, V1,
10845 Mask, Subtarget, DAG))
10846 return Broadcast;
10848 // If the shuffle mask is repeated in each 128-bit lane we can use more
10849 // efficient instructions that mirror the shuffles across the two 128-bit
10850 // lanes.
10851 SmallVector<int, 4> RepeatedMask;
10852 if (is128BitLaneRepeatedShuffleMask(MVT::v8i32, Mask, RepeatedMask)) {
10853 assert(RepeatedMask.size() == 4 && "Unexpected repeated mask size!");
10854 if (isSingleInputShuffleMask(Mask))
10855 return DAG.getNode(X86ISD::PSHUFD, DL, MVT::v8i32, V1,
10856 getV4X86ShuffleImm8ForMask(RepeatedMask, DAG));
10858 // Use dedicated unpack instructions for masks that match their pattern.
10859 if (isShuffleEquivalent(V1, V2, Mask, 0, 8, 1, 9, 4, 12, 5, 13))
10860 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v8i32, V1, V2);
10861 if (isShuffleEquivalent(V1, V2, Mask, 2, 10, 3, 11, 6, 14, 7, 15))
10862 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v8i32, V1, V2);
10863 if (isShuffleEquivalent(V1, V2, Mask, 8, 0, 9, 1, 12, 4, 13, 5))
10864 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v8i32, V2, V1);
10865 if (isShuffleEquivalent(V1, V2, Mask, 10, 2, 11, 3, 14, 6, 15, 7))
10866 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v8i32, V2, V1);
10867 }
10869 // Try to use shift instructions.
10870 if (SDValue Shift =
10871 lowerVectorShuffleAsShift(DL, MVT::v8i32, V1, V2, Mask, DAG))
10872 return Shift;
10874 if (SDValue Rotate = lowerVectorShuffleAsByteRotate(
10875 DL, MVT::v8i32, V1, V2, Mask, Subtarget, DAG))
10876 return Rotate;
10878 // If the shuffle patterns aren't repeated but it is a single input, directly
10879 // generate a cross-lane VPERMD instruction.
10880 if (isSingleInputShuffleMask(Mask)) {
10881 SDValue VPermMask[8];
10882 for (int i = 0; i < 8; ++i)
10883 VPermMask[i] = Mask[i] < 0 ? DAG.getUNDEF(MVT::i32)
10884 : DAG.getConstant(Mask[i], MVT::i32);
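// X86ISD::VPERMV expects the index vector as its first operand and the data
// vector as its second, which is why the v8i32 mask built above is passed
// ahead of V1 below.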
10885 return DAG.getNode(
10886 X86ISD::VPERMV, DL, MVT::v8i32,
10887 DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v8i32, VPermMask), V1);
10888 }
10890 // Try to simplify this by merging 128-bit lanes to enable a lane-based
10891 // shuffle.
10892 if (SDValue Result = lowerVectorShuffleByMerging128BitLanes(
10893 DL, MVT::v8i32, V1, V2, Mask, Subtarget, DAG))
10894 return Result;
10896 // Otherwise fall back on generic blend lowering.
10897 return lowerVectorShuffleAsDecomposedShuffleBlend(DL, MVT::v8i32, V1, V2,
10898 Mask, DAG);
10899 }
10901 /// \brief Handle lowering of 16-lane 16-bit integer shuffles.
10903 /// This routine is only called when we have AVX2 and thus a reasonable
10904 /// instruction set for v16i16 shuffling.
10905 static SDValue lowerV16I16VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
10906 const X86Subtarget *Subtarget,
10907 SelectionDAG &DAG) {
10909 assert(V1.getSimpleValueType() == MVT::v16i16 && "Bad operand type!");
10910 assert(V2.getSimpleValueType() == MVT::v16i16 && "Bad operand type!");
10911 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
10912 ArrayRef<int> Mask = SVOp->getMask();
10913 assert(Mask.size() == 16 && "Unexpected mask size for v16 shuffle!");
10914 assert(Subtarget->hasAVX2() && "We can only lower v16i16 with AVX2!");
10916 // Whenever we can lower this as a zext, that instruction is strictly faster
10917 // than any alternative. It also allows us to fold memory operands into the
10918 // shuffle in many cases.
10919 if (SDValue ZExt = lowerVectorShuffleAsZeroOrAnyExtend(DL, MVT::v16i16, V1, V2,
10920 Mask, Subtarget, DAG))
10923 // Check for being able to broadcast a single element.
10924 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v16i16, DL, V1,
10925 Mask, Subtarget, DAG))
10928 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v16i16, V1, V2, Mask,
10932 // Use dedicated unpack instructions for masks that match their pattern.
10933 if (isShuffleEquivalent(V1, V2, Mask,
10934 // First 128-bit lane:
10935 0, 16, 1, 17, 2, 18, 3, 19,
10936 // Second 128-bit lane:
10937 8, 24, 9, 25, 10, 26, 11, 27))
10938 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v16i16, V1, V2);
10939 if (isShuffleEquivalent(V1, V2, Mask,
10940 // First 128-bit lane:
10941 4, 20, 5, 21, 6, 22, 7, 23,
10942 // Second 128-bit lane:
10943 12, 28, 13, 29, 14, 30, 15, 31))
10944 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v16i16, V1, V2);
10946 // Try to use shift instructions.
10947 if (SDValue Shift =
10948 lowerVectorShuffleAsShift(DL, MVT::v16i16, V1, V2, Mask, DAG))
10951 // Try to use byte rotation instructions.
10952 if (SDValue Rotate = lowerVectorShuffleAsByteRotate(
10953 DL, MVT::v16i16, V1, V2, Mask, Subtarget, DAG))
10956 if (isSingleInputShuffleMask(Mask)) {
10957 // There are no generalized cross-lane shuffle operations available on i16
10958 // element types.
10959 if (is128BitLaneCrossingShuffleMask(MVT::v16i16, Mask))
10960 return lowerVectorShuffleAsLanePermuteAndBlend(DL, MVT::v16i16, V1, V2,
10961 Mask, DAG);
10963 SDValue PSHUFBMask[32];
10964 for (int i = 0; i < 16; ++i) {
10965 if (Mask[i] == -1) {
10966 PSHUFBMask[2 * i] = PSHUFBMask[2 * i + 1] = DAG.getUNDEF(MVT::i8);
10967 continue;
10968 }
10970 int M = i < 8 ? Mask[i] : Mask[i] - 8;
10971 assert(M >= 0 && M < 8 && "Invalid single-input mask!");
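// (Illustrative: a word index M == 3 expands to the byte selectors 6 and 7
// below, picking up both bytes of that word within its 128-bit lane.)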
10972 PSHUFBMask[2 * i] = DAG.getConstant(2 * M, MVT::i8);
10973 PSHUFBMask[2 * i + 1] = DAG.getConstant(2 * M + 1, MVT::i8);
10974 }
10975 return DAG.getNode(
10976 ISD::BITCAST, DL, MVT::v16i16,
10978 X86ISD::PSHUFB, DL, MVT::v32i8,
10979 DAG.getNode(ISD::BITCAST, DL, MVT::v32i8, V1),
10980 DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v32i8, PSHUFBMask)));
10983 // Try to simplify this by merging 128-bit lanes to enable a lane-based
10985 if (SDValue Result = lowerVectorShuffleByMerging128BitLanes(
10986 DL, MVT::v16i16, V1, V2, Mask, Subtarget, DAG))
10989 // Otherwise fall back on generic lowering.
10990 return lowerVectorShuffleAsSplitOrBlend(DL, MVT::v16i16, V1, V2, Mask, DAG);
10993 /// \brief Handle lowering of 32-lane 8-bit integer shuffles.
10995 /// This routine is only called when we have AVX2 and thus a reasonable
10996 /// instruction set for v32i8 shuffling.
10997 static SDValue lowerV32I8VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
10998 const X86Subtarget *Subtarget,
10999 SelectionDAG &DAG) {
11001 assert(V1.getSimpleValueType() == MVT::v32i8 && "Bad operand type!");
11002 assert(V2.getSimpleValueType() == MVT::v32i8 && "Bad operand type!");
11003 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
11004 ArrayRef<int> Mask = SVOp->getMask();
11005 assert(Mask.size() == 32 && "Unexpected mask size for v32 shuffle!");
11006 assert(Subtarget->hasAVX2() && "We can only lower v32i8 with AVX2!");
11008 // Whenever we can lower this as a zext, that instruction is strictly faster
11009 // than any alternative. It also allows us to fold memory operands into the
11010 // shuffle in many cases.
11011 if (SDValue ZExt = lowerVectorShuffleAsZeroOrAnyExtend(DL, MVT::v32i8, V1, V2,
11012 Mask, Subtarget, DAG))
11015 // Check for being able to broadcast a single element.
11016 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v32i8, DL, V1,
11017 Mask, Subtarget, DAG))
11020 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v32i8, V1, V2, Mask,
11024 // Use dedicated unpack instructions for masks that match their pattern.
11025 // Note that these are repeated 128-bit lane unpacks, not unpacks across the
11026 // two 128-bit lanes.
11027 if (isShuffleEquivalent(
11028 V1, V2, Mask,
11029 // First 128-bit lane:
11030 0, 32, 1, 33, 2, 34, 3, 35, 4, 36, 5, 37, 6, 38, 7, 39,
11031 // Second 128-bit lane:
11032 16, 48, 17, 49, 18, 50, 19, 51, 20, 52, 21, 53, 22, 54, 23, 55))
11033 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v32i8, V1, V2);
11034 if (isShuffleEquivalent(
11035 V1, V2, Mask,
11036 // First 128-bit lane:
11037 8, 40, 9, 41, 10, 42, 11, 43, 12, 44, 13, 45, 14, 46, 15, 47,
11038 // Second 128-bit lane:
11039 24, 56, 25, 57, 26, 58, 27, 59, 28, 60, 29, 61, 30, 62, 31, 63))
11040 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v32i8, V1, V2);
11042 // Try to use shift instructions.
11043 if (SDValue Shift =
11044 lowerVectorShuffleAsShift(DL, MVT::v32i8, V1, V2, Mask, DAG))
11047 // Try to use byte rotation instructions.
11048 if (SDValue Rotate = lowerVectorShuffleAsByteRotate(
11049 DL, MVT::v32i8, V1, V2, Mask, Subtarget, DAG))
11052 if (isSingleInputShuffleMask(Mask)) {
11053 // There are no generalized cross-lane shuffle operations available on i8
11054 // element types.
11055 if (is128BitLaneCrossingShuffleMask(MVT::v32i8, Mask))
11056 return lowerVectorShuffleAsLanePermuteAndBlend(DL, MVT::v32i8, V1, V2,
11057 Mask, DAG);
11059 SDValue PSHUFBMask[32];
11060 for (int i = 0; i < 32; ++i)
11061 PSHUFBMask[i] =
11062 Mask[i] < 0
11063 ? DAG.getUNDEF(MVT::i8)
11064 : DAG.getConstant(Mask[i] < 16 ? Mask[i] : Mask[i] - 16, MVT::i8);
11066 return DAG.getNode(
11067 X86ISD::PSHUFB, DL, MVT::v32i8, V1,
11068 DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v32i8, PSHUFBMask));
11071 // Try to simplify this by merging 128-bit lanes to enable a lane-based
11073 if (SDValue Result = lowerVectorShuffleByMerging128BitLanes(
11074 DL, MVT::v32i8, V1, V2, Mask, Subtarget, DAG))
11077 // Otherwise fall back on generic lowering.
11078 return lowerVectorShuffleAsSplitOrBlend(DL, MVT::v32i8, V1, V2, Mask, DAG);
11081 /// \brief High-level routine to lower various 256-bit x86 vector shuffles.
11083 /// This routine either breaks down the specific type of a 256-bit x86 vector
11084 /// shuffle or splits it into two 128-bit shuffles and fuses the results back
11085 /// together based on the available instructions.
11086 static SDValue lower256BitVectorShuffle(SDValue Op, SDValue V1, SDValue V2,
11087 MVT VT, const X86Subtarget *Subtarget,
11088 SelectionDAG &DAG) {
11090 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
11091 ArrayRef<int> Mask = SVOp->getMask();
11093 // There is a really nice hard cut-over between AVX1 and AVX2 that means we can
11094 // check for those subtargets here and avoid much of the subtarget querying in
11095 // the per-vector-type lowering routines. With AVX1 we have essentially *zero*
11096 // ability to manipulate a 256-bit vector with integer types. Since we'll use
11097 // floating point types there eventually, just immediately cast everything to
11098 // a float and operate entirely in that domain.
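// (Illustrative: a v8i32 shuffle on AVX1 is bitcast to v8f32, shuffled in
// the floating-point domain, and bitcast back; v16i16 and v32i8 have no
// 256-bit floating-point equivalent and take the splitting path just below.)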
11099 if (VT.isInteger() && !Subtarget->hasAVX2()) {
11100 int ElementBits = VT.getScalarSizeInBits();
11101 if (ElementBits < 32)
11102 // No floating point type available, decompose into 128-bit vectors.
11103 return splitAndLowerVectorShuffle(DL, VT, V1, V2, Mask, DAG);
11105 MVT FpVT = MVT::getVectorVT(MVT::getFloatingPointVT(ElementBits),
11106 VT.getVectorNumElements());
11107 V1 = DAG.getNode(ISD::BITCAST, DL, FpVT, V1);
11108 V2 = DAG.getNode(ISD::BITCAST, DL, FpVT, V2);
11109 return DAG.getNode(ISD::BITCAST, DL, VT,
11110 DAG.getVectorShuffle(FpVT, DL, V1, V2, Mask));
11113 switch (VT.SimpleTy) {
11114 case MVT::v4f64:
11115 return lowerV4F64VectorShuffle(Op, V1, V2, Subtarget, DAG);
11116 case MVT::v4i64:
11117 return lowerV4I64VectorShuffle(Op, V1, V2, Subtarget, DAG);
11118 case MVT::v8f32:
11119 return lowerV8F32VectorShuffle(Op, V1, V2, Subtarget, DAG);
11120 case MVT::v8i32:
11121 return lowerV8I32VectorShuffle(Op, V1, V2, Subtarget, DAG);
11122 case MVT::v16i16:
11123 return lowerV16I16VectorShuffle(Op, V1, V2, Subtarget, DAG);
11124 case MVT::v32i8:
11125 return lowerV32I8VectorShuffle(Op, V1, V2, Subtarget, DAG);
11127 default:
11128 llvm_unreachable("Not a valid 256-bit x86 vector type!");
11129 }
11132 /// \brief Handle lowering of 8-lane 64-bit floating point shuffles.
11133 static SDValue lowerV8F64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
11134 const X86Subtarget *Subtarget,
11135 SelectionDAG &DAG) {
11137 assert(V1.getSimpleValueType() == MVT::v8f64 && "Bad operand type!");
11138 assert(V2.getSimpleValueType() == MVT::v8f64 && "Bad operand type!");
11139 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
11140 ArrayRef<int> Mask = SVOp->getMask();
11141 assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
11143 // X86 has dedicated unpack instructions that can handle specific blend
11144 // operations: UNPCKH and UNPCKL.
11145 if (isShuffleEquivalent(V1, V2, Mask, 0, 8, 2, 10, 4, 12, 6, 14))
11146 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v8f64, V1, V2);
11147 if (isShuffleEquivalent(V1, V2, Mask, 1, 9, 3, 11, 5, 13, 7, 15))
11148 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v8f64, V1, V2);
11150 // FIXME: Implement direct support for this type!
11151 return splitAndLowerVectorShuffle(DL, MVT::v8f64, V1, V2, Mask, DAG);
11154 /// \brief Handle lowering of 16-lane 32-bit floating point shuffles.
11155 static SDValue lowerV16F32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
11156 const X86Subtarget *Subtarget,
11157 SelectionDAG &DAG) {
11159 assert(V1.getSimpleValueType() == MVT::v16f32 && "Bad operand type!");
11160 assert(V2.getSimpleValueType() == MVT::v16f32 && "Bad operand type!");
11161 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
11162 ArrayRef<int> Mask = SVOp->getMask();
11163 assert(Mask.size() == 16 && "Unexpected mask size for v16 shuffle!");
11165 // Use dedicated unpack instructions for masks that match their pattern.
11166 if (isShuffleEquivalent(V1, V2, Mask,
11167 0, 16, 1, 17, 4, 20, 5, 21,
11168 8, 24, 9, 25, 12, 28, 13, 29))
11169 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v16f32, V1, V2);
11170 if (isShuffleEquivalent(V1, V2, Mask,
11171 2, 18, 3, 19, 6, 22, 7, 23,
11172 10, 26, 11, 27, 14, 30, 15, 31))
11173 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v16f32, V1, V2);
11175 // FIXME: Implement direct support for this type!
11176 return splitAndLowerVectorShuffle(DL, MVT::v16f32, V1, V2, Mask, DAG);
11179 /// \brief Handle lowering of 8-lane 64-bit integer shuffles.
11180 static SDValue lowerV8I64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
11181 const X86Subtarget *Subtarget,
11182 SelectionDAG &DAG) {
11184 assert(V1.getSimpleValueType() == MVT::v8i64 && "Bad operand type!");
11185 assert(V2.getSimpleValueType() == MVT::v8i64 && "Bad operand type!");
11186 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
11187 ArrayRef<int> Mask = SVOp->getMask();
11188 assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
11190 // X86 has dedicated unpack instructions that can handle specific blend
11191 // operations: UNPCKH and UNPCKL.
11192 if (isShuffleEquivalent(V1, V2, Mask, 0, 8, 2, 10, 4, 12, 6, 14))
11193 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v8i64, V1, V2);
11194 if (isShuffleEquivalent(V1, V2, Mask, 1, 9, 3, 11, 5, 13, 7, 15))
11195 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v8i64, V1, V2);
11197 // FIXME: Implement direct support for this type!
11198 return splitAndLowerVectorShuffle(DL, MVT::v8i64, V1, V2, Mask, DAG);
11201 /// \brief Handle lowering of 16-lane 32-bit integer shuffles.
11202 static SDValue lowerV16I32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
11203 const X86Subtarget *Subtarget,
11204 SelectionDAG &DAG) {
11206 assert(V1.getSimpleValueType() == MVT::v16i32 && "Bad operand type!");
11207 assert(V2.getSimpleValueType() == MVT::v16i32 && "Bad operand type!");
11208 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
11209 ArrayRef<int> Mask = SVOp->getMask();
11210 assert(Mask.size() == 16 && "Unexpected mask size for v16 shuffle!");
11212 // Use dedicated unpack instructions for masks that match their pattern.
11213 if (isShuffleEquivalent(V1, V2, Mask,
11214 0, 16, 1, 17, 4, 20, 5, 21,
11215 8, 24, 9, 25, 12, 28, 13, 29))
11216 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v16i32, V1, V2);
11217 if (isShuffleEquivalent(V1, V2, Mask,
11218 2, 18, 3, 19, 6, 22, 7, 23,
11219 10, 26, 11, 27, 14, 30, 15, 31))
11220 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v16i32, V1, V2);
11222 // FIXME: Implement direct support for this type!
11223 return splitAndLowerVectorShuffle(DL, MVT::v16i32, V1, V2, Mask, DAG);
11226 /// \brief Handle lowering of 32-lane 16-bit integer shuffles.
11227 static SDValue lowerV32I16VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
11228 const X86Subtarget *Subtarget,
11229 SelectionDAG &DAG) {
11231 assert(V1.getSimpleValueType() == MVT::v32i16 && "Bad operand type!");
11232 assert(V2.getSimpleValueType() == MVT::v32i16 && "Bad operand type!");
11233 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
11234 ArrayRef<int> Mask = SVOp->getMask();
11235 assert(Mask.size() == 32 && "Unexpected mask size for v32 shuffle!");
11236 assert(Subtarget->hasBWI() && "We can only lower v32i16 with AVX-512-BWI!");
11238 // FIXME: Implement direct support for this type!
11239 return splitAndLowerVectorShuffle(DL, MVT::v32i16, V1, V2, Mask, DAG);
11242 /// \brief Handle lowering of 64-lane 8-bit integer shuffles.
11243 static SDValue lowerV64I8VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
11244 const X86Subtarget *Subtarget,
11245 SelectionDAG &DAG) {
11247 assert(V1.getSimpleValueType() == MVT::v64i8 && "Bad operand type!");
11248 assert(V2.getSimpleValueType() == MVT::v64i8 && "Bad operand type!");
11249 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
11250 ArrayRef<int> Mask = SVOp->getMask();
11251 assert(Mask.size() == 64 && "Unexpected mask size for v64 shuffle!");
11252 assert(Subtarget->hasBWI() && "We can only lower v64i8 with AVX-512-BWI!");
11254 // FIXME: Implement direct support for this type!
11255 return splitAndLowerVectorShuffle(DL, MVT::v64i8, V1, V2, Mask, DAG);
11258 /// \brief High-level routine to lower various 512-bit x86 vector shuffles.
11260 /// This routine either breaks down the specific type of a 512-bit x86 vector
11261 /// shuffle or splits it into two 256-bit shuffles and fuses the results back
11262 /// together based on the available instructions.
11263 static SDValue lower512BitVectorShuffle(SDValue Op, SDValue V1, SDValue V2,
11264 MVT VT, const X86Subtarget *Subtarget,
11265 SelectionDAG &DAG) {
11267 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
11268 ArrayRef<int> Mask = SVOp->getMask();
11269 assert(Subtarget->hasAVX512() &&
11270 "Cannot lower 512-bit vectors w/ basic ISA!");
11272 // Check for being able to broadcast a single element.
11273 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(VT.SimpleTy, DL, V1,
11274 Mask, Subtarget, DAG))
11275 return Broadcast;
11277 // Dispatch to each element type for lowering. If we don't have support for
11278 // specific element type shuffles at 512 bits, immediately split them and
11279 // lower them. Each lowering routine of a given type is allowed to assume that
11280 // the requisite ISA extensions for that element type are available.
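// (Note: of the cases below, v32i16 and v64i8 are only lowered directly when
// AVX-512 BWI is available; without BWI they break out of the switch and hit
// the splitting fallback at the end of this function.)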
11281 switch (VT.SimpleTy) {
11282 case MVT::v8f64:
11283 return lowerV8F64VectorShuffle(Op, V1, V2, Subtarget, DAG);
11284 case MVT::v16f32:
11285 return lowerV16F32VectorShuffle(Op, V1, V2, Subtarget, DAG);
11286 case MVT::v8i64:
11287 return lowerV8I64VectorShuffle(Op, V1, V2, Subtarget, DAG);
11288 case MVT::v16i32:
11289 return lowerV16I32VectorShuffle(Op, V1, V2, Subtarget, DAG);
11290 case MVT::v32i16:
11291 if (Subtarget->hasBWI())
11292 return lowerV32I16VectorShuffle(Op, V1, V2, Subtarget, DAG);
11293 break;
11294 case MVT::v64i8:
11295 if (Subtarget->hasBWI())
11296 return lowerV64I8VectorShuffle(Op, V1, V2, Subtarget, DAG);
11297 break;
11299 default:
11300 llvm_unreachable("Not a valid 512-bit x86 vector type!");
11301 }
11303 // Otherwise fall back on splitting.
11304 return splitAndLowerVectorShuffle(DL, VT, V1, V2, Mask, DAG);
11307 /// \brief Top-level lowering for x86 vector shuffles.
11309 /// This handles decomposition, canonicalization, and lowering of all x86
11310 /// vector shuffles. Most of the specific lowering strategies are encapsulated
11311 /// above in helper routines. The canonicalization attempts to widen shuffles
11312 /// to involve fewer lanes of wider elements, consolidate symmetric patterns
11313 /// s.t. only one of the two inputs needs to be tested, etc.
11314 static SDValue lowerVectorShuffle(SDValue Op, const X86Subtarget *Subtarget,
11315 SelectionDAG &DAG) {
11316 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
11317 ArrayRef<int> Mask = SVOp->getMask();
11318 SDValue V1 = Op.getOperand(0);
11319 SDValue V2 = Op.getOperand(1);
11320 MVT VT = Op.getSimpleValueType();
11321 int NumElements = VT.getVectorNumElements();
11322 SDLoc dl(Op);
11324 assert(VT.getSizeInBits() != 64 && "Can't lower MMX shuffles");
11326 bool V1IsUndef = V1.getOpcode() == ISD::UNDEF;
11327 bool V2IsUndef = V2.getOpcode() == ISD::UNDEF;
11328 if (V1IsUndef && V2IsUndef)
11329 return DAG.getUNDEF(VT);
11331 // When we create a shuffle node we put the UNDEF node as the second operand,
11332 // but in some cases the first operand may be transformed to UNDEF.
11333 // In this case we should just commute the node.
11334 if (V1IsUndef)
11335 return DAG.getCommutedVectorShuffle(*SVOp);
11337 // Check for non-undef masks pointing at an undef vector and make the masks
11338 // undef as well. This makes it easier to match the shuffle based solely on
11339 // the mask.
11340 if (V2IsUndef)
11341 for (int M : Mask)
11342 if (M >= NumElements) {
11343 SmallVector<int, 8> NewMask(Mask.begin(), Mask.end());
11344 for (int &M : NewMask)
11345 if (M >= NumElements)
11346 M = -1;
11347 return DAG.getVectorShuffle(VT, dl, V1, V2, NewMask);
11348 }
11350 // We actually see shuffles that are entirely re-arrangements of a set of
11351 // zero inputs. This mostly happens while decomposing complex shuffles into
11352 // simple ones. Directly lower these as a buildvector of zeros.
11353 SmallBitVector Zeroable = computeZeroableShuffleElements(Mask, V1, V2);
11354 if (Zeroable.all())
11355 return getZeroVector(VT, Subtarget, DAG, dl);
11357 // Try to collapse shuffles into using a vector type with fewer elements but
11358 // wider element types. We cap this to not form integers or floating point
11359 // elements wider than 64 bits, but it might be interesting to form i128
11360 // integers to handle flipping the low and high halves of AVX 256-bit vectors.
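// (Illustrative example: the v8i32 mask <0, 1, 4, 5, 2, 3, 6, 7> pairs up
// cleanly and can be widened to the v4i64 mask <0, 2, 1, 3>.)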
11361 SmallVector<int, 16> WidenedMask;
11362 if (VT.getScalarSizeInBits() < 64 &&
11363 canWidenShuffleElements(Mask, WidenedMask)) {
11364 MVT NewEltVT = VT.isFloatingPoint()
11365 ? MVT::getFloatingPointVT(VT.getScalarSizeInBits() * 2)
11366 : MVT::getIntegerVT(VT.getScalarSizeInBits() * 2);
11367 MVT NewVT = MVT::getVectorVT(NewEltVT, VT.getVectorNumElements() / 2);
11368 // Make sure that the new vector type is legal. For example, v2f64 isn't
11369 // legal on SSE1.
11370 if (DAG.getTargetLoweringInfo().isTypeLegal(NewVT)) {
11371 V1 = DAG.getNode(ISD::BITCAST, dl, NewVT, V1);
11372 V2 = DAG.getNode(ISD::BITCAST, dl, NewVT, V2);
11373 return DAG.getNode(ISD::BITCAST, dl, VT,
11374 DAG.getVectorShuffle(NewVT, dl, V1, V2, WidenedMask));
11375 }
11376 }
11378 int NumV1Elements = 0, NumUndefElements = 0, NumV2Elements = 0;
11379 for (int M : SVOp->getMask())
11380 if (M < 0)
11381 ++NumUndefElements;
11382 else if (M < NumElements)
11383 ++NumV1Elements;
11384 else
11385 ++NumV2Elements;
11387 // Commute the shuffle as needed such that more elements come from V1 than
11388 // V2. This allows us to match the shuffle pattern strictly on how many
11389 // elements come from V1 without handling the symmetric cases.
11390 if (NumV2Elements > NumV1Elements)
11391 return DAG.getCommutedVectorShuffle(*SVOp);
11393 // When the number of V1 and V2 elements are the same, try to minimize the
11394 // number of uses of V2 in the low half of the vector. When that is tied,
11395 // ensure that the sum of indices for V1 is equal to or lower than the sum
11396 // of indices for V2. When those are equal, try to ensure that the number of odd
11397 // indices for V1 is lower than the number of odd indices for V2.
11398 if (NumV1Elements == NumV2Elements) {
11399 int LowV1Elements = 0, LowV2Elements = 0;
11400 for (int M : SVOp->getMask().slice(0, NumElements / 2))
11401 if (M >= NumElements)
11402 ++LowV2Elements;
11403 else if (M >= 0)
11404 ++LowV1Elements;
11405 if (LowV2Elements > LowV1Elements) {
11406 return DAG.getCommutedVectorShuffle(*SVOp);
11407 } else if (LowV2Elements == LowV1Elements) {
11408 int SumV1Indices = 0, SumV2Indices = 0;
11409 for (int i = 0, Size = SVOp->getMask().size(); i < Size; ++i)
11410 if (SVOp->getMask()[i] >= NumElements)
11411 SumV2Indices += i;
11412 else if (SVOp->getMask()[i] >= 0)
11413 SumV1Indices += i;
11414 if (SumV2Indices < SumV1Indices) {
11415 return DAG.getCommutedVectorShuffle(*SVOp);
11416 } else if (SumV2Indices == SumV1Indices) {
11417 int NumV1OddIndices = 0, NumV2OddIndices = 0;
11418 for (int i = 0, Size = SVOp->getMask().size(); i < Size; ++i)
11419 if (SVOp->getMask()[i] >= NumElements)
11420 NumV2OddIndices += i % 2;
11421 else if (SVOp->getMask()[i] >= 0)
11422 NumV1OddIndices += i % 2;
11423 if (NumV2OddIndices < NumV1OddIndices)
11424 return DAG.getCommutedVectorShuffle(*SVOp);
11429 // For each vector width, delegate to a specialized lowering routine.
11430 if (VT.getSizeInBits() == 128)
11431 return lower128BitVectorShuffle(Op, V1, V2, VT, Subtarget, DAG);
11433 if (VT.getSizeInBits() == 256)
11434 return lower256BitVectorShuffle(Op, V1, V2, VT, Subtarget, DAG);
11436 // Force AVX-512 vectors to be scalarized for now.
11437 // FIXME: Implement AVX-512 support!
11438 if (VT.getSizeInBits() == 512)
11439 return lower512BitVectorShuffle(Op, V1, V2, VT, Subtarget, DAG);
11441 llvm_unreachable("Unimplemented!");
11445 //===----------------------------------------------------------------------===//
11446 // Legacy vector shuffle lowering
11448 // This code is the legacy code handling vector shuffles until the above
11449 // replaces its functionality and performance.
11450 //===----------------------------------------------------------------------===//
11452 static bool isBlendMask(ArrayRef<int> MaskVals, MVT VT, bool hasSSE41,
11453 bool hasInt256, unsigned *MaskOut = nullptr) {
11454 MVT EltVT = VT.getVectorElementType();
11456 // There is no blend with immediate in AVX-512.
11457 if (VT.is512BitVector())
11460 if (!hasSSE41 || EltVT == MVT::i8)
11462 if (!hasInt256 && VT == MVT::v16i16)
11465 unsigned MaskValue = 0;
11466 unsigned NumElems = VT.getVectorNumElements();
11467 // There are 2 lanes if (NumElems > 8), and 1 lane otherwise.
11468 unsigned NumLanes = (NumElems - 1) / 8 + 1;
11469 unsigned NumElemsInLane = NumElems / NumLanes;
11471 // Blend for v16i16 should be symmetric for both lanes.
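// (e.g. for v16i16 the 8-bit blend immediate is applied by VPBLENDW to both
// 128-bit halves, so element i and element i + 8 must make the same V1-vs-V2
// choice; that is what the checks below enforce.)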
11472 for (unsigned i = 0; i < NumElemsInLane; ++i) {
11474 int SndLaneEltIdx = (NumLanes == 2) ? MaskVals[i + NumElemsInLane] : -1;
11475 int EltIdx = MaskVals[i];
11477 if ((EltIdx < 0 || EltIdx == (int)i) &&
11478 (SndLaneEltIdx < 0 || SndLaneEltIdx == (int)(i + NumElemsInLane)))
11479 continue;
11481 if (((unsigned)EltIdx == (i + NumElems)) &&
11482 (SndLaneEltIdx < 0 ||
11483 (unsigned)SndLaneEltIdx == i + NumElems + NumElemsInLane))
11484 MaskValue |= (1 << i);
11485 else
11486 return false;
11487 }
11489 if (MaskOut)
11490 *MaskOut = MaskValue;
11492 return true;
11493 }
11494 // Try to lower a shuffle node into a simple blend instruction.
11495 // This function assumes isBlendMask returns true for this
11496 // ShuffleVectorSDNode
11497 static SDValue LowerVECTOR_SHUFFLEtoBlend(ShuffleVectorSDNode *SVOp,
11498 unsigned MaskValue,
11499 const X86Subtarget *Subtarget,
11500 SelectionDAG &DAG) {
11501 MVT VT = SVOp->getSimpleValueType(0);
11502 MVT EltVT = VT.getVectorElementType();
11503 assert(isBlendMask(SVOp->getMask(), VT, Subtarget->hasSSE41(),
11504 Subtarget->hasInt256()) &&
11505 "Trying to lower a VECTOR_SHUFFLE to a Blend but "
11506 "with the wrong mask");
11507 SDValue V1 = SVOp->getOperand(0);
11508 SDValue V2 = SVOp->getOperand(1);
11509 SDLoc dl(SVOp);
11510 unsigned NumElems = VT.getVectorNumElements();
11512 // Convert i32 vectors to floating point if it is not AVX2.
11513 // AVX2 introduced VPBLENDD instruction for 128 and 256-bit vectors.
11514 MVT BlendVT = VT;
11515 if (EltVT == MVT::i64 || (EltVT == MVT::i32 && !Subtarget->hasInt256())) {
11516 BlendVT = MVT::getVectorVT(MVT::getFloatingPointVT(EltVT.getSizeInBits()),
11517 NumElems);
11518 V1 = DAG.getNode(ISD::BITCAST, dl, BlendVT, V1);
11519 V2 = DAG.getNode(ISD::BITCAST, dl, BlendVT, V2);
11520 }
11522 SDValue Ret = DAG.getNode(X86ISD::BLENDI, dl, BlendVT, V1, V2,
11523 DAG.getConstant(MaskValue, MVT::i32));
11524 return DAG.getNode(ISD::BITCAST, dl, VT, Ret);
11527 /// In vector type \p VT, return true if the element at index \p InputIdx
11528 /// falls on a different 128-bit lane than \p OutputIdx.
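/// For example, in a v16i16 vector element 2 sits in the low 128-bit lane
/// while element 10 sits in the high lane, so moving element 10 into
/// position 2 crosses a lane boundary.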
11529 static bool ShuffleCrosses128bitLane(MVT VT, unsigned InputIdx,
11530 unsigned OutputIdx) {
11531 unsigned EltSize = VT.getVectorElementType().getSizeInBits();
11532 return InputIdx * EltSize / 128 != OutputIdx * EltSize / 128;
11535 /// Generate a PSHUFB if possible. Selects elements from \p V1 according to
11536 /// \p MaskVals. MaskVals[OutputIdx] = InputIdx specifies that we want to
11537 /// shuffle the element at InputIdx in V1 to OutputIdx in the result. If \p
11538 /// MaskVals refers to elements outside of \p V1 or is undef (-1), insert a
11540 static SDValue getPSHUFB(ArrayRef<int> MaskVals, SDValue V1, SDLoc &dl,
11541 SelectionDAG &DAG) {
11542 MVT VT = V1.getSimpleValueType();
11543 assert(VT.is128BitVector() || VT.is256BitVector());
11545 MVT EltVT = VT.getVectorElementType();
11546 unsigned EltSizeInBytes = EltVT.getSizeInBits() / 8;
11547 unsigned NumElts = VT.getVectorNumElements();
11549 SmallVector<SDValue, 32> PshufbMask;
11550 for (unsigned OutputIdx = 0; OutputIdx < NumElts; ++OutputIdx) {
11551 int InputIdx = MaskVals[OutputIdx];
11552 unsigned InputByteIdx;
11554 if (InputIdx < 0 || NumElts <= (unsigned)InputIdx)
11555 InputByteIdx = 0x80;
11556 else {
11557 // Cross lane is not allowed.
11558 if (ShuffleCrosses128bitLane(VT, InputIdx, OutputIdx))
11559 return SDValue();
11560 InputByteIdx = InputIdx * EltSizeInBytes;
11561 // Index is a byte offset within the 128-bit lane.
11562 InputByteIdx &= 0xf;
11563 }
11565 for (unsigned j = 0; j < EltSizeInBytes; ++j) {
11566 PshufbMask.push_back(DAG.getConstant(InputByteIdx, MVT::i8));
11567 if (InputByteIdx != 0x80)
11568 ++InputByteIdx;
11569 }
11570 }
11572 MVT ShufVT = MVT::getVectorVT(MVT::i8, PshufbMask.size());
11574 V1 = DAG.getNode(ISD::BITCAST, dl, ShufVT, V1);
11575 return DAG.getNode(X86ISD::PSHUFB, dl, ShufVT, V1,
11576 DAG.getNode(ISD::BUILD_VECTOR, dl, ShufVT, PshufbMask));
11579 // v8i16 shuffles - Prefer shuffles in the following order:
11580 // 1. [all] pshuflw, pshufhw, optional move
11581 // 2. [ssse3] 1 x pshufb
11582 // 3. [ssse3] 2 x pshufb + 1 x por
11583 // 4. [all] mov + pshuflw + pshufhw + N x (pextrw + pinsrw)
11585 LowerVECTOR_SHUFFLEv8i16(SDValue Op, const X86Subtarget *Subtarget,
11586 SelectionDAG &DAG) {
11587 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
11588 SDValue V1 = SVOp->getOperand(0);
11589 SDValue V2 = SVOp->getOperand(1);
11591 SmallVector<int, 8> MaskVals;
11593 // Determine if more than 1 of the words in each of the low and high quadwords
11594 // of the result come from the same quadword of one of the two inputs. Undef
11595 // mask values count as coming from any quadword, for better codegen.
11597 // Lo/HiQuad[i] = j indicates how many words from the ith quad of the input
11598 // feeds this quad. For i, 0 and 1 refer to V1, 2 and 3 refer to V2.
11599 unsigned LoQuad[] = { 0, 0, 0, 0 };
11600 unsigned HiQuad[] = { 0, 0, 0, 0 };
11601 // Indices of quads used.
11602 std::bitset<4> InputQuads;
11603 for (unsigned i = 0; i < 8; ++i) {
11604 unsigned *Quad = i < 4 ? LoQuad : HiQuad;
11605 int EltIdx = SVOp->getMaskElt(i);
11606 MaskVals.push_back(EltIdx);
11614 ++Quad[EltIdx / 4];
11615 InputQuads.set(EltIdx / 4);
11618 int BestLoQuad = -1;
11619 unsigned MaxQuad = 1;
11620 for (unsigned i = 0; i < 4; ++i) {
11621 if (LoQuad[i] > MaxQuad) {
11623 MaxQuad = LoQuad[i];
11627 int BestHiQuad = -1;
11629 for (unsigned i = 0; i < 4; ++i) {
11630 if (HiQuad[i] > MaxQuad) {
11632 MaxQuad = HiQuad[i];
11636 // For SSSE3, If all 8 words of the result come from only 1 quadword of each
11637 // of the two input vectors, shuffle them into one input vector so only a
11638 // single pshufb instruction is necessary. If there are more than 2 input
11639 // quads, disable the next transformation since it does not help SSSE3.
11640 bool V1Used = InputQuads[0] || InputQuads[1];
11641 bool V2Used = InputQuads[2] || InputQuads[3];
11642 if (Subtarget->hasSSSE3()) {
11643 if (InputQuads.count() == 2 && V1Used && V2Used) {
11644 BestLoQuad = InputQuads[0] ? 0 : 1;
11645 BestHiQuad = InputQuads[2] ? 2 : 3;
11647 if (InputQuads.count() > 2) {
11653 // If BestLoQuad or BestHiQuad are set, shuffle the quads together and update
11654 // the shuffle mask. If a quad is scored as -1, that means that it contains
11655 // words from all 4 input quadwords.
11657 if (BestLoQuad >= 0 || BestHiQuad >= 0) {
11659 BestLoQuad < 0 ? 0 : BestLoQuad,
11660 BestHiQuad < 0 ? 1 : BestHiQuad
11662 NewV = DAG.getVectorShuffle(MVT::v2i64, dl,
11663 DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, V1),
11664 DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, V2), &MaskV[0]);
11665 NewV = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, NewV);
11667 // Rewrite the MaskVals and assign NewV to V1 if NewV now contains all the
11668 // source words for the shuffle, to aid later transformations.
11669 bool AllWordsInNewV = true;
11670 bool InOrder[2] = { true, true };
11671 for (unsigned i = 0; i != 8; ++i) {
11672 int idx = MaskVals[i];
11674 InOrder[i/4] = false;
11675 if (idx < 0 || (idx/4) == BestLoQuad || (idx/4) == BestHiQuad)
11677 AllWordsInNewV = false;
11681 bool pshuflw = AllWordsInNewV, pshufhw = AllWordsInNewV;
11682 if (AllWordsInNewV) {
11683 for (int i = 0; i != 8; ++i) {
11684 int idx = MaskVals[i];
11687 idx = MaskVals[i] = (idx / 4) == BestLoQuad ? (idx & 3) : (idx & 3) + 4;
11688 if ((idx != i) && idx < 4)
11690 if ((idx != i) && idx > 3)
11699 // If we've eliminated the use of V2, and the new mask is a pshuflw or
11700 // pshufhw, that's as cheap as it gets. Return the new shuffle.
11701 if ((pshufhw && InOrder[0]) || (pshuflw && InOrder[1])) {
11702 unsigned Opc = pshufhw ? X86ISD::PSHUFHW : X86ISD::PSHUFLW;
11703 unsigned TargetMask = 0;
11704 NewV = DAG.getVectorShuffle(MVT::v8i16, dl, NewV,
11705 DAG.getUNDEF(MVT::v8i16), &MaskVals[0]);
11706 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(NewV.getNode());
11707 TargetMask = pshufhw ? getShufflePSHUFHWImmediate(SVOp):
11708 getShufflePSHUFLWImmediate(SVOp);
11709 V1 = NewV.getOperand(0);
11710 return getTargetShuffleNode(Opc, dl, MVT::v8i16, V1, TargetMask, DAG);
11714 // Promote splats to a larger type which usually leads to more efficient code.
11715 // FIXME: Is this true if pshufb is available?
11716 if (SVOp->isSplat())
11717 return PromoteSplat(SVOp, DAG);
11719 // If we have SSSE3, and all words of the result are from 1 input vector,
11720 // case 2 is generated, otherwise case 3 is generated. If no SSSE3
11721 // is present, fall back to case 4.
11722 if (Subtarget->hasSSSE3()) {
11723 SmallVector<SDValue,16> pshufbMask;
11725 // If we have elements from both input vectors, set the high bit of the
11726 // shuffle mask element to zero out elements that come from V2 in the V1
11727 // mask, and elements that come from V1 in the V2 mask, so that the two
11728 // results can be OR'd together.
11729 bool TwoInputs = V1Used && V2Used;
11730 V1 = getPSHUFB(MaskVals, V1, dl, DAG);
11732 return DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V1);
11734 // Calculate the shuffle mask for the second input, shuffle it, and
11735 // OR it with the first shuffled input.
11736 CommuteVectorShuffleMask(MaskVals, 8);
11737 V2 = getPSHUFB(MaskVals, V2, dl, DAG);
11738 V1 = DAG.getNode(ISD::OR, dl, MVT::v16i8, V1, V2);
11739 return DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V1);
11742 // If BestLoQuad >= 0, generate a pshuflw to put the low elements in order,
11743 // and update MaskVals with new element order.
11744 std::bitset<8> InOrder;
11745 if (BestLoQuad >= 0) {
11746 int MaskV[] = { -1, -1, -1, -1, 4, 5, 6, 7 };
11747 for (int i = 0; i != 4; ++i) {
11748 int idx = MaskVals[i];
11751 } else if ((idx / 4) == BestLoQuad) {
11752 MaskV[i] = idx & 3;
11756 NewV = DAG.getVectorShuffle(MVT::v8i16, dl, NewV, DAG.getUNDEF(MVT::v8i16),
11759 if (NewV.getOpcode() == ISD::VECTOR_SHUFFLE && Subtarget->hasSSE2()) {
11760 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(NewV.getNode());
11761 NewV = getTargetShuffleNode(X86ISD::PSHUFLW, dl, MVT::v8i16,
11762 NewV.getOperand(0),
11763 getShufflePSHUFLWImmediate(SVOp), DAG);
11767 // If BestHi >= 0, generate a pshufhw to put the high elements in order,
11768 // and update MaskVals with the new element order.
11769 if (BestHiQuad >= 0) {
11770 int MaskV[] = { 0, 1, 2, 3, -1, -1, -1, -1 };
11771 for (unsigned i = 4; i != 8; ++i) {
11772 int idx = MaskVals[i];
11775 } else if ((idx / 4) == BestHiQuad) {
11776 MaskV[i] = (idx & 3) + 4;
11780 NewV = DAG.getVectorShuffle(MVT::v8i16, dl, NewV, DAG.getUNDEF(MVT::v8i16),
11783 if (NewV.getOpcode() == ISD::VECTOR_SHUFFLE && Subtarget->hasSSE2()) {
11784 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(NewV.getNode());
11785 NewV = getTargetShuffleNode(X86ISD::PSHUFHW, dl, MVT::v8i16,
11786 NewV.getOperand(0),
11787 getShufflePSHUFHWImmediate(SVOp), DAG);
11791 // In case BestHi & BestLo were both -1, which means each quadword has a word
11792 // from each of the four input quadwords, calculate the InOrder bitvector now
11793 // before falling through to the insert/extract cleanup.
11794 if (BestLoQuad == -1 && BestHiQuad == -1) {
11796 for (int i = 0; i != 8; ++i)
11797 if (MaskVals[i] < 0 || MaskVals[i] == i)
11801 // The other elements are put in the right place using pextrw and pinsrw.
11802 for (unsigned i = 0; i != 8; ++i) {
11805 int EltIdx = MaskVals[i];
11808 SDValue ExtOp = (EltIdx < 8) ?
11809 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16, V1,
11810 DAG.getIntPtrConstant(EltIdx)) :
11811 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16, V2,
11812 DAG.getIntPtrConstant(EltIdx - 8));
11813 NewV = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v8i16, NewV, ExtOp,
11814 DAG.getIntPtrConstant(i));
11819 /// \brief v16i16 shuffles
11821 /// FIXME: We only support generation of a single pshufb currently. We can
11822 /// generalize the other applicable cases from LowerVECTOR_SHUFFLEv8i16 as
11823 /// well (e.g 2 x pshufb + 1 x por).
11825 LowerVECTOR_SHUFFLEv16i16(SDValue Op, SelectionDAG &DAG) {
11826 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
11827 SDValue V1 = SVOp->getOperand(0);
11828 SDValue V2 = SVOp->getOperand(1);
11831 if (V2.getOpcode() != ISD::UNDEF)
11834 SmallVector<int, 16> MaskVals(SVOp->getMask().begin(), SVOp->getMask().end());
11835 return getPSHUFB(MaskVals, V1, dl, DAG);
11838 // v16i8 shuffles - Prefer shuffles in the following order:
11839 // 1. [ssse3] 1 x pshufb
11840 // 2. [ssse3] 2 x pshufb + 1 x por
11841 // 3. [all] v8i16 shuffle + N x pextrw + rotate + pinsrw
11842 static SDValue LowerVECTOR_SHUFFLEv16i8(ShuffleVectorSDNode *SVOp,
11843 const X86Subtarget* Subtarget,
11844 SelectionDAG &DAG) {
11845 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
11846 SDValue V1 = SVOp->getOperand(0);
11847 SDValue V2 = SVOp->getOperand(1);
11849 ArrayRef<int> MaskVals = SVOp->getMask();
11851 // Promote splats to a larger type which usually leads to more efficient code.
11852 // FIXME: Is this true if pshufb is available?
11853 if (SVOp->isSplat())
11854 return PromoteSplat(SVOp, DAG);
11856 // If we have SSSE3, case 1 is generated when all result bytes come from
11857 // one of the inputs. Otherwise, case 2 is generated. If no SSSE3 is
11858 // present, fall back to case 3.
11860 // If SSSE3, use 1 pshufb instruction per vector with elements in the result.
11861 if (Subtarget->hasSSSE3()) {
11862 SmallVector<SDValue,16> pshufbMask;
11864 // If all result elements are from one input vector, then only translate
11865 // undef mask values to 0x80 (zero out result) in the pshufb mask.
11867 // Otherwise, we have elements from both input vectors, and must zero out
11868 // elements that come from V2 in the first mask, and V1 in the second mask
11869 // so that we can OR them together.
11870 for (unsigned i = 0; i != 16; ++i) {
11871 int EltIdx = MaskVals[i];
11872 if (EltIdx < 0 || EltIdx >= 16)
11874 pshufbMask.push_back(DAG.getConstant(EltIdx, MVT::i8));
11876 V1 = DAG.getNode(X86ISD::PSHUFB, dl, MVT::v16i8, V1,
11877 DAG.getNode(ISD::BUILD_VECTOR, dl,
11878 MVT::v16i8, pshufbMask));
11880 // As PSHUFB will zero elements with negative indices, it's safe to ignore
11881 // the 2nd operand if it's undefined or zero.
11882 if (V2.getOpcode() == ISD::UNDEF ||
11883 ISD::isBuildVectorAllZeros(V2.getNode()))
11886 // Calculate the shuffle mask for the second input, shuffle it, and
11887 // OR it with the first shuffled input.
11888 pshufbMask.clear();
11889 for (unsigned i = 0; i != 16; ++i) {
11890 int EltIdx = MaskVals[i];
11891 EltIdx = (EltIdx < 16) ? 0x80 : EltIdx - 16;
11892 pshufbMask.push_back(DAG.getConstant(EltIdx, MVT::i8));
11894 V2 = DAG.getNode(X86ISD::PSHUFB, dl, MVT::v16i8, V2,
11895 DAG.getNode(ISD::BUILD_VECTOR, dl,
11896 MVT::v16i8, pshufbMask));
11897 return DAG.getNode(ISD::OR, dl, MVT::v16i8, V1, V2);
11900 // No SSSE3 - Calculate in place words and then fix all out of place words
11901 // With 0-16 extracts & inserts. Worst case is 16 bytes out of order from
11902 // the 16 different words that comprise the two doublequadword input vectors.
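// Both inputs are reinterpreted as v8i16 below, so each pextrw/pinsrw step
// moves one 16-bit word, i.e. one pair of byte mask entries at a time.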
11903 V1 = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V1);
11904 V2 = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V2);
11906 for (int i = 0; i != 8; ++i) {
11907 int Elt0 = MaskVals[i*2];
11908 int Elt1 = MaskVals[i*2+1];
11910 // This word of the result is all undef, skip it.
11911 if (Elt0 < 0 && Elt1 < 0)
11914 // This word of the result is already in the correct place, skip it.
11915 if ((Elt0 == i*2) && (Elt1 == i*2+1))
11918 SDValue Elt0Src = Elt0 < 16 ? V1 : V2;
11919 SDValue Elt1Src = Elt1 < 16 ? V1 : V2;
11922 // If Elt0 and Elt1 are defined, are consecutive, and can be loaded
11923 // using a single extract together, load it and store it.
11924 if ((Elt0 >= 0) && ((Elt0 + 1) == Elt1) && ((Elt0 & 1) == 0)) {
11925 InsElt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16, Elt1Src,
11926 DAG.getIntPtrConstant(Elt1 / 2));
11927 NewV = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v8i16, NewV, InsElt,
11928 DAG.getIntPtrConstant(i));
11932 // If Elt1 is defined, extract it from the appropriate source. If the
11933 // source byte is not also odd, shift the extracted word left 8 bits
11934 // otherwise clear the bottom 8 bits if we need to do an or.
11936 InsElt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16, Elt1Src,
11937 DAG.getIntPtrConstant(Elt1 / 2));
11938 if ((Elt1 & 1) == 0)
11939 InsElt = DAG.getNode(ISD::SHL, dl, MVT::i16, InsElt,
11941 TLI.getShiftAmountTy(InsElt.getValueType())));
11942 else if (Elt0 >= 0)
11943 InsElt = DAG.getNode(ISD::AND, dl, MVT::i16, InsElt,
11944 DAG.getConstant(0xFF00, MVT::i16));
11946 // If Elt0 is defined, extract it from the appropriate source. If the
11947 // source byte is not also even, shift the extracted word right 8 bits. If
11948 // Elt1 was also defined, OR the extracted values together before
11949 // inserting them in the result.
11951 SDValue InsElt0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16,
11952 Elt0Src, DAG.getIntPtrConstant(Elt0 / 2));
11953 if ((Elt0 & 1) != 0)
11954 InsElt0 = DAG.getNode(ISD::SRL, dl, MVT::i16, InsElt0,
11956 TLI.getShiftAmountTy(InsElt0.getValueType())));
11957 else if (Elt1 >= 0)
11958 InsElt0 = DAG.getNode(ISD::AND, dl, MVT::i16, InsElt0,
11959 DAG.getConstant(0x00FF, MVT::i16));
11960 InsElt = Elt1 >= 0 ? DAG.getNode(ISD::OR, dl, MVT::i16, InsElt, InsElt0)
11963 NewV = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v8i16, NewV, InsElt,
11964 DAG.getIntPtrConstant(i));
11966 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, NewV);
11969 // v32i8 shuffles - Translate to VPSHUFB if possible.
11971 SDValue LowerVECTOR_SHUFFLEv32i8(ShuffleVectorSDNode *SVOp,
11972 const X86Subtarget *Subtarget,
11973 SelectionDAG &DAG) {
11974 MVT VT = SVOp->getSimpleValueType(0);
11975 SDValue V1 = SVOp->getOperand(0);
11976 SDValue V2 = SVOp->getOperand(1);
11978 SmallVector<int, 32> MaskVals(SVOp->getMask().begin(), SVOp->getMask().end());
11980 bool V2IsUndef = V2.getOpcode() == ISD::UNDEF;
11981 bool V1IsAllZero = ISD::isBuildVectorAllZeros(V1.getNode());
11982 bool V2IsAllZero = ISD::isBuildVectorAllZeros(V2.getNode());
11984 // VPSHUFB may be generated if
11985 // (1) one of input vector is undefined or zeroinitializer.
11986 // The mask value 0x80 puts 0 in the corresponding slot of the vector.
11987 // And (2) the mask indexes don't cross the 128-bit lane.
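// (VPSHUFB shuffles each 128-bit lane of a YMM register independently, so a
// mask index that selects a byte from the other lane cannot be expressed;
// getPSHUFB gives up on such masks.)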
11988 if (VT != MVT::v32i8 || !Subtarget->hasInt256() ||
11989 (!V2IsUndef && !V2IsAllZero && !V1IsAllZero))
11992 if (V1IsAllZero && !V2IsAllZero) {
11993 CommuteVectorShuffleMask(MaskVals, 32);
11996 return getPSHUFB(MaskVals, V1, dl, DAG);
11999 /// RewriteAsNarrowerShuffle - Try rewriting v8i16 and v16i8 shuffles as 4 wide
12000 /// ones, or rewriting v4i32 / v4f32 as 2 wide ones if possible. This can be
12001 /// done when every pair / quad of shuffle mask elements point to elements in
12002 /// the right sequence. e.g.
12003 /// vector_shuffle X, Y, <2, 3, | 10, 11, | 0, 1, | 14, 15>
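/// is rewritten as a v4i32 shuffle with the mask <1, 5, 0, 7>.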
12005 SDValue RewriteAsNarrowerShuffle(ShuffleVectorSDNode *SVOp,
12006 SelectionDAG &DAG) {
12007 MVT VT = SVOp->getSimpleValueType(0);
12009 unsigned NumElems = VT.getVectorNumElements();
12012 switch (VT.SimpleTy) {
12013 default: llvm_unreachable("Unexpected!");
12016 return SDValue(SVOp, 0);
12017 case MVT::v4f32: NewVT = MVT::v2f64; Scale = 2; break;
12018 case MVT::v4i32: NewVT = MVT::v2i64; Scale = 2; break;
12019 case MVT::v8i16: NewVT = MVT::v4i32; Scale = 2; break;
12020 case MVT::v16i8: NewVT = MVT::v4i32; Scale = 4; break;
12021 case MVT::v16i16: NewVT = MVT::v8i32; Scale = 2; break;
12022 case MVT::v32i8: NewVT = MVT::v8i32; Scale = 4; break;
12025 SmallVector<int, 8> MaskVec;
12026 for (unsigned i = 0; i != NumElems; i += Scale) {
12028 for (unsigned j = 0; j != Scale; ++j) {
12029 int EltIdx = SVOp->getMaskElt(i+j);
12033 StartIdx = (EltIdx / Scale);
12034 if (EltIdx != (int)(StartIdx*Scale + j))
12037 MaskVec.push_back(StartIdx);
12040 SDValue V1 = DAG.getNode(ISD::BITCAST, dl, NewVT, SVOp->getOperand(0));
12041 SDValue V2 = DAG.getNode(ISD::BITCAST, dl, NewVT, SVOp->getOperand(1));
12042 return DAG.getVectorShuffle(NewVT, dl, V1, V2, &MaskVec[0]);
12045 /// getVZextMovL - Return a zero-extending vector move low node.
12047 static SDValue getVZextMovL(MVT VT, MVT OpVT,
12048 SDValue SrcOp, SelectionDAG &DAG,
12049 const X86Subtarget *Subtarget, SDLoc dl) {
12050 if (VT == MVT::v2f64 || VT == MVT::v4f32) {
12051 LoadSDNode *LD = nullptr;
12052 if (!isScalarLoadToVector(SrcOp.getNode(), &LD))
12053 LD = dyn_cast<LoadSDNode>(SrcOp);
12055 // movssrr and movsdrr do not clear top bits. Try to use movd, movq
12057 MVT ExtVT = (OpVT == MVT::v2f64) ? MVT::i64 : MVT::i32;
12058 if ((ExtVT != MVT::i64 || Subtarget->is64Bit()) &&
12059 SrcOp.getOpcode() == ISD::SCALAR_TO_VECTOR &&
12060 SrcOp.getOperand(0).getOpcode() == ISD::BITCAST &&
12061 SrcOp.getOperand(0).getOperand(0).getValueType() == ExtVT) {
12063 OpVT = (OpVT == MVT::v2f64) ? MVT::v2i64 : MVT::v4i32;
12064 return DAG.getNode(ISD::BITCAST, dl, VT,
12065 DAG.getNode(X86ISD::VZEXT_MOVL, dl, OpVT,
12066 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl,
12068 SrcOp.getOperand(0)
12074 return DAG.getNode(ISD::BITCAST, dl, VT,
12075 DAG.getNode(X86ISD::VZEXT_MOVL, dl, OpVT,
12076 DAG.getNode(ISD::BITCAST, dl,
12080 /// LowerVECTOR_SHUFFLE_256 - Handle all 256-bit wide vector shuffles
12081 /// which could not be matched by any known target-specific shuffle
12083 LowerVECTOR_SHUFFLE_256(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG) {
12085 SDValue NewOp = Compact8x32ShuffleNode(SVOp, DAG);
12086 if (NewOp.getNode())
12089 MVT VT = SVOp->getSimpleValueType(0);
12091 unsigned NumElems = VT.getVectorNumElements();
12092 unsigned NumLaneElems = NumElems / 2;
12095 MVT EltVT = VT.getVectorElementType();
12096 MVT NVT = MVT::getVectorVT(EltVT, NumLaneElems);
12099 SmallVector<int, 16> Mask;
12100 for (unsigned l = 0; l < 2; ++l) {
12101 // Build a shuffle mask for the output, discovering on the fly which
12102 // input vectors to use as shuffle operands (recorded in InputUsed).
12103 // If building a suitable shuffle vector proves too hard, then bail
12104 // out with UseBuildVector set.
12105 bool UseBuildVector = false;
12106 int InputUsed[2] = { -1, -1 }; // Not yet discovered.
12107 unsigned LaneStart = l * NumLaneElems;
12108 for (unsigned i = 0; i != NumLaneElems; ++i) {
12109 // The mask element. This indexes into the input.
12110 int Idx = SVOp->getMaskElt(i+LaneStart);
12112 // the mask element does not index into any input vector.
12113 Mask.push_back(-1);
12117 // The input vector this mask element indexes into.
12118 int Input = Idx / NumLaneElems;
12120 // Turn the index into an offset from the start of the input vector.
12121 Idx -= Input * NumLaneElems;
12123 // Find or create a shuffle vector operand to hold this input.
12125 for (OpNo = 0; OpNo < array_lengthof(InputUsed); ++OpNo) {
12126 if (InputUsed[OpNo] == Input)
12127 // This input vector is already an operand.
12129 if (InputUsed[OpNo] < 0) {
12130 // Create a new operand for this input vector.
12131 InputUsed[OpNo] = Input;
12136 if (OpNo >= array_lengthof(InputUsed)) {
12137 // More than two input vectors used! Give up on trying to create a
12138 // shuffle vector. Insert all elements into a BUILD_VECTOR instead.
12139 UseBuildVector = true;
12143 // Add the mask index for the new shuffle vector.
12144 Mask.push_back(Idx + OpNo * NumLaneElems);
12147 if (UseBuildVector) {
12148 SmallVector<SDValue, 16> SVOps;
12149 for (unsigned i = 0; i != NumLaneElems; ++i) {
12150 // The mask element. This indexes into the input.
12151 int Idx = SVOp->getMaskElt(i+LaneStart);
12153 SVOps.push_back(DAG.getUNDEF(EltVT));
12157 // The input vector this mask element indexes into.
12158 int Input = Idx / NumElems;
12160 // Turn the index into an offset from the start of the input vector.
12161 Idx -= Input * NumElems;
12163 // Extract the vector element by hand.
12164 SVOps.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT,
12165 SVOp->getOperand(Input),
12166 DAG.getIntPtrConstant(Idx)));
12169 // Construct the output using a BUILD_VECTOR.
12170 Output[l] = DAG.getNode(ISD::BUILD_VECTOR, dl, NVT, SVOps);
12171 } else if (InputUsed[0] < 0) {
12172 // No input vectors were used! The result is undefined.
12173 Output[l] = DAG.getUNDEF(NVT);
12175 SDValue Op0 = Extract128BitVector(SVOp->getOperand(InputUsed[0] / 2),
12176 (InputUsed[0] % 2) * NumLaneElems,
12178 // If only one input was used, use an undefined vector for the other.
12179 SDValue Op1 = (InputUsed[1] < 0) ? DAG.getUNDEF(NVT) :
12180 Extract128BitVector(SVOp->getOperand(InputUsed[1] / 2),
12181 (InputUsed[1] % 2) * NumLaneElems, DAG, dl);
12182 // At least one input vector was used. Create a new shuffle vector.
12183 Output[l] = DAG.getVectorShuffle(NVT, dl, Op0, Op1, &Mask[0]);
12189 // Concatenate the result back
12190 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Output[0], Output[1]);
12193 /// LowerVECTOR_SHUFFLE_128v4 - Handle all 128-bit wide vectors with
12194 /// 4 elements, and match them with several different shuffle types.
12196 LowerVECTOR_SHUFFLE_128v4(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG) {
12197 SDValue V1 = SVOp->getOperand(0);
12198 SDValue V2 = SVOp->getOperand(1);
12200 MVT VT = SVOp->getSimpleValueType(0);
12202 assert(VT.is128BitVector() && "Unsupported vector size");
12204 std::pair<int, int> Locs[4];
12205 int Mask1[] = { -1, -1, -1, -1 };
12206 SmallVector<int, 8> PermMask(SVOp->getMask().begin(), SVOp->getMask().end());
12208 unsigned NumHi = 0;
12209 unsigned NumLo = 0;
12210 for (unsigned i = 0; i != 4; ++i) {
12211 int Idx = PermMask[i];
12213 Locs[i] = std::make_pair(-1, -1);
12215 assert(Idx < 8 && "Invalid VECTOR_SHUFFLE index!");
12217 Locs[i] = std::make_pair(0, NumLo);
12218 Mask1[NumLo] = Idx;
12221 Locs[i] = std::make_pair(1, NumHi);
12223 Mask1[2+NumHi] = Idx;
12229 if (NumLo <= 2 && NumHi <= 2) {
12230 // If no more than two elements come from either vector. This can be
12231 // implemented with two shuffles. First shuffle gather the elements.
12232 // The second shuffle, which takes the first shuffle as both of its
12233 // vector operands, put the elements into the right order.
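// (Illustrative example: for the mask <2, 5, 3, 4> the first shuffle gathers
// <2, 3, 5, 4> from (V1, V2); the second shuffle then uses that result as
// both operands with the mask <0, 2, 5, 7> to put the elements in order.)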
12234 V1 = DAG.getVectorShuffle(VT, dl, V1, V2, &Mask1[0]);
12236 int Mask2[] = { -1, -1, -1, -1 };
12238 for (unsigned i = 0; i != 4; ++i)
12239 if (Locs[i].first != -1) {
12240 unsigned Idx = (i < 2) ? 0 : 4;
12241 Idx += Locs[i].first * 2 + Locs[i].second;
12245 return DAG.getVectorShuffle(VT, dl, V1, V1, &Mask2[0]);
12248 if (NumLo == 3 || NumHi == 3) {
12249 // Otherwise, we must have three elements from one vector, call it X, and
12250 // one element from the other, call it Y. First, use a shufps to build an
12251 // intermediate vector with the one element from Y and the element from X
12252 // that will be in the same half in the final destination (the indexes don't
12253 // matter). Then, use a shufps to build the final vector, taking the half
12254 // containing the element from Y from the intermediate, and the other half
12257 // Normalize it so the 3 elements come from V1.
12258 CommuteVectorShuffleMask(PermMask, 4);
12262 // Find the element from V2.
12264 for (HiIndex = 0; HiIndex < 3; ++HiIndex) {
12265 int Val = PermMask[HiIndex];
12272 Mask1[0] = PermMask[HiIndex];
12274 Mask1[2] = PermMask[HiIndex^1];
12276 V2 = DAG.getVectorShuffle(VT, dl, V1, V2, &Mask1[0]);
12278 if (HiIndex >= 2) {
12279 Mask1[0] = PermMask[0];
12280 Mask1[1] = PermMask[1];
12281 Mask1[2] = HiIndex & 1 ? 6 : 4;
12282 Mask1[3] = HiIndex & 1 ? 4 : 6;
12283 return DAG.getVectorShuffle(VT, dl, V1, V2, &Mask1[0]);
12286 Mask1[0] = HiIndex & 1 ? 2 : 0;
12287 Mask1[1] = HiIndex & 1 ? 0 : 2;
12288 Mask1[2] = PermMask[2];
12289 Mask1[3] = PermMask[3];
12294 return DAG.getVectorShuffle(VT, dl, V2, V1, &Mask1[0]);
12297 // Break it into (shuffle shuffle_hi, shuffle_lo).
12298 int LoMask[] = { -1, -1, -1, -1 };
12299 int HiMask[] = { -1, -1, -1, -1 };
12301 int *MaskPtr = LoMask;
12302 unsigned MaskIdx = 0;
12303 unsigned LoIdx = 0;
12304 unsigned HiIdx = 2;
12305 for (unsigned i = 0; i != 4; ++i) {
12312 int Idx = PermMask[i];
12314 Locs[i] = std::make_pair(-1, -1);
12315 } else if (Idx < 4) {
12316 Locs[i] = std::make_pair(MaskIdx, LoIdx);
12317 MaskPtr[LoIdx] = Idx;
12320 Locs[i] = std::make_pair(MaskIdx, HiIdx);
12321 MaskPtr[HiIdx] = Idx;
12326 SDValue LoShuffle = DAG.getVectorShuffle(VT, dl, V1, V2, &LoMask[0]);
12327 SDValue HiShuffle = DAG.getVectorShuffle(VT, dl, V1, V2, &HiMask[0]);
12328 int MaskOps[] = { -1, -1, -1, -1 };
12329 for (unsigned i = 0; i != 4; ++i)
12330 if (Locs[i].first != -1)
12331 MaskOps[i] = Locs[i].first * 4 + Locs[i].second;
12332 return DAG.getVectorShuffle(VT, dl, LoShuffle, HiShuffle, &MaskOps[0]);
12335 static bool MayFoldVectorLoad(SDValue V) {
12336 while (V.hasOneUse() && V.getOpcode() == ISD::BITCAST)
12337 V = V.getOperand(0);
12339 if (V.hasOneUse() && V.getOpcode() == ISD::SCALAR_TO_VECTOR)
12340 V = V.getOperand(0);
12341 if (V.hasOneUse() && V.getOpcode() == ISD::BUILD_VECTOR &&
12342 V.getNumOperands() == 2 && V.getOperand(1).getOpcode() == ISD::UNDEF)
12343 // BUILD_VECTOR (load), undef
12344 V = V.getOperand(0);
12346 return MayFoldLoad(V);
12350 SDValue getMOVDDup(SDValue &Op, SDLoc &dl, SDValue V1, SelectionDAG &DAG) {
12351 MVT VT = Op.getSimpleValueType();
12353 // Canonicalize to v2f64.
12354 V1 = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, V1);
12355 return DAG.getNode(ISD::BITCAST, dl, VT,
12356 getTargetShuffleNode(X86ISD::MOVDDUP, dl, MVT::v2f64,
12361 SDValue getMOVLowToHigh(SDValue &Op, SDLoc &dl, SelectionDAG &DAG,
12363 SDValue V1 = Op.getOperand(0);
12364 SDValue V2 = Op.getOperand(1);
12365 MVT VT = Op.getSimpleValueType();
12367 assert(VT != MVT::v2i64 && "unsupported shuffle type");
12369 if (HasSSE2 && VT == MVT::v2f64)
12370 return getTargetShuffleNode(X86ISD::MOVLHPD, dl, VT, V1, V2, DAG);
12372 // v4f32 or v4i32: canonicalize to v4f32 (which is legal for SSE1)
12373 return DAG.getNode(ISD::BITCAST, dl, VT,
12374 getTargetShuffleNode(X86ISD::MOVLHPS, dl, MVT::v4f32,
12375 DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, V1),
12376 DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, V2), DAG));
12380 SDValue getMOVHighToLow(SDValue &Op, SDLoc &dl, SelectionDAG &DAG) {
12381 SDValue V1 = Op.getOperand(0);
12382 SDValue V2 = Op.getOperand(1);
12383 MVT VT = Op.getSimpleValueType();
12385 assert((VT == MVT::v4i32 || VT == MVT::v4f32) &&
12386 "unsupported shuffle type");
12388 if (V2.getOpcode() == ISD::UNDEF)
12392 return getTargetShuffleNode(X86ISD::MOVHLPS, dl, VT, V1, V2, DAG);
12396 SDValue getMOVLP(SDValue &Op, SDLoc &dl, SelectionDAG &DAG, bool HasSSE2) {
12397 SDValue V1 = Op.getOperand(0);
12398 SDValue V2 = Op.getOperand(1);
12399 MVT VT = Op.getSimpleValueType();
12400 unsigned NumElems = VT.getVectorNumElements();
12402 // Use MOVLPS and MOVLPD in case V1 or V2 are loads. During isel, the second
12403   // operand of these instructions can only come from memory, so check if there's a
12404   // potential load folding here, otherwise use SHUFPS or MOVSD to match the
12406 bool CanFoldLoad = false;
12408 // Trivial case, when V2 comes from a load.
12409 if (MayFoldVectorLoad(V2))
12410 CanFoldLoad = true;
12412 // When V1 is a load, it can be folded later into a store in isel, example:
12413 // (store (v4f32 (X86Movlps (load addr:$src1), VR128:$src2)), addr:$src1)
12415 // (MOVLPSmr addr:$src1, VR128:$src2)
12416 // So, recognize this potential and also use MOVLPS or MOVLPD
12417 else if (MayFoldVectorLoad(V1) && MayFoldIntoStore(Op))
12418 CanFoldLoad = true;
12420 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
12422 if (HasSSE2 && NumElems == 2)
12423 return getTargetShuffleNode(X86ISD::MOVLPD, dl, VT, V1, V2, DAG);
12426 // If we don't care about the second element, proceed to use movss.
12427 if (SVOp->getMaskElt(1) != -1)
12428 return getTargetShuffleNode(X86ISD::MOVLPS, dl, VT, V1, V2, DAG);
12431 // movl and movlp will both match v2i64, but v2i64 is never matched by
12432 // movl earlier because we make it strict to avoid messing with the movlp load
12433   // folding logic (see the code above the getMOVLP call). Match it here instead;
12434   // this is horrible, but it will stay like this until we move all shuffle
12435   // matching to x86-specific nodes. Note that for the 1st condition all
12436 // types are matched with movsd.
12438 // FIXME: isMOVLMask should be checked and matched before getMOVLP,
12439 // as to remove this logic from here, as much as possible
12440 if (NumElems == 2 || !isMOVLMask(SVOp->getMask(), VT))
12441 return getTargetShuffleNode(X86ISD::MOVSD, dl, VT, V1, V2, DAG);
12442 return getTargetShuffleNode(X86ISD::MOVSS, dl, VT, V1, V2, DAG);
12445 assert(VT != MVT::v4i32 && "unsupported shuffle type");
12447 // Invert the operand order and use SHUFPS to match it.
12448 return getTargetShuffleNode(X86ISD::SHUFP, dl, VT, V2, V1,
12449 getShuffleSHUFImmediate(SVOp), DAG);
12452 static SDValue NarrowVectorLoadToElement(LoadSDNode *Load, unsigned Index,
12453 SelectionDAG &DAG) {
12455 MVT VT = Load->getSimpleValueType(0);
12456 MVT EVT = VT.getVectorElementType();
12457 SDValue Addr = Load->getOperand(1);
12458 SDValue NewAddr = DAG.getNode(
12459 ISD::ADD, dl, Addr.getSimpleValueType(), Addr,
12460 DAG.getConstant(Index * EVT.getStoreSize(), Addr.getSimpleValueType()));
12463 DAG.getLoad(EVT, dl, Load->getChain(), NewAddr,
12464 DAG.getMachineFunction().getMachineMemOperand(
12465 Load->getMemOperand(), 0, EVT.getStoreSize()));
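// For example, narrowing a <4 x float> load at Index == 2 yields a scalar f32
// load from the original address plus 8 bytes (Index times the f32 store size).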
12469 // It is only safe to call this function if isINSERTPSMask is true for
12470 // this shufflevector mask.
12471 static SDValue getINSERTPS(ShuffleVectorSDNode *SVOp, SDLoc &dl,
12472 SelectionDAG &DAG) {
12473 // Generate an insertps instruction when inserting an f32 from memory onto a
12474 // v4f32 or when copying a member from one v4f32 to another.
12475 // We also use it for transferring i32 from one register to another,
12476 // since it simply copies the same bits.
12477 // If we're transferring an i32 from memory to a specific element in a
12478   // register, we output a generic DAG that will match the PINSRD instruction.
12480 MVT VT = SVOp->getSimpleValueType(0);
12481 MVT EVT = VT.getVectorElementType();
12482 SDValue V1 = SVOp->getOperand(0);
12483 SDValue V2 = SVOp->getOperand(1);
12484 auto Mask = SVOp->getMask();
12485 assert((VT == MVT::v4f32 || VT == MVT::v4i32) &&
12486 "unsupported vector type for insertps/pinsrd");
12488 auto FromV1Predicate = [](const int &i) { return i < 4 && i > -1; };
12489 auto FromV2Predicate = [](const int &i) { return i >= 4; };
12490 int FromV1 = std::count_if(Mask.begin(), Mask.end(), FromV1Predicate);
12494 unsigned DestIndex;
12498 DestIndex = std::find_if(Mask.begin(), Mask.end(), FromV1Predicate) -
12501 // If we have 1 element from each vector, we have to check if we're
12502 // changing V1's element's place. If so, we're done. Otherwise, we
12503 // should assume we're changing V2's element's place and behave
12505 int FromV2 = std::count_if(Mask.begin(), Mask.end(), FromV2Predicate);
12506 assert(DestIndex <= INT32_MAX && "truncated destination index");
12507 if (FromV1 == FromV2 &&
12508 static_cast<int>(DestIndex) == Mask[DestIndex] % 4) {
12512 std::find_if(Mask.begin(), Mask.end(), FromV2Predicate) - Mask.begin();
12515 assert(std::count_if(Mask.begin(), Mask.end(), FromV2Predicate) == 1 &&
12516 "More than one element from V1 and from V2, or no elements from one "
12517 "of the vectors. This case should not have returned true from "
12522 std::find_if(Mask.begin(), Mask.end(), FromV2Predicate) - Mask.begin();
12525 // Get an index into the source vector in the range [0,4) (the mask is
12526 // in the range [0,8) because it can address V1 and V2)
12527 unsigned SrcIndex = Mask[DestIndex] % 4;
12528 if (MayFoldLoad(From)) {
12529 // Trivial case, when From comes from a load and is only used by the
12530 // shuffle. Make it use insertps from the vector that we need from that
12533 NarrowVectorLoadToElement(cast<LoadSDNode>(From), SrcIndex, DAG);
12534 if (!NewLoad.getNode())
12537 if (EVT == MVT::f32) {
12538 // Create this as a scalar to vector to match the instruction pattern.
12539 SDValue LoadScalarToVector =
12540 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, NewLoad);
12541 SDValue InsertpsMask = DAG.getIntPtrConstant(DestIndex << 4);
12542 return DAG.getNode(X86ISD::INSERTPS, dl, VT, To, LoadScalarToVector,
12544 } else { // EVT == MVT::i32
12545 // If we're getting an i32 from memory, use an INSERT_VECTOR_ELT
12546 // instruction, to match the PINSRD instruction, which loads an i32 to a
12547 // certain vector element.
12548 return DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, To, NewLoad,
12549 DAG.getConstant(DestIndex, MVT::i32));
12553 // Vector-element-to-vector
12554 SDValue InsertpsMask = DAG.getIntPtrConstant(DestIndex << 4 | SrcIndex << 6);
12555 return DAG.getNode(X86ISD::INSERTPS, dl, VT, To, From, InsertpsMask);
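// Note on the immediate built above: INSERTPS packs the destination lane into
// bits [5:4] and the source lane into bits [7:6]. For example, copying lane 1
// of 'From' into lane 2 of 'To' yields (2 << 4) | (1 << 6) == 0x60.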
12558 // Reduce a vector shuffle to zext.
12559 static SDValue LowerVectorIntExtend(SDValue Op, const X86Subtarget *Subtarget,
12560 SelectionDAG &DAG) {
12561 // PMOVZX is only available from SSE41.
12562 if (!Subtarget->hasSSE41())
12565 MVT VT = Op.getSimpleValueType();
12567   // Only AVX2 supports 256-bit vector integer extension.
12568 if (!Subtarget->hasInt256() && VT.is256BitVector())
12571 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
12573 SDValue V1 = Op.getOperand(0);
12574 SDValue V2 = Op.getOperand(1);
12575 unsigned NumElems = VT.getVectorNumElements();
12577   // Extension is a unary operation, and the element type of the source vector
12578   // must be smaller than i64.
12579 if (V2.getOpcode() != ISD::UNDEF || !VT.isInteger() ||
12580 VT.getVectorElementType() == MVT::i64)
12583 // Find the expansion ratio, e.g. expanding from i8 to i32 has a ratio of 4.
12584 unsigned Shift = 1; // Start from 2, i.e. 1 << 1.
12585 while ((1U << Shift) < NumElems) {
12586 if (SVOp->getMaskElt(1U << Shift) == 1)
12589 // The maximal ratio is 8, i.e. from i8 to i64.
12594 // Check the shuffle mask.
12595 unsigned Mask = (1U << Shift) - 1;
12596 for (unsigned i = 0; i != NumElems; ++i) {
12597 int EltIdx = SVOp->getMaskElt(i);
12598 if ((i & Mask) != 0 && EltIdx != -1)
12600 if ((i & Mask) == 0 && (unsigned)EltIdx != (i >> Shift))
12604 unsigned NBits = VT.getVectorElementType().getSizeInBits() << Shift;
12605 MVT NeVT = MVT::getIntegerVT(NBits);
12606 MVT NVT = MVT::getVectorVT(NeVT, NumElems >> Shift);
12608 if (!DAG.getTargetLoweringInfo().isTypeLegal(NVT))
12611 return DAG.getNode(ISD::BITCAST, DL, VT,
12612 DAG.getNode(X86ISD::VZEXT, DL, NVT, V1));
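// For example, a v8i16 shuffle of V1 with mask <0,-1,1,-1,2,-1,3,-1> has an
// expansion ratio of 2 and is rewritten as (bitcast (v4i32 VZEXT V1)), which
// typically selects a pmovzxwd of the low four i16 elements.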
12615 static SDValue NormalizeVectorShuffle(SDValue Op, const X86Subtarget *Subtarget,
12616 SelectionDAG &DAG) {
12617 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
12618 MVT VT = Op.getSimpleValueType();
12620 SDValue V1 = Op.getOperand(0);
12621 SDValue V2 = Op.getOperand(1);
12623 if (isZeroShuffle(SVOp))
12624 return getZeroVector(VT, Subtarget, DAG, dl);
12626 // Handle splat operations
12627 if (SVOp->isSplat()) {
12628 // Use vbroadcast whenever the splat comes from a foldable load
12629 SDValue Broadcast = LowerVectorBroadcast(Op, Subtarget, DAG);
12630 if (Broadcast.getNode())
12634 // Check integer expanding shuffles.
12635 SDValue NewOp = LowerVectorIntExtend(Op, Subtarget, DAG);
12636 if (NewOp.getNode())
12639 // If the shuffle can be profitably rewritten as a narrower shuffle, then
12641 if (VT == MVT::v8i16 || VT == MVT::v16i8 || VT == MVT::v16i16 ||
12642 VT == MVT::v32i8) {
12643 SDValue NewOp = RewriteAsNarrowerShuffle(SVOp, DAG);
12644 if (NewOp.getNode())
12645 return DAG.getNode(ISD::BITCAST, dl, VT, NewOp);
12646 } else if (VT.is128BitVector() && Subtarget->hasSSE2()) {
12647 // FIXME: Figure out a cleaner way to do this.
12648 if (ISD::isBuildVectorAllZeros(V2.getNode())) {
12649 SDValue NewOp = RewriteAsNarrowerShuffle(SVOp, DAG);
12650 if (NewOp.getNode()) {
12651 MVT NewVT = NewOp.getSimpleValueType();
12652 if (isCommutedMOVLMask(cast<ShuffleVectorSDNode>(NewOp)->getMask(),
12653 NewVT, true, false))
12654 return getVZextMovL(VT, NewVT, NewOp.getOperand(0), DAG, Subtarget,
12657 } else if (ISD::isBuildVectorAllZeros(V1.getNode())) {
12658 SDValue NewOp = RewriteAsNarrowerShuffle(SVOp, DAG);
12659 if (NewOp.getNode()) {
12660 MVT NewVT = NewOp.getSimpleValueType();
12661 if (isMOVLMask(cast<ShuffleVectorSDNode>(NewOp)->getMask(), NewVT))
12662 return getVZextMovL(VT, NewVT, NewOp.getOperand(1), DAG, Subtarget,
12671 X86TargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const {
12672 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
12673 SDValue V1 = Op.getOperand(0);
12674 SDValue V2 = Op.getOperand(1);
12675 MVT VT = Op.getSimpleValueType();
12677 unsigned NumElems = VT.getVectorNumElements();
12678 bool V1IsUndef = V1.getOpcode() == ISD::UNDEF;
12679 bool V2IsUndef = V2.getOpcode() == ISD::UNDEF;
12680 bool V1IsSplat = false;
12681 bool V2IsSplat = false;
12682 bool HasSSE2 = Subtarget->hasSSE2();
12683 bool HasFp256 = Subtarget->hasFp256();
12684 bool HasInt256 = Subtarget->hasInt256();
12685 MachineFunction &MF = DAG.getMachineFunction();
12687 MF.getFunction()->hasFnAttribute(Attribute::OptimizeForSize);
12689 // Check if we should use the experimental vector shuffle lowering. If so,
12690 // delegate completely to that code path.
12691 if (ExperimentalVectorShuffleLowering)
12692 return lowerVectorShuffle(Op, Subtarget, DAG);
12694 assert(VT.getSizeInBits() != 64 && "Can't lower MMX shuffles");
12696 if (V1IsUndef && V2IsUndef)
12697 return DAG.getUNDEF(VT);
12699   // When we create a shuffle node we put the UNDEF node as the second operand,
12700 // but in some cases the first operand may be transformed to UNDEF.
12701 // In this case we should just commute the node.
12703 return DAG.getCommutedVectorShuffle(*SVOp);
12705 // Vector shuffle lowering takes 3 steps:
12707 // 1) Normalize the input vectors. Here splats, zeroed vectors, profitable
12708 // narrowing and commutation of operands should be handled.
12709 // 2) Matching of shuffles with known shuffle masks to x86 target specific
12711 // 3) Rewriting of unmatched masks into new generic shuffle operations,
12712 // so the shuffle can be broken into other shuffles and the legalizer can
12713 // try the lowering again.
12715 // The general idea is that no vector_shuffle operation should be left to
12716   // be matched during isel; all of them must be converted to a target-specific
12719 // Normalize the input vectors. Here splats, zeroed vectors, profitable
12720 // narrowing and commutation of operands should be handled. The actual code
12721 // doesn't include all of those, work in progress...
12722 SDValue NewOp = NormalizeVectorShuffle(Op, Subtarget, DAG);
12723 if (NewOp.getNode())
12726 SmallVector<int, 8> M(SVOp->getMask().begin(), SVOp->getMask().end());
12728 // NOTE: isPSHUFDMask can also match both masks below (unpckl_undef and
12729 // unpckh_undef). Only use pshufd if speed is more important than size.
12730 if (OptForSize && isUNPCKL_v_undef_Mask(M, VT, HasInt256))
12731 return getTargetShuffleNode(X86ISD::UNPCKL, dl, VT, V1, V1, DAG);
12732 if (OptForSize && isUNPCKH_v_undef_Mask(M, VT, HasInt256))
12733 return getTargetShuffleNode(X86ISD::UNPCKH, dl, VT, V1, V1, DAG);
12735 if (isMOVDDUPMask(M, VT) && Subtarget->hasSSE3() &&
12736 V2IsUndef && MayFoldVectorLoad(V1))
12737 return getMOVDDup(Op, dl, V1, DAG);
12739 if (isMOVHLPS_v_undef_Mask(M, VT))
12740 return getMOVHighToLow(Op, dl, DAG);
12742   // Used to match splats
12743 if (HasSSE2 && isUNPCKHMask(M, VT, HasInt256) && V2IsUndef &&
12744 (VT == MVT::v2f64 || VT == MVT::v2i64))
12745 return getTargetShuffleNode(X86ISD::UNPCKH, dl, VT, V1, V1, DAG);
12747 if (isPSHUFDMask(M, VT)) {
12748     // The actual implementation will match the mask in the if above, and then
12749     // during isel it can be matched to several different instructions, not only
12750     // pshufd as its name says. Sad but true; emulate that behavior for now...
12751 if (isMOVDDUPMask(M, VT) && ((VT == MVT::v4f32 || VT == MVT::v2i64)))
12752 return getTargetShuffleNode(X86ISD::MOVLHPS, dl, VT, V1, V1, DAG);
12754 unsigned TargetMask = getShuffleSHUFImmediate(SVOp);
12756 if (HasSSE2 && (VT == MVT::v4f32 || VT == MVT::v4i32))
12757 return getTargetShuffleNode(X86ISD::PSHUFD, dl, VT, V1, TargetMask, DAG);
12759 if (HasFp256 && (VT == MVT::v4f32 || VT == MVT::v2f64))
12760 return getTargetShuffleNode(X86ISD::VPERMILPI, dl, VT, V1, TargetMask,
12763 return getTargetShuffleNode(X86ISD::SHUFP, dl, VT, V1, V1,
12767 if (isPALIGNRMask(M, VT, Subtarget))
12768 return getTargetShuffleNode(X86ISD::PALIGNR, dl, VT, V1, V2,
12769 getShufflePALIGNRImmediate(SVOp),
12772 if (isVALIGNMask(M, VT, Subtarget))
12773 return getTargetShuffleNode(X86ISD::VALIGN, dl, VT, V1, V2,
12774 getShuffleVALIGNImmediate(SVOp),
12777 // Check if this can be converted into a logical shift.
12778 bool isLeft = false;
12779 unsigned ShAmt = 0;
12781 bool isShift = HasSSE2 && isVectorShift(SVOp, DAG, isLeft, ShVal, ShAmt);
12782 if (isShift && ShVal.hasOneUse()) {
12783 // If the shifted value has multiple uses, it may be cheaper to use
12784 // v_set0 + movlhps or movhlps, etc.
12785 MVT EltVT = VT.getVectorElementType();
12786 ShAmt *= EltVT.getSizeInBits();
12787 return getVShift(isLeft, VT, ShVal, ShAmt, DAG, *this, dl);
12790 if (isMOVLMask(M, VT)) {
12791 if (ISD::isBuildVectorAllZeros(V1.getNode()))
12792 return getVZextMovL(VT, VT, V2, DAG, Subtarget, dl);
12793 if (!isMOVLPMask(M, VT)) {
12794 if (HasSSE2 && (VT == MVT::v2i64 || VT == MVT::v2f64))
12795 return getTargetShuffleNode(X86ISD::MOVSD, dl, VT, V1, V2, DAG);
12797 if (VT == MVT::v4i32 || VT == MVT::v4f32)
12798 return getTargetShuffleNode(X86ISD::MOVSS, dl, VT, V1, V2, DAG);
12802 // FIXME: fold these into legal mask.
12803 if (isMOVLHPSMask(M, VT) && !isUNPCKLMask(M, VT, HasInt256))
12804 return getMOVLowToHigh(Op, dl, DAG, HasSSE2);
12806 if (isMOVHLPSMask(M, VT))
12807 return getMOVHighToLow(Op, dl, DAG);
12809 if (V2IsUndef && isMOVSHDUPMask(M, VT, Subtarget))
12810 return getTargetShuffleNode(X86ISD::MOVSHDUP, dl, VT, V1, DAG);
12812 if (V2IsUndef && isMOVSLDUPMask(M, VT, Subtarget))
12813 return getTargetShuffleNode(X86ISD::MOVSLDUP, dl, VT, V1, DAG);
12815 if (isMOVLPMask(M, VT))
12816 return getMOVLP(Op, dl, DAG, HasSSE2);
12818 if (ShouldXformToMOVHLPS(M, VT) ||
12819 ShouldXformToMOVLP(V1.getNode(), V2.getNode(), M, VT))
12820 return DAG.getCommutedVectorShuffle(*SVOp);
12823 // No better options. Use a vshldq / vsrldq.
12824 MVT EltVT = VT.getVectorElementType();
12825 ShAmt *= EltVT.getSizeInBits();
12826 return getVShift(isLeft, VT, ShVal, ShAmt, DAG, *this, dl);
12829 bool Commuted = false;
12830 // FIXME: This should also accept a bitcast of a splat? Be careful, not
12831 // 1,1,1,1 -> v8i16 though.
12832 BitVector UndefElements;
12833 if (auto *BVOp = dyn_cast<BuildVectorSDNode>(V1.getNode()))
12834 if (BVOp->getConstantSplatNode(&UndefElements) && UndefElements.none())
12836 if (auto *BVOp = dyn_cast<BuildVectorSDNode>(V2.getNode()))
12837 if (BVOp->getConstantSplatNode(&UndefElements) && UndefElements.none())
12840 // Canonicalize the splat or undef, if present, to be on the RHS.
12841 if (!V2IsUndef && V1IsSplat && !V2IsSplat) {
12842 CommuteVectorShuffleMask(M, NumElems);
12844 std::swap(V1IsSplat, V2IsSplat);
12848 if (isCommutedMOVLMask(M, VT, V2IsSplat, V2IsUndef)) {
12849 // Shuffling low element of v1 into undef, just return v1.
12852 // If V2 is a splat, the mask may be malformed such as <4,3,3,3>, which
12853 // the instruction selector will not match, so get a canonical MOVL with
12854 // swapped operands to undo the commute.
12855 return getMOVL(DAG, dl, VT, V2, V1);
12858 if (isUNPCKLMask(M, VT, HasInt256))
12859 return getTargetShuffleNode(X86ISD::UNPCKL, dl, VT, V1, V2, DAG);
12861 if (isUNPCKHMask(M, VT, HasInt256))
12862 return getTargetShuffleNode(X86ISD::UNPCKH, dl, VT, V1, V2, DAG);
12865   // Normalize the mask so all entries that point to V2 point to its first
12866   // element, then try to match unpck{h|l} again. If it matches, return a
12867   // new vector_shuffle with the corrected mask.
12868 SmallVector<int, 8> NewMask(M.begin(), M.end());
12869 NormalizeMask(NewMask, NumElems);
12870 if (isUNPCKLMask(NewMask, VT, HasInt256, true))
12871 return getTargetShuffleNode(X86ISD::UNPCKL, dl, VT, V1, V2, DAG);
12872 if (isUNPCKHMask(NewMask, VT, HasInt256, true))
12873 return getTargetShuffleNode(X86ISD::UNPCKH, dl, VT, V1, V2, DAG);
12877   // Commute it back and try unpck* again.
12878 // FIXME: this seems wrong.
12879 CommuteVectorShuffleMask(M, NumElems);
12881 std::swap(V1IsSplat, V2IsSplat);
12883 if (isUNPCKLMask(M, VT, HasInt256))
12884 return getTargetShuffleNode(X86ISD::UNPCKL, dl, VT, V1, V2, DAG);
12886 if (isUNPCKHMask(M, VT, HasInt256))
12887 return getTargetShuffleNode(X86ISD::UNPCKH, dl, VT, V1, V2, DAG);
12890 // Normalize the node to match x86 shuffle ops if needed
12891 if (!V2IsUndef && (isSHUFPMask(M, VT, /* Commuted */ true)))
12892 return DAG.getCommutedVectorShuffle(*SVOp);
12894 // The checks below are all present in isShuffleMaskLegal, but they are
12895   // inlined here right now to enable us to directly emit target-specific
12896   // nodes; they will be removed one by one until they don't return Op anymore.
12898 if (ShuffleVectorSDNode::isSplatMask(&M[0], VT) &&
12899 SVOp->getSplatIndex() == 0 && V2IsUndef) {
12900 if (VT == MVT::v2f64 || VT == MVT::v2i64)
12901 return getTargetShuffleNode(X86ISD::UNPCKL, dl, VT, V1, V1, DAG);
12904 if (isPSHUFHWMask(M, VT, HasInt256))
12905 return getTargetShuffleNode(X86ISD::PSHUFHW, dl, VT, V1,
12906 getShufflePSHUFHWImmediate(SVOp),
12909 if (isPSHUFLWMask(M, VT, HasInt256))
12910 return getTargetShuffleNode(X86ISD::PSHUFLW, dl, VT, V1,
12911 getShufflePSHUFLWImmediate(SVOp),
12914 unsigned MaskValue;
12915 if (isBlendMask(M, VT, Subtarget->hasSSE41(), HasInt256, &MaskValue))
12916 return LowerVECTOR_SHUFFLEtoBlend(SVOp, MaskValue, Subtarget, DAG);
12918 if (isSHUFPMask(M, VT))
12919 return getTargetShuffleNode(X86ISD::SHUFP, dl, VT, V1, V2,
12920 getShuffleSHUFImmediate(SVOp), DAG);
12922 if (isUNPCKL_v_undef_Mask(M, VT, HasInt256))
12923 return getTargetShuffleNode(X86ISD::UNPCKL, dl, VT, V1, V1, DAG);
12924 if (isUNPCKH_v_undef_Mask(M, VT, HasInt256))
12925 return getTargetShuffleNode(X86ISD::UNPCKH, dl, VT, V1, V1, DAG);
12927 //===--------------------------------------------------------------------===//
12928 // Generate target specific nodes for 128 or 256-bit shuffles only
12929 // supported in the AVX instruction set.
12932 // Handle VMOVDDUPY permutations
12933 if (V2IsUndef && isMOVDDUPYMask(M, VT, HasFp256))
12934 return getTargetShuffleNode(X86ISD::MOVDDUP, dl, VT, V1, DAG);
12936 // Handle VPERMILPS/D* permutations
12937 if (isVPERMILPMask(M, VT)) {
12938 if ((HasInt256 && VT == MVT::v8i32) || VT == MVT::v16i32)
12939 return getTargetShuffleNode(X86ISD::PSHUFD, dl, VT, V1,
12940 getShuffleSHUFImmediate(SVOp), DAG);
12941 return getTargetShuffleNode(X86ISD::VPERMILPI, dl, VT, V1,
12942 getShuffleSHUFImmediate(SVOp), DAG);
12946 if (VT.is512BitVector() && isINSERT64x4Mask(M, VT, &Idx))
12947 return Insert256BitVector(V1, Extract256BitVector(V2, 0, DAG, dl),
12948 Idx*(NumElems/2), DAG, dl);
12950 // Handle VPERM2F128/VPERM2I128 permutations
12951 if (isVPERM2X128Mask(M, VT, HasFp256))
12952 return getTargetShuffleNode(X86ISD::VPERM2X128, dl, VT, V1,
12953 V2, getShuffleVPERM2X128Immediate(SVOp), DAG);
12955 if (Subtarget->hasSSE41() && isINSERTPSMask(M, VT))
12956 return getINSERTPS(SVOp, dl, DAG);
12959 if (V2IsUndef && HasInt256 && isPermImmMask(M, VT, Imm8))
12960 return getTargetShuffleNode(X86ISD::VPERMI, dl, VT, V1, Imm8, DAG);
12962 if ((V2IsUndef && HasInt256 && VT.is256BitVector() && NumElems == 8) ||
12963 VT.is512BitVector()) {
12964 MVT MaskEltVT = MVT::getIntegerVT(VT.getVectorElementType().getSizeInBits());
12965 MVT MaskVectorVT = MVT::getVectorVT(MaskEltVT, NumElems);
12966 SmallVector<SDValue, 16> permclMask;
12967 for (unsigned i = 0; i != NumElems; ++i) {
12968 permclMask.push_back(DAG.getConstant((M[i]>=0) ? M[i] : 0, MaskEltVT));
12971 SDValue Mask = DAG.getNode(ISD::BUILD_VECTOR, dl, MaskVectorVT, permclMask);
12973 // Bitcast is for VPERMPS since mask is v8i32 but node takes v8f32
12974 return DAG.getNode(X86ISD::VPERMV, dl, VT,
12975 DAG.getNode(ISD::BITCAST, dl, VT, Mask), V1);
12976 return DAG.getNode(X86ISD::VPERMV3, dl, VT, V1,
12977 DAG.getNode(ISD::BITCAST, dl, VT, Mask), V2);
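// For example, a single-input v8i32 shuffle with mask <7,6,5,4,3,2,1,0> is
// lowered to VPERMV with the constant index vector <7,6,5,4,3,2,1,0>; any
// undef mask entries are encoded as index 0.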
12980 //===--------------------------------------------------------------------===//
12981 // Since no target specific shuffle was selected for this generic one,
12982 // lower it into other known shuffles. FIXME: this isn't true yet, but
12983 // this is the plan.
12986 // Handle v8i16 specifically since SSE can do byte extraction and insertion.
12987 if (VT == MVT::v8i16) {
12988 SDValue NewOp = LowerVECTOR_SHUFFLEv8i16(Op, Subtarget, DAG);
12989 if (NewOp.getNode())
12993 if (VT == MVT::v16i16 && HasInt256) {
12994 SDValue NewOp = LowerVECTOR_SHUFFLEv16i16(Op, DAG);
12995 if (NewOp.getNode())
12999 if (VT == MVT::v16i8) {
13000 SDValue NewOp = LowerVECTOR_SHUFFLEv16i8(SVOp, Subtarget, DAG);
13001 if (NewOp.getNode())
13005 if (VT == MVT::v32i8) {
13006 SDValue NewOp = LowerVECTOR_SHUFFLEv32i8(SVOp, Subtarget, DAG);
13007 if (NewOp.getNode())
13011 // Handle all 128-bit wide vectors with 4 elements, and match them with
13012 // several different shuffle types.
13013 if (NumElems == 4 && VT.is128BitVector())
13014 return LowerVECTOR_SHUFFLE_128v4(SVOp, DAG);
13016 // Handle general 256-bit shuffles
13017 if (VT.is256BitVector())
13018 return LowerVECTOR_SHUFFLE_256(SVOp, DAG);
13023 // This function assumes its argument is a BUILD_VECTOR of constants or
13024 // undef SDNodes, i.e., ISD::isBuildVectorOfConstantSDNodes(BuildVector) is
13026 static bool BUILD_VECTORtoBlendMask(BuildVectorSDNode *BuildVector,
13027 unsigned &MaskValue) {
13029 unsigned NumElems = BuildVector->getNumOperands();
13030 // There are 2 lanes if (NumElems > 8), and 1 lane otherwise.
13031 unsigned NumLanes = (NumElems - 1) / 8 + 1;
13032 unsigned NumElemsInLane = NumElems / NumLanes;
13034   // A blend for v16i16 must be symmetric across both lanes.
13035 for (unsigned i = 0; i < NumElemsInLane; ++i) {
13036 SDValue EltCond = BuildVector->getOperand(i);
13037 SDValue SndLaneEltCond =
13038 (NumLanes == 2) ? BuildVector->getOperand(i + NumElemsInLane) : EltCond;
13040 int Lane1Cond = -1, Lane2Cond = -1;
13041 if (isa<ConstantSDNode>(EltCond))
13042 Lane1Cond = !isZero(EltCond);
13043 if (isa<ConstantSDNode>(SndLaneEltCond))
13044 Lane2Cond = !isZero(SndLaneEltCond);
13046 if (Lane1Cond == Lane2Cond || Lane2Cond < 0)
13047       // Lane1Cond != 0 means we want the first argument.
13048       // Lane1Cond == 0 means we want the second argument.
13049 // The encoding of this argument is 0 for the first argument, 1
13050 // for the second. Therefore, invert the condition.
13051 MaskValue |= !Lane1Cond << i;
13052 else if (Lane1Cond < 0)
13053 MaskValue |= !Lane2Cond << i;
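// Example: starting from MaskValue == 0, a v4i32 condition vector
// <-1, 0, -1, 0> (true, false, true, false) produces MaskValue == 0b1010,
// since a zero bit selects the first operand and a one bit the second.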
13060 /// \brief Try to lower a VSELECT instruction to a vector shuffle.
13061 static SDValue lowerVSELECTtoVectorShuffle(SDValue Op,
13062 const X86Subtarget *Subtarget,
13063 SelectionDAG &DAG) {
13064 SDValue Cond = Op.getOperand(0);
13065 SDValue LHS = Op.getOperand(1);
13066 SDValue RHS = Op.getOperand(2);
13068 MVT VT = Op.getSimpleValueType();
13070 if (!ISD::isBuildVectorOfConstantSDNodes(Cond.getNode()))
13072 auto *CondBV = cast<BuildVectorSDNode>(Cond);
13074   // Only non-legal VSELECTs reach this lowering; convert those into generic
13075 // shuffles and re-use the shuffle lowering path for blends.
13076 SmallVector<int, 32> Mask;
13077 for (int i = 0, Size = VT.getVectorNumElements(); i < Size; ++i) {
13078 SDValue CondElt = CondBV->getOperand(i);
13080 isa<ConstantSDNode>(CondElt) ? i + (isZero(CondElt) ? Size : 0) : -1);
13082 return DAG.getVectorShuffle(VT, dl, LHS, RHS, Mask);
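// For example, (vselect <-1,0,0,-1>, LHS, RHS) on v4f32 becomes a shuffle of
// LHS and RHS with mask <0,5,6,3>: indices below 4 pick lanes from LHS (the
// "true" operand) and indices of 4 and above pick lanes from RHS.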
13085 SDValue X86TargetLowering::LowerVSELECT(SDValue Op, SelectionDAG &DAG) const {
13086 // A vselect where all conditions and data are constants can be optimized into
13087 // a single vector load by SelectionDAGLegalize::ExpandBUILD_VECTOR().
13088 if (ISD::isBuildVectorOfConstantSDNodes(Op.getOperand(0).getNode()) &&
13089 ISD::isBuildVectorOfConstantSDNodes(Op.getOperand(1).getNode()) &&
13090 ISD::isBuildVectorOfConstantSDNodes(Op.getOperand(2).getNode()))
13093 // Try to lower this to a blend-style vector shuffle. This can handle all
13094 // constant condition cases.
13095 SDValue BlendOp = lowerVSELECTtoVectorShuffle(Op, Subtarget, DAG);
13096 if (BlendOp.getNode())
13099 // Variable blends are only legal from SSE4.1 onward.
13100 if (!Subtarget->hasSSE41())
13103 // Some types for vselect were previously set to Expand, not Legal or
13104   // Custom. Return an empty SDValue so we fall through to Expand, after
13105 // the Custom lowering phase.
13106 MVT VT = Op.getSimpleValueType();
13107 switch (VT.SimpleTy) {
13112 if (Subtarget->hasBWI() && Subtarget->hasVLX())
13117 // We couldn't create a "Blend with immediate" node.
13118 // This node should still be legal, but we'll have to emit a blendv*
13123 static SDValue LowerEXTRACT_VECTOR_ELT_SSE4(SDValue Op, SelectionDAG &DAG) {
13124 MVT VT = Op.getSimpleValueType();
13127 if (!Op.getOperand(0).getSimpleValueType().is128BitVector())
13130 if (VT.getSizeInBits() == 8) {
13131 SDValue Extract = DAG.getNode(X86ISD::PEXTRB, dl, MVT::i32,
13132 Op.getOperand(0), Op.getOperand(1));
13133 SDValue Assert = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Extract,
13134 DAG.getValueType(VT));
13135 return DAG.getNode(ISD::TRUNCATE, dl, VT, Assert);
13138 if (VT.getSizeInBits() == 16) {
13139 unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
13140 // If Idx is 0, it's cheaper to do a move instead of a pextrw.
13142 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i16,
13143 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
13144 DAG.getNode(ISD::BITCAST, dl,
13147 Op.getOperand(1)));
13148 SDValue Extract = DAG.getNode(X86ISD::PEXTRW, dl, MVT::i32,
13149 Op.getOperand(0), Op.getOperand(1));
13150 SDValue Assert = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Extract,
13151 DAG.getValueType(VT));
13152 return DAG.getNode(ISD::TRUNCATE, dl, VT, Assert);
13155 if (VT == MVT::f32) {
13156 // EXTRACTPS outputs to a GPR32 register which will require a movd to copy
13157     // the result back to an FR32 register. It's only worth matching if the
13158 // result has a single use which is a store or a bitcast to i32. And in
13159 // the case of a store, it's not worth it if the index is a constant 0,
13160 // because a MOVSSmr can be used instead, which is smaller and faster.
13161 if (!Op.hasOneUse())
13163 SDNode *User = *Op.getNode()->use_begin();
13164 if ((User->getOpcode() != ISD::STORE ||
13165 (isa<ConstantSDNode>(Op.getOperand(1)) &&
13166 cast<ConstantSDNode>(Op.getOperand(1))->isNullValue())) &&
13167 (User->getOpcode() != ISD::BITCAST ||
13168 User->getValueType(0) != MVT::i32))
13170 SDValue Extract = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
13171 DAG.getNode(ISD::BITCAST, dl, MVT::v4i32,
13174 return DAG.getNode(ISD::BITCAST, dl, MVT::f32, Extract);
13177 if (VT == MVT::i32 || VT == MVT::i64) {
13178     // ExtractPS/pextrq work with a constant index.
13179 if (isa<ConstantSDNode>(Op.getOperand(1)))
13185 /// Extract one bit from a mask vector, like v16i1 or v8i1.
13186 /// AVX-512 feature.
13188 X86TargetLowering::ExtractBitFromMaskVector(SDValue Op, SelectionDAG &DAG) const {
13189 SDValue Vec = Op.getOperand(0);
13191 MVT VecVT = Vec.getSimpleValueType();
13192 SDValue Idx = Op.getOperand(1);
13193 MVT EltVT = Op.getSimpleValueType();
13195 assert((EltVT == MVT::i1) && "Unexpected operands in ExtractBitFromMaskVector");
13196 assert((VecVT.getVectorNumElements() <= 16 || Subtarget->hasBWI()) &&
13197 "Unexpected vector type in ExtractBitFromMaskVector");
13199   // A variable index can't be handled in mask registers;
13200   // extend the vector to VR512.
13201 if (!isa<ConstantSDNode>(Idx)) {
13202 MVT ExtVT = (VecVT == MVT::v8i1 ? MVT::v8i64 : MVT::v16i32);
13203 SDValue Ext = DAG.getNode(ISD::ZERO_EXTEND, dl, ExtVT, Vec);
13204 SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl,
13205 ExtVT.getVectorElementType(), Ext, Idx);
13206 return DAG.getNode(ISD::TRUNCATE, dl, EltVT, Elt);
13209 unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
13210 const TargetRegisterClass* rc = getRegClassFor(VecVT);
13211 if (!Subtarget->hasDQI() && (VecVT.getVectorNumElements() <= 8))
13212 rc = getRegClassFor(MVT::v16i1);
13213 unsigned MaxSift = rc->getSize()*8 - 1;
13214 Vec = DAG.getNode(X86ISD::VSHLI, dl, VecVT, Vec,
13215 DAG.getConstant(MaxSift - IdxVal, MVT::i8));
13216 Vec = DAG.getNode(X86ISD::VSRLI, dl, VecVT, Vec,
13217 DAG.getConstant(MaxSift, MVT::i8));
13218 return DAG.getNode(X86ISD::VEXTRACT, dl, MVT::i1, Vec,
13219 DAG.getIntPtrConstant(0));
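// The constant-index path above isolates the requested bit with two shifts:
// for a v16i1 mask held in a 16-bit register class (MaxSift == 15), extracting
// bit 5 shifts left by 10 and then right by 15, leaving that bit in position 0
// before the final VEXTRACT of element 0.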
13223 X86TargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op,
13224 SelectionDAG &DAG) const {
13226 SDValue Vec = Op.getOperand(0);
13227 MVT VecVT = Vec.getSimpleValueType();
13228 SDValue Idx = Op.getOperand(1);
13230 if (Op.getSimpleValueType() == MVT::i1)
13231 return ExtractBitFromMaskVector(Op, DAG);
13233 if (!isa<ConstantSDNode>(Idx)) {
13234 if (VecVT.is512BitVector() ||
13235 (VecVT.is256BitVector() && Subtarget->hasInt256() &&
13236 VecVT.getVectorElementType().getSizeInBits() == 32)) {
13239 MVT::getIntegerVT(VecVT.getVectorElementType().getSizeInBits());
13240 MVT MaskVT = MVT::getVectorVT(MaskEltVT, VecVT.getSizeInBits() /
13241 MaskEltVT.getSizeInBits());
13243 Idx = DAG.getZExtOrTrunc(Idx, dl, MaskEltVT);
13244 SDValue Mask = DAG.getNode(X86ISD::VINSERT, dl, MaskVT,
13245 getZeroVector(MaskVT, Subtarget, DAG, dl),
13246 Idx, DAG.getConstant(0, getPointerTy()));
13247 SDValue Perm = DAG.getNode(X86ISD::VPERMV, dl, VecVT, Mask, Vec);
13248 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, Op.getValueType(),
13249 Perm, DAG.getConstant(0, getPointerTy()));
13254 // If this is a 256-bit vector result, first extract the 128-bit vector and
13255 // then extract the element from the 128-bit vector.
13256 if (VecVT.is256BitVector() || VecVT.is512BitVector()) {
13258 unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
13259 // Get the 128-bit vector.
13260 Vec = Extract128BitVector(Vec, IdxVal, DAG, dl);
13261 MVT EltVT = VecVT.getVectorElementType();
13263 unsigned ElemsPerChunk = 128 / EltVT.getSizeInBits();
13265 //if (IdxVal >= NumElems/2)
13266 // IdxVal -= NumElems/2;
13267 IdxVal -= (IdxVal/ElemsPerChunk)*ElemsPerChunk;
13268 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, Op.getValueType(), Vec,
13269 DAG.getConstant(IdxVal, MVT::i32));
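// For example, extracting element 5 of a v8i32 first extracts the upper
// 128-bit half (elements 4..7) and then extracts element 5 - 4 == 1 from the
// resulting v4i32.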
13272 assert(VecVT.is128BitVector() && "Unexpected vector length");
13274 if (Subtarget->hasSSE41()) {
13275 SDValue Res = LowerEXTRACT_VECTOR_ELT_SSE4(Op, DAG);
13280 MVT VT = Op.getSimpleValueType();
13281 // TODO: handle v16i8.
13282 if (VT.getSizeInBits() == 16) {
13283 SDValue Vec = Op.getOperand(0);
13284 unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
13286 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i16,
13287 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
13288 DAG.getNode(ISD::BITCAST, dl,
13290 Op.getOperand(1)));
13291     // Transform it so it matches pextrw, which produces a 32-bit result.
13292 MVT EltVT = MVT::i32;
13293 SDValue Extract = DAG.getNode(X86ISD::PEXTRW, dl, EltVT,
13294 Op.getOperand(0), Op.getOperand(1));
13295 SDValue Assert = DAG.getNode(ISD::AssertZext, dl, EltVT, Extract,
13296 DAG.getValueType(VT));
13297 return DAG.getNode(ISD::TRUNCATE, dl, VT, Assert);
13300 if (VT.getSizeInBits() == 32) {
13301 unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
13305 // SHUFPS the element to the lowest double word, then movss.
13306 int Mask[4] = { static_cast<int>(Idx), -1, -1, -1 };
13307 MVT VVT = Op.getOperand(0).getSimpleValueType();
13308 SDValue Vec = DAG.getVectorShuffle(VVT, dl, Op.getOperand(0),
13309 DAG.getUNDEF(VVT), Mask);
13310 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Vec,
13311 DAG.getIntPtrConstant(0));
13314 if (VT.getSizeInBits() == 64) {
13315 // FIXME: .td only matches this for <2 x f64>, not <2 x i64> on 32b
13316 // FIXME: seems like this should be unnecessary if mov{h,l}pd were taught
13317 // to match extract_elt for f64.
13318 unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
13322 // UNPCKHPD the element to the lowest double word, then movsd.
13323     // Note if the lower 64 bits of the result of the UNPCKHPD are then stored
13324     // to an f64mem, the whole operation is folded into a single MOVHPDmr.
13325 int Mask[2] = { 1, -1 };
13326 MVT VVT = Op.getOperand(0).getSimpleValueType();
13327 SDValue Vec = DAG.getVectorShuffle(VVT, dl, Op.getOperand(0),
13328 DAG.getUNDEF(VVT), Mask);
13329 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Vec,
13330 DAG.getIntPtrConstant(0));
13336 /// Insert one bit into a mask vector, like v16i1 or v8i1.
13337 /// AVX-512 feature.
13339 X86TargetLowering::InsertBitToMaskVector(SDValue Op, SelectionDAG &DAG) const {
13341 SDValue Vec = Op.getOperand(0);
13342 SDValue Elt = Op.getOperand(1);
13343 SDValue Idx = Op.getOperand(2);
13344 MVT VecVT = Vec.getSimpleValueType();
13346 if (!isa<ConstantSDNode>(Idx)) {
13347     // Non-constant index. Extend the source and destination,
13348 // insert element and then truncate the result.
13349 MVT ExtVecVT = (VecVT == MVT::v8i1 ? MVT::v8i64 : MVT::v16i32);
13350 MVT ExtEltVT = (VecVT == MVT::v8i1 ? MVT::i64 : MVT::i32);
13351 SDValue ExtOp = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, ExtVecVT,
13352 DAG.getNode(ISD::ZERO_EXTEND, dl, ExtVecVT, Vec),
13353 DAG.getNode(ISD::ZERO_EXTEND, dl, ExtEltVT, Elt), Idx);
13354 return DAG.getNode(ISD::TRUNCATE, dl, VecVT, ExtOp);
13357 unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
13358 SDValue EltInVec = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VecVT, Elt);
13359 if (Vec.getOpcode() == ISD::UNDEF)
13360 return DAG.getNode(X86ISD::VSHLI, dl, VecVT, EltInVec,
13361 DAG.getConstant(IdxVal, MVT::i8));
13362 const TargetRegisterClass* rc = getRegClassFor(VecVT);
13363 unsigned MaxSift = rc->getSize()*8 - 1;
13364 EltInVec = DAG.getNode(X86ISD::VSHLI, dl, VecVT, EltInVec,
13365 DAG.getConstant(MaxSift, MVT::i8));
13366 EltInVec = DAG.getNode(X86ISD::VSRLI, dl, VecVT, EltInVec,
13367 DAG.getConstant(MaxSift - IdxVal, MVT::i8));
13368 return DAG.getNode(ISD::OR, dl, VecVT, Vec, EltInVec);
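// The constant-index path mirrors the extract above: the scalar bit is moved
// to bit position IdxVal (shift left to the top bit, then right by
// MaxSift - IdxVal) and OR'ed into the existing vector; if the vector is
// undef, a single left shift by IdxVal suffices.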
13371 SDValue X86TargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op,
13372 SelectionDAG &DAG) const {
13373 MVT VT = Op.getSimpleValueType();
13374 MVT EltVT = VT.getVectorElementType();
13376 if (EltVT == MVT::i1)
13377 return InsertBitToMaskVector(Op, DAG);
13380 SDValue N0 = Op.getOperand(0);
13381 SDValue N1 = Op.getOperand(1);
13382 SDValue N2 = Op.getOperand(2);
13383 if (!isa<ConstantSDNode>(N2))
13385 auto *N2C = cast<ConstantSDNode>(N2);
13386 unsigned IdxVal = N2C->getZExtValue();
13388 // If the vector is wider than 128 bits, extract the 128-bit subvector, insert
13389 // into that, and then insert the subvector back into the result.
13390 if (VT.is256BitVector() || VT.is512BitVector()) {
13391 // Get the desired 128-bit vector half.
13392 SDValue V = Extract128BitVector(N0, IdxVal, DAG, dl);
13394 // Insert the element into the desired half.
13395 unsigned NumEltsIn128 = 128 / EltVT.getSizeInBits();
13396 unsigned IdxIn128 = IdxVal - (IdxVal / NumEltsIn128) * NumEltsIn128;
13398 V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, V.getValueType(), V, N1,
13399 DAG.getConstant(IdxIn128, MVT::i32));
13401 // Insert the changed part back to the 256-bit vector
13402 return Insert128BitVector(N0, V, IdxVal, DAG, dl);
13404 assert(VT.is128BitVector() && "Only 128-bit vector types should be left!");
13406 if (Subtarget->hasSSE41()) {
13407 if (EltVT.getSizeInBits() == 8 || EltVT.getSizeInBits() == 16) {
13409 if (VT == MVT::v8i16) {
13410 Opc = X86ISD::PINSRW;
13412 assert(VT == MVT::v16i8);
13413 Opc = X86ISD::PINSRB;
13416       // Transform it so it matches pinsr{b,w}, which expects a GR32 as its second
13418 if (N1.getValueType() != MVT::i32)
13419 N1 = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, N1);
13420 if (N2.getValueType() != MVT::i32)
13421 N2 = DAG.getIntPtrConstant(IdxVal);
13422 return DAG.getNode(Opc, dl, VT, N0, N1, N2);
13425 if (EltVT == MVT::f32) {
13426 // Bits [7:6] of the constant are the source select. This will always be
13427       // zero here. The DAG Combiner may combine an extract_elt index into these
13429       // bits; for example, (insert (extract, 3), 2) could be matched by folding
13431       // the '3' into bits [7:6] of X86ISD::INSERTPS.
13432 // Bits [5:4] of the constant are the destination select. This is the
13433 // value of the incoming immediate.
13434 // Bits [3:0] of the constant are the zero mask. The DAG Combiner may
13435 // combine either bitwise AND or insert of float 0.0 to set these bits.
13436 N2 = DAG.getIntPtrConstant(IdxVal << 4);
13437       // Create this as a scalar to vector.
13438 N1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4f32, N1);
13439 return DAG.getNode(X86ISD::INSERTPS, dl, VT, N0, N1, N2);
13442 if (EltVT == MVT::i32 || EltVT == MVT::i64) {
13443 // PINSR* works with constant index.
13448 if (EltVT == MVT::i8)
13451 if (EltVT.getSizeInBits() == 16) {
13452 // Transform it so it match pinsrw which expects a 16-bit value in a GR32
13453 // as its second argument.
13454 if (N1.getValueType() != MVT::i32)
13455 N1 = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, N1);
13456 if (N2.getValueType() != MVT::i32)
13457 N2 = DAG.getIntPtrConstant(IdxVal);
13458 return DAG.getNode(X86ISD::PINSRW, dl, VT, N0, N1, N2);
13463 static SDValue LowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG) {
13465 MVT OpVT = Op.getSimpleValueType();
13467 // If this is a 256-bit vector result, first insert into a 128-bit
13468 // vector and then insert into the 256-bit vector.
13469 if (!OpVT.is128BitVector()) {
13470 // Insert into a 128-bit vector.
13471 unsigned SizeFactor = OpVT.getSizeInBits()/128;
13472 MVT VT128 = MVT::getVectorVT(OpVT.getVectorElementType(),
13473 OpVT.getVectorNumElements() / SizeFactor);
13475 Op = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT128, Op.getOperand(0));
13477 // Insert the 128-bit vector.
13478 return Insert128BitVector(DAG.getUNDEF(OpVT), Op, 0, DAG, dl);
13481 if (OpVT == MVT::v1i64 &&
13482 Op.getOperand(0).getValueType() == MVT::i64)
13483 return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v1i64, Op.getOperand(0));
13485 SDValue AnyExt = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, Op.getOperand(0));
13486 assert(OpVT.is128BitVector() && "Expected an SSE type!");
13487 return DAG.getNode(ISD::BITCAST, dl, OpVT,
13488 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32,AnyExt));
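// For a wide result such as v8i32, the scalar is first placed into a v4i32
// (SizeFactor == 2) and that 128-bit vector is then inserted at index 0 of an
// undef v8i32, leaving the upper lanes undefined.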
13491 // Lower a node with an EXTRACT_SUBVECTOR opcode. This may result in
13492 // a simple subregister reference or explicit instructions to grab
13493 // upper bits of a vector.
13494 static SDValue LowerEXTRACT_SUBVECTOR(SDValue Op, const X86Subtarget *Subtarget,
13495 SelectionDAG &DAG) {
13497 SDValue In = Op.getOperand(0);
13498 SDValue Idx = Op.getOperand(1);
13499 unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
13500 MVT ResVT = Op.getSimpleValueType();
13501 MVT InVT = In.getSimpleValueType();
13503 if (Subtarget->hasFp256()) {
13504 if (ResVT.is128BitVector() &&
13505 (InVT.is256BitVector() || InVT.is512BitVector()) &&
13506 isa<ConstantSDNode>(Idx)) {
13507 return Extract128BitVector(In, IdxVal, DAG, dl);
13509 if (ResVT.is256BitVector() && InVT.is512BitVector() &&
13510 isa<ConstantSDNode>(Idx)) {
13511 return Extract256BitVector(In, IdxVal, DAG, dl);
13517 // Lower a node with an INSERT_SUBVECTOR opcode. This may result in a
13518 // simple superregister reference or explicit instructions to insert
13519 // the upper bits of a vector.
13520 static SDValue LowerINSERT_SUBVECTOR(SDValue Op, const X86Subtarget *Subtarget,
13521 SelectionDAG &DAG) {
13522 if (!Subtarget->hasAVX())
13526 SDValue Vec = Op.getOperand(0);
13527 SDValue SubVec = Op.getOperand(1);
13528 SDValue Idx = Op.getOperand(2);
13530 if (!isa<ConstantSDNode>(Idx))
13533 unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
13534 MVT OpVT = Op.getSimpleValueType();
13535 MVT SubVecVT = SubVec.getSimpleValueType();
13537 // Fold two 16-byte subvector loads into one 32-byte load:
13538 // (insert_subvector (insert_subvector undef, (load addr), 0),
13539 // (load addr + 16), Elts/2)
13541 if ((IdxVal == OpVT.getVectorNumElements() / 2) &&
13542 Vec.getOpcode() == ISD::INSERT_SUBVECTOR &&
13543 OpVT.is256BitVector() && SubVecVT.is128BitVector() &&
13544 !Subtarget->isUnalignedMem32Slow()) {
13545 SDValue SubVec2 = Vec.getOperand(1);
13546 if (auto *Idx2 = dyn_cast<ConstantSDNode>(Vec.getOperand(2))) {
13547 if (Idx2->getZExtValue() == 0) {
13548 SDValue Ops[] = { SubVec2, SubVec };
13549 SDValue LD = EltsFromConsecutiveLoads(OpVT, Ops, dl, DAG, false);
13556 if ((OpVT.is256BitVector() || OpVT.is512BitVector()) &&
13557 SubVecVT.is128BitVector())
13558 return Insert128BitVector(Vec, SubVec, IdxVal, DAG, dl);
13560 if (OpVT.is512BitVector() && SubVecVT.is256BitVector())
13561 return Insert256BitVector(Vec, SubVec, IdxVal, DAG, dl);
13566 // ConstantPool, JumpTable, GlobalAddress, and ExternalSymbol are lowered as
13567 // their target counterpart wrapped in the X86ISD::Wrapper node. Suppose N is
13568 // one of the above-mentioned nodes. It has to be wrapped because otherwise
13569 // Select(N) returns N. So the raw TargetGlobalAddress nodes, etc. can only
13570 // be used to form an addressing mode. These wrapped nodes will be selected
13573 X86TargetLowering::LowerConstantPool(SDValue Op, SelectionDAG &DAG) const {
13574 ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
13576 // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
13577 // global base reg.
13578 unsigned char OpFlag = 0;
13579 unsigned WrapperKind = X86ISD::Wrapper;
13580 CodeModel::Model M = DAG.getTarget().getCodeModel();
13582 if (Subtarget->isPICStyleRIPRel() &&
13583 (M == CodeModel::Small || M == CodeModel::Kernel))
13584 WrapperKind = X86ISD::WrapperRIP;
13585 else if (Subtarget->isPICStyleGOT())
13586 OpFlag = X86II::MO_GOTOFF;
13587 else if (Subtarget->isPICStyleStubPIC())
13588 OpFlag = X86II::MO_PIC_BASE_OFFSET;
13590 SDValue Result = DAG.getTargetConstantPool(CP->getConstVal(), getPointerTy(),
13591 CP->getAlignment(),
13592 CP->getOffset(), OpFlag);
13594 Result = DAG.getNode(WrapperKind, DL, getPointerTy(), Result);
13595 // With PIC, the address is actually $g + Offset.
13597 Result = DAG.getNode(ISD::ADD, DL, getPointerTy(),
13598 DAG.getNode(X86ISD::GlobalBaseReg,
13599 SDLoc(), getPointerTy()),
13606 SDValue X86TargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const {
13607 JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
13609 // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
13610 // global base reg.
13611 unsigned char OpFlag = 0;
13612 unsigned WrapperKind = X86ISD::Wrapper;
13613 CodeModel::Model M = DAG.getTarget().getCodeModel();
13615 if (Subtarget->isPICStyleRIPRel() &&
13616 (M == CodeModel::Small || M == CodeModel::Kernel))
13617 WrapperKind = X86ISD::WrapperRIP;
13618 else if (Subtarget->isPICStyleGOT())
13619 OpFlag = X86II::MO_GOTOFF;
13620 else if (Subtarget->isPICStyleStubPIC())
13621 OpFlag = X86II::MO_PIC_BASE_OFFSET;
13623 SDValue Result = DAG.getTargetJumpTable(JT->getIndex(), getPointerTy(),
13626 Result = DAG.getNode(WrapperKind, DL, getPointerTy(), Result);
13628 // With PIC, the address is actually $g + Offset.
13630 Result = DAG.getNode(ISD::ADD, DL, getPointerTy(),
13631 DAG.getNode(X86ISD::GlobalBaseReg,
13632 SDLoc(), getPointerTy()),
13639 X86TargetLowering::LowerExternalSymbol(SDValue Op, SelectionDAG &DAG) const {
13640 const char *Sym = cast<ExternalSymbolSDNode>(Op)->getSymbol();
13642 // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
13643 // global base reg.
13644 unsigned char OpFlag = 0;
13645 unsigned WrapperKind = X86ISD::Wrapper;
13646 CodeModel::Model M = DAG.getTarget().getCodeModel();
13648 if (Subtarget->isPICStyleRIPRel() &&
13649 (M == CodeModel::Small || M == CodeModel::Kernel)) {
13650 if (Subtarget->isTargetDarwin() || Subtarget->isTargetELF())
13651 OpFlag = X86II::MO_GOTPCREL;
13652 WrapperKind = X86ISD::WrapperRIP;
13653 } else if (Subtarget->isPICStyleGOT()) {
13654 OpFlag = X86II::MO_GOT;
13655 } else if (Subtarget->isPICStyleStubPIC()) {
13656 OpFlag = X86II::MO_DARWIN_NONLAZY_PIC_BASE;
13657 } else if (Subtarget->isPICStyleStubNoDynamic()) {
13658 OpFlag = X86II::MO_DARWIN_NONLAZY;
13661 SDValue Result = DAG.getTargetExternalSymbol(Sym, getPointerTy(), OpFlag);
13664 Result = DAG.getNode(WrapperKind, DL, getPointerTy(), Result);
13666 // With PIC, the address is actually $g + Offset.
13667 if (DAG.getTarget().getRelocationModel() == Reloc::PIC_ &&
13668 !Subtarget->is64Bit()) {
13669 Result = DAG.getNode(ISD::ADD, DL, getPointerTy(),
13670 DAG.getNode(X86ISD::GlobalBaseReg,
13671 SDLoc(), getPointerTy()),
13675 // For symbols that require a load from a stub to get the address, emit the
13677 if (isGlobalStubReference(OpFlag))
13678 Result = DAG.getLoad(getPointerTy(), DL, DAG.getEntryNode(), Result,
13679 MachinePointerInfo::getGOT(), false, false, false, 0);
13685 X86TargetLowering::LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const {
13686   // Create the TargetBlockAddress node.
13687 unsigned char OpFlags =
13688 Subtarget->ClassifyBlockAddressReference();
13689 CodeModel::Model M = DAG.getTarget().getCodeModel();
13690 const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
13691 int64_t Offset = cast<BlockAddressSDNode>(Op)->getOffset();
13693 SDValue Result = DAG.getTargetBlockAddress(BA, getPointerTy(), Offset,
13696 if (Subtarget->isPICStyleRIPRel() &&
13697 (M == CodeModel::Small || M == CodeModel::Kernel))
13698 Result = DAG.getNode(X86ISD::WrapperRIP, dl, getPointerTy(), Result);
13700 Result = DAG.getNode(X86ISD::Wrapper, dl, getPointerTy(), Result);
13702 // With PIC, the address is actually $g + Offset.
13703 if (isGlobalRelativeToPICBase(OpFlags)) {
13704 Result = DAG.getNode(ISD::ADD, dl, getPointerTy(),
13705 DAG.getNode(X86ISD::GlobalBaseReg, dl, getPointerTy()),
13713 X86TargetLowering::LowerGlobalAddress(const GlobalValue *GV, SDLoc dl,
13714 int64_t Offset, SelectionDAG &DAG) const {
13715 // Create the TargetGlobalAddress node, folding in the constant
13716 // offset if it is legal.
13717 unsigned char OpFlags =
13718 Subtarget->ClassifyGlobalReference(GV, DAG.getTarget());
13719 CodeModel::Model M = DAG.getTarget().getCodeModel();
13721 if (OpFlags == X86II::MO_NO_FLAG &&
13722 X86::isOffsetSuitableForCodeModel(Offset, M)) {
13723 // A direct static reference to a global.
13724 Result = DAG.getTargetGlobalAddress(GV, dl, getPointerTy(), Offset);
13727 Result = DAG.getTargetGlobalAddress(GV, dl, getPointerTy(), 0, OpFlags);
13730 if (Subtarget->isPICStyleRIPRel() &&
13731 (M == CodeModel::Small || M == CodeModel::Kernel))
13732 Result = DAG.getNode(X86ISD::WrapperRIP, dl, getPointerTy(), Result);
13734 Result = DAG.getNode(X86ISD::Wrapper, dl, getPointerTy(), Result);
13736 // With PIC, the address is actually $g + Offset.
13737 if (isGlobalRelativeToPICBase(OpFlags)) {
13738 Result = DAG.getNode(ISD::ADD, dl, getPointerTy(),
13739 DAG.getNode(X86ISD::GlobalBaseReg, dl, getPointerTy()),
13743 // For globals that require a load from a stub to get the address, emit the
13745 if (isGlobalStubReference(OpFlags))
13746 Result = DAG.getLoad(getPointerTy(), dl, DAG.getEntryNode(), Result,
13747 MachinePointerInfo::getGOT(), false, false, false, 0);
13749 // If there was a non-zero offset that we didn't fold, create an explicit
13750 // addition for it.
13752 Result = DAG.getNode(ISD::ADD, dl, getPointerTy(), Result,
13753 DAG.getConstant(Offset, getPointerTy()));
13759 X86TargetLowering::LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const {
13760 const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
13761 int64_t Offset = cast<GlobalAddressSDNode>(Op)->getOffset();
13762 return LowerGlobalAddress(GV, SDLoc(Op), Offset, DAG);
13766 GetTLSADDR(SelectionDAG &DAG, SDValue Chain, GlobalAddressSDNode *GA,
13767 SDValue *InFlag, const EVT PtrVT, unsigned ReturnReg,
13768 unsigned char OperandFlags, bool LocalDynamic = false) {
13769 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
13770 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
13772 SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl,
13773 GA->getValueType(0),
13777 X86ISD::NodeType CallType = LocalDynamic ? X86ISD::TLSBASEADDR
13781 SDValue Ops[] = { Chain, TGA, *InFlag };
13782 Chain = DAG.getNode(CallType, dl, NodeTys, Ops);
13784 SDValue Ops[] = { Chain, TGA };
13785 Chain = DAG.getNode(CallType, dl, NodeTys, Ops);
13788   // TLSADDR will be codegen'ed as a call. Inform MFI that the function has calls.
13789 MFI->setAdjustsStack(true);
13790 MFI->setHasCalls(true);
13792 SDValue Flag = Chain.getValue(1);
13793 return DAG.getCopyFromReg(Chain, dl, ReturnReg, PtrVT, Flag);
13796 // Lower ISD::GlobalTLSAddress using the "general dynamic" model, 32 bit
13798 LowerToTLSGeneralDynamicModel32(GlobalAddressSDNode *GA, SelectionDAG &DAG,
13801 SDLoc dl(GA); // ? function entry point might be better
13802 SDValue Chain = DAG.getCopyToReg(DAG.getEntryNode(), dl, X86::EBX,
13803 DAG.getNode(X86ISD::GlobalBaseReg,
13804 SDLoc(), PtrVT), InFlag);
13805 InFlag = Chain.getValue(1);
13807 return GetTLSADDR(DAG, Chain, GA, &InFlag, PtrVT, X86::EAX, X86II::MO_TLSGD);
13810 // Lower ISD::GlobalTLSAddress using the "general dynamic" model, 64 bit
13812 LowerToTLSGeneralDynamicModel64(GlobalAddressSDNode *GA, SelectionDAG &DAG,
13814 return GetTLSADDR(DAG, DAG.getEntryNode(), GA, nullptr, PtrVT,
13815 X86::RAX, X86II::MO_TLSGD);
13818 static SDValue LowerToTLSLocalDynamicModel(GlobalAddressSDNode *GA,
13824 // Get the start address of the TLS block for this module.
13825 X86MachineFunctionInfo* MFI = DAG.getMachineFunction()
13826 .getInfo<X86MachineFunctionInfo>();
13827 MFI->incNumLocalDynamicTLSAccesses();
13831 Base = GetTLSADDR(DAG, DAG.getEntryNode(), GA, nullptr, PtrVT, X86::RAX,
13832 X86II::MO_TLSLD, /*LocalDynamic=*/true);
13835 SDValue Chain = DAG.getCopyToReg(DAG.getEntryNode(), dl, X86::EBX,
13836 DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), PtrVT), InFlag);
13837 InFlag = Chain.getValue(1);
13838 Base = GetTLSADDR(DAG, Chain, GA, &InFlag, PtrVT, X86::EAX,
13839 X86II::MO_TLSLDM, /*LocalDynamic=*/true);
13842 // Note: the CleanupLocalDynamicTLSPass will remove redundant computations
13846 unsigned char OperandFlags = X86II::MO_DTPOFF;
13847 unsigned WrapperKind = X86ISD::Wrapper;
13848 SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl,
13849 GA->getValueType(0),
13850 GA->getOffset(), OperandFlags);
13851 SDValue Offset = DAG.getNode(WrapperKind, dl, PtrVT, TGA);
13853 // Add x@dtpoff with the base.
13854 return DAG.getNode(ISD::ADD, dl, PtrVT, Offset, Base);
13857 // Lower ISD::GlobalTLSAddress using the "initial exec" or "local exec" model.
13858 static SDValue LowerToTLSExecModel(GlobalAddressSDNode *GA, SelectionDAG &DAG,
13859 const EVT PtrVT, TLSModel::Model model,
13860 bool is64Bit, bool isPIC) {
13863 // Get the Thread Pointer, which is %gs:0 (32-bit) or %fs:0 (64-bit).
13864 Value *Ptr = Constant::getNullValue(Type::getInt8PtrTy(*DAG.getContext(),
13865 is64Bit ? 257 : 256));
13867 SDValue ThreadPointer =
13868 DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), DAG.getIntPtrConstant(0),
13869 MachinePointerInfo(Ptr), false, false, false, 0);
13871 unsigned char OperandFlags = 0;
13872 // Most TLS accesses are not RIP relative, even on x86-64. One exception is
13874 unsigned WrapperKind = X86ISD::Wrapper;
13875 if (model == TLSModel::LocalExec) {
13876 OperandFlags = is64Bit ? X86II::MO_TPOFF : X86II::MO_NTPOFF;
13877 } else if (model == TLSModel::InitialExec) {
13879 OperandFlags = X86II::MO_GOTTPOFF;
13880 WrapperKind = X86ISD::WrapperRIP;
13882 OperandFlags = isPIC ? X86II::MO_GOTNTPOFF : X86II::MO_INDNTPOFF;
13885 llvm_unreachable("Unexpected model");
13888 // emit "addl x@ntpoff,%eax" (local exec)
13889 // or "addl x@indntpoff,%eax" (initial exec)
13890 // or "addl x@gotntpoff(%ebx) ,%eax" (initial exec, 32-bit pic)
13892 DAG.getTargetGlobalAddress(GA->getGlobal(), dl, GA->getValueType(0),
13893 GA->getOffset(), OperandFlags);
13894 SDValue Offset = DAG.getNode(WrapperKind, dl, PtrVT, TGA);
13896 if (model == TLSModel::InitialExec) {
13897 if (isPIC && !is64Bit) {
13898 Offset = DAG.getNode(ISD::ADD, dl, PtrVT,
13899 DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), PtrVT),
13903 Offset = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Offset,
13904 MachinePointerInfo::getGOT(), false, false, false, 0);
13907   // The address of the thread local variable is the sum of the thread
13908   // pointer and the offset of the variable.
13909 return DAG.getNode(ISD::ADD, dl, PtrVT, ThreadPointer, Offset);
13913 X86TargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const {
13915 GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
13916 const GlobalValue *GV = GA->getGlobal();
13918 if (Subtarget->isTargetELF()) {
13919 TLSModel::Model model = DAG.getTarget().getTLSModel(GV);
13922 case TLSModel::GeneralDynamic:
13923 if (Subtarget->is64Bit())
13924 return LowerToTLSGeneralDynamicModel64(GA, DAG, getPointerTy());
13925 return LowerToTLSGeneralDynamicModel32(GA, DAG, getPointerTy());
13926 case TLSModel::LocalDynamic:
13927 return LowerToTLSLocalDynamicModel(GA, DAG, getPointerTy(),
13928 Subtarget->is64Bit());
13929 case TLSModel::InitialExec:
13930 case TLSModel::LocalExec:
13931 return LowerToTLSExecModel(
13932 GA, DAG, getPointerTy(), model, Subtarget->is64Bit(),
13933 DAG.getTarget().getRelocationModel() == Reloc::PIC_);
13935 llvm_unreachable("Unknown TLS model.");
13938 if (Subtarget->isTargetDarwin()) {
13939 // Darwin only has one model of TLS. Lower to that.
13940 unsigned char OpFlag = 0;
13941 unsigned WrapperKind = Subtarget->isPICStyleRIPRel() ?
13942 X86ISD::WrapperRIP : X86ISD::Wrapper;
13944 // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
13945 // global base reg.
13946 bool PIC32 = (DAG.getTarget().getRelocationModel() == Reloc::PIC_) &&
13947 !Subtarget->is64Bit();
13949 OpFlag = X86II::MO_TLVP_PIC_BASE;
13951 OpFlag = X86II::MO_TLVP;
13953 SDValue Result = DAG.getTargetGlobalAddress(GA->getGlobal(), DL,
13954 GA->getValueType(0),
13955 GA->getOffset(), OpFlag);
13956 SDValue Offset = DAG.getNode(WrapperKind, DL, getPointerTy(), Result);
13958 // With PIC32, the address is actually $g + Offset.
13960 Offset = DAG.getNode(ISD::ADD, DL, getPointerTy(),
13961 DAG.getNode(X86ISD::GlobalBaseReg,
13962 SDLoc(), getPointerTy()),
13965 // Lowering the machine isd will make sure everything is in the right
13967 SDValue Chain = DAG.getEntryNode();
13968 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
13969 SDValue Args[] = { Chain, Offset };
13970 Chain = DAG.getNode(X86ISD::TLSCALL, DL, NodeTys, Args);
13972 // TLSCALL will be codegen'ed as call. Inform MFI that function has calls.
13973 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
13974 MFI->setAdjustsStack(true);
  // And our return value (tls address) is in the standard call return value
  // location.
13978 unsigned Reg = Subtarget->is64Bit() ? X86::RAX : X86::EAX;
13979 return DAG.getCopyFromReg(Chain, DL, Reg, getPointerTy(),
13980 Chain.getValue(1));
13983 if (Subtarget->isTargetKnownWindowsMSVC() ||
13984 Subtarget->isTargetWindowsGNU()) {
13985 // Just use the implicit TLS architecture
  // Need to generate something similar to:
13987 // mov rdx, qword [gs:abs 58H]; Load pointer to ThreadLocalStorage
13989 // mov ecx, dword [rel _tls_index]: Load index (from C runtime)
13990 // mov rcx, qword [rdx+rcx*8]
13991 // mov eax, .tls$:tlsvar
13992 // [rax+rcx] contains the address
13993 // Windows 64bit: gs:0x58
13994 // Windows 32bit: fs:__tls_array
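    // In C-like pseudocode, what the nodes built below compute is roughly
    // (a sketch, not the exact code that gets emitted):
    //   char *tlsPtr = *(char **)ThreadLocalStoragePointer; // gs:0x58 / fs:__tls_array
    //   char *block  = ((char **)tlsPtr)[_tls_index];       // this module's TLS block
    //   result       = block + <var's SECREL offset in .tls>;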
13997 SDValue Chain = DAG.getEntryNode();
13999 // Get the Thread Pointer, which is %fs:__tls_array (32-bit) or
14000 // %gs:0x58 (64-bit). On MinGW, __tls_array is not available, so directly
14001 // use its literal value of 0x2C.
14002 Value *Ptr = Constant::getNullValue(Subtarget->is64Bit()
14003 ? Type::getInt8PtrTy(*DAG.getContext(),
14005 : Type::getInt32PtrTy(*DAG.getContext(),
14009 Subtarget->is64Bit()
14010 ? DAG.getIntPtrConstant(0x58)
14011 : (Subtarget->isTargetWindowsGNU()
14012 ? DAG.getIntPtrConstant(0x2C)
14013 : DAG.getExternalSymbol("_tls_array", getPointerTy()));
14015 SDValue ThreadPointer =
14016 DAG.getLoad(getPointerTy(), dl, Chain, TlsArray,
14017 MachinePointerInfo(Ptr), false, false, false, 0);
14019 // Load the _tls_index variable
14020 SDValue IDX = DAG.getExternalSymbol("_tls_index", getPointerTy());
14021 if (Subtarget->is64Bit())
14022 IDX = DAG.getExtLoad(ISD::ZEXTLOAD, dl, getPointerTy(), Chain,
14023 IDX, MachinePointerInfo(), MVT::i32,
14024 false, false, false, 0);
14026 IDX = DAG.getLoad(getPointerTy(), dl, Chain, IDX, MachinePointerInfo(),
14027 false, false, false, 0);
14029 SDValue Scale = DAG.getConstant(Log2_64_Ceil(TD->getPointerSize()),
14031 IDX = DAG.getNode(ISD::SHL, dl, getPointerTy(), IDX, Scale);
14033 SDValue res = DAG.getNode(ISD::ADD, dl, getPointerTy(), ThreadPointer, IDX);
14034 res = DAG.getLoad(getPointerTy(), dl, Chain, res, MachinePointerInfo(),
14035 false, false, false, 0);
14037 // Get the offset of start of .tls section
14038 SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl,
14039 GA->getValueType(0),
14040 GA->getOffset(), X86II::MO_SECREL);
14041 SDValue Offset = DAG.getNode(X86ISD::Wrapper, dl, getPointerTy(), TGA);
14043 // The address of the thread local variable is the add of the thread
14044 // pointer with the offset of the variable.
14045 return DAG.getNode(ISD::ADD, dl, getPointerTy(), res, Offset);
14048 llvm_unreachable("TLS not implemented for this target.");
14051 /// LowerShiftParts - Lower SRA_PARTS and friends, which return two i32 values
14052 /// and take a 2 x i32 value to shift plus a shift amount.
14053 static SDValue LowerShiftParts(SDValue Op, SelectionDAG &DAG) {
14054 assert(Op.getNumOperands() == 3 && "Not a double-shift!");
14055 MVT VT = Op.getSimpleValueType();
14056 unsigned VTBits = VT.getSizeInBits();
14058 bool isSRA = Op.getOpcode() == ISD::SRA_PARTS;
14059 SDValue ShOpLo = Op.getOperand(0);
14060 SDValue ShOpHi = Op.getOperand(1);
14061 SDValue ShAmt = Op.getOperand(2);
14062 // X86ISD::SHLD and X86ISD::SHRD have defined overflow behavior but the
  // generic ISD nodes haven't. Insert an AND to be safe; it's optimized away
  // otherwise.
14065 SDValue SafeShAmt = DAG.getNode(ISD::AND, dl, MVT::i8, ShAmt,
14066 DAG.getConstant(VTBits - 1, MVT::i8));
14067 SDValue Tmp1 = isSRA ? DAG.getNode(ISD::SRA, dl, VT, ShOpHi,
14068 DAG.getConstant(VTBits - 1, MVT::i8))
14069 : DAG.getConstant(0, VT);
14071 SDValue Tmp2, Tmp3;
14072 if (Op.getOpcode() == ISD::SHL_PARTS) {
14073 Tmp2 = DAG.getNode(X86ISD::SHLD, dl, VT, ShOpHi, ShOpLo, ShAmt);
14074 Tmp3 = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, SafeShAmt);
14076 Tmp2 = DAG.getNode(X86ISD::SHRD, dl, VT, ShOpLo, ShOpHi, ShAmt);
14077 Tmp3 = DAG.getNode(isSRA ? ISD::SRA : ISD::SRL, dl, VT, ShOpHi, SafeShAmt);
  // If the shift amount is larger than or equal to the width of a part, we can't
14081 // rely on the results of shld/shrd. Insert a test and select the appropriate
14082 // values for large shift amounts.
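  // For example, for an i64 SHL_PARTS by 40 on 32-bit x86, shld's result is
  // unspecified for a shift amount of 40, so the CMOVs below select
  // Hi = Lo << (40 & 31) and Lo = 0 instead (a sketch of the intent, not the
  // literal emitted code).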
14083 SDValue AndNode = DAG.getNode(ISD::AND, dl, MVT::i8, ShAmt,
14084 DAG.getConstant(VTBits, MVT::i8));
14085 SDValue Cond = DAG.getNode(X86ISD::CMP, dl, MVT::i32,
14086 AndNode, DAG.getConstant(0, MVT::i8));
14089 SDValue CC = DAG.getConstant(X86::COND_NE, MVT::i8);
14090 SDValue Ops0[4] = { Tmp2, Tmp3, CC, Cond };
14091 SDValue Ops1[4] = { Tmp3, Tmp1, CC, Cond };
14093 if (Op.getOpcode() == ISD::SHL_PARTS) {
14094 Hi = DAG.getNode(X86ISD::CMOV, dl, VT, Ops0);
14095 Lo = DAG.getNode(X86ISD::CMOV, dl, VT, Ops1);
14097 Lo = DAG.getNode(X86ISD::CMOV, dl, VT, Ops0);
14098 Hi = DAG.getNode(X86ISD::CMOV, dl, VT, Ops1);
14101 SDValue Ops[2] = { Lo, Hi };
14102 return DAG.getMergeValues(Ops, dl);
14105 SDValue X86TargetLowering::LowerSINT_TO_FP(SDValue Op,
14106 SelectionDAG &DAG) const {
14107 MVT SrcVT = Op.getOperand(0).getSimpleValueType();
14110 if (SrcVT.isVector()) {
14111 if (SrcVT.getVectorElementType() == MVT::i1) {
14112 MVT IntegerVT = MVT::getVectorVT(MVT::i32, SrcVT.getVectorNumElements());
14113 return DAG.getNode(ISD::SINT_TO_FP, dl, Op.getValueType(),
14114 DAG.getNode(ISD::SIGN_EXTEND, dl, IntegerVT,
14115 Op.getOperand(0)));
14120 assert(SrcVT <= MVT::i64 && SrcVT >= MVT::i16 &&
14121 "Unknown SINT_TO_FP to lower!");
  // These are really Legal; return the operand so the caller accepts it as
  // Legal.
14125 if (SrcVT == MVT::i32 && isScalarFPTypeInSSEReg(Op.getValueType()))
14127 if (SrcVT == MVT::i64 && isScalarFPTypeInSSEReg(Op.getValueType()) &&
14128 Subtarget->is64Bit()) {
14132 unsigned Size = SrcVT.getSizeInBits()/8;
14133 MachineFunction &MF = DAG.getMachineFunction();
14134 int SSFI = MF.getFrameInfo()->CreateStackObject(Size, Size, false);
14135 SDValue StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
14136 SDValue Chain = DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0),
14138 MachinePointerInfo::getFixedStack(SSFI),
14140 return BuildFILD(Op, SrcVT, Chain, StackSlot, DAG);
14143 SDValue X86TargetLowering::BuildFILD(SDValue Op, EVT SrcVT, SDValue Chain,
14145 SelectionDAG &DAG) const {
14149 bool useSSE = isScalarFPTypeInSSEReg(Op.getValueType());
14151 Tys = DAG.getVTList(MVT::f64, MVT::Other, MVT::Glue);
14153 Tys = DAG.getVTList(Op.getValueType(), MVT::Other);
14155 unsigned ByteSize = SrcVT.getSizeInBits()/8;
14157 FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(StackSlot);
14158 MachineMemOperand *MMO;
14160 int SSFI = FI->getIndex();
14162 DAG.getMachineFunction()
14163 .getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI),
14164 MachineMemOperand::MOLoad, ByteSize, ByteSize);
14166 MMO = cast<LoadSDNode>(StackSlot)->getMemOperand();
14167 StackSlot = StackSlot.getOperand(1);
14169 SDValue Ops[] = { Chain, StackSlot, DAG.getValueType(SrcVT) };
14170 SDValue Result = DAG.getMemIntrinsicNode(useSSE ? X86ISD::FILD_FLAG :
14172 Tys, Ops, SrcVT, MMO);
14175 Chain = Result.getValue(1);
14176 SDValue InFlag = Result.getValue(2);
14178 // FIXME: Currently the FST is flagged to the FILD_FLAG. This
14179 // shouldn't be necessary except that RFP cannot be live across
14180 // multiple blocks. When stackifier is fixed, they can be uncoupled.
14181 MachineFunction &MF = DAG.getMachineFunction();
14182 unsigned SSFISize = Op.getValueType().getSizeInBits()/8;
14183 int SSFI = MF.getFrameInfo()->CreateStackObject(SSFISize, SSFISize, false);
14184 SDValue StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
14185 Tys = DAG.getVTList(MVT::Other);
14187 Chain, Result, StackSlot, DAG.getValueType(Op.getValueType()), InFlag
14189 MachineMemOperand *MMO =
14190 DAG.getMachineFunction()
14191 .getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI),
14192 MachineMemOperand::MOStore, SSFISize, SSFISize);
14194 Chain = DAG.getMemIntrinsicNode(X86ISD::FST, DL, Tys,
14195 Ops, Op.getValueType(), MMO);
14196 Result = DAG.getLoad(Op.getValueType(), DL, Chain, StackSlot,
14197 MachinePointerInfo::getFixedStack(SSFI),
14198 false, false, false, 0);
14204 // LowerUINT_TO_FP_i64 - 64-bit unsigned integer to double expansion.
14205 SDValue X86TargetLowering::LowerUINT_TO_FP_i64(SDValue Op,
14206 SelectionDAG &DAG) const {
  // This algorithm is not obvious. Here is what we're trying to output:
14210 punpckldq (c0), %xmm0 // c0: (uint4){ 0x43300000U, 0x45300000U, 0U, 0U }
14211 subpd (c1), %xmm0 // c1: (double2){ 0x1.0p52, 0x1.0p52 * 0x1.0p32 }
14213 haddpd %xmm0, %xmm0
14215 pshufd $0x4e, %xmm0, %xmm1
14221 LLVMContext *Context = DAG.getContext();
14223 // Build some magic constants.
14224 static const uint32_t CV0[] = { 0x43300000, 0x45300000, 0, 0 };
14225 Constant *C0 = ConstantDataVector::get(*Context, CV0);
14226 SDValue CPIdx0 = DAG.getConstantPool(C0, getPointerTy(), 16);
14228 SmallVector<Constant*,2> CV1;
14230 ConstantFP::get(*Context, APFloat(APFloat::IEEEdouble,
14231 APInt(64, 0x4330000000000000ULL))));
14233 ConstantFP::get(*Context, APFloat(APFloat::IEEEdouble,
14234 APInt(64, 0x4530000000000000ULL))));
14235 Constant *C1 = ConstantVector::get(CV1);
14236 SDValue CPIdx1 = DAG.getConstantPool(C1, getPointerTy(), 16);
14238 // Load the 64-bit value into an XMM register.
14239 SDValue XR1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64,
14241 SDValue CLod0 = DAG.getLoad(MVT::v4i32, dl, DAG.getEntryNode(), CPIdx0,
14242 MachinePointerInfo::getConstantPool(),
14243 false, false, false, 16);
14244 SDValue Unpck1 = getUnpackl(DAG, dl, MVT::v4i32,
14245 DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, XR1),
14248 SDValue CLod1 = DAG.getLoad(MVT::v2f64, dl, CLod0.getValue(1), CPIdx1,
14249 MachinePointerInfo::getConstantPool(),
14250 false, false, false, 16);
14251 SDValue XR2F = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Unpck1);
14252 SDValue Sub = DAG.getNode(ISD::FSUB, dl, MVT::v2f64, XR2F, CLod1);
14255 if (Subtarget->hasSSE3()) {
14256 // FIXME: The 'haddpd' instruction may be slower than 'movhlps + addsd'.
14257 Result = DAG.getNode(X86ISD::FHADD, dl, MVT::v2f64, Sub, Sub);
14259 SDValue S2F = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Sub);
14260 SDValue Shuffle = getTargetShuffleNode(X86ISD::PSHUFD, dl, MVT::v4i32,
14262 Result = DAG.getNode(ISD::FADD, dl, MVT::v2f64,
14263 DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Shuffle),
14267 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Result,
14268 DAG.getIntPtrConstant(0));
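// Why the magic constants above work (a sketch of the arithmetic): a double
// with exponent field 0x433 is 2^52, and its low 32 mantissa bits can hold the
// low half of the u64, giving 2^52 + lo; likewise exponent field 0x453 is
// 2^84, whose mantissa bits are weighted by 2^32 and hold the high half,
// giving 2^84 + hi * 2^32. Subtracting the biases (2^52 and 2^84) and adding
// the two lanes yields lo + hi * 2^32, i.e. the original value as a double.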
14271 // LowerUINT_TO_FP_i32 - 32-bit unsigned integer to float expansion.
14272 SDValue X86TargetLowering::LowerUINT_TO_FP_i32(SDValue Op,
14273 SelectionDAG &DAG) const {
14275 // FP constant to bias correct the final result.
14276 SDValue Bias = DAG.getConstantFP(BitsToDouble(0x4330000000000000ULL),
14279 // Load the 32-bit value into an XMM register.
14280 SDValue Load = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32,
14283 // Zero out the upper parts of the register.
14284 Load = getShuffleVectorZeroOrUndef(Load, 0, true, Subtarget, DAG);
14286 Load = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64,
14287 DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Load),
14288 DAG.getIntPtrConstant(0));
14290 // Or the load with the bias.
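  // (ORing the 32-bit value into the low mantissa bits of the 2^52 bias
  // produces the double 2^52 + x exactly; the FSUB below removes the bias.)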
14291 SDValue Or = DAG.getNode(ISD::OR, dl, MVT::v2i64,
14292 DAG.getNode(ISD::BITCAST, dl, MVT::v2i64,
14293 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl,
14294 MVT::v2f64, Load)),
14295 DAG.getNode(ISD::BITCAST, dl, MVT::v2i64,
14296 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl,
14297 MVT::v2f64, Bias)));
14298 Or = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64,
14299 DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Or),
14300 DAG.getIntPtrConstant(0));
14302 // Subtract the bias.
14303 SDValue Sub = DAG.getNode(ISD::FSUB, dl, MVT::f64, Or, Bias);
14305 // Handle final rounding.
14306 EVT DestVT = Op.getValueType();
14308 if (DestVT.bitsLT(MVT::f64))
14309 return DAG.getNode(ISD::FP_ROUND, dl, DestVT, Sub,
14310 DAG.getIntPtrConstant(0));
14311 if (DestVT.bitsGT(MVT::f64))
14312 return DAG.getNode(ISD::FP_EXTEND, dl, DestVT, Sub);
14314 // Handle final rounding.
14318 static SDValue lowerUINT_TO_FP_vXi32(SDValue Op, SelectionDAG &DAG,
14319 const X86Subtarget &Subtarget) {
14320 // The algorithm is the following:
14321 // #ifdef __SSE4_1__
14322 // uint4 lo = _mm_blend_epi16( v, (uint4) 0x4b000000, 0xaa);
14323 // uint4 hi = _mm_blend_epi16( _mm_srli_epi32(v,16),
14324 // (uint4) 0x53000000, 0xaa);
14326 // uint4 lo = (v & (uint4) 0xffff) | (uint4) 0x4b000000;
14327 // uint4 hi = (v >> 16) | (uint4) 0x53000000;
14329 // float4 fhi = (float4) hi - (0x1.0p39f + 0x1.0p23f);
14330 // return (float4) lo + fhi;
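  // Why this works (sketch): 0x4b000000 reinterpreted as float is 2^23 and
  // 0x53000000 is 2^39, so 'lo' is exactly 2^23 + (v & 0xffff) and 'hi' is
  // exactly 2^39 + (v >> 16) * 2^16. Subtracting (2^39 + 2^23) from hi and
  // adding lo cancels both biases and reconstructs v as a float.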
14333 SDValue V = Op->getOperand(0);
14334 EVT VecIntVT = V.getValueType();
14335 bool Is128 = VecIntVT == MVT::v4i32;
14336 EVT VecFloatVT = Is128 ? MVT::v4f32 : MVT::v8f32;
  // If we convert to something other than the supported type, e.g., to v4f64,
  // bail out early.
14339 if (VecFloatVT != Op->getValueType(0))
14342 unsigned NumElts = VecIntVT.getVectorNumElements();
14343 assert((VecIntVT == MVT::v4i32 || VecIntVT == MVT::v8i32) &&
14344 "Unsupported custom type");
14345 assert(NumElts <= 8 && "The size of the constant array must be fixed");
  // In the #ifdef/#else code, we have in common:
14348 // - The vector of constants:
14354 // Create the splat vector for 0x4b000000.
14355 SDValue CstLow = DAG.getConstant(0x4b000000, MVT::i32);
14356 SDValue CstLowArray[] = {CstLow, CstLow, CstLow, CstLow,
14357 CstLow, CstLow, CstLow, CstLow};
14358 SDValue VecCstLow = DAG.getNode(ISD::BUILD_VECTOR, DL, VecIntVT,
14359 makeArrayRef(&CstLowArray[0], NumElts));
14360 // Create the splat vector for 0x53000000.
14361 SDValue CstHigh = DAG.getConstant(0x53000000, MVT::i32);
14362 SDValue CstHighArray[] = {CstHigh, CstHigh, CstHigh, CstHigh,
14363 CstHigh, CstHigh, CstHigh, CstHigh};
14364 SDValue VecCstHigh = DAG.getNode(ISD::BUILD_VECTOR, DL, VecIntVT,
14365 makeArrayRef(&CstHighArray[0], NumElts));
14367 // Create the right shift.
14368 SDValue CstShift = DAG.getConstant(16, MVT::i32);
14369 SDValue CstShiftArray[] = {CstShift, CstShift, CstShift, CstShift,
14370 CstShift, CstShift, CstShift, CstShift};
14371 SDValue VecCstShift = DAG.getNode(ISD::BUILD_VECTOR, DL, VecIntVT,
14372 makeArrayRef(&CstShiftArray[0], NumElts));
14373 SDValue HighShift = DAG.getNode(ISD::SRL, DL, VecIntVT, V, VecCstShift);
14376 if (Subtarget.hasSSE41()) {
14377 EVT VecI16VT = Is128 ? MVT::v8i16 : MVT::v16i16;
14378 // uint4 lo = _mm_blend_epi16( v, (uint4) 0x4b000000, 0xaa);
14379 SDValue VecCstLowBitcast =
14380 DAG.getNode(ISD::BITCAST, DL, VecI16VT, VecCstLow);
14381 SDValue VecBitcast = DAG.getNode(ISD::BITCAST, DL, VecI16VT, V);
    // Low will be bitcasted right away, so do not bother bitcasting back to
    // its original type.
14384 Low = DAG.getNode(X86ISD::BLENDI, DL, VecI16VT, VecBitcast,
14385 VecCstLowBitcast, DAG.getConstant(0xaa, MVT::i32));
14386 // uint4 hi = _mm_blend_epi16( _mm_srli_epi32(v,16),
14387 // (uint4) 0x53000000, 0xaa);
14388 SDValue VecCstHighBitcast =
14389 DAG.getNode(ISD::BITCAST, DL, VecI16VT, VecCstHigh);
14390 SDValue VecShiftBitcast =
14391 DAG.getNode(ISD::BITCAST, DL, VecI16VT, HighShift);
14392 // High will be bitcasted right away, so do not bother bitcasting back to
14393 // its original type.
14394 High = DAG.getNode(X86ISD::BLENDI, DL, VecI16VT, VecShiftBitcast,
14395 VecCstHighBitcast, DAG.getConstant(0xaa, MVT::i32));
14397 SDValue CstMask = DAG.getConstant(0xffff, MVT::i32);
14398 SDValue VecCstMask = DAG.getNode(ISD::BUILD_VECTOR, DL, VecIntVT, CstMask,
14399 CstMask, CstMask, CstMask);
14400 // uint4 lo = (v & (uint4) 0xffff) | (uint4) 0x4b000000;
14401 SDValue LowAnd = DAG.getNode(ISD::AND, DL, VecIntVT, V, VecCstMask);
14402 Low = DAG.getNode(ISD::OR, DL, VecIntVT, LowAnd, VecCstLow);
14404 // uint4 hi = (v >> 16) | (uint4) 0x53000000;
14405 High = DAG.getNode(ISD::OR, DL, VecIntVT, HighShift, VecCstHigh);
14408 // Create the vector constant for -(0x1.0p39f + 0x1.0p23f).
14409 SDValue CstFAdd = DAG.getConstantFP(
14410 APFloat(APFloat::IEEEsingle, APInt(32, 0xD3000080)), MVT::f32);
14411 SDValue CstFAddArray[] = {CstFAdd, CstFAdd, CstFAdd, CstFAdd,
14412 CstFAdd, CstFAdd, CstFAdd, CstFAdd};
14413 SDValue VecCstFAdd = DAG.getNode(ISD::BUILD_VECTOR, DL, VecFloatVT,
14414 makeArrayRef(&CstFAddArray[0], NumElts));
14416 // float4 fhi = (float4) hi - (0x1.0p39f + 0x1.0p23f);
14417 SDValue HighBitcast = DAG.getNode(ISD::BITCAST, DL, VecFloatVT, High);
14419 DAG.getNode(ISD::FADD, DL, VecFloatVT, HighBitcast, VecCstFAdd);
14420 // return (float4) lo + fhi;
14421 SDValue LowBitcast = DAG.getNode(ISD::BITCAST, DL, VecFloatVT, Low);
14422 return DAG.getNode(ISD::FADD, DL, VecFloatVT, LowBitcast, FHigh);
14425 SDValue X86TargetLowering::lowerUINT_TO_FP_vec(SDValue Op,
14426 SelectionDAG &DAG) const {
14427 SDValue N0 = Op.getOperand(0);
14428 MVT SVT = N0.getSimpleValueType();
14431 switch (SVT.SimpleTy) {
14433 llvm_unreachable("Custom UINT_TO_FP is not supported!");
14438 MVT NVT = MVT::getVectorVT(MVT::i32, SVT.getVectorNumElements());
14439 return DAG.getNode(ISD::SINT_TO_FP, dl, Op.getValueType(),
14440 DAG.getNode(ISD::ZERO_EXTEND, dl, NVT, N0));
14444 return lowerUINT_TO_FP_vXi32(Op, DAG, *Subtarget);
14446 llvm_unreachable(nullptr);
14449 SDValue X86TargetLowering::LowerUINT_TO_FP(SDValue Op,
14450 SelectionDAG &DAG) const {
14451 SDValue N0 = Op.getOperand(0);
14454 if (Op.getValueType().isVector())
14455 return lowerUINT_TO_FP_vec(Op, DAG);
14457 // Since UINT_TO_FP is legal (it's marked custom), dag combiner won't
14458 // optimize it to a SINT_TO_FP when the sign bit is known zero. Perform
14459 // the optimization here.
14460 if (DAG.SignBitIsZero(N0))
14461 return DAG.getNode(ISD::SINT_TO_FP, dl, Op.getValueType(), N0);
14463 MVT SrcVT = N0.getSimpleValueType();
14464 MVT DstVT = Op.getSimpleValueType();
14465 if (SrcVT == MVT::i64 && DstVT == MVT::f64 && X86ScalarSSEf64)
14466 return LowerUINT_TO_FP_i64(Op, DAG);
14467 if (SrcVT == MVT::i32 && X86ScalarSSEf64)
14468 return LowerUINT_TO_FP_i32(Op, DAG);
14469 if (Subtarget->is64Bit() && SrcVT == MVT::i64 && DstVT == MVT::f32)
14472 // Make a 64-bit buffer, and use it to build an FILD.
14473 SDValue StackSlot = DAG.CreateStackTemporary(MVT::i64);
14474 if (SrcVT == MVT::i32) {
14475 SDValue WordOff = DAG.getConstant(4, getPointerTy());
14476 SDValue OffsetSlot = DAG.getNode(ISD::ADD, dl,
14477 getPointerTy(), StackSlot, WordOff);
14478 SDValue Store1 = DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0),
14479 StackSlot, MachinePointerInfo(),
14481 SDValue Store2 = DAG.getStore(Store1, dl, DAG.getConstant(0, MVT::i32),
14482 OffsetSlot, MachinePointerInfo(),
14484 SDValue Fild = BuildFILD(Op, MVT::i64, Store2, StackSlot, DAG);
14488 assert(SrcVT == MVT::i64 && "Unexpected type in UINT_TO_FP");
14489 SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0),
14490 StackSlot, MachinePointerInfo(),
14492 // For i64 source, we need to add the appropriate power of 2 if the input
14493 // was negative. This is the same as the optimization in
  // DAGTypeLegalizer::ExpandIntOp_UINT_TO_FP, and for it to be safe here,
14495 // we must be careful to do the computation in x87 extended precision, not
14496 // in SSE. (The generic code can't know it's OK to do this, or how to.)
14497 int SSFI = cast<FrameIndexSDNode>(StackSlot)->getIndex();
14498 MachineMemOperand *MMO =
14499 DAG.getMachineFunction()
14500 .getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI),
14501 MachineMemOperand::MOLoad, 8, 8);
14503 SDVTList Tys = DAG.getVTList(MVT::f80, MVT::Other);
14504 SDValue Ops[] = { Store, StackSlot, DAG.getValueType(MVT::i64) };
14505 SDValue Fild = DAG.getMemIntrinsicNode(X86ISD::FILD, dl, Tys, Ops,
14508 APInt FF(32, 0x5F800000ULL);
14510 // Check whether the sign bit is set.
14511 SDValue SignSet = DAG.getSetCC(dl,
14512 getSetCCResultType(*DAG.getContext(), MVT::i64),
14513 Op.getOperand(0), DAG.getConstant(0, MVT::i64),
14516 // Build a 64 bit pair (0, FF) in the constant pool, with FF in the lo bits.
14517 SDValue FudgePtr = DAG.getConstantPool(
14518 ConstantInt::get(*DAG.getContext(), FF.zext(64)),
14521 // Get a pointer to FF if the sign bit was set, or to 0 otherwise.
14522 SDValue Zero = DAG.getIntPtrConstant(0);
14523 SDValue Four = DAG.getIntPtrConstant(4);
14524 SDValue Offset = DAG.getNode(ISD::SELECT, dl, Zero.getValueType(), SignSet,
14526 FudgePtr = DAG.getNode(ISD::ADD, dl, getPointerTy(), FudgePtr, Offset);
14528 // Load the value out, extending it from f32 to f80.
14529 // FIXME: Avoid the extend by constructing the right constant pool?
14530 SDValue Fudge = DAG.getExtLoad(ISD::EXTLOAD, dl, MVT::f80, DAG.getEntryNode(),
14531 FudgePtr, MachinePointerInfo::getConstantPool(),
14532 MVT::f32, false, false, false, 4);
14533 // Extend everything to 80 bits to force it to be done on x87.
14534 SDValue Add = DAG.getNode(ISD::FADD, dl, MVT::f80, Fild, Fudge);
14535 return DAG.getNode(ISD::FP_ROUND, dl, DstVT, Add, DAG.getIntPtrConstant(0));
14538 std::pair<SDValue,SDValue>
14539 X86TargetLowering:: FP_TO_INTHelper(SDValue Op, SelectionDAG &DAG,
14540 bool IsSigned, bool IsReplace) const {
14543 EVT DstTy = Op.getValueType();
14545 if (!IsSigned && !isIntegerTypeFTOL(DstTy)) {
14546 assert(DstTy == MVT::i32 && "Unexpected FP_TO_UINT");
14550 assert(DstTy.getSimpleVT() <= MVT::i64 &&
14551 DstTy.getSimpleVT() >= MVT::i16 &&
14552 "Unknown FP_TO_INT to lower!");
14554 // These are really Legal.
14555 if (DstTy == MVT::i32 &&
14556 isScalarFPTypeInSSEReg(Op.getOperand(0).getValueType()))
14557 return std::make_pair(SDValue(), SDValue());
14558 if (Subtarget->is64Bit() &&
14559 DstTy == MVT::i64 &&
14560 isScalarFPTypeInSSEReg(Op.getOperand(0).getValueType()))
14561 return std::make_pair(SDValue(), SDValue());
14563 // We lower FP->int64 either into FISTP64 followed by a load from a temporary
14564 // stack slot, or into the FTOL runtime function.
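  // (On 32-bit MSVC targets, X86ISD::WIN_FTOL is expected to end up as a call
  // to the _ftol2 runtime helper, which returns the i64 result in EDX:EAX;
  // that is why the copies from EAX and EDX appear at the end of this function.)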
14565 MachineFunction &MF = DAG.getMachineFunction();
14566 unsigned MemSize = DstTy.getSizeInBits()/8;
14567 int SSFI = MF.getFrameInfo()->CreateStackObject(MemSize, MemSize, false);
14568 SDValue StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
14571 if (!IsSigned && isIntegerTypeFTOL(DstTy))
14572 Opc = X86ISD::WIN_FTOL;
14574 switch (DstTy.getSimpleVT().SimpleTy) {
14575 default: llvm_unreachable("Invalid FP_TO_SINT to lower!");
14576 case MVT::i16: Opc = X86ISD::FP_TO_INT16_IN_MEM; break;
14577 case MVT::i32: Opc = X86ISD::FP_TO_INT32_IN_MEM; break;
14578 case MVT::i64: Opc = X86ISD::FP_TO_INT64_IN_MEM; break;
14581 SDValue Chain = DAG.getEntryNode();
14582 SDValue Value = Op.getOperand(0);
14583 EVT TheVT = Op.getOperand(0).getValueType();
14584 // FIXME This causes a redundant load/store if the SSE-class value is already
14585 // in memory, such as if it is on the callstack.
14586 if (isScalarFPTypeInSSEReg(TheVT)) {
14587 assert(DstTy == MVT::i64 && "Invalid FP_TO_SINT to lower!");
14588 Chain = DAG.getStore(Chain, DL, Value, StackSlot,
14589 MachinePointerInfo::getFixedStack(SSFI),
14591 SDVTList Tys = DAG.getVTList(Op.getOperand(0).getValueType(), MVT::Other);
14593 Chain, StackSlot, DAG.getValueType(TheVT)
14596 MachineMemOperand *MMO =
14597 MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI),
14598 MachineMemOperand::MOLoad, MemSize, MemSize);
14599 Value = DAG.getMemIntrinsicNode(X86ISD::FLD, DL, Tys, Ops, DstTy, MMO);
14600 Chain = Value.getValue(1);
14601 SSFI = MF.getFrameInfo()->CreateStackObject(MemSize, MemSize, false);
14602 StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
14605 MachineMemOperand *MMO =
14606 MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI),
14607 MachineMemOperand::MOStore, MemSize, MemSize);
14609 if (Opc != X86ISD::WIN_FTOL) {
14610 // Build the FP_TO_INT*_IN_MEM
14611 SDValue Ops[] = { Chain, Value, StackSlot };
14612 SDValue FIST = DAG.getMemIntrinsicNode(Opc, DL, DAG.getVTList(MVT::Other),
14614 return std::make_pair(FIST, StackSlot);
14616 SDValue ftol = DAG.getNode(X86ISD::WIN_FTOL, DL,
14617 DAG.getVTList(MVT::Other, MVT::Glue),
14619 SDValue eax = DAG.getCopyFromReg(ftol, DL, X86::EAX,
14620 MVT::i32, ftol.getValue(1));
14621 SDValue edx = DAG.getCopyFromReg(eax.getValue(1), DL, X86::EDX,
14622 MVT::i32, eax.getValue(2));
14623 SDValue Ops[] = { eax, edx };
14624 SDValue pair = IsReplace
14625 ? DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, Ops)
14626 : DAG.getMergeValues(Ops, DL);
14627 return std::make_pair(pair, SDValue());
14631 static SDValue LowerAVXExtend(SDValue Op, SelectionDAG &DAG,
14632 const X86Subtarget *Subtarget) {
14633 MVT VT = Op->getSimpleValueType(0);
14634 SDValue In = Op->getOperand(0);
14635 MVT InVT = In.getSimpleValueType();
14638 // Optimize vectors in AVX mode:
14641 // Use vpunpcklwd for 4 lower elements v8i16 -> v4i32.
14642 // Use vpunpckhwd for 4 upper elements v8i16 -> v4i32.
14643 // Concat upper and lower parts.
14646 // Use vpunpckldq for 4 lower elements v4i32 -> v2i64.
14647 // Use vpunpckhdq for 4 upper elements v4i32 -> v2i64.
14648 // Concat upper and lower parts.
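  // E.g. a zero-extend of v8i16 to v8i32 without AVX2 is built here as an
  // unpack of the source with a zero vector (low and high halves), each half
  // then bitcast to v4i32 and concatenated back into a v8i32 result.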
14651 if (((VT != MVT::v16i16) || (InVT != MVT::v16i8)) &&
14652 ((VT != MVT::v8i32) || (InVT != MVT::v8i16)) &&
14653 ((VT != MVT::v4i64) || (InVT != MVT::v4i32)))
14656 if (Subtarget->hasInt256())
14657 return DAG.getNode(X86ISD::VZEXT, dl, VT, In);
14659 SDValue ZeroVec = getZeroVector(InVT, Subtarget, DAG, dl);
14660 SDValue Undef = DAG.getUNDEF(InVT);
14661 bool NeedZero = Op.getOpcode() == ISD::ZERO_EXTEND;
14662 SDValue OpLo = getUnpackl(DAG, dl, InVT, In, NeedZero ? ZeroVec : Undef);
14663 SDValue OpHi = getUnpackh(DAG, dl, InVT, In, NeedZero ? ZeroVec : Undef);
14665 MVT HVT = MVT::getVectorVT(VT.getVectorElementType(),
14666 VT.getVectorNumElements()/2);
14668 OpLo = DAG.getNode(ISD::BITCAST, dl, HVT, OpLo);
14669 OpHi = DAG.getNode(ISD::BITCAST, dl, HVT, OpHi);
14671 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, OpLo, OpHi);
14674 static SDValue LowerZERO_EXTEND_AVX512(SDValue Op,
14675 SelectionDAG &DAG) {
14676 MVT VT = Op->getSimpleValueType(0);
14677 SDValue In = Op->getOperand(0);
14678 MVT InVT = In.getSimpleValueType();
14680 unsigned int NumElts = VT.getVectorNumElements();
14681 if (NumElts != 8 && NumElts != 16)
14684 if (VT.is512BitVector() && InVT.getVectorElementType() != MVT::i1)
14685 return DAG.getNode(X86ISD::VZEXT, DL, VT, In);
14687 EVT ExtVT = (NumElts == 8)? MVT::v8i64 : MVT::v16i32;
14688 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
14689 // Now we have only mask extension
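  // Sketch of the mask-extension path below: a constant 1 of the wide element
  // type is loaded from the constant pool and broadcast under the i1 mask
  // (X86ISD::VBROADCASTM), so each result lane is 1 where the mask bit is set
  // and 0 elsewhere; if the destination is narrower than 512 bits, the result
  // is truncated afterwards.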
14690 assert(InVT.getVectorElementType() == MVT::i1);
14691 SDValue Cst = DAG.getTargetConstant(1, ExtVT.getScalarType());
14692 const Constant *C = (dyn_cast<ConstantSDNode>(Cst))->getConstantIntValue();
14693 SDValue CP = DAG.getConstantPool(C, TLI.getPointerTy());
14694 unsigned Alignment = cast<ConstantPoolSDNode>(CP)->getAlignment();
14695 SDValue Ld = DAG.getLoad(Cst.getValueType(), DL, DAG.getEntryNode(), CP,
14696 MachinePointerInfo::getConstantPool(),
14697 false, false, false, Alignment);
14699 SDValue Brcst = DAG.getNode(X86ISD::VBROADCASTM, DL, ExtVT, In, Ld);
14700 if (VT.is512BitVector())
14702 return DAG.getNode(X86ISD::VTRUNC, DL, VT, Brcst);
14705 static SDValue LowerANY_EXTEND(SDValue Op, const X86Subtarget *Subtarget,
14706 SelectionDAG &DAG) {
14707 if (Subtarget->hasFp256()) {
14708 SDValue Res = LowerAVXExtend(Op, DAG, Subtarget);
14716 static SDValue LowerZERO_EXTEND(SDValue Op, const X86Subtarget *Subtarget,
14717 SelectionDAG &DAG) {
14719 MVT VT = Op.getSimpleValueType();
14720 SDValue In = Op.getOperand(0);
14721 MVT SVT = In.getSimpleValueType();
14723 if (VT.is512BitVector() || SVT.getVectorElementType() == MVT::i1)
14724 return LowerZERO_EXTEND_AVX512(Op, DAG);
14726 if (Subtarget->hasFp256()) {
14727 SDValue Res = LowerAVXExtend(Op, DAG, Subtarget);
14732 assert(!VT.is256BitVector() || !SVT.is128BitVector() ||
14733 VT.getVectorNumElements() != SVT.getVectorNumElements());
14737 SDValue X86TargetLowering::LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const {
14739 MVT VT = Op.getSimpleValueType();
14740 SDValue In = Op.getOperand(0);
14741 MVT InVT = In.getSimpleValueType();
14743 if (VT == MVT::i1) {
14744 assert((InVT.isInteger() && (InVT.getSizeInBits() <= 64)) &&
14745 "Invalid scalar TRUNCATE operation");
14746 if (InVT.getSizeInBits() >= 32)
14748 In = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, In);
14749 return DAG.getNode(ISD::TRUNCATE, DL, VT, In);
14751 assert(VT.getVectorNumElements() == InVT.getVectorNumElements() &&
14752 "Invalid TRUNCATE operation");
14754 if (InVT.is512BitVector() || VT.getVectorElementType() == MVT::i1) {
14755 if (VT.getVectorElementType().getSizeInBits() >=8)
14756 return DAG.getNode(X86ISD::VTRUNC, DL, VT, In);
14758 assert(VT.getVectorElementType() == MVT::i1 && "Unexpected vector type");
14759 unsigned NumElts = InVT.getVectorNumElements();
14760 assert ((NumElts == 8 || NumElts == 16) && "Unexpected vector type");
14761 if (InVT.getSizeInBits() < 512) {
14762 MVT ExtVT = (NumElts == 16)? MVT::v16i32 : MVT::v8i64;
14763 In = DAG.getNode(ISD::SIGN_EXTEND, DL, ExtVT, In);
14767 SDValue Cst = DAG.getTargetConstant(1, InVT.getVectorElementType());
14768 const Constant *C = (dyn_cast<ConstantSDNode>(Cst))->getConstantIntValue();
14769 SDValue CP = DAG.getConstantPool(C, getPointerTy());
14770 unsigned Alignment = cast<ConstantPoolSDNode>(CP)->getAlignment();
14771 SDValue Ld = DAG.getLoad(Cst.getValueType(), DL, DAG.getEntryNode(), CP,
14772 MachinePointerInfo::getConstantPool(),
14773 false, false, false, Alignment);
14774 SDValue OneV = DAG.getNode(X86ISD::VBROADCAST, DL, InVT, Ld);
14775 SDValue And = DAG.getNode(ISD::AND, DL, InVT, OneV, In);
14776 return DAG.getNode(X86ISD::TESTM, DL, VT, And, And);
14779 if ((VT == MVT::v4i32) && (InVT == MVT::v4i64)) {
14780 // On AVX2, v4i64 -> v4i32 becomes VPERMD.
14781 if (Subtarget->hasInt256()) {
14782 static const int ShufMask[] = {0, 2, 4, 6, -1, -1, -1, -1};
14783 In = DAG.getNode(ISD::BITCAST, DL, MVT::v8i32, In);
14784 In = DAG.getVectorShuffle(MVT::v8i32, DL, In, DAG.getUNDEF(MVT::v8i32),
14786 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, In,
14787 DAG.getIntPtrConstant(0));
14790 SDValue OpLo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v2i64, In,
14791 DAG.getIntPtrConstant(0));
14792 SDValue OpHi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v2i64, In,
14793 DAG.getIntPtrConstant(2));
14794 OpLo = DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, OpLo);
14795 OpHi = DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, OpHi);
14796 static const int ShufMask[] = {0, 2, 4, 6};
14797 return DAG.getVectorShuffle(VT, DL, OpLo, OpHi, ShufMask);
14800 if ((VT == MVT::v8i16) && (InVT == MVT::v8i32)) {
    // On AVX2, v8i32 -> v8i16 becomes PSHUFB.
14802 if (Subtarget->hasInt256()) {
14803 In = DAG.getNode(ISD::BITCAST, DL, MVT::v32i8, In);
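      // Build a PSHUFB mask that picks bytes {0,1},{4,5},{8,9},{12,13} of each
      // 128-bit lane (the low i16 of every i32 element) and writes 0x80 (zero)
      // into the remaining byte positions.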
14805 SmallVector<SDValue,32> pshufbMask;
14806 for (unsigned i = 0; i < 2; ++i) {
14807 pshufbMask.push_back(DAG.getConstant(0x0, MVT::i8));
14808 pshufbMask.push_back(DAG.getConstant(0x1, MVT::i8));
14809 pshufbMask.push_back(DAG.getConstant(0x4, MVT::i8));
14810 pshufbMask.push_back(DAG.getConstant(0x5, MVT::i8));
14811 pshufbMask.push_back(DAG.getConstant(0x8, MVT::i8));
14812 pshufbMask.push_back(DAG.getConstant(0x9, MVT::i8));
14813 pshufbMask.push_back(DAG.getConstant(0xc, MVT::i8));
14814 pshufbMask.push_back(DAG.getConstant(0xd, MVT::i8));
14815 for (unsigned j = 0; j < 8; ++j)
14816 pshufbMask.push_back(DAG.getConstant(0x80, MVT::i8));
14818 SDValue BV = DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v32i8, pshufbMask);
14819 In = DAG.getNode(X86ISD::PSHUFB, DL, MVT::v32i8, In, BV);
14820 In = DAG.getNode(ISD::BITCAST, DL, MVT::v4i64, In);
14822 static const int ShufMask[] = {0, 2, -1, -1};
14823 In = DAG.getVectorShuffle(MVT::v4i64, DL, In, DAG.getUNDEF(MVT::v4i64),
14825 In = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v2i64, In,
14826 DAG.getIntPtrConstant(0));
14827 return DAG.getNode(ISD::BITCAST, DL, VT, In);
14830 SDValue OpLo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v4i32, In,
14831 DAG.getIntPtrConstant(0));
14833 SDValue OpHi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v4i32, In,
14834 DAG.getIntPtrConstant(4));
14836 OpLo = DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, OpLo);
14837 OpHi = DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, OpHi);
14839 // The PSHUFB mask:
14840 static const int ShufMask1[] = {0, 1, 4, 5, 8, 9, 12, 13,
14841 -1, -1, -1, -1, -1, -1, -1, -1};
14843 SDValue Undef = DAG.getUNDEF(MVT::v16i8);
14844 OpLo = DAG.getVectorShuffle(MVT::v16i8, DL, OpLo, Undef, ShufMask1);
14845 OpHi = DAG.getVectorShuffle(MVT::v16i8, DL, OpHi, Undef, ShufMask1);
14847 OpLo = DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, OpLo);
14848 OpHi = DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, OpHi);
14850 // The MOVLHPS Mask:
14851 static const int ShufMask2[] = {0, 1, 4, 5};
14852 SDValue res = DAG.getVectorShuffle(MVT::v4i32, DL, OpLo, OpHi, ShufMask2);
14853 return DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, res);
14856 // Handle truncation of V256 to V128 using shuffles.
14857 if (!VT.is128BitVector() || !InVT.is256BitVector())
14860 assert(Subtarget->hasFp256() && "256-bit vector without AVX!");
14862 unsigned NumElems = VT.getVectorNumElements();
14863 MVT NVT = MVT::getVectorVT(VT.getVectorElementType(), NumElems * 2);
14865 SmallVector<int, 16> MaskVec(NumElems * 2, -1);
14866 // Prepare truncation shuffle mask
14867 for (unsigned i = 0; i != NumElems; ++i)
14868 MaskVec[i] = i * 2;
14869 SDValue V = DAG.getVectorShuffle(NVT, DL,
14870 DAG.getNode(ISD::BITCAST, DL, NVT, In),
14871 DAG.getUNDEF(NVT), &MaskVec[0]);
14872 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, V,
14873 DAG.getIntPtrConstant(0));
14876 SDValue X86TargetLowering::LowerFP_TO_SINT(SDValue Op,
14877 SelectionDAG &DAG) const {
14878 assert(!Op.getSimpleValueType().isVector());
14880 std::pair<SDValue,SDValue> Vals = FP_TO_INTHelper(Op, DAG,
14881 /*IsSigned=*/ true, /*IsReplace=*/ false);
14882 SDValue FIST = Vals.first, StackSlot = Vals.second;
14883 // If FP_TO_INTHelper failed, the node is actually supposed to be Legal.
14884 if (!FIST.getNode()) return Op;
14886 if (StackSlot.getNode())
14887 // Load the result.
14888 return DAG.getLoad(Op.getValueType(), SDLoc(Op),
14889 FIST, StackSlot, MachinePointerInfo(),
14890 false, false, false, 0);
14892 // The node is the result.
14896 SDValue X86TargetLowering::LowerFP_TO_UINT(SDValue Op,
14897 SelectionDAG &DAG) const {
14898 std::pair<SDValue,SDValue> Vals = FP_TO_INTHelper(Op, DAG,
14899 /*IsSigned=*/ false, /*IsReplace=*/ false);
14900 SDValue FIST = Vals.first, StackSlot = Vals.second;
14901 assert(FIST.getNode() && "Unexpected failure");
14903 if (StackSlot.getNode())
14904 // Load the result.
14905 return DAG.getLoad(Op.getValueType(), SDLoc(Op),
14906 FIST, StackSlot, MachinePointerInfo(),
14907 false, false, false, 0);
14909 // The node is the result.
14913 static SDValue LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) {
14915 MVT VT = Op.getSimpleValueType();
14916 SDValue In = Op.getOperand(0);
14917 MVT SVT = In.getSimpleValueType();
14919 assert(SVT == MVT::v2f32 && "Only customize MVT::v2f32 type legalization!");
14921 return DAG.getNode(X86ISD::VFPEXT, DL, VT,
14922 DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v4f32,
14923 In, DAG.getUNDEF(SVT)));
14926 /// The only differences between FABS and FNEG are the mask and the logic op.
14927 /// FNEG also has a folding opportunity for FNEG(FABS(x)).
14928 static SDValue LowerFABSorFNEG(SDValue Op, SelectionDAG &DAG) {
14929 assert((Op.getOpcode() == ISD::FABS || Op.getOpcode() == ISD::FNEG) &&
14930 "Wrong opcode for lowering FABS or FNEG.");
14932 bool IsFABS = (Op.getOpcode() == ISD::FABS);
14934 // If this is a FABS and it has an FNEG user, bail out to fold the combination
14935 // into an FNABS. We'll lower the FABS after that if it is still in use.
14937 for (SDNode *User : Op->uses())
14938 if (User->getOpcode() == ISD::FNEG)
14941 SDValue Op0 = Op.getOperand(0);
14942 bool IsFNABS = !IsFABS && (Op0.getOpcode() == ISD::FABS);
14945 MVT VT = Op.getSimpleValueType();
14946 // Assume scalar op for initialization; update for vector if needed.
14947 // Note that there are no scalar bitwise logical SSE/AVX instructions, so we
14948 // generate a 16-byte vector constant and logic op even for the scalar case.
14949 // Using a 16-byte mask allows folding the load of the mask with
14950 // the logic op, so it can save (~4 bytes) on code size.
14952 unsigned NumElts = VT == MVT::f64 ? 2 : 4;
14953 // FIXME: Use function attribute "OptimizeForSize" and/or CodeGenOpt::Level to
14954 // decide if we should generate a 16-byte constant mask when we only need 4 or
14955 // 8 bytes for the scalar case.
14956 if (VT.isVector()) {
14957 EltVT = VT.getVectorElementType();
14958 NumElts = VT.getVectorNumElements();
14961 unsigned EltBits = EltVT.getSizeInBits();
14962 LLVMContext *Context = DAG.getContext();
14963 // For FABS, mask is 0x7f...; for FNEG, mask is 0x80...
14965 IsFABS ? APInt::getSignedMaxValue(EltBits) : APInt::getSignBit(EltBits);
14966 Constant *C = ConstantInt::get(*Context, MaskElt);
14967 C = ConstantVector::getSplat(NumElts, C);
14968 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
14969 SDValue CPIdx = DAG.getConstantPool(C, TLI.getPointerTy());
14970 unsigned Alignment = cast<ConstantPoolSDNode>(CPIdx)->getAlignment();
14971 SDValue Mask = DAG.getLoad(VT, dl, DAG.getEntryNode(), CPIdx,
14972 MachinePointerInfo::getConstantPool(),
14973 false, false, false, Alignment);
14975 if (VT.isVector()) {
14976 // For a vector, cast operands to a vector type, perform the logic op,
14977 // and cast the result back to the original value type.
14978 MVT VecVT = MVT::getVectorVT(MVT::i64, VT.getSizeInBits() / 64);
14979 SDValue MaskCasted = DAG.getNode(ISD::BITCAST, dl, VecVT, Mask);
14980 SDValue Operand = IsFNABS ?
14981 DAG.getNode(ISD::BITCAST, dl, VecVT, Op0.getOperand(0)) :
14982 DAG.getNode(ISD::BITCAST, dl, VecVT, Op0);
14983 unsigned BitOp = IsFABS ? ISD::AND : IsFNABS ? ISD::OR : ISD::XOR;
14984 return DAG.getNode(ISD::BITCAST, dl, VT,
14985 DAG.getNode(BitOp, dl, VecVT, Operand, MaskCasted));
14988 // If not vector, then scalar.
14989 unsigned BitOp = IsFABS ? X86ISD::FAND : IsFNABS ? X86ISD::FOR : X86ISD::FXOR;
14990 SDValue Operand = IsFNABS ? Op0.getOperand(0) : Op0;
14991 return DAG.getNode(BitOp, dl, VT, Operand, Mask);
14994 static SDValue LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) {
14995 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
14996 LLVMContext *Context = DAG.getContext();
14997 SDValue Op0 = Op.getOperand(0);
14998 SDValue Op1 = Op.getOperand(1);
15000 MVT VT = Op.getSimpleValueType();
15001 MVT SrcVT = Op1.getSimpleValueType();
15003 // If second operand is smaller, extend it first.
15004 if (SrcVT.bitsLT(VT)) {
15005 Op1 = DAG.getNode(ISD::FP_EXTEND, dl, VT, Op1);
15008 // And if it is bigger, shrink it first.
15009 if (SrcVT.bitsGT(VT)) {
15010 Op1 = DAG.getNode(ISD::FP_ROUND, dl, VT, Op1, DAG.getIntPtrConstant(1));
15014 // At this point the operands and the result should have the same
15015 // type, and that won't be f80 since that is not custom lowered.
15017 const fltSemantics &Sem =
15018 VT == MVT::f64 ? APFloat::IEEEdouble : APFloat::IEEEsingle;
15019 const unsigned SizeInBits = VT.getSizeInBits();
15021 SmallVector<Constant *, 4> CV(
15022 VT == MVT::f64 ? 2 : 4,
15023 ConstantFP::get(*Context, APFloat(Sem, APInt(SizeInBits, 0))));
15025 // First, clear all bits but the sign bit from the second operand (sign).
15026 CV[0] = ConstantFP::get(*Context,
15027 APFloat(Sem, APInt::getHighBitsSet(SizeInBits, 1)));
15028 Constant *C = ConstantVector::get(CV);
15029 SDValue CPIdx = DAG.getConstantPool(C, TLI.getPointerTy(), 16);
15030 SDValue Mask1 = DAG.getLoad(SrcVT, dl, DAG.getEntryNode(), CPIdx,
15031 MachinePointerInfo::getConstantPool(),
15032 false, false, false, 16);
15033 SDValue SignBit = DAG.getNode(X86ISD::FAND, dl, SrcVT, Op1, Mask1);
15035 // Next, clear the sign bit from the first operand (magnitude).
15036 // If it's a constant, we can clear it here.
15037 if (ConstantFPSDNode *Op0CN = dyn_cast<ConstantFPSDNode>(Op0)) {
15038 APFloat APF = Op0CN->getValueAPF();
15039 // If the magnitude is a positive zero, the sign bit alone is enough.
15040 if (APF.isPosZero())
15043 CV[0] = ConstantFP::get(*Context, APF);
15045 CV[0] = ConstantFP::get(
15047 APFloat(Sem, APInt::getLowBitsSet(SizeInBits, SizeInBits - 1)));
15049 C = ConstantVector::get(CV);
15050 CPIdx = DAG.getConstantPool(C, TLI.getPointerTy(), 16);
15051 SDValue Val = DAG.getLoad(VT, dl, DAG.getEntryNode(), CPIdx,
15052 MachinePointerInfo::getConstantPool(),
15053 false, false, false, 16);
15054 // If the magnitude operand wasn't a constant, we need to AND out the sign.
15055 if (!isa<ConstantFPSDNode>(Op0))
15056 Val = DAG.getNode(X86ISD::FAND, dl, VT, Op0, Val);
15058 // OR the magnitude value with the sign bit.
15059 return DAG.getNode(X86ISD::FOR, dl, VT, Val, SignBit);
15062 static SDValue LowerFGETSIGN(SDValue Op, SelectionDAG &DAG) {
15063 SDValue N0 = Op.getOperand(0);
15065 MVT VT = Op.getSimpleValueType();
15067 // Lower ISD::FGETSIGN to (AND (X86ISD::FGETSIGNx86 ...) 1).
15068 SDValue xFGETSIGN = DAG.getNode(X86ISD::FGETSIGNx86, dl, VT, N0,
15069 DAG.getConstant(1, VT));
15070 return DAG.getNode(ISD::AND, dl, VT, xFGETSIGN, DAG.getConstant(1, VT));
15073 // Check whether an OR'd tree is PTEST-able.
15074 static SDValue LowerVectorAllZeroTest(SDValue Op, const X86Subtarget *Subtarget,
15075 SelectionDAG &DAG) {
15076 assert(Op.getOpcode() == ISD::OR && "Only check OR'd tree.");
15078 if (!Subtarget->hasSSE41())
15081 if (!Op->hasOneUse())
15084 SDNode *N = Op.getNode();
15087 SmallVector<SDValue, 8> Opnds;
15088 DenseMap<SDValue, unsigned> VecInMap;
15089 SmallVector<SDValue, 8> VecIns;
15090 EVT VT = MVT::Other;
  // Recognize a special case where a vector is cast into a wide integer to
  // test all 0s.
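  // Roughly: a compare like (bitcast <4 x i32> %v to i128) == 0 gets
  // legalized into an OR tree over the vector's extracted elements; this
  // walks that tree and, when every element of the source vector is used,
  // turns the whole test into a single PTEST of the vector against itself.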
15094 Opnds.push_back(N->getOperand(0));
15095 Opnds.push_back(N->getOperand(1));
15097 for (unsigned Slot = 0, e = Opnds.size(); Slot < e; ++Slot) {
15098 SmallVectorImpl<SDValue>::const_iterator I = Opnds.begin() + Slot;
15099 // BFS traverse all OR'd operands.
15100 if (I->getOpcode() == ISD::OR) {
15101 Opnds.push_back(I->getOperand(0));
15102 Opnds.push_back(I->getOperand(1));
15103 // Re-evaluate the number of nodes to be traversed.
15104 e += 2; // 2 more nodes (LHS and RHS) are pushed.
    // Quit if a non-EXTRACT_VECTOR_ELT operand is found.
15109 if (I->getOpcode() != ISD::EXTRACT_VECTOR_ELT)
15112 // Quit if without a constant index.
15113 SDValue Idx = I->getOperand(1);
15114 if (!isa<ConstantSDNode>(Idx))
15117 SDValue ExtractedFromVec = I->getOperand(0);
15118 DenseMap<SDValue, unsigned>::iterator M = VecInMap.find(ExtractedFromVec);
15119 if (M == VecInMap.end()) {
15120 VT = ExtractedFromVec.getValueType();
15121 // Quit if not 128/256-bit vector.
15122 if (!VT.is128BitVector() && !VT.is256BitVector())
15124 // Quit if not the same type.
15125 if (VecInMap.begin() != VecInMap.end() &&
15126 VT != VecInMap.begin()->first.getValueType())
15128 M = VecInMap.insert(std::make_pair(ExtractedFromVec, 0)).first;
15129 VecIns.push_back(ExtractedFromVec);
15131 M->second |= 1U << cast<ConstantSDNode>(Idx)->getZExtValue();
15134 assert((VT.is128BitVector() || VT.is256BitVector()) &&
15135 "Not extracted from 128-/256-bit vector.");
15137 unsigned FullMask = (1U << VT.getVectorNumElements()) - 1U;
15139 for (DenseMap<SDValue, unsigned>::const_iterator
15140 I = VecInMap.begin(), E = VecInMap.end(); I != E; ++I) {
15141 // Quit if not all elements are used.
15142 if (I->second != FullMask)
15146 EVT TestVT = VT.is128BitVector() ? MVT::v2i64 : MVT::v4i64;
15148 // Cast all vectors into TestVT for PTEST.
15149 for (unsigned i = 0, e = VecIns.size(); i < e; ++i)
15150 VecIns[i] = DAG.getNode(ISD::BITCAST, DL, TestVT, VecIns[i]);
  // If more than one full vector is evaluated, OR them first before PTEST.
15153 for (unsigned Slot = 0, e = VecIns.size(); e - Slot > 1; Slot += 2, e += 1) {
15154 // Each iteration will OR 2 nodes and append the result until there is only
15155 // 1 node left, i.e. the final OR'd value of all vectors.
15156 SDValue LHS = VecIns[Slot];
15157 SDValue RHS = VecIns[Slot + 1];
15158 VecIns.push_back(DAG.getNode(ISD::OR, DL, TestVT, LHS, RHS));
15161 return DAG.getNode(X86ISD::PTEST, DL, MVT::i32,
15162 VecIns.back(), VecIns.back());
15165 /// \brief return true if \c Op has a use that doesn't just read flags.
15166 static bool hasNonFlagsUse(SDValue Op) {
15167 for (SDNode::use_iterator UI = Op->use_begin(), UE = Op->use_end(); UI != UE;
15169 SDNode *User = *UI;
15170 unsigned UOpNo = UI.getOperandNo();
15171 if (User->getOpcode() == ISD::TRUNCATE && User->hasOneUse()) {
      // Look past the truncate.
15173 UOpNo = User->use_begin().getOperandNo();
15174 User = *User->use_begin();
15177 if (User->getOpcode() != ISD::BRCOND && User->getOpcode() != ISD::SETCC &&
15178 !(User->getOpcode() == ISD::SELECT && UOpNo == 0))
15184 /// Emit nodes that will be selected as "test Op0,Op0", or something
15186 SDValue X86TargetLowering::EmitTest(SDValue Op, unsigned X86CC, SDLoc dl,
15187 SelectionDAG &DAG) const {
15188 if (Op.getValueType() == MVT::i1) {
15189 SDValue ExtOp = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i8, Op);
15190 return DAG.getNode(X86ISD::CMP, dl, MVT::i32, ExtOp,
15191 DAG.getConstant(0, MVT::i8));
15193 // CF and OF aren't always set the way we want. Determine which
15194 // of these we need.
15195 bool NeedCF = false;
15196 bool NeedOF = false;
15199 case X86::COND_A: case X86::COND_AE:
15200 case X86::COND_B: case X86::COND_BE:
15203 case X86::COND_G: case X86::COND_GE:
15204 case X86::COND_L: case X86::COND_LE:
15205 case X86::COND_O: case X86::COND_NO: {
15206 // Check if we really need to set the
15207 // Overflow flag. If NoSignedWrap is present
15208 // that is not actually needed.
15209 switch (Op->getOpcode()) {
15214 const BinaryWithFlagsSDNode *BinNode =
15215 cast<BinaryWithFlagsSDNode>(Op.getNode());
15216 if (BinNode->hasNoSignedWrap())
15226 // See if we can use the EFLAGS value from the operand instead of
15227 // doing a separate TEST. TEST always sets OF and CF to 0, so unless
15228 // we prove that the arithmetic won't overflow, we can't use OF or CF.
15229 if (Op.getResNo() != 0 || NeedOF || NeedCF) {
15230 // Emit a CMP with 0, which is the TEST pattern.
15231 //if (Op.getValueType() == MVT::i1)
15232 // return DAG.getNode(X86ISD::CMP, dl, MVT::i1, Op,
15233 // DAG.getConstant(0, MVT::i1));
15234 return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op,
15235 DAG.getConstant(0, Op.getValueType()));
15237 unsigned Opcode = 0;
15238 unsigned NumOperands = 0;
15240 // Truncate operations may prevent the merge of the SETCC instruction
15241 // and the arithmetic instruction before it. Attempt to truncate the operands
15242 // of the arithmetic instruction and use a reduced bit-width instruction.
15243 bool NeedTruncation = false;
15244 SDValue ArithOp = Op;
15245 if (Op->getOpcode() == ISD::TRUNCATE && Op->hasOneUse()) {
15246 SDValue Arith = Op->getOperand(0);
15247 // Both the trunc and the arithmetic op need to have one user each.
15248 if (Arith->hasOneUse())
15249 switch (Arith.getOpcode()) {
15256 NeedTruncation = true;
15262 // NOTICE: In the code below we use ArithOp to hold the arithmetic operation
15263 // which may be the result of a CAST. We use the variable 'Op', which is the
15264 // non-casted variable when we check for possible users.
15265 switch (ArithOp.getOpcode()) {
15267 // Due to an isel shortcoming, be conservative if this add is likely to be
15268 // selected as part of a load-modify-store instruction. When the root node
15269 // in a match is a store, isel doesn't know how to remap non-chain non-flag
15270 // uses of other nodes in the match, such as the ADD in this case. This
15271 // leads to the ADD being left around and reselected, with the result being
15272 // two adds in the output. Alas, even if none our users are stores, that
15273 // doesn't prove we're O.K. Ergo, if we have any parents that aren't
15274 // CopyToReg or SETCC, eschew INC/DEC. A better fix seems to require
15275 // climbing the DAG back to the root, and it doesn't seem to be worth the
15277 for (SDNode::use_iterator UI = Op.getNode()->use_begin(),
15278 UE = Op.getNode()->use_end(); UI != UE; ++UI)
15279 if (UI->getOpcode() != ISD::CopyToReg &&
15280 UI->getOpcode() != ISD::SETCC &&
15281 UI->getOpcode() != ISD::STORE)
15284 if (ConstantSDNode *C =
15285 dyn_cast<ConstantSDNode>(ArithOp.getNode()->getOperand(1))) {
15286 // An add of one will be selected as an INC.
15287 if (C->getAPIntValue() == 1 && !Subtarget->slowIncDec()) {
15288 Opcode = X86ISD::INC;
15293 // An add of negative one (subtract of one) will be selected as a DEC.
15294 if (C->getAPIntValue().isAllOnesValue() && !Subtarget->slowIncDec()) {
15295 Opcode = X86ISD::DEC;
15301 // Otherwise use a regular EFLAGS-setting add.
15302 Opcode = X86ISD::ADD;
15307 // If we have a constant logical shift that's only used in a comparison
15308 // against zero turn it into an equivalent AND. This allows turning it into
15309 // a TEST instruction later.
15310 if ((X86CC == X86::COND_E || X86CC == X86::COND_NE) && Op->hasOneUse() &&
15311 isa<ConstantSDNode>(Op->getOperand(1)) && !hasNonFlagsUse(Op)) {
15312 EVT VT = Op.getValueType();
15313 unsigned BitWidth = VT.getSizeInBits();
15314 unsigned ShAmt = Op->getConstantOperandVal(1);
15315 if (ShAmt >= BitWidth) // Avoid undefined shifts.
15317 APInt Mask = ArithOp.getOpcode() == ISD::SRL
15318 ? APInt::getHighBitsSet(BitWidth, BitWidth - ShAmt)
15319 : APInt::getLowBitsSet(BitWidth, BitWidth - ShAmt);
15320 if (!Mask.isSignedIntN(32)) // Avoid large immediates.
15322 SDValue New = DAG.getNode(ISD::AND, dl, VT, Op->getOperand(0),
15323 DAG.getConstant(Mask, VT));
15324 DAG.ReplaceAllUsesWith(Op, New);
    // If the primary 'and' result isn't used, don't bother using X86ISD::AND,
15331 // because a TEST instruction will be better.
15332 if (!hasNonFlagsUse(Op))
15338 // Due to the ISEL shortcoming noted above, be conservative if this op is
15339 // likely to be selected as part of a load-modify-store instruction.
15340 for (SDNode::use_iterator UI = Op.getNode()->use_begin(),
15341 UE = Op.getNode()->use_end(); UI != UE; ++UI)
15342 if (UI->getOpcode() == ISD::STORE)
15345 // Otherwise use a regular EFLAGS-setting instruction.
15346 switch (ArithOp.getOpcode()) {
15347 default: llvm_unreachable("unexpected operator!");
15348 case ISD::SUB: Opcode = X86ISD::SUB; break;
15349 case ISD::XOR: Opcode = X86ISD::XOR; break;
15350 case ISD::AND: Opcode = X86ISD::AND; break;
15352 if (!NeedTruncation && (X86CC == X86::COND_E || X86CC == X86::COND_NE)) {
15353 SDValue EFLAGS = LowerVectorAllZeroTest(Op, Subtarget, DAG);
15354 if (EFLAGS.getNode())
15357 Opcode = X86ISD::OR;
15371 return SDValue(Op.getNode(), 1);
15377 // If we found that truncation is beneficial, perform the truncation and
15379 if (NeedTruncation) {
15380 EVT VT = Op.getValueType();
15381 SDValue WideVal = Op->getOperand(0);
15382 EVT WideVT = WideVal.getValueType();
15383 unsigned ConvertedOp = 0;
15384 // Use a target machine opcode to prevent further DAGCombine
15385 // optimizations that may separate the arithmetic operations
15386 // from the setcc node.
15387 switch (WideVal.getOpcode()) {
15389 case ISD::ADD: ConvertedOp = X86ISD::ADD; break;
15390 case ISD::SUB: ConvertedOp = X86ISD::SUB; break;
15391 case ISD::AND: ConvertedOp = X86ISD::AND; break;
15392 case ISD::OR: ConvertedOp = X86ISD::OR; break;
15393 case ISD::XOR: ConvertedOp = X86ISD::XOR; break;
15397 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
15398 if (TLI.isOperationLegal(WideVal.getOpcode(), WideVT)) {
15399 SDValue V0 = DAG.getNode(ISD::TRUNCATE, dl, VT, WideVal.getOperand(0));
15400 SDValue V1 = DAG.getNode(ISD::TRUNCATE, dl, VT, WideVal.getOperand(1));
15401 Op = DAG.getNode(ConvertedOp, dl, VT, V0, V1);
15407 // Emit a CMP with 0, which is the TEST pattern.
15408 return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op,
15409 DAG.getConstant(0, Op.getValueType()));
15411 SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
15412 SmallVector<SDValue, 4> Ops(Op->op_begin(), Op->op_begin() + NumOperands);
15414 SDValue New = DAG.getNode(Opcode, dl, VTs, Ops);
15415 DAG.ReplaceAllUsesWith(Op, New);
15416 return SDValue(New.getNode(), 1);
15419 /// Emit nodes that will be selected as "cmp Op0,Op1", or something
15421 SDValue X86TargetLowering::EmitCmp(SDValue Op0, SDValue Op1, unsigned X86CC,
15422 SDLoc dl, SelectionDAG &DAG) const {
15423 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op1)) {
15424 if (C->getAPIntValue() == 0)
15425 return EmitTest(Op0, X86CC, dl, DAG);
15427 if (Op0.getValueType() == MVT::i1)
15428 llvm_unreachable("Unexpected comparison operation for MVT::i1 operands");
15431 if ((Op0.getValueType() == MVT::i8 || Op0.getValueType() == MVT::i16 ||
15432 Op0.getValueType() == MVT::i32 || Op0.getValueType() == MVT::i64)) {
15433 // Do the comparison at i32 if it's smaller, besides the Atom case.
15434 // This avoids subregister aliasing issues. Keep the smaller reference
15435 // if we're optimizing for size, however, as that'll allow better folding
15436 // of memory operations.
15437 if (Op0.getValueType() != MVT::i32 && Op0.getValueType() != MVT::i64 &&
15438 !DAG.getMachineFunction().getFunction()->hasFnAttribute(
15439 Attribute::MinSize) &&
15440 !Subtarget->isAtom()) {
15441 unsigned ExtendOp =
15442 isX86CCUnsigned(X86CC) ? ISD::ZERO_EXTEND : ISD::SIGN_EXTEND;
15443 Op0 = DAG.getNode(ExtendOp, dl, MVT::i32, Op0);
15444 Op1 = DAG.getNode(ExtendOp, dl, MVT::i32, Op1);
15446 // Use SUB instead of CMP to enable CSE between SUB and CMP.
15447 SDVTList VTs = DAG.getVTList(Op0.getValueType(), MVT::i32);
15448 SDValue Sub = DAG.getNode(X86ISD::SUB, dl, VTs,
15450 return SDValue(Sub.getNode(), 1);
15452 return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op0, Op1);
15455 /// Convert a comparison if required by the subtarget.
15456 SDValue X86TargetLowering::ConvertCmpIfNecessary(SDValue Cmp,
15457 SelectionDAG &DAG) const {
15458 // If the subtarget does not support the FUCOMI instruction, floating-point
15459 // comparisons have to be converted.
15460 if (Subtarget->hasCMov() ||
15461 Cmp.getOpcode() != X86ISD::CMP ||
15462 !Cmp.getOperand(0).getValueType().isFloatingPoint() ||
15463 !Cmp.getOperand(1).getValueType().isFloatingPoint())
15466 // The instruction selector will select an FUCOM instruction instead of
15467 // FUCOMI, which writes the comparison result to FPSW instead of EFLAGS. Hence
15468 // build an SDNode sequence that transfers the result from FPSW into EFLAGS:
15469 // (X86sahf (trunc (srl (X86fp_stsw (trunc (X86cmp ...)), 8))))
15471 SDValue TruncFPSW = DAG.getNode(ISD::TRUNCATE, dl, MVT::i16, Cmp);
15472 SDValue FNStSW = DAG.getNode(X86ISD::FNSTSW16r, dl, MVT::i16, TruncFPSW);
15473 SDValue Srl = DAG.getNode(ISD::SRL, dl, MVT::i16, FNStSW,
15474 DAG.getConstant(8, MVT::i8));
15475 SDValue TruncSrl = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Srl);
15476 return DAG.getNode(X86ISD::SAHF, dl, MVT::i32, TruncSrl);
15479 /// The minimum architected relative accuracy is 2^-12. We need one
15480 /// Newton-Raphson step to have a good float result (24 bits of precision).
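/// One Newton-Raphson refinement of an rsqrt estimate x0 for input a is
/// x1 = x0 * (1.5 - 0.5 * a * x0 * x0); the combiner that calls this hook is
/// expected to build that sequence using the RefinementSteps value set here.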
15481 SDValue X86TargetLowering::getRsqrtEstimate(SDValue Op,
15482 DAGCombinerInfo &DCI,
15483 unsigned &RefinementSteps,
15484 bool &UseOneConstNR) const {
15485 // FIXME: We should use instruction latency models to calculate the cost of
15486 // each potential sequence, but this is very hard to do reliably because
15487 // at least Intel's Core* chips have variable timing based on the number of
15488 // significant digits in the divisor and/or sqrt operand.
15489 if (!Subtarget->useSqrtEst())
15492 EVT VT = Op.getValueType();
15494 // SSE1 has rsqrtss and rsqrtps.
15495 // TODO: Add support for AVX512 (v16f32).
15496 // It is likely not profitable to do this for f64 because a double-precision
15497 // rsqrt estimate with refinement on x86 prior to FMA requires at least 16
15498 // instructions: convert to single, rsqrtss, convert back to double, refine
15499 // (3 steps = at least 13 insts). If an 'rsqrtsd' variant was added to the ISA
15500 // along with FMA, this could be a throughput win.
15501 if ((Subtarget->hasSSE1() && (VT == MVT::f32 || VT == MVT::v4f32)) ||
15502 (Subtarget->hasAVX() && VT == MVT::v8f32)) {
15503 RefinementSteps = 1;
15504 UseOneConstNR = false;
15505 return DCI.DAG.getNode(X86ISD::FRSQRT, SDLoc(Op), VT, Op);
15510 /// The minimum architected relative accuracy is 2^-12. We need one
15511 /// Newton-Raphson step to have a good float result (24 bits of precision).
15512 SDValue X86TargetLowering::getRecipEstimate(SDValue Op,
15513 DAGCombinerInfo &DCI,
15514 unsigned &RefinementSteps) const {
15515 // FIXME: We should use instruction latency models to calculate the cost of
15516 // each potential sequence, but this is very hard to do reliably because
15517 // at least Intel's Core* chips have variable timing based on the number of
15518 // significant digits in the divisor.
  if (!Subtarget->useReciprocalEst())
    return SDValue();
15522 EVT VT = Op.getValueType();
15524 // SSE1 has rcpss and rcpps. AVX adds a 256-bit variant for rcpps.
15525 // TODO: Add support for AVX512 (v16f32).
15526 // It is likely not profitable to do this for f64 because a double-precision
15527 // reciprocal estimate with refinement on x86 prior to FMA requires
15528 // 15 instructions: convert to single, rcpss, convert back to double, refine
15529 // (3 steps = 12 insts). If an 'rcpsd' variant was added to the ISA
15530 // along with FMA, this could be a throughput win.
15531 if ((Subtarget->hasSSE1() && (VT == MVT::f32 || VT == MVT::v4f32)) ||
      (Subtarget->hasAVX() && VT == MVT::v8f32)) {
    RefinementSteps = ReciprocalEstimateRefinementSteps;
    return DCI.DAG.getNode(X86ISD::FRCP, SDLoc(Op), VT, Op);
  }
  return SDValue();
}
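// Illustrative note (not part of the lowering itself): the corresponding
// Newton-Raphson step for the reciprocal estimate is
//   x1 = x0 * (2.0 - a * x0)
// so with ReciprocalEstimateRefinementSteps == 1 the ~2^-12 hardware estimate
// is refined to roughly 24 bits of precision.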
15539 static bool isAllOnes(SDValue V) {
15540 ConstantSDNode *C = dyn_cast<ConstantSDNode>(V);
15541 return C && C->isAllOnesValue();
15544 /// LowerToBT - Result of 'and' is compared against zero. Turn it into a BT node
15545 /// if it's possible.
15546 SDValue X86TargetLowering::LowerToBT(SDValue And, ISD::CondCode CC,
15547 SDLoc dl, SelectionDAG &DAG) const {
15548 SDValue Op0 = And.getOperand(0);
15549 SDValue Op1 = And.getOperand(1);
15550 if (Op0.getOpcode() == ISD::TRUNCATE)
15551 Op0 = Op0.getOperand(0);
  if (Op1.getOpcode() == ISD::TRUNCATE)
    Op1 = Op1.getOperand(0);

  SDValue LHS, RHS;
15556 if (Op1.getOpcode() == ISD::SHL)
15557 std::swap(Op0, Op1);
15558 if (Op0.getOpcode() == ISD::SHL) {
15559 if (ConstantSDNode *And00C = dyn_cast<ConstantSDNode>(Op0.getOperand(0)))
15560 if (And00C->getZExtValue() == 1) {
        // If we looked past a truncate, check that it's only truncating away
        // known zeros.
        unsigned BitWidth = Op0.getValueSizeInBits();
        unsigned AndBitWidth = And.getValueSizeInBits();
        if (BitWidth > AndBitWidth) {
          APInt Zeros, Ones;
          DAG.computeKnownBits(Op0, Zeros, Ones);
          if (Zeros.countLeadingOnes() < BitWidth - AndBitWidth)
            return SDValue();
        }
        LHS = Op1;
        RHS = Op0.getOperand(1);
      }
15574 } else if (Op1.getOpcode() == ISD::Constant) {
15575 ConstantSDNode *AndRHS = cast<ConstantSDNode>(Op1);
15576 uint64_t AndRHSVal = AndRHS->getZExtValue();
15577 SDValue AndLHS = Op0;
15579 if (AndRHSVal == 1 && AndLHS.getOpcode() == ISD::SRL) {
15580 LHS = AndLHS.getOperand(0);
      RHS = AndLHS.getOperand(1);
    }
    // Use BT if the immediate can't be encoded in a TEST instruction.
    if (!isUInt<32>(AndRHSVal) && isPowerOf2_64(AndRHSVal)) {
      LHS = AndLHS;
      RHS = DAG.getConstant(Log2_64_Ceil(AndRHSVal), LHS.getValueType());
    }
  }

  if (LHS.getNode()) {
15592 // If LHS is i8, promote it to i32 with any_extend. There is no i8 BT
15593 // instruction. Since the shift amount is in-range-or-undefined, we know
15594 // that doing a bittest on the i32 value is ok. We extend to i32 because
15595 // the encoding for the i16 version is larger than the i32 version.
15596 // Also promote i16 to i32 for performance / code size reason.
15597 if (LHS.getValueType() == MVT::i8 ||
15598 LHS.getValueType() == MVT::i16)
15599 LHS = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, LHS);
15601 // If the operand types disagree, extend the shift amount to match. Since
15602 // BT ignores high bits (like shifts) we can use anyextend.
15603 if (LHS.getValueType() != RHS.getValueType())
15604 RHS = DAG.getNode(ISD::ANY_EXTEND, dl, LHS.getValueType(), RHS);
    SDValue BT = DAG.getNode(X86ISD::BT, dl, MVT::i32, LHS, RHS);
    X86::CondCode Cond = CC == ISD::SETEQ ? X86::COND_AE : X86::COND_B;
    return DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
                       DAG.getConstant(Cond, MVT::i8), BT);
  }

  return SDValue();
}
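// Worked example (a sketch of what this lowering produces, not additional
// functionality): for a SETNE against zero, "(X & (1 << 5)) != 0" becomes
// "BT X, 5" followed by "SETB", since BT copies the selected bit into CF and
// COND_B tests CF; for SETEQ the inverse condition COND_AE is used instead.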
/// \brief - Turns an ISD::CondCode into a value suitable for SSE floating
/// point mask CMPs.
static int translateX86FSETCC(ISD::CondCode SetCCOpcode, SDValue &Op0,
                              SDValue &Op1) {
  unsigned SSECC;
  bool Swap = false;
  // SSE Condition code mapping:
  //  0 - EQ, 1 - LT, 2 - LE, 3 - UNORD, 4 - NEQ, 5 - NLT, 6 - NLE, 7 - ORD
  switch (SetCCOpcode) {
  default: llvm_unreachable("Unexpected SETCC condition");
  case ISD::SETOEQ:
  case ISD::SETEQ:  SSECC = 0; break;
  case ISD::SETOGT:
  case ISD::SETGT:  Swap = true; // Fallthrough
  case ISD::SETLT:
  case ISD::SETOLT: SSECC = 1; break;
  case ISD::SETOGE:
  case ISD::SETGE:  Swap = true; // Fallthrough
  case ISD::SETLE:
  case ISD::SETOLE: SSECC = 2; break;
  case ISD::SETUO:  SSECC = 3; break;
  case ISD::SETUNE:
  case ISD::SETNE:  SSECC = 4; break;
  case ISD::SETULE: Swap = true; // Fallthrough
  case ISD::SETUGE: SSECC = 5; break;
  case ISD::SETULT: Swap = true; // Fallthrough
  case ISD::SETUGT: SSECC = 6; break;
  case ISD::SETO:   SSECC = 7; break;
  case ISD::SETUEQ:
  case ISD::SETONE: SSECC = 8; break;
  }
  if (Swap)
    std::swap(Op0, Op1);
  return SSECC;
}
// Lower256IntVSETCC - Break a 256-bit integer VSETCC into two new 128-bit
// ones, and then concatenate the result back.
15662 static SDValue Lower256IntVSETCC(SDValue Op, SelectionDAG &DAG) {
15663 MVT VT = Op.getSimpleValueType();
15665 assert(VT.is256BitVector() && Op.getOpcode() == ISD::SETCC &&
15666 "Unsupported value type for operation");
15668 unsigned NumElems = VT.getVectorNumElements();
  SDLoc dl(Op);
  SDValue CC = Op.getOperand(2);
15672 // Extract the LHS vectors
15673 SDValue LHS = Op.getOperand(0);
15674 SDValue LHS1 = Extract128BitVector(LHS, 0, DAG, dl);
15675 SDValue LHS2 = Extract128BitVector(LHS, NumElems/2, DAG, dl);
15677 // Extract the RHS vectors
15678 SDValue RHS = Op.getOperand(1);
15679 SDValue RHS1 = Extract128BitVector(RHS, 0, DAG, dl);
15680 SDValue RHS2 = Extract128BitVector(RHS, NumElems/2, DAG, dl);
15682 // Issue the operation on the smaller types and concatenate the result back
15683 MVT EltVT = VT.getVectorElementType();
15684 MVT NewVT = MVT::getVectorVT(EltVT, NumElems/2);
15685 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT,
15686 DAG.getNode(Op.getOpcode(), dl, NewVT, LHS1, RHS1, CC),
15687 DAG.getNode(Op.getOpcode(), dl, NewVT, LHS2, RHS2, CC));
15690 static SDValue LowerIntVSETCC_AVX512(SDValue Op, SelectionDAG &DAG,
15691 const X86Subtarget *Subtarget) {
15692 SDValue Op0 = Op.getOperand(0);
15693 SDValue Op1 = Op.getOperand(1);
15694 SDValue CC = Op.getOperand(2);
  MVT VT = Op.getSimpleValueType();
  SDLoc dl(Op);
15698 assert(Op0.getValueType().getVectorElementType().getSizeInBits() >= 8 &&
15699 Op.getValueType().getScalarType() == MVT::i1 &&
15700 "Cannot set masked compare for this operation");
  ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get();
  unsigned Opc = 0;
  bool Unsigned = false;
  bool Swap = false;
  unsigned SSECC;
  switch (SetCCOpcode) {
15708 default: llvm_unreachable("Unexpected SETCC condition");
15709 case ISD::SETNE: SSECC = 4; break;
15710 case ISD::SETEQ: Opc = X86ISD::PCMPEQM; break;
15711 case ISD::SETUGT: SSECC = 6; Unsigned = true; break;
15712 case ISD::SETLT: Swap = true; //fall-through
15713 case ISD::SETGT: Opc = X86ISD::PCMPGTM; break;
15714 case ISD::SETULT: SSECC = 1; Unsigned = true; break;
15715 case ISD::SETUGE: SSECC = 5; Unsigned = true; break; //NLT
15716 case ISD::SETGE: Swap = true; SSECC = 2; break; // LE + swap
15717 case ISD::SETULE: Unsigned = true; //fall-through
  case ISD::SETLE:  SSECC = 2; break;
  }

  if (Swap)
    std::swap(Op0, Op1);
  if (Opc)
    return DAG.getNode(Opc, dl, VT, Op0, Op1);
  Opc = Unsigned ? X86ISD::CMPMU : X86ISD::CMPM;
  return DAG.getNode(Opc, dl, VT, Op0, Op1,
                     DAG.getConstant(SSECC, MVT::i8));
}
15730 /// \brief Try to turn a VSETULT into a VSETULE by modifying its second
15731 /// operand \p Op1. If non-trivial (for example because it's not constant)
15732 /// return an empty value.
static SDValue ChangeVSETULTtoVSETULE(SDLoc dl, SDValue Op1, SelectionDAG &DAG)
{
  BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(Op1.getNode());
  if (!BV)
    return SDValue();
  MVT VT = Op1.getSimpleValueType();
15740 MVT EVT = VT.getVectorElementType();
15741 unsigned n = VT.getVectorNumElements();
15742 SmallVector<SDValue, 8> ULTOp1;
15744 for (unsigned i = 0; i < n; ++i) {
15745 ConstantSDNode *Elt = dyn_cast<ConstantSDNode>(BV->getOperand(i));
    if (!Elt || Elt->isOpaque() || Elt->getValueType(0) != EVT)
      return SDValue();
    // Avoid underflow.
    APInt Val = Elt->getAPIntValue();
    if (Val == 0)
      return SDValue();
    ULTOp1.push_back(DAG.getConstant(Val - 1, EVT));
  }

  return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, ULTOp1);
}
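// Illustrative example (hypothetical values, not from the original source):
// for a v4i32 splat of 8, "x u< 8" is rewritten as "x u<= 7"; the rewrite is
// rejected when any lane is 0, since "u< 0" is always false and has no
// unsigned-less-or-equal equivalent.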
15760 static SDValue LowerVSETCC(SDValue Op, const X86Subtarget *Subtarget,
15761 SelectionDAG &DAG) {
15762 SDValue Op0 = Op.getOperand(0);
15763 SDValue Op1 = Op.getOperand(1);
15764 SDValue CC = Op.getOperand(2);
15765 MVT VT = Op.getSimpleValueType();
  ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get();
  bool isFP = Op.getOperand(1).getSimpleValueType().isFloatingPoint();
  SDLoc dl(Op);
  if (isFP) {
#ifndef NDEBUG
    MVT EltVT = Op0.getSimpleValueType().getVectorElementType();
    assert(EltVT == MVT::f32 || EltVT == MVT::f64);
#endif
15776 unsigned SSECC = translateX86FSETCC(SetCCOpcode, Op0, Op1);
15777 unsigned Opc = X86ISD::CMPP;
15778 if (Subtarget->hasAVX512() && VT.getVectorElementType() == MVT::i1) {
15779 assert(VT.getVectorNumElements() <= 16);
      Opc = X86ISD::CMPM;
    }
    // In the two special cases we can't handle, emit two comparisons.
    if (SSECC == 8) {
      unsigned CC0, CC1;
      unsigned CombineOpc;
      if (SetCCOpcode == ISD::SETUEQ) {
        CC0 = 3; CC1 = 0; CombineOpc = ISD::OR;
      } else {
        assert(SetCCOpcode == ISD::SETONE);
        CC0 = 7; CC1 = 4; CombineOpc = ISD::AND;
      }
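      // Explanatory note: in the SSE compare-predicate encoding, 3 = UNORD,
      // 0 = EQ, 7 = ORD and 4 = NEQ. So SETUEQ is emitted as
      // (UNORD(a,b) | EQ(a,b)) and SETONE as (ORD(a,b) & NEQ(a,b)) using the
      // two compares built below.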
      SDValue Cmp0 = DAG.getNode(Opc, dl, VT, Op0, Op1,
                                 DAG.getConstant(CC0, MVT::i8));
      SDValue Cmp1 = DAG.getNode(Opc, dl, VT, Op0, Op1,
                                 DAG.getConstant(CC1, MVT::i8));
      return DAG.getNode(CombineOpc, dl, VT, Cmp0, Cmp1);
    }
    // Handle all other FP comparisons here.
    return DAG.getNode(Opc, dl, VT, Op0, Op1,
                       DAG.getConstant(SSECC, MVT::i8));
  }
15804 // Break 256-bit integer vector compare into smaller ones.
15805 if (VT.is256BitVector() && !Subtarget->hasInt256())
15806 return Lower256IntVSETCC(Op, DAG);
15808 bool MaskResult = (VT.getVectorElementType() == MVT::i1);
15809 EVT OpVT = Op1.getValueType();
15810 if (Subtarget->hasAVX512()) {
15811 if (Op1.getValueType().is512BitVector() ||
15812 (Subtarget->hasBWI() && Subtarget->hasVLX()) ||
15813 (MaskResult && OpVT.getVectorElementType().getSizeInBits() >= 32))
15814 return LowerIntVSETCC_AVX512(Op, DAG, Subtarget);
  // In the AVX-512 architecture, setcc returns a mask with i1 elements,
  // but there is no compare instruction for i8 and i16 elements in KNL.
  // We are not talking about 512-bit operands in this case, these
  // types are illegal.
  if (MaskResult &&
      (OpVT.getVectorElementType().getSizeInBits() < 32 &&
15822 OpVT.getVectorElementType().getSizeInBits() >= 8))
15823 return DAG.getNode(ISD::TRUNCATE, dl, VT,
15824 DAG.getNode(ISD::SETCC, dl, OpVT, Op0, Op1, CC));
15827 // We are handling one of the integer comparisons here. Since SSE only has
15828 // GT and EQ comparisons for integer, swapping operands and multiple
15829 // operations may be required for some comparisons.
15831 bool Swap = false, Invert = false, FlipSigns = false, MinMax = false;
15832 bool Subus = false;
15834 switch (SetCCOpcode) {
15835 default: llvm_unreachable("Unexpected SETCC condition");
15836 case ISD::SETNE: Invert = true;
15837 case ISD::SETEQ: Opc = X86ISD::PCMPEQ; break;
15838 case ISD::SETLT: Swap = true;
15839 case ISD::SETGT: Opc = X86ISD::PCMPGT; break;
15840 case ISD::SETGE: Swap = true;
15841 case ISD::SETLE: Opc = X86ISD::PCMPGT;
15842 Invert = true; break;
15843 case ISD::SETULT: Swap = true;
15844 case ISD::SETUGT: Opc = X86ISD::PCMPGT;
15845 FlipSigns = true; break;
15846 case ISD::SETUGE: Swap = true;
15847 case ISD::SETULE: Opc = X86ISD::PCMPGT;
15848 FlipSigns = true; Invert = true; break;
15851 // Special case: Use min/max operations for SETULE/SETUGE
  MVT VET = VT.getVectorElementType();
  bool hasMinMax =
       (Subtarget->hasSSE41() && (VET >= MVT::i8 && VET <= MVT::i32))
    || (Subtarget->hasSSE2()  && (VET == MVT::i8));

  if (hasMinMax) {
    switch (SetCCOpcode) {
    default: break;
    case ISD::SETULE: Opc = X86ISD::UMIN; MinMax = true; break;
    case ISD::SETUGE: Opc = X86ISD::UMAX; MinMax = true; break;
    }

    if (MinMax) { Swap = false; Invert = false; FlipSigns = false; }
  }
15867 bool hasSubus = Subtarget->hasSSE2() && (VET == MVT::i8 || VET == MVT::i16);
15868 if (!MinMax && hasSubus) {
15869 // As another special case, use PSUBUS[BW] when it's profitable. E.g. for
15871 // t = psubus Op0, Op1
15872 // pcmpeq t, <0..0>
    switch (SetCCOpcode) {
    default: break;
    case ISD::SETULT: {
      // If the comparison is against a constant we can turn this into a
      // setule. With psubus, setule does not require a swap. This is
      // beneficial because the constant in the register is no longer
      // clobbered as the destination, so it can be hoisted out of a loop.
      // Only do this pre-AVX since vpcmp* is no longer destructive.
      if (Subtarget->hasAVX())
        break;
      SDValue ULEOp1 = ChangeVSETULTtoVSETULE(dl, Op1, DAG);
      if (ULEOp1.getNode()) {
        Op1 = ULEOp1;
        Subus = true; Invert = false; Swap = false;
      }
      break;
    }
    // Psubus is better than flip-sign because it requires no inversion.
    case ISD::SETUGE: Subus = true; Invert = false; Swap = true;  break;
    case ISD::SETULE: Subus = true; Invert = false; Swap = false; break;
    }

    if (Subus) {
      Opc = X86ISD::SUBUS;
      FlipSigns = false;
    }
  }

  if (Swap)
    std::swap(Op0, Op1);
15904 // Check that the operation in question is available (most are plain SSE2,
15905 // but PCMPGTQ and PCMPEQQ have different requirements).
15906 if (VT == MVT::v2i64) {
15907 if (Opc == X86ISD::PCMPGT && !Subtarget->hasSSE42()) {
15908 assert(Subtarget->hasSSE2() && "Don't know how to lower!");
15910 // First cast everything to the right type.
15911 Op0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op0);
15912 Op1 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op1);
15914 // Since SSE has no unsigned integer comparisons, we need to flip the sign
15915 // bits of the inputs before performing those operations. The lower
15916 // compare is always unsigned.
      SDValue SB;
      if (FlipSigns) {
        SB = DAG.getConstant(0x80000000U, MVT::v4i32);
      } else {
        SDValue Sign = DAG.getConstant(0x80000000U, MVT::i32);
        SDValue Zero = DAG.getConstant(0x00000000U, MVT::i32);
        SB = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
                         Sign, Zero, Sign, Zero);
      }
15926 Op0 = DAG.getNode(ISD::XOR, dl, MVT::v4i32, Op0, SB);
15927 Op1 = DAG.getNode(ISD::XOR, dl, MVT::v4i32, Op1, SB);
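      // Explanatory note: flipping the sign bit maps an unsigned comparison
      // onto the signed PCMPGTD, because for 32-bit lanes
      //   x u< y  <=>  (x ^ 0x80000000) s< (y ^ 0x80000000)
      // In the signed 64-bit case only the low dwords are flipped, since the
      // high halves already compare correctly as signed values.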
15929 // Emulate PCMPGTQ with (hi1 > hi2) | ((hi1 == hi2) & (lo1 > lo2))
15930 SDValue GT = DAG.getNode(X86ISD::PCMPGT, dl, MVT::v4i32, Op0, Op1);
15931 SDValue EQ = DAG.getNode(X86ISD::PCMPEQ, dl, MVT::v4i32, Op0, Op1);
15933 // Create masks for only the low parts/high parts of the 64 bit integers.
15934 static const int MaskHi[] = { 1, 1, 3, 3 };
15935 static const int MaskLo[] = { 0, 0, 2, 2 };
15936 SDValue EQHi = DAG.getVectorShuffle(MVT::v4i32, dl, EQ, EQ, MaskHi);
15937 SDValue GTLo = DAG.getVectorShuffle(MVT::v4i32, dl, GT, GT, MaskLo);
15938 SDValue GTHi = DAG.getVectorShuffle(MVT::v4i32, dl, GT, GT, MaskHi);
15940 SDValue Result = DAG.getNode(ISD::AND, dl, MVT::v4i32, EQHi, GTLo);
      Result = DAG.getNode(ISD::OR, dl, MVT::v4i32, Result, GTHi);
      if (Invert)
        Result = DAG.getNOT(dl, Result, MVT::v4i32);
      return DAG.getNode(ISD::BITCAST, dl, VT, Result);
    }
15949 if (Opc == X86ISD::PCMPEQ && !Subtarget->hasSSE41()) {
15950 // If pcmpeqq is missing but pcmpeqd is available synthesize pcmpeqq with
15951 // pcmpeqd + pshufd + pand.
15952 assert(Subtarget->hasSSE2() && !FlipSigns && "Don't know how to lower!");
15954 // First cast everything to the right type.
15955 Op0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op0);
15956 Op1 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op1);
15959 SDValue Result = DAG.getNode(Opc, dl, MVT::v4i32, Op0, Op1);
15961 // Make sure the lower and upper halves are both all-ones.
15962 static const int Mask[] = { 1, 0, 3, 2 };
15963 SDValue Shuf = DAG.getVectorShuffle(MVT::v4i32, dl, Result, Result, Mask);
      Result = DAG.getNode(ISD::AND, dl, MVT::v4i32, Result, Shuf);
      if (Invert)
        Result = DAG.getNOT(dl, Result, MVT::v4i32);
      return DAG.getNode(ISD::BITCAST, dl, VT, Result);
    }
  }
15973 // Since SSE has no unsigned integer comparisons, we need to flip the sign
15974 // bits of the inputs before performing those operations.
  if (FlipSigns) {
    EVT EltVT = VT.getVectorElementType();
    SDValue SB = DAG.getConstant(APInt::getSignBit(EltVT.getSizeInBits()), VT);
    Op0 = DAG.getNode(ISD::XOR, dl, VT, Op0, SB);
    Op1 = DAG.getNode(ISD::XOR, dl, VT, Op1, SB);
  }
  SDValue Result = DAG.getNode(Opc, dl, VT, Op0, Op1);

  // If the logical-not of the result is required, perform that now.
  if (Invert)
    Result = DAG.getNOT(dl, Result, VT);
  if (MinMax)
    Result = DAG.getNode(X86ISD::PCMPEQ, dl, VT, Op0, Result);
  if (Subus)
    Result = DAG.getNode(X86ISD::PCMPEQ, dl, VT, Result,
                         getZeroVector(VT, Subtarget, DAG, dl));
  return Result;
}
15998 SDValue X86TargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
16000 MVT VT = Op.getSimpleValueType();
16002 if (VT.isVector()) return LowerVSETCC(Op, Subtarget, DAG);
16004 assert(((!Subtarget->hasAVX512() && VT == MVT::i8) || (VT == MVT::i1))
16005 && "SetCC type must be 8-bit or 1-bit integer");
16006 SDValue Op0 = Op.getOperand(0);
16007 SDValue Op1 = Op.getOperand(1);
  SDLoc dl(Op);
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
16011 // Optimize to BT if possible.
16012 // Lower (X & (1 << N)) == 0 to BT(X, N).
16013 // Lower ((X >>u N) & 1) != 0 to BT(X, N).
16014 // Lower ((X >>s N) & 1) != 0 to BT(X, N).
16015 if (Op0.getOpcode() == ISD::AND && Op0.hasOneUse() &&
16016 Op1.getOpcode() == ISD::Constant &&
16017 cast<ConstantSDNode>(Op1)->isNullValue() &&
      (CC == ISD::SETEQ || CC == ISD::SETNE)) {
    SDValue NewSetCC = LowerToBT(Op0, CC, dl, DAG);
    if (NewSetCC.getNode()) {
      if (VT == MVT::i1)
        return DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, NewSetCC);
      return NewSetCC;
    }
  }
  // Look for X == 0, X == 1, X != 0, or X != 1.  We can simplify some forms of
  // these.
16029 if (Op1.getOpcode() == ISD::Constant &&
16030 (cast<ConstantSDNode>(Op1)->getZExtValue() == 1 ||
16031 cast<ConstantSDNode>(Op1)->isNullValue()) &&
16032 (CC == ISD::SETEQ || CC == ISD::SETNE)) {
16034 // If the input is a setcc, then reuse the input setcc or use a new one with
16035 // the inverted condition.
    if (Op0.getOpcode() == X86ISD::SETCC) {
      X86::CondCode CCode = (X86::CondCode)Op0.getConstantOperandVal(0);
      bool Invert = (CC == ISD::SETNE) ^
        cast<ConstantSDNode>(Op1)->isNullValue();
      if (!Invert)
        return Op0;

      CCode = X86::GetOppositeBranchCondition(CCode);
      SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
                                  DAG.getConstant(CCode, MVT::i8),
                                  Op0.getOperand(1));
      if (VT == MVT::i1)
        return DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, SetCC);
      return SetCC;
    }
  }
16052 if ((Op0.getValueType() == MVT::i1) && (Op1.getOpcode() == ISD::Constant) &&
16053 (cast<ConstantSDNode>(Op1)->getZExtValue() == 1) &&
16054 (CC == ISD::SETEQ || CC == ISD::SETNE)) {
16056 ISD::CondCode NewCC = ISD::getSetCCInverse(CC, true);
16057 return DAG.getSetCC(dl, VT, Op0, DAG.getConstant(0, MVT::i1), NewCC);
16060 bool isFP = Op1.getSimpleValueType().isFloatingPoint();
16061 unsigned X86CC = TranslateX86CC(CC, isFP, Op0, Op1, DAG);
  if (X86CC == X86::COND_INVALID)
    return SDValue();
16065 SDValue EFLAGS = EmitCmp(Op0, Op1, X86CC, dl, DAG);
16066 EFLAGS = ConvertCmpIfNecessary(EFLAGS, DAG);
  SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
                              DAG.getConstant(X86CC, MVT::i8), EFLAGS);
  if (VT == MVT::i1)
    return DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, SetCC);
  return SetCC;
}
16074 // isX86LogicalCmp - Return true if opcode is a X86 logical comparison.
16075 static bool isX86LogicalCmp(SDValue Op) {
16076 unsigned Opc = Op.getNode()->getOpcode();
16077 if (Opc == X86ISD::CMP || Opc == X86ISD::COMI || Opc == X86ISD::UCOMI ||
      Opc == X86ISD::SAHF)
    return true;
16080 if (Op.getResNo() == 1 &&
16081 (Opc == X86ISD::ADD ||
16082 Opc == X86ISD::SUB ||
16083 Opc == X86ISD::ADC ||
16084 Opc == X86ISD::SBB ||
16085 Opc == X86ISD::SMUL ||
16086 Opc == X86ISD::UMUL ||
16087 Opc == X86ISD::INC ||
16088 Opc == X86ISD::DEC ||
16089 Opc == X86ISD::OR ||
16090 Opc == X86ISD::XOR ||
       Opc == X86ISD::AND))
    return true;

  if (Op.getResNo() == 2 && Opc == X86ISD::UMUL)
    return true;

  return false;
}
16100 static bool isTruncWithZeroHighBitsInput(SDValue V, SelectionDAG &DAG) {
  if (V.getOpcode() != ISD::TRUNCATE)
    return false;
16104 SDValue VOp0 = V.getOperand(0);
16105 unsigned InBits = VOp0.getValueSizeInBits();
16106 unsigned Bits = V.getValueSizeInBits();
16107 return DAG.MaskedValueIsZero(VOp0, APInt::getHighBitsSet(InBits,InBits-Bits));
16110 SDValue X86TargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
16111 bool addTest = true;
16112 SDValue Cond = Op.getOperand(0);
16113 SDValue Op1 = Op.getOperand(1);
  SDValue Op2 = Op.getOperand(2);
  SDLoc DL(Op);
  EVT VT = Op1.getValueType();
  SDValue CC;
16119 // Lower fp selects into a CMP/AND/ANDN/OR sequence when the necessary SSE ops
16120 // are available. Otherwise fp cmovs get lowered into a less efficient branch
16121 // sequence later on.
16122 if (Cond.getOpcode() == ISD::SETCC &&
16123 ((Subtarget->hasSSE2() && (VT == MVT::f32 || VT == MVT::f64)) ||
16124 (Subtarget->hasSSE1() && VT == MVT::f32)) &&
16125 VT == Cond.getOperand(0).getValueType() && Cond->hasOneUse()) {
16126 SDValue CondOp0 = Cond.getOperand(0), CondOp1 = Cond.getOperand(1);
16127 int SSECC = translateX86FSETCC(
16128 cast<CondCodeSDNode>(Cond.getOperand(2))->get(), CondOp0, CondOp1);
    if (SSECC != 8) {
      if (Subtarget->hasAVX512()) {
        SDValue Cmp = DAG.getNode(X86ISD::FSETCC, DL, MVT::i1, CondOp0, CondOp1,
                                  DAG.getConstant(SSECC, MVT::i8));
        return DAG.getNode(X86ISD::SELECT, DL, VT, Cmp, Op1, Op2);
      }
      SDValue Cmp = DAG.getNode(X86ISD::FSETCC, DL, VT, CondOp0, CondOp1,
                                DAG.getConstant(SSECC, MVT::i8));
      SDValue AndN = DAG.getNode(X86ISD::FANDN, DL, VT, Cmp, Op2);
      SDValue And = DAG.getNode(X86ISD::FAND, DL, VT, Cmp, Op1);
      return DAG.getNode(X86ISD::FOR, DL, VT, AndN, And);
    }
  }
16144 if (Cond.getOpcode() == ISD::SETCC) {
    SDValue NewCond = LowerSETCC(Cond, DAG);
    if (NewCond.getNode())
      Cond = NewCond;
  }
16150 // (select (x == 0), -1, y) -> (sign_bit (x - 1)) | y
16151 // (select (x == 0), y, -1) -> ~(sign_bit (x - 1)) | y
16152 // (select (x != 0), y, -1) -> (sign_bit (x - 1)) | y
16153 // (select (x != 0), -1, y) -> ~(sign_bit (x - 1)) | y
16154 if (Cond.getOpcode() == X86ISD::SETCC &&
16155 Cond.getOperand(1).getOpcode() == X86ISD::CMP &&
16156 isZero(Cond.getOperand(1).getOperand(1))) {
16157 SDValue Cmp = Cond.getOperand(1);
16159 unsigned CondCode =cast<ConstantSDNode>(Cond.getOperand(0))->getZExtValue();
16161 if ((isAllOnes(Op1) || isAllOnes(Op2)) &&
16162 (CondCode == X86::COND_E || CondCode == X86::COND_NE)) {
16163 SDValue Y = isAllOnes(Op2) ? Op1 : Op2;
16165 SDValue CmpOp0 = Cmp.getOperand(0);
16166 // Apply further optimizations for special cases
16167 // (select (x != 0), -1, 0) -> neg & sbb
16168 // (select (x == 0), 0, -1) -> neg & sbb
16169 if (ConstantSDNode *YC = dyn_cast<ConstantSDNode>(Y))
16170 if (YC->isNullValue() &&
16171 (isAllOnes(Op1) == (CondCode == X86::COND_NE))) {
16172 SDVTList VTs = DAG.getVTList(CmpOp0.getValueType(), MVT::i32);
16173 SDValue Neg = DAG.getNode(X86ISD::SUB, DL, VTs,
                                    DAG.getConstant(0, CmpOp0.getValueType()),
                                    CmpOp0);
          SDValue Res = DAG.getNode(X86ISD::SETCC_CARRY, DL, Op.getValueType(),
                                    DAG.getConstant(X86::COND_B, MVT::i8),
                                    SDValue(Neg.getNode(), 1));
          return Res;
        }
16182 Cmp = DAG.getNode(X86ISD::CMP, DL, MVT::i32,
16183 CmpOp0, DAG.getConstant(1, CmpOp0.getValueType()));
16184 Cmp = ConvertCmpIfNecessary(Cmp, DAG);
16186 SDValue Res = // Res = 0 or -1.
16187 DAG.getNode(X86ISD::SETCC_CARRY, DL, Op.getValueType(),
16188 DAG.getConstant(X86::COND_B, MVT::i8), Cmp);
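      // Explanatory note: X86ISD::SETCC_CARRY materializes as SBB reg, reg,
      // which yields 0 when CF is clear and -1 (all ones) when CF is set. The
      // CMP against 1 above sets CF exactly when CmpOp0 == 0, so Res is -1
      // for x == 0 and 0 otherwise, before the optional NOT below.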
16190 if (isAllOnes(Op1) != (CondCode == X86::COND_E))
16191 Res = DAG.getNOT(DL, Res, Res.getValueType());
      ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(Op2);
      if (!N2C || !N2C->isNullValue())
        Res = DAG.getNode(ISD::OR, DL, Res.getValueType(), Res, Y);
      return Res;
    }
  }
16200 // Look past (and (setcc_carry (cmp ...)), 1).
16201 if (Cond.getOpcode() == ISD::AND &&
16202 Cond.getOperand(0).getOpcode() == X86ISD::SETCC_CARRY) {
16203 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Cond.getOperand(1));
16204 if (C && C->getAPIntValue() == 1)
16205 Cond = Cond.getOperand(0);
16208 // If condition flag is set by a X86ISD::CMP, then use it as the condition
16209 // setting operand in place of the X86ISD::SETCC.
16210 unsigned CondOpcode = Cond.getOpcode();
16211 if (CondOpcode == X86ISD::SETCC ||
16212 CondOpcode == X86ISD::SETCC_CARRY) {
16213 CC = Cond.getOperand(0);
16215 SDValue Cmp = Cond.getOperand(1);
16216 unsigned Opc = Cmp.getOpcode();
16217 MVT VT = Op.getSimpleValueType();
16219 bool IllegalFPCMov = false;
16220 if (VT.isFloatingPoint() && !VT.isVector() &&
16221 !isScalarFPTypeInSSEReg(VT)) // FPStack?
16222 IllegalFPCMov = !hasFPCMov(cast<ConstantSDNode>(CC)->getSExtValue());
    if ((isX86LogicalCmp(Cmp) && !IllegalFPCMov) ||
        Opc == X86ISD::BT) { // FIXME
      Cond = Cmp;
      addTest = false;
    }
16229 } else if (CondOpcode == ISD::USUBO || CondOpcode == ISD::SSUBO ||
16230 CondOpcode == ISD::UADDO || CondOpcode == ISD::SADDO ||
16231 ((CondOpcode == ISD::UMULO || CondOpcode == ISD::SMULO) &&
16232 Cond.getOperand(0).getValueType() != MVT::i8)) {
16233 SDValue LHS = Cond.getOperand(0);
16234 SDValue RHS = Cond.getOperand(1);
    unsigned X86Opcode;
    unsigned X86Cond;
    SDVTList VTs;
    switch (CondOpcode) {
16239 case ISD::UADDO: X86Opcode = X86ISD::ADD; X86Cond = X86::COND_B; break;
16240 case ISD::SADDO: X86Opcode = X86ISD::ADD; X86Cond = X86::COND_O; break;
16241 case ISD::USUBO: X86Opcode = X86ISD::SUB; X86Cond = X86::COND_B; break;
16242 case ISD::SSUBO: X86Opcode = X86ISD::SUB; X86Cond = X86::COND_O; break;
16243 case ISD::UMULO: X86Opcode = X86ISD::UMUL; X86Cond = X86::COND_O; break;
16244 case ISD::SMULO: X86Opcode = X86ISD::SMUL; X86Cond = X86::COND_O; break;
16245 default: llvm_unreachable("unexpected overflowing operator");
16247 if (CondOpcode == ISD::UMULO)
      VTs = DAG.getVTList(LHS.getValueType(), LHS.getValueType(),
                          MVT::i32);
    else
      VTs = DAG.getVTList(LHS.getValueType(), MVT::i32);
16253 SDValue X86Op = DAG.getNode(X86Opcode, DL, VTs, LHS, RHS);
16255 if (CondOpcode == ISD::UMULO)
      Cond = X86Op.getValue(2);
    else
      Cond = X86Op.getValue(1);
    CC = DAG.getConstant(X86Cond, MVT::i8);
    addTest = false;
  }

  if (addTest) {
    // Look past the truncate if the high bits are known zero.
    if (isTruncWithZeroHighBitsInput(Cond, DAG))
      Cond = Cond.getOperand(0);

    // We know the result of AND is compared against zero. Try to match
    // it to BT.
    if (Cond.getOpcode() == ISD::AND && Cond.hasOneUse()) {
      SDValue NewSetCC = LowerToBT(Cond, ISD::SETNE, DL, DAG);
      if (NewSetCC.getNode()) {
        CC = NewSetCC.getOperand(0);
        Cond = NewSetCC.getOperand(1);
        addTest = false;
      }
    }
  }

  if (addTest) {
    CC = DAG.getConstant(X86::COND_NE, MVT::i8);
    Cond = EmitTest(Cond, X86::COND_NE, DL, DAG);
  }
16286 // a < b ? -1 : 0 -> RES = ~setcc_carry
16287 // a < b ? 0 : -1 -> RES = setcc_carry
16288 // a >= b ? -1 : 0 -> RES = setcc_carry
16289 // a >= b ? 0 : -1 -> RES = ~setcc_carry
16290 if (Cond.getOpcode() == X86ISD::SUB) {
16291 Cond = ConvertCmpIfNecessary(Cond, DAG);
16292 unsigned CondCode = cast<ConstantSDNode>(CC)->getZExtValue();
16294 if ((CondCode == X86::COND_AE || CondCode == X86::COND_B) &&
16295 (isAllOnes(Op1) || isAllOnes(Op2)) && (isZero(Op1) || isZero(Op2))) {
      SDValue Res = DAG.getNode(X86ISD::SETCC_CARRY, DL, Op.getValueType(),
                                DAG.getConstant(X86::COND_B, MVT::i8), Cond);
      if (isAllOnes(Op1) != (CondCode == X86::COND_B))
        return DAG.getNOT(DL, Res, Res.getValueType());
      return Res;
    }
  }
16304 // X86 doesn't have an i8 cmov. If both operands are the result of a truncate
16305 // widen the cmov and push the truncate through. This avoids introducing a new
16306 // branch during isel and doesn't add any extensions.
16307 if (Op.getValueType() == MVT::i8 &&
16308 Op1.getOpcode() == ISD::TRUNCATE && Op2.getOpcode() == ISD::TRUNCATE) {
16309 SDValue T1 = Op1.getOperand(0), T2 = Op2.getOperand(0);
16310 if (T1.getValueType() == T2.getValueType() &&
16311 // Blacklist CopyFromReg to avoid partial register stalls.
16312 T1.getOpcode() != ISD::CopyFromReg && T2.getOpcode()!=ISD::CopyFromReg){
16313 SDVTList VTs = DAG.getVTList(T1.getValueType(), MVT::Glue);
      SDValue Cmov = DAG.getNode(X86ISD::CMOV, DL, VTs, T2, T1, CC, Cond);
      return DAG.getNode(ISD::TRUNCATE, DL, Op.getValueType(), Cmov);
    }
  }
16319 // X86ISD::CMOV means set the result (which is operand 1) to the RHS if
16320 // condition is true.
16321 SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::Glue);
16322 SDValue Ops[] = { Op2, Op1, CC, Cond };
16323 return DAG.getNode(X86ISD::CMOV, DL, VTs, Ops);
16326 static SDValue LowerSIGN_EXTEND_AVX512(SDValue Op, const X86Subtarget *Subtarget,
16327 SelectionDAG &DAG) {
16328 MVT VT = Op->getSimpleValueType(0);
16329 SDValue In = Op->getOperand(0);
16330 MVT InVT = In.getSimpleValueType();
16331 MVT VTElt = VT.getVectorElementType();
  MVT InVTElt = InVT.getVectorElementType();
  SDLoc dl(Op);

  if ((InVTElt == MVT::i1) &&
16337 (((Subtarget->hasBWI() && Subtarget->hasVLX() &&
16338 VT.getSizeInBits() <= 256 && VTElt.getSizeInBits() <= 16)) ||
16340 ((Subtarget->hasBWI() && VT.is512BitVector() &&
16341 VTElt.getSizeInBits() <= 16)) ||
16343 ((Subtarget->hasDQI() && Subtarget->hasVLX() &&
16344 VT.getSizeInBits() <= 256 && VTElt.getSizeInBits() >= 32)) ||
16346 ((Subtarget->hasDQI() && VT.is512BitVector() &&
16347 VTElt.getSizeInBits() >= 32))))
16348 return DAG.getNode(X86ISD::VSEXT, dl, VT, In);
16350 unsigned int NumElts = VT.getVectorNumElements();
  if (NumElts != 8 && NumElts != 16)
    return SDValue();
16355 if (VT.is512BitVector() && InVT.getVectorElementType() != MVT::i1) {
16356 if (In.getOpcode() == X86ISD::VSEXT || In.getOpcode() == X86ISD::VZEXT)
16357 return DAG.getNode(In.getOpcode(), dl, VT, In.getOperand(0));
16358 return DAG.getNode(X86ISD::VSEXT, dl, VT, In);
16361 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
16362 assert (InVT.getVectorElementType() == MVT::i1 && "Unexpected vector type");
16364 MVT ExtVT = (NumElts == 8) ? MVT::v8i64 : MVT::v16i32;
16365 Constant *C = ConstantInt::get(*DAG.getContext(),
16366 APInt::getAllOnesValue(ExtVT.getScalarType().getSizeInBits()));
16368 SDValue CP = DAG.getConstantPool(C, TLI.getPointerTy());
16369 unsigned Alignment = cast<ConstantPoolSDNode>(CP)->getAlignment();
16370 SDValue Ld = DAG.getLoad(ExtVT.getScalarType(), dl, DAG.getEntryNode(), CP,
16371 MachinePointerInfo::getConstantPool(),
16372 false, false, false, Alignment);
16373 SDValue Brcst = DAG.getNode(X86ISD::VBROADCASTM, dl, ExtVT, In, Ld);
  if (VT.is512BitVector())
    return Brcst;
  return DAG.getNode(X86ISD::VTRUNC, dl, VT, Brcst);
}
16379 static SDValue LowerSIGN_EXTEND(SDValue Op, const X86Subtarget *Subtarget,
16380 SelectionDAG &DAG) {
16381 MVT VT = Op->getSimpleValueType(0);
16382 SDValue In = Op->getOperand(0);
  MVT InVT = In.getSimpleValueType();
  SDLoc dl(Op);

  if (VT.is512BitVector() || InVT.getVectorElementType() == MVT::i1)
16387 return LowerSIGN_EXTEND_AVX512(Op, Subtarget, DAG);
16389 if ((VT != MVT::v4i64 || InVT != MVT::v4i32) &&
16390 (VT != MVT::v8i32 || InVT != MVT::v8i16) &&
      (VT != MVT::v16i16 || InVT != MVT::v16i8))
    return SDValue();
16394 if (Subtarget->hasInt256())
16395 return DAG.getNode(X86ISD::VSEXT, dl, VT, In);
16397 // Optimize vectors in AVX mode
  // Sign extend  v8i16 to v8i32 and
  //              v4i32 to v4i64
16401 // Divide input vector into two parts
16402 // for v4i32 the shuffle mask will be { 0, 1, -1, -1} {2, 3, -1, -1}
16403 // use vpmovsx instruction to extend v4i32 -> v2i64; v8i16 -> v4i32
16404 // concat the vectors to original VT
16406 unsigned NumElems = InVT.getVectorNumElements();
16407 SDValue Undef = DAG.getUNDEF(InVT);
16409 SmallVector<int,8> ShufMask1(NumElems, -1);
  for (unsigned i = 0; i != NumElems/2; ++i)
    ShufMask1[i] = i;
16413 SDValue OpLo = DAG.getVectorShuffle(InVT, dl, In, Undef, &ShufMask1[0]);
16415 SmallVector<int,8> ShufMask2(NumElems, -1);
16416 for (unsigned i = 0; i != NumElems/2; ++i)
16417 ShufMask2[i] = i + NumElems/2;
16419 SDValue OpHi = DAG.getVectorShuffle(InVT, dl, In, Undef, &ShufMask2[0]);
16421 MVT HalfVT = MVT::getVectorVT(VT.getScalarType(),
16422 VT.getVectorNumElements()/2);
16424 OpLo = DAG.getNode(X86ISD::VSEXT, dl, HalfVT, OpLo);
16425 OpHi = DAG.getNode(X86ISD::VSEXT, dl, HalfVT, OpHi);
16427 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, OpLo, OpHi);
16430 // Lower vector extended loads using a shuffle. If SSSE3 is not available we
16431 // may emit an illegal shuffle but the expansion is still better than scalar
16432 // code. We generate X86ISD::VSEXT for SEXTLOADs if it's available, otherwise
// we'll emit a shuffle and an arithmetic shift.
16434 // FIXME: Is the expansion actually better than scalar code? It doesn't seem so.
16435 // TODO: It is possible to support ZExt by zeroing the undef values during
16436 // the shuffle phase or after the shuffle.
16437 static SDValue LowerExtendedLoad(SDValue Op, const X86Subtarget *Subtarget,
16438 SelectionDAG &DAG) {
16439 MVT RegVT = Op.getSimpleValueType();
16440 assert(RegVT.isVector() && "We only custom lower vector sext loads.");
16441 assert(RegVT.isInteger() &&
16442 "We only custom lower integer vector sext loads.");
16444 // Nothing useful we can do without SSE2 shuffles.
16445 assert(Subtarget->hasSSE2() && "We only custom lower sext loads with SSE2.");
16447 LoadSDNode *Ld = cast<LoadSDNode>(Op.getNode());
  SDLoc dl(Ld);
  EVT MemVT = Ld->getMemoryVT();
16450 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
16451 unsigned RegSz = RegVT.getSizeInBits();
16453 ISD::LoadExtType Ext = Ld->getExtensionType();
16455 assert((Ext == ISD::EXTLOAD || Ext == ISD::SEXTLOAD)
16456 && "Only anyext and sext are currently implemented.");
16457 assert(MemVT != RegVT && "Cannot extend to the same type");
16458 assert(MemVT.isVector() && "Must load a vector from memory");
16460 unsigned NumElems = RegVT.getVectorNumElements();
16461 unsigned MemSz = MemVT.getSizeInBits();
16462 assert(RegSz > MemSz && "Register size must be greater than the mem size");
16464 if (Ext == ISD::SEXTLOAD && RegSz == 256 && !Subtarget->hasInt256()) {
16465 // The only way in which we have a legal 256-bit vector result but not the
16466 // integer 256-bit operations needed to directly lower a sextload is if we
16467 // have AVX1 but not AVX2. In that case, we can always emit a sextload to
16468 // a 128-bit vector and a normal sign_extend to 256-bits that should get
16469 // correctly legalized. We do this late to allow the canonical form of
16470 // sextload to persist throughout the rest of the DAG combiner -- it wants
16471 // to fold together any extensions it can, and so will fuse a sign_extend
16472 // of an sextload into a sextload targeting a wider value.
    SDValue Load;
    if (MemSz == 128) {
16475 // Just switch this to a normal load.
16476 assert(TLI.isTypeLegal(MemVT) && "If the memory type is a 128-bit type, "
16477 "it must be a legal 128-bit vector "
16479 Load = DAG.getLoad(MemVT, dl, Ld->getChain(), Ld->getBasePtr(),
16480 Ld->getPointerInfo(), Ld->isVolatile(), Ld->isNonTemporal(),
16481 Ld->isInvariant(), Ld->getAlignment());
    } else {
      assert(MemSz < 128 &&
16484 "Can't extend a type wider than 128 bits to a 256 bit vector!");
16485 // Do an sext load to a 128-bit vector type. We want to use the same
16486 // number of elements, but elements half as wide. This will end up being
16487 // recursively lowered by this routine, but will succeed as we definitely
16488 // have all the necessary features if we're using AVX1.
      EVT HalfEltVT =
          EVT::getIntegerVT(*DAG.getContext(), RegVT.getScalarSizeInBits() / 2);
16491 EVT HalfVecVT = EVT::getVectorVT(*DAG.getContext(), HalfEltVT, NumElems);
      Load =
          DAG.getExtLoad(Ext, dl, HalfVecVT, Ld->getChain(), Ld->getBasePtr(),
16494 Ld->getPointerInfo(), MemVT, Ld->isVolatile(),
16495 Ld->isNonTemporal(), Ld->isInvariant(),
16496 Ld->getAlignment());
16499 // Replace chain users with the new chain.
16500 assert(Load->getNumValues() == 2 && "Loads must carry a chain!");
16501 DAG.ReplaceAllUsesOfValueWith(SDValue(Ld, 1), Load.getValue(1));
16503 // Finally, do a normal sign-extend to the desired register.
16504 return DAG.getSExtOrTrunc(Load, dl, RegVT);
16507 // All sizes must be a power of two.
16508 assert(isPowerOf2_32(RegSz * MemSz * NumElems) &&
16509 "Non-power-of-two elements are not custom lowered!");
16511 // Attempt to load the original value using scalar loads.
16512 // Find the largest scalar type that divides the total loaded size.
16513 MVT SclrLoadTy = MVT::i8;
  for (MVT Tp : MVT::integer_valuetypes()) {
    if (TLI.isTypeLegal(Tp) && ((MemSz % Tp.getSizeInBits()) == 0)) {
      SclrLoadTy = Tp;
    }
  }

  // On 32bit systems, we can't save 64bit integers. Try bitcasting to F64.
  if (TLI.isTypeLegal(MVT::f64) && SclrLoadTy.getSizeInBits() < 64 &&
      (64 <= MemSz))
    SclrLoadTy = MVT::f64;
16525 // Calculate the number of scalar loads that we need to perform
16526 // in order to load our vector from memory.
16527 unsigned NumLoads = MemSz / SclrLoadTy.getSizeInBits();
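  // Illustrative example (hypothetical types): an extload of v4i8 into v4i32
  // has MemSz == 32, so with a legal i32 scalar type SclrLoadTy is i32 and
  // NumLoads == 1. A v8i8 source would instead use a single i64 load on
  // 64-bit targets, or the f64 bitcast load selected above on 32-bit targets.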
16529 assert((Ext != ISD::SEXTLOAD || NumLoads == 1) &&
16530 "Can only lower sext loads with a single scalar load!");
16532 unsigned loadRegZize = RegSz;
  if (Ext == ISD::SEXTLOAD && RegSz == 256)
    loadRegZize = 128;
16536 // Represent our vector as a sequence of elements which are the
16537 // largest scalar that we can load.
16538 EVT LoadUnitVecVT = EVT::getVectorVT(
16539 *DAG.getContext(), SclrLoadTy, loadRegZize / SclrLoadTy.getSizeInBits());
16541 // Represent the data using the same element type that is stored in
  // memory. In practice, we ''widen'' MemVT.
  EVT WideVecVT =
      EVT::getVectorVT(*DAG.getContext(), MemVT.getScalarType(),
16545 loadRegZize / MemVT.getScalarType().getSizeInBits());
16547 assert(WideVecVT.getSizeInBits() == LoadUnitVecVT.getSizeInBits() &&
16548 "Invalid vector type");
16550 // We can't shuffle using an illegal type.
16551 assert(TLI.isTypeLegal(WideVecVT) &&
16552 "We only lower types that form legal widened vector types");
16554 SmallVector<SDValue, 8> Chains;
16555 SDValue Ptr = Ld->getBasePtr();
16556 SDValue Increment =
16557 DAG.getConstant(SclrLoadTy.getSizeInBits() / 8, TLI.getPointerTy());
16558 SDValue Res = DAG.getUNDEF(LoadUnitVecVT);
16560 for (unsigned i = 0; i < NumLoads; ++i) {
16561 // Perform a single load.
16562 SDValue ScalarLoad =
16563 DAG.getLoad(SclrLoadTy, dl, Ld->getChain(), Ptr, Ld->getPointerInfo(),
16564 Ld->isVolatile(), Ld->isNonTemporal(), Ld->isInvariant(),
16565 Ld->getAlignment());
16566 Chains.push_back(ScalarLoad.getValue(1));
16567 // Create the first element type using SCALAR_TO_VECTOR in order to avoid
16568 // another round of DAGCombining.
    if (i == 0)
      Res = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, LoadUnitVecVT, ScalarLoad);
    else
      Res = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, LoadUnitVecVT, Res,
                        ScalarLoad, DAG.getIntPtrConstant(i));
    Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, Increment);
  }
16578 SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Chains);
16580 // Bitcast the loaded value to a vector of the original element type, in
16581 // the size of the target vector type.
16582 SDValue SlicedVec = DAG.getNode(ISD::BITCAST, dl, WideVecVT, Res);
16583 unsigned SizeRatio = RegSz / MemSz;
16585 if (Ext == ISD::SEXTLOAD) {
16586 // If we have SSE4.1, we can directly emit a VSEXT node.
16587 if (Subtarget->hasSSE41()) {
      SDValue Sext = DAG.getNode(X86ISD::VSEXT, dl, RegVT, SlicedVec);
      DAG.ReplaceAllUsesOfValueWith(SDValue(Ld, 1), TF);
      return Sext;
    }
16593 // Otherwise we'll shuffle the small elements in the high bits of the
16594 // larger type and perform an arithmetic shift. If the shift is not legal
16595 // it's better to scalarize.
16596 assert(TLI.isOperationLegalOrCustom(ISD::SRA, RegVT) &&
16597 "We can't implement a sext load without an arithmetic right shift!");
16599 // Redistribute the loaded elements into the different locations.
16600 SmallVector<int, 16> ShuffleVec(NumElems * SizeRatio, -1);
16601 for (unsigned i = 0; i != NumElems; ++i)
16602 ShuffleVec[i * SizeRatio + SizeRatio - 1] = i;
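    // Illustrative example (hypothetical types): sign-extending v4i8 to v4i32
    // gives SizeRatio == 4, so loaded byte i is placed at index i * 4 + 3,
    // i.e. the most significant byte of its destination dword; the arithmetic
    // shift right by 24 built below then completes the sign extension.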
16604 SDValue Shuff = DAG.getVectorShuffle(
16605 WideVecVT, dl, SlicedVec, DAG.getUNDEF(WideVecVT), &ShuffleVec[0]);
16607 Shuff = DAG.getNode(ISD::BITCAST, dl, RegVT, Shuff);
16609 // Build the arithmetic shift.
    unsigned Amt = RegVT.getVectorElementType().getSizeInBits() -
                   MemVT.getVectorElementType().getSizeInBits();
    Shuff =
        DAG.getNode(ISD::SRA, dl, RegVT, Shuff, DAG.getConstant(Amt, RegVT));

    DAG.ReplaceAllUsesOfValueWith(SDValue(Ld, 1), TF);
    return Shuff;
  }
16619 // Redistribute the loaded elements into the different locations.
16620 SmallVector<int, 16> ShuffleVec(NumElems * SizeRatio, -1);
16621 for (unsigned i = 0; i != NumElems; ++i)
16622 ShuffleVec[i * SizeRatio] = i;
16624 SDValue Shuff = DAG.getVectorShuffle(WideVecVT, dl, SlicedVec,
16625 DAG.getUNDEF(WideVecVT), &ShuffleVec[0]);
16627 // Bitcast to the requested type.
  Shuff = DAG.getNode(ISD::BITCAST, dl, RegVT, Shuff);
  DAG.ReplaceAllUsesOfValueWith(SDValue(Ld, 1), TF);
  return Shuff;
}
16633 // isAndOrOfSingleUseSetCCs - Return true if node is an ISD::AND or
16634 // ISD::OR of two X86ISD::SETCC nodes each of which has no other use apart
16635 // from the AND / OR.
16636 static bool isAndOrOfSetCCs(SDValue Op, unsigned &Opc) {
16637 Opc = Op.getOpcode();
  if (Opc != ISD::OR && Opc != ISD::AND)
    return false;
16640 return (Op.getOperand(0).getOpcode() == X86ISD::SETCC &&
16641 Op.getOperand(0).hasOneUse() &&
16642 Op.getOperand(1).getOpcode() == X86ISD::SETCC &&
16643 Op.getOperand(1).hasOneUse());
16646 // isXor1OfSetCC - Return true if node is an ISD::XOR of a X86ISD::SETCC and
16647 // 1 and that the SETCC node has a single use.
16648 static bool isXor1OfSetCC(SDValue Op) {
  if (Op.getOpcode() != ISD::XOR)
    return false;
16651 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(Op.getOperand(1));
  if (N1C && N1C->getAPIntValue() == 1) {
    return Op.getOperand(0).getOpcode() == X86ISD::SETCC &&
           Op.getOperand(0).hasOneUse();
  }
  return false;
}
16659 SDValue X86TargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
16660 bool addTest = true;
16661 SDValue Chain = Op.getOperand(0);
16662 SDValue Cond = Op.getOperand(1);
  SDValue Dest = Op.getOperand(2);
  SDLoc dl(Op);
  SDValue CC;
  bool Inverted = false;
16668 if (Cond.getOpcode() == ISD::SETCC) {
16669 // Check for setcc([su]{add,sub,mul}o == 0).
16670 if (cast<CondCodeSDNode>(Cond.getOperand(2))->get() == ISD::SETEQ &&
16671 isa<ConstantSDNode>(Cond.getOperand(1)) &&
16672 cast<ConstantSDNode>(Cond.getOperand(1))->isNullValue() &&
16673 Cond.getOperand(0).getResNo() == 1 &&
16674 (Cond.getOperand(0).getOpcode() == ISD::SADDO ||
16675 Cond.getOperand(0).getOpcode() == ISD::UADDO ||
16676 Cond.getOperand(0).getOpcode() == ISD::SSUBO ||
16677 Cond.getOperand(0).getOpcode() == ISD::USUBO ||
         Cond.getOperand(0).getOpcode() == ISD::SMULO ||
         Cond.getOperand(0).getOpcode() == ISD::UMULO)) {
      Inverted = true;
      Cond = Cond.getOperand(0);
    } else {
      SDValue NewCond = LowerSETCC(Cond, DAG);
      if (NewCond.getNode())
        Cond = NewCond;
    }
  }
16689 // FIXME: LowerXALUO doesn't handle these!!
16690 else if (Cond.getOpcode() == X86ISD::ADD ||
16691 Cond.getOpcode() == X86ISD::SUB ||
16692 Cond.getOpcode() == X86ISD::SMUL ||
16693 Cond.getOpcode() == X86ISD::UMUL)
16694 Cond = LowerXALUO(Cond, DAG);
  // Look past (and (setcc_carry (cmp ...)), 1).
16698 if (Cond.getOpcode() == ISD::AND &&
16699 Cond.getOperand(0).getOpcode() == X86ISD::SETCC_CARRY) {
16700 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Cond.getOperand(1));
16701 if (C && C->getAPIntValue() == 1)
16702 Cond = Cond.getOperand(0);
16705 // If condition flag is set by a X86ISD::CMP, then use it as the condition
16706 // setting operand in place of the X86ISD::SETCC.
16707 unsigned CondOpcode = Cond.getOpcode();
16708 if (CondOpcode == X86ISD::SETCC ||
16709 CondOpcode == X86ISD::SETCC_CARRY) {
16710 CC = Cond.getOperand(0);
16712 SDValue Cmp = Cond.getOperand(1);
16713 unsigned Opc = Cmp.getOpcode();
16714 // FIXME: WHY THE SPECIAL CASING OF LogicalCmp??
16715 if (isX86LogicalCmp(Cmp) || Opc == X86ISD::BT) {
16719 switch (cast<ConstantSDNode>(CC)->getZExtValue()) {
16723 // These can only come from an arithmetic instruction with overflow,
16724 // e.g. SADDO, UADDO.
16725 Cond = Cond.getNode()->getOperand(1);
16731 CondOpcode = Cond.getOpcode();
16732 if (CondOpcode == ISD::UADDO || CondOpcode == ISD::SADDO ||
16733 CondOpcode == ISD::USUBO || CondOpcode == ISD::SSUBO ||
16734 ((CondOpcode == ISD::UMULO || CondOpcode == ISD::SMULO) &&
16735 Cond.getOperand(0).getValueType() != MVT::i8)) {
16736 SDValue LHS = Cond.getOperand(0);
16737 SDValue RHS = Cond.getOperand(1);
    unsigned X86Opcode;
    unsigned X86Cond;
    SDVTList VTs;
    // Keep this in sync with LowerXALUO, otherwise we might create redundant
16742 // instructions that can't be removed afterwards (i.e. X86ISD::ADD and
16744 switch (CondOpcode) {
    case ISD::UADDO: X86Opcode = X86ISD::ADD; X86Cond = X86::COND_B; break;
    case ISD::SADDO:
      if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS))
        if (C->isOne()) {
          X86Opcode = X86ISD::INC; X86Cond = X86::COND_O;
          break;
        }
      X86Opcode = X86ISD::ADD; X86Cond = X86::COND_O; break;
    case ISD::USUBO: X86Opcode = X86ISD::SUB; X86Cond = X86::COND_B; break;
    case ISD::SSUBO:
      if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS))
        if (C->isOne()) {
          X86Opcode = X86ISD::DEC; X86Cond = X86::COND_O;
          break;
        }
      X86Opcode = X86ISD::SUB; X86Cond = X86::COND_O; break;
16761 case ISD::UMULO: X86Opcode = X86ISD::UMUL; X86Cond = X86::COND_O; break;
16762 case ISD::SMULO: X86Opcode = X86ISD::SMUL; X86Cond = X86::COND_O; break;
16763 default: llvm_unreachable("unexpected overflowing operator");
    if (Inverted)
      X86Cond = X86::GetOppositeBranchCondition((X86::CondCode)X86Cond);
16767 if (CondOpcode == ISD::UMULO)
      VTs = DAG.getVTList(LHS.getValueType(), LHS.getValueType(),
                          MVT::i32);
    else
      VTs = DAG.getVTList(LHS.getValueType(), MVT::i32);
16773 SDValue X86Op = DAG.getNode(X86Opcode, dl, VTs, LHS, RHS);
    if (CondOpcode == ISD::UMULO)
      Cond = X86Op.getValue(2);
    else
      Cond = X86Op.getValue(1);
    CC = DAG.getConstant(X86Cond, MVT::i8);
    addTest = false;
  } else {
    unsigned CondOpc;
16784 if (Cond.hasOneUse() && isAndOrOfSetCCs(Cond, CondOpc)) {
16785 SDValue Cmp = Cond.getOperand(0).getOperand(1);
16786 if (CondOpc == ISD::OR) {
16787 // Also, recognize the pattern generated by an FCMP_UNE. We can emit
16788 // two branches instead of an explicit OR instruction with a
16790 if (Cmp == Cond.getOperand(1).getOperand(1) &&
16791 isX86LogicalCmp(Cmp)) {
16792 CC = Cond.getOperand(0).getOperand(0);
16793 Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
16794 Chain, Dest, CC, Cmp);
16795 CC = Cond.getOperand(1).getOperand(0);
16799 } else { // ISD::AND
16800 // Also, recognize the pattern generated by an FCMP_OEQ. We can emit
16801 // two branches instead of an explicit AND instruction with a
16802 // separate test. However, we only do this if this block doesn't
16803 // have a fall-through edge, because this requires an explicit
16804 // jmp when the condition is false.
16805 if (Cmp == Cond.getOperand(1).getOperand(1) &&
16806 isX86LogicalCmp(Cmp) &&
16807 Op.getNode()->hasOneUse()) {
16808 X86::CondCode CCode =
16809 (X86::CondCode)Cond.getOperand(0).getConstantOperandVal(0);
16810 CCode = X86::GetOppositeBranchCondition(CCode);
16811 CC = DAG.getConstant(CCode, MVT::i8);
16812 SDNode *User = *Op.getNode()->use_begin();
16813 // Look for an unconditional branch following this conditional branch.
16814 // We need this because we need to reverse the successors in order
16815 // to implement FCMP_OEQ.
16816 if (User->getOpcode() == ISD::BR) {
16817 SDValue FalseBB = User->getOperand(1);
16819 DAG.UpdateNodeOperands(User, User->getOperand(0), Dest);
16820 assert(NewBR == User);
16824 Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
16825 Chain, Dest, CC, Cmp);
16826 X86::CondCode CCode =
16827 (X86::CondCode)Cond.getOperand(1).getConstantOperandVal(0);
16828 CCode = X86::GetOppositeBranchCondition(CCode);
16829 CC = DAG.getConstant(CCode, MVT::i8);
16835 } else if (Cond.hasOneUse() && isXor1OfSetCC(Cond)) {
16836 // Recognize for xorb (setcc), 1 patterns. The xor inverts the condition.
    // It should be transformed during dag combiner except when the condition
    // is set by an arithmetic-with-overflow node.
16839 X86::CondCode CCode =
16840 (X86::CondCode)Cond.getOperand(0).getConstantOperandVal(0);
16841 CCode = X86::GetOppositeBranchCondition(CCode);
16842 CC = DAG.getConstant(CCode, MVT::i8);
16843 Cond = Cond.getOperand(0).getOperand(1);
16845 } else if (Cond.getOpcode() == ISD::SETCC &&
16846 cast<CondCodeSDNode>(Cond.getOperand(2))->get() == ISD::SETOEQ) {
16847 // For FCMP_OEQ, we can emit
16848 // two branches instead of an explicit AND instruction with a
16849 // separate test. However, we only do this if this block doesn't
16850 // have a fall-through edge, because this requires an explicit
16851 // jmp when the condition is false.
16852 if (Op.getNode()->hasOneUse()) {
16853 SDNode *User = *Op.getNode()->use_begin();
16854 // Look for an unconditional branch following this conditional branch.
16855 // We need this because we need to reverse the successors in order
16856 // to implement FCMP_OEQ.
16857 if (User->getOpcode() == ISD::BR) {
16858 SDValue FalseBB = User->getOperand(1);
16860 DAG.UpdateNodeOperands(User, User->getOperand(0), Dest);
16861 assert(NewBR == User);
16865 SDValue Cmp = DAG.getNode(X86ISD::CMP, dl, MVT::i32,
16866 Cond.getOperand(0), Cond.getOperand(1));
16867 Cmp = ConvertCmpIfNecessary(Cmp, DAG);
16868 CC = DAG.getConstant(X86::COND_NE, MVT::i8);
16869 Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
16870 Chain, Dest, CC, Cmp);
16871 CC = DAG.getConstant(X86::COND_P, MVT::i8);
16876 } else if (Cond.getOpcode() == ISD::SETCC &&
16877 cast<CondCodeSDNode>(Cond.getOperand(2))->get() == ISD::SETUNE) {
16878 // For FCMP_UNE, we can emit
16879 // two branches instead of an explicit AND instruction with a
16880 // separate test. However, we only do this if this block doesn't
16881 // have a fall-through edge, because this requires an explicit
16882 // jmp when the condition is false.
16883 if (Op.getNode()->hasOneUse()) {
16884 SDNode *User = *Op.getNode()->use_begin();
16885 // Look for an unconditional branch following this conditional branch.
16886 // We need this because we need to reverse the successors in order
16887 // to implement FCMP_UNE.
16888 if (User->getOpcode() == ISD::BR) {
16889 SDValue FalseBB = User->getOperand(1);
16891 DAG.UpdateNodeOperands(User, User->getOperand(0), Dest);
16892 assert(NewBR == User);
16895 SDValue Cmp = DAG.getNode(X86ISD::CMP, dl, MVT::i32,
16896 Cond.getOperand(0), Cond.getOperand(1));
16897 Cmp = ConvertCmpIfNecessary(Cmp, DAG);
16898 CC = DAG.getConstant(X86::COND_NE, MVT::i8);
16899 Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
16900 Chain, Dest, CC, Cmp);
16901 CC = DAG.getConstant(X86::COND_NP, MVT::i8);
    // Look past the truncate if the high bits are known zero.
16912 if (isTruncWithZeroHighBitsInput(Cond, DAG))
16913 Cond = Cond.getOperand(0);
16915 // We know the result of AND is compared against zero. Try to match
16917 if (Cond.getOpcode() == ISD::AND && Cond.hasOneUse()) {
16918 SDValue NewSetCC = LowerToBT(Cond, ISD::SETNE, dl, DAG);
16919 if (NewSetCC.getNode()) {
16920 CC = NewSetCC.getOperand(0);
16921 Cond = NewSetCC.getOperand(1);
16928 X86::CondCode X86Cond = Inverted ? X86::COND_E : X86::COND_NE;
16929 CC = DAG.getConstant(X86Cond, MVT::i8);
16930 Cond = EmitTest(Cond, X86Cond, dl, DAG);
16932 Cond = ConvertCmpIfNecessary(Cond, DAG);
16933 return DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
16934 Chain, Dest, CC, Cond);
16937 // Lower dynamic stack allocation to _alloca call for Cygwin/Mingw targets.
16938 // Calls to _alloca are needed to probe the stack when allocating more than 4k
16939 // bytes in one go. Touching the stack at 4K increments is necessary to ensure
16940 // that the guard pages used by the OS virtual memory manager are allocated in
16941 // correct sequence.
16943 X86TargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
16944 SelectionDAG &DAG) const {
16945 MachineFunction &MF = DAG.getMachineFunction();
  bool SplitStack = MF.shouldSplitStack();
  bool Lower = (Subtarget->isOSWindows() && !Subtarget->isTargetMachO()) ||
               SplitStack;
  SDLoc dl(Op);
  if (!Lower) {
    const TargetLowering &TLI = DAG.getTargetLoweringInfo();
16953 SDNode* Node = Op.getNode();
16955 unsigned SPReg = TLI.getStackPointerRegisterToSaveRestore();
16956 assert(SPReg && "Target cannot require DYNAMIC_STACKALLOC expansion and"
16957 " not tell us which reg is the stack pointer!");
16958 EVT VT = Node->getValueType(0);
16959 SDValue Tmp1 = SDValue(Node, 0);
16960 SDValue Tmp2 = SDValue(Node, 1);
16961 SDValue Tmp3 = Node->getOperand(2);
16962 SDValue Chain = Tmp1.getOperand(0);
16964 // Chain the dynamic stack allocation so that it doesn't modify the stack
16965 // pointer when other instructions are using the stack.
    Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(0, true),
                                 SDLoc(Node));
16969 SDValue Size = Tmp2.getOperand(1);
16970 SDValue SP = DAG.getCopyFromReg(Chain, dl, SPReg, VT);
16971 Chain = SP.getValue(1);
16972 unsigned Align = cast<ConstantSDNode>(Tmp3)->getZExtValue();
16973 const TargetFrameLowering &TFI = *Subtarget->getFrameLowering();
16974 unsigned StackAlign = TFI.getStackAlignment();
16975 Tmp1 = DAG.getNode(ISD::SUB, dl, VT, SP, Size); // Value
16976 if (Align > StackAlign)
16977 Tmp1 = DAG.getNode(ISD::AND, dl, VT, Tmp1,
16978 DAG.getConstant(-(uint64_t)Align, VT));
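    // Explanatory note: -(uint64_t)Align is the all-ones mask with the low
    // log2(Align) bits cleared, e.g. Align == 32 gives ...FFFFFFE0, so the
    // AND rounds the new stack pointer down to the requested alignment.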
16979 Chain = DAG.getCopyToReg(Chain, dl, SPReg, Tmp1); // Output chain
16981 Tmp2 = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(0, true),
                              DAG.getIntPtrConstant(0, true), SDValue(),
                              SDLoc(Node));
16985 SDValue Ops[2] = { Tmp1, Tmp2 };
16986 return DAG.getMergeValues(Ops, dl);
16990 SDValue Chain = Op.getOperand(0);
16991 SDValue Size = Op.getOperand(1);
16992 unsigned Align = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue();
16993 EVT VT = Op.getNode()->getValueType(0);
16995 bool Is64Bit = Subtarget->is64Bit();
16996 EVT SPTy = getPointerTy();
  if (SplitStack) {
    MachineRegisterInfo &MRI = MF.getRegInfo();
    if (Is64Bit) {
      // The 64 bit implementation of segmented stacks needs to clobber both r10
      // and r11. This makes it impossible to use it along with nested parameters.
17004 const Function *F = MF.getFunction();
      for (Function::const_arg_iterator I = F->arg_begin(), E = F->arg_end();
           I != E; ++I)
17008 if (I->hasNestAttr())
17009 report_fatal_error("Cannot use segmented stacks with functions that "
17010 "have nested arguments.");
17013 const TargetRegisterClass *AddrRegClass =
17014 getRegClassFor(getPointerTy());
17015 unsigned Vreg = MRI.createVirtualRegister(AddrRegClass);
17016 Chain = DAG.getCopyToReg(Chain, dl, Vreg, Size);
17017 SDValue Value = DAG.getNode(X86ISD::SEG_ALLOCA, dl, SPTy, Chain,
17018 DAG.getRegister(Vreg, SPTy));
17019 SDValue Ops1[2] = { Value, Chain };
    return DAG.getMergeValues(Ops1, dl);
  } else {
    SDValue Flag;
    const unsigned Reg = (Subtarget->isTarget64BitLP64() ? X86::RAX : X86::EAX);
17025 Chain = DAG.getCopyToReg(Chain, dl, Reg, Size, Flag);
17026 Flag = Chain.getValue(1);
17027 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
17029 Chain = DAG.getNode(X86ISD::WIN_ALLOCA, dl, NodeTys, Chain, Flag);
17031 const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
17032 unsigned SPReg = RegInfo->getStackRegister();
17033 SDValue SP = DAG.getCopyFromReg(Chain, dl, SPReg, SPTy);
17034 Chain = SP.getValue(1);
    if (Align) {
      SP = DAG.getNode(ISD::AND, dl, VT, SP.getValue(0),
                       DAG.getConstant(-(uint64_t)Align, VT));
      Chain = DAG.getCopyToReg(Chain, dl, SPReg, SP);
    }

    SDValue Ops1[2] = { SP, Chain };
    return DAG.getMergeValues(Ops1, dl);
  }
}
17047 SDValue X86TargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const {
17048 MachineFunction &MF = DAG.getMachineFunction();
17049 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
  SDLoc DL(Op);
17054 if (!Subtarget->is64Bit() || Subtarget->isTargetWin64()) {
17055 // vastart just stores the address of the VarArgsFrameIndex slot into the
17056 // memory location argument.
    SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(),
                                   getPointerTy());
17059 return DAG.getStore(Op.getOperand(0), DL, FR, Op.getOperand(1),
                        MachinePointerInfo(SV), false, false, 0);
  }

  // __va_list_tag:
  //   gp_offset         (0 - 6 * 8)
17065 // fp_offset (48 - 48 + 8 * 16)
  //   overflow_arg_area (point to parameters coming in memory).
  //   reg_save_area
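  // For reference, the SysV x86-64 va_list layout that the stores below
  // assume (the offsets 0, 4, 8 and 16 match the FIN increments):
  //   struct __va_list_tag {
  //     unsigned gp_offset;          // offset 0
  //     unsigned fp_offset;          // offset 4
  //     void    *overflow_arg_area;  // offset 8
  //     void    *reg_save_area;      // offset 16
  //   };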
17068 SmallVector<SDValue, 8> MemOps;
17069 SDValue FIN = Op.getOperand(1);
17071 SDValue Store = DAG.getStore(Op.getOperand(0), DL,
                               DAG.getConstant(FuncInfo->getVarArgsGPOffset(),
                                               MVT::i32),
17074 FIN, MachinePointerInfo(SV), false, false, 0);
17075 MemOps.push_back(Store);
17078 FIN = DAG.getNode(ISD::ADD, DL, getPointerTy(),
17079 FIN, DAG.getIntPtrConstant(4));
17080 Store = DAG.getStore(Op.getOperand(0), DL,
17081 DAG.getConstant(FuncInfo->getVarArgsFPOffset(),
17082 MVT::i32),
17083 FIN, MachinePointerInfo(SV, 4), false, false, 0);
17084 MemOps.push_back(Store);
17086 // Store ptr to overflow_arg_area
17087 FIN = DAG.getNode(ISD::ADD, DL, getPointerTy(),
17088 FIN, DAG.getIntPtrConstant(4));
17089 SDValue OVFIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(),
17090 getPointerTy());
17091 Store = DAG.getStore(Op.getOperand(0), DL, OVFIN, FIN,
17092 MachinePointerInfo(SV, 8),
17093 false, false, 0);
17094 MemOps.push_back(Store);
17096 // Store ptr to reg_save_area.
17097 FIN = DAG.getNode(ISD::ADD, DL, getPointerTy(),
17098 FIN, DAG.getIntPtrConstant(8));
17099 SDValue RSFIN = DAG.getFrameIndex(FuncInfo->getRegSaveFrameIndex(),
17100 getPointerTy());
17101 Store = DAG.getStore(Op.getOperand(0), DL, RSFIN, FIN,
17102 MachinePointerInfo(SV, 16), false, false, 0);
17103 MemOps.push_back(Store);
17104 return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOps);
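// --- Added illustration (not part of the original source) ---
// The four stores built above populate the SysV x86-64 va_list object, which
// in C terms is laid out as:
//
//   typedef struct {
//     unsigned int gp_offset;      // offset  0: next GPR slot in reg_save_area
//     unsigned int fp_offset;      // offset  4: next XMM slot in reg_save_area
//     void *overflow_arg_area;     // offset  8: next stack-passed argument
//     void *reg_save_area;         // offset 16: base of the register save area
//   } __va_list_tag;
//
// which is why the MachinePointerInfo offsets used above are 0, 4, 8 and 16.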
17107 SDValue X86TargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG) const {
17108 assert(Subtarget->is64Bit() &&
17109 "LowerVAARG only handles 64-bit va_arg!");
17110 assert((Subtarget->isTargetLinux() ||
17111 Subtarget->isTargetDarwin()) &&
17112 "Unhandled target in LowerVAARG");
17113 assert(Op.getNode()->getNumOperands() == 4);
17114 SDValue Chain = Op.getOperand(0);
17115 SDValue SrcPtr = Op.getOperand(1);
17116 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
17117 unsigned Align = Op.getConstantOperandVal(3);
17120 EVT ArgVT = Op.getNode()->getValueType(0);
17121 Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
17122 uint32_t ArgSize = getDataLayout()->getTypeAllocSize(ArgTy);
17125 // Decide which area this value should be read from.
17126 // TODO: Implement the AMD64 ABI in its entirety. This simple
17127 // selection mechanism works only for the basic types.
17128 if (ArgVT == MVT::f80) {
17129 llvm_unreachable("va_arg for f80 not yet implemented");
17130 } else if (ArgVT.isFloatingPoint() && ArgSize <= 16 /*bytes*/) {
17131 ArgMode = 2; // Argument passed in XMM register. Use fp_offset.
17132 } else if (ArgVT.isInteger() && ArgSize <= 32 /*bytes*/) {
17133 ArgMode = 1; // Argument passed in GPR64 register(s). Use gp_offset.
17135 llvm_unreachable("Unhandled argument type in LowerVAARG");
17138 if (ArgMode == 2) {
17139 // Sanity Check: Make sure using fp_offset makes sense.
17140 assert(!DAG.getTarget().Options.UseSoftFloat &&
17141 !(DAG.getMachineFunction().getFunction()->hasFnAttribute(
17142 Attribute::NoImplicitFloat)) &&
17143 Subtarget->hasSSE1());
17146 // Insert VAARG_64 node into the DAG
17147 // VAARG_64 returns two values: Variable Argument Address, Chain
17148 SmallVector<SDValue, 11> InstOps;
17149 InstOps.push_back(Chain);
17150 InstOps.push_back(SrcPtr);
17151 InstOps.push_back(DAG.getConstant(ArgSize, MVT::i32));
17152 InstOps.push_back(DAG.getConstant(ArgMode, MVT::i8));
17153 InstOps.push_back(DAG.getConstant(Align, MVT::i32));
17154 SDVTList VTs = DAG.getVTList(getPointerTy(), MVT::Other);
17155 SDValue VAARG = DAG.getMemIntrinsicNode(X86ISD::VAARG_64, dl,
17156 VTs, InstOps, MVT::i64,
17157 MachinePointerInfo(SV),
17158 /*Align=*/0,
17159 /*Volatile=*/false,
17160 /*ReadMem=*/true,
17161 /*WriteMem=*/true);
17162 Chain = VAARG.getValue(1);
17164 // Load the next argument and return it
17165 return DAG.getLoad(ArgVT, dl,
17166 Chain,
17167 VAARG,
17168 MachinePointerInfo(),
17169 false, false, false, 0);
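// --- Added illustration (not part of the original source) ---
// Rough sketch of what the VAARG_64 pseudo built above expands to later (the
// standard SysV va_arg protocol; exact bounds and alignment handling depend on
// the argument class):
//
//   if (gp_offset < 6 * 8) {                  // a register slot is still free
//     addr = reg_save_area + gp_offset;
//     gp_offset += 8;
//   } else {
//     addr = overflow_arg_area;               // fall back to the stack area
//     overflow_arg_area += RoundUp(ArgSize, 8);
//   }
//   // 'addr' is the pointer the load above dereferences.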
17172 static SDValue LowerVACOPY(SDValue Op, const X86Subtarget *Subtarget,
17173 SelectionDAG &DAG) {
17174 // X86-64 va_list is a struct { i32, i32, i8*, i8* }.
17175 assert(Subtarget->is64Bit() && "This code only handles 64-bit va_copy!");
17176 SDValue Chain = Op.getOperand(0);
17177 SDValue DstPtr = Op.getOperand(1);
17178 SDValue SrcPtr = Op.getOperand(2);
17179 const Value *DstSV = cast<SrcValueSDNode>(Op.getOperand(3))->getValue();
17180 const Value *SrcSV = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();
17183 return DAG.getMemcpy(Chain, DL, DstPtr, SrcPtr,
17184 DAG.getIntPtrConstant(24), 8, /*isVolatile*/false,
17186 MachinePointerInfo(DstSV), MachinePointerInfo(SrcSV));
17189 // getTargetVShiftByConstNode - Handle vector element shifts where the shift
17190 // amount is a constant. Takes immediate version of shift as input.
17191 static SDValue getTargetVShiftByConstNode(unsigned Opc, SDLoc dl, MVT VT,
17192 SDValue SrcOp, uint64_t ShiftAmt,
17193 SelectionDAG &DAG) {
17194 MVT ElementType = VT.getVectorElementType();
17196 // Fold this packed shift into its first operand if ShiftAmt is 0.
17197 if (ShiftAmt == 0)
17198 return SrcOp;
17200 // Check for ShiftAmt >= element width
17201 if (ShiftAmt >= ElementType.getSizeInBits()) {
17202 if (Opc == X86ISD::VSRAI)
17203 ShiftAmt = ElementType.getSizeInBits() - 1;
17204 else
17205 return DAG.getConstant(0, VT);
17208 assert((Opc == X86ISD::VSHLI || Opc == X86ISD::VSRLI || Opc == X86ISD::VSRAI)
17209 && "Unknown target vector shift-by-constant node");
17211 // Fold this packed vector shift into a build vector if SrcOp is a
17212 // vector of Constants or UNDEFs, and SrcOp valuetype is the same as VT.
17213 if (VT == SrcOp.getSimpleValueType() &&
17214 ISD::isBuildVectorOfConstantSDNodes(SrcOp.getNode())) {
17215 SmallVector<SDValue, 8> Elts;
17216 unsigned NumElts = SrcOp->getNumOperands();
17217 ConstantSDNode *ND;
17219 switch (Opc) {
17220 default: llvm_unreachable(nullptr);
17221 case X86ISD::VSHLI:
17222 for (unsigned i=0; i!=NumElts; ++i) {
17223 SDValue CurrentOp = SrcOp->getOperand(i);
17224 if (CurrentOp->getOpcode() == ISD::UNDEF) {
17225 Elts.push_back(CurrentOp);
17228 ND = cast<ConstantSDNode>(CurrentOp);
17229 const APInt &C = ND->getAPIntValue();
17230 Elts.push_back(DAG.getConstant(C.shl(ShiftAmt), ElementType));
17233 case X86ISD::VSRLI:
17234 for (unsigned i=0; i!=NumElts; ++i) {
17235 SDValue CurrentOp = SrcOp->getOperand(i);
17236 if (CurrentOp->getOpcode() == ISD::UNDEF) {
17237 Elts.push_back(CurrentOp);
17240 ND = cast<ConstantSDNode>(CurrentOp);
17241 const APInt &C = ND->getAPIntValue();
17242 Elts.push_back(DAG.getConstant(C.lshr(ShiftAmt), ElementType));
17245 case X86ISD::VSRAI:
17246 for (unsigned i=0; i!=NumElts; ++i) {
17247 SDValue CurrentOp = SrcOp->getOperand(i);
17248 if (CurrentOp->getOpcode() == ISD::UNDEF) {
17249 Elts.push_back(CurrentOp);
17252 ND = cast<ConstantSDNode>(CurrentOp);
17253 const APInt &C = ND->getAPIntValue();
17254 Elts.push_back(DAG.getConstant(C.ashr(ShiftAmt), ElementType));
17259 return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Elts);
17262 return DAG.getNode(Opc, dl, VT, SrcOp, DAG.getConstant(ShiftAmt, MVT::i8));
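// --- Added illustration (not part of the original source) ---
// Example of the constant-folding path above:
//   getTargetVShiftByConstNode(X86ISD::VSHLI, dl, v4i32, <1, 2, undef, 8>, 4, DAG)
//     --> BUILD_VECTOR <16, 32, undef, 128>
// i.e. constant lanes are shifted at compile time and undef lanes pass through;
// only non-constant operands reach the VSHLI/VSRLI/VSRAI node at the end.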
17265 // getTargetVShiftNode - Handle vector element shifts where the shift amount
17266 // may or may not be a constant. Takes immediate version of shift as input.
17267 static SDValue getTargetVShiftNode(unsigned Opc, SDLoc dl, MVT VT,
17268 SDValue SrcOp, SDValue ShAmt,
17269 SelectionDAG &DAG) {
17270 MVT SVT = ShAmt.getSimpleValueType();
17271 assert((SVT == MVT::i32 || SVT == MVT::i64) && "Unexpected value type!");
17273 // Catch shift-by-constant.
17274 if (ConstantSDNode *CShAmt = dyn_cast<ConstantSDNode>(ShAmt))
17275 return getTargetVShiftByConstNode(Opc, dl, VT, SrcOp,
17276 CShAmt->getZExtValue(), DAG);
17278 // Change opcode to non-immediate version
17280 default: llvm_unreachable("Unknown target vector shift node");
17281 case X86ISD::VSHLI: Opc = X86ISD::VSHL; break;
17282 case X86ISD::VSRLI: Opc = X86ISD::VSRL; break;
17283 case X86ISD::VSRAI: Opc = X86ISD::VSRA; break;
17286 const X86Subtarget &Subtarget =
17287 static_cast<const X86Subtarget &>(DAG.getSubtarget());
17288 if (Subtarget.hasSSE41() && ShAmt.getOpcode() == ISD::ZERO_EXTEND &&
17289 ShAmt.getOperand(0).getSimpleValueType() == MVT::i16) {
17290 // Let the shuffle legalizer expand this shift amount node.
17291 SDValue Op0 = ShAmt.getOperand(0);
17292 Op0 = DAG.getNode(ISD::SCALAR_TO_VECTOR, SDLoc(Op0), MVT::v8i16, Op0);
17293 ShAmt = getShuffleVectorZeroOrUndef(Op0, 0, true, &Subtarget, DAG);
17295 // Need to build a vector containing the shift amount.
17296 // SSE/AVX packed shifts only use the lower 64 bits of the shift count.
17297 SmallVector<SDValue, 4> ShOps;
17298 ShOps.push_back(ShAmt);
17299 if (SVT == MVT::i32) {
17300 ShOps.push_back(DAG.getConstant(0, SVT));
17301 ShOps.push_back(DAG.getUNDEF(SVT));
17303 ShOps.push_back(DAG.getUNDEF(SVT));
17305 MVT BVT = SVT == MVT::i32 ? MVT::v4i32 : MVT::v2i64;
17306 ShAmt = DAG.getNode(ISD::BUILD_VECTOR, dl, BVT, ShOps);
17309 // The return type has to be a 128-bit type with the same element
17310 // type as the input type.
17311 MVT EltVT = VT.getVectorElementType();
17312 EVT ShVT = MVT::getVectorVT(EltVT, 128/EltVT.getSizeInBits());
17314 ShAmt = DAG.getNode(ISD::BITCAST, dl, ShVT, ShAmt);
17315 return DAG.getNode(Opc, dl, VT, SrcOp, ShAmt);
17318 /// \brief Return (and \p Op, \p Mask) for compare instructions or
17319 /// (vselect \p Mask, \p Op, \p PreservedSrc) for others along with the
17320 /// necessary casting for \p Mask when lowering masking intrinsics.
17321 static SDValue getVectorMaskingNode(SDValue Op, SDValue Mask,
17322 SDValue PreservedSrc,
17323 const X86Subtarget *Subtarget,
17324 SelectionDAG &DAG) {
17325 EVT VT = Op.getValueType();
17326 EVT MaskVT = EVT::getVectorVT(*DAG.getContext(),
17327 MVT::i1, VT.getVectorNumElements());
17328 EVT BitcastVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
17329 Mask.getValueType().getSizeInBits());
17332 assert(MaskVT.isSimple() && "invalid mask type");
17334 if (isAllOnes(Mask))
17337 // In the case where MaskVT equals v2i1 or v4i1, the lower 2 or 4 elements
17338 // are extracted by EXTRACT_SUBVECTOR.
17339 SDValue VMask = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MaskVT,
17340 DAG.getNode(ISD::BITCAST, dl, BitcastVT, Mask),
17341 DAG.getIntPtrConstant(0));
17343 switch (Op.getOpcode()) {
17345 case X86ISD::PCMPEQM:
17346 case X86ISD::PCMPGTM:
17348 case X86ISD::CMPMU:
17349 return DAG.getNode(ISD::AND, dl, VT, Op, VMask);
17351 if (PreservedSrc.getOpcode() == ISD::UNDEF)
17352 PreservedSrc = getZeroVector(VT, Subtarget, DAG, dl);
17353 return DAG.getNode(ISD::VSELECT, dl, VT, VMask, Op, PreservedSrc);
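// --- Added illustration (not part of the original source) ---
// Example of how getVectorMaskingNode wraps a freshly built node:
//   Op = some v16i32 arithmetic node, Mask = i16 %m, PreservedSrc = %src
//   --> (vselect (v16i1 extract_subvector (bitcast %m), 0), Op, %src)
// Compare-style nodes (PCMPEQM, PCMPGTM, CMPM, CMPMU) are instead AND'ed with
// the mask, since their result is already a vXi1 predicate.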
17356 /// \brief Creates an SDNode for a predicated scalar operation.
17357 /// \returns (X86vselect \p Mask, \p Op, \p PreservedSrc).
17358 /// The mask is coming in as MVT::i8 and it should be truncated
17359 /// to MVT::i1 while lowering masking intrinsics.
17360 /// The main difference between ScalarMaskingNode and VectorMaskingNode is
17361 /// that it uses "X86select" instead of "vselect": we simply can't create a
17362 /// "vselect" node for a scalar instruction.
17363 static SDValue getScalarMaskingNode(SDValue Op, SDValue Mask,
17364 SDValue PreservedSrc,
17365 const X86Subtarget *Subtarget,
17366 SelectionDAG &DAG) {
17367 if (isAllOnes(Mask))
17370 EVT VT = Op.getValueType();
17372 // The mask should be of type MVT::i1
17373 SDValue IMask = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, Mask);
17375 if (PreservedSrc.getOpcode() == ISD::UNDEF)
17376 PreservedSrc = getZeroVector(VT, Subtarget, DAG, dl);
17377 return DAG.getNode(X86ISD::SELECT, dl, VT, IMask, Op, PreservedSrc);
17380 static SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, const X86Subtarget *Subtarget,
17381 SelectionDAG &DAG) {
17383 unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
17384 EVT VT = Op.getValueType();
17385 const IntrinsicData* IntrData = getIntrinsicWithoutChain(IntNo);
17387 switch(IntrData->Type) {
17388 case INTR_TYPE_1OP:
17389 return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(), Op.getOperand(1));
17390 case INTR_TYPE_2OP:
17391 return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(), Op.getOperand(1),
17393 case INTR_TYPE_3OP:
17394 return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(), Op.getOperand(1),
17395 Op.getOperand(2), Op.getOperand(3));
17396 case INTR_TYPE_1OP_MASK_RM: {
17397 SDValue Src = Op.getOperand(1);
17398 SDValue Src0 = Op.getOperand(2);
17399 SDValue Mask = Op.getOperand(3);
17400 SDValue RoundingMode = Op.getOperand(4);
17401 return getVectorMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT, Src,
17403 Mask, Src0, Subtarget, DAG);
17405 case INTR_TYPE_SCALAR_MASK_RM: {
17406 SDValue Src1 = Op.getOperand(1);
17407 SDValue Src2 = Op.getOperand(2);
17408 SDValue Src0 = Op.getOperand(3);
17409 SDValue Mask = Op.getOperand(4);
17410 // There are 2 kinds of intrinsics in this group:
17411 // (1) With suppress-all-exceptions (sae) - 6 operands
17412 // (2) With rounding mode and sae - 7 operands.
17413 if (Op.getNumOperands() == 6) {
17414 SDValue Sae = Op.getOperand(5);
17415 return getScalarMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT, Src1, Src2,
17417 Mask, Src0, Subtarget, DAG);
17419 assert(Op.getNumOperands() == 7 && "Unexpected intrinsic form");
17420 SDValue RoundingMode = Op.getOperand(5);
17421 SDValue Sae = Op.getOperand(6);
17422 return getScalarMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT, Src1, Src2,
17423 RoundingMode, Sae),
17424 Mask, Src0, Subtarget, DAG);
17426 case INTR_TYPE_2OP_MASK: {
17427 SDValue Src1 = Op.getOperand(1);
17428 SDValue Src2 = Op.getOperand(2);
17429 SDValue PassThru = Op.getOperand(3);
17430 SDValue Mask = Op.getOperand(4);
17431 // We specify 2 possible opcodes for intrinsics with rounding modes.
17432 // First, we check if the intrinsic may have non-default rounding mode,
17433 // (IntrData->Opc1 != 0), then we check the rounding mode operand.
17434 unsigned IntrWithRoundingModeOpcode = IntrData->Opc1;
17435 if (IntrWithRoundingModeOpcode != 0) {
17436 SDValue Rnd = Op.getOperand(5);
17437 unsigned Round = cast<ConstantSDNode>(Rnd)->getZExtValue();
17438 if (Round != X86::STATIC_ROUNDING::CUR_DIRECTION) {
17439 return getVectorMaskingNode(DAG.getNode(IntrWithRoundingModeOpcode,
17440 dl, Op.getValueType(),
17442 Mask, PassThru, Subtarget, DAG);
17445 return getVectorMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT,
17447 Mask, PassThru, Subtarget, DAG);
17449 case FMA_OP_MASK: {
17450 SDValue Src1 = Op.getOperand(1);
17451 SDValue Src2 = Op.getOperand(2);
17452 SDValue Src3 = Op.getOperand(3);
17453 SDValue Mask = Op.getOperand(4);
17454 // We specify 2 possible opcodes for intrinsics with rounding modes.
17455 // First, we check if the intrinsic may have non-default rounding mode,
17456 // (IntrData->Opc1 != 0), then we check the rounding mode operand.
17457 unsigned IntrWithRoundingModeOpcode = IntrData->Opc1;
17458 if (IntrWithRoundingModeOpcode != 0) {
17459 SDValue Rnd = Op.getOperand(5);
17460 if (cast<ConstantSDNode>(Rnd)->getZExtValue() !=
17461 X86::STATIC_ROUNDING::CUR_DIRECTION)
17462 return getVectorMaskingNode(DAG.getNode(IntrWithRoundingModeOpcode,
17463 dl, Op.getValueType(),
17464 Src1, Src2, Src3, Rnd),
17465 Mask, Src1, Subtarget, DAG);
17467 return getVectorMaskingNode(DAG.getNode(IntrData->Opc0,
17468 dl, Op.getValueType(),
17470 Mask, Src1, Subtarget, DAG);
17473 case CMP_MASK_CC: {
17474 // Comparison intrinsics with masks.
17475 // Example of transformation:
17476 // (i8 (int_x86_avx512_mask_pcmpeq_q_128
17477 // (v2i64 %a), (v2i64 %b), (i8 %mask))) ->
17479 // (v8i1 (insert_subvector undef,
17480 // (v2i1 (and (PCMPEQM %a, %b),
17481 // (extract_subvector
17482 // (v8i1 (bitcast %mask)), 0))), 0))))
17483 EVT VT = Op.getOperand(1).getValueType();
17484 EVT MaskVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
17485 VT.getVectorNumElements());
17486 SDValue Mask = Op.getOperand((IntrData->Type == CMP_MASK_CC) ? 4 : 3);
17487 EVT BitcastVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
17488 Mask.getValueType().getSizeInBits());
17490 if (IntrData->Type == CMP_MASK_CC) {
17491 Cmp = DAG.getNode(IntrData->Opc0, dl, MaskVT, Op.getOperand(1),
17492 Op.getOperand(2), Op.getOperand(3));
17494 assert(IntrData->Type == CMP_MASK && "Unexpected intrinsic type!");
17495 Cmp = DAG.getNode(IntrData->Opc0, dl, MaskVT, Op.getOperand(1),
17498 SDValue CmpMask = getVectorMaskingNode(Cmp, Mask,
17499 DAG.getTargetConstant(0, MaskVT),
17501 SDValue Res = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, BitcastVT,
17502 DAG.getUNDEF(BitcastVT), CmpMask,
17503 DAG.getIntPtrConstant(0));
17504 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
17506 case COMI: { // Comparison intrinsics
17507 ISD::CondCode CC = (ISD::CondCode)IntrData->Opc1;
17508 SDValue LHS = Op.getOperand(1);
17509 SDValue RHS = Op.getOperand(2);
17510 unsigned X86CC = TranslateX86CC(CC, true, LHS, RHS, DAG);
17511 assert(X86CC != X86::COND_INVALID && "Unexpected illegal condition!");
17512 SDValue Cond = DAG.getNode(IntrData->Opc0, dl, MVT::i32, LHS, RHS);
17513 SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
17514 DAG.getConstant(X86CC, MVT::i8), Cond);
17515 return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC);
17518 return getTargetVShiftNode(IntrData->Opc0, dl, Op.getSimpleValueType(),
17519 Op.getOperand(1), Op.getOperand(2), DAG);
17521 return getVectorMaskingNode(getTargetVShiftNode(IntrData->Opc0, dl,
17522 Op.getSimpleValueType(),
17524 Op.getOperand(2), DAG),
17525 Op.getOperand(4), Op.getOperand(3), Subtarget,
17527 case COMPRESS_EXPAND_IN_REG: {
17528 SDValue Mask = Op.getOperand(3);
17529 SDValue DataToCompress = Op.getOperand(1);
17530 SDValue PassThru = Op.getOperand(2);
17531 if (isAllOnes(Mask)) // return data as is
17532 return Op.getOperand(1);
17533 EVT VT = Op.getValueType();
17534 EVT MaskVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
17535 VT.getVectorNumElements());
17536 EVT BitcastVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
17537 Mask.getValueType().getSizeInBits());
17539 SDValue VMask = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MaskVT,
17540 DAG.getNode(ISD::BITCAST, dl, BitcastVT, Mask),
17541 DAG.getIntPtrConstant(0));
17543 return DAG.getNode(IntrData->Opc0, dl, VT, VMask, DataToCompress,
17547 SDValue Mask = Op.getOperand(3);
17548 EVT VT = Op.getValueType();
17549 EVT MaskVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
17550 VT.getVectorNumElements());
17551 EVT BitcastVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
17552 Mask.getValueType().getSizeInBits());
17554 SDValue VMask = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MaskVT,
17555 DAG.getNode(ISD::BITCAST, dl, BitcastVT, Mask),
17556 DAG.getIntPtrConstant(0));
17557 return DAG.getNode(IntrData->Opc0, dl, VT, VMask, Op.getOperand(1),
17566 default: return SDValue(); // Don't custom lower most intrinsics.
17568 case Intrinsic::x86_avx512_mask_valign_q_512:
17569 case Intrinsic::x86_avx512_mask_valign_d_512:
17570 // Vector source operands are swapped.
17571 return getVectorMaskingNode(DAG.getNode(X86ISD::VALIGN, dl,
17572 Op.getValueType(), Op.getOperand(2),
17575 Op.getOperand(5), Op.getOperand(4),
17578 // ptest and testp intrinsics. The intrinsics these come from are designed to
17579 // return an integer value, not just an instruction, so lower them to the ptest
17580 // or testp pattern and a setcc for the result.
17581 case Intrinsic::x86_sse41_ptestz:
17582 case Intrinsic::x86_sse41_ptestc:
17583 case Intrinsic::x86_sse41_ptestnzc:
17584 case Intrinsic::x86_avx_ptestz_256:
17585 case Intrinsic::x86_avx_ptestc_256:
17586 case Intrinsic::x86_avx_ptestnzc_256:
17587 case Intrinsic::x86_avx_vtestz_ps:
17588 case Intrinsic::x86_avx_vtestc_ps:
17589 case Intrinsic::x86_avx_vtestnzc_ps:
17590 case Intrinsic::x86_avx_vtestz_pd:
17591 case Intrinsic::x86_avx_vtestc_pd:
17592 case Intrinsic::x86_avx_vtestnzc_pd:
17593 case Intrinsic::x86_avx_vtestz_ps_256:
17594 case Intrinsic::x86_avx_vtestc_ps_256:
17595 case Intrinsic::x86_avx_vtestnzc_ps_256:
17596 case Intrinsic::x86_avx_vtestz_pd_256:
17597 case Intrinsic::x86_avx_vtestc_pd_256:
17598 case Intrinsic::x86_avx_vtestnzc_pd_256: {
17599 bool IsTestPacked = false;
17602 default: llvm_unreachable("Bad fallthrough in Intrinsic lowering.");
17603 case Intrinsic::x86_avx_vtestz_ps:
17604 case Intrinsic::x86_avx_vtestz_pd:
17605 case Intrinsic::x86_avx_vtestz_ps_256:
17606 case Intrinsic::x86_avx_vtestz_pd_256:
17607 IsTestPacked = true; // Fallthrough
17608 case Intrinsic::x86_sse41_ptestz:
17609 case Intrinsic::x86_avx_ptestz_256:
17611 X86CC = X86::COND_E;
17613 case Intrinsic::x86_avx_vtestc_ps:
17614 case Intrinsic::x86_avx_vtestc_pd:
17615 case Intrinsic::x86_avx_vtestc_ps_256:
17616 case Intrinsic::x86_avx_vtestc_pd_256:
17617 IsTestPacked = true; // Fallthrough
17618 case Intrinsic::x86_sse41_ptestc:
17619 case Intrinsic::x86_avx_ptestc_256:
17621 X86CC = X86::COND_B;
17623 case Intrinsic::x86_avx_vtestnzc_ps:
17624 case Intrinsic::x86_avx_vtestnzc_pd:
17625 case Intrinsic::x86_avx_vtestnzc_ps_256:
17626 case Intrinsic::x86_avx_vtestnzc_pd_256:
17627 IsTestPacked = true; // Fallthrough
17628 case Intrinsic::x86_sse41_ptestnzc:
17629 case Intrinsic::x86_avx_ptestnzc_256:
17631 X86CC = X86::COND_A;
17635 SDValue LHS = Op.getOperand(1);
17636 SDValue RHS = Op.getOperand(2);
17637 unsigned TestOpc = IsTestPacked ? X86ISD::TESTP : X86ISD::PTEST;
17638 SDValue Test = DAG.getNode(TestOpc, dl, MVT::i32, LHS, RHS);
17639 SDValue CC = DAG.getConstant(X86CC, MVT::i8);
17640 SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8, CC, Test);
17641 return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC);
17643 case Intrinsic::x86_avx512_kortestz_w:
17644 case Intrinsic::x86_avx512_kortestc_w: {
17645 unsigned X86CC = (IntNo == Intrinsic::x86_avx512_kortestz_w)? X86::COND_E: X86::COND_B;
17646 SDValue LHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i1, Op.getOperand(1));
17647 SDValue RHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i1, Op.getOperand(2));
17648 SDValue CC = DAG.getConstant(X86CC, MVT::i8);
17649 SDValue Test = DAG.getNode(X86ISD::KORTEST, dl, MVT::i32, LHS, RHS);
17650 SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i1, CC, Test);
17651 return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC);
17654 case Intrinsic::x86_sse42_pcmpistria128:
17655 case Intrinsic::x86_sse42_pcmpestria128:
17656 case Intrinsic::x86_sse42_pcmpistric128:
17657 case Intrinsic::x86_sse42_pcmpestric128:
17658 case Intrinsic::x86_sse42_pcmpistrio128:
17659 case Intrinsic::x86_sse42_pcmpestrio128:
17660 case Intrinsic::x86_sse42_pcmpistris128:
17661 case Intrinsic::x86_sse42_pcmpestris128:
17662 case Intrinsic::x86_sse42_pcmpistriz128:
17663 case Intrinsic::x86_sse42_pcmpestriz128: {
17667 default: llvm_unreachable("Impossible intrinsic"); // Can't reach here.
17668 case Intrinsic::x86_sse42_pcmpistria128:
17669 Opcode = X86ISD::PCMPISTRI;
17670 X86CC = X86::COND_A;
17672 case Intrinsic::x86_sse42_pcmpestria128:
17673 Opcode = X86ISD::PCMPESTRI;
17674 X86CC = X86::COND_A;
17676 case Intrinsic::x86_sse42_pcmpistric128:
17677 Opcode = X86ISD::PCMPISTRI;
17678 X86CC = X86::COND_B;
17680 case Intrinsic::x86_sse42_pcmpestric128:
17681 Opcode = X86ISD::PCMPESTRI;
17682 X86CC = X86::COND_B;
17684 case Intrinsic::x86_sse42_pcmpistrio128:
17685 Opcode = X86ISD::PCMPISTRI;
17686 X86CC = X86::COND_O;
17688 case Intrinsic::x86_sse42_pcmpestrio128:
17689 Opcode = X86ISD::PCMPESTRI;
17690 X86CC = X86::COND_O;
17692 case Intrinsic::x86_sse42_pcmpistris128:
17693 Opcode = X86ISD::PCMPISTRI;
17694 X86CC = X86::COND_S;
17696 case Intrinsic::x86_sse42_pcmpestris128:
17697 Opcode = X86ISD::PCMPESTRI;
17698 X86CC = X86::COND_S;
17700 case Intrinsic::x86_sse42_pcmpistriz128:
17701 Opcode = X86ISD::PCMPISTRI;
17702 X86CC = X86::COND_E;
17704 case Intrinsic::x86_sse42_pcmpestriz128:
17705 Opcode = X86ISD::PCMPESTRI;
17706 X86CC = X86::COND_E;
17709 SmallVector<SDValue, 5> NewOps(Op->op_begin()+1, Op->op_end());
17710 SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
17711 SDValue PCMP = DAG.getNode(Opcode, dl, VTs, NewOps);
17712 SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
17713 DAG.getConstant(X86CC, MVT::i8),
17714 SDValue(PCMP.getNode(), 1));
17715 return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC);
17718 case Intrinsic::x86_sse42_pcmpistri128:
17719 case Intrinsic::x86_sse42_pcmpestri128: {
17721 if (IntNo == Intrinsic::x86_sse42_pcmpistri128)
17722 Opcode = X86ISD::PCMPISTRI;
17724 Opcode = X86ISD::PCMPESTRI;
17726 SmallVector<SDValue, 5> NewOps(Op->op_begin()+1, Op->op_end());
17727 SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
17728 return DAG.getNode(Opcode, dl, VTs, NewOps);
17733 static SDValue getGatherNode(unsigned Opc, SDValue Op, SelectionDAG &DAG,
17734 SDValue Src, SDValue Mask, SDValue Base,
17735 SDValue Index, SDValue ScaleOp, SDValue Chain,
17736 const X86Subtarget * Subtarget) {
17738 ConstantSDNode *C = dyn_cast<ConstantSDNode>(ScaleOp);
17739 assert(C && "Invalid scale type");
17740 SDValue Scale = DAG.getTargetConstant(C->getZExtValue(), MVT::i8);
17741 EVT MaskVT = MVT::getVectorVT(MVT::i1,
17742 Index.getSimpleValueType().getVectorNumElements());
17744 ConstantSDNode *MaskC = dyn_cast<ConstantSDNode>(Mask);
17746 MaskInReg = DAG.getTargetConstant(MaskC->getSExtValue(), MaskVT);
17748 MaskInReg = DAG.getNode(ISD::BITCAST, dl, MaskVT, Mask);
17749 SDVTList VTs = DAG.getVTList(Op.getValueType(), MaskVT, MVT::Other);
17750 SDValue Disp = DAG.getTargetConstant(0, MVT::i32);
17751 SDValue Segment = DAG.getRegister(0, MVT::i32);
17752 if (Src.getOpcode() == ISD::UNDEF)
17753 Src = getZeroVector(Op.getValueType(), Subtarget, DAG, dl);
17754 SDValue Ops[] = {Src, MaskInReg, Base, Scale, Index, Disp, Segment, Chain};
17755 SDNode *Res = DAG.getMachineNode(Opc, dl, VTs, Ops);
17756 SDValue RetOps[] = { SDValue(Res, 0), SDValue(Res, 2) };
17757 return DAG.getMergeValues(RetOps, dl);
17760 static SDValue getScatterNode(unsigned Opc, SDValue Op, SelectionDAG &DAG,
17761 SDValue Src, SDValue Mask, SDValue Base,
17762 SDValue Index, SDValue ScaleOp, SDValue Chain) {
17764 ConstantSDNode *C = dyn_cast<ConstantSDNode>(ScaleOp);
17765 assert(C && "Invalid scale type");
17766 SDValue Scale = DAG.getTargetConstant(C->getZExtValue(), MVT::i8);
17767 SDValue Disp = DAG.getTargetConstant(0, MVT::i32);
17768 SDValue Segment = DAG.getRegister(0, MVT::i32);
17769 EVT MaskVT = MVT::getVectorVT(MVT::i1,
17770 Index.getSimpleValueType().getVectorNumElements());
17772 ConstantSDNode *MaskC = dyn_cast<ConstantSDNode>(Mask);
17774 MaskInReg = DAG.getTargetConstant(MaskC->getSExtValue(), MaskVT);
17776 MaskInReg = DAG.getNode(ISD::BITCAST, dl, MaskVT, Mask);
17777 SDVTList VTs = DAG.getVTList(MaskVT, MVT::Other);
17778 SDValue Ops[] = {Base, Scale, Index, Disp, Segment, MaskInReg, Src, Chain};
17779 SDNode *Res = DAG.getMachineNode(Opc, dl, VTs, Ops);
17780 return SDValue(Res, 1);
17783 static SDValue getPrefetchNode(unsigned Opc, SDValue Op, SelectionDAG &DAG,
17784 SDValue Mask, SDValue Base, SDValue Index,
17785 SDValue ScaleOp, SDValue Chain) {
17787 ConstantSDNode *C = dyn_cast<ConstantSDNode>(ScaleOp);
17788 assert(C && "Invalid scale type");
17789 SDValue Scale = DAG.getTargetConstant(C->getZExtValue(), MVT::i8);
17790 SDValue Disp = DAG.getTargetConstant(0, MVT::i32);
17791 SDValue Segment = DAG.getRegister(0, MVT::i32);
17793 MVT::getVectorVT(MVT::i1, Index.getSimpleValueType().getVectorNumElements());
17795 ConstantSDNode *MaskC = dyn_cast<ConstantSDNode>(Mask);
17797 MaskInReg = DAG.getTargetConstant(MaskC->getSExtValue(), MaskVT);
17799 MaskInReg = DAG.getNode(ISD::BITCAST, dl, MaskVT, Mask);
17800 //SDVTList VTs = DAG.getVTList(MVT::Other);
17801 SDValue Ops[] = {MaskInReg, Base, Scale, Index, Disp, Segment, Chain};
17802 SDNode *Res = DAG.getMachineNode(Opc, dl, MVT::Other, Ops);
17803 return SDValue(Res, 0);
17806 // getReadPerformanceCounter - Handles the lowering of builtin intrinsics that
17807 // read performance monitor counters (x86_rdpmc).
17808 static void getReadPerformanceCounter(SDNode *N, SDLoc DL,
17809 SelectionDAG &DAG, const X86Subtarget *Subtarget,
17810 SmallVectorImpl<SDValue> &Results) {
17811 assert(N->getNumOperands() == 3 && "Unexpected number of operands!");
17812 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue);
17815 // The ECX register is used to select the index of the performance counter
17817 SDValue Chain = DAG.getCopyToReg(N->getOperand(0), DL, X86::ECX,
17819 SDValue rd = DAG.getNode(X86ISD::RDPMC_DAG, DL, Tys, Chain);
17821 // Reads the content of a 64-bit performance counter and returns it in the
17822 // registers EDX:EAX.
17823 if (Subtarget->is64Bit()) {
17824 LO = DAG.getCopyFromReg(rd, DL, X86::RAX, MVT::i64, rd.getValue(1));
17825 HI = DAG.getCopyFromReg(LO.getValue(1), DL, X86::RDX, MVT::i64,
17828 LO = DAG.getCopyFromReg(rd, DL, X86::EAX, MVT::i32, rd.getValue(1));
17829 HI = DAG.getCopyFromReg(LO.getValue(1), DL, X86::EDX, MVT::i32,
17832 Chain = HI.getValue(1);
17834 if (Subtarget->is64Bit()) {
17835 // The EAX register is loaded with the low-order 32 bits. The EDX register
17836 // is loaded with the supported high-order bits of the counter.
17837 SDValue Tmp = DAG.getNode(ISD::SHL, DL, MVT::i64, HI,
17838 DAG.getConstant(32, MVT::i8));
17839 Results.push_back(DAG.getNode(ISD::OR, DL, MVT::i64, LO, Tmp));
17840 Results.push_back(Chain);
17844 // Use a buildpair to merge the two 32-bit values into a 64-bit one.
17845 SDValue Ops[] = { LO, HI };
17846 SDValue Pair = DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, Ops);
17847 Results.push_back(Pair);
17848 Results.push_back(Chain);
17851 // getReadTimeStampCounter - Handles the lowering of builtin intrinsics that
17852 // read the time stamp counter (x86_rdtsc and x86_rdtscp). This function is
17853 // also used to custom lower READCYCLECOUNTER nodes.
17854 static void getReadTimeStampCounter(SDNode *N, SDLoc DL, unsigned Opcode,
17855 SelectionDAG &DAG, const X86Subtarget *Subtarget,
17856 SmallVectorImpl<SDValue> &Results) {
17857 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue);
17858 SDValue rd = DAG.getNode(Opcode, DL, Tys, N->getOperand(0));
17861 // The processor's time-stamp counter (a 64-bit MSR) is stored into the
17862 // EDX:EAX registers. EDX is loaded with the high-order 32 bits of the MSR
17863 // and the EAX register is loaded with the low-order 32 bits.
17864 if (Subtarget->is64Bit()) {
17865 LO = DAG.getCopyFromReg(rd, DL, X86::RAX, MVT::i64, rd.getValue(1));
17866 HI = DAG.getCopyFromReg(LO.getValue(1), DL, X86::RDX, MVT::i64,
17869 LO = DAG.getCopyFromReg(rd, DL, X86::EAX, MVT::i32, rd.getValue(1));
17870 HI = DAG.getCopyFromReg(LO.getValue(1), DL, X86::EDX, MVT::i32,
17873 SDValue Chain = HI.getValue(1);
17875 if (Opcode == X86ISD::RDTSCP_DAG) {
17876 assert(N->getNumOperands() == 3 && "Unexpected number of operands!");
17878 // Instruction RDTSCP loads the IA32_TSC_AUX MSR (address C000_0103H) into
17879 // the ECX register. Add 'ecx' explicitly to the chain.
17880 SDValue ecx = DAG.getCopyFromReg(Chain, DL, X86::ECX, MVT::i32,
17882 // Explicitly store the content of ECX at the location passed in input
17883 // to the 'rdtscp' intrinsic.
17884 Chain = DAG.getStore(ecx.getValue(1), DL, ecx, N->getOperand(2),
17885 MachinePointerInfo(), false, false, 0);
17888 if (Subtarget->is64Bit()) {
17889 // The EDX register is loaded with the high-order 32 bits of the MSR, and
17890 // the EAX register is loaded with the low-order 32 bits.
17891 SDValue Tmp = DAG.getNode(ISD::SHL, DL, MVT::i64, HI,
17892 DAG.getConstant(32, MVT::i8));
17893 Results.push_back(DAG.getNode(ISD::OR, DL, MVT::i64, LO, Tmp));
17894 Results.push_back(Chain);
17898 // Use a buildpair to merge the two 32-bit values into a 64-bit one.
17899 SDValue Ops[] = { LO, HI };
17900 SDValue Pair = DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, Ops);
17901 Results.push_back(Pair);
17902 Results.push_back(Chain);
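// --- Added illustration (not part of the original source) ---
// On a 64-bit target the sequence built above for a plain RDTSC (e.g. from
// "%t = call i64 @llvm.readcyclecounter()") boils down to:
//   rdtsc                    ; TSC -> EDX:EAX (RAX/RDX zero-extended)
//   shlq $32, %rdx
//   orq  %rdx, %rax          ; %t = (HI << 32) | LO
// On 32-bit targets the two halves are instead merged with an i64 BUILD_PAIR.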
17905 static SDValue LowerREADCYCLECOUNTER(SDValue Op, const X86Subtarget *Subtarget,
17906 SelectionDAG &DAG) {
17907 SmallVector<SDValue, 2> Results;
17909 getReadTimeStampCounter(Op.getNode(), DL, X86ISD::RDTSC_DAG, DAG, Subtarget,
17911 return DAG.getMergeValues(Results, DL);
17915 static SDValue LowerINTRINSIC_W_CHAIN(SDValue Op, const X86Subtarget *Subtarget,
17916 SelectionDAG &DAG) {
17917 unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
17919 const IntrinsicData* IntrData = getIntrinsicWithChain(IntNo);
17924 switch(IntrData->Type) {
17925 default:
17926 llvm_unreachable("Unknown Intrinsic Type");
17930 // Emit the node with the right value type.
17931 SDVTList VTs = DAG.getVTList(Op->getValueType(0), MVT::Glue, MVT::Other);
17932 SDValue Result = DAG.getNode(IntrData->Opc0, dl, VTs, Op.getOperand(0));
17934 // If the value returned by RDRAND/RDSEED was valid (CF=1), return 1.
17935 // Otherwise return the value from Rand, which is always 0, cast to i32.
17936 SDValue Ops[] = { DAG.getZExtOrTrunc(Result, dl, Op->getValueType(1)),
17937 DAG.getConstant(1, Op->getValueType(1)),
17938 DAG.getConstant(X86::COND_B, MVT::i32),
17939 SDValue(Result.getNode(), 1) };
17940 SDValue isValid = DAG.getNode(X86ISD::CMOV, dl,
17941 DAG.getVTList(Op->getValueType(1), MVT::Glue),
17942 Ops);
17944 // Return { result, isValid, chain }.
17945 return DAG.getNode(ISD::MERGE_VALUES, dl, Op->getVTList(), Result, isValid,
17946 SDValue(Result.getNode(), 2));
17949 //gather(v1, mask, index, base, scale);
17950 SDValue Chain = Op.getOperand(0);
17951 SDValue Src = Op.getOperand(2);
17952 SDValue Base = Op.getOperand(3);
17953 SDValue Index = Op.getOperand(4);
17954 SDValue Mask = Op.getOperand(5);
17955 SDValue Scale = Op.getOperand(6);
17956 return getGatherNode(IntrData->Opc0, Op, DAG, Src, Mask, Base, Index, Scale, Chain,
17960 //scatter(base, mask, index, v1, scale);
17961 SDValue Chain = Op.getOperand(0);
17962 SDValue Base = Op.getOperand(2);
17963 SDValue Mask = Op.getOperand(3);
17964 SDValue Index = Op.getOperand(4);
17965 SDValue Src = Op.getOperand(5);
17966 SDValue Scale = Op.getOperand(6);
17967 return getScatterNode(IntrData->Opc0, Op, DAG, Src, Mask, Base, Index, Scale, Chain);
17970 SDValue Hint = Op.getOperand(6);
17971 unsigned HintVal;
17972 if (dyn_cast<ConstantSDNode> (Hint) == nullptr ||
17973 (HintVal = dyn_cast<ConstantSDNode> (Hint)->getZExtValue()) > 1)
17974 llvm_unreachable("Wrong prefetch hint in intrinsic: should be 0 or 1");
17975 unsigned Opcode = (HintVal ? IntrData->Opc1 : IntrData->Opc0);
17976 SDValue Chain = Op.getOperand(0);
17977 SDValue Mask = Op.getOperand(2);
17978 SDValue Index = Op.getOperand(3);
17979 SDValue Base = Op.getOperand(4);
17980 SDValue Scale = Op.getOperand(5);
17981 return getPrefetchNode(Opcode, Op, DAG, Mask, Base, Index, Scale, Chain);
17983 // Read Time Stamp Counter (RDTSC) and Processor ID (RDTSCP).
17985 SmallVector<SDValue, 2> Results;
17986 getReadTimeStampCounter(Op.getNode(), dl, IntrData->Opc0, DAG, Subtarget, Results);
17987 return DAG.getMergeValues(Results, dl);
17989 // Read Performance Monitoring Counters.
17991 SmallVector<SDValue, 2> Results;
17992 getReadPerformanceCounter(Op.getNode(), dl, DAG, Subtarget, Results);
17993 return DAG.getMergeValues(Results, dl);
17995 // XTEST intrinsics.
17997 SDVTList VTs = DAG.getVTList(Op->getValueType(0), MVT::Other);
17998 SDValue InTrans = DAG.getNode(IntrData->Opc0, dl, VTs, Op.getOperand(0));
17999 SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
18000 DAG.getConstant(X86::COND_NE, MVT::i8),
18002 SDValue Ret = DAG.getNode(ISD::ZERO_EXTEND, dl, Op->getValueType(0), SetCC);
18003 return DAG.getNode(ISD::MERGE_VALUES, dl, Op->getVTList(),
18004 Ret, SDValue(InTrans.getNode(), 1));
18008 SmallVector<SDValue, 2> Results;
18009 SDVTList CFVTs = DAG.getVTList(Op->getValueType(0), MVT::Other);
18010 SDVTList VTs = DAG.getVTList(Op.getOperand(3)->getValueType(0), MVT::Other);
18011 SDValue GenCF = DAG.getNode(X86ISD::ADD, dl, CFVTs, Op.getOperand(2),
18012 DAG.getConstant(-1, MVT::i8));
18013 SDValue Res = DAG.getNode(IntrData->Opc0, dl, VTs, Op.getOperand(3),
18014 Op.getOperand(4), GenCF.getValue(1));
18015 SDValue Store = DAG.getStore(Op.getOperand(0), dl, Res.getValue(0),
18016 Op.getOperand(5), MachinePointerInfo(),
18018 SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
18019 DAG.getConstant(X86::COND_B, MVT::i8),
18021 Results.push_back(SetCC);
18022 Results.push_back(Store);
18023 return DAG.getMergeValues(Results, dl);
18025 case COMPRESS_TO_MEM: {
18027 SDValue Mask = Op.getOperand(4);
18028 SDValue DataToCompress = Op.getOperand(3);
18029 SDValue Addr = Op.getOperand(2);
18030 SDValue Chain = Op.getOperand(0);
18032 if (isAllOnes(Mask)) // return just a store
18033 return DAG.getStore(Chain, dl, DataToCompress, Addr,
18034 MachinePointerInfo(), false, false, 0);
18036 EVT VT = DataToCompress.getValueType();
18037 EVT MaskVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
18038 VT.getVectorNumElements());
18039 EVT BitcastVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
18040 Mask.getValueType().getSizeInBits());
18041 SDValue VMask = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MaskVT,
18042 DAG.getNode(ISD::BITCAST, dl, BitcastVT, Mask),
18043 DAG.getIntPtrConstant(0));
18045 SDValue Compressed = DAG.getNode(IntrData->Opc0, dl, VT, VMask,
18046 DataToCompress, DAG.getUNDEF(VT));
18047 return DAG.getStore(Chain, dl, Compressed, Addr,
18048 MachinePointerInfo(), false, false, 0);
18050 case EXPAND_FROM_MEM: {
18052 SDValue Mask = Op.getOperand(4);
18053 SDValue PathThru = Op.getOperand(3);
18054 SDValue Addr = Op.getOperand(2);
18055 SDValue Chain = Op.getOperand(0);
18056 EVT VT = Op.getValueType();
18058 if (isAllOnes(Mask)) // return just a load
18059 return DAG.getLoad(VT, dl, Chain, Addr, MachinePointerInfo(), false, false,
18061 EVT MaskVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
18062 VT.getVectorNumElements());
18063 EVT BitcastVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
18064 Mask.getValueType().getSizeInBits());
18065 SDValue VMask = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MaskVT,
18066 DAG.getNode(ISD::BITCAST, dl, BitcastVT, Mask),
18067 DAG.getIntPtrConstant(0));
18069 SDValue DataToExpand = DAG.getLoad(VT, dl, Chain, Addr, MachinePointerInfo(),
18070 false, false, false, 0);
18072 SmallVector<SDValue, 2> Results;
18073 Results.push_back(DAG.getNode(IntrData->Opc0, dl, VT, VMask, DataToExpand,
18075 Results.push_back(Chain);
18076 return DAG.getMergeValues(Results, dl);
18081 SDValue X86TargetLowering::LowerRETURNADDR(SDValue Op,
18082 SelectionDAG &DAG) const {
18083 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
18084 MFI->setReturnAddressIsTaken(true);
18086 if (verifyReturnAddressArgumentIsConstant(Op, DAG))
18089 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
18091 EVT PtrVT = getPointerTy();
18094 SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
18095 const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
18096 SDValue Offset = DAG.getConstant(RegInfo->getSlotSize(), PtrVT);
18097 return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(),
18098 DAG.getNode(ISD::ADD, dl, PtrVT,
18099 FrameAddr, Offset),
18100 MachinePointerInfo(), false, false, false, 0);
18103 // Just load the return address.
18104 SDValue RetAddrFI = getReturnAddressFrameIndex(DAG);
18105 return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(),
18106 RetAddrFI, MachinePointerInfo(), false, false, false, 0);
18109 SDValue X86TargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const {
18110 MachineFunction &MF = DAG.getMachineFunction();
18111 MachineFrameInfo *MFI = MF.getFrameInfo();
18112 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
18113 const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
18114 EVT VT = Op.getValueType();
18116 MFI->setFrameAddressIsTaken(true);
18118 if (MF.getTarget().getMCAsmInfo()->usesWindowsCFI()) {
18119 // Depth > 0 makes no sense on targets which use Windows unwind codes. It
18120 // is not possible to crawl up the stack without looking at the unwind codes
18122 int FrameAddrIndex = FuncInfo->getFAIndex();
18123 if (!FrameAddrIndex) {
18124 // Set up a frame object for the return address.
18125 unsigned SlotSize = RegInfo->getSlotSize();
18126 FrameAddrIndex = MF.getFrameInfo()->CreateFixedObject(
18127 SlotSize, /*Offset=*/INT64_MIN, /*IsImmutable=*/false);
18128 FuncInfo->setFAIndex(FrameAddrIndex);
18130 return DAG.getFrameIndex(FrameAddrIndex, VT);
18133 unsigned FrameReg =
18134 RegInfo->getPtrSizedFrameRegister(DAG.getMachineFunction());
18135 SDLoc dl(Op); // FIXME probably not meaningful
18136 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
18137 assert(((FrameReg == X86::RBP && VT == MVT::i64) ||
18138 (FrameReg == X86::EBP && VT == MVT::i32)) &&
18139 "Invalid Frame Register!");
18140 SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, VT);
18141 while (Depth--)
18142 FrameAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), FrameAddr,
18143 MachinePointerInfo(),
18144 false, false, false, 0);
18145 return FrameAddr;
18148 // FIXME? Maybe this could be a TableGen attribute on some registers and
18149 // this table could be generated automatically from RegInfo.
18150 unsigned X86TargetLowering::getRegisterByName(const char* RegName,
18152 unsigned Reg = StringSwitch<unsigned>(RegName)
18153 .Case("esp", X86::ESP)
18154 .Case("rsp", X86::RSP)
18158 report_fatal_error("Invalid register name global variable");
18161 SDValue X86TargetLowering::LowerFRAME_TO_ARGS_OFFSET(SDValue Op,
18162 SelectionDAG &DAG) const {
18163 const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
18164 return DAG.getIntPtrConstant(2 * RegInfo->getSlotSize());
18167 SDValue X86TargetLowering::LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const {
18168 SDValue Chain = Op.getOperand(0);
18169 SDValue Offset = Op.getOperand(1);
18170 SDValue Handler = Op.getOperand(2);
18173 EVT PtrVT = getPointerTy();
18174 const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
18175 unsigned FrameReg = RegInfo->getFrameRegister(DAG.getMachineFunction());
18176 assert(((FrameReg == X86::RBP && PtrVT == MVT::i64) ||
18177 (FrameReg == X86::EBP && PtrVT == MVT::i32)) &&
18178 "Invalid Frame Register!");
18179 SDValue Frame = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, PtrVT);
18180 unsigned StoreAddrReg = (PtrVT == MVT::i64) ? X86::RCX : X86::ECX;
18182 SDValue StoreAddr = DAG.getNode(ISD::ADD, dl, PtrVT, Frame,
18183 DAG.getIntPtrConstant(RegInfo->getSlotSize()));
18184 StoreAddr = DAG.getNode(ISD::ADD, dl, PtrVT, StoreAddr, Offset);
18185 Chain = DAG.getStore(Chain, dl, Handler, StoreAddr, MachinePointerInfo(),
18187 Chain = DAG.getCopyToReg(Chain, dl, StoreAddrReg, StoreAddr);
18189 return DAG.getNode(X86ISD::EH_RETURN, dl, MVT::Other, Chain,
18190 DAG.getRegister(StoreAddrReg, PtrVT));
18193 SDValue X86TargetLowering::lowerEH_SJLJ_SETJMP(SDValue Op,
18194 SelectionDAG &DAG) const {
18196 return DAG.getNode(X86ISD::EH_SJLJ_SETJMP, DL,
18197 DAG.getVTList(MVT::i32, MVT::Other),
18198 Op.getOperand(0), Op.getOperand(1));
18201 SDValue X86TargetLowering::lowerEH_SJLJ_LONGJMP(SDValue Op,
18202 SelectionDAG &DAG) const {
18204 return DAG.getNode(X86ISD::EH_SJLJ_LONGJMP, DL, MVT::Other,
18205 Op.getOperand(0), Op.getOperand(1));
18208 static SDValue LowerADJUST_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) {
18209 return Op.getOperand(0);
18212 SDValue X86TargetLowering::LowerINIT_TRAMPOLINE(SDValue Op,
18213 SelectionDAG &DAG) const {
18214 SDValue Root = Op.getOperand(0);
18215 SDValue Trmp = Op.getOperand(1); // trampoline
18216 SDValue FPtr = Op.getOperand(2); // nested function
18217 SDValue Nest = Op.getOperand(3); // 'nest' parameter value
18220 const Value *TrmpAddr = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();
18221 const TargetRegisterInfo *TRI = Subtarget->getRegisterInfo();
18223 if (Subtarget->is64Bit()) {
18224 SDValue OutChains[6];
18226 // Large code-model.
18227 const unsigned char JMP64r = 0xFF; // 64-bit jmp through register opcode.
18228 const unsigned char MOV64ri = 0xB8; // X86::MOV64ri opcode.
18230 const unsigned char N86R10 = TRI->getEncodingValue(X86::R10) & 0x7;
18231 const unsigned char N86R11 = TRI->getEncodingValue(X86::R11) & 0x7;
18233 const unsigned char REX_WB = 0x40 | 0x08 | 0x01; // REX prefix
18235 // Load the pointer to the nested function into R11.
18236 unsigned OpCode = ((MOV64ri | N86R11) << 8) | REX_WB; // movabsq r11
18237 SDValue Addr = Trmp;
18238 OutChains[0] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, MVT::i16),
18239 Addr, MachinePointerInfo(TrmpAddr),
18242 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
18243 DAG.getConstant(2, MVT::i64));
18244 OutChains[1] = DAG.getStore(Root, dl, FPtr, Addr,
18245 MachinePointerInfo(TrmpAddr, 2),
18248 // Load the 'nest' parameter value into R10.
18249 // R10 is specified in X86CallingConv.td
18250 OpCode = ((MOV64ri | N86R10) << 8) | REX_WB; // movabsq r10
18251 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
18252 DAG.getConstant(10, MVT::i64));
18253 OutChains[2] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, MVT::i16),
18254 Addr, MachinePointerInfo(TrmpAddr, 10),
18257 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
18258 DAG.getConstant(12, MVT::i64));
18259 OutChains[3] = DAG.getStore(Root, dl, Nest, Addr,
18260 MachinePointerInfo(TrmpAddr, 12),
18263 // Jump to the nested function.
18264 OpCode = (JMP64r << 8) | REX_WB; // jmpq *...
18265 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
18266 DAG.getConstant(20, MVT::i64));
18267 OutChains[4] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, MVT::i16),
18268 Addr, MachinePointerInfo(TrmpAddr, 20),
18271 unsigned char ModRM = N86R11 | (4 << 3) | (3 << 6); // ...r11
18272 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
18273 DAG.getConstant(22, MVT::i64));
18274 OutChains[5] = DAG.getStore(Root, dl, DAG.getConstant(ModRM, MVT::i8), Addr,
18275 MachinePointerInfo(TrmpAddr, 22),
18278 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
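// --- Added illustration (not part of the original source) ---
// Byte layout of the 64-bit trampoline written by the six stores above
// (offsets are the MachinePointerInfo(TrmpAddr, N) values):
//   [ 0] 49 BB <FPtr:8>     movabsq $FPtr, %r11
//   [10] 49 BA <Nest:8>     movabsq $Nest, %r10
//   [20] 49 FF E3           jmpq    *%r11
// REX_WB (0x49) is the REX prefix, B8|reg / FF are the mov/jmp opcodes, and
// 0xE3 is the ModRM byte selecting %r11.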
18280 const Function *Func =
18281 cast<Function>(cast<SrcValueSDNode>(Op.getOperand(5))->getValue());
18282 CallingConv::ID CC = Func->getCallingConv();
18287 llvm_unreachable("Unsupported calling convention");
18288 case CallingConv::C:
18289 case CallingConv::X86_StdCall: {
18290 // Pass 'nest' parameter in ECX.
18291 // Must be kept in sync with X86CallingConv.td
18292 NestReg = X86::ECX;
18294 // Check that ECX wasn't needed by an 'inreg' parameter.
18295 FunctionType *FTy = Func->getFunctionType();
18296 const AttributeSet &Attrs = Func->getAttributes();
18298 if (!Attrs.isEmpty() && !Func->isVarArg()) {
18299 unsigned InRegCount = 0;
18302 for (FunctionType::param_iterator I = FTy->param_begin(),
18303 E = FTy->param_end(); I != E; ++I, ++Idx)
18304 if (Attrs.hasAttribute(Idx, Attribute::InReg))
18305 // FIXME: should only count parameters that are lowered to integers.
18306 InRegCount += (TD->getTypeSizeInBits(*I) + 31) / 32;
18308 if (InRegCount > 2) {
18309 report_fatal_error("Nest register in use - reduce number of inreg"
18315 case CallingConv::X86_FastCall:
18316 case CallingConv::X86_ThisCall:
18317 case CallingConv::Fast:
18318 // Pass 'nest' parameter in EAX.
18319 // Must be kept in sync with X86CallingConv.td
18320 NestReg = X86::EAX;
18324 SDValue OutChains[4];
18325 SDValue Addr, Disp;
18327 Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
18328 DAG.getConstant(10, MVT::i32));
18329 Disp = DAG.getNode(ISD::SUB, dl, MVT::i32, FPtr, Addr);
18331 // This is storing the opcode for MOV32ri.
18332 const unsigned char MOV32ri = 0xB8; // X86::MOV32ri's opcode byte.
18333 const unsigned char N86Reg = TRI->getEncodingValue(NestReg) & 0x7;
18334 OutChains[0] = DAG.getStore(Root, dl,
18335 DAG.getConstant(MOV32ri|N86Reg, MVT::i8),
18336 Trmp, MachinePointerInfo(TrmpAddr),
18339 Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
18340 DAG.getConstant(1, MVT::i32));
18341 OutChains[1] = DAG.getStore(Root, dl, Nest, Addr,
18342 MachinePointerInfo(TrmpAddr, 1),
18345 const unsigned char JMP = 0xE9; // jmp <32bit dst> opcode.
18346 Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
18347 DAG.getConstant(5, MVT::i32));
18348 OutChains[2] = DAG.getStore(Root, dl, DAG.getConstant(JMP, MVT::i8), Addr,
18349 MachinePointerInfo(TrmpAddr, 5),
18352 Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
18353 DAG.getConstant(6, MVT::i32));
18354 OutChains[3] = DAG.getStore(Root, dl, Disp, Addr,
18355 MachinePointerInfo(TrmpAddr, 6),
18358 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
18362 SDValue X86TargetLowering::LowerFLT_ROUNDS_(SDValue Op,
18363 SelectionDAG &DAG) const {
18364 /*
18365 The rounding mode is in bits 11:10 of FPSR, and has the following
18366 settings:
18367   00 Round to nearest
18368   01 Round to -inf
18369   10 Round to +inf
18370   11 Round to 0
18371
18372 FLT_ROUNDS, on the other hand, expects the following:
18373   -1 Undefined
18374    0 Round to 0
18375    1 Round to nearest
18376    2 Round to +inf
18377    3 Round to -inf
18378
18379 To perform the conversion, we do:
18380   (((((FPSR & 0x800) >> 11) | ((FPSR & 0x400) >> 9)) + 1) & 3)
18381 */
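// --- Added illustration (not part of the original source) ---
// Worked instance of the conversion above, for round-toward-zero (RC = 11,
// i.e. FPSR rounding bits = 0xC00):
//   (0xC00 & 0x800) >> 11 = 1,   (0xC00 & 0x400) >> 9 = 2
//   ((1 | 2) + 1) & 3     = 0    ==> FLT_ROUNDS "Round to 0"
// and for round-to-nearest (RC = 00) the same formula yields (0 + 1) & 3 = 1.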
18383 MachineFunction &MF = DAG.getMachineFunction();
18384 const TargetFrameLowering &TFI = *Subtarget->getFrameLowering();
18385 unsigned StackAlignment = TFI.getStackAlignment();
18386 MVT VT = Op.getSimpleValueType();
18389 // Save FP Control Word to stack slot
18390 int SSFI = MF.getFrameInfo()->CreateStackObject(2, StackAlignment, false);
18391 SDValue StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
18393 MachineMemOperand *MMO =
18394 MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI),
18395 MachineMemOperand::MOStore, 2, 2);
18397 SDValue Ops[] = { DAG.getEntryNode(), StackSlot };
18398 SDValue Chain = DAG.getMemIntrinsicNode(X86ISD::FNSTCW16m, DL,
18399 DAG.getVTList(MVT::Other),
18400 Ops, MVT::i16, MMO);
18402 // Load FP Control Word from stack slot
18403 SDValue CWD = DAG.getLoad(MVT::i16, DL, Chain, StackSlot,
18404 MachinePointerInfo(), false, false, false, 0);
18406 // Transform as necessary
18408 DAG.getNode(ISD::SRL, DL, MVT::i16,
18409 DAG.getNode(ISD::AND, DL, MVT::i16,
18410 CWD, DAG.getConstant(0x800, MVT::i16)),
18411 DAG.getConstant(11, MVT::i8));
18413 DAG.getNode(ISD::SRL, DL, MVT::i16,
18414 DAG.getNode(ISD::AND, DL, MVT::i16,
18415 CWD, DAG.getConstant(0x400, MVT::i16)),
18416 DAG.getConstant(9, MVT::i8));
18419 DAG.getNode(ISD::AND, DL, MVT::i16,
18420 DAG.getNode(ISD::ADD, DL, MVT::i16,
18421 DAG.getNode(ISD::OR, DL, MVT::i16, CWD1, CWD2),
18422 DAG.getConstant(1, MVT::i16)),
18423 DAG.getConstant(3, MVT::i16));
18425 return DAG.getNode((VT.getSizeInBits() < 16 ?
18426 ISD::TRUNCATE : ISD::ZERO_EXTEND), DL, VT, RetVal);
18429 static SDValue LowerCTLZ(SDValue Op, SelectionDAG &DAG) {
18430 MVT VT = Op.getSimpleValueType();
18432 unsigned NumBits = VT.getSizeInBits();
18435 Op = Op.getOperand(0);
18436 if (VT == MVT::i8) {
18437 // Zero extend to i32 since there is not an i8 bsr.
18439 Op = DAG.getNode(ISD::ZERO_EXTEND, dl, OpVT, Op);
18442 // Issue a bsr (scan bits in reverse) which also sets EFLAGS.
18443 SDVTList VTs = DAG.getVTList(OpVT, MVT::i32);
18444 Op = DAG.getNode(X86ISD::BSR, dl, VTs, Op);
18446 // If src is zero (i.e. bsr sets ZF), returns NumBits.
18449 DAG.getConstant(NumBits+NumBits-1, OpVT),
18450 DAG.getConstant(X86::COND_E, MVT::i8),
18453 Op = DAG.getNode(X86ISD::CMOV, dl, OpVT, Ops);
18455 // Finally xor with NumBits-1.
18456 Op = DAG.getNode(ISD::XOR, dl, OpVT, Op, DAG.getConstant(NumBits-1, OpVT));
18459 Op = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Op);
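// --- Added illustration (not part of the original source) ---
// Worked i32 examples for the CTLZ lowering above:
//   x = 0x00008000: BSR yields 15 (index of the highest set bit); 15 ^ 31 = 16,
//                   the number of leading zeros.
//   x = 0:          BSR sets ZF and leaves its result undefined, so the CMOV
//                   substitutes 2*32-1 = 63, and 63 ^ 31 = 32 as required.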
18463 static SDValue LowerCTLZ_ZERO_UNDEF(SDValue Op, SelectionDAG &DAG) {
18464 MVT VT = Op.getSimpleValueType();
18466 unsigned NumBits = VT.getSizeInBits();
18469 Op = Op.getOperand(0);
18470 if (VT == MVT::i8) {
18471 // Zero extend to i32 since there is not an i8 bsr.
18473 Op = DAG.getNode(ISD::ZERO_EXTEND, dl, OpVT, Op);
18476 // Issue a bsr (scan bits in reverse).
18477 SDVTList VTs = DAG.getVTList(OpVT, MVT::i32);
18478 Op = DAG.getNode(X86ISD::BSR, dl, VTs, Op);
18480 // And xor with NumBits-1.
18481 Op = DAG.getNode(ISD::XOR, dl, OpVT, Op, DAG.getConstant(NumBits-1, OpVT));
18484 Op = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Op);
18488 static SDValue LowerCTTZ(SDValue Op, SelectionDAG &DAG) {
18489 MVT VT = Op.getSimpleValueType();
18490 unsigned NumBits = VT.getSizeInBits();
18492 Op = Op.getOperand(0);
18494 // Issue a bsf (scan bits forward) which also sets EFLAGS.
18495 SDVTList VTs = DAG.getVTList(VT, MVT::i32);
18496 Op = DAG.getNode(X86ISD::BSF, dl, VTs, Op);
18498 // If src is zero (i.e. bsf sets ZF), returns NumBits.
18501 DAG.getConstant(NumBits, VT),
18502 DAG.getConstant(X86::COND_E, MVT::i8),
18505 return DAG.getNode(X86ISD::CMOV, dl, VT, Ops);
18508 // Lower256IntArith - Break a 256-bit integer operation into two new 128-bit
18509 // ones, and then concatenate the result back.
18510 static SDValue Lower256IntArith(SDValue Op, SelectionDAG &DAG) {
18511 MVT VT = Op.getSimpleValueType();
18513 assert(VT.is256BitVector() && VT.isInteger() &&
18514 "Unsupported value type for operation");
18516 unsigned NumElems = VT.getVectorNumElements();
18519 // Extract the LHS vectors
18520 SDValue LHS = Op.getOperand(0);
18521 SDValue LHS1 = Extract128BitVector(LHS, 0, DAG, dl);
18522 SDValue LHS2 = Extract128BitVector(LHS, NumElems/2, DAG, dl);
18524 // Extract the RHS vectors
18525 SDValue RHS = Op.getOperand(1);
18526 SDValue RHS1 = Extract128BitVector(RHS, 0, DAG, dl);
18527 SDValue RHS2 = Extract128BitVector(RHS, NumElems/2, DAG, dl);
18529 MVT EltVT = VT.getVectorElementType();
18530 MVT NewVT = MVT::getVectorVT(EltVT, NumElems/2);
18532 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT,
18533 DAG.getNode(Op.getOpcode(), dl, NewVT, LHS1, RHS1),
18534 DAG.getNode(Op.getOpcode(), dl, NewVT, LHS2, RHS2));
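// --- Added illustration (not part of the original source) ---
// Example: with only AVX1 available, (add <8 x i32> %a, %b) is split by the
// helper above into
//   concat_vectors (add v4i32 %a[0..3], %b[0..3]),
//                  (add v4i32 %a[4..7], %b[4..7])
// so each half can use the 128-bit integer ALU.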
18537 static SDValue LowerADD(SDValue Op, SelectionDAG &DAG) {
18538 assert(Op.getSimpleValueType().is256BitVector() &&
18539 Op.getSimpleValueType().isInteger() &&
18540 "Only handle AVX 256-bit vector integer operation");
18541 return Lower256IntArith(Op, DAG);
18544 static SDValue LowerSUB(SDValue Op, SelectionDAG &DAG) {
18545 assert(Op.getSimpleValueType().is256BitVector() &&
18546 Op.getSimpleValueType().isInteger() &&
18547 "Only handle AVX 256-bit vector integer operation");
18548 return Lower256IntArith(Op, DAG);
18551 static SDValue LowerMUL(SDValue Op, const X86Subtarget *Subtarget,
18552 SelectionDAG &DAG) {
18554 MVT VT = Op.getSimpleValueType();
18556 // Decompose 256-bit ops into smaller 128-bit ops.
18557 if (VT.is256BitVector() && !Subtarget->hasInt256())
18558 return Lower256IntArith(Op, DAG);
18560 SDValue A = Op.getOperand(0);
18561 SDValue B = Op.getOperand(1);
18563 // Lower v4i32 mul as 2x shuffle, 2x pmuludq, 2x shuffle.
18564 if (VT == MVT::v4i32) {
18565 assert(Subtarget->hasSSE2() && !Subtarget->hasSSE41() &&
18566 "Should not custom lower when pmuldq is available!");
18568 // Extract the odd parts.
18569 static const int UnpackMask[] = { 1, -1, 3, -1 };
18570 SDValue Aodds = DAG.getVectorShuffle(VT, dl, A, A, UnpackMask);
18571 SDValue Bodds = DAG.getVectorShuffle(VT, dl, B, B, UnpackMask);
18573 // Multiply the even parts.
18574 SDValue Evens = DAG.getNode(X86ISD::PMULUDQ, dl, MVT::v2i64, A, B);
18575 // Now multiply odd parts.
18576 SDValue Odds = DAG.getNode(X86ISD::PMULUDQ, dl, MVT::v2i64, Aodds, Bodds);
18578 Evens = DAG.getNode(ISD::BITCAST, dl, VT, Evens);
18579 Odds = DAG.getNode(ISD::BITCAST, dl, VT, Odds);
18581 // Merge the two vectors back together with a shuffle. This expands into 2
18583 static const int ShufMask[] = { 0, 4, 2, 6 };
18584 return DAG.getVectorShuffle(VT, dl, Evens, Odds, ShufMask);
18587 assert((VT == MVT::v2i64 || VT == MVT::v4i64 || VT == MVT::v8i64) &&
18588 "Only know how to lower V2I64/V4I64/V8I64 multiply");
18590 // Ahi = psrlqi(a, 32);
18591 // Bhi = psrlqi(b, 32);
18593 // AloBlo = pmuludq(a, b);
18594 // AloBhi = pmuludq(a, Bhi);
18595 // AhiBlo = pmuludq(Ahi, b);
18597 // AloBhi = psllqi(AloBhi, 32);
18598 // AhiBlo = psllqi(AhiBlo, 32);
18599 // return AloBlo + AloBhi + AhiBlo;
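// --- Added note (not part of the original source) ---
// The algebra behind the sequence above: writing a = Ahi*2^32 + Alo and
// b = Bhi*2^32 + Blo, the product is
//   a*b = Alo*Blo + (Alo*Bhi + Ahi*Blo)*2^32 + Ahi*Bhi*2^64
// and the last term vanishes modulo 2^64, so three 32x32->64 pmuludq results
// (two of them shifted left by 32) reconstruct the truncated 64-bit product.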
18601 SDValue Ahi = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, VT, A, 32, DAG);
18602 SDValue Bhi = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, VT, B, 32, DAG);
18604 // Bit cast to 32-bit vectors for MULUDQ
18605 EVT MulVT = (VT == MVT::v2i64) ? MVT::v4i32 :
18606 (VT == MVT::v4i64) ? MVT::v8i32 : MVT::v16i32;
18607 A = DAG.getNode(ISD::BITCAST, dl, MulVT, A);
18608 B = DAG.getNode(ISD::BITCAST, dl, MulVT, B);
18609 Ahi = DAG.getNode(ISD::BITCAST, dl, MulVT, Ahi);
18610 Bhi = DAG.getNode(ISD::BITCAST, dl, MulVT, Bhi);
18612 SDValue AloBlo = DAG.getNode(X86ISD::PMULUDQ, dl, VT, A, B);
18613 SDValue AloBhi = DAG.getNode(X86ISD::PMULUDQ, dl, VT, A, Bhi);
18614 SDValue AhiBlo = DAG.getNode(X86ISD::PMULUDQ, dl, VT, Ahi, B);
18616 AloBhi = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, VT, AloBhi, 32, DAG);
18617 AhiBlo = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, VT, AhiBlo, 32, DAG);
18619 SDValue Res = DAG.getNode(ISD::ADD, dl, VT, AloBlo, AloBhi);
18620 return DAG.getNode(ISD::ADD, dl, VT, Res, AhiBlo);
18623 SDValue X86TargetLowering::LowerWin64_i128OP(SDValue Op, SelectionDAG &DAG) const {
18624 assert(Subtarget->isTargetWin64() && "Unexpected target");
18625 EVT VT = Op.getValueType();
18626 assert(VT.isInteger() && VT.getSizeInBits() == 128 &&
18627 "Unexpected return type for lowering");
18631 switch (Op->getOpcode()) {
18632 default: llvm_unreachable("Unexpected request for libcall!");
18633 case ISD::SDIV: isSigned = true; LC = RTLIB::SDIV_I128; break;
18634 case ISD::UDIV: isSigned = false; LC = RTLIB::UDIV_I128; break;
18635 case ISD::SREM: isSigned = true; LC = RTLIB::SREM_I128; break;
18636 case ISD::UREM: isSigned = false; LC = RTLIB::UREM_I128; break;
18637 case ISD::SDIVREM: isSigned = true; LC = RTLIB::SDIVREM_I128; break;
18638 case ISD::UDIVREM: isSigned = false; LC = RTLIB::UDIVREM_I128; break;
18642 SDValue InChain = DAG.getEntryNode();
18644 TargetLowering::ArgListTy Args;
18645 TargetLowering::ArgListEntry Entry;
18646 for (unsigned i = 0, e = Op->getNumOperands(); i != e; ++i) {
18647 EVT ArgVT = Op->getOperand(i).getValueType();
18648 assert(ArgVT.isInteger() && ArgVT.getSizeInBits() == 128 &&
18649 "Unexpected argument type for lowering");
18650 SDValue StackPtr = DAG.CreateStackTemporary(ArgVT, 16);
18651 Entry.Node = StackPtr;
18652 InChain = DAG.getStore(InChain, dl, Op->getOperand(i), StackPtr, MachinePointerInfo(), false, false, 16);
18654 Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
18655 Entry.Ty = PointerType::get(ArgTy, 0);
18656 Entry.isSExt = false;
18657 Entry.isZExt = false;
18658 Args.push_back(Entry);
18661 SDValue Callee = DAG.getExternalSymbol(getLibcallName(LC), getPointerTy());
18664 TargetLowering::CallLoweringInfo CLI(DAG);
18665 CLI.setDebugLoc(dl).setChain(InChain)
18666 .setCallee(getLibcallCallingConv(LC),
18667 static_cast<EVT>(MVT::v2i64).getTypeForEVT(*DAG.getContext()),
18668 Callee, std::move(Args), 0)
18669 .setInRegister().setSExtResult(isSigned).setZExtResult(!isSigned);
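// The libcall takes its i128 arguments indirectly (via the stack temporaries
// created above) and, as far as this lowering is concerned, hands the 128-bit
// result back in a vector register; hence the v2i64 return type used here and
// the bitcast back to the original 128-bit integer type below.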
18671 std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI);
18672 return DAG.getNode(ISD::BITCAST, dl, VT, CallInfo.first);
18675 static SDValue LowerMUL_LOHI(SDValue Op, const X86Subtarget *Subtarget,
18676 SelectionDAG &DAG) {
18677 SDValue Op0 = Op.getOperand(0), Op1 = Op.getOperand(1);
18678 EVT VT = Op0.getValueType();
18681 assert((VT == MVT::v4i32 && Subtarget->hasSSE2()) ||
18682 (VT == MVT::v8i32 && Subtarget->hasInt256()));
18684 // PMULxD operations multiply each even value (starting at 0) of LHS with
18685 // the related value of RHS and produce a widened result.
18686 // E.g., PMULUDQ <4 x i32> <a|b|c|d>, <4 x i32> <e|f|g|h>
18687 // => <2 x i64> <ae|cg>
18689 // In other words, to have all the results, we need to perform two PMULxD:
18690 // 1. one with the even values.
18691 // 2. one with the odd values.
18692 // To achieve #2, we need to place the odd values at an even position.
18694 // Place the odd value at an even position (basically, shift all values 1
18695 // step to the left):
18696 const int Mask[] = {1, -1, 3, -1, 5, -1, 7, -1};
18697 // <a|b|c|d> => <b|undef|d|undef>
18698 SDValue Odd0 = DAG.getVectorShuffle(VT, dl, Op0, Op0, Mask);
18699 // <e|f|g|h> => <f|undef|h|undef>
18700 SDValue Odd1 = DAG.getVectorShuffle(VT, dl, Op1, Op1, Mask);
18702 // Emit two multiplies, one for the lower 2 ints and one for the higher 2 ints.
18704 MVT MulVT = VT == MVT::v4i32 ? MVT::v2i64 : MVT::v4i64;
18705 bool IsSigned = Op->getOpcode() == ISD::SMUL_LOHI;
18707 unsigned Opcode = (!IsSigned || !Subtarget->hasSSE41()) ? X86ISD::PMULUDQ : X86ISD::PMULDQ;
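// PMULUDQ (SSE2) multiplies the even unsigned 32-bit lanes, while PMULDQ
// (SSE4.1) is its signed counterpart. Without SSE4.1, a signed multiply is
// performed with the unsigned instruction and the high halves are corrected
// afterwards (see the fixup below).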
18708 // PMULUDQ <4 x i32> <a|b|c|d>, <4 x i32> <e|f|g|h>
18709 // => <2 x i64> <ae|cg>
18710 SDValue Mul1 = DAG.getNode(ISD::BITCAST, dl, VT,
18711 DAG.getNode(Opcode, dl, MulVT, Op0, Op1));
18712 // PMULUDQ <4 x i32> <b|undef|d|undef>, <4 x i32> <f|undef|h|undef>
18713 // => <2 x i64> <bf|dh>
18714 SDValue Mul2 = DAG.getNode(ISD::BITCAST, dl, VT,
18715 DAG.getNode(Opcode, dl, MulVT, Odd0, Odd1));
18717 // Shuffle it back into the right order.
18718 SDValue Highs, Lows;
18719 if (VT == MVT::v8i32) {
18720 const int HighMask[] = {1, 9, 3, 11, 5, 13, 7, 15};
18721 Highs = DAG.getVectorShuffle(VT, dl, Mul1, Mul2, HighMask);
18722 const int LowMask[] = {0, 8, 2, 10, 4, 12, 6, 14};
18723 Lows = DAG.getVectorShuffle(VT, dl, Mul1, Mul2, LowMask);
18724 } else {
18725 const int HighMask[] = {1, 5, 3, 7};
18726 Highs = DAG.getVectorShuffle(VT, dl, Mul1, Mul2, HighMask);
18727 const int LowMask[] = {0, 4, 2, 6};
18728 Lows = DAG.getVectorShuffle(VT, dl, Mul1, Mul2, LowMask);
18729 }
18731 // If we have a signed multiply but no PMULDQ, fix up the high parts of an
18732 // unsigned multiply.
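// The correction uses the identity
//   mulhs(a, b) = mulhu(a, b) - (a < 0 ? b : 0) - (b < 0 ? a : 0),
// where the two subtrahends are computed below as T1 = (a >> 31) & b and
// T2 = (b >> 31) & a, using arithmetic shifts as all-ones/all-zeros masks.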
18733 if (IsSigned && !Subtarget->hasSSE41()) {
18735 SDValue ShAmt = DAG.getConstant(31, DAG.getTargetLoweringInfo().getShiftAmountTy(VT));
18736 SDValue T1 = DAG.getNode(ISD::AND, dl, VT,
18737 DAG.getNode(ISD::SRA, dl, VT, Op0, ShAmt), Op1);
18738 SDValue T2 = DAG.getNode(ISD::AND, dl, VT,
18739 DAG.getNode(ISD::SRA, dl, VT, Op1, ShAmt), Op0);
18741 SDValue Fixup = DAG.getNode(ISD::ADD, dl, VT, T1, T2);
18742 Highs = DAG.getNode(ISD::SUB, dl, VT, Highs, Fixup);
18745 // The first result of MUL_LOHI is actually the low value, followed by the high one.
18747 SDValue Ops[] = {Lows, Highs};
18748 return DAG.getMergeValues(Ops, dl);
18751 static SDValue LowerScalarImmediateShift(SDValue Op, SelectionDAG &DAG,
18752 const X86Subtarget *Subtarget) {
18753 MVT VT = Op.getSimpleValueType();
18755 SDValue R = Op.getOperand(0);
18756 SDValue Amt = Op.getOperand(1);
18758 // Optimize shl/srl/sra with constant shift amount.
18759 if (auto *BVAmt = dyn_cast<BuildVectorSDNode>(Amt)) {
18760 if (auto *ShiftConst = BVAmt->getConstantSplatNode()) {
18761 uint64_t ShiftAmt = ShiftConst->getZExtValue();
18763 if (VT == MVT::v2i64 || VT == MVT::v4i32 || VT == MVT::v8i16 ||
18764 (Subtarget->hasInt256() &&
18765 (VT == MVT::v4i64 || VT == MVT::v8i32 || VT == MVT::v16i16)) ||
18766 (Subtarget->hasAVX512() &&
18767 (VT == MVT::v8i64 || VT == MVT::v16i32))) {
18768 if (Op.getOpcode() == ISD::SHL)
18769 return getTargetVShiftByConstNode(X86ISD::VSHLI, dl, VT, R, ShiftAmt, DAG);
18771 if (Op.getOpcode() == ISD::SRL)
18772 return getTargetVShiftByConstNode(X86ISD::VSRLI, dl, VT, R, ShiftAmt, DAG);
18774 if (Op.getOpcode() == ISD::SRA && VT != MVT::v2i64 && VT != MVT::v4i64)
18775 return getTargetVShiftByConstNode(X86ISD::VSRAI, dl, VT, R, ShiftAmt, DAG);
18779 if (VT == MVT::v16i8) {
18780 if (Op.getOpcode() == ISD::SHL) {
18781 // Make a large shift.
18782 SDValue SHL = getTargetVShiftByConstNode(X86ISD::VSHLI, dl,
18783 MVT::v8i16, R, ShiftAmt, DAG);
18785 SHL = DAG.getNode(ISD::BITCAST, dl, VT, SHL);
18786 // Zero out the rightmost bits.
18787 SmallVector<SDValue, 16> V(16,
18788 DAG.getConstant(uint8_t(-1U << ShiftAmt), MVT::i8));
18790 return DAG.getNode(ISD::AND, dl, VT, SHL,
18791 DAG.getNode(ISD::BUILD_VECTOR, dl, VT, V));
18793 if (Op.getOpcode() == ISD::SRL) {
18794 // Make a large shift.
18795 SDValue SRL = getTargetVShiftByConstNode(X86ISD::VSRLI, dl,
18796 MVT::v8i16, R, ShiftAmt, DAG);
18798 SRL = DAG.getNode(ISD::BITCAST, dl, VT, SRL);
18799 // Zero out the leftmost bits.
18800 SmallVector<SDValue, 16> V(16,
18801 DAG.getConstant(uint8_t(-1U) >> ShiftAmt, MVT::i8));
18803 return DAG.getNode(ISD::AND, dl, VT, SRL,
18804 DAG.getNode(ISD::BUILD_VECTOR, dl, VT, V));
18806 if (Op.getOpcode() == ISD::SRA) {
18807 if (ShiftAmt == 7) {
18808 // R s>> 7 === R s< 0
18809 SDValue Zeros = getZeroVector(VT, Subtarget, DAG, dl);
18810 return DAG.getNode(X86ISD::PCMPGT, dl, VT, Zeros, R);
18813 // R s>> a === ((R u>> a) ^ m) - m
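// Here m is a splat of the shifted-down sign bit (128 >> ShiftAmt for bytes),
// so the xor flips that bit into place and the subtract sign-extends it.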
18814 SDValue Res = DAG.getNode(ISD::SRL, dl, VT, R, Amt);
18815 SmallVector<SDValue, 16> V(16, DAG.getConstant(128 >> ShiftAmt, MVT::i8));
18817 SDValue Mask = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, V);
18818 Res = DAG.getNode(ISD::XOR, dl, VT, Res, Mask);
18819 Res = DAG.getNode(ISD::SUB, dl, VT, Res, Mask);
18822 llvm_unreachable("Unknown shift opcode.");
18825 if (Subtarget->hasInt256() && VT == MVT::v32i8) {
18826 if (Op.getOpcode() == ISD::SHL) {
18827 // Make a large shift.
18828 SDValue SHL = getTargetVShiftByConstNode(X86ISD::VSHLI, dl,
18829 MVT::v16i16, R, ShiftAmt, DAG);
18831 SHL = DAG.getNode(ISD::BITCAST, dl, VT, SHL);
18832 // Zero out the rightmost bits.
18833 SmallVector<SDValue, 32> V(32,
18834 DAG.getConstant(uint8_t(-1U << ShiftAmt), MVT::i8));
18836 return DAG.getNode(ISD::AND, dl, VT, SHL,
18837 DAG.getNode(ISD::BUILD_VECTOR, dl, VT, V));
18839 if (Op.getOpcode() == ISD::SRL) {
18840 // Make a large shift.
18841 SDValue SRL = getTargetVShiftByConstNode(X86ISD::VSRLI, dl,
18842 MVT::v16i16, R, ShiftAmt, DAG);
18844 SRL = DAG.getNode(ISD::BITCAST, dl, VT, SRL);
18845 // Zero out the leftmost bits.
18846 SmallVector<SDValue, 32> V(32,
18847 DAG.getConstant(uint8_t(-1U) >> ShiftAmt, MVT::i8));
18849 return DAG.getNode(ISD::AND, dl, VT, SRL,
18850 DAG.getNode(ISD::BUILD_VECTOR, dl, VT, V));
18852 if (Op.getOpcode() == ISD::SRA) {
18853 if (ShiftAmt == 7) {
18854 // R s>> 7 === R s< 0
18855 SDValue Zeros = getZeroVector(VT, Subtarget, DAG, dl);
18856 return DAG.getNode(X86ISD::PCMPGT, dl, VT, Zeros, R);
18859 // R s>> a === ((R u>> a) ^ m) - m
18860 SDValue Res = DAG.getNode(ISD::SRL, dl, VT, R, Amt);
18861 SmallVector<SDValue, 32> V(32, DAG.getConstant(128 >> ShiftAmt, MVT::i8));
18863 SDValue Mask = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, V);
18864 Res = DAG.getNode(ISD::XOR, dl, VT, Res, Mask);
18865 Res = DAG.getNode(ISD::SUB, dl, VT, Res, Mask);
18868 llvm_unreachable("Unknown shift opcode.");
18873 // Special case in 32-bit mode, where i64 is expanded into high and low parts.
18874 if (!Subtarget->is64Bit() &&
18875 (VT == MVT::v2i64 || (Subtarget->hasInt256() && VT == MVT::v4i64)) &&
18876 Amt.getOpcode() == ISD::BITCAST &&
18877 Amt.getOperand(0).getOpcode() == ISD::BUILD_VECTOR) {
18878 Amt = Amt.getOperand(0);
18879 unsigned Ratio = Amt.getSimpleValueType().getVectorNumElements() /
18880 VT.getVectorNumElements();
18881 unsigned RatioInLog2 = Log2_32_Ceil(Ratio);
18882 uint64_t ShiftAmt = 0;
18883 for (unsigned i = 0; i != Ratio; ++i) {
18884 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Amt.getOperand(i));
18888 ShiftAmt |= C->getZExtValue() << (i * (1 << (6 - RatioInLog2)));
18890 // Check remaining shift amounts.
18891 for (unsigned i = Ratio; i != Amt.getNumOperands(); i += Ratio) {
18892 uint64_t ShAmt = 0;
18893 for (unsigned j = 0; j != Ratio; ++j) {
18894 ConstantSDNode *C =
18895 dyn_cast<ConstantSDNode>(Amt.getOperand(i + j));
18899 ShAmt |= C->getZExtValue() << (j * (1 << (6 - RatioInLog2)));
18901 if (ShAmt != ShiftAmt)
18904 switch (Op.getOpcode()) {
18905 default:
18906 llvm_unreachable("Unknown shift opcode!");
18907 case ISD::SHL:
18908 return getTargetVShiftByConstNode(X86ISD::VSHLI, dl, VT, R, ShiftAmt, DAG);
18910 case ISD::SRL:
18911 return getTargetVShiftByConstNode(X86ISD::VSRLI, dl, VT, R, ShiftAmt, DAG);
18913 case ISD::SRA:
18914 return getTargetVShiftByConstNode(X86ISD::VSRAI, dl, VT, R, ShiftAmt, DAG);
18922 static SDValue LowerScalarVariableShift(SDValue Op, SelectionDAG &DAG,
18923 const X86Subtarget* Subtarget) {
18924 MVT VT = Op.getSimpleValueType();
18926 SDValue R = Op.getOperand(0);
18927 SDValue Amt = Op.getOperand(1);
18929 if ((VT == MVT::v2i64 && Op.getOpcode() != ISD::SRA) ||
18930 VT == MVT::v4i32 || VT == MVT::v8i16 ||
18931 (Subtarget->hasInt256() &&
18932 ((VT == MVT::v4i64 && Op.getOpcode() != ISD::SRA) ||
18933 VT == MVT::v8i32 || VT == MVT::v16i16)) ||
18934 (Subtarget->hasAVX512() && (VT == MVT::v8i64 || VT == MVT::v16i32))) {
18935 SDValue BaseShAmt;
18936 EVT EltVT = VT.getVectorElementType();
18938 if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(Amt)) {
18939 // Check if this build_vector node is doing a splat.
18940 // If so, then set BaseShAmt equal to the splat value.
18941 BaseShAmt = BV->getSplatValue();
18942 if (BaseShAmt && BaseShAmt.getOpcode() == ISD::UNDEF)
18943 BaseShAmt = SDValue();
18945 if (Amt.getOpcode() == ISD::EXTRACT_SUBVECTOR)
18946 Amt = Amt.getOperand(0);
18948 ShuffleVectorSDNode *SVN = dyn_cast<ShuffleVectorSDNode>(Amt);
18949 if (SVN && SVN->isSplat()) {
18950 unsigned SplatIdx = (unsigned)SVN->getSplatIndex();
18951 SDValue InVec = Amt.getOperand(0);
18952 if (InVec.getOpcode() == ISD::BUILD_VECTOR) {
18953 assert((SplatIdx < InVec.getValueType().getVectorNumElements()) &&
18954 "Unexpected shuffle index found!");
18955 BaseShAmt = InVec.getOperand(SplatIdx);
18956 } else if (InVec.getOpcode() == ISD::INSERT_VECTOR_ELT) {
18957 if (ConstantSDNode *C =
18958 dyn_cast<ConstantSDNode>(InVec.getOperand(2))) {
18959 if (C->getZExtValue() == SplatIdx)
18960 BaseShAmt = InVec.getOperand(1);
18965 // Avoid introducing an extract element from a shuffle.
18966 BaseShAmt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, InVec,
18967 DAG.getIntPtrConstant(SplatIdx));
18971 if (BaseShAmt.getNode()) {
18972 assert(EltVT.bitsLE(MVT::i64) && "Unexpected element type!");
18973 if (EltVT != MVT::i64 && EltVT.bitsGT(MVT::i32))
18974 BaseShAmt = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64, BaseShAmt);
18975 else if (EltVT.bitsLT(MVT::i32))
18976 BaseShAmt = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, BaseShAmt);
18978 switch (Op.getOpcode()) {
18980 llvm_unreachable("Unknown shift opcode!");
18982 switch (VT.SimpleTy) {
18983 default: return SDValue();
18992 return getTargetVShiftNode(X86ISD::VSHLI, dl, VT, R, BaseShAmt, DAG);
18995 switch (VT.SimpleTy) {
18996 default: return SDValue();
19003 return getTargetVShiftNode(X86ISD::VSRAI, dl, VT, R, BaseShAmt, DAG);
19006 switch (VT.SimpleTy) {
19007 default: return SDValue();
19016 return getTargetVShiftNode(X86ISD::VSRLI, dl, VT, R, BaseShAmt, DAG);
19022 // Special case in 32-bit mode, where i64 is expanded into high and low parts.
19023 if (!Subtarget->is64Bit() &&
19024 (VT == MVT::v2i64 || (Subtarget->hasInt256() && VT == MVT::v4i64) ||
19025 (Subtarget->hasAVX512() && VT == MVT::v8i64)) &&
19026 Amt.getOpcode() == ISD::BITCAST &&
19027 Amt.getOperand(0).getOpcode() == ISD::BUILD_VECTOR) {
19028 Amt = Amt.getOperand(0);
19029 unsigned Ratio = Amt.getSimpleValueType().getVectorNumElements() /
19030 VT.getVectorNumElements();
19031 std::vector<SDValue> Vals(Ratio);
19032 for (unsigned i = 0; i != Ratio; ++i)
19033 Vals[i] = Amt.getOperand(i);
19034 for (unsigned i = Ratio; i != Amt.getNumOperands(); i += Ratio) {
19035 for (unsigned j = 0; j != Ratio; ++j)
19036 if (Vals[j] != Amt.getOperand(i + j))
19039 switch (Op.getOpcode()) {
19040 default:
19041 llvm_unreachable("Unknown shift opcode!");
19042 case ISD::SHL:
19043 return DAG.getNode(X86ISD::VSHL, dl, VT, R, Op.getOperand(1));
19044 case ISD::SRL:
19045 return DAG.getNode(X86ISD::VSRL, dl, VT, R, Op.getOperand(1));
19046 case ISD::SRA:
19047 return DAG.getNode(X86ISD::VSRA, dl, VT, R, Op.getOperand(1));
19054 static SDValue LowerShift(SDValue Op, const X86Subtarget* Subtarget,
19055 SelectionDAG &DAG) {
19056 MVT VT = Op.getSimpleValueType();
19058 SDValue R = Op.getOperand(0);
19059 SDValue Amt = Op.getOperand(1);
19062 assert(VT.isVector() && "Custom lowering only for vector shifts!");
19063 assert(Subtarget->hasSSE2() && "Only custom lower when we have SSE2!");
19065 V = LowerScalarImmediateShift(Op, DAG, Subtarget);
19069 V = LowerScalarVariableShift(Op, DAG, Subtarget);
19073 if (Subtarget->hasAVX512() && (VT == MVT::v16i32 || VT == MVT::v8i64))
19074 return Op;
19075 // AVX2 has VPSLLV/VPSRAV/VPSRLV.
19076 if (Subtarget->hasInt256()) {
19077 if (Op.getOpcode() == ISD::SRL &&
19078 (VT == MVT::v2i64 || VT == MVT::v4i32 ||
19079 VT == MVT::v4i64 || VT == MVT::v8i32))
19080 return Op;
19081 if (Op.getOpcode() == ISD::SHL &&
19082 (VT == MVT::v2i64 || VT == MVT::v4i32 ||
19083 VT == MVT::v4i64 || VT == MVT::v8i32))
19084 return Op;
19085 if (Op.getOpcode() == ISD::SRA && (VT == MVT::v4i32 || VT == MVT::v8i32))
19086 return Op;
19087 }
19089 // If possible, lower this packed shift into a vector multiply instead of
19090 // expanding it into a sequence of scalar shifts.
19091 // Do this only if the vector shift count is a constant build_vector.
19092 if (Op.getOpcode() == ISD::SHL &&
19093 (VT == MVT::v8i16 || VT == MVT::v4i32 ||
19094 (Subtarget->hasInt256() && VT == MVT::v16i16)) &&
19095 ISD::isBuildVectorOfConstantSDNodes(Amt.getNode())) {
19096 SmallVector<SDValue, 8> Elts;
19097 EVT SVT = VT.getScalarType();
19098 unsigned SVTBits = SVT.getSizeInBits();
19099 const APInt &One = APInt(SVTBits, 1);
19100 unsigned NumElems = VT.getVectorNumElements();
19102 for (unsigned i = 0; i != NumElems; ++i) {
19103 SDValue Op = Amt->getOperand(i);
19104 if (Op->getOpcode() == ISD::UNDEF) {
19105 Elts.push_back(Op);
19109 ConstantSDNode *ND = cast<ConstantSDNode>(Op);
19110 const APInt &C = APInt(SVTBits, ND->getAPIntValue().getZExtValue());
19111 uint64_t ShAmt = C.getZExtValue();
19112 if (ShAmt >= SVTBits) {
19113 Elts.push_back(DAG.getUNDEF(SVT));
19116 Elts.push_back(DAG.getConstant(One.shl(ShAmt), SVT));
19118 SDValue BV = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Elts);
19119 return DAG.getNode(ISD::MUL, dl, VT, R, BV);
19122 // Lower SHL with variable shift amount.
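// The sequence below builds 2^Amt per lane without a variable vector shift:
// shifting the amount into the f32 exponent field (<< 23) and adding the
// exponent bias 0x3f800000 (the bit pattern of 1.0f) yields the float 2^Amt,
// which the fp-to-int conversion turns back into an integer power of two; a
// single vector multiply then performs the variable left shift.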
19123 if (VT == MVT::v4i32 && Op->getOpcode() == ISD::SHL) {
19124 Op = DAG.getNode(ISD::SHL, dl, VT, Amt, DAG.getConstant(23, VT));
19126 Op = DAG.getNode(ISD::ADD, dl, VT, Op, DAG.getConstant(0x3f800000U, VT));
19127 Op = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, Op);
19128 Op = DAG.getNode(ISD::FP_TO_SINT, dl, VT, Op);
19129 return DAG.getNode(ISD::MUL, dl, VT, Op, R);
19132 // If possible, lower this shift as a sequence of two shifts by
19133 // constant plus a MOVSS/MOVSD instead of scalarizing it.
19135 // (v4i32 (srl A, (build_vector < X, Y, Y, Y>)))
19137 // Could be rewritten as:
19138 // (v4i32 (MOVSS (srl A, <Y,Y,Y,Y>), (srl A, <X,X,X,X>)))
19140 // The advantage is that the two shifts from the example would be
19141 // lowered as X86ISD::VSRLI nodes. This would be cheaper than scalarizing
19142 // the vector shift into four scalar shifts plus four pairs of vector insert/extract.
19144 if ((VT == MVT::v8i16 || VT == MVT::v4i32) &&
19145 ISD::isBuildVectorOfConstantSDNodes(Amt.getNode())) {
19146 unsigned TargetOpcode = X86ISD::MOVSS;
19147 bool CanBeSimplified;
19148 // The splat value for the first packed shift (the 'X' from the example).
19149 SDValue Amt1 = Amt->getOperand(0);
19150 // The splat value for the second packed shift (the 'Y' from the example).
19151 SDValue Amt2 = (VT == MVT::v4i32) ? Amt->getOperand(1) :
19152 Amt->getOperand(2);
19154 // See if it is possible to replace this node with a sequence of
19155 // two shifts followed by a MOVSS/MOVSD
19156 if (VT == MVT::v4i32) {
19157 // Check if it is legal to use a MOVSS.
19158 CanBeSimplified = Amt2 == Amt->getOperand(2) &&
19159 Amt2 == Amt->getOperand(3);
19160 if (!CanBeSimplified) {
19161 // Otherwise, check if we can still simplify this node using a MOVSD.
19162 CanBeSimplified = Amt1 == Amt->getOperand(1) &&
19163 Amt->getOperand(2) == Amt->getOperand(3);
19164 TargetOpcode = X86ISD::MOVSD;
19165 Amt2 = Amt->getOperand(2);
19168 // Do similar checks for the case where the machine value type is MVT::v8i16.
19170 CanBeSimplified = Amt1 == Amt->getOperand(1);
19171 for (unsigned i=3; i != 8 && CanBeSimplified; ++i)
19172 CanBeSimplified = Amt2 == Amt->getOperand(i);
19174 if (!CanBeSimplified) {
19175 TargetOpcode = X86ISD::MOVSD;
19176 CanBeSimplified = true;
19177 Amt2 = Amt->getOperand(4);
19178 for (unsigned i=0; i != 4 && CanBeSimplified; ++i)
19179 CanBeSimplified = Amt1 == Amt->getOperand(i);
19180 for (unsigned j=4; j != 8 && CanBeSimplified; ++j)
19181 CanBeSimplified = Amt2 == Amt->getOperand(j);
19185 if (CanBeSimplified && isa<ConstantSDNode>(Amt1) &&
19186 isa<ConstantSDNode>(Amt2)) {
19187 // Replace this node with two shifts followed by a MOVSS/MOVSD.
19188 EVT CastVT = MVT::v4i32;
19190 SDValue Splat1 = DAG.getConstant(cast<ConstantSDNode>(Amt1)->getAPIntValue(), VT);
19191 SDValue Shift1 = DAG.getNode(Op->getOpcode(), dl, VT, R, Splat1);
19193 SDValue Splat2 = DAG.getConstant(cast<ConstantSDNode>(Amt2)->getAPIntValue(), VT);
19194 SDValue Shift2 = DAG.getNode(Op->getOpcode(), dl, VT, R, Splat2);
19195 if (TargetOpcode == X86ISD::MOVSD)
19196 CastVT = MVT::v2i64;
19197 SDValue BitCast1 = DAG.getNode(ISD::BITCAST, dl, CastVT, Shift1);
19198 SDValue BitCast2 = DAG.getNode(ISD::BITCAST, dl, CastVT, Shift2);
19199 SDValue Result = getTargetShuffleNode(TargetOpcode, dl, CastVT, BitCast2, BitCast1, DAG);
19201 return DAG.getNode(ISD::BITCAST, dl, VT, Result);
19205 if (VT == MVT::v16i8 && Op->getOpcode() == ISD::SHL) {
19206 assert(Subtarget->hasSSE2() && "Need SSE2 for pslli/pcmpeq.");
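// The per-byte shift is emulated with three rounds of conditional shifts: the
// shift amount is moved into the top bit of each byte (the shl by 5 below),
// and each round uses that bit as a VSELECT mask to optionally shift the data
// by 4, then 2, then 1; after each round the amount is doubled (Op + Op) so
// the next lower bit of the amount moves into the selector position.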
19209 Op = DAG.getNode(ISD::SHL, dl, VT, Amt, DAG.getConstant(5, VT));
19210 Op = DAG.getNode(ISD::BITCAST, dl, VT, Op);
19212 // Turn 'a' into a mask suitable for VSELECT
19213 SDValue VSelM = DAG.getConstant(0x80, VT);
19214 SDValue OpVSel = DAG.getNode(ISD::AND, dl, VT, VSelM, Op);
19215 OpVSel = DAG.getNode(X86ISD::PCMPEQ, dl, VT, OpVSel, VSelM);
19217 SDValue CM1 = DAG.getConstant(0x0f, VT);
19218 SDValue CM2 = DAG.getConstant(0x3f, VT);
19220 // r = VSELECT(r, psllw(r & (char16)15, 4), a);
19221 SDValue M = DAG.getNode(ISD::AND, dl, VT, R, CM1);
19222 M = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, MVT::v8i16, M, 4, DAG);
19223 M = DAG.getNode(ISD::BITCAST, dl, VT, M);
19224 R = DAG.getNode(ISD::VSELECT, dl, VT, OpVSel, M, R);
19227 Op = DAG.getNode(ISD::ADD, dl, VT, Op, Op);
19228 OpVSel = DAG.getNode(ISD::AND, dl, VT, VSelM, Op);
19229 OpVSel = DAG.getNode(X86ISD::PCMPEQ, dl, VT, OpVSel, VSelM);
19231 // r = VSELECT(r, psllw(r & (char16)63, 2), a);
19232 M = DAG.getNode(ISD::AND, dl, VT, R, CM2);
19233 M = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, MVT::v8i16, M, 2, DAG);
19234 M = DAG.getNode(ISD::BITCAST, dl, VT, M);
19235 R = DAG.getNode(ISD::VSELECT, dl, VT, OpVSel, M, R);
19238 Op = DAG.getNode(ISD::ADD, dl, VT, Op, Op);
19239 OpVSel = DAG.getNode(ISD::AND, dl, VT, VSelM, Op);
19240 OpVSel = DAG.getNode(X86ISD::PCMPEQ, dl, VT, OpVSel, VSelM);
19242 // return VSELECT(r, r+r, a);
19243 R = DAG.getNode(ISD::VSELECT, dl, VT, OpVSel,
19244 DAG.getNode(ISD::ADD, dl, VT, R, R), R);
19248 // It's worth extending once and using the v8i32 shifts for 16-bit types, but
19249 // the extra overheads to get from v16i8 to v8i32 make the existing SSE
19250 // solution better.
19251 if (Subtarget->hasInt256() && VT == MVT::v8i16) {
19252 MVT NewVT = VT == MVT::v8i16 ? MVT::v8i32 : MVT::v16i16;
19254 unsigned ExtOpc = Op.getOpcode() == ISD::SRA ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
19255 R = DAG.getNode(ExtOpc, dl, NewVT, R);
19256 Amt = DAG.getNode(ISD::ANY_EXTEND, dl, NewVT, Amt);
19257 return DAG.getNode(ISD::TRUNCATE, dl, VT,
19258 DAG.getNode(Op.getOpcode(), dl, NewVT, R, Amt));
19261 // Decompose 256-bit shifts into smaller 128-bit shifts.
19262 if (VT.is256BitVector()) {
19263 unsigned NumElems = VT.getVectorNumElements();
19264 MVT EltVT = VT.getVectorElementType();
19265 EVT NewVT = MVT::getVectorVT(EltVT, NumElems/2);
19267 // Extract the two vectors
19268 SDValue V1 = Extract128BitVector(R, 0, DAG, dl);
19269 SDValue V2 = Extract128BitVector(R, NumElems/2, DAG, dl);
19271 // Recreate the shift amount vectors
19272 SDValue Amt1, Amt2;
19273 if (Amt.getOpcode() == ISD::BUILD_VECTOR) {
19274 // Constant shift amount
19275 SmallVector<SDValue, 4> Amt1Csts;
19276 SmallVector<SDValue, 4> Amt2Csts;
19277 for (unsigned i = 0; i != NumElems/2; ++i)
19278 Amt1Csts.push_back(Amt->getOperand(i));
19279 for (unsigned i = NumElems/2; i != NumElems; ++i)
19280 Amt2Csts.push_back(Amt->getOperand(i));
19282 Amt1 = DAG.getNode(ISD::BUILD_VECTOR, dl, NewVT, Amt1Csts);
19283 Amt2 = DAG.getNode(ISD::BUILD_VECTOR, dl, NewVT, Amt2Csts);
19285 // Variable shift amount
19286 Amt1 = Extract128BitVector(Amt, 0, DAG, dl);
19287 Amt2 = Extract128BitVector(Amt, NumElems/2, DAG, dl);
19290 // Issue new vector shifts for the smaller types
19291 V1 = DAG.getNode(Op.getOpcode(), dl, NewVT, V1, Amt1);
19292 V2 = DAG.getNode(Op.getOpcode(), dl, NewVT, V2, Amt2);
19294 // Concatenate the result back
19295 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, V1, V2);
19301 static SDValue LowerXALUO(SDValue Op, SelectionDAG &DAG) {
19302 // Lower the "add/sub/mul with overflow" instruction into a regular instruction plus
19303 // a "setcc" instruction that checks the overflow flag. The "brcond" lowering
19304 // looks for this combo and may remove the "setcc" instruction if the "setcc"
19305 // has only one use.
19306 SDNode *N = Op.getNode();
19307 SDValue LHS = N->getOperand(0);
19308 SDValue RHS = N->getOperand(1);
19309 unsigned BaseOp = 0;
19312 switch (Op.getOpcode()) {
19313 default: llvm_unreachable("Unknown ovf instruction!");
19315 // An add of one will be selected as an INC. Note that INC doesn't
19316 // set CF, so we can't do this for UADDO.
19317 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS))
19319 BaseOp = X86ISD::INC;
19320 Cond = X86::COND_O;
19323 BaseOp = X86ISD::ADD;
19324 Cond = X86::COND_O;
19327 BaseOp = X86ISD::ADD;
19328 Cond = X86::COND_B;
19331 // A subtract of one will be selected as a DEC. Note that DEC doesn't
19332 // set CF, so we can't do this for USUBO.
19333 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS))
19335 BaseOp = X86ISD::DEC;
19336 Cond = X86::COND_O;
19339 BaseOp = X86ISD::SUB;
19340 Cond = X86::COND_O;
19343 BaseOp = X86ISD::SUB;
19344 Cond = X86::COND_B;
19347 BaseOp = N->getValueType(0) == MVT::i8 ? X86ISD::SMUL8 : X86ISD::SMUL;
19348 Cond = X86::COND_O;
19350 case ISD::UMULO: { // i64, i8 = umulo lhs, rhs --> i64, i64, i32 umul lhs,rhs
19351 if (N->getValueType(0) == MVT::i8) {
19352 BaseOp = X86ISD::UMUL8;
19353 Cond = X86::COND_O;
19356 SDVTList VTs = DAG.getVTList(N->getValueType(0), N->getValueType(0),
19358 SDValue Sum = DAG.getNode(X86ISD::UMUL, DL, VTs, LHS, RHS);
19361 SDValue SetCC = DAG.getNode(X86ISD::SETCC, DL, MVT::i8,
19362 DAG.getConstant(X86::COND_O, MVT::i32),
19363 SDValue(Sum.getNode(), 2));
19365 return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(), Sum, SetCC);
19369 // Also sets EFLAGS.
19370 SDVTList VTs = DAG.getVTList(N->getValueType(0), MVT::i32);
19371 SDValue Sum = DAG.getNode(BaseOp, DL, VTs, LHS, RHS);
19374 SDValue SetCC = DAG.getNode(X86ISD::SETCC, DL, N->getValueType(1),
19375 DAG.getConstant(Cond, MVT::i32),
19376 SDValue(Sum.getNode(), 1));
19378 return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(), Sum, SetCC);
19381 // Sign extension of the low part of vector elements. This may be used either
19382 // when sign extend instructions are not available or if the vector element
19383 // sizes already match the sign-extended size. If the vector elements are in
19384 // their pre-extended size and sign extend instructions are available, that will
19385 // be handled by LowerSIGN_EXTEND.
19386 SDValue X86TargetLowering::LowerSIGN_EXTEND_INREG(SDValue Op,
19387 SelectionDAG &DAG) const {
19389 EVT ExtraVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
19390 MVT VT = Op.getSimpleValueType();
19392 if (!Subtarget->hasSSE2() || !VT.isVector())
19393 return SDValue();
19395 unsigned BitsDiff = VT.getScalarType().getSizeInBits() -
19396 ExtraVT.getScalarType().getSizeInBits();
19398 switch (VT.SimpleTy) {
19399 default: return SDValue();
19400 case MVT::v8i32:
19401 case MVT::v16i16:
19402 if (!Subtarget->hasFp256())
19403 return SDValue();
19404 if (!Subtarget->hasInt256()) {
19405 // needs to be split
19406 unsigned NumElems = VT.getVectorNumElements();
19408 // Extract the LHS vectors
19409 SDValue LHS = Op.getOperand(0);
19410 SDValue LHS1 = Extract128BitVector(LHS, 0, DAG, dl);
19411 SDValue LHS2 = Extract128BitVector(LHS, NumElems/2, DAG, dl);
19413 MVT EltVT = VT.getVectorElementType();
19414 EVT NewVT = MVT::getVectorVT(EltVT, NumElems/2);
19416 EVT ExtraEltVT = ExtraVT.getVectorElementType();
19417 unsigned ExtraNumElems = ExtraVT.getVectorNumElements();
19418 ExtraVT = EVT::getVectorVT(*DAG.getContext(), ExtraEltVT,
19420 SDValue Extra = DAG.getValueType(ExtraVT);
19422 LHS1 = DAG.getNode(Op.getOpcode(), dl, NewVT, LHS1, Extra);
19423 LHS2 = DAG.getNode(Op.getOpcode(), dl, NewVT, LHS2, Extra);
19425 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, LHS1, LHS2);
19430 SDValue Op0 = Op.getOperand(0);
19432 // This is a sign extension of some low part of vector elements without
19433 // changing the size of the vector elements themselves:
19434 // Shift-Left + Shift-Right-Algebraic.
19435 SDValue Shl = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, VT, Op0,
19437 return getTargetVShiftByConstNode(X86ISD::VSRAI, dl, VT, Shl, BitsDiff,
19443 /// Returns true if the operand type is exactly twice the native width, and
19444 /// the corresponding cmpxchg8b or cmpxchg16b instruction is available.
19445 /// Used to know whether to use cmpxchg8/16b when expanding atomic operations
19446 /// (otherwise we leave them alone to become __sync_fetch_and_... calls).
19447 bool X86TargetLowering::needsCmpXchgNb(const Type *MemType) const {
19448 unsigned OpWidth = MemType->getPrimitiveSizeInBits();
19451 if (OpWidth == 64) return !Subtarget->is64Bit(); // FIXME this should be Subtarget.hasCmpxchg8b
19452 else if (OpWidth == 128)
19453 return Subtarget->hasCmpxchg16b();
19458 bool X86TargetLowering::shouldExpandAtomicStoreInIR(StoreInst *SI) const {
19459 return needsCmpXchgNb(SI->getValueOperand()->getType());
19462 // Note: this turns large loads into lock cmpxchg8b/16b.
19463 // FIXME: On 32-bit x86, fild/movq might be faster than lock cmpxchg8b.
19464 bool X86TargetLowering::shouldExpandAtomicLoadInIR(LoadInst *LI) const {
19465 auto PTy = cast<PointerType>(LI->getPointerOperand()->getType());
19466 return needsCmpXchgNb(PTy->getElementType());
19469 bool X86TargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
19470 unsigned NativeWidth = Subtarget->is64Bit() ? 64 : 32;
19471 const Type *MemType = AI->getType();
19473 // If the operand is too big, we must see if cmpxchg8/16b is available
19474 // and default to library calls otherwise.
19475 if (MemType->getPrimitiveSizeInBits() > NativeWidth)
19476 return needsCmpXchgNb(MemType);
19478 AtomicRMWInst::BinOp Op = AI->getOperation();
19481 llvm_unreachable("Unknown atomic operation");
19482 case AtomicRMWInst::Xchg:
19483 case AtomicRMWInst::Add:
19484 case AtomicRMWInst::Sub:
19485 // It's better to use xadd, xsub or xchg for these in all cases.
19486 return false;
19487 case AtomicRMWInst::Or:
19488 case AtomicRMWInst::And:
19489 case AtomicRMWInst::Xor:
19490 // If the atomicrmw's result isn't actually used, we can just add a "lock"
19491 // prefix to a normal instruction for these operations.
19492 return !AI->use_empty();
19493 case AtomicRMWInst::Nand:
19494 case AtomicRMWInst::Max:
19495 case AtomicRMWInst::Min:
19496 case AtomicRMWInst::UMax:
19497 case AtomicRMWInst::UMin:
19498 // These always require a non-trivial set of data operations on x86. We must
19499 // use a cmpxchg loop.
19500 return true;
19501 }
19502 }
19504 static bool hasMFENCE(const X86Subtarget& Subtarget) {
19505 // Use mfence if we have SSE2 or we're on x86-64 (even if we asked for
19506 // no-sse2). There isn't any reason to disable it if the target processor supports it.
19508 return Subtarget.hasSSE2() || Subtarget.is64Bit();
19512 X86TargetLowering::lowerIdempotentRMWIntoFencedLoad(AtomicRMWInst *AI) const {
19513 unsigned NativeWidth = Subtarget->is64Bit() ? 64 : 32;
19514 const Type *MemType = AI->getType();
19515 // Accesses larger than the native width are turned into cmpxchg/libcalls, so
19516 // there is no benefit in turning such RMWs into loads, and it is actually
19517 // harmful as it introduces a mfence.
19518 if (MemType->getPrimitiveSizeInBits() > NativeWidth)
19521 auto Builder = IRBuilder<>(AI);
19522 Module *M = Builder.GetInsertBlock()->getParent()->getParent();
19523 auto SynchScope = AI->getSynchScope();
19524 // We must restrict the ordering to avoid generating loads with Release or
19525 // ReleaseAcquire orderings.
19526 auto Order = AtomicCmpXchgInst::getStrongestFailureOrdering(AI->getOrdering());
19527 auto Ptr = AI->getPointerOperand();
19529 // Before the load we need a fence. Here is an example lifted from
19530 // http://www.hpl.hp.com/techreports/2012/HPL-2012-68.pdf showing why a fence
19533 // x.store(1, relaxed);
19534 // r1 = y.fetch_add(0, release);
19536 // y.fetch_add(42, acquire);
19537 // r2 = x.load(relaxed);
19538 // r1 = r2 = 0 is impossible, but becomes possible if the idempotent rmw is
19539 // lowered to just a load without a fence. A mfence flushes the store buffer,
19540 // making the optimization clearly correct.
19541 // FIXME: it is required if isAtLeastRelease(Order) but it is not clear
19542 // otherwise, we might be able to be more aggressive on relaxed idempotent
19543 // rmw. In practice, they do not look useful, so we don't try to be
19544 // especially clever.
19545 if (SynchScope == SingleThread) {
19546 // FIXME: we could just insert an X86ISD::MEMBARRIER here, except we are at
19547 // the IR level, so we must wrap it in an intrinsic.
19548 return nullptr;
19549 } else if (hasMFENCE(*Subtarget)) {
19550 Function *MFence = llvm::Intrinsic::getDeclaration(M,
19551 Intrinsic::x86_sse2_mfence);
19552 Builder.CreateCall(MFence);
19554 // FIXME: it might make sense to use a locked operation here but on a
19555 // different cache-line to prevent cache-line bouncing. In practice it
19556 // is probably a small win, and x86 processors without mfence are rare
19557 // enough that we do not bother.
19561 // Finally we can emit the atomic load.
19562 LoadInst *Loaded = Builder.CreateAlignedLoad(Ptr,
19563 AI->getType()->getPrimitiveSizeInBits());
19564 Loaded->setAtomic(Order, SynchScope);
19565 AI->replaceAllUsesWith(Loaded);
19566 AI->eraseFromParent();
19570 static SDValue LowerATOMIC_FENCE(SDValue Op, const X86Subtarget *Subtarget,
19571 SelectionDAG &DAG) {
19573 AtomicOrdering FenceOrdering = static_cast<AtomicOrdering>(
19574 cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue());
19575 SynchronizationScope FenceScope = static_cast<SynchronizationScope>(
19576 cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue());
19578 // The only fence that needs an instruction is a sequentially-consistent
19579 // cross-thread fence.
19580 if (FenceOrdering == SequentiallyConsistent && FenceScope == CrossThread) {
19581 if (hasMFENCE(*Subtarget))
19582 return DAG.getNode(X86ISD::MFENCE, dl, MVT::Other, Op.getOperand(0));
19584 SDValue Chain = Op.getOperand(0);
19585 SDValue Zero = DAG.getConstant(0, MVT::i32);
19587 SDValue Ops[] = { DAG.getRegister(X86::ESP, MVT::i32), // Base
19588 DAG.getTargetConstant(1, MVT::i8), // Scale
19589 DAG.getRegister(0, MVT::i32), // Index
19590 DAG.getTargetConstant(0, MVT::i32), // Disp
19591 DAG.getRegister(0, MVT::i32), /* Segment */ Zero, Chain };
19595 SDNode *Res = DAG.getMachineNode(X86::OR32mrLocked, dl, MVT::Other, Ops);
19596 return SDValue(Res, 0);
19599 // MEMBARRIER is a compiler barrier; it codegens to a no-op.
19600 return DAG.getNode(X86ISD::MEMBARRIER, dl, MVT::Other, Op.getOperand(0));
19603 static SDValue LowerCMP_SWAP(SDValue Op, const X86Subtarget *Subtarget,
19604 SelectionDAG &DAG) {
19605 MVT T = Op.getSimpleValueType();
19609 switch(T.SimpleTy) {
19610 default: llvm_unreachable("Invalid value type!");
19611 case MVT::i8: Reg = X86::AL; size = 1; break;
19612 case MVT::i16: Reg = X86::AX; size = 2; break;
19613 case MVT::i32: Reg = X86::EAX; size = 4; break;
19614 case MVT::i64:
19615 assert(Subtarget->is64Bit() && "Node not type legal!");
19616 Reg = X86::RAX; size = 8;
19617 break;
19618 }
19619 SDValue cpIn = DAG.getCopyToReg(Op.getOperand(0), DL, Reg,
19620 Op.getOperand(2), SDValue());
19621 SDValue Ops[] = { cpIn.getValue(0),
19622 Op.getOperand(1),
19623 Op.getOperand(3),
19624 DAG.getTargetConstant(size, MVT::i8),
19625 cpIn.getValue(1) };
19626 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue);
19627 MachineMemOperand *MMO = cast<AtomicSDNode>(Op)->getMemOperand();
19628 SDValue Result = DAG.getMemIntrinsicNode(X86ISD::LCMPXCHG_DAG, DL, Tys, Ops, T, MMO);
19632 SDValue cpOut = DAG.getCopyFromReg(Result.getValue(0), DL, Reg, T, Result.getValue(1));
19633 SDValue EFLAGS = DAG.getCopyFromReg(cpOut.getValue(1), DL, X86::EFLAGS,
19634 MVT::i32, cpOut.getValue(2));
19635 SDValue Success = DAG.getNode(X86ISD::SETCC, DL, Op->getValueType(1),
19636 DAG.getConstant(X86::COND_E, MVT::i8), EFLAGS);
19638 DAG.ReplaceAllUsesOfValueWith(Op.getValue(0), cpOut);
19639 DAG.ReplaceAllUsesOfValueWith(Op.getValue(1), Success);
19640 DAG.ReplaceAllUsesOfValueWith(Op.getValue(2), EFLAGS.getValue(1));
19644 static SDValue LowerBITCAST(SDValue Op, const X86Subtarget *Subtarget,
19645 SelectionDAG &DAG) {
19646 MVT SrcVT = Op.getOperand(0).getSimpleValueType();
19647 MVT DstVT = Op.getSimpleValueType();
19649 if (SrcVT == MVT::v2i32 || SrcVT == MVT::v4i16 || SrcVT == MVT::v8i8) {
19650 assert(Subtarget->hasSSE2() && "Requires at least SSE2!");
19651 if (DstVT != MVT::f64)
19652 // This conversion needs to be expanded.
19653 return SDValue();
19655 SDValue InVec = Op->getOperand(0);
19657 unsigned NumElts = SrcVT.getVectorNumElements();
19658 EVT SVT = SrcVT.getVectorElementType();
19660 // Widen the vector in input in the case of MVT::v2i32.
19661 // Example: from MVT::v2i32 to MVT::v4i32.
19662 SmallVector<SDValue, 16> Elts;
19663 for (unsigned i = 0, e = NumElts; i != e; ++i)
19664 Elts.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, SVT, InVec,
19665 DAG.getIntPtrConstant(i)));
19667 // Explicitly mark the extra elements as Undef.
19668 Elts.append(NumElts, DAG.getUNDEF(SVT));
19670 EVT NewVT = EVT::getVectorVT(*DAG.getContext(), SVT, NumElts * 2);
19671 SDValue BV = DAG.getNode(ISD::BUILD_VECTOR, dl, NewVT, Elts);
19672 SDValue ToV2F64 = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, BV);
19673 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, ToV2F64,
19674 DAG.getIntPtrConstant(0));
19677 assert(Subtarget->is64Bit() && !Subtarget->hasSSE2() &&
19678 Subtarget->hasMMX() && "Unexpected custom BITCAST");
19679 assert((DstVT == MVT::i64 ||
19680 (DstVT.isVector() && DstVT.getSizeInBits()==64)) &&
19681 "Unexpected custom BITCAST");
19682 // i64 <=> MMX conversions are Legal.
19683 if (SrcVT==MVT::i64 && DstVT.isVector())
19685 if (DstVT==MVT::i64 && SrcVT.isVector())
19687 // MMX <=> MMX conversions are Legal.
19688 if (SrcVT.isVector() && DstVT.isVector())
19690 // All other conversions need to be expanded.
19694 static SDValue LowerCTPOP(SDValue Op, const X86Subtarget *Subtarget,
19695 SelectionDAG &DAG) {
19696 SDNode *Node = Op.getNode();
19699 Op = Op.getOperand(0);
19700 EVT VT = Op.getValueType();
19701 assert((VT.is128BitVector() || VT.is256BitVector()) &&
19702 "CTPOP lowering only implemented for 128/256-bit wide vector types");
19704 unsigned NumElts = VT.getVectorNumElements();
19705 EVT EltVT = VT.getVectorElementType();
19706 unsigned Len = EltVT.getSizeInBits();
19708 // This is the vectorized version of the "best" algorithm from
19709 // http://graphics.stanford.edu/~seander/bithacks.html#CountBitsSetParallel
19710 // with a minor tweak to use a series of adds + shifts instead of vector
19711 // multiplications. Implemented for the v2i64, v4i64, v4i32, v8i32 types:
19713 // v2i64, v4i64, v4i32 => Only profitable w/ popcnt disabled
19714 // v8i32 => Always profitable
19716 // FIXME: There are a couple of possible improvements:
19718 // 1) Support for i8 and i16 vectors (needs measurements if popcnt enabled).
19719 // 2) Use strategies from http://wm.ite.pl/articles/sse-popcount.html
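// For reference, the scalar form of the algorithm being vectorized here is
// (for a 32-bit value v):
//   v = v - ((v >> 1) & 0x55555555);
//   v = (v & 0x33333333) + ((v >> 2) & 0x33333333);
//   v = (v + (v >> 4)) & 0x0F0F0F0F;
//   v = (v * 0x01010101) >> 24;   // the multiply is replaced below by adds + shifts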
19721 assert(EltVT.isInteger() && (Len == 32 || Len == 64) && Len % 8 == 0 &&
19722 "CTPOP not implemented for this vector element type.");
19724 // X86 canonicalizes ANDs to vXi64; generate the appropriate bitcasts to avoid
19725 // extra legalization.
19726 bool NeedsBitcast = EltVT == MVT::i32;
19727 MVT BitcastVT = VT.is256BitVector() ? MVT::v4i64 : MVT::v2i64;
19729 SDValue Cst55 = DAG.getConstant(APInt::getSplat(Len, APInt(8, 0x55)), EltVT);
19730 SDValue Cst33 = DAG.getConstant(APInt::getSplat(Len, APInt(8, 0x33)), EltVT);
19731 SDValue Cst0F = DAG.getConstant(APInt::getSplat(Len, APInt(8, 0x0F)), EltVT);
19733 // v = v - ((v >> 1) & 0x55555555...)
19734 SmallVector<SDValue, 8> Ones(NumElts, DAG.getConstant(1, EltVT));
19735 SDValue OnesV = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Ones);
19736 SDValue Srl = DAG.getNode(ISD::SRL, dl, VT, Op, OnesV);
19738 Srl = DAG.getNode(ISD::BITCAST, dl, BitcastVT, Srl);
19740 SmallVector<SDValue, 8> Mask55(NumElts, Cst55);
19741 SDValue M55 = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Mask55);
19743 M55 = DAG.getNode(ISD::BITCAST, dl, BitcastVT, M55);
19745 SDValue And = DAG.getNode(ISD::AND, dl, Srl.getValueType(), Srl, M55);
19746 if (VT != And.getValueType())
19747 And = DAG.getNode(ISD::BITCAST, dl, VT, And);
19748 SDValue Sub = DAG.getNode(ISD::SUB, dl, VT, Op, And);
19750 // v = (v & 0x33333333...) + ((v >> 2) & 0x33333333...)
19751 SmallVector<SDValue, 8> Mask33(NumElts, Cst33);
19752 SDValue M33 = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Mask33);
19753 SmallVector<SDValue, 8> Twos(NumElts, DAG.getConstant(2, EltVT));
19754 SDValue TwosV = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Twos);
19756 Srl = DAG.getNode(ISD::SRL, dl, VT, Sub, TwosV);
19757 if (NeedsBitcast) {
19758 Srl = DAG.getNode(ISD::BITCAST, dl, BitcastVT, Srl);
19759 M33 = DAG.getNode(ISD::BITCAST, dl, BitcastVT, M33);
19760 Sub = DAG.getNode(ISD::BITCAST, dl, BitcastVT, Sub);
19763 SDValue AndRHS = DAG.getNode(ISD::AND, dl, M33.getValueType(), Srl, M33);
19764 SDValue AndLHS = DAG.getNode(ISD::AND, dl, M33.getValueType(), Sub, M33);
19765 if (VT != AndRHS.getValueType()) {
19766 AndRHS = DAG.getNode(ISD::BITCAST, dl, VT, AndRHS);
19767 AndLHS = DAG.getNode(ISD::BITCAST, dl, VT, AndLHS);
19769 SDValue Add = DAG.getNode(ISD::ADD, dl, VT, AndLHS, AndRHS);
19771 // v = (v + (v >> 4)) & 0x0F0F0F0F...
19772 SmallVector<SDValue, 8> Fours(NumElts, DAG.getConstant(4, EltVT));
19773 SDValue FoursV = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Fours);
19774 Srl = DAG.getNode(ISD::SRL, dl, VT, Add, FoursV);
19775 Add = DAG.getNode(ISD::ADD, dl, VT, Add, Srl);
19777 SmallVector<SDValue, 8> Mask0F(NumElts, Cst0F);
19778 SDValue M0F = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Mask0F);
19779 if (NeedsBitcast) {
19780 Add = DAG.getNode(ISD::BITCAST, dl, BitcastVT, Add);
19781 M0F = DAG.getNode(ISD::BITCAST, dl, BitcastVT, M0F);
19783 And = DAG.getNode(ISD::AND, dl, M0F.getValueType(), Add, M0F);
19784 if (VT != And.getValueType())
19785 And = DAG.getNode(ISD::BITCAST, dl, VT, And);
19787 // The algorithm mentioned above uses:
19788 // v = (v * 0x01010101...) >> (Len - 8)
19790 // Change it to use vector adds + vector shifts which yield faster results on
19791 // Haswell than using vector integer multiplication.
19793 // For i32 elements:
19794 // v = v + (v >> 8)
19795 // v = v + (v >> 16)
19797 // For i64 elements:
19798 // v = v + (v >> 8)
19799 // v = v + (v >> 16)
19800 // v = v + (v >> 32)
19803 SmallVector<SDValue, 8> Csts;
19804 for (unsigned i = 8; i <= Len/2; i *= 2) {
19805 Csts.assign(NumElts, DAG.getConstant(i, EltVT));
19806 SDValue CstsV = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Csts);
19807 Srl = DAG.getNode(ISD::SRL, dl, VT, Add, CstsV);
19808 Add = DAG.getNode(ISD::ADD, dl, VT, Add, Srl);
19812 // The result is in the least significant 6 bits for i32 and 7 bits for i64.
19813 SDValue Cst3F = DAG.getConstant(APInt(Len, Len == 32 ? 0x3F : 0x7F), EltVT);
19814 SmallVector<SDValue, 8> Cst3FV(NumElts, Cst3F);
19815 SDValue M3F = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Cst3FV);
19816 if (NeedsBitcast) {
19817 Add = DAG.getNode(ISD::BITCAST, dl, BitcastVT, Add);
19818 M3F = DAG.getNode(ISD::BITCAST, dl, BitcastVT, M3F);
19820 And = DAG.getNode(ISD::AND, dl, M3F.getValueType(), Add, M3F);
19821 if (VT != And.getValueType())
19822 And = DAG.getNode(ISD::BITCAST, dl, VT, And);
19827 static SDValue LowerLOAD_SUB(SDValue Op, SelectionDAG &DAG) {
19828 SDNode *Node = Op.getNode();
19830 EVT T = Node->getValueType(0);
19831 SDValue negOp = DAG.getNode(ISD::SUB, dl, T,
19832 DAG.getConstant(0, T), Node->getOperand(2));
19833 return DAG.getAtomic(ISD::ATOMIC_LOAD_ADD, dl,
19834 cast<AtomicSDNode>(Node)->getMemoryVT(),
19835 Node->getOperand(0),
19836 Node->getOperand(1), negOp,
19837 cast<AtomicSDNode>(Node)->getMemOperand(),
19838 cast<AtomicSDNode>(Node)->getOrdering(),
19839 cast<AtomicSDNode>(Node)->getSynchScope());
19842 static SDValue LowerATOMIC_STORE(SDValue Op, SelectionDAG &DAG) {
19843 SDNode *Node = Op.getNode();
19845 EVT VT = cast<AtomicSDNode>(Node)->getMemoryVT();
19847 // Convert seq_cst store -> xchg
19848 // Convert wide store -> swap (-> cmpxchg8b/cmpxchg16b)
19849 // FIXME: On 32-bit, store -> fist or movq would be more efficient
19850 // (The only way to get a 16-byte store is cmpxchg16b)
19851 // FIXME: 16-byte ATOMIC_SWAP isn't actually hooked up at the moment.
19852 if (cast<AtomicSDNode>(Node)->getOrdering() == SequentiallyConsistent ||
19853 !DAG.getTargetLoweringInfo().isTypeLegal(VT)) {
19854 SDValue Swap = DAG.getAtomic(ISD::ATOMIC_SWAP, dl,
19855 cast<AtomicSDNode>(Node)->getMemoryVT(),
19856 Node->getOperand(0),
19857 Node->getOperand(1), Node->getOperand(2),
19858 cast<AtomicSDNode>(Node)->getMemOperand(),
19859 cast<AtomicSDNode>(Node)->getOrdering(),
19860 cast<AtomicSDNode>(Node)->getSynchScope());
19861 return Swap.getValue(1);
19863 // Other atomic stores have a simple pattern.
19867 static SDValue LowerADDC_ADDE_SUBC_SUBE(SDValue Op, SelectionDAG &DAG) {
19868 EVT VT = Op.getNode()->getSimpleValueType(0);
19870 // Let legalize expand this if it isn't a legal type yet.
19871 if (!DAG.getTargetLoweringInfo().isTypeLegal(VT))
19872 return SDValue();
19874 SDVTList VTs = DAG.getVTList(VT, MVT::i32);
19876 unsigned Opc;
19877 bool ExtraOp = false;
19878 switch (Op.getOpcode()) {
19879 default: llvm_unreachable("Invalid code");
19880 case ISD::ADDC: Opc = X86ISD::ADD; break;
19881 case ISD::ADDE: Opc = X86ISD::ADC; ExtraOp = true; break;
19882 case ISD::SUBC: Opc = X86ISD::SUB; break;
19883 case ISD::SUBE: Opc = X86ISD::SBB; ExtraOp = true; break;
19884 }
19886 if (!ExtraOp)
19887 return DAG.getNode(Opc, SDLoc(Op), VTs, Op.getOperand(0), Op.getOperand(1));
19889 return DAG.getNode(Opc, SDLoc(Op), VTs, Op.getOperand(0),
19890 Op.getOperand(1), Op.getOperand(2));
19893 static SDValue LowerFSINCOS(SDValue Op, const X86Subtarget *Subtarget,
19894 SelectionDAG &DAG) {
19895 assert(Subtarget->isTargetDarwin() && Subtarget->is64Bit());
19897 // For MacOSX, we want to call an alternative entry point: __sincos_stret,
19898 // which returns the values as { float, float } (in XMM0) or
19899 // { double, double } (which is returned in XMM0, XMM1).
19901 SDValue Arg = Op.getOperand(0);
19902 EVT ArgVT = Arg.getValueType();
19903 Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
19905 TargetLowering::ArgListTy Args;
19906 TargetLowering::ArgListEntry Entry;
19908 Entry.Node = Arg;
19909 Entry.Ty = ArgTy;
19910 Entry.isSExt = false;
19911 Entry.isZExt = false;
19912 Args.push_back(Entry);
19914 bool isF64 = ArgVT == MVT::f64;
19915 // Only optimize x86_64 for now. i386 is a bit messy. For f32,
19916 // the small struct {f32, f32} is returned in (eax, edx). For f64,
19917 // the results are returned via SRet in memory.
19918 const char *LibcallName = isF64 ? "__sincos_stret" : "__sincosf_stret";
19919 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
19920 SDValue Callee = DAG.getExternalSymbol(LibcallName, TLI.getPointerTy());
19922 Type *RetTy = isF64
19923 ? (Type*)StructType::get(ArgTy, ArgTy, nullptr)
19924 : (Type*)VectorType::get(ArgTy, 4);
19926 TargetLowering::CallLoweringInfo CLI(DAG);
19927 CLI.setDebugLoc(dl).setChain(DAG.getEntryNode())
19928 .setCallee(CallingConv::C, RetTy, Callee, std::move(Args), 0);
19930 std::pair<SDValue, SDValue> CallResult = TLI.LowerCallTo(CLI);
19932 if (isF64)
19933 // Returned in xmm0 and xmm1.
19934 return CallResult.first;
19936 // Returned in bits 0:31 and 32:64 xmm0.
19937 SDValue SinVal = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, ArgVT,
19938 CallResult.first, DAG.getIntPtrConstant(0));
19939 SDValue CosVal = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, ArgVT,
19940 CallResult.first, DAG.getIntPtrConstant(1));
19941 SDVTList Tys = DAG.getVTList(ArgVT, ArgVT);
19942 return DAG.getNode(ISD::MERGE_VALUES, dl, Tys, SinVal, CosVal);
19945 /// LowerOperation - Provide custom lowering hooks for some operations.
19947 SDValue X86TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
19948 switch (Op.getOpcode()) {
19949 default: llvm_unreachable("Should not custom lower this!");
19950 case ISD::SIGN_EXTEND_INREG: return LowerSIGN_EXTEND_INREG(Op,DAG);
19951 case ISD::ATOMIC_FENCE: return LowerATOMIC_FENCE(Op, Subtarget, DAG);
19952 case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS:
19953 return LowerCMP_SWAP(Op, Subtarget, DAG);
19954 case ISD::CTPOP: return LowerCTPOP(Op, Subtarget, DAG);
19955 case ISD::ATOMIC_LOAD_SUB: return LowerLOAD_SUB(Op,DAG);
19956 case ISD::ATOMIC_STORE: return LowerATOMIC_STORE(Op,DAG);
19957 case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG);
19958 case ISD::CONCAT_VECTORS: return LowerCONCAT_VECTORS(Op, DAG);
19959 case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG);
19960 case ISD::VSELECT: return LowerVSELECT(Op, DAG);
19961 case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG);
19962 case ISD::INSERT_VECTOR_ELT: return LowerINSERT_VECTOR_ELT(Op, DAG);
19963 case ISD::EXTRACT_SUBVECTOR: return LowerEXTRACT_SUBVECTOR(Op,Subtarget,DAG);
19964 case ISD::INSERT_SUBVECTOR: return LowerINSERT_SUBVECTOR(Op, Subtarget,DAG);
19965 case ISD::SCALAR_TO_VECTOR: return LowerSCALAR_TO_VECTOR(Op, DAG);
19966 case ISD::ConstantPool: return LowerConstantPool(Op, DAG);
19967 case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG);
19968 case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG);
19969 case ISD::ExternalSymbol: return LowerExternalSymbol(Op, DAG);
19970 case ISD::BlockAddress: return LowerBlockAddress(Op, DAG);
19971 case ISD::SHL_PARTS:
19972 case ISD::SRA_PARTS:
19973 case ISD::SRL_PARTS: return LowerShiftParts(Op, DAG);
19974 case ISD::SINT_TO_FP: return LowerSINT_TO_FP(Op, DAG);
19975 case ISD::UINT_TO_FP: return LowerUINT_TO_FP(Op, DAG);
19976 case ISD::TRUNCATE: return LowerTRUNCATE(Op, DAG);
19977 case ISD::ZERO_EXTEND: return LowerZERO_EXTEND(Op, Subtarget, DAG);
19978 case ISD::SIGN_EXTEND: return LowerSIGN_EXTEND(Op, Subtarget, DAG);
19979 case ISD::ANY_EXTEND: return LowerANY_EXTEND(Op, Subtarget, DAG);
19980 case ISD::FP_TO_SINT: return LowerFP_TO_SINT(Op, DAG);
19981 case ISD::FP_TO_UINT: return LowerFP_TO_UINT(Op, DAG);
19982 case ISD::FP_EXTEND: return LowerFP_EXTEND(Op, DAG);
19983 case ISD::LOAD: return LowerExtendedLoad(Op, Subtarget, DAG);
19985 case ISD::FNEG: return LowerFABSorFNEG(Op, DAG);
19986 case ISD::FCOPYSIGN: return LowerFCOPYSIGN(Op, DAG);
19987 case ISD::FGETSIGN: return LowerFGETSIGN(Op, DAG);
19988 case ISD::SETCC: return LowerSETCC(Op, DAG);
19989 case ISD::SELECT: return LowerSELECT(Op, DAG);
19990 case ISD::BRCOND: return LowerBRCOND(Op, DAG);
19991 case ISD::JumpTable: return LowerJumpTable(Op, DAG);
19992 case ISD::VASTART: return LowerVASTART(Op, DAG);
19993 case ISD::VAARG: return LowerVAARG(Op, DAG);
19994 case ISD::VACOPY: return LowerVACOPY(Op, Subtarget, DAG);
19995 case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, Subtarget, DAG);
19996 case ISD::INTRINSIC_VOID:
19997 case ISD::INTRINSIC_W_CHAIN: return LowerINTRINSIC_W_CHAIN(Op, Subtarget, DAG);
19998 case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG);
19999 case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG);
20000 case ISD::FRAME_TO_ARGS_OFFSET:
20001 return LowerFRAME_TO_ARGS_OFFSET(Op, DAG);
20002 case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG);
20003 case ISD::EH_RETURN: return LowerEH_RETURN(Op, DAG);
20004 case ISD::EH_SJLJ_SETJMP: return lowerEH_SJLJ_SETJMP(Op, DAG);
20005 case ISD::EH_SJLJ_LONGJMP: return lowerEH_SJLJ_LONGJMP(Op, DAG);
20006 case ISD::INIT_TRAMPOLINE: return LowerINIT_TRAMPOLINE(Op, DAG);
20007 case ISD::ADJUST_TRAMPOLINE: return LowerADJUST_TRAMPOLINE(Op, DAG);
20008 case ISD::FLT_ROUNDS_: return LowerFLT_ROUNDS_(Op, DAG);
20009 case ISD::CTLZ: return LowerCTLZ(Op, DAG);
20010 case ISD::CTLZ_ZERO_UNDEF: return LowerCTLZ_ZERO_UNDEF(Op, DAG);
20011 case ISD::CTTZ: return LowerCTTZ(Op, DAG);
20012 case ISD::MUL: return LowerMUL(Op, Subtarget, DAG);
20013 case ISD::UMUL_LOHI:
20014 case ISD::SMUL_LOHI: return LowerMUL_LOHI(Op, Subtarget, DAG);
20017 case ISD::SHL: return LowerShift(Op, Subtarget, DAG);
20023 case ISD::UMULO: return LowerXALUO(Op, DAG);
20024 case ISD::READCYCLECOUNTER: return LowerREADCYCLECOUNTER(Op, Subtarget,DAG);
20025 case ISD::BITCAST: return LowerBITCAST(Op, Subtarget, DAG);
20029 case ISD::SUBE: return LowerADDC_ADDE_SUBC_SUBE(Op, DAG);
20030 case ISD::ADD: return LowerADD(Op, DAG);
20031 case ISD::SUB: return LowerSUB(Op, DAG);
20032 case ISD::FSINCOS: return LowerFSINCOS(Op, Subtarget, DAG);
20036 /// ReplaceNodeResults - Replace a node with an illegal result type
20037 /// with a new node built out of custom code.
20038 void X86TargetLowering::ReplaceNodeResults(SDNode *N,
20039 SmallVectorImpl<SDValue>&Results,
20040 SelectionDAG &DAG) const {
20042 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
20043 switch (N->getOpcode()) {
20045 llvm_unreachable("Do not know how to custom type legalize this operation!");
20046 // We might have generated v2f32 FMIN/FMAX operations. Widen them to v4f32.
20047 case X86ISD::FMINC:
20048 case X86ISD::FMIN:
20049 case X86ISD::FMAXC:
20050 case X86ISD::FMAX: {
20051 EVT VT = N->getValueType(0);
20052 if (VT != MVT::v2f32)
20053 llvm_unreachable("Unexpected type (!= v2f32) on FMIN/FMAX.");
20054 SDValue UNDEF = DAG.getUNDEF(VT);
20055 SDValue LHS = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4f32,
20056 N->getOperand(0), UNDEF);
20057 SDValue RHS = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4f32,
20058 N->getOperand(1), UNDEF);
20059 Results.push_back(DAG.getNode(N->getOpcode(), dl, MVT::v4f32, LHS, RHS));
20062 case ISD::SIGN_EXTEND_INREG:
20067 // We don't want to expand or promote these.
20074 case ISD::UDIVREM: {
20075 SDValue V = LowerWin64_i128OP(SDValue(N,0), DAG);
20076 Results.push_back(V);
20079 case ISD::FP_TO_SINT:
20080 case ISD::FP_TO_UINT: {
20081 bool IsSigned = N->getOpcode() == ISD::FP_TO_SINT;
20083 if (!IsSigned && !isIntegerTypeFTOL(SDValue(N, 0).getValueType()))
20084 return;
20086 std::pair<SDValue,SDValue> Vals =
20087 FP_TO_INTHelper(SDValue(N, 0), DAG, IsSigned, /*IsReplace=*/ true);
20088 SDValue FIST = Vals.first, StackSlot = Vals.second;
20089 if (FIST.getNode()) {
20090 EVT VT = N->getValueType(0);
20091 // Return a load from the stack slot.
20092 if (StackSlot.getNode())
20093 Results.push_back(DAG.getLoad(VT, dl, FIST, StackSlot,
20094 MachinePointerInfo(),
20095 false, false, false, 0));
20097 Results.push_back(FIST);
20101 case ISD::UINT_TO_FP: {
20102 assert(Subtarget->hasSSE2() && "Requires at least SSE2!");
20103 if (N->getOperand(0).getValueType() != MVT::v2i32 ||
20104 N->getValueType(0) != MVT::v2f32)
20105 return;
20106 SDValue ZExtIn = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v2i64,
20108 SDValue Bias = DAG.getConstantFP(BitsToDouble(0x4330000000000000ULL),
20110 SDValue VBias = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v2f64, Bias, Bias);
20111 SDValue Or = DAG.getNode(ISD::OR, dl, MVT::v2i64, ZExtIn,
20112 DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, VBias));
20113 Or = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Or);
20114 SDValue Sub = DAG.getNode(ISD::FSUB, dl, MVT::v2f64, Or, VBias);
20115 Results.push_back(DAG.getNode(X86ISD::VFPROUND, dl, MVT::v4f32, Sub));
20118 case ISD::FP_ROUND: {
20119 if (!TLI.isTypeLegal(N->getOperand(0).getValueType()))
20120 return;
20121 SDValue V = DAG.getNode(X86ISD::VFPROUND, dl, MVT::v4f32, N->getOperand(0));
20122 Results.push_back(V);
20125 case ISD::INTRINSIC_W_CHAIN: {
20126 unsigned IntNo = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
20128 default : llvm_unreachable("Do not know how to custom type "
20129 "legalize this intrinsic operation!");
20130 case Intrinsic::x86_rdtsc:
20131 return getReadTimeStampCounter(N, dl, X86ISD::RDTSC_DAG, DAG, Subtarget,
20133 case Intrinsic::x86_rdtscp:
20134 return getReadTimeStampCounter(N, dl, X86ISD::RDTSCP_DAG, DAG, Subtarget,
20136 case Intrinsic::x86_rdpmc:
20137 return getReadPerformanceCounter(N, dl, DAG, Subtarget, Results);
20140 case ISD::READCYCLECOUNTER: {
20141 return getReadTimeStampCounter(N, dl, X86ISD::RDTSC_DAG, DAG, Subtarget,
20144 case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS: {
20145 EVT T = N->getValueType(0);
20146 assert((T == MVT::i64 || T == MVT::i128) && "can only expand cmpxchg pair");
20147 bool Regs64bit = T == MVT::i128;
20148 EVT HalfT = Regs64bit ? MVT::i64 : MVT::i32;
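// CMPXCHG8B/CMPXCHG16B take the expected value in EDX:EAX / RDX:RAX and the
// replacement value in ECX:EBX / RCX:RBX; the previous memory contents come
// back in EDX:EAX / RDX:RAX and ZF reports success, which is what the
// register copies and the SETCC below reconstruct.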
20149 SDValue cpInL, cpInH;
20150 cpInL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(2),
20151 DAG.getConstant(0, HalfT));
20152 cpInH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(2),
20153 DAG.getConstant(1, HalfT));
20154 cpInL = DAG.getCopyToReg(N->getOperand(0), dl,
20155 Regs64bit ? X86::RAX : X86::EAX,
20157 cpInH = DAG.getCopyToReg(cpInL.getValue(0), dl,
20158 Regs64bit ? X86::RDX : X86::EDX,
20159 cpInH, cpInL.getValue(1));
20160 SDValue swapInL, swapInH;
20161 swapInL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(3),
20162 DAG.getConstant(0, HalfT));
20163 swapInH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(3),
20164 DAG.getConstant(1, HalfT));
20165 swapInL = DAG.getCopyToReg(cpInH.getValue(0), dl,
20166 Regs64bit ? X86::RBX : X86::EBX,
20167 swapInL, cpInH.getValue(1));
20168 swapInH = DAG.getCopyToReg(swapInL.getValue(0), dl,
20169 Regs64bit ? X86::RCX : X86::ECX,
20170 swapInH, swapInL.getValue(1));
20171 SDValue Ops[] = { swapInH.getValue(0),
20172 N->getOperand(1),
20173 swapInH.getValue(1) };
20174 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue);
20175 MachineMemOperand *MMO = cast<AtomicSDNode>(N)->getMemOperand();
20176 unsigned Opcode = Regs64bit ? X86ISD::LCMPXCHG16_DAG :
20177 X86ISD::LCMPXCHG8_DAG;
20178 SDValue Result = DAG.getMemIntrinsicNode(Opcode, dl, Tys, Ops, T, MMO);
20179 SDValue cpOutL = DAG.getCopyFromReg(Result.getValue(0), dl,
20180 Regs64bit ? X86::RAX : X86::EAX,
20181 HalfT, Result.getValue(1));
20182 SDValue cpOutH = DAG.getCopyFromReg(cpOutL.getValue(1), dl,
20183 Regs64bit ? X86::RDX : X86::EDX,
20184 HalfT, cpOutL.getValue(2));
20185 SDValue OpsF[] = { cpOutL.getValue(0), cpOutH.getValue(0)};
20187 SDValue EFLAGS = DAG.getCopyFromReg(cpOutH.getValue(1), dl, X86::EFLAGS,
20188 MVT::i32, cpOutH.getValue(2));
20189 SDValue Success =
20190 DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
20191 DAG.getConstant(X86::COND_E, MVT::i8), EFLAGS);
20192 Success = DAG.getZExtOrTrunc(Success, dl, N->getValueType(1));
20194 Results.push_back(DAG.getNode(ISD::BUILD_PAIR, dl, T, OpsF));
20195 Results.push_back(Success);
20196 Results.push_back(EFLAGS.getValue(1));
20197 return;
20198 }
20199 case ISD::ATOMIC_SWAP:
20200 case ISD::ATOMIC_LOAD_ADD:
20201 case ISD::ATOMIC_LOAD_SUB:
20202 case ISD::ATOMIC_LOAD_AND:
20203 case ISD::ATOMIC_LOAD_OR:
20204 case ISD::ATOMIC_LOAD_XOR:
20205 case ISD::ATOMIC_LOAD_NAND:
20206 case ISD::ATOMIC_LOAD_MIN:
20207 case ISD::ATOMIC_LOAD_MAX:
20208 case ISD::ATOMIC_LOAD_UMIN:
20209 case ISD::ATOMIC_LOAD_UMAX:
20210 case ISD::ATOMIC_LOAD: {
20211 // Delegate to generic TypeLegalization. Situations we can really handle
20212 // should have already been dealt with by AtomicExpandPass.cpp.
20213 break;
20214 }
20215 case ISD::BITCAST: {
20216 assert(Subtarget->hasSSE2() && "Requires at least SSE2!");
20217 EVT DstVT = N->getValueType(0);
20218 EVT SrcVT = N->getOperand(0)->getValueType(0);
20220 if (SrcVT != MVT::f64 ||
20221 (DstVT != MVT::v2i32 && DstVT != MVT::v4i16 && DstVT != MVT::v8i8))
20222 return;
20224 unsigned NumElts = DstVT.getVectorNumElements();
20225 EVT SVT = DstVT.getVectorElementType();
20226 EVT WiderVT = EVT::getVectorVT(*DAG.getContext(), SVT, NumElts * 2);
20227 SDValue Expanded = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl,
20228 MVT::v2f64, N->getOperand(0));
20229 SDValue ToVecInt = DAG.getNode(ISD::BITCAST, dl, WiderVT, Expanded);
20231 if (ExperimentalVectorWideningLegalization) {
20232 // If we are legalizing vectors by widening, we already have the desired
20233 // legal vector type, just return it.
20234 Results.push_back(ToVecInt);
20235 return;
20236 }
20238 SmallVector<SDValue, 8> Elts;
20239 for (unsigned i = 0, e = NumElts; i != e; ++i)
20240 Elts.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, SVT,
20241 ToVecInt, DAG.getIntPtrConstant(i)));
20243 Results.push_back(DAG.getNode(ISD::BUILD_VECTOR, dl, DstVT, Elts));
20244 }
20245 }
20246 }
20248 const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const {
20249 switch (Opcode) {
20250 default: return nullptr;
20251 case X86ISD::BSF: return "X86ISD::BSF";
20252 case X86ISD::BSR: return "X86ISD::BSR";
20253 case X86ISD::SHLD: return "X86ISD::SHLD";
20254 case X86ISD::SHRD: return "X86ISD::SHRD";
20255 case X86ISD::FAND: return "X86ISD::FAND";
20256 case X86ISD::FANDN: return "X86ISD::FANDN";
20257 case X86ISD::FOR: return "X86ISD::FOR";
20258 case X86ISD::FXOR: return "X86ISD::FXOR";
20259 case X86ISD::FSRL: return "X86ISD::FSRL";
20260 case X86ISD::FILD: return "X86ISD::FILD";
20261 case X86ISD::FILD_FLAG: return "X86ISD::FILD_FLAG";
20262 case X86ISD::FP_TO_INT16_IN_MEM: return "X86ISD::FP_TO_INT16_IN_MEM";
20263 case X86ISD::FP_TO_INT32_IN_MEM: return "X86ISD::FP_TO_INT32_IN_MEM";
20264 case X86ISD::FP_TO_INT64_IN_MEM: return "X86ISD::FP_TO_INT64_IN_MEM";
20265 case X86ISD::FLD: return "X86ISD::FLD";
20266 case X86ISD::FST: return "X86ISD::FST";
20267 case X86ISD::CALL: return "X86ISD::CALL";
20268 case X86ISD::RDTSC_DAG: return "X86ISD::RDTSC_DAG";
20269 case X86ISD::RDTSCP_DAG: return "X86ISD::RDTSCP_DAG";
20270 case X86ISD::RDPMC_DAG: return "X86ISD::RDPMC_DAG";
20271 case X86ISD::BT: return "X86ISD::BT";
20272 case X86ISD::CMP: return "X86ISD::CMP";
20273 case X86ISD::COMI: return "X86ISD::COMI";
20274 case X86ISD::UCOMI: return "X86ISD::UCOMI";
20275 case X86ISD::CMPM: return "X86ISD::CMPM";
20276 case X86ISD::CMPMU: return "X86ISD::CMPMU";
20277 case X86ISD::SETCC: return "X86ISD::SETCC";
20278 case X86ISD::SETCC_CARRY: return "X86ISD::SETCC_CARRY";
20279 case X86ISD::FSETCC: return "X86ISD::FSETCC";
20280 case X86ISD::CMOV: return "X86ISD::CMOV";
20281 case X86ISD::BRCOND: return "X86ISD::BRCOND";
20282 case X86ISD::RET_FLAG: return "X86ISD::RET_FLAG";
20283 case X86ISD::REP_STOS: return "X86ISD::REP_STOS";
20284 case X86ISD::REP_MOVS: return "X86ISD::REP_MOVS";
20285 case X86ISD::GlobalBaseReg: return "X86ISD::GlobalBaseReg";
20286 case X86ISD::Wrapper: return "X86ISD::Wrapper";
20287 case X86ISD::WrapperRIP: return "X86ISD::WrapperRIP";
20288 case X86ISD::PEXTRB: return "X86ISD::PEXTRB";
20289 case X86ISD::PEXTRW: return "X86ISD::PEXTRW";
20290 case X86ISD::INSERTPS: return "X86ISD::INSERTPS";
20291 case X86ISD::PINSRB: return "X86ISD::PINSRB";
20292 case X86ISD::PINSRW: return "X86ISD::PINSRW";
20293 case X86ISD::PSHUFB: return "X86ISD::PSHUFB";
20294 case X86ISD::ANDNP: return "X86ISD::ANDNP";
20295 case X86ISD::PSIGN: return "X86ISD::PSIGN";
20296 case X86ISD::BLENDI: return "X86ISD::BLENDI";
20297 case X86ISD::SHRUNKBLEND: return "X86ISD::SHRUNKBLEND";
20298 case X86ISD::SUBUS: return "X86ISD::SUBUS";
20299 case X86ISD::HADD: return "X86ISD::HADD";
20300 case X86ISD::HSUB: return "X86ISD::HSUB";
20301 case X86ISD::FHADD: return "X86ISD::FHADD";
20302 case X86ISD::FHSUB: return "X86ISD::FHSUB";
20303 case X86ISD::UMAX: return "X86ISD::UMAX";
20304 case X86ISD::UMIN: return "X86ISD::UMIN";
20305 case X86ISD::SMAX: return "X86ISD::SMAX";
20306 case X86ISD::SMIN: return "X86ISD::SMIN";
20307 case X86ISD::FMAX: return "X86ISD::FMAX";
20308 case X86ISD::FMIN: return "X86ISD::FMIN";
20309 case X86ISD::FMAXC: return "X86ISD::FMAXC";
20310 case X86ISD::FMINC: return "X86ISD::FMINC";
20311 case X86ISD::FRSQRT: return "X86ISD::FRSQRT";
20312 case X86ISD::FRCP: return "X86ISD::FRCP";
20313 case X86ISD::TLSADDR: return "X86ISD::TLSADDR";
20314 case X86ISD::TLSBASEADDR: return "X86ISD::TLSBASEADDR";
20315 case X86ISD::TLSCALL: return "X86ISD::TLSCALL";
20316 case X86ISD::EH_SJLJ_SETJMP: return "X86ISD::EH_SJLJ_SETJMP";
20317 case X86ISD::EH_SJLJ_LONGJMP: return "X86ISD::EH_SJLJ_LONGJMP";
20318 case X86ISD::EH_RETURN: return "X86ISD::EH_RETURN";
20319 case X86ISD::TC_RETURN: return "X86ISD::TC_RETURN";
20320 case X86ISD::FNSTCW16m: return "X86ISD::FNSTCW16m";
20321 case X86ISD::FNSTSW16r: return "X86ISD::FNSTSW16r";
20322 case X86ISD::LCMPXCHG_DAG: return "X86ISD::LCMPXCHG_DAG";
20323 case X86ISD::LCMPXCHG8_DAG: return "X86ISD::LCMPXCHG8_DAG";
20324 case X86ISD::LCMPXCHG16_DAG: return "X86ISD::LCMPXCHG16_DAG";
20325 case X86ISD::VZEXT_MOVL: return "X86ISD::VZEXT_MOVL";
20326 case X86ISD::VZEXT_LOAD: return "X86ISD::VZEXT_LOAD";
20327 case X86ISD::VZEXT: return "X86ISD::VZEXT";
20328 case X86ISD::VSEXT: return "X86ISD::VSEXT";
20329 case X86ISD::VTRUNC: return "X86ISD::VTRUNC";
20330 case X86ISD::VTRUNCM: return "X86ISD::VTRUNCM";
20331 case X86ISD::VINSERT: return "X86ISD::VINSERT";
20332 case X86ISD::VFPEXT: return "X86ISD::VFPEXT";
20333 case X86ISD::VFPROUND: return "X86ISD::VFPROUND";
20334 case X86ISD::VSHLDQ: return "X86ISD::VSHLDQ";
20335 case X86ISD::VSRLDQ: return "X86ISD::VSRLDQ";
20336 case X86ISD::VSHL: return "X86ISD::VSHL";
20337 case X86ISD::VSRL: return "X86ISD::VSRL";
20338 case X86ISD::VSRA: return "X86ISD::VSRA";
20339 case X86ISD::VSHLI: return "X86ISD::VSHLI";
20340 case X86ISD::VSRLI: return "X86ISD::VSRLI";
20341 case X86ISD::VSRAI: return "X86ISD::VSRAI";
20342 case X86ISD::CMPP: return "X86ISD::CMPP";
20343 case X86ISD::PCMPEQ: return "X86ISD::PCMPEQ";
20344 case X86ISD::PCMPGT: return "X86ISD::PCMPGT";
20345 case X86ISD::PCMPEQM: return "X86ISD::PCMPEQM";
20346 case X86ISD::PCMPGTM: return "X86ISD::PCMPGTM";
20347 case X86ISD::ADD: return "X86ISD::ADD";
20348 case X86ISD::SUB: return "X86ISD::SUB";
20349 case X86ISD::ADC: return "X86ISD::ADC";
20350 case X86ISD::SBB: return "X86ISD::SBB";
20351 case X86ISD::SMUL: return "X86ISD::SMUL";
20352 case X86ISD::UMUL: return "X86ISD::UMUL";
20353 case X86ISD::SMUL8: return "X86ISD::SMUL8";
20354 case X86ISD::UMUL8: return "X86ISD::UMUL8";
20355 case X86ISD::SDIVREM8_SEXT_HREG: return "X86ISD::SDIVREM8_SEXT_HREG";
20356 case X86ISD::UDIVREM8_ZEXT_HREG: return "X86ISD::UDIVREM8_ZEXT_HREG";
20357 case X86ISD::INC: return "X86ISD::INC";
20358 case X86ISD::DEC: return "X86ISD::DEC";
20359 case X86ISD::OR: return "X86ISD::OR";
20360 case X86ISD::XOR: return "X86ISD::XOR";
20361 case X86ISD::AND: return "X86ISD::AND";
20362 case X86ISD::BEXTR: return "X86ISD::BEXTR";
20363 case X86ISD::MUL_IMM: return "X86ISD::MUL_IMM";
20364 case X86ISD::PTEST: return "X86ISD::PTEST";
20365 case X86ISD::TESTP: return "X86ISD::TESTP";
20366 case X86ISD::TESTM: return "X86ISD::TESTM";
20367 case X86ISD::TESTNM: return "X86ISD::TESTNM";
20368 case X86ISD::KORTEST: return "X86ISD::KORTEST";
20369 case X86ISD::PACKSS: return "X86ISD::PACKSS";
20370 case X86ISD::PACKUS: return "X86ISD::PACKUS";
20371 case X86ISD::PALIGNR: return "X86ISD::PALIGNR";
20372 case X86ISD::VALIGN: return "X86ISD::VALIGN";
20373 case X86ISD::PSHUFD: return "X86ISD::PSHUFD";
20374 case X86ISD::PSHUFHW: return "X86ISD::PSHUFHW";
20375 case X86ISD::PSHUFLW: return "X86ISD::PSHUFLW";
20376 case X86ISD::SHUFP: return "X86ISD::SHUFP";
20377 case X86ISD::MOVLHPS: return "X86ISD::MOVLHPS";
20378 case X86ISD::MOVLHPD: return "X86ISD::MOVLHPD";
20379 case X86ISD::MOVHLPS: return "X86ISD::MOVHLPS";
20380 case X86ISD::MOVLPS: return "X86ISD::MOVLPS";
20381 case X86ISD::MOVLPD: return "X86ISD::MOVLPD";
20382 case X86ISD::MOVDDUP: return "X86ISD::MOVDDUP";
20383 case X86ISD::MOVSHDUP: return "X86ISD::MOVSHDUP";
20384 case X86ISD::MOVSLDUP: return "X86ISD::MOVSLDUP";
20385 case X86ISD::MOVSD: return "X86ISD::MOVSD";
20386 case X86ISD::MOVSS: return "X86ISD::MOVSS";
20387 case X86ISD::UNPCKL: return "X86ISD::UNPCKL";
20388 case X86ISD::UNPCKH: return "X86ISD::UNPCKH";
20389 case X86ISD::VBROADCAST: return "X86ISD::VBROADCAST";
20390 case X86ISD::VBROADCASTM: return "X86ISD::VBROADCASTM";
20391 case X86ISD::VEXTRACT: return "X86ISD::VEXTRACT";
20392 case X86ISD::VPERMILPI: return "X86ISD::VPERMILPI";
20393 case X86ISD::VPERM2X128: return "X86ISD::VPERM2X128";
20394 case X86ISD::VPERMV: return "X86ISD::VPERMV";
20395 case X86ISD::VPERMV3: return "X86ISD::VPERMV3";
20396 case X86ISD::VPERMIV3: return "X86ISD::VPERMIV3";
20397 case X86ISD::VPERMI: return "X86ISD::VPERMI";
20398 case X86ISD::PMULUDQ: return "X86ISD::PMULUDQ";
20399 case X86ISD::PMULDQ: return "X86ISD::PMULDQ";
20400 case X86ISD::VASTART_SAVE_XMM_REGS: return "X86ISD::VASTART_SAVE_XMM_REGS";
20401 case X86ISD::VAARG_64: return "X86ISD::VAARG_64";
20402 case X86ISD::WIN_ALLOCA: return "X86ISD::WIN_ALLOCA";
20403 case X86ISD::MEMBARRIER: return "X86ISD::MEMBARRIER";
20404 case X86ISD::SEG_ALLOCA: return "X86ISD::SEG_ALLOCA";
20405 case X86ISD::WIN_FTOL: return "X86ISD::WIN_FTOL";
20406 case X86ISD::SAHF: return "X86ISD::SAHF";
20407 case X86ISD::RDRAND: return "X86ISD::RDRAND";
20408 case X86ISD::RDSEED: return "X86ISD::RDSEED";
20409 case X86ISD::FMADD: return "X86ISD::FMADD";
20410 case X86ISD::FMSUB: return "X86ISD::FMSUB";
20411 case X86ISD::FNMADD: return "X86ISD::FNMADD";
20412 case X86ISD::FNMSUB: return "X86ISD::FNMSUB";
20413 case X86ISD::FMADDSUB: return "X86ISD::FMADDSUB";
20414 case X86ISD::FMSUBADD: return "X86ISD::FMSUBADD";
20415 case X86ISD::PCMPESTRI: return "X86ISD::PCMPESTRI";
20416 case X86ISD::PCMPISTRI: return "X86ISD::PCMPISTRI";
20417 case X86ISD::XTEST: return "X86ISD::XTEST";
20418 case X86ISD::COMPRESS: return "X86ISD::COMPRESS";
20419 case X86ISD::EXPAND: return "X86ISD::EXPAND";
20420 case X86ISD::SELECT: return "X86ISD::SELECT";
20421 case X86ISD::ADDSUB: return "X86ISD::ADDSUB";
20422 case X86ISD::RCP28: return "X86ISD::RCP28";
20423 case X86ISD::RSQRT28: return "X86ISD::RSQRT28";
20424 case X86ISD::FADD_RND: return "X86ISD::FADD_RND";
20425 case X86ISD::FSUB_RND: return "X86ISD::FSUB_RND";
20426 case X86ISD::FMUL_RND: return "X86ISD::FMUL_RND";
20427 case X86ISD::FDIV_RND: return "X86ISD::FDIV_RND";
20428 }
20429 }
20431 // isLegalAddressingMode - Return true if the addressing mode represented
20432 // by AM is legal for this target, for a load/store of the specified type.
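// Editor's note (not part of the original source): the most general X86
// address that can fold into one memory operand has the form
//   BaseGV + BaseOffs + BaseReg + Scale*IndexReg
// e.g. an AddrMode of {BaseOffs=16, HasBaseReg=true, Scale=4} corresponds to
// an operand like 16(%rdi,%rsi,4). The checks below reject combinations the
// hardware cannot encode, such as a scale of 3 together with a base register.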
20433 bool X86TargetLowering::isLegalAddressingMode(const AddrMode &AM,
20434 Type *Ty) const {
20435 // X86 supports extremely general addressing modes.
20436 CodeModel::Model M = getTargetMachine().getCodeModel();
20437 Reloc::Model R = getTargetMachine().getRelocationModel();
20439 // X86 allows a sign-extended 32-bit immediate field as a displacement.
20440 if (!X86::isOffsetSuitableForCodeModel(AM.BaseOffs, M, AM.BaseGV != nullptr))
20441 return false;
20443 if (AM.BaseGV) {
20444 unsigned GVFlags =
20445 Subtarget->ClassifyGlobalReference(AM.BaseGV, getTargetMachine());
20447 // If a reference to this global requires an extra load, we can't fold it.
20448 if (isGlobalStubReference(GVFlags))
20449 return false;
20451 // If BaseGV requires a register for the PIC base, we cannot also have a
20452 // BaseReg specified.
20453 if (AM.HasBaseReg && isGlobalRelativeToPICBase(GVFlags))
20454 return false;
20456 // If lower 4G is not available, then we must use rip-relative addressing.
20457 if ((M != CodeModel::Small || R != Reloc::Static) &&
20458 Subtarget->is64Bit() && (AM.BaseOffs || AM.Scale > 1))
20459 return false;
20460 }
20462 switch (AM.Scale) {
20463 case 0:
20464 case 1:
20465 case 2:
20466 case 4:
20467 case 8:
20468 // These scales always work.
20469 break;
20470 case 3:
20471 case 5:
20472 case 9:
20473 // These scales are formed with basereg+scalereg. Only accept if there is
20474 // no basereg yet.
20475 if (AM.HasBaseReg)
20476 return false;
20477 break;
20478 default: // Other stuff never works.
20479 return false;
20480 }
20482 return true;
20483 }
20485 bool X86TargetLowering::isVectorShiftByScalarCheap(Type *Ty) const {
20486 unsigned Bits = Ty->getScalarSizeInBits();
20488 // 8-bit shifts are always expensive, but versions with a scalar amount aren't
20489 // particularly cheaper than those without.
20490 if (Bits == 8)
20491 return false;
20493 // On AVX2 there are new vpsllv[dq] instructions (and other shifts), that make
20494 // variable shifts just as cheap as scalar ones.
20495 if (Subtarget->hasInt256() && (Bits == 32 || Bits == 64))
20496 return true;
20498 // Otherwise, it's significantly cheaper to shift by a scalar amount than by a
20499 // fully general vector.
20500 return true;
20501 }
20503 bool X86TargetLowering::isTruncateFree(Type *Ty1, Type *Ty2) const {
20504 if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
20505 return false;
20506 unsigned NumBits1 = Ty1->getPrimitiveSizeInBits();
20507 unsigned NumBits2 = Ty2->getPrimitiveSizeInBits();
20508 return NumBits1 > NumBits2;
20509 }
20511 bool X86TargetLowering::allowTruncateForTailCall(Type *Ty1, Type *Ty2) const {
20512 if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
20513 return false;
20515 if (!isTypeLegal(EVT::getEVT(Ty1)))
20516 return false;
20518 assert(Ty1->getPrimitiveSizeInBits() <= 64 && "i128 is probably not a noop");
20520 // Assuming the caller doesn't have a zeroext or signext return parameter,
20521 // truncation all the way down to i1 is valid.
20522 return true;
20523 }
20525 bool X86TargetLowering::isLegalICmpImmediate(int64_t Imm) const {
20526 return isInt<32>(Imm);
20527 }
20529 bool X86TargetLowering::isLegalAddImmediate(int64_t Imm) const {
20530 // Can also use sub to handle negated immediates.
20531 return isInt<32>(Imm);
20532 }
20534 bool X86TargetLowering::isTruncateFree(EVT VT1, EVT VT2) const {
20535 if (!VT1.isInteger() || !VT2.isInteger())
20536 return false;
20537 unsigned NumBits1 = VT1.getSizeInBits();
20538 unsigned NumBits2 = VT2.getSizeInBits();
20539 return NumBits1 > NumBits2;
20542 bool X86TargetLowering::isZExtFree(Type *Ty1, Type *Ty2) const {
20543 // x86-64 implicitly zero-extends 32-bit results in 64-bit registers.
20544 return Ty1->isIntegerTy(32) && Ty2->isIntegerTy(64) && Subtarget->is64Bit();
20547 bool X86TargetLowering::isZExtFree(EVT VT1, EVT VT2) const {
20548 // x86-64 implicitly zero-extends 32-bit results in 64-bit registers.
20549 return VT1 == MVT::i32 && VT2 == MVT::i64 && Subtarget->is64Bit();
20552 bool X86TargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
20553 EVT VT1 = Val.getValueType();
20554 if (isZExtFree(VT1, VT2))
20555 return true;
20557 if (Val.getOpcode() != ISD::LOAD)
20558 return false;
20560 if (!VT1.isSimple() || !VT1.isInteger() ||
20561 !VT2.isSimple() || !VT2.isInteger())
20562 return false;
20564 switch (VT1.getSimpleVT().SimpleTy) {
20565 default: break;
20566 case MVT::i8:
20567 case MVT::i16:
20568 case MVT::i32:
20569 // X86 has 8, 16, and 32-bit zero-extending loads.
20570 return true;
20571 }
20573 return false;
20574 }
20576 bool X86TargetLowering::isVectorLoadExtDesirable(SDValue) const { return true; }
20578 bool
20579 X86TargetLowering::isFMAFasterThanFMulAndFAdd(EVT VT) const {
20580 if (!(Subtarget->hasFMA() || Subtarget->hasFMA4()))
20581 return false;
20583 VT = VT.getScalarType();
20585 if (!VT.isSimple())
20588 switch (VT.getSimpleVT().SimpleTy) {
20589 case MVT::f32:
20590 case MVT::f64:
20591 return true;
20592 default:
20593 break;
20594 }
20596 return false;
20597 }
20599 bool X86TargetLowering::isNarrowingProfitable(EVT VT1, EVT VT2) const {
20600 // i16 instructions are longer (0x66 prefix) and potentially slower.
20601 return !(VT1 == MVT::i32 && VT2 == MVT::i16);
20604 /// isShuffleMaskLegal - Targets can use this to indicate that they only
20605 /// support *some* VECTOR_SHUFFLE operations, those with specific masks.
20606 /// By default, if a target supports the VECTOR_SHUFFLE node, all mask values
20607 /// are assumed to be legal.
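/// Editor's note (illustrative, not from the original source): for example,
/// on SSSE3 a single-input v16i8 mask such as <0,0,1,1,4,4,5,5,...> stays
/// inside one 128-bit lane and can be emitted as a single PSHUFB, so the
/// loop below reports it as legal; a two-input or lane-crossing mask instead
/// falls through to the explicit isMOVLMask/isSHUFPMask/... checks.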
20608 bool
20609 X86TargetLowering::isShuffleMaskLegal(const SmallVectorImpl<int> &M,
20610 EVT VT) const {
20611 if (!VT.isSimple())
20612 return false;
20614 MVT SVT = VT.getSimpleVT();
20616 // Very little shuffling can be done for 64-bit vectors right now.
20617 if (VT.getSizeInBits() == 64)
20618 return false;
20620 // This is an experimental legality test that is tailored to match the
20621 // legality test of the experimental lowering more closely. They are gated
20622 // separately to ease testing of performance differences.
20623 if (ExperimentalVectorShuffleLegality)
20624 // We only care that the types being shuffled are legal. The lowering can
20625 // handle any possible shuffle mask that results.
20626 return isTypeLegal(SVT);
20628 // If this is a single-input shuffle with no 128 bit lane crossings we can
20629 // lower it into pshufb.
20630 if ((SVT.is128BitVector() && Subtarget->hasSSSE3()) ||
20631 (SVT.is256BitVector() && Subtarget->hasInt256())) {
20632 bool isLegal = true;
20633 for (unsigned I = 0, E = M.size(); I != E; ++I) {
20634 if (M[I] >= (int)SVT.getVectorNumElements() ||
20635 ShuffleCrosses128bitLane(SVT, I, M[I])) {
20636 isLegal = false;
20637 break;
20638 }
20639 }
20640 if (isLegal)
20641 return true;
20642 }
20644 // FIXME: blends, shifts.
20645 return (SVT.getVectorNumElements() == 2 ||
20646 ShuffleVectorSDNode::isSplatMask(&M[0], VT) ||
20647 isMOVLMask(M, SVT) ||
20648 isCommutedMOVLMask(M, SVT) ||
20649 isMOVHLPSMask(M, SVT) ||
20650 isSHUFPMask(M, SVT) ||
20651 isSHUFPMask(M, SVT, /* Commuted */ true) ||
20652 isPSHUFDMask(M, SVT) ||
20653 isPSHUFDMask(M, SVT, /* SecondOperand */ true) ||
20654 isPSHUFHWMask(M, SVT, Subtarget->hasInt256()) ||
20655 isPSHUFLWMask(M, SVT, Subtarget->hasInt256()) ||
20656 isPALIGNRMask(M, SVT, Subtarget) ||
20657 isUNPCKLMask(M, SVT, Subtarget->hasInt256()) ||
20658 isUNPCKHMask(M, SVT, Subtarget->hasInt256()) ||
20659 isUNPCKL_v_undef_Mask(M, SVT, Subtarget->hasInt256()) ||
20660 isUNPCKH_v_undef_Mask(M, SVT, Subtarget->hasInt256()) ||
20661 isBlendMask(M, SVT, Subtarget->hasSSE41(), Subtarget->hasInt256()) ||
20662 (Subtarget->hasSSE41() && isINSERTPSMask(M, SVT)));
20663 }
20665 bool
20666 X86TargetLowering::isVectorClearMaskLegal(const SmallVectorImpl<int> &Mask,
20667 EVT VT) const {
20668 if (!VT.isSimple())
20669 return false;
20671 MVT SVT = VT.getSimpleVT();
20673 // This is an experimental legality test that is tailored to match the
20674 // legality test of the experimental lowering more closely. They are gated
20675 // separately to ease testing of performance differences.
20676 if (ExperimentalVectorShuffleLegality)
20677 // The new vector shuffle lowering is very good at managing zero-inputs.
20678 return isShuffleMaskLegal(Mask, VT);
20680 unsigned NumElts = SVT.getVectorNumElements();
20681 // FIXME: This collection of masks seems suspect.
20682 if (NumElts == 2)
20683 return true;
20684 if (NumElts == 4 && SVT.is128BitVector()) {
20685 return (isMOVLMask(Mask, SVT) ||
20686 isCommutedMOVLMask(Mask, SVT, true) ||
20687 isSHUFPMask(Mask, SVT) ||
20688 isSHUFPMask(Mask, SVT, /* Commuted */ true) ||
20689 isBlendMask(Mask, SVT, Subtarget->hasSSE41(),
20690 Subtarget->hasInt256()));
20691 }
20693 return false;
20694 }
20695 //===----------------------------------------------------------------------===//
20696 // X86 Scheduler Hooks
20697 //===----------------------------------------------------------------------===//
20699 /// Utility function to emit xbegin specifying the start of an RTM region.
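// Editor's sketch (not from the original source): XBEGIN starts an RTM
// transaction and falls through on success; on abort, control resumes at the
// fallback label with an abort status in EAX. The expansion below therefore
// puts -1 (XBEGIN_STARTED) into EAX on the fallthrough path and lets the
// abort path reach the sink block with EAX already holding the status.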
20700 static MachineBasicBlock *EmitXBegin(MachineInstr *MI, MachineBasicBlock *MBB,
20701 const TargetInstrInfo *TII) {
20702 DebugLoc DL = MI->getDebugLoc();
20704 const BasicBlock *BB = MBB->getBasicBlock();
20705 MachineFunction::iterator I = MBB;
20706 ++I;
20708 // For the v = xbegin(), we generate
20709 //
20710 // thisMBB:
20711 //  xbegin sinkMBB
20712 //
20713 // mainMBB:
20714 //  eax = -1
20715 //
20716 // sinkMBB:
20717 //  v = eax
20719 MachineBasicBlock *thisMBB = MBB;
20720 MachineFunction *MF = MBB->getParent();
20721 MachineBasicBlock *mainMBB = MF->CreateMachineBasicBlock(BB);
20722 MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB);
20723 MF->insert(I, mainMBB);
20724 MF->insert(I, sinkMBB);
20726 // Transfer the remainder of BB and its successor edges to sinkMBB.
20727 sinkMBB->splice(sinkMBB->begin(), MBB,
20728 std::next(MachineBasicBlock::iterator(MI)), MBB->end());
20729 sinkMBB->transferSuccessorsAndUpdatePHIs(MBB);
20733 // # fallthrough to mainMBB
20734 // # abortion to sinkMBB
20735 BuildMI(thisMBB, DL, TII->get(X86::XBEGIN_4)).addMBB(sinkMBB);
20736 thisMBB->addSuccessor(mainMBB);
20737 thisMBB->addSuccessor(sinkMBB);
20741 BuildMI(mainMBB, DL, TII->get(X86::MOV32ri), X86::EAX).addImm(-1);
20742 mainMBB->addSuccessor(sinkMBB);
20745 // EAX is live into the sinkMBB
20746 sinkMBB->addLiveIn(X86::EAX);
20747 BuildMI(*sinkMBB, sinkMBB->begin(), DL,
20748 TII->get(TargetOpcode::COPY), MI->getOperand(0).getReg())
20749 .addReg(X86::EAX);
20751 MI->eraseFromParent();
20752 return sinkMBB;
20753 }
20755 // FIXME: When we get size specific XMM0 registers, i.e. XMM0_V16I8
20756 // or XMM0_V32I8 in AVX all of this code can be replaced with that
20757 // in the .td file.
20758 static MachineBasicBlock *EmitPCMPSTRM(MachineInstr *MI, MachineBasicBlock *BB,
20759 const TargetInstrInfo *TII) {
20760 unsigned Opc;
20761 switch (MI->getOpcode()) {
20762 default: llvm_unreachable("illegal opcode!");
20763 case X86::PCMPISTRM128REG: Opc = X86::PCMPISTRM128rr; break;
20764 case X86::VPCMPISTRM128REG: Opc = X86::VPCMPISTRM128rr; break;
20765 case X86::PCMPISTRM128MEM: Opc = X86::PCMPISTRM128rm; break;
20766 case X86::VPCMPISTRM128MEM: Opc = X86::VPCMPISTRM128rm; break;
20767 case X86::PCMPESTRM128REG: Opc = X86::PCMPESTRM128rr; break;
20768 case X86::VPCMPESTRM128REG: Opc = X86::VPCMPESTRM128rr; break;
20769 case X86::PCMPESTRM128MEM: Opc = X86::PCMPESTRM128rm; break;
20770 case X86::VPCMPESTRM128MEM: Opc = X86::VPCMPESTRM128rm; break;
20773 DebugLoc dl = MI->getDebugLoc();
20774 MachineInstrBuilder MIB = BuildMI(*BB, MI, dl, TII->get(Opc));
20776 unsigned NumArgs = MI->getNumOperands();
20777 for (unsigned i = 1; i < NumArgs; ++i) {
20778 MachineOperand &Op = MI->getOperand(i);
20779 if (!(Op.isReg() && Op.isImplicit()))
20780 MIB.addOperand(Op);
20782 if (MI->hasOneMemOperand())
20783 MIB->setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
20785 BuildMI(*BB, MI, dl,
20786 TII->get(TargetOpcode::COPY), MI->getOperand(0).getReg())
20787 .addReg(X86::XMM0);
20789 MI->eraseFromParent();
20790 return BB;
20791 }
20793 // FIXME: Custom handling because TableGen doesn't support multiple implicit
20794 // defs in an instruction pattern
20795 static MachineBasicBlock *EmitPCMPSTRI(MachineInstr *MI, MachineBasicBlock *BB,
20796 const TargetInstrInfo *TII) {
20797 unsigned Opc;
20798 switch (MI->getOpcode()) {
20799 default: llvm_unreachable("illegal opcode!");
20800 case X86::PCMPISTRIREG: Opc = X86::PCMPISTRIrr; break;
20801 case X86::VPCMPISTRIREG: Opc = X86::VPCMPISTRIrr; break;
20802 case X86::PCMPISTRIMEM: Opc = X86::PCMPISTRIrm; break;
20803 case X86::VPCMPISTRIMEM: Opc = X86::VPCMPISTRIrm; break;
20804 case X86::PCMPESTRIREG: Opc = X86::PCMPESTRIrr; break;
20805 case X86::VPCMPESTRIREG: Opc = X86::VPCMPESTRIrr; break;
20806 case X86::PCMPESTRIMEM: Opc = X86::PCMPESTRIrm; break;
20807 case X86::VPCMPESTRIMEM: Opc = X86::VPCMPESTRIrm; break;
20810 DebugLoc dl = MI->getDebugLoc();
20811 MachineInstrBuilder MIB = BuildMI(*BB, MI, dl, TII->get(Opc));
20813 unsigned NumArgs = MI->getNumOperands(); // remove the results
20814 for (unsigned i = 1; i < NumArgs; ++i) {
20815 MachineOperand &Op = MI->getOperand(i);
20816 if (!(Op.isReg() && Op.isImplicit()))
20817 MIB.addOperand(Op);
20819 if (MI->hasOneMemOperand())
20820 MIB->setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
20822 BuildMI(*BB, MI, dl,
20823 TII->get(TargetOpcode::COPY), MI->getOperand(0).getReg())
20824 .addReg(X86::ECX);
20826 MI->eraseFromParent();
20827 return BB;
20828 }
20830 static MachineBasicBlock *EmitMonitor(MachineInstr *MI, MachineBasicBlock *BB,
20831 const X86Subtarget *Subtarget) {
20832 DebugLoc dl = MI->getDebugLoc();
20833 const TargetInstrInfo *TII = Subtarget->getInstrInfo();
20834 // Address into RAX/EAX, other two args into ECX, EDX.
20835 unsigned MemOpc = Subtarget->is64Bit() ? X86::LEA64r : X86::LEA32r;
20836 unsigned MemReg = Subtarget->is64Bit() ? X86::RAX : X86::EAX;
20837 MachineInstrBuilder MIB = BuildMI(*BB, MI, dl, TII->get(MemOpc), MemReg);
20838 for (int i = 0; i < X86::AddrNumOperands; ++i)
20839 MIB.addOperand(MI->getOperand(i));
20841 unsigned ValOps = X86::AddrNumOperands;
20842 BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY), X86::ECX)
20843 .addReg(MI->getOperand(ValOps).getReg());
20844 BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY), X86::EDX)
20845 .addReg(MI->getOperand(ValOps+1).getReg());
20847 // The instruction doesn't actually take any operands though.
20848 BuildMI(*BB, MI, dl, TII->get(X86::MONITORrrr));
20850 MI->eraseFromParent(); // The pseudo is gone now.
20851 return BB;
20852 }
20854 MachineBasicBlock *
20855 X86TargetLowering::EmitVAARG64WithCustomInserter(MachineInstr *MI,
20856 MachineBasicBlock *MBB) const {
20857 // Emit va_arg instruction on X86-64.
20859 // Operands to this pseudo-instruction:
20860 // 0 ) Output : destination address (reg)
20861 // 1-5) Input : va_list address (addr, i64mem)
20862 // 6 ) ArgSize : Size (in bytes) of vararg type
20863 // 7 ) ArgMode : 0=overflow only, 1=use gp_offset, 2=use fp_offset
20864 // 8 ) Align : Alignment of type
20865 // 9 ) EFLAGS (implicit-def)
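// Editor's note (illustrative, not from the original source): under the SysV
// x86-64 ABI a call like va_arg(ap, double) would reach this pseudo with
// ArgSize=8, ArgMode=2 (use fp_offset) and Align=8, while va_arg(ap, int)
// would use ArgSize=4 and ArgMode=1 (use gp_offset).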
20867 assert(MI->getNumOperands() == 10 && "VAARG_64 should have 10 operands!");
20868 assert(X86::AddrNumOperands == 5 && "VAARG_64 assumes 5 address operands");
20870 unsigned DestReg = MI->getOperand(0).getReg();
20871 MachineOperand &Base = MI->getOperand(1);
20872 MachineOperand &Scale = MI->getOperand(2);
20873 MachineOperand &Index = MI->getOperand(3);
20874 MachineOperand &Disp = MI->getOperand(4);
20875 MachineOperand &Segment = MI->getOperand(5);
20876 unsigned ArgSize = MI->getOperand(6).getImm();
20877 unsigned ArgMode = MI->getOperand(7).getImm();
20878 unsigned Align = MI->getOperand(8).getImm();
20880 // Memory Reference
20881 assert(MI->hasOneMemOperand() && "Expected VAARG_64 to have one memoperand");
20882 MachineInstr::mmo_iterator MMOBegin = MI->memoperands_begin();
20883 MachineInstr::mmo_iterator MMOEnd = MI->memoperands_end();
20885 // Machine Information
20886 const TargetInstrInfo *TII = Subtarget->getInstrInfo();
20887 MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
20888 const TargetRegisterClass *AddrRegClass = getRegClassFor(MVT::i64);
20889 const TargetRegisterClass *OffsetRegClass = getRegClassFor(MVT::i32);
20890 DebugLoc DL = MI->getDebugLoc();
20892 // struct va_list {
20893 // i32 gp_offset
20894 // i32 fp_offset
20895 // i64 overflow_area (address)
20896 // i64 reg_save_area (address)
20897 // }
20898 // sizeof(va_list) = 24
20899 // alignment(va_list) = 8
20901 unsigned TotalNumIntRegs = 6;
20902 unsigned TotalNumXMMRegs = 8;
20903 bool UseGPOffset = (ArgMode == 1);
20904 bool UseFPOffset = (ArgMode == 2);
20905 unsigned MaxOffset = TotalNumIntRegs * 8 +
20906 (UseFPOffset ? TotalNumXMMRegs * 16 : 0);
20908 /* Align ArgSize to a multiple of 8 */
20909 unsigned ArgSizeA8 = (ArgSize + 7) & ~7;
20910 bool NeedsAlign = (Align > 8);
20912 MachineBasicBlock *thisMBB = MBB;
20913 MachineBasicBlock *overflowMBB;
20914 MachineBasicBlock *offsetMBB;
20915 MachineBasicBlock *endMBB;
20917 unsigned OffsetDestReg = 0; // Argument address computed by offsetMBB
20918 unsigned OverflowDestReg = 0; // Argument address computed by overflowMBB
20919 unsigned OffsetReg = 0;
20921 if (!UseGPOffset && !UseFPOffset) {
20922 // If we only pull from the overflow region, we don't create a branch.
20923 // We don't need to alter control flow.
20924 OffsetDestReg = 0; // unused
20925 OverflowDestReg = DestReg;
20927 offsetMBB = nullptr;
20928 overflowMBB = thisMBB;
20929 endMBB = thisMBB;
20930 } else {
20931 // First emit code to check if gp_offset (or fp_offset) is below the bound.
20932 // If so, pull the argument from reg_save_area. (branch to offsetMBB)
20933 // If not, pull from overflow_area. (branch to overflowMBB)
20935 // thisMBB
20936 // / \
20938 // offsetMBB overflowMBB
20940 // \ /
20941 // endMBB
20943 // Registers for the PHI in endMBB
20944 OffsetDestReg = MRI.createVirtualRegister(AddrRegClass);
20945 OverflowDestReg = MRI.createVirtualRegister(AddrRegClass);
20947 const BasicBlock *LLVM_BB = MBB->getBasicBlock();
20948 MachineFunction *MF = MBB->getParent();
20949 overflowMBB = MF->CreateMachineBasicBlock(LLVM_BB);
20950 offsetMBB = MF->CreateMachineBasicBlock(LLVM_BB);
20951 endMBB = MF->CreateMachineBasicBlock(LLVM_BB);
20953 MachineFunction::iterator MBBIter = MBB;
20956 // Insert the new basic blocks
20957 MF->insert(MBBIter, offsetMBB);
20958 MF->insert(MBBIter, overflowMBB);
20959 MF->insert(MBBIter, endMBB);
20961 // Transfer the remainder of MBB and its successor edges to endMBB.
20962 endMBB->splice(endMBB->begin(), thisMBB,
20963 std::next(MachineBasicBlock::iterator(MI)), thisMBB->end());
20964 endMBB->transferSuccessorsAndUpdatePHIs(thisMBB);
20966 // Make offsetMBB and overflowMBB successors of thisMBB
20967 thisMBB->addSuccessor(offsetMBB);
20968 thisMBB->addSuccessor(overflowMBB);
20970 // endMBB is a successor of both offsetMBB and overflowMBB
20971 offsetMBB->addSuccessor(endMBB);
20972 overflowMBB->addSuccessor(endMBB);
20974 // Load the offset value into a register
20975 OffsetReg = MRI.createVirtualRegister(OffsetRegClass);
20976 BuildMI(thisMBB, DL, TII->get(X86::MOV32rm), OffsetReg)
20980 .addDisp(Disp, UseFPOffset ? 4 : 0)
20981 .addOperand(Segment)
20982 .setMemRefs(MMOBegin, MMOEnd);
20984 // Check if there is enough room left to pull this argument.
20985 BuildMI(thisMBB, DL, TII->get(X86::CMP32ri))
20986 .addReg(OffsetReg)
20987 .addImm(MaxOffset + 8 - ArgSizeA8);
20989 // Branch to "overflowMBB" if offset >= max
20990 // Fall through to "offsetMBB" otherwise
20991 BuildMI(thisMBB, DL, TII->get(X86::GetCondBranchFromCond(X86::COND_AE)))
20992 .addMBB(overflowMBB);
20993 }
20995 // In offsetMBB, emit code to use the reg_save_area.
20996 if (offsetMBB) {
20997 assert(OffsetReg != 0);
20999 // Read the reg_save_area address.
21000 unsigned RegSaveReg = MRI.createVirtualRegister(AddrRegClass);
21001 BuildMI(offsetMBB, DL, TII->get(X86::MOV64rm), RegSaveReg)
21006 .addOperand(Segment)
21007 .setMemRefs(MMOBegin, MMOEnd);
21009 // Zero-extend the offset
21010 unsigned OffsetReg64 = MRI.createVirtualRegister(AddrRegClass);
21011 BuildMI(offsetMBB, DL, TII->get(X86::SUBREG_TO_REG), OffsetReg64)
21014 .addImm(X86::sub_32bit);
21016 // Add the offset to the reg_save_area to get the final address.
21017 BuildMI(offsetMBB, DL, TII->get(X86::ADD64rr), OffsetDestReg)
21018 .addReg(OffsetReg64)
21019 .addReg(RegSaveReg);
21021 // Compute the offset for the next argument
21022 unsigned NextOffsetReg = MRI.createVirtualRegister(OffsetRegClass);
21023 BuildMI(offsetMBB, DL, TII->get(X86::ADD32ri), NextOffsetReg)
21025 .addImm(UseFPOffset ? 16 : 8);
21027 // Store it back into the va_list.
21028 BuildMI(offsetMBB, DL, TII->get(X86::MOV32mr))
21032 .addDisp(Disp, UseFPOffset ? 4 : 0)
21033 .addOperand(Segment)
21034 .addReg(NextOffsetReg)
21035 .setMemRefs(MMOBegin, MMOEnd);
21037 // Jump to endMBB
21038 BuildMI(offsetMBB, DL, TII->get(X86::JMP_1))
21039 .addMBB(endMBB);
21040 }
21043 // Emit code to use overflow area
21046 // Load the overflow_area address into a register.
21047 unsigned OverflowAddrReg = MRI.createVirtualRegister(AddrRegClass);
21048 BuildMI(overflowMBB, DL, TII->get(X86::MOV64rm), OverflowAddrReg)
21053 .addOperand(Segment)
21054 .setMemRefs(MMOBegin, MMOEnd);
21056 // If we need to align it, do so. Otherwise, just copy the address
21057 // to OverflowDestReg.
21058 if (NeedsAlign) {
21059 // Align the overflow address
21060 assert((Align & (Align-1)) == 0 && "Alignment must be a power of 2");
21061 unsigned TmpReg = MRI.createVirtualRegister(AddrRegClass);
21063 // aligned_addr = (addr + (align-1)) & ~(align-1)
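// Editorial worked example: with Align = 16 and an overflow address of
// 0x1003, (0x1003 + 15) & ~15 == 0x1010, i.e. the next 16-byte boundary.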
21064 BuildMI(overflowMBB, DL, TII->get(X86::ADD64ri32), TmpReg)
21065 .addReg(OverflowAddrReg)
21066 .addImm(Align-1);
21068 BuildMI(overflowMBB, DL, TII->get(X86::AND64ri32), OverflowDestReg)
21069 .addReg(TmpReg)
21070 .addImm(~(uint64_t)(Align-1));
21071 } else {
21072 BuildMI(overflowMBB, DL, TII->get(TargetOpcode::COPY), OverflowDestReg)
21073 .addReg(OverflowAddrReg);
21074 }
21076 // Compute the next overflow address after this argument.
21077 // (the overflow address should be kept 8-byte aligned)
21078 unsigned NextAddrReg = MRI.createVirtualRegister(AddrRegClass);
21079 BuildMI(overflowMBB, DL, TII->get(X86::ADD64ri32), NextAddrReg)
21080 .addReg(OverflowDestReg)
21081 .addImm(ArgSizeA8);
21083 // Store the new overflow address.
21084 BuildMI(overflowMBB, DL, TII->get(X86::MOV64mr))
21089 .addOperand(Segment)
21090 .addReg(NextAddrReg)
21091 .setMemRefs(MMOBegin, MMOEnd);
21093 // If we branched, emit the PHI to the front of endMBB.
21094 if (offsetMBB) {
21095 BuildMI(*endMBB, endMBB->begin(), DL,
21096 TII->get(X86::PHI), DestReg)
21097 .addReg(OffsetDestReg).addMBB(offsetMBB)
21098 .addReg(OverflowDestReg).addMBB(overflowMBB);
21099 }
21101 // Erase the pseudo instruction
21102 MI->eraseFromParent();
21104 return endMBB;
21105 }
21107 MachineBasicBlock *
21108 X86TargetLowering::EmitVAStartSaveXMMRegsWithCustomInserter(
21109 MachineInstr *MI,
21110 MachineBasicBlock *MBB) const {
21111 // Emit code to save XMM registers to the stack. The ABI says that the
21112 // number of registers to save is given in %al, so it's theoretically
21113 // possible to do an indirect jump trick to avoid saving all of them,
21114 // however this code takes a simpler approach and just executes all
21115 // of the stores if %al is non-zero. It's less code, and it's probably
21116 // easier on the hardware branch predictor, and stores aren't all that
21117 // expensive anyway.
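// Roughly, on non-Windows targets the emitted code looks like this
// (editorial sketch; offsets depend on the vararg register save state):
//   testb %al, %al
//   je    <EndMBB>
//   movaps %xmm0, fp_offset+0(<reg_save_area>)
//   movaps %xmm1, fp_offset+16(<reg_save_area>)
//   ...
// <EndMBB>: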
21119 // Create the new basic blocks. One block contains all the XMM stores,
21120 // and one block is the final destination regardless of whether any
21121 // stores were performed.
21122 const BasicBlock *LLVM_BB = MBB->getBasicBlock();
21123 MachineFunction *F = MBB->getParent();
21124 MachineFunction::iterator MBBIter = MBB;
21126 MachineBasicBlock *XMMSaveMBB = F->CreateMachineBasicBlock(LLVM_BB);
21127 MachineBasicBlock *EndMBB = F->CreateMachineBasicBlock(LLVM_BB);
21128 F->insert(MBBIter, XMMSaveMBB);
21129 F->insert(MBBIter, EndMBB);
21131 // Transfer the remainder of MBB and its successor edges to EndMBB.
21132 EndMBB->splice(EndMBB->begin(), MBB,
21133 std::next(MachineBasicBlock::iterator(MI)), MBB->end());
21134 EndMBB->transferSuccessorsAndUpdatePHIs(MBB);
21136 // The original block will now fall through to the XMM save block.
21137 MBB->addSuccessor(XMMSaveMBB);
21138 // The XMMSaveMBB will fall through to the end block.
21139 XMMSaveMBB->addSuccessor(EndMBB);
21141 // Now add the instructions.
21142 const TargetInstrInfo *TII = Subtarget->getInstrInfo();
21143 DebugLoc DL = MI->getDebugLoc();
21145 unsigned CountReg = MI->getOperand(0).getReg();
21146 int64_t RegSaveFrameIndex = MI->getOperand(1).getImm();
21147 int64_t VarArgsFPOffset = MI->getOperand(2).getImm();
21149 if (!Subtarget->isTargetWin64()) {
21150 // If %al is 0, branch around the XMM save block.
21151 BuildMI(MBB, DL, TII->get(X86::TEST8rr)).addReg(CountReg).addReg(CountReg);
21152 BuildMI(MBB, DL, TII->get(X86::JE_1)).addMBB(EndMBB);
21153 MBB->addSuccessor(EndMBB);
21156 // Make sure the last operand is EFLAGS, which gets clobbered by the branch
21157 // that was just emitted, but clearly shouldn't be "saved".
21158 assert((MI->getNumOperands() <= 3 ||
21159 !MI->getOperand(MI->getNumOperands() - 1).isReg() ||
21160 MI->getOperand(MI->getNumOperands() - 1).getReg() == X86::EFLAGS)
21161 && "Expected last argument to be EFLAGS");
21162 unsigned MOVOpc = Subtarget->hasFp256() ? X86::VMOVAPSmr : X86::MOVAPSmr;
21163 // In the XMM save block, save all the XMM argument registers.
21164 for (int i = 3, e = MI->getNumOperands() - 1; i != e; ++i) {
21165 int64_t Offset = (i - 3) * 16 + VarArgsFPOffset;
21166 MachineMemOperand *MMO =
21167 F->getMachineMemOperand(
21168 MachinePointerInfo::getFixedStack(RegSaveFrameIndex, Offset),
21169 MachineMemOperand::MOStore,
21170 /*Size=*/16, /*Align=*/16);
21171 BuildMI(XMMSaveMBB, DL, TII->get(MOVOpc))
21172 .addFrameIndex(RegSaveFrameIndex)
21173 .addImm(/*Scale=*/1)
21174 .addReg(/*IndexReg=*/0)
21175 .addImm(/*Disp=*/Offset)
21176 .addReg(/*Segment=*/0)
21177 .addReg(MI->getOperand(i).getReg())
21178 .addMemOperand(MMO);
21179 }
21181 MI->eraseFromParent(); // The pseudo instruction is gone now.
21183 return EndMBB;
21184 }
21186 // The EFLAGS operand of SelectItr might be missing a kill marker
21187 // because there were multiple uses of EFLAGS, and ISel didn't know
21188 // which to mark. Figure out whether SelectItr should have had a
21189 // kill marker, and set it if it should. Returns the correct kill
21190 // marker value.
21191 static bool checkAndUpdateEFLAGSKill(MachineBasicBlock::iterator SelectItr,
21192 MachineBasicBlock* BB,
21193 const TargetRegisterInfo* TRI) {
21194 // Scan forward through BB for a use/def of EFLAGS.
21195 MachineBasicBlock::iterator miI(std::next(SelectItr));
21196 for (MachineBasicBlock::iterator miE = BB->end(); miI != miE; ++miI) {
21197 const MachineInstr& mi = *miI;
21198 if (mi.readsRegister(X86::EFLAGS))
21199 return false;
21200 if (mi.definesRegister(X86::EFLAGS))
21201 break; // Should have kill-flag - update below.
21202 }
21204 // If we hit the end of the block, check whether EFLAGS is live into a
21205 // successor.
21206 if (miI == BB->end()) {
21207 for (MachineBasicBlock::succ_iterator sItr = BB->succ_begin(),
21208 sEnd = BB->succ_end();
21209 sItr != sEnd; ++sItr) {
21210 MachineBasicBlock* succ = *sItr;
21211 if (succ->isLiveIn(X86::EFLAGS))
21212 return false;
21213 }
21214 }
21216 // We found a def, or hit the end of the basic block and EFLAGS wasn't live
21217 // out. SelectMI should have a kill flag on EFLAGS.
21218 SelectItr->addRegisterKilled(X86::EFLAGS, TRI);
21219 return true;
21220 }
21222 MachineBasicBlock *
21223 X86TargetLowering::EmitLoweredSelect(MachineInstr *MI,
21224 MachineBasicBlock *BB) const {
21225 const TargetInstrInfo *TII = Subtarget->getInstrInfo();
21226 DebugLoc DL = MI->getDebugLoc();
21228 // To "insert" a SELECT_CC instruction, we actually have to insert the
21229 // diamond control-flow pattern. The incoming instruction knows the
21230 // destination vreg to set, the condition code register to branch on, the
21231 // true/false values to select between, and a branch opcode to use.
21232 const BasicBlock *LLVM_BB = BB->getBasicBlock();
21233 MachineFunction::iterator It = BB;
21234 ++It;
21236 // thisMBB:
21237 // ...
21238 // TrueVal = ...
21239 // cmpTY ccX, r1, r2
21240 // bCC copy1MBB
21241 // fallthrough --> copy0MBB
21242 MachineBasicBlock *thisMBB = BB;
21243 MachineFunction *F = BB->getParent();
21244 MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB);
21245 MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
21246 F->insert(It, copy0MBB);
21247 F->insert(It, sinkMBB);
21249 // If the EFLAGS register isn't dead in the terminator, then claim that it's
21250 // live into the sink and copy blocks.
21251 const TargetRegisterInfo *TRI = Subtarget->getRegisterInfo();
21252 if (!MI->killsRegister(X86::EFLAGS) &&
21253 !checkAndUpdateEFLAGSKill(MI, BB, TRI)) {
21254 copy0MBB->addLiveIn(X86::EFLAGS);
21255 sinkMBB->addLiveIn(X86::EFLAGS);
21256 }
21258 // Transfer the remainder of BB and its successor edges to sinkMBB.
21259 sinkMBB->splice(sinkMBB->begin(), BB,
21260 std::next(MachineBasicBlock::iterator(MI)), BB->end());
21261 sinkMBB->transferSuccessorsAndUpdatePHIs(BB);
21263 // Add the true and fallthrough blocks as its successors.
21264 BB->addSuccessor(copy0MBB);
21265 BB->addSuccessor(sinkMBB);
21267 // Create the conditional branch instruction.
21268 unsigned Opc =
21269 X86::GetCondBranchFromCond((X86::CondCode)MI->getOperand(3).getImm());
21270 BuildMI(BB, DL, TII->get(Opc)).addMBB(sinkMBB);
21273 // %FalseValue = ...
21274 // # fallthrough to sinkMBB
21275 copy0MBB->addSuccessor(sinkMBB);
21278 // %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
21280 BuildMI(*sinkMBB, sinkMBB->begin(), DL,
21281 TII->get(X86::PHI), MI->getOperand(0).getReg())
21282 .addReg(MI->getOperand(1).getReg()).addMBB(copy0MBB)
21283 .addReg(MI->getOperand(2).getReg()).addMBB(thisMBB);
21285 MI->eraseFromParent(); // The pseudo instruction is gone now.
21287 return sinkMBB;
21288 }
21289 MachineBasicBlock *
21290 X86TargetLowering::EmitLoweredSegAlloca(MachineInstr *MI,
21291 MachineBasicBlock *BB) const {
21292 MachineFunction *MF = BB->getParent();
21293 const TargetInstrInfo *TII = Subtarget->getInstrInfo();
21294 DebugLoc DL = MI->getDebugLoc();
21295 const BasicBlock *LLVM_BB = BB->getBasicBlock();
21297 assert(MF->shouldSplitStack());
21299 const bool Is64Bit = Subtarget->is64Bit();
21300 const bool IsLP64 = Subtarget->isTarget64BitLP64();
21302 const unsigned TlsReg = Is64Bit ? X86::FS : X86::GS;
21303 const unsigned TlsOffset = IsLP64 ? 0x70 : Is64Bit ? 0x40 : 0x30;
21305 // BB:
21306 // ... [Till the alloca]
21307 // If stacklet is not large enough, jump to mallocMBB
21309 // bumpMBB:
21310 // Allocate by subtracting from RSP
21311 // Jump to continueMBB
21313 // mallocMBB:
21314 // Allocate by call to runtime
21316 // continueMBB:
21317 // ...
21318 // [rest of original BB]
21321 MachineBasicBlock *mallocMBB = MF->CreateMachineBasicBlock(LLVM_BB);
21322 MachineBasicBlock *bumpMBB = MF->CreateMachineBasicBlock(LLVM_BB);
21323 MachineBasicBlock *continueMBB = MF->CreateMachineBasicBlock(LLVM_BB);
21325 MachineRegisterInfo &MRI = MF->getRegInfo();
21326 const TargetRegisterClass *AddrRegClass =
21327 getRegClassFor(getPointerTy());
21329 unsigned mallocPtrVReg = MRI.createVirtualRegister(AddrRegClass),
21330 bumpSPPtrVReg = MRI.createVirtualRegister(AddrRegClass),
21331 tmpSPVReg = MRI.createVirtualRegister(AddrRegClass),
21332 SPLimitVReg = MRI.createVirtualRegister(AddrRegClass),
21333 sizeVReg = MI->getOperand(1).getReg(),
21334 physSPReg = IsLP64 || Subtarget->isTargetNaCl64() ? X86::RSP : X86::ESP;
21336 MachineFunction::iterator MBBIter = BB;
21339 MF->insert(MBBIter, bumpMBB);
21340 MF->insert(MBBIter, mallocMBB);
21341 MF->insert(MBBIter, continueMBB);
21343 continueMBB->splice(continueMBB->begin(), BB,
21344 std::next(MachineBasicBlock::iterator(MI)), BB->end());
21345 continueMBB->transferSuccessorsAndUpdatePHIs(BB);
21347 // Add code to the main basic block to check if the stack limit has been hit,
21348 // and if so, jump to mallocMBB otherwise to bumpMBB.
21349 BuildMI(BB, DL, TII->get(TargetOpcode::COPY), tmpSPVReg).addReg(physSPReg);
21350 BuildMI(BB, DL, TII->get(IsLP64 ? X86::SUB64rr:X86::SUB32rr), SPLimitVReg)
21351 .addReg(tmpSPVReg).addReg(sizeVReg);
21352 BuildMI(BB, DL, TII->get(IsLP64 ? X86::CMP64mr:X86::CMP32mr))
21353 .addReg(0).addImm(1).addReg(0).addImm(TlsOffset).addReg(TlsReg)
21354 .addReg(SPLimitVReg);
21355 BuildMI(BB, DL, TII->get(X86::JG_1)).addMBB(mallocMBB);
21357 // bumpMBB simply decreases the stack pointer, since we know the current
21358 // stacklet has enough space.
21359 BuildMI(bumpMBB, DL, TII->get(TargetOpcode::COPY), physSPReg)
21360 .addReg(SPLimitVReg);
21361 BuildMI(bumpMBB, DL, TII->get(TargetOpcode::COPY), bumpSPPtrVReg)
21362 .addReg(SPLimitVReg);
21363 BuildMI(bumpMBB, DL, TII->get(X86::JMP_1)).addMBB(continueMBB);
21365 // Calls into a routine in libgcc to allocate more space from the heap.
21366 const uint32_t *RegMask =
21367 Subtarget->getRegisterInfo()->getCallPreservedMask(CallingConv::C);
21368 if (IsLP64) {
21369 BuildMI(mallocMBB, DL, TII->get(X86::MOV64rr), X86::RDI)
21370 .addReg(sizeVReg);
21371 BuildMI(mallocMBB, DL, TII->get(X86::CALL64pcrel32))
21372 .addExternalSymbol("__morestack_allocate_stack_space")
21373 .addRegMask(RegMask)
21374 .addReg(X86::RDI, RegState::Implicit)
21375 .addReg(X86::RAX, RegState::ImplicitDefine);
21376 } else if (Is64Bit) {
21377 BuildMI(mallocMBB, DL, TII->get(X86::MOV32rr), X86::EDI)
21378 .addReg(sizeVReg);
21379 BuildMI(mallocMBB, DL, TII->get(X86::CALL64pcrel32))
21380 .addExternalSymbol("__morestack_allocate_stack_space")
21381 .addRegMask(RegMask)
21382 .addReg(X86::EDI, RegState::Implicit)
21383 .addReg(X86::EAX, RegState::ImplicitDefine);
21384 } else {
21385 BuildMI(mallocMBB, DL, TII->get(X86::SUB32ri), physSPReg).addReg(physSPReg)
21386 .addImm(16);
21387 BuildMI(mallocMBB, DL, TII->get(X86::PUSH32r)).addReg(sizeVReg);
21388 BuildMI(mallocMBB, DL, TII->get(X86::CALLpcrel32))
21389 .addExternalSymbol("__morestack_allocate_stack_space")
21390 .addRegMask(RegMask)
21391 .addReg(X86::EAX, RegState::ImplicitDefine);
21392 }
21394 if (!Is64Bit)
21395 BuildMI(mallocMBB, DL, TII->get(X86::ADD32ri), physSPReg).addReg(physSPReg)
21396 .addImm(16);
21398 BuildMI(mallocMBB, DL, TII->get(TargetOpcode::COPY), mallocPtrVReg)
21399 .addReg(IsLP64 ? X86::RAX : X86::EAX);
21400 BuildMI(mallocMBB, DL, TII->get(X86::JMP_1)).addMBB(continueMBB);
21402 // Set up the CFG correctly.
21403 BB->addSuccessor(bumpMBB);
21404 BB->addSuccessor(mallocMBB);
21405 mallocMBB->addSuccessor(continueMBB);
21406 bumpMBB->addSuccessor(continueMBB);
21408 // Take care of the PHI nodes.
21409 BuildMI(*continueMBB, continueMBB->begin(), DL, TII->get(X86::PHI),
21410 MI->getOperand(0).getReg())
21411 .addReg(mallocPtrVReg).addMBB(mallocMBB)
21412 .addReg(bumpSPPtrVReg).addMBB(bumpMBB);
21414 // Delete the original pseudo instruction.
21415 MI->eraseFromParent();
21418 return continueMBB;
21419 }
21421 MachineBasicBlock *
21422 X86TargetLowering::EmitLoweredWinAlloca(MachineInstr *MI,
21423 MachineBasicBlock *BB) const {
21424 DebugLoc DL = MI->getDebugLoc();
21426 assert(!Subtarget->isTargetMachO());
21428 X86FrameLowering::emitStackProbeCall(*BB->getParent(), *BB, MI, DL);
21430 MI->eraseFromParent(); // The pseudo instruction is gone now.
21432 return BB;
21433 }
21434 MachineBasicBlock *
21435 X86TargetLowering::EmitLoweredTLSCall(MachineInstr *MI,
21436 MachineBasicBlock *BB) const {
21437 // This is pretty easy. We're taking the value that we received from
21438 // our load from the relocation, sticking it in either RDI (x86-64)
21439 // or EAX and doing an indirect call. The return value will then
21440 // be in the normal return register.
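// Editorial sketch (not from the original source): on Darwin x86-64 the
// emitted sequence is roughly
//   movq _var@TLVP(%rip), %rdi
//   callq *(%rdi)
// after which the address of the thread-local variable comes back in RAX;
// the 32-bit variants below do the same dance through EAX.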
21441 MachineFunction *F = BB->getParent();
21442 const X86InstrInfo *TII = Subtarget->getInstrInfo();
21443 DebugLoc DL = MI->getDebugLoc();
21445 assert(Subtarget->isTargetDarwin() && "Darwin only instr emitted?");
21446 assert(MI->getOperand(3).isGlobal() && "This should be a global");
21448 // Get a register mask for the lowered call.
21449 // FIXME: The 32-bit calls have non-standard calling conventions. Use a
21450 // proper register mask.
21451 const uint32_t *RegMask =
21452 Subtarget->getRegisterInfo()->getCallPreservedMask(CallingConv::C);
21453 if (Subtarget->is64Bit()) {
21454 MachineInstrBuilder MIB = BuildMI(*BB, MI, DL,
21455 TII->get(X86::MOV64rm), X86::RDI)
21457 .addImm(0).addReg(0)
21458 .addGlobalAddress(MI->getOperand(3).getGlobal(), 0,
21459 MI->getOperand(3).getTargetFlags())
21461 MIB = BuildMI(*BB, MI, DL, TII->get(X86::CALL64m));
21462 addDirectMem(MIB, X86::RDI);
21463 MIB.addReg(X86::RAX, RegState::ImplicitDefine).addRegMask(RegMask);
21464 } else if (F->getTarget().getRelocationModel() != Reloc::PIC_) {
21465 MachineInstrBuilder MIB = BuildMI(*BB, MI, DL,
21466 TII->get(X86::MOV32rm), X86::EAX)
21468 .addImm(0).addReg(0)
21469 .addGlobalAddress(MI->getOperand(3).getGlobal(), 0,
21470 MI->getOperand(3).getTargetFlags())
21472 MIB = BuildMI(*BB, MI, DL, TII->get(X86::CALL32m));
21473 addDirectMem(MIB, X86::EAX);
21474 MIB.addReg(X86::EAX, RegState::ImplicitDefine).addRegMask(RegMask);
21475 } else {
21476 MachineInstrBuilder MIB = BuildMI(*BB, MI, DL,
21477 TII->get(X86::MOV32rm), X86::EAX)
21478 .addReg(TII->getGlobalBaseReg(F))
21479 .addImm(0).addReg(0)
21480 .addGlobalAddress(MI->getOperand(3).getGlobal(), 0,
21481 MI->getOperand(3).getTargetFlags())
21483 MIB = BuildMI(*BB, MI, DL, TII->get(X86::CALL32m));
21484 addDirectMem(MIB, X86::EAX);
21485 MIB.addReg(X86::EAX, RegState::ImplicitDefine).addRegMask(RegMask);
21486 }
21488 MI->eraseFromParent(); // The pseudo instruction is gone now.
21490 return BB;
21491 }
21492 MachineBasicBlock *
21493 X86TargetLowering::emitEHSjLjSetJmp(MachineInstr *MI,
21494 MachineBasicBlock *MBB) const {
21495 DebugLoc DL = MI->getDebugLoc();
21496 MachineFunction *MF = MBB->getParent();
21497 const TargetInstrInfo *TII = Subtarget->getInstrInfo();
21498 MachineRegisterInfo &MRI = MF->getRegInfo();
21500 const BasicBlock *BB = MBB->getBasicBlock();
21501 MachineFunction::iterator I = MBB;
21504 // Memory Reference
21505 MachineInstr::mmo_iterator MMOBegin = MI->memoperands_begin();
21506 MachineInstr::mmo_iterator MMOEnd = MI->memoperands_end();
21508 unsigned DstReg;
21509 unsigned MemOpndSlot = 0;
21511 unsigned CurOp = 0;
21513 DstReg = MI->getOperand(CurOp++).getReg();
21514 const TargetRegisterClass *RC = MRI.getRegClass(DstReg);
21515 assert(RC->hasType(MVT::i32) && "Invalid destination!");
21516 unsigned mainDstReg = MRI.createVirtualRegister(RC);
21517 unsigned restoreDstReg = MRI.createVirtualRegister(RC);
21519 MemOpndSlot = CurOp;
21521 MVT PVT = getPointerTy();
21522 assert((PVT == MVT::i64 || PVT == MVT::i32) &&
21523 "Invalid Pointer Size!");
21525 // For v = setjmp(buf), we generate
21527 // thisMBB:
21528 // buf[LabelOffset] = restoreMBB
21529 // SjLjSetup restoreMBB
21531 // mainMBB:
21532 // v_main = 0
21534 // sinkMBB:
21535 // v = phi(main, restore)
21537 // restoreMBB:
21538 // if base pointer being used, load it from frame
21539 // v_restore = 1
21541 MachineBasicBlock *thisMBB = MBB;
21542 MachineBasicBlock *mainMBB = MF->CreateMachineBasicBlock(BB);
21543 MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB);
21544 MachineBasicBlock *restoreMBB = MF->CreateMachineBasicBlock(BB);
21545 MF->insert(I, mainMBB);
21546 MF->insert(I, sinkMBB);
21547 MF->push_back(restoreMBB);
21549 MachineInstrBuilder MIB;
21551 // Transfer the remainder of BB and its successor edges to sinkMBB.
21552 sinkMBB->splice(sinkMBB->begin(), MBB,
21553 std::next(MachineBasicBlock::iterator(MI)), MBB->end());
21554 sinkMBB->transferSuccessorsAndUpdatePHIs(MBB);
21557 unsigned PtrStoreOpc = 0;
21558 unsigned LabelReg = 0;
21559 const int64_t LabelOffset = 1 * PVT.getStoreSize();
21560 Reloc::Model RM = MF->getTarget().getRelocationModel();
21561 bool UseImmLabel = (MF->getTarget().getCodeModel() == CodeModel::Small) &&
21562 (RM == Reloc::Static || RM == Reloc::DynamicNoPIC);
21564 // Prepare IP either in reg or imm.
21565 if (!UseImmLabel) {
21566 PtrStoreOpc = (PVT == MVT::i64) ? X86::MOV64mr : X86::MOV32mr;
21567 const TargetRegisterClass *PtrRC = getRegClassFor(PVT);
21568 LabelReg = MRI.createVirtualRegister(PtrRC);
21569 if (Subtarget->is64Bit()) {
21570 MIB = BuildMI(*thisMBB, MI, DL, TII->get(X86::LEA64r), LabelReg)
21574 .addMBB(restoreMBB)
21575 .addReg(0);
21576 } else {
21577 const X86InstrInfo *XII = static_cast<const X86InstrInfo*>(TII);
21578 MIB = BuildMI(*thisMBB, MI, DL, TII->get(X86::LEA32r), LabelReg)
21579 .addReg(XII->getGlobalBaseReg(MF))
21582 .addMBB(restoreMBB, Subtarget->ClassifyBlockAddressReference())
21583 .addReg(0);
21584 }
21585 } else
21586 PtrStoreOpc = (PVT == MVT::i64) ? X86::MOV64mi32 : X86::MOV32mi;
21588 MIB = BuildMI(*thisMBB, MI, DL, TII->get(PtrStoreOpc));
21589 for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
21590 if (i == X86::AddrDisp)
21591 MIB.addDisp(MI->getOperand(MemOpndSlot + i), LabelOffset);
21592 else
21593 MIB.addOperand(MI->getOperand(MemOpndSlot + i));
21594 }
21595 if (!UseImmLabel)
21596 MIB.addReg(LabelReg);
21597 else
21598 MIB.addMBB(restoreMBB);
21599 MIB.setMemRefs(MMOBegin, MMOEnd);
21601 MIB = BuildMI(*thisMBB, MI, DL, TII->get(X86::EH_SjLj_Setup))
21602 .addMBB(restoreMBB);
21604 const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
21605 MIB.addRegMask(RegInfo->getNoPreservedMask());
21606 thisMBB->addSuccessor(mainMBB);
21607 thisMBB->addSuccessor(restoreMBB);
21611 BuildMI(mainMBB, DL, TII->get(X86::MOV32r0), mainDstReg);
21612 mainMBB->addSuccessor(sinkMBB);
21615 BuildMI(*sinkMBB, sinkMBB->begin(), DL,
21616 TII->get(X86::PHI), DstReg)
21617 .addReg(mainDstReg).addMBB(mainMBB)
21618 .addReg(restoreDstReg).addMBB(restoreMBB);
21621 if (RegInfo->hasBasePointer(*MF)) {
21622 const bool Uses64BitFramePtr =
21623 Subtarget->isTarget64BitLP64() || Subtarget->isTargetNaCl64();
21624 X86MachineFunctionInfo *X86FI = MF->getInfo<X86MachineFunctionInfo>();
21625 X86FI->setRestoreBasePointer(MF);
21626 unsigned FramePtr = RegInfo->getFrameRegister(*MF);
21627 unsigned BasePtr = RegInfo->getBaseRegister();
21628 unsigned Opm = Uses64BitFramePtr ? X86::MOV64rm : X86::MOV32rm;
21629 addRegOffset(BuildMI(restoreMBB, DL, TII->get(Opm), BasePtr),
21630 FramePtr, true, X86FI->getRestoreBasePointerOffset())
21631 .setMIFlag(MachineInstr::FrameSetup);
21632 }
21633 BuildMI(restoreMBB, DL, TII->get(X86::MOV32ri), restoreDstReg).addImm(1);
21634 BuildMI(restoreMBB, DL, TII->get(X86::JMP_1)).addMBB(sinkMBB);
21635 restoreMBB->addSuccessor(sinkMBB);
21637 MI->eraseFromParent();
21639 return sinkMBB;
21640 }
21641 MachineBasicBlock *
21642 X86TargetLowering::emitEHSjLjLongJmp(MachineInstr *MI,
21643 MachineBasicBlock *MBB) const {
21644 DebugLoc DL = MI->getDebugLoc();
21645 MachineFunction *MF = MBB->getParent();
21646 const TargetInstrInfo *TII = Subtarget->getInstrInfo();
21647 MachineRegisterInfo &MRI = MF->getRegInfo();
21649 // Memory Reference
21650 MachineInstr::mmo_iterator MMOBegin = MI->memoperands_begin();
21651 MachineInstr::mmo_iterator MMOEnd = MI->memoperands_end();
21653 MVT PVT = getPointerTy();
21654 assert((PVT == MVT::i64 || PVT == MVT::i32) &&
21655 "Invalid Pointer Size!");
21657 const TargetRegisterClass *RC =
21658 (PVT == MVT::i64) ? &X86::GR64RegClass : &X86::GR32RegClass;
21659 unsigned Tmp = MRI.createVirtualRegister(RC);
21660 // Since FP is only updated here but NOT referenced, it's treated as GPR.
21661 const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
21662 unsigned FP = (PVT == MVT::i64) ? X86::RBP : X86::EBP;
21663 unsigned SP = RegInfo->getStackRegister();
21665 MachineInstrBuilder MIB;
21667 const int64_t LabelOffset = 1 * PVT.getStoreSize();
21668 const int64_t SPOffset = 2 * PVT.getStoreSize();
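// Editorial note: the jmp_buf layout assumed by this expansion is
//   buf[0]           = frame pointer
//   buf[LabelOffset] = target IP to resume at (stored by the setjmp side)
//   buf[SPOffset]    = stack pointer
// and the three loads below restore FP, the jump target and SP in that order.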
21670 unsigned PtrLoadOpc = (PVT == MVT::i64) ? X86::MOV64rm : X86::MOV32rm;
21671 unsigned IJmpOpc = (PVT == MVT::i64) ? X86::JMP64r : X86::JMP32r;
21674 MIB = BuildMI(*MBB, MI, DL, TII->get(PtrLoadOpc), FP);
21675 for (unsigned i = 0; i < X86::AddrNumOperands; ++i)
21676 MIB.addOperand(MI->getOperand(i));
21677 MIB.setMemRefs(MMOBegin, MMOEnd);
21679 MIB = BuildMI(*MBB, MI, DL, TII->get(PtrLoadOpc), Tmp);
21680 for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
21681 if (i == X86::AddrDisp)
21682 MIB.addDisp(MI->getOperand(i), LabelOffset);
21683 else
21684 MIB.addOperand(MI->getOperand(i));
21685 }
21686 MIB.setMemRefs(MMOBegin, MMOEnd);
21688 MIB = BuildMI(*MBB, MI, DL, TII->get(PtrLoadOpc), SP);
21689 for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
21690 if (i == X86::AddrDisp)
21691 MIB.addDisp(MI->getOperand(i), SPOffset);
21692 else
21693 MIB.addOperand(MI->getOperand(i));
21694 }
21695 MIB.setMemRefs(MMOBegin, MMOEnd);
21697 BuildMI(*MBB, MI, DL, TII->get(IJmpOpc)).addReg(Tmp);
21699 MI->eraseFromParent();
21701 return MBB;
21702 }
21703 // Replace 213-type (isel default) FMA3 instructions with 231-type for
21704 // accumulator loops. Writing back to the accumulator allows the coalescer
21705 // to remove extra copies in the loop.
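// Editorial example: in a reduction loop computing acc = a*b + acc, the
// 213 form (dst = src2*dst + src3) forces the accumulator into the src3
// slot, so a fresh copy of acc is needed every iteration; the 231 form
// (dst = src2*src3 + dst) keeps acc tied to the destination register, so
// the coalescer can remove the per-iteration copy.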
21706 MachineBasicBlock *
21707 X86TargetLowering::emitFMA3Instr(MachineInstr *MI,
21708 MachineBasicBlock *MBB) const {
21709 MachineOperand &AddendOp = MI->getOperand(3);
21711 // Bail out early if the addend isn't a register - we can't switch these.
21712 if (!AddendOp.isReg())
21713 return MBB;
21715 MachineFunction &MF = *MBB->getParent();
21716 MachineRegisterInfo &MRI = MF.getRegInfo();
21718 // Check whether the addend is defined by a PHI:
21719 assert(MRI.hasOneDef(AddendOp.getReg()) && "Multiple defs in SSA?");
21720 MachineInstr &AddendDef = *MRI.def_instr_begin(AddendOp.getReg());
21721 if (!AddendDef.isPHI())
21722 return MBB;
21724 // Look for the following pattern:
21726 // %addend = phi [%entry, 0], [%loop, %result]
21727 // ...
21728 // %result<tied1> = FMA213 %m2<tied0>, %m1, %addend
21730 // and rewrite it into:
21732 // %addend = phi [%entry, 0], [%loop, %result]
21733 // ...
21734 // %result<tied1> = FMA231 %addend<tied0>, %m1, %m2
21736 for (unsigned i = 1, e = AddendDef.getNumOperands(); i < e; i += 2) {
21737 assert(AddendDef.getOperand(i).isReg());
21738 MachineOperand PHISrcOp = AddendDef.getOperand(i);
21739 MachineInstr &PHISrcInst = *MRI.def_instr_begin(PHISrcOp.getReg());
21740 if (&PHISrcInst == MI) {
21741 // Found a matching instruction.
21742 unsigned NewFMAOpc = 0;
21743 switch (MI->getOpcode()) {
21744 case X86::VFMADDPDr213r: NewFMAOpc = X86::VFMADDPDr231r; break;
21745 case X86::VFMADDPSr213r: NewFMAOpc = X86::VFMADDPSr231r; break;
21746 case X86::VFMADDSDr213r: NewFMAOpc = X86::VFMADDSDr231r; break;
21747 case X86::VFMADDSSr213r: NewFMAOpc = X86::VFMADDSSr231r; break;
21748 case X86::VFMSUBPDr213r: NewFMAOpc = X86::VFMSUBPDr231r; break;
21749 case X86::VFMSUBPSr213r: NewFMAOpc = X86::VFMSUBPSr231r; break;
21750 case X86::VFMSUBSDr213r: NewFMAOpc = X86::VFMSUBSDr231r; break;
21751 case X86::VFMSUBSSr213r: NewFMAOpc = X86::VFMSUBSSr231r; break;
21752 case X86::VFNMADDPDr213r: NewFMAOpc = X86::VFNMADDPDr231r; break;
21753 case X86::VFNMADDPSr213r: NewFMAOpc = X86::VFNMADDPSr231r; break;
21754 case X86::VFNMADDSDr213r: NewFMAOpc = X86::VFNMADDSDr231r; break;
21755 case X86::VFNMADDSSr213r: NewFMAOpc = X86::VFNMADDSSr231r; break;
21756 case X86::VFNMSUBPDr213r: NewFMAOpc = X86::VFNMSUBPDr231r; break;
21757 case X86::VFNMSUBPSr213r: NewFMAOpc = X86::VFNMSUBPSr231r; break;
21758 case X86::VFNMSUBSDr213r: NewFMAOpc = X86::VFNMSUBSDr231r; break;
21759 case X86::VFNMSUBSSr213r: NewFMAOpc = X86::VFNMSUBSSr231r; break;
21760 case X86::VFMADDSUBPDr213r: NewFMAOpc = X86::VFMADDSUBPDr231r; break;
21761 case X86::VFMADDSUBPSr213r: NewFMAOpc = X86::VFMADDSUBPSr231r; break;
21762 case X86::VFMSUBADDPDr213r: NewFMAOpc = X86::VFMSUBADDPDr231r; break;
21763 case X86::VFMSUBADDPSr213r: NewFMAOpc = X86::VFMSUBADDPSr231r; break;
21765 case X86::VFMADDPDr213rY: NewFMAOpc = X86::VFMADDPDr231rY; break;
21766 case X86::VFMADDPSr213rY: NewFMAOpc = X86::VFMADDPSr231rY; break;
21767 case X86::VFMSUBPDr213rY: NewFMAOpc = X86::VFMSUBPDr231rY; break;
21768 case X86::VFMSUBPSr213rY: NewFMAOpc = X86::VFMSUBPSr231rY; break;
21769 case X86::VFNMADDPDr213rY: NewFMAOpc = X86::VFNMADDPDr231rY; break;
21770 case X86::VFNMADDPSr213rY: NewFMAOpc = X86::VFNMADDPSr231rY; break;
21771 case X86::VFNMSUBPDr213rY: NewFMAOpc = X86::VFNMSUBPDr231rY; break;
21772 case X86::VFNMSUBPSr213rY: NewFMAOpc = X86::VFNMSUBPSr231rY; break;
21773 case X86::VFMADDSUBPDr213rY: NewFMAOpc = X86::VFMADDSUBPDr231rY; break;
21774 case X86::VFMADDSUBPSr213rY: NewFMAOpc = X86::VFMADDSUBPSr231rY; break;
21775 case X86::VFMSUBADDPDr213rY: NewFMAOpc = X86::VFMSUBADDPDr231rY; break;
21776 case X86::VFMSUBADDPSr213rY: NewFMAOpc = X86::VFMSUBADDPSr231rY; break;
21777 default: llvm_unreachable("Unrecognized FMA variant.");
21780 const TargetInstrInfo &TII = *Subtarget->getInstrInfo();
21781 MachineInstrBuilder MIB =
21782 BuildMI(MF, MI->getDebugLoc(), TII.get(NewFMAOpc))
21783 .addOperand(MI->getOperand(0))
21784 .addOperand(MI->getOperand(3))
21785 .addOperand(MI->getOperand(2))
21786 .addOperand(MI->getOperand(1));
21787 MBB->insert(MachineBasicBlock::iterator(MI), MIB);
21788 MI->eraseFromParent();
21795 MachineBasicBlock *
21796 X86TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
21797 MachineBasicBlock *BB) const {
21798 switch (MI->getOpcode()) {
21799 default: llvm_unreachable("Unexpected instr type to insert");
21800 case X86::TAILJMPd64:
21801 case X86::TAILJMPr64:
21802 case X86::TAILJMPm64:
21803 case X86::TAILJMPd64_REX:
21804 case X86::TAILJMPr64_REX:
21805 case X86::TAILJMPm64_REX:
21806 llvm_unreachable("TAILJMP64 would not be touched here.");
21807 case X86::TCRETURNdi64:
21808 case X86::TCRETURNri64:
21809 case X86::TCRETURNmi64:
21810 return BB;
21811 case X86::WIN_ALLOCA:
21812 return EmitLoweredWinAlloca(MI, BB);
21813 case X86::SEG_ALLOCA_32:
21814 case X86::SEG_ALLOCA_64:
21815 return EmitLoweredSegAlloca(MI, BB);
21816 case X86::TLSCall_32:
21817 case X86::TLSCall_64:
21818 return EmitLoweredTLSCall(MI, BB);
21819 case X86::CMOV_GR8:
21820 case X86::CMOV_FR32:
21821 case X86::CMOV_FR64:
21822 case X86::CMOV_V4F32:
21823 case X86::CMOV_V2F64:
21824 case X86::CMOV_V2I64:
21825 case X86::CMOV_V8F32:
21826 case X86::CMOV_V4F64:
21827 case X86::CMOV_V4I64:
21828 case X86::CMOV_V16F32:
21829 case X86::CMOV_V8F64:
21830 case X86::CMOV_V8I64:
21831 case X86::CMOV_GR16:
21832 case X86::CMOV_GR32:
21833 case X86::CMOV_RFP32:
21834 case X86::CMOV_RFP64:
21835 case X86::CMOV_RFP80:
21836 return EmitLoweredSelect(MI, BB);
21838 case X86::FP32_TO_INT16_IN_MEM:
21839 case X86::FP32_TO_INT32_IN_MEM:
21840 case X86::FP32_TO_INT64_IN_MEM:
21841 case X86::FP64_TO_INT16_IN_MEM:
21842 case X86::FP64_TO_INT32_IN_MEM:
21843 case X86::FP64_TO_INT64_IN_MEM:
21844 case X86::FP80_TO_INT16_IN_MEM:
21845 case X86::FP80_TO_INT32_IN_MEM:
21846 case X86::FP80_TO_INT64_IN_MEM: {
21847 MachineFunction *F = BB->getParent();
21848 const TargetInstrInfo *TII = Subtarget->getInstrInfo();
21849 DebugLoc DL = MI->getDebugLoc();
21851 // Change the floating point control register to use "round towards zero"
21852 // mode when truncating to an integer value.
21853 int CWFrameIdx = F->getFrameInfo()->CreateStackObject(2, 2, false);
21854 addFrameReference(BuildMI(*BB, MI, DL,
21855 TII->get(X86::FNSTCW16m)), CWFrameIdx);
21857 // Load the old value of the control word so it can be restored later...
21858 unsigned OldCW =
21859 F->getRegInfo().createVirtualRegister(&X86::GR16RegClass);
21860 addFrameReference(BuildMI(*BB, MI, DL, TII->get(X86::MOV16rm), OldCW),
21861 CWFrameIdx);
21863 // Set the rounding-control part of the control word to round toward zero...
21864 addFrameReference(BuildMI(*BB, MI, DL, TII->get(X86::MOV16mi)), CWFrameIdx)
21865 .addImm(0xC7F);
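// Illustrative note: 0xC7F has the x87 rounding-control field (bits 11:10)
// set to 11b, i.e. round toward zero, and keeps all exception-mask bits set,
// so the FIST store emitted below truncates as C float-to-int conversion
// requires.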
21867 // Reload the modified control word now...
21868 addFrameReference(BuildMI(*BB, MI, DL,
21869 TII->get(X86::FLDCW16m)), CWFrameIdx);
21871 // Restore the memory image of control word to original value
21872 addFrameReference(BuildMI(*BB, MI, DL, TII->get(X86::MOV16mr)), CWFrameIdx)
21873 .addReg(OldCW);
21875 // Get the X86 opcode to use.
21876 unsigned Opc;
21877 switch (MI->getOpcode()) {
21878 default: llvm_unreachable("illegal opcode!");
21879 case X86::FP32_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m32; break;
21880 case X86::FP32_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m32; break;
21881 case X86::FP32_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m32; break;
21882 case X86::FP64_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m64; break;
21883 case X86::FP64_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m64; break;
21884 case X86::FP64_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m64; break;
21885 case X86::FP80_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m80; break;
21886 case X86::FP80_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m80; break;
21887 case X86::FP80_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m80; break;
21888 }
21890 X86AddressMode AM;
21891 MachineOperand &Op = MI->getOperand(0);
21892 if (Op.isReg()) {
21893 AM.BaseType = X86AddressMode::RegBase;
21894 AM.Base.Reg = Op.getReg();
21895 } else {
21896 AM.BaseType = X86AddressMode::FrameIndexBase;
21897 AM.Base.FrameIndex = Op.getIndex();
21898 }
21899 Op = MI->getOperand(1);
21900 if (Op.isImm())
21901 AM.Scale = Op.getImm();
21902 Op = MI->getOperand(2);
21903 if (Op.isImm())
21904 AM.IndexReg = Op.getImm();
21905 Op = MI->getOperand(3);
21906 if (Op.isGlobal()) {
21907 AM.GV = Op.getGlobal();
21908 } else {
21909 AM.Disp = Op.getImm();
21910 }
21911 addFullAddress(BuildMI(*BB, MI, DL, TII->get(Opc)), AM)
21912 .addReg(MI->getOperand(X86::AddrNumOperands).getReg());
21914 // Reload the original control word now.
21915 addFrameReference(BuildMI(*BB, MI, DL,
21916 TII->get(X86::FLDCW16m)), CWFrameIdx);
21918 MI->eraseFromParent(); // The pseudo instruction is gone now.
21919 return BB;
21920 }
21921 // String/text processing lowering.
21922 case X86::PCMPISTRM128REG:
21923 case X86::VPCMPISTRM128REG:
21924 case X86::PCMPISTRM128MEM:
21925 case X86::VPCMPISTRM128MEM:
21926 case X86::PCMPESTRM128REG:
21927 case X86::VPCMPESTRM128REG:
21928 case X86::PCMPESTRM128MEM:
21929 case X86::VPCMPESTRM128MEM:
21930 assert(Subtarget->hasSSE42() &&
21931 "Target must have SSE4.2 or AVX features enabled");
21932 return EmitPCMPSTRM(MI, BB, Subtarget->getInstrInfo());
21934 // String/text processing lowering.
21935 case X86::PCMPISTRIREG:
21936 case X86::VPCMPISTRIREG:
21937 case X86::PCMPISTRIMEM:
21938 case X86::VPCMPISTRIMEM:
21939 case X86::PCMPESTRIREG:
21940 case X86::VPCMPESTRIREG:
21941 case X86::PCMPESTRIMEM:
21942 case X86::VPCMPESTRIMEM:
21943 assert(Subtarget->hasSSE42() &&
21944 "Target must have SSE4.2 or AVX features enabled");
21945 return EmitPCMPSTRI(MI, BB, Subtarget->getInstrInfo());
21947 // Thread synchronization.
21948 case X86::MONITOR:
21949 return EmitMonitor(MI, BB, Subtarget);
21952 case X86::XBEGIN:
21953 return EmitXBegin(MI, BB, Subtarget->getInstrInfo());
21955 case X86::VASTART_SAVE_XMM_REGS:
21956 return EmitVAStartSaveXMMRegsWithCustomInserter(MI, BB);
21958 case X86::VAARG_64:
21959 return EmitVAARG64WithCustomInserter(MI, BB);
21961 case X86::EH_SjLj_SetJmp32:
21962 case X86::EH_SjLj_SetJmp64:
21963 return emitEHSjLjSetJmp(MI, BB);
21965 case X86::EH_SjLj_LongJmp32:
21966 case X86::EH_SjLj_LongJmp64:
21967 return emitEHSjLjLongJmp(MI, BB);
21969 case TargetOpcode::STATEPOINT:
21970 // As an implementation detail, STATEPOINT shares the STACKMAP format at
21971 // this point in the process. We diverge later.
21972 return emitPatchPoint(MI, BB);
21974 case TargetOpcode::STACKMAP:
21975 case TargetOpcode::PATCHPOINT:
21976 return emitPatchPoint(MI, BB);
21978 case X86::VFMADDPDr213r:
21979 case X86::VFMADDPSr213r:
21980 case X86::VFMADDSDr213r:
21981 case X86::VFMADDSSr213r:
21982 case X86::VFMSUBPDr213r:
21983 case X86::VFMSUBPSr213r:
21984 case X86::VFMSUBSDr213r:
21985 case X86::VFMSUBSSr213r:
21986 case X86::VFNMADDPDr213r:
21987 case X86::VFNMADDPSr213r:
21988 case X86::VFNMADDSDr213r:
21989 case X86::VFNMADDSSr213r:
21990 case X86::VFNMSUBPDr213r:
21991 case X86::VFNMSUBPSr213r:
21992 case X86::VFNMSUBSDr213r:
21993 case X86::VFNMSUBSSr213r:
21994 case X86::VFMADDSUBPDr213r:
21995 case X86::VFMADDSUBPSr213r:
21996 case X86::VFMSUBADDPDr213r:
21997 case X86::VFMSUBADDPSr213r:
21998 case X86::VFMADDPDr213rY:
21999 case X86::VFMADDPSr213rY:
22000 case X86::VFMSUBPDr213rY:
22001 case X86::VFMSUBPSr213rY:
22002 case X86::VFNMADDPDr213rY:
22003 case X86::VFNMADDPSr213rY:
22004 case X86::VFNMSUBPDr213rY:
22005 case X86::VFNMSUBPSr213rY:
22006 case X86::VFMADDSUBPDr213rY:
22007 case X86::VFMADDSUBPSr213rY:
22008 case X86::VFMSUBADDPDr213rY:
22009 case X86::VFMSUBADDPSr213rY:
22010 return emitFMA3Instr(MI, BB);
22014 //===----------------------------------------------------------------------===//
22015 // X86 Optimization Hooks
22016 //===----------------------------------------------------------------------===//
22018 void X86TargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
22021 const SelectionDAG &DAG,
22022 unsigned Depth) const {
22023 unsigned BitWidth = KnownZero.getBitWidth();
22024 unsigned Opc = Op.getOpcode();
22025 assert((Opc >= ISD::BUILTIN_OP_END ||
22026 Opc == ISD::INTRINSIC_WO_CHAIN ||
22027 Opc == ISD::INTRINSIC_W_CHAIN ||
22028 Opc == ISD::INTRINSIC_VOID) &&
22029 "Should use MaskedValueIsZero if you don't know whether Op"
22030 " is a target node!");
22032 KnownZero = KnownOne = APInt(BitWidth, 0); // Don't know anything.
22046 // These nodes' second result is a boolean.
22047 if (Op.getResNo() == 0)
22050 case X86ISD::SETCC:
22051 KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - 1);
22053 case ISD::INTRINSIC_WO_CHAIN: {
22054 unsigned IntId = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
22055 unsigned NumLoBits = 0;
22058 case Intrinsic::x86_sse_movmsk_ps:
22059 case Intrinsic::x86_avx_movmsk_ps_256:
22060 case Intrinsic::x86_sse2_movmsk_pd:
22061 case Intrinsic::x86_avx_movmsk_pd_256:
22062 case Intrinsic::x86_mmx_pmovmskb:
22063 case Intrinsic::x86_sse2_pmovmskb_128:
22064 case Intrinsic::x86_avx2_pmovmskb: {
22065 // High bits of movmskp{s|d}, pmovmskb are known zero.
22067 default: llvm_unreachable("Impossible intrinsic"); // Can't reach here.
22068 case Intrinsic::x86_sse_movmsk_ps: NumLoBits = 4; break;
22069 case Intrinsic::x86_avx_movmsk_ps_256: NumLoBits = 8; break;
22070 case Intrinsic::x86_sse2_movmsk_pd: NumLoBits = 2; break;
22071 case Intrinsic::x86_avx_movmsk_pd_256: NumLoBits = 4; break;
22072 case Intrinsic::x86_mmx_pmovmskb: NumLoBits = 8; break;
22073 case Intrinsic::x86_sse2_pmovmskb_128: NumLoBits = 16; break;
22074 case Intrinsic::x86_avx2_pmovmskb: NumLoBits = 32; break;
22076 KnownZero = APInt::getHighBitsSet(BitWidth, BitWidth - NumLoBits);
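// Worked example (illustrative): movmskps on a v4f32 input only produces a
// 4-bit mask, so for an i32 result the high BitWidth - NumLoBits = 32 - 4 = 28
// bits are reported as known zero here.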
22085 unsigned X86TargetLowering::ComputeNumSignBitsForTargetNode(
22087 const SelectionDAG &,
22088 unsigned Depth) const {
22089 // SETCC_CARRY sets the dest to ~0 for true or 0 for false.
22090 if (Op.getOpcode() == X86ISD::SETCC_CARRY)
22091 return Op.getValueType().getScalarType().getSizeInBits();
22097 /// isGAPlusOffset - Returns true (and the GlobalValue and the offset) if the
22098 /// node is a GlobalAddress + offset.
22099 bool X86TargetLowering::isGAPlusOffset(SDNode *N,
22100 const GlobalValue* &GA,
22101 int64_t &Offset) const {
22102 if (N->getOpcode() == X86ISD::Wrapper) {
22103 if (isa<GlobalAddressSDNode>(N->getOperand(0))) {
22104 GA = cast<GlobalAddressSDNode>(N->getOperand(0))->getGlobal();
22105 Offset = cast<GlobalAddressSDNode>(N->getOperand(0))->getOffset();
22109 return TargetLowering::isGAPlusOffset(N, GA, Offset);
22112 /// isShuffleHigh128VectorInsertLow - Checks whether the shuffle node is the
22113 /// same as extracting the high 128-bit part of a 256-bit vector and then
22114 /// inserting the result into the low part of a new 256-bit vector
22115 static bool isShuffleHigh128VectorInsertLow(ShuffleVectorSDNode *SVOp) {
22116 EVT VT = SVOp->getValueType(0);
22117 unsigned NumElems = VT.getVectorNumElements();
22119 // vector_shuffle <4, 5, 6, 7, u, u, u, u> or <2, 3, u, u>
22120 for (unsigned i = 0, j = NumElems/2; i != NumElems/2; ++i, ++j)
22121 if (!isUndefOrEqual(SVOp->getMaskElt(i), j) ||
22122 SVOp->getMaskElt(j) >= 0)
22128 /// isShuffleLow128VectorInsertHigh - Checks whether the shuffle node is the
22129 /// same as extracting the low 128-bit part of a 256-bit vector and then
22130 /// inserting the result into the high part of a new 256-bit vector
22131 static bool isShuffleLow128VectorInsertHigh(ShuffleVectorSDNode *SVOp) {
22132 EVT VT = SVOp->getValueType(0);
22133 unsigned NumElems = VT.getVectorNumElements();
22135 // vector_shuffle <u, u, u, u, 0, 1, 2, 3> or <u, u, 0, 1>
22136 for (unsigned i = NumElems/2, j = 0; i != NumElems; ++i, ++j)
22137 if (!isUndefOrEqual(SVOp->getMaskElt(i), j) ||
22138 SVOp->getMaskElt(j) >= 0)
22144 /// PerformShuffleCombine256 - Performs shuffle combines for 256-bit vectors.
22145 static SDValue PerformShuffleCombine256(SDNode *N, SelectionDAG &DAG,
22146 TargetLowering::DAGCombinerInfo &DCI,
22147 const X86Subtarget* Subtarget) {
22149 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
22150 SDValue V1 = SVOp->getOperand(0);
22151 SDValue V2 = SVOp->getOperand(1);
22152 EVT VT = SVOp->getValueType(0);
22153 unsigned NumElems = VT.getVectorNumElements();
22155 if (V1.getOpcode() == ISD::CONCAT_VECTORS &&
22156 V2.getOpcode() == ISD::CONCAT_VECTORS) {
22160 //   V1 = concat_vectors(V, undef)
22162 //   V2 = concat_vectors(zero build_vector, undef)
22165 //   RESULT: V zero-extended to a 256-bit vector
22167 if (V2.getOperand(0).getOpcode() != ISD::BUILD_VECTOR ||
22168 V2.getOperand(1).getOpcode() != ISD::UNDEF ||
22169 V1.getOperand(1).getOpcode() != ISD::UNDEF)
22172 if (!ISD::isBuildVectorAllZeros(V2.getOperand(0).getNode()))
22175 // To match the shuffle mask, the first half of the mask should
22176 // be exactly the first vector, and all the rest a splat with the
22177 // first element of the second one.
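// For example (illustrative), for v4i64 a matching shuffle looks like
//   V1 = concat_vectors(V, undef), V2 = concat_vectors(zeros, undef),
//   mask = <0, 1, 4, 4>
// i.e. the low half is V's low 128 bits and the high half reads the zero
// vector, which is just V zero-extended to 256 bits.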
22178 for (unsigned i = 0; i != NumElems/2; ++i)
22179 if (!isUndefOrEqual(SVOp->getMaskElt(i), i) ||
22180 !isUndefOrEqual(SVOp->getMaskElt(i+NumElems/2), NumElems))
22183 // If V1 is coming from a vector load then just fold to a VZEXT_LOAD.
22184 if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(V1.getOperand(0))) {
22185 if (Ld->hasNUsesOfValue(1, 0)) {
22186 SDVTList Tys = DAG.getVTList(MVT::v4i64, MVT::Other);
22187 SDValue Ops[] = { Ld->getChain(), Ld->getBasePtr() };
22189 DAG.getMemIntrinsicNode(X86ISD::VZEXT_LOAD, dl, Tys, Ops,
22191 Ld->getPointerInfo(),
22192 Ld->getAlignment(),
22193 false/*isVolatile*/, true/*ReadMem*/,
22194 false/*WriteMem*/);
22196 // Make sure the newly-created LOAD is in the same position as Ld in
22197 // terms of dependency. We create a TokenFactor for Ld and ResNode,
22198 // and update uses of Ld's output chain to use the TokenFactor.
22199 if (Ld->hasAnyUseOfValue(1)) {
22200 SDValue NewChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
22201 SDValue(Ld, 1), SDValue(ResNode.getNode(), 1));
22202 DAG.ReplaceAllUsesOfValueWith(SDValue(Ld, 1), NewChain);
22203 DAG.UpdateNodeOperands(NewChain.getNode(), SDValue(Ld, 1),
22204 SDValue(ResNode.getNode(), 1));
22207 return DAG.getNode(ISD::BITCAST, dl, VT, ResNode);
22211 // Emit a zeroed vector and insert the desired subvector on its
22212 // original position.
22213 SDValue Zeros = getZeroVector(VT, Subtarget, DAG, dl);
22214 SDValue InsV = Insert128BitVector(Zeros, V1.getOperand(0), 0, DAG, dl);
22215 return DCI.CombineTo(N, InsV);
22218 //===--------------------------------------------------------------------===//
22219 // Combine some shuffles into subvector extracts and inserts:
22222 // vector_shuffle <4, 5, 6, 7, u, u, u, u> or <2, 3, u, u>
22223 if (isShuffleHigh128VectorInsertLow(SVOp)) {
22224 SDValue V = Extract128BitVector(V1, NumElems/2, DAG, dl);
22225 SDValue InsV = Insert128BitVector(DAG.getUNDEF(VT), V, 0, DAG, dl);
22226 return DCI.CombineTo(N, InsV);
22229 // vector_shuffle <u, u, u, u, 0, 1, 2, 3> or <u, u, 0, 1>
22230 if (isShuffleLow128VectorInsertHigh(SVOp)) {
22231 SDValue V = Extract128BitVector(V1, 0, DAG, dl);
22232 SDValue InsV = Insert128BitVector(DAG.getUNDEF(VT), V, NumElems/2, DAG, dl);
22233 return DCI.CombineTo(N, InsV);
22239 /// \brief Combine an arbitrary chain of shuffles into a single instruction if
22240 /// possible.
22241 ///
22242 /// This is the leaf of the recursive combining below. When we have found some
22243 /// chain of single-use x86 shuffle instructions and accumulated the combined
22244 /// shuffle mask represented by them, this will try to pattern match that mask
22245 /// into either a single instruction if there is a special purpose instruction
22246 /// for this operation, or into a PSHUFB instruction which is a fully general
22247 /// instruction but should only be used to replace chains over a certain depth.
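/// For example (illustrative): if the accumulated float-domain mask over the
/// chain is <1, 1, 3, 3>, the whole chain can be replaced by a single
/// MOVSHDUP when SSE3 is available (see the matching code below).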
22248 static bool combineX86ShuffleChain(SDValue Op, SDValue Root, ArrayRef<int> Mask,
22249 int Depth, bool HasPSHUFB, SelectionDAG &DAG,
22250 TargetLowering::DAGCombinerInfo &DCI,
22251 const X86Subtarget *Subtarget) {
22252 assert(!Mask.empty() && "Cannot combine an empty shuffle mask!");
22254 // Find the operand that enters the chain. Note that multiple uses are OK
22255 // here, we're not going to remove the operand we find.
22256 SDValue Input = Op.getOperand(0);
22257 while (Input.getOpcode() == ISD::BITCAST)
22258 Input = Input.getOperand(0);
22260 MVT VT = Input.getSimpleValueType();
22261 MVT RootVT = Root.getSimpleValueType();
22264 // Just remove no-op shuffle masks.
22265 if (Mask.size() == 1) {
22266 DCI.CombineTo(Root.getNode(), DAG.getNode(ISD::BITCAST, DL, RootVT, Input),
22271 // Use the float domain if the operand type is a floating point type.
22272 bool FloatDomain = VT.isFloatingPoint();
22274 // For floating point shuffles, we don't have free copies in the shuffle
22275 // instructions or the ability to load as part of the instruction, so
22276 // canonicalize their shuffles to UNPCK or MOV variants.
22278 // Note that even with AVX we prefer the PSHUFD form of shuffle for integer
22279 // vectors because it can have a load folded into it that UNPCK cannot. This
22280 // doesn't preclude something switching to the shorter encoding post-RA.
22282 if (Mask.equals(0, 0) || Mask.equals(1, 1)) {
22283 bool Lo = Mask.equals(0, 0);
22286 // Check if we have SSE3 which will let us use MOVDDUP. That instruction
22287 // is no slower than UNPCKLPD but has the option to fold the input operand
22288 // into even an unaligned memory load.
22289 if (Lo && Subtarget->hasSSE3()) {
22290 Shuffle = X86ISD::MOVDDUP;
22291 ShuffleVT = MVT::v2f64;
22293 // We have MOVLHPS and MOVHLPS throughout SSE and they encode smaller
22294 // than the UNPCK variants.
22295 Shuffle = Lo ? X86ISD::MOVLHPS : X86ISD::MOVHLPS;
22296 ShuffleVT = MVT::v4f32;
22298 if (Depth == 1 && Root->getOpcode() == Shuffle)
22299 return false; // Nothing to do!
22300 Op = DAG.getNode(ISD::BITCAST, DL, ShuffleVT, Input);
22301 DCI.AddToWorklist(Op.getNode());
22302 if (Shuffle == X86ISD::MOVDDUP)
22303 Op = DAG.getNode(Shuffle, DL, ShuffleVT, Op);
22305 Op = DAG.getNode(Shuffle, DL, ShuffleVT, Op, Op);
22306 DCI.AddToWorklist(Op.getNode());
22307 DCI.CombineTo(Root.getNode(), DAG.getNode(ISD::BITCAST, DL, RootVT, Op),
22311 if (Subtarget->hasSSE3() &&
22312 (Mask.equals(0, 0, 2, 2) || Mask.equals(1, 1, 3, 3))) {
22313 bool Lo = Mask.equals(0, 0, 2, 2);
22314 unsigned Shuffle = Lo ? X86ISD::MOVSLDUP : X86ISD::MOVSHDUP;
22315 MVT ShuffleVT = MVT::v4f32;
22316 if (Depth == 1 && Root->getOpcode() == Shuffle)
22317 return false; // Nothing to do!
22318 Op = DAG.getNode(ISD::BITCAST, DL, ShuffleVT, Input);
22319 DCI.AddToWorklist(Op.getNode());
22320 Op = DAG.getNode(Shuffle, DL, ShuffleVT, Op);
22321 DCI.AddToWorklist(Op.getNode());
22322 DCI.CombineTo(Root.getNode(), DAG.getNode(ISD::BITCAST, DL, RootVT, Op),
22326 if (Mask.equals(0, 0, 1, 1) || Mask.equals(2, 2, 3, 3)) {
22327 bool Lo = Mask.equals(0, 0, 1, 1);
22328 unsigned Shuffle = Lo ? X86ISD::UNPCKL : X86ISD::UNPCKH;
22329 MVT ShuffleVT = MVT::v4f32;
22330 if (Depth == 1 && Root->getOpcode() == Shuffle)
22331 return false; // Nothing to do!
22332 Op = DAG.getNode(ISD::BITCAST, DL, ShuffleVT, Input);
22333 DCI.AddToWorklist(Op.getNode());
22334 Op = DAG.getNode(Shuffle, DL, ShuffleVT, Op, Op);
22335 DCI.AddToWorklist(Op.getNode());
22336 DCI.CombineTo(Root.getNode(), DAG.getNode(ISD::BITCAST, DL, RootVT, Op),
22342 // We always canonicalize the 8 x i16 and 16 x i8 shuffles into their UNPCK
22343 // variants as none of these have single-instruction variants that are
22344 // superior to the UNPCK formulation.
22345 if (!FloatDomain &&
22346 (Mask.equals(0, 0, 1, 1, 2, 2, 3, 3) ||
22347 Mask.equals(4, 4, 5, 5, 6, 6, 7, 7) ||
22348 Mask.equals(0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7) ||
22349 Mask.equals(8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13, 14, 14, 15,
22351 bool Lo = Mask[0] == 0;
22352 unsigned Shuffle = Lo ? X86ISD::UNPCKL : X86ISD::UNPCKH;
22353 if (Depth == 1 && Root->getOpcode() == Shuffle)
22354 return false; // Nothing to do!
22355 MVT ShuffleVT;
22356 switch (Mask.size()) {
22357 case 8:
22358 ShuffleVT = MVT::v8i16;
22359 break;
22360 case 16:
22361 ShuffleVT = MVT::v16i8;
22362 break;
22363 default:
22364 llvm_unreachable("Impossible mask size!");
22365 }
22366 Op = DAG.getNode(ISD::BITCAST, DL, ShuffleVT, Input);
22367 DCI.AddToWorklist(Op.getNode());
22368 Op = DAG.getNode(Shuffle, DL, ShuffleVT, Op, Op);
22369 DCI.AddToWorklist(Op.getNode());
22370 DCI.CombineTo(Root.getNode(), DAG.getNode(ISD::BITCAST, DL, RootVT, Op),
22375 // Don't try to re-form single instruction chains under any circumstances now
22376 // that we've done encoding canonicalization for them.
22380 // If we have 3 or more shuffle instructions or a chain involving PSHUFB, we
22381 // can replace them with a single PSHUFB instruction profitably. Intel's
22382 // manuals suggest only using PSHUFB if doing so replaces 5 instructions, but
22383 // in practice PSHUFB tends to be *very* fast so we're more aggressive.
22384 if ((Depth >= 3 || HasPSHUFB) && Subtarget->hasSSSE3()) {
22385 SmallVector<SDValue, 16> PSHUFBMask;
22386 assert(Mask.size() <= 16 && "Can't shuffle elements smaller than bytes!");
22387 int Ratio = 16 / Mask.size();
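// For example (illustrative): a 4-element mask {2, 0, 1, 3} over i32-sized
// lanes has Ratio = 4 and expands to the byte mask
//   {8,9,10,11, 0,1,2,3, 4,5,6,7, 12,13,14,15}.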
22388 for (unsigned i = 0; i < 16; ++i) {
22389 if (Mask[i / Ratio] == SM_SentinelUndef) {
22390 PSHUFBMask.push_back(DAG.getUNDEF(MVT::i8));
22393 int M = Mask[i / Ratio] != SM_SentinelZero
22394 ? Ratio * Mask[i / Ratio] + i % Ratio
22396 PSHUFBMask.push_back(DAG.getConstant(M, MVT::i8));
22398 Op = DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, Input);
22399 DCI.AddToWorklist(Op.getNode());
22400 SDValue PSHUFBMaskOp =
22401 DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v16i8, PSHUFBMask);
22402 DCI.AddToWorklist(PSHUFBMaskOp.getNode());
22403 Op = DAG.getNode(X86ISD::PSHUFB, DL, MVT::v16i8, Op, PSHUFBMaskOp);
22404 DCI.AddToWorklist(Op.getNode());
22405 DCI.CombineTo(Root.getNode(), DAG.getNode(ISD::BITCAST, DL, RootVT, Op),
22410 // Failed to find any combines.
22414 /// \brief Fully generic combining of x86 shuffle instructions.
22416 /// This should be the last combine run over the x86 shuffle instructions. Once
22417 /// they have been fully optimized, this will recursively consider all chains
22418 /// of single-use shuffle instructions, build a generic model of the cumulative
22419 /// shuffle operation, and check for simpler instructions which implement this
22420 /// operation. We use this primarily for two purposes:
22422 /// 1) Collapse generic shuffles to specialized single instructions when
22423 /// equivalent. In most cases, this is just an encoding size win, but
22424 /// sometimes we will collapse multiple generic shuffles into a single
22425 /// special-purpose shuffle.
22426 /// 2) Look for sequences of shuffle instructions with 3 or more total
22427 /// instructions, and replace them with the slightly more expensive SSSE3
22428 /// PSHUFB instruction if available. We do this as the last combining step
22429 /// to ensure we avoid using PSHUFB if we can implement the shuffle with
22430 /// a suitable short sequence of other instructions. The PSHUFB will either
22431 /// use a register or have to read from memory and so is slightly (but only
22432 /// slightly) more expensive than the other shuffle instructions.
22434 /// Because this is inherently a quadratic operation (for each shuffle in
22435 /// a chain, we recurse up the chain), the depth is limited to 8 instructions.
22436 /// This should never be an issue in practice as the shuffle lowering doesn't
22437 /// produce sequences of more than 8 instructions.
22439 /// FIXME: We will currently miss some cases where the redundant shuffling
22440 /// would simplify under the threshold for PSHUFB formation because of
22441 /// combine-ordering. To fix this, we should do the redundant instruction
22442 /// combining in this recursive walk.
22443 static bool combineX86ShufflesRecursively(SDValue Op, SDValue Root,
22444 ArrayRef<int> RootMask,
22445 int Depth, bool HasPSHUFB,
22447 TargetLowering::DAGCombinerInfo &DCI,
22448 const X86Subtarget *Subtarget) {
22449 // Bound the depth of our recursive combine because this is ultimately
22450 // quadratic in nature.
22454 // Directly rip through bitcasts to find the underlying operand.
22455 while (Op.getOpcode() == ISD::BITCAST && Op.getOperand(0).hasOneUse())
22456 Op = Op.getOperand(0);
22458 MVT VT = Op.getSimpleValueType();
22459 if (!VT.isVector())
22460 return false; // Bail if we hit a non-vector.
22461 // FIXME: This routine should be taught about 256-bit shuffles, or a 256-bit
22462 // version should be added.
22463 if (VT.getSizeInBits() != 128)
22466 assert(Root.getSimpleValueType().isVector() &&
22467 "Shuffles operate on vector types!");
22468 assert(VT.getSizeInBits() == Root.getSimpleValueType().getSizeInBits() &&
22469 "Can only combine shuffles of the same vector register size.");
22471 if (!isTargetShuffle(Op.getOpcode()))
22473 SmallVector<int, 16> OpMask;
22475 bool HaveMask = getTargetShuffleMask(Op.getNode(), VT, OpMask, IsUnary);
22476 // We can only combine unary shuffles for which we can decode the mask.
22477 if (!HaveMask || !IsUnary)
22480 assert(VT.getVectorNumElements() == OpMask.size() &&
22481 "Different mask size from vector size!");
22482 assert(((RootMask.size() > OpMask.size() &&
22483 RootMask.size() % OpMask.size() == 0) ||
22484 (OpMask.size() > RootMask.size() &&
22485 OpMask.size() % RootMask.size() == 0) ||
22486 OpMask.size() == RootMask.size()) &&
22487 "The smaller number of elements must divide the larger.");
22488 int RootRatio = std::max<int>(1, OpMask.size() / RootMask.size());
22489 int OpRatio = std::max<int>(1, RootMask.size() / OpMask.size());
22490 assert(((RootRatio == 1 && OpRatio == 1) ||
22491 (RootRatio == 1) != (OpRatio == 1)) &&
22492 "Must not have a ratio for both incoming and op masks!");
22494 SmallVector<int, 16> Mask;
22495 Mask.reserve(std::max(OpMask.size(), RootMask.size()));
22497 // Merge this shuffle operation's mask into our accumulated mask. Note that
22498 // this shuffle's mask will be the first applied to the input, followed by the
22499 // root mask to get us all the way to the root value arrangement. The reason
22500 // for this order is that we are recursing up the operation chain.
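// Worked example (illustrative): a PSHUFD root mask {1, 0, 3, 2} sitting on
// top of a PSHUFLW operand mask {1, 0, 2, 3, 4, 5, 6, 7} gives RootRatio = 2
// and OpRatio = 1, and composes to the 8-element mask {2, 3, 1, 0, 6, 7, 4, 5}.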
22501 for (int i = 0, e = std::max(OpMask.size(), RootMask.size()); i < e; ++i) {
22502 int RootIdx = i / RootRatio;
22503 if (RootMask[RootIdx] < 0) {
22504 // This is a zero or undef lane, we're done.
22505 Mask.push_back(RootMask[RootIdx]);
22509 int RootMaskedIdx = RootMask[RootIdx] * RootRatio + i % RootRatio;
22510 int OpIdx = RootMaskedIdx / OpRatio;
22511 if (OpMask[OpIdx] < 0) {
22512 // The incoming lanes are zero or undef, it doesn't matter which ones we
22513 // are actually using.
22514 Mask.push_back(OpMask[OpIdx]);
22518 // Ok, we have non-zero lanes, map them through.
22519 Mask.push_back(OpMask[OpIdx] * OpRatio +
22520 RootMaskedIdx % OpRatio);
22523 // See if we can recurse into the operand to combine more things.
22524 switch (Op.getOpcode()) {
22525 case X86ISD::PSHUFB:
22527 case X86ISD::PSHUFD:
22528 case X86ISD::PSHUFHW:
22529 case X86ISD::PSHUFLW:
22530 if (Op.getOperand(0).hasOneUse() &&
22531 combineX86ShufflesRecursively(Op.getOperand(0), Root, Mask, Depth + 1,
22532 HasPSHUFB, DAG, DCI, Subtarget))
22536 case X86ISD::UNPCKL:
22537 case X86ISD::UNPCKH:
22538 assert(Op.getOperand(0) == Op.getOperand(1) && "We only combine unary shuffles!");
22539 // We can't check for single use, we have to check that this shuffle is the only user.
22540 if (Op->isOnlyUserOf(Op.getOperand(0).getNode()) &&
22541 combineX86ShufflesRecursively(Op.getOperand(0), Root, Mask, Depth + 1,
22542 HasPSHUFB, DAG, DCI, Subtarget))
22547 // Minor canonicalization of the accumulated shuffle mask to make it easier
22548 // to match below. All this does is detect masks with sequential pairs of
22549 // elements, and shrink them to the half-width mask. It does this in a loop
22550 // so it will reduce the size of the mask to the minimal width mask which
22551 // performs an equivalent shuffle.
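// For example (illustrative): the v8i16 mask {0, 1, 4, 5, 2, 3, 6, 7} pairs
// up into the equivalent v4i32 mask {0, 2, 1, 3}.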
22552 SmallVector<int, 16> WidenedMask;
22553 while (Mask.size() > 1 && canWidenShuffleElements(Mask, WidenedMask)) {
22554 Mask = std::move(WidenedMask);
22555 WidenedMask.clear();
22558 return combineX86ShuffleChain(Op, Root, Mask, Depth, HasPSHUFB, DAG, DCI,
22562 /// \brief Get the PSHUF-style mask from PSHUF node.
22564 /// This is a very minor wrapper around getTargetShuffleMask to ease forming v4
22565 /// PSHUF-style masks that can be reused with such instructions.
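/// For example (illustrative): a PSHUFHW with mask <0,1,2,3,7,6,5,4> yields
/// the v4 mask {3, 2, 1, 0} after dropping the identity low half and
/// rebasing the high-half indices to 0..3.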
22566 static SmallVector<int, 4> getPSHUFShuffleMask(SDValue N) {
22567 SmallVector<int, 4> Mask;
22569 bool HaveMask = getTargetShuffleMask(N.getNode(), N.getSimpleValueType(), Mask, IsUnary);
22573 switch (N.getOpcode()) {
22574 case X86ISD::PSHUFD:
22575 return Mask;
22576 case X86ISD::PSHUFLW:
22577 Mask.resize(4);
22578 return Mask;
22579 case X86ISD::PSHUFHW:
22580 Mask.erase(Mask.begin(), Mask.begin() + 4);
22581 for (int &M : Mask)
22582 M -= 4;
22583 return Mask;
22584 }
22585 llvm_unreachable("No valid shuffle instruction found!");
22589 /// \brief Search for a combinable shuffle across a chain ending in pshufd.
22591 /// We walk up the chain and look for a combinable shuffle, skipping over
22592 /// shuffles that we could hoist this shuffle's transformation past without
22593 /// altering anything.
22594 static SDValue
22595 combineRedundantDWordShuffle(SDValue N, MutableArrayRef<int> Mask,
22596 SelectionDAG &DAG,
22597 TargetLowering::DAGCombinerInfo &DCI) {
22598 assert(N.getOpcode() == X86ISD::PSHUFD &&
22599 "Called with something other than an x86 128-bit half shuffle!");
22602 // Walk up a single-use chain looking for a combinable shuffle. Keep a stack
22603 // of the shuffles in the chain so that we can form a fresh chain to replace
22604 // this one.
22605 SmallVector<SDValue, 8> Chain;
22606 SDValue V = N.getOperand(0);
22607 for (; V.hasOneUse(); V = V.getOperand(0)) {
22608 switch (V.getOpcode()) {
22609 default:
22610 return SDValue(); // Nothing combined!
22612 case ISD::BITCAST:
22613 // Skip bitcasts as we always know the type for the target specific
22614 // shuffles.
22615 continue;
22617 case X86ISD::PSHUFD:
22618 // Found another dword shuffle.
22621 case X86ISD::PSHUFLW:
22622 // Check that the low words (being shuffled) are the identity in the
22623 // dword shuffle, and the high words are self-contained.
22624 if (Mask[0] != 0 || Mask[1] != 1 ||
22625 !(Mask[2] >= 2 && Mask[2] < 4 && Mask[3] >= 2 && Mask[3] < 4))
22628 Chain.push_back(V);
22631 case X86ISD::PSHUFHW:
22632 // Check that the high words (being shuffled) are the identity in the
22633 // dword shuffle, and the low words are self-contained.
22634 if (Mask[2] != 2 || Mask[3] != 3 ||
22635 !(Mask[0] >= 0 && Mask[0] < 2 && Mask[1] >= 0 && Mask[1] < 2))
22638 Chain.push_back(V);
22641 case X86ISD::UNPCKL:
22642 case X86ISD::UNPCKH:
22643 // For either i8 -> i16 or i16 -> i32 unpacks, we can combine a dword
22644 // shuffle into a preceding word shuffle.
22645 if (V.getValueType() != MVT::v16i8 && V.getValueType() != MVT::v8i16)
22648 // Search for a half-shuffle which we can combine with.
22649 unsigned CombineOp =
22650 V.getOpcode() == X86ISD::UNPCKL ? X86ISD::PSHUFLW : X86ISD::PSHUFHW;
22651 if (V.getOperand(0) != V.getOperand(1) ||
22652 !V->isOnlyUserOf(V.getOperand(0).getNode()))
22654 Chain.push_back(V);
22655 V = V.getOperand(0);
22657 switch (V.getOpcode()) {
22658 default:
22659 return SDValue(); // Nothing to combine.
22661 case X86ISD::PSHUFLW:
22662 case X86ISD::PSHUFHW:
22663 if (V.getOpcode() == CombineOp)
22666 Chain.push_back(V);
22670 V = V.getOperand(0);
22674 } while (V.hasOneUse());
22677 // Break out of the loop if we break out of the switch.
22681 if (!V.hasOneUse())
22682 // We fell out of the loop without finding a viable combining instruction.
22685 // Merge this node's mask and our incoming mask.
22686 SmallVector<int, 4> VMask = getPSHUFShuffleMask(V);
22687 for (int &M : Mask)
22689 V = DAG.getNode(V.getOpcode(), DL, V.getValueType(), V.getOperand(0),
22690 getV4X86ShuffleImm8ForMask(Mask, DAG));
22692 // Rebuild the chain around this new shuffle.
22693 while (!Chain.empty()) {
22694 SDValue W = Chain.pop_back_val();
22696 if (V.getValueType() != W.getOperand(0).getValueType())
22697 V = DAG.getNode(ISD::BITCAST, DL, W.getOperand(0).getValueType(), V);
22699 switch (W.getOpcode()) {
22701 llvm_unreachable("Only PSHUF and UNPCK instructions get here!");
22703 case X86ISD::UNPCKL:
22704 case X86ISD::UNPCKH:
22705 V = DAG.getNode(W.getOpcode(), DL, W.getValueType(), V, V);
22708 case X86ISD::PSHUFD:
22709 case X86ISD::PSHUFLW:
22710 case X86ISD::PSHUFHW:
22711 V = DAG.getNode(W.getOpcode(), DL, W.getValueType(), V, W.getOperand(1));
22715 if (V.getValueType() != N.getValueType())
22716 V = DAG.getNode(ISD::BITCAST, DL, N.getValueType(), V);
22718 // Return the new chain to replace N.
22722 /// \brief Search for a combinable shuffle across a chain ending in pshuflw or pshufhw.
22724 /// We walk up the chain, skipping shuffles of the other half and looking
22725 /// through shuffles which switch halves trying to find a shuffle of the same
22726 /// pair of dwords.
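/// For example (illustrative), in
///   (pshuflw m2, (pshufhw h, (pshuflw m1, x)))
/// the two low-word shuffles can be merged into a single pshuflw, since the
/// pshufhw in between never touches the low words; e.g. m1 = <1,0,2,3> and
/// m2 = <2,3,0,1> compose to <2,3,1,0>.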
22727 static bool combineRedundantHalfShuffle(SDValue N, MutableArrayRef<int> Mask,
22729 TargetLowering::DAGCombinerInfo &DCI) {
22731 (N.getOpcode() == X86ISD::PSHUFLW || N.getOpcode() == X86ISD::PSHUFHW) &&
22732 "Called with something other than an x86 128-bit half shuffle!");
22734 unsigned CombineOpcode = N.getOpcode();
22736 // Walk up a single-use chain looking for a combinable shuffle.
22737 SDValue V = N.getOperand(0);
22738 for (; V.hasOneUse(); V = V.getOperand(0)) {
22739 switch (V.getOpcode()) {
22740 default:
22741 return false; // Nothing combined!
22743 case ISD::BITCAST:
22744 // Skip bitcasts as we always know the type for the target specific
22745 // shuffles.
22746 continue;
22748 case X86ISD::PSHUFLW:
22749 case X86ISD::PSHUFHW:
22750 if (V.getOpcode() == CombineOpcode)
22753 // Other-half shuffles are no-ops.
22756 // Break out of the loop if we break out of the switch.
22760 if (!V.hasOneUse())
22761 // We fell out of the loop without finding a viable combining instruction.
22764 // Combine away the bottom node as its shuffle will be accumulated into
22765 // a preceding shuffle.
22766 DCI.CombineTo(N.getNode(), N.getOperand(0), /*AddTo*/ true);
22768 // Record the old value.
22769 SDValue Old = V;
22771 // Merge this node's mask and our incoming mask (adjusted to account for all
22772 // the pshufd instructions encountered).
22773 SmallVector<int, 4> VMask = getPSHUFShuffleMask(V);
22774 for (int &M : Mask)
22776 V = DAG.getNode(V.getOpcode(), DL, MVT::v8i16, V.getOperand(0),
22777 getV4X86ShuffleImm8ForMask(Mask, DAG));
22779 // Check that the shuffles didn't cancel each other out. If not, we need to
22780 // combine to the new one.
22781 if (Old != V)
22782 // Replace the combinable shuffle with the combined one, updating all users
22783 // so that we re-evaluate the chain here.
22784 DCI.CombineTo(Old.getNode(), V, /*AddTo*/ true);
22789 /// \brief Try to combine x86 target specific shuffles.
22790 static SDValue PerformTargetShuffleCombine(SDValue N, SelectionDAG &DAG,
22791 TargetLowering::DAGCombinerInfo &DCI,
22792 const X86Subtarget *Subtarget) {
22794 MVT VT = N.getSimpleValueType();
22795 SmallVector<int, 4> Mask;
22797 switch (N.getOpcode()) {
22798 case X86ISD::PSHUFD:
22799 case X86ISD::PSHUFLW:
22800 case X86ISD::PSHUFHW:
22801 Mask = getPSHUFShuffleMask(N);
22802 assert(Mask.size() == 4);
22808 // Nuke no-op shuffles that show up after combining.
22809 if (isNoopShuffleMask(Mask))
22810 return DCI.CombineTo(N.getNode(), N.getOperand(0), /*AddTo*/ true);
22812 // Look for simplifications involving one or two shuffle instructions.
22813 SDValue V = N.getOperand(0);
22814 switch (N.getOpcode()) {
22817 case X86ISD::PSHUFLW:
22818 case X86ISD::PSHUFHW:
22819 assert(VT == MVT::v8i16);
22822 if (combineRedundantHalfShuffle(N, Mask, DAG, DCI))
22823 return SDValue(); // We combined away this shuffle, so we're done.
22825 // See if this reduces to a PSHUFD which is no more expensive and can
22826 // combine with more operations. Note that it has to at least flip the
22827 // dwords as otherwise it would have been removed as a no-op.
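// For example (illustrative): a PSHUFLW with mask <2,3,0,1> swaps the two
// dwords within the low half, which is exactly PSHUFD <1,0,2,3> and lets the
// combiner merge it with neighbouring dword shuffles.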
22828 if (Mask[0] == 2 && Mask[1] == 3 && Mask[2] == 0 && Mask[3] == 1) {
22829 int DMask[] = {0, 1, 2, 3};
22830 int DOffset = N.getOpcode() == X86ISD::PSHUFLW ? 0 : 2;
22831 DMask[DOffset + 0] = DOffset + 1;
22832 DMask[DOffset + 1] = DOffset + 0;
22833 V = DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, V);
22834 DCI.AddToWorklist(V.getNode());
22835 V = DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32, V,
22836 getV4X86ShuffleImm8ForMask(DMask, DAG));
22837 DCI.AddToWorklist(V.getNode());
22838 return DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, V);
22841 // Look for shuffle patterns which can be implemented as a single unpack.
22842 // FIXME: This doesn't handle the location of the PSHUFD generically, and
22843 // only works when we have a PSHUFD followed by two half-shuffles.
22844 if (Mask[0] == Mask[1] && Mask[2] == Mask[3] &&
22845 (V.getOpcode() == X86ISD::PSHUFLW ||
22846 V.getOpcode() == X86ISD::PSHUFHW) &&
22847 V.getOpcode() != N.getOpcode() &&
22849 SDValue D = V.getOperand(0);
22850 while (D.getOpcode() == ISD::BITCAST && D.hasOneUse())
22851 D = D.getOperand(0);
22852 if (D.getOpcode() == X86ISD::PSHUFD && D.hasOneUse()) {
22853 SmallVector<int, 4> VMask = getPSHUFShuffleMask(V);
22854 SmallVector<int, 4> DMask = getPSHUFShuffleMask(D);
22855 int NOffset = N.getOpcode() == X86ISD::PSHUFLW ? 0 : 4;
22856 int VOffset = V.getOpcode() == X86ISD::PSHUFLW ? 0 : 4;
22858 for (int i = 0; i < 4; ++i) {
22859 WordMask[i + NOffset] = Mask[i] + NOffset;
22860 WordMask[i + VOffset] = VMask[i] + VOffset;
22862 // Map the word mask through the DWord mask.
22864 for (int i = 0; i < 8; ++i)
22865 MappedMask[i] = 2 * DMask[WordMask[i] / 2] + WordMask[i] % 2;
22866 const int UnpackLoMask[] = {0, 0, 1, 1, 2, 2, 3, 3};
22867 const int UnpackHiMask[] = {4, 4, 5, 5, 6, 6, 7, 7};
22868 if (std::equal(std::begin(MappedMask), std::end(MappedMask),
22869 std::begin(UnpackLoMask)) ||
22870 std::equal(std::begin(MappedMask), std::end(MappedMask),
22871 std::begin(UnpackHiMask))) {
22872 // We can replace all three shuffles with an unpack.
22873 V = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, D.getOperand(0));
22874 DCI.AddToWorklist(V.getNode());
22875 return DAG.getNode(MappedMask[0] == 0 ? X86ISD::UNPCKL
22877 DL, MVT::v8i16, V, V);
22884 case X86ISD::PSHUFD:
22885 if (SDValue NewN = combineRedundantDWordShuffle(N, Mask, DAG, DCI))
22894 /// \brief Try to combine a shuffle into a target-specific add-sub node.
22896 /// We combine this directly on the abstract vector shuffle nodes so it is
22897 /// easier to generically match. We also insert dummy vector shuffle nodes for
22898 /// the operands which explicitly discard the lanes which are unused by this
22899 /// operation to try to flow through the rest of the combiner the fact that
22900 /// they're unused.
22901 static SDValue combineShuffleToAddSub(SDNode *N, SelectionDAG &DAG) {
22903 EVT VT = N->getValueType(0);
22905 // We only handle target-independent shuffles.
22906 // FIXME: It would be easy and harmless to use the target shuffle mask
22907 // extraction tool to support more.
22908 if (N->getOpcode() != ISD::VECTOR_SHUFFLE)
22911 auto *SVN = cast<ShuffleVectorSDNode>(N);
22912 ArrayRef<int> Mask = SVN->getMask();
22913 SDValue V1 = N->getOperand(0);
22914 SDValue V2 = N->getOperand(1);
22916 // We require the first shuffle operand to be the SUB node, and the second to
22917 // be the ADD node.
22918 // FIXME: We should support the commuted patterns.
22919 if (V1->getOpcode() != ISD::FSUB || V2->getOpcode() != ISD::FADD)
22922 // If there are other uses of these operations we can't fold them.
22923 if (!V1->hasOneUse() || !V2->hasOneUse())
22926 // Ensure that both operations have the same operands. Note that we can
22927 // commute the FADD operands.
22928 SDValue LHS = V1->getOperand(0), RHS = V1->getOperand(1);
22929 if ((V2->getOperand(0) != LHS || V2->getOperand(1) != RHS) &&
22930 (V2->getOperand(0) != RHS || V2->getOperand(1) != LHS))
22933 // We're looking for blends between FADD and FSUB nodes. We insist on these
22934 // nodes being lined up in a specific expected pattern.
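// For example (illustrative), for v4f32 the expected blend mask is
// <0, 5, 2, 7>: even lanes come from the FSUB and odd lanes from the FADD,
// matching the per-lane behaviour of ADDSUBPS.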
22935 if (!(isShuffleEquivalent(V1, V2, Mask, 0, 3) ||
22936 isShuffleEquivalent(V1, V2, Mask, 0, 5, 2, 7) ||
22937 isShuffleEquivalent(V1, V2, Mask, 0, 9, 2, 11, 4, 13, 6, 15)))
22940 // Only specific types are legal at this point, assert so we notice if and
22941 // when these change.
22942 assert((VT == MVT::v4f32 || VT == MVT::v2f64 || VT == MVT::v8f32 ||
22943 VT == MVT::v4f64) &&
22944 "Unknown vector type encountered!");
22946 return DAG.getNode(X86ISD::ADDSUB, DL, VT, LHS, RHS);
22949 /// PerformShuffleCombine - Performs several different shuffle combines.
22950 static SDValue PerformShuffleCombine(SDNode *N, SelectionDAG &DAG,
22951 TargetLowering::DAGCombinerInfo &DCI,
22952 const X86Subtarget *Subtarget) {
22954 SDValue N0 = N->getOperand(0);
22955 SDValue N1 = N->getOperand(1);
22956 EVT VT = N->getValueType(0);
22958 // Don't create instructions with illegal types after legalize types has run.
22959 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
22960 if (!DCI.isBeforeLegalize() && !TLI.isTypeLegal(VT.getVectorElementType()))
22963 // If we have legalized the vector types, look for blends of FADD and FSUB
22964 // nodes that we can fuse into an ADDSUB node.
22965 if (TLI.isTypeLegal(VT) && Subtarget->hasSSE3())
22966 if (SDValue AddSub = combineShuffleToAddSub(N, DAG))
22969 // Combine 256-bit vector shuffles. This is only profitable when in AVX mode
22970 if (Subtarget->hasFp256() && VT.is256BitVector() &&
22971 N->getOpcode() == ISD::VECTOR_SHUFFLE)
22972 return PerformShuffleCombine256(N, DAG, DCI, Subtarget);
22974 // During Type Legalization, when promoting illegal vector types,
22975 // the backend might introduce new shuffle dag nodes and bitcasts.
22977 // This code performs the following transformation:
22978 // fold: (shuffle (bitcast (BINOP A, B)), Undef, <Mask>) ->
22979 // (shuffle (BINOP (bitcast A), (bitcast B)), Undef, <Mask>)
22981 // We do this only if both the bitcast and the BINOP dag nodes have
22982 // one use. Also, perform this transformation only if the new binary
22983 // operation is legal. This is to avoid introducing dag nodes that
22984 // potentially need to be further expanded (or custom lowered) into a
22985 // less optimal sequence of dag nodes.
22986 if (!DCI.isBeforeLegalize() && DCI.isBeforeLegalizeOps() &&
22987 N1.getOpcode() == ISD::UNDEF && N0.hasOneUse() &&
22988 N0.getOpcode() == ISD::BITCAST) {
22989 SDValue BC0 = N0.getOperand(0);
22990 EVT SVT = BC0.getValueType();
22991 unsigned Opcode = BC0.getOpcode();
22992 unsigned NumElts = VT.getVectorNumElements();
22994 if (BC0.hasOneUse() && SVT.isVector() &&
22995 SVT.getVectorNumElements() * 2 == NumElts &&
22996 TLI.isOperationLegal(Opcode, VT)) {
22997 bool CanFold = false;
23009 unsigned SVTNumElts = SVT.getVectorNumElements();
23010 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
23011 for (unsigned i = 0, e = SVTNumElts; i != e && CanFold; ++i)
23012 CanFold = SVOp->getMaskElt(i) == (int)(i * 2);
23013 for (unsigned i = SVTNumElts, e = NumElts; i != e && CanFold; ++i)
23014 CanFold = SVOp->getMaskElt(i) < 0;
23017 SDValue BC00 = DAG.getNode(ISD::BITCAST, dl, VT, BC0.getOperand(0));
23018 SDValue BC01 = DAG.getNode(ISD::BITCAST, dl, VT, BC0.getOperand(1));
23019 SDValue NewBinOp = DAG.getNode(BC0.getOpcode(), dl, VT, BC00, BC01);
23020 return DAG.getVectorShuffle(VT, dl, NewBinOp, N1, &SVOp->getMask()[0]);
23026 // Only handle 128-bit wide vectors from here on.
23026 if (!VT.is128BitVector())
23029 // Combine a vector_shuffle that is equal to build_vector load1, load2, load3,
23030 // load4, <0, 1, 2, 3> into a 128-bit load if the load addresses are
23031 // consecutive, non-overlapping, and in the right order.
23032 SmallVector<SDValue, 16> Elts;
23033 for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i)
23034 Elts.push_back(getShuffleScalarElt(N, i, DAG, 0));
23036 SDValue LD = EltsFromConsecutiveLoads(VT, Elts, dl, DAG, true);
23040 if (isTargetShuffle(N->getOpcode())) {
23042 PerformTargetShuffleCombine(SDValue(N, 0), DAG, DCI, Subtarget);
23043 if (Shuffle.getNode())
23046 // Try recursively combining arbitrary sequences of x86 shuffle
23047 // instructions into higher-order shuffles. We do this after combining
23048 // specific PSHUF instruction sequences into their minimal form so that we
23049 // can evaluate how many specialized shuffle instructions are involved in
23050 // a particular chain.
23051 SmallVector<int, 1> NonceMask; // Just a placeholder.
23052 NonceMask.push_back(0);
23053 if (combineX86ShufflesRecursively(SDValue(N, 0), SDValue(N, 0), NonceMask,
23054 /*Depth*/ 1, /*HasPSHUFB*/ false, DAG,
23056 return SDValue(); // This routine will use CombineTo to replace N.
23062 /// PerformTruncateCombine - Converts a truncate operation into
23063 /// a sequence of vector shuffle operations.
23064 /// This is possible when we truncate a 256-bit vector to a 128-bit vector.
23065 static SDValue PerformTruncateCombine(SDNode *N, SelectionDAG &DAG,
23066 TargetLowering::DAGCombinerInfo &DCI,
23067 const X86Subtarget *Subtarget) {
23068 return SDValue();
23069 }
23071 /// XFormVExtractWithShuffleIntoLoad - Check if a vector extract from a target
23072 /// specific shuffle of a load can be folded into a single element load.
23073 /// Similar handling for VECTOR_SHUFFLE is performed by DAGCombiner, but
23074 /// shuffles have been custom lowered so we need to handle those here.
23075 static SDValue XFormVExtractWithShuffleIntoLoad(SDNode *N, SelectionDAG &DAG,
23076 TargetLowering::DAGCombinerInfo &DCI) {
23077 if (DCI.isBeforeLegalizeOps())
23080 SDValue InVec = N->getOperand(0);
23081 SDValue EltNo = N->getOperand(1);
23083 if (!isa<ConstantSDNode>(EltNo))
23086 EVT OriginalVT = InVec.getValueType();
23088 if (InVec.getOpcode() == ISD::BITCAST) {
23089 // Don't duplicate a load with other uses.
23090 if (!InVec.hasOneUse())
23092 EVT BCVT = InVec.getOperand(0).getValueType();
23093 if (BCVT.getVectorNumElements() != OriginalVT.getVectorNumElements())
23095 InVec = InVec.getOperand(0);
23098 EVT CurrentVT = InVec.getValueType();
23100 if (!isTargetShuffle(InVec.getOpcode()))
23103 // Don't duplicate a load with other uses.
23104 if (!InVec.hasOneUse())
23107 SmallVector<int, 16> ShuffleMask;
23109 if (!getTargetShuffleMask(InVec.getNode(), CurrentVT.getSimpleVT(),
23110 ShuffleMask, UnaryShuffle))
23113 // Select the input vector, guarding against an out-of-range extract index.
23114 unsigned NumElems = CurrentVT.getVectorNumElements();
23115 int Elt = cast<ConstantSDNode>(EltNo)->getZExtValue();
23116 int Idx = (Elt > (int)NumElems) ? -1 : ShuffleMask[Elt];
23117 SDValue LdNode = (Idx < (int)NumElems) ? InVec.getOperand(0)
23118 : InVec.getOperand(1);
23120 // If inputs to shuffle are the same for both ops, then allow 2 uses
23121 unsigned AllowedUses = InVec.getNumOperands() > 1 &&
23122 InVec.getOperand(0) == InVec.getOperand(1) ? 2 : 1;
23124 if (LdNode.getOpcode() == ISD::BITCAST) {
23125 // Don't duplicate a load with other uses.
23126 if (!LdNode.getNode()->hasNUsesOfValue(AllowedUses, 0))
23129 AllowedUses = 1; // only allow 1 load use if we have a bitcast
23130 LdNode = LdNode.getOperand(0);
23133 if (!ISD::isNormalLoad(LdNode.getNode()))
23136 LoadSDNode *LN0 = cast<LoadSDNode>(LdNode);
23138 if (!LN0 ||!LN0->hasNUsesOfValue(AllowedUses, 0) || LN0->isVolatile())
23141 EVT EltVT = N->getValueType(0);
23142 // If there's a bitcast before the shuffle, check if the load type and
23143 // alignment are valid.
23144 unsigned Align = LN0->getAlignment();
23145 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
23146 unsigned NewAlign = TLI.getDataLayout()->getABITypeAlignment(
23147 EltVT.getTypeForEVT(*DAG.getContext()));
23149 if (NewAlign > Align || !TLI.isOperationLegalOrCustom(ISD::LOAD, EltVT))
23152 // All checks match so transform back to vector_shuffle so that DAG combiner
23153 // can finish the job
23156 // Create a shuffle node, taking into account the case that it's a unary shuffle.
23157 SDValue Shuffle = (UnaryShuffle) ? DAG.getUNDEF(CurrentVT)
23158 : InVec.getOperand(1);
23159 Shuffle = DAG.getVectorShuffle(CurrentVT, dl,
23160 InVec.getOperand(0), Shuffle,
23162 Shuffle = DAG.getNode(ISD::BITCAST, dl, OriginalVT, Shuffle);
23163 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, N->getValueType(0), Shuffle,
23167 /// \brief Detect bitcasts from i32 to the x86mmx low word. Since MMX types are
23168 /// special and don't usually play with other vector types, it's better to
23169 /// handle them early to be sure we emit efficient code by avoiding
23170 /// store-load conversions.
23171 static SDValue PerformBITCASTCombine(SDNode *N, SelectionDAG &DAG) {
23172 if (N->getValueType(0) != MVT::x86mmx ||
23173 N->getOperand(0)->getOpcode() != ISD::BUILD_VECTOR ||
23174 N->getOperand(0)->getValueType(0) != MVT::v2i32)
23177 SDValue V = N->getOperand(0);
23178 ConstantSDNode *C = dyn_cast<ConstantSDNode>(V.getOperand(1));
23179 if (C && C->getZExtValue() == 0 && V.getOperand(0).getValueType() == MVT::i32)
23180 return DAG.getNode(X86ISD::MMX_MOVW2D, SDLoc(V.getOperand(0)),
23181 N->getValueType(0), V.getOperand(0));
23186 /// PerformEXTRACT_VECTOR_ELTCombine - Detect vector gather/scatter index
23187 /// generation and convert it from being a bunch of shuffles and extracts
23188 /// into a somewhat faster sequence. For i686, the best sequence is apparently
23189 /// storing the value and loading scalars back, while for x64 we should
23190 /// use 64-bit extracts and shifts.
23191 static SDValue PerformEXTRACT_VECTOR_ELTCombine(SDNode *N, SelectionDAG &DAG,
23192 TargetLowering::DAGCombinerInfo &DCI) {
23193 SDValue NewOp = XFormVExtractWithShuffleIntoLoad(N, DAG, DCI);
23194 if (NewOp.getNode())
23197 SDValue InputVector = N->getOperand(0);
23199 // Detect mmx to i32 conversion through a v2i32 elt extract.
23200 if (InputVector.getOpcode() == ISD::BITCAST && InputVector.hasOneUse() &&
23201 N->getValueType(0) == MVT::i32 &&
23202 InputVector.getValueType() == MVT::v2i32) {
23204 // The bitcast source is a direct mmx result.
23205 SDValue MMXSrc = InputVector.getNode()->getOperand(0);
23206 if (MMXSrc.getValueType() == MVT::x86mmx)
23207 return DAG.getNode(X86ISD::MMX_MOVD2W, SDLoc(InputVector),
23208 N->getValueType(0),
23209 InputVector.getNode()->getOperand(0));
23211 // The mmx is indirect: (i64 extract_elt (v1i64 bitcast (x86mmx ...))).
23212 SDValue MMXSrcOp = MMXSrc.getOperand(0);
23213 if (MMXSrc.getOpcode() == ISD::EXTRACT_VECTOR_ELT && MMXSrc.hasOneUse() &&
23214 MMXSrc.getValueType() == MVT::i64 && MMXSrcOp.hasOneUse() &&
23215 MMXSrcOp.getOpcode() == ISD::BITCAST &&
23216 MMXSrcOp.getValueType() == MVT::v1i64 &&
23217 MMXSrcOp.getOperand(0).getValueType() == MVT::x86mmx)
23218 return DAG.getNode(X86ISD::MMX_MOVD2W, SDLoc(InputVector),
23219 N->getValueType(0),
23220 MMXSrcOp.getOperand(0));
23223 // Only operate on vectors of 4 elements, where the alternative shuffling
23224 // gets to be more expensive.
23225 if (InputVector.getValueType() != MVT::v4i32)
23228 // Check whether every use of InputVector is an EXTRACT_VECTOR_ELT with a
23229 // single use which is a sign-extend or zero-extend, and all elements are
23230 // used.
23231 SmallVector<SDNode *, 4> Uses;
23232 unsigned ExtractedElements = 0;
23233 for (SDNode::use_iterator UI = InputVector.getNode()->use_begin(),
23234 UE = InputVector.getNode()->use_end(); UI != UE; ++UI) {
23235 if (UI.getUse().getResNo() != InputVector.getResNo())
23238 SDNode *Extract = *UI;
23239 if (Extract->getOpcode() != ISD::EXTRACT_VECTOR_ELT)
23242 if (Extract->getValueType(0) != MVT::i32)
23244 if (!Extract->hasOneUse())
23246 if (Extract->use_begin()->getOpcode() != ISD::SIGN_EXTEND &&
23247 Extract->use_begin()->getOpcode() != ISD::ZERO_EXTEND)
23249 if (!isa<ConstantSDNode>(Extract->getOperand(1)))
23252 // Record which element was extracted.
23253 ExtractedElements |=
23254 1 << cast<ConstantSDNode>(Extract->getOperand(1))->getZExtValue();
23256 Uses.push_back(Extract);
23259 // If not all the elements were used, this may not be worthwhile.
23260 if (ExtractedElements != 15)
23263 // Ok, we've now decided to do the transformation.
23264 // If 64-bit shifts are legal, use the extract-shift sequence,
23265 // otherwise bounce the vector off the cache.
23266 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
23268 SDLoc dl(InputVector);
23270 if (TLI.isOperationLegal(ISD::SRA, MVT::i64)) {
23271 SDValue Cst = DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, InputVector);
23272 EVT VecIdxTy = DAG.getTargetLoweringInfo().getVectorIdxTy();
23273 SDValue BottomHalf = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i64, Cst,
23274 DAG.getConstant(0, VecIdxTy));
23275 SDValue TopHalf = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i64, Cst,
23276 DAG.getConstant(1, VecIdxTy));
23278 SDValue ShAmt = DAG.getConstant(32,
23279 DAG.getTargetLoweringInfo().getShiftAmountTy(MVT::i64));
23280 Vals[0] = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, BottomHalf);
23281 Vals[1] = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32,
23282 DAG.getNode(ISD::SRA, dl, MVT::i64, BottomHalf, ShAmt));
23283 Vals[2] = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, TopHalf);
23284 Vals[3] = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32,
23285 DAG.getNode(ISD::SRA, dl, MVT::i64, TopHalf, ShAmt));
23287 // Store the value to a temporary stack slot.
23288 SDValue StackPtr = DAG.CreateStackTemporary(InputVector.getValueType());
23289 SDValue Ch = DAG.getStore(DAG.getEntryNode(), dl, InputVector, StackPtr,
23290 MachinePointerInfo(), false, false, 0);
23292 EVT ElementType = InputVector.getValueType().getVectorElementType();
23293 unsigned EltSize = ElementType.getSizeInBits() / 8;
23295 // Replace each use (extract) with a load of the appropriate element.
23296 for (unsigned i = 0; i < 4; ++i) {
23297 uint64_t Offset = EltSize * i;
23298 SDValue OffsetVal = DAG.getConstant(Offset, TLI.getPointerTy());
23300 SDValue ScalarAddr = DAG.getNode(ISD::ADD, dl, TLI.getPointerTy(),
23301 StackPtr, OffsetVal);
23303 // Load the scalar.
23304 Vals[i] = DAG.getLoad(ElementType, dl, Ch,
23305 ScalarAddr, MachinePointerInfo(),
23306 false, false, false, 0);
23311 // Replace the extracts
23312 for (SmallVectorImpl<SDNode *>::iterator UI = Uses.begin(),
23313 UE = Uses.end(); UI != UE; ++UI) {
23314 SDNode *Extract = *UI;
23316 SDValue Idx = Extract->getOperand(1);
23317 uint64_t IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
23318 DAG.ReplaceAllUsesOfValueWith(SDValue(Extract, 0), Vals[IdxVal]);
23321 // The replacement was made in place; don't return anything.
23325 /// \brief Matches a VSELECT onto min/max, or returns 0 if the node doesn't match.
23326 static std::pair<unsigned, bool>
23327 matchIntegerMINMAX(SDValue Cond, EVT VT, SDValue LHS, SDValue RHS,
23328 SelectionDAG &DAG, const X86Subtarget *Subtarget) {
23329 if (!VT.isVector())
23330 return std::make_pair(0, false);
23332 bool NeedSplit = false;
23333 switch (VT.getSimpleVT().SimpleTy) {
23334 default: return std::make_pair(0, false);
23337 if (!Subtarget->hasVLX())
23338 return std::make_pair(0, false);
23342 if (!Subtarget->hasBWI())
23343 return std::make_pair(0, false);
23347 if (!Subtarget->hasAVX512())
23348 return std::make_pair(0, false);
23353 if (!Subtarget->hasAVX2())
23354 NeedSplit = true;
23355 if (!Subtarget->hasAVX())
23356 return std::make_pair(0, false);
23361 if (!Subtarget->hasSSE2())
23362 return std::make_pair(0, false);
23365 // SSE2 has only a small subset of the operations.
23366 bool hasUnsigned = Subtarget->hasSSE41() ||
23367 (Subtarget->hasSSE2() && VT == MVT::v16i8);
23368 bool hasSigned = Subtarget->hasSSE41() ||
23369 (Subtarget->hasSSE2() && VT == MVT::v8i16);
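// For example (illustrative): (vselect (setult x, y), x, y) on v16i8 maps to
// X86ISD::UMIN (PMINUB), which is available from SSE2 on; most other element
// types need the SSE4.1 variants (PMINSB, PMINUW, ...).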
23371 ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
23374 // Check for x CC y ? x : y.
23375 if (DAG.isEqualTo(LHS, Cond.getOperand(0)) &&
23376 DAG.isEqualTo(RHS, Cond.getOperand(1))) {
23381 Opc = hasUnsigned ? X86ISD::UMIN : 0; break;
23384 Opc = hasUnsigned ? X86ISD::UMAX : 0; break;
23387 Opc = hasSigned ? X86ISD::SMIN : 0; break;
23390 Opc = hasSigned ? X86ISD::SMAX : 0; break;
23392 // Check for x CC y ? y : x -- a min/max with reversed arms.
23393 } else if (DAG.isEqualTo(LHS, Cond.getOperand(1)) &&
23394 DAG.isEqualTo(RHS, Cond.getOperand(0))) {
23399 Opc = hasUnsigned ? X86ISD::UMAX : 0; break;
23402 Opc = hasUnsigned ? X86ISD::UMIN : 0; break;
23405 Opc = hasSigned ? X86ISD::SMAX : 0; break;
23408 Opc = hasSigned ? X86ISD::SMIN : 0; break;
23412 return std::make_pair(Opc, NeedSplit);
static SDValue
23416 transformVSELECTtoBlendVECTOR_SHUFFLE(SDNode *N, SelectionDAG &DAG,
23417 const X86Subtarget *Subtarget) {
SDLoc dl(N);
23419 SDValue Cond = N->getOperand(0);
23420 SDValue LHS = N->getOperand(1);
23421 SDValue RHS = N->getOperand(2);
23423 if (Cond.getOpcode() == ISD::SIGN_EXTEND) {
23424 SDValue CondSrc = Cond->getOperand(0);
23425 if (CondSrc->getOpcode() == ISD::SIGN_EXTEND_INREG)
23426 Cond = CondSrc->getOperand(0);
23429 if (!ISD::isBuildVectorOfConstantSDNodes(Cond.getNode()))
return SDValue();
23432 // A vselect where all conditions and data are constants can be optimized into
23433 // a single vector load by SelectionDAGLegalize::ExpandBUILD_VECTOR().
23434 if (ISD::isBuildVectorOfConstantSDNodes(LHS.getNode()) &&
23435 ISD::isBuildVectorOfConstantSDNodes(RHS.getNode()))
return SDValue();
23438 unsigned MaskValue = 0;
23439 if (!BUILD_VECTORtoBlendMask(cast<BuildVectorSDNode>(Cond), MaskValue))
return SDValue();
23442 MVT VT = N->getSimpleValueType(0);
23443 unsigned NumElems = VT.getVectorNumElements();
23444 SmallVector<int, 8> ShuffleMask(NumElems, -1);
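// Each bit of MaskValue picks one lane: a clear bit keeps element i of LHS
// (shuffle index i), a set bit takes element i of RHS (index i + NumElems).
// For example, a v4i32 node with MaskValue 0b0110 yields the mask <0, 5, 6, 3>.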
23445 for (unsigned i = 0; i < NumElems; ++i) {
23446 // Be sure we emit undef where we can.
23447 if (Cond.getOperand(i)->getOpcode() == ISD::UNDEF)
23448 ShuffleMask[i] = -1;
else
23450 ShuffleMask[i] = i + NumElems * ((MaskValue >> i) & 1);
}
23453 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
23454 if (!TLI.isShuffleMaskLegal(ShuffleMask, VT))
return SDValue();
23456 return DAG.getVectorShuffle(VT, dl, LHS, RHS, &ShuffleMask[0]);
}
23459 /// PerformSELECTCombine - Do target-specific dag combines on SELECT and VSELECT
23461 static SDValue PerformSELECTCombine(SDNode *N, SelectionDAG &DAG,
23462 TargetLowering::DAGCombinerInfo &DCI,
23463 const X86Subtarget *Subtarget) {
SDLoc DL(N);
23465 SDValue Cond = N->getOperand(0);
23466 // Get the LHS/RHS of the select.
23467 SDValue LHS = N->getOperand(1);
23468 SDValue RHS = N->getOperand(2);
23469 EVT VT = LHS.getValueType();
23470 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
23472 // If we have SSE[12] support, try to form min/max nodes. SSE min/max
23473 // instructions match the semantics of the common C idiom x<y?x:y but not
23474 // x<=y?x:y, because of how they handle negative zero (which can be
23475 // ignored in unsafe-math mode).
23476 // We also try to create v2f32 min/max nodes, which we later widen to v4f32.
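// For example, (select (setcc x, y, setolt), x, y) maps directly onto
// MINSS/MINPS, which return the second operand when the inputs are unordered
// or compare equal (as -0.0 and +0.0 do); the cases below bail out or swap
// operands whenever that behaviour would be observable.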
23477 if (Cond.getOpcode() == ISD::SETCC && VT.isFloatingPoint() &&
23478 VT != MVT::f80 && (TLI.isTypeLegal(VT) || VT == MVT::v2f32) &&
23479 (Subtarget->hasSSE2() ||
23480 (Subtarget->hasSSE1() && VT.getScalarType() == MVT::f32))) {
23481 ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
23483 unsigned Opcode = 0;
23484 // Check for x CC y ? x : y.
23485 if (DAG.isEqualTo(LHS, Cond.getOperand(0)) &&
23486 DAG.isEqualTo(RHS, Cond.getOperand(1))) {
23490 // Converting this to a min would handle NaNs incorrectly, and swapping
23491 // the operands would cause it to handle comparisons between positive
23492 // and negative zero incorrectly.
23493 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)) {
23494 if (!DAG.getTarget().Options.UnsafeFPMath &&
23495 !(DAG.isKnownNeverZero(LHS) || DAG.isKnownNeverZero(RHS)))
23497 std::swap(LHS, RHS);
23499 Opcode = X86ISD::FMIN;
23502 // Converting this to a min would handle comparisons between positive
23503 // and negative zero incorrectly.
23504 if (!DAG.getTarget().Options.UnsafeFPMath &&
23505 !DAG.isKnownNeverZero(LHS) && !DAG.isKnownNeverZero(RHS))
23507 Opcode = X86ISD::FMIN;
23510 // Converting this to a min would handle both negative zeros and NaNs
23511 // incorrectly, but we can swap the operands to fix both.
23512 std::swap(LHS, RHS);
23516 Opcode = X86ISD::FMIN;
23520 // Converting this to a max would handle comparisons between positive
23521 // and negative zero incorrectly.
23522 if (!DAG.getTarget().Options.UnsafeFPMath &&
23523 !DAG.isKnownNeverZero(LHS) && !DAG.isKnownNeverZero(RHS))
23525 Opcode = X86ISD::FMAX;
23528 // Converting this to a max would handle NaNs incorrectly, and swapping
23529 // the operands would cause it to handle comparisons between positive
23530 // and negative zero incorrectly.
23531 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)) {
23532 if (!DAG.getTarget().Options.UnsafeFPMath &&
23533 !(DAG.isKnownNeverZero(LHS) || DAG.isKnownNeverZero(RHS)))
23535 std::swap(LHS, RHS);
23537 Opcode = X86ISD::FMAX;
23540 // Converting this to a max would handle both negative zeros and NaNs
23541 // incorrectly, but we can swap the operands to fix both.
23542 std::swap(LHS, RHS);
23546 Opcode = X86ISD::FMAX;
23549 // Check for x CC y ? y : x -- a min/max with reversed arms.
23550 } else if (DAG.isEqualTo(LHS, Cond.getOperand(1)) &&
23551 DAG.isEqualTo(RHS, Cond.getOperand(0))) {
23555 // Converting this to a min would handle comparisons between positive
23556 // and negative zero incorrectly, and swapping the operands would
23557 // cause it to handle NaNs incorrectly.
23558 if (!DAG.getTarget().Options.UnsafeFPMath &&
23559 !(DAG.isKnownNeverZero(LHS) || DAG.isKnownNeverZero(RHS))) {
23560 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS))
23562 std::swap(LHS, RHS);
23564 Opcode = X86ISD::FMIN;
23567 // Converting this to a min would handle NaNs incorrectly.
23568 if (!DAG.getTarget().Options.UnsafeFPMath &&
23569 (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)))
23571 Opcode = X86ISD::FMIN;
23574 // Converting this to a min would handle both negative zeros and NaNs
23575 // incorrectly, but we can swap the operands to fix both.
23576 std::swap(LHS, RHS);
23580 Opcode = X86ISD::FMIN;
23584 // Converting this to a max would handle NaNs incorrectly.
23585 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS))
23587 Opcode = X86ISD::FMAX;
23590 // Converting this to a max would handle comparisons between positive
23591 // and negative zero incorrectly, and swapping the operands would
23592 // cause it to handle NaNs incorrectly.
23593 if (!DAG.getTarget().Options.UnsafeFPMath &&
23594 !DAG.isKnownNeverZero(LHS) && !DAG.isKnownNeverZero(RHS)) {
23595 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS))
23597 std::swap(LHS, RHS);
23599 Opcode = X86ISD::FMAX;
23602 // Converting this to a max would handle both negative zeros and NaNs
23603 // incorrectly, but we can swap the operands to fix both.
23604 std::swap(LHS, RHS);
23608 Opcode = X86ISD::FMAX;
23614 return DAG.getNode(Opcode, DL, N->getValueType(0), LHS, RHS);
23617 EVT CondVT = Cond.getValueType();
23618 if (Subtarget->hasAVX512() && VT.isVector() && CondVT.isVector() &&
23619 CondVT.getVectorElementType() == MVT::i1) {
23620 // v16i8 (select v16i1, v16i8, v16i8) does not have a proper
23621 // lowering on KNL. In this case we convert it to
23622 // v16i8 (select v16i8, v16i8, v16i8) and use AVX instruction.
23623 // The same situation for all 128 and 256-bit vectors of i8 and i16.
23624 // Since SKX these selects have a proper lowering.
23625 EVT OpVT = LHS.getValueType();
23626 if ((OpVT.is128BitVector() || OpVT.is256BitVector()) &&
23627 (OpVT.getVectorElementType() == MVT::i8 ||
23628 OpVT.getVectorElementType() == MVT::i16) &&
23629 !(Subtarget->hasBWI() && Subtarget->hasVLX())) {
23630 Cond = DAG.getNode(ISD::SIGN_EXTEND, DL, OpVT, Cond);
23631 DCI.AddToWorklist(Cond.getNode());
23632 return DAG.getNode(N->getOpcode(), DL, OpVT, Cond, LHS, RHS);
23635 // If this is a select between two integer constants, try to do some
23637 if (ConstantSDNode *TrueC = dyn_cast<ConstantSDNode>(LHS)) {
23638 if (ConstantSDNode *FalseC = dyn_cast<ConstantSDNode>(RHS))
23639 // Don't do this for crazy integer types.
23640 if (DAG.getTargetLoweringInfo().isTypeLegal(LHS.getValueType())) {
23641 // If this is efficiently invertible, canonicalize the LHSC/RHSC values
23642 // so that TrueC (the true value) is larger than FalseC.
23643 bool NeedsCondInvert = false;
23645 if (TrueC->getAPIntValue().ult(FalseC->getAPIntValue()) &&
23646 // Efficiently invertible.
23647 (Cond.getOpcode() == ISD::SETCC || // setcc -> invertible.
23648 (Cond.getOpcode() == ISD::XOR && // xor(X, C) -> invertible.
23649 isa<ConstantSDNode>(Cond.getOperand(1))))) {
23650 NeedsCondInvert = true;
23651 std::swap(TrueC, FalseC);
23654 // Optimize C ? 8 : 0 -> zext(C) << 3. Likewise for any pow2/0.
23655 if (FalseC->getAPIntValue() == 0 &&
23656 TrueC->getAPIntValue().isPowerOf2()) {
23657 if (NeedsCondInvert) // Invert the condition if needed.
23658 Cond = DAG.getNode(ISD::XOR, DL, Cond.getValueType(), Cond,
23659 DAG.getConstant(1, Cond.getValueType()));
23661 // Zero extend the condition if needed.
23662 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, LHS.getValueType(), Cond);
23664 unsigned ShAmt = TrueC->getAPIntValue().logBase2();
23665 return DAG.getNode(ISD::SHL, DL, LHS.getValueType(), Cond,
23666 DAG.getConstant(ShAmt, MVT::i8));
23669 // Optimize Cond ? cst+1 : cst -> zext(setcc(C)) + cst.
23670 if (FalseC->getAPIntValue()+1 == TrueC->getAPIntValue()) {
23671 if (NeedsCondInvert) // Invert the condition if needed.
23672 Cond = DAG.getNode(ISD::XOR, DL, Cond.getValueType(), Cond,
23673 DAG.getConstant(1, Cond.getValueType()));
23675 // Zero extend the condition if needed.
23676 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL,
23677 FalseC->getValueType(0), Cond);
23678 return DAG.getNode(ISD::ADD, DL, Cond.getValueType(), Cond,
23679 SDValue(FalseC, 0));
23682 // Optimize cases that will turn into an LEA instruction. This requires
23683 // an i32 or i64 and an efficient multiplier (1, 2, 3, 4, 5, 8, 9).
23684 if (N->getValueType(0) == MVT::i32 || N->getValueType(0) == MVT::i64) {
23685 uint64_t Diff = TrueC->getZExtValue()-FalseC->getZExtValue();
23686 if (N->getValueType(0) == MVT::i32) Diff = (unsigned)Diff;
23688 bool isFastMultiplier = false;
23690 switch ((unsigned char)Diff) {
23692 case 1: // result = add base, cond
23693 case 2: // result = lea base( , cond*2)
23694 case 3: // result = lea base(cond, cond*2)
23695 case 4: // result = lea base( , cond*4)
23696 case 5: // result = lea base(cond, cond*4)
23697 case 8: // result = lea base( , cond*8)
23698 case 9: // result = lea base(cond, cond*8)
23699 isFastMultiplier = true;
23704 if (isFastMultiplier) {
23705 APInt Diff = TrueC->getAPIntValue()-FalseC->getAPIntValue();
23706 if (NeedsCondInvert) // Invert the condition if needed.
23707 Cond = DAG.getNode(ISD::XOR, DL, Cond.getValueType(), Cond,
23708 DAG.getConstant(1, Cond.getValueType()));
23710 // Zero extend the condition if needed.
23711 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, FalseC->getValueType(0),
23713 // Scale the condition by the difference.
23715 Cond = DAG.getNode(ISD::MUL, DL, Cond.getValueType(), Cond,
23716 DAG.getConstant(Diff, Cond.getValueType()));
23718 // Add the base if non-zero.
23719 if (FalseC->getAPIntValue() != 0)
23720 Cond = DAG.getNode(ISD::ADD, DL, Cond.getValueType(), Cond,
23721 SDValue(FalseC, 0));
23728 // Canonicalize max and min:
23729 // (x > y) ? x : y -> (x >= y) ? x : y
23730 // (x < y) ? x : y -> (x <= y) ? x : y
23731 // This allows use of COND_S / COND_NS (see TranslateX86CC) which eliminates
23732 // the need for an extra compare
23733 // against zero. e.g.
23734 // (x - y) > 0 ? (x - y) : 0 -> (x - y) >= 0 ? (x - y) : 0
// subl   %esi, %edi
23736 // testl %edi, %edi
// movl   $0, %eax
23738 // cmovgl %edi, %eax
// =>
// xorl   %eax, %eax
// subl   %esi, %edi
23742 // cmovsl %eax, %edi
23743 if (N->getOpcode() == ISD::SELECT && Cond.getOpcode() == ISD::SETCC &&
23744 DAG.isEqualTo(LHS, Cond.getOperand(0)) &&
23745 DAG.isEqualTo(RHS, Cond.getOperand(1))) {
23746 ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
23751 ISD::CondCode NewCC = (CC == ISD::SETLT) ? ISD::SETLE : ISD::SETGE;
23752 Cond = DAG.getSetCC(SDLoc(Cond), Cond.getValueType(),
23753 Cond.getOperand(0), Cond.getOperand(1), NewCC);
23754 return DAG.getNode(ISD::SELECT, DL, VT, Cond, LHS, RHS);
23759 // Early exit check
23760 if (!TLI.isTypeLegal(VT))
return SDValue();
23763 // Match VSELECTs into subs with unsigned saturation.
23764 if (N->getOpcode() == ISD::VSELECT && Cond.getOpcode() == ISD::SETCC &&
23765 // psubus is available in SSE2 and AVX2 for i8 and i16 vectors.
23766 ((Subtarget->hasSSE2() && (VT == MVT::v16i8 || VT == MVT::v8i16)) ||
23767 (Subtarget->hasAVX2() && (VT == MVT::v32i8 || VT == MVT::v16i16)))) {
23768 ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
23770 // Check if one of the arms of the VSELECT is a zero vector. If it's on the
23771 // left side invert the predicate to simplify logic below.
SDValue Other;
23773 if (ISD::isBuildVectorAllZeros(LHS.getNode())) {
Other = RHS;
23775 CC = ISD::getSetCCInverse(CC, true);
23776 } else if (ISD::isBuildVectorAllZeros(RHS.getNode())) {
Other = LHS;
}
23780 if (Other.getNode() && Other->getNumOperands() == 2 &&
23781 DAG.isEqualTo(Other->getOperand(0), Cond.getOperand(0))) {
23782 SDValue OpLHS = Other->getOperand(0), OpRHS = Other->getOperand(1);
23783 SDValue CondRHS = Cond->getOperand(1);
23785 // Look for a general sub with unsigned saturation first.
23786 // x >= y ? x-y : 0 --> subus x, y
23787 // x > y ? x-y : 0 --> subus x, y
23788 if ((CC == ISD::SETUGE || CC == ISD::SETUGT) &&
23789 Other->getOpcode() == ISD::SUB && DAG.isEqualTo(OpRHS, CondRHS))
23790 return DAG.getNode(X86ISD::SUBUS, DL, VT, OpLHS, OpRHS);
23792 if (auto *OpRHSBV = dyn_cast<BuildVectorSDNode>(OpRHS))
23793 if (auto *OpRHSConst = OpRHSBV->getConstantSplatNode()) {
23794 if (auto *CondRHSBV = dyn_cast<BuildVectorSDNode>(CondRHS))
23795 if (auto *CondRHSConst = CondRHSBV->getConstantSplatNode())
23796 // If the RHS is a constant we have to reverse the const
23797 // canonicalization.
23798 // x > C-1 ? x+-C : 0 --> subus x, C
23799 if (CC == ISD::SETUGT && Other->getOpcode() == ISD::ADD &&
23800 CondRHSConst->getAPIntValue() ==
23801 (-OpRHSConst->getAPIntValue() - 1))
23802 return DAG.getNode(
23803 X86ISD::SUBUS, DL, VT, OpLHS,
23804 DAG.getConstant(-OpRHSConst->getAPIntValue(), VT));
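// For example, with a v16i8 splat constant of 32 this matches
// (x >u 31) ? x + (-32) : 0 and emits (subus x, 32).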
23806 // Another special case: If C was a sign bit, the sub has been
23807 // canonicalized into a xor.
23808 // FIXME: Would it be better to use computeKnownBits to determine
23809 // whether it's safe to decanonicalize the xor?
23810 // x s< 0 ? x^C : 0 --> subus x, C
23811 if (CC == ISD::SETLT && Other->getOpcode() == ISD::XOR &&
23812 ISD::isBuildVectorAllZeros(CondRHS.getNode()) &&
23813 OpRHSConst->getAPIntValue().isSignBit())
23814 // Note that we have to rebuild the RHS constant here to ensure we
23815 // don't rely on particular values of undef lanes.
23816 return DAG.getNode(
23817 X86ISD::SUBUS, DL, VT, OpLHS,
23818 DAG.getConstant(OpRHSConst->getAPIntValue(), VT));
23823 // Try to match a min/max vector operation.
23824 if (N->getOpcode() == ISD::VSELECT && Cond.getOpcode() == ISD::SETCC) {
23825 std::pair<unsigned, bool> ret = matchIntegerMINMAX(Cond, VT, LHS, RHS, DAG, Subtarget);
23826 unsigned Opc = ret.first;
23827 bool NeedSplit = ret.second;
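// NeedSplit is set when the type is 256 bits wide but only AVX (not AVX2) is
// available; AVX alone has no 256-bit integer min/max, so the operation is
// performed on the two 128-bit halves and concatenated below.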
23829 if (Opc && NeedSplit) {
23830 unsigned NumElems = VT.getVectorNumElements();
23831 // Extract the LHS vectors
23832 SDValue LHS1 = Extract128BitVector(LHS, 0, DAG, DL);
23833 SDValue LHS2 = Extract128BitVector(LHS, NumElems/2, DAG, DL);
23835 // Extract the RHS vectors
23836 SDValue RHS1 = Extract128BitVector(RHS, 0, DAG, DL);
23837 SDValue RHS2 = Extract128BitVector(RHS, NumElems/2, DAG, DL);
23839 // Create min/max for each subvector
23840 LHS = DAG.getNode(Opc, DL, LHS1.getValueType(), LHS1, RHS1);
23841 RHS = DAG.getNode(Opc, DL, LHS2.getValueType(), LHS2, RHS2);
23843 // Merge the result
23844 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, LHS, RHS);
23846 return DAG.getNode(Opc, DL, VT, LHS, RHS);
23849 // Simplify vector selection if condition value type matches vselect
23851 if (N->getOpcode() == ISD::VSELECT && CondVT == VT) {
23852 assert(Cond.getValueType().isVector() &&
23853 "vector select expects a vector selector!");
23855 bool TValIsAllOnes = ISD::isBuildVectorAllOnes(LHS.getNode());
23856 bool FValIsAllZeros = ISD::isBuildVectorAllZeros(RHS.getNode());
23858 // Try invert the condition if true value is not all 1s and false value
23860 if (!TValIsAllOnes && !FValIsAllZeros &&
23861 // Check if the selector will be produced by CMPP*/PCMP*
23862 Cond.getOpcode() == ISD::SETCC &&
23863 // Check if SETCC has already been promoted
23864 TLI.getSetCCResultType(*DAG.getContext(), VT) == CondVT) {
23865 bool TValIsAllZeros = ISD::isBuildVectorAllZeros(LHS.getNode());
23866 bool FValIsAllOnes = ISD::isBuildVectorAllOnes(RHS.getNode());
23868 if (TValIsAllZeros || FValIsAllOnes) {
23869 SDValue CC = Cond.getOperand(2);
23870 ISD::CondCode NewCC =
23871 ISD::getSetCCInverse(cast<CondCodeSDNode>(CC)->get(),
23872 Cond.getOperand(0).getValueType().isInteger());
23873 Cond = DAG.getSetCC(DL, CondVT, Cond.getOperand(0), Cond.getOperand(1), NewCC);
23874 std::swap(LHS, RHS);
23875 TValIsAllOnes = FValIsAllOnes;
23876 FValIsAllZeros = TValIsAllZeros;
23880 if (TValIsAllOnes || FValIsAllZeros) {
SDValue Ret;
23883 if (TValIsAllOnes && FValIsAllZeros)
Ret = Cond;
23885 else if (TValIsAllOnes)
23886 Ret = DAG.getNode(ISD::OR, DL, CondVT, Cond,
23887 DAG.getNode(ISD::BITCAST, DL, CondVT, RHS));
23888 else if (FValIsAllZeros)
23889 Ret = DAG.getNode(ISD::AND, DL, CondVT, Cond,
23890 DAG.getNode(ISD::BITCAST, DL, CondVT, LHS));
23892 return DAG.getNode(ISD::BITCAST, DL, VT, Ret);
23896 // If we know that this node is legal then we know that it is going to be
23897 // matched by one of the SSE/AVX BLEND instructions. These instructions only
23898 // depend on the highest bit in each word. Try to use SimplifyDemandedBits
23899 // to simplify previous instructions.
23900 if (N->getOpcode() == ISD::VSELECT && DCI.isBeforeLegalizeOps() &&
23901 !DCI.isBeforeLegalize() &&
23902 // We explicitly check against SSE4.1, v8i16 and v16i16 because, although
23903 // vselect nodes may be marked as Custom, they might only be legal when
23904 // Cond is a build_vector of constants. This will be taken care of in
23905 // a later condition.
23906 (TLI.isOperationLegalOrCustom(ISD::VSELECT, VT) &&
23907 Subtarget->hasSSE41() && VT != MVT::v16i16 && VT != MVT::v8i16) &&
23908 // Don't optimize vector of constants. Those are handled by
23909 // the generic code and all the bits must be properly set for
23910 // the generic optimizer.
23911 !ISD::isBuildVectorOfConstantSDNodes(Cond.getNode())) {
23912 unsigned BitWidth = Cond.getValueType().getScalarType().getSizeInBits();
23914 // Don't optimize vector selects that map to mask-registers.
if (BitWidth == 1)
return SDValue();
23918 assert(BitWidth >= 8 && BitWidth <= 64 && "Invalid mask size");
23919 APInt DemandedMask = APInt::getHighBitsSet(BitWidth, 1);
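// The SSE4.1/AVX BLENDV family selects on the most significant bit of each
// condition element, so only that bit of Cond is demanded here.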
23921 APInt KnownZero, KnownOne;
23922 TargetLowering::TargetLoweringOpt TLO(DAG, DCI.isBeforeLegalize(),
23923 DCI.isBeforeLegalizeOps());
23924 if (TLO.ShrinkDemandedConstant(Cond, DemandedMask) ||
23925 TLI.SimplifyDemandedBits(Cond, DemandedMask, KnownZero, KnownOne,
23927 // If we changed the computation somewhere in the DAG, this change
23928 // will affect all users of Cond.
23929 // Make sure it is fine and update all the nodes so that we do not
23930 // use the generic VSELECT anymore. Otherwise, we may perform
23931 // wrong optimizations as we messed up with the actual expectation
23932 // for the vector boolean values.
23933 if (Cond != TLO.Old) {
23934 // Check all uses of that condition operand to check whether it will be
23935 // consumed by non-BLEND instructions, which may depend on all bits being
// set properly.
23937 for (SDNode::use_iterator I = Cond->use_begin(), E = Cond->use_end();
I != E; ++I)
23939 if (I->getOpcode() != ISD::VSELECT)
23940 // TODO: Add other opcodes eventually lowered into BLEND.
return SDValue();
23943 // Update all the users of the condition, before committing the change,
23944 // so that the VSELECT optimizations that expect the correct vector
23945 // boolean value will not be triggered.
23946 for (SDNode::use_iterator I = Cond->use_begin(), E = Cond->use_end();
I != E; ++I)
23948 DAG.ReplaceAllUsesOfValueWith(
SDValue(*I, 0),
23950 DAG.getNode(X86ISD::SHRUNKBLEND, SDLoc(*I), I->getValueType(0),
23951 Cond, I->getOperand(1), I->getOperand(2)));
23952 DCI.CommitTargetLoweringOpt(TLO);
23955 // At this point, only Cond is changed. Change the condition
23956 // just for N to keep the opportunity to optimize all other
23957 // users their own way.
23958 DAG.ReplaceAllUsesOfValueWith(
23960 DAG.getNode(X86ISD::SHRUNKBLEND, SDLoc(N), N->getValueType(0),
23961 TLO.New, N->getOperand(1), N->getOperand(2)));
23966 // We should generate an X86ISD::BLENDI from a vselect if its argument
23967 // is a sign_extend_inreg of an any_extend of a BUILD_VECTOR of
23968 // constants. This specific pattern gets generated when we split a
23969 // selector for a 512 bit vector in a machine without AVX512 (but with
23970 // 256-bit vectors), during legalization:
23972 // (vselect (sign_extend (any_extend (BUILD_VECTOR)) i1) LHS RHS)
23974 // Iff we find this pattern and the build_vectors are built from
23975 // constants, we translate the vselect into a shuffle_vector that we
23976 // know will be matched by LowerVECTOR_SHUFFLEtoBlend.
23977 if ((N->getOpcode() == ISD::VSELECT ||
23978 N->getOpcode() == X86ISD::SHRUNKBLEND) &&
23979 !DCI.isBeforeLegalize()) {
23980 SDValue Shuffle = transformVSELECTtoBlendVECTOR_SHUFFLE(N, DAG, Subtarget);
23981 if (Shuffle.getNode())
return Shuffle;
}
return SDValue();
}
23988 // Check whether a boolean test is testing a boolean value generated by
23989 // X86ISD::SETCC. If so, return the operand of that SETCC and proper condition
23992 // Simplify the following patterns:
23993 // (Op (CMP (SETCC Cond EFLAGS) 1) EQ) or
23994 // (Op (CMP (SETCC Cond EFLAGS) 0) NEQ)
23995 // to (Op EFLAGS Cond)
23997 // (Op (CMP (SETCC Cond EFLAGS) 0) EQ) or
23998 // (Op (CMP (SETCC Cond EFLAGS) 1) NEQ)
23999 // to (Op EFLAGS !Cond)
24001 // where Op could be BRCOND or CMOV.
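// For example, a boolean produced by SETCC is often zero-extended, compared
// against 0 or 1 again, and then branched on; the helper below recognizes that
// round trip and lets the caller reuse the original EFLAGS and condition code
// directly.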
24003 static SDValue checkBoolTestSetCCCombine(SDValue Cmp, X86::CondCode &CC) {
24004 // Quit unless Cmp is a CMP, or a SUB whose integer result is unused.
24005 if (Cmp.getOpcode() != X86ISD::CMP &&
24006 (Cmp.getOpcode() != X86ISD::SUB || Cmp.getNode()->hasAnyUseOfValue(0)))
return SDValue();
24009 // Quit if not used as a boolean value.
24010 if (CC != X86::COND_E && CC != X86::COND_NE)
return SDValue();
24013 // Check CMP operands. One of them should be 0 or 1 and the other should be
24014 // an SetCC or extended from it.
24015 SDValue Op1 = Cmp.getOperand(0);
24016 SDValue Op2 = Cmp.getOperand(1);
SDValue SetCC;
24019 const ConstantSDNode* C = nullptr;
24020 bool needOppositeCond = (CC == X86::COND_E);
24021 bool checkAgainstTrue = false; // Is it a comparison against 1?
24023 if ((C = dyn_cast<ConstantSDNode>(Op1)))
SetCC = Op2;
24025 else if ((C = dyn_cast<ConstantSDNode>(Op2)))
SetCC = Op1;
24027 else // Quit if all operands are not constants.
return SDValue();
24030 if (C->getZExtValue() == 1) {
24031 needOppositeCond = !needOppositeCond;
24032 checkAgainstTrue = true;
24033 } else if (C->getZExtValue() != 0)
24034 // Quit if the constant is neither 0 nor 1.
return SDValue();
24037 bool truncatedToBoolWithAnd = false;
24038 // Skip (zext $x), (trunc $x), or (and $x, 1) node.
24039 while (SetCC.getOpcode() == ISD::ZERO_EXTEND ||
24040 SetCC.getOpcode() == ISD::TRUNCATE ||
24041 SetCC.getOpcode() == ISD::AND) {
24042 if (SetCC.getOpcode() == ISD::AND) {
int OpIdx = -1;
24044 ConstantSDNode *CS;
24045 if ((CS = dyn_cast<ConstantSDNode>(SetCC.getOperand(0))) &&
24046 CS->getZExtValue() == 1)
OpIdx = 1;
24048 if ((CS = dyn_cast<ConstantSDNode>(SetCC.getOperand(1))) &&
24049 CS->getZExtValue() == 1)
OpIdx = 0;
if (OpIdx == -1)
break;
24053 SetCC = SetCC.getOperand(OpIdx);
24054 truncatedToBoolWithAnd = true;
} else
24056 SetCC = SetCC.getOperand(0);
}
24059 switch (SetCC.getOpcode()) {
24060 case X86ISD::SETCC_CARRY:
24061 // Since SETCC_CARRY gives output based on R = CF ? ~0 : 0, it's unsafe to
24062 // simplify it if the result of SETCC_CARRY is not canonicalized to 0 or 1,
24063 // i.e. it's a comparison against true but the result of SETCC_CARRY is not
24064 // truncated to i1 using 'and'.
24065 if (checkAgainstTrue && !truncatedToBoolWithAnd)
break;
24067 assert(X86::CondCode(SetCC.getConstantOperandVal(0)) == X86::COND_B &&
24068 "Invalid use of SETCC_CARRY!");
// Fall through.
24070 case X86ISD::SETCC:
24071 // Set the condition code or opposite one if necessary.
24072 CC = X86::CondCode(SetCC.getConstantOperandVal(0));
24073 if (needOppositeCond)
24074 CC = X86::GetOppositeBranchCondition(CC);
24075 return SetCC.getOperand(1);
24076 case X86ISD::CMOV: {
24077 // Check whether false/true value has canonical one, i.e. 0 or 1.
24078 ConstantSDNode *FVal = dyn_cast<ConstantSDNode>(SetCC.getOperand(0));
24079 ConstantSDNode *TVal = dyn_cast<ConstantSDNode>(SetCC.getOperand(1));
24080 // Quit if true value is not a constant.
if (!TVal)
return SDValue();
24083 // Quit if false value is not a constant.
if (!FVal) {
24085 SDValue Op = SetCC.getOperand(0);
24086 // Skip 'zext' or 'trunc' node.
24087 if (Op.getOpcode() == ISD::ZERO_EXTEND ||
24088 Op.getOpcode() == ISD::TRUNCATE)
24089 Op = Op.getOperand(0);
24090 // A special case for rdrand/rdseed, where 0 is set if false cond is
24092 if ((Op.getOpcode() != X86ISD::RDRAND &&
24093 Op.getOpcode() != X86ISD::RDSEED) || Op.getResNo() != 0)
24096 // Quit if false value is not the constant 0 or 1.
24097 bool FValIsFalse = true;
24098 if (FVal && FVal->getZExtValue() != 0) {
24099 if (FVal->getZExtValue() != 1)
24101 // If FVal is 1, opposite cond is needed.
24102 needOppositeCond = !needOppositeCond;
24103 FValIsFalse = false;
24105 // Quit if TVal is not the constant opposite of FVal.
24106 if (FValIsFalse && TVal->getZExtValue() != 1)
24108 if (!FValIsFalse && TVal->getZExtValue() != 0)
24110 CC = X86::CondCode(SetCC.getConstantOperandVal(2));
24111 if (needOppositeCond)
24112 CC = X86::GetOppositeBranchCondition(CC);
24113 return SetCC.getOperand(3);
24120 /// Optimize X86ISD::CMOV [LHS, RHS, CONDCODE (e.g. X86::COND_NE), CONDVAL]
24121 static SDValue PerformCMOVCombine(SDNode *N, SelectionDAG &DAG,
24122 TargetLowering::DAGCombinerInfo &DCI,
24123 const X86Subtarget *Subtarget) {
SDLoc DL(N);
24126 // If the flag operand isn't dead, don't touch this CMOV.
24127 if (N->getNumValues() == 2 && !SDValue(N, 1).use_empty())
return SDValue();
24130 SDValue FalseOp = N->getOperand(0);
24131 SDValue TrueOp = N->getOperand(1);
24132 X86::CondCode CC = (X86::CondCode)N->getConstantOperandVal(2);
24133 SDValue Cond = N->getOperand(3);
24135 if (CC == X86::COND_E || CC == X86::COND_NE) {
24136 switch (Cond.getOpcode()) {
default: break;
case X86ISD::BSR: case X86ISD::BSF:
24140 // If the operand of BSR / BSF is proven never zero, then ZF cannot be set.
24141 if (DAG.isKnownNeverZero(Cond.getOperand(0)))
24142 return (CC == X86::COND_E) ? FalseOp : TrueOp;
break;
}
SDValue Flags;
24148 Flags = checkBoolTestSetCCCombine(Cond, CC);
24149 if (Flags.getNode() &&
24150 // Extra check as FCMOV only supports a subset of X86 cond.
24151 (FalseOp.getValueType() != MVT::f80 || hasFPCMov(CC))) {
24152 SDValue Ops[] = { FalseOp, TrueOp,
24153 DAG.getConstant(CC, MVT::i8), Flags };
24154 return DAG.getNode(X86ISD::CMOV, DL, N->getVTList(), Ops);
24157 // If this is a select between two integer constants, try to do some
24158 // optimizations. Note that the operands are ordered the opposite of SELECT
24160 if (ConstantSDNode *TrueC = dyn_cast<ConstantSDNode>(TrueOp)) {
24161 if (ConstantSDNode *FalseC = dyn_cast<ConstantSDNode>(FalseOp)) {
24162 // Canonicalize the TrueC/FalseC values so that TrueC (the true value) is
24163 // larger than FalseC (the false value).
24164 if (TrueC->getAPIntValue().ult(FalseC->getAPIntValue())) {
24165 CC = X86::GetOppositeBranchCondition(CC);
24166 std::swap(TrueC, FalseC);
24167 std::swap(TrueOp, FalseOp);
24170 // Optimize C ? 8 : 0 -> zext(setcc(C)) << 3. Likewise for any pow2/0.
24171 // This is efficient for any integer data type (including i8/i16) and
24173 if (FalseC->getAPIntValue() == 0 && TrueC->getAPIntValue().isPowerOf2()) {
24174 Cond = DAG.getNode(X86ISD::SETCC, DL, MVT::i8,
24175 DAG.getConstant(CC, MVT::i8), Cond);
24177 // Zero extend the condition if needed.
24178 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, TrueC->getValueType(0), Cond);
24180 unsigned ShAmt = TrueC->getAPIntValue().logBase2();
24181 Cond = DAG.getNode(ISD::SHL, DL, Cond.getValueType(), Cond,
24182 DAG.getConstant(ShAmt, MVT::i8));
24183 if (N->getNumValues() == 2) // Dead flag value?
24184 return DCI.CombineTo(N, Cond, SDValue());
24188 // Optimize Cond ? cst+1 : cst -> zext(setcc(C)) + cst. This is efficient
24189 // for any integer data type, including i8/i16.
24190 if (FalseC->getAPIntValue()+1 == TrueC->getAPIntValue()) {
24191 Cond = DAG.getNode(X86ISD::SETCC, DL, MVT::i8,
24192 DAG.getConstant(CC, MVT::i8), Cond);
24194 // Zero extend the condition if needed.
24195 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL,
24196 FalseC->getValueType(0), Cond);
24197 Cond = DAG.getNode(ISD::ADD, DL, Cond.getValueType(), Cond,
24198 SDValue(FalseC, 0));
24200 if (N->getNumValues() == 2) // Dead flag value?
24201 return DCI.CombineTo(N, Cond, SDValue());
24205 // Optimize cases that will turn into an LEA instruction. This requires
24206 // an i32 or i64 and an efficient multiplier (1, 2, 3, 4, 5, 8, 9).
24207 if (N->getValueType(0) == MVT::i32 || N->getValueType(0) == MVT::i64) {
24208 uint64_t Diff = TrueC->getZExtValue()-FalseC->getZExtValue();
24209 if (N->getValueType(0) == MVT::i32) Diff = (unsigned)Diff;
24211 bool isFastMultiplier = false;
24213 switch ((unsigned char)Diff) {
24215 case 1: // result = add base, cond
24216 case 2: // result = lea base( , cond*2)
24217 case 3: // result = lea base(cond, cond*2)
24218 case 4: // result = lea base( , cond*4)
24219 case 5: // result = lea base(cond, cond*4)
24220 case 8: // result = lea base( , cond*8)
24221 case 9: // result = lea base(cond, cond*8)
24222 isFastMultiplier = true;
24227 if (isFastMultiplier) {
24228 APInt Diff = TrueC->getAPIntValue()-FalseC->getAPIntValue();
24229 Cond = DAG.getNode(X86ISD::SETCC, DL, MVT::i8,
24230 DAG.getConstant(CC, MVT::i8), Cond);
24231 // Zero extend the condition if needed.
24232 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, FalseC->getValueType(0),
24234 // Scale the condition by the difference.
24236 Cond = DAG.getNode(ISD::MUL, DL, Cond.getValueType(), Cond,
24237 DAG.getConstant(Diff, Cond.getValueType()));
24239 // Add the base if non-zero.
24240 if (FalseC->getAPIntValue() != 0)
24241 Cond = DAG.getNode(ISD::ADD, DL, Cond.getValueType(), Cond,
24242 SDValue(FalseC, 0));
24243 if (N->getNumValues() == 2) // Dead flag value?
24244 return DCI.CombineTo(N, Cond, SDValue());
24251 // Handle these cases:
24252 // (select (x != c), e, c) -> select (x != c), e, x),
24253 // (select (x == c), c, e) -> select (x == c), x, e)
24254 // where the c is an integer constant, and the "select" is the combination
24255 // of CMOV and CMP.
24257 // The rationale for this change is that the conditional-move from a constant
24258 // needs two instructions, however, conditional-move from a register needs
24259 // only one instruction.
24261 // CAVEAT: By replacing a constant with a symbolic value, it may obscure
24262 // some instruction-combining opportunities. This opt needs to be
24263 // postponed as late as possible.
24265 if (!DCI.isBeforeLegalize() && !DCI.isBeforeLegalizeOps()) {
24266 // the DCI.xxxx conditions are provided to postpone the optimization as
24267 // late as possible.
24269 ConstantSDNode *CmpAgainst = nullptr;
24270 if ((Cond.getOpcode() == X86ISD::CMP || Cond.getOpcode() == X86ISD::SUB) &&
24271 (CmpAgainst = dyn_cast<ConstantSDNode>(Cond.getOperand(1))) &&
24272 !isa<ConstantSDNode>(Cond.getOperand(0))) {
24274 if (CC == X86::COND_NE &&
24275 CmpAgainst == dyn_cast<ConstantSDNode>(FalseOp)) {
24276 CC = X86::GetOppositeBranchCondition(CC);
24277 std::swap(TrueOp, FalseOp);
24280 if (CC == X86::COND_E &&
24281 CmpAgainst == dyn_cast<ConstantSDNode>(TrueOp)) {
24282 SDValue Ops[] = { FalseOp, Cond.getOperand(0),
24283 DAG.getConstant(CC, MVT::i8), Cond };
24284 return DAG.getNode(X86ISD::CMOV, DL, N->getVTList (), Ops);
24292 static SDValue PerformINTRINSIC_WO_CHAINCombine(SDNode *N, SelectionDAG &DAG,
24293 const X86Subtarget *Subtarget) {
24294 unsigned IntNo = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
switch (IntNo) {
24296 default: return SDValue();
24297 // SSE/AVX/AVX2 blend intrinsics.
24298 case Intrinsic::x86_avx2_pblendvb:
24299 case Intrinsic::x86_avx2_pblendw:
24300 case Intrinsic::x86_avx2_pblendd_128:
24301 case Intrinsic::x86_avx2_pblendd_256:
24302 // Don't try to simplify this intrinsic if we don't have AVX2.
24303 if (!Subtarget->hasAVX2())
24306 case Intrinsic::x86_avx_blend_pd_256:
24307 case Intrinsic::x86_avx_blend_ps_256:
24308 case Intrinsic::x86_avx_blendv_pd_256:
24309 case Intrinsic::x86_avx_blendv_ps_256:
24310 // Don't try to simplify this intrinsic if we don't have AVX.
24311 if (!Subtarget->hasAVX())
24314 case Intrinsic::x86_sse41_pblendw:
24315 case Intrinsic::x86_sse41_blendpd:
24316 case Intrinsic::x86_sse41_blendps:
24317 case Intrinsic::x86_sse41_blendvps:
24318 case Intrinsic::x86_sse41_blendvpd:
24319 case Intrinsic::x86_sse41_pblendvb: {
24320 SDValue Op0 = N->getOperand(1);
24321 SDValue Op1 = N->getOperand(2);
24322 SDValue Mask = N->getOperand(3);
24324 // Don't try to simplify this intrinsic if we don't have SSE4.1.
24325 if (!Subtarget->hasSSE41())
return SDValue();
24328 // fold (blend A, A, Mask) -> A
if (Op0 == Op1)
return Op0;
24331 // fold (blend A, B, allZeros) -> A
24332 if (ISD::isBuildVectorAllZeros(Mask.getNode()))
return Op0;
24334 // fold (blend A, B, allOnes) -> B
24335 if (ISD::isBuildVectorAllOnes(Mask.getNode()))
return Op1;
24338 // Simplify the case where the mask is a constant i32 value.
24339 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Mask)) {
24340 if (C->isNullValue())
return Op0;
24342 if (C->isAllOnesValue())
return Op1;
}
break;
}
24349 // Packed SSE2/AVX2 arithmetic shift immediate intrinsics.
24350 case Intrinsic::x86_sse2_psrai_w:
24351 case Intrinsic::x86_sse2_psrai_d:
24352 case Intrinsic::x86_avx2_psrai_w:
24353 case Intrinsic::x86_avx2_psrai_d:
24354 case Intrinsic::x86_sse2_psra_w:
24355 case Intrinsic::x86_sse2_psra_d:
24356 case Intrinsic::x86_avx2_psra_w:
24357 case Intrinsic::x86_avx2_psra_d: {
24358 SDValue Op0 = N->getOperand(1);
24359 SDValue Op1 = N->getOperand(2);
24360 EVT VT = Op0.getValueType();
24361 assert(VT.isVector() && "Expected a vector type!");
24363 if (isa<BuildVectorSDNode>(Op1))
24364 Op1 = Op1.getOperand(0);
24366 if (!isa<ConstantSDNode>(Op1))
return SDValue();
24369 EVT SVT = VT.getVectorElementType();
24370 unsigned SVTBits = SVT.getSizeInBits();
24372 ConstantSDNode *CND = cast<ConstantSDNode>(Op1);
24373 const APInt &C = APInt(SVTBits, CND->getAPIntValue().getZExtValue());
24374 uint64_t ShAmt = C.getZExtValue();
24376 // Don't try to convert this shift into a ISD::SRA if the shift
24377 // count is bigger than or equal to the element size.
24378 if (ShAmt >= SVTBits)
return SDValue();
24381 // Trivial case: if the shift count is zero, then fold this
24382 // into the first operand.
if (ShAmt == 0)
return Op0;
24386 // Replace this packed shift intrinsic with a target independent
// shift dag node.
24388 SDValue Splat = DAG.getConstant(C, VT);
24389 return DAG.getNode(ISD::SRA, SDLoc(N), VT, Op0, Splat);
24394 /// PerformMulCombine - Optimize a single multiply with constant into two
24395 /// in order to implement it with two cheaper instructions, e.g.
24396 /// LEA + SHL, LEA + LEA.
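/// For example, a multiply by 45 can be emitted as (x*9)*5 (two LEAs), and a
/// multiply by 40 as (x*5)<<3 (an LEA followed by a SHL).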
24397 static SDValue PerformMulCombine(SDNode *N, SelectionDAG &DAG,
24398 TargetLowering::DAGCombinerInfo &DCI) {
24399 if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer())
24402 EVT VT = N->getValueType(0);
24403 if (VT != MVT::i64 && VT != MVT::i32)
return SDValue();
24406 ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1));
if (!C)
return SDValue();
24409 uint64_t MulAmt = C->getZExtValue();
24410 if (isPowerOf2_64(MulAmt) || MulAmt == 3 || MulAmt == 5 || MulAmt == 9)
return SDValue();
24413 uint64_t MulAmt1 = 0;
24414 uint64_t MulAmt2 = 0;
24415 if ((MulAmt % 9) == 0) {
MulAmt1 = 9;
24417 MulAmt2 = MulAmt / 9;
24418 } else if ((MulAmt % 5) == 0) {
MulAmt1 = 5;
24420 MulAmt2 = MulAmt / 5;
24421 } else if ((MulAmt % 3) == 0) {
MulAmt1 = 3;
24423 MulAmt2 = MulAmt / 3;
}
if (MulAmt2 &&
24426 (isPowerOf2_64(MulAmt2) || MulAmt2 == 3 || MulAmt2 == 5 || MulAmt2 == 9)){
SDLoc DL(N);
24429 if (isPowerOf2_64(MulAmt2) &&
24430 !(N->hasOneUse() && N->use_begin()->getOpcode() == ISD::ADD))
24431 // If second multiplier is pow2, issue it first. We want the multiply by
24432 // 3, 5, or 9 to be folded into the addressing mode unless the lone use
// is an add.
24434 std::swap(MulAmt1, MulAmt2);
SDValue NewMul;
24437 if (isPowerOf2_64(MulAmt1))
24438 NewMul = DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
24439 DAG.getConstant(Log2_64(MulAmt1), MVT::i8));
24441 NewMul = DAG.getNode(X86ISD::MUL_IMM, DL, VT, N->getOperand(0),
24442 DAG.getConstant(MulAmt1, VT));
24444 if (isPowerOf2_64(MulAmt2))
24445 NewMul = DAG.getNode(ISD::SHL, DL, VT, NewMul,
24446 DAG.getConstant(Log2_64(MulAmt2), MVT::i8));
24448 NewMul = DAG.getNode(X86ISD::MUL_IMM, DL, VT, NewMul,
24449 DAG.getConstant(MulAmt2, VT));
24451 // Do not add new nodes to DAG combiner worklist.
24452 DCI.CombineTo(N, NewMul, false);
24457 static SDValue PerformSHLCombine(SDNode *N, SelectionDAG &DAG) {
24458 SDValue N0 = N->getOperand(0);
24459 SDValue N1 = N->getOperand(1);
24460 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
24461 EVT VT = N0.getValueType();
24463 // fold (shl (and (setcc_c), c1), c2) -> (and setcc_c, (c1 << c2))
24464 // since the result of setcc_c is all zero's or all ones.
24465 if (VT.isInteger() && !VT.isVector() &&
24466 N1C && N0.getOpcode() == ISD::AND &&
24467 N0.getOperand(1).getOpcode() == ISD::Constant) {
24468 SDValue N00 = N0.getOperand(0);
24469 if (N00.getOpcode() == X86ISD::SETCC_CARRY ||
24470 ((N00.getOpcode() == ISD::ANY_EXTEND ||
24471 N00.getOpcode() == ISD::ZERO_EXTEND) &&
24472 N00.getOperand(0).getOpcode() == X86ISD::SETCC_CARRY)) {
24473 APInt Mask = cast<ConstantSDNode>(N0.getOperand(1))->getAPIntValue();
24474 APInt ShAmt = N1C->getAPIntValue();
24475 Mask = Mask.shl(ShAmt);
24477 return DAG.getNode(ISD::AND, SDLoc(N), VT,
24478 N00, DAG.getConstant(Mask, VT));
24482 // Hardware support for vector shifts is sparse which makes us scalarize the
24483 // vector operations in many cases. Also, on sandybridge ADD is faster than
24485 // (shl V, 1) -> add V,V
24486 if (auto *N1BV = dyn_cast<BuildVectorSDNode>(N1))
24487 if (auto *N1SplatC = N1BV->getConstantSplatNode()) {
24488 assert(N0.getValueType().isVector() && "Invalid vector shift type");
24489 // We shift all of the values by one. In many cases we do not have
24490 // hardware support for this operation. This is better expressed as an ADD
24492 if (N1SplatC->getZExtValue() == 1)
24493 return DAG.getNode(ISD::ADD, SDLoc(N), VT, N0, N0);
24499 /// \brief Returns a vector of 0s if the node in input is a vector logical
24500 /// shift by a constant amount which is known to be bigger than or equal
24501 /// to the vector element size in bits.
24502 static SDValue performShiftToAllZeros(SDNode *N, SelectionDAG &DAG,
24503 const X86Subtarget *Subtarget) {
24504 EVT VT = N->getValueType(0);
24506 if (VT != MVT::v2i64 && VT != MVT::v4i32 && VT != MVT::v8i16 &&
24507 (!Subtarget->hasInt256() ||
24508 (VT != MVT::v4i64 && VT != MVT::v8i32 && VT != MVT::v16i16)))
24511 SDValue Amt = N->getOperand(1);
24513 if (auto *AmtBV = dyn_cast<BuildVectorSDNode>(Amt))
24514 if (auto *AmtSplat = AmtBV->getConstantSplatNode()) {
24515 APInt ShiftAmt = AmtSplat->getAPIntValue();
24516 unsigned MaxAmount = VT.getVectorElementType().getSizeInBits();
24518 // SSE2/AVX2 logical shifts always return a vector of 0s
24519 // if the shift amount is bigger than or equal to
24520 // the element size. The constant shift amount will be
24521 // encoded as an 8-bit immediate.
24522 if (ShiftAmt.trunc(8).uge(MaxAmount))
24523 return getZeroVector(VT, Subtarget, DAG, DL);
24529 /// PerformShiftCombine - Combine shifts.
24530 static SDValue PerformShiftCombine(SDNode* N, SelectionDAG &DAG,
24531 TargetLowering::DAGCombinerInfo &DCI,
24532 const X86Subtarget *Subtarget) {
24533 if (N->getOpcode() == ISD::SHL) {
24534 SDValue V = PerformSHLCombine(N, DAG);
24535 if (V.getNode()) return V;
24538 if (N->getOpcode() != ISD::SRA) {
24539 // Try to fold this logical shift into a zero vector.
24540 SDValue V = performShiftToAllZeros(N, DAG, Subtarget);
24541 if (V.getNode()) return V;
24547 // CMPEQCombine - Recognize the distinctive (AND (setcc ...) (setcc ..))
24548 // where both setccs reference the same FP CMP, and rewrite for CMPEQSS
24549 // and friends. Likewise for OR -> CMPNEQSS.
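// Conceptually, both flag tests come from one UCOMISS/UCOMISD; rewriting them
// as a single CMPEQSS/CMPNEQSS produces an all-ones or all-zeros value whose
// low bit is then masked and truncated to the boolean result below.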
24550 static SDValue CMPEQCombine(SDNode *N, SelectionDAG &DAG,
24551 TargetLowering::DAGCombinerInfo &DCI,
24552 const X86Subtarget *Subtarget) {
24555 // SSE1 supports CMP{eq|ne}SS, and SSE2 added CMP{eq|ne}SD, but
24556 // we're requiring SSE2 for both.
24557 if (Subtarget->hasSSE2() && isAndOrOfSetCCs(SDValue(N, 0U), opcode)) {
24558 SDValue N0 = N->getOperand(0);
24559 SDValue N1 = N->getOperand(1);
24560 SDValue CMP0 = N0->getOperand(1);
24561 SDValue CMP1 = N1->getOperand(1);
24564 // The SETCCs should both refer to the same CMP.
24565 if (CMP0.getOpcode() != X86ISD::CMP || CMP0 != CMP1)
24568 SDValue CMP00 = CMP0->getOperand(0);
24569 SDValue CMP01 = CMP0->getOperand(1);
24570 EVT VT = CMP00.getValueType();
24572 if (VT == MVT::f32 || VT == MVT::f64) {
24573 bool ExpectingFlags = false;
24574 // Check for any users that want flags:
24575 for (SDNode::use_iterator UI = N->use_begin(), UE = N->use_end();
24576 !ExpectingFlags && UI != UE; ++UI)
24577 switch (UI->getOpcode()) {
24582 ExpectingFlags = true;
24584 case ISD::CopyToReg:
24585 case ISD::SIGN_EXTEND:
24586 case ISD::ZERO_EXTEND:
24587 case ISD::ANY_EXTEND:
24591 if (!ExpectingFlags) {
24592 enum X86::CondCode cc0 = (enum X86::CondCode)N0.getConstantOperandVal(0);
24593 enum X86::CondCode cc1 = (enum X86::CondCode)N1.getConstantOperandVal(0);
24595 if (cc1 == X86::COND_E || cc1 == X86::COND_NE) {
24596 X86::CondCode tmp = cc0;
24601 if ((cc0 == X86::COND_E && cc1 == X86::COND_NP) ||
24602 (cc0 == X86::COND_NE && cc1 == X86::COND_P)) {
24603 // FIXME: need symbolic constants for these magic numbers.
24604 // See X86ATTInstPrinter.cpp:printSSECC().
24605 unsigned x86cc = (cc0 == X86::COND_E) ? 0 : 4;
24606 if (Subtarget->hasAVX512()) {
24607 SDValue FSetCC = DAG.getNode(X86ISD::FSETCC, DL, MVT::i1, CMP00,
24608 CMP01, DAG.getConstant(x86cc, MVT::i8));
24609 if (N->getValueType(0) != MVT::i1)
24610 return DAG.getNode(ISD::ZERO_EXTEND, DL, N->getValueType(0),
24614 SDValue OnesOrZeroesF = DAG.getNode(X86ISD::FSETCC, DL,
24615 CMP00.getValueType(), CMP00, CMP01,
24616 DAG.getConstant(x86cc, MVT::i8));
24618 bool is64BitFP = (CMP00.getValueType() == MVT::f64);
24619 MVT IntVT = is64BitFP ? MVT::i64 : MVT::i32;
24621 if (is64BitFP && !Subtarget->is64Bit()) {
24622 // On a 32-bit target, we cannot bitcast the 64-bit float to a
24623 // 64-bit integer, since that's not a legal type. Since
24624 // OnesOrZeroesF is all ones of all zeroes, we don't need all the
24625 // bits, but can do this little dance to extract the lowest 32 bits
24626 // and work with those going forward.
24627 SDValue Vector64 = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v2f64,
24629 SDValue Vector32 = DAG.getNode(ISD::BITCAST, DL, MVT::v4f32,
24631 OnesOrZeroesF = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32,
24632 Vector32, DAG.getIntPtrConstant(0));
24636 SDValue OnesOrZeroesI = DAG.getNode(ISD::BITCAST, DL, IntVT, OnesOrZeroesF);
24637 SDValue ANDed = DAG.getNode(ISD::AND, DL, IntVT, OnesOrZeroesI,
24638 DAG.getConstant(1, IntVT));
24639 SDValue OneBitOfTruth = DAG.getNode(ISD::TRUNCATE, DL, MVT::i8, ANDed);
24640 return OneBitOfTruth;
24648 /// CanFoldXORWithAllOnes - Test whether the XOR operand is a AllOnes vector
24649 /// so it can be folded inside ANDNP.
24650 static bool CanFoldXORWithAllOnes(const SDNode *N) {
24651 EVT VT = N->getValueType(0);
24653 // Match direct AllOnes for 128 and 256-bit vectors
24654 if (ISD::isBuildVectorAllOnes(N))
return true;
24657 // Look through a bit convert.
24658 if (N->getOpcode() == ISD::BITCAST)
24659 N = N->getOperand(0).getNode();
24661 // Sometimes the operand may come from a insert_subvector building a 256-bit
24663 if (VT.is256BitVector() &&
24664 N->getOpcode() == ISD::INSERT_SUBVECTOR) {
24665 SDValue V1 = N->getOperand(0);
24666 SDValue V2 = N->getOperand(1);
24668 if (V1.getOpcode() == ISD::INSERT_SUBVECTOR &&
24669 V1.getOperand(0).getOpcode() == ISD::UNDEF &&
24670 ISD::isBuildVectorAllOnes(V1.getOperand(1).getNode()) &&
24671 ISD::isBuildVectorAllOnes(V2.getNode()))
return true;
}
return false;
}
24678 // On AVX/AVX2 the type v8i1 is legalized to v8i16, which is an XMM sized
24679 // register. In most cases we actually compare or select YMM-sized registers
24680 // and mixing the two types creates horrible code. This method optimizes
24681 // some of the transition sequences.
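// For example, (sign_extend (xor (trunc A), (trunc B))) with 256-bit A and B
// becomes (sign_extend_inreg (xor A, B)), keeping the logic in the wide type
// instead of bouncing through the narrow one.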
24682 static SDValue WidenMaskArithmetic(SDNode *N, SelectionDAG &DAG,
24683 TargetLowering::DAGCombinerInfo &DCI,
24684 const X86Subtarget *Subtarget) {
24685 EVT VT = N->getValueType(0);
24686 if (!VT.is256BitVector())
return SDValue();
24689 assert((N->getOpcode() == ISD::ANY_EXTEND ||
24690 N->getOpcode() == ISD::ZERO_EXTEND ||
24691 N->getOpcode() == ISD::SIGN_EXTEND) && "Invalid Node");
24693 SDValue Narrow = N->getOperand(0);
24694 EVT NarrowVT = Narrow->getValueType(0);
24695 if (!NarrowVT.is128BitVector())
return SDValue();
24698 if (Narrow->getOpcode() != ISD::XOR &&
24699 Narrow->getOpcode() != ISD::AND &&
24700 Narrow->getOpcode() != ISD::OR)
return SDValue();
24703 SDValue N0 = Narrow->getOperand(0);
24704 SDValue N1 = Narrow->getOperand(1);
24707 // The Left side has to be a trunc.
24708 if (N0.getOpcode() != ISD::TRUNCATE)
24711 // The type of the truncated inputs.
24712 EVT WideVT = N0->getOperand(0)->getValueType(0);
24716 // The right side has to be a 'trunc' or a constant vector.
24717 bool RHSTrunc = N1.getOpcode() == ISD::TRUNCATE;
24718 ConstantSDNode *RHSConstSplat = nullptr;
24719 if (auto *RHSBV = dyn_cast<BuildVectorSDNode>(N1))
24720 RHSConstSplat = RHSBV->getConstantSplatNode();
24721 if (!RHSTrunc && !RHSConstSplat)
return SDValue();
24724 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
24726 if (!TLI.isOperationLegalOrPromote(Narrow->getOpcode(), WideVT))
return SDValue();
24729 // Set N0 and N1 to hold the inputs to the new wide operation.
24730 N0 = N0->getOperand(0);
24731 if (RHSConstSplat) {
24732 N1 = DAG.getNode(ISD::ZERO_EXTEND, DL, WideVT.getScalarType(),
24733 SDValue(RHSConstSplat, 0));
24734 SmallVector<SDValue, 8> C(WideVT.getVectorNumElements(), N1);
24735 N1 = DAG.getNode(ISD::BUILD_VECTOR, DL, WideVT, C);
24736 } else if (RHSTrunc) {
24737 N1 = N1->getOperand(0);
24740 // Generate the wide operation.
24741 SDValue Op = DAG.getNode(Narrow->getOpcode(), DL, WideVT, N0, N1);
24742 unsigned Opcode = N->getOpcode();
switch (Opcode) {
24744 case ISD::ANY_EXTEND:
return Op;
24746 case ISD::ZERO_EXTEND: {
24747 unsigned InBits = NarrowVT.getScalarType().getSizeInBits();
24748 APInt Mask = APInt::getAllOnesValue(InBits);
24749 Mask = Mask.zext(VT.getScalarType().getSizeInBits());
24750 return DAG.getNode(ISD::AND, DL, VT,
24751 Op, DAG.getConstant(Mask, VT));
24753 case ISD::SIGN_EXTEND:
24754 return DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, VT,
24755 Op, DAG.getValueType(NarrowVT));
24757 llvm_unreachable("Unexpected opcode");
24761 static SDValue VectorZextCombine(SDNode *N, SelectionDAG &DAG,
24762 TargetLowering::DAGCombinerInfo &DCI,
24763 const X86Subtarget *Subtarget) {
24764 SDValue N0 = N->getOperand(0);
24765 SDValue N1 = N->getOperand(1);
24768 // A vector zext_in_reg may be represented as a shuffle,
24769 // feeding into a bitcast (this represents anyext) feeding into
24770 // an and with a mask.
24771 // We'd like to try to combine that into a shuffle with zero
24772 // plus a bitcast, removing the and.
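// For example, a v16i8-to-v4i32 zext_in_reg may appear as
//   (and (bitcast (shuffle X, undef, <0,u,u,u,1,u,u,u,...>)), (splat 255));
// rewriting the shuffle to take the filler lanes from a zero vector
// (<0,16,16,16,1,16,16,16,...>) makes the 'and' redundant.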
24773 if (N0.getOpcode() != ISD::BITCAST ||
24774 N0.getOperand(0).getOpcode() != ISD::VECTOR_SHUFFLE)
24777 // The other side of the AND should be a splat of 2^C - 1, where C
24778 // is the number of bits in the source element type.
24779 if (N1.getOpcode() == ISD::BITCAST)
24780 N1 = N1.getOperand(0);
24781 if (N1.getOpcode() != ISD::BUILD_VECTOR)
return SDValue();
24783 BuildVectorSDNode *Vector = cast<BuildVectorSDNode>(N1);
24785 ShuffleVectorSDNode *Shuffle = cast<ShuffleVectorSDNode>(N0.getOperand(0));
24786 EVT SrcType = Shuffle->getValueType(0);
24788 // We expect a single-source shuffle
24789 if (Shuffle->getOperand(1)->getOpcode() != ISD::UNDEF)
return SDValue();
24792 unsigned SrcSize = SrcType.getScalarSizeInBits();
24794 APInt SplatValue, SplatUndef;
24795 unsigned SplatBitSize;
bool HasAnyUndefs;
24797 if (!Vector->isConstantSplat(SplatValue, SplatUndef,
24798 SplatBitSize, HasAnyUndefs))
return SDValue();
24801 unsigned ResSize = N1.getValueType().getScalarSizeInBits();
24802 // Make sure the splat matches the mask we expect
24803 if (SplatBitSize > ResSize ||
24804 (SplatValue + 1).exactLogBase2() != (int)SrcSize)
24807 // Make sure the input and output size make sense
24808 if (SrcSize >= ResSize || ResSize % SrcSize)
24811 // We expect a shuffle of the form <0, u, u, u, 1, u, u, u...>
24812 // The number of u's between each two values depends on the ratio between
24813 // the source and dest type.
24814 unsigned ZextRatio = ResSize / SrcSize;
24815 bool IsZext = true;
24816 for (unsigned i = 0; i < SrcType.getVectorNumElements(); ++i) {
24817 if (i % ZextRatio) {
24818 if (Shuffle->getMaskElt(i) > 0) {
24824 if (Shuffle->getMaskElt(i) != (int)(i / ZextRatio)) {
24825 // Expected element number
24835 // Ok, perform the transformation - replace the shuffle with
24836 // a shuffle of the form <0, k, k, k, 1, k, k, k> with zero
24837 // (instead of undef) where the k elements come from the zero vector.
24838 SmallVector<int, 8> Mask;
24839 unsigned NumElems = SrcType.getVectorNumElements();
24840 for (unsigned i = 0; i < NumElems; ++i)
if (i % ZextRatio)
24842 Mask.push_back(NumElems);
else
24844 Mask.push_back(i / ZextRatio);
24846 SDValue NewShuffle = DAG.getVectorShuffle(Shuffle->getValueType(0), DL,
24847 Shuffle->getOperand(0), DAG.getConstant(0, SrcType), Mask);
24848 return DAG.getNode(ISD::BITCAST, DL, N0.getValueType(), NewShuffle);
24851 static SDValue PerformAndCombine(SDNode *N, SelectionDAG &DAG,
24852 TargetLowering::DAGCombinerInfo &DCI,
24853 const X86Subtarget *Subtarget) {
24854 if (DCI.isBeforeLegalizeOps())
return SDValue();
24857 SDValue Zext = VectorZextCombine(N, DAG, DCI, Subtarget);
24858 if (Zext.getNode())
return Zext;
24861 SDValue R = CMPEQCombine(N, DAG, DCI, Subtarget);
if (R.getNode())
return R;
24865 EVT VT = N->getValueType(0);
24866 SDValue N0 = N->getOperand(0);
24867 SDValue N1 = N->getOperand(1);
24870 // Create BEXTR instructions
24871 // BEXTR is ((X >> imm) & (2**size-1))
24872 if (VT == MVT::i32 || VT == MVT::i64) {
24873 // Check for BEXTR.
24874 if ((Subtarget->hasBMI() || Subtarget->hasTBM()) &&
24875 (N0.getOpcode() == ISD::SRA || N0.getOpcode() == ISD::SRL)) {
24876 ConstantSDNode *MaskNode = dyn_cast<ConstantSDNode>(N1);
24877 ConstantSDNode *ShiftNode = dyn_cast<ConstantSDNode>(N0.getOperand(1));
24878 if (MaskNode && ShiftNode) {
24879 uint64_t Mask = MaskNode->getZExtValue();
24880 uint64_t Shift = ShiftNode->getZExtValue();
24881 if (isMask_64(Mask)) {
24882 uint64_t MaskSize = countPopulation(Mask);
24883 if (Shift + MaskSize <= VT.getSizeInBits())
24884 return DAG.getNode(X86ISD::BEXTR, DL, VT, N0.getOperand(0),
24885 DAG.getConstant(Shift | (MaskSize << 8), VT));
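// For example, ((x >> 4) & 0xFFF) becomes (bextr x, 0xC04): the control
// operand packs the field length (12) in bits 15:8 and the start bit (4) in
// bits 7:0.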
24893 // Want to form ANDNP nodes:
24894 // 1) In the hopes of then easily combining them with OR and AND nodes
24895 // to form PBLEND/PSIGN.
24896 // 2) To match ANDN packed intrinsics
24897 if (VT != MVT::v2i64 && VT != MVT::v4i64)
24900 // Check LHS for vnot
24901 if (N0.getOpcode() == ISD::XOR &&
24902 //ISD::isBuildVectorAllOnes(N0.getOperand(1).getNode()))
24903 CanFoldXORWithAllOnes(N0.getOperand(1).getNode()))
24904 return DAG.getNode(X86ISD::ANDNP, DL, VT, N0.getOperand(0), N1);
24906 // Check RHS for vnot
24907 if (N1.getOpcode() == ISD::XOR &&
24908 //ISD::isBuildVectorAllOnes(N1.getOperand(1).getNode()))
24909 CanFoldXORWithAllOnes(N1.getOperand(1).getNode()))
24910 return DAG.getNode(X86ISD::ANDNP, DL, VT, N1.getOperand(0), N0);
24915 static SDValue PerformOrCombine(SDNode *N, SelectionDAG &DAG,
24916 TargetLowering::DAGCombinerInfo &DCI,
24917 const X86Subtarget *Subtarget) {
24918 if (DCI.isBeforeLegalizeOps())
return SDValue();
24921 SDValue R = CMPEQCombine(N, DAG, DCI, Subtarget);
if (R.getNode())
return R;
24925 SDValue N0 = N->getOperand(0);
24926 SDValue N1 = N->getOperand(1);
24927 EVT VT = N->getValueType(0);
24929 // look for psign/blend
24930 if (VT == MVT::v2i64 || VT == MVT::v4i64) {
24931 if (!Subtarget->hasSSSE3() ||
24932 (VT == MVT::v4i64 && !Subtarget->hasInt256()))
return SDValue();
24935 // Canonicalize pandn to RHS
24936 if (N0.getOpcode() == X86ISD::ANDNP)
std::swap(N0, N1);
24938 // or (and (m, y), (pandn m, x))
24939 if (N0.getOpcode() == ISD::AND && N1.getOpcode() == X86ISD::ANDNP) {
24940 SDValue Mask = N1.getOperand(0);
24941 SDValue X = N1.getOperand(1);
SDValue Y;
24943 if (N0.getOperand(0) == Mask)
24944 Y = N0.getOperand(1);
24945 if (N0.getOperand(1) == Mask)
24946 Y = N0.getOperand(0);
24948 // Check to see if the mask appeared in both the AND and ANDNP.
if (!Y.getNode())
return SDValue();
24952 // Validate that X, Y, and Mask are BIT_CONVERTS, and see through them.
24953 // Look through mask bitcast.
24954 if (Mask.getOpcode() == ISD::BITCAST)
24955 Mask = Mask.getOperand(0);
24956 if (X.getOpcode() == ISD::BITCAST)
24957 X = X.getOperand(0);
24958 if (Y.getOpcode() == ISD::BITCAST)
24959 Y = Y.getOperand(0);
24961 EVT MaskVT = Mask.getValueType();
24963 // Validate that the Mask operand is a vector sra node.
24964 // FIXME: what to do for bytes, since there is a psignb/pblendvb, but
24965 // there is no psrai.b
24966 unsigned EltBits = MaskVT.getVectorElementType().getSizeInBits();
24967 unsigned SraAmt = ~0;
24968 if (Mask.getOpcode() == ISD::SRA) {
24969 if (auto *AmtBV = dyn_cast<BuildVectorSDNode>(Mask.getOperand(1)))
24970 if (auto *AmtConst = AmtBV->getConstantSplatNode())
24971 SraAmt = AmtConst->getZExtValue();
24972 } else if (Mask.getOpcode() == X86ISD::VSRAI) {
24973 SDValue SraC = Mask.getOperand(1);
24974 SraAmt = cast<ConstantSDNode>(SraC)->getZExtValue();
24976 if ((SraAmt + 1) != EltBits)
return SDValue();
24981 // Now we know we at least have a plendvb with the mask val. See if
24982 // we can form a psignb/w/d.
24983 // psign = x.type == y.type == mask.type && y = sub(0, x);
24984 if (Y.getOpcode() == ISD::SUB && Y.getOperand(1) == X &&
24985 ISD::isBuildVectorAllZeros(Y.getOperand(0).getNode()) &&
24986 X.getValueType() == MaskVT && Y.getValueType() == MaskVT) {
24987 assert((EltBits == 8 || EltBits == 16 || EltBits == 32) &&
24988 "Unsupported VT for PSIGN");
24989 Mask = DAG.getNode(X86ISD::PSIGN, DL, MaskVT, X, Mask.getOperand(0));
24990 return DAG.getNode(ISD::BITCAST, DL, VT, Mask);
24992 // PBLENDVB only available on SSE 4.1
24993 if (!Subtarget->hasSSE41())
return SDValue();
24996 EVT BlendVT = (VT == MVT::v4i64) ? MVT::v32i8 : MVT::v16i8;
24998 X = DAG.getNode(ISD::BITCAST, DL, BlendVT, X);
24999 Y = DAG.getNode(ISD::BITCAST, DL, BlendVT, Y);
25000 Mask = DAG.getNode(ISD::BITCAST, DL, BlendVT, Mask);
25001 Mask = DAG.getNode(ISD::VSELECT, DL, BlendVT, Mask, Y, X);
25002 return DAG.getNode(ISD::BITCAST, DL, VT, Mask);
25006 if (VT != MVT::i16 && VT != MVT::i32 && VT != MVT::i64)
25009 // fold (or (x << c) | (y >> (64 - c))) ==> (shld64 x, y, c)
25010 MachineFunction &MF = DAG.getMachineFunction();
25012 MF.getFunction()->hasFnAttribute(Attribute::OptimizeForSize);
25014 // SHLD/SHRD instructions have lower register pressure, but on some
25015 // platforms they have higher latency than the equivalent
25016 // series of shifts/or that would otherwise be generated.
25017 // Don't fold (or (x << c) | (y >> (64 - c))) if SHLD/SHRD instructions
25018 // have higher latencies and we are not optimizing for size.
25019 if (!OptForSize && Subtarget->isSHLDSlow())
return SDValue();
25022 if (N0.getOpcode() == ISD::SRL && N1.getOpcode() == ISD::SHL)
std::swap(N0, N1);
25024 if (N0.getOpcode() != ISD::SHL || N1.getOpcode() != ISD::SRL)
return SDValue();
25026 if (!N0.hasOneUse() || !N1.hasOneUse())
return SDValue();
25029 SDValue ShAmt0 = N0.getOperand(1);
25030 if (ShAmt0.getValueType() != MVT::i8)
return SDValue();
25032 SDValue ShAmt1 = N1.getOperand(1);
25033 if (ShAmt1.getValueType() != MVT::i8)
return SDValue();
25035 if (ShAmt0.getOpcode() == ISD::TRUNCATE)
25036 ShAmt0 = ShAmt0.getOperand(0);
25037 if (ShAmt1.getOpcode() == ISD::TRUNCATE)
25038 ShAmt1 = ShAmt1.getOperand(0);
25041 unsigned Opc = X86ISD::SHLD;
25042 SDValue Op0 = N0.getOperand(0);
25043 SDValue Op1 = N1.getOperand(0);
25044 if (ShAmt0.getOpcode() == ISD::SUB) {
25045 Opc = X86ISD::SHRD;
25046 std::swap(Op0, Op1);
25047 std::swap(ShAmt0, ShAmt1);
25050 unsigned Bits = VT.getSizeInBits();
25051 if (ShAmt1.getOpcode() == ISD::SUB) {
25052 SDValue Sum = ShAmt1.getOperand(0);
25053 if (ConstantSDNode *SumC = dyn_cast<ConstantSDNode>(Sum)) {
25054 SDValue ShAmt1Op1 = ShAmt1.getOperand(1);
25055 if (ShAmt1Op1.getNode()->getOpcode() == ISD::TRUNCATE)
25056 ShAmt1Op1 = ShAmt1Op1.getOperand(0);
25057 if (SumC->getSExtValue() == Bits && ShAmt1Op1 == ShAmt0)
25058 return DAG.getNode(Opc, DL, VT,
25060 DAG.getNode(ISD::TRUNCATE, DL,
25063 } else if (ConstantSDNode *ShAmt1C = dyn_cast<ConstantSDNode>(ShAmt1)) {
25064 ConstantSDNode *ShAmt0C = dyn_cast<ConstantSDNode>(ShAmt0);
25066 ShAmt0C->getSExtValue() + ShAmt1C->getSExtValue() == Bits)
25067 return DAG.getNode(Opc, DL, VT,
25068 N0.getOperand(0), N1.getOperand(0),
25069 DAG.getNode(ISD::TRUNCATE, DL,
25076 // Generate NEG and CMOV for integer abs.
25077 static SDValue performIntegerAbsCombine(SDNode *N, SelectionDAG &DAG) {
25078 EVT VT = N->getValueType(0);
25080 // Since X86 does not have CMOV for 8-bit integers, we don't convert
25081 // 8-bit integer abs to NEG and CMOV.
25082 if (VT.isInteger() && VT.getSizeInBits() == 8)
25085 SDValue N0 = N->getOperand(0);
25086 SDValue N1 = N->getOperand(1);
25089 // Check pattern of XOR(ADD(X,Y), Y) where Y is SRA(X, size(X)-1)
25090 // and change it to SUB and CMOV.
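// For example, with i32 values and Y = (sra X, 31), the source pattern computes
//   (X + Y) ^ Y
// which is the classic branchless abs(X). The combine instead emits
//   Neg = 0 - X
// and a CMOV on the flags of that subtraction to pick between X and Neg.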
25091 if (VT.isInteger() && N->getOpcode() == ISD::XOR &&
25092 N0.getOpcode() == ISD::ADD &&
25093 N0.getOperand(1) == N1 &&
25094 N1.getOpcode() == ISD::SRA &&
25095 N1.getOperand(0) == N0.getOperand(0))
25096 if (ConstantSDNode *Y1C = dyn_cast<ConstantSDNode>(N1.getOperand(1)))
25097 if (Y1C->getAPIntValue() == VT.getSizeInBits()-1) {
25098 // Generate SUB & CMOV.
25099 SDValue Neg = DAG.getNode(X86ISD::SUB, DL, DAG.getVTList(VT, MVT::i32),
25100 DAG.getConstant(0, VT), N0.getOperand(0));
25102 SDValue Ops[] = { N0.getOperand(0), Neg,
25103 DAG.getConstant(X86::COND_GE, MVT::i8),
25104 SDValue(Neg.getNode(), 1) };
25105 return DAG.getNode(X86ISD::CMOV, DL, DAG.getVTList(VT, MVT::Glue), Ops);
25110 // PerformXorCombine - Attempts to turn XOR nodes into BLSMSK nodes
25111 static SDValue PerformXorCombine(SDNode *N, SelectionDAG &DAG,
25112 TargetLowering::DAGCombinerInfo &DCI,
25113 const X86Subtarget *Subtarget) {
25114 if (DCI.isBeforeLegalizeOps())
25117 if (Subtarget->hasCMov()) {
25118 SDValue RV = performIntegerAbsCombine(N, DAG);
25126 /// PerformLOADCombine - Do target-specific dag combines on LOAD nodes.
25127 static SDValue PerformLOADCombine(SDNode *N, SelectionDAG &DAG,
25128 TargetLowering::DAGCombinerInfo &DCI,
25129 const X86Subtarget *Subtarget) {
25130 LoadSDNode *Ld = cast<LoadSDNode>(N);
25131 EVT RegVT = Ld->getValueType(0);
25132 EVT MemVT = Ld->getMemoryVT();
25134 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
25136 // For chips with slow 32-byte unaligned loads, break the 32-byte operation
25137 // into two 16-byte operations.
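// For example (a sketch), an unaligned v8f32 load on such a target becomes two
// v4f32 loads from Ptr and Ptr+16, their chains joined by a TokenFactor, with
// the halves reassembled by Insert128BitVector into the original 256-bit value.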
25138 ISD::LoadExtType Ext = Ld->getExtensionType();
25139 unsigned Alignment = Ld->getAlignment();
25140 bool IsAligned = Alignment == 0 || Alignment >= MemVT.getSizeInBits()/8;
25141 if (RegVT.is256BitVector() && Subtarget->isUnalignedMem32Slow() &&
25142 !DCI.isBeforeLegalizeOps() && !IsAligned && Ext == ISD::NON_EXTLOAD) {
25143 unsigned NumElems = RegVT.getVectorNumElements();
25147 SDValue Ptr = Ld->getBasePtr();
25148 SDValue Increment = DAG.getConstant(16, TLI.getPointerTy());
25150 EVT HalfVT = EVT::getVectorVT(*DAG.getContext(), MemVT.getScalarType(),
25152 SDValue Load1 = DAG.getLoad(HalfVT, dl, Ld->getChain(), Ptr,
25153 Ld->getPointerInfo(), Ld->isVolatile(),
25154 Ld->isNonTemporal(), Ld->isInvariant(),
25156 Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, Increment);
25157 SDValue Load2 = DAG.getLoad(HalfVT, dl, Ld->getChain(), Ptr,
25158 Ld->getPointerInfo(), Ld->isVolatile(),
25159 Ld->isNonTemporal(), Ld->isInvariant(),
25160 std::min(16U, Alignment));
25161 SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
25163 Load2.getValue(1));
25165 SDValue NewVec = DAG.getUNDEF(RegVT);
25166 NewVec = Insert128BitVector(NewVec, Load1, 0, DAG, dl);
25167 NewVec = Insert128BitVector(NewVec, Load2, NumElems/2, DAG, dl);
25168 return DCI.CombineTo(N, NewVec, TF, true);
25174 /// PerformMLOADCombine - Resolve extending loads
25175 static SDValue PerformMLOADCombine(SDNode *N, SelectionDAG &DAG,
25176 TargetLowering::DAGCombinerInfo &DCI,
25177 const X86Subtarget *Subtarget) {
25178 MaskedLoadSDNode *Mld = cast<MaskedLoadSDNode>(N);
25179 if (Mld->getExtensionType() != ISD::SEXTLOAD)
25182 EVT VT = Mld->getValueType(0);
25183 unsigned NumElems = VT.getVectorNumElements();
25184 EVT LdVT = Mld->getMemoryVT();
25187 assert(LdVT != VT && "Cannot extend to the same type");
25188 unsigned ToSz = VT.getVectorElementType().getSizeInBits();
25189 unsigned FromSz = LdVT.getVectorElementType().getSizeInBits();
25190 // From, To sizes and ElemCount must be powers of two
25191 assert (isPowerOf2_32(NumElems * FromSz * ToSz) &&
25192 "Unexpected size for extending masked load");
25194 unsigned SizeRatio = ToSz / FromSz;
25195 assert(SizeRatio * NumElems * FromSz == VT.getSizeInBits());
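// For example (a sketch), a masked SEXTLOAD of v8i16 from memory into a v8i32
// register has SizeRatio == 2: the load is widened to v16i16 with a mask whose
// extra lanes are zero, and the low 8 lanes are then sign-extended with VSEXT.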
25197 // Create a type on which we perform the shuffle
25198 EVT WideVecVT = EVT::getVectorVT(*DAG.getContext(),
25199 LdVT.getScalarType(), NumElems*SizeRatio);
25200 assert(WideVecVT.getSizeInBits() == VT.getSizeInBits());
25202 // Convert Src0 value
25203 SDValue WideSrc0 = DAG.getNode(ISD::BITCAST, dl, WideVecVT, Mld->getSrc0());
25204 if (Mld->getSrc0().getOpcode() != ISD::UNDEF) {
25205 SmallVector<int, 16> ShuffleVec(NumElems * SizeRatio, -1);
25206 for (unsigned i = 0; i != NumElems; ++i)
25207 ShuffleVec[i] = i * SizeRatio;
25209 // Can't shuffle using an illegal type.
25210 assert (DAG.getTargetLoweringInfo().isTypeLegal(WideVecVT)
25211 && "WideVecVT should be legal");
25212 WideSrc0 = DAG.getVectorShuffle(WideVecVT, dl, WideSrc0,
25213 DAG.getUNDEF(WideVecVT), &ShuffleVec[0]);
25215 // Prepare the new mask
25217 SDValue Mask = Mld->getMask();
25218 if (Mask.getValueType() == VT) {
25219 // Mask and original value have the same type
25220 NewMask = DAG.getNode(ISD::BITCAST, dl, WideVecVT, Mask);
25221 SmallVector<int, 16> ShuffleVec(NumElems * SizeRatio, -1);
25222 for (unsigned i = 0; i != NumElems; ++i)
25223 ShuffleVec[i] = i * SizeRatio;
25224 for (unsigned i = NumElems; i != NumElems*SizeRatio; ++i)
25225 ShuffleVec[i] = NumElems*SizeRatio;
25226 NewMask = DAG.getVectorShuffle(WideVecVT, dl, NewMask,
25227 DAG.getConstant(0, WideVecVT),
25231 assert(Mask.getValueType().getVectorElementType() == MVT::i1);
25232 unsigned WidenNumElts = NumElems*SizeRatio;
25233 unsigned MaskNumElts = VT.getVectorNumElements();
25234 EVT NewMaskVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
25237 unsigned NumConcat = WidenNumElts / MaskNumElts;
25238 SmallVector<SDValue, 16> Ops(NumConcat);
25239 SDValue ZeroVal = DAG.getConstant(0, Mask.getValueType());
25241 for (unsigned i = 1; i != NumConcat; ++i)
25244 NewMask = DAG.getNode(ISD::CONCAT_VECTORS, dl, NewMaskVT, Ops);
25247 SDValue WideLd = DAG.getMaskedLoad(WideVecVT, dl, Mld->getChain(),
25248 Mld->getBasePtr(), NewMask, WideSrc0,
25249 Mld->getMemoryVT(), Mld->getMemOperand(),
25251 SDValue NewVec = DAG.getNode(X86ISD::VSEXT, dl, VT, WideLd);
25252 return DCI.CombineTo(N, NewVec, WideLd.getValue(1), true);
25255 /// PerformMSTORECombine - Resolve truncating stores
25256 static SDValue PerformMSTORECombine(SDNode *N, SelectionDAG &DAG,
25257 const X86Subtarget *Subtarget) {
25258 MaskedStoreSDNode *Mst = cast<MaskedStoreSDNode>(N);
25259 if (!Mst->isTruncatingStore())
25262 EVT VT = Mst->getValue().getValueType();
25263 unsigned NumElems = VT.getVectorNumElements();
25264 EVT StVT = Mst->getMemoryVT();
25267 assert(StVT != VT && "Cannot truncate to the same type");
25268 unsigned FromSz = VT.getVectorElementType().getSizeInBits();
25269 unsigned ToSz = StVT.getVectorElementType().getSizeInBits();
25271 // From, To sizes and ElemCount must be powers of two
25272 assert (isPowerOf2_32(NumElems * FromSz * ToSz) &&
25273 "Unexpected size for truncating masked store");
25274 // We are going to use the original vector elt for storing.
25275 // Accumulated smaller vector elements must be a multiple of the store size.
25276 assert (((NumElems * FromSz) % ToSz) == 0 &&
25277 "Unexpected ratio for truncating masked store");
25279 unsigned SizeRatio = FromSz / ToSz;
25280 assert(SizeRatio * NumElems * ToSz == VT.getSizeInBits());
25282 // Create a type on which we perform the shuffle
25283 EVT WideVecVT = EVT::getVectorVT(*DAG.getContext(),
25284 StVT.getScalarType(), NumElems*SizeRatio);
25286 assert(WideVecVT.getSizeInBits() == VT.getSizeInBits());
25288 SDValue WideVec = DAG.getNode(ISD::BITCAST, dl, WideVecVT, Mst->getValue());
25289 SmallVector<int, 16> ShuffleVec(NumElems * SizeRatio, -1);
25290 for (unsigned i = 0; i != NumElems; ++i)
25291 ShuffleVec[i] = i * SizeRatio;
25293 // Can't shuffle using an illegal type.
25294 assert (DAG.getTargetLoweringInfo().isTypeLegal(WideVecVT)
25295 && "WideVecVT should be legal");
25297 SDValue TruncatedVal = DAG.getVectorShuffle(WideVecVT, dl, WideVec,
25298 DAG.getUNDEF(WideVecVT),
25302 SDValue Mask = Mst->getMask();
25303 if (Mask.getValueType() == VT) {
25304 // Mask and original value have the same type
25305 NewMask = DAG.getNode(ISD::BITCAST, dl, WideVecVT, Mask);
25306 for (unsigned i = 0; i != NumElems; ++i)
25307 ShuffleVec[i] = i * SizeRatio;
25308 for (unsigned i = NumElems; i != NumElems*SizeRatio; ++i)
25309 ShuffleVec[i] = NumElems*SizeRatio;
25310 NewMask = DAG.getVectorShuffle(WideVecVT, dl, NewMask,
25311 DAG.getConstant(0, WideVecVT),
25315 assert(Mask.getValueType().getVectorElementType() == MVT::i1);
25316 unsigned WidenNumElts = NumElems*SizeRatio;
25317 unsigned MaskNumElts = VT.getVectorNumElements();
25318 EVT NewMaskVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
25321 unsigned NumConcat = WidenNumElts / MaskNumElts;
25322 SmallVector<SDValue, 16> Ops(NumConcat);
25323 SDValue ZeroVal = DAG.getConstant(0, Mask.getValueType());
25325 for (unsigned i = 1; i != NumConcat; ++i)
25328 NewMask = DAG.getNode(ISD::CONCAT_VECTORS, dl, NewMaskVT, Ops);
25331 return DAG.getMaskedStore(Mst->getChain(), dl, TruncatedVal, Mst->getBasePtr(),
25332 NewMask, StVT, Mst->getMemOperand(), false);
25334 /// PerformSTORECombine - Do target-specific dag combines on STORE nodes.
25335 static SDValue PerformSTORECombine(SDNode *N, SelectionDAG &DAG,
25336 const X86Subtarget *Subtarget) {
25337 StoreSDNode *St = cast<StoreSDNode>(N);
25338 EVT VT = St->getValue().getValueType();
25339 EVT StVT = St->getMemoryVT();
25341 SDValue StoredVal = St->getOperand(1);
25342 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
25344 // If we are saving a concatenation of two XMM registers and 32-byte stores
25345 // are slow, such as on Sandy Bridge, perform two 16-byte stores.
25346 unsigned Alignment = St->getAlignment();
25347 bool IsAligned = Alignment == 0 || Alignment >= VT.getSizeInBits()/8;
25348 if (VT.is256BitVector() && Subtarget->isUnalignedMem32Slow() &&
25349 StVT == VT && !IsAligned) {
25350 unsigned NumElems = VT.getVectorNumElements();
25354 SDValue Value0 = Extract128BitVector(StoredVal, 0, DAG, dl);
25355 SDValue Value1 = Extract128BitVector(StoredVal, NumElems/2, DAG, dl);
25357 SDValue Stride = DAG.getConstant(16, TLI.getPointerTy());
25358 SDValue Ptr0 = St->getBasePtr();
25359 SDValue Ptr1 = DAG.getNode(ISD::ADD, dl, Ptr0.getValueType(), Ptr0, Stride);
25361 SDValue Ch0 = DAG.getStore(St->getChain(), dl, Value0, Ptr0,
25362 St->getPointerInfo(), St->isVolatile(),
25363 St->isNonTemporal(), Alignment);
25364 SDValue Ch1 = DAG.getStore(St->getChain(), dl, Value1, Ptr1,
25365 St->getPointerInfo(), St->isVolatile(),
25366 St->isNonTemporal(),
25367 std::min(16U, Alignment));
25368 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Ch0, Ch1);
25371 // Optimize trunc store (of multiple scalars) to shuffle and store.
25372 // First, pack all of the elements in one place. Next, store to memory
25373 // in fewer chunks.
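// For example (a sketch), a truncating store of v8i32 as v8i16 is rewritten as
// a v16i16 shuffle that packs the eight i16 results into the low 128 bits,
// which are then written out with the widest legal scalar stores found below.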
25374 if (St->isTruncatingStore() && VT.isVector()) {
25375 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
25376 unsigned NumElems = VT.getVectorNumElements();
25377 assert(StVT != VT && "Cannot truncate to the same type");
25378 unsigned FromSz = VT.getVectorElementType().getSizeInBits();
25379 unsigned ToSz = StVT.getVectorElementType().getSizeInBits();
25381 // From, To sizes and ElemCount must be powers of two
25382 if (!isPowerOf2_32(NumElems * FromSz * ToSz)) return SDValue();
25383 // We are going to use the original vector elt for storing.
25384 // Accumulated smaller vector elements must be a multiple of the store size.
25385 if (0 != (NumElems * FromSz) % ToSz) return SDValue();
25387 unsigned SizeRatio = FromSz / ToSz;
25389 assert(SizeRatio * NumElems * ToSz == VT.getSizeInBits());
25391 // Create a type on which we perform the shuffle
25392 EVT WideVecVT = EVT::getVectorVT(*DAG.getContext(),
25393 StVT.getScalarType(), NumElems*SizeRatio);
25395 assert(WideVecVT.getSizeInBits() == VT.getSizeInBits());
25397 SDValue WideVec = DAG.getNode(ISD::BITCAST, dl, WideVecVT, St->getValue());
25398 SmallVector<int, 8> ShuffleVec(NumElems * SizeRatio, -1);
25399 for (unsigned i = 0; i != NumElems; ++i)
25400 ShuffleVec[i] = i * SizeRatio;
25402 // Can't shuffle using an illegal type.
25403 if (!TLI.isTypeLegal(WideVecVT))
25406 SDValue Shuff = DAG.getVectorShuffle(WideVecVT, dl, WideVec,
25407 DAG.getUNDEF(WideVecVT),
25409 // At this point all of the data is stored at the bottom of the
25410 // register. We now need to save it to mem.
25412 // Find the largest store unit
25413 MVT StoreType = MVT::i8;
25414 for (MVT Tp : MVT::integer_valuetypes()) {
25415 if (TLI.isTypeLegal(Tp) && Tp.getSizeInBits() <= NumElems * ToSz)
25419 // On 32-bit systems, we can't save 64-bit integers. Try bitcasting to f64.
25420 if (TLI.isTypeLegal(MVT::f64) && StoreType.getSizeInBits() < 64 &&
25421 (64 <= NumElems * ToSz))
25422 StoreType = MVT::f64;
25424 // Bitcast the original vector into a vector of store-size units
25425 EVT StoreVecVT = EVT::getVectorVT(*DAG.getContext(),
25426 StoreType, VT.getSizeInBits()/StoreType.getSizeInBits());
25427 assert(StoreVecVT.getSizeInBits() == VT.getSizeInBits());
25428 SDValue ShuffWide = DAG.getNode(ISD::BITCAST, dl, StoreVecVT, Shuff);
25429 SmallVector<SDValue, 8> Chains;
25430 SDValue Increment = DAG.getConstant(StoreType.getSizeInBits()/8,
25431 TLI.getPointerTy());
25432 SDValue Ptr = St->getBasePtr();
25434 // Perform one or more big stores into memory.
25435 for (unsigned i=0, e=(ToSz*NumElems)/StoreType.getSizeInBits(); i!=e; ++i) {
25436 SDValue SubVec = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl,
25437 StoreType, ShuffWide,
25438 DAG.getIntPtrConstant(i));
25439 SDValue Ch = DAG.getStore(St->getChain(), dl, SubVec, Ptr,
25440 St->getPointerInfo(), St->isVolatile(),
25441 St->isNonTemporal(), St->getAlignment());
25442 Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, Increment);
25443 Chains.push_back(Ch);
25446 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Chains);
25449 // Turn load->store of MMX types into GPR load/stores. This avoids clobbering
25450 // the FP state in cases where an emms may be missing.
25451 // A preferable solution to the general problem is to figure out the right
25452 // places to insert EMMS. This qualifies as a quick hack.
25454 // Similarly, turn load->store of i64 into double load/stores in 32-bit mode.
25455 if (VT.getSizeInBits() != 64)
25458 const Function *F = DAG.getMachineFunction().getFunction();
25459 bool NoImplicitFloatOps = F->hasFnAttribute(Attribute::NoImplicitFloat);
25460 bool F64IsLegal = !DAG.getTarget().Options.UseSoftFloat && !NoImplicitFloatOps
25461 && Subtarget->hasSSE2();
25462 if ((VT.isVector() ||
25463 (VT == MVT::i64 && F64IsLegal && !Subtarget->is64Bit())) &&
25464 isa<LoadSDNode>(St->getValue()) &&
25465 !cast<LoadSDNode>(St->getValue())->isVolatile() &&
25466 St->getChain().hasOneUse() && !St->isVolatile()) {
25467 SDNode* LdVal = St->getValue().getNode();
25468 LoadSDNode *Ld = nullptr;
25469 int TokenFactorIndex = -1;
25470 SmallVector<SDValue, 8> Ops;
25471 SDNode* ChainVal = St->getChain().getNode();
25472 // Must be a store of a load. We currently handle two cases: the load
25473 // is a direct child, and it's under an intervening TokenFactor. It is
25474 // possible to dig deeper under nested TokenFactors.
25475 if (ChainVal == LdVal)
25476 Ld = cast<LoadSDNode>(St->getChain());
25477 else if (St->getValue().hasOneUse() &&
25478 ChainVal->getOpcode() == ISD::TokenFactor) {
25479 for (unsigned i = 0, e = ChainVal->getNumOperands(); i != e; ++i) {
25480 if (ChainVal->getOperand(i).getNode() == LdVal) {
25481 TokenFactorIndex = i;
25482 Ld = cast<LoadSDNode>(St->getValue());
25484 Ops.push_back(ChainVal->getOperand(i));
25488 if (!Ld || !ISD::isNormalLoad(Ld))
25491 // If this is not the MMX case, i.e. we are just turning i64 load/store
25492 // into f64 load/store, avoid the transformation if there are multiple
25493 // uses of the loaded value.
25494 if (!VT.isVector() && !Ld->hasNUsesOfValue(1, 0))
25499 // If we are a 64-bit capable x86, lower to a single movq load/store pair.
25500 // Otherwise, if it's legal to use f64 SSE instructions, use f64 load/store
25502 if (Subtarget->is64Bit() || F64IsLegal) {
25503 EVT LdVT = Subtarget->is64Bit() ? MVT::i64 : MVT::f64;
25504 SDValue NewLd = DAG.getLoad(LdVT, LdDL, Ld->getChain(), Ld->getBasePtr(),
25505 Ld->getPointerInfo(), Ld->isVolatile(),
25506 Ld->isNonTemporal(), Ld->isInvariant(),
25507 Ld->getAlignment());
25508 SDValue NewChain = NewLd.getValue(1);
25509 if (TokenFactorIndex != -1) {
25510 Ops.push_back(NewChain);
25511 NewChain = DAG.getNode(ISD::TokenFactor, LdDL, MVT::Other, Ops);
25513 return DAG.getStore(NewChain, StDL, NewLd, St->getBasePtr(),
25514 St->getPointerInfo(),
25515 St->isVolatile(), St->isNonTemporal(),
25516 St->getAlignment());
25519 // Otherwise, lower to two pairs of 32-bit loads / stores.
25520 SDValue LoAddr = Ld->getBasePtr();
25521 SDValue HiAddr = DAG.getNode(ISD::ADD, LdDL, MVT::i32, LoAddr,
25522 DAG.getConstant(4, MVT::i32));
25524 SDValue LoLd = DAG.getLoad(MVT::i32, LdDL, Ld->getChain(), LoAddr,
25525 Ld->getPointerInfo(),
25526 Ld->isVolatile(), Ld->isNonTemporal(),
25527 Ld->isInvariant(), Ld->getAlignment());
25528 SDValue HiLd = DAG.getLoad(MVT::i32, LdDL, Ld->getChain(), HiAddr,
25529 Ld->getPointerInfo().getWithOffset(4),
25530 Ld->isVolatile(), Ld->isNonTemporal(),
25532 MinAlign(Ld->getAlignment(), 4));
25534 SDValue NewChain = LoLd.getValue(1);
25535 if (TokenFactorIndex != -1) {
25536 Ops.push_back(LoLd);
25537 Ops.push_back(HiLd);
25538 NewChain = DAG.getNode(ISD::TokenFactor, LdDL, MVT::Other, Ops);
25541 LoAddr = St->getBasePtr();
25542 HiAddr = DAG.getNode(ISD::ADD, StDL, MVT::i32, LoAddr,
25543 DAG.getConstant(4, MVT::i32));
25545 SDValue LoSt = DAG.getStore(NewChain, StDL, LoLd, LoAddr,
25546 St->getPointerInfo(),
25547 St->isVolatile(), St->isNonTemporal(),
25548 St->getAlignment());
25549 SDValue HiSt = DAG.getStore(NewChain, StDL, HiLd, HiAddr,
25550 St->getPointerInfo().getWithOffset(4),
25552 St->isNonTemporal(),
25553 MinAlign(St->getAlignment(), 4));
25554 return DAG.getNode(ISD::TokenFactor, StDL, MVT::Other, LoSt, HiSt);
25559 /// Return 'true' if this vector operation is "horizontal"
25560 /// and return the operands for the horizontal operation in LHS and RHS. A
25561 /// horizontal operation performs the binary operation on successive elements
25562 /// of its first operand, then on successive elements of its second operand,
25563 /// returning the resulting values in a vector. For example, if
25564 /// A = < float a0, float a1, float a2, float a3 >
25566 /// B = < float b0, float b1, float b2, float b3 >
25567 /// then the result of doing a horizontal operation on A and B is
25568 /// A horizontal-op B = < a0 op a1, a2 op a3, b0 op b1, b2 op b3 >.
25569 /// In short, LHS and RHS are inspected to see if LHS op RHS is of the form
25570 /// A horizontal-op B, for some already available A and B, and if so then LHS is
25571 /// set to A, RHS to B, and the routine returns 'true'.
25572 /// Note that the binary operation should have the property that if one of the
25573 /// operands is UNDEF then the result is UNDEF.
25574 static bool isHorizontalBinOp(SDValue &LHS, SDValue &RHS, bool IsCommutative) {
25575 // Look for the following pattern: if
25576 // A = < float a0, float a1, float a2, float a3 >
25577 // B = < float b0, float b1, float b2, float b3 >
25579 // LHS = VECTOR_SHUFFLE A, B, <0, 2, 4, 6>
25580 // RHS = VECTOR_SHUFFLE A, B, <1, 3, 5, 7>
25581 // then LHS op RHS = < a0 op a1, a2 op a3, b0 op b1, b2 op b3 >
25582 // which is A horizontal-op B.
25584 // At least one of the operands should be a vector shuffle.
25585 if (LHS.getOpcode() != ISD::VECTOR_SHUFFLE &&
25586 RHS.getOpcode() != ISD::VECTOR_SHUFFLE)
25589 MVT VT = LHS.getSimpleValueType();
25591 assert((VT.is128BitVector() || VT.is256BitVector()) &&
25592 "Unsupported vector type for horizontal add/sub");
25594 // Handle 128 and 256-bit vector lengths. AVX defines horizontal add/sub to
25595 // operate independently on 128-bit lanes.
25596 unsigned NumElts = VT.getVectorNumElements();
25597 unsigned NumLanes = VT.getSizeInBits()/128;
25598 unsigned NumLaneElts = NumElts / NumLanes;
25599 assert((NumLaneElts % 2 == 0) &&
25600 "Vector type should have an even number of elements in each lane");
25601 unsigned HalfLaneElts = NumLaneElts/2;
25603 // View LHS in the form
25604 // LHS = VECTOR_SHUFFLE A, B, LMask
25605 // If LHS is not a shuffle then pretend it is the shuffle
25606 // LHS = VECTOR_SHUFFLE LHS, undef, <0, 1, ..., N-1>
25607 // NOTE: in what follows a default initialized SDValue represents an UNDEF of type VT.
25610 SmallVector<int, 16> LMask(NumElts);
25611 if (LHS.getOpcode() == ISD::VECTOR_SHUFFLE) {
25612 if (LHS.getOperand(0).getOpcode() != ISD::UNDEF)
25613 A = LHS.getOperand(0);
25614 if (LHS.getOperand(1).getOpcode() != ISD::UNDEF)
25615 B = LHS.getOperand(1);
25616 ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(LHS.getNode())->getMask();
25617 std::copy(Mask.begin(), Mask.end(), LMask.begin());
25619 if (LHS.getOpcode() != ISD::UNDEF)
25621 for (unsigned i = 0; i != NumElts; ++i)
25625 // Likewise, view RHS in the form
25626 // RHS = VECTOR_SHUFFLE C, D, RMask
25628 SmallVector<int, 16> RMask(NumElts);
25629 if (RHS.getOpcode() == ISD::VECTOR_SHUFFLE) {
25630 if (RHS.getOperand(0).getOpcode() != ISD::UNDEF)
25631 C = RHS.getOperand(0);
25632 if (RHS.getOperand(1).getOpcode() != ISD::UNDEF)
25633 D = RHS.getOperand(1);
25634 ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(RHS.getNode())->getMask();
25635 std::copy(Mask.begin(), Mask.end(), RMask.begin());
25637 if (RHS.getOpcode() != ISD::UNDEF)
25639 for (unsigned i = 0; i != NumElts; ++i)
25643 // Check that the shuffles are both shuffling the same vectors.
25644 if (!(A == C && B == D) && !(A == D && B == C))
25647 // If everything is UNDEF then bail out: it would be better to fold to UNDEF.
25648 if (!A.getNode() && !B.getNode())
25651 // If A and B occur in reverse order in RHS, then "swap" them (which means
25652 // rewriting the mask).
25654 CommuteVectorShuffleMask(RMask, NumElts);
25656 // At this point LHS and RHS are equivalent to
25657 // LHS = VECTOR_SHUFFLE A, B, LMask
25658 // RHS = VECTOR_SHUFFLE A, B, RMask
25659 // Check that the masks correspond to performing a horizontal operation.
25660 for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
25661 for (unsigned i = 0; i != NumLaneElts; ++i) {
25662 int LIdx = LMask[i+l], RIdx = RMask[i+l];
25664 // Ignore any UNDEF components.
25665 if (LIdx < 0 || RIdx < 0 ||
25666 (!A.getNode() && (LIdx < (int)NumElts || RIdx < (int)NumElts)) ||
25667 (!B.getNode() && (LIdx >= (int)NumElts || RIdx >= (int)NumElts)))
25670 // Check that successive elements are being operated on. If not, this is
25671 // not a horizontal operation.
25672 unsigned Src = (i/HalfLaneElts); // each lane is split between srcs
25673 int Index = 2*(i%HalfLaneElts) + NumElts*Src + l;
25674 if (!(LIdx == Index && RIdx == Index + 1) &&
25675 !(IsCommutative && LIdx == Index + 1 && RIdx == Index))
25680 LHS = A.getNode() ? A : B; // If A is 'UNDEF', use B for it.
25681 RHS = B.getNode() ? B : A; // If B is 'UNDEF', use A for it.
25685 /// Do target-specific dag combines on floating point adds.
25686 static SDValue PerformFADDCombine(SDNode *N, SelectionDAG &DAG,
25687 const X86Subtarget *Subtarget) {
25688 EVT VT = N->getValueType(0);
25689 SDValue LHS = N->getOperand(0);
25690 SDValue RHS = N->getOperand(1);
25692 // Try to synthesize horizontal adds from adds of shuffles.
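// For example, (fadd (shuffle A, B, <0,2,4,6>), (shuffle A, B, <1,3,5,7>)) on
// v4f32 becomes a single FHADD (HADDPS) of A and B.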
25693 if (((Subtarget->hasSSE3() && (VT == MVT::v4f32 || VT == MVT::v2f64)) ||
25694 (Subtarget->hasFp256() && (VT == MVT::v8f32 || VT == MVT::v4f64))) &&
25695 isHorizontalBinOp(LHS, RHS, true))
25696 return DAG.getNode(X86ISD::FHADD, SDLoc(N), VT, LHS, RHS);
25700 /// Do target-specific dag combines on floating point subs.
25701 static SDValue PerformFSUBCombine(SDNode *N, SelectionDAG &DAG,
25702 const X86Subtarget *Subtarget) {
25703 EVT VT = N->getValueType(0);
25704 SDValue LHS = N->getOperand(0);
25705 SDValue RHS = N->getOperand(1);
25707 // Try to synthesize horizontal subs from subs of shuffles.
25708 if (((Subtarget->hasSSE3() && (VT == MVT::v4f32 || VT == MVT::v2f64)) ||
25709 (Subtarget->hasFp256() && (VT == MVT::v8f32 || VT == MVT::v4f64))) &&
25710 isHorizontalBinOp(LHS, RHS, false))
25711 return DAG.getNode(X86ISD::FHSUB, SDLoc(N), VT, LHS, RHS);
25715 /// Do target-specific dag combines on X86ISD::FOR and X86ISD::FXOR nodes.
25716 static SDValue PerformFORCombine(SDNode *N, SelectionDAG &DAG) {
25717 assert(N->getOpcode() == X86ISD::FOR || N->getOpcode() == X86ISD::FXOR);
25719 // F[X]OR(0.0, x) -> x
25720 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(0)))
25721 if (C->getValueAPF().isPosZero())
25722 return N->getOperand(1);
25724 // F[X]OR(x, 0.0) -> x
25725 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(1)))
25726 if (C->getValueAPF().isPosZero())
25727 return N->getOperand(0);
25731 /// Do target-specific dag combines on X86ISD::FMIN and X86ISD::FMAX nodes.
25732 static SDValue PerformFMinFMaxCombine(SDNode *N, SelectionDAG &DAG) {
25733 assert(N->getOpcode() == X86ISD::FMIN || N->getOpcode() == X86ISD::FMAX);
25735 // Only perform optimizations if UnsafeMath is used.
25736 if (!DAG.getTarget().Options.UnsafeFPMath)
25739 // If we run in unsafe-math mode, then convert the FMAX and FMIN nodes
25740 // into FMAXC and FMINC, which are commutative operations.
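// The distinction matters because the underlying MINPS/MAXPS instructions are
// not commutative: when an input is a NaN (and for +/-0.0) the result is taken
// from the second source operand, so swapping the operands can change the
// result. Only under unsafe-fp-math is the commutable FMINC/FMAXC form valid.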
25741 unsigned NewOp = 0;
25742 switch (N->getOpcode()) {
25743 default: llvm_unreachable("unknown opcode");
25744 case X86ISD::FMIN: NewOp = X86ISD::FMINC; break;
25745 case X86ISD::FMAX: NewOp = X86ISD::FMAXC; break;
25748 return DAG.getNode(NewOp, SDLoc(N), N->getValueType(0),
25749 N->getOperand(0), N->getOperand(1));
25752 /// Do target-specific dag combines on X86ISD::FAND nodes.
25753 static SDValue PerformFANDCombine(SDNode *N, SelectionDAG &DAG) {
25754 // FAND(0.0, x) -> 0.0
25755 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(0)))
25756 if (C->getValueAPF().isPosZero())
25757 return N->getOperand(0);
25759 // FAND(x, 0.0) -> 0.0
25760 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(1)))
25761 if (C->getValueAPF().isPosZero())
25762 return N->getOperand(1);
25767 /// Do target-specific dag combines on X86ISD::FANDN nodes
25768 static SDValue PerformFANDNCombine(SDNode *N, SelectionDAG &DAG) {
25769 // FANDN(0.0, x) -> x
25770 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(0)))
25771 if (C->getValueAPF().isPosZero())
25772 return N->getOperand(1);
25774 // FANDN(x, 0.0) -> 0.0
25775 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(1)))
25776 if (C->getValueAPF().isPosZero())
25777 return N->getOperand(1);
25782 static SDValue PerformBTCombine(SDNode *N,
25784 TargetLowering::DAGCombinerInfo &DCI) {
25785 // BT ignores high bits in the bit index operand.
25786 SDValue Op1 = N->getOperand(1);
25787 if (Op1.hasOneUse()) {
25788 unsigned BitWidth = Op1.getValueSizeInBits();
25789 APInt DemandedMask = APInt::getLowBitsSet(BitWidth, Log2_32(BitWidth));
25790 APInt KnownZero, KnownOne;
25791 TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
25792 !DCI.isBeforeLegalizeOps());
25793 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
25794 if (TLO.ShrinkDemandedConstant(Op1, DemandedMask) ||
25795 TLI.SimplifyDemandedBits(Op1, DemandedMask, KnownZero, KnownOne, TLO))
25796 DCI.CommitTargetLoweringOpt(TLO);
25801 static SDValue PerformVZEXT_MOVLCombine(SDNode *N, SelectionDAG &DAG) {
25802 SDValue Op = N->getOperand(0);
25803 if (Op.getOpcode() == ISD::BITCAST)
25804 Op = Op.getOperand(0);
25805 EVT VT = N->getValueType(0), OpVT = Op.getValueType();
25806 if (Op.getOpcode() == X86ISD::VZEXT_LOAD &&
25807 VT.getVectorElementType().getSizeInBits() ==
25808 OpVT.getVectorElementType().getSizeInBits()) {
25809 return DAG.getNode(ISD::BITCAST, SDLoc(N), VT, Op);
25814 static SDValue PerformSIGN_EXTEND_INREGCombine(SDNode *N, SelectionDAG &DAG,
25815 const X86Subtarget *Subtarget) {
25816 EVT VT = N->getValueType(0);
25817 if (!VT.isVector())
25820 SDValue N0 = N->getOperand(0);
25821 SDValue N1 = N->getOperand(1);
25822 EVT ExtraVT = cast<VTSDNode>(N1)->getVT();
25825 // The SIGN_EXTEND_INREG to v4i64 is an expensive operation on
25826 // both SSE and AVX2 since there is no sign-extending shift right
25827 // operation on a vector with 64-bit elements.
25828 //(sext_in_reg (v4i64 anyext (v4i32 x )), ExtraVT) ->
25829 // (v4i64 sext (v4i32 sext_in_reg (v4i32 x , ExtraVT)))
25830 if (VT == MVT::v4i64 && (N0.getOpcode() == ISD::ANY_EXTEND ||
25831 N0.getOpcode() == ISD::SIGN_EXTEND)) {
25832 SDValue N00 = N0.getOperand(0);
25834 // EXTLOAD has a better solution on AVX2: it may be replaced with an
25835 // X86ISD::VSEXT node.
25836 if (N00.getOpcode() == ISD::LOAD && Subtarget->hasInt256())
25837 if (!ISD::isNormalLoad(N00.getNode()))
25840 if (N00.getValueType() == MVT::v4i32 && ExtraVT.getSizeInBits() < 128) {
25841 SDValue Tmp = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, MVT::v4i32,
25843 return DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i64, Tmp);
25849 static SDValue PerformSExtCombine(SDNode *N, SelectionDAG &DAG,
25850 TargetLowering::DAGCombinerInfo &DCI,
25851 const X86Subtarget *Subtarget) {
25852 SDValue N0 = N->getOperand(0);
25853 EVT VT = N->getValueType(0);
25855 // (i8,i32 sext (sdivrem (i8 x, i8 y))) ->
25856 // (i8,i32 (sdivrem_sext_hreg (i8 x, i8 y)))
25857 // This exposes the sext to the sdivrem lowering, so that it directly extends
25858 // from AH (which we otherwise need to do contortions to access).
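// For example (a sketch), for IR along the lines of
//   %d = sdiv i8 %x, %y ; %r = srem i8 %x, %y ; %e = sext i8 %r to i32
// the 8-bit divide leaves the remainder in AH, and SDIVREM8_SEXT_HREG lets the
// sign-extended i32 result be produced directly from AH.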
25859 if (N0.getOpcode() == ISD::SDIVREM && N0.getResNo() == 1 &&
25860 N0.getValueType() == MVT::i8 && VT == MVT::i32) {
25862 SDVTList NodeTys = DAG.getVTList(MVT::i8, VT);
25863 SDValue R = DAG.getNode(X86ISD::SDIVREM8_SEXT_HREG, dl, NodeTys,
25864 N0.getOperand(0), N0.getOperand(1));
25865 DAG.ReplaceAllUsesOfValueWith(N0.getValue(0), R.getValue(0));
25866 return R.getValue(1);
25869 if (!DCI.isBeforeLegalizeOps())
25872 if (!Subtarget->hasFp256())
25875 if (VT.isVector() && VT.getSizeInBits() == 256) {
25876 SDValue R = WidenMaskArithmetic(N, DAG, DCI, Subtarget);
25884 static SDValue PerformFMACombine(SDNode *N, SelectionDAG &DAG,
25885 const X86Subtarget* Subtarget) {
25887 EVT VT = N->getValueType(0);
25889 // Let legalize expand this if it isn't a legal type yet.
25890 if (!DAG.getTargetLoweringInfo().isTypeLegal(VT))
25893 EVT ScalarVT = VT.getScalarType();
25894 if ((ScalarVT != MVT::f32 && ScalarVT != MVT::f64) ||
25895 (!Subtarget->hasFMA() && !Subtarget->hasFMA4()))
25898 SDValue A = N->getOperand(0);
25899 SDValue B = N->getOperand(1);
25900 SDValue C = N->getOperand(2);
25902 bool NegA = (A.getOpcode() == ISD::FNEG);
25903 bool NegB = (B.getOpcode() == ISD::FNEG);
25904 bool NegC = (C.getOpcode() == ISD::FNEG);
25906 // Negative multiplication when NegA xor NegB
25907 bool NegMul = (NegA != NegB);
25909 A = A.getOperand(0);
25911 B = B.getOperand(0);
25913 C = C.getOperand(0);
25917 Opcode = (!NegC) ? X86ISD::FMADD : X86ISD::FMSUB;
25919 Opcode = (!NegC) ? X86ISD::FNMADD : X86ISD::FNMSUB;
25921 return DAG.getNode(Opcode, dl, VT, A, B, C);
25924 static SDValue PerformZExtCombine(SDNode *N, SelectionDAG &DAG,
25925 TargetLowering::DAGCombinerInfo &DCI,
25926 const X86Subtarget *Subtarget) {
25927 // (i32 zext (and (i8 x86isd::setcc_carry), 1)) ->
25928 // (and (i32 x86isd::setcc_carry), 1)
25929 // This eliminates the zext. This transformation is necessary because
25930 // ISD::SETCC is always legalized to i8.
25932 SDValue N0 = N->getOperand(0);
25933 EVT VT = N->getValueType(0);
25935 if (N0.getOpcode() == ISD::AND &&
25937 N0.getOperand(0).hasOneUse()) {
25938 SDValue N00 = N0.getOperand(0);
25939 if (N00.getOpcode() == X86ISD::SETCC_CARRY) {
25940 ConstantSDNode *C = dyn_cast<ConstantSDNode>(N0.getOperand(1));
25941 if (!C || C->getZExtValue() != 1)
25943 return DAG.getNode(ISD::AND, dl, VT,
25944 DAG.getNode(X86ISD::SETCC_CARRY, dl, VT,
25945 N00.getOperand(0), N00.getOperand(1)),
25946 DAG.getConstant(1, VT));
25950 if (N0.getOpcode() == ISD::TRUNCATE &&
25952 N0.getOperand(0).hasOneUse()) {
25953 SDValue N00 = N0.getOperand(0);
25954 if (N00.getOpcode() == X86ISD::SETCC_CARRY) {
25955 return DAG.getNode(ISD::AND, dl, VT,
25956 DAG.getNode(X86ISD::SETCC_CARRY, dl, VT,
25957 N00.getOperand(0), N00.getOperand(1)),
25958 DAG.getConstant(1, VT));
25961 if (VT.is256BitVector()) {
25962 SDValue R = WidenMaskArithmetic(N, DAG, DCI, Subtarget);
25967 // (i8,i32 zext (udivrem (i8 x, i8 y))) ->
25968 // (i8,i32 (udivrem_zext_hreg (i8 x, i8 y)))
25969 // This exposes the zext to the udivrem lowering, so that it directly extends
25970 // from AH (which we otherwise need to do contortions to access).
25971 if (N0.getOpcode() == ISD::UDIVREM &&
25972 N0.getResNo() == 1 && N0.getValueType() == MVT::i8 &&
25973 (VT == MVT::i32 || VT == MVT::i64)) {
25974 SDVTList NodeTys = DAG.getVTList(MVT::i8, VT);
25975 SDValue R = DAG.getNode(X86ISD::UDIVREM8_ZEXT_HREG, dl, NodeTys,
25976 N0.getOperand(0), N0.getOperand(1));
25977 DAG.ReplaceAllUsesOfValueWith(N0.getValue(0), R.getValue(0));
25978 return R.getValue(1);
25984 // Optimize x == -y --> x+y == 0
25985 // x != -y --> x+y != 0
25986 static SDValue PerformISDSETCCCombine(SDNode *N, SelectionDAG &DAG,
25987 const X86Subtarget* Subtarget) {
25988 ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get();
25989 SDValue LHS = N->getOperand(0);
25990 SDValue RHS = N->getOperand(1);
25991 EVT VT = N->getValueType(0);
25994 if ((CC == ISD::SETNE || CC == ISD::SETEQ) && LHS.getOpcode() == ISD::SUB)
25995 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(LHS.getOperand(0)))
25996 if (C->getAPIntValue() == 0 && LHS.hasOneUse()) {
25997 SDValue addV = DAG.getNode(ISD::ADD, SDLoc(N),
25998 LHS.getValueType(), RHS, LHS.getOperand(1));
25999 return DAG.getSetCC(SDLoc(N), N->getValueType(0),
26000 addV, DAG.getConstant(0, addV.getValueType()), CC);
26002 if ((CC == ISD::SETNE || CC == ISD::SETEQ) && RHS.getOpcode() == ISD::SUB)
26003 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS.getOperand(0)))
26004 if (C->getAPIntValue() == 0 && RHS.hasOneUse()) {
26005 SDValue addV = DAG.getNode(ISD::ADD, SDLoc(N),
26006 RHS.getValueType(), LHS, RHS.getOperand(1));
26007 return DAG.getSetCC(SDLoc(N), N->getValueType(0),
26008 addV, DAG.getConstant(0, addV.getValueType()), CC);
26011 if (VT.getScalarType() == MVT::i1) {
26012 bool IsSEXT0 = (LHS.getOpcode() == ISD::SIGN_EXTEND) &&
26013 (LHS.getOperand(0).getValueType().getScalarType() == MVT::i1);
26014 bool IsVZero0 = ISD::isBuildVectorAllZeros(LHS.getNode());
26015 if (!IsSEXT0 && !IsVZero0)
26017 bool IsSEXT1 = (RHS.getOpcode() == ISD::SIGN_EXTEND) &&
26018 (RHS.getOperand(0).getValueType().getScalarType() == MVT::i1);
26019 bool IsVZero1 = ISD::isBuildVectorAllZeros(RHS.getNode());
26021 if (!IsSEXT1 && !IsVZero1)
26024 if (IsSEXT0 && IsVZero1) {
26025 assert(VT == LHS.getOperand(0).getValueType() && "Unexpected operand type");
26026 if (CC == ISD::SETEQ)
26027 return DAG.getNOT(DL, LHS.getOperand(0), VT);
26028 return LHS.getOperand(0);
26030 if (IsSEXT1 && IsVZero0) {
26031 assert(VT == RHS.getOperand(0).getValueType() && "Unexpected operand type");
26032 if (CC == ISD::SETEQ)
26033 return DAG.getNOT(DL, RHS.getOperand(0), VT);
26034 return RHS.getOperand(0);
26041 static SDValue PerformINSERTPSCombine(SDNode *N, SelectionDAG &DAG,
26042 const X86Subtarget *Subtarget) {
26044 MVT VT = N->getOperand(1)->getSimpleValueType(0);
26045 assert((VT == MVT::v4f32 || VT == MVT::v4i32) &&
26046 "X86insertps is only defined for v4x32");
26048 SDValue Ld = N->getOperand(1);
26049 if (MayFoldLoad(Ld)) {
26050 // Extract the countS bits from the immediate so we can get the proper
26051 // address when narrowing the vector load to a specific element.
26052 // When the second source op is a memory address, insertps doesn't use
26053 // countS and just gets an f32 from that address.
26054 unsigned DestIndex =
26055 cast<ConstantSDNode>(N->getOperand(2))->getZExtValue() >> 6;
26056 Ld = NarrowVectorLoadToElement(cast<LoadSDNode>(Ld), DestIndex, DAG);
26060 // Create this as a scalar to vector to match the instruction pattern.
26061 SDValue LoadScalarToVector = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Ld);
26062 // countS bits are ignored when loading from memory on insertps, which
26063 // means we don't need to explicitly set them to 0.
26064 return DAG.getNode(X86ISD::INSERTPS, dl, VT, N->getOperand(0),
26065 LoadScalarToVector, N->getOperand(2));
26068 // Helper function of PerformSETCCCombine. It is to materialize "setb reg"
26069 // as "sbb reg,reg", since it can be extended without zext and produces
26070 // an all-ones bit which is more useful than 0/1 in some cases.
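// Concretely (assuming CF holds the condition), "sbb %eax, %eax" yields
// 0xFFFFFFFF when CF is set and 0 otherwise; masking with "and $1, %eax"
// recovers a 0/1 value when that is what the user of the SETCC needs.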
26071 static SDValue MaterializeSETB(SDLoc DL, SDValue EFLAGS, SelectionDAG &DAG,
26074 return DAG.getNode(ISD::AND, DL, VT,
26075 DAG.getNode(X86ISD::SETCC_CARRY, DL, MVT::i8,
26076 DAG.getConstant(X86::COND_B, MVT::i8), EFLAGS),
26077 DAG.getConstant(1, VT));
26078 assert (VT == MVT::i1 && "Unexpected type for SETCC node");
26079 return DAG.getNode(ISD::TRUNCATE, DL, MVT::i1,
26080 DAG.getNode(X86ISD::SETCC_CARRY, DL, MVT::i8,
26081 DAG.getConstant(X86::COND_B, MVT::i8), EFLAGS));
26084 // Optimize RES = X86ISD::SETCC CONDCODE, EFLAG_INPUT
26085 static SDValue PerformSETCCCombine(SDNode *N, SelectionDAG &DAG,
26086 TargetLowering::DAGCombinerInfo &DCI,
26087 const X86Subtarget *Subtarget) {
26089 X86::CondCode CC = X86::CondCode(N->getConstantOperandVal(0));
26090 SDValue EFLAGS = N->getOperand(1);
26092 if (CC == X86::COND_A) {
26093 // Try to convert COND_A into COND_B in an attempt to facilitate
26094 // materializing "setb reg".
26096 // Do not flip "e > c", where "c" is a constant, because Cmp instruction
26097 // cannot take an immediate as its first operand.
26099 if (EFLAGS.getOpcode() == X86ISD::SUB && EFLAGS.hasOneUse() &&
26100 EFLAGS.getValueType().isInteger() &&
26101 !isa<ConstantSDNode>(EFLAGS.getOperand(1))) {
26102 SDValue NewSub = DAG.getNode(X86ISD::SUB, SDLoc(EFLAGS),
26103 EFLAGS.getNode()->getVTList(),
26104 EFLAGS.getOperand(1), EFLAGS.getOperand(0));
26105 SDValue NewEFLAGS = SDValue(NewSub.getNode(), EFLAGS.getResNo());
26106 return MaterializeSETB(DL, NewEFLAGS, DAG, N->getSimpleValueType(0));
26110 // Materialize "setb reg" as "sbb reg,reg", since it can be extended without
26111 // a zext and produces an all-ones bit which is more useful than 0/1 in some cases.
26113 if (CC == X86::COND_B)
26114 return MaterializeSETB(DL, EFLAGS, DAG, N->getSimpleValueType(0));
26118 Flags = checkBoolTestSetCCCombine(EFLAGS, CC);
26119 if (Flags.getNode()) {
26120 SDValue Cond = DAG.getConstant(CC, MVT::i8);
26121 return DAG.getNode(X86ISD::SETCC, DL, N->getVTList(), Cond, Flags);
26127 // Optimize branch condition evaluation.
26129 static SDValue PerformBrCondCombine(SDNode *N, SelectionDAG &DAG,
26130 TargetLowering::DAGCombinerInfo &DCI,
26131 const X86Subtarget *Subtarget) {
26133 SDValue Chain = N->getOperand(0);
26134 SDValue Dest = N->getOperand(1);
26135 SDValue EFLAGS = N->getOperand(3);
26136 X86::CondCode CC = X86::CondCode(N->getConstantOperandVal(2));
26140 Flags = checkBoolTestSetCCCombine(EFLAGS, CC);
26141 if (Flags.getNode()) {
26142 SDValue Cond = DAG.getConstant(CC, MVT::i8);
26143 return DAG.getNode(X86ISD::BRCOND, DL, N->getVTList(), Chain, Dest, Cond,
26150 static SDValue performVectorCompareAndMaskUnaryOpCombine(SDNode *N,
26151 SelectionDAG &DAG) {
26152 // Take advantage of vector comparisons producing 0 or -1 in each lane to
26153 // optimize away the operation when its input is such a comparison ANDed with a constant.
26155 // The general transformation is:
26156 // UNARYOP(AND(VECTOR_CMP(x,y), constant)) -->
26157 // AND(VECTOR_CMP(x,y), constant2)
26158 // constant2 = UNARYOP(constant)
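// For example, with UNARYOP == sint_to_fp on v4i32/v4f32:
//   sint_to_fp(and(setcc(a, b), <1,1,1,1>))          ; lanes are 0 or 1
// becomes
//   and(setcc(a, b), bitcast(<1.0,1.0,1.0,1.0>))     ; lanes are 0.0 or 1.0
// because each setcc lane is all-zeros or all-ones, so the AND keeps either
// none or all of the bits of the converted constant.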
26160 // Early exit if this isn't a vector operation, the operand of the
26161 // unary operation isn't a bitwise AND, or if the sizes of the operations
26162 // aren't the same.
26163 EVT VT = N->getValueType(0);
26164 if (!VT.isVector() || N->getOperand(0)->getOpcode() != ISD::AND ||
26165 N->getOperand(0)->getOperand(0)->getOpcode() != ISD::SETCC ||
26166 VT.getSizeInBits() != N->getOperand(0)->getValueType(0).getSizeInBits())
26169 // Now check that the other operand of the AND is a constant. We could
26170 // make the transformation for non-constant splats as well, but it's unclear
26171 // that would be a benefit as it would not eliminate any operations, just
26172 // perform one more step in scalar code before moving to the vector unit.
26173 if (BuildVectorSDNode *BV =
26174 dyn_cast<BuildVectorSDNode>(N->getOperand(0)->getOperand(1))) {
26175 // Bail out if the vector isn't a constant.
26176 if (!BV->isConstant())
26179 // Everything checks out. Build up the new and improved node.
26181 EVT IntVT = BV->getValueType(0);
26182 // Create a new constant of the appropriate type for the transformed
26184 SDValue SourceConst = DAG.getNode(N->getOpcode(), DL, VT, SDValue(BV, 0));
26185 // The AND node needs bitcasts to/from an integer vector type around it.
26186 SDValue MaskConst = DAG.getNode(ISD::BITCAST, DL, IntVT, SourceConst);
26187 SDValue NewAnd = DAG.getNode(ISD::AND, DL, IntVT,
26188 N->getOperand(0)->getOperand(0), MaskConst);
26189 SDValue Res = DAG.getNode(ISD::BITCAST, DL, VT, NewAnd);
26196 static SDValue PerformSINT_TO_FPCombine(SDNode *N, SelectionDAG &DAG,
26197 const X86Subtarget *Subtarget) {
26198 // First try to optimize away the conversion entirely when it's
26199 // conditionally from a constant. Vectors only.
26200 SDValue Res = performVectorCompareAndMaskUnaryOpCombine(N, DAG);
26201 if (Res != SDValue())
26204 // Now move on to more general possibilities.
26205 SDValue Op0 = N->getOperand(0);
26206 EVT InVT = Op0->getValueType(0);
26208 // SINT_TO_FP(v4i8) -> SINT_TO_FP(SEXT(v4i8 to v4i32))
26209 if (InVT == MVT::v8i8 || InVT == MVT::v4i8) {
26211 MVT DstVT = InVT == MVT::v4i8 ? MVT::v4i32 : MVT::v8i32;
26212 SDValue P = DAG.getNode(ISD::SIGN_EXTEND, dl, DstVT, Op0);
26213 return DAG.getNode(ISD::SINT_TO_FP, dl, N->getValueType(0), P);
26216 // Transform (SINT_TO_FP (i64 ...)) into an x87 operation if we have
26217 // a 32-bit target where SSE doesn't support i64->FP operations.
26218 if (Op0.getOpcode() == ISD::LOAD) {
26219 LoadSDNode *Ld = cast<LoadSDNode>(Op0.getNode());
26220 EVT VT = Ld->getValueType(0);
26221 if (!Ld->isVolatile() && !N->getValueType(0).isVector() &&
26222 ISD::isNON_EXTLoad(Op0.getNode()) && Op0.hasOneUse() &&
26223 !Subtarget->is64Bit() && VT == MVT::i64) {
26224 SDValue FILDChain = Subtarget->getTargetLowering()->BuildFILD(
26225 SDValue(N, 0), Ld->getValueType(0), Ld->getChain(), Op0, DAG);
26226 DAG.ReplaceAllUsesOfValueWith(Op0.getValue(1), FILDChain.getValue(1));
26233 // Optimize RES, EFLAGS = X86ISD::ADC LHS, RHS, EFLAGS
26234 static SDValue PerformADCCombine(SDNode *N, SelectionDAG &DAG,
26235 X86TargetLowering::DAGCombinerInfo &DCI) {
26236 // If the LHS and RHS of the ADC node are zero, then it can't overflow and
26237 // the result is either zero or one (depending on the input carry bit).
26238 // Strength reduce this down to a "set on carry" aka SETCC_CARRY&1.
26239 if (X86::isZeroNode(N->getOperand(0)) &&
26240 X86::isZeroNode(N->getOperand(1)) &&
26241 // We don't have a good way to replace an EFLAGS use, so only do this when the result is dead.
26243 SDValue(N, 1).use_empty()) {
26245 EVT VT = N->getValueType(0);
26246 SDValue CarryOut = DAG.getConstant(0, N->getValueType(1));
26247 SDValue Res1 = DAG.getNode(ISD::AND, DL, VT,
26248 DAG.getNode(X86ISD::SETCC_CARRY, DL, VT,
26249 DAG.getConstant(X86::COND_B,MVT::i8),
26251 DAG.getConstant(1, VT));
26252 return DCI.CombineTo(N, Res1, CarryOut);
26258 // fold (add Y, (sete X, 0)) -> adc 0, Y
26259 // (add Y, (setne X, 0)) -> sbb -1, Y
26260 // (sub (sete X, 0), Y) -> sbb 0, Y
26261 // (sub (setne X, 0), Y) -> adc -1, Y
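// This works because the rewritten compare below is CMP X, 1: the unsigned
// borrow (CF) is set exactly when X == 0, so ADC/SBB fold the 0/1 condition
// into the add/sub for free. E.g. (add Y, (sete X, 0)) becomes "adc $0, Y"
// after "cmp $1, X".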
26262 static SDValue OptimizeConditionalInDecrement(SDNode *N, SelectionDAG &DAG) {
26265 // Look through ZExts.
26266 SDValue Ext = N->getOperand(N->getOpcode() == ISD::SUB ? 1 : 0);
26267 if (Ext.getOpcode() != ISD::ZERO_EXTEND || !Ext.hasOneUse())
26270 SDValue SetCC = Ext.getOperand(0);
26271 if (SetCC.getOpcode() != X86ISD::SETCC || !SetCC.hasOneUse())
26274 X86::CondCode CC = (X86::CondCode)SetCC.getConstantOperandVal(0);
26275 if (CC != X86::COND_E && CC != X86::COND_NE)
26278 SDValue Cmp = SetCC.getOperand(1);
26279 if (Cmp.getOpcode() != X86ISD::CMP || !Cmp.hasOneUse() ||
26280 !X86::isZeroNode(Cmp.getOperand(1)) ||
26281 !Cmp.getOperand(0).getValueType().isInteger())
26284 SDValue CmpOp0 = Cmp.getOperand(0);
26285 SDValue NewCmp = DAG.getNode(X86ISD::CMP, DL, MVT::i32, CmpOp0,
26286 DAG.getConstant(1, CmpOp0.getValueType()));
26288 SDValue OtherVal = N->getOperand(N->getOpcode() == ISD::SUB ? 0 : 1);
26289 if (CC == X86::COND_NE)
26290 return DAG.getNode(N->getOpcode() == ISD::SUB ? X86ISD::ADC : X86ISD::SBB,
26291 DL, OtherVal.getValueType(), OtherVal,
26292 DAG.getConstant(-1ULL, OtherVal.getValueType()), NewCmp);
26293 return DAG.getNode(N->getOpcode() == ISD::SUB ? X86ISD::SBB : X86ISD::ADC,
26294 DL, OtherVal.getValueType(), OtherVal,
26295 DAG.getConstant(0, OtherVal.getValueType()), NewCmp);
26298 /// PerformADDCombine - Do target-specific dag combines on integer adds.
26299 static SDValue PerformAddCombine(SDNode *N, SelectionDAG &DAG,
26300 const X86Subtarget *Subtarget) {
26301 EVT VT = N->getValueType(0);
26302 SDValue Op0 = N->getOperand(0);
26303 SDValue Op1 = N->getOperand(1);
26305 // Try to synthesize horizontal adds from adds of shuffles.
26306 if (((Subtarget->hasSSSE3() && (VT == MVT::v8i16 || VT == MVT::v4i32)) ||
26307 (Subtarget->hasInt256() && (VT == MVT::v16i16 || VT == MVT::v8i32))) &&
26308 isHorizontalBinOp(Op0, Op1, true))
26309 return DAG.getNode(X86ISD::HADD, SDLoc(N), VT, Op0, Op1);
26311 return OptimizeConditionalInDecrement(N, DAG);
26314 static SDValue PerformSubCombine(SDNode *N, SelectionDAG &DAG,
26315 const X86Subtarget *Subtarget) {
26316 SDValue Op0 = N->getOperand(0);
26317 SDValue Op1 = N->getOperand(1);
26319 // X86 can't encode an immediate LHS of a sub. See if we can push the
26320 // negation into a preceding instruction.
26321 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op0)) {
26322 // If the RHS of the sub is a XOR with one use and a constant, invert the
26323 // immediate. Then add one to the LHS of the sub so we can turn
26324 // X-Y -> X+~Y+1, saving one register.
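// For example, (sub 5, (xor y, 3)) becomes (add (xor y, ~3), 6), since
// -(y ^ 3) == ~(y ^ 3) + 1 == (y ^ ~3) + 1.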
26325 if (Op1->hasOneUse() && Op1.getOpcode() == ISD::XOR &&
26326 isa<ConstantSDNode>(Op1.getOperand(1))) {
26327 APInt XorC = cast<ConstantSDNode>(Op1.getOperand(1))->getAPIntValue();
26328 EVT VT = Op0.getValueType();
26329 SDValue NewXor = DAG.getNode(ISD::XOR, SDLoc(Op1), VT,
26331 DAG.getConstant(~XorC, VT));
26332 return DAG.getNode(ISD::ADD, SDLoc(N), VT, NewXor,
26333 DAG.getConstant(C->getAPIntValue()+1, VT));
26337 // Try to synthesize horizontal adds from adds of shuffles.
26338 EVT VT = N->getValueType(0);
26339 if (((Subtarget->hasSSSE3() && (VT == MVT::v8i16 || VT == MVT::v4i32)) ||
26340 (Subtarget->hasInt256() && (VT == MVT::v16i16 || VT == MVT::v8i32))) &&
26341 isHorizontalBinOp(Op0, Op1, true))
26342 return DAG.getNode(X86ISD::HSUB, SDLoc(N), VT, Op0, Op1);
26344 return OptimizeConditionalInDecrement(N, DAG);
26347 /// performVZEXTCombine - Performs VZEXT combines
26348 static SDValue performVZEXTCombine(SDNode *N, SelectionDAG &DAG,
26349 TargetLowering::DAGCombinerInfo &DCI,
26350 const X86Subtarget *Subtarget) {
26352 MVT VT = N->getSimpleValueType(0);
26353 SDValue Op = N->getOperand(0);
26354 MVT OpVT = Op.getSimpleValueType();
26355 MVT OpEltVT = OpVT.getVectorElementType();
26356 unsigned InputBits = OpEltVT.getSizeInBits() * VT.getVectorNumElements();
26358 // (vzext (bitcast (vzext (x)) -> (vzext x)
26360 while (V.getOpcode() == ISD::BITCAST)
26361 V = V.getOperand(0);
26363 if (V != Op && V.getOpcode() == X86ISD::VZEXT) {
26364 MVT InnerVT = V.getSimpleValueType();
26365 MVT InnerEltVT = InnerVT.getVectorElementType();
26367 // If the element sizes match exactly, we can just do one larger vzext. This
26368 // is always an exact type match as vzext operates on integer types.
26369 if (OpEltVT == InnerEltVT) {
26370 assert(OpVT == InnerVT && "Types must match for vzext!");
26371 return DAG.getNode(X86ISD::VZEXT, DL, VT, V.getOperand(0));
26374 // The only other way we can combine them is if only a single element of the
26375 // inner vzext is used in the input to the outer vzext.
26376 if (InnerEltVT.getSizeInBits() < InputBits)
26379 // In this case, the inner vzext is completely dead because we're going to
26380 // only look at bits inside of the low element. Just do the outer vzext on
26381 // a bitcast of the input to the inner.
26382 return DAG.getNode(X86ISD::VZEXT, DL, VT,
26383 DAG.getNode(ISD::BITCAST, DL, OpVT, V));
26386 // Check if we can bypass extracting and re-inserting an element of an input
26387 // vector. Essentially:
26388 // (bitcast (sclr2vec (ext_vec_elt x))) -> (bitcast x)
26389 if (V.getOpcode() == ISD::SCALAR_TO_VECTOR &&
26390 V.getOperand(0).getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
26391 V.getOperand(0).getSimpleValueType().getSizeInBits() == InputBits) {
26392 SDValue ExtractedV = V.getOperand(0);
26393 SDValue OrigV = ExtractedV.getOperand(0);
26394 if (auto *ExtractIdx = dyn_cast<ConstantSDNode>(ExtractedV.getOperand(1)))
26395 if (ExtractIdx->getZExtValue() == 0) {
26396 MVT OrigVT = OrigV.getSimpleValueType();
26397 // Extract a subvector if necessary...
26398 if (OrigVT.getSizeInBits() > OpVT.getSizeInBits()) {
26399 int Ratio = OrigVT.getSizeInBits() / OpVT.getSizeInBits();
26400 OrigVT = MVT::getVectorVT(OrigVT.getVectorElementType(),
26401 OrigVT.getVectorNumElements() / Ratio);
26402 OrigV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, OrigVT, OrigV,
26403 DAG.getIntPtrConstant(0));
26405 Op = DAG.getNode(ISD::BITCAST, DL, OpVT, OrigV);
26406 return DAG.getNode(X86ISD::VZEXT, DL, VT, Op);
26413 SDValue X86TargetLowering::PerformDAGCombine(SDNode *N,
26414 DAGCombinerInfo &DCI) const {
26415 SelectionDAG &DAG = DCI.DAG;
26416 switch (N->getOpcode()) {
26418 case ISD::EXTRACT_VECTOR_ELT:
26419 return PerformEXTRACT_VECTOR_ELTCombine(N, DAG, DCI);
26422 case X86ISD::SHRUNKBLEND:
26423 return PerformSELECTCombine(N, DAG, DCI, Subtarget);
26424 case ISD::BITCAST: return PerformBITCASTCombine(N, DAG);
26425 case X86ISD::CMOV: return PerformCMOVCombine(N, DAG, DCI, Subtarget);
26426 case ISD::ADD: return PerformAddCombine(N, DAG, Subtarget);
26427 case ISD::SUB: return PerformSubCombine(N, DAG, Subtarget);
26428 case X86ISD::ADC: return PerformADCCombine(N, DAG, DCI);
26429 case ISD::MUL: return PerformMulCombine(N, DAG, DCI);
26432 case ISD::SRL: return PerformShiftCombine(N, DAG, DCI, Subtarget);
26433 case ISD::AND: return PerformAndCombine(N, DAG, DCI, Subtarget);
26434 case ISD::OR: return PerformOrCombine(N, DAG, DCI, Subtarget);
26435 case ISD::XOR: return PerformXorCombine(N, DAG, DCI, Subtarget);
26436 case ISD::LOAD: return PerformLOADCombine(N, DAG, DCI, Subtarget);
26437 case ISD::MLOAD: return PerformMLOADCombine(N, DAG, DCI, Subtarget);
26438 case ISD::STORE: return PerformSTORECombine(N, DAG, Subtarget);
26439 case ISD::MSTORE: return PerformMSTORECombine(N, DAG, Subtarget);
26440 case ISD::SINT_TO_FP: return PerformSINT_TO_FPCombine(N, DAG, Subtarget);
26441 case ISD::FADD: return PerformFADDCombine(N, DAG, Subtarget);
26442 case ISD::FSUB: return PerformFSUBCombine(N, DAG, Subtarget);
26444 case X86ISD::FOR: return PerformFORCombine(N, DAG);
26446 case X86ISD::FMAX: return PerformFMinFMaxCombine(N, DAG);
26447 case X86ISD::FAND: return PerformFANDCombine(N, DAG);
26448 case X86ISD::FANDN: return PerformFANDNCombine(N, DAG);
26449 case X86ISD::BT: return PerformBTCombine(N, DAG, DCI);
26450 case X86ISD::VZEXT_MOVL: return PerformVZEXT_MOVLCombine(N, DAG);
26451 case ISD::ANY_EXTEND:
26452 case ISD::ZERO_EXTEND: return PerformZExtCombine(N, DAG, DCI, Subtarget);
26453 case ISD::SIGN_EXTEND: return PerformSExtCombine(N, DAG, DCI, Subtarget);
26454 case ISD::SIGN_EXTEND_INREG:
26455 return PerformSIGN_EXTEND_INREGCombine(N, DAG, Subtarget);
26456 case ISD::TRUNCATE: return PerformTruncateCombine(N, DAG,DCI,Subtarget);
26457 case ISD::SETCC: return PerformISDSETCCCombine(N, DAG, Subtarget);
26458 case X86ISD::SETCC: return PerformSETCCCombine(N, DAG, DCI, Subtarget);
26459 case X86ISD::BRCOND: return PerformBrCondCombine(N, DAG, DCI, Subtarget);
26460 case X86ISD::VZEXT: return performVZEXTCombine(N, DAG, DCI, Subtarget);
26461 case X86ISD::SHUFP: // Handle all target specific shuffles
26462 case X86ISD::PALIGNR:
26463 case X86ISD::UNPCKH:
26464 case X86ISD::UNPCKL:
26465 case X86ISD::MOVHLPS:
26466 case X86ISD::MOVLHPS:
26467 case X86ISD::PSHUFB:
26468 case X86ISD::PSHUFD:
26469 case X86ISD::PSHUFHW:
26470 case X86ISD::PSHUFLW:
26471 case X86ISD::MOVSS:
26472 case X86ISD::MOVSD:
26473 case X86ISD::VPERMILPI:
26474 case X86ISD::VPERM2X128:
26475 case ISD::VECTOR_SHUFFLE: return PerformShuffleCombine(N, DAG, DCI,Subtarget);
26476 case ISD::FMA: return PerformFMACombine(N, DAG, Subtarget);
26477 case ISD::INTRINSIC_WO_CHAIN:
26478 return PerformINTRINSIC_WO_CHAINCombine(N, DAG, Subtarget);
26479 case X86ISD::INSERTPS: {
26480 if (getTargetMachine().getOptLevel() > CodeGenOpt::None)
26481 return PerformINSERTPSCombine(N, DAG, Subtarget);
26484 case ISD::BUILD_VECTOR: return PerformBUILD_VECTORCombine(N, DAG, Subtarget);
26490 /// isTypeDesirableForOp - Return true if the target has native support for
26491 /// the specified value type and it is 'desirable' to use the type for the
26492 /// given node type. e.g. On x86 i16 is legal, but undesirable since i16
26493 /// instruction encodings are longer and some i16 instructions are slow.
26494 bool X86TargetLowering::isTypeDesirableForOp(unsigned Opc, EVT VT) const {
26495 if (!isTypeLegal(VT))
26497 if (VT != MVT::i16)
26504 case ISD::SIGN_EXTEND:
26505 case ISD::ZERO_EXTEND:
26506 case ISD::ANY_EXTEND:
26519 /// IsDesirableToPromoteOp - This method query the target whether it is
26520 /// beneficial for dag combiner to promote the specified node. If true, it
26521 /// should return the desired promotion type by reference.
26522 bool X86TargetLowering::IsDesirableToPromoteOp(SDValue Op, EVT &PVT) const {
26523 EVT VT = Op.getValueType();
26524 if (VT != MVT::i16)
26527 bool Promote = false;
26528 bool Commute = false;
26529 switch (Op.getOpcode()) {
26532 LoadSDNode *LD = cast<LoadSDNode>(Op);
26533 // If the non-extending load has a single use and it's not live out, then it
26534 // might be folded.
26535 if (LD->getExtensionType() == ISD::NON_EXTLOAD /*&&
26536 Op.hasOneUse()*/) {
26537 for (SDNode::use_iterator UI = Op.getNode()->use_begin(),
26538 UE = Op.getNode()->use_end(); UI != UE; ++UI) {
26539 // The only case where we'd want to promote LOAD (rather than it being
26540 // promoted as an operand) is when its only use is live-out.
26541 if (UI->getOpcode() != ISD::CopyToReg)
26548 case ISD::SIGN_EXTEND:
26549 case ISD::ZERO_EXTEND:
26550 case ISD::ANY_EXTEND:
26555 SDValue N0 = Op.getOperand(0);
26556 // Look out for (store (shl (load), x)).
26557 if (MayFoldLoad(N0) && MayFoldIntoStore(Op))
26570 SDValue N0 = Op.getOperand(0);
26571 SDValue N1 = Op.getOperand(1);
26572 if (!Commute && MayFoldLoad(N1))
26574 // Avoid disabling potential load folding opportunities.
26575 if (MayFoldLoad(N0) && (!isa<ConstantSDNode>(N1) || MayFoldIntoStore(Op)))
26577 if (MayFoldLoad(N1) && (!isa<ConstantSDNode>(N0) || MayFoldIntoStore(Op)))
26587 //===----------------------------------------------------------------------===//
26588 // X86 Inline Assembly Support
26589 //===----------------------------------------------------------------------===//
26592 // Helper to match a string separated by whitespace.
26593 bool matchAsmImpl(StringRef s, ArrayRef<const StringRef *> args) {
26594 s = s.substr(s.find_first_not_of(" \t")); // Skip leading whitespace.
26596 for (unsigned i = 0, e = args.size(); i != e; ++i) {
26597 StringRef piece(*args[i]);
26598 if (!s.startswith(piece)) // Check if the piece matches.
26601 s = s.substr(piece.size());
26602 StringRef::size_type pos = s.find_first_not_of(" \t");
26603 if (pos == 0) // We matched a prefix.
26611 const VariadicFunction1<bool, StringRef, StringRef, matchAsmImpl> matchAsm={};
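// For example, matchAsm("bswap $0", "bswap", "$0") succeeds: leading
// whitespace is skipped, and each matched piece must be followed by further
// whitespace or the end of the string.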
26614 static bool clobbersFlagRegisters(const SmallVector<StringRef, 4> &AsmPieces) {
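// The clobber part of the constraint string that Clang typically emits is
// "~{dirflag},~{fpsr},~{flags}" (plus "~{cc}" when the asm clobbers the
// condition codes), so after splitting on ',' we expect 3 or 4 pieces.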
26616 if (AsmPieces.size() == 3 || AsmPieces.size() == 4) {
26617 if (std::count(AsmPieces.begin(), AsmPieces.end(), "~{cc}") &&
26618 std::count(AsmPieces.begin(), AsmPieces.end(), "~{flags}") &&
26619 std::count(AsmPieces.begin(), AsmPieces.end(), "~{fpsr}")) {
26621 if (AsmPieces.size() == 3)
26623 else if (std::count(AsmPieces.begin(), AsmPieces.end(), "~{dirflag}"))
26630 bool X86TargetLowering::ExpandInlineAsm(CallInst *CI) const {
26631 InlineAsm *IA = cast<InlineAsm>(CI->getCalledValue());
26633 std::string AsmStr = IA->getAsmString();
26635 IntegerType *Ty = dyn_cast<IntegerType>(CI->getType());
26636 if (!Ty || Ty->getBitWidth() % 16 != 0)
26639 // TODO: should remove alternatives from the asmstring: "foo {a|b}" -> "foo a"
26640 SmallVector<StringRef, 4> AsmPieces;
26641 SplitString(AsmStr, AsmPieces, ";\n");
26643 switch (AsmPieces.size()) {
26644 default: return false;
26646 // FIXME: this should verify that we are targeting a 486 or better. If not,
26647 // we will turn this bswap into something that will be lowered to logical
26648 // ops instead of emitting the bswap asm. For now, we don't support 486 or
26649 // lower so don't worry about this.
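// E.g. (illustrative IR) the single-instruction form
//   %r = call i32 asm "bswap $0", "=r,0"(i32 %x)
// is replaced below by a call to llvm.bswap.i32 on %x.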
26651 if (matchAsm(AsmPieces[0], "bswap", "$0") ||
26652 matchAsm(AsmPieces[0], "bswapl", "$0") ||
26653 matchAsm(AsmPieces[0], "bswapq", "$0") ||
26654 matchAsm(AsmPieces[0], "bswap", "${0:q}") ||
26655 matchAsm(AsmPieces[0], "bswapl", "${0:q}") ||
26656 matchAsm(AsmPieces[0], "bswapq", "${0:q}")) {
26657 // No need to check constraints; nothing other than the equivalent of
26658 // "=r,0" would be valid here.
26659 return IntrinsicLowering::LowerToByteSwap(CI);
26662 // rorw $$8, ${0:w} --> llvm.bswap.i16
26663 if (CI->getType()->isIntegerTy(16) &&
26664 IA->getConstraintString().compare(0, 5, "=r,0,") == 0 &&
26665 (matchAsm(AsmPieces[0], "rorw", "$$8,", "${0:w}") ||
26666 matchAsm(AsmPieces[0], "rolw", "$$8,", "${0:w}"))) {
26668 const std::string &ConstraintsStr = IA->getConstraintString();
26669 SplitString(StringRef(ConstraintsStr).substr(5), AsmPieces, ",");
26670 array_pod_sort(AsmPieces.begin(), AsmPieces.end());
26671 if (clobbersFlagRegisters(AsmPieces))
26672 return IntrinsicLowering::LowerToByteSwap(CI);
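// rorw $$8, ${0:w} / rorl $$16, $0 / rorw $$8, ${0:w} --> llvm.bswap.i32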
26676 if (CI->getType()->isIntegerTy(32) &&
26677 IA->getConstraintString().compare(0, 5, "=r,0,") == 0 &&
26678 matchAsm(AsmPieces[0], "rorw", "$$8,", "${0:w}") &&
26679 matchAsm(AsmPieces[1], "rorl", "$$16,", "$0") &&
26680 matchAsm(AsmPieces[2], "rorw", "$$8,", "${0:w}")) {
26682 const std::string &ConstraintsStr = IA->getConstraintString();
26683 SplitString(StringRef(ConstraintsStr).substr(5), AsmPieces, ",");
26684 array_pod_sort(AsmPieces.begin(), AsmPieces.end());
26685 if (clobbersFlagRegisters(AsmPieces))
26686 return IntrinsicLowering::LowerToByteSwap(CI);
26689 if (CI->getType()->isIntegerTy(64)) {
26690 InlineAsm::ConstraintInfoVector Constraints = IA->ParseConstraints();
26691 if (Constraints.size() >= 2 &&
26692 Constraints[0].Codes.size() == 1 && Constraints[0].Codes[0] == "A" &&
26693 Constraints[1].Codes.size() == 1 && Constraints[1].Codes[0] == "0") {
26694 // bswap %eax / bswap %edx / xchgl %eax, %edx -> llvm.bswap.i64
26695 if (matchAsm(AsmPieces[0], "bswap", "%eax") &&
26696 matchAsm(AsmPieces[1], "bswap", "%edx") &&
26697 matchAsm(AsmPieces[2], "xchgl", "%eax,", "%edx"))
26698 return IntrinsicLowering::LowerToByteSwap(CI);
26706 /// getConstraintType - Given a constraint letter, return the type of
26707 /// constraint it is for this target.
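/// On x86, for instance, letters such as 'q', 'x' and 'Y' name register
/// classes, while the immediate-range letters such as 'I' or 'K' do not.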
26708 X86TargetLowering::ConstraintType
26709 X86TargetLowering::getConstraintType(const std::string &Constraint) const {
26710 if (Constraint.size() == 1) {
26711 switch (Constraint[0]) {
26722 return C_RegisterClass;
26746 return TargetLowering::getConstraintType(Constraint);
26749 /// Examine constraint type and operand type and determine a weight value.
26750 /// This object must already have been set up with the operand type
26751 /// and the current alternative constraint selected.
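/// For example, an operand matched against the x86 'I' constraint (an
/// immediate in [0, 31]) only receives CW_Constant weight when its value
/// actually fits that range.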
26752 TargetLowering::ConstraintWeight
26753 X86TargetLowering::getSingleConstraintMatchWeight(
26754 AsmOperandInfo &info, const char *constraint) const {
26755 ConstraintWeight weight = CW_Invalid;
26756 Value *CallOperandVal = info.CallOperandVal;
26757 // If we don't have a value, we can't do a match,
26758 // but allow it at the lowest weight.
26759 if (!CallOperandVal)
26761 Type *type = CallOperandVal->getType();
26762 // Look at the constraint type.
26763 switch (*constraint) {
26765 weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
26776 if (CallOperandVal->getType()->isIntegerTy())
26777 weight = CW_SpecificReg;
26782 if (type->isFloatingPointTy())
26783 weight = CW_SpecificReg;
26786 if (type->isX86_MMXTy() && Subtarget->hasMMX())
26787 weight = CW_SpecificReg;
26791 if (((type->getPrimitiveSizeInBits() == 128) && Subtarget->hasSSE1()) ||
26792 ((type->getPrimitiveSizeInBits() == 256) && Subtarget->hasFp256()))
26793 weight = CW_Register;
26796 if (ConstantInt *C = dyn_cast<ConstantInt>(info.CallOperandVal)) {
26797 if (C->getZExtValue() <= 31)
26798 weight = CW_Constant;
26802 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
26803 if (C->getZExtValue() <= 63)
26804 weight = CW_Constant;
26808 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
26809 if ((C->getSExtValue() >= -0x80) && (C->getSExtValue() <= 0x7f))
26810 weight = CW_Constant;
26814 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
26815 if ((C->getZExtValue() == 0xff) || (C->getZExtValue() == 0xffff))
26816 weight = CW_Constant;
26820 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
26821 if (C->getZExtValue() <= 3)
26822 weight = CW_Constant;
26826 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
26827 if (C->getZExtValue() <= 0xff)
26828 weight = CW_Constant;
26833 if (dyn_cast<ConstantFP>(CallOperandVal)) {
26834 weight = CW_Constant;
26838 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
26839 if ((C->getSExtValue() >= -0x80000000LL) &&
26840 (C->getSExtValue() <= 0x7fffffffLL))
26841 weight = CW_Constant;
26845 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
26846 if (C->getZExtValue() <= 0xffffffff)
26847 weight = CW_Constant;
26854 /// LowerXConstraint - try to replace an X constraint, which matches anything,
26855 /// with another that has more specific requirements based on the type of the
26856 /// corresponding operand.
26857 const char *X86TargetLowering::
26858 LowerXConstraint(EVT ConstraintVT) const {
26859 // FP X constraints get lowered to SSE1/2 registers if available, otherwise
26860 // 'f' like normal targets.
26861 if (ConstraintVT.isFloatingPoint()) {
26862 if (Subtarget->hasSSE2())
26864 if (Subtarget->hasSSE1())
26868 return TargetLowering::LowerXConstraint(ConstraintVT);
26871 /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
26872 /// vector. If it is invalid, don't add anything to Ops.
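/// For example, under the 'I' constraint (immediate in [0, 31]) an operand of
/// (i32 7) becomes a target constant appended to Ops, while (i32 100) is
/// rejected and Ops is left untouched.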
26873 void X86TargetLowering::LowerAsmOperandForConstraint(SDValue Op,
26874 std::string &Constraint,
26875 std::vector<SDValue> &Ops,
26876 SelectionDAG &DAG) const {
26879 // Only support length 1 constraints for now.
26880 if (Constraint.length() > 1) return;
26882 char ConstraintLetter = Constraint[0];
26883 switch (ConstraintLetter) {
26886 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
26887 if (C->getZExtValue() <= 31) {
26888 Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
26894 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
26895 if (C->getZExtValue() <= 63) {
26896 Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
26902 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
26903 if (isInt<8>(C->getSExtValue())) {
26904 Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
26910 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
26911 if (C->getZExtValue() == 0xff || C->getZExtValue() == 0xffff ||
26912 (Subtarget->is64Bit() && C->getZExtValue() == 0xffffffff)) {
26913 Result = DAG.getTargetConstant(C->getSExtValue(), Op.getValueType());
26919 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
26920 if (C->getZExtValue() <= 3) {
26921 Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
26927 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
26928 if (C->getZExtValue() <= 255) {
26929 Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
26935 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
26936 if (C->getZExtValue() <= 127) {
26937 Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
26943 // 32-bit signed value
26944 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
26945 if (ConstantInt::isValueValidForType(Type::getInt32Ty(*DAG.getContext()),
26946 C->getSExtValue())) {
26947 // Widen to 64 bits here to get it sign extended.
26948 Result = DAG.getTargetConstant(C->getSExtValue(), MVT::i64);
26951 // FIXME gcc accepts some relocatable values here too, but only in certain
26952 // memory models; it's complicated.
26957 // 32-bit unsigned value
26958 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
26959 if (ConstantInt::isValueValidForType(Type::getInt32Ty(*DAG.getContext()),
26960 C->getZExtValue())) {
26961 Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
26965 // FIXME gcc accepts some relocatable values here too, but only in certain
26966 // memory models; it's complicated.
26970 // Literal immediates are always ok.
26971 if (ConstantSDNode *CST = dyn_cast<ConstantSDNode>(Op)) {
26972 // Widen to 64 bits here to get it sign extended.
26973 Result = DAG.getTargetConstant(CST->getSExtValue(), MVT::i64);
26977 // In any sort of PIC mode, addresses need to be computed at runtime by
26978 // adding in a register or some sort of table lookup. These can't
26979 // be used as immediates.
26980 if (Subtarget->isPICStyleGOT() || Subtarget->isPICStyleStubPIC())
26983 // If we are in non-pic codegen mode, we allow the address of a global (with
26984 // an optional displacement) to be used with 'i'.
26985 GlobalAddressSDNode *GA = nullptr;
26986 int64_t Offset = 0;
26988 // Match either (GA), (GA+C), (GA+C1+C2), etc.
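// e.g. (add (add GA, 4), 8) matches with Offset == 12.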
26990 if ((GA = dyn_cast<GlobalAddressSDNode>(Op))) {
26991 Offset += GA->getOffset();
26993 } else if (Op.getOpcode() == ISD::ADD) {
26994 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
26995 Offset += C->getZExtValue();
26996 Op = Op.getOperand(0);
26999 } else if (Op.getOpcode() == ISD::SUB) {
27000 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
27001 Offset += -C->getZExtValue();
27002 Op = Op.getOperand(0);
27007 // Otherwise, this isn't something we can handle; reject it.
27011 const GlobalValue *GV = GA->getGlobal();
27012 // If we require an extra load to get this address, as in PIC mode, we
27013 // can't accept it.
27014 if (isGlobalStubReference(
27015 Subtarget->ClassifyGlobalReference(GV, DAG.getTarget())))
27018 Result = DAG.getTargetGlobalAddress(GV, SDLoc(Op),
27019 GA->getValueType(0), Offset);
27024 if (Result.getNode()) {
27025 Ops.push_back(Result);
27028 return TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
27031 std::pair<unsigned, const TargetRegisterClass*>
27032 X86TargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
27034 // First, see if this is a constraint that directly corresponds to an LLVM
27036 if (Constraint.size() == 1) {
27037 // GCC Constraint Letters
27038 switch (Constraint[0]) {
27040 // TODO: Slight differences here in allocation order and leaving
27041 // RIP in the class. Do they matter any more here than they do
27042 // in the normal allocation?
27043 case 'q': // GENERAL_REGS in 64-bit mode, Q_REGS in 32-bit mode.
27044 if (Subtarget->is64Bit()) {
27045 if (VT == MVT::i32 || VT == MVT::f32)
27046 return std::make_pair(0U, &X86::GR32RegClass);
27047 if (VT == MVT::i16)
27048 return std::make_pair(0U, &X86::GR16RegClass);
27049 if (VT == MVT::i8 || VT == MVT::i1)
27050 return std::make_pair(0U, &X86::GR8RegClass);
27051 if (VT == MVT::i64 || VT == MVT::f64)
27052 return std::make_pair(0U, &X86::GR64RegClass);
27055 // 32-bit fallthrough
27056 case 'Q': // Q_REGS
27057 if (VT == MVT::i32 || VT == MVT::f32)
27058 return std::make_pair(0U, &X86::GR32_ABCDRegClass);
27059 if (VT == MVT::i16)
27060 return std::make_pair(0U, &X86::GR16_ABCDRegClass);
27061 if (VT == MVT::i8 || VT == MVT::i1)
27062 return std::make_pair(0U, &X86::GR8_ABCD_LRegClass);
27063 if (VT == MVT::i64)
27064 return std::make_pair(0U, &X86::GR64_ABCDRegClass);
27066 case 'r': // GENERAL_REGS
27067 case 'l': // INDEX_REGS
27068 if (VT == MVT::i8 || VT == MVT::i1)
27069 return std::make_pair(0U, &X86::GR8RegClass);
27070 if (VT == MVT::i16)
27071 return std::make_pair(0U, &X86::GR16RegClass);
27072 if (VT == MVT::i32 || VT == MVT::f32 || !Subtarget->is64Bit())
27073 return std::make_pair(0U, &X86::GR32RegClass);
27074 return std::make_pair(0U, &X86::GR64RegClass);
27075 case 'R': // LEGACY_REGS
27076 if (VT == MVT::i8 || VT == MVT::i1)
27077 return std::make_pair(0U, &X86::GR8_NOREXRegClass);
27078 if (VT == MVT::i16)
27079 return std::make_pair(0U, &X86::GR16_NOREXRegClass);
27080 if (VT == MVT::i32 || !Subtarget->is64Bit())
27081 return std::make_pair(0U, &X86::GR32_NOREXRegClass);
27082 return std::make_pair(0U, &X86::GR64_NOREXRegClass);
27083 case 'f': // FP Stack registers.
27084 // If SSE is enabled for this VT, use f80 to ensure the isel moves the
27085 // value to the correct fpstack register class.
27086 if (VT == MVT::f32 && !isScalarFPTypeInSSEReg(VT))
27087 return std::make_pair(0U, &X86::RFP32RegClass);
27088 if (VT == MVT::f64 && !isScalarFPTypeInSSEReg(VT))
27089 return std::make_pair(0U, &X86::RFP64RegClass);
27090 return std::make_pair(0U, &X86::RFP80RegClass);
27091 case 'y': // MMX_REGS if MMX allowed.
27092 if (!Subtarget->hasMMX()) break;
27093 return std::make_pair(0U, &X86::VR64RegClass);
27094 case 'Y': // SSE_REGS if SSE2 allowed
27095 if (!Subtarget->hasSSE2()) break;
27097 case 'x': // SSE_REGS if SSE1 allowed or AVX_REGS if AVX allowed
27098 if (!Subtarget->hasSSE1()) break;
27100 switch (VT.SimpleTy) {
27102 // Scalar SSE types.
27105 return std::make_pair(0U, &X86::FR32RegClass);
27108 return std::make_pair(0U, &X86::FR64RegClass);
27116 return std::make_pair(0U, &X86::VR128RegClass);
27124 return std::make_pair(0U, &X86::VR256RegClass);
27129 return std::make_pair(0U, &X86::VR512RegClass);
27135 // Use the default implementation in TargetLowering to convert the register
27136 // constraint into a member of a register class.
27137 std::pair<unsigned, const TargetRegisterClass*> Res;
27138 Res = TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);
27140 // Not found as a standard register?
27142 // Map "st(0)" .. "st(7)" to FP0 .. FP7 in the RFP80 register class.
27143 if (Constraint.size() == 7 && Constraint[0] == '{' &&
27144 tolower(Constraint[1]) == 's' &&
27145 tolower(Constraint[2]) == 't' &&
27146 Constraint[3] == '(' &&
27147 (Constraint[4] >= '0' && Constraint[4] <= '7') &&
27148 Constraint[5] == ')' &&
27149 Constraint[6] == '}') {
27151 Res.first = X86::FP0+Constraint[4]-'0';
27152 Res.second = &X86::RFP80RegClass;
27156 // GCC allows "st(0)" to be called just plain "st".
27157 if (StringRef("{st}").equals_lower(Constraint)) {
27158 Res.first = X86::FP0;
27159 Res.second = &X86::RFP80RegClass;
27164 if (StringRef("{flags}").equals_lower(Constraint)) {
27165 Res.first = X86::EFLAGS;
27166 Res.second = &X86::CCRRegClass;
27170 // 'A' means EAX + EDX.
27171 if (Constraint == "A") {
27172 Res.first = X86::EAX;
27173 Res.second = &X86::GR32_ADRegClass;
27179 // Otherwise, check to see if this is a register class of the wrong value
27180 // type. For example, we want to map "{ax},i32" -> {eax}; we don't want it
27181 // to turn into {ax},{dx}.
27182 if (Res.second->hasType(VT))
27183 return Res; // Correct type already, nothing to do.
27185 // All of the single-register GCC register classes map their values onto
27186 // 16-bit register pieces "ax","dx","cx","bx","si","di","bp","sp". If we
27187 // really want an 8-bit or 32-bit register, map to the appropriate register
27188 // class and return the appropriate register.
27189 if (Res.second == &X86::GR16RegClass) {
27190 if (VT == MVT::i8 || VT == MVT::i1) {
27191 unsigned DestReg = 0;
27192 switch (Res.first) {
27194 case X86::AX: DestReg = X86::AL; break;
27195 case X86::DX: DestReg = X86::DL; break;
27196 case X86::CX: DestReg = X86::CL; break;
27197 case X86::BX: DestReg = X86::BL; break;
27200 Res.first = DestReg;
27201 Res.second = &X86::GR8RegClass;
27203 } else if (VT == MVT::i32 || VT == MVT::f32) {
27204 unsigned DestReg = 0;
27205 switch (Res.first) {
27207 case X86::AX: DestReg = X86::EAX; break;
27208 case X86::DX: DestReg = X86::EDX; break;
27209 case X86::CX: DestReg = X86::ECX; break;
27210 case X86::BX: DestReg = X86::EBX; break;
27211 case X86::SI: DestReg = X86::ESI; break;
27212 case X86::DI: DestReg = X86::EDI; break;
27213 case X86::BP: DestReg = X86::EBP; break;
27214 case X86::SP: DestReg = X86::ESP; break;
27217 Res.first = DestReg;
27218 Res.second = &X86::GR32RegClass;
27220 } else if (VT == MVT::i64 || VT == MVT::f64) {
27221 unsigned DestReg = 0;
27222 switch (Res.first) {
27224 case X86::AX: DestReg = X86::RAX; break;
27225 case X86::DX: DestReg = X86::RDX; break;
27226 case X86::CX: DestReg = X86::RCX; break;
27227 case X86::BX: DestReg = X86::RBX; break;
27228 case X86::SI: DestReg = X86::RSI; break;
27229 case X86::DI: DestReg = X86::RDI; break;
27230 case X86::BP: DestReg = X86::RBP; break;
27231 case X86::SP: DestReg = X86::RSP; break;
27234 Res.first = DestReg;
27235 Res.second = &X86::GR64RegClass;
27238 } else if (Res.second == &X86::FR32RegClass ||
27239 Res.second == &X86::FR64RegClass ||
27240 Res.second == &X86::VR128RegClass ||
27241 Res.second == &X86::VR256RegClass ||
27242 Res.second == &X86::FR32XRegClass ||
27243 Res.second == &X86::FR64XRegClass ||
27244 Res.second == &X86::VR128XRegClass ||
27245 Res.second == &X86::VR256XRegClass ||
27246 Res.second == &X86::VR512RegClass) {
27247 // Handle references to XMM physical registers that got mapped into the
27248 // wrong class. This can happen with constraints like {xmm0} where the
27249 // target independent register mapper will just pick the first match it can
27250 // find, ignoring the required type.
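// E.g. a "{xmm0}" operand used as v4f32 may initially come back in a scalar
// FP class and is remapped to VR128 below based on VT.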
27252 if (VT == MVT::f32 || VT == MVT::i32)
27253 Res.second = &X86::FR32RegClass;
27254 else if (VT == MVT::f64 || VT == MVT::i64)
27255 Res.second = &X86::FR64RegClass;
27256 else if (X86::VR128RegClass.hasType(VT))
27257 Res.second = &X86::VR128RegClass;
27258 else if (X86::VR256RegClass.hasType(VT))
27259 Res.second = &X86::VR256RegClass;
27260 else if (X86::VR512RegClass.hasType(VT))
27261 Res.second = &X86::VR512RegClass;
27267 int X86TargetLowering::getScalingFactorCost(const AddrMode &AM,
27269 // Scaling factors are not free at all.
27270 // An indexed folded instruction, i.e., inst (reg1, reg2, scale),
27271 // will take 2 allocations in the out-of-order engine instead of 1
27272 // for plain addressing mode, i.e. inst (reg1).
27274 // vaddps (%rsi,%rdx), %ymm0, %ymm1
27275 // Requires two allocations (one for the load, one for the computation)
27277 // vaddps (%rsi), %ymm0, %ymm1
27278 // Requires just 1 allocation, i.e., freeing allocations for other operations
27279 // and having fewer micro-operations to execute.
27281 // For some X86 architectures, this is even worse because for instance for
27282 // stores, the complex addressing mode forces the instruction to use the
27283 // "load" ports instead of the dedicated "store" port.
27284 // E.g., on Haswell:
27285 // vmovaps %ymm1, (%r8, %rdi) can use port 2 or 3.
27286 // vmovaps %ymm1, (%r8) can use port 2, 3, or 7.
27287 if (isLegalAddressingMode(AM, Ty))
27288 // Scale represents reg2 * scale, so account for a cost of 1
27289 // as soon as we use a second register.
27290 return AM.Scale != 0;
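// E.g. (illustrative) [%rsi + 4*%rdx] (AM.Scale == 4) is reported as cost 1,
// while [%rsi] or [%rsi + 16] (AM.Scale == 0) is reported as cost 0.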
27294 bool X86TargetLowering::isTargetFTOL() const {
27295 return Subtarget->isTargetKnownWindowsMSVC() && !Subtarget->is64Bit();