//===-- X86ISelLowering.cpp - X86 DAG Lowering Implementation -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that X86 uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//
#include "X86ISelLowering.h"
#include "Utils/X86ShuffleDecode.h"
#include "X86CallingConv.h"
#include "X86FrameLowering.h"
#include "X86InstrBuilder.h"
#include "X86MachineFunctionInfo.h"
#include "X86TargetMachine.h"
#include "X86TargetObjectFile.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/VariadicFunction.h"
#include "llvm/CodeGen/IntrinsicLowering.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetOptions.h"
#include "X86IntrinsicsInfo.h"
#include <bitset>
#include <numeric>
#include <cctype>
using namespace llvm;

#define DEBUG_TYPE "x86-isel"

STATISTIC(NumTailCalls, "Number of tail calls");

static cl::opt<bool> ExperimentalVectorWideningLegalization(
    "x86-experimental-vector-widening-legalization", cl::init(false),
    cl::desc("Enable an experimental vector type legalization through widening "
             "rather than promotion."),
    cl::Hidden);

static cl::opt<bool> ExperimentalVectorShuffleLowering(
    "x86-experimental-vector-shuffle-lowering", cl::init(true),
    cl::desc("Enable an experimental vector shuffle lowering code path."),
    cl::Hidden);

static cl::opt<bool> ExperimentalVectorShuffleLegality(
    "x86-experimental-vector-shuffle-legality", cl::init(false),
    cl::desc("Enable experimental shuffle legality based on the experimental "
             "shuffle lowering. Should only be used with the experimental "
             "shuffle lowering."),
    cl::Hidden);

static cl::opt<int> ReciprocalEstimateRefinementSteps(
    "x86-recip-refinement-steps", cl::init(1),
    cl::desc("Specify the number of Newton-Raphson iterations applied to the "
             "result of the hardware reciprocal estimate instruction."),
    cl::NotHidden);
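
// A sketch of what each refinement step computes: given an estimate X of
// 1/D, one Newton-Raphson iteration produces X' = X * (2 - D * X), roughly
// doubling the number of correct bits on top of the hardware estimate.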

// Forward declarations.
static SDValue getMOVL(SelectionDAG &DAG, SDLoc dl, EVT VT, SDValue V1,
                       SDValue V2);

static SDValue ExtractSubVector(SDValue Vec, unsigned IdxVal,
                                SelectionDAG &DAG, SDLoc dl,
                                unsigned vectorWidth) {
  assert((vectorWidth == 128 || vectorWidth == 256) &&
         "Unsupported vector width");
  EVT VT = Vec.getValueType();
  EVT ElVT = VT.getVectorElementType();
  unsigned Factor = VT.getSizeInBits()/vectorWidth;
  EVT ResultVT = EVT::getVectorVT(*DAG.getContext(), ElVT,
                                  VT.getVectorNumElements()/Factor);

  // Extract from UNDEF is UNDEF.
  if (Vec.getOpcode() == ISD::UNDEF)
    return DAG.getUNDEF(ResultVT);

  // Extract the relevant vectorWidth bits. Generate an EXTRACT_SUBVECTOR.
  unsigned ElemsPerChunk = vectorWidth / ElVT.getSizeInBits();

  // This is the index of the first element of the vectorWidth-bit chunk
  // we want.
  unsigned NormalizedIdxVal = (((IdxVal * ElVT.getSizeInBits()) / vectorWidth)
                               * ElemsPerChunk);
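  // For example, extracting the upper 128-bit half of a v8i32 with
  // IdxVal == 4 gives NormalizedIdxVal = ((4 * 32) / 128) * 4 = 4, i.e. the
  // first element of the second 128-bit chunk.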

  // If the input is a buildvector just emit a smaller one.
  if (Vec.getOpcode() == ISD::BUILD_VECTOR)
    return DAG.getNode(ISD::BUILD_VECTOR, dl, ResultVT,
                       makeArrayRef(Vec->op_begin() + NormalizedIdxVal,
                                    ElemsPerChunk));

  SDValue VecIdx = DAG.getIntPtrConstant(NormalizedIdxVal);
  return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, ResultVT, Vec, VecIdx);
}

/// Generate a DAG to grab 128 bits from a vector > 128 bits. This
/// sets things up to match an AVX VEXTRACTF128 / VEXTRACTI128
/// or AVX-512 VEXTRACTF32x4 / VEXTRACTI32x4 instruction,
/// or a simple subregister reference. Idx is an index in the 128 bits
/// we want. It need not be aligned to a 128-bit boundary. That makes
/// lowering EXTRACT_VECTOR_ELT operations easier.
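///
/// For example, grabbing the upper half of a v8f32 value V could look like
///   SDValue Hi = Extract128BitVector(V, 4, DAG, dl);
/// which yields a v4f32 that can match VEXTRACTF128 with immediate 1.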
static SDValue Extract128BitVector(SDValue Vec, unsigned IdxVal,
                                   SelectionDAG &DAG, SDLoc dl) {
  assert((Vec.getValueType().is256BitVector() ||
          Vec.getValueType().is512BitVector()) && "Unexpected vector size!");
  return ExtractSubVector(Vec, IdxVal, DAG, dl, 128);
}

/// Generate a DAG to grab 256 bits from a 512-bit vector.
static SDValue Extract256BitVector(SDValue Vec, unsigned IdxVal,
                                   SelectionDAG &DAG, SDLoc dl) {
  assert(Vec.getValueType().is512BitVector() && "Unexpected vector size!");
  return ExtractSubVector(Vec, IdxVal, DAG, dl, 256);
}

static SDValue InsertSubVector(SDValue Result, SDValue Vec,
                               unsigned IdxVal, SelectionDAG &DAG,
                               SDLoc dl, unsigned vectorWidth) {
  assert((vectorWidth == 128 || vectorWidth == 256) &&
         "Unsupported vector width");
  // Inserting UNDEF is a no-op; just return Result.
  if (Vec.getOpcode() == ISD::UNDEF)
    return Result;
  EVT VT = Vec.getValueType();
  EVT ElVT = VT.getVectorElementType();
  EVT ResultVT = Result.getValueType();

  // Insert the relevant vectorWidth bits.
  unsigned ElemsPerChunk = vectorWidth/ElVT.getSizeInBits();

  // This is the index of the first element of the vectorWidth-bit chunk
  // we want.
  unsigned NormalizedIdxVal = (((IdxVal * ElVT.getSizeInBits())/vectorWidth)
                               * ElemsPerChunk);

  SDValue VecIdx = DAG.getIntPtrConstant(NormalizedIdxVal);
  return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ResultVT, Result, Vec, VecIdx);
}

/// Generate a DAG to put 128 bits into a vector > 128 bits. This
/// sets things up to match an AVX VINSERTF128/VINSERTI128 or
/// AVX-512 VINSERTF32x4/VINSERTI32x4 instruction, or a
/// simple superregister reference. Idx is an index in the 128 bits
/// we want. It need not be aligned to a 128-bit boundary. That makes
/// lowering INSERT_VECTOR_ELT operations easier.
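///
/// For example, placing a v4f32 value V into the upper half of a v8f32
/// value R could look like
///   R = Insert128BitVector(R, V, 4, DAG, dl);
/// which can match VINSERTF128 with immediate 1.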
static SDValue Insert128BitVector(SDValue Result, SDValue Vec, unsigned IdxVal,
                                  SelectionDAG &DAG, SDLoc dl) {
  assert(Vec.getValueType().is128BitVector() && "Unexpected vector size!");
  return InsertSubVector(Result, Vec, IdxVal, DAG, dl, 128);
}

static SDValue Insert256BitVector(SDValue Result, SDValue Vec, unsigned IdxVal,
                                  SelectionDAG &DAG, SDLoc dl) {
  assert(Vec.getValueType().is256BitVector() && "Unexpected vector size!");
  return InsertSubVector(Result, Vec, IdxVal, DAG, dl, 256);
}

/// Concat two 128-bit vectors into a 256-bit vector using VINSERTF128
/// instructions. This is used because creating CONCAT_VECTOR nodes of
/// BUILD_VECTORS returns a larger BUILD_VECTOR while we're trying to lower
/// large BUILD_VECTORS.
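///
/// For example, two v4i32 halves Lo and Hi could be combined with
///   SDValue V = Concat128BitVectors(Lo, Hi, MVT::v8i32, 8, DAG, dl);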
static SDValue Concat128BitVectors(SDValue V1, SDValue V2, EVT VT,
                                   unsigned NumElems, SelectionDAG &DAG,
                                   SDLoc dl) {
  SDValue V = Insert128BitVector(DAG.getUNDEF(VT), V1, 0, DAG, dl);
  return Insert128BitVector(V, V2, NumElems/2, DAG, dl);
}

static SDValue Concat256BitVectors(SDValue V1, SDValue V2, EVT VT,
                                   unsigned NumElems, SelectionDAG &DAG,
                                   SDLoc dl) {
  SDValue V = Insert256BitVector(DAG.getUNDEF(VT), V1, 0, DAG, dl);
  return Insert256BitVector(V, V2, NumElems/2, DAG, dl);
}

X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
                                     const X86Subtarget &STI)
    : TargetLowering(TM), Subtarget(&STI) {
  X86ScalarSSEf64 = Subtarget->hasSSE2();
  X86ScalarSSEf32 = Subtarget->hasSSE1();
  TD = getDataLayout();

  // Set up the TargetLowering object.
  static const MVT IntVTs[] = { MVT::i8, MVT::i16, MVT::i32, MVT::i64 };

  // X86 is weird. It always uses i8 for shift amounts and setcc results.
  setBooleanContents(ZeroOrOneBooleanContent);
  // X86-SSE is even stranger. It uses -1 or 0 for vector masks.
  setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);

  // For 64-bit, since we have so many registers, use the ILP scheduler.
  // For 32-bit, use the register pressure specific scheduling.
  // For Atom, always use ILP scheduling.
  if (Subtarget->isAtom())
    setSchedulingPreference(Sched::ILP);
  else if (Subtarget->is64Bit())
    setSchedulingPreference(Sched::ILP);
  else
    setSchedulingPreference(Sched::RegPressure);
  const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
  setStackPointerRegisterToSaveRestore(RegInfo->getStackRegister());

  // Bypass expensive divides on Atom when compiling with O2.
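  // The bypass rewrites a full-width divide as a runtime check: when both
  // operands fit in the narrow type (e.g. 8 bits for a 32-bit divide), the
  // much cheaper narrow divide is used instead.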
  if (TM.getOptLevel() >= CodeGenOpt::Default) {
    if (Subtarget->hasSlowDivide32())
      addBypassSlowDiv(32, 8);
    if (Subtarget->hasSlowDivide64() && Subtarget->is64Bit())
      addBypassSlowDiv(64, 16);
  }

  if (Subtarget->isTargetKnownWindowsMSVC()) {
    // Setup Windows compiler runtime calls.
    setLibcallName(RTLIB::SDIV_I64, "_alldiv");
    setLibcallName(RTLIB::UDIV_I64, "_aulldiv");
    setLibcallName(RTLIB::SREM_I64, "_allrem");
    setLibcallName(RTLIB::UREM_I64, "_aullrem");
    setLibcallName(RTLIB::MUL_I64, "_allmul");
    setLibcallCallingConv(RTLIB::SDIV_I64, CallingConv::X86_StdCall);
    setLibcallCallingConv(RTLIB::UDIV_I64, CallingConv::X86_StdCall);
    setLibcallCallingConv(RTLIB::SREM_I64, CallingConv::X86_StdCall);
    setLibcallCallingConv(RTLIB::UREM_I64, CallingConv::X86_StdCall);
    setLibcallCallingConv(RTLIB::MUL_I64, CallingConv::X86_StdCall);

    // The _ftol2 runtime function has an unusual calling conv, which
    // is modeled by a special pseudo-instruction.
    setLibcallName(RTLIB::FPTOUINT_F64_I64, nullptr);
    setLibcallName(RTLIB::FPTOUINT_F32_I64, nullptr);
    setLibcallName(RTLIB::FPTOUINT_F64_I32, nullptr);
    setLibcallName(RTLIB::FPTOUINT_F32_I32, nullptr);
  }

  if (Subtarget->isTargetDarwin()) {
    // Darwin should use _setjmp/_longjmp instead of setjmp/longjmp.
    setUseUnderscoreSetJmp(false);
    setUseUnderscoreLongJmp(false);
  } else if (Subtarget->isTargetWindowsGNU()) {
    // MS runtime is weird: it exports _setjmp, but longjmp!
    setUseUnderscoreSetJmp(true);
    setUseUnderscoreLongJmp(false);
  } else {
    setUseUnderscoreSetJmp(true);
    setUseUnderscoreLongJmp(true);
  }

  // Set up the register classes.
  addRegisterClass(MVT::i8, &X86::GR8RegClass);
  addRegisterClass(MVT::i16, &X86::GR16RegClass);
  addRegisterClass(MVT::i32, &X86::GR32RegClass);
  if (Subtarget->is64Bit())
    addRegisterClass(MVT::i64, &X86::GR64RegClass);

  for (MVT VT : MVT::integer_valuetypes())
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);

  // We don't accept any truncstore of integer registers.
  setTruncStoreAction(MVT::i64, MVT::i32, Expand);
  setTruncStoreAction(MVT::i64, MVT::i16, Expand);
  setTruncStoreAction(MVT::i64, MVT::i8, Expand);
  setTruncStoreAction(MVT::i32, MVT::i16, Expand);
  setTruncStoreAction(MVT::i32, MVT::i8, Expand);
  setTruncStoreAction(MVT::i16, MVT::i8, Expand);

  setTruncStoreAction(MVT::f64, MVT::f32, Expand);

  // SETOEQ and SETUNE require checking two conditions.
  setCondCodeAction(ISD::SETOEQ, MVT::f32, Expand);
  setCondCodeAction(ISD::SETOEQ, MVT::f64, Expand);
  setCondCodeAction(ISD::SETOEQ, MVT::f80, Expand);
  setCondCodeAction(ISD::SETUNE, MVT::f32, Expand);
  setCondCodeAction(ISD::SETUNE, MVT::f64, Expand);
  setCondCodeAction(ISD::SETUNE, MVT::f80, Expand);

  // Promote all UINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have this
  // operation natively supported.
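  // Promotion zero-extends the integer input, so the unsigned value is
  // preserved; e.g. (uint_to_fp i16 x) effectively becomes
  // (sint_to_fp (zext x to i32)).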
  setOperationAction(ISD::UINT_TO_FP, MVT::i1, Promote);
  setOperationAction(ISD::UINT_TO_FP, MVT::i8, Promote);
  setOperationAction(ISD::UINT_TO_FP, MVT::i16, Promote);

  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::UINT_TO_FP, MVT::i32, Promote);
    setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);
  } else if (!TM.Options.UseSoftFloat) {
    // We have an algorithm for SSE2->double, and we turn this into a
    // 64-bit FILD followed by conditional FADD for other targets.
    setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);
    // We have an algorithm for SSE2, and we turn this into a 64-bit
    // FILD for other targets.
    setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
  }

  // Promote i1/i8 SINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have
  // this operation natively supported.
  setOperationAction(ISD::SINT_TO_FP, MVT::i1, Promote);
  setOperationAction(ISD::SINT_TO_FP, MVT::i8, Promote);

  if (!TM.Options.UseSoftFloat) {
    // SSE has no i16 to fp conversion, only i32.
    if (X86ScalarSSEf32) {
      setOperationAction(ISD::SINT_TO_FP, MVT::i16, Promote);
      // f32 and f64 cases are Legal, f80 case is not.
      setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
    } else {
      setOperationAction(ISD::SINT_TO_FP, MVT::i16, Custom);
      setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
    }
  } else {
    setOperationAction(ISD::SINT_TO_FP, MVT::i16, Promote);
    setOperationAction(ISD::SINT_TO_FP, MVT::i32, Promote);
  }

  // In 32-bit mode these are custom lowered. In 64-bit mode f32 and f64
  // are Legal, f80 is custom lowered.
  setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
  setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);

  // Promote i1/i8 FP_TO_SINT to larger FP_TO_SINTs, as X86 doesn't have
  // this operation natively supported.
  setOperationAction(ISD::FP_TO_SINT, MVT::i1, Promote);
  setOperationAction(ISD::FP_TO_SINT, MVT::i8, Promote);

  if (X86ScalarSSEf32) {
    setOperationAction(ISD::FP_TO_SINT, MVT::i16, Promote);
    // f32 and f64 cases are Legal, f80 case is not.
    setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
  } else {
    setOperationAction(ISD::FP_TO_SINT, MVT::i16, Custom);
    setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
  }

  // Handle FP_TO_UINT by promoting the destination to a larger signed
  // conversion.
  setOperationAction(ISD::FP_TO_UINT, MVT::i1, Promote);
  setOperationAction(ISD::FP_TO_UINT, MVT::i8, Promote);
  setOperationAction(ISD::FP_TO_UINT, MVT::i16, Promote);

  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::FP_TO_UINT, MVT::i64, Expand);
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Promote);
  } else if (!TM.Options.UseSoftFloat) {
    // Since AVX is a superset of SSE3, only check for SSE here.
    if (Subtarget->hasSSE1() && !Subtarget->hasSSE3())
      // Expand FP_TO_UINT into a select.
      // FIXME: We would like to use a Custom expander here eventually to do
      // the optimal thing for SSE vs. the default expansion in the legalizer.
      setOperationAction(ISD::FP_TO_UINT, MVT::i32, Expand);
    else
      // With SSE3 we can use fisttpll to convert to a signed i64; without
      // SSE, we're stuck with a fistpll.
      setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
  }

  if (isTargetFTOL()) {
    // Use the _ftol2 runtime function, which has a pseudo-instruction
    // to handle its weird calling convention.
    setOperationAction(ISD::FP_TO_UINT, MVT::i64, Custom);
  }

  // TODO: when we have SSE, these could be more efficient, by using movd/movq.
  if (!X86ScalarSSEf64) {
    setOperationAction(ISD::BITCAST, MVT::f32, Expand);
    setOperationAction(ISD::BITCAST, MVT::i32, Expand);
    if (Subtarget->is64Bit()) {
      setOperationAction(ISD::BITCAST, MVT::f64, Expand);
      // Without SSE, i64->f64 goes through memory.
      setOperationAction(ISD::BITCAST, MVT::i64, Expand);
    }
  }

  // Scalar integer divide and remainder are lowered to use operations that
  // produce two results, to match the available instructions. This exposes
  // the two-result form to trivial CSE, which is able to combine x/y and x%y
  // into a single instruction.
  //
  // Scalar integer multiply-high is also lowered to use two-result
  // operations, to match the available instructions. However, plain multiply
  // (low) operations are left as Legal, as there are single-result
  // instructions for this in x86. Using the two-result multiply instructions
  // when both high and low results are needed must be arranged by dagcombine.
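  //
  // For example, in
  //   int q = x / y, r = x % y;
  // both expressions become one ISD::SDIVREM node after expansion, so a
  // single x86 IDIV supplies the quotient (EAX) and remainder (EDX).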
  for (unsigned i = 0; i != array_lengthof(IntVTs); ++i) {
    MVT VT = IntVTs[i];
    setOperationAction(ISD::MULHS, VT, Expand);
    setOperationAction(ISD::MULHU, VT, Expand);
    setOperationAction(ISD::SDIV, VT, Expand);
    setOperationAction(ISD::UDIV, VT, Expand);
    setOperationAction(ISD::SREM, VT, Expand);
    setOperationAction(ISD::UREM, VT, Expand);

    // Add/Sub overflow ops with MVT::Glues are lowered to EFLAGS dependences.
    setOperationAction(ISD::ADDC, VT, Custom);
    setOperationAction(ISD::ADDE, VT, Custom);
    setOperationAction(ISD::SUBC, VT, Custom);
    setOperationAction(ISD::SUBE, VT, Custom);
  }

  setOperationAction(ISD::BR_JT, MVT::Other, Expand);
  setOperationAction(ISD::BRCOND, MVT::Other, Custom);
  setOperationAction(ISD::BR_CC, MVT::f32, Expand);
  setOperationAction(ISD::BR_CC, MVT::f64, Expand);
  setOperationAction(ISD::BR_CC, MVT::f80, Expand);
  setOperationAction(ISD::BR_CC, MVT::i8, Expand);
  setOperationAction(ISD::BR_CC, MVT::i16, Expand);
  setOperationAction(ISD::BR_CC, MVT::i32, Expand);
  setOperationAction(ISD::BR_CC, MVT::i64, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::f32, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::f64, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::f80, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::i8, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::i16, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::i32, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::i64, Expand);
  if (Subtarget->is64Bit())
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i32, Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8, Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
  setOperationAction(ISD::FP_ROUND_INREG, MVT::f32, Expand);
  setOperationAction(ISD::FREM, MVT::f32, Expand);
  setOperationAction(ISD::FREM, MVT::f64, Expand);
  setOperationAction(ISD::FREM, MVT::f80, Expand);
  setOperationAction(ISD::FLT_ROUNDS_, MVT::i32, Custom);

  // Promote the i8 variants and force them on up to i32 which has a shorter
  // encoding.
  setOperationAction(ISD::CTTZ, MVT::i8, Promote);
  AddPromotedToType(ISD::CTTZ, MVT::i8, MVT::i32);
  setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i8, Promote);
  AddPromotedToType(ISD::CTTZ_ZERO_UNDEF, MVT::i8, MVT::i32);
  if (Subtarget->hasBMI()) {
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i16, Expand);
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32, Expand);
    if (Subtarget->is64Bit())
      setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i64, Expand);
  } else {
    setOperationAction(ISD::CTTZ, MVT::i16, Custom);
    setOperationAction(ISD::CTTZ, MVT::i32, Custom);
    if (Subtarget->is64Bit())
      setOperationAction(ISD::CTTZ, MVT::i64, Custom);
  }

  if (Subtarget->hasLZCNT()) {
    // When promoting the i8 variants, force them to i32 for a shorter
    // encoding.
    setOperationAction(ISD::CTLZ, MVT::i8, Promote);
    AddPromotedToType(ISD::CTLZ, MVT::i8, MVT::i32);
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i8, Promote);
    AddPromotedToType(ISD::CTLZ_ZERO_UNDEF, MVT::i8, MVT::i32);
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i16, Expand);
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Expand);
    if (Subtarget->is64Bit())
      setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i64, Expand);
  } else {
    setOperationAction(ISD::CTLZ, MVT::i8, Custom);
    setOperationAction(ISD::CTLZ, MVT::i16, Custom);
    setOperationAction(ISD::CTLZ, MVT::i32, Custom);
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i8, Custom);
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i16, Custom);
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Custom);
    if (Subtarget->is64Bit()) {
      setOperationAction(ISD::CTLZ, MVT::i64, Custom);
      setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i64, Custom);
    }
  }

  // Special handling for half-precision floating point conversions.
  // If we don't have F16C support, then lower half float conversions
  // into library calls.
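  // With the Expand action, FP16_TO_FP and FP_TO_FP16 become calls to the
  // usual soft-float helpers (conventionally __gnu_h2f_ieee and
  // __gnu_f2h_ieee in compiler-rt/libgcc).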
  if (TM.Options.UseSoftFloat || !Subtarget->hasF16C()) {
    setOperationAction(ISD::FP16_TO_FP, MVT::f32, Expand);
    setOperationAction(ISD::FP_TO_FP16, MVT::f32, Expand);
  }

  // There's never any support for operations beyond MVT::f32.
  setOperationAction(ISD::FP16_TO_FP, MVT::f64, Expand);
  setOperationAction(ISD::FP16_TO_FP, MVT::f80, Expand);
  setOperationAction(ISD::FP_TO_FP16, MVT::f64, Expand);
  setOperationAction(ISD::FP_TO_FP16, MVT::f80, Expand);

  setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::f80, MVT::f16, Expand);
  setTruncStoreAction(MVT::f32, MVT::f16, Expand);
  setTruncStoreAction(MVT::f64, MVT::f16, Expand);
  setTruncStoreAction(MVT::f80, MVT::f16, Expand);

  if (Subtarget->hasPOPCNT()) {
    setOperationAction(ISD::CTPOP, MVT::i8, Promote);
  } else {
    setOperationAction(ISD::CTPOP, MVT::i8, Expand);
    setOperationAction(ISD::CTPOP, MVT::i16, Expand);
    setOperationAction(ISD::CTPOP, MVT::i32, Expand);
    if (Subtarget->is64Bit())
      setOperationAction(ISD::CTPOP, MVT::i64, Expand);
  }

  setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, Custom);

  if (!Subtarget->hasMOVBE())
    setOperationAction(ISD::BSWAP, MVT::i16, Expand);

  // These should be promoted to a larger select which is supported.
  setOperationAction(ISD::SELECT, MVT::i1, Promote);
  // X86 wants to expand cmov itself.
  setOperationAction(ISD::SELECT, MVT::i8, Custom);
  setOperationAction(ISD::SELECT, MVT::i16, Custom);
  setOperationAction(ISD::SELECT, MVT::i32, Custom);
  setOperationAction(ISD::SELECT, MVT::f32, Custom);
  setOperationAction(ISD::SELECT, MVT::f64, Custom);
  setOperationAction(ISD::SELECT, MVT::f80, Custom);
  setOperationAction(ISD::SETCC, MVT::i8, Custom);
  setOperationAction(ISD::SETCC, MVT::i16, Custom);
  setOperationAction(ISD::SETCC, MVT::i32, Custom);
  setOperationAction(ISD::SETCC, MVT::f32, Custom);
  setOperationAction(ISD::SETCC, MVT::f64, Custom);
  setOperationAction(ISD::SETCC, MVT::f80, Custom);
  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::SELECT, MVT::i64, Custom);
    setOperationAction(ISD::SETCC, MVT::i64, Custom);
  }
  setOperationAction(ISD::EH_RETURN, MVT::Other, Custom);
  // NOTE: EH_SJLJ_SETJMP/_LONGJMP are NOT intended to support SjLj exception
  // handling; they are a light-weight setjmp/longjmp replacement used for
  // continuations, user-level threading, and the like. As a result, no other
  // SjLj exception interfaces are implemented, so please don't build your own
  // exception handling on top of them.
  // LLVM/Clang supports zero-cost DWARF exception handling.
  setOperationAction(ISD::EH_SJLJ_SETJMP, MVT::i32, Custom);
  setOperationAction(ISD::EH_SJLJ_LONGJMP, MVT::Other, Custom);

  setOperationAction(ISD::ConstantPool, MVT::i32, Custom);
  setOperationAction(ISD::JumpTable, MVT::i32, Custom);
  setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom);
  if (Subtarget->is64Bit())
    setOperationAction(ISD::GlobalTLSAddress, MVT::i64, Custom);
  setOperationAction(ISD::ExternalSymbol, MVT::i32, Custom);
  setOperationAction(ISD::BlockAddress, MVT::i32, Custom);
  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::ConstantPool, MVT::i64, Custom);
    setOperationAction(ISD::JumpTable, MVT::i64, Custom);
    setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);
    setOperationAction(ISD::ExternalSymbol, MVT::i64, Custom);
    setOperationAction(ISD::BlockAddress, MVT::i64, Custom);
  }
  // 64-bit add, sub, shl, sra, srl (iff 32-bit x86).
  setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom);
  setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom);
  setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom);
  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::SHL_PARTS, MVT::i64, Custom);
    setOperationAction(ISD::SRA_PARTS, MVT::i64, Custom);
    setOperationAction(ISD::SRL_PARTS, MVT::i64, Custom);
  }

  if (Subtarget->hasSSE1())
    setOperationAction(ISD::PREFETCH, MVT::Other, Legal);

  setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Custom);

  // Expand certain atomics.
  for (unsigned i = 0; i != array_lengthof(IntVTs); ++i) {
    MVT VT = IntVTs[i];
    setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, VT, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_SUB, VT, Custom);
    setOperationAction(ISD::ATOMIC_STORE, VT, Custom);
  }

  if (Subtarget->hasCmpxchg16b()) {
    setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, MVT::i128, Custom);
  }

  // FIXME - use subtarget debug flags
  if (!Subtarget->isTargetDarwin() && !Subtarget->isTargetELF() &&
      !Subtarget->isTargetCygMing() && !Subtarget->isTargetWin64()) {
    setOperationAction(ISD::EH_LABEL, MVT::Other, Expand);
  }

  if (Subtarget->is64Bit()) {
    setExceptionPointerRegister(X86::RAX);
    setExceptionSelectorRegister(X86::RDX);
  } else {
    setExceptionPointerRegister(X86::EAX);
    setExceptionSelectorRegister(X86::EDX);
  }
  setOperationAction(ISD::FRAME_TO_ARGS_OFFSET, MVT::i32, Custom);
  setOperationAction(ISD::FRAME_TO_ARGS_OFFSET, MVT::i64, Custom);

  setOperationAction(ISD::INIT_TRAMPOLINE, MVT::Other, Custom);
  setOperationAction(ISD::ADJUST_TRAMPOLINE, MVT::Other, Custom);

  setOperationAction(ISD::TRAP, MVT::Other, Legal);
  setOperationAction(ISD::DEBUGTRAP, MVT::Other, Legal);

  // VASTART needs to be custom lowered to use the VarArgsFrameIndex.
  setOperationAction(ISD::VASTART, MVT::Other, Custom);
  setOperationAction(ISD::VAEND, MVT::Other, Expand);
  if (Subtarget->is64Bit() && !Subtarget->isTargetWin64()) {
    // TargetInfo::X86_64ABIBuiltinVaList
    setOperationAction(ISD::VAARG, MVT::Other, Custom);
    setOperationAction(ISD::VACOPY, MVT::Other, Custom);
  } else {
    // TargetInfo::CharPtrBuiltinVaList
    setOperationAction(ISD::VAARG, MVT::Other, Expand);
    setOperationAction(ISD::VACOPY, MVT::Other, Expand);
  }

  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);

  setOperationAction(ISD::DYNAMIC_STACKALLOC, getPointerTy(), Custom);

  if (!TM.Options.UseSoftFloat && X86ScalarSSEf64) {
    // f32 and f64 use SSE.
    // Set up the FP register classes.
    addRegisterClass(MVT::f32, &X86::FR32RegClass);
    addRegisterClass(MVT::f64, &X86::FR64RegClass);

    // Use ANDPD to simulate FABS.
    setOperationAction(ISD::FABS, MVT::f64, Custom);
    setOperationAction(ISD::FABS, MVT::f32, Custom);

    // Use XORP to simulate FNEG.
    setOperationAction(ISD::FNEG, MVT::f64, Custom);
    setOperationAction(ISD::FNEG, MVT::f32, Custom);

    // Use ANDPD and ORPD to simulate FCOPYSIGN.
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Custom);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);

    // Lower this to FGETSIGNx86 plus an AND.
    setOperationAction(ISD::FGETSIGN, MVT::i64, Custom);
    setOperationAction(ISD::FGETSIGN, MVT::i32, Custom);

    // We don't support sin/cos/fmod.
    setOperationAction(ISD::FSIN, MVT::f64, Expand);
    setOperationAction(ISD::FCOS, MVT::f64, Expand);
    setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
    setOperationAction(ISD::FSIN, MVT::f32, Expand);
    setOperationAction(ISD::FCOS, MVT::f32, Expand);
    setOperationAction(ISD::FSINCOS, MVT::f32, Expand);

    // Expand FP immediates into loads from the stack, except for the special
    // cases we handle.
    addLegalFPImmediate(APFloat(+0.0));  // xorpd
    addLegalFPImmediate(APFloat(+0.0f)); // xorps
  } else if (!TM.Options.UseSoftFloat && X86ScalarSSEf32) {
    // Use SSE for f32, x87 for f64.
    // Set up the FP register classes.
    addRegisterClass(MVT::f32, &X86::FR32RegClass);
    addRegisterClass(MVT::f64, &X86::RFP64RegClass);

    // Use ANDPS to simulate FABS.
    setOperationAction(ISD::FABS, MVT::f32, Custom);

    // Use XORP to simulate FNEG.
    setOperationAction(ISD::FNEG, MVT::f32, Custom);

    setOperationAction(ISD::UNDEF, MVT::f64, Expand);

    // Use ANDPS and ORPS to simulate FCOPYSIGN.
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);

    // We don't support sin/cos/fmod.
    setOperationAction(ISD::FSIN, MVT::f32, Expand);
    setOperationAction(ISD::FCOS, MVT::f32, Expand);
    setOperationAction(ISD::FSINCOS, MVT::f32, Expand);

    // Special cases we handle for FP constants.
    addLegalFPImmediate(APFloat(+0.0f)); // xorps
    addLegalFPImmediate(APFloat(+0.0));  // FLD0
    addLegalFPImmediate(APFloat(+1.0));  // FLD1
    addLegalFPImmediate(APFloat(-0.0));  // FLD0/FCHS
    addLegalFPImmediate(APFloat(-1.0));  // FLD1/FCHS

    if (!TM.Options.UnsafeFPMath) {
      setOperationAction(ISD::FSIN, MVT::f64, Expand);
      setOperationAction(ISD::FCOS, MVT::f64, Expand);
      setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
    }
  } else if (!TM.Options.UseSoftFloat) {
    // f32 and f64 in x87.
    // Set up the FP register classes.
    addRegisterClass(MVT::f64, &X86::RFP64RegClass);
    addRegisterClass(MVT::f32, &X86::RFP32RegClass);

    setOperationAction(ISD::UNDEF, MVT::f64, Expand);
    setOperationAction(ISD::UNDEF, MVT::f32, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);

    if (!TM.Options.UnsafeFPMath) {
      setOperationAction(ISD::FSIN, MVT::f64, Expand);
      setOperationAction(ISD::FSIN, MVT::f32, Expand);
      setOperationAction(ISD::FCOS, MVT::f64, Expand);
      setOperationAction(ISD::FCOS, MVT::f32, Expand);
      setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
      setOperationAction(ISD::FSINCOS, MVT::f32, Expand);
    }
    addLegalFPImmediate(APFloat(+0.0));  // FLD0
    addLegalFPImmediate(APFloat(+1.0));  // FLD1
    addLegalFPImmediate(APFloat(-0.0));  // FLD0/FCHS
    addLegalFPImmediate(APFloat(-1.0));  // FLD1/FCHS
    addLegalFPImmediate(APFloat(+0.0f)); // FLD0
    addLegalFPImmediate(APFloat(+1.0f)); // FLD1
    addLegalFPImmediate(APFloat(-0.0f)); // FLD0/FCHS
    addLegalFPImmediate(APFloat(-1.0f)); // FLD1/FCHS
  }

  // We don't support FMA.
  setOperationAction(ISD::FMA, MVT::f64, Expand);
  setOperationAction(ISD::FMA, MVT::f32, Expand);

  // Long double always uses X87.
  if (!TM.Options.UseSoftFloat) {
    addRegisterClass(MVT::f80, &X86::RFP80RegClass);
    setOperationAction(ISD::UNDEF, MVT::f80, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f80, Expand);
    {
      APFloat TmpFlt = APFloat::getZero(APFloat::x87DoubleExtended);
      addLegalFPImmediate(TmpFlt);  // FLD0
      TmpFlt.changeSign();
      addLegalFPImmediate(TmpFlt);  // FLD0/FCHS

      bool ignored;
      APFloat TmpFlt2(+1.0);
      TmpFlt2.convert(APFloat::x87DoubleExtended, APFloat::rmNearestTiesToEven,
                      &ignored);
      addLegalFPImmediate(TmpFlt2);  // FLD1
      TmpFlt2.changeSign();
      addLegalFPImmediate(TmpFlt2);  // FLD1/FCHS
    }

    if (!TM.Options.UnsafeFPMath) {
      setOperationAction(ISD::FSIN, MVT::f80, Expand);
      setOperationAction(ISD::FCOS, MVT::f80, Expand);
      setOperationAction(ISD::FSINCOS, MVT::f80, Expand);
    }

    setOperationAction(ISD::FFLOOR, MVT::f80, Expand);
    setOperationAction(ISD::FCEIL, MVT::f80, Expand);
    setOperationAction(ISD::FTRUNC, MVT::f80, Expand);
    setOperationAction(ISD::FRINT, MVT::f80, Expand);
    setOperationAction(ISD::FNEARBYINT, MVT::f80, Expand);
    setOperationAction(ISD::FMA, MVT::f80, Expand);
  }

  // Always use a library call for pow.
  setOperationAction(ISD::FPOW, MVT::f32, Expand);
  setOperationAction(ISD::FPOW, MVT::f64, Expand);
  setOperationAction(ISD::FPOW, MVT::f80, Expand);

  setOperationAction(ISD::FLOG, MVT::f80, Expand);
  setOperationAction(ISD::FLOG2, MVT::f80, Expand);
  setOperationAction(ISD::FLOG10, MVT::f80, Expand);
  setOperationAction(ISD::FEXP, MVT::f80, Expand);
  setOperationAction(ISD::FEXP2, MVT::f80, Expand);
  setOperationAction(ISD::FMINNUM, MVT::f80, Expand);
  setOperationAction(ISD::FMAXNUM, MVT::f80, Expand);

  // First set operation action for all vector types to either promote
  // (for widening) or expand (for scalarization). Then we will selectively
  // turn on ones that can be effectively codegen'd.
  for (MVT VT : MVT::vector_valuetypes()) {
    setOperationAction(ISD::ADD, VT, Expand);
    setOperationAction(ISD::SUB, VT, Expand);
    setOperationAction(ISD::FADD, VT, Expand);
    setOperationAction(ISD::FNEG, VT, Expand);
    setOperationAction(ISD::FSUB, VT, Expand);
    setOperationAction(ISD::MUL, VT, Expand);
    setOperationAction(ISD::FMUL, VT, Expand);
    setOperationAction(ISD::SDIV, VT, Expand);
    setOperationAction(ISD::UDIV, VT, Expand);
    setOperationAction(ISD::FDIV, VT, Expand);
    setOperationAction(ISD::SREM, VT, Expand);
    setOperationAction(ISD::UREM, VT, Expand);
    setOperationAction(ISD::LOAD, VT, Expand);
    setOperationAction(ISD::VECTOR_SHUFFLE, VT, Expand);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Expand);
    setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Expand);
    setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Expand);
    setOperationAction(ISD::INSERT_SUBVECTOR, VT, Expand);
    setOperationAction(ISD::FABS, VT, Expand);
    setOperationAction(ISD::FSIN, VT, Expand);
    setOperationAction(ISD::FCOS, VT, Expand);
    setOperationAction(ISD::FSINCOS, VT, Expand);
    setOperationAction(ISD::FREM, VT, Expand);
    setOperationAction(ISD::FMA, VT, Expand);
    setOperationAction(ISD::FPOWI, VT, Expand);
    setOperationAction(ISD::FSQRT, VT, Expand);
    setOperationAction(ISD::FCOPYSIGN, VT, Expand);
    setOperationAction(ISD::FFLOOR, VT, Expand);
    setOperationAction(ISD::FCEIL, VT, Expand);
    setOperationAction(ISD::FTRUNC, VT, Expand);
    setOperationAction(ISD::FRINT, VT, Expand);
    setOperationAction(ISD::FNEARBYINT, VT, Expand);
    setOperationAction(ISD::SMUL_LOHI, VT, Expand);
    setOperationAction(ISD::MULHS, VT, Expand);
    setOperationAction(ISD::UMUL_LOHI, VT, Expand);
    setOperationAction(ISD::MULHU, VT, Expand);
    setOperationAction(ISD::SDIVREM, VT, Expand);
    setOperationAction(ISD::UDIVREM, VT, Expand);
    setOperationAction(ISD::FPOW, VT, Expand);
    setOperationAction(ISD::CTPOP, VT, Expand);
    setOperationAction(ISD::CTTZ, VT, Expand);
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, VT, Expand);
    setOperationAction(ISD::CTLZ, VT, Expand);
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, VT, Expand);
    setOperationAction(ISD::SHL, VT, Expand);
    setOperationAction(ISD::SRA, VT, Expand);
    setOperationAction(ISD::SRL, VT, Expand);
    setOperationAction(ISD::ROTL, VT, Expand);
    setOperationAction(ISD::ROTR, VT, Expand);
    setOperationAction(ISD::BSWAP, VT, Expand);
    setOperationAction(ISD::SETCC, VT, Expand);
    setOperationAction(ISD::FLOG, VT, Expand);
    setOperationAction(ISD::FLOG2, VT, Expand);
    setOperationAction(ISD::FLOG10, VT, Expand);
    setOperationAction(ISD::FEXP, VT, Expand);
    setOperationAction(ISD::FEXP2, VT, Expand);
    setOperationAction(ISD::FP_TO_UINT, VT, Expand);
    setOperationAction(ISD::FP_TO_SINT, VT, Expand);
    setOperationAction(ISD::UINT_TO_FP, VT, Expand);
    setOperationAction(ISD::SINT_TO_FP, VT, Expand);
    setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Expand);
    setOperationAction(ISD::TRUNCATE, VT, Expand);
    setOperationAction(ISD::SIGN_EXTEND, VT, Expand);
    setOperationAction(ISD::ZERO_EXTEND, VT, Expand);
    setOperationAction(ISD::ANY_EXTEND, VT, Expand);
    setOperationAction(ISD::VSELECT, VT, Expand);
    setOperationAction(ISD::SELECT_CC, VT, Expand);
    for (MVT InnerVT : MVT::vector_valuetypes()) {
      setTruncStoreAction(InnerVT, VT, Expand);

      setLoadExtAction(ISD::SEXTLOAD, InnerVT, VT, Expand);
      setLoadExtAction(ISD::ZEXTLOAD, InnerVT, VT, Expand);

      // N.b. ISD::EXTLOAD legality is basically ignored except for i1-like
      // types, we have to deal with them whether we ask for Expansion or not.
      // Setting Expand causes its own optimisation problems though, so leave
      // them legal.
      if (VT.getVectorElementType() == MVT::i1)
        setLoadExtAction(ISD::EXTLOAD, InnerVT, VT, Expand);
    }
  }

  // FIXME: In order to prevent SSE instructions being expanded to MMX ones
  // with -msoft-float, disable use of MMX as well.
  if (!TM.Options.UseSoftFloat && Subtarget->hasMMX()) {
    addRegisterClass(MVT::x86mmx, &X86::VR64RegClass);
    // No operations on x86mmx supported, everything uses intrinsics.
  }

  // MMX-sized vectors (other than x86mmx) are expected to be expanded
  // into smaller operations.
  setOperationAction(ISD::MULHS, MVT::v8i8, Expand);
  setOperationAction(ISD::MULHS, MVT::v4i16, Expand);
  setOperationAction(ISD::MULHS, MVT::v2i32, Expand);
  setOperationAction(ISD::MULHS, MVT::v1i64, Expand);
  setOperationAction(ISD::AND, MVT::v8i8, Expand);
  setOperationAction(ISD::AND, MVT::v4i16, Expand);
  setOperationAction(ISD::AND, MVT::v2i32, Expand);
  setOperationAction(ISD::AND, MVT::v1i64, Expand);
  setOperationAction(ISD::OR, MVT::v8i8, Expand);
  setOperationAction(ISD::OR, MVT::v4i16, Expand);
  setOperationAction(ISD::OR, MVT::v2i32, Expand);
  setOperationAction(ISD::OR, MVT::v1i64, Expand);
  setOperationAction(ISD::XOR, MVT::v8i8, Expand);
  setOperationAction(ISD::XOR, MVT::v4i16, Expand);
  setOperationAction(ISD::XOR, MVT::v2i32, Expand);
  setOperationAction(ISD::XOR, MVT::v1i64, Expand);
  setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v8i8, Expand);
  setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i16, Expand);
  setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v2i32, Expand);
  setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v1i64, Expand);
  setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v1i64, Expand);
  setOperationAction(ISD::SELECT, MVT::v8i8, Expand);
  setOperationAction(ISD::SELECT, MVT::v4i16, Expand);
  setOperationAction(ISD::SELECT, MVT::v2i32, Expand);
  setOperationAction(ISD::SELECT, MVT::v1i64, Expand);
  setOperationAction(ISD::BITCAST, MVT::v8i8, Expand);
  setOperationAction(ISD::BITCAST, MVT::v4i16, Expand);
  setOperationAction(ISD::BITCAST, MVT::v2i32, Expand);
  setOperationAction(ISD::BITCAST, MVT::v1i64, Expand);

  if (!TM.Options.UseSoftFloat && Subtarget->hasSSE1()) {
    addRegisterClass(MVT::v4f32, &X86::VR128RegClass);

    setOperationAction(ISD::FADD, MVT::v4f32, Legal);
    setOperationAction(ISD::FSUB, MVT::v4f32, Legal);
    setOperationAction(ISD::FMUL, MVT::v4f32, Legal);
    setOperationAction(ISD::FDIV, MVT::v4f32, Legal);
    setOperationAction(ISD::FSQRT, MVT::v4f32, Legal);
    setOperationAction(ISD::FNEG, MVT::v4f32, Custom);
    setOperationAction(ISD::FABS, MVT::v4f32, Custom);
    setOperationAction(ISD::LOAD, MVT::v4f32, Legal);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4f32, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Custom);
    setOperationAction(ISD::SELECT, MVT::v4f32, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::v4i32, Custom);
  }

  if (!TM.Options.UseSoftFloat && Subtarget->hasSSE2()) {
    addRegisterClass(MVT::v2f64, &X86::VR128RegClass);

    // FIXME: Unfortunately, -soft-float and -no-implicit-float mean XMM
    // registers cannot be used even for integer operations.
    addRegisterClass(MVT::v16i8, &X86::VR128RegClass);
    addRegisterClass(MVT::v8i16, &X86::VR128RegClass);
    addRegisterClass(MVT::v4i32, &X86::VR128RegClass);
    addRegisterClass(MVT::v2i64, &X86::VR128RegClass);

    setOperationAction(ISD::ADD, MVT::v16i8, Legal);
    setOperationAction(ISD::ADD, MVT::v8i16, Legal);
    setOperationAction(ISD::ADD, MVT::v4i32, Legal);
    setOperationAction(ISD::ADD, MVT::v2i64, Legal);
    setOperationAction(ISD::MUL, MVT::v4i32, Custom);
    setOperationAction(ISD::MUL, MVT::v2i64, Custom);
    setOperationAction(ISD::UMUL_LOHI, MVT::v4i32, Custom);
    setOperationAction(ISD::SMUL_LOHI, MVT::v4i32, Custom);
    setOperationAction(ISD::MULHU, MVT::v8i16, Legal);
    setOperationAction(ISD::MULHS, MVT::v8i16, Legal);
    setOperationAction(ISD::SUB, MVT::v16i8, Legal);
    setOperationAction(ISD::SUB, MVT::v8i16, Legal);
    setOperationAction(ISD::SUB, MVT::v4i32, Legal);
    setOperationAction(ISD::SUB, MVT::v2i64, Legal);
    setOperationAction(ISD::MUL, MVT::v8i16, Legal);
    setOperationAction(ISD::FADD, MVT::v2f64, Legal);
    setOperationAction(ISD::FSUB, MVT::v2f64, Legal);
    setOperationAction(ISD::FMUL, MVT::v2f64, Legal);
    setOperationAction(ISD::FDIV, MVT::v2f64, Legal);
    setOperationAction(ISD::FSQRT, MVT::v2f64, Legal);
    setOperationAction(ISD::FNEG, MVT::v2f64, Custom);
    setOperationAction(ISD::FABS, MVT::v2f64, Custom);

    setOperationAction(ISD::SETCC, MVT::v2i64, Custom);
    setOperationAction(ISD::SETCC, MVT::v16i8, Custom);
    setOperationAction(ISD::SETCC, MVT::v8i16, Custom);
    setOperationAction(ISD::SETCC, MVT::v4i32, Custom);

    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v16i8, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v8i16, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i16, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i32, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom);

    // Only provide customized ctpop vector bit twiddling for vector types we
    // know to perform better than using the popcnt instructions on each vector
    // element. If popcnt isn't supported, always provide the custom version.
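    // The custom lowering is, in essence, the classic parallel-bits popcount
    // (mask-and-subtract pairs, sum nibbles, then a horizontal add) carried
    // out with vector shifts, masks, and adds.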
    if (!Subtarget->hasPOPCNT()) {
      setOperationAction(ISD::CTPOP, MVT::v4i32, Custom);
      setOperationAction(ISD::CTPOP, MVT::v2i64, Custom);
    }

    // Custom lower build_vector, vector_shuffle, and extract_vector_elt.
    for (int i = MVT::v16i8; i != MVT::v2i64; ++i) {
      MVT VT = (MVT::SimpleValueType)i;
      // Do not attempt to custom lower non-power-of-2 vectors.
      if (!isPowerOf2_32(VT.getVectorNumElements()))
        continue;
      // Do not attempt to custom lower non-128-bit vectors.
      if (!VT.is128BitVector())
        continue;
      setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
      setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
    }

    // We support custom legalizing of sext and anyext loads for specific
    // memory vector types which we can load as a scalar (or sequence of
    // scalars) and extend in-register to a legal 128-bit vector type. For sext
    // loads these must work with a single scalar load.
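    // For example, a sext load of v4i8 to v4i32 can be a single 32-bit scalar
    // load followed by an in-register sign extension of the four lanes.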
    for (MVT VT : MVT::integer_vector_valuetypes()) {
      setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v4i8, Custom);
      setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v4i16, Custom);
      setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v8i8, Custom);
      setLoadExtAction(ISD::EXTLOAD, VT, MVT::v2i8, Custom);
      setLoadExtAction(ISD::EXTLOAD, VT, MVT::v2i16, Custom);
      setLoadExtAction(ISD::EXTLOAD, VT, MVT::v2i32, Custom);
      setLoadExtAction(ISD::EXTLOAD, VT, MVT::v4i8, Custom);
      setLoadExtAction(ISD::EXTLOAD, VT, MVT::v4i16, Custom);
      setLoadExtAction(ISD::EXTLOAD, VT, MVT::v8i8, Custom);
    }

    setOperationAction(ISD::BUILD_VECTOR, MVT::v2f64, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v2i64, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2f64, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2i64, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2f64, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Custom);

    if (Subtarget->is64Bit()) {
      setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2i64, Custom);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i64, Custom);
    }

    // Promote v16i8, v8i16, v4i32 load, select, and, or, xor to v2i64.
    for (int i = MVT::v16i8; i != MVT::v2i64; ++i) {
      MVT VT = (MVT::SimpleValueType)i;

      // Do not attempt to promote non-128-bit vectors.
      if (!VT.is128BitVector())
        continue;

      setOperationAction(ISD::AND, VT, Promote);
      AddPromotedToType(ISD::AND, VT, MVT::v2i64);
      setOperationAction(ISD::OR, VT, Promote);
      AddPromotedToType(ISD::OR, VT, MVT::v2i64);
      setOperationAction(ISD::XOR, VT, Promote);
      AddPromotedToType(ISD::XOR, VT, MVT::v2i64);
      setOperationAction(ISD::LOAD, VT, Promote);
      AddPromotedToType(ISD::LOAD, VT, MVT::v2i64);
      setOperationAction(ISD::SELECT, VT, Promote);
      AddPromotedToType(ISD::SELECT, VT, MVT::v2i64);
    }

    // Custom lower v2i64 and v2f64 selects.
    setOperationAction(ISD::LOAD, MVT::v2f64, Legal);
    setOperationAction(ISD::LOAD, MVT::v2i64, Legal);
    setOperationAction(ISD::SELECT, MVT::v2f64, Custom);
    setOperationAction(ISD::SELECT, MVT::v2i64, Custom);

    setOperationAction(ISD::FP_TO_SINT, MVT::v4i32, Legal);
    setOperationAction(ISD::SINT_TO_FP, MVT::v4i32, Legal);

    setOperationAction(ISD::UINT_TO_FP, MVT::v4i8, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::v4i16, Custom);
    // As there is no 64-bit GPR available, we need to build a special custom
    // sequence to convert from v2i32 to v2f32.
    if (!Subtarget->is64Bit())
      setOperationAction(ISD::UINT_TO_FP, MVT::v2f32, Custom);

    setOperationAction(ISD::FP_EXTEND, MVT::v2f32, Custom);
    setOperationAction(ISD::FP_ROUND, MVT::v2f32, Custom);

    for (MVT VT : MVT::fp_vector_valuetypes())
      setLoadExtAction(ISD::EXTLOAD, VT, MVT::v2f32, Legal);

    setOperationAction(ISD::BITCAST, MVT::v2i32, Custom);
    setOperationAction(ISD::BITCAST, MVT::v4i16, Custom);
    setOperationAction(ISD::BITCAST, MVT::v8i8, Custom);
  }

  if (!TM.Options.UseSoftFloat && Subtarget->hasSSE41()) {
    setOperationAction(ISD::FFLOOR, MVT::f32, Legal);
    setOperationAction(ISD::FCEIL, MVT::f32, Legal);
    setOperationAction(ISD::FTRUNC, MVT::f32, Legal);
    setOperationAction(ISD::FRINT, MVT::f32, Legal);
    setOperationAction(ISD::FNEARBYINT, MVT::f32, Legal);
    setOperationAction(ISD::FFLOOR, MVT::f64, Legal);
    setOperationAction(ISD::FCEIL, MVT::f64, Legal);
    setOperationAction(ISD::FTRUNC, MVT::f64, Legal);
    setOperationAction(ISD::FRINT, MVT::f64, Legal);
    setOperationAction(ISD::FNEARBYINT, MVT::f64, Legal);

    setOperationAction(ISD::FFLOOR, MVT::v4f32, Legal);
    setOperationAction(ISD::FCEIL, MVT::v4f32, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v4f32, Legal);
    setOperationAction(ISD::FRINT, MVT::v4f32, Legal);
    setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Legal);
    setOperationAction(ISD::FFLOOR, MVT::v2f64, Legal);
    setOperationAction(ISD::FCEIL, MVT::v2f64, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v2f64, Legal);
    setOperationAction(ISD::FRINT, MVT::v2f64, Legal);
    setOperationAction(ISD::FNEARBYINT, MVT::v2f64, Legal);

    // FIXME: Do we need to handle scalar-to-vector here?
    setOperationAction(ISD::MUL, MVT::v4i32, Legal);

    setOperationAction(ISD::VSELECT, MVT::v2f64, Custom);
    setOperationAction(ISD::VSELECT, MVT::v2i64, Custom);
    setOperationAction(ISD::VSELECT, MVT::v4i32, Custom);
    setOperationAction(ISD::VSELECT, MVT::v4f32, Custom);
    setOperationAction(ISD::VSELECT, MVT::v8i16, Custom);
    // There is no BLENDI for byte vectors. We don't need to custom lower
    // some vselects for now.
    setOperationAction(ISD::VSELECT, MVT::v16i8, Legal);

    // SSE41 brings specific instructions for doing vector sign extend even in
    // cases where we don't have SRA.
    for (MVT VT : MVT::integer_vector_valuetypes()) {
      setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v2i8, Custom);
      setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v2i16, Custom);
      setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v2i32, Custom);
    }

    // SSE41 also has vector sign/zero extending loads, PMOV[SZ]X.
    setLoadExtAction(ISD::SEXTLOAD, MVT::v8i16, MVT::v8i8, Legal);
    setLoadExtAction(ISD::SEXTLOAD, MVT::v4i32, MVT::v4i8, Legal);
    setLoadExtAction(ISD::SEXTLOAD, MVT::v2i64, MVT::v2i8, Legal);
    setLoadExtAction(ISD::SEXTLOAD, MVT::v4i32, MVT::v4i16, Legal);
    setLoadExtAction(ISD::SEXTLOAD, MVT::v2i64, MVT::v2i16, Legal);
    setLoadExtAction(ISD::SEXTLOAD, MVT::v2i64, MVT::v2i32, Legal);

    setLoadExtAction(ISD::ZEXTLOAD, MVT::v8i16, MVT::v8i8, Legal);
    setLoadExtAction(ISD::ZEXTLOAD, MVT::v4i32, MVT::v4i8, Legal);
    setLoadExtAction(ISD::ZEXTLOAD, MVT::v2i64, MVT::v2i8, Legal);
    setLoadExtAction(ISD::ZEXTLOAD, MVT::v4i32, MVT::v4i16, Legal);
    setLoadExtAction(ISD::ZEXTLOAD, MVT::v2i64, MVT::v2i16, Legal);
    setLoadExtAction(ISD::ZEXTLOAD, MVT::v2i64, MVT::v2i32, Legal);

    // i8 and i16 vectors are custom because the source register and source
    // memory operand types are not the same width. f32 vectors are
    // custom since the immediate controlling the insert encodes additional
    // information.
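    // (For v4f32 the insert maps to INSERTPS, whose immediate encodes the
    // source lane and an optional zero mask in addition to the destination
    // lane.)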
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v16i8, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i16, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i32, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom);

    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v16i8, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v8i16, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4i32, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Custom);

    // FIXME: these should be Legal, but that's only for the case where
    // the index is constant. For now custom expand to deal with that.
    if (Subtarget->is64Bit()) {
      setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2i64, Custom);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i64, Custom);
    }
  }

  if (Subtarget->hasSSE2()) {
    setOperationAction(ISD::SRL, MVT::v8i16, Custom);
    setOperationAction(ISD::SRL, MVT::v16i8, Custom);

    setOperationAction(ISD::SHL, MVT::v8i16, Custom);
    setOperationAction(ISD::SHL, MVT::v16i8, Custom);

    setOperationAction(ISD::SRA, MVT::v8i16, Custom);
    setOperationAction(ISD::SRA, MVT::v16i8, Custom);

    // In the customized shift lowering, the legal cases in AVX2 will be
    // recognized.
    setOperationAction(ISD::SRL, MVT::v2i64, Custom);
    setOperationAction(ISD::SRL, MVT::v4i32, Custom);

    setOperationAction(ISD::SHL, MVT::v2i64, Custom);
    setOperationAction(ISD::SHL, MVT::v4i32, Custom);

    setOperationAction(ISD::SRA, MVT::v4i32, Custom);
  }

  if (!TM.Options.UseSoftFloat && Subtarget->hasFp256()) {
    addRegisterClass(MVT::v32i8, &X86::VR256RegClass);
    addRegisterClass(MVT::v16i16, &X86::VR256RegClass);
    addRegisterClass(MVT::v8i32, &X86::VR256RegClass);
    addRegisterClass(MVT::v8f32, &X86::VR256RegClass);
    addRegisterClass(MVT::v4i64, &X86::VR256RegClass);
    addRegisterClass(MVT::v4f64, &X86::VR256RegClass);

    setOperationAction(ISD::LOAD, MVT::v8f32, Legal);
    setOperationAction(ISD::LOAD, MVT::v4f64, Legal);
    setOperationAction(ISD::LOAD, MVT::v4i64, Legal);

    setOperationAction(ISD::FADD, MVT::v8f32, Legal);
    setOperationAction(ISD::FSUB, MVT::v8f32, Legal);
    setOperationAction(ISD::FMUL, MVT::v8f32, Legal);
    setOperationAction(ISD::FDIV, MVT::v8f32, Legal);
    setOperationAction(ISD::FSQRT, MVT::v8f32, Legal);
    setOperationAction(ISD::FFLOOR, MVT::v8f32, Legal);
    setOperationAction(ISD::FCEIL, MVT::v8f32, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v8f32, Legal);
    setOperationAction(ISD::FRINT, MVT::v8f32, Legal);
    setOperationAction(ISD::FNEARBYINT, MVT::v8f32, Legal);
    setOperationAction(ISD::FNEG, MVT::v8f32, Custom);
    setOperationAction(ISD::FABS, MVT::v8f32, Custom);

    setOperationAction(ISD::FADD, MVT::v4f64, Legal);
    setOperationAction(ISD::FSUB, MVT::v4f64, Legal);
    setOperationAction(ISD::FMUL, MVT::v4f64, Legal);
    setOperationAction(ISD::FDIV, MVT::v4f64, Legal);
    setOperationAction(ISD::FSQRT, MVT::v4f64, Legal);
    setOperationAction(ISD::FFLOOR, MVT::v4f64, Legal);
    setOperationAction(ISD::FCEIL, MVT::v4f64, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v4f64, Legal);
    setOperationAction(ISD::FRINT, MVT::v4f64, Legal);
    setOperationAction(ISD::FNEARBYINT, MVT::v4f64, Legal);
    setOperationAction(ISD::FNEG, MVT::v4f64, Custom);
    setOperationAction(ISD::FABS, MVT::v4f64, Custom);

    // (fp_to_int:v8i16 (v8f32 ..)) requires the result type to be promoted
    // even though v8i16 is a legal type.
    setOperationAction(ISD::FP_TO_SINT, MVT::v8i16, Promote);
    setOperationAction(ISD::FP_TO_UINT, MVT::v8i16, Promote);
    setOperationAction(ISD::FP_TO_SINT, MVT::v8i32, Legal);

    setOperationAction(ISD::SINT_TO_FP, MVT::v8i16, Promote);
    setOperationAction(ISD::SINT_TO_FP, MVT::v8i32, Legal);
    setOperationAction(ISD::FP_ROUND, MVT::v4f32, Legal);

    setOperationAction(ISD::UINT_TO_FP, MVT::v8i8, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::v8i16, Custom);

    for (MVT VT : MVT::fp_vector_valuetypes())
      setLoadExtAction(ISD::EXTLOAD, VT, MVT::v4f32, Legal);

    setOperationAction(ISD::SRL, MVT::v16i16, Custom);
    setOperationAction(ISD::SRL, MVT::v32i8, Custom);

    setOperationAction(ISD::SHL, MVT::v16i16, Custom);
    setOperationAction(ISD::SHL, MVT::v32i8, Custom);

    setOperationAction(ISD::SRA, MVT::v16i16, Custom);
    setOperationAction(ISD::SRA, MVT::v32i8, Custom);

    setOperationAction(ISD::SETCC, MVT::v32i8, Custom);
    setOperationAction(ISD::SETCC, MVT::v16i16, Custom);
    setOperationAction(ISD::SETCC, MVT::v8i32, Custom);
    setOperationAction(ISD::SETCC, MVT::v4i64, Custom);

    setOperationAction(ISD::SELECT, MVT::v4f64, Custom);
    setOperationAction(ISD::SELECT, MVT::v4i64, Custom);
    setOperationAction(ISD::SELECT, MVT::v8f32, Custom);

    setOperationAction(ISD::VSELECT, MVT::v4f64, Custom);
    setOperationAction(ISD::VSELECT, MVT::v4i64, Custom);
    setOperationAction(ISD::VSELECT, MVT::v8i32, Custom);
    setOperationAction(ISD::VSELECT, MVT::v8f32, Custom);

    setOperationAction(ISD::SIGN_EXTEND, MVT::v4i64, Custom);
    setOperationAction(ISD::SIGN_EXTEND, MVT::v8i32, Custom);
    setOperationAction(ISD::SIGN_EXTEND, MVT::v16i16, Custom);
    setOperationAction(ISD::ZERO_EXTEND, MVT::v4i64, Custom);
    setOperationAction(ISD::ZERO_EXTEND, MVT::v8i32, Custom);
    setOperationAction(ISD::ZERO_EXTEND, MVT::v16i16, Custom);
    setOperationAction(ISD::ANY_EXTEND, MVT::v4i64, Custom);
    setOperationAction(ISD::ANY_EXTEND, MVT::v8i32, Custom);
    setOperationAction(ISD::ANY_EXTEND, MVT::v16i16, Custom);
    setOperationAction(ISD::TRUNCATE, MVT::v16i8, Custom);
    setOperationAction(ISD::TRUNCATE, MVT::v8i16, Custom);
    setOperationAction(ISD::TRUNCATE, MVT::v4i32, Custom);

    if (Subtarget->hasFMA() || Subtarget->hasFMA4()) {
      setOperationAction(ISD::FMA, MVT::v8f32, Legal);
      setOperationAction(ISD::FMA, MVT::v4f64, Legal);
      setOperationAction(ISD::FMA, MVT::v4f32, Legal);
      setOperationAction(ISD::FMA, MVT::v2f64, Legal);
      setOperationAction(ISD::FMA, MVT::f32, Legal);
      setOperationAction(ISD::FMA, MVT::f64, Legal);
    }
1275 if (Subtarget->hasInt256()) {
1276 setOperationAction(ISD::ADD, MVT::v4i64, Legal);
1277 setOperationAction(ISD::ADD, MVT::v8i32, Legal);
1278 setOperationAction(ISD::ADD, MVT::v16i16, Legal);
1279 setOperationAction(ISD::ADD, MVT::v32i8, Legal);
1281 setOperationAction(ISD::SUB, MVT::v4i64, Legal);
1282 setOperationAction(ISD::SUB, MVT::v8i32, Legal);
1283 setOperationAction(ISD::SUB, MVT::v16i16, Legal);
1284 setOperationAction(ISD::SUB, MVT::v32i8, Legal);
1286 setOperationAction(ISD::MUL, MVT::v4i64, Custom);
1287 setOperationAction(ISD::MUL, MVT::v8i32, Legal);
1288 setOperationAction(ISD::MUL, MVT::v16i16, Legal);
1289 // Don't lower v32i8 because there is no 128-bit byte mul
1291 setOperationAction(ISD::UMUL_LOHI, MVT::v8i32, Custom);
1292 setOperationAction(ISD::SMUL_LOHI, MVT::v8i32, Custom);
1293 setOperationAction(ISD::MULHU, MVT::v16i16, Legal);
1294 setOperationAction(ISD::MULHS, MVT::v16i16, Legal);
1296 setOperationAction(ISD::VSELECT, MVT::v16i16, Custom);
1297 setOperationAction(ISD::VSELECT, MVT::v32i8, Legal);
1299 // The custom lowering for UINT_TO_FP for v8i32 becomes interesting
1300 // when we have a 256bit-wide blend with immediate.
1301 setOperationAction(ISD::UINT_TO_FP, MVT::v8i32, Custom);
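      // (The trick, roughly: blend each lane with the magic constants
      // 0x4B000000 / 0x53000000 to splice the low and high 16-bit halves
      // into f32 range, subtract the bias, and add the halves back together.
      // A 256-bit blend-with-immediate makes each splice one instruction.)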

      // Only provide customized ctpop vector bit twiddling for vector types we
      // know to perform better than using the popcnt instructions on each
      // vector element. If popcnt isn't supported, always provide the custom
      // version.
      if (!Subtarget->hasPOPCNT())
        setOperationAction(ISD::CTPOP, MVT::v4i64, Custom);

      // Custom CTPOP always performs better on natively supported v8i32.
      setOperationAction(ISD::CTPOP, MVT::v8i32, Custom);

      // AVX2 also has wider vector sign/zero extending loads, VPMOV[SZ]X.
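      // For example, a zero-extending load from v4i8 to v4i64 below becomes
      // a single VPMOVZXBQ from memory rather than a load plus unpacks.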
      setLoadExtAction(ISD::SEXTLOAD, MVT::v16i16, MVT::v16i8, Legal);
      setLoadExtAction(ISD::SEXTLOAD, MVT::v8i32, MVT::v8i8, Legal);
      setLoadExtAction(ISD::SEXTLOAD, MVT::v4i64, MVT::v4i8, Legal);
      setLoadExtAction(ISD::SEXTLOAD, MVT::v8i32, MVT::v8i16, Legal);
      setLoadExtAction(ISD::SEXTLOAD, MVT::v4i64, MVT::v4i16, Legal);
      setLoadExtAction(ISD::SEXTLOAD, MVT::v4i64, MVT::v4i32, Legal);

      setLoadExtAction(ISD::ZEXTLOAD, MVT::v16i16, MVT::v16i8, Legal);
      setLoadExtAction(ISD::ZEXTLOAD, MVT::v8i32, MVT::v8i8, Legal);
      setLoadExtAction(ISD::ZEXTLOAD, MVT::v4i64, MVT::v4i8, Legal);
      setLoadExtAction(ISD::ZEXTLOAD, MVT::v8i32, MVT::v8i16, Legal);
      setLoadExtAction(ISD::ZEXTLOAD, MVT::v4i64, MVT::v4i16, Legal);
      setLoadExtAction(ISD::ZEXTLOAD, MVT::v4i64, MVT::v4i32, Legal);
    } else {
      setOperationAction(ISD::ADD, MVT::v4i64, Custom);
      setOperationAction(ISD::ADD, MVT::v8i32, Custom);
      setOperationAction(ISD::ADD, MVT::v16i16, Custom);
      setOperationAction(ISD::ADD, MVT::v32i8, Custom);

      setOperationAction(ISD::SUB, MVT::v4i64, Custom);
      setOperationAction(ISD::SUB, MVT::v8i32, Custom);
      setOperationAction(ISD::SUB, MVT::v16i16, Custom);
      setOperationAction(ISD::SUB, MVT::v32i8, Custom);

      setOperationAction(ISD::MUL, MVT::v4i64, Custom);
      setOperationAction(ISD::MUL, MVT::v8i32, Custom);
      setOperationAction(ISD::MUL, MVT::v16i16, Custom);
      // Don't lower v32i8 because there is no 128-bit byte mul.
    }

    // In the customized shift lowering, the legal cases in AVX2 will be
    // recognized.
    setOperationAction(ISD::SRL, MVT::v4i64, Custom);
    setOperationAction(ISD::SRL, MVT::v8i32, Custom);

    setOperationAction(ISD::SHL, MVT::v4i64, Custom);
    setOperationAction(ISD::SHL, MVT::v8i32, Custom);

    setOperationAction(ISD::SRA, MVT::v8i32, Custom);
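
    // Note there is no v4i64 SRA entry: AVX2 has no 64-bit arithmetic right
    // shift (VPSRAQ only appears with AVX-512), so that case presumably stays
    // with the default legalization.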

    // Custom lower several nodes for 256-bit types.
    for (MVT VT : MVT::vector_valuetypes()) {
      if (VT.getScalarSizeInBits() >= 32) {
        setOperationAction(ISD::MLOAD, VT, Legal);
        setOperationAction(ISD::MSTORE, VT, Legal);
      }
      // Extract subvector is special because the value type
      // (result) is 128-bit but the source is 256-bit wide.
      if (VT.is128BitVector()) {
        setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
      }
      // Do not attempt to custom lower other non-256-bit vectors.
      if (!VT.is256BitVector())
        continue;

      setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
      setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
      setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
      setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Custom);
      setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
      setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
    }

    // Promote v32i8, v16i16, v8i32 select, and, or, xor to v4i64.
    for (int i = MVT::v32i8; i != MVT::v4i64; ++i) {
      MVT VT = (MVT::SimpleValueType)i;

      // Do not attempt to promote non-256-bit vectors.
      if (!VT.is256BitVector())
        continue;

      setOperationAction(ISD::AND, VT, Promote);
      AddPromotedToType(ISD::AND, VT, MVT::v4i64);
      setOperationAction(ISD::OR, VT, Promote);
      AddPromotedToType(ISD::OR, VT, MVT::v4i64);
      setOperationAction(ISD::XOR, VT, Promote);
      AddPromotedToType(ISD::XOR, VT, MVT::v4i64);
      setOperationAction(ISD::LOAD, VT, Promote);
      AddPromotedToType(ISD::LOAD, VT, MVT::v4i64);
      setOperationAction(ISD::SELECT, VT, Promote);
      AddPromotedToType(ISD::SELECT, VT, MVT::v4i64);
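      // The "promotion" is just a bitcast here: a v32i8 AND and a v4i64 AND
      // are the same 256-bit VPAND, so the v4i64 patterns cover every
      // element width.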
    }
  }

  if (!TM.Options.UseSoftFloat && Subtarget->hasAVX512()) {
    addRegisterClass(MVT::v16i32, &X86::VR512RegClass);
    addRegisterClass(MVT::v16f32, &X86::VR512RegClass);
    addRegisterClass(MVT::v8i64, &X86::VR512RegClass);
    addRegisterClass(MVT::v8f64, &X86::VR512RegClass);

    addRegisterClass(MVT::i1, &X86::VK1RegClass);
    addRegisterClass(MVT::v8i1, &X86::VK8RegClass);
    addRegisterClass(MVT::v16i1, &X86::VK16RegClass);
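
    // The VK* classes model the AVX-512 k0-k7 mask registers, where i1
    // vectors live instead of in XMM/YMM/ZMM.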

    for (MVT VT : MVT::fp_vector_valuetypes())
      setLoadExtAction(ISD::EXTLOAD, VT, MVT::v8f32, Legal);

    setOperationAction(ISD::BR_CC, MVT::i1, Expand);
    setOperationAction(ISD::SETCC, MVT::i1, Custom);
    setOperationAction(ISD::XOR, MVT::i1, Legal);
    setOperationAction(ISD::OR, MVT::i1, Legal);
    setOperationAction(ISD::AND, MVT::i1, Legal);
    setOperationAction(ISD::LOAD, MVT::v16f32, Legal);
    setOperationAction(ISD::LOAD, MVT::v8f64, Legal);
    setOperationAction(ISD::LOAD, MVT::v8i64, Legal);
    setOperationAction(ISD::LOAD, MVT::v16i32, Legal);
    setOperationAction(ISD::LOAD, MVT::v16i1, Legal);

    setOperationAction(ISD::FADD, MVT::v16f32, Legal);
    setOperationAction(ISD::FSUB, MVT::v16f32, Legal);
    setOperationAction(ISD::FMUL, MVT::v16f32, Legal);
    setOperationAction(ISD::FDIV, MVT::v16f32, Legal);
    setOperationAction(ISD::FSQRT, MVT::v16f32, Legal);
    setOperationAction(ISD::FNEG, MVT::v16f32, Custom);

    setOperationAction(ISD::FADD, MVT::v8f64, Legal);
    setOperationAction(ISD::FSUB, MVT::v8f64, Legal);
    setOperationAction(ISD::FMUL, MVT::v8f64, Legal);
    setOperationAction(ISD::FDIV, MVT::v8f64, Legal);
    setOperationAction(ISD::FSQRT, MVT::v8f64, Legal);
    setOperationAction(ISD::FNEG, MVT::v8f64, Custom);
    setOperationAction(ISD::FMA, MVT::v8f64, Legal);
    setOperationAction(ISD::FMA, MVT::v16f32, Legal);

    setOperationAction(ISD::FP_TO_SINT, MVT::i32, Legal);
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Legal);
    setOperationAction(ISD::SINT_TO_FP, MVT::i32, Legal);
    setOperationAction(ISD::UINT_TO_FP, MVT::i32, Legal);
    if (Subtarget->is64Bit()) {
      setOperationAction(ISD::FP_TO_UINT, MVT::i64, Legal);
      setOperationAction(ISD::FP_TO_SINT, MVT::i64, Legal);
      setOperationAction(ISD::SINT_TO_FP, MVT::i64, Legal);
      setOperationAction(ISD::UINT_TO_FP, MVT::i64, Legal);
    }
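
    // Unlike SSE/AVX, AVX-512 has native unsigned conversion instructions
    // (VCVTUSI2SS, VCVTSS2USI and friends), which is why the FP_TO_UINT and
    // UINT_TO_FP entries here can be Legal rather than Custom.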

    setOperationAction(ISD::FP_TO_SINT, MVT::v16i32, Legal);
    setOperationAction(ISD::FP_TO_UINT, MVT::v16i32, Legal);
    setOperationAction(ISD::FP_TO_UINT, MVT::v8i32, Legal);
    setOperationAction(ISD::FP_TO_UINT, MVT::v4i32, Legal);
    setOperationAction(ISD::SINT_TO_FP, MVT::v16i32, Legal);
    setOperationAction(ISD::SINT_TO_FP, MVT::v8i1, Custom);
    setOperationAction(ISD::SINT_TO_FP, MVT::v16i1, Custom);
    setOperationAction(ISD::SINT_TO_FP, MVT::v16i8, Promote);
    setOperationAction(ISD::SINT_TO_FP, MVT::v16i16, Promote);
    setOperationAction(ISD::UINT_TO_FP, MVT::v16i32, Legal);
    setOperationAction(ISD::UINT_TO_FP, MVT::v8i32, Legal);
    setOperationAction(ISD::UINT_TO_FP, MVT::v4i32, Legal);
    setOperationAction(ISD::FP_ROUND, MVT::v8f32, Legal);
    setOperationAction(ISD::FP_EXTEND, MVT::v8f32, Legal);

    setOperationAction(ISD::TRUNCATE, MVT::i1, Custom);
    setOperationAction(ISD::TRUNCATE, MVT::v16i8, Custom);
    setOperationAction(ISD::TRUNCATE, MVT::v8i32, Custom);
    setOperationAction(ISD::TRUNCATE, MVT::v8i1, Custom);
    setOperationAction(ISD::TRUNCATE, MVT::v16i1, Custom);
    setOperationAction(ISD::TRUNCATE, MVT::v16i16, Custom);
    setOperationAction(ISD::ZERO_EXTEND, MVT::v16i32, Custom);
    setOperationAction(ISD::ZERO_EXTEND, MVT::v8i64, Custom);
    setOperationAction(ISD::SIGN_EXTEND, MVT::v16i32, Custom);
    setOperationAction(ISD::SIGN_EXTEND, MVT::v8i64, Custom);
    setOperationAction(ISD::SIGN_EXTEND, MVT::v16i8, Custom);
    setOperationAction(ISD::SIGN_EXTEND, MVT::v8i16, Custom);
    setOperationAction(ISD::SIGN_EXTEND, MVT::v16i16, Custom);

    setOperationAction(ISD::CONCAT_VECTORS, MVT::v8f64, Custom);
    setOperationAction(ISD::CONCAT_VECTORS, MVT::v8i64, Custom);
    setOperationAction(ISD::CONCAT_VECTORS, MVT::v16f32, Custom);
    setOperationAction(ISD::CONCAT_VECTORS, MVT::v16i32, Custom);
    setOperationAction(ISD::CONCAT_VECTORS, MVT::v8i1, Custom);
    setOperationAction(ISD::CONCAT_VECTORS, MVT::v16i1, Legal);

    setOperationAction(ISD::SETCC, MVT::v16i1, Custom);
    setOperationAction(ISD::SETCC, MVT::v8i1, Custom);

    setOperationAction(ISD::MUL, MVT::v8i64, Custom);

    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v8i1, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v16i1, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v16i1, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i1, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v8i1, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v16i1, Custom);
    setOperationAction(ISD::SELECT, MVT::v8f64, Custom);
    setOperationAction(ISD::SELECT, MVT::v8i64, Custom);
    setOperationAction(ISD::SELECT, MVT::v16f32, Custom);

    setOperationAction(ISD::ADD, MVT::v8i64, Legal);
    setOperationAction(ISD::ADD, MVT::v16i32, Legal);

    setOperationAction(ISD::SUB, MVT::v8i64, Legal);
    setOperationAction(ISD::SUB, MVT::v16i32, Legal);

    setOperationAction(ISD::MUL, MVT::v16i32, Legal);

    setOperationAction(ISD::SRL, MVT::v8i64, Custom);
    setOperationAction(ISD::SRL, MVT::v16i32, Custom);

    setOperationAction(ISD::SHL, MVT::v8i64, Custom);
    setOperationAction(ISD::SHL, MVT::v16i32, Custom);

    setOperationAction(ISD::SRA, MVT::v8i64, Custom);
    setOperationAction(ISD::SRA, MVT::v16i32, Custom);

    setOperationAction(ISD::AND, MVT::v8i64, Legal);
    setOperationAction(ISD::OR, MVT::v8i64, Legal);
    setOperationAction(ISD::XOR, MVT::v8i64, Legal);
    setOperationAction(ISD::AND, MVT::v16i32, Legal);
    setOperationAction(ISD::OR, MVT::v16i32, Legal);
    setOperationAction(ISD::XOR, MVT::v16i32, Legal);

    if (Subtarget->hasCDI()) {
      setOperationAction(ISD::CTLZ, MVT::v8i64, Legal);
      setOperationAction(ISD::CTLZ, MVT::v16i32, Legal);
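      // CDI supplies VPLZCNTD/VPLZCNTQ, so leading-zero counts map straight
      // onto hardware for these types.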
    }

    // Custom lower several nodes.
    for (MVT VT : MVT::vector_valuetypes()) {
      unsigned EltSize = VT.getVectorElementType().getSizeInBits();
      // Extract subvector is special because the value type
      // (result) is 256/128-bit but the source is 512-bit wide.
      if (VT.is128BitVector() || VT.is256BitVector()) {
        setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
      }
      if (VT.getVectorElementType() == MVT::i1)
        setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Legal);

      // Do not attempt to custom lower other non-512-bit vectors.
      if (!VT.is512BitVector())
        continue;

      if (EltSize >= 32) {
        setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
        setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
        setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
        setOperationAction(ISD::VSELECT, VT, Legal);
        setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
        setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Custom);
        setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
        setOperationAction(ISD::MLOAD, VT, Legal);
        setOperationAction(ISD::MSTORE, VT, Legal);
      }
    }

    for (int i = MVT::v32i8; i != MVT::v8i64; ++i) {
      MVT VT = (MVT::SimpleValueType)i;

      // Do not attempt to promote non-512-bit vectors.
      if (!VT.is512BitVector())
        continue;

      setOperationAction(ISD::SELECT, VT, Promote);
      AddPromotedToType(ISD::SELECT, VT, MVT::v8i64);
    }
  }

  if (!TM.Options.UseSoftFloat && Subtarget->hasBWI()) {
    addRegisterClass(MVT::v32i16, &X86::VR512RegClass);
    addRegisterClass(MVT::v64i8, &X86::VR512RegClass);

    addRegisterClass(MVT::v32i1, &X86::VK32RegClass);
    addRegisterClass(MVT::v64i1, &X86::VK64RegClass);

    setOperationAction(ISD::LOAD, MVT::v32i16, Legal);
    setOperationAction(ISD::LOAD, MVT::v64i8, Legal);
    setOperationAction(ISD::SETCC, MVT::v32i1, Custom);
    setOperationAction(ISD::SETCC, MVT::v64i1, Custom);
    setOperationAction(ISD::ADD, MVT::v32i16, Legal);
    setOperationAction(ISD::ADD, MVT::v64i8, Legal);
    setOperationAction(ISD::SUB, MVT::v32i16, Legal);
    setOperationAction(ISD::SUB, MVT::v64i8, Legal);
    setOperationAction(ISD::MUL, MVT::v32i16, Legal);

    for (int i = MVT::v32i8; i != MVT::v8i64; ++i) {
      const MVT VT = (MVT::SimpleValueType)i;

      const unsigned EltSize = VT.getVectorElementType().getSizeInBits();

      // Do not attempt to promote non-512-bit vectors.
      if (!VT.is512BitVector())
        continue;

      if (EltSize < 32) {
        setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
        setOperationAction(ISD::VSELECT, VT, Legal);
      }
    }
  }

  if (!TM.Options.UseSoftFloat && Subtarget->hasVLX()) {
    addRegisterClass(MVT::v4i1, &X86::VK4RegClass);
    addRegisterClass(MVT::v2i1, &X86::VK2RegClass);

    setOperationAction(ISD::SETCC, MVT::v4i1, Custom);
    setOperationAction(ISD::SETCC, MVT::v2i1, Custom);
    setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v8i1, Legal);

    setOperationAction(ISD::AND, MVT::v8i32, Legal);
    setOperationAction(ISD::OR, MVT::v8i32, Legal);
    setOperationAction(ISD::XOR, MVT::v8i32, Legal);
    setOperationAction(ISD::AND, MVT::v4i32, Legal);
    setOperationAction(ISD::OR, MVT::v4i32, Legal);
    setOperationAction(ISD::XOR, MVT::v4i32, Legal);
  }

  // SIGN_EXTEND_INREGs are evaluated by the extend type. Handle the expansion
  // of this type with custom code.
  for (MVT VT : MVT::vector_valuetypes())
    setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Custom);

  // We want to custom lower some of our intrinsics.
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
  setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);
  setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);
  if (!Subtarget->is64Bit())
    setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i64, Custom);
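
  // On 32-bit targets an i64 intrinsic result does not fit in one register;
  // the custom hook presumably lets results such as RDTSC's EDX:EAX pair be
  // split up during type legalization.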

  // Only custom-lower 64-bit SADDO and friends on 64-bit because we don't
  // handle type legalization for these operations here.
  //
  // FIXME: We really should do custom legalization for addition and
  // subtraction on x86-32 once PR3203 is fixed. We really can't do much better
  // than generic legalization for 64-bit multiplication-with-overflow, though.
  for (unsigned i = 0, e = 3+Subtarget->is64Bit(); i != e; ++i) {
    // Add/Sub/Mul with overflow operations are custom lowered.
    MVT VT = IntVTs[i]; // IntVTs is the integer-type table defined earlier.
    setOperationAction(ISD::SADDO, VT, Custom);
    setOperationAction(ISD::UADDO, VT, Custom);
    setOperationAction(ISD::SSUBO, VT, Custom);
    setOperationAction(ISD::USUBO, VT, Custom);
    setOperationAction(ISD::SMULO, VT, Custom);
    setOperationAction(ISD::UMULO, VT, Custom);
  }
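
  // The custom lowering selects the flag-producing forms. For example,
  // IR such as
  //   %s = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %a, i32 %b)
  // becomes a single ADD whose overflow flag is read back with SETO, instead
  // of an add followed by a separate comparison.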

  if (!Subtarget->is64Bit()) {
    // These libcalls are not available in 32-bit.
    setLibcallName(RTLIB::SHL_I128, nullptr);
    setLibcallName(RTLIB::SRL_I128, nullptr);
    setLibcallName(RTLIB::SRA_I128, nullptr);
  }

  // Combine sin / cos into one node or libcall if possible.
  if (Subtarget->hasSinCos()) {
    setLibcallName(RTLIB::SINCOS_F32, "sincosf");
    setLibcallName(RTLIB::SINCOS_F64, "sincos");
    if (Subtarget->isTargetDarwin()) {
      // For MacOSX, we don't want the normal expansion of a libcall to sincos.
      // We want to issue a libcall to __sincos_stret to avoid memory traffic.
      setOperationAction(ISD::FSINCOS, MVT::f64, Custom);
      setOperationAction(ISD::FSINCOS, MVT::f32, Custom);
    }
  }

  if (Subtarget->isTargetWin64()) {
    setOperationAction(ISD::SDIV, MVT::i128, Custom);
    setOperationAction(ISD::UDIV, MVT::i128, Custom);
    setOperationAction(ISD::SREM, MVT::i128, Custom);
    setOperationAction(ISD::UREM, MVT::i128, Custom);
    setOperationAction(ISD::SDIVREM, MVT::i128, Custom);
    setOperationAction(ISD::UDIVREM, MVT::i128, Custom);
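    // Win64 has no native 128-bit divide; these are presumably lowered to
    // calls to the __divti3/__udivti3/__modti3/__umodti3 runtime routines,
    // with the i128 operands passed by reference as the Win64 ABI requires.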
  }

  // We have target-specific dag combine patterns for the following nodes:
  setTargetDAGCombine(ISD::VECTOR_SHUFFLE);
  setTargetDAGCombine(ISD::EXTRACT_VECTOR_ELT);
  setTargetDAGCombine(ISD::BITCAST);
  setTargetDAGCombine(ISD::VSELECT);
  setTargetDAGCombine(ISD::SELECT);
  setTargetDAGCombine(ISD::SHL);
  setTargetDAGCombine(ISD::SRA);
  setTargetDAGCombine(ISD::SRL);
  setTargetDAGCombine(ISD::OR);
  setTargetDAGCombine(ISD::AND);
  setTargetDAGCombine(ISD::ADD);
  setTargetDAGCombine(ISD::FADD);
  setTargetDAGCombine(ISD::FSUB);
  setTargetDAGCombine(ISD::FMA);
  setTargetDAGCombine(ISD::SUB);
  setTargetDAGCombine(ISD::LOAD);
  setTargetDAGCombine(ISD::MLOAD);
  setTargetDAGCombine(ISD::STORE);
  setTargetDAGCombine(ISD::MSTORE);
  setTargetDAGCombine(ISD::ZERO_EXTEND);
  setTargetDAGCombine(ISD::ANY_EXTEND);
  setTargetDAGCombine(ISD::SIGN_EXTEND);
  setTargetDAGCombine(ISD::SIGN_EXTEND_INREG);
  setTargetDAGCombine(ISD::TRUNCATE);
  setTargetDAGCombine(ISD::SINT_TO_FP);
  setTargetDAGCombine(ISD::SETCC);
  setTargetDAGCombine(ISD::INTRINSIC_WO_CHAIN);
  setTargetDAGCombine(ISD::BUILD_VECTOR);
  setTargetDAGCombine(ISD::MUL);
  setTargetDAGCombine(ISD::XOR);

  computeRegisterProperties();

  // On Darwin, -Os means optimize for size without hurting performance, so
  // do not reduce the limit.
  MaxStoresPerMemset = 16; // For @llvm.memset -> sequence of stores
  MaxStoresPerMemsetOptSize = Subtarget->isTargetDarwin() ? 16 : 8;
  MaxStoresPerMemcpy = 8; // For @llvm.memcpy -> sequence of stores
  MaxStoresPerMemcpyOptSize = Subtarget->isTargetDarwin() ? 8 : 4;
  MaxStoresPerMemmove = 8; // For @llvm.memmove -> sequence of stores
  MaxStoresPerMemmoveOptSize = Subtarget->isTargetDarwin() ? 8 : 4;
  setPrefLoopAlignment(4); // 2^4 bytes.

  // A predictable cmov doesn't hurt on Atom because it's in-order.
  PredictableSelectIsExpensive = !Subtarget->isAtom();
  EnableExtLdPromotion = true;
  setPrefFunctionAlignment(4); // 2^4 bytes.

  verifyIntrinsicTables();
}

// This has so far only been implemented for 64-bit MachO.
bool X86TargetLowering::useLoadStackGuardNode() const {
  return Subtarget->isTargetMachO() && Subtarget->is64Bit();
}

TargetLoweringBase::LegalizeTypeAction
X86TargetLowering::getPreferredVectorAction(EVT VT) const {
  if (ExperimentalVectorWideningLegalization &&
      VT.getVectorNumElements() != 1 &&
      VT.getVectorElementType().getSimpleVT() != MVT::i1)
    return TypeWidenVector;

  return TargetLoweringBase::getPreferredVectorAction(VT);
}

EVT X86TargetLowering::getSetCCResultType(LLVMContext &, EVT VT) const {
  if (!VT.isVector())
    return Subtarget->hasAVX512() ? MVT::i1 : MVT::i8;

  const unsigned NumElts = VT.getVectorNumElements();
  const EVT EltVT = VT.getVectorElementType();
  if (VT.is512BitVector()) {
    if (Subtarget->hasAVX512())
      if (EltVT == MVT::i32 || EltVT == MVT::i64 ||
          EltVT == MVT::f32 || EltVT == MVT::f64)
        switch (NumElts) {
        case 8: return MVT::v8i1;
        case 16: return MVT::v16i1;
        }
    if (Subtarget->hasBWI())
      if (EltVT == MVT::i8 || EltVT == MVT::i16)
        switch (NumElts) {
        case 32: return MVT::v32i1;
        case 64: return MVT::v64i1;
        }
  }

  if (VT.is256BitVector() || VT.is128BitVector()) {
    if (Subtarget->hasVLX())
      if (EltVT == MVT::i32 || EltVT == MVT::i64 ||
          EltVT == MVT::f32 || EltVT == MVT::f64)
        switch (NumElts) {
        case 2: return MVT::v2i1;
        case 4: return MVT::v4i1;
        case 8: return MVT::v8i1;
        }
    if (Subtarget->hasBWI() && Subtarget->hasVLX())
      if (EltVT == MVT::i8 || EltVT == MVT::i16)
        switch (NumElts) {
        case 8: return MVT::v8i1;
        case 16: return MVT::v16i1;
        case 32: return MVT::v32i1;
        }
  }
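
  // With no legal mask register type for this shape, fall back to the usual
  // SSE/AVX convention: a compare yields a vector of element-sized lanes of
  // all-ones or all-zeros.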
  return VT.changeVectorElementTypeToInteger();
}

/// Helper for getByValTypeAlignment to determine
/// the desired ByVal argument alignment.
static void getMaxByValAlign(Type *Ty, unsigned &MaxAlign) {
  if (MaxAlign == 16)
    return;
  if (VectorType *VTy = dyn_cast<VectorType>(Ty)) {
    if (VTy->getBitWidth() == 128)
      MaxAlign = 16;
  } else if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
    unsigned EltAlign = 0;
    getMaxByValAlign(ATy->getElementType(), EltAlign);
    if (EltAlign > MaxAlign)
      MaxAlign = EltAlign;
  } else if (StructType *STy = dyn_cast<StructType>(Ty)) {
    for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
      unsigned EltAlign = 0;
      getMaxByValAlign(STy->getElementType(i), EltAlign);
      if (EltAlign > MaxAlign)
        MaxAlign = EltAlign;
      if (MaxAlign == 16)
        break;
    }
  }
}

/// Return the desired alignment for ByVal aggregate
/// function arguments in the caller parameter area. For X86, aggregates
/// that contain SSE vectors are placed at 16-byte boundaries while the rest
/// are at 4-byte boundaries.
unsigned X86TargetLowering::getByValTypeAlignment(Type *Ty) const {
  if (Subtarget->is64Bit()) {
    // Max of 8 and alignment of type.
    unsigned TyAlign = TD->getABITypeAlignment(Ty);
    if (TyAlign > 8)
      return TyAlign;
    return 8;
  }

  unsigned Align = 4;
  if (Subtarget->hasSSE1())
    getMaxByValAlign(Ty, Align);
  return Align;
}

/// Returns the target specific optimal type for load
/// and store operations as a result of memset, memcpy, and memmove lowering.
/// If DstAlign is zero, the destination alignment can satisfy any constraint.
/// Similarly, if SrcAlign is zero there is no need to check it against an
/// alignment requirement, probably because the source does not need to be
/// loaded. If 'IsMemset' is true, this is expanding a memset; if 'ZeroMemset'
/// is also true, it's a memset of zero. 'MemcpyStrSrc' indicates whether the
/// memcpy source is constant so it does not need to be loaded.
/// It returns EVT::Other if the type should be determined using generic
/// target-independent logic.
EVT
X86TargetLowering::getOptimalMemOpType(uint64_t Size,
                                       unsigned DstAlign, unsigned SrcAlign,
                                       bool IsMemset, bool ZeroMemset,
                                       bool MemcpyStrSrc,
                                       MachineFunction &MF) const {
  const Function *F = MF.getFunction();
  if ((!IsMemset || ZeroMemset) &&
      !F->hasFnAttribute(Attribute::NoImplicitFloat)) {
    if (Size >= 16 &&
        (Subtarget->isUnalignedMemAccessFast() ||
         ((DstAlign == 0 || DstAlign >= 16) &&
          (SrcAlign == 0 || SrcAlign >= 16)))) {
      if (Size >= 32) {
        if (Subtarget->hasInt256())
          return MVT::v8i32;
        if (Subtarget->hasFp256())
          return MVT::v8f32;
      }
      if (Subtarget->hasSSE2())
        return MVT::v4i32;
      if (Subtarget->hasSSE1())
        return MVT::v4f32;
    } else if (!MemcpyStrSrc && Size >= 8 &&
               !Subtarget->is64Bit() &&
               Subtarget->hasSSE2()) {
      // Do not use f64 to lower memcpy if source is string constant. It's
      // better to use i32 to avoid the loads.
      return MVT::f64;
    }
  }
  if (Subtarget->is64Bit() && Size >= 8)
    return MVT::i64;
  return MVT::i32;
}

bool X86TargetLowering::isSafeMemOpType(MVT VT) const {
  if (VT == MVT::f32)
    return X86ScalarSSEf32;
  else if (VT == MVT::f64)
    return X86ScalarSSEf64;
  return true;
}

bool
X86TargetLowering::allowsMisalignedMemoryAccesses(EVT VT,
                                                  unsigned,
                                                  unsigned,
                                                  bool *Fast) const {
  if (Fast)
    *Fast = Subtarget->isUnalignedMemAccessFast();
  return true;
}

/// Return the entry encoding for a jump table in the
/// current function. The returned value is a member of the
/// MachineJumpTableInfo::JTEntryKind enum.
unsigned X86TargetLowering::getJumpTableEncoding() const {
  // In GOT pic mode, each entry in the jump table is emitted as a @GOTOFF
  // symbol.
  if (getTargetMachine().getRelocationModel() == Reloc::PIC_ &&
      Subtarget->isPICStyleGOT())
    return MachineJumpTableInfo::EK_Custom32;

  // Otherwise, use the normal jump table encoding heuristics.
  return TargetLowering::getJumpTableEncoding();
}

const MCExpr *
X86TargetLowering::LowerCustomJumpTableEntry(const MachineJumpTableInfo *MJTI,
                                             const MachineBasicBlock *MBB,
                                             unsigned uid, MCContext &Ctx) const {
  assert(MBB->getParent()->getTarget().getRelocationModel() == Reloc::PIC_ &&
         Subtarget->isPICStyleGOT());
  // In 32-bit ELF systems, our jump table entries are formed with @GOTOFF
  // entries.
  return MCSymbolRefExpr::Create(MBB->getSymbol(),
                                 MCSymbolRefExpr::VK_GOTOFF, Ctx);
}

/// Returns relocation base for the given PIC jumptable.
SDValue X86TargetLowering::getPICJumpTableRelocBase(SDValue Table,
                                                    SelectionDAG &DAG) const {
  if (!Subtarget->is64Bit())
    // This doesn't have SDLoc associated with it, but is not really the
    // same as a Register.
    return DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), getPointerTy());
  return Table;
}

/// This returns the relocation base for the given PIC jumptable,
/// the same as getPICJumpTableRelocBase, but as an MCExpr.
const MCExpr *X86TargetLowering::
getPICJumpTableRelocBaseExpr(const MachineFunction *MF, unsigned JTI,
                             MCContext &Ctx) const {
  // X86-64 uses RIP relative addressing based on the jump table label.
  if (Subtarget->isPICStyleRIPRel())
    return TargetLowering::getPICJumpTableRelocBaseExpr(MF, JTI, Ctx);

  // Otherwise, the reference is relative to the PIC base.
  return MCSymbolRefExpr::Create(MF->getPICBaseSymbol(), Ctx);
}

// FIXME: Why is this routine here? Move to RegInfo!
std::pair<const TargetRegisterClass*, uint8_t>
X86TargetLowering::findRepresentativeClass(MVT VT) const {
  const TargetRegisterClass *RRC = nullptr;
  uint8_t Cost = 1;
  switch (VT.SimpleTy) {
  default:
    return TargetLowering::findRepresentativeClass(VT);
  case MVT::i8: case MVT::i16: case MVT::i32: case MVT::i64:
    RRC = Subtarget->is64Bit() ? &X86::GR64RegClass : &X86::GR32RegClass;
    break;
  case MVT::x86mmx:
    RRC = &X86::VR64RegClass;
    break;
  case MVT::f32: case MVT::f64:
  case MVT::v16i8: case MVT::v8i16: case MVT::v4i32: case MVT::v2i64:
  case MVT::v4f32: case MVT::v2f64:
  case MVT::v32i8: case MVT::v8i32: case MVT::v4i64: case MVT::v8f32:
  case MVT::v4f64:
    RRC = &X86::VR128RegClass;
    break;
  }
  return std::make_pair(RRC, Cost);
}

bool X86TargetLowering::getStackCookieLocation(unsigned &AddressSpace,
                                               unsigned &Offset) const {
  if (!Subtarget->isTargetLinux())
    return false;

  if (Subtarget->is64Bit()) {
    // %fs:0x28, unless we're using a Kernel code model, in which case it's
    // %gs:0x28.
    Offset = 0x28;
    if (getTargetMachine().getCodeModel() == CodeModel::Kernel)
      AddressSpace = 256; // Address space 256 is %gs.
    else
      AddressSpace = 257; // Address space 257 is %fs.
  } else {
    // %gs:0x14 on i386.
    Offset = 0x14;
    AddressSpace = 256;
  }
  return true;
}

bool X86TargetLowering::isNoopAddrSpaceCast(unsigned SrcAS,
                                            unsigned DestAS) const {
  assert(SrcAS != DestAS && "Expected different address spaces!");

  return SrcAS < 256 && DestAS < 256;
}

//===----------------------------------------------------------------------===//
//               Return Value Calling Convention Implementation
//===----------------------------------------------------------------------===//

#include "X86GenCallingConv.inc"

bool
X86TargetLowering::CanLowerReturn(CallingConv::ID CallConv,
                                  MachineFunction &MF, bool isVarArg,
                                  const SmallVectorImpl<ISD::OutputArg> &Outs,
                                  LLVMContext &Context) const {
  SmallVector<CCValAssign, 16> RVLocs;
  CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context);
  return CCInfo.CheckReturn(Outs, RetCC_X86);
}

const MCPhysReg *X86TargetLowering::getScratchRegisters(CallingConv::ID) const {
  static const MCPhysReg ScratchRegs[] = { X86::R11, 0 };
  return ScratchRegs;
}

SDValue
X86TargetLowering::LowerReturn(SDValue Chain,
                               CallingConv::ID CallConv, bool isVarArg,
                               const SmallVectorImpl<ISD::OutputArg> &Outs,
                               const SmallVectorImpl<SDValue> &OutVals,
                               SDLoc dl, SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();

  SmallVector<CCValAssign, 16> RVLocs;
  CCState CCInfo(CallConv, isVarArg, MF, RVLocs, *DAG.getContext());
  CCInfo.AnalyzeReturn(Outs, RetCC_X86);

  SDValue Flag;
  SmallVector<SDValue, 6> RetOps;
  RetOps.push_back(Chain); // Operand #0 = Chain (updated below)
  // Operand #1 = Bytes To Pop
  RetOps.push_back(DAG.getTargetConstant(FuncInfo->getBytesToPopOnReturn(),
                                         MVT::i16));

  // Copy the result values into the output registers.
  for (unsigned i = 0; i != RVLocs.size(); ++i) {
    CCValAssign &VA = RVLocs[i];
    assert(VA.isRegLoc() && "Can only return in registers!");
    SDValue ValToCopy = OutVals[i];
    EVT ValVT = ValToCopy.getValueType();

    // Promote values to the appropriate types.
    if (VA.getLocInfo() == CCValAssign::SExt)
      ValToCopy = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), ValToCopy);
    else if (VA.getLocInfo() == CCValAssign::ZExt)
      ValToCopy = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), ValToCopy);
    else if (VA.getLocInfo() == CCValAssign::AExt)
      ValToCopy = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), ValToCopy);
    else if (VA.getLocInfo() == CCValAssign::BCvt)
      ValToCopy = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), ValToCopy);

    assert(VA.getLocInfo() != CCValAssign::FPExt &&
           "Unexpected FP-extend for return value.");

    // If this is x86-64, and we disabled SSE, we can't return FP values,
    // or SSE or MMX vectors.
    if ((ValVT == MVT::f32 || ValVT == MVT::f64 ||
         VA.getLocReg() == X86::XMM0 || VA.getLocReg() == X86::XMM1) &&
        (Subtarget->is64Bit() && !Subtarget->hasSSE1())) {
      report_fatal_error("SSE register return with SSE disabled");
    }
    // Likewise we can't return F64 values with SSE1 only. gcc does so, but
    // llvm-gcc has never done it right and no one has noticed, so this
    // should be OK for now.
    if (ValVT == MVT::f64 &&
        (Subtarget->is64Bit() && !Subtarget->hasSSE2()))
      report_fatal_error("SSE2 register return with SSE2 disabled");

    // Returns in ST0/ST1 are handled specially: these are pushed as operands
    // to the RET instruction and handled by the FP Stackifier.
    if (VA.getLocReg() == X86::FP0 ||
        VA.getLocReg() == X86::FP1) {
      // If this is a copy from an xmm register to ST(0), use an FPExtend to
      // change the value to the FP stack register class.
      if (isScalarFPTypeInSSEReg(VA.getValVT()))
        ValToCopy = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f80, ValToCopy);
      RetOps.push_back(ValToCopy);
      // Don't emit a copytoreg.
      continue;
    }

    // 64-bit vector (MMX) values are returned in XMM0 / XMM1 except for v1i64
    // which is returned in RAX / RDX.
    if (Subtarget->is64Bit()) {
      if (ValVT == MVT::x86mmx) {
        if (VA.getLocReg() == X86::XMM0 || VA.getLocReg() == X86::XMM1) {
          ValToCopy = DAG.getNode(ISD::BITCAST, dl, MVT::i64, ValToCopy);
          ValToCopy = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64,
                                  ValToCopy);
          // If we don't have SSE2 available, convert to v4f32 so the generated
          // register is legal.
          if (!Subtarget->hasSSE2())
            ValToCopy = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, ValToCopy);
        }
      }
    }

    Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), ValToCopy, Flag);
    Flag = Chain.getValue(1);
    RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
  }

  // The x86-64 ABIs require that for returning structs by value we copy
  // the sret argument into %rax/%eax (depending on ABI) for the return.
  // Win32 requires us to put the sret argument to %eax as well.
  // We saved the argument into a virtual register in the entry block,
  // so now we copy the value out and into %rax/%eax.
  //
  // Checking Function.hasStructRetAttr() here is insufficient because the IR
  // may not have an explicit sret argument. If FuncInfo.CanLowerReturn is
  // false, then an sret argument may be implicitly inserted in the SelDAG. In
  // either case FuncInfo->setSRetReturnReg() will have been called.
  if (unsigned SRetReg = FuncInfo->getSRetReturnReg()) {
    assert((Subtarget->is64Bit() || Subtarget->isTargetKnownWindowsMSVC()) &&
           "No need for an sret register");
    SDValue Val = DAG.getCopyFromReg(Chain, dl, SRetReg, getPointerTy());

    unsigned RetValReg
        = (Subtarget->is64Bit() && !Subtarget->isTarget64BitILP32()) ?
          X86::RAX : X86::EAX;
    Chain = DAG.getCopyToReg(Chain, dl, RetValReg, Val, Flag);
    Flag = Chain.getValue(1);

    // RAX/EAX now acts like a return value.
    RetOps.push_back(DAG.getRegister(RetValReg, getPointerTy()));
  }

  RetOps[0] = Chain; // Update chain.

  // Add the flag if we have it.
  if (Flag.getNode())
    RetOps.push_back(Flag);

  return DAG.getNode(X86ISD::RET_FLAG, dl, MVT::Other, RetOps);
}

bool X86TargetLowering::isUsedByReturnOnly(SDNode *N, SDValue &Chain) const {
  if (N->getNumValues() != 1)
    return false;
  if (!N->hasNUsesOfValue(1, 0))
    return false;

  SDValue TCChain = Chain;
  SDNode *Copy = *N->use_begin();
  if (Copy->getOpcode() == ISD::CopyToReg) {
    // If the copy has a glue operand, we conservatively assume it isn't safe
    // to perform a tail call.
    if (Copy->getOperand(Copy->getNumOperands()-1).getValueType() == MVT::Glue)
      return false;
    TCChain = Copy->getOperand(0);
  } else if (Copy->getOpcode() != ISD::FP_EXTEND)
    return false;

  bool HasRet = false;
  for (SDNode::use_iterator UI = Copy->use_begin(), UE = Copy->use_end();
       UI != UE; ++UI) {
    if (UI->getOpcode() != X86ISD::RET_FLAG)
      return false;
    // If we are returning more than one value, we can definitely
    // not make a tail call; see PR19530.
    if (UI->getNumOperands() > 4)
      return false;
    if (UI->getNumOperands() == 4 &&
        UI->getOperand(UI->getNumOperands()-1).getValueType() != MVT::Glue)
      return false;
    HasRet = true;
  }

  if (!HasRet)
    return false;

  Chain = TCChain;
  return true;
}

EVT
X86TargetLowering::getTypeForExtArgOrReturn(LLVMContext &Context, EVT VT,
                                            ISD::NodeType ExtendKind) const {
  MVT ReturnMVT;
  // TODO: Is this also valid on 32-bit?
  if (Subtarget->is64Bit() && VT == MVT::i1 && ExtendKind == ISD::ZERO_EXTEND)
    ReturnMVT = MVT::i8;
  else
    ReturnMVT = MVT::i32;

  EVT MinVT = getRegisterType(Context, ReturnMVT);
  return VT.bitsLT(MinVT) ? MinVT : VT;
}

/// Lower the result values of a call into the
/// appropriate copies out of appropriate physical registers.
SDValue
X86TargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
                                   CallingConv::ID CallConv, bool isVarArg,
                                   const SmallVectorImpl<ISD::InputArg> &Ins,
                                   SDLoc dl, SelectionDAG &DAG,
                                   SmallVectorImpl<SDValue> &InVals) const {

  // Assign locations to each value returned by this call.
  SmallVector<CCValAssign, 16> RVLocs;
  bool Is64Bit = Subtarget->is64Bit();
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
                 *DAG.getContext());
  CCInfo.AnalyzeCallResult(Ins, RetCC_X86);

  // Copy all of the result registers out of their specified physreg.
  for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
    CCValAssign &VA = RVLocs[i];
    EVT CopyVT = VA.getValVT();

    // If this is x86-64, and we disabled SSE, we can't return FP values.
    if ((CopyVT == MVT::f32 || CopyVT == MVT::f64) &&
        ((Is64Bit || Ins[i].Flags.isInReg()) && !Subtarget->hasSSE1())) {
      report_fatal_error("SSE register return with SSE disabled");
    }

    // If we prefer to use the value in xmm registers, copy it out as f80 and
    // use a truncate to move it from fp stack reg to xmm reg.
    if ((VA.getLocReg() == X86::FP0 || VA.getLocReg() == X86::FP1) &&
        isScalarFPTypeInSSEReg(VA.getValVT()))
      CopyVT = MVT::f80;

    Chain = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(),
                               CopyVT, InFlag).getValue(1);
    SDValue Val = Chain.getValue(0);

    if (CopyVT != VA.getValVT())
      Val = DAG.getNode(ISD::FP_ROUND, dl, VA.getValVT(), Val,
                        // This truncation won't change the value.
                        DAG.getIntPtrConstant(1));

    InFlag = Chain.getValue(2);
    InVals.push_back(Val);
  }

  return Chain;
}

//===----------------------------------------------------------------------===//
//            C & StdCall & Fast Calling Convention implementation
//===----------------------------------------------------------------------===//
//  StdCall is the standard convention for much of the Windows API. It differs
//  from the C calling convention only slightly: the callee cleans up the
//  stack instead of the caller, and symbols are decorated. It does not
//  support vector arguments.
//  For info on the fast calling convention see the tail-call implementation,
//  LowerX86_32FastCCCallTo.

/// Determines whether a call uses struct return semantics.
enum StructReturnType {
  NotStructReturn,
  RegStructReturn,
  StackStructReturn
};
static StructReturnType
callIsStructReturn(const SmallVectorImpl<ISD::OutputArg> &Outs) {
  if (Outs.empty())
    return NotStructReturn;

  const ISD::ArgFlagsTy &Flags = Outs[0].Flags;
  if (!Flags.isSRet())
    return NotStructReturn;
  if (Flags.isInReg())
    return RegStructReturn;
  return StackStructReturn;
}

/// Determines whether a function uses struct return semantics.
static StructReturnType
argsAreStructReturn(const SmallVectorImpl<ISD::InputArg> &Ins) {
  if (Ins.empty())
    return NotStructReturn;

  const ISD::ArgFlagsTy &Flags = Ins[0].Flags;
  if (!Flags.isSRet())
    return NotStructReturn;
  if (Flags.isInReg())
    return RegStructReturn;
  return StackStructReturn;
}

/// Make a copy of an aggregate at address specified by "Src" to address
/// "Dst" with size and alignment information specified by the specific
/// parameter attribute. The copy will be passed as a byval function parameter.
static SDValue
CreateCopyOfByValArgument(SDValue Src, SDValue Dst, SDValue Chain,
                          ISD::ArgFlagsTy Flags, SelectionDAG &DAG,
                          SDLoc dl) {
  SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), MVT::i32);

  return DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode, Flags.getByValAlign(),
                       /*isVolatile*/false, /*AlwaysInline=*/true,
                       MachinePointerInfo(), MachinePointerInfo());
}

/// Return true if the calling convention is one that
/// supports tail call optimization.
static bool IsTailCallConvention(CallingConv::ID CC) {
  return (CC == CallingConv::Fast || CC == CallingConv::GHC ||
          CC == CallingConv::HiPE);
}

/// \brief Return true if the calling convention is a C calling convention.
static bool IsCCallConvention(CallingConv::ID CC) {
  return (CC == CallingConv::C || CC == CallingConv::X86_64_Win64 ||
          CC == CallingConv::X86_64_SysV);
}

bool X86TargetLowering::mayBeEmittedAsTailCall(CallInst *CI) const {
  if (!CI->isTailCall() || getTargetMachine().Options.DisableTailCalls)
    return false;

  CallSite CS(CI);
  CallingConv::ID CalleeCC = CS.getCallingConv();
  if (!IsTailCallConvention(CalleeCC) && !IsCCallConvention(CalleeCC))
    return false;

  return true;
}

/// Return true if the function is being made into
/// a tailcall target by changing its ABI.
static bool FuncIsMadeTailCallSafe(CallingConv::ID CC,
                                   bool GuaranteedTailCallOpt) {
  return GuaranteedTailCallOpt && IsTailCallConvention(CC);
}

SDValue
X86TargetLowering::LowerMemArgument(SDValue Chain,
                                    CallingConv::ID CallConv,
                                    const SmallVectorImpl<ISD::InputArg> &Ins,
                                    SDLoc dl, SelectionDAG &DAG,
                                    const CCValAssign &VA,
                                    MachineFrameInfo *MFI,
                                    unsigned i) const {
  // Create the nodes corresponding to a load from this parameter slot.
  ISD::ArgFlagsTy Flags = Ins[i].Flags;
  bool AlwaysUseMutable = FuncIsMadeTailCallSafe(
      CallConv, DAG.getTarget().Options.GuaranteedTailCallOpt);
  bool isImmutable = !AlwaysUseMutable && !Flags.isByVal();
  EVT ValVT;

  // If the value is passed by pointer, we have the address passed instead of
  // the value itself.
  if (VA.getLocInfo() == CCValAssign::Indirect)
    ValVT = VA.getLocVT();
  else
    ValVT = VA.getValVT();

  // FIXME: For now, all byval parameter objects are marked mutable. This can
  // be changed with more analysis. In case of tail call optimization, mark
  // all arguments mutable, since they could be overwritten by the lowering
  // of the arguments in case of a tail call.
  if (Flags.isByVal()) {
    unsigned Bytes = Flags.getByValSize();
    if (Bytes == 0) Bytes = 1; // Don't create zero-sized stack objects.
    int FI = MFI->CreateFixedObject(Bytes, VA.getLocMemOffset(), isImmutable);
    return DAG.getFrameIndex(FI, getPointerTy());
  } else {
    int FI = MFI->CreateFixedObject(ValVT.getSizeInBits()/8,
                                    VA.getLocMemOffset(), isImmutable);
    SDValue FIN = DAG.getFrameIndex(FI, getPointerTy());
    return DAG.getLoad(ValVT, dl, Chain, FIN,
                       MachinePointerInfo::getFixedStack(FI),
                       false, false, false, 0);
  }
}

// FIXME: Get this from tablegen.
static ArrayRef<MCPhysReg> get64BitArgumentGPRs(CallingConv::ID CallConv,
                                                const X86Subtarget *Subtarget) {
  assert(Subtarget->is64Bit());

  if (Subtarget->isCallingConvWin64(CallConv)) {
    static const MCPhysReg GPR64ArgRegsWin64[] = {
      X86::RCX, X86::RDX, X86::R8, X86::R9
    };
    return makeArrayRef(std::begin(GPR64ArgRegsWin64),
                        std::end(GPR64ArgRegsWin64));
  }

  static const MCPhysReg GPR64ArgRegs64Bit[] = {
    X86::RDI, X86::RSI, X86::RDX, X86::RCX, X86::R8, X86::R9
  };
  return makeArrayRef(std::begin(GPR64ArgRegs64Bit),
                      std::end(GPR64ArgRegs64Bit));
}

// FIXME: Get this from tablegen.
static ArrayRef<MCPhysReg> get64BitArgumentXMMs(MachineFunction &MF,
                                                CallingConv::ID CallConv,
                                                const X86Subtarget *Subtarget) {
  assert(Subtarget->is64Bit());
  if (Subtarget->isCallingConvWin64(CallConv)) {
    // The XMM registers which might contain var arg parameters are shadowed
    // in their paired GPR. So we only need to save the GPR to their home
    // slots.
    // TODO: __vectorcall will change this.
    return None;
  }

  const Function *Fn = MF.getFunction();
  bool NoImplicitFloatOps = Fn->hasFnAttribute(Attribute::NoImplicitFloat);
  assert(!(MF.getTarget().Options.UseSoftFloat && NoImplicitFloatOps) &&
         "SSE register cannot be used when SSE is disabled!");
  if (MF.getTarget().Options.UseSoftFloat || NoImplicitFloatOps ||
      !Subtarget->hasSSE1())
    // Kernel mode asks for SSE to be disabled, so there are no XMM argument
    // registers.
    return None;

  static const MCPhysReg XMMArgRegs64Bit[] = {
    X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
    X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7
  };
  return makeArrayRef(std::begin(XMMArgRegs64Bit), std::end(XMMArgRegs64Bit));
}

SDValue
X86TargetLowering::LowerFormalArguments(SDValue Chain,
                                        CallingConv::ID CallConv,
                                        bool isVarArg,
                                        const SmallVectorImpl<ISD::InputArg> &Ins,
                                        SDLoc dl,
                                        SelectionDAG &DAG,
                                        SmallVectorImpl<SDValue> &InVals)
                                          const {
  MachineFunction &MF = DAG.getMachineFunction();
  X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();

  const Function* Fn = MF.getFunction();
  if (Fn->hasExternalLinkage() &&
      Subtarget->isTargetCygMing() &&
      Fn->getName() == "main")
    FuncInfo->setForceFramePointer(true);

  MachineFrameInfo *MFI = MF.getFrameInfo();
  bool Is64Bit = Subtarget->is64Bit();
  bool IsWin64 = Subtarget->isCallingConvWin64(CallConv);

  assert(!(isVarArg && IsTailCallConvention(CallConv)) &&
         "Var args not supported with calling convention fastcc, ghc or hipe");

  // Assign locations to all of the incoming arguments.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, isVarArg, MF, ArgLocs, *DAG.getContext());

  // Allocate shadow area for Win64.
  if (IsWin64)
    CCInfo.AllocateStack(32, 8);

  CCInfo.AnalyzeFormalArguments(Ins, CC_X86);
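
  // (The 32 bytes reserved above are the four 8-byte "home" slots the Win64
  // ABI makes the caller allocate for RCX, RDX, R8 and R9.)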

  unsigned LastVal = ~0U;
  SDValue ArgValue;
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    // TODO: If an arg is passed in two places (e.g. reg and stack), skip later
    // places.
    assert(VA.getValNo() != LastVal &&
           "Don't support value assigned to multiple locs yet");
    (void)LastVal;
    LastVal = VA.getValNo();

    if (VA.isRegLoc()) {
      EVT RegVT = VA.getLocVT();
      const TargetRegisterClass *RC;
      if (RegVT == MVT::i32)
        RC = &X86::GR32RegClass;
      else if (Is64Bit && RegVT == MVT::i64)
        RC = &X86::GR64RegClass;
      else if (RegVT == MVT::f32)
        RC = &X86::FR32RegClass;
      else if (RegVT == MVT::f64)
        RC = &X86::FR64RegClass;
      else if (RegVT.is512BitVector())
        RC = &X86::VR512RegClass;
      else if (RegVT.is256BitVector())
        RC = &X86::VR256RegClass;
      else if (RegVT.is128BitVector())
        RC = &X86::VR128RegClass;
      else if (RegVT == MVT::x86mmx)
        RC = &X86::VR64RegClass;
      else if (RegVT == MVT::i1)
        RC = &X86::VK1RegClass;
      else if (RegVT == MVT::v8i1)
        RC = &X86::VK8RegClass;
      else if (RegVT == MVT::v16i1)
        RC = &X86::VK16RegClass;
      else if (RegVT == MVT::v32i1)
        RC = &X86::VK32RegClass;
      else if (RegVT == MVT::v64i1)
        RC = &X86::VK64RegClass;
      else
        llvm_unreachable("Unknown argument type!");

      unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);
      ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, RegVT);

      // If this is an 8 or 16-bit value, it is really passed promoted to 32
      // bits. Insert an assert[sz]ext to capture this, then truncate to the
      // right size.
      if (VA.getLocInfo() == CCValAssign::SExt)
        ArgValue = DAG.getNode(ISD::AssertSext, dl, RegVT, ArgValue,
                               DAG.getValueType(VA.getValVT()));
      else if (VA.getLocInfo() == CCValAssign::ZExt)
        ArgValue = DAG.getNode(ISD::AssertZext, dl, RegVT, ArgValue,
                               DAG.getValueType(VA.getValVT()));
      else if (VA.getLocInfo() == CCValAssign::BCvt)
        ArgValue = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), ArgValue);

      if (VA.isExtInLoc()) {
        // Handle MMX values passed in XMM regs.
        if (RegVT.isVector())
          ArgValue = DAG.getNode(X86ISD::MOVDQ2Q, dl, VA.getValVT(), ArgValue);
        else
          ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue);
      }
    } else {
      assert(VA.isMemLoc());
      ArgValue = LowerMemArgument(Chain, CallConv, Ins, dl, DAG, VA, MFI, i);
    }

    // If value is passed via pointer - do a load.
    if (VA.getLocInfo() == CCValAssign::Indirect)
      ArgValue = DAG.getLoad(VA.getValVT(), dl, Chain, ArgValue,
                             MachinePointerInfo(), false, false, false, 0);

    InVals.push_back(ArgValue);
  }

  if (Subtarget->is64Bit() || Subtarget->isTargetKnownWindowsMSVC()) {
    for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
      // The x86-64 ABIs require that for returning structs by value we copy
      // the sret argument into %rax/%eax (depending on ABI) for the return.
      // Win32 requires us to put the sret argument to %eax as well.
      // Save the argument into a virtual register so that we can access it
      // from the return points.
      if (Ins[i].Flags.isSRet()) {
        unsigned Reg = FuncInfo->getSRetReturnReg();
        if (Reg == 0) {
          MVT PtrTy = getPointerTy();
          Reg = MF.getRegInfo().createVirtualRegister(getRegClassFor(PtrTy));
          FuncInfo->setSRetReturnReg(Reg);
        }
        SDValue Copy = DAG.getCopyToReg(DAG.getEntryNode(), dl, Reg, InVals[i]);
        Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Copy, Chain);
        break;
      }
    }
  }

  unsigned StackSize = CCInfo.getNextStackOffset();
  // Align stack specially for tail calls.
  if (FuncIsMadeTailCallSafe(CallConv,
                             MF.getTarget().Options.GuaranteedTailCallOpt))
    StackSize = GetAlignedArgumentStackSize(StackSize, DAG);

  // If the function takes variable number of arguments, make a frame index for
  // the start of the first vararg value... for expansion of llvm.va_start. We
  // can skip this if there are no va_start calls.
  if (MFI->hasVAStart() &&
      (Is64Bit || (CallConv != CallingConv::X86_FastCall &&
                   CallConv != CallingConv::X86_ThisCall))) {
    FuncInfo->setVarArgsFrameIndex(
        MFI->CreateFixedObject(1, StackSize, true));
  }

  // Figure out if XMM registers are in use.
  assert(!(MF.getTarget().Options.UseSoftFloat &&
           Fn->hasFnAttribute(Attribute::NoImplicitFloat)) &&
         "SSE register cannot be used when SSE is disabled!");

  // 64-bit calling conventions support varargs and register parameters, so we
  // have to do extra work to spill them in the prologue.
  if (Is64Bit && isVarArg && MFI->hasVAStart()) {
    // Find the first unallocated argument registers.
    ArrayRef<MCPhysReg> ArgGPRs = get64BitArgumentGPRs(CallConv, Subtarget);
    ArrayRef<MCPhysReg> ArgXMMs = get64BitArgumentXMMs(MF, CallConv, Subtarget);
    unsigned NumIntRegs =
        CCInfo.getFirstUnallocated(ArgGPRs.data(), ArgGPRs.size());
    unsigned NumXMMRegs =
        CCInfo.getFirstUnallocated(ArgXMMs.data(), ArgXMMs.size());
    assert(!(NumXMMRegs && !Subtarget->hasSSE1()) &&
           "SSE register cannot be used when SSE is disabled!");

    // Gather all the live in physical registers.
    SmallVector<SDValue, 6> LiveGPRs;
    SmallVector<SDValue, 8> LiveXMMRegs;
    SDValue ALVal;
    for (MCPhysReg Reg : ArgGPRs.slice(NumIntRegs)) {
      unsigned GPR = MF.addLiveIn(Reg, &X86::GR64RegClass);
      LiveGPRs.push_back(
          DAG.getCopyFromReg(Chain, dl, GPR, MVT::i64));
    }
    if (!ArgXMMs.empty()) {
      unsigned AL = MF.addLiveIn(X86::AL, &X86::GR8RegClass);
      ALVal = DAG.getCopyFromReg(Chain, dl, AL, MVT::i8);
      for (MCPhysReg Reg : ArgXMMs.slice(NumXMMRegs)) {
        unsigned XMMReg = MF.addLiveIn(Reg, &X86::VR128RegClass);
        LiveXMMRegs.push_back(
            DAG.getCopyFromReg(Chain, dl, XMMReg, MVT::v4f32));
      }
    }
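
    // In the SysV varargs convention, AL holds the number of vector registers
    // used at the call site; VASTART_SAVE_XMM_REGS below tests it so the XMM
    // spills can be skipped at run time when no vectors were passed.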
    if (IsWin64) {
      const TargetFrameLowering &TFI = *Subtarget->getFrameLowering();
      // Get to the caller-allocated home save location. Add 8 to account
      // for the return address.
      int HomeOffset = TFI.getOffsetOfLocalArea() + 8;
      FuncInfo->setRegSaveFrameIndex(
          MFI->CreateFixedObject(1, NumIntRegs * 8 + HomeOffset, false));
      // Fixup to set vararg frame on shadow area (4 x i64).
      if (NumIntRegs < 4)
        FuncInfo->setVarArgsFrameIndex(FuncInfo->getRegSaveFrameIndex());
    } else {
      // For X86-64, if there are vararg parameters that are passed via
      // registers, then we must store them to their spots on the stack so
      // they may be loaded by dereferencing the result of va_next.
      FuncInfo->setVarArgsGPOffset(NumIntRegs * 8);
      FuncInfo->setVarArgsFPOffset(ArgGPRs.size() * 8 + NumXMMRegs * 16);
      FuncInfo->setRegSaveFrameIndex(MFI->CreateStackObject(
          ArgGPRs.size() * 8 + ArgXMMs.size() * 16, 16, false));
    }

    // Store the integer parameter registers.
    SmallVector<SDValue, 8> MemOps;
    SDValue RSFIN = DAG.getFrameIndex(FuncInfo->getRegSaveFrameIndex(),
                                      getPointerTy());
    unsigned Offset = FuncInfo->getVarArgsGPOffset();
    for (SDValue Val : LiveGPRs) {
      SDValue FIN = DAG.getNode(ISD::ADD, dl, getPointerTy(), RSFIN,
                                DAG.getIntPtrConstant(Offset));
      SDValue Store =
          DAG.getStore(Val.getValue(1), dl, Val, FIN,
                       MachinePointerInfo::getFixedStack(
                           FuncInfo->getRegSaveFrameIndex(), Offset),
                       false, false, 0);
      MemOps.push_back(Store);
      Offset += 8;
    }

    if (!ArgXMMs.empty() && NumXMMRegs != ArgXMMs.size()) {
      // Now store the XMM (fp + vector) parameter registers.
      SmallVector<SDValue, 12> SaveXMMOps;
      SaveXMMOps.push_back(Chain);
      SaveXMMOps.push_back(ALVal);
      SaveXMMOps.push_back(DAG.getIntPtrConstant(
          FuncInfo->getRegSaveFrameIndex()));
      SaveXMMOps.push_back(DAG.getIntPtrConstant(
          FuncInfo->getVarArgsFPOffset()));
      SaveXMMOps.insert(SaveXMMOps.end(), LiveXMMRegs.begin(),
                        LiveXMMRegs.end());
      MemOps.push_back(DAG.getNode(X86ISD::VASTART_SAVE_XMM_REGS, dl,
                                   MVT::Other, SaveXMMOps));
    }

    if (!MemOps.empty())
      Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);
  }

  if (isVarArg && MFI->hasMustTailInVarArgFunc()) {
    // Find the largest legal vector type.
    MVT VecVT = MVT::Other;
    // FIXME: Only some x86_32 calling conventions support AVX512.
    if (Subtarget->hasAVX512() &&
        (Is64Bit || (CallConv == CallingConv::X86_VectorCall ||
                     CallConv == CallingConv::Intel_OCL_BI)))
      VecVT = MVT::v16f32;
    else if (Subtarget->hasAVX())
      VecVT = MVT::v8f32;
    else if (Subtarget->hasSSE2())
      VecVT = MVT::v4f32;

    // We forward some GPRs and some vector types.
    SmallVector<MVT, 2> RegParmTypes;
    MVT IntVT = Is64Bit ? MVT::i64 : MVT::i32;
    RegParmTypes.push_back(IntVT);
    if (VecVT != MVT::Other)
      RegParmTypes.push_back(VecVT);

    // Compute the set of forwarded registers. The rest are scratch.
    SmallVectorImpl<ForwardedRegister> &Forwards =
        FuncInfo->getForwardedMustTailRegParms();
    CCInfo.analyzeMustTailForwardedRegisters(Forwards, RegParmTypes, CC_X86);

    // Conservatively forward AL on x86_64, since it might be used for varargs.
    if (Is64Bit && !CCInfo.isAllocated(X86::AL)) {
      unsigned ALVReg = MF.addLiveIn(X86::AL, &X86::GR8RegClass);
      Forwards.push_back(ForwardedRegister(ALVReg, X86::AL, MVT::i8));
    }

    // Copy all forwards from physical to virtual registers.
    for (ForwardedRegister &F : Forwards) {
      // FIXME: Can we use a less constrained schedule?
      SDValue RegVal = DAG.getCopyFromReg(Chain, dl, F.VReg, F.VT);
      F.VReg = MF.getRegInfo().createVirtualRegister(getRegClassFor(F.VT));
      Chain = DAG.getCopyToReg(Chain, dl, F.VReg, RegVal);
    }
  }

  // Some CCs need callee pop.
  if (X86::isCalleePop(CallConv, Is64Bit, isVarArg,
                       MF.getTarget().Options.GuaranteedTailCallOpt)) {
    FuncInfo->setBytesToPopOnReturn(StackSize); // Callee pops everything.
  } else {
    FuncInfo->setBytesToPopOnReturn(0); // Callee pops nothing.
    // If this is an sret function, the return should pop the hidden pointer.
    if (!Is64Bit && !IsTailCallConvention(CallConv) &&
        !Subtarget->getTargetTriple().isOSMSVCRT() &&
        argsAreStructReturn(Ins) == StackStructReturn)
      FuncInfo->setBytesToPopOnReturn(4);
  }

  if (!Is64Bit) {
    // RegSaveFrameIndex is X86-64 only.
    FuncInfo->setRegSaveFrameIndex(0xAAAAAAA);
    if (CallConv == CallingConv::X86_FastCall ||
        CallConv == CallingConv::X86_ThisCall)
      // fastcc functions can't have varargs.
      FuncInfo->setVarArgsFrameIndex(0xAAAAAAA);
  }

  FuncInfo->setArgumentStackSize(StackSize);

  return Chain;
}

SDValue
X86TargetLowering::LowerMemOpCallTo(SDValue Chain,
                                    SDValue StackPtr, SDValue Arg,
                                    SDLoc dl, SelectionDAG &DAG,
                                    const CCValAssign &VA,
                                    ISD::ArgFlagsTy Flags) const {
  unsigned LocMemOffset = VA.getLocMemOffset();
  SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset);
  PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, PtrOff);
  if (Flags.isByVal())
    return CreateCopyOfByValArgument(Arg, PtrOff, Chain, Flags, DAG, dl);

  return DAG.getStore(Chain, dl, Arg, PtrOff,
                      MachinePointerInfo::getStack(LocMemOffset),
                      false, false, 0);
}

/// Emit a load of return address if tail call
/// optimization is performed and it is required.
SDValue
X86TargetLowering::EmitTailCallLoadRetAddr(SelectionDAG &DAG,
                                           SDValue &OutRetAddr, SDValue Chain,
                                           bool IsTailCall, bool Is64Bit,
                                           int FPDiff, SDLoc dl) const {
  // Adjust the Return address stack slot.
  EVT VT = getPointerTy();
  OutRetAddr = getReturnAddressFrameIndex(DAG);

  // Load the "old" Return address.
  OutRetAddr = DAG.getLoad(VT, dl, Chain, OutRetAddr, MachinePointerInfo(),
                           false, false, false, 0);
  return SDValue(OutRetAddr.getNode(), 1);
}

/// Emit a store of the return address if tail call
/// optimization is performed and it is required (FPDiff!=0).
static SDValue EmitTailCallStoreRetAddr(SelectionDAG &DAG, MachineFunction &MF,
                                        SDValue Chain, SDValue RetAddrFrIdx,
                                        EVT PtrVT, unsigned SlotSize,
                                        int FPDiff, SDLoc dl) {
  // Store the return address to the appropriate stack slot.
  if (!FPDiff) return Chain;
  // Calculate the new stack slot for the return address.
  int NewReturnAddrFI =
      MF.getFrameInfo()->CreateFixedObject(SlotSize, (int64_t)FPDiff - SlotSize,
                                           false);
  SDValue NewRetAddrFrIdx = DAG.getFrameIndex(NewReturnAddrFI, PtrVT);
  Chain = DAG.getStore(Chain, dl, RetAddrFrIdx, NewRetAddrFrIdx,
                       MachinePointerInfo::getFixedStack(NewReturnAddrFI),
                       false, false, 0);
  return Chain;
}

SDValue
X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
                             SmallVectorImpl<SDValue> &InVals) const {
  SelectionDAG &DAG = CLI.DAG;
  SDLoc &dl = CLI.DL;
  SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
  SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
  SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
  SDValue Chain = CLI.Chain;
  SDValue Callee = CLI.Callee;
  CallingConv::ID CallConv = CLI.CallConv;
  bool &isTailCall = CLI.IsTailCall;
  bool isVarArg = CLI.IsVarArg;

  MachineFunction &MF = DAG.getMachineFunction();
  bool Is64Bit = Subtarget->is64Bit();
  bool IsWin64 = Subtarget->isCallingConvWin64(CallConv);
  StructReturnType SR = callIsStructReturn(Outs);
  bool IsSibcall = false;
  X86MachineFunctionInfo *X86Info = MF.getInfo<X86MachineFunctionInfo>();

  if (MF.getTarget().Options.DisableTailCalls)
    isTailCall = false;

  bool IsMustTail = CLI.CS && CLI.CS->isMustTailCall();
  if (IsMustTail) {
    // Force this to be a tail call. The verifier rules are enough to ensure
    // that we can lower this successfully without moving the return address
    // around.
    isTailCall = true;
  } else if (isTailCall) {
    // Check if it's really possible to do a tail call.
    isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv,
                    isVarArg, SR != NotStructReturn,
                    MF.getFunction()->hasStructRetAttr(), CLI.RetTy,
                    Outs, OutVals, Ins, DAG);

    // Sibcalls are automatically detected tailcalls which do not require
    // ABI changes.
    if (!MF.getTarget().Options.GuaranteedTailCallOpt && isTailCall)
      IsSibcall = true;

    if (isTailCall)
      ++NumTailCalls;
  }
2829 assert(!(isVarArg && IsTailCallConvention(CallConv)) &&
2830 "Var args not supported with calling convention fastcc, ghc or hipe");
2832 // Analyze operands of the call, assigning locations to each operand.
2833 SmallVector<CCValAssign, 16> ArgLocs;
2834 CCState CCInfo(CallConv, isVarArg, MF, ArgLocs, *DAG.getContext());
2836 // Allocate shadow area for Win64
2837 if (IsWin64)
2838 CCInfo.AllocateStack(32, 8);
2840 CCInfo.AnalyzeCallOperands(Outs, CC_X86);
2842 // Get a count of how many bytes are to be pushed on the stack.
2843 unsigned NumBytes = CCInfo.getNextStackOffset();
2844 if (IsSibcall)
2845 // This is a sibcall. The memory operands are already in place in the
2846 // stack frame of the caller's own caller, so no new bytes are pushed.
2847 NumBytes = 0;
2848 else if (MF.getTarget().Options.GuaranteedTailCallOpt &&
2849 IsTailCallConvention(CallConv))
2850 NumBytes = GetAlignedArgumentStackSize(NumBytes, DAG);
2852 int FPDiff = 0;
2853 if (isTailCall && !IsSibcall && !IsMustTail) {
2854 // Lower arguments at fp - stackoffset + fpdiff.
2855 unsigned NumBytesCallerPushed = X86Info->getBytesToPopOnReturn();
2857 FPDiff = NumBytesCallerPushed - NumBytes;
2859 // Record the delta by which the return address slot moves; only update
2860 // it when the new movement is larger than any previously recorded delta.
2861 if (FPDiff < X86Info->getTCReturnAddrDelta())
2862 X86Info->setTCReturnAddrDelta(FPDiff);
2865 unsigned NumBytesToPush = NumBytes;
2866 unsigned NumBytesToPop = NumBytes;
2868 // If we have an inalloca argument, all stack space has already been
2869 // allocated for us and is right at the top of the stack. We don't support
2870 // multiple arguments passed in memory when using inalloca.
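// Illustrative IR sketch (assumed example, not from this file):
//   %argmem = alloca inalloca <{ %struct.S }>
//   ; ...argument memory initialized in place...
//   call void @f(<{ %struct.S }>* inalloca %argmem)
// By this point the argument bytes already sit at the top of the stack, so
// no further stack adjustment is emitted for them.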
2871 if (!Outs.empty() && Outs.back().Flags.isInAlloca()) {
2873 if (!ArgLocs.back().isMemLoc())
2874 report_fatal_error("cannot use inalloca attribute on a register "
2875 "parameter");
2876 if (ArgLocs.back().getLocMemOffset() != 0)
2877 report_fatal_error("any parameter with the inalloca attribute must be "
2878 "the only memory argument");
2882 Chain = DAG.getCALLSEQ_START(
2883 Chain, DAG.getIntPtrConstant(NumBytesToPush, true), dl);
2885 SDValue RetAddrFrIdx;
2886 // Load return address for tail calls.
2887 if (isTailCall && FPDiff)
2888 Chain = EmitTailCallLoadRetAddr(DAG, RetAddrFrIdx, Chain, isTailCall,
2889 Is64Bit, FPDiff, dl);
2891 SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
2892 SmallVector<SDValue, 8> MemOpChains;
2895 // Walk the register/memloc assignments, inserting copies/loads. In the case
2896 // of tail call optimization arguments are handled later.
2897 const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
2898 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
2899 // Skip inalloca arguments, they have already been written.
2900 ISD::ArgFlagsTy Flags = Outs[i].Flags;
2901 if (Flags.isInAlloca())
2904 CCValAssign &VA = ArgLocs[i];
2905 EVT RegVT = VA.getLocVT();
2906 SDValue Arg = OutVals[i];
2907 bool isByVal = Flags.isByVal();
2909 // Promote the value if needed.
2910 switch (VA.getLocInfo()) {
2911 default: llvm_unreachable("Unknown loc info!");
2912 case CCValAssign::Full: break;
2913 case CCValAssign::SExt:
2914 Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, RegVT, Arg);
2915 break;
2916 case CCValAssign::ZExt:
2917 Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, RegVT, Arg);
2918 break;
2919 case CCValAssign::AExt:
2920 if (RegVT.is128BitVector()) {
2921 // Special case: passing MMX values in XMM registers.
2922 Arg = DAG.getNode(ISD::BITCAST, dl, MVT::i64, Arg);
2923 Arg = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64, Arg);
2924 Arg = getMOVL(DAG, dl, MVT::v2i64, DAG.getUNDEF(MVT::v2i64), Arg);
2925 } else
2926 Arg = DAG.getNode(ISD::ANY_EXTEND, dl, RegVT, Arg);
2927 break;
2928 case CCValAssign::BCvt:
2929 Arg = DAG.getNode(ISD::BITCAST, dl, RegVT, Arg);
2930 break;
2931 case CCValAssign::Indirect: {
2932 // Store the argument.
2933 SDValue SpillSlot = DAG.CreateStackTemporary(VA.getValVT());
2934 int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex();
2935 Chain = DAG.getStore(Chain, dl, Arg, SpillSlot,
2936 MachinePointerInfo::getFixedStack(FI),
2943 if (VA.isRegLoc()) {
2944 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
2945 if (isVarArg && IsWin64) {
2946 // Win64 ABI requires argument XMM reg to be copied to the corresponding
2947 // shadow reg if callee is a varargs function.
2948 unsigned ShadowReg = 0;
2949 switch (VA.getLocReg()) {
2950 case X86::XMM0: ShadowReg = X86::RCX; break;
2951 case X86::XMM1: ShadowReg = X86::RDX; break;
2952 case X86::XMM2: ShadowReg = X86::R8; break;
2953 case X86::XMM3: ShadowReg = X86::R9; break;
2956 RegsToPass.push_back(std::make_pair(ShadowReg, Arg));
2958 } else if (!IsSibcall && (!isTailCall || isByVal)) {
2959 assert(VA.isMemLoc());
2960 if (!StackPtr.getNode())
2961 StackPtr = DAG.getCopyFromReg(Chain, dl, RegInfo->getStackRegister(),
2963 MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, Arg,
2964 dl, DAG, VA, Flags));
2968 if (!MemOpChains.empty())
2969 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
2971 if (Subtarget->isPICStyleGOT()) {
2972 // ELF / PIC requires GOT in the EBX register before function calls via PLT
2975 RegsToPass.push_back(std::make_pair(unsigned(X86::EBX),
2976 DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), getPointerTy())));
2978 // If we are tail calling and generating PIC/GOT style code load the
2979 // address of the callee into ECX. The value in ecx is used as target of
2980 // the tail jump. This is done to circumvent the ebx/callee-saved problem
2981 // for tail calls on PIC/GOT architectures. Normally we would just put the
2982 // address of GOT into ebx and then call target@PLT. But for tail calls
2983 // ebx would be restored (since ebx is callee saved) before jumping to the
2986 // Note: The actual moving to ECX is done further down.
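// A sketch of the problem being avoided (illustrative assembly only):
//   movl $_GLOBAL_OFFSET_TABLE_, %ebx   ; GOT base for PIC code
//   ...epilogue restores callee-saved EBX...
//   jmp target@PLT                      ; PLT stub still expects EBX = GOT
// Loading the target address into ECX first sidesteps the restored EBX.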
2987 GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee);
2988 if (G && !G->getGlobal()->hasHiddenVisibility() &&
2989 !G->getGlobal()->hasProtectedVisibility())
2990 Callee = LowerGlobalAddress(Callee, DAG);
2991 else if (isa<ExternalSymbolSDNode>(Callee))
2992 Callee = LowerExternalSymbol(Callee, DAG);
2996 if (Is64Bit && isVarArg && !IsWin64 && !IsMustTail) {
2997 // From AMD64 ABI document:
2998 // For calls that may call functions that use varargs or stdargs
2999 // (prototype-less calls or calls to functions containing ellipsis (...) in
3000 // the declaration) %al is used as hidden argument to specify the number
3001 // of SSE registers used. The contents of %al do not need to match exactly
3002 // the number of registers, but must be an upper bound on the number of SSE
3003 // registers used and is in the range 0 - 8 inclusive.
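// Illustrative example (assumed call, not from the ABI text): for
//   printf("%f %f\n", x, y);   // two doubles passed in XMM0 and XMM1
// the caller may emit "movb $2, %al" (any upper bound up to 8 is valid)
// immediately before the call instruction.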
3005 // Count the number of XMM registers allocated.
3006 static const MCPhysReg XMMArgRegs[] = {
3007 X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
3008 X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7
3010 unsigned NumXMMRegs = CCInfo.getFirstUnallocated(XMMArgRegs, 8);
3011 assert((Subtarget->hasSSE1() || !NumXMMRegs)
3012 && "SSE registers cannot be used when SSE is disabled");
3014 RegsToPass.push_back(std::make_pair(unsigned(X86::AL),
3015 DAG.getConstant(NumXMMRegs, MVT::i8)));
3018 if (isVarArg && IsMustTail) {
3019 const auto &Forwards = X86Info->getForwardedMustTailRegParms();
3020 for (const auto &F : Forwards) {
3021 SDValue Val = DAG.getCopyFromReg(Chain, dl, F.VReg, F.VT);
3022 RegsToPass.push_back(std::make_pair(unsigned(F.PReg), Val));
3026 // For tail calls lower the arguments to the 'real' stack slots. Sibcalls
3027 // don't need this because the eligibility check rejects calls that require
3028 // shuffling arguments passed in memory.
3029 if (!IsSibcall && isTailCall) {
3030 // Force all the incoming stack arguments to be loaded from the stack
3031 // before any new outgoing arguments are stored to the stack, because the
3032 // outgoing stack slots may alias the incoming argument stack slots, and
3033 // the alias isn't otherwise explicit. This is slightly more conservative
3034 // than necessary, because it means that each store effectively depends
3035 // on every argument instead of just those arguments it would clobber.
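// Hypothetical example: if caller(a, b) tail-calls callee(b, a), storing b
// into the first outgoing slot would clobber the incoming a unless the load
// of a is forced to happen first by this token factor.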
3036 SDValue ArgChain = DAG.getStackArgumentTokenFactor(Chain);
3038 SmallVector<SDValue, 8> MemOpChains2;
3041 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
3042 CCValAssign &VA = ArgLocs[i];
3045 assert(VA.isMemLoc());
3046 SDValue Arg = OutVals[i];
3047 ISD::ArgFlagsTy Flags = Outs[i].Flags;
3048 // Skip inalloca arguments. They don't require any work.
3049 if (Flags.isInAlloca())
3051 // Create frame index.
3052 int32_t Offset = VA.getLocMemOffset()+FPDiff;
3053 uint32_t OpSize = (VA.getLocVT().getSizeInBits()+7)/8;
3054 FI = MF.getFrameInfo()->CreateFixedObject(OpSize, Offset, true);
3055 FIN = DAG.getFrameIndex(FI, getPointerTy());
3057 if (Flags.isByVal()) {
3058 // Copy relative to framepointer.
3059 SDValue Source = DAG.getIntPtrConstant(VA.getLocMemOffset());
3060 if (!StackPtr.getNode())
3061 StackPtr = DAG.getCopyFromReg(Chain, dl,
3062 RegInfo->getStackRegister(),
3064 Source = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, Source);
3066 MemOpChains2.push_back(CreateCopyOfByValArgument(Source, FIN,
3070 // Store relative to framepointer.
3071 MemOpChains2.push_back(
3072 DAG.getStore(ArgChain, dl, Arg, FIN,
3073 MachinePointerInfo::getFixedStack(FI),
3078 if (!MemOpChains2.empty())
3079 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains2);
3081 // Store the return address to the appropriate stack slot.
3082 Chain = EmitTailCallStoreRetAddr(DAG, MF, Chain, RetAddrFrIdx,
3083 getPointerTy(), RegInfo->getSlotSize(),
3087 // Build a sequence of copy-to-reg nodes chained together with token chain
3088 // and flag operands which copy the outgoing args into registers.
3090 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
3091 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
3092 RegsToPass[i].second, InFlag);
3093 InFlag = Chain.getValue(1);
3096 if (DAG.getTarget().getCodeModel() == CodeModel::Large) {
3097 assert(Is64Bit && "Large code model is only legal in 64-bit mode.");
3098 // In the 64-bit large code model, we have to make all calls
3099 // through a register, since the call instruction's 32-bit
3100 // pc-relative offset may not be large enough to hold the whole
3101 // address.
3102 } else if (Callee->getOpcode() == ISD::GlobalAddress) {
3103 // If the callee is a GlobalAddress node (quite common, every direct call
3104 // is) turn it into a TargetGlobalAddress node so that legalize doesn't hack
3106 GlobalAddressSDNode* G = cast<GlobalAddressSDNode>(Callee);
3108 // We should use extra load for direct calls to dllimported functions in
3109 // non-JIT mode.
3110 const GlobalValue *GV = G->getGlobal();
3111 if (!GV->hasDLLImportStorageClass()) {
3112 unsigned char OpFlags = 0;
3113 bool ExtraLoad = false;
3114 unsigned WrapperKind = ISD::DELETED_NODE;
3116 // On ELF targets, in both X86-64 and X86-32 mode, direct calls to
3117 // external symbols must go through the PLT in PIC mode. If the symbol
3118 // has hidden or protected visibility, or if it is static or local, then
3119 // we don't need to use the PLT - we can directly call it.
3120 if (Subtarget->isTargetELF() &&
3121 DAG.getTarget().getRelocationModel() == Reloc::PIC_ &&
3122 GV->hasDefaultVisibility() && !GV->hasLocalLinkage()) {
3123 OpFlags = X86II::MO_PLT;
3124 } else if (Subtarget->isPICStyleStubAny() &&
3125 (GV->isDeclaration() || GV->isWeakForLinker()) &&
3126 (!Subtarget->getTargetTriple().isMacOSX() ||
3127 Subtarget->getTargetTriple().isMacOSXVersionLT(10, 5))) {
3128 // PC-relative references to external symbols should go through $stub,
3129 // unless we're building with the leopard linker or later, which
3130 // automatically synthesizes these stubs.
3131 OpFlags = X86II::MO_DARWIN_STUB;
3132 } else if (Subtarget->isPICStyleRIPRel() && isa<Function>(GV) &&
3133 cast<Function>(GV)->hasFnAttribute(Attribute::NonLazyBind)) {
3134 // If the function is marked as non-lazy, generate an indirect call
3135 // which loads from the GOT directly. This avoids runtime overhead
3136 // at the cost of eager binding (and one extra byte of encoding).
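// Sketch of the resulting difference (illustrative assembly):
//   call foo                    ; lazy binding via the PLT
//   call *foo@GOTPCREL(%rip)    ; nonlazybind: one extra load, eager binding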
3137 OpFlags = X86II::MO_GOTPCREL;
3138 WrapperKind = X86ISD::WrapperRIP;
3142 Callee = DAG.getTargetGlobalAddress(GV, dl, getPointerTy(),
3143 G->getOffset(), OpFlags);
3145 // Add a wrapper if needed.
3146 if (WrapperKind != ISD::DELETED_NODE)
3147 Callee = DAG.getNode(X86ISD::WrapperRIP, dl, getPointerTy(), Callee);
3148 // Add extra indirection if needed.
3150 Callee = DAG.getLoad(getPointerTy(), dl, DAG.getEntryNode(), Callee,
3151 MachinePointerInfo::getGOT(),
3152 false, false, false, 0);
3154 } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
3155 unsigned char OpFlags = 0;
3157 // On ELF targets, in either X86-64 or X86-32 mode, direct calls to
3158 // external symbols should go through the PLT.
3159 if (Subtarget->isTargetELF() &&
3160 DAG.getTarget().getRelocationModel() == Reloc::PIC_) {
3161 OpFlags = X86II::MO_PLT;
3162 } else if (Subtarget->isPICStyleStubAny() &&
3163 (!Subtarget->getTargetTriple().isMacOSX() ||
3164 Subtarget->getTargetTriple().isMacOSXVersionLT(10, 5))) {
3165 // PC-relative references to external symbols should go through $stub,
3166 // unless we're building with the leopard linker or later, which
3167 // automatically synthesizes these stubs.
3168 OpFlags = X86II::MO_DARWIN_STUB;
3171 Callee = DAG.getTargetExternalSymbol(S->getSymbol(), getPointerTy(),
3173 } else if (Subtarget->isTarget64BitILP32() &&
3174 Callee->getValueType(0) == MVT::i32) {
3175 // Zero-extend the 32-bit Callee address into a 64-bit one, per the x32 ABI.
3176 Callee = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64, Callee);
3179 // Returns a chain & a flag for retval copy to use.
3180 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
3181 SmallVector<SDValue, 8> Ops;
3183 if (!IsSibcall && isTailCall) {
3184 Chain = DAG.getCALLSEQ_END(Chain,
3185 DAG.getIntPtrConstant(NumBytesToPop, true),
3186 DAG.getIntPtrConstant(0, true), InFlag, dl);
3187 InFlag = Chain.getValue(1);
3190 Ops.push_back(Chain);
3191 Ops.push_back(Callee);
3194 Ops.push_back(DAG.getConstant(FPDiff, MVT::i32));
3196 // Add argument registers to the end of the list so that they are known live
3198 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
3199 Ops.push_back(DAG.getRegister(RegsToPass[i].first,
3200 RegsToPass[i].second.getValueType()));
3202 // Add a register mask operand representing the call-preserved registers.
3203 const TargetRegisterInfo *TRI = Subtarget->getRegisterInfo();
3204 const uint32_t *Mask = TRI->getCallPreservedMask(CallConv);
3205 assert(Mask && "Missing call preserved mask for calling convention");
3206 Ops.push_back(DAG.getRegisterMask(Mask));
3208 if (InFlag.getNode())
3209 Ops.push_back(InFlag);
3211 if (isTailCall) {
3212 // We used to do:
3213 //// If this is the first return lowered for this function, add the regs
3214 //// to the liveout set for the function.
3215 // This isn't right, although it's probably harmless on x86; liveouts
3216 // should be computed from returns not tail calls. Consider a void
3217 // function making a tail call to a function returning int.
3218 return DAG.getNode(X86ISD::TC_RETURN, dl, NodeTys, Ops);
3221 Chain = DAG.getNode(X86ISD::CALL, dl, NodeTys, Ops);
3222 InFlag = Chain.getValue(1);
3224 // Create the CALLSEQ_END node.
3225 unsigned NumBytesForCalleeToPop;
3226 if (X86::isCalleePop(CallConv, Is64Bit, isVarArg,
3227 DAG.getTarget().Options.GuaranteedTailCallOpt))
3228 NumBytesForCalleeToPop = NumBytes; // Callee pops everything
3229 else if (!Is64Bit && !IsTailCallConvention(CallConv) &&
3230 !Subtarget->getTargetTriple().isOSMSVCRT() &&
3231 SR == StackStructReturn)
3232 // If this is a call to a struct-return function, the callee
3233 // pops the hidden struct pointer, so we have to push it back.
3234 // This is common for Darwin/X86, Linux & Mingw32 targets.
3235 // For MSVC Win32 targets, the caller pops the hidden struct pointer.
3236 NumBytesForCalleeToPop = 4;
3238 NumBytesForCalleeToPop = 0; // Callee pops nothing.
3240 // Returns a flag for retval copy to use.
3242 Chain = DAG.getCALLSEQ_END(Chain,
3243 DAG.getIntPtrConstant(NumBytesToPop, true),
3244 DAG.getIntPtrConstant(NumBytesForCalleeToPop,
3247 InFlag = Chain.getValue(1);
3250 // Handle result values, copying them out of physregs into vregs that we
3252 return LowerCallResult(Chain, InFlag, CallConv, isVarArg,
3253 Ins, dl, DAG, InVals);
3256 //===----------------------------------------------------------------------===//
3257 // Fast Calling Convention (tail call) implementation
3258 //===----------------------------------------------------------------------===//
3260 // Like stdcall, the callee cleans up the arguments; unlike stdcall, ECX is
3261 // reserved for storing the address of the tail-called function. Only 2
3262 // registers are free for argument passing (inreg). Tail call optimization
3263 // is performed provided:
3264 // * tailcallopt is enabled
3265 // * caller/callee are fastcc
3266 // On X86_64 architecture with GOT-style position independent code only local
3267 // (within module) calls are supported at the moment.
3268 // To keep the stack aligned according to the platform ABI, the function
3269 // GetAlignedArgumentStackSize ensures that the argument delta is always a
3270 // multiple of the stack alignment. (Dynamic linkers need this - darwin's dyld for example)
3271 // If a tail-called callee has more arguments than the caller, the caller
3272 // needs to make sure that there is room to move the RETADDR to. This is
3273 // achieved by reserving an area the size of the argument delta right after the
3274 // original RETADDR, but before the saved framepointer or the spilled registers,
3275 // e.g. caller(arg1, arg2) calls callee(arg1, arg2, arg3, arg4):
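// A sketch of the caller's stack in that case (the original diagram is elided
// here; this is a best-effort reconstruction, not verbatim from the source):
//   arg1
//   arg2
//   RETADDR
//   [ move area for the new RETADDR, sized by the argument delta ]
//   (possible saved EBP)
//   callee-saved spills / locals ...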
3287 /// GetAlignedArgumentStackSize - Round the stack size up to e.g. 16n + 12, so
3288 /// that the stack stays aligned for a 16 byte alignment requirement.
3290 X86TargetLowering::GetAlignedArgumentStackSize(unsigned StackSize,
3291 SelectionDAG& DAG) const {
3292 const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
3293 const TargetFrameLowering &TFI = *Subtarget->getFrameLowering();
3294 unsigned StackAlignment = TFI.getStackAlignment();
3295 uint64_t AlignMask = StackAlignment - 1;
3296 int64_t Offset = StackSize;
3297 unsigned SlotSize = RegInfo->getSlotSize();
3298 if ( (Offset & AlignMask) <= (StackAlignment - SlotSize) ) {
3299 // The misalignment is no more than StackAlignment - SlotSize (12 here), so just add the difference.
3300 Offset += ((StackAlignment - SlotSize) - (Offset & AlignMask));
3302 // Mask out the lower bits, then add StackAlignment once plus the 12 (StackAlignment - SlotSize) bytes.
3303 Offset = ((~AlignMask) & Offset) + StackAlignment +
3304 (StackAlignment-SlotSize);
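// Worked examples with assumed StackAlignment = 16 and SlotSize = 4:
//   StackSize = 20: 20 & 15 = 4 <= 12, so 20 + (12 - 4) = 28 = 16*1 + 12.
//   StackSize = 30: 30 & 15 = 14 > 12, so (30 & ~15) + 16 + 12 = 44 = 16*2 + 12.
// Either way the result is SlotSize short of a multiple of the alignment.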
3309 /// MatchingStackOffset - Return true if the given stack call argument is
3310 /// already available in the same position (relatively) of the caller's
3311 /// incoming argument stack.
3313 bool MatchingStackOffset(SDValue Arg, unsigned Offset, ISD::ArgFlagsTy Flags,
3314 MachineFrameInfo *MFI, const MachineRegisterInfo *MRI,
3315 const X86InstrInfo *TII) {
3316 unsigned Bytes = Arg.getValueType().getSizeInBits() / 8;
3317 int FI = INT_MAX;
3318 if (Arg.getOpcode() == ISD::CopyFromReg) {
3319 unsigned VR = cast<RegisterSDNode>(Arg.getOperand(1))->getReg();
3320 if (!TargetRegisterInfo::isVirtualRegister(VR))
3322 MachineInstr *Def = MRI->getVRegDef(VR);
3325 if (!Flags.isByVal()) {
3326 if (!TII->isLoadFromStackSlot(Def, FI))
3329 unsigned Opcode = Def->getOpcode();
3330 if ((Opcode == X86::LEA32r || Opcode == X86::LEA64r ||
3331 Opcode == X86::LEA64_32r) &&
3332 Def->getOperand(1).isFI()) {
3333 FI = Def->getOperand(1).getIndex();
3334 Bytes = Flags.getByValSize();
3338 } else if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Arg)) {
3339 if (Flags.isByVal())
3340 // ByVal argument is passed in as a pointer but it's now being
3341 // dereferenced. e.g.
3342 // define @foo(%struct.X* %A) {
3343 // tail call @bar(%struct.X* byval %A)
3346 SDValue Ptr = Ld->getBasePtr();
3347 FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(Ptr);
3350 FI = FINode->getIndex();
3351 } else if (Arg.getOpcode() == ISD::FrameIndex && Flags.isByVal()) {
3352 FrameIndexSDNode *FINode = cast<FrameIndexSDNode>(Arg);
3353 FI = FINode->getIndex();
3354 Bytes = Flags.getByValSize();
3358 assert(FI != INT_MAX);
3359 if (!MFI->isFixedObjectIndex(FI))
3361 return Offset == MFI->getObjectOffset(FI) && Bytes == MFI->getObjectSize(FI);
3364 /// IsEligibleForTailCallOptimization - Check whether the call is eligible
3365 /// for tail call optimization. Targets which want to do tail call
3366 /// optimization should implement this function.
3368 X86TargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
3369 CallingConv::ID CalleeCC,
3371 bool isCalleeStructRet,
3372 bool isCallerStructRet,
3374 const SmallVectorImpl<ISD::OutputArg> &Outs,
3375 const SmallVectorImpl<SDValue> &OutVals,
3376 const SmallVectorImpl<ISD::InputArg> &Ins,
3377 SelectionDAG &DAG) const {
3378 if (!IsTailCallConvention(CalleeCC) && !IsCCallConvention(CalleeCC))
3381 // If -tailcallopt is specified, make fastcc functions tail-callable.
3382 const MachineFunction &MF = DAG.getMachineFunction();
3383 const Function *CallerF = MF.getFunction();
3385 // If the function return type is x86_fp80 and the callee return type is not,
3386 // then the FP_EXTEND of the call result is not a nop. It's not safe to
3387 // perform a tailcall optimization here.
3388 if (CallerF->getReturnType()->isX86_FP80Ty() && !RetTy->isX86_FP80Ty())
3391 CallingConv::ID CallerCC = CallerF->getCallingConv();
3392 bool CCMatch = CallerCC == CalleeCC;
3393 bool IsCalleeWin64 = Subtarget->isCallingConvWin64(CalleeCC);
3394 bool IsCallerWin64 = Subtarget->isCallingConvWin64(CallerCC);
3396 if (DAG.getTarget().Options.GuaranteedTailCallOpt) {
3397 if (IsTailCallConvention(CalleeCC) && CCMatch)
3402 // Look for obvious safe cases to perform tail call optimization that do not
3403 // require ABI changes. This is what gcc calls sibcall.
3405 // Can't do sibcall if stack needs to be dynamically re-aligned. PEI needs to
3406 // emit a special epilogue.
3407 const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
3408 if (RegInfo->needsStackRealignment(MF))
3411 // Also avoid sibcall optimization if either caller or callee uses struct
3412 // return semantics.
3413 if (isCalleeStructRet || isCallerStructRet)
3416 // A stdcall/thiscall caller is expected to clean up its arguments; the
3417 // callee isn't going to do that.
3418 // FIXME: this is more restrictive than needed. We could produce a tailcall
3419 // when the stack adjustment matches. For example, with a thiscall that takes
3420 // only one argument.
3421 if (!CCMatch && (CallerCC == CallingConv::X86_StdCall ||
3422 CallerCC == CallingConv::X86_ThisCall))
3425 // Do not sibcall optimize vararg calls unless all arguments are passed via
3426 // registers.
3427 if (isVarArg && !Outs.empty()) {
3429 // Optimizing for varargs on Win64 is unlikely to be safe without
3430 // additional testing.
3431 if (IsCalleeWin64 || IsCallerWin64)
3434 SmallVector<CCValAssign, 16> ArgLocs;
3435 CCState CCInfo(CalleeCC, isVarArg, DAG.getMachineFunction(), ArgLocs,
3438 CCInfo.AnalyzeCallOperands(Outs, CC_X86);
3439 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i)
3440 if (!ArgLocs[i].isRegLoc())
3444 // If the call result is in ST0 / ST1, it needs to be popped off the x87
3445 // stack. Therefore, if it's not used by the call it is not safe to optimize
3446 // this into a sibcall.
3447 bool Unused = false;
3448 for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
3455 SmallVector<CCValAssign, 16> RVLocs;
3456 CCState CCInfo(CalleeCC, false, DAG.getMachineFunction(), RVLocs,
3458 CCInfo.AnalyzeCallResult(Ins, RetCC_X86);
3459 for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
3460 CCValAssign &VA = RVLocs[i];
3461 if (VA.getLocReg() == X86::FP0 || VA.getLocReg() == X86::FP1)
3466 // If the calling conventions do not match, then we'd better make sure the
3467 // results are returned in the same way as what the caller expects.
3469 SmallVector<CCValAssign, 16> RVLocs1;
3470 CCState CCInfo1(CalleeCC, false, DAG.getMachineFunction(), RVLocs1,
3472 CCInfo1.AnalyzeCallResult(Ins, RetCC_X86);
3474 SmallVector<CCValAssign, 16> RVLocs2;
3475 CCState CCInfo2(CallerCC, false, DAG.getMachineFunction(), RVLocs2,
3477 CCInfo2.AnalyzeCallResult(Ins, RetCC_X86);
3479 if (RVLocs1.size() != RVLocs2.size())
3481 for (unsigned i = 0, e = RVLocs1.size(); i != e; ++i) {
3482 if (RVLocs1[i].isRegLoc() != RVLocs2[i].isRegLoc())
3484 if (RVLocs1[i].getLocInfo() != RVLocs2[i].getLocInfo())
3486 if (RVLocs1[i].isRegLoc()) {
3487 if (RVLocs1[i].getLocReg() != RVLocs2[i].getLocReg())
3490 if (RVLocs1[i].getLocMemOffset() != RVLocs2[i].getLocMemOffset())
3496 // If the callee takes no arguments then go on to check the results of the
3497 // call.
3498 if (!Outs.empty()) {
3499 // Check if stack adjustment is needed. For now, do not do this if any
3500 // argument is passed on the stack.
3501 SmallVector<CCValAssign, 16> ArgLocs;
3502 CCState CCInfo(CalleeCC, isVarArg, DAG.getMachineFunction(), ArgLocs,
3505 // Allocate shadow area for Win64
3507 CCInfo.AllocateStack(32, 8);
3509 CCInfo.AnalyzeCallOperands(Outs, CC_X86);
3510 if (CCInfo.getNextStackOffset()) {
3511 MachineFunction &MF = DAG.getMachineFunction();
3512 if (MF.getInfo<X86MachineFunctionInfo>()->getBytesToPopOnReturn())
3515 // Check if the arguments are already laid out in the right way as
3516 // the caller's fixed stack objects.
3517 MachineFrameInfo *MFI = MF.getFrameInfo();
3518 const MachineRegisterInfo *MRI = &MF.getRegInfo();
3519 const X86InstrInfo *TII = Subtarget->getInstrInfo();
3520 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
3521 CCValAssign &VA = ArgLocs[i];
3522 SDValue Arg = OutVals[i];
3523 ISD::ArgFlagsTy Flags = Outs[i].Flags;
3524 if (VA.getLocInfo() == CCValAssign::Indirect)
3526 if (!VA.isRegLoc()) {
3527 if (!MatchingStackOffset(Arg, VA.getLocMemOffset(), Flags,
3534 // If the tailcall address may be in a register, then make sure it's
3535 // possible to register allocate for it. In 32-bit, the call address can
3536 // only target EAX, EDX, or ECX since the tail call must be scheduled after
3537 // callee-saved registers are restored. These happen to be the same
3538 // registers used to pass 'inreg' arguments so watch out for those.
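// Hypothetical illustration: in 32-bit PIC code, two 'inreg' arguments may
// already occupy EAX and EDX, and forming the PIC address needs a register
// as well, leaving at most ECX for the call target - hence the MaxInRegs
// budget enforced below.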
3539 if (!Subtarget->is64Bit() &&
3540 ((!isa<GlobalAddressSDNode>(Callee) &&
3541 !isa<ExternalSymbolSDNode>(Callee)) ||
3542 DAG.getTarget().getRelocationModel() == Reloc::PIC_)) {
3543 unsigned NumInRegs = 0;
3544 // In PIC we need an extra register to formulate the address computation
3546 unsigned MaxInRegs =
3547 (DAG.getTarget().getRelocationModel() == Reloc::PIC_) ? 2 : 3;
3549 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
3550 CCValAssign &VA = ArgLocs[i];
3553 unsigned Reg = VA.getLocReg();
3556 case X86::EAX: case X86::EDX: case X86::ECX:
3557 if (++NumInRegs == MaxInRegs)
3569 X86TargetLowering::createFastISel(FunctionLoweringInfo &funcInfo,
3570 const TargetLibraryInfo *libInfo) const {
3571 return X86::createFastISel(funcInfo, libInfo);
3574 //===----------------------------------------------------------------------===//
3575 // Other Lowering Hooks
3576 //===----------------------------------------------------------------------===//
3578 static bool MayFoldLoad(SDValue Op) {
3579 return Op.hasOneUse() && ISD::isNormalLoad(Op.getNode());
3582 static bool MayFoldIntoStore(SDValue Op) {
3583 return Op.hasOneUse() && ISD::isNormalStore(*Op.getNode()->use_begin());
3586 static bool isTargetShuffle(unsigned Opcode) {
3588 default: return false;
3589 case X86ISD::BLENDI:
3590 case X86ISD::PSHUFB:
3591 case X86ISD::PSHUFD:
3592 case X86ISD::PSHUFHW:
3593 case X86ISD::PSHUFLW:
3595 case X86ISD::PALIGNR:
3596 case X86ISD::MOVLHPS:
3597 case X86ISD::MOVLHPD:
3598 case X86ISD::MOVHLPS:
3599 case X86ISD::MOVLPS:
3600 case X86ISD::MOVLPD:
3601 case X86ISD::MOVSHDUP:
3602 case X86ISD::MOVSLDUP:
3603 case X86ISD::MOVDDUP:
3606 case X86ISD::UNPCKL:
3607 case X86ISD::UNPCKH:
3608 case X86ISD::VPERMILPI:
3609 case X86ISD::VPERM2X128:
3610 case X86ISD::VPERMI:
3615 static SDValue getTargetShuffleNode(unsigned Opc, SDLoc dl, EVT VT,
3616 SDValue V1, SelectionDAG &DAG) {
3618 default: llvm_unreachable("Unknown x86 shuffle node");
3619 case X86ISD::MOVSHDUP:
3620 case X86ISD::MOVSLDUP:
3621 case X86ISD::MOVDDUP:
3622 return DAG.getNode(Opc, dl, VT, V1);
3626 static SDValue getTargetShuffleNode(unsigned Opc, SDLoc dl, EVT VT,
3627 SDValue V1, unsigned TargetMask,
3628 SelectionDAG &DAG) {
3630 default: llvm_unreachable("Unknown x86 shuffle node");
3631 case X86ISD::PSHUFD:
3632 case X86ISD::PSHUFHW:
3633 case X86ISD::PSHUFLW:
3634 case X86ISD::VPERMILPI:
3635 case X86ISD::VPERMI:
3636 return DAG.getNode(Opc, dl, VT, V1, DAG.getConstant(TargetMask, MVT::i8));
3640 static SDValue getTargetShuffleNode(unsigned Opc, SDLoc dl, EVT VT,
3641 SDValue V1, SDValue V2, unsigned TargetMask,
3642 SelectionDAG &DAG) {
3644 default: llvm_unreachable("Unknown x86 shuffle node");
3645 case X86ISD::PALIGNR:
3646 case X86ISD::VALIGN:
3648 case X86ISD::VPERM2X128:
3649 return DAG.getNode(Opc, dl, VT, V1, V2,
3650 DAG.getConstant(TargetMask, MVT::i8));
3654 static SDValue getTargetShuffleNode(unsigned Opc, SDLoc dl, EVT VT,
3655 SDValue V1, SDValue V2, SelectionDAG &DAG) {
3657 default: llvm_unreachable("Unknown x86 shuffle node");
3658 case X86ISD::MOVLHPS:
3659 case X86ISD::MOVLHPD:
3660 case X86ISD::MOVHLPS:
3661 case X86ISD::MOVLPS:
3662 case X86ISD::MOVLPD:
3665 case X86ISD::UNPCKL:
3666 case X86ISD::UNPCKH:
3667 return DAG.getNode(Opc, dl, VT, V1, V2);
3671 SDValue X86TargetLowering::getReturnAddressFrameIndex(SelectionDAG &DAG) const {
3672 MachineFunction &MF = DAG.getMachineFunction();
3673 const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
3674 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
3675 int ReturnAddrIndex = FuncInfo->getRAIndex();
3677 if (ReturnAddrIndex == 0) {
3678 // Set up a frame object for the return address.
3679 unsigned SlotSize = RegInfo->getSlotSize();
3680 ReturnAddrIndex = MF.getFrameInfo()->CreateFixedObject(SlotSize,
3683 FuncInfo->setRAIndex(ReturnAddrIndex);
3686 return DAG.getFrameIndex(ReturnAddrIndex, getPointerTy());
3689 bool X86::isOffsetSuitableForCodeModel(int64_t Offset, CodeModel::Model M,
3690 bool hasSymbolicDisplacement) {
3691 // Offset should fit into 32 bit immediate field.
3692 if (!isInt<32>(Offset))
3695 // If we don't have a symbolic displacement - we don't have any extra
3697 if (!hasSymbolicDisplacement)
3700 // FIXME: Some tweaks might be needed for medium code model.
3701 if (M != CodeModel::Small && M != CodeModel::Kernel)
3704 // For the small code model we assume that the last object is 16MB before the
3705 // end of the 31-bit boundary. We may also accept pretty large negative
3706 // constants, knowing that all objects are in the positive half of the address space.
3707 if (M == CodeModel::Small && Offset < 16*1024*1024)
3710 // For the kernel code model we know that all objects reside in the negative
3711 // half of the 32-bit address space. We must not accept negative offsets, since
3712 // they may be just out of range, but we may accept pretty large positive ones.
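// Worked example (assumed addresses): a kernel-model symbol at -0x7ff00000
// plus a positive offset stays inside [-2GB, 0), while a negative offset
// could slip below -2GB and no longer fit a sign-extended 32-bit immediate.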
3713 if (M == CodeModel::Kernel && Offset >= 0)
3719 /// isCalleePop - Determines whether the callee is required to pop its
3720 /// own arguments. Callee pop is necessary to support tail calls.
3721 bool X86::isCalleePop(CallingConv::ID CallingConv,
3722 bool is64Bit, bool IsVarArg, bool TailCallOpt) {
3723 switch (CallingConv) {
3726 case CallingConv::X86_StdCall:
3727 case CallingConv::X86_FastCall:
3728 case CallingConv::X86_ThisCall:
3730 case CallingConv::Fast:
3731 case CallingConv::GHC:
3732 case CallingConv::HiPE:
3739 /// \brief Return true if the condition is an unsigned comparison operation.
3740 static bool isX86CCUnsigned(unsigned X86CC) {
3742 default: llvm_unreachable("Invalid integer condition!");
3743 case X86::COND_E: return true;
3744 case X86::COND_G: return false;
3745 case X86::COND_GE: return false;
3746 case X86::COND_L: return false;
3747 case X86::COND_LE: return false;
3748 case X86::COND_NE: return true;
3749 case X86::COND_B: return true;
3750 case X86::COND_A: return true;
3751 case X86::COND_BE: return true;
3752 case X86::COND_AE: return true;
3754 llvm_unreachable("covered switch fell through?!");
3757 /// TranslateX86CC - Do a one-to-one translation of an ISD::CondCode to the
3758 /// X86-specific condition code, returning the condition code and the LHS/RHS
3759 /// of the comparison to make.
3760 static unsigned TranslateX86CC(ISD::CondCode SetCCOpcode, bool isFP,
3761 SDValue &LHS, SDValue &RHS, SelectionDAG &DAG) {
3763 if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS)) {
3764 if (SetCCOpcode == ISD::SETGT && RHSC->isAllOnesValue()) {
3765 // X > -1 -> X == 0, jump !sign.
3766 RHS = DAG.getConstant(0, RHS.getValueType());
3767 return X86::COND_NS;
3769 if (SetCCOpcode == ISD::SETLT && RHSC->isNullValue()) {
3770 // X < 0 -> X == 0, jump on sign.
3771 return X86::COND_S;
3772 }
3773 if (SetCCOpcode == ISD::SETLT && RHSC->getZExtValue() == 1) {
3774 // X < 1 -> X <= 0
3775 RHS = DAG.getConstant(0, RHS.getValueType());
3776 return X86::COND_LE;
3780 switch (SetCCOpcode) {
3781 default: llvm_unreachable("Invalid integer condition!");
3782 case ISD::SETEQ: return X86::COND_E;
3783 case ISD::SETGT: return X86::COND_G;
3784 case ISD::SETGE: return X86::COND_GE;
3785 case ISD::SETLT: return X86::COND_L;
3786 case ISD::SETLE: return X86::COND_LE;
3787 case ISD::SETNE: return X86::COND_NE;
3788 case ISD::SETULT: return X86::COND_B;
3789 case ISD::SETUGT: return X86::COND_A;
3790 case ISD::SETULE: return X86::COND_BE;
3791 case ISD::SETUGE: return X86::COND_AE;
3795 // First determine if it is required or is profitable to flip the operands.
3797 // If LHS is a foldable load, but RHS is not, flip the condition.
3798 if (ISD::isNON_EXTLoad(LHS.getNode()) &&
3799 !ISD::isNON_EXTLoad(RHS.getNode())) {
3800 SetCCOpcode = getSetCCSwappedOperands(SetCCOpcode);
3801 std::swap(LHS, RHS);
3804 switch (SetCCOpcode) {
3810 std::swap(LHS, RHS);
3814 // On a floating point condition, the flags are set as follows:
3815 // ZF  PF  CF   op
3816 //  0 | 0 | 0 | X > Y
3817 // 0 | 0 | 1 | X < Y
3818 // 1 | 0 | 0 | X == Y
3819 // 1 | 1 | 1 | unordered
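// Example of using the table: SSE/x87 compares only give a "below" style CF,
// so SETOLT (X < Y) is handled by swapping the operands above and returning
// X86::COND_A, i.e. testing Y > X via CF == 0 and ZF == 0.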
3820 switch (SetCCOpcode) {
3821 default: llvm_unreachable("Condcode should be pre-legalized away");
3823 case ISD::SETEQ: return X86::COND_E;
3824 case ISD::SETOLT: // flipped
3826 case ISD::SETGT: return X86::COND_A;
3827 case ISD::SETOLE: // flipped
3829 case ISD::SETGE: return X86::COND_AE;
3830 case ISD::SETUGT: // flipped
3832 case ISD::SETLT: return X86::COND_B;
3833 case ISD::SETUGE: // flipped
3835 case ISD::SETLE: return X86::COND_BE;
3837 case ISD::SETNE: return X86::COND_NE;
3838 case ISD::SETUO: return X86::COND_P;
3839 case ISD::SETO: return X86::COND_NP;
3841 case ISD::SETUNE: return X86::COND_INVALID;
3845 /// hasFPCMov - is there a floating point cmov for the specific X86 condition
3846 /// code. The current x86 ISA includes the following FP cmov instructions:
3847 /// fcmovb, fcmovbe, fcmove, fcmovu, fcmovae, fcmova, fcmovne, fcmovnu.
3848 static bool hasFPCMov(unsigned X86CC) {
3864 /// isFPImmLegal - Returns true if the target can instruction select the
3865 /// specified FP immediate natively. If false, the legalizer will
3866 /// materialize the FP immediate as a load from a constant pool.
3867 bool X86TargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const {
3868 for (unsigned i = 0, e = LegalFPImmediates.size(); i != e; ++i) {
3869 if (Imm.bitwiseIsEqual(LegalFPImmediates[i]))
3875 bool X86TargetLowering::shouldReduceLoadWidth(SDNode *Load,
3876 ISD::LoadExtType ExtTy,
3878 // "ELF Handling for Thread-Local Storage" specifies that R_X86_64_GOTTPOFF
3879 // relocation target a movq or addq instruction: don't let the load shrink.
3880 SDValue BasePtr = cast<LoadSDNode>(Load)->getBasePtr();
3881 if (BasePtr.getOpcode() == X86ISD::WrapperRIP)
3882 if (const auto *GA = dyn_cast<GlobalAddressSDNode>(BasePtr.getOperand(0)))
3883 return GA->getTargetFlags() != X86II::MO_GOTTPOFF;
3887 /// \brief Returns true if it is beneficial to convert a load of a constant
3888 /// to just the constant itself.
3889 bool X86TargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm,
3891 assert(Ty->isIntegerTy());
3893 unsigned BitSize = Ty->getPrimitiveSizeInBits();
3894 if (BitSize == 0 || BitSize > 64)
3899 bool X86TargetLowering::isExtractSubvectorCheap(EVT ResVT,
3900 unsigned Index) const {
3901 if (!isOperationLegalOrCustom(ISD::EXTRACT_SUBVECTOR, ResVT))
3904 return (Index == 0 || Index == ResVT.getVectorNumElements());
3907 bool X86TargetLowering::isCheapToSpeculateCttz() const {
3908 // Speculate cttz only if we can directly use TZCNT.
3909 return Subtarget->hasBMI();
3912 bool X86TargetLowering::isCheapToSpeculateCtlz() const {
3913 // Speculate ctlz only if we can directly use LZCNT.
3914 return Subtarget->hasLZCNT();
3917 /// isUndefOrInRange - Return true if Val is undef or if its value falls within
3918 /// the specified half-open range [Low, Hi).
3919 static bool isUndefOrInRange(int Val, int Low, int Hi) {
3920 return (Val < 0) || (Val >= Low && Val < Hi);
3923 /// isUndefOrEqual - Val is either less than zero (undef) or equal to the
3924 /// specified value.
3925 static bool isUndefOrEqual(int Val, int CmpVal) {
3926 return (Val < 0 || Val == CmpVal);
3929 /// isSequentialOrUndefInRange - Return true if every element in Mask, beginning
3930 /// at position Pos and ending at Pos+Size, falls within the specified
3931 /// sequential range [Low, Low+Size), or is undef.
3932 static bool isSequentialOrUndefInRange(ArrayRef<int> Mask,
3933 unsigned Pos, unsigned Size, int Low) {
3934 for (unsigned i = Pos, e = Pos+Size; i != e; ++i, ++Low)
3935 if (!isUndefOrEqual(Mask[i], Low))
3940 /// isPSHUFDMask - Return true if the node specifies a shuffle of elements that
3941 /// is suitable for input to PSHUFD. That is, it doesn't reference the other
3942 /// operand - by default will match for first operand.
3943 static bool isPSHUFDMask(ArrayRef<int> Mask, MVT VT,
3944 bool TestSecondOperand = false) {
3945 if (VT != MVT::v4f32 && VT != MVT::v4i32 &&
3946 VT != MVT::v2f64 && VT != MVT::v2i64)
3949 unsigned NumElems = VT.getVectorNumElements();
3950 unsigned Lo = TestSecondOperand ? NumElems : 0;
3951 unsigned Hi = Lo + NumElems;
3953 for (unsigned i = 0; i < NumElems; ++i)
3954 if (!isUndefOrInRange(Mask[i], (int)Lo, (int)Hi))
3960 /// isPSHUFHWMask - Return true if the node specifies a shuffle of elements that
3961 /// is suitable for input to PSHUFHW.
3962 static bool isPSHUFHWMask(ArrayRef<int> Mask, MVT VT, bool HasInt256) {
3963 if (VT != MVT::v8i16 && (!HasInt256 || VT != MVT::v16i16))
3966 // Lower quadword copied in order or undef.
3967 if (!isSequentialOrUndefInRange(Mask, 0, 4, 0))
3970 // Upper quadword shuffled.
3971 for (unsigned i = 4; i != 8; ++i)
3972 if (!isUndefOrInRange(Mask[i], 4, 8))
3975 if (VT == MVT::v16i16) {
3976 // Lower quadword copied in order or undef.
3977 if (!isSequentialOrUndefInRange(Mask, 8, 4, 8))
3980 // Upper quadword shuffled.
3981 for (unsigned i = 12; i != 16; ++i)
3982 if (!isUndefOrInRange(Mask[i], 12, 16))
3989 /// isPSHUFLWMask - Return true if the node specifies a shuffle of elements that
3990 /// is suitable for input to PSHUFLW.
3991 static bool isPSHUFLWMask(ArrayRef<int> Mask, MVT VT, bool HasInt256) {
3992 if (VT != MVT::v8i16 && (!HasInt256 || VT != MVT::v16i16))
3995 // Upper quadword copied in order.
3996 if (!isSequentialOrUndefInRange(Mask, 4, 4, 4))
3999 // Lower quadword shuffled.
4000 for (unsigned i = 0; i != 4; ++i)
4001 if (!isUndefOrInRange(Mask[i], 0, 4))
4004 if (VT == MVT::v16i16) {
4005 // Upper quadword copied in order.
4006 if (!isSequentialOrUndefInRange(Mask, 12, 4, 12))
4009 // Lower quadword shuffled.
4010 for (unsigned i = 8; i != 12; ++i)
4011 if (!isUndefOrInRange(Mask[i], 8, 12))
4018 /// \brief Return true if the mask specifies a shuffle of elements that is
4019 /// suitable for input to intralane (palignr) or interlane (valign) vector
4020 /// shuffle operations.
4021 static bool isAlignrMask(ArrayRef<int> Mask, MVT VT, bool InterLane) {
4022 unsigned NumElts = VT.getVectorNumElements();
4023 unsigned NumLanes = InterLane ? 1: VT.getSizeInBits()/128;
4024 unsigned NumLaneElts = NumElts/NumLanes;
4026 // Do not handle 64-bit element shuffles with palignr.
4027 if (NumLaneElts == 2)
4030 for (unsigned l = 0; l != NumElts; l+=NumLaneElts) {
4032 for (i = 0; i != NumLaneElts; ++i) {
4037 // Lane is all undef, go to next lane
4038 if (i == NumLaneElts)
4041 int Start = Mask[i+l];
4043 // Make sure its in this lane in one of the sources
4044 if (!isUndefOrInRange(Start, l, l+NumLaneElts) &&
4045 !isUndefOrInRange(Start, l+NumElts, l+NumElts+NumLaneElts))
4048 // If not lane 0, then we must match lane 0
4049 if (l != 0 && Mask[i] >= 0 && !isUndefOrEqual(Start, Mask[i]+l))
4052 // Correct second source to be contiguous with first source
4053 if (Start >= (int)NumElts)
4054 Start -= NumElts - NumLaneElts;
4056 // Make sure we're shifting in the right direction.
4057 if (Start <= (int)(i+l))
4062 // Check the rest of the elements to see if they are consecutive.
4063 for (++i; i != NumLaneElts; ++i) {
4064 int Idx = Mask[i+l];
4066 // Make sure its in this lane
4067 if (!isUndefOrInRange(Idx, l, l+NumLaneElts) &&
4068 !isUndefOrInRange(Idx, l+NumElts, l+NumElts+NumLaneElts))
4071 // If not lane 0, then we must match lane 0
4072 if (l != 0 && Mask[i] >= 0 && !isUndefOrEqual(Idx, Mask[i]+l))
4075 if (Idx >= (int)NumElts)
4076 Idx -= NumElts - NumLaneElts;
4078 if (!isUndefOrEqual(Idx, Start+i))
4087 /// \brief Return true if the node specifies a shuffle of elements that is
4088 /// suitable for input to PALIGNR.
4089 static bool isPALIGNRMask(ArrayRef<int> Mask, MVT VT,
4090 const X86Subtarget *Subtarget) {
4091 if ((VT.is128BitVector() && !Subtarget->hasSSSE3()) ||
4092 (VT.is256BitVector() && !Subtarget->hasInt256()) ||
4093 VT.is512BitVector())
4094 // FIXME: Add AVX512BW.
4097 return isAlignrMask(Mask, VT, false);
4100 /// \brief Return true if the node specifies a shuffle of elements that is
4101 /// suitable for input to VALIGN.
4102 static bool isVALIGNMask(ArrayRef<int> Mask, MVT VT,
4103 const X86Subtarget *Subtarget) {
4104 // FIXME: Add AVX512VL.
4105 if (!VT.is512BitVector() || !Subtarget->hasAVX512())
4107 return isAlignrMask(Mask, VT, true);
4110 /// CommuteVectorShuffleMask - Change values in a shuffle permute mask assuming
4111 /// the two vector operands have swapped position.
4112 static void CommuteVectorShuffleMask(SmallVectorImpl<int> &Mask,
4113 unsigned NumElems) {
4114 for (unsigned i = 0; i != NumElems; ++i) {
4118 else if (idx < (int)NumElems)
4119 Mask[i] = idx + NumElems;
4121 Mask[i] = idx - NumElems;
4125 /// isSHUFPMask - Return true if the specified VECTOR_SHUFFLE operand
4126 /// specifies a shuffle of elements that is suitable for input to 128/256-bit
4127 /// SHUFPS and SHUFPD. If Commuted is true, then it checks for sources to be
4128 /// reverse of what x86 shuffles want.
4129 static bool isSHUFPMask(ArrayRef<int> Mask, MVT VT, bool Commuted = false) {
4131 unsigned NumElems = VT.getVectorNumElements();
4132 unsigned NumLanes = VT.getSizeInBits()/128;
4133 unsigned NumLaneElems = NumElems/NumLanes;
4135 if (NumLaneElems != 2 && NumLaneElems != 4)
4138 unsigned EltSize = VT.getVectorElementType().getSizeInBits();
4139 bool symmetricMaskRequired =
4140 (VT.getSizeInBits() >= 256) && (EltSize == 32);
4142 // VSHUFPSY divides the resulting vector into 4 chunks.
4143 // The sources are also split into 4 chunks, and each destination
4144 // chunk must come from a different source chunk.
4146 // SRC1 => X7 X6 X5 X4 X3 X2 X1 X0
4147 // SRC2 => Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0
4149 // DST => Y7..Y4, Y7..Y4, X7..X4, X7..X4,
4150 // Y3..Y0, Y3..Y0, X3..X0, X3..X0
4152 // VSHUFPDY divides the resulting vector into 4 chunks.
4153 // The sources are also split into 4 chunks, and each destination
4154 // chunk must come from a different source chunk.
4156 // SRC1 => X3 X2 X1 X0
4157 // SRC2 => Y3 Y2 Y1 Y0
4159 // DST => Y3..Y2, X3..X2, Y1..Y0, X1..X0
4161 SmallVector<int, 4> MaskVal(NumLaneElems, -1);
4162 unsigned HalfLaneElems = NumLaneElems/2;
4163 for (unsigned l = 0; l != NumElems; l += NumLaneElems) {
4164 for (unsigned i = 0; i != NumLaneElems; ++i) {
4165 int Idx = Mask[i+l];
4166 unsigned RngStart = l + ((Commuted == (i<HalfLaneElems)) ? NumElems : 0);
4167 if (!isUndefOrInRange(Idx, RngStart, RngStart+NumLaneElems))
4169 // For VSHUFPSY, the mask of the second half must be the same as the
4170 // first but with the appropriate offsets. This works in the same way as
4171 // VPERMILPS works with masks.
4172 if (!symmetricMaskRequired || Idx < 0)
4174 if (MaskVal[i] < 0) {
4175 MaskVal[i] = Idx - l;
4178 if ((signed)(Idx - l) != MaskVal[i])
4186 /// isMOVHLPSMask - Return true if the specified VECTOR_SHUFFLE operand
4187 /// specifies a shuffle of elements that is suitable for input to MOVHLPS.
4188 static bool isMOVHLPSMask(ArrayRef<int> Mask, MVT VT) {
4189 if (!VT.is128BitVector())
4192 unsigned NumElems = VT.getVectorNumElements();
4197 // Expect bit0 == 6, bit1 == 7, bit2 == 2, bit3 == 3
4198 return isUndefOrEqual(Mask[0], 6) &&
4199 isUndefOrEqual(Mask[1], 7) &&
4200 isUndefOrEqual(Mask[2], 2) &&
4201 isUndefOrEqual(Mask[3], 3);
4204 /// isMOVHLPS_v_undef_Mask - Special case of isMOVHLPSMask for canonical form
4205 /// of vector_shuffle v, v, <2, 3, 2, 3>, i.e. vector_shuffle v, undef,
4207 static bool isMOVHLPS_v_undef_Mask(ArrayRef<int> Mask, MVT VT) {
4208 if (!VT.is128BitVector())
4211 unsigned NumElems = VT.getVectorNumElements();
4216 return isUndefOrEqual(Mask[0], 2) &&
4217 isUndefOrEqual(Mask[1], 3) &&
4218 isUndefOrEqual(Mask[2], 2) &&
4219 isUndefOrEqual(Mask[3], 3);
4222 /// isMOVLPMask - Return true if the specified VECTOR_SHUFFLE operand
4223 /// specifies a shuffle of elements that is suitable for input to MOVLP{S|D}.
4224 static bool isMOVLPMask(ArrayRef<int> Mask, MVT VT) {
4225 if (!VT.is128BitVector())
4228 unsigned NumElems = VT.getVectorNumElements();
4230 if (NumElems != 2 && NumElems != 4)
4233 for (unsigned i = 0, e = NumElems/2; i != e; ++i)
4234 if (!isUndefOrEqual(Mask[i], i + NumElems))
4237 for (unsigned i = NumElems/2, e = NumElems; i != e; ++i)
4238 if (!isUndefOrEqual(Mask[i], i))
4244 /// isMOVLHPSMask - Return true if the specified VECTOR_SHUFFLE operand
4245 /// specifies a shuffle of elements that is suitable for input to MOVLHPS.
4246 static bool isMOVLHPSMask(ArrayRef<int> Mask, MVT VT) {
4247 if (!VT.is128BitVector())
4250 unsigned NumElems = VT.getVectorNumElements();
4252 if (NumElems != 2 && NumElems != 4)
4255 for (unsigned i = 0, e = NumElems/2; i != e; ++i)
4256 if (!isUndefOrEqual(Mask[i], i))
4259 for (unsigned i = 0, e = NumElems/2; i != e; ++i)
4260 if (!isUndefOrEqual(Mask[i + e], i + NumElems))
4266 /// isINSERTPSMask - Return true if the specified VECTOR_SHUFFLE operand
4267 /// specifies a shuffle of elements that is suitable for input to INSERTPS.
4268 /// i.e. if all but one element come from the same vector.
4269 static bool isINSERTPSMask(ArrayRef<int> Mask, MVT VT) {
4270 // TODO: Deal with AVX's VINSERTPS
4271 if (!VT.is128BitVector() || (VT != MVT::v4f32 && VT != MVT::v4i32))
4274 unsigned CorrectPosV1 = 0;
4275 unsigned CorrectPosV2 = 0;
4276 for (int i = 0, e = (int)VT.getVectorNumElements(); i != e; ++i) {
4277 if (Mask[i] == -1) {
4285 else if (Mask[i] == i + 4)
4289 if (CorrectPosV1 == 3 || CorrectPosV2 == 3)
4290 // We have 3 elements (undefs count as elements from any vector) from one
4291 // vector, and one from another.
4298 // Some special combinations that can be optimized.
4301 SDValue Compact8x32ShuffleNode(ShuffleVectorSDNode *SVOp,
4302 SelectionDAG &DAG) {
4303 MVT VT = SVOp->getSimpleValueType(0);
4306 if (VT != MVT::v8i32 && VT != MVT::v8f32)
4309 ArrayRef<int> Mask = SVOp->getMask();
4311 // These are the special masks that may be optimized.
4312 static const int MaskToOptimizeEven[] = {0, 8, 2, 10, 4, 12, 6, 14};
4313 static const int MaskToOptimizeOdd[] = {1, 9, 3, 11, 5, 13, 7, 15};
4314 bool MatchEvenMask = true;
4315 bool MatchOddMask = true;
4316 for (int i=0; i<8; ++i) {
4317 if (!isUndefOrEqual(Mask[i], MaskToOptimizeEven[i]))
4318 MatchEvenMask = false;
4319 if (!isUndefOrEqual(Mask[i], MaskToOptimizeOdd[i]))
4320 MatchOddMask = false;
4323 if (!MatchEvenMask && !MatchOddMask)
4326 SDValue UndefNode = DAG.getNode(ISD::UNDEF, dl, VT);
4328 SDValue Op0 = SVOp->getOperand(0);
4329 SDValue Op1 = SVOp->getOperand(1);
4331 if (MatchEvenMask) {
4332 // Shift the second operand right to 32 bits.
4333 static const int ShiftRightMask[] = {-1, 0, -1, 2, -1, 4, -1, 6 };
4334 Op1 = DAG.getVectorShuffle(VT, dl, Op1, UndefNode, ShiftRightMask);
4336 // Shift the first operand left to 32 bits.
4337 static const int ShiftLeftMask[] = {1, -1, 3, -1, 5, -1, 7, -1 };
4338 Op0 = DAG.getVectorShuffle(VT, dl, Op0, UndefNode, ShiftLeftMask);
4340 static const int BlendMask[] = {0, 9, 2, 11, 4, 13, 6, 15};
4341 return DAG.getVectorShuffle(VT, dl, Op0, Op1, BlendMask);
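// Worked trace for the even case (illustrative element names): Op1 becomes
// <u, b0, u, b2, u, b4, u, b6> while Op0 is left in place, and the final blend
// <0, 9, 2, 11, 4, 13, 6, 15> yields <a0, b0, a2, b2, a4, b4, a6, b6>, which
// is exactly MaskToOptimizeEven applied to the original operands.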
4344 /// isUNPCKLMask - Return true if the specified VECTOR_SHUFFLE operand
4345 /// specifies a shuffle of elements that is suitable for input to UNPCKL.
4346 static bool isUNPCKLMask(ArrayRef<int> Mask, MVT VT,
4347 bool HasInt256, bool V2IsSplat = false) {
4349 assert(VT.getSizeInBits() >= 128 &&
4350 "Unsupported vector type for unpckl");
4352 unsigned NumElts = VT.getVectorNumElements();
4353 if (VT.is256BitVector() && NumElts != 4 && NumElts != 8 &&
4354 (!HasInt256 || (NumElts != 16 && NumElts != 32)))
4357 assert((!VT.is512BitVector() || VT.getScalarType().getSizeInBits() >= 32) &&
4358 "Unsupported vector type for unpckh");
4360 // AVX defines UNPCK* to operate independently on 128-bit lanes.
4361 unsigned NumLanes = VT.getSizeInBits()/128;
4362 unsigned NumLaneElts = NumElts/NumLanes;
4364 for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
4365 for (unsigned i = 0, j = l; i != NumLaneElts; i += 2, ++j) {
4366 int BitI = Mask[l+i];
4367 int BitI1 = Mask[l+i+1];
4368 if (!isUndefOrEqual(BitI, j))
4371 if (!isUndefOrEqual(BitI1, NumElts))
4374 if (!isUndefOrEqual(BitI1, j + NumElts))
4383 /// isUNPCKHMask - Return true if the specified VECTOR_SHUFFLE operand
4384 /// specifies a shuffle of elements that is suitable for input to UNPCKH.
4385 static bool isUNPCKHMask(ArrayRef<int> Mask, MVT VT,
4386 bool HasInt256, bool V2IsSplat = false) {
4387 assert(VT.getSizeInBits() >= 128 &&
4388 "Unsupported vector type for unpckh");
4390 unsigned NumElts = VT.getVectorNumElements();
4391 if (VT.is256BitVector() && NumElts != 4 && NumElts != 8 &&
4392 (!HasInt256 || (NumElts != 16 && NumElts != 32)))
4395 assert((!VT.is512BitVector() || VT.getScalarType().getSizeInBits() >= 32) &&
4396 "Unsupported vector type for unpckh");
4398 // AVX defines UNPCK* to operate independently on 128-bit lanes.
4399 unsigned NumLanes = VT.getSizeInBits()/128;
4400 unsigned NumLaneElts = NumElts/NumLanes;
4402 for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
4403 for (unsigned i = 0, j = l+NumLaneElts/2; i != NumLaneElts; i += 2, ++j) {
4404 int BitI = Mask[l+i];
4405 int BitI1 = Mask[l+i+1];
4406 if (!isUndefOrEqual(BitI, j))
4409 if (isUndefOrEqual(BitI1, NumElts))
4412 if (!isUndefOrEqual(BitI1, j+NumElts))
4420 /// isUNPCKL_v_undef_Mask - Special case of isUNPCKLMask for canonical form
4421 /// of vector_shuffle v, v, <0, 4, 1, 5>, i.e. vector_shuffle v, undef,
4423 static bool isUNPCKL_v_undef_Mask(ArrayRef<int> Mask, MVT VT, bool HasInt256) {
4424 unsigned NumElts = VT.getVectorNumElements();
4425 bool Is256BitVec = VT.is256BitVector();
4427 if (VT.is512BitVector())
4429 assert((VT.is128BitVector() || VT.is256BitVector()) &&
4430 "Unsupported vector type for unpckh");
4432 if (Is256BitVec && NumElts != 4 && NumElts != 8 &&
4433 (!HasInt256 || (NumElts != 16 && NumElts != 32)))
4436 // For 256-bit i64/f64, use MOVDDUPY instead, so reject the matching pattern
4437 // FIXME: Need a better way to get rid of this, there's no latency difference
4438 // between UNPCKLPD and MOVDDUP; the latter should always be checked first and
4439 // the former later. We should also remove the "_undef" special mask.
4440 if (NumElts == 4 && Is256BitVec)
4443 // Handle 128 and 256-bit vector lengths. AVX defines UNPCK* to operate
4444 // independently on 128-bit lanes.
4445 unsigned NumLanes = VT.getSizeInBits()/128;
4446 unsigned NumLaneElts = NumElts/NumLanes;
4448 for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
4449 for (unsigned i = 0, j = l; i != NumLaneElts; i += 2, ++j) {
4450 int BitI = Mask[l+i];
4451 int BitI1 = Mask[l+i+1];
4453 if (!isUndefOrEqual(BitI, j))
4455 if (!isUndefOrEqual(BitI1, j))
4463 /// isUNPCKH_v_undef_Mask - Special case of isUNPCKHMask for canonical form
4464 /// of vector_shuffle v, v, <2, 6, 3, 7>, i.e. vector_shuffle v, undef,
4466 static bool isUNPCKH_v_undef_Mask(ArrayRef<int> Mask, MVT VT, bool HasInt256) {
4467 unsigned NumElts = VT.getVectorNumElements();
4469 if (VT.is512BitVector())
4472 assert((VT.is128BitVector() || VT.is256BitVector()) &&
4473 "Unsupported vector type for unpckh");
4475 if (VT.is256BitVector() && NumElts != 4 && NumElts != 8 &&
4476 (!HasInt256 || (NumElts != 16 && NumElts != 32)))
4479 // Handle 128 and 256-bit vector lengths. AVX defines UNPCK* to operate
4480 // independently on 128-bit lanes.
4481 unsigned NumLanes = VT.getSizeInBits()/128;
4482 unsigned NumLaneElts = NumElts/NumLanes;
4484 for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
4485 for (unsigned i = 0, j = l+NumLaneElts/2; i != NumLaneElts; i += 2, ++j) {
4486 int BitI = Mask[l+i];
4487 int BitI1 = Mask[l+i+1];
4488 if (!isUndefOrEqual(BitI, j))
4490 if (!isUndefOrEqual(BitI1, j))
4497 // Match for INSERTI64x4 INSERTF64x4 instructions (src0[0], src1[0]) or
4498 // (src1[0], src0[1]), manipulation with 256-bit sub-vectors
4499 static bool isINSERT64x4Mask(ArrayRef<int> Mask, MVT VT, unsigned int *Imm) {
4500 if (!VT.is512BitVector())
4503 unsigned NumElts = VT.getVectorNumElements();
4504 unsigned HalfSize = NumElts/2;
4505 if (isSequentialOrUndefInRange(Mask, 0, HalfSize, 0)) {
4506 if (isSequentialOrUndefInRange(Mask, HalfSize, HalfSize, NumElts)) {
4511 if (isSequentialOrUndefInRange(Mask, 0, HalfSize, NumElts)) {
4512 if (isSequentialOrUndefInRange(Mask, HalfSize, HalfSize, HalfSize)) {
4520 /// isMOVLMask - Return true if the specified VECTOR_SHUFFLE operand
4521 /// specifies a shuffle of elements that is suitable for input to MOVSS,
4522 /// MOVSD, and MOVD, i.e. setting the lowest element.
4523 static bool isMOVLMask(ArrayRef<int> Mask, EVT VT) {
4524 if (VT.getVectorElementType().getSizeInBits() < 32)
4526 if (!VT.is128BitVector())
4529 unsigned NumElts = VT.getVectorNumElements();
4531 if (!isUndefOrEqual(Mask[0], NumElts))
4534 for (unsigned i = 1; i != NumElts; ++i)
4535 if (!isUndefOrEqual(Mask[i], i))
4541 /// isVPERM2X128Mask - Match 256-bit shuffles where the elements are considered
4542 /// as permutations between 128-bit chunks or halves. As an example, the
4543 /// shuffle below selects the upper halves of both operands:
4544 /// vector_shuffle <4, 5, 6, 7, 12, 13, 14, 15>
4545 /// The first half of the result comes from the second half of V1 and the
4546 /// second half from the second half of V2.
4547 static bool isVPERM2X128Mask(ArrayRef<int> Mask, MVT VT, bool HasFp256) {
4548 if (!HasFp256 || !VT.is256BitVector())
4551 // The shuffle result is divided into half A and half B. In total the two
4552 // sources have 4 halves, namely: C, D, E, F. The final values of A and
4553 // B must come from C, D, E or F.
4554 unsigned HalfSize = VT.getVectorNumElements()/2;
4555 bool MatchA = false, MatchB = false;
4557 // Check if A comes from one of C, D, E, F.
4558 for (unsigned Half = 0; Half != 4; ++Half) {
4559 if (isSequentialOrUndefInRange(Mask, 0, HalfSize, Half*HalfSize)) {
4565 // Check if B comes from one of C, D, E, F.
4566 for (unsigned Half = 0; Half != 4; ++Half) {
4567 if (isSequentialOrUndefInRange(Mask, HalfSize, HalfSize, Half*HalfSize)) {
4573 return MatchA && MatchB;
4576 /// getShuffleVPERM2X128Immediate - Return the appropriate immediate to shuffle
4577 /// the specified VECTOR_MASK mask with VPERM2F128/VPERM2I128 instructions.
4578 static unsigned getShuffleVPERM2X128Immediate(ShuffleVectorSDNode *SVOp) {
4579 MVT VT = SVOp->getSimpleValueType(0);
4581 unsigned HalfSize = VT.getVectorNumElements()/2;
4583 unsigned FstHalf = 0, SndHalf = 0;
4584 for (unsigned i = 0; i < HalfSize; ++i) {
4585 if (SVOp->getMaskElt(i) > 0) {
4586 FstHalf = SVOp->getMaskElt(i)/HalfSize;
4590 for (unsigned i = HalfSize; i < HalfSize*2; ++i) {
4591 if (SVOp->getMaskElt(i) > 0) {
4592 SndHalf = SVOp->getMaskElt(i)/HalfSize;
4597 return (FstHalf | (SndHalf << 4));
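// Worked example: for the v8i32 mask <4, 5, 6, 7, 12, 13, 14, 15> (HalfSize = 4),
// FstHalf = 4/4 = 1 (upper half of V1) and SndHalf = 12/4 = 3 (upper half of
// V2), so the immediate is 1 | (3 << 4) = 0x31.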
4600 // Symmetric in-lane mask. Each lane has 4 elements (for imm8)
4601 static bool isPermImmMask(ArrayRef<int> Mask, MVT VT, unsigned& Imm8) {
4602   unsigned EltSize = VT.getVectorElementType().getSizeInBits();
4603   if (EltSize < 32)
4604     return false;
4606   unsigned NumElts = VT.getVectorNumElements();
4607   Imm8 = 0;
4608   if (VT.is128BitVector() || (VT.is256BitVector() && EltSize == 64)) {
4609     for (unsigned i = 0; i != NumElts; ++i) {
4610       if (Mask[i] < 0)
4611         continue;
4612       Imm8 |= Mask[i] << (i*2);
4613     }
4614     return true;
4615   }
4617   unsigned LaneSize = 4;
4618   SmallVector<int, 4> MaskVal(LaneSize, -1);
4620   for (unsigned l = 0; l != NumElts; l += LaneSize) {
4621     for (unsigned i = 0; i != LaneSize; ++i) {
4622       if (!isUndefOrInRange(Mask[i+l], l, l+LaneSize))
4623         return false;
4624       if (Mask[i+l] < 0)
4625         continue;
4626       if (MaskVal[i] < 0) {
4627         MaskVal[i] = Mask[i+l] - l;
4628         Imm8 |= MaskVal[i] << (i*2);
4629         continue;
4630       }
4631       if (Mask[i+l] != (signed)(MaskVal[i]+l))
4632         return false;
4633     }
4634   }
4635   return true;
4636 }
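// Illustrative example (not from the original source): on the 128-bit path
// each mask element occupies two bits of Imm8, e.g.:
//   int Mask[] = {2, 3, 0, 1};
//   unsigned Imm8;
//   isPermImmMask(makeArrayRef(Mask), MVT::v4i32, Imm8);
//   // true; Imm8 == 2 | (3 << 2) | (0 << 4) | (1 << 6) == 0x4E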
4638 /// isVPERMILPMask - Return true if the specified VECTOR_SHUFFLE operand
4639 /// specifies a shuffle of elements that is suitable for input to VPERMILPD*.
4640 /// Note that VPERMIL mask matching differs depending on whether the
4641 /// underlying type is 32- or 64-bit. For VPERMILPS the high half of the mask
4642 /// should point to the same elements as the low half, but in the high half
4643 /// of the source. For VPERMILPD the two lanes can be shuffled independently,
4644 /// with the same restriction that lanes can't be crossed. Also handles PSHUFDY.
4645 static bool isVPERMILPMask(ArrayRef<int> Mask, MVT VT) {
4646 unsigned EltSize = VT.getVectorElementType().getSizeInBits();
4647   if (VT.getSizeInBits() < 256 || EltSize < 32)
4648     return false;
4649 bool symmetricMaskRequired = (EltSize == 32);
4650 unsigned NumElts = VT.getVectorNumElements();
4652 unsigned NumLanes = VT.getSizeInBits()/128;
4653 unsigned LaneSize = NumElts/NumLanes;
4654 // 2 or 4 elements in one lane
4656 SmallVector<int, 4> ExpectedMaskVal(LaneSize, -1);
4657 for (unsigned l = 0; l != NumElts; l += LaneSize) {
4658     for (unsigned i = 0; i != LaneSize; ++i) {
4659       if (!isUndefOrInRange(Mask[i+l], l, l+LaneSize))
4660         return false;
4661       if (symmetricMaskRequired) {
4662         if (ExpectedMaskVal[i] < 0 && Mask[i+l] >= 0) {
4663           ExpectedMaskVal[i] = Mask[i+l] - l;
4664           continue;
4665         }
4666         if (!isUndefOrEqual(Mask[i+l], ExpectedMaskVal[i]+l))
4667           return false;
4668       }
4669     }
4670   }
4671   return true;
4672 }
4674 /// isCommutedMOVLMask - Returns true if the shuffle mask is the reverse of
4675 /// what x86 movss wants: movss requires the lowest element to be the lowest
4676 /// element of vector 2 and the other elements to come from vector 1 in order.
4677 static bool isCommutedMOVLMask(ArrayRef<int> Mask, MVT VT,
4678 bool V2IsSplat = false, bool V2IsUndef = false) {
4679   if (!VT.is128BitVector())
4680     return false;
4682   unsigned NumOps = VT.getVectorNumElements();
4683   if (NumOps != 2 && NumOps != 4 && NumOps != 8 && NumOps != 16)
4684     return false;
4686   if (!isUndefOrEqual(Mask[0], 0))
4687     return false;
4689   for (unsigned i = 1; i != NumOps; ++i)
4690     if (!(isUndefOrEqual(Mask[i], i+NumOps) ||
4691           (V2IsUndef && isUndefOrInRange(Mask[i], NumOps, NumOps*2)) ||
4692           (V2IsSplat && isUndefOrEqual(Mask[i], NumOps))))
4693       return false;
4695   return true;
4696 }
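// Illustrative example (not from the original source): the commuted form
// keeps element 0 of V1 and takes the tail from V2, e.g. for v4i32:
//   int Mask[] = {0, 5, 6, 7};
//   isCommutedMOVLMask(makeArrayRef(Mask), MVT::v4i32); // true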
4698 /// isMOVSHDUPMask - Return true if the specified VECTOR_SHUFFLE operand
4699 /// specifies a shuffle of elements that is suitable for input to MOVSHDUP.
4700 /// Masks to match: <1, 1, 3, 3> or <1, 1, 3, 3, 5, 5, 7, 7>
4701 static bool isMOVSHDUPMask(ArrayRef<int> Mask, MVT VT,
4702 const X86Subtarget *Subtarget) {
4703   if (!Subtarget->hasSSE3())
4704     return false;
4706   unsigned NumElems = VT.getVectorNumElements();
4708   if ((VT.is128BitVector() && NumElems != 4) ||
4709       (VT.is256BitVector() && NumElems != 8) ||
4710       (VT.is512BitVector() && NumElems != 16))
4711     return false;
4713   // "i+1" is the value the indexed mask element must have
4714   for (unsigned i = 0; i != NumElems; i += 2)
4715     if (!isUndefOrEqual(Mask[i], i+1) ||
4716         !isUndefOrEqual(Mask[i+1], i+1))
4717       return false;
4719   return true;
4720 }
4722 /// isMOVSLDUPMask - Return true if the specified VECTOR_SHUFFLE operand
4723 /// specifies a shuffle of elements that is suitable for input to MOVSLDUP.
4724 /// Masks to match: <0, 0, 2, 2> or <0, 0, 2, 2, 4, 4, 6, 6>
4725 static bool isMOVSLDUPMask(ArrayRef<int> Mask, MVT VT,
4726 const X86Subtarget *Subtarget) {
4727   if (!Subtarget->hasSSE3())
4728     return false;
4730   unsigned NumElems = VT.getVectorNumElements();
4732   if ((VT.is128BitVector() && NumElems != 4) ||
4733       (VT.is256BitVector() && NumElems != 8) ||
4734       (VT.is512BitVector() && NumElems != 16))
4735     return false;
4737   // "i" is the value the indexed mask element must have
4738   for (unsigned i = 0; i != NumElems; i += 2)
4739     if (!isUndefOrEqual(Mask[i], i) ||
4740         !isUndefOrEqual(Mask[i+1], i))
4741       return false;
4743   return true;
4744 }
4746 /// isMOVDDUPYMask - Return true if the specified VECTOR_SHUFFLE operand
4747 /// specifies a shuffle of elements that is suitable for input to the 256-bit
4748 /// version of MOVDDUP.
4749 static bool isMOVDDUPYMask(ArrayRef<int> Mask, MVT VT, bool HasFp256) {
4750   if (!HasFp256 || !VT.is256BitVector())
4751     return false;
4753   unsigned NumElts = VT.getVectorNumElements();
4754   if (NumElts != 4)
4755     return false;
4757   for (unsigned i = 0; i != NumElts/2; ++i)
4758     if (!isUndefOrEqual(Mask[i], 0))
4759       return false;
4760   for (unsigned i = NumElts/2; i != NumElts; ++i)
4761     if (!isUndefOrEqual(Mask[i], NumElts/2))
4762       return false;
4763   return true;
4764 }
4766 /// isMOVDDUPMask - Return true if the specified VECTOR_SHUFFLE operand
4767 /// specifies a shuffle of elements that is suitable for input to the 128-bit
4768 /// version of MOVDDUP.
4769 static bool isMOVDDUPMask(ArrayRef<int> Mask, MVT VT) {
4770   if (!VT.is128BitVector())
4771     return false;
4773   unsigned e = VT.getVectorNumElements() / 2;
4774   for (unsigned i = 0; i != e; ++i)
4775     if (!isUndefOrEqual(Mask[i], i))
4776       return false;
4777   for (unsigned i = 0; i != e; ++i)
4778     if (!isUndefOrEqual(Mask[e+i], i))
4779       return false;
4780   return true;
4781 }
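// Illustrative example (not from the original source): MOVDDUP duplicates
// the low half of the vector, e.g. for v2f64:
//   int Mask[] = {0, 0};
//   isMOVDDUPMask(makeArrayRef(Mask), MVT::v2f64); // true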
4783 /// isVEXTRACTIndex - Return true if the specified
4784 /// EXTRACT_SUBVECTOR operand specifies a vector extract that is
4785 /// suitable for instructions that extract 128- or 256-bit vectors.
4786 static bool isVEXTRACTIndex(SDNode *N, unsigned vecWidth) {
4787 assert((vecWidth == 128 || vecWidth == 256) && "Unexpected vector width");
4788   if (!isa<ConstantSDNode>(N->getOperand(1).getNode()))
4789     return false;
4791   // The index should be aligned on a vecWidth-bit boundary.
4792   uint64_t Index =
4793     cast<ConstantSDNode>(N->getOperand(1).getNode())->getZExtValue();
4795   MVT VT = N->getSimpleValueType(0);
4796   unsigned ElSize = VT.getVectorElementType().getSizeInBits();
4797   bool Result = (Index * ElSize) % vecWidth == 0;
4799   return Result;
4800 }
4802 /// isVINSERTIndex - Return true if the specified INSERT_SUBVECTOR
4803 /// operand specifies a subvector insert that is suitable for inserting
4804 /// 128- or 256-bit subvectors.
4805 static bool isVINSERTIndex(SDNode *N, unsigned vecWidth) {
4806 assert((vecWidth == 128 || vecWidth == 256) && "Unexpected vector width");
4807   if (!isa<ConstantSDNode>(N->getOperand(2).getNode()))
4808     return false;
4809   // The index should be aligned on a vecWidth-bit boundary.
4810   uint64_t Index =
4811     cast<ConstantSDNode>(N->getOperand(2).getNode())->getZExtValue();
4813   MVT VT = N->getSimpleValueType(0);
4814   unsigned ElSize = VT.getVectorElementType().getSizeInBits();
4815   bool Result = (Index * ElSize) % vecWidth == 0;
4817   return Result;
4818 }
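// Illustrative example (not from the original source): inserting into a
// v8i32 at element index 4 is 128-bit aligned (4 * 32 == 128), so
// isVINSERTIndex(N, 128) returns true; element index 2 (2 * 32 == 64) is
// not aligned, so it returns false.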
4820 bool X86::isVINSERT128Index(SDNode *N) {
4821   return isVINSERTIndex(N, 128);
4822 }
4824 bool X86::isVINSERT256Index(SDNode *N) {
4825   return isVINSERTIndex(N, 256);
4826 }
4828 bool X86::isVEXTRACT128Index(SDNode *N) {
4829   return isVEXTRACTIndex(N, 128);
4830 }
4832 bool X86::isVEXTRACT256Index(SDNode *N) {
4833   return isVEXTRACTIndex(N, 256);
4834 }
4836 /// getShuffleSHUFImmediate - Return the appropriate immediate to shuffle
4837 /// the specified VECTOR_SHUFFLE mask with PSHUF* and SHUFP* instructions.
4838 /// Handles 128-bit and 256-bit.
4839 static unsigned getShuffleSHUFImmediate(ShuffleVectorSDNode *N) {
4840 MVT VT = N->getSimpleValueType(0);
4842 assert((VT.getSizeInBits() >= 128) &&
4843 "Unsupported vector type for PSHUF/SHUFP");
4845 // Handle 128 and 256-bit vector lengths. AVX defines PSHUF/SHUFP to operate
4846 // independently on 128-bit lanes.
4847 unsigned NumElts = VT.getVectorNumElements();
4848 unsigned NumLanes = VT.getSizeInBits()/128;
4849 unsigned NumLaneElts = NumElts/NumLanes;
4851 assert((NumLaneElts == 2 || NumLaneElts == 4 || NumLaneElts == 8) &&
4852 "Only supports 2, 4 or 8 elements per lane");
4854   unsigned Shift = (NumLaneElts >= 4) ? 1 : 0;
4855   unsigned Mask = 0;
4856   for (unsigned i = 0; i != NumElts; ++i) {
4857     int Elt = N->getMaskElt(i);
4858     if (Elt < 0) continue;
4859     Elt &= NumLaneElts - 1;
4860     unsigned ShAmt = (i << Shift) % 8;
4861     Mask |= Elt << ShAmt;
4862   }
4864   return Mask;
4865 }
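// Illustrative example (not from the original source): a v4i32 PSHUFD with
// mask <3, 2, 1, 0> encodes each element in two bits of the immediate:
//   3 | (2 << 2) | (1 << 4) | (0 << 6) == 0x1B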
4867 /// getShufflePSHUFHWImmediate - Return the appropriate immediate to shuffle
4868 /// the specified VECTOR_SHUFFLE mask with the PSHUFHW instruction.
4869 static unsigned getShufflePSHUFHWImmediate(ShuffleVectorSDNode *N) {
4870 MVT VT = N->getSimpleValueType(0);
4872 assert((VT == MVT::v8i16 || VT == MVT::v16i16) &&
4873 "Unsupported vector type for PSHUFHW");
4875   unsigned NumElts = VT.getVectorNumElements();
4877   unsigned Mask = 0;
4878   for (unsigned l = 0; l != NumElts; l += 8) {
4879     // 8 elements per lane, but we only care about the last 4.
4880     for (unsigned i = 0; i < 4; ++i) {
4881       int Elt = N->getMaskElt(l+i+4);
4882       if (Elt < 0) continue;
4883       Elt &= 0x3; // only 2-bits.
4884       Mask |= Elt << (i * 2);
4885     }
4886   }
4888   return Mask;
4889 }
4891 /// getShufflePSHUFLWImmediate - Return the appropriate immediate to shuffle
4892 /// the specified VECTOR_SHUFFLE mask with the PSHUFLW instruction.
4893 static unsigned getShufflePSHUFLWImmediate(ShuffleVectorSDNode *N) {
4894 MVT VT = N->getSimpleValueType(0);
4896   assert((VT == MVT::v8i16 || VT == MVT::v16i16) &&
4897          "Unsupported vector type for PSHUFLW");
4899   unsigned NumElts = VT.getVectorNumElements();
4901   unsigned Mask = 0;
4902   for (unsigned l = 0; l != NumElts; l += 8) {
4903     // 8 elements per lane, but we only care about the first 4.
4904     for (unsigned i = 0; i < 4; ++i) {
4905       int Elt = N->getMaskElt(l+i);
4906       if (Elt < 0) continue;
4907       Elt &= 0x3; // only 2-bits
4908       Mask |= Elt << (i * 2);
4909     }
4910   }
4912   return Mask;
4913 }
4915 /// \brief Return the appropriate immediate to shuffle the specified
4916 /// VECTOR_SHUFFLE mask with the PALIGNR (if InterLane is false) or with
4917 /// VALIGN (if InterLane is true) instructions.
4918 static unsigned getShuffleAlignrImmediate(ShuffleVectorSDNode *SVOp,
4919                                           bool InterLane) {
4920   MVT VT = SVOp->getSimpleValueType(0);
4921 unsigned EltSize = InterLane ? 1 :
4922 VT.getVectorElementType().getSizeInBits() >> 3;
4924 unsigned NumElts = VT.getVectorNumElements();
4925 unsigned NumLanes = VT.is512BitVector() ? 1 : VT.getSizeInBits()/128;
4926 unsigned NumLaneElts = NumElts/NumLanes;
4928   int Val = 0;
4929   unsigned i;
4930   for (i = 0; i != NumElts; ++i) {
4931     Val = SVOp->getMaskElt(i);
4932     if (Val >= 0)
4933       break;
4934   }
4935   if (Val >= (int)NumElts)
4936     Val -= NumElts - NumLaneElts;
4938   assert(Val - i > 0 && "PALIGNR imm should be positive");
4939   return (Val - i) * EltSize;
4940 }
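// Illustrative example (not from the original source): for a v8i16 shuffle
// with mask <1, 2, 3, 4, 5, 6, 7, 8>, the first defined element is Val == 1
// at i == 0 and EltSize is 2 bytes, so the PALIGNR immediate is
// (1 - 0) * 2 == 2.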
4942 /// \brief Return the appropriate immediate to shuffle the specified
4943 /// VECTOR_SHUFFLE mask with the PALIGNR instruction.
4944 static unsigned getShufflePALIGNRImmediate(ShuffleVectorSDNode *SVOp) {
4945   return getShuffleAlignrImmediate(SVOp, false);
4946 }
4948 /// \brief Return the appropriate immediate to shuffle the specified
4949 /// VECTOR_SHUFFLE mask with the VALIGN instruction.
4950 static unsigned getShuffleVALIGNImmediate(ShuffleVectorSDNode *SVOp) {
4951   return getShuffleAlignrImmediate(SVOp, true);
4952 }
4955 static unsigned getExtractVEXTRACTImmediate(SDNode *N, unsigned vecWidth) {
4956 assert((vecWidth == 128 || vecWidth == 256) && "Unsupported vector width");
4957   if (!isa<ConstantSDNode>(N->getOperand(1).getNode()))
4958     llvm_unreachable("Illegal extract subvector for VEXTRACT");
4960   uint64_t Index =
4961     cast<ConstantSDNode>(N->getOperand(1).getNode())->getZExtValue();
4963   MVT VecVT = N->getOperand(0).getSimpleValueType();
4964   MVT ElVT = VecVT.getVectorElementType();
4966   unsigned NumElemsPerChunk = vecWidth / ElVT.getSizeInBits();
4967   return Index / NumElemsPerChunk;
4968 }
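// Illustrative example (not from the original source): extracting elements
// 4..7 of a v8i32 as a 128-bit subvector gives NumElemsPerChunk == 4 and an
// immediate of 4 / 4 == 1, i.e. VEXTRACTF128 with imm 1.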
4970 static unsigned getInsertVINSERTImmediate(SDNode *N, unsigned vecWidth) {
4971 assert((vecWidth == 128 || vecWidth == 256) && "Unsupported vector width");
4972   if (!isa<ConstantSDNode>(N->getOperand(2).getNode()))
4973     llvm_unreachable("Illegal insert subvector for VINSERT");
4975   uint64_t Index =
4976     cast<ConstantSDNode>(N->getOperand(2).getNode())->getZExtValue();
4978   MVT VecVT = N->getSimpleValueType(0);
4979   MVT ElVT = VecVT.getVectorElementType();
4981   unsigned NumElemsPerChunk = vecWidth / ElVT.getSizeInBits();
4982   return Index / NumElemsPerChunk;
4983 }
4985 /// getExtractVEXTRACT128Immediate - Return the appropriate immediate
4986 /// to extract the specified EXTRACT_SUBVECTOR index with VEXTRACTF128
4987 /// and VEXTRACTI128 instructions.
4988 unsigned X86::getExtractVEXTRACT128Immediate(SDNode *N) {
4989   return getExtractVEXTRACTImmediate(N, 128);
4990 }
4992 /// getExtractVEXTRACT256Immediate - Return the appropriate immediate
4993 /// to extract the specified EXTRACT_SUBVECTOR index with VEXTRACTF64x4
4994 /// and VEXTRACTI64x4 instructions.
4995 unsigned X86::getExtractVEXTRACT256Immediate(SDNode *N) {
4996   return getExtractVEXTRACTImmediate(N, 256);
4997 }
4999 /// getInsertVINSERT128Immediate - Return the appropriate immediate
5000 /// to insert at the specified INSERT_SUBVECTOR index with VINSERTF128
5001 /// and VINSERTI128 instructions.
5002 unsigned X86::getInsertVINSERT128Immediate(SDNode *N) {
5003   return getInsertVINSERTImmediate(N, 128);
5004 }
5006 /// getInsertVINSERT256Immediate - Return the appropriate immediate
5007 /// to insert at the specified INSERT_SUBVECTOR index with VINSERTF64x4
5008 /// and VINSERTI64x4 instructions.
5009 unsigned X86::getInsertVINSERT256Immediate(SDNode *N) {
5010   return getInsertVINSERTImmediate(N, 256);
5011 }
5013 /// isZero - Returns true if V is a constant integer zero.
5014 static bool isZero(SDValue V) {
5015   ConstantSDNode *C = dyn_cast<ConstantSDNode>(V);
5016   return C && C->isNullValue();
5017 }
5019 /// isZeroNode - Returns true if Elt is a constant zero or a floating point
5020 /// constant +0.0.
5021 bool X86::isZeroNode(SDValue Elt) {
5022   if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Elt))
5023     return C->getZExtValue() == 0;
5024   if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Elt))
5025     return CFP->getValueAPF().isPosZero();
5027   return false;
5028 }
5029 /// ShouldXformToMOVHLPS - Return true if the node should be transformed to
5030 /// match movhlps. The lower half elements should come from upper half of
5031 /// V1 (and in order), and the upper half elements should come from the upper
5032 /// half of V2 (and in order).
5033 static bool ShouldXformToMOVHLPS(ArrayRef<int> Mask, MVT VT) {
5034   if (!VT.is128BitVector())
5035     return false;
5036   if (VT.getVectorNumElements() != 4)
5037     return false;
5038   for (unsigned i = 0, e = 2; i != e; ++i)
5039     if (!isUndefOrEqual(Mask[i], i+2))
5040       return false;
5041   for (unsigned i = 2; i != 4; ++i)
5042     if (!isUndefOrEqual(Mask[i], i+4))
5043       return false;
5045   return true;
5046 }
5047 /// isScalarLoadToVector - Returns true if the node is a scalar load that
5048 /// is promoted to a vector. It also returns the LoadSDNode by reference if
5049 /// required.
5050 static bool isScalarLoadToVector(SDNode *N, LoadSDNode **LD = nullptr) {
5051   if (N->getOpcode() != ISD::SCALAR_TO_VECTOR)
5052     return false;
5053   N = N->getOperand(0).getNode();
5054   if (!ISD::isNON_EXTLoad(N))
5055     return false;
5056   if (LD)
5057     *LD = cast<LoadSDNode>(N);
5058   return true;
5059 }
5061 // Test whether the given value is a vector value which will be legalized
5062 // into a load.
5063 static bool WillBeConstantPoolLoad(SDNode *N) {
5064   if (N->getOpcode() != ISD::BUILD_VECTOR)
5065     return false;
5067   // Check for any non-constant elements.
5068   for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
5069     switch (N->getOperand(i).getNode()->getOpcode()) {
5070     case ISD::UNDEF:
5071     case ISD::ConstantFP:
5072     case ISD::Constant:
5073       break;
5074     default:
5075       return false;
5076     }
5078 // Vectors of all-zeros and all-ones are materialized with special
5079 // instructions rather than being loaded.
5080   return !ISD::isBuildVectorAllZeros(N) &&
5081          !ISD::isBuildVectorAllOnes(N);
5082 }
5084 /// ShouldXformToMOVLP{S|D} - Return true if the node should be transformed to
5085 /// match movlp{s|d}. The lower half elements should come from lower half of
5086 /// V1 (and in order), and the upper half elements should come from the upper
5087 /// half of V2 (and in order). And since V1 will become the source of the
5088 /// MOVLP, it must be either a vector load or a scalar load to vector.
5089 static bool ShouldXformToMOVLP(SDNode *V1, SDNode *V2,
5090 ArrayRef<int> Mask, MVT VT) {
5091   if (!VT.is128BitVector())
5092     return false;
5094   if (!ISD::isNON_EXTLoad(V1) && !isScalarLoadToVector(V1))
5095     return false;
5096   // If V2 is a vector load, don't do this transformation; we will try to
5097   // fold the load into a shufps op instead.
5098   if (ISD::isNON_EXTLoad(V2) || WillBeConstantPoolLoad(V2))
5099     return false;
5101   unsigned NumElems = VT.getVectorNumElements();
5103   if (NumElems != 2 && NumElems != 4)
5104     return false;
5105   for (unsigned i = 0, e = NumElems/2; i != e; ++i)
5106     if (!isUndefOrEqual(Mask[i], i))
5107       return false;
5108   for (unsigned i = NumElems/2, e = NumElems; i != e; ++i)
5109     if (!isUndefOrEqual(Mask[i], i+NumElems))
5110       return false;
5112   return true;
5113 }
5114 /// isZeroShuffle - Returns true if N is a VECTOR_SHUFFLE that can be resolved
5115 /// to a zero vector.
5116 /// FIXME: move to dag combiner / method on ShuffleVectorSDNode
5117 static bool isZeroShuffle(ShuffleVectorSDNode *N) {
5118 SDValue V1 = N->getOperand(0);
5119 SDValue V2 = N->getOperand(1);
5120 unsigned NumElems = N->getValueType(0).getVectorNumElements();
5121 for (unsigned i = 0; i != NumElems; ++i) {
5122 int Idx = N->getMaskElt(i);
5123 if (Idx >= (int)NumElems) {
5124 unsigned Opc = V2.getOpcode();
5125       if (Opc == ISD::UNDEF || ISD::isBuildVectorAllZeros(V2.getNode()))
5126         continue;
5127       if (Opc != ISD::BUILD_VECTOR ||
5128           !X86::isZeroNode(V2.getOperand(Idx-NumElems)))
5129         return false;
5130     } else if (Idx >= 0) {
5131       unsigned Opc = V1.getOpcode();
5132       if (Opc == ISD::UNDEF || ISD::isBuildVectorAllZeros(V1.getNode()))
5133         continue;
5134       if (Opc != ISD::BUILD_VECTOR ||
5135           !X86::isZeroNode(V1.getOperand(Idx)))
5136         return false;
5137     }
5138   }
5140   return true;
5141 }
5142 /// getZeroVector - Returns a vector of specified type with all zero elements.
5144 static SDValue getZeroVector(EVT VT, const X86Subtarget *Subtarget,
5145 SelectionDAG &DAG, SDLoc dl) {
5146 assert(VT.isVector() && "Expected a vector type");
5148 // Always build SSE zero vectors as <4 x i32> bitcasted
5149 // to their dest type. This ensures they get CSE'd.
5150   SDValue Vec;
5151   if (VT.is128BitVector()) { // SSE
5152 if (Subtarget->hasSSE2()) { // SSE2
5153 SDValue Cst = DAG.getConstant(0, MVT::i32);
5154       Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Cst, Cst, Cst, Cst);
5155     } else { // SSE1
5156       SDValue Cst = DAG.getConstantFP(+0.0, MVT::f32);
5157       Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4f32, Cst, Cst, Cst, Cst);
5158     }
5159   } else if (VT.is256BitVector()) { // AVX
5160 if (Subtarget->hasInt256()) { // AVX2
5161 SDValue Cst = DAG.getConstant(0, MVT::i32);
5162 SDValue Ops[] = { Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst };
5163       Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v8i32, Ops);
5164     } else { // AVX
5165       // 256-bit logic and arithmetic instructions in AVX are all
5166       // floating-point, no support for integer ops. Emit fp zeroed vectors.
5167       SDValue Cst = DAG.getConstantFP(+0.0, MVT::f32);
5168       SDValue Ops[] = { Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst };
5169       Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v8f32, Ops);
5170     }
5171 } else if (VT.is512BitVector()) { // AVX-512
5172 SDValue Cst = DAG.getConstant(0, MVT::i32);
5173 SDValue Ops[] = { Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst,
5174 Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst };
5175 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v16i32, Ops);
5176 } else if (VT.getScalarType() == MVT::i1) {
5177 assert(VT.getVectorNumElements() <= 16 && "Unexpected vector type");
5178 SDValue Cst = DAG.getConstant(0, MVT::i1);
5179 SmallVector<SDValue, 16> Ops(VT.getVectorNumElements(), Cst);
5180     return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Ops);
5181   } else
5182     llvm_unreachable("Unexpected vector type");
5184   return DAG.getNode(ISD::BITCAST, dl, VT, Vec);
5185 }
5187 /// getOnesVector - Returns a vector of specified type with all bits set.
5188 /// Always build ones vectors as <4 x i32> or <8 x i32>. For 256-bit types with
5189 /// no AVX2 support, use two <4 x i32> inserted in a <8 x i32> appropriately.
5190 /// Then bitcast to their original type, ensuring they get CSE'd.
5191 static SDValue getOnesVector(MVT VT, bool HasInt256, SelectionDAG &DAG,
5192                              SDLoc dl) {
5193   assert(VT.isVector() && "Expected a vector type");
5195   SDValue Cst = DAG.getConstant(~0U, MVT::i32);
5196   SDValue Vec;
5197   if (VT.is256BitVector()) {
5198     if (HasInt256) { // AVX2
5199       SDValue Ops[] = { Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst };
5200       Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v8i32, Ops);
5201     } else { // AVX
5202       Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Cst, Cst, Cst, Cst);
5203       Vec = Concat128BitVectors(Vec, Vec, MVT::v8i32, 8, DAG, dl);
5204     }
5205   } else if (VT.is128BitVector()) {
5206     Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Cst, Cst, Cst, Cst);
5207   } else
5208     llvm_unreachable("Unexpected vector type");
5210   return DAG.getNode(ISD::BITCAST, dl, VT, Vec);
5211 }
5213 /// NormalizeMask - V2 is a splat, modify the mask (if needed) so all elements
5214 /// that point to V2 point to its first element.
5215 static void NormalizeMask(SmallVectorImpl<int> &Mask, unsigned NumElems) {
5216   for (unsigned i = 0; i != NumElems; ++i) {
5217     if (Mask[i] > (int)NumElems) {
5218       Mask[i] = NumElems;
5219     }
5220   }
5221 }
5223 /// getMOVL - Returns a vector_shuffle mask for a movs{s|d}, movd
5224 /// operation of specified width.
5225 static SDValue getMOVL(SelectionDAG &DAG, SDLoc dl, EVT VT, SDValue V1,
5226                        SDValue V2) {
5227   unsigned NumElems = VT.getVectorNumElements();
5228   SmallVector<int, 8> Mask;
5229   Mask.push_back(NumElems);
5230   for (unsigned i = 1; i != NumElems; ++i)
5231     Mask.push_back(i);
5232   return DAG.getVectorShuffle(VT, dl, V1, V2, &Mask[0]);
5233 }
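// Illustrative example (not from the original source): for v4i32 this
// builds the mask <4, 1, 2, 3>, i.e. element 0 from V2 and the rest from
// V1, which is exactly the pattern matched by isMOVLMask above.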
5235 /// getUnpackl - Returns a vector_shuffle node for an unpackl operation.
5236 static SDValue getUnpackl(SelectionDAG &DAG, SDLoc dl, MVT VT, SDValue V1,
5237                           SDValue V2) {
5238   unsigned NumElems = VT.getVectorNumElements();
5239   SmallVector<int, 8> Mask;
5240   for (unsigned i = 0, e = NumElems/2; i != e; ++i) {
5241     Mask.push_back(i);
5242     Mask.push_back(i + NumElems);
5243   }
5244   return DAG.getVectorShuffle(VT, dl, V1, V2, &Mask[0]);
5245 }
5247 /// getUnpackh - Returns a vector_shuffle node for an unpackh operation.
5248 static SDValue getUnpackh(SelectionDAG &DAG, SDLoc dl, MVT VT, SDValue V1,
5249                           SDValue V2) {
5250   unsigned NumElems = VT.getVectorNumElements();
5251   SmallVector<int, 8> Mask;
5252   for (unsigned i = 0, Half = NumElems/2; i != Half; ++i) {
5253     Mask.push_back(i + Half);
5254     Mask.push_back(i + NumElems + Half);
5255   }
5256   return DAG.getVectorShuffle(VT, dl, V1, V2, &Mask[0]);
5257 }
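// Illustrative example (not from the original source): on v4i32, getUnpackl
// builds the mask <0, 4, 1, 5> and getUnpackh builds <2, 6, 3, 7>, matching
// the PUNPCKL/PUNPCKH interleaving of the low and high halves of V1 and V2.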
5259 // PromoteSplati8i16 - i16 and i8 vector types can't be used directly by
5260 // a generic shuffle instruction because the target has no such instructions.
5261 // Generate shuffles which repeat i16 and i8 several times until they can be
5262 // represented by v4f32 and then be manipulated by target supported shuffles.
5263 static SDValue PromoteSplati8i16(SDValue V, SelectionDAG &DAG, int &EltNo) {
5264 MVT VT = V.getSimpleValueType();
5265   int NumElems = VT.getVectorNumElements();
5266   SDLoc dl(V);
5268   while (NumElems > 4) {
5269     if (EltNo < NumElems/2) {
5270       V = getUnpackl(DAG, dl, VT, V, V);
5271     } else {
5272       V = getUnpackh(DAG, dl, VT, V, V);
5273       EltNo -= NumElems/2;
5274     }
5275     NumElems >>= 1;
5276   }
5278   return V;
5279 }
5280 /// getLegalSplat - Generate a legal splat with supported x86 shuffles
5281 static SDValue getLegalSplat(SelectionDAG &DAG, SDValue V, int EltNo) {
5282   MVT VT = V.getSimpleValueType();
5283   SDLoc dl(V);
5285   if (VT.is128BitVector()) {
5286     V = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, V);
5287     int SplatMask[4] = { EltNo, EltNo, EltNo, EltNo };
5288     V = DAG.getVectorShuffle(MVT::v4f32, dl, V, DAG.getUNDEF(MVT::v4f32),
5289                              &SplatMask[0]);
5290   } else if (VT.is256BitVector()) {
5291     // To use VPERMILPS to splat scalars, the second half of indices must
5292     // refer to the higher part, which is a duplication of the lower one,
5293     // because VPERMILPS can only handle in-lane permutations.
5294     int SplatMask[8] = { EltNo, EltNo, EltNo, EltNo,
5295                          EltNo+4, EltNo+4, EltNo+4, EltNo+4 };
5297     V = DAG.getNode(ISD::BITCAST, dl, MVT::v8f32, V);
5298     V = DAG.getVectorShuffle(MVT::v8f32, dl, V, DAG.getUNDEF(MVT::v8f32),
5299                              &SplatMask[0]);
5300   } else
5301     llvm_unreachable("Vector size not supported");
5303   return DAG.getNode(ISD::BITCAST, dl, VT, V);
5304 }
5306 /// PromoteSplat - Splat is promoted to target supported vector shuffles.
5307 static SDValue PromoteSplat(ShuffleVectorSDNode *SV, SelectionDAG &DAG) {
5308 MVT SrcVT = SV->getSimpleValueType(0);
5309   SDValue V1 = SV->getOperand(0);
5310   SDLoc dl(SV);
5312   int EltNo = SV->getSplatIndex();
5313 int NumElems = SrcVT.getVectorNumElements();
5314 bool Is256BitVec = SrcVT.is256BitVector();
5316 assert(((SrcVT.is128BitVector() && NumElems > 4) || Is256BitVec) &&
5317 "Unknown how to promote splat for type");
5319 // Extract the 128-bit part containing the splat element and update
5320 // the splat element index when it refers to the higher register.
5321   if (Is256BitVec) {
5322     V1 = Extract128BitVector(V1, EltNo, DAG, dl);
5323     if (EltNo >= NumElems/2)
5324       EltNo -= NumElems/2;
5325     NumElems = NumElems/2;
5326   }
5327   // i16 and i8 vector types can't be used directly by a generic shuffle
5328   // instruction because the target has no such instruction. Generate shuffles
5329   // which repeat i16 and i8 several times until they fit in i32, and then can
5330   // be manipulated by target supported shuffles.
5331 MVT EltVT = SrcVT.getVectorElementType();
5332 if (EltVT == MVT::i8 || EltVT == MVT::i16)
5333 V1 = PromoteSplati8i16(V1, DAG, EltNo);
5335 // Recreate the 256-bit vector and place the same 128-bit vector
5336 // into the low and high part. This is necessary because we want
5337   // to use VPERM* to shuffle the vectors.
5338   if (Is256BitVec)
5339     V1 = DAG.getNode(ISD::CONCAT_VECTORS, dl, SrcVT, V1, V1);
5342   return getLegalSplat(DAG, V1, EltNo);
5343 }
5345 /// getShuffleVectorZeroOrUndef - Return a vector_shuffle of the specified
5346 /// vector and a zero or undef vector. This produces a shuffle where the low
5347 /// element of V2 is swizzled into the zero/undef vector, landing at element
5348 /// Idx. This produces a shuffle mask like 4,1,2,3 (idx=0) or 0,1,2,4 (idx=3).
5349 static SDValue getShuffleVectorZeroOrUndef(SDValue V2, unsigned Idx,
5350                                            bool IsZero,
5351                                            const X86Subtarget *Subtarget,
5352                                            SelectionDAG &DAG) {
5353   MVT VT = V2.getSimpleValueType();
5354   SDValue V1 = IsZero
5355     ? getZeroVector(VT, Subtarget, DAG, SDLoc(V2)) : DAG.getUNDEF(VT);
5356 unsigned NumElems = VT.getVectorNumElements();
5357 SmallVector<int, 16> MaskVec;
5358 for (unsigned i = 0; i != NumElems; ++i)
5359 // If this is the insertion idx, put the low elt of V2 here.
5360 MaskVec.push_back(i == Idx ? NumElems : i);
5361   return DAG.getVectorShuffle(VT, SDLoc(V2), V1, V2, &MaskVec[0]);
5362 }
5364 /// getTargetShuffleMask - Calculates the shuffle mask corresponding to the
5365 /// target specific opcode. Returns true if the Mask could be calculated. Sets
5366 /// IsUnary to true if it only uses one source. Note that this will set IsUnary
5367 /// for shuffles which use a single input multiple times, and in those cases
5368 /// it will adjust the mask to only have indices within that single input.
5369 static bool getTargetShuffleMask(SDNode *N, MVT VT,
5370 SmallVectorImpl<int> &Mask, bool &IsUnary) {
5371   unsigned NumElems = VT.getVectorNumElements();
5372   SDValue ImmN;
5374   IsUnary = false;
5375   bool IsFakeUnary = false;
5376 switch(N->getOpcode()) {
5377 case X86ISD::BLENDI:
5378 ImmN = N->getOperand(N->getNumOperands()-1);
5379     DecodeBLENDMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
5380     break;
5381   case X86ISD::SHUFP:
5382     ImmN = N->getOperand(N->getNumOperands()-1);
5383     DecodeSHUFPMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
5384     IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
5385     break;
5386   case X86ISD::UNPCKH:
5387     DecodeUNPCKHMask(VT, Mask);
5388     IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
5389     break;
5390   case X86ISD::UNPCKL:
5391     DecodeUNPCKLMask(VT, Mask);
5392     IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
5393     break;
5394   case X86ISD::MOVHLPS:
5395     DecodeMOVHLPSMask(NumElems, Mask);
5396     IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
5397     break;
5398   case X86ISD::MOVLHPS:
5399     DecodeMOVLHPSMask(NumElems, Mask);
5400     IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
5401     break;
5402 case X86ISD::PALIGNR:
5403 ImmN = N->getOperand(N->getNumOperands()-1);
5404     DecodePALIGNRMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
5405     break;
5406   case X86ISD::PSHUFD:
5407   case X86ISD::VPERMILPI:
5408     ImmN = N->getOperand(N->getNumOperands()-1);
5409     DecodePSHUFMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
5410     IsUnary = true;
5411     break;
5412   case X86ISD::PSHUFHW:
5413     ImmN = N->getOperand(N->getNumOperands()-1);
5414     DecodePSHUFHWMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
5415     IsUnary = true;
5416     break;
5417   case X86ISD::PSHUFLW:
5418     ImmN = N->getOperand(N->getNumOperands()-1);
5419     DecodePSHUFLWMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
5420     IsUnary = true;
5421     break;
5422 case X86ISD::PSHUFB: {
5424 SDValue MaskNode = N->getOperand(1);
5425 while (MaskNode->getOpcode() == ISD::BITCAST)
5426 MaskNode = MaskNode->getOperand(0);
5428 if (MaskNode->getOpcode() == ISD::BUILD_VECTOR) {
5429 // If we have a build-vector, then things are easy.
5430 EVT VT = MaskNode.getValueType();
5431 assert(VT.isVector() &&
5432 "Can't produce a non-vector with a build_vector!");
5433       if (!VT.isInteger())
5434         return false;
5436       int NumBytesPerElement = VT.getVectorElementType().getSizeInBits() / 8;
5438 SmallVector<uint64_t, 32> RawMask;
5439 for (int i = 0, e = MaskNode->getNumOperands(); i < e; ++i) {
5440 SDValue Op = MaskNode->getOperand(i);
5441 if (Op->getOpcode() == ISD::UNDEF) {
5442           RawMask.push_back((uint64_t)SM_SentinelUndef);
5443           continue;
5444         }
5445         auto *CN = dyn_cast<ConstantSDNode>(Op.getNode());
5446         if (!CN)
5447           return false;
5448         APInt MaskElement = CN->getAPIntValue();
5450 // We now have to decode the element which could be any integer size and
5451 // extract each byte of it.
5452 for (int j = 0; j < NumBytesPerElement; ++j) {
5453 // Note that this is x86 and so always little endian: the low byte is
5454 // the first byte of the mask.
5455 RawMask.push_back(MaskElement.getLoBits(8).getZExtValue());
5456           MaskElement = MaskElement.lshr(8);
5457         }
5458       }
5459       DecodePSHUFBMask(RawMask, Mask);
5460       break;
5461     }
5463     auto *MaskLoad = dyn_cast<LoadSDNode>(MaskNode);
5464     if (!MaskLoad)
5465       return false;
5467 SDValue Ptr = MaskLoad->getBasePtr();
5468 if (Ptr->getOpcode() == X86ISD::Wrapper)
5469 Ptr = Ptr->getOperand(0);
5471 auto *MaskCP = dyn_cast<ConstantPoolSDNode>(Ptr);
5472     if (!MaskCP || MaskCP->isMachineConstantPoolEntry())
5473       return false;
5475     if (auto *C = dyn_cast<Constant>(MaskCP->getConstVal())) {
5476       DecodePSHUFBMask(C, Mask);
5477       break;
5478     }
5480     return false;
5481   }
5484 case X86ISD::VPERMI:
5485 ImmN = N->getOperand(N->getNumOperands()-1);
5486     DecodeVPERMMask(cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
5487     IsUnary = true;
5488     break;
5489   case X86ISD::MOVSS:
5490   case X86ISD::MOVSD:
5491     DecodeScalarMoveMask(VT, /* IsLoad */ false, Mask);
5492     break;
5493 case X86ISD::VPERM2X128:
5494 ImmN = N->getOperand(N->getNumOperands()-1);
5495     DecodeVPERM2X128Mask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
5496     if (Mask.empty()) return false;
5497     break;
5498   case X86ISD::MOVSLDUP:
5499     DecodeMOVSLDUPMask(VT, Mask);
5500     IsUnary = true;
5501     break;
5502   case X86ISD::MOVSHDUP:
5503     DecodeMOVSHDUPMask(VT, Mask);
5504     IsUnary = true;
5505     break;
5506   case X86ISD::MOVDDUP:
5507     DecodeMOVDDUPMask(VT, Mask);
5508     IsUnary = true;
5509     break;
5510   case X86ISD::MOVLHPD:
5511   case X86ISD::MOVLPD:
5512   case X86ISD::MOVLPS:
5513     // Not yet implemented
5514     return false;
5515   default: llvm_unreachable("unknown target shuffle node");
5516   }
5518 // If we have a fake unary shuffle, the shuffle mask is spread across two
5519 // inputs that are actually the same node. Re-map the mask to always point
5520 // into the first input.
5521   if (IsFakeUnary)
5522     for (int &M : Mask)
5523       if (M >= (int)Mask.size())
5524         M -= Mask.size();
5526   return true;
5527 }
5529 /// getShuffleScalarElt - Returns the scalar element that will make up the ith
5530 /// element of the result of the vector shuffle.
5531 static SDValue getShuffleScalarElt(SDNode *N, unsigned Index, SelectionDAG &DAG,
5532                                    unsigned Depth) {
5533   if (Depth == 6)
5534     return SDValue(); // Limit search depth.
5536 SDValue V = SDValue(N, 0);
5537 EVT VT = V.getValueType();
5538 unsigned Opcode = V.getOpcode();
5540 // Recurse into ISD::VECTOR_SHUFFLE node to find scalars.
5541 if (const ShuffleVectorSDNode *SV = dyn_cast<ShuffleVectorSDNode>(N)) {
5542     int Elt = SV->getMaskElt(Index);
5544     if (Elt < 0)
5545       return DAG.getUNDEF(VT.getVectorElementType());
5547 unsigned NumElems = VT.getVectorNumElements();
5548 SDValue NewV = (Elt < (int)NumElems) ? SV->getOperand(0)
5549 : SV->getOperand(1);
5550     return getShuffleScalarElt(NewV.getNode(), Elt % NumElems, DAG, Depth+1);
5551   }
5553 // Recurse into target specific vector shuffles to find scalars.
5554 if (isTargetShuffle(Opcode)) {
5555 MVT ShufVT = V.getSimpleValueType();
5556 unsigned NumElems = ShufVT.getVectorNumElements();
5557     SmallVector<int, 16> ShuffleMask;
5558     bool IsUnary;
5560     if (!getTargetShuffleMask(N, ShufVT, ShuffleMask, IsUnary))
5561       return SDValue();
5563     int Elt = ShuffleMask[Index];
5564     if (Elt < 0)
5565       return DAG.getUNDEF(ShufVT.getVectorElementType());
5567     SDValue NewV = (Elt < (int)NumElems) ? N->getOperand(0)
5568                                          : N->getOperand(1);
5569     return getShuffleScalarElt(NewV.getNode(), Elt % NumElems, DAG,
5570                                Depth+1);
5571   }
5573 // Actual nodes that may contain scalar elements
5574 if (Opcode == ISD::BITCAST) {
5575 V = V.getOperand(0);
5576 EVT SrcVT = V.getValueType();
5577 unsigned NumElems = VT.getVectorNumElements();
5579     if (!SrcVT.isVector() || SrcVT.getVectorNumElements() != NumElems)
5580       return SDValue();
5581   }
5583   if (V.getOpcode() == ISD::SCALAR_TO_VECTOR)
5584     return (Index == 0) ? V.getOperand(0)
5585                         : DAG.getUNDEF(VT.getVectorElementType());
5587   if (V.getOpcode() == ISD::BUILD_VECTOR)
5588     return V.getOperand(Index);
5590   return SDValue();
5591 }
5593 /// getNumOfConsecutiveZeros - Return the number of elements of a vector
5594 /// shuffle operation which come consecutively from zero. The
5595 /// search can start in two different directions, from left or right.
5596 /// We count undefs as zeros until PreferredNum is reached.
5597 static unsigned getNumOfConsecutiveZeros(ShuffleVectorSDNode *SVOp,
5598                                          unsigned NumElems, bool ZerosFromLeft,
5599                                          SelectionDAG &DAG,
5600                                          unsigned PreferredNum = -1U) {
5601 unsigned NumZeros = 0;
5602 for (unsigned i = 0; i != NumElems; ++i) {
5603 unsigned Index = ZerosFromLeft ? i : NumElems - i - 1;
5604     SDValue Elt = getShuffleScalarElt(SVOp, Index, DAG, 0);
5605     if (!Elt.getNode())
5606       break;
5608     if (X86::isZeroNode(Elt))
5609       ++NumZeros;
5610     else if (Elt.getOpcode() == ISD::UNDEF) // Undef as zero up to PreferredNum.
5611       NumZeros = std::min(NumZeros + 1, PreferredNum);
5612     else
5613       break;
5614   }
5616   return NumZeros;
5617 }
5619 /// isShuffleMaskConsecutive - Check if the shuffle mask indices [MaskI, MaskE)
5620 /// correspond consecutively to elements from one of the vector operands,
5621 /// starting from its index OpIdx. Also tell OpNum which source vector operand.
5622 static
5623 bool isShuffleMaskConsecutive(ShuffleVectorSDNode *SVOp,
5624 unsigned MaskI, unsigned MaskE, unsigned OpIdx,
5625 unsigned NumElems, unsigned &OpNum) {
5626 bool SeenV1 = false;
5627 bool SeenV2 = false;
5629 for (unsigned i = MaskI; i != MaskE; ++i, ++OpIdx) {
5630 int Idx = SVOp->getMaskElt(i);
5631     // Ignore undef indices.
5632     if (Idx < 0)
5633       continue;
5635     if (Idx < (int)NumElems)
5636       SeenV1 = true;
5637     else
5638       SeenV2 = true;
5640     // Only accept consecutive elements from the same vector.
5641     if ((Idx % NumElems != OpIdx) || (SeenV1 && SeenV2))
5642       return false;
5643   }
5645   OpNum = SeenV1 ? 0 : 1;
5646   return true;
5647 }
5649 /// isVectorShiftRight - Returns true if the shuffle can be implemented as a
5650 /// logical right shift of a vector.
5651 static bool isVectorShiftRight(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG,
5652                                bool &isLeft, SDValue &ShVal, unsigned &ShAmt) {
5653   unsigned NumElems =
5654     SVOp->getSimpleValueType(0).getVectorNumElements();
5655 unsigned NumZeros = getNumOfConsecutiveZeros(
5656 SVOp, NumElems, false /* check zeros from right */, DAG,
5657       SVOp->getMaskElt(0));
5658   unsigned OpSrc;
5660   if (!NumZeros)
5661     return false;
5663 // Considering the elements in the mask that are not consecutive zeros,
5664 // check if they consecutively come from only one of the source vectors.
5666 // V1 = {X, A, B, C} 0
5668 // vector_shuffle V1, V2 <1, 2, 3, X>
5670 if (!isShuffleMaskConsecutive(SVOp,
5671 0, // Mask Start Index
5672 NumElems-NumZeros, // Mask End Index(exclusive)
5673 NumZeros, // Where to start looking in the src vector
5674 NumElems, // Number of elements in vector
5675             OpSrc))              // Which source operand ?
5676     return false;
5678   isLeft = false;
5679   ShAmt = NumZeros;
5680   ShVal = SVOp->getOperand(OpSrc);
5681   return true;
5682 }
5684 /// isVectorShiftLeft - Returns true if the shuffle can be implemented as a
5685 /// logical left shift of a vector.
5686 static bool isVectorShiftLeft(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG,
5687 bool &isLeft, SDValue &ShVal, unsigned &ShAmt) {
5688   unsigned NumElems =
5689     SVOp->getSimpleValueType(0).getVectorNumElements();
5690 unsigned NumZeros = getNumOfConsecutiveZeros(
5691 SVOp, NumElems, true /* check zeros from left */, DAG,
5692       NumElems - SVOp->getMaskElt(NumElems - 1) - 1);
5693   unsigned OpSrc;
5695   if (!NumZeros)
5696     return false;
5698 // Considering the elements in the mask that are not consecutive zeros,
5699 // check if they consecutively come from only one of the source vectors.
5701 // 0 { A, B, X, X } = V2
5703 // vector_shuffle V1, V2 <X, X, 4, 5>
5705 if (!isShuffleMaskConsecutive(SVOp,
5706 NumZeros, // Mask Start Index
5707 NumElems, // Mask End Index(exclusive)
5708 0, // Where to start looking in the src vector
5709 NumElems, // Number of elements in vector
5710             OpSrc))              // Which source operand ?
5711     return false;
5713   isLeft = true;
5714   ShAmt = NumZeros;
5715   ShVal = SVOp->getOperand(OpSrc);
5716   return true;
5717 }
5719 /// isVectorShift - Returns true if the shuffle can be implemented as a
5720 /// logical left or right shift of a vector.
5721 static bool isVectorShift(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG,
5722 bool &isLeft, SDValue &ShVal, unsigned &ShAmt) {
5723   // Although the logic below supports any bitwidth size, there are no
5724   // shift instructions which handle more than 128-bit vectors.
5725   if (!SVOp->getSimpleValueType(0).is128BitVector())
5726     return false;
5728   if (isVectorShiftLeft(SVOp, DAG, isLeft, ShVal, ShAmt) ||
5729       isVectorShiftRight(SVOp, DAG, isLeft, ShVal, ShAmt))
5730     return true;
5732   return false;
5733 }
5735 /// LowerBuildVectorv16i8 - Custom lower build_vector of v16i8.
5737 static SDValue LowerBuildVectorv16i8(SDValue Op, unsigned NonZeros,
5738                                        unsigned NumNonZero, unsigned NumZero,
5739                                        SelectionDAG &DAG,
5740                                        const X86Subtarget* Subtarget,
5741 const TargetLowering &TLI) {
5742   if (NumNonZero > 8)
5743     return SDValue();
5745   SDLoc dl(Op);
5746   SDValue V;
5747   bool First = true;
5748   for (unsigned i = 0; i < 16; ++i) {
5749 bool ThisIsNonZero = (NonZeros & (1 << i)) != 0;
5750     if (ThisIsNonZero && First) {
5751       if (NumZero)
5752         V = getZeroVector(MVT::v8i16, Subtarget, DAG, dl);
5753       else
5754         V = DAG.getUNDEF(MVT::v8i16);
5755       First = false;
5756     }
5758     if ((i & 1) != 0) {
5759       SDValue ThisElt, LastElt;
5760 bool LastIsNonZero = (NonZeros & (1 << (i-1))) != 0;
5761 if (LastIsNonZero) {
5762 LastElt = DAG.getNode(ISD::ZERO_EXTEND, dl,
5763                               MVT::i16, Op.getOperand(i-1));
5764       }
5765       if (ThisIsNonZero) {
5766 ThisElt = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, Op.getOperand(i));
5767 ThisElt = DAG.getNode(ISD::SHL, dl, MVT::i16,
5768 ThisElt, DAG.getConstant(8, MVT::i8));
5769         if (LastIsNonZero)
5770           ThisElt = DAG.getNode(ISD::OR, dl, MVT::i16, ThisElt, LastElt);
5771       } else
5772         ThisElt = LastElt;
5774 if (ThisElt.getNode())
5775 V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v8i16, V, ThisElt,
5776 DAG.getIntPtrConstant(i/2));
5777     }
5778   }
5780   return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, V);
5781 }
5783 /// LowerBuildVectorv8i16 - Custom lower build_vector of v8i16.
5785 static SDValue LowerBuildVectorv8i16(SDValue Op, unsigned NonZeros,
5786                                      unsigned NumNonZero, unsigned NumZero,
5787                                      SelectionDAG &DAG,
5788                                      const X86Subtarget* Subtarget,
5789                                      const TargetLowering &TLI) {
5790   if (NumNonZero > 4)
5791     return SDValue();
5793   SDLoc dl(Op);
5794   SDValue V;
5795   bool First = true;
5796   for (unsigned i = 0; i < 8; ++i) {
5797     bool isNonZero = (NonZeros & (1 << i)) != 0;
5798     if (isNonZero) {
5799       if (First) {
5800         if (NumZero)
5801           V = getZeroVector(MVT::v8i16, Subtarget, DAG, dl);
5802         else
5803           V = DAG.getUNDEF(MVT::v8i16);
5804         First = false;
5805       }
5806       V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl,
5807                       MVT::v8i16, V, Op.getOperand(i),
5808                       DAG.getIntPtrConstant(i));
5809     }
5810   }
5812   return V;
5813 }
5815 /// LowerBuildVectorv4x32 - Custom lower build_vector of v4i32 or v4f32.
5816 static SDValue LowerBuildVectorv4x32(SDValue Op, SelectionDAG &DAG,
5817 const X86Subtarget *Subtarget,
5818 const TargetLowering &TLI) {
5819 // Find all zeroable elements.
5820   bool Zeroable[4];
5821   for (int i=0; i < 4; ++i) {
5822     SDValue Elt = Op->getOperand(i);
5823     Zeroable[i] = (Elt.getOpcode() == ISD::UNDEF || X86::isZeroNode(Elt));
5824   }
5825 assert(std::count_if(&Zeroable[0], &Zeroable[4],
5826 [](bool M) { return !M; }) > 1 &&
5827 "We expect at least two non-zero elements!");
5829 // We only know how to deal with build_vector nodes where elements are either
5830 // zeroable or extract_vector_elt with constant index.
5831 SDValue FirstNonZero;
5832 unsigned FirstNonZeroIdx;
5833   for (unsigned i=0; i < 4; ++i) {
5834     if (Zeroable[i])
5835       continue;
5836     SDValue Elt = Op->getOperand(i);
5837     if (Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
5838         !isa<ConstantSDNode>(Elt.getOperand(1)))
5839       return SDValue();
5840     // Make sure that this node is extracting from a 128-bit vector.
5841     MVT VT = Elt.getOperand(0).getSimpleValueType();
5842     if (!VT.is128BitVector())
5843       return SDValue();
5844     if (!FirstNonZero.getNode()) {
5845       FirstNonZero = Elt;
5846       FirstNonZeroIdx = i;
5847     }
5848   }
5850 assert(FirstNonZero.getNode() && "Unexpected build vector of all zeros!");
5851 SDValue V1 = FirstNonZero.getOperand(0);
5852 MVT VT = V1.getSimpleValueType();
5854 // See if this build_vector can be lowered as a blend with zero.
5855   SDValue Elt;
5856   unsigned EltMaskIdx, EltIdx;
5857   int Mask[4];
5858   for (EltIdx = 0; EltIdx < 4; ++EltIdx) {
5859 if (Zeroable[EltIdx]) {
5860 // The zero vector will be on the right hand side.
5861       Mask[EltIdx] = EltIdx+4;
5862       continue;
5863     }
5865     Elt = Op->getOperand(EltIdx);
5866     // By construction, Elt is an EXTRACT_VECTOR_ELT with constant index.
5867     EltMaskIdx = cast<ConstantSDNode>(Elt.getOperand(1))->getZExtValue();
5868     if (Elt.getOperand(0) != V1 || EltMaskIdx != EltIdx)
5869       break;
5870     Mask[EltIdx] = EltIdx;
5871   }
5873   if (EltIdx == 4) {
5874     // Let the shuffle legalizer deal with blend operations.
5875     SDValue VZero = getZeroVector(VT, Subtarget, DAG, SDLoc(Op));
5876     if (V1.getSimpleValueType() != VT)
5877       V1 = DAG.getNode(ISD::BITCAST, SDLoc(V1), VT, V1);
5878     return DAG.getVectorShuffle(VT, SDLoc(V1), V1, VZero, &Mask[0]);
5879   }
5881 // See if we can lower this build_vector to a INSERTPS.
5882   if (!Subtarget->hasSSE41())
5883     return SDValue();
5885   SDValue V2 = Elt.getOperand(0);
5886   if (Elt == FirstNonZero && EltIdx == FirstNonZeroIdx)
5887     V1 = SDValue();
5889 bool CanFold = true;
5890   for (unsigned i = EltIdx + 1; i < 4 && CanFold; ++i) {
5891     if (Zeroable[i])
5892       continue;
5894     SDValue Current = Op->getOperand(i);
5895     SDValue SrcVector = Current->getOperand(0);
5896     if (!V1.getNode())
5897       V1 = SrcVector;
5898     CanFold = SrcVector == V1 &&
5899       cast<ConstantSDNode>(Current.getOperand(1))->getZExtValue() == i;
5900   }
5902   if (!CanFold)
5903     return SDValue();
5905 assert(V1.getNode() && "Expected at least two non-zero elements!");
5906 if (V1.getSimpleValueType() != MVT::v4f32)
5907 V1 = DAG.getNode(ISD::BITCAST, SDLoc(V1), MVT::v4f32, V1);
5908 if (V2.getSimpleValueType() != MVT::v4f32)
5909 V2 = DAG.getNode(ISD::BITCAST, SDLoc(V2), MVT::v4f32, V2);
5911 // Ok, we can emit an INSERTPS instruction.
5912   unsigned ZMask = 0;
5913   for (int i = 0; i < 4; ++i)
5914     if (Zeroable[i])
5915       ZMask |= 1 << i;
5917 unsigned InsertPSMask = EltMaskIdx << 6 | EltIdx << 4 | ZMask;
5918 assert((InsertPSMask & ~0xFFu) == 0 && "Invalid mask!");
5919 SDValue Result = DAG.getNode(X86ISD::INSERTPS, SDLoc(Op), MVT::v4f32, V1, V2,
5920 DAG.getIntPtrConstant(InsertPSMask));
5921   return DAG.getNode(ISD::BITCAST, SDLoc(Op), VT, Result);
5922 }
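// Illustrative example (not from the original source): with source lane 2,
// destination lane 1, and element 3 zeroed, the INSERTPS immediate is
//   (2 << 6) | (1 << 4) | 0x8 == 0x98
// (bits 7:6 = source lane, bits 5:4 = destination lane, bits 3:0 = zero
// mask).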
5924 /// Return a vector logical shift node.
5925 static SDValue getVShift(bool isLeft, EVT VT, SDValue SrcOp,
5926 unsigned NumBits, SelectionDAG &DAG,
5927 const TargetLowering &TLI, SDLoc dl) {
5928 assert(VT.is128BitVector() && "Unknown type for VShift");
5929 MVT ShVT = MVT::v2i64;
5930 unsigned Opc = isLeft ? X86ISD::VSHLDQ : X86ISD::VSRLDQ;
5931 SrcOp = DAG.getNode(ISD::BITCAST, dl, ShVT, SrcOp);
5932 MVT ScalarShiftTy = TLI.getScalarShiftAmountTy(SrcOp.getValueType());
5933 SDValue ShiftVal = DAG.getConstant(NumBits, ScalarShiftTy);
5934 return DAG.getNode(ISD::BITCAST, dl, VT,
5935                      DAG.getNode(Opc, dl, ShVT, SrcOp, ShiftVal));
5936 }
5938 static SDValue
5939 LowerAsSplatVectorLoad(SDValue SrcOp, MVT VT, SDLoc dl, SelectionDAG &DAG) {
5941 // Check if the scalar load can be widened into a vector load. And if
5942 // the address is "base + cst" see if the cst can be "absorbed" into
5943 // the shuffle mask.
5944 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(SrcOp)) {
5945 SDValue Ptr = LD->getBasePtr();
5946     if (!ISD::isNormalLoad(LD) || LD->isVolatile())
5947       return SDValue();
5948 EVT PVT = LD->getValueType(0);
5949     if (PVT != MVT::i32 && PVT != MVT::f32)
5950       return SDValue();
5952     int FI = -1;
5953     int64_t Offset = 0;
5954 if (FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(Ptr)) {
5955       FI = FINode->getIndex();
5956       Offset = 0;
5957 } else if (DAG.isBaseWithConstantOffset(Ptr) &&
5958 isa<FrameIndexSDNode>(Ptr.getOperand(0))) {
5959 FI = cast<FrameIndexSDNode>(Ptr.getOperand(0))->getIndex();
5960 Offset = Ptr.getConstantOperandVal(1);
5961       Ptr = Ptr.getOperand(0);
5962     } else {
5963       return SDValue();
5964     }
5966 // FIXME: 256-bit vector instructions don't require a strict alignment,
5967 // improve this code to support it better.
5968 unsigned RequiredAlign = VT.getSizeInBits()/8;
5969 SDValue Chain = LD->getChain();
5970 // Make sure the stack object alignment is at least 16 or 32.
5971 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
5972 if (DAG.InferPtrAlignment(Ptr) < RequiredAlign) {
5973 if (MFI->isFixedObjectIndex(FI)) {
5974 // Can't change the alignment. FIXME: It's possible to compute
5975 // the exact stack offset and reference FI + adjust offset instead.
5976       // If someone *really* cares about this, that's the way to implement it.
5977       return SDValue();
5978     }
5979     MFI->setObjectAlignment(FI, RequiredAlign);
5980   }
5983     // (Offset % 16 or 32) must be a multiple of 4. The address is then
5984     // Ptr + (Offset & ~15 or ~31).
5987     if ((Offset % RequiredAlign) & 3)
5988       return SDValue();
5989     int64_t StartOffset = Offset & ~(RequiredAlign-1);
5990     if (StartOffset)
5991       Ptr = DAG.getNode(ISD::ADD, SDLoc(Ptr), Ptr.getValueType(),
5992                         Ptr, DAG.getConstant(StartOffset, Ptr.getValueType()));
5992 Ptr,DAG.getConstant(StartOffset, Ptr.getValueType()));
5994 int EltNo = (Offset - StartOffset) >> 2;
5995 unsigned NumElems = VT.getVectorNumElements();
5997 EVT NVT = EVT::getVectorVT(*DAG.getContext(), PVT, NumElems);
5998 SDValue V1 = DAG.getLoad(NVT, dl, Chain, Ptr,
5999 LD->getPointerInfo().getWithOffset(StartOffset),
6000 false, false, false, 0);
6002 SmallVector<int, 8> Mask;
6003 for (unsigned i = 0; i != NumElems; ++i)
6004 Mask.push_back(EltNo);
6006     return DAG.getVectorShuffle(NVT, dl, V1, DAG.getUNDEF(NVT), &Mask[0]);
6007   }
6009   return SDValue();
6010 }
6012 /// Given the initializing elements 'Elts' of a vector of type 'VT', see if the
6013 /// elements can be replaced by a single large load which has the same value as
6014 /// a build_vector or insert_subvector whose loaded operands are 'Elts'.
6016 /// Example: <load i32 *a, load i32 *a+4, undef, undef> -> zextload a
6018 /// FIXME: we'd also like to handle the case where the last elements are zero
6019 /// rather than undef via VZEXT_LOAD, but we do not detect that case today.
6020 /// There's even a handy isZeroNode for that purpose.
6021 static SDValue EltsFromConsecutiveLoads(EVT VT, ArrayRef<SDValue> Elts,
6022 SDLoc &DL, SelectionDAG &DAG,
6023 bool isAfterLegalize) {
6024 unsigned NumElems = Elts.size();
6026 LoadSDNode *LDBase = nullptr;
6027 unsigned LastLoadedElt = -1U;
6029 // For each element in the initializer, see if we've found a load or an undef.
6030 // If we don't find an initial load element, or later load elements are
6031 // non-consecutive, bail out.
6032 for (unsigned i = 0; i < NumElems; ++i) {
6033 SDValue Elt = Elts[i];
6034 // Look through a bitcast.
6035 if (Elt.getNode() && Elt.getOpcode() == ISD::BITCAST)
6036 Elt = Elt.getOperand(0);
6037 if (!Elt.getNode() ||
6038         (Elt.getOpcode() != ISD::UNDEF && !ISD::isNON_EXTLoad(Elt.getNode())))
6039       return SDValue();
6040     if (!LDBase) {
6041       if (Elt.getNode()->getOpcode() == ISD::UNDEF)
6042         return SDValue();
6043       LDBase = cast<LoadSDNode>(Elt.getNode());
6044       LastLoadedElt = i;
6045       continue;
6046     }
6047     if (Elt.getOpcode() == ISD::UNDEF)
6048       continue;
6050 LoadSDNode *LD = cast<LoadSDNode>(Elt);
6051 EVT LdVT = Elt.getValueType();
6052 // Each loaded element must be the correct fractional portion of the
6053 // requested vector load.
6054     if (LdVT.getSizeInBits() != VT.getSizeInBits() / NumElems)
6055       return SDValue();
6056     if (!DAG.isConsecutiveLoad(LD, LDBase, LdVT.getSizeInBits() / 8, i))
6057       return SDValue();
6058     LastLoadedElt = i;
6059   }
6061 // If we have found an entire vector of loads and undefs, then return a large
6062 // load of the entire vector width starting at the base pointer. If we found
6063 // consecutive loads for the low half, generate a vzext_load node.
6064 if (LastLoadedElt == NumElems - 1) {
6065 assert(LDBase && "Did not find base load for merging consecutive loads");
6066 EVT EltVT = LDBase->getValueType(0);
6067 // Ensure that the input vector size for the merged loads matches the
6068 // cumulative size of the input elements.
6069     if (VT.getSizeInBits() != EltVT.getSizeInBits() * NumElems)
6070       return SDValue();
6072     if (isAfterLegalize &&
6073         !DAG.getTargetLoweringInfo().isOperationLegal(ISD::LOAD, VT))
6074       return SDValue();
6076 SDValue NewLd = SDValue();
6078 NewLd = DAG.getLoad(VT, DL, LDBase->getChain(), LDBase->getBasePtr(),
6079 LDBase->getPointerInfo(), LDBase->isVolatile(),
6080 LDBase->isNonTemporal(), LDBase->isInvariant(),
6081 LDBase->getAlignment());
6083 if (LDBase->hasAnyUseOfValue(1)) {
6084       SDValue NewChain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other,
6085                                      SDValue(LDBase, 1),
6086                                      SDValue(NewLd.getNode(), 1));
6087       DAG.ReplaceAllUsesOfValueWith(SDValue(LDBase, 1), NewChain);
6088       DAG.UpdateNodeOperands(NewChain.getNode(), SDValue(LDBase, 1),
6089                              SDValue(NewLd.getNode(), 1));
6090     }
6092     return NewLd;
6093   }
6095   // TODO: The code below fires only for loading the low v2i32 / v2f32
6096   // of a v4i32 / v4f32. It's probably worth generalizing.
6097 EVT EltVT = VT.getVectorElementType();
6098 if (NumElems == 4 && LastLoadedElt == 1 && (EltVT.getSizeInBits() == 32) &&
6099 DAG.getTargetLoweringInfo().isTypeLegal(MVT::v2i64)) {
6100 SDVTList Tys = DAG.getVTList(MVT::v2i64, MVT::Other);
6101 SDValue Ops[] = { LDBase->getChain(), LDBase->getBasePtr() };
6102     SDValue ResNode =
6103         DAG.getMemIntrinsicNode(X86ISD::VZEXT_LOAD, DL, Tys, Ops, MVT::i64,
6104                                 LDBase->getPointerInfo(),
6105                                 LDBase->getAlignment(),
6106                                 false/*isVolatile*/, true/*ReadMem*/,
6107                                 false/*WriteMem*/);
6109 // Make sure the newly-created LOAD is in the same position as LDBase in
6110 // terms of dependency. We create a TokenFactor for LDBase and ResNode, and
6111 // update uses of LDBase's output chain to use the TokenFactor.
6112 if (LDBase->hasAnyUseOfValue(1)) {
6113 SDValue NewChain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other,
6114 SDValue(LDBase, 1), SDValue(ResNode.getNode(), 1));
6115 DAG.ReplaceAllUsesOfValueWith(SDValue(LDBase, 1), NewChain);
6116 DAG.UpdateNodeOperands(NewChain.getNode(), SDValue(LDBase, 1),
6117 SDValue(ResNode.getNode(), 1));
6118     }
6120     return DAG.getNode(ISD::BITCAST, DL, VT, ResNode);
6121   }
6122   return SDValue();
6123 }
6125 /// LowerVectorBroadcast - Attempt to use the vbroadcast instruction
6126 /// to generate a splat value for the following cases:
6127 /// 1. A splat BUILD_VECTOR which uses a single scalar load, or a constant.
6128 /// 2. A splat shuffle which uses a scalar_to_vector node which comes from
6129 /// a scalar load, or a constant.
6130 /// The VBROADCAST node is returned when a pattern is found,
6131 /// or SDValue() otherwise.
6132 static SDValue LowerVectorBroadcast(SDValue Op, const X86Subtarget* Subtarget,
6133 SelectionDAG &DAG) {
6134 // VBROADCAST requires AVX.
6135 // TODO: Splats could be generated for non-AVX CPUs using SSE
6136 // instructions, but there's less potential gain for only 128-bit vectors.
6137   if (!Subtarget->hasAVX())
6138     return SDValue();
6140   MVT VT = Op.getSimpleValueType();
6141   SDLoc dl(Op);
6143   assert((VT.is128BitVector() || VT.is256BitVector() || VT.is512BitVector()) &&
6144          "Unsupported vector type for broadcast.");
6146   SDValue Ld;
6147   bool ConstSplatVal;
6149   switch (Op.getOpcode()) {
6150     default:
6151       // Unknown pattern found.
6152       return SDValue();
6154 case ISD::BUILD_VECTOR: {
6155 auto *BVOp = cast<BuildVectorSDNode>(Op.getNode());
6156 BitVector UndefElements;
6157 SDValue Splat = BVOp->getSplatValue(&UndefElements);
6159 // We need a splat of a single value to use broadcast, and it doesn't
6160 // make any sense if the value is only in one element of the vector.
6161       if (!Splat || (VT.getVectorNumElements() - UndefElements.count()) <= 1)
6162         return SDValue();
6164       Ld = Splat;
6165       ConstSplatVal = (Ld.getOpcode() == ISD::Constant ||
6166 Ld.getOpcode() == ISD::ConstantFP);
6168 // Make sure that all of the users of a non-constant load are from the
6169 // BUILD_VECTOR node.
6170       if (!ConstSplatVal && !BVOp->isOnlyUserOf(Ld.getNode()))
6171         return SDValue();
6172       break;
6173     }
6175 case ISD::VECTOR_SHUFFLE: {
6176 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
6178       // Shuffles must have a splat mask where the first element is
6179       // splatted.
6180       if ((!SVOp->isSplat()) || SVOp->getMaskElt(0) != 0)
6181         return SDValue();
6183 SDValue Sc = Op.getOperand(0);
6184 if (Sc.getOpcode() != ISD::SCALAR_TO_VECTOR &&
6185 Sc.getOpcode() != ISD::BUILD_VECTOR) {
6187         if (!Subtarget->hasInt256())
6188           return SDValue();
6190 // Use the register form of the broadcast instruction available on AVX2.
6191 if (VT.getSizeInBits() >= 256)
6192 Sc = Extract128BitVector(Sc, 0, DAG, dl);
6193         return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Sc);
6194       }
6196 Ld = Sc.getOperand(0);
6197 ConstSplatVal = (Ld.getOpcode() == ISD::Constant ||
6198 Ld.getOpcode() == ISD::ConstantFP);
6200 // The scalar_to_vector node and the suspected
6201 // load node must have exactly one user.
6202 // Constants may have multiple users.
6204 // AVX-512 has register version of the broadcast
6205 bool hasRegVer = Subtarget->hasAVX512() && VT.is512BitVector() &&
6206 Ld.getValueType().getSizeInBits() >= 32;
6207       if (!ConstSplatVal && ((!Sc.hasOneUse() || !Ld.hasOneUse()) &&
6208                              !hasRegVer))
6209         return SDValue();
6210     }
6211   }
6214 unsigned ScalarSize = Ld.getValueType().getSizeInBits();
6215 bool IsGE256 = (VT.getSizeInBits() >= 256);
6217 // When optimizing for size, generate up to 5 extra bytes for a broadcast
6218 // instruction to save 8 or more bytes of constant pool data.
6219 // TODO: If multiple splats are generated to load the same constant,
6220 // it may be detrimental to overall size. There needs to be a way to detect
6221 // that condition to know if this is truly a size win.
6222 const Function *F = DAG.getMachineFunction().getFunction();
6223 bool OptForSize = F->hasFnAttribute(Attribute::OptimizeForSize);
6225 // Handle broadcasting a single constant scalar from the constant pool
6227 // On Sandybridge (no AVX2), it is still better to load a constant vector
6228 // from the constant pool and not to broadcast it from a scalar.
6229 // But override that restriction when optimizing for size.
6230 // TODO: Check if splatting is recommended for other AVX-capable CPUs.
6231 if (ConstSplatVal && (Subtarget->hasAVX2() || OptForSize)) {
6232 EVT CVT = Ld.getValueType();
6233 assert(!CVT.isVector() && "Must not broadcast a vector type");
6235 // Splat f32, i32, v4f64, v4i64 in all cases with AVX2.
6236 // For size optimization, also splat v2f64 and v2i64, and for size opt
6237 // with AVX2, also splat i8 and i16.
6238 // With pattern matching, the VBROADCAST node may become a VMOVDDUP.
6239 if (ScalarSize == 32 || (IsGE256 && ScalarSize == 64) ||
6240 (OptForSize && (ScalarSize == 64 || Subtarget->hasAVX2()))) {
6241 const Constant *C = nullptr;
6242 if (ConstantSDNode *CI = dyn_cast<ConstantSDNode>(Ld))
6243 C = CI->getConstantIntValue();
6244 else if (ConstantFPSDNode *CF = dyn_cast<ConstantFPSDNode>(Ld))
6245 C = CF->getConstantFPValue();
6247 assert(C && "Invalid constant type");
6249 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
6250 SDValue CP = DAG.getConstantPool(C, TLI.getPointerTy());
6251 unsigned Alignment = cast<ConstantPoolSDNode>(CP)->getAlignment();
6252 Ld = DAG.getLoad(CVT, dl, DAG.getEntryNode(), CP,
6253 MachinePointerInfo::getConstantPool(),
6254 false, false, false, Alignment);
6256       return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld);
6257     }
6258   }
6260 bool IsLoad = ISD::isNormalLoad(Ld.getNode());
6262 // Handle AVX2 in-register broadcasts.
6263 if (!IsLoad && Subtarget->hasInt256() &&
6264 (ScalarSize == 32 || (IsGE256 && ScalarSize == 64)))
6265 return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld);
6267   // The scalar source must be a normal load.
6268   if (!IsLoad)
6269     return SDValue();
6271 if (ScalarSize == 32 || (IsGE256 && ScalarSize == 64) ||
6272 (Subtarget->hasVLX() && ScalarSize == 64))
6273 return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld);
6275 // The integer check is needed for the 64-bit into 128-bit so it doesn't match
6276 // double since there is no vbroadcastsd xmm
6277 if (Subtarget->hasInt256() && Ld.getValueType().isInteger()) {
6278 if (ScalarSize == 8 || ScalarSize == 16 || ScalarSize == 64)
6279       return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld);
6280   }
6282   // Unsupported broadcast.
6283   return SDValue();
6284 }
6286 /// \brief For an EXTRACT_VECTOR_ELT with a constant index return the real
6287 /// underlying vector and index.
6289 /// Modifies \p ExtractedFromVec to the real vector and returns the real
6291 static int getUnderlyingExtractedFromVec(SDValue &ExtractedFromVec,
6292                                          SDValue ExtIdx) {
6293   int Idx = cast<ConstantSDNode>(ExtIdx)->getZExtValue();
6294   if (!isa<ShuffleVectorSDNode>(ExtractedFromVec))
6295     return Idx;
6297   // For 256-bit vectors, LowerEXTRACT_VECTOR_ELT_SSE4 may have already
6298   // lowered this:
6299   //   (extract_vector_elt (v8f32 %vreg1), Constant<6>)
6300   // to:
6301   //   (extract_vector_elt (vector_shuffle<2,u,u,u>
6302   //                            (extract_subvector (v8f32 %vreg0), Constant<4>),
6303   //                            undef)
6304   //                       Constant<2>)
6305 // In this case the vector is the extract_subvector expression and the index
6306 // is 2, as specified by the shuffle.
6307 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(ExtractedFromVec);
6308 SDValue ShuffleVec = SVOp->getOperand(0);
6309 MVT ShuffleVecVT = ShuffleVec.getSimpleValueType();
6310 assert(ShuffleVecVT.getVectorElementType() ==
6311 ExtractedFromVec.getSimpleValueType().getVectorElementType());
6313 int ShuffleIdx = SVOp->getMaskElt(Idx);
6314 if (isUndefOrInRange(ShuffleIdx, 0, ShuffleVecVT.getVectorNumElements())) {
6315     ExtractedFromVec = ShuffleVec;
6316     return ShuffleIdx;
6317   }
6318   return Idx;
6319 }
6321 static SDValue buildFromShuffleMostly(SDValue Op, SelectionDAG &DAG) {
6322 MVT VT = Op.getSimpleValueType();
6324 // Skip if insert_vec_elt is not supported.
6325 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
6326   if (!TLI.isOperationLegalOrCustom(ISD::INSERT_VECTOR_ELT, VT))
6327     return SDValue();
6329   SDLoc DL(Op);
6330   unsigned NumElems = Op.getNumOperands();
6332   SDValue VecIn1;
6333   SDValue VecIn2;
6334 SmallVector<unsigned, 4> InsertIndices;
6335 SmallVector<int, 8> Mask(NumElems, -1);
6337 for (unsigned i = 0; i != NumElems; ++i) {
6338 unsigned Opc = Op.getOperand(i).getOpcode();
6340     if (Opc == ISD::UNDEF)
6341       continue;
6343 if (Opc != ISD::EXTRACT_VECTOR_ELT) {
6344 // Quit if more than 1 elements need inserting.
6345       if (InsertIndices.size() > 1)
6346         return SDValue();
6348       InsertIndices.push_back(i);
6349       continue;
6350     }
6352 SDValue ExtractedFromVec = Op.getOperand(i).getOperand(0);
6353 SDValue ExtIdx = Op.getOperand(i).getOperand(1);
6354 // Quit if non-constant index.
6355     if (!isa<ConstantSDNode>(ExtIdx))
6356       return SDValue();
6357 int Idx = getUnderlyingExtractedFromVec(ExtractedFromVec, ExtIdx);
6359 // Quit if extracted from vector of different type.
6360     if (ExtractedFromVec.getValueType() != VT)
6361       return SDValue();
6363 if (!VecIn1.getNode())
6364 VecIn1 = ExtractedFromVec;
6365 else if (VecIn1 != ExtractedFromVec) {
6366 if (!VecIn2.getNode())
6367 VecIn2 = ExtractedFromVec;
6368 else if (VecIn2 != ExtractedFromVec)
6369         // Quit if more than 2 vectors to shuffle
6370         return SDValue();
6371     }
6373     if (ExtractedFromVec == VecIn1)
6374       Mask[i] = Idx;
6375     else if (ExtractedFromVec == VecIn2)
6376       Mask[i] = Idx + NumElems;
6377   }
6379   if (!VecIn1.getNode())
6380     return SDValue();
6382 VecIn2 = VecIn2.getNode() ? VecIn2 : DAG.getUNDEF(VT);
6383 SDValue NV = DAG.getVectorShuffle(VT, DL, VecIn1, VecIn2, &Mask[0]);
6384 for (unsigned i = 0, e = InsertIndices.size(); i != e; ++i) {
6385 unsigned Idx = InsertIndices[i];
6386 NV = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VT, NV, Op.getOperand(Idx),
6387 DAG.getIntPtrConstant(Idx));
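// Illustrative example: a v4i32 build_vector of the form
//   (a[0], a[1], x, a[3])
// where three operands are extract_vector_elts from the same vector %a and
// one operand 'x' is a fresh scalar becomes
//   (insert_vector_elt (vector_shuffle<0,1,u,3> %a, undef), x, 2)
// i.e. one shuffle plus a single insert instead of four scalar inserts.
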
// Lower BUILD_VECTOR operation for v8i1 and v16i1 types.
SDValue
X86TargetLowering::LowerBUILD_VECTORvXi1(SDValue Op, SelectionDAG &DAG) const {

  MVT VT = Op.getSimpleValueType();
  assert((VT.getVectorElementType() == MVT::i1) && (VT.getSizeInBits() <= 16) &&
         "Unexpected type in LowerBUILD_VECTORvXi1!");

  SDLoc dl(Op);
  if (ISD::isBuildVectorAllZeros(Op.getNode())) {
    SDValue Cst = DAG.getTargetConstant(0, MVT::i1);
    SmallVector<SDValue, 16> Ops(VT.getVectorNumElements(), Cst);
    return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Ops);
  }

  if (ISD::isBuildVectorAllOnes(Op.getNode())) {
    SDValue Cst = DAG.getTargetConstant(1, MVT::i1);
    SmallVector<SDValue, 16> Ops(VT.getVectorNumElements(), Cst);
    return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Ops);
  }

  bool AllConstants = true;
  uint64_t Immediate = 0;
  int NonConstIdx = -1;
  bool IsSplat = true;
  unsigned NumNonConsts = 0;
  unsigned NumConsts = 0;
  for (unsigned idx = 0, e = Op.getNumOperands(); idx < e; ++idx) {
    SDValue In = Op.getOperand(idx);
    if (In.getOpcode() == ISD::UNDEF)
      continue;
    if (!isa<ConstantSDNode>(In)) {
      AllConstants = false;
      NonConstIdx = idx;
      NumNonConsts++;
    } else {
      NumConsts++;
      if (cast<ConstantSDNode>(In)->getZExtValue())
        Immediate |= (1ULL << idx);
    }
    if (In != Op.getOperand(0))
      IsSplat = false;
  }

  if (AllConstants) {
    SDValue FullMask = DAG.getNode(ISD::BITCAST, dl, MVT::v16i1,
                                   DAG.getConstant(Immediate, MVT::i16));
    return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, FullMask,
                       DAG.getIntPtrConstant(0));
  }

  if (NumNonConsts == 1 && NonConstIdx != 0) {
    SDValue DstVec;
    if (NumConsts) {
      SDValue VecAsImm = DAG.getConstant(Immediate,
                                         MVT::getIntegerVT(VT.getSizeInBits()));
      DstVec = DAG.getNode(ISD::BITCAST, dl, VT, VecAsImm);
    } else
      DstVec = DAG.getUNDEF(VT);
    return DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, DstVec,
                       Op.getOperand(NonConstIdx),
                       DAG.getIntPtrConstant(NonConstIdx));
  }
  if (!IsSplat && (NonConstIdx != 0))
    llvm_unreachable("Unsupported BUILD_VECTOR operation");
  MVT SelectVT = (VT == MVT::v16i1) ? MVT::i16 : MVT::i8;
  SDValue Select;
  if (IsSplat)
    Select = DAG.getNode(ISD::SELECT, dl, SelectVT, Op.getOperand(0),
                         DAG.getConstant(-1, SelectVT),
                         DAG.getConstant(0, SelectVT));
  else
    Select = DAG.getNode(ISD::SELECT, dl, SelectVT, Op.getOperand(0),
                         DAG.getConstant((Immediate | 1), SelectVT),
                         DAG.getConstant(Immediate, SelectVT));
  return DAG.getNode(ISD::BITCAST, dl, VT, Select);
}

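// Worked example for the all-constant path above: the v8i1 build_vector
//   (1, 0, 1, 1, 0, 0, 0, 0)
// sets Immediate to 0b00001101 (one bit per non-zero operand index), which is
// materialized as an i16 constant, bitcast to v16i1, and narrowed back to
// v8i1 with an extract_subvector at index 0.
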
/// \brief Return true if \p N implements a horizontal binop and return the
/// operands for the horizontal binop into V0 and V1.
///
/// This is a helper function of PerformBUILD_VECTORCombine.
/// This function checks that the build_vector \p N in input implements a
/// horizontal operation. Parameter \p Opcode defines the kind of horizontal
/// operation to match.
/// For example, if \p Opcode is equal to ISD::ADD, then this function
/// checks if \p N implements a horizontal arithmetic add; if instead \p Opcode
/// is equal to ISD::SUB, then this function checks if this is a horizontal
/// arithmetic sub.
///
/// This function only analyzes elements of \p N whose indices are
/// in range [BaseIdx, LastIdx).
static bool isHorizontalBinOp(const BuildVectorSDNode *N, unsigned Opcode,
                              SelectionDAG &DAG,
                              unsigned BaseIdx, unsigned LastIdx,
                              SDValue &V0, SDValue &V1) {
  EVT VT = N->getValueType(0);

  assert(BaseIdx * 2 <= LastIdx && "Invalid Indices in input!");
  assert(VT.isVector() && VT.getVectorNumElements() >= LastIdx &&
         "Invalid Vector in input!");

  bool IsCommutable = (Opcode == ISD::ADD || Opcode == ISD::FADD);
  bool CanFold = true;
  unsigned ExpectedVExtractIdx = BaseIdx;
  unsigned NumElts = LastIdx - BaseIdx;
  V0 = DAG.getUNDEF(VT);
  V1 = DAG.getUNDEF(VT);

  // Check if N implements a horizontal binop.
  for (unsigned i = 0, e = NumElts; i != e && CanFold; ++i) {
    SDValue Op = N->getOperand(i + BaseIdx);

    // Skip UNDEFs.
    if (Op->getOpcode() == ISD::UNDEF) {
      // Update the expected vector extract index.
      if (i * 2 == NumElts)
        ExpectedVExtractIdx = BaseIdx;
      ExpectedVExtractIdx += 2;
      continue;
    }

    CanFold = Op->getOpcode() == Opcode && Op->hasOneUse();

    if (!CanFold)
      break;

    SDValue Op0 = Op.getOperand(0);
    SDValue Op1 = Op.getOperand(1);

    // Try to match the following pattern:
    // (BINOP (extract_vector_elt A, I), (extract_vector_elt A, I+1))
    CanFold = (Op0.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
        Op1.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
        Op0.getOperand(0) == Op1.getOperand(0) &&
        isa<ConstantSDNode>(Op0.getOperand(1)) &&
        isa<ConstantSDNode>(Op1.getOperand(1)));

    if (!CanFold)
      break;

    unsigned I0 = cast<ConstantSDNode>(Op0.getOperand(1))->getZExtValue();
    unsigned I1 = cast<ConstantSDNode>(Op1.getOperand(1))->getZExtValue();

    if (i * 2 < NumElts) {
      if (V0.getOpcode() == ISD::UNDEF)
        V0 = Op0.getOperand(0);
    } else {
      if (V1.getOpcode() == ISD::UNDEF)
        V1 = Op0.getOperand(0);
      if (i * 2 == NumElts)
        ExpectedVExtractIdx = BaseIdx;
    }

    SDValue Expected = (i * 2 < NumElts) ? V0 : V1;
    if (I0 == ExpectedVExtractIdx)
      CanFold = I1 == I0 + 1 && Op0.getOperand(0) == Expected;
    else if (IsCommutable && I1 == ExpectedVExtractIdx) {
      // Try to match the following dag sequence:
      // (BINOP (extract_vector_elt A, I+1), (extract_vector_elt A, I))
      CanFold = I0 == I1 + 1 && Op1.getOperand(0) == Expected;
    }

    ExpectedVExtractIdx += 2;
  }

  return CanFold;
}

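// Illustrative match: for a v4f32 build_vector
//   ((fadd A[0], A[1]), (fadd A[2], A[3]), (fadd B[0], B[1]), (fadd B[2], B[3]))
// every element folds the expected pair of consecutive extracts, so this
// returns true with V0 = A and V1 = B; the caller can then emit a single
// (X86ISD::FHADD A, B), i.e. one haddps.
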
/// \brief Emit a sequence of two 128-bit horizontal add/sub followed by
/// a concat_vector.
///
/// This is a helper function of PerformBUILD_VECTORCombine.
/// This function expects two 256-bit vectors called V0 and V1.
/// At first, each vector is split into two separate 128-bit vectors.
/// Then, the resulting 128-bit vectors are used to implement two
/// horizontal binary operations.
///
/// The kind of horizontal binary operation is defined by \p X86Opcode.
///
/// \p Mode specifies how the 128-bit parts of V0 and V1 are passed in input to
/// the two new horizontal binops.
/// When Mode is set, the first horizontal binop dag node takes as input the
/// lower 128-bit of V0 and the upper 128-bit of V0. The second horizontal
/// binop dag node takes as input the lower 128-bit of V1 and the upper
/// 128-bit of V1.
///   Example:
///     HADD V0_LO, V0_HI
///     HADD V1_LO, V1_HI
///
/// Otherwise, the first horizontal binop dag node takes as input the lower
/// 128-bit of V0 and the lower 128-bit of V1, and the second horizontal binop
/// dag node takes the upper 128-bit of V0 and the upper 128-bit of V1.
///   Example:
///     HADD V0_LO, V1_LO
///     HADD V0_HI, V1_HI
///
/// If \p isUndefLO is set, then the algorithm propagates UNDEF to the lower
/// 128-bits of the result. If \p isUndefHI is set, then UNDEF is propagated to
/// the upper 128-bits of the result.
static SDValue ExpandHorizontalBinOp(const SDValue &V0, const SDValue &V1,
                                     SDLoc DL, SelectionDAG &DAG,
                                     unsigned X86Opcode, bool Mode,
                                     bool isUndefLO, bool isUndefHI) {
  EVT VT = V0.getValueType();
  assert(VT.is256BitVector() && VT == V1.getValueType() &&
         "Invalid nodes in input!");

  unsigned NumElts = VT.getVectorNumElements();
  SDValue V0_LO = Extract128BitVector(V0, 0, DAG, DL);
  SDValue V0_HI = Extract128BitVector(V0, NumElts/2, DAG, DL);
  SDValue V1_LO = Extract128BitVector(V1, 0, DAG, DL);
  SDValue V1_HI = Extract128BitVector(V1, NumElts/2, DAG, DL);
  EVT NewVT = V0_LO.getValueType();

  SDValue LO = DAG.getUNDEF(NewVT);
  SDValue HI = DAG.getUNDEF(NewVT);

  if (Mode) {
    // Don't emit a horizontal binop if the result is expected to be UNDEF.
    if (!isUndefLO && V0->getOpcode() != ISD::UNDEF)
      LO = DAG.getNode(X86Opcode, DL, NewVT, V0_LO, V0_HI);
    if (!isUndefHI && V1->getOpcode() != ISD::UNDEF)
      HI = DAG.getNode(X86Opcode, DL, NewVT, V1_LO, V1_HI);
  } else {
    // Don't emit a horizontal binop if the result is expected to be UNDEF.
    if (!isUndefLO && (V0_LO->getOpcode() != ISD::UNDEF ||
                       V1_LO->getOpcode() != ISD::UNDEF))
      LO = DAG.getNode(X86Opcode, DL, NewVT, V0_LO, V1_LO);

    if (!isUndefHI && (V0_HI->getOpcode() != ISD::UNDEF ||
                       V1_HI->getOpcode() != ISD::UNDEF))
      HI = DAG.getNode(X86Opcode, DL, NewVT, V0_HI, V1_HI);
  }

  return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, LO, HI);
}

/// \brief Try to fold a build_vector that performs an 'addsub' into the
/// sequence of 'vadd + vsub + blendi'.
static SDValue matchAddSub(const BuildVectorSDNode *BV, SelectionDAG &DAG,
                           const X86Subtarget *Subtarget) {
  SDLoc DL(BV);
  EVT VT = BV->getValueType(0);
  unsigned NumElts = VT.getVectorNumElements();
  SDValue InVec0 = DAG.getUNDEF(VT);
  SDValue InVec1 = DAG.getUNDEF(VT);

  assert((VT == MVT::v8f32 || VT == MVT::v4f64 || VT == MVT::v4f32 ||
          VT == MVT::v2f64) && "build_vector with an invalid type found!");

  // Odd-numbered elements in the input build vector are obtained from
  // adding two integer/float elements.
  // Even-numbered elements in the input build vector are obtained from
  // subtracting two integer/float elements.
  unsigned ExpectedOpcode = ISD::FSUB;
  unsigned NextExpectedOpcode = ISD::FADD;
  bool AddFound = false;
  bool SubFound = false;

  for (unsigned i = 0, e = NumElts; i != e; i++) {
    SDValue Op = BV->getOperand(i);

    // Skip 'undef' values.
    unsigned Opcode = Op.getOpcode();
    if (Opcode == ISD::UNDEF) {
      std::swap(ExpectedOpcode, NextExpectedOpcode);
      continue;
    }

    // Early exit if we found an unexpected opcode.
    if (Opcode != ExpectedOpcode)
      return SDValue();

    SDValue Op0 = Op.getOperand(0);
    SDValue Op1 = Op.getOperand(1);

    // Try to match the following pattern:
    // (BINOP (extract_vector_elt A, i), (extract_vector_elt B, i))
    // Early exit if we cannot match that sequence.
    if (Op0.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
        Op1.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
        !isa<ConstantSDNode>(Op0.getOperand(1)) ||
        !isa<ConstantSDNode>(Op1.getOperand(1)) ||
        Op0.getOperand(1) != Op1.getOperand(1))
      return SDValue();

    unsigned I0 = cast<ConstantSDNode>(Op0.getOperand(1))->getZExtValue();
    if (I0 != i)
      return SDValue();

    // We found a valid add/sub node. Update the information accordingly.
    if (i & 1)
      AddFound = true;
    else
      SubFound = true;

    // Update InVec0 and InVec1.
    if (InVec0.getOpcode() == ISD::UNDEF)
      InVec0 = Op0.getOperand(0);
    if (InVec1.getOpcode() == ISD::UNDEF)
      InVec1 = Op1.getOperand(0);

    // Make sure that operands in input to each add/sub node always
    // come from a same pair of vectors.
    if (InVec0 != Op0.getOperand(0)) {
      if (ExpectedOpcode == ISD::FSUB)
        return SDValue();

      // FADD is commutable. Try to commute the operands
      // and then test again.
      std::swap(Op0, Op1);
      if (InVec0 != Op0.getOperand(0))
        return SDValue();
    }

    if (InVec1 != Op1.getOperand(0))
      return SDValue();

    // Update the pair of expected opcodes.
    std::swap(ExpectedOpcode, NextExpectedOpcode);
  }

  // Don't try to fold this build_vector into an ADDSUB if the inputs are undef.
  if (AddFound && SubFound && InVec0.getOpcode() != ISD::UNDEF &&
      InVec1.getOpcode() != ISD::UNDEF)
    return DAG.getNode(X86ISD::ADDSUB, DL, VT, InVec0, InVec1);

  return SDValue();
}

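// Illustrative match: the v4f32 build_vector
//   ((fsub A[0], B[0]), (fadd A[1], B[1]), (fsub A[2], B[2]), (fadd A[3], B[3]))
// alternates FSUB/FADD on lane-matched extracts from the same two vectors, so
// it folds to (X86ISD::ADDSUB A, B) and is emitted as a single addsubps.
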
static SDValue PerformBUILD_VECTORCombine(SDNode *N, SelectionDAG &DAG,
                                          const X86Subtarget *Subtarget) {
  SDLoc DL(N);
  EVT VT = N->getValueType(0);
  unsigned NumElts = VT.getVectorNumElements();
  BuildVectorSDNode *BV = cast<BuildVectorSDNode>(N);
  SDValue InVec0, InVec1;

  // Try to match an ADDSUB.
  if ((Subtarget->hasSSE3() && (VT == MVT::v4f32 || VT == MVT::v2f64)) ||
      (Subtarget->hasAVX() && (VT == MVT::v8f32 || VT == MVT::v4f64))) {
    SDValue Value = matchAddSub(BV, DAG, Subtarget);
    if (Value.getNode())
      return Value;
  }

  // Try to match horizontal ADD/SUB.
  unsigned NumUndefsLO = 0;
  unsigned NumUndefsHI = 0;
  unsigned Half = NumElts/2;

  // Count the number of UNDEF operands in the build_vector in input.
  for (unsigned i = 0, e = Half; i != e; ++i)
    if (BV->getOperand(i)->getOpcode() == ISD::UNDEF)
      NumUndefsLO++;

  for (unsigned i = Half, e = NumElts; i != e; ++i)
    if (BV->getOperand(i)->getOpcode() == ISD::UNDEF)
      NumUndefsHI++;

  // Early exit if this is either a build_vector of all UNDEFs or all the
  // operands but one are UNDEF.
  if (NumUndefsLO + NumUndefsHI + 1 >= NumElts)
    return SDValue();

  if ((VT == MVT::v4f32 || VT == MVT::v2f64) && Subtarget->hasSSE3()) {
    // Try to match an SSE3 float HADD/HSUB.
    if (isHorizontalBinOp(BV, ISD::FADD, DAG, 0, NumElts, InVec0, InVec1))
      return DAG.getNode(X86ISD::FHADD, DL, VT, InVec0, InVec1);

    if (isHorizontalBinOp(BV, ISD::FSUB, DAG, 0, NumElts, InVec0, InVec1))
      return DAG.getNode(X86ISD::FHSUB, DL, VT, InVec0, InVec1);
  } else if ((VT == MVT::v4i32 || VT == MVT::v8i16) && Subtarget->hasSSSE3()) {
    // Try to match an SSSE3 integer HADD/HSUB.
    if (isHorizontalBinOp(BV, ISD::ADD, DAG, 0, NumElts, InVec0, InVec1))
      return DAG.getNode(X86ISD::HADD, DL, VT, InVec0, InVec1);

    if (isHorizontalBinOp(BV, ISD::SUB, DAG, 0, NumElts, InVec0, InVec1))
      return DAG.getNode(X86ISD::HSUB, DL, VT, InVec0, InVec1);
  }

  if (!Subtarget->hasAVX())
    return SDValue();

  if ((VT == MVT::v8f32 || VT == MVT::v4f64)) {
    // Try to match an AVX horizontal add/sub of packed single/double
    // precision floating point values from 256-bit vectors.
    SDValue InVec2, InVec3;
    if (isHorizontalBinOp(BV, ISD::FADD, DAG, 0, Half, InVec0, InVec1) &&
        isHorizontalBinOp(BV, ISD::FADD, DAG, Half, NumElts, InVec2, InVec3) &&
        ((InVec0.getOpcode() == ISD::UNDEF ||
          InVec2.getOpcode() == ISD::UNDEF) || InVec0 == InVec2) &&
        ((InVec1.getOpcode() == ISD::UNDEF ||
          InVec3.getOpcode() == ISD::UNDEF) || InVec1 == InVec3))
      return DAG.getNode(X86ISD::FHADD, DL, VT, InVec0, InVec1);

    if (isHorizontalBinOp(BV, ISD::FSUB, DAG, 0, Half, InVec0, InVec1) &&
        isHorizontalBinOp(BV, ISD::FSUB, DAG, Half, NumElts, InVec2, InVec3) &&
        ((InVec0.getOpcode() == ISD::UNDEF ||
          InVec2.getOpcode() == ISD::UNDEF) || InVec0 == InVec2) &&
        ((InVec1.getOpcode() == ISD::UNDEF ||
          InVec3.getOpcode() == ISD::UNDEF) || InVec1 == InVec3))
      return DAG.getNode(X86ISD::FHSUB, DL, VT, InVec0, InVec1);
  } else if (VT == MVT::v8i32 || VT == MVT::v16i16) {
    // Try to match an AVX2 horizontal add/sub of signed integers.
    SDValue InVec2, InVec3;
    unsigned X86Opcode;
    bool CanFold = true;

    if (isHorizontalBinOp(BV, ISD::ADD, DAG, 0, Half, InVec0, InVec1) &&
        isHorizontalBinOp(BV, ISD::ADD, DAG, Half, NumElts, InVec2, InVec3) &&
        ((InVec0.getOpcode() == ISD::UNDEF ||
          InVec2.getOpcode() == ISD::UNDEF) || InVec0 == InVec2) &&
        ((InVec1.getOpcode() == ISD::UNDEF ||
          InVec3.getOpcode() == ISD::UNDEF) || InVec1 == InVec3))
      X86Opcode = X86ISD::HADD;
    else if (isHorizontalBinOp(BV, ISD::SUB, DAG, 0, Half, InVec0, InVec1) &&
        isHorizontalBinOp(BV, ISD::SUB, DAG, Half, NumElts, InVec2, InVec3) &&
        ((InVec0.getOpcode() == ISD::UNDEF ||
          InVec2.getOpcode() == ISD::UNDEF) || InVec0 == InVec2) &&
        ((InVec1.getOpcode() == ISD::UNDEF ||
          InVec3.getOpcode() == ISD::UNDEF) || InVec1 == InVec3))
      X86Opcode = X86ISD::HSUB;
    else
      CanFold = false;

    if (CanFold) {
      // Fold this build_vector into a single horizontal add/sub.
      // Do this only if the target has AVX2.
      if (Subtarget->hasAVX2())
        return DAG.getNode(X86Opcode, DL, VT, InVec0, InVec1);

      // Do not try to expand this build_vector into a pair of horizontal
      // add/sub if we can emit a pair of scalar add/sub.
      if (NumUndefsLO + 1 == Half || NumUndefsHI + 1 == Half)
        return SDValue();

      // Convert this build_vector into a pair of horizontal binops followed
      // by a concat vector.
      bool isUndefLO = NumUndefsLO == Half;
      bool isUndefHI = NumUndefsHI == Half;
      return ExpandHorizontalBinOp(InVec0, InVec1, DL, DAG, X86Opcode, false,
                                   isUndefLO, isUndefHI);
    }
  }

  if ((VT == MVT::v8f32 || VT == MVT::v4f64 || VT == MVT::v8i32 ||
       VT == MVT::v16i16) && Subtarget->hasAVX()) {
    unsigned X86Opcode;
    if (isHorizontalBinOp(BV, ISD::ADD, DAG, 0, NumElts, InVec0, InVec1))
      X86Opcode = X86ISD::HADD;
    else if (isHorizontalBinOp(BV, ISD::SUB, DAG, 0, NumElts, InVec0, InVec1))
      X86Opcode = X86ISD::HSUB;
    else if (isHorizontalBinOp(BV, ISD::FADD, DAG, 0, NumElts, InVec0, InVec1))
      X86Opcode = X86ISD::FHADD;
    else if (isHorizontalBinOp(BV, ISD::FSUB, DAG, 0, NumElts, InVec0, InVec1))
      X86Opcode = X86ISD::FHSUB;
    else
      return SDValue();

    // Don't try to expand this build_vector into a pair of horizontal add/sub
    // if we can simply emit a pair of scalar add/sub.
    if (NumUndefsLO + 1 == Half || NumUndefsHI + 1 == Half)
      return SDValue();

    // Convert this build_vector into two horizontal add/sub followed by
    // a concat vector.
    bool isUndefLO = NumUndefsLO == Half;
    bool isUndefHI = NumUndefsHI == Half;
    return ExpandHorizontalBinOp(InVec0, InVec1, DL, DAG, X86Opcode, true,
                                 isUndefLO, isUndefHI);
  }

  return SDValue();
}

SDValue
X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const {
  SDLoc dl(Op);

  MVT VT = Op.getSimpleValueType();
  MVT ExtVT = VT.getVectorElementType();
  unsigned NumElems = Op.getNumOperands();

  // Generate vectors for predicate vectors.
  if (VT.getScalarType() == MVT::i1 && Subtarget->hasAVX512())
    return LowerBUILD_VECTORvXi1(Op, DAG);

  // Vectors containing all zeros can be matched by pxor and xorps later.
  if (ISD::isBuildVectorAllZeros(Op.getNode())) {
    // Canonicalize this to <4 x i32> to 1) ensure the zero vectors are CSE'd
    // and 2) ensure that i64 scalars are eliminated on x86-32 hosts.
    if (VT == MVT::v4i32 || VT == MVT::v8i32 || VT == MVT::v16i32)
      return Op;

    return getZeroVector(VT, Subtarget, DAG, dl);
  }

  // Vectors containing all ones can be matched by pcmpeqd on 128-bit width
  // vectors or broken into v4i32 operations on 256-bit vectors. AVX2 can use
  // vpcmpeqd on 256-bit vectors.
  if (Subtarget->hasSSE2() && ISD::isBuildVectorAllOnes(Op.getNode())) {
    if (VT == MVT::v4i32 || (VT == MVT::v8i32 && Subtarget->hasInt256()))
      return Op;

    if (!VT.is512BitVector())
      return getOnesVector(VT, Subtarget->hasInt256(), DAG, dl);
  }

  SDValue Broadcast = LowerVectorBroadcast(Op, Subtarget, DAG);
  if (Broadcast.getNode())
    return Broadcast;

  unsigned EVTBits = ExtVT.getSizeInBits();

  unsigned NumZero = 0;
  unsigned NumNonZero = 0;
  unsigned NonZeros = 0;
  bool IsAllConstants = true;
  SmallSet<SDValue, 8> Values;
  for (unsigned i = 0; i < NumElems; ++i) {
    SDValue Elt = Op.getOperand(i);
    if (Elt.getOpcode() == ISD::UNDEF)
      continue;
    Values.insert(Elt);
    if (Elt.getOpcode() != ISD::Constant &&
        Elt.getOpcode() != ISD::ConstantFP)
      IsAllConstants = false;
    if (X86::isZeroNode(Elt))
      NumZero++;
    else {
      NonZeros |= (1 << i);
      NumNonZero++;
    }
  }

  // All undef vector. Return an UNDEF. All zero vectors were handled above.
  if (NumNonZero == 0)
    return DAG.getUNDEF(VT);

  // Special case for single non-zero, non-undef, element.
  if (NumNonZero == 1) {
    unsigned Idx = countTrailingZeros(NonZeros);
    SDValue Item = Op.getOperand(Idx);

    // If this is an insertion of an i64 value on x86-32, and if the top bits
    // of the value are obviously zero, truncate the value to i32 and do the
    // insertion that way. Only do this if the value is non-constant or if the
    // value is a constant being inserted into element 0. It is cheaper to do
    // a constant pool load than it is to do a movd + shuffle.
    if (ExtVT == MVT::i64 && !Subtarget->is64Bit() &&
        (!IsAllConstants || Idx == 0)) {
      if (DAG.MaskedValueIsZero(Item, APInt::getBitsSet(64, 32, 64))) {
        // Handle SSE only.
        assert(VT == MVT::v2i64 && "Expected an SSE value type!");
        EVT VecVT = MVT::v4i32;
        unsigned VecElts = 4;

        // Truncate the value (which may itself be a constant) to i32, and
        // convert it to a vector with movd (S2V+shuffle to zero extend).
        Item = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Item);
        Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VecVT, Item);

        // If using the new shuffle lowering, just directly insert this.
        if (ExperimentalVectorShuffleLowering)
          return DAG.getNode(
              ISD::BITCAST, dl, VT,
              getShuffleVectorZeroOrUndef(Item, Idx * 2, true, Subtarget, DAG));

        Item = getShuffleVectorZeroOrUndef(Item, 0, true, Subtarget, DAG);

        // Now we have our 32-bit value zero extended in the low element of
        // a vector. If Idx != 0, swizzle it into place.
        if (Idx != 0) {
          SmallVector<int, 4> Mask;
          Mask.push_back(Idx);
          for (unsigned i = 1; i != VecElts; ++i)
            Mask.push_back(i);
          Item = DAG.getVectorShuffle(VecVT, dl, Item, DAG.getUNDEF(VecVT),
                                      &Mask[0]);
        }
        return DAG.getNode(ISD::BITCAST, dl, VT, Item);
      }
    }

    // If we have a constant or non-constant insertion into the low element of
    // a vector, we can do this with SCALAR_TO_VECTOR + shuffle of zero into
    // the rest of the elements. This will be matched as movd/movq/movss/movsd
    // depending on what the source datatype is.
    if (Idx == 0) {
      if (NumZero == 0)
        return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Item);

      if (ExtVT == MVT::i32 || ExtVT == MVT::f32 || ExtVT == MVT::f64 ||
          (ExtVT == MVT::i64 && Subtarget->is64Bit())) {
        if (VT.is256BitVector() || VT.is512BitVector()) {
          SDValue ZeroVec = getZeroVector(VT, Subtarget, DAG, dl);
          return DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, ZeroVec,
                             Item, DAG.getIntPtrConstant(0));
        }
        assert(VT.is128BitVector() && "Expected an SSE value type!");
        Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Item);
        // Turn it into a MOVL (i.e. movss, movsd, or movd) to a zero vector.
        return getShuffleVectorZeroOrUndef(Item, 0, true, Subtarget, DAG);
      }

      if (ExtVT == MVT::i16 || ExtVT == MVT::i8) {
        Item = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, Item);
        Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32, Item);
        if (VT.is256BitVector()) {
          SDValue ZeroVec = getZeroVector(MVT::v8i32, Subtarget, DAG, dl);
          Item = Insert128BitVector(ZeroVec, Item, 0, DAG, dl);
        } else {
          assert(VT.is128BitVector() && "Expected an SSE value type!");
          Item = getShuffleVectorZeroOrUndef(Item, 0, true, Subtarget, DAG);
        }
        return DAG.getNode(ISD::BITCAST, dl, VT, Item);
      }
    }

    // Is it a vector logical left shift?
    if (NumElems == 2 && Idx == 1 &&
        X86::isZeroNode(Op.getOperand(0)) &&
        !X86::isZeroNode(Op.getOperand(1))) {
      unsigned NumBits = VT.getSizeInBits();
      return getVShift(true, VT,
                       DAG.getNode(ISD::SCALAR_TO_VECTOR, dl,
                                   VT, Op.getOperand(1)),
                       NumBits/2, DAG, *this, dl);
    }

    if (IsAllConstants) // Otherwise, it's better to do a constpool load.
      return SDValue();

    // Otherwise, if this is a vector with i32 or f32 elements, and the element
    // is a non-constant being inserted into an element other than the low one,
    // we can't use a constant pool load. Instead, use SCALAR_TO_VECTOR (aka
    // movd/movss) to move this into the low element, then shuffle it into
    // place.
    if (EVTBits == 32) {
      Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Item);

      // If using the new shuffle lowering, just directly insert this.
      if (ExperimentalVectorShuffleLowering)
        return getShuffleVectorZeroOrUndef(Item, Idx, NumZero > 0, Subtarget,
                                           DAG);

      // Turn it into a shuffle of zero and zero-extended scalar to vector.
      Item = getShuffleVectorZeroOrUndef(Item, 0, NumZero > 0, Subtarget, DAG);
      SmallVector<int, 8> MaskVec;
      for (unsigned i = 0; i != NumElems; ++i)
        MaskVec.push_back(i == Idx ? 0 : 1);
      return DAG.getVectorShuffle(VT, dl, Item, DAG.getUNDEF(VT), &MaskVec[0]);
    }
  }

  // Splat is obviously ok. Let legalizer expand it to a shuffle.
  if (Values.size() == 1) {
    if (EVTBits == 32) {
      // Instead of a shuffle like this:
      //   shuffle (scalar_to_vector (load (ptr + 4))), undef, <0, 0, 0, 0>
      // check if it's possible to issue this instead:
      //   shuffle (vload ptr), undef, <1, 1, 1, 1>
      unsigned Idx = countTrailingZeros(NonZeros);
      SDValue Item = Op.getOperand(Idx);
      if (Op.getNode()->isOnlyUserOf(Item.getNode()))
        return LowerAsSplatVectorLoad(Item, VT, dl, DAG);
    }
    return SDValue();
  }

  // A vector full of immediates; various special cases are already
  // handled, so this is best done with a single constant-pool load.
  if (IsAllConstants)
    return SDValue();

  // For AVX-length vectors, see if we can use a vector load to get all of the
  // elements, otherwise build the individual 128-bit pieces and use
  // shuffles to put them in place.
  if (VT.is256BitVector() || VT.is512BitVector()) {
    SmallVector<SDValue, 64> V;
    for (unsigned i = 0; i != NumElems; ++i)
      V.push_back(Op.getOperand(i));

    // Check for a build vector of consecutive loads.
    if (SDValue LD = EltsFromConsecutiveLoads(VT, V, dl, DAG, false))
      return LD;

    EVT HVT = EVT::getVectorVT(*DAG.getContext(), ExtVT, NumElems/2);

    // Build both the lower and upper subvector.
    SDValue Lower = DAG.getNode(ISD::BUILD_VECTOR, dl, HVT,
                                makeArrayRef(&V[0], NumElems/2));
    SDValue Upper = DAG.getNode(ISD::BUILD_VECTOR, dl, HVT,
                                makeArrayRef(&V[NumElems / 2], NumElems/2));

    // Recreate the wider vector with the lower and upper part.
    if (VT.is256BitVector())
      return Concat128BitVectors(Lower, Upper, VT, NumElems, DAG, dl);
    return Concat256BitVectors(Lower, Upper, VT, NumElems, DAG, dl);
  }

  // Let legalizer expand 2-wide build_vectors.
  if (EVTBits == 64) {
    if (NumNonZero == 1) {
      // One half is zero or undef.
      unsigned Idx = countTrailingZeros(NonZeros);
      SDValue V2 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT,
                               Op.getOperand(Idx));
      return getShuffleVectorZeroOrUndef(V2, Idx, true, Subtarget, DAG);
    }
    return SDValue();
  }

  // If element VT is < 32 bits, convert it to inserts into a zero vector.
  if (EVTBits == 8 && NumElems == 16) {
    SDValue V = LowerBuildVectorv16i8(Op, NonZeros, NumNonZero, NumZero, DAG,
                                      Subtarget, *this);
    if (V.getNode()) return V;
  }

  if (EVTBits == 16 && NumElems == 8) {
    SDValue V = LowerBuildVectorv8i16(Op, NonZeros, NumNonZero, NumZero, DAG,
                                      Subtarget, *this);
    if (V.getNode()) return V;
  }

  // If element VT is == 32 bits and has 4 elems, try to generate an INSERTPS.
  if (EVTBits == 32 && NumElems == 4) {
    SDValue V = LowerBuildVectorv4x32(Op, DAG, Subtarget, *this);
    if (V.getNode())
      return V;
  }

  // If element VT is == 32 bits, turn it into a number of shuffles.
  SmallVector<SDValue, 8> V(NumElems);
  if (NumElems == 4 && NumZero > 0) {
    for (unsigned i = 0; i < 4; ++i) {
      bool isZero = !(NonZeros & (1 << i));
      if (isZero)
        V[i] = getZeroVector(VT, Subtarget, DAG, dl);
      else
        V[i] = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(i));
    }

    for (unsigned i = 0; i < 2; ++i) {
      switch ((NonZeros & (0x3 << i*2)) >> (i*2)) {
        default: break;
        case 0:
          V[i] = V[i*2];  // Must be a zero vector.
          break;
        case 1:
          V[i] = getMOVL(DAG, dl, VT, V[i*2+1], V[i*2]);
          break;
        case 2:
          V[i] = getMOVL(DAG, dl, VT, V[i*2], V[i*2+1]);
          break;
        case 3:
          V[i] = getUnpackl(DAG, dl, VT, V[i*2], V[i*2+1]);
          break;
      }
    }

    bool Reverse1 = (NonZeros & 0x3) == 2;
    bool Reverse2 = ((NonZeros & (0x3 << 2)) >> 2) == 2;
    int MaskVec[] = {
      Reverse1 ? 1 : 0,
      Reverse1 ? 0 : 1,
      static_cast<int>(Reverse2 ? NumElems+1 : NumElems),
      static_cast<int>(Reverse2 ? NumElems   : NumElems+1)
    };
    return DAG.getVectorShuffle(VT, dl, V[0], V[1], &MaskVec[0]);
  }

  if (Values.size() > 1 && VT.is128BitVector()) {
    // Check for a build vector of consecutive loads.
    for (unsigned i = 0; i < NumElems; ++i)
      V[i] = Op.getOperand(i);

    // Check for elements which are consecutive loads.
    SDValue LD = EltsFromConsecutiveLoads(VT, V, dl, DAG, false);
    if (LD.getNode())
      return LD;

    // Check for a build vector from mostly shuffle plus few inserting.
    SDValue Sh = buildFromShuffleMostly(Op, DAG);
    if (Sh.getNode())
      return Sh;

    // For SSE 4.1, use insertps to put the high elements into the low element.
    if (Subtarget->hasSSE41()) {
      SDValue Result;
      if (Op.getOperand(0).getOpcode() != ISD::UNDEF)
        Result = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(0));
      else
        Result = DAG.getUNDEF(VT);

      for (unsigned i = 1; i < NumElems; ++i) {
        if (Op.getOperand(i).getOpcode() == ISD::UNDEF) continue;
        Result = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Result,
                             Op.getOperand(i), DAG.getIntPtrConstant(i));
      }
      return Result;
    }

    // Otherwise, expand into a number of unpckl*, starting by extending each
    // of our (non-undef) elements to the full vector width with the element
    // in the bottom slot of the vector (which generates no code for SSE).
    for (unsigned i = 0; i < NumElems; ++i) {
      if (Op.getOperand(i).getOpcode() != ISD::UNDEF)
        V[i] = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(i));
      else
        V[i] = DAG.getUNDEF(VT);
    }

    // Next, we iteratively mix elements, e.g. for v4f32:
    //   Step 1: unpcklps 0, 2 ==> X: <?, ?, 2, 0>
    //         : unpcklps 1, 3 ==> Y: <?, ?, 3, 1>
    //   Step 2: unpcklps X, Y ==>    <3, 2, 1, 0>
    unsigned EltStride = NumElems >> 1;
    while (EltStride != 0) {
      for (unsigned i = 0; i < EltStride; ++i) {
        // If V[i+EltStride] is undef and this is the first round of mixing,
        // then it is safe to just drop this shuffle: V[i] is already in the
        // right place, the one element (since it's the first round) being
        // inserted as undef can be dropped. This isn't safe for successive
        // rounds because they will permute elements within both vectors.
        if (V[i+EltStride].getOpcode() == ISD::UNDEF &&
            EltStride == NumElems/2)
          continue;

        V[i] = getUnpackl(DAG, dl, VT, V[i], V[i + EltStride]);
      }
      EltStride >>= 1;
    }
    return V[0];
  }
  return SDValue();
}

// LowerAVXCONCAT_VECTORS - 256-bit AVX can use the vinsertf128 instruction
// to create 256-bit vectors from two other 128-bit ones.
static SDValue LowerAVXCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) {
  SDLoc dl(Op);
  MVT ResVT = Op.getSimpleValueType();

  assert((ResVT.is256BitVector() ||
          ResVT.is512BitVector()) && "Value type must be 256-/512-bit wide");

  SDValue V1 = Op.getOperand(0);
  SDValue V2 = Op.getOperand(1);
  unsigned NumElems = ResVT.getVectorNumElements();
  if (ResVT.is256BitVector())
    return Concat128BitVectors(V1, V2, ResVT, NumElems, DAG, dl);

  if (Op.getNumOperands() == 4) {
    MVT HalfVT = MVT::getVectorVT(ResVT.getScalarType(),
                                  ResVT.getVectorNumElements()/2);
    SDValue V3 = Op.getOperand(2);
    SDValue V4 = Op.getOperand(3);
    return Concat256BitVectors(
        Concat128BitVectors(V1, V2, HalfVT, NumElems/2, DAG, dl),
        Concat128BitVectors(V3, V4, HalfVT, NumElems/2, DAG, dl),
        ResVT, NumElems, DAG, dl);
  }
  return Concat256BitVectors(V1, V2, ResVT, NumElems, DAG, dl);
}

static SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) {
  MVT LLVM_ATTRIBUTE_UNUSED VT = Op.getSimpleValueType();
  assert((VT.is256BitVector() && Op.getNumOperands() == 2) ||
         (VT.is512BitVector() && (Op.getNumOperands() == 2 ||
          Op.getNumOperands() == 4)));

  // AVX can use the vinsertf128 instruction to create 256-bit vectors
  // from two other 128-bit ones.

  // A 512-bit vector may contain two 256-bit vectors or four 128-bit vectors.
  return LowerAVXCONCAT_VECTORS(Op, DAG);
}

//===----------------------------------------------------------------------===//
// Vector shuffle lowering
//
// This is an experimental code path for lowering vector shuffles on x86. It is
// designed to handle arbitrary vector shuffles and blends, gracefully
// degrading performance as necessary. It works hard to recognize idiomatic
// shuffles and lower them to optimal instruction patterns without leaving
// a framework that allows reasonably efficient handling of all vector shuffle
// operations.
//===----------------------------------------------------------------------===//

/// \brief Tiny helper function to identify a no-op mask.
///
/// This is a somewhat boring predicate function. It checks whether the mask
/// array input, which is assumed to be a single-input shuffle mask of the kind
/// used by the X86 shuffle instructions (not a fully general
/// ShuffleVectorSDNode mask) requires any shuffles to occur. Both undef and an
/// in-place shuffle are 'no-op's.
static bool isNoopShuffleMask(ArrayRef<int> Mask) {
  for (int i = 0, Size = Mask.size(); i < Size; ++i)
    if (Mask[i] != -1 && Mask[i] != i)
      return false;
  return true;
}

/// \brief Helper function to classify a mask as a single-input mask.
///
/// This isn't a generic single-input test because in the vector shuffle
/// lowering we canonicalize single inputs to be the first input operand. This
/// means we can more quickly test for a single input by only checking whether
/// an input from the second operand exists. We also assume that the size of
/// mask corresponds to the size of the input vectors which isn't true in the
/// fully general case.
static bool isSingleInputShuffleMask(ArrayRef<int> Mask) {
  for (int M : Mask)
    if (M >= (int)Mask.size())
      return false;
  return true;
}

/// \brief Test whether there are elements crossing 128-bit lanes in this
/// shuffle mask.
///
/// X86 divides up its shuffles into in-lane and cross-lane shuffle operations
/// and we routinely test for these.
static bool is128BitLaneCrossingShuffleMask(MVT VT, ArrayRef<int> Mask) {
  int LaneSize = 128 / VT.getScalarSizeInBits();
  int Size = Mask.size();
  for (int i = 0; i < Size; ++i)
    if (Mask[i] >= 0 && (Mask[i] % Size) / LaneSize != i / LaneSize)
      return true;
  return false;
}

/// \brief Test whether a shuffle mask is equivalent within each 128-bit lane.
///
/// This checks a shuffle mask to see if it is performing the same
/// 128-bit lane-relative shuffle in each 128-bit lane. This trivially implies
/// that it is also not lane-crossing. It may however involve a blend from the
/// same lane of a second vector.
///
/// The specific repeated shuffle mask is populated in \p RepeatedMask, as it is
/// non-trivial to compute in the face of undef lanes. The representation is
/// *not* suitable for use with existing 128-bit shuffles as it will contain
/// entries from both V1 and V2 inputs to the wider mask.
static bool
is128BitLaneRepeatedShuffleMask(MVT VT, ArrayRef<int> Mask,
                                SmallVectorImpl<int> &RepeatedMask) {
  int LaneSize = 128 / VT.getScalarSizeInBits();
  RepeatedMask.resize(LaneSize, -1);
  int Size = Mask.size();
  for (int i = 0; i < Size; ++i) {
    if (Mask[i] < 0)
      continue;
    if ((Mask[i] % Size) / LaneSize != i / LaneSize)
      // This entry crosses lanes, so there is no way to model this shuffle.
      return false;

    // Ok, handle the in-lane shuffles by detecting if and when they repeat.
    if (RepeatedMask[i % LaneSize] == -1)
      // This is the first non-undef entry in this slot of a 128-bit lane.
      RepeatedMask[i % LaneSize] =
          Mask[i] < Size ? Mask[i] % LaneSize : Mask[i] % LaneSize + Size;
    else if (RepeatedMask[i % LaneSize] + (i / LaneSize) * LaneSize != Mask[i])
      // Found a mismatch with the repeated mask.
      return false;
  }
  return true;
}

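// Worked example: for v8i32 the mask <1, 0, 9, 8, 5, 4, 13, 12> performs the
// same lane-relative shuffle in both 128-bit lanes, so this returns true with
// RepeatedMask = <1, 0, 9, 8> (entries >= 8 denote elements taken from V2).
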
// Hide this symbol with an anonymous namespace instead of 'static' so that MSVC
// 2013 will allow us to use it as a non-type template parameter.
namespace {

/// \brief Implementation of the \c isShuffleEquivalent variadic functor.
///
/// See its documentation for details.
bool isShuffleEquivalentImpl(ArrayRef<int> Mask, ArrayRef<const int *> Args) {
  if (Mask.size() != Args.size())
    return false;
  for (int i = 0, e = Mask.size(); i < e; ++i) {
    assert(*Args[i] >= 0 && "Arguments must be positive integers!");
    if (Mask[i] != -1 && Mask[i] != *Args[i])
      return false;
  }
  return true;
}

} // namespace

/// \brief Checks whether a shuffle mask is equivalent to an explicit list of
/// arguments.
///
/// This is a fast way to test a shuffle mask against a fixed pattern:
///
///   if (isShuffleEquivalent(Mask, 3, 2, 1, 0)) { ... }
///
/// It returns true if the mask is exactly as wide as the argument list, and
/// each element of the mask is either -1 (signifying undef) or the value given
/// in the argument.
static const VariadicFunction1<
    bool, ArrayRef<int>, int, isShuffleEquivalentImpl> isShuffleEquivalent = {};

/// \brief Get a 4-lane 8-bit shuffle immediate for a mask.
///
/// This helper function produces an 8-bit shuffle immediate corresponding to
/// the ubiquitous shuffle encoding scheme used in x86 instructions for
/// shuffling 4 lanes. It can be used with most of the PSHUF instructions for
/// example.
///
/// NB: We rely heavily on "undef" masks preserving the input lane.
static SDValue getV4X86ShuffleImm8ForMask(ArrayRef<int> Mask,
                                          SelectionDAG &DAG) {
  assert(Mask.size() == 4 && "Only 4-lane shuffle masks");
  assert(Mask[0] >= -1 && Mask[0] < 4 && "Out of bound mask element!");
  assert(Mask[1] >= -1 && Mask[1] < 4 && "Out of bound mask element!");
  assert(Mask[2] >= -1 && Mask[2] < 4 && "Out of bound mask element!");
  assert(Mask[3] >= -1 && Mask[3] < 4 && "Out of bound mask element!");

  unsigned Imm = 0;
  Imm |= (Mask[0] == -1 ? 0 : Mask[0]) << 0;
  Imm |= (Mask[1] == -1 ? 1 : Mask[1]) << 2;
  Imm |= (Mask[2] == -1 ? 2 : Mask[2]) << 4;
  Imm |= (Mask[3] == -1 ? 3 : Mask[3]) << 6;
  return DAG.getConstant(Imm, MVT::i8);
}

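// Worked example: the reversal mask <3, 2, 1, 0> yields
//   Imm = 3 | (2 << 2) | (1 << 4) | (0 << 6) = 0x1B,
// the familiar immediate for pshufd/shufps-style full reversals.
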
/// \brief Try to emit a blend instruction for a shuffle.
///
/// This doesn't do any checks for the availability of instructions for blending
/// these values. It relies on the availability of the X86ISD::BLENDI pattern to
/// be matched in the backend with the type given. What it does check for is
/// that the shuffle mask is in fact a blend.
static SDValue lowerVectorShuffleAsBlend(SDLoc DL, MVT VT, SDValue V1,
                                         SDValue V2, ArrayRef<int> Mask,
                                         const X86Subtarget *Subtarget,
                                         SelectionDAG &DAG) {
  unsigned BlendMask = 0;
  for (int i = 0, Size = Mask.size(); i < Size; ++i) {
    if (Mask[i] >= Size) {
      if (Mask[i] != i + Size)
        return SDValue(); // Shuffled V2 input!
      BlendMask |= 1u << i;
      continue;
    }
    if (Mask[i] >= 0 && Mask[i] != i)
      return SDValue(); // Shuffled V1 input!
  }
  switch (VT.SimpleTy) {
  case MVT::v2f64:
  case MVT::v4f32:
  case MVT::v4f64:
  case MVT::v8f32:
    return DAG.getNode(X86ISD::BLENDI, DL, VT, V1, V2,
                       DAG.getConstant(BlendMask, MVT::i8));

  case MVT::v4i64:
  case MVT::v8i32:
    assert(Subtarget->hasAVX2() && "256-bit integer blends require AVX2!");
    // FALLTHROUGH
  case MVT::v2i64:
  case MVT::v4i32:
    // If we have AVX2 it is faster to use VPBLENDD when the shuffle fits into
    // that instruction.
    if (Subtarget->hasAVX2()) {
      // Scale the blend by the number of 32-bit dwords per element.
      int Scale = VT.getScalarSizeInBits() / 32;
      BlendMask = 0;
      for (int i = 0, Size = Mask.size(); i < Size; ++i)
        if (Mask[i] >= Size)
          for (int j = 0; j < Scale; ++j)
            BlendMask |= 1u << (i * Scale + j);

      MVT BlendVT = VT.getSizeInBits() > 128 ? MVT::v8i32 : MVT::v4i32;
      V1 = DAG.getNode(ISD::BITCAST, DL, BlendVT, V1);
      V2 = DAG.getNode(ISD::BITCAST, DL, BlendVT, V2);
      return DAG.getNode(ISD::BITCAST, DL, VT,
                         DAG.getNode(X86ISD::BLENDI, DL, BlendVT, V1, V2,
                                     DAG.getConstant(BlendMask, MVT::i8)));
    }
    // FALLTHROUGH
  case MVT::v8i16: {
    // For integer shuffles we need to expand the mask and cast the inputs to
    // v8i16s prior to blending.
    int Scale = 8 / VT.getVectorNumElements();
    BlendMask = 0;
    for (int i = 0, Size = Mask.size(); i < Size; ++i)
      if (Mask[i] >= Size)
        for (int j = 0; j < Scale; ++j)
          BlendMask |= 1u << (i * Scale + j);

    V1 = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, V1);
    V2 = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, V2);
    return DAG.getNode(ISD::BITCAST, DL, VT,
                       DAG.getNode(X86ISD::BLENDI, DL, MVT::v8i16, V1, V2,
                                   DAG.getConstant(BlendMask, MVT::i8)));
  }

  case MVT::v16i16: {
    assert(Subtarget->hasAVX2() && "256-bit integer blends require AVX2!");
    SmallVector<int, 8> RepeatedMask;
    if (is128BitLaneRepeatedShuffleMask(MVT::v16i16, Mask, RepeatedMask)) {
      // We can lower these with PBLENDW which is mirrored across 128-bit lanes.
      assert(RepeatedMask.size() == 8 && "Repeated mask size doesn't match!");
      BlendMask = 0;
      for (int i = 0; i < 8; ++i)
        if (RepeatedMask[i] >= 16)
          BlendMask |= 1u << i;
      return DAG.getNode(X86ISD::BLENDI, DL, MVT::v16i16, V1, V2,
                         DAG.getConstant(BlendMask, MVT::i8));
    }
  }
    // FALLTHROUGH
  case MVT::v16i8:
  case MVT::v32i8: {
    assert(Subtarget->hasAVX2() && "256-bit integer blends require AVX2!");
    // Scale the blend by the number of bytes per element.
    int Scale = VT.getScalarSizeInBits() / 8;
    assert(Mask.size() * Scale == 32 && "Not a 256-bit vector!");

    // Compute the VSELECT mask. Note that VSELECT is really confusing in the
    // mix of LLVM's code generator and the x86 backend. We tell the code
    // generator that boolean values in the elements of an x86 vector register
    // are -1 for true and 0 for false. We then use the LLVM semantics of 'true'
    // mapping a select to operand #1, and 'false' mapping to operand #2. The
    // reality in x86 is that vector masks (pre-AVX-512) use only the high bit
    // of the element (the remaining are ignored) and 0 in that high bit would
    // mean operand #1 while 1 in the high bit would mean operand #2. So while
    // the LLVM model for boolean values in vector elements gets the relevant
    // bit set, it is set backwards and over constrained relative to x86's
    // actual model.
    SDValue VSELECTMask[32];
    for (int i = 0, Size = Mask.size(); i < Size; ++i)
      for (int j = 0; j < Scale; ++j)
        VSELECTMask[Scale * i + j] =
            Mask[i] < 0 ? DAG.getUNDEF(MVT::i8)
                        : DAG.getConstant(Mask[i] < Size ? -1 : 0, MVT::i8);

    V1 = DAG.getNode(ISD::BITCAST, DL, MVT::v32i8, V1);
    V2 = DAG.getNode(ISD::BITCAST, DL, MVT::v32i8, V2);
    return DAG.getNode(
        ISD::BITCAST, DL, VT,
        DAG.getNode(ISD::VSELECT, DL, MVT::v32i8,
                    DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v32i8, VSELECTMask),
                    V1, V2));
  }

  default:
    llvm_unreachable("Not a supported integer vector type!");
  }
}

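// Worked example: for v4f32 the mask <0, 5, 2, 7> takes lanes 0 and 2 from V1
// and lanes 1 and 3 from V2, so BlendMask becomes 0b1010 and the node is
// (X86ISD::BLENDI V1, V2, 10), i.e. blendps $10.
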
/// \brief Try to lower as a blend of elements from two inputs followed by
/// a single-input permutation.
///
/// This matches the pattern where we can blend elements from two inputs and
/// then reduce the shuffle to a single-input permutation.
static SDValue lowerVectorShuffleAsBlendAndPermute(SDLoc DL, MVT VT, SDValue V1,
                                                   SDValue V2,
                                                   ArrayRef<int> Mask,
                                                   SelectionDAG &DAG) {
  // We build up the blend mask while checking whether a blend is a viable way
  // to reduce the shuffle.
  SmallVector<int, 32> BlendMask(Mask.size(), -1);
  SmallVector<int, 32> PermuteMask(Mask.size(), -1);

  for (int i = 0, Size = Mask.size(); i < Size; ++i) {
    if (Mask[i] < 0)
      continue;

    assert(Mask[i] < Size * 2 && "Shuffle input is out of bounds.");

    if (BlendMask[Mask[i] % Size] == -1)
      BlendMask[Mask[i] % Size] = Mask[i];
    else if (BlendMask[Mask[i] % Size] != Mask[i])
      return SDValue(); // Can't blend in the needed input!

    PermuteMask[i] = Mask[i] % Size;
  }

  SDValue V = DAG.getVectorShuffle(VT, DL, V1, V2, BlendMask);
  return DAG.getVectorShuffle(VT, DL, V, DAG.getUNDEF(VT), PermuteMask);
}

/// \brief Generic routine to lower a shuffle and blend as a decomposed set of
/// unblended shuffles followed by an unshuffled blend.
///
/// This matches the extremely common pattern for handling combined
/// shuffle+blend operations on newer X86 ISAs where we have very fast blend
/// operations.
static SDValue lowerVectorShuffleAsDecomposedShuffleBlend(SDLoc DL, MVT VT,
                                                          SDValue V1,
                                                          SDValue V2,
                                                          ArrayRef<int> Mask,
                                                          SelectionDAG &DAG) {
  // Shuffle the input elements into the desired positions in V1 and V2 and
  // blend them together.
  SmallVector<int, 32> V1Mask(Mask.size(), -1);
  SmallVector<int, 32> V2Mask(Mask.size(), -1);
  SmallVector<int, 32> BlendMask(Mask.size(), -1);
  for (int i = 0, Size = Mask.size(); i < Size; ++i)
    if (Mask[i] >= 0 && Mask[i] < Size) {
      V1Mask[i] = Mask[i];
      BlendMask[i] = i;
    } else if (Mask[i] >= Size) {
      V2Mask[i] = Mask[i] - Size;
      BlendMask[i] = i + Size;
    }

  V1 = DAG.getVectorShuffle(VT, DL, V1, DAG.getUNDEF(VT), V1Mask);
  V2 = DAG.getVectorShuffle(VT, DL, V2, DAG.getUNDEF(VT), V2Mask);
  return DAG.getVectorShuffle(VT, DL, V1, V2, BlendMask);
}

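// Illustrative decomposition: the v4i32 mask <4, 0, 6, 1> becomes
//   V1' = shuffle V1, undef, <u, 0, u, 1>
//   V2' = shuffle V2, undef, <0, u, 2, u>
//   result = shuffle V1', V2', <4, 1, 6, 3>   // a pure lane-wise blend
// so each input is permuted independently and the final step is blend-only.
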
/// \brief Try to lower a vector shuffle as a byte rotation.
///
/// SSSE3 has a generic PALIGNR instruction in x86 that will do an arbitrary
/// byte-rotation of the concatenation of two vectors; pre-SSSE3 can use
/// a PSRLDQ/PSLLDQ/POR pattern to get a similar effect. This routine will
/// try to generically lower a vector shuffle through such a pattern. It
/// does not check for the profitability of lowering either as PALIGNR or
/// PSRLDQ/PSLLDQ/POR, only whether the mask is valid to lower in that form.
/// This matches shuffle vectors that look like:
///
///   v8i16 [11, 12, 13, 14, 15, 0, 1, 2]
///
/// Essentially it concatenates V1 and V2, shifts right by some number of
/// elements, and takes the low elements as the result. Note that while this is
/// specified as a *right shift* because x86 is little-endian, it is a *left
/// rotate* of the vector lanes.
///
/// Note that this only handles 128-bit vector widths currently.
static SDValue lowerVectorShuffleAsByteRotate(SDLoc DL, MVT VT, SDValue V1,
                                              SDValue V2,
                                              ArrayRef<int> Mask,
                                              const X86Subtarget *Subtarget,
                                              SelectionDAG &DAG) {
  assert(!isNoopShuffleMask(Mask) && "We shouldn't lower no-op shuffles!");

  // We need to detect various ways of spelling a rotation:
  //   [11, 12, 13, 14, 15,  0,  1,  2]
  //   [-1, 12, 13, 14, -1, -1,  1, -1]
  //   [-1, -1, -1, -1, -1, -1,  1,  2]
  //   [ 3,  4,  5,  6,  7,  8,  9, 10]
  //   [-1,  4,  5,  6, -1, -1,  9, -1]
  //   [-1,  4,  5,  6, -1, -1, -1, -1]
  int Rotation = 0;
  SDValue Lo, Hi;
  for (int i = 0, Size = Mask.size(); i < Size; ++i) {
    if (Mask[i] == -1)
      continue;
    assert(Mask[i] >= 0 && "Only -1 is a valid negative mask element!");

    // Based on the mod-Size value of this mask element determine where
    // a rotated vector would have started.
    int StartIdx = i - (Mask[i] % Size);
    if (StartIdx == 0)
      // The identity rotation isn't interesting, stop.
      return SDValue();

    // If we found the tail of a vector the rotation must be the missing
    // front. If we found the head of a vector, it must be how much of the head.
    int CandidateRotation = StartIdx < 0 ? -StartIdx : Size - StartIdx;

    if (Rotation == 0)
      Rotation = CandidateRotation;
    else if (Rotation != CandidateRotation)
      // The rotations don't match, so we can't match this mask.
      return SDValue();

    // Compute which value this mask is pointing at.
    SDValue MaskV = Mask[i] < Size ? V1 : V2;

    // Compute which of the two target values this index should be assigned to.
    // This reflects whether the high elements are remaining or the low elements
    // are remaining.
    SDValue &TargetV = StartIdx < 0 ? Hi : Lo;

    // Either set up this value if we've not encountered it before, or check
    // that it remains consistent.
    if (!TargetV)
      TargetV = MaskV;
    else if (TargetV != MaskV)
      // This may be a rotation, but it pulls from the inputs in some
      // unsupported interleaving.
      return SDValue();
  }

  // Check that we successfully analyzed the mask, and normalize the results.
  assert(Rotation != 0 && "Failed to locate a viable rotation!");
  assert((Lo || Hi) && "Failed to find a rotated input vector!");
  if (!Lo)
    Lo = Hi;
  else if (!Hi)
    Hi = Lo;

  assert(VT.getSizeInBits() == 128 &&
         "Rotate-based lowering only supports 128-bit lowering!");
  assert(Mask.size() <= 16 &&
         "Can shuffle at most 16 bytes in a 128-bit vector!");

  // The actual rotate instruction rotates bytes, so we need to scale the
  // rotation based on how many bytes are in the vector.
  int Scale = 16 / Mask.size();

  // SSSE3 targets can use the palignr instruction.
  if (Subtarget->hasSSSE3()) {
    // Cast the inputs to v16i8 to match PALIGNR.
    Lo = DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, Lo);
    Hi = DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, Hi);

    return DAG.getNode(ISD::BITCAST, DL, VT,
                       DAG.getNode(X86ISD::PALIGNR, DL, MVT::v16i8, Hi, Lo,
                                   DAG.getConstant(Rotation * Scale, MVT::i8)));
  }

  // Default SSE2 implementation.
  int LoByteShift = 16 - Rotation * Scale;
  int HiByteShift = Rotation * Scale;

  // Cast the inputs to v2i64 to match PSLLDQ/PSRLDQ.
  Lo = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, Lo);
  Hi = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, Hi);

  SDValue LoShift = DAG.getNode(X86ISD::VSHLDQ, DL, MVT::v2i64, Lo,
                                DAG.getConstant(8 * LoByteShift, MVT::i8));
  SDValue HiShift = DAG.getNode(X86ISD::VSRLDQ, DL, MVT::v2i64, Hi,
                                DAG.getConstant(8 * HiByteShift, MVT::i8));
  return DAG.getNode(ISD::BITCAST, DL, VT,
                     DAG.getNode(ISD::OR, DL, MVT::v2i64, LoShift, HiShift));
}

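// Worked example: for the v8i16 mask [11, 12, 13, 14, 15, 0, 1, 2] the
// rotation is 3 elements (Lo = V1, Hi = V2) and Scale = 16/8 = 2 bytes per
// element, so SSSE3 targets emit (PALIGNR Hi, Lo, 6); pre-SSSE3 targets build
// the same result from shifts by LoByteShift = 10 and HiByteShift = 6 bytes
// combined with a POR.
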
/// \brief Compute whether each element of a shuffle is zeroable.
///
/// A "zeroable" vector shuffle element is one which can be lowered to zero.
/// Either it is an undef element in the shuffle mask, the element of the input
/// referenced is undef, or the element of the input referenced is known to be
/// zero. Many x86 shuffles can zero lanes cheaply and we often want to handle
/// as many lanes with this technique as possible to simplify the remaining
/// shuffle.
static SmallBitVector computeZeroableShuffleElements(ArrayRef<int> Mask,
                                                     SDValue V1, SDValue V2) {
  SmallBitVector Zeroable(Mask.size(), false);

  while (V1.getOpcode() == ISD::BITCAST)
    V1 = V1->getOperand(0);
  while (V2.getOpcode() == ISD::BITCAST)
    V2 = V2->getOperand(0);

  bool V1IsZero = ISD::isBuildVectorAllZeros(V1.getNode());
  bool V2IsZero = ISD::isBuildVectorAllZeros(V2.getNode());

  for (int i = 0, Size = Mask.size(); i < Size; ++i) {
    int M = Mask[i];
    // Handle the easy cases.
    if (M < 0 || (M >= 0 && M < Size && V1IsZero) || (M >= Size && V2IsZero)) {
      Zeroable[i] = true;
      continue;
    }

    // If this is an index into a build_vector node (which has the same number
    // of elements), dig out the input value and use it.
    SDValue V = M < Size ? V1 : V2;
    if (V.getOpcode() != ISD::BUILD_VECTOR || Size != (int)V.getNumOperands())
      continue;

    SDValue Input = V.getOperand(M % Size);
    // The UNDEF opcode check really should be dead code here, but not quite
    // worth asserting on (it isn't invalid, just unexpected).
    if (Input.getOpcode() == ISD::UNDEF || X86::isZeroNode(Input))
      Zeroable[i] = true;
  }

  return Zeroable;
}

/// \brief Try to emit a bitmask instruction for a shuffle.
///
/// This handles cases where we can model a blend exactly as a bitmask due to
/// one of the inputs being zeroable.
static SDValue lowerVectorShuffleAsBitMask(SDLoc DL, MVT VT, SDValue V1,
                                           SDValue V2, ArrayRef<int> Mask,
                                           SelectionDAG &DAG) {
  MVT EltVT = VT.getScalarType();
  int NumEltBits = EltVT.getSizeInBits();
  MVT IntEltVT = MVT::getIntegerVT(NumEltBits);
  SDValue Zero = DAG.getConstant(0, IntEltVT);
  SDValue AllOnes =
      DAG.getConstant(APInt::getAllOnesValue(NumEltBits), IntEltVT);
  if (EltVT.isFloatingPoint()) {
    Zero = DAG.getNode(ISD::BITCAST, DL, EltVT, Zero);
    AllOnes = DAG.getNode(ISD::BITCAST, DL, EltVT, AllOnes);
  }
  SmallVector<SDValue, 16> VMaskOps(Mask.size(), Zero);
  SmallBitVector Zeroable = computeZeroableShuffleElements(Mask, V1, V2);
  SDValue V;
  for (int i = 0, Size = Mask.size(); i < Size; ++i) {
    if (Zeroable[i])
      continue;
    if (Mask[i] % Size != i)
      return SDValue(); // Not a blend.
    if (!V)
      V = Mask[i] < Size ? V1 : V2;
    else if (V != (Mask[i] < Size ? V1 : V2))
      return SDValue(); // Can only let one input through the mask.

    VMaskOps[i] = AllOnes;
  }
  if (!V)
    return SDValue(); // No non-zeroable elements!

  SDValue VMask = DAG.getNode(ISD::BUILD_VECTOR, DL, VT, VMaskOps);
  V = DAG.getNode(VT.isFloatingPoint()
                      ? (unsigned) X86ISD::FAND : (unsigned) ISD::AND,
                  DL, VT, V, VMask);
  return V;
}

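// Illustrative match: with V2 an all-zeros vector, the v4i32 mask <0, 1, 6, 7>
// keeps lanes 0 and 1 of V1 and zeroes the rest, so it lowers to
//   (and V1, <-1, -1, 0, 0>)
// (FAND for floating-point types), a single pand/andps with a constant mask.
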
/// \brief Try to lower a vector shuffle as a byte shift (shifts in zeros).
///
/// Attempts to match a shuffle mask against the PSRLDQ and PSLLDQ SSE2
/// byte-shift instructions. The mask must consist of a shifted sequential
/// shuffle from one of the input vectors and zeroable elements for the
/// remaining 'shifted in' elements.
///
/// Note that this only handles 128-bit vector widths currently.
static SDValue lowerVectorShuffleAsByteShift(SDLoc DL, MVT VT, SDValue V1,
                                             SDValue V2, ArrayRef<int> Mask,
                                             SelectionDAG &DAG) {
  assert(!isNoopShuffleMask(Mask) && "We shouldn't lower no-op shuffles!");

  SmallBitVector Zeroable = computeZeroableShuffleElements(Mask, V1, V2);

  int Size = Mask.size();
  int Scale = 16 / Size;

  for (int Shift = 1; Shift < Size; Shift++) {
    int ByteShift = Shift * Scale;

    // PSRLDQ : (little-endian) right byte shift
    // [ 5,  6,  7, zz, zz, zz, zz, zz]
    // [ -1, 5,  6,  7, zz, zz, zz, zz]
    // [  1, 2, -1, -1, -1, -1, zz, zz]
    bool ZeroableRight = true;
    for (int i = Size - Shift; i < Size; i++) {
      ZeroableRight &= Zeroable[i];
    }

    if (ZeroableRight) {
      bool ValidShiftRight1 =
          isSequentialOrUndefInRange(Mask, 0, Size - Shift, Shift);
      bool ValidShiftRight2 =
          isSequentialOrUndefInRange(Mask, 0, Size - Shift, Size + Shift);

      if (ValidShiftRight1 || ValidShiftRight2) {
        // Cast the inputs to v2i64 to match PSRLDQ.
        SDValue &TargetV = ValidShiftRight1 ? V1 : V2;
        SDValue V = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, TargetV);
        SDValue Shifted = DAG.getNode(X86ISD::VSRLDQ, DL, MVT::v2i64, V,
                                      DAG.getConstant(ByteShift * 8, MVT::i8));
        return DAG.getNode(ISD::BITCAST, DL, VT, Shifted);
      }
    }

    // PSLLDQ : (little-endian) left byte shift
    // [ zz,  0,  1,  2,  3,  4,  5,  6]
    // [ zz, zz, -1, -1,  2,  3,  4, -1]
    // [ zz, zz, zz, zz, zz, zz, -1,  1]
    bool ZeroableLeft = true;
    for (int i = 0; i < Shift; i++) {
      ZeroableLeft &= Zeroable[i];
    }

    if (ZeroableLeft) {
      bool ValidShiftLeft1 =
          isSequentialOrUndefInRange(Mask, Shift, Size - Shift, 0);
      bool ValidShiftLeft2 =
          isSequentialOrUndefInRange(Mask, Shift, Size - Shift, Size);

      if (ValidShiftLeft1 || ValidShiftLeft2) {
        // Cast the inputs to v2i64 to match PSLLDQ.
        SDValue &TargetV = ValidShiftLeft1 ? V1 : V2;
        SDValue V = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, TargetV);
        SDValue Shifted = DAG.getNode(X86ISD::VSHLDQ, DL, MVT::v2i64, V,
                                      DAG.getConstant(ByteShift * 8, MVT::i8));
        return DAG.getNode(ISD::BITCAST, DL, VT, Shifted);
      }
    }
  }

  return SDValue();
}

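// Worked example: for v8i16 the mask [zz, zz, 0, 1, 2, 3, 4, 5] (zz zeroable)
// is a sequential run of V1 shifted left by two elements, so with Scale = 2
// it matches Shift == 2 (ByteShift == 4) and lowers, after a bitcast to
// v2i64, to pslldq $4.
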
/// \brief Try to lower a vector shuffle as a bit shift (shifts in zeros).
///
/// Attempts to match a shuffle mask against the PSRL(W/D/Q) and PSLL(W/D/Q)
/// SSE2 and AVX2 logical bit-shift instructions. The function matches
/// elements from one of the input vectors shuffled to the left or right
/// with zeroable elements 'shifted in'.
static SDValue lowerVectorShuffleAsBitShift(SDLoc DL, MVT VT, SDValue V1,
                                            SDValue V2, ArrayRef<int> Mask,
                                            SelectionDAG &DAG) {
  SmallBitVector Zeroable = computeZeroableShuffleElements(Mask, V1, V2);

  int Size = Mask.size();
  assert(Size == (int)VT.getVectorNumElements() && "Unexpected mask size");

  // PSRL : (little-endian) right bit shift.
  // [  1, zz,  3, zz]
  // [ -1, -1,  7, zz]
  // PSLL : (little-endian) left bit shift.
  // [  1, zz,  3, zz]
  // [ -1,  4, zz, -1]
  auto MatchBitShift = [&](int Shift, int Scale) -> SDValue {
    MVT ShiftSVT = MVT::getIntegerVT(VT.getScalarSizeInBits() * Scale);
    MVT ShiftVT = MVT::getVectorVT(ShiftSVT, Size / Scale);
    assert(DAG.getTargetLoweringInfo().isTypeLegal(ShiftVT) &&
           "Illegal integer vector type");

    bool MatchLeft = true, MatchRight = true;
    for (int i = 0; i != Size; i += Scale) {
      for (int j = 0; j != Shift; j++) {
        MatchLeft &= Zeroable[i + j];
      }
      for (int j = Scale - Shift; j != Scale; j++) {
        MatchRight &= Zeroable[i + j];
      }
    }
    if (!(MatchLeft || MatchRight))
      return SDValue();

    bool MatchV1 = true, MatchV2 = true;
    for (int i = 0; i != Size; i += Scale) {
      unsigned Pos = MatchLeft ? i + Shift : i;
      unsigned Low = MatchLeft ? i : i + Shift;
      unsigned Len = Scale - Shift;
      MatchV1 &= isSequentialOrUndefInRange(Mask, Pos, Len, Low);
      MatchV2 &= isSequentialOrUndefInRange(Mask, Pos, Len, Low + Size);
    }
    if (!(MatchV1 || MatchV2))
      return SDValue();

    // Cast the inputs to ShiftVT to match VSRLI/VSHLI and back again.
    unsigned OpCode = MatchLeft ? X86ISD::VSHLI : X86ISD::VSRLI;
    int ShiftAmt = Shift * VT.getScalarSizeInBits();
    SDValue V = MatchV1 ? V1 : V2;
    V = DAG.getNode(ISD::BITCAST, DL, ShiftVT, V);
    V = DAG.getNode(OpCode, DL, ShiftVT, V, DAG.getConstant(ShiftAmt, MVT::i8));
    return DAG.getNode(ISD::BITCAST, DL, VT, V);
  };

  // SSE/AVX supports logical shifts up to 64-bit integers - so we can just
  // keep doubling the size of the integer elements up to that. We can
  // then shift the elements of the integer vector by whole multiples of
  // their width within the elements of the larger integer vector. Test each
  // multiple to see if we can find a match with the moved element indices
  // and that the shifted in elements are all zeroable.
  for (int Scale = 2; Scale * VT.getScalarSizeInBits() <= 64; Scale *= 2)
    for (int Shift = 1; Shift != Scale; Shift++)
      if (SDValue BitShift = MatchBitShift(Shift, Scale))
        return BitShift;

  // No match.
  return SDValue();
}

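// Worked example: for v4i32 the mask [1, zz, 3, zz] (zz zeroable) matches
// with Scale = 2 and Shift = 1: each i64-sized pair keeps its high dword
// shifted down, so the shuffle lowers to (VSRLI.v2i64 V1, 32), i.e. psrlq $32.
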
/// \brief Lower a vector shuffle as a zero or any extension.
///
/// Given a specific number of elements, element bit width, and extension
/// stride, produce either a zero or any extension based on the available
/// features of the subtarget.
static SDValue lowerVectorShuffleAsSpecificZeroOrAnyExtend(
    SDLoc DL, MVT VT, int Scale, bool AnyExt, SDValue InputV,
    const X86Subtarget *Subtarget, SelectionDAG &DAG) {
  assert(Scale > 1 && "Need a scale to extend.");
  int NumElements = VT.getVectorNumElements();
  int EltBits = VT.getScalarSizeInBits();
  assert((EltBits == 8 || EltBits == 16 || EltBits == 32) &&
         "Only 8, 16, and 32 bit elements can be extended.");
  assert(Scale * EltBits <= 64 && "Cannot zero extend past 64 bits.");

  // Found a valid zext mask! Try various lowering strategies based on the
  // input type and available ISA extensions.
  if (Subtarget->hasSSE41()) {
    MVT ExtVT = MVT::getVectorVT(MVT::getIntegerVT(EltBits * Scale),
                                 NumElements / Scale);
    return DAG.getNode(ISD::BITCAST, DL, VT,
                       DAG.getNode(X86ISD::VZEXT, DL, ExtVT, InputV));
  }

  // For any extends we can cheat for larger element sizes and use shuffle
  // instructions that can fold with a load and/or copy.
  if (AnyExt && EltBits == 32) {
    int PSHUFDMask[4] = {0, -1, 1, -1};
    return DAG.getNode(
        ISD::BITCAST, DL, VT,
        DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32,
                    DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, InputV),
                    getV4X86ShuffleImm8ForMask(PSHUFDMask, DAG)));
  }
  if (AnyExt && EltBits == 16 && Scale > 2) {
    int PSHUFDMask[4] = {0, -1, 0, -1};
    InputV = DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32,
                         DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, InputV),
                         getV4X86ShuffleImm8ForMask(PSHUFDMask, DAG));
    int PSHUFHWMask[4] = {1, -1, -1, -1};
    return DAG.getNode(
        ISD::BITCAST, DL, VT,
        DAG.getNode(X86ISD::PSHUFHW, DL, MVT::v8i16,
                    DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, InputV),
                    getV4X86ShuffleImm8ForMask(PSHUFHWMask, DAG)));
  }

  // If this would require more than 2 unpack instructions to expand, use
  // pshufb when available. We can only use more than 2 unpack instructions
  // when zero extending i8 elements which also makes it easier to use pshufb.
  if (Scale > 4 && EltBits == 8 && Subtarget->hasSSSE3()) {
    assert(NumElements == 16 && "Unexpected byte vector width!");
    SDValue PSHUFBMask[16];
    for (int i = 0; i < 16; ++i)
      PSHUFBMask[i] =
          DAG.getConstant((i % Scale == 0) ? i / Scale : 0x80, MVT::i8);
    InputV = DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, InputV);
    return DAG.getNode(ISD::BITCAST, DL, VT,
                       DAG.getNode(X86ISD::PSHUFB, DL, MVT::v16i8, InputV,
                                   DAG.getNode(ISD::BUILD_VECTOR, DL,
                                               MVT::v16i8, PSHUFBMask)));
  }

  // Otherwise emit a sequence of unpacks.
  do {
    MVT InputVT = MVT::getVectorVT(MVT::getIntegerVT(EltBits), NumElements);
    SDValue Ext = AnyExt ? DAG.getUNDEF(InputVT)
                         : getZeroVector(InputVT, Subtarget, DAG, DL);
    InputV = DAG.getNode(ISD::BITCAST, DL, InputVT, InputV);
    InputV = DAG.getNode(X86ISD::UNPCKL, DL, InputVT, InputV, Ext);
    Scale /= 2;
    EltBits *= 2;
    NumElements /= 2;
  } while (Scale > 1);
  return DAG.getNode(ISD::BITCAST, DL, VT, InputV);
}

8036 /// \brief Try to lower a vector shuffle as a zero extension on any microarch.
8038 /// This routine will try to do everything in its power to cleverly lower
8039 /// a shuffle which happens to match the pattern of a zero extend. It doesn't
8040 /// check for the profitability of this lowering, it tries to aggressively
8041 /// match this pattern. It will use all of the micro-architectural details it
8042 /// can to emit an efficient lowering. It handles both blends with all-zero
8043 /// inputs to explicitly zero-extend and undef-lanes (sometimes undef due to
8044 /// masking out later).
8046 /// The reason we have dedicated lowering for zext-style shuffles is that they
8047 /// are both incredibly common and often quite performance sensitive.
8048 static SDValue lowerVectorShuffleAsZeroOrAnyExtend(
8049 SDLoc DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
8050 const X86Subtarget *Subtarget, SelectionDAG &DAG) {
8051 SmallBitVector Zeroable = computeZeroableShuffleElements(Mask, V1, V2);
8053 int Bits = VT.getSizeInBits();
8054 int NumElements = VT.getVectorNumElements();
8055 assert(VT.getScalarSizeInBits() <= 32 &&
8056 "Exceeds 32-bit integer zero extension limit");
8057 assert((int)Mask.size() == NumElements && "Unexpected shuffle mask size");
8059 // Define a helper function to check a particular ext-scale and lower to it if
8061 auto Lower = [&](int Scale) -> SDValue {
8064 for (int i = 0; i < NumElements; ++i) {
8066 continue; // Valid anywhere but doesn't tell us anything.
8067 if (i % Scale != 0) {
8068 // Each of the extended elements need to be zeroable.
8072 // We no longer are in the anyext case.
8077 // Each of the base elements needs to be consecutive indices into the
8078 // same input vector.
8079 SDValue V = Mask[i] < NumElements ? V1 : V2;
8082 else if (InputV != V)
8083 return SDValue(); // Flip-flopping inputs.
8085 if (Mask[i] % NumElements != i / Scale)
8086 return SDValue(); // Non-consecutive strided elements.
8089 // If we fail to find an input, we have a zero-shuffle which should always
8090 // have already been handled.
8091 // FIXME: Maybe handle this here in case during blending we end up with one?
8095 return lowerVectorShuffleAsSpecificZeroOrAnyExtend(
8096 DL, VT, Scale, AnyExt, InputV, Subtarget, DAG);
8099 // The widest scale possible for extending is to a 64-bit integer.
8100 assert(Bits % 64 == 0 &&
8101 "The number of bits in a vector must be divisible by 64 on x86!");
8102 int NumExtElements = Bits / 64;
8104 // Each iteration, try extending the elements half as much, but into twice as
8106 for (; NumExtElements < NumElements; NumExtElements *= 2) {
8107 assert(NumElements % NumExtElements == 0 &&
8108 "The input vector size must be divisible by the extended size.");
8109 if (SDValue V = Lower(NumElements / NumExtElements))
8113 // General extends failed, but 128-bit vectors may be able to use MOVQ.
8117 // Returns one of the source operands if the shuffle can be reduced to a
8118 // MOVQ, copying the lower 64-bits and zero-extending to the upper 64-bits.
8119 auto CanZExtLowHalf = [&]() {
8120 for (int i = NumElements / 2; i != NumElements; i++)
8123 if (isSequentialOrUndefInRange(Mask, 0, NumElements / 2, 0))
8125 if (isSequentialOrUndefInRange(Mask, 0, NumElements / 2, NumElements))
8130 if (SDValue V = CanZExtLowHalf()) {
8131 V = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, V);
8132 V = DAG.getNode(X86ISD::VZEXT_MOVL, DL, MVT::v2i64, V);
8133 return DAG.getNode(ISD::BITCAST, DL, VT, V);
8136 // No viable ext lowering found.
8140 /// \brief Try to get a scalar value for a specific element of a vector.
8142 /// Looks through BUILD_VECTOR and SCALAR_TO_VECTOR nodes to find a scalar.
8143 static SDValue getScalarValueForVectorElement(SDValue V, int Idx,
8144 SelectionDAG &DAG) {
8145 MVT VT = V.getSimpleValueType();
8146 MVT EltVT = VT.getVectorElementType();
8147 while (V.getOpcode() == ISD::BITCAST)
8148 V = V.getOperand(0);
8149 // If the bitcasts shift the element size, we can't extract an equivalent
8151 MVT NewVT = V.getSimpleValueType();
8152 if (!NewVT.isVector() || NewVT.getScalarSizeInBits() != VT.getScalarSizeInBits())
8155 if (V.getOpcode() == ISD::BUILD_VECTOR ||
8156 (Idx == 0 && V.getOpcode() == ISD::SCALAR_TO_VECTOR))
8157 return DAG.getNode(ISD::BITCAST, SDLoc(V), EltVT, V.getOperand(Idx));
8162 /// \brief Helper to test for a load that can be folded with x86 shuffles.
8164 /// This is particularly important because the set of instructions varies
8165 /// significantly based on whether the operand is a load or not.
8166 static bool isShuffleFoldableLoad(SDValue V) {
8167 while (V.getOpcode() == ISD::BITCAST)
8168 V = V.getOperand(0);
8170 return ISD::isNON_EXTLoad(V.getNode());
8173 /// \brief Try to lower insertion of a single element into a zero vector.
8175 /// This is a common pattern that we have especially efficient patterns to lower
8176 /// across all subtarget feature sets.
8177 static SDValue lowerVectorShuffleAsElementInsertion(
8178 MVT VT, SDLoc DL, SDValue V1, SDValue V2, ArrayRef<int> Mask,
8179 const X86Subtarget *Subtarget, SelectionDAG &DAG) {
8180 SmallBitVector Zeroable = computeZeroableShuffleElements(Mask, V1, V2);
8182 MVT EltVT = VT.getVectorElementType();
8184 int V2Index = std::find_if(Mask.begin(), Mask.end(),
8185 [&Mask](int M) { return M >= (int)Mask.size(); }) -
8187 bool IsV1Zeroable = true;
8188 for (int i = 0, Size = Mask.size(); i < Size; ++i)
8189 if (i != V2Index && !Zeroable[i]) {
8190 IsV1Zeroable = false;
8194 // Check for a single input from a SCALAR_TO_VECTOR node.
8195 // FIXME: All of this should be canonicalized into INSERT_VECTOR_ELT and
8196 // all the smarts here sunk into that routine. However, the current
8197 // lowering of BUILD_VECTOR makes that nearly impossible until the old
8198 // vector shuffle lowering is dead.
8199 if (SDValue V2S = getScalarValueForVectorElement(
8200 V2, Mask[V2Index] - Mask.size(), DAG)) {
8201 // We need to zext the scalar if it is smaller than an i32.
8202 V2S = DAG.getNode(ISD::BITCAST, DL, EltVT, V2S);
8203 if (EltVT == MVT::i8 || EltVT == MVT::i16) {
8204 // Using zext to expand a narrow element won't work for non-zero
8209 // Zero-extend directly to i32.
8211 V2S = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i32, V2S);
8213 V2 = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, ExtVT, V2S);
8214 } else if (Mask[V2Index] != (int)Mask.size() || EltVT == MVT::i8 ||
8215 EltVT == MVT::i16) {
8216 // Either not inserting from the low element of the input or the input
8217 // element size is too small to use VZEXT_MOVL to clear the high bits.
8221 if (!IsV1Zeroable) {
8222 // If V1 can't be treated as a zero vector we have fewer options to lower
8223 // this. We can't support integer vectors or non-zero targets cheaply, and
8224 // the V1 elements can't be permuted in any way.
8225 assert(VT == ExtVT && "Cannot change extended type when non-zeroable!");
8226 if (!VT.isFloatingPoint() || V2Index != 0)
8228 SmallVector<int, 8> V1Mask(Mask.begin(), Mask.end());
8229 V1Mask[V2Index] = -1;
8230 if (!isNoopShuffleMask(V1Mask))
8232 // This is essentially a special case blend operation, but if we have
8233 // general purpose blend operations, they are always faster. Bail and let
8234 // the rest of the lowering handle these as blends.
8235 if (Subtarget->hasSSE41())
8238 // Otherwise, use MOVSD or MOVSS.
8239 assert((EltVT == MVT::f32 || EltVT == MVT::f64) &&
8240 "Only two types of floating point element types to handle!");
8241 return DAG.getNode(EltVT == MVT::f32 ? X86ISD::MOVSS : X86ISD::MOVSD, DL,
8245 // This lowering only works for the low element with floating point vectors.
8246 if (VT.isFloatingPoint() && V2Index != 0)
8249 V2 = DAG.getNode(X86ISD::VZEXT_MOVL, DL, ExtVT, V2);
8251 V2 = DAG.getNode(ISD::BITCAST, DL, VT, V2);
8254 // If we have 4 or fewer lanes we can cheaply shuffle the element into
8255 // the desired position. Otherwise it is more efficient to do a vector
8256 // shift left. We know that we can do a vector shift left because all
8257 // the inputs are zero.
8258 if (VT.isFloatingPoint() || VT.getVectorNumElements() <= 4) {
8259 SmallVector<int, 4> V2Shuffle(Mask.size(), 1);
8260 V2Shuffle[V2Index] = 0;
8261 V2 = DAG.getVectorShuffle(VT, DL, V2, DAG.getUNDEF(VT), V2Shuffle);
8263 V2 = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, V2);
8265 X86ISD::VSHLDQ, DL, MVT::v2i64, V2,
8267 V2Index * EltVT.getSizeInBits(),
8268 DAG.getTargetLoweringInfo().getScalarShiftAmountTy(MVT::v2i64)));
8269 V2 = DAG.getNode(ISD::BITCAST, DL, VT, V2);
8275 /// \brief Try to lower broadcast of a single element.
8277 /// For convenience, this code also bundles all of the subtarget feature set
8278 /// filtering. While a little annoying to re-dispatch on type here, there isn't
8279 /// a convenient way to factor it out.
8280 static SDValue lowerVectorShuffleAsBroadcast(MVT VT, SDLoc DL, SDValue V,
8282 const X86Subtarget *Subtarget,
8283 SelectionDAG &DAG) {
8284 if (!Subtarget->hasAVX())
8286 if (VT.isInteger() && !Subtarget->hasAVX2())
8289 // Check that the mask is a broadcast.
8290 int BroadcastIdx = -1;
8292 if (M >= 0 && BroadcastIdx == -1)
8294 else if (M >= 0 && M != BroadcastIdx)
8297 assert(BroadcastIdx < (int)Mask.size() && "We only expect to be called with "
8298 "a sorted mask where the broadcast "
8301 // Go up the chain of (vector) values to try and find a scalar load that
8302 // we can combine with the broadcast.
8304 switch (V.getOpcode()) {
8305 case ISD::CONCAT_VECTORS: {
8306 int OperandSize = Mask.size() / V.getNumOperands();
8307 V = V.getOperand(BroadcastIdx / OperandSize);
8308 BroadcastIdx %= OperandSize;
8312 case ISD::INSERT_SUBVECTOR: {
8313 SDValue VOuter = V.getOperand(0), VInner = V.getOperand(1);
8314 auto ConstantIdx = dyn_cast<ConstantSDNode>(V.getOperand(2));
8318 int BeginIdx = (int)ConstantIdx->getZExtValue();
8320 BeginIdx + (int)VInner.getValueType().getVectorNumElements();
8321 if (BroadcastIdx >= BeginIdx && BroadcastIdx < EndIdx) {
8322 BroadcastIdx -= BeginIdx;
8333 // Check if this is a broadcast of a scalar. We special case lowering
8334 // for scalars so that we can more effectively fold with loads.
8335 if (V.getOpcode() == ISD::BUILD_VECTOR ||
8336 (V.getOpcode() == ISD::SCALAR_TO_VECTOR && BroadcastIdx == 0)) {
8337 V = V.getOperand(BroadcastIdx);
8339 // If the scalar isn't a load we can't broadcast from it in AVX1, only with
8341 if (!Subtarget->hasAVX2() && !isShuffleFoldableLoad(V))
8343 } else if (BroadcastIdx != 0 || !Subtarget->hasAVX2()) {
8344 // We can't broadcast from a vector register w/o AVX2, and we can only
8345 // broadcast from the zero-element of a vector register.
8349 return DAG.getNode(X86ISD::VBROADCAST, DL, VT, V);
8352 // Check for whether we can use INSERTPS to perform the shuffle. We only use
8353 // INSERTPS when the V1 elements are already in the correct locations
8354 // because otherwise we can just always use two SHUFPS instructions which
8355 // are much smaller to encode than a SHUFPS and an INSERTPS. We can also
8356 // perform INSERTPS if a single V1 element is out of place and all V2
8357 // elements are zeroable.
8358 static SDValue lowerVectorShuffleAsInsertPS(SDValue Op, SDValue V1, SDValue V2,
8360 SelectionDAG &DAG) {
8361 assert(Op.getSimpleValueType() == MVT::v4f32 && "Bad shuffle type!");
8362 assert(V1.getSimpleValueType() == MVT::v4f32 && "Bad operand type!");
8363 assert(V2.getSimpleValueType() == MVT::v4f32 && "Bad operand type!");
8364 assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");
8366 SmallBitVector Zeroable = computeZeroableShuffleElements(Mask, V1, V2);
8369 int V1DstIndex = -1;
8370 int V2DstIndex = -1;
8371 bool V1UsedInPlace = false;
8373 for (int i = 0; i < 4; i++) {
8374 // Synthesize a zero mask from the zeroable elements (includes undefs).
8380 // Flag if we use any V1 inputs in place.
8382 V1UsedInPlace = true;
8386 // We can only insert a single non-zeroable element.
8387 if (V1DstIndex != -1 || V2DstIndex != -1)
8391 // V1 input out of place for insertion.
8394 // V2 input for insertion.
8399 // Don't bother if we have no (non-zeroable) element for insertion.
8400 if (V1DstIndex == -1 && V2DstIndex == -1)
8403 // Determine element insertion src/dst indices. The src index is from the
8404 // start of the inserted vector, not the start of the concatenated vector.
8405 unsigned V2SrcIndex = 0;
8406 if (V1DstIndex != -1) {
8407 // If we have a V1 input out of place, we use V1 as the V2 element insertion
8408 // and don't use the original V2 at all.
8409 V2SrcIndex = Mask[V1DstIndex];
8410 V2DstIndex = V1DstIndex;
8413 V2SrcIndex = Mask[V2DstIndex] - 4;
8416 // If no V1 inputs are used in place, then the result is created only from
8417 // the zero mask and the V2 insertion - so remove V1 dependency.
8419 V1 = DAG.getUNDEF(MVT::v4f32);
8421 unsigned InsertPSMask = V2SrcIndex << 6 | V2DstIndex << 4 | ZMask;
8422 assert((InsertPSMask & ~0xFFu) == 0 && "Invalid mask!");
8424 // Insert the V2 element into the desired position.
8426 return DAG.getNode(X86ISD::INSERTPS, DL, MVT::v4f32, V1, V2,
8427 DAG.getConstant(InsertPSMask, MVT::i8));
8430 /// \brief Handle lowering of 2-lane 64-bit floating point shuffles.
8432 /// This is the basis function for the 2-lane 64-bit shuffles as we have full
8433 /// support for floating point shuffles but not integer shuffles. These
8434 /// instructions will incur a domain crossing penalty on some chips though so
8435 /// it is better to avoid lowering through this for integer vectors where
8437 static SDValue lowerV2F64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
8438 const X86Subtarget *Subtarget,
8439 SelectionDAG &DAG) {
8441 assert(Op.getSimpleValueType() == MVT::v2f64 && "Bad shuffle type!");
8442 assert(V1.getSimpleValueType() == MVT::v2f64 && "Bad operand type!");
8443 assert(V2.getSimpleValueType() == MVT::v2f64 && "Bad operand type!");
8444 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
8445 ArrayRef<int> Mask = SVOp->getMask();
8446 assert(Mask.size() == 2 && "Unexpected mask size for v2 shuffle!");
8448 if (isSingleInputShuffleMask(Mask)) {
8449 // Use low duplicate instructions for masks that match their pattern.
8450 if (Subtarget->hasSSE3())
8451 if (isShuffleEquivalent(Mask, 0, 0))
8452 return DAG.getNode(X86ISD::MOVDDUP, DL, MVT::v2f64, V1);
8454 // Straight shuffle of a single input vector. Simulate this by using the
8455 // single input as both of the "inputs" to this instruction..
8456 unsigned SHUFPDMask = (Mask[0] == 1) | ((Mask[1] == 1) << 1);
8458 if (Subtarget->hasAVX()) {
8459 // If we have AVX, we can use VPERMILPS which will allow folding a load
8460 // into the shuffle.
8461 return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v2f64, V1,
8462 DAG.getConstant(SHUFPDMask, MVT::i8));
8465 return DAG.getNode(X86ISD::SHUFP, SDLoc(Op), MVT::v2f64, V1, V1,
8466 DAG.getConstant(SHUFPDMask, MVT::i8));
8468 assert(Mask[0] >= 0 && Mask[0] < 2 && "Non-canonicalized blend!");
8469 assert(Mask[1] >= 2 && "Non-canonicalized blend!");
8471 // If we have a single input, insert that into V1 if we can do so cheaply.
8472 if ((Mask[0] >= 2) + (Mask[1] >= 2) == 1) {
8473 if (SDValue Insertion = lowerVectorShuffleAsElementInsertion(
8474 MVT::v2f64, DL, V1, V2, Mask, Subtarget, DAG))
8476 // Try inverting the insertion since for v2 masks it is easy to do and we
8477 // can't reliably sort the mask one way or the other.
8478 int InverseMask[2] = {Mask[0] < 0 ? -1 : (Mask[0] ^ 2),
8479 Mask[1] < 0 ? -1 : (Mask[1] ^ 2)};
8480 if (SDValue Insertion = lowerVectorShuffleAsElementInsertion(
8481 MVT::v2f64, DL, V2, V1, InverseMask, Subtarget, DAG))
8485 // Try to use one of the special instruction patterns to handle two common
8486 // blend patterns if a zero-blend above didn't work.
8487 if (isShuffleEquivalent(Mask, 0, 3) || isShuffleEquivalent(Mask, 1, 3))
8488 if (SDValue V1S = getScalarValueForVectorElement(V1, Mask[0], DAG))
8489 // We can either use a special instruction to load over the low double or
8490 // to move just the low double.
8492 isShuffleFoldableLoad(V1S) ? X86ISD::MOVLPD : X86ISD::MOVSD,
8494 DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v2f64, V1S));
8496 if (Subtarget->hasSSE41())
8497 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v2f64, V1, V2, Mask,
8501 // Use dedicated unpack instructions for masks that match their pattern.
8502 if (isShuffleEquivalent(Mask, 0, 2))
8503 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v2f64, V1, V2);
8504 if (isShuffleEquivalent(Mask, 1, 3))
8505 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v2f64, V1, V2);
8507 unsigned SHUFPDMask = (Mask[0] == 1) | (((Mask[1] - 2) == 1) << 1);
8508 return DAG.getNode(X86ISD::SHUFP, SDLoc(Op), MVT::v2f64, V1, V2,
8509 DAG.getConstant(SHUFPDMask, MVT::i8));
8512 /// \brief Handle lowering of 2-lane 64-bit integer shuffles.
8514 /// Tries to lower a 2-lane 64-bit shuffle using shuffle operations provided by
8515 /// the integer unit to minimize domain crossing penalties. However, for blends
8516 /// it falls back to the floating point shuffle operation with appropriate bit
8518 static SDValue lowerV2I64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
8519 const X86Subtarget *Subtarget,
8520 SelectionDAG &DAG) {
8522 assert(Op.getSimpleValueType() == MVT::v2i64 && "Bad shuffle type!");
8523 assert(V1.getSimpleValueType() == MVT::v2i64 && "Bad operand type!");
8524 assert(V2.getSimpleValueType() == MVT::v2i64 && "Bad operand type!");
8525 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
8526 ArrayRef<int> Mask = SVOp->getMask();
8527 assert(Mask.size() == 2 && "Unexpected mask size for v2 shuffle!");
8529 if (isSingleInputShuffleMask(Mask)) {
8530 // Check for being able to broadcast a single element.
8531 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v2i64, DL, V1,
8532 Mask, Subtarget, DAG))
8535 // Straight shuffle of a single input vector. For everything from SSE2
8536 // onward this has a single fast instruction with no scary immediates.
8537 // We have to map the mask as it is actually a v4i32 shuffle instruction.
8538 V1 = DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, V1);
8539 int WidenedMask[4] = {
8540 std::max(Mask[0], 0) * 2, std::max(Mask[0], 0) * 2 + 1,
8541 std::max(Mask[1], 0) * 2, std::max(Mask[1], 0) * 2 + 1};
8543 ISD::BITCAST, DL, MVT::v2i64,
8544 DAG.getNode(X86ISD::PSHUFD, SDLoc(Op), MVT::v4i32, V1,
8545 getV4X86ShuffleImm8ForMask(WidenedMask, DAG)));
8548 // Try to use byte shift instructions.
8549 if (SDValue Shift = lowerVectorShuffleAsByteShift(
8550 DL, MVT::v2i64, V1, V2, Mask, DAG))
8553 // If we have a single input from V2 insert that into V1 if we can do so
8555 if ((Mask[0] >= 2) + (Mask[1] >= 2) == 1) {
8556 if (SDValue Insertion = lowerVectorShuffleAsElementInsertion(
8557 MVT::v2i64, DL, V1, V2, Mask, Subtarget, DAG))
8559 // Try inverting the insertion since for v2 masks it is easy to do and we
8560 // can't reliably sort the mask one way or the other.
8561 int InverseMask[2] = {Mask[0] < 0 ? -1 : (Mask[0] ^ 2),
8562 Mask[1] < 0 ? -1 : (Mask[1] ^ 2)};
8563 if (SDValue Insertion = lowerVectorShuffleAsElementInsertion(
8564 MVT::v2i64, DL, V2, V1, InverseMask, Subtarget, DAG))
8568 if (Subtarget->hasSSE41())
8569 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v2i64, V1, V2, Mask,
8573 // Use dedicated unpack instructions for masks that match their pattern.
8574 if (isShuffleEquivalent(Mask, 0, 2))
8575 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v2i64, V1, V2);
8576 if (isShuffleEquivalent(Mask, 1, 3))
8577 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v2i64, V1, V2);
8579 // Try to use byte rotation instructions.
8580 // Its more profitable for pre-SSSE3 to use shuffles/unpacks.
8581 if (Subtarget->hasSSSE3())
8582 if (SDValue Rotate = lowerVectorShuffleAsByteRotate(
8583 DL, MVT::v2i64, V1, V2, Mask, Subtarget, DAG))
8586 // We implement this with SHUFPD which is pretty lame because it will likely
8587 // incur 2 cycles of stall for integer vectors on Nehalem and older chips.
8588 // However, all the alternatives are still more cycles and newer chips don't
8589 // have this problem. It would be really nice if x86 had better shuffles here.
8590 V1 = DAG.getNode(ISD::BITCAST, DL, MVT::v2f64, V1);
8591 V2 = DAG.getNode(ISD::BITCAST, DL, MVT::v2f64, V2);
8592 return DAG.getNode(ISD::BITCAST, DL, MVT::v2i64,
8593 DAG.getVectorShuffle(MVT::v2f64, DL, V1, V2, Mask));
8596 /// \brief Test whether this can be lowered with a single SHUFPS instruction.
8598 /// This is used to disable more specialized lowerings when the shufps lowering
8599 /// will happen to be efficient.
8600 static bool isSingleSHUFPSMask(ArrayRef<int> Mask) {
8601 // This routine only handles 128-bit shufps.
8602 assert(Mask.size() == 4 && "Unsupported mask size!");
8604 // To lower with a single SHUFPS we need to have the low half and high half
8605 // each requiring a single input.
8606 if (Mask[0] != -1 && Mask[1] != -1 && (Mask[0] < 4) != (Mask[1] < 4))
8608 if (Mask[2] != -1 && Mask[3] != -1 && (Mask[2] < 4) != (Mask[3] < 4))
8614 /// \brief Lower a vector shuffle using the SHUFPS instruction.
8616 /// This is a helper routine dedicated to lowering vector shuffles using SHUFPS.
8617 /// It makes no assumptions about whether this is the *best* lowering, it simply
8619 static SDValue lowerVectorShuffleWithSHUFPS(SDLoc DL, MVT VT,
8620 ArrayRef<int> Mask, SDValue V1,
8621 SDValue V2, SelectionDAG &DAG) {
8622 SDValue LowV = V1, HighV = V2;
8623 int NewMask[4] = {Mask[0], Mask[1], Mask[2], Mask[3]};
8626 std::count_if(Mask.begin(), Mask.end(), [](int M) { return M >= 4; });
8628 if (NumV2Elements == 1) {
8630 std::find_if(Mask.begin(), Mask.end(), [](int M) { return M >= 4; }) -
8633 // Compute the index adjacent to V2Index and in the same half by toggling
8635 int V2AdjIndex = V2Index ^ 1;
8637 if (Mask[V2AdjIndex] == -1) {
8638 // Handles all the cases where we have a single V2 element and an undef.
8639 // This will only ever happen in the high lanes because we commute the
8640 // vector otherwise.
8642 std::swap(LowV, HighV);
8643 NewMask[V2Index] -= 4;
8645 // Handle the case where the V2 element ends up adjacent to a V1 element.
8646 // To make this work, blend them together as the first step.
8647 int V1Index = V2AdjIndex;
8648 int BlendMask[4] = {Mask[V2Index] - 4, 0, Mask[V1Index], 0};
8649 V2 = DAG.getNode(X86ISD::SHUFP, DL, VT, V2, V1,
8650 getV4X86ShuffleImm8ForMask(BlendMask, DAG));
8652 // Now proceed to reconstruct the final blend as we have the necessary
8653 // high or low half formed.
8660 NewMask[V1Index] = 2; // We put the V1 element in V2[2].
8661 NewMask[V2Index] = 0; // We shifted the V2 element into V2[0].
8663 } else if (NumV2Elements == 2) {
8664 if (Mask[0] < 4 && Mask[1] < 4) {
8665 // Handle the easy case where we have V1 in the low lanes and V2 in the
8669 } else if (Mask[2] < 4 && Mask[3] < 4) {
8670 // We also handle the reversed case because this utility may get called
8671 // when we detect a SHUFPS pattern but can't easily commute the shuffle to
8672 // arrange things in the right direction.
8678 // We have a mixture of V1 and V2 in both low and high lanes. Rather than
8679 // trying to place elements directly, just blend them and set up the final
8680 // shuffle to place them.
8682 // The first two blend mask elements are for V1, the second two are for
8684 int BlendMask[4] = {Mask[0] < 4 ? Mask[0] : Mask[1],
8685 Mask[2] < 4 ? Mask[2] : Mask[3],
8686 (Mask[0] >= 4 ? Mask[0] : Mask[1]) - 4,
8687 (Mask[2] >= 4 ? Mask[2] : Mask[3]) - 4};
8688 V1 = DAG.getNode(X86ISD::SHUFP, DL, VT, V1, V2,
8689 getV4X86ShuffleImm8ForMask(BlendMask, DAG));
8691 // Now we do a normal shuffle of V1 by giving V1 as both operands to
8694 NewMask[0] = Mask[0] < 4 ? 0 : 2;
8695 NewMask[1] = Mask[0] < 4 ? 2 : 0;
8696 NewMask[2] = Mask[2] < 4 ? 1 : 3;
8697 NewMask[3] = Mask[2] < 4 ? 3 : 1;
8700 return DAG.getNode(X86ISD::SHUFP, DL, VT, LowV, HighV,
8701 getV4X86ShuffleImm8ForMask(NewMask, DAG));
8704 /// \brief Lower 4-lane 32-bit floating point shuffles.
8706 /// Uses instructions exclusively from the floating point unit to minimize
8707 /// domain crossing penalties, as these are sufficient to implement all v4f32
8709 static SDValue lowerV4F32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
8710 const X86Subtarget *Subtarget,
8711 SelectionDAG &DAG) {
8713 assert(Op.getSimpleValueType() == MVT::v4f32 && "Bad shuffle type!");
8714 assert(V1.getSimpleValueType() == MVT::v4f32 && "Bad operand type!");
8715 assert(V2.getSimpleValueType() == MVT::v4f32 && "Bad operand type!");
8716 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
8717 ArrayRef<int> Mask = SVOp->getMask();
8718 assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");
8721 std::count_if(Mask.begin(), Mask.end(), [](int M) { return M >= 4; });
8723 if (NumV2Elements == 0) {
8724 // Check for being able to broadcast a single element.
8725 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v4f32, DL, V1,
8726 Mask, Subtarget, DAG))
8729 // Use even/odd duplicate instructions for masks that match their pattern.
8730 if (Subtarget->hasSSE3()) {
8731 if (isShuffleEquivalent(Mask, 0, 0, 2, 2))
8732 return DAG.getNode(X86ISD::MOVSLDUP, DL, MVT::v4f32, V1);
8733 if (isShuffleEquivalent(Mask, 1, 1, 3, 3))
8734 return DAG.getNode(X86ISD::MOVSHDUP, DL, MVT::v4f32, V1);
8737 if (Subtarget->hasAVX()) {
8738 // If we have AVX, we can use VPERMILPS which will allow folding a load
8739 // into the shuffle.
8740 return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v4f32, V1,
8741 getV4X86ShuffleImm8ForMask(Mask, DAG));
8744 // Otherwise, use a straight shuffle of a single input vector. We pass the
8745 // input vector to both operands to simulate this with a SHUFPS.
8746 return DAG.getNode(X86ISD::SHUFP, DL, MVT::v4f32, V1, V1,
8747 getV4X86ShuffleImm8ForMask(Mask, DAG));
8750 // There are special ways we can lower some single-element blends. However, we
8751 // have custom ways we can lower more complex single-element blends below that
8752 // we defer to if both this and BLENDPS fail to match, so restrict this to
8753 // when the V2 input is targeting element 0 of the mask -- that is the fast
8755 if (NumV2Elements == 1 && Mask[0] >= 4)
8756 if (SDValue V = lowerVectorShuffleAsElementInsertion(MVT::v4f32, DL, V1, V2,
8757 Mask, Subtarget, DAG))
8760 if (Subtarget->hasSSE41()) {
8761 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v4f32, V1, V2, Mask,
8765 // Use INSERTPS if we can complete the shuffle efficiently.
8766 if (SDValue V = lowerVectorShuffleAsInsertPS(Op, V1, V2, Mask, DAG))
8769 if (!isSingleSHUFPSMask(Mask))
8770 if (SDValue BlendPerm = lowerVectorShuffleAsBlendAndPermute(
8771 DL, MVT::v4f32, V1, V2, Mask, DAG))
8775 // Use dedicated unpack instructions for masks that match their pattern.
8776 if (isShuffleEquivalent(Mask, 0, 4, 1, 5))
8777 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v4f32, V1, V2);
8778 if (isShuffleEquivalent(Mask, 2, 6, 3, 7))
8779 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v4f32, V1, V2);
8781 // Otherwise fall back to a SHUFPS lowering strategy.
8782 return lowerVectorShuffleWithSHUFPS(DL, MVT::v4f32, Mask, V1, V2, DAG);
8785 /// \brief Lower 4-lane i32 vector shuffles.
8787 /// We try to handle these with integer-domain shuffles where we can, but for
8788 /// blends we use the floating point domain blend instructions.
8789 static SDValue lowerV4I32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
8790 const X86Subtarget *Subtarget,
8791 SelectionDAG &DAG) {
8793 assert(Op.getSimpleValueType() == MVT::v4i32 && "Bad shuffle type!");
8794 assert(V1.getSimpleValueType() == MVT::v4i32 && "Bad operand type!");
8795 assert(V2.getSimpleValueType() == MVT::v4i32 && "Bad operand type!");
8796 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
8797 ArrayRef<int> Mask = SVOp->getMask();
8798 assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");
8800 // Whenever we can lower this as a zext, that instruction is strictly faster
8801 // than any alternative. It also allows us to fold memory operands into the
8802 // shuffle in many cases.
8803 if (SDValue ZExt = lowerVectorShuffleAsZeroOrAnyExtend(DL, MVT::v4i32, V1, V2,
8804 Mask, Subtarget, DAG))
8808 std::count_if(Mask.begin(), Mask.end(), [](int M) { return M >= 4; });
8810 if (NumV2Elements == 0) {
8811 // Check for being able to broadcast a single element.
8812 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v4i32, DL, V1,
8813 Mask, Subtarget, DAG))
8816 // Straight shuffle of a single input vector. For everything from SSE2
8817 // onward this has a single fast instruction with no scary immediates.
8818 // We coerce the shuffle pattern to be compatible with UNPCK instructions
8819 // but we aren't actually going to use the UNPCK instruction because doing
8820 // so prevents folding a load into this instruction or making a copy.
8821 const int UnpackLoMask[] = {0, 0, 1, 1};
8822 const int UnpackHiMask[] = {2, 2, 3, 3};
8823 if (isShuffleEquivalent(Mask, 0, 0, 1, 1))
8824 Mask = UnpackLoMask;
8825 else if (isShuffleEquivalent(Mask, 2, 2, 3, 3))
8826 Mask = UnpackHiMask;
8828 return DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32, V1,
8829 getV4X86ShuffleImm8ForMask(Mask, DAG));
8832 // Try to use bit shift instructions.
8833 if (SDValue Shift = lowerVectorShuffleAsBitShift(
8834 DL, MVT::v4i32, V1, V2, Mask, DAG))
8837 // Try to use byte shift instructions.
8838 if (SDValue Shift = lowerVectorShuffleAsByteShift(
8839 DL, MVT::v4i32, V1, V2, Mask, DAG))
8842 // There are special ways we can lower some single-element blends.
8843 if (NumV2Elements == 1)
8844 if (SDValue V = lowerVectorShuffleAsElementInsertion(MVT::v4i32, DL, V1, V2,
8845 Mask, Subtarget, DAG))
8848 if (Subtarget->hasSSE41())
8849 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v4i32, V1, V2, Mask,
8853 if (SDValue Masked =
8854 lowerVectorShuffleAsBitMask(DL, MVT::v4i32, V1, V2, Mask, DAG))
8857 // Use dedicated unpack instructions for masks that match their pattern.
8858 if (isShuffleEquivalent(Mask, 0, 4, 1, 5))
8859 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v4i32, V1, V2);
8860 if (isShuffleEquivalent(Mask, 2, 6, 3, 7))
8861 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v4i32, V1, V2);
8863 // Try to use byte rotation instructions.
8864 // Its more profitable for pre-SSSE3 to use shuffles/unpacks.
8865 if (Subtarget->hasSSSE3())
8866 if (SDValue Rotate = lowerVectorShuffleAsByteRotate(
8867 DL, MVT::v4i32, V1, V2, Mask, Subtarget, DAG))
8870 // We implement this with SHUFPS because it can blend from two vectors.
8871 // Because we're going to eventually use SHUFPS, we use SHUFPS even to build
8872 // up the inputs, bypassing domain shift penalties that we would encur if we
8873 // directly used PSHUFD on Nehalem and older. For newer chips, this isn't
8875 return DAG.getNode(ISD::BITCAST, DL, MVT::v4i32,
8876 DAG.getVectorShuffle(
8878 DAG.getNode(ISD::BITCAST, DL, MVT::v4f32, V1),
8879 DAG.getNode(ISD::BITCAST, DL, MVT::v4f32, V2), Mask));
8882 /// \brief Lowering of single-input v8i16 shuffles is the cornerstone of SSE2
8883 /// shuffle lowering, and the most complex part.
8885 /// The lowering strategy is to try to form pairs of input lanes which are
8886 /// targeted at the same half of the final vector, and then use a dword shuffle
8887 /// to place them onto the right half, and finally unpack the paired lanes into
8888 /// their final position.
8890 /// The exact breakdown of how to form these dword pairs and align them on the
8891 /// correct sides is really tricky. See the comments within the function for
8892 /// more of the details.
8893 static SDValue lowerV8I16SingleInputVectorShuffle(
8894 SDLoc DL, SDValue V, MutableArrayRef<int> Mask,
8895 const X86Subtarget *Subtarget, SelectionDAG &DAG) {
8896 assert(V.getSimpleValueType() == MVT::v8i16 && "Bad input type!");
8897 MutableArrayRef<int> LoMask = Mask.slice(0, 4);
8898 MutableArrayRef<int> HiMask = Mask.slice(4, 4);
8900 SmallVector<int, 4> LoInputs;
8901 std::copy_if(LoMask.begin(), LoMask.end(), std::back_inserter(LoInputs),
8902 [](int M) { return M >= 0; });
8903 std::sort(LoInputs.begin(), LoInputs.end());
8904 LoInputs.erase(std::unique(LoInputs.begin(), LoInputs.end()), LoInputs.end());
8905 SmallVector<int, 4> HiInputs;
8906 std::copy_if(HiMask.begin(), HiMask.end(), std::back_inserter(HiInputs),
8907 [](int M) { return M >= 0; });
8908 std::sort(HiInputs.begin(), HiInputs.end());
8909 HiInputs.erase(std::unique(HiInputs.begin(), HiInputs.end()), HiInputs.end());
8911 std::lower_bound(LoInputs.begin(), LoInputs.end(), 4) - LoInputs.begin();
8912 int NumHToL = LoInputs.size() - NumLToL;
8914 std::lower_bound(HiInputs.begin(), HiInputs.end(), 4) - HiInputs.begin();
8915 int NumHToH = HiInputs.size() - NumLToH;
8916 MutableArrayRef<int> LToLInputs(LoInputs.data(), NumLToL);
8917 MutableArrayRef<int> LToHInputs(HiInputs.data(), NumLToH);
8918 MutableArrayRef<int> HToLInputs(LoInputs.data() + NumLToL, NumHToL);
8919 MutableArrayRef<int> HToHInputs(HiInputs.data() + NumLToH, NumHToH);
8921 // Check for being able to broadcast a single element.
8922 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v8i16, DL, V,
8923 Mask, Subtarget, DAG))
8926 // Try to use bit shift instructions.
8927 if (SDValue Shift = lowerVectorShuffleAsBitShift(
8928 DL, MVT::v8i16, V, V, Mask, DAG))
8931 // Try to use byte shift instructions.
8932 if (SDValue Shift = lowerVectorShuffleAsByteShift(
8933 DL, MVT::v8i16, V, V, Mask, DAG))
8936 // Use dedicated unpack instructions for masks that match their pattern.
8937 if (isShuffleEquivalent(Mask, 0, 0, 1, 1, 2, 2, 3, 3))
8938 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v8i16, V, V);
8939 if (isShuffleEquivalent(Mask, 4, 4, 5, 5, 6, 6, 7, 7))
8940 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v8i16, V, V);
8942 // Try to use byte rotation instructions.
8943 if (SDValue Rotate = lowerVectorShuffleAsByteRotate(
8944 DL, MVT::v8i16, V, V, Mask, Subtarget, DAG))
8947 // Simplify the 1-into-3 and 3-into-1 cases with a single pshufd. For all
8948 // such inputs we can swap two of the dwords across the half mark and end up
8949 // with <=2 inputs to each half in each half. Once there, we can fall through
8950 // to the generic code below. For example:
8952 // Input: [a, b, c, d, e, f, g, h] -PSHUFD[0,2,1,3]-> [a, b, e, f, c, d, g, h]
8953 // Mask: [0, 1, 2, 7, 4, 5, 6, 3] -----------------> [0, 1, 4, 7, 2, 3, 6, 5]
8955 // However in some very rare cases we have a 1-into-3 or 3-into-1 on one half
8956 // and an existing 2-into-2 on the other half. In this case we may have to
8957 // pre-shuffle the 2-into-2 half to avoid turning it into a 3-into-1 or
8958 // 1-into-3 which could cause us to cycle endlessly fixing each side in turn.
8959 // Fortunately, we don't have to handle anything but a 2-into-2 pattern
8960 // because any other situation (including a 3-into-1 or 1-into-3 in the other
8961 // half than the one we target for fixing) will be fixed when we re-enter this
8962 // path. We will also combine away any sequence of PSHUFD instructions that
8963 // result into a single instruction. Here is an example of the tricky case:
8965 // Input: [a, b, c, d, e, f, g, h] -PSHUFD[0,2,1,3]-> [a, b, e, f, c, d, g, h]
8966 // Mask: [3, 7, 1, 0, 2, 7, 3, 5] -THIS-IS-BAD!!!!-> [5, 7, 1, 0, 4, 7, 5, 3]
8968 // This now has a 1-into-3 in the high half! Instead, we do two shuffles:
8970 // Input: [a, b, c, d, e, f, g, h] PSHUFHW[0,2,1,3]-> [a, b, c, d, e, g, f, h]
8971 // Mask: [3, 7, 1, 0, 2, 7, 3, 5] -----------------> [3, 7, 1, 0, 2, 7, 3, 6]
8973 // Input: [a, b, c, d, e, g, f, h] -PSHUFD[0,2,1,3]-> [a, b, e, g, c, d, f, h]
8974 // Mask: [3, 7, 1, 0, 2, 7, 3, 6] -----------------> [5, 7, 1, 0, 4, 7, 5, 6]
8976 // The result is fine to be handled by the generic logic.
8977 auto balanceSides = [&](ArrayRef<int> AToAInputs, ArrayRef<int> BToAInputs,
8978 ArrayRef<int> BToBInputs, ArrayRef<int> AToBInputs,
8979 int AOffset, int BOffset) {
8980 assert((AToAInputs.size() == 3 || AToAInputs.size() == 1) &&
8981 "Must call this with A having 3 or 1 inputs from the A half.");
8982 assert((BToAInputs.size() == 1 || BToAInputs.size() == 3) &&
8983 "Must call this with B having 1 or 3 inputs from the B half.");
8984 assert(AToAInputs.size() + BToAInputs.size() == 4 &&
8985 "Must call this with either 3:1 or 1:3 inputs (summing to 4).");
8987 // Compute the index of dword with only one word among the three inputs in
8988 // a half by taking the sum of the half with three inputs and subtracting
8989 // the sum of the actual three inputs. The difference is the remaining
8992 int &TripleDWord = AToAInputs.size() == 3 ? ADWord : BDWord;
8993 int &OneInputDWord = AToAInputs.size() == 3 ? BDWord : ADWord;
8994 int TripleInputOffset = AToAInputs.size() == 3 ? AOffset : BOffset;
8995 ArrayRef<int> TripleInputs = AToAInputs.size() == 3 ? AToAInputs : BToAInputs;
8996 int OneInput = AToAInputs.size() == 3 ? BToAInputs[0] : AToAInputs[0];
8997 int TripleInputSum = 0 + 1 + 2 + 3 + (4 * TripleInputOffset);
8998 int TripleNonInputIdx =
8999 TripleInputSum - std::accumulate(TripleInputs.begin(), TripleInputs.end(), 0);
9000 TripleDWord = TripleNonInputIdx / 2;
9002 // We use xor with one to compute the adjacent DWord to whichever one the
9004 OneInputDWord = (OneInput / 2) ^ 1;
9006 // Check for one tricky case: We're fixing a 3<-1 or a 1<-3 shuffle for AToA
9007 // and BToA inputs. If there is also such a problem with the BToB and AToB
9008 // inputs, we don't try to fix it necessarily -- we'll recurse and see it in
9009 // the next pass. However, if we have a 2<-2 in the BToB and AToB inputs, it
9010 // is essential that we don't *create* a 3<-1 as then we might oscillate.
9011 if (BToBInputs.size() == 2 && AToBInputs.size() == 2) {
9012 // Compute how many inputs will be flipped by swapping these DWords. We
9014 // to balance this to ensure we don't form a 3-1 shuffle in the other
9016 int NumFlippedAToBInputs =
9017 std::count(AToBInputs.begin(), AToBInputs.end(), 2 * ADWord) +
9018 std::count(AToBInputs.begin(), AToBInputs.end(), 2 * ADWord + 1);
9019 int NumFlippedBToBInputs =
9020 std::count(BToBInputs.begin(), BToBInputs.end(), 2 * BDWord) +
9021 std::count(BToBInputs.begin(), BToBInputs.end(), 2 * BDWord + 1);
9022 if ((NumFlippedAToBInputs == 1 &&
9023 (NumFlippedBToBInputs == 0 || NumFlippedBToBInputs == 2)) ||
9024 (NumFlippedBToBInputs == 1 &&
9025 (NumFlippedAToBInputs == 0 || NumFlippedAToBInputs == 2))) {
9026 // We choose whether to fix the A half or B half based on whether that
9027 // half has zero flipped inputs. At zero, we may not be able to fix it
9028 // with that half. We also bias towards fixing the B half because that
9029 // will more commonly be the high half, and we have to bias one way.
9030 auto FixFlippedInputs = [&V, &DL, &Mask, &DAG](int PinnedIdx, int DWord,
9031 ArrayRef<int> Inputs) {
9032 int FixIdx = PinnedIdx ^ 1; // The adjacent slot to the pinned slot.
9033 bool IsFixIdxInput = std::find(Inputs.begin(), Inputs.end(),
9034 PinnedIdx ^ 1) != Inputs.end();
9035 // Determine whether the free index is in the flipped dword or the
9036 // unflipped dword based on where the pinned index is. We use this bit
9037 // in an xor to conditionally select the adjacent dword.
9038 int FixFreeIdx = 2 * (DWord ^ (PinnedIdx / 2 == DWord));
9039 bool IsFixFreeIdxInput = std::find(Inputs.begin(), Inputs.end(),
9040 FixFreeIdx) != Inputs.end();
9041 if (IsFixIdxInput == IsFixFreeIdxInput)
9043 IsFixFreeIdxInput = std::find(Inputs.begin(), Inputs.end(),
9044 FixFreeIdx) != Inputs.end();
9045 assert(IsFixIdxInput != IsFixFreeIdxInput &&
9046 "We need to be changing the number of flipped inputs!");
9047 int PSHUFHalfMask[] = {0, 1, 2, 3};
9048 std::swap(PSHUFHalfMask[FixFreeIdx % 4], PSHUFHalfMask[FixIdx % 4]);
9049 V = DAG.getNode(FixIdx < 4 ? X86ISD::PSHUFLW : X86ISD::PSHUFHW, DL,
9051 getV4X86ShuffleImm8ForMask(PSHUFHalfMask, DAG));
9054 if (M != -1 && M == FixIdx)
9056 else if (M != -1 && M == FixFreeIdx)
9059 if (NumFlippedBToBInputs != 0) {
9061 BToAInputs.size() == 3 ? TripleNonInputIdx : OneInput;
9062 FixFlippedInputs(BPinnedIdx, BDWord, BToBInputs);
9064 assert(NumFlippedAToBInputs != 0 && "Impossible given predicates!");
9066 AToAInputs.size() == 3 ? TripleNonInputIdx : OneInput;
9067 FixFlippedInputs(APinnedIdx, ADWord, AToBInputs);
9072 int PSHUFDMask[] = {0, 1, 2, 3};
9073 PSHUFDMask[ADWord] = BDWord;
9074 PSHUFDMask[BDWord] = ADWord;
9075 V = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16,
9076 DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32,
9077 DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, V),
9078 getV4X86ShuffleImm8ForMask(PSHUFDMask, DAG)));
9080 // Adjust the mask to match the new locations of A and B.
9082 if (M != -1 && M/2 == ADWord)
9083 M = 2 * BDWord + M % 2;
9084 else if (M != -1 && M/2 == BDWord)
9085 M = 2 * ADWord + M % 2;
9087 // Recurse back into this routine to re-compute state now that this isn't
9088 // a 3 and 1 problem.
9089 return DAG.getVectorShuffle(MVT::v8i16, DL, V, DAG.getUNDEF(MVT::v8i16),
9092 if ((NumLToL == 3 && NumHToL == 1) || (NumLToL == 1 && NumHToL == 3))
9093 return balanceSides(LToLInputs, HToLInputs, HToHInputs, LToHInputs, 0, 4);
9094 else if ((NumHToH == 3 && NumLToH == 1) || (NumHToH == 1 && NumLToH == 3))
9095 return balanceSides(HToHInputs, LToHInputs, LToLInputs, HToLInputs, 4, 0);
9097 // At this point there are at most two inputs to the low and high halves from
9098 // each half. That means the inputs can always be grouped into dwords and
9099 // those dwords can then be moved to the correct half with a dword shuffle.
9100 // We use at most one low and one high word shuffle to collect these paired
9101 // inputs into dwords, and finally a dword shuffle to place them.
9102 int PSHUFLMask[4] = {-1, -1, -1, -1};
9103 int PSHUFHMask[4] = {-1, -1, -1, -1};
9104 int PSHUFDMask[4] = {-1, -1, -1, -1};
9106 // First fix the masks for all the inputs that are staying in their
9107 // original halves. This will then dictate the targets of the cross-half
9109 auto fixInPlaceInputs =
9110 [&PSHUFDMask](ArrayRef<int> InPlaceInputs, ArrayRef<int> IncomingInputs,
9111 MutableArrayRef<int> SourceHalfMask,
9112 MutableArrayRef<int> HalfMask, int HalfOffset) {
9113 if (InPlaceInputs.empty())
9115 if (InPlaceInputs.size() == 1) {
9116 SourceHalfMask[InPlaceInputs[0] - HalfOffset] =
9117 InPlaceInputs[0] - HalfOffset;
9118 PSHUFDMask[InPlaceInputs[0] / 2] = InPlaceInputs[0] / 2;
9121 if (IncomingInputs.empty()) {
9122 // Just fix all of the in place inputs.
9123 for (int Input : InPlaceInputs) {
9124 SourceHalfMask[Input - HalfOffset] = Input - HalfOffset;
9125 PSHUFDMask[Input / 2] = Input / 2;
9130 assert(InPlaceInputs.size() == 2 && "Cannot handle 3 or 4 inputs!");
9131 SourceHalfMask[InPlaceInputs[0] - HalfOffset] =
9132 InPlaceInputs[0] - HalfOffset;
9133 // Put the second input next to the first so that they are packed into
9134 // a dword. We find the adjacent index by toggling the low bit.
9135 int AdjIndex = InPlaceInputs[0] ^ 1;
9136 SourceHalfMask[AdjIndex - HalfOffset] = InPlaceInputs[1] - HalfOffset;
9137 std::replace(HalfMask.begin(), HalfMask.end(), InPlaceInputs[1], AdjIndex);
9138 PSHUFDMask[AdjIndex / 2] = AdjIndex / 2;
9140 fixInPlaceInputs(LToLInputs, HToLInputs, PSHUFLMask, LoMask, 0);
9141 fixInPlaceInputs(HToHInputs, LToHInputs, PSHUFHMask, HiMask, 4);
9143 // Now gather the cross-half inputs and place them into a free dword of
9144 // their target half.
9145 // FIXME: This operation could almost certainly be simplified dramatically to
9146 // look more like the 3-1 fixing operation.
9147 auto moveInputsToRightHalf = [&PSHUFDMask](
9148 MutableArrayRef<int> IncomingInputs, ArrayRef<int> ExistingInputs,
9149 MutableArrayRef<int> SourceHalfMask, MutableArrayRef<int> HalfMask,
9150 MutableArrayRef<int> FinalSourceHalfMask, int SourceOffset,
9152 auto isWordClobbered = [](ArrayRef<int> SourceHalfMask, int Word) {
9153 return SourceHalfMask[Word] != -1 && SourceHalfMask[Word] != Word;
9155 auto isDWordClobbered = [&isWordClobbered](ArrayRef<int> SourceHalfMask,
9157 int LowWord = Word & ~1;
9158 int HighWord = Word | 1;
9159 return isWordClobbered(SourceHalfMask, LowWord) ||
9160 isWordClobbered(SourceHalfMask, HighWord);
9163 if (IncomingInputs.empty())
9166 if (ExistingInputs.empty()) {
9167 // Map any dwords with inputs from them into the right half.
9168 for (int Input : IncomingInputs) {
9169 // If the source half mask maps over the inputs, turn those into
9170 // swaps and use the swapped lane.
9171 if (isWordClobbered(SourceHalfMask, Input - SourceOffset)) {
9172 if (SourceHalfMask[SourceHalfMask[Input - SourceOffset]] == -1) {
9173 SourceHalfMask[SourceHalfMask[Input - SourceOffset]] =
9174 Input - SourceOffset;
9175 // We have to swap the uses in our half mask in one sweep.
9176 for (int &M : HalfMask)
9177 if (M == SourceHalfMask[Input - SourceOffset] + SourceOffset)
9179 else if (M == Input)
9180 M = SourceHalfMask[Input - SourceOffset] + SourceOffset;
9182 assert(SourceHalfMask[SourceHalfMask[Input - SourceOffset]] ==
9183 Input - SourceOffset &&
9184 "Previous placement doesn't match!");
9186 // Note that this correctly re-maps both when we do a swap and when
9187 // we observe the other side of the swap above. We rely on that to
9188 // avoid swapping the members of the input list directly.
9189 Input = SourceHalfMask[Input - SourceOffset] + SourceOffset;
9192 // Map the input's dword into the correct half.
9193 if (PSHUFDMask[(Input - SourceOffset + DestOffset) / 2] == -1)
9194 PSHUFDMask[(Input - SourceOffset + DestOffset) / 2] = Input / 2;
9196 assert(PSHUFDMask[(Input - SourceOffset + DestOffset) / 2] ==
9198 "Previous placement doesn't match!");
9201 // And just directly shift any other-half mask elements to be same-half
9202 // as we will have mirrored the dword containing the element into the
9203 // same position within that half.
9204 for (int &M : HalfMask)
9205 if (M >= SourceOffset && M < SourceOffset + 4) {
9206 M = M - SourceOffset + DestOffset;
9207 assert(M >= 0 && "This should never wrap below zero!");
9212 // Ensure we have the input in a viable dword of its current half. This
9213 // is particularly tricky because the original position may be clobbered
9214 // by inputs being moved and *staying* in that half.
9215 if (IncomingInputs.size() == 1) {
9216 if (isWordClobbered(SourceHalfMask, IncomingInputs[0] - SourceOffset)) {
9217 int InputFixed = std::find(std::begin(SourceHalfMask),
9218 std::end(SourceHalfMask), -1) -
9219 std::begin(SourceHalfMask) + SourceOffset;
9220 SourceHalfMask[InputFixed - SourceOffset] =
9221 IncomingInputs[0] - SourceOffset;
9222 std::replace(HalfMask.begin(), HalfMask.end(), IncomingInputs[0],
9224 IncomingInputs[0] = InputFixed;
9226 } else if (IncomingInputs.size() == 2) {
9227 if (IncomingInputs[0] / 2 != IncomingInputs[1] / 2 ||
9228 isDWordClobbered(SourceHalfMask, IncomingInputs[0] - SourceOffset)) {
9229 // We have two non-adjacent or clobbered inputs we need to extract from
9230 // the source half. To do this, we need to map them into some adjacent
9231 // dword slot in the source mask.
9232 int InputsFixed[2] = {IncomingInputs[0] - SourceOffset,
9233 IncomingInputs[1] - SourceOffset};
9235 // If there is a free slot in the source half mask adjacent to one of
9236 // the inputs, place the other input in it. We use (Index XOR 1) to
9237 // compute an adjacent index.
9238 if (!isWordClobbered(SourceHalfMask, InputsFixed[0]) &&
9239 SourceHalfMask[InputsFixed[0] ^ 1] == -1) {
9240 SourceHalfMask[InputsFixed[0]] = InputsFixed[0];
9241 SourceHalfMask[InputsFixed[0] ^ 1] = InputsFixed[1];
9242 InputsFixed[1] = InputsFixed[0] ^ 1;
9243 } else if (!isWordClobbered(SourceHalfMask, InputsFixed[1]) &&
9244 SourceHalfMask[InputsFixed[1] ^ 1] == -1) {
9245 SourceHalfMask[InputsFixed[1]] = InputsFixed[1];
9246 SourceHalfMask[InputsFixed[1] ^ 1] = InputsFixed[0];
9247 InputsFixed[0] = InputsFixed[1] ^ 1;
9248 } else if (SourceHalfMask[2 * ((InputsFixed[0] / 2) ^ 1)] == -1 &&
9249 SourceHalfMask[2 * ((InputsFixed[0] / 2) ^ 1) + 1] == -1) {
9250 // The two inputs are in the same DWord but it is clobbered and the
9251 // adjacent DWord isn't used at all. Move both inputs to the free
9253 SourceHalfMask[2 * ((InputsFixed[0] / 2) ^ 1)] = InputsFixed[0];
9254 SourceHalfMask[2 * ((InputsFixed[0] / 2) ^ 1) + 1] = InputsFixed[1];
9255 InputsFixed[0] = 2 * ((InputsFixed[0] / 2) ^ 1);
9256 InputsFixed[1] = 2 * ((InputsFixed[0] / 2) ^ 1) + 1;
9258 // The only way we hit this point is if there is no clobbering
9259 // (because there are no off-half inputs to this half) and there is no
9260 // free slot adjacent to one of the inputs. In this case, we have to
9261 // swap an input with a non-input.
9262 for (int i = 0; i < 4; ++i)
9263 assert((SourceHalfMask[i] == -1 || SourceHalfMask[i] == i) &&
9264 "We can't handle any clobbers here!");
9265 assert(InputsFixed[1] != (InputsFixed[0] ^ 1) &&
9266 "Cannot have adjacent inputs here!");
9268 SourceHalfMask[InputsFixed[0] ^ 1] = InputsFixed[1];
9269 SourceHalfMask[InputsFixed[1]] = InputsFixed[0] ^ 1;
9271 // We also have to update the final source mask in this case because
9272 // it may need to undo the above swap.
9273 for (int &M : FinalSourceHalfMask)
9274 if (M == (InputsFixed[0] ^ 1) + SourceOffset)
9275 M = InputsFixed[1] + SourceOffset;
9276 else if (M == InputsFixed[1] + SourceOffset)
9277 M = (InputsFixed[0] ^ 1) + SourceOffset;
9279 InputsFixed[1] = InputsFixed[0] ^ 1;
9282 // Point everything at the fixed inputs.
9283 for (int &M : HalfMask)
9284 if (M == IncomingInputs[0])
9285 M = InputsFixed[0] + SourceOffset;
9286 else if (M == IncomingInputs[1])
9287 M = InputsFixed[1] + SourceOffset;
9289 IncomingInputs[0] = InputsFixed[0] + SourceOffset;
9290 IncomingInputs[1] = InputsFixed[1] + SourceOffset;
9293 llvm_unreachable("Unhandled input size!");
9296 // Now hoist the DWord down to the right half.
9297 int FreeDWord = (PSHUFDMask[DestOffset / 2] == -1 ? 0 : 1) + DestOffset / 2;
9298 assert(PSHUFDMask[FreeDWord] == -1 && "DWord not free");
9299 PSHUFDMask[FreeDWord] = IncomingInputs[0] / 2;
9300 for (int &M : HalfMask)
9301 for (int Input : IncomingInputs)
9303 M = FreeDWord * 2 + Input % 2;
9305 moveInputsToRightHalf(HToLInputs, LToLInputs, PSHUFHMask, LoMask, HiMask,
9306 /*SourceOffset*/ 4, /*DestOffset*/ 0);
9307 moveInputsToRightHalf(LToHInputs, HToHInputs, PSHUFLMask, HiMask, LoMask,
9308 /*SourceOffset*/ 0, /*DestOffset*/ 4);
9310 // Now enact all the shuffles we've computed to move the inputs into their
9312 if (!isNoopShuffleMask(PSHUFLMask))
9313 V = DAG.getNode(X86ISD::PSHUFLW, DL, MVT::v8i16, V,
9314 getV4X86ShuffleImm8ForMask(PSHUFLMask, DAG));
9315 if (!isNoopShuffleMask(PSHUFHMask))
9316 V = DAG.getNode(X86ISD::PSHUFHW, DL, MVT::v8i16, V,
9317 getV4X86ShuffleImm8ForMask(PSHUFHMask, DAG));
9318 if (!isNoopShuffleMask(PSHUFDMask))
9319 V = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16,
9320 DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32,
9321 DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, V),
9322 getV4X86ShuffleImm8ForMask(PSHUFDMask, DAG)));
9324 // At this point, each half should contain all its inputs, and we can then
9325 // just shuffle them into their final position.
9326 assert(std::count_if(LoMask.begin(), LoMask.end(),
9327 [](int M) { return M >= 4; }) == 0 &&
9328 "Failed to lift all the high half inputs to the low mask!");
9329 assert(std::count_if(HiMask.begin(), HiMask.end(),
9330 [](int M) { return M >= 0 && M < 4; }) == 0 &&
9331 "Failed to lift all the low half inputs to the high mask!");
9333 // Do a half shuffle for the low mask.
9334 if (!isNoopShuffleMask(LoMask))
9335 V = DAG.getNode(X86ISD::PSHUFLW, DL, MVT::v8i16, V,
9336 getV4X86ShuffleImm8ForMask(LoMask, DAG));
9338 // Do a half shuffle with the high mask after shifting its values down.
9339 for (int &M : HiMask)
9342 if (!isNoopShuffleMask(HiMask))
9343 V = DAG.getNode(X86ISD::PSHUFHW, DL, MVT::v8i16, V,
9344 getV4X86ShuffleImm8ForMask(HiMask, DAG));
9349 /// \brief Detect whether the mask pattern should be lowered through
9352 /// This essentially tests whether viewing the mask as an interleaving of two
9353 /// sub-sequences reduces the cross-input traffic of a blend operation. If so,
9354 /// lowering it through interleaving is a significantly better strategy.
9355 static bool shouldLowerAsInterleaving(ArrayRef<int> Mask) {
9356 int NumEvenInputs[2] = {0, 0};
9357 int NumOddInputs[2] = {0, 0};
9358 int NumLoInputs[2] = {0, 0};
9359 int NumHiInputs[2] = {0, 0};
9360 for (int i = 0, Size = Mask.size(); i < Size; ++i) {
9364 int InputIdx = Mask[i] >= Size;
9367 ++NumLoInputs[InputIdx];
9369 ++NumHiInputs[InputIdx];
9372 ++NumEvenInputs[InputIdx];
9374 ++NumOddInputs[InputIdx];
9377 // The minimum number of cross-input results for both the interleaved and
9378 // split cases. If interleaving results in fewer cross-input results, return
9380 int InterleavedCrosses = std::min(NumEvenInputs[1] + NumOddInputs[0],
9381 NumEvenInputs[0] + NumOddInputs[1]);
9382 int SplitCrosses = std::min(NumLoInputs[1] + NumHiInputs[0],
9383 NumLoInputs[0] + NumHiInputs[1]);
9384 return InterleavedCrosses < SplitCrosses;
9387 /// \brief Blend two v8i16 vectors using a naive unpack strategy.
9389 /// This strategy only works when the inputs from each vector fit into a single
9390 /// half of that vector, and generally there are not so many inputs as to leave
9391 /// the in-place shuffles required highly constrained (and thus expensive). It
9392 /// shifts all the inputs into a single side of both input vectors and then
9393 /// uses an unpack to interleave these inputs in a single vector. At that
9394 /// point, we will fall back on the generic single input shuffle lowering.
static SDValue lowerV8I16BasicBlendVectorShuffle(SDLoc DL, SDValue V1,
                                                 SDValue V2,
                                                 MutableArrayRef<int> Mask,
                                                 const X86Subtarget *Subtarget,
                                                 SelectionDAG &DAG) {
  assert(V1.getSimpleValueType() == MVT::v8i16 && "Bad input type!");
  assert(V2.getSimpleValueType() == MVT::v8i16 && "Bad input type!");
  SmallVector<int, 3> LoV1Inputs, HiV1Inputs, LoV2Inputs, HiV2Inputs;
  for (int i = 0; i < 8; ++i)
    if (Mask[i] >= 0 && Mask[i] < 4)
      LoV1Inputs.push_back(i);
    else if (Mask[i] >= 4 && Mask[i] < 8)
      HiV1Inputs.push_back(i);
    else if (Mask[i] >= 8 && Mask[i] < 12)
      LoV2Inputs.push_back(i);
    else if (Mask[i] >= 12)
      HiV2Inputs.push_back(i);

  int NumV1Inputs = LoV1Inputs.size() + HiV1Inputs.size();
  int NumV2Inputs = LoV2Inputs.size() + HiV2Inputs.size();

  assert(NumV1Inputs > 0 && NumV1Inputs <= 3 && "At most 3 inputs supported");
  assert(NumV2Inputs > 0 && NumV2Inputs <= 3 && "At most 3 inputs supported");
  assert(NumV1Inputs + NumV2Inputs <= 4 && "At most 4 combined inputs");

  bool MergeFromLo = LoV1Inputs.size() + LoV2Inputs.size() >=
                     HiV1Inputs.size() + HiV2Inputs.size();

  auto moveInputsToHalf = [&](SDValue V, ArrayRef<int> LoInputs,
                              ArrayRef<int> HiInputs, bool MoveToLo,
                              int MaskOffset) {
    ArrayRef<int> GoodInputs = MoveToLo ? LoInputs : HiInputs;
    ArrayRef<int> BadInputs = MoveToLo ? HiInputs : LoInputs;
    if (BadInputs.empty())
      return V;

    int MoveMask[] = {-1, -1, -1, -1, -1, -1, -1, -1};
    int MoveOffset = MoveToLo ? 0 : 4;

    if (GoodInputs.empty()) {
      for (int BadInput : BadInputs) {
        MoveMask[Mask[BadInput] % 4 + MoveOffset] = Mask[BadInput] - MaskOffset;
        Mask[BadInput] = Mask[BadInput] % 4 + MoveOffset + MaskOffset;
      }
    } else {
      if (GoodInputs.size() == 2) {
        // If the low inputs are spread across two dwords, pack them into
        // a single dword.
        MoveMask[MoveOffset] = Mask[GoodInputs[0]] - MaskOffset;
        MoveMask[MoveOffset + 1] = Mask[GoodInputs[1]] - MaskOffset;
        Mask[GoodInputs[0]] = MoveOffset + MaskOffset;
        Mask[GoodInputs[1]] = MoveOffset + 1 + MaskOffset;
      } else {
        // Otherwise pin the good inputs.
        for (int GoodInput : GoodInputs)
          MoveMask[Mask[GoodInput] - MaskOffset] = Mask[GoodInput] - MaskOffset;
      }

      if (BadInputs.size() == 2) {
        // If we have two bad inputs then there may be either one or two good
        // inputs fixed in place. Find a fixed input, and then find the *other*
        // two adjacent indices by using modular arithmetic.
        int GoodMaskIdx =
            std::find_if(std::begin(MoveMask) + MoveOffset, std::end(MoveMask),
                         [](int M) { return M >= 0; }) -
            std::begin(MoveMask);
        int MoveMaskIdx =
            ((((GoodMaskIdx - MoveOffset) & ~1) + 2) % 4) + MoveOffset;
        assert(MoveMask[MoveMaskIdx] == -1 && "Expected empty slot");
        assert(MoveMask[MoveMaskIdx + 1] == -1 && "Expected empty slot");
        MoveMask[MoveMaskIdx] = Mask[BadInputs[0]] - MaskOffset;
        MoveMask[MoveMaskIdx + 1] = Mask[BadInputs[1]] - MaskOffset;
        Mask[BadInputs[0]] = MoveMaskIdx + MaskOffset;
        Mask[BadInputs[1]] = MoveMaskIdx + 1 + MaskOffset;
      } else {
        assert(BadInputs.size() == 1 && "All sizes handled");
        int MoveMaskIdx = std::find(std::begin(MoveMask) + MoveOffset,
                                    std::end(MoveMask), -1) -
                          std::begin(MoveMask);
        MoveMask[MoveMaskIdx] = Mask[BadInputs[0]] - MaskOffset;
        Mask[BadInputs[0]] = MoveMaskIdx + MaskOffset;
      }
    }

    return DAG.getVectorShuffle(MVT::v8i16, DL, V, DAG.getUNDEF(MVT::v8i16),
                                MoveMask);
  };
  V1 = moveInputsToHalf(V1, LoV1Inputs, HiV1Inputs, MergeFromLo,
                        /*MaskOffset*/ 0);
  V2 = moveInputsToHalf(V2, LoV2Inputs, HiV2Inputs, MergeFromLo,
                        /*MaskOffset*/ 8);

  // FIXME: Select an interleaving of the merge of V1 and V2 that minimizes
  // cross-half traffic in the final shuffle.

  // Munge the mask to be a single-input mask after the unpack merges the
  // halves.
  for (int &M : Mask)
    if (M != -1)
      M = 2 * (M % 4) + (M / 8);

  return DAG.getVectorShuffle(
      MVT::v8i16, DL, DAG.getNode(MergeFromLo ? X86ISD::UNPCKL : X86ISD::UNPCKH,
                                  DL, MVT::v8i16, V1, V2),
      DAG.getUNDEF(MVT::v8i16), Mask);
}
/// \brief Generic lowering of 8-lane i16 shuffles.
///
/// This handles both single-input shuffles and combined shuffle/blends with
/// two inputs. The single input shuffles are immediately delegated to
/// a dedicated lowering routine.
///
/// The blends are lowered in one of three fundamental ways. If there are few
/// enough inputs, it delegates to a basic UNPCK-based strategy. If the shuffle
/// of the input is significantly cheaper when lowered as an interleaving of
/// the two inputs, try to interleave them. Otherwise, blend the low and high
/// halves of the inputs separately (making them have relatively few inputs)
/// and then concatenate them.
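///
/// For instance (illustrative, assuming no earlier special case fires), the
/// mask <0, 1, 2, 3, 15, 14, 13, 12> keeps each output half sourced from a
/// single input, so it is lowered as two half shuffles whose low quadwords
/// are then merged with a v2i64 UNPCKL.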
static SDValue lowerV8I16VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
                                       const X86Subtarget *Subtarget,
                                       SelectionDAG &DAG) {
  SDLoc DL(Op);
  assert(Op.getSimpleValueType() == MVT::v8i16 && "Bad shuffle type!");
  assert(V1.getSimpleValueType() == MVT::v8i16 && "Bad operand type!");
  assert(V2.getSimpleValueType() == MVT::v8i16 && "Bad operand type!");
  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
  ArrayRef<int> OrigMask = SVOp->getMask();
  int MaskStorage[8] = {OrigMask[0], OrigMask[1], OrigMask[2], OrigMask[3],
                        OrigMask[4], OrigMask[5], OrigMask[6], OrigMask[7]};
  MutableArrayRef<int> Mask(MaskStorage);

  assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");

  // Whenever we can lower this as a zext, that instruction is strictly faster
  // than any alternative.
  if (SDValue ZExt = lowerVectorShuffleAsZeroOrAnyExtend(
          DL, MVT::v8i16, V1, V2, OrigMask, Subtarget, DAG))
    return ZExt;

  auto isV1 = [](int M) { return M >= 0 && M < 8; };
  auto isV2 = [](int M) { return M >= 8; };

  int NumV1Inputs = std::count_if(Mask.begin(), Mask.end(), isV1);
  int NumV2Inputs = std::count_if(Mask.begin(), Mask.end(), isV2);

  if (NumV2Inputs == 0)
    return lowerV8I16SingleInputVectorShuffle(DL, V1, Mask, Subtarget, DAG);

  assert(NumV1Inputs > 0 && "All single-input shuffles should be canonicalized "
                            "to be V1-input shuffles.");

  // Try to use bit shift instructions.
  if (SDValue Shift = lowerVectorShuffleAsBitShift(
          DL, MVT::v8i16, V1, V2, Mask, DAG))
    return Shift;

  // Try to use byte shift instructions.
  if (SDValue Shift = lowerVectorShuffleAsByteShift(
          DL, MVT::v8i16, V1, V2, Mask, DAG))
    return Shift;

  // There are special ways we can lower some single-element blends.
  if (NumV2Inputs == 1)
    if (SDValue V = lowerVectorShuffleAsElementInsertion(MVT::v8i16, DL, V1, V2,
                                                         Mask, Subtarget, DAG))
      return V;

  if (Subtarget->hasSSE41())
    if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v8i16, V1, V2, Mask,
                                                  Subtarget, DAG))
      return Blend;

  if (SDValue Masked =
          lowerVectorShuffleAsBitMask(DL, MVT::v8i16, V1, V2, Mask, DAG))
    return Masked;

  // Use dedicated unpack instructions for masks that match their pattern.
  if (isShuffleEquivalent(Mask, 0, 8, 1, 9, 2, 10, 3, 11))
    return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v8i16, V1, V2);
  if (isShuffleEquivalent(Mask, 4, 12, 5, 13, 6, 14, 7, 15))
    return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v8i16, V1, V2);

  // Try to use byte rotation instructions.
  if (SDValue Rotate = lowerVectorShuffleAsByteRotate(
          DL, MVT::v8i16, V1, V2, Mask, Subtarget, DAG))
    return Rotate;

  if (NumV1Inputs + NumV2Inputs <= 4)
    return lowerV8I16BasicBlendVectorShuffle(DL, V1, V2, Mask, Subtarget, DAG);

  // Check whether an interleaving lowering is likely to be more efficient.
  // This isn't perfect but it is a strong heuristic that tends to work well on
  // the kinds of shuffles that show up in practice.
  //
  // FIXME: Handle 1x, 2x, and 4x interleaving.
  if (shouldLowerAsInterleaving(Mask)) {
    // FIXME: Figure out whether we should pack these into the low or high
    // halves.

    int EMask[8], OMask[8];
    for (int i = 0; i < 4; ++i) {
      EMask[i] = Mask[2*i];
      OMask[i] = Mask[2*i + 1];
      EMask[i + 4] = -1;
      OMask[i + 4] = -1;
    }

    SDValue Evens = DAG.getVectorShuffle(MVT::v8i16, DL, V1, V2, EMask);
    SDValue Odds = DAG.getVectorShuffle(MVT::v8i16, DL, V1, V2, OMask);

    return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v8i16, Evens, Odds);
  }

  int LoBlendMask[8] = {-1, -1, -1, -1, -1, -1, -1, -1};
  int HiBlendMask[8] = {-1, -1, -1, -1, -1, -1, -1, -1};

  for (int i = 0; i < 4; ++i) {
    LoBlendMask[i] = Mask[i];
    HiBlendMask[i] = Mask[i + 4];
  }

  SDValue LoV = DAG.getVectorShuffle(MVT::v8i16, DL, V1, V2, LoBlendMask);
  SDValue HiV = DAG.getVectorShuffle(MVT::v8i16, DL, V1, V2, HiBlendMask);
  LoV = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, LoV);
  HiV = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, HiV);

  return DAG.getNode(ISD::BITCAST, DL, MVT::v8i16,
                     DAG.getNode(X86ISD::UNPCKL, DL, MVT::v2i64, LoV, HiV));
}
/// \brief Check whether a compaction lowering can be done by dropping even
/// elements and compute how many times even elements must be dropped.
///
/// This handles shuffles which take every Nth element where N is a power of
/// two. Example shuffle masks:
///
///  N = 1:  0,  2,  4,  6,  8, 10, 12, 14,  0,  2,  4,  6,  8, 10, 12, 14
///  N = 1:  0,  2,  4,  6,  8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30
///  N = 2:  0,  4,  8, 12,  0,  4,  8, 12,  0,  4,  8, 12,  0,  4,  8, 12
///  N = 2:  0,  4,  8, 12, 16, 20, 24, 28,  0,  4,  8, 12, 16, 20, 24, 28
///  N = 3:  0,  8,  0,  8,  0,  8,  0,  8,  0,  8,  0,  8,  0,  8,  0,  8
///  N = 3:  0,  8, 16, 24,  0,  8, 16, 24,  0,  8, 16, 24,  0,  8, 16, 24
///
/// Any of these lanes can of course be undef.
///
/// This routine only supports N <= 3.
/// FIXME: Evaluate whether either AVX or AVX-512 have any opportunities here
/// for larger N.
///
/// \returns N above, or the number of times even elements must be dropped if
/// there is such a number. Otherwise returns zero.
static int canLowerByDroppingEvenElements(ArrayRef<int> Mask) {
  // Figure out whether we're looping over two inputs or just one.
  bool IsSingleInput = isSingleInputShuffleMask(Mask);

  // The modulus for the shuffle vector entries is based on whether this is
  // a single input or not.
  int ShuffleModulus = Mask.size() * (IsSingleInput ? 1 : 2);
  assert(isPowerOf2_32((uint32_t)ShuffleModulus) &&
         "We should only be called with masks with a power-of-2 size!");

  uint64_t ModMask = (uint64_t)ShuffleModulus - 1;

  // We track whether the input is viable for all power-of-2 strides 2^1, 2^2,
  // and 2^3 simultaneously. This is because we may have ambiguity with
  // partially undef inputs.
  bool ViableForN[3] = {true, true, true};

  for (int i = 0, e = Mask.size(); i < e; ++i) {
    // Ignore undef lanes, we'll optimistically collapse them to the pattern we
    // want.
    if (Mask[i] == -1)
      continue;

    bool IsAnyViable = false;
    for (unsigned j = 0; j != array_lengthof(ViableForN); ++j)
      if (ViableForN[j]) {
        uint64_t N = j + 1;

        // The shuffle mask must be equal to (i * 2^N) % M.
        if ((uint64_t)Mask[i] == (((uint64_t)i << N) & ModMask))
          IsAnyViable = true;
        else
          ViableForN[j] = false;
      }
    // Early exit if we exhaust the possible powers of two.
    if (!IsAnyViable)
      break;
  }

  for (unsigned j = 0; j != array_lengthof(ViableForN); ++j)
    if (ViableForN[j])
      return j + 1;

  // Return 0 as there is no viable power of two.
  return 0;
}
/// \brief Generic lowering of v16i8 shuffles.
///
/// This is a hybrid strategy to lower v16i8 vectors. It first attempts to
/// detect any complexity reducing interleaving. If that doesn't help, it uses
/// UNPCK to spread the i8 elements across two i16-element vectors, and uses
/// the existing lowering for v8i16 blends on each half, finally PACK-ing them
/// back together.
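///
/// For example (illustrative): in the final fallback below, UNPCKL of V1 with
/// zero yields bytes <V1[0], 0, V1[1], 0, ...>, i.e. the low bytes
/// zero-extended to i16s; the v8i16 lowering then blends these with V2's
/// halves, and a closing PACKUS discards the zero bytes again.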
static SDValue lowerV16I8VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
                                       const X86Subtarget *Subtarget,
                                       SelectionDAG &DAG) {
  SDLoc DL(Op);
  assert(Op.getSimpleValueType() == MVT::v16i8 && "Bad shuffle type!");
  assert(V1.getSimpleValueType() == MVT::v16i8 && "Bad operand type!");
  assert(V2.getSimpleValueType() == MVT::v16i8 && "Bad operand type!");
  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
  ArrayRef<int> OrigMask = SVOp->getMask();
  assert(OrigMask.size() == 16 && "Unexpected mask size for v16 shuffle!");

  // Try to use bit shift instructions.
  if (SDValue Shift = lowerVectorShuffleAsBitShift(
          DL, MVT::v16i8, V1, V2, OrigMask, DAG))
    return Shift;

  // Try to use byte shift instructions.
  if (SDValue Shift = lowerVectorShuffleAsByteShift(
          DL, MVT::v16i8, V1, V2, OrigMask, DAG))
    return Shift;

  // Try to use byte rotation instructions.
  if (SDValue Rotate = lowerVectorShuffleAsByteRotate(
          DL, MVT::v16i8, V1, V2, OrigMask, Subtarget, DAG))
    return Rotate;

  // Try to use a zext lowering.
  if (SDValue ZExt = lowerVectorShuffleAsZeroOrAnyExtend(
          DL, MVT::v16i8, V1, V2, OrigMask, Subtarget, DAG))
    return ZExt;

  int MaskStorage[16] = {
      OrigMask[0],  OrigMask[1],  OrigMask[2],  OrigMask[3],
      OrigMask[4],  OrigMask[5],  OrigMask[6],  OrigMask[7],
      OrigMask[8],  OrigMask[9],  OrigMask[10], OrigMask[11],
      OrigMask[12], OrigMask[13], OrigMask[14], OrigMask[15]};
  MutableArrayRef<int> Mask(MaskStorage);
  MutableArrayRef<int> LoMask = Mask.slice(0, 8);
  MutableArrayRef<int> HiMask = Mask.slice(8, 8);

  int NumV2Elements =
      std::count_if(Mask.begin(), Mask.end(), [](int M) { return M >= 16; });

  // For single-input shuffles, there are some nicer lowering tricks we can use.
  if (NumV2Elements == 0) {
    // Check for being able to broadcast a single element.
    if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v16i8, DL, V1,
                                                          Mask, Subtarget, DAG))
      return Broadcast;

    // Check whether we can widen this to an i16 shuffle by duplicating bytes.
    // Notably, this handles splat and partial-splat shuffles more efficiently.
    // However, it only makes sense if the pre-duplication shuffle simplifies
    // things significantly. Currently, this means we need to be able to
    // express the pre-duplication shuffle as an i16 shuffle.
    //
    // FIXME: We should check for other patterns which can be widened into an
    // i16 shuffle as well.
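    //
    // As an illustrative example, the byte splat <3, 3, ..., 3> pairs up
    // cleanly: word 1 (bytes 2-3) is first kept in place by the i16 shuffle,
    // the UNPCKL of V1 with itself then doubles every low byte so byte 3
    // fills word 3, and a final i16 splat of word 3 finishes the lowering.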
    auto canWidenViaDuplication = [](ArrayRef<int> Mask) {
      for (int i = 0; i < 16; i += 2)
        if (Mask[i] != -1 && Mask[i + 1] != -1 && Mask[i] != Mask[i + 1])
          return false;

      return true;
    };
    auto tryToWidenViaDuplication = [&]() -> SDValue {
      if (!canWidenViaDuplication(Mask))
        return SDValue();
      SmallVector<int, 4> LoInputs;
      std::copy_if(Mask.begin(), Mask.end(), std::back_inserter(LoInputs),
                   [](int M) { return M >= 0 && M < 8; });
      std::sort(LoInputs.begin(), LoInputs.end());
      LoInputs.erase(std::unique(LoInputs.begin(), LoInputs.end()),
                     LoInputs.end());
      SmallVector<int, 4> HiInputs;
      std::copy_if(Mask.begin(), Mask.end(), std::back_inserter(HiInputs),
                   [](int M) { return M >= 8; });
      std::sort(HiInputs.begin(), HiInputs.end());
      HiInputs.erase(std::unique(HiInputs.begin(), HiInputs.end()),
                     HiInputs.end());

      bool TargetLo = LoInputs.size() >= HiInputs.size();
      ArrayRef<int> InPlaceInputs = TargetLo ? LoInputs : HiInputs;
      ArrayRef<int> MovingInputs = TargetLo ? HiInputs : LoInputs;

      int PreDupI16Shuffle[] = {-1, -1, -1, -1, -1, -1, -1, -1};
      SmallDenseMap<int, int, 8> LaneMap;
      for (int I : InPlaceInputs) {
        PreDupI16Shuffle[I/2] = I/2;
        LaneMap[I] = I;
      }
      int j = TargetLo ? 0 : 4, je = j + 4;
      for (int i = 0, ie = MovingInputs.size(); i < ie; ++i) {
        // Check if j is already a shuffle of this input. This happens when
        // there are two adjacent bytes after we move the low one.
        if (PreDupI16Shuffle[j] != MovingInputs[i] / 2) {
          // If we haven't yet mapped the input, search for a slot into which
          // we can map it.
          while (j < je && PreDupI16Shuffle[j] != -1)
            ++j;

          if (j == je)
            // We can't place the inputs into a single half with a simple i16
            // shuffle, so bail.
            return SDValue();

          // Map this input with the i16 shuffle.
          PreDupI16Shuffle[j] = MovingInputs[i] / 2;
        }

        // Update the lane map based on the mapping we ended up with.
        LaneMap[MovingInputs[i]] = 2 * j + MovingInputs[i] % 2;
      }
      V1 = DAG.getNode(
          ISD::BITCAST, DL, MVT::v16i8,
          DAG.getVectorShuffle(MVT::v8i16, DL,
                               DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, V1),
                               DAG.getUNDEF(MVT::v8i16), PreDupI16Shuffle));

      // Unpack the bytes to form the i16s that will be shuffled into place.
      V1 = DAG.getNode(TargetLo ? X86ISD::UNPCKL : X86ISD::UNPCKH, DL,
                       MVT::v16i8, V1, V1);

      int PostDupI16Shuffle[8] = {-1, -1, -1, -1, -1, -1, -1, -1};
      for (int i = 0; i < 16; ++i)
        if (Mask[i] != -1) {
          int MappedMask = LaneMap[Mask[i]] - (TargetLo ? 0 : 8);
          assert(MappedMask < 8 && "Invalid v8 shuffle mask!");
          if (PostDupI16Shuffle[i / 2] == -1)
            PostDupI16Shuffle[i / 2] = MappedMask;
          else
            assert(PostDupI16Shuffle[i / 2] == MappedMask &&
                   "Conflicting entries in the original shuffle!");
        }
      return DAG.getNode(
          ISD::BITCAST, DL, MVT::v16i8,
          DAG.getVectorShuffle(MVT::v8i16, DL,
                               DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, V1),
                               DAG.getUNDEF(MVT::v8i16), PostDupI16Shuffle));
    };
    if (SDValue V = tryToWidenViaDuplication())
      return V;
  }

  // Check whether an interleaving lowering is likely to be more efficient.
  // This isn't perfect but it is a strong heuristic that tends to work well on
  // the kinds of shuffles that show up in practice.
  //
  // FIXME: We need to handle other interleaving widths (i16, i32, ...).
  if (shouldLowerAsInterleaving(Mask)) {
    int NumLoHalf = std::count_if(Mask.begin(), Mask.end(), [](int M) {
      return (M >= 0 && M < 8) || (M >= 16 && M < 24);
    });
    int NumHiHalf = std::count_if(Mask.begin(), Mask.end(), [](int M) {
      return (M >= 8 && M < 16) || M >= 24;
    });

    int EMask[16] = {-1, -1, -1, -1, -1, -1, -1, -1,
                     -1, -1, -1, -1, -1, -1, -1, -1};
    int OMask[16] = {-1, -1, -1, -1, -1, -1, -1, -1,
                     -1, -1, -1, -1, -1, -1, -1, -1};
    bool UnpackLo = NumLoHalf >= NumHiHalf;
    MutableArrayRef<int> TargetEMask(UnpackLo ? EMask : EMask + 8, 8);
    MutableArrayRef<int> TargetOMask(UnpackLo ? OMask : OMask + 8, 8);
    for (int i = 0; i < 8; ++i) {
      TargetEMask[i] = Mask[2 * i];
      TargetOMask[i] = Mask[2 * i + 1];
    }

    SDValue Evens = DAG.getVectorShuffle(MVT::v16i8, DL, V1, V2, EMask);
    SDValue Odds = DAG.getVectorShuffle(MVT::v16i8, DL, V1, V2, OMask);

    return DAG.getNode(UnpackLo ? X86ISD::UNPCKL : X86ISD::UNPCKH, DL,
                       MVT::v16i8, Evens, Odds);
  }

  // Check for SSSE3 which lets us lower all v16i8 shuffles much more directly
  // with PSHUFB. It is important to do this before we attempt to generate any
  // blends but after all of the single-input lowerings. If the single input
  // lowerings can find an instruction sequence that is faster than a PSHUFB, we
  // want to preserve that and we can DAG combine any longer sequences into
  // a PSHUFB in the end. But once we start blending from multiple inputs,
  // the complexity of DAG combining bad patterns back into PSHUFB is too high,
  // and there are *very* few patterns that would actually be faster than the
  // PSHUFB approach because of its ability to zero lanes.
  //
  // FIXME: The only exceptions to the above are blends which are exact
  // interleavings with direct instructions supporting them. We currently don't
  // handle those well here.
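  //
  // Purely as an illustration: for a mask such as <0, 16, 1, 17, ...> the
  // loop below would build V1Mask = <0, 0x80, 1, 0x80, ...> and V2Mask =
  // <0x80, 0, 0x80, 1, ...>; each PSHUFB zeroes the lanes owned by the other
  // input, and the two results are simply ORed together.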
  if (Subtarget->hasSSSE3()) {
    SDValue V1Mask[16];
    SDValue V2Mask[16];
    bool V1InUse = false;
    bool V2InUse = false;
    SmallBitVector Zeroable = computeZeroableShuffleElements(Mask, V1, V2);

    for (int i = 0; i < 16; ++i) {
      if (Mask[i] == -1) {
        V1Mask[i] = V2Mask[i] = DAG.getUNDEF(MVT::i8);
      } else {
        const int ZeroMask = 0x80;
        int V1Idx = (Mask[i] < 16 ? Mask[i] : ZeroMask);
        int V2Idx = (Mask[i] < 16 ? ZeroMask : Mask[i] - 16);
        if (Zeroable[i])
          V1Idx = V2Idx = ZeroMask;
        V1Mask[i] = DAG.getConstant(V1Idx, MVT::i8);
        V2Mask[i] = DAG.getConstant(V2Idx, MVT::i8);
        V1InUse |= (ZeroMask != V1Idx);
        V2InUse |= (ZeroMask != V2Idx);
      }
    }

    if (V1InUse)
      V1 = DAG.getNode(X86ISD::PSHUFB, DL, MVT::v16i8, V1,
                       DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v16i8, V1Mask));
    if (V2InUse)
      V2 = DAG.getNode(X86ISD::PSHUFB, DL, MVT::v16i8, V2,
                       DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v16i8, V2Mask));

    // If we need shuffled inputs from both, blend the two.
    if (V1InUse && V2InUse)
      return DAG.getNode(ISD::OR, DL, MVT::v16i8, V1, V2);
    if (V1InUse)
      return V1; // Single inputs are easy.
    if (V2InUse)
      return V2; // Single inputs are easy.
    // Shuffling to a zeroable vector.
    return getZeroVector(MVT::v16i8, Subtarget, DAG, DL);
  }

  // There are special ways we can lower some single-element blends.
  if (NumV2Elements == 1)
    if (SDValue V = lowerVectorShuffleAsElementInsertion(MVT::v16i8, DL, V1, V2,
                                                         Mask, Subtarget, DAG))
      return V;

  // Check whether a compaction lowering can be done. This handles shuffles
  // which take every Nth element for some even N. See the helper function for
  // details.
  //
  // We special case these as they can be particularly efficiently handled with
  // the PACKUSWB instruction on x86 and they show up in common patterns of
  // rearranging bytes to truncate wide elements.
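  //
  // For instance (illustrative), truncating i16s to bytes appears here as the
  // single-input mask <0, 2, 4, 6, 8, 10, 12, 14, 0, 2, ...> (N = 1): one AND
  // with 0x00FF per word clears the high bytes and a single PACKUSWB then
  // compacts the result.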
  if (int NumEvenDrops = canLowerByDroppingEvenElements(Mask)) {
    // NumEvenDrops is the power of two stride of the elements. Another way of
    // thinking about it is that we need to drop the even elements this many
    // times to get the original input.
    bool IsSingleInput = isSingleInputShuffleMask(Mask);

    // First we need to zero all the dropped bytes.
    assert(NumEvenDrops <= 3 &&
           "No support for dropping even elements more than 3 times.");
    // We use the mask type to pick which bytes are preserved based on how many
    // elements are dropped.
    MVT MaskVTs[] = { MVT::v8i16, MVT::v4i32, MVT::v2i64 };
    SDValue ByteClearMask =
        DAG.getNode(ISD::BITCAST, DL, MVT::v16i8,
                    DAG.getConstant(0xFF, MaskVTs[NumEvenDrops - 1]));
    V1 = DAG.getNode(ISD::AND, DL, MVT::v16i8, V1, ByteClearMask);
    if (!IsSingleInput)
      V2 = DAG.getNode(ISD::AND, DL, MVT::v16i8, V2, ByteClearMask);

    // Now pack things back together.
    V1 = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, V1);
    V2 = IsSingleInput ? V1 : DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, V2);
    SDValue Result = DAG.getNode(X86ISD::PACKUS, DL, MVT::v16i8, V1, V2);
    for (int i = 1; i < NumEvenDrops; ++i) {
      Result = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, Result);
      Result = DAG.getNode(X86ISD::PACKUS, DL, MVT::v16i8, Result, Result);
    }

    return Result;
  }

  int V1LoBlendMask[8] = {-1, -1, -1, -1, -1, -1, -1, -1};
  int V1HiBlendMask[8] = {-1, -1, -1, -1, -1, -1, -1, -1};
  int V2LoBlendMask[8] = {-1, -1, -1, -1, -1, -1, -1, -1};
  int V2HiBlendMask[8] = {-1, -1, -1, -1, -1, -1, -1, -1};

  auto buildBlendMasks = [](MutableArrayRef<int> HalfMask,
                            MutableArrayRef<int> V1HalfBlendMask,
                            MutableArrayRef<int> V2HalfBlendMask) {
    for (int i = 0; i < 8; ++i)
      if (HalfMask[i] >= 0 && HalfMask[i] < 16) {
        V1HalfBlendMask[i] = HalfMask[i];
        HalfMask[i] = i;
      } else if (HalfMask[i] >= 16) {
        V2HalfBlendMask[i] = HalfMask[i] - 16;
        HalfMask[i] = i + 8;
      }
  };
  buildBlendMasks(LoMask, V1LoBlendMask, V2LoBlendMask);
  buildBlendMasks(HiMask, V1HiBlendMask, V2HiBlendMask);

  SDValue Zero = getZeroVector(MVT::v8i16, Subtarget, DAG, DL);

  auto buildLoAndHiV8s = [&](SDValue V, MutableArrayRef<int> LoBlendMask,
                             MutableArrayRef<int> HiBlendMask) {
    SDValue V1, V2;

    // Check if any of the odd lanes in the v16i8 are used. If not, we can mask
    // them out and avoid using UNPCK{L,H} to extract the elements of V as
    // i16s.
    if (std::none_of(LoBlendMask.begin(), LoBlendMask.end(),
                     [](int M) { return M >= 0 && M % 2 == 1; }) &&
        std::none_of(HiBlendMask.begin(), HiBlendMask.end(),
                     [](int M) { return M >= 0 && M % 2 == 1; })) {
      // Use a mask to drop the high bytes.
      V1 = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, V);
      V1 = DAG.getNode(ISD::AND, DL, MVT::v8i16, V1,
                       DAG.getConstant(0x00FF, MVT::v8i16));

      // This will be a single vector shuffle instead of a blend so nuke V2.
      V2 = DAG.getUNDEF(MVT::v8i16);

      // Squash the masks to point directly into V1.
      for (int &M : LoBlendMask)
        if (M >= 0)
          M /= 2;
      for (int &M : HiBlendMask)
        if (M >= 0)
          M /= 2;
    } else {
      // Otherwise just unpack the low half of V into V1 and the high half into
      // V2 so that we can blend them as i16s.
      V1 = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16,
                       DAG.getNode(X86ISD::UNPCKL, DL, MVT::v16i8, V, Zero));
      V2 = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16,
                       DAG.getNode(X86ISD::UNPCKH, DL, MVT::v16i8, V, Zero));
    }

    SDValue BlendedLo = DAG.getVectorShuffle(MVT::v8i16, DL, V1, V2, LoBlendMask);
    SDValue BlendedHi = DAG.getVectorShuffle(MVT::v8i16, DL, V1, V2, HiBlendMask);
    return std::make_pair(BlendedLo, BlendedHi);
  };
  SDValue V1Lo, V1Hi, V2Lo, V2Hi;
  std::tie(V1Lo, V1Hi) = buildLoAndHiV8s(V1, V1LoBlendMask, V1HiBlendMask);
  std::tie(V2Lo, V2Hi) = buildLoAndHiV8s(V2, V2LoBlendMask, V2HiBlendMask);

  SDValue LoV = DAG.getVectorShuffle(MVT::v8i16, DL, V1Lo, V2Lo, LoMask);
  SDValue HiV = DAG.getVectorShuffle(MVT::v8i16, DL, V1Hi, V2Hi, HiMask);

  return DAG.getNode(X86ISD::PACKUS, DL, MVT::v16i8, LoV, HiV);
}
/// \brief Dispatching routine to lower various 128-bit x86 vector shuffles.
///
/// This routine breaks down the specific type of 128-bit shuffle and
/// dispatches to the lowering routines accordingly.
static SDValue lower128BitVectorShuffle(SDValue Op, SDValue V1, SDValue V2,
                                        MVT VT, const X86Subtarget *Subtarget,
                                        SelectionDAG &DAG) {
  switch (VT.SimpleTy) {
  case MVT::v2i64:
    return lowerV2I64VectorShuffle(Op, V1, V2, Subtarget, DAG);
  case MVT::v2f64:
    return lowerV2F64VectorShuffle(Op, V1, V2, Subtarget, DAG);
  case MVT::v4i32:
    return lowerV4I32VectorShuffle(Op, V1, V2, Subtarget, DAG);
  case MVT::v4f32:
    return lowerV4F32VectorShuffle(Op, V1, V2, Subtarget, DAG);
  case MVT::v8i16:
    return lowerV8I16VectorShuffle(Op, V1, V2, Subtarget, DAG);
  case MVT::v16i8:
    return lowerV16I8VectorShuffle(Op, V1, V2, Subtarget, DAG);

  default:
    llvm_unreachable("Unimplemented!");
  }
}
/// \brief Helper function to test whether a shuffle mask could be
/// simplified by widening the elements being shuffled.
///
/// Appends the mask for wider elements in WidenedMask if valid. Otherwise
/// leaves it in an unspecified state.
///
/// NOTE: This must handle normal vector shuffle masks and *target* vector
/// shuffle masks. The latter have the special property of a '-2' representing
/// a zero-ed lane of a vector.
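///
/// For example, the v4 mask <0, 1, 6, 7> widens to the v2 mask <0, 3>, and
/// <-1, 5, 2, 3> widens to <2, 1> by adopting the defined element of each
/// pair; a pair such as (0, 3) is not adjacent and cannot be widened.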
static bool canWidenShuffleElements(ArrayRef<int> Mask,
                                    SmallVectorImpl<int> &WidenedMask) {
  for (int i = 0, Size = Mask.size(); i < Size; i += 2) {
    // If both elements are undef, it's trivial.
    if (Mask[i] == SM_SentinelUndef && Mask[i + 1] == SM_SentinelUndef) {
      WidenedMask.push_back(SM_SentinelUndef);
      continue;
    }

    // Check for an undef mask and a mask value properly aligned to fit with
    // a pair of values. If we find such a case, use the non-undef mask's value.
    if (Mask[i] == SM_SentinelUndef && Mask[i + 1] >= 0 &&
        Mask[i + 1] % 2 == 1) {
      WidenedMask.push_back(Mask[i + 1] / 2);
      continue;
    }
    if (Mask[i + 1] == SM_SentinelUndef && Mask[i] >= 0 && Mask[i] % 2 == 0) {
      WidenedMask.push_back(Mask[i] / 2);
      continue;
    }

    // When zeroing, we need to spread the zeroing across both lanes to widen.
    if (Mask[i] == SM_SentinelZero || Mask[i + 1] == SM_SentinelZero) {
      if ((Mask[i] == SM_SentinelZero || Mask[i] == SM_SentinelUndef) &&
          (Mask[i + 1] == SM_SentinelZero || Mask[i + 1] == SM_SentinelUndef)) {
        WidenedMask.push_back(SM_SentinelZero);
        continue;
      }
      return false;
    }

    // Finally check if the two mask values are adjacent and aligned with
    // a pair.
    if (Mask[i] != SM_SentinelUndef && Mask[i] % 2 == 0 &&
        Mask[i] + 1 == Mask[i + 1]) {
      WidenedMask.push_back(Mask[i] / 2);
      continue;
    }

    // Otherwise we can't safely widen the elements used in this shuffle.
    return false;
  }
  assert(WidenedMask.size() == Mask.size() / 2 &&
         "Incorrect size of mask after widening the elements!");

  return true;
}
/// \brief Generic routine to split vector shuffle into half-sized shuffles.
///
/// This routine just extracts two subvectors, shuffles them independently, and
/// then concatenates them back together. This should work effectively with all
/// AVX vector shuffle types.
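///
/// For example (illustrative), the v8f32 mask <0, 8, 1, 9, 4, 12, 5, 13>
/// becomes the v4f32 mask <0, 4, 1, 5> applied once to the low halves and
/// once to the high halves before the final CONCAT_VECTORS.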
static SDValue splitAndLowerVectorShuffle(SDLoc DL, MVT VT, SDValue V1,
                                          SDValue V2, ArrayRef<int> Mask,
                                          SelectionDAG &DAG) {
  assert(VT.getSizeInBits() >= 256 &&
         "Only for 256-bit or wider vector shuffles!");
  assert(V1.getSimpleValueType() == VT && "Bad operand type!");
  assert(V2.getSimpleValueType() == VT && "Bad operand type!");

  ArrayRef<int> LoMask = Mask.slice(0, Mask.size() / 2);
  ArrayRef<int> HiMask = Mask.slice(Mask.size() / 2);

  int NumElements = VT.getVectorNumElements();
  int SplitNumElements = NumElements / 2;
  MVT ScalarVT = VT.getScalarType();
  MVT SplitVT = MVT::getVectorVT(ScalarVT, NumElements / 2);

  // Rather than splitting build-vectors, just build two narrower build
  // vectors. This helps shuffling with splats and zeros.
  auto SplitVector = [&](SDValue V) {
    while (V.getOpcode() == ISD::BITCAST)
      V = V->getOperand(0);

    MVT OrigVT = V.getSimpleValueType();
    int OrigNumElements = OrigVT.getVectorNumElements();
    int OrigSplitNumElements = OrigNumElements / 2;
    MVT OrigScalarVT = OrigVT.getScalarType();
    MVT OrigSplitVT = MVT::getVectorVT(OrigScalarVT, OrigNumElements / 2);

    SDValue LoV, HiV;

    auto *BV = dyn_cast<BuildVectorSDNode>(V);
    if (!BV) {
      LoV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, OrigSplitVT, V,
                        DAG.getIntPtrConstant(0));
      HiV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, OrigSplitVT, V,
                        DAG.getIntPtrConstant(OrigSplitNumElements));
    } else {
      SmallVector<SDValue, 16> LoOps, HiOps;
      for (int i = 0; i < OrigSplitNumElements; ++i) {
        LoOps.push_back(BV->getOperand(i));
        HiOps.push_back(BV->getOperand(i + OrigSplitNumElements));
      }
      LoV = DAG.getNode(ISD::BUILD_VECTOR, DL, OrigSplitVT, LoOps);
      HiV = DAG.getNode(ISD::BUILD_VECTOR, DL, OrigSplitVT, HiOps);
    }
    return std::make_pair(DAG.getNode(ISD::BITCAST, DL, SplitVT, LoV),
                          DAG.getNode(ISD::BITCAST, DL, SplitVT, HiV));
  };

  SDValue LoV1, HiV1, LoV2, HiV2;
  std::tie(LoV1, HiV1) = SplitVector(V1);
  std::tie(LoV2, HiV2) = SplitVector(V2);

  // Now create two 4-way blends of these half-width vectors.
  auto HalfBlend = [&](ArrayRef<int> HalfMask) {
    bool UseLoV1 = false, UseHiV1 = false, UseLoV2 = false, UseHiV2 = false;
    SmallVector<int, 32> V1BlendMask, V2BlendMask, BlendMask;
    for (int i = 0; i < SplitNumElements; ++i) {
      int M = HalfMask[i];
      if (M >= NumElements) {
        if (M >= NumElements + SplitNumElements)
          UseHiV2 = true;
        else
          UseLoV2 = true;
        V2BlendMask.push_back(M - NumElements);
        V1BlendMask.push_back(-1);
        BlendMask.push_back(SplitNumElements + i);
      } else if (M >= 0) {
        if (M >= SplitNumElements)
          UseHiV1 = true;
        else
          UseLoV1 = true;
        V2BlendMask.push_back(-1);
        V1BlendMask.push_back(M);
        BlendMask.push_back(i);
      } else {
        V2BlendMask.push_back(-1);
        V1BlendMask.push_back(-1);
        BlendMask.push_back(-1);
      }
    }

    // Because the lowering happens after all combining takes place, we need to
    // manually combine these blend masks as much as possible so that we create
    // a minimal number of high-level vector shuffle nodes.

    // First try just blending the halves of V1 or V2.
    if (!UseLoV1 && !UseHiV1 && !UseLoV2 && !UseHiV2)
      return DAG.getUNDEF(SplitVT);
    if (!UseLoV2 && !UseHiV2)
      return DAG.getVectorShuffle(SplitVT, DL, LoV1, HiV1, V1BlendMask);
    if (!UseLoV1 && !UseHiV1)
      return DAG.getVectorShuffle(SplitVT, DL, LoV2, HiV2, V2BlendMask);

    SDValue V1Blend, V2Blend;
    if (UseLoV1 && UseHiV1) {
      V1Blend =
          DAG.getVectorShuffle(SplitVT, DL, LoV1, HiV1, V1BlendMask);
    } else {
      // We only use half of V1 so map the usage down into the final blend mask.
      V1Blend = UseLoV1 ? LoV1 : HiV1;
      for (int i = 0; i < SplitNumElements; ++i)
        if (BlendMask[i] >= 0 && BlendMask[i] < SplitNumElements)
          BlendMask[i] = V1BlendMask[i] - (UseLoV1 ? 0 : SplitNumElements);
    }
    if (UseLoV2 && UseHiV2) {
      V2Blend =
          DAG.getVectorShuffle(SplitVT, DL, LoV2, HiV2, V2BlendMask);
    } else {
      // We only use half of V2 so map the usage down into the final blend mask.
      V2Blend = UseLoV2 ? LoV2 : HiV2;
      for (int i = 0; i < SplitNumElements; ++i)
        if (BlendMask[i] >= SplitNumElements)
          BlendMask[i] = V2BlendMask[i] + (UseLoV2 ? SplitNumElements : 0);
    }
    return DAG.getVectorShuffle(SplitVT, DL, V1Blend, V2Blend, BlendMask);
  };
  SDValue Lo = HalfBlend(LoMask);
  SDValue Hi = HalfBlend(HiMask);
  return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Lo, Hi);
}
/// \brief Either split a vector in halves or decompose the shuffles and the
/// blend.
///
/// This is provided as a good fallback for many lowerings of non-single-input
/// shuffles with more than one 128-bit lane. In those cases, we want to select
/// between splitting the shuffle into 128-bit components and stitching those
/// back together vs. extracting the single-input shuffles and blending those
/// results.
static SDValue lowerVectorShuffleAsSplitOrBlend(SDLoc DL, MVT VT, SDValue V1,
                                                SDValue V2, ArrayRef<int> Mask,
                                                SelectionDAG &DAG) {
  assert(!isSingleInputShuffleMask(Mask) && "This routine must not be used to "
                                            "lower single-input shuffles as it "
                                            "could then recurse on itself.");
  int Size = Mask.size();

  // If this can be modeled as a broadcast of two elements followed by a blend,
  // prefer that lowering. This is especially important because broadcasts can
  // often fold with memory operands.
  auto DoBothBroadcast = [&] {
    int V1BroadcastIdx = -1, V2BroadcastIdx = -1;
    for (int M : Mask)
      if (M >= Size) {
        if (V2BroadcastIdx == -1)
          V2BroadcastIdx = M - Size;
        else if (M - Size != V2BroadcastIdx)
          return false;
      } else if (M >= 0) {
        if (V1BroadcastIdx == -1)
          V1BroadcastIdx = M;
        else if (M != V1BroadcastIdx)
          return false;
      }
    return true;
  };
  if (DoBothBroadcast())
    return lowerVectorShuffleAsDecomposedShuffleBlend(DL, VT, V1, V2, Mask,
                                                      DAG);

  // If the inputs all stem from a single 128-bit lane of each input, then we
  // split them rather than blending because the split will decompose to
  // unusually few instructions.
  int LaneCount = VT.getSizeInBits() / 128;
  int LaneSize = Size / LaneCount;
  SmallBitVector LaneInputs[2];
  LaneInputs[0].resize(LaneCount, false);
  LaneInputs[1].resize(LaneCount, false);
  for (int i = 0; i < Size; ++i)
    if (Mask[i] >= 0)
      LaneInputs[Mask[i] / Size][(Mask[i] % Size) / LaneSize] = true;
  if (LaneInputs[0].count() <= 1 && LaneInputs[1].count() <= 1)
    return splitAndLowerVectorShuffle(DL, VT, V1, V2, Mask, DAG);

  // Otherwise, just fall back to decomposed shuffles and a blend. This requires
  // that the decomposed single-input shuffles don't end up here.
  return lowerVectorShuffleAsDecomposedShuffleBlend(DL, VT, V1, V2, Mask, DAG);
}
/// \brief Lower a vector shuffle crossing multiple 128-bit lanes as
/// a permutation and blend of those lanes.
///
/// This essentially blends the out-of-lane inputs to each lane into the lane
/// from a permuted copy of the vector. This lowering strategy results in four
/// instructions in the worst case for a single-input cross lane shuffle which
/// is lower than any other fully general cross-lane shuffle strategy I'm aware
/// of. Special cases for each particular shuffle pattern should be handled
/// prior to trying this lowering.
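///
/// For a single-input v4f64 illustration, the mask <2, 1, 0, 3> crosses in
/// both lanes; the vector is flipped with VPERM2X128 so its 128-bit halves
/// swap, and the result is formed by blending the original and flipped
/// copies with the now in-lane mask <4, 1, 6, 3>.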
static SDValue lowerVectorShuffleAsLanePermuteAndBlend(SDLoc DL, MVT VT,
                                                       SDValue V1, SDValue V2,
                                                       ArrayRef<int> Mask,
                                                       SelectionDAG &DAG) {
  // FIXME: This should probably be generalized for 512-bit vectors as well.
  assert(VT.getSizeInBits() == 256 && "Only for 256-bit vector shuffles!");
  int LaneSize = Mask.size() / 2;

  // If there are only inputs from one 128-bit lane, splitting will in fact be
  // less expensive. The flags track whether the given lane contains an element
  // that crosses to another lane.
  bool LaneCrossing[2] = {false, false};
  for (int i = 0, Size = Mask.size(); i < Size; ++i)
    if (Mask[i] >= 0 && (Mask[i] % Size) / LaneSize != i / LaneSize)
      LaneCrossing[(Mask[i] % Size) / LaneSize] = true;
  if (!LaneCrossing[0] || !LaneCrossing[1])
    return splitAndLowerVectorShuffle(DL, VT, V1, V2, Mask, DAG);

  if (isSingleInputShuffleMask(Mask)) {
    SmallVector<int, 32> FlippedBlendMask;
    for (int i = 0, Size = Mask.size(); i < Size; ++i)
      FlippedBlendMask.push_back(
          Mask[i] < 0 ? -1 : (((Mask[i] % Size) / LaneSize == i / LaneSize)
                                  ? Mask[i]
                                  : Mask[i] % LaneSize +
                                        (i / LaneSize) * LaneSize + Size));

    // Flip the vector, and blend the results which should now be in-lane. The
    // VPERM2X128 mask uses the low 2 bits for the low source and bits 4 and
    // 5 for the high source. The value 3 selects the high half of source 2 and
    // the value 2 selects the low half of source 2. We only use source 2 to
    // allow folding it into a memory operand.
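    //
    // With this immediate (3 | 2 << 4 == 0x23) a v4f64 input <a, b, c, d>
    // becomes <c, d, a, b>: the two 128-bit halves are swapped.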
    unsigned PERMMask = 3 | 2 << 4;
    SDValue Flipped = DAG.getNode(X86ISD::VPERM2X128, DL, VT, DAG.getUNDEF(VT),
                                  V1, DAG.getConstant(PERMMask, MVT::i8));
    return DAG.getVectorShuffle(VT, DL, V1, Flipped, FlippedBlendMask);
  }

  // This now reduces to two single-input shuffles of V1 and V2 which at worst
  // will be handled by the above logic and a blend of the results, much like
  // other patterns in AVX.
  return lowerVectorShuffleAsDecomposedShuffleBlend(DL, VT, V1, V2, Mask, DAG);
}
/// \brief Handle lowering 2-lane 128-bit shuffles.
static SDValue lowerV2X128VectorShuffle(SDLoc DL, MVT VT, SDValue V1,
                                        SDValue V2, ArrayRef<int> Mask,
                                        const X86Subtarget *Subtarget,
                                        SelectionDAG &DAG) {
  // Blends are faster and handle all the non-lane-crossing cases.
  if (SDValue Blend = lowerVectorShuffleAsBlend(DL, VT, V1, V2, Mask,
                                                Subtarget, DAG))
    return Blend;

  MVT SubVT = MVT::getVectorVT(VT.getVectorElementType(),
                               VT.getVectorNumElements() / 2);
  // Check for patterns which can be matched with a single insert of a 128-bit
  // subvector.
  if (isShuffleEquivalent(Mask, 0, 1, 0, 1) ||
      isShuffleEquivalent(Mask, 0, 1, 4, 5)) {
    SDValue LoV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT, V1,
                              DAG.getIntPtrConstant(0));
    SDValue HiV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT,
                              Mask[2] < 4 ? V1 : V2, DAG.getIntPtrConstant(0));
    return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, LoV, HiV);
  }
  if (isShuffleEquivalent(Mask, 0, 1, 6, 7)) {
    SDValue LoV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT, V1,
                              DAG.getIntPtrConstant(0));
    SDValue HiV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT, V2,
                              DAG.getIntPtrConstant(2));
    return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, LoV, HiV);
  }

  // Otherwise form a 128-bit permutation.
  // FIXME: Detect zero-vector inputs and use the VPERM2X128 to zero that half.
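  //
  // For example (illustrative), the v4f64 mask <2, 3, 4, 5> selects the high
  // half of V1 (code 1) and the low half of V2 (code 2), so the immediate
  // computed below is 1 | 2 << 4 == 0x21.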
  unsigned PermMask = Mask[0] / 2 | (Mask[2] / 2) << 4;
  return DAG.getNode(X86ISD::VPERM2X128, DL, VT, V1, V2,
                     DAG.getConstant(PermMask, MVT::i8));
}
/// \brief Lower a vector shuffle by first fixing the 128-bit lanes and then
/// shuffling each lane.
///
/// This will only succeed when the result of fixing the 128-bit lanes results
/// in a single-input non-lane-crossing shuffle with a repeating shuffle mask in
/// each 128-bit lane. This handles many cases where we can quickly blend away
/// the lane crosses early and then use simpler shuffles within each lane.
///
/// FIXME: It might be worthwhile at some point to support this without
/// requiring the 128-bit lane-relative shuffles to be repeating, but currently
/// in x86 only floating point has interesting non-repeating shuffles, and even
/// those are still *marginally* more expensive.
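///
/// For instance (illustrative), the v8i32 mask <2, 3, 0, 1, 14, 15, 12, 13>
/// first merges the required lanes with the v4i64 shuffle <0, 1, 6, 7> and
/// then applies the repeating in-lane mask <2, 3, 0, 1, 6, 7, 4, 5>.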
static SDValue lowerVectorShuffleByMerging128BitLanes(
    SDLoc DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
    const X86Subtarget *Subtarget, SelectionDAG &DAG) {
  assert(!isSingleInputShuffleMask(Mask) &&
         "This is only useful with multiple inputs.");

  int Size = Mask.size();
  int LaneSize = 128 / VT.getScalarSizeInBits();
  int NumLanes = Size / LaneSize;
  assert(NumLanes > 1 && "Only handles 256-bit and wider shuffles.");

  // See if we can build a hypothetical 128-bit lane-fixing shuffle mask. Also
  // check whether the in-128-bit lane shuffles share a repeating pattern.
  SmallVector<int, 4> Lanes;
  Lanes.resize(NumLanes, -1);
  SmallVector<int, 4> InLaneMask;
  InLaneMask.resize(LaneSize, -1);
  for (int i = 0; i < Size; ++i) {
    if (Mask[i] < 0)
      continue;

    int j = i / LaneSize;

    if (Lanes[j] < 0) {
      // First entry we've seen for this lane.
      Lanes[j] = Mask[i] / LaneSize;
    } else if (Lanes[j] != Mask[i] / LaneSize) {
      // This doesn't match the lane selected previously!
      return SDValue();
    }

    // Check that within each lane we have a consistent shuffle mask.
    int k = i % LaneSize;
    if (InLaneMask[k] < 0) {
      InLaneMask[k] = Mask[i] % LaneSize;
    } else if (InLaneMask[k] != Mask[i] % LaneSize) {
      // This doesn't fit a repeating in-lane mask.
      return SDValue();
    }
  }

  // First shuffle the lanes into place.
  MVT LaneVT = MVT::getVectorVT(VT.isFloatingPoint() ? MVT::f64 : MVT::i64,
                                VT.getSizeInBits() / 64);
  SmallVector<int, 8> LaneMask;
  LaneMask.resize(NumLanes * 2, -1);
  for (int i = 0; i < NumLanes; ++i)
    if (Lanes[i] >= 0) {
      LaneMask[2 * i + 0] = 2*Lanes[i] + 0;
      LaneMask[2 * i + 1] = 2*Lanes[i] + 1;
    }

  V1 = DAG.getNode(ISD::BITCAST, DL, LaneVT, V1);
  V2 = DAG.getNode(ISD::BITCAST, DL, LaneVT, V2);
  SDValue LaneShuffle = DAG.getVectorShuffle(LaneVT, DL, V1, V2, LaneMask);

  // Cast it back to the type we actually want.
  LaneShuffle = DAG.getNode(ISD::BITCAST, DL, VT, LaneShuffle);

  // Now do a simple shuffle that isn't lane crossing.
  SmallVector<int, 8> NewMask;
  NewMask.resize(Size, -1);
  for (int i = 0; i < Size; ++i)
    if (Mask[i] >= 0)
      NewMask[i] = (i / LaneSize) * LaneSize + Mask[i] % LaneSize;
  assert(!is128BitLaneCrossingShuffleMask(VT, NewMask) &&
         "Must not introduce lane crosses at this point!");

  return DAG.getVectorShuffle(VT, DL, LaneShuffle, DAG.getUNDEF(VT), NewMask);
}
/// \brief Test whether the specified input (0 or 1) is in-place blended by the
/// given mask.
///
/// This returns true if the elements from a particular input are already in the
/// slot required by the given mask and require no permutation.
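///
/// For example, in the v4 mask <0, 5, 2, 7> input 0 is in place (elements 0
/// and 2 already occupy result slots 0 and 2) and so is input 1 (elements 5
/// and 7 sit in slots 1 and 3).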
static bool isShuffleMaskInputInPlace(int Input, ArrayRef<int> Mask) {
  assert((Input == 0 || Input == 1) && "Only two inputs to shuffles.");
  int Size = Mask.size();
  for (int i = 0; i < Size; ++i)
    if (Mask[i] >= 0 && Mask[i] / Size == Input && Mask[i] % Size != i)
      return false;

  return true;
}
/// \brief Handle lowering of 4-lane 64-bit floating point shuffles.
///
/// Also ends up handling lowering of 4-lane 64-bit integer shuffles when AVX2
/// isn't available.
static SDValue lowerV4F64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
                                       const X86Subtarget *Subtarget,
                                       SelectionDAG &DAG) {
  SDLoc DL(Op);
  assert(V1.getSimpleValueType() == MVT::v4f64 && "Bad operand type!");
  assert(V2.getSimpleValueType() == MVT::v4f64 && "Bad operand type!");
  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
  ArrayRef<int> Mask = SVOp->getMask();
  assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");

  SmallVector<int, 4> WidenedMask;
  if (canWidenShuffleElements(Mask, WidenedMask))
    return lowerV2X128VectorShuffle(DL, MVT::v4f64, V1, V2, Mask, Subtarget,
                                    DAG);

  if (isSingleInputShuffleMask(Mask)) {
    // Check for being able to broadcast a single element.
    if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v4f64, DL, V1,
                                                          Mask, Subtarget, DAG))
      return Broadcast;

    // Use low duplicate instructions for masks that match their pattern.
    if (isShuffleEquivalent(Mask, 0, 0, 2, 2))
      return DAG.getNode(X86ISD::MOVDDUP, DL, MVT::v4f64, V1);

    if (!is128BitLaneCrossingShuffleMask(MVT::v4f64, Mask)) {
      // Non-half-crossing single input shuffles can be lowered with an
      // interleaved permutation.
      unsigned VPERMILPMask = (Mask[0] == 1) | ((Mask[1] == 1) << 1) |
                              ((Mask[2] == 3) << 2) | ((Mask[3] == 3) << 3);
      return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v4f64, V1,
                         DAG.getConstant(VPERMILPMask, MVT::i8));
    }

    // With AVX2 we have direct support for this permutation.
    if (Subtarget->hasAVX2())
      return DAG.getNode(X86ISD::VPERMI, DL, MVT::v4f64, V1,
                         getV4X86ShuffleImm8ForMask(Mask, DAG));

    // Otherwise, fall back.
    return lowerVectorShuffleAsLanePermuteAndBlend(DL, MVT::v4f64, V1, V2, Mask,
                                                   DAG);
  }

  // X86 has dedicated unpack instructions that can handle specific blend
  // operations: UNPCKH and UNPCKL.
  if (isShuffleEquivalent(Mask, 0, 4, 2, 6))
    return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v4f64, V1, V2);
  if (isShuffleEquivalent(Mask, 1, 5, 3, 7))
    return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v4f64, V1, V2);

  // If we have a single input to the zero element, insert that into V1 if we
  // can do so cheaply.
  int NumV2Elements =
      std::count_if(Mask.begin(), Mask.end(), [](int M) { return M >= 4; });
  if (NumV2Elements == 1 && Mask[0] >= 4)
    if (SDValue Insertion = lowerVectorShuffleAsElementInsertion(
            MVT::v4f64, DL, V1, V2, Mask, Subtarget, DAG))
      return Insertion;

  if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v4f64, V1, V2, Mask,
                                                Subtarget, DAG))
    return Blend;

  // Check if the blend happens to exactly fit that of SHUFPD.
  if ((Mask[0] == -1 || Mask[0] < 2) &&
      (Mask[1] == -1 || (Mask[1] >= 4 && Mask[1] < 6)) &&
      (Mask[2] == -1 || (Mask[2] >= 2 && Mask[2] < 4)) &&
      (Mask[3] == -1 || Mask[3] >= 6)) {
    unsigned SHUFPDMask = (Mask[0] == 1) | ((Mask[1] == 5) << 1) |
                          ((Mask[2] == 3) << 2) | ((Mask[3] == 7) << 3);
    return DAG.getNode(X86ISD::SHUFP, DL, MVT::v4f64, V1, V2,
                       DAG.getConstant(SHUFPDMask, MVT::i8));
  }
  if ((Mask[0] == -1 || (Mask[0] >= 4 && Mask[0] < 6)) &&
      (Mask[1] == -1 || Mask[1] < 2) &&
      (Mask[2] == -1 || Mask[2] >= 6) &&
      (Mask[3] == -1 || (Mask[3] >= 2 && Mask[3] < 4))) {
    unsigned SHUFPDMask = (Mask[0] == 5) | ((Mask[1] == 1) << 1) |
                          ((Mask[2] == 7) << 2) | ((Mask[3] == 3) << 3);
    return DAG.getNode(X86ISD::SHUFP, DL, MVT::v4f64, V2, V1,
                       DAG.getConstant(SHUFPDMask, MVT::i8));
  }

  // Try to simplify this by merging 128-bit lanes to enable a lane-based
  // shuffle. However, if we have AVX2 and either input is already in place,
  // we will be able to shuffle the other input even across lanes in a single
  // instruction, so skip this pattern.
  if (!(Subtarget->hasAVX2() && (isShuffleMaskInputInPlace(0, Mask) ||
                                 isShuffleMaskInputInPlace(1, Mask))))
    if (SDValue Result = lowerVectorShuffleByMerging128BitLanes(
            DL, MVT::v4f64, V1, V2, Mask, Subtarget, DAG))
      return Result;

  // If we have AVX2 then we always want to lower with a blend because at v4 we
  // can fully permute the elements.
  if (Subtarget->hasAVX2())
    return lowerVectorShuffleAsDecomposedShuffleBlend(DL, MVT::v4f64, V1, V2,
                                                      Mask, DAG);

  // Otherwise fall back on generic lowering.
  return lowerVectorShuffleAsSplitOrBlend(DL, MVT::v4f64, V1, V2, Mask, DAG);
}
/// \brief Handle lowering of 4-lane 64-bit integer shuffles.
///
/// This routine is only called when we have AVX2 and thus a reasonable
/// instruction set for v4i64 shuffling.
static SDValue lowerV4I64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
                                       const X86Subtarget *Subtarget,
                                       SelectionDAG &DAG) {
  SDLoc DL(Op);
  assert(V1.getSimpleValueType() == MVT::v4i64 && "Bad operand type!");
  assert(V2.getSimpleValueType() == MVT::v4i64 && "Bad operand type!");
  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
  ArrayRef<int> Mask = SVOp->getMask();
  assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");
  assert(Subtarget->hasAVX2() && "We can only lower v4i64 with AVX2!");

  SmallVector<int, 4> WidenedMask;
  if (canWidenShuffleElements(Mask, WidenedMask))
    return lowerV2X128VectorShuffle(DL, MVT::v4i64, V1, V2, Mask, Subtarget,
                                    DAG);

  if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v4i64, V1, V2, Mask,
                                                Subtarget, DAG))
    return Blend;

  // Check for being able to broadcast a single element.
  if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v4i64, DL, V1,
                                                        Mask, Subtarget, DAG))
    return Broadcast;

  // When the shuffle is mirrored between the 128-bit lanes of the unit, we can
  // use lower latency instructions that will operate on both 128-bit lanes.
  SmallVector<int, 2> RepeatedMask;
  if (is128BitLaneRepeatedShuffleMask(MVT::v4i64, Mask, RepeatedMask)) {
    if (isSingleInputShuffleMask(Mask)) {
      int PSHUFDMask[] = {-1, -1, -1, -1};
      for (int i = 0; i < 2; ++i)
        if (RepeatedMask[i] >= 0) {
          PSHUFDMask[2 * i] = 2 * RepeatedMask[i];
          PSHUFDMask[2 * i + 1] = 2 * RepeatedMask[i] + 1;
        }
      return DAG.getNode(
          ISD::BITCAST, DL, MVT::v4i64,
          DAG.getNode(X86ISD::PSHUFD, DL, MVT::v8i32,
                      DAG.getNode(ISD::BITCAST, DL, MVT::v8i32, V1),
                      getV4X86ShuffleImm8ForMask(PSHUFDMask, DAG)));
    }

    // Use dedicated unpack instructions for masks that match their pattern.
    if (isShuffleEquivalent(Mask, 0, 4, 2, 6))
      return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v4i64, V1, V2);
    if (isShuffleEquivalent(Mask, 1, 5, 3, 7))
      return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v4i64, V1, V2);
  }

  // AVX2 provides a direct instruction for permuting a single input across
  // lanes.
  if (isSingleInputShuffleMask(Mask))
    return DAG.getNode(X86ISD::VPERMI, DL, MVT::v4i64, V1,
                       getV4X86ShuffleImm8ForMask(Mask, DAG));

  // Try to simplify this by merging 128-bit lanes to enable a lane-based
  // shuffle. However, if we have AVX2 and either input is already in place,
  // we will be able to shuffle the other input even across lanes in a single
  // instruction, so skip this pattern.
  if (!(Subtarget->hasAVX2() && (isShuffleMaskInputInPlace(0, Mask) ||
                                 isShuffleMaskInputInPlace(1, Mask))))
    if (SDValue Result = lowerVectorShuffleByMerging128BitLanes(
            DL, MVT::v4i64, V1, V2, Mask, Subtarget, DAG))
      return Result;

  // Otherwise fall back on generic blend lowering.
  return lowerVectorShuffleAsDecomposedShuffleBlend(DL, MVT::v4i64, V1, V2,
                                                    Mask, DAG);
}
/// \brief Handle lowering of 8-lane 32-bit floating point shuffles.
///
/// Also ends up handling lowering of 8-lane 32-bit integer shuffles when AVX2
/// isn't available.
static SDValue lowerV8F32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
                                       const X86Subtarget *Subtarget,
                                       SelectionDAG &DAG) {
  SDLoc DL(Op);
  assert(V1.getSimpleValueType() == MVT::v8f32 && "Bad operand type!");
  assert(V2.getSimpleValueType() == MVT::v8f32 && "Bad operand type!");
  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
  ArrayRef<int> Mask = SVOp->getMask();
  assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");

  if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v8f32, V1, V2, Mask,
                                                Subtarget, DAG))
    return Blend;

  // Check for being able to broadcast a single element.
  if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v8f32, DL, V1,
                                                        Mask, Subtarget, DAG))
    return Broadcast;

  // If the shuffle mask is repeated in each 128-bit lane, we have many more
  // options to efficiently lower the shuffle.
  SmallVector<int, 4> RepeatedMask;
  if (is128BitLaneRepeatedShuffleMask(MVT::v8f32, Mask, RepeatedMask)) {
    assert(RepeatedMask.size() == 4 &&
           "Repeated masks must be half the mask width!");

    // Use even/odd duplicate instructions for masks that match their pattern.
    if (isShuffleEquivalent(Mask, 0, 0, 2, 2, 4, 4, 6, 6))
      return DAG.getNode(X86ISD::MOVSLDUP, DL, MVT::v8f32, V1);
    if (isShuffleEquivalent(Mask, 1, 1, 3, 3, 5, 5, 7, 7))
      return DAG.getNode(X86ISD::MOVSHDUP, DL, MVT::v8f32, V1);

    if (isSingleInputShuffleMask(Mask))
      return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v8f32, V1,
                         getV4X86ShuffleImm8ForMask(RepeatedMask, DAG));

    // Use dedicated unpack instructions for masks that match their pattern.
    if (isShuffleEquivalent(Mask, 0, 8, 1, 9, 4, 12, 5, 13))
      return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v8f32, V1, V2);
    if (isShuffleEquivalent(Mask, 2, 10, 3, 11, 6, 14, 7, 15))
      return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v8f32, V1, V2);

    // Otherwise, fall back to a SHUFPS sequence. Here it is important that we
    // have already handled any direct blends. We also need to squash the
    // repeated mask into a simulated v4f32 mask.
    for (int i = 0; i < 4; ++i)
      if (RepeatedMask[i] >= 8)
        RepeatedMask[i] -= 4;
    return lowerVectorShuffleWithSHUFPS(DL, MVT::v8f32, RepeatedMask, V1, V2,
                                        DAG);
  }

  // If we have a single input shuffle with different shuffle patterns in the
  // two 128-bit lanes use the variable mask to VPERMILPS.
  if (isSingleInputShuffleMask(Mask)) {
    SDValue VPermMask[8];
    for (int i = 0; i < 8; ++i)
      VPermMask[i] = Mask[i] < 0 ? DAG.getUNDEF(MVT::i32)
                                 : DAG.getConstant(Mask[i], MVT::i32);
    if (!is128BitLaneCrossingShuffleMask(MVT::v8f32, Mask))
      return DAG.getNode(
          X86ISD::VPERMILPV, DL, MVT::v8f32, V1,
          DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v8i32, VPermMask));

    if (Subtarget->hasAVX2())
      return DAG.getNode(X86ISD::VPERMV, DL, MVT::v8f32,
                         DAG.getNode(ISD::BITCAST, DL, MVT::v8f32,
                                     DAG.getNode(ISD::BUILD_VECTOR, DL,
                                                 MVT::v8i32, VPermMask)),
                         V1);

    // Otherwise, fall back.
    return lowerVectorShuffleAsLanePermuteAndBlend(DL, MVT::v8f32, V1, V2, Mask,
                                                   DAG);
  }

  // Try to simplify this by merging 128-bit lanes to enable a lane-based
  // shuffle.
  if (SDValue Result = lowerVectorShuffleByMerging128BitLanes(
          DL, MVT::v8f32, V1, V2, Mask, Subtarget, DAG))
    return Result;

  // If we have AVX2 then we always want to lower with a blend because at v8 we
  // can fully permute the elements.
  if (Subtarget->hasAVX2())
    return lowerVectorShuffleAsDecomposedShuffleBlend(DL, MVT::v8f32, V1, V2,
                                                      Mask, DAG);

  // Otherwise fall back on generic lowering.
  return lowerVectorShuffleAsSplitOrBlend(DL, MVT::v8f32, V1, V2, Mask, DAG);
}
10777 /// \brief Handle lowering of 8-lane 32-bit integer shuffles.
10779 /// This routine is only called when we have AVX2 and thus a reasonable
10780 /// instruction set for v8i32 shuffling..
10781 static SDValue lowerV8I32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
10782 const X86Subtarget *Subtarget,
10783 SelectionDAG &DAG) {
10785 assert(V1.getSimpleValueType() == MVT::v8i32 && "Bad operand type!");
10786 assert(V2.getSimpleValueType() == MVT::v8i32 && "Bad operand type!");
10787 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
10788 ArrayRef<int> Mask = SVOp->getMask();
10789 assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
10790 assert(Subtarget->hasAVX2() && "We can only lower v8i32 with AVX2!");
10792 // Whenever we can lower this as a zext, that instruction is strictly faster
10793 // than any alternative. It also allows us to fold memory operands into the
10794 // shuffle in many cases.
10795 if (SDValue ZExt = lowerVectorShuffleAsZeroOrAnyExtend(DL, MVT::v8i32, V1, V2,
10796 Mask, Subtarget, DAG))
10799 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v8i32, V1, V2, Mask,
10803 // Check for being able to broadcast a single element.
10804 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v8i32, DL, V1,
10805 Mask, Subtarget, DAG))
10808 // If the shuffle mask is repeated in each 128-bit lane we can use more
10809 // efficient instructions that mirror the shuffles across the two 128-bit
10811 SmallVector<int, 4> RepeatedMask;
10812 if (is128BitLaneRepeatedShuffleMask(MVT::v8i32, Mask, RepeatedMask)) {
10813 assert(RepeatedMask.size() == 4 && "Unexpected repeated mask size!");
10814 if (isSingleInputShuffleMask(Mask))
10815 return DAG.getNode(X86ISD::PSHUFD, DL, MVT::v8i32, V1,
10816 getV4X86ShuffleImm8ForMask(RepeatedMask, DAG));
10818 // Use dedicated unpack instructions for masks that match their pattern.
10819 if (isShuffleEquivalent(Mask, 0, 8, 1, 9, 4, 12, 5, 13))
10820 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v8i32, V1, V2);
10821 if (isShuffleEquivalent(Mask, 2, 10, 3, 11, 6, 14, 7, 15))
10822 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v8i32, V1, V2);
10825 // If the shuffle patterns aren't repeated but it is a single input, directly
10826 // generate a cross-lane VPERMD instruction.
10827 if (isSingleInputShuffleMask(Mask)) {
10828 SDValue VPermMask[8];
10829 for (int i = 0; i < 8; ++i)
10830 VPermMask[i] = Mask[i] < 0 ? DAG.getUNDEF(MVT::i32)
10831 : DAG.getConstant(Mask[i], MVT::i32);
10832 return DAG.getNode(
10833 X86ISD::VPERMV, DL, MVT::v8i32,
10834 DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v8i32, VPermMask), V1);
10835 }
10837 // Try to use bit shift instructions.
10838 if (SDValue Shift = lowerVectorShuffleAsBitShift(
10839 DL, MVT::v8i32, V1, V2, Mask, DAG))
10840 return Shift;
10842 // Try to simplify this by merging 128-bit lanes to enable a lane-based
10843 // shuffle.
10844 if (SDValue Result = lowerVectorShuffleByMerging128BitLanes(
10845 DL, MVT::v8i32, V1, V2, Mask, Subtarget, DAG))
10846 return Result;
10848 // Otherwise fall back on generic blend lowering.
10849 return lowerVectorShuffleAsDecomposedShuffleBlend(DL, MVT::v8i32, V1, V2,
10850 Mask, DAG);
10851 }
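// For example, with AVX2 the single-input, lane-crossing shuffle
//   shufflevector <8 x i32> %a, <8 x i32> undef,
//                 <8 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
// is handled by the VPERMD path above: the mask is materialized as a
// constant v8i32 and one cross-lane VPERMD performs the full reversal.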
10853 /// \brief Handle lowering of 16-lane 16-bit integer shuffles.
10854 ///
10855 /// This routine is only called when we have AVX2 and thus a reasonable
10856 /// instruction set for v16i16 shuffling.
10857 static SDValue lowerV16I16VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
10858 const X86Subtarget *Subtarget,
10859 SelectionDAG &DAG) {
10860 SDLoc DL(Op);
10861 assert(V1.getSimpleValueType() == MVT::v16i16 && "Bad operand type!");
10862 assert(V2.getSimpleValueType() == MVT::v16i16 && "Bad operand type!");
10863 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
10864 ArrayRef<int> Mask = SVOp->getMask();
10865 assert(Mask.size() == 16 && "Unexpected mask size for v16 shuffle!");
10866 assert(Subtarget->hasAVX2() && "We can only lower v16i16 with AVX2!");
10868 // Whenever we can lower this as a zext, that instruction is strictly faster
10869 // than any alternative. It also allows us to fold memory operands into the
10870 // shuffle in many cases.
10871 if (SDValue ZExt = lowerVectorShuffleAsZeroOrAnyExtend(DL, MVT::v16i16, V1, V2,
10872 Mask, Subtarget, DAG))
10873 return ZExt;
10875 // Check for being able to broadcast a single element.
10876 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v16i16, DL, V1,
10877 Mask, Subtarget, DAG))
10878 return Broadcast;
10880 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v16i16, V1, V2, Mask,
10881 Subtarget, DAG))
10882 return Blend;
10884 // Use dedicated unpack instructions for masks that match their pattern.
10885 if (isShuffleEquivalent(Mask,
10886 // First 128-bit lane:
10887 0, 16, 1, 17, 2, 18, 3, 19,
10888 // Second 128-bit lane:
10889 8, 24, 9, 25, 10, 26, 11, 27))
10890 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v16i16, V1, V2);
10891 if (isShuffleEquivalent(Mask,
10892 // First 128-bit lane:
10893 4, 20, 5, 21, 6, 22, 7, 23,
10894 // Second 128-bit lane:
10895 12, 28, 13, 29, 14, 30, 15, 31))
10896 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v16i16, V1, V2);
10898 if (isSingleInputShuffleMask(Mask)) {
10899 // There are no generalized cross-lane shuffle operations available on i16
10900 // element types.
10901 if (is128BitLaneCrossingShuffleMask(MVT::v16i16, Mask))
10902 return lowerVectorShuffleAsLanePermuteAndBlend(DL, MVT::v16i16, V1, V2,
10903 Mask, DAG);
10905 SDValue PSHUFBMask[32];
10906 for (int i = 0; i < 16; ++i) {
10907 if (Mask[i] == -1) {
10908 PSHUFBMask[2 * i] = PSHUFBMask[2 * i + 1] = DAG.getUNDEF(MVT::i8);
10909 continue;
10910 }
10912 int M = i < 8 ? Mask[i] : Mask[i] - 8;
10913 assert(M >= 0 && M < 8 && "Invalid single-input mask!");
10914 PSHUFBMask[2 * i] = DAG.getConstant(2 * M, MVT::i8);
10915 PSHUFBMask[2 * i + 1] = DAG.getConstant(2 * M + 1, MVT::i8);
10916 }
10917 return DAG.getNode(
10918 ISD::BITCAST, DL, MVT::v16i16,
10919 DAG.getNode(
10920 X86ISD::PSHUFB, DL, MVT::v32i8,
10921 DAG.getNode(ISD::BITCAST, DL, MVT::v32i8, V1),
10922 DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v32i8, PSHUFBMask)));
10923 }
10925 // Try to use bit shift instructions.
10926 if (SDValue Shift = lowerVectorShuffleAsBitShift(
10927 DL, MVT::v16i16, V1, V2, Mask, DAG))
10928 return Shift;
10930 // Try to simplify this by merging 128-bit lanes to enable a lane-based
10931 // shuffle.
10932 if (SDValue Result = lowerVectorShuffleByMerging128BitLanes(
10933 DL, MVT::v16i16, V1, V2, Mask, Subtarget, DAG))
10934 return Result;
10936 // Otherwise fall back on generic lowering.
10937 return lowerVectorShuffleAsSplitOrBlend(DL, MVT::v16i16, V1, V2, Mask, DAG);
10938 }
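// For example, the single-input v16i16 mask
//   <0,0,1,1,2,2,3,3, 8,8,9,9,10,10,11,11>
// stays within its 128-bit lanes, so the loop above expands each word index
// M to the byte pair (2*M, 2*M+1) and the whole shuffle becomes one VPSHUFB
// on the v32i8 bitcast of the input.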
10940 /// \brief Handle lowering of 32-lane 8-bit integer shuffles.
10941 ///
10942 /// This routine is only called when we have AVX2 and thus a reasonable
10943 /// instruction set for v32i8 shuffling.
10944 static SDValue lowerV32I8VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
10945 const X86Subtarget *Subtarget,
10946 SelectionDAG &DAG) {
10947 SDLoc DL(Op);
10948 assert(V1.getSimpleValueType() == MVT::v32i8 && "Bad operand type!");
10949 assert(V2.getSimpleValueType() == MVT::v32i8 && "Bad operand type!");
10950 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
10951 ArrayRef<int> Mask = SVOp->getMask();
10952 assert(Mask.size() == 32 && "Unexpected mask size for v32 shuffle!");
10953 assert(Subtarget->hasAVX2() && "We can only lower v32i8 with AVX2!");
10955 // Whenever we can lower this as a zext, that instruction is strictly faster
10956 // than any alternative. It also allows us to fold memory operands into the
10957 // shuffle in many cases.
10958 if (SDValue ZExt = lowerVectorShuffleAsZeroOrAnyExtend(DL, MVT::v32i8, V1, V2,
10959 Mask, Subtarget, DAG))
10960 return ZExt;
10962 // Check for being able to broadcast a single element.
10963 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v32i8, DL, V1,
10964 Mask, Subtarget, DAG))
10965 return Broadcast;
10967 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v32i8, V1, V2, Mask,
10968 Subtarget, DAG))
10969 return Blend;
10971 // Use dedicated unpack instructions for masks that match their pattern.
10972 // Note that these are repeated 128-bit lane unpacks, not unpacks across all
10973 // 256-bit lanes.
10974 if (isShuffleEquivalent(
10975 Mask,
10976 // First 128-bit lane:
10977 0, 32, 1, 33, 2, 34, 3, 35, 4, 36, 5, 37, 6, 38, 7, 39,
10978 // Second 128-bit lane:
10979 16, 48, 17, 49, 18, 50, 19, 51, 20, 52, 21, 53, 22, 54, 23, 55))
10980 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v32i8, V1, V2);
10981 if (isShuffleEquivalent(
10982 Mask,
10983 // First 128-bit lane:
10984 8, 40, 9, 41, 10, 42, 11, 43, 12, 44, 13, 45, 14, 46, 15, 47,
10985 // Second 128-bit lane:
10986 24, 56, 25, 57, 26, 58, 27, 59, 28, 60, 29, 61, 30, 62, 31, 63))
10987 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v32i8, V1, V2);
10989 if (isSingleInputShuffleMask(Mask)) {
10990 // There are no generalized cross-lane shuffle operations available on i8
10991 // element types.
10992 if (is128BitLaneCrossingShuffleMask(MVT::v32i8, Mask))
10993 return lowerVectorShuffleAsLanePermuteAndBlend(DL, MVT::v32i8, V1, V2,
10994 Mask, DAG);
10996 SDValue PSHUFBMask[32];
10997 for (int i = 0; i < 32; ++i)
10998 PSHUFBMask[i] =
10999 Mask[i] < 0
11000 ? DAG.getUNDEF(MVT::i8)
11001 : DAG.getConstant(Mask[i] < 16 ? Mask[i] : Mask[i] - 16, MVT::i8);
11003 return DAG.getNode(
11004 X86ISD::PSHUFB, DL, MVT::v32i8, V1,
11005 DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v32i8, PSHUFBMask));
11006 }
11008 // Try to use bit shift instructions.
11009 if (SDValue Shift = lowerVectorShuffleAsBitShift(
11010 DL, MVT::v32i8, V1, V2, Mask, DAG))
11011 return Shift;
11013 // Try to simplify this by merging 128-bit lanes to enable a lane-based
11014 // shuffle.
11015 if (SDValue Result = lowerVectorShuffleByMerging128BitLanes(
11016 DL, MVT::v32i8, V1, V2, Mask, Subtarget, DAG))
11017 return Result;
11019 // Otherwise fall back on generic lowering.
11020 return lowerVectorShuffleAsSplitOrBlend(DL, MVT::v32i8, V1, V2, Mask, DAG);
11021 }
11023 /// \brief High-level routine to lower various 256-bit x86 vector shuffles.
11024 ///
11025 /// This routine either breaks down the specific type of a 256-bit x86 vector
11026 /// shuffle or splits it into two 128-bit shuffles and fuses the results back
11027 /// together based on the available instructions.
11028 static SDValue lower256BitVectorShuffle(SDValue Op, SDValue V1, SDValue V2,
11029 MVT VT, const X86Subtarget *Subtarget,
11030 SelectionDAG &DAG) {
11031 SDLoc DL(Op);
11032 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
11033 ArrayRef<int> Mask = SVOp->getMask();
11035 // There is a really nice hard cut-over between AVX1 and AVX2 that means we can
11036 // check for those subtargets here and avoid much of the subtarget querying in
11037 // the per-vector-type lowering routines. With AVX1 we have essentially *zero*
11038 // ability to manipulate a 256-bit vector with integer types. Since we'll use
11039 // floating point types there eventually, just immediately cast everything to
11040 // a float and operate entirely in that domain.
11041 if (VT.isInteger() && !Subtarget->hasAVX2()) {
11042 int ElementBits = VT.getScalarSizeInBits();
11043 if (ElementBits < 32)
11044 // No floating point type available, decompose into 128-bit vectors.
11045 return splitAndLowerVectorShuffle(DL, VT, V1, V2, Mask, DAG);
11047 MVT FpVT = MVT::getVectorVT(MVT::getFloatingPointVT(ElementBits),
11048 VT.getVectorNumElements());
11049 V1 = DAG.getNode(ISD::BITCAST, DL, FpVT, V1);
11050 V2 = DAG.getNode(ISD::BITCAST, DL, FpVT, V2);
11051 return DAG.getNode(ISD::BITCAST, DL, VT,
11052 DAG.getVectorShuffle(FpVT, DL, V1, V2, Mask));
11053 }
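// For example, a v4i64 shuffle on AVX1 is bitcast to v4f64 here and lowered
// with the floating-point shuffle instructions (such as VPERM2F128 and
// VSHUFPD); a v32i8 shuffle has no 32-bit-or-wider element type to map to,
// so it is split into two 128-bit halves by splitAndLowerVectorShuffle.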
11055 switch (VT.SimpleTy) {
11056 case MVT::v4f64:
11057 return lowerV4F64VectorShuffle(Op, V1, V2, Subtarget, DAG);
11058 case MVT::v4i64:
11059 return lowerV4I64VectorShuffle(Op, V1, V2, Subtarget, DAG);
11060 case MVT::v8f32:
11061 return lowerV8F32VectorShuffle(Op, V1, V2, Subtarget, DAG);
11062 case MVT::v8i32:
11063 return lowerV8I32VectorShuffle(Op, V1, V2, Subtarget, DAG);
11064 case MVT::v16i16:
11065 return lowerV16I16VectorShuffle(Op, V1, V2, Subtarget, DAG);
11066 case MVT::v32i8:
11067 return lowerV32I8VectorShuffle(Op, V1, V2, Subtarget, DAG);
11069 default:
11070 llvm_unreachable("Not a valid 256-bit x86 vector type!");
11071 }
11072 }
11074 /// \brief Handle lowering of 8-lane 64-bit floating point shuffles.
11075 static SDValue lowerV8F64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
11076 const X86Subtarget *Subtarget,
11077 SelectionDAG &DAG) {
11078 SDLoc DL(Op);
11079 assert(V1.getSimpleValueType() == MVT::v8f64 && "Bad operand type!");
11080 assert(V2.getSimpleValueType() == MVT::v8f64 && "Bad operand type!");
11081 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
11082 ArrayRef<int> Mask = SVOp->getMask();
11083 assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
11085 // X86 has dedicated unpack instructions that can handle specific blend
11086 // operations: UNPCKH and UNPCKL.
11087 if (isShuffleEquivalent(Mask, 0, 8, 2, 10, 4, 12, 6, 14))
11088 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v8f64, V1, V2);
11089 if (isShuffleEquivalent(Mask, 1, 9, 3, 11, 5, 13, 7, 15))
11090 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v8f64, V1, V2);
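// For example, <0,8,2,10,4,12,6,14> interleaves the even-indexed elements of
// the two inputs within each 128-bit lane and maps directly to VUNPCKLPD zmm
// under AVX-512; any other v8f64 mask is split into 256-bit halves below.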
11092 // FIXME: Implement direct support for this type!
11093 return splitAndLowerVectorShuffle(DL, MVT::v8f64, V1, V2, Mask, DAG);
11094 }
11096 /// \brief Handle lowering of 16-lane 32-bit floating point shuffles.
11097 static SDValue lowerV16F32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
11098 const X86Subtarget *Subtarget,
11099 SelectionDAG &DAG) {
11100 SDLoc DL(Op);
11101 assert(V1.getSimpleValueType() == MVT::v16f32 && "Bad operand type!");
11102 assert(V2.getSimpleValueType() == MVT::v16f32 && "Bad operand type!");
11103 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
11104 ArrayRef<int> Mask = SVOp->getMask();
11105 assert(Mask.size() == 16 && "Unexpected mask size for v16 shuffle!");
11107 // Use dedicated unpack instructions for masks that match their pattern.
11108 if (isShuffleEquivalent(Mask,
11109 0, 16, 1, 17, 4, 20, 5, 21,
11110 8, 24, 9, 25, 12, 28, 13, 29))
11111 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v16f32, V1, V2);
11112 if (isShuffleEquivalent(Mask,
11113 2, 18, 3, 19, 6, 22, 7, 23,
11114 10, 26, 11, 27, 14, 30, 15, 31))
11115 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v16f32, V1, V2);
11117 // FIXME: Implement direct support for this type!
11118 return splitAndLowerVectorShuffle(DL, MVT::v16f32, V1, V2, Mask, DAG);
11119 }
11121 /// \brief Handle lowering of 8-lane 64-bit integer shuffles.
11122 static SDValue lowerV8I64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
11123 const X86Subtarget *Subtarget,
11124 SelectionDAG &DAG) {
11125 SDLoc DL(Op);
11126 assert(V1.getSimpleValueType() == MVT::v8i64 && "Bad operand type!");
11127 assert(V2.getSimpleValueType() == MVT::v8i64 && "Bad operand type!");
11128 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
11129 ArrayRef<int> Mask = SVOp->getMask();
11130 assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
11132 // X86 has dedicated unpack instructions that can handle specific blend
11133 // operations: UNPCKH and UNPCKL.
11134 if (isShuffleEquivalent(Mask, 0, 8, 2, 10, 4, 12, 6, 14))
11135 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v8i64, V1, V2);
11136 if (isShuffleEquivalent(Mask, 1, 9, 3, 11, 5, 13, 7, 15))
11137 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v8i64, V1, V2);
11139 // FIXME: Implement direct support for this type!
11140 return splitAndLowerVectorShuffle(DL, MVT::v8i64, V1, V2, Mask, DAG);
11141 }
11143 /// \brief Handle lowering of 16-lane 32-bit integer shuffles.
11144 static SDValue lowerV16I32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
11145 const X86Subtarget *Subtarget,
11146 SelectionDAG &DAG) {
11147 SDLoc DL(Op);
11148 assert(V1.getSimpleValueType() == MVT::v16i32 && "Bad operand type!");
11149 assert(V2.getSimpleValueType() == MVT::v16i32 && "Bad operand type!");
11150 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
11151 ArrayRef<int> Mask = SVOp->getMask();
11152 assert(Mask.size() == 16 && "Unexpected mask size for v16 shuffle!");
11154 // Use dedicated unpack instructions for masks that match their pattern.
11155 if (isShuffleEquivalent(Mask,
11156 0, 16, 1, 17, 4, 20, 5, 21,
11157 8, 24, 9, 25, 12, 28, 13, 29))
11158 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v16i32, V1, V2);
11159 if (isShuffleEquivalent(Mask,
11160 2, 18, 3, 19, 6, 22, 7, 23,
11161 10, 26, 11, 27, 14, 30, 15, 31))
11162 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v16i32, V1, V2);
11164 // FIXME: Implement direct support for this type!
11165 return splitAndLowerVectorShuffle(DL, MVT::v16i32, V1, V2, Mask, DAG);
11166 }
11168 /// \brief Handle lowering of 32-lane 16-bit integer shuffles.
11169 static SDValue lowerV32I16VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
11170 const X86Subtarget *Subtarget,
11171 SelectionDAG &DAG) {
11172 SDLoc DL(Op);
11173 assert(V1.getSimpleValueType() == MVT::v32i16 && "Bad operand type!");
11174 assert(V2.getSimpleValueType() == MVT::v32i16 && "Bad operand type!");
11175 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
11176 ArrayRef<int> Mask = SVOp->getMask();
11177 assert(Mask.size() == 32 && "Unexpected mask size for v32 shuffle!");
11178 assert(Subtarget->hasBWI() && "We can only lower v32i16 with AVX-512-BWI!");
11180 // FIXME: Implement direct support for this type!
11181 return splitAndLowerVectorShuffle(DL, MVT::v32i16, V1, V2, Mask, DAG);
11182 }
11184 /// \brief Handle lowering of 64-lane 8-bit integer shuffles.
11185 static SDValue lowerV64I8VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
11186 const X86Subtarget *Subtarget,
11187 SelectionDAG &DAG) {
11188 SDLoc DL(Op);
11189 assert(V1.getSimpleValueType() == MVT::v64i8 && "Bad operand type!");
11190 assert(V2.getSimpleValueType() == MVT::v64i8 && "Bad operand type!");
11191 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
11192 ArrayRef<int> Mask = SVOp->getMask();
11193 assert(Mask.size() == 64 && "Unexpected mask size for v64 shuffle!");
11194 assert(Subtarget->hasBWI() && "We can only lower v64i8 with AVX-512-BWI!");
11196 // FIXME: Implement direct support for this type!
11197 return splitAndLowerVectorShuffle(DL, MVT::v64i8, V1, V2, Mask, DAG);
11198 }
11200 /// \brief High-level routine to lower various 512-bit x86 vector shuffles.
11201 ///
11202 /// This routine either breaks down the specific type of a 512-bit x86 vector
11203 /// shuffle or splits it into two 256-bit shuffles and fuses the results back
11204 /// together based on the available instructions.
11205 static SDValue lower512BitVectorShuffle(SDValue Op, SDValue V1, SDValue V2,
11206 MVT VT, const X86Subtarget *Subtarget,
11207 SelectionDAG &DAG) {
11208 SDLoc DL(Op);
11209 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
11210 ArrayRef<int> Mask = SVOp->getMask();
11211 assert(Subtarget->hasAVX512() &&
11212 "Cannot lower 512-bit vectors w/ basic ISA!");
11214 // Check for being able to broadcast a single element.
11215 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(VT.SimpleTy, DL, V1,
11216 Mask, Subtarget, DAG))
11217 return Broadcast;
11219 // Dispatch to each element type for lowering. If we don't have support for
11220 // specific element type shuffles at 512 bits, immediately split them and
11221 // lower them. Each lowering routine of a given type is allowed to assume that
11222 // the requisite ISA extensions for that element type are available.
11223 switch (VT.SimpleTy) {
11224 case MVT::v8f64:
11225 return lowerV8F64VectorShuffle(Op, V1, V2, Subtarget, DAG);
11226 case MVT::v16f32:
11227 return lowerV16F32VectorShuffle(Op, V1, V2, Subtarget, DAG);
11228 case MVT::v8i64:
11229 return lowerV8I64VectorShuffle(Op, V1, V2, Subtarget, DAG);
11230 case MVT::v16i32:
11231 return lowerV16I32VectorShuffle(Op, V1, V2, Subtarget, DAG);
11232 case MVT::v32i16:
11233 if (Subtarget->hasBWI())
11234 return lowerV32I16VectorShuffle(Op, V1, V2, Subtarget, DAG);
11235 break;
11236 case MVT::v64i8:
11237 if (Subtarget->hasBWI())
11238 return lowerV64I8VectorShuffle(Op, V1, V2, Subtarget, DAG);
11239 break;
11241 default:
11242 llvm_unreachable("Not a valid 512-bit x86 vector type!");
11243 }
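// For example, a v32i16 shuffle on a subtarget without AVX-512-BWI breaks
// out of the switch above rather than asserting, since only BWI provides
// 512-bit word- and byte-granularity shuffles.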
11245 // Otherwise fall back on splitting.
11246 return splitAndLowerVectorShuffle(DL, VT, V1, V2, Mask, DAG);
11247 }
11249 /// \brief Top-level lowering for x86 vector shuffles.
11250 ///
11251 /// This handles decomposition, canonicalization, and lowering of all x86
11252 /// vector shuffles. Most of the specific lowering strategies are encapsulated
11253 /// above in helper routines. The canonicalization attempts to widen shuffles
11254 /// to involve fewer lanes of wider elements, consolidate symmetric patterns
11255 /// s.t. only one of the two inputs needs to be tested, etc.
11256 static SDValue lowerVectorShuffle(SDValue Op, const X86Subtarget *Subtarget,
11257 SelectionDAG &DAG) {
11258 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
11259 ArrayRef<int> Mask = SVOp->getMask();
11260 SDValue V1 = Op.getOperand(0);
11261 SDValue V2 = Op.getOperand(1);
11262 MVT VT = Op.getSimpleValueType();
11263 int NumElements = VT.getVectorNumElements();
11264 SDLoc dl(Op);
11266 assert(VT.getSizeInBits() != 64 && "Can't lower MMX shuffles");
11268 bool V1IsUndef = V1.getOpcode() == ISD::UNDEF;
11269 bool V2IsUndef = V2.getOpcode() == ISD::UNDEF;
11270 if (V1IsUndef && V2IsUndef)
11271 return DAG.getUNDEF(VT);
11273 // When we create a shuffle node we put the UNDEF node to second operand,
11274 // but in some cases the first operand may be transformed to UNDEF.
11275 // In this case we should just commute the node.
11276 if (V1IsUndef)
11277 return DAG.getCommutedVectorShuffle(*SVOp);
11279 // Check for non-undef masks pointing at an undef vector and make the masks
11280 // undef as well. This makes it easier to match the shuffle based solely on
11281 // the mask.
11282 if (V2IsUndef)
11283 for (int M : Mask)
11284 if (M >= NumElements) {
11285 SmallVector<int, 8> NewMask(Mask.begin(), Mask.end());
11286 for (int &M : NewMask)
11287 if (M >= NumElements)
11288 M = -1;
11289 return DAG.getVectorShuffle(VT, dl, V1, V2, NewMask);
11290 }
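// For example, shufflevector <4 x i32> %a, <4 x i32> undef, <0, 5, 2, 7>
// has mask elements 5 and 7 pointing into the undef V2; they are rewritten
// to -1, giving <0, -1, 2, -1>, which later matching can treat freely.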
11292 // We actually see shuffles that are entirely re-arrangements of a set of
11293 // zero inputs. This mostly happens while decomposing complex shuffles into
11294 // simple ones. Directly lower these as a buildvector of zeros.
11295 SmallBitVector Zeroable = computeZeroableShuffleElements(Mask, V1, V2);
11296 if (Zeroable.all())
11297 return getZeroVector(VT, Subtarget, DAG, dl);
11299 // Try to collapse shuffles into using a vector type with fewer elements but
11300 // wider element types. We cap this to not form integers or floating point
11301 // elements wider than 64 bits, but it might be interesting to form i128
11302 // integers to handle flipping the low and high halves of AVX 256-bit vectors.
11303 SmallVector<int, 16> WidenedMask;
11304 if (VT.getScalarSizeInBits() < 64 &&
11305 canWidenShuffleElements(Mask, WidenedMask)) {
11306 MVT NewEltVT = VT.isFloatingPoint()
11307 ? MVT::getFloatingPointVT(VT.getScalarSizeInBits() * 2)
11308 : MVT::getIntegerVT(VT.getScalarSizeInBits() * 2);
11309 MVT NewVT = MVT::getVectorVT(NewEltVT, VT.getVectorNumElements() / 2);
11310 // Make sure that the new vector type is legal. For example, v2f64 isn't
11311 // legal on SSE1.
11312 if (DAG.getTargetLoweringInfo().isTypeLegal(NewVT)) {
11313 V1 = DAG.getNode(ISD::BITCAST, dl, NewVT, V1);
11314 V2 = DAG.getNode(ISD::BITCAST, dl, NewVT, V2);
11315 return DAG.getNode(ISD::BITCAST, dl, VT,
11316 DAG.getVectorShuffle(NewVT, dl, V1, V2, WidenedMask));
11317 }
11318 }
11320 int NumV1Elements = 0, NumUndefElements = 0, NumV2Elements = 0;
11321 for (int M : SVOp->getMask())
11322 if (M < 0)
11323 ++NumUndefElements;
11324 else if (M < NumElements)
11325 ++NumV1Elements;
11326 else
11327 ++NumV2Elements;
11329 // Commute the shuffle as needed such that more elements come from V1 than
11330 // V2. This allows us to match the shuffle pattern strictly on how many
11331 // elements come from V1 without handling the symmetric cases.
11332 if (NumV2Elements > NumV1Elements)
11333 return DAG.getCommutedVectorShuffle(*SVOp);
11335 // When the number of V1 and V2 elements are the same, try to minimize the
11336 // number of uses of V2 in the low half of the vector. When that is tied,
11337 // ensure that the sum of indices for V1 is equal to or lower than the sum
11338 // indices for V2. When those are equal, try to ensure that the number of odd
11339 // indices for V1 is lower than the number of odd indices for V2.
11340 if (NumV1Elements == NumV2Elements) {
11341 int LowV1Elements = 0, LowV2Elements = 0;
11342 for (int M : SVOp->getMask().slice(0, NumElements / 2))
11343 if (M >= NumElements)
11344 ++LowV2Elements;
11345 else if (M >= 0)
11346 ++LowV1Elements;
11347 if (LowV2Elements > LowV1Elements) {
11348 return DAG.getCommutedVectorShuffle(*SVOp);
11349 } else if (LowV2Elements == LowV1Elements) {
11350 int SumV1Indices = 0, SumV2Indices = 0;
11351 for (int i = 0, Size = SVOp->getMask().size(); i < Size; ++i)
11352 if (SVOp->getMask()[i] >= NumElements)
11353 SumV2Indices += i;
11354 else if (SVOp->getMask()[i] >= 0)
11355 SumV1Indices += i;
11356 if (SumV2Indices < SumV1Indices) {
11357 return DAG.getCommutedVectorShuffle(*SVOp);
11358 } else if (SumV2Indices == SumV1Indices) {
11359 int NumV1OddIndices = 0, NumV2OddIndices = 0;
11360 for (int i = 0, Size = SVOp->getMask().size(); i < Size; ++i)
11361 if (SVOp->getMask()[i] >= NumElements)
11362 NumV2OddIndices += i % 2;
11363 else if (SVOp->getMask()[i] >= 0)
11364 NumV1OddIndices += i % 2;
11365 if (NumV2OddIndices < NumV1OddIndices)
11366 return DAG.getCommutedVectorShuffle(*SVOp);
11367 }
11368 }
11369 }
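// As a worked example, for the v4i32 mask <4, 1, 6, 3> two elements come
// from each source and one V2 element sits in each half, so those tests tie;
// the index sums (V1: 1 + 3 = 4, V2: 0 + 2 = 2) then favor commuting, and
// the swapped shuffle carries the mask <0, 5, 2, 7> with the lower sum on V1.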
11371 // For each vector width, delegate to a specialized lowering routine.
11372 if (VT.getSizeInBits() == 128)
11373 return lower128BitVectorShuffle(Op, V1, V2, VT, Subtarget, DAG);
11375 if (VT.getSizeInBits() == 256)
11376 return lower256BitVectorShuffle(Op, V1, V2, VT, Subtarget, DAG);
11378 // Force AVX-512 vectors to be scalarized for now.
11379 // FIXME: Implement AVX-512 support!
11380 if (VT.getSizeInBits() == 512)
11381 return lower512BitVectorShuffle(Op, V1, V2, VT, Subtarget, DAG);
11383 llvm_unreachable("Unimplemented!");
11387 //===----------------------------------------------------------------------===//
11388 // Legacy vector shuffle lowering
11390 // This code is the legacy code handling vector shuffles until the above
11391 // replaces its functionality and performance.
11392 //===----------------------------------------------------------------------===//
11394 static bool isBlendMask(ArrayRef<int> MaskVals, MVT VT, bool hasSSE41,
11395 bool hasInt256, unsigned *MaskOut = nullptr) {
11396 MVT EltVT = VT.getVectorElementType();
11398 // There is no blend with immediate in AVX-512.
11399 if (VT.is512BitVector())
11400 return false;
11402 if (!hasSSE41 || EltVT == MVT::i8)
11403 return false;
11404 if (!hasInt256 && VT == MVT::v16i16)
11405 return false;
11407 unsigned MaskValue = 0;
11408 unsigned NumElems = VT.getVectorNumElements();
11409 // There are 2 lanes if (NumElems > 8), and 1 lane otherwise.
11410 unsigned NumLanes = (NumElems - 1) / 8 + 1;
11411 unsigned NumElemsInLane = NumElems / NumLanes;
11413 // Blend for v16i16 should be symmetric for both lanes.
11414 for (unsigned i = 0; i < NumElemsInLane; ++i) {
11416 int SndLaneEltIdx = (NumLanes == 2) ? MaskVals[i + NumElemsInLane] : -1;
11417 int EltIdx = MaskVals[i];
11419 if ((EltIdx < 0 || EltIdx == (int)i) &&
11420 (SndLaneEltIdx < 0 || SndLaneEltIdx == (int)(i + NumElemsInLane)))
11421 continue;
11423 if (((unsigned)EltIdx == (i + NumElems)) &&
11424 (SndLaneEltIdx < 0 ||
11425 (unsigned)SndLaneEltIdx == i + NumElems + NumElemsInLane))
11426 MaskValue |= (1 << i);
11427 else
11428 return false;
11429 }
11431 if (MaskOut)
11432 *MaskOut = MaskValue;
11433 return true;
11434 }
11436 // Try to lower a shuffle node into a simple blend instruction.
11437 // This function assumes isBlendMask returns true for this
11438 // ShuffleVectorSDNode.
11439 static SDValue LowerVECTOR_SHUFFLEtoBlend(ShuffleVectorSDNode *SVOp,
11440 unsigned MaskValue,
11441 const X86Subtarget *Subtarget,
11442 SelectionDAG &DAG) {
11443 MVT VT = SVOp->getSimpleValueType(0);
11444 MVT EltVT = VT.getVectorElementType();
11445 assert(isBlendMask(SVOp->getMask(), VT, Subtarget->hasSSE41(),
11446 Subtarget->hasInt256()) &&
11447 "Trying to lower a VECTOR_SHUFFLE to a Blend "
11448 "but with the wrong mask");
11449 SDValue V1 = SVOp->getOperand(0);
11450 SDValue V2 = SVOp->getOperand(1);
11451 SDLoc dl(SVOp);
11452 unsigned NumElems = VT.getVectorNumElements();
11454 // Convert i32 vectors to floating point if it is not AVX2.
11455 // AVX2 introduced VPBLENDD instruction for 128 and 256-bit vectors.
11456 MVT BlendVT = VT;
11457 if (EltVT == MVT::i64 || (EltVT == MVT::i32 && !Subtarget->hasInt256())) {
11458 BlendVT = MVT::getVectorVT(MVT::getFloatingPointVT(EltVT.getSizeInBits()),
11459 NumElems);
11460 V1 = DAG.getNode(ISD::BITCAST, dl, BlendVT, V1);
11461 V2 = DAG.getNode(ISD::BITCAST, dl, BlendVT, V2);
11462 }
11464 SDValue Ret = DAG.getNode(X86ISD::BLENDI, dl, BlendVT, V1, V2,
11465 DAG.getConstant(MaskValue, MVT::i32));
11466 return DAG.getNode(ISD::BITCAST, dl, VT, Ret);
11467 }
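// For example, the v4i32 mask <0, 5, 2, 7> takes V2 in elements 1 and 3, so
// isBlendMask computes MaskValue = 0b1010; without AVX2 the operands are
// bitcast to v4f32 and the BLENDI node selects to BLENDPS $0xA, while with
// AVX2 the integer-domain VPBLENDD can be used directly.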
11469 /// In vector type \p VT, return true if the element at index \p InputIdx
11470 /// falls on a different 128-bit lane than \p OutputIdx.
11471 static bool ShuffleCrosses128bitLane(MVT VT, unsigned InputIdx,
11472 unsigned OutputIdx) {
11473 unsigned EltSize = VT.getVectorElementType().getSizeInBits();
11474 return InputIdx * EltSize / 128 != OutputIdx * EltSize / 128;
11475 }
11477 /// Generate a PSHUFB if possible. Selects elements from \p V1 according to
11478 /// \p MaskVals. MaskVals[OutputIdx] = InputIdx specifies that we want to
11479 /// shuffle the element at InputIdx in V1 to OutputIdx in the result. If \p
11480 /// MaskVals refers to elements outside of \p V1 or is undef (-1), insert a
11481 /// zero.
11482 static SDValue getPSHUFB(ArrayRef<int> MaskVals, SDValue V1, SDLoc &dl,
11483 SelectionDAG &DAG) {
11484 MVT VT = V1.getSimpleValueType();
11485 assert(VT.is128BitVector() || VT.is256BitVector());
11487 MVT EltVT = VT.getVectorElementType();
11488 unsigned EltSizeInBytes = EltVT.getSizeInBits() / 8;
11489 unsigned NumElts = VT.getVectorNumElements();
11491 SmallVector<SDValue, 32> PshufbMask;
11492 for (unsigned OutputIdx = 0; OutputIdx < NumElts; ++OutputIdx) {
11493 int InputIdx = MaskVals[OutputIdx];
11494 unsigned InputByteIdx;
11496 if (InputIdx < 0 || NumElts <= (unsigned)InputIdx)
11497 InputByteIdx = 0x80;
11498 else {
11499 // Cross lane is not allowed.
11500 if (ShuffleCrosses128bitLane(VT, InputIdx, OutputIdx))
11501 return SDValue();
11502 InputByteIdx = InputIdx * EltSizeInBytes;
11503 // Index is a byte offset within the 128-bit lane.
11504 InputByteIdx &= 0xf;
11505 }
11507 for (unsigned j = 0; j < EltSizeInBytes; ++j) {
11508 PshufbMask.push_back(DAG.getConstant(InputByteIdx, MVT::i8));
11509 if (InputByteIdx != 0x80)
11510 ++InputByteIdx;
11511 }
11512 }
11514 MVT ShufVT = MVT::getVectorVT(MVT::i8, PshufbMask.size());
11515 if (ShufVT != VT)
11516 V1 = DAG.getNode(ISD::BITCAST, dl, ShufVT, V1);
11517 return DAG.getNode(X86ISD::PSHUFB, dl, ShufVT, V1,
11518 DAG.getNode(ISD::BUILD_VECTOR, dl, ShufVT, PshufbMask));
11519 }
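// For example, shuffling v8i16 element 3 of V1 into result element 0 expands
// to the byte pair (6, 7) in the PSHUFB control vector, while an undef or
// out-of-range source becomes 0x80, which makes PSHUFB write a zero byte.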
11521 // v8i16 shuffles - Prefer shuffles in the following order:
11522 // 1. [all] pshuflw, pshufhw, optional move
11523 // 2. [ssse3] 1 x pshufb
11524 // 3. [ssse3] 2 x pshufb + 1 x por
11525 // 4. [all] mov + pshuflw + pshufhw + N x (pextrw + pinsrw)
11526 static SDValue
11527 LowerVECTOR_SHUFFLEv8i16(SDValue Op, const X86Subtarget *Subtarget,
11528 SelectionDAG &DAG) {
11529 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
11530 SDValue V1 = SVOp->getOperand(0);
11531 SDValue V2 = SVOp->getOperand(1);
11532 SDLoc dl(SVOp);
11533 SmallVector<int, 8> MaskVals;
11535 // Determine if more than 1 of the words in each of the low and high quadwords
11536 // of the result come from the same quadword of one of the two inputs. Undef
11537 // mask values count as coming from any quadword, for better codegen.
11538 //
11539 // Lo/HiQuad[i] = j indicates how many words from the ith quad of the input
11540 // feeds this quad. For i, 0 and 1 refer to V1, 2 and 3 refer to V2.
11541 unsigned LoQuad[] = { 0, 0, 0, 0 };
11542 unsigned HiQuad[] = { 0, 0, 0, 0 };
11543 // Indices of quads used.
11544 std::bitset<4> InputQuads;
11545 for (unsigned i = 0; i < 8; ++i) {
11546 unsigned *Quad = i < 4 ? LoQuad : HiQuad;
11547 int EltIdx = SVOp->getMaskElt(i);
11548 MaskVals.push_back(EltIdx);
11549 if (EltIdx < 0) {
11550 ++Quad[0];
11551 ++Quad[1];
11552 ++Quad[2];
11553 ++Quad[3];
11554 continue;
11555 }
11556 ++Quad[EltIdx / 4];
11557 InputQuads.set(EltIdx / 4);
11558 }
11560 int BestLoQuad = -1;
11561 unsigned MaxQuad = 1;
11562 for (unsigned i = 0; i < 4; ++i) {
11563 if (LoQuad[i] > MaxQuad) {
11564 BestLoQuad = i;
11565 MaxQuad = LoQuad[i];
11566 }
11567 }
11569 int BestHiQuad = -1;
11570 MaxQuad = 1;
11571 for (unsigned i = 0; i < 4; ++i) {
11572 if (HiQuad[i] > MaxQuad) {
11573 BestHiQuad = i;
11574 MaxQuad = HiQuad[i];
11575 }
11576 }
11578 // For SSSE3, If all 8 words of the result come from only 1 quadword of each
11579 // of the two input vectors, shuffle them into one input vector so only a
11580 // single pshufb instruction is necessary. If there are more than 2 input
11581 // quads, disable the next transformation since it does not help SSSE3.
11582 bool V1Used = InputQuads[0] || InputQuads[1];
11583 bool V2Used = InputQuads[2] || InputQuads[3];
11584 if (Subtarget->hasSSSE3()) {
11585 if (InputQuads.count() == 2 && V1Used && V2Used) {
11586 BestLoQuad = InputQuads[0] ? 0 : 1;
11587 BestHiQuad = InputQuads[2] ? 2 : 3;
11588 }
11589 if (InputQuads.count() > 2) {
11590 BestLoQuad = -1;
11591 BestHiQuad = -1;
11592 }
11593 }
11595 // If BestLoQuad or BestHiQuad are set, shuffle the quads together and update
11596 // the shuffle mask. If a quad is scored as -1, that means that it contains
11597 // words from all 4 input quadwords.
11598 SDValue NewV;
11599 if (BestLoQuad >= 0 || BestHiQuad >= 0) {
11600 int MaskV[] = {
11601 BestLoQuad < 0 ? 0 : BestLoQuad,
11602 BestHiQuad < 0 ? 1 : BestHiQuad
11603 };
11604 NewV = DAG.getVectorShuffle(MVT::v2i64, dl,
11605 DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, V1),
11606 DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, V2), &MaskV[0]);
11607 NewV = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, NewV);
11609 // Rewrite the MaskVals and assign NewV to V1 if NewV now contains all the
11610 // source words for the shuffle, to aid later transformations.
11611 bool AllWordsInNewV = true;
11612 bool InOrder[2] = { true, true };
11613 for (unsigned i = 0; i != 8; ++i) {
11614 int idx = MaskVals[i];
11615 if (idx != (int)i)
11616 InOrder[i/4] = false;
11617 if (idx < 0 || (idx/4) == BestLoQuad || (idx/4) == BestHiQuad)
11618 continue;
11619 AllWordsInNewV = false;
11620 break;
11621 }
11623 bool pshuflw = AllWordsInNewV, pshufhw = AllWordsInNewV;
11624 if (AllWordsInNewV) {
11625 for (int i = 0; i != 8; ++i) {
11626 int idx = MaskVals[i];
11627 if (idx < 0)
11628 continue;
11629 idx = MaskVals[i] = (idx / 4) == BestLoQuad ? (idx & 3) : (idx & 3) + 4;
11630 if ((idx != i) && idx < 4)
11631 pshufhw = false;
11632 if ((idx != i) && idx > 3)
11633 pshuflw = false;
11634 }
11635 V1 = NewV;
11636 V2Used = false;
11637 BestLoQuad = 0;
11638 BestHiQuad = 1;
11639 }
11641 // If we've eliminated the use of V2, and the new mask is a pshuflw or
11642 // pshufhw, that's as cheap as it gets. Return the new shuffle.
11643 if ((pshufhw && InOrder[0]) || (pshuflw && InOrder[1])) {
11644 unsigned Opc = pshufhw ? X86ISD::PSHUFHW : X86ISD::PSHUFLW;
11645 unsigned TargetMask = 0;
11646 NewV = DAG.getVectorShuffle(MVT::v8i16, dl, NewV,
11647 DAG.getUNDEF(MVT::v8i16), &MaskVals[0]);
11648 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(NewV.getNode());
11649 TargetMask = pshufhw ? getShufflePSHUFHWImmediate(SVOp):
11650 getShufflePSHUFLWImmediate(SVOp);
11651 V1 = NewV.getOperand(0);
11652 return getTargetShuffleNode(Opc, dl, MVT::v8i16, V1, TargetMask, DAG);
11653 }
11654 }
11656 // Promote splats to a larger type which usually leads to more efficient code.
11657 // FIXME: Is this true if pshufb is available?
11658 if (SVOp->isSplat())
11659 return PromoteSplat(SVOp, DAG);
11661 // If we have SSSE3, and all words of the result are from 1 input vector,
11662 // case 2 is generated, otherwise case 3 is generated. If no SSSE3
11663 // is present, fall back to case 4.
11664 if (Subtarget->hasSSSE3()) {
11665 SmallVector<SDValue,16> pshufbMask;
11667 // If we have elements from both input vectors, set the high bit of the
11668 // shuffle mask element to zero out elements that come from V2 in the V1
11669 // mask, and elements that come from V1 in the V2 mask, so that the two
11670 // results can be OR'd together.
11671 bool TwoInputs = V1Used && V2Used;
11672 V1 = getPSHUFB(MaskVals, V1, dl, DAG);
11673 if (!TwoInputs)
11674 return DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V1);
11676 // Calculate the shuffle mask for the second input, shuffle it, and
11677 // OR it with the first shuffled input.
11678 CommuteVectorShuffleMask(MaskVals, 8);
11679 V2 = getPSHUFB(MaskVals, V2, dl, DAG);
11680 V1 = DAG.getNode(ISD::OR, dl, MVT::v16i8, V1, V2);
11681 return DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V1);
11682 }
11684 // If BestLoQuad >= 0, generate a pshuflw to put the low elements in order,
11685 // and update MaskVals with new element order.
11686 std::bitset<8> InOrder;
11687 if (BestLoQuad >= 0) {
11688 int MaskV[] = { -1, -1, -1, -1, 4, 5, 6, 7 };
11689 for (int i = 0; i != 4; ++i) {
11690 int idx = MaskVals[i];
11691 if (idx < 0) {
11692 InOrder.set(i);
11693 } else if ((idx / 4) == BestLoQuad) {
11694 MaskV[i] = idx & 3;
11695 InOrder.set(i);
11696 }
11697 }
11698 NewV = DAG.getVectorShuffle(MVT::v8i16, dl, NewV, DAG.getUNDEF(MVT::v8i16),
11699 &MaskV[0]);
11701 if (NewV.getOpcode() == ISD::VECTOR_SHUFFLE && Subtarget->hasSSE2()) {
11702 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(NewV.getNode());
11703 NewV = getTargetShuffleNode(X86ISD::PSHUFLW, dl, MVT::v8i16,
11704 NewV.getOperand(0),
11705 getShufflePSHUFLWImmediate(SVOp), DAG);
11706 }
11707 }
11709 // If BestHi >= 0, generate a pshufhw to put the high elements in order,
11710 // and update MaskVals with the new element order.
11711 if (BestHiQuad >= 0) {
11712 int MaskV[] = { 0, 1, 2, 3, -1, -1, -1, -1 };
11713 for (unsigned i = 4; i != 8; ++i) {
11714 int idx = MaskVals[i];
11715 if (idx < 0) {
11716 InOrder.set(i);
11717 } else if ((idx / 4) == BestHiQuad) {
11718 MaskV[i] = (idx & 3) + 4;
11719 InOrder.set(i);
11720 }
11721 }
11722 NewV = DAG.getVectorShuffle(MVT::v8i16, dl, NewV, DAG.getUNDEF(MVT::v8i16),
11723 &MaskV[0]);
11725 if (NewV.getOpcode() == ISD::VECTOR_SHUFFLE && Subtarget->hasSSE2()) {
11726 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(NewV.getNode());
11727 NewV = getTargetShuffleNode(X86ISD::PSHUFHW, dl, MVT::v8i16,
11728 NewV.getOperand(0),
11729 getShufflePSHUFHWImmediate(SVOp), DAG);
11730 }
11731 }
11733 // In case BestHi & BestLo were both -1, which means each quadword has a word
11734 // from each of the four input quadwords, calculate the InOrder bitvector now
11735 // before falling through to the insert/extract cleanup.
11736 if (BestLoQuad == -1 && BestHiQuad == -1) {
11737 NewV = V1;
11738 for (int i = 0; i != 8; ++i)
11739 if (MaskVals[i] < 0 || MaskVals[i] == i)
11740 InOrder.set(i);
11741 }
11743 // The other elements are put in the right place using pextrw and pinsrw.
11744 for (unsigned i = 0; i != 8; ++i) {
11745 if (InOrder[i])
11746 continue;
11747 int EltIdx = MaskVals[i];
11748 if (EltIdx < 0)
11749 continue;
11750 SDValue ExtOp = (EltIdx < 8) ?
11751 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16, V1,
11752 DAG.getIntPtrConstant(EltIdx)) :
11753 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16, V2,
11754 DAG.getIntPtrConstant(EltIdx - 8));
11755 NewV = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v8i16, NewV, ExtOp,
11756 DAG.getIntPtrConstant(i));
11757 }
11758 return NewV;
11759 }
11761 /// \brief v16i16 shuffles
11762 ///
11763 /// FIXME: We only support generation of a single pshufb currently. We can
11764 /// generalize the other applicable cases from LowerVECTOR_SHUFFLEv8i16 as
11765 /// well (e.g. 2 x pshufb + 1 x por).
11766 static SDValue
11767 LowerVECTOR_SHUFFLEv16i16(SDValue Op, SelectionDAG &DAG) {
11768 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
11769 SDValue V1 = SVOp->getOperand(0);
11770 SDValue V2 = SVOp->getOperand(1);
11771 SDLoc dl(SVOp);
11773 if (V2.getOpcode() != ISD::UNDEF)
11774 return SDValue();
11776 SmallVector<int, 16> MaskVals(SVOp->getMask().begin(), SVOp->getMask().end());
11777 return getPSHUFB(MaskVals, V1, dl, DAG);
11778 }
11780 // v16i8 shuffles - Prefer shuffles in the following order:
11781 // 1. [ssse3] 1 x pshufb
11782 // 2. [ssse3] 2 x pshufb + 1 x por
11783 // 3. [all] v8i16 shuffle + N x pextrw + rotate + pinsrw
11784 static SDValue LowerVECTOR_SHUFFLEv16i8(ShuffleVectorSDNode *SVOp,
11785 const X86Subtarget* Subtarget,
11786 SelectionDAG &DAG) {
11787 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
11788 SDValue V1 = SVOp->getOperand(0);
11789 SDValue V2 = SVOp->getOperand(1);
11790 SDLoc dl(SVOp);
11791 ArrayRef<int> MaskVals = SVOp->getMask();
11793 // Promote splats to a larger type which usually leads to more efficient code.
11794 // FIXME: Is this true if pshufb is available?
11795 if (SVOp->isSplat())
11796 return PromoteSplat(SVOp, DAG);
11798 // If we have SSSE3, case 1 is generated when all result bytes come from
11799 // one of the inputs. Otherwise, case 2 is generated. If no SSSE3 is
11800 // present, fall back to case 3.
11802 // If SSSE3, use 1 pshufb instruction per vector with elements in the result.
11803 if (Subtarget->hasSSSE3()) {
11804 SmallVector<SDValue,16> pshufbMask;
11806 // If all result elements are from one input vector, then only translate
11807 // undef mask values to 0x80 (zero out result) in the pshufb mask.
11808 //
11809 // Otherwise, we have elements from both input vectors, and must zero out
11810 // elements that come from V2 in the first mask, and V1 in the second mask
11811 // so that we can OR them together.
11812 for (unsigned i = 0; i != 16; ++i) {
11813 int EltIdx = MaskVals[i];
11814 if (EltIdx < 0 || EltIdx >= 16)
11815 EltIdx = 0x80;
11816 pshufbMask.push_back(DAG.getConstant(EltIdx, MVT::i8));
11817 }
11818 V1 = DAG.getNode(X86ISD::PSHUFB, dl, MVT::v16i8, V1,
11818 V1 = DAG.getNode(X86ISD::PSHUFB, dl, MVT::v16i8, V1,
11819 DAG.getNode(ISD::BUILD_VECTOR, dl,
11820 MVT::v16i8, pshufbMask));
11822 // As PSHUFB will zero elements with negative indices, it's safe to ignore
11823 // the 2nd operand if it's undefined or zero.
11824 if (V2.getOpcode() == ISD::UNDEF ||
11825 ISD::isBuildVectorAllZeros(V2.getNode()))
11826 return V1;
11828 // Calculate the shuffle mask for the second input, shuffle it, and
11829 // OR it with the first shuffled input.
11830 pshufbMask.clear();
11831 for (unsigned i = 0; i != 16; ++i) {
11832 int EltIdx = MaskVals[i];
11833 EltIdx = (EltIdx < 16) ? 0x80 : EltIdx - 16;
11834 pshufbMask.push_back(DAG.getConstant(EltIdx, MVT::i8));
11836 V2 = DAG.getNode(X86ISD::PSHUFB, dl, MVT::v16i8, V2,
11837 DAG.getNode(ISD::BUILD_VECTOR, dl,
11838 MVT::v16i8, pshufbMask));
11839 return DAG.getNode(ISD::OR, dl, MVT::v16i8, V1, V2);
11840 }
11842 // No SSSE3 - Calculate in place words and then fix all out of place words
11843 // With 0-16 extracts & inserts. Worst case is 16 bytes out of order from
11844 // the 16 different words that comprise the two doublequadword input vectors.
11845 V1 = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V1);
11846 V2 = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V2);
11847 SDValue NewV = V2;
11848 for (int i = 0; i != 8; ++i) {
11849 int Elt0 = MaskVals[i*2];
11850 int Elt1 = MaskVals[i*2+1];
11852 // This word of the result is all undef, skip it.
11853 if (Elt0 < 0 && Elt1 < 0)
11854 continue;
11856 // This word of the result is already in the correct place, skip it.
11857 if ((Elt0 == i*2) && (Elt1 == i*2+1))
11858 continue;
11860 SDValue Elt0Src = Elt0 < 16 ? V1 : V2;
11861 SDValue Elt1Src = Elt1 < 16 ? V1 : V2;
11862 SDValue InsElt;
11864 // If Elt0 and Elt1 are defined, are consecutive, and can be loaded
11865 // together using a single extract, load the word and store it.
11866 if ((Elt0 >= 0) && ((Elt0 + 1) == Elt1) && ((Elt0 & 1) == 0)) {
11867 InsElt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16, Elt1Src,
11868 DAG.getIntPtrConstant(Elt1 / 2));
11869 NewV = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v8i16, NewV, InsElt,
11870 DAG.getIntPtrConstant(i));
11871 continue;
11872 }
11874 // If Elt1 is defined, extract it from the appropriate source. If the
11875 // source byte is not also odd, shift the extracted word left 8 bits
11876 // otherwise clear the bottom 8 bits if we need to do an or.
11877 if (Elt1 >= 0) {
11878 InsElt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16, Elt1Src,
11879 DAG.getIntPtrConstant(Elt1 / 2));
11880 if ((Elt1 & 1) == 0)
11881 InsElt = DAG.getNode(ISD::SHL, dl, MVT::i16, InsElt,
11882 DAG.getConstant(8,
11883 TLI.getShiftAmountTy(InsElt.getValueType())));
11884 else if (Elt0 >= 0)
11885 InsElt = DAG.getNode(ISD::AND, dl, MVT::i16, InsElt,
11886 DAG.getConstant(0xFF00, MVT::i16));
11887 }
11888 // If Elt0 is defined, extract it from the appropriate source. If the
11889 // source byte is not also even, shift the extracted word right 8 bits. If
11890 // Elt1 was also defined, OR the extracted values together before
11891 // inserting them in the result.
11892 if (Elt0 >= 0) {
11893 SDValue InsElt0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16,
11894 Elt0Src, DAG.getIntPtrConstant(Elt0 / 2));
11895 if ((Elt0 & 1) != 0)
11896 InsElt0 = DAG.getNode(ISD::SRL, dl, MVT::i16, InsElt0,
11897 DAG.getConstant(8,
11898 TLI.getShiftAmountTy(InsElt0.getValueType())));
11899 else if (Elt1 >= 0)
11900 InsElt0 = DAG.getNode(ISD::AND, dl, MVT::i16, InsElt0,
11901 DAG.getConstant(0x00FF, MVT::i16));
11902 InsElt = Elt1 >= 0 ? DAG.getNode(ISD::OR, dl, MVT::i16, InsElt, InsElt0)
11903 : InsElt0;
11904 }
11905 NewV = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v8i16, NewV, InsElt,
11906 DAG.getIntPtrConstant(i));
11907 }
11908 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, NewV);
11909 }
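// For example, without SSSE3 the byte pair (Elt0, Elt1) = (6, 7) is already
// word-aligned and consecutive, so the loop above moves it with a single
// word extract/insert (typically one pextrw + pinsrw) rather than two
// byte-level shift-and-mask fix-ups.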
11911 // v32i8 shuffles - Translate to VPSHUFB if possible.
11912 static
11913 SDValue LowerVECTOR_SHUFFLEv32i8(ShuffleVectorSDNode *SVOp,
11914 const X86Subtarget *Subtarget,
11915 SelectionDAG &DAG) {
11916 MVT VT = SVOp->getSimpleValueType(0);
11917 SDValue V1 = SVOp->getOperand(0);
11918 SDValue V2 = SVOp->getOperand(1);
11919 SDLoc dl(SVOp);
11920 SmallVector<int, 32> MaskVals(SVOp->getMask().begin(), SVOp->getMask().end());
11922 bool V2IsUndef = V2.getOpcode() == ISD::UNDEF;
11923 bool V1IsAllZero = ISD::isBuildVectorAllZeros(V1.getNode());
11924 bool V2IsAllZero = ISD::isBuildVectorAllZeros(V2.getNode());
11926 // VPSHUFB may be generated if
11927 // (1) one of input vector is undefined or zeroinitializer.
11928 // The mask value 0x80 puts 0 in the corresponding slot of the vector.
11929 // And (2) the mask indexes don't cross the 128-bit lane.
11930 if (VT != MVT::v32i8 || !Subtarget->hasInt256() ||
11931 (!V2IsUndef && !V2IsAllZero && !V1IsAllZero))
11932 return SDValue();
11934 if (V1IsAllZero && !V2IsAllZero) {
11935 CommuteVectorShuffleMask(MaskVals, 32);
11936 V1 = V2;
11937 }
11938 return getPSHUFB(MaskVals, V1, dl, DAG);
11939 }
11941 /// RewriteAsNarrowerShuffle - Try rewriting v8i16 and v16i8 shuffles as 4 wide
11942 /// ones, or rewriting v4i32 / v4f32 as 2 wide ones if possible. This can be
11943 /// done when every pair / quad of shuffle mask elements point to elements in
11944 /// the right sequence. e.g.
11945 /// vector_shuffle X, Y, <2, 3, | 10, 11, | 0, 1, | 14, 15>
11946 static
11947 SDValue RewriteAsNarrowerShuffle(ShuffleVectorSDNode *SVOp,
11948 SelectionDAG &DAG) {
11949 MVT VT = SVOp->getSimpleValueType(0);
11950 SDLoc dl(SVOp);
11951 unsigned NumElems = VT.getVectorNumElements();
11952 MVT NewVT;
11953 unsigned Scale;
11954 switch (VT.SimpleTy) {
11955 default: llvm_unreachable("Unexpected!");
11956 case MVT::v2i64:
11957 case MVT::v2f64:
11958 return SDValue(SVOp, 0);
11959 case MVT::v4f32: NewVT = MVT::v2f64; Scale = 2; break;
11960 case MVT::v4i32: NewVT = MVT::v2i64; Scale = 2; break;
11961 case MVT::v8i16: NewVT = MVT::v4i32; Scale = 2; break;
11962 case MVT::v16i8: NewVT = MVT::v4i32; Scale = 4; break;
11963 case MVT::v16i16: NewVT = MVT::v8i32; Scale = 2; break;
11964 case MVT::v32i8: NewVT = MVT::v8i32; Scale = 4; break;
11965 }
11967 SmallVector<int, 8> MaskVec;
11968 for (unsigned i = 0; i != NumElems; i += Scale) {
11969 int StartIdx = -1;
11970 for (unsigned j = 0; j != Scale; ++j) {
11971 int EltIdx = SVOp->getMaskElt(i+j);
11972 if (EltIdx < 0)
11973 continue;
11974 if (StartIdx < 0)
11975 StartIdx = (EltIdx / Scale);
11976 if (EltIdx != (int)(StartIdx*Scale + j))
11977 return SDValue();
11978 }
11979 MaskVec.push_back(StartIdx);
11980 }
11982 SDValue V1 = DAG.getNode(ISD::BITCAST, dl, NewVT, SVOp->getOperand(0));
11983 SDValue V2 = DAG.getNode(ISD::BITCAST, dl, NewVT, SVOp->getOperand(1));
11984 return DAG.getVectorShuffle(NewVT, dl, V1, V2, &MaskVec[0]);
11985 }
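// For example, the v8i16 mask <2, 3, | 10, 11, | 0, 1, | 14, 15> from the
// comment above groups into pairs that each start at an even source index,
// so it is rewritten as the v4i32 shuffle <1, 5, 0, 7> on bitcast inputs.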
11987 /// getVZextMovL - Return a zero-extending vector move low node.
11988 ///
11989 static SDValue getVZextMovL(MVT VT, MVT OpVT,
11990 SDValue SrcOp, SelectionDAG &DAG,
11991 const X86Subtarget *Subtarget, SDLoc dl) {
11992 if (VT == MVT::v2f64 || VT == MVT::v4f32) {
11993 LoadSDNode *LD = nullptr;
11994 if (!isScalarLoadToVector(SrcOp.getNode(), &LD))
11995 LD = dyn_cast<LoadSDNode>(SrcOp);
11997 // movssrr and movsdrr do not clear top bits. Try to use movd, movq
11998 // instead.
11999 MVT ExtVT = (OpVT == MVT::v2f64) ? MVT::i64 : MVT::i32;
12000 if ((ExtVT != MVT::i64 || Subtarget->is64Bit()) &&
12001 SrcOp.getOpcode() == ISD::SCALAR_TO_VECTOR &&
12002 SrcOp.getOperand(0).getOpcode() == ISD::BITCAST &&
12003 SrcOp.getOperand(0).getOperand(0).getValueType() == ExtVT) {
12004 // PR2108
12005 OpVT = (OpVT == MVT::v2f64) ? MVT::v2i64 : MVT::v4i32;
12006 return DAG.getNode(ISD::BITCAST, dl, VT,
12007 DAG.getNode(X86ISD::VZEXT_MOVL, dl, OpVT,
12008 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl,
12009 OpVT,
12010 SrcOp.getOperand(0)
12011 .getOperand(0))));
12012 }
12013 }
12016 return DAG.getNode(ISD::BITCAST, dl, VT,
12017 DAG.getNode(X86ISD::VZEXT_MOVL, dl, OpVT,
12018 DAG.getNode(ISD::BITCAST, dl,
12019 OpVT, SrcOp)));
12020 }
12022 /// LowerVECTOR_SHUFFLE_256 - Handle all 256-bit wide vector shuffles
12023 /// which could not be matched by any known target specific shuffle
12024 static SDValue
12025 LowerVECTOR_SHUFFLE_256(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG) {
12027 SDValue NewOp = Compact8x32ShuffleNode(SVOp, DAG);
12028 if (NewOp.getNode())
12029 return NewOp;
12031 MVT VT = SVOp->getSimpleValueType(0);
12033 unsigned NumElems = VT.getVectorNumElements();
12034 unsigned NumLaneElems = NumElems / 2;
12036 SDLoc dl(SVOp);
12037 MVT EltVT = VT.getVectorElementType();
12038 MVT NVT = MVT::getVectorVT(EltVT, NumLaneElems);
12039 SDValue Output[2];
12041 SmallVector<int, 16> Mask;
12042 for (unsigned l = 0; l < 2; ++l) {
12043 // Build a shuffle mask for the output, discovering on the fly which
12044 // input vectors to use as shuffle operands (recorded in InputUsed).
12045 // If building a suitable shuffle vector proves too hard, then bail
12046 // out with UseBuildVector set.
12047 bool UseBuildVector = false;
12048 int InputUsed[2] = { -1, -1 }; // Not yet discovered.
12049 unsigned LaneStart = l * NumLaneElems;
12050 for (unsigned i = 0; i != NumLaneElems; ++i) {
12051 // The mask element. This indexes into the input.
12052 int Idx = SVOp->getMaskElt(i+LaneStart);
12053 if (Idx < 0) {
12054 // the mask element does not index into any input vector.
12055 Mask.push_back(-1);
12056 continue;
12057 }
12059 // The input vector this mask element indexes into.
12060 int Input = Idx / NumLaneElems;
12062 // Turn the index into an offset from the start of the input vector.
12063 Idx -= Input * NumLaneElems;
12065 // Find or create a shuffle vector operand to hold this input.
12066 unsigned OpNo;
12067 for (OpNo = 0; OpNo < array_lengthof(InputUsed); ++OpNo) {
12068 if (InputUsed[OpNo] == Input)
12069 // This input vector is already an operand.
12070 break;
12071 if (InputUsed[OpNo] < 0) {
12072 // Create a new operand for this input vector.
12073 InputUsed[OpNo] = Input;
12074 break;
12075 }
12076 }
12078 if (OpNo >= array_lengthof(InputUsed)) {
12079 // More than two input vectors used! Give up on trying to create a
12080 // shuffle vector. Insert all elements into a BUILD_VECTOR instead.
12081 UseBuildVector = true;
12082 break;
12083 }
12085 // Add the mask index for the new shuffle vector.
12086 Mask.push_back(Idx + OpNo * NumLaneElems);
12087 }
12089 if (UseBuildVector) {
12090 SmallVector<SDValue, 16> SVOps;
12091 for (unsigned i = 0; i != NumLaneElems; ++i) {
12092 // The mask element. This indexes into the input.
12093 int Idx = SVOp->getMaskElt(i+LaneStart);
12094 if (Idx < 0) {
12095 SVOps.push_back(DAG.getUNDEF(EltVT));
12096 continue;
12097 }
12099 // The input vector this mask element indexes into.
12100 int Input = Idx / NumElems;
12102 // Turn the index into an offset from the start of the input vector.
12103 Idx -= Input * NumElems;
12105 // Extract the vector element by hand.
12106 SVOps.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT,
12107 SVOp->getOperand(Input),
12108 DAG.getIntPtrConstant(Idx)));
12109 }
12111 // Construct the output using a BUILD_VECTOR.
12112 Output[l] = DAG.getNode(ISD::BUILD_VECTOR, dl, NVT, SVOps);
12113 } else if (InputUsed[0] < 0) {
12114 // No input vectors were used! The result is undefined.
12115 Output[l] = DAG.getUNDEF(NVT);
12116 } else {
12117 SDValue Op0 = Extract128BitVector(SVOp->getOperand(InputUsed[0] / 2),
12118 (InputUsed[0] % 2) * NumLaneElems,
12119 DAG, dl);
12120 // If only one input was used, use an undefined vector for the other.
12121 SDValue Op1 = (InputUsed[1] < 0) ? DAG.getUNDEF(NVT) :
12122 Extract128BitVector(SVOp->getOperand(InputUsed[1] / 2),
12123 (InputUsed[1] % 2) * NumLaneElems, DAG, dl);
12124 // At least one input vector was used. Create a new shuffle vector.
12125 Output[l] = DAG.getVectorShuffle(NVT, dl, Op0, Op1, &Mask[0]);
12126 }
12128 Mask.clear();
12129 }
12131 // Concatenate the result back
12132 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Output[0], Output[1]);
12133 }
12135 /// LowerVECTOR_SHUFFLE_128v4 - Handle all 128-bit wide vectors with
12136 /// 4 elements, and match them with several different shuffle types.
12137 static SDValue
12138 LowerVECTOR_SHUFFLE_128v4(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG) {
12139 SDValue V1 = SVOp->getOperand(0);
12140 SDValue V2 = SVOp->getOperand(1);
12141 SDLoc dl(SVOp);
12142 MVT VT = SVOp->getSimpleValueType(0);
12144 assert(VT.is128BitVector() && "Unsupported vector size");
12146 std::pair<int, int> Locs[4];
12147 int Mask1[] = { -1, -1, -1, -1 };
12148 SmallVector<int, 8> PermMask(SVOp->getMask().begin(), SVOp->getMask().end());
12150 unsigned NumHi = 0;
12151 unsigned NumLo = 0;
12152 for (unsigned i = 0; i != 4; ++i) {
12153 int Idx = PermMask[i];
12154 if (Idx < 0) {
12155 Locs[i] = std::make_pair(-1, -1);
12156 } else {
12157 assert(Idx < 8 && "Invalid VECTOR_SHUFFLE index!");
12158 if (Idx < 4) {
12159 Locs[i] = std::make_pair(0, NumLo);
12160 Mask1[NumLo] = Idx;
12161 NumLo++;
12162 } else {
12163 Locs[i] = std::make_pair(1, NumHi);
12164 if (2+NumHi < 4)
12165 Mask1[2+NumHi] = Idx;
12166 NumHi++;
12167 }
12168 }
12169 }
12171 if (NumLo <= 2 && NumHi <= 2) {
12172 // If no more than two elements come from either vector. This can be
12173 // implemented with two shuffles. First shuffle gather the elements.
12174 // The second shuffle, which takes the first shuffle as both of its
12175 // vector operands, put the elements into the right order.
12176 V1 = DAG.getVectorShuffle(VT, dl, V1, V2, &Mask1[0]);
12178 int Mask2[] = { -1, -1, -1, -1 };
12180 for (unsigned i = 0; i != 4; ++i)
12181 if (Locs[i].first != -1) {
12182 unsigned Idx = (i < 2) ? 0 : 4;
12183 Idx += Locs[i].first * 2 + Locs[i].second;
12184 Mask2[i] = Idx;
12185 }
12187 return DAG.getVectorShuffle(VT, dl, V1, V1, &Mask2[0]);
12188 }
12190 if (NumLo == 3 || NumHi == 3) {
12191 // Otherwise, we must have three elements from one vector, call it X, and
12192 // one element from the other, call it Y. First, use a shufps to build an
12193 // intermediate vector with the one element from Y and the element from X
12194 // that will be in the same half in the final destination (the indexes don't
12195 // matter). Then, use a shufps to build the final vector, taking the half
12196 // containing the element from Y from the intermediate, and the other half
12197 // from X.
12198 if (NumHi == 3) {
12199 // Normalize it so the 3 elements come from V1.
12200 CommuteVectorShuffleMask(PermMask, 4);
12201 std::swap(V1, V2);
12202 }
12204 // Find the element from V2.
12205 unsigned HiIndex;
12206 for (HiIndex = 0; HiIndex < 3; ++HiIndex) {
12207 int Val = PermMask[HiIndex];
12208 if (Val < 0)
12209 continue;
12210 if (Val >= 4)
12211 break;
12212 }
12214 Mask1[0] = PermMask[HiIndex];
12215 Mask1[1] = -1;
12216 Mask1[2] = PermMask[HiIndex^1];
12217 Mask1[3] = -1;
12218 V2 = DAG.getVectorShuffle(VT, dl, V1, V2, &Mask1[0]);
12220 if (HiIndex >= 2) {
12221 Mask1[0] = PermMask[0];
12222 Mask1[1] = PermMask[1];
12223 Mask1[2] = HiIndex & 1 ? 6 : 4;
12224 Mask1[3] = HiIndex & 1 ? 4 : 6;
12225 return DAG.getVectorShuffle(VT, dl, V1, V2, &Mask1[0]);
12226 }
12228 Mask1[0] = HiIndex & 1 ? 2 : 0;
12229 Mask1[1] = HiIndex & 1 ? 0 : 2;
12230 Mask1[2] = PermMask[2];
12231 Mask1[3] = PermMask[3];
12232 if (Mask1[2] >= 0)
12233 Mask1[2] += 4;
12234 if (Mask1[3] >= 0)
12235 Mask1[3] += 4;
12236 return DAG.getVectorShuffle(VT, dl, V2, V1, &Mask1[0]);
12237 }
12239 // Break it into (shuffle shuffle_hi, shuffle_lo).
12240 int LoMask[] = { -1, -1, -1, -1 };
12241 int HiMask[] = { -1, -1, -1, -1 };
12243 int *MaskPtr = LoMask;
12244 unsigned MaskIdx = 0;
12245 unsigned LoIdx = 0;
12246 unsigned HiIdx = 2;
12247 for (unsigned i = 0; i != 4; ++i) {
12248 if (i == 2) {
12249 MaskPtr = HiMask;
12250 MaskIdx = 1;
12251 LoIdx = 0;
12252 HiIdx = 2;
12253 }
12254 int Idx = PermMask[i];
12255 if (Idx < 0) {
12256 Locs[i] = std::make_pair(-1, -1);
12257 } else if (Idx < 4) {
12258 Locs[i] = std::make_pair(MaskIdx, LoIdx);
12259 MaskPtr[LoIdx] = Idx;
12260 LoIdx++;
12261 } else {
12262 Locs[i] = std::make_pair(MaskIdx, HiIdx);
12263 MaskPtr[HiIdx] = Idx;
12264 HiIdx++;
12265 }
12266 }
12268 SDValue LoShuffle = DAG.getVectorShuffle(VT, dl, V1, V2, &LoMask[0]);
12269 SDValue HiShuffle = DAG.getVectorShuffle(VT, dl, V1, V2, &HiMask[0]);
12270 int MaskOps[] = { -1, -1, -1, -1 };
12271 for (unsigned i = 0; i != 4; ++i)
12272 if (Locs[i].first != -1)
12273 MaskOps[i] = Locs[i].first * 4 + Locs[i].second;
12274 return DAG.getVectorShuffle(VT, dl, LoShuffle, HiShuffle, &MaskOps[0]);
12275 }
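// As a worked example, the mask <4, 1, 6, 2> pulls two elements from each
// source, so the first branch above builds the gathering shuffle
// <1, 2, 4, 6> and then reorders it into place with the all-same-operand
// shuffle <2, 0, 7, 5>, i.e. two SHUFPS-style operations in total.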
12277 static bool MayFoldVectorLoad(SDValue V) {
12278 while (V.hasOneUse() && V.getOpcode() == ISD::BITCAST)
12279 V = V.getOperand(0);
12281 if (V.hasOneUse() && V.getOpcode() == ISD::SCALAR_TO_VECTOR)
12282 V = V.getOperand(0);
12283 if (V.hasOneUse() && V.getOpcode() == ISD::BUILD_VECTOR &&
12284 V.getNumOperands() == 2 && V.getOperand(1).getOpcode() == ISD::UNDEF)
12285 // BUILD_VECTOR (load), undef
12286 V = V.getOperand(0);
12288 return MayFoldLoad(V);
12289 }
12291 static
12292 SDValue getMOVDDup(SDValue &Op, SDLoc &dl, SDValue V1, SelectionDAG &DAG) {
12293 MVT VT = Op.getSimpleValueType();
12295 // Canonicalize to v2f64.
12296 V1 = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, V1);
12297 return DAG.getNode(ISD::BITCAST, dl, VT,
12298 getTargetShuffleNode(X86ISD::MOVDDUP, dl, MVT::v2f64,
12299 V1, DAG));
12300 }
12302 static
12303 SDValue getMOVLowToHigh(SDValue &Op, SDLoc &dl, SelectionDAG &DAG,
12304 bool HasSSE2) {
12305 SDValue V1 = Op.getOperand(0);
12306 SDValue V2 = Op.getOperand(1);
12307 MVT VT = Op.getSimpleValueType();
12309 assert(VT != MVT::v2i64 && "unsupported shuffle type");
12311 if (HasSSE2 && VT == MVT::v2f64)
12312 return getTargetShuffleNode(X86ISD::MOVLHPD, dl, VT, V1, V2, DAG);
12314 // v4f32 or v4i32: canonicalize to v4f32 (which is legal for SSE1)
12315 return DAG.getNode(ISD::BITCAST, dl, VT,
12316 getTargetShuffleNode(X86ISD::MOVLHPS, dl, MVT::v4f32,
12317 DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, V1),
12318 DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, V2), DAG));
12319 }
12321 static
12322 SDValue getMOVHighToLow(SDValue &Op, SDLoc &dl, SelectionDAG &DAG) {
12323 SDValue V1 = Op.getOperand(0);
12324 SDValue V2 = Op.getOperand(1);
12325 MVT VT = Op.getSimpleValueType();
12327 assert((VT == MVT::v4i32 || VT == MVT::v4f32) &&
12328 "unsupported shuffle type");
12330 if (V2.getOpcode() == ISD::UNDEF)
12331 V2 = V1;
12333 // v4i32 or v4f32
12334 return getTargetShuffleNode(X86ISD::MOVHLPS, dl, VT, V1, V2, DAG);
12335 }
12337 static
12338 SDValue getMOVLP(SDValue &Op, SDLoc &dl, SelectionDAG &DAG, bool HasSSE2) {
12339 SDValue V1 = Op.getOperand(0);
12340 SDValue V2 = Op.getOperand(1);
12341 MVT VT = Op.getSimpleValueType();
12342 unsigned NumElems = VT.getVectorNumElements();
12344 // Use MOVLPS and MOVLPD in case V1 or V2 are loads. During isel, the second
12345 // operand of these instructions is only memory, so check if there's a
12346 // potential load folding here, otherwise use SHUFPS or MOVSD to match the
12347 // same masks.
12348 bool CanFoldLoad = false;
12350 // Trivial case, when V2 comes from a load.
12351 if (MayFoldVectorLoad(V2))
12352 CanFoldLoad = true;
12354 // When V1 is a load, it can be folded later into a store in isel, example:
12355 // (store (v4f32 (X86Movlps (load addr:$src1), VR128:$src2)), addr:$src1)
12357 // (MOVLPSmr addr:$src1, VR128:$src2)
12358 // So, recognize this potential and also use MOVLPS or MOVLPD
12359 else if (MayFoldVectorLoad(V1) && MayFoldIntoStore(Op))
12360 CanFoldLoad = true;
12362 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
12363 if (CanFoldLoad) {
12364 if (HasSSE2 && NumElems == 2)
12365 return getTargetShuffleNode(X86ISD::MOVLPD, dl, VT, V1, V2, DAG);
12367 if (NumElems == 4)
12368 // If we don't care about the second element, proceed to use movss.
12369 if (SVOp->getMaskElt(1) != -1)
12370 return getTargetShuffleNode(X86ISD::MOVLPS, dl, VT, V1, V2, DAG);
12371 }
12373 // movl and movlp will both match v2i64, but v2i64 is never matched by
12374 // movl earlier because we make it strict to avoid messing with the movlp load
12375 // folding logic (see the code above getMOVLP call). Match it here then,
12376 // this is horrible, but will stay like this until we move all shuffle
12377 // matching to x86 specific nodes. Note that for the 1st condition all
12378 // types are matched with movsd.
12379 if (HasSSE2) {
12380 // FIXME: isMOVLMask should be checked and matched before getMOVLP,
12381 // as to remove this logic from here, as much as possible
12382 if (NumElems == 2 || !isMOVLMask(SVOp->getMask(), VT))
12383 return getTargetShuffleNode(X86ISD::MOVSD, dl, VT, V1, V2, DAG);
12384 return getTargetShuffleNode(X86ISD::MOVSS, dl, VT, V1, V2, DAG);
12385 }
12387 assert(VT != MVT::v4i32 && "unsupported shuffle type");
12389 // Invert the operand order and use SHUFPS to match it.
12390 return getTargetShuffleNode(X86ISD::SHUFP, dl, VT, V2, V1,
12391 getShuffleSHUFImmediate(SVOp), DAG);
12392 }
12394 static SDValue NarrowVectorLoadToElement(LoadSDNode *Load, unsigned Index,
12395 SelectionDAG &DAG) {
12396 SDLoc dl(Load);
12397 MVT VT = Load->getSimpleValueType(0);
12398 MVT EVT = VT.getVectorElementType();
12399 SDValue Addr = Load->getOperand(1);
12400 SDValue NewAddr = DAG.getNode(
12401 ISD::ADD, dl, Addr.getSimpleValueType(), Addr,
12402 DAG.getConstant(Index * EVT.getStoreSize(), Addr.getSimpleValueType()));
12404 SDValue NewLoad =
12405 DAG.getLoad(EVT, dl, Load->getChain(), NewAddr,
12406 DAG.getMachineFunction().getMachineMemOperand(
12407 Load->getMemOperand(), 0, EVT.getStoreSize()));
12408 return NewLoad;
12409 }
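// For example, narrowing a v4f32 load at address A to element Index == 2
// produces a scalar f32 load from A + 8 (Index times the 4-byte element
// store size), chained to the original load's chain.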
12411 // It is only safe to call this function if isINSERTPSMask is true for
12412 // this shufflevector mask.
12413 static SDValue getINSERTPS(ShuffleVectorSDNode *SVOp, SDLoc &dl,
12414 SelectionDAG &DAG) {
12415 // Generate an insertps instruction when inserting an f32 from memory onto a
12416 // v4f32 or when copying a member from one v4f32 to another.
12417 // We also use it for transferring i32 from one register to another,
12418 // since it simply copies the same bits.
12419 // If we're transferring an i32 from memory to a specific element in a
// register, we output a generic DAG that will match the PINSRD
// instruction.
12422 MVT VT = SVOp->getSimpleValueType(0);
12423 MVT EVT = VT.getVectorElementType();
12424 SDValue V1 = SVOp->getOperand(0);
12425 SDValue V2 = SVOp->getOperand(1);
12426 auto Mask = SVOp->getMask();
12427 assert((VT == MVT::v4f32 || VT == MVT::v4i32) &&
12428 "unsupported vector type for insertps/pinsrd");
12430 auto FromV1Predicate = [](const int &i) { return i < 4 && i > -1; };
12431 auto FromV2Predicate = [](const int &i) { return i >= 4; };
12432 int FromV1 = std::count_if(Mask.begin(), Mask.end(), FromV1Predicate);
unsigned DestIndex;
SDValue From, To;
if (FromV1 == 1) {
  From = V1;
  To = V2;
  DestIndex = std::find_if(Mask.begin(), Mask.end(), FromV1Predicate) -
              Mask.begin();
12443 // If we have 1 element from each vector, we have to check if we're
12444 // changing V1's element's place. If so, we're done. Otherwise, we
// should assume we're changing V2's element's place and behave
// accordingly.
12447 int FromV2 = std::count_if(Mask.begin(), Mask.end(), FromV2Predicate);
12448 assert(DestIndex <= INT32_MAX && "truncated destination index");
12449 if (FromV1 == FromV2 &&
    static_cast<int>(DestIndex) == Mask[DestIndex] % 4) {
  From = V2;
  To = V1;
  DestIndex =
      std::find_if(Mask.begin(), Mask.end(), FromV2Predicate) - Mask.begin();
}
} else {
12457 assert(std::count_if(Mask.begin(), Mask.end(), FromV2Predicate) == 1 &&
12458 "More than one element from V1 and from V2, or no elements from one "
12459 "of the vectors. This case should not have returned true from "
12464 std::find_if(Mask.begin(), Mask.end(), FromV2Predicate) - Mask.begin();
12467 // Get an index into the source vector in the range [0,4) (the mask is
12468 // in the range [0,8) because it can address V1 and V2)
12469 unsigned SrcIndex = Mask[DestIndex] % 4;
12470 if (MayFoldLoad(From)) {
12471 // Trivial case, when From comes from a load and is only used by the
// shuffle. Make it use insertps from the vector that we need from that
// address.
SDValue NewLoad =
    NarrowVectorLoadToElement(cast<LoadSDNode>(From), SrcIndex, DAG);
if (!NewLoad.getNode())
  return SDValue();
12479 if (EVT == MVT::f32) {
12480 // Create this as a scalar to vector to match the instruction pattern.
12481 SDValue LoadScalarToVector =
12482 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, NewLoad);
SDValue InsertpsMask = DAG.getIntPtrConstant(DestIndex << 4);
return DAG.getNode(X86ISD::INSERTPS, dl, VT, To, LoadScalarToVector,
                   InsertpsMask);
12486 } else { // EVT == MVT::i32
12487 // If we're getting an i32 from memory, use an INSERT_VECTOR_ELT
12488 // instruction, to match the PINSRD instruction, which loads an i32 to a
12489 // certain vector element.
12490 return DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, To, NewLoad,
                   DAG.getConstant(DestIndex, MVT::i32));
}
}
12495 // Vector-element-to-vector
12496 SDValue InsertpsMask = DAG.getIntPtrConstant(DestIndex << 4 | SrcIndex << 6);
return DAG.getNode(X86ISD::INSERTPS, dl, VT, To, From, InsertpsMask);
}
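// Worked example (illustrative): for a v4f32 mask <0,1,5,3> the only element
// taken from V2 is Mask[2] == 5, so From = V2, To = V1, DestIndex = 2 and
// SrcIndex = 5 % 4 = 1; the immediate is (1 << 6) | (2 << 4) == 0x60.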
12500 // Reduce a vector shuffle to zext.
12501 static SDValue LowerVectorIntExtend(SDValue Op, const X86Subtarget *Subtarget,
12502 SelectionDAG &DAG) {
12503 // PMOVZX is only available from SSE41.
if (!Subtarget->hasSSE41())
  return SDValue();
12507 MVT VT = Op.getSimpleValueType();
// Only AVX2 supports 256-bit vector integer extension.
if (!Subtarget->hasInt256() && VT.is256BitVector())
  return SDValue();
12513 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
12515 SDValue V1 = Op.getOperand(0);
12516 SDValue V2 = Op.getOperand(1);
12517 unsigned NumElems = VT.getVectorNumElements();
// Extending is a unary operation, and the source element type cannot be
// i64 (there is no wider integer element type to extend to).
if (V2.getOpcode() != ISD::UNDEF || !VT.isInteger() ||
    VT.getVectorElementType() == MVT::i64)
  return SDValue();
12525 // Find the expansion ratio, e.g. expanding from i8 to i32 has a ratio of 4.
12526 unsigned Shift = 1; // Start from 2, i.e. 1 << 1.
12527 while ((1U << Shift) < NumElems) {
  if (SVOp->getMaskElt(1U << Shift) == 1)
    break;
  Shift += 1;
  // The maximal ratio is 8, i.e. from i8 to i64.
  if (Shift > 3)
    return SDValue();
}
12536 // Check the shuffle mask.
12537 unsigned Mask = (1U << Shift) - 1;
12538 for (unsigned i = 0; i != NumElems; ++i) {
12539 int EltIdx = SVOp->getMaskElt(i);
12540 if ((i & Mask) != 0 && EltIdx != -1)
12542 if ((i & Mask) == 0 && (unsigned)EltIdx != (i >> Shift))
12546 unsigned NBits = VT.getVectorElementType().getSizeInBits() << Shift;
12547 MVT NeVT = MVT::getIntegerVT(NBits);
12548 MVT NVT = MVT::getVectorVT(NeVT, NumElems >> Shift);
if (!DAG.getTargetLoweringInfo().isTypeLegal(NVT))
  return SDValue();

SDLoc DL(Op);
return DAG.getNode(ISD::BITCAST, DL, VT,
                   DAG.getNode(X86ISD::VZEXT, DL, NVT, V1));
}
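// Example (illustrative): the v8i16 shuffle <0,-1,1,-1,2,-1,3,-1> of V1 with
// undef picks each source element once and leaves the odd lanes undefined,
// so Shift == 1 and it lowers to
// (v8i16 (bitcast (v4i32 (X86ISD::VZEXT V1)))).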
12557 static SDValue NormalizeVectorShuffle(SDValue Op, const X86Subtarget *Subtarget,
12558 SelectionDAG &DAG) {
12559 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
12560 MVT VT = Op.getSimpleValueType();
SDLoc dl(Op);
SDValue V1 = Op.getOperand(0);
12563 SDValue V2 = Op.getOperand(1);
12565 if (isZeroShuffle(SVOp))
12566 return getZeroVector(VT, Subtarget, DAG, dl);
12568 // Handle splat operations
12569 if (SVOp->isSplat()) {
12570 // Use vbroadcast whenever the splat comes from a foldable load
12571 SDValue Broadcast = LowerVectorBroadcast(Op, Subtarget, DAG);
if (Broadcast.getNode())
  return Broadcast;
}
12576 // Check integer expanding shuffles.
12577 SDValue NewOp = LowerVectorIntExtend(Op, Subtarget, DAG);
if (NewOp.getNode())
  return NewOp;
// If the shuffle can be profitably rewritten as a narrower shuffle, then
// do it!
12583 if (VT == MVT::v8i16 || VT == MVT::v16i8 || VT == MVT::v16i16 ||
12584 VT == MVT::v32i8) {
12585 SDValue NewOp = RewriteAsNarrowerShuffle(SVOp, DAG);
12586 if (NewOp.getNode())
12587 return DAG.getNode(ISD::BITCAST, dl, VT, NewOp);
12588 } else if (VT.is128BitVector() && Subtarget->hasSSE2()) {
12589 // FIXME: Figure out a cleaner way to do this.
12590 if (ISD::isBuildVectorAllZeros(V2.getNode())) {
12591 SDValue NewOp = RewriteAsNarrowerShuffle(SVOp, DAG);
12592 if (NewOp.getNode()) {
12593 MVT NewVT = NewOp.getSimpleValueType();
12594 if (isCommutedMOVLMask(cast<ShuffleVectorSDNode>(NewOp)->getMask(),
12595 NewVT, true, false))
return getVZextMovL(VT, NewVT, NewOp.getOperand(0), DAG, Subtarget,
                    dl);
}
12599 } else if (ISD::isBuildVectorAllZeros(V1.getNode())) {
12600 SDValue NewOp = RewriteAsNarrowerShuffle(SVOp, DAG);
12601 if (NewOp.getNode()) {
12602 MVT NewVT = NewOp.getSimpleValueType();
12603 if (isMOVLMask(cast<ShuffleVectorSDNode>(NewOp)->getMask(), NewVT))
return getVZextMovL(VT, NewVT, NewOp.getOperand(1), DAG, Subtarget,
                    dl);
}
}
}

return SDValue();
}
SDValue
X86TargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const {
12614 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
12615 SDValue V1 = Op.getOperand(0);
12616 SDValue V2 = Op.getOperand(1);
MVT VT = Op.getSimpleValueType();
SDLoc dl(Op);
12619 unsigned NumElems = VT.getVectorNumElements();
12620 bool V1IsUndef = V1.getOpcode() == ISD::UNDEF;
12621 bool V2IsUndef = V2.getOpcode() == ISD::UNDEF;
12622 bool V1IsSplat = false;
12623 bool V2IsSplat = false;
12624 bool HasSSE2 = Subtarget->hasSSE2();
12625 bool HasFp256 = Subtarget->hasFp256();
12626 bool HasInt256 = Subtarget->hasInt256();
12627 MachineFunction &MF = DAG.getMachineFunction();
bool OptForSize =
    MF.getFunction()->hasFnAttribute(Attribute::OptimizeForSize);
12631 // Check if we should use the experimental vector shuffle lowering. If so,
12632 // delegate completely to that code path.
12633 if (ExperimentalVectorShuffleLowering)
12634 return lowerVectorShuffle(Op, Subtarget, DAG);
12636 assert(VT.getSizeInBits() != 64 && "Can't lower MMX shuffles");
12638 if (V1IsUndef && V2IsUndef)
12639 return DAG.getUNDEF(VT);
// When we create a shuffle node we put the UNDEF node as the second
// operand, but in some cases the first operand may be transformed to UNDEF.
// In that case we should just commute the node.
if (V1IsUndef)
  return DAG.getCommutedVectorShuffle(*SVOp);
12647 // Vector shuffle lowering takes 3 steps:
12649 // 1) Normalize the input vectors. Here splats, zeroed vectors, profitable
12650 // narrowing and commutation of operands should be handled.
// 2) Matching of shuffles with known shuffle masks to x86 target-specific
//    shuffle nodes.
12653 // 3) Rewriting of unmatched masks into new generic shuffle operations,
12654 // so the shuffle can be broken into other shuffles and the legalizer can
12655 // try the lowering again.
// The general idea is that no vector_shuffle operation should be left to
// be matched during isel; all of them must be converted to a target-specific
// node.
12661 // Normalize the input vectors. Here splats, zeroed vectors, profitable
12662 // narrowing and commutation of operands should be handled. The actual code
12663 // doesn't include all of those, work in progress...
12664 SDValue NewOp = NormalizeVectorShuffle(Op, Subtarget, DAG);
if (NewOp.getNode())
  return NewOp;
12668 SmallVector<int, 8> M(SVOp->getMask().begin(), SVOp->getMask().end());
12670 // NOTE: isPSHUFDMask can also match both masks below (unpckl_undef and
12671 // unpckh_undef). Only use pshufd if speed is more important than size.
12672 if (OptForSize && isUNPCKL_v_undef_Mask(M, VT, HasInt256))
12673 return getTargetShuffleNode(X86ISD::UNPCKL, dl, VT, V1, V1, DAG);
12674 if (OptForSize && isUNPCKH_v_undef_Mask(M, VT, HasInt256))
12675 return getTargetShuffleNode(X86ISD::UNPCKH, dl, VT, V1, V1, DAG);
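// For example (illustrative), the v4i32 mask <0,0,1,1> is unpckl_undef and
// lowers to (UNPCKL V1, V1), which is smaller than the equivalent PSHUFD
// because it needs no immediate byte.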
12677 if (isMOVDDUPMask(M, VT) && Subtarget->hasSSE3() &&
12678 V2IsUndef && MayFoldVectorLoad(V1))
12679 return getMOVDDup(Op, dl, V1, DAG);
12681 if (isMOVHLPS_v_undef_Mask(M, VT))
12682 return getMOVHighToLow(Op, dl, DAG);
// Used to match splats.
12685 if (HasSSE2 && isUNPCKHMask(M, VT, HasInt256) && V2IsUndef &&
12686 (VT == MVT::v2f64 || VT == MVT::v2i64))
12687 return getTargetShuffleNode(X86ISD::UNPCKH, dl, VT, V1, V1, DAG);
12689 if (isPSHUFDMask(M, VT)) {
// The mask matched in the if above can be selected into several different
// instructions during isel, not only pshufd as the name says; sad but true.
// Emulate that behavior for now...
12693 if (isMOVDDUPMask(M, VT) && ((VT == MVT::v4f32 || VT == MVT::v2i64)))
12694 return getTargetShuffleNode(X86ISD::MOVLHPS, dl, VT, V1, V1, DAG);
12696 unsigned TargetMask = getShuffleSHUFImmediate(SVOp);
12698 if (HasSSE2 && (VT == MVT::v4f32 || VT == MVT::v4i32))
12699 return getTargetShuffleNode(X86ISD::PSHUFD, dl, VT, V1, TargetMask, DAG);
12701 if (HasFp256 && (VT == MVT::v4f32 || VT == MVT::v2f64))
12702 return getTargetShuffleNode(X86ISD::VPERMILPI, dl, VT, V1, TargetMask,
return getTargetShuffleNode(X86ISD::SHUFP, dl, VT, V1, V1,
                            getShuffleSHUFImmediate(SVOp), DAG);
}
12709 if (isPALIGNRMask(M, VT, Subtarget))
12710 return getTargetShuffleNode(X86ISD::PALIGNR, dl, VT, V1, V2,
                            getShufflePALIGNRImmediate(SVOp),
                            DAG);
12714 if (isVALIGNMask(M, VT, Subtarget))
12715 return getTargetShuffleNode(X86ISD::VALIGN, dl, VT, V1, V2,
                            getShuffleVALIGNImmediate(SVOp),
                            DAG);
12719 // Check if this can be converted into a logical shift.
12720 bool isLeft = false;
unsigned ShAmt = 0;
SDValue ShVal;
bool isShift = HasSSE2 && isVectorShift(SVOp, DAG, isLeft, ShVal, ShAmt);
12724 if (isShift && ShVal.hasOneUse()) {
12725 // If the shifted value has multiple uses, it may be cheaper to use
12726 // v_set0 + movlhps or movhlps, etc.
12727 MVT EltVT = VT.getVectorElementType();
12728 ShAmt *= EltVT.getSizeInBits();
return getVShift(isLeft, VT, ShVal, ShAmt, DAG, *this, dl);
}
12732 if (isMOVLMask(M, VT)) {
12733 if (ISD::isBuildVectorAllZeros(V1.getNode()))
12734 return getVZextMovL(VT, VT, V2, DAG, Subtarget, dl);
12735 if (!isMOVLPMask(M, VT)) {
12736 if (HasSSE2 && (VT == MVT::v2i64 || VT == MVT::v2f64))
12737 return getTargetShuffleNode(X86ISD::MOVSD, dl, VT, V1, V2, DAG);
12739 if (VT == MVT::v4i32 || VT == MVT::v4f32)
return getTargetShuffleNode(X86ISD::MOVSS, dl, VT, V1, V2, DAG);
}
}
12744 // FIXME: fold these into legal mask.
12745 if (isMOVLHPSMask(M, VT) && !isUNPCKLMask(M, VT, HasInt256))
12746 return getMOVLowToHigh(Op, dl, DAG, HasSSE2);
12748 if (isMOVHLPSMask(M, VT))
12749 return getMOVHighToLow(Op, dl, DAG);
12751 if (V2IsUndef && isMOVSHDUPMask(M, VT, Subtarget))
12752 return getTargetShuffleNode(X86ISD::MOVSHDUP, dl, VT, V1, DAG);
12754 if (V2IsUndef && isMOVSLDUPMask(M, VT, Subtarget))
12755 return getTargetShuffleNode(X86ISD::MOVSLDUP, dl, VT, V1, DAG);
12757 if (isMOVLPMask(M, VT))
12758 return getMOVLP(Op, dl, DAG, HasSSE2);
12760 if (ShouldXformToMOVHLPS(M, VT) ||
12761 ShouldXformToMOVLP(V1.getNode(), V2.getNode(), M, VT))
12762 return DAG.getCommutedVectorShuffle(*SVOp);
if (isShift) {
  // No better options. Use a vshldq / vsrldq.
  MVT EltVT = VT.getVectorElementType();
  ShAmt *= EltVT.getSizeInBits();
  return getVShift(isLeft, VT, ShVal, ShAmt, DAG, *this, dl);
}
12771 bool Commuted = false;
12772 // FIXME: This should also accept a bitcast of a splat? Be careful, not
12773 // 1,1,1,1 -> v8i16 though.
12774 BitVector UndefElements;
12775 if (auto *BVOp = dyn_cast<BuildVectorSDNode>(V1.getNode()))
if (BVOp->getConstantSplatNode(&UndefElements) && UndefElements.none())
  V1IsSplat = true;
12778 if (auto *BVOp = dyn_cast<BuildVectorSDNode>(V2.getNode()))
if (BVOp->getConstantSplatNode(&UndefElements) && UndefElements.none())
  V2IsSplat = true;
12782 // Canonicalize the splat or undef, if present, to be on the RHS.
12783 if (!V2IsUndef && V1IsSplat && !V2IsSplat) {
CommuteVectorShuffleMask(M, NumElems);
std::swap(V1, V2);
std::swap(V1IsSplat, V2IsSplat);
Commuted = true;
}
12790 if (isCommutedMOVLMask(M, VT, V2IsSplat, V2IsUndef)) {
// Shuffling low element of v1 into undef, just return v1.
if (V2IsUndef)
  return V1;
12794 // If V2 is a splat, the mask may be malformed such as <4,3,3,3>, which
12795 // the instruction selector will not match, so get a canonical MOVL with
12796 // swapped operands to undo the commute.
return getMOVL(DAG, dl, VT, V2, V1);
}
12800 if (isUNPCKLMask(M, VT, HasInt256))
12801 return getTargetShuffleNode(X86ISD::UNPCKL, dl, VT, V1, V2, DAG);
12803 if (isUNPCKHMask(M, VT, HasInt256))
12804 return getTargetShuffleNode(X86ISD::UNPCKH, dl, VT, V1, V2, DAG);
if (V2IsSplat) {
  // Normalize the mask so all entries that point to V2 point to its first
  // element, then try to match unpck{h|l} again. If they match, return a
  // new vector_shuffle with the corrected mask.
  SmallVector<int, 8> NewMask(M.begin(), M.end());
  NormalizeMask(NewMask, NumElems);
  if (isUNPCKLMask(NewMask, VT, HasInt256, true))
    return getTargetShuffleNode(X86ISD::UNPCKL, dl, VT, V1, V2, DAG);
  if (isUNPCKHMask(NewMask, VT, HasInt256, true))
    return getTargetShuffleNode(X86ISD::UNPCKH, dl, VT, V1, V2, DAG);
}
// Commute it back and try unpck* again.
// FIXME: this seems wrong.
12821 CommuteVectorShuffleMask(M, NumElems);
12823 std::swap(V1IsSplat, V2IsSplat);
12825 if (isUNPCKLMask(M, VT, HasInt256))
12826 return getTargetShuffleNode(X86ISD::UNPCKL, dl, VT, V1, V2, DAG);
12828 if (isUNPCKHMask(M, VT, HasInt256))
12829 return getTargetShuffleNode(X86ISD::UNPCKH, dl, VT, V1, V2, DAG);
12832 // Normalize the node to match x86 shuffle ops if needed
12833 if (!V2IsUndef && (isSHUFPMask(M, VT, /* Commuted */ true)))
12834 return DAG.getCommutedVectorShuffle(*SVOp);
// The checks below are all present in isShuffleMaskLegal, but they are
// inlined here right now so we can directly emit target-specific nodes;
// they will be removed one by one until none of them returns Op anymore.
12840 if (ShuffleVectorSDNode::isSplatMask(&M[0], VT) &&
12841 SVOp->getSplatIndex() == 0 && V2IsUndef) {
12842 if (VT == MVT::v2f64 || VT == MVT::v2i64)
return getTargetShuffleNode(X86ISD::UNPCKL, dl, VT, V1, V1, DAG);
}
12846 if (isPSHUFHWMask(M, VT, HasInt256))
12847 return getTargetShuffleNode(X86ISD::PSHUFHW, dl, VT, V1,
                            getShufflePSHUFHWImmediate(SVOp),
                            DAG);
12851 if (isPSHUFLWMask(M, VT, HasInt256))
12852 return getTargetShuffleNode(X86ISD::PSHUFLW, dl, VT, V1,
                            getShufflePSHUFLWImmediate(SVOp),
                            DAG);
12856 unsigned MaskValue;
12857 if (isBlendMask(M, VT, Subtarget->hasSSE41(), HasInt256, &MaskValue))
12858 return LowerVECTOR_SHUFFLEtoBlend(SVOp, MaskValue, Subtarget, DAG);
12860 if (isSHUFPMask(M, VT))
12861 return getTargetShuffleNode(X86ISD::SHUFP, dl, VT, V1, V2,
12862 getShuffleSHUFImmediate(SVOp), DAG);
12864 if (isUNPCKL_v_undef_Mask(M, VT, HasInt256))
12865 return getTargetShuffleNode(X86ISD::UNPCKL, dl, VT, V1, V1, DAG);
12866 if (isUNPCKH_v_undef_Mask(M, VT, HasInt256))
12867 return getTargetShuffleNode(X86ISD::UNPCKH, dl, VT, V1, V1, DAG);
12869 //===--------------------------------------------------------------------===//
12870 // Generate target specific nodes for 128 or 256-bit shuffles only
12871 // supported in the AVX instruction set.
12874 // Handle VMOVDDUPY permutations
12875 if (V2IsUndef && isMOVDDUPYMask(M, VT, HasFp256))
12876 return getTargetShuffleNode(X86ISD::MOVDDUP, dl, VT, V1, DAG);
12878 // Handle VPERMILPS/D* permutations
12879 if (isVPERMILPMask(M, VT)) {
12880 if ((HasInt256 && VT == MVT::v8i32) || VT == MVT::v16i32)
12881 return getTargetShuffleNode(X86ISD::PSHUFD, dl, VT, V1,
12882 getShuffleSHUFImmediate(SVOp), DAG);
12883 return getTargetShuffleNode(X86ISD::VPERMILPI, dl, VT, V1,
12884 getShuffleSHUFImmediate(SVOp), DAG);
12888 if (VT.is512BitVector() && isINSERT64x4Mask(M, VT, &Idx))
12889 return Insert256BitVector(V1, Extract256BitVector(V2, 0, DAG, dl),
12890 Idx*(NumElems/2), DAG, dl);
12892 // Handle VPERM2F128/VPERM2I128 permutations
12893 if (isVPERM2X128Mask(M, VT, HasFp256))
12894 return getTargetShuffleNode(X86ISD::VPERM2X128, dl, VT, V1,
12895 V2, getShuffleVPERM2X128Immediate(SVOp), DAG);
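// Worked example (illustrative): the v8f32 mask <0,1,2,3,8,9,10,11> keeps
// the low half of V1 and takes the low half of V2, giving a VPERM2X128
// immediate of 0x20 (low selector 0 == V1.lo, high selector 2 == V2.lo).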
12897 if (Subtarget->hasSSE41() && isINSERTPSMask(M, VT))
12898 return getINSERTPS(SVOp, dl, DAG);
12901 if (V2IsUndef && HasInt256 && isPermImmMask(M, VT, Imm8))
12902 return getTargetShuffleNode(X86ISD::VPERMI, dl, VT, V1, Imm8, DAG);
12904 if ((V2IsUndef && HasInt256 && VT.is256BitVector() && NumElems == 8) ||
12905 VT.is512BitVector()) {
12906 MVT MaskEltVT = MVT::getIntegerVT(VT.getVectorElementType().getSizeInBits());
12907 MVT MaskVectorVT = MVT::getVectorVT(MaskEltVT, NumElems);
12908 SmallVector<SDValue, 16> permclMask;
12909 for (unsigned i = 0; i != NumElems; ++i) {
  permclMask.push_back(DAG.getConstant((M[i]>=0) ? M[i] : 0, MaskEltVT));
}
12913 SDValue Mask = DAG.getNode(ISD::BUILD_VECTOR, dl, MaskVectorVT, permclMask);
if (V2IsUndef)
  // Bitcast is for VPERMPS since the mask is v8i32 but the node takes v8f32.
  return DAG.getNode(X86ISD::VPERMV, dl, VT,
                     DAG.getNode(ISD::BITCAST, dl, VT, Mask), V1);
12918 return DAG.getNode(X86ISD::VPERMV3, dl, VT, V1,
                   DAG.getNode(ISD::BITCAST, dl, VT, Mask), V2);
}
12922 //===--------------------------------------------------------------------===//
12923 // Since no target specific shuffle was selected for this generic one,
12924 // lower it into other known shuffles. FIXME: this isn't true yet, but
12925 // this is the plan.
12928 // Handle v8i16 specifically since SSE can do byte extraction and insertion.
12929 if (VT == MVT::v8i16) {
12930 SDValue NewOp = LowerVECTOR_SHUFFLEv8i16(Op, Subtarget, DAG);
if (NewOp.getNode())
  return NewOp;
}
12935 if (VT == MVT::v16i16 && HasInt256) {
12936 SDValue NewOp = LowerVECTOR_SHUFFLEv16i16(Op, DAG);
if (NewOp.getNode())
  return NewOp;
}
12941 if (VT == MVT::v16i8) {
12942 SDValue NewOp = LowerVECTOR_SHUFFLEv16i8(SVOp, Subtarget, DAG);
if (NewOp.getNode())
  return NewOp;
}
12947 if (VT == MVT::v32i8) {
12948 SDValue NewOp = LowerVECTOR_SHUFFLEv32i8(SVOp, Subtarget, DAG);
if (NewOp.getNode())
  return NewOp;
}
12953 // Handle all 128-bit wide vectors with 4 elements, and match them with
12954 // several different shuffle types.
12955 if (NumElems == 4 && VT.is128BitVector())
12956 return LowerVECTOR_SHUFFLE_128v4(SVOp, DAG);
12958 // Handle general 256-bit shuffles
12959 if (VT.is256BitVector())
  return LowerVECTOR_SHUFFLE_256(SVOp, DAG);

return SDValue();
}
// This function assumes its argument is a BUILD_VECTOR of constants or
// undef SDNodes, i.e. ISD::isBuildVectorOfConstantSDNodes(BuildVector) is
// true.
12968 static bool BUILD_VECTORtoBlendMask(BuildVectorSDNode *BuildVector,
12969 unsigned &MaskValue) {
MaskValue = 0;
unsigned NumElems = BuildVector->getNumOperands();
12972 // There are 2 lanes if (NumElems > 8), and 1 lane otherwise.
12973 unsigned NumLanes = (NumElems - 1) / 8 + 1;
12974 unsigned NumElemsInLane = NumElems / NumLanes;
// Blend for v16i16 should be symmetric for both lanes.
12977 for (unsigned i = 0; i < NumElemsInLane; ++i) {
12978 SDValue EltCond = BuildVector->getOperand(i);
12979 SDValue SndLaneEltCond =
12980 (NumLanes == 2) ? BuildVector->getOperand(i + NumElemsInLane) : EltCond;
12982 int Lane1Cond = -1, Lane2Cond = -1;
12983 if (isa<ConstantSDNode>(EltCond))
12984 Lane1Cond = !isZero(EltCond);
12985 if (isa<ConstantSDNode>(SndLaneEltCond))
12986 Lane2Cond = !isZero(SndLaneEltCond);
12988 if (Lane1Cond == Lane2Cond || Lane2Cond < 0)
12989 // Lane1Cond != 0, means we want the first argument.
12990 // Lane1Cond == 0, means we want the second argument.
12991 // The encoding of this argument is 0 for the first argument, 1
12992 // for the second. Therefore, invert the condition.
12993 MaskValue |= !Lane1Cond << i;
12994 else if (Lane1Cond < 0)
12995 MaskValue |= !Lane2Cond << i;
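// Worked example (illustrative): for a v4i32 condition <-1,0,-1,0>,
// BUILD_VECTORtoBlendMask computes MaskValue == 0b1010: lanes 1 and 3 select
// the second operand, lanes 0 and 2 select the first.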
/// \brief Try to lower a VSELECT instruction to an immediate-controlled
/// blend instruction.
13004 static SDValue lowerVSELECTtoBLENDI(SDValue Op, const X86Subtarget *Subtarget,
13005 SelectionDAG &DAG) {
13006 SDValue Cond = Op.getOperand(0);
13007 SDValue LHS = Op.getOperand(1);
13008 SDValue RHS = Op.getOperand(2);
SDLoc dl(Op);
MVT VT = Op.getSimpleValueType();
13011 MVT EltVT = VT.getVectorElementType();
13012 unsigned NumElems = VT.getVectorNumElements();
13014 // There is no blend with immediate in AVX-512.
if (VT.is512BitVector())
  return SDValue();
if (!Subtarget->hasSSE41() || EltVT == MVT::i8)
  return SDValue();
if (!Subtarget->hasInt256() && VT == MVT::v16i16)
  return SDValue();

if (!ISD::isBuildVectorOfConstantSDNodes(Cond.getNode()))
  return SDValue();
13026 // Check the mask for BLEND and build the value.
13027 unsigned MaskValue = 0;
if (!BUILD_VECTORtoBlendMask(cast<BuildVectorSDNode>(Cond), MaskValue))
  return SDValue();
// Convert i32 vectors to floating point if it is not AVX2.
// AVX2 introduced VPBLENDD instruction for 128 and 256-bit vectors.
MVT BlendVT = VT;
if (EltVT == MVT::i64 || (EltVT == MVT::i32 && !Subtarget->hasInt256())) {
  BlendVT = MVT::getVectorVT(MVT::getFloatingPointVT(EltVT.getSizeInBits()),
                             NumElems);
  LHS = DAG.getNode(ISD::BITCAST, dl, BlendVT, LHS);
  RHS = DAG.getNode(ISD::BITCAST, dl, BlendVT, RHS);
}
13041 SDValue Ret = DAG.getNode(X86ISD::BLENDI, dl, BlendVT, LHS, RHS,
13042 DAG.getConstant(MaskValue, MVT::i32));
13043 return DAG.getNode(ISD::BITCAST, dl, VT, Ret);
13046 SDValue X86TargetLowering::LowerVSELECT(SDValue Op, SelectionDAG &DAG) const {
13047 // A vselect where all conditions and data are constants can be optimized into
13048 // a single vector load by SelectionDAGLegalize::ExpandBUILD_VECTOR().
13049 if (ISD::isBuildVectorOfConstantSDNodes(Op.getOperand(0).getNode()) &&
13050 ISD::isBuildVectorOfConstantSDNodes(Op.getOperand(1).getNode()) &&
    ISD::isBuildVectorOfConstantSDNodes(Op.getOperand(2).getNode()))
  return SDValue();
13054 SDValue BlendOp = lowerVSELECTtoBLENDI(Op, Subtarget, DAG);
if (BlendOp.getNode())
  return BlendOp;
13058 // Some types for vselect were previously set to Expand, not Legal or
13059 // Custom. Return an empty SDValue so we fall-through to Expand, after
13060 // the Custom lowering phase.
13061 MVT VT = Op.getSimpleValueType();
switch (VT.SimpleTy) {
default:
  break;
case MVT::v8i16:
case MVT::v16i16:
  if (Subtarget->hasBWI() && Subtarget->hasVLX())
    break;
  return SDValue();
}
// We couldn't create a "Blend with immediate" node.
// This node should still be legal, but we'll have to emit a blendv*
// instruction.
return Op;
}
13078 static SDValue LowerEXTRACT_VECTOR_ELT_SSE4(SDValue Op, SelectionDAG &DAG) {
MVT VT = Op.getSimpleValueType();
SDLoc dl(Op);

if (!Op.getOperand(0).getSimpleValueType().is128BitVector())
  return SDValue();
13085 if (VT.getSizeInBits() == 8) {
13086 SDValue Extract = DAG.getNode(X86ISD::PEXTRB, dl, MVT::i32,
13087 Op.getOperand(0), Op.getOperand(1));
13088 SDValue Assert = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Extract,
13089 DAG.getValueType(VT));
13090 return DAG.getNode(ISD::TRUNCATE, dl, VT, Assert);
13093 if (VT.getSizeInBits() == 16) {
13094 unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
// If Idx is 0, it's cheaper to do a move instead of a pextrw.
if (Idx == 0)
  return DAG.getNode(ISD::TRUNCATE, dl, MVT::i16,
                     DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
                                 DAG.getNode(ISD::BITCAST, dl, MVT::v4i32,
                                             Op.getOperand(0)),
                                 Op.getOperand(1)));
13103 SDValue Extract = DAG.getNode(X86ISD::PEXTRW, dl, MVT::i32,
13104 Op.getOperand(0), Op.getOperand(1));
13105 SDValue Assert = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Extract,
13106 DAG.getValueType(VT));
13107 return DAG.getNode(ISD::TRUNCATE, dl, VT, Assert);
13110 if (VT == MVT::f32) {
13111 // EXTRACTPS outputs to a GPR32 register which will require a movd to copy
13112 // the result back to FR32 register. It's only worth matching if the
13113 // result has a single use which is a store or a bitcast to i32. And in
13114 // the case of a store, it's not worth it if the index is a constant 0,
13115 // because a MOVSSmr can be used instead, which is smaller and faster.
if (!Op.hasOneUse())
  return SDValue();
13118 SDNode *User = *Op.getNode()->use_begin();
13119 if ((User->getOpcode() != ISD::STORE ||
13120 (isa<ConstantSDNode>(Op.getOperand(1)) &&
13121 cast<ConstantSDNode>(Op.getOperand(1))->isNullValue())) &&
13122 (User->getOpcode() != ISD::BITCAST ||
    (User->getOpcode() != ISD::BITCAST ||
     User->getValueType(0) != MVT::i32))
  return SDValue();
SDValue Extract = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
                              DAG.getNode(ISD::BITCAST, dl, MVT::v4i32,
                                          Op.getOperand(0)),
                              Op.getOperand(1));
return DAG.getNode(ISD::BITCAST, dl, MVT::f32, Extract);
}
13132 if (VT == MVT::i32 || VT == MVT::i64) {
  // ExtractPS/pextrq work with a constant index.
  if (isa<ConstantSDNode>(Op.getOperand(1)))
    return Op;
}

return SDValue();
}
13140 /// Extract one bit from mask vector, like v16i1 or v8i1.
13141 /// AVX-512 feature.
SDValue
X86TargetLowering::ExtractBitFromMaskVector(SDValue Op, SelectionDAG &DAG) const {
SDValue Vec = Op.getOperand(0);
SDLoc dl(Op);
13146 MVT VecVT = Vec.getSimpleValueType();
13147 SDValue Idx = Op.getOperand(1);
13148 MVT EltVT = Op.getSimpleValueType();
13150 assert((EltVT == MVT::i1) && "Unexpected operands in ExtractBitFromMaskVector");
13151 assert((VecVT.getVectorNumElements() <= 16 || Subtarget->hasBWI()) &&
13152 "Unexpected vector type in ExtractBitFromMaskVector");
// A variable index can't be handled in mask registers,
// so extend the vector to VR512.
13156 if (!isa<ConstantSDNode>(Idx)) {
13157 MVT ExtVT = (VecVT == MVT::v8i1 ? MVT::v8i64 : MVT::v16i32);
13158 SDValue Ext = DAG.getNode(ISD::ZERO_EXTEND, dl, ExtVT, Vec);
13159 SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl,
13160 ExtVT.getVectorElementType(), Ext, Idx);
13161 return DAG.getNode(ISD::TRUNCATE, dl, EltVT, Elt);
13164 unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
13165 const TargetRegisterClass* rc = getRegClassFor(VecVT);
13166 if (!Subtarget->hasDQI() && (VecVT.getVectorNumElements() <= 8))
13167 rc = getRegClassFor(MVT::v16i1);
unsigned MaxShift = rc->getSize()*8 - 1;
Vec = DAG.getNode(X86ISD::VSHLI, dl, VecVT, Vec,
                  DAG.getConstant(MaxShift - IdxVal, MVT::i8));
Vec = DAG.getNode(X86ISD::VSRLI, dl, VecVT, Vec,
                  DAG.getConstant(MaxShift, MVT::i8));
13173 return DAG.getNode(X86ISD::VEXTRACT, dl, MVT::i1, Vec,
                   DAG.getIntPtrConstant(0));
}
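// Worked example (illustrative): for a v16i1 mask in a 16-bit register class,
// MaxShift == 15; extracting bit 3 shifts left by 15 - 3 == 12 so bit 3
// becomes the MSB, then shifts right by 15, leaving that single bit in lane 0
// for the VEXTRACT.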
SDValue
X86TargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op,
                                           SelectionDAG &DAG) const {
SDLoc dl(Op);
13181 SDValue Vec = Op.getOperand(0);
13182 MVT VecVT = Vec.getSimpleValueType();
13183 SDValue Idx = Op.getOperand(1);
13185 if (Op.getSimpleValueType() == MVT::i1)
13186 return ExtractBitFromMaskVector(Op, DAG);
13188 if (!isa<ConstantSDNode>(Idx)) {
13189 if (VecVT.is512BitVector() ||
13190 (VecVT.is256BitVector() && Subtarget->hasInt256() &&
13191 VecVT.getVectorElementType().getSizeInBits() == 32)) {
MVT MaskEltVT =
    MVT::getIntegerVT(VecVT.getVectorElementType().getSizeInBits());
13195 MVT MaskVT = MVT::getVectorVT(MaskEltVT, VecVT.getSizeInBits() /
13196 MaskEltVT.getSizeInBits());
13198 Idx = DAG.getZExtOrTrunc(Idx, dl, MaskEltVT);
13199 SDValue Mask = DAG.getNode(X86ISD::VINSERT, dl, MaskVT,
13200 getZeroVector(MaskVT, Subtarget, DAG, dl),
13201 Idx, DAG.getConstant(0, getPointerTy()));
13202 SDValue Perm = DAG.getNode(X86ISD::VPERMV, dl, VecVT, Mask, Vec);
13203 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, Op.getValueType(),
                     Perm, DAG.getConstant(0, getPointerTy()));
}
return SDValue();
}
13209 // If this is a 256-bit vector result, first extract the 128-bit vector and
13210 // then extract the element from the 128-bit vector.
13211 if (VecVT.is256BitVector() || VecVT.is512BitVector()) {
13213 unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
13214 // Get the 128-bit vector.
13215 Vec = Extract128BitVector(Vec, IdxVal, DAG, dl);
13216 MVT EltVT = VecVT.getVectorElementType();
unsigned ElemsPerChunk = 128 / EltVT.getSizeInBits();

// Reduce IdxVal to the index within the extracted 128-bit chunk.
IdxVal -= (IdxVal/ElemsPerChunk)*ElemsPerChunk;
13223 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, Op.getValueType(), Vec,
                   DAG.getConstant(IdxVal, MVT::i32));
}
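// Worked example (illustrative): extracting element 5 of a v8f32 first pulls
// out the upper 128-bit half (elements 4..7) and then extracts element
// 5 % 4 == 1 from that v4f32.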
13227 assert(VecVT.is128BitVector() && "Unexpected vector length");
13229 if (Subtarget->hasSSE41()) {
SDValue Res = LowerEXTRACT_VECTOR_ELT_SSE4(Op, DAG);
if (Res.getNode())
  return Res;
}
13235 MVT VT = Op.getSimpleValueType();
13236 // TODO: handle v16i8.
13237 if (VT.getSizeInBits() == 16) {
13238 SDValue Vec = Op.getOperand(0);
unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
// If Idx is 0, it's cheaper to do a move instead of a pextrw.
if (Idx == 0)
  return DAG.getNode(ISD::TRUNCATE, dl, MVT::i16,
                     DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
                                 DAG.getNode(ISD::BITCAST, dl, MVT::v4i32,
                                             Vec),
                                 Op.getOperand(1)));
// Transform it so it matches pextrw, which produces a 32-bit result.
13247 MVT EltVT = MVT::i32;
13248 SDValue Extract = DAG.getNode(X86ISD::PEXTRW, dl, EltVT,
13249 Op.getOperand(0), Op.getOperand(1));
13250 SDValue Assert = DAG.getNode(ISD::AssertZext, dl, EltVT, Extract,
13251 DAG.getValueType(VT));
13252 return DAG.getNode(ISD::TRUNCATE, dl, VT, Assert);
13255 if (VT.getSizeInBits() == 32) {
unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
if (Idx == 0)
  return Op;
13260 // SHUFPS the element to the lowest double word, then movss.
13261 int Mask[4] = { static_cast<int>(Idx), -1, -1, -1 };
13262 MVT VVT = Op.getOperand(0).getSimpleValueType();
13263 SDValue Vec = DAG.getVectorShuffle(VVT, dl, Op.getOperand(0),
13264 DAG.getUNDEF(VVT), Mask);
13265 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Vec,
13266 DAG.getIntPtrConstant(0));
13269 if (VT.getSizeInBits() == 64) {
13270 // FIXME: .td only matches this for <2 x f64>, not <2 x i64> on 32b
13271 // FIXME: seems like this should be unnecessary if mov{h,l}pd were taught
13272 // to match extract_elt for f64.
unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
if (Idx == 0)
  return Op;
13277 // UNPCKHPD the element to the lowest double word, then movsd.
13278 // Note if the lower 64 bits of the result of the UNPCKHPD is then stored
13279 // to a f64mem, the whole operation is folded into a single MOVHPDmr.
13280 int Mask[2] = { 1, -1 };
13281 MVT VVT = Op.getOperand(0).getSimpleValueType();
13282 SDValue Vec = DAG.getVectorShuffle(VVT, dl, Op.getOperand(0),
13283 DAG.getUNDEF(VVT), Mask);
13284 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Vec,
                     DAG.getIntPtrConstant(0));
}

return SDValue();
}
13291 /// Insert one bit to mask vector, like v16i1 or v8i1.
13292 /// AVX-512 feature.
SDValue
X86TargetLowering::InsertBitToMaskVector(SDValue Op, SelectionDAG &DAG) const {
SDLoc dl(Op);
13296 SDValue Vec = Op.getOperand(0);
13297 SDValue Elt = Op.getOperand(1);
13298 SDValue Idx = Op.getOperand(2);
13299 MVT VecVT = Vec.getSimpleValueType();
13301 if (!isa<ConstantSDNode>(Idx)) {
// Non-constant index: extend source and destination,
// insert the element, and then truncate the result.
13304 MVT ExtVecVT = (VecVT == MVT::v8i1 ? MVT::v8i64 : MVT::v16i32);
13305 MVT ExtEltVT = (VecVT == MVT::v8i1 ? MVT::i64 : MVT::i32);
13306 SDValue ExtOp = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, ExtVecVT,
13307 DAG.getNode(ISD::ZERO_EXTEND, dl, ExtVecVT, Vec),
13308 DAG.getNode(ISD::ZERO_EXTEND, dl, ExtEltVT, Elt), Idx);
13309 return DAG.getNode(ISD::TRUNCATE, dl, VecVT, ExtOp);
13312 unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
13313 SDValue EltInVec = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VecVT, Elt);
13314 if (Vec.getOpcode() == ISD::UNDEF)
13315 return DAG.getNode(X86ISD::VSHLI, dl, VecVT, EltInVec,
13316 DAG.getConstant(IdxVal, MVT::i8));
13317 const TargetRegisterClass* rc = getRegClassFor(VecVT);
unsigned MaxShift = rc->getSize()*8 - 1;
EltInVec = DAG.getNode(X86ISD::VSHLI, dl, VecVT, EltInVec,
                       DAG.getConstant(MaxShift, MVT::i8));
EltInVec = DAG.getNode(X86ISD::VSRLI, dl, VecVT, EltInVec,
                       DAG.getConstant(MaxShift - IdxVal, MVT::i8));
return DAG.getNode(ISD::OR, dl, VecVT, Vec, EltInVec);
}
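// Worked example (illustrative): for v16i1 with IdxVal == 5, the scalar bit
// is shifted to the MSB (shl 15) and back down to lane 5 (srl 10), isolating
// it, and the result is OR'ed into the existing mask vector.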
13326 SDValue X86TargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op,
13327 SelectionDAG &DAG) const {
13328 MVT VT = Op.getSimpleValueType();
13329 MVT EltVT = VT.getVectorElementType();
13331 if (EltVT == MVT::i1)
  return InsertBitToMaskVector(Op, DAG);

SDLoc dl(Op);
13335 SDValue N0 = Op.getOperand(0);
13336 SDValue N1 = Op.getOperand(1);
13337 SDValue N2 = Op.getOperand(2);
if (!isa<ConstantSDNode>(N2))
  return SDValue();
13340 auto *N2C = cast<ConstantSDNode>(N2);
13341 unsigned IdxVal = N2C->getZExtValue();
13343 // If the vector is wider than 128 bits, extract the 128-bit subvector, insert
13344 // into that, and then insert the subvector back into the result.
13345 if (VT.is256BitVector() || VT.is512BitVector()) {
13346 // Get the desired 128-bit vector half.
13347 SDValue V = Extract128BitVector(N0, IdxVal, DAG, dl);
13349 // Insert the element into the desired half.
13350 unsigned NumEltsIn128 = 128 / EltVT.getSizeInBits();
13351 unsigned IdxIn128 = IdxVal - (IdxVal / NumEltsIn128) * NumEltsIn128;
13353 V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, V.getValueType(), V, N1,
13354 DAG.getConstant(IdxIn128, MVT::i32));
13356 // Insert the changed part back to the 256-bit vector
return Insert128BitVector(N0, V, IdxVal, DAG, dl);
}
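// Worked example (illustrative): inserting into element 9 of a v16i16
// extracts the upper 128-bit half, inserts at lane 9 % 8 == 1 there, and
// then writes that half back into the original 256-bit vector.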
13359 assert(VT.is128BitVector() && "Only 128-bit vector types should be left!");
13361 if (Subtarget->hasSSE41()) {
if (EltVT.getSizeInBits() == 8 || EltVT.getSizeInBits() == 16) {
  unsigned Opc;
  if (VT == MVT::v8i16) {
    Opc = X86ISD::PINSRW;
  } else {
    assert(VT == MVT::v16i8);
    Opc = X86ISD::PINSRB;
  }
// Transform it so it matches pinsr{b,w}, which expects a GR32 as its
// second argument.
13373 if (N1.getValueType() != MVT::i32)
13374 N1 = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, N1);
13375 if (N2.getValueType() != MVT::i32)
13376 N2 = DAG.getIntPtrConstant(IdxVal);
return DAG.getNode(Opc, dl, VT, N0, N1, N2);
}
13380 if (EltVT == MVT::f32) {
// Bits [7:6] of the constant are the source select. This will always be
// zero here. The DAG Combiner may combine an extract_elt index into these
// bits. For example (insert (extract, 3), 2) could be matched by putting
// the '3' into bits [7:6] of X86ISD::INSERTPS.
// Bits [5:4] of the constant are the destination select. This is the
// value of the incoming immediate.
// Bits [3:0] of the constant are the zero mask. The DAG Combiner may
// combine either bitwise AND or insert of float 0.0 to set these bits.
13391 N2 = DAG.getIntPtrConstant(IdxVal << 4);
// Create this as a scalar-to-vector to match the instruction pattern.
13393 N1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4f32, N1);
return DAG.getNode(X86ISD::INSERTPS, dl, VT, N0, N1, N2);
}
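// For example (illustrative), inserting an f32 into element 2 of a v4f32
// uses the immediate (2 << 4) == 0x20: source select and zero mask stay 0.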
13397 if (EltVT == MVT::i32 || EltVT == MVT::i64) {
    // PINSR* works with a constant index.
    return Op;
  }
}

if (EltVT == MVT::i8)
  return SDValue();
13406 if (EltVT.getSizeInBits() == 16) {
// Transform it so it matches pinsrw, which expects a 16-bit value in a GR32
// register as its second argument.
13409 if (N1.getValueType() != MVT::i32)
13410 N1 = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, N1);
13411 if (N2.getValueType() != MVT::i32)
13412 N2 = DAG.getIntPtrConstant(IdxVal);
  return DAG.getNode(X86ISD::PINSRW, dl, VT, N0, N1, N2);
}

return SDValue();
}
13418 static SDValue LowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG) {
SDLoc dl(Op);
MVT OpVT = Op.getSimpleValueType();
13422 // If this is a 256-bit vector result, first insert into a 128-bit
13423 // vector and then insert into the 256-bit vector.
13424 if (!OpVT.is128BitVector()) {
13425 // Insert into a 128-bit vector.
13426 unsigned SizeFactor = OpVT.getSizeInBits()/128;
13427 MVT VT128 = MVT::getVectorVT(OpVT.getVectorElementType(),
13428 OpVT.getVectorNumElements() / SizeFactor);
13430 Op = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT128, Op.getOperand(0));
13432 // Insert the 128-bit vector.
13433 return Insert128BitVector(DAG.getUNDEF(OpVT), Op, 0, DAG, dl);
13436 if (OpVT == MVT::v1i64 &&
13437 Op.getOperand(0).getValueType() == MVT::i64)
13438 return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v1i64, Op.getOperand(0));
13440 SDValue AnyExt = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, Op.getOperand(0));
13441 assert(OpVT.is128BitVector() && "Expected an SSE type!");
13442 return DAG.getNode(ISD::BITCAST, dl, OpVT,
13443 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32,AnyExt));
13446 // Lower a node with an EXTRACT_SUBVECTOR opcode. This may result in
13447 // a simple subregister reference or explicit instructions to grab
13448 // upper bits of a vector.
13449 static SDValue LowerEXTRACT_SUBVECTOR(SDValue Op, const X86Subtarget *Subtarget,
13450 SelectionDAG &DAG) {
13452 SDValue In = Op.getOperand(0);
13453 SDValue Idx = Op.getOperand(1);
13454 unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
13455 MVT ResVT = Op.getSimpleValueType();
13456 MVT InVT = In.getSimpleValueType();
13458 if (Subtarget->hasFp256()) {
13459 if (ResVT.is128BitVector() &&
13460 (InVT.is256BitVector() || InVT.is512BitVector()) &&
13461 isa<ConstantSDNode>(Idx)) {
return Extract128BitVector(In, IdxVal, DAG, dl);
}
13464 if (ResVT.is256BitVector() && InVT.is512BitVector() &&
13465 isa<ConstantSDNode>(Idx)) {
    return Extract256BitVector(In, IdxVal, DAG, dl);
  }
}
return SDValue();
}
13472 // Lower a node with an INSERT_SUBVECTOR opcode. This may result in a
13473 // simple superregister reference or explicit instructions to insert
13474 // the upper bits of a vector.
13475 static SDValue LowerINSERT_SUBVECTOR(SDValue Op, const X86Subtarget *Subtarget,
13476 SelectionDAG &DAG) {
if (!Subtarget->hasAVX())
  return SDValue();
SDLoc dl(Op);
SDValue Vec = Op.getOperand(0);
13482 SDValue SubVec = Op.getOperand(1);
13483 SDValue Idx = Op.getOperand(2);
if (!isa<ConstantSDNode>(Idx))
  return SDValue();
13488 unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
13489 MVT OpVT = Op.getSimpleValueType();
13490 MVT SubVecVT = SubVec.getSimpleValueType();
13492 // Fold two 16-byte subvector loads into one 32-byte load:
13493 // (insert_subvector (insert_subvector undef, (load addr), 0),
13494 // (load addr + 16), Elts/2)
13496 if ((IdxVal == OpVT.getVectorNumElements() / 2) &&
13497 Vec.getOpcode() == ISD::INSERT_SUBVECTOR &&
13498 OpVT.is256BitVector() && SubVecVT.is128BitVector() &&
13499 !Subtarget->isUnalignedMem32Slow()) {
13500 SDValue SubVec2 = Vec.getOperand(1);
13501 if (auto *Idx2 = dyn_cast<ConstantSDNode>(Vec.getOperand(2))) {
13502 if (Idx2->getZExtValue() == 0) {
13503 SDValue Ops[] = { SubVec2, SubVec };
      SDValue LD = EltsFromConsecutiveLoads(OpVT, Ops, dl, DAG, false);
      if (LD.getNode())
        return LD;
    }
  }
}
13511 if ((OpVT.is256BitVector() || OpVT.is512BitVector()) &&
13512 SubVecVT.is128BitVector())
13513 return Insert128BitVector(Vec, SubVec, IdxVal, DAG, dl);
13515 if (OpVT.is512BitVector() && SubVecVT.is256BitVector())
  return Insert256BitVector(Vec, SubVec, IdxVal, DAG, dl);

return SDValue();
}
// ConstantPool, JumpTable, GlobalAddress, and ExternalSymbol are lowered as
// their target counterpart wrapped in the X86ISD::Wrapper node. Suppose N is
// one of the above mentioned nodes. It has to be wrapped because otherwise
// Select(N) returns N. So the raw TargetGlobalAddress nodes, etc. can only
// be used to form an addressing mode. These wrapped nodes will be selected
// into MOV32ri.
SDValue
X86TargetLowering::LowerConstantPool(SDValue Op, SelectionDAG &DAG) const {
13529 ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
13531 // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
13532 // global base reg.
13533 unsigned char OpFlag = 0;
13534 unsigned WrapperKind = X86ISD::Wrapper;
13535 CodeModel::Model M = DAG.getTarget().getCodeModel();
13537 if (Subtarget->isPICStyleRIPRel() &&
13538 (M == CodeModel::Small || M == CodeModel::Kernel))
13539 WrapperKind = X86ISD::WrapperRIP;
13540 else if (Subtarget->isPICStyleGOT())
13541 OpFlag = X86II::MO_GOTOFF;
13542 else if (Subtarget->isPICStyleStubPIC())
13543 OpFlag = X86II::MO_PIC_BASE_OFFSET;
13545 SDValue Result = DAG.getTargetConstantPool(CP->getConstVal(), getPointerTy(),
13546 CP->getAlignment(),
13547 CP->getOffset(), OpFlag);
SDLoc DL(CP);
Result = DAG.getNode(WrapperKind, DL, getPointerTy(), Result);
// With PIC, the address is actually $g + Offset.
if (OpFlag) {
  Result = DAG.getNode(ISD::ADD, DL, getPointerTy(),
                       DAG.getNode(X86ISD::GlobalBaseReg,
                                   SDLoc(), getPointerTy()),
                       Result);
}

return Result;
}
13561 SDValue X86TargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const {
13562 JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
13564 // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
13565 // global base reg.
13566 unsigned char OpFlag = 0;
13567 unsigned WrapperKind = X86ISD::Wrapper;
13568 CodeModel::Model M = DAG.getTarget().getCodeModel();
13570 if (Subtarget->isPICStyleRIPRel() &&
13571 (M == CodeModel::Small || M == CodeModel::Kernel))
13572 WrapperKind = X86ISD::WrapperRIP;
13573 else if (Subtarget->isPICStyleGOT())
13574 OpFlag = X86II::MO_GOTOFF;
13575 else if (Subtarget->isPICStyleStubPIC())
13576 OpFlag = X86II::MO_PIC_BASE_OFFSET;
SDValue Result = DAG.getTargetJumpTable(JT->getIndex(), getPointerTy(),
                                        OpFlag);
SDLoc DL(JT);
Result = DAG.getNode(WrapperKind, DL, getPointerTy(), Result);
// With PIC, the address is actually $g + Offset.
if (OpFlag) {
  Result = DAG.getNode(ISD::ADD, DL, getPointerTy(),
                       DAG.getNode(X86ISD::GlobalBaseReg,
                                   SDLoc(), getPointerTy()),
                       Result);
}

return Result;
}
SDValue
X86TargetLowering::LowerExternalSymbol(SDValue Op, SelectionDAG &DAG) const {
13595 const char *Sym = cast<ExternalSymbolSDNode>(Op)->getSymbol();
13597 // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
13598 // global base reg.
13599 unsigned char OpFlag = 0;
13600 unsigned WrapperKind = X86ISD::Wrapper;
13601 CodeModel::Model M = DAG.getTarget().getCodeModel();
13603 if (Subtarget->isPICStyleRIPRel() &&
13604 (M == CodeModel::Small || M == CodeModel::Kernel)) {
13605 if (Subtarget->isTargetDarwin() || Subtarget->isTargetELF())
13606 OpFlag = X86II::MO_GOTPCREL;
13607 WrapperKind = X86ISD::WrapperRIP;
13608 } else if (Subtarget->isPICStyleGOT()) {
13609 OpFlag = X86II::MO_GOT;
13610 } else if (Subtarget->isPICStyleStubPIC()) {
13611 OpFlag = X86II::MO_DARWIN_NONLAZY_PIC_BASE;
13612 } else if (Subtarget->isPICStyleStubNoDynamic()) {
13613 OpFlag = X86II::MO_DARWIN_NONLAZY;
13616 SDValue Result = DAG.getTargetExternalSymbol(Sym, getPointerTy(), OpFlag);
SDLoc DL(Op);
Result = DAG.getNode(WrapperKind, DL, getPointerTy(), Result);
13621 // With PIC, the address is actually $g + Offset.
13622 if (DAG.getTarget().getRelocationModel() == Reloc::PIC_ &&
13623 !Subtarget->is64Bit()) {
  Result = DAG.getNode(ISD::ADD, DL, getPointerTy(),
                       DAG.getNode(X86ISD::GlobalBaseReg,
                                   SDLoc(), getPointerTy()),
                       Result);
}
// For symbols that require a load from a stub to get the address, emit the
// load.
13632 if (isGlobalStubReference(OpFlag))
13633 Result = DAG.getLoad(getPointerTy(), DL, DAG.getEntryNode(), Result,
                       MachinePointerInfo::getGOT(), false, false, false, 0);

return Result;
}
SDValue
X86TargetLowering::LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const {
// Create the TargetBlockAddress node.
13642 unsigned char OpFlags =
13643 Subtarget->ClassifyBlockAddressReference();
13644 CodeModel::Model M = DAG.getTarget().getCodeModel();
13645 const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
13646 int64_t Offset = cast<BlockAddressSDNode>(Op)->getOffset();
SDValue Result = DAG.getTargetBlockAddress(BA, getPointerTy(), Offset,
                                           OpFlags);
SDLoc dl(Op);
13651 if (Subtarget->isPICStyleRIPRel() &&
13652 (M == CodeModel::Small || M == CodeModel::Kernel))
13653 Result = DAG.getNode(X86ISD::WrapperRIP, dl, getPointerTy(), Result);
13655 Result = DAG.getNode(X86ISD::Wrapper, dl, getPointerTy(), Result);
13657 // With PIC, the address is actually $g + Offset.
13658 if (isGlobalRelativeToPICBase(OpFlags)) {
  Result = DAG.getNode(ISD::ADD, dl, getPointerTy(),
                       DAG.getNode(X86ISD::GlobalBaseReg, dl, getPointerTy()),
                       Result);
}

return Result;
}
SDValue
X86TargetLowering::LowerGlobalAddress(const GlobalValue *GV, SDLoc dl,
                                      int64_t Offset, SelectionDAG &DAG) const {
13670 // Create the TargetGlobalAddress node, folding in the constant
13671 // offset if it is legal.
13672 unsigned char OpFlags =
13673 Subtarget->ClassifyGlobalReference(GV, DAG.getTarget());
13674 CodeModel::Model M = DAG.getTarget().getCodeModel();
SDValue Result;
if (OpFlags == X86II::MO_NO_FLAG &&
    X86::isOffsetSuitableForCodeModel(Offset, M)) {
  // A direct static reference to a global.
  Result = DAG.getTargetGlobalAddress(GV, dl, getPointerTy(), Offset);
  Offset = 0;
} else {
  Result = DAG.getTargetGlobalAddress(GV, dl, getPointerTy(), 0, OpFlags);
}
13685 if (Subtarget->isPICStyleRIPRel() &&
13686 (M == CodeModel::Small || M == CodeModel::Kernel))
13687 Result = DAG.getNode(X86ISD::WrapperRIP, dl, getPointerTy(), Result);
13689 Result = DAG.getNode(X86ISD::Wrapper, dl, getPointerTy(), Result);
13691 // With PIC, the address is actually $g + Offset.
13692 if (isGlobalRelativeToPICBase(OpFlags)) {
  Result = DAG.getNode(ISD::ADD, dl, getPointerTy(),
                       DAG.getNode(X86ISD::GlobalBaseReg, dl, getPointerTy()),
                       Result);
}
// For globals that require a load from a stub to get the address, emit the
// load.
13700 if (isGlobalStubReference(OpFlags))
13701 Result = DAG.getLoad(getPointerTy(), dl, DAG.getEntryNode(), Result,
13702 MachinePointerInfo::getGOT(), false, false, false, 0);
13704 // If there was a non-zero offset that we didn't fold, create an explicit
13705 // addition for it.
if (Offset != 0)
  Result = DAG.getNode(ISD::ADD, dl, getPointerTy(), Result,
                       DAG.getConstant(Offset, getPointerTy()));

return Result;
}
SDValue
X86TargetLowering::LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const {
13715 const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
13716 int64_t Offset = cast<GlobalAddressSDNode>(Op)->getOffset();
13717 return LowerGlobalAddress(GV, SDLoc(Op), Offset, DAG);
13721 GetTLSADDR(SelectionDAG &DAG, SDValue Chain, GlobalAddressSDNode *GA,
13722 SDValue *InFlag, const EVT PtrVT, unsigned ReturnReg,
13723 unsigned char OperandFlags, bool LocalDynamic = false) {
13724 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
SDLoc dl(GA);
SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl,
                                         GA->getValueType(0),
                                         GA->getOffset(), OperandFlags);
X86ISD::NodeType CallType = LocalDynamic ? X86ISD::TLSBASEADDR
                                         : X86ISD::TLSADDR;

if (InFlag) {
  SDValue Ops[] = { Chain, TGA, *InFlag };
  Chain = DAG.getNode(CallType, dl, NodeTys, Ops);
} else {
  SDValue Ops[] = { Chain, TGA };
  Chain = DAG.getNode(CallType, dl, NodeTys, Ops);
}
13743 // TLSADDR will be codegen'ed as call. Inform MFI that function has calls.
13744 MFI->setAdjustsStack(true);
13745 MFI->setHasCalls(true);
13747 SDValue Flag = Chain.getValue(1);
13748 return DAG.getCopyFromReg(Chain, dl, ReturnReg, PtrVT, Flag);
// Lower ISD::GlobalTLSAddress using the "general dynamic" model, 32 bit.
static SDValue
LowerToTLSGeneralDynamicModel32(GlobalAddressSDNode *GA, SelectionDAG &DAG,
                                const EVT PtrVT) {
SDValue InFlag;
SDLoc dl(GA);  // ? function entry point might be better
13757 SDValue Chain = DAG.getCopyToReg(DAG.getEntryNode(), dl, X86::EBX,
13758 DAG.getNode(X86ISD::GlobalBaseReg,
13759 SDLoc(), PtrVT), InFlag);
13760 InFlag = Chain.getValue(1);
return GetTLSADDR(DAG, Chain, GA, &InFlag, PtrVT, X86::EAX, X86II::MO_TLSGD);
}
// Lower ISD::GlobalTLSAddress using the "general dynamic" model, 64 bit.
static SDValue
LowerToTLSGeneralDynamicModel64(GlobalAddressSDNode *GA, SelectionDAG &DAG,
                                const EVT PtrVT) {
13769 return GetTLSADDR(DAG, DAG.getEntryNode(), GA, nullptr, PtrVT,
                  X86::RAX, X86II::MO_TLSGD);
}
static SDValue LowerToTLSLocalDynamicModel(GlobalAddressSDNode *GA,
                                           SelectionDAG &DAG, const EVT PtrVT,
                                           bool is64Bit) {
SDLoc dl(GA);
13779 // Get the start address of the TLS block for this module.
13780 X86MachineFunctionInfo* MFI = DAG.getMachineFunction()
13781 .getInfo<X86MachineFunctionInfo>();
13782 MFI->incNumLocalDynamicTLSAccesses();
SDValue Base;
if (is64Bit) {
  Base = GetTLSADDR(DAG, DAG.getEntryNode(), GA, nullptr, PtrVT, X86::RAX,
                    X86II::MO_TLSLD, /*LocalDynamic=*/true);
} else {
  SDValue InFlag;
  SDValue Chain = DAG.getCopyToReg(DAG.getEntryNode(), dl, X86::EBX,
      DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), PtrVT), InFlag);
  InFlag = Chain.getValue(1);
  Base = GetTLSADDR(DAG, Chain, GA, &InFlag, PtrVT, X86::EAX,
                    X86II::MO_TLSLDM, /*LocalDynamic=*/true);
}
// Note: the CleanupLocalDynamicTLSPass will remove redundant computations
// of Base.

// Build x@dtpoff.
13801 unsigned char OperandFlags = X86II::MO_DTPOFF;
13802 unsigned WrapperKind = X86ISD::Wrapper;
13803 SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl,
13804 GA->getValueType(0),
13805 GA->getOffset(), OperandFlags);
13806 SDValue Offset = DAG.getNode(WrapperKind, dl, PtrVT, TGA);
13808 // Add x@dtpoff with the base.
13809 return DAG.getNode(ISD::ADD, dl, PtrVT, Offset, Base);
13812 // Lower ISD::GlobalTLSAddress using the "initial exec" or "local exec" model.
13813 static SDValue LowerToTLSExecModel(GlobalAddressSDNode *GA, SelectionDAG &DAG,
13814 const EVT PtrVT, TLSModel::Model model,
13815 bool is64Bit, bool isPIC) {
13818 // Get the Thread Pointer, which is %gs:0 (32-bit) or %fs:0 (64-bit).
13819 Value *Ptr = Constant::getNullValue(Type::getInt8PtrTy(*DAG.getContext(),
13820 is64Bit ? 257 : 256));
13822 SDValue ThreadPointer =
13823 DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), DAG.getIntPtrConstant(0),
13824 MachinePointerInfo(Ptr), false, false, false, 0);
13826 unsigned char OperandFlags = 0;
// Most TLS accesses are not RIP relative, even on x86-64. One exception is
// initial-exec.
unsigned WrapperKind = X86ISD::Wrapper;
13830 if (model == TLSModel::LocalExec) {
13831 OperandFlags = is64Bit ? X86II::MO_TPOFF : X86II::MO_NTPOFF;
13832 } else if (model == TLSModel::InitialExec) {
  if (is64Bit) {
    OperandFlags = X86II::MO_GOTTPOFF;
    WrapperKind = X86ISD::WrapperRIP;
  } else {
    OperandFlags = isPIC ? X86II::MO_GOTNTPOFF : X86II::MO_INDNTPOFF;
  }
} else {
  llvm_unreachable("Unexpected model");
}
13843 // emit "addl x@ntpoff,%eax" (local exec)
13844 // or "addl x@indntpoff,%eax" (initial exec)
13845 // or "addl x@gotntpoff(%ebx) ,%eax" (initial exec, 32-bit pic)
SDValue TGA =
    DAG.getTargetGlobalAddress(GA->getGlobal(), dl, GA->getValueType(0),
                               GA->getOffset(), OperandFlags);
13849 SDValue Offset = DAG.getNode(WrapperKind, dl, PtrVT, TGA);
13851 if (model == TLSModel::InitialExec) {
13852 if (isPIC && !is64Bit) {
13853 Offset = DAG.getNode(ISD::ADD, dl, PtrVT,
                         DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), PtrVT),
                         Offset);
  }

  Offset = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Offset,
                       MachinePointerInfo::getGOT(), false, false, false, 0);
}
13862 // The address of the thread local variable is the add of the thread
13863 // pointer with the offset of the variable.
13864 return DAG.getNode(ISD::ADD, dl, PtrVT, ThreadPointer, Offset);
SDValue
X86TargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const {
SDLoc DL(Op);
13870 GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
13871 const GlobalValue *GV = GA->getGlobal();
13873 if (Subtarget->isTargetELF()) {
TLSModel::Model model = DAG.getTarget().getTLSModel(GV);
switch (model) {
case TLSModel::GeneralDynamic:
13878 if (Subtarget->is64Bit())
13879 return LowerToTLSGeneralDynamicModel64(GA, DAG, getPointerTy());
13880 return LowerToTLSGeneralDynamicModel32(GA, DAG, getPointerTy());
13881 case TLSModel::LocalDynamic:
13882 return LowerToTLSLocalDynamicModel(GA, DAG, getPointerTy(),
13883 Subtarget->is64Bit());
13884 case TLSModel::InitialExec:
13885 case TLSModel::LocalExec:
13886 return LowerToTLSExecModel(
13887 GA, DAG, getPointerTy(), model, Subtarget->is64Bit(),
13888 DAG.getTarget().getRelocationModel() == Reloc::PIC_);
13890 llvm_unreachable("Unknown TLS model.");
13893 if (Subtarget->isTargetDarwin()) {
13894 // Darwin only has one model of TLS. Lower to that.
13895 unsigned char OpFlag = 0;
13896 unsigned WrapperKind = Subtarget->isPICStyleRIPRel() ?
13897 X86ISD::WrapperRIP : X86ISD::Wrapper;
13899 // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
13900 // global base reg.
13901 bool PIC32 = (DAG.getTarget().getRelocationModel() == Reloc::PIC_) &&
13902 !Subtarget->is64Bit();
if (PIC32)
  OpFlag = X86II::MO_TLVP_PIC_BASE;
else
  OpFlag = X86II::MO_TLVP;
13908 SDValue Result = DAG.getTargetGlobalAddress(GA->getGlobal(), DL,
13909 GA->getValueType(0),
13910 GA->getOffset(), OpFlag);
13911 SDValue Offset = DAG.getNode(WrapperKind, DL, getPointerTy(), Result);
13913 // With PIC32, the address is actually $g + Offset.
if (PIC32)
  Offset = DAG.getNode(ISD::ADD, DL, getPointerTy(),
                       DAG.getNode(X86ISD::GlobalBaseReg,
                                   SDLoc(), getPointerTy()),
                       Offset);
// Lowering the machine isd will make sure everything is in the right
// location.
13922 SDValue Chain = DAG.getEntryNode();
13923 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
13924 SDValue Args[] = { Chain, Offset };
13925 Chain = DAG.getNode(X86ISD::TLSCALL, DL, NodeTys, Args);
13927 // TLSCALL will be codegen'ed as call. Inform MFI that function has calls.
13928 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
13929 MFI->setAdjustsStack(true);
// And our return value (tls address) is in the standard call return value
// location.
13933 unsigned Reg = Subtarget->is64Bit() ? X86::RAX : X86::EAX;
13934 return DAG.getCopyFromReg(Chain, DL, Reg, getPointerTy(),
                          Chain.getValue(1));
}
13938 if (Subtarget->isTargetKnownWindowsMSVC() ||
13939 Subtarget->isTargetWindowsGNU()) {
13940 // Just use the implicit TLS architecture
// Need to generate something similar to:
13942 // mov rdx, qword [gs:abs 58H]; Load pointer to ThreadLocalStorage
13944 // mov ecx, dword [rel _tls_index]: Load index (from C runtime)
13945 // mov rcx, qword [rdx+rcx*8]
13946 // mov eax, .tls$:tlsvar
13947 // [rax+rcx] contains the address
13948 // Windows 64bit: gs:0x58
13949 // Windows 32bit: fs:__tls_array
SDLoc dl(GA);
SDValue Chain = DAG.getEntryNode();
13954 // Get the Thread Pointer, which is %fs:__tls_array (32-bit) or
13955 // %gs:0x58 (64-bit). On MinGW, __tls_array is not available, so directly
13956 // use its literal value of 0x2C.
Value *Ptr = Constant::getNullValue(
    Subtarget->is64Bit() ? Type::getInt8PtrTy(*DAG.getContext(), 256)
                         : Type::getInt32PtrTy(*DAG.getContext(), 257));

SDValue TlsArray =
13964 Subtarget->is64Bit()
13965 ? DAG.getIntPtrConstant(0x58)
13966 : (Subtarget->isTargetWindowsGNU()
13967 ? DAG.getIntPtrConstant(0x2C)
13968 : DAG.getExternalSymbol("_tls_array", getPointerTy()));
13970 SDValue ThreadPointer =
13971 DAG.getLoad(getPointerTy(), dl, Chain, TlsArray,
13972 MachinePointerInfo(Ptr), false, false, false, 0);
13974 // Load the _tls_index variable
13975 SDValue IDX = DAG.getExternalSymbol("_tls_index", getPointerTy());
13976 if (Subtarget->is64Bit())
13977 IDX = DAG.getExtLoad(ISD::ZEXTLOAD, dl, getPointerTy(), Chain,
13978 IDX, MachinePointerInfo(), MVT::i32,
13979 false, false, false, 0);
13981 IDX = DAG.getLoad(getPointerTy(), dl, Chain, IDX, MachinePointerInfo(),
13982 false, false, false, 0);
SDValue Scale = DAG.getConstant(Log2_64_Ceil(TD->getPointerSize()),
                                getPointerTy());
13986 IDX = DAG.getNode(ISD::SHL, dl, getPointerTy(), IDX, Scale);
13988 SDValue res = DAG.getNode(ISD::ADD, dl, getPointerTy(), ThreadPointer, IDX);
13989 res = DAG.getLoad(getPointerTy(), dl, Chain, res, MachinePointerInfo(),
13990 false, false, false, 0);
13992 // Get the offset of start of .tls section
13993 SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl,
13994 GA->getValueType(0),
13995 GA->getOffset(), X86II::MO_SECREL);
13996 SDValue Offset = DAG.getNode(X86ISD::Wrapper, dl, getPointerTy(), TGA);
13998 // The address of the thread local variable is the add of the thread
13999 // pointer with the offset of the variable.
return DAG.getNode(ISD::ADD, dl, getPointerTy(), res, Offset);
}
14003 llvm_unreachable("TLS not implemented for this target.");
/// LowerShiftParts - Lower SRA_PARTS and friends, which return two i32 values
/// and take a 2 x i32 value to shift plus a shift amount.
static SDValue LowerShiftParts(SDValue Op, SelectionDAG &DAG) {
  assert(Op.getNumOperands() == 3 && "Not a double-shift!");
  MVT VT = Op.getSimpleValueType();
  unsigned VTBits = VT.getSizeInBits();
  SDLoc dl(Op);
  bool isSRA = Op.getOpcode() == ISD::SRA_PARTS;
  SDValue ShOpLo = Op.getOperand(0);
  SDValue ShOpHi = Op.getOperand(1);
  SDValue ShAmt  = Op.getOperand(2);
  // X86ISD::SHLD and X86ISD::SHRD have defined overflow behavior but the
  // generic ISD nodes don't. Insert an AND to be safe; it's optimized away
  // during isel.
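  // For example, on i386 an i64 SHL_PARTS by Amt < 32 produces
  //   Hi = SHLD(Hi, Lo, Amt)  -- Hi shifted left, filled from Lo's top bits
  //   Lo = Lo << Amt
  // and the CMOVs below patch up the Amt >= 32 case.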
  SDValue SafeShAmt = DAG.getNode(ISD::AND, dl, MVT::i8, ShAmt,
                                  DAG.getConstant(VTBits - 1, MVT::i8));
  SDValue Tmp1 = isSRA ? DAG.getNode(ISD::SRA, dl, VT, ShOpHi,
                                     DAG.getConstant(VTBits - 1, MVT::i8))
                       : DAG.getConstant(0, VT);

  SDValue Tmp2, Tmp3;
  if (Op.getOpcode() == ISD::SHL_PARTS) {
    Tmp2 = DAG.getNode(X86ISD::SHLD, dl, VT, ShOpHi, ShOpLo, ShAmt);
    Tmp3 = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, SafeShAmt);
  } else {
    Tmp2 = DAG.getNode(X86ISD::SHRD, dl, VT, ShOpLo, ShOpHi, ShAmt);
    Tmp3 = DAG.getNode(isSRA ? ISD::SRA : ISD::SRL, dl, VT, ShOpHi, SafeShAmt);
  }

  // If the shift amount is larger than or equal to the width of a part we
  // can't rely on the results of shld/shrd. Insert a test and select the
  // appropriate values for large shift amounts.
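  // The test computes ShAmt & VTBits: for an i64 double shift VTBits is 32,
  // so the AND is non-zero exactly when ShAmt >= 32, and the CMOVs below pick
  // the wrapped-around values in that case.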
  SDValue AndNode = DAG.getNode(ISD::AND, dl, MVT::i8, ShAmt,
                                DAG.getConstant(VTBits, MVT::i8));
  SDValue Cond = DAG.getNode(X86ISD::CMP, dl, MVT::i32,
                             AndNode, DAG.getConstant(0, MVT::i8));

  SDValue Hi, Lo;
  SDValue CC = DAG.getConstant(X86::COND_NE, MVT::i8);
  SDValue Ops0[4] = { Tmp2, Tmp3, CC, Cond };
  SDValue Ops1[4] = { Tmp3, Tmp1, CC, Cond };

  if (Op.getOpcode() == ISD::SHL_PARTS) {
    Hi = DAG.getNode(X86ISD::CMOV, dl, VT, Ops0);
    Lo = DAG.getNode(X86ISD::CMOV, dl, VT, Ops1);
  } else {
    Lo = DAG.getNode(X86ISD::CMOV, dl, VT, Ops0);
    Hi = DAG.getNode(X86ISD::CMOV, dl, VT, Ops1);
  }

  SDValue Ops[2] = { Lo, Hi };
  return DAG.getMergeValues(Ops, dl);
}
SDValue X86TargetLowering::LowerSINT_TO_FP(SDValue Op,
                                           SelectionDAG &DAG) const {
  MVT SrcVT = Op.getOperand(0).getSimpleValueType();
  SDLoc dl(Op);

  if (SrcVT.isVector()) {
    if (SrcVT.getVectorElementType() == MVT::i1) {
      MVT IntegerVT = MVT::getVectorVT(MVT::i32, SrcVT.getVectorNumElements());
      return DAG.getNode(ISD::SINT_TO_FP, dl, Op.getValueType(),
                         DAG.getNode(ISD::SIGN_EXTEND, dl, IntegerVT,
                                     Op.getOperand(0)));
    }
    return SDValue();
  }

  assert(SrcVT <= MVT::i64 && SrcVT >= MVT::i16 &&
         "Unknown SINT_TO_FP to lower!");

  // These are really Legal; return the operand so the caller accepts it as
  // Legal.
  if (SrcVT == MVT::i32 && isScalarFPTypeInSSEReg(Op.getValueType()))
    return Op;
  if (SrcVT == MVT::i64 && isScalarFPTypeInSSEReg(Op.getValueType()) &&
      Subtarget->is64Bit()) {
    return Op;
  }

  unsigned Size = SrcVT.getSizeInBits()/8;
  MachineFunction &MF = DAG.getMachineFunction();
  int SSFI = MF.getFrameInfo()->CreateStackObject(Size, Size, false);
  SDValue StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
  SDValue Chain = DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0),
                               StackSlot,
                               MachinePointerInfo::getFixedStack(SSFI),
                               false, false, 0);
  return BuildFILD(Op, SrcVT, Chain, StackSlot, DAG);
}
SDValue X86TargetLowering::BuildFILD(SDValue Op, EVT SrcVT, SDValue Chain,
                                     SDValue StackSlot,
                                     SelectionDAG &DAG) const {
  SDLoc DL(Op);
  // Build the FILD
  SDVTList Tys;
  bool useSSE = isScalarFPTypeInSSEReg(Op.getValueType());
  if (useSSE)
    Tys = DAG.getVTList(MVT::f64, MVT::Other, MVT::Glue);
  else
    Tys = DAG.getVTList(Op.getValueType(), MVT::Other);

  unsigned ByteSize = SrcVT.getSizeInBits()/8;

  FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(StackSlot);
  MachineMemOperand *MMO;
  if (FI) {
    int SSFI = FI->getIndex();
    MMO = DAG.getMachineFunction()
              .getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI),
                                    MachineMemOperand::MOLoad, ByteSize,
                                    ByteSize);
  } else {
    MMO = cast<LoadSDNode>(StackSlot)->getMemOperand();
    StackSlot = StackSlot.getOperand(1);
  }
  SDValue Ops[] = { Chain, StackSlot, DAG.getValueType(SrcVT) };
  SDValue Result = DAG.getMemIntrinsicNode(useSSE ? X86ISD::FILD_FLAG :
                                                    X86ISD::FILD, DL,
                                           Tys, Ops, SrcVT, MMO);

  if (useSSE) {
    Chain = Result.getValue(1);
    SDValue InFlag = Result.getValue(2);

    // FIXME: Currently the FST is flagged to the FILD_FLAG. This
    // shouldn't be necessary except that RFP cannot be live across
    // multiple blocks. When stackifier is fixed, they can be uncoupled.
    MachineFunction &MF = DAG.getMachineFunction();
    unsigned SSFISize = Op.getValueType().getSizeInBits()/8;
    int SSFI = MF.getFrameInfo()->CreateStackObject(SSFISize, SSFISize, false);
    SDValue StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
    Tys = DAG.getVTList(MVT::Other);
    SDValue Ops[] = {
      Chain, Result, StackSlot, DAG.getValueType(Op.getValueType()), InFlag
    };
    MachineMemOperand *MMO =
        DAG.getMachineFunction()
            .getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI),
                                  MachineMemOperand::MOStore, SSFISize,
                                  SSFISize);

    Chain = DAG.getMemIntrinsicNode(X86ISD::FST, DL, Tys,
                                    Ops, Op.getValueType(), MMO);
    Result = DAG.getLoad(Op.getValueType(), DL, Chain, StackSlot,
                         MachinePointerInfo::getFixedStack(SSFI),
                         false, false, false, 0);
  }

  return Result;
}
// LowerUINT_TO_FP_i64 - 64-bit unsigned integer to double expansion.
SDValue X86TargetLowering::LowerUINT_TO_FP_i64(SDValue Op,
                                               SelectionDAG &DAG) const {
  // This algorithm is not obvious. Here is what we're trying to output:
  /*
     movq       %rax,  %xmm0
     punpckldq  (c0),  %xmm0  // c0: (uint4){ 0x43300000U, 0x45300000U, 0U, 0U }
     subpd      (c1),  %xmm0  // c1: (double2){ 0x1.0p52, 0x1.0p52 * 0x1.0p32 }
     #ifdef __SSE3__
       haddpd   %xmm0, %xmm0
     #else
       pshufd   $0x4e, %xmm0, %xmm1
       addpd    %xmm1, %xmm0
     #endif
  */
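  //
  // The trick: pairing the low half of the u64 with exponent bits 0x43300000
  // reinterprets as the double 2^52 + lo, and pairing the high half with
  // 0x45300000 gives 2^84 + hi * 2^32. Subtracting the magic constants
  // (2^52 and 2^84, i.e. 0x1.0p52 and 0x1.0p52 * 0x1.0p32) leaves exact
  // doubles for lo and hi * 2^32, which the final horizontal add sums.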

  SDLoc dl(Op);
  LLVMContext *Context = DAG.getContext();

  // Build some magic constants.
  static const uint32_t CV0[] = { 0x43300000, 0x45300000, 0, 0 };
  Constant *C0 = ConstantDataVector::get(*Context, CV0);
  SDValue CPIdx0 = DAG.getConstantPool(C0, getPointerTy(), 16);

  SmallVector<Constant*,2> CV1;
  CV1.push_back(
    ConstantFP::get(*Context, APFloat(APFloat::IEEEdouble,
                                      APInt(64, 0x4330000000000000ULL))));
  CV1.push_back(
    ConstantFP::get(*Context, APFloat(APFloat::IEEEdouble,
                                      APInt(64, 0x4530000000000000ULL))));
  Constant *C1 = ConstantVector::get(CV1);
  SDValue CPIdx1 = DAG.getConstantPool(C1, getPointerTy(), 16);

  // Load the 64-bit value into an XMM register.
  SDValue XR1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64,
                            Op.getOperand(0));
  SDValue CLod0 = DAG.getLoad(MVT::v4i32, dl, DAG.getEntryNode(), CPIdx0,
                              MachinePointerInfo::getConstantPool(),
                              false, false, false, 16);
  SDValue Unpck1 = getUnpackl(DAG, dl, MVT::v4i32,
                              DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, XR1),
                              CLod0);

  SDValue CLod1 = DAG.getLoad(MVT::v2f64, dl, CLod0.getValue(1), CPIdx1,
                              MachinePointerInfo::getConstantPool(),
                              false, false, false, 16);
  SDValue XR2F = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Unpck1);
  SDValue Sub = DAG.getNode(ISD::FSUB, dl, MVT::v2f64, XR2F, CLod1);
  SDValue Result;

  if (Subtarget->hasSSE3()) {
    // FIXME: The 'haddpd' instruction may be slower than 'movhlps + addsd'.
    Result = DAG.getNode(X86ISD::FHADD, dl, MVT::v2f64, Sub, Sub);
  } else {
    SDValue S2F = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Sub);
    SDValue Shuffle = getTargetShuffleNode(X86ISD::PSHUFD, dl, MVT::v4i32,
                                           S2F, 0x4E, DAG);
    Result = DAG.getNode(ISD::FADD, dl, MVT::v2f64,
                         DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Shuffle),
                         Sub);
  }

  return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Result,
                     DAG.getIntPtrConstant(0));
}
// LowerUINT_TO_FP_i32 - 32-bit unsigned integer to float expansion.
SDValue X86TargetLowering::LowerUINT_TO_FP_i32(SDValue Op,
                                               SelectionDAG &DAG) const {
  SDLoc dl(Op);
  // FP constant to bias correct the final result.
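  // 0x4330000000000000 is the double 2^52; OR'ing the 32-bit input into the
  // low mantissa bits of this constant yields the exact double 2^52 + x, so
  // subtracting the bias again leaves x converted to double.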
  SDValue Bias = DAG.getConstantFP(BitsToDouble(0x4330000000000000ULL),
                                   MVT::f64);

  // Load the 32-bit value into an XMM register.
  SDValue Load = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32,
                             Op.getOperand(0));

  // Zero out the upper parts of the register.
  Load = getShuffleVectorZeroOrUndef(Load, 0, true, Subtarget, DAG);

  Load = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64,
                     DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Load),
                     DAG.getIntPtrConstant(0));

  // Or the load with the bias.
  SDValue Or = DAG.getNode(ISD::OR, dl, MVT::v2i64,
                           DAG.getNode(ISD::BITCAST, dl, MVT::v2i64,
                                       DAG.getNode(ISD::SCALAR_TO_VECTOR, dl,
                                                   MVT::v2f64, Load)),
                           DAG.getNode(ISD::BITCAST, dl, MVT::v2i64,
                                       DAG.getNode(ISD::SCALAR_TO_VECTOR, dl,
                                                   MVT::v2f64, Bias)));
  Or = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64,
                   DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Or),
                   DAG.getIntPtrConstant(0));

  // Subtract the bias.
  SDValue Sub = DAG.getNode(ISD::FSUB, dl, MVT::f64, Or, Bias);

  // Handle final rounding: round to the destination type if it is narrower
  // than f64, extend if it is wider, and return as-is otherwise.
  EVT DestVT = Op.getValueType();

  if (DestVT.bitsLT(MVT::f64))
    return DAG.getNode(ISD::FP_ROUND, dl, DestVT, Sub,
                       DAG.getIntPtrConstant(0));
  if (DestVT.bitsGT(MVT::f64))
    return DAG.getNode(ISD::FP_EXTEND, dl, DestVT, Sub);

  return Sub;
}
static SDValue lowerUINT_TO_FP_vXi32(SDValue Op, SelectionDAG &DAG,
                                     const X86Subtarget &Subtarget) {
  // The algorithm is the following:
  // #ifdef __SSE4_1__
  //   uint4 lo = _mm_blend_epi16( v, (uint4) 0x4b000000, 0xaa);
  //   uint4 hi = _mm_blend_epi16( _mm_srli_epi32(v,16),
  //                               (uint4) 0x53000000, 0xaa);
  // #else
  //   uint4 lo = (v & (uint4) 0xffff) | (uint4) 0x4b000000;
  //   uint4 hi = (v >> 16) | (uint4) 0x53000000;
  // #endif
  //   float4 fhi = (float4) hi - (0x1.0p39f + 0x1.0p23f);
  //   return (float4) lo + fhi;
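  //
  // This is the 32-bit analogue of the scalar i64 trick above: 0x4b000000 is
  // the float 2^23, so lo reinterprets as 2^23 + (v & 0xffff), and 0x53000000
  // is 2^39, so hi reinterprets as 2^39 + (v >> 16) * 2^16. The single
  // subtraction of (2^39 + 2^23) removes both biases at once, so
  // lo + fhi == (v & 0xffff) + (v >> 16) * 2^16 == v, up to final rounding.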

  SDLoc DL(Op);
  SDValue V = Op->getOperand(0);
  EVT VecIntVT = V.getValueType();
  bool Is128 = VecIntVT == MVT::v4i32;
  EVT VecFloatVT = Is128 ? MVT::v4f32 : MVT::v8f32;
  // If we convert to something other than the supported type, e.g., to v4f64,
  // abort early.
  if (VecFloatVT != Op->getValueType(0))
    return SDValue();

  unsigned NumElts = VecIntVT.getVectorNumElements();
  assert((VecIntVT == MVT::v4i32 || VecIntVT == MVT::v8i32) &&
         "Unsupported custom type");
  assert(NumElts <= 8 && "The size of the constant array must be fixed");

  // In the #ifdef/#else code, we have in common:
  // - The vector of constants:
  // -- 0x4b000000
  // -- 0x53000000
  // - A shift:
  // -- v >> 16

  // Create the splat vector for 0x4b000000.
  SDValue CstLow = DAG.getConstant(0x4b000000, MVT::i32);
  SDValue CstLowArray[] = {CstLow, CstLow, CstLow, CstLow,
                           CstLow, CstLow, CstLow, CstLow};
  SDValue VecCstLow = DAG.getNode(ISD::BUILD_VECTOR, DL, VecIntVT,
                                  makeArrayRef(&CstLowArray[0], NumElts));
  // Create the splat vector for 0x53000000.
  SDValue CstHigh = DAG.getConstant(0x53000000, MVT::i32);
  SDValue CstHighArray[] = {CstHigh, CstHigh, CstHigh, CstHigh,
                            CstHigh, CstHigh, CstHigh, CstHigh};
  SDValue VecCstHigh = DAG.getNode(ISD::BUILD_VECTOR, DL, VecIntVT,
                                   makeArrayRef(&CstHighArray[0], NumElts));

  // Create the right shift.
  SDValue CstShift = DAG.getConstant(16, MVT::i32);
  SDValue CstShiftArray[] = {CstShift, CstShift, CstShift, CstShift,
                             CstShift, CstShift, CstShift, CstShift};
  SDValue VecCstShift = DAG.getNode(ISD::BUILD_VECTOR, DL, VecIntVT,
                                    makeArrayRef(&CstShiftArray[0], NumElts));
  SDValue HighShift = DAG.getNode(ISD::SRL, DL, VecIntVT, V, VecCstShift);

  SDValue Low, High;
  if (Subtarget.hasSSE41()) {
    EVT VecI16VT = Is128 ? MVT::v8i16 : MVT::v16i16;
    // uint4 lo = _mm_blend_epi16( v, (uint4) 0x4b000000, 0xaa);
    SDValue VecCstLowBitcast =
        DAG.getNode(ISD::BITCAST, DL, VecI16VT, VecCstLow);
    SDValue VecBitcast = DAG.getNode(ISD::BITCAST, DL, VecI16VT, V);
    // Low will be bitcasted right away, so do not bother bitcasting back to
    // its original type.
    Low = DAG.getNode(X86ISD::BLENDI, DL, VecI16VT, VecBitcast,
                      VecCstLowBitcast, DAG.getConstant(0xaa, MVT::i32));
    // uint4 hi = _mm_blend_epi16( _mm_srli_epi32(v,16),
    //                             (uint4) 0x53000000, 0xaa);
    SDValue VecCstHighBitcast =
        DAG.getNode(ISD::BITCAST, DL, VecI16VT, VecCstHigh);
    SDValue VecShiftBitcast =
        DAG.getNode(ISD::BITCAST, DL, VecI16VT, HighShift);
    // High will be bitcasted right away, so do not bother bitcasting back to
    // its original type.
    High = DAG.getNode(X86ISD::BLENDI, DL, VecI16VT, VecShiftBitcast,
                       VecCstHighBitcast, DAG.getConstant(0xaa, MVT::i32));
  } else {
    SDValue CstMask = DAG.getConstant(0xffff, MVT::i32);
    SDValue VecCstMask = DAG.getNode(ISD::BUILD_VECTOR, DL, VecIntVT, CstMask,
                                     CstMask, CstMask, CstMask);
    // uint4 lo = (v & (uint4) 0xffff) | (uint4) 0x4b000000;
    SDValue LowAnd = DAG.getNode(ISD::AND, DL, VecIntVT, V, VecCstMask);
    Low = DAG.getNode(ISD::OR, DL, VecIntVT, LowAnd, VecCstLow);

    // uint4 hi = (v >> 16) | (uint4) 0x53000000;
    High = DAG.getNode(ISD::OR, DL, VecIntVT, HighShift, VecCstHigh);
  }

  // Create the vector constant for -(0x1.0p39f + 0x1.0p23f).
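  // 0xD3000080 encodes that float directly: sign bit set, biased exponent 166
  // (i.e. 2^39), and mantissa 0x80, giving -(1 + 2^-16) * 2^39 = -(2^39 + 2^23).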
  SDValue CstFAdd = DAG.getConstantFP(
      APFloat(APFloat::IEEEsingle, APInt(32, 0xD3000080)), MVT::f32);
  SDValue CstFAddArray[] = {CstFAdd, CstFAdd, CstFAdd, CstFAdd,
                            CstFAdd, CstFAdd, CstFAdd, CstFAdd};
  SDValue VecCstFAdd = DAG.getNode(ISD::BUILD_VECTOR, DL, VecFloatVT,
                                   makeArrayRef(&CstFAddArray[0], NumElts));

  // float4 fhi = (float4) hi - (0x1.0p39f + 0x1.0p23f);
  SDValue HighBitcast = DAG.getNode(ISD::BITCAST, DL, VecFloatVT, High);
  SDValue FHigh =
      DAG.getNode(ISD::FADD, DL, VecFloatVT, HighBitcast, VecCstFAdd);
  // return (float4) lo + fhi;
  SDValue LowBitcast = DAG.getNode(ISD::BITCAST, DL, VecFloatVT, Low);
  return DAG.getNode(ISD::FADD, DL, VecFloatVT, LowBitcast, FHigh);
}
SDValue X86TargetLowering::lowerUINT_TO_FP_vec(SDValue Op,
                                               SelectionDAG &DAG) const {
  SDValue N0 = Op.getOperand(0);
  MVT SVT = N0.getSimpleValueType();
  SDLoc dl(Op);

  switch (SVT.SimpleTy) {
  default:
    llvm_unreachable("Custom UINT_TO_FP is not supported!");
  case MVT::v4i8:
  case MVT::v4i16:
  case MVT::v8i8:
  case MVT::v8i16: {
    MVT NVT = MVT::getVectorVT(MVT::i32, SVT.getVectorNumElements());
    return DAG.getNode(ISD::SINT_TO_FP, dl, Op.getValueType(),
                       DAG.getNode(ISD::ZERO_EXTEND, dl, NVT, N0));
  }
  case MVT::v4i32:
  case MVT::v8i32:
    return lowerUINT_TO_FP_vXi32(Op, DAG, *Subtarget);
  }
  llvm_unreachable(nullptr);
}
SDValue X86TargetLowering::LowerUINT_TO_FP(SDValue Op,
                                           SelectionDAG &DAG) const {
  SDValue N0 = Op.getOperand(0);
  SDLoc dl(Op);

  if (Op.getValueType().isVector())
    return lowerUINT_TO_FP_vec(Op, DAG);

  // Since UINT_TO_FP is legal (it's marked custom), dag combiner won't
  // optimize it to a SINT_TO_FP when the sign bit is known zero. Perform
  // the optimization here.
  if (DAG.SignBitIsZero(N0))
    return DAG.getNode(ISD::SINT_TO_FP, dl, Op.getValueType(), N0);

  MVT SrcVT = N0.getSimpleValueType();
  MVT DstVT = Op.getSimpleValueType();
  if (SrcVT == MVT::i64 && DstVT == MVT::f64 && X86ScalarSSEf64)
    return LowerUINT_TO_FP_i64(Op, DAG);
  if (SrcVT == MVT::i32 && X86ScalarSSEf64)
    return LowerUINT_TO_FP_i32(Op, DAG);
  if (Subtarget->is64Bit() && SrcVT == MVT::i64 && DstVT == MVT::f32)
    return SDValue();

  // Make a 64-bit buffer, and use it to build an FILD.
  SDValue StackSlot = DAG.CreateStackTemporary(MVT::i64);
  if (SrcVT == MVT::i32) {
    SDValue WordOff = DAG.getConstant(4, getPointerTy());
    SDValue OffsetSlot = DAG.getNode(ISD::ADD, dl,
                                     getPointerTy(), StackSlot, WordOff);
    SDValue Store1 = DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0),
                                  StackSlot, MachinePointerInfo(),
                                  false, false, 0);
    SDValue Store2 = DAG.getStore(Store1, dl, DAG.getConstant(0, MVT::i32),
                                  OffsetSlot, MachinePointerInfo(),
                                  false, false, 0);
    SDValue Fild = BuildFILD(Op, MVT::i64, Store2, StackSlot, DAG);
    return Fild;
  }

  assert(SrcVT == MVT::i64 && "Unexpected type in UINT_TO_FP");
  SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0),
                               StackSlot, MachinePointerInfo(),
                               false, false, 0);
  // For i64 source, we need to add the appropriate power of 2 if the input
  // was negative. This is the same as the optimization in
  // DAGTypeLegalizer::ExpandIntOp_UINT_TO_FP, and for it to be safe here,
  // we must be careful to do the computation in x87 extended precision, not
  // in SSE. (The generic code can't know it's OK to do this, or how to.)
  int SSFI = cast<FrameIndexSDNode>(StackSlot)->getIndex();
  MachineMemOperand *MMO =
      DAG.getMachineFunction()
          .getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI),
                                MachineMemOperand::MOLoad, 8, 8);

  SDVTList Tys = DAG.getVTList(MVT::f80, MVT::Other);
  SDValue Ops[] = { Store, StackSlot, DAG.getValueType(MVT::i64) };
  SDValue Fild = DAG.getMemIntrinsicNode(X86ISD::FILD, dl, Tys, Ops,
                                         MVT::i64, MMO);

  APInt FF(32, 0x5F800000ULL);
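  // 0x5F800000 is 2^64 as a float: FILD read the buffer as a signed i64, so
  // if the original value had its top bit set we must add 2^64 to correct it.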

  // Check whether the sign bit is set.
  SDValue SignSet = DAG.getSetCC(dl,
                                 getSetCCResultType(*DAG.getContext(), MVT::i64),
                                 Op.getOperand(0), DAG.getConstant(0, MVT::i64),
                                 ISD::SETLT);

  // Build a 64 bit pair (0, FF) in the constant pool, with FF in the lo bits.
  SDValue FudgePtr = DAG.getConstantPool(
                             ConstantInt::get(*DAG.getContext(), FF.zext(64)),
                             getPointerTy());

  // Get a pointer to FF if the sign bit was set, or to 0 otherwise.
  SDValue Zero = DAG.getIntPtrConstant(0);
  SDValue Four = DAG.getIntPtrConstant(4);
  SDValue Offset = DAG.getNode(ISD::SELECT, dl, Zero.getValueType(), SignSet,
                               Zero, Four);
  FudgePtr = DAG.getNode(ISD::ADD, dl, getPointerTy(), FudgePtr, Offset);

  // Load the value out, extending it from f32 to f80.
  // FIXME: Avoid the extend by constructing the right constant pool?
  SDValue Fudge = DAG.getExtLoad(ISD::EXTLOAD, dl, MVT::f80, DAG.getEntryNode(),
                                 FudgePtr, MachinePointerInfo::getConstantPool(),
                                 MVT::f32, false, false, false, 4);
  // Extend everything to 80 bits to force it to be done on x87.
  SDValue Add = DAG.getNode(ISD::FADD, dl, MVT::f80, Fild, Fudge);
  return DAG.getNode(ISD::FP_ROUND, dl, DstVT, Add, DAG.getIntPtrConstant(0));
}
std::pair<SDValue,SDValue>
X86TargetLowering::FP_TO_INTHelper(SDValue Op, SelectionDAG &DAG,
                                   bool IsSigned, bool IsReplace) const {
  SDLoc DL(Op);

  EVT DstTy = Op.getValueType();

  if (!IsSigned && !isIntegerTypeFTOL(DstTy)) {
    assert(DstTy == MVT::i32 && "Unexpected FP_TO_UINT");
    DstTy = MVT::i64;
  }

  assert(DstTy.getSimpleVT() <= MVT::i64 &&
         DstTy.getSimpleVT() >= MVT::i16 &&
         "Unknown FP_TO_INT to lower!");

  // These are really Legal.
  if (DstTy == MVT::i32 &&
      isScalarFPTypeInSSEReg(Op.getOperand(0).getValueType()))
    return std::make_pair(SDValue(), SDValue());
  if (Subtarget->is64Bit() &&
      DstTy == MVT::i64 &&
      isScalarFPTypeInSSEReg(Op.getOperand(0).getValueType()))
    return std::make_pair(SDValue(), SDValue());

  // We lower FP->int64 either into FISTP64 followed by a load from a temporary
  // stack slot, or into the FTOL runtime function.
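  // In the FTOL case, the runtime routine (_ftol2 on MSVC targets) returns
  // the 64-bit result in EDX:EAX rather than through memory, which is why the
  // WIN_FTOL path below copies from those registers instead of producing a
  // stack slot.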
  MachineFunction &MF = DAG.getMachineFunction();
  unsigned MemSize = DstTy.getSizeInBits()/8;
  int SSFI = MF.getFrameInfo()->CreateStackObject(MemSize, MemSize, false);
  SDValue StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());

  unsigned Opc;
  if (!IsSigned && isIntegerTypeFTOL(DstTy))
    Opc = X86ISD::WIN_FTOL;
  else
    switch (DstTy.getSimpleVT().SimpleTy) {
    default: llvm_unreachable("Invalid FP_TO_SINT to lower!");
    case MVT::i16: Opc = X86ISD::FP_TO_INT16_IN_MEM; break;
    case MVT::i32: Opc = X86ISD::FP_TO_INT32_IN_MEM; break;
    case MVT::i64: Opc = X86ISD::FP_TO_INT64_IN_MEM; break;
    }

  SDValue Chain = DAG.getEntryNode();
  SDValue Value = Op.getOperand(0);
  EVT TheVT = Op.getOperand(0).getValueType();
  // FIXME This causes a redundant load/store if the SSE-class value is already
  // in memory, such as if it is on the callstack.
  if (isScalarFPTypeInSSEReg(TheVT)) {
    assert(DstTy == MVT::i64 && "Invalid FP_TO_SINT to lower!");
    Chain = DAG.getStore(Chain, DL, Value, StackSlot,
                         MachinePointerInfo::getFixedStack(SSFI),
                         false, false, 0);
    SDVTList Tys = DAG.getVTList(Op.getOperand(0).getValueType(), MVT::Other);
    SDValue Ops[] = {
      Chain, StackSlot, DAG.getValueType(TheVT)
    };

    MachineMemOperand *MMO =
        MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI),
                                MachineMemOperand::MOLoad, MemSize, MemSize);
    Value = DAG.getMemIntrinsicNode(X86ISD::FLD, DL, Tys, Ops, DstTy, MMO);
    Chain = Value.getValue(1);
    SSFI = MF.getFrameInfo()->CreateStackObject(MemSize, MemSize, false);
    StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
  }

  MachineMemOperand *MMO =
      MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI),
                              MachineMemOperand::MOStore, MemSize, MemSize);

  if (Opc != X86ISD::WIN_FTOL) {
    // Build the FP_TO_INT*_IN_MEM
    SDValue Ops[] = { Chain, Value, StackSlot };
    SDValue FIST = DAG.getMemIntrinsicNode(Opc, DL, DAG.getVTList(MVT::Other),
                                           Ops, DstTy, MMO);
    return std::make_pair(FIST, StackSlot);
  } else {
    SDValue ftol = DAG.getNode(X86ISD::WIN_FTOL, DL,
                               DAG.getVTList(MVT::Other, MVT::Glue),
                               Chain, Value);
    SDValue eax = DAG.getCopyFromReg(ftol, DL, X86::EAX,
                                     MVT::i32, ftol.getValue(1));
    SDValue edx = DAG.getCopyFromReg(eax.getValue(1), DL, X86::EDX,
                                     MVT::i32, eax.getValue(2));
    SDValue Ops[] = { eax, edx };
    SDValue pair = IsReplace
        ? DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, Ops)
        : DAG.getMergeValues(Ops, DL);
    return std::make_pair(pair, SDValue());
  }
}
static SDValue LowerAVXExtend(SDValue Op, SelectionDAG &DAG,
                              const X86Subtarget *Subtarget) {
  MVT VT = Op->getSimpleValueType(0);
  SDValue In = Op->getOperand(0);
  MVT InVT = In.getSimpleValueType();
  SDLoc dl(Op);

  // Optimize vectors in AVX mode:
  //
  //   v8i16 -> v8i32
  //   Use vpunpcklwd for 4 lower elements  v8i16 -> v4i32.
  //   Use vpunpckhwd for 4 upper elements  v8i16 -> v4i32.
  //   Concat upper and lower parts.
  //
  //   v4i32 -> v4i64
  //   Use vpunpckldq for 4 lower elements  v4i32 -> v2i64.
  //   Use vpunpckhdq for 4 upper elements  v4i32 -> v2i64.
  //   Concat upper and lower parts.
  //
  if (((VT != MVT::v16i16) || (InVT != MVT::v16i8)) &&
      ((VT != MVT::v8i32) || (InVT != MVT::v8i16)) &&
      ((VT != MVT::v4i64) || (InVT != MVT::v4i32)))
    return SDValue();

  if (Subtarget->hasInt256())
    return DAG.getNode(X86ISD::VZEXT, dl, VT, In);

  SDValue ZeroVec = getZeroVector(InVT, Subtarget, DAG, dl);
  SDValue Undef = DAG.getUNDEF(InVT);
  bool NeedZero = Op.getOpcode() == ISD::ZERO_EXTEND;
  SDValue OpLo = getUnpackl(DAG, dl, InVT, In, NeedZero ? ZeroVec : Undef);
  SDValue OpHi = getUnpackh(DAG, dl, InVT, In, NeedZero ? ZeroVec : Undef);

  MVT HVT = MVT::getVectorVT(VT.getVectorElementType(),
                             VT.getVectorNumElements()/2);

  OpLo = DAG.getNode(ISD::BITCAST, dl, HVT, OpLo);
  OpHi = DAG.getNode(ISD::BITCAST, dl, HVT, OpHi);

  return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, OpLo, OpHi);
}
static SDValue LowerZERO_EXTEND_AVX512(SDValue Op,
                                       SelectionDAG &DAG) {
  MVT VT = Op->getSimpleValueType(0);
  SDValue In = Op->getOperand(0);
  MVT InVT = In.getSimpleValueType();
  SDLoc DL(Op);
  unsigned int NumElts = VT.getVectorNumElements();
  if (NumElts != 8 && NumElts != 16)
    return SDValue();

  if (VT.is512BitVector() && InVT.getVectorElementType() != MVT::i1)
    return DAG.getNode(X86ISD::VZEXT, DL, VT, In);

  EVT ExtVT = (NumElts == 8)? MVT::v8i64 : MVT::v16i32;
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  // Now we have only mask extension
  assert(InVT.getVectorElementType() == MVT::i1);
  SDValue Cst = DAG.getTargetConstant(1, ExtVT.getScalarType());
  const Constant *C = (dyn_cast<ConstantSDNode>(Cst))->getConstantIntValue();
  SDValue CP = DAG.getConstantPool(C, TLI.getPointerTy());
  unsigned Alignment = cast<ConstantPoolSDNode>(CP)->getAlignment();
  SDValue Ld = DAG.getLoad(Cst.getValueType(), DL, DAG.getEntryNode(), CP,
                           MachinePointerInfo::getConstantPool(),
                           false, false, false, Alignment);

  SDValue Brcst = DAG.getNode(X86ISD::VBROADCASTM, DL, ExtVT, In, Ld);
  if (VT.is512BitVector())
    return Brcst;
  return DAG.getNode(X86ISD::VTRUNC, DL, VT, Brcst);
}
static SDValue LowerANY_EXTEND(SDValue Op, const X86Subtarget *Subtarget,
                               SelectionDAG &DAG) {
  if (Subtarget->hasFp256()) {
    SDValue Res = LowerAVXExtend(Op, DAG, Subtarget);
    if (Res.getNode())
      return Res;
  }

  return SDValue();
}
static SDValue LowerZERO_EXTEND(SDValue Op, const X86Subtarget *Subtarget,
                                SelectionDAG &DAG) {
  SDLoc DL(Op);
  MVT VT = Op.getSimpleValueType();
  SDValue In = Op.getOperand(0);
  MVT SVT = In.getSimpleValueType();

  if (VT.is512BitVector() || SVT.getVectorElementType() == MVT::i1)
    return LowerZERO_EXTEND_AVX512(Op, DAG);

  if (Subtarget->hasFp256()) {
    SDValue Res = LowerAVXExtend(Op, DAG, Subtarget);
    if (Res.getNode())
      return Res;
  }

  assert(!VT.is256BitVector() || !SVT.is128BitVector() ||
         VT.getVectorNumElements() != SVT.getVectorNumElements());
  return SDValue();
}
SDValue X86TargetLowering::LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const {
  SDLoc DL(Op);
  MVT VT = Op.getSimpleValueType();
  SDValue In = Op.getOperand(0);
  MVT InVT = In.getSimpleValueType();

  if (VT == MVT::i1) {
    assert((InVT.isInteger() && (InVT.getSizeInBits() <= 64)) &&
           "Invalid scalar TRUNCATE operation");
    if (InVT.getSizeInBits() >= 32)
      return SDValue();
    In = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, In);
    return DAG.getNode(ISD::TRUNCATE, DL, VT, In);
  }
  assert(VT.getVectorNumElements() == InVT.getVectorNumElements() &&
         "Invalid TRUNCATE operation");

  if (InVT.is512BitVector() || VT.getVectorElementType() == MVT::i1) {
    if (VT.getVectorElementType().getSizeInBits() >= 8)
      return DAG.getNode(X86ISD::VTRUNC, DL, VT, In);

    assert(VT.getVectorElementType() == MVT::i1 && "Unexpected vector type");
    unsigned NumElts = InVT.getVectorNumElements();
    assert((NumElts == 8 || NumElts == 16) && "Unexpected vector type");
    if (InVT.getSizeInBits() < 512) {
      MVT ExtVT = (NumElts == 16)? MVT::v16i32 : MVT::v8i64;
      In = DAG.getNode(ISD::SIGN_EXTEND, DL, ExtVT, In);
      InVT = ExtVT;
    }

    SDValue Cst = DAG.getTargetConstant(1, InVT.getVectorElementType());
    const Constant *C = (dyn_cast<ConstantSDNode>(Cst))->getConstantIntValue();
    SDValue CP = DAG.getConstantPool(C, getPointerTy());
    unsigned Alignment = cast<ConstantPoolSDNode>(CP)->getAlignment();
    SDValue Ld = DAG.getLoad(Cst.getValueType(), DL, DAG.getEntryNode(), CP,
                             MachinePointerInfo::getConstantPool(),
                             false, false, false, Alignment);
    SDValue OneV = DAG.getNode(X86ISD::VBROADCAST, DL, InVT, Ld);
    SDValue And = DAG.getNode(ISD::AND, DL, InVT, OneV, In);
    return DAG.getNode(X86ISD::TESTM, DL, VT, And, And);
  }

  if ((VT == MVT::v4i32) && (InVT == MVT::v4i64)) {
    // On AVX2, v4i64 -> v4i32 becomes VPERMD.
    if (Subtarget->hasInt256()) {
      static const int ShufMask[] = {0, 2, 4, 6, -1, -1, -1, -1};
      In = DAG.getNode(ISD::BITCAST, DL, MVT::v8i32, In);
      In = DAG.getVectorShuffle(MVT::v8i32, DL, In, DAG.getUNDEF(MVT::v8i32),
                                ShufMask);
      return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, In,
                         DAG.getIntPtrConstant(0));
    }

    SDValue OpLo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v2i64, In,
                               DAG.getIntPtrConstant(0));
    SDValue OpHi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v2i64, In,
                               DAG.getIntPtrConstant(2));
    OpLo = DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, OpLo);
    OpHi = DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, OpHi);
    static const int ShufMask[] = {0, 2, 4, 6};
    return DAG.getVectorShuffle(VT, DL, OpLo, OpHi, ShufMask);
  }

  if ((VT == MVT::v8i16) && (InVT == MVT::v8i32)) {
    // On AVX2, v8i32 -> v8i16 becomes PSHUFB.
    if (Subtarget->hasInt256()) {
      In = DAG.getNode(ISD::BITCAST, DL, MVT::v32i8, In);

      SmallVector<SDValue,32> pshufbMask;
      for (unsigned i = 0; i < 2; ++i) {
        pshufbMask.push_back(DAG.getConstant(0x0, MVT::i8));
        pshufbMask.push_back(DAG.getConstant(0x1, MVT::i8));
        pshufbMask.push_back(DAG.getConstant(0x4, MVT::i8));
        pshufbMask.push_back(DAG.getConstant(0x5, MVT::i8));
        pshufbMask.push_back(DAG.getConstant(0x8, MVT::i8));
        pshufbMask.push_back(DAG.getConstant(0x9, MVT::i8));
        pshufbMask.push_back(DAG.getConstant(0xc, MVT::i8));
        pshufbMask.push_back(DAG.getConstant(0xd, MVT::i8));
        for (unsigned j = 0; j < 8; ++j)
          pshufbMask.push_back(DAG.getConstant(0x80, MVT::i8));
      }
      SDValue BV = DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v32i8, pshufbMask);
      In = DAG.getNode(X86ISD::PSHUFB, DL, MVT::v32i8, In, BV);
      In = DAG.getNode(ISD::BITCAST, DL, MVT::v4i64, In);

      static const int ShufMask[] = {0, 2, -1, -1};
      In = DAG.getVectorShuffle(MVT::v4i64, DL, In, DAG.getUNDEF(MVT::v4i64),
                                &ShufMask[0]);
      In = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v2i64, In,
                       DAG.getIntPtrConstant(0));
      return DAG.getNode(ISD::BITCAST, DL, VT, In);
    }

    SDValue OpLo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v4i32, In,
                               DAG.getIntPtrConstant(0));
    SDValue OpHi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v4i32, In,
                               DAG.getIntPtrConstant(4));

    OpLo = DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, OpLo);
    OpHi = DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, OpHi);

    // The PSHUFB mask:
    static const int ShufMask1[] = {0,  1,  4,  5,  8,  9, 12, 13,
                                    -1, -1, -1, -1, -1, -1, -1, -1};
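    // Each pair of byte indices (0,1), (4,5), ... picks the low 16 bits of an
    // i32 lane; the trailing -1 entries leave the upper half undefined, and
    // the MOVLHPS-style shuffle below packs the two halves together.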

    SDValue Undef = DAG.getUNDEF(MVT::v16i8);
    OpLo = DAG.getVectorShuffle(MVT::v16i8, DL, OpLo, Undef, ShufMask1);
    OpHi = DAG.getVectorShuffle(MVT::v16i8, DL, OpHi, Undef, ShufMask1);

    OpLo = DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, OpLo);
    OpHi = DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, OpHi);

    // The MOVLHPS Mask:
    static const int ShufMask2[] = {0, 1, 4, 5};
    SDValue res = DAG.getVectorShuffle(MVT::v4i32, DL, OpLo, OpHi, ShufMask2);
    return DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, res);
  }

  // Handle truncation of V256 to V128 using shuffles.
  if (!VT.is128BitVector() || !InVT.is256BitVector())
    return SDValue();

  assert(Subtarget->hasFp256() && "256-bit vector without AVX!");

  unsigned NumElems = VT.getVectorNumElements();
  MVT NVT = MVT::getVectorVT(VT.getVectorElementType(), NumElems * 2);

  SmallVector<int, 16> MaskVec(NumElems * 2, -1);
  // Prepare truncation shuffle mask
  for (unsigned i = 0; i != NumElems; ++i)
    MaskVec[i] = i * 2;
  SDValue V = DAG.getVectorShuffle(NVT, DL,
                                   DAG.getNode(ISD::BITCAST, DL, NVT, In),
                                   DAG.getUNDEF(NVT), &MaskVec[0]);
  return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, V,
                     DAG.getIntPtrConstant(0));
}
SDValue X86TargetLowering::LowerFP_TO_SINT(SDValue Op,
                                           SelectionDAG &DAG) const {
  assert(!Op.getSimpleValueType().isVector());

  std::pair<SDValue,SDValue> Vals = FP_TO_INTHelper(Op, DAG,
      /*IsSigned=*/ true, /*IsReplace=*/ false);
  SDValue FIST = Vals.first, StackSlot = Vals.second;
  // If FP_TO_INTHelper failed, the node is actually supposed to be Legal.
  if (!FIST.getNode()) return Op;

  if (StackSlot.getNode())
    // Load the result.
    return DAG.getLoad(Op.getValueType(), SDLoc(Op),
                       FIST, StackSlot, MachinePointerInfo(),
                       false, false, false, 0);

  // The node is the result.
  return FIST;
}
SDValue X86TargetLowering::LowerFP_TO_UINT(SDValue Op,
                                           SelectionDAG &DAG) const {
  std::pair<SDValue,SDValue> Vals = FP_TO_INTHelper(Op, DAG,
      /*IsSigned=*/ false, /*IsReplace=*/ false);
  SDValue FIST = Vals.first, StackSlot = Vals.second;
  assert(FIST.getNode() && "Unexpected failure");

  if (StackSlot.getNode())
    // Load the result.
    return DAG.getLoad(Op.getValueType(), SDLoc(Op),
                       FIST, StackSlot, MachinePointerInfo(),
                       false, false, false, 0);

  // The node is the result.
  return FIST;
}
static SDValue LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) {
  SDLoc DL(Op);
  MVT VT = Op.getSimpleValueType();
  SDValue In = Op.getOperand(0);
  MVT SVT = In.getSimpleValueType();

  assert(SVT == MVT::v2f32 && "Only customize MVT::v2f32 type legalization!");

  return DAG.getNode(X86ISD::VFPEXT, DL, VT,
                     DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v4f32,
                                 In, DAG.getUNDEF(SVT)));
}
/// The only differences between FABS and FNEG are the mask and the logic op.
/// FNEG also has a folding opportunity for FNEG(FABS(x)).
static SDValue LowerFABSorFNEG(SDValue Op, SelectionDAG &DAG) {
  assert((Op.getOpcode() == ISD::FABS || Op.getOpcode() == ISD::FNEG) &&
         "Wrong opcode for lowering FABS or FNEG.");

  bool IsFABS = (Op.getOpcode() == ISD::FABS);

  // If this is a FABS and it has an FNEG user, bail out to fold the combination
  // into an FNABS. We'll lower the FABS after that if it is still in use.
  if (IsFABS)
    for (SDNode *User : Op->uses())
      if (User->getOpcode() == ISD::FNEG)
        return Op;

  SDValue Op0 = Op.getOperand(0);
  bool IsFNABS = !IsFABS && (Op0.getOpcode() == ISD::FABS);

  SDLoc dl(Op);
  MVT VT = Op.getSimpleValueType();
  // Assume scalar op for initialization; update for vector if needed.
  // Note that there are no scalar bitwise logical SSE/AVX instructions, so we
  // generate a 16-byte vector constant and logic op even for the scalar case.
  // Using a 16-byte mask allows folding the load of the mask with
  // the logic op, so it can save (~4 bytes) on code size.
  MVT EltVT = VT;
  unsigned NumElts = VT == MVT::f64 ? 2 : 4;
  // FIXME: Use function attribute "OptimizeForSize" and/or CodeGenOpt::Level to
  // decide if we should generate a 16-byte constant mask when we only need 4 or
  // 8 bytes for the scalar case.
  if (VT.isVector()) {
    EltVT = VT.getVectorElementType();
    NumElts = VT.getVectorNumElements();
  }

  unsigned EltBits = EltVT.getSizeInBits();
  LLVMContext *Context = DAG.getContext();
  // For FABS, mask is 0x7f...; for FNEG, mask is 0x80...
  APInt MaskElt =
      IsFABS ? APInt::getSignedMaxValue(EltBits) : APInt::getSignBit(EltBits);
  Constant *C = ConstantInt::get(*Context, MaskElt);
  C = ConstantVector::getSplat(NumElts, C);
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  SDValue CPIdx = DAG.getConstantPool(C, TLI.getPointerTy());
  unsigned Alignment = cast<ConstantPoolSDNode>(CPIdx)->getAlignment();
  SDValue Mask = DAG.getLoad(VT, dl, DAG.getEntryNode(), CPIdx,
                             MachinePointerInfo::getConstantPool(),
                             false, false, false, Alignment);

  if (VT.isVector()) {
    // For a vector, cast operands to a vector type, perform the logic op,
    // and cast the result back to the original value type.
    MVT VecVT = MVT::getVectorVT(MVT::i64, VT.getSizeInBits() / 64);
    SDValue MaskCasted = DAG.getNode(ISD::BITCAST, dl, VecVT, Mask);
    SDValue Operand = IsFNABS ?
      DAG.getNode(ISD::BITCAST, dl, VecVT, Op0.getOperand(0)) :
      DAG.getNode(ISD::BITCAST, dl, VecVT, Op0);
    unsigned BitOp = IsFABS ? ISD::AND : IsFNABS ? ISD::OR : ISD::XOR;
    return DAG.getNode(ISD::BITCAST, dl, VT,
                       DAG.getNode(BitOp, dl, VecVT, Operand, MaskCasted));
  }

  // If not vector, then scalar.
  unsigned BitOp = IsFABS ? X86ISD::FAND : IsFNABS ? X86ISD::FOR : X86ISD::FXOR;
  SDValue Operand = IsFNABS ? Op0.getOperand(0) : Op0;
  return DAG.getNode(BitOp, dl, VT, Operand, Mask);
}
static SDValue LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) {
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  LLVMContext *Context = DAG.getContext();
  SDValue Op0 = Op.getOperand(0);
  SDValue Op1 = Op.getOperand(1);
  SDLoc dl(Op);
  MVT VT = Op.getSimpleValueType();
  MVT SrcVT = Op1.getSimpleValueType();

  // If second operand is smaller, extend it first.
  if (SrcVT.bitsLT(VT)) {
    Op1 = DAG.getNode(ISD::FP_EXTEND, dl, VT, Op1);
    SrcVT = VT;
  }
  // And if it is bigger, shrink it first.
  if (SrcVT.bitsGT(VT)) {
    Op1 = DAG.getNode(ISD::FP_ROUND, dl, VT, Op1, DAG.getIntPtrConstant(1));
    SrcVT = VT;
  }

  // At this point the operands and the result should have the same
  // type, and that won't be f80 since that is not custom lowered.

  const fltSemantics &Sem =
      VT == MVT::f64 ? APFloat::IEEEdouble : APFloat::IEEEsingle;
  const unsigned SizeInBits = VT.getSizeInBits();

  SmallVector<Constant *, 4> CV(
      VT == MVT::f64 ? 2 : 4,
      ConstantFP::get(*Context, APFloat(Sem, APInt(SizeInBits, 0))));

  // First, clear all bits but the sign bit from the second operand (sign).
  CV[0] = ConstantFP::get(*Context,
                          APFloat(Sem, APInt::getHighBitsSet(SizeInBits, 1)));
  Constant *C = ConstantVector::get(CV);
  SDValue CPIdx = DAG.getConstantPool(C, TLI.getPointerTy(), 16);
  SDValue Mask1 = DAG.getLoad(SrcVT, dl, DAG.getEntryNode(), CPIdx,
                              MachinePointerInfo::getConstantPool(),
                              false, false, false, 16);
  SDValue SignBit = DAG.getNode(X86ISD::FAND, dl, SrcVT, Op1, Mask1);

  // Next, clear the sign bit from the first operand (magnitude).
  // If it's a constant, we can clear it here.
  if (ConstantFPSDNode *Op0CN = dyn_cast<ConstantFPSDNode>(Op0)) {
    APFloat APF = Op0CN->getValueAPF();
    // If the magnitude is a positive zero, the sign bit alone is enough.
    if (APF.isPosZero())
      return SignBit;
    APF.clearSign();
    CV[0] = ConstantFP::get(*Context, APF);
  } else {
    CV[0] = ConstantFP::get(
        *Context,
        APFloat(Sem, APInt::getLowBitsSet(SizeInBits, SizeInBits - 1)));
  }
  C = ConstantVector::get(CV);
  CPIdx = DAG.getConstantPool(C, TLI.getPointerTy(), 16);
  SDValue Val = DAG.getLoad(VT, dl, DAG.getEntryNode(), CPIdx,
                            MachinePointerInfo::getConstantPool(),
                            false, false, false, 16);
  // If the magnitude operand wasn't a constant, we need to AND out the sign.
  if (!isa<ConstantFPSDNode>(Op0))
    Val = DAG.getNode(X86ISD::FAND, dl, VT, Op0, Val);

  // OR the magnitude value with the sign bit.
  return DAG.getNode(X86ISD::FOR, dl, VT, Val, SignBit);
}
static SDValue LowerFGETSIGN(SDValue Op, SelectionDAG &DAG) {
  SDValue N0 = Op.getOperand(0);
  SDLoc dl(Op);
  MVT VT = Op.getSimpleValueType();

  // Lower ISD::FGETSIGN to (AND (X86ISD::FGETSIGNx86 ...) 1).
  SDValue xFGETSIGN = DAG.getNode(X86ISD::FGETSIGNx86, dl, VT, N0,
                                  DAG.getConstant(1, VT));
  return DAG.getNode(ISD::AND, dl, VT, xFGETSIGN, DAG.getConstant(1, VT));
}
// Check whether an OR'd tree is PTEST-able.
static SDValue LowerVectorAllZeroTest(SDValue Op, const X86Subtarget *Subtarget,
                                      SelectionDAG &DAG) {
  assert(Op.getOpcode() == ISD::OR && "Only check OR'd tree.");

  if (!Subtarget->hasSSE41())
    return SDValue();

  if (!Op->hasOneUse())
    return SDValue();

  SDNode *N = Op.getNode();
  SDLoc DL(N);

  SmallVector<SDValue, 8> Opnds;
  DenseMap<SDValue, unsigned> VecInMap;
  SmallVector<SDValue, 8> VecIns;
  EVT VT = MVT::Other;

  // Recognize a special case where a vector is casted into wide integer to
  // test all 0s.
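  // That is, an all-zeros test such as
  //   (or (extractelt v, 0), (or (extractelt v, 1), ...)) == 0
  // where every lane of each source vector is consumed can be emitted as a
  // single PTEST of the OR of the source vectors.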
  Opnds.push_back(N->getOperand(0));
  Opnds.push_back(N->getOperand(1));

  for (unsigned Slot = 0, e = Opnds.size(); Slot < e; ++Slot) {
    SmallVectorImpl<SDValue>::const_iterator I = Opnds.begin() + Slot;
    // BFS traverse all OR'd operands.
    if (I->getOpcode() == ISD::OR) {
      Opnds.push_back(I->getOperand(0));
      Opnds.push_back(I->getOperand(1));
      // Re-evaluate the number of nodes to be traversed.
      e += 2; // 2 more nodes (LHS and RHS) are pushed.
      continue;
    }

    // Quit if it's not an EXTRACT_VECTOR_ELT.
    if (I->getOpcode() != ISD::EXTRACT_VECTOR_ELT)
      return SDValue();

    // Quit if the index is not a constant.
    SDValue Idx = I->getOperand(1);
    if (!isa<ConstantSDNode>(Idx))
      return SDValue();

    SDValue ExtractedFromVec = I->getOperand(0);
    DenseMap<SDValue, unsigned>::iterator M = VecInMap.find(ExtractedFromVec);
    if (M == VecInMap.end()) {
      VT = ExtractedFromVec.getValueType();
      // Quit if not 128/256-bit vector.
      if (!VT.is128BitVector() && !VT.is256BitVector())
        return SDValue();
      // Quit if not the same type.
      if (VecInMap.begin() != VecInMap.end() &&
          VT != VecInMap.begin()->first.getValueType())
        return SDValue();
      M = VecInMap.insert(std::make_pair(ExtractedFromVec, 0)).first;
      VecIns.push_back(ExtractedFromVec);
    }
    M->second |= 1U << cast<ConstantSDNode>(Idx)->getZExtValue();
  }

  assert((VT.is128BitVector() || VT.is256BitVector()) &&
         "Not extracted from 128-/256-bit vector.");

  unsigned FullMask = (1U << VT.getVectorNumElements()) - 1U;

  for (DenseMap<SDValue, unsigned>::const_iterator
        I = VecInMap.begin(), E = VecInMap.end(); I != E; ++I) {
    // Quit if not all elements are used.
    if (I->second != FullMask)
      return SDValue();
  }

  EVT TestVT = VT.is128BitVector() ? MVT::v2i64 : MVT::v4i64;

  // Cast all vectors into TestVT for PTEST.
  for (unsigned i = 0, e = VecIns.size(); i < e; ++i)
    VecIns[i] = DAG.getNode(ISD::BITCAST, DL, TestVT, VecIns[i]);

  // If more than one full vector is evaluated, OR them first before PTEST.
  for (unsigned Slot = 0, e = VecIns.size(); e - Slot > 1; Slot += 2, e += 1) {
    // Each iteration will OR 2 nodes and append the result until there is only
    // 1 node left, i.e. the final OR'd value of all vectors.
    SDValue LHS = VecIns[Slot];
    SDValue RHS = VecIns[Slot + 1];
    VecIns.push_back(DAG.getNode(ISD::OR, DL, TestVT, LHS, RHS));
  }

  return DAG.getNode(X86ISD::PTEST, DL, MVT::i32,
                     VecIns.back(), VecIns.back());
}
/// \brief return true if \c Op has a use that doesn't just read flags.
static bool hasNonFlagsUse(SDValue Op) {
  for (SDNode::use_iterator UI = Op->use_begin(), UE = Op->use_end(); UI != UE;
       ++UI) {
    SDNode *User = *UI;
    unsigned UOpNo = UI.getOperandNo();
    if (User->getOpcode() == ISD::TRUNCATE && User->hasOneUse()) {
      // Look past the truncate.
      UOpNo = User->use_begin().getOperandNo();
      User = *User->use_begin();
    }

    if (User->getOpcode() != ISD::BRCOND && User->getOpcode() != ISD::SETCC &&
        !(User->getOpcode() == ISD::SELECT && UOpNo == 0))
      return true;
  }
  return false;
}
/// Emit nodes that will be selected as "test Op0,Op0", or something
/// equivalent.
SDValue X86TargetLowering::EmitTest(SDValue Op, unsigned X86CC, SDLoc dl,
                                    SelectionDAG &DAG) const {
  if (Op.getValueType() == MVT::i1) {
    SDValue ExtOp = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i8, Op);
    return DAG.getNode(X86ISD::CMP, dl, MVT::i32, ExtOp,
                       DAG.getConstant(0, MVT::i8));
  }
  // CF and OF aren't always set the way we want. Determine which
  // of these we need.
  bool NeedCF = false;
  bool NeedOF = false;
  switch (X86CC) {
  default: break;
  case X86::COND_A: case X86::COND_AE:
  case X86::COND_B: case X86::COND_BE:
    NeedCF = true;
    break;
  case X86::COND_G: case X86::COND_GE:
  case X86::COND_L: case X86::COND_LE:
  case X86::COND_O: case X86::COND_NO: {
    // Check if we really need to set the
    // Overflow flag. If NoSignedWrap is present
    // that is not actually needed.
    switch (Op->getOpcode()) {
    case ISD::ADD:
    case ISD::SUB:
    case ISD::MUL:
    case ISD::SHL: {
      const BinaryWithFlagsSDNode *BinNode =
          cast<BinaryWithFlagsSDNode>(Op.getNode());
      if (BinNode->hasNoSignedWrap())
        break;
    }
    default:
      NeedOF = true;
      break;
    }
    break;
  }
  }
  // See if we can use the EFLAGS value from the operand instead of
  // doing a separate TEST. TEST always sets OF and CF to 0, so unless
  // we prove that the arithmetic won't overflow, we can't use OF or CF.
  if (Op.getResNo() != 0 || NeedOF || NeedCF) {
    // Emit a CMP with 0, which is the TEST pattern.
    //if (Op.getValueType() == MVT::i1)
    //  return DAG.getNode(X86ISD::CMP, dl, MVT::i1, Op,
    //                     DAG.getConstant(0, MVT::i1));
    return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op,
                       DAG.getConstant(0, Op.getValueType()));
  }
  unsigned Opcode = 0;
  unsigned NumOperands = 0;

  // Truncate operations may prevent the merge of the SETCC instruction
  // and the arithmetic instruction before it. Attempt to truncate the operands
  // of the arithmetic instruction and use a reduced bit-width instruction.
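  // For example, a setcc of (trunc (and i64 %x, %y)) can often be narrowed to
  // an i32 AND that sets EFLAGS directly, instead of a 64-bit op followed by
  // a separate truncate.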
  bool NeedTruncation = false;
  SDValue ArithOp = Op;
  if (Op->getOpcode() == ISD::TRUNCATE && Op->hasOneUse()) {
    SDValue Arith = Op->getOperand(0);
    // Both the trunc and the arithmetic op need to have one user each.
    if (Arith->hasOneUse())
      switch (Arith.getOpcode()) {
        default: break;
        case ISD::ADD:
        case ISD::SUB:
        case ISD::AND:
        case ISD::OR:
        case ISD::XOR: {
          NeedTruncation = true;
          ArithOp = Arith;
        }
      }
  }

  // NOTICE: In the code below we use ArithOp to hold the arithmetic operation
  // which may be the result of a CAST. We use the variable 'Op', which is the
  // non-casted variable when we check for possible users.
  switch (ArithOp.getOpcode()) {
  case ISD::ADD:
    // Due to an isel shortcoming, be conservative if this add is likely to be
    // selected as part of a load-modify-store instruction. When the root node
    // in a match is a store, isel doesn't know how to remap non-chain non-flag
    // uses of other nodes in the match, such as the ADD in this case. This
    // leads to the ADD being left around and reselected, with the result being
    // two adds in the output. Alas, even if none of our users are stores, that
    // doesn't prove we're O.K. Ergo, if we have any parents that aren't
    // CopyToReg or SETCC, eschew INC/DEC. A better fix seems to require
    // climbing the DAG back to the root, and it doesn't seem to be worth the
    // effort.
    for (SDNode::use_iterator UI = Op.getNode()->use_begin(),
         UE = Op.getNode()->use_end(); UI != UE; ++UI)
      if (UI->getOpcode() != ISD::CopyToReg &&
          UI->getOpcode() != ISD::SETCC &&
          UI->getOpcode() != ISD::STORE)
        goto default_case;

    if (ConstantSDNode *C =
        dyn_cast<ConstantSDNode>(ArithOp.getNode()->getOperand(1))) {
      // An add of one will be selected as an INC.
      if (C->getAPIntValue() == 1 && !Subtarget->slowIncDec()) {
        Opcode = X86ISD::INC;
        NumOperands = 1;
        break;
      }

      // An add of negative one (subtract of one) will be selected as a DEC.
      if (C->getAPIntValue().isAllOnesValue() && !Subtarget->slowIncDec()) {
        Opcode = X86ISD::DEC;
        NumOperands = 1;
        break;
      }
    }

    // Otherwise use a regular EFLAGS-setting add.
    Opcode = X86ISD::ADD;
    NumOperands = 2;
    break;
  case ISD::SHL:
  case ISD::SRL:
    // If we have a constant logical shift that's only used in a comparison
    // against zero turn it into an equivalent AND. This allows turning it into
    // a TEST instruction later.
    if ((X86CC == X86::COND_E || X86CC == X86::COND_NE) && Op->hasOneUse() &&
        isa<ConstantSDNode>(Op->getOperand(1)) && !hasNonFlagsUse(Op)) {
      EVT VT = Op.getValueType();
      unsigned BitWidth = VT.getSizeInBits();
      unsigned ShAmt = Op->getConstantOperandVal(1);
      if (ShAmt >= BitWidth) // Avoid undefined shifts.
        break;
      APInt Mask = ArithOp.getOpcode() == ISD::SRL
                       ? APInt::getHighBitsSet(BitWidth, BitWidth - ShAmt)
                       : APInt::getLowBitsSet(BitWidth, BitWidth - ShAmt);
      if (!Mask.isSignedIntN(32)) // Avoid large immediates.
        break;
      SDValue New = DAG.getNode(ISD::AND, dl, VT, Op->getOperand(0),
                                DAG.getConstant(Mask, VT));
      DAG.ReplaceAllUsesWith(Op, New);
      Op = New;
    }
    break;

  case ISD::AND:
    // If the primary 'and' result isn't used, don't bother using X86ISD::AND,
    // because a TEST instruction will be better.
    if (!hasNonFlagsUse(Op))
      break;
    // FALL THROUGH
  case ISD::SUB:
  case ISD::OR:
  case ISD::XOR:
    // Due to the ISEL shortcoming noted above, be conservative if this op is
    // likely to be selected as part of a load-modify-store instruction.
    for (SDNode::use_iterator UI = Op.getNode()->use_begin(),
           UE = Op.getNode()->use_end(); UI != UE; ++UI)
      if (UI->getOpcode() == ISD::STORE)
        goto default_case;

    // Otherwise use a regular EFLAGS-setting instruction.
    switch (ArithOp.getOpcode()) {
    default: llvm_unreachable("unexpected operator!");
    case ISD::SUB: Opcode = X86ISD::SUB; break;
    case ISD::XOR: Opcode = X86ISD::XOR; break;
    case ISD::AND: Opcode = X86ISD::AND; break;
    case ISD::OR: {
      if (!NeedTruncation && (X86CC == X86::COND_E || X86CC == X86::COND_NE)) {
        SDValue EFLAGS = LowerVectorAllZeroTest(Op, Subtarget, DAG);
        if (EFLAGS.getNode())
          return EFLAGS;
      }
      Opcode = X86ISD::OR;
      break;
    }
    }

    NumOperands = 2;
    break;
  case X86ISD::ADD:
  case X86ISD::SUB:
  case X86ISD::INC:
  case X86ISD::DEC:
  case X86ISD::OR:
  case X86ISD::XOR:
  case X86ISD::AND:
    return SDValue(Op.getNode(), 1);
  default:
  default_case:
    break;
  }

  // If we found that truncation is beneficial, perform the truncation and
  // update the expression.
  if (NeedTruncation) {
    EVT VT = Op.getValueType();
    SDValue WideVal = Op->getOperand(0);
    EVT WideVT = WideVal.getValueType();
    unsigned ConvertedOp = 0;
    // Use a target machine opcode to prevent further DAGCombine
    // optimizations that may separate the arithmetic operations
    // from the setcc node.
    switch (WideVal.getOpcode()) {
      default: break;
      case ISD::ADD: ConvertedOp = X86ISD::ADD; break;
      case ISD::SUB: ConvertedOp = X86ISD::SUB; break;
      case ISD::AND: ConvertedOp = X86ISD::AND; break;
      case ISD::OR:  ConvertedOp = X86ISD::OR;  break;
      case ISD::XOR: ConvertedOp = X86ISD::XOR; break;
    }

    if (ConvertedOp) {
      const TargetLowering &TLI = DAG.getTargetLoweringInfo();
      if (TLI.isOperationLegal(WideVal.getOpcode(), WideVT)) {
        SDValue V0 = DAG.getNode(ISD::TRUNCATE, dl, VT, WideVal.getOperand(0));
        SDValue V1 = DAG.getNode(ISD::TRUNCATE, dl, VT, WideVal.getOperand(1));
        Op = DAG.getNode(ConvertedOp, dl, VT, V0, V1);
      }
    }
  }

  if (Opcode == 0)
    // Emit a CMP with 0, which is the TEST pattern.
    return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op,
                       DAG.getConstant(0, Op.getValueType()));

  SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
  SmallVector<SDValue, 4> Ops;
  for (unsigned i = 0; i != NumOperands; ++i)
    Ops.push_back(Op.getOperand(i));

  SDValue New = DAG.getNode(Opcode, dl, VTs, Ops);
  DAG.ReplaceAllUsesWith(Op, New);
  return SDValue(New.getNode(), 1);
}
/// Emit nodes that will be selected as "cmp Op0,Op1", or something
/// equivalent.
SDValue X86TargetLowering::EmitCmp(SDValue Op0, SDValue Op1, unsigned X86CC,
                                   SDLoc dl, SelectionDAG &DAG) const {
  if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op1)) {
    if (C->getAPIntValue() == 0)
      return EmitTest(Op0, X86CC, dl, DAG);

    if (Op0.getValueType() == MVT::i1)
      llvm_unreachable("Unexpected comparison operation for MVT::i1 operands");
  }

  if ((Op0.getValueType() == MVT::i8 || Op0.getValueType() == MVT::i16 ||
       Op0.getValueType() == MVT::i32 || Op0.getValueType() == MVT::i64)) {
    // Do the comparison at i32 if it's smaller, besides the Atom case.
    // This avoids subregister aliasing issues. Keep the smaller reference
    // if we're optimizing for size, however, as that'll allow better folding
    // of memory operations.
    if (Op0.getValueType() != MVT::i32 && Op0.getValueType() != MVT::i64 &&
        !DAG.getMachineFunction().getFunction()->hasFnAttribute(
            Attribute::MinSize) &&
        !Subtarget->isAtom()) {
      unsigned ExtendOp =
          isX86CCUnsigned(X86CC) ? ISD::ZERO_EXTEND : ISD::SIGN_EXTEND;
      Op0 = DAG.getNode(ExtendOp, dl, MVT::i32, Op0);
      Op1 = DAG.getNode(ExtendOp, dl, MVT::i32, Op1);
    }
    // Use SUB instead of CMP to enable CSE between SUB and CMP.
    SDVTList VTs = DAG.getVTList(Op0.getValueType(), MVT::i32);
    SDValue Sub = DAG.getNode(X86ISD::SUB, dl, VTs,
                              Op0, Op1);
    return SDValue(Sub.getNode(), 1);
  }
  return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op0, Op1);
}
/// Convert a comparison if required by the subtarget.
SDValue X86TargetLowering::ConvertCmpIfNecessary(SDValue Cmp,
                                                 SelectionDAG &DAG) const {
  // If the subtarget does not support the FUCOMI instruction, floating-point
  // comparisons have to be converted.
  if (Subtarget->hasCMov() ||
      Cmp.getOpcode() != X86ISD::CMP ||
      !Cmp.getOperand(0).getValueType().isFloatingPoint() ||
      !Cmp.getOperand(1).getValueType().isFloatingPoint())
    return Cmp;

  // The instruction selector will select an FUCOM instruction instead of
  // FUCOMI, which writes the comparison result to FPSW instead of EFLAGS. Hence
  // build an SDNode sequence that transfers the result from FPSW into EFLAGS:
  // (X86sahf (trunc (srl (X86fp_stsw (trunc (X86cmp ...)), 8))))
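  // This matches the classic pre-P6 idiom
  //   fucom; fnstsw %ax; sahf
  // SAHF copies AH into the low EFLAGS byte, so the FPSW condition bits
  // C0/C2/C3 (FPSW bits 8, 10, and 14) land in CF/PF/ZF where integer
  // branches and setcc can consume them.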
  SDLoc dl(Cmp);
  SDValue TruncFPSW = DAG.getNode(ISD::TRUNCATE, dl, MVT::i16, Cmp);
  SDValue FNStSW = DAG.getNode(X86ISD::FNSTSW16r, dl, MVT::i16, TruncFPSW);
  SDValue Srl = DAG.getNode(ISD::SRL, dl, MVT::i16, FNStSW,
                            DAG.getConstant(8, MVT::i8));
  SDValue TruncSrl = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Srl);
  return DAG.getNode(X86ISD::SAHF, dl, MVT::i32, TruncSrl);
}
/// The minimum architected relative accuracy is 2^-12. We need one
/// Newton-Raphson step to have a good float result (24 bits of precision).
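/// Each refinement step computes
///   x1 = x0 * (1.5 - 0.5 * a * x0 * x0)
/// which roughly doubles the number of correct bits in the estimate x0.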
SDValue X86TargetLowering::getRsqrtEstimate(SDValue Op,
                                            DAGCombinerInfo &DCI,
                                            unsigned &RefinementSteps,
                                            bool &UseOneConstNR) const {
  // FIXME: We should use instruction latency models to calculate the cost of
  // each potential sequence, but this is very hard to do reliably because
  // at least Intel's Core* chips have variable timing based on the number of
  // significant digits in the divisor and/or sqrt operand.
  if (!Subtarget->useSqrtEst())
    return SDValue();

  EVT VT = Op.getValueType();

  // SSE1 has rsqrtss and rsqrtps.
  // TODO: Add support for AVX512 (v16f32).
  // It is likely not profitable to do this for f64 because a double-precision
  // rsqrt estimate with refinement on x86 prior to FMA requires at least 16
  // instructions: convert to single, rsqrtss, convert back to double, refine
  // (3 steps = at least 13 insts). If an 'rsqrtsd' variant was added to the ISA
  // along with FMA, this could be a throughput win.
  if ((Subtarget->hasSSE1() && (VT == MVT::f32 || VT == MVT::v4f32)) ||
      (Subtarget->hasAVX() && VT == MVT::v8f32)) {
    RefinementSteps = 1;
    UseOneConstNR = false;
    return DCI.DAG.getNode(X86ISD::FRSQRT, SDLoc(Op), VT, Op);
  }
  return SDValue();
}
/// The minimum architected relative accuracy is 2^-12. We need one
/// Newton-Raphson step to have a good float result (24 bits of precision).
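/// Here each refinement step computes
///   x1 = x0 * (2 - a * x0)
/// for the reciprocal of a, again roughly doubling the accurate bits.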
SDValue X86TargetLowering::getRecipEstimate(SDValue Op,
                                            DAGCombinerInfo &DCI,
                                            unsigned &RefinementSteps) const {
  // FIXME: We should use instruction latency models to calculate the cost of
  // each potential sequence, but this is very hard to do reliably because
  // at least Intel's Core* chips have variable timing based on the number of
  // significant digits in the divisor.
  if (!Subtarget->useReciprocalEst())
    return SDValue();

  EVT VT = Op.getValueType();

  // SSE1 has rcpss and rcpps. AVX adds a 256-bit variant for rcpps.
  // TODO: Add support for AVX512 (v16f32).
  // It is likely not profitable to do this for f64 because a double-precision
  // reciprocal estimate with refinement on x86 prior to FMA requires
  // 15 instructions: convert to single, rcpss, convert back to double, refine
  // (3 steps = 12 insts). If an 'rcpsd' variant was added to the ISA
  // along with FMA, this could be a throughput win.
  if ((Subtarget->hasSSE1() && (VT == MVT::f32 || VT == MVT::v4f32)) ||
      (Subtarget->hasAVX() && VT == MVT::v8f32)) {
    RefinementSteps = ReciprocalEstimateRefinementSteps;
    return DCI.DAG.getNode(X86ISD::FRCP, SDLoc(Op), VT, Op);
  }
  return SDValue();
}
static bool isAllOnes(SDValue V) {
  ConstantSDNode *C = dyn_cast<ConstantSDNode>(V);
  return C && C->isAllOnesValue();
}
/// LowerToBT - Result of 'and' is compared against zero. Turn it into a BT
/// node if it's possible.
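/// For example, both of
///   ((x >> n) & 1) != 0
///   (x & (1 << n)) != 0
/// test a single bit and can be emitted as "bt x, n" plus a setcc on the
/// carry flag.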
15503 SDValue X86TargetLowering::LowerToBT(SDValue And, ISD::CondCode CC,
15504 SDLoc dl, SelectionDAG &DAG) const {
15505 SDValue Op0 = And.getOperand(0);
15506 SDValue Op1 = And.getOperand(1);
15507 if (Op0.getOpcode() == ISD::TRUNCATE)
15508 Op0 = Op0.getOperand(0);
15509 if (Op1.getOpcode() == ISD::TRUNCATE)
15510 Op1 = Op1.getOperand(0);
15513 if (Op1.getOpcode() == ISD::SHL)
15514 std::swap(Op0, Op1);
15515 if (Op0.getOpcode() == ISD::SHL) {
15516 if (ConstantSDNode *And00C = dyn_cast<ConstantSDNode>(Op0.getOperand(0)))
15517 if (And00C->getZExtValue() == 1) {
15518 // If we looked past a truncate, check that it's only truncating away
15520 unsigned BitWidth = Op0.getValueSizeInBits();
15521 unsigned AndBitWidth = And.getValueSizeInBits();
15522 if (BitWidth > AndBitWidth) {
15524 DAG.computeKnownBits(Op0, Zeros, Ones);
15525 if (Zeros.countLeadingOnes() < BitWidth - AndBitWidth)
15529 RHS = Op0.getOperand(1);
15531 } else if (Op1.getOpcode() == ISD::Constant) {
15532 ConstantSDNode *AndRHS = cast<ConstantSDNode>(Op1);
15533 uint64_t AndRHSVal = AndRHS->getZExtValue();
15534 SDValue AndLHS = Op0;
15536 if (AndRHSVal == 1 && AndLHS.getOpcode() == ISD::SRL) {
15537 LHS = AndLHS.getOperand(0);
15538 RHS = AndLHS.getOperand(1);
15541 // Use BT if the immediate can't be encoded in a TEST instruction.
15542 if (!isUInt<32>(AndRHSVal) && isPowerOf2_64(AndRHSVal)) {
15544 RHS = DAG.getConstant(Log2_64_Ceil(AndRHSVal), LHS.getValueType());
15548 if (LHS.getNode()) {
15549 // If LHS is i8, promote it to i32 with any_extend. There is no i8 BT
15550 // instruction. Since the shift amount is in-range-or-undefined, we know
15551 // that doing a bittest on the i32 value is ok. We extend to i32 because
15552 // the encoding for the i16 version is larger than the i32 version.
15553 // Also promote i16 to i32 for performance / code size reasons.
15554 if (LHS.getValueType() == MVT::i8 ||
15555 LHS.getValueType() == MVT::i16)
15556 LHS = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, LHS);
15558 // If the operand types disagree, extend the shift amount to match. Since
15559 // BT ignores high bits (like shifts) we can use anyextend.
15560 if (LHS.getValueType() != RHS.getValueType())
15561 RHS = DAG.getNode(ISD::ANY_EXTEND, dl, LHS.getValueType(), RHS);
15563 SDValue BT = DAG.getNode(X86ISD::BT, dl, MVT::i32, LHS, RHS);
15564 X86::CondCode Cond = CC == ISD::SETEQ ? X86::COND_AE : X86::COND_B;
15565 return DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
15566 DAG.getConstant(Cond, MVT::i8), BT);
15567 }
15569 return SDValue();
15570 }
15572 /// \brief Turns an ISD::CondCode into a value suitable for SSE floating point
15573 /// mask CC.
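/// A rough example: SETOLT maps directly to SSE condition code 1 (LT), while
/// SETGT sets Swap and falls through to the same code 1, since a > b is
/// evaluated as b < a. The pseudo code 8 marks SETUEQ/SETONE, which need two
/// comparisons and are expanded by the callers.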
15574 static int translateX86FSETCC(ISD::CondCode SetCCOpcode, SDValue &Op0,
15575 SDValue &Op1) {
15576 unsigned SSECC;
15577 bool Swap = false;
15579 // SSE Condition code mapping:
15580 //  0 - EQ
15581 //  1 - LT
15582 //  2 - LE
15583 //  3 - UNORD
15584 //  4 - NEQ
15585 //  5 - NLT
15586 //  6 - NLE
15587 //  7 - ORD
15588 switch (SetCCOpcode) {
15589 default: llvm_unreachable("Unexpected SETCC condition");
15590 case ISD::SETOEQ:
15591 case ISD::SETEQ:  SSECC = 0; break;
15592 case ISD::SETOGT:
15593 case ISD::SETGT:  Swap = true; // Fallthrough
15594 case ISD::SETLT:
15595 case ISD::SETOLT: SSECC = 1; break;
15596 case ISD::SETOGE:
15597 case ISD::SETGE:  Swap = true; // Fallthrough
15598 case ISD::SETLE:
15599 case ISD::SETOLE: SSECC = 2; break;
15600 case ISD::SETUO:  SSECC = 3; break;
15601 case ISD::SETUNE:
15602 case ISD::SETNE:  SSECC = 4; break;
15603 case ISD::SETULE: Swap = true; // Fallthrough
15604 case ISD::SETUGE: SSECC = 5; break;
15605 case ISD::SETULT: Swap = true; // Fallthrough
15606 case ISD::SETUGT: SSECC = 6; break;
15607 case ISD::SETO:   SSECC = 7; break;
15608 case ISD::SETUEQ:
15609 case ISD::SETONE: SSECC = 8; break;
15610 }
15611 if (Swap)
15612 std::swap(Op0, Op1);
15614 return SSECC;
15615 }
15617 // Lower256IntVSETCC - Break a 256-bit integer VSETCC into two new 128-bit
15618 // ones, and then concatenate the result back.
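// For instance (illustrative), a v8i32 setcc without AVX2 is performed as two
// v4i32 setccs on the extracted 128-bit halves, whose results are glued back
// together with ISD::CONCAT_VECTORS.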
15619 static SDValue Lower256IntVSETCC(SDValue Op, SelectionDAG &DAG) {
15620 MVT VT = Op.getSimpleValueType();
15622 assert(VT.is256BitVector() && Op.getOpcode() == ISD::SETCC &&
15623 "Unsupported value type for operation");
15625 unsigned NumElems = VT.getVectorNumElements();
15627 SDValue CC = Op.getOperand(2);
15628 SDLoc dl(Op);
15629 // Extract the LHS vectors
15630 SDValue LHS = Op.getOperand(0);
15631 SDValue LHS1 = Extract128BitVector(LHS, 0, DAG, dl);
15632 SDValue LHS2 = Extract128BitVector(LHS, NumElems/2, DAG, dl);
15634 // Extract the RHS vectors
15635 SDValue RHS = Op.getOperand(1);
15636 SDValue RHS1 = Extract128BitVector(RHS, 0, DAG, dl);
15637 SDValue RHS2 = Extract128BitVector(RHS, NumElems/2, DAG, dl);
15639 // Issue the operation on the smaller types and concatenate the result back
15640 MVT EltVT = VT.getVectorElementType();
15641 MVT NewVT = MVT::getVectorVT(EltVT, NumElems/2);
15642 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT,
15643 DAG.getNode(Op.getOpcode(), dl, NewVT, LHS1, RHS1, CC),
15644 DAG.getNode(Op.getOpcode(), dl, NewVT, LHS2, RHS2, CC));
15645 }
15647 static SDValue LowerIntVSETCC_AVX512(SDValue Op, SelectionDAG &DAG,
15648 const X86Subtarget *Subtarget) {
15649 SDValue Op0 = Op.getOperand(0);
15650 SDValue Op1 = Op.getOperand(1);
15651 SDValue CC = Op.getOperand(2);
15652 MVT VT = Op.getSimpleValueType();
15653 SDLoc dl(Op);
15655 assert(Op0.getValueType().getVectorElementType().getSizeInBits() >= 8 &&
15656 Op.getValueType().getScalarType() == MVT::i1 &&
15657 "Cannot set masked compare for this operation");
15659 ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get();
15660 unsigned Opc = 0;
15661 bool Unsigned = false;
15662 bool Swap = false;
15663 unsigned SSECC = 0;
15664 switch (SetCCOpcode) {
15665 default: llvm_unreachable("Unexpected SETCC condition");
15666 case ISD::SETNE: SSECC = 4; break;
15667 case ISD::SETEQ: Opc = X86ISD::PCMPEQM; break;
15668 case ISD::SETUGT: SSECC = 6; Unsigned = true; break;
15669 case ISD::SETLT: Swap = true; //fall-through
15670 case ISD::SETGT: Opc = X86ISD::PCMPGTM; break;
15671 case ISD::SETULT: SSECC = 1; Unsigned = true; break;
15672 case ISD::SETUGE: SSECC = 5; Unsigned = true; break; //NLT
15673 case ISD::SETGE: Swap = true; SSECC = 2; break; // LE + swap
15674 case ISD::SETULE: Unsigned = true; //fall-through
15675 case ISD::SETLE: SSECC = 2; break;
15676 }
15678 if (Swap)
15679 std::swap(Op0, Op1);
15680 if (Opc)
15681 return DAG.getNode(Opc, dl, VT, Op0, Op1);
15682 Opc = Unsigned ? X86ISD::CMPMU: X86ISD::CMPM;
15683 return DAG.getNode(Opc, dl, VT, Op0, Op1,
15684 DAG.getConstant(SSECC, MVT::i8));
15685 }
15687 /// \brief Try to turn a VSETULT into a VSETULE by modifying its second
15688 /// operand \p Op1. If non-trivial (for example because it's not constant)
15689 /// return an empty value.
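/// Sketch of the transform (assuming a constant vector): x <u <14,14,14,14>
/// becomes x <=u <13,13,13,13>, which is only valid because the code below
/// bails out on any zero element, where subtracting one would wrap.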
15690 static SDValue ChangeVSETULTtoVSETULE(SDLoc dl, SDValue Op1, SelectionDAG &DAG)
15691 {
15692 BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(Op1.getNode());
15693 if (!BV)
15694 return SDValue();
15696 MVT VT = Op1.getSimpleValueType();
15697 MVT EVT = VT.getVectorElementType();
15698 unsigned n = VT.getVectorNumElements();
15699 SmallVector<SDValue, 8> ULTOp1;
15701 for (unsigned i = 0; i < n; ++i) {
15702 ConstantSDNode *Elt = dyn_cast<ConstantSDNode>(BV->getOperand(i));
15703 if (!Elt || Elt->isOpaque() || Elt->getValueType(0) != EVT)
15704 return SDValue();
15706 // Avoid underflow.
15707 APInt Val = Elt->getAPIntValue();
15708 if (Val == 0)
15709 return SDValue();
15711 ULTOp1.push_back(DAG.getConstant(Val - 1, EVT));
15712 }
15714 return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, ULTOp1);
15715 }
15717 static SDValue LowerVSETCC(SDValue Op, const X86Subtarget *Subtarget,
15718 SelectionDAG &DAG) {
15719 SDValue Op0 = Op.getOperand(0);
15720 SDValue Op1 = Op.getOperand(1);
15721 SDValue CC = Op.getOperand(2);
15722 MVT VT = Op.getSimpleValueType();
15723 ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get();
15724 bool isFP = Op.getOperand(1).getSimpleValueType().isFloatingPoint();
15725 SDLoc dl(Op);
15727 if (isFP) {
15729 MVT EltVT = Op0.getSimpleValueType().getVectorElementType();
15730 assert(EltVT == MVT::f32 || EltVT == MVT::f64);
15733 unsigned SSECC = translateX86FSETCC(SetCCOpcode, Op0, Op1);
15734 unsigned Opc = X86ISD::CMPP;
15735 if (Subtarget->hasAVX512() && VT.getVectorElementType() == MVT::i1) {
15736 assert(VT.getVectorNumElements() <= 16);
15737 Opc = X86ISD::CMPM;
15739 // In the two special cases we can't handle, emit two comparisons.
15740 if (SSECC == 8) {
15741 unsigned CC0, CC1;
15742 unsigned CombineOpc;
15743 if (SetCCOpcode == ISD::SETUEQ) {
15744 CC0 = 3; CC1 = 0; CombineOpc = ISD::OR;
15745 } else {
15746 assert(SetCCOpcode == ISD::SETONE);
15747 CC0 = 7; CC1 = 4; CombineOpc = ISD::AND;
15748 }
15750 SDValue Cmp0 = DAG.getNode(Opc, dl, VT, Op0, Op1,
15751 DAG.getConstant(CC0, MVT::i8));
15752 SDValue Cmp1 = DAG.getNode(Opc, dl, VT, Op0, Op1,
15753 DAG.getConstant(CC1, MVT::i8));
15754 return DAG.getNode(CombineOpc, dl, VT, Cmp0, Cmp1);
15755 }
15756 // Handle all other FP comparisons here.
15757 return DAG.getNode(Opc, dl, VT, Op0, Op1,
15758 DAG.getConstant(SSECC, MVT::i8));
15759 }
15761 // Break 256-bit integer vector compare into smaller ones.
15762 if (VT.is256BitVector() && !Subtarget->hasInt256())
15763 return Lower256IntVSETCC(Op, DAG);
15765 bool MaskResult = (VT.getVectorElementType() == MVT::i1);
15766 EVT OpVT = Op1.getValueType();
15767 if (Subtarget->hasAVX512()) {
15768 if (Op1.getValueType().is512BitVector() ||
15769 (Subtarget->hasBWI() && Subtarget->hasVLX()) ||
15770 (MaskResult && OpVT.getVectorElementType().getSizeInBits() >= 32))
15771 return LowerIntVSETCC_AVX512(Op, DAG, Subtarget);
15773 // In AVX-512 architecture setcc returns mask with i1 elements,
15774 // But there is no compare instruction for i8 and i16 elements in KNL.
15775 // We are not talking about 512-bit operands in this case, these
15776 // types are illegal.
15777 if (MaskResult &&
15778 (OpVT.getVectorElementType().getSizeInBits() < 32 &&
15779 OpVT.getVectorElementType().getSizeInBits() >= 8))
15780 return DAG.getNode(ISD::TRUNCATE, dl, VT,
15781 DAG.getNode(ISD::SETCC, dl, OpVT, Op0, Op1, CC));
15782 }
15784 // We are handling one of the integer comparisons here. Since SSE only has
15785 // GT and EQ comparisons for integer, swapping operands and multiple
15786 // operations may be required for some comparisons.
15787 unsigned Opc;
15788 bool Swap = false, Invert = false, FlipSigns = false, MinMax = false;
15789 bool Subus = false;
15791 switch (SetCCOpcode) {
15792 default: llvm_unreachable("Unexpected SETCC condition");
15793 case ISD::SETNE: Invert = true;
15794 case ISD::SETEQ: Opc = X86ISD::PCMPEQ; break;
15795 case ISD::SETLT: Swap = true;
15796 case ISD::SETGT: Opc = X86ISD::PCMPGT; break;
15797 case ISD::SETGE: Swap = true;
15798 case ISD::SETLE: Opc = X86ISD::PCMPGT;
15799 Invert = true; break;
15800 case ISD::SETULT: Swap = true;
15801 case ISD::SETUGT: Opc = X86ISD::PCMPGT;
15802 FlipSigns = true; break;
15803 case ISD::SETUGE: Swap = true;
15804 case ISD::SETULE: Opc = X86ISD::PCMPGT;
15805 FlipSigns = true; Invert = true; break;
15808 // Special case: Use min/max operations for SETULE/SETUGE
15809 MVT VET = VT.getVectorElementType();
15810 bool hasMinMax =
15811 (Subtarget->hasSSE41() && (VET >= MVT::i8 && VET <= MVT::i32))
15812 || (Subtarget->hasSSE2() && (VET == MVT::i8));
15814 if (hasMinMax) {
15815 switch (SetCCOpcode) {
15816 default: break;
15817 case ISD::SETULE: Opc = X86ISD::UMIN; MinMax = true; break;
15818 case ISD::SETUGE: Opc = X86ISD::UMAX; MinMax = true; break;
15819 }
15821 if (MinMax) { Swap = false; Invert = false; FlipSigns = false; }
15822 }
15824 bool hasSubus = Subtarget->hasSSE2() && (VET == MVT::i8 || VET == MVT::i16);
15825 if (!MinMax && hasSubus) {
15826 // As another special case, use PSUBUS[BW] when it's profitable. E.g. for
15828 // t = psubus Op0, Op1
15829 // pcmpeq t, <0..0>
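// In other words (illustrative): Op0 <=u Op1 exactly when the saturating
// subtraction Op0 -u Op1 is zero, so a PSUBUS feeding a PCMPEQ-with-zero
// implements the unsigned comparison directly.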
15830 switch (SetCCOpcode) {
15831 default: break;
15832 case ISD::SETULT: {
15833 // If the comparison is against a constant we can turn this into a
15834 // setule. With psubus, setule does not require a swap. This is
15835 // beneficial because the constant in the register is no longer
15836 // clobbered as the destination, so it can be hoisted out of a loop.
15837 // Only do this pre-AVX, since vpcmp* is non-destructive.
15838 if (Subtarget->hasAVX())
15839 break;
15840 SDValue ULEOp1 = ChangeVSETULTtoVSETULE(dl, Op1, DAG);
15841 if (ULEOp1.getNode()) {
15842 Op1 = ULEOp1;
15843 Subus = true; Invert = false; Swap = false;
15844 }
15845 break;
15846 }
15847 // Psubus is better than flip-sign because it requires no inversion.
15848 case ISD::SETUGE: Subus = true; Invert = false; Swap = true; break;
15849 case ISD::SETULE: Subus = true; Invert = false; Swap = false; break;
15850 }
15852 if (Subus) {
15853 Opc = X86ISD::SUBUS;
15854 FlipSigns = false;
15855 Invert = false;
15856 }
15857 }
15858 if (Swap)
15859 std::swap(Op0, Op1);
15861 // Check that the operation in question is available (most are plain SSE2,
15862 // but PCMPGTQ and PCMPEQQ have different requirements).
15863 if (VT == MVT::v2i64) {
15864 if (Opc == X86ISD::PCMPGT && !Subtarget->hasSSE42()) {
15865 assert(Subtarget->hasSSE2() && "Don't know how to lower!");
15867 // First cast everything to the right type.
15868 Op0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op0);
15869 Op1 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op1);
15871 // Since SSE has no unsigned integer comparisons, we need to flip the sign
15872 // bits of the inputs before performing those operations. The lower
15873 // compare is always unsigned.
15874 SDValue SB;
15875 if (FlipSigns) {
15876 SB = DAG.getConstant(0x80000000U, MVT::v4i32);
15877 } else {
15878 SDValue Sign = DAG.getConstant(0x80000000U, MVT::i32);
15879 SDValue Zero = DAG.getConstant(0x00000000U, MVT::i32);
15880 SB = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
15881 Sign, Zero, Sign, Zero);
15882 }
15883 Op0 = DAG.getNode(ISD::XOR, dl, MVT::v4i32, Op0, SB);
15884 Op1 = DAG.getNode(ISD::XOR, dl, MVT::v4i32, Op1, SB);
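// At this point (roughly): flipping the sign bit turns the signed PCMPGTD on
// the low dwords into an unsigned compare, while the high dwords were also
// flipped only when the original 64-bit predicate was unsigned (FlipSigns).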
15886 // Emulate PCMPGTQ with (hi1 > hi2) | ((hi1 == hi2) & (lo1 > lo2))
15887 SDValue GT = DAG.getNode(X86ISD::PCMPGT, dl, MVT::v4i32, Op0, Op1);
15888 SDValue EQ = DAG.getNode(X86ISD::PCMPEQ, dl, MVT::v4i32, Op0, Op1);
15890 // Create masks for only the low parts/high parts of the 64 bit integers.
15891 static const int MaskHi[] = { 1, 1, 3, 3 };
15892 static const int MaskLo[] = { 0, 0, 2, 2 };
15893 SDValue EQHi = DAG.getVectorShuffle(MVT::v4i32, dl, EQ, EQ, MaskHi);
15894 SDValue GTLo = DAG.getVectorShuffle(MVT::v4i32, dl, GT, GT, MaskLo);
15895 SDValue GTHi = DAG.getVectorShuffle(MVT::v4i32, dl, GT, GT, MaskHi);
15897 SDValue Result = DAG.getNode(ISD::AND, dl, MVT::v4i32, EQHi, GTLo);
15898 Result = DAG.getNode(ISD::OR, dl, MVT::v4i32, Result, GTHi);
15900 if (Invert)
15901 Result = DAG.getNOT(dl, Result, MVT::v4i32);
15903 return DAG.getNode(ISD::BITCAST, dl, VT, Result);
15904 }
15906 if (Opc == X86ISD::PCMPEQ && !Subtarget->hasSSE41()) {
15907 // If pcmpeqq is missing but pcmpeqd is available synthesize pcmpeqq with
15908 // pcmpeqd + pshufd + pand.
15909 assert(Subtarget->hasSSE2() && !FlipSigns && "Don't know how to lower!");
15911 // First cast everything to the right type.
15912 Op0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op0);
15913 Op1 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op1);
15916 SDValue Result = DAG.getNode(Opc, dl, MVT::v4i32, Op0, Op1);
15918 // Make sure the lower and upper halves are both all-ones.
15919 static const int Mask[] = { 1, 0, 3, 2 };
15920 SDValue Shuf = DAG.getVectorShuffle(MVT::v4i32, dl, Result, Result, Mask);
15921 Result = DAG.getNode(ISD::AND, dl, MVT::v4i32, Result, Shuf);
15923 if (Invert)
15924 Result = DAG.getNOT(dl, Result, MVT::v4i32);
15926 return DAG.getNode(ISD::BITCAST, dl, VT, Result);
15927 }
15928 }
15930 // Since SSE has no unsigned integer comparisons, we need to flip the sign
15931 // bits of the inputs before performing those operations.
15932 if (FlipSigns) {
15933 EVT EltVT = VT.getVectorElementType();
15934 SDValue SB = DAG.getConstant(APInt::getSignBit(EltVT.getSizeInBits()), VT);
15935 Op0 = DAG.getNode(ISD::XOR, dl, VT, Op0, SB);
15936 Op1 = DAG.getNode(ISD::XOR, dl, VT, Op1, SB);
15937 }
15939 SDValue Result = DAG.getNode(Opc, dl, VT, Op0, Op1);
15941 // If the logical-not of the result is required, perform that now.
15942 if (Invert)
15943 Result = DAG.getNOT(dl, Result, VT);
15945 if (MinMax)
15946 Result = DAG.getNode(X86ISD::PCMPEQ, dl, VT, Op0, Result);
15948 if (Subus)
15949 Result = DAG.getNode(X86ISD::PCMPEQ, dl, VT, Result,
15950 getZeroVector(VT, Subtarget, DAG, dl));
15952 return Result;
15953 }
15955 SDValue X86TargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
15957 MVT VT = Op.getSimpleValueType();
15959 if (VT.isVector()) return LowerVSETCC(Op, Subtarget, DAG);
15961 assert(((!Subtarget->hasAVX512() && VT == MVT::i8) || (VT == MVT::i1))
15962 && "SetCC type must be 8-bit or 1-bit integer");
15963 SDValue Op0 = Op.getOperand(0);
15964 SDValue Op1 = Op.getOperand(1);
15965 SDLoc dl(Op);
15966 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
15968 // Optimize to BT if possible.
15969 // Lower (X & (1 << N)) == 0 to BT(X, N).
15970 // Lower ((X >>u N) & 1) != 0 to BT(X, N).
15971 // Lower ((X >>s N) & 1) != 0 to BT(X, N).
15972 if (Op0.getOpcode() == ISD::AND && Op0.hasOneUse() &&
15973 Op1.getOpcode() == ISD::Constant &&
15974 cast<ConstantSDNode>(Op1)->isNullValue() &&
15975 (CC == ISD::SETEQ || CC == ISD::SETNE)) {
15976 SDValue NewSetCC = LowerToBT(Op0, CC, dl, DAG);
15977 if (NewSetCC.getNode()) {
15978 if (VT == MVT::i1)
15979 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, NewSetCC);
15980 return NewSetCC;
15981 }
15982 }
15984 // Look for X == 0, X == 1, X != 0, or X != 1. We can simplify some forms of
15986 if (Op1.getOpcode() == ISD::Constant &&
15987 (cast<ConstantSDNode>(Op1)->getZExtValue() == 1 ||
15988 cast<ConstantSDNode>(Op1)->isNullValue()) &&
15989 (CC == ISD::SETEQ || CC == ISD::SETNE)) {
15991 // If the input is a setcc, then reuse the input setcc or use a new one with
15992 // the inverted condition.
15993 if (Op0.getOpcode() == X86ISD::SETCC) {
15994 X86::CondCode CCode = (X86::CondCode)Op0.getConstantOperandVal(0);
15995 bool Invert = (CC == ISD::SETNE) ^
15996 cast<ConstantSDNode>(Op1)->isNullValue();
15997 if (!Invert)
15998 return Op0;
16000 CCode = X86::GetOppositeBranchCondition(CCode);
16001 SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
16002 DAG.getConstant(CCode, MVT::i8),
16003 Op0.getOperand(1));
16004 if (VT == MVT::i1)
16005 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, SetCC);
16006 return SetCC;
16007 }
16008 }
16009 if ((Op0.getValueType() == MVT::i1) && (Op1.getOpcode() == ISD::Constant) &&
16010 (cast<ConstantSDNode>(Op1)->getZExtValue() == 1) &&
16011 (CC == ISD::SETEQ || CC == ISD::SETNE)) {
16013 ISD::CondCode NewCC = ISD::getSetCCInverse(CC, true);
16014 return DAG.getSetCC(dl, VT, Op0, DAG.getConstant(0, MVT::i1), NewCC);
16015 }
16017 bool isFP = Op1.getSimpleValueType().isFloatingPoint();
16018 unsigned X86CC = TranslateX86CC(CC, isFP, Op0, Op1, DAG);
16019 if (X86CC == X86::COND_INVALID)
16020 return SDValue();
16022 SDValue EFLAGS = EmitCmp(Op0, Op1, X86CC, dl, DAG);
16023 EFLAGS = ConvertCmpIfNecessary(EFLAGS, DAG);
16024 SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
16025 DAG.getConstant(X86CC, MVT::i8), EFLAGS);
16026 if (VT == MVT::i1)
16027 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, SetCC);
16028 return SetCC;
16029 }
16031 // isX86LogicalCmp - Return true if opcode is an X86 logical comparison.
16032 static bool isX86LogicalCmp(SDValue Op) {
16033 unsigned Opc = Op.getNode()->getOpcode();
16034 if (Opc == X86ISD::CMP || Opc == X86ISD::COMI || Opc == X86ISD::UCOMI ||
16035 Opc == X86ISD::SAHF)
16036 return true;
16037 if (Op.getResNo() == 1 &&
16038 (Opc == X86ISD::ADD ||
16039 Opc == X86ISD::SUB ||
16040 Opc == X86ISD::ADC ||
16041 Opc == X86ISD::SBB ||
16042 Opc == X86ISD::SMUL ||
16043 Opc == X86ISD::UMUL ||
16044 Opc == X86ISD::INC ||
16045 Opc == X86ISD::DEC ||
16046 Opc == X86ISD::OR ||
16047 Opc == X86ISD::XOR ||
16048 Opc == X86ISD::AND))
16049 return true;
16051 if (Op.getResNo() == 2 && Opc == X86ISD::UMUL)
16052 return true;
16054 return false;
16055 }
16057 static bool isTruncWithZeroHighBitsInput(SDValue V, SelectionDAG &DAG) {
16058 if (V.getOpcode() != ISD::TRUNCATE)
16059 return false;
16061 SDValue VOp0 = V.getOperand(0);
16062 unsigned InBits = VOp0.getValueSizeInBits();
16063 unsigned Bits = V.getValueSizeInBits();
16064 return DAG.MaskedValueIsZero(VOp0, APInt::getHighBitsSet(InBits,InBits-Bits));
16065 }
16067 SDValue X86TargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
16068 bool addTest = true;
16069 SDValue Cond = Op.getOperand(0);
16070 SDValue Op1 = Op.getOperand(1);
16071 SDValue Op2 = Op.getOperand(2);
16072 SDLoc DL(Op);
16073 EVT VT = Op1.getValueType();
16074 SDValue CC;
16076 // Lower fp selects into a CMP/AND/ANDN/OR sequence when the necessary SSE ops
16077 // are available. Otherwise fp cmovs get lowered into a less efficient branch
16078 // sequence later on.
16079 if (Cond.getOpcode() == ISD::SETCC &&
16080 ((Subtarget->hasSSE2() && (VT == MVT::f32 || VT == MVT::f64)) ||
16081 (Subtarget->hasSSE1() && VT == MVT::f32)) &&
16082 VT == Cond.getOperand(0).getValueType() && Cond->hasOneUse()) {
16083 SDValue CondOp0 = Cond.getOperand(0), CondOp1 = Cond.getOperand(1);
16084 int SSECC = translateX86FSETCC(
16085 cast<CondCodeSDNode>(Cond.getOperand(2))->get(), CondOp0, CondOp1);
16087 if (SSECC != 8) {
16088 if (Subtarget->hasAVX512()) {
16089 SDValue Cmp = DAG.getNode(X86ISD::FSETCC, DL, MVT::i1, CondOp0, CondOp1,
16090 DAG.getConstant(SSECC, MVT::i8));
16091 return DAG.getNode(X86ISD::SELECT, DL, VT, Cmp, Op1, Op2);
16092 }
16093 SDValue Cmp = DAG.getNode(X86ISD::FSETCC, DL, VT, CondOp0, CondOp1,
16094 DAG.getConstant(SSECC, MVT::i8));
16095 SDValue AndN = DAG.getNode(X86ISD::FANDN, DL, VT, Cmp, Op2);
16096 SDValue And = DAG.getNode(X86ISD::FAND, DL, VT, Cmp, Op1);
16097 return DAG.getNode(X86ISD::FOR, DL, VT, AndN, And);
16098 }
16099 }
16101 if (Cond.getOpcode() == ISD::SETCC) {
16102 SDValue NewCond = LowerSETCC(Cond, DAG);
16103 if (NewCond.getNode())
16104 Cond = NewCond;
16105 }
16107 // (select (x == 0), -1, y) -> (sign_bit (x - 1)) | y
16108 // (select (x == 0), y, -1) -> ~(sign_bit (x - 1)) | y
16109 // (select (x != 0), y, -1) -> (sign_bit (x - 1)) | y
16110 // (select (x != 0), -1, y) -> ~(sign_bit (x - 1)) | y
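// The common idea (sketch): (cmp x, 1) sets the carry flag exactly when
// x == 0, and SETCC_CARRY(COND_B) materializes that carry as 0 or all-ones
// via SBB, so the select collapses into a sign mask combined with an OR.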
16111 if (Cond.getOpcode() == X86ISD::SETCC &&
16112 Cond.getOperand(1).getOpcode() == X86ISD::CMP &&
16113 isZero(Cond.getOperand(1).getOperand(1))) {
16114 SDValue Cmp = Cond.getOperand(1);
16116 unsigned CondCode =cast<ConstantSDNode>(Cond.getOperand(0))->getZExtValue();
16118 if ((isAllOnes(Op1) || isAllOnes(Op2)) &&
16119 (CondCode == X86::COND_E || CondCode == X86::COND_NE)) {
16120 SDValue Y = isAllOnes(Op2) ? Op1 : Op2;
16122 SDValue CmpOp0 = Cmp.getOperand(0);
16123 // Apply further optimizations for special cases
16124 // (select (x != 0), -1, 0) -> neg & sbb
16125 // (select (x == 0), 0, -1) -> neg & sbb
16126 if (ConstantSDNode *YC = dyn_cast<ConstantSDNode>(Y))
16127 if (YC->isNullValue() &&
16128 (isAllOnes(Op1) == (CondCode == X86::COND_NE))) {
16129 SDVTList VTs = DAG.getVTList(CmpOp0.getValueType(), MVT::i32);
16130 SDValue Neg = DAG.getNode(X86ISD::SUB, DL, VTs,
16131 DAG.getConstant(0, CmpOp0.getValueType()),
16132 CmpOp0);
16133 SDValue Res = DAG.getNode(X86ISD::SETCC_CARRY, DL, Op.getValueType(),
16134 DAG.getConstant(X86::COND_B, MVT::i8),
16135 SDValue(Neg.getNode(), 1));
16136 return Res;
16137 }
16139 Cmp = DAG.getNode(X86ISD::CMP, DL, MVT::i32,
16140 CmpOp0, DAG.getConstant(1, CmpOp0.getValueType()));
16141 Cmp = ConvertCmpIfNecessary(Cmp, DAG);
16143 SDValue Res = // Res = 0 or -1.
16144 DAG.getNode(X86ISD::SETCC_CARRY, DL, Op.getValueType(),
16145 DAG.getConstant(X86::COND_B, MVT::i8), Cmp);
16147 if (isAllOnes(Op1) != (CondCode == X86::COND_E))
16148 Res = DAG.getNOT(DL, Res, Res.getValueType());
16150 ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(Op2);
16151 if (!N2C || !N2C->isNullValue())
16152 Res = DAG.getNode(ISD::OR, DL, Res.getValueType(), Res, Y);
16154 return Res;
16155 }
16156 }
16157 // Look past (and (setcc_carry (cmp ...)), 1).
16158 if (Cond.getOpcode() == ISD::AND &&
16159 Cond.getOperand(0).getOpcode() == X86ISD::SETCC_CARRY) {
16160 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Cond.getOperand(1));
16161 if (C && C->getAPIntValue() == 1)
16162 Cond = Cond.getOperand(0);
16165 // If condition flag is set by a X86ISD::CMP, then use it as the condition
16166 // setting operand in place of the X86ISD::SETCC.
16167 unsigned CondOpcode = Cond.getOpcode();
16168 if (CondOpcode == X86ISD::SETCC ||
16169 CondOpcode == X86ISD::SETCC_CARRY) {
16170 CC = Cond.getOperand(0);
16172 SDValue Cmp = Cond.getOperand(1);
16173 unsigned Opc = Cmp.getOpcode();
16174 MVT VT = Op.getSimpleValueType();
16176 bool IllegalFPCMov = false;
16177 if (VT.isFloatingPoint() && !VT.isVector() &&
16178 !isScalarFPTypeInSSEReg(VT)) // FPStack?
16179 IllegalFPCMov = !hasFPCMov(cast<ConstantSDNode>(CC)->getSExtValue());
16181 if ((isX86LogicalCmp(Cmp) && !IllegalFPCMov) ||
16182 Opc == X86ISD::BT) { // FIXME
16183 Cond = Cmp;
16184 addTest = false;
16185 }
16186 } else if (CondOpcode == ISD::USUBO || CondOpcode == ISD::SSUBO ||
16187 CondOpcode == ISD::UADDO || CondOpcode == ISD::SADDO ||
16188 ((CondOpcode == ISD::UMULO || CondOpcode == ISD::SMULO) &&
16189 Cond.getOperand(0).getValueType() != MVT::i8)) {
16190 SDValue LHS = Cond.getOperand(0);
16191 SDValue RHS = Cond.getOperand(1);
16192 unsigned X86Opcode;
16193 unsigned X86Cond;
16194 SDVTList VTs;
16195 switch (CondOpcode) {
16196 case ISD::UADDO: X86Opcode = X86ISD::ADD; X86Cond = X86::COND_B; break;
16197 case ISD::SADDO: X86Opcode = X86ISD::ADD; X86Cond = X86::COND_O; break;
16198 case ISD::USUBO: X86Opcode = X86ISD::SUB; X86Cond = X86::COND_B; break;
16199 case ISD::SSUBO: X86Opcode = X86ISD::SUB; X86Cond = X86::COND_O; break;
16200 case ISD::UMULO: X86Opcode = X86ISD::UMUL; X86Cond = X86::COND_O; break;
16201 case ISD::SMULO: X86Opcode = X86ISD::SMUL; X86Cond = X86::COND_O; break;
16202 default: llvm_unreachable("unexpected overflowing operator");
16203 }
16204 if (CondOpcode == ISD::UMULO)
16205 VTs = DAG.getVTList(LHS.getValueType(), LHS.getValueType(),
16206 MVT::i32);
16207 else
16208 VTs = DAG.getVTList(LHS.getValueType(), MVT::i32);
16210 SDValue X86Op = DAG.getNode(X86Opcode, DL, VTs, LHS, RHS);
16212 if (CondOpcode == ISD::UMULO)
16213 Cond = X86Op.getValue(2);
16214 else
16215 Cond = X86Op.getValue(1);
16217 CC = DAG.getConstant(X86Cond, MVT::i8);
16218 addTest = false;
16219 }
16221 if (addTest) {
16222 // Look past the truncate if the high bits are known zero.
16223 if (isTruncWithZeroHighBitsInput(Cond, DAG))
16224 Cond = Cond.getOperand(0);
16226 // We know the result of AND is compared against zero. Try to match
16227 // it to BT.
16228 if (Cond.getOpcode() == ISD::AND && Cond.hasOneUse()) {
16229 SDValue NewSetCC = LowerToBT(Cond, ISD::SETNE, DL, DAG);
16230 if (NewSetCC.getNode()) {
16231 CC = NewSetCC.getOperand(0);
16232 Cond = NewSetCC.getOperand(1);
16233 addTest = false;
16234 }
16235 }
16236 }
16238 if (addTest) {
16239 CC = DAG.getConstant(X86::COND_NE, MVT::i8);
16240 Cond = EmitTest(Cond, X86::COND_NE, DL, DAG);
16241 }
16243 // a < b ? -1 : 0 -> RES = ~setcc_carry
16244 // a < b ? 0 : -1 -> RES = setcc_carry
16245 // a >= b ? -1 : 0 -> RES = setcc_carry
16246 // a >= b ? 0 : -1 -> RES = ~setcc_carry
16247 if (Cond.getOpcode() == X86ISD::SUB) {
16248 Cond = ConvertCmpIfNecessary(Cond, DAG);
16249 unsigned CondCode = cast<ConstantSDNode>(CC)->getZExtValue();
16251 if ((CondCode == X86::COND_AE || CondCode == X86::COND_B) &&
16252 (isAllOnes(Op1) || isAllOnes(Op2)) && (isZero(Op1) || isZero(Op2))) {
16253 SDValue Res = DAG.getNode(X86ISD::SETCC_CARRY, DL, Op.getValueType(),
16254 DAG.getConstant(X86::COND_B, MVT::i8), Cond);
16255 if (isAllOnes(Op1) != (CondCode == X86::COND_B))
16256 return DAG.getNOT(DL, Res, Res.getValueType());
16257 return Res;
16258 }
16259 }
16261 // X86 doesn't have an i8 cmov. If both operands are the result of a truncate
16262 // widen the cmov and push the truncate through. This avoids introducing a new
16263 // branch during isel and doesn't add any extensions.
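// E.g. (illustrative): i8 (select c, (trunc i32 %a), (trunc i32 %b)) becomes
// (trunc i8 (X86ISD::CMOV c, %a, %b)), reusing the existing i32 CMOV.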
16264 if (Op.getValueType() == MVT::i8 &&
16265 Op1.getOpcode() == ISD::TRUNCATE && Op2.getOpcode() == ISD::TRUNCATE) {
16266 SDValue T1 = Op1.getOperand(0), T2 = Op2.getOperand(0);
16267 if (T1.getValueType() == T2.getValueType() &&
16268 // Blacklist CopyFromReg to avoid partial register stalls.
16269 T1.getOpcode() != ISD::CopyFromReg && T2.getOpcode()!=ISD::CopyFromReg){
16270 SDVTList VTs = DAG.getVTList(T1.getValueType(), MVT::Glue);
16271 SDValue Cmov = DAG.getNode(X86ISD::CMOV, DL, VTs, T2, T1, CC, Cond);
16272 return DAG.getNode(ISD::TRUNCATE, DL, Op.getValueType(), Cmov);
16273 }
16274 }
16276 // X86ISD::CMOV means set the result (which is operand 1) to the RHS if
16277 // condition is true.
16278 SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::Glue);
16279 SDValue Ops[] = { Op2, Op1, CC, Cond };
16280 return DAG.getNode(X86ISD::CMOV, DL, VTs, Ops);
16283 static SDValue LowerSIGN_EXTEND_AVX512(SDValue Op, const X86Subtarget *Subtarget,
16284 SelectionDAG &DAG) {
16285 MVT VT = Op->getSimpleValueType(0);
16286 SDValue In = Op->getOperand(0);
16287 MVT InVT = In.getSimpleValueType();
16288 MVT VTElt = VT.getVectorElementType();
16289 MVT InVTElt = InVT.getVectorElementType();
16291 SDLoc dl(Op);
16293 if ((InVTElt == MVT::i1) &&
16294 (((Subtarget->hasBWI() && Subtarget->hasVLX() &&
16295 VT.getSizeInBits() <= 256 && VTElt.getSizeInBits() <= 16)) ||
16297 ((Subtarget->hasBWI() && VT.is512BitVector() &&
16298 VTElt.getSizeInBits() <= 16)) ||
16300 ((Subtarget->hasDQI() && Subtarget->hasVLX() &&
16301 VT.getSizeInBits() <= 256 && VTElt.getSizeInBits() >= 32)) ||
16303 ((Subtarget->hasDQI() && VT.is512BitVector() &&
16304 VTElt.getSizeInBits() >= 32))))
16305 return DAG.getNode(X86ISD::VSEXT, dl, VT, In);
16307 unsigned int NumElts = VT.getVectorNumElements();
16309 if (NumElts != 8 && NumElts != 16)
16310 return SDValue();
16312 if (VT.is512BitVector() && InVT.getVectorElementType() != MVT::i1) {
16313 if (In.getOpcode() == X86ISD::VSEXT || In.getOpcode() == X86ISD::VZEXT)
16314 return DAG.getNode(In.getOpcode(), dl, VT, In.getOperand(0));
16315 return DAG.getNode(X86ISD::VSEXT, dl, VT, In);
16318 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
16319 assert (InVT.getVectorElementType() == MVT::i1 && "Unexpected vector type");
16321 MVT ExtVT = (NumElts == 8) ? MVT::v8i64 : MVT::v16i32;
16322 Constant *C = ConstantInt::get(*DAG.getContext(),
16323 APInt::getAllOnesValue(ExtVT.getScalarType().getSizeInBits()));
16325 SDValue CP = DAG.getConstantPool(C, TLI.getPointerTy());
16326 unsigned Alignment = cast<ConstantPoolSDNode>(CP)->getAlignment();
16327 SDValue Ld = DAG.getLoad(ExtVT.getScalarType(), dl, DAG.getEntryNode(), CP,
16328 MachinePointerInfo::getConstantPool(),
16329 false, false, false, Alignment);
16330 SDValue Brcst = DAG.getNode(X86ISD::VBROADCASTM, dl, ExtVT, In, Ld);
16331 if (VT.is512BitVector())
16332 return Brcst;
16333 return DAG.getNode(X86ISD::VTRUNC, dl, VT, Brcst);
16334 }
16336 static SDValue LowerSIGN_EXTEND(SDValue Op, const X86Subtarget *Subtarget,
16337 SelectionDAG &DAG) {
16338 MVT VT = Op->getSimpleValueType(0);
16339 SDValue In = Op->getOperand(0);
16340 MVT InVT = In.getSimpleValueType();
16342 SDLoc dl(Op);
16343 if (VT.is512BitVector() || InVT.getVectorElementType() == MVT::i1)
16344 return LowerSIGN_EXTEND_AVX512(Op, Subtarget, DAG);
16346 if ((VT != MVT::v4i64 || InVT != MVT::v4i32) &&
16347 (VT != MVT::v8i32 || InVT != MVT::v8i16) &&
16348 (VT != MVT::v16i16 || InVT != MVT::v16i8))
16349 return SDValue();
16351 if (Subtarget->hasInt256())
16352 return DAG.getNode(X86ISD::VSEXT, dl, VT, In);
16354 // Optimize vectors in AVX mode
16355 // Sign extend v8i16 to v8i32 and
16356 // v4i32 to v4i64
16358 // Divide input vector into two parts
16359 // for v4i32 the shuffle mask will be { 0, 1, -1, -1} {2, 3, -1, -1}
16360 // use vpmovsx instruction to extend v4i32 -> v2i64; v8i16 -> v4i32
16361 // concat the vectors to original VT
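// As a concrete sketch for v8i16 -> v8i32: shuffle the low and the high half
// into separate vectors, VSEXT each half to v4i32, then CONCAT_VECTORS the two
// halves back into the v8i32 result.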
16363 unsigned NumElems = InVT.getVectorNumElements();
16364 SDValue Undef = DAG.getUNDEF(InVT);
16366 SmallVector<int,8> ShufMask1(NumElems, -1);
16367 for (unsigned i = 0; i != NumElems/2; ++i)
16368 ShufMask1[i] = i;
16370 SDValue OpLo = DAG.getVectorShuffle(InVT, dl, In, Undef, &ShufMask1[0]);
16372 SmallVector<int,8> ShufMask2(NumElems, -1);
16373 for (unsigned i = 0; i != NumElems/2; ++i)
16374 ShufMask2[i] = i + NumElems/2;
16376 SDValue OpHi = DAG.getVectorShuffle(InVT, dl, In, Undef, &ShufMask2[0]);
16378 MVT HalfVT = MVT::getVectorVT(VT.getScalarType(),
16379 VT.getVectorNumElements()/2);
16381 OpLo = DAG.getNode(X86ISD::VSEXT, dl, HalfVT, OpLo);
16382 OpHi = DAG.getNode(X86ISD::VSEXT, dl, HalfVT, OpHi);
16384 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, OpLo, OpHi);
16387 // Lower vector extended loads using a shuffle. If SSSE3 is not available we
16388 // may emit an illegal shuffle but the expansion is still better than scalar
16389 // code. We generate X86ISD::VSEXT for SEXTLOADs if it's available, otherwise
16390 // we'll emit a shuffle and an arithmetic shift.
16391 // FIXME: Is the expansion actually better than scalar code? It doesn't seem so.
16392 // TODO: It is possible to support ZExt by zeroing the undef values during
16393 // the shuffle phase or after the shuffle.
16394 static SDValue LowerExtendedLoad(SDValue Op, const X86Subtarget *Subtarget,
16395 SelectionDAG &DAG) {
16396 MVT RegVT = Op.getSimpleValueType();
16397 assert(RegVT.isVector() && "We only custom lower vector sext loads.");
16398 assert(RegVT.isInteger() &&
16399 "We only custom lower integer vector sext loads.");
16401 // Nothing useful we can do without SSE2 shuffles.
16402 assert(Subtarget->hasSSE2() && "We only custom lower sext loads with SSE2.");
16404 LoadSDNode *Ld = cast<LoadSDNode>(Op.getNode());
16405 SDLoc dl(Ld);
16406 EVT MemVT = Ld->getMemoryVT();
16407 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
16408 unsigned RegSz = RegVT.getSizeInBits();
16410 ISD::LoadExtType Ext = Ld->getExtensionType();
16412 assert((Ext == ISD::EXTLOAD || Ext == ISD::SEXTLOAD)
16413 && "Only anyext and sext are currently implemented.");
16414 assert(MemVT != RegVT && "Cannot extend to the same type");
16415 assert(MemVT.isVector() && "Must load a vector from memory");
16417 unsigned NumElems = RegVT.getVectorNumElements();
16418 unsigned MemSz = MemVT.getSizeInBits();
16419 assert(RegSz > MemSz && "Register size must be greater than the mem size");
16421 if (Ext == ISD::SEXTLOAD && RegSz == 256 && !Subtarget->hasInt256()) {
16422 // The only way in which we have a legal 256-bit vector result but not the
16423 // integer 256-bit operations needed to directly lower a sextload is if we
16424 // have AVX1 but not AVX2. In that case, we can always emit a sextload to
16425 // a 128-bit vector and a normal sign_extend to 256-bits that should get
16426 // correctly legalized. We do this late to allow the canonical form of
16427 // sextload to persist throughout the rest of the DAG combiner -- it wants
16428 // to fold together any extensions it can, and so will fuse a sign_extend
16429 // of an sextload into a sextload targeting a wider value.
16430 SDValue Load;
16431 if (MemSz == 128) {
16432 // Just switch this to a normal load.
16433 assert(TLI.isTypeLegal(MemVT) && "If the memory type is a 128-bit type, "
16434 "it must be a legal 128-bit vector "
16436 Load = DAG.getLoad(MemVT, dl, Ld->getChain(), Ld->getBasePtr(),
16437 Ld->getPointerInfo(), Ld->isVolatile(), Ld->isNonTemporal(),
16438 Ld->isInvariant(), Ld->getAlignment());
16439 } else {
16440 assert(MemSz < 128 &&
16441 "Can't extend a type wider than 128 bits to a 256 bit vector!");
16442 // Do an sext load to a 128-bit vector type. We want to use the same
16443 // number of elements, but elements half as wide. This will end up being
16444 // recursively lowered by this routine, but will succeed as we definitely
16445 // have all the necessary features if we're using AVX1.
16446 EVT HalfEltVT =
16447 EVT::getIntegerVT(*DAG.getContext(), RegVT.getScalarSizeInBits() / 2);
16448 EVT HalfVecVT = EVT::getVectorVT(*DAG.getContext(), HalfEltVT, NumElems);
16449 Load =
16450 DAG.getExtLoad(Ext, dl, HalfVecVT, Ld->getChain(), Ld->getBasePtr(),
16451 Ld->getPointerInfo(), MemVT, Ld->isVolatile(),
16452 Ld->isNonTemporal(), Ld->isInvariant(),
16453 Ld->getAlignment());
16454 }
16456 // Replace chain users with the new chain.
16457 assert(Load->getNumValues() == 2 && "Loads must carry a chain!");
16458 DAG.ReplaceAllUsesOfValueWith(SDValue(Ld, 1), Load.getValue(1));
16460 // Finally, do a normal sign-extend to the desired register.
16461 return DAG.getSExtOrTrunc(Load, dl, RegVT);
16462 }
16464 // All sizes must be a power of two.
16465 assert(isPowerOf2_32(RegSz * MemSz * NumElems) &&
16466 "Non-power-of-two elements are not custom lowered!");
16468 // Attempt to load the original value using scalar loads.
16469 // Find the largest scalar type that divides the total loaded size.
16470 MVT SclrLoadTy = MVT::i8;
16471 for (MVT Tp : MVT::integer_valuetypes()) {
16472 if (TLI.isTypeLegal(Tp) && ((MemSz % Tp.getSizeInBits()) == 0)) {
16473 SclrLoadTy = Tp;
16474 }
16475 }
16477 // On 32bit systems, we can't save 64bit integers. Try bitcasting to F64.
16478 if (TLI.isTypeLegal(MVT::f64) && SclrLoadTy.getSizeInBits() < 64 &&
16479 (64 <= MemSz))
16480 SclrLoadTy = MVT::f64;
16482 // Calculate the number of scalar loads that we need to perform
16483 // in order to load our vector from memory.
16484 unsigned NumLoads = MemSz / SclrLoadTy.getSizeInBits();
16486 assert((Ext != ISD::SEXTLOAD || NumLoads == 1) &&
16487 "Can only lower sext loads with a single scalar load!");
16489 unsigned loadRegSize = RegSz;
16490 if (Ext == ISD::SEXTLOAD && RegSz == 256)
16491 loadRegSize = 128;
16493 // Represent our vector as a sequence of elements which are the
16494 // largest scalar that we can load.
16495 EVT LoadUnitVecVT = EVT::getVectorVT(
16496 *DAG.getContext(), SclrLoadTy, loadRegSize / SclrLoadTy.getSizeInBits());
16498 // Represent the data using the same element type that is stored in
16499 // memory. In practice, we "widen" MemVT.
16501 EVT::getVectorVT(*DAG.getContext(), MemVT.getScalarType(),
16502 loadRegSize / MemVT.getScalarType().getSizeInBits());
16504 assert(WideVecVT.getSizeInBits() == LoadUnitVecVT.getSizeInBits() &&
16505 "Invalid vector type");
16507 // We can't shuffle using an illegal type.
16508 assert(TLI.isTypeLegal(WideVecVT) &&
16509 "We only lower types that form legal widened vector types");
16511 SmallVector<SDValue, 8> Chains;
16512 SDValue Ptr = Ld->getBasePtr();
16513 SDValue Increment =
16514 DAG.getConstant(SclrLoadTy.getSizeInBits() / 8, TLI.getPointerTy());
16515 SDValue Res = DAG.getUNDEF(LoadUnitVecVT);
16517 for (unsigned i = 0; i < NumLoads; ++i) {
16518 // Perform a single load.
16519 SDValue ScalarLoad =
16520 DAG.getLoad(SclrLoadTy, dl, Ld->getChain(), Ptr, Ld->getPointerInfo(),
16521 Ld->isVolatile(), Ld->isNonTemporal(), Ld->isInvariant(),
16522 Ld->getAlignment());
16523 Chains.push_back(ScalarLoad.getValue(1));
16524 // Create the first element type using SCALAR_TO_VECTOR in order to avoid
16525 // another round of DAGCombining.
16526 if (i == 0)
16527 Res = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, LoadUnitVecVT, ScalarLoad);
16528 else
16529 Res = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, LoadUnitVecVT, Res,
16530 ScalarLoad, DAG.getIntPtrConstant(i));
16532 Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, Increment);
16533 }
16535 SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Chains);
16537 // Bitcast the loaded value to a vector of the original element type, in
16538 // the size of the target vector type.
16539 SDValue SlicedVec = DAG.getNode(ISD::BITCAST, dl, WideVecVT, Res);
16540 unsigned SizeRatio = RegSz / MemSz;
16542 if (Ext == ISD::SEXTLOAD) {
16543 // If we have SSE4.1, we can directly emit a VSEXT node.
16544 if (Subtarget->hasSSE41()) {
16545 SDValue Sext = DAG.getNode(X86ISD::VSEXT, dl, RegVT, SlicedVec);
16546 DAG.ReplaceAllUsesOfValueWith(SDValue(Ld, 1), TF);
16547 return Sext;
16548 }
16550 // Otherwise we'll shuffle the small elements in the high bits of the
16551 // larger type and perform an arithmetic shift. If the shift is not legal
16552 // it's better to scalarize.
16553 assert(TLI.isOperationLegalOrCustom(ISD::SRA, RegVT) &&
16554 "We can't implement a sext load without an arithmetic right shift!");
16556 // Redistribute the loaded elements into the different locations.
16557 SmallVector<int, 16> ShuffleVec(NumElems * SizeRatio, -1);
16558 for (unsigned i = 0; i != NumElems; ++i)
16559 ShuffleVec[i * SizeRatio + SizeRatio - 1] = i;
16561 SDValue Shuff = DAG.getVectorShuffle(
16562 WideVecVT, dl, SlicedVec, DAG.getUNDEF(WideVecVT), &ShuffleVec[0]);
16564 Shuff = DAG.getNode(ISD::BITCAST, dl, RegVT, Shuff);
16566 // Build the arithmetic shift.
16567 unsigned Amt = RegVT.getVectorElementType().getSizeInBits() -
16568 MemVT.getVectorElementType().getSizeInBits();
16569 Shuff =
16570 DAG.getNode(ISD::SRA, dl, RegVT, Shuff, DAG.getConstant(Amt, RegVT));
16572 DAG.ReplaceAllUsesOfValueWith(SDValue(Ld, 1), TF);
16573 return Shuff;
16574 }
16576 // Redistribute the loaded elements into the different locations.
16577 SmallVector<int, 16> ShuffleVec(NumElems * SizeRatio, -1);
16578 for (unsigned i = 0; i != NumElems; ++i)
16579 ShuffleVec[i * SizeRatio] = i;
16581 SDValue Shuff = DAG.getVectorShuffle(WideVecVT, dl, SlicedVec,
16582 DAG.getUNDEF(WideVecVT), &ShuffleVec[0]);
16584 // Bitcast to the requested type.
16585 Shuff = DAG.getNode(ISD::BITCAST, dl, RegVT, Shuff);
16586 DAG.ReplaceAllUsesOfValueWith(SDValue(Ld, 1), TF);
16587 return Shuff;
16588 }
16590 // isAndOrOfSetCCs - Return true if node is an ISD::AND or ISD::OR of two
16591 // X86ISD::SETCC nodes, each of which has no other use apart from the
16592 // AND / OR.
16593 static bool isAndOrOfSetCCs(SDValue Op, unsigned &Opc) {
16594 Opc = Op.getOpcode();
16595 if (Opc != ISD::OR && Opc != ISD::AND)
16596 return false;
16597 return (Op.getOperand(0).getOpcode() == X86ISD::SETCC &&
16598 Op.getOperand(0).hasOneUse() &&
16599 Op.getOperand(1).getOpcode() == X86ISD::SETCC &&
16600 Op.getOperand(1).hasOneUse());
16601 }
16603 // isXor1OfSetCC - Return true if node is an ISD::XOR of an X86ISD::SETCC
16604 // and 1, and the SETCC node has a single use.
16605 static bool isXor1OfSetCC(SDValue Op) {
16606 if (Op.getOpcode() != ISD::XOR)
16607 return false;
16608 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(Op.getOperand(1));
16609 if (N1C && N1C->getAPIntValue() == 1) {
16610 return Op.getOperand(0).getOpcode() == X86ISD::SETCC &&
16611 Op.getOperand(0).hasOneUse();
16612 }
16613 return false;
16614 }
16616 SDValue X86TargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
16617 bool addTest = true;
16618 SDValue Chain = Op.getOperand(0);
16619 SDValue Cond = Op.getOperand(1);
16620 SDValue Dest = Op.getOperand(2);
16621 SDLoc dl(Op);
16622 SDValue CC;
16623 bool Inverted = false;
16625 if (Cond.getOpcode() == ISD::SETCC) {
16626 // Check for setcc([su]{add,sub,mul}o == 0).
16627 if (cast<CondCodeSDNode>(Cond.getOperand(2))->get() == ISD::SETEQ &&
16628 isa<ConstantSDNode>(Cond.getOperand(1)) &&
16629 cast<ConstantSDNode>(Cond.getOperand(1))->isNullValue() &&
16630 Cond.getOperand(0).getResNo() == 1 &&
16631 (Cond.getOperand(0).getOpcode() == ISD::SADDO ||
16632 Cond.getOperand(0).getOpcode() == ISD::UADDO ||
16633 Cond.getOperand(0).getOpcode() == ISD::SSUBO ||
16634 Cond.getOperand(0).getOpcode() == ISD::USUBO ||
16635 Cond.getOperand(0).getOpcode() == ISD::SMULO ||
16636 Cond.getOperand(0).getOpcode() == ISD::UMULO)) {
16637 Inverted = true;
16638 Cond = Cond.getOperand(0);
16639 } else {
16640 SDValue NewCond = LowerSETCC(Cond, DAG);
16641 if (NewCond.getNode())
16642 Cond = NewCond;
16643 }
16644 }
16646 // FIXME: LowerXALUO doesn't handle these!!
16647 else if (Cond.getOpcode() == X86ISD::ADD ||
16648 Cond.getOpcode() == X86ISD::SUB ||
16649 Cond.getOpcode() == X86ISD::SMUL ||
16650 Cond.getOpcode() == X86ISD::UMUL)
16651 Cond = LowerXALUO(Cond, DAG);
16654 // Look past (and (setcc_carry (cmp ...)), 1).
16655 if (Cond.getOpcode() == ISD::AND &&
16656 Cond.getOperand(0).getOpcode() == X86ISD::SETCC_CARRY) {
16657 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Cond.getOperand(1));
16658 if (C && C->getAPIntValue() == 1)
16659 Cond = Cond.getOperand(0);
16660 }
16662 // If condition flag is set by a X86ISD::CMP, then use it as the condition
16663 // setting operand in place of the X86ISD::SETCC.
16664 unsigned CondOpcode = Cond.getOpcode();
16665 if (CondOpcode == X86ISD::SETCC ||
16666 CondOpcode == X86ISD::SETCC_CARRY) {
16667 CC = Cond.getOperand(0);
16669 SDValue Cmp = Cond.getOperand(1);
16670 unsigned Opc = Cmp.getOpcode();
16671 // FIXME: WHY THE SPECIAL CASING OF LogicalCmp??
16672 if (isX86LogicalCmp(Cmp) || Opc == X86ISD::BT) {
16673 Cond = Cmp;
16674 addTest = false;
16675 } else {
16676 switch (cast<ConstantSDNode>(CC)->getZExtValue()) {
16677 default: break;
16678 case X86::COND_O:
16679 case X86::COND_B:
16680 // These can only come from an arithmetic instruction with overflow,
16681 // e.g. SADDO, UADDO.
16682 Cond = Cond.getNode()->getOperand(1);
16683 addTest = false;
16684 break;
16685 }
16686 }
16687 }
16688 CondOpcode = Cond.getOpcode();
16689 if (CondOpcode == ISD::UADDO || CondOpcode == ISD::SADDO ||
16690 CondOpcode == ISD::USUBO || CondOpcode == ISD::SSUBO ||
16691 ((CondOpcode == ISD::UMULO || CondOpcode == ISD::SMULO) &&
16692 Cond.getOperand(0).getValueType() != MVT::i8)) {
16693 SDValue LHS = Cond.getOperand(0);
16694 SDValue RHS = Cond.getOperand(1);
16695 unsigned X86Opcode;
16696 unsigned X86Cond;
16697 SDVTList VTs;
16698 // Keep this in sync with LowerXALUO, otherwise we might create redundant
16699 // instructions that can't be removed afterwards (i.e. X86ISD::ADD and
16701 switch (CondOpcode) {
16702 case ISD::UADDO: X86Opcode = X86ISD::ADD; X86Cond = X86::COND_B; break;
16703 case ISD::SADDO:
16704 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS))
16705 if (C->isOne()) {
16706 X86Opcode = X86ISD::INC; X86Cond = X86::COND_O;
16707 break;
16708 }
16709 X86Opcode = X86ISD::ADD; X86Cond = X86::COND_O; break;
16710 case ISD::USUBO: X86Opcode = X86ISD::SUB; X86Cond = X86::COND_B; break;
16711 case ISD::SSUBO:
16712 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS))
16713 if (C->isOne()) {
16714 X86Opcode = X86ISD::DEC; X86Cond = X86::COND_O;
16715 break;
16716 }
16717 X86Opcode = X86ISD::SUB; X86Cond = X86::COND_O; break;
16718 case ISD::UMULO: X86Opcode = X86ISD::UMUL; X86Cond = X86::COND_O; break;
16719 case ISD::SMULO: X86Opcode = X86ISD::SMUL; X86Cond = X86::COND_O; break;
16720 default: llvm_unreachable("unexpected overflowing operator");
16721 }
16722 if (Inverted)
16723 X86Cond = X86::GetOppositeBranchCondition((X86::CondCode)X86Cond);
16724 if (CondOpcode == ISD::UMULO)
16725 VTs = DAG.getVTList(LHS.getValueType(), LHS.getValueType(),
16726 MVT::i32);
16727 else
16728 VTs = DAG.getVTList(LHS.getValueType(), MVT::i32);
16730 SDValue X86Op = DAG.getNode(X86Opcode, dl, VTs, LHS, RHS);
16732 if (CondOpcode == ISD::UMULO)
16733 Cond = X86Op.getValue(2);
16735 Cond = X86Op.getValue(1);
16737 CC = DAG.getConstant(X86Cond, MVT::i8);
16738 addTest = false;
16739 } else {
16740 unsigned CondOpc;
16741 if (Cond.hasOneUse() && isAndOrOfSetCCs(Cond, CondOpc)) {
16742 SDValue Cmp = Cond.getOperand(0).getOperand(1);
16743 if (CondOpc == ISD::OR) {
16744 // Also, recognize the pattern generated by an FCMP_UNE. We can emit
16745 // two branches instead of an explicit OR instruction with a
16747 if (Cmp == Cond.getOperand(1).getOperand(1) &&
16748 isX86LogicalCmp(Cmp)) {
16749 CC = Cond.getOperand(0).getOperand(0);
16750 Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
16751 Chain, Dest, CC, Cmp);
16752 CC = Cond.getOperand(1).getOperand(0);
16753 Cond = Cmp;
16754 addTest = false;
16755 }
16756 } else { // ISD::AND
16757 // Also, recognize the pattern generated by an FCMP_OEQ. We can emit
16758 // two branches instead of an explicit AND instruction with a
16759 // separate test. However, we only do this if this block doesn't
16760 // have a fall-through edge, because this requires an explicit
16761 // jmp when the condition is false.
16762 if (Cmp == Cond.getOperand(1).getOperand(1) &&
16763 isX86LogicalCmp(Cmp) &&
16764 Op.getNode()->hasOneUse()) {
16765 X86::CondCode CCode =
16766 (X86::CondCode)Cond.getOperand(0).getConstantOperandVal(0);
16767 CCode = X86::GetOppositeBranchCondition(CCode);
16768 CC = DAG.getConstant(CCode, MVT::i8);
16769 SDNode *User = *Op.getNode()->use_begin();
16770 // Look for an unconditional branch following this conditional branch.
16771 // We need this because we need to reverse the successors in order
16772 // to implement FCMP_OEQ.
16773 if (User->getOpcode() == ISD::BR) {
16774 SDValue FalseBB = User->getOperand(1);
16775 SDNode *NewBR =
16776 DAG.UpdateNodeOperands(User, User->getOperand(0), Dest);
16777 assert(NewBR == User);
16778 (void)NewBR;
16779 Dest = FalseBB;
16781 Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
16782 Chain, Dest, CC, Cmp);
16783 X86::CondCode CCode =
16784 (X86::CondCode)Cond.getOperand(1).getConstantOperandVal(0);
16785 CCode = X86::GetOppositeBranchCondition(CCode);
16786 CC = DAG.getConstant(CCode, MVT::i8);
16787 Cond = Cmp;
16788 addTest = false;
16789 }
16790 }
16791 }
16792 } else if (Cond.hasOneUse() && isXor1OfSetCC(Cond)) {
16793 // Recognize for xorb (setcc), 1 patterns. The xor inverts the condition.
16794 // It should be transformed during dag combiner except when the condition
16795 // is set by a arithmetics with overflow node.
16796 X86::CondCode CCode =
16797 (X86::CondCode)Cond.getOperand(0).getConstantOperandVal(0);
16798 CCode = X86::GetOppositeBranchCondition(CCode);
16799 CC = DAG.getConstant(CCode, MVT::i8);
16800 Cond = Cond.getOperand(0).getOperand(1);
16801 addTest = false;
16802 } else if (Cond.getOpcode() == ISD::SETCC &&
16803 cast<CondCodeSDNode>(Cond.getOperand(2))->get() == ISD::SETOEQ) {
16804 // For FCMP_OEQ, we can emit
16805 // two branches instead of an explicit AND instruction with a
16806 // separate test. However, we only do this if this block doesn't
16807 // have a fall-through edge, because this requires an explicit
16808 // jmp when the condition is false.
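// Roughly: oeq(a, b) is emitted as JNE + JP to the false block with the
// successors swapped, since an ordered a == b fails exactly when ZF is clear
// or PF is set.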
16809 if (Op.getNode()->hasOneUse()) {
16810 SDNode *User = *Op.getNode()->use_begin();
16811 // Look for an unconditional branch following this conditional branch.
16812 // We need this because we need to reverse the successors in order
16813 // to implement FCMP_OEQ.
16814 if (User->getOpcode() == ISD::BR) {
16815 SDValue FalseBB = User->getOperand(1);
16816 SDNode *NewBR =
16817 DAG.UpdateNodeOperands(User, User->getOperand(0), Dest);
16818 assert(NewBR == User);
16819 (void)NewBR;
16820 Dest = FalseBB;
16822 SDValue Cmp = DAG.getNode(X86ISD::CMP, dl, MVT::i32,
16823 Cond.getOperand(0), Cond.getOperand(1));
16824 Cmp = ConvertCmpIfNecessary(Cmp, DAG);
16825 CC = DAG.getConstant(X86::COND_NE, MVT::i8);
16826 Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
16827 Chain, Dest, CC, Cmp);
16828 CC = DAG.getConstant(X86::COND_P, MVT::i8);
16829 Cond = Cmp;
16830 addTest = false;
16831 }
16832 }
16833 } else if (Cond.getOpcode() == ISD::SETCC &&
16834 cast<CondCodeSDNode>(Cond.getOperand(2))->get() == ISD::SETUNE) {
16835 // For FCMP_UNE, we can emit
16836 // two branches instead of an explicit AND instruction with a
16837 // separate test. However, we only do this if this block doesn't
16838 // have a fall-through edge, because this requires an explicit
16839 // jmp when the condition is false.
16840 if (Op.getNode()->hasOneUse()) {
16841 SDNode *User = *Op.getNode()->use_begin();
16842 // Look for an unconditional branch following this conditional branch.
16843 // We need this because we need to reverse the successors in order
16844 // to implement FCMP_UNE.
16845 if (User->getOpcode() == ISD::BR) {
16846 SDValue FalseBB = User->getOperand(1);
16847 SDNode *NewBR =
16848 DAG.UpdateNodeOperands(User, User->getOperand(0), Dest);
16849 assert(NewBR == User);
16850 (void)NewBR;
16851 Dest = FalseBB;
16852 SDValue Cmp = DAG.getNode(X86ISD::CMP, dl, MVT::i32,
16853 Cond.getOperand(0), Cond.getOperand(1));
16854 Cmp = ConvertCmpIfNecessary(Cmp, DAG);
16855 CC = DAG.getConstant(X86::COND_NE, MVT::i8);
16856 Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
16857 Chain, Dest, CC, Cmp);
16858 CC = DAG.getConstant(X86::COND_NP, MVT::i8);
16859 Cond = Cmp;
16860 addTest = false;
16861 }
16862 }
16863 }
16867 if (addTest) {
16869 // Look past the truncate if the high bits are known zero.
16869 if (isTruncWithZeroHighBitsInput(Cond, DAG))
16870 Cond = Cond.getOperand(0);
16872 // We know the result of AND is compared against zero. Try to match
16873 // it to BT.
16874 if (Cond.getOpcode() == ISD::AND && Cond.hasOneUse()) {
16875 SDValue NewSetCC = LowerToBT(Cond, ISD::SETNE, dl, DAG);
16876 if (NewSetCC.getNode()) {
16877 CC = NewSetCC.getOperand(0);
16878 Cond = NewSetCC.getOperand(1);
16879 addTest = false;
16880 }
16881 }
16882 }
16884 if (addTest) {
16885 X86::CondCode X86Cond = Inverted ? X86::COND_E : X86::COND_NE;
16886 CC = DAG.getConstant(X86Cond, MVT::i8);
16887 Cond = EmitTest(Cond, X86Cond, dl, DAG);
16888 }
16889 Cond = ConvertCmpIfNecessary(Cond, DAG);
16890 return DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
16891 Chain, Dest, CC, Cond);
16892 }
16894 // Lower dynamic stack allocation to _alloca call for Cygwin/Mingw targets.
16895 // Calls to _alloca are needed to probe the stack when allocating more than 4k
16896 // bytes in one go. Touching the stack at 4K increments is necessary to ensure
16897 // that the guard pages used by the OS virtual memory manager are allocated in
16898 // correct sequence.
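// (Illustrative) allocating, say, 8192 bytes therefore cannot simply adjust
// the stack pointer once; the probe routine touches each intervening 4K page
// so the OS commits the guard pages in order.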
16900 X86TargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
16901 SelectionDAG &DAG) const {
16902 MachineFunction &MF = DAG.getMachineFunction();
16903 bool SplitStack = MF.shouldSplitStack();
16904 bool Lower = (Subtarget->isOSWindows() && !Subtarget->isTargetMachO()) ||
16905 SplitStack;
16906 SDLoc dl(Op);
16908 if (!Lower) {
16909 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
16910 SDNode* Node = Op.getNode();
16912 unsigned SPReg = TLI.getStackPointerRegisterToSaveRestore();
16913 assert(SPReg && "Target cannot require DYNAMIC_STACKALLOC expansion and"
16914 " not tell us which reg is the stack pointer!");
16915 EVT VT = Node->getValueType(0);
16916 SDValue Tmp1 = SDValue(Node, 0);
16917 SDValue Tmp2 = SDValue(Node, 1);
16918 SDValue Tmp3 = Node->getOperand(2);
16919 SDValue Chain = Tmp1.getOperand(0);
16921 // Chain the dynamic stack allocation so that it doesn't modify the stack
16922 // pointer when other instructions are using the stack.
16923 Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(0, true),
16926 SDValue Size = Tmp2.getOperand(1);
16927 SDValue SP = DAG.getCopyFromReg(Chain, dl, SPReg, VT);
16928 Chain = SP.getValue(1);
16929 unsigned Align = cast<ConstantSDNode>(Tmp3)->getZExtValue();
16930 const TargetFrameLowering &TFI = *Subtarget->getFrameLowering();
16931 unsigned StackAlign = TFI.getStackAlignment();
16932 Tmp1 = DAG.getNode(ISD::SUB, dl, VT, SP, Size); // Value
16933 if (Align > StackAlign)
16934 Tmp1 = DAG.getNode(ISD::AND, dl, VT, Tmp1,
16935 DAG.getConstant(-(uint64_t)Align, VT));
16936 Chain = DAG.getCopyToReg(Chain, dl, SPReg, Tmp1); // Output chain
16938 Tmp2 = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(0, true),
16939 DAG.getIntPtrConstant(0, true), SDValue(),
16942 SDValue Ops[2] = { Tmp1, Tmp2 };
16943 return DAG.getMergeValues(Ops, dl);
16945 }
16947 SDValue Chain = Op.getOperand(0);
16948 SDValue Size = Op.getOperand(1);
16949 unsigned Align = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue();
16950 EVT VT = Op.getNode()->getValueType(0);
16952 bool Is64Bit = Subtarget->is64Bit();
16953 EVT SPTy = getPointerTy();
16955 if (SplitStack) {
16956 MachineRegisterInfo &MRI = MF.getRegInfo();
16958 if (Is64Bit) {
16959 // The 64-bit implementation of segmented stacks needs to clobber both r10
16960 // and r11, which makes it impossible to use along with nested parameters.
16961 const Function *F = MF.getFunction();
16963 for (Function::const_arg_iterator I = F->arg_begin(), E = F->arg_end();
16964 I != E; ++I)
16965 if (I->hasNestAttr())
16966 report_fatal_error("Cannot use segmented stacks with functions that "
16967 "have nested arguments.");
16970 const TargetRegisterClass *AddrRegClass =
16971 getRegClassFor(getPointerTy());
16972 unsigned Vreg = MRI.createVirtualRegister(AddrRegClass);
16973 Chain = DAG.getCopyToReg(Chain, dl, Vreg, Size);
16974 SDValue Value = DAG.getNode(X86ISD::SEG_ALLOCA, dl, SPTy, Chain,
16975 DAG.getRegister(Vreg, SPTy));
16976 SDValue Ops1[2] = { Value, Chain };
16977 return DAG.getMergeValues(Ops1, dl);
16978 } else {
16979 SDValue Flag;
16980 const unsigned Reg = (Subtarget->isTarget64BitLP64() ? X86::RAX : X86::EAX);
16982 Chain = DAG.getCopyToReg(Chain, dl, Reg, Size, Flag);
16983 Flag = Chain.getValue(1);
16984 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
16986 Chain = DAG.getNode(X86ISD::WIN_ALLOCA, dl, NodeTys, Chain, Flag);
16988 const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
16989 unsigned SPReg = RegInfo->getStackRegister();
16990 SDValue SP = DAG.getCopyFromReg(Chain, dl, SPReg, SPTy);
16991 Chain = SP.getValue(1);
16993 if (Align) {
16994 SP = DAG.getNode(ISD::AND, dl, VT, SP.getValue(0),
16995 DAG.getConstant(-(uint64_t)Align, VT));
16996 Chain = DAG.getCopyToReg(Chain, dl, SPReg, SP);
16997 }
16999 SDValue Ops1[2] = { SP, Chain };
17000 return DAG.getMergeValues(Ops1, dl);
17001 }
17002 }
17004 SDValue X86TargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const {
17005 MachineFunction &MF = DAG.getMachineFunction();
17006 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
17008 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
17010 SDLoc DL(Op);
17011 if (!Subtarget->is64Bit() || Subtarget->isTargetWin64()) {
17012 // vastart just stores the address of the VarArgsFrameIndex slot into the
17013 // memory location argument.
17014 SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(),
17015 getPointerTy());
17016 return DAG.getStore(Op.getOperand(0), DL, FR, Op.getOperand(1),
17017 MachinePointerInfo(SV), false, false, 0);
17018 }
17020 // __va_list_tag:
17021 //   gp_offset         (0 - 6 * 8)
17022 //   fp_offset         (48 - 48 + 8 * 16)
17023 //   overflow_arg_area (point to parameters coming in memory).
17024 //   reg_save_area
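// (Illustrative) gp_offset counts bytes into the register save area across
// the six GPR argument slots (6 * 8 = 48), and fp_offset starts at 48 and
// spans the 8 * 16 bytes of XMM save slots, matching the SysV AMD64 ABI.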
17025 SmallVector<SDValue, 8> MemOps;
17026 SDValue FIN = Op.getOperand(1);
17028 SDValue Store = DAG.getStore(Op.getOperand(0), DL,
17029 DAG.getConstant(FuncInfo->getVarArgsGPOffset(),
17030 MVT::i32),
17031 FIN, MachinePointerInfo(SV), false, false, 0);
17032 MemOps.push_back(Store);
17035 FIN = DAG.getNode(ISD::ADD, DL, getPointerTy(),
17036 FIN, DAG.getIntPtrConstant(4));
17037 Store = DAG.getStore(Op.getOperand(0), DL,
17038 DAG.getConstant(FuncInfo->getVarArgsFPOffset(),
17039 MVT::i32),
17040 FIN, MachinePointerInfo(SV, 4), false, false, 0);
17041 MemOps.push_back(Store);
17043 // Store ptr to overflow_arg_area
17044 FIN = DAG.getNode(ISD::ADD, DL, getPointerTy(),
17045 FIN, DAG.getIntPtrConstant(4));
17046 SDValue OVFIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(),
17047 getPointerTy());
17048 Store = DAG.getStore(Op.getOperand(0), DL, OVFIN, FIN,
17049 MachinePointerInfo(SV, 8),
17050 false, false, 0);
17051 MemOps.push_back(Store);
17053 // Store ptr to reg_save_area.
17054 FIN = DAG.getNode(ISD::ADD, DL, getPointerTy(),
17055 FIN, DAG.getIntPtrConstant(8));
17056 SDValue RSFIN = DAG.getFrameIndex(FuncInfo->getRegSaveFrameIndex(),
17057 getPointerTy());
17058 Store = DAG.getStore(Op.getOperand(0), DL, RSFIN, FIN,
17059 MachinePointerInfo(SV, 16), false, false, 0);
17060 MemOps.push_back(Store);
17061 return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOps);
17062 }
17064 SDValue X86TargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG) const {
17065 assert(Subtarget->is64Bit() &&
17066 "LowerVAARG only handles 64-bit va_arg!");
17067 assert((Subtarget->isTargetLinux() ||
17068 Subtarget->isTargetDarwin()) &&
17069 "Unhandled target in LowerVAARG");
17070 assert(Op.getNode()->getNumOperands() == 4);
17071 SDValue Chain = Op.getOperand(0);
17072 SDValue SrcPtr = Op.getOperand(1);
17073 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
17074 unsigned Align = Op.getConstantOperandVal(3);
17075 SDLoc dl(Op);
17077 EVT ArgVT = Op.getNode()->getValueType(0);
17078 Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
17079 uint32_t ArgSize = getDataLayout()->getTypeAllocSize(ArgTy);
17080 uint8_t ArgMode;
17082 // Decide which area this value should be read from.
17083 // TODO: Implement the AMD64 ABI in its entirety. This simple
17084 // selection mechanism works only for the basic types.
17085 if (ArgVT == MVT::f80) {
17086 llvm_unreachable("va_arg for f80 not yet implemented");
17087 } else if (ArgVT.isFloatingPoint() && ArgSize <= 16 /*bytes*/) {
17088 ArgMode = 2; // Argument passed in XMM register. Use fp_offset.
17089 } else if (ArgVT.isInteger() && ArgSize <= 32 /*bytes*/) {
17090 ArgMode = 1; // Argument passed in GPR64 register(s). Use gp_offset.
17092 llvm_unreachable("Unhandled argument type in LowerVAARG");
17093 }
17095 if (ArgMode == 2) {
17096 // Sanity Check: Make sure using fp_offset makes sense.
17097 assert(!DAG.getTarget().Options.UseSoftFloat &&
17098 !(DAG.getMachineFunction().getFunction()->hasFnAttribute(
17099 Attribute::NoImplicitFloat)) &&
17100 Subtarget->hasSSE1());
17101 }
17103 // Insert VAARG_64 node into the DAG
17104 // VAARG_64 returns two values: Variable Argument Address, Chain
17105 SmallVector<SDValue, 11> InstOps;
17106 InstOps.push_back(Chain);
17107 InstOps.push_back(SrcPtr);
17108 InstOps.push_back(DAG.getConstant(ArgSize, MVT::i32));
17109 InstOps.push_back(DAG.getConstant(ArgMode, MVT::i8));
17110 InstOps.push_back(DAG.getConstant(Align, MVT::i32));
17111 SDVTList VTs = DAG.getVTList(getPointerTy(), MVT::Other);
17112 SDValue VAARG = DAG.getMemIntrinsicNode(X86ISD::VAARG_64, dl,
17113 VTs, InstOps, MVT::i64,
17114 MachinePointerInfo(SV),
17115 /*Align=*/0,
17116 /*Volatile=*/false,
17118 /*WriteMem=*/true);
17119 Chain = VAARG.getValue(1);
17121 // Load the next argument and return it
17122 return DAG.getLoad(ArgVT, dl,
17125 MachinePointerInfo(),
17126 false, false, false, 0);
17129 static SDValue LowerVACOPY(SDValue Op, const X86Subtarget *Subtarget,
17130 SelectionDAG &DAG) {
17131 // X86-64 va_list is a struct { i32, i32, i8*, i8* }.
17132 assert(Subtarget->is64Bit() && "This code only handles 64-bit va_copy!");
17133 SDValue Chain = Op.getOperand(0);
17134 SDValue DstPtr = Op.getOperand(1);
17135 SDValue SrcPtr = Op.getOperand(2);
17136 const Value *DstSV = cast<SrcValueSDNode>(Op.getOperand(3))->getValue();
  const Value *SrcSV = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();
  SDLoc DL(Op);

  return DAG.getMemcpy(Chain, DL, DstPtr, SrcPtr,
                       DAG.getIntPtrConstant(24), 8, /*isVolatile*/false,
                       /*AlwaysInline=*/false,
                       MachinePointerInfo(DstSV), MachinePointerInfo(SrcSV));
}
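// A minimal sketch of the IR this handles (value names hypothetical):
//   call void @llvm.va_copy(i8* %dst, i8* %src)
// On x86-64 the whole 24-byte va_list struct is copied with a single
// 8-byte-aligned memcpy, which is exactly the getMemcpy call above.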
17146 // getTargetVShiftByConstNode - Handle vector element shifts where the shift
17147 // amount is a constant. Takes immediate version of shift as input.
17148 static SDValue getTargetVShiftByConstNode(unsigned Opc, SDLoc dl, MVT VT,
17149 SDValue SrcOp, uint64_t ShiftAmt,
17150 SelectionDAG &DAG) {
  MVT ElementType = VT.getVectorElementType();

  // Fold this packed shift into its first operand if ShiftAmt is 0.
  if (ShiftAmt == 0)
    return SrcOp;

  // Check for ShiftAmt >= element width.
  if (ShiftAmt >= ElementType.getSizeInBits()) {
    if (Opc == X86ISD::VSRAI)
      ShiftAmt = ElementType.getSizeInBits() - 1;
    else
      return DAG.getConstant(0, VT);
  }

  assert((Opc == X86ISD::VSHLI || Opc == X86ISD::VSRLI || Opc == X86ISD::VSRAI)
       && "Unknown target vector shift-by-constant node");

  // Fold this packed vector shift into a build vector if SrcOp is a
  // vector of Constants or UNDEFs, and SrcOp valuetype is the same as VT.
  if (VT == SrcOp.getSimpleValueType() &&
      ISD::isBuildVectorOfConstantSDNodes(SrcOp.getNode())) {
    SmallVector<SDValue, 8> Elts;
    unsigned NumElts = SrcOp->getNumOperands();
    ConstantSDNode *ND;

    switch(Opc) {
    default: llvm_unreachable(nullptr);
    case X86ISD::VSHLI:
      for (unsigned i=0; i!=NumElts; ++i) {
        SDValue CurrentOp = SrcOp->getOperand(i);
        if (CurrentOp->getOpcode() == ISD::UNDEF) {
          Elts.push_back(CurrentOp);
          continue;
        }
        ND = cast<ConstantSDNode>(CurrentOp);
        const APInt &C = ND->getAPIntValue();
        Elts.push_back(DAG.getConstant(C.shl(ShiftAmt), ElementType));
      }
      break;
    case X86ISD::VSRLI:
      for (unsigned i=0; i!=NumElts; ++i) {
        SDValue CurrentOp = SrcOp->getOperand(i);
        if (CurrentOp->getOpcode() == ISD::UNDEF) {
          Elts.push_back(CurrentOp);
          continue;
        }
        ND = cast<ConstantSDNode>(CurrentOp);
        const APInt &C = ND->getAPIntValue();
        Elts.push_back(DAG.getConstant(C.lshr(ShiftAmt), ElementType));
      }
      break;
    case X86ISD::VSRAI:
      for (unsigned i=0; i!=NumElts; ++i) {
        SDValue CurrentOp = SrcOp->getOperand(i);
        if (CurrentOp->getOpcode() == ISD::UNDEF) {
          Elts.push_back(CurrentOp);
          continue;
        }
        ND = cast<ConstantSDNode>(CurrentOp);
        const APInt &C = ND->getAPIntValue();
        Elts.push_back(DAG.getConstant(C.ashr(ShiftAmt), ElementType));
      }
      break;
    }

    return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Elts);
  }

  return DAG.getNode(Opc, dl, VT, SrcOp, DAG.getConstant(ShiftAmt, MVT::i8));
}
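// Worked instance of the build_vector fold above (operands assumed):
//   (vshli (v4i32 build_vector 1, 2, undef, 8), 2)
// folds to
//   (v4i32 build_vector 4, 8, undef, 32)
// with undef lanes passed through; out-of-range logical shifts were already
// folded to zero earlier in this function.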
17222 // getTargetVShiftNode - Handle vector element shifts where the shift amount
17223 // may or may not be a constant. Takes immediate version of shift as input.
17224 static SDValue getTargetVShiftNode(unsigned Opc, SDLoc dl, MVT VT,
17225 SDValue SrcOp, SDValue ShAmt,
17226 SelectionDAG &DAG) {
  MVT SVT = ShAmt.getSimpleValueType();
  assert((SVT == MVT::i32 || SVT == MVT::i64) && "Unexpected value type!");

  // Catch shift-by-constant.
  if (ConstantSDNode *CShAmt = dyn_cast<ConstantSDNode>(ShAmt))
    return getTargetVShiftByConstNode(Opc, dl, VT, SrcOp,
                                      CShAmt->getZExtValue(), DAG);

  // Change opcode to non-immediate version.
  switch (Opc) {
  default: llvm_unreachable("Unknown target vector shift node");
  case X86ISD::VSHLI: Opc = X86ISD::VSHL; break;
  case X86ISD::VSRLI: Opc = X86ISD::VSRL; break;
  case X86ISD::VSRAI: Opc = X86ISD::VSRA; break;
  }

  const X86Subtarget &Subtarget =
      static_cast<const X86Subtarget &>(DAG.getSubtarget());
  if (Subtarget.hasSSE41() && ShAmt.getOpcode() == ISD::ZERO_EXTEND &&
      ShAmt.getOperand(0).getSimpleValueType() == MVT::i16) {
    // Let the shuffle legalizer expand this shift amount node.
    SDValue Op0 = ShAmt.getOperand(0);
    Op0 = DAG.getNode(ISD::SCALAR_TO_VECTOR, SDLoc(Op0), MVT::v8i16, Op0);
    ShAmt = getShuffleVectorZeroOrUndef(Op0, 0, true, &Subtarget, DAG);
  } else {
    // Need to build a vector containing the shift amount.
    // SSE/AVX packed shifts only use the lower 64 bits of the shift count.
    SmallVector<SDValue, 4> ShOps;
    ShOps.push_back(ShAmt);
    if (SVT == MVT::i32) {
      ShOps.push_back(DAG.getConstant(0, SVT));
      ShOps.push_back(DAG.getUNDEF(SVT));
    }
    ShOps.push_back(DAG.getUNDEF(SVT));

    MVT BVT = SVT == MVT::i32 ? MVT::v4i32 : MVT::v2i64;
    ShAmt = DAG.getNode(ISD::BUILD_VECTOR, dl, BVT, ShOps);
  }

  // The return type has to be a 128-bit type with the same element
  // type as the input type.
  MVT EltVT = VT.getVectorElementType();
  EVT ShVT = MVT::getVectorVT(EltVT, 128/EltVT.getSizeInBits());

  ShAmt = DAG.getNode(ISD::BITCAST, dl, ShVT, ShAmt);
  return DAG.getNode(Opc, dl, VT, SrcOp, ShAmt);
}
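// Illustrative shape of the variable-amount path (lanes assumed): for an i32
// amount the else-branch above builds
//   ShAmt = (v4i32 build_vector amt, 0, undef, undef)
// and bitcasts it to a 128-bit vector of VT's element type; the packed shift
// only reads the low 64 bits of that count vector, so the upper lanes may be
// undef.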
17275 /// \brief Return (and \p Op, \p Mask) for compare instructions or
17276 /// (vselect \p Mask, \p Op, \p PreservedSrc) for others along with the
17277 /// necessary casting for \p Mask when lowering masking intrinsics.
17278 static SDValue getVectorMaskingNode(SDValue Op, SDValue Mask,
17279 SDValue PreservedSrc,
17280 const X86Subtarget *Subtarget,
17281 SelectionDAG &DAG) {
17282 EVT VT = Op.getValueType();
17283 EVT MaskVT = EVT::getVectorVT(*DAG.getContext(),
17284 MVT::i1, VT.getVectorNumElements());
17285 EVT BitcastVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
                                     Mask.getValueType().getSizeInBits());
    SDLoc dl(Op);

    assert(MaskVT.isSimple() && "invalid mask type");

    if (isAllOnes(Mask))
      return Op;

    // In case when MaskVT equals v2i1 or v4i1, low 2 or 4 elements
    // are extracted by EXTRACT_SUBVECTOR.
    SDValue VMask = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MaskVT,
                                DAG.getNode(ISD::BITCAST, dl, BitcastVT, Mask),
                                DAG.getIntPtrConstant(0));

    switch (Op.getOpcode()) {
    default: break;
    case X86ISD::PCMPEQM:
    case X86ISD::PCMPGTM:
    case X86ISD::CMPM:
    case X86ISD::CMPMU:
      return DAG.getNode(ISD::AND, dl, VT, Op, VMask);
    }
    if (PreservedSrc.getOpcode() == ISD::UNDEF)
      PreservedSrc = getZeroVector(VT, Subtarget, DAG, dl);
    return DAG.getNode(ISD::VSELECT, dl, VT, VMask, Op, PreservedSrc);
}
17313 /// \brief Creates an SDNode for a predicated scalar operation.
17314 /// \returns (X86vselect \p Mask, \p Op, \p PreservedSrc).
/// The mask is coming as MVT::i8 and it should be truncated
/// to MVT::i1 while lowering masking intrinsics.
/// The main difference between ScalarMaskingNode and VectorMaskingNode is that
/// the former uses "X86select" instead of "vselect"; we just can't create the
/// "vselect" node for a scalar instruction.
17320 static SDValue getScalarMaskingNode(SDValue Op, SDValue Mask,
17321 SDValue PreservedSrc,
17322 const X86Subtarget *Subtarget,
17323 SelectionDAG &DAG) {
  if (isAllOnes(Mask))
    return Op;

  EVT VT = Op.getValueType();
  SDLoc dl(Op);
  // The mask should be of type MVT::i1.
  SDValue IMask = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, Mask);

  if (PreservedSrc.getOpcode() == ISD::UNDEF)
    PreservedSrc = getZeroVector(VT, Subtarget, DAG, dl);
  return DAG.getNode(X86ISD::SELECT, dl, VT, IMask, Op, PreservedSrc);
}
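// Example shape (operands hypothetical): a masked scalar op with mask %m,
// result Op and pass-through %src lowers to
//   (X86select (trunc %m to i1), Op, %src)
// i.e. the freshly computed scalar is taken only when the low mask bit is set.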
17337 static SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, const X86Subtarget *Subtarget,
17338 SelectionDAG &DAG) {
  SDLoc dl(Op);
  unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
  EVT VT = Op.getValueType();
  const IntrinsicData* IntrData = getIntrinsicWithoutChain(IntNo);
  if (IntrData) {
    switch(IntrData->Type) {
17345 case INTR_TYPE_1OP:
17346 return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(), Op.getOperand(1));
17347 case INTR_TYPE_2OP:
      return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(), Op.getOperand(1),
                         Op.getOperand(2));
    case INTR_TYPE_3OP:
17351 return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(), Op.getOperand(1),
17352 Op.getOperand(2), Op.getOperand(3));
17353 case INTR_TYPE_1OP_MASK_RM: {
17354 SDValue Src = Op.getOperand(1);
17355 SDValue Src0 = Op.getOperand(2);
17356 SDValue Mask = Op.getOperand(3);
17357 SDValue RoundingMode = Op.getOperand(4);
      return getVectorMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT, Src,
                                              RoundingMode),
                                  Mask, Src0, Subtarget, DAG);
    }
    case INTR_TYPE_SCALAR_MASK_RM: {
      SDValue Src1 = Op.getOperand(1);
      SDValue Src2 = Op.getOperand(2);
      SDValue Src0 = Op.getOperand(3);
      SDValue Mask = Op.getOperand(4);
      SDValue RoundingMode = Op.getOperand(5);
      return getScalarMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT, Src1, Src2,
                                              RoundingMode),
                                  Mask, Src0, Subtarget, DAG);
    }
17372 case INTR_TYPE_2OP_MASK: {
17373 SDValue Mask = Op.getOperand(4);
17374 SDValue PassThru = Op.getOperand(3);
17375 unsigned IntrWithRoundingModeOpcode = IntrData->Opc1;
17376 if (IntrWithRoundingModeOpcode != 0) {
17377 unsigned Round = cast<ConstantSDNode>(Op.getOperand(5))->getZExtValue();
17378 if (Round != X86::STATIC_ROUNDING::CUR_DIRECTION) {
17379 return getVectorMaskingNode(DAG.getNode(IntrWithRoundingModeOpcode,
17380 dl, Op.getValueType(),
17381 Op.getOperand(1), Op.getOperand(2),
17382 Op.getOperand(3), Op.getOperand(5)),
                                      Mask, PassThru, Subtarget, DAG);
        }
      }
      return getVectorMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT,
                                              Op.getOperand(1),
                                              Op.getOperand(2)),
                                  Mask, PassThru, Subtarget, DAG);
    }
17391 case FMA_OP_MASK: {
17392 SDValue Src1 = Op.getOperand(1);
17393 SDValue Src2 = Op.getOperand(2);
17394 SDValue Src3 = Op.getOperand(3);
17395 SDValue Mask = Op.getOperand(4);
17396 unsigned IntrWithRoundingModeOpcode = IntrData->Opc1;
17397 if (IntrWithRoundingModeOpcode != 0) {
17398 SDValue Rnd = Op.getOperand(5);
17399 if (cast<ConstantSDNode>(Rnd)->getZExtValue() !=
17400 X86::STATIC_ROUNDING::CUR_DIRECTION)
17401 return getVectorMaskingNode(DAG.getNode(IntrWithRoundingModeOpcode,
17402 dl, Op.getValueType(),
17403 Src1, Src2, Src3, Rnd),
                                    Mask, Src1, Subtarget, DAG);
      }
      return getVectorMaskingNode(DAG.getNode(IntrData->Opc0,
                                              dl, Op.getValueType(),
                                              Src1, Src2, Src3),
                                  Mask, Src1, Subtarget, DAG);
    }
    case CMP_MASK:
    case CMP_MASK_CC: {
17413 // Comparison intrinsics with masks.
17414 // Example of transformation:
17415 // (i8 (int_x86_avx512_mask_pcmpeq_q_128
      //             (v2i64 %a), (v2i64 %b), (i8 %mask))) ->
      // (i8 (bitcast
      //   (v8i1 (insert_subvector undef,
17419 // (v2i1 (and (PCMPEQM %a, %b),
17420 // (extract_subvector
17421 // (v8i1 (bitcast %mask)), 0))), 0))))
17422 EVT VT = Op.getOperand(1).getValueType();
17423 EVT MaskVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
17424 VT.getVectorNumElements());
17425 SDValue Mask = Op.getOperand((IntrData->Type == CMP_MASK_CC) ? 4 : 3);
17426 EVT BitcastVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
                                       Mask.getValueType().getSizeInBits());
      SDValue Cmp;
      if (IntrData->Type == CMP_MASK_CC) {
        Cmp = DAG.getNode(IntrData->Opc0, dl, MaskVT, Op.getOperand(1),
                          Op.getOperand(2), Op.getOperand(3));
      } else {
        assert(IntrData->Type == CMP_MASK && "Unexpected intrinsic type!");
        Cmp = DAG.getNode(IntrData->Opc0, dl, MaskVT, Op.getOperand(1),
                          Op.getOperand(2));
      }
      SDValue CmpMask = getVectorMaskingNode(Cmp, Mask,
                                             DAG.getTargetConstant(0, MaskVT),
                                             Subtarget, DAG);
      SDValue Res = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, BitcastVT,
                                DAG.getUNDEF(BitcastVT), CmpMask,
                                DAG.getIntPtrConstant(0));
      return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
    }
    case COMI: { // Comparison intrinsics
17446 ISD::CondCode CC = (ISD::CondCode)IntrData->Opc1;
17447 SDValue LHS = Op.getOperand(1);
17448 SDValue RHS = Op.getOperand(2);
17449 unsigned X86CC = TranslateX86CC(CC, true, LHS, RHS, DAG);
17450 assert(X86CC != X86::COND_INVALID && "Unexpected illegal condition!");
17451 SDValue Cond = DAG.getNode(IntrData->Opc0, dl, MVT::i32, LHS, RHS);
17452 SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
17453 DAG.getConstant(X86CC, MVT::i8), Cond);
      return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC);
    }
    case VSHIFT:
      return getTargetVShiftNode(IntrData->Opc0, dl, Op.getSimpleValueType(),
                                 Op.getOperand(1), Op.getOperand(2), DAG);
    case VSHIFT_MASK:
      return getVectorMaskingNode(getTargetVShiftNode(IntrData->Opc0, dl,
                                                      Op.getSimpleValueType(),
                                                      Op.getOperand(1),
                                                      Op.getOperand(2), DAG),
                                  Op.getOperand(4), Op.getOperand(3), Subtarget,
                                  DAG);
    case COMPRESS_EXPAND_IN_REG: {
17467 SDValue Mask = Op.getOperand(3);
17468 SDValue DataToCompress = Op.getOperand(1);
17469 SDValue PassThru = Op.getOperand(2);
17470 if (isAllOnes(Mask)) // return data as is
17471 return Op.getOperand(1);
17472 EVT VT = Op.getValueType();
17473 EVT MaskVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
17474 VT.getVectorNumElements());
17475 EVT BitcastVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
17476 Mask.getValueType().getSizeInBits());
17478 SDValue VMask = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MaskVT,
17479 DAG.getNode(ISD::BITCAST, dl, BitcastVT, Mask),
17480 DAG.getIntPtrConstant(0));
      return DAG.getNode(IntrData->Opc0, dl, VT, VMask, DataToCompress,
                         PassThru);
    }
    case BLEND: {
      SDValue Mask = Op.getOperand(3);
17487 EVT VT = Op.getValueType();
17488 EVT MaskVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
17489 VT.getVectorNumElements());
17490 EVT BitcastVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
17491 Mask.getValueType().getSizeInBits());
17493 SDValue VMask = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MaskVT,
17494 DAG.getNode(ISD::BITCAST, dl, BitcastVT, Mask),
17495 DAG.getIntPtrConstant(0));
      return DAG.getNode(IntrData->Opc0, dl, VT, VMask, Op.getOperand(1),
                         Op.getOperand(2));
    }
    default:
      break;
    }
  }

  switch (IntNo) {
  default: return SDValue();    // Don't custom lower most intrinsics.
17507 case Intrinsic::x86_avx512_mask_valign_q_512:
17508 case Intrinsic::x86_avx512_mask_valign_d_512:
17509 // Vector source operands are swapped.
17510 return getVectorMaskingNode(DAG.getNode(X86ISD::VALIGN, dl,
                                            Op.getValueType(), Op.getOperand(2),
                                            Op.getOperand(1),
                                            Op.getOperand(3)),
                                Op.getOperand(5), Op.getOperand(4),
                                Subtarget, DAG);

  // ptest and testp intrinsics. The intrinsics these come from are designed to
  // return an integer value, not just an instruction, so lower them to the
  // ptest or testp pattern and a setcc for the result.
17520 case Intrinsic::x86_sse41_ptestz:
17521 case Intrinsic::x86_sse41_ptestc:
17522 case Intrinsic::x86_sse41_ptestnzc:
17523 case Intrinsic::x86_avx_ptestz_256:
17524 case Intrinsic::x86_avx_ptestc_256:
17525 case Intrinsic::x86_avx_ptestnzc_256:
17526 case Intrinsic::x86_avx_vtestz_ps:
17527 case Intrinsic::x86_avx_vtestc_ps:
17528 case Intrinsic::x86_avx_vtestnzc_ps:
17529 case Intrinsic::x86_avx_vtestz_pd:
17530 case Intrinsic::x86_avx_vtestc_pd:
17531 case Intrinsic::x86_avx_vtestnzc_pd:
17532 case Intrinsic::x86_avx_vtestz_ps_256:
17533 case Intrinsic::x86_avx_vtestc_ps_256:
17534 case Intrinsic::x86_avx_vtestnzc_ps_256:
17535 case Intrinsic::x86_avx_vtestz_pd_256:
17536 case Intrinsic::x86_avx_vtestc_pd_256:
17537 case Intrinsic::x86_avx_vtestnzc_pd_256: {
    bool IsTestPacked = false;
    unsigned X86CC;
    switch (IntNo) {
    default: llvm_unreachable("Bad fallthrough in Intrinsic lowering.");
    case Intrinsic::x86_avx_vtestz_ps:
    case Intrinsic::x86_avx_vtestz_pd:
    case Intrinsic::x86_avx_vtestz_ps_256:
    case Intrinsic::x86_avx_vtestz_pd_256:
      IsTestPacked = true; // Fallthrough
    case Intrinsic::x86_sse41_ptestz:
    case Intrinsic::x86_avx_ptestz_256:
      // ZF = 1
      X86CC = X86::COND_E;
      break;
    case Intrinsic::x86_avx_vtestc_ps:
    case Intrinsic::x86_avx_vtestc_pd:
    case Intrinsic::x86_avx_vtestc_ps_256:
    case Intrinsic::x86_avx_vtestc_pd_256:
      IsTestPacked = true; // Fallthrough
    case Intrinsic::x86_sse41_ptestc:
    case Intrinsic::x86_avx_ptestc_256:
      // CF = 1
      X86CC = X86::COND_B;
      break;
    case Intrinsic::x86_avx_vtestnzc_ps:
    case Intrinsic::x86_avx_vtestnzc_pd:
    case Intrinsic::x86_avx_vtestnzc_ps_256:
    case Intrinsic::x86_avx_vtestnzc_pd_256:
      IsTestPacked = true; // Fallthrough
    case Intrinsic::x86_sse41_ptestnzc:
    case Intrinsic::x86_avx_ptestnzc_256:
      // ZF and CF = 0
      X86CC = X86::COND_A;
      break;
    }

    SDValue LHS = Op.getOperand(1);
17575 SDValue RHS = Op.getOperand(2);
17576 unsigned TestOpc = IsTestPacked ? X86ISD::TESTP : X86ISD::PTEST;
17577 SDValue Test = DAG.getNode(TestOpc, dl, MVT::i32, LHS, RHS);
17578 SDValue CC = DAG.getConstant(X86CC, MVT::i8);
17579 SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8, CC, Test);
    return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC);
  }
  case Intrinsic::x86_avx512_kortestz_w:
  case Intrinsic::x86_avx512_kortestc_w: {
    unsigned X86CC =
        (IntNo == Intrinsic::x86_avx512_kortestz_w) ? X86::COND_E : X86::COND_B;
17585 SDValue LHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i1, Op.getOperand(1));
17586 SDValue RHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i1, Op.getOperand(2));
17587 SDValue CC = DAG.getConstant(X86CC, MVT::i8);
17588 SDValue Test = DAG.getNode(X86ISD::KORTEST, dl, MVT::i32, LHS, RHS);
17589 SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i1, CC, Test);
    return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC);
  }

  case Intrinsic::x86_sse42_pcmpistria128:
17594 case Intrinsic::x86_sse42_pcmpestria128:
17595 case Intrinsic::x86_sse42_pcmpistric128:
17596 case Intrinsic::x86_sse42_pcmpestric128:
17597 case Intrinsic::x86_sse42_pcmpistrio128:
17598 case Intrinsic::x86_sse42_pcmpestrio128:
17599 case Intrinsic::x86_sse42_pcmpistris128:
17600 case Intrinsic::x86_sse42_pcmpestris128:
17601 case Intrinsic::x86_sse42_pcmpistriz128:
  case Intrinsic::x86_sse42_pcmpestriz128: {
    unsigned Opcode;
    unsigned X86CC;
    switch (IntNo) {
    default: llvm_unreachable("Impossible intrinsic");  // Can't reach here.
    case Intrinsic::x86_sse42_pcmpistria128:
      Opcode = X86ISD::PCMPISTRI;
      X86CC = X86::COND_A;
      break;
    case Intrinsic::x86_sse42_pcmpestria128:
      Opcode = X86ISD::PCMPESTRI;
      X86CC = X86::COND_A;
      break;
    case Intrinsic::x86_sse42_pcmpistric128:
      Opcode = X86ISD::PCMPISTRI;
      X86CC = X86::COND_B;
      break;
    case Intrinsic::x86_sse42_pcmpestric128:
      Opcode = X86ISD::PCMPESTRI;
      X86CC = X86::COND_B;
      break;
    case Intrinsic::x86_sse42_pcmpistrio128:
      Opcode = X86ISD::PCMPISTRI;
      X86CC = X86::COND_O;
      break;
    case Intrinsic::x86_sse42_pcmpestrio128:
      Opcode = X86ISD::PCMPESTRI;
      X86CC = X86::COND_O;
      break;
    case Intrinsic::x86_sse42_pcmpistris128:
      Opcode = X86ISD::PCMPISTRI;
      X86CC = X86::COND_S;
      break;
    case Intrinsic::x86_sse42_pcmpestris128:
      Opcode = X86ISD::PCMPESTRI;
      X86CC = X86::COND_S;
      break;
    case Intrinsic::x86_sse42_pcmpistriz128:
      Opcode = X86ISD::PCMPISTRI;
      X86CC = X86::COND_E;
      break;
    case Intrinsic::x86_sse42_pcmpestriz128:
      Opcode = X86ISD::PCMPESTRI;
      X86CC = X86::COND_E;
      break;
    }
    SmallVector<SDValue, 5> NewOps(Op->op_begin()+1, Op->op_end());
17649 SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
17650 SDValue PCMP = DAG.getNode(Opcode, dl, VTs, NewOps);
17651 SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
17652 DAG.getConstant(X86CC, MVT::i8),
17653 SDValue(PCMP.getNode(), 1));
    return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC);
  }

  case Intrinsic::x86_sse42_pcmpistri128:
  case Intrinsic::x86_sse42_pcmpestri128: {
    unsigned Opcode;
    if (IntNo == Intrinsic::x86_sse42_pcmpistri128)
      Opcode = X86ISD::PCMPISTRI;
    else
      Opcode = X86ISD::PCMPESTRI;

    SmallVector<SDValue, 5> NewOps(Op->op_begin()+1, Op->op_end());
    SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
    return DAG.getNode(Opcode, dl, VTs, NewOps);
  }
  }
}
17672 static SDValue getGatherNode(unsigned Opc, SDValue Op, SelectionDAG &DAG,
17673 SDValue Src, SDValue Mask, SDValue Base,
17674 SDValue Index, SDValue ScaleOp, SDValue Chain,
17675 const X86Subtarget * Subtarget) {
  SDLoc dl(Op);
  ConstantSDNode *C = dyn_cast<ConstantSDNode>(ScaleOp);
17678 assert(C && "Invalid scale type");
17679 SDValue Scale = DAG.getTargetConstant(C->getZExtValue(), MVT::i8);
17680 EVT MaskVT = MVT::getVectorVT(MVT::i1,
                                Index.getSimpleValueType().getVectorNumElements());
  SDValue MaskInReg;
  ConstantSDNode *MaskC = dyn_cast<ConstantSDNode>(Mask);
  if (MaskC)
    MaskInReg = DAG.getTargetConstant(MaskC->getSExtValue(), MaskVT);
  else
    MaskInReg = DAG.getNode(ISD::BITCAST, dl, MaskVT, Mask);
17688 SDVTList VTs = DAG.getVTList(Op.getValueType(), MaskVT, MVT::Other);
17689 SDValue Disp = DAG.getTargetConstant(0, MVT::i32);
17690 SDValue Segment = DAG.getRegister(0, MVT::i32);
17691 if (Src.getOpcode() == ISD::UNDEF)
17692 Src = getZeroVector(Op.getValueType(), Subtarget, DAG, dl);
17693 SDValue Ops[] = {Src, MaskInReg, Base, Scale, Index, Disp, Segment, Chain};
17694 SDNode *Res = DAG.getMachineNode(Opc, dl, VTs, Ops);
17695 SDValue RetOps[] = { SDValue(Res, 0), SDValue(Res, 2) };
  return DAG.getMergeValues(RetOps, dl);
}
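// Note: the operand order {Src, MaskInReg, Base, Scale, Index, Disp, Segment,
// Chain} mirrors the X86 memory-operand 5-tuple (base, scale, index,
// displacement, segment) used by the gather pseudo-instructions. Result 1
// (the written-back mask) is dropped; only the data (result 0) and the chain
// (result 2) are returned to the caller.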
17699 static SDValue getScatterNode(unsigned Opc, SDValue Op, SelectionDAG &DAG,
17700 SDValue Src, SDValue Mask, SDValue Base,
17701 SDValue Index, SDValue ScaleOp, SDValue Chain) {
  SDLoc dl(Op);
  ConstantSDNode *C = dyn_cast<ConstantSDNode>(ScaleOp);
  assert(C && "Invalid scale type");
  SDValue Scale = DAG.getTargetConstant(C->getZExtValue(), MVT::i8);
  SDValue Disp = DAG.getTargetConstant(0, MVT::i32);
  SDValue Segment = DAG.getRegister(0, MVT::i32);
  EVT MaskVT = MVT::getVectorVT(MVT::i1,
                                Index.getSimpleValueType().getVectorNumElements());
  SDValue MaskInReg;
  ConstantSDNode *MaskC = dyn_cast<ConstantSDNode>(Mask);
  if (MaskC)
    MaskInReg = DAG.getTargetConstant(MaskC->getSExtValue(), MaskVT);
  else
    MaskInReg = DAG.getNode(ISD::BITCAST, dl, MaskVT, Mask);
17716 SDVTList VTs = DAG.getVTList(MaskVT, MVT::Other);
17717 SDValue Ops[] = {Base, Scale, Index, Disp, Segment, MaskInReg, Src, Chain};
17718 SDNode *Res = DAG.getMachineNode(Opc, dl, VTs, Ops);
  return SDValue(Res, 1);
}
17722 static SDValue getPrefetchNode(unsigned Opc, SDValue Op, SelectionDAG &DAG,
17723 SDValue Mask, SDValue Base, SDValue Index,
17724 SDValue ScaleOp, SDValue Chain) {
  SDLoc dl(Op);
  ConstantSDNode *C = dyn_cast<ConstantSDNode>(ScaleOp);
  assert(C && "Invalid scale type");
  SDValue Scale = DAG.getTargetConstant(C->getZExtValue(), MVT::i8);
  SDValue Disp = DAG.getTargetConstant(0, MVT::i32);
  SDValue Segment = DAG.getRegister(0, MVT::i32);
  EVT MaskVT =
    MVT::getVectorVT(MVT::i1, Index.getSimpleValueType().getVectorNumElements());
  SDValue MaskInReg;
  ConstantSDNode *MaskC = dyn_cast<ConstantSDNode>(Mask);
  if (MaskC)
    MaskInReg = DAG.getTargetConstant(MaskC->getSExtValue(), MaskVT);
  else
    MaskInReg = DAG.getNode(ISD::BITCAST, dl, MaskVT, Mask);
17739 //SDVTList VTs = DAG.getVTList(MVT::Other);
17740 SDValue Ops[] = {MaskInReg, Base, Scale, Index, Disp, Segment, Chain};
17741 SDNode *Res = DAG.getMachineNode(Opc, dl, MVT::Other, Ops);
  return SDValue(Res, 0);
}
17745 // getReadPerformanceCounter - Handles the lowering of builtin intrinsics that
17746 // read performance monitor counters (x86_rdpmc).
17747 static void getReadPerformanceCounter(SDNode *N, SDLoc DL,
17748 SelectionDAG &DAG, const X86Subtarget *Subtarget,
17749 SmallVectorImpl<SDValue> &Results) {
  assert(N->getNumOperands() == 3 && "Unexpected number of operands!");
  SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue);
  SDValue LO, HI;

  // The ECX register is used to select the index of the performance counter
  // to read.
  SDValue Chain = DAG.getCopyToReg(N->getOperand(0), DL, X86::ECX,
                                   N->getOperand(2));
  SDValue rd = DAG.getNode(X86ISD::RDPMC_DAG, DL, Tys, Chain);

  // Reads the content of a 64-bit performance counter and returns it in the
  // registers EDX:EAX.
  if (Subtarget->is64Bit()) {
    LO = DAG.getCopyFromReg(rd, DL, X86::RAX, MVT::i64, rd.getValue(1));
    HI = DAG.getCopyFromReg(LO.getValue(1), DL, X86::RDX, MVT::i64,
                            LO.getValue(2));
  } else {
    LO = DAG.getCopyFromReg(rd, DL, X86::EAX, MVT::i32, rd.getValue(1));
    HI = DAG.getCopyFromReg(LO.getValue(1), DL, X86::EDX, MVT::i32,
                            LO.getValue(2));
  }
  Chain = HI.getValue(1);
17773 if (Subtarget->is64Bit()) {
17774 // The EAX register is loaded with the low-order 32 bits. The EDX register
17775 // is loaded with the supported high-order bits of the counter.
17776 SDValue Tmp = DAG.getNode(ISD::SHL, DL, MVT::i64, HI,
17777 DAG.getConstant(32, MVT::i8));
17778 Results.push_back(DAG.getNode(ISD::OR, DL, MVT::i64, LO, Tmp));
    Results.push_back(Chain);
    return;
  }

  // Use a buildpair to merge the two 32-bit values into a 64-bit one.
  SDValue Ops[] = { LO, HI };
  SDValue Pair = DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, Ops);
  Results.push_back(Pair);
  Results.push_back(Chain);
}
17790 // getReadTimeStampCounter - Handles the lowering of builtin intrinsics that
17791 // read the time stamp counter (x86_rdtsc and x86_rdtscp). This function is
17792 // also used to custom lower READCYCLECOUNTER nodes.
17793 static void getReadTimeStampCounter(SDNode *N, SDLoc DL, unsigned Opcode,
17794 SelectionDAG &DAG, const X86Subtarget *Subtarget,
17795 SmallVectorImpl<SDValue> &Results) {
  SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue);
  SDValue rd = DAG.getNode(Opcode, DL, Tys, N->getOperand(0));
  SDValue LO, HI;

  // The processor's time-stamp counter (a 64-bit MSR) is stored into the
  // EDX:EAX registers. EDX is loaded with the high-order 32 bits of the MSR
  // and the EAX register is loaded with the low-order 32 bits.
  if (Subtarget->is64Bit()) {
    LO = DAG.getCopyFromReg(rd, DL, X86::RAX, MVT::i64, rd.getValue(1));
    HI = DAG.getCopyFromReg(LO.getValue(1), DL, X86::RDX, MVT::i64,
                            LO.getValue(2));
  } else {
    LO = DAG.getCopyFromReg(rd, DL, X86::EAX, MVT::i32, rd.getValue(1));
    HI = DAG.getCopyFromReg(LO.getValue(1), DL, X86::EDX, MVT::i32,
                            LO.getValue(2));
  }
  SDValue Chain = HI.getValue(1);
17814 if (Opcode == X86ISD::RDTSCP_DAG) {
17815 assert(N->getNumOperands() == 3 && "Unexpected number of operands!");
17817 // Instruction RDTSCP loads the IA32:TSC_AUX_MSR (address C000_0103H) into
17818 // the ECX register. Add 'ecx' explicitly to the chain.
    SDValue ecx = DAG.getCopyFromReg(Chain, DL, X86::ECX, MVT::i32,
                                     HI.getValue(2));
    // Explicitly store the content of ECX at the location passed in input
    // to the 'rdtscp' intrinsic.
    Chain = DAG.getStore(ecx.getValue(1), DL, ecx, N->getOperand(2),
                         MachinePointerInfo(), false, false, 0);
  }
17827 if (Subtarget->is64Bit()) {
17828 // The EDX register is loaded with the high-order 32 bits of the MSR, and
17829 // the EAX register is loaded with the low-order 32 bits.
17830 SDValue Tmp = DAG.getNode(ISD::SHL, DL, MVT::i64, HI,
17831 DAG.getConstant(32, MVT::i8));
17832 Results.push_back(DAG.getNode(ISD::OR, DL, MVT::i64, LO, Tmp));
    Results.push_back(Chain);
    return;
  }

  // Use a buildpair to merge the two 32-bit values into a 64-bit one.
  SDValue Ops[] = { LO, HI };
  SDValue Pair = DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, Ops);
  Results.push_back(Pair);
  Results.push_back(Chain);
}
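// Worked example of the merge above (counter value assumed): with
// EDX:EAX = 0x00000002:0x00000001 the 64-bit path computes
//   (HI << 32) | LO = 0x0000000200000001
// and the 32-bit path reaches the same i64 via BUILD_PAIR(LO, HI).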
17844 static SDValue LowerREADCYCLECOUNTER(SDValue Op, const X86Subtarget *Subtarget,
17845 SelectionDAG &DAG) {
  SmallVector<SDValue, 2> Results;
  SDLoc DL(Op);
  getReadTimeStampCounter(Op.getNode(), DL, X86ISD::RDTSC_DAG, DAG, Subtarget,
                          Results);
  return DAG.getMergeValues(Results, DL);
}
17854 static SDValue LowerINTRINSIC_W_CHAIN(SDValue Op, const X86Subtarget *Subtarget,
17855 SelectionDAG &DAG) {
  unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();

  const IntrinsicData* IntrData = getIntrinsicWithChain(IntNo);
  if (!IntrData)
    return SDValue();

  SDLoc dl(Op);
  switch(IntrData->Type) {
  default:
    llvm_unreachable("Unknown Intrinsic Type");
    break;
  case RDSEED:
  case RDRAND: {
    // Emit the node with the right value type.
17870 SDVTList VTs = DAG.getVTList(Op->getValueType(0), MVT::Glue, MVT::Other);
17871 SDValue Result = DAG.getNode(IntrData->Opc0, dl, VTs, Op.getOperand(0));
17873 // If the value returned by RDRAND/RDSEED was valid (CF=1), return 1.
    // Otherwise return the value from Rand, which is always 0, cast to i32.
17875 SDValue Ops[] = { DAG.getZExtOrTrunc(Result, dl, Op->getValueType(1)),
17876 DAG.getConstant(1, Op->getValueType(1)),
17877 DAG.getConstant(X86::COND_B, MVT::i32),
17878 SDValue(Result.getNode(), 1) };
17879 SDValue isValid = DAG.getNode(X86ISD::CMOV, dl,
                                  DAG.getVTList(Op->getValueType(1), MVT::Glue),
                                  Ops);

    // Return { result, isValid, chain }.
    return DAG.getNode(ISD::MERGE_VALUES, dl, Op->getVTList(), Result, isValid,
                       SDValue(Result.getNode(), 2));
  }
  case GATHER: {
    //gather(v1, mask, index, base, scale);
17889 SDValue Chain = Op.getOperand(0);
17890 SDValue Src = Op.getOperand(2);
17891 SDValue Base = Op.getOperand(3);
17892 SDValue Index = Op.getOperand(4);
17893 SDValue Mask = Op.getOperand(5);
17894 SDValue Scale = Op.getOperand(6);
    return getGatherNode(IntrData->Opc0, Op, DAG, Src, Mask, Base, Index,
                         Scale, Chain, Subtarget);
  }
  case SCATTER: {
    //scatter(base, mask, index, v1, scale);
17900 SDValue Chain = Op.getOperand(0);
17901 SDValue Base = Op.getOperand(2);
17902 SDValue Mask = Op.getOperand(3);
17903 SDValue Index = Op.getOperand(4);
17904 SDValue Src = Op.getOperand(5);
17905 SDValue Scale = Op.getOperand(6);
    return getScatterNode(IntrData->Opc0, Op, DAG, Src, Mask, Base, Index,
                          Scale, Chain);
  }
  case PREFETCH: {
    SDValue Hint = Op.getOperand(6);
    unsigned HintVal;
    if (dyn_cast<ConstantSDNode> (Hint) == nullptr ||
17912 (HintVal = dyn_cast<ConstantSDNode> (Hint)->getZExtValue()) > 1)
17913 llvm_unreachable("Wrong prefetch hint in intrinsic: should be 0 or 1");
17914 unsigned Opcode = (HintVal ? IntrData->Opc1 : IntrData->Opc0);
17915 SDValue Chain = Op.getOperand(0);
17916 SDValue Mask = Op.getOperand(2);
17917 SDValue Index = Op.getOperand(3);
17918 SDValue Base = Op.getOperand(4);
17919 SDValue Scale = Op.getOperand(5);
    return getPrefetchNode(Opcode, Op, DAG, Mask, Base, Index, Scale, Chain);
  }
  // Read Time Stamp Counter (RDTSC) and Processor ID (RDTSCP).
  case RDTSC: {
    SmallVector<SDValue, 2> Results;
    getReadTimeStampCounter(Op.getNode(), dl, IntrData->Opc0, DAG, Subtarget,
                            Results);
    return DAG.getMergeValues(Results, dl);
  }
  // Read Performance Monitoring Counters.
  case RDPMC: {
    SmallVector<SDValue, 2> Results;
    getReadPerformanceCounter(Op.getNode(), dl, DAG, Subtarget, Results);
    return DAG.getMergeValues(Results, dl);
  }
  // XTEST intrinsics.
  case XTEST: {
    SDVTList VTs = DAG.getVTList(Op->getValueType(0), MVT::Other);
17937 SDValue InTrans = DAG.getNode(IntrData->Opc0, dl, VTs, Op.getOperand(0));
17938 SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
                                DAG.getConstant(X86::COND_NE, MVT::i8),
                                InTrans);
    SDValue Ret = DAG.getNode(ISD::ZERO_EXTEND, dl, Op->getValueType(0), SetCC);
    return DAG.getNode(ISD::MERGE_VALUES, dl, Op->getVTList(),
                       Ret, SDValue(InTrans.getNode(), 1));
  }
  // ADC/ADCX/SBB
  case ADX: {
    SmallVector<SDValue, 2> Results;
17948 SDVTList CFVTs = DAG.getVTList(Op->getValueType(0), MVT::Other);
17949 SDVTList VTs = DAG.getVTList(Op.getOperand(3)->getValueType(0), MVT::Other);
17950 SDValue GenCF = DAG.getNode(X86ISD::ADD, dl, CFVTs, Op.getOperand(2),
17951 DAG.getConstant(-1, MVT::i8));
17952 SDValue Res = DAG.getNode(IntrData->Opc0, dl, VTs, Op.getOperand(3),
17953 Op.getOperand(4), GenCF.getValue(1));
17954 SDValue Store = DAG.getStore(Op.getOperand(0), dl, Res.getValue(0),
                                 Op.getOperand(5), MachinePointerInfo(),
                                 false, false, 0);
    SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
                                DAG.getConstant(X86::COND_B, MVT::i8),
                                Res.getValue(1));
    Results.push_back(SetCC);
    Results.push_back(Store);
    return DAG.getMergeValues(Results, dl);
  }
  case COMPRESS_TO_MEM: {
17966 SDValue Mask = Op.getOperand(4);
17967 SDValue DataToCompress = Op.getOperand(3);
17968 SDValue Addr = Op.getOperand(2);
17969 SDValue Chain = Op.getOperand(0);
17971 if (isAllOnes(Mask)) // return just a store
17972 return DAG.getStore(Chain, dl, DataToCompress, Addr,
17973 MachinePointerInfo(), false, false, 0);
17975 EVT VT = DataToCompress.getValueType();
17976 EVT MaskVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
17977 VT.getVectorNumElements());
17978 EVT BitcastVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
17979 Mask.getValueType().getSizeInBits());
17980 SDValue VMask = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MaskVT,
17981 DAG.getNode(ISD::BITCAST, dl, BitcastVT, Mask),
17982 DAG.getIntPtrConstant(0));
17984 SDValue Compressed = DAG.getNode(IntrData->Opc0, dl, VT, VMask,
17985 DataToCompress, DAG.getUNDEF(VT));
17986 return DAG.getStore(Chain, dl, Compressed, Addr,
                        MachinePointerInfo(), false, false, 0);
  }
  case EXPAND_FROM_MEM: {
17991 SDValue Mask = Op.getOperand(4);
    SDValue PassThru = Op.getOperand(3);
17993 SDValue Addr = Op.getOperand(2);
17994 SDValue Chain = Op.getOperand(0);
17995 EVT VT = Op.getValueType();
17997 if (isAllOnes(Mask)) // return just a load
      return DAG.getLoad(VT, dl, Chain, Addr, MachinePointerInfo(), false, false,
                         false, 0);
    EVT MaskVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
18001 VT.getVectorNumElements());
18002 EVT BitcastVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
18003 Mask.getValueType().getSizeInBits());
18004 SDValue VMask = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MaskVT,
18005 DAG.getNode(ISD::BITCAST, dl, BitcastVT, Mask),
18006 DAG.getIntPtrConstant(0));
18008 SDValue DataToExpand = DAG.getLoad(VT, dl, Chain, Addr, MachinePointerInfo(),
18009 false, false, false, 0);
18011 SmallVector<SDValue, 2> Results;
    Results.push_back(DAG.getNode(IntrData->Opc0, dl, VT, VMask, DataToExpand,
                                  PassThru));
    Results.push_back(Chain);
    return DAG.getMergeValues(Results, dl);
  }
  }
}
18020 SDValue X86TargetLowering::LowerRETURNADDR(SDValue Op,
18021 SelectionDAG &DAG) const {
18022 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
18023 MFI->setReturnAddressIsTaken(true);
  if (verifyReturnAddressArgumentIsConstant(Op, DAG))
    return SDValue();

  unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
  SDLoc dl(Op);
  EVT PtrVT = getPointerTy();

  if (Depth > 0) {
    SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
18034 const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
18035 SDValue Offset = DAG.getConstant(RegInfo->getSlotSize(), PtrVT);
18036 return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(),
18037 DAG.getNode(ISD::ADD, dl, PtrVT,
18038 FrameAddr, Offset),
                       MachinePointerInfo(), false, false, false, 0);
  }

  // Just load the return address.
  SDValue RetAddrFI = getReturnAddressFrameIndex(DAG);
  return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(),
                     RetAddrFI, MachinePointerInfo(), false, false, false, 0);
}
18048 SDValue X86TargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const {
18049 MachineFunction &MF = DAG.getMachineFunction();
18050 MachineFrameInfo *MFI = MF.getFrameInfo();
18051 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
18052 const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
18053 EVT VT = Op.getValueType();
18055 MFI->setFrameAddressIsTaken(true);
18057 if (MF.getTarget().getMCAsmInfo()->usesWindowsCFI()) {
18058 // Depth > 0 makes no sense on targets which use Windows unwind codes. It
    // is not possible to crawl up the stack without looking at the unwind codes
    // simultaneously.
    int FrameAddrIndex = FuncInfo->getFAIndex();
18062 if (!FrameAddrIndex) {
18063 // Set up a frame object for the return address.
18064 unsigned SlotSize = RegInfo->getSlotSize();
18065 FrameAddrIndex = MF.getFrameInfo()->CreateFixedObject(
18066 SlotSize, /*Offset=*/INT64_MIN, /*IsImmutable=*/false);
      FuncInfo->setFAIndex(FrameAddrIndex);
    }
    return DAG.getFrameIndex(FrameAddrIndex, VT);
  }

  unsigned FrameReg =
18073 RegInfo->getPtrSizedFrameRegister(DAG.getMachineFunction());
18074 SDLoc dl(Op); // FIXME probably not meaningful
18075 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
18076 assert(((FrameReg == X86::RBP && VT == MVT::i64) ||
18077 (FrameReg == X86::EBP && VT == MVT::i32)) &&
18078 "Invalid Frame Register!");
  SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, VT);
  while (Depth--)
    FrameAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), FrameAddr,
                            MachinePointerInfo(),
                            false, false, false, 0);
  return FrameAddr;
}
18087 // FIXME? Maybe this could be a TableGen attribute on some registers and
18088 // this table could be generated automatically from RegInfo.
unsigned X86TargetLowering::getRegisterByName(const char* RegName,
                                              EVT VT) const {
  unsigned Reg = StringSwitch<unsigned>(RegName)
                       .Case("esp", X86::ESP)
                       .Case("rsp", X86::RSP)
                       .Default(0);
  if (Reg)
    return Reg;
  report_fatal_error("Invalid register name global variable");
}
18100 SDValue X86TargetLowering::LowerFRAME_TO_ARGS_OFFSET(SDValue Op,
18101 SelectionDAG &DAG) const {
18102 const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
  return DAG.getIntPtrConstant(2 * RegInfo->getSlotSize());
}
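// The offset is 2 * SlotSize because two pointer-sized slots separate the
// frame pointer from the first incoming argument: the saved frame pointer
// itself and the return address.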
18106 SDValue X86TargetLowering::LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const {
18107 SDValue Chain = Op.getOperand(0);
18108 SDValue Offset = Op.getOperand(1);
  SDValue Handler = Op.getOperand(2);
  SDLoc dl(Op);

  EVT PtrVT = getPointerTy();
18113 const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
18114 unsigned FrameReg = RegInfo->getFrameRegister(DAG.getMachineFunction());
18115 assert(((FrameReg == X86::RBP && PtrVT == MVT::i64) ||
18116 (FrameReg == X86::EBP && PtrVT == MVT::i32)) &&
18117 "Invalid Frame Register!");
18118 SDValue Frame = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, PtrVT);
18119 unsigned StoreAddrReg = (PtrVT == MVT::i64) ? X86::RCX : X86::ECX;
18121 SDValue StoreAddr = DAG.getNode(ISD::ADD, dl, PtrVT, Frame,
18122 DAG.getIntPtrConstant(RegInfo->getSlotSize()));
18123 StoreAddr = DAG.getNode(ISD::ADD, dl, PtrVT, StoreAddr, Offset);
  Chain = DAG.getStore(Chain, dl, Handler, StoreAddr, MachinePointerInfo(),
                       false, false, 0);
  Chain = DAG.getCopyToReg(Chain, dl, StoreAddrReg, StoreAddr);

  return DAG.getNode(X86ISD::EH_RETURN, dl, MVT::Other, Chain,
                     DAG.getRegister(StoreAddrReg, PtrVT));
}
18132 SDValue X86TargetLowering::lowerEH_SJLJ_SETJMP(SDValue Op,
18133 SelectionDAG &DAG) const {
  SDLoc DL(Op);
  return DAG.getNode(X86ISD::EH_SJLJ_SETJMP, DL,
                     DAG.getVTList(MVT::i32, MVT::Other),
                     Op.getOperand(0), Op.getOperand(1));
}
18140 SDValue X86TargetLowering::lowerEH_SJLJ_LONGJMP(SDValue Op,
18141 SelectionDAG &DAG) const {
  SDLoc DL(Op);
  return DAG.getNode(X86ISD::EH_SJLJ_LONGJMP, DL, MVT::Other,
                     Op.getOperand(0), Op.getOperand(1));
}
18147 static SDValue LowerADJUST_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) {
  return Op.getOperand(0);
}
18151 SDValue X86TargetLowering::LowerINIT_TRAMPOLINE(SDValue Op,
18152 SelectionDAG &DAG) const {
18153 SDValue Root = Op.getOperand(0);
18154 SDValue Trmp = Op.getOperand(1); // trampoline
18155 SDValue FPtr = Op.getOperand(2); // nested function
  SDValue Nest = Op.getOperand(3); // 'nest' parameter value
  SDLoc dl(Op);

  const Value *TrmpAddr = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();
18160 const TargetRegisterInfo *TRI = Subtarget->getRegisterInfo();
18162 if (Subtarget->is64Bit()) {
18163 SDValue OutChains[6];
18165 // Large code-model.
18166 const unsigned char JMP64r = 0xFF; // 64-bit jmp through register opcode.
18167 const unsigned char MOV64ri = 0xB8; // X86::MOV64ri opcode.
18169 const unsigned char N86R10 = TRI->getEncodingValue(X86::R10) & 0x7;
18170 const unsigned char N86R11 = TRI->getEncodingValue(X86::R11) & 0x7;
18172 const unsigned char REX_WB = 0x40 | 0x08 | 0x01; // REX prefix
18174 // Load the pointer to the nested function into R11.
18175 unsigned OpCode = ((MOV64ri | N86R11) << 8) | REX_WB; // movabsq r11
18176 SDValue Addr = Trmp;
    OutChains[0] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, MVT::i16),
                                Addr, MachinePointerInfo(TrmpAddr),
                                false, false, 0);

    Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
                       DAG.getConstant(2, MVT::i64));
    OutChains[1] = DAG.getStore(Root, dl, FPtr, Addr,
                                MachinePointerInfo(TrmpAddr, 2),
                                false, false, 2);

    // Load the 'nest' parameter value into R10.
    // R10 is specified in X86CallingConv.td
    OpCode = ((MOV64ri | N86R10) << 8) | REX_WB; // movabsq r10
    Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
                       DAG.getConstant(10, MVT::i64));
    OutChains[2] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, MVT::i16),
                                Addr, MachinePointerInfo(TrmpAddr, 10),
                                false, false, 0);

    Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
                       DAG.getConstant(12, MVT::i64));
    OutChains[3] = DAG.getStore(Root, dl, Nest, Addr,
                                MachinePointerInfo(TrmpAddr, 12),
                                false, false, 2);

    // Jump to the nested function.
    OpCode = (JMP64r << 8) | REX_WB; // jmpq *...
    Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
                       DAG.getConstant(20, MVT::i64));
    OutChains[4] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, MVT::i16),
                                Addr, MachinePointerInfo(TrmpAddr, 20),
                                false, false, 0);

    unsigned char ModRM = N86R11 | (4 << 3) | (3 << 6); // ...r11
    Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
                       DAG.getConstant(22, MVT::i64));
    OutChains[5] = DAG.getStore(Root, dl, DAG.getConstant(ModRM, MVT::i8), Addr,
                                MachinePointerInfo(TrmpAddr, 22),
                                false, false, 0);

    return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
  } else {
    const Function *Func =
18220 cast<Function>(cast<SrcValueSDNode>(Op.getOperand(5))->getValue());
    CallingConv::ID CC = Func->getCallingConv();
    unsigned NestReg;

    switch (CC) {
    default:
      llvm_unreachable("Unsupported calling convention");
18227 case CallingConv::C:
18228 case CallingConv::X86_StdCall: {
18229 // Pass 'nest' parameter in ECX.
18230 // Must be kept in sync with X86CallingConv.td
18231 NestReg = X86::ECX;
18233 // Check that ECX wasn't needed by an 'inreg' parameter.
18234 FunctionType *FTy = Func->getFunctionType();
18235 const AttributeSet &Attrs = Func->getAttributes();
18237 if (!Attrs.isEmpty() && !Func->isVarArg()) {
        unsigned InRegCount = 0;
        unsigned Idx = 1;

        for (FunctionType::param_iterator I = FTy->param_begin(),
18242 E = FTy->param_end(); I != E; ++I, ++Idx)
18243 if (Attrs.hasAttribute(Idx, Attribute::InReg))
18244 // FIXME: should only count parameters that are lowered to integers.
18245 InRegCount += (TD->getTypeSizeInBits(*I) + 31) / 32;
18247 if (InRegCount > 2) {
          report_fatal_error("Nest register in use - reduce number of inreg"
                             " parameters!");
        }
      }
      break;
    }
    case CallingConv::X86_FastCall:
18255 case CallingConv::X86_ThisCall:
18256 case CallingConv::Fast:
18257 // Pass 'nest' parameter in EAX.
18258 // Must be kept in sync with X86CallingConv.td
      NestReg = X86::EAX;
      break;
    }

    SDValue OutChains[4];
18264 SDValue Addr, Disp;
18266 Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
18267 DAG.getConstant(10, MVT::i32));
18268 Disp = DAG.getNode(ISD::SUB, dl, MVT::i32, FPtr, Addr);
18270 // This is storing the opcode for MOV32ri.
18271 const unsigned char MOV32ri = 0xB8; // X86::MOV32ri's opcode byte.
18272 const unsigned char N86Reg = TRI->getEncodingValue(NestReg) & 0x7;
    OutChains[0] = DAG.getStore(Root, dl,
                                DAG.getConstant(MOV32ri|N86Reg, MVT::i8),
                                Trmp, MachinePointerInfo(TrmpAddr),
                                false, false, 0);

    Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
                       DAG.getConstant(1, MVT::i32));
    OutChains[1] = DAG.getStore(Root, dl, Nest, Addr,
                                MachinePointerInfo(TrmpAddr, 1),
                                false, false, 1);

    const unsigned char JMP = 0xE9; // jmp <32bit dst> opcode.
    Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
                       DAG.getConstant(5, MVT::i32));
    OutChains[2] = DAG.getStore(Root, dl, DAG.getConstant(JMP, MVT::i8), Addr,
                                MachinePointerInfo(TrmpAddr, 5),
                                false, false, 1);

    Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
                       DAG.getConstant(6, MVT::i32));
    OutChains[3] = DAG.getStore(Root, dl, Disp, Addr,
                                MachinePointerInfo(TrmpAddr, 6),
                                false, false, 1);

    return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
  }
}
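// Byte layout of the 10-byte 32-bit trampoline written above (a sketch;
// offsets are relative to Trmp): 0: 0xB8+reg (mov NestReg, imm32),
// 1..4: Nest, 5: 0xE9 (jmp rel32), 6..9: Disp, where Disp was computed
// against Trmp+10, the address following the jump instruction.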
18301 SDValue X86TargetLowering::LowerFLT_ROUNDS_(SDValue Op,
18302 SelectionDAG &DAG) const {
  /*
   The rounding mode is in bits 11:10 of FPSR, and has the following
   settings:
     00 Round to nearest
     01 Round to -inf
     10 Round to +inf
     11 Round to 0

  FLT_ROUNDS, on the other hand, expects the following:
    -1 Undefined
     0 Round to 0
     1 Round to nearest
     2 Round to +inf
     3 Round to -inf

  To perform the conversion, we do:
    (((((FPSR & 0x800) >> 11) | ((FPSR & 0x400) >> 9)) + 1) & 3)
  */
18322 MachineFunction &MF = DAG.getMachineFunction();
18323 const TargetFrameLowering &TFI = *Subtarget->getFrameLowering();
18324 unsigned StackAlignment = TFI.getStackAlignment();
  MVT VT = Op.getSimpleValueType();
  SDLoc DL(Op);

  // Save FP Control Word to stack slot
18329 int SSFI = MF.getFrameInfo()->CreateStackObject(2, StackAlignment, false);
18330 SDValue StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
18332 MachineMemOperand *MMO =
18333 MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI),
18334 MachineMemOperand::MOStore, 2, 2);
18336 SDValue Ops[] = { DAG.getEntryNode(), StackSlot };
18337 SDValue Chain = DAG.getMemIntrinsicNode(X86ISD::FNSTCW16m, DL,
18338 DAG.getVTList(MVT::Other),
18339 Ops, MVT::i16, MMO);
18341 // Load FP Control Word from stack slot
18342 SDValue CWD = DAG.getLoad(MVT::i16, DL, Chain, StackSlot,
18343 MachinePointerInfo(), false, false, false, 0);
  // Transform as necessary
  SDValue CWD1 =
    DAG.getNode(ISD::SRL, DL, MVT::i16,
                DAG.getNode(ISD::AND, DL, MVT::i16,
                            CWD, DAG.getConstant(0x800, MVT::i16)),
                DAG.getConstant(11, MVT::i8));
  SDValue CWD2 =
    DAG.getNode(ISD::SRL, DL, MVT::i16,
                DAG.getNode(ISD::AND, DL, MVT::i16,
                            CWD, DAG.getConstant(0x400, MVT::i16)),
                DAG.getConstant(9, MVT::i8));

  SDValue RetVal =
    DAG.getNode(ISD::AND, DL, MVT::i16,
                DAG.getNode(ISD::ADD, DL, MVT::i16,
                            DAG.getNode(ISD::OR, DL, MVT::i16, CWD1, CWD2),
                            DAG.getConstant(1, MVT::i16)),
                DAG.getConstant(3, MVT::i16));

  return DAG.getNode((VT.getSizeInBits() < 16 ?
                      ISD::TRUNCATE : ISD::ZERO_EXTEND), DL, VT, RetVal);
}
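// Worked instance of the conversion formula (FPSR value assumed): for bits
// 11:10 = 01 (round toward -inf), FPSR & 0x800 == 0 and FPSR & 0x400 == 0x400,
// so (0 >> 11) | (0x400 >> 9) == 2 and (2 + 1) & 3 == 3, which is FLT_ROUNDS'
// encoding of round to -inf.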
18368 static SDValue LowerCTLZ(SDValue Op, SelectionDAG &DAG) {
  MVT VT = Op.getSimpleValueType();
  EVT OpVT = VT;
  unsigned NumBits = VT.getSizeInBits();
  SDLoc dl(Op);

  Op = Op.getOperand(0);
  if (VT == MVT::i8) {
    // Zero extend to i32 since there is not an i8 bsr.
    OpVT = MVT::i32;
    Op = DAG.getNode(ISD::ZERO_EXTEND, dl, OpVT, Op);
  }

  // Issue a bsr (scan bits in reverse) which also sets EFLAGS.
  SDVTList VTs = DAG.getVTList(OpVT, MVT::i32);
  Op = DAG.getNode(X86ISD::BSR, dl, VTs, Op);

  // If src is zero (i.e. bsr sets ZF), returns NumBits.
  SDValue Ops[] = {
    Op,
    DAG.getConstant(NumBits+NumBits-1, OpVT),
    DAG.getConstant(X86::COND_E, MVT::i8),
    Op.getValue(1)
  };
  Op = DAG.getNode(X86ISD::CMOV, dl, OpVT, Ops);

  // Finally xor with NumBits-1.
  Op = DAG.getNode(ISD::XOR, dl, OpVT, Op, DAG.getConstant(NumBits-1, OpVT));

  if (VT == MVT::i8)
    Op = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Op);
  return Op;
}
18402 static SDValue LowerCTLZ_ZERO_UNDEF(SDValue Op, SelectionDAG &DAG) {
  MVT VT = Op.getSimpleValueType();
  EVT OpVT = VT;
  unsigned NumBits = VT.getSizeInBits();
  SDLoc dl(Op);

  Op = Op.getOperand(0);
  if (VT == MVT::i8) {
    // Zero extend to i32 since there is not an i8 bsr.
    OpVT = MVT::i32;
    Op = DAG.getNode(ISD::ZERO_EXTEND, dl, OpVT, Op);
  }

  // Issue a bsr (scan bits in reverse).
  SDVTList VTs = DAG.getVTList(OpVT, MVT::i32);
  Op = DAG.getNode(X86ISD::BSR, dl, VTs, Op);

  // And xor with NumBits-1.
  Op = DAG.getNode(ISD::XOR, dl, OpVT, Op, DAG.getConstant(NumBits-1, OpVT));

  if (VT == MVT::i8)
    Op = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Op);
  return Op;
}
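// Example (input assumed): for an i32 operand 0x10, BSR yields 4 and
// 4 ^ 31 == 27, the correct leading-zero count. No CMOV guard is needed
// because a zero input is undefined for CTLZ_ZERO_UNDEF.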
18427 static SDValue LowerCTTZ(SDValue Op, SelectionDAG &DAG) {
18428 MVT VT = Op.getSimpleValueType();
  unsigned NumBits = VT.getSizeInBits();
  SDLoc dl(Op);
  Op = Op.getOperand(0);

  // Issue a bsf (scan bits forward) which also sets EFLAGS.
  SDVTList VTs = DAG.getVTList(VT, MVT::i32);
  Op = DAG.getNode(X86ISD::BSF, dl, VTs, Op);

  // If src is zero (i.e. bsf sets ZF), returns NumBits.
  SDValue Ops[] = {
    Op,
    DAG.getConstant(NumBits, VT),
    DAG.getConstant(X86::COND_E, MVT::i8),
    Op.getValue(1)
  };
  return DAG.getNode(X86ISD::CMOV, dl, VT, Ops);
}
18447 // Lower256IntArith - Break a 256-bit integer operation into two new 128-bit
18448 // ones, and then concatenate the result back.
18449 static SDValue Lower256IntArith(SDValue Op, SelectionDAG &DAG) {
18450 MVT VT = Op.getSimpleValueType();
18452 assert(VT.is256BitVector() && VT.isInteger() &&
18453 "Unsupported value type for operation");
  unsigned NumElems = VT.getVectorNumElements();
  SDLoc dl(Op);

  // Extract the LHS vectors
18459 SDValue LHS = Op.getOperand(0);
18460 SDValue LHS1 = Extract128BitVector(LHS, 0, DAG, dl);
18461 SDValue LHS2 = Extract128BitVector(LHS, NumElems/2, DAG, dl);
18463 // Extract the RHS vectors
18464 SDValue RHS = Op.getOperand(1);
18465 SDValue RHS1 = Extract128BitVector(RHS, 0, DAG, dl);
18466 SDValue RHS2 = Extract128BitVector(RHS, NumElems/2, DAG, dl);
18468 MVT EltVT = VT.getVectorElementType();
18469 MVT NewVT = MVT::getVectorVT(EltVT, NumElems/2);
18471 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT,
18472 DAG.getNode(Op.getOpcode(), dl, NewVT, LHS1, RHS1),
                     DAG.getNode(Op.getOpcode(), dl, NewVT, LHS2, RHS2));
}
18476 static SDValue LowerADD(SDValue Op, SelectionDAG &DAG) {
18477 assert(Op.getSimpleValueType().is256BitVector() &&
18478 Op.getSimpleValueType().isInteger() &&
18479 "Only handle AVX 256-bit vector integer operation");
  return Lower256IntArith(Op, DAG);
}
18483 static SDValue LowerSUB(SDValue Op, SelectionDAG &DAG) {
18484 assert(Op.getSimpleValueType().is256BitVector() &&
18485 Op.getSimpleValueType().isInteger() &&
18486 "Only handle AVX 256-bit vector integer operation");
  return Lower256IntArith(Op, DAG);
}
18490 static SDValue LowerMUL(SDValue Op, const X86Subtarget *Subtarget,
18491 SelectionDAG &DAG) {
  SDLoc dl(Op);
  MVT VT = Op.getSimpleValueType();
18495 // Decompose 256-bit ops into smaller 128-bit ops.
18496 if (VT.is256BitVector() && !Subtarget->hasInt256())
18497 return Lower256IntArith(Op, DAG);
18499 SDValue A = Op.getOperand(0);
18500 SDValue B = Op.getOperand(1);
18502 // Lower v4i32 mul as 2x shuffle, 2x pmuludq, 2x shuffle.
18503 if (VT == MVT::v4i32) {
18504 assert(Subtarget->hasSSE2() && !Subtarget->hasSSE41() &&
18505 "Should not custom lower when pmuldq is available!");
18507 // Extract the odd parts.
18508 static const int UnpackMask[] = { 1, -1, 3, -1 };
18509 SDValue Aodds = DAG.getVectorShuffle(VT, dl, A, A, UnpackMask);
18510 SDValue Bodds = DAG.getVectorShuffle(VT, dl, B, B, UnpackMask);
18512 // Multiply the even parts.
18513 SDValue Evens = DAG.getNode(X86ISD::PMULUDQ, dl, MVT::v2i64, A, B);
18514 // Now multiply odd parts.
18515 SDValue Odds = DAG.getNode(X86ISD::PMULUDQ, dl, MVT::v2i64, Aodds, Bodds);
18517 Evens = DAG.getNode(ISD::BITCAST, dl, VT, Evens);
18518 Odds = DAG.getNode(ISD::BITCAST, dl, VT, Odds);
    // Merge the two vectors back together with a shuffle. This expands into 2
    // shuffles.
    static const int ShufMask[] = { 0, 4, 2, 6 };
    return DAG.getVectorShuffle(VT, dl, Evens, Odds, ShufMask);
  }

  assert((VT == MVT::v2i64 || VT == MVT::v4i64 || VT == MVT::v8i64) &&
18527 "Only know how to lower V2I64/V4I64/V8I64 multiply");
18529 // Ahi = psrlqi(a, 32);
18530 // Bhi = psrlqi(b, 32);
18532 // AloBlo = pmuludq(a, b);
18533 // AloBhi = pmuludq(a, Bhi);
18534 // AhiBlo = pmuludq(Ahi, b);
18536 // AloBhi = psllqi(AloBhi, 32);
18537 // AhiBlo = psllqi(AhiBlo, 32);
18538 // return AloBlo + AloBhi + AhiBlo;
18540 SDValue Ahi = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, VT, A, 32, DAG);
18541 SDValue Bhi = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, VT, B, 32, DAG);
18543 // Bit cast to 32-bit vectors for MULUDQ
18544 EVT MulVT = (VT == MVT::v2i64) ? MVT::v4i32 :
18545 (VT == MVT::v4i64) ? MVT::v8i32 : MVT::v16i32;
18546 A = DAG.getNode(ISD::BITCAST, dl, MulVT, A);
18547 B = DAG.getNode(ISD::BITCAST, dl, MulVT, B);
18548 Ahi = DAG.getNode(ISD::BITCAST, dl, MulVT, Ahi);
18549 Bhi = DAG.getNode(ISD::BITCAST, dl, MulVT, Bhi);
18551 SDValue AloBlo = DAG.getNode(X86ISD::PMULUDQ, dl, VT, A, B);
18552 SDValue AloBhi = DAG.getNode(X86ISD::PMULUDQ, dl, VT, A, Bhi);
18553 SDValue AhiBlo = DAG.getNode(X86ISD::PMULUDQ, dl, VT, Ahi, B);
18555 AloBhi = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, VT, AloBhi, 32, DAG);
18556 AhiBlo = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, VT, AhiBlo, 32, DAG);
18558 SDValue Res = DAG.getNode(ISD::ADD, dl, VT, AloBlo, AloBhi);
  return DAG.getNode(ISD::ADD, dl, VT, Res, AhiBlo);
}
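// Worked per-lane example (values assumed): for a = 2^33 + 5 and b = 3,
// Ahi = 2 and Bhi = 0, so AloBlo = 15, AloBhi = 0, and
// AhiBlo = (2 * 3) << 32 = 3 * 2^33; their sum, 3 * 2^33 + 15, equals a * b
// modulo 2^64.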
18562 SDValue X86TargetLowering::LowerWin64_i128OP(SDValue Op, SelectionDAG &DAG) const {
18563 assert(Subtarget->isTargetWin64() && "Unexpected target");
18564 EVT VT = Op.getValueType();
18565 assert(VT.isInteger() && VT.getSizeInBits() == 128 &&
         "Unexpected return type for lowering");

  RTLIB::Libcall LC;
  bool isSigned;
  switch (Op->getOpcode()) {
18571 default: llvm_unreachable("Unexpected request for libcall!");
18572 case ISD::SDIV: isSigned = true; LC = RTLIB::SDIV_I128; break;
18573 case ISD::UDIV: isSigned = false; LC = RTLIB::UDIV_I128; break;
18574 case ISD::SREM: isSigned = true; LC = RTLIB::SREM_I128; break;
18575 case ISD::UREM: isSigned = false; LC = RTLIB::UREM_I128; break;
18576 case ISD::SDIVREM: isSigned = true; LC = RTLIB::SDIVREM_I128; break;
  case ISD::UDIVREM: isSigned = false; LC = RTLIB::UDIVREM_I128; break;
  }

  SDLoc dl(Op);
  SDValue InChain = DAG.getEntryNode();
18583 TargetLowering::ArgListTy Args;
18584 TargetLowering::ArgListEntry Entry;
18585 for (unsigned i = 0, e = Op->getNumOperands(); i != e; ++i) {
18586 EVT ArgVT = Op->getOperand(i).getValueType();
18587 assert(ArgVT.isInteger() && ArgVT.getSizeInBits() == 128 &&
18588 "Unexpected argument type for lowering");
18589 SDValue StackPtr = DAG.CreateStackTemporary(ArgVT, 16);
18590 Entry.Node = StackPtr;
    InChain = DAG.getStore(InChain, dl, Op->getOperand(i), StackPtr,
                           MachinePointerInfo(), false, false, 16);
    Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
18594 Entry.Ty = PointerType::get(ArgTy,0);
18595 Entry.isSExt = false;
18596 Entry.isZExt = false;
    Args.push_back(Entry);
  }

  SDValue Callee = DAG.getExternalSymbol(getLibcallName(LC),
                                         getPointerTy());

  TargetLowering::CallLoweringInfo CLI(DAG);
18604 CLI.setDebugLoc(dl).setChain(InChain)
18605 .setCallee(getLibcallCallingConv(LC),
18606 static_cast<EVT>(MVT::v2i64).getTypeForEVT(*DAG.getContext()),
18607 Callee, std::move(Args), 0)
18608 .setInRegister().setSExtResult(isSigned).setZExtResult(!isSigned);
18610 std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI);
  return DAG.getNode(ISD::BITCAST, dl, VT, CallInfo.first);
}
18614 static SDValue LowerMUL_LOHI(SDValue Op, const X86Subtarget *Subtarget,
18615 SelectionDAG &DAG) {
18616 SDValue Op0 = Op.getOperand(0), Op1 = Op.getOperand(1);
  EVT VT = Op0.getValueType();
  SDLoc dl(Op);

  assert((VT == MVT::v4i32 && Subtarget->hasSSE2()) ||
         (VT == MVT::v8i32 && Subtarget->hasInt256()));

  // PMULxD operations multiply each even value (starting at 0) of LHS with
  // the related value of RHS and produce a widened result.
  // E.g., PMULUDQ <4 x i32> <a|b|c|d>, <4 x i32> <e|f|g|h>
  // => <2 x i64> <ae|cg>
  //
  // In other words, to have all the results, we need to perform two PMULxD:
  // 1. one with the even values.
  // 2. one with the odd values.
  // To achieve #2, we need to place the odd values at an even position.
  //
  // Place the odd value at an even position (basically, shift all values 1
  // step to the left):
  const int Mask[] = {1, -1, 3, -1, 5, -1, 7, -1};
18636 // <a|b|c|d> => <b|undef|d|undef>
18637 SDValue Odd0 = DAG.getVectorShuffle(VT, dl, Op0, Op0, Mask);
18638 // <e|f|g|h> => <f|undef|h|undef>
18639 SDValue Odd1 = DAG.getVectorShuffle(VT, dl, Op1, Op1, Mask);
  // Emit two multiplies, one for the lower 2 ints and one for the higher 2
  // ints.
  MVT MulVT = VT == MVT::v4i32 ? MVT::v2i64 : MVT::v4i64;
  bool IsSigned = Op->getOpcode() == ISD::SMUL_LOHI;
  unsigned Opcode =
    (!IsSigned || !Subtarget->hasSSE41()) ? X86ISD::PMULUDQ : X86ISD::PMULDQ;
18647 // PMULUDQ <4 x i32> <a|b|c|d>, <4 x i32> <e|f|g|h>
18648 // => <2 x i64> <ae|cg>
18649 SDValue Mul1 = DAG.getNode(ISD::BITCAST, dl, VT,
18650 DAG.getNode(Opcode, dl, MulVT, Op0, Op1));
18651 // PMULUDQ <4 x i32> <b|undef|d|undef>, <4 x i32> <f|undef|h|undef>
18652 // => <2 x i64> <bf|dh>
18653 SDValue Mul2 = DAG.getNode(ISD::BITCAST, dl, VT,
18654 DAG.getNode(Opcode, dl, MulVT, Odd0, Odd1));
18656 // Shuffle it back into the right order.
18657 SDValue Highs, Lows;
18658 if (VT == MVT::v8i32) {
18659 const int HighMask[] = {1, 9, 3, 11, 5, 13, 7, 15};
18660 Highs = DAG.getVectorShuffle(VT, dl, Mul1, Mul2, HighMask);
18661 const int LowMask[] = {0, 8, 2, 10, 4, 12, 6, 14};
    Lows = DAG.getVectorShuffle(VT, dl, Mul1, Mul2, LowMask);
  } else {
    const int HighMask[] = {1, 5, 3, 7};
18665 Highs = DAG.getVectorShuffle(VT, dl, Mul1, Mul2, HighMask);
18666 const int LowMask[] = {0, 4, 2, 6};
    Lows = DAG.getVectorShuffle(VT, dl, Mul1, Mul2, LowMask);
  }

  // If we have a signed multiply but no PMULDQ, fix up the high parts of an
  // unsigned multiply.
  if (IsSigned && !Subtarget->hasSSE41()) {
    SDValue ShAmt =
      DAG.getConstant(31, DAG.getTargetLoweringInfo().getShiftAmountTy(VT));
    SDValue T1 = DAG.getNode(ISD::AND, dl, VT,
18676 DAG.getNode(ISD::SRA, dl, VT, Op0, ShAmt), Op1);
18677 SDValue T2 = DAG.getNode(ISD::AND, dl, VT,
18678 DAG.getNode(ISD::SRA, dl, VT, Op1, ShAmt), Op0);
18680 SDValue Fixup = DAG.getNode(ISD::ADD, dl, VT, T1, T2);
18681 Highs = DAG.getNode(ISD::SUB, dl, VT, Highs, Fixup);
18684 // The first result of MUL_LOHI is actually the low value, followed by the high value.
18686 SDValue Ops[] = {Lows, Highs};
18687 return DAG.getMergeValues(Ops, dl);
18690 static SDValue LowerScalarImmediateShift(SDValue Op, SelectionDAG &DAG,
18691 const X86Subtarget *Subtarget) {
18692 MVT VT = Op.getSimpleValueType();
18694 SDValue R = Op.getOperand(0);
18695 SDValue Amt = Op.getOperand(1);
18697 // Optimize shl/srl/sra with constant shift amount.
18698 if (auto *BVAmt = dyn_cast<BuildVectorSDNode>(Amt)) {
18699 if (auto *ShiftConst = BVAmt->getConstantSplatNode()) {
18700 uint64_t ShiftAmt = ShiftConst->getZExtValue();
18702 if (VT == MVT::v2i64 || VT == MVT::v4i32 || VT == MVT::v8i16 ||
18703 (Subtarget->hasInt256() &&
18704 (VT == MVT::v4i64 || VT == MVT::v8i32 || VT == MVT::v16i16)) ||
18705 (Subtarget->hasAVX512() &&
18706 (VT == MVT::v8i64 || VT == MVT::v16i32))) {
18707 if (Op.getOpcode() == ISD::SHL)
18708 return getTargetVShiftByConstNode(X86ISD::VSHLI, dl, VT, R, ShiftAmt,
18710 if (Op.getOpcode() == ISD::SRL)
18711 return getTargetVShiftByConstNode(X86ISD::VSRLI, dl, VT, R, ShiftAmt,
18713 if (Op.getOpcode() == ISD::SRA && VT != MVT::v2i64 && VT != MVT::v4i64)
18714 return getTargetVShiftByConstNode(X86ISD::VSRAI, dl, VT, R, ShiftAmt,
18718 if (VT == MVT::v16i8) {
18719 if (Op.getOpcode() == ISD::SHL) {
18720 // Make a large shift.
18721 SDValue SHL = getTargetVShiftByConstNode(X86ISD::VSHLI, dl,
18722 MVT::v8i16, R, ShiftAmt,
18724 SHL = DAG.getNode(ISD::BITCAST, dl, VT, SHL);
18725 // Zero out the rightmost bits.
18726 SmallVector<SDValue, 16> V(16,
18727 DAG.getConstant(uint8_t(-1U << ShiftAmt),
18729 return DAG.getNode(ISD::AND, dl, VT, SHL,
18730 DAG.getNode(ISD::BUILD_VECTOR, dl, VT, V));
18732 if (Op.getOpcode() == ISD::SRL) {
18733 // Make a large shift.
18734 SDValue SRL = getTargetVShiftByConstNode(X86ISD::VSRLI, dl,
18735 MVT::v8i16, R, ShiftAmt,
18737 SRL = DAG.getNode(ISD::BITCAST, dl, VT, SRL);
18738 // Zero out the leftmost bits.
18739 SmallVector<SDValue, 16> V(16,
18740 DAG.getConstant(uint8_t(-1U) >> ShiftAmt,
18742 return DAG.getNode(ISD::AND, dl, VT, SRL,
18743 DAG.getNode(ISD::BUILD_VECTOR, dl, VT, V));
18745 if (Op.getOpcode() == ISD::SRA) {
18746 if (ShiftAmt == 7) {
18747 // R s>> 7 === R s< 0
18748 SDValue Zeros = getZeroVector(VT, Subtarget, DAG, dl);
18749 return DAG.getNode(X86ISD::PCMPGT, dl, VT, Zeros, R);
18752 // R s>> a === ((R u>> a) ^ m) - m
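// Worked example for a single i8 lane (illustrative): with a = 2 and
// R = 0xF0 (-16), m = 128 >> 2 = 0x20:
//   (0xF0 u>> 2) = 0x3C;  0x3C ^ 0x20 = 0x1C;  0x1C - 0x20 = 0xFC = -4,
// which matches -16 s>> 2.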
18753 SDValue Res = DAG.getNode(ISD::SRL, dl, VT, R, Amt);
18754 SmallVector<SDValue, 16> V(16, DAG.getConstant(128 >> ShiftAmt,
18756 SDValue Mask = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, V);
18757 Res = DAG.getNode(ISD::XOR, dl, VT, Res, Mask);
18758 Res = DAG.getNode(ISD::SUB, dl, VT, Res, Mask);
18761 llvm_unreachable("Unknown shift opcode.");
18764 if (Subtarget->hasInt256() && VT == MVT::v32i8) {
18765 if (Op.getOpcode() == ISD::SHL) {
18766 // Make a large shift.
18767 SDValue SHL = getTargetVShiftByConstNode(X86ISD::VSHLI, dl,
18768 MVT::v16i16, R, ShiftAmt,
18770 SHL = DAG.getNode(ISD::BITCAST, dl, VT, SHL);
18771 // Zero out the rightmost bits.
18772 SmallVector<SDValue, 32> V(32,
18773 DAG.getConstant(uint8_t(-1U << ShiftAmt),
18775 return DAG.getNode(ISD::AND, dl, VT, SHL,
18776 DAG.getNode(ISD::BUILD_VECTOR, dl, VT, V));
18778 if (Op.getOpcode() == ISD::SRL) {
18779 // Make a large shift.
18780 SDValue SRL = getTargetVShiftByConstNode(X86ISD::VSRLI, dl,
18781 MVT::v16i16, R, ShiftAmt,
18783 SRL = DAG.getNode(ISD::BITCAST, dl, VT, SRL);
18784 // Zero out the leftmost bits.
18785 SmallVector<SDValue, 32> V(32,
18786 DAG.getConstant(uint8_t(-1U) >> ShiftAmt,
18788 return DAG.getNode(ISD::AND, dl, VT, SRL,
18789 DAG.getNode(ISD::BUILD_VECTOR, dl, VT, V));
18791 if (Op.getOpcode() == ISD::SRA) {
18792 if (ShiftAmt == 7) {
18793 // R s>> 7 === R s< 0
18794 SDValue Zeros = getZeroVector(VT, Subtarget, DAG, dl);
18795 return DAG.getNode(X86ISD::PCMPGT, dl, VT, Zeros, R);
18798 // R s>> a === ((R u>> a) ^ m) - m
18799 SDValue Res = DAG.getNode(ISD::SRL, dl, VT, R, Amt);
18800 SmallVector<SDValue, 32> V(32, DAG.getConstant(128 >> ShiftAmt,
18802 SDValue Mask = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, V);
18803 Res = DAG.getNode(ISD::XOR, dl, VT, Res, Mask);
18804 Res = DAG.getNode(ISD::SUB, dl, VT, Res, Mask);
18807 llvm_unreachable("Unknown shift opcode.");
18812 // Special case in 32-bit mode, where i64 is expanded into high and low parts.
18813 if (!Subtarget->is64Bit() &&
18814 (VT == MVT::v2i64 || (Subtarget->hasInt256() && VT == MVT::v4i64)) &&
18815 Amt.getOpcode() == ISD::BITCAST &&
18816 Amt.getOperand(0).getOpcode() == ISD::BUILD_VECTOR) {
18817 Amt = Amt.getOperand(0);
18818 unsigned Ratio = Amt.getSimpleValueType().getVectorNumElements() /
18819 VT.getVectorNumElements();
18820 unsigned RatioInLog2 = Log2_32_Ceil(Ratio);
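// e.g. for a v2i64 shift whose amount was bitcast from a v4i32
// build_vector: Ratio == 2, RatioInLog2 == 1, and each 32-bit constant
// piece below is packed at bit offset i * 32 (i.e. 1 << (6 - 1)) to
// reassemble the full 64-bit shift amount.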
18821 uint64_t ShiftAmt = 0;
18822 for (unsigned i = 0; i != Ratio; ++i) {
18823 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Amt.getOperand(i));
18827 ShiftAmt |= C->getZExtValue() << (i * (1 << (6 - RatioInLog2)));
18829 // Check remaining shift amounts.
18830 for (unsigned i = Ratio; i != Amt.getNumOperands(); i += Ratio) {
18831 uint64_t ShAmt = 0;
18832 for (unsigned j = 0; j != Ratio; ++j) {
18833 ConstantSDNode *C =
18834 dyn_cast<ConstantSDNode>(Amt.getOperand(i + j));
18838 ShAmt |= C->getZExtValue() << (j * (1 << (6 - RatioInLog2)));
18840 if (ShAmt != ShiftAmt)
18843 switch (Op.getOpcode()) {
18845 llvm_unreachable("Unknown shift opcode!");
18847 return getTargetVShiftByConstNode(X86ISD::VSHLI, dl, VT, R, ShiftAmt,
18850 return getTargetVShiftByConstNode(X86ISD::VSRLI, dl, VT, R, ShiftAmt,
18853 return getTargetVShiftByConstNode(X86ISD::VSRAI, dl, VT, R, ShiftAmt,
18861 static SDValue LowerScalarVariableShift(SDValue Op, SelectionDAG &DAG,
18862 const X86Subtarget* Subtarget) {
18863 MVT VT = Op.getSimpleValueType();
18865 SDValue R = Op.getOperand(0);
18866 SDValue Amt = Op.getOperand(1);
18868 if ((VT == MVT::v2i64 && Op.getOpcode() != ISD::SRA) ||
18869 VT == MVT::v4i32 || VT == MVT::v8i16 ||
18870 (Subtarget->hasInt256() &&
18871 ((VT == MVT::v4i64 && Op.getOpcode() != ISD::SRA) ||
18872 VT == MVT::v8i32 || VT == MVT::v16i16)) ||
18873 (Subtarget->hasAVX512() && (VT == MVT::v8i64 || VT == MVT::v16i32))) {
18875 EVT EltVT = VT.getVectorElementType();
18877 if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(Amt)) {
18878 // Check if this build_vector node is doing a splat.
18879 // If so, then set BaseShAmt equal to the splat value.
18880 BaseShAmt = BV->getSplatValue();
18881 if (BaseShAmt && BaseShAmt.getOpcode() == ISD::UNDEF)
18882 BaseShAmt = SDValue();
18884 if (Amt.getOpcode() == ISD::EXTRACT_SUBVECTOR)
18885 Amt = Amt.getOperand(0);
18887 ShuffleVectorSDNode *SVN = dyn_cast<ShuffleVectorSDNode>(Amt);
18888 if (SVN && SVN->isSplat()) {
18889 unsigned SplatIdx = (unsigned)SVN->getSplatIndex();
18890 SDValue InVec = Amt.getOperand(0);
18891 if (InVec.getOpcode() == ISD::BUILD_VECTOR) {
18892 assert((SplatIdx < InVec.getValueType().getVectorNumElements()) &&
18893 "Unexpected shuffle index found!");
18894 BaseShAmt = InVec.getOperand(SplatIdx);
18895 } else if (InVec.getOpcode() == ISD::INSERT_VECTOR_ELT) {
18896 if (ConstantSDNode *C =
18897 dyn_cast<ConstantSDNode>(InVec.getOperand(2))) {
18898 if (C->getZExtValue() == SplatIdx)
18899 BaseShAmt = InVec.getOperand(1);
18904 // Avoid introducing an extract element from a shuffle.
18905 BaseShAmt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, InVec,
18906 DAG.getIntPtrConstant(SplatIdx));
18910 if (BaseShAmt.getNode()) {
18911 assert(EltVT.bitsLE(MVT::i64) && "Unexpected element type!");
18912 if (EltVT != MVT::i64 && EltVT.bitsGT(MVT::i32))
18913 BaseShAmt = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64, BaseShAmt);
18914 else if (EltVT.bitsLT(MVT::i32))
18915 BaseShAmt = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, BaseShAmt);
18917 switch (Op.getOpcode()) {
18919 llvm_unreachable("Unknown shift opcode!");
18921 switch (VT.SimpleTy) {
18922 default: return SDValue();
18931 return getTargetVShiftNode(X86ISD::VSHLI, dl, VT, R, BaseShAmt, DAG);
18934 switch (VT.SimpleTy) {
18935 default: return SDValue();
18942 return getTargetVShiftNode(X86ISD::VSRAI, dl, VT, R, BaseShAmt, DAG);
18945 switch (VT.SimpleTy) {
18946 default: return SDValue();
18955 return getTargetVShiftNode(X86ISD::VSRLI, dl, VT, R, BaseShAmt, DAG);
18961 // Special case in 32-bit mode, where i64 is expanded into high and low parts.
18962 if (!Subtarget->is64Bit() &&
18963 (VT == MVT::v2i64 || (Subtarget->hasInt256() && VT == MVT::v4i64) ||
18964 (Subtarget->hasAVX512() && VT == MVT::v8i64)) &&
18965 Amt.getOpcode() == ISD::BITCAST &&
18966 Amt.getOperand(0).getOpcode() == ISD::BUILD_VECTOR) {
18967 Amt = Amt.getOperand(0);
18968 unsigned Ratio = Amt.getSimpleValueType().getVectorNumElements() /
18969 VT.getVectorNumElements();
18970 std::vector<SDValue> Vals(Ratio);
18971 for (unsigned i = 0; i != Ratio; ++i)
18972 Vals[i] = Amt.getOperand(i);
18973 for (unsigned i = Ratio; i != Amt.getNumOperands(); i += Ratio) {
18974 for (unsigned j = 0; j != Ratio; ++j)
18975 if (Vals[j] != Amt.getOperand(i + j))
18978 switch (Op.getOpcode()) {
18980 llvm_unreachable("Unknown shift opcode!");
18982 return DAG.getNode(X86ISD::VSHL, dl, VT, R, Op.getOperand(1));
18984 return DAG.getNode(X86ISD::VSRL, dl, VT, R, Op.getOperand(1));
18986 return DAG.getNode(X86ISD::VSRA, dl, VT, R, Op.getOperand(1));
18993 static SDValue LowerShift(SDValue Op, const X86Subtarget* Subtarget,
18994 SelectionDAG &DAG) {
18995 MVT VT = Op.getSimpleValueType();
18997 SDValue R = Op.getOperand(0);
18998 SDValue Amt = Op.getOperand(1);
19001 assert(VT.isVector() && "Custom lowering only for vector shifts!");
19002 assert(Subtarget->hasSSE2() && "Only custom lower when we have SSE2!");
19004 V = LowerScalarImmediateShift(Op, DAG, Subtarget);
19008 V = LowerScalarVariableShift(Op, DAG, Subtarget);
19012 if (Subtarget->hasAVX512() && (VT == MVT::v16i32 || VT == MVT::v8i64))
19014 // AVX2 has VPSLLV/VPSRAV/VPSRLV.
19015 if (Subtarget->hasInt256()) {
19016 if (Op.getOpcode() == ISD::SRL &&
19017 (VT == MVT::v2i64 || VT == MVT::v4i32 ||
19018 VT == MVT::v4i64 || VT == MVT::v8i32))
19020 if (Op.getOpcode() == ISD::SHL &&
19021 (VT == MVT::v2i64 || VT == MVT::v4i32 ||
19022 VT == MVT::v4i64 || VT == MVT::v8i32))
19024 if (Op.getOpcode() == ISD::SRA && (VT == MVT::v4i32 || VT == MVT::v8i32))
19028 // If possible, lower this packed shift into a vector multiply instead of
19029 // expanding it into a sequence of scalar shifts.
19030 // Do this only if the vector shift count is a constant build_vector.
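// Illustrative example (assuming every amount is a defined, in-range
// constant):
//   (v4i32 shl X, <1, 2, 3, 4>)  -->  (v4i32 mul X, <2, 4, 8, 16>)
// i.e. each lane gets multiplied by 1 << amt, built via One.shl(ShAmt)
// below.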
19031 if (Op.getOpcode() == ISD::SHL &&
19032 (VT == MVT::v8i16 || VT == MVT::v4i32 ||
19033 (Subtarget->hasInt256() && VT == MVT::v16i16)) &&
19034 ISD::isBuildVectorOfConstantSDNodes(Amt.getNode())) {
19035 SmallVector<SDValue, 8> Elts;
19036 EVT SVT = VT.getScalarType();
19037 unsigned SVTBits = SVT.getSizeInBits();
19038 const APInt &One = APInt(SVTBits, 1);
19039 unsigned NumElems = VT.getVectorNumElements();
19041 for (unsigned i = 0; i != NumElems; ++i) {
19042 SDValue Op = Amt->getOperand(i);
19043 if (Op->getOpcode() == ISD::UNDEF) {
19044 Elts.push_back(Op);
19048 ConstantSDNode *ND = cast<ConstantSDNode>(Op);
19049 const APInt &C = APInt(SVTBits, ND->getAPIntValue().getZExtValue());
19050 uint64_t ShAmt = C.getZExtValue();
19051 if (ShAmt >= SVTBits) {
19052 Elts.push_back(DAG.getUNDEF(SVT));
19055 Elts.push_back(DAG.getConstant(One.shl(ShAmt), SVT));
19057 SDValue BV = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Elts);
19058 return DAG.getNode(ISD::MUL, dl, VT, R, BV);
19061 // Lower SHL with variable shift amount.
19062 if (VT == MVT::v4i32 && Op->getOpcode() == ISD::SHL) {
19063 Op = DAG.getNode(ISD::SHL, dl, VT, Amt, DAG.getConstant(23, VT));
19065 Op = DAG.getNode(ISD::ADD, dl, VT, Op, DAG.getConstant(0x3f800000U, VT));
19066 Op = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, Op);
19067 Op = DAG.getNode(ISD::FP_TO_SINT, dl, VT, Op);
19068 return DAG.getNode(ISD::MUL, dl, VT, Op, R);
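// A rough scalar model of the exponent trick above (hypothetical helper,
// assuming IEEE-754 floats and 0 <= Amt < 32):
//   uint32_t Pow2(uint32_t Amt) {
//     uint32_t Bits = (Amt << 23) + 0x3f800000u; // 1.0f, exponent += Amt
//     float F;
//     std::memcpy(&F, &Bits, sizeof F);          // the BITCAST step
//     return (uint32_t)F;                        // == 1u << Amt
//   }
// Multiplying R by this per-lane power of two realizes the left shift.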
19071 // If possible, lower this shift as a sequence of two shifts by
19072 // constant plus a MOVSS/MOVSD instead of scalarizing it.
19074 // (v4i32 (srl A, (build_vector < X, Y, Y, Y>)))
19076 // Could be rewritten as:
19077 // (v4i32 (MOVSS (srl A, <Y,Y,Y,Y>), (srl A, <X,X,X,X>)))
19079 // The advantage is that the two shifts from the example would be
19080 // lowered as X86ISD::VSRLI nodes. This would be cheaper than scalarizing
19081 // the vector shift into four scalar shifts plus four pairs of vector insert/extract.
19083 if ((VT == MVT::v8i16 || VT == MVT::v4i32) &&
19084 ISD::isBuildVectorOfConstantSDNodes(Amt.getNode())) {
19085 unsigned TargetOpcode = X86ISD::MOVSS;
19086 bool CanBeSimplified;
19087 // The splat value for the first packed shift (the 'X' from the example).
19088 SDValue Amt1 = Amt->getOperand(0);
19089 // The splat value for the second packed shift (the 'Y' from the example).
19090 SDValue Amt2 = (VT == MVT::v4i32) ? Amt->getOperand(1) :
19091 Amt->getOperand(2);
19093 // See if it is possible to replace this node with a sequence of
19094 // two shifts followed by a MOVSS/MOVSD
19095 if (VT == MVT::v4i32) {
19096 // Check if it is legal to use a MOVSS.
19097 CanBeSimplified = Amt2 == Amt->getOperand(2) &&
19098 Amt2 == Amt->getOperand(3);
19099 if (!CanBeSimplified) {
19100 // Otherwise, check if we can still simplify this node using a MOVSD.
19101 CanBeSimplified = Amt1 == Amt->getOperand(1) &&
19102 Amt->getOperand(2) == Amt->getOperand(3);
19103 TargetOpcode = X86ISD::MOVSD;
19104 Amt2 = Amt->getOperand(2);
19107 // Do similar checks for the case where the machine value type is MVT::v8i16.
19109 CanBeSimplified = Amt1 == Amt->getOperand(1);
19110 for (unsigned i=3; i != 8 && CanBeSimplified; ++i)
19111 CanBeSimplified = Amt2 == Amt->getOperand(i);
19113 if (!CanBeSimplified) {
19114 TargetOpcode = X86ISD::MOVSD;
19115 CanBeSimplified = true;
19116 Amt2 = Amt->getOperand(4);
19117 for (unsigned i=0; i != 4 && CanBeSimplified; ++i)
19118 CanBeSimplified = Amt1 == Amt->getOperand(i);
19119 for (unsigned j=4; j != 8 && CanBeSimplified; ++j)
19120 CanBeSimplified = Amt2 == Amt->getOperand(j);
19124 if (CanBeSimplified && isa<ConstantSDNode>(Amt1) &&
19125 isa<ConstantSDNode>(Amt2)) {
19126 // Replace this node with two shifts followed by a MOVSS/MOVSD.
19127 EVT CastVT = MVT::v4i32;
19129 DAG.getConstant(cast<ConstantSDNode>(Amt1)->getAPIntValue(), VT);
19130 SDValue Shift1 = DAG.getNode(Op->getOpcode(), dl, VT, R, Splat1);
19132 DAG.getConstant(cast<ConstantSDNode>(Amt2)->getAPIntValue(), VT);
19133 SDValue Shift2 = DAG.getNode(Op->getOpcode(), dl, VT, R, Splat2);
19134 if (TargetOpcode == X86ISD::MOVSD)
19135 CastVT = MVT::v2i64;
19136 SDValue BitCast1 = DAG.getNode(ISD::BITCAST, dl, CastVT, Shift1);
19137 SDValue BitCast2 = DAG.getNode(ISD::BITCAST, dl, CastVT, Shift2);
19138 SDValue Result = getTargetShuffleNode(TargetOpcode, dl, CastVT, BitCast2,
19140 return DAG.getNode(ISD::BITCAST, dl, VT, Result);
19144 if (VT == MVT::v16i8 && Op->getOpcode() == ISD::SHL) {
19145 assert(Subtarget->hasSSE2() && "Need SSE2 for pslli/pcmpeq.");
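// The lowering below is bit-serial: 'Amt << 5' moves the three useful
// amount bits so that the current one sits in each byte's sign bit; every
// round tests that bit with AND + PCMPEQ to form a VSELECT mask,
// conditionally shifts R by 4, then 2, then 1, and doubles Op to expose
// the next amount bit.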
19148 Op = DAG.getNode(ISD::SHL, dl, VT, Amt, DAG.getConstant(5, VT));
19149 Op = DAG.getNode(ISD::BITCAST, dl, VT, Op);
19151 // Turn 'a' into a mask suitable for VSELECT
19152 SDValue VSelM = DAG.getConstant(0x80, VT);
19153 SDValue OpVSel = DAG.getNode(ISD::AND, dl, VT, VSelM, Op);
19154 OpVSel = DAG.getNode(X86ISD::PCMPEQ, dl, VT, OpVSel, VSelM);
19156 SDValue CM1 = DAG.getConstant(0x0f, VT);
19157 SDValue CM2 = DAG.getConstant(0x3f, VT);
19159 // r = VSELECT(r, psllw(r & (char16)15, 4), a);
19160 SDValue M = DAG.getNode(ISD::AND, dl, VT, R, CM1);
19161 M = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, MVT::v8i16, M, 4, DAG);
19162 M = DAG.getNode(ISD::BITCAST, dl, VT, M);
19163 R = DAG.getNode(ISD::VSELECT, dl, VT, OpVSel, M, R);
19166 Op = DAG.getNode(ISD::ADD, dl, VT, Op, Op);
19167 OpVSel = DAG.getNode(ISD::AND, dl, VT, VSelM, Op);
19168 OpVSel = DAG.getNode(X86ISD::PCMPEQ, dl, VT, OpVSel, VSelM);
19170 // r = VSELECT(r, psllw(r & (char16)63, 2), a);
19171 M = DAG.getNode(ISD::AND, dl, VT, R, CM2);
19172 M = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, MVT::v8i16, M, 2, DAG);
19173 M = DAG.getNode(ISD::BITCAST, dl, VT, M);
19174 R = DAG.getNode(ISD::VSELECT, dl, VT, OpVSel, M, R);
19177 Op = DAG.getNode(ISD::ADD, dl, VT, Op, Op);
19178 OpVSel = DAG.getNode(ISD::AND, dl, VT, VSelM, Op);
19179 OpVSel = DAG.getNode(X86ISD::PCMPEQ, dl, VT, OpVSel, VSelM);
19181 // return VSELECT(r, r+r, a);
19182 R = DAG.getNode(ISD::VSELECT, dl, VT, OpVSel,
19183 DAG.getNode(ISD::ADD, dl, VT, R, R), R);
19187 // It's worth extending once and using the v8i32 shifts for 16-bit types, but
19188 // the extra overheads to get from v16i8 to v8i32 make the existing SSE
19189 // solution better.
19190 if (Subtarget->hasInt256() && VT == MVT::v8i16) {
19191 MVT NewVT = VT == MVT::v8i16 ? MVT::v8i32 : MVT::v16i16;
19193 Op.getOpcode() == ISD::SRA ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
19194 R = DAG.getNode(ExtOpc, dl, NewVT, R);
19195 Amt = DAG.getNode(ISD::ANY_EXTEND, dl, NewVT, Amt);
19196 return DAG.getNode(ISD::TRUNCATE, dl, VT,
19197 DAG.getNode(Op.getOpcode(), dl, NewVT, R, Amt));
19200 // Decompose 256-bit shifts into smaller 128-bit shifts.
19201 if (VT.is256BitVector()) {
19202 unsigned NumElems = VT.getVectorNumElements();
19203 MVT EltVT = VT.getVectorElementType();
19204 EVT NewVT = MVT::getVectorVT(EltVT, NumElems/2);
19206 // Extract the two vectors
19207 SDValue V1 = Extract128BitVector(R, 0, DAG, dl);
19208 SDValue V2 = Extract128BitVector(R, NumElems/2, DAG, dl);
19210 // Recreate the shift amount vectors
19211 SDValue Amt1, Amt2;
19212 if (Amt.getOpcode() == ISD::BUILD_VECTOR) {
19213 // Constant shift amount
19214 SmallVector<SDValue, 4> Amt1Csts;
19215 SmallVector<SDValue, 4> Amt2Csts;
19216 for (unsigned i = 0; i != NumElems/2; ++i)
19217 Amt1Csts.push_back(Amt->getOperand(i));
19218 for (unsigned i = NumElems/2; i != NumElems; ++i)
19219 Amt2Csts.push_back(Amt->getOperand(i));
19221 Amt1 = DAG.getNode(ISD::BUILD_VECTOR, dl, NewVT, Amt1Csts);
19222 Amt2 = DAG.getNode(ISD::BUILD_VECTOR, dl, NewVT, Amt2Csts);
19224 // Variable shift amount
19225 Amt1 = Extract128BitVector(Amt, 0, DAG, dl);
19226 Amt2 = Extract128BitVector(Amt, NumElems/2, DAG, dl);
19229 // Issue new vector shifts for the smaller types
19230 V1 = DAG.getNode(Op.getOpcode(), dl, NewVT, V1, Amt1);
19231 V2 = DAG.getNode(Op.getOpcode(), dl, NewVT, V2, Amt2);
19233 // Concatenate the result back
19234 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, V1, V2);
19240 static SDValue LowerXALUO(SDValue Op, SelectionDAG &DAG) {
19241 // Lower the "add/sub/mul with overflow" instruction into a regular instruction plus
19242 // a "setcc" instruction that checks the overflow flag. The "brcond" lowering
19243 // looks for this combo and may remove the "setcc" instruction if the "setcc"
19244 // has only one use.
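// Illustrative example: (i32, i1) = uaddo a, b becomes
//   Sum = X86ISD::ADD a, b              (second result is EFLAGS)
//   Ovf = X86ISD::SETCC COND_B, EFLAGS
// so a following brcond can branch on EFLAGS directly.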
19245 SDNode *N = Op.getNode();
19246 SDValue LHS = N->getOperand(0);
19247 SDValue RHS = N->getOperand(1);
19248 unsigned BaseOp = 0;
19251 switch (Op.getOpcode()) {
19252 default: llvm_unreachable("Unknown ovf instruction!");
19254 // An add of one will be selected as an INC. Note that INC doesn't
19255 // set CF, so we can't do this for UADDO.
19256 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS))
19258 BaseOp = X86ISD::INC;
19259 Cond = X86::COND_O;
19262 BaseOp = X86ISD::ADD;
19263 Cond = X86::COND_O;
19266 BaseOp = X86ISD::ADD;
19267 Cond = X86::COND_B;
19270 // A subtract of one will be selected as a DEC. Note that DEC doesn't
19271 // set CF, so we can't do this for USUBO.
19272 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS))
19274 BaseOp = X86ISD::DEC;
19275 Cond = X86::COND_O;
19278 BaseOp = X86ISD::SUB;
19279 Cond = X86::COND_O;
19282 BaseOp = X86ISD::SUB;
19283 Cond = X86::COND_B;
19286 BaseOp = N->getValueType(0) == MVT::i8 ? X86ISD::SMUL8 : X86ISD::SMUL;
19287 Cond = X86::COND_O;
19289 case ISD::UMULO: { // i64, i8 = umulo lhs, rhs --> i64, i64, i32 umul lhs,rhs
19290 if (N->getValueType(0) == MVT::i8) {
19291 BaseOp = X86ISD::UMUL8;
19292 Cond = X86::COND_O;
19295 SDVTList VTs = DAG.getVTList(N->getValueType(0), N->getValueType(0),
19297 SDValue Sum = DAG.getNode(X86ISD::UMUL, DL, VTs, LHS, RHS);
19300 DAG.getNode(X86ISD::SETCC, DL, MVT::i8,
19301 DAG.getConstant(X86::COND_O, MVT::i32),
19302 SDValue(Sum.getNode(), 2));
19304 return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(), Sum, SetCC);
19308 // Also sets EFLAGS.
19309 SDVTList VTs = DAG.getVTList(N->getValueType(0), MVT::i32);
19310 SDValue Sum = DAG.getNode(BaseOp, DL, VTs, LHS, RHS);
19313 DAG.getNode(X86ISD::SETCC, DL, N->getValueType(1),
19314 DAG.getConstant(Cond, MVT::i32),
19315 SDValue(Sum.getNode(), 1));
19317 return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(), Sum, SetCC);
19320 // Sign extension of the low part of vector elements. This may be used either
19321 // when sign extend instructions are not available or if the vector element
19322 // sizes already match the sign-extended size. If the vector elements are in
19323 // their pre-extended size and sign extend instructions are available, that will
19324 // be handled by LowerSIGN_EXTEND.
19325 SDValue X86TargetLowering::LowerSIGN_EXTEND_INREG(SDValue Op,
19326 SelectionDAG &DAG) const {
19328 EVT ExtraVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
19329 MVT VT = Op.getSimpleValueType();
19331 if (!Subtarget->hasSSE2() || !VT.isVector())
19334 unsigned BitsDiff = VT.getScalarType().getSizeInBits() -
19335 ExtraVT.getScalarType().getSizeInBits();
19337 switch (VT.SimpleTy) {
19338 default: return SDValue();
19341 if (!Subtarget->hasFp256())
19343 if (!Subtarget->hasInt256()) {
19344 // needs to be split
19345 unsigned NumElems = VT.getVectorNumElements();
19347 // Extract the LHS vectors
19348 SDValue LHS = Op.getOperand(0);
19349 SDValue LHS1 = Extract128BitVector(LHS, 0, DAG, dl);
19350 SDValue LHS2 = Extract128BitVector(LHS, NumElems/2, DAG, dl);
19352 MVT EltVT = VT.getVectorElementType();
19353 EVT NewVT = MVT::getVectorVT(EltVT, NumElems/2);
19355 EVT ExtraEltVT = ExtraVT.getVectorElementType();
19356 unsigned ExtraNumElems = ExtraVT.getVectorNumElements();
19357 ExtraVT = EVT::getVectorVT(*DAG.getContext(), ExtraEltVT,
19359 SDValue Extra = DAG.getValueType(ExtraVT);
19361 LHS1 = DAG.getNode(Op.getOpcode(), dl, NewVT, LHS1, Extra);
19362 LHS2 = DAG.getNode(Op.getOpcode(), dl, NewVT, LHS2, Extra);
19364 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, LHS1, LHS2);
19369 SDValue Op0 = Op.getOperand(0);
19371 // This is a sign extension of some low part of vector elements without
19372 // changing the size of the vector elements themselves:
19373 // Shift-Left + Shift-Right-Algebraic.
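// e.g. to sign-extend the low 16 bits of each i32 lane in place:
// BitsDiff = 32 - 16 = 16, so each lane becomes (lane << 16) s>> 16.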
19374 SDValue Shl = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, VT, Op0,
19376 return getTargetVShiftByConstNode(X86ISD::VSRAI, dl, VT, Shl, BitsDiff,
19382 /// Returns true if the operand type is exactly twice the native width, and
19383 /// the corresponding cmpxchg8b or cmpxchg16b instruction is available.
19384 /// Used to know whether to use cmpxchg8/16b when expanding atomic operations
19385 /// (otherwise we leave them alone to become __sync_fetch_and_... calls).
19386 bool X86TargetLowering::needsCmpXchgNb(const Type *MemType) const {
19387 unsigned OpWidth = MemType->getPrimitiveSizeInBits();
19390 return !Subtarget->is64Bit(); // FIXME this should be Subtarget.hasCmpxchg8b
19391 else if (OpWidth == 128)
19392 return Subtarget->hasCmpxchg16b();
19397 bool X86TargetLowering::shouldExpandAtomicStoreInIR(StoreInst *SI) const {
19398 return needsCmpXchgNb(SI->getValueOperand()->getType());
19401 // Note: this turns large loads into lock cmpxchg8b/16b.
19402 // FIXME: On 32-bit x86, fild/movq might be faster than lock cmpxchg8b.
19403 bool X86TargetLowering::shouldExpandAtomicLoadInIR(LoadInst *LI) const {
19404 auto PTy = cast<PointerType>(LI->getPointerOperand()->getType());
19405 return needsCmpXchgNb(PTy->getElementType());
19408 bool X86TargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
19409 unsigned NativeWidth = Subtarget->is64Bit() ? 64 : 32;
19410 const Type *MemType = AI->getType();
19412 // If the operand is too big, we must see if cmpxchg8/16b is available
19413 // and default to library calls otherwise.
19414 if (MemType->getPrimitiveSizeInBits() > NativeWidth)
19415 return needsCmpXchgNb(MemType);
19417 AtomicRMWInst::BinOp Op = AI->getOperation();
19420 llvm_unreachable("Unknown atomic operation");
19421 case AtomicRMWInst::Xchg:
19422 case AtomicRMWInst::Add:
19423 case AtomicRMWInst::Sub:
19424 // It's better to use xadd, xsub or xchg for these in all cases.
19426 case AtomicRMWInst::Or:
19427 case AtomicRMWInst::And:
19428 case AtomicRMWInst::Xor:
19429 // If the atomicrmw's result isn't actually used, we can just add a "lock"
19430 // prefix to a normal instruction for these operations.
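// e.g. an 'atomicrmw or' whose result is unused can be selected as
//   lock orl $imm, (%mem)
// (illustrative AT&T syntax) instead of a cmpxchg loop.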
19431 return !AI->use_empty();
19432 case AtomicRMWInst::Nand:
19433 case AtomicRMWInst::Max:
19434 case AtomicRMWInst::Min:
19435 case AtomicRMWInst::UMax:
19436 case AtomicRMWInst::UMin:
19437 // These always require a non-trivial set of data operations on x86. We must
19438 // use a cmpxchg loop.
19443 static bool hasMFENCE(const X86Subtarget& Subtarget) {
19444 // Use mfence if we have SSE2 or we're on x86-64 (even if we asked for
19445 // no-sse2). There isn't any reason to disable it if the target processor supports it.
19447 return Subtarget.hasSSE2() || Subtarget.is64Bit();
19451 X86TargetLowering::lowerIdempotentRMWIntoFencedLoad(AtomicRMWInst *AI) const {
19452 unsigned NativeWidth = Subtarget->is64Bit() ? 64 : 32;
19453 const Type *MemType = AI->getType();
19454 // Accesses larger than the native width are turned into cmpxchg/libcalls, so
19455 // there is no benefit in turning such RMWs into loads, and it is actually
19456 // harmful as it introduces an mfence.
19457 if (MemType->getPrimitiveSizeInBits() > NativeWidth)
19460 auto Builder = IRBuilder<>(AI);
19461 Module *M = Builder.GetInsertBlock()->getParent()->getParent();
19462 auto SynchScope = AI->getSynchScope();
19463 // We must restrict the ordering to avoid generating loads with Release or
19464 // ReleaseAcquire orderings.
19465 auto Order = AtomicCmpXchgInst::getStrongestFailureOrdering(AI->getOrdering());
19466 auto Ptr = AI->getPointerOperand();
19468 // Before the load we need a fence. Here is an example lifted from
19469 // http://www.hpl.hp.com/techreports/2012/HPL-2012-68.pdf showing why a fence is required:
19472 // x.store(1, relaxed);
19473 // r1 = y.fetch_add(0, release);
19475 // y.fetch_add(42, acquire);
19476 // r2 = x.load(relaxed);
19477 // r1 = r2 = 0 is impossible, but becomes possible if the idempotent rmw is
19478 // lowered to just a load without a fence. An mfence flushes the store buffer,
19479 // making the optimization clearly correct.
19480 // FIXME: it is required if isAtLeastRelease(Order) but it is not clear
19481 // otherwise, we might be able to be more aggressive on relaxed idempotent
19482 // rmw. In practice, they do not look useful, so we don't try to be
19483 // especially clever.
19484 if (SynchScope == SingleThread) {
19485 // FIXME: we could just insert an X86ISD::MEMBARRIER here, except we are at
19486 // the IR level, so we must wrap it in an intrinsic.
19488 } else if (hasMFENCE(*Subtarget)) {
19489 Function *MFence = llvm::Intrinsic::getDeclaration(M,
19490 Intrinsic::x86_sse2_mfence);
19491 Builder.CreateCall(MFence);
19493 // FIXME: it might make sense to use a locked operation here but on a
19494 // different cache-line to prevent cache-line bouncing. In practice it
19495 // is probably a small win, and x86 processors without mfence are rare
19496 // enough that we do not bother.
19500 // Finally we can emit the atomic load.
19501 LoadInst *Loaded = Builder.CreateAlignedLoad(Ptr,
19502 AI->getType()->getPrimitiveSizeInBits());
19503 Loaded->setAtomic(Order, SynchScope);
19504 AI->replaceAllUsesWith(Loaded);
19505 AI->eraseFromParent();
19509 static SDValue LowerATOMIC_FENCE(SDValue Op, const X86Subtarget *Subtarget,
19510 SelectionDAG &DAG) {
19512 AtomicOrdering FenceOrdering = static_cast<AtomicOrdering>(
19513 cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue());
19514 SynchronizationScope FenceScope = static_cast<SynchronizationScope>(
19515 cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue());
19517 // The only fence that needs an instruction is a sequentially-consistent
19518 // cross-thread fence.
19519 if (FenceOrdering == SequentiallyConsistent && FenceScope == CrossThread) {
19520 if (hasMFENCE(*Subtarget))
19521 return DAG.getNode(X86ISD::MFENCE, dl, MVT::Other, Op.getOperand(0));
19523 SDValue Chain = Op.getOperand(0);
19524 SDValue Zero = DAG.getConstant(0, MVT::i32);
19526 DAG.getRegister(X86::ESP, MVT::i32), // Base
19527 DAG.getTargetConstant(1, MVT::i8), // Scale
19528 DAG.getRegister(0, MVT::i32), // Index
19529 DAG.getTargetConstant(0, MVT::i32), // Disp
19530 DAG.getRegister(0, MVT::i32), // Segment.
19534 SDNode *Res = DAG.getMachineNode(X86::OR32mrLocked, dl, MVT::Other, Ops);
19535 return SDValue(Res, 0);
19538 // MEMBARRIER is a compiler barrier; it codegens to a no-op.
19539 return DAG.getNode(X86ISD::MEMBARRIER, dl, MVT::Other, Op.getOperand(0));
19542 static SDValue LowerCMP_SWAP(SDValue Op, const X86Subtarget *Subtarget,
19543 SelectionDAG &DAG) {
19544 MVT T = Op.getSimpleValueType();
19548 switch (T.SimpleTy) {
19549 default: llvm_unreachable("Invalid value type!");
19550 case MVT::i8: Reg = X86::AL; size = 1; break;
19551 case MVT::i16: Reg = X86::AX; size = 2; break;
19552 case MVT::i32: Reg = X86::EAX; size = 4; break;
19554 assert(Subtarget->is64Bit() && "Node not type legal!");
19555 Reg = X86::RAX; size = 8;
19558 SDValue cpIn = DAG.getCopyToReg(Op.getOperand(0), DL, Reg,
19559 Op.getOperand(2), SDValue());
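// CMPXCHG implicitly compares against the accumulator (AL/AX/EAX/RAX) and
// returns the old value in it, which is why the expected value is pinned
// to Reg above.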
19560 SDValue Ops[] = { cpIn.getValue(0),
19563 DAG.getTargetConstant(size, MVT::i8),
19564 cpIn.getValue(1) };
19565 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue);
19566 MachineMemOperand *MMO = cast<AtomicSDNode>(Op)->getMemOperand();
19567 SDValue Result = DAG.getMemIntrinsicNode(X86ISD::LCMPXCHG_DAG, DL, Tys,
19571 DAG.getCopyFromReg(Result.getValue(0), DL, Reg, T, Result.getValue(1));
19572 SDValue EFLAGS = DAG.getCopyFromReg(cpOut.getValue(1), DL, X86::EFLAGS,
19573 MVT::i32, cpOut.getValue(2));
19574 SDValue Success = DAG.getNode(X86ISD::SETCC, DL, Op->getValueType(1),
19575 DAG.getConstant(X86::COND_E, MVT::i8), EFLAGS);
19577 DAG.ReplaceAllUsesOfValueWith(Op.getValue(0), cpOut);
19578 DAG.ReplaceAllUsesOfValueWith(Op.getValue(1), Success);
19579 DAG.ReplaceAllUsesOfValueWith(Op.getValue(2), EFLAGS.getValue(1));
19583 static SDValue LowerBITCAST(SDValue Op, const X86Subtarget *Subtarget,
19584 SelectionDAG &DAG) {
19585 MVT SrcVT = Op.getOperand(0).getSimpleValueType();
19586 MVT DstVT = Op.getSimpleValueType();
19588 if (SrcVT == MVT::v2i32 || SrcVT == MVT::v4i16 || SrcVT == MVT::v8i8) {
19589 assert(Subtarget->hasSSE2() && "Requires at least SSE2!");
19590 if (DstVT != MVT::f64)
19591 // This conversion needs to be expanded.
19594 SDValue InVec = Op->getOperand(0);
19596 unsigned NumElts = SrcVT.getVectorNumElements();
19597 EVT SVT = SrcVT.getVectorElementType();
19599 // Widen the input vector in the case of MVT::v2i32.
19600 // Example: from MVT::v2i32 to MVT::v4i32.
19601 SmallVector<SDValue, 16> Elts;
19602 for (unsigned i = 0, e = NumElts; i != e; ++i)
19603 Elts.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, SVT, InVec,
19604 DAG.getIntPtrConstant(i)));
19606 // Explicitly mark the extra elements as Undef.
19607 SDValue Undef = DAG.getUNDEF(SVT);
19608 for (unsigned i = NumElts, e = NumElts * 2; i != e; ++i)
19609 Elts.push_back(Undef);
19611 EVT NewVT = EVT::getVectorVT(*DAG.getContext(), SVT, NumElts * 2);
19612 SDValue BV = DAG.getNode(ISD::BUILD_VECTOR, dl, NewVT, Elts);
19613 SDValue ToV2F64 = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, BV);
19614 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, ToV2F64,
19615 DAG.getIntPtrConstant(0));
19618 assert(Subtarget->is64Bit() && !Subtarget->hasSSE2() &&
19619 Subtarget->hasMMX() && "Unexpected custom BITCAST");
19620 assert((DstVT == MVT::i64 ||
19621 (DstVT.isVector() && DstVT.getSizeInBits()==64)) &&
19622 "Unexpected custom BITCAST");
19623 // i64 <=> MMX conversions are Legal.
19624 if (SrcVT==MVT::i64 && DstVT.isVector())
19626 if (DstVT==MVT::i64 && SrcVT.isVector())
19628 // MMX <=> MMX conversions are Legal.
19629 if (SrcVT.isVector() && DstVT.isVector())
19631 // All other conversions need to be expanded.
19635 static SDValue LowerCTPOP(SDValue Op, const X86Subtarget *Subtarget,
19636 SelectionDAG &DAG) {
19637 SDNode *Node = Op.getNode();
19640 Op = Op.getOperand(0);
19641 EVT VT = Op.getValueType();
19642 assert((VT.is128BitVector() || VT.is256BitVector()) &&
19643 "CTPOP lowering only implemented for 128/256-bit wide vector types");
19645 unsigned NumElts = VT.getVectorNumElements();
19646 EVT EltVT = VT.getVectorElementType();
19647 unsigned Len = EltVT.getSizeInBits();
19649 // This is the vectorized version of the "best" algorithm from
19650 // http://graphics.stanford.edu/~seander/bithacks.html#CountBitsSetParallel
19651 // with a minor tweak to use a series of adds + shifts instead of vector
19652 // multiplications. Implemented for the v2i64, v4i64, v4i32, v8i32 types:
19654 // v2i64, v4i64, v4i32 => Only profitable w/ popcnt disabled
19655 // v8i32 => Always profitable
19657 // FIXME: There are a couple of possible improvements:
19659 // 1) Support for i8 and i16 vectors (needs measurements if popcnt enabled).
19660 // 2) Use strategies from http://wm.ite.pl/articles/sse-popcount.html
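// Scalar reference for one i32 lane (a sketch of the steps below;
// hypothetical helper, not part of this lowering):
//   uint32_t PopCount32(uint32_t V) {
//     V = V - ((V >> 1) & 0x55555555u);
//     V = (V & 0x33333333u) + ((V >> 2) & 0x33333333u);
//     V = (V + (V >> 4)) & 0x0F0F0F0Fu;
//     V += V >> 8;
//     V += V >> 16;  // adds + shifts instead of the * 0x01010101 variant
//     return V & 0x3Fu;
//   }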
19662 assert(EltVT.isInteger() && (Len == 32 || Len == 64) && Len % 8 == 0 &&
19663 "CTPOP not implemented for this vector element type.");
19665 // X86 canonicalizes ANDs to vXi64; generate the appropriate bitcasts to avoid
19666 // extra legalization.
19667 bool NeedsBitcast = EltVT == MVT::i32;
19668 MVT BitcastVT = VT.is256BitVector() ? MVT::v4i64 : MVT::v2i64;
19670 SDValue Cst55 = DAG.getConstant(APInt::getSplat(Len, APInt(8, 0x55)), EltVT);
19671 SDValue Cst33 = DAG.getConstant(APInt::getSplat(Len, APInt(8, 0x33)), EltVT);
19672 SDValue Cst0F = DAG.getConstant(APInt::getSplat(Len, APInt(8, 0x0F)), EltVT);
19674 // v = v - ((v >> 1) & 0x55555555...)
19675 SmallVector<SDValue, 8> Ones(NumElts, DAG.getConstant(1, EltVT));
19676 SDValue OnesV = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Ones);
19677 SDValue Srl = DAG.getNode(ISD::SRL, dl, VT, Op, OnesV);
19679 Srl = DAG.getNode(ISD::BITCAST, dl, BitcastVT, Srl);
19681 SmallVector<SDValue, 8> Mask55(NumElts, Cst55);
19682 SDValue M55 = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Mask55);
19684 M55 = DAG.getNode(ISD::BITCAST, dl, BitcastVT, M55);
19686 SDValue And = DAG.getNode(ISD::AND, dl, Srl.getValueType(), Srl, M55);
19687 if (VT != And.getValueType())
19688 And = DAG.getNode(ISD::BITCAST, dl, VT, And);
19689 SDValue Sub = DAG.getNode(ISD::SUB, dl, VT, Op, And);
19691 // v = (v & 0x33333333...) + ((v >> 2) & 0x33333333...)
19692 SmallVector<SDValue, 8> Mask33(NumElts, Cst33);
19693 SDValue M33 = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Mask33);
19694 SmallVector<SDValue, 8> Twos(NumElts, DAG.getConstant(2, EltVT));
19695 SDValue TwosV = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Twos);
19697 Srl = DAG.getNode(ISD::SRL, dl, VT, Sub, TwosV);
19698 if (NeedsBitcast) {
19699 Srl = DAG.getNode(ISD::BITCAST, dl, BitcastVT, Srl);
19700 M33 = DAG.getNode(ISD::BITCAST, dl, BitcastVT, M33);
19701 Sub = DAG.getNode(ISD::BITCAST, dl, BitcastVT, Sub);
19704 SDValue AndRHS = DAG.getNode(ISD::AND, dl, M33.getValueType(), Srl, M33);
19705 SDValue AndLHS = DAG.getNode(ISD::AND, dl, M33.getValueType(), Sub, M33);
19706 if (VT != AndRHS.getValueType()) {
19707 AndRHS = DAG.getNode(ISD::BITCAST, dl, VT, AndRHS);
19708 AndLHS = DAG.getNode(ISD::BITCAST, dl, VT, AndLHS);
19710 SDValue Add = DAG.getNode(ISD::ADD, dl, VT, AndLHS, AndRHS);
19712 // v = (v + (v >> 4)) & 0x0F0F0F0F...
19713 SmallVector<SDValue, 8> Fours(NumElts, DAG.getConstant(4, EltVT));
19714 SDValue FoursV = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Fours);
19715 Srl = DAG.getNode(ISD::SRL, dl, VT, Add, FoursV);
19716 Add = DAG.getNode(ISD::ADD, dl, VT, Add, Srl);
19718 SmallVector<SDValue, 8> Mask0F(NumElts, Cst0F);
19719 SDValue M0F = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Mask0F);
19720 if (NeedsBitcast) {
19721 Add = DAG.getNode(ISD::BITCAST, dl, BitcastVT, Add);
19722 M0F = DAG.getNode(ISD::BITCAST, dl, BitcastVT, M0F);
19724 And = DAG.getNode(ISD::AND, dl, M0F.getValueType(), Add, M0F);
19725 if (VT != And.getValueType())
19726 And = DAG.getNode(ISD::BITCAST, dl, VT, And);
19728 // The algorithm mentioned above uses:
19729 // v = (v * 0x01010101...) >> (Len - 8)
19731 // Change it to use vector adds + vector shifts which yield faster results on
19732 // Haswell than using vector integer multiplication.
19734 // For i32 elements:
19735 // v = v + (v >> 8)
19736 // v = v + (v >> 16)
19738 // For i64 elements:
19739 // v = v + (v >> 8)
19740 // v = v + (v >> 16)
19741 // v = v + (v >> 32)
19744 SmallVector<SDValue, 8> Csts;
19745 for (unsigned i = 8; i <= Len/2; i *= 2) {
19746 Csts.assign(NumElts, DAG.getConstant(i, EltVT));
19747 SDValue CstsV = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Csts);
19748 Srl = DAG.getNode(ISD::SRL, dl, VT, Add, CstsV);
19749 Add = DAG.getNode(ISD::ADD, dl, VT, Add, Srl);
19753 // The result is in the least significant 6 bits for i32 and 7 bits for i64.
19754 SDValue Cst3F = DAG.getConstant(APInt(Len, Len == 32 ? 0x3F : 0x7F), EltVT);
19755 SmallVector<SDValue, 8> Cst3FV(NumElts, Cst3F);
19756 SDValue M3F = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Cst3FV);
19757 if (NeedsBitcast) {
19758 Add = DAG.getNode(ISD::BITCAST, dl, BitcastVT, Add);
19759 M3F = DAG.getNode(ISD::BITCAST, dl, BitcastVT, M3F);
19761 And = DAG.getNode(ISD::AND, dl, M3F.getValueType(), Add, M3F);
19762 if (VT != And.getValueType())
19763 And = DAG.getNode(ISD::BITCAST, dl, VT, And);
19768 static SDValue LowerLOAD_SUB(SDValue Op, SelectionDAG &DAG) {
19769 SDNode *Node = Op.getNode();
19771 EVT T = Node->getValueType(0);
19772 SDValue negOp = DAG.getNode(ISD::SUB, dl, T,
19773 DAG.getConstant(0, T), Node->getOperand(2));
19774 return DAG.getAtomic(ISD::ATOMIC_LOAD_ADD, dl,
19775 cast<AtomicSDNode>(Node)->getMemoryVT(),
19776 Node->getOperand(0),
19777 Node->getOperand(1), negOp,
19778 cast<AtomicSDNode>(Node)->getMemOperand(),
19779 cast<AtomicSDNode>(Node)->getOrdering(),
19780 cast<AtomicSDNode>(Node)->getSynchScope());
19783 static SDValue LowerATOMIC_STORE(SDValue Op, SelectionDAG &DAG) {
19784 SDNode *Node = Op.getNode();
19786 EVT VT = cast<AtomicSDNode>(Node)->getMemoryVT();
19788 // Convert seq_cst store -> xchg
19789 // Convert wide store -> swap (-> cmpxchg8b/cmpxchg16b)
19790 // FIXME: On 32-bit, store -> fist or movq would be more efficient
19791 // (The only way to get a 16-byte store is cmpxchg16b)
19792 // FIXME: 16-byte ATOMIC_SWAP isn't actually hooked up at the moment.
19793 if (cast<AtomicSDNode>(Node)->getOrdering() == SequentiallyConsistent ||
19794 !DAG.getTargetLoweringInfo().isTypeLegal(VT)) {
19795 SDValue Swap = DAG.getAtomic(ISD::ATOMIC_SWAP, dl,
19796 cast<AtomicSDNode>(Node)->getMemoryVT(),
19797 Node->getOperand(0),
19798 Node->getOperand(1), Node->getOperand(2),
19799 cast<AtomicSDNode>(Node)->getMemOperand(),
19800 cast<AtomicSDNode>(Node)->getOrdering(),
19801 cast<AtomicSDNode>(Node)->getSynchScope());
19802 return Swap.getValue(1);
19804 // Other atomic stores have a simple pattern.
19808 static SDValue LowerADDC_ADDE_SUBC_SUBE(SDValue Op, SelectionDAG &DAG) {
19809 EVT VT = Op.getNode()->getSimpleValueType(0);
19811 // Let legalize expand this if it isn't a legal type yet.
19812 if (!DAG.getTargetLoweringInfo().isTypeLegal(VT))
19815 SDVTList VTs = DAG.getVTList(VT, MVT::i32);
19818 bool ExtraOp = false;
19819 switch (Op.getOpcode()) {
19820 default: llvm_unreachable("Invalid code");
19821 case ISD::ADDC: Opc = X86ISD::ADD; break;
19822 case ISD::ADDE: Opc = X86ISD::ADC; ExtraOp = true; break;
19823 case ISD::SUBC: Opc = X86ISD::SUB; break;
19824 case ISD::SUBE: Opc = X86ISD::SBB; ExtraOp = true; break;
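// ADDE/SUBE additionally consume the incoming carry (the glued EFLAGS
// value passed as the third operand), hence the ExtraOp form below.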
19828 return DAG.getNode(Opc, SDLoc(Op), VTs, Op.getOperand(0),
19830 return DAG.getNode(Opc, SDLoc(Op), VTs, Op.getOperand(0),
19831 Op.getOperand(1), Op.getOperand(2));
19834 static SDValue LowerFSINCOS(SDValue Op, const X86Subtarget *Subtarget,
19835 SelectionDAG &DAG) {
19836 assert(Subtarget->isTargetDarwin() && Subtarget->is64Bit());
19838 // For MacOSX, we want to call an alternative entry point: __sincos_stret,
19839 // which returns the values as { float, float } (in XMM0) or
19840 // { double, double } (which is returned in XMM0, XMM1).
19842 SDValue Arg = Op.getOperand(0);
19843 EVT ArgVT = Arg.getValueType();
19844 Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
19846 TargetLowering::ArgListTy Args;
19847 TargetLowering::ArgListEntry Entry;
19851 Entry.isSExt = false;
19852 Entry.isZExt = false;
19853 Args.push_back(Entry);
19855 bool isF64 = ArgVT == MVT::f64;
19856 // Only optimize x86_64 for now. i386 is a bit messy. For f32,
19857 // the small struct {f32, f32} is returned in (eax, edx). For f64,
19858 // the results are returned via SRet in memory.
19859 const char *LibcallName = isF64 ? "__sincos_stret" : "__sincosf_stret";
19860 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
19861 SDValue Callee = DAG.getExternalSymbol(LibcallName, TLI.getPointerTy());
19863 Type *RetTy = isF64
19864 ? (Type*)StructType::get(ArgTy, ArgTy, nullptr)
19865 : (Type*)VectorType::get(ArgTy, 4);
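// For f32 the two results come back packed in xmm0, so the return type is
// modeled as <4 x float> (sin in element 0, cos in element 1); for f64 the
// { double, double } struct comes back in xmm0/xmm1.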
19867 TargetLowering::CallLoweringInfo CLI(DAG);
19868 CLI.setDebugLoc(dl).setChain(DAG.getEntryNode())
19869 .setCallee(CallingConv::C, RetTy, Callee, std::move(Args), 0);
19871 std::pair<SDValue, SDValue> CallResult = TLI.LowerCallTo(CLI);
19874 // Returned in xmm0 and xmm1.
19875 return CallResult.first;
19877 // Returned in bits 0:31 and 32:63 of xmm0.
19878 SDValue SinVal = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, ArgVT,
19879 CallResult.first, DAG.getIntPtrConstant(0));
19880 SDValue CosVal = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, ArgVT,
19881 CallResult.first, DAG.getIntPtrConstant(1));
19882 SDVTList Tys = DAG.getVTList(ArgVT, ArgVT);
19883 return DAG.getNode(ISD::MERGE_VALUES, dl, Tys, SinVal, CosVal);
19886 /// LowerOperation - Provide custom lowering hooks for some operations.
19888 SDValue X86TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
19889 switch (Op.getOpcode()) {
19890 default: llvm_unreachable("Should not custom lower this!");
19891 case ISD::SIGN_EXTEND_INREG: return LowerSIGN_EXTEND_INREG(Op,DAG);
19892 case ISD::ATOMIC_FENCE: return LowerATOMIC_FENCE(Op, Subtarget, DAG);
19893 case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS:
19894 return LowerCMP_SWAP(Op, Subtarget, DAG);
19895 case ISD::CTPOP: return LowerCTPOP(Op, Subtarget, DAG);
19896 case ISD::ATOMIC_LOAD_SUB: return LowerLOAD_SUB(Op,DAG);
19897 case ISD::ATOMIC_STORE: return LowerATOMIC_STORE(Op,DAG);
19898 case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG);
19899 case ISD::CONCAT_VECTORS: return LowerCONCAT_VECTORS(Op, DAG);
19900 case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG);
19901 case ISD::VSELECT: return LowerVSELECT(Op, DAG);
19902 case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG);
19903 case ISD::INSERT_VECTOR_ELT: return LowerINSERT_VECTOR_ELT(Op, DAG);
19904 case ISD::EXTRACT_SUBVECTOR: return LowerEXTRACT_SUBVECTOR(Op,Subtarget,DAG);
19905 case ISD::INSERT_SUBVECTOR: return LowerINSERT_SUBVECTOR(Op, Subtarget,DAG);
19906 case ISD::SCALAR_TO_VECTOR: return LowerSCALAR_TO_VECTOR(Op, DAG);
19907 case ISD::ConstantPool: return LowerConstantPool(Op, DAG);
19908 case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG);
19909 case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG);
19910 case ISD::ExternalSymbol: return LowerExternalSymbol(Op, DAG);
19911 case ISD::BlockAddress: return LowerBlockAddress(Op, DAG);
19912 case ISD::SHL_PARTS:
19913 case ISD::SRA_PARTS:
19914 case ISD::SRL_PARTS: return LowerShiftParts(Op, DAG);
19915 case ISD::SINT_TO_FP: return LowerSINT_TO_FP(Op, DAG);
19916 case ISD::UINT_TO_FP: return LowerUINT_TO_FP(Op, DAG);
19917 case ISD::TRUNCATE: return LowerTRUNCATE(Op, DAG);
19918 case ISD::ZERO_EXTEND: return LowerZERO_EXTEND(Op, Subtarget, DAG);
19919 case ISD::SIGN_EXTEND: return LowerSIGN_EXTEND(Op, Subtarget, DAG);
19920 case ISD::ANY_EXTEND: return LowerANY_EXTEND(Op, Subtarget, DAG);
19921 case ISD::FP_TO_SINT: return LowerFP_TO_SINT(Op, DAG);
19922 case ISD::FP_TO_UINT: return LowerFP_TO_UINT(Op, DAG);
19923 case ISD::FP_EXTEND: return LowerFP_EXTEND(Op, DAG);
19924 case ISD::LOAD: return LowerExtendedLoad(Op, Subtarget, DAG);
19926 case ISD::FNEG: return LowerFABSorFNEG(Op, DAG);
19927 case ISD::FCOPYSIGN: return LowerFCOPYSIGN(Op, DAG);
19928 case ISD::FGETSIGN: return LowerFGETSIGN(Op, DAG);
19929 case ISD::SETCC: return LowerSETCC(Op, DAG);
19930 case ISD::SELECT: return LowerSELECT(Op, DAG);
19931 case ISD::BRCOND: return LowerBRCOND(Op, DAG);
19932 case ISD::JumpTable: return LowerJumpTable(Op, DAG);
19933 case ISD::VASTART: return LowerVASTART(Op, DAG);
19934 case ISD::VAARG: return LowerVAARG(Op, DAG);
19935 case ISD::VACOPY: return LowerVACOPY(Op, Subtarget, DAG);
19936 case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, Subtarget, DAG);
19937 case ISD::INTRINSIC_VOID:
19938 case ISD::INTRINSIC_W_CHAIN: return LowerINTRINSIC_W_CHAIN(Op, Subtarget, DAG);
19939 case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG);
19940 case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG);
19941 case ISD::FRAME_TO_ARGS_OFFSET:
19942 return LowerFRAME_TO_ARGS_OFFSET(Op, DAG);
19943 case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG);
19944 case ISD::EH_RETURN: return LowerEH_RETURN(Op, DAG);
19945 case ISD::EH_SJLJ_SETJMP: return lowerEH_SJLJ_SETJMP(Op, DAG);
19946 case ISD::EH_SJLJ_LONGJMP: return lowerEH_SJLJ_LONGJMP(Op, DAG);
19947 case ISD::INIT_TRAMPOLINE: return LowerINIT_TRAMPOLINE(Op, DAG);
19948 case ISD::ADJUST_TRAMPOLINE: return LowerADJUST_TRAMPOLINE(Op, DAG);
19949 case ISD::FLT_ROUNDS_: return LowerFLT_ROUNDS_(Op, DAG);
19950 case ISD::CTLZ: return LowerCTLZ(Op, DAG);
19951 case ISD::CTLZ_ZERO_UNDEF: return LowerCTLZ_ZERO_UNDEF(Op, DAG);
19952 case ISD::CTTZ: return LowerCTTZ(Op, DAG);
19953 case ISD::MUL: return LowerMUL(Op, Subtarget, DAG);
19954 case ISD::UMUL_LOHI:
19955 case ISD::SMUL_LOHI: return LowerMUL_LOHI(Op, Subtarget, DAG);
19958 case ISD::SHL: return LowerShift(Op, Subtarget, DAG);
19964 case ISD::UMULO: return LowerXALUO(Op, DAG);
19965 case ISD::READCYCLECOUNTER: return LowerREADCYCLECOUNTER(Op, Subtarget,DAG);
19966 case ISD::BITCAST: return LowerBITCAST(Op, Subtarget, DAG);
19970 case ISD::SUBE: return LowerADDC_ADDE_SUBC_SUBE(Op, DAG);
19971 case ISD::ADD: return LowerADD(Op, DAG);
19972 case ISD::SUB: return LowerSUB(Op, DAG);
19973 case ISD::FSINCOS: return LowerFSINCOS(Op, Subtarget, DAG);
19977 /// ReplaceNodeResults - Replace a node with an illegal result type
19978 /// with a new node built out of custom code.
19979 void X86TargetLowering::ReplaceNodeResults(SDNode *N,
19980 SmallVectorImpl<SDValue>&Results,
19981 SelectionDAG &DAG) const {
19983 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
19984 switch (N->getOpcode()) {
19986 llvm_unreachable("Do not know how to custom type legalize this operation!");
19987 // We might have generated v2f32 FMIN/FMAX operations. Widen them to v4f32.
19988 case X86ISD::FMINC:
19990 case X86ISD::FMAXC:
19991 case X86ISD::FMAX: {
19992 EVT VT = N->getValueType(0);
19993 if (VT != MVT::v2f32)
19994 llvm_unreachable("Unexpected type (!= v2f32) on FMIN/FMAX.");
19995 SDValue UNDEF = DAG.getUNDEF(VT);
19996 SDValue LHS = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4f32,
19997 N->getOperand(0), UNDEF);
19998 SDValue RHS = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4f32,
19999 N->getOperand(1), UNDEF);
20000 Results.push_back(DAG.getNode(N->getOpcode(), dl, MVT::v4f32, LHS, RHS));
20003 case ISD::SIGN_EXTEND_INREG:
20008 // We don't want to expand or promote these.
20015 case ISD::UDIVREM: {
20016 SDValue V = LowerWin64_i128OP(SDValue(N,0), DAG);
20017 Results.push_back(V);
20020 case ISD::FP_TO_SINT:
20021 case ISD::FP_TO_UINT: {
20022 bool IsSigned = N->getOpcode() == ISD::FP_TO_SINT;
20024 if (!IsSigned && !isIntegerTypeFTOL(SDValue(N, 0).getValueType()))
20027 std::pair<SDValue,SDValue> Vals =
20028 FP_TO_INTHelper(SDValue(N, 0), DAG, IsSigned, /*IsReplace=*/ true);
20029 SDValue FIST = Vals.first, StackSlot = Vals.second;
20030 if (FIST.getNode()) {
20031 EVT VT = N->getValueType(0);
20032 // Return a load from the stack slot.
20033 if (StackSlot.getNode())
20034 Results.push_back(DAG.getLoad(VT, dl, FIST, StackSlot,
20035 MachinePointerInfo(),
20036 false, false, false, 0));
20038 Results.push_back(FIST);
20042 case ISD::UINT_TO_FP: {
20043 assert(Subtarget->hasSSE2() && "Requires at least SSE2!");
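// The expansion below uses the classic 2^52 trick: 0x4330000000000000 is
// the bit pattern of the double 2^52, so OR'ing a zero-extended 32-bit
// integer into the low mantissa bits yields exactly 2^52 + x; subtracting
// VBias recovers x, and VFPROUND narrows the result to f32.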
20044 if (N->getOperand(0).getValueType() != MVT::v2i32 ||
20045 N->getValueType(0) != MVT::v2f32)
20047 SDValue ZExtIn = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v2i64,
20049 SDValue Bias = DAG.getConstantFP(BitsToDouble(0x4330000000000000ULL),
20051 SDValue VBias = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v2f64, Bias, Bias);
20052 SDValue Or = DAG.getNode(ISD::OR, dl, MVT::v2i64, ZExtIn,
20053 DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, VBias));
20054 Or = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Or);
20055 SDValue Sub = DAG.getNode(ISD::FSUB, dl, MVT::v2f64, Or, VBias);
20056 Results.push_back(DAG.getNode(X86ISD::VFPROUND, dl, MVT::v4f32, Sub));
20059 case ISD::FP_ROUND: {
20060 if (!TLI.isTypeLegal(N->getOperand(0).getValueType()))
20062 SDValue V = DAG.getNode(X86ISD::VFPROUND, dl, MVT::v4f32, N->getOperand(0));
20063 Results.push_back(V);
20066 case ISD::INTRINSIC_W_CHAIN: {
20067 unsigned IntNo = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
20069 default : llvm_unreachable("Do not know how to custom type "
20070 "legalize this intrinsic operation!");
20071 case Intrinsic::x86_rdtsc:
20072 return getReadTimeStampCounter(N, dl, X86ISD::RDTSC_DAG, DAG, Subtarget,
20074 case Intrinsic::x86_rdtscp:
20075 return getReadTimeStampCounter(N, dl, X86ISD::RDTSCP_DAG, DAG, Subtarget,
20077 case Intrinsic::x86_rdpmc:
20078 return getReadPerformanceCounter(N, dl, DAG, Subtarget, Results);
20081 case ISD::READCYCLECOUNTER: {
20082 return getReadTimeStampCounter(N, dl, X86ISD::RDTSC_DAG, DAG, Subtarget,
20085 case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS: {
20086 EVT T = N->getValueType(0);
20087 assert((T == MVT::i64 || T == MVT::i128) && "can only expand cmpxchg pair");
20088 bool Regs64bit = T == MVT::i128;
20089 EVT HalfT = Regs64bit ? MVT::i64 : MVT::i32;
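// CMPXCHG8B/16B expect the compare value in EDX:EAX (RDX:RAX) and the
// replacement in ECX:EBX (RCX:RBX); the CopyToReg chain below pins each
// half to those fixed register pairs.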
20090 SDValue cpInL, cpInH;
20091 cpInL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(2),
20092 DAG.getConstant(0, HalfT));
20093 cpInH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(2),
20094 DAG.getConstant(1, HalfT));
20095 cpInL = DAG.getCopyToReg(N->getOperand(0), dl,
20096 Regs64bit ? X86::RAX : X86::EAX,
20098 cpInH = DAG.getCopyToReg(cpInL.getValue(0), dl,
20099 Regs64bit ? X86::RDX : X86::EDX,
20100 cpInH, cpInL.getValue(1));
20101 SDValue swapInL, swapInH;
20102 swapInL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(3),
20103 DAG.getConstant(0, HalfT));
20104 swapInH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(3),
20105 DAG.getConstant(1, HalfT));
20106 swapInL = DAG.getCopyToReg(cpInH.getValue(0), dl,
20107 Regs64bit ? X86::RBX : X86::EBX,
20108 swapInL, cpInH.getValue(1));
20109 swapInH = DAG.getCopyToReg(swapInL.getValue(0), dl,
20110 Regs64bit ? X86::RCX : X86::ECX,
20111 swapInH, swapInL.getValue(1));
20112 SDValue Ops[] = { swapInH.getValue(0),
20114 swapInH.getValue(1) };
20115 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue);
20116 MachineMemOperand *MMO = cast<AtomicSDNode>(N)->getMemOperand();
20117 unsigned Opcode = Regs64bit ? X86ISD::LCMPXCHG16_DAG :
20118 X86ISD::LCMPXCHG8_DAG;
20119 SDValue Result = DAG.getMemIntrinsicNode(Opcode, dl, Tys, Ops, T, MMO);
20120 SDValue cpOutL = DAG.getCopyFromReg(Result.getValue(0), dl,
20121 Regs64bit ? X86::RAX : X86::EAX,
20122 HalfT, Result.getValue(1));
20123 SDValue cpOutH = DAG.getCopyFromReg(cpOutL.getValue(1), dl,
20124 Regs64bit ? X86::RDX : X86::EDX,
20125 HalfT, cpOutL.getValue(2));
20126 SDValue OpsF[] = { cpOutL.getValue(0), cpOutH.getValue(0)};
20128 SDValue EFLAGS = DAG.getCopyFromReg(cpOutH.getValue(1), dl, X86::EFLAGS,
20129 MVT::i32, cpOutH.getValue(2));
20131 DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
20132 DAG.getConstant(X86::COND_E, MVT::i8), EFLAGS);
20133 Success = DAG.getZExtOrTrunc(Success, dl, N->getValueType(1));
20135 Results.push_back(DAG.getNode(ISD::BUILD_PAIR, dl, T, OpsF));
20136 Results.push_back(Success);
20137 Results.push_back(EFLAGS.getValue(1));
20140 case ISD::ATOMIC_SWAP:
20141 case ISD::ATOMIC_LOAD_ADD:
20142 case ISD::ATOMIC_LOAD_SUB:
20143 case ISD::ATOMIC_LOAD_AND:
20144 case ISD::ATOMIC_LOAD_OR:
20145 case ISD::ATOMIC_LOAD_XOR:
20146 case ISD::ATOMIC_LOAD_NAND:
20147 case ISD::ATOMIC_LOAD_MIN:
20148 case ISD::ATOMIC_LOAD_MAX:
20149 case ISD::ATOMIC_LOAD_UMIN:
20150 case ISD::ATOMIC_LOAD_UMAX:
20151 case ISD::ATOMIC_LOAD: {
20152 // Delegate to generic TypeLegalization. Situations we can really handle
20153 // should have already been dealt with by AtomicExpandPass.cpp.
20156 case ISD::BITCAST: {
20157 assert(Subtarget->hasSSE2() && "Requires at least SSE2!");
20158 EVT DstVT = N->getValueType(0);
20159 EVT SrcVT = N->getOperand(0)->getValueType(0);
20161 if (SrcVT != MVT::f64 ||
20162 (DstVT != MVT::v2i32 && DstVT != MVT::v4i16 && DstVT != MVT::v8i8))
20165 unsigned NumElts = DstVT.getVectorNumElements();
20166 EVT SVT = DstVT.getVectorElementType();
20167 EVT WiderVT = EVT::getVectorVT(*DAG.getContext(), SVT, NumElts * 2);
20168 SDValue Expanded = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl,
20169 MVT::v2f64, N->getOperand(0));
20170 SDValue ToVecInt = DAG.getNode(ISD::BITCAST, dl, WiderVT, Expanded);
20172 if (ExperimentalVectorWideningLegalization) {
20173 // If we are legalizing vectors by widening, we already have the desired
20174 // legal vector type; just return it.
20175 Results.push_back(ToVecInt);
20179 SmallVector<SDValue, 8> Elts;
20180 for (unsigned i = 0, e = NumElts; i != e; ++i)
20181 Elts.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, SVT,
20182 ToVecInt, DAG.getIntPtrConstant(i)));
20184 Results.push_back(DAG.getNode(ISD::BUILD_VECTOR, dl, DstVT, Elts));
20189 const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const {
20191 default: return nullptr;
  case X86ISD::BSF: return "X86ISD::BSF";
  case X86ISD::BSR: return "X86ISD::BSR";
  case X86ISD::SHLD: return "X86ISD::SHLD";
  case X86ISD::SHRD: return "X86ISD::SHRD";
  case X86ISD::FAND: return "X86ISD::FAND";
  case X86ISD::FANDN: return "X86ISD::FANDN";
  case X86ISD::FOR: return "X86ISD::FOR";
  case X86ISD::FXOR: return "X86ISD::FXOR";
  case X86ISD::FSRL: return "X86ISD::FSRL";
  case X86ISD::FILD: return "X86ISD::FILD";
  case X86ISD::FILD_FLAG: return "X86ISD::FILD_FLAG";
  case X86ISD::FP_TO_INT16_IN_MEM: return "X86ISD::FP_TO_INT16_IN_MEM";
  case X86ISD::FP_TO_INT32_IN_MEM: return "X86ISD::FP_TO_INT32_IN_MEM";
  case X86ISD::FP_TO_INT64_IN_MEM: return "X86ISD::FP_TO_INT64_IN_MEM";
  case X86ISD::FLD: return "X86ISD::FLD";
  case X86ISD::FST: return "X86ISD::FST";
  case X86ISD::CALL: return "X86ISD::CALL";
  case X86ISD::RDTSC_DAG: return "X86ISD::RDTSC_DAG";
  case X86ISD::RDTSCP_DAG: return "X86ISD::RDTSCP_DAG";
  case X86ISD::RDPMC_DAG: return "X86ISD::RDPMC_DAG";
  case X86ISD::BT: return "X86ISD::BT";
  case X86ISD::CMP: return "X86ISD::CMP";
  case X86ISD::COMI: return "X86ISD::COMI";
  case X86ISD::UCOMI: return "X86ISD::UCOMI";
  case X86ISD::CMPM: return "X86ISD::CMPM";
  case X86ISD::CMPMU: return "X86ISD::CMPMU";
  case X86ISD::SETCC: return "X86ISD::SETCC";
  case X86ISD::SETCC_CARRY: return "X86ISD::SETCC_CARRY";
  case X86ISD::FSETCC: return "X86ISD::FSETCC";
  case X86ISD::CMOV: return "X86ISD::CMOV";
  case X86ISD::BRCOND: return "X86ISD::BRCOND";
  case X86ISD::RET_FLAG: return "X86ISD::RET_FLAG";
  case X86ISD::REP_STOS: return "X86ISD::REP_STOS";
  case X86ISD::REP_MOVS: return "X86ISD::REP_MOVS";
  case X86ISD::GlobalBaseReg: return "X86ISD::GlobalBaseReg";
  case X86ISD::Wrapper: return "X86ISD::Wrapper";
  case X86ISD::WrapperRIP: return "X86ISD::WrapperRIP";
  case X86ISD::PEXTRB: return "X86ISD::PEXTRB";
  case X86ISD::PEXTRW: return "X86ISD::PEXTRW";
  case X86ISD::INSERTPS: return "X86ISD::INSERTPS";
  case X86ISD::PINSRB: return "X86ISD::PINSRB";
  case X86ISD::PINSRW: return "X86ISD::PINSRW";
  case X86ISD::PSHUFB: return "X86ISD::PSHUFB";
  case X86ISD::ANDNP: return "X86ISD::ANDNP";
  case X86ISD::PSIGN: return "X86ISD::PSIGN";
  case X86ISD::BLENDI: return "X86ISD::BLENDI";
  case X86ISD::SHRUNKBLEND: return "X86ISD::SHRUNKBLEND";
  case X86ISD::SUBUS: return "X86ISD::SUBUS";
  case X86ISD::HADD: return "X86ISD::HADD";
  case X86ISD::HSUB: return "X86ISD::HSUB";
  case X86ISD::FHADD: return "X86ISD::FHADD";
  case X86ISD::FHSUB: return "X86ISD::FHSUB";
  case X86ISD::UMAX: return "X86ISD::UMAX";
  case X86ISD::UMIN: return "X86ISD::UMIN";
  case X86ISD::SMAX: return "X86ISD::SMAX";
  case X86ISD::SMIN: return "X86ISD::SMIN";
  case X86ISD::FMAX: return "X86ISD::FMAX";
  case X86ISD::FMIN: return "X86ISD::FMIN";
  case X86ISD::FMAXC: return "X86ISD::FMAXC";
  case X86ISD::FMINC: return "X86ISD::FMINC";
  case X86ISD::FRSQRT: return "X86ISD::FRSQRT";
  case X86ISD::FRCP: return "X86ISD::FRCP";
  case X86ISD::TLSADDR: return "X86ISD::TLSADDR";
  case X86ISD::TLSBASEADDR: return "X86ISD::TLSBASEADDR";
  case X86ISD::TLSCALL: return "X86ISD::TLSCALL";
  case X86ISD::EH_SJLJ_SETJMP: return "X86ISD::EH_SJLJ_SETJMP";
  case X86ISD::EH_SJLJ_LONGJMP: return "X86ISD::EH_SJLJ_LONGJMP";
  case X86ISD::EH_RETURN: return "X86ISD::EH_RETURN";
  case X86ISD::TC_RETURN: return "X86ISD::TC_RETURN";
  case X86ISD::FNSTCW16m: return "X86ISD::FNSTCW16m";
  case X86ISD::FNSTSW16r: return "X86ISD::FNSTSW16r";
  case X86ISD::LCMPXCHG_DAG: return "X86ISD::LCMPXCHG_DAG";
  case X86ISD::LCMPXCHG8_DAG: return "X86ISD::LCMPXCHG8_DAG";
  case X86ISD::LCMPXCHG16_DAG: return "X86ISD::LCMPXCHG16_DAG";
  case X86ISD::VZEXT_MOVL: return "X86ISD::VZEXT_MOVL";
  case X86ISD::VZEXT_LOAD: return "X86ISD::VZEXT_LOAD";
  case X86ISD::VZEXT: return "X86ISD::VZEXT";
  case X86ISD::VSEXT: return "X86ISD::VSEXT";
  case X86ISD::VTRUNC: return "X86ISD::VTRUNC";
  case X86ISD::VTRUNCM: return "X86ISD::VTRUNCM";
  case X86ISD::VINSERT: return "X86ISD::VINSERT";
  case X86ISD::VFPEXT: return "X86ISD::VFPEXT";
  case X86ISD::VFPROUND: return "X86ISD::VFPROUND";
  case X86ISD::VSHLDQ: return "X86ISD::VSHLDQ";
  case X86ISD::VSRLDQ: return "X86ISD::VSRLDQ";
  case X86ISD::VSHL: return "X86ISD::VSHL";
  case X86ISD::VSRL: return "X86ISD::VSRL";
  case X86ISD::VSRA: return "X86ISD::VSRA";
  case X86ISD::VSHLI: return "X86ISD::VSHLI";
  case X86ISD::VSRLI: return "X86ISD::VSRLI";
  case X86ISD::VSRAI: return "X86ISD::VSRAI";
  case X86ISD::CMPP: return "X86ISD::CMPP";
  case X86ISD::PCMPEQ: return "X86ISD::PCMPEQ";
  case X86ISD::PCMPGT: return "X86ISD::PCMPGT";
  case X86ISD::PCMPEQM: return "X86ISD::PCMPEQM";
  case X86ISD::PCMPGTM: return "X86ISD::PCMPGTM";
  case X86ISD::ADD: return "X86ISD::ADD";
  case X86ISD::SUB: return "X86ISD::SUB";
  case X86ISD::ADC: return "X86ISD::ADC";
  case X86ISD::SBB: return "X86ISD::SBB";
  case X86ISD::SMUL: return "X86ISD::SMUL";
  case X86ISD::UMUL: return "X86ISD::UMUL";
  case X86ISD::SMUL8: return "X86ISD::SMUL8";
  case X86ISD::UMUL8: return "X86ISD::UMUL8";
  case X86ISD::SDIVREM8_SEXT_HREG: return "X86ISD::SDIVREM8_SEXT_HREG";
  case X86ISD::UDIVREM8_ZEXT_HREG: return "X86ISD::UDIVREM8_ZEXT_HREG";
  case X86ISD::INC: return "X86ISD::INC";
  case X86ISD::DEC: return "X86ISD::DEC";
  case X86ISD::OR: return "X86ISD::OR";
  case X86ISD::XOR: return "X86ISD::XOR";
  case X86ISD::AND: return "X86ISD::AND";
  case X86ISD::BEXTR: return "X86ISD::BEXTR";
  case X86ISD::MUL_IMM: return "X86ISD::MUL_IMM";
  case X86ISD::PTEST: return "X86ISD::PTEST";
  case X86ISD::TESTP: return "X86ISD::TESTP";
  case X86ISD::TESTM: return "X86ISD::TESTM";
  case X86ISD::TESTNM: return "X86ISD::TESTNM";
  case X86ISD::KORTEST: return "X86ISD::KORTEST";
  case X86ISD::PACKSS: return "X86ISD::PACKSS";
  case X86ISD::PACKUS: return "X86ISD::PACKUS";
  case X86ISD::PALIGNR: return "X86ISD::PALIGNR";
  case X86ISD::VALIGN: return "X86ISD::VALIGN";
  case X86ISD::PSHUFD: return "X86ISD::PSHUFD";
  case X86ISD::PSHUFHW: return "X86ISD::PSHUFHW";
  case X86ISD::PSHUFLW: return "X86ISD::PSHUFLW";
  case X86ISD::SHUFP: return "X86ISD::SHUFP";
  case X86ISD::MOVLHPS: return "X86ISD::MOVLHPS";
  case X86ISD::MOVLHPD: return "X86ISD::MOVLHPD";
  case X86ISD::MOVHLPS: return "X86ISD::MOVHLPS";
  case X86ISD::MOVLPS: return "X86ISD::MOVLPS";
  case X86ISD::MOVLPD: return "X86ISD::MOVLPD";
  case X86ISD::MOVDDUP: return "X86ISD::MOVDDUP";
  case X86ISD::MOVSHDUP: return "X86ISD::MOVSHDUP";
  case X86ISD::MOVSLDUP: return "X86ISD::MOVSLDUP";
  case X86ISD::MOVSD: return "X86ISD::MOVSD";
  case X86ISD::MOVSS: return "X86ISD::MOVSS";
  case X86ISD::UNPCKL: return "X86ISD::UNPCKL";
  case X86ISD::UNPCKH: return "X86ISD::UNPCKH";
  case X86ISD::VBROADCAST: return "X86ISD::VBROADCAST";
  case X86ISD::VBROADCASTM: return "X86ISD::VBROADCASTM";
  case X86ISD::VEXTRACT: return "X86ISD::VEXTRACT";
  case X86ISD::VPERMILPI: return "X86ISD::VPERMILPI";
  case X86ISD::VPERM2X128: return "X86ISD::VPERM2X128";
  case X86ISD::VPERMV: return "X86ISD::VPERMV";
  case X86ISD::VPERMV3: return "X86ISD::VPERMV3";
  case X86ISD::VPERMIV3: return "X86ISD::VPERMIV3";
  case X86ISD::VPERMI: return "X86ISD::VPERMI";
  case X86ISD::PMULUDQ: return "X86ISD::PMULUDQ";
  case X86ISD::PMULDQ: return "X86ISD::PMULDQ";
  case X86ISD::VASTART_SAVE_XMM_REGS: return "X86ISD::VASTART_SAVE_XMM_REGS";
  case X86ISD::VAARG_64: return "X86ISD::VAARG_64";
  case X86ISD::WIN_ALLOCA: return "X86ISD::WIN_ALLOCA";
  case X86ISD::MEMBARRIER: return "X86ISD::MEMBARRIER";
  case X86ISD::SEG_ALLOCA: return "X86ISD::SEG_ALLOCA";
  case X86ISD::WIN_FTOL: return "X86ISD::WIN_FTOL";
  case X86ISD::SAHF: return "X86ISD::SAHF";
  case X86ISD::RDRAND: return "X86ISD::RDRAND";
  case X86ISD::RDSEED: return "X86ISD::RDSEED";
  case X86ISD::FMADD: return "X86ISD::FMADD";
  case X86ISD::FMSUB: return "X86ISD::FMSUB";
  case X86ISD::FNMADD: return "X86ISD::FNMADD";
  case X86ISD::FNMSUB: return "X86ISD::FNMSUB";
  case X86ISD::FMADDSUB: return "X86ISD::FMADDSUB";
  case X86ISD::FMSUBADD: return "X86ISD::FMSUBADD";
  case X86ISD::PCMPESTRI: return "X86ISD::PCMPESTRI";
  case X86ISD::PCMPISTRI: return "X86ISD::PCMPISTRI";
  case X86ISD::XTEST: return "X86ISD::XTEST";
  case X86ISD::COMPRESS: return "X86ISD::COMPRESS";
  case X86ISD::EXPAND: return "X86ISD::EXPAND";
  case X86ISD::SELECT: return "X86ISD::SELECT";
  case X86ISD::ADDSUB: return "X86ISD::ADDSUB";
  case X86ISD::RCP28: return "X86ISD::RCP28";
  case X86ISD::RSQRT28: return "X86ISD::RSQRT28";
  }
}

// isLegalAddressingMode - Return true if the addressing mode represented
// by AM is legal for this target, for a load/store of the specified type.
bool X86TargetLowering::isLegalAddressingMode(const AddrMode &AM,
                                              Type *Ty) const {
  // X86 supports extremely general addressing modes.
  CodeModel::Model M = getTargetMachine().getCodeModel();
  Reloc::Model R = getTargetMachine().getRelocationModel();

  // X86 allows a sign-extended 32-bit immediate field as a displacement.
  if (!X86::isOffsetSuitableForCodeModel(AM.BaseOffs, M, AM.BaseGV != nullptr))
    return false;

  if (AM.BaseGV) {
    unsigned GVFlags =
      Subtarget->ClassifyGlobalReference(AM.BaseGV, getTargetMachine());

    // If a reference to this global requires an extra load, we can't fold it.
    if (isGlobalStubReference(GVFlags))
      return false;

    // If BaseGV requires a register for the PIC base, we cannot also have a
    // BaseReg specified.
    if (AM.HasBaseReg && isGlobalRelativeToPICBase(GVFlags))
      return false;

    // If lower 4G is not available, then we must use rip-relative addressing.
    if ((M != CodeModel::Small || R != Reloc::Static) &&
        Subtarget->is64Bit() && (AM.BaseOffs || AM.Scale > 1))
      return false;
  }

  switch (AM.Scale) {
  case 0:
  case 1:
  case 2:
  case 4:
  case 8:
    // These scales always work.
    break;
  case 3:
  case 5:
  case 9:
    // These scales are formed with basereg+scalereg. Only accept if there is
    // no basereg yet.
    if (AM.HasBaseReg)
      return false;
    break;
  default: // Other stuff never works.
    return false;
  }

  return true;
}
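// Illustrative note on isLegalAddressingMode above: an x86 memory operand has
// the general form base + scale*index + disp32, so (assuming a small/static
// code model) both of these fold into a single instruction:
//
//   movl 16(%rdi,%rcx,4), %eax   # base=rdi, index=rcx, scale=4, disp=16
//   leaq (%rax,%rax,2), %rdx     # scale 3 = base + 2*index, so it occupies
//                                # the base-register slot itself
//
// The second form is why scales of 3, 5 and 9 are accepted only when no
// separate base register is present.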
bool X86TargetLowering::isVectorShiftByScalarCheap(Type *Ty) const {
  unsigned Bits = Ty->getScalarSizeInBits();

  // 8-bit shifts are always expensive, but versions with a scalar amount
  // aren't particularly cheaper than those without.
  if (Bits == 8)
    return false;

  // On AVX2 there are new vpsllv[dq] instructions (and other shifts), that
  // make variable shifts just as cheap as scalar ones.
  if (Subtarget->hasInt256() && (Bits == 32 || Bits == 64))
    return false;

  // Otherwise, it's significantly cheaper to shift by a scalar amount than by
  // a fully general vector.
  return true;
}
bool X86TargetLowering::isTruncateFree(Type *Ty1, Type *Ty2) const {
  if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
    return false;
  unsigned NumBits1 = Ty1->getPrimitiveSizeInBits();
  unsigned NumBits2 = Ty2->getPrimitiveSizeInBits();
  return NumBits1 > NumBits2;
}
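// For example, truncating i64 to i32 on x86-64 is free because the narrow
// value is simply read out of the 32-bit subregister (%eax out of %rax); no
// instruction needs to be emitted.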
bool X86TargetLowering::allowTruncateForTailCall(Type *Ty1, Type *Ty2) const {
  if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
    return false;

  if (!isTypeLegal(EVT::getEVT(Ty1)))
    return false;

  assert(Ty1->getPrimitiveSizeInBits() <= 64 && "i128 is probably not a noop");

  // Assuming the caller doesn't have a zeroext or signext return parameter,
  // truncation all the way down to i1 is valid.
  return true;
}

bool X86TargetLowering::isLegalICmpImmediate(int64_t Imm) const {
  return isInt<32>(Imm);
}
bool X86TargetLowering::isLegalAddImmediate(int64_t Imm) const {
  // Can also use sub to handle negated immediates.
  return isInt<32>(Imm);
}

bool X86TargetLowering::isTruncateFree(EVT VT1, EVT VT2) const {
  if (!VT1.isInteger() || !VT2.isInteger())
    return false;
  unsigned NumBits1 = VT1.getSizeInBits();
  unsigned NumBits2 = VT2.getSizeInBits();
  return NumBits1 > NumBits2;
}
bool X86TargetLowering::isZExtFree(Type *Ty1, Type *Ty2) const {
  // x86-64 implicitly zero-extends 32-bit results in 64-bit registers.
  return Ty1->isIntegerTy(32) && Ty2->isIntegerTy(64) && Subtarget->is64Bit();
}

bool X86TargetLowering::isZExtFree(EVT VT1, EVT VT2) const {
  // x86-64 implicitly zero-extends 32-bit results in 64-bit registers.
  return VT1 == MVT::i32 && VT2 == MVT::i64 && Subtarget->is64Bit();
}
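// For example, on x86-64 a 32-bit operation such as "movl %edi, %eax" already
// clears bits 63:32 of %rax, so a following i32 -> i64 zero extension costs
// nothing.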
bool X86TargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
  EVT VT1 = Val.getValueType();
  if (isZExtFree(VT1, VT2))
    return true;

  if (Val.getOpcode() != ISD::LOAD)
    return false;

  if (!VT1.isSimple() || !VT1.isInteger() ||
      !VT2.isSimple() || !VT2.isInteger())
    return false;

  switch (VT1.getSimpleVT().SimpleTy) {
  default: break;
  case MVT::i8:
  case MVT::i16:
  case MVT::i32:
    // X86 has 8, 16, and 32-bit zero-extending loads.
    return true;
  }

  return false;
}

bool X86TargetLowering::isVectorLoadExtDesirable(SDValue) const { return true; }
bool
X86TargetLowering::isFMAFasterThanFMulAndFAdd(EVT VT) const {
  if (!(Subtarget->hasFMA() || Subtarget->hasFMA4()))
    return false;

  VT = VT.getScalarType();

  if (!VT.isSimple())
    return false;

  switch (VT.getSimpleVT().SimpleTy) {
  case MVT::f32:
  case MVT::f64:
    return true;
  default:
    break;
  }

  return false;
}

bool X86TargetLowering::isNarrowingProfitable(EVT VT1, EVT VT2) const {
  // i16 instructions are longer (0x66 prefix) and potentially slower.
  return !(VT1 == MVT::i32 && VT2 == MVT::i16);
}
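// For example, "addw %ax, %bx" needs a 0x66 operand-size prefix and can incur
// partial-register stalls on some microarchitectures, while the i32 form
// "addl %eax, %ebx" has neither problem; hence narrowing i32 operations to
// i16 is reported as unprofitable above.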
/// isShuffleMaskLegal - Targets can use this to indicate that they only
/// support *some* VECTOR_SHUFFLE operations, those with specific masks.
/// By default, if a target supports the VECTOR_SHUFFLE node, all mask values
/// are assumed to be legal.
bool
X86TargetLowering::isShuffleMaskLegal(const SmallVectorImpl<int> &M,
                                      EVT VT) const {
  if (!VT.isSimple())
    return false;

  MVT SVT = VT.getSimpleVT();

  // Very little shuffling can be done for 64-bit vectors right now.
  if (VT.getSizeInBits() == 64)
    return false;

  // This is an experimental legality test that is tailored to match the
  // legality test of the experimental lowering more closely. They are gated
  // separately to ease testing of performance differences.
  if (ExperimentalVectorShuffleLegality)
    // We only care that the types being shuffled are legal. The lowering can
    // handle any possible shuffle mask that results.
    return isTypeLegal(SVT);

  // If this is a single-input shuffle with no 128 bit lane crossings we can
  // lower it into pshufb.
  if ((SVT.is128BitVector() && Subtarget->hasSSSE3()) ||
      (SVT.is256BitVector() && Subtarget->hasInt256())) {
    bool isLegal = true;
    for (unsigned I = 0, E = M.size(); I != E; ++I) {
      if (M[I] >= (int)SVT.getVectorNumElements() ||
          ShuffleCrosses128bitLane(SVT, I, M[I])) {
        isLegal = false;
        break;
      }
    }
    if (isLegal)
      return true;
  }

  // FIXME: blends, shifts.
  return (SVT.getVectorNumElements() == 2 ||
          ShuffleVectorSDNode::isSplatMask(&M[0], VT) ||
          isMOVLMask(M, SVT) ||
          isCommutedMOVLMask(M, SVT) ||
          isMOVHLPSMask(M, SVT) ||
          isSHUFPMask(M, SVT) ||
          isSHUFPMask(M, SVT, /* Commuted */ true) ||
          isPSHUFDMask(M, SVT) ||
          isPSHUFDMask(M, SVT, /* SecondOperand */ true) ||
          isPSHUFHWMask(M, SVT, Subtarget->hasInt256()) ||
          isPSHUFLWMask(M, SVT, Subtarget->hasInt256()) ||
          isPALIGNRMask(M, SVT, Subtarget) ||
          isUNPCKLMask(M, SVT, Subtarget->hasInt256()) ||
          isUNPCKHMask(M, SVT, Subtarget->hasInt256()) ||
          isUNPCKL_v_undef_Mask(M, SVT, Subtarget->hasInt256()) ||
          isUNPCKH_v_undef_Mask(M, SVT, Subtarget->hasInt256()) ||
          isBlendMask(M, SVT, Subtarget->hasSSE41(), Subtarget->hasInt256()) ||
          (Subtarget->hasSSE41() && isINSERTPSMask(M, SVT)));
}
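// Note on the PSHUFB fast path above: PSHUFB selects each destination byte
// with an arbitrary byte index from within the same 128-bit lane, so any
// single-input mask that never crosses a 128-bit lane boundary can be emitted
// as one instruction (applied per lane for 256-bit vectors with AVX2).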
bool
X86TargetLowering::isVectorClearMaskLegal(const SmallVectorImpl<int> &Mask,
                                          EVT VT) const {
  if (!VT.isSimple())
    return false;

  MVT SVT = VT.getSimpleVT();

  // This is an experimental legality test that is tailored to match the
  // legality test of the experimental lowering more closely. They are gated
  // separately to ease testing of performance differences.
  if (ExperimentalVectorShuffleLegality)
    // The new vector shuffle lowering is very good at managing zero-inputs.
    return isShuffleMaskLegal(Mask, VT);

  unsigned NumElts = SVT.getVectorNumElements();
  // FIXME: This collection of masks seems suspect.
  if (NumElts == 2)
    return true;
  if (NumElts == 4 && SVT.is128BitVector()) {
    return (isMOVLMask(Mask, SVT) ||
            isCommutedMOVLMask(Mask, SVT, true) ||
            isSHUFPMask(Mask, SVT) ||
            isSHUFPMask(Mask, SVT, /* Commuted */ true) ||
            isBlendMask(Mask, SVT, Subtarget->hasSSE41(),
                        Subtarget->hasInt256()));
  }
  return false;
}
//===----------------------------------------------------------------------===//
//                           X86 Scheduler Hooks
//===----------------------------------------------------------------------===//
/// Utility function to emit xbegin specifying the start of an RTM region.
static MachineBasicBlock *EmitXBegin(MachineInstr *MI, MachineBasicBlock *MBB,
                                     const TargetInstrInfo *TII) {
  DebugLoc DL = MI->getDebugLoc();

  const BasicBlock *BB = MBB->getBasicBlock();
  MachineFunction::iterator I = MBB;
  ++I;

  // For the v = xbegin(), we generate
  //
  // thisMBB:
  //  xbegin sinkMBB
  //
  // mainMBB:
  //  eax = -1
  //
  // sinkMBB:
  //  v = eax

  MachineBasicBlock *thisMBB = MBB;
  MachineFunction *MF = MBB->getParent();
  MachineBasicBlock *mainMBB = MF->CreateMachineBasicBlock(BB);
  MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB);
  MF->insert(I, mainMBB);
  MF->insert(I, sinkMBB);

  // Transfer the remainder of BB and its successor edges to sinkMBB.
  sinkMBB->splice(sinkMBB->begin(), MBB,
                  std::next(MachineBasicBlock::iterator(MI)), MBB->end());
  sinkMBB->transferSuccessorsAndUpdatePHIs(MBB);

  // thisMBB:
  //  xbegin sinkMBB
  //  # fallthrough to mainMBB
  //  # abortion to sinkMBB
  BuildMI(thisMBB, DL, TII->get(X86::XBEGIN_4)).addMBB(sinkMBB);
  thisMBB->addSuccessor(mainMBB);
  thisMBB->addSuccessor(sinkMBB);

  // mainMBB:
  //  EAX = -1
  BuildMI(mainMBB, DL, TII->get(X86::MOV32ri), X86::EAX).addImm(-1);
  mainMBB->addSuccessor(sinkMBB);

  // sinkMBB:
  // EAX is live into the sinkMBB
  sinkMBB->addLiveIn(X86::EAX);
  BuildMI(*sinkMBB, sinkMBB->begin(), DL,
          TII->get(TargetOpcode::COPY), MI->getOperand(0).getReg())
    .addReg(X86::EAX);

  MI->eraseFromParent();
  return sinkMBB;
}
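// Rough shape of the machine code emitted above for "v = xbegin()" (RTM): on
// a successful transaction start, execution falls through past XBEGIN and EAX
// is set to -1; on an abort, the hardware resumes at the XBEGIN target with
// the abort status already in EAX, which is why sinkMBB simply copies EAX
// into the result:
//
//   xbegin  .Lsink          # abort handler address
//   movl    $-1, %eax       # fallthrough: transaction started
// .Lsink:
//   movl    %eax, <v>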
// FIXME: When we get size specific XMM0 registers, i.e. XMM0_V16I8
// or XMM0_V32I8 in AVX all of this code can be replaced with that
// in the .td file.
static MachineBasicBlock *EmitPCMPSTRM(MachineInstr *MI, MachineBasicBlock *BB,
                                       const TargetInstrInfo *TII) {
  unsigned Opc;
  switch (MI->getOpcode()) {
  default: llvm_unreachable("illegal opcode!");
  case X86::PCMPISTRM128REG:  Opc = X86::PCMPISTRM128rr;  break;
  case X86::VPCMPISTRM128REG: Opc = X86::VPCMPISTRM128rr; break;
  case X86::PCMPISTRM128MEM:  Opc = X86::PCMPISTRM128rm;  break;
  case X86::VPCMPISTRM128MEM: Opc = X86::VPCMPISTRM128rm; break;
  case X86::PCMPESTRM128REG:  Opc = X86::PCMPESTRM128rr;  break;
  case X86::VPCMPESTRM128REG: Opc = X86::VPCMPESTRM128rr; break;
  case X86::PCMPESTRM128MEM:  Opc = X86::PCMPESTRM128rm;  break;
  case X86::VPCMPESTRM128MEM: Opc = X86::VPCMPESTRM128rm; break;
  }

  DebugLoc dl = MI->getDebugLoc();
  MachineInstrBuilder MIB = BuildMI(*BB, MI, dl, TII->get(Opc));

  unsigned NumArgs = MI->getNumOperands();
  for (unsigned i = 1; i < NumArgs; ++i) {
    MachineOperand &Op = MI->getOperand(i);
    if (!(Op.isReg() && Op.isImplicit()))
      MIB.addOperand(Op);
  }
  if (MI->hasOneMemOperand())
    MIB->setMemRefs(MI->memoperands_begin(), MI->memoperands_end());

  BuildMI(*BB, MI, dl,
          TII->get(TargetOpcode::COPY), MI->getOperand(0).getReg())
    .addReg(X86::XMM0);

  MI->eraseFromParent();
  return BB;
}
// FIXME: Custom handling because TableGen doesn't support multiple implicit
// defs in an instruction pattern
static MachineBasicBlock *EmitPCMPSTRI(MachineInstr *MI, MachineBasicBlock *BB,
                                       const TargetInstrInfo *TII) {
  unsigned Opc;
  switch (MI->getOpcode()) {
  default: llvm_unreachable("illegal opcode!");
  case X86::PCMPISTRIREG:  Opc = X86::PCMPISTRIrr;  break;
  case X86::VPCMPISTRIREG: Opc = X86::VPCMPISTRIrr; break;
  case X86::PCMPISTRIMEM:  Opc = X86::PCMPISTRIrm;  break;
  case X86::VPCMPISTRIMEM: Opc = X86::VPCMPISTRIrm; break;
  case X86::PCMPESTRIREG:  Opc = X86::PCMPESTRIrr;  break;
  case X86::VPCMPESTRIREG: Opc = X86::VPCMPESTRIrr; break;
  case X86::PCMPESTRIMEM:  Opc = X86::PCMPESTRIrm;  break;
  case X86::VPCMPESTRIMEM: Opc = X86::VPCMPESTRIrm; break;
  }

  DebugLoc dl = MI->getDebugLoc();
  MachineInstrBuilder MIB = BuildMI(*BB, MI, dl, TII->get(Opc));

  unsigned NumArgs = MI->getNumOperands(); // remove the results
  for (unsigned i = 1; i < NumArgs; ++i) {
    MachineOperand &Op = MI->getOperand(i);
    if (!(Op.isReg() && Op.isImplicit()))
      MIB.addOperand(Op);
  }
  if (MI->hasOneMemOperand())
    MIB->setMemRefs(MI->memoperands_begin(), MI->memoperands_end());

  BuildMI(*BB, MI, dl,
          TII->get(TargetOpcode::COPY), MI->getOperand(0).getReg())
    .addReg(X86::ECX);

  MI->eraseFromParent();
  return BB;
}
static MachineBasicBlock *EmitMonitor(MachineInstr *MI, MachineBasicBlock *BB,
                                      const X86Subtarget *Subtarget) {
  DebugLoc dl = MI->getDebugLoc();
  const TargetInstrInfo *TII = Subtarget->getInstrInfo();
  // Address into RAX/EAX, other two args into ECX, EDX.
  unsigned MemOpc = Subtarget->is64Bit() ? X86::LEA64r : X86::LEA32r;
  unsigned MemReg = Subtarget->is64Bit() ? X86::RAX : X86::EAX;
  MachineInstrBuilder MIB = BuildMI(*BB, MI, dl, TII->get(MemOpc), MemReg);
  for (int i = 0; i < X86::AddrNumOperands; ++i)
    MIB.addOperand(MI->getOperand(i));

  unsigned ValOps = X86::AddrNumOperands;
  BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY), X86::ECX)
    .addReg(MI->getOperand(ValOps).getReg());
  BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY), X86::EDX)
    .addReg(MI->getOperand(ValOps+1).getReg());

  // The instruction doesn't actually take any operands though.
  BuildMI(*BB, MI, dl, TII->get(X86::MONITORrrr));

  MI->eraseFromParent(); // The pseudo is gone now.
  return BB;
}
MachineBasicBlock *
X86TargetLowering::EmitVAARG64WithCustomInserter(MachineInstr *MI,
                                                 MachineBasicBlock *MBB) const {
  // Emit va_arg instruction on X86-64.

  // Operands to this pseudo-instruction:
  // 0  ) Output        : destination address (reg)
  // 1-5) Input         : va_list address (addr, i64mem)
  // 6  ) ArgSize       : Size (in bytes) of vararg type
  // 7  ) ArgMode       : 0=overflow only, 1=use gp_offset, 2=use fp_offset
  // 8  ) Align         : Alignment of type
  // 9  ) EFLAGS (implicit-def)

  assert(MI->getNumOperands() == 10 && "VAARG_64 should have 10 operands!");
  assert(X86::AddrNumOperands == 5 && "VAARG_64 assumes 5 address operands");

  unsigned DestReg = MI->getOperand(0).getReg();
  MachineOperand &Base = MI->getOperand(1);
  MachineOperand &Scale = MI->getOperand(2);
  MachineOperand &Index = MI->getOperand(3);
  MachineOperand &Disp = MI->getOperand(4);
  MachineOperand &Segment = MI->getOperand(5);
  unsigned ArgSize = MI->getOperand(6).getImm();
  unsigned ArgMode = MI->getOperand(7).getImm();
  unsigned Align = MI->getOperand(8).getImm();

  // Memory Reference
  assert(MI->hasOneMemOperand() && "Expected VAARG_64 to have one memoperand");
  MachineInstr::mmo_iterator MMOBegin = MI->memoperands_begin();
  MachineInstr::mmo_iterator MMOEnd = MI->memoperands_end();

  // Machine Information
  const TargetInstrInfo *TII = Subtarget->getInstrInfo();
  MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
  const TargetRegisterClass *AddrRegClass = getRegClassFor(MVT::i64);
  const TargetRegisterClass *OffsetRegClass = getRegClassFor(MVT::i32);
  DebugLoc DL = MI->getDebugLoc();

  // struct va_list {
  //   i32   gp_offset
  //   i32   fp_offset
  //   i64   overflow_area (address)
  //   i64   reg_save_area (address)
  // }
  // sizeof(va_list) = 24
  // alignment(va_list) = 8

  unsigned TotalNumIntRegs = 6;
  unsigned TotalNumXMMRegs = 8;
  bool UseGPOffset = (ArgMode == 1);
  bool UseFPOffset = (ArgMode == 2);
  unsigned MaxOffset = TotalNumIntRegs * 8 +
                       (UseFPOffset ? TotalNumXMMRegs * 16 : 0);

  /* Align ArgSize to a multiple of 8 */
  unsigned ArgSizeA8 = (ArgSize + 7) & ~7;
  bool NeedsAlign = (Align > 8);
  MachineBasicBlock *thisMBB = MBB;
  MachineBasicBlock *overflowMBB;
  MachineBasicBlock *offsetMBB;
  MachineBasicBlock *endMBB;

  unsigned OffsetDestReg = 0;   // Argument address computed by offsetMBB
  unsigned OverflowDestReg = 0; // Argument address computed by overflowMBB
  unsigned OffsetReg = 0;

  if (!UseGPOffset && !UseFPOffset) {
    // If we only pull from the overflow region, we don't create a branch.
    // We don't need to alter control flow.
    OffsetDestReg = 0; // unused
    OverflowDestReg = DestReg;

    offsetMBB = nullptr;
    overflowMBB = thisMBB;
    endMBB = thisMBB;
  } else {
    // First emit code to check if gp_offset (or fp_offset) is below the bound.
    // If so, pull the argument from reg_save_area. (branch to offsetMBB)
    // If not, pull from overflow_area. (branch to overflowMBB)
    //
    // thisMBB
    //   |     .
    //   |        .
    //   offsetMBB   overflowMBB
    //   |        .
    //   |     .
    //   endMBB

    // Registers for the PHI in endMBB
    OffsetDestReg = MRI.createVirtualRegister(AddrRegClass);
    OverflowDestReg = MRI.createVirtualRegister(AddrRegClass);

    const BasicBlock *LLVM_BB = MBB->getBasicBlock();
    MachineFunction *MF = MBB->getParent();
    overflowMBB = MF->CreateMachineBasicBlock(LLVM_BB);
    offsetMBB = MF->CreateMachineBasicBlock(LLVM_BB);
    endMBB = MF->CreateMachineBasicBlock(LLVM_BB);

    MachineFunction::iterator MBBIter = MBB;
    ++MBBIter;

    // Insert the new basic blocks
    MF->insert(MBBIter, offsetMBB);
    MF->insert(MBBIter, overflowMBB);
    MF->insert(MBBIter, endMBB);

    // Transfer the remainder of MBB and its successor edges to endMBB.
    endMBB->splice(endMBB->begin(), thisMBB,
                   std::next(MachineBasicBlock::iterator(MI)), thisMBB->end());
    endMBB->transferSuccessorsAndUpdatePHIs(thisMBB);

    // Make offsetMBB and overflowMBB successors of thisMBB
    thisMBB->addSuccessor(offsetMBB);
    thisMBB->addSuccessor(overflowMBB);

    // endMBB is a successor of both offsetMBB and overflowMBB
    offsetMBB->addSuccessor(endMBB);
    overflowMBB->addSuccessor(endMBB);

    // Load the offset value into a register
    OffsetReg = MRI.createVirtualRegister(OffsetRegClass);
    BuildMI(thisMBB, DL, TII->get(X86::MOV32rm), OffsetReg)
      .addOperand(Base)
      .addOperand(Scale)
      .addOperand(Index)
      .addDisp(Disp, UseFPOffset ? 4 : 0)
      .addOperand(Segment)
      .setMemRefs(MMOBegin, MMOEnd);

    // Check if there is enough room left to pull this argument.
    BuildMI(thisMBB, DL, TII->get(X86::CMP32ri))
      .addReg(OffsetReg)
      .addImm(MaxOffset + 8 - ArgSizeA8);

    // Branch to "overflowMBB" if offset >= max
    // Fall through to "offsetMBB" otherwise
    BuildMI(thisMBB, DL, TII->get(X86::GetCondBranchFromCond(X86::COND_AE)))
      .addMBB(overflowMBB);
  }
  // In offsetMBB, emit code to use the reg_save_area.
  if (offsetMBB) {
    assert(OffsetReg != 0);

    // Read the reg_save_area address.
    unsigned RegSaveReg = MRI.createVirtualRegister(AddrRegClass);
    BuildMI(offsetMBB, DL, TII->get(X86::MOV64rm), RegSaveReg)
      .addOperand(Base)
      .addOperand(Scale)
      .addOperand(Index)
      .addDisp(Disp, 16)
      .addOperand(Segment)
      .setMemRefs(MMOBegin, MMOEnd);

    // Zero-extend the offset
    unsigned OffsetReg64 = MRI.createVirtualRegister(AddrRegClass);
    BuildMI(offsetMBB, DL, TII->get(X86::SUBREG_TO_REG), OffsetReg64)
      .addImm(0)
      .addReg(OffsetReg)
      .addImm(X86::sub_32bit);

    // Add the offset to the reg_save_area to get the final address.
    BuildMI(offsetMBB, DL, TII->get(X86::ADD64rr), OffsetDestReg)
      .addReg(OffsetReg64)
      .addReg(RegSaveReg);

    // Compute the offset for the next argument
    unsigned NextOffsetReg = MRI.createVirtualRegister(OffsetRegClass);
    BuildMI(offsetMBB, DL, TII->get(X86::ADD32ri), NextOffsetReg)
      .addReg(OffsetReg)
      .addImm(UseFPOffset ? 16 : 8);

    // Store it back into the va_list.
    BuildMI(offsetMBB, DL, TII->get(X86::MOV32mr))
      .addOperand(Base)
      .addOperand(Scale)
      .addOperand(Index)
      .addDisp(Disp, UseFPOffset ? 4 : 0)
      .addOperand(Segment)
      .addReg(NextOffsetReg)
      .setMemRefs(MMOBegin, MMOEnd);

    // Jump to endMBB
    BuildMI(offsetMBB, DL, TII->get(X86::JMP_1))
      .addMBB(endMBB);
  }
  //
  // Emit code to use overflow area
  //

  // Load the overflow_area address into a register.
  unsigned OverflowAddrReg = MRI.createVirtualRegister(AddrRegClass);
  BuildMI(overflowMBB, DL, TII->get(X86::MOV64rm), OverflowAddrReg)
    .addOperand(Base)
    .addOperand(Scale)
    .addOperand(Index)
    .addDisp(Disp, 8)
    .addOperand(Segment)
    .setMemRefs(MMOBegin, MMOEnd);

  // If we need to align it, do so. Otherwise, just copy the address
  // to OverflowDestReg.
  if (NeedsAlign) {
    // Align the overflow address
    assert((Align & (Align-1)) == 0 && "Alignment must be a power of 2");
    unsigned TmpReg = MRI.createVirtualRegister(AddrRegClass);

    // aligned_addr = (addr + (align-1)) & ~(align-1)
    BuildMI(overflowMBB, DL, TII->get(X86::ADD64ri32), TmpReg)
      .addReg(OverflowAddrReg)
      .addImm(Align-1);

    BuildMI(overflowMBB, DL, TII->get(X86::AND64ri32), OverflowDestReg)
      .addReg(TmpReg)
      .addImm(~(uint64_t)(Align-1));
  } else {
    BuildMI(overflowMBB, DL, TII->get(TargetOpcode::COPY), OverflowDestReg)
      .addReg(OverflowAddrReg);
  }

  // Compute the next overflow address after this argument.
  // (the overflow address should be kept 8-byte aligned)
  unsigned NextAddrReg = MRI.createVirtualRegister(AddrRegClass);
  BuildMI(overflowMBB, DL, TII->get(X86::ADD64ri32), NextAddrReg)
    .addReg(OverflowDestReg)
    .addImm(ArgSizeA8);

  // Store the new overflow address.
  BuildMI(overflowMBB, DL, TII->get(X86::MOV64mr))
    .addOperand(Base)
    .addOperand(Scale)
    .addOperand(Index)
    .addDisp(Disp, 8)
    .addOperand(Segment)
    .addReg(NextAddrReg)
    .setMemRefs(MMOBegin, MMOEnd);

  // If we branched, emit the PHI to the front of endMBB.
  if (offsetMBB) {
    BuildMI(*endMBB, endMBB->begin(), DL,
            TII->get(X86::PHI), DestReg)
      .addReg(OffsetDestReg).addMBB(offsetMBB)
      .addReg(OverflowDestReg).addMBB(overflowMBB);
  }

  // Erase the pseudo instruction
  MI->eraseFromParent();

  return endMBB;
}
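// Worked example for the bound check above: with UseFPOffset, MaxOffset =
// 6*8 + 8*16 = 176 bytes, the size of the SysV AMD64 reg_save_area (six
// 8-byte GP slots followed by eight 16-byte XMM slots). For an 8-byte
// argument the emitted check is therefore "cmp offset, 176; jae overflow",
// i.e. the register save area is used only while the saved offset still
// points inside those 176 bytes.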
MachineBasicBlock *
X86TargetLowering::EmitVAStartSaveXMMRegsWithCustomInserter(
                                                 MachineInstr *MI,
                                                 MachineBasicBlock *MBB) const {
  // Emit code to save XMM registers to the stack. The ABI says that the
  // number of registers to save is given in %al, so it's theoretically
  // possible to do an indirect jump trick to avoid saving all of them,
  // however this code takes a simpler approach and just executes all
  // of the stores if %al is non-zero. It's less code, and it's probably
  // easier on the hardware branch predictor, and stores aren't all that
  // expensive anyway.

  // Create the new basic blocks. One block contains all the XMM stores,
  // and one block is the final destination regardless of whether any
  // stores were performed.
  const BasicBlock *LLVM_BB = MBB->getBasicBlock();
  MachineFunction *F = MBB->getParent();
  MachineFunction::iterator MBBIter = MBB;
  ++MBBIter;
  MachineBasicBlock *XMMSaveMBB = F->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *EndMBB = F->CreateMachineBasicBlock(LLVM_BB);
  F->insert(MBBIter, XMMSaveMBB);
  F->insert(MBBIter, EndMBB);

  // Transfer the remainder of MBB and its successor edges to EndMBB.
  EndMBB->splice(EndMBB->begin(), MBB,
                 std::next(MachineBasicBlock::iterator(MI)), MBB->end());
  EndMBB->transferSuccessorsAndUpdatePHIs(MBB);

  // The original block will now fall through to the XMM save block.
  MBB->addSuccessor(XMMSaveMBB);
  // The XMMSaveMBB will fall through to the end block.
  XMMSaveMBB->addSuccessor(EndMBB);

  // Now add the instructions.
  const TargetInstrInfo *TII = Subtarget->getInstrInfo();
  DebugLoc DL = MI->getDebugLoc();

  unsigned CountReg = MI->getOperand(0).getReg();
  int64_t RegSaveFrameIndex = MI->getOperand(1).getImm();
  int64_t VarArgsFPOffset = MI->getOperand(2).getImm();

  if (!Subtarget->isTargetWin64()) {
    // If %al is 0, branch around the XMM save block.
    BuildMI(MBB, DL, TII->get(X86::TEST8rr)).addReg(CountReg).addReg(CountReg);
    BuildMI(MBB, DL, TII->get(X86::JE_1)).addMBB(EndMBB);
    MBB->addSuccessor(EndMBB);
  }

  // Make sure the last operand is EFLAGS, which gets clobbered by the branch
  // that was just emitted, but clearly shouldn't be "saved".
  assert((MI->getNumOperands() <= 3 ||
          !MI->getOperand(MI->getNumOperands() - 1).isReg() ||
          MI->getOperand(MI->getNumOperands() - 1).getReg() == X86::EFLAGS)
         && "Expected last argument to be EFLAGS");
  unsigned MOVOpc = Subtarget->hasFp256() ? X86::VMOVAPSmr : X86::MOVAPSmr;
  // In the XMM save block, save all the XMM argument registers.
  for (int i = 3, e = MI->getNumOperands() - 1; i != e; ++i) {
    int64_t Offset = (i - 3) * 16 + VarArgsFPOffset;
    MachineMemOperand *MMO =
      F->getMachineMemOperand(
          MachinePointerInfo::getFixedStack(RegSaveFrameIndex, Offset),
          MachineMemOperand::MOStore,
          /*Size=*/16, /*Align=*/16);
    BuildMI(XMMSaveMBB, DL, TII->get(MOVOpc))
      .addFrameIndex(RegSaveFrameIndex)
      .addImm(/*Scale=*/1)
      .addReg(/*IndexReg=*/0)
      .addImm(/*Disp=*/Offset)
      .addReg(/*Segment=*/0)
      .addReg(MI->getOperand(i).getReg())
      .addMemOperand(MMO);
  }

  MI->eraseFromParent(); // The pseudo instruction is gone now.

  return EndMBB;
}
// The EFLAGS operand of SelectItr might be missing a kill marker
// because there were multiple uses of EFLAGS, and ISel didn't know
// which to mark. Figure out whether SelectItr should have had a
// kill marker, and set it if it should. Returns the correct kill
// marker value.
static bool checkAndUpdateEFLAGSKill(MachineBasicBlock::iterator SelectItr,
                                     MachineBasicBlock* BB,
                                     const TargetRegisterInfo* TRI) {
  // Scan forward through BB for a use/def of EFLAGS.
  MachineBasicBlock::iterator miI(std::next(SelectItr));
  for (MachineBasicBlock::iterator miE = BB->end(); miI != miE; ++miI) {
    const MachineInstr& mi = *miI;
    if (mi.readsRegister(X86::EFLAGS))
      return false;
    if (mi.definesRegister(X86::EFLAGS))
      break; // Should have kill-flag - update below.
  }

  // If we hit the end of the block, check whether EFLAGS is live into a
  // successor.
  if (miI == BB->end()) {
    for (MachineBasicBlock::succ_iterator sItr = BB->succ_begin(),
                                          sEnd = BB->succ_end();
         sItr != sEnd; ++sItr) {
      MachineBasicBlock* succ = *sItr;
      if (succ->isLiveIn(X86::EFLAGS))
        return false;
    }
  }

  // We found a def, or hit the end of the basic block and EFLAGS wasn't live
  // out. SelectMI should have a kill flag on EFLAGS.
  SelectItr->addRegisterKilled(X86::EFLAGS, TRI);
  return true;
}
MachineBasicBlock *
X86TargetLowering::EmitLoweredSelect(MachineInstr *MI,
                                     MachineBasicBlock *BB) const {
  const TargetInstrInfo *TII = Subtarget->getInstrInfo();
  DebugLoc DL = MI->getDebugLoc();

  // To "insert" a SELECT_CC instruction, we actually have to insert the
  // diamond control-flow pattern. The incoming instruction knows the
  // destination vreg to set, the condition code register to branch on, the
  // true/false values to select between, and a branch opcode to use.
  const BasicBlock *LLVM_BB = BB->getBasicBlock();
  MachineFunction::iterator It = BB;
  ++It;

  //  thisMBB:
  //  ...
  //   TrueVal = ...
  //   cmpTY ccX, r1, r2
  //   bCC copy1MBB
  //   fallthrough --> copy0MBB
  MachineBasicBlock *thisMBB = BB;
  MachineFunction *F = BB->getParent();
  MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
  F->insert(It, copy0MBB);
  F->insert(It, sinkMBB);

  // If the EFLAGS register isn't dead in the terminator, then claim that it's
  // live into the sink and copy blocks.
  const TargetRegisterInfo *TRI = Subtarget->getRegisterInfo();
  if (!MI->killsRegister(X86::EFLAGS) &&
      !checkAndUpdateEFLAGSKill(MI, BB, TRI)) {
    copy0MBB->addLiveIn(X86::EFLAGS);
    sinkMBB->addLiveIn(X86::EFLAGS);
  }

  // Transfer the remainder of BB and its successor edges to sinkMBB.
  sinkMBB->splice(sinkMBB->begin(), BB,
                  std::next(MachineBasicBlock::iterator(MI)), BB->end());
  sinkMBB->transferSuccessorsAndUpdatePHIs(BB);

  // Add the true and fallthrough blocks as its successors.
  BB->addSuccessor(copy0MBB);
  BB->addSuccessor(sinkMBB);

  // Create the conditional branch instruction.
  unsigned Opc =
    X86::GetCondBranchFromCond((X86::CondCode)MI->getOperand(3).getImm());
  BuildMI(BB, DL, TII->get(Opc)).addMBB(sinkMBB);

  //  copy0MBB:
  //   %FalseValue = ...
  //   # fallthrough to sinkMBB
  copy0MBB->addSuccessor(sinkMBB);

  //  sinkMBB:
  //   %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
  //  ...
  BuildMI(*sinkMBB, sinkMBB->begin(), DL,
          TII->get(X86::PHI), MI->getOperand(0).getReg())
    .addReg(MI->getOperand(1).getReg()).addMBB(copy0MBB)
    .addReg(MI->getOperand(2).getReg()).addMBB(thisMBB);

  MI->eraseFromParent(); // The pseudo instruction is gone now.
  return sinkMBB;
}
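// The CMOV_* pseudos lowered by EmitLoweredSelect exist because there is no
// real conditional move for these operand classes (e.g. no cmov of an XMM or
// x87 register), so the select is expanded into the branch diamond documented
// above. Roughly:
//
//   thisMBB:  jCC  sinkMBB          # condition true: TrueVal flows through
//   copy0MBB: (fallthrough)         # condition false: FalseVal
//   sinkMBB:  %res = phi [FalseVal, copy0MBB], [TrueVal, thisMBB]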
MachineBasicBlock *
X86TargetLowering::EmitLoweredSegAlloca(MachineInstr *MI,
                                        MachineBasicBlock *BB) const {
  MachineFunction *MF = BB->getParent();
  const TargetInstrInfo *TII = Subtarget->getInstrInfo();
  DebugLoc DL = MI->getDebugLoc();
  const BasicBlock *LLVM_BB = BB->getBasicBlock();

  assert(MF->shouldSplitStack());

  const bool Is64Bit = Subtarget->is64Bit();
  const bool IsLP64 = Subtarget->isTarget64BitLP64();

  const unsigned TlsReg = Is64Bit ? X86::FS : X86::GS;
  const unsigned TlsOffset = IsLP64 ? 0x70 : Is64Bit ? 0x40 : 0x30;

  // BB:
  //  ... [Till the alloca]
  // If stacklet is not large enough, jump to mallocMBB
  //
  // bumpMBB:
  //  Allocate by subtracting from RSP
  //  Jump to continueMBB
  //
  // mallocMBB:
  //  Allocate by call to runtime
  //
  // continueMBB:
  //  ...
  //  [rest of original BB]
  //

  MachineBasicBlock *mallocMBB = MF->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *bumpMBB = MF->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *continueMBB = MF->CreateMachineBasicBlock(LLVM_BB);

  MachineRegisterInfo &MRI = MF->getRegInfo();
  const TargetRegisterClass *AddrRegClass =
    getRegClassFor(getPointerTy());

  unsigned mallocPtrVReg = MRI.createVirtualRegister(AddrRegClass),
           bumpSPPtrVReg = MRI.createVirtualRegister(AddrRegClass),
           tmpSPVReg = MRI.createVirtualRegister(AddrRegClass),
           SPLimitVReg = MRI.createVirtualRegister(AddrRegClass),
           sizeVReg = MI->getOperand(1).getReg(),
           physSPReg =
             IsLP64 || Subtarget->isTargetNaCl64() ? X86::RSP : X86::ESP;

  MachineFunction::iterator MBBIter = BB;
  ++MBBIter;

  MF->insert(MBBIter, bumpMBB);
  MF->insert(MBBIter, mallocMBB);
  MF->insert(MBBIter, continueMBB);
  continueMBB->splice(continueMBB->begin(), BB,
                      std::next(MachineBasicBlock::iterator(MI)), BB->end());
  continueMBB->transferSuccessorsAndUpdatePHIs(BB);

  // Add code to the main basic block to check if the stack limit has been hit,
  // and if so, jump to mallocMBB otherwise to bumpMBB.
  BuildMI(BB, DL, TII->get(TargetOpcode::COPY), tmpSPVReg).addReg(physSPReg);
  BuildMI(BB, DL, TII->get(IsLP64 ? X86::SUB64rr:X86::SUB32rr), SPLimitVReg)
    .addReg(tmpSPVReg).addReg(sizeVReg);
  BuildMI(BB, DL, TII->get(IsLP64 ? X86::CMP64mr:X86::CMP32mr))
    .addReg(0).addImm(1).addReg(0).addImm(TlsOffset).addReg(TlsReg)
    .addReg(SPLimitVReg);
  BuildMI(BB, DL, TII->get(X86::JG_1)).addMBB(mallocMBB);

  // bumpMBB simply decreases the stack pointer, since we know the current
  // stacklet has enough space.
  BuildMI(bumpMBB, DL, TII->get(TargetOpcode::COPY), physSPReg)
    .addReg(SPLimitVReg);
  BuildMI(bumpMBB, DL, TII->get(TargetOpcode::COPY), bumpSPPtrVReg)
    .addReg(SPLimitVReg);
  BuildMI(bumpMBB, DL, TII->get(X86::JMP_1)).addMBB(continueMBB);

  // Calls into a routine in libgcc to allocate more space from the heap.
  const uint32_t *RegMask =
    Subtarget->getRegisterInfo()->getCallPreservedMask(CallingConv::C);
  if (IsLP64) {
    BuildMI(mallocMBB, DL, TII->get(X86::MOV64rr), X86::RDI)
      .addReg(sizeVReg);
    BuildMI(mallocMBB, DL, TII->get(X86::CALL64pcrel32))
      .addExternalSymbol("__morestack_allocate_stack_space")
      .addRegMask(RegMask)
      .addReg(X86::RDI, RegState::Implicit)
      .addReg(X86::RAX, RegState::ImplicitDefine);
  } else if (Is64Bit) {
    BuildMI(mallocMBB, DL, TII->get(X86::MOV32rr), X86::EDI)
      .addReg(sizeVReg);
    BuildMI(mallocMBB, DL, TII->get(X86::CALL64pcrel32))
      .addExternalSymbol("__morestack_allocate_stack_space")
      .addRegMask(RegMask)
      .addReg(X86::EDI, RegState::Implicit)
      .addReg(X86::EAX, RegState::ImplicitDefine);
  } else {
    BuildMI(mallocMBB, DL, TII->get(X86::SUB32ri), physSPReg).addReg(physSPReg)
      .addImm(16);
    BuildMI(mallocMBB, DL, TII->get(X86::PUSH32r)).addReg(sizeVReg);
    BuildMI(mallocMBB, DL, TII->get(X86::CALLpcrel32))
      .addExternalSymbol("__morestack_allocate_stack_space")
      .addRegMask(RegMask)
      .addReg(X86::EAX, RegState::ImplicitDefine);
  }

  if (!Is64Bit)
    BuildMI(mallocMBB, DL, TII->get(X86::ADD32ri), physSPReg).addReg(physSPReg)
      .addImm(16);

  BuildMI(mallocMBB, DL, TII->get(TargetOpcode::COPY), mallocPtrVReg)
    .addReg(IsLP64 ? X86::RAX : X86::EAX);
  BuildMI(mallocMBB, DL, TII->get(X86::JMP_1)).addMBB(continueMBB);

  // Set up the CFG correctly.
  BB->addSuccessor(bumpMBB);
  BB->addSuccessor(mallocMBB);
  mallocMBB->addSuccessor(continueMBB);
  bumpMBB->addSuccessor(continueMBB);

  // Take care of the PHI nodes.
  BuildMI(*continueMBB, continueMBB->begin(), DL, TII->get(X86::PHI),
          MI->getOperand(0).getReg())
    .addReg(mallocPtrVReg).addMBB(mallocMBB)
    .addReg(bumpSPPtrVReg).addMBB(bumpMBB);

  // Delete the original pseudo instruction.
  MI->eraseFromParent();

  // And we're done.
  return continueMBB;
}
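// Note on the TLS offsets used above: with split stacks the current stack
// limit is kept at a fixed offset from the thread pointer (%fs on x86-64,
// %gs on 32-bit), so the check emitted here is roughly "tmp = %rsp - size;
// cmp %fs:0x70, tmp; jg mallocMBB", i.e. call the runtime only when the
// stacklet cannot hold the allocation.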
MachineBasicBlock *
X86TargetLowering::EmitLoweredWinAlloca(MachineInstr *MI,
                                        MachineBasicBlock *BB) const {
  DebugLoc DL = MI->getDebugLoc();

  assert(!Subtarget->isTargetMachO());

  X86FrameLowering::emitStackProbeCall(*BB->getParent(), *BB, MI, DL);

  MI->eraseFromParent(); // The pseudo instruction is gone now.
  return BB;
}
MachineBasicBlock *
X86TargetLowering::EmitLoweredTLSCall(MachineInstr *MI,
                                      MachineBasicBlock *BB) const {
  // This is pretty easy. We're taking the value that we received from
  // our load from the relocation, sticking it in either RDI (x86-64)
  // or EAX and doing an indirect call. The return value will then
  // be in the normal return register.
  MachineFunction *F = BB->getParent();
  const X86InstrInfo *TII = Subtarget->getInstrInfo();
  DebugLoc DL = MI->getDebugLoc();

  assert(Subtarget->isTargetDarwin() && "Darwin only instr emitted?");
  assert(MI->getOperand(3).isGlobal() && "This should be a global");

  // Get a register mask for the lowered call.
  // FIXME: The 32-bit calls have non-standard calling conventions. Use a
  // proper register mask.
  const uint32_t *RegMask =
    Subtarget->getRegisterInfo()->getCallPreservedMask(CallingConv::C);
  if (Subtarget->is64Bit()) {
    MachineInstrBuilder MIB = BuildMI(*BB, MI, DL,
                                      TII->get(X86::MOV64rm), X86::RDI)
    .addReg(X86::RIP)
    .addImm(0).addReg(0)
    .addGlobalAddress(MI->getOperand(3).getGlobal(), 0,
                      MI->getOperand(3).getTargetFlags())
    .addReg(0);
    MIB = BuildMI(*BB, MI, DL, TII->get(X86::CALL64m));
    addDirectMem(MIB, X86::RDI);
    MIB.addReg(X86::RAX, RegState::ImplicitDefine).addRegMask(RegMask);
  } else if (F->getTarget().getRelocationModel() != Reloc::PIC_) {
    MachineInstrBuilder MIB = BuildMI(*BB, MI, DL,
                                      TII->get(X86::MOV32rm), X86::EAX)
    .addReg(0)
    .addImm(0).addReg(0)
    .addGlobalAddress(MI->getOperand(3).getGlobal(), 0,
                      MI->getOperand(3).getTargetFlags())
    .addReg(0);
    MIB = BuildMI(*BB, MI, DL, TII->get(X86::CALL32m));
    addDirectMem(MIB, X86::EAX);
    MIB.addReg(X86::EAX, RegState::ImplicitDefine).addRegMask(RegMask);
  } else {
    MachineInstrBuilder MIB = BuildMI(*BB, MI, DL,
                                      TII->get(X86::MOV32rm), X86::EAX)
    .addReg(TII->getGlobalBaseReg(F))
    .addImm(0).addReg(0)
    .addGlobalAddress(MI->getOperand(3).getGlobal(), 0,
                      MI->getOperand(3).getTargetFlags())
    .addReg(0);
    MIB = BuildMI(*BB, MI, DL, TII->get(X86::CALL32m));
    addDirectMem(MIB, X86::EAX);
    MIB.addReg(X86::EAX, RegState::ImplicitDefine).addRegMask(RegMask);
  }

  MI->eraseFromParent(); // The pseudo instruction is gone now.
  return BB;
}
MachineBasicBlock *
X86TargetLowering::emitEHSjLjSetJmp(MachineInstr *MI,
                                    MachineBasicBlock *MBB) const {
  DebugLoc DL = MI->getDebugLoc();
  MachineFunction *MF = MBB->getParent();
  const TargetInstrInfo *TII = Subtarget->getInstrInfo();
  MachineRegisterInfo &MRI = MF->getRegInfo();

  const BasicBlock *BB = MBB->getBasicBlock();
  MachineFunction::iterator I = MBB;
  ++I;

  // Memory Reference
  MachineInstr::mmo_iterator MMOBegin = MI->memoperands_begin();
  MachineInstr::mmo_iterator MMOEnd = MI->memoperands_end();

  unsigned DstReg;
  unsigned MemOpndSlot = 0;

  unsigned CurOp = 0;

  DstReg = MI->getOperand(CurOp++).getReg();
  const TargetRegisterClass *RC = MRI.getRegClass(DstReg);
  assert(RC->hasType(MVT::i32) && "Invalid destination!");
  unsigned mainDstReg = MRI.createVirtualRegister(RC);
  unsigned restoreDstReg = MRI.createVirtualRegister(RC);

  MemOpndSlot = CurOp;

  MVT PVT = getPointerTy();
  assert((PVT == MVT::i64 || PVT == MVT::i32) &&
         "Invalid Pointer Size!");

  // For v = setjmp(buf), we generate
  //
  // thisMBB:
  //  buf[LabelOffset] = restoreMBB
  //  SjLjSetup restoreMBB
  //
  // mainMBB:
  //  v_main = 0
  //
  // sinkMBB:
  //  v = phi(main, restore)
  //
  // restoreMBB:
  //  if base pointer being used, load it from frame
  //  v_restore = 1
  MachineBasicBlock *thisMBB = MBB;
  MachineBasicBlock *mainMBB = MF->CreateMachineBasicBlock(BB);
  MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB);
  MachineBasicBlock *restoreMBB = MF->CreateMachineBasicBlock(BB);
  MF->insert(I, mainMBB);
  MF->insert(I, sinkMBB);
  MF->push_back(restoreMBB);

  MachineInstrBuilder MIB;

  // Transfer the remainder of BB and its successor edges to sinkMBB.
  sinkMBB->splice(sinkMBB->begin(), MBB,
                  std::next(MachineBasicBlock::iterator(MI)), MBB->end());
  sinkMBB->transferSuccessorsAndUpdatePHIs(MBB);

  // thisMBB:
  unsigned PtrStoreOpc = 0;
  unsigned LabelReg = 0;
  const int64_t LabelOffset = 1 * PVT.getStoreSize();
  Reloc::Model RM = MF->getTarget().getRelocationModel();
  bool UseImmLabel = (MF->getTarget().getCodeModel() == CodeModel::Small) &&
                     (RM == Reloc::Static || RM == Reloc::DynamicNoPIC);

  // Prepare IP either in reg or imm.
  if (!UseImmLabel) {
    PtrStoreOpc = (PVT == MVT::i64) ? X86::MOV64mr : X86::MOV32mr;
    const TargetRegisterClass *PtrRC = getRegClassFor(PVT);
    LabelReg = MRI.createVirtualRegister(PtrRC);
    if (Subtarget->is64Bit()) {
      MIB = BuildMI(*thisMBB, MI, DL, TII->get(X86::LEA64r), LabelReg)
              .addReg(X86::RIP)
              .addImm(0)
              .addReg(0)
              .addMBB(restoreMBB)
              .addReg(0);
    } else {
      const X86InstrInfo *XII = static_cast<const X86InstrInfo*>(TII);
      MIB = BuildMI(*thisMBB, MI, DL, TII->get(X86::LEA32r), LabelReg)
              .addReg(XII->getGlobalBaseReg(MF))
              .addImm(0)
              .addReg(0)
              .addMBB(restoreMBB, Subtarget->ClassifyBlockAddressReference())
              .addReg(0);
    }
  } else
    PtrStoreOpc = (PVT == MVT::i64) ? X86::MOV64mi32 : X86::MOV32mi;
  // Store IP
  MIB = BuildMI(*thisMBB, MI, DL, TII->get(PtrStoreOpc));
  for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
    if (i == X86::AddrDisp)
      MIB.addDisp(MI->getOperand(MemOpndSlot + i), LabelOffset);
    else
      MIB.addOperand(MI->getOperand(MemOpndSlot + i));
  }
  if (!UseImmLabel)
    MIB.addReg(LabelReg);
  else
    MIB.addMBB(restoreMBB);
  MIB.setMemRefs(MMOBegin, MMOEnd);
  // Setup
  MIB = BuildMI(*thisMBB, MI, DL, TII->get(X86::EH_SjLj_Setup))
          .addMBB(restoreMBB);
  const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
  MIB.addRegMask(RegInfo->getNoPreservedMask());
  thisMBB->addSuccessor(mainMBB);
  thisMBB->addSuccessor(restoreMBB);

  // mainMBB:
  //  EAX = 0
  BuildMI(mainMBB, DL, TII->get(X86::MOV32r0), mainDstReg);
  mainMBB->addSuccessor(sinkMBB);

  // sinkMBB:
  BuildMI(*sinkMBB, sinkMBB->begin(), DL,
          TII->get(X86::PHI), DstReg)
    .addReg(mainDstReg).addMBB(mainMBB)
    .addReg(restoreDstReg).addMBB(restoreMBB);

  // restoreMBB:
  if (RegInfo->hasBasePointer(*MF)) {
    const bool Uses64BitFramePtr =
        Subtarget->isTarget64BitLP64() || Subtarget->isTargetNaCl64();
    X86MachineFunctionInfo *X86FI = MF->getInfo<X86MachineFunctionInfo>();
    X86FI->setRestoreBasePointer(MF);
    unsigned FramePtr = RegInfo->getFrameRegister(*MF);
    unsigned BasePtr = RegInfo->getBaseRegister();
    unsigned Opm = Uses64BitFramePtr ? X86::MOV64rm : X86::MOV32rm;
    addRegOffset(BuildMI(restoreMBB, DL, TII->get(Opm), BasePtr),
                 FramePtr, true, X86FI->getRestoreBasePointerOffset())
      .setMIFlag(MachineInstr::FrameSetup);
  }
  BuildMI(restoreMBB, DL, TII->get(X86::MOV32ri), restoreDstReg).addImm(1);
  BuildMI(restoreMBB, DL, TII->get(X86::JMP_1)).addMBB(sinkMBB);
  restoreMBB->addSuccessor(sinkMBB);

  MI->eraseFromParent();
  return sinkMBB;
}
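// Layout of the setjmp buffer as used above and by emitEHSjLjLongJmp below,
// in pointer-sized slots (8 bytes on 64-bit targets, 4 on 32-bit):
//
//   buf[0]: frame pointer
//   buf[1]: resume address (LabelOffset = 1 * sizeof(void*)) -> restoreMBB
//   buf[2]: stack pointer  (SPOffset    = 2 * sizeof(void*))
//
// Only buf[1] is written here; the frame and stack pointer slots are expected
// to have been filled in by the code that set up the buffer, and all three
// are reloaded on the longjmp side.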
MachineBasicBlock *
X86TargetLowering::emitEHSjLjLongJmp(MachineInstr *MI,
                                     MachineBasicBlock *MBB) const {
  DebugLoc DL = MI->getDebugLoc();
  MachineFunction *MF = MBB->getParent();
  const TargetInstrInfo *TII = Subtarget->getInstrInfo();
  MachineRegisterInfo &MRI = MF->getRegInfo();

  // Memory Reference
  MachineInstr::mmo_iterator MMOBegin = MI->memoperands_begin();
  MachineInstr::mmo_iterator MMOEnd = MI->memoperands_end();

  MVT PVT = getPointerTy();
  assert((PVT == MVT::i64 || PVT == MVT::i32) &&
         "Invalid Pointer Size!");

  const TargetRegisterClass *RC =
    (PVT == MVT::i64) ? &X86::GR64RegClass : &X86::GR32RegClass;
  unsigned Tmp = MRI.createVirtualRegister(RC);
  // Since FP is only updated here but NOT referenced, it's treated as GPR.
  const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
  unsigned FP = (PVT == MVT::i64) ? X86::RBP : X86::EBP;
  unsigned SP = RegInfo->getStackRegister();

  MachineInstrBuilder MIB;

  const int64_t LabelOffset = 1 * PVT.getStoreSize();
  const int64_t SPOffset = 2 * PVT.getStoreSize();

  unsigned PtrLoadOpc = (PVT == MVT::i64) ? X86::MOV64rm : X86::MOV32rm;
  unsigned IJmpOpc = (PVT == MVT::i64) ? X86::JMP64r : X86::JMP32r;

  // Reload FP
  MIB = BuildMI(*MBB, MI, DL, TII->get(PtrLoadOpc), FP);
  for (unsigned i = 0; i < X86::AddrNumOperands; ++i)
    MIB.addOperand(MI->getOperand(i));
  MIB.setMemRefs(MMOBegin, MMOEnd);
  // Reload IP
  MIB = BuildMI(*MBB, MI, DL, TII->get(PtrLoadOpc), Tmp);
  for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
    if (i == X86::AddrDisp)
      MIB.addDisp(MI->getOperand(i), LabelOffset);
    else
      MIB.addOperand(MI->getOperand(i));
  }
  MIB.setMemRefs(MMOBegin, MMOEnd);
  // Reload SP
  MIB = BuildMI(*MBB, MI, DL, TII->get(PtrLoadOpc), SP);
  for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
    if (i == X86::AddrDisp)
      MIB.addDisp(MI->getOperand(i), SPOffset);
    else
      MIB.addOperand(MI->getOperand(i));
  }
  MIB.setMemRefs(MMOBegin, MMOEnd);
  // Jump
  BuildMI(*MBB, MI, DL, TII->get(IJmpOpc)).addReg(Tmp);

  MI->eraseFromParent();
  return MBB;
}
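// For a 64-bit target the sequence emitted above is roughly:
//
//   movq  (buf), %rbp        # reload frame pointer
//   movq  8(buf), %tmp       # reload resume address
//   movq  16(buf), %rsp      # reload stack pointer
//   jmpq  *%tmp              # resume at the setjmp's restoreMBB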
// Replace 213-type (isel default) FMA3 instructions with 231-type for
// accumulator loops. Writing back to the accumulator allows the coalescer
// to remove extra copies in the loop.
MachineBasicBlock *
X86TargetLowering::emitFMA3Instr(MachineInstr *MI,
                                 MachineBasicBlock *MBB) const {
  MachineOperand &AddendOp = MI->getOperand(3);

  // Bail out early if the addend isn't a register - we can't switch these.
  if (!AddendOp.isReg())
    return MBB;

  MachineFunction &MF = *MBB->getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();

  // Check whether the addend is defined by a PHI:
  assert(MRI.hasOneDef(AddendOp.getReg()) && "Multiple defs in SSA?");
  MachineInstr &AddendDef = *MRI.def_instr_begin(AddendOp.getReg());
  if (!AddendDef.isPHI())
    return MBB;

  // Look for the following pattern:
  //
  //   %addend = phi [%entry, 0], [%loop, %result]
  //   ...
  //   %result<tied1> = FMA213 %m2<tied0>, %m1, %addend
  //
  // Replace with:
  //
  //   %addend = phi [%entry, 0], [%loop, %result]
  //   ...
  //   %result<tied1> = FMA231 %addend<tied0>, %m1, %m2

  for (unsigned i = 1, e = AddendDef.getNumOperands(); i < e; i += 2) {
    assert(AddendDef.getOperand(i).isReg());
    MachineOperand PHISrcOp = AddendDef.getOperand(i);
    MachineInstr &PHISrcInst = *MRI.def_instr_begin(PHISrcOp.getReg());
    if (&PHISrcInst == MI) {
      // Found a matching instruction.
      unsigned NewFMAOpc = 0;
      switch (MI->getOpcode()) {
      case X86::VFMADDPDr213r: NewFMAOpc = X86::VFMADDPDr231r; break;
      case X86::VFMADDPSr213r: NewFMAOpc = X86::VFMADDPSr231r; break;
      case X86::VFMADDSDr213r: NewFMAOpc = X86::VFMADDSDr231r; break;
      case X86::VFMADDSSr213r: NewFMAOpc = X86::VFMADDSSr231r; break;
      case X86::VFMSUBPDr213r: NewFMAOpc = X86::VFMSUBPDr231r; break;
      case X86::VFMSUBPSr213r: NewFMAOpc = X86::VFMSUBPSr231r; break;
      case X86::VFMSUBSDr213r: NewFMAOpc = X86::VFMSUBSDr231r; break;
      case X86::VFMSUBSSr213r: NewFMAOpc = X86::VFMSUBSSr231r; break;
      case X86::VFNMADDPDr213r: NewFMAOpc = X86::VFNMADDPDr231r; break;
      case X86::VFNMADDPSr213r: NewFMAOpc = X86::VFNMADDPSr231r; break;
      case X86::VFNMADDSDr213r: NewFMAOpc = X86::VFNMADDSDr231r; break;
      case X86::VFNMADDSSr213r: NewFMAOpc = X86::VFNMADDSSr231r; break;
      case X86::VFNMSUBPDr213r: NewFMAOpc = X86::VFNMSUBPDr231r; break;
      case X86::VFNMSUBPSr213r: NewFMAOpc = X86::VFNMSUBPSr231r; break;
      case X86::VFNMSUBSDr213r: NewFMAOpc = X86::VFNMSUBSDr231r; break;
      case X86::VFNMSUBSSr213r: NewFMAOpc = X86::VFNMSUBSSr231r; break;
      case X86::VFMADDSUBPDr213r: NewFMAOpc = X86::VFMADDSUBPDr231r; break;
      case X86::VFMADDSUBPSr213r: NewFMAOpc = X86::VFMADDSUBPSr231r; break;
      case X86::VFMSUBADDPDr213r: NewFMAOpc = X86::VFMSUBADDPDr231r; break;
      case X86::VFMSUBADDPSr213r: NewFMAOpc = X86::VFMSUBADDPSr231r; break;

      case X86::VFMADDPDr213rY: NewFMAOpc = X86::VFMADDPDr231rY; break;
      case X86::VFMADDPSr213rY: NewFMAOpc = X86::VFMADDPSr231rY; break;
      case X86::VFMSUBPDr213rY: NewFMAOpc = X86::VFMSUBPDr231rY; break;
      case X86::VFMSUBPSr213rY: NewFMAOpc = X86::VFMSUBPSr231rY; break;
      case X86::VFNMADDPDr213rY: NewFMAOpc = X86::VFNMADDPDr231rY; break;
      case X86::VFNMADDPSr213rY: NewFMAOpc = X86::VFNMADDPSr231rY; break;
      case X86::VFNMSUBPDr213rY: NewFMAOpc = X86::VFNMSUBPDr231rY; break;
      case X86::VFNMSUBPSr213rY: NewFMAOpc = X86::VFNMSUBPSr231rY; break;
      case X86::VFMADDSUBPDr213rY: NewFMAOpc = X86::VFMADDSUBPDr231rY; break;
      case X86::VFMADDSUBPSr213rY: NewFMAOpc = X86::VFMADDSUBPSr231rY; break;
      case X86::VFMSUBADDPDr213rY: NewFMAOpc = X86::VFMSUBADDPDr231rY; break;
      case X86::VFMSUBADDPSr213rY: NewFMAOpc = X86::VFMSUBADDPSr231rY; break;
      default: llvm_unreachable("Unrecognized FMA variant.");
      }

      const TargetInstrInfo &TII = *Subtarget->getInstrInfo();
      MachineInstrBuilder MIB =
        BuildMI(MF, MI->getDebugLoc(), TII.get(NewFMAOpc))
          .addOperand(MI->getOperand(0))
          .addOperand(MI->getOperand(3))
          .addOperand(MI->getOperand(2))
          .addOperand(MI->getOperand(1));
      MBB->insert(MachineBasicBlock::iterator(MI), MIB);
      MI->eraseFromParent();
      break;
    }
  }

  return MBB;
}
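// Note on the 213 -> 231 rewrite above: with three operands (dst, a, b) the
// FMA3 forms differ only in which source is tied to the destination:
//
//   vfmadd213 dst, a, b   =>   dst = a*dst + b   (addend b is a plain input)
//   vfmadd231 dst, a, b   =>   dst = a*b + dst   (addend is the destination)
//
// For an accumulator loop "acc = x*y + acc" the 231 form keeps acc in one
// register across iterations, so the coalescer can drop the copies the 213
// form would otherwise force.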
MachineBasicBlock *
X86TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
                                               MachineBasicBlock *BB) const {
  switch (MI->getOpcode()) {
  default: llvm_unreachable("Unexpected instr type to insert");
  case X86::TAILJMPd64:
  case X86::TAILJMPr64:
  case X86::TAILJMPm64:
  case X86::TAILJMPd64_REX:
  case X86::TAILJMPr64_REX:
  case X86::TAILJMPm64_REX:
    llvm_unreachable("TAILJMP64 would not be touched here.");
  case X86::TCRETURNdi64:
  case X86::TCRETURNri64:
  case X86::TCRETURNmi64:
    return BB;
  case X86::WIN_ALLOCA:
    return EmitLoweredWinAlloca(MI, BB);
  case X86::SEG_ALLOCA_32:
  case X86::SEG_ALLOCA_64:
    return EmitLoweredSegAlloca(MI, BB);
  case X86::TLSCall_32:
  case X86::TLSCall_64:
    return EmitLoweredTLSCall(MI, BB);
  case X86::CMOV_GR8:
  case X86::CMOV_FR32:
  case X86::CMOV_FR64:
  case X86::CMOV_V4F32:
  case X86::CMOV_V2F64:
  case X86::CMOV_V2I64:
  case X86::CMOV_V8F32:
  case X86::CMOV_V4F64:
  case X86::CMOV_V4I64:
  case X86::CMOV_V16F32:
  case X86::CMOV_V8F64:
  case X86::CMOV_V8I64:
  case X86::CMOV_GR16:
  case X86::CMOV_GR32:
  case X86::CMOV_RFP32:
  case X86::CMOV_RFP64:
  case X86::CMOV_RFP80:
    return EmitLoweredSelect(MI, BB);
21775 case X86::FP32_TO_INT16_IN_MEM:
21776 case X86::FP32_TO_INT32_IN_MEM:
21777 case X86::FP32_TO_INT64_IN_MEM:
21778 case X86::FP64_TO_INT16_IN_MEM:
21779 case X86::FP64_TO_INT32_IN_MEM:
21780 case X86::FP64_TO_INT64_IN_MEM:
21781 case X86::FP80_TO_INT16_IN_MEM:
21782 case X86::FP80_TO_INT32_IN_MEM:
21783 case X86::FP80_TO_INT64_IN_MEM: {
21784 MachineFunction *F = BB->getParent();
21785 const TargetInstrInfo *TII = Subtarget->getInstrInfo();
21786 DebugLoc DL = MI->getDebugLoc();
21788 // Change the floating point control register to use "round towards zero"
21789 // mode when truncating to an integer value.
21790 int CWFrameIdx = F->getFrameInfo()->CreateStackObject(2, 2, false);
21791 addFrameReference(BuildMI(*BB, MI, DL,
21792 TII->get(X86::FNSTCW16m)), CWFrameIdx);
21794 // Load the old value of the high byte of the control word...
21796 F->getRegInfo().createVirtualRegister(&X86::GR16RegClass);
21797 addFrameReference(BuildMI(*BB, MI, DL, TII->get(X86::MOV16rm), OldCW),
21800 // Set the high part to be round to zero...
21801 addFrameReference(BuildMI(*BB, MI, DL, TII->get(X86::MOV16mi)), CWFrameIdx)
21804 // Reload the modified control word now...
21805 addFrameReference(BuildMI(*BB, MI, DL,
21806 TII->get(X86::FLDCW16m)), CWFrameIdx);
21808 // Restore the memory image of control word to original value
21809 addFrameReference(BuildMI(*BB, MI, DL, TII->get(X86::MOV16mr)), CWFrameIdx)
21810 .addReg(OldCW);
21812 // Get the X86 opcode to use.
21813 unsigned Opc;
21814 switch (MI->getOpcode()) {
21815 default: llvm_unreachable("illegal opcode!");
21816 case X86::FP32_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m32; break;
21817 case X86::FP32_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m32; break;
21818 case X86::FP32_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m32; break;
21819 case X86::FP64_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m64; break;
21820 case X86::FP64_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m64; break;
21821 case X86::FP64_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m64; break;
21822 case X86::FP80_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m80; break;
21823 case X86::FP80_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m80; break;
21824 case X86::FP80_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m80; break;
21825 }
21827 X86AddressMode AM;
21828 MachineOperand &Op = MI->getOperand(0);
21829 if (Op.isReg()) {
21830 AM.BaseType = X86AddressMode::RegBase;
21831 AM.Base.Reg = Op.getReg();
21832 } else {
21833 AM.BaseType = X86AddressMode::FrameIndexBase;
21834 AM.Base.FrameIndex = Op.getIndex();
21835 }
21836 Op = MI->getOperand(1);
21837 if (Op.isImm())
21838 AM.Scale = Op.getImm();
21839 Op = MI->getOperand(2);
21840 if (Op.isImm())
21841 AM.IndexReg = Op.getImm();
21842 Op = MI->getOperand(3);
21843 if (Op.isGlobal()) {
21844 AM.GV = Op.getGlobal();
21845 } else {
21846 AM.Disp = Op.getImm();
21847 }
21848 addFullAddress(BuildMI(*BB, MI, DL, TII->get(Opc)), AM)
21849 .addReg(MI->getOperand(X86::AddrNumOperands).getReg());
21851 // Reload the original control word now.
21852 addFrameReference(BuildMI(*BB, MI, DL,
21853 TII->get(X86::FLDCW16m)), CWFrameIdx);
21855 MI->eraseFromParent(); // The pseudo instruction is gone now.
21856 return BB;
21857 }
21858 // String/text processing lowering.
21859 case X86::PCMPISTRM128REG:
21860 case X86::VPCMPISTRM128REG:
21861 case X86::PCMPISTRM128MEM:
21862 case X86::VPCMPISTRM128MEM:
21863 case X86::PCMPESTRM128REG:
21864 case X86::VPCMPESTRM128REG:
21865 case X86::PCMPESTRM128MEM:
21866 case X86::VPCMPESTRM128MEM:
21867 assert(Subtarget->hasSSE42() &&
21868 "Target must have SSE4.2 or AVX features enabled");
21869 return EmitPCMPSTRM(MI, BB, Subtarget->getInstrInfo());
21871 // String/text processing lowering.
21872 case X86::PCMPISTRIREG:
21873 case X86::VPCMPISTRIREG:
21874 case X86::PCMPISTRIMEM:
21875 case X86::VPCMPISTRIMEM:
21876 case X86::PCMPESTRIREG:
21877 case X86::VPCMPESTRIREG:
21878 case X86::PCMPESTRIMEM:
21879 case X86::VPCMPESTRIMEM:
21880 assert(Subtarget->hasSSE42() &&
21881 "Target must have SSE4.2 or AVX features enabled");
21882 return EmitPCMPSTRI(MI, BB, Subtarget->getInstrInfo());
21884 // Thread synchronization.
21885 case X86::MONITOR:
21886 return EmitMonitor(MI, BB, Subtarget);
21888 // xbegin
21889 case X86::XBEGIN:
21890 return EmitXBegin(MI, BB, Subtarget->getInstrInfo());
21892 case X86::VASTART_SAVE_XMM_REGS:
21893 return EmitVAStartSaveXMMRegsWithCustomInserter(MI, BB);
21895 case X86::VAARG_64:
21896 return EmitVAARG64WithCustomInserter(MI, BB);
21898 case X86::EH_SjLj_SetJmp32:
21899 case X86::EH_SjLj_SetJmp64:
21900 return emitEHSjLjSetJmp(MI, BB);
21902 case X86::EH_SjLj_LongJmp32:
21903 case X86::EH_SjLj_LongJmp64:
21904 return emitEHSjLjLongJmp(MI, BB);
21906 case TargetOpcode::STATEPOINT:
21907 // As an implementation detail, STATEPOINT shares the STACKMAP format at
21908 // this point in the process. We diverge later.
21909 return emitPatchPoint(MI, BB);
21911 case TargetOpcode::STACKMAP:
21912 case TargetOpcode::PATCHPOINT:
21913 return emitPatchPoint(MI, BB);
21915 case X86::VFMADDPDr213r:
21916 case X86::VFMADDPSr213r:
21917 case X86::VFMADDSDr213r:
21918 case X86::VFMADDSSr213r:
21919 case X86::VFMSUBPDr213r:
21920 case X86::VFMSUBPSr213r:
21921 case X86::VFMSUBSDr213r:
21922 case X86::VFMSUBSSr213r:
21923 case X86::VFNMADDPDr213r:
21924 case X86::VFNMADDPSr213r:
21925 case X86::VFNMADDSDr213r:
21926 case X86::VFNMADDSSr213r:
21927 case X86::VFNMSUBPDr213r:
21928 case X86::VFNMSUBPSr213r:
21929 case X86::VFNMSUBSDr213r:
21930 case X86::VFNMSUBSSr213r:
21931 case X86::VFMADDSUBPDr213r:
21932 case X86::VFMADDSUBPSr213r:
21933 case X86::VFMSUBADDPDr213r:
21934 case X86::VFMSUBADDPSr213r:
21935 case X86::VFMADDPDr213rY:
21936 case X86::VFMADDPSr213rY:
21937 case X86::VFMSUBPDr213rY:
21938 case X86::VFMSUBPSr213rY:
21939 case X86::VFNMADDPDr213rY:
21940 case X86::VFNMADDPSr213rY:
21941 case X86::VFNMSUBPDr213rY:
21942 case X86::VFNMSUBPSr213rY:
21943 case X86::VFMADDSUBPDr213rY:
21944 case X86::VFMADDSUBPSr213rY:
21945 case X86::VFMSUBADDPDr213rY:
21946 case X86::VFMSUBADDPSr213rY:
21947 return emitFMA3Instr(MI, BB);
21948 }
21949 }
21951 //===----------------------------------------------------------------------===//
21952 // X86 Optimization Hooks
21953 //===----------------------------------------------------------------------===//
21955 void X86TargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
21956 APInt &KnownZero,
21957 APInt &KnownOne,
21958 const SelectionDAG &DAG,
21959 unsigned Depth) const {
21960 unsigned BitWidth = KnownZero.getBitWidth();
21961 unsigned Opc = Op.getOpcode();
21962 assert((Opc >= ISD::BUILTIN_OP_END ||
21963 Opc == ISD::INTRINSIC_WO_CHAIN ||
21964 Opc == ISD::INTRINSIC_W_CHAIN ||
21965 Opc == ISD::INTRINSIC_VOID) &&
21966 "Should use MaskedValueIsZero if you don't know whether Op"
21967 " is a target node!");
21969 KnownZero = KnownOne = APInt(BitWidth, 0); // Don't know anything.
21970 switch (Opc) {
21971 default: break;
21972 case X86ISD::ADD:
21973 case X86ISD::SUB:
21974 case X86ISD::ADC:
21975 case X86ISD::SBB:
21976 case X86ISD::SMUL:
21977 case X86ISD::UMUL:
21978 case X86ISD::INC:
21979 case X86ISD::DEC:
21980 case X86ISD::OR:
21981 case X86ISD::XOR:
21982 case X86ISD::AND:
21983 // These nodes' second result is a boolean.
21984 if (Op.getResNo() == 0)
21985 break;
21986 // Fallthrough
21987 case X86ISD::SETCC:
21988 KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - 1);
21989 break;
21990 case ISD::INTRINSIC_WO_CHAIN: {
21991 unsigned IntId = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
21992 unsigned NumLoBits = 0;
21993 switch (IntId) {
21994 default: break;
21995 case Intrinsic::x86_sse_movmsk_ps:
21996 case Intrinsic::x86_avx_movmsk_ps_256:
21997 case Intrinsic::x86_sse2_movmsk_pd:
21998 case Intrinsic::x86_avx_movmsk_pd_256:
21999 case Intrinsic::x86_mmx_pmovmskb:
22000 case Intrinsic::x86_sse2_pmovmskb_128:
22001 case Intrinsic::x86_avx2_pmovmskb: {
22002 // High bits of movmskp{s|d}, pmovmskb are known zero.
22003 switch (IntId) {
22004 default: llvm_unreachable("Impossible intrinsic"); // Can't reach here.
22005 case Intrinsic::x86_sse_movmsk_ps: NumLoBits = 4; break;
22006 case Intrinsic::x86_avx_movmsk_ps_256: NumLoBits = 8; break;
22007 case Intrinsic::x86_sse2_movmsk_pd: NumLoBits = 2; break;
22008 case Intrinsic::x86_avx_movmsk_pd_256: NumLoBits = 4; break;
22009 case Intrinsic::x86_mmx_pmovmskb: NumLoBits = 8; break;
22010 case Intrinsic::x86_sse2_pmovmskb_128: NumLoBits = 16; break;
22011 case Intrinsic::x86_avx2_pmovmskb: NumLoBits = 32; break;
22012 }
22013 KnownZero = APInt::getHighBitsSet(BitWidth, BitWidth - NumLoBits);
22014 break;
22015 }
22016 }
22017 break;
22018 }
22019 }
22020 }
22022 unsigned X86TargetLowering::ComputeNumSignBitsForTargetNode(
22023 SDValue Op,
22024 const SelectionDAG &,
22025 unsigned Depth) const {
22026 // SETCC_CARRY sets the dest to ~0 for true or 0 for false.
22027 if (Op.getOpcode() == X86ISD::SETCC_CARRY)
22028 return Op.getValueType().getScalarType().getSizeInBits();
22030 // Fallback case.
22031 return 1;
22032 }
22034 /// isGAPlusOffset - Returns true (and the GlobalValue and the offset) if the
22035 /// node is a GlobalAddress + offset.
22036 bool X86TargetLowering::isGAPlusOffset(SDNode *N,
22037 const GlobalValue* &GA,
22038 int64_t &Offset) const {
22039 if (N->getOpcode() == X86ISD::Wrapper) {
22040 if (isa<GlobalAddressSDNode>(N->getOperand(0))) {
22041 GA = cast<GlobalAddressSDNode>(N->getOperand(0))->getGlobal();
22042 Offset = cast<GlobalAddressSDNode>(N->getOperand(0))->getOffset();
22043 return true;
22044 }
22045 }
22046 return TargetLowering::isGAPlusOffset(N, GA, Offset);
22047 }
22049 /// isShuffleHigh128VectorInsertLow - Checks whether the shuffle node is the
22050 /// same as extracting the high 128-bit part of 256-bit vector and then
22051 /// inserting the result into the low part of a new 256-bit vector
22052 static bool isShuffleHigh128VectorInsertLow(ShuffleVectorSDNode *SVOp) {
22053 EVT VT = SVOp->getValueType(0);
22054 unsigned NumElems = VT.getVectorNumElements();
22056 // vector_shuffle <4, 5, 6, 7, u, u, u, u> or <2, 3, u, u>
22057 for (unsigned i = 0, j = NumElems/2; i != NumElems/2; ++i, ++j)
22058 if (!isUndefOrEqual(SVOp->getMaskElt(i), j) ||
22059 SVOp->getMaskElt(j) >= 0)
22060 return false;
22062 return true;
22063 }
22065 /// isShuffleLow128VectorInsertHigh - Checks whether the shuffle node is the
22066 /// same as extracting the low 128-bit part of 256-bit vector and then
22067 /// inserting the result into the high part of a new 256-bit vector
22068 static bool isShuffleLow128VectorInsertHigh(ShuffleVectorSDNode *SVOp) {
22069 EVT VT = SVOp->getValueType(0);
22070 unsigned NumElems = VT.getVectorNumElements();
22072 // vector_shuffle <u, u, u, u, 0, 1, 2, 3> or <u, u, 0, 1>
22073 for (unsigned i = NumElems/2, j = 0; i != NumElems; ++i, ++j)
22074 if (!isUndefOrEqual(SVOp->getMaskElt(i), j) ||
22075 SVOp->getMaskElt(j) >= 0)
22076 return false;
22078 return true;
22079 }
22081 /// PerformShuffleCombine256 - Performs shuffle combines for 256-bit vectors.
22082 static SDValue PerformShuffleCombine256(SDNode *N, SelectionDAG &DAG,
22083 TargetLowering::DAGCombinerInfo &DCI,
22084 const X86Subtarget* Subtarget) {
22085 SDLoc dl(N);
22086 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
22087 SDValue V1 = SVOp->getOperand(0);
22088 SDValue V2 = SVOp->getOperand(1);
22089 EVT VT = SVOp->getValueType(0);
22090 unsigned NumElems = VT.getVectorNumElements();
22092 if (V1.getOpcode() == ISD::CONCAT_VECTORS &&
22093 V2.getOpcode() == ISD::CONCAT_VECTORS) {
22094 //
22095 //                  0,0,0,...
22096 //                     |
22097 //   V      UNDEF    BUILD_VECTOR  UNDEF
22098 //    \      /           \           /
22099 // CONCAT_VECTOR         CONCAT_VECTOR
22100 //        \                  /
22101 //         \                /
22102 //         RESULT: V + zero extended
22103 //
22104 if (V2.getOperand(0).getOpcode() != ISD::BUILD_VECTOR ||
22105 V2.getOperand(1).getOpcode() != ISD::UNDEF ||
22106 V1.getOperand(1).getOpcode() != ISD::UNDEF)
22107 return SDValue();
22109 if (!ISD::isBuildVectorAllZeros(V2.getOperand(0).getNode()))
22110 return SDValue();
22112 // To match the shuffle mask, the first half of the mask should
22113 // be exactly the first vector, and all the rest a splat with the
22114 // first element of the second one.
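// (Annotation, an illustrative example: for v4i64 this accepts the mask
// <0, 1, 4, 4> -- elements 0..1 taken from V1, and both high elements the
// zero splat at index NumElems == 4 from V2.)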
22115 for (unsigned i = 0; i != NumElems/2; ++i)
22116 if (!isUndefOrEqual(SVOp->getMaskElt(i), i) ||
22117 !isUndefOrEqual(SVOp->getMaskElt(i+NumElems/2), NumElems))
22118 return SDValue();
22120 // If V1 is coming from a vector load then just fold to a VZEXT_LOAD.
22121 if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(V1.getOperand(0))) {
22122 if (Ld->hasNUsesOfValue(1, 0)) {
22123 SDVTList Tys = DAG.getVTList(MVT::v4i64, MVT::Other);
22124 SDValue Ops[] = { Ld->getChain(), Ld->getBasePtr() };
22125 SDValue ResNode =
22126 DAG.getMemIntrinsicNode(X86ISD::VZEXT_LOAD, dl, Tys, Ops,
22127 Ld->getMemoryVT(),
22128 Ld->getPointerInfo(),
22129 Ld->getAlignment(),
22130 false/*isVolatile*/, true/*ReadMem*/,
22131 false/*WriteMem*/);
22133 // Make sure the newly-created LOAD is in the same position as Ld in
22134 // terms of dependency. We create a TokenFactor for Ld and ResNode,
22135 // and update uses of Ld's output chain to use the TokenFactor.
22136 if (Ld->hasAnyUseOfValue(1)) {
22137 SDValue NewChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
22138 SDValue(Ld, 1), SDValue(ResNode.getNode(), 1));
22139 DAG.ReplaceAllUsesOfValueWith(SDValue(Ld, 1), NewChain);
22140 DAG.UpdateNodeOperands(NewChain.getNode(), SDValue(Ld, 1),
22141 SDValue(ResNode.getNode(), 1));
22142 }
22144 return DAG.getNode(ISD::BITCAST, dl, VT, ResNode);
22145 }
22146 }
22148 // Emit a zeroed vector and insert the desired subvector on its
22149 // first half.
22150 SDValue Zeros = getZeroVector(VT, Subtarget, DAG, dl);
22151 SDValue InsV = Insert128BitVector(Zeros, V1.getOperand(0), 0, DAG, dl);
22152 return DCI.CombineTo(N, InsV);
22153 }
22155 //===--------------------------------------------------------------------===//
22156 // Combine some shuffles into subvector extracts and inserts:
22159 // vector_shuffle <4, 5, 6, 7, u, u, u, u> or <2, 3, u, u>
22160 if (isShuffleHigh128VectorInsertLow(SVOp)) {
22161 SDValue V = Extract128BitVector(V1, NumElems/2, DAG, dl);
22162 SDValue InsV = Insert128BitVector(DAG.getUNDEF(VT), V, 0, DAG, dl);
22163 return DCI.CombineTo(N, InsV);
22166 // vector_shuffle <u, u, u, u, 0, 1, 2, 3> or <u, u, 0, 1>
22167 if (isShuffleLow128VectorInsertHigh(SVOp)) {
22168 SDValue V = Extract128BitVector(V1, 0, DAG, dl);
22169 SDValue InsV = Insert128BitVector(DAG.getUNDEF(VT), V, NumElems/2, DAG, dl);
22170 return DCI.CombineTo(N, InsV);
22171 }
22173 return SDValue();
22174 }
22176 /// \brief Combine an arbitrary chain of shuffles into a single instruction if
22177 /// possible.
22178 ///
22179 /// This is the leaf of the recursive combine below. When we have found some
22180 /// chain of single-use x86 shuffle instructions and accumulated the combined
22181 /// shuffle mask represented by them, this will try to pattern match that mask
22182 /// into either a single instruction if there is a special purpose instruction
22183 /// for this operation, or into a PSHUFB instruction which is a fully general
22184 /// instruction but should only be used to replace chains over a certain depth.
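/// (Annotation, an illustrative sketch: a chain of single-use shuffles whose
/// composed mask widens to {0, 0} in the float domain is matched below as a
/// single MOVLHPS, or MOVDDUP when SSE3 is available, rather than being
/// turned into a PSHUFB.)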
22185 static bool combineX86ShuffleChain(SDValue Op, SDValue Root, ArrayRef<int> Mask,
22186 int Depth, bool HasPSHUFB, SelectionDAG &DAG,
22187 TargetLowering::DAGCombinerInfo &DCI,
22188 const X86Subtarget *Subtarget) {
22189 assert(!Mask.empty() && "Cannot combine an empty shuffle mask!");
22191 // Find the operand that enters the chain. Note that multiple uses are OK
22192 // here, we're not going to remove the operand we find.
22193 SDValue Input = Op.getOperand(0);
22194 while (Input.getOpcode() == ISD::BITCAST)
22195 Input = Input.getOperand(0);
22197 MVT VT = Input.getSimpleValueType();
22198 MVT RootVT = Root.getSimpleValueType();
22199 SDLoc DL(Root);
22201 // Just remove no-op shuffle masks.
22202 if (Mask.size() == 1) {
22203 DCI.CombineTo(Root.getNode(), DAG.getNode(ISD::BITCAST, DL, RootVT, Input),
22204 /*AddTo*/ true);
22205 return true;
22206 }
22208 // Use the float domain if the operand type is a floating point type.
22209 bool FloatDomain = VT.isFloatingPoint();
22211 // For floating point shuffles, we don't have free copies in the shuffle
22212 // instructions or the ability to load as part of the instruction, so
22213 // canonicalize their shuffles to UNPCK or MOV variants.
22215 // Note that even with AVX we prefer the PSHUFD form of shuffle for integer
22216 // vectors because it can have a load folded into it that UNPCK cannot. This
22217 // doesn't preclude something switching to the shorter encoding post-RA.
22218 if (FloatDomain) {
22219 if (Mask.equals(0, 0) || Mask.equals(1, 1)) {
22220 bool Lo = Mask.equals(0, 0);
22221 unsigned Shuffle;
22222 MVT ShuffleVT;
22223 // Check if we have SSE3 which will let us use MOVDDUP. That instruction
22224 // is no slower than UNPCKLPD but has the option to fold the input operand
22225 // into even an unaligned memory load.
22226 if (Lo && Subtarget->hasSSE3()) {
22227 Shuffle = X86ISD::MOVDDUP;
22228 ShuffleVT = MVT::v2f64;
22229 } else {
22230 // We have MOVLHPS and MOVHLPS throughout SSE and they encode smaller
22231 // than the UNPCK variants.
22232 Shuffle = Lo ? X86ISD::MOVLHPS : X86ISD::MOVHLPS;
22233 ShuffleVT = MVT::v4f32;
22234 }
22235 if (Depth == 1 && Root->getOpcode() == Shuffle)
22236 return false; // Nothing to do!
22237 Op = DAG.getNode(ISD::BITCAST, DL, ShuffleVT, Input);
22238 DCI.AddToWorklist(Op.getNode());
22239 if (Shuffle == X86ISD::MOVDDUP)
22240 Op = DAG.getNode(Shuffle, DL, ShuffleVT, Op);
22241 else
22242 Op = DAG.getNode(Shuffle, DL, ShuffleVT, Op, Op);
22243 DCI.AddToWorklist(Op.getNode());
22244 DCI.CombineTo(Root.getNode(), DAG.getNode(ISD::BITCAST, DL, RootVT, Op),
22245 /*AddTo*/ true);
22246 return true;
22247 }
22248 if (Subtarget->hasSSE3() &&
22249 (Mask.equals(0, 0, 2, 2) || Mask.equals(1, 1, 3, 3))) {
22250 bool Lo = Mask.equals(0, 0, 2, 2);
22251 unsigned Shuffle = Lo ? X86ISD::MOVSLDUP : X86ISD::MOVSHDUP;
22252 MVT ShuffleVT = MVT::v4f32;
22253 if (Depth == 1 && Root->getOpcode() == Shuffle)
22254 return false; // Nothing to do!
22255 Op = DAG.getNode(ISD::BITCAST, DL, ShuffleVT, Input);
22256 DCI.AddToWorklist(Op.getNode());
22257 Op = DAG.getNode(Shuffle, DL, ShuffleVT, Op);
22258 DCI.AddToWorklist(Op.getNode());
22259 DCI.CombineTo(Root.getNode(), DAG.getNode(ISD::BITCAST, DL, RootVT, Op),
22260 /*AddTo*/ true);
22261 return true;
22262 }
22263 if (Mask.equals(0, 0, 1, 1) || Mask.equals(2, 2, 3, 3)) {
22264 bool Lo = Mask.equals(0, 0, 1, 1);
22265 unsigned Shuffle = Lo ? X86ISD::UNPCKL : X86ISD::UNPCKH;
22266 MVT ShuffleVT = MVT::v4f32;
22267 if (Depth == 1 && Root->getOpcode() == Shuffle)
22268 return false; // Nothing to do!
22269 Op = DAG.getNode(ISD::BITCAST, DL, ShuffleVT, Input);
22270 DCI.AddToWorklist(Op.getNode());
22271 Op = DAG.getNode(Shuffle, DL, ShuffleVT, Op, Op);
22272 DCI.AddToWorklist(Op.getNode());
22273 DCI.CombineTo(Root.getNode(), DAG.getNode(ISD::BITCAST, DL, RootVT, Op),
22274 /*AddTo*/ true);
22275 return true;
22276 }
22277 }
22279 // We always canonicalize the 8 x i16 and 16 x i8 shuffles into their UNPCK
22280 // variants as none of these have single-instruction variants that are
22281 // superior to the UNPCK formulation.
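// (Annotation, e.g.: the v8i16 mask {0,0,1,1,2,2,3,3} is exactly
// "punpcklwd %xmm0, %xmm0", duplicating each of the four low words.)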
22282 if (!FloatDomain &&
22283 (Mask.equals(0, 0, 1, 1, 2, 2, 3, 3) ||
22284 Mask.equals(4, 4, 5, 5, 6, 6, 7, 7) ||
22285 Mask.equals(0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7) ||
22286 Mask.equals(8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13, 14, 14, 15,
22287 15))) {
22288 bool Lo = Mask[0] == 0;
22289 unsigned Shuffle = Lo ? X86ISD::UNPCKL : X86ISD::UNPCKH;
22290 if (Depth == 1 && Root->getOpcode() == Shuffle)
22291 return false; // Nothing to do!
22292 MVT ShuffleVT;
22293 switch (Mask.size()) {
22294 case 8:
22295 ShuffleVT = MVT::v8i16;
22296 break;
22297 case 16:
22298 ShuffleVT = MVT::v16i8;
22299 break;
22300 default:
22301 llvm_unreachable("Impossible mask size!");
22302 };
22303 Op = DAG.getNode(ISD::BITCAST, DL, ShuffleVT, Input);
22304 DCI.AddToWorklist(Op.getNode());
22305 Op = DAG.getNode(Shuffle, DL, ShuffleVT, Op, Op);
22306 DCI.AddToWorklist(Op.getNode());
22307 DCI.CombineTo(Root.getNode(), DAG.getNode(ISD::BITCAST, DL, RootVT, Op),
22308 /*AddTo*/ true);
22309 return true;
22310 }
22312 // Don't try to re-form single instruction chains under any circumstances now
22313 // that we've done encoding canonicalization for them.
22314 if (Depth < 2)
22315 return false;
22317 // If we have 3 or more shuffle instructions or a chain involving PSHUFB, we
22318 // can replace them with a single PSHUFB instruction profitably. Intel's
22319 manuals suggest only using PSHUFB if doing so replaces 5 instructions, but
22320 // in practice PSHUFB tends to be *very* fast so we're more aggressive.
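// (Annotation, a worked example: a v4i32 mask {1, 0, 3, 2} has
// Ratio = 16 / 4 = 4, so byte i of the PSHUFB mask is 4 * Mask[i/4] + i % 4,
// giving {4,5,6,7, 0,1,2,3, 12,13,14,15, 8,9,10,11}.)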
22321 if ((Depth >= 3 || HasPSHUFB) && Subtarget->hasSSSE3()) {
22322 SmallVector<SDValue, 16> PSHUFBMask;
22323 assert(Mask.size() <= 16 && "Can't shuffle elements smaller than bytes!");
22324 int Ratio = 16 / Mask.size();
22325 for (unsigned i = 0; i < 16; ++i) {
22326 if (Mask[i / Ratio] == SM_SentinelUndef) {
22327 PSHUFBMask.push_back(DAG.getUNDEF(MVT::i8));
22328 continue;
22329 }
22330 int M = Mask[i / Ratio] != SM_SentinelZero
22331 ? Ratio * Mask[i / Ratio] + i % Ratio
22332 : 255;
22333 PSHUFBMask.push_back(DAG.getConstant(M, MVT::i8));
22334 }
22335 Op = DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, Input);
22336 DCI.AddToWorklist(Op.getNode());
22337 SDValue PSHUFBMaskOp =
22338 DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v16i8, PSHUFBMask);
22339 DCI.AddToWorklist(PSHUFBMaskOp.getNode());
22340 Op = DAG.getNode(X86ISD::PSHUFB, DL, MVT::v16i8, Op, PSHUFBMaskOp);
22341 DCI.AddToWorklist(Op.getNode());
22342 DCI.CombineTo(Root.getNode(), DAG.getNode(ISD::BITCAST, DL, RootVT, Op),
22343 /*AddTo*/ true);
22344 return true;
22345 }
22347 // Failed to find any combines.
22348 return false;
22349 }
22351 /// \brief Fully generic combining of x86 shuffle instructions.
22353 /// This should be the last combine run over the x86 shuffle instructions. Once
22354 /// they have been fully optimized, this will recursively consider all chains
22355 /// of single-use shuffle instructions, build a generic model of the cumulative
22356 /// shuffle operation, and check for simpler instructions which implement this
22357 /// operation. We use this primarily for two purposes:
22359 /// 1) Collapse generic shuffles to specialized single instructions when
22360 /// equivalent. In most cases, this is just an encoding size win, but
22361 /// sometimes we will collapse multiple generic shuffles into a single
22362 /// special-purpose shuffle.
22363 /// 2) Look for sequences of shuffle instructions with 3 or more total
22364 /// instructions, and replace them with the slightly more expensive SSSE3
22365 /// PSHUFB instruction if available. We do this as the last combining step
22366 /// to ensure we avoid using PSHUFB if we can implement the shuffle with
22367 /// a suitable short sequence of other instructions. The PSHUFB will either
22368 /// use a register or have to read from memory and so is slightly (but only
22369 /// slightly) more expensive than the other shuffle instructions.
22371 /// Because this is inherently a quadratic operation (for each shuffle in
22372 /// a chain, we recurse up the chain), the depth is limited to 8 instructions.
22373 /// This should never be an issue in practice as the shuffle lowering doesn't
22374 /// produce sequences of more than 8 instructions.
22376 /// FIXME: We will currently miss some cases where the redundant shuffling
22377 /// would simplify under the threshold for PSHUFB formation because of
22378 /// combine-ordering. To fix this, we should do the redundant instruction
22379 /// combining in this recursive walk.
22380 static bool combineX86ShufflesRecursively(SDValue Op, SDValue Root,
22381 ArrayRef<int> RootMask,
22382 int Depth, bool HasPSHUFB,
22383 SelectionDAG &DAG,
22384 TargetLowering::DAGCombinerInfo &DCI,
22385 const X86Subtarget *Subtarget) {
22386 // Bound the depth of our recursive combine because this is ultimately
22387 // quadratic in nature.
22388 if (Depth > 8)
22389 return false;
22391 // Directly rip through bitcasts to find the underlying operand.
22392 while (Op.getOpcode() == ISD::BITCAST && Op.getOperand(0).hasOneUse())
22393 Op = Op.getOperand(0);
22395 MVT VT = Op.getSimpleValueType();
22396 if (!VT.isVector())
22397 return false; // Bail if we hit a non-vector.
22398 // FIXME: This routine should be taught about 256-bit shuffles, or a 256-bit
22399 // version should be added.
22400 if (VT.getSizeInBits() != 128)
22401 return false;
22403 assert(Root.getSimpleValueType().isVector() &&
22404 "Shuffles operate on vector types!");
22405 assert(VT.getSizeInBits() == Root.getSimpleValueType().getSizeInBits() &&
22406 "Can only combine shuffles of the same vector register size.");
22408 if (!isTargetShuffle(Op.getOpcode()))
22409 return false;
22410 SmallVector<int, 16> OpMask;
22411 bool IsUnary;
22412 bool HaveMask = getTargetShuffleMask(Op.getNode(), VT, OpMask, IsUnary);
22413 // We only can combine unary shuffles which we can decode the mask for.
22414 if (!HaveMask || !IsUnary)
22415 return false;
22417 assert(VT.getVectorNumElements() == OpMask.size() &&
22418 "Different mask size from vector size!");
22419 assert(((RootMask.size() > OpMask.size() &&
22420 RootMask.size() % OpMask.size() == 0) ||
22421 (OpMask.size() > RootMask.size() &&
22422 OpMask.size() % RootMask.size() == 0) ||
22423 OpMask.size() == RootMask.size()) &&
22424 "The smaller number of elements must divide the larger.");
22425 int RootRatio = std::max<int>(1, OpMask.size() / RootMask.size());
22426 int OpRatio = std::max<int>(1, RootMask.size() / OpMask.size());
22427 assert(((RootRatio == 1 && OpRatio == 1) ||
22428 (RootRatio == 1) != (OpRatio == 1)) &&
22429 "Must not have a ratio for both incoming and op masks!");
22431 SmallVector<int, 16> Mask;
22432 Mask.reserve(std::max(OpMask.size(), RootMask.size()));
22434 // Merge this shuffle operation's mask into our accumulated mask. Note that
22435 // this shuffle's mask will be the first applied to the input, followed by the
22436 // root mask to get us all the way to the root value arrangement. The reason
22437 // for this order is that we are recursing up the operation chain.
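// (Annotation, a worked example: RootMask = {1, 0} over v2i64 above
// OpMask = {2, 3, 0, 1} over v4i32 gives RootRatio = 2, OpRatio = 1; each i
// maps through RootMaskedIdx = RootMask[i / 2] * 2 + i % 2 and then through
// OpMask, producing the identity {0, 1, 2, 3} -- the two swaps cancel.)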
22438 for (int i = 0, e = std::max(OpMask.size(), RootMask.size()); i < e; ++i) {
22439 int RootIdx = i / RootRatio;
22440 if (RootMask[RootIdx] < 0) {
22441 // This is a zero or undef lane, we're done.
22442 Mask.push_back(RootMask[RootIdx]);
22443 continue;
22444 }
22446 int RootMaskedIdx = RootMask[RootIdx] * RootRatio + i % RootRatio;
22447 int OpIdx = RootMaskedIdx / OpRatio;
22448 if (OpMask[OpIdx] < 0) {
22449 // The incoming lanes are zero or undef, it doesn't matter which ones we
22450 // are using.
22451 Mask.push_back(OpMask[OpIdx]);
22452 continue;
22453 }
22455 // Ok, we have non-zero lanes, map them through.
22456 Mask.push_back(OpMask[OpIdx] * OpRatio +
22457 RootMaskedIdx % OpRatio);
22458 }
22460 // See if we can recurse into the operand to combine more things.
22461 switch (Op.getOpcode()) {
22462 case X86ISD::PSHUFB:
22463 HasPSHUFB = true;
22464 case X86ISD::PSHUFD:
22465 case X86ISD::PSHUFHW:
22466 case X86ISD::PSHUFLW:
22467 if (Op.getOperand(0).hasOneUse() &&
22468 combineX86ShufflesRecursively(Op.getOperand(0), Root, Mask, Depth + 1,
22469 HasPSHUFB, DAG, DCI, Subtarget))
22470 return true;
22471 break;
22473 case X86ISD::UNPCKL:
22474 case X86ISD::UNPCKH:
22475 assert(Op.getOperand(0) == Op.getOperand(1) && "We only combine unary shuffles!");
22476 // We can't check for single use, we have to check that this shuffle is the only user.
22477 if (Op->isOnlyUserOf(Op.getOperand(0).getNode()) &&
22478 combineX86ShufflesRecursively(Op.getOperand(0), Root, Mask, Depth + 1,
22479 HasPSHUFB, DAG, DCI, Subtarget))
22480 return true;
22481 break;
22482 }
22484 // Minor canonicalization of the accumulated shuffle mask to make it easier
22485 // to match below. All this does is detect masks with sequential pairs of
22486 // elements, and shrink them to the half-width mask. It does this in a loop
22487 // so it will reduce the size of the mask to the minimal width mask which
22488 // performs an equivalent shuffle.
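// (Annotation, e.g.: the v4i32 mask {2, 3, 0, 1} pairs up into the v2i64
// mask {1, 0}, while an identity {0, 1, 2, 3} keeps shrinking to {0}.)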
22489 SmallVector<int, 16> WidenedMask;
22490 while (Mask.size() > 1 && canWidenShuffleElements(Mask, WidenedMask)) {
22491 Mask = std::move(WidenedMask);
22492 WidenedMask.clear();
22495 return combineX86ShuffleChain(Op, Root, Mask, Depth, HasPSHUFB, DAG, DCI,
22496 Subtarget);
22497 }
22499 /// \brief Get the PSHUF-style mask from PSHUF node.
22501 /// This is a very minor wrapper around getTargetShuffleMask to ease forming v4
22502 /// PSHUF-style masks that can be reused with such instructions.
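/// (Annotation, for instance: a PSHUFHW's raw 8-wide mask {0,1,2,3,4,6,5,7}
/// is reduced below to the v4 mask {0, 2, 1, 3} over the high half.)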
22503 static SmallVector<int, 4> getPSHUFShuffleMask(SDValue N) {
22504 SmallVector<int, 4> Mask;
22505 bool IsUnary;
22506 bool HaveMask = getTargetShuffleMask(N.getNode(), N.getSimpleValueType(), Mask, IsUnary);
22507 (void)HaveMask;
22508 assert(HaveMask);
22510 switch (N.getOpcode()) {
22511 case X86ISD::PSHUFD:
22512 return Mask;
22513 case X86ISD::PSHUFLW:
22514 Mask.resize(4);
22515 return Mask;
22516 case X86ISD::PSHUFHW:
22517 Mask.erase(Mask.begin(), Mask.begin() + 4);
22518 for (int &M : Mask)
22519 M -= 4;
22520 return Mask;
22521 default:
22522 llvm_unreachable("No valid shuffle instruction found!");
22523 }
22524 }
22526 /// \brief Search for a combinable shuffle across a chain ending in pshufd.
22528 /// We walk up the chain and look for a combinable shuffle, skipping over
22529 /// shuffles that we could hoist this shuffle's transformation past without
22530 /// altering anything.
22531 static SDValue
22532 combineRedundantDWordShuffle(SDValue N, MutableArrayRef<int> Mask,
22533 SelectionDAG &DAG,
22534 TargetLowering::DAGCombinerInfo &DCI) {
22535 assert(N.getOpcode() == X86ISD::PSHUFD &&
22536 "Called with something other than an x86 128-bit half shuffle!");
22539 // Walk up a single-use chain looking for a combinable shuffle. Keep a stack
22540 // of the shuffles in the chain so that we can form a fresh chain to replace
22541 // this chain.
22542 SmallVector<SDValue, 8> Chain;
22543 SDValue V = N.getOperand(0);
22544 for (; V.hasOneUse(); V = V.getOperand(0)) {
22545 switch (V.getOpcode()) {
22546 default:
22547 return SDValue(); // Nothing combined!
22549 case ISD::BITCAST:
22550 // Skip bitcasts as we always know the type for the target specific
22551 // instructions.
22552 continue;
22554 case X86ISD::PSHUFD:
22555 // Found another dword shuffle.
22556 break;
22558 case X86ISD::PSHUFLW:
22559 // Check that the low words (being shuffled) are the identity in the
22560 // dword shuffle, and the high words are self-contained.
22561 if (Mask[0] != 0 || Mask[1] != 1 ||
22562 !(Mask[2] >= 2 && Mask[2] < 4 && Mask[3] >= 2 && Mask[3] < 4))
22563 return SDValue();
22565 Chain.push_back(V);
22566 continue;
22568 case X86ISD::PSHUFHW:
22569 // Check that the high words (being shuffled) are the identity in the
22570 // dword shuffle, and the low words are self-contained.
22571 if (Mask[2] != 2 || Mask[3] != 3 ||
22572 !(Mask[0] >= 0 && Mask[0] < 2 && Mask[1] >= 0 && Mask[1] < 2))
22573 return SDValue();
22575 Chain.push_back(V);
22576 continue;
22578 case X86ISD::UNPCKL:
22579 case X86ISD::UNPCKH:
22580 // For either i8 -> i16 or i16 -> i32 unpacks, we can combine a dword
22581 // shuffle into a preceding word shuffle.
22582 if (V.getValueType() != MVT::v16i8 && V.getValueType() != MVT::v8i16)
22583 return SDValue();
22585 // Search for a half-shuffle which we can combine with.
22586 unsigned CombineOp =
22587 V.getOpcode() == X86ISD::UNPCKL ? X86ISD::PSHUFLW : X86ISD::PSHUFHW;
22588 if (V.getOperand(0) != V.getOperand(1) ||
22589 !V->isOnlyUserOf(V.getOperand(0).getNode()))
22590 return SDValue();
22591 Chain.push_back(V);
22592 V = V.getOperand(0);
22593 do {
22594 switch (V.getOpcode()) {
22595 default:
22596 return SDValue(); // Nothing to combine.
22598 case X86ISD::PSHUFLW:
22599 case X86ISD::PSHUFHW:
22600 if (V.getOpcode() == CombineOp)
22601 break;
22603 Chain.push_back(V);
22605 // Fallthrough!
22606 case ISD::BITCAST:
22607 V = V.getOperand(0);
22608 continue;
22609 }
22610 break;
22611 } while (V.hasOneUse());
22612 break;
22613 }
22614 // Break out of the loop if we break out of the switch.
22615 break;
22616 }
22618 if (!V.hasOneUse())
22619 // We fell out of the loop without finding a viable combining instruction.
22620 return SDValue();
22622 // Merge this node's mask and our incoming mask.
22623 SmallVector<int, 4> VMask = getPSHUFShuffleMask(V);
22624 for (int &M : Mask)
22625 M = VMask[M];
22626 V = DAG.getNode(V.getOpcode(), DL, V.getValueType(), V.getOperand(0),
22627 getV4X86ShuffleImm8ForMask(Mask, DAG));
22629 // Rebuild the chain around this new shuffle.
22630 while (!Chain.empty()) {
22631 SDValue W = Chain.pop_back_val();
22633 if (V.getValueType() != W.getOperand(0).getValueType())
22634 V = DAG.getNode(ISD::BITCAST, DL, W.getOperand(0).getValueType(), V);
22636 switch (W.getOpcode()) {
22637 default:
22638 llvm_unreachable("Only PSHUF and UNPCK instructions get here!");
22640 case X86ISD::UNPCKL:
22641 case X86ISD::UNPCKH:
22642 V = DAG.getNode(W.getOpcode(), DL, W.getValueType(), V, V);
22643 break;
22645 case X86ISD::PSHUFD:
22646 case X86ISD::PSHUFLW:
22647 case X86ISD::PSHUFHW:
22648 V = DAG.getNode(W.getOpcode(), DL, W.getValueType(), V, W.getOperand(1));
22649 break;
22650 }
22651 }
22652 if (V.getValueType() != N.getValueType())
22653 V = DAG.getNode(ISD::BITCAST, DL, N.getValueType(), V);
22655 // Return the new chain to replace N.
22656 return V;
22657 }
22659 /// \brief Search for a combinable shuffle across a chain ending in pshuflw or pshufhw.
22661 /// We walk up the chain, skipping shuffles of the other half and looking
22662 /// through shuffles which switch halves trying to find a shuffle of the same
22663 /// pair of dwords.
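/// (Annotation, for example: walking up from a PSHUFLW, an intervening
/// PSHUFHW touches only the high words and is skipped here as a no-op.)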
22664 static bool combineRedundantHalfShuffle(SDValue N, MutableArrayRef<int> Mask,
22665 SelectionDAG &DAG,
22666 TargetLowering::DAGCombinerInfo &DCI) {
22667 assert(
22668 (N.getOpcode() == X86ISD::PSHUFLW || N.getOpcode() == X86ISD::PSHUFHW) &&
22669 "Called with something other than an x86 128-bit half shuffle!");
22671 unsigned CombineOpcode = N.getOpcode();
22673 // Walk up a single-use chain looking for a combinable shuffle.
22674 SDValue V = N.getOperand(0);
22675 for (; V.hasOneUse(); V = V.getOperand(0)) {
22676 switch (V.getOpcode()) {
22677 default:
22678 return false; // Nothing combined!
22680 case ISD::BITCAST:
22681 // Skip bitcasts as we always know the type for the target specific
22682 // instructions.
22683 continue;
22685 case X86ISD::PSHUFLW:
22686 case X86ISD::PSHUFHW:
22687 if (V.getOpcode() == CombineOpcode)
22688 break;
22690 // Other-half shuffles are no-ops.
22691 continue;
22692 }
22693 // Break out of the loop if we break out of the switch.
22694 break;
22695 }
22697 if (!V.hasOneUse())
22698 // We fell out of the loop without finding a viable combining instruction.
22699 return false;
22701 // Combine away the bottom node as its shuffle will be accumulated into
22702 // a preceding shuffle.
22703 DCI.CombineTo(N.getNode(), N.getOperand(0), /*AddTo*/ true);
22705 // Record the old value.
22706 SDValue Old = V;
22708 // Merge this node's mask and our incoming mask (adjusted to account for all
22709 // the pshufd instructions encountered).
22710 SmallVector<int, 4> VMask = getPSHUFShuffleMask(V);
22711 for (int &M : Mask)
22712 M = VMask[M];
22713 V = DAG.getNode(V.getOpcode(), DL, MVT::v8i16, V.getOperand(0),
22714 getV4X86ShuffleImm8ForMask(Mask, DAG));
22716 // Check that the shuffles didn't cancel each other out. If not, we need to
22717 // combine to the new one.
22718 if (Old != V)
22719 // Replace the combinable shuffle with the combined one, updating all users
22720 // so that we re-evaluate the chain here.
22721 DCI.CombineTo(Old.getNode(), V, /*AddTo*/ true);
22723 return true;
22724 }
22726 /// \brief Try to combine x86 target specific shuffles.
22727 static SDValue PerformTargetShuffleCombine(SDValue N, SelectionDAG &DAG,
22728 TargetLowering::DAGCombinerInfo &DCI,
22729 const X86Subtarget *Subtarget) {
22730 SDLoc DL(N);
22731 MVT VT = N.getSimpleValueType();
22732 SmallVector<int, 4> Mask;
22734 switch (N.getOpcode()) {
22735 case X86ISD::PSHUFD:
22736 case X86ISD::PSHUFLW:
22737 case X86ISD::PSHUFHW:
22738 Mask = getPSHUFShuffleMask(N);
22739 assert(Mask.size() == 4);
22740 break;
22741 default:
22742 return SDValue();
22743 }
22745 // Nuke no-op shuffles that show up after combining.
22746 if (isNoopShuffleMask(Mask))
22747 return DCI.CombineTo(N.getNode(), N.getOperand(0), /*AddTo*/ true);
22749 // Look for simplifications involving one or two shuffle instructions.
22750 SDValue V = N.getOperand(0);
22751 switch (N.getOpcode()) {
22752 default:
22753 break;
22754 case X86ISD::PSHUFLW:
22755 case X86ISD::PSHUFHW:
22756 assert(VT == MVT::v8i16);
22757 (void)VT;
22759 if (combineRedundantHalfShuffle(N, Mask, DAG, DCI))
22760 return SDValue(); // We combined away this shuffle, so we're done.
22762 // See if this reduces to a PSHUFD which is no more expensive and can
22763 // combine with more operations. Note that it has to at least flip the
22764 // dwords as otherwise it would have been removed as a no-op.
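// (Annotation, e.g.: a PSHUFLW with mask {2, 3, 0, 1} swaps the two dwords
// of the low half, so DMask below becomes {1, 0, 2, 3} for the PSHUFD.)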
22765 if (Mask[0] == 2 && Mask[1] == 3 && Mask[2] == 0 && Mask[3] == 1) {
22766 int DMask[] = {0, 1, 2, 3};
22767 int DOffset = N.getOpcode() == X86ISD::PSHUFLW ? 0 : 2;
22768 DMask[DOffset + 0] = DOffset + 1;
22769 DMask[DOffset + 1] = DOffset + 0;
22770 V = DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, V);
22771 DCI.AddToWorklist(V.getNode());
22772 V = DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32, V,
22773 getV4X86ShuffleImm8ForMask(DMask, DAG));
22774 DCI.AddToWorklist(V.getNode());
22775 return DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, V);
22778 // Look for shuffle patterns which can be implemented as a single unpack.
22779 // FIXME: This doesn't handle the location of the PSHUFD generically, and
22780 // only works when we have a PSHUFD followed by two half-shuffles.
22781 if (Mask[0] == Mask[1] && Mask[2] == Mask[3] &&
22782 (V.getOpcode() == X86ISD::PSHUFLW ||
22783 V.getOpcode() == X86ISD::PSHUFHW) &&
22784 V.getOpcode() != N.getOpcode() &&
22785 V.hasOneUse()) {
22786 SDValue D = V.getOperand(0);
22787 while (D.getOpcode() == ISD::BITCAST && D.hasOneUse())
22788 D = D.getOperand(0);
22789 if (D.getOpcode() == X86ISD::PSHUFD && D.hasOneUse()) {
22790 SmallVector<int, 4> VMask = getPSHUFShuffleMask(V);
22791 SmallVector<int, 4> DMask = getPSHUFShuffleMask(D);
22792 int NOffset = N.getOpcode() == X86ISD::PSHUFLW ? 0 : 4;
22793 int VOffset = V.getOpcode() == X86ISD::PSHUFLW ? 0 : 4;
22794 int WordMask[8];
22795 for (int i = 0; i < 4; ++i) {
22796 WordMask[i + NOffset] = Mask[i] + NOffset;
22797 WordMask[i + VOffset] = VMask[i] + VOffset;
22798 }
22799 // Map the word mask through the DWord mask.
22800 int MappedMask[8];
22801 for (int i = 0; i < 8; ++i)
22802 MappedMask[i] = 2 * DMask[WordMask[i] / 2] + WordMask[i] % 2;
22803 const int UnpackLoMask[] = {0, 0, 1, 1, 2, 2, 3, 3};
22804 const int UnpackHiMask[] = {4, 4, 5, 5, 6, 6, 7, 7};
22805 if (std::equal(std::begin(MappedMask), std::end(MappedMask),
22806 std::begin(UnpackLoMask)) ||
22807 std::equal(std::begin(MappedMask), std::end(MappedMask),
22808 std::begin(UnpackHiMask))) {
22809 // We can replace all three shuffles with an unpack.
22810 V = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, D.getOperand(0));
22811 DCI.AddToWorklist(V.getNode());
22812 return DAG.getNode(MappedMask[0] == 0 ? X86ISD::UNPCKL
22813 : X86ISD::UNPCKH,
22814 DL, MVT::v8i16, V, V);
22815 }
22816 }
22817 }
22819 break;
22821 case X86ISD::PSHUFD:
22822 if (SDValue NewN = combineRedundantDWordShuffle(N, Mask, DAG, DCI))
22823 return NewN;
22825 break;
22826 }
22828 return SDValue();
22829 }
22831 /// \brief Try to combine a shuffle into a target-specific add-sub node.
22833 /// We combine this directly on the abstract vector shuffle nodes so it is
22834 /// easier to generically match. We also insert dummy vector shuffle nodes for
22835 /// the operands which explicitly discard the lanes which are unused by this
22836 /// operation to try to flow through the rest of the combiner the fact that
22837 /// they're unused.
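/// (Annotation, an illustrative example: for v4f32 the pattern
///   shuffle <0, 5, 2, 7> (fsub A, B), (fadd A, B)
/// takes the subtraction in even lanes and the addition in odd lanes, which
/// is precisely ADDSUBPS.)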
22838 static SDValue combineShuffleToAddSub(SDNode *N, SelectionDAG &DAG) {
22839 SDLoc DL(N);
22840 EVT VT = N->getValueType(0);
22842 // We only handle target-independent shuffles.
22843 // FIXME: It would be easy and harmless to use the target shuffle mask
22844 // extraction tool to support more.
22845 if (N->getOpcode() != ISD::VECTOR_SHUFFLE)
22846 return SDValue();
22848 auto *SVN = cast<ShuffleVectorSDNode>(N);
22849 ArrayRef<int> Mask = SVN->getMask();
22850 SDValue V1 = N->getOperand(0);
22851 SDValue V2 = N->getOperand(1);
22853 // We require the first shuffle operand to be the SUB node, and the second to
22854 // be the ADD node.
22855 // FIXME: We should support the commuted patterns.
22856 if (V1->getOpcode() != ISD::FSUB || V2->getOpcode() != ISD::FADD)
22857 return SDValue();
22859 // If there are other uses of these operations we can't fold them.
22860 if (!V1->hasOneUse() || !V2->hasOneUse())
22861 return SDValue();
22863 // Ensure that both operations have the same operands. Note that we can
22864 // commute the FADD operands.
22865 SDValue LHS = V1->getOperand(0), RHS = V1->getOperand(1);
22866 if ((V2->getOperand(0) != LHS || V2->getOperand(1) != RHS) &&
22867 (V2->getOperand(0) != RHS || V2->getOperand(1) != LHS))
22868 return SDValue();
22870 // We're looking for blends between FADD and FSUB nodes. We insist on these
22871 // nodes being lined up in a specific expected pattern.
22872 if (!(isShuffleEquivalent(Mask, 0, 3) ||
22873 isShuffleEquivalent(Mask, 0, 5, 2, 7) ||
22874 isShuffleEquivalent(Mask, 0, 9, 2, 11, 4, 13, 6, 15)))
22875 return SDValue();
22877 // Only specific types are legal at this point, assert so we notice if and
22878 // when these change.
22879 assert((VT == MVT::v4f32 || VT == MVT::v2f64 || VT == MVT::v8f32 ||
22880 VT == MVT::v4f64) &&
22881 "Unknown vector type encountered!");
22883 return DAG.getNode(X86ISD::ADDSUB, DL, VT, LHS, RHS);
22884 }
22886 /// PerformShuffleCombine - Performs several different shuffle combines.
22887 static SDValue PerformShuffleCombine(SDNode *N, SelectionDAG &DAG,
22888 TargetLowering::DAGCombinerInfo &DCI,
22889 const X86Subtarget *Subtarget) {
22890 SDLoc dl(N);
22891 SDValue N0 = N->getOperand(0);
22892 SDValue N1 = N->getOperand(1);
22893 EVT VT = N->getValueType(0);
22895 // Don't create instructions with illegal types after legalize types has run.
22896 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
22897 if (!DCI.isBeforeLegalize() && !TLI.isTypeLegal(VT.getVectorElementType()))
22898 return SDValue();
22900 // If we have legalized the vector types, look for blends of FADD and FSUB
22901 // nodes that we can fuse into an ADDSUB node.
22902 if (TLI.isTypeLegal(VT) && Subtarget->hasSSE3())
22903 if (SDValue AddSub = combineShuffleToAddSub(N, DAG))
22904 return AddSub;
22906 // Combine 256-bit vector shuffles. This is only profitable when in AVX mode
22907 if (Subtarget->hasFp256() && VT.is256BitVector() &&
22908 N->getOpcode() == ISD::VECTOR_SHUFFLE)
22909 return PerformShuffleCombine256(N, DAG, DCI, Subtarget);
22911 // During Type Legalization, when promoting illegal vector types,
22912 // the backend might introduce new shuffle dag nodes and bitcasts.
22914 // This code performs the following transformation:
22915 // fold: (shuffle (bitcast (BINOP A, B)), Undef, <Mask>) ->
22916 // (shuffle (BINOP (bitcast A), (bitcast B)), Undef, <Mask>)
22918 // We do this only if both the bitcast and the BINOP dag nodes have
22919 // one use. Also, perform this transformation only if the new binary
22920 // operation is legal. This is to avoid introducing dag nodes that
22921 // potentially need to be further expanded (or custom lowered) into a
22922 // less optimal sequence of dag nodes.
22923 if (!DCI.isBeforeLegalize() && DCI.isBeforeLegalizeOps() &&
22924 N1.getOpcode() == ISD::UNDEF && N0.hasOneUse() &&
22925 N0.getOpcode() == ISD::BITCAST) {
22926 SDValue BC0 = N0.getOperand(0);
22927 EVT SVT = BC0.getValueType();
22928 unsigned Opcode = BC0.getOpcode();
22929 unsigned NumElts = VT.getVectorNumElements();
22931 if (BC0.hasOneUse() && SVT.isVector() &&
22932 SVT.getVectorNumElements() * 2 == NumElts &&
22933 TLI.isOperationLegal(Opcode, VT)) {
22934 bool CanFold = false;
22935 switch (Opcode) {
22936 default : break;
22937 case ISD::ADD :
22938 case ISD::FADD :
22939 case ISD::SUB :
22940 case ISD::FSUB :
22941 case ISD::MUL :
22942 case ISD::FMUL :
22943 CanFold = true;
22944 }
22946 unsigned SVTNumElts = SVT.getVectorNumElements();
22947 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
22948 for (unsigned i = 0, e = SVTNumElts; i != e && CanFold; ++i)
22949 CanFold = SVOp->getMaskElt(i) == (int)(i * 2);
22950 for (unsigned i = SVTNumElts, e = NumElts; i != e && CanFold; ++i)
22951 CanFold = SVOp->getMaskElt(i) < 0;
22953 if (CanFold) {
22954 SDValue BC00 = DAG.getNode(ISD::BITCAST, dl, VT, BC0.getOperand(0));
22955 SDValue BC01 = DAG.getNode(ISD::BITCAST, dl, VT, BC0.getOperand(1));
22956 SDValue NewBinOp = DAG.getNode(BC0.getOpcode(), dl, VT, BC00, BC01);
22957 return DAG.getVectorShuffle(VT, dl, NewBinOp, N1, &SVOp->getMask()[0]);
22958 }
22959 }
22960 }
22962 // Only handle 128 wide vector from here on.
22963 if (!VT.is128BitVector())
22964 return SDValue();
22966 // Combine a vector_shuffle that is equal to build_vector load1, load2, load3,
22967 // load4, <0, 1, 2, 3> into a 128-bit load if the load addresses are
22968 // consecutive, non-overlapping, and in the right order.
22969 SmallVector<SDValue, 16> Elts;
22970 for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i)
22971 Elts.push_back(getShuffleScalarElt(N, i, DAG, 0));
22973 SDValue LD = EltsFromConsecutiveLoads(VT, Elts, dl, DAG, true);
22974 if (LD.getNode())
22975 return LD;
22977 if (isTargetShuffle(N->getOpcode())) {
22978 SDValue Shuffle =
22979 PerformTargetShuffleCombine(SDValue(N, 0), DAG, DCI, Subtarget);
22980 if (Shuffle.getNode())
22981 return Shuffle;
22983 // Try recursively combining arbitrary sequences of x86 shuffle
22984 // instructions into higher-order shuffles. We do this after combining
22985 // specific PSHUF instruction sequences into their minimal form so that we
22986 // can evaluate how many specialized shuffle instructions are involved in
22987 // a particular chain.
22988 SmallVector<int, 1> NonceMask; // Just a placeholder.
22989 NonceMask.push_back(0);
22990 if (combineX86ShufflesRecursively(SDValue(N, 0), SDValue(N, 0), NonceMask,
22991 /*Depth*/ 1, /*HasPSHUFB*/ false, DAG,
22992 DCI, Subtarget))
22993 return SDValue(); // This routine will use CombineTo to replace N.
22994 }
22996 return SDValue();
22997 }
22999 /// PerformTruncateCombine - Converts truncate operation to
23000 /// a sequence of vector shuffle operations.
23001 /// It is possible when we truncate 256-bit vector to 128-bit vector
23002 static SDValue PerformTruncateCombine(SDNode *N, SelectionDAG &DAG,
23003 TargetLowering::DAGCombinerInfo &DCI,
23004 const X86Subtarget *Subtarget) {
23005 return SDValue();
23006 }
23008 /// XFormVExtractWithShuffleIntoLoad - Check if a vector extract from a target
23009 /// specific shuffle of a load can be folded into a single element load.
23010 /// Similar handling for VECTOR_SHUFFLE is performed by DAGCombiner, but
23011 /// shuffles have been custom lowered so we need to handle those here.
23012 static SDValue XFormVExtractWithShuffleIntoLoad(SDNode *N, SelectionDAG &DAG,
23013 TargetLowering::DAGCombinerInfo &DCI) {
23014 if (DCI.isBeforeLegalizeOps())
23015 return SDValue();
23017 SDValue InVec = N->getOperand(0);
23018 SDValue EltNo = N->getOperand(1);
23020 if (!isa<ConstantSDNode>(EltNo))
23021 return SDValue();
23023 EVT OriginalVT = InVec.getValueType();
23025 if (InVec.getOpcode() == ISD::BITCAST) {
23026 // Don't duplicate a load with other uses.
23027 if (!InVec.hasOneUse())
23028 return SDValue();
23029 EVT BCVT = InVec.getOperand(0).getValueType();
23030 if (BCVT.getVectorNumElements() != OriginalVT.getVectorNumElements())
23031 return SDValue();
23032 InVec = InVec.getOperand(0);
23033 }
23035 EVT CurrentVT = InVec.getValueType();
23037 if (!isTargetShuffle(InVec.getOpcode()))
23038 return SDValue();
23040 // Don't duplicate a load with other uses.
23041 if (!InVec.hasOneUse())
23042 return SDValue();
23044 SmallVector<int, 16> ShuffleMask;
23045 bool UnaryShuffle;
23046 if (!getTargetShuffleMask(InVec.getNode(), CurrentVT.getSimpleVT(),
23047 ShuffleMask, UnaryShuffle))
23048 return SDValue();
23050 // Select the input vector, guarding against out of range extract vector.
23051 unsigned NumElems = CurrentVT.getVectorNumElements();
23052 int Elt = cast<ConstantSDNode>(EltNo)->getZExtValue();
23053 int Idx = (Elt > (int)NumElems) ? -1 : ShuffleMask[Elt];
23054 SDValue LdNode = (Idx < (int)NumElems) ? InVec.getOperand(0)
23055 : InVec.getOperand(1);
23057 // If inputs to shuffle are the same for both ops, then allow 2 uses
23058 unsigned AllowedUses = InVec.getNumOperands() > 1 &&
23059 InVec.getOperand(0) == InVec.getOperand(1) ? 2 : 1;
23061 if (LdNode.getOpcode() == ISD::BITCAST) {
23062 // Don't duplicate a load with other uses.
23063 if (!LdNode.getNode()->hasNUsesOfValue(AllowedUses, 0))
23064 return SDValue();
23066 AllowedUses = 1; // only allow 1 load use if we have a bitcast
23067 LdNode = LdNode.getOperand(0);
23068 }
23070 if (!ISD::isNormalLoad(LdNode.getNode()))
23071 return SDValue();
23073 LoadSDNode *LN0 = cast<LoadSDNode>(LdNode);
23075 if (!LN0 ||!LN0->hasNUsesOfValue(AllowedUses, 0) || LN0->isVolatile())
23076 return SDValue();
23078 EVT EltVT = N->getValueType(0);
23079 // If there's a bitcast before the shuffle, check if the load type and
23080 // alignment is valid.
23081 unsigned Align = LN0->getAlignment();
23082 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
23083 unsigned NewAlign = TLI.getDataLayout()->getABITypeAlignment(
23084 EltVT.getTypeForEVT(*DAG.getContext()));
23086 if (NewAlign > Align || !TLI.isOperationLegalOrCustom(ISD::LOAD, EltVT))
23087 return SDValue();
23089 // All checks match so transform back to vector_shuffle so that DAG combiner
23090 // can finish the job
23092 SDLoc dl(N);
23093 // Create shuffle node taking into account the case that it's a unary shuffle
23094 SDValue Shuffle = (UnaryShuffle) ? DAG.getUNDEF(CurrentVT)
23095 : InVec.getOperand(1);
23096 Shuffle = DAG.getVectorShuffle(CurrentVT, dl,
23097 InVec.getOperand(0), Shuffle,
23098 &ShuffleMask[0]);
23099 Shuffle = DAG.getNode(ISD::BITCAST, dl, OriginalVT, Shuffle);
23100 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, N->getValueType(0), Shuffle,
23101 EltNo);
23102 }
23104 /// \brief Detect bitcasts between i32 to x86mmx low word. Since MMX types are
23105 /// special and don't usually play with other vector types, it's better to
23106 /// handle them early to be sure we emit efficient code by avoiding
23107 /// store-load conversions.
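/// (Annotation, a sketch of the pattern handled below:
///   (x86mmx (bitcast (v2i32 build_vector X:i32, 0))) --> (MMX_MOVW2D X).)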
23108 static SDValue PerformBITCASTCombine(SDNode *N, SelectionDAG &DAG) {
23109 if (N->getValueType(0) != MVT::x86mmx ||
23110 N->getOperand(0)->getOpcode() != ISD::BUILD_VECTOR ||
23111 N->getOperand(0)->getValueType(0) != MVT::v2i32)
23112 return SDValue();
23114 SDValue V = N->getOperand(0);
23115 ConstantSDNode *C = dyn_cast<ConstantSDNode>(V.getOperand(1));
23116 if (C && C->getZExtValue() == 0 && V.getOperand(0).getValueType() == MVT::i32)
23117 return DAG.getNode(X86ISD::MMX_MOVW2D, SDLoc(V.getOperand(0)),
23118 N->getValueType(0), V.getOperand(0));
23120 return SDValue();
23121 }
23123 /// PerformEXTRACT_VECTOR_ELTCombine - Detect vector gather/scatter index
23124 /// generation and convert it from being a bunch of shuffles and extracts
23125 /// into a somewhat faster sequence. For i686, the best sequence is apparently
23126 /// storing the value and loading scalars back, while for x64 we should
23127 /// use 64-bit extracts and shifts.
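/// (Annotation, a sketch of the 64-bit path below: bitcast the v4i32 input
/// to v2i64, extract both i64 halves, then form each i32 lane by truncating
/// a half or a half shifted right by 32.)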
23128 static SDValue PerformEXTRACT_VECTOR_ELTCombine(SDNode *N, SelectionDAG &DAG,
23129 TargetLowering::DAGCombinerInfo &DCI) {
23130 SDValue NewOp = XFormVExtractWithShuffleIntoLoad(N, DAG, DCI);
23131 if (NewOp.getNode())
23132 return NewOp;
23134 SDValue InputVector = N->getOperand(0);
23136 // Detect mmx to i32 conversion through a v2i32 elt extract.
23137 if (InputVector.getOpcode() == ISD::BITCAST && InputVector.hasOneUse() &&
23138 N->getValueType(0) == MVT::i32 &&
23139 InputVector.getValueType() == MVT::v2i32) {
23141 // The bitcast source is a direct mmx result.
23142 SDValue MMXSrc = InputVector.getNode()->getOperand(0);
23143 if (MMXSrc.getValueType() == MVT::x86mmx)
23144 return DAG.getNode(X86ISD::MMX_MOVD2W, SDLoc(InputVector),
23145 N->getValueType(0),
23146 InputVector.getNode()->getOperand(0));
23148 // The mmx is indirect: (i64 extract_elt (v1i64 bitcast (x86mmx ...))).
23149 SDValue MMXSrcOp = MMXSrc.getOperand(0);
23150 if (MMXSrc.getOpcode() == ISD::EXTRACT_VECTOR_ELT && MMXSrc.hasOneUse() &&
23151 MMXSrc.getValueType() == MVT::i64 && MMXSrcOp.hasOneUse() &&
23152 MMXSrcOp.getOpcode() == ISD::BITCAST &&
23153 MMXSrcOp.getValueType() == MVT::v1i64 &&
23154 MMXSrcOp.getOperand(0).getValueType() == MVT::x86mmx)
23155 return DAG.getNode(X86ISD::MMX_MOVD2W, SDLoc(InputVector),
23156 N->getValueType(0),
23157 MMXSrcOp.getOperand(0));
23158 }
23160 // Only operate on vectors of 4 elements, where the alternative shuffling
23161 // gets to be more expensive.
23162 if (InputVector.getValueType() != MVT::v4i32)
23163 return SDValue();
23165 // Check whether every use of InputVector is an EXTRACT_VECTOR_ELT with a
23166 // single use which is a sign-extend or zero-extend, and all elements are
23167 // used.
23168 SmallVector<SDNode *, 4> Uses;
23169 unsigned ExtractedElements = 0;
23170 for (SDNode::use_iterator UI = InputVector.getNode()->use_begin(),
23171 UE = InputVector.getNode()->use_end(); UI != UE; ++UI) {
23172 if (UI.getUse().getResNo() != InputVector.getResNo())
23173 continue;
23175 SDNode *Extract = *UI;
23176 if (Extract->getOpcode() != ISD::EXTRACT_VECTOR_ELT)
23177 return SDValue();
23179 if (Extract->getValueType(0) != MVT::i32)
23180 return SDValue();
23181 if (!Extract->hasOneUse())
23182 return SDValue();
23183 if (Extract->use_begin()->getOpcode() != ISD::SIGN_EXTEND &&
23184 Extract->use_begin()->getOpcode() != ISD::ZERO_EXTEND)
23185 return SDValue();
23186 if (!isa<ConstantSDNode>(Extract->getOperand(1)))
23187 return SDValue();
23189 // Record which element was extracted.
23190 ExtractedElements |=
23191 1 << cast<ConstantSDNode>(Extract->getOperand(1))->getZExtValue();
23193 Uses.push_back(Extract);
23194 }
23196 // If not all the elements were used, this may not be worthwhile.
23197 if (ExtractedElements != 15)
23198 return SDValue();
23200 // Ok, we've now decided to do the transformation.
23201 // If 64-bit shifts are legal, use the extract-shift sequence,
23202 // otherwise bounce the vector off the cache.
23203 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
23204 SDValue Vals[4];
23205 SDLoc dl(InputVector);
23207 if (TLI.isOperationLegal(ISD::SRA, MVT::i64)) {
23208 SDValue Cst = DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, InputVector);
23209 EVT VecIdxTy = DAG.getTargetLoweringInfo().getVectorIdxTy();
23210 SDValue BottomHalf = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i64, Cst,
23211 DAG.getConstant(0, VecIdxTy));
23212 SDValue TopHalf = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i64, Cst,
23213 DAG.getConstant(1, VecIdxTy));
23215 SDValue ShAmt = DAG.getConstant(32,
23216 DAG.getTargetLoweringInfo().getShiftAmountTy(MVT::i64));
23217 Vals[0] = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, BottomHalf);
23218 Vals[1] = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32,
23219 DAG.getNode(ISD::SRA, dl, MVT::i64, BottomHalf, ShAmt));
23220 Vals[2] = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, TopHalf);
23221 Vals[3] = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32,
23222 DAG.getNode(ISD::SRA, dl, MVT::i64, TopHalf, ShAmt));
23223 } else {
23224 // Store the value to a temporary stack slot.
23225 SDValue StackPtr = DAG.CreateStackTemporary(InputVector.getValueType());
23226 SDValue Ch = DAG.getStore(DAG.getEntryNode(), dl, InputVector, StackPtr,
23227 MachinePointerInfo(), false, false, 0);
23229 EVT ElementType = InputVector.getValueType().getVectorElementType();
23230 unsigned EltSize = ElementType.getSizeInBits() / 8;
23232 // Replace each use (extract) with a load of the appropriate element.
23233 for (unsigned i = 0; i < 4; ++i) {
23234 uint64_t Offset = EltSize * i;
23235 SDValue OffsetVal = DAG.getConstant(Offset, TLI.getPointerTy());
23237 SDValue ScalarAddr = DAG.getNode(ISD::ADD, dl, TLI.getPointerTy(),
23238 StackPtr, OffsetVal);
23240 // Load the scalar.
23241 Vals[i] = DAG.getLoad(ElementType, dl, Ch,
23242 ScalarAddr, MachinePointerInfo(),
23243 false, false, false, 0);
23244 }
23245 }
23248 // Replace the extracts
23249 for (SmallVectorImpl<SDNode *>::iterator UI = Uses.begin(),
23250 UE = Uses.end(); UI != UE; ++UI) {
23251 SDNode *Extract = *UI;
23253 SDValue Idx = Extract->getOperand(1);
23254 uint64_t IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
23255 DAG.ReplaceAllUsesOfValueWith(SDValue(Extract, 0), Vals[IdxVal]);
23256 }
23258 // The replacement was made in place; don't return anything.
23259 return SDValue();
23260 }
23262 /// \brief Matches a VSELECT onto min/max or return 0 if the node doesn't match.
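/// (Annotation, for instance: (vselect (setult X, Y), X, Y) matches as
/// X86ISD::UMIN, and the reversed-arm form (vselect (setult X, Y), Y, X)
/// as X86ISD::UMAX.)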
23263 static std::pair<unsigned, bool>
23264 matchIntegerMINMAX(SDValue Cond, EVT VT, SDValue LHS, SDValue RHS,
23265 SelectionDAG &DAG, const X86Subtarget *Subtarget) {
23266 if (!VT.isVector())
23267 return std::make_pair(0, false);
23269 bool NeedSplit = false;
23270 switch (VT.getSimpleVT().SimpleTy) {
23271 default: return std::make_pair(0, false);
23272 case MVT::v4i64:
23273 case MVT::v2i64:
23274 if (!Subtarget->hasVLX())
23275 return std::make_pair(0, false);
23276 break;
23277 case MVT::v64i8:
23278 case MVT::v32i16:
23279 if (!Subtarget->hasBWI())
23280 return std::make_pair(0, false);
23281 break;
23282 case MVT::v16i32:
23283 case MVT::v8i64:
23284 if (!Subtarget->hasAVX512())
23285 return std::make_pair(0, false);
23286 break;
23287 case MVT::v32i8:
23288 case MVT::v16i16:
23289 case MVT::v8i32:
23290 if (!Subtarget->hasAVX2())
23291 NeedSplit = true;
23292 if (!Subtarget->hasAVX())
23293 return std::make_pair(0, false);
23294 break;
23295 case MVT::v16i8:
23296 case MVT::v8i16:
23297 case MVT::v4i32:
23298 if (!Subtarget->hasSSE2())
23299 return std::make_pair(0, false);
23300 }
23302 // SSE2 has only a small subset of the operations.
23303 bool hasUnsigned = Subtarget->hasSSE41() ||
23304 (Subtarget->hasSSE2() && VT == MVT::v16i8);
23305 bool hasSigned = Subtarget->hasSSE41() ||
23306 (Subtarget->hasSSE2() && VT == MVT::v8i16);
23308 ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
23310 unsigned Opc = 0;
23311 // Check for x CC y ? x : y.
23312 if (DAG.isEqualTo(LHS, Cond.getOperand(0)) &&
23313 DAG.isEqualTo(RHS, Cond.getOperand(1))) {
23314 switch (CC) {
23315 default: break;
23316 case ISD::SETULT:
23317 case ISD::SETULE:
23318 Opc = hasUnsigned ? X86ISD::UMIN : 0; break;
23319 case ISD::SETUGT:
23320 case ISD::SETUGE:
23321 Opc = hasUnsigned ? X86ISD::UMAX : 0; break;
23322 case ISD::SETLT:
23323 case ISD::SETLE:
23324 Opc = hasSigned ? X86ISD::SMIN : 0; break;
23325 case ISD::SETGT:
23326 case ISD::SETGE:
23327 Opc = hasSigned ? X86ISD::SMAX : 0; break;
23328 }
23329 // Check for x CC y ? y : x -- a min/max with reversed arms.
23330 } else if (DAG.isEqualTo(LHS, Cond.getOperand(1)) &&
23331 DAG.isEqualTo(RHS, Cond.getOperand(0))) {
23332 switch (CC) {
23333 default: break;
23334 case ISD::SETULT:
23335 case ISD::SETULE:
23336 Opc = hasUnsigned ? X86ISD::UMAX : 0; break;
23337 case ISD::SETUGT:
23338 case ISD::SETUGE:
23339 Opc = hasUnsigned ? X86ISD::UMIN : 0; break;
23340 case ISD::SETLT:
23341 case ISD::SETLE:
23342 Opc = hasSigned ? X86ISD::SMAX : 0; break;
23343 case ISD::SETGT:
23344 case ISD::SETGE:
23345 Opc = hasSigned ? X86ISD::SMIN : 0; break;
23346 }
23347 }
23349 return std::make_pair(Opc, NeedSplit);
23350 }
23352 static SDValue
23353 transformVSELECTtoBlendVECTOR_SHUFFLE(SDNode *N, SelectionDAG &DAG,
23354 const X86Subtarget *Subtarget) {
23355 SDLoc dl(N);
23356 SDValue Cond = N->getOperand(0);
23357 SDValue LHS = N->getOperand(1);
23358 SDValue RHS = N->getOperand(2);
23360 if (Cond.getOpcode() == ISD::SIGN_EXTEND) {
23361 SDValue CondSrc = Cond->getOperand(0);
23362 if (CondSrc->getOpcode() == ISD::SIGN_EXTEND_INREG)
23363 Cond = CondSrc->getOperand(0);
23364 }
23366 if (!ISD::isBuildVectorOfConstantSDNodes(Cond.getNode()))
23367 return SDValue();
23369 // A vselect where all conditions and data are constants can be optimized into
23370 // a single vector load by SelectionDAGLegalize::ExpandBUILD_VECTOR().
23371 if (ISD::isBuildVectorOfConstantSDNodes(LHS.getNode()) &&
23372 ISD::isBuildVectorOfConstantSDNodes(RHS.getNode()))
23373 return SDValue();
23375 unsigned MaskValue = 0;
23376 if (!BUILD_VECTORtoBlendMask(cast<BuildVectorSDNode>(Cond), MaskValue))
23377 return SDValue();
23379 MVT VT = N->getSimpleValueType(0);
23380 unsigned NumElems = VT.getVectorNumElements();
23381 SmallVector<int, 8> ShuffleMask(NumElems, -1);
23382 for (unsigned i = 0; i < NumElems; ++i) {
23383 // Be sure we emit undef where we can.
23384 if (Cond.getOperand(i)->getOpcode() == ISD::UNDEF)
23385 ShuffleMask[i] = -1;
23386 else
23387 ShuffleMask[i] = i + NumElems * ((MaskValue >> i) & 1);
23388 }
23390 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
23391 if (!TLI.isShuffleMaskLegal(ShuffleMask, VT))
23392 return SDValue();
23393 return DAG.getVectorShuffle(VT, dl, LHS, RHS, &ShuffleMask[0]);
23394 }
23396 /// PerformSELECTCombine - Do target-specific dag combines on SELECT and VSELECT
23397 /// nodes.
23398 static SDValue PerformSELECTCombine(SDNode *N, SelectionDAG &DAG,
23399 TargetLowering::DAGCombinerInfo &DCI,
23400 const X86Subtarget *Subtarget) {
23401 SDLoc DL(N);
23402 SDValue Cond = N->getOperand(0);
23403 // Get the LHS/RHS of the select.
23404 SDValue LHS = N->getOperand(1);
23405 SDValue RHS = N->getOperand(2);
23406 EVT VT = LHS.getValueType();
23407 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
23409 // If we have SSE[12] support, try to form min/max nodes. SSE min/max
23410 // instructions match the semantics of the common C idiom x<y?x:y but not
23411 // x<=y?x:y, because of how they handle negative zero (which can be
23412 // ignored in unsafe-math mode).
23413 // We also try to create v2f32 min/max nodes, which we later widen to v4f32.
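// (Annotation, e.g.: MINPS(-0.0, +0.0) compares the operands equal and
// returns its second operand, matching x < y ? x : y; an x <= y ? x : y
// select would need -0.0, which is why the SETOLE cases below require the
// negative-zero guards.)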
23414 if (Cond.getOpcode() == ISD::SETCC && VT.isFloatingPoint() &&
23415 VT != MVT::f80 && (TLI.isTypeLegal(VT) || VT == MVT::v2f32) &&
23416 (Subtarget->hasSSE2() ||
23417 (Subtarget->hasSSE1() && VT.getScalarType() == MVT::f32))) {
23418 ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
23420 unsigned Opcode = 0;
23421 // Check for x CC y ? x : y.
23422 if (DAG.isEqualTo(LHS, Cond.getOperand(0)) &&
23423 DAG.isEqualTo(RHS, Cond.getOperand(1))) {
23424 switch (CC) {
23425 default: break;
23426 case ISD::SETULT:
23427 // Converting this to a min would handle NaNs incorrectly, and swapping
23428 // the operands would cause it to handle comparisons between positive
23429 // and negative zero incorrectly.
23430 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)) {
23431 if (!DAG.getTarget().Options.UnsafeFPMath &&
23432 !(DAG.isKnownNeverZero(LHS) || DAG.isKnownNeverZero(RHS)))
23433 break;
23434 std::swap(LHS, RHS);
23435 }
23436 Opcode = X86ISD::FMIN;
23437 break;
23438 case ISD::SETOLE:
23439 // Converting this to a min would handle comparisons between positive
23440 // and negative zero incorrectly.
23441 if (!DAG.getTarget().Options.UnsafeFPMath &&
23442 !DAG.isKnownNeverZero(LHS) && !DAG.isKnownNeverZero(RHS))
23443 break;
23444 Opcode = X86ISD::FMIN;
23445 break;
23446 case ISD::SETULE:
23447 // Converting this to a min would handle both negative zeros and NaNs
23448 // incorrectly, but we can swap the operands to fix both.
23449 std::swap(LHS, RHS);
23450 case ISD::SETOLT:
23451 case ISD::SETLT:
23452 case ISD::SETLE:
23453 Opcode = X86ISD::FMIN;
23454 break;
23456 case ISD::SETOGE:
23457 // Converting this to a max would handle comparisons between positive
23458 // and negative zero incorrectly.
23459 if (!DAG.getTarget().Options.UnsafeFPMath &&
23460 !DAG.isKnownNeverZero(LHS) && !DAG.isKnownNeverZero(RHS))
23461 break;
23462 Opcode = X86ISD::FMAX;
23463 break;
23464 case ISD::SETUGT:
23465 // Converting this to a max would handle NaNs incorrectly, and swapping
23466 // the operands would cause it to handle comparisons between positive
23467 // and negative zero incorrectly.
23468 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)) {
23469 if (!DAG.getTarget().Options.UnsafeFPMath &&
23470 !(DAG.isKnownNeverZero(LHS) || DAG.isKnownNeverZero(RHS)))
23471 break;
23472 std::swap(LHS, RHS);
23473 }
23474 Opcode = X86ISD::FMAX;
23475 break;
23476 case ISD::SETUGE:
23477 // Converting this to a max would handle both negative zeros and NaNs
23478 // incorrectly, but we can swap the operands to fix both.
23479 std::swap(LHS, RHS);
23480 case ISD::SETOGT:
23481 case ISD::SETGT:
23482 case ISD::SETGE:
23483 Opcode = X86ISD::FMAX;
23484 break;
23485 }
23486 // Check for x CC y ? y : x -- a min/max with reversed arms.
23487 } else if (DAG.isEqualTo(LHS, Cond.getOperand(1)) &&
23488 DAG.isEqualTo(RHS, Cond.getOperand(0))) {
23492 // Converting this to a min would handle comparisons between positive
23493 // and negative zero incorrectly, and swapping the operands would
23494 // cause it to handle NaNs incorrectly.
23495 if (!DAG.getTarget().Options.UnsafeFPMath &&
23496 !(DAG.isKnownNeverZero(LHS) || DAG.isKnownNeverZero(RHS))) {
23497 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS))
23499 std::swap(LHS, RHS);
23501 Opcode = X86ISD::FMIN;
23504 // Converting this to a min would handle NaNs incorrectly.
23505 if (!DAG.getTarget().Options.UnsafeFPMath &&
23506 (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)))
23508 Opcode = X86ISD::FMIN;
23511 // Converting this to a min would handle both negative zeros and NaNs
23512 // incorrectly, but we can swap the operands to fix both.
23513 std::swap(LHS, RHS);
23517 Opcode = X86ISD::FMIN;
23521 // Converting this to a max would handle NaNs incorrectly.
23522 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS))
23524 Opcode = X86ISD::FMAX;
23527 // Converting this to a max would handle comparisons between positive
23528 // and negative zero incorrectly, and swapping the operands would
23529 // cause it to handle NaNs incorrectly.
23530 if (!DAG.getTarget().Options.UnsafeFPMath &&
23531 !DAG.isKnownNeverZero(LHS) && !DAG.isKnownNeverZero(RHS)) {
23532 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS))
23534 std::swap(LHS, RHS);
23536 Opcode = X86ISD::FMAX;
23539 // Converting this to a max would handle both negative zeros and NaNs
23540 // incorrectly, but we can swap the operands to fix both.
23541 std::swap(LHS, RHS);
23545 Opcode = X86ISD::FMAX;
23551 return DAG.getNode(Opcode, DL, N->getValueType(0), LHS, RHS);
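
  // Note on the mapping above: with CC == ISD::SETLT, "x < y ? x : y" maps
  // straight to FMIN because, like the select, SSE minss/minps returns its
  // second operand when the comparison is unordered, so no operand swap or
  // fast-math check is needed for that case.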
  EVT CondVT = Cond.getValueType();
  if (Subtarget->hasAVX512() && VT.isVector() && CondVT.isVector() &&
      CondVT.getVectorElementType() == MVT::i1) {
    // v16i8 (select v16i1, v16i8, v16i8) does not have a proper
    // lowering on KNL. In this case we convert it to
    // v16i8 (select v16i8, v16i8, v16i8) and use an AVX instruction.
    // The same applies to all 128- and 256-bit vectors of i8 and i16.
    // Since SKX these selects have a proper lowering.
    EVT OpVT = LHS.getValueType();
    if ((OpVT.is128BitVector() || OpVT.is256BitVector()) &&
        (OpVT.getVectorElementType() == MVT::i8 ||
         OpVT.getVectorElementType() == MVT::i16) &&
        !(Subtarget->hasBWI() && Subtarget->hasVLX())) {
      Cond = DAG.getNode(ISD::SIGN_EXTEND, DL, OpVT, Cond);
      DCI.AddToWorklist(Cond.getNode());
      return DAG.getNode(N->getOpcode(), DL, OpVT, Cond, LHS, RHS);
    }
  }
  // If this is a select between two integer constants, try to do some
  // optimizations.
  if (ConstantSDNode *TrueC = dyn_cast<ConstantSDNode>(LHS)) {
    if (ConstantSDNode *FalseC = dyn_cast<ConstantSDNode>(RHS))
      // Don't do this for crazy integer types.
      if (DAG.getTargetLoweringInfo().isTypeLegal(LHS.getValueType())) {
        // If this is efficiently invertible, canonicalize the LHSC/RHSC values
        // so that TrueC (the true value) is larger than FalseC.
        bool NeedsCondInvert = false;

        if (TrueC->getAPIntValue().ult(FalseC->getAPIntValue()) &&
            // Efficiently invertible.
            (Cond.getOpcode() == ISD::SETCC ||  // setcc -> invertible.
             (Cond.getOpcode() == ISD::XOR &&   // xor(X, C) -> invertible.
              isa<ConstantSDNode>(Cond.getOperand(1))))) {
          NeedsCondInvert = true;
          std::swap(TrueC, FalseC);
        }

        // Optimize C ? 8 : 0 -> zext(C) << 3.  Likewise for any pow2/0.
        if (FalseC->getAPIntValue() == 0 &&
            TrueC->getAPIntValue().isPowerOf2()) {
          if (NeedsCondInvert) // Invert the condition if needed.
            Cond = DAG.getNode(ISD::XOR, DL, Cond.getValueType(), Cond,
                               DAG.getConstant(1, Cond.getValueType()));

          // Zero extend the condition if needed.
          Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, LHS.getValueType(), Cond);

          unsigned ShAmt = TrueC->getAPIntValue().logBase2();
          return DAG.getNode(ISD::SHL, DL, LHS.getValueType(), Cond,
                             DAG.getConstant(ShAmt, MVT::i8));
        }

        // Optimize Cond ? cst+1 : cst -> zext(setcc(C))+cst.
        if (FalseC->getAPIntValue()+1 == TrueC->getAPIntValue()) {
          if (NeedsCondInvert) // Invert the condition if needed.
            Cond = DAG.getNode(ISD::XOR, DL, Cond.getValueType(), Cond,
                               DAG.getConstant(1, Cond.getValueType()));

          // Zero extend the condition if needed.
          Cond = DAG.getNode(ISD::ZERO_EXTEND, DL,
                             FalseC->getValueType(0), Cond);
          return DAG.getNode(ISD::ADD, DL, Cond.getValueType(), Cond,
                             SDValue(FalseC, 0));
        }

        // Optimize cases that will turn into an LEA instruction.  This requires
        // an i32 or i64 and an efficient multiplier (1, 2, 3, 4, 5, 8, 9).
        if (N->getValueType(0) == MVT::i32 || N->getValueType(0) == MVT::i64) {
          uint64_t Diff = TrueC->getZExtValue()-FalseC->getZExtValue();
          if (N->getValueType(0) == MVT::i32) Diff = (unsigned)Diff;

          bool isFastMultiplier = false;
          if (Diff < 10) {
            switch ((unsigned char)Diff) {
            default: break;
            case 1:  // result = add base, cond
            case 2:  // result = lea base(    , cond*2)
            case 3:  // result = lea base(cond, cond*2)
            case 4:  // result = lea base(    , cond*4)
            case 5:  // result = lea base(cond, cond*4)
            case 8:  // result = lea base(    , cond*8)
            case 9:  // result = lea base(cond, cond*8)
              isFastMultiplier = true;
              break;
            }
          }

          if (isFastMultiplier) {
            APInt Diff = TrueC->getAPIntValue()-FalseC->getAPIntValue();
            if (NeedsCondInvert) // Invert the condition if needed.
              Cond = DAG.getNode(ISD::XOR, DL, Cond.getValueType(), Cond,
                                 DAG.getConstant(1, Cond.getValueType()));

            // Zero extend the condition if needed.
            Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, FalseC->getValueType(0),
                               Cond);
            // Scale the condition by the difference.
            if (Diff != 1)
              Cond = DAG.getNode(ISD::MUL, DL, Cond.getValueType(), Cond,
                                 DAG.getConstant(Diff, Cond.getValueType()));

            // Add the base if non-zero.
            if (FalseC->getAPIntValue() != 0)
              Cond = DAG.getNode(ISD::ADD, DL, Cond.getValueType(), Cond,
                                 SDValue(FalseC, 0));
            return Cond;
          }
        }
      }
  }
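
  // LEA example for the block above: "Cond ? 10 : 5" has Diff == 5 (a fast
  // multiplier), so the zero-extended condition is scaled by 5 and the base 5
  // is added, which folds into a single "lea 5(cond,cond,4)".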
  // Canonicalize max and min:
  // (x > y) ? x : y -> (x >= y) ? x : y
  // (x < y) ? x : y -> (x <= y) ? x : y
  // This allows use of COND_S / COND_NS (see TranslateX86CC) which eliminates
  // the need for an extra compare against zero. e.g.
  // ((x - y) > 0) ? (x - y) : 0 -> ((x - y) >= 0) ? (x - y) : 0
  //
  // subl   %esi, %edi
  // testl  %edi, %edi
  // movl   $0, %eax
  // cmovgl %edi, %eax
  // =>
  // xorl   %eax, %eax
  // subl   %esi, %edi
  // cmovsl %eax, %edi
  if (N->getOpcode() == ISD::SELECT && Cond.getOpcode() == ISD::SETCC &&
      DAG.isEqualTo(LHS, Cond.getOperand(0)) &&
      DAG.isEqualTo(RHS, Cond.getOperand(1))) {
    ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
    switch (CC) {
    default: break;
    case ISD::SETLT:
    case ISD::SETGT: {
      ISD::CondCode NewCC = (CC == ISD::SETLT) ? ISD::SETLE : ISD::SETGE;
      Cond = DAG.getSetCC(SDLoc(Cond), Cond.getValueType(),
                          Cond.getOperand(0), Cond.getOperand(1), NewCC);
      return DAG.getNode(ISD::SELECT, DL, VT, Cond, LHS, RHS);
    }
    }
  }
  // Early exit check.
  if (!TLI.isTypeLegal(VT))
    return SDValue();

  // Match VSELECTs into subs with unsigned saturation.
  if (N->getOpcode() == ISD::VSELECT && Cond.getOpcode() == ISD::SETCC &&
      // psubus is available in SSE2 and AVX2 for i8 and i16 vectors.
      ((Subtarget->hasSSE2() && (VT == MVT::v16i8 || VT == MVT::v8i16)) ||
       (Subtarget->hasAVX2() && (VT == MVT::v32i8 || VT == MVT::v16i16)))) {
    ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();

    // Check if one of the arms of the VSELECT is a zero vector. If it's on the
    // left side invert the predicate to simplify logic below.
    SDValue Other;
    if (ISD::isBuildVectorAllZeros(LHS.getNode())) {
      Other = RHS;
      CC = ISD::getSetCCInverse(CC, true);
    } else if (ISD::isBuildVectorAllZeros(RHS.getNode())) {
      Other = LHS;
    }

    if (Other.getNode() && Other->getNumOperands() == 2 &&
        DAG.isEqualTo(Other->getOperand(0), Cond.getOperand(0))) {
      SDValue OpLHS = Other->getOperand(0), OpRHS = Other->getOperand(1);
      SDValue CondRHS = Cond->getOperand(1);

      // Look for a general sub with unsigned saturation first.
      // x >= y ? x-y : 0 --> subus x, y
      // x >  y ? x-y : 0 --> subus x, y
      if ((CC == ISD::SETUGE || CC == ISD::SETUGT) &&
          Other->getOpcode() == ISD::SUB && DAG.isEqualTo(OpRHS, CondRHS))
        return DAG.getNode(X86ISD::SUBUS, DL, VT, OpLHS, OpRHS);

      if (auto *OpRHSBV = dyn_cast<BuildVectorSDNode>(OpRHS))
        if (auto *OpRHSConst = OpRHSBV->getConstantSplatNode()) {
          if (auto *CondRHSBV = dyn_cast<BuildVectorSDNode>(CondRHS))
            if (auto *CondRHSConst = CondRHSBV->getConstantSplatNode())
              // If the RHS is a constant we have to reverse the const
              // canonicalization.
              // x > C-1 ? x+-C : 0 --> subus x, C
              if (CC == ISD::SETUGT && Other->getOpcode() == ISD::ADD &&
                  CondRHSConst->getAPIntValue() ==
                      (-OpRHSConst->getAPIntValue() - 1))
                return DAG.getNode(
                    X86ISD::SUBUS, DL, VT, OpLHS,
                    DAG.getConstant(-OpRHSConst->getAPIntValue(), VT));

          // Another special case: If C was a sign bit, the sub has been
          // canonicalized into a xor.
          // FIXME: Would it be better to use computeKnownBits to determine
          //        whether it's safe to decanonicalize the xor?
          // x s< 0 ? x^C : 0 --> subus x, C
          if (CC == ISD::SETLT && Other->getOpcode() == ISD::XOR &&
              ISD::isBuildVectorAllZeros(CondRHS.getNode()) &&
              OpRHSConst->getAPIntValue().isSignBit())
            // Note that we have to rebuild the RHS constant here to ensure we
            // don't rely on particular values of undef lanes.
            return DAG.getNode(
                X86ISD::SUBUS, DL, VT, OpLHS,
                DAG.getConstant(OpRHSConst->getAPIntValue(), VT));
        }
    }
  }
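
  // psubus example for the block above: on v16i8, "x > 31 ? x + (-32) : 0" is
  // the canonical DAG form of "x >= 32 ? x - 32 : 0" (note 31 == -(-32) - 1),
  // and it is rewritten to X86ISD::SUBUS(x, 32), i.e. a single psubusb.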
  // Try to match a min/max vector operation.
  if (N->getOpcode() == ISD::VSELECT && Cond.getOpcode() == ISD::SETCC) {
    std::pair<unsigned, bool> ret =
        matchIntegerMINMAX(Cond, VT, LHS, RHS, DAG, Subtarget);
    unsigned Opc = ret.first;
    bool NeedSplit = ret.second;

    if (Opc && NeedSplit) {
      unsigned NumElems = VT.getVectorNumElements();
      // Extract the LHS vectors.
      SDValue LHS1 = Extract128BitVector(LHS, 0, DAG, DL);
      SDValue LHS2 = Extract128BitVector(LHS, NumElems/2, DAG, DL);

      // Extract the RHS vectors.
      SDValue RHS1 = Extract128BitVector(RHS, 0, DAG, DL);
      SDValue RHS2 = Extract128BitVector(RHS, NumElems/2, DAG, DL);

      // Create min/max for each subvector.
      LHS = DAG.getNode(Opc, DL, LHS1.getValueType(), LHS1, RHS1);
      RHS = DAG.getNode(Opc, DL, LHS2.getValueType(), LHS2, RHS2);

      // Merge the result.
      return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, LHS, RHS);
    } else if (Opc)
      return DAG.getNode(Opc, DL, VT, LHS, RHS);
  }
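
  // The split path above covers, for instance, a 256-bit integer min/max on an
  // AVX1-only target (presumably what matchIntegerMINMAX signals via
  // NeedSplit): each 128-bit half gets its own pmin/pmax and the halves are
  // re-concatenated.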
  // Simplify vector selection if condition value type matches vselect
  // operand type.
  if (N->getOpcode() == ISD::VSELECT && CondVT == VT) {
    assert(Cond.getValueType().isVector() &&
           "vector select expects a vector selector!");

    bool TValIsAllOnes = ISD::isBuildVectorAllOnes(LHS.getNode());
    bool FValIsAllZeros = ISD::isBuildVectorAllZeros(RHS.getNode());

    // Try inverting the condition if the true value is not all 1s and the
    // false value is not all 0s.
    if (!TValIsAllOnes && !FValIsAllZeros &&
        // Check if the selector will be produced by CMPP*/PCMP*.
        Cond.getOpcode() == ISD::SETCC &&
        // Check if SETCC has already been promoted.
        TLI.getSetCCResultType(*DAG.getContext(), VT) == CondVT) {
      bool TValIsAllZeros = ISD::isBuildVectorAllZeros(LHS.getNode());
      bool FValIsAllOnes = ISD::isBuildVectorAllOnes(RHS.getNode());

      if (TValIsAllZeros || FValIsAllOnes) {
        SDValue CC = Cond.getOperand(2);
        ISD::CondCode NewCC =
          ISD::getSetCCInverse(cast<CondCodeSDNode>(CC)->get(),
                               Cond.getOperand(0).getValueType().isInteger());
        Cond = DAG.getSetCC(DL, CondVT, Cond.getOperand(0),
                            Cond.getOperand(1), NewCC);
        std::swap(LHS, RHS);
        TValIsAllOnes = FValIsAllOnes;
        FValIsAllZeros = TValIsAllZeros;
      }
    }

    if (TValIsAllOnes || FValIsAllZeros) {
      SDValue Ret;

      if (TValIsAllOnes && FValIsAllZeros)
        Ret = Cond;
      else if (TValIsAllOnes)
        Ret = DAG.getNode(ISD::OR, DL, CondVT, Cond,
                          DAG.getNode(ISD::BITCAST, DL, CondVT, RHS));
      else if (FValIsAllZeros)
        Ret = DAG.getNode(ISD::AND, DL, CondVT, Cond,
                          DAG.getNode(ISD::BITCAST, DL, CondVT, LHS));

      return DAG.getNode(ISD::BITCAST, DL, VT, Ret);
    }
  }
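
  // The folds above rely on each condition lane being all-ones or all-zeros:
  // bitwise, (vselect M, -1, X) == M | X and (vselect M, X, 0) == M & X.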
  // If we know that this node is legal then we know that it is going to be
  // matched by one of the SSE/AVX BLEND instructions. These instructions only
  // depend on the highest bit in each word. Try to use SimplifyDemandedBits
  // to simplify previous instructions.
  if (N->getOpcode() == ISD::VSELECT && DCI.isBeforeLegalizeOps() &&
      !DCI.isBeforeLegalize() &&
      // We explicitly check against v8i16 and v16i16 because, although
      // they're marked as Custom, they might only be legal when Cond is a
      // build_vector of constants. This will be taken care of in a later
      // condition.
      (TLI.isOperationLegalOrCustom(ISD::VSELECT, VT) && VT != MVT::v16i16 &&
       VT != MVT::v8i16) &&
      // Don't optimize vector of constants. Those are handled by
      // the generic code and all the bits must be properly set for
      // the generic optimizer.
      !ISD::isBuildVectorOfConstantSDNodes(Cond.getNode())) {
    unsigned BitWidth = Cond.getValueType().getScalarType().getSizeInBits();

    // Don't optimize vector selects that map to mask-registers.
    if (BitWidth == 1)
      return SDValue();

    assert(BitWidth >= 8 && BitWidth <= 64 && "Invalid mask size");
    APInt DemandedMask = APInt::getHighBitsSet(BitWidth, 1);
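    // For v4i32 conditions, for example, BitWidth is 32 and DemandedMask is
    // 0x80000000: only the sign bit of each lane feeds the BLEND.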
    APInt KnownZero, KnownOne;
    TargetLowering::TargetLoweringOpt TLO(DAG, DCI.isBeforeLegalize(),
                                          DCI.isBeforeLegalizeOps());
    if (TLO.ShrinkDemandedConstant(Cond, DemandedMask) ||
        TLI.SimplifyDemandedBits(Cond, DemandedMask, KnownZero, KnownOne,
                                 TLO)) {
      // If we changed the computation somewhere in the DAG, this change
      // will affect all users of Cond.
      // Make sure it is fine and update all the nodes so that we do not
      // use the generic VSELECT anymore. Otherwise, we may perform
      // wrong optimizations as we messed up with the actual expectation
      // for the vector boolean values.
      if (Cond != TLO.Old) {
        // Check all uses of the condition operand to verify whether it will be
        // consumed by non-BLEND instructions, which may depend on all of the
        // bits being set properly.
        for (SDNode::use_iterator I = Cond->use_begin(), E = Cond->use_end();
             I != E; ++I)
          if (I->getOpcode() != ISD::VSELECT)
            // TODO: Add other opcodes eventually lowered into BLEND.
            return SDValue();

        // Update all the users of the condition, before committing the change,
        // so that the VSELECT optimizations that expect the correct vector
        // boolean value will not be triggered.
        for (SDNode::use_iterator I = Cond->use_begin(), E = Cond->use_end();
             I != E; ++I)
          DAG.ReplaceAllUsesOfValueWith(
              SDValue(*I, 0),
              DAG.getNode(X86ISD::SHRUNKBLEND, SDLoc(*I), I->getValueType(0),
                          Cond, I->getOperand(1), I->getOperand(2)));
        DCI.CommitTargetLoweringOpt(TLO);
        return SDValue();
      }
      // At this point, only Cond is changed. Change the condition
      // just for N to keep the opportunity to optimize all other
      // users their own way.
      DAG.ReplaceAllUsesOfValueWith(
          SDValue(N, 0),
          DAG.getNode(X86ISD::SHRUNKBLEND, SDLoc(N), N->getValueType(0),
                      TLO.New, N->getOperand(1), N->getOperand(2)));
      return SDValue();
    }
  }
  // We should generate an X86ISD::BLENDI from a vselect if its argument
  // is a sign_extend_inreg of an any_extend of a BUILD_VECTOR of
  // constants. This specific pattern gets generated when we split a
  // selector for a 512-bit vector in a machine without AVX512 (but with
  // 256-bit vectors), during legalization:
  //
  // (vselect (sign_extend (any_extend (BUILD_VECTOR)) i1) LHS RHS)
  //
  // Iff we find this pattern and the build_vectors are built from
  // constants, we translate the vselect into a shuffle_vector that we
  // know will be matched by LowerVECTOR_SHUFFLEtoBlend.
  if ((N->getOpcode() == ISD::VSELECT ||
       N->getOpcode() == X86ISD::SHRUNKBLEND) &&
      !DCI.isBeforeLegalize()) {
    SDValue Shuffle = transformVSELECTtoBlendVECTOR_SHUFFLE(N, DAG, Subtarget);
    if (Shuffle.getNode())
      return Shuffle;
  }

  return SDValue();
}
// Check whether a boolean test is testing a boolean value generated by
// X86ISD::SETCC. If so, return the operand of that SETCC and the proper
// condition code.
//
// Simplify the following patterns:
// (Op (CMP (SETCC Cond EFLAGS) 1) EQ) or
// (Op (CMP (SETCC Cond EFLAGS) 0) NEQ)
// to (Op EFLAGS Cond)
//
// (Op (CMP (SETCC Cond EFLAGS) 0) EQ) or
// (Op (CMP (SETCC Cond EFLAGS) 1) NEQ)
// to (Op EFLAGS !Cond)
//
// where Op could be BRCOND or CMOV.
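//
// Concretely, (brcond (cmp (setcc COND_L, EFLAGS), 0), COND_NE) becomes
// (brcond EFLAGS, COND_L): the intermediate materialize-and-retest of the
// boolean disappears.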
static SDValue checkBoolTestSetCCCombine(SDValue Cmp, X86::CondCode &CC) {
  // Quit if not CMP and SUB with its value result used.
  if (Cmp.getOpcode() != X86ISD::CMP &&
      (Cmp.getOpcode() != X86ISD::SUB || Cmp.getNode()->hasAnyUseOfValue(0)))
    return SDValue();

  // Quit if not used as a boolean value.
  if (CC != X86::COND_E && CC != X86::COND_NE)
    return SDValue();

  // Check CMP operands. One of them should be 0 or 1 and the other should be
  // a SetCC or extended from it.
  SDValue Op1 = Cmp.getOperand(0);
  SDValue Op2 = Cmp.getOperand(1);

  SDValue SetCC;
  const ConstantSDNode* C = nullptr;
  bool needOppositeCond = (CC == X86::COND_E);
  bool checkAgainstTrue = false; // Is it a comparison against 1?

  if ((C = dyn_cast<ConstantSDNode>(Op1)))
    SetCC = Op2;
  else if ((C = dyn_cast<ConstantSDNode>(Op2)))
    SetCC = Op1;
  else // Quit if all operands are not constants.
    return SDValue();

  if (C->getZExtValue() == 1) {
    needOppositeCond = !needOppositeCond;
    checkAgainstTrue = true;
  } else if (C->getZExtValue() != 0)
    // Quit if the constant is neither 0 nor 1.
    return SDValue();

  bool truncatedToBoolWithAnd = false;
  // Skip (zext $x), (trunc $x), or (and $x, 1) node.
  while (SetCC.getOpcode() == ISD::ZERO_EXTEND ||
         SetCC.getOpcode() == ISD::TRUNCATE ||
         SetCC.getOpcode() == ISD::AND) {
    if (SetCC.getOpcode() == ISD::AND) {
      int OpIdx = -1;
      ConstantSDNode *CS;
      if ((CS = dyn_cast<ConstantSDNode>(SetCC.getOperand(0))) &&
          CS->getZExtValue() == 1)
        OpIdx = 1;
      if ((CS = dyn_cast<ConstantSDNode>(SetCC.getOperand(1))) &&
          CS->getZExtValue() == 1)
        OpIdx = 0;
      if (OpIdx == -1)
        break;
      SetCC = SetCC.getOperand(OpIdx);
      truncatedToBoolWithAnd = true;
    } else
      SetCC = SetCC.getOperand(0);
  }
  switch (SetCC.getOpcode()) {
  case X86ISD::SETCC_CARRY:
    // Since SETCC_CARRY gives output based on R = CF ? ~0 : 0, it's unsafe to
    // simplify it if the result of SETCC_CARRY is not canonicalized to 0 or 1,
    // i.e. it's a comparison against true but the result of SETCC_CARRY is not
    // truncated to i1 using 'and'.
    if (checkAgainstTrue && !truncatedToBoolWithAnd)
      break;
    assert(X86::CondCode(SetCC.getConstantOperandVal(0)) == X86::COND_B &&
           "Invalid use of SETCC_CARRY!");
    // FALL THROUGH
  case X86ISD::SETCC:
    // Set the condition code or opposite one if necessary.
    CC = X86::CondCode(SetCC.getConstantOperandVal(0));
    if (needOppositeCond)
      CC = X86::GetOppositeBranchCondition(CC);
    return SetCC.getOperand(1);
  case X86ISD::CMOV: {
    // Check whether false/true value has canonical one, i.e. 0 or 1.
    ConstantSDNode *FVal = dyn_cast<ConstantSDNode>(SetCC.getOperand(0));
    ConstantSDNode *TVal = dyn_cast<ConstantSDNode>(SetCC.getOperand(1));
    // Quit if true value is not a constant.
    if (!TVal)
      return SDValue();
    // Quit if false value is not a constant.
    if (!FVal) {
      SDValue Op = SetCC.getOperand(0);
      // Skip 'zext' or 'trunc' node.
      if (Op.getOpcode() == ISD::ZERO_EXTEND ||
          Op.getOpcode() == ISD::TRUNCATE)
        Op = Op.getOperand(0);
      // A special case for rdrand/rdseed, where 0 is set if the false cond is
      // found.
      if ((Op.getOpcode() != X86ISD::RDRAND &&
           Op.getOpcode() != X86ISD::RDSEED) || Op.getResNo() != 0)
        return SDValue();
    }
    // Quit if false value is not the constant 0 or 1.
    bool FValIsFalse = true;
    if (FVal && FVal->getZExtValue() != 0) {
      if (FVal->getZExtValue() != 1)
        return SDValue();
      // If FVal is 1, opposite cond is needed.
      needOppositeCond = !needOppositeCond;
      FValIsFalse = false;
    }
    // Quit if TVal is not the constant opposite of FVal.
    if (FValIsFalse && TVal->getZExtValue() != 1)
      return SDValue();
    if (!FValIsFalse && TVal->getZExtValue() != 0)
      return SDValue();
    CC = X86::CondCode(SetCC.getConstantOperandVal(2));
    if (needOppositeCond)
      CC = X86::GetOppositeBranchCondition(CC);
    return SetCC.getOperand(3);
  }
  }

  return SDValue();
}
/// Optimize X86ISD::CMOV [LHS, RHS, CONDCODE (e.g. X86::COND_NE), CONDVAL]
static SDValue PerformCMOVCombine(SDNode *N, SelectionDAG &DAG,
                                  TargetLowering::DAGCombinerInfo &DCI,
                                  const X86Subtarget *Subtarget) {
  SDLoc DL(N);

  // If the flag operand isn't dead, don't touch this CMOV.
  if (N->getNumValues() == 2 && !SDValue(N, 1).use_empty())
    return SDValue();

  SDValue FalseOp = N->getOperand(0);
  SDValue TrueOp = N->getOperand(1);
  X86::CondCode CC = (X86::CondCode)N->getConstantOperandVal(2);
  SDValue Cond = N->getOperand(3);

  if (CC == X86::COND_E || CC == X86::COND_NE) {
    switch (Cond.getOpcode()) {
    default: break;
    case X86ISD::BSR:
    case X86ISD::BSF:
      // If the operand of BSR / BSF is proven never zero, then ZF cannot be
      // set.
      if (DAG.isKnownNeverZero(Cond.getOperand(0)))
        return (CC == X86::COND_E) ? FalseOp : TrueOp;
    }
  }

  SDValue Flags;

  Flags = checkBoolTestSetCCCombine(Cond, CC);
  if (Flags.getNode() &&
      // Extra check as FCMOV only supports a subset of X86 cond.
      (FalseOp.getValueType() != MVT::f80 || hasFPCMov(CC))) {
    SDValue Ops[] = { FalseOp, TrueOp,
                      DAG.getConstant(CC, MVT::i8), Flags };
    return DAG.getNode(X86ISD::CMOV, DL, N->getVTList(), Ops);
  }
  // If this is a select between two integer constants, try to do some
  // optimizations.  Note that the operands are ordered the opposite of SELECT
  // operands.
  if (ConstantSDNode *TrueC = dyn_cast<ConstantSDNode>(TrueOp)) {
    if (ConstantSDNode *FalseC = dyn_cast<ConstantSDNode>(FalseOp)) {
      // Canonicalize the TrueC/FalseC values so that TrueC (the true value) is
      // larger than FalseC (the false value).
      if (TrueC->getAPIntValue().ult(FalseC->getAPIntValue())) {
        CC = X86::GetOppositeBranchCondition(CC);
        std::swap(TrueC, FalseC);
        std::swap(TrueOp, FalseOp);
      }

      // Optimize C ? 8 : 0 -> zext(setcc(C)) << 3.  Likewise for any pow2/0.
      // This is efficient for any integer data type (including i8/i16) and
      // shift amount.
      if (FalseC->getAPIntValue() == 0 && TrueC->getAPIntValue().isPowerOf2()) {
        Cond = DAG.getNode(X86ISD::SETCC, DL, MVT::i8,
                           DAG.getConstant(CC, MVT::i8), Cond);

        // Zero extend the condition if needed.
        Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, TrueC->getValueType(0), Cond);

        unsigned ShAmt = TrueC->getAPIntValue().logBase2();
        Cond = DAG.getNode(ISD::SHL, DL, Cond.getValueType(), Cond,
                           DAG.getConstant(ShAmt, MVT::i8));
        if (N->getNumValues() == 2)  // Dead flag value?
          return DCI.CombineTo(N, Cond, SDValue());
        return Cond;
      }

      // Optimize Cond ? cst+1 : cst -> zext(setcc(C))+cst.  This is efficient
      // for any integer data type, including i8/i16.
      if (FalseC->getAPIntValue()+1 == TrueC->getAPIntValue()) {
        Cond = DAG.getNode(X86ISD::SETCC, DL, MVT::i8,
                           DAG.getConstant(CC, MVT::i8), Cond);

        // Zero extend the condition if needed.
        Cond = DAG.getNode(ISD::ZERO_EXTEND, DL,
                           FalseC->getValueType(0), Cond);
        Cond = DAG.getNode(ISD::ADD, DL, Cond.getValueType(), Cond,
                           SDValue(FalseC, 0));

        if (N->getNumValues() == 2)  // Dead flag value?
          return DCI.CombineTo(N, Cond, SDValue());
        return Cond;
      }

      // Optimize cases that will turn into an LEA instruction.  This requires
      // an i32 or i64 and an efficient multiplier (1, 2, 3, 4, 5, 8, 9).
      if (N->getValueType(0) == MVT::i32 || N->getValueType(0) == MVT::i64) {
        uint64_t Diff = TrueC->getZExtValue()-FalseC->getZExtValue();
        if (N->getValueType(0) == MVT::i32) Diff = (unsigned)Diff;

        bool isFastMultiplier = false;
        if (Diff < 10) {
          switch ((unsigned char)Diff) {
          default: break;
          case 1:  // result = add base, cond
          case 2:  // result = lea base(    , cond*2)
          case 3:  // result = lea base(cond, cond*2)
          case 4:  // result = lea base(    , cond*4)
          case 5:  // result = lea base(cond, cond*4)
          case 8:  // result = lea base(    , cond*8)
          case 9:  // result = lea base(cond, cond*8)
            isFastMultiplier = true;
            break;
          }
        }

        if (isFastMultiplier) {
          APInt Diff = TrueC->getAPIntValue()-FalseC->getAPIntValue();
          Cond = DAG.getNode(X86ISD::SETCC, DL, MVT::i8,
                             DAG.getConstant(CC, MVT::i8), Cond);
          // Zero extend the condition if needed.
          Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, FalseC->getValueType(0),
                             Cond);
          // Scale the condition by the difference.
          if (Diff != 1)
            Cond = DAG.getNode(ISD::MUL, DL, Cond.getValueType(), Cond,
                               DAG.getConstant(Diff, Cond.getValueType()));

          // Add the base if non-zero.
          if (FalseC->getAPIntValue() != 0)
            Cond = DAG.getNode(ISD::ADD, DL, Cond.getValueType(), Cond,
                               SDValue(FalseC, 0));
          if (N->getNumValues() == 2)  // Dead flag value?
            return DCI.CombineTo(N, Cond, SDValue());
          return Cond;
        }
      }
    }
  }
  // Handle these cases:
  //   (select (x != c), e, c) -> select (x != c), e, x),
  //   (select (x == c), c, e) -> select (x == c), x, e)
  // where c is an integer constant, and the "select" is the combination
  // of CMOV and CMP.
  //
  // The rationale for this change is that the conditional-move from a constant
  // needs two instructions, however, conditional-move from a register needs
  // only one instruction.
  //
  // CAVEAT: By replacing a constant with a symbolic value, it may obscure
  //  some instruction-combining opportunities. This opt needs to be
  //  postponed as late as possible.
  //
  if (!DCI.isBeforeLegalize() && !DCI.isBeforeLegalizeOps()) {
    // The DCI.xxxx conditions are provided to postpone the optimization as
    // late as possible.

    ConstantSDNode *CmpAgainst = nullptr;
    if ((Cond.getOpcode() == X86ISD::CMP || Cond.getOpcode() == X86ISD::SUB) &&
        (CmpAgainst = dyn_cast<ConstantSDNode>(Cond.getOperand(1))) &&
        !isa<ConstantSDNode>(Cond.getOperand(0))) {

      if (CC == X86::COND_NE &&
          CmpAgainst == dyn_cast<ConstantSDNode>(FalseOp)) {
        CC = X86::GetOppositeBranchCondition(CC);
        std::swap(TrueOp, FalseOp);
      }

      if (CC == X86::COND_E &&
          CmpAgainst == dyn_cast<ConstantSDNode>(TrueOp)) {
        SDValue Ops[] = { FalseOp, Cond.getOperand(0),
                          DAG.getConstant(CC, MVT::i8), Cond };
        return DAG.getNode(X86ISD::CMOV, DL, N->getVTList(), Ops);
      }
    }
  }

  return SDValue();
}
static SDValue PerformINTRINSIC_WO_CHAINCombine(SDNode *N, SelectionDAG &DAG,
                                                const X86Subtarget *Subtarget) {
  unsigned IntNo = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
  switch (IntNo) {
  default: return SDValue();
  // SSE/AVX/AVX2 blend intrinsics.
  case Intrinsic::x86_avx2_pblendvb:
  case Intrinsic::x86_avx2_pblendw:
  case Intrinsic::x86_avx2_pblendd_128:
  case Intrinsic::x86_avx2_pblendd_256:
    // Don't try to simplify this intrinsic if we don't have AVX2.
    if (!Subtarget->hasAVX2())
      return SDValue();
    // FALL-THROUGH
  case Intrinsic::x86_avx_blend_pd_256:
  case Intrinsic::x86_avx_blend_ps_256:
  case Intrinsic::x86_avx_blendv_pd_256:
  case Intrinsic::x86_avx_blendv_ps_256:
    // Don't try to simplify this intrinsic if we don't have AVX.
    if (!Subtarget->hasAVX())
      return SDValue();
    // FALL-THROUGH
  case Intrinsic::x86_sse41_pblendw:
  case Intrinsic::x86_sse41_blendpd:
  case Intrinsic::x86_sse41_blendps:
  case Intrinsic::x86_sse41_blendvps:
  case Intrinsic::x86_sse41_blendvpd:
  case Intrinsic::x86_sse41_pblendvb: {
    SDValue Op0 = N->getOperand(1);
    SDValue Op1 = N->getOperand(2);
    SDValue Mask = N->getOperand(3);

    // Don't try to simplify this intrinsic if we don't have SSE4.1.
    if (!Subtarget->hasSSE41())
      return SDValue();

    // fold (blend A, A, Mask) -> A
    if (Op0 == Op1)
      return Op0;

    // fold (blend A, B, allZeros) -> A
    if (ISD::isBuildVectorAllZeros(Mask.getNode()))
      return Op0;

    // fold (blend A, B, allOnes) -> B
    if (ISD::isBuildVectorAllOnes(Mask.getNode()))
      return Op1;

    // Simplify the case where the mask is a constant i32 value.
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Mask)) {
      if (C->isNullValue())
        return Op0;
      if (C->isAllOnesValue())
        return Op1;
    }

    return SDValue();
  }
  // Packed SSE2/AVX2 arithmetic shift immediate intrinsics.
  case Intrinsic::x86_sse2_psrai_w:
  case Intrinsic::x86_sse2_psrai_d:
  case Intrinsic::x86_avx2_psrai_w:
  case Intrinsic::x86_avx2_psrai_d:
  case Intrinsic::x86_sse2_psra_w:
  case Intrinsic::x86_sse2_psra_d:
  case Intrinsic::x86_avx2_psra_w:
  case Intrinsic::x86_avx2_psra_d: {
    SDValue Op0 = N->getOperand(1);
    SDValue Op1 = N->getOperand(2);
    EVT VT = Op0.getValueType();
    assert(VT.isVector() && "Expected a vector type!");

    if (isa<BuildVectorSDNode>(Op1))
      Op1 = Op1.getOperand(0);

    if (!isa<ConstantSDNode>(Op1))
      return SDValue();

    EVT SVT = VT.getVectorElementType();
    unsigned SVTBits = SVT.getSizeInBits();

    ConstantSDNode *CND = cast<ConstantSDNode>(Op1);
    APInt C(SVTBits, CND->getAPIntValue().getZExtValue());
    uint64_t ShAmt = C.getZExtValue();

    // Don't try to convert this shift into an ISD::SRA if the shift
    // count is bigger than or equal to the element size.
    if (ShAmt >= SVTBits)
      return SDValue();

    // Trivial case: if the shift count is zero, then fold this
    // into the first operand.
    if (ShAmt == 0)
      return Op0;

    // Replace this packed shift intrinsic with a target independent
    // shift dag node.
    SDValue Splat = DAG.getConstant(C, VT);
    return DAG.getNode(ISD::SRA, SDLoc(N), VT, Op0, Splat);
  }
  }
}
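
// Example of the rewrite above: psrai.d(x, 3) (e.g. from _mm_srai_epi32(x, 3))
// becomes the generic node (sra x, <3,3,3,3>), which the generic combiner can
// then fold like any other shift.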
/// PerformMulCombine - Optimize a single multiply with constant into two
/// multiplies in order to implement it with two cheaper instructions, e.g.
/// LEA + SHL, LEA + LEA.
static SDValue PerformMulCombine(SDNode *N, SelectionDAG &DAG,
                                 TargetLowering::DAGCombinerInfo &DCI) {
  if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer())
    return SDValue();

  EVT VT = N->getValueType(0);
  if (VT != MVT::i64 && VT != MVT::i32)
    return SDValue();

  ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1));
  if (!C)
    return SDValue();
  uint64_t MulAmt = C->getZExtValue();
  if (isPowerOf2_64(MulAmt) || MulAmt == 3 || MulAmt == 5 || MulAmt == 9)
    return SDValue();

  uint64_t MulAmt1 = 0;
  uint64_t MulAmt2 = 0;
  if ((MulAmt % 9) == 0) {
    MulAmt1 = 9;
    MulAmt2 = MulAmt / 9;
  } else if ((MulAmt % 5) == 0) {
    MulAmt1 = 5;
    MulAmt2 = MulAmt / 5;
  } else if ((MulAmt % 3) == 0) {
    MulAmt1 = 3;
    MulAmt2 = MulAmt / 3;
  }
  if (MulAmt2 &&
      (isPowerOf2_64(MulAmt2) || MulAmt2 == 3 || MulAmt2 == 5 || MulAmt2 == 9)) {
    SDLoc DL(N);

    if (isPowerOf2_64(MulAmt2) &&
        !(N->hasOneUse() && N->use_begin()->getOpcode() == ISD::ADD))
      // If the second multiplier is pow2, issue it first. We want the multiply
      // by 3, 5, or 9 to be folded into the addressing mode unless the lone
      // use is an add.
      std::swap(MulAmt1, MulAmt2);

    SDValue NewMul;
    if (isPowerOf2_64(MulAmt1))
      NewMul = DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
                           DAG.getConstant(Log2_64(MulAmt1), MVT::i8));
    else
      NewMul = DAG.getNode(X86ISD::MUL_IMM, DL, VT, N->getOperand(0),
                           DAG.getConstant(MulAmt1, VT));

    if (isPowerOf2_64(MulAmt2))
      NewMul = DAG.getNode(ISD::SHL, DL, VT, NewMul,
                           DAG.getConstant(Log2_64(MulAmt2), MVT::i8));
    else
      NewMul = DAG.getNode(X86ISD::MUL_IMM, DL, VT, NewMul,
                           DAG.getConstant(MulAmt2, VT));

    // Do not add new nodes to DAG combiner worklist.
    DCI.CombineTo(N, NewMul, false);
  }
  return SDValue();
}
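
// Worked example: MulAmt == 45 decomposes as 9 * 5, so x*45 becomes
// MUL_IMM(MUL_IMM(x, 9), 5), i.e. two LEAs:
//   leal (%rdi,%rdi,8), %eax   ; x*9
//   leal (%rax,%rax,4), %eax   ; (x*9)*5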
static SDValue PerformSHLCombine(SDNode *N, SelectionDAG &DAG) {
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
  EVT VT = N0.getValueType();

  // fold (shl (and (setcc_c), c1), c2) -> (and setcc_c, (c1 << c2))
  // since the result of setcc_c is all zeros or all ones.
  if (VT.isInteger() && !VT.isVector() &&
      N1C && N0.getOpcode() == ISD::AND &&
      N0.getOperand(1).getOpcode() == ISD::Constant) {
    SDValue N00 = N0.getOperand(0);
    if (N00.getOpcode() == X86ISD::SETCC_CARRY ||
        ((N00.getOpcode() == ISD::ANY_EXTEND ||
          N00.getOpcode() == ISD::ZERO_EXTEND) &&
         N00.getOperand(0).getOpcode() == X86ISD::SETCC_CARRY)) {
      APInt Mask = cast<ConstantSDNode>(N0.getOperand(1))->getAPIntValue();
      APInt ShAmt = N1C->getAPIntValue();
      Mask = Mask.shl(ShAmt);
      if (Mask != 0)
        return DAG.getNode(ISD::AND, SDLoc(N), VT,
                           N00, DAG.getConstant(Mask, VT));
    }
  }

  // Hardware support for vector shifts is sparse which makes us scalarize the
  // vector operations in many cases. Also, on Sandy Bridge ADD is faster than
  // shl:
  // (shl V, 1) -> add V,V
  if (auto *N1BV = dyn_cast<BuildVectorSDNode>(N1))
    if (auto *N1SplatC = N1BV->getConstantSplatNode()) {
      assert(N0.getValueType().isVector() && "Invalid vector shift type");
      // We shift all of the values by one. In many cases we do not have
      // hardware support for this operation. This is better expressed as an ADD
      // of two values.
      if (N1SplatC->getZExtValue() == 1)
        return DAG.getNode(ISD::ADD, SDLoc(N), VT, N0, N0);
    }

  return SDValue();
}
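
// The setcc_c fold above turns e.g. (shl (and (setcc_c), 1), 3) into
// (and setcc_c, 8): because setcc_c is all-zeros or all-ones, masking after
// the shift is equivalent to shifting the mask constant itself.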
/// \brief Returns a vector of 0s if the node in input is a vector logical
/// shift by a constant amount which is known to be bigger than or equal
/// to the vector element size in bits.
static SDValue performShiftToAllZeros(SDNode *N, SelectionDAG &DAG,
                                      const X86Subtarget *Subtarget) {
  EVT VT = N->getValueType(0);

  if (VT != MVT::v2i64 && VT != MVT::v4i32 && VT != MVT::v8i16 &&
      (!Subtarget->hasInt256() ||
       (VT != MVT::v4i64 && VT != MVT::v8i32 && VT != MVT::v16i16)))
    return SDValue();

  SDValue Amt = N->getOperand(1);
  SDLoc DL(N);
  if (auto *AmtBV = dyn_cast<BuildVectorSDNode>(Amt))
    if (auto *AmtSplat = AmtBV->getConstantSplatNode()) {
      APInt ShiftAmt = AmtSplat->getAPIntValue();
      unsigned MaxAmount = VT.getVectorElementType().getSizeInBits();

      // SSE2/AVX2 logical shifts always return a vector of 0s
      // if the shift amount is bigger than or equal to
      // the element size. The constant shift amount will be
      // encoded as an 8-bit immediate.
      if (ShiftAmt.trunc(8).uge(MaxAmount))
        return getZeroVector(VT, Subtarget, DAG, DL);
    }

  return SDValue();
}
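
// For example, (srl v8i16:x, splat(16)) is folded directly to the zero
// vector here, matching psrlw's behavior for out-of-range shift counts.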
/// PerformShiftCombine - Combine shifts.
static SDValue PerformShiftCombine(SDNode* N, SelectionDAG &DAG,
                                   TargetLowering::DAGCombinerInfo &DCI,
                                   const X86Subtarget *Subtarget) {
  if (N->getOpcode() == ISD::SHL) {
    SDValue V = PerformSHLCombine(N, DAG);
    if (V.getNode()) return V;
  }

  if (N->getOpcode() != ISD::SRA) {
    // Try to fold this logical shift into a zero vector.
    SDValue V = performShiftToAllZeros(N, DAG, Subtarget);
    if (V.getNode()) return V;
  }

  return SDValue();
}
// CMPEQCombine - Recognize the distinctive (AND (setcc ...) (setcc ..))
// where both setccs reference the same FP CMP, and rewrite for CMPEQSS
// and friends. Likewise for OR -> CMPNEQSS.
static SDValue CMPEQCombine(SDNode *N, SelectionDAG &DAG,
                            TargetLowering::DAGCombinerInfo &DCI,
                            const X86Subtarget *Subtarget) {
  unsigned opcode;

  // SSE1 supports CMP{eq|ne}SS, and SSE2 added CMP{eq|ne}SD, but
  // we're requiring SSE2 for both.
  if (Subtarget->hasSSE2() && isAndOrOfSetCCs(SDValue(N, 0U), opcode)) {
    SDValue N0 = N->getOperand(0);
    SDValue N1 = N->getOperand(1);
    SDValue CMP0 = N0->getOperand(1);
    SDValue CMP1 = N1->getOperand(1);
    SDLoc DL(N);

    // The SETCCs should both refer to the same CMP.
    if (CMP0.getOpcode() != X86ISD::CMP || CMP0 != CMP1)
      return SDValue();

    SDValue CMP00 = CMP0->getOperand(0);
    SDValue CMP01 = CMP0->getOperand(1);
    EVT VT = CMP00.getValueType();

    if (VT == MVT::f32 || VT == MVT::f64) {
      bool ExpectingFlags = false;
      // Check for any users that want flags:
      for (SDNode::use_iterator UI = N->use_begin(), UE = N->use_end();
           !ExpectingFlags && UI != UE; ++UI)
        switch (UI->getOpcode()) {
        default:
        case ISD::BR_CC:
        case ISD::BRCOND:
        case ISD::SELECT:
          ExpectingFlags = true;
          break;
        case ISD::CopyToReg:
        case ISD::SIGN_EXTEND:
        case ISD::ZERO_EXTEND:
        case ISD::ANY_EXTEND:
          break;
        }

      if (!ExpectingFlags) {
        enum X86::CondCode cc0 = (enum X86::CondCode)N0.getConstantOperandVal(0);
        enum X86::CondCode cc1 = (enum X86::CondCode)N1.getConstantOperandVal(0);

        if (cc1 == X86::COND_E || cc1 == X86::COND_NE) {
          X86::CondCode tmp = cc0;
          cc0 = cc1;
          cc1 = tmp;
        }

        if ((cc0 == X86::COND_E  && cc1 == X86::COND_NP) ||
            (cc0 == X86::COND_NE && cc1 == X86::COND_P)) {
          // FIXME: need symbolic constants for these magic numbers.
          // See X86ATTInstPrinter.cpp:printSSECC().
          unsigned x86cc = (cc0 == X86::COND_E) ? 0 : 4;
          if (Subtarget->hasAVX512()) {
            SDValue FSetCC = DAG.getNode(X86ISD::FSETCC, DL, MVT::i1, CMP00,
                                         CMP01, DAG.getConstant(x86cc, MVT::i8));
            if (N->getValueType(0) != MVT::i1)
              return DAG.getNode(ISD::ZERO_EXTEND, DL, N->getValueType(0),
                                 FSetCC);
            return FSetCC;
          }
          SDValue OnesOrZeroesF = DAG.getNode(X86ISD::FSETCC, DL,
                                              CMP00.getValueType(), CMP00, CMP01,
                                              DAG.getConstant(x86cc, MVT::i8));

          bool is64BitFP = (CMP00.getValueType() == MVT::f64);
          MVT IntVT = is64BitFP ? MVT::i64 : MVT::i32;

          if (is64BitFP && !Subtarget->is64Bit()) {
            // On a 32-bit target, we cannot bitcast the 64-bit float to a
            // 64-bit integer, since that's not a legal type. Since
            // OnesOrZeroesF is all ones or all zeroes, we don't need all the
            // bits, but can do this little dance to extract the lowest 32 bits
            // and work with those going forward.
            SDValue Vector64 = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v2f64,
                                           OnesOrZeroesF);
            SDValue Vector32 = DAG.getNode(ISD::BITCAST, DL, MVT::v4f32,
                                           Vector64);
            OnesOrZeroesF = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32,
                                        Vector32, DAG.getIntPtrConstant(0));
            IntVT = MVT::i32;
          }

          SDValue OnesOrZeroesI = DAG.getNode(ISD::BITCAST, DL, IntVT,
                                              OnesOrZeroesF);
          SDValue ANDed = DAG.getNode(ISD::AND, DL, IntVT, OnesOrZeroesI,
                                      DAG.getConstant(1, IntVT));
          SDValue OneBitOfTruth = DAG.getNode(ISD::TRUNCATE, DL, MVT::i8, ANDed);
          return OneBitOfTruth;
        }
      }
    }
  }
  return SDValue();
}
/// CanFoldXORWithAllOnes - Test whether the XOR operand is an all-ones vector
/// so it can be folded inside ANDNP.
static bool CanFoldXORWithAllOnes(const SDNode *N) {
  EVT VT = N->getValueType(0);

  // Match direct AllOnes for 128 and 256-bit vectors.
  if (ISD::isBuildVectorAllOnes(N))
    return true;

  // Look through a bit convert.
  if (N->getOpcode() == ISD::BITCAST)
    N = N->getOperand(0).getNode();

  // Sometimes the operand may come from an insert_subvector building a
  // 256-bit allones vector.
  if (VT.is256BitVector() &&
      N->getOpcode() == ISD::INSERT_SUBVECTOR) {
    SDValue V1 = N->getOperand(0);
    SDValue V2 = N->getOperand(1);

    if (V1.getOpcode() == ISD::INSERT_SUBVECTOR &&
        V1.getOperand(0).getOpcode() == ISD::UNDEF &&
        ISD::isBuildVectorAllOnes(V1.getOperand(1).getNode()) &&
        ISD::isBuildVectorAllOnes(V2.getNode()))
      return true;
  }

  return false;
}
// On AVX/AVX2 the type v8i1 is legalized to v8i16, which is an XMM sized
// register. In most cases we actually compare or select YMM-sized registers
// and mixing the two types creates horrible code. This method optimizes
// some of the transition sequences.
static SDValue WidenMaskArithmetic(SDNode *N, SelectionDAG &DAG,
                                   TargetLowering::DAGCombinerInfo &DCI,
                                   const X86Subtarget *Subtarget) {
  EVT VT = N->getValueType(0);
  if (!VT.is256BitVector())
    return SDValue();

  assert((N->getOpcode() == ISD::ANY_EXTEND ||
          N->getOpcode() == ISD::ZERO_EXTEND ||
          N->getOpcode() == ISD::SIGN_EXTEND) && "Invalid Node");

  SDValue Narrow = N->getOperand(0);
  EVT NarrowVT = Narrow->getValueType(0);
  if (!NarrowVT.is128BitVector())
    return SDValue();

  if (Narrow->getOpcode() != ISD::XOR &&
      Narrow->getOpcode() != ISD::AND &&
      Narrow->getOpcode() != ISD::OR)
    return SDValue();

  SDValue N0 = Narrow->getOperand(0);
  SDValue N1 = Narrow->getOperand(1);
  SDLoc DL(Narrow);

  // The left side has to be a trunc.
  if (N0.getOpcode() != ISD::TRUNCATE)
    return SDValue();

  // The type of the truncated inputs.
  EVT WideVT = N0->getOperand(0)->getValueType(0);
  if (WideVT != VT)
    return SDValue();

  // The right side has to be a 'trunc' or a constant vector.
  bool RHSTrunc = N1.getOpcode() == ISD::TRUNCATE;
  ConstantSDNode *RHSConstSplat = nullptr;
  if (auto *RHSBV = dyn_cast<BuildVectorSDNode>(N1))
    RHSConstSplat = RHSBV->getConstantSplatNode();
  if (!RHSTrunc && !RHSConstSplat)
    return SDValue();

  const TargetLowering &TLI = DAG.getTargetLoweringInfo();

  if (!TLI.isOperationLegalOrPromote(Narrow->getOpcode(), WideVT))
    return SDValue();

  // Set N0 and N1 to hold the inputs to the new wide operation.
  N0 = N0->getOperand(0);
  if (RHSConstSplat) {
    N1 = DAG.getNode(ISD::ZERO_EXTEND, DL, WideVT.getScalarType(),
                     SDValue(RHSConstSplat, 0));
    SmallVector<SDValue, 8> C(WideVT.getVectorNumElements(), N1);
    N1 = DAG.getNode(ISD::BUILD_VECTOR, DL, WideVT, C);
  } else if (RHSTrunc) {
    N1 = N1->getOperand(0);
  }

  // Generate the wide operation.
  SDValue Op = DAG.getNode(Narrow->getOpcode(), DL, WideVT, N0, N1);
  unsigned Opcode = N->getOpcode();
  switch (Opcode) {
  case ISD::ANY_EXTEND:
    return Op;
  case ISD::ZERO_EXTEND: {
    unsigned InBits = NarrowVT.getScalarType().getSizeInBits();
    APInt Mask = APInt::getAllOnesValue(InBits);
    Mask = Mask.zext(VT.getScalarType().getSizeInBits());
    return DAG.getNode(ISD::AND, DL, VT,
                       Op, DAG.getConstant(Mask, VT));
  }
  case ISD::SIGN_EXTEND:
    return DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, VT,
                       Op, DAG.getValueType(NarrowVT));
  default:
    llvm_unreachable("Unexpected opcode");
  }
}
static SDValue PerformAndCombine(SDNode *N, SelectionDAG &DAG,
                                 TargetLowering::DAGCombinerInfo &DCI,
                                 const X86Subtarget *Subtarget) {
  EVT VT = N->getValueType(0);
  if (DCI.isBeforeLegalizeOps())
    return SDValue();

  SDValue R = CMPEQCombine(N, DAG, DCI, Subtarget);
  if (R.getNode())
    return R;

  // Create BEXTR instructions.
  // BEXTR is ((X >> imm) & (2**size-1))
  if (VT == MVT::i32 || VT == MVT::i64) {
    SDValue N0 = N->getOperand(0);
    SDValue N1 = N->getOperand(1);
    SDLoc DL(N);

    // Check for BEXTR.
    if ((Subtarget->hasBMI() || Subtarget->hasTBM()) &&
        (N0.getOpcode() == ISD::SRA || N0.getOpcode() == ISD::SRL)) {
      ConstantSDNode *MaskNode = dyn_cast<ConstantSDNode>(N1);
      ConstantSDNode *ShiftNode = dyn_cast<ConstantSDNode>(N0.getOperand(1));
      if (MaskNode && ShiftNode) {
        uint64_t Mask = MaskNode->getZExtValue();
        uint64_t Shift = ShiftNode->getZExtValue();
        if (isMask_64(Mask)) {
          uint64_t MaskSize = countPopulation(Mask);
          if (Shift + MaskSize <= VT.getSizeInBits())
            return DAG.getNode(X86ISD::BEXTR, DL, VT, N0.getOperand(0),
                               DAG.getConstant(Shift | (MaskSize << 8), VT));
        }
      }
    }
    return SDValue();
  }
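
  // BEXTR control encoding used above: start bit in bits 7:0, length in bits
  // 15:8. So with BMI, (x >> 4) & 0xFFF becomes BEXTR with control 0x0C04
  // (start 4, length 12).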
  // Want to form ANDNP nodes:
  // 1) In the hopes of then easily combining them with OR and AND nodes
  //    to form PBLEND/PSIGN.
  // 2) To match ANDN packed intrinsics.
  if (VT != MVT::v2i64 && VT != MVT::v4i64)
    return SDValue();

  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  SDLoc DL(N);

  // Check LHS for vnot.
  if (N0.getOpcode() == ISD::XOR &&
      //ISD::isBuildVectorAllOnes(N0.getOperand(1).getNode()))
      CanFoldXORWithAllOnes(N0.getOperand(1).getNode()))
    return DAG.getNode(X86ISD::ANDNP, DL, VT, N0.getOperand(0), N1);

  // Check RHS for vnot.
  if (N1.getOpcode() == ISD::XOR &&
      //ISD::isBuildVectorAllOnes(N1.getOperand(1).getNode()))
      CanFoldXORWithAllOnes(N1.getOperand(1).getNode()))
    return DAG.getNode(X86ISD::ANDNP, DL, VT, N1.getOperand(0), N0);

  return SDValue();
}
static SDValue PerformOrCombine(SDNode *N, SelectionDAG &DAG,
                                TargetLowering::DAGCombinerInfo &DCI,
                                const X86Subtarget *Subtarget) {
  if (DCI.isBeforeLegalizeOps())
    return SDValue();

  SDValue R = CMPEQCombine(N, DAG, DCI, Subtarget);
  if (R.getNode())
    return R;

  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  EVT VT = N->getValueType(0);

  // Look for psign/blend.
  if (VT == MVT::v2i64 || VT == MVT::v4i64) {
    if (!Subtarget->hasSSSE3() ||
        (VT == MVT::v4i64 && !Subtarget->hasInt256()))
      return SDValue();

    // Canonicalize pandn to RHS.
    if (N0.getOpcode() == X86ISD::ANDNP)
      std::swap(N0, N1);
    // or (and (m, y), (pandn m, x))
    if (N0.getOpcode() == ISD::AND && N1.getOpcode() == X86ISD::ANDNP) {
      SDValue Mask = N1.getOperand(0);
      SDValue X = N1.getOperand(1);
      SDValue Y;
      if (N0.getOperand(0) == Mask)
        Y = N0.getOperand(1);
      if (N0.getOperand(1) == Mask)
        Y = N0.getOperand(0);

      // Check to see if the mask appeared in both the AND and ANDNP.
      if (!Y.getNode())
        return SDValue();

      // Validate that X, Y, and Mask are BIT_CONVERTS, and see through them.
      // Look through mask bitcast.
      if (Mask.getOpcode() == ISD::BITCAST)
        Mask = Mask.getOperand(0);
      if (X.getOpcode() == ISD::BITCAST)
        X = X.getOperand(0);
      if (Y.getOpcode() == ISD::BITCAST)
        Y = Y.getOperand(0);

      EVT MaskVT = Mask.getValueType();

      // Validate that the Mask operand is a vector sra node.
      // FIXME: what to do for bytes, since there is a psignb/pblendvb, but
      // there is no psrai.b
      unsigned EltBits = MaskVT.getVectorElementType().getSizeInBits();
      unsigned SraAmt = ~0;
      if (Mask.getOpcode() == ISD::SRA) {
        if (auto *AmtBV = dyn_cast<BuildVectorSDNode>(Mask.getOperand(1)))
          if (auto *AmtConst = AmtBV->getConstantSplatNode())
            SraAmt = AmtConst->getZExtValue();
      } else if (Mask.getOpcode() == X86ISD::VSRAI) {
        SDValue SraC = Mask.getOperand(1);
        SraAmt = cast<ConstantSDNode>(SraC)->getZExtValue();
      }
      if ((SraAmt + 1) != EltBits)
        return SDValue();

      SDLoc DL(N);

      // Now we know we at least have a pblendvb with the mask val. See if
      // we can form a psignb/w/d.
      // psign = x.type == y.type == mask.type && y = sub(0, x);
      if (Y.getOpcode() == ISD::SUB && Y.getOperand(1) == X &&
          ISD::isBuildVectorAllZeros(Y.getOperand(0).getNode()) &&
          X.getValueType() == MaskVT && Y.getValueType() == MaskVT) {
        assert((EltBits == 8 || EltBits == 16 || EltBits == 32) &&
               "Unsupported VT for PSIGN");
        Mask = DAG.getNode(X86ISD::PSIGN, DL, MaskVT, X, Mask.getOperand(0));
        return DAG.getNode(ISD::BITCAST, DL, VT, Mask);
      }
      // PBLENDVB is only available on SSE 4.1.
      if (!Subtarget->hasSSE41())
        return SDValue();

      EVT BlendVT = (VT == MVT::v4i64) ? MVT::v32i8 : MVT::v16i8;

      X = DAG.getNode(ISD::BITCAST, DL, BlendVT, X);
      Y = DAG.getNode(ISD::BITCAST, DL, BlendVT, Y);
      Mask = DAG.getNode(ISD::BITCAST, DL, BlendVT, Mask);
      Mask = DAG.getNode(ISD::VSELECT, DL, BlendVT, Mask, Y, X);
      return DAG.getNode(ISD::BITCAST, DL, VT, Mask);
    }
  }
  if (VT != MVT::i16 && VT != MVT::i32 && VT != MVT::i64)
    return SDValue();

  // fold (or (x << c) | (y >> (64 - c))) ==> (shld64 x, y, c)
  MachineFunction &MF = DAG.getMachineFunction();
  bool OptForSize =
      MF.getFunction()->hasFnAttribute(Attribute::OptimizeForSize);

  // SHLD/SHRD instructions have lower register pressure, but on some
  // platforms they have higher latency than the equivalent
  // series of shifts/ors that would otherwise be generated.
  // Don't fold (or (x << c) | (y >> (64 - c))) if SHLD/SHRD instructions
  // have higher latencies and we are not optimizing for size.
  if (!OptForSize && Subtarget->isSHLDSlow())
    return SDValue();

  if (N0.getOpcode() == ISD::SRL && N1.getOpcode() == ISD::SHL)
    std::swap(N0, N1);
  if (N0.getOpcode() != ISD::SHL || N1.getOpcode() != ISD::SRL)
    return SDValue();
  if (!N0.hasOneUse() || !N1.hasOneUse())
    return SDValue();

  SDValue ShAmt0 = N0.getOperand(1);
  if (ShAmt0.getValueType() != MVT::i8)
    return SDValue();
  SDValue ShAmt1 = N1.getOperand(1);
  if (ShAmt1.getValueType() != MVT::i8)
    return SDValue();
  if (ShAmt0.getOpcode() == ISD::TRUNCATE)
    ShAmt0 = ShAmt0.getOperand(0);
  if (ShAmt1.getOpcode() == ISD::TRUNCATE)
    ShAmt1 = ShAmt1.getOperand(0);

  SDLoc DL(N);
  unsigned Opc = X86ISD::SHLD;
  SDValue Op0 = N0.getOperand(0);
  SDValue Op1 = N1.getOperand(0);
  if (ShAmt0.getOpcode() == ISD::SUB) {
    Opc = X86ISD::SHRD;
    std::swap(Op0, Op1);
    std::swap(ShAmt0, ShAmt1);
  }

  unsigned Bits = VT.getSizeInBits();
  if (ShAmt1.getOpcode() == ISD::SUB) {
    SDValue Sum = ShAmt1.getOperand(0);
    if (ConstantSDNode *SumC = dyn_cast<ConstantSDNode>(Sum)) {
      SDValue ShAmt1Op1 = ShAmt1.getOperand(1);
      if (ShAmt1Op1.getNode()->getOpcode() == ISD::TRUNCATE)
        ShAmt1Op1 = ShAmt1Op1.getOperand(0);
      if (SumC->getSExtValue() == Bits && ShAmt1Op1 == ShAmt0)
        return DAG.getNode(Opc, DL, VT,
                           Op0, Op1,
                           DAG.getNode(ISD::TRUNCATE, DL,
                                       MVT::i8, ShAmt0));
    }
  } else if (ConstantSDNode *ShAmt1C = dyn_cast<ConstantSDNode>(ShAmt1)) {
    ConstantSDNode *ShAmt0C = dyn_cast<ConstantSDNode>(ShAmt0);
    if (ShAmt0C &&
        ShAmt0C->getSExtValue() + ShAmt1C->getSExtValue() == Bits)
      return DAG.getNode(Opc, DL, VT,
                         N0.getOperand(0), N1.getOperand(0),
                         DAG.getNode(ISD::TRUNCATE, DL,
                                     MVT::i8, ShAmt0));
  }

  return SDValue();
}
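
// SHLD example: for i32, (x << 10) | (y >> 22) satisfies 10 + 22 == 32 and is
// matched above into (shld x, y, 10), a single double-shift instruction.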
// Generate NEG and CMOV for integer abs.
static SDValue performIntegerAbsCombine(SDNode *N, SelectionDAG &DAG) {
  EVT VT = N->getValueType(0);

  // Since X86 does not have CMOV for 8-bit integers, we don't convert
  // 8-bit integer abs to NEG and CMOV.
  if (VT.isInteger() && VT.getSizeInBits() == 8)
    return SDValue();

  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  SDLoc DL(N);

  // Check pattern of XOR(ADD(X,Y), Y) where Y is SRA(X, size(X)-1)
  // and change it to SUB and CMOV.
  if (VT.isInteger() && N->getOpcode() == ISD::XOR &&
      N0.getOpcode() == ISD::ADD &&
      N0.getOperand(1) == N1 &&
      N1.getOpcode() == ISD::SRA &&
      N1.getOperand(0) == N0.getOperand(0))
    if (ConstantSDNode *Y1C = dyn_cast<ConstantSDNode>(N1.getOperand(1)))
      if (Y1C->getAPIntValue() == VT.getSizeInBits()-1) {
        // Generate SUB & CMOV.
        SDValue Neg = DAG.getNode(X86ISD::SUB, DL, DAG.getVTList(VT, MVT::i32),
                                  DAG.getConstant(0, VT), N0.getOperand(0));

        SDValue Ops[] = { N0.getOperand(0), Neg,
                          DAG.getConstant(X86::COND_GE, MVT::i8),
                          SDValue(Neg.getNode(), 1) };
        return DAG.getNode(X86ISD::CMOV, DL, DAG.getVTList(VT, MVT::Glue), Ops);
      }
  return SDValue();
}
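
// The matched pattern is the classic branchless abs: with y = x >> 31 (i32),
// (x + y) ^ y equals |x|. The combine re-expresses it as sub + cmov: the SUB
// computes 0 - x and sets flags, and the CMOV picks 0 - x when that result is
// non-negative (COND_GE), x otherwise.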
// PerformXorCombine - Attempts to turn XOR nodes into BLSMSK nodes.
static SDValue PerformXorCombine(SDNode *N, SelectionDAG &DAG,
                                 TargetLowering::DAGCombinerInfo &DCI,
                                 const X86Subtarget *Subtarget) {
  if (DCI.isBeforeLegalizeOps())
    return SDValue();

  if (Subtarget->hasCMov()) {
    SDValue RV = performIntegerAbsCombine(N, DAG);
    if (RV.getNode())
      return RV;
  }

  return SDValue();
}
/// PerformLOADCombine - Do target-specific dag combines on LOAD nodes.
static SDValue PerformLOADCombine(SDNode *N, SelectionDAG &DAG,
                                  TargetLowering::DAGCombinerInfo &DCI,
                                  const X86Subtarget *Subtarget) {
  LoadSDNode *Ld = cast<LoadSDNode>(N);
  EVT RegVT = Ld->getValueType(0);
  EVT MemVT = Ld->getMemoryVT();
  SDLoc dl(Ld);
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();

  // For chips with slow 32-byte unaligned loads, break the 32-byte operation
  // into two 16-byte operations.
  ISD::LoadExtType Ext = Ld->getExtensionType();
  unsigned Alignment = Ld->getAlignment();
  bool IsAligned = Alignment == 0 || Alignment >= MemVT.getSizeInBits()/8;
  if (RegVT.is256BitVector() && Subtarget->isUnalignedMem32Slow() &&
      !DCI.isBeforeLegalizeOps() && !IsAligned && Ext == ISD::NON_EXTLOAD) {
    unsigned NumElems = RegVT.getVectorNumElements();
    if (NumElems < 2)
      return SDValue();

    SDValue Ptr = Ld->getBasePtr();
    SDValue Increment = DAG.getConstant(16, TLI.getPointerTy());

    EVT HalfVT = EVT::getVectorVT(*DAG.getContext(), MemVT.getScalarType(),
                                  NumElems/2);
    SDValue Load1 = DAG.getLoad(HalfVT, dl, Ld->getChain(), Ptr,
                                Ld->getPointerInfo(), Ld->isVolatile(),
                                Ld->isNonTemporal(), Ld->isInvariant(),
                                Alignment);
    Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, Increment);
    SDValue Load2 = DAG.getLoad(HalfVT, dl, Ld->getChain(), Ptr,
                                Ld->getPointerInfo(), Ld->isVolatile(),
                                Ld->isNonTemporal(), Ld->isInvariant(),
                                std::min(16U, Alignment));
    SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
                             Load1.getValue(1),
                             Load2.getValue(1));

    SDValue NewVec = DAG.getUNDEF(RegVT);
    NewVec = Insert128BitVector(NewVec, Load1, 0, DAG, dl);
    NewVec = Insert128BitVector(NewVec, Load2, NumElems/2, DAG, dl);
    return DCI.CombineTo(N, NewVec, TF, true);
  }

  return SDValue();
}
/// PerformMLOADCombine - Resolve extending loads.
static SDValue PerformMLOADCombine(SDNode *N, SelectionDAG &DAG,
                                   TargetLowering::DAGCombinerInfo &DCI,
                                   const X86Subtarget *Subtarget) {
  MaskedLoadSDNode *Mld = cast<MaskedLoadSDNode>(N);
  if (Mld->getExtensionType() != ISD::SEXTLOAD)
    return SDValue();

  EVT VT = Mld->getValueType(0);
  unsigned NumElems = VT.getVectorNumElements();
  EVT LdVT = Mld->getMemoryVT();
  SDLoc dl(Mld);

  assert(LdVT != VT && "Cannot extend to the same type");
  unsigned ToSz = VT.getVectorElementType().getSizeInBits();
  unsigned FromSz = LdVT.getVectorElementType().getSizeInBits();
  // From/To sizes and ElemCount must be powers of two.
  assert(isPowerOf2_32(NumElems * FromSz * ToSz) &&
         "Unexpected size for extending masked load");

  unsigned SizeRatio = ToSz / FromSz;
  assert(SizeRatio * NumElems * FromSz == VT.getSizeInBits());

  // Create a type on which we perform the shuffle.
  EVT WideVecVT = EVT::getVectorVT(*DAG.getContext(),
                                   LdVT.getScalarType(), NumElems*SizeRatio);
  assert(WideVecVT.getSizeInBits() == VT.getSizeInBits());

  // Convert the Src0 value.
  SDValue WideSrc0 = DAG.getNode(ISD::BITCAST, dl, WideVecVT, Mld->getSrc0());
  if (Mld->getSrc0().getOpcode() != ISD::UNDEF) {
    SmallVector<int, 16> ShuffleVec(NumElems * SizeRatio, -1);
    for (unsigned i = 0; i != NumElems; ++i)
      ShuffleVec[i] = i * SizeRatio;

    // Can't shuffle using an illegal type.
    assert(DAG.getTargetLoweringInfo().isTypeLegal(WideVecVT) &&
           "WideVecVT should be legal");
    WideSrc0 = DAG.getVectorShuffle(WideVecVT, dl, WideSrc0,
                                    DAG.getUNDEF(WideVecVT), &ShuffleVec[0]);
  }
  // Prepare the new mask.
  SDValue NewMask;
  SDValue Mask = Mld->getMask();
  if (Mask.getValueType() == VT) {
    // Mask and original value have the same type.
    NewMask = DAG.getNode(ISD::BITCAST, dl, WideVecVT, Mask);
    SmallVector<int, 16> ShuffleVec(NumElems * SizeRatio, -1);
    for (unsigned i = 0; i != NumElems; ++i)
      ShuffleVec[i] = i * SizeRatio;
    for (unsigned i = NumElems; i != NumElems*SizeRatio; ++i)
      ShuffleVec[i] = NumElems*SizeRatio;
    NewMask = DAG.getVectorShuffle(WideVecVT, dl, NewMask,
                                   DAG.getConstant(0, WideVecVT),
                                   &ShuffleVec[0]);
  }
  else {
    assert(Mask.getValueType().getVectorElementType() == MVT::i1);
    unsigned WidenNumElts = NumElems*SizeRatio;
    unsigned MaskNumElts = VT.getVectorNumElements();
    EVT NewMaskVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
                                     WidenNumElts);

    unsigned NumConcat = WidenNumElts / MaskNumElts;
    SmallVector<SDValue, 16> Ops(NumConcat);
    SDValue ZeroVal = DAG.getConstant(0, Mask.getValueType());
    Ops[0] = Mask;
    for (unsigned i = 1; i != NumConcat; ++i)
      Ops[i] = ZeroVal;

    NewMask = DAG.getNode(ISD::CONCAT_VECTORS, dl, NewMaskVT, Ops);
  }

  SDValue WideLd = DAG.getMaskedLoad(WideVecVT, dl, Mld->getChain(),
                                     Mld->getBasePtr(), NewMask, WideSrc0,
                                     Mld->getMemoryVT(), Mld->getMemOperand(),
                                     ISD::NON_EXTLOAD);
  SDValue NewVec = DAG.getNode(X86ISD::VSEXT, dl, VT, WideLd);
  return DCI.CombineTo(N, NewVec, WideLd.getValue(1), true);
}
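
// For instance, a masked sextload of memory type v8i16 extended to v8i32 is
// rewritten here as a v16i16 masked load (its mask and pass-through value
// widened with zeroed extra lanes) followed by an X86ISD::VSEXT to v8i32.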
25102 /// PerformMSTORECombine - Resolve truncating stores
25103 static SDValue PerformMSTORECombine(SDNode *N, SelectionDAG &DAG,
25104 const X86Subtarget *Subtarget) {
25105 MaskedStoreSDNode *Mst = cast<MaskedStoreSDNode>(N);
25106 if (!Mst->isTruncatingStore())
25107 return SDValue();
25109 EVT VT = Mst->getValue().getValueType();
25110 unsigned NumElems = VT.getVectorNumElements();
25111 EVT StVT = Mst->getMemoryVT();
25112 SDLoc dl(Mst);
25114 assert(StVT != VT && "Cannot truncate to the same type");
25115 unsigned FromSz = VT.getVectorElementType().getSizeInBits();
25116 unsigned ToSz = StVT.getVectorElementType().getSizeInBits();
25118 // From, To sizes and ElemCount must be pow of two
25119 assert (isPowerOf2_32(NumElems * FromSz * ToSz) &&
25120 "Unexpected size for truncating masked store");
25121 // We are going to use the original vector elt for storing.
25122 // Accumulated smaller vector elements must be a multiple of the store size.
25123 assert (((NumElems * FromSz) % ToSz) == 0 &&
25124 "Unexpected ratio for truncating masked store");
25126 unsigned SizeRatio = FromSz / ToSz;
25127 assert(SizeRatio * NumElems * ToSz == VT.getSizeInBits());
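25128 // E.g. truncating v8i32 to v8i16 gives SizeRatio == 2: only every second i16 slot of the widened vector carries data.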
25129 // Create a type on which we perform the shuffle
25130 EVT WideVecVT = EVT::getVectorVT(*DAG.getContext(),
25131 StVT.getScalarType(), NumElems*SizeRatio);
25133 assert(WideVecVT.getSizeInBits() == VT.getSizeInBits());
25135 SDValue WideVec = DAG.getNode(ISD::BITCAST, dl, WideVecVT, Mst->getValue());
25136 SmallVector<int, 16> ShuffleVec(NumElems * SizeRatio, -1);
25137 for (unsigned i = 0; i != NumElems; ++i)
25138 ShuffleVec[i] = i * SizeRatio;
25140 // Can't shuffle using an illegal type.
25141 assert (DAG.getTargetLoweringInfo().isTypeLegal(WideVecVT)
25142 && "WideVecVT should be legal");
25144 SDValue TruncatedVal = DAG.getVectorShuffle(WideVecVT, dl, WideVec,
25145 DAG.getUNDEF(WideVecVT),
25146 &ShuffleVec[0]);
25148 SDValue NewMask;
25149 SDValue Mask = Mst->getMask();
25150 if (Mask.getValueType() == VT) {
25151 // Mask and original value have the same type
25152 NewMask = DAG.getNode(ISD::BITCAST, dl, WideVecVT, Mask);
25153 for (unsigned i = 0; i != NumElems; ++i)
25154 ShuffleVec[i] = i * SizeRatio;
25155 for (unsigned i = NumElems; i != NumElems*SizeRatio; ++i)
25156 ShuffleVec[i] = NumElems*SizeRatio;
25157 NewMask = DAG.getVectorShuffle(WideVecVT, dl, NewMask,
25158 DAG.getConstant(0, WideVecVT),
25159 &ShuffleVec[0]);
25160 }
25161 else {
25162 assert(Mask.getValueType().getVectorElementType() == MVT::i1);
25163 unsigned WidenNumElts = NumElems*SizeRatio;
25164 unsigned MaskNumElts = VT.getVectorNumElements();
25165 EVT NewMaskVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
25166 WidenNumElts);
25168 unsigned NumConcat = WidenNumElts / MaskNumElts;
25169 SmallVector<SDValue, 16> Ops(NumConcat);
25170 SDValue ZeroVal = DAG.getConstant(0, Mask.getValueType());
25171 Ops[0] = Mask;
25172 for (unsigned i = 1; i != NumConcat; ++i)
25173 Ops[i] = ZeroVal;
25175 NewMask = DAG.getNode(ISD::CONCAT_VECTORS, dl, NewMaskVT, Ops);
25176 }
25178 return DAG.getMaskedStore(Mst->getChain(), dl, TruncatedVal, Mst->getBasePtr(),
25179 NewMask, StVT, Mst->getMemOperand(), false);
25180 }
25181 /// PerformSTORECombine - Do target-specific dag combines on STORE nodes.
25182 static SDValue PerformSTORECombine(SDNode *N, SelectionDAG &DAG,
25183 const X86Subtarget *Subtarget) {
25184 StoreSDNode *St = cast<StoreSDNode>(N);
25185 EVT VT = St->getValue().getValueType();
25186 EVT StVT = St->getMemoryVT();
25187 SDLoc dl(St);
25188 SDValue StoredVal = St->getOperand(1);
25189 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
25191 // If we are saving a concatenation of two XMM registers and 32-byte stores
25192 // are slow, such as on Sandy Bridge, perform two 16-byte stores.
25193 unsigned Alignment = St->getAlignment();
25194 bool IsAligned = Alignment == 0 || Alignment >= VT.getSizeInBits()/8;
25195 if (VT.is256BitVector() && Subtarget->isUnalignedMem32Slow() &&
25196 StVT == VT && !IsAligned) {
25197 unsigned NumElems = VT.getVectorNumElements();
25198 if (NumElems < 2)
25199 return SDValue();
25201 SDValue Value0 = Extract128BitVector(StoredVal, 0, DAG, dl);
25202 SDValue Value1 = Extract128BitVector(StoredVal, NumElems/2, DAG, dl);
25204 SDValue Stride = DAG.getConstant(16, TLI.getPointerTy());
25205 SDValue Ptr0 = St->getBasePtr();
25206 SDValue Ptr1 = DAG.getNode(ISD::ADD, dl, Ptr0.getValueType(), Ptr0, Stride);
25208 SDValue Ch0 = DAG.getStore(St->getChain(), dl, Value0, Ptr0,
25209 St->getPointerInfo(), St->isVolatile(),
25210 St->isNonTemporal(), Alignment);
25211 SDValue Ch1 = DAG.getStore(St->getChain(), dl, Value1, Ptr1,
25212 St->getPointerInfo(), St->isVolatile(),
25213 St->isNonTemporal(),
25214 std::min(16U, Alignment));
25215 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Ch0, Ch1);
25216 }
25218 // Optimize trunc store (of multiple scalars) to shuffle and store.
25219 // First, pack all of the elements in one place. Next, store to memory
25220 // in fewer chunks.
25221 if (St->isTruncatingStore() && VT.isVector()) {
25222 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
25223 unsigned NumElems = VT.getVectorNumElements();
25224 assert(StVT != VT && "Cannot truncate to the same type");
25225 unsigned FromSz = VT.getVectorElementType().getSizeInBits();
25226 unsigned ToSz = StVT.getVectorElementType().getSizeInBits();
25228 // From, To sizes and ElemCount must be pow of two
25229 if (!isPowerOf2_32(NumElems * FromSz * ToSz)) return SDValue();
25230 // We are going to use the original vector elt for storing.
25231 // Accumulated smaller vector elements must be a multiple of the store size.
25232 if (0 != (NumElems * FromSz) % ToSz) return SDValue();
25234 unsigned SizeRatio = FromSz / ToSz;
25236 assert(SizeRatio * NumElems * ToSz == VT.getSizeInBits());
25238 // Create a type on which we perform the shuffle
25239 EVT WideVecVT = EVT::getVectorVT(*DAG.getContext(),
25240 StVT.getScalarType(), NumElems*SizeRatio);
25242 assert(WideVecVT.getSizeInBits() == VT.getSizeInBits());
25244 SDValue WideVec = DAG.getNode(ISD::BITCAST, dl, WideVecVT, St->getValue());
25245 SmallVector<int, 8> ShuffleVec(NumElems * SizeRatio, -1);
25246 for (unsigned i = 0; i != NumElems; ++i)
25247 ShuffleVec[i] = i * SizeRatio;
25249 // Can't shuffle using an illegal type.
25250 if (!TLI.isTypeLegal(WideVecVT))
25251 return SDValue();
25253 SDValue Shuff = DAG.getVectorShuffle(WideVecVT, dl, WideVec,
25254 DAG.getUNDEF(WideVecVT),
25255 &ShuffleVec[0]);
25256 // At this point all of the data is stored at the bottom of the
25257 // register. We now need to save it to mem.
25259 // Find the largest store unit
25260 MVT StoreType = MVT::i8;
25261 for (MVT Tp : MVT::integer_valuetypes()) {
25262 if (TLI.isTypeLegal(Tp) && Tp.getSizeInBits() <= NumElems * ToSz)
25263 StoreType = Tp;
25264 }
25266 // On 32bit systems, we can't save 64bit integers. Try bitcasting to F64.
25267 if (TLI.isTypeLegal(MVT::f64) && StoreType.getSizeInBits() < 64 &&
25268 (64 <= NumElems * ToSz))
25269 StoreType = MVT::f64;
25271 // Bitcast the original vector into a vector of store-size units
25272 EVT StoreVecVT = EVT::getVectorVT(*DAG.getContext(),
25273 StoreType, VT.getSizeInBits()/StoreType.getSizeInBits());
25274 assert(StoreVecVT.getSizeInBits() == VT.getSizeInBits());
25275 SDValue ShuffWide = DAG.getNode(ISD::BITCAST, dl, StoreVecVT, Shuff);
25276 SmallVector<SDValue, 8> Chains;
25277 SDValue Increment = DAG.getConstant(StoreType.getSizeInBits()/8,
25278 TLI.getPointerTy());
25279 SDValue Ptr = St->getBasePtr();
25281 // Perform one or more big stores into memory.
25282 for (unsigned i=0, e=(ToSz*NumElems)/StoreType.getSizeInBits(); i!=e; ++i) {
25283 SDValue SubVec = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl,
25284 StoreType, ShuffWide,
25285 DAG.getIntPtrConstant(i));
25286 SDValue Ch = DAG.getStore(St->getChain(), dl, SubVec, Ptr,
25287 St->getPointerInfo(), St->isVolatile(),
25288 St->isNonTemporal(), St->getAlignment());
25289 Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, Increment);
25290 Chains.push_back(Ch);
25291 }
25293 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Chains);
25294 }
25296 // Turn load->store of MMX types into GPR load/stores. This avoids clobbering
25297 // the FP state in cases where an emms may be missing.
25298 // A preferable solution to the general problem is to figure out the right
25299 // places to insert EMMS. This qualifies as a quick hack.
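25300 // (Going through an f64 register needs one load/store pair instead of two i32 pairs.)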
25301 // Similarly, turn load->store of i64 into double load/stores in 32-bit mode.
25302 if (VT.getSizeInBits() != 64)
25303 return SDValue();
25305 const Function *F = DAG.getMachineFunction().getFunction();
25306 bool NoImplicitFloatOps = F->hasFnAttribute(Attribute::NoImplicitFloat);
25307 bool F64IsLegal = !DAG.getTarget().Options.UseSoftFloat && !NoImplicitFloatOps
25308 && Subtarget->hasSSE2();
25309 if ((VT.isVector() ||
25310 (VT == MVT::i64 && F64IsLegal && !Subtarget->is64Bit())) &&
25311 isa<LoadSDNode>(St->getValue()) &&
25312 !cast<LoadSDNode>(St->getValue())->isVolatile() &&
25313 St->getChain().hasOneUse() && !St->isVolatile()) {
25314 SDNode* LdVal = St->getValue().getNode();
25315 LoadSDNode *Ld = nullptr;
25316 int TokenFactorIndex = -1;
25317 SmallVector<SDValue, 8> Ops;
25318 SDNode* ChainVal = St->getChain().getNode();
25319 // Must be a store of a load. We currently handle two cases: the load
25320 // is a direct child, and it's under an intervening TokenFactor. It is
25321 // possible to dig deeper under nested TokenFactors.
25322 if (ChainVal == LdVal)
25323 Ld = cast<LoadSDNode>(St->getChain());
25324 else if (St->getValue().hasOneUse() &&
25325 ChainVal->getOpcode() == ISD::TokenFactor) {
25326 for (unsigned i = 0, e = ChainVal->getNumOperands(); i != e; ++i) {
25327 if (ChainVal->getOperand(i).getNode() == LdVal) {
25328 TokenFactorIndex = i;
25329 Ld = cast<LoadSDNode>(St->getValue());
25330 } else
25331 Ops.push_back(ChainVal->getOperand(i));
25332 }
25333 }
25335 if (!Ld || !ISD::isNormalLoad(Ld))
25336 return SDValue();
25338 // If this is not the MMX case, i.e. we are just turning i64 load/store
25339 // into f64 load/store, avoid the transformation if there are multiple
25340 // uses of the loaded value.
25341 if (!VT.isVector() && !Ld->hasNUsesOfValue(1, 0))
25342 return SDValue();
25344 SDLoc LdDL(Ld);
25345 SDLoc StDL(N);
25346 // If we are a 64-bit capable x86, lower to a single movq load/store pair.
25347 // Otherwise, if it's legal to use f64 SSE instructions, use f64 load/store
25348 // pair instead.
25349 if (Subtarget->is64Bit() || F64IsLegal) {
25350 EVT LdVT = Subtarget->is64Bit() ? MVT::i64 : MVT::f64;
25351 SDValue NewLd = DAG.getLoad(LdVT, LdDL, Ld->getChain(), Ld->getBasePtr(),
25352 Ld->getPointerInfo(), Ld->isVolatile(),
25353 Ld->isNonTemporal(), Ld->isInvariant(),
25354 Ld->getAlignment());
25355 SDValue NewChain = NewLd.getValue(1);
25356 if (TokenFactorIndex != -1) {
25357 Ops.push_back(NewChain);
25358 NewChain = DAG.getNode(ISD::TokenFactor, LdDL, MVT::Other, Ops);
25359 }
25360 return DAG.getStore(NewChain, StDL, NewLd, St->getBasePtr(),
25361 St->getPointerInfo(),
25362 St->isVolatile(), St->isNonTemporal(),
25363 St->getAlignment());
25364 }
25366 // Otherwise, lower to two pairs of 32-bit loads / stores.
25367 SDValue LoAddr = Ld->getBasePtr();
25368 SDValue HiAddr = DAG.getNode(ISD::ADD, LdDL, MVT::i32, LoAddr,
25369 DAG.getConstant(4, MVT::i32));
25371 SDValue LoLd = DAG.getLoad(MVT::i32, LdDL, Ld->getChain(), LoAddr,
25372 Ld->getPointerInfo(),
25373 Ld->isVolatile(), Ld->isNonTemporal(),
25374 Ld->isInvariant(), Ld->getAlignment());
25375 SDValue HiLd = DAG.getLoad(MVT::i32, LdDL, Ld->getChain(), HiAddr,
25376 Ld->getPointerInfo().getWithOffset(4),
25377 Ld->isVolatile(), Ld->isNonTemporal(),
25378 Ld->isInvariant(),
25379 MinAlign(Ld->getAlignment(), 4));
25381 SDValue NewChain = LoLd.getValue(1);
25382 if (TokenFactorIndex != -1) {
25383 Ops.push_back(LoLd);
25384 Ops.push_back(HiLd);
25385 NewChain = DAG.getNode(ISD::TokenFactor, LdDL, MVT::Other, Ops);
25386 }
25388 LoAddr = St->getBasePtr();
25389 HiAddr = DAG.getNode(ISD::ADD, StDL, MVT::i32, LoAddr,
25390 DAG.getConstant(4, MVT::i32));
25392 SDValue LoSt = DAG.getStore(NewChain, StDL, LoLd, LoAddr,
25393 St->getPointerInfo(),
25394 St->isVolatile(), St->isNonTemporal(),
25395 St->getAlignment());
25396 SDValue HiSt = DAG.getStore(NewChain, StDL, HiLd, HiAddr,
25397 St->getPointerInfo().getWithOffset(4),
25398 St->isVolatile(),
25399 St->isNonTemporal(),
25400 MinAlign(St->getAlignment(), 4));
25401 return DAG.getNode(ISD::TokenFactor, StDL, MVT::Other, LoSt, HiSt);
25402 }
25404 return SDValue();
25405 }
25406 /// Return 'true' if this vector operation is "horizontal"
25407 /// and return the operands for the horizontal operation in LHS and RHS. A
25408 /// horizontal operation performs the binary operation on successive elements
25409 /// of its first operand, then on successive elements of its second operand,
25410 /// returning the resulting values in a vector. For example, if
25411 /// A = < float a0, float a1, float a2, float a3 >
25412 /// and
25413 /// B = < float b0, float b1, float b2, float b3 >
25414 /// then the result of doing a horizontal operation on A and B is
25415 /// A horizontal-op B = < a0 op a1, a2 op a3, b0 op b1, b2 op b3 >.
25416 /// In short, LHS and RHS are inspected to see if LHS op RHS is of the form
25417 /// A horizontal-op B, for some already available A and B, and if so then LHS is
25418 /// set to A, RHS to B, and the routine returns 'true'.
25419 /// Note that the binary operation should have the property that if one of the
25420 /// operands is UNDEF then the result is UNDEF.
25421 static bool isHorizontalBinOp(SDValue &LHS, SDValue &RHS, bool IsCommutative) {
25422 // Look for the following pattern: if
25423 // A = < float a0, float a1, float a2, float a3 >
25424 // B = < float b0, float b1, float b2, float b3 >
25426 // LHS = VECTOR_SHUFFLE A, B, <0, 2, 4, 6>
25427 // RHS = VECTOR_SHUFFLE A, B, <1, 3, 5, 7>
25428 // then LHS op RHS = < a0 op a1, a2 op a3, b0 op b1, b2 op b3 >
25429 // which is A horizontal-op B.
25431 // At least one of the operands should be a vector shuffle.
25432 if (LHS.getOpcode() != ISD::VECTOR_SHUFFLE &&
25433 RHS.getOpcode() != ISD::VECTOR_SHUFFLE)
25434 return false;
25436 MVT VT = LHS.getSimpleValueType();
25438 assert((VT.is128BitVector() || VT.is256BitVector()) &&
25439 "Unsupported vector type for horizontal add/sub");
25441 // Handle 128 and 256-bit vector lengths. AVX defines horizontal add/sub to
25442 // operate independently on 128-bit lanes.
25443 unsigned NumElts = VT.getVectorNumElements();
25444 unsigned NumLanes = VT.getSizeInBits()/128;
25445 unsigned NumLaneElts = NumElts / NumLanes;
25446 assert((NumLaneElts % 2 == 0) &&
25447 "Vector type should have an even number of elements in each lane");
25448 unsigned HalfLaneElts = NumLaneElts/2;
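25449 // Within each 128-bit lane, the low half of the results comes from the first operand and the high half from the second.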
25450 // View LHS in the form
25451 // LHS = VECTOR_SHUFFLE A, B, LMask
25452 // If LHS is not a shuffle then pretend it is the shuffle
25453 // LHS = VECTOR_SHUFFLE LHS, undef, <0, 1, ..., N-1>
25454 // NOTE: in what follows a default initialized SDValue represents an UNDEF of
25455 // type VT.
25456 SDValue A, B;
25457 SmallVector<int, 16> LMask(NumElts);
25458 if (LHS.getOpcode() == ISD::VECTOR_SHUFFLE) {
25459 if (LHS.getOperand(0).getOpcode() != ISD::UNDEF)
25460 A = LHS.getOperand(0);
25461 if (LHS.getOperand(1).getOpcode() != ISD::UNDEF)
25462 B = LHS.getOperand(1);
25463 ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(LHS.getNode())->getMask();
25464 std::copy(Mask.begin(), Mask.end(), LMask.begin());
25465 } else {
25466 if (LHS.getOpcode() != ISD::UNDEF)
25467 A = LHS;
25468 for (unsigned i = 0; i != NumElts; ++i)
25469 LMask[i] = i;
25470 }
25472 // Likewise, view RHS in the form
25473 // RHS = VECTOR_SHUFFLE C, D, RMask
25474 SDValue C, D;
25475 SmallVector<int, 16> RMask(NumElts);
25476 if (RHS.getOpcode() == ISD::VECTOR_SHUFFLE) {
25477 if (RHS.getOperand(0).getOpcode() != ISD::UNDEF)
25478 C = RHS.getOperand(0);
25479 if (RHS.getOperand(1).getOpcode() != ISD::UNDEF)
25480 D = RHS.getOperand(1);
25481 ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(RHS.getNode())->getMask();
25482 std::copy(Mask.begin(), Mask.end(), RMask.begin());
25483 } else {
25484 if (RHS.getOpcode() != ISD::UNDEF)
25485 C = RHS;
25486 for (unsigned i = 0; i != NumElts; ++i)
25487 RMask[i] = i;
25488 }
25490 // Check that the shuffles are both shuffling the same vectors.
25491 if (!(A == C && B == D) && !(A == D && B == C))
25492 return false;
25494 // If everything is UNDEF then bail out: it would be better to fold to UNDEF.
25495 if (!A.getNode() && !B.getNode())
25496 return false;
25498 // If A and B occur in reverse order in RHS, then "swap" them (which means
25499 // rewriting the mask).
25500 if (A != C)
25501 CommuteVectorShuffleMask(RMask, NumElts);
25503 // At this point LHS and RHS are equivalent to
25504 // LHS = VECTOR_SHUFFLE A, B, LMask
25505 // RHS = VECTOR_SHUFFLE A, B, RMask
25506 // Check that the masks correspond to performing a horizontal operation.
25507 for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
25508 for (unsigned i = 0; i != NumLaneElts; ++i) {
25509 int LIdx = LMask[i+l], RIdx = RMask[i+l];
25511 // Ignore any UNDEF components.
25512 if (LIdx < 0 || RIdx < 0 ||
25513 (!A.getNode() && (LIdx < (int)NumElts || RIdx < (int)NumElts)) ||
25514 (!B.getNode() && (LIdx >= (int)NumElts || RIdx >= (int)NumElts)))
25515 continue;
25517 // Check that successive elements are being operated on. If not, this is
25518 // not a horizontal operation.
25519 unsigned Src = (i/HalfLaneElts); // each lane is split between srcs
25520 int Index = 2*(i%HalfLaneElts) + NumElts*Src + l;
25521 if (!(LIdx == Index && RIdx == Index + 1) &&
25522 !(IsCommutative && LIdx == Index + 1 && RIdx == Index))
25523 return false;
25524 }
25525 }
25527 LHS = A.getNode() ? A : B; // If A is 'UNDEF', use B for it.
25528 RHS = B.getNode() ? B : A; // If B is 'UNDEF', use A for it.
25530 return true;
25531 }
25532 /// Do target-specific dag combines on floating point adds.
25533 static SDValue PerformFADDCombine(SDNode *N, SelectionDAG &DAG,
25534 const X86Subtarget *Subtarget) {
25535 EVT VT = N->getValueType(0);
25536 SDValue LHS = N->getOperand(0);
25537 SDValue RHS = N->getOperand(1);
25539 // Try to synthesize horizontal adds from adds of shuffles.
25540 if (((Subtarget->hasSSE3() && (VT == MVT::v4f32 || VT == MVT::v2f64)) ||
25541 (Subtarget->hasFp256() && (VT == MVT::v8f32 || VT == MVT::v4f64))) &&
25542 isHorizontalBinOp(LHS, RHS, true))
25543 return DAG.getNode(X86ISD::FHADD, SDLoc(N), VT, LHS, RHS);
25545 return SDValue();
25546 }
25547 /// Do target-specific dag combines on floating point subs.
25548 static SDValue PerformFSUBCombine(SDNode *N, SelectionDAG &DAG,
25549 const X86Subtarget *Subtarget) {
25550 EVT VT = N->getValueType(0);
25551 SDValue LHS = N->getOperand(0);
25552 SDValue RHS = N->getOperand(1);
25554 // Try to synthesize horizontal subs from subs of shuffles.
25555 if (((Subtarget->hasSSE3() && (VT == MVT::v4f32 || VT == MVT::v2f64)) ||
25556 (Subtarget->hasFp256() && (VT == MVT::v8f32 || VT == MVT::v4f64))) &&
25557 isHorizontalBinOp(LHS, RHS, false))
25558 return DAG.getNode(X86ISD::FHSUB, SDLoc(N), VT, LHS, RHS);
25560 return SDValue();
25561 }
25562 /// Do target-specific dag combines on X86ISD::FOR and X86ISD::FXOR nodes.
25563 static SDValue PerformFORCombine(SDNode *N, SelectionDAG &DAG) {
25564 assert(N->getOpcode() == X86ISD::FOR || N->getOpcode() == X86ISD::FXOR);
25566 // F[X]OR(0.0, x) -> x
25567 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(0)))
25568 if (C->getValueAPF().isPosZero())
25569 return N->getOperand(1);
25571 // F[X]OR(x, 0.0) -> x
25572 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(1)))
25573 if (C->getValueAPF().isPosZero())
25574 return N->getOperand(0);
25576 return SDValue();
25577 }
25578 /// Do target-specific dag combines on X86ISD::FMIN and X86ISD::FMAX nodes.
25579 static SDValue PerformFMinFMaxCombine(SDNode *N, SelectionDAG &DAG) {
25580 assert(N->getOpcode() == X86ISD::FMIN || N->getOpcode() == X86ISD::FMAX);
25582 // Only perform optimizations if UnsafeMath is used.
25583 if (!DAG.getTarget().Options.UnsafeFPMath)
25584 return SDValue();
25586 // If we run in unsafe-math mode, then convert the FMAX and FMIN nodes
25587 // into FMINC and FMAXC, which are Commutative operations.
25588 unsigned NewOp = 0;
25589 switch (N->getOpcode()) {
25590 default: llvm_unreachable("unknown opcode");
25591 case X86ISD::FMIN: NewOp = X86ISD::FMINC; break;
25592 case X86ISD::FMAX: NewOp = X86ISD::FMAXC; break;
25593 }
25595 return DAG.getNode(NewOp, SDLoc(N), N->getValueType(0),
25596 N->getOperand(0), N->getOperand(1));
25597 }
25599 /// Do target-specific dag combines on X86ISD::FAND nodes.
25600 static SDValue PerformFANDCombine(SDNode *N, SelectionDAG &DAG) {
25601 // FAND(0.0, x) -> 0.0
25602 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(0)))
25603 if (C->getValueAPF().isPosZero())
25604 return N->getOperand(0);
25606 // FAND(x, 0.0) -> 0.0
25607 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(1)))
25608 if (C->getValueAPF().isPosZero())
25609 return N->getOperand(1);
25611 return SDValue();
25612 }
25614 /// Do target-specific dag combines on X86ISD::FANDN nodes
25615 static SDValue PerformFANDNCombine(SDNode *N, SelectionDAG &DAG) {
25616 // FANDN(0.0, x) -> x
25617 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(0)))
25618 if (C->getValueAPF().isPosZero())
25619 return N->getOperand(1);
25621 // FANDN(x, 0.0) -> 0.0
25622 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(1)))
25623 if (C->getValueAPF().isPosZero())
25624 return N->getOperand(1);
25626 return SDValue();
25627 }
25629 static SDValue PerformBTCombine(SDNode *N,
25630 SelectionDAG &DAG,
25631 TargetLowering::DAGCombinerInfo &DCI) {
25632 // BT ignores high bits in the bit index operand.
25633 SDValue Op1 = N->getOperand(1);
25634 if (Op1.hasOneUse()) {
25635 unsigned BitWidth = Op1.getValueSizeInBits();
25636 APInt DemandedMask = APInt::getLowBitsSet(BitWidth, Log2_32(BitWidth));
25637 APInt KnownZero, KnownOne;
25638 TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
25639 !DCI.isBeforeLegalizeOps());
25640 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
25641 if (TLO.ShrinkDemandedConstant(Op1, DemandedMask) ||
25642 TLI.SimplifyDemandedBits(Op1, DemandedMask, KnownZero, KnownOne, TLO))
25643 DCI.CommitTargetLoweringOpt(TLO);
25644 }
25646 return SDValue();
25647 }
25648 static SDValue PerformVZEXT_MOVLCombine(SDNode *N, SelectionDAG &DAG) {
25649 SDValue Op = N->getOperand(0);
25650 if (Op.getOpcode() == ISD::BITCAST)
25651 Op = Op.getOperand(0);
25652 EVT VT = N->getValueType(0), OpVT = Op.getValueType();
25653 if (Op.getOpcode() == X86ISD::VZEXT_LOAD &&
25654 VT.getVectorElementType().getSizeInBits() ==
25655 OpVT.getVectorElementType().getSizeInBits()) {
25656 return DAG.getNode(ISD::BITCAST, SDLoc(N), VT, Op);
25657 }
25659 return SDValue();
25660 }
25661 static SDValue PerformSIGN_EXTEND_INREGCombine(SDNode *N, SelectionDAG &DAG,
25662 const X86Subtarget *Subtarget) {
25663 EVT VT = N->getValueType(0);
25664 if (!VT.isVector())
25665 return SDValue();
25667 SDValue N0 = N->getOperand(0);
25668 SDValue N1 = N->getOperand(1);
25669 EVT ExtraVT = cast<VTSDNode>(N1)->getVT();
25670 SDLoc dl(N);
25672 // The SIGN_EXTEND_INREG to v4i64 is an expensive operation on both
25673 // SSE and AVX2 since there is no sign-extended shift right
25674 // operation on a vector with 64-bit elements.
25675 // (sext_in_reg (v4i64 anyext (v4i32 x)), ExtraVT) ->
25676 // (v4i64 sext (v4i32 sext_in_reg (v4i32 x, ExtraVT)))
25677 if (VT == MVT::v4i64 && (N0.getOpcode() == ISD::ANY_EXTEND ||
25678 N0.getOpcode() == ISD::SIGN_EXTEND)) {
25679 SDValue N00 = N0.getOperand(0);
25681 // EXTLOAD has a better solution on AVX2,
25682 // it may be replaced with X86ISD::VSEXT node.
25683 if (N00.getOpcode() == ISD::LOAD && Subtarget->hasInt256())
25684 if (!ISD::isNormalLoad(N00.getNode()))
25685 return SDValue();
25687 if (N00.getValueType() == MVT::v4i32 && ExtraVT.getSizeInBits() < 128) {
25688 SDValue Tmp = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, MVT::v4i32,
25689 N00, N1);
25690 return DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i64, Tmp);
25691 }
25692 }
25694 return SDValue();
25695 }
25696 static SDValue PerformSExtCombine(SDNode *N, SelectionDAG &DAG,
25697 TargetLowering::DAGCombinerInfo &DCI,
25698 const X86Subtarget *Subtarget) {
25699 SDValue N0 = N->getOperand(0);
25700 EVT VT = N->getValueType(0);
25702 // (i8,i32 sext (sdivrem (i8 x, i8 y))) ->
25703 // (i8,i32 (sdivrem_sext_hreg (i8 x, i8 y)))
25704 // This exposes the sext to the sdivrem lowering, so that it directly extends
25705 // from AH (which we otherwise need to do contortions to access).
25706 if (N0.getOpcode() == ISD::SDIVREM && N0.getResNo() == 1 &&
25707 N0.getValueType() == MVT::i8 && VT == MVT::i32) {
25708 SDLoc dl(N);
25709 SDVTList NodeTys = DAG.getVTList(MVT::i8, VT);
25710 SDValue R = DAG.getNode(X86ISD::SDIVREM8_SEXT_HREG, dl, NodeTys,
25711 N0.getOperand(0), N0.getOperand(1));
25712 DAG.ReplaceAllUsesOfValueWith(N0.getValue(0), R.getValue(0));
25713 return R.getValue(1);
25714 }
25716 if (!DCI.isBeforeLegalizeOps())
25717 return SDValue();
25719 if (!Subtarget->hasFp256())
25720 return SDValue();
25722 if (VT.isVector() && VT.getSizeInBits() == 256) {
25723 SDValue R = WidenMaskArithmetic(N, DAG, DCI, Subtarget);
25724 if (R.getNode())
25725 return R;
25726 }
25728 return SDValue();
25729 }
25731 static SDValue PerformFMACombine(SDNode *N, SelectionDAG &DAG,
25732 const X86Subtarget* Subtarget) {
25733 SDLoc dl(N);
25734 EVT VT = N->getValueType(0);
25736 // Let legalize expand this if it isn't a legal type yet.
25737 if (!DAG.getTargetLoweringInfo().isTypeLegal(VT))
25738 return SDValue();
25740 EVT ScalarVT = VT.getScalarType();
25741 if ((ScalarVT != MVT::f32 && ScalarVT != MVT::f64) ||
25742 (!Subtarget->hasFMA() && !Subtarget->hasFMA4()))
25743 return SDValue();
25745 SDValue A = N->getOperand(0);
25746 SDValue B = N->getOperand(1);
25747 SDValue C = N->getOperand(2);
25749 bool NegA = (A.getOpcode() == ISD::FNEG);
25750 bool NegB = (B.getOpcode() == ISD::FNEG);
25751 bool NegC = (C.getOpcode() == ISD::FNEG);
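25752 // Fold the FNEGs into the FMA opcode: negating exactly one multiplicand flips the sign of the product (NegMul), and NegC picks the add/sub flavor.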
25753 // Negative multiplication when NegA xor NegB
25754 bool NegMul = (NegA != NegB);
25755 if (NegA)
25756 A = A.getOperand(0);
25757 if (NegB)
25758 B = B.getOperand(0);
25759 if (NegC)
25760 C = C.getOperand(0);
25762 unsigned Opcode;
25763 if (!NegMul)
25764 Opcode = (!NegC) ? X86ISD::FMADD : X86ISD::FMSUB;
25765 else
25766 Opcode = (!NegC) ? X86ISD::FNMADD : X86ISD::FNMSUB;
25768 return DAG.getNode(Opcode, dl, VT, A, B, C);
25769 }
25771 static SDValue PerformZExtCombine(SDNode *N, SelectionDAG &DAG,
25772 TargetLowering::DAGCombinerInfo &DCI,
25773 const X86Subtarget *Subtarget) {
25774 // (i32 zext (and (i8 x86isd::setcc_carry), 1)) ->
25775 // (and (i32 x86isd::setcc_carry), 1)
25776 // This eliminates the zext. This transformation is necessary because
25777 // ISD::SETCC is always legalized to i8.
25778 SDLoc dl(N);
25779 SDValue N0 = N->getOperand(0);
25780 EVT VT = N->getValueType(0);
25782 if (N0.getOpcode() == ISD::AND &&
25783 N0.hasOneUse() &&
25784 N0.getOperand(0).hasOneUse()) {
25785 SDValue N00 = N0.getOperand(0);
25786 if (N00.getOpcode() == X86ISD::SETCC_CARRY) {
25787 ConstantSDNode *C = dyn_cast<ConstantSDNode>(N0.getOperand(1));
25788 if (!C || C->getZExtValue() != 1)
25789 return SDValue();
25790 return DAG.getNode(ISD::AND, dl, VT,
25791 DAG.getNode(X86ISD::SETCC_CARRY, dl, VT,
25792 N00.getOperand(0), N00.getOperand(1)),
25793 DAG.getConstant(1, VT));
25794 }
25795 }
25797 if (N0.getOpcode() == ISD::TRUNCATE &&
25798 N0.hasOneUse() &&
25799 N0.getOperand(0).hasOneUse()) {
25800 SDValue N00 = N0.getOperand(0);
25801 if (N00.getOpcode() == X86ISD::SETCC_CARRY) {
25802 return DAG.getNode(ISD::AND, dl, VT,
25803 DAG.getNode(X86ISD::SETCC_CARRY, dl, VT,
25804 N00.getOperand(0), N00.getOperand(1)),
25805 DAG.getConstant(1, VT));
25806 }
25807 }
25808 if (VT.is256BitVector()) {
25809 SDValue R = WidenMaskArithmetic(N, DAG, DCI, Subtarget);
25810 if (R.getNode())
25811 return R;
25812 }
25814 // (i8,i32 zext (udivrem (i8 x, i8 y))) ->
25815 // (i8,i32 (udivrem_zext_hreg (i8 x, i8 y)))
25816 // This exposes the zext to the udivrem lowering, so that it directly extends
25817 // from AH (which we otherwise need to do contortions to access).
25818 if (N0.getOpcode() == ISD::UDIVREM &&
25819 N0.getResNo() == 1 && N0.getValueType() == MVT::i8 &&
25820 (VT == MVT::i32 || VT == MVT::i64)) {
25821 SDVTList NodeTys = DAG.getVTList(MVT::i8, VT);
25822 SDValue R = DAG.getNode(X86ISD::UDIVREM8_ZEXT_HREG, dl, NodeTys,
25823 N0.getOperand(0), N0.getOperand(1));
25824 DAG.ReplaceAllUsesOfValueWith(N0.getValue(0), R.getValue(0));
25825 return R.getValue(1);
25826 }
25828 return SDValue();
25829 }
25831 // Optimize x == -y --> x+y == 0
25832 // x != -y --> x+y != 0
25833 static SDValue PerformISDSETCCCombine(SDNode *N, SelectionDAG &DAG,
25834 const X86Subtarget* Subtarget) {
25835 ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get();
25836 SDValue LHS = N->getOperand(0);
25837 SDValue RHS = N->getOperand(1);
25838 EVT VT = N->getValueType(0);
25839 SDLoc DL(N);
25841 if ((CC == ISD::SETNE || CC == ISD::SETEQ) && LHS.getOpcode() == ISD::SUB)
25842 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(LHS.getOperand(0)))
25843 if (C->getAPIntValue() == 0 && LHS.hasOneUse()) {
25844 SDValue addV = DAG.getNode(ISD::ADD, SDLoc(N),
25845 LHS.getValueType(), RHS, LHS.getOperand(1));
25846 return DAG.getSetCC(SDLoc(N), N->getValueType(0),
25847 addV, DAG.getConstant(0, addV.getValueType()), CC);
25848 }
25849 if ((CC == ISD::SETNE || CC == ISD::SETEQ) && RHS.getOpcode() == ISD::SUB)
25850 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS.getOperand(0)))
25851 if (C->getAPIntValue() == 0 && RHS.hasOneUse()) {
25852 SDValue addV = DAG.getNode(ISD::ADD, SDLoc(N),
25853 RHS.getValueType(), LHS, RHS.getOperand(1));
25854 return DAG.getSetCC(SDLoc(N), N->getValueType(0),
25855 addV, DAG.getConstant(0, addV.getValueType()), CC);
25856 }
25858 if (VT.getScalarType() == MVT::i1) {
25859 bool IsSEXT0 = (LHS.getOpcode() == ISD::SIGN_EXTEND) &&
25860 (LHS.getOperand(0).getValueType().getScalarType() == MVT::i1);
25861 bool IsVZero0 = ISD::isBuildVectorAllZeros(LHS.getNode());
25862 if (!IsSEXT0 && !IsVZero0)
25863 return SDValue();
25864 bool IsSEXT1 = (RHS.getOpcode() == ISD::SIGN_EXTEND) &&
25865 (RHS.getOperand(0).getValueType().getScalarType() == MVT::i1);
25866 bool IsVZero1 = ISD::isBuildVectorAllZeros(RHS.getNode());
25868 if (!IsSEXT1 && !IsVZero1)
25869 return SDValue();
25871 if (IsSEXT0 && IsVZero1) {
25872 assert(VT == LHS.getOperand(0).getValueType() && "Unexpected operand type");
25873 if (CC == ISD::SETEQ)
25874 return DAG.getNOT(DL, LHS.getOperand(0), VT);
25875 return LHS.getOperand(0);
25876 }
25877 if (IsSEXT1 && IsVZero0) {
25878 assert(VT == RHS.getOperand(0).getValueType() && "Unexpected operand type");
25879 if (CC == ISD::SETEQ)
25880 return DAG.getNOT(DL, RHS.getOperand(0), VT);
25881 return RHS.getOperand(0);
25882 }
25883 }
25885 return SDValue();
25886 }
25888 static SDValue PerformINSERTPSCombine(SDNode *N, SelectionDAG &DAG,
25889 const X86Subtarget *Subtarget) {
25890 SDLoc dl(N);
25891 MVT VT = N->getOperand(1)->getSimpleValueType(0);
25892 assert((VT == MVT::v4f32 || VT == MVT::v4i32) &&
25893 "X86insertps is only defined for v4x32");
25895 SDValue Ld = N->getOperand(1);
25896 if (MayFoldLoad(Ld)) {
25897 // Extract the countS bits from the immediate so we can get the proper
25898 // address when narrowing the vector load to a specific element.
25899 // When the second source op is a memory address, insertps doesn't use
25900 // countS and just gets an f32 from that address.
25901 unsigned DestIndex =
25902 cast<ConstantSDNode>(N->getOperand(2))->getZExtValue() >> 6;
25903 Ld = NarrowVectorLoadToElement(cast<LoadSDNode>(Ld), DestIndex, DAG);
25904 } else
25905 return SDValue();
25907 // Create this as a scalar to vector to match the instruction pattern.
25908 SDValue LoadScalarToVector = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Ld);
25909 // countS bits are ignored when loading from memory on insertps, which
25910 // means we don't need to explicitly set them to 0.
25911 return DAG.getNode(X86ISD::INSERTPS, dl, VT, N->getOperand(0),
25912 LoadScalarToVector, N->getOperand(2));
25913 }
25915 // Helper function of PerformSETCCCombine. It is to materialize "setb reg"
25916 // as "sbb reg,reg", since it can be extended without zext and produces
25917 // an all-ones bit which is more useful than 0/1 in some cases.
25918 static SDValue MaterializeSETB(SDLoc DL, SDValue EFLAGS, SelectionDAG &DAG,
25919 MVT VT) {
25920 if (VT == MVT::i8)
25921 return DAG.getNode(ISD::AND, DL, VT,
25922 DAG.getNode(X86ISD::SETCC_CARRY, DL, MVT::i8,
25923 DAG.getConstant(X86::COND_B, MVT::i8), EFLAGS),
25924 DAG.getConstant(1, VT));
25925 assert (VT == MVT::i1 && "Unexpected type for SETCC node");
25926 return DAG.getNode(ISD::TRUNCATE, DL, MVT::i1,
25927 DAG.getNode(X86ISD::SETCC_CARRY, DL, MVT::i8,
25928 DAG.getConstant(X86::COND_B, MVT::i8), EFLAGS));
25929 }
25931 // Optimize RES = X86ISD::SETCC CONDCODE, EFLAG_INPUT
25932 static SDValue PerformSETCCCombine(SDNode *N, SelectionDAG &DAG,
25933 TargetLowering::DAGCombinerInfo &DCI,
25934 const X86Subtarget *Subtarget) {
25935 SDLoc DL(N);
25936 X86::CondCode CC = X86::CondCode(N->getConstantOperandVal(0));
25937 SDValue EFLAGS = N->getOperand(1);
25939 if (CC == X86::COND_A) {
25940 // Try to convert COND_A into COND_B in an attempt to facilitate
25941 // materializing "setb reg".
25943 // Do not flip "e > c", where "c" is a constant, because Cmp instruction
25944 // cannot take an immediate as its first operand.
25946 if (EFLAGS.getOpcode() == X86ISD::SUB && EFLAGS.hasOneUse() &&
25947 EFLAGS.getValueType().isInteger() &&
25948 !isa<ConstantSDNode>(EFLAGS.getOperand(1))) {
25949 SDValue NewSub = DAG.getNode(X86ISD::SUB, SDLoc(EFLAGS),
25950 EFLAGS.getNode()->getVTList(),
25951 EFLAGS.getOperand(1), EFLAGS.getOperand(0));
25952 SDValue NewEFLAGS = SDValue(NewSub.getNode(), EFLAGS.getResNo());
25953 return MaterializeSETB(DL, NewEFLAGS, DAG, N->getSimpleValueType(0));
25954 }
25955 }
25957 // Materialize "setb reg" as "sbb reg,reg", since it can be extended without
25958 // a zext and produces an all-ones bit which is more useful than 0/1 in some
25959 // cases.
25960 if (CC == X86::COND_B)
25961 return MaterializeSETB(DL, EFLAGS, DAG, N->getSimpleValueType(0));
25963 SDValue Flags;
25965 Flags = checkBoolTestSetCCCombine(EFLAGS, CC);
25966 if (Flags.getNode()) {
25967 SDValue Cond = DAG.getConstant(CC, MVT::i8);
25968 return DAG.getNode(X86ISD::SETCC, DL, N->getVTList(), Cond, Flags);
25969 }
25971 return SDValue();
25972 }
25974 // Optimize branch condition evaluation.
25976 static SDValue PerformBrCondCombine(SDNode *N, SelectionDAG &DAG,
25977 TargetLowering::DAGCombinerInfo &DCI,
25978 const X86Subtarget *Subtarget) {
25979 SDLoc DL(N);
25980 SDValue Chain = N->getOperand(0);
25981 SDValue Dest = N->getOperand(1);
25982 SDValue EFLAGS = N->getOperand(3);
25983 X86::CondCode CC = X86::CondCode(N->getConstantOperandVal(2));
25985 SDValue Flags;
25987 Flags = checkBoolTestSetCCCombine(EFLAGS, CC);
25988 if (Flags.getNode()) {
25989 SDValue Cond = DAG.getConstant(CC, MVT::i8);
25990 return DAG.getNode(X86ISD::BRCOND, DL, N->getVTList(), Chain, Dest, Cond,
25991 Flags);
25992 }
25994 return SDValue();
25995 }
25997 static SDValue performVectorCompareAndMaskUnaryOpCombine(SDNode *N,
25998 SelectionDAG &DAG) {
25999 // Take advantage of vector comparisons producing 0 or -1 in each lane to
26000 // optimize away operation when it's from a constant.
26002 // The general transformation is:
26003 // UNARYOP(AND(VECTOR_CMP(x,y), constant)) -->
26004 // AND(VECTOR_CMP(x,y), constant2)
26005 // constant2 = UNARYOP(constant)
26007 // Early exit if this isn't a vector operation, the operand of the
26008 // unary operation isn't a bitwise AND, or if the sizes of the operations
26009 // aren't the same.
26010 EVT VT = N->getValueType(0);
26011 if (!VT.isVector() || N->getOperand(0)->getOpcode() != ISD::AND ||
26012 N->getOperand(0)->getOperand(0)->getOpcode() != ISD::SETCC ||
26013 VT.getSizeInBits() != N->getOperand(0)->getValueType(0).getSizeInBits())
26014 return SDValue();
26016 // Now check that the other operand of the AND is a constant. We could
26017 // make the transformation for non-constant splats as well, but it's unclear
26018 // that would be a benefit as it would not eliminate any operations, just
26019 // perform one more step in scalar code before moving to the vector unit.
26020 if (BuildVectorSDNode *BV =
26021 dyn_cast<BuildVectorSDNode>(N->getOperand(0)->getOperand(1))) {
26022 // Bail out if the vector isn't a constant.
26023 if (!BV->isConstant())
26024 return SDValue();
26026 // Everything checks out. Build up the new and improved node.
26027 SDLoc DL(N);
26028 EVT IntVT = BV->getValueType(0);
26029 // Create a new constant of the appropriate type for the transformed
26030 // DAG.
26031 SDValue SourceConst = DAG.getNode(N->getOpcode(), DL, VT, SDValue(BV, 0));
26032 // The AND node needs bitcasts to/from an integer vector type around it.
26033 SDValue MaskConst = DAG.getNode(ISD::BITCAST, DL, IntVT, SourceConst);
26034 SDValue NewAnd = DAG.getNode(ISD::AND, DL, IntVT,
26035 N->getOperand(0)->getOperand(0), MaskConst);
26036 SDValue Res = DAG.getNode(ISD::BITCAST, DL, VT, NewAnd);
26037 return Res;
26038 }
26040 return SDValue();
26041 }
26043 static SDValue PerformSINT_TO_FPCombine(SDNode *N, SelectionDAG &DAG,
26044 const X86Subtarget *Subtarget) {
26045 // First try to optimize away the conversion entirely when it's
26046 // conditionally from a constant. Vectors only.
26047 SDValue Res = performVectorCompareAndMaskUnaryOpCombine(N, DAG);
26048 if (Res != SDValue())
26049 return Res;
26051 // Now move on to more general possibilities.
26052 SDValue Op0 = N->getOperand(0);
26053 EVT InVT = Op0->getValueType(0);
26055 // SINT_TO_FP(v4i8) -> SINT_TO_FP(SEXT(v4i8 to v4i32))
26056 if (InVT == MVT::v8i8 || InVT == MVT::v4i8) {
26057 SDLoc dl(N);
26058 MVT DstVT = InVT == MVT::v4i8 ? MVT::v4i32 : MVT::v8i32;
26059 SDValue P = DAG.getNode(ISD::SIGN_EXTEND, dl, DstVT, Op0);
26060 return DAG.getNode(ISD::SINT_TO_FP, dl, N->getValueType(0), P);
26061 }
26063 // Transform (SINT_TO_FP (i64 ...)) into an x87 operation if we have
26064 // a 32-bit target where SSE doesn't support i64->FP operations.
26065 if (Op0.getOpcode() == ISD::LOAD) {
26066 LoadSDNode *Ld = cast<LoadSDNode>(Op0.getNode());
26067 EVT VT = Ld->getValueType(0);
26068 if (!Ld->isVolatile() && !N->getValueType(0).isVector() &&
26069 ISD::isNON_EXTLoad(Op0.getNode()) && Op0.hasOneUse() &&
26070 !Subtarget->is64Bit() && VT == MVT::i64) {
26071 SDValue FILDChain = Subtarget->getTargetLowering()->BuildFILD(
26072 SDValue(N, 0), Ld->getValueType(0), Ld->getChain(), Op0, DAG);
26073 DAG.ReplaceAllUsesOfValueWith(Op0.getValue(1), FILDChain.getValue(1));
26074 return FILDChain;
26075 }
26076 }
26078 return SDValue();
26079 }
26080 // Optimize RES, EFLAGS = X86ISD::ADC LHS, RHS, EFLAGS
26081 static SDValue PerformADCCombine(SDNode *N, SelectionDAG &DAG,
26082 X86TargetLowering::DAGCombinerInfo &DCI) {
26083 // If the LHS and RHS of the ADC node are zero, then it can't overflow and
26084 // the result is either zero or one (depending on the input carry bit).
26085 // Strength reduce this down to a "set on carry" aka SETCC_CARRY&1.
26086 if (X86::isZeroNode(N->getOperand(0)) &&
26087 X86::isZeroNode(N->getOperand(1)) &&
26088 // We don't have a good way to replace an EFLAGS use, so only do this when
26089 // the EFLAGS result isn't used.
26090 SDValue(N, 1).use_empty()) {
26091 SDLoc DL(N);
26092 EVT VT = N->getValueType(0);
26093 SDValue CarryOut = DAG.getConstant(0, N->getValueType(1));
26094 SDValue Res1 = DAG.getNode(ISD::AND, DL, VT,
26095 DAG.getNode(X86ISD::SETCC_CARRY, DL, VT,
26096 DAG.getConstant(X86::COND_B,MVT::i8),
26097 N->getOperand(2)),
26098 DAG.getConstant(1, VT));
26099 return DCI.CombineTo(N, Res1, CarryOut);
26100 }
26102 return SDValue();
26103 }
26105 // fold (add Y, (sete X, 0)) -> adc 0, Y
26106 // (add Y, (setne X, 0)) -> sbb -1, Y
26107 // (sub (sete X, 0), Y) -> sbb 0, Y
26108 // (sub (setne X, 0), Y) -> adc -1, Y
26109 static SDValue OptimizeConditionalInDecrement(SDNode *N, SelectionDAG &DAG) {
26110 SDLoc DL(N);
26112 // Look through ZExts.
26113 SDValue Ext = N->getOperand(N->getOpcode() == ISD::SUB ? 1 : 0);
26114 if (Ext.getOpcode() != ISD::ZERO_EXTEND || !Ext.hasOneUse())
26115 return SDValue();
26117 SDValue SetCC = Ext.getOperand(0);
26118 if (SetCC.getOpcode() != X86ISD::SETCC || !SetCC.hasOneUse())
26119 return SDValue();
26121 X86::CondCode CC = (X86::CondCode)SetCC.getConstantOperandVal(0);
26122 if (CC != X86::COND_E && CC != X86::COND_NE)
26123 return SDValue();
26125 SDValue Cmp = SetCC.getOperand(1);
26126 if (Cmp.getOpcode() != X86ISD::CMP || !Cmp.hasOneUse() ||
26127 !X86::isZeroNode(Cmp.getOperand(1)) ||
26128 !Cmp.getOperand(0).getValueType().isInteger())
26129 return SDValue();
26131 SDValue CmpOp0 = Cmp.getOperand(0);
26132 SDValue NewCmp = DAG.getNode(X86ISD::CMP, DL, MVT::i32, CmpOp0,
26133 DAG.getConstant(1, CmpOp0.getValueType()));
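26134 // CMP X, 1 sets the carry flag exactly when X == 0 (unsigned borrow), which the ADC/SBB below consume.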
26135 SDValue OtherVal = N->getOperand(N->getOpcode() == ISD::SUB ? 0 : 1);
26136 if (CC == X86::COND_NE)
26137 return DAG.getNode(N->getOpcode() == ISD::SUB ? X86ISD::ADC : X86ISD::SBB,
26138 DL, OtherVal.getValueType(), OtherVal,
26139 DAG.getConstant(-1ULL, OtherVal.getValueType()), NewCmp);
26140 return DAG.getNode(N->getOpcode() == ISD::SUB ? X86ISD::SBB : X86ISD::ADC,
26141 DL, OtherVal.getValueType(), OtherVal,
26142 DAG.getConstant(0, OtherVal.getValueType()), NewCmp);
26143 }
26145 /// PerformAddCombine - Do target-specific dag combines on integer adds.
26146 static SDValue PerformAddCombine(SDNode *N, SelectionDAG &DAG,
26147 const X86Subtarget *Subtarget) {
26148 EVT VT = N->getValueType(0);
26149 SDValue Op0 = N->getOperand(0);
26150 SDValue Op1 = N->getOperand(1);
26152 // Try to synthesize horizontal adds from adds of shuffles.
26153 if (((Subtarget->hasSSSE3() && (VT == MVT::v8i16 || VT == MVT::v4i32)) ||
26154 (Subtarget->hasInt256() && (VT == MVT::v16i16 || VT == MVT::v8i32))) &&
26155 isHorizontalBinOp(Op0, Op1, true))
26156 return DAG.getNode(X86ISD::HADD, SDLoc(N), VT, Op0, Op1);
26158 return OptimizeConditionalInDecrement(N, DAG);
26159 }
26161 static SDValue PerformSubCombine(SDNode *N, SelectionDAG &DAG,
26162 const X86Subtarget *Subtarget) {
26163 SDValue Op0 = N->getOperand(0);
26164 SDValue Op1 = N->getOperand(1);
26166 // X86 can't encode an immediate LHS of a sub. See if we can push the
26167 // negation into a preceding instruction.
26168 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op0)) {
26169 // If the RHS of the sub is a XOR with one use and a constant, invert the
26170 // immediate. Then add one to the LHS of the sub so we can turn
26171 // X-Y -> X+~Y+1, saving one register.
26172 if (Op1->hasOneUse() && Op1.getOpcode() == ISD::XOR &&
26173 isa<ConstantSDNode>(Op1.getOperand(1))) {
26174 APInt XorC = cast<ConstantSDNode>(Op1.getOperand(1))->getAPIntValue();
26175 EVT VT = Op0.getValueType();
26176 SDValue NewXor = DAG.getNode(ISD::XOR, SDLoc(Op1), VT,
26177 Op1.getOperand(0),
26178 DAG.getConstant(~XorC, VT));
26179 return DAG.getNode(ISD::ADD, SDLoc(N), VT, NewXor,
26180 DAG.getConstant(C->getAPIntValue()+1, VT));
26181 }
26182 }
26184 // Try to synthesize horizontal subs from subs of shuffles. Subtraction is
26185 // not commutative, so the operand pairs must not be accepted in swapped
26186 // order (unlike the add case above).
26186 EVT VT = N->getValueType(0);
26187 if (((Subtarget->hasSSSE3() && (VT == MVT::v8i16 || VT == MVT::v4i32)) ||
26187 (Subtarget->hasInt256() && (VT == MVT::v16i16 || VT == MVT::v8i32))) &&
26188 isHorizontalBinOp(Op0, Op1, false))
26189 return DAG.getNode(X86ISD::HSUB, SDLoc(N), VT, Op0, Op1);
26191 return OptimizeConditionalInDecrement(N, DAG);
26192 }
26194 /// performVZEXTCombine - Performs VZEXT combines
26195 static SDValue performVZEXTCombine(SDNode *N, SelectionDAG &DAG,
26196 TargetLowering::DAGCombinerInfo &DCI,
26197 const X86Subtarget *Subtarget) {
26198 SDLoc DL(N);
26199 MVT VT = N->getSimpleValueType(0);
26200 SDValue Op = N->getOperand(0);
26201 MVT OpVT = Op.getSimpleValueType();
26202 MVT OpEltVT = OpVT.getVectorElementType();
26203 unsigned InputBits = OpEltVT.getSizeInBits() * VT.getVectorNumElements();
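26204 // InputBits counts how many low bits of the operand actually feed the vzext result.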
26205 // (vzext (bitcast (vzext x))) -> (vzext x)
26206 SDValue V = Op;
26207 while (V.getOpcode() == ISD::BITCAST)
26208 V = V.getOperand(0);
26210 if (V != Op && V.getOpcode() == X86ISD::VZEXT) {
26211 MVT InnerVT = V.getSimpleValueType();
26212 MVT InnerEltVT = InnerVT.getVectorElementType();
26214 // If the element sizes match exactly, we can just do one larger vzext. This
26215 // is always an exact type match as vzext operates on integer types.
26216 if (OpEltVT == InnerEltVT) {
26217 assert(OpVT == InnerVT && "Types must match for vzext!");
26218 return DAG.getNode(X86ISD::VZEXT, DL, VT, V.getOperand(0));
26219 }
26221 // The only other way we can combine them is if only a single element of the
26222 // inner vzext is used in the input to the outer vzext.
26223 if (InnerEltVT.getSizeInBits() < InputBits)
26224 return SDValue();
26226 // In this case, the inner vzext is completely dead because we're going to
26227 // only look at bits inside of the low element. Just do the outer vzext on
26228 // a bitcast of the input to the inner.
26229 return DAG.getNode(X86ISD::VZEXT, DL, VT,
26230 DAG.getNode(ISD::BITCAST, DL, OpVT, V));
26231 }
26233 // Check if we can bypass extracting and re-inserting an element of an input
26234 // vector. Essentially:
26235 // (bitcast (sclr2vec (ext_vec_elt x))) -> (bitcast x)
26236 if (V.getOpcode() == ISD::SCALAR_TO_VECTOR &&
26237 V.getOperand(0).getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
26238 V.getOperand(0).getSimpleValueType().getSizeInBits() == InputBits) {
26239 SDValue ExtractedV = V.getOperand(0);
26240 SDValue OrigV = ExtractedV.getOperand(0);
26241 if (auto *ExtractIdx = dyn_cast<ConstantSDNode>(ExtractedV.getOperand(1)))
26242 if (ExtractIdx->getZExtValue() == 0) {
26243 MVT OrigVT = OrigV.getSimpleValueType();
26244 // Extract a subvector if necessary...
26245 if (OrigVT.getSizeInBits() > OpVT.getSizeInBits()) {
26246 int Ratio = OrigVT.getSizeInBits() / OpVT.getSizeInBits();
26247 OrigVT = MVT::getVectorVT(OrigVT.getVectorElementType(),
26248 OrigVT.getVectorNumElements() / Ratio);
26249 OrigV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, OrigVT, OrigV,
26250 DAG.getIntPtrConstant(0));
26251 }
26252 Op = DAG.getNode(ISD::BITCAST, DL, OpVT, OrigV);
26253 return DAG.getNode(X86ISD::VZEXT, DL, VT, Op);
26254 }
26255 }
26257 return SDValue();
26258 }
26260 SDValue X86TargetLowering::PerformDAGCombine(SDNode *N,
26261 DAGCombinerInfo &DCI) const {
26262 SelectionDAG &DAG = DCI.DAG;
26263 switch (N->getOpcode()) {
26264 default: break;
26265 case ISD::EXTRACT_VECTOR_ELT:
26266 return PerformEXTRACT_VECTOR_ELTCombine(N, DAG, DCI);
26267 case ISD::VSELECT:
26268 case ISD::SELECT:
26269 case X86ISD::SHRUNKBLEND:
26270 return PerformSELECTCombine(N, DAG, DCI, Subtarget);
26271 case ISD::BITCAST: return PerformBITCASTCombine(N, DAG);
26272 case X86ISD::CMOV: return PerformCMOVCombine(N, DAG, DCI, Subtarget);
26273 case ISD::ADD: return PerformAddCombine(N, DAG, Subtarget);
26274 case ISD::SUB: return PerformSubCombine(N, DAG, Subtarget);
26275 case X86ISD::ADC: return PerformADCCombine(N, DAG, DCI);
26276 case ISD::MUL: return PerformMulCombine(N, DAG, DCI);
26277 case ISD::SHL:
26278 case ISD::SRA:
26279 case ISD::SRL: return PerformShiftCombine(N, DAG, DCI, Subtarget);
26280 case ISD::AND: return PerformAndCombine(N, DAG, DCI, Subtarget);
26281 case ISD::OR: return PerformOrCombine(N, DAG, DCI, Subtarget);
26282 case ISD::XOR: return PerformXorCombine(N, DAG, DCI, Subtarget);
26283 case ISD::LOAD: return PerformLOADCombine(N, DAG, DCI, Subtarget);
26284 case ISD::MLOAD: return PerformMLOADCombine(N, DAG, DCI, Subtarget);
26285 case ISD::STORE: return PerformSTORECombine(N, DAG, Subtarget);
26286 case ISD::MSTORE: return PerformMSTORECombine(N, DAG, Subtarget);
26287 case ISD::SINT_TO_FP: return PerformSINT_TO_FPCombine(N, DAG, Subtarget);
26288 case ISD::FADD: return PerformFADDCombine(N, DAG, Subtarget);
26289 case ISD::FSUB: return PerformFSUBCombine(N, DAG, Subtarget);
26290 case X86ISD::FXOR:
26291 case X86ISD::FOR: return PerformFORCombine(N, DAG);
26292 case X86ISD::FMIN:
26293 case X86ISD::FMAX: return PerformFMinFMaxCombine(N, DAG);
26294 case X86ISD::FAND: return PerformFANDCombine(N, DAG);
26295 case X86ISD::FANDN: return PerformFANDNCombine(N, DAG);
26296 case X86ISD::BT: return PerformBTCombine(N, DAG, DCI);
26297 case X86ISD::VZEXT_MOVL: return PerformVZEXT_MOVLCombine(N, DAG);
26298 case ISD::ANY_EXTEND:
26299 case ISD::ZERO_EXTEND: return PerformZExtCombine(N, DAG, DCI, Subtarget);
26300 case ISD::SIGN_EXTEND: return PerformSExtCombine(N, DAG, DCI, Subtarget);
26301 case ISD::SIGN_EXTEND_INREG:
26302 return PerformSIGN_EXTEND_INREGCombine(N, DAG, Subtarget);
26303 case ISD::TRUNCATE: return PerformTruncateCombine(N, DAG,DCI,Subtarget);
26304 case ISD::SETCC: return PerformISDSETCCCombine(N, DAG, Subtarget);
26305 case X86ISD::SETCC: return PerformSETCCCombine(N, DAG, DCI, Subtarget);
26306 case X86ISD::BRCOND: return PerformBrCondCombine(N, DAG, DCI, Subtarget);
26307 case X86ISD::VZEXT: return performVZEXTCombine(N, DAG, DCI, Subtarget);
26308 case X86ISD::SHUFP: // Handle all target specific shuffles
26309 case X86ISD::PALIGNR:
26310 case X86ISD::UNPCKH:
26311 case X86ISD::UNPCKL:
26312 case X86ISD::MOVHLPS:
26313 case X86ISD::MOVLHPS:
26314 case X86ISD::PSHUFB:
26315 case X86ISD::PSHUFD:
26316 case X86ISD::PSHUFHW:
26317 case X86ISD::PSHUFLW:
26318 case X86ISD::MOVSS:
26319 case X86ISD::MOVSD:
26320 case X86ISD::VPERMILPI:
26321 case X86ISD::VPERM2X128:
26322 case ISD::VECTOR_SHUFFLE: return PerformShuffleCombine(N, DAG, DCI,Subtarget);
26323 case ISD::FMA: return PerformFMACombine(N, DAG, Subtarget);
26324 case ISD::INTRINSIC_WO_CHAIN:
26325 return PerformINTRINSIC_WO_CHAINCombine(N, DAG, Subtarget);
26326 case X86ISD::INSERTPS: {
26327 if (getTargetMachine().getOptLevel() > CodeGenOpt::None)
26328 return PerformINSERTPSCombine(N, DAG, Subtarget);
26329 break;
26330 }
26331 case ISD::BUILD_VECTOR: return PerformBUILD_VECTORCombine(N, DAG, Subtarget);
26332 }
26334 return SDValue();
26335 }
26337 /// isTypeDesirableForOp - Return true if the target has native support for
26338 /// the specified value type and it is 'desirable' to use the type for the
26339 /// given node type. e.g. On x86 i16 is legal, but undesirable since i16
26340 /// instruction encodings are longer and some i16 instructions are slow.
26341 bool X86TargetLowering::isTypeDesirableForOp(unsigned Opc, EVT VT) const {
26342 if (!isTypeLegal(VT))
26343 return false;
26344 if (VT != MVT::i16)
26345 return true;
26347 switch (Opc) {
26348 default:
26349 return true;
26350 case ISD::LOAD:
26351 case ISD::SIGN_EXTEND:
26352 case ISD::ZERO_EXTEND:
26353 case ISD::ANY_EXTEND:
26354 return false;
26355 case ISD::SHL:
26356 case ISD::SRL:
26357 case ISD::SUB:
26358 case ISD::ADD:
26359 case ISD::MUL:
26360 case ISD::AND:
26361 case ISD::OR:
26362 case ISD::XOR:
26363 return true;
26364 }
26365 }
26366 /// IsDesirableToPromoteOp - This method queries the target whether it is
26367 /// beneficial for dag combiner to promote the specified node. If true, it
26368 /// should return the desired promotion type by reference.
26369 bool X86TargetLowering::IsDesirableToPromoteOp(SDValue Op, EVT &PVT) const {
26370 EVT VT = Op.getValueType();
26371 if (VT != MVT::i16)
26372 return false;
26374 bool Promote = false;
26375 bool Commute = false;
26376 switch (Op.getOpcode()) {
26377 default: break;
26378 case ISD::LOAD: {
26379 LoadSDNode *LD = cast<LoadSDNode>(Op);
26380 // If the non-extending load has a single use and it's not live out, then it
26381 // might be folded.
26382 if (LD->getExtensionType() == ISD::NON_EXTLOAD /*&&
26383 Op.hasOneUse()*/) {
26384 for (SDNode::use_iterator UI = Op.getNode()->use_begin(),
26385 UE = Op.getNode()->use_end(); UI != UE; ++UI) {
26386 // The only case where we'd want to promote LOAD (rather than it being
26387 // promoted as an operand) is when its only use is liveout.
26388 if (UI->getOpcode() != ISD::CopyToReg)
26389 return false;
26390 }
26391 }
26392 Promote = true;
26393 break;
26394 }
26395 case ISD::SIGN_EXTEND:
26396 case ISD::ZERO_EXTEND:
26397 case ISD::ANY_EXTEND:
26398 Promote = true;
26399 break;
26400 case ISD::SHL:
26401 case ISD::SRL: {
26402 SDValue N0 = Op.getOperand(0);
26403 // Look out for (store (shl (load), x)).
26404 if (MayFoldLoad(N0) && MayFoldIntoStore(Op))
26405 return false;
26406 Promote = true;
26407 break;
26408 }
26409 case ISD::ADD:
26410 case ISD::MUL:
26411 case ISD::AND:
26412 case ISD::OR:
26413 case ISD::XOR:
26414 Commute = true;
26415 // fallthrough
26416 case ISD::SUB: {
26417 SDValue N0 = Op.getOperand(0);
26418 SDValue N1 = Op.getOperand(1);
26419 if (!Commute && MayFoldLoad(N1))
26420 return false;
26421 // Avoid disabling potential load folding opportunities.
26422 if (MayFoldLoad(N0) && (!isa<ConstantSDNode>(N1) || MayFoldIntoStore(Op)))
26423 return false;
26424 if (MayFoldLoad(N1) && (!isa<ConstantSDNode>(N0) || MayFoldIntoStore(Op)))
26425 return false;
26426 Promote = true;
26427 }
26428 }
26430 PVT = MVT::i32;
26431 return Promote;
26432 }
26434 //===----------------------------------------------------------------------===//
26435 // X86 Inline Assembly Support
26436 //===----------------------------------------------------------------------===//
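26437 // The matchers below recognize simple bswap-equivalent inline-asm blobs so ExpandInlineAsm can replace them with the llvm.bswap intrinsic.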
26438 namespace {
26439 // Helper to match a string separated by whitespace.
26440 bool matchAsmImpl(StringRef s, ArrayRef<const StringRef *> args) {
26441 s = s.substr(s.find_first_not_of(" \t")); // Skip leading whitespace.
26443 for (unsigned i = 0, e = args.size(); i != e; ++i) {
26444 StringRef piece(*args[i]);
26445 if (!s.startswith(piece)) // Check if the piece matches.
26446 return false;
26448 s = s.substr(piece.size());
26449 StringRef::size_type pos = s.find_first_not_of(" \t");
26450 if (pos == 0) // We matched a prefix.
26451 return false;
26453 s = s.substr(pos);
26454 }
26456 return s.empty();
26457 }
26458 const VariadicFunction1<bool, StringRef, StringRef, matchAsmImpl> matchAsm={};
26459 } // namespace
26461 static bool clobbersFlagRegisters(const SmallVector<StringRef, 4> &AsmPieces) {
26463 if (AsmPieces.size() == 3 || AsmPieces.size() == 4) {
26464 if (std::count(AsmPieces.begin(), AsmPieces.end(), "~{cc}") &&
26465 std::count(AsmPieces.begin(), AsmPieces.end(), "~{flags}") &&
26466 std::count(AsmPieces.begin(), AsmPieces.end(), "~{fpsr}")) {
26468 if (AsmPieces.size() == 3)
26469 return true;
26470 else if (std::count(AsmPieces.begin(), AsmPieces.end(), "~{dirflag}"))
26471 return true;
26472 }
26473 }
26474 return false;
26475 }
26477 bool X86TargetLowering::ExpandInlineAsm(CallInst *CI) const {
26478 InlineAsm *IA = cast<InlineAsm>(CI->getCalledValue());
26480 std::string AsmStr = IA->getAsmString();
26482 IntegerType *Ty = dyn_cast<IntegerType>(CI->getType());
26483 if (!Ty || Ty->getBitWidth() % 16 != 0)
26484 return false;
26486 // TODO: should remove alternatives from the asmstring: "foo {a|b}" -> "foo a"
26487 SmallVector<StringRef, 4> AsmPieces;
26488 SplitString(AsmStr, AsmPieces, ";\n");
26490 switch (AsmPieces.size()) {
26491 default: return false;
26492 case 1:
26493 // FIXME: this should verify that we are targeting a 486 or better. If not,
26494 // we will turn this bswap into something that will be lowered to logical
26495 // ops instead of emitting the bswap asm. For now, we don't support 486 or
26496 // lower so don't worry about this.
26497 // bswap $0
26498 if (matchAsm(AsmPieces[0], "bswap", "$0") ||
26499 matchAsm(AsmPieces[0], "bswapl", "$0") ||
26500 matchAsm(AsmPieces[0], "bswapq", "$0") ||
26501 matchAsm(AsmPieces[0], "bswap", "${0:q}") ||
26502 matchAsm(AsmPieces[0], "bswapl", "${0:q}") ||
26503 matchAsm(AsmPieces[0], "bswapq", "${0:q}")) {
26504 // No need to check constraints, nothing other than the equivalent of
26505 // "=r,0" would be valid here.
26506 return IntrinsicLowering::LowerToByteSwap(CI);
26507 }
26509 // rorw $$8, ${0:w} --> llvm.bswap.i16
26510 if (CI->getType()->isIntegerTy(16) &&
26511 IA->getConstraintString().compare(0, 5, "=r,0,") == 0 &&
26512 (matchAsm(AsmPieces[0], "rorw", "$$8,", "${0:w}") ||
26513 matchAsm(AsmPieces[0], "rolw", "$$8,", "${0:w}"))) {
26514 AsmPieces.clear();
26515 const std::string &ConstraintsStr = IA->getConstraintString();
26516 SplitString(StringRef(ConstraintsStr).substr(5), AsmPieces, ",");
26517 array_pod_sort(AsmPieces.begin(), AsmPieces.end());
26518 if (clobbersFlagRegisters(AsmPieces))
26519 return IntrinsicLowering::LowerToByteSwap(CI);
26520 }
26521 break;
26522 case 3:
26523 if (CI->getType()->isIntegerTy(32) &&
26524 IA->getConstraintString().compare(0, 5, "=r,0,") == 0 &&
26525 matchAsm(AsmPieces[0], "rorw", "$$8,", "${0:w}") &&
26526 matchAsm(AsmPieces[1], "rorl", "$$16,", "$0") &&
26527 matchAsm(AsmPieces[2], "rorw", "$$8,", "${0:w}")) {
26528 AsmPieces.clear();
26529 const std::string &ConstraintsStr = IA->getConstraintString();
26530 SplitString(StringRef(ConstraintsStr).substr(5), AsmPieces, ",");
26531 array_pod_sort(AsmPieces.begin(), AsmPieces.end());
26532 if (clobbersFlagRegisters(AsmPieces))
26533 return IntrinsicLowering::LowerToByteSwap(CI);
26534 }
26536 if (CI->getType()->isIntegerTy(64)) {
26537 InlineAsm::ConstraintInfoVector Constraints = IA->ParseConstraints();
26538 if (Constraints.size() >= 2 &&
26539 Constraints[0].Codes.size() == 1 && Constraints[0].Codes[0] == "A" &&
26540 Constraints[1].Codes.size() == 1 && Constraints[1].Codes[0] == "0") {
26541 // bswap %eax / bswap %edx / xchgl %eax, %edx -> llvm.bswap.i64
26542 if (matchAsm(AsmPieces[0], "bswap", "%eax") &&
26543 matchAsm(AsmPieces[1], "bswap", "%edx") &&
26544 matchAsm(AsmPieces[2], "xchgl", "%eax,", "%edx"))
26545 return IntrinsicLowering::LowerToByteSwap(CI);
26546 }
26547 }
26548 break;
26549 }
26551 return false;
26552 }
26553 /// getConstraintType - Given a constraint letter, return the type of
26554 /// constraint it is for this target.
26555 X86TargetLowering::ConstraintType
26556 X86TargetLowering::getConstraintType(const std::string &Constraint) const {
26557 if (Constraint.size() == 1) {
26558 switch (Constraint[0]) {
26559 case 'R':
26560 case 'q':
26561 case 'Q':
26562 case 'f':
26563 case 't':
26564 case 'u':
26565 case 'y':
26566 case 'x':
26567 case 'Y':
26568 case 'l':
26569 return C_RegisterClass;
26570 case 'a':
26571 case 'b':
26572 case 'c':
26573 case 'd':
26574 case 'S':
26575 case 'D':
26576 case 'A':
26577 return C_Register;
26578 case 'I':
26579 case 'J':
26580 case 'K':
26581 case 'L':
26582 case 'M':
26583 case 'N':
26584 case 'G':
26585 case 'C':
26586 case 'e':
26587 case 'Z':
26588 return C_Other;
26589 default:
26590 break;
26591 }
26592 }
26593 return TargetLowering::getConstraintType(Constraint);
26594 }
26596 /// Examine constraint type and operand type and determine a weight value.
26597 /// This object must already have been set up with the operand type
26598 /// and the current alternative constraint selected.
26599 TargetLowering::ConstraintWeight
26600 X86TargetLowering::getSingleConstraintMatchWeight(
26601 AsmOperandInfo &info, const char *constraint) const {
26602 ConstraintWeight weight = CW_Invalid;
26603 Value *CallOperandVal = info.CallOperandVal;
26604 // If we don't have a value, we can't do a match,
26605 // but allow it at the lowest weight.
26606 if (!CallOperandVal)
26607 return CW_Default;
26608 Type *type = CallOperandVal->getType();
26609 // Look at the constraint type.
26610 switch (*constraint) {
26611 default:
26612 weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
26613 break;
26614 case 'R':
26615 case 'q':
26616 case 'Q':
26617 case 'a':
26618 case 'b':
26619 case 'c':
26620 case 'd':
26621 case 'S':
26622 case 'D':
26623 if (CallOperandVal->getType()->isIntegerTy())
26624 weight = CW_SpecificReg;
26625 break;
26626 case 'f':
26627 case 't':
26628 case 'u':
26629 if (type->isFloatingPointTy())
26630 weight = CW_SpecificReg;
26631 break;
26632 case 'y':
26633 if (type->isX86_MMXTy() && Subtarget->hasMMX())
26634 weight = CW_SpecificReg;
26635 break;
26636 case 'x':
26637 case 'Y':
26638 if (((type->getPrimitiveSizeInBits() == 128) && Subtarget->hasSSE1()) ||
26639 ((type->getPrimitiveSizeInBits() == 256) && Subtarget->hasFp256()))
26640 weight = CW_Register;
26641 break;
26642 case 'I':
26643 if (ConstantInt *C = dyn_cast<ConstantInt>(info.CallOperandVal)) {
26644 if (C->getZExtValue() <= 31)
26645 weight = CW_Constant;
26646 }
26647 break;
26648 case 'J':
26649 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
26650 if (C->getZExtValue() <= 63)
26651 weight = CW_Constant;
26652 }
26653 break;
26654 case 'K':
26655 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
26656 if ((C->getSExtValue() >= -0x80) && (C->getSExtValue() <= 0x7f))
26657 weight = CW_Constant;
26658 }
26659 break;
26660 case 'L':
26661 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
26662 if ((C->getZExtValue() == 0xff) || (C->getZExtValue() == 0xffff))
26663 weight = CW_Constant;
26664 }
26665 break;
26666 case 'M':
26667 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
26668 if (C->getZExtValue() <= 3)
26669 weight = CW_Constant;
26670 }
26671 break;
26672 case 'N':
26673 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
26674 if (C->getZExtValue() <= 0xff)
26675 weight = CW_Constant;
26676 }
26677 break;
26678 case 'G':
26679 case 'C':
26680 if (dyn_cast<ConstantFP>(CallOperandVal)) {
26681 weight = CW_Constant;
26682 }
26683 break;
26684 case 'e':
26685 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
26686 if ((C->getSExtValue() >= -0x80000000LL) &&
26687 (C->getSExtValue() <= 0x7fffffffLL))
26688 weight = CW_Constant;
26689 }
26690 break;
26691 case 'Z':
26692 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
26693 if (C->getZExtValue() <= 0xffffffff)
26694 weight = CW_Constant;
26695 }
26696 break;
26697 }
26698 return weight;
26699 }
26701 /// LowerXConstraint - try to replace an X constraint, which matches anything,
26702 /// with another that has more specific requirements based on the type of the
26703 /// corresponding operand.
26704 const char *X86TargetLowering::
26705 LowerXConstraint(EVT ConstraintVT) const {
26706 // FP X constraints get lowered to SSE1/2 registers if available, otherwise
26707 // 'f' like normal targets.
26708 if (ConstraintVT.isFloatingPoint()) {
26709 if (Subtarget->hasSSE2())
26710 return "Y";
26711 if (Subtarget->hasSSE1())
26712 return "x";
26713 }
26715 return TargetLowering::LowerXConstraint(ConstraintVT);
26716 }
/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
/// vector. If it is invalid, don't add anything to Ops.
void X86TargetLowering::LowerAsmOperandForConstraint(SDValue Op,
                                                     std::string &Constraint,
                                                     std::vector<SDValue> &Ops,
                                                     SelectionDAG &DAG) const {
  SDValue Result;

  // Only support length 1 constraints for now.
  if (Constraint.length() > 1) return;

  char ConstraintLetter = Constraint[0];
  switch (ConstraintLetter) {
  default: break;
  case 'I':
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      if (C->getZExtValue() <= 31) {
        Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
        break;
      }
    }
    return;
  case 'J':
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      if (C->getZExtValue() <= 63) {
        Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
        break;
      }
    }
    return;
  case 'K':
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      if (isInt<8>(C->getSExtValue())) {
        Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
        break;
      }
    }
    return;
  case 'L':
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      if (C->getZExtValue() == 0xff || C->getZExtValue() == 0xffff ||
          (Subtarget->is64Bit() && C->getZExtValue() == 0xffffffff)) {
        Result = DAG.getTargetConstant(C->getSExtValue(), Op.getValueType());
        break;
      }
    }
    return;
  case 'M':
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      if (C->getZExtValue() <= 3) {
        Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
        break;
      }
    }
    return;
  case 'N':
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      if (C->getZExtValue() <= 255) {
        Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
        break;
      }
    }
    return;
  case 'O':
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      if (C->getZExtValue() <= 127) {
        Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
        break;
      }
    }
    return;
  case 'e': {
    // 32-bit signed value
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      if (ConstantInt::isValueValidForType(Type::getInt32Ty(*DAG.getContext()),
                                           C->getSExtValue())) {
        // Widen to 64 bits here to get it sign extended.
        Result = DAG.getTargetConstant(C->getSExtValue(), MVT::i64);
        break;
      }
    }
    // FIXME gcc accepts some relocatable values here too, but only in certain
    // memory models; it's complicated.
    return;
  }
  case 'Z': {
    // 32-bit unsigned value
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      if (ConstantInt::isValueValidForType(Type::getInt32Ty(*DAG.getContext()),
                                           C->getZExtValue())) {
        Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
        break;
      }
    }
    // FIXME gcc accepts some relocatable values here too, but only in certain
    // memory models; it's complicated.
    return;
  }
  case 'i': {
    // Literal immediates are always ok.
    if (ConstantSDNode *CST = dyn_cast<ConstantSDNode>(Op)) {
      // Widen to 64 bits here to get it sign extended.
      Result = DAG.getTargetConstant(CST->getSExtValue(), MVT::i64);
      break;
    }

    // In any sort of PIC mode addresses need to be computed at runtime by
    // adding in a register or some sort of table lookup. These can't
    // be used as immediates.
    if (Subtarget->isPICStyleGOT() || Subtarget->isPICStyleStubPIC())
      return;

    // If we are in non-pic codegen mode, we allow the address of a global
    // (with an optional displacement) to be used with 'i'.
    GlobalAddressSDNode *GA = nullptr;
    int64_t Offset = 0;

    // Match either (GA), (GA+C), (GA+C1+C2), etc.
    while (1) {
      if ((GA = dyn_cast<GlobalAddressSDNode>(Op))) {
        Offset += GA->getOffset();
        break;
      } else if (Op.getOpcode() == ISD::ADD) {
        if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
          Offset += C->getZExtValue();
          Op = Op.getOperand(0);
          continue;
        }
      } else if (Op.getOpcode() == ISD::SUB) {
        if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
          Offset += -C->getZExtValue();
          Op = Op.getOperand(0);
          continue;
        }
      }

      // Otherwise, this isn't something we can handle, reject it.
      return;
    }

    const GlobalValue *GV = GA->getGlobal();
    // If we require an extra load to get this address, as in PIC mode, we
    // can't accept it.
    if (isGlobalStubReference(
            Subtarget->ClassifyGlobalReference(GV, DAG.getTarget())))
      return;

    Result = DAG.getTargetGlobalAddress(GV, SDLoc(Op),
                                        GA->getValueType(0), Offset);
    break;
  }
  }

  if (Result.getNode()) {
    Ops.push_back(Result);
    return;
  }
  return TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
}
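// For example, GCC-style asm("outb %0, %1" : : "a"(val), "N"(0x80)) reaches
// this with Op = Constant<128> and Constraint = "N" (illustrative, not from
// this file): 128 <= 255, so a target constant is pushed into Ops; a value of
// 300 would leave Ops empty and the operand would be rejected.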
std::pair<unsigned, const TargetRegisterClass*>
X86TargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
                                                MVT VT) const {
  // First, see if this is a constraint that directly corresponds to an LLVM
  // register class.
  if (Constraint.size() == 1) {
    // GCC Constraint Letters
    switch (Constraint[0]) {
    default: break;
    // TODO: Slight differences here in allocation order and leaving
    // RIP in the class. Do they matter any more here than they do
    // in the normal allocation?
    case 'q':   // GENERAL_REGS in 64-bit mode, Q_REGS in 32-bit mode.
      if (Subtarget->is64Bit()) {
        if (VT == MVT::i32 || VT == MVT::f32)
          return std::make_pair(0U, &X86::GR32RegClass);
        if (VT == MVT::i16)
          return std::make_pair(0U, &X86::GR16RegClass);
        if (VT == MVT::i8 || VT == MVT::i1)
          return std::make_pair(0U, &X86::GR8RegClass);
        if (VT == MVT::i64 || VT == MVT::f64)
          return std::make_pair(0U, &X86::GR64RegClass);
        break;
      }
      // 32-bit fallthrough
    case 'Q':   // Q_REGS
      if (VT == MVT::i32 || VT == MVT::f32)
        return std::make_pair(0U, &X86::GR32_ABCDRegClass);
      if (VT == MVT::i16)
        return std::make_pair(0U, &X86::GR16_ABCDRegClass);
      if (VT == MVT::i8 || VT == MVT::i1)
        return std::make_pair(0U, &X86::GR8_ABCD_LRegClass);
      if (VT == MVT::i64)
        return std::make_pair(0U, &X86::GR64_ABCDRegClass);
      break;
    case 'r':   // GENERAL_REGS
    case 'l':   // INDEX_REGS
      if (VT == MVT::i8 || VT == MVT::i1)
        return std::make_pair(0U, &X86::GR8RegClass);
      if (VT == MVT::i16)
        return std::make_pair(0U, &X86::GR16RegClass);
      if (VT == MVT::i32 || VT == MVT::f32 || !Subtarget->is64Bit())
        return std::make_pair(0U, &X86::GR32RegClass);
      return std::make_pair(0U, &X86::GR64RegClass);
    case 'R':   // LEGACY_REGS
      if (VT == MVT::i8 || VT == MVT::i1)
        return std::make_pair(0U, &X86::GR8_NOREXRegClass);
      if (VT == MVT::i16)
        return std::make_pair(0U, &X86::GR16_NOREXRegClass);
      if (VT == MVT::i32 || !Subtarget->is64Bit())
        return std::make_pair(0U, &X86::GR32_NOREXRegClass);
      return std::make_pair(0U, &X86::GR64_NOREXRegClass);
    case 'f':   // FP Stack registers.
      // If SSE is enabled for this VT, use f80 to ensure the isel moves the
      // value to the correct fpstack register class.
      if (VT == MVT::f32 && !isScalarFPTypeInSSEReg(VT))
        return std::make_pair(0U, &X86::RFP32RegClass);
      if (VT == MVT::f64 && !isScalarFPTypeInSSEReg(VT))
        return std::make_pair(0U, &X86::RFP64RegClass);
      return std::make_pair(0U, &X86::RFP80RegClass);
    case 'y':   // MMX_REGS if MMX allowed.
      if (!Subtarget->hasMMX()) break;
      return std::make_pair(0U, &X86::VR64RegClass);
    case 'Y':   // SSE_REGS if SSE2 allowed
      if (!Subtarget->hasSSE2()) break;
      // FALL THROUGH.
    case 'x':   // SSE_REGS if SSE1 allowed or AVX_REGS if AVX allowed
      if (!Subtarget->hasSSE1()) break;

      switch (VT.SimpleTy) {
      default: break;
      // Scalar SSE types.
      case MVT::f32: case MVT::i32:
        return std::make_pair(0U, &X86::FR32RegClass);
      case MVT::f64: case MVT::i64:
        return std::make_pair(0U, &X86::FR64RegClass);
      // Vector types.
      case MVT::v16i8: case MVT::v8i16: case MVT::v4i32: case MVT::v2i64:
      case MVT::v4f32: case MVT::v2f64:
        return std::make_pair(0U, &X86::VR128RegClass);
      // AVX types.
      case MVT::v32i8: case MVT::v16i16: case MVT::v8i32: case MVT::v4i64:
      case MVT::v8f32: case MVT::v4f64:
        return std::make_pair(0U, &X86::VR256RegClass);
      // AVX-512 types.
      case MVT::v16i32: case MVT::v8i64: case MVT::v16f32: case MVT::v8f64:
        return std::make_pair(0U, &X86::VR512RegClass);
      }
      break;
    }
  }
  // Use the default implementation in TargetLowering to convert the register
  // constraint into a member of a register class.
  std::pair<unsigned, const TargetRegisterClass*> Res;
  Res = TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);
  // Not found as a standard register?
  if (!Res.second) {
    // Map st(0) -> st(7) -> ST0
    if (Constraint.size() == 7 && Constraint[0] == '{' &&
        tolower(Constraint[1]) == 's' &&
        tolower(Constraint[2]) == 't' &&
        Constraint[3] == '(' &&
        (Constraint[4] >= '0' && Constraint[4] <= '7') &&
        Constraint[5] == ')' &&
        Constraint[6] == '}') {
      Res.first = X86::FP0 + Constraint[4] - '0';
      Res.second = &X86::RFP80RegClass;
      return Res;
    }
    // GCC allows "st(0)" to be called just plain "st".
    if (StringRef("{st}").equals_lower(Constraint)) {
      Res.first = X86::FP0;
      Res.second = &X86::RFP80RegClass;
      return Res;
    }

    // flags -> EFLAGS
    if (StringRef("{flags}").equals_lower(Constraint)) {
      Res.first = X86::EFLAGS;
      Res.second = &X86::CCRRegClass;
      return Res;
    }

    // 'A' means EAX + EDX.
    if (Constraint == "A") {
      Res.first = X86::EAX;
      Res.second = &X86::GR32_ADRegClass;
      return Res;
    }
    return Res;
  }
  // Otherwise, check to see if this is a register class of the wrong value
  // type. For example, we want to map "{ax},i32" -> {eax}; we don't want it to
  // turn into {ax},{dx}.
  if (Res.second->hasType(VT))
    return Res;   // Correct type already, nothing to do.
  // All of the single-register GCC register classes map their values onto
  // 16-bit register pieces "ax","dx","cx","bx","si","di","bp","sp". If we
  // really want an 8-bit or 32-bit register, map to the appropriate register
  // class and return the appropriate register.
  if (Res.second == &X86::GR16RegClass) {
    if (VT == MVT::i8 || VT == MVT::i1) {
      unsigned DestReg = 0;
      switch (Res.first) {
      default: break;
      case X86::AX: DestReg = X86::AL; break;
      case X86::DX: DestReg = X86::DL; break;
      case X86::CX: DestReg = X86::CL; break;
      case X86::BX: DestReg = X86::BL; break;
      }
      if (DestReg) {
        Res.first = DestReg;
        Res.second = &X86::GR8RegClass;
      }
    } else if (VT == MVT::i32 || VT == MVT::f32) {
      unsigned DestReg = 0;
      switch (Res.first) {
      default: break;
      case X86::AX: DestReg = X86::EAX; break;
      case X86::DX: DestReg = X86::EDX; break;
      case X86::CX: DestReg = X86::ECX; break;
      case X86::BX: DestReg = X86::EBX; break;
      case X86::SI: DestReg = X86::ESI; break;
      case X86::DI: DestReg = X86::EDI; break;
      case X86::BP: DestReg = X86::EBP; break;
      case X86::SP: DestReg = X86::ESP; break;
      }
      if (DestReg) {
        Res.first = DestReg;
        Res.second = &X86::GR32RegClass;
      }
    } else if (VT == MVT::i64 || VT == MVT::f64) {
      unsigned DestReg = 0;
      switch (Res.first) {
      default: break;
      case X86::AX: DestReg = X86::RAX; break;
      case X86::DX: DestReg = X86::RDX; break;
      case X86::CX: DestReg = X86::RCX; break;
      case X86::BX: DestReg = X86::RBX; break;
      case X86::SI: DestReg = X86::RSI; break;
      case X86::DI: DestReg = X86::RDI; break;
      case X86::BP: DestReg = X86::RBP; break;
      case X86::SP: DestReg = X86::RSP; break;
      }
      if (DestReg) {
        Res.first = DestReg;
        Res.second = &X86::GR64RegClass;
      }
    }
  } else if (Res.second == &X86::FR32RegClass ||
             Res.second == &X86::FR64RegClass ||
             Res.second == &X86::VR128RegClass ||
             Res.second == &X86::VR256RegClass ||
             Res.second == &X86::FR32XRegClass ||
             Res.second == &X86::FR64XRegClass ||
             Res.second == &X86::VR128XRegClass ||
             Res.second == &X86::VR256XRegClass ||
             Res.second == &X86::VR512RegClass) {
    // Handle references to XMM physical registers that got mapped into the
    // wrong class. This can happen with constraints like {xmm0} where the
    // target independent register mapper will just pick the first match it can
    // find, ignoring the required type.
    if (VT == MVT::f32 || VT == MVT::i32)
      Res.second = &X86::FR32RegClass;
    else if (VT == MVT::f64 || VT == MVT::i64)
      Res.second = &X86::FR64RegClass;
    else if (X86::VR128RegClass.hasType(VT))
      Res.second = &X86::VR128RegClass;
    else if (X86::VR256RegClass.hasType(VT))
      Res.second = &X86::VR256RegClass;
    else if (X86::VR512RegClass.hasType(VT))
      Res.second = &X86::VR512RegClass;
  }

  return Res;
}
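// For example, the constraint/type pair ("{ax}", MVT::i32) first resolves to
// AX in GR16RegClass; the GR16 fixup above then rewrites it to (X86::EAX,
// GR32RegClass). Likewise ("{xmm0}", MVT::v4f32) is moved from whatever class
// the generic mapper picked first into VR128RegClass.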
int X86TargetLowering::getScalingFactorCost(const AddrMode &AM,
                                            Type *Ty) const {
  // Scaling factors are not free at all.
  // An indexed folded instruction, i.e., inst (reg1, reg2, scale),
  // will take 2 allocations in the out of order engine instead of 1
  // for plain addressing mode, i.e. inst (reg1).
  // E.g.,
  // vaddps (%rsi,%rdx), %ymm0, %ymm1
  // Requires two allocations (one for the load, one for the computation),
  // whereas:
  // vaddps (%rsi), %ymm0, %ymm1
  // Requires just 1 allocation, i.e., freeing allocations for other operations
  // and having fewer micro operations to execute.
  //
  // For some X86 architectures, this is even worse because, for instance for
  // stores, the complex addressing mode forces the instruction to use the
  // "load" ports instead of the dedicated "store" port.
  // E.g., on Haswell:
  // vmovaps %ymm1, (%r8, %rdi) can use port 2 or 3.
  // vmovaps %ymm1, (%r8) can use port 2, 3, or 7.
  if (isLegalAddressingMode(AM, Ty))
    // Scale represents reg2 * scale, thus account for 1
    // as soon as we use a second register.
    return AM.Scale != 0;
  return -1;
}
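// For example, an AddrMode with HasBaseReg=true and Scale=2 that is legal for
// Ty costs 1 (the scaled index burns a second allocation), the same mode with
// Scale=0 costs 0, and an illegal mode reports -1 so callers treat it as
// unsupported.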
bool X86TargetLowering::isTargetFTOL() const {
  return Subtarget->isTargetKnownWindowsMSVC() && !Subtarget->is64Bit();
}
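// When this returns true, fptoui lowering emits a call to the MSVC runtime
// routine _ftol2 (which returns its result in EDX:EAX) instead of an inline
// SSE/x87 conversion sequence, matching 32-bit MSVC behavior.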