//===-- X86ISelLowering.cpp - X86 DAG Lowering Implementation -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that X86 uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//
15 #include "X86ISelLowering.h"
16 #include "Utils/X86ShuffleDecode.h"
17 #include "X86CallingConv.h"
18 #include "X86FrameLowering.h"
19 #include "X86InstrBuilder.h"
20 #include "X86MachineFunctionInfo.h"
21 #include "X86TargetMachine.h"
22 #include "X86TargetObjectFile.h"
23 #include "llvm/ADT/SmallBitVector.h"
24 #include "llvm/ADT/SmallSet.h"
25 #include "llvm/ADT/Statistic.h"
26 #include "llvm/ADT/StringExtras.h"
27 #include "llvm/ADT/StringSwitch.h"
28 #include "llvm/ADT/VariadicFunction.h"
29 #include "llvm/CodeGen/IntrinsicLowering.h"
30 #include "llvm/CodeGen/MachineFrameInfo.h"
31 #include "llvm/CodeGen/MachineFunction.h"
32 #include "llvm/CodeGen/MachineInstrBuilder.h"
33 #include "llvm/CodeGen/MachineJumpTableInfo.h"
34 #include "llvm/CodeGen/MachineModuleInfo.h"
35 #include "llvm/CodeGen/MachineRegisterInfo.h"
36 #include "llvm/IR/CallSite.h"
37 #include "llvm/IR/CallingConv.h"
38 #include "llvm/IR/Constants.h"
39 #include "llvm/IR/DerivedTypes.h"
40 #include "llvm/IR/Function.h"
41 #include "llvm/IR/GlobalAlias.h"
42 #include "llvm/IR/GlobalVariable.h"
43 #include "llvm/IR/Instructions.h"
44 #include "llvm/IR/Intrinsics.h"
45 #include "llvm/MC/MCAsmInfo.h"
46 #include "llvm/MC/MCContext.h"
47 #include "llvm/MC/MCExpr.h"
48 #include "llvm/MC/MCSymbol.h"
49 #include "llvm/Support/CommandLine.h"
50 #include "llvm/Support/Debug.h"
51 #include "llvm/Support/ErrorHandling.h"
52 #include "llvm/Support/MathExtras.h"
53 #include "llvm/Target/TargetOptions.h"
54 #include "X86IntrinsicsInfo.h"
using namespace llvm;

#define DEBUG_TYPE "x86-isel"
STATISTIC(NumTailCalls, "Number of tail calls");

static cl::opt<bool> ExperimentalVectorWideningLegalization(
    "x86-experimental-vector-widening-legalization", cl::init(false),
    cl::desc("Enable an experimental vector type legalization through widening "
             "rather than promotion."),
    cl::Hidden);

static cl::opt<bool> ExperimentalVectorShuffleLowering(
    "x86-experimental-vector-shuffle-lowering", cl::init(true),
    cl::desc("Enable an experimental vector shuffle lowering code path."),
    cl::Hidden);

static cl::opt<bool> ExperimentalVectorShuffleLegality(
    "x86-experimental-vector-shuffle-legality", cl::init(false),
    cl::desc("Enable experimental shuffle legality based on the experimental "
             "shuffle lowering. Should only be used with the experimental "
             "shuffle lowering."),
    cl::Hidden);

static cl::opt<int> ReciprocalEstimateRefinementSteps(
    "x86-recip-refinement-steps", cl::init(1),
    cl::desc("Specify the number of Newton-Raphson iterations applied to the "
             "result of the hardware reciprocal estimate instruction."),
    cl::NotHidden);
// Forward declarations.
static SDValue getMOVL(SelectionDAG &DAG, SDLoc dl, EVT VT, SDValue V1,
                       SDValue V2);

static SDValue ExtractSubVector(SDValue Vec, unsigned IdxVal,
                                SelectionDAG &DAG, SDLoc dl,
                                unsigned vectorWidth) {
  assert((vectorWidth == 128 || vectorWidth == 256) &&
         "Unsupported vector width");
  EVT VT = Vec.getValueType();
  EVT ElVT = VT.getVectorElementType();
  unsigned Factor = VT.getSizeInBits() / vectorWidth;
  EVT ResultVT = EVT::getVectorVT(*DAG.getContext(), ElVT,
                                  VT.getVectorNumElements() / Factor);

  // Extract from UNDEF is UNDEF.
  if (Vec.getOpcode() == ISD::UNDEF)
    return DAG.getUNDEF(ResultVT);

  // Extract the relevant vectorWidth bits. Generate an EXTRACT_SUBVECTOR.
  unsigned ElemsPerChunk = vectorWidth / ElVT.getSizeInBits();

  // This is the index of the first element of the vectorWidth-bit chunk
  // we want.
  unsigned NormalizedIdxVal = (((IdxVal * ElVT.getSizeInBits()) / vectorWidth)
                               * ElemsPerChunk);
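  // For example, with a v8i32 source and a 128-bit chunk, ElemsPerChunk is 4,
  // so any IdxVal in [4, 8) normalizes to 4: the index is rounded down to the
  // start of the chunk that contains it.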
  // If the input is a buildvector just emit a smaller one.
  if (Vec.getOpcode() == ISD::BUILD_VECTOR)
    return DAG.getNode(ISD::BUILD_VECTOR, dl, ResultVT,
                       makeArrayRef(Vec->op_begin() + NormalizedIdxVal,
                                    ElemsPerChunk));

  SDValue VecIdx = DAG.getIntPtrConstant(NormalizedIdxVal);
  return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, ResultVT, Vec, VecIdx);
}
/// Generate a DAG to grab 128-bits from a vector > 128 bits. This
/// sets things up to match to an AVX VEXTRACTF128 / VEXTRACTI128
/// or AVX-512 VEXTRACTF32x4 / VEXTRACTI32x4
/// instructions or a simple subregister reference. Idx is an index in the
/// 128 bits we want. It need not be aligned to a 128-bit boundary. That makes
/// lowering EXTRACT_VECTOR_ELT operations easier.
static SDValue Extract128BitVector(SDValue Vec, unsigned IdxVal,
                                   SelectionDAG &DAG, SDLoc dl) {
  assert((Vec.getValueType().is256BitVector() ||
          Vec.getValueType().is512BitVector()) && "Unexpected vector size!");
  return ExtractSubVector(Vec, IdxVal, DAG, dl, 128);
}

/// Generate a DAG to grab 256-bits from a 512-bit vector.
static SDValue Extract256BitVector(SDValue Vec, unsigned IdxVal,
                                   SelectionDAG &DAG, SDLoc dl) {
  assert(Vec.getValueType().is512BitVector() && "Unexpected vector size!");
  return ExtractSubVector(Vec, IdxVal, DAG, dl, 256);
}
static SDValue InsertSubVector(SDValue Result, SDValue Vec,
                               unsigned IdxVal, SelectionDAG &DAG,
                               SDLoc dl, unsigned vectorWidth) {
  assert((vectorWidth == 128 || vectorWidth == 256) &&
         "Unsupported vector width");
  // Inserting UNDEF is Result.
  if (Vec.getOpcode() == ISD::UNDEF)
    return Result;
  EVT VT = Vec.getValueType();
  EVT ElVT = VT.getVectorElementType();
  EVT ResultVT = Result.getValueType();

  // Insert the relevant vectorWidth bits.
  unsigned ElemsPerChunk = vectorWidth / ElVT.getSizeInBits();

  // This is the index of the first element of the vectorWidth-bit chunk
  // we want.
  unsigned NormalizedIdxVal = (((IdxVal * ElVT.getSizeInBits()) / vectorWidth)
                               * ElemsPerChunk);

  SDValue VecIdx = DAG.getIntPtrConstant(NormalizedIdxVal);
  return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ResultVT, Result, Vec, VecIdx);
}

/// Generate a DAG to put 128-bits into a vector > 128 bits. This
/// sets things up to match to an AVX VINSERTF128/VINSERTI128 or
/// AVX-512 VINSERTF32x4/VINSERTI32x4 instructions or a
/// simple superregister reference. Idx is an index in the 128 bits
/// we want. It need not be aligned to a 128-bit boundary. That makes
/// lowering INSERT_VECTOR_ELT operations easier.
static SDValue Insert128BitVector(SDValue Result, SDValue Vec, unsigned IdxVal,
                                  SelectionDAG &DAG, SDLoc dl) {
  assert(Vec.getValueType().is128BitVector() && "Unexpected vector size!");
  return InsertSubVector(Result, Vec, IdxVal, DAG, dl, 128);
}

static SDValue Insert256BitVector(SDValue Result, SDValue Vec, unsigned IdxVal,
                                  SelectionDAG &DAG, SDLoc dl) {
  assert(Vec.getValueType().is256BitVector() && "Unexpected vector size!");
  return InsertSubVector(Result, Vec, IdxVal, DAG, dl, 256);
}
/// Concat two 128-bit vectors into a 256-bit vector using VINSERTF128
/// instructions. This is used because creating CONCAT_VECTOR nodes of
/// BUILD_VECTORS returns a larger BUILD_VECTOR while we're trying to lower
/// large BUILD_VECTORS.
static SDValue Concat128BitVectors(SDValue V1, SDValue V2, EVT VT,
                                   unsigned NumElems, SelectionDAG &DAG,
                                   SDLoc dl) {
  SDValue V = Insert128BitVector(DAG.getUNDEF(VT), V1, 0, DAG, dl);
  return Insert128BitVector(V, V2, NumElems/2, DAG, dl);
}

static SDValue Concat256BitVectors(SDValue V1, SDValue V2, EVT VT,
                                   unsigned NumElems, SelectionDAG &DAG,
                                   SDLoc dl) {
  SDValue V = Insert256BitVector(DAG.getUNDEF(VT), V1, 0, DAG, dl);
  return Insert256BitVector(V, V2, NumElems/2, DAG, dl);
}
X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
                                     const X86Subtarget &STI)
    : TargetLowering(TM), Subtarget(&STI) {
  X86ScalarSSEf64 = Subtarget->hasSSE2();
  X86ScalarSSEf32 = Subtarget->hasSSE1();
  TD = getDataLayout();

  // Set up the TargetLowering object.
  static const MVT IntVTs[] = { MVT::i8, MVT::i16, MVT::i32, MVT::i64 };

  // X86 is weird. It always uses i8 for shift amounts and setcc results.
  setBooleanContents(ZeroOrOneBooleanContent);
  // X86-SSE is even stranger. It uses -1 or 0 for vector masks.
  setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);

  // For 64-bit, since we have so many registers, use the ILP scheduler.
  // For 32-bit, use the register pressure specific scheduling.
  // For Atom, always use ILP scheduling.
  if (Subtarget->isAtom())
    setSchedulingPreference(Sched::ILP);
  else if (Subtarget->is64Bit())
    setSchedulingPreference(Sched::ILP);
  else
    setSchedulingPreference(Sched::RegPressure);
  const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
  setStackPointerRegisterToSaveRestore(RegInfo->getStackRegister());

  // Bypass expensive divides on Atom when compiling with O2.
  if (TM.getOptLevel() >= CodeGenOpt::Default) {
    if (Subtarget->hasSlowDivide32())
      addBypassSlowDiv(32, 8);
    if (Subtarget->hasSlowDivide64() && Subtarget->is64Bit())
      addBypassSlowDiv(64, 16);
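    // Each addBypassSlowDiv(SlowWidth, FastWidth) pair asks CodeGenPrepare to
    // guard the slow divide with a runtime check and use the narrower, faster
    // divide when both operands happen to fit in FastWidth bits.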
  }

  if (Subtarget->isTargetKnownWindowsMSVC()) {
    // Setup Windows compiler runtime calls.
    setLibcallName(RTLIB::SDIV_I64, "_alldiv");
    setLibcallName(RTLIB::UDIV_I64, "_aulldiv");
    setLibcallName(RTLIB::SREM_I64, "_allrem");
    setLibcallName(RTLIB::UREM_I64, "_aullrem");
    setLibcallName(RTLIB::MUL_I64, "_allmul");
    setLibcallCallingConv(RTLIB::SDIV_I64, CallingConv::X86_StdCall);
    setLibcallCallingConv(RTLIB::UDIV_I64, CallingConv::X86_StdCall);
    setLibcallCallingConv(RTLIB::SREM_I64, CallingConv::X86_StdCall);
    setLibcallCallingConv(RTLIB::UREM_I64, CallingConv::X86_StdCall);
    setLibcallCallingConv(RTLIB::MUL_I64, CallingConv::X86_StdCall);

    // The _ftol2 runtime function has an unusual calling conv, which
    // is modeled by a special pseudo-instruction.
    setLibcallName(RTLIB::FPTOUINT_F64_I64, nullptr);
    setLibcallName(RTLIB::FPTOUINT_F32_I64, nullptr);
    setLibcallName(RTLIB::FPTOUINT_F64_I32, nullptr);
    setLibcallName(RTLIB::FPTOUINT_F32_I32, nullptr);
  }

  if (Subtarget->isTargetDarwin()) {
    // Darwin should use _setjmp/_longjmp instead of setjmp/longjmp.
    setUseUnderscoreSetJmp(false);
    setUseUnderscoreLongJmp(false);
  } else if (Subtarget->isTargetWindowsGNU()) {
    // MS runtime is weird: it exports _setjmp, but longjmp!
    setUseUnderscoreSetJmp(true);
    setUseUnderscoreLongJmp(false);
  } else {
    setUseUnderscoreSetJmp(true);
    setUseUnderscoreLongJmp(true);
  }

  // Set up the register classes.
  addRegisterClass(MVT::i8, &X86::GR8RegClass);
  addRegisterClass(MVT::i16, &X86::GR16RegClass);
  addRegisterClass(MVT::i32, &X86::GR32RegClass);
  if (Subtarget->is64Bit())
    addRegisterClass(MVT::i64, &X86::GR64RegClass);

  for (MVT VT : MVT::integer_valuetypes())
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);

  // We don't accept any truncstore of integer registers.
  setTruncStoreAction(MVT::i64, MVT::i32, Expand);
  setTruncStoreAction(MVT::i64, MVT::i16, Expand);
  setTruncStoreAction(MVT::i64, MVT::i8, Expand);
  setTruncStoreAction(MVT::i32, MVT::i16, Expand);
  setTruncStoreAction(MVT::i32, MVT::i8, Expand);
  setTruncStoreAction(MVT::i16, MVT::i8, Expand);

  setTruncStoreAction(MVT::f64, MVT::f32, Expand);

  // SETOEQ and SETUNE require checking two conditions.
  setCondCodeAction(ISD::SETOEQ, MVT::f32, Expand);
  setCondCodeAction(ISD::SETOEQ, MVT::f64, Expand);
  setCondCodeAction(ISD::SETOEQ, MVT::f80, Expand);
  setCondCodeAction(ISD::SETUNE, MVT::f32, Expand);
  setCondCodeAction(ISD::SETUNE, MVT::f64, Expand);
  setCondCodeAction(ISD::SETUNE, MVT::f80, Expand);
  // Promote all UINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have this
  // operation.
  setOperationAction(ISD::UINT_TO_FP, MVT::i1, Promote);
  setOperationAction(ISD::UINT_TO_FP, MVT::i8, Promote);
  setOperationAction(ISD::UINT_TO_FP, MVT::i16, Promote);

  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::UINT_TO_FP, MVT::i32, Promote);
    setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);
  } else if (!TM.Options.UseSoftFloat) {
    // We have an algorithm for SSE2->double, and we turn this into a
    // 64-bit FILD followed by conditional FADD for other targets.
    setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);
    // We have an algorithm for SSE2, and we turn this into a 64-bit
    // FILD for other targets.
    setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
  }

  // Promote i1/i8 SINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have
  // this operation.
  setOperationAction(ISD::SINT_TO_FP, MVT::i1, Promote);
  setOperationAction(ISD::SINT_TO_FP, MVT::i8, Promote);

  if (!TM.Options.UseSoftFloat) {
    // SSE has no i16 to fp conversion, only i32.
    if (X86ScalarSSEf32) {
      setOperationAction(ISD::SINT_TO_FP, MVT::i16, Promote);
      // f32 and f64 cases are Legal, f80 case is not.
      setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
    } else {
      setOperationAction(ISD::SINT_TO_FP, MVT::i16, Custom);
      setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
    }
  } else {
    setOperationAction(ISD::SINT_TO_FP, MVT::i16, Promote);
    setOperationAction(ISD::SINT_TO_FP, MVT::i32, Promote);
  }

  // In 32-bit mode these are custom lowered. In 64-bit mode F32 and F64
  // are Legal, f80 is custom lowered.
  setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
  setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
  // Promote i1/i8 FP_TO_SINT to larger FP_TO_SINT's, as X86 doesn't have
  // this operation.
  setOperationAction(ISD::FP_TO_SINT, MVT::i1, Promote);
  setOperationAction(ISD::FP_TO_SINT, MVT::i8, Promote);

  if (X86ScalarSSEf32) {
    setOperationAction(ISD::FP_TO_SINT, MVT::i16, Promote);
    // f32 and f64 cases are Legal, f80 case is not.
    setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
  } else {
    setOperationAction(ISD::FP_TO_SINT, MVT::i16, Custom);
    setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
  }

  // Handle FP_TO_UINT by promoting the destination to a larger signed
  // conversion.
  setOperationAction(ISD::FP_TO_UINT, MVT::i1, Promote);
  setOperationAction(ISD::FP_TO_UINT, MVT::i8, Promote);
  setOperationAction(ISD::FP_TO_UINT, MVT::i16, Promote);

  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::FP_TO_UINT, MVT::i64, Expand);
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Promote);
  } else if (!TM.Options.UseSoftFloat) {
    // Since AVX is a superset of SSE3, only check for SSE here.
    if (Subtarget->hasSSE1() && !Subtarget->hasSSE3())
      // Expand FP_TO_UINT into a select.
      // FIXME: We would like to use a Custom expander here eventually to do
      // the optimal thing for SSE vs. the default expansion in the legalizer.
      setOperationAction(ISD::FP_TO_UINT, MVT::i32, Expand);
    else
      // With SSE3 we can use fisttpll to convert to a signed i64; without
      // SSE, we're stuck with a fistpll.
      setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
  }

  if (isTargetFTOL()) {
    // Use the _ftol2 runtime function, which has a pseudo-instruction
    // to handle its weird calling convention.
    setOperationAction(ISD::FP_TO_UINT, MVT::i64, Custom);
  }

  // TODO: when we have SSE, these could be more efficient, by using movd/movq.
  if (!X86ScalarSSEf64) {
    setOperationAction(ISD::BITCAST, MVT::f32, Expand);
    setOperationAction(ISD::BITCAST, MVT::i32, Expand);
    if (Subtarget->is64Bit()) {
      setOperationAction(ISD::BITCAST, MVT::f64, Expand);
      // Without SSE, i64->f64 goes through memory.
      setOperationAction(ISD::BITCAST, MVT::i64, Expand);
    }
  }

  // Scalar integer divide and remainder are lowered to use operations that
  // produce two results, to match the available instructions. This exposes
  // the two-result form to trivial CSE, which is able to combine x/y and x%y
  // into a single instruction.
  //
  // Scalar integer multiply-high is also lowered to use two-result
  // operations, to match the available instructions. However, plain multiply
  // (low) operations are left as Legal, as there are single-result
  // instructions for this in x86. Using the two-result multiply instructions
  // when both high and low results are needed must be arranged by dagcombine.
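  // For example, "x / y" and "x % y" on i32 both become a single ISD::UDIVREM
  // (or SDIVREM) node after CSE, which selects to one DIV/IDIV producing the
  // quotient in EAX and the remainder in EDX.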
  for (unsigned i = 0; i != array_lengthof(IntVTs); ++i) {
    MVT VT = IntVTs[i];
    setOperationAction(ISD::MULHS, VT, Expand);
    setOperationAction(ISD::MULHU, VT, Expand);
    setOperationAction(ISD::SDIV, VT, Expand);
    setOperationAction(ISD::UDIV, VT, Expand);
    setOperationAction(ISD::SREM, VT, Expand);
    setOperationAction(ISD::UREM, VT, Expand);

    // Add/Sub overflow ops with MVT::Glues are lowered to EFLAGS dependences.
    setOperationAction(ISD::ADDC, VT, Custom);
    setOperationAction(ISD::ADDE, VT, Custom);
    setOperationAction(ISD::SUBC, VT, Custom);
    setOperationAction(ISD::SUBE, VT, Custom);
  }

  setOperationAction(ISD::BR_JT, MVT::Other, Expand);
  setOperationAction(ISD::BRCOND, MVT::Other, Custom);
  setOperationAction(ISD::BR_CC, MVT::f32, Expand);
  setOperationAction(ISD::BR_CC, MVT::f64, Expand);
  setOperationAction(ISD::BR_CC, MVT::f80, Expand);
  setOperationAction(ISD::BR_CC, MVT::i8, Expand);
  setOperationAction(ISD::BR_CC, MVT::i16, Expand);
  setOperationAction(ISD::BR_CC, MVT::i32, Expand);
  setOperationAction(ISD::BR_CC, MVT::i64, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::f32, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::f64, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::f80, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::i8, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::i16, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::i32, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::i64, Expand);
  if (Subtarget->is64Bit())
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i32, Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8, Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
  setOperationAction(ISD::FP_ROUND_INREG, MVT::f32, Expand);
  setOperationAction(ISD::FREM, MVT::f32, Expand);
  setOperationAction(ISD::FREM, MVT::f64, Expand);
  setOperationAction(ISD::FREM, MVT::f80, Expand);
  setOperationAction(ISD::FLT_ROUNDS_, MVT::i32, Custom);
  // Promote the i8 variants and force them on up to i32 which has a shorter
  // encoding.
  setOperationAction(ISD::CTTZ, MVT::i8, Promote);
  AddPromotedToType(ISD::CTTZ, MVT::i8, MVT::i32);
  setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i8, Promote);
  AddPromotedToType(ISD::CTTZ_ZERO_UNDEF, MVT::i8, MVT::i32);
  if (Subtarget->hasBMI()) {
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i16, Expand);
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32, Expand);
    if (Subtarget->is64Bit())
      setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i64, Expand);
  } else {
    setOperationAction(ISD::CTTZ, MVT::i16, Custom);
    setOperationAction(ISD::CTTZ, MVT::i32, Custom);
    if (Subtarget->is64Bit())
      setOperationAction(ISD::CTTZ, MVT::i64, Custom);
  }

  if (Subtarget->hasLZCNT()) {
    // When promoting the i8 variants, force them to i32 for a shorter
    // encoding.
    setOperationAction(ISD::CTLZ, MVT::i8, Promote);
    AddPromotedToType(ISD::CTLZ, MVT::i8, MVT::i32);
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i8, Promote);
    AddPromotedToType(ISD::CTLZ_ZERO_UNDEF, MVT::i8, MVT::i32);
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i16, Expand);
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Expand);
    if (Subtarget->is64Bit())
      setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i64, Expand);
  } else {
    setOperationAction(ISD::CTLZ, MVT::i8, Custom);
    setOperationAction(ISD::CTLZ, MVT::i16, Custom);
    setOperationAction(ISD::CTLZ, MVT::i32, Custom);
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i8, Custom);
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i16, Custom);
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Custom);
    if (Subtarget->is64Bit()) {
      setOperationAction(ISD::CTLZ, MVT::i64, Custom);
      setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i64, Custom);
    }
  }
  // Special handling for half-precision floating point conversions.
  // If we don't have F16C support, then lower half float conversions
  // into library calls.
  if (TM.Options.UseSoftFloat || !Subtarget->hasF16C()) {
    setOperationAction(ISD::FP16_TO_FP, MVT::f32, Expand);
    setOperationAction(ISD::FP_TO_FP16, MVT::f32, Expand);
  }

  // There's never any support for operations beyond MVT::f32.
  setOperationAction(ISD::FP16_TO_FP, MVT::f64, Expand);
  setOperationAction(ISD::FP16_TO_FP, MVT::f80, Expand);
  setOperationAction(ISD::FP_TO_FP16, MVT::f64, Expand);
  setOperationAction(ISD::FP_TO_FP16, MVT::f80, Expand);

  setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::f80, MVT::f16, Expand);
  setTruncStoreAction(MVT::f32, MVT::f16, Expand);
  setTruncStoreAction(MVT::f64, MVT::f16, Expand);
  setTruncStoreAction(MVT::f80, MVT::f16, Expand);

  if (Subtarget->hasPOPCNT()) {
    setOperationAction(ISD::CTPOP, MVT::i8, Promote);
  } else {
    setOperationAction(ISD::CTPOP, MVT::i8, Expand);
    setOperationAction(ISD::CTPOP, MVT::i16, Expand);
    setOperationAction(ISD::CTPOP, MVT::i32, Expand);
    if (Subtarget->is64Bit())
      setOperationAction(ISD::CTPOP, MVT::i64, Expand);
  }

  setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, Custom);

  if (!Subtarget->hasMOVBE())
    setOperationAction(ISD::BSWAP, MVT::i16, Expand);
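  // Without MOVBE, bswap of an i16 is expanded generically into shifts and an
  // OR, which typically gets matched back to a 16-bit rotate by 8.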
  // These should be promoted to a larger select which is supported.
  setOperationAction(ISD::SELECT, MVT::i1, Promote);
  // X86 wants to expand cmov itself.
  setOperationAction(ISD::SELECT, MVT::i8, Custom);
  setOperationAction(ISD::SELECT, MVT::i16, Custom);
  setOperationAction(ISD::SELECT, MVT::i32, Custom);
  setOperationAction(ISD::SELECT, MVT::f32, Custom);
  setOperationAction(ISD::SELECT, MVT::f64, Custom);
  setOperationAction(ISD::SELECT, MVT::f80, Custom);
  setOperationAction(ISD::SETCC, MVT::i8, Custom);
  setOperationAction(ISD::SETCC, MVT::i16, Custom);
  setOperationAction(ISD::SETCC, MVT::i32, Custom);
  setOperationAction(ISD::SETCC, MVT::f32, Custom);
  setOperationAction(ISD::SETCC, MVT::f64, Custom);
  setOperationAction(ISD::SETCC, MVT::f80, Custom);
  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::SELECT, MVT::i64, Custom);
    setOperationAction(ISD::SETCC, MVT::i64, Custom);
  }
  setOperationAction(ISD::EH_RETURN, MVT::Other, Custom);
  // NOTE: EH_SJLJ_SETJMP/_LONGJMP support here is NOT intended to support
  // SjLj exception handling but a light-weight setjmp/longjmp replacement to
  // support continuation, user-level threading, etc. As a result, no
  // other SjLj exception interfaces are implemented; please don't build
  // your own exception handling based on them.
  // LLVM/Clang supports zero-cost DWARF exception handling.
  setOperationAction(ISD::EH_SJLJ_SETJMP, MVT::i32, Custom);
  setOperationAction(ISD::EH_SJLJ_LONGJMP, MVT::Other, Custom);
  setOperationAction(ISD::ConstantPool, MVT::i32, Custom);
  setOperationAction(ISD::JumpTable, MVT::i32, Custom);
  setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom);
  if (Subtarget->is64Bit())
    setOperationAction(ISD::GlobalTLSAddress, MVT::i64, Custom);
  setOperationAction(ISD::ExternalSymbol, MVT::i32, Custom);
  setOperationAction(ISD::BlockAddress, MVT::i32, Custom);
  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::ConstantPool, MVT::i64, Custom);
    setOperationAction(ISD::JumpTable, MVT::i64, Custom);
    setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);
    setOperationAction(ISD::ExternalSymbol, MVT::i64, Custom);
    setOperationAction(ISD::BlockAddress, MVT::i64, Custom);
  }
  // 64-bit add, sub, shl, sra, srl (iff 32-bit x86)
  setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom);
  setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom);
  setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom);
  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::SHL_PARTS, MVT::i64, Custom);
    setOperationAction(ISD::SRA_PARTS, MVT::i64, Custom);
    setOperationAction(ISD::SRL_PARTS, MVT::i64, Custom);
  }

  if (Subtarget->hasSSE1())
    setOperationAction(ISD::PREFETCH, MVT::Other, Legal);
  setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Custom);

  // Expand certain atomics.
  for (unsigned i = 0; i != array_lengthof(IntVTs); ++i) {
    MVT VT = IntVTs[i];
    setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, VT, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_SUB, VT, Custom);
    setOperationAction(ISD::ATOMIC_STORE, VT, Custom);
  }

  if (Subtarget->hasCmpxchg16b()) {
    setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, MVT::i128, Custom);
  }

  // FIXME - use subtarget debug flags
  if (!Subtarget->isTargetDarwin() && !Subtarget->isTargetELF() &&
      !Subtarget->isTargetCygMing() && !Subtarget->isTargetWin64()) {
    setOperationAction(ISD::EH_LABEL, MVT::Other, Expand);
  }

  if (Subtarget->is64Bit()) {
    setExceptionPointerRegister(X86::RAX);
    setExceptionSelectorRegister(X86::RDX);
  } else {
    setExceptionPointerRegister(X86::EAX);
    setExceptionSelectorRegister(X86::EDX);
  }
  setOperationAction(ISD::FRAME_TO_ARGS_OFFSET, MVT::i32, Custom);
  setOperationAction(ISD::FRAME_TO_ARGS_OFFSET, MVT::i64, Custom);

  setOperationAction(ISD::INIT_TRAMPOLINE, MVT::Other, Custom);
  setOperationAction(ISD::ADJUST_TRAMPOLINE, MVT::Other, Custom);

  setOperationAction(ISD::TRAP, MVT::Other, Legal);
  setOperationAction(ISD::DEBUGTRAP, MVT::Other, Legal);

  // VASTART needs to be custom lowered to use the VarArgsFrameIndex.
  setOperationAction(ISD::VASTART, MVT::Other, Custom);
  setOperationAction(ISD::VAEND, MVT::Other, Expand);
  if (Subtarget->is64Bit() && !Subtarget->isTargetWin64()) {
    // TargetInfo::X86_64ABIBuiltinVaList
    setOperationAction(ISD::VAARG, MVT::Other, Custom);
    setOperationAction(ISD::VACOPY, MVT::Other, Custom);
  } else {
    // TargetInfo::CharPtrBuiltinVaList
    setOperationAction(ISD::VAARG, MVT::Other, Expand);
    setOperationAction(ISD::VACOPY, MVT::Other, Expand);
  }

  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);

  setOperationAction(ISD::DYNAMIC_STACKALLOC, getPointerTy(), Custom);
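  // DYNAMIC_STACKALLOC is custom lowered in part so that targets needing stack
  // probing (e.g. Windows chkstk) or segmented stacks can emit the proper call
  // sequence instead of a bare stack-pointer adjustment.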
  if (!TM.Options.UseSoftFloat && X86ScalarSSEf64) {
    // f32 and f64 use SSE.
    // Set up the FP register classes.
    addRegisterClass(MVT::f32, &X86::FR32RegClass);
    addRegisterClass(MVT::f64, &X86::FR64RegClass);

    // Use ANDPD to simulate FABS.
    setOperationAction(ISD::FABS, MVT::f64, Custom);
    setOperationAction(ISD::FABS, MVT::f32, Custom);

    // Use XORP to simulate FNEG.
    setOperationAction(ISD::FNEG, MVT::f64, Custom);
    setOperationAction(ISD::FNEG, MVT::f32, Custom);

    // Use ANDPD and ORPD to simulate FCOPYSIGN.
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Custom);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);

    // Lower this to FGETSIGNx86 plus an AND.
    setOperationAction(ISD::FGETSIGN, MVT::i64, Custom);
    setOperationAction(ISD::FGETSIGN, MVT::i32, Custom);

    // We don't support sin/cos/fmod.
    setOperationAction(ISD::FSIN, MVT::f64, Expand);
    setOperationAction(ISD::FCOS, MVT::f64, Expand);
    setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
    setOperationAction(ISD::FSIN, MVT::f32, Expand);
    setOperationAction(ISD::FCOS, MVT::f32, Expand);
    setOperationAction(ISD::FSINCOS, MVT::f32, Expand);

    // Expand FP immediates into loads from the stack, except for the special
    // cases we handle.
    addLegalFPImmediate(APFloat(+0.0));  // xorpd
    addLegalFPImmediate(APFloat(+0.0f)); // xorps
  } else if (!TM.Options.UseSoftFloat && X86ScalarSSEf32) {
    // Use SSE for f32, x87 for f64.
    // Set up the FP register classes.
    addRegisterClass(MVT::f32, &X86::FR32RegClass);
    addRegisterClass(MVT::f64, &X86::RFP64RegClass);

    // Use ANDPS to simulate FABS.
    setOperationAction(ISD::FABS, MVT::f32, Custom);

    // Use XORP to simulate FNEG.
    setOperationAction(ISD::FNEG, MVT::f32, Custom);

    setOperationAction(ISD::UNDEF, MVT::f64, Expand);

    // Use ANDPS and ORPS to simulate FCOPYSIGN.
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);

    // We don't support sin/cos/fmod.
    setOperationAction(ISD::FSIN, MVT::f32, Expand);
    setOperationAction(ISD::FCOS, MVT::f32, Expand);
    setOperationAction(ISD::FSINCOS, MVT::f32, Expand);

    // Special cases we handle for FP constants.
    addLegalFPImmediate(APFloat(+0.0f)); // xorps
    addLegalFPImmediate(APFloat(+0.0));  // FLD0
    addLegalFPImmediate(APFloat(+1.0));  // FLD1
    addLegalFPImmediate(APFloat(-0.0));  // FLD0/FCHS
    addLegalFPImmediate(APFloat(-1.0));  // FLD1/FCHS

    if (!TM.Options.UnsafeFPMath) {
      setOperationAction(ISD::FSIN, MVT::f64, Expand);
      setOperationAction(ISD::FCOS, MVT::f64, Expand);
      setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
    }
  } else if (!TM.Options.UseSoftFloat) {
    // f32 and f64 in x87.
    // Set up the FP register classes.
    addRegisterClass(MVT::f64, &X86::RFP64RegClass);
    addRegisterClass(MVT::f32, &X86::RFP32RegClass);

    setOperationAction(ISD::UNDEF, MVT::f64, Expand);
    setOperationAction(ISD::UNDEF, MVT::f32, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);

    if (!TM.Options.UnsafeFPMath) {
      setOperationAction(ISD::FSIN, MVT::f64, Expand);
      setOperationAction(ISD::FSIN, MVT::f32, Expand);
      setOperationAction(ISD::FCOS, MVT::f64, Expand);
      setOperationAction(ISD::FCOS, MVT::f32, Expand);
      setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
      setOperationAction(ISD::FSINCOS, MVT::f32, Expand);
    }
    addLegalFPImmediate(APFloat(+0.0));  // FLD0
    addLegalFPImmediate(APFloat(+1.0));  // FLD1
    addLegalFPImmediate(APFloat(-0.0));  // FLD0/FCHS
    addLegalFPImmediate(APFloat(-1.0));  // FLD1/FCHS
    addLegalFPImmediate(APFloat(+0.0f)); // FLD0
    addLegalFPImmediate(APFloat(+1.0f)); // FLD1
    addLegalFPImmediate(APFloat(-0.0f)); // FLD0/FCHS
    addLegalFPImmediate(APFloat(-1.0f)); // FLD1/FCHS
  }

  // We don't support FMA.
  setOperationAction(ISD::FMA, MVT::f64, Expand);
  setOperationAction(ISD::FMA, MVT::f32, Expand);
  // Long double always uses X87.
  if (!TM.Options.UseSoftFloat) {
    addRegisterClass(MVT::f80, &X86::RFP80RegClass);
    setOperationAction(ISD::UNDEF, MVT::f80, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f80, Expand);
    {
      APFloat TmpFlt = APFloat::getZero(APFloat::x87DoubleExtended);
      addLegalFPImmediate(TmpFlt); // FLD0
      TmpFlt.changeSign();
      addLegalFPImmediate(TmpFlt); // FLD0/FCHS

      bool ignored;
      APFloat TmpFlt2(+1.0);
      TmpFlt2.convert(APFloat::x87DoubleExtended, APFloat::rmNearestTiesToEven,
                      &ignored);
      addLegalFPImmediate(TmpFlt2); // FLD1
      TmpFlt2.changeSign();
      addLegalFPImmediate(TmpFlt2); // FLD1/FCHS
    }

    if (!TM.Options.UnsafeFPMath) {
      setOperationAction(ISD::FSIN, MVT::f80, Expand);
      setOperationAction(ISD::FCOS, MVT::f80, Expand);
      setOperationAction(ISD::FSINCOS, MVT::f80, Expand);
    }

    setOperationAction(ISD::FFLOOR, MVT::f80, Expand);
    setOperationAction(ISD::FCEIL, MVT::f80, Expand);
    setOperationAction(ISD::FTRUNC, MVT::f80, Expand);
    setOperationAction(ISD::FRINT, MVT::f80, Expand);
    setOperationAction(ISD::FNEARBYINT, MVT::f80, Expand);
    setOperationAction(ISD::FMA, MVT::f80, Expand);
  }

  // Always use a library call for pow.
  setOperationAction(ISD::FPOW, MVT::f32, Expand);
  setOperationAction(ISD::FPOW, MVT::f64, Expand);
  setOperationAction(ISD::FPOW, MVT::f80, Expand);

  setOperationAction(ISD::FLOG, MVT::f80, Expand);
  setOperationAction(ISD::FLOG2, MVT::f80, Expand);
  setOperationAction(ISD::FLOG10, MVT::f80, Expand);
  setOperationAction(ISD::FEXP, MVT::f80, Expand);
  setOperationAction(ISD::FEXP2, MVT::f80, Expand);
  setOperationAction(ISD::FMINNUM, MVT::f80, Expand);
  setOperationAction(ISD::FMAXNUM, MVT::f80, Expand);
  // First set operation action for all vector types to either promote
  // (for widening) or expand (for scalarization). Then we will selectively
  // turn on ones that can be effectively codegen'd.
  for (MVT VT : MVT::vector_valuetypes()) {
    setOperationAction(ISD::ADD, VT, Expand);
    setOperationAction(ISD::SUB, VT, Expand);
    setOperationAction(ISD::FADD, VT, Expand);
    setOperationAction(ISD::FNEG, VT, Expand);
    setOperationAction(ISD::FSUB, VT, Expand);
    setOperationAction(ISD::MUL, VT, Expand);
    setOperationAction(ISD::FMUL, VT, Expand);
    setOperationAction(ISD::SDIV, VT, Expand);
    setOperationAction(ISD::UDIV, VT, Expand);
    setOperationAction(ISD::FDIV, VT, Expand);
    setOperationAction(ISD::SREM, VT, Expand);
    setOperationAction(ISD::UREM, VT, Expand);
    setOperationAction(ISD::LOAD, VT, Expand);
    setOperationAction(ISD::VECTOR_SHUFFLE, VT, Expand);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Expand);
    setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Expand);
    setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Expand);
    setOperationAction(ISD::INSERT_SUBVECTOR, VT, Expand);
    setOperationAction(ISD::FABS, VT, Expand);
    setOperationAction(ISD::FSIN, VT, Expand);
    setOperationAction(ISD::FSINCOS, VT, Expand);
    setOperationAction(ISD::FCOS, VT, Expand);
    setOperationAction(ISD::FSINCOS, VT, Expand);
    setOperationAction(ISD::FREM, VT, Expand);
    setOperationAction(ISD::FMA, VT, Expand);
    setOperationAction(ISD::FPOWI, VT, Expand);
    setOperationAction(ISD::FSQRT, VT, Expand);
    setOperationAction(ISD::FCOPYSIGN, VT, Expand);
    setOperationAction(ISD::FFLOOR, VT, Expand);
    setOperationAction(ISD::FCEIL, VT, Expand);
    setOperationAction(ISD::FTRUNC, VT, Expand);
    setOperationAction(ISD::FRINT, VT, Expand);
    setOperationAction(ISD::FNEARBYINT, VT, Expand);
    setOperationAction(ISD::SMUL_LOHI, VT, Expand);
    setOperationAction(ISD::MULHS, VT, Expand);
    setOperationAction(ISD::UMUL_LOHI, VT, Expand);
    setOperationAction(ISD::MULHU, VT, Expand);
    setOperationAction(ISD::SDIVREM, VT, Expand);
    setOperationAction(ISD::UDIVREM, VT, Expand);
    setOperationAction(ISD::FPOW, VT, Expand);
    setOperationAction(ISD::CTPOP, VT, Expand);
    setOperationAction(ISD::CTTZ, VT, Expand);
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, VT, Expand);
    setOperationAction(ISD::CTLZ, VT, Expand);
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, VT, Expand);
    setOperationAction(ISD::SHL, VT, Expand);
    setOperationAction(ISD::SRA, VT, Expand);
    setOperationAction(ISD::SRL, VT, Expand);
    setOperationAction(ISD::ROTL, VT, Expand);
    setOperationAction(ISD::ROTR, VT, Expand);
    setOperationAction(ISD::BSWAP, VT, Expand);
    setOperationAction(ISD::SETCC, VT, Expand);
    setOperationAction(ISD::FLOG, VT, Expand);
    setOperationAction(ISD::FLOG2, VT, Expand);
    setOperationAction(ISD::FLOG10, VT, Expand);
    setOperationAction(ISD::FEXP, VT, Expand);
    setOperationAction(ISD::FEXP2, VT, Expand);
    setOperationAction(ISD::FP_TO_UINT, VT, Expand);
    setOperationAction(ISD::FP_TO_SINT, VT, Expand);
    setOperationAction(ISD::UINT_TO_FP, VT, Expand);
    setOperationAction(ISD::SINT_TO_FP, VT, Expand);
    setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Expand);
    setOperationAction(ISD::TRUNCATE, VT, Expand);
    setOperationAction(ISD::SIGN_EXTEND, VT, Expand);
    setOperationAction(ISD::ZERO_EXTEND, VT, Expand);
    setOperationAction(ISD::ANY_EXTEND, VT, Expand);
    setOperationAction(ISD::VSELECT, VT, Expand);
    setOperationAction(ISD::SELECT_CC, VT, Expand);
    for (MVT InnerVT : MVT::vector_valuetypes()) {
      setTruncStoreAction(InnerVT, VT, Expand);

      setLoadExtAction(ISD::SEXTLOAD, InnerVT, VT, Expand);
      setLoadExtAction(ISD::ZEXTLOAD, InnerVT, VT, Expand);

      // N.b. ISD::EXTLOAD legality is basically ignored except for i1-like
      // types, we have to deal with them whether we ask for Expansion or not.
      // Setting Expand causes its own optimisation problems though, so leave
      // them legal.
      if (VT.getVectorElementType() == MVT::i1)
        setLoadExtAction(ISD::EXTLOAD, InnerVT, VT, Expand);
    }
  }
  // FIXME: In order to prevent SSE instructions being expanded to MMX ones
  // with -msoft-float, disable use of MMX as well.
  if (!TM.Options.UseSoftFloat && Subtarget->hasMMX()) {
    addRegisterClass(MVT::x86mmx, &X86::VR64RegClass);
    // No operations on x86mmx supported, everything uses intrinsics.
  }

  // MMX-sized vectors (other than x86mmx) are expected to be expanded
  // into smaller operations.
  setOperationAction(ISD::MULHS, MVT::v8i8, Expand);
  setOperationAction(ISD::MULHS, MVT::v4i16, Expand);
  setOperationAction(ISD::MULHS, MVT::v2i32, Expand);
  setOperationAction(ISD::MULHS, MVT::v1i64, Expand);
  setOperationAction(ISD::AND, MVT::v8i8, Expand);
  setOperationAction(ISD::AND, MVT::v4i16, Expand);
  setOperationAction(ISD::AND, MVT::v2i32, Expand);
  setOperationAction(ISD::AND, MVT::v1i64, Expand);
  setOperationAction(ISD::OR, MVT::v8i8, Expand);
  setOperationAction(ISD::OR, MVT::v4i16, Expand);
  setOperationAction(ISD::OR, MVT::v2i32, Expand);
  setOperationAction(ISD::OR, MVT::v1i64, Expand);
  setOperationAction(ISD::XOR, MVT::v8i8, Expand);
  setOperationAction(ISD::XOR, MVT::v4i16, Expand);
  setOperationAction(ISD::XOR, MVT::v2i32, Expand);
  setOperationAction(ISD::XOR, MVT::v1i64, Expand);
  setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v8i8, Expand);
  setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i16, Expand);
  setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v2i32, Expand);
  setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v1i64, Expand);
  setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v1i64, Expand);
  setOperationAction(ISD::SELECT, MVT::v8i8, Expand);
  setOperationAction(ISD::SELECT, MVT::v4i16, Expand);
  setOperationAction(ISD::SELECT, MVT::v2i32, Expand);
  setOperationAction(ISD::SELECT, MVT::v1i64, Expand);
  setOperationAction(ISD::BITCAST, MVT::v8i8, Expand);
  setOperationAction(ISD::BITCAST, MVT::v4i16, Expand);
  setOperationAction(ISD::BITCAST, MVT::v2i32, Expand);
  setOperationAction(ISD::BITCAST, MVT::v1i64, Expand);
  if (!TM.Options.UseSoftFloat && Subtarget->hasSSE1()) {
    addRegisterClass(MVT::v4f32, &X86::VR128RegClass);

    setOperationAction(ISD::FADD, MVT::v4f32, Legal);
    setOperationAction(ISD::FSUB, MVT::v4f32, Legal);
    setOperationAction(ISD::FMUL, MVT::v4f32, Legal);
    setOperationAction(ISD::FDIV, MVT::v4f32, Legal);
    setOperationAction(ISD::FSQRT, MVT::v4f32, Legal);
    setOperationAction(ISD::FNEG, MVT::v4f32, Custom);
    setOperationAction(ISD::FABS, MVT::v4f32, Custom);
    setOperationAction(ISD::LOAD, MVT::v4f32, Legal);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4f32, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Custom);
    setOperationAction(ISD::SELECT, MVT::v4f32, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::v4i32, Custom);
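    // Note: there is no unsigned vector int-to-float instruction before
    // AVX-512, so v4i32 uint_to_fp is custom lowered (typically assembled from
    // signed conversions plus compensation arithmetic).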
  }

  if (!TM.Options.UseSoftFloat && Subtarget->hasSSE2()) {
    addRegisterClass(MVT::v2f64, &X86::VR128RegClass);

    // FIXME: Unfortunately, -soft-float and -no-implicit-float mean XMM
    // registers cannot be used even for integer operations.
    addRegisterClass(MVT::v16i8, &X86::VR128RegClass);
    addRegisterClass(MVT::v8i16, &X86::VR128RegClass);
    addRegisterClass(MVT::v4i32, &X86::VR128RegClass);
    addRegisterClass(MVT::v2i64, &X86::VR128RegClass);

    setOperationAction(ISD::ADD, MVT::v16i8, Legal);
    setOperationAction(ISD::ADD, MVT::v8i16, Legal);
    setOperationAction(ISD::ADD, MVT::v4i32, Legal);
    setOperationAction(ISD::ADD, MVT::v2i64, Legal);
    setOperationAction(ISD::MUL, MVT::v4i32, Custom);
    setOperationAction(ISD::MUL, MVT::v2i64, Custom);
    setOperationAction(ISD::UMUL_LOHI, MVT::v4i32, Custom);
    setOperationAction(ISD::SMUL_LOHI, MVT::v4i32, Custom);
    setOperationAction(ISD::MULHU, MVT::v8i16, Legal);
    setOperationAction(ISD::MULHS, MVT::v8i16, Legal);
    setOperationAction(ISD::SUB, MVT::v16i8, Legal);
    setOperationAction(ISD::SUB, MVT::v8i16, Legal);
    setOperationAction(ISD::SUB, MVT::v4i32, Legal);
    setOperationAction(ISD::SUB, MVT::v2i64, Legal);
    setOperationAction(ISD::MUL, MVT::v8i16, Legal);
    setOperationAction(ISD::FADD, MVT::v2f64, Legal);
    setOperationAction(ISD::FSUB, MVT::v2f64, Legal);
    setOperationAction(ISD::FMUL, MVT::v2f64, Legal);
    setOperationAction(ISD::FDIV, MVT::v2f64, Legal);
    setOperationAction(ISD::FSQRT, MVT::v2f64, Legal);
    setOperationAction(ISD::FNEG, MVT::v2f64, Custom);
    setOperationAction(ISD::FABS, MVT::v2f64, Custom);

    setOperationAction(ISD::SETCC, MVT::v2i64, Custom);
    setOperationAction(ISD::SETCC, MVT::v16i8, Custom);
    setOperationAction(ISD::SETCC, MVT::v8i16, Custom);
    setOperationAction(ISD::SETCC, MVT::v4i32, Custom);

    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v16i8, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v8i16, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i16, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i32, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom);

    // Only provide customized ctpop vector bit twiddling for vector types we
    // know to perform better than using the popcnt instructions on each vector
    // element. If popcnt isn't supported, always provide the custom version.
    if (!Subtarget->hasPOPCNT()) {
      setOperationAction(ISD::CTPOP, MVT::v4i32, Custom);
      setOperationAction(ISD::CTPOP, MVT::v2i64, Custom);
    }
    // Custom lower build_vector, vector_shuffle, and extract_vector_elt.
    for (int i = MVT::v16i8; i != MVT::v2i64; ++i) {
      MVT VT = (MVT::SimpleValueType)i;
      // Do not attempt to custom lower non-power-of-2 vectors.
      if (!isPowerOf2_32(VT.getVectorNumElements()))
        continue;
      // Do not attempt to custom lower non-128-bit vectors.
      if (!VT.is128BitVector())
        continue;
      setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
      setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
    }

    // We support custom legalizing of sext and anyext loads for specific
    // memory vector types which we can load as a scalar (or sequence of
    // scalars) and extend in-register to a legal 128-bit vector type. For sext
    // loads these must work with a single scalar load.
    for (MVT VT : MVT::integer_vector_valuetypes()) {
      setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v4i8, Custom);
      setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v4i16, Custom);
      setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v8i8, Custom);
      setLoadExtAction(ISD::EXTLOAD, VT, MVT::v2i8, Custom);
      setLoadExtAction(ISD::EXTLOAD, VT, MVT::v2i16, Custom);
      setLoadExtAction(ISD::EXTLOAD, VT, MVT::v2i32, Custom);
      setLoadExtAction(ISD::EXTLOAD, VT, MVT::v4i8, Custom);
      setLoadExtAction(ISD::EXTLOAD, VT, MVT::v4i16, Custom);
      setLoadExtAction(ISD::EXTLOAD, VT, MVT::v8i8, Custom);
    }

    setOperationAction(ISD::BUILD_VECTOR, MVT::v2f64, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v2i64, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2f64, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2i64, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2f64, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Custom);

    if (Subtarget->is64Bit()) {
      setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2i64, Custom);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i64, Custom);
    }
    // Promote v16i8, v8i16, v4i32 load, select, and, or, xor to v2i64.
    for (int i = MVT::v16i8; i != MVT::v2i64; ++i) {
      MVT VT = (MVT::SimpleValueType)i;

      // Do not attempt to promote non-128-bit vectors.
      if (!VT.is128BitVector())
        continue;

      setOperationAction(ISD::AND, VT, Promote);
      AddPromotedToType(ISD::AND, VT, MVT::v2i64);
      setOperationAction(ISD::OR, VT, Promote);
      AddPromotedToType(ISD::OR, VT, MVT::v2i64);
      setOperationAction(ISD::XOR, VT, Promote);
      AddPromotedToType(ISD::XOR, VT, MVT::v2i64);
      setOperationAction(ISD::LOAD, VT, Promote);
      AddPromotedToType(ISD::LOAD, VT, MVT::v2i64);
      setOperationAction(ISD::SELECT, VT, Promote);
      AddPromotedToType(ISD::SELECT, VT, MVT::v2i64);
    }

    // Custom lower v2i64 and v2f64 selects.
    setOperationAction(ISD::LOAD, MVT::v2f64, Legal);
    setOperationAction(ISD::LOAD, MVT::v2i64, Legal);
    setOperationAction(ISD::SELECT, MVT::v2f64, Custom);
    setOperationAction(ISD::SELECT, MVT::v2i64, Custom);

    setOperationAction(ISD::FP_TO_SINT, MVT::v4i32, Legal);
    setOperationAction(ISD::SINT_TO_FP, MVT::v4i32, Legal);

    setOperationAction(ISD::UINT_TO_FP, MVT::v4i8, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::v4i16, Custom);
    // As there is no 64-bit GPR available, we need to build a special custom
    // sequence to convert from v2i32 to v2f32.
    if (!Subtarget->is64Bit())
      setOperationAction(ISD::UINT_TO_FP, MVT::v2f32, Custom);

    setOperationAction(ISD::FP_EXTEND, MVT::v2f32, Custom);
    setOperationAction(ISD::FP_ROUND, MVT::v2f32, Custom);

    for (MVT VT : MVT::fp_vector_valuetypes())
      setLoadExtAction(ISD::EXTLOAD, VT, MVT::v2f32, Legal);

    setOperationAction(ISD::BITCAST, MVT::v2i32, Custom);
    setOperationAction(ISD::BITCAST, MVT::v4i16, Custom);
    setOperationAction(ISD::BITCAST, MVT::v8i8, Custom);
  }

  if (!TM.Options.UseSoftFloat && Subtarget->hasSSE41()) {
    setOperationAction(ISD::FFLOOR, MVT::f32, Legal);
    setOperationAction(ISD::FCEIL, MVT::f32, Legal);
    setOperationAction(ISD::FTRUNC, MVT::f32, Legal);
    setOperationAction(ISD::FRINT, MVT::f32, Legal);
    setOperationAction(ISD::FNEARBYINT, MVT::f32, Legal);
    setOperationAction(ISD::FFLOOR, MVT::f64, Legal);
    setOperationAction(ISD::FCEIL, MVT::f64, Legal);
    setOperationAction(ISD::FTRUNC, MVT::f64, Legal);
    setOperationAction(ISD::FRINT, MVT::f64, Legal);
    setOperationAction(ISD::FNEARBYINT, MVT::f64, Legal);

    setOperationAction(ISD::FFLOOR, MVT::v4f32, Legal);
    setOperationAction(ISD::FCEIL, MVT::v4f32, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v4f32, Legal);
    setOperationAction(ISD::FRINT, MVT::v4f32, Legal);
    setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Legal);
    setOperationAction(ISD::FFLOOR, MVT::v2f64, Legal);
    setOperationAction(ISD::FCEIL, MVT::v2f64, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v2f64, Legal);
    setOperationAction(ISD::FRINT, MVT::v2f64, Legal);
    setOperationAction(ISD::FNEARBYINT, MVT::v2f64, Legal);

    // FIXME: Do we need to handle scalar-to-vector here?
    setOperationAction(ISD::MUL, MVT::v4i32, Legal);

    setOperationAction(ISD::VSELECT, MVT::v2f64, Custom);
    setOperationAction(ISD::VSELECT, MVT::v2i64, Custom);
    setOperationAction(ISD::VSELECT, MVT::v4i32, Custom);
    setOperationAction(ISD::VSELECT, MVT::v4f32, Custom);
    setOperationAction(ISD::VSELECT, MVT::v8i16, Custom);
    // There is no BLENDI for byte vectors. We don't need to custom lower
    // some vselects for now.
    setOperationAction(ISD::VSELECT, MVT::v16i8, Legal);
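    // A v16i8 vselect with a variable mask maps directly onto PBLENDVB, so it
    // can simply stay Legal here.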
    // SSE41 brings specific instructions for doing vector sign extend even in
    // cases where we don't have SRA.
    for (MVT VT : MVT::integer_vector_valuetypes()) {
      setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v2i8, Custom);
      setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v2i16, Custom);
      setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v2i32, Custom);
    }

    // SSE41 also has vector sign/zero extending loads, PMOV[SZ]X.
    setLoadExtAction(ISD::SEXTLOAD, MVT::v8i16, MVT::v8i8, Legal);
    setLoadExtAction(ISD::SEXTLOAD, MVT::v4i32, MVT::v4i8, Legal);
    setLoadExtAction(ISD::SEXTLOAD, MVT::v2i64, MVT::v2i8, Legal);
    setLoadExtAction(ISD::SEXTLOAD, MVT::v4i32, MVT::v4i16, Legal);
    setLoadExtAction(ISD::SEXTLOAD, MVT::v2i64, MVT::v2i16, Legal);
    setLoadExtAction(ISD::SEXTLOAD, MVT::v2i64, MVT::v2i32, Legal);

    setLoadExtAction(ISD::ZEXTLOAD, MVT::v8i16, MVT::v8i8, Legal);
    setLoadExtAction(ISD::ZEXTLOAD, MVT::v4i32, MVT::v4i8, Legal);
    setLoadExtAction(ISD::ZEXTLOAD, MVT::v2i64, MVT::v2i8, Legal);
    setLoadExtAction(ISD::ZEXTLOAD, MVT::v4i32, MVT::v4i16, Legal);
    setLoadExtAction(ISD::ZEXTLOAD, MVT::v2i64, MVT::v2i16, Legal);
    setLoadExtAction(ISD::ZEXTLOAD, MVT::v2i64, MVT::v2i32, Legal);
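    // For example, a zero-extending load of two i32 elements into a v2i64
    // result selects to a single PMOVZXDQ with a memory operand.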
    // i8 and i16 vectors are custom because the source register and source
    // memory operand types are not the same width. f32 vectors are
    // custom since the immediate controlling the insert encodes additional
    // bits.
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v16i8, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i16, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i32, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom);

    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v16i8, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v8i16, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4i32, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Custom);

    // FIXME: these should be Legal, but that's only for the case where
    // the index is constant. For now custom expand to deal with that.
    if (Subtarget->is64Bit()) {
      setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2i64, Custom);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i64, Custom);
    }
  }

  if (Subtarget->hasSSE2()) {
    setOperationAction(ISD::SRL, MVT::v8i16, Custom);
    setOperationAction(ISD::SRL, MVT::v16i8, Custom);

    setOperationAction(ISD::SHL, MVT::v8i16, Custom);
    setOperationAction(ISD::SHL, MVT::v16i8, Custom);

    setOperationAction(ISD::SRA, MVT::v8i16, Custom);
    setOperationAction(ISD::SRA, MVT::v16i8, Custom);

    // In the customized shift lowering, the legal cases in AVX2 will be
    // recognized.
    setOperationAction(ISD::SRL, MVT::v2i64, Custom);
    setOperationAction(ISD::SRL, MVT::v4i32, Custom);

    setOperationAction(ISD::SHL, MVT::v2i64, Custom);
    setOperationAction(ISD::SHL, MVT::v4i32, Custom);

    setOperationAction(ISD::SRA, MVT::v4i32, Custom);
  }
  if (!TM.Options.UseSoftFloat && Subtarget->hasFp256()) {
    addRegisterClass(MVT::v32i8, &X86::VR256RegClass);
    addRegisterClass(MVT::v16i16, &X86::VR256RegClass);
    addRegisterClass(MVT::v8i32, &X86::VR256RegClass);
    addRegisterClass(MVT::v8f32, &X86::VR256RegClass);
    addRegisterClass(MVT::v4i64, &X86::VR256RegClass);
    addRegisterClass(MVT::v4f64, &X86::VR256RegClass);

    setOperationAction(ISD::LOAD, MVT::v8f32, Legal);
    setOperationAction(ISD::LOAD, MVT::v4f64, Legal);
    setOperationAction(ISD::LOAD, MVT::v4i64, Legal);

    setOperationAction(ISD::FADD, MVT::v8f32, Legal);
    setOperationAction(ISD::FSUB, MVT::v8f32, Legal);
    setOperationAction(ISD::FMUL, MVT::v8f32, Legal);
    setOperationAction(ISD::FDIV, MVT::v8f32, Legal);
    setOperationAction(ISD::FSQRT, MVT::v8f32, Legal);
    setOperationAction(ISD::FFLOOR, MVT::v8f32, Legal);
    setOperationAction(ISD::FCEIL, MVT::v8f32, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v8f32, Legal);
    setOperationAction(ISD::FRINT, MVT::v8f32, Legal);
    setOperationAction(ISD::FNEARBYINT, MVT::v8f32, Legal);
    setOperationAction(ISD::FNEG, MVT::v8f32, Custom);
    setOperationAction(ISD::FABS, MVT::v8f32, Custom);

    setOperationAction(ISD::FADD, MVT::v4f64, Legal);
    setOperationAction(ISD::FSUB, MVT::v4f64, Legal);
    setOperationAction(ISD::FMUL, MVT::v4f64, Legal);
    setOperationAction(ISD::FDIV, MVT::v4f64, Legal);
    setOperationAction(ISD::FSQRT, MVT::v4f64, Legal);
    setOperationAction(ISD::FFLOOR, MVT::v4f64, Legal);
    setOperationAction(ISD::FCEIL, MVT::v4f64, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v4f64, Legal);
    setOperationAction(ISD::FRINT, MVT::v4f64, Legal);
    setOperationAction(ISD::FNEARBYINT, MVT::v4f64, Legal);
    setOperationAction(ISD::FNEG, MVT::v4f64, Custom);
    setOperationAction(ISD::FABS, MVT::v4f64, Custom);

    // (fp_to_int:v8i16 (v8f32 ..)) requires the result type to be promoted
    // even though v8i16 is a legal type.
    setOperationAction(ISD::FP_TO_SINT, MVT::v8i16, Promote);
    setOperationAction(ISD::FP_TO_UINT, MVT::v8i16, Promote);
    setOperationAction(ISD::FP_TO_SINT, MVT::v8i32, Legal);

    setOperationAction(ISD::SINT_TO_FP, MVT::v8i16, Promote);
    setOperationAction(ISD::SINT_TO_FP, MVT::v8i32, Legal);
    setOperationAction(ISD::FP_ROUND, MVT::v4f32, Legal);

    setOperationAction(ISD::UINT_TO_FP, MVT::v8i8, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::v8i16, Custom);

    for (MVT VT : MVT::fp_vector_valuetypes())
      setLoadExtAction(ISD::EXTLOAD, VT, MVT::v4f32, Legal);

    setOperationAction(ISD::SRL, MVT::v16i16, Custom);
    setOperationAction(ISD::SRL, MVT::v32i8, Custom);

    setOperationAction(ISD::SHL, MVT::v16i16, Custom);
    setOperationAction(ISD::SHL, MVT::v32i8, Custom);

    setOperationAction(ISD::SRA, MVT::v16i16, Custom);
    setOperationAction(ISD::SRA, MVT::v32i8, Custom);

    setOperationAction(ISD::SETCC, MVT::v32i8, Custom);
    setOperationAction(ISD::SETCC, MVT::v16i16, Custom);
    setOperationAction(ISD::SETCC, MVT::v8i32, Custom);
    setOperationAction(ISD::SETCC, MVT::v4i64, Custom);

    setOperationAction(ISD::SELECT, MVT::v4f64, Custom);
    setOperationAction(ISD::SELECT, MVT::v4i64, Custom);
    setOperationAction(ISD::SELECT, MVT::v8f32, Custom);

    setOperationAction(ISD::VSELECT, MVT::v4f64, Custom);
    setOperationAction(ISD::VSELECT, MVT::v4i64, Custom);
    setOperationAction(ISD::VSELECT, MVT::v8i32, Custom);
    setOperationAction(ISD::VSELECT, MVT::v8f32, Custom);

    setOperationAction(ISD::SIGN_EXTEND, MVT::v4i64, Custom);
    setOperationAction(ISD::SIGN_EXTEND, MVT::v8i32, Custom);
    setOperationAction(ISD::SIGN_EXTEND, MVT::v16i16, Custom);
    setOperationAction(ISD::ZERO_EXTEND, MVT::v4i64, Custom);
    setOperationAction(ISD::ZERO_EXTEND, MVT::v8i32, Custom);
    setOperationAction(ISD::ZERO_EXTEND, MVT::v16i16, Custom);
    setOperationAction(ISD::ANY_EXTEND, MVT::v4i64, Custom);
    setOperationAction(ISD::ANY_EXTEND, MVT::v8i32, Custom);
    setOperationAction(ISD::ANY_EXTEND, MVT::v16i16, Custom);
    setOperationAction(ISD::TRUNCATE, MVT::v16i8, Custom);
    setOperationAction(ISD::TRUNCATE, MVT::v8i16, Custom);
    setOperationAction(ISD::TRUNCATE, MVT::v4i32, Custom);
1266 if (Subtarget->hasFMA() || Subtarget->hasFMA4()) {
1267 setOperationAction(ISD::FMA, MVT::v8f32, Legal);
1268 setOperationAction(ISD::FMA, MVT::v4f64, Legal);
1269 setOperationAction(ISD::FMA, MVT::v4f32, Legal);
1270 setOperationAction(ISD::FMA, MVT::v2f64, Legal);
1271 setOperationAction(ISD::FMA, MVT::f32, Legal);
1272 setOperationAction(ISD::FMA, MVT::f64, Legal);
1275 if (Subtarget->hasInt256()) {
1276 setOperationAction(ISD::ADD, MVT::v4i64, Legal);
1277 setOperationAction(ISD::ADD, MVT::v8i32, Legal);
1278 setOperationAction(ISD::ADD, MVT::v16i16, Legal);
1279 setOperationAction(ISD::ADD, MVT::v32i8, Legal);
1281 setOperationAction(ISD::SUB, MVT::v4i64, Legal);
1282 setOperationAction(ISD::SUB, MVT::v8i32, Legal);
1283 setOperationAction(ISD::SUB, MVT::v16i16, Legal);
1284 setOperationAction(ISD::SUB, MVT::v32i8, Legal);
1286 setOperationAction(ISD::MUL, MVT::v4i64, Custom);
1287 setOperationAction(ISD::MUL, MVT::v8i32, Legal);
1288 setOperationAction(ISD::MUL, MVT::v16i16, Legal);
1289 // Don't lower v32i8 because there is no 128-bit byte mul
1291 setOperationAction(ISD::UMUL_LOHI, MVT::v8i32, Custom);
1292 setOperationAction(ISD::SMUL_LOHI, MVT::v8i32, Custom);
1293 setOperationAction(ISD::MULHU, MVT::v16i16, Legal);
1294 setOperationAction(ISD::MULHS, MVT::v16i16, Legal);
1296 setOperationAction(ISD::VSELECT, MVT::v16i16, Custom);
1297 setOperationAction(ISD::VSELECT, MVT::v32i8, Legal);
1299 // The custom lowering for UINT_TO_FP for v8i32 becomes interesting
1300 // when we have a 256-bit-wide blend with immediate.
1301 setOperationAction(ISD::UINT_TO_FP, MVT::v8i32, Custom);
1303 // Only provide customized ctpop vector bit twiddling for vector types we
1304 // know to perform better than using the popcnt instructions on each
1305 // vector element. If popcnt isn't supported, always provide the custom
1306 // version.
1307 if (!Subtarget->hasPOPCNT())
1308 setOperationAction(ISD::CTPOP, MVT::v4i64, Custom);
1310 // Custom CTPOP always performs better on natively supported v8i32
1311 setOperationAction(ISD::CTPOP, MVT::v8i32, Custom);
1313 // AVX2 also has wider vector sign/zero extending loads, VPMOV[SZ]X
1314 setLoadExtAction(ISD::SEXTLOAD, MVT::v16i16, MVT::v16i8, Legal);
1315 setLoadExtAction(ISD::SEXTLOAD, MVT::v8i32, MVT::v8i8, Legal);
1316 setLoadExtAction(ISD::SEXTLOAD, MVT::v4i64, MVT::v4i8, Legal);
1317 setLoadExtAction(ISD::SEXTLOAD, MVT::v8i32, MVT::v8i16, Legal);
1318 setLoadExtAction(ISD::SEXTLOAD, MVT::v4i64, MVT::v4i16, Legal);
1319 setLoadExtAction(ISD::SEXTLOAD, MVT::v4i64, MVT::v4i32, Legal);
1321 setLoadExtAction(ISD::ZEXTLOAD, MVT::v16i16, MVT::v16i8, Legal);
1322 setLoadExtAction(ISD::ZEXTLOAD, MVT::v8i32, MVT::v8i8, Legal);
1323 setLoadExtAction(ISD::ZEXTLOAD, MVT::v4i64, MVT::v4i8, Legal);
1324 setLoadExtAction(ISD::ZEXTLOAD, MVT::v8i32, MVT::v8i16, Legal);
1325 setLoadExtAction(ISD::ZEXTLOAD, MVT::v4i64, MVT::v4i16, Legal);
1326 setLoadExtAction(ISD::ZEXTLOAD, MVT::v4i64, MVT::v4i32, Legal);
1328 setOperationAction(ISD::ADD, MVT::v4i64, Custom);
1329 setOperationAction(ISD::ADD, MVT::v8i32, Custom);
1330 setOperationAction(ISD::ADD, MVT::v16i16, Custom);
1331 setOperationAction(ISD::ADD, MVT::v32i8, Custom);
1333 setOperationAction(ISD::SUB, MVT::v4i64, Custom);
1334 setOperationAction(ISD::SUB, MVT::v8i32, Custom);
1335 setOperationAction(ISD::SUB, MVT::v16i16, Custom);
1336 setOperationAction(ISD::SUB, MVT::v32i8, Custom);
1338 setOperationAction(ISD::MUL, MVT::v4i64, Custom);
1339 setOperationAction(ISD::MUL, MVT::v8i32, Custom);
1340 setOperationAction(ISD::MUL, MVT::v16i16, Custom);
1341 // Don't lower v32i8 because there is no 128-bit byte mul
1344 // In the customized shift lowering, the legal cases in AVX2 will be
1345 // recognized.
1346 setOperationAction(ISD::SRL, MVT::v4i64, Custom);
1347 setOperationAction(ISD::SRL, MVT::v8i32, Custom);
1349 setOperationAction(ISD::SHL, MVT::v4i64, Custom);
1350 setOperationAction(ISD::SHL, MVT::v8i32, Custom);
1352 setOperationAction(ISD::SRA, MVT::v8i32, Custom);
1354 // Custom lower several nodes for 256-bit types.
1355 for (MVT VT : MVT::vector_valuetypes()) {
1356 if (VT.getScalarSizeInBits() >= 32) {
1357 setOperationAction(ISD::MLOAD, VT, Legal);
1358 setOperationAction(ISD::MSTORE, VT, Legal);
1360 // Extract subvector is special because the value type
1361 // (result) is 128-bit but the source is 256-bit wide.
1362 if (VT.is128BitVector()) {
1363 setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
1365 // Do not attempt to custom lower other non-256-bit vectors
1366 if (!VT.is256BitVector())
1367 continue;
1369 setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
1370 setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
1371 setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
1372 setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
1373 setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Custom);
1374 setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
1375 setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
1378 // Promote v32i8, v16i16, v8i32 select, and, or, xor to v4i64.
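// A single canonical 256-bit integer type keeps the pattern count down:
// bitwise AND/OR/XOR, whole-register loads, and full-width selects behave
// identically no matter how the 256 bits are divided into elements, so the
// narrower integer vector types can all borrow the v4i64 patterns.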
1379 for (int i = MVT::v32i8; i != MVT::v4i64; ++i) {
1380 MVT VT = (MVT::SimpleValueType)i;
1382 // Do not attempt to promote non-256-bit vectors
1383 if (!VT.is256BitVector())
1384 continue;
1386 setOperationAction(ISD::AND, VT, Promote);
1387 AddPromotedToType (ISD::AND, VT, MVT::v4i64);
1388 setOperationAction(ISD::OR, VT, Promote);
1389 AddPromotedToType (ISD::OR, VT, MVT::v4i64);
1390 setOperationAction(ISD::XOR, VT, Promote);
1391 AddPromotedToType (ISD::XOR, VT, MVT::v4i64);
1392 setOperationAction(ISD::LOAD, VT, Promote);
1393 AddPromotedToType (ISD::LOAD, VT, MVT::v4i64);
1394 setOperationAction(ISD::SELECT, VT, Promote);
1395 AddPromotedToType (ISD::SELECT, VT, MVT::v4i64);
1399 if (!TM.Options.UseSoftFloat && Subtarget->hasAVX512()) {
1400 addRegisterClass(MVT::v16i32, &X86::VR512RegClass);
1401 addRegisterClass(MVT::v16f32, &X86::VR512RegClass);
1402 addRegisterClass(MVT::v8i64, &X86::VR512RegClass);
1403 addRegisterClass(MVT::v8f64, &X86::VR512RegClass);
1405 addRegisterClass(MVT::i1, &X86::VK1RegClass);
1406 addRegisterClass(MVT::v8i1, &X86::VK8RegClass);
1407 addRegisterClass(MVT::v16i1, &X86::VK16RegClass);
1409 for (MVT VT : MVT::fp_vector_valuetypes())
1410 setLoadExtAction(ISD::EXTLOAD, VT, MVT::v8f32, Legal);
1412 setOperationAction(ISD::BR_CC, MVT::i1, Expand);
1413 setOperationAction(ISD::SETCC, MVT::i1, Custom);
1414 setOperationAction(ISD::XOR, MVT::i1, Legal);
1415 setOperationAction(ISD::OR, MVT::i1, Legal);
1416 setOperationAction(ISD::AND, MVT::i1, Legal);
1417 setOperationAction(ISD::LOAD, MVT::v16f32, Legal);
1418 setOperationAction(ISD::LOAD, MVT::v8f64, Legal);
1419 setOperationAction(ISD::LOAD, MVT::v8i64, Legal);
1420 setOperationAction(ISD::LOAD, MVT::v16i32, Legal);
1421 setOperationAction(ISD::LOAD, MVT::v16i1, Legal);
1423 setOperationAction(ISD::FADD, MVT::v16f32, Legal);
1424 setOperationAction(ISD::FSUB, MVT::v16f32, Legal);
1425 setOperationAction(ISD::FMUL, MVT::v16f32, Legal);
1426 setOperationAction(ISD::FDIV, MVT::v16f32, Legal);
1427 setOperationAction(ISD::FSQRT, MVT::v16f32, Legal);
1428 setOperationAction(ISD::FNEG, MVT::v16f32, Custom);
1430 setOperationAction(ISD::FADD, MVT::v8f64, Legal);
1431 setOperationAction(ISD::FSUB, MVT::v8f64, Legal);
1432 setOperationAction(ISD::FMUL, MVT::v8f64, Legal);
1433 setOperationAction(ISD::FDIV, MVT::v8f64, Legal);
1434 setOperationAction(ISD::FSQRT, MVT::v8f64, Legal);
1435 setOperationAction(ISD::FNEG, MVT::v8f64, Custom);
1436 setOperationAction(ISD::FMA, MVT::v8f64, Legal);
1437 setOperationAction(ISD::FMA, MVT::v16f32, Legal);
1439 setOperationAction(ISD::FP_TO_SINT, MVT::i32, Legal);
1440 setOperationAction(ISD::FP_TO_UINT, MVT::i32, Legal);
1441 setOperationAction(ISD::SINT_TO_FP, MVT::i32, Legal);
1442 setOperationAction(ISD::UINT_TO_FP, MVT::i32, Legal);
1443 if (Subtarget->is64Bit()) {
1444 setOperationAction(ISD::FP_TO_UINT, MVT::i64, Legal);
1445 setOperationAction(ISD::FP_TO_SINT, MVT::i64, Legal);
1446 setOperationAction(ISD::SINT_TO_FP, MVT::i64, Legal);
1447 setOperationAction(ISD::UINT_TO_FP, MVT::i64, Legal);
1449 setOperationAction(ISD::FP_TO_SINT, MVT::v16i32, Legal);
1450 setOperationAction(ISD::FP_TO_UINT, MVT::v16i32, Legal);
1451 setOperationAction(ISD::FP_TO_UINT, MVT::v8i32, Legal);
1452 setOperationAction(ISD::FP_TO_UINT, MVT::v4i32, Legal);
1453 setOperationAction(ISD::SINT_TO_FP, MVT::v16i32, Legal);
1454 setOperationAction(ISD::SINT_TO_FP, MVT::v8i1, Custom);
1455 setOperationAction(ISD::SINT_TO_FP, MVT::v16i1, Custom);
1456 setOperationAction(ISD::SINT_TO_FP, MVT::v16i8, Promote);
1457 setOperationAction(ISD::SINT_TO_FP, MVT::v16i16, Promote);
1458 setOperationAction(ISD::UINT_TO_FP, MVT::v16i32, Legal);
1459 setOperationAction(ISD::UINT_TO_FP, MVT::v8i32, Legal);
1460 setOperationAction(ISD::UINT_TO_FP, MVT::v4i32, Legal);
1461 setOperationAction(ISD::FP_ROUND, MVT::v8f32, Legal);
1462 setOperationAction(ISD::FP_EXTEND, MVT::v8f32, Legal);
1464 setOperationAction(ISD::TRUNCATE, MVT::i1, Custom);
1465 setOperationAction(ISD::TRUNCATE, MVT::v16i8, Custom);
1466 setOperationAction(ISD::TRUNCATE, MVT::v8i32, Custom);
1467 setOperationAction(ISD::TRUNCATE, MVT::v8i1, Custom);
1468 setOperationAction(ISD::TRUNCATE, MVT::v16i1, Custom);
1469 setOperationAction(ISD::TRUNCATE, MVT::v16i16, Custom);
1470 setOperationAction(ISD::ZERO_EXTEND, MVT::v16i32, Custom);
1471 setOperationAction(ISD::ZERO_EXTEND, MVT::v8i64, Custom);
1472 setOperationAction(ISD::SIGN_EXTEND, MVT::v16i32, Custom);
1473 setOperationAction(ISD::SIGN_EXTEND, MVT::v8i64, Custom);
1474 setOperationAction(ISD::SIGN_EXTEND, MVT::v16i8, Custom);
1475 setOperationAction(ISD::SIGN_EXTEND, MVT::v8i16, Custom);
1476 setOperationAction(ISD::SIGN_EXTEND, MVT::v16i16, Custom);
1478 setOperationAction(ISD::FFLOOR, MVT::v16f32, Legal);
1479 setOperationAction(ISD::FFLOOR, MVT::v8f64, Legal);
1480 setOperationAction(ISD::FCEIL, MVT::v16f32, Legal);
1481 setOperationAction(ISD::FCEIL, MVT::v8f64, Legal);
1482 setOperationAction(ISD::FTRUNC, MVT::v16f32, Legal);
1483 setOperationAction(ISD::FTRUNC, MVT::v8f64, Legal);
1484 setOperationAction(ISD::FRINT, MVT::v16f32, Legal);
1485 setOperationAction(ISD::FRINT, MVT::v8f64, Legal);
1486 setOperationAction(ISD::FNEARBYINT, MVT::v16f32, Legal);
1487 setOperationAction(ISD::FNEARBYINT, MVT::v8f64, Legal);
1489 setOperationAction(ISD::CONCAT_VECTORS, MVT::v8f64, Custom);
1490 setOperationAction(ISD::CONCAT_VECTORS, MVT::v8i64, Custom);
1491 setOperationAction(ISD::CONCAT_VECTORS, MVT::v16f32, Custom);
1492 setOperationAction(ISD::CONCAT_VECTORS, MVT::v16i32, Custom);
1493 setOperationAction(ISD::CONCAT_VECTORS, MVT::v8i1, Custom);
1494 setOperationAction(ISD::CONCAT_VECTORS, MVT::v16i1, Legal);
1496 setOperationAction(ISD::SETCC, MVT::v16i1, Custom);
1497 setOperationAction(ISD::SETCC, MVT::v8i1, Custom);
1499 setOperationAction(ISD::MUL, MVT::v8i64, Custom);
1501 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v8i1, Custom);
1502 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v16i1, Custom);
1503 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v16i1, Custom);
1504 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i1, Custom);
1505 setOperationAction(ISD::BUILD_VECTOR, MVT::v8i1, Custom);
1506 setOperationAction(ISD::BUILD_VECTOR, MVT::v16i1, Custom);
1507 setOperationAction(ISD::SELECT, MVT::v8f64, Custom);
1508 setOperationAction(ISD::SELECT, MVT::v8i64, Custom);
1509 setOperationAction(ISD::SELECT, MVT::v16f32, Custom);
1511 setOperationAction(ISD::ADD, MVT::v8i64, Legal);
1512 setOperationAction(ISD::ADD, MVT::v16i32, Legal);
1514 setOperationAction(ISD::SUB, MVT::v8i64, Legal);
1515 setOperationAction(ISD::SUB, MVT::v16i32, Legal);
1517 setOperationAction(ISD::MUL, MVT::v16i32, Legal);
1519 setOperationAction(ISD::SRL, MVT::v8i64, Custom);
1520 setOperationAction(ISD::SRL, MVT::v16i32, Custom);
1522 setOperationAction(ISD::SHL, MVT::v8i64, Custom);
1523 setOperationAction(ISD::SHL, MVT::v16i32, Custom);
1525 setOperationAction(ISD::SRA, MVT::v8i64, Custom);
1526 setOperationAction(ISD::SRA, MVT::v16i32, Custom);
1528 setOperationAction(ISD::AND, MVT::v8i64, Legal);
1529 setOperationAction(ISD::OR, MVT::v8i64, Legal);
1530 setOperationAction(ISD::XOR, MVT::v8i64, Legal);
1531 setOperationAction(ISD::AND, MVT::v16i32, Legal);
1532 setOperationAction(ISD::OR, MVT::v16i32, Legal);
1533 setOperationAction(ISD::XOR, MVT::v16i32, Legal);
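// AVX-512 provides full 512-bit logical instructions for 32- and 64-bit
// elements (VPANDD/VPANDQ and friends), so unlike the 256-bit case above
// these do not need to be promoted to a single canonical type.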
1535 if (Subtarget->hasCDI()) {
1536 setOperationAction(ISD::CTLZ, MVT::v8i64, Legal);
1537 setOperationAction(ISD::CTLZ, MVT::v16i32, Legal);
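// The CDI (conflict detection) extension adds VPLZCNTD/VPLZCNTQ, which
// compute a per-element leading-zero count directly, hence CTLZ is Legal.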
1540 // Custom lower several nodes.
1541 for (MVT VT : MVT::vector_valuetypes()) {
1542 unsigned EltSize = VT.getVectorElementType().getSizeInBits();
1543 // Extract subvector is special because the value type
1544 // (result) is 256/128-bit but the source is 512-bit wide.
1545 if (VT.is128BitVector() || VT.is256BitVector()) {
1546 setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
1548 if (VT.getVectorElementType() == MVT::i1)
1549 setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Legal);
1551 // Do not attempt to custom lower other non-512-bit vectors
1552 if (!VT.is512BitVector())
1553 continue;
1555 if (EltSize >= 32) {
1556 setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
1557 setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
1558 setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
1559 setOperationAction(ISD::VSELECT, VT, Legal);
1560 setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
1561 setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Custom);
1562 setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
1563 setOperationAction(ISD::MLOAD, VT, Legal);
1564 setOperationAction(ISD::MSTORE, VT, Legal);
1567 for (int i = MVT::v32i8; i != MVT::v8i64; ++i) {
1568 MVT VT = (MVT::SimpleValueType)i;
1570 // Do not attempt to promote non-512-bit vectors.
1571 if (!VT.is512BitVector())
1572 continue;
1574 setOperationAction(ISD::SELECT, VT, Promote);
1575 AddPromotedToType (ISD::SELECT, VT, MVT::v8i64);
1579 if (!TM.Options.UseSoftFloat && Subtarget->hasBWI()) {
1580 addRegisterClass(MVT::v32i16, &X86::VR512RegClass);
1581 addRegisterClass(MVT::v64i8, &X86::VR512RegClass);
1583 addRegisterClass(MVT::v32i1, &X86::VK32RegClass);
1584 addRegisterClass(MVT::v64i1, &X86::VK64RegClass);
1586 setOperationAction(ISD::LOAD, MVT::v32i16, Legal);
1587 setOperationAction(ISD::LOAD, MVT::v64i8, Legal);
1588 setOperationAction(ISD::SETCC, MVT::v32i1, Custom);
1589 setOperationAction(ISD::SETCC, MVT::v64i1, Custom);
1590 setOperationAction(ISD::ADD, MVT::v32i16, Legal);
1591 setOperationAction(ISD::ADD, MVT::v64i8, Legal);
1592 setOperationAction(ISD::SUB, MVT::v32i16, Legal);
1593 setOperationAction(ISD::SUB, MVT::v64i8, Legal);
1594 setOperationAction(ISD::MUL, MVT::v32i16, Legal);
1596 for (int i = MVT::v32i8; i != MVT::v8i64; ++i) {
1597 const MVT VT = (MVT::SimpleValueType)i;
1599 const unsigned EltSize = VT.getVectorElementType().getSizeInBits();
1601 // Do not attempt to promote non-512-bit vectors.
1602 if (!VT.is512BitVector())
1603 continue;
1605 if (EltSize < 32) {
1606 setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
1607 setOperationAction(ISD::VSELECT, VT, Legal);
1612 if (!TM.Options.UseSoftFloat && Subtarget->hasVLX()) {
1613 addRegisterClass(MVT::v4i1, &X86::VK4RegClass);
1614 addRegisterClass(MVT::v2i1, &X86::VK2RegClass);
1616 setOperationAction(ISD::SETCC, MVT::v4i1, Custom);
1617 setOperationAction(ISD::SETCC, MVT::v2i1, Custom);
1618 setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v8i1, Legal);
1620 setOperationAction(ISD::AND, MVT::v8i32, Legal);
1621 setOperationAction(ISD::OR, MVT::v8i32, Legal);
1622 setOperationAction(ISD::XOR, MVT::v8i32, Legal);
1623 setOperationAction(ISD::AND, MVT::v4i32, Legal);
1624 setOperationAction(ISD::OR, MVT::v4i32, Legal);
1625 setOperationAction(ISD::XOR, MVT::v4i32, Legal);
1628 // SIGN_EXTEND_INREGs are evaluated by the extend type. Handle the expansion
1629 // of this type with custom code.
1630 for (MVT VT : MVT::vector_valuetypes())
1631 setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Custom);
1633 // We want to custom lower some of our intrinsics.
1634 setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
1635 setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);
1636 setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);
1637 if (!Subtarget->is64Bit())
1638 setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i64, Custom);
1640 // Only custom-lower 64-bit SADDO and friends on 64-bit because we don't
1641 // handle type legalization for these operations here.
1643 // FIXME: We really should do custom legalization for addition and
1644 // subtraction on x86-32 once PR3203 is fixed. We really can't do much better
1645 // than generic legalization for 64-bit multiplication-with-overflow, though.
1646 for (unsigned i = 0, e = 3+Subtarget->is64Bit(); i != e; ++i) {
1647 // Add/Sub/Mul with overflow operations are custom lowered.
1648 MVT VT = IntVTs[i];
1649 setOperationAction(ISD::SADDO, VT, Custom);
1650 setOperationAction(ISD::UADDO, VT, Custom);
1651 setOperationAction(ISD::SSUBO, VT, Custom);
1652 setOperationAction(ISD::USUBO, VT, Custom);
1653 setOperationAction(ISD::SMULO, VT, Custom);
1654 setOperationAction(ISD::UMULO, VT, Custom);
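// These are lowered to X86-specific arithmetic nodes whose second result is
// the flags register, so the overflow bit can be taken straight from EFLAGS
// instead of being recomputed from the operands.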
1658 if (!Subtarget->is64Bit()) {
1659 // These libcalls are not available in 32-bit.
1660 setLibcallName(RTLIB::SHL_I128, nullptr);
1661 setLibcallName(RTLIB::SRL_I128, nullptr);
1662 setLibcallName(RTLIB::SRA_I128, nullptr);
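// Clearing the libcall names forces the legalizer to expand 128-bit shifts
// inline rather than emit calls to helper routines that 32-bit runtime
// libraries do not provide.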
1665 // Combine sin / cos into one node or libcall if possible.
1666 if (Subtarget->hasSinCos()) {
1667 setLibcallName(RTLIB::SINCOS_F32, "sincosf");
1668 setLibcallName(RTLIB::SINCOS_F64, "sincos");
1669 if (Subtarget->isTargetDarwin()) {
1670 // For MacOSX, we don't want the normal expansion of a libcall to sincos.
1671 // We want to issue a libcall to __sincos_stret to avoid memory traffic.
1672 setOperationAction(ISD::FSINCOS, MVT::f64, Custom);
1673 setOperationAction(ISD::FSINCOS, MVT::f32, Custom);
1677 if (Subtarget->isTargetWin64()) {
1678 setOperationAction(ISD::SDIV, MVT::i128, Custom);
1679 setOperationAction(ISD::UDIV, MVT::i128, Custom);
1680 setOperationAction(ISD::SREM, MVT::i128, Custom);
1681 setOperationAction(ISD::UREM, MVT::i128, Custom);
1682 setOperationAction(ISD::SDIVREM, MVT::i128, Custom);
1683 setOperationAction(ISD::UDIVREM, MVT::i128, Custom);
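// The Win64 ABI passes integers wider than 64 bits by reference, so the
// 128-bit divide/remainder helpers need custom call lowering here instead of
// the generic libcall expansion.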
1686 // We have target-specific dag combine patterns for the following nodes:
1687 setTargetDAGCombine(ISD::VECTOR_SHUFFLE);
1688 setTargetDAGCombine(ISD::EXTRACT_VECTOR_ELT);
1689 setTargetDAGCombine(ISD::BITCAST);
1690 setTargetDAGCombine(ISD::VSELECT);
1691 setTargetDAGCombine(ISD::SELECT);
1692 setTargetDAGCombine(ISD::SHL);
1693 setTargetDAGCombine(ISD::SRA);
1694 setTargetDAGCombine(ISD::SRL);
1695 setTargetDAGCombine(ISD::OR);
1696 setTargetDAGCombine(ISD::AND);
1697 setTargetDAGCombine(ISD::ADD);
1698 setTargetDAGCombine(ISD::FADD);
1699 setTargetDAGCombine(ISD::FSUB);
1700 setTargetDAGCombine(ISD::FMA);
1701 setTargetDAGCombine(ISD::SUB);
1702 setTargetDAGCombine(ISD::LOAD);
1703 setTargetDAGCombine(ISD::MLOAD);
1704 setTargetDAGCombine(ISD::STORE);
1705 setTargetDAGCombine(ISD::MSTORE);
1706 setTargetDAGCombine(ISD::ZERO_EXTEND);
1707 setTargetDAGCombine(ISD::ANY_EXTEND);
1708 setTargetDAGCombine(ISD::SIGN_EXTEND);
1709 setTargetDAGCombine(ISD::SIGN_EXTEND_INREG);
1710 setTargetDAGCombine(ISD::TRUNCATE);
1711 setTargetDAGCombine(ISD::SINT_TO_FP);
1712 setTargetDAGCombine(ISD::SETCC);
1713 setTargetDAGCombine(ISD::INTRINSIC_WO_CHAIN);
1714 setTargetDAGCombine(ISD::BUILD_VECTOR);
1715 setTargetDAGCombine(ISD::MUL);
1716 setTargetDAGCombine(ISD::XOR);
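// Each opcode registered here is routed through PerformDAGCombine during DAG
// combining, giving the backend a chance to fold the node into a
// target-specific form.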
1718 computeRegisterProperties();
1720 // On Darwin, -Os means optimize for size without hurting performance,
1721 // so do not reduce the limit.
1722 MaxStoresPerMemset = 16; // For @llvm.memset -> sequence of stores
1723 MaxStoresPerMemsetOptSize = Subtarget->isTargetDarwin() ? 16 : 8;
1724 MaxStoresPerMemcpy = 8; // For @llvm.memcpy -> sequence of stores
1725 MaxStoresPerMemcpyOptSize = Subtarget->isTargetDarwin() ? 8 : 4;
1726 MaxStoresPerMemmove = 8; // For @llvm.memmove -> sequence of stores
1727 MaxStoresPerMemmoveOptSize = Subtarget->isTargetDarwin() ? 8 : 4;
1728 setPrefLoopAlignment(4); // 2^4 bytes.
1730 // Predictable cmovs don't hurt on Atom because it's in-order.
1731 PredictableSelectIsExpensive = !Subtarget->isAtom();
1732 EnableExtLdPromotion = true;
1733 setPrefFunctionAlignment(4); // 2^4 bytes.
1735 verifyIntrinsicTables();
1738 // This has so far only been implemented for 64-bit MachO.
1739 bool X86TargetLowering::useLoadStackGuardNode() const {
1740 return Subtarget->isTargetMachO() && Subtarget->is64Bit();
1743 TargetLoweringBase::LegalizeTypeAction
1744 X86TargetLowering::getPreferredVectorAction(EVT VT) const {
1745 if (ExperimentalVectorWideningLegalization &&
1746 VT.getVectorNumElements() != 1 &&
1747 VT.getVectorElementType().getSimpleVT() != MVT::i1)
1748 return TypeWidenVector;
1750 return TargetLoweringBase::getPreferredVectorAction(VT);
1753 EVT X86TargetLowering::getSetCCResultType(LLVMContext &, EVT VT) const {
1754 if (!VT.isVector())
1755 return Subtarget->hasAVX512() ? MVT::i1 : MVT::i8;
1757 const unsigned NumElts = VT.getVectorNumElements();
1758 const EVT EltVT = VT.getVectorElementType();
1759 if (VT.is512BitVector()) {
1760 if (Subtarget->hasAVX512())
1761 if (EltVT == MVT::i32 || EltVT == MVT::i64 ||
1762 EltVT == MVT::f32 || EltVT == MVT::f64)
1763 switch (NumElts) {
1764 case 8: return MVT::v8i1;
1765 case 16: return MVT::v16i1;
1766 }
1767 if (Subtarget->hasBWI())
1768 if (EltVT == MVT::i8 || EltVT == MVT::i16)
1769 switch (NumElts) {
1770 case 32: return MVT::v32i1;
1771 case 64: return MVT::v64i1;
1772 }
1775 if (VT.is256BitVector() || VT.is128BitVector()) {
1776 if (Subtarget->hasVLX())
1777 if (EltVT == MVT::i32 || EltVT == MVT::i64 ||
1778 EltVT == MVT::f32 || EltVT == MVT::f64)
1779 switch (NumElts) {
1780 case 2: return MVT::v2i1;
1781 case 4: return MVT::v4i1;
1782 case 8: return MVT::v8i1;
1783 }
1784 if (Subtarget->hasBWI() && Subtarget->hasVLX())
1785 if (EltVT == MVT::i8 || EltVT == MVT::i16)
1786 switch (NumElts) {
1787 case 8: return MVT::v8i1;
1788 case 16: return MVT::v16i1;
1789 case 32: return MVT::v32i1;
1790 }
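// Without AVX-512 mask registers, a vector SETCC yields all-ones/all-zeros
// lanes, so the fallback below simply uses the integer vector with the same
// number and width of elements as the operand type.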
1793 return VT.changeVectorElementTypeToInteger();
1796 /// Helper for getByValTypeAlignment to determine
1797 /// the desired ByVal argument alignment.
1798 static void getMaxByValAlign(Type *Ty, unsigned &MaxAlign) {
1801 if (VectorType *VTy = dyn_cast<VectorType>(Ty)) {
1802 if (VTy->getBitWidth() == 128)
1803 MaxAlign = 16;
1804 } else if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
1805 unsigned EltAlign = 0;
1806 getMaxByValAlign(ATy->getElementType(), EltAlign);
1807 if (EltAlign > MaxAlign)
1808 MaxAlign = EltAlign;
1809 } else if (StructType *STy = dyn_cast<StructType>(Ty)) {
1810 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
1811 unsigned EltAlign = 0;
1812 getMaxByValAlign(STy->getElementType(i), EltAlign);
1813 if (EltAlign > MaxAlign)
1814 MaxAlign = EltAlign;
1821 /// Return the desired alignment for ByVal aggregate
1822 /// function arguments in the caller parameter area. For X86, aggregates
1823 /// that contain SSE vectors are placed at 16-byte boundaries while the rest
1824 /// are at 4-byte boundaries.
1825 unsigned X86TargetLowering::getByValTypeAlignment(Type *Ty) const {
1826 if (Subtarget->is64Bit()) {
1827 // Max of 8 and alignment of type.
1828 unsigned TyAlign = TD->getABITypeAlignment(Ty);
1829 if (TyAlign > 8)
1830 return TyAlign;
1831 return 8;
1832 }
1834 unsigned Align = 4;
1835 if (Subtarget->hasSSE1())
1836 getMaxByValAlign(Ty, Align);
1837 return Align;
1838 }
1840 /// Returns the target specific optimal type for load
1841 /// and store operations as a result of memset, memcpy, and memmove
1842 /// lowering. If DstAlign is zero, the destination alignment can satisfy any
1843 /// constraint. Similarly, if SrcAlign is zero there is no need to check it
1844 /// against an alignment requirement,
1845 /// probably because the source does not need to be loaded. If 'IsMemset' is
1846 /// true, that means it's expanding a memset. If 'ZeroMemset' is true, that
1847 /// means it's a memset of zero. 'MemcpyStrSrc' indicates whether the memcpy
1848 /// source is constant so it does not need to be loaded.
1849 /// It returns EVT::Other if the type should be determined using generic
1850 /// target-independent logic.
1852 X86TargetLowering::getOptimalMemOpType(uint64_t Size,
1853 unsigned DstAlign, unsigned SrcAlign,
1854 bool IsMemset, bool ZeroMemset,
1855 bool MemcpyStrSrc,
1856 MachineFunction &MF) const {
1857 const Function *F = MF.getFunction();
1858 if ((!IsMemset || ZeroMemset) &&
1859 !F->hasFnAttribute(Attribute::NoImplicitFloat)) {
1860 if (Size >= 16 &&
1861 (Subtarget->isUnalignedMemAccessFast() ||
1862 ((DstAlign == 0 || DstAlign >= 16) &&
1863 (SrcAlign == 0 || SrcAlign >= 16)))) {
1864 if (Size >= 32) {
1865 if (Subtarget->hasInt256())
1866 return MVT::v8i32;
1867 if (Subtarget->hasFp256())
1868 return MVT::v8f32;
1869 }
1870 if (Subtarget->hasSSE2())
1871 return MVT::v4i32;
1872 if (Subtarget->hasSSE1())
1873 return MVT::v4f32;
1874 } else if (!MemcpyStrSrc && Size >= 8 &&
1875 !Subtarget->is64Bit() &&
1876 Subtarget->hasSSE2()) {
1877 // Do not use f64 to lower memcpy if source is string constant. It's
1878 // better to use i32 to avoid the loads.
1879 return MVT::f64;
1880 }
1881 }
1882 if (Subtarget->is64Bit() && Size >= 8)
1883 return MVT::i64;
1884 return MVT::i32;
1885 }
1887 bool X86TargetLowering::isSafeMemOpType(MVT VT) const {
1888 if (VT == MVT::f32)
1889 return X86ScalarSSEf32;
1890 else if (VT == MVT::f64)
1891 return X86ScalarSSEf64;
1892 return true;
1893 }
1896 X86TargetLowering::allowsMisalignedMemoryAccesses(EVT VT,
1901 *Fast = Subtarget->isUnalignedMemAccessFast();
1905 /// Return the entry encoding for a jump table in the
1906 /// current function. The returned value is a member of the
1907 /// MachineJumpTableInfo::JTEntryKind enum.
1908 unsigned X86TargetLowering::getJumpTableEncoding() const {
1909 // In GOT pic mode, each entry in the jump table is emitted as a @GOTOFF
1910 // symbol.
1911 if (getTargetMachine().getRelocationModel() == Reloc::PIC_ &&
1912 Subtarget->isPICStyleGOT())
1913 return MachineJumpTableInfo::EK_Custom32;
1915 // Otherwise, use the normal jump table encoding heuristics.
1916 return TargetLowering::getJumpTableEncoding();
1920 X86TargetLowering::LowerCustomJumpTableEntry(const MachineJumpTableInfo *MJTI,
1921 const MachineBasicBlock *MBB,
1922 unsigned uid,MCContext &Ctx) const{
1923 assert(MBB->getParent()->getTarget().getRelocationModel() == Reloc::PIC_ &&
1924 Subtarget->isPICStyleGOT());
1925 // In 32-bit ELF systems, our jump table entries are formed with @GOTOFF
1926 // entries.
1927 return MCSymbolRefExpr::Create(MBB->getSymbol(),
1928 MCSymbolRefExpr::VK_GOTOFF, Ctx);
1931 /// Returns relocation base for the given PIC jumptable.
1932 SDValue X86TargetLowering::getPICJumpTableRelocBase(SDValue Table,
1933 SelectionDAG &DAG) const {
1934 if (!Subtarget->is64Bit())
1935 // This doesn't have SDLoc associated with it, but is not really the
1936 // same as a Register.
1937 return DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), getPointerTy());
1938 return Table;
1939 }
1941 /// This returns the relocation base for the given PIC jumptable,
1942 /// the same as getPICJumpTableRelocBase, but as an MCExpr.
1943 const MCExpr *X86TargetLowering::
1944 getPICJumpTableRelocBaseExpr(const MachineFunction *MF, unsigned JTI,
1945 MCContext &Ctx) const {
1946 // X86-64 uses RIP relative addressing based on the jump table label.
1947 if (Subtarget->isPICStyleRIPRel())
1948 return TargetLowering::getPICJumpTableRelocBaseExpr(MF, JTI, Ctx);
1950 // Otherwise, the reference is relative to the PIC base.
1951 return MCSymbolRefExpr::Create(MF->getPICBaseSymbol(), Ctx);
1954 // FIXME: Why is this routine here? Move it to RegInfo!
1955 std::pair<const TargetRegisterClass*, uint8_t>
1956 X86TargetLowering::findRepresentativeClass(MVT VT) const{
1957 const TargetRegisterClass *RRC = nullptr;
1958 uint8_t Cost = 1;
1959 switch (VT.SimpleTy) {
1960 default:
1961 return TargetLowering::findRepresentativeClass(VT);
1962 case MVT::i8: case MVT::i16: case MVT::i32: case MVT::i64:
1963 RRC = Subtarget->is64Bit() ? &X86::GR64RegClass : &X86::GR32RegClass;
1964 break;
1965 case MVT::x86mmx:
1966 RRC = &X86::VR64RegClass;
1967 break;
1968 case MVT::f32: case MVT::f64:
1969 case MVT::v16i8: case MVT::v8i16: case MVT::v4i32: case MVT::v2i64:
1970 case MVT::v4f32: case MVT::v2f64:
1971 case MVT::v32i8: case MVT::v8i32: case MVT::v4i64: case MVT::v8f32:
1973 RRC = &X86::VR128RegClass;
1976 return std::make_pair(RRC, Cost);
1979 bool X86TargetLowering::getStackCookieLocation(unsigned &AddressSpace,
1980 unsigned &Offset) const {
1981 if (!Subtarget->isTargetLinux())
1982 return false;
1984 if (Subtarget->is64Bit()) {
1985 // %fs:0x28, unless we're using a Kernel code model, in which case it's %gs:
1986 Offset = 0x28;
1987 if (getTargetMachine().getCodeModel() == CodeModel::Kernel)
1999 bool X86TargetLowering::isNoopAddrSpaceCast(unsigned SrcAS,
2000 unsigned DestAS) const {
2001 assert(SrcAS != DestAS && "Expected different address spaces!");
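// Address spaces 256 and up are used for the x86 segment-relative spaces
// (e.g. gs and fs), which really change the address computation; casts
// between ordinary (< 256) address spaces are no-ops.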
2003 return SrcAS < 256 && DestAS < 256;
2006 //===----------------------------------------------------------------------===//
2007 // Return Value Calling Convention Implementation
2008 //===----------------------------------------------------------------------===//
2010 #include "X86GenCallingConv.inc"
2013 X86TargetLowering::CanLowerReturn(CallingConv::ID CallConv,
2014 MachineFunction &MF, bool isVarArg,
2015 const SmallVectorImpl<ISD::OutputArg> &Outs,
2016 LLVMContext &Context) const {
2017 SmallVector<CCValAssign, 16> RVLocs;
2018 CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context);
2019 return CCInfo.CheckReturn(Outs, RetCC_X86);
2022 const MCPhysReg *X86TargetLowering::getScratchRegisters(CallingConv::ID) const {
2023 static const MCPhysReg ScratchRegs[] = { X86::R11, 0 };
2024 return ScratchRegs;
2025 }
2028 X86TargetLowering::LowerReturn(SDValue Chain,
2029 CallingConv::ID CallConv, bool isVarArg,
2030 const SmallVectorImpl<ISD::OutputArg> &Outs,
2031 const SmallVectorImpl<SDValue> &OutVals,
2032 SDLoc dl, SelectionDAG &DAG) const {
2033 MachineFunction &MF = DAG.getMachineFunction();
2034 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
2036 SmallVector<CCValAssign, 16> RVLocs;
2037 CCState CCInfo(CallConv, isVarArg, MF, RVLocs, *DAG.getContext());
2038 CCInfo.AnalyzeReturn(Outs, RetCC_X86);
2040 SDValue Flag;
2041 SmallVector<SDValue, 6> RetOps;
2042 RetOps.push_back(Chain); // Operand #0 = Chain (updated below)
2043 // Operand #1 = Bytes To Pop
2044 RetOps.push_back(DAG.getTargetConstant(FuncInfo->getBytesToPopOnReturn(),
2047 // Copy the result values into the output registers.
2048 for (unsigned i = 0; i != RVLocs.size(); ++i) {
2049 CCValAssign &VA = RVLocs[i];
2050 assert(VA.isRegLoc() && "Can only return in registers!");
2051 SDValue ValToCopy = OutVals[i];
2052 EVT ValVT = ValToCopy.getValueType();
2054 // Promote values to the appropriate types.
2055 if (VA.getLocInfo() == CCValAssign::SExt)
2056 ValToCopy = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), ValToCopy);
2057 else if (VA.getLocInfo() == CCValAssign::ZExt)
2058 ValToCopy = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), ValToCopy);
2059 else if (VA.getLocInfo() == CCValAssign::AExt)
2060 ValToCopy = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), ValToCopy);
2061 else if (VA.getLocInfo() == CCValAssign::BCvt)
2062 ValToCopy = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), ValToCopy);
2064 assert(VA.getLocInfo() != CCValAssign::FPExt &&
2065 "Unexpected FP-extend for return value.");
2067 // If this is x86-64, and we disabled SSE, we can't return FP values,
2068 // or SSE or MMX vectors.
2069 if ((ValVT == MVT::f32 || ValVT == MVT::f64 ||
2070 VA.getLocReg() == X86::XMM0 || VA.getLocReg() == X86::XMM1) &&
2071 (Subtarget->is64Bit() && !Subtarget->hasSSE1())) {
2072 report_fatal_error("SSE register return with SSE disabled");
2074 // Likewise we can't return F64 values with SSE1 only. gcc does so, but
2075 // llvm-gcc has never done it right and no one has noticed, so this
2076 // should be OK for now.
2077 if (ValVT == MVT::f64 &&
2078 (Subtarget->is64Bit() && !Subtarget->hasSSE2()))
2079 report_fatal_error("SSE2 register return with SSE2 disabled");
2081 // Returns in ST0/ST1 are handled specially: these are pushed as operands to
2082 // the RET instruction and handled by the FP Stackifier.
2083 if (VA.getLocReg() == X86::FP0 ||
2084 VA.getLocReg() == X86::FP1) {
2085 // If this is a copy from an xmm register to ST(0), use an FPExtend to
2086 // change the value to the FP stack register class.
2087 if (isScalarFPTypeInSSEReg(VA.getValVT()))
2088 ValToCopy = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f80, ValToCopy);
2089 RetOps.push_back(ValToCopy);
2090 // Don't emit a copytoreg.
2091 continue;
2092 }
2094 // 64-bit vector (MMX) values are returned in XMM0 / XMM1 except for v1i64
2095 // which is returned in RAX / RDX.
2096 if (Subtarget->is64Bit()) {
2097 if (ValVT == MVT::x86mmx) {
2098 if (VA.getLocReg() == X86::XMM0 || VA.getLocReg() == X86::XMM1) {
2099 ValToCopy = DAG.getNode(ISD::BITCAST, dl, MVT::i64, ValToCopy);
2100 ValToCopy = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64,
2102 // If we don't have SSE2 available, convert to v4f32 so the generated
2103 // register is legal.
2104 if (!Subtarget->hasSSE2())
2105 ValToCopy = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, ValToCopy);
2110 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), ValToCopy, Flag);
2111 Flag = Chain.getValue(1);
2112 RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
2115 // The x86-64 ABIs require that for returning structs by value we copy
2116 // the sret argument into %rax/%eax (depending on ABI) for the return.
2117 // Win32 requires us to put the sret argument to %eax as well.
2118 // We saved the argument into a virtual register in the entry block,
2119 // so now we copy the value out and into %rax/%eax.
2121 // Checking Function.hasStructRetAttr() here is insufficient because the IR
2122 // may not have an explicit sret argument. If FuncInfo.CanLowerReturn is
2123 // false, then an sret argument may be implicitly inserted in the SelDAG. In
2124 // either case FuncInfo->setSRetReturnReg() will have been called.
2125 if (unsigned SRetReg = FuncInfo->getSRetReturnReg()) {
2126 assert((Subtarget->is64Bit() || Subtarget->isTargetKnownWindowsMSVC()) &&
2127 "No need for an sret register");
2128 SDValue Val = DAG.getCopyFromReg(Chain, dl, SRetReg, getPointerTy());
2130 unsigned RetValReg
2131 = (Subtarget->is64Bit() && !Subtarget->isTarget64BitILP32()) ?
2132 X86::RAX : X86::EAX;
2133 Chain = DAG.getCopyToReg(Chain, dl, RetValReg, Val, Flag);
2134 Flag = Chain.getValue(1);
2136 // RAX/EAX now acts like a return value.
2137 RetOps.push_back(DAG.getRegister(RetValReg, getPointerTy()));
2140 RetOps[0] = Chain; // Update chain.
2142 // Add the flag if we have it.
2143 if (Flag.getNode())
2144 RetOps.push_back(Flag);
2146 return DAG.getNode(X86ISD::RET_FLAG, dl, MVT::Other, RetOps);
2149 bool X86TargetLowering::isUsedByReturnOnly(SDNode *N, SDValue &Chain) const {
2150 if (N->getNumValues() != 1)
2151 return false;
2152 if (!N->hasNUsesOfValue(1, 0))
2153 return false;
2155 SDValue TCChain = Chain;
2156 SDNode *Copy = *N->use_begin();
2157 if (Copy->getOpcode() == ISD::CopyToReg) {
2158 // If the copy has a glue operand, we conservatively assume it isn't safe to
2159 // perform a tail call.
2160 if (Copy->getOperand(Copy->getNumOperands()-1).getValueType() == MVT::Glue)
2162 TCChain = Copy->getOperand(0);
2163 } else if (Copy->getOpcode() != ISD::FP_EXTEND)
2164 return false;
2166 bool HasRet = false;
2167 for (SDNode::use_iterator UI = Copy->use_begin(), UE = Copy->use_end();
2168 UI != UE; ++UI) {
2169 if (UI->getOpcode() != X86ISD::RET_FLAG)
2170 return false;
2171 // If we are returning more than one value, we can definitely
2172 // not make a tail call; see PR19530.
2173 if (UI->getNumOperands() > 4)
2174 return false;
2175 if (UI->getNumOperands() == 4 &&
2176 UI->getOperand(UI->getNumOperands()-1).getValueType() != MVT::Glue)
2177 return false;
2178 HasRet = true;
2189 X86TargetLowering::getTypeForExtArgOrReturn(LLVMContext &Context, EVT VT,
2190 ISD::NodeType ExtendKind) const {
2191 MVT ReturnMVT;
2192 // TODO: Is this also valid on 32-bit?
2193 if (Subtarget->is64Bit() && VT == MVT::i1 && ExtendKind == ISD::ZERO_EXTEND)
2194 ReturnMVT = MVT::i8;
2195 else
2196 ReturnMVT = MVT::i32;
2198 EVT MinVT = getRegisterType(Context, ReturnMVT);
2199 return VT.bitsLT(MinVT) ? MinVT : VT;
2202 /// Lower the result values of a call into the
2203 /// appropriate copies out of appropriate physical registers.
2206 X86TargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
2207 CallingConv::ID CallConv, bool isVarArg,
2208 const SmallVectorImpl<ISD::InputArg> &Ins,
2209 SDLoc dl, SelectionDAG &DAG,
2210 SmallVectorImpl<SDValue> &InVals) const {
2212 // Assign locations to each value returned by this call.
2213 SmallVector<CCValAssign, 16> RVLocs;
2214 bool Is64Bit = Subtarget->is64Bit();
2215 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
2217 CCInfo.AnalyzeCallResult(Ins, RetCC_X86);
2219 // Copy all of the result registers out of their specified physreg.
2220 for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
2221 CCValAssign &VA = RVLocs[i];
2222 EVT CopyVT = VA.getValVT();
2224 // If this is x86-64, and we disabled SSE, we can't return FP values
2225 if ((CopyVT == MVT::f32 || CopyVT == MVT::f64) &&
2226 ((Is64Bit || Ins[i].Flags.isInReg()) && !Subtarget->hasSSE1())) {
2227 report_fatal_error("SSE register return with SSE disabled");
2230 // If we prefer to use the value in xmm registers, copy it out as f80 and
2231 // use a truncate to move it from fp stack reg to xmm reg.
2232 if ((VA.getLocReg() == X86::FP0 || VA.getLocReg() == X86::FP1) &&
2233 isScalarFPTypeInSSEReg(VA.getValVT()))
2234 CopyVT = MVT::f80;
2236 Chain = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(),
2237 CopyVT, InFlag).getValue(1);
2238 SDValue Val = Chain.getValue(0);
2240 if (CopyVT != VA.getValVT())
2241 Val = DAG.getNode(ISD::FP_ROUND, dl, VA.getValVT(), Val,
2242 // This truncation won't change the value.
2243 DAG.getIntPtrConstant(1));
2245 InFlag = Chain.getValue(2);
2246 InVals.push_back(Val);
2252 //===----------------------------------------------------------------------===//
2253 // C & StdCall & Fast Calling Convention implementation
2254 //===----------------------------------------------------------------------===//
2255 // The StdCall calling convention seems to be standard for many Windows API
2256 // routines and the like. It differs from the C calling convention just a little:
2257 // the callee should clean up the stack, not the caller. Symbols should also be
2258 // decorated in some fancy way :) It doesn't support any vector arguments.
2259 // For info on fast calling convention see Fast Calling Convention (tail call)
2260 // implementation LowerX86_32FastCCCallTo.
2262 /// CallIsStructReturn - Determines whether a call uses struct return
2263 /// semantics.
2264 enum StructReturnType {
2265 NotStructReturn,
2266 RegStructReturn,
2267 StackStructReturn
2268 };
2269 static StructReturnType
2270 callIsStructReturn(const SmallVectorImpl<ISD::OutputArg> &Outs) {
2271 if (Outs.empty())
2272 return NotStructReturn;
2274 const ISD::ArgFlagsTy &Flags = Outs[0].Flags;
2275 if (!Flags.isSRet())
2276 return NotStructReturn;
2277 if (Flags.isInReg())
2278 return RegStructReturn;
2279 return StackStructReturn;
2282 /// Determines whether a function uses struct return semantics.
2283 static StructReturnType
2284 argsAreStructReturn(const SmallVectorImpl<ISD::InputArg> &Ins) {
2285 if (Ins.empty())
2286 return NotStructReturn;
2288 const ISD::ArgFlagsTy &Flags = Ins[0].Flags;
2289 if (!Flags.isSRet())
2290 return NotStructReturn;
2291 if (Flags.isInReg())
2292 return RegStructReturn;
2293 return StackStructReturn;
2296 /// Make a copy of an aggregate at address specified by "Src" to address
2297 /// "Dst" with size and alignment information specified by the specific
2298 /// parameter attribute. The copy will be passed as a byval function parameter.
2299 static SDValue
2300 CreateCopyOfByValArgument(SDValue Src, SDValue Dst, SDValue Chain,
2301 ISD::ArgFlagsTy Flags, SelectionDAG &DAG,
2302 SDLoc dl) {
2303 SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), MVT::i32);
2305 return DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode, Flags.getByValAlign(),
2306 /*isVolatile*/false, /*AlwaysInline=*/true,
2307 MachinePointerInfo(), MachinePointerInfo());
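// AlwaysInline=true guarantees the byval copy is expanded into loads and
// stores right here; emitting a real memcpy call would be unsafe while we
// are in the middle of lowering another call's argument sequence.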
2310 /// Return true if the calling convention is one that
2311 /// supports tail call optimization.
2312 static bool IsTailCallConvention(CallingConv::ID CC) {
2313 return (CC == CallingConv::Fast || CC == CallingConv::GHC ||
2314 CC == CallingConv::HiPE);
2317 /// \brief Return true if the calling convention is a C calling convention.
2318 static bool IsCCallConvention(CallingConv::ID CC) {
2319 return (CC == CallingConv::C || CC == CallingConv::X86_64_Win64 ||
2320 CC == CallingConv::X86_64_SysV);
2323 bool X86TargetLowering::mayBeEmittedAsTailCall(CallInst *CI) const {
2324 if (!CI->isTailCall() || getTargetMachine().Options.DisableTailCalls)
2325 return false;
2327 CallSite CS(CI);
2328 CallingConv::ID CalleeCC = CS.getCallingConv();
2329 if (!IsTailCallConvention(CalleeCC) && !IsCCallConvention(CalleeCC))
2330 return false;
2332 return true;
2333 }
2335 /// Return true if the function is being made into
2336 /// a tailcall target by changing its ABI.
2337 static bool FuncIsMadeTailCallSafe(CallingConv::ID CC,
2338 bool GuaranteedTailCallOpt) {
2339 return GuaranteedTailCallOpt && IsTailCallConvention(CC);
2343 X86TargetLowering::LowerMemArgument(SDValue Chain,
2344 CallingConv::ID CallConv,
2345 const SmallVectorImpl<ISD::InputArg> &Ins,
2346 SDLoc dl, SelectionDAG &DAG,
2347 const CCValAssign &VA,
2348 MachineFrameInfo *MFI,
2349 unsigned i) const {
2350 // Create the nodes corresponding to a load from this parameter slot.
2351 ISD::ArgFlagsTy Flags = Ins[i].Flags;
2352 bool AlwaysUseMutable = FuncIsMadeTailCallSafe(
2353 CallConv, DAG.getTarget().Options.GuaranteedTailCallOpt);
2354 bool isImmutable = !AlwaysUseMutable && !Flags.isByVal();
2355 EVT ValVT;
2357 // If value is passed by pointer we have address passed instead of the value
2358 // itself.
2359 if (VA.getLocInfo() == CCValAssign::Indirect)
2360 ValVT = VA.getLocVT();
2361 else
2362 ValVT = VA.getValVT();
2364 // FIXME: For now, all byval parameter objects are marked mutable. This can be
2365 // changed with more analysis.
2366 // In case of tail call optimization mark all arguments mutable. Since they
2367 // could be overwritten by lowering of arguments in case of a tail call.
2368 if (Flags.isByVal()) {
2369 unsigned Bytes = Flags.getByValSize();
2370 if (Bytes == 0) Bytes = 1; // Don't create zero-sized stack objects.
2371 int FI = MFI->CreateFixedObject(Bytes, VA.getLocMemOffset(), isImmutable);
2372 return DAG.getFrameIndex(FI, getPointerTy());
2373 } else {
2374 int FI = MFI->CreateFixedObject(ValVT.getSizeInBits()/8,
2375 VA.getLocMemOffset(), isImmutable);
2376 SDValue FIN = DAG.getFrameIndex(FI, getPointerTy());
2377 return DAG.getLoad(ValVT, dl, Chain, FIN,
2378 MachinePointerInfo::getFixedStack(FI),
2379 false, false, false, 0);
2383 // FIXME: Get this from tablegen.
2384 static ArrayRef<MCPhysReg> get64BitArgumentGPRs(CallingConv::ID CallConv,
2385 const X86Subtarget *Subtarget) {
2386 assert(Subtarget->is64Bit());
2388 if (Subtarget->isCallingConvWin64(CallConv)) {
2389 static const MCPhysReg GPR64ArgRegsWin64[] = {
2390 X86::RCX, X86::RDX, X86::R8, X86::R9
2392 return makeArrayRef(std::begin(GPR64ArgRegsWin64), std::end(GPR64ArgRegsWin64));
2395 static const MCPhysReg GPR64ArgRegs64Bit[] = {
2396 X86::RDI, X86::RSI, X86::RDX, X86::RCX, X86::R8, X86::R9
2398 return makeArrayRef(std::begin(GPR64ArgRegs64Bit), std::end(GPR64ArgRegs64Bit));
2401 // FIXME: Get this from tablegen.
2402 static ArrayRef<MCPhysReg> get64BitArgumentXMMs(MachineFunction &MF,
2403 CallingConv::ID CallConv,
2404 const X86Subtarget *Subtarget) {
2405 assert(Subtarget->is64Bit());
2406 if (Subtarget->isCallingConvWin64(CallConv)) {
2407 // The XMM registers which might contain var arg parameters are shadowed
2408 // in their paired GPR. So we only need to save the GPR to their home
2410 // TODO: __vectorcall will change this.
2414 const Function *Fn = MF.getFunction();
2415 bool NoImplicitFloatOps = Fn->hasFnAttribute(Attribute::NoImplicitFloat);
2416 assert(!(MF.getTarget().Options.UseSoftFloat && NoImplicitFloatOps) &&
2417 "SSE register cannot be used when SSE is disabled!");
2418 if (MF.getTarget().Options.UseSoftFloat || NoImplicitFloatOps ||
2419 !Subtarget->hasSSE1())
2420 // Kernel mode asks for SSE to be disabled, so there are no XMM argument
2424 static const MCPhysReg XMMArgRegs64Bit[] = {
2425 X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
2426 X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7
2428 return makeArrayRef(std::begin(XMMArgRegs64Bit), std::end(XMMArgRegs64Bit));
2432 X86TargetLowering::LowerFormalArguments(SDValue Chain,
2433 CallingConv::ID CallConv,
2435 const SmallVectorImpl<ISD::InputArg> &Ins,
2438 SmallVectorImpl<SDValue> &InVals)
2440 MachineFunction &MF = DAG.getMachineFunction();
2441 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
2443 const Function* Fn = MF.getFunction();
2444 if (Fn->hasExternalLinkage() &&
2445 Subtarget->isTargetCygMing() &&
2446 Fn->getName() == "main")
2447 FuncInfo->setForceFramePointer(true);
2449 MachineFrameInfo *MFI = MF.getFrameInfo();
2450 bool Is64Bit = Subtarget->is64Bit();
2451 bool IsWin64 = Subtarget->isCallingConvWin64(CallConv);
2453 assert(!(isVarArg && IsTailCallConvention(CallConv)) &&
2454 "Var args not supported with calling convention fastcc, ghc or hipe");
2456 // Assign locations to all of the incoming arguments.
2457 SmallVector<CCValAssign, 16> ArgLocs;
2458 CCState CCInfo(CallConv, isVarArg, MF, ArgLocs, *DAG.getContext());
2460 // Allocate shadow area for Win64
2462 CCInfo.AllocateStack(32, 8);
2464 CCInfo.AnalyzeFormalArguments(Ins, CC_X86);
2466 unsigned LastVal = ~0U;
2468 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
2469 CCValAssign &VA = ArgLocs[i];
2470 // TODO: If an arg is passed in two places (e.g. reg and stack), skip later
2471 // places.
2472 assert(VA.getValNo() != LastVal &&
2473 "Don't support value assigned to multiple locs yet");
2475 LastVal = VA.getValNo();
2477 if (VA.isRegLoc()) {
2478 EVT RegVT = VA.getLocVT();
2479 const TargetRegisterClass *RC;
2480 if (RegVT == MVT::i32)
2481 RC = &X86::GR32RegClass;
2482 else if (Is64Bit && RegVT == MVT::i64)
2483 RC = &X86::GR64RegClass;
2484 else if (RegVT == MVT::f32)
2485 RC = &X86::FR32RegClass;
2486 else if (RegVT == MVT::f64)
2487 RC = &X86::FR64RegClass;
2488 else if (RegVT.is512BitVector())
2489 RC = &X86::VR512RegClass;
2490 else if (RegVT.is256BitVector())
2491 RC = &X86::VR256RegClass;
2492 else if (RegVT.is128BitVector())
2493 RC = &X86::VR128RegClass;
2494 else if (RegVT == MVT::x86mmx)
2495 RC = &X86::VR64RegClass;
2496 else if (RegVT == MVT::i1)
2497 RC = &X86::VK1RegClass;
2498 else if (RegVT == MVT::v8i1)
2499 RC = &X86::VK8RegClass;
2500 else if (RegVT == MVT::v16i1)
2501 RC = &X86::VK16RegClass;
2502 else if (RegVT == MVT::v32i1)
2503 RC = &X86::VK32RegClass;
2504 else if (RegVT == MVT::v64i1)
2505 RC = &X86::VK64RegClass;
2507 llvm_unreachable("Unknown argument type!");
2509 unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);
2510 ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, RegVT);
2512 // If this is an 8 or 16-bit value, it is really passed promoted to 32
2513 // bits. Insert an assert[sz]ext to capture this, then truncate to the
2515 if (VA.getLocInfo() == CCValAssign::SExt)
2516 ArgValue = DAG.getNode(ISD::AssertSext, dl, RegVT, ArgValue,
2517 DAG.getValueType(VA.getValVT()));
2518 else if (VA.getLocInfo() == CCValAssign::ZExt)
2519 ArgValue = DAG.getNode(ISD::AssertZext, dl, RegVT, ArgValue,
2520 DAG.getValueType(VA.getValVT()));
2521 else if (VA.getLocInfo() == CCValAssign::BCvt)
2522 ArgValue = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), ArgValue);
2524 if (VA.isExtInLoc()) {
2525 // Handle MMX values passed in XMM regs.
2526 if (RegVT.isVector())
2527 ArgValue = DAG.getNode(X86ISD::MOVDQ2Q, dl, VA.getValVT(), ArgValue);
2529 ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue);
2532 assert(VA.isMemLoc());
2533 ArgValue = LowerMemArgument(Chain, CallConv, Ins, dl, DAG, VA, MFI, i);
2536 // If value is passed via pointer - do a load.
2537 if (VA.getLocInfo() == CCValAssign::Indirect)
2538 ArgValue = DAG.getLoad(VA.getValVT(), dl, Chain, ArgValue,
2539 MachinePointerInfo(), false, false, false, 0);
2541 InVals.push_back(ArgValue);
2544 if (Subtarget->is64Bit() || Subtarget->isTargetKnownWindowsMSVC()) {
2545 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
2546 // The x86-64 ABIs require that for returning structs by value we copy
2547 // the sret argument into %rax/%eax (depending on ABI) for the return.
2548 // Win32 requires us to put the sret argument to %eax as well.
2549 // Save the argument into a virtual register so that we can access it
2550 // from the return points.
2551 if (Ins[i].Flags.isSRet()) {
2552 unsigned Reg = FuncInfo->getSRetReturnReg();
2554 MVT PtrTy = getPointerTy();
2555 Reg = MF.getRegInfo().createVirtualRegister(getRegClassFor(PtrTy));
2556 FuncInfo->setSRetReturnReg(Reg);
2558 SDValue Copy = DAG.getCopyToReg(DAG.getEntryNode(), dl, Reg, InVals[i]);
2559 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Copy, Chain);
2565 unsigned StackSize = CCInfo.getNextStackOffset();
2566 // Align stack specially for tail calls.
2567 if (FuncIsMadeTailCallSafe(CallConv,
2568 MF.getTarget().Options.GuaranteedTailCallOpt))
2569 StackSize = GetAlignedArgumentStackSize(StackSize, DAG);
2571 // If the function takes variable number of arguments, make a frame index for
2572 // the start of the first vararg value... for expansion of llvm.va_start. We
2573 // can skip this if there are no va_start calls.
2574 if (MFI->hasVAStart() &&
2575 (Is64Bit || (CallConv != CallingConv::X86_FastCall &&
2576 CallConv != CallingConv::X86_ThisCall))) {
2577 FuncInfo->setVarArgsFrameIndex(
2578 MFI->CreateFixedObject(1, StackSize, true));
2581 // Figure out if XMM registers are in use.
2582 assert(!(MF.getTarget().Options.UseSoftFloat &&
2583 Fn->hasFnAttribute(Attribute::NoImplicitFloat)) &&
2584 "SSE register cannot be used when SSE is disabled!");
2586 // 64-bit calling conventions support varargs and register parameters, so we
2587 // have to do extra work to spill them in the prologue.
2588 if (Is64Bit && isVarArg && MFI->hasVAStart()) {
2589 // Find the first unallocated argument registers.
2590 ArrayRef<MCPhysReg> ArgGPRs = get64BitArgumentGPRs(CallConv, Subtarget);
2591 ArrayRef<MCPhysReg> ArgXMMs = get64BitArgumentXMMs(MF, CallConv, Subtarget);
2592 unsigned NumIntRegs =
2593 CCInfo.getFirstUnallocated(ArgGPRs.data(), ArgGPRs.size());
2594 unsigned NumXMMRegs =
2595 CCInfo.getFirstUnallocated(ArgXMMs.data(), ArgXMMs.size());
2596 assert(!(NumXMMRegs && !Subtarget->hasSSE1()) &&
2597 "SSE register cannot be used when SSE is disabled!");
2599 // Gather all the live in physical registers.
2600 SmallVector<SDValue, 6> LiveGPRs;
2601 SmallVector<SDValue, 8> LiveXMMRegs;
2602 SDValue ALVal;
2603 for (MCPhysReg Reg : ArgGPRs.slice(NumIntRegs)) {
2604 unsigned GPR = MF.addLiveIn(Reg, &X86::GR64RegClass);
2605 LiveGPRs.push_back(
2606 DAG.getCopyFromReg(Chain, dl, GPR, MVT::i64));
2608 if (!ArgXMMs.empty()) {
2609 unsigned AL = MF.addLiveIn(X86::AL, &X86::GR8RegClass);
2610 ALVal = DAG.getCopyFromReg(Chain, dl, AL, MVT::i8);
2611 for (MCPhysReg Reg : ArgXMMs.slice(NumXMMRegs)) {
2612 unsigned XMMReg = MF.addLiveIn(Reg, &X86::VR128RegClass);
2613 LiveXMMRegs.push_back(
2614 DAG.getCopyFromReg(Chain, dl, XMMReg, MVT::v4f32));
2618 if (IsWin64) {
2619 const TargetFrameLowering &TFI = *Subtarget->getFrameLowering();
2620 // Get to the caller-allocated home save location. Add 8 to account
2621 // for the return address.
2622 int HomeOffset = TFI.getOffsetOfLocalArea() + 8;
2623 FuncInfo->setRegSaveFrameIndex(
2624 MFI->CreateFixedObject(1, NumIntRegs * 8 + HomeOffset, false));
2625 // Fixup to set vararg frame on shadow area (4 x i64).
2626 if (NumIntRegs < 4)
2627 FuncInfo->setVarArgsFrameIndex(FuncInfo->getRegSaveFrameIndex());
2628 } else {
2629 // For X86-64, if there are vararg parameters that are passed via
2630 // registers, then we must store them to their spots on the stack so
2631 // they may be loaded by dereferencing the result of va_next.
2632 FuncInfo->setVarArgsGPOffset(NumIntRegs * 8);
2633 FuncInfo->setVarArgsFPOffset(ArgGPRs.size() * 8 + NumXMMRegs * 16);
2634 FuncInfo->setRegSaveFrameIndex(MFI->CreateStackObject(
2635 ArgGPRs.size() * 8 + ArgXMMs.size() * 16, 16, false));
2638 // Store the integer parameter registers.
2639 SmallVector<SDValue, 8> MemOps;
2640 SDValue RSFIN = DAG.getFrameIndex(FuncInfo->getRegSaveFrameIndex(),
2641 getPointerTy());
2642 unsigned Offset = FuncInfo->getVarArgsGPOffset();
2643 for (SDValue Val : LiveGPRs) {
2644 SDValue FIN = DAG.getNode(ISD::ADD, dl, getPointerTy(), RSFIN,
2645 DAG.getIntPtrConstant(Offset));
2646 SDValue Store =
2647 DAG.getStore(Val.getValue(1), dl, Val, FIN,
2648 MachinePointerInfo::getFixedStack(
2649 FuncInfo->getRegSaveFrameIndex(), Offset),
2650 false, false, 0);
2651 MemOps.push_back(Store);
2652 Offset += 8;
2655 if (!ArgXMMs.empty() && NumXMMRegs != ArgXMMs.size()) {
2656 // Now store the XMM (fp + vector) parameter registers.
2657 SmallVector<SDValue, 12> SaveXMMOps;
2658 SaveXMMOps.push_back(Chain);
2659 SaveXMMOps.push_back(ALVal);
2660 SaveXMMOps.push_back(DAG.getIntPtrConstant(
2661 FuncInfo->getRegSaveFrameIndex()));
2662 SaveXMMOps.push_back(DAG.getIntPtrConstant(
2663 FuncInfo->getVarArgsFPOffset()));
2664 SaveXMMOps.insert(SaveXMMOps.end(), LiveXMMRegs.begin(),
2666 MemOps.push_back(DAG.getNode(X86ISD::VASTART_SAVE_XMM_REGS, dl,
2667 MVT::Other, SaveXMMOps));
2670 if (!MemOps.empty())
2671 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);
2674 if (isVarArg && MFI->hasMustTailInVarArgFunc()) {
2675 // Find the largest legal vector type.
2676 MVT VecVT = MVT::Other;
2677 // FIXME: Only some x86_32 calling conventions support AVX512.
2678 if (Subtarget->hasAVX512() &&
2679 (Is64Bit || (CallConv == CallingConv::X86_VectorCall ||
2680 CallConv == CallingConv::Intel_OCL_BI)))
2681 VecVT = MVT::v16f32;
2682 else if (Subtarget->hasAVX())
2684 else if (Subtarget->hasSSE2())
2687 // We forward some GPRs and some vector types.
2688 SmallVector<MVT, 2> RegParmTypes;
2689 MVT IntVT = Is64Bit ? MVT::i64 : MVT::i32;
2690 RegParmTypes.push_back(IntVT);
2691 if (VecVT != MVT::Other)
2692 RegParmTypes.push_back(VecVT);
2694 // Compute the set of forwarded registers. The rest are scratch.
2695 SmallVectorImpl<ForwardedRegister> &Forwards =
2696 FuncInfo->getForwardedMustTailRegParms();
2697 CCInfo.analyzeMustTailForwardedRegisters(Forwards, RegParmTypes, CC_X86);
2699 // Conservatively forward AL on x86_64, since it might be used for varargs.
2700 if (Is64Bit && !CCInfo.isAllocated(X86::AL)) {
2701 unsigned ALVReg = MF.addLiveIn(X86::AL, &X86::GR8RegClass);
2702 Forwards.push_back(ForwardedRegister(ALVReg, X86::AL, MVT::i8));
2705 // Copy all forwards from physical to virtual registers.
2706 for (ForwardedRegister &F : Forwards) {
2707 // FIXME: Can we use a less constrained schedule?
2708 SDValue RegVal = DAG.getCopyFromReg(Chain, dl, F.VReg, F.VT);
2709 F.VReg = MF.getRegInfo().createVirtualRegister(getRegClassFor(F.VT));
2710 Chain = DAG.getCopyToReg(Chain, dl, F.VReg, RegVal);
2714 // Some CCs need callee pop.
2715 if (X86::isCalleePop(CallConv, Is64Bit, isVarArg,
2716 MF.getTarget().Options.GuaranteedTailCallOpt)) {
2717 FuncInfo->setBytesToPopOnReturn(StackSize); // Callee pops everything.
2719 FuncInfo->setBytesToPopOnReturn(0); // Callee pops nothing.
2720 // If this is an sret function, the return should pop the hidden pointer.
2721 if (!Is64Bit && !IsTailCallConvention(CallConv) &&
2722 !Subtarget->getTargetTriple().isOSMSVCRT() &&
2723 argsAreStructReturn(Ins) == StackStructReturn)
2724 FuncInfo->setBytesToPopOnReturn(4);
2728 // RegSaveFrameIndex is X86-64 only.
2729 FuncInfo->setRegSaveFrameIndex(0xAAAAAAA);
2730 if (CallConv == CallingConv::X86_FastCall ||
2731 CallConv == CallingConv::X86_ThisCall)
2732 // fastcc functions can't have varargs.
2733 FuncInfo->setVarArgsFrameIndex(0xAAAAAAA);
2736 FuncInfo->setArgumentStackSize(StackSize);
2742 X86TargetLowering::LowerMemOpCallTo(SDValue Chain,
2743 SDValue StackPtr, SDValue Arg,
2744 SDLoc dl, SelectionDAG &DAG,
2745 const CCValAssign &VA,
2746 ISD::ArgFlagsTy Flags) const {
2747 unsigned LocMemOffset = VA.getLocMemOffset();
2748 SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset);
2749 PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, PtrOff);
2750 if (Flags.isByVal())
2751 return CreateCopyOfByValArgument(Arg, PtrOff, Chain, Flags, DAG, dl);
2753 return DAG.getStore(Chain, dl, Arg, PtrOff,
2754 MachinePointerInfo::getStack(LocMemOffset),
2758 /// Emit a load of return address if tail call
2759 /// optimization is performed and it is required.
2761 X86TargetLowering::EmitTailCallLoadRetAddr(SelectionDAG &DAG,
2762 SDValue &OutRetAddr, SDValue Chain,
2763 bool IsTailCall, bool Is64Bit,
2764 int FPDiff, SDLoc dl) const {
2765 // Adjust the Return address stack slot.
2766 EVT VT = getPointerTy();
2767 OutRetAddr = getReturnAddressFrameIndex(DAG);
2769 // Load the "old" Return address.
2770 OutRetAddr = DAG.getLoad(VT, dl, Chain, OutRetAddr, MachinePointerInfo(),
2771 false, false, false, 0);
2772 return SDValue(OutRetAddr.getNode(), 1);
2775 /// Emit a store of the return address if tail call
2776 /// optimization is performed and it is required (FPDiff!=0).
2777 static SDValue EmitTailCallStoreRetAddr(SelectionDAG &DAG, MachineFunction &MF,
2778 SDValue Chain, SDValue RetAddrFrIdx,
2779 EVT PtrVT, unsigned SlotSize,
2780 int FPDiff, SDLoc dl) {
2781 // Store the return address to the appropriate stack slot.
2782 if (!FPDiff) return Chain;
2783 // Calculate the new stack slot for the return address.
2784 int NewReturnAddrFI =
2785 MF.getFrameInfo()->CreateFixedObject(SlotSize, (int64_t)FPDiff - SlotSize,
2787 SDValue NewRetAddrFrIdx = DAG.getFrameIndex(NewReturnAddrFI, PtrVT);
2788 Chain = DAG.getStore(Chain, dl, RetAddrFrIdx, NewRetAddrFrIdx,
2789 MachinePointerInfo::getFixedStack(NewReturnAddrFI),
2795 X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
2796 SmallVectorImpl<SDValue> &InVals) const {
2797 SelectionDAG &DAG = CLI.DAG;
2798 SDLoc &dl = CLI.DL;
2799 SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
2800 SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
2801 SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
2802 SDValue Chain = CLI.Chain;
2803 SDValue Callee = CLI.Callee;
2804 CallingConv::ID CallConv = CLI.CallConv;
2805 bool &isTailCall = CLI.IsTailCall;
2806 bool isVarArg = CLI.IsVarArg;
2808 MachineFunction &MF = DAG.getMachineFunction();
2809 bool Is64Bit = Subtarget->is64Bit();
2810 bool IsWin64 = Subtarget->isCallingConvWin64(CallConv);
2811 StructReturnType SR = callIsStructReturn(Outs);
2812 bool IsSibcall = false;
2813 X86MachineFunctionInfo *X86Info = MF.getInfo<X86MachineFunctionInfo>();
2815 if (MF.getTarget().Options.DisableTailCalls)
2816 isTailCall = false;
2818 bool IsMustTail = CLI.CS && CLI.CS->isMustTailCall();
2819 if (IsMustTail) {
2820 // Force this to be a tail call. The verifier rules are enough to ensure
2821 // that we can lower this successfully without moving the return address
2822 // around.
2823 isTailCall = true;
2824 } else if (isTailCall) {
2825 // Check if it's really possible to do a tail call.
2826 isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv,
2827 isVarArg, SR != NotStructReturn,
2828 MF.getFunction()->hasStructRetAttr(), CLI.RetTy,
2829 Outs, OutVals, Ins, DAG);
2831 // Sibcalls are automatically detected tailcalls which do not require ABI changes.
2833 if (!MF.getTarget().Options.GuaranteedTailCallOpt && isTailCall)
2840 assert(!(isVarArg && IsTailCallConvention(CallConv)) &&
2841 "Var args not supported with calling convention fastcc, ghc or hipe");
2843 // Analyze operands of the call, assigning locations to each operand.
2844 SmallVector<CCValAssign, 16> ArgLocs;
2845 CCState CCInfo(CallConv, isVarArg, MF, ArgLocs, *DAG.getContext());
2847 // Allocate shadow area for Win64
2849 CCInfo.AllocateStack(32, 8);
2851 CCInfo.AnalyzeCallOperands(Outs, CC_X86);
2853 // Get a count of how many bytes are to be pushed on the stack.
2854 unsigned NumBytes = CCInfo.getNextStackOffset();
2856 // This is a sibcall. The memory operands are already available in the
2857 // caller's own incoming argument area (i.e. in its caller's stack frame).
2859 else if (MF.getTarget().Options.GuaranteedTailCallOpt &&
2860 IsTailCallConvention(CallConv))
2861 NumBytes = GetAlignedArgumentStackSize(NumBytes, DAG);
2864 if (isTailCall && !IsSibcall && !IsMustTail) {
2865 // Lower arguments at fp - stackoffset + fpdiff.
2866 unsigned NumBytesCallerPushed = X86Info->getBytesToPopOnReturn();
2868 FPDiff = NumBytesCallerPushed - NumBytes;
2870 // Set the delta of movement of the returnaddr stackslot.
2871 // But only set if delta is greater than previous delta.
2872 if (FPDiff < X86Info->getTCReturnAddrDelta())
2873 X86Info->setTCReturnAddrDelta(FPDiff);
2876 unsigned NumBytesToPush = NumBytes;
2877 unsigned NumBytesToPop = NumBytes;
2879 // If we have an inalloca argument, all stack space has already been allocated
2880 // for us and will be right at the top of the stack. We don't support multiple
2881 // arguments passed in memory when using inalloca.
2882 if (!Outs.empty() && Outs.back().Flags.isInAlloca()) {
2884 if (!ArgLocs.back().isMemLoc())
2885 report_fatal_error("cannot use inalloca attribute on a register "
2887 if (ArgLocs.back().getLocMemOffset() != 0)
2888 report_fatal_error("any parameter with the inalloca attribute must be "
2889 "the only memory argument");
2893 Chain = DAG.getCALLSEQ_START(
2894 Chain, DAG.getIntPtrConstant(NumBytesToPush, true), dl);
2896 SDValue RetAddrFrIdx;
2897 // Load return address for tail calls.
2898 if (isTailCall && FPDiff)
2899 Chain = EmitTailCallLoadRetAddr(DAG, RetAddrFrIdx, Chain, isTailCall,
2900 Is64Bit, FPDiff, dl);
2902 SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
2903 SmallVector<SDValue, 8> MemOpChains;
2906 // Walk the register/memloc assignments, inserting copies/loads. In the case
2907 // of tail call optimization, arguments are handled later.
2908 const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
2909 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
2910 // Skip inalloca arguments, they have already been written.
2911 ISD::ArgFlagsTy Flags = Outs[i].Flags;
2912 if (Flags.isInAlloca())
2915 CCValAssign &VA = ArgLocs[i];
2916 EVT RegVT = VA.getLocVT();
2917 SDValue Arg = OutVals[i];
2918 bool isByVal = Flags.isByVal();
2920 // Promote the value if needed.
2921 switch (VA.getLocInfo()) {
2922 default: llvm_unreachable("Unknown loc info!");
2923 case CCValAssign::Full: break;
2924 case CCValAssign::SExt:
2925 Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, RegVT, Arg);
2927 case CCValAssign::ZExt:
2928 Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, RegVT, Arg);
2930 case CCValAssign::AExt:
2931 if (RegVT.is128BitVector()) {
2932 // Special case: passing MMX values in XMM registers.
2933 Arg = DAG.getNode(ISD::BITCAST, dl, MVT::i64, Arg);
2934 Arg = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64, Arg);
2935 Arg = getMOVL(DAG, dl, MVT::v2i64, DAG.getUNDEF(MVT::v2i64), Arg);
2937 Arg = DAG.getNode(ISD::ANY_EXTEND, dl, RegVT, Arg);
2939 case CCValAssign::BCvt:
2940 Arg = DAG.getNode(ISD::BITCAST, dl, RegVT, Arg);
2942 case CCValAssign::Indirect: {
2943 // Store the argument.
2944 SDValue SpillSlot = DAG.CreateStackTemporary(VA.getValVT());
2945 int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex();
2946 Chain = DAG.getStore(Chain, dl, Arg, SpillSlot,
2947 MachinePointerInfo::getFixedStack(FI),
2954 if (VA.isRegLoc()) {
2955 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
2956 if (isVarArg && IsWin64) {
2957 // Win64 ABI requires argument XMM reg to be copied to the corresponding
2958 // shadow reg if callee is a varargs function.
2959 unsigned ShadowReg = 0;
2960 switch (VA.getLocReg()) {
2961 case X86::XMM0: ShadowReg = X86::RCX; break;
2962 case X86::XMM1: ShadowReg = X86::RDX; break;
2963 case X86::XMM2: ShadowReg = X86::R8; break;
2964 case X86::XMM3: ShadowReg = X86::R9; break;
2967 RegsToPass.push_back(std::make_pair(ShadowReg, Arg));
2969 } else if (!IsSibcall && (!isTailCall || isByVal)) {
2970 assert(VA.isMemLoc());
2971 if (!StackPtr.getNode())
2972 StackPtr = DAG.getCopyFromReg(Chain, dl, RegInfo->getStackRegister(),
2974 MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, Arg,
2975 dl, DAG, VA, Flags));
2979 if (!MemOpChains.empty())
2980 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
2982 if (Subtarget->isPICStyleGOT()) {
2983 // ELF / PIC requires the GOT pointer in the EBX register before function calls via the PLT.
2986 RegsToPass.push_back(std::make_pair(unsigned(X86::EBX),
2987 DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), getPointerTy())));
2989 // If we are tail calling and generating PIC/GOT style code load the
2990 // address of the callee into ECX. The value in ecx is used as target of
2991 // the tail jump. This is done to circumvent the ebx/callee-saved problem
2992 // for tail calls on PIC/GOT architectures. Normally we would just put the
2993 // address of GOT into ebx and then call target@PLT. But for tail calls
2994 // ebx would be restored (since ebx is callee saved) before jumping to the target.
2997 // Note: The actual moving to ECX is done further down.
2998 GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee);
2999 if (G && !G->getGlobal()->hasHiddenVisibility() &&
3000 !G->getGlobal()->hasProtectedVisibility())
3001 Callee = LowerGlobalAddress(Callee, DAG);
3002 else if (isa<ExternalSymbolSDNode>(Callee))
3003 Callee = LowerExternalSymbol(Callee, DAG);
3007 if (Is64Bit && isVarArg && !IsWin64 && !IsMustTail) {
3008 // From AMD64 ABI document:
3009 // For calls that may call functions that use varargs or stdargs
3010 // (prototype-less calls or calls to functions containing ellipsis (...) in
3011 // the declaration) %al is used as hidden argument to specify the number
3012 // of SSE registers used. The contents of %al do not need to match exactly
3013 // the number of registers, but must be an upper bound on the number of SSE
3014 // registers used and is in the range 0 - 8 inclusive.
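// For instance, a variadic call like printf("%f\n", x) that passes a single
// double in XMM0 is typically preceded by "movb $1, %al" (any upper bound up
// to 8 would be equally valid), while a variadic call with no SSE arguments
// can simply use "xorl %eax, %eax".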
3016 // Count the number of XMM registers allocated.
3017 static const MCPhysReg XMMArgRegs[] = {
3018 X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
3019 X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7
3021 unsigned NumXMMRegs = CCInfo.getFirstUnallocated(XMMArgRegs, 8);
3022 assert((Subtarget->hasSSE1() || !NumXMMRegs)
3023 && "SSE registers cannot be used when SSE is disabled");
3025 RegsToPass.push_back(std::make_pair(unsigned(X86::AL),
3026 DAG.getConstant(NumXMMRegs, MVT::i8)));
3029 if (isVarArg && IsMustTail) {
3030 const auto &Forwards = X86Info->getForwardedMustTailRegParms();
3031 for (const auto &F : Forwards) {
3032 SDValue Val = DAG.getCopyFromReg(Chain, dl, F.VReg, F.VT);
3033 RegsToPass.push_back(std::make_pair(unsigned(F.PReg), Val));
3037 // For tail calls lower the arguments to the 'real' stack slots. Sibcalls
3038 // don't need this because the eligibility check rejects calls that require
3039 // shuffling arguments passed in memory.
3040 if (!IsSibcall && isTailCall) {
3041 // Force all the incoming stack arguments to be loaded from the stack
3042 // before any new outgoing arguments are stored to the stack, because the
3043 // outgoing stack slots may alias the incoming argument stack slots, and
3044 // the alias isn't otherwise explicit. This is slightly more conservative
3045 // than necessary, because it means that each store effectively depends
3046 // on every argument instead of just those arguments it would clobber.
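// As an illustration: if the caller received an i32 argument at, say, [RSP+8]
// and this tail call needs to store a different value into that same slot,
// the load of the old value must be ordered before the new store; the
// ArgChain token computed below provides exactly that ordering.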
3047 SDValue ArgChain = DAG.getStackArgumentTokenFactor(Chain);
3049 SmallVector<SDValue, 8> MemOpChains2;
3052 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
3053 CCValAssign &VA = ArgLocs[i];
3056 assert(VA.isMemLoc());
3057 SDValue Arg = OutVals[i];
3058 ISD::ArgFlagsTy Flags = Outs[i].Flags;
3059 // Skip inalloca arguments. They don't require any work.
3060 if (Flags.isInAlloca())
3062 // Create frame index.
3063 int32_t Offset = VA.getLocMemOffset()+FPDiff;
3064 uint32_t OpSize = (VA.getLocVT().getSizeInBits()+7)/8;
3065 FI = MF.getFrameInfo()->CreateFixedObject(OpSize, Offset, true);
3066 FIN = DAG.getFrameIndex(FI, getPointerTy());
3068 if (Flags.isByVal()) {
3069 // Copy relative to framepointer.
3070 SDValue Source = DAG.getIntPtrConstant(VA.getLocMemOffset());
3071 if (!StackPtr.getNode())
3072 StackPtr = DAG.getCopyFromReg(Chain, dl,
3073 RegInfo->getStackRegister(),
3075 Source = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, Source);
3077 MemOpChains2.push_back(CreateCopyOfByValArgument(Source, FIN,
3081 // Store relative to framepointer.
3082 MemOpChains2.push_back(
3083 DAG.getStore(ArgChain, dl, Arg, FIN,
3084 MachinePointerInfo::getFixedStack(FI),
3089 if (!MemOpChains2.empty())
3090 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains2);
3092 // Store the return address to the appropriate stack slot.
3093 Chain = EmitTailCallStoreRetAddr(DAG, MF, Chain, RetAddrFrIdx,
3094 getPointerTy(), RegInfo->getSlotSize(),
3098 // Build a sequence of copy-to-reg nodes chained together with token chain
3099 // and flag operands which copy the outgoing args into registers.
3101 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
3102 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
3103 RegsToPass[i].second, InFlag);
3104 InFlag = Chain.getValue(1);
3107 if (DAG.getTarget().getCodeModel() == CodeModel::Large) {
3108 assert(Is64Bit && "Large code model is only legal in 64-bit mode.");
3109 // In the 64-bit large code model, we have to make all calls
3110 // through a register, since the call instruction's 32-bit
3111 // pc-relative offset may not be large enough to hold the whole address.
3113 } else if (Callee->getOpcode() == ISD::GlobalAddress) {
3114 // If the callee is a GlobalAddress node (quite common, every direct call
3115 // is) turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
3117 GlobalAddressSDNode* G = cast<GlobalAddressSDNode>(Callee);
3119 // We should use extra load for direct calls to dllimported functions in
3121 const GlobalValue *GV = G->getGlobal();
3122 if (!GV->hasDLLImportStorageClass()) {
3123 unsigned char OpFlags = 0;
3124 bool ExtraLoad = false;
3125 unsigned WrapperKind = ISD::DELETED_NODE;
3127 // On ELF targets, in both X86-64 and X86-32 mode, direct calls to
3128 // external symbols must go through the PLT in PIC mode. If the symbol
3129 // has hidden or protected visibility, or if it is static or local, then
3130 // we don't need to use the PLT - we can directly call it.
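// For example, a PIC call to an externally visible function "foo" gets
// MO_PLT here and is emitted as "call foo@PLT", whereas a call to a function
// with hidden visibility or local linkage is emitted as a plain "call foo".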
3131 if (Subtarget->isTargetELF() &&
3132 DAG.getTarget().getRelocationModel() == Reloc::PIC_ &&
3133 GV->hasDefaultVisibility() && !GV->hasLocalLinkage()) {
3134 OpFlags = X86II::MO_PLT;
3135 } else if (Subtarget->isPICStyleStubAny() &&
3136 (GV->isDeclaration() || GV->isWeakForLinker()) &&
3137 (!Subtarget->getTargetTriple().isMacOSX() ||
3138 Subtarget->getTargetTriple().isMacOSXVersionLT(10, 5))) {
3139 // PC-relative references to external symbols should go through $stub,
3140 // unless we're building with the leopard linker or later, which
3141 // automatically synthesizes these stubs.
3142 OpFlags = X86II::MO_DARWIN_STUB;
3143 } else if (Subtarget->isPICStyleRIPRel() && isa<Function>(GV) &&
3144 cast<Function>(GV)->hasFnAttribute(Attribute::NonLazyBind)) {
3145 // If the function is marked as non-lazy, generate an indirect call
3146 // which loads from the GOT directly. This avoids runtime overhead
3147 // at the cost of eager binding (and one extra byte of encoding).
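// For example, with this flag the call typically assembles to
// "call *foo@GOTPCREL(%rip)" rather than "call foo@PLT".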
3148 OpFlags = X86II::MO_GOTPCREL;
3149 WrapperKind = X86ISD::WrapperRIP;
3153 Callee = DAG.getTargetGlobalAddress(GV, dl, getPointerTy(),
3154 G->getOffset(), OpFlags);
3156 // Add a wrapper if needed.
3157 if (WrapperKind != ISD::DELETED_NODE)
3158 Callee = DAG.getNode(X86ISD::WrapperRIP, dl, getPointerTy(), Callee);
3159 // Add extra indirection if needed.
3161 Callee = DAG.getLoad(getPointerTy(), dl, DAG.getEntryNode(), Callee,
3162 MachinePointerInfo::getGOT(),
3163 false, false, false, 0);
3165 } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
3166 unsigned char OpFlags = 0;
3168 // On ELF targets, in either X86-64 or X86-32 mode, direct calls to
3169 // external symbols should go through the PLT.
3170 if (Subtarget->isTargetELF() &&
3171 DAG.getTarget().getRelocationModel() == Reloc::PIC_) {
3172 OpFlags = X86II::MO_PLT;
3173 } else if (Subtarget->isPICStyleStubAny() &&
3174 (!Subtarget->getTargetTriple().isMacOSX() ||
3175 Subtarget->getTargetTriple().isMacOSXVersionLT(10, 5))) {
3176 // PC-relative references to external symbols should go through $stub,
3177 // unless we're building with the leopard linker or later, which
3178 // automatically synthesizes these stubs.
3179 OpFlags = X86II::MO_DARWIN_STUB;
3182 Callee = DAG.getTargetExternalSymbol(S->getSymbol(), getPointerTy(),
3184 } else if (Subtarget->isTarget64BitILP32() &&
3185 Callee->getValueType(0) == MVT::i32) {
3186 // Zero-extend the 32-bit Callee address into a 64-bit according to x32 ABI
3187 Callee = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64, Callee);
3190 // Returns a chain & a flag for retval copy to use.
3191 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
3192 SmallVector<SDValue, 8> Ops;
3194 if (!IsSibcall && isTailCall) {
3195 Chain = DAG.getCALLSEQ_END(Chain,
3196 DAG.getIntPtrConstant(NumBytesToPop, true),
3197 DAG.getIntPtrConstant(0, true), InFlag, dl);
3198 InFlag = Chain.getValue(1);
3201 Ops.push_back(Chain);
3202 Ops.push_back(Callee);
3205 Ops.push_back(DAG.getConstant(FPDiff, MVT::i32));
3207 // Add argument registers to the end of the list so that they are known live into the call.
3209 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
3210 Ops.push_back(DAG.getRegister(RegsToPass[i].first,
3211 RegsToPass[i].second.getValueType()));
3213 // Add a register mask operand representing the call-preserved registers.
3214 const TargetRegisterInfo *TRI = Subtarget->getRegisterInfo();
3215 const uint32_t *Mask = TRI->getCallPreservedMask(CallConv);
3216 assert(Mask && "Missing call preserved mask for calling convention");
3217 Ops.push_back(DAG.getRegisterMask(Mask));
3219 if (InFlag.getNode())
3220 Ops.push_back(InFlag);
3224 //// If this is the first return lowered for this function, add the regs
3225 //// to the liveout set for the function.
3226 // This isn't right, although it's probably harmless on x86; liveouts
3227 // should be computed from returns not tail calls. Consider a void
3228 // function making a tail call to a function returning int.
3229 return DAG.getNode(X86ISD::TC_RETURN, dl, NodeTys, Ops);
3232 Chain = DAG.getNode(X86ISD::CALL, dl, NodeTys, Ops);
3233 InFlag = Chain.getValue(1);
3235 // Create the CALLSEQ_END node.
3236 unsigned NumBytesForCalleeToPop;
3237 if (X86::isCalleePop(CallConv, Is64Bit, isVarArg,
3238 DAG.getTarget().Options.GuaranteedTailCallOpt))
3239 NumBytesForCalleeToPop = NumBytes; // Callee pops everything
3240 else if (!Is64Bit && !IsTailCallConvention(CallConv) &&
3241 !Subtarget->getTargetTriple().isOSMSVCRT() &&
3242 SR == StackStructReturn)
3243 // If this is a call to a struct-return function, the callee
3244 // pops the hidden struct pointer, so we have to push it back.
3245 // This is common for Darwin/X86, Linux & Mingw32 targets.
3246 // For MSVC Win32 targets, the caller pops the hidden struct pointer.
3247 NumBytesForCalleeToPop = 4;
3249 NumBytesForCalleeToPop = 0; // Callee pops nothing.
3251 // Returns a flag for retval copy to use.
3253 Chain = DAG.getCALLSEQ_END(Chain,
3254 DAG.getIntPtrConstant(NumBytesToPop, true),
3255 DAG.getIntPtrConstant(NumBytesForCalleeToPop,
3258 InFlag = Chain.getValue(1);
3261 // Handle result values, copying them out of physregs into vregs that we return.
3263 return LowerCallResult(Chain, InFlag, CallConv, isVarArg,
3264 Ins, dl, DAG, InVals);
3267 //===----------------------------------------------------------------------===//
3268 // Fast Calling Convention (tail call) implementation
3269 //===----------------------------------------------------------------------===//
3271 // Like stdcall, the callee cleans up the arguments, except that ECX is
3272 // reserved for storing the tail called function address. Only 2 registers are
3273 // free for argument passing (inreg). Tail call optimization is performed provided:
3275 // * tailcallopt is enabled
3276 // * caller/callee are fastcc
3277 // On X86_64 architecture with GOT-style position independent code only local
3278 // (within module) calls are supported at the moment.
3279 // To keep the stack aligned according to platform abi the function
3280 // GetAlignedArgumentStackSize ensures that the argument delta is always a multiple
3281 // of stack alignment. (Dynamic linkers need this - darwin's dyld for example)
3282 // If a tail called function callee has more arguments than the caller the
3283 // caller needs to make sure that there is room to move the RETADDR to. This is
3284 // achieved by reserving an area the size of the argument delta right after the
3285 // original RETADDR, but before the saved framepointer or the spilled registers
3286 // e.g. caller(arg1, arg2) calls callee(arg1, arg2,arg3,arg4)
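// A minimal sketch of that case in (hypothetical) IR, assuming -tailcallopt
// and fastcc on both sides:
//
//   define fastcc void @caller(i32 %arg1, i32 %arg2) {
//     tail call fastcc void @callee(i32 %arg1, i32 %arg2, i32 3, i32 4)
//     ret void
//   }
//
// Here the callee needs more argument stack space than the caller provides,
// so the return address is moved down by the (aligned) argument delta.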
3298 /// GetAlignedArgumentStackSize - Align the stack size, e.g. to 16n + 12 bytes,
3299 /// for a 16-byte alignment requirement (so the stack stays aligned once the return address slot is added).
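// Worked examples, assuming a 16-byte stack alignment and a 4-byte slot size
// (i.e. 32-bit): StackSize 8  -> 8 + (12 - 8)       = 12,
//                StackSize 20 -> 20 + (12 - 4)      = 28 (16 + 12),
//                StackSize 30 -> 16 + 16 + (16 - 4) = 44 (32 + 12).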
3301 X86TargetLowering::GetAlignedArgumentStackSize(unsigned StackSize,
3302 SelectionDAG& DAG) const {
3303 const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
3304 const TargetFrameLowering &TFI = *Subtarget->getFrameLowering();
3305 unsigned StackAlignment = TFI.getStackAlignment();
3306 uint64_t AlignMask = StackAlignment - 1;
3307 int64_t Offset = StackSize;
3308 unsigned SlotSize = RegInfo->getSlotSize();
3309 if ( (Offset & AlignMask) <= (StackAlignment - SlotSize) ) {
3310 // Number smaller than 12 so just add the difference.
3311 Offset += ((StackAlignment - SlotSize) - (Offset & AlignMask));
3313 // Mask out lower bits, add stackalignment once plus the 12 bytes.
3314 Offset = ((~AlignMask) & Offset) + StackAlignment +
3315 (StackAlignment-SlotSize);
3320 /// MatchingStackOffset - Return true if the given stack call argument is
3321 /// already available in the same position (relatively) of the caller's
3322 /// incoming argument stack.
3324 bool MatchingStackOffset(SDValue Arg, unsigned Offset, ISD::ArgFlagsTy Flags,
3325 MachineFrameInfo *MFI, const MachineRegisterInfo *MRI,
3326 const X86InstrInfo *TII) {
3327 unsigned Bytes = Arg.getValueType().getSizeInBits() / 8;
3329 if (Arg.getOpcode() == ISD::CopyFromReg) {
3330 unsigned VR = cast<RegisterSDNode>(Arg.getOperand(1))->getReg();
3331 if (!TargetRegisterInfo::isVirtualRegister(VR))
3333 MachineInstr *Def = MRI->getVRegDef(VR);
3336 if (!Flags.isByVal()) {
3337 if (!TII->isLoadFromStackSlot(Def, FI))
3340 unsigned Opcode = Def->getOpcode();
3341 if ((Opcode == X86::LEA32r || Opcode == X86::LEA64r ||
3342 Opcode == X86::LEA64_32r) &&
3343 Def->getOperand(1).isFI()) {
3344 FI = Def->getOperand(1).getIndex();
3345 Bytes = Flags.getByValSize();
3349 } else if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Arg)) {
3350 if (Flags.isByVal())
3351 // ByVal argument is passed in as a pointer but it's now being
3352 // dereferenced. e.g.
3353 // define @foo(%struct.X* %A) {
3354 // tail call @bar(%struct.X* byval %A)
3357 SDValue Ptr = Ld->getBasePtr();
3358 FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(Ptr);
3361 FI = FINode->getIndex();
3362 } else if (Arg.getOpcode() == ISD::FrameIndex && Flags.isByVal()) {
3363 FrameIndexSDNode *FINode = cast<FrameIndexSDNode>(Arg);
3364 FI = FINode->getIndex();
3365 Bytes = Flags.getByValSize();
3369 assert(FI != INT_MAX);
3370 if (!MFI->isFixedObjectIndex(FI))
3372 return Offset == MFI->getObjectOffset(FI) && Bytes == MFI->getObjectSize(FI);
3375 /// IsEligibleForTailCallOptimization - Check whether the call is eligible
3376 /// for tail call optimization. Targets which want to do tail call
3377 /// optimization should implement this function.
3379 X86TargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
3380 CallingConv::ID CalleeCC,
3382 bool isCalleeStructRet,
3383 bool isCallerStructRet,
3385 const SmallVectorImpl<ISD::OutputArg> &Outs,
3386 const SmallVectorImpl<SDValue> &OutVals,
3387 const SmallVectorImpl<ISD::InputArg> &Ins,
3388 SelectionDAG &DAG) const {
3389 if (!IsTailCallConvention(CalleeCC) && !IsCCallConvention(CalleeCC))
3392 // If -tailcallopt is specified, make fastcc functions tail-callable.
3393 const MachineFunction &MF = DAG.getMachineFunction();
3394 const Function *CallerF = MF.getFunction();
3396 // If the function return type is x86_fp80 and the callee return type is not,
3397 // then the FP_EXTEND of the call result is not a nop. It's not safe to
3398 // perform a tailcall optimization here.
3399 if (CallerF->getReturnType()->isX86_FP80Ty() && !RetTy->isX86_FP80Ty())
3402 CallingConv::ID CallerCC = CallerF->getCallingConv();
3403 bool CCMatch = CallerCC == CalleeCC;
3404 bool IsCalleeWin64 = Subtarget->isCallingConvWin64(CalleeCC);
3405 bool IsCallerWin64 = Subtarget->isCallingConvWin64(CallerCC);
3407 if (DAG.getTarget().Options.GuaranteedTailCallOpt) {
3408 if (IsTailCallConvention(CalleeCC) && CCMatch)
3413 // Look for obvious safe cases to perform tail call optimization that do not
3414 // require ABI changes. This is what gcc calls sibcall.
3416 // Can't do sibcall if stack needs to be dynamically re-aligned. PEI needs to
3417 // emit a special epilogue.
3418 const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
3419 if (RegInfo->needsStackRealignment(MF))
3422 // Also avoid sibcall optimization if either caller or callee uses struct
3423 // return semantics.
3424 if (isCalleeStructRet || isCallerStructRet)
3427 // An stdcall/thiscall caller is expected to clean up its arguments; the
3428 // callee isn't going to do that.
3429 // FIXME: this is more restrictive than needed. We could produce a tailcall
3430 // when the stack adjustment matches. For example, with a thiscall that takes
3431 // only one argument.
3432 if (!CCMatch && (CallerCC == CallingConv::X86_StdCall ||
3433 CallerCC == CallingConv::X86_ThisCall))
3436 // Do not sibcall optimize vararg calls unless all arguments are passed via registers.
3438 if (isVarArg && !Outs.empty()) {
3440 // Optimizing for varargs on Win64 is unlikely to be safe without
3441 // additional testing.
3442 if (IsCalleeWin64 || IsCallerWin64)
3445 SmallVector<CCValAssign, 16> ArgLocs;
3446 CCState CCInfo(CalleeCC, isVarArg, DAG.getMachineFunction(), ArgLocs,
3449 CCInfo.AnalyzeCallOperands(Outs, CC_X86);
3450 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i)
3451 if (!ArgLocs[i].isRegLoc())
3455 // If the call result is in ST0 / ST1, it needs to be popped off the x87
3456 // stack. Therefore, if it's not used by the call it is not safe to optimize
3457 // this into a sibcall.
3458 bool Unused = false;
3459 for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
3466 SmallVector<CCValAssign, 16> RVLocs;
3467 CCState CCInfo(CalleeCC, false, DAG.getMachineFunction(), RVLocs,
3469 CCInfo.AnalyzeCallResult(Ins, RetCC_X86);
3470 for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
3471 CCValAssign &VA = RVLocs[i];
3472 if (VA.getLocReg() == X86::FP0 || VA.getLocReg() == X86::FP1)
3477 // If the calling conventions do not match, then we'd better make sure the
3478 // results are returned in the same way as what the caller expects.
3480 SmallVector<CCValAssign, 16> RVLocs1;
3481 CCState CCInfo1(CalleeCC, false, DAG.getMachineFunction(), RVLocs1,
3483 CCInfo1.AnalyzeCallResult(Ins, RetCC_X86);
3485 SmallVector<CCValAssign, 16> RVLocs2;
3486 CCState CCInfo2(CallerCC, false, DAG.getMachineFunction(), RVLocs2,
3488 CCInfo2.AnalyzeCallResult(Ins, RetCC_X86);
3490 if (RVLocs1.size() != RVLocs2.size())
3492 for (unsigned i = 0, e = RVLocs1.size(); i != e; ++i) {
3493 if (RVLocs1[i].isRegLoc() != RVLocs2[i].isRegLoc())
3495 if (RVLocs1[i].getLocInfo() != RVLocs2[i].getLocInfo())
3497 if (RVLocs1[i].isRegLoc()) {
3498 if (RVLocs1[i].getLocReg() != RVLocs2[i].getLocReg())
3501 if (RVLocs1[i].getLocMemOffset() != RVLocs2[i].getLocMemOffset())
3507 // If the callee takes no arguments then go on to check the results of the call.
3509 if (!Outs.empty()) {
3510 // Check if stack adjustment is needed. For now, do not do this if any
3511 // argument is passed on the stack.
3512 SmallVector<CCValAssign, 16> ArgLocs;
3513 CCState CCInfo(CalleeCC, isVarArg, DAG.getMachineFunction(), ArgLocs,
3516 // Allocate shadow area for Win64
3518 CCInfo.AllocateStack(32, 8);
3520 CCInfo.AnalyzeCallOperands(Outs, CC_X86);
3521 if (CCInfo.getNextStackOffset()) {
3522 MachineFunction &MF = DAG.getMachineFunction();
3523 if (MF.getInfo<X86MachineFunctionInfo>()->getBytesToPopOnReturn())
3526 // Check if the arguments are already laid out in the right way as
3527 // the caller's fixed stack objects.
3528 MachineFrameInfo *MFI = MF.getFrameInfo();
3529 const MachineRegisterInfo *MRI = &MF.getRegInfo();
3530 const X86InstrInfo *TII = Subtarget->getInstrInfo();
3531 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
3532 CCValAssign &VA = ArgLocs[i];
3533 SDValue Arg = OutVals[i];
3534 ISD::ArgFlagsTy Flags = Outs[i].Flags;
3535 if (VA.getLocInfo() == CCValAssign::Indirect)
3537 if (!VA.isRegLoc()) {
3538 if (!MatchingStackOffset(Arg, VA.getLocMemOffset(), Flags,
3545 // If the tailcall address may be in a register, then make sure it's
3546 // possible to register allocate for it. In 32-bit, the call address can
3547 // only target EAX, EDX, or ECX since the tail call must be scheduled after
3548 // callee-saved registers are restored. These happen to be the same
3549 // registers used to pass 'inreg' arguments so watch out for those.
3550 if (!Subtarget->is64Bit() &&
3551 ((!isa<GlobalAddressSDNode>(Callee) &&
3552 !isa<ExternalSymbolSDNode>(Callee)) ||
3553 DAG.getTarget().getRelocationModel() == Reloc::PIC_)) {
3554 unsigned NumInRegs = 0;
3555 // In PIC we need an extra register to formulate the address computation for the callee.
3557 unsigned MaxInRegs =
3558 (DAG.getTarget().getRelocationModel() == Reloc::PIC_) ? 2 : 3;
3560 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
3561 CCValAssign &VA = ArgLocs[i];
3564 unsigned Reg = VA.getLocReg();
3567 case X86::EAX: case X86::EDX: case X86::ECX:
3568 if (++NumInRegs == MaxInRegs)
3580 X86TargetLowering::createFastISel(FunctionLoweringInfo &funcInfo,
3581 const TargetLibraryInfo *libInfo) const {
3582 return X86::createFastISel(funcInfo, libInfo);
3585 //===----------------------------------------------------------------------===//
3586 // Other Lowering Hooks
3587 //===----------------------------------------------------------------------===//
3589 static bool MayFoldLoad(SDValue Op) {
3590 return Op.hasOneUse() && ISD::isNormalLoad(Op.getNode());
3593 static bool MayFoldIntoStore(SDValue Op) {
3594 return Op.hasOneUse() && ISD::isNormalStore(*Op.getNode()->use_begin());
3597 static bool isTargetShuffle(unsigned Opcode) {
3599 default: return false;
3600 case X86ISD::BLENDI:
3601 case X86ISD::PSHUFB:
3602 case X86ISD::PSHUFD:
3603 case X86ISD::PSHUFHW:
3604 case X86ISD::PSHUFLW:
3606 case X86ISD::PALIGNR:
3607 case X86ISD::MOVLHPS:
3608 case X86ISD::MOVLHPD:
3609 case X86ISD::MOVHLPS:
3610 case X86ISD::MOVLPS:
3611 case X86ISD::MOVLPD:
3612 case X86ISD::MOVSHDUP:
3613 case X86ISD::MOVSLDUP:
3614 case X86ISD::MOVDDUP:
3617 case X86ISD::UNPCKL:
3618 case X86ISD::UNPCKH:
3619 case X86ISD::VPERMILPI:
3620 case X86ISD::VPERM2X128:
3621 case X86ISD::VPERMI:
3626 static SDValue getTargetShuffleNode(unsigned Opc, SDLoc dl, EVT VT,
3627 SDValue V1, SelectionDAG &DAG) {
3629 default: llvm_unreachable("Unknown x86 shuffle node");
3630 case X86ISD::MOVSHDUP:
3631 case X86ISD::MOVSLDUP:
3632 case X86ISD::MOVDDUP:
3633 return DAG.getNode(Opc, dl, VT, V1);
3637 static SDValue getTargetShuffleNode(unsigned Opc, SDLoc dl, EVT VT,
3638 SDValue V1, unsigned TargetMask,
3639 SelectionDAG &DAG) {
3641 default: llvm_unreachable("Unknown x86 shuffle node");
3642 case X86ISD::PSHUFD:
3643 case X86ISD::PSHUFHW:
3644 case X86ISD::PSHUFLW:
3645 case X86ISD::VPERMILPI:
3646 case X86ISD::VPERMI:
3647 return DAG.getNode(Opc, dl, VT, V1, DAG.getConstant(TargetMask, MVT::i8));
3651 static SDValue getTargetShuffleNode(unsigned Opc, SDLoc dl, EVT VT,
3652 SDValue V1, SDValue V2, unsigned TargetMask,
3653 SelectionDAG &DAG) {
3655 default: llvm_unreachable("Unknown x86 shuffle node");
3656 case X86ISD::PALIGNR:
3657 case X86ISD::VALIGN:
3659 case X86ISD::VPERM2X128:
3660 return DAG.getNode(Opc, dl, VT, V1, V2,
3661 DAG.getConstant(TargetMask, MVT::i8));
3665 static SDValue getTargetShuffleNode(unsigned Opc, SDLoc dl, EVT VT,
3666 SDValue V1, SDValue V2, SelectionDAG &DAG) {
3668 default: llvm_unreachable("Unknown x86 shuffle node");
3669 case X86ISD::MOVLHPS:
3670 case X86ISD::MOVLHPD:
3671 case X86ISD::MOVHLPS:
3672 case X86ISD::MOVLPS:
3673 case X86ISD::MOVLPD:
3676 case X86ISD::UNPCKL:
3677 case X86ISD::UNPCKH:
3678 return DAG.getNode(Opc, dl, VT, V1, V2);
3682 SDValue X86TargetLowering::getReturnAddressFrameIndex(SelectionDAG &DAG) const {
3683 MachineFunction &MF = DAG.getMachineFunction();
3684 const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
3685 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
3686 int ReturnAddrIndex = FuncInfo->getRAIndex();
3688 if (ReturnAddrIndex == 0) {
3689 // Set up a frame object for the return address.
3690 unsigned SlotSize = RegInfo->getSlotSize();
3691 ReturnAddrIndex = MF.getFrameInfo()->CreateFixedObject(SlotSize,
3694 FuncInfo->setRAIndex(ReturnAddrIndex);
3697 return DAG.getFrameIndex(ReturnAddrIndex, getPointerTy());
3700 bool X86::isOffsetSuitableForCodeModel(int64_t Offset, CodeModel::Model M,
3701 bool hasSymbolicDisplacement) {
3702 // Offset should fit into 32 bit immediate field.
3703 if (!isInt<32>(Offset))
3706 // If we don't have a symbolic displacement - we don't have any extra restrictions.
3708 if (!hasSymbolicDisplacement)
3711 // FIXME: Some tweaks might be needed for medium code model.
3712 if (M != CodeModel::Small && M != CodeModel::Kernel)
3715 // For the small code model we assume that the last object is 16MB before the end
3716 // of the 31-bit boundary. We may also accept pretty large negative constants knowing
3717 // that all objects are in the positive half of the address space.
3718 if (M == CodeModel::Small && Offset < 16*1024*1024)
3721 // For the kernel code model we know that all objects reside in the negative half
3722 // of the 32-bit address space. We must not accept negative offsets, since they may
3723 // fall just outside that range, but we may accept pretty large positive ones.
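// Illustration: with the kernel image laid out starting near -2GB, a
// non-negative offset keeps "symbol + offset" inside the reachable [-2GB, 0)
// window, while a negative offset could drop below -2GB and no longer be
// representable as a sign-extended 32-bit displacement.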
3724 if (M == CodeModel::Kernel && Offset >= 0)
3730 /// isCalleePop - Determines whether the callee is required to pop its
3731 /// own arguments. Callee pop is necessary to support tail calls.
3732 bool X86::isCalleePop(CallingConv::ID CallingConv,
3733 bool is64Bit, bool IsVarArg, bool TailCallOpt) {
3734 switch (CallingConv) {
3737 case CallingConv::X86_StdCall:
3738 case CallingConv::X86_FastCall:
3739 case CallingConv::X86_ThisCall:
3741 case CallingConv::Fast:
3742 case CallingConv::GHC:
3743 case CallingConv::HiPE:
3750 /// \brief Return true if the condition is an unsigned comparison operation.
3751 static bool isX86CCUnsigned(unsigned X86CC) {
3753 default: llvm_unreachable("Invalid integer condition!");
3754 case X86::COND_E: return true;
3755 case X86::COND_G: return false;
3756 case X86::COND_GE: return false;
3757 case X86::COND_L: return false;
3758 case X86::COND_LE: return false;
3759 case X86::COND_NE: return true;
3760 case X86::COND_B: return true;
3761 case X86::COND_A: return true;
3762 case X86::COND_BE: return true;
3763 case X86::COND_AE: return true;
3765 llvm_unreachable("covered switch fell through?!");
3768 /// TranslateX86CC - Do a one-to-one translation of an ISD::CondCode to the X86
3769 /// specific condition code, returning the condition code and the LHS/RHS of the
3770 /// comparison to make.
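// For instance, per the tables below an unsigned (setugt a, b) maps to
// X86::COND_A while the signed (setgt a, b) maps to X86::COND_G, and the
// special cases right below turn comparisons like (setgt X, -1) into a
// plain sign test.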
3771 static unsigned TranslateX86CC(ISD::CondCode SetCCOpcode, bool isFP,
3772 SDValue &LHS, SDValue &RHS, SelectionDAG &DAG) {
3774 if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS)) {
3775 if (SetCCOpcode == ISD::SETGT && RHSC->isAllOnesValue()) {
3776 // X > -1 -> X == 0, jump !sign.
3777 RHS = DAG.getConstant(0, RHS.getValueType());
3778 return X86::COND_NS;
3780 if (SetCCOpcode == ISD::SETLT && RHSC->isNullValue()) {
3781 // X < 0 -> X == 0, jump on sign.
3784 if (SetCCOpcode == ISD::SETLT && RHSC->getZExtValue() == 1) {
3786 RHS = DAG.getConstant(0, RHS.getValueType());
3787 return X86::COND_LE;
3791 switch (SetCCOpcode) {
3792 default: llvm_unreachable("Invalid integer condition!");
3793 case ISD::SETEQ: return X86::COND_E;
3794 case ISD::SETGT: return X86::COND_G;
3795 case ISD::SETGE: return X86::COND_GE;
3796 case ISD::SETLT: return X86::COND_L;
3797 case ISD::SETLE: return X86::COND_LE;
3798 case ISD::SETNE: return X86::COND_NE;
3799 case ISD::SETULT: return X86::COND_B;
3800 case ISD::SETUGT: return X86::COND_A;
3801 case ISD::SETULE: return X86::COND_BE;
3802 case ISD::SETUGE: return X86::COND_AE;
3806 // First determine if it is required or is profitable to flip the operands.
3808 // If LHS is a foldable load, but RHS is not, flip the condition.
3809 if (ISD::isNON_EXTLoad(LHS.getNode()) &&
3810 !ISD::isNON_EXTLoad(RHS.getNode())) {
3811 SetCCOpcode = getSetCCSwappedOperands(SetCCOpcode);
3812 std::swap(LHS, RHS);
3815 switch (SetCCOpcode) {
3821 std::swap(LHS, RHS);
3825 // On a floating point condition, the flags (ZF | PF | CF) are set as follows:
3827 // 0 | 0 | 0 | X > Y
3828 // 0 | 0 | 1 | X < Y
3829 // 1 | 0 | 0 | X == Y
3830 // 1 | 1 | 1 | unordered
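// Consequently an ordered "greater than" can be tested with COND_A
// (CF == 0 and ZF == 0) and "greater or equal" with COND_AE (CF == 0),
// which is why the unsigned-style condition codes show up in the FP
// mappings below.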
3831 switch (SetCCOpcode) {
3832 default: llvm_unreachable("Condcode should be pre-legalized away");
3834 case ISD::SETEQ: return X86::COND_E;
3835 case ISD::SETOLT: // flipped
3837 case ISD::SETGT: return X86::COND_A;
3838 case ISD::SETOLE: // flipped
3840 case ISD::SETGE: return X86::COND_AE;
3841 case ISD::SETUGT: // flipped
3843 case ISD::SETLT: return X86::COND_B;
3844 case ISD::SETUGE: // flipped
3846 case ISD::SETLE: return X86::COND_BE;
3848 case ISD::SETNE: return X86::COND_NE;
3849 case ISD::SETUO: return X86::COND_P;
3850 case ISD::SETO: return X86::COND_NP;
3852 case ISD::SETUNE: return X86::COND_INVALID;
3856 /// hasFPCMov - is there a floating point cmov for the specific X86 condition
3857 /// code. The current x86 ISA includes the following FP cmov instructions:
3858 /// fcmovb, fcmovbe, fcmove, fcmovu, fcmovae, fcmova, fcmovne, fcmovnu.
3859 static bool hasFPCMov(unsigned X86CC) {
3875 /// isFPImmLegal - Returns true if the target can instruction select the
3876 /// specified FP immediate natively. If false, the legalizer will
3877 /// materialize the FP immediate as a load from a constant pool.
3878 bool X86TargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const {
3879 for (unsigned i = 0, e = LegalFPImmediates.size(); i != e; ++i) {
3880 if (Imm.bitwiseIsEqual(LegalFPImmediates[i]))
3886 bool X86TargetLowering::shouldReduceLoadWidth(SDNode *Load,
3887 ISD::LoadExtType ExtTy,
3889 // "ELF Handling for Thread-Local Storage" specifies that R_X86_64_GOTTPOFF
3890 // relocations must target a movq or addq instruction: don't let the load shrink.
3891 SDValue BasePtr = cast<LoadSDNode>(Load)->getBasePtr();
3892 if (BasePtr.getOpcode() == X86ISD::WrapperRIP)
3893 if (const auto *GA = dyn_cast<GlobalAddressSDNode>(BasePtr.getOperand(0)))
3894 return GA->getTargetFlags() != X86II::MO_GOTTPOFF;
3898 /// \brief Returns true if it is beneficial to convert a load of a constant
3899 /// to just the constant itself.
3900 bool X86TargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm,
3902 assert(Ty->isIntegerTy());
3904 unsigned BitSize = Ty->getPrimitiveSizeInBits();
3905 if (BitSize == 0 || BitSize > 64)
3910 bool X86TargetLowering::isExtractSubvectorCheap(EVT ResVT,
3911 unsigned Index) const {
3912 if (!isOperationLegalOrCustom(ISD::EXTRACT_SUBVECTOR, ResVT))
3915 return (Index == 0 || Index == ResVT.getVectorNumElements());
3918 bool X86TargetLowering::isCheapToSpeculateCttz() const {
3919 // Speculate cttz only if we can directly use TZCNT.
3920 return Subtarget->hasBMI();
3923 bool X86TargetLowering::isCheapToSpeculateCtlz() const {
3924 // Speculate ctlz only if we can directly use LZCNT.
3925 return Subtarget->hasLZCNT();
3928 /// isUndefOrInRange - Return true if Val is undef or if its value falls within
3929 /// the specified range [Low, Hi).
3930 static bool isUndefOrInRange(int Val, int Low, int Hi) {
3931 return (Val < 0) || (Val >= Low && Val < Hi);
3934 /// isUndefOrEqual - Val is either less than zero (undef) or equal to the
3935 /// specified value.
3936 static bool isUndefOrEqual(int Val, int CmpVal) {
3937 return (Val < 0 || Val == CmpVal);
3940 /// isSequentialOrUndefInRange - Return true if every element in Mask, beginning
3941 /// from position Pos and ending in Pos+Size, falls within the specified
3942 /// sequential range [Low, Low+Size), or is undef.
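// For example, isSequentialOrUndefInRange({4, -1, 6, 7}, 0, 4, 4) is true
// (the undef at position 1 stands in for the expected value 5), whereas
// replacing the 6 with a 5 would make it false.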
3943 static bool isSequentialOrUndefInRange(ArrayRef<int> Mask,
3944 unsigned Pos, unsigned Size, int Low) {
3945 for (unsigned i = Pos, e = Pos+Size; i != e; ++i, ++Low)
3946 if (!isUndefOrEqual(Mask[i], Low))
3951 /// isPSHUFDMask - Return true if the node specifies a shuffle of elements that
3952 /// is suitable for input to PSHUFD. That is, it doesn't reference the other
3953 /// operand - by default it matches against the first operand.
3954 static bool isPSHUFDMask(ArrayRef<int> Mask, MVT VT,
3955 bool TestSecondOperand = false) {
3956 if (VT != MVT::v4f32 && VT != MVT::v4i32 &&
3957 VT != MVT::v2f64 && VT != MVT::v2i64)
3960 unsigned NumElems = VT.getVectorNumElements();
3961 unsigned Lo = TestSecondOperand ? NumElems : 0;
3962 unsigned Hi = Lo + NumElems;
3964 for (unsigned i = 0; i < NumElems; ++i)
3965 if (!isUndefOrInRange(Mask[i], (int)Lo, (int)Hi))
3971 /// isPSHUFHWMask - Return true if the node specifies a shuffle of elements that
3972 /// is suitable for input to PSHUFHW.
3973 static bool isPSHUFHWMask(ArrayRef<int> Mask, MVT VT, bool HasInt256) {
3974 if (VT != MVT::v8i16 && (!HasInt256 || VT != MVT::v16i16))
3977 // Lower quadword copied in order or undef.
3978 if (!isSequentialOrUndefInRange(Mask, 0, 4, 0))
3981 // Upper quadword shuffled.
3982 for (unsigned i = 4; i != 8; ++i)
3983 if (!isUndefOrInRange(Mask[i], 4, 8))
3986 if (VT == MVT::v16i16) {
3987 // Lower quadword copied in order or undef.
3988 if (!isSequentialOrUndefInRange(Mask, 8, 4, 8))
3991 // Upper quadword shuffled.
3992 for (unsigned i = 12; i != 16; ++i)
3993 if (!isUndefOrInRange(Mask[i], 12, 16))
4000 /// isPSHUFLWMask - Return true if the node specifies a shuffle of elements that
4001 /// is suitable for input to PSHUFLW.
4002 static bool isPSHUFLWMask(ArrayRef<int> Mask, MVT VT, bool HasInt256) {
4003 if (VT != MVT::v8i16 && (!HasInt256 || VT != MVT::v16i16))
4006 // Upper quadword copied in order.
4007 if (!isSequentialOrUndefInRange(Mask, 4, 4, 4))
4010 // Lower quadword shuffled.
4011 for (unsigned i = 0; i != 4; ++i)
4012 if (!isUndefOrInRange(Mask[i], 0, 4))
4015 if (VT == MVT::v16i16) {
4016 // Upper quadword copied in order.
4017 if (!isSequentialOrUndefInRange(Mask, 12, 4, 12))
4020 // Lower quadword shuffled.
4021 for (unsigned i = 8; i != 12; ++i)
4022 if (!isUndefOrInRange(Mask[i], 8, 12))
4029 /// \brief Return true if the mask specifies a shuffle of elements that is
4030 /// suitable for input to intralane (palignr) or interlane (valign) vector shift operations.
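// For example, for v8i16 the mask <3, 4, 5, 6, 7, 8, 9, 10> (elements 3..7 of
// the first input followed by elements 0..2 of the second) is accepted here
// and corresponds to a rotation by 3 elements (a 6-byte PALIGNR).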
4032 static bool isAlignrMask(ArrayRef<int> Mask, MVT VT, bool InterLane) {
4033 unsigned NumElts = VT.getVectorNumElements();
4034 unsigned NumLanes = InterLane ? 1: VT.getSizeInBits()/128;
4035 unsigned NumLaneElts = NumElts/NumLanes;
4037 // Do not handle 64-bit element shuffles with palignr.
4038 if (NumLaneElts == 2)
4041 for (unsigned l = 0; l != NumElts; l+=NumLaneElts) {
4043 for (i = 0; i != NumLaneElts; ++i) {
4048 // Lane is all undef, go to next lane
4049 if (i == NumLaneElts)
4052 int Start = Mask[i+l];
4054 // Make sure it's in this lane in one of the sources.
4055 if (!isUndefOrInRange(Start, l, l+NumLaneElts) &&
4056 !isUndefOrInRange(Start, l+NumElts, l+NumElts+NumLaneElts))
4059 // If not lane 0, then we must match lane 0
4060 if (l != 0 && Mask[i] >= 0 && !isUndefOrEqual(Start, Mask[i]+l))
4063 // Correct second source to be contiguous with first source
4064 if (Start >= (int)NumElts)
4065 Start -= NumElts - NumLaneElts;
4067 // Make sure we're shifting in the right direction.
4068 if (Start <= (int)(i+l))
4073 // Check the rest of the elements to see if they are consecutive.
4074 for (++i; i != NumLaneElts; ++i) {
4075 int Idx = Mask[i+l];
4077 // Make sure it's in this lane.
4078 if (!isUndefOrInRange(Idx, l, l+NumLaneElts) &&
4079 !isUndefOrInRange(Idx, l+NumElts, l+NumElts+NumLaneElts))
4082 // If not lane 0, then we must match lane 0
4083 if (l != 0 && Mask[i] >= 0 && !isUndefOrEqual(Idx, Mask[i]+l))
4086 if (Idx >= (int)NumElts)
4087 Idx -= NumElts - NumLaneElts;
4089 if (!isUndefOrEqual(Idx, Start+i))
4098 /// \brief Return true if the node specifies a shuffle of elements that is
4099 /// suitable for input to PALIGNR.
4100 static bool isPALIGNRMask(ArrayRef<int> Mask, MVT VT,
4101 const X86Subtarget *Subtarget) {
4102 if ((VT.is128BitVector() && !Subtarget->hasSSSE3()) ||
4103 (VT.is256BitVector() && !Subtarget->hasInt256()) ||
4104 VT.is512BitVector())
4105 // FIXME: Add AVX512BW.
4108 return isAlignrMask(Mask, VT, false);
4111 /// \brief Return true if the node specifies a shuffle of elements that is
4112 /// suitable for input to VALIGN.
4113 static bool isVALIGNMask(ArrayRef<int> Mask, MVT VT,
4114 const X86Subtarget *Subtarget) {
4115 // FIXME: Add AVX512VL.
4116 if (!VT.is512BitVector() || !Subtarget->hasAVX512())
4118 return isAlignrMask(Mask, VT, true);
4121 /// CommuteVectorShuffleMask - Change values in a shuffle permute mask assuming
4122 /// the two vector operands have swapped position.
4123 static void CommuteVectorShuffleMask(SmallVectorImpl<int> &Mask,
4124 unsigned NumElems) {
4125 for (unsigned i = 0; i != NumElems; ++i) {
4129 else if (idx < (int)NumElems)
4130 Mask[i] = idx + NumElems;
4132 Mask[i] = idx - NumElems;
4136 /// isSHUFPMask - Return true if the specified VECTOR_SHUFFLE operand
4137 /// specifies a shuffle of elements that is suitable for input to 128/256-bit
4138 /// SHUFPS and SHUFPD. If Commuted is true, then it checks whether the sources
4139 /// are in the reverse order of what x86 shuffles want.
4140 static bool isSHUFPMask(ArrayRef<int> Mask, MVT VT, bool Commuted = false) {
4142 unsigned NumElems = VT.getVectorNumElements();
4143 unsigned NumLanes = VT.getSizeInBits()/128;
4144 unsigned NumLaneElems = NumElems/NumLanes;
4146 if (NumLaneElems != 2 && NumLaneElems != 4)
4149 unsigned EltSize = VT.getVectorElementType().getSizeInBits();
4150 bool symmetricMaskRequired =
4151 (VT.getSizeInBits() >= 256) && (EltSize == 32);
4153 // VSHUFPSY divides the resulting vector into 4 chunks.
4154 // The sources are also split into 4 chunks, and each destination
4155 // chunk must come from a different source chunk.
4157 // SRC1 => X7 X6 X5 X4 X3 X2 X1 X0
4158 // SRC2 => Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0
4160 // DST => Y7..Y4, Y7..Y4, X7..X4, X7..X4,
4161 // Y3..Y0, Y3..Y0, X3..X0, X3..X0
4163 // VSHUFPDY divides the resulting vector into 4 chunks.
4164 // The sources are also split into 4 chunks, and each destination
4165 // chunk must come from a different source chunk.
4167 // SRC1 => X3 X2 X1 X0
4168 // SRC2 => Y3 Y2 Y1 Y0
4170 // DST => Y3..Y2, X3..X2, Y1..Y0, X1..X0
4172 SmallVector<int, 4> MaskVal(NumLaneElems, -1);
4173 unsigned HalfLaneElems = NumLaneElems/2;
4174 for (unsigned l = 0; l != NumElems; l += NumLaneElems) {
4175 for (unsigned i = 0; i != NumLaneElems; ++i) {
4176 int Idx = Mask[i+l];
4177 unsigned RngStart = l + ((Commuted == (i<HalfLaneElems)) ? NumElems : 0);
4178 if (!isUndefOrInRange(Idx, RngStart, RngStart+NumLaneElems))
4180 // For VSHUFPSY, the mask of the second half must be the same as the
4181 // first but with the appropriate offsets. This works in the same way as
4182 // VPERMILPS works with masks.
4183 if (!symmetricMaskRequired || Idx < 0)
4185 if (MaskVal[i] < 0) {
4186 MaskVal[i] = Idx - l;
4189 if ((signed)(Idx - l) != MaskVal[i])
4197 /// isMOVHLPSMask - Return true if the specified VECTOR_SHUFFLE operand
4198 /// specifies a shuffle of elements that is suitable for input to MOVHLPS.
4199 static bool isMOVHLPSMask(ArrayRef<int> Mask, MVT VT) {
4200 if (!VT.is128BitVector())
4203 unsigned NumElems = VT.getVectorNumElements();
4208 // Expect bit0 == 6, bit1 == 7, bit2 == 2, bit3 == 3
4209 return isUndefOrEqual(Mask[0], 6) &&
4210 isUndefOrEqual(Mask[1], 7) &&
4211 isUndefOrEqual(Mask[2], 2) &&
4212 isUndefOrEqual(Mask[3], 3);
4215 /// isMOVHLPS_v_undef_Mask - Special case of isMOVHLPSMask for canonical form
4216 /// of vector_shuffle v, v, <2, 3, 2, 3>, i.e. vector_shuffle v, undef, <2, 3, 2, 3>.
4218 static bool isMOVHLPS_v_undef_Mask(ArrayRef<int> Mask, MVT VT) {
4219 if (!VT.is128BitVector())
4222 unsigned NumElems = VT.getVectorNumElements();
4227 return isUndefOrEqual(Mask[0], 2) &&
4228 isUndefOrEqual(Mask[1], 3) &&
4229 isUndefOrEqual(Mask[2], 2) &&
4230 isUndefOrEqual(Mask[3], 3);
4233 /// isMOVLPMask - Return true if the specified VECTOR_SHUFFLE operand
4234 /// specifies a shuffle of elements that is suitable for input to MOVLP{S|D}.
4235 static bool isMOVLPMask(ArrayRef<int> Mask, MVT VT) {
4236 if (!VT.is128BitVector())
4239 unsigned NumElems = VT.getVectorNumElements();
4241 if (NumElems != 2 && NumElems != 4)
4244 for (unsigned i = 0, e = NumElems/2; i != e; ++i)
4245 if (!isUndefOrEqual(Mask[i], i + NumElems))
4248 for (unsigned i = NumElems/2, e = NumElems; i != e; ++i)
4249 if (!isUndefOrEqual(Mask[i], i))
4255 /// isMOVLHPSMask - Return true if the specified VECTOR_SHUFFLE operand
4256 /// specifies a shuffle of elements that is suitable for input to MOVLHPS.
4257 static bool isMOVLHPSMask(ArrayRef<int> Mask, MVT VT) {
4258 if (!VT.is128BitVector())
4261 unsigned NumElems = VT.getVectorNumElements();
4263 if (NumElems != 2 && NumElems != 4)
4266 for (unsigned i = 0, e = NumElems/2; i != e; ++i)
4267 if (!isUndefOrEqual(Mask[i], i))
4270 for (unsigned i = 0, e = NumElems/2; i != e; ++i)
4271 if (!isUndefOrEqual(Mask[i + e], i + NumElems))
4277 /// isINSERTPSMask - Return true if the specified VECTOR_SHUFFLE operand
4278 /// specifies a shuffle of elements that is suitable for input to INSERTPS.
4279 /// i.e. if all but one element come from the same vector.
4280 static bool isINSERTPSMask(ArrayRef<int> Mask, MVT VT) {
4281 // TODO: Deal with AVX's VINSERTPS
4282 if (!VT.is128BitVector() || (VT != MVT::v4f32 && VT != MVT::v4i32))
4285 unsigned CorrectPosV1 = 0;
4286 unsigned CorrectPosV2 = 0;
4287 for (int i = 0, e = (int)VT.getVectorNumElements(); i != e; ++i) {
4288 if (Mask[i] == -1) {
4296 else if (Mask[i] == i + 4)
4300 if (CorrectPosV1 == 3 || CorrectPosV2 == 3)
4301 // We have 3 elements (undefs count as elements from any vector) from one
4302 // vector, and one from another.
4309 // Some special combinations that can be optimized.
4312 SDValue Compact8x32ShuffleNode(ShuffleVectorSDNode *SVOp,
4313 SelectionDAG &DAG) {
4314 MVT VT = SVOp->getSimpleValueType(0);
4317 if (VT != MVT::v8i32 && VT != MVT::v8f32)
4320 ArrayRef<int> Mask = SVOp->getMask();
4322 // These are the special masks that may be optimized.
4323 static const int MaskToOptimizeEven[] = {0, 8, 2, 10, 4, 12, 6, 14};
4324 static const int MaskToOptimizeOdd[] = {1, 9, 3, 11, 5, 13, 7, 15};
4325 bool MatchEvenMask = true;
4326 bool MatchOddMask = true;
4327 for (int i=0; i<8; ++i) {
4328 if (!isUndefOrEqual(Mask[i], MaskToOptimizeEven[i]))
4329 MatchEvenMask = false;
4330 if (!isUndefOrEqual(Mask[i], MaskToOptimizeOdd[i]))
4331 MatchOddMask = false;
4334 if (!MatchEvenMask && !MatchOddMask)
4337 SDValue UndefNode = DAG.getNode(ISD::UNDEF, dl, VT);
4339 SDValue Op0 = SVOp->getOperand(0);
4340 SDValue Op1 = SVOp->getOperand(1);
4342 if (MatchEvenMask) {
4343 // Shift the second operand right by 32 bits.
4344 static const int ShiftRightMask[] = {-1, 0, -1, 2, -1, 4, -1, 6 };
4345 Op1 = DAG.getVectorShuffle(VT, dl, Op1, UndefNode, ShiftRightMask);
4347 // Shift the first operand left by 32 bits.
4348 static const int ShiftLeftMask[] = {1, -1, 3, -1, 5, -1, 7, -1 };
4349 Op0 = DAG.getVectorShuffle(VT, dl, Op0, UndefNode, ShiftLeftMask);
4351 static const int BlendMask[] = {0, 9, 2, 11, 4, 13, 6, 15};
4352 return DAG.getVectorShuffle(VT, dl, Op0, Op1, BlendMask);
4355 /// isUNPCKLMask - Return true if the specified VECTOR_SHUFFLE operand
4356 /// specifies a shuffle of elements that is suitable for input to UNPCKL.
4357 static bool isUNPCKLMask(ArrayRef<int> Mask, MVT VT,
4358 bool HasInt256, bool V2IsSplat = false) {
4360 assert(VT.getSizeInBits() >= 128 &&
4361 "Unsupported vector type for unpckl");
4363 unsigned NumElts = VT.getVectorNumElements();
4364 if (VT.is256BitVector() && NumElts != 4 && NumElts != 8 &&
4365 (!HasInt256 || (NumElts != 16 && NumElts != 32)))
4368 assert((!VT.is512BitVector() || VT.getScalarType().getSizeInBits() >= 32) &&
4369 "Unsupported vector type for unpckh");
4371 // AVX defines UNPCK* to operate independently on 128-bit lanes.
4372 unsigned NumLanes = VT.getSizeInBits()/128;
4373 unsigned NumLaneElts = NumElts/NumLanes;
4375 for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
4376 for (unsigned i = 0, j = l; i != NumLaneElts; i += 2, ++j) {
4377 int BitI = Mask[l+i];
4378 int BitI1 = Mask[l+i+1];
4379 if (!isUndefOrEqual(BitI, j))
4382 if (!isUndefOrEqual(BitI1, NumElts))
4385 if (!isUndefOrEqual(BitI1, j + NumElts))
4394 /// isUNPCKHMask - Return true if the specified VECTOR_SHUFFLE operand
4395 /// specifies a shuffle of elements that is suitable for input to UNPCKH.
4396 static bool isUNPCKHMask(ArrayRef<int> Mask, MVT VT,
4397 bool HasInt256, bool V2IsSplat = false) {
4398 assert(VT.getSizeInBits() >= 128 &&
4399 "Unsupported vector type for unpckh");
4401 unsigned NumElts = VT.getVectorNumElements();
4402 if (VT.is256BitVector() && NumElts != 4 && NumElts != 8 &&
4403 (!HasInt256 || (NumElts != 16 && NumElts != 32)))
4406 assert((!VT.is512BitVector() || VT.getScalarType().getSizeInBits() >= 32) &&
4407 "Unsupported vector type for unpckh");
4409 // AVX defines UNPCK* to operate independently on 128-bit lanes.
4410 unsigned NumLanes = VT.getSizeInBits()/128;
4411 unsigned NumLaneElts = NumElts/NumLanes;
4413 for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
4414 for (unsigned i = 0, j = l+NumLaneElts/2; i != NumLaneElts; i += 2, ++j) {
4415 int BitI = Mask[l+i];
4416 int BitI1 = Mask[l+i+1];
4417 if (!isUndefOrEqual(BitI, j))
4420 if (isUndefOrEqual(BitI1, NumElts))
4423 if (!isUndefOrEqual(BitI1, j+NumElts))
4431 /// isUNPCKL_v_undef_Mask - Special case of isUNPCKLMask for canonical form
4432 /// of vector_shuffle v, v, <0, 4, 1, 5>, i.e. vector_shuffle v, undef, <0, 0, 1, 1>.
4434 static bool isUNPCKL_v_undef_Mask(ArrayRef<int> Mask, MVT VT, bool HasInt256) {
4435 unsigned NumElts = VT.getVectorNumElements();
4436 bool Is256BitVec = VT.is256BitVector();
4438 if (VT.is512BitVector())
4440 assert((VT.is128BitVector() || VT.is256BitVector()) &&
4441 "Unsupported vector type for unpckh");
4443 if (Is256BitVec && NumElts != 4 && NumElts != 8 &&
4444 (!HasInt256 || (NumElts != 16 && NumElts != 32)))
4447 // For 256-bit i64/f64, use MOVDDUPY instead, so reject the matching pattern
4448 // FIXME: Need a better way to get rid of this, there's no latency difference
4449 // between UNPCKLPD and MOVDDUP; the latter should always be checked first and
4450 // the former later. We should also remove the "_undef" special mask.
4451 if (NumElts == 4 && Is256BitVec)
4454 // Handle 128 and 256-bit vector lengths. AVX defines UNPCK* to operate
4455 // independently on 128-bit lanes.
4456 unsigned NumLanes = VT.getSizeInBits()/128;
4457 unsigned NumLaneElts = NumElts/NumLanes;
4459 for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
4460 for (unsigned i = 0, j = l; i != NumLaneElts; i += 2, ++j) {
4461 int BitI = Mask[l+i];
4462 int BitI1 = Mask[l+i+1];
4464 if (!isUndefOrEqual(BitI, j))
4466 if (!isUndefOrEqual(BitI1, j))
4474 /// isUNPCKH_v_undef_Mask - Special case of isUNPCKHMask for canonical form
4475 /// of vector_shuffle v, v, <2, 6, 3, 7>, i.e. vector_shuffle v, undef, <2, 2, 3, 3>.
4477 static bool isUNPCKH_v_undef_Mask(ArrayRef<int> Mask, MVT VT, bool HasInt256) {
4478 unsigned NumElts = VT.getVectorNumElements();
4480 if (VT.is512BitVector())
4483 assert((VT.is128BitVector() || VT.is256BitVector()) &&
4484 "Unsupported vector type for unpckh");
4486 if (VT.is256BitVector() && NumElts != 4 && NumElts != 8 &&
4487 (!HasInt256 || (NumElts != 16 && NumElts != 32)))
4490 // Handle 128 and 256-bit vector lengths. AVX defines UNPCK* to operate
4491 // independently on 128-bit lanes.
4492 unsigned NumLanes = VT.getSizeInBits()/128;
4493 unsigned NumLaneElts = NumElts/NumLanes;
4495 for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
4496 for (unsigned i = 0, j = l+NumLaneElts/2; i != NumLaneElts; i += 2, ++j) {
4497 int BitI = Mask[l+i];
4498 int BitI1 = Mask[l+i+1];
4499 if (!isUndefOrEqual(BitI, j))
4501 if (!isUndefOrEqual(BitI1, j))
4508 // Match for INSERTI64x4 INSERTF64x4 instructions (src0[0], src1[0]) or
4509 // (src1[0], src0[1]), manipulation with 256-bit sub-vectors
4510 static bool isINSERT64x4Mask(ArrayRef<int> Mask, MVT VT, unsigned int *Imm) {
4511 if (!VT.is512BitVector())
4514 unsigned NumElts = VT.getVectorNumElements();
4515 unsigned HalfSize = NumElts/2;
4516 if (isSequentialOrUndefInRange(Mask, 0, HalfSize, 0)) {
4517 if (isSequentialOrUndefInRange(Mask, HalfSize, HalfSize, NumElts)) {
4522 if (isSequentialOrUndefInRange(Mask, 0, HalfSize, NumElts)) {
4523 if (isSequentialOrUndefInRange(Mask, HalfSize, HalfSize, HalfSize)) {
4531 /// isMOVLMask - Return true if the specified VECTOR_SHUFFLE operand
4532 /// specifies a shuffle of elements that is suitable for input to MOVSS,
4533 /// MOVSD, and MOVD, i.e. setting the lowest element.
4534 static bool isMOVLMask(ArrayRef<int> Mask, EVT VT) {
4535 if (VT.getVectorElementType().getSizeInBits() < 32)
4537 if (!VT.is128BitVector())
4540 unsigned NumElts = VT.getVectorNumElements();
4542 if (!isUndefOrEqual(Mask[0], NumElts))
4545 for (unsigned i = 1; i != NumElts; ++i)
4546 if (!isUndefOrEqual(Mask[i], i))
4552 /// isVPERM2X128Mask - Match 256-bit shuffles where the elements are considered
4553 /// as permutations between 128-bit chunks or halves. As an example, consider:
4555 /// vector_shuffle <4, 5, 6, 7, 12, 13, 14, 15>
4556 /// The first half comes from the second half of V1 and the second half from
4557 /// the second half of V2.
4558 static bool isVPERM2X128Mask(ArrayRef<int> Mask, MVT VT, bool HasFp256) {
4559 if (!HasFp256 || !VT.is256BitVector())
4562 // The shuffle result is divided into half A and half B. In total the two
4563 // sources have 4 halves, namely: C, D, E, F. The final values of A and
4564 // B must come from C, D, E or F.
4565 unsigned HalfSize = VT.getVectorNumElements()/2;
4566 bool MatchA = false, MatchB = false;
4568 // Check if A comes from one of C, D, E, F.
4569 for (unsigned Half = 0; Half != 4; ++Half) {
4570 if (isSequentialOrUndefInRange(Mask, 0, HalfSize, Half*HalfSize)) {
4576 // Check if B comes from one of C, D, E, F.
4577 for (unsigned Half = 0; Half != 4; ++Half) {
4578 if (isSequentialOrUndefInRange(Mask, HalfSize, HalfSize, Half*HalfSize)) {
4584 return MatchA && MatchB;
4587 /// getShuffleVPERM2X128Immediate - Return the appropriate immediate to shuffle
4588 /// the specified VECTOR_SHUFFLE mask with the VPERM2F128/VPERM2I128 instructions.
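// For example, for the v8i32 mask <4, 5, 6, 7, 12, 13, 14, 15> mentioned
// above, the low half selects half 1 (the upper half of V1) and the high
// half selects half 3 (the upper half of V2), giving 1 | (3 << 4) = 0x31.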
4589 static unsigned getShuffleVPERM2X128Immediate(ShuffleVectorSDNode *SVOp) {
4590 MVT VT = SVOp->getSimpleValueType(0);
4592 unsigned HalfSize = VT.getVectorNumElements()/2;
4594 unsigned FstHalf = 0, SndHalf = 0;
4595 for (unsigned i = 0; i < HalfSize; ++i) {
4596 if (SVOp->getMaskElt(i) > 0) {
4597 FstHalf = SVOp->getMaskElt(i)/HalfSize;
4601 for (unsigned i = HalfSize; i < HalfSize*2; ++i) {
4602 if (SVOp->getMaskElt(i) > 0) {
4603 SndHalf = SVOp->getMaskElt(i)/HalfSize;
4608 return (FstHalf | (SndHalf << 4));
4611 // Symmetric in-lane mask. Each lane has 4 elements (for imm8)
4612 static bool isPermImmMask(ArrayRef<int> Mask, MVT VT, unsigned& Imm8) {
4613 unsigned EltSize = VT.getVectorElementType().getSizeInBits();
4617 unsigned NumElts = VT.getVectorNumElements();
4619 if (VT.is128BitVector() || (VT.is256BitVector() && EltSize == 64)) {
4620 for (unsigned i = 0; i != NumElts; ++i) {
4623 Imm8 |= Mask[i] << (i*2);
4628 unsigned LaneSize = 4;
4629 SmallVector<int, 4> MaskVal(LaneSize, -1);
4631 for (unsigned l = 0; l != NumElts; l += LaneSize) {
4632 for (unsigned i = 0; i != LaneSize; ++i) {
4633 if (!isUndefOrInRange(Mask[i+l], l, l+LaneSize))
4637 if (MaskVal[i] < 0) {
4638 MaskVal[i] = Mask[i+l] - l;
4639 Imm8 |= MaskVal[i] << (i*2);
4642 if (Mask[i+l] != (signed)(MaskVal[i]+l))
4649 /// isVPERMILPMask - Return true if the specified VECTOR_SHUFFLE operand
4650 /// specifies a shuffle of elements that is suitable for input to VPERMILPD*.
4651 /// Note that VPERMIL mask matching differs depending on whether the underlying
4652 /// type is 32 or 64 bit. In VPERMILPS the high half of the mask should point
4653 /// to the same elements as the low half, but in the higher half of the source.
4654 /// In VPERMILPD the two lanes could be shuffled independently of each other
4655 /// with the same restriction that lanes can't be crossed. Also handles PSHUFDY.
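/// For example, a v8f32 mask <2, 3, 0, 1, 6, 7, 4, 5> repeats the in-lane
/// pattern <2, 3, 0, 1> in both 128-bit lanes, so it is a valid VPERMILPS
/// mask (illustrative example).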
4656 static bool isVPERMILPMask(ArrayRef<int> Mask, MVT VT) {
4657 unsigned EltSize = VT.getVectorElementType().getSizeInBits();
4658 if (VT.getSizeInBits() < 256 || EltSize < 32)
4660 bool symmetricMaskRequired = (EltSize == 32);
4661 unsigned NumElts = VT.getVectorNumElements();
4663 unsigned NumLanes = VT.getSizeInBits()/128;
4664 unsigned LaneSize = NumElts/NumLanes;
4665 // 2 or 4 elements in one lane
4667 SmallVector<int, 4> ExpectedMaskVal(LaneSize, -1);
4668 for (unsigned l = 0; l != NumElts; l += LaneSize) {
4669 for (unsigned i = 0; i != LaneSize; ++i) {
4670 if (!isUndefOrInRange(Mask[i+l], l, l+LaneSize))
4672 if (symmetricMaskRequired) {
4673 if (ExpectedMaskVal[i] < 0 && Mask[i+l] >= 0) {
4674 ExpectedMaskVal[i] = Mask[i+l] - l;
4677 if (!isUndefOrEqual(Mask[i+l], ExpectedMaskVal[i]+l))
4685 /// isCommutedMOVLMask - Returns true if the shuffle mask is the reverse of what
4686 /// x86 movss wants: the lowest element must be the lowest element of vector 2,
4687 /// and the other elements must come from vector 1 in order.
4688 static bool isCommutedMOVLMask(ArrayRef<int> Mask, MVT VT,
4689 bool V2IsSplat = false, bool V2IsUndef = false) {
4690 if (!VT.is128BitVector())
4693 unsigned NumOps = VT.getVectorNumElements();
4694 if (NumOps != 2 && NumOps != 4 && NumOps != 8 && NumOps != 16)
4697 if (!isUndefOrEqual(Mask[0], 0))
4700 for (unsigned i = 1; i != NumOps; ++i)
4701 if (!(isUndefOrEqual(Mask[i], i+NumOps) ||
4702 (V2IsUndef && isUndefOrInRange(Mask[i], NumOps, NumOps*2)) ||
4703 (V2IsSplat && isUndefOrEqual(Mask[i], NumOps))))
4709 /// isMOVSHDUPMask - Return true if the specified VECTOR_SHUFFLE operand
4710 /// specifies a shuffle of elements that is suitable for input to MOVSHDUP.
4711 /// Masks to match: <1, 1, 3, 3> or <1, 1, 3, 3, 5, 5, 7, 7>
4712 static bool isMOVSHDUPMask(ArrayRef<int> Mask, MVT VT,
4713 const X86Subtarget *Subtarget) {
4714 if (!Subtarget->hasSSE3())
4717 unsigned NumElems = VT.getVectorNumElements();
4719 if ((VT.is128BitVector() && NumElems != 4) ||
4720 (VT.is256BitVector() && NumElems != 8) ||
4721 (VT.is512BitVector() && NumElems != 16))
4724 // "i+1" is the value the indexed mask element must have
4725 for (unsigned i = 0; i != NumElems; i += 2)
4726 if (!isUndefOrEqual(Mask[i], i+1) ||
4727 !isUndefOrEqual(Mask[i+1], i+1))
4733 /// isMOVSLDUPMask - Return true if the specified VECTOR_SHUFFLE operand
4734 /// specifies a shuffle of elements that is suitable for input to MOVSLDUP.
4735 /// Masks to match: <0, 0, 2, 2> or <0, 0, 2, 2, 4, 4, 6, 6>
4736 static bool isMOVSLDUPMask(ArrayRef<int> Mask, MVT VT,
4737 const X86Subtarget *Subtarget) {
4738 if (!Subtarget->hasSSE3())
4741 unsigned NumElems = VT.getVectorNumElements();
4743 if ((VT.is128BitVector() && NumElems != 4) ||
4744 (VT.is256BitVector() && NumElems != 8) ||
4745 (VT.is512BitVector() && NumElems != 16))
4748 // "i" is the value the indexed mask element must have
4749 for (unsigned i = 0; i != NumElems; i += 2)
4750 if (!isUndefOrEqual(Mask[i], i) ||
4751 !isUndefOrEqual(Mask[i+1], i))
4757 /// isMOVDDUPYMask - Return true if the specified VECTOR_SHUFFLE operand
4758 /// specifies a shuffle of elements that is suitable for input to 256-bit
4759 /// version of MOVDDUP.
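/// For v4f64 this accepts the mask <0, 0, 2, 2>: each 128-bit half duplicates
/// its first element (illustrative example).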
4760 static bool isMOVDDUPYMask(ArrayRef<int> Mask, MVT VT, bool HasFp256) {
4761 if (!HasFp256 || !VT.is256BitVector())
4764 unsigned NumElts = VT.getVectorNumElements();
4768 for (unsigned i = 0; i != NumElts/2; ++i)
4769 if (!isUndefOrEqual(Mask[i], 0))
4771 for (unsigned i = NumElts/2; i != NumElts; ++i)
4772 if (!isUndefOrEqual(Mask[i], NumElts/2))
4777 /// isMOVDDUPMask - Return true if the specified VECTOR_SHUFFLE operand
4778 /// specifies a shuffle of elements that is suitable for input to 128-bit
4779 /// version of MOVDDUP.
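/// For example, for v2f64 this matches the mask <0, 0>: both result elements
/// duplicate element 0 of the source (illustrative example).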
4780 static bool isMOVDDUPMask(ArrayRef<int> Mask, MVT VT) {
4781 if (!VT.is128BitVector())
4784 unsigned e = VT.getVectorNumElements() / 2;
4785 for (unsigned i = 0; i != e; ++i)
4786 if (!isUndefOrEqual(Mask[i], i))
4788 for (unsigned i = 0; i != e; ++i)
4789 if (!isUndefOrEqual(Mask[e+i], i))
4794 /// isVEXTRACTIndex - Return true if the specified
4795 /// EXTRACT_SUBVECTOR operand specifies a vector extract that is
4796 /// suitable for instruction that extract 128 or 256 bit vectors
4797 static bool isVEXTRACTIndex(SDNode *N, unsigned vecWidth) {
4798 assert((vecWidth == 128 || vecWidth == 256) && "Unexpected vector width");
4799 if (!isa<ConstantSDNode>(N->getOperand(1).getNode()))
4802 // The index should be aligned on a vecWidth-bit boundary.
4804 cast<ConstantSDNode>(N->getOperand(1).getNode())->getZExtValue();
4806 MVT VT = N->getSimpleValueType(0);
4807 unsigned ElSize = VT.getVectorElementType().getSizeInBits();
4808 bool Result = (Index * ElSize) % vecWidth == 0;
4813 /// isVINSERTIndex - Return true if the specified INSERT_SUBVECTOR
4814 /// operand specifies a subvector insert that is suitable for input to
4815 /// insertion of 128 or 256-bit subvectors
4816 static bool isVINSERTIndex(SDNode *N, unsigned vecWidth) {
4817 assert((vecWidth == 128 || vecWidth == 256) && "Unexpected vector width");
4818 if (!isa<ConstantSDNode>(N->getOperand(2).getNode()))
4820 // The index should be aligned on a vecWidth-bit boundary.
4822 cast<ConstantSDNode>(N->getOperand(2).getNode())->getZExtValue();
4824 MVT VT = N->getSimpleValueType(0);
4825 unsigned ElSize = VT.getVectorElementType().getSizeInBits();
4826 bool Result = (Index * ElSize) % vecWidth == 0;
4831 bool X86::isVINSERT128Index(SDNode *N) {
4832 return isVINSERTIndex(N, 128);
4835 bool X86::isVINSERT256Index(SDNode *N) {
4836 return isVINSERTIndex(N, 256);
4839 bool X86::isVEXTRACT128Index(SDNode *N) {
4840 return isVEXTRACTIndex(N, 128);
4843 bool X86::isVEXTRACT256Index(SDNode *N) {
4844 return isVEXTRACTIndex(N, 256);
4847 /// getShuffleSHUFImmediate - Return the appropriate immediate to shuffle
4848 /// the specified VECTOR_SHUFFLE mask with PSHUF* and SHUFP* instructions.
4849 /// Handles 128-bit and 256-bit.
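/// Worked example: for a v4i32 PSHUFD mask <3, 1, 2, 0> each element
/// contributes two bits, giving 3 | (1 << 2) | (2 << 4) | (0 << 6) == 0x27
/// (illustrative example).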
4850 static unsigned getShuffleSHUFImmediate(ShuffleVectorSDNode *N) {
4851 MVT VT = N->getSimpleValueType(0);
4853 assert((VT.getSizeInBits() >= 128) &&
4854 "Unsupported vector type for PSHUF/SHUFP");
4856 // Handle 128 and 256-bit vector lengths. AVX defines PSHUF/SHUFP to operate
4857 // independently on 128-bit lanes.
4858 unsigned NumElts = VT.getVectorNumElements();
4859 unsigned NumLanes = VT.getSizeInBits()/128;
4860 unsigned NumLaneElts = NumElts/NumLanes;
4862 assert((NumLaneElts == 2 || NumLaneElts == 4 || NumLaneElts == 8) &&
4863 "Only supports 2, 4 or 8 elements per lane");
4865 unsigned Shift = (NumLaneElts >= 4) ? 1 : 0;
4867 for (unsigned i = 0; i != NumElts; ++i) {
4868 int Elt = N->getMaskElt(i);
4869 if (Elt < 0) continue;
4870 Elt &= NumLaneElts - 1;
4871 unsigned ShAmt = (i << Shift) % 8;
4872 Mask |= Elt << ShAmt;
4878 /// getShufflePSHUFHWImmediate - Return the appropriate immediate to shuffle
4879 /// the specified VECTOR_SHUFFLE mask with the PSHUFHW instruction.
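/// For instance, a v8i16 PSHUFHW mask of <0, 1, 2, 3, 7, 6, 5, 4> encodes the
/// high four elements as 3 | (2 << 2) | (1 << 4) | (0 << 6) == 0x1B
/// (illustrative example).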
4880 static unsigned getShufflePSHUFHWImmediate(ShuffleVectorSDNode *N) {
4881 MVT VT = N->getSimpleValueType(0);
4883 assert((VT == MVT::v8i16 || VT == MVT::v16i16) &&
4884 "Unsupported vector type for PSHUFHW");
4886 unsigned NumElts = VT.getVectorNumElements();
4889 for (unsigned l = 0; l != NumElts; l += 8) {
4890 // 8 nodes per lane, but we only care about the last 4.
4891 for (unsigned i = 0; i < 4; ++i) {
4892 int Elt = N->getMaskElt(l+i+4);
4893 if (Elt < 0) continue;
4894 Elt &= 0x3; // only 2-bits.
4895 Mask |= Elt << (i * 2);
4902 /// getShufflePSHUFLWImmediate - Return the appropriate immediate to shuffle
4903 /// the specified VECTOR_SHUFFLE mask with the PSHUFLW instruction.
4904 static unsigned getShufflePSHUFLWImmediate(ShuffleVectorSDNode *N) {
4905 MVT VT = N->getSimpleValueType(0);
4907 assert((VT == MVT::v8i16 || VT == MVT::v16i16) &&
4908 "Unsupported vector type for PSHUFLW");
4910 unsigned NumElts = VT.getVectorNumElements();
4913 for (unsigned l = 0; l != NumElts; l += 8) {
4914 // 8 nodes per lane, but we only care about the first 4.
4915 for (unsigned i = 0; i < 4; ++i) {
4916 int Elt = N->getMaskElt(l+i);
4917 if (Elt < 0) continue;
4918 Elt &= 0x3; // only 2-bits
4919 Mask |= Elt << (i * 2);
4926 /// \brief Return the appropriate immediate to shuffle the specified
4927 /// VECTOR_SHUFFLE mask with the PALIGNR (if InterLane is false) or with
4928 /// VALIGN (if InterLane is true) instructions.
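/// Sketch of the computation: if the first defined mask element has value 5 at
/// position 0 in a v16i8 shuffle, the PALIGNR byte-shift immediate is
/// (5 - 0) * 1 == 5 bytes (illustrative, single-lane case).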
4929 static unsigned getShuffleAlignrImmediate(ShuffleVectorSDNode *SVOp,
4931 MVT VT = SVOp->getSimpleValueType(0);
4932 unsigned EltSize = InterLane ? 1 :
4933 VT.getVectorElementType().getSizeInBits() >> 3;
4935 unsigned NumElts = VT.getVectorNumElements();
4936 unsigned NumLanes = VT.is512BitVector() ? 1 : VT.getSizeInBits()/128;
4937 unsigned NumLaneElts = NumElts/NumLanes;
4941 for (i = 0; i != NumElts; ++i) {
4942 Val = SVOp->getMaskElt(i);
4946 if (Val >= (int)NumElts)
4947 Val -= NumElts - NumLaneElts;
4949 assert(Val - i > 0 && "PALIGNR imm should be positive");
4950 return (Val - i) * EltSize;
4953 /// \brief Return the appropriate immediate to shuffle the specified
4954 /// VECTOR_SHUFFLE mask with the PALIGNR instruction.
4955 static unsigned getShufflePALIGNRImmediate(ShuffleVectorSDNode *SVOp) {
4956 return getShuffleAlignrImmediate(SVOp, false);
4959 /// \brief Return the appropriate immediate to shuffle the specified
4960 /// VECTOR_SHUFFLE mask with the VALIGN instruction.
4961 static unsigned getShuffleVALIGNImmediate(ShuffleVectorSDNode *SVOp) {
4962 return getShuffleAlignrImmediate(SVOp, true);
4966 static unsigned getExtractVEXTRACTImmediate(SDNode *N, unsigned vecWidth) {
4967 assert((vecWidth == 128 || vecWidth == 256) && "Unsupported vector width");
4968 if (!isa<ConstantSDNode>(N->getOperand(1).getNode()))
4969 llvm_unreachable("Illegal extract subvector for VEXTRACT");
4972 cast<ConstantSDNode>(N->getOperand(1).getNode())->getZExtValue();
4974 MVT VecVT = N->getOperand(0).getSimpleValueType();
4975 MVT ElVT = VecVT.getVectorElementType();
4977 unsigned NumElemsPerChunk = vecWidth / ElVT.getSizeInBits();
4978 return Index / NumElemsPerChunk;
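// Example: extracting the upper 128 bits of a v8f32 corresponds to element
// index 4, giving 4 / (128 / 32) == 1 as the VEXTRACTF128 immediate
// (illustrative only).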
4981 static unsigned getInsertVINSERTImmediate(SDNode *N, unsigned vecWidth) {
4982 assert((vecWidth == 128 || vecWidth == 256) && "Unsupported vector width");
4983 if (!isa<ConstantSDNode>(N->getOperand(2).getNode()))
4984 llvm_unreachable("Illegal insert subvector for VINSERT");
4987 cast<ConstantSDNode>(N->getOperand(2).getNode())->getZExtValue();
4989 MVT VecVT = N->getSimpleValueType(0);
4990 MVT ElVT = VecVT.getVectorElementType();
4992 unsigned NumElemsPerChunk = vecWidth / ElVT.getSizeInBits();
4993 return Index / NumElemsPerChunk;
4996 /// getExtractVEXTRACT128Immediate - Return the appropriate immediate
4997 /// to extract the specified EXTRACT_SUBVECTOR index with VEXTRACTF128
4998 /// and VEXTRACTI128 instructions.
4999 unsigned X86::getExtractVEXTRACT128Immediate(SDNode *N) {
5000 return getExtractVEXTRACTImmediate(N, 128);
5003 /// getExtractVEXTRACT256Immediate - Return the appropriate immediate
5004 /// to extract the specified EXTRACT_SUBVECTOR index with VEXTRACTF64x4
5005 /// and VEXTRACTI64x4 instructions.
5006 unsigned X86::getExtractVEXTRACT256Immediate(SDNode *N) {
5007 return getExtractVEXTRACTImmediate(N, 256);
5010 /// getInsertVINSERT128Immediate - Return the appropriate immediate
5011 /// to insert at the specified INSERT_SUBVECTOR index with VINSERTF128
5012 /// and VINSERTI128 instructions.
5013 unsigned X86::getInsertVINSERT128Immediate(SDNode *N) {
5014 return getInsertVINSERTImmediate(N, 128);
5017 /// getInsertVINSERT256Immediate - Return the appropriate immediate
5018 /// to insert at the specified INSERT_SUBVECTOR index with VINSERTF64x4
5019 /// and VINSERTI64x4 instructions.
5020 unsigned X86::getInsertVINSERT256Immediate(SDNode *N) {
5021 return getInsertVINSERTImmediate(N, 256);
5024 /// isZero - Returns true if V is a constant integer zero
5025 static bool isZero(SDValue V) {
5026 ConstantSDNode *C = dyn_cast<ConstantSDNode>(V);
5027 return C && C->isNullValue();
5030 /// isZeroNode - Returns true if Elt is a constant zero or a floating point constant +0.0.
5032 bool X86::isZeroNode(SDValue Elt) {
5035 if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Elt))
5036 return CFP->getValueAPF().isPosZero();
5040 /// ShouldXformToMOVHLPS - Return true if the node should be transformed to
5041 /// match movhlps. The lower half elements should come from the upper half of
5042 /// V1 (and in order), and the upper half elements should come from the upper
5043 /// half of V2 (and in order).
5044 static bool ShouldXformToMOVHLPS(ArrayRef<int> Mask, MVT VT) {
5045 if (!VT.is128BitVector())
5047 if (VT.getVectorNumElements() != 4)
5049 for (unsigned i = 0, e = 2; i != e; ++i)
5050 if (!isUndefOrEqual(Mask[i], i+2))
5052 for (unsigned i = 2; i != 4; ++i)
5053 if (!isUndefOrEqual(Mask[i], i+4))
5058 /// isScalarLoadToVector - Returns true if the node is a scalar load that
5059 /// is promoted to a vector. It also returns the LoadSDNode by reference if requested.
5061 static bool isScalarLoadToVector(SDNode *N, LoadSDNode **LD = nullptr) {
5062 if (N->getOpcode() != ISD::SCALAR_TO_VECTOR)
5064 N = N->getOperand(0).getNode();
5065 if (!ISD::isNON_EXTLoad(N))
5068 *LD = cast<LoadSDNode>(N);
5072 // Test whether the given value is a vector value which will be legalized into a constant pool load.
5074 static bool WillBeConstantPoolLoad(SDNode *N) {
5075 if (N->getOpcode() != ISD::BUILD_VECTOR)
5078 // Check for any non-constant elements.
5079 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
5080 switch (N->getOperand(i).getNode()->getOpcode()) {
5082 case ISD::ConstantFP:
5089 // Vectors of all-zeros and all-ones are materialized with special
5090 // instructions rather than being loaded.
5091 return !ISD::isBuildVectorAllZeros(N) &&
5092 !ISD::isBuildVectorAllOnes(N);
5095 /// ShouldXformToMOVLP{S|D} - Return true if the node should be transformed to
5096 /// match movlp{s|d}. The lower half elements should come from the lower half of
5097 /// V1 (and in order), and the upper half elements should come from the upper
5098 /// half of V2 (and in order). And since V1 will become the source of the
5099 /// MOVLP, it must be either a vector load or a scalar load to vector.
5100 static bool ShouldXformToMOVLP(SDNode *V1, SDNode *V2,
5101 ArrayRef<int> Mask, MVT VT) {
5102 if (!VT.is128BitVector())
5105 if (!ISD::isNON_EXTLoad(V1) && !isScalarLoadToVector(V1))
5107 // If V2 is a vector load, don't do this transformation. We will try to use
5108 // load folding with the shufps op instead.
5109 if (ISD::isNON_EXTLoad(V2) || WillBeConstantPoolLoad(V2))
5112 unsigned NumElems = VT.getVectorNumElements();
5114 if (NumElems != 2 && NumElems != 4)
5116 for (unsigned i = 0, e = NumElems/2; i != e; ++i)
5117 if (!isUndefOrEqual(Mask[i], i))
5119 for (unsigned i = NumElems/2, e = NumElems; i != e; ++i)
5120 if (!isUndefOrEqual(Mask[i], i+NumElems))
5125 /// isZeroShuffle - Returns true if N is a VECTOR_SHUFFLE that can be resolved
5126 /// to a zero vector.
5127 /// FIXME: move to dag combiner / method on ShuffleVectorSDNode
5128 static bool isZeroShuffle(ShuffleVectorSDNode *N) {
5129 SDValue V1 = N->getOperand(0);
5130 SDValue V2 = N->getOperand(1);
5131 unsigned NumElems = N->getValueType(0).getVectorNumElements();
5132 for (unsigned i = 0; i != NumElems; ++i) {
5133 int Idx = N->getMaskElt(i);
5134 if (Idx >= (int)NumElems) {
5135 unsigned Opc = V2.getOpcode();
5136 if (Opc == ISD::UNDEF || ISD::isBuildVectorAllZeros(V2.getNode()))
5138 if (Opc != ISD::BUILD_VECTOR ||
5139 !X86::isZeroNode(V2.getOperand(Idx-NumElems)))
5141 } else if (Idx >= 0) {
5142 unsigned Opc = V1.getOpcode();
5143 if (Opc == ISD::UNDEF || ISD::isBuildVectorAllZeros(V1.getNode()))
5145 if (Opc != ISD::BUILD_VECTOR ||
5146 !X86::isZeroNode(V1.getOperand(Idx)))
5153 /// getZeroVector - Returns a vector of specified type with all zero elements.
5155 static SDValue getZeroVector(EVT VT, const X86Subtarget *Subtarget,
5156 SelectionDAG &DAG, SDLoc dl) {
5157 assert(VT.isVector() && "Expected a vector type");
5159 // Always build SSE zero vectors as <4 x i32> bitcasted
5160 // to their dest type. This ensures they get CSE'd.
5162 if (VT.is128BitVector()) { // SSE
5163 if (Subtarget->hasSSE2()) { // SSE2
5164 SDValue Cst = DAG.getConstant(0, MVT::i32);
5165 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Cst, Cst, Cst, Cst);
5167 SDValue Cst = DAG.getConstantFP(+0.0, MVT::f32);
5168 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4f32, Cst, Cst, Cst, Cst);
5170 } else if (VT.is256BitVector()) { // AVX
5171 if (Subtarget->hasInt256()) { // AVX2
5172 SDValue Cst = DAG.getConstant(0, MVT::i32);
5173 SDValue Ops[] = { Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst };
5174 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v8i32, Ops);
5176 // 256-bit logic and arithmetic instructions in AVX are all
5177 // floating-point, no support for integer ops. Emit fp zeroed vectors.
5178 SDValue Cst = DAG.getConstantFP(+0.0, MVT::f32);
5179 SDValue Ops[] = { Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst };
5180 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v8f32, Ops);
5182 } else if (VT.is512BitVector()) { // AVX-512
5183 SDValue Cst = DAG.getConstant(0, MVT::i32);
5184 SDValue Ops[] = { Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst,
5185 Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst };
5186 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v16i32, Ops);
5187 } else if (VT.getScalarType() == MVT::i1) {
5188 assert(VT.getVectorNumElements() <= 16 && "Unexpected vector type");
5189 SDValue Cst = DAG.getConstant(0, MVT::i1);
5190 SmallVector<SDValue, 16> Ops(VT.getVectorNumElements(), Cst);
5191 return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Ops);
5193 llvm_unreachable("Unexpected vector type");
5195 return DAG.getNode(ISD::BITCAST, dl, VT, Vec);
5198 /// getOnesVector - Returns a vector of specified type with all bits set.
5199 /// Always build ones vectors as <4 x i32> or <8 x i32>. For 256-bit types with
5200 /// no AVX2 support, use two <4 x i32> inserted into an <8 x i32> appropriately.
5201 /// Then bitcast to their original type, ensuring they get CSE'd.
5202 static SDValue getOnesVector(MVT VT, bool HasInt256, SelectionDAG &DAG,
5204 assert(VT.isVector() && "Expected a vector type");
5206 SDValue Cst = DAG.getConstant(~0U, MVT::i32);
5208 if (VT.is256BitVector()) {
5209 if (HasInt256) { // AVX2
5210 SDValue Ops[] = { Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst };
5211 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v8i32, Ops);
5213 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Cst, Cst, Cst, Cst);
5214 Vec = Concat128BitVectors(Vec, Vec, MVT::v8i32, 8, DAG, dl);
5216 } else if (VT.is128BitVector()) {
5217 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Cst, Cst, Cst, Cst);
5219 llvm_unreachable("Unexpected vector type");
5221 return DAG.getNode(ISD::BITCAST, dl, VT, Vec);
5224 /// NormalizeMask - V2 is a splat, modify the mask (if needed) so all elements
5225 /// that point to V2 point to its first element.
5226 static void NormalizeMask(SmallVectorImpl<int> &Mask, unsigned NumElems) {
5227 for (unsigned i = 0; i != NumElems; ++i) {
5228 if (Mask[i] > (int)NumElems) {
5234 /// getMOVLMask - Returns a vector_shuffle mask for a movs{s|d}, movd
5235 /// operation of specified width.
5236 static SDValue getMOVL(SelectionDAG &DAG, SDLoc dl, EVT VT, SDValue V1,
5238 unsigned NumElems = VT.getVectorNumElements();
5239 SmallVector<int, 8> Mask;
5240 Mask.push_back(NumElems);
5241 for (unsigned i = 1; i != NumElems; ++i)
5243 return DAG.getVectorShuffle(VT, dl, V1, V2, &Mask[0]);
5246 /// getUnpackl - Returns a vector_shuffle node for an unpackl operation.
5247 static SDValue getUnpackl(SelectionDAG &DAG, SDLoc dl, MVT VT, SDValue V1,
5249 unsigned NumElems = VT.getVectorNumElements();
5250 SmallVector<int, 8> Mask;
5251 for (unsigned i = 0, e = NumElems/2; i != e; ++i) {
5253 Mask.push_back(i + NumElems);
5255 return DAG.getVectorShuffle(VT, dl, V1, V2, &Mask[0]);
5258 /// getUnpackh - Returns a vector_shuffle node for an unpackh operation.
5259 static SDValue getUnpackh(SelectionDAG &DAG, SDLoc dl, MVT VT, SDValue V1,
5261 unsigned NumElems = VT.getVectorNumElements();
5262 SmallVector<int, 8> Mask;
5263 for (unsigned i = 0, Half = NumElems/2; i != Half; ++i) {
5264 Mask.push_back(i + Half);
5265 Mask.push_back(i + NumElems + Half);
5267 return DAG.getVectorShuffle(VT, dl, V1, V2, &Mask[0]);
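// For v4i32, getUnpackl builds the mask <0, 4, 1, 5> and getUnpackh builds
// <2, 6, 3, 7>, interleaving the low and high halves of V1 and V2
// respectively (illustrative masks).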
5270 // PromoteSplati8i16 - i16 and i8 vector types can't be used directly by
5271 // a generic shuffle instruction because the target has no such instructions.
5272 // Generate shuffles which repeat i16 and i8 several times until they can be
5273 // represented by v4f32 and then be manipulated by target supported shuffles.
5274 static SDValue PromoteSplati8i16(SDValue V, SelectionDAG &DAG, int &EltNo) {
5275 MVT VT = V.getSimpleValueType();
5276 int NumElems = VT.getVectorNumElements();
5279 while (NumElems > 4) {
5280 if (EltNo < NumElems/2) {
5281 V = getUnpackl(DAG, dl, VT, V, V);
5283 V = getUnpackh(DAG, dl, VT, V, V);
5284 EltNo -= NumElems/2;
5291 /// getLegalSplat - Generate a legal splat with supported x86 shuffles
5292 static SDValue getLegalSplat(SelectionDAG &DAG, SDValue V, int EltNo) {
5293 MVT VT = V.getSimpleValueType();
5296 if (VT.is128BitVector()) {
5297 V = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, V);
5298 int SplatMask[4] = { EltNo, EltNo, EltNo, EltNo };
5299 V = DAG.getVectorShuffle(MVT::v4f32, dl, V, DAG.getUNDEF(MVT::v4f32),
5301 } else if (VT.is256BitVector()) {
5302 // To use VPERMILPS to splat scalars, the second half of indices must
5303 // refer to the higher part, which is a duplication of the lower one,
5304 // because VPERMILPS can only handle in-lane permutations.
5305 int SplatMask[8] = { EltNo, EltNo, EltNo, EltNo,
5306 EltNo+4, EltNo+4, EltNo+4, EltNo+4 };
5308 V = DAG.getNode(ISD::BITCAST, dl, MVT::v8f32, V);
5309 V = DAG.getVectorShuffle(MVT::v8f32, dl, V, DAG.getUNDEF(MVT::v8f32),
5312 llvm_unreachable("Vector size not supported");
5314 return DAG.getNode(ISD::BITCAST, dl, VT, V);
5317 /// PromoteSplat - Splat is promoted to target supported vector shuffles.
5318 static SDValue PromoteSplat(ShuffleVectorSDNode *SV, SelectionDAG &DAG) {
5319 MVT SrcVT = SV->getSimpleValueType(0);
5320 SDValue V1 = SV->getOperand(0);
5323 int EltNo = SV->getSplatIndex();
5324 int NumElems = SrcVT.getVectorNumElements();
5325 bool Is256BitVec = SrcVT.is256BitVector();
5327 assert(((SrcVT.is128BitVector() && NumElems > 4) || Is256BitVec) &&
5328 "Unknown how to promote splat for type");
5330 // Extract the 128-bit part containing the splat element and update
5331 // the splat element index when it refers to the higher register.
5333 V1 = Extract128BitVector(V1, EltNo, DAG, dl);
5334 if (EltNo >= NumElems/2)
5335 EltNo -= NumElems/2;
5338 // i16 and i8 vector types can't be used directly by a generic shuffle
5339 // instruction because the target has no such instruction. Generate shuffles
5340 // which repeat i16 and i8 several times until they fit in i32, and then can
5341 // be manipulated by target supported shuffles.
5342 MVT EltVT = SrcVT.getVectorElementType();
5343 if (EltVT == MVT::i8 || EltVT == MVT::i16)
5344 V1 = PromoteSplati8i16(V1, DAG, EltNo);
5346 // Recreate the 256-bit vector and place the same 128-bit vector
5347 // into the low and high part. This is necessary because we want
5348 // to use VPERM* to shuffle the vectors
5350 V1 = DAG.getNode(ISD::CONCAT_VECTORS, dl, SrcVT, V1, V1);
5353 return getLegalSplat(DAG, V1, EltNo);
5356 /// getShuffleVectorZeroOrUndef - Return a vector_shuffle of the specified
5357 /// vector and a zero or undef vector. This produces a shuffle where the low
5358 /// element of V2 is swizzled into the zero/undef vector, landing at element
5359 /// Idx. This produces a shuffle mask like 4,1,2,3 (idx=0) or 0,1,2,4 (idx=3).
5360 static SDValue getShuffleVectorZeroOrUndef(SDValue V2, unsigned Idx,
5362 const X86Subtarget *Subtarget,
5363 SelectionDAG &DAG) {
5364 MVT VT = V2.getSimpleValueType();
5366 ? getZeroVector(VT, Subtarget, DAG, SDLoc(V2)) : DAG.getUNDEF(VT);
5367 unsigned NumElems = VT.getVectorNumElements();
5368 SmallVector<int, 16> MaskVec;
5369 for (unsigned i = 0; i != NumElems; ++i)
5370 // If this is the insertion idx, put the low elt of V2 here.
5371 MaskVec.push_back(i == Idx ? NumElems : i);
5372 return DAG.getVectorShuffle(VT, SDLoc(V2), V1, V2, &MaskVec[0]);
5375 /// getTargetShuffleMask - Calculates the shuffle mask corresponding to the
5376 /// target specific opcode. Returns true if the Mask could be calculated. Sets
5377 /// IsUnary to true if it only uses one source. Note that this will set IsUnary for
5378 /// shuffles which use a single input multiple times, and in those cases it will
5379 /// adjust the mask to only have indices within that single input.
5380 static bool getTargetShuffleMask(SDNode *N, MVT VT,
5381 SmallVectorImpl<int> &Mask, bool &IsUnary) {
5382 unsigned NumElems = VT.getVectorNumElements();
5386 bool IsFakeUnary = false;
5387 switch(N->getOpcode()) {
5388 case X86ISD::BLENDI:
5389 ImmN = N->getOperand(N->getNumOperands()-1);
5390 DecodeBLENDMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
5393 ImmN = N->getOperand(N->getNumOperands()-1);
5394 DecodeSHUFPMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
5395 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
5397 case X86ISD::UNPCKH:
5398 DecodeUNPCKHMask(VT, Mask);
5399 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
5401 case X86ISD::UNPCKL:
5402 DecodeUNPCKLMask(VT, Mask);
5403 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
5405 case X86ISD::MOVHLPS:
5406 DecodeMOVHLPSMask(NumElems, Mask);
5407 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
5409 case X86ISD::MOVLHPS:
5410 DecodeMOVLHPSMask(NumElems, Mask);
5411 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
5413 case X86ISD::PALIGNR:
5414 ImmN = N->getOperand(N->getNumOperands()-1);
5415 DecodePALIGNRMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
5417 case X86ISD::PSHUFD:
5418 case X86ISD::VPERMILPI:
5419 ImmN = N->getOperand(N->getNumOperands()-1);
5420 DecodePSHUFMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
5423 case X86ISD::PSHUFHW:
5424 ImmN = N->getOperand(N->getNumOperands()-1);
5425 DecodePSHUFHWMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
5428 case X86ISD::PSHUFLW:
5429 ImmN = N->getOperand(N->getNumOperands()-1);
5430 DecodePSHUFLWMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
5433 case X86ISD::PSHUFB: {
5435 SDValue MaskNode = N->getOperand(1);
5436 while (MaskNode->getOpcode() == ISD::BITCAST)
5437 MaskNode = MaskNode->getOperand(0);
5439 if (MaskNode->getOpcode() == ISD::BUILD_VECTOR) {
5440 // If we have a build-vector, then things are easy.
5441 EVT VT = MaskNode.getValueType();
5442 assert(VT.isVector() &&
5443 "Can't produce a non-vector with a build_vector!");
5444 if (!VT.isInteger())
5447 int NumBytesPerElement = VT.getVectorElementType().getSizeInBits() / 8;
5449 SmallVector<uint64_t, 32> RawMask;
5450 for (int i = 0, e = MaskNode->getNumOperands(); i < e; ++i) {
5451 SDValue Op = MaskNode->getOperand(i);
5452 if (Op->getOpcode() == ISD::UNDEF) {
5453 RawMask.push_back((uint64_t)SM_SentinelUndef);
5456 auto *CN = dyn_cast<ConstantSDNode>(Op.getNode());
5459 APInt MaskElement = CN->getAPIntValue();
5461 // We now have to decode the element which could be any integer size and
5462 // extract each byte of it.
5463 for (int j = 0; j < NumBytesPerElement; ++j) {
5464 // Note that this is x86 and so always little endian: the low byte is
5465 // the first byte of the mask.
5466 RawMask.push_back(MaskElement.getLoBits(8).getZExtValue());
5467 MaskElement = MaskElement.lshr(8);
5470 DecodePSHUFBMask(RawMask, Mask);
5474 auto *MaskLoad = dyn_cast<LoadSDNode>(MaskNode);
5478 SDValue Ptr = MaskLoad->getBasePtr();
5479 if (Ptr->getOpcode() == X86ISD::Wrapper)
5480 Ptr = Ptr->getOperand(0);
5482 auto *MaskCP = dyn_cast<ConstantPoolSDNode>(Ptr);
5483 if (!MaskCP || MaskCP->isMachineConstantPoolEntry())
5486 if (auto *C = dyn_cast<Constant>(MaskCP->getConstVal())) {
5487 DecodePSHUFBMask(C, Mask);
5495 case X86ISD::VPERMI:
5496 ImmN = N->getOperand(N->getNumOperands()-1);
5497 DecodeVPERMMask(cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
5502 DecodeScalarMoveMask(VT, /* IsLoad */ false, Mask);
5504 case X86ISD::VPERM2X128:
5505 ImmN = N->getOperand(N->getNumOperands()-1);
5506 DecodeVPERM2X128Mask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
5507 if (Mask.empty()) return false;
5509 case X86ISD::MOVSLDUP:
5510 DecodeMOVSLDUPMask(VT, Mask);
5513 case X86ISD::MOVSHDUP:
5514 DecodeMOVSHDUPMask(VT, Mask);
5517 case X86ISD::MOVDDUP:
5518 DecodeMOVDDUPMask(VT, Mask);
5521 case X86ISD::MOVLHPD:
5522 case X86ISD::MOVLPD:
5523 case X86ISD::MOVLPS:
5524 // Not yet implemented
5526 default: llvm_unreachable("unknown target shuffle node");
5529 // If we have a fake unary shuffle, the shuffle mask is spread across two
5530 // inputs that are actually the same node. Re-map the mask to always point
5531 // into the first input.
5534 if (M >= (int)Mask.size())
5540 /// getShuffleScalarElt - Returns the scalar element that will make up the ith
5541 /// element of the result of the vector shuffle.
5542 static SDValue getShuffleScalarElt(SDNode *N, unsigned Index, SelectionDAG &DAG,
5545 return SDValue(); // Limit search depth.
5547 SDValue V = SDValue(N, 0);
5548 EVT VT = V.getValueType();
5549 unsigned Opcode = V.getOpcode();
5551 // Recurse into ISD::VECTOR_SHUFFLE node to find scalars.
5552 if (const ShuffleVectorSDNode *SV = dyn_cast<ShuffleVectorSDNode>(N)) {
5553 int Elt = SV->getMaskElt(Index);
5556 return DAG.getUNDEF(VT.getVectorElementType());
5558 unsigned NumElems = VT.getVectorNumElements();
5559 SDValue NewV = (Elt < (int)NumElems) ? SV->getOperand(0)
5560 : SV->getOperand(1);
5561 return getShuffleScalarElt(NewV.getNode(), Elt % NumElems, DAG, Depth+1);
5564 // Recurse into target specific vector shuffles to find scalars.
5565 if (isTargetShuffle(Opcode)) {
5566 MVT ShufVT = V.getSimpleValueType();
5567 unsigned NumElems = ShufVT.getVectorNumElements();
5568 SmallVector<int, 16> ShuffleMask;
5571 if (!getTargetShuffleMask(N, ShufVT, ShuffleMask, IsUnary))
5574 int Elt = ShuffleMask[Index];
5576 return DAG.getUNDEF(ShufVT.getVectorElementType());
5578 SDValue NewV = (Elt < (int)NumElems) ? N->getOperand(0)
5580 return getShuffleScalarElt(NewV.getNode(), Elt % NumElems, DAG,
5584 // Actual nodes that may contain scalar elements
5585 if (Opcode == ISD::BITCAST) {
5586 V = V.getOperand(0);
5587 EVT SrcVT = V.getValueType();
5588 unsigned NumElems = VT.getVectorNumElements();
5590 if (!SrcVT.isVector() || SrcVT.getVectorNumElements() != NumElems)
5594 if (V.getOpcode() == ISD::SCALAR_TO_VECTOR)
5595 return (Index == 0) ? V.getOperand(0)
5596 : DAG.getUNDEF(VT.getVectorElementType());
5598 if (V.getOpcode() == ISD::BUILD_VECTOR)
5599 return V.getOperand(Index);
5604 /// getNumOfConsecutiveZeros - Return the number of elements of a vector
5605 /// shuffle operation which are consecutively zero. The
5606 /// search can start in two different directions, from left or right.
5607 /// We count undefs as zeros until PreferredNum is reached.
5608 static unsigned getNumOfConsecutiveZeros(ShuffleVectorSDNode *SVOp,
5609 unsigned NumElems, bool ZerosFromLeft,
5611 unsigned PreferredNum = -1U) {
5612 unsigned NumZeros = 0;
5613 for (unsigned i = 0; i != NumElems; ++i) {
5614 unsigned Index = ZerosFromLeft ? i : NumElems - i - 1;
5615 SDValue Elt = getShuffleScalarElt(SVOp, Index, DAG, 0);
5619 if (X86::isZeroNode(Elt))
5621 else if (Elt.getOpcode() == ISD::UNDEF) // Undef as zero up to PreferredNum.
5622 NumZeros = std::min(NumZeros + 1, PreferredNum);
5630 /// isShuffleMaskConsecutive - Check if the shuffle mask indices [MaskI, MaskE)
5631 /// correspond consecutively to elements from one of the vector operands,
5632 /// starting from its index OpIdx. Also set OpNum to the matched source operand.
5634 bool isShuffleMaskConsecutive(ShuffleVectorSDNode *SVOp,
5635 unsigned MaskI, unsigned MaskE, unsigned OpIdx,
5636 unsigned NumElems, unsigned &OpNum) {
5637 bool SeenV1 = false;
5638 bool SeenV2 = false;
5640 for (unsigned i = MaskI; i != MaskE; ++i, ++OpIdx) {
5641 int Idx = SVOp->getMaskElt(i);
5642 // Ignore undef indices
5646 if (Idx < (int)NumElems)
5651 // Only accept consecutive elements from the same vector
5652 if ((Idx % NumElems != OpIdx) || (SeenV1 && SeenV2))
5656 OpNum = SeenV1 ? 0 : 1;
5660 /// isVectorShiftRight - Returns true if the shuffle can be implemented as a
5661 /// logical right shift of a vector.
5662 static bool isVectorShiftRight(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG,
5663 bool &isLeft, SDValue &ShVal, unsigned &ShAmt) {
5665 SVOp->getSimpleValueType(0).getVectorNumElements();
5666 unsigned NumZeros = getNumOfConsecutiveZeros(
5667 SVOp, NumElems, false /* check zeros from right */, DAG,
5668 SVOp->getMaskElt(0));
5674 // Considering the elements in the mask that are not consecutive zeros,
5675 // check if they consecutively come from only one of the source vectors.
5677 // V1 = {X, A, B, C} 0
5679 // vector_shuffle V1, V2 <1, 2, 3, X>
5681 if (!isShuffleMaskConsecutive(SVOp,
5682 0, // Mask Start Index
5683 NumElems-NumZeros, // Mask End Index(exclusive)
5684 NumZeros, // Where to start looking in the src vector
5685 NumElems, // Number of elements in vector
5686 OpSrc)) // Which source operand ?
5691 ShVal = SVOp->getOperand(OpSrc);
5695 /// isVectorShiftLeft - Returns true if the shuffle can be implemented as a
5696 /// logical left shift of a vector.
5697 static bool isVectorShiftLeft(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG,
5698 bool &isLeft, SDValue &ShVal, unsigned &ShAmt) {
5700 SVOp->getSimpleValueType(0).getVectorNumElements();
5701 unsigned NumZeros = getNumOfConsecutiveZeros(
5702 SVOp, NumElems, true /* check zeros from left */, DAG,
5703 NumElems - SVOp->getMaskElt(NumElems - 1) - 1);
5709 // Considering the elements in the mask that are not consecutive zeros,
5710 // check if they consecutively come from only one of the source vectors.
5712 // 0 { A, B, X, X } = V2
5714 // vector_shuffle V1, V2 <X, X, 4, 5>
5716 if (!isShuffleMaskConsecutive(SVOp,
5717 NumZeros, // Mask Start Index
5718 NumElems, // Mask End Index(exclusive)
5719 0, // Where to start looking in the src vector
5720 NumElems, // Number of elements in vector
5721 OpSrc)) // Which source operand ?
5726 ShVal = SVOp->getOperand(OpSrc);
5730 /// isVectorShift - Returns true if the shuffle can be implemented as a
5731 /// logical left or right shift of a vector.
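/// For example, with a zero V2 the v4i32 shuffle <4, 4, 0, 1> zeroes the two
/// low elements and moves V1's elements 0 and 1 up, which can be emitted as a
/// byte shift left of V1 by two elements, i.e. eight bytes (illustrative case).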
5732 static bool isVectorShift(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG,
5733 bool &isLeft, SDValue &ShVal, unsigned &ShAmt) {
5734 // Although the logic below supports any bitwidth size, there are no
5735 // shift instructions which handle more than 128-bit vectors.
5736 if (!SVOp->getSimpleValueType(0).is128BitVector())
5739 if (isVectorShiftLeft(SVOp, DAG, isLeft, ShVal, ShAmt) ||
5740 isVectorShiftRight(SVOp, DAG, isLeft, ShVal, ShAmt))
5746 /// LowerBuildVectorv16i8 - Custom lower build_vector of v16i8.
5748 static SDValue LowerBuildVectorv16i8(SDValue Op, unsigned NonZeros,
5749 unsigned NumNonZero, unsigned NumZero,
5751 const X86Subtarget* Subtarget,
5752 const TargetLowering &TLI) {
5759 for (unsigned i = 0; i < 16; ++i) {
5760 bool ThisIsNonZero = (NonZeros & (1 << i)) != 0;
5761 if (ThisIsNonZero && First) {
5763 V = getZeroVector(MVT::v8i16, Subtarget, DAG, dl);
5765 V = DAG.getUNDEF(MVT::v8i16);
5770 SDValue ThisElt, LastElt;
5771 bool LastIsNonZero = (NonZeros & (1 << (i-1))) != 0;
5772 if (LastIsNonZero) {
5773 LastElt = DAG.getNode(ISD::ZERO_EXTEND, dl,
5774 MVT::i16, Op.getOperand(i-1));
5776 if (ThisIsNonZero) {
5777 ThisElt = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, Op.getOperand(i));
5778 ThisElt = DAG.getNode(ISD::SHL, dl, MVT::i16,
5779 ThisElt, DAG.getConstant(8, MVT::i8));
5781 ThisElt = DAG.getNode(ISD::OR, dl, MVT::i16, ThisElt, LastElt);
5785 if (ThisElt.getNode())
5786 V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v8i16, V, ThisElt,
5787 DAG.getIntPtrConstant(i/2));
5791 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, V);
5794 /// LowerBuildVectorv8i16 - Custom lower build_vector of v8i16.
5796 static SDValue LowerBuildVectorv8i16(SDValue Op, unsigned NonZeros,
5797 unsigned NumNonZero, unsigned NumZero,
5799 const X86Subtarget* Subtarget,
5800 const TargetLowering &TLI) {
5807 for (unsigned i = 0; i < 8; ++i) {
5808 bool isNonZero = (NonZeros & (1 << i)) != 0;
5812 V = getZeroVector(MVT::v8i16, Subtarget, DAG, dl);
5814 V = DAG.getUNDEF(MVT::v8i16);
5817 V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl,
5818 MVT::v8i16, V, Op.getOperand(i),
5819 DAG.getIntPtrConstant(i));
5826 /// LowerBuildVectorv4x32 - Custom lower build_vector of v4i32 or v4f32.
5827 static SDValue LowerBuildVectorv4x32(SDValue Op, SelectionDAG &DAG,
5828 const X86Subtarget *Subtarget,
5829 const TargetLowering &TLI) {
5830 // Find all zeroable elements.
5832 for (int i=0; i < 4; ++i) {
5833 SDValue Elt = Op->getOperand(i);
5834 Zeroable[i] = (Elt.getOpcode() == ISD::UNDEF || X86::isZeroNode(Elt));
5836 assert(std::count_if(&Zeroable[0], &Zeroable[4],
5837 [](bool M) { return !M; }) > 1 &&
5838 "We expect at least two non-zero elements!");
5840 // We only know how to deal with build_vector nodes where elements are either
5841 // zeroable or extract_vector_elt with constant index.
5842 SDValue FirstNonZero;
5843 unsigned FirstNonZeroIdx;
5844 for (unsigned i=0; i < 4; ++i) {
5847 SDValue Elt = Op->getOperand(i);
5848 if (Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
5849 !isa<ConstantSDNode>(Elt.getOperand(1)))
5851 // Make sure that this node is extracting from a 128-bit vector.
5852 MVT VT = Elt.getOperand(0).getSimpleValueType();
5853 if (!VT.is128BitVector())
5855 if (!FirstNonZero.getNode()) {
5857 FirstNonZeroIdx = i;
5861 assert(FirstNonZero.getNode() && "Unexpected build vector of all zeros!");
5862 SDValue V1 = FirstNonZero.getOperand(0);
5863 MVT VT = V1.getSimpleValueType();
5865 // See if this build_vector can be lowered as a blend with zero.
5867 unsigned EltMaskIdx, EltIdx;
5869 for (EltIdx = 0; EltIdx < 4; ++EltIdx) {
5870 if (Zeroable[EltIdx]) {
5871 // The zero vector will be on the right hand side.
5872 Mask[EltIdx] = EltIdx+4;
5876 Elt = Op->getOperand(EltIdx);
5877 // By construction, Elt is an EXTRACT_VECTOR_ELT with a constant index.
5878 EltMaskIdx = cast<ConstantSDNode>(Elt.getOperand(1))->getZExtValue();
5879 if (Elt.getOperand(0) != V1 || EltMaskIdx != EltIdx)
5881 Mask[EltIdx] = EltIdx;
5885 // Let the shuffle legalizer deal with blend operations.
5886 SDValue VZero = getZeroVector(VT, Subtarget, DAG, SDLoc(Op));
5887 if (V1.getSimpleValueType() != VT)
5888 V1 = DAG.getNode(ISD::BITCAST, SDLoc(V1), VT, V1);
5889 return DAG.getVectorShuffle(VT, SDLoc(V1), V1, VZero, &Mask[0]);
5892 // See if we can lower this build_vector to an INSERTPS.
5893 if (!Subtarget->hasSSE41())
5896 SDValue V2 = Elt.getOperand(0);
5897 if (Elt == FirstNonZero && EltIdx == FirstNonZeroIdx)
5900 bool CanFold = true;
5901 for (unsigned i = EltIdx + 1; i < 4 && CanFold; ++i) {
5905 SDValue Current = Op->getOperand(i);
5906 SDValue SrcVector = Current->getOperand(0);
5909 CanFold = SrcVector == V1 &&
5910 cast<ConstantSDNode>(Current.getOperand(1))->getZExtValue() == i;
5916 assert(V1.getNode() && "Expected at least two non-zero elements!");
5917 if (V1.getSimpleValueType() != MVT::v4f32)
5918 V1 = DAG.getNode(ISD::BITCAST, SDLoc(V1), MVT::v4f32, V1);
5919 if (V2.getSimpleValueType() != MVT::v4f32)
5920 V2 = DAG.getNode(ISD::BITCAST, SDLoc(V2), MVT::v4f32, V2);
5922 // Ok, we can emit an INSERTPS instruction.
5924 for (int i = 0; i < 4; ++i)
5928 unsigned InsertPSMask = EltMaskIdx << 6 | EltIdx << 4 | ZMask;
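// The INSERTPS immediate assembled above uses bits [7:6] for the source
// element of V2, bits [5:4] for the destination slot in V1, and bits [3:0]
// as the zero mask (one bit per destination element).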
5929 assert((InsertPSMask & ~0xFFu) == 0 && "Invalid mask!");
5930 SDValue Result = DAG.getNode(X86ISD::INSERTPS, SDLoc(Op), MVT::v4f32, V1, V2,
5931 DAG.getIntPtrConstant(InsertPSMask));
5932 return DAG.getNode(ISD::BITCAST, SDLoc(Op), VT, Result);
5935 /// Return a vector logical shift node.
5936 static SDValue getVShift(bool isLeft, EVT VT, SDValue SrcOp,
5937 unsigned NumBits, SelectionDAG &DAG,
5938 const TargetLowering &TLI, SDLoc dl) {
5939 assert(VT.is128BitVector() && "Unknown type for VShift");
5940 MVT ShVT = MVT::v2i64;
5941 unsigned Opc = isLeft ? X86ISD::VSHLDQ : X86ISD::VSRLDQ;
5942 SrcOp = DAG.getNode(ISD::BITCAST, dl, ShVT, SrcOp);
5943 MVT ScalarShiftTy = TLI.getScalarShiftAmountTy(SrcOp.getValueType());
5944 assert(NumBits % 8 == 0 && "Only support byte sized shifts");
5945 SDValue ShiftVal = DAG.getConstant(NumBits/8, ScalarShiftTy);
5946 return DAG.getNode(ISD::BITCAST, dl, VT,
5947 DAG.getNode(Opc, dl, ShVT, SrcOp, ShiftVal));
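// Usage sketch: getVShift(true, MVT::v4i32, Src, 32, DAG, TLI, dl) emits a
// whole-vector byte shift (VSHLDQ by 32 / 8 == 4 bytes) on a v2i64-bitcasted
// value and bitcasts the result back to v4i32 (illustrative call; Src stands
// for an arbitrary 128-bit SDValue).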
5951 LowerAsSplatVectorLoad(SDValue SrcOp, MVT VT, SDLoc dl, SelectionDAG &DAG) {
5953 // Check if the scalar load can be widened into a vector load. And if
5954 // the address is "base + cst" see if the cst can be "absorbed" into
5955 // the shuffle mask.
5956 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(SrcOp)) {
5957 SDValue Ptr = LD->getBasePtr();
5958 if (!ISD::isNormalLoad(LD) || LD->isVolatile())
5960 EVT PVT = LD->getValueType(0);
5961 if (PVT != MVT::i32 && PVT != MVT::f32)
5966 if (FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(Ptr)) {
5967 FI = FINode->getIndex();
5969 } else if (DAG.isBaseWithConstantOffset(Ptr) &&
5970 isa<FrameIndexSDNode>(Ptr.getOperand(0))) {
5971 FI = cast<FrameIndexSDNode>(Ptr.getOperand(0))->getIndex();
5972 Offset = Ptr.getConstantOperandVal(1);
5973 Ptr = Ptr.getOperand(0);
5978 // FIXME: 256-bit vector instructions don't require a strict alignment,
5979 // improve this code to support it better.
5980 unsigned RequiredAlign = VT.getSizeInBits()/8;
5981 SDValue Chain = LD->getChain();
5982 // Make sure the stack object alignment is at least 16 or 32.
5983 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
5984 if (DAG.InferPtrAlignment(Ptr) < RequiredAlign) {
5985 if (MFI->isFixedObjectIndex(FI)) {
5986 // Can't change the alignment. FIXME: It's possible to compute
5987 // the exact stack offset and reference FI + adjust offset instead.
5988 // If someone *really* cares about this. That's the way to implement it.
5991 MFI->setObjectAlignment(FI, RequiredAlign);
5995 // (Offset % 16 or 32) must be a multiple of 4. The address is then
5996 // Ptr + (Offset & ~15).
5999 if ((Offset % RequiredAlign) & 3)
6001 int64_t StartOffset = Offset & ~(RequiredAlign-1);
6003 Ptr = DAG.getNode(ISD::ADD, SDLoc(Ptr), Ptr.getValueType(),
6004 Ptr,DAG.getConstant(StartOffset, Ptr.getValueType()));
6006 int EltNo = (Offset - StartOffset) >> 2;
6007 unsigned NumElems = VT.getVectorNumElements();
6009 EVT NVT = EVT::getVectorVT(*DAG.getContext(), PVT, NumElems);
6010 SDValue V1 = DAG.getLoad(NVT, dl, Chain, Ptr,
6011 LD->getPointerInfo().getWithOffset(StartOffset),
6012 false, false, false, 0);
6014 SmallVector<int, 8> Mask;
6015 for (unsigned i = 0; i != NumElems; ++i)
6016 Mask.push_back(EltNo);
6018 return DAG.getVectorShuffle(NVT, dl, V1, DAG.getUNDEF(NVT), &Mask[0]);
6024 /// Given the initializing elements 'Elts' of a vector of type 'VT', see if the
6025 /// elements can be replaced by a single large load which has the same value as
6026 /// a build_vector or insert_subvector whose loaded operands are 'Elts'.
6028 /// Example: <load i32 *a, load i32 *a+4, undef, undef> -> zextload a
6030 /// FIXME: we'd also like to handle the case where the last elements are zero
6031 /// rather than undef via VZEXT_LOAD, but we do not detect that case today.
6032 /// There's even a handy isZeroNode for that purpose.
6033 static SDValue EltsFromConsecutiveLoads(EVT VT, ArrayRef<SDValue> Elts,
6034 SDLoc &DL, SelectionDAG &DAG,
6035 bool isAfterLegalize) {
6036 unsigned NumElems = Elts.size();
6038 LoadSDNode *LDBase = nullptr;
6039 unsigned LastLoadedElt = -1U;
6041 // For each element in the initializer, see if we've found a load or an undef.
6042 // If we don't find an initial load element, or later load elements are
6043 // non-consecutive, bail out.
6044 for (unsigned i = 0; i < NumElems; ++i) {
6045 SDValue Elt = Elts[i];
6046 // Look through a bitcast.
6047 if (Elt.getNode() && Elt.getOpcode() == ISD::BITCAST)
6048 Elt = Elt.getOperand(0);
6049 if (!Elt.getNode() ||
6050 (Elt.getOpcode() != ISD::UNDEF && !ISD::isNON_EXTLoad(Elt.getNode())))
6053 if (Elt.getNode()->getOpcode() == ISD::UNDEF)
6055 LDBase = cast<LoadSDNode>(Elt.getNode());
6059 if (Elt.getOpcode() == ISD::UNDEF)
6062 LoadSDNode *LD = cast<LoadSDNode>(Elt);
6063 EVT LdVT = Elt.getValueType();
6064 // Each loaded element must be the correct fractional portion of the
6065 // requested vector load.
6066 if (LdVT.getSizeInBits() != VT.getSizeInBits() / NumElems)
6068 if (!DAG.isConsecutiveLoad(LD, LDBase, LdVT.getSizeInBits() / 8, i))
6073 // If we have found an entire vector of loads and undefs, then return a large
6074 // load of the entire vector width starting at the base pointer. If we found
6075 // consecutive loads for the low half, generate a vzext_load node.
6076 if (LastLoadedElt == NumElems - 1) {
6077 assert(LDBase && "Did not find base load for merging consecutive loads");
6078 EVT EltVT = LDBase->getValueType(0);
6079 // Ensure that the input vector size for the merged loads matches the
6080 // cumulative size of the input elements.
6081 if (VT.getSizeInBits() != EltVT.getSizeInBits() * NumElems)
6084 if (isAfterLegalize &&
6085 !DAG.getTargetLoweringInfo().isOperationLegal(ISD::LOAD, VT))
6088 SDValue NewLd = SDValue();
6090 NewLd = DAG.getLoad(VT, DL, LDBase->getChain(), LDBase->getBasePtr(),
6091 LDBase->getPointerInfo(), LDBase->isVolatile(),
6092 LDBase->isNonTemporal(), LDBase->isInvariant(),
6093 LDBase->getAlignment());
6095 if (LDBase->hasAnyUseOfValue(1)) {
6096 SDValue NewChain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other,
6098 SDValue(NewLd.getNode(), 1));
6099 DAG.ReplaceAllUsesOfValueWith(SDValue(LDBase, 1), NewChain);
6100 DAG.UpdateNodeOperands(NewChain.getNode(), SDValue(LDBase, 1),
6101 SDValue(NewLd.getNode(), 1));
6107 // TODO: The code below fires only for loading the low v2i32 / v2f32
6108 // of a v4i32 / v4f32. It's probably worth generalizing.
6109 EVT EltVT = VT.getVectorElementType();
6110 if (NumElems == 4 && LastLoadedElt == 1 && (EltVT.getSizeInBits() == 32) &&
6111 DAG.getTargetLoweringInfo().isTypeLegal(MVT::v2i64)) {
6112 SDVTList Tys = DAG.getVTList(MVT::v2i64, MVT::Other);
6113 SDValue Ops[] = { LDBase->getChain(), LDBase->getBasePtr() };
6115 DAG.getMemIntrinsicNode(X86ISD::VZEXT_LOAD, DL, Tys, Ops, MVT::i64,
6116 LDBase->getPointerInfo(),
6117 LDBase->getAlignment(),
6118 false/*isVolatile*/, true/*ReadMem*/,
6121 // Make sure the newly-created LOAD is in the same position as LDBase in
6122 // terms of dependency. We create a TokenFactor for LDBase and ResNode, and
6123 // update uses of LDBase's output chain to use the TokenFactor.
6124 if (LDBase->hasAnyUseOfValue(1)) {
6125 SDValue NewChain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other,
6126 SDValue(LDBase, 1), SDValue(ResNode.getNode(), 1));
6127 DAG.ReplaceAllUsesOfValueWith(SDValue(LDBase, 1), NewChain);
6128 DAG.UpdateNodeOperands(NewChain.getNode(), SDValue(LDBase, 1),
6129 SDValue(ResNode.getNode(), 1));
6132 return DAG.getNode(ISD::BITCAST, DL, VT, ResNode);
6137 /// LowerVectorBroadcast - Attempt to use the vbroadcast instruction
6138 /// to generate a splat value for the following cases:
6139 /// 1. A splat BUILD_VECTOR which uses a single scalar load, or a constant.
6140 /// 2. A splat shuffle which uses a scalar_to_vector node which comes from
6141 /// a scalar load, or a constant.
6142 /// The VBROADCAST node is returned when a pattern is found,
6143 /// or SDValue() otherwise.
6144 static SDValue LowerVectorBroadcast(SDValue Op, const X86Subtarget* Subtarget,
6145 SelectionDAG &DAG) {
6146 // VBROADCAST requires AVX.
6147 // TODO: Splats could be generated for non-AVX CPUs using SSE
6148 // instructions, but there's less potential gain for only 128-bit vectors.
6149 if (!Subtarget->hasAVX())
6152 MVT VT = Op.getSimpleValueType();
6155 assert((VT.is128BitVector() || VT.is256BitVector() || VT.is512BitVector()) &&
6156 "Unsupported vector type for broadcast.");
6161 switch (Op.getOpcode()) {
6163 // Unknown pattern found.
6166 case ISD::BUILD_VECTOR: {
6167 auto *BVOp = cast<BuildVectorSDNode>(Op.getNode());
6168 BitVector UndefElements;
6169 SDValue Splat = BVOp->getSplatValue(&UndefElements);
6171 // We need a splat of a single value to use broadcast, and it doesn't
6172 // make any sense if the value is only in one element of the vector.
6173 if (!Splat || (VT.getVectorNumElements() - UndefElements.count()) <= 1)
6177 ConstSplatVal = (Ld.getOpcode() == ISD::Constant ||
6178 Ld.getOpcode() == ISD::ConstantFP);
6180 // Make sure that all of the users of a non-constant load are from the
6181 // BUILD_VECTOR node.
6182 if (!ConstSplatVal && !BVOp->isOnlyUserOf(Ld.getNode()))
6187 case ISD::VECTOR_SHUFFLE: {
6188 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
6190 // Shuffles must have a splat mask where the first element is being splatted.
6192 if ((!SVOp->isSplat()) || SVOp->getMaskElt(0) != 0)
6195 SDValue Sc = Op.getOperand(0);
6196 if (Sc.getOpcode() != ISD::SCALAR_TO_VECTOR &&
6197 Sc.getOpcode() != ISD::BUILD_VECTOR) {
6199 if (!Subtarget->hasInt256())
6202 // Use the register form of the broadcast instruction available on AVX2.
6203 if (VT.getSizeInBits() >= 256)
6204 Sc = Extract128BitVector(Sc, 0, DAG, dl);
6205 return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Sc);
6208 Ld = Sc.getOperand(0);
6209 ConstSplatVal = (Ld.getOpcode() == ISD::Constant ||
6210 Ld.getOpcode() == ISD::ConstantFP);
6212 // The scalar_to_vector node and the suspected
6213 // load node must have exactly one user.
6214 // Constants may have multiple users.
6216 // AVX-512 has a register version of the broadcast
6217 bool hasRegVer = Subtarget->hasAVX512() && VT.is512BitVector() &&
6218 Ld.getValueType().getSizeInBits() >= 32;
6219 if (!ConstSplatVal && ((!Sc.hasOneUse() || !Ld.hasOneUse()) &&
6226 unsigned ScalarSize = Ld.getValueType().getSizeInBits();
6227 bool IsGE256 = (VT.getSizeInBits() >= 256);
6229 // When optimizing for size, generate up to 5 extra bytes for a broadcast
6230 // instruction to save 8 or more bytes of constant pool data.
6231 // TODO: If multiple splats are generated to load the same constant,
6232 // it may be detrimental to overall size. There needs to be a way to detect
6233 // that condition to know if this is truly a size win.
6234 const Function *F = DAG.getMachineFunction().getFunction();
6235 bool OptForSize = F->hasFnAttribute(Attribute::OptimizeForSize);
6237 // Handle broadcasting a single constant scalar from the constant pool
6239 // On Sandybridge (no AVX2), it is still better to load a constant vector
6240 // from the constant pool and not to broadcast it from a scalar.
6241 // But override that restriction when optimizing for size.
6242 // TODO: Check if splatting is recommended for other AVX-capable CPUs.
6243 if (ConstSplatVal && (Subtarget->hasAVX2() || OptForSize)) {
6244 EVT CVT = Ld.getValueType();
6245 assert(!CVT.isVector() && "Must not broadcast a vector type");
6247 // Splat f32, i32, v4f64, v4i64 in all cases with AVX2.
6248 // For size optimization, also splat v2f64 and v2i64, and for size opt
6249 // with AVX2, also splat i8 and i16.
6250 // With pattern matching, the VBROADCAST node may become a VMOVDDUP.
6251 if (ScalarSize == 32 || (IsGE256 && ScalarSize == 64) ||
6252 (OptForSize && (ScalarSize == 64 || Subtarget->hasAVX2()))) {
6253 const Constant *C = nullptr;
6254 if (ConstantSDNode *CI = dyn_cast<ConstantSDNode>(Ld))
6255 C = CI->getConstantIntValue();
6256 else if (ConstantFPSDNode *CF = dyn_cast<ConstantFPSDNode>(Ld))
6257 C = CF->getConstantFPValue();
6259 assert(C && "Invalid constant type");
6261 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
6262 SDValue CP = DAG.getConstantPool(C, TLI.getPointerTy());
6263 unsigned Alignment = cast<ConstantPoolSDNode>(CP)->getAlignment();
6264 Ld = DAG.getLoad(CVT, dl, DAG.getEntryNode(), CP,
6265 MachinePointerInfo::getConstantPool(),
6266 false, false, false, Alignment);
6268 return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld);
6272 bool IsLoad = ISD::isNormalLoad(Ld.getNode());
6274 // Handle AVX2 in-register broadcasts.
6275 if (!IsLoad && Subtarget->hasInt256() &&
6276 (ScalarSize == 32 || (IsGE256 && ScalarSize == 64)))
6277 return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld);
6279 // The scalar source must be a normal load.
6283 if (ScalarSize == 32 || (IsGE256 && ScalarSize == 64) ||
6284 (Subtarget->hasVLX() && ScalarSize == 64))
6285 return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld);
6287 // The integer check is needed for the 64-bit into 128-bit case, so it doesn't
6288 // match double, since there is no vbroadcastsd xmm form
6289 if (Subtarget->hasInt256() && Ld.getValueType().isInteger()) {
6290 if (ScalarSize == 8 || ScalarSize == 16 || ScalarSize == 64)
6291 return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld);
6294 // Unsupported broadcast.
6298 /// \brief For an EXTRACT_VECTOR_ELT with a constant index return the real
6299 /// underlying vector and index.
6301 /// Modifies \p ExtractedFromVec to the real vector and returns the real index.
6303 static int getUnderlyingExtractedFromVec(SDValue &ExtractedFromVec,
6305 int Idx = cast<ConstantSDNode>(ExtIdx)->getZExtValue();
6306 if (!isa<ShuffleVectorSDNode>(ExtractedFromVec))
6309 // For 256-bit vectors, LowerEXTRACT_VECTOR_ELT_SSE4 may have already
6311 // (extract_vector_elt (v8f32 %vreg1), Constant<6>)
6313 // (extract_vector_elt (vector_shuffle<2,u,u,u>
6314 // (extract_subvector (v8f32 %vreg0), Constant<4>),
6317 // In this case the vector is the extract_subvector expression and the index
6318 // is 2, as specified by the shuffle.
6319 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(ExtractedFromVec);
6320 SDValue ShuffleVec = SVOp->getOperand(0);
6321 MVT ShuffleVecVT = ShuffleVec.getSimpleValueType();
6322 assert(ShuffleVecVT.getVectorElementType() ==
6323 ExtractedFromVec.getSimpleValueType().getVectorElementType());
6325 int ShuffleIdx = SVOp->getMaskElt(Idx);
6326 if (isUndefOrInRange(ShuffleIdx, 0, ShuffleVecVT.getVectorNumElements())) {
6327 ExtractedFromVec = ShuffleVec;
6333 static SDValue buildFromShuffleMostly(SDValue Op, SelectionDAG &DAG) {
6334 MVT VT = Op.getSimpleValueType();
6336 // Skip if insert_vec_elt is not supported.
6337 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
6338 if (!TLI.isOperationLegalOrCustom(ISD::INSERT_VECTOR_ELT, VT))
6342 unsigned NumElems = Op.getNumOperands();
6346 SmallVector<unsigned, 4> InsertIndices;
6347 SmallVector<int, 8> Mask(NumElems, -1);
6349 for (unsigned i = 0; i != NumElems; ++i) {
6350 unsigned Opc = Op.getOperand(i).getOpcode();
6352 if (Opc == ISD::UNDEF)
6355 if (Opc != ISD::EXTRACT_VECTOR_ELT) {
6356 // Quit if more than 1 element needs inserting.
6357 if (InsertIndices.size() > 1)
6360 InsertIndices.push_back(i);
6364 SDValue ExtractedFromVec = Op.getOperand(i).getOperand(0);
6365 SDValue ExtIdx = Op.getOperand(i).getOperand(1);
6366 // Quit if non-constant index.
6367 if (!isa<ConstantSDNode>(ExtIdx))
6369 int Idx = getUnderlyingExtractedFromVec(ExtractedFromVec, ExtIdx);
6371 // Quit if extracted from vector of different type.
6372 if (ExtractedFromVec.getValueType() != VT)
6375 if (!VecIn1.getNode())
6376 VecIn1 = ExtractedFromVec;
6377 else if (VecIn1 != ExtractedFromVec) {
6378 if (!VecIn2.getNode())
6379 VecIn2 = ExtractedFromVec;
6380 else if (VecIn2 != ExtractedFromVec)
6381 // Quit if more than 2 vectors to shuffle
6385 if (ExtractedFromVec == VecIn1)
6387 else if (ExtractedFromVec == VecIn2)
6388 Mask[i] = Idx + NumElems;
6391 if (!VecIn1.getNode())
6394 VecIn2 = VecIn2.getNode() ? VecIn2 : DAG.getUNDEF(VT);
6395 SDValue NV = DAG.getVectorShuffle(VT, DL, VecIn1, VecIn2, &Mask[0]);
6396 for (unsigned i = 0, e = InsertIndices.size(); i != e; ++i) {
6397 unsigned Idx = InsertIndices[i];
6398 NV = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VT, NV, Op.getOperand(Idx),
6399 DAG.getIntPtrConstant(Idx));
6405 // Lower BUILD_VECTOR operation for v8i1 and v16i1 types.
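// For an all-constants mask, the bits are packed into an immediate, materialized
// as an i16, bitcast to v16i1, and the requested subvector is extracted. As a
// rough example, <i1 1, i1 0, i1 1, i1 0, ...> packs into an immediate with
// bits 0 and 2 set (0x5).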
6407 X86TargetLowering::LowerBUILD_VECTORvXi1(SDValue Op, SelectionDAG &DAG) const {
6409 MVT VT = Op.getSimpleValueType();
6410 assert((VT.getVectorElementType() == MVT::i1) && (VT.getSizeInBits() <= 16) &&
6411 "Unexpected type in LowerBUILD_VECTORvXi1!");
6414 if (ISD::isBuildVectorAllZeros(Op.getNode())) {
6415 SDValue Cst = DAG.getTargetConstant(0, MVT::i1);
6416 SmallVector<SDValue, 16> Ops(VT.getVectorNumElements(), Cst);
6417 return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Ops);
6420 if (ISD::isBuildVectorAllOnes(Op.getNode())) {
6421 SDValue Cst = DAG.getTargetConstant(1, MVT::i1);
6422 SmallVector<SDValue, 16> Ops(VT.getVectorNumElements(), Cst);
6423 return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Ops);
6426 bool AllContants = true;
6427 uint64_t Immediate = 0;
6428 int NonConstIdx = -1;
6429 bool IsSplat = true;
6430 unsigned NumNonConsts = 0;
6431 unsigned NumConsts = 0;
6432 for (unsigned idx = 0, e = Op.getNumOperands(); idx < e; ++idx) {
6433 SDValue In = Op.getOperand(idx);
6434 if (In.getOpcode() == ISD::UNDEF)
6436 if (!isa<ConstantSDNode>(In)) {
6437 AllContants = false;
6442 if (cast<ConstantSDNode>(In)->getZExtValue())
6443 Immediate |= (1ULL << idx);
6445 if (In != Op.getOperand(0))
6450 SDValue FullMask = DAG.getNode(ISD::BITCAST, dl, MVT::v16i1,
6451 DAG.getConstant(Immediate, MVT::i16));
6452 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, FullMask,
6453 DAG.getIntPtrConstant(0));
6456 if (NumNonConsts == 1 && NonConstIdx != 0) {
6459 SDValue VecAsImm = DAG.getConstant(Immediate,
6460 MVT::getIntegerVT(VT.getSizeInBits()));
6461 DstVec = DAG.getNode(ISD::BITCAST, dl, VT, VecAsImm);
6464 DstVec = DAG.getUNDEF(VT);
6465 return DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, DstVec,
6466 Op.getOperand(NonConstIdx),
6467 DAG.getIntPtrConstant(NonConstIdx));
6469 if (!IsSplat && (NonConstIdx != 0))
6470 llvm_unreachable("Unsupported BUILD_VECTOR operation");
6471 MVT SelectVT = (VT == MVT::v16i1)? MVT::i16 : MVT::i8;
6474 Select = DAG.getNode(ISD::SELECT, dl, SelectVT, Op.getOperand(0),
6475 DAG.getConstant(-1, SelectVT),
6476 DAG.getConstant(0, SelectVT));
6478 Select = DAG.getNode(ISD::SELECT, dl, SelectVT, Op.getOperand(0),
6479 DAG.getConstant((Immediate | 1), SelectVT),
6480 DAG.getConstant(Immediate, SelectVT));
6481 return DAG.getNode(ISD::BITCAST, dl, VT, Select);
6484 /// \brief Return true if \p N implements a horizontal binop, and place the
6485 /// operands of that horizontal binop into V0 and V1.
6487 /// This is a helper function of PerformBUILD_VECTORCombine.
6488 /// This function checks whether the input build_vector \p N implements a
6489 /// horizontal operation. Parameter \p Opcode defines the kind of horizontal
6490 /// operation to match.
6491 /// For example, if \p Opcode is equal to ISD::ADD, then this function
6492 /// checks if \p N implements a horizontal arithmetic add; if instead \p Opcode
6493 /// is equal to ISD::SUB, then this function checks if this is a horizontal
6496 /// This function only analyzes elements of \p N whose indices are
6497 /// in range [BaseIdx, LastIdx).
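///
/// For example, with \p Opcode == ISD::FADD and [BaseIdx, LastIdx) covering a
/// whole v4f32 build_vector, a node of the (rough) form
///   (build_vector (fadd (extract A, 0), (extract A, 1)),
///                 (fadd (extract A, 2), (extract A, 3)),
///                 (fadd (extract B, 0), (extract B, 1)),
///                 (fadd (extract B, 2), (extract B, 3)))
/// matches, with V0 = A and V1 = B (A and B being arbitrary input vectors),
/// i.e. the horizontal add of A and B.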
6498 static bool isHorizontalBinOp(const BuildVectorSDNode *N, unsigned Opcode,
6500 unsigned BaseIdx, unsigned LastIdx,
6501 SDValue &V0, SDValue &V1) {
6502 EVT VT = N->getValueType(0);
6504 assert(BaseIdx * 2 <= LastIdx && "Invalid Indices in input!");
6505 assert(VT.isVector() && VT.getVectorNumElements() >= LastIdx &&
6506 "Invalid Vector in input!");
6508 bool IsCommutable = (Opcode == ISD::ADD || Opcode == ISD::FADD);
6509 bool CanFold = true;
6510 unsigned ExpectedVExtractIdx = BaseIdx;
6511 unsigned NumElts = LastIdx - BaseIdx;
6512 V0 = DAG.getUNDEF(VT);
6513 V1 = DAG.getUNDEF(VT);
6515 // Check if N implements a horizontal binop.
6516 for (unsigned i = 0, e = NumElts; i != e && CanFold; ++i) {
6517 SDValue Op = N->getOperand(i + BaseIdx);
6520 if (Op->getOpcode() == ISD::UNDEF) {
6521 // Update the expected vector extract index.
6522 if (i * 2 == NumElts)
6523 ExpectedVExtractIdx = BaseIdx;
6524 ExpectedVExtractIdx += 2;
6528 CanFold = Op->getOpcode() == Opcode && Op->hasOneUse();
6533 SDValue Op0 = Op.getOperand(0);
6534 SDValue Op1 = Op.getOperand(1);
6536 // Try to match the following pattern:
6537 // (BINOP (extract_vector_elt A, I), (extract_vector_elt A, I+1))
6538 CanFold = (Op0.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
6539 Op1.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
6540 Op0.getOperand(0) == Op1.getOperand(0) &&
6541 isa<ConstantSDNode>(Op0.getOperand(1)) &&
6542 isa<ConstantSDNode>(Op1.getOperand(1)));
6546 unsigned I0 = cast<ConstantSDNode>(Op0.getOperand(1))->getZExtValue();
6547 unsigned I1 = cast<ConstantSDNode>(Op1.getOperand(1))->getZExtValue();
6549 if (i * 2 < NumElts) {
6550 if (V0.getOpcode() == ISD::UNDEF)
6551 V0 = Op0.getOperand(0);
6553 if (V1.getOpcode() == ISD::UNDEF)
6554 V1 = Op0.getOperand(0);
6555 if (i * 2 == NumElts)
6556 ExpectedVExtractIdx = BaseIdx;
6559 SDValue Expected = (i * 2 < NumElts) ? V0 : V1;
6560 if (I0 == ExpectedVExtractIdx)
6561 CanFold = I1 == I0 + 1 && Op0.getOperand(0) == Expected;
6562 else if (IsCommutable && I1 == ExpectedVExtractIdx) {
6563 // Try to match the following dag sequence:
6564 // (BINOP (extract_vector_elt A, I+1), (extract_vector_elt A, I))
6565 CanFold = I0 == I1 + 1 && Op1.getOperand(0) == Expected;
6569 ExpectedVExtractIdx += 2;
6575 /// \brief Emit a sequence of two 128-bit horizontal add/sub followed by
6576 /// a concat_vector.
6578 /// This is a helper function of PerformBUILD_VECTORCombine.
6579 /// This function expects two 256-bit vectors called V0 and V1.
6580 /// At first, each vector is split into two separate 128-bit vectors.
6581 /// Then, the resulting 128-bit vectors are used to implement two
6582 /// horizontal binary operations.
6584 /// The kind of horizontal binary operation is defined by \p X86Opcode.
6586 /// \p Mode specifies how the 128-bit parts of V0 and V1 are passed in input to
6587 /// the two new horizontal binops.
6588 /// When Mode is set, the first horizontal binop dag node would take as input
6589 /// the lower 128-bit of V0 and the upper 128-bit of V0. The second
6590 /// horizontal binop dag node would take as input the lower 128-bit of V1
6591 /// and the upper 128-bit of V1.
6593 /// HADD V0_LO, V0_HI
6594 /// HADD V1_LO, V1_HI
6596 /// Otherwise, the first horizontal binop dag node takes as input the lower
6597 /// 128-bit of V0 and the lower 128-bit of V1, and the second horizontal binop
6598 /// dag node takes the upper 128-bit of V0 and the upper 128-bit of V1.
6600 /// HADD V0_LO, V1_LO
6601 /// HADD V0_HI, V1_HI
6603 /// If \p isUndefLO is set, then the algorithm propagates UNDEF to the lower
6604 /// 128-bits of the result. If \p isUndefHI is set, then UNDEF is propagated to
6605 /// the upper 128-bits of the result.
6606 static SDValue ExpandHorizontalBinOp(const SDValue &V0, const SDValue &V1,
6607 SDLoc DL, SelectionDAG &DAG,
6608 unsigned X86Opcode, bool Mode,
6609 bool isUndefLO, bool isUndefHI) {
6610 EVT VT = V0.getValueType();
6611 assert(VT.is256BitVector() && VT == V1.getValueType() &&
6612 "Invalid nodes in input!");
6614 unsigned NumElts = VT.getVectorNumElements();
6615 SDValue V0_LO = Extract128BitVector(V0, 0, DAG, DL);
6616 SDValue V0_HI = Extract128BitVector(V0, NumElts/2, DAG, DL);
6617 SDValue V1_LO = Extract128BitVector(V1, 0, DAG, DL);
6618 SDValue V1_HI = Extract128BitVector(V1, NumElts/2, DAG, DL);
6619 EVT NewVT = V0_LO.getValueType();
6621 SDValue LO = DAG.getUNDEF(NewVT);
6622 SDValue HI = DAG.getUNDEF(NewVT);
6625 // Don't emit a horizontal binop if the result is expected to be UNDEF.
6626 if (!isUndefLO && V0->getOpcode() != ISD::UNDEF)
6627 LO = DAG.getNode(X86Opcode, DL, NewVT, V0_LO, V0_HI);
6628 if (!isUndefHI && V1->getOpcode() != ISD::UNDEF)
6629 HI = DAG.getNode(X86Opcode, DL, NewVT, V1_LO, V1_HI);
6631 // Don't emit a horizontal binop if the result is expected to be UNDEF.
6632 if (!isUndefLO && (V0_LO->getOpcode() != ISD::UNDEF ||
6633 V1_LO->getOpcode() != ISD::UNDEF))
6634 LO = DAG.getNode(X86Opcode, DL, NewVT, V0_LO, V1_LO);
6636 if (!isUndefHI && (V0_HI->getOpcode() != ISD::UNDEF ||
6637 V1_HI->getOpcode() != ISD::UNDEF))
6638 HI = DAG.getNode(X86Opcode, DL, NewVT, V0_HI, V1_HI);
6641 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, LO, HI);
6644 /// \brief Try to fold a build_vector that performs an 'addsub' into the
6645 /// sequence of 'vadd + vsub + blendi'.
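///
/// As a rough example (A and B being arbitrary input vectors), a v4f32
/// build_vector of the form
///   ((fsub (extract A, 0), (extract B, 0)), (fadd (extract A, 1), (extract B, 1)),
///    (fsub (extract A, 2), (extract B, 2)), (fadd (extract A, 3), (extract B, 3)))
/// is folded into (X86ISD::ADDSUB A, B), which typically selects to addsubps.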
6646 static SDValue matchAddSub(const BuildVectorSDNode *BV, SelectionDAG &DAG,
6647 const X86Subtarget *Subtarget) {
6649 EVT VT = BV->getValueType(0);
6650 unsigned NumElts = VT.getVectorNumElements();
6651 SDValue InVec0 = DAG.getUNDEF(VT);
6652 SDValue InVec1 = DAG.getUNDEF(VT);
6654 assert((VT == MVT::v8f32 || VT == MVT::v4f64 || VT == MVT::v4f32 ||
6655 VT == MVT::v2f64) && "build_vector with an invalid type found!");
6657 // Odd-numbered elements in the input build vector are obtained from
6658 // adding two integer/float elements.
6659 // Even-numbered elements in the input build vector are obtained from
6660 // subtracting two integer/float elements.
6661 unsigned ExpectedOpcode = ISD::FSUB;
6662 unsigned NextExpectedOpcode = ISD::FADD;
6663 bool AddFound = false;
6664 bool SubFound = false;
6666 for (unsigned i = 0, e = NumElts; i != e; ++i) {
6667 SDValue Op = BV->getOperand(i);
6669 // Skip 'undef' values.
6670 unsigned Opcode = Op.getOpcode();
6671 if (Opcode == ISD::UNDEF) {
6672 std::swap(ExpectedOpcode, NextExpectedOpcode);
6676 // Early exit if we found an unexpected opcode.
6677 if (Opcode != ExpectedOpcode)
6680 SDValue Op0 = Op.getOperand(0);
6681 SDValue Op1 = Op.getOperand(1);
6683 // Try to match the following pattern:
6684 // (BINOP (extract_vector_elt A, i), (extract_vector_elt B, i))
6685 // Early exit if we cannot match that sequence.
6686 if (Op0.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
6687 Op1.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
6688 !isa<ConstantSDNode>(Op0.getOperand(1)) ||
6689 !isa<ConstantSDNode>(Op1.getOperand(1)) ||
6690 Op0.getOperand(1) != Op1.getOperand(1))
6693 unsigned I0 = cast<ConstantSDNode>(Op0.getOperand(1))->getZExtValue();
6697 // We found a valid add/sub node. Update the information accordingly.
6703 // Update InVec0 and InVec1.
6704 if (InVec0.getOpcode() == ISD::UNDEF)
6705 InVec0 = Op0.getOperand(0);
6706 if (InVec1.getOpcode() == ISD::UNDEF)
6707 InVec1 = Op1.getOperand(0);
6709 // Make sure that the operands of each add/sub node always
6710 // come from the same pair of vectors.
6711 if (InVec0 != Op0.getOperand(0)) {
6712 if (ExpectedOpcode == ISD::FSUB)
6715 // FADD is commutable. Try to commute the operands
6716 // and then test again.
6717 std::swap(Op0, Op1);
6718 if (InVec0 != Op0.getOperand(0))
6722 if (InVec1 != Op1.getOperand(0))
6725 // Update the pair of expected opcodes.
6726 std::swap(ExpectedOpcode, NextExpectedOpcode);
6729 // Don't try to fold this build_vector into an ADDSUB if the inputs are undef.
6730 if (AddFound && SubFound && InVec0.getOpcode() != ISD::UNDEF &&
6731 InVec1.getOpcode() != ISD::UNDEF)
6732 return DAG.getNode(X86ISD::ADDSUB, DL, VT, InVec0, InVec1);
6737 static SDValue PerformBUILD_VECTORCombine(SDNode *N, SelectionDAG &DAG,
6738 const X86Subtarget *Subtarget) {
6740 EVT VT = N->getValueType(0);
6741 unsigned NumElts = VT.getVectorNumElements();
6742 BuildVectorSDNode *BV = cast<BuildVectorSDNode>(N);
6743 SDValue InVec0, InVec1;
6745 // Try to match an ADDSUB.
6746 if ((Subtarget->hasSSE3() && (VT == MVT::v4f32 || VT == MVT::v2f64)) ||
6747 (Subtarget->hasAVX() && (VT == MVT::v8f32 || VT == MVT::v4f64))) {
6748 SDValue Value = matchAddSub(BV, DAG, Subtarget);
6749 if (Value.getNode())
6753 // Try to match horizontal ADD/SUB.
6754 unsigned NumUndefsLO = 0;
6755 unsigned NumUndefsHI = 0;
6756 unsigned Half = NumElts/2;
6758 // Count the number of UNDEF operands in the input build_vector.
6759 for (unsigned i = 0, e = Half; i != e; ++i)
6760 if (BV->getOperand(i)->getOpcode() == ISD::UNDEF)
6763 for (unsigned i = Half, e = NumElts; i != e; ++i)
6764 if (BV->getOperand(i)->getOpcode() == ISD::UNDEF)
6767 // Early exit if this is either a build_vector of all UNDEFs or one where
6768 // all operands but one are UNDEF.
6769 if (NumUndefsLO + NumUndefsHI + 1 >= NumElts)
6772 if ((VT == MVT::v4f32 || VT == MVT::v2f64) && Subtarget->hasSSE3()) {
6773 // Try to match an SSE3 float HADD/HSUB.
6774 if (isHorizontalBinOp(BV, ISD::FADD, DAG, 0, NumElts, InVec0, InVec1))
6775 return DAG.getNode(X86ISD::FHADD, DL, VT, InVec0, InVec1);
6777 if (isHorizontalBinOp(BV, ISD::FSUB, DAG, 0, NumElts, InVec0, InVec1))
6778 return DAG.getNode(X86ISD::FHSUB, DL, VT, InVec0, InVec1);
6779 } else if ((VT == MVT::v4i32 || VT == MVT::v8i16) && Subtarget->hasSSSE3()) {
6780 // Try to match an SSSE3 integer HADD/HSUB.
6781 if (isHorizontalBinOp(BV, ISD::ADD, DAG, 0, NumElts, InVec0, InVec1))
6782 return DAG.getNode(X86ISD::HADD, DL, VT, InVec0, InVec1);
6784 if (isHorizontalBinOp(BV, ISD::SUB, DAG, 0, NumElts, InVec0, InVec1))
6785 return DAG.getNode(X86ISD::HSUB, DL, VT, InVec0, InVec1);
6788 if (!Subtarget->hasAVX())
6791 if ((VT == MVT::v8f32 || VT == MVT::v4f64)) {
6792 // Try to match an AVX horizontal add/sub of packed single/double
6793 // precision floating point values from 256-bit vectors.
6794 SDValue InVec2, InVec3;
6795 if (isHorizontalBinOp(BV, ISD::FADD, DAG, 0, Half, InVec0, InVec1) &&
6796 isHorizontalBinOp(BV, ISD::FADD, DAG, Half, NumElts, InVec2, InVec3) &&
6797 ((InVec0.getOpcode() == ISD::UNDEF ||
6798 InVec2.getOpcode() == ISD::UNDEF) || InVec0 == InVec2) &&
6799 ((InVec1.getOpcode() == ISD::UNDEF ||
6800 InVec3.getOpcode() == ISD::UNDEF) || InVec1 == InVec3))
6801 return DAG.getNode(X86ISD::FHADD, DL, VT, InVec0, InVec1);
6803 if (isHorizontalBinOp(BV, ISD::FSUB, DAG, 0, Half, InVec0, InVec1) &&
6804 isHorizontalBinOp(BV, ISD::FSUB, DAG, Half, NumElts, InVec2, InVec3) &&
6805 ((InVec0.getOpcode() == ISD::UNDEF ||
6806 InVec2.getOpcode() == ISD::UNDEF) || InVec0 == InVec2) &&
6807 ((InVec1.getOpcode() == ISD::UNDEF ||
6808 InVec3.getOpcode() == ISD::UNDEF) || InVec1 == InVec3))
6809 return DAG.getNode(X86ISD::FHSUB, DL, VT, InVec0, InVec1);
6810 } else if (VT == MVT::v8i32 || VT == MVT::v16i16) {
6811 // Try to match an AVX2 horizontal add/sub of signed integers.
6812 SDValue InVec2, InVec3;
6814 bool CanFold = true;
6816 if (isHorizontalBinOp(BV, ISD::ADD, DAG, 0, Half, InVec0, InVec1) &&
6817 isHorizontalBinOp(BV, ISD::ADD, DAG, Half, NumElts, InVec2, InVec3) &&
6818 ((InVec0.getOpcode() == ISD::UNDEF ||
6819 InVec2.getOpcode() == ISD::UNDEF) || InVec0 == InVec2) &&
6820 ((InVec1.getOpcode() == ISD::UNDEF ||
6821 InVec3.getOpcode() == ISD::UNDEF) || InVec1 == InVec3))
6822 X86Opcode = X86ISD::HADD;
6823 else if (isHorizontalBinOp(BV, ISD::SUB, DAG, 0, Half, InVec0, InVec1) &&
6824 isHorizontalBinOp(BV, ISD::SUB, DAG, Half, NumElts, InVec2, InVec3) &&
6825 ((InVec0.getOpcode() == ISD::UNDEF ||
6826 InVec2.getOpcode() == ISD::UNDEF) || InVec0 == InVec2) &&
6827 ((InVec1.getOpcode() == ISD::UNDEF ||
6828 InVec3.getOpcode() == ISD::UNDEF) || InVec1 == InVec3))
6829 X86Opcode = X86ISD::HSUB;
6834 // Fold this build_vector into a single horizontal add/sub.
6835 // Do this only if the target has AVX2.
6836 if (Subtarget->hasAVX2())
6837 return DAG.getNode(X86Opcode, DL, VT, InVec0, InVec1);
6839 // Do not try to expand this build_vector into a pair of horizontal
6840 // add/sub if we can emit a pair of scalar add/sub.
6841 if (NumUndefsLO + 1 == Half || NumUndefsHI + 1 == Half)
6844 // Convert this build_vector into a pair of horizontal binops followed by
6846 bool isUndefLO = NumUndefsLO == Half;
6847 bool isUndefHI = NumUndefsHI == Half;
6848 return ExpandHorizontalBinOp(InVec0, InVec1, DL, DAG, X86Opcode, false,
6849 isUndefLO, isUndefHI);
6853 if ((VT == MVT::v8f32 || VT == MVT::v4f64 || VT == MVT::v8i32 ||
6854 VT == MVT::v16i16) && Subtarget->hasAVX()) {
6856 if (isHorizontalBinOp(BV, ISD::ADD, DAG, 0, NumElts, InVec0, InVec1))
6857 X86Opcode = X86ISD::HADD;
6858 else if (isHorizontalBinOp(BV, ISD::SUB, DAG, 0, NumElts, InVec0, InVec1))
6859 X86Opcode = X86ISD::HSUB;
6860 else if (isHorizontalBinOp(BV, ISD::FADD, DAG, 0, NumElts, InVec0, InVec1))
6861 X86Opcode = X86ISD::FHADD;
6862 else if (isHorizontalBinOp(BV, ISD::FSUB, DAG, 0, NumElts, InVec0, InVec1))
6863 X86Opcode = X86ISD::FHSUB;
6867 // Don't try to expand this build_vector into a pair of horizontal add/sub
6868 // if we can simply emit a pair of scalar add/sub.
6869 if (NumUndefsLO + 1 == Half || NumUndefsHI + 1 == Half)
6872 // Convert this build_vector into two horizontal add/sub followed by
6874 bool isUndefLO = NumUndefsLO == Half;
6875 bool isUndefHI = NumUndefsHI == Half;
6876 return ExpandHorizontalBinOp(InVec0, InVec1, DL, DAG, X86Opcode, true,
6877 isUndefLO, isUndefHI);
6884 X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const {
6887 MVT VT = Op.getSimpleValueType();
6888 MVT ExtVT = VT.getVectorElementType();
6889 unsigned NumElems = Op.getNumOperands();
6891 // Generate vectors for predicate vectors.
6892 if (VT.getScalarType() == MVT::i1 && Subtarget->hasAVX512())
6893 return LowerBUILD_VECTORvXi1(Op, DAG);
6895 // Vectors containing all zeros can be matched by pxor and xorps later
6896 if (ISD::isBuildVectorAllZeros(Op.getNode())) {
6897 // Canonicalize this to <4 x i32> to 1) ensure the zero vectors are CSE'd
6898 // and 2) ensure that i64 scalars are eliminated on x86-32 hosts.
6899 if (VT == MVT::v4i32 || VT == MVT::v8i32 || VT == MVT::v16i32)
6902 return getZeroVector(VT, Subtarget, DAG, dl);
6905 // Vectors containing all ones can be matched by pcmpeqd on 128-bit width
6906 // vectors or broken into v4i32 operations on 256-bit vectors. AVX2 can use
6907 // vpcmpeqd on 256-bit vectors.
6908 if (Subtarget->hasSSE2() && ISD::isBuildVectorAllOnes(Op.getNode())) {
6909 if (VT == MVT::v4i32 || (VT == MVT::v8i32 && Subtarget->hasInt256()))
6912 if (!VT.is512BitVector())
6913 return getOnesVector(VT, Subtarget->hasInt256(), DAG, dl);
6916 SDValue Broadcast = LowerVectorBroadcast(Op, Subtarget, DAG);
6917 if (Broadcast.getNode())
6920 unsigned EVTBits = ExtVT.getSizeInBits();
6922 unsigned NumZero = 0;
6923 unsigned NumNonZero = 0;
6924 unsigned NonZeros = 0;
6925 bool IsAllConstants = true;
6926 SmallSet<SDValue, 8> Values;
6927 for (unsigned i = 0; i < NumElems; ++i) {
6928 SDValue Elt = Op.getOperand(i);
6929 if (Elt.getOpcode() == ISD::UNDEF)
6932 if (Elt.getOpcode() != ISD::Constant &&
6933 Elt.getOpcode() != ISD::ConstantFP)
6934 IsAllConstants = false;
6935 if (X86::isZeroNode(Elt))
6938 NonZeros |= (1 << i);
6943 // All undef vector. Return an UNDEF. All zero vectors were handled above.
6944 if (NumNonZero == 0)
6945 return DAG.getUNDEF(VT);
6947 // Special case for a single non-zero, non-undef element.
6948 if (NumNonZero == 1) {
6949 unsigned Idx = countTrailingZeros(NonZeros);
6950 SDValue Item = Op.getOperand(Idx);
6952 // If this is an insertion of an i64 value on x86-32, and if the top bits of
6953 // the value are obviously zero, truncate the value to i32 and do the
6954 // insertion that way. Only do this if the value is non-constant or if the
6955 // value is a constant being inserted into element 0. It is cheaper to do
6956 // a constant pool load than it is to do a movd + shuffle.
6957 if (ExtVT == MVT::i64 && !Subtarget->is64Bit() &&
6958 (!IsAllConstants || Idx == 0)) {
6959 if (DAG.MaskedValueIsZero(Item, APInt::getBitsSet(64, 32, 64))) {
6961 assert(VT == MVT::v2i64 && "Expected an SSE value type!");
6962 EVT VecVT = MVT::v4i32;
6963 unsigned VecElts = 4;
6965 // Truncate the value (which may itself be a constant) to i32, and
6966 // convert it to a vector with movd (S2V+shuffle to zero extend).
6967 Item = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Item);
6968 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VecVT, Item);
6970 // If using the new shuffle lowering, just directly insert this.
6971 if (ExperimentalVectorShuffleLowering)
6973 ISD::BITCAST, dl, VT,
6974 getShuffleVectorZeroOrUndef(Item, Idx * 2, true, Subtarget, DAG));
6976 Item = getShuffleVectorZeroOrUndef(Item, 0, true, Subtarget, DAG);
6978 // Now we have our 32-bit value zero extended in the low element of
6979 // a vector. If Idx != 0, swizzle it into place.
6981 SmallVector<int, 4> Mask;
6982 Mask.push_back(Idx);
6983 for (unsigned i = 1; i != VecElts; ++i)
6985 Item = DAG.getVectorShuffle(VecVT, dl, Item, DAG.getUNDEF(VecVT),
6988 return DAG.getNode(ISD::BITCAST, dl, VT, Item);
6992 // If we have a constant or non-constant insertion into the low element of
6993 // a vector, we can do this with SCALAR_TO_VECTOR + shuffle of zero into
6994 // the rest of the elements. This will be matched as movd/movq/movss/movsd
6995 // depending on what the source datatype is.
6998 return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Item);
7000 if (ExtVT == MVT::i32 || ExtVT == MVT::f32 || ExtVT == MVT::f64 ||
7001 (ExtVT == MVT::i64 && Subtarget->is64Bit())) {
7002 if (VT.is256BitVector() || VT.is512BitVector()) {
7003 SDValue ZeroVec = getZeroVector(VT, Subtarget, DAG, dl);
7004 return DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, ZeroVec,
7005 Item, DAG.getIntPtrConstant(0));
7007 assert(VT.is128BitVector() && "Expected an SSE value type!");
7008 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Item);
7009 // Turn it into a MOVL (i.e. movss, movsd, or movd) to a zero vector.
7010 return getShuffleVectorZeroOrUndef(Item, 0, true, Subtarget, DAG);
7013 if (ExtVT == MVT::i16 || ExtVT == MVT::i8) {
7014 Item = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, Item);
7015 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32, Item);
7016 if (VT.is256BitVector()) {
7017 SDValue ZeroVec = getZeroVector(MVT::v8i32, Subtarget, DAG, dl);
7018 Item = Insert128BitVector(ZeroVec, Item, 0, DAG, dl);
7020 assert(VT.is128BitVector() && "Expected an SSE value type!");
7021 Item = getShuffleVectorZeroOrUndef(Item, 0, true, Subtarget, DAG);
7023 return DAG.getNode(ISD::BITCAST, dl, VT, Item);
7027 // Is it a vector logical left shift?
7028 if (NumElems == 2 && Idx == 1 &&
7029 X86::isZeroNode(Op.getOperand(0)) &&
7030 !X86::isZeroNode(Op.getOperand(1))) {
7031 unsigned NumBits = VT.getSizeInBits();
7032 return getVShift(true, VT,
7033 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl,
7034 VT, Op.getOperand(1)),
7035 NumBits/2, DAG, *this, dl);
7038 if (IsAllConstants) // Otherwise, it's better to do a constpool load.
7041 // Otherwise, if this is a vector with i32 or f32 elements, and the element
7042 // is a non-constant being inserted into an element other than the low one,
7043 // we can't use a constant pool load. Instead, use SCALAR_TO_VECTOR (aka
7044 // movd/movss) to move this into the low element, then shuffle it into
7046 if (EVTBits == 32) {
7047 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Item);
7049 // If using the new shuffle lowering, just directly insert this.
7050 if (ExperimentalVectorShuffleLowering)
7051 return getShuffleVectorZeroOrUndef(Item, Idx, NumZero > 0, Subtarget, DAG);
7053 // Turn it into a shuffle of zero and zero-extended scalar to vector.
7054 Item = getShuffleVectorZeroOrUndef(Item, 0, NumZero > 0, Subtarget, DAG);
7055 SmallVector<int, 8> MaskVec;
7056 for (unsigned i = 0; i != NumElems; ++i)
7057 MaskVec.push_back(i == Idx ? 0 : 1);
7058 return DAG.getVectorShuffle(VT, dl, Item, DAG.getUNDEF(VT), &MaskVec[0]);
7062 // Splat is obviously ok. Let legalizer expand it to a shuffle.
7063 if (Values.size() == 1) {
7064 if (EVTBits == 32) {
7065 // Instead of a shuffle like this:
7066 // shuffle (scalar_to_vector (load (ptr + 4))), undef, <0, 0, 0, 0>
7067 // Check if it's possible to issue this instead.
7068 // shuffle (vload ptr), undef, <1, 1, 1, 1>
7069 unsigned Idx = countTrailingZeros(NonZeros);
7070 SDValue Item = Op.getOperand(Idx);
7071 if (Op.getNode()->isOnlyUserOf(Item.getNode()))
7072 return LowerAsSplatVectorLoad(Item, VT, dl, DAG);
7077 // A vector full of immediates; various special cases are already
7078 // handled, so this is best done with a single constant-pool load.
7082 // For AVX-length vectors, see if we can use a vector load to get all of the
7083 // elements; otherwise, build the individual 128-bit pieces and use
7084 // shuffles to put them in place.
7085 if (VT.is256BitVector() || VT.is512BitVector()) {
7086 SmallVector<SDValue, 64> V;
7087 for (unsigned i = 0; i != NumElems; ++i)
7088 V.push_back(Op.getOperand(i));
7090 // Check for a build vector of consecutive loads.
7091 if (SDValue LD = EltsFromConsecutiveLoads(VT, V, dl, DAG, false))
7094 EVT HVT = EVT::getVectorVT(*DAG.getContext(), ExtVT, NumElems/2);
7096 // Build both the lower and upper subvector.
7097 SDValue Lower = DAG.getNode(ISD::BUILD_VECTOR, dl, HVT,
7098 makeArrayRef(&V[0], NumElems/2));
7099 SDValue Upper = DAG.getNode(ISD::BUILD_VECTOR, dl, HVT,
7100 makeArrayRef(&V[NumElems / 2], NumElems/2));
7102 // Recreate the wider vector with the lower and upper part.
7103 if (VT.is256BitVector())
7104 return Concat128BitVectors(Lower, Upper, VT, NumElems, DAG, dl);
7105 return Concat256BitVectors(Lower, Upper, VT, NumElems, DAG, dl);
7108 // Let legalizer expand 2-wide build_vectors.
7109 if (EVTBits == 64) {
7110 if (NumNonZero == 1) {
7111 // One half is zero or undef.
7112 unsigned Idx = countTrailingZeros(NonZeros);
7113 SDValue V2 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT,
7114 Op.getOperand(Idx));
7115 return getShuffleVectorZeroOrUndef(V2, Idx, true, Subtarget, DAG);
7120 // If element VT is < 32 bits, convert it to inserts into a zero vector.
7121 if (EVTBits == 8 && NumElems == 16) {
7122 SDValue V = LowerBuildVectorv16i8(Op, NonZeros,NumNonZero,NumZero, DAG,
7124 if (V.getNode()) return V;
7127 if (EVTBits == 16 && NumElems == 8) {
7128 SDValue V = LowerBuildVectorv8i16(Op, NonZeros,NumNonZero,NumZero, DAG,
7130 if (V.getNode()) return V;
7133 // If element VT is == 32 bits and has 4 elems, try to generate an INSERTPS
7134 if (EVTBits == 32 && NumElems == 4) {
7135 SDValue V = LowerBuildVectorv4x32(Op, DAG, Subtarget, *this);
7140 // If element VT is == 32 bits, turn it into a number of shuffles.
7141 SmallVector<SDValue, 8> V(NumElems);
7142 if (NumElems == 4 && NumZero > 0) {
7143 for (unsigned i = 0; i < 4; ++i) {
7144 bool isZero = !(NonZeros & (1 << i));
7146 V[i] = getZeroVector(VT, Subtarget, DAG, dl);
7148 V[i] = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(i));
7151 for (unsigned i = 0; i < 2; ++i) {
7152 switch ((NonZeros & (0x3 << i*2)) >> (i*2)) {
7155 V[i] = V[i*2]; // Must be a zero vector.
7158 V[i] = getMOVL(DAG, dl, VT, V[i*2+1], V[i*2]);
7161 V[i] = getMOVL(DAG, dl, VT, V[i*2], V[i*2+1]);
7164 V[i] = getUnpackl(DAG, dl, VT, V[i*2], V[i*2+1]);
7169 bool Reverse1 = (NonZeros & 0x3) == 2;
7170 bool Reverse2 = ((NonZeros & (0x3 << 2)) >> 2) == 2;
7174 static_cast<int>(Reverse2 ? NumElems+1 : NumElems),
7175 static_cast<int>(Reverse2 ? NumElems : NumElems+1)
7177 return DAG.getVectorShuffle(VT, dl, V[0], V[1], &MaskVec[0]);
7180 if (Values.size() > 1 && VT.is128BitVector()) {
7181 // Check for a build vector of consecutive loads.
7182 for (unsigned i = 0; i < NumElems; ++i)
7183 V[i] = Op.getOperand(i);
7185 // Check for elements which are consecutive loads.
7186 SDValue LD = EltsFromConsecutiveLoads(VT, V, dl, DAG, false);
7190 // Check for a build vector from mostly shuffle plus few inserting.
7191 SDValue Sh = buildFromShuffleMostly(Op, DAG);
7195 // For SSE 4.1, use insertps to put the high elements into the low element.
7196 if (Subtarget->hasSSE41()) {
7198 if (Op.getOperand(0).getOpcode() != ISD::UNDEF)
7199 Result = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(0));
7201 Result = DAG.getUNDEF(VT);
7203 for (unsigned i = 1; i < NumElems; ++i) {
7204 if (Op.getOperand(i).getOpcode() == ISD::UNDEF) continue;
7205 Result = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Result,
7206 Op.getOperand(i), DAG.getIntPtrConstant(i));
7211 // Otherwise, expand into a number of unpckl*; start by extending each of
7212 // our (non-undef) elements to the full vector width with the element in the
7213 // bottom slot of the vector (which generates no code for SSE).
7214 for (unsigned i = 0; i < NumElems; ++i) {
7215 if (Op.getOperand(i).getOpcode() != ISD::UNDEF)
7216 V[i] = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(i));
7218 V[i] = DAG.getUNDEF(VT);
7221 // Next, we iteratively mix elements, e.g. for v4f32:
7222 // Step 1: unpcklps 0, 2 ==> X: <?, ?, 2, 0>
7223 // : unpcklps 1, 3 ==> Y: <?, ?, 3, 1>
7224 // Step 2: unpcklps X, Y ==> <3, 2, 1, 0>
7225 unsigned EltStride = NumElems >> 1;
7226 while (EltStride != 0) {
7227 for (unsigned i = 0; i < EltStride; ++i) {
7228 // If V[i+EltStride] is undef and this is the first round of mixing,
7229 // then it is safe to just drop this shuffle: V[i] is already in the
7230 // right place, the one element (since it's the first round) being
7231 // inserted as undef can be dropped. This isn't safe for successive
7232 // rounds because they will permute elements within both vectors.
7233 if (V[i+EltStride].getOpcode() == ISD::UNDEF &&
7234 EltStride == NumElems/2)
7237 V[i] = getUnpackl(DAG, dl, VT, V[i], V[i + EltStride]);
7246 // LowerAVXCONCAT_VECTORS - 256-bit AVX can use the vinsertf128 instruction
7247 // to create 256-bit vectors from two other 128-bit ones.
7248 static SDValue LowerAVXCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) {
7250 MVT ResVT = Op.getSimpleValueType();
7252 assert((ResVT.is256BitVector() ||
7253 ResVT.is512BitVector()) && "Value type must be 256-/512-bit wide");
7255 SDValue V1 = Op.getOperand(0);
7256 SDValue V2 = Op.getOperand(1);
7257 unsigned NumElems = ResVT.getVectorNumElements();
7258 if(ResVT.is256BitVector())
7259 return Concat128BitVectors(V1, V2, ResVT, NumElems, DAG, dl);
7261 if (Op.getNumOperands() == 4) {
7262 MVT HalfVT = MVT::getVectorVT(ResVT.getScalarType(),
7263 ResVT.getVectorNumElements()/2);
7264 SDValue V3 = Op.getOperand(2);
7265 SDValue V4 = Op.getOperand(3);
7266 return Concat256BitVectors(Concat128BitVectors(V1, V2, HalfVT, NumElems/2, DAG, dl),
7267 Concat128BitVectors(V3, V4, HalfVT, NumElems/2, DAG, dl), ResVT, NumElems, DAG, dl);
7269 return Concat256BitVectors(V1, V2, ResVT, NumElems, DAG, dl);
7272 static SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) {
7273 MVT LLVM_ATTRIBUTE_UNUSED VT = Op.getSimpleValueType();
7274 assert((VT.is256BitVector() && Op.getNumOperands() == 2) ||
7275 (VT.is512BitVector() && (Op.getNumOperands() == 2 ||
7276 Op.getNumOperands() == 4)));
7278 // AVX can use the vinsertf128 instruction to create 256-bit vectors
7279 // from two other 128-bit ones.
7281 // A 512-bit vector may contain 2 256-bit vectors or 4 128-bit vectors.
7282 return LowerAVXCONCAT_VECTORS(Op, DAG);
7286 //===----------------------------------------------------------------------===//
7287 // Vector shuffle lowering
7289 // This is an experimental code path for lowering vector shuffles on x86. It is
7290 // designed to handle arbitrary vector shuffles and blends, gracefully
7291 // degrading performance as necessary. It works hard to recognize idiomatic
7292 // shuffles and lower them to optimal instruction patterns without leaving
7293 // a framework that allows reasonably efficient handling of all vector shuffle
7295 //===----------------------------------------------------------------------===//
7297 /// \brief Tiny helper function to identify a no-op mask.
7299 /// This is a somewhat boring predicate function. It checks whether the mask
7300 /// array input, which is assumed to be a single-input shuffle mask of the kind
7301 /// used by the X86 shuffle instructions (not a fully general
7302 /// ShuffleVectorSDNode mask), requires any shuffles to occur. Both undef and an
7303 /// in-place shuffle are 'no-op's.
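///
/// For example, for a 4-element mask, <-1, 1, 2, -1> is a no-op while
/// <1, 0, 2, 3> is not.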
7304 static bool isNoopShuffleMask(ArrayRef<int> Mask) {
7305 for (int i = 0, Size = Mask.size(); i < Size; ++i)
7306 if (Mask[i] != -1 && Mask[i] != i)
7311 /// \brief Helper function to classify a mask as a single-input mask.
7313 /// This isn't a generic single-input test because in the vector shuffle
7314 /// lowering we canonicalize single inputs to be the first input operand. This
7315 /// means we can more quickly test for a single input by only checking whether
7316 /// an input from the second operand exists. We also assume that the size of
7317 /// the mask corresponds to the size of the input vectors, which isn't true in
7318 /// the fully general case.
7319 static bool isSingleInputShuffleMask(ArrayRef<int> Mask) {
7321 if (M >= (int)Mask.size())
7326 /// \brief Test whether there are elements crossing 128-bit lanes in this
7329 /// X86 divides up its shuffles into in-lane and cross-lane shuffle operations
7330 /// and we routinely test for these.
7331 static bool is128BitLaneCrossingShuffleMask(MVT VT, ArrayRef<int> Mask) {
7332 int LaneSize = 128 / VT.getScalarSizeInBits();
7333 int Size = Mask.size();
7334 for (int i = 0; i < Size; ++i)
7335 if (Mask[i] >= 0 && (Mask[i] % Size) / LaneSize != i / LaneSize)
7340 /// \brief Test whether a shuffle mask is equivalent within each 128-bit lane.
7342 /// This checks a shuffle mask to see if it is performing the same
7343 /// 128-bit lane-relative shuffle in each 128-bit lane. This trivially implies
7344 /// that it is also not lane-crossing. It may however involve a blend from the
7345 /// same lane of a second vector.
7347 /// The specific repeated shuffle mask is populated in \p RepeatedMask, as it is
7348 /// non-trivial to compute in the face of undef lanes. The representation is
7349 /// *not* suitable for use with existing 128-bit shuffles as it will contain
7350 /// entries from both V1 and V2 inputs to the wider mask.
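///
/// For example, the v8i32 mask <0, 9, 2, 11, 4, 13, 6, 15> repeats per 128-bit
/// lane; \p RepeatedMask becomes <0, 9, 2, 11>, where entries >= 8 still refer
/// to elements of V2 in the original wide shuffle.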
7352 is128BitLaneRepeatedShuffleMask(MVT VT, ArrayRef<int> Mask,
7353 SmallVectorImpl<int> &RepeatedMask) {
7354 int LaneSize = 128 / VT.getScalarSizeInBits();
7355 RepeatedMask.resize(LaneSize, -1);
7356 int Size = Mask.size();
7357 for (int i = 0; i < Size; ++i) {
7360 if ((Mask[i] % Size) / LaneSize != i / LaneSize)
7361 // This entry crosses lanes, so there is no way to model this shuffle.
7364 // Ok, handle the in-lane shuffles by detecting if and when they repeat.
7365 if (RepeatedMask[i % LaneSize] == -1)
7366 // This is the first non-undef entry in this slot of a 128-bit lane.
7367 RepeatedMask[i % LaneSize] =
7368 Mask[i] < Size ? Mask[i] % LaneSize : Mask[i] % LaneSize + Size;
7369 else if (RepeatedMask[i % LaneSize] + (i / LaneSize) * LaneSize != Mask[i])
7370 // Found a mismatch with the repeated mask.
7376 /// \brief Base case helper for testing a single mask element.
7377 static bool isShuffleEquivalentImpl(SDValue V1, SDValue V2,
7378 BuildVectorSDNode *BV1,
7379 BuildVectorSDNode *BV2, ArrayRef<int> Mask,
7381 int Size = Mask.size();
7382 if (Mask[i] != -1 && Mask[i] != Arg) {
7383 auto *MaskBV = Mask[i] < Size ? BV1 : BV2;
7384 auto *ArgsBV = Arg < Size ? BV1 : BV2;
7385 if (!MaskBV || !ArgsBV ||
7386 MaskBV->getOperand(Mask[i] % Size) != ArgsBV->getOperand(Arg % Size))
7392 /// \brief Recursive helper to peel off and test each mask element.
7393 template <typename... Ts>
7394 static bool isShuffleEquivalentImpl(SDValue V1, SDValue V2,
7395 BuildVectorSDNode *BV1,
7396 BuildVectorSDNode *BV2, ArrayRef<int> Mask,
7397 int i, int Arg, Ts... Args) {
7398 if (!isShuffleEquivalentImpl(V1, V2, BV1, BV2, Mask, i, Arg))
7401 return isShuffleEquivalentImpl(V1, V2, BV1, BV2, Mask, i + 1, Args...);
7404 /// \brief Checks whether a shuffle mask is equivalent to an explicit list of
7407 /// This is a fast way to test a shuffle mask against a fixed pattern:
7409 /// if (isShuffleEquivalent(Mask, 3, 2, 1, 0)) { ... }
7411 /// It returns true if the mask is exactly as wide as the argument list, and
7412 /// each element of the mask is either -1 (signifying undef) or the value given
7413 /// in the argument.
7414 template <typename... Ts>
7415 static bool isShuffleEquivalent(SDValue V1, SDValue V2, ArrayRef<int> Mask,
7417 if (Mask.size() != sizeof...(Args))
7420 // If the values are build vectors, we can look through them to find
7421 // equivalent inputs that make the shuffles equivalent.
7422 auto *BV1 = dyn_cast<BuildVectorSDNode>(V1);
7423 auto *BV2 = dyn_cast<BuildVectorSDNode>(V2);
7425 // Recursively peel off arguments and test them against the mask.
7426 return isShuffleEquivalentImpl(V1, V2, BV1, BV2, Mask, 0, Args...);
7429 /// \brief Get a 4-lane 8-bit shuffle immediate for a mask.
7431 /// This helper function produces an 8-bit shuffle immediate corresponding to
7432 /// the ubiquitous shuffle encoding scheme used in x86 instructions for
7433 /// shuffling 4 lanes. It can be used with most of the PSHUF instructions for
7436 /// NB: We rely heavily on "undef" masks preserving the input lane.
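///
/// For example, the reversal mask <3, 2, 1, 0> produces the immediate
/// 0b00011011 (0x1B), i.e. the classic "pshufd $0x1B" element reversal.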
7437 static SDValue getV4X86ShuffleImm8ForMask(ArrayRef<int> Mask,
7438 SelectionDAG &DAG) {
7439 assert(Mask.size() == 4 && "Only 4-lane shuffle masks");
7440 assert(Mask[0] >= -1 && Mask[0] < 4 && "Out of bound mask element!");
7441 assert(Mask[1] >= -1 && Mask[1] < 4 && "Out of bound mask element!");
7442 assert(Mask[2] >= -1 && Mask[2] < 4 && "Out of bound mask element!");
7443 assert(Mask[3] >= -1 && Mask[3] < 4 && "Out of bound mask element!");
7446 Imm |= (Mask[0] == -1 ? 0 : Mask[0]) << 0;
7447 Imm |= (Mask[1] == -1 ? 1 : Mask[1]) << 2;
7448 Imm |= (Mask[2] == -1 ? 2 : Mask[2]) << 4;
7449 Imm |= (Mask[3] == -1 ? 3 : Mask[3]) << 6;
7450 return DAG.getConstant(Imm, MVT::i8);
7453 /// \brief Try to emit a blend instruction for a shuffle.
7455 /// This doesn't do any checks for the availability of instructions for blending
7456 /// these values. It relies on the availability of the X86ISD::BLENDI pattern to
7457 /// be matched in the backend with the type given. What it does check for is
7458 /// that the shuffle mask is in fact a blend.
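///
/// For example, the v4f32 mask <0, 5, 2, 7> is a blend taking elements 1 and 3
/// from V2, so the computed blend mask is 0b1010.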
7459 static SDValue lowerVectorShuffleAsBlend(SDLoc DL, MVT VT, SDValue V1,
7460 SDValue V2, ArrayRef<int> Mask,
7461 const X86Subtarget *Subtarget,
7462 SelectionDAG &DAG) {
7464 unsigned BlendMask = 0;
7465 for (int i = 0, Size = Mask.size(); i < Size; ++i) {
7466 if (Mask[i] >= Size) {
7467 if (Mask[i] != i + Size)
7468 return SDValue(); // Shuffled V2 input!
7469 BlendMask |= 1u << i;
7472 if (Mask[i] >= 0 && Mask[i] != i)
7473 return SDValue(); // Shuffled V1 input!
7475 switch (VT.SimpleTy) {
7480 return DAG.getNode(X86ISD::BLENDI, DL, VT, V1, V2,
7481 DAG.getConstant(BlendMask, MVT::i8));
7485 assert(Subtarget->hasAVX2() && "256-bit integer blends require AVX2!");
7489 // If we have AVX2 it is faster to use VPBLENDD when the shuffle fits into
7490 // that instruction.
7491 if (Subtarget->hasAVX2()) {
7492 // Scale the blend by the number of 32-bit dwords per element.
7493 int Scale = VT.getScalarSizeInBits() / 32;
7495 for (int i = 0, Size = Mask.size(); i < Size; ++i)
7496 if (Mask[i] >= Size)
7497 for (int j = 0; j < Scale; ++j)
7498 BlendMask |= 1u << (i * Scale + j);
7500 MVT BlendVT = VT.getSizeInBits() > 128 ? MVT::v8i32 : MVT::v4i32;
7501 V1 = DAG.getNode(ISD::BITCAST, DL, BlendVT, V1);
7502 V2 = DAG.getNode(ISD::BITCAST, DL, BlendVT, V2);
7503 return DAG.getNode(ISD::BITCAST, DL, VT,
7504 DAG.getNode(X86ISD::BLENDI, DL, BlendVT, V1, V2,
7505 DAG.getConstant(BlendMask, MVT::i8)));
7509 // For integer shuffles we need to expand the mask and cast the inputs to
7510 // v8i16s prior to blending.
7511 int Scale = 8 / VT.getVectorNumElements();
7513 for (int i = 0, Size = Mask.size(); i < Size; ++i)
7514 if (Mask[i] >= Size)
7515 for (int j = 0; j < Scale; ++j)
7516 BlendMask |= 1u << (i * Scale + j);
7518 V1 = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, V1);
7519 V2 = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, V2);
7520 return DAG.getNode(ISD::BITCAST, DL, VT,
7521 DAG.getNode(X86ISD::BLENDI, DL, MVT::v8i16, V1, V2,
7522 DAG.getConstant(BlendMask, MVT::i8)));
7526 assert(Subtarget->hasAVX2() && "256-bit integer blends require AVX2!");
7527 SmallVector<int, 8> RepeatedMask;
7528 if (is128BitLaneRepeatedShuffleMask(MVT::v16i16, Mask, RepeatedMask)) {
7529 // We can lower these with PBLENDW which is mirrored across 128-bit lanes.
7530 assert(RepeatedMask.size() == 8 && "Repeated mask size doesn't match!");
7532 for (int i = 0; i < 8; ++i)
7533 if (RepeatedMask[i] >= 16)
7534 BlendMask |= 1u << i;
7535 return DAG.getNode(X86ISD::BLENDI, DL, MVT::v16i16, V1, V2,
7536 DAG.getConstant(BlendMask, MVT::i8));
7542 // Scale the blend by the number of bytes per element.
7543 int Scale = VT.getScalarSizeInBits() / 8;
7545 // This form of blend is always done on bytes. Compute the byte vector
7547 MVT BlendVT = MVT::getVectorVT(MVT::i8, VT.getSizeInBits() / 8);
7549 // Compute the VSELECT mask. Note that VSELECT is really confusing in the
7550 // mix of LLVM's code generator and the x86 backend. We tell the code
7551 // generator that boolean values in the elements of an x86 vector register
7552 // are -1 for true and 0 for false. We then use the LLVM semantics of 'true'
7553 // mapping a select to operand #1, and 'false' mapping to operand #2. The
7554 // reality in x86 is that vector masks (pre-AVX-512) use only the high bit
7555 // of the element (the remaining are ignored) and 0 in that high bit would
7556 // mean operand #1 while 1 in the high bit would mean operand #2. So while
7557 // the LLVM model for boolean values in vector elements gets the relevant
7558 // bit set, it is set backwards and over constrained relative to x86's
7560 SmallVector<SDValue, 32> VSELECTMask;
7561 for (int i = 0, Size = Mask.size(); i < Size; ++i)
7562 for (int j = 0; j < Scale; ++j)
7563 VSELECTMask.push_back(
7564 Mask[i] < 0 ? DAG.getUNDEF(MVT::i8)
7565 : DAG.getConstant(Mask[i] < Size ? -1 : 0, MVT::i8));
7567 V1 = DAG.getNode(ISD::BITCAST, DL, BlendVT, V1);
7568 V2 = DAG.getNode(ISD::BITCAST, DL, BlendVT, V2);
7570 ISD::BITCAST, DL, VT,
7571 DAG.getNode(ISD::VSELECT, DL, BlendVT,
7572 DAG.getNode(ISD::BUILD_VECTOR, DL, BlendVT, VSELECTMask),
7577 llvm_unreachable("Not a supported integer vector type!");
7581 /// \brief Try to lower as a blend of elements from two inputs followed by
7582 /// a single-input permutation.
7584 /// This matches the pattern where we can blend elements from two inputs and
7585 /// then reduce the shuffle to a single-input permutation.
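///
/// As a rough example, the v4i32 mask <1, 4, 7, 2> decomposes into the blend
/// mask <4, 1, 2, 7> followed by the single-input permute <1, 0, 3, 2>.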
7586 static SDValue lowerVectorShuffleAsBlendAndPermute(SDLoc DL, MVT VT, SDValue V1,
7589 SelectionDAG &DAG) {
7590 // We build up the blend mask while checking whether a blend is a viable way
7591 // to reduce the shuffle.
7592 SmallVector<int, 32> BlendMask(Mask.size(), -1);
7593 SmallVector<int, 32> PermuteMask(Mask.size(), -1);
7595 for (int i = 0, Size = Mask.size(); i < Size; ++i) {
7599 assert(Mask[i] < Size * 2 && "Shuffle input is out of bounds.");
7601 if (BlendMask[Mask[i] % Size] == -1)
7602 BlendMask[Mask[i] % Size] = Mask[i];
7603 else if (BlendMask[Mask[i] % Size] != Mask[i])
7604 return SDValue(); // Can't blend in the needed input!
7606 PermuteMask[i] = Mask[i] % Size;
7609 SDValue V = DAG.getVectorShuffle(VT, DL, V1, V2, BlendMask);
7610 return DAG.getVectorShuffle(VT, DL, V, DAG.getUNDEF(VT), PermuteMask);
7613 /// \brief Generic routine to decompose a shuffle and blend into independent
7614 /// blends and permutes.
7616 /// This matches the extremely common pattern for handling combined
7617 /// shuffle+blend operations on newer X86 ISAs where we have very fast blend
7618 /// operations. It will try to pick the best arrangement of shuffles and
7620 static SDValue lowerVectorShuffleAsDecomposedShuffleBlend(SDLoc DL, MVT VT,
7624 SelectionDAG &DAG) {
7625 // Shuffle the input elements into the desired positions in V1 and V2 and
7626 // blend them together.
7627 SmallVector<int, 32> V1Mask(Mask.size(), -1);
7628 SmallVector<int, 32> V2Mask(Mask.size(), -1);
7629 SmallVector<int, 32> BlendMask(Mask.size(), -1);
7630 for (int i = 0, Size = Mask.size(); i < Size; ++i)
7631 if (Mask[i] >= 0 && Mask[i] < Size) {
7632 V1Mask[i] = Mask[i];
7634 } else if (Mask[i] >= Size) {
7635 V2Mask[i] = Mask[i] - Size;
7636 BlendMask[i] = i + Size;
7639 // Try to lower with the simpler initial blend strategy unless one of the
7640 // input shuffles would be a no-op. We prefer to shuffle inputs as the
7641 // shuffle may be able to fold with a load or provide some other benefit.
7642 // However, when we would have to do 2x as many shuffles to achieve this,
7643 // blending first is the better strategy.
7644 if (!isNoopShuffleMask(V1Mask) && !isNoopShuffleMask(V2Mask))
7645 if (SDValue BlendPerm =
7646 lowerVectorShuffleAsBlendAndPermute(DL, VT, V1, V2, Mask, DAG))
7649 V1 = DAG.getVectorShuffle(VT, DL, V1, DAG.getUNDEF(VT), V1Mask);
7650 V2 = DAG.getVectorShuffle(VT, DL, V2, DAG.getUNDEF(VT), V2Mask);
7651 return DAG.getVectorShuffle(VT, DL, V1, V2, BlendMask);
7654 /// \brief Try to lower a vector shuffle as a byte rotation.
7656 /// SSSE3 has a generic PALIGNR instruction in x86 that will do an arbitrary
7657 /// byte-rotation of the concatenation of two vectors; pre-SSSE3 can use
7658 /// a PSRLDQ/PSLLDQ/POR pattern to get a similar effect. This routine will
7659 /// try to generically lower a vector shuffle through such a pattern. It
7660 /// does not check for the profitability of lowering either as PALIGNR or
7661 /// PSRLDQ/PSLLDQ/POR, only whether the mask is valid to lower in that form.
7662 /// This matches shuffle vectors that look like:
7664 /// v8i16 [11, 12, 13, 14, 15, 0, 1, 2]
7666 /// Essentially it concatenates V1 and V2, shifts right by some number of
7667 /// elements, and takes the low elements as the result. Note that while this is
7668 /// specified as a *right shift* because x86 is little-endian, it is a *left
7669 /// rotate* of the vector lanes.
7670 static SDValue lowerVectorShuffleAsByteRotate(SDLoc DL, MVT VT, SDValue V1,
7673 const X86Subtarget *Subtarget,
7674 SelectionDAG &DAG) {
7675 assert(!isNoopShuffleMask(Mask) && "We shouldn't lower no-op shuffles!");
7677 int NumElts = Mask.size();
7678 int NumLanes = VT.getSizeInBits() / 128;
7679 int NumLaneElts = NumElts / NumLanes;
7681 // We need to detect various ways of spelling a rotation:
7682 // [11, 12, 13, 14, 15, 0, 1, 2]
7683 // [-1, 12, 13, 14, -1, -1, 1, -1]
7684 // [-1, -1, -1, -1, -1, -1, 1, 2]
7685 // [ 3, 4, 5, 6, 7, 8, 9, 10]
7686 // [-1, 4, 5, 6, -1, -1, 9, -1]
7687 // [-1, 4, 5, 6, -1, -1, -1, -1]
7690 for (int l = 0; l < NumElts; l += NumLaneElts) {
7691 for (int i = 0; i < NumLaneElts; ++i) {
7692 if (Mask[l + i] == -1)
7694 assert(Mask[l + i] >= 0 && "Only -1 is a valid negative mask element!");
7696 // Get the mod-Size index and lane correct it.
7697 int LaneIdx = (Mask[l + i] % NumElts) - l;
7698 // Make sure it was in this lane.
7699 if (LaneIdx < 0 || LaneIdx >= NumLaneElts)
7702 // Determine where a rotated vector would have started.
7703 int StartIdx = i - LaneIdx;
7705 // The identity rotation isn't interesting, stop.
7708 // If we found the tail of a vector, the rotation must be the missing
7709 // front. If we found the head of a vector, it must be how much of the
7711 int CandidateRotation = StartIdx < 0 ? -StartIdx : NumLaneElts - StartIdx;
7714 Rotation = CandidateRotation;
7715 else if (Rotation != CandidateRotation)
7716 // The rotations don't match, so we can't match this mask.
7719 // Compute which value this mask is pointing at.
7720 SDValue MaskV = Mask[l + i] < NumElts ? V1 : V2;
7722 // Compute which of the two target values this index should be assigned
7723 // to. This reflects whether the high elements are remaining or the low
7724 // elements are remaining.
7725 SDValue &TargetV = StartIdx < 0 ? Hi : Lo;
7727 // Either set up this value if we've not encountered it before, or check
7728 // that it remains consistent.
7731 else if (TargetV != MaskV)
7732 // This may be a rotation, but it pulls from the inputs in some
7733 // unsupported interleaving.
7738 // Check that we successfully analyzed the mask, and normalize the results.
7739 assert(Rotation != 0 && "Failed to locate a viable rotation!");
7740 assert((Lo || Hi) && "Failed to find a rotated input vector!");
7746 // The actual rotate instruction rotates bytes, so we need to scale the
7747 // rotation based on how many bytes are in the vector lane.
7748 int Scale = 16 / NumLaneElts;
7750 // SSSE3 targets can use the palignr instruction.
7751 if (Subtarget->hasSSSE3()) {
7752 // Cast the inputs to i8 vector of correct length to match PALIGNR.
7753 MVT AlignVT = MVT::getVectorVT(MVT::i8, 16 * NumLanes);
7754 Lo = DAG.getNode(ISD::BITCAST, DL, AlignVT, Lo);
7755 Hi = DAG.getNode(ISD::BITCAST, DL, AlignVT, Hi);
7757 return DAG.getNode(ISD::BITCAST, DL, VT,
7758 DAG.getNode(X86ISD::PALIGNR, DL, AlignVT, Hi, Lo,
7759 DAG.getConstant(Rotation * Scale, MVT::i8)));
7762 assert(VT.getSizeInBits() == 128 &&
7763 "Rotate-based lowering only supports 128-bit lowering!");
7764 assert(Mask.size() <= 16 &&
7765 "Can shuffle at most 16 bytes in a 128-bit vector!");
7767 // Default SSE2 implementation
7768 int LoByteShift = 16 - Rotation * Scale;
7769 int HiByteShift = Rotation * Scale;
7771 // Cast the inputs to v2i64 to match PSLLDQ/PSRLDQ.
7772 Lo = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, Lo);
7773 Hi = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, Hi);
7775 SDValue LoShift = DAG.getNode(X86ISD::VSHLDQ, DL, MVT::v2i64, Lo,
7776 DAG.getConstant(LoByteShift, MVT::i8));
7777 SDValue HiShift = DAG.getNode(X86ISD::VSRLDQ, DL, MVT::v2i64, Hi,
7778 DAG.getConstant(HiByteShift, MVT::i8));
7779 return DAG.getNode(ISD::BITCAST, DL, VT,
7780 DAG.getNode(ISD::OR, DL, MVT::v2i64, LoShift, HiShift));
7783 /// \brief Compute whether each element of a shuffle is zeroable.
7785 /// A "zeroable" vector shuffle element is one which can be lowered to zero.
7786 /// Either it is an undef element in the shuffle mask, the element of the input
7787 /// referenced is undef, or the element of the input referenced is known to be
7788 /// zero. Many x86 shuffles can zero lanes cheaply and we often want to handle
7789 /// as many lanes with this technique as possible to simplify the remaining
7791 static SmallBitVector computeZeroableShuffleElements(ArrayRef<int> Mask,
7792 SDValue V1, SDValue V2) {
7793 SmallBitVector Zeroable(Mask.size(), false);
7795 while (V1.getOpcode() == ISD::BITCAST)
7796 V1 = V1->getOperand(0);
7797 while (V2.getOpcode() == ISD::BITCAST)
7798 V2 = V2->getOperand(0);
7800 bool V1IsZero = ISD::isBuildVectorAllZeros(V1.getNode());
7801 bool V2IsZero = ISD::isBuildVectorAllZeros(V2.getNode());
7803 for (int i = 0, Size = Mask.size(); i < Size; ++i) {
7805 // Handle the easy cases.
7806 if (M < 0 || (M >= 0 && M < Size && V1IsZero) || (M >= Size && V2IsZero)) {
7811 // If this is an index into a build_vector node (which has the same number
7812 // of elements), dig out the input value and use it.
7813 SDValue V = M < Size ? V1 : V2;
7814 if (V.getOpcode() != ISD::BUILD_VECTOR || Size != (int)V.getNumOperands())
7817 SDValue Input = V.getOperand(M % Size);
7818 // The UNDEF opcode check really should be dead code here, but not quite
7819 // worth asserting on (it isn't invalid, just unexpected).
7820 if (Input.getOpcode() == ISD::UNDEF || X86::isZeroNode(Input))
7827 /// \brief Try to emit a bitmask instruction for a shuffle.
7829 /// This handles cases where we can model a blend exactly as a bitmask due to
7830 /// one of the inputs being zeroable.
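///
/// For example, if V1 is known to be all zeros, the v4i32 mask <4, 1, 6, 3>
/// keeps only elements 0 and 2 of V2 and can be emitted as roughly
/// (and V2, <-1, 0, -1, 0>).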
7831 static SDValue lowerVectorShuffleAsBitMask(SDLoc DL, MVT VT, SDValue V1,
7832 SDValue V2, ArrayRef<int> Mask,
7833 SelectionDAG &DAG) {
7834 MVT EltVT = VT.getScalarType();
7835 int NumEltBits = EltVT.getSizeInBits();
7836 MVT IntEltVT = MVT::getIntegerVT(NumEltBits);
7837 SDValue Zero = DAG.getConstant(0, IntEltVT);
7838 SDValue AllOnes = DAG.getConstant(APInt::getAllOnesValue(NumEltBits), IntEltVT);
7839 if (EltVT.isFloatingPoint()) {
7840 Zero = DAG.getNode(ISD::BITCAST, DL, EltVT, Zero);
7841 AllOnes = DAG.getNode(ISD::BITCAST, DL, EltVT, AllOnes);
7843 SmallVector<SDValue, 16> VMaskOps(Mask.size(), Zero);
7844 SmallBitVector Zeroable = computeZeroableShuffleElements(Mask, V1, V2);
7846 for (int i = 0, Size = Mask.size(); i < Size; ++i) {
7849 if (Mask[i] % Size != i)
7850 return SDValue(); // Not a blend.
7852 V = Mask[i] < Size ? V1 : V2;
7853 else if (V != (Mask[i] < Size ? V1 : V2))
7854 return SDValue(); // Can only let one input through the mask.
7856 VMaskOps[i] = AllOnes;
7859 return SDValue(); // No non-zeroable elements!
7861 SDValue VMask = DAG.getNode(ISD::BUILD_VECTOR, DL, VT, VMaskOps);
7862 V = DAG.getNode(VT.isFloatingPoint()
7863 ? (unsigned) X86ISD::FAND : (unsigned) ISD::AND,
7868 /// \brief Try to lower a vector shuffle as a byte shift (shifts in zeros).
7870 /// Attempts to match a shuffle mask against the PSRLDQ and PSLLDQ
7871 /// byte-shift instructions. The mask must consist of a shifted sequential
7872 /// shuffle from one of the input vectors and zeroable elements for the
7873 /// remaining 'shifted in' elements.
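///
/// For example, the v8i16 mask <zz, zz, 0, 1, 2, 3, 4, 5> (with the first two
/// elements zeroable) matches a 4-byte left shift of V1, i.e. pslldq $4.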
7874 static SDValue lowerVectorShuffleAsByteShift(SDLoc DL, MVT VT, SDValue V1,
7875 SDValue V2, ArrayRef<int> Mask,
7876 SelectionDAG &DAG) {
7877 assert(!isNoopShuffleMask(Mask) && "We shouldn't lower no-op shuffles!");
7879 SmallBitVector Zeroable = computeZeroableShuffleElements(Mask, V1, V2);
7881 int NumElts = VT.getVectorNumElements();
7882 int NumLanes = VT.getSizeInBits() / 128;
7883 int NumLaneElts = NumElts / NumLanes;
7884 int Scale = 16 / NumLaneElts;
7885 MVT ShiftVT = MVT::getVectorVT(MVT::i64, 2 * NumLanes);
7887 // PSLLDQ : (little-endian) left byte shift
7888 // [ zz, 0, 1, 2, 3, 4, 5, 6]
7889 // [ zz, zz, -1, -1, 2, 3, 4, -1]
7890 // [ zz, zz, zz, zz, zz, zz, -1, 1]
7891 // PSRLDQ : (little-endian) right byte shift
7892 // [ 5, 6, 7, zz, zz, zz, zz, zz]
7893 // [ -1, 5, 6, 7, zz, zz, zz, zz]
7894 // [ 1, 2, -1, -1, -1, -1, zz, zz]
7895 auto MatchByteShift = [&](int Shift) -> SDValue {
7896 bool MatchLeft = true, MatchRight = true;
7897 for (int l = 0; l < NumElts; l += NumLaneElts) {
7898 for (int i = 0; i < Shift; ++i)
7899 MatchLeft &= Zeroable[l + i];
7900 for (int i = NumLaneElts - Shift; i < NumLaneElts; ++i)
7901 MatchRight &= Zeroable[l + i];
7903 if (!(MatchLeft || MatchRight))
7906 bool MatchV1 = true, MatchV2 = true;
7907 for (int l = 0; l < NumElts; l += NumLaneElts) {
7908 unsigned Pos = MatchLeft ? Shift + l : l;
7909 unsigned Low = MatchLeft ? l : Shift + l;
7910 unsigned Len = NumLaneElts - Shift;
7911 MatchV1 &= isSequentialOrUndefInRange(Mask, Pos, Len, Low);
7912 MatchV2 &= isSequentialOrUndefInRange(Mask, Pos, Len, Low + NumElts);
7914 if (!(MatchV1 || MatchV2))
7917 int ByteShift = Shift * Scale;
7918 unsigned Op = MatchRight ? X86ISD::VSRLDQ : X86ISD::VSHLDQ;
7919 SDValue V = MatchV1 ? V1 : V2;
7920 V = DAG.getNode(ISD::BITCAST, DL, ShiftVT, V);
7921 V = DAG.getNode(Op, DL, ShiftVT, V,
7922 DAG.getConstant(ByteShift, MVT::i8));
7923 return DAG.getNode(ISD::BITCAST, DL, VT, V);
7926 for (int Shift = 1; Shift < NumLaneElts; ++Shift)
7927 if (SDValue S = MatchByteShift(Shift))
7934 /// \brief Try to lower a vector shuffle as a bit shift (shifts in zeros).
7936 /// Attempts to match a shuffle mask against the PSRL(W/D/Q) and PSLL(W/D/Q)
7937 /// SSE2 and AVX2 logical bit-shift instructions. The function matches
7938 /// elements from one of the input vectors shuffled to the left or right
7939 /// with zeroable elements 'shifted in'.
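///
/// For example, the v4i32 mask <1, zz, 3, zz> (with the zz elements zeroable)
/// is matched as a v2i64 logical right shift by 32 bits, i.e. psrlq $32.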
7940 static SDValue lowerVectorShuffleAsBitShift(SDLoc DL, MVT VT, SDValue V1,
7941 SDValue V2, ArrayRef<int> Mask,
7942 SelectionDAG &DAG) {
7943 SmallBitVector Zeroable = computeZeroableShuffleElements(Mask, V1, V2);
7945 int Size = Mask.size();
7946 assert(Size == (int)VT.getVectorNumElements() && "Unexpected mask size");
7948 // PSRL : (little-endian) right bit shift.
7951 // PSHL : (little-endian) left bit shift.
7953 // [ -1, 4, zz, -1 ]
7954 auto MatchBitShift = [&](int Shift, int Scale) -> SDValue {
7955 MVT ShiftSVT = MVT::getIntegerVT(VT.getScalarSizeInBits() * Scale);
7956 MVT ShiftVT = MVT::getVectorVT(ShiftSVT, Size / Scale);
7957 assert(DAG.getTargetLoweringInfo().isTypeLegal(ShiftVT) &&
7958 "Illegal integer vector type");
7960 bool MatchLeft = true, MatchRight = true;
7961 for (int i = 0; i != Size; i += Scale) {
7962 for (int j = 0; j != Shift; ++j) {
7963 MatchLeft &= Zeroable[i + j];
7965 for (int j = Scale - Shift; j != Scale; ++j) {
7966 MatchRight &= Zeroable[i + j];
7969 if (!(MatchLeft || MatchRight))
7972 bool MatchV1 = true, MatchV2 = true;
7973 for (int i = 0; i != Size; i += Scale) {
7974 unsigned Pos = MatchLeft ? i + Shift : i;
7975 unsigned Low = MatchLeft ? i : i + Shift;
7976 unsigned Len = Scale - Shift;
7977 MatchV1 &= isSequentialOrUndefInRange(Mask, Pos, Len, Low);
7978 MatchV2 &= isSequentialOrUndefInRange(Mask, Pos, Len, Low + Size);
7980 if (!(MatchV1 || MatchV2))
7983 // Cast the inputs to ShiftVT to match VSRLI/VSHLI and back again.
7984 unsigned OpCode = MatchLeft ? X86ISD::VSHLI : X86ISD::VSRLI;
7985 int ShiftAmt = Shift * VT.getScalarSizeInBits();
7986 SDValue V = MatchV1 ? V1 : V2;
7987 V = DAG.getNode(ISD::BITCAST, DL, ShiftVT, V);
7988 V = DAG.getNode(OpCode, DL, ShiftVT, V, DAG.getConstant(ShiftAmt, MVT::i8));
7989 return DAG.getNode(ISD::BITCAST, DL, VT, V);
7992 // SSE/AVX supports logical shifts up to 64-bit integers - so we can just
7993 // keep doubling the size of the integer elements up to that. We can
7994 // then shift the elements of the integer vector by whole multiples of
7995 // their width within the elements of the larger integer vector. Test each
7996 // multiple to see if we can find a match with the moved element indices
7997 // and that the shifted in elements are all zeroable.
7998 for (int Scale = 2; Scale * VT.getScalarSizeInBits() <= 64; Scale *= 2)
7999 for (int Shift = 1; Shift != Scale; ++Shift)
8000 if (SDValue BitShift = MatchBitShift(Shift, Scale))
8007 /// \brief Lower a vector shuffle as a zero or any extension.
8009 /// Given a specific number of elements, element bit width, and extension
8010 /// stride, produce either a zero or any extension based on the available
8011 /// features of the subtarget.
8012 static SDValue lowerVectorShuffleAsSpecificZeroOrAnyExtend(
8013 SDLoc DL, MVT VT, int Scale, bool AnyExt, SDValue InputV,
8014 const X86Subtarget *Subtarget, SelectionDAG &DAG) {
8015 assert(Scale > 1 && "Need a scale to extend.");
8016 int NumElements = VT.getVectorNumElements();
8017 int EltBits = VT.getScalarSizeInBits();
8018 assert((EltBits == 8 || EltBits == 16 || EltBits == 32) &&
8019 "Only 8, 16, and 32 bit elements can be extended.");
8020 assert(Scale * EltBits <= 64 && "Cannot zero extend past 64 bits.");
8022 // Found a valid zext mask! Try various lowering strategies based on the
8023 // input type and available ISA extensions.
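// For example (hypothetical inputs, not from a specific caller): with
// VT = v8i16 and Scale = 2 on an SSE4.1 target, ExtVT below is v4i32 and the
// VZEXT node selects to PMOVZXWD, zero-extending the low four words of InputV
// into dwords before the bitcast back to the original type.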
8024 if (Subtarget->hasSSE41()) {
8025 MVT ExtVT = MVT::getVectorVT(MVT::getIntegerVT(EltBits * Scale),
8026 NumElements / Scale);
8027 return DAG.getNode(ISD::BITCAST, DL, VT,
8028 DAG.getNode(X86ISD::VZEXT, DL, ExtVT, InputV));
8031 // For any extends we can cheat for larger element sizes and use shuffle
8032 // instructions that can fold with a load and/or copy.
8033 if (AnyExt && EltBits == 32) {
8034 int PSHUFDMask[4] = {0, -1, 1, -1};
8036 ISD::BITCAST, DL, VT,
8037 DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32,
8038 DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, InputV),
8039 getV4X86ShuffleImm8ForMask(PSHUFDMask, DAG)));
8041 if (AnyExt && EltBits == 16 && Scale > 2) {
8042 int PSHUFDMask[4] = {0, -1, 0, -1};
8043 InputV = DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32,
8044 DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, InputV),
8045 getV4X86ShuffleImm8ForMask(PSHUFDMask, DAG));
8046 int PSHUFHWMask[4] = {1, -1, -1, -1};
8048 ISD::BITCAST, DL, VT,
8049 DAG.getNode(X86ISD::PSHUFHW, DL, MVT::v8i16,
8050 DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, InputV),
8051 getV4X86ShuffleImm8ForMask(PSHUFHWMask, DAG)));
8054 // If this would require more than 2 unpack instructions to expand, use
8055 // pshufb when available. We can only use more than 2 unpack instructions
8056 // when zero extending i8 elements which also makes it easier to use pshufb.
8057 if (Scale > 4 && EltBits == 8 && Subtarget->hasSSSE3()) {
8058 assert(NumElements == 16 && "Unexpected byte vector width!");
8059 SDValue PSHUFBMask[16];
8060 for (int i = 0; i < 16; ++i)
8062 DAG.getConstant((i % Scale == 0) ? i / Scale : 0x80, MVT::i8);
8063 InputV = DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, InputV);
8064 return DAG.getNode(ISD::BITCAST, DL, VT,
8065 DAG.getNode(X86ISD::PSHUFB, DL, MVT::v16i8, InputV,
8066 DAG.getNode(ISD::BUILD_VECTOR, DL,
8067 MVT::v16i8, PSHUFBMask)));
8070 // Otherwise emit a sequence of unpacks.
8072 MVT InputVT = MVT::getVectorVT(MVT::getIntegerVT(EltBits), NumElements);
8073 SDValue Ext = AnyExt ? DAG.getUNDEF(InputVT)
8074 : getZeroVector(InputVT, Subtarget, DAG, DL);
8075 InputV = DAG.getNode(ISD::BITCAST, DL, InputVT, InputV);
8076 InputV = DAG.getNode(X86ISD::UNPCKL, DL, InputVT, InputV, Ext);
8080 } while (Scale > 1);
8081 return DAG.getNode(ISD::BITCAST, DL, VT, InputV);
8084 /// \brief Try to lower a vector shuffle as a zero extension on any microarch.
8086 /// This routine will try to do everything in its power to cleverly lower
8087 /// a shuffle which happens to match the pattern of a zero extend. It doesn't
8088 /// check for the profitability of this lowering; it tries to aggressively
8089 /// match this pattern. It will use all of the micro-architectural details it
8090 /// can to emit an efficient lowering. It handles both blends with all-zero
8091 /// inputs (to explicitly zero-extend) and undef lanes (sometimes undef because
8092 /// they are masked out later).
8094 /// The reason we have dedicated lowering for zext-style shuffles is that they
8095 /// are both incredibly common and often quite performance sensitive.
8096 static SDValue lowerVectorShuffleAsZeroOrAnyExtend(
8097 SDLoc DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
8098 const X86Subtarget *Subtarget, SelectionDAG &DAG) {
8099 SmallBitVector Zeroable = computeZeroableShuffleElements(Mask, V1, V2);
8101 int Bits = VT.getSizeInBits();
8102 int NumElements = VT.getVectorNumElements();
8103 assert(VT.getScalarSizeInBits() <= 32 &&
8104 "Exceeds 32-bit integer zero extension limit");
8105 assert((int)Mask.size() == NumElements && "Unexpected shuffle mask size");
8107 // Define a helper function to check a particular ext-scale and lower to it if valid.
8109 auto Lower = [&](int Scale) -> SDValue {
8112 for (int i = 0; i < NumElements; ++i) {
8114 continue; // Valid anywhere but doesn't tell us anything.
8115 if (i % Scale != 0) {
8116 // Each of the extended elements needs to be zeroable.
8120 // We no longer are in the anyext case.
8125 // Each of the base elements needs to be consecutive indices into the
8126 // same input vector.
8127 SDValue V = Mask[i] < NumElements ? V1 : V2;
8130 else if (InputV != V)
8131 return SDValue(); // Flip-flopping inputs.
8133 if (Mask[i] % NumElements != i / Scale)
8134 return SDValue(); // Non-consecutive strided elements.
8137 // If we fail to find an input, we have a zero-shuffle which should always
8138 // have already been handled.
8139 // FIXME: Maybe handle this here in case during blending we end up with one?
8143 return lowerVectorShuffleAsSpecificZeroOrAnyExtend(
8144 DL, VT, Scale, AnyExt, InputV, Subtarget, DAG);
8147 // The widest scale possible for extending is to a 64-bit integer.
8148 assert(Bits % 64 == 0 &&
8149 "The number of bits in a vector must be divisible by 64 on x86!");
8150 int NumExtElements = Bits / 64;
8152 // Each iteration, try extending the elements half as much, but into twice as many elements.
8154 for (; NumExtElements < NumElements; NumExtElements *= 2) {
8155 assert(NumElements % NumExtElements == 0 &&
8156 "The input vector size must be divisible by the extended size.");
8157 if (SDValue V = Lower(NumElements / NumExtElements))
8161 // General extends failed, but 128-bit vectors may be able to use MOVQ.
8165 // Returns one of the source operands if the shuffle can be reduced to a
8166 // MOVQ, copying the lower 64-bits and zero-extending to the upper 64-bits.
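// For example (hypothetical v4i32 mask): Mask = [0, 1, zz, zz] with the upper
// two elements zeroable keeps V1's low qword and zeroes the high qword, which
// is exactly MOVQ; the helper below returns V1 and we emit VZEXT_MOVL on the
// v2i64-cast value.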
8167 auto CanZExtLowHalf = [&]() {
8168 for (int i = NumElements / 2; i != NumElements; ++i)
8171 if (isSequentialOrUndefInRange(Mask, 0, NumElements / 2, 0))
8173 if (isSequentialOrUndefInRange(Mask, 0, NumElements / 2, NumElements))
8178 if (SDValue V = CanZExtLowHalf()) {
8179 V = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, V);
8180 V = DAG.getNode(X86ISD::VZEXT_MOVL, DL, MVT::v2i64, V);
8181 return DAG.getNode(ISD::BITCAST, DL, VT, V);
8184 // No viable ext lowering found.
8188 /// \brief Try to get a scalar value for a specific element of a vector.
8190 /// Looks through BUILD_VECTOR and SCALAR_TO_VECTOR nodes to find a scalar.
8191 static SDValue getScalarValueForVectorElement(SDValue V, int Idx,
8192 SelectionDAG &DAG) {
8193 MVT VT = V.getSimpleValueType();
8194 MVT EltVT = VT.getVectorElementType();
8195 while (V.getOpcode() == ISD::BITCAST)
8196 V = V.getOperand(0);
8197 // If the bitcasts shift the element size, we can't extract an equivalent element from it.
8199 MVT NewVT = V.getSimpleValueType();
8200 if (!NewVT.isVector() || NewVT.getScalarSizeInBits() != VT.getScalarSizeInBits())
8203 if (V.getOpcode() == ISD::BUILD_VECTOR ||
8204 (Idx == 0 && V.getOpcode() == ISD::SCALAR_TO_VECTOR))
8205 return DAG.getNode(ISD::BITCAST, SDLoc(V), EltVT, V.getOperand(Idx));
8210 /// \brief Helper to test for a load that can be folded with x86 shuffles.
8212 /// This is particularly important because the set of instructions varies
8213 /// significantly based on whether the operand is a load or not.
8214 static bool isShuffleFoldableLoad(SDValue V) {
8215 while (V.getOpcode() == ISD::BITCAST)
8216 V = V.getOperand(0);
8218 return ISD::isNON_EXTLoad(V.getNode());
8221 /// \brief Try to lower insertion of a single element into a zero vector.
8223 /// This is a common pattern for which we have especially efficient lowering
8224 /// patterns across all subtarget feature sets.
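/// For example (hypothetical v4f32 operands): Mask = [4, 1, 2, 3] inserts
/// element 0 of V2 into the low lane of V1 while keeping the other V1 lanes,
/// so on pre-SSE4.1 targets this lowers to a single MOVSS of V2 into V1.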
8225 static SDValue lowerVectorShuffleAsElementInsertion(
8226 MVT VT, SDLoc DL, SDValue V1, SDValue V2, ArrayRef<int> Mask,
8227 const X86Subtarget *Subtarget, SelectionDAG &DAG) {
8228 SmallBitVector Zeroable = computeZeroableShuffleElements(Mask, V1, V2);
8230 MVT EltVT = VT.getVectorElementType();
8232 int V2Index = std::find_if(Mask.begin(), Mask.end(),
8233 [&Mask](int M) { return M >= (int)Mask.size(); }) -
8235 bool IsV1Zeroable = true;
8236 for (int i = 0, Size = Mask.size(); i < Size; ++i)
8237 if (i != V2Index && !Zeroable[i]) {
8238 IsV1Zeroable = false;
8242 // Check for a single input from a SCALAR_TO_VECTOR node.
8243 // FIXME: All of this should be canonicalized into INSERT_VECTOR_ELT and
8244 // all the smarts here sunk into that routine. However, the current
8245 // lowering of BUILD_VECTOR makes that nearly impossible until the old
8246 // vector shuffle lowering is dead.
8247 if (SDValue V2S = getScalarValueForVectorElement(
8248 V2, Mask[V2Index] - Mask.size(), DAG)) {
8249 // We need to zext the scalar if it is smaller than an i32.
8250 V2S = DAG.getNode(ISD::BITCAST, DL, EltVT, V2S);
8251 if (EltVT == MVT::i8 || EltVT == MVT::i16) {
8252 // Using zext to expand a narrow element won't work for non-zero insertions.
8257 // Zero-extend directly to i32.
8259 V2S = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i32, V2S);
8261 V2 = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, ExtVT, V2S);
8262 } else if (Mask[V2Index] != (int)Mask.size() || EltVT == MVT::i8 ||
8263 EltVT == MVT::i16) {
8264 // Either not inserting from the low element of the input or the input
8265 // element size is too small to use VZEXT_MOVL to clear the high bits.
8269 if (!IsV1Zeroable) {
8270 // If V1 can't be treated as a zero vector we have fewer options to lower
8271 // this. We can't support integer vectors or non-zero targets cheaply, and
8272 // the V1 elements can't be permuted in any way.
8273 assert(VT == ExtVT && "Cannot change extended type when non-zeroable!");
8274 if (!VT.isFloatingPoint() || V2Index != 0)
8276 SmallVector<int, 8> V1Mask(Mask.begin(), Mask.end());
8277 V1Mask[V2Index] = -1;
8278 if (!isNoopShuffleMask(V1Mask))
8280 // This is essentially a special case blend operation, but if we have
8281 // general purpose blend operations, they are always faster. Bail and let
8282 // the rest of the lowering handle these as blends.
8283 if (Subtarget->hasSSE41())
8286 // Otherwise, use MOVSD or MOVSS.
8287 assert((EltVT == MVT::f32 || EltVT == MVT::f64) &&
8288 "Only two types of floating point element types to handle!");
8289 return DAG.getNode(EltVT == MVT::f32 ? X86ISD::MOVSS : X86ISD::MOVSD, DL,
8293 // This lowering only works for the low element with floating point vectors.
8294 if (VT.isFloatingPoint() && V2Index != 0)
8297 V2 = DAG.getNode(X86ISD::VZEXT_MOVL, DL, ExtVT, V2);
8299 V2 = DAG.getNode(ISD::BITCAST, DL, VT, V2);
8302 // If we have 4 or fewer lanes we can cheaply shuffle the element into
8303 // the desired position. Otherwise it is more efficient to do a vector
8304 // shift left. We know that we can do a vector shift left because all
8305 // the inputs are zero.
8306 if (VT.isFloatingPoint() || VT.getVectorNumElements() <= 4) {
8307 SmallVector<int, 4> V2Shuffle(Mask.size(), 1);
8308 V2Shuffle[V2Index] = 0;
8309 V2 = DAG.getVectorShuffle(VT, DL, V2, DAG.getUNDEF(VT), V2Shuffle);
8311 V2 = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, V2);
8313 X86ISD::VSHLDQ, DL, MVT::v2i64, V2,
8315 V2Index * EltVT.getSizeInBits()/8,
8316 DAG.getTargetLoweringInfo().getScalarShiftAmountTy(MVT::v2i64)));
8317 V2 = DAG.getNode(ISD::BITCAST, DL, VT, V2);
8323 /// \brief Try to lower broadcast of a single element.
8325 /// For convenience, this code also bundles all of the subtarget feature set
8326 /// filtering. While a little annoying to re-dispatch on type here, there isn't
8327 /// a convenient way to factor it out.
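/// For example (hypothetical operands): a v8f32 shuffle with an all-zero mask
/// becomes a single VBROADCAST when the splatted element can be traced to a
/// scalar load (AVX) or when AVX2 allows broadcasting directly from a
/// register; an integer type such as v8i32 is rejected outright without AVX2.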
8328 static SDValue lowerVectorShuffleAsBroadcast(MVT VT, SDLoc DL, SDValue V,
8330 const X86Subtarget *Subtarget,
8331 SelectionDAG &DAG) {
8332 if (!Subtarget->hasAVX())
8334 if (VT.isInteger() && !Subtarget->hasAVX2())
8337 // Check that the mask is a broadcast.
8338 int BroadcastIdx = -1;
8340 if (M >= 0 && BroadcastIdx == -1)
8342 else if (M >= 0 && M != BroadcastIdx)
8345 assert(BroadcastIdx < (int)Mask.size() && "We only expect to be called with "
8346 "a sorted mask where the broadcast "
8349 // Go up the chain of (vector) values to try and find a scalar load that
8350 // we can combine with the broadcast.
8352 switch (V.getOpcode()) {
8353 case ISD::CONCAT_VECTORS: {
8354 int OperandSize = Mask.size() / V.getNumOperands();
8355 V = V.getOperand(BroadcastIdx / OperandSize);
8356 BroadcastIdx %= OperandSize;
8360 case ISD::INSERT_SUBVECTOR: {
8361 SDValue VOuter = V.getOperand(0), VInner = V.getOperand(1);
8362 auto ConstantIdx = dyn_cast<ConstantSDNode>(V.getOperand(2));
8366 int BeginIdx = (int)ConstantIdx->getZExtValue();
8368 BeginIdx + (int)VInner.getValueType().getVectorNumElements();
8369 if (BroadcastIdx >= BeginIdx && BroadcastIdx < EndIdx) {
8370 BroadcastIdx -= BeginIdx;
8381 // Check if this is a broadcast of a scalar. We special case lowering
8382 // for scalars so that we can more effectively fold with loads.
8383 if (V.getOpcode() == ISD::BUILD_VECTOR ||
8384 (V.getOpcode() == ISD::SCALAR_TO_VECTOR && BroadcastIdx == 0)) {
8385 V = V.getOperand(BroadcastIdx);
8387 // If the scalar isn't a load, we can't broadcast from it in AVX1, only with AVX2.
8389 if (!Subtarget->hasAVX2() && !isShuffleFoldableLoad(V))
8391 } else if (BroadcastIdx != 0 || !Subtarget->hasAVX2()) {
8392 // We can't broadcast from a vector register w/o AVX2, and we can only
8393 // broadcast from the zero-element of a vector register.
8397 return DAG.getNode(X86ISD::VBROADCAST, DL, VT, V);
8400 // Check for whether we can use INSERTPS to perform the shuffle. We only use
8401 // INSERTPS when the V1 elements are already in the correct locations
8402 // because otherwise we can just always use two SHUFPS instructions which
8403 // are much smaller to encode than a SHUFPS and an INSERTPS. We can also
8404 // perform INSERTPS if a single V1 element is out of place and all V2
8405 // elements are zeroable.
8406 static SDValue lowerVectorShuffleAsInsertPS(SDValue Op, SDValue V1, SDValue V2,
8408 SelectionDAG &DAG) {
8409 assert(Op.getSimpleValueType() == MVT::v4f32 && "Bad shuffle type!");
8410 assert(V1.getSimpleValueType() == MVT::v4f32 && "Bad operand type!");
8411 assert(V2.getSimpleValueType() == MVT::v4f32 && "Bad operand type!");
8412 assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");
8414 SmallBitVector Zeroable = computeZeroableShuffleElements(Mask, V1, V2);
8417 int V1DstIndex = -1;
8418 int V2DstIndex = -1;
8419 bool V1UsedInPlace = false;
8421 for (int i = 0; i < 4; ++i) {
8422 // Synthesize a zero mask from the zeroable elements (includes undefs).
8428 // Flag if we use any V1 inputs in place.
8430 V1UsedInPlace = true;
8434 // We can only insert a single non-zeroable element.
8435 if (V1DstIndex != -1 || V2DstIndex != -1)
8439 // V1 input out of place for insertion.
8442 // V2 input for insertion.
8447 // Don't bother if we have no (non-zeroable) element for insertion.
8448 if (V1DstIndex == -1 && V2DstIndex == -1)
8451 // Determine element insertion src/dst indices. The src index is from the
8452 // start of the inserted vector, not the start of the concatenated vector.
8453 unsigned V2SrcIndex = 0;
8454 if (V1DstIndex != -1) {
8455 // If we have a V1 input out of place, we use V1 as the V2 element insertion
8456 // and don't use the original V2 at all.
8457 V2SrcIndex = Mask[V1DstIndex];
8458 V2DstIndex = V1DstIndex;
8461 V2SrcIndex = Mask[V2DstIndex] - 4;
8464 // If no V1 inputs are used in place, then the result is created only from
8465 // the zero mask and the V2 insertion - so remove V1 dependency.
8467 V1 = DAG.getUNDEF(MVT::v4f32);
8469 unsigned InsertPSMask = V2SrcIndex << 6 | V2DstIndex << 4 | ZMask;
8470 assert((InsertPSMask & ~0xFFu) == 0 && "Invalid mask!");
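// Immediate layout example (hypothetical field values): taking element 1 of
// V2 (V2SrcIndex = 1), writing it to lane 2 (V2DstIndex = 2) and zeroing lane
// 3 (ZMask = 0b1000) gives InsertPSMask = (1 << 6) | (2 << 4) | 0x8 = 0x68,
// matching the INSERTPS imm8 fields: bits [7:6] source element, [5:4]
// destination lane, [3:0] zero mask.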
8472 // Insert the V2 element into the desired position.
8474 return DAG.getNode(X86ISD::INSERTPS, DL, MVT::v4f32, V1, V2,
8475 DAG.getConstant(InsertPSMask, MVT::i8));
8478 /// \brief Try to lower a shuffle as a permute of the inputs followed by an
8479 /// UNPCK instruction.
8481 /// This specifically targets cases where we end up with alternating between
8482 /// the two inputs, and so can permute them into something that feeds a single
8483 /// UNPCK instruction. Note that this routine only targets integer vectors
8484 /// because for floating point vectors we have a generalized SHUFPS lowering
8485 /// strategy that handles everything that doesn't *exactly* match an unpack,
8486 /// making this clever lowering unnecessary.
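/// For example (hypothetical v4i32 mask): Mask = [0, 6, 1, 7] is not an
/// unpack directly, but after shuffling V2 by [2, 3, -1, -1] (and leaving V1
/// in place) a single UNPCKL of the two results interleaves to
/// [V1[0], V2[2], V1[1], V2[3]], which is exactly the requested mask.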
8487 static SDValue lowerVectorShuffleAsUnpack(MVT VT, SDLoc DL, SDValue V1,
8488 SDValue V2, ArrayRef<int> Mask,
8489 SelectionDAG &DAG) {
8490 assert(!VT.isFloatingPoint() &&
8491 "This routine only supports integer vectors.");
8492 assert(!isSingleInputShuffleMask(Mask) &&
8493 "This routine should only be used when blending two inputs.");
8494 assert(Mask.size() >= 2 && "Single element masks are invalid.");
8496 int Size = Mask.size();
8498 int NumLoInputs = std::count_if(Mask.begin(), Mask.end(), [Size](int M) {
8499 return M >= 0 && M % Size < Size / 2;
8501 int NumHiInputs = std::count_if(
8502 Mask.begin(), Mask.end(), [Size](int M) { return M % Size >= Size / 2; });
8504 bool UnpackLo = NumLoInputs >= NumHiInputs;
8506 auto TryUnpack = [&](MVT UnpackVT, int Scale) {
8507 SmallVector<int, 32> V1Mask(Mask.size(), -1);
8508 SmallVector<int, 32> V2Mask(Mask.size(), -1);
8510 for (int i = 0; i < Size; ++i) {
8514 // Each element of the unpack contains Scale elements from this mask.
8515 int UnpackIdx = i / Scale;
8517 // We only handle the case where V1 feeds the first slots of the unpack.
8518 // We rely on canonicalization to ensure this is the case.
8519 if ((UnpackIdx % 2 == 0) != (Mask[i] < Size))
8522 // Setup the mask for this input. The indexing is tricky as we have to
8523 // handle the unpack stride.
8524 SmallVectorImpl<int> &VMask = (UnpackIdx % 2 == 0) ? V1Mask : V2Mask;
8525 VMask[(UnpackIdx / 2) * Scale + i % Scale + (UnpackLo ? 0 : Size / 2)] =
8529 // Shuffle the inputs into place.
8530 V1 = DAG.getVectorShuffle(VT, DL, V1, DAG.getUNDEF(VT), V1Mask);
8531 V2 = DAG.getVectorShuffle(VT, DL, V2, DAG.getUNDEF(VT), V2Mask);
8533 // Cast the inputs to the type we will use to unpack them.
8534 V1 = DAG.getNode(ISD::BITCAST, DL, UnpackVT, V1);
8535 V2 = DAG.getNode(ISD::BITCAST, DL, UnpackVT, V2);
8537 // Unpack the inputs and cast the result back to the desired type.
8538 return DAG.getNode(ISD::BITCAST, DL, VT,
8539 DAG.getNode(UnpackLo ? X86ISD::UNPCKL : X86ISD::UNPCKH,
8540 DL, UnpackVT, V1, V2));
8543 // We try each unpack from the largest to the smallest to try and find one
8544 // that fits this mask.
8545 int OrigNumElements = VT.getVectorNumElements();
8546 int OrigScalarSize = VT.getScalarSizeInBits();
8547 for (int ScalarSize = 64; ScalarSize >= OrigScalarSize; ScalarSize /= 2) {
8548 int Scale = ScalarSize / OrigScalarSize;
8549 int NumElements = OrigNumElements / Scale;
8550 MVT UnpackVT = MVT::getVectorVT(MVT::getIntegerVT(ScalarSize), NumElements);
8551 if (SDValue Unpack = TryUnpack(UnpackVT, Scale))
8558 /// \brief Handle lowering of 2-lane 64-bit floating point shuffles.
8560 /// This is the basis function for the 2-lane 64-bit shuffles as we have full
8561 /// support for floating point shuffles but not integer shuffles. These
8562 /// instructions will incur a domain crossing penalty on some chips though so
8563 /// it is better to avoid lowering through this for integer vectors where possible.
8565 static SDValue lowerV2F64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
8566 const X86Subtarget *Subtarget,
8567 SelectionDAG &DAG) {
8569 assert(Op.getSimpleValueType() == MVT::v2f64 && "Bad shuffle type!");
8570 assert(V1.getSimpleValueType() == MVT::v2f64 && "Bad operand type!");
8571 assert(V2.getSimpleValueType() == MVT::v2f64 && "Bad operand type!");
8572 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
8573 ArrayRef<int> Mask = SVOp->getMask();
8574 assert(Mask.size() == 2 && "Unexpected mask size for v2 shuffle!");
8576 if (isSingleInputShuffleMask(Mask)) {
8577 // Use low duplicate instructions for masks that match their pattern.
8578 if (Subtarget->hasSSE3())
8579 if (isShuffleEquivalent(V1, V2, Mask, 0, 0))
8580 return DAG.getNode(X86ISD::MOVDDUP, DL, MVT::v2f64, V1);
8582 // Straight shuffle of a single input vector. Simulate this by using the
8583 // single input as both of the "inputs" to this instruction.
8584 unsigned SHUFPDMask = (Mask[0] == 1) | ((Mask[1] == 1) << 1);
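// For example (hypothetical mask): a single-input Mask = [1, 0] yields
// SHUFPDMask = 0b01, and SHUFPD/VPERMILPD with that immediate swaps the two
// doubles of V1 to produce [V1[1], V1[0]].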
8586 if (Subtarget->hasAVX()) {
8587 // If we have AVX, we can use VPERMILPD which will allow folding a load
8588 // into the shuffle.
8589 return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v2f64, V1,
8590 DAG.getConstant(SHUFPDMask, MVT::i8));
8593 return DAG.getNode(X86ISD::SHUFP, SDLoc(Op), MVT::v2f64, V1, V1,
8594 DAG.getConstant(SHUFPDMask, MVT::i8));
8596 assert(Mask[0] >= 0 && Mask[0] < 2 && "Non-canonicalized blend!");
8597 assert(Mask[1] >= 2 && "Non-canonicalized blend!");
8599 // If we have a single input, insert that into V1 if we can do so cheaply.
8600 if ((Mask[0] >= 2) + (Mask[1] >= 2) == 1) {
8601 if (SDValue Insertion = lowerVectorShuffleAsElementInsertion(
8602 MVT::v2f64, DL, V1, V2, Mask, Subtarget, DAG))
8604 // Try inverting the insertion since for v2 masks it is easy to do and we
8605 // can't reliably sort the mask one way or the other.
8606 int InverseMask[2] = {Mask[0] < 0 ? -1 : (Mask[0] ^ 2),
8607 Mask[1] < 0 ? -1 : (Mask[1] ^ 2)};
8608 if (SDValue Insertion = lowerVectorShuffleAsElementInsertion(
8609 MVT::v2f64, DL, V2, V1, InverseMask, Subtarget, DAG))
8613 // Try to use one of the special instruction patterns to handle two common
8614 // blend patterns if a zero-blend above didn't work.
8615 if (isShuffleEquivalent(V1, V2, Mask, 0, 3) || isShuffleEquivalent(V1, V2, Mask, 1, 3))
8616 if (SDValue V1S = getScalarValueForVectorElement(V1, Mask[0], DAG))
8617 // We can either use a special instruction to load over the low double or
8618 // to move just the low double.
8620 isShuffleFoldableLoad(V1S) ? X86ISD::MOVLPD : X86ISD::MOVSD,
8622 DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v2f64, V1S));
8624 if (Subtarget->hasSSE41())
8625 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v2f64, V1, V2, Mask,
8629 // Use dedicated unpack instructions for masks that match their pattern.
8630 if (isShuffleEquivalent(V1, V2, Mask, 0, 2))
8631 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v2f64, V1, V2);
8632 if (isShuffleEquivalent(V1, V2, Mask, 1, 3))
8633 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v2f64, V1, V2);
8635 unsigned SHUFPDMask = (Mask[0] == 1) | (((Mask[1] - 2) == 1) << 1);
8636 return DAG.getNode(X86ISD::SHUFP, SDLoc(Op), MVT::v2f64, V1, V2,
8637 DAG.getConstant(SHUFPDMask, MVT::i8));
8640 /// \brief Handle lowering of 2-lane 64-bit integer shuffles.
8642 /// Tries to lower a 2-lane 64-bit shuffle using shuffle operations provided by
8643 /// the integer unit to minimize domain crossing penalties. However, for blends
8644 /// it falls back to the floating point shuffle operation with appropriate bit casting.
8646 static SDValue lowerV2I64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
8647 const X86Subtarget *Subtarget,
8648 SelectionDAG &DAG) {
8650 assert(Op.getSimpleValueType() == MVT::v2i64 && "Bad shuffle type!");
8651 assert(V1.getSimpleValueType() == MVT::v2i64 && "Bad operand type!");
8652 assert(V2.getSimpleValueType() == MVT::v2i64 && "Bad operand type!");
8653 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
8654 ArrayRef<int> Mask = SVOp->getMask();
8655 assert(Mask.size() == 2 && "Unexpected mask size for v2 shuffle!");
8657 if (isSingleInputShuffleMask(Mask)) {
8658 // Check for being able to broadcast a single element.
8659 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v2i64, DL, V1,
8660 Mask, Subtarget, DAG))
8663 // Straight shuffle of a single input vector. For everything from SSE2
8664 // onward this has a single fast instruction with no scary immediates.
8665 // We have to map the mask as it is actually a v4i32 shuffle instruction.
8666 V1 = DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, V1);
8667 int WidenedMask[4] = {
8668 std::max(Mask[0], 0) * 2, std::max(Mask[0], 0) * 2 + 1,
8669 std::max(Mask[1], 0) * 2, std::max(Mask[1], 0) * 2 + 1};
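// For example (hypothetical mask): a v2i64 Mask = [1, 1] widens to the v4i32
// mask [2, 3, 2, 3], so the PSHUFD below uses immediate 0xEE and duplicates
// the high qword into both halves of the result.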
8671 ISD::BITCAST, DL, MVT::v2i64,
8672 DAG.getNode(X86ISD::PSHUFD, SDLoc(Op), MVT::v4i32, V1,
8673 getV4X86ShuffleImm8ForMask(WidenedMask, DAG)));
8676 // Try to use byte shift instructions.
8677 if (SDValue Shift = lowerVectorShuffleAsByteShift(
8678 DL, MVT::v2i64, V1, V2, Mask, DAG))
8681 // If we have a single input from V2 insert that into V1 if we can do so cheaply.
8683 if ((Mask[0] >= 2) + (Mask[1] >= 2) == 1) {
8684 if (SDValue Insertion = lowerVectorShuffleAsElementInsertion(
8685 MVT::v2i64, DL, V1, V2, Mask, Subtarget, DAG))
8687 // Try inverting the insertion since for v2 masks it is easy to do and we
8688 // can't reliably sort the mask one way or the other.
8689 int InverseMask[2] = {Mask[0] < 0 ? -1 : (Mask[0] ^ 2),
8690 Mask[1] < 0 ? -1 : (Mask[1] ^ 2)};
8691 if (SDValue Insertion = lowerVectorShuffleAsElementInsertion(
8692 MVT::v2i64, DL, V2, V1, InverseMask, Subtarget, DAG))
8696 // We have different paths for blend lowering, but they all must use the
8697 // *exact* same predicate.
8698 bool IsBlendSupported = Subtarget->hasSSE41();
8699 if (IsBlendSupported)
8700 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v2i64, V1, V2, Mask,
8704 // Use dedicated unpack instructions for masks that match their pattern.
8705 if (isShuffleEquivalent(V1, V2, Mask, 0, 2))
8706 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v2i64, V1, V2);
8707 if (isShuffleEquivalent(V1, V2, Mask, 1, 3))
8708 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v2i64, V1, V2);
8710 // Try to use byte rotation instructions.
8711 // It's more profitable for pre-SSSE3 to use shuffles/unpacks.
8712 if (Subtarget->hasSSSE3())
8713 if (SDValue Rotate = lowerVectorShuffleAsByteRotate(
8714 DL, MVT::v2i64, V1, V2, Mask, Subtarget, DAG))
8717 // If we have direct support for blends, we should lower by decomposing into
8718 // a permute. That will be faster than the domain cross.
8719 if (IsBlendSupported)
8720 return lowerVectorShuffleAsDecomposedShuffleBlend(DL, MVT::v2i64, V1, V2,
8723 // We implement this with SHUFPD which is pretty lame because it will likely
8724 // incur 2 cycles of stall for integer vectors on Nehalem and older chips.
8725 // However, all the alternatives are still more cycles and newer chips don't
8726 // have this problem. It would be really nice if x86 had better shuffles here.
8727 V1 = DAG.getNode(ISD::BITCAST, DL, MVT::v2f64, V1);
8728 V2 = DAG.getNode(ISD::BITCAST, DL, MVT::v2f64, V2);
8729 return DAG.getNode(ISD::BITCAST, DL, MVT::v2i64,
8730 DAG.getVectorShuffle(MVT::v2f64, DL, V1, V2, Mask));
8733 /// \brief Test whether this can be lowered with a single SHUFPS instruction.
8735 /// This is used to disable more specialized lowerings when the shufps lowering
8736 /// will happen to be efficient.
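/// For example (hypothetical masks): [0, 1, 6, 7] takes its low half from V1
/// and its high half from V2, so one SHUFPS suffices; [0, 4, 2, 6] mixes both
/// inputs inside each half and is rejected here.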
8737 static bool isSingleSHUFPSMask(ArrayRef<int> Mask) {
8738 // This routine only handles 128-bit shufps.
8739 assert(Mask.size() == 4 && "Unsupported mask size!");
8741 // To lower with a single SHUFPS we need to have the low half and high half
8742 // each requiring a single input.
8743 if (Mask[0] != -1 && Mask[1] != -1 && (Mask[0] < 4) != (Mask[1] < 4))
8745 if (Mask[2] != -1 && Mask[3] != -1 && (Mask[2] < 4) != (Mask[3] < 4))
8751 /// \brief Lower a vector shuffle using the SHUFPS instruction.
8753 /// This is a helper routine dedicated to lowering vector shuffles using SHUFPS.
8754 /// It makes no assumptions about whether this is the *best* lowering; it simply uses it.
8756 static SDValue lowerVectorShuffleWithSHUFPS(SDLoc DL, MVT VT,
8757 ArrayRef<int> Mask, SDValue V1,
8758 SDValue V2, SelectionDAG &DAG) {
8759 SDValue LowV = V1, HighV = V2;
8760 int NewMask[4] = {Mask[0], Mask[1], Mask[2], Mask[3]};
8763 std::count_if(Mask.begin(), Mask.end(), [](int M) { return M >= 4; });
8765 if (NumV2Elements == 1) {
8767 std::find_if(Mask.begin(), Mask.end(), [](int M) { return M >= 4; }) -
8770 // Compute the index adjacent to V2Index and in the same half by toggling the low bit.
8772 int V2AdjIndex = V2Index ^ 1;
8774 if (Mask[V2AdjIndex] == -1) {
8775 // Handles all the cases where we have a single V2 element and an undef.
8776 // This will only ever happen in the high lanes because we commute the
8777 // vector otherwise.
8779 std::swap(LowV, HighV);
8780 NewMask[V2Index] -= 4;
8782 // Handle the case where the V2 element ends up adjacent to a V1 element.
8783 // To make this work, blend them together as the first step.
8784 int V1Index = V2AdjIndex;
8785 int BlendMask[4] = {Mask[V2Index] - 4, 0, Mask[V1Index], 0};
8786 V2 = DAG.getNode(X86ISD::SHUFP, DL, VT, V2, V1,
8787 getV4X86ShuffleImm8ForMask(BlendMask, DAG));
8789 // Now proceed to reconstruct the final blend as we have the necessary
8790 // high or low half formed.
8797 NewMask[V1Index] = 2; // We put the V1 element in V2[2].
8798 NewMask[V2Index] = 0; // We shifted the V2 element into V2[0].
8800 } else if (NumV2Elements == 2) {
8801 if (Mask[0] < 4 && Mask[1] < 4) {
8802 // Handle the easy case where we have V1 in the low lanes and V2 in the high lanes.
8806 } else if (Mask[2] < 4 && Mask[3] < 4) {
8807 // We also handle the reversed case because this utility may get called
8808 // when we detect a SHUFPS pattern but can't easily commute the shuffle to
8809 // arrange things in the right direction.
8815 // We have a mixture of V1 and V2 in both low and high lanes. Rather than
8816 // trying to place elements directly, just blend them and set up the final
8817 // shuffle to place them.
8819 // The first two blend mask elements are for V1, the second two are for V2.
8821 int BlendMask[4] = {Mask[0] < 4 ? Mask[0] : Mask[1],
8822 Mask[2] < 4 ? Mask[2] : Mask[3],
8823 (Mask[0] >= 4 ? Mask[0] : Mask[1]) - 4,
8824 (Mask[2] >= 4 ? Mask[2] : Mask[3]) - 4};
8825 V1 = DAG.getNode(X86ISD::SHUFP, DL, VT, V1, V2,
8826 getV4X86ShuffleImm8ForMask(BlendMask, DAG));
8828 // Now we do a normal shuffle of V1 by giving V1 as both operands to the shuffle.
8831 NewMask[0] = Mask[0] < 4 ? 0 : 2;
8832 NewMask[1] = Mask[0] < 4 ? 2 : 0;
8833 NewMask[2] = Mask[2] < 4 ? 1 : 3;
8834 NewMask[3] = Mask[2] < 4 ? 3 : 1;
8837 return DAG.getNode(X86ISD::SHUFP, DL, VT, LowV, HighV,
8838 getV4X86ShuffleImm8ForMask(NewMask, DAG));
8841 /// \brief Lower 4-lane 32-bit floating point shuffles.
8843 /// Uses instructions exclusively from the floating point unit to minimize
8844 /// domain crossing penalties, as these are sufficient to implement all v4f32 shuffles.
8846 static SDValue lowerV4F32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
8847 const X86Subtarget *Subtarget,
8848 SelectionDAG &DAG) {
8850 assert(Op.getSimpleValueType() == MVT::v4f32 && "Bad shuffle type!");
8851 assert(V1.getSimpleValueType() == MVT::v4f32 && "Bad operand type!");
8852 assert(V2.getSimpleValueType() == MVT::v4f32 && "Bad operand type!");
8853 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
8854 ArrayRef<int> Mask = SVOp->getMask();
8855 assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");
8858 std::count_if(Mask.begin(), Mask.end(), [](int M) { return M >= 4; });
8860 if (NumV2Elements == 0) {
8861 // Check for being able to broadcast a single element.
8862 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v4f32, DL, V1,
8863 Mask, Subtarget, DAG))
8866 // Use even/odd duplicate instructions for masks that match their pattern.
8867 if (Subtarget->hasSSE3()) {
8868 if (isShuffleEquivalent(V1, V2, Mask, 0, 0, 2, 2))
8869 return DAG.getNode(X86ISD::MOVSLDUP, DL, MVT::v4f32, V1);
8870 if (isShuffleEquivalent(V1, V2, Mask, 1, 1, 3, 3))
8871 return DAG.getNode(X86ISD::MOVSHDUP, DL, MVT::v4f32, V1);
8874 if (Subtarget->hasAVX()) {
8875 // If we have AVX, we can use VPERMILPS which will allow folding a load
8876 // into the shuffle.
8877 return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v4f32, V1,
8878 getV4X86ShuffleImm8ForMask(Mask, DAG));
8881 // Otherwise, use a straight shuffle of a single input vector. We pass the
8882 // input vector to both operands to simulate this with a SHUFPS.
8883 return DAG.getNode(X86ISD::SHUFP, DL, MVT::v4f32, V1, V1,
8884 getV4X86ShuffleImm8ForMask(Mask, DAG));
8887 // There are special ways we can lower some single-element blends. However, we
8888 // have custom ways we can lower more complex single-element blends below that
8889 // we defer to if both this and BLENDPS fail to match, so restrict this to
8890 // when the V2 input is targeting element 0 of the mask -- that is the fast case here.
8892 if (NumV2Elements == 1 && Mask[0] >= 4)
8893 if (SDValue V = lowerVectorShuffleAsElementInsertion(MVT::v4f32, DL, V1, V2,
8894 Mask, Subtarget, DAG))
8897 if (Subtarget->hasSSE41()) {
8898 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v4f32, V1, V2, Mask,
8902 // Use INSERTPS if we can complete the shuffle efficiently.
8903 if (SDValue V = lowerVectorShuffleAsInsertPS(Op, V1, V2, Mask, DAG))
8906 if (!isSingleSHUFPSMask(Mask))
8907 if (SDValue BlendPerm = lowerVectorShuffleAsBlendAndPermute(
8908 DL, MVT::v4f32, V1, V2, Mask, DAG))
8912 // Use dedicated unpack instructions for masks that match their pattern.
8913 if (isShuffleEquivalent(V1, V2, Mask, 0, 4, 1, 5))
8914 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v4f32, V1, V2);
8915 if (isShuffleEquivalent(V1, V2, Mask, 2, 6, 3, 7))
8916 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v4f32, V1, V2);
8918 // Otherwise fall back to a SHUFPS lowering strategy.
8919 return lowerVectorShuffleWithSHUFPS(DL, MVT::v4f32, Mask, V1, V2, DAG);
8922 /// \brief Lower 4-lane i32 vector shuffles.
8924 /// We try to handle these with integer-domain shuffles where we can, but for
8925 /// blends we use the floating point domain blend instructions.
8926 static SDValue lowerV4I32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
8927 const X86Subtarget *Subtarget,
8928 SelectionDAG &DAG) {
8930 assert(Op.getSimpleValueType() == MVT::v4i32 && "Bad shuffle type!");
8931 assert(V1.getSimpleValueType() == MVT::v4i32 && "Bad operand type!");
8932 assert(V2.getSimpleValueType() == MVT::v4i32 && "Bad operand type!");
8933 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
8934 ArrayRef<int> Mask = SVOp->getMask();
8935 assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");
8937 // Whenever we can lower this as a zext, that instruction is strictly faster
8938 // than any alternative. It also allows us to fold memory operands into the
8939 // shuffle in many cases.
8940 if (SDValue ZExt = lowerVectorShuffleAsZeroOrAnyExtend(DL, MVT::v4i32, V1, V2,
8941 Mask, Subtarget, DAG))
8945 std::count_if(Mask.begin(), Mask.end(), [](int M) { return M >= 4; });
8947 if (NumV2Elements == 0) {
8948 // Check for being able to broadcast a single element.
8949 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v4i32, DL, V1,
8950 Mask, Subtarget, DAG))
8953 // Straight shuffle of a single input vector. For everything from SSE2
8954 // onward this has a single fast instruction with no scary immediates.
8955 // We coerce the shuffle pattern to be compatible with UNPCK instructions
8956 // but we aren't actually going to use the UNPCK instruction because doing
8957 // so prevents folding a load into this instruction or making a copy.
8958 const int UnpackLoMask[] = {0, 0, 1, 1};
8959 const int UnpackHiMask[] = {2, 2, 3, 3};
8960 if (isShuffleEquivalent(V1, V2, Mask, 0, 0, 1, 1))
8961 Mask = UnpackLoMask;
8962 else if (isShuffleEquivalent(V1, V2, Mask, 2, 2, 3, 3))
8963 Mask = UnpackHiMask;
8965 return DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32, V1,
8966 getV4X86ShuffleImm8ForMask(Mask, DAG));
8969 // Try to use bit shift instructions.
8970 if (SDValue Shift = lowerVectorShuffleAsBitShift(
8971 DL, MVT::v4i32, V1, V2, Mask, DAG))
8974 // Try to use byte shift instructions.
8975 if (SDValue Shift = lowerVectorShuffleAsByteShift(
8976 DL, MVT::v4i32, V1, V2, Mask, DAG))
8979 // There are special ways we can lower some single-element blends.
8980 if (NumV2Elements == 1)
8981 if (SDValue V = lowerVectorShuffleAsElementInsertion(MVT::v4i32, DL, V1, V2,
8982 Mask, Subtarget, DAG))
8985 // We have different paths for blend lowering, but they all must use the
8986 // *exact* same predicate.
8987 bool IsBlendSupported = Subtarget->hasSSE41();
8988 if (IsBlendSupported)
8989 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v4i32, V1, V2, Mask,
8993 if (SDValue Masked =
8994 lowerVectorShuffleAsBitMask(DL, MVT::v4i32, V1, V2, Mask, DAG))
8997 // Use dedicated unpack instructions for masks that match their pattern.
8998 if (isShuffleEquivalent(V1, V2, Mask, 0, 4, 1, 5))
8999 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v4i32, V1, V2);
9000 if (isShuffleEquivalent(V1, V2, Mask, 2, 6, 3, 7))
9001 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v4i32, V1, V2);
9003 // Try to use byte rotation instructions.
9004 // It's more profitable for pre-SSSE3 to use shuffles/unpacks.
9005 if (Subtarget->hasSSSE3())
9006 if (SDValue Rotate = lowerVectorShuffleAsByteRotate(
9007 DL, MVT::v4i32, V1, V2, Mask, Subtarget, DAG))
9010 // If we have direct support for blends, we should lower by decomposing into
9011 // a permute. That will be faster than the domain cross.
9012 if (IsBlendSupported)
9013 return lowerVectorShuffleAsDecomposedShuffleBlend(DL, MVT::v4i32, V1, V2,
9016 // Try to lower by permuting the inputs into an unpack instruction.
9017 if (SDValue Unpack =
9018 lowerVectorShuffleAsUnpack(MVT::v4i32, DL, V1, V2, Mask, DAG))
9021 // We implement this with SHUFPS because it can blend from two vectors.
9022 // Because we're going to eventually use SHUFPS, we use SHUFPS even to build
9023 // up the inputs, bypassing domain shift penalties that we would incur if we
9024 // directly used PSHUFD on Nehalem and older. For newer chips, this isn't relevant.
9026 return DAG.getNode(ISD::BITCAST, DL, MVT::v4i32,
9027 DAG.getVectorShuffle(
9029 DAG.getNode(ISD::BITCAST, DL, MVT::v4f32, V1),
9030 DAG.getNode(ISD::BITCAST, DL, MVT::v4f32, V2), Mask));
9033 /// \brief Lowering of single-input v8i16 shuffles is the cornerstone of SSE2
9034 /// shuffle lowering, and the most complex part.
9036 /// The lowering strategy is to try to form pairs of input lanes which are
9037 /// targeted at the same half of the final vector, and then use a dword shuffle
9038 /// to place them onto the right half, and finally unpack the paired lanes into
9039 /// their final position.
9041 /// The exact breakdown of how to form these dword pairs and align them on the
9042 /// correct sides is really tricky. See the comments within the function for
9043 /// more of the details.
9044 static SDValue lowerV8I16SingleInputVectorShuffle(
9045 SDLoc DL, SDValue V, MutableArrayRef<int> Mask,
9046 const X86Subtarget *Subtarget, SelectionDAG &DAG) {
9047 assert(V.getSimpleValueType() == MVT::v8i16 && "Bad input type!");
9048 MutableArrayRef<int> LoMask = Mask.slice(0, 4);
9049 MutableArrayRef<int> HiMask = Mask.slice(4, 4);
9051 SmallVector<int, 4> LoInputs;
9052 std::copy_if(LoMask.begin(), LoMask.end(), std::back_inserter(LoInputs),
9053 [](int M) { return M >= 0; });
9054 std::sort(LoInputs.begin(), LoInputs.end());
9055 LoInputs.erase(std::unique(LoInputs.begin(), LoInputs.end()), LoInputs.end());
9056 SmallVector<int, 4> HiInputs;
9057 std::copy_if(HiMask.begin(), HiMask.end(), std::back_inserter(HiInputs),
9058 [](int M) { return M >= 0; });
9059 std::sort(HiInputs.begin(), HiInputs.end());
9060 HiInputs.erase(std::unique(HiInputs.begin(), HiInputs.end()), HiInputs.end());
9062 std::lower_bound(LoInputs.begin(), LoInputs.end(), 4) - LoInputs.begin();
9063 int NumHToL = LoInputs.size() - NumLToL;
9065 std::lower_bound(HiInputs.begin(), HiInputs.end(), 4) - HiInputs.begin();
9066 int NumHToH = HiInputs.size() - NumLToH;
9067 MutableArrayRef<int> LToLInputs(LoInputs.data(), NumLToL);
9068 MutableArrayRef<int> LToHInputs(HiInputs.data(), NumLToH);
9069 MutableArrayRef<int> HToLInputs(LoInputs.data() + NumLToL, NumHToL);
9070 MutableArrayRef<int> HToHInputs(HiInputs.data() + NumLToH, NumHToH);
9072 // Check for being able to broadcast a single element.
9073 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v8i16, DL, V,
9074 Mask, Subtarget, DAG))
9077 // Try to use bit shift instructions.
9078 if (SDValue Shift = lowerVectorShuffleAsBitShift(
9079 DL, MVT::v8i16, V, V, Mask, DAG))
9082 // Try to use byte shift instructions.
9083 if (SDValue Shift = lowerVectorShuffleAsByteShift(
9084 DL, MVT::v8i16, V, V, Mask, DAG))
9087 // Use dedicated unpack instructions for masks that match their pattern.
9088 if (isShuffleEquivalent(V, V, Mask, 0, 0, 1, 1, 2, 2, 3, 3))
9089 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v8i16, V, V);
9090 if (isShuffleEquivalent(V, V, Mask, 4, 4, 5, 5, 6, 6, 7, 7))
9091 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v8i16, V, V);
9093 // Try to use byte rotation instructions.
9094 if (SDValue Rotate = lowerVectorShuffleAsByteRotate(
9095 DL, MVT::v8i16, V, V, Mask, Subtarget, DAG))
9098 // Simplify the 1-into-3 and 3-into-1 cases with a single pshufd. For all
9099 // such inputs we can swap two of the dwords across the half mark and end up
9100 // with <=2 inputs to each half in each half. Once there, we can fall through
9101 // to the generic code below. For example:
9103 // Input: [a, b, c, d, e, f, g, h] -PSHUFD[0,2,1,3]-> [a, b, e, f, c, d, g, h]
9104 // Mask: [0, 1, 2, 7, 4, 5, 6, 3] -----------------> [0, 1, 4, 7, 2, 3, 6, 5]
9106 // However in some very rare cases we have a 1-into-3 or 3-into-1 on one half
9107 // and an existing 2-into-2 on the other half. In this case we may have to
9108 // pre-shuffle the 2-into-2 half to avoid turning it into a 3-into-1 or
9109 // 1-into-3 which could cause us to cycle endlessly fixing each side in turn.
9110 // Fortunately, we don't have to handle anything but a 2-into-2 pattern
9111 // because any other situation (including a 3-into-1 or 1-into-3 in the other
9112 // half than the one we target for fixing) will be fixed when we re-enter this
9113 // path. We will also combine away any sequence of PSHUFD instructions that
9114 // result into a single instruction. Here is an example of the tricky case:
9116 // Input: [a, b, c, d, e, f, g, h] -PSHUFD[0,2,1,3]-> [a, b, e, f, c, d, g, h]
9117 // Mask: [3, 7, 1, 0, 2, 7, 3, 5] -THIS-IS-BAD!!!!-> [5, 7, 1, 0, 4, 7, 5, 3]
9119 // This now has a 1-into-3 in the high half! Instead, we do two shuffles:
9121 // Input: [a, b, c, d, e, f, g, h] PSHUFHW[0,2,1,3]-> [a, b, c, d, e, g, f, h]
9122 // Mask: [3, 7, 1, 0, 2, 7, 3, 5] -----------------> [3, 7, 1, 0, 2, 7, 3, 6]
9124 // Input: [a, b, c, d, e, g, f, h] -PSHUFD[0,2,1,3]-> [a, b, e, g, c, d, f, h]
9125 // Mask: [3, 7, 1, 0, 2, 7, 3, 6] -----------------> [5, 7, 1, 0, 4, 7, 5, 6]
9127 // The result is fine to be handled by the generic logic.
9128 auto balanceSides = [&](ArrayRef<int> AToAInputs, ArrayRef<int> BToAInputs,
9129 ArrayRef<int> BToBInputs, ArrayRef<int> AToBInputs,
9130 int AOffset, int BOffset) {
9131 assert((AToAInputs.size() == 3 || AToAInputs.size() == 1) &&
9132 "Must call this with A having 3 or 1 inputs from the A half.");
9133 assert((BToAInputs.size() == 1 || BToAInputs.size() == 3) &&
9134 "Must call this with B having 1 or 3 inputs from the B half.");
9135 assert(AToAInputs.size() + BToAInputs.size() == 4 &&
9136 "Must call this with either 3:1 or 1:3 inputs (summing to 4).");
9138 // Compute the index of dword with only one word among the three inputs in
9139 // a half by taking the sum of the half with three inputs and subtracting
9140 // the sum of the actual three inputs. The difference is the remaining slot.
9142 int ADWord, BDWord;
9143 int &TripleDWord = AToAInputs.size() == 3 ? ADWord : BDWord;
9144 int &OneInputDWord = AToAInputs.size() == 3 ? BDWord : ADWord;
9145 int TripleInputOffset = AToAInputs.size() == 3 ? AOffset : BOffset;
9146 ArrayRef<int> TripleInputs = AToAInputs.size() == 3 ? AToAInputs : BToAInputs;
9147 int OneInput = AToAInputs.size() == 3 ? BToAInputs[0] : AToAInputs[0];
9148 int TripleInputSum = 0 + 1 + 2 + 3 + (4 * TripleInputOffset);
9149 int TripleNonInputIdx =
9150 TripleInputSum - std::accumulate(TripleInputs.begin(), TripleInputs.end(), 0);
9151 TripleDWord = TripleNonInputIdx / 2;
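// Worked example (hypothetical inputs): with AToAInputs = {0, 1, 3} and
// AOffset = 0, TripleInputSum is 0 + 1 + 2 + 3 = 6 and the actual inputs sum
// to 4, so the spare word is TripleNonInputIdx = 2 and it lives in dword
// TripleDWord = 2 / 2 = 1.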
9153 // We use xor with one to compute the adjacent DWord to whichever one the OneInput word is in.
9155 OneInputDWord = (OneInput / 2) ^ 1;
9157 // Check for one tricky case: We're fixing a 3<-1 or a 1<-3 shuffle for AToA
9158 // and BToA inputs. If there is also such a problem with the BToB and AToB
9159 // inputs, we don't try to fix it necessarily -- we'll recurse and see it in
9160 // the next pass. However, if we have a 2<-2 in the BToB and AToB inputs, it
9161 // is essential that we don't *create* a 3<-1 as then we might oscillate.
9162 if (BToBInputs.size() == 2 && AToBInputs.size() == 2) {
9163 // Compute how many inputs will be flipped by swapping these DWords. We need
9165 // to balance this to ensure we don't form a 3-1 shuffle in the other half.
9167 int NumFlippedAToBInputs =
9168 std::count(AToBInputs.begin(), AToBInputs.end(), 2 * ADWord) +
9169 std::count(AToBInputs.begin(), AToBInputs.end(), 2 * ADWord + 1);
9170 int NumFlippedBToBInputs =
9171 std::count(BToBInputs.begin(), BToBInputs.end(), 2 * BDWord) +
9172 std::count(BToBInputs.begin(), BToBInputs.end(), 2 * BDWord + 1);
9173 if ((NumFlippedAToBInputs == 1 &&
9174 (NumFlippedBToBInputs == 0 || NumFlippedBToBInputs == 2)) ||
9175 (NumFlippedBToBInputs == 1 &&
9176 (NumFlippedAToBInputs == 0 || NumFlippedAToBInputs == 2))) {
9177 // We choose whether to fix the A half or B half based on whether that
9178 // half has zero flipped inputs. At zero, we may not be able to fix it
9179 // with that half. We also bias towards fixing the B half because that
9180 // will more commonly be the high half, and we have to bias one way.
9181 auto FixFlippedInputs = [&V, &DL, &Mask, &DAG](int PinnedIdx, int DWord,
9182 ArrayRef<int> Inputs) {
9183 int FixIdx = PinnedIdx ^ 1; // The adjacent slot to the pinned slot.
9184 bool IsFixIdxInput = std::find(Inputs.begin(), Inputs.end(),
9185 PinnedIdx ^ 1) != Inputs.end();
9186 // Determine whether the free index is in the flipped dword or the
9187 // unflipped dword based on where the pinned index is. We use this bit
9188 // in an xor to conditionally select the adjacent dword.
9189 int FixFreeIdx = 2 * (DWord ^ (PinnedIdx / 2 == DWord));
9190 bool IsFixFreeIdxInput = std::find(Inputs.begin(), Inputs.end(),
9191 FixFreeIdx) != Inputs.end();
9192 if (IsFixIdxInput == IsFixFreeIdxInput)
9194 IsFixFreeIdxInput = std::find(Inputs.begin(), Inputs.end(),
9195 FixFreeIdx) != Inputs.end();
9196 assert(IsFixIdxInput != IsFixFreeIdxInput &&
9197 "We need to be changing the number of flipped inputs!");
9198 int PSHUFHalfMask[] = {0, 1, 2, 3};
9199 std::swap(PSHUFHalfMask[FixFreeIdx % 4], PSHUFHalfMask[FixIdx % 4]);
9200 V = DAG.getNode(FixIdx < 4 ? X86ISD::PSHUFLW : X86ISD::PSHUFHW, DL,
9202 getV4X86ShuffleImm8ForMask(PSHUFHalfMask, DAG));
9205 if (M != -1 && M == FixIdx)
9207 else if (M != -1 && M == FixFreeIdx)
9210 if (NumFlippedBToBInputs != 0) {
9212 BToAInputs.size() == 3 ? TripleNonInputIdx : OneInput;
9213 FixFlippedInputs(BPinnedIdx, BDWord, BToBInputs);
9215 assert(NumFlippedAToBInputs != 0 && "Impossible given predicates!");
9217 AToAInputs.size() == 3 ? TripleNonInputIdx : OneInput;
9218 FixFlippedInputs(APinnedIdx, ADWord, AToBInputs);
9223 int PSHUFDMask[] = {0, 1, 2, 3};
9224 PSHUFDMask[ADWord] = BDWord;
9225 PSHUFDMask[BDWord] = ADWord;
9226 V = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16,
9227 DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32,
9228 DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, V),
9229 getV4X86ShuffleImm8ForMask(PSHUFDMask, DAG)));
9231 // Adjust the mask to match the new locations of A and B.
9233 if (M != -1 && M/2 == ADWord)
9234 M = 2 * BDWord + M % 2;
9235 else if (M != -1 && M/2 == BDWord)
9236 M = 2 * ADWord + M % 2;
9238 // Recurse back into this routine to re-compute state now that this isn't
9239 // a 3 and 1 problem.
9240 return DAG.getVectorShuffle(MVT::v8i16, DL, V, DAG.getUNDEF(MVT::v8i16),
9243 if ((NumLToL == 3 && NumHToL == 1) || (NumLToL == 1 && NumHToL == 3))
9244 return balanceSides(LToLInputs, HToLInputs, HToHInputs, LToHInputs, 0, 4);
9245 else if ((NumHToH == 3 && NumLToH == 1) || (NumHToH == 1 && NumLToH == 3))
9246 return balanceSides(HToHInputs, LToHInputs, LToLInputs, HToLInputs, 4, 0);
9248 // At this point there are at most two inputs to the low and high halves from
9249 // each half. That means the inputs can always be grouped into dwords and
9250 // those dwords can then be moved to the correct half with a dword shuffle.
9251 // We use at most one low and one high word shuffle to collect these paired
9252 // inputs into dwords, and finally a dword shuffle to place them.
9253 int PSHUFLMask[4] = {-1, -1, -1, -1};
9254 int PSHUFHMask[4] = {-1, -1, -1, -1};
9255 int PSHUFDMask[4] = {-1, -1, -1, -1};
9257 // First fix the masks for all the inputs that are staying in their
9258 // original halves. This will then dictate the targets of the cross-half shuffles.
9260 auto fixInPlaceInputs =
9261 [&PSHUFDMask](ArrayRef<int> InPlaceInputs, ArrayRef<int> IncomingInputs,
9262 MutableArrayRef<int> SourceHalfMask,
9263 MutableArrayRef<int> HalfMask, int HalfOffset) {
9264 if (InPlaceInputs.empty())
9266 if (InPlaceInputs.size() == 1) {
9267 SourceHalfMask[InPlaceInputs[0] - HalfOffset] =
9268 InPlaceInputs[0] - HalfOffset;
9269 PSHUFDMask[InPlaceInputs[0] / 2] = InPlaceInputs[0] / 2;
9272 if (IncomingInputs.empty()) {
9273 // Just fix all of the in place inputs.
9274 for (int Input : InPlaceInputs) {
9275 SourceHalfMask[Input - HalfOffset] = Input - HalfOffset;
9276 PSHUFDMask[Input / 2] = Input / 2;
9281 assert(InPlaceInputs.size() == 2 && "Cannot handle 3 or 4 inputs!");
9282 SourceHalfMask[InPlaceInputs[0] - HalfOffset] =
9283 InPlaceInputs[0] - HalfOffset;
9284 // Put the second input next to the first so that they are packed into
9285 // a dword. We find the adjacent index by toggling the low bit.
9286 int AdjIndex = InPlaceInputs[0] ^ 1;
9287 SourceHalfMask[AdjIndex - HalfOffset] = InPlaceInputs[1] - HalfOffset;
9288 std::replace(HalfMask.begin(), HalfMask.end(), InPlaceInputs[1], AdjIndex);
9289 PSHUFDMask[AdjIndex / 2] = AdjIndex / 2;
9291 fixInPlaceInputs(LToLInputs, HToLInputs, PSHUFLMask, LoMask, 0);
9292 fixInPlaceInputs(HToHInputs, LToHInputs, PSHUFHMask, HiMask, 4);
9294 // Now gather the cross-half inputs and place them into a free dword of
9295 // their target half.
9296 // FIXME: This operation could almost certainly be simplified dramatically to
9297 // look more like the 3-1 fixing operation.
9298 auto moveInputsToRightHalf = [&PSHUFDMask](
9299 MutableArrayRef<int> IncomingInputs, ArrayRef<int> ExistingInputs,
9300 MutableArrayRef<int> SourceHalfMask, MutableArrayRef<int> HalfMask,
9301 MutableArrayRef<int> FinalSourceHalfMask, int SourceOffset,
9303 auto isWordClobbered = [](ArrayRef<int> SourceHalfMask, int Word) {
9304 return SourceHalfMask[Word] != -1 && SourceHalfMask[Word] != Word;
9306 auto isDWordClobbered = [&isWordClobbered](ArrayRef<int> SourceHalfMask,
9308 int LowWord = Word & ~1;
9309 int HighWord = Word | 1;
9310 return isWordClobbered(SourceHalfMask, LowWord) ||
9311 isWordClobbered(SourceHalfMask, HighWord);
9314 if (IncomingInputs.empty())
9317 if (ExistingInputs.empty()) {
9318 // Map any dwords with inputs from them into the right half.
9319 for (int Input : IncomingInputs) {
9320 // If the source half mask maps over the inputs, turn those into
9321 // swaps and use the swapped lane.
9322 if (isWordClobbered(SourceHalfMask, Input - SourceOffset)) {
9323 if (SourceHalfMask[SourceHalfMask[Input - SourceOffset]] == -1) {
9324 SourceHalfMask[SourceHalfMask[Input - SourceOffset]] =
9325 Input - SourceOffset;
9326 // We have to swap the uses in our half mask in one sweep.
9327 for (int &M : HalfMask)
9328 if (M == SourceHalfMask[Input - SourceOffset] + SourceOffset)
9330 else if (M == Input)
9331 M = SourceHalfMask[Input - SourceOffset] + SourceOffset;
9333 assert(SourceHalfMask[SourceHalfMask[Input - SourceOffset]] ==
9334 Input - SourceOffset &&
9335 "Previous placement doesn't match!");
9337 // Note that this correctly re-maps both when we do a swap and when
9338 // we observe the other side of the swap above. We rely on that to
9339 // avoid swapping the members of the input list directly.
9340 Input = SourceHalfMask[Input - SourceOffset] + SourceOffset;
9343 // Map the input's dword into the correct half.
9344 if (PSHUFDMask[(Input - SourceOffset + DestOffset) / 2] == -1)
9345 PSHUFDMask[(Input - SourceOffset + DestOffset) / 2] = Input / 2;
9347 assert(PSHUFDMask[(Input - SourceOffset + DestOffset) / 2] ==
9349 "Previous placement doesn't match!");
9352 // And just directly shift any other-half mask elements to be same-half
9353 // as we will have mirrored the dword containing the element into the
9354 // same position within that half.
9355 for (int &M : HalfMask)
9356 if (M >= SourceOffset && M < SourceOffset + 4) {
9357 M = M - SourceOffset + DestOffset;
9358 assert(M >= 0 && "This should never wrap below zero!");
9363 // Ensure we have the input in a viable dword of its current half. This
9364 // is particularly tricky because the original position may be clobbered
9365 // by inputs being moved and *staying* in that half.
9366 if (IncomingInputs.size() == 1) {
9367 if (isWordClobbered(SourceHalfMask, IncomingInputs[0] - SourceOffset)) {
9368 int InputFixed = std::find(std::begin(SourceHalfMask),
9369 std::end(SourceHalfMask), -1) -
9370 std::begin(SourceHalfMask) + SourceOffset;
9371 SourceHalfMask[InputFixed - SourceOffset] =
9372 IncomingInputs[0] - SourceOffset;
9373 std::replace(HalfMask.begin(), HalfMask.end(), IncomingInputs[0],
9375 IncomingInputs[0] = InputFixed;
9377 } else if (IncomingInputs.size() == 2) {
9378 if (IncomingInputs[0] / 2 != IncomingInputs[1] / 2 ||
9379 isDWordClobbered(SourceHalfMask, IncomingInputs[0] - SourceOffset)) {
9380 // We have two non-adjacent or clobbered inputs we need to extract from
9381 // the source half. To do this, we need to map them into some adjacent
9382 // dword slot in the source mask.
9383 int InputsFixed[2] = {IncomingInputs[0] - SourceOffset,
9384 IncomingInputs[1] - SourceOffset};
9386 // If there is a free slot in the source half mask adjacent to one of
9387 // the inputs, place the other input in it. We use (Index XOR 1) to
9388 // compute an adjacent index.
9389 if (!isWordClobbered(SourceHalfMask, InputsFixed[0]) &&
9390 SourceHalfMask[InputsFixed[0] ^ 1] == -1) {
9391 SourceHalfMask[InputsFixed[0]] = InputsFixed[0];
9392 SourceHalfMask[InputsFixed[0] ^ 1] = InputsFixed[1];
9393 InputsFixed[1] = InputsFixed[0] ^ 1;
9394 } else if (!isWordClobbered(SourceHalfMask, InputsFixed[1]) &&
9395 SourceHalfMask[InputsFixed[1] ^ 1] == -1) {
9396 SourceHalfMask[InputsFixed[1]] = InputsFixed[1];
9397 SourceHalfMask[InputsFixed[1] ^ 1] = InputsFixed[0];
9398 InputsFixed[0] = InputsFixed[1] ^ 1;
9399 } else if (SourceHalfMask[2 * ((InputsFixed[0] / 2) ^ 1)] == -1 &&
9400 SourceHalfMask[2 * ((InputsFixed[0] / 2) ^ 1) + 1] == -1) {
9401 // The two inputs are in the same DWord but it is clobbered and the
9402 // adjacent DWord isn't used at all. Move both inputs to the free
9403 // slot.
9404 SourceHalfMask[2 * ((InputsFixed[0] / 2) ^ 1)] = InputsFixed[0];
9405 SourceHalfMask[2 * ((InputsFixed[0] / 2) ^ 1) + 1] = InputsFixed[1];
9406 InputsFixed[0] = 2 * ((InputsFixed[0] / 2) ^ 1);
9407 InputsFixed[1] = InputsFixed[0] ^ 1;
9408 } else {
9409 // The only way we hit this point is if there is no clobbering
9410 // (because there are no off-half inputs to this half) and there is no
9411 // free slot adjacent to one of the inputs. In this case, we have to
9412 // swap an input with a non-input.
9413 for (int i = 0; i < 4; ++i)
9414 assert((SourceHalfMask[i] == -1 || SourceHalfMask[i] == i) &&
9415 "We can't handle any clobbers here!");
9416 assert(InputsFixed[1] != (InputsFixed[0] ^ 1) &&
9417 "Cannot have adjacent inputs here!");
9419 SourceHalfMask[InputsFixed[0] ^ 1] = InputsFixed[1];
9420 SourceHalfMask[InputsFixed[1]] = InputsFixed[0] ^ 1;
9422 // We also have to update the final source mask in this case because
9423 // it may need to undo the above swap.
9424 for (int &M : FinalSourceHalfMask)
9425 if (M == (InputsFixed[0] ^ 1) + SourceOffset)
9426 M = InputsFixed[1] + SourceOffset;
9427 else if (M == InputsFixed[1] + SourceOffset)
9428 M = (InputsFixed[0] ^ 1) + SourceOffset;
9430 InputsFixed[1] = InputsFixed[0] ^ 1;
9433 // Point everything at the fixed inputs.
9434 for (int &M : HalfMask)
9435 if (M == IncomingInputs[0])
9436 M = InputsFixed[0] + SourceOffset;
9437 else if (M == IncomingInputs[1])
9438 M = InputsFixed[1] + SourceOffset;
9440 IncomingInputs[0] = InputsFixed[0] + SourceOffset;
9441 IncomingInputs[1] = InputsFixed[1] + SourceOffset;
9444 llvm_unreachable("Unhandled input size!");
9447 // Now hoist the DWord down to the right half.
9448 int FreeDWord = (PSHUFDMask[DestOffset / 2] == -1 ? 0 : 1) + DestOffset / 2;
9449 assert(PSHUFDMask[FreeDWord] == -1 && "DWord not free");
9450 PSHUFDMask[FreeDWord] = IncomingInputs[0] / 2;
9451 for (int &M : HalfMask)
9452 for (int Input : IncomingInputs)
9453 if (M == Input)
9454 M = FreeDWord * 2 + Input % 2;
9456 moveInputsToRightHalf(HToLInputs, LToLInputs, PSHUFHMask, LoMask, HiMask,
9457 /*SourceOffset*/ 4, /*DestOffset*/ 0);
9458 moveInputsToRightHalf(LToHInputs, HToHInputs, PSHUFLMask, HiMask, LoMask,
9459 /*SourceOffset*/ 0, /*DestOffset*/ 4);
9461 // Now enact all the shuffles we've computed to move the inputs into their
9462 // target halves.
9463 if (!isNoopShuffleMask(PSHUFLMask))
9464 V = DAG.getNode(X86ISD::PSHUFLW, DL, MVT::v8i16, V,
9465 getV4X86ShuffleImm8ForMask(PSHUFLMask, DAG));
9466 if (!isNoopShuffleMask(PSHUFHMask))
9467 V = DAG.getNode(X86ISD::PSHUFHW, DL, MVT::v8i16, V,
9468 getV4X86ShuffleImm8ForMask(PSHUFHMask, DAG));
9469 if (!isNoopShuffleMask(PSHUFDMask))
9470 V = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16,
9471 DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32,
9472 DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, V),
9473 getV4X86ShuffleImm8ForMask(PSHUFDMask, DAG)));
9475 // At this point, each half should contain all its inputs, and we can then
9476 // just shuffle them into their final position.
9477 assert(std::count_if(LoMask.begin(), LoMask.end(),
9478 [](int M) { return M >= 4; }) == 0 &&
9479 "Failed to lift all the high half inputs to the low mask!");
9480 assert(std::count_if(HiMask.begin(), HiMask.end(),
9481 [](int M) { return M >= 0 && M < 4; }) == 0 &&
9482 "Failed to lift all the low half inputs to the high mask!");
9484 // Do a half shuffle for the low mask.
9485 if (!isNoopShuffleMask(LoMask))
9486 V = DAG.getNode(X86ISD::PSHUFLW, DL, MVT::v8i16, V,
9487 getV4X86ShuffleImm8ForMask(LoMask, DAG));
9489 // Do a half shuffle with the high mask after shifting its values down.
9490 for (int &M : HiMask)
9491 if (M >= 0)
9492 M -= 4;
9493 if (!isNoopShuffleMask(HiMask))
9494 V = DAG.getNode(X86ISD::PSHUFHW, DL, MVT::v8i16, V,
9495 getV4X86ShuffleImm8ForMask(HiMask, DAG));
9500 /// \brief Detect whether the mask pattern should be lowered through
9501 /// interleaving.
9503 /// This essentially tests whether viewing the mask as an interleaving of two
9504 /// sub-sequences reduces the cross-input traffic of a blend operation. If so,
9505 /// lowering it through interleaving is a significantly better strategy.
9506 static bool shouldLowerAsInterleaving(ArrayRef<int> Mask) {
9507 int NumEvenInputs[2] = {0, 0};
9508 int NumOddInputs[2] = {0, 0};
9509 int NumLoInputs[2] = {0, 0};
9510 int NumHiInputs[2] = {0, 0};
9511 for (int i = 0, Size = Mask.size(); i < Size; ++i) {
9512 if (Mask[i] < 0)
9513 continue;
9515 int InputIdx = Mask[i] >= Size;
9517 if (i < Size / 2)
9518 ++NumLoInputs[InputIdx];
9519 else
9520 ++NumHiInputs[InputIdx];
9522 if (i % 2 == 0)
9523 ++NumEvenInputs[InputIdx];
9524 else
9525 ++NumOddInputs[InputIdx];
9528 // The minimum number of cross-input results for both the interleaved and
9529 // split cases. If interleaving results in fewer cross-input results, return
9530 // true.
9531 int InterleavedCrosses = std::min(NumEvenInputs[1] + NumOddInputs[0],
9532 NumEvenInputs[0] + NumOddInputs[1]);
9533 int SplitCrosses = std::min(NumLoInputs[1] + NumHiInputs[0],
9534 NumLoInputs[0] + NumHiInputs[1]);
9535 return InterleavedCrosses < SplitCrosses;
9538 /// \brief Blend two v8i16 vectors using a naive unpack strategy.
9540 /// This strategy only works when the inputs from each vector fit into a single
9541 /// half of that vector, and generally there are not so many inputs as to leave
9542 /// the required in-place shuffles highly constrained (and thus expensive). It
9543 /// shifts all the inputs into a single side of both input vectors and then
9544 /// uses an unpack to interleave these inputs in a single vector. At that
9545 /// point, we will fall back on the generic single input shuffle lowering.
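/// As an illustration: for a mask such as <0, 9, 2, 11, -1, -1, -1, -1> all
/// four inputs already sit in the low halves of V1 and V2, so no in-place
/// moves are needed; UNPCKL then interleaves the two low halves and the
/// remaining single-input shuffle simply picks <0, 3, 4, 7> out of that
/// unpacked vector.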
9546 static SDValue lowerV8I16BasicBlendVectorShuffle(SDLoc DL, SDValue V1,
9547 SDValue V2,
9548 MutableArrayRef<int> Mask,
9549 const X86Subtarget *Subtarget,
9550 SelectionDAG &DAG) {
9551 assert(V1.getSimpleValueType() == MVT::v8i16 && "Bad input type!");
9552 assert(V2.getSimpleValueType() == MVT::v8i16 && "Bad input type!");
9553 SmallVector<int, 3> LoV1Inputs, HiV1Inputs, LoV2Inputs, HiV2Inputs;
9554 for (int i = 0; i < 8; ++i)
9555 if (Mask[i] >= 0 && Mask[i] < 4)
9556 LoV1Inputs.push_back(i);
9557 else if (Mask[i] >= 4 && Mask[i] < 8)
9558 HiV1Inputs.push_back(i);
9559 else if (Mask[i] >= 8 && Mask[i] < 12)
9560 LoV2Inputs.push_back(i);
9561 else if (Mask[i] >= 12)
9562 HiV2Inputs.push_back(i);
9564 int NumV1Inputs = LoV1Inputs.size() + HiV1Inputs.size();
9565 int NumV2Inputs = LoV2Inputs.size() + HiV2Inputs.size();
9568 assert(NumV1Inputs > 0 && NumV1Inputs <= 3 && "At most 3 inputs supported");
9569 assert(NumV2Inputs > 0 && NumV2Inputs <= 3 && "At most 3 inputs supported");
9570 assert(NumV1Inputs + NumV2Inputs <= 4 && "At most 4 combined inputs");
9572 bool MergeFromLo = LoV1Inputs.size() + LoV2Inputs.size() >=
9573 HiV1Inputs.size() + HiV2Inputs.size();
9575 auto moveInputsToHalf = [&](SDValue V, ArrayRef<int> LoInputs,
9576 ArrayRef<int> HiInputs, bool MoveToLo,
9577 int MaskOffset) {
9578 ArrayRef<int> GoodInputs = MoveToLo ? LoInputs : HiInputs;
9579 ArrayRef<int> BadInputs = MoveToLo ? HiInputs : LoInputs;
9580 if (BadInputs.empty())
9581 return V;
9583 int MoveMask[] = {-1, -1, -1, -1, -1, -1, -1, -1};
9584 int MoveOffset = MoveToLo ? 0 : 4;
9586 if (GoodInputs.empty()) {
9587 for (int BadInput : BadInputs) {
9588 MoveMask[Mask[BadInput] % 4 + MoveOffset] = Mask[BadInput] - MaskOffset;
9589 Mask[BadInput] = Mask[BadInput] % 4 + MoveOffset + MaskOffset;
9592 if (GoodInputs.size() == 2) {
9593 // If the low inputs are spread across two dwords, pack them into
9594 // a single dword.
9595 MoveMask[MoveOffset] = Mask[GoodInputs[0]] - MaskOffset;
9596 MoveMask[MoveOffset + 1] = Mask[GoodInputs[1]] - MaskOffset;
9597 Mask[GoodInputs[0]] = MoveOffset + MaskOffset;
9598 Mask[GoodInputs[1]] = MoveOffset + 1 + MaskOffset;
9600 // Otherwise pin the good inputs.
9601 for (int GoodInput : GoodInputs)
9602 MoveMask[Mask[GoodInput] - MaskOffset] = Mask[GoodInput] - MaskOffset;
9605 if (BadInputs.size() == 2) {
9606 // If we have two bad inputs then there may be either one or two good
9607 // inputs fixed in place. Find a fixed input, and then find the *other*
9608 // two adjacent indices by using modular arithmetic.
9609 int GoodMaskIdx =
9610 std::find_if(std::begin(MoveMask) + MoveOffset, std::end(MoveMask),
9611 [](int M) { return M >= 0; }) -
9612 std::begin(MoveMask);
9613 int MoveMaskIdx =
9614 ((((GoodMaskIdx - MoveOffset) & ~1) + 2) % 4) + MoveOffset;
9615 assert(MoveMask[MoveMaskIdx] == -1 && "Expected empty slot");
9616 assert(MoveMask[MoveMaskIdx + 1] == -1 && "Expected empty slot");
9617 MoveMask[MoveMaskIdx] = Mask[BadInputs[0]] - MaskOffset;
9618 MoveMask[MoveMaskIdx + 1] = Mask[BadInputs[1]] - MaskOffset;
9619 Mask[BadInputs[0]] = MoveMaskIdx + MaskOffset;
9620 Mask[BadInputs[1]] = MoveMaskIdx + 1 + MaskOffset;
9622 assert(BadInputs.size() == 1 && "All sizes handled");
9623 int MoveMaskIdx = std::find(std::begin(MoveMask) + MoveOffset,
9624 std::end(MoveMask), -1) -
9625 std::begin(MoveMask);
9626 MoveMask[MoveMaskIdx] = Mask[BadInputs[0]] - MaskOffset;
9627 Mask[BadInputs[0]] = MoveMaskIdx + MaskOffset;
9631 return DAG.getVectorShuffle(MVT::v8i16, DL, V, DAG.getUNDEF(MVT::v8i16),
9632 MoveMask);
9634 V1 = moveInputsToHalf(V1, LoV1Inputs, HiV1Inputs, MergeFromLo,
9635 /*MaskOffset*/ 0);
9636 V2 = moveInputsToHalf(V2, LoV2Inputs, HiV2Inputs, MergeFromLo,
9637 /*MaskOffset*/ 8);
9639 // FIXME: Select an interleaving of the merge of V1 and V2 that minimizes
9640 // cross-half traffic in the final shuffle.
9642 // Munge the mask to be a single-input mask after the unpack merges the
9643 // results.
9644 for (int &M : Mask)
9645 if (M != -1)
9646 M = 2 * (M % 4) + (M / 8);
9648 return DAG.getVectorShuffle(
9649 MVT::v8i16, DL, DAG.getNode(MergeFromLo ? X86ISD::UNPCKL : X86ISD::UNPCKH,
9650 DL, MVT::v8i16, V1, V2),
9651 DAG.getUNDEF(MVT::v8i16), Mask);
9654 /// \brief Generic lowering of 8-lane i16 shuffles.
9656 /// This handles both single-input shuffles and combined shuffle/blends with
9657 /// two inputs. The single input shuffles are immediately delegated to
9658 /// a dedicated lowering routine.
9660 /// The blends are lowered in one of three fundamental ways. If there are few
9661 /// enough inputs, it delegates to a basic UNPCK-based strategy. If the shuffle
9662 /// of the input is significantly cheaper when lowered as an interleaving of
9663 /// the two inputs, try to interleave them. Otherwise, blend the low and high
9664 /// halves of the inputs separately (making them have relatively few inputs)
9665 /// and then concatenate them.
9666 static SDValue lowerV8I16VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
9667 const X86Subtarget *Subtarget,
9668 SelectionDAG &DAG) {
9670 assert(Op.getSimpleValueType() == MVT::v8i16 && "Bad shuffle type!");
9671 assert(V1.getSimpleValueType() == MVT::v8i16 && "Bad operand type!");
9672 assert(V2.getSimpleValueType() == MVT::v8i16 && "Bad operand type!");
9673 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
9674 ArrayRef<int> OrigMask = SVOp->getMask();
9675 int MaskStorage[8] = {OrigMask[0], OrigMask[1], OrigMask[2], OrigMask[3],
9676 OrigMask[4], OrigMask[5], OrigMask[6], OrigMask[7]};
9677 MutableArrayRef<int> Mask(MaskStorage);
9679 assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
9681 // Whenever we can lower this as a zext, that instruction is strictly faster
9682 // than any alternative.
9683 if (SDValue ZExt = lowerVectorShuffleAsZeroOrAnyExtend(
9684 DL, MVT::v8i16, V1, V2, OrigMask, Subtarget, DAG))
9687 auto isV1 = [](int M) { return M >= 0 && M < 8; };
9688 auto isV2 = [](int M) { return M >= 8; };
9690 int NumV1Inputs = std::count_if(Mask.begin(), Mask.end(), isV1);
9691 int NumV2Inputs = std::count_if(Mask.begin(), Mask.end(), isV2);
9693 if (NumV2Inputs == 0)
9694 return lowerV8I16SingleInputVectorShuffle(DL, V1, Mask, Subtarget, DAG);
9696 assert(NumV1Inputs > 0 && "All single-input shuffles should be canonicalized "
9697 "to be V1-input shuffles.");
9699 // Try to use bit shift instructions.
9700 if (SDValue Shift = lowerVectorShuffleAsBitShift(
9701 DL, MVT::v8i16, V1, V2, Mask, DAG))
9704 // Try to use byte shift instructions.
9705 if (SDValue Shift = lowerVectorShuffleAsByteShift(
9706 DL, MVT::v8i16, V1, V2, Mask, DAG))
9709 // There are special ways we can lower some single-element blends.
9710 if (NumV2Inputs == 1)
9711 if (SDValue V = lowerVectorShuffleAsElementInsertion(MVT::v8i16, DL, V1, V2,
9712 Mask, Subtarget, DAG))
9715 // We have different paths for blend lowering, but they all must use the
9716 // *exact* same predicate.
9717 bool IsBlendSupported = Subtarget->hasSSE41();
9718 if (IsBlendSupported)
9719 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v8i16, V1, V2, Mask,
9723 if (SDValue Masked =
9724 lowerVectorShuffleAsBitMask(DL, MVT::v8i16, V1, V2, Mask, DAG))
9727 // Use dedicated unpack instructions for masks that match their pattern.
9728 if (isShuffleEquivalent(V1, V2, Mask, 0, 8, 1, 9, 2, 10, 3, 11))
9729 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v8i16, V1, V2);
9730 if (isShuffleEquivalent(V1, V2, Mask, 4, 12, 5, 13, 6, 14, 7, 15))
9731 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v8i16, V1, V2);
9733 // Try to use byte rotation instructions.
9734 if (SDValue Rotate = lowerVectorShuffleAsByteRotate(
9735 DL, MVT::v8i16, V1, V2, Mask, Subtarget, DAG))
9738 if (NumV1Inputs + NumV2Inputs <= 4)
9739 return lowerV8I16BasicBlendVectorShuffle(DL, V1, V2, Mask, Subtarget, DAG);
9741 // Check whether an interleaving lowering is likely to be more efficient.
9742 // This isn't perfect but it is a strong heuristic that tends to work well on
9743 // the kinds of shuffles that show up in practice.
9745 // FIXME: Handle 1x, 2x, and 4x interleaving.
9746 if (shouldLowerAsInterleaving(Mask)) {
9747 // FIXME: Figure out whether we should pack these into the low or high
9748 // halves.
9750 int EMask[8], OMask[8];
9751 for (int i = 0; i < 4; ++i) {
9752 EMask[i] = Mask[2*i];
9753 OMask[i] = Mask[2*i + 1];
9754 EMask[i + 4] = -1;
9755 OMask[i + 4] = -1;
9758 SDValue Evens = DAG.getVectorShuffle(MVT::v8i16, DL, V1, V2, EMask);
9759 SDValue Odds = DAG.getVectorShuffle(MVT::v8i16, DL, V1, V2, OMask);
9761 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v8i16, Evens, Odds);
9764 // If we have direct support for blends, we should lower by decomposing into
9766 if (IsBlendSupported)
9767 return lowerVectorShuffleAsDecomposedShuffleBlend(DL, MVT::v8i16, V1, V2,
9768 Mask, DAG);
9770 // Try to lower by permuting the inputs into an unpack instruction.
9771 if (SDValue Unpack =
9772 lowerVectorShuffleAsUnpack(MVT::v8i16, DL, V1, V2, Mask, DAG))
9775 int LoBlendMask[8] = {-1, -1, -1, -1, -1, -1, -1, -1};
9776 int HiBlendMask[8] = {-1, -1, -1, -1, -1, -1, -1, -1};
9778 for (int i = 0; i < 4; ++i) {
9779 LoBlendMask[i] = Mask[i];
9780 HiBlendMask[i] = Mask[i + 4];
9783 SDValue LoV = DAG.getVectorShuffle(MVT::v8i16, DL, V1, V2, LoBlendMask);
9784 SDValue HiV = DAG.getVectorShuffle(MVT::v8i16, DL, V1, V2, HiBlendMask);
9785 LoV = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, LoV);
9786 HiV = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, HiV);
9788 return DAG.getNode(ISD::BITCAST, DL, MVT::v8i16,
9789 DAG.getNode(X86ISD::UNPCKL, DL, MVT::v2i64, LoV, HiV));
9792 /// \brief Check whether a compaction lowering can be done by dropping even
9793 /// elements and compute how many times even elements must be dropped.
9795 /// This handles shuffles which take every Nth element where N is a power of
9796 /// two. Example shuffle masks:
9798 /// N = 1: 0, 2, 4, 6, 8, 10, 12, 14, 0, 2, 4, 6, 8, 10, 12, 14
9799 /// N = 1: 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30
9800 /// N = 2: 0, 4, 8, 12, 0, 4, 8, 12, 0, 4, 8, 12, 0, 4, 8, 12
9801 /// N = 2: 0, 4, 8, 12, 16, 20, 24, 28, 0, 4, 8, 12, 16, 20, 24, 28
9802 /// N = 3: 0, 8, 0, 8, 0, 8, 0, 8, 0, 8, 0, 8, 0, 8, 0, 8
9803 /// N = 3: 0, 8, 16, 24, 0, 8, 16, 24, 0, 8, 16, 24, 0, 8, 16, 24
9805 /// Any of these lanes can of course be undef.
9807 /// This routine only supports N <= 3.
9808 /// FIXME: Evaluate whether either AVX or AVX-512 have any opportunities here
9811 /// \returns N above, or the number of times even elements must be dropped if
9812 /// there is such a number. Otherwise returns zero.
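/// (In the v16i8 lowering below, N also matches the number of PACKUS nodes
/// emitted: one pack of the byte-masked inputs plus N - 1 repacks of the
/// result.)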
9813 static int canLowerByDroppingEvenElements(ArrayRef<int> Mask) {
9814 // Figure out whether we're looping over two inputs or just one.
9815 bool IsSingleInput = isSingleInputShuffleMask(Mask);
9817 // The modulus for the shuffle vector entries is based on whether this is
9818 // a single input or not.
9819 int ShuffleModulus = Mask.size() * (IsSingleInput ? 1 : 2);
9820 assert(isPowerOf2_32((uint32_t)ShuffleModulus) &&
9821 "We should only be called with masks with a power-of-2 size!");
9823 uint64_t ModMask = (uint64_t)ShuffleModulus - 1;
9825 // We track whether the input is viable for all power-of-2 strides 2^1, 2^2,
9826 // and 2^3 simultaneously. This is because we may have ambiguity with
9827 // partially undef inputs.
9828 bool ViableForN[3] = {true, true, true};
9830 for (int i = 0, e = Mask.size(); i < e; ++i) {
9831 // Ignore undef lanes, we'll optimistically collapse them to the pattern we
9832 // need.
9833 if (Mask[i] == -1)
9834 continue;
9836 bool IsAnyViable = false;
9837 for (unsigned j = 0; j != array_lengthof(ViableForN); ++j)
9838 if (ViableForN[j]) {
9839 uint64_t N = j + 1;
9841 // The shuffle mask must be equal to (i * 2^N) % M.
9842 if ((uint64_t)Mask[i] == (((uint64_t)i << N) & ModMask))
9843 IsAnyViable = true;
9844 else
9845 ViableForN[j] = false;
9847 // Early exit if we exhaust the possible powers of two.
9848 if (!IsAnyViable)
9849 break;
9852 for (unsigned j = 0; j != array_lengthof(ViableForN); ++j)
9853 if (ViableForN[j])
9854 return j + 1;
9856 // Return 0 as there is no viable power of two.
9857 return 0;
9860 /// \brief Generic lowering of v16i8 shuffles.
9862 /// This is a hybrid strategy to lower v16i8 vectors. It first attempts to
9863 /// detect any complexity reducing interleaving. If that doesn't help, it uses
9864 /// UNPCK to spread the i8 elements across two i16-element vectors, and uses
9865 /// the existing lowering for v8i16 blends on each half, finally PACK-ing them
9866 /// back together.
9867 static SDValue lowerV16I8VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
9868 const X86Subtarget *Subtarget,
9869 SelectionDAG &DAG) {
9871 assert(Op.getSimpleValueType() == MVT::v16i8 && "Bad shuffle type!");
9872 assert(V1.getSimpleValueType() == MVT::v16i8 && "Bad operand type!");
9873 assert(V2.getSimpleValueType() == MVT::v16i8 && "Bad operand type!");
9874 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
9875 ArrayRef<int> OrigMask = SVOp->getMask();
9876 assert(OrigMask.size() == 16 && "Unexpected mask size for v16 shuffle!");
9878 // Try to use bit shift instructions.
9879 if (SDValue Shift = lowerVectorShuffleAsBitShift(
9880 DL, MVT::v16i8, V1, V2, OrigMask, DAG))
9883 // Try to use byte shift instructions.
9884 if (SDValue Shift = lowerVectorShuffleAsByteShift(
9885 DL, MVT::v16i8, V1, V2, OrigMask, DAG))
9888 // Try to use byte rotation instructions.
9889 if (SDValue Rotate = lowerVectorShuffleAsByteRotate(
9890 DL, MVT::v16i8, V1, V2, OrigMask, Subtarget, DAG))
9893 // Try to use a zext lowering.
9894 if (SDValue ZExt = lowerVectorShuffleAsZeroOrAnyExtend(
9895 DL, MVT::v16i8, V1, V2, OrigMask, Subtarget, DAG))
9898 int MaskStorage[16] = {
9899 OrigMask[0], OrigMask[1], OrigMask[2], OrigMask[3],
9900 OrigMask[4], OrigMask[5], OrigMask[6], OrigMask[7],
9901 OrigMask[8], OrigMask[9], OrigMask[10], OrigMask[11],
9902 OrigMask[12], OrigMask[13], OrigMask[14], OrigMask[15]};
9903 MutableArrayRef<int> Mask(MaskStorage);
9904 MutableArrayRef<int> LoMask = Mask.slice(0, 8);
9905 MutableArrayRef<int> HiMask = Mask.slice(8, 8);
9907 int NumV2Elements =
9908 std::count_if(Mask.begin(), Mask.end(), [](int M) { return M >= 16; });
9910 // For single-input shuffles, there are some nicer lowering tricks we can use.
9911 if (NumV2Elements == 0) {
9912 // Check for being able to broadcast a single element.
9913 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v16i8, DL, V1,
9914 Mask, Subtarget, DAG))
9917 // Check whether we can widen this to an i16 shuffle by duplicating bytes.
9918 // Notably, this handles splat and partial-splat shuffles more efficiently.
9919 // However, it only makes sense if the pre-duplication shuffle simplifies
9920 // things significantly. Currently, this means we need to be able to
9921 // express the pre-duplication shuffle as an i16 shuffle.
9923 // FIXME: We should check for other patterns which can be widened into an
9924 // i16 shuffle as well.
9925 auto canWidenViaDuplication = [](ArrayRef<int> Mask) {
9926 for (int i = 0; i < 16; i += 2)
9927 if (Mask[i] != -1 && Mask[i + 1] != -1 && Mask[i] != Mask[i + 1])
9928 return false;
9930 return true;
9931 };
9932 auto tryToWidenViaDuplication = [&]() -> SDValue {
9933 if (!canWidenViaDuplication(Mask))
9934 return SDValue();
9935 SmallVector<int, 4> LoInputs;
9936 std::copy_if(Mask.begin(), Mask.end(), std::back_inserter(LoInputs),
9937 [](int M) { return M >= 0 && M < 8; });
9938 std::sort(LoInputs.begin(), LoInputs.end());
9939 LoInputs.erase(std::unique(LoInputs.begin(), LoInputs.end()),
9940 LoInputs.end());
9941 SmallVector<int, 4> HiInputs;
9942 std::copy_if(Mask.begin(), Mask.end(), std::back_inserter(HiInputs),
9943 [](int M) { return M >= 8; });
9944 std::sort(HiInputs.begin(), HiInputs.end());
9945 HiInputs.erase(std::unique(HiInputs.begin(), HiInputs.end()),
9946 HiInputs.end());
9948 bool TargetLo = LoInputs.size() >= HiInputs.size();
9949 ArrayRef<int> InPlaceInputs = TargetLo ? LoInputs : HiInputs;
9950 ArrayRef<int> MovingInputs = TargetLo ? HiInputs : LoInputs;
9952 int PreDupI16Shuffle[] = {-1, -1, -1, -1, -1, -1, -1, -1};
9953 SmallDenseMap<int, int, 8> LaneMap;
9954 for (int I : InPlaceInputs) {
9955 PreDupI16Shuffle[I/2] = I/2;
9956 LaneMap[I] = I;
9958 int j = TargetLo ? 0 : 4, je = j + 4;
9959 for (int i = 0, ie = MovingInputs.size(); i < ie; ++i) {
9960 // Check if j is already a shuffle of this input. This happens when
9961 // there are two adjacent bytes after we move the low one.
9962 if (PreDupI16Shuffle[j] != MovingInputs[i] / 2) {
9963 // If we haven't yet mapped the input, search for a slot into which
9964 // we can map it.
9965 while (j < je && PreDupI16Shuffle[j] != -1)
9966 ++j;
9968 if (j == je)
9969 // We can't place the inputs into a single half with a simple i16 shuffle, so bail.
9970 return SDValue();
9972 // Map this input with the i16 shuffle.
9973 PreDupI16Shuffle[j] = MovingInputs[i] / 2;
9976 // Update the lane map based on the mapping we ended up with.
9977 LaneMap[MovingInputs[i]] = 2 * j + MovingInputs[i] % 2;
9978 }
9979 V1 = DAG.getNode(
9980 ISD::BITCAST, DL, MVT::v16i8,
9981 DAG.getVectorShuffle(MVT::v8i16, DL,
9982 DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, V1),
9983 DAG.getUNDEF(MVT::v8i16), PreDupI16Shuffle));
9985 // Unpack the bytes to form the i16s that will be shuffled into place.
9986 V1 = DAG.getNode(TargetLo ? X86ISD::UNPCKL : X86ISD::UNPCKH, DL,
9987 MVT::v16i8, V1, V1);
9989 int PostDupI16Shuffle[8] = {-1, -1, -1, -1, -1, -1, -1, -1};
9990 for (int i = 0; i < 16; ++i)
9991 if (Mask[i] != -1) {
9992 int MappedMask = LaneMap[Mask[i]] - (TargetLo ? 0 : 8);
9993 assert(MappedMask < 8 && "Invalid v8 shuffle mask!");
9994 if (PostDupI16Shuffle[i / 2] == -1)
9995 PostDupI16Shuffle[i / 2] = MappedMask;
9997 assert(PostDupI16Shuffle[i / 2] == MappedMask &&
9998 "Conflicting entrties in the original shuffle!");
10000 return DAG.getNode(
10001 ISD::BITCAST, DL, MVT::v16i8,
10002 DAG.getVectorShuffle(MVT::v8i16, DL,
10003 DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, V1),
10004 DAG.getUNDEF(MVT::v8i16), PostDupI16Shuffle));
10006 if (SDValue V = tryToWidenViaDuplication())
10010 // Check whether an interleaving lowering is likely to be more efficient.
10011 // This isn't perfect but it is a strong heuristic that tends to work well on
10012 // the kinds of shuffles that show up in practice.
10014 // FIXME: We need to handle other interleaving widths (i16, i32, ...).
10015 if (shouldLowerAsInterleaving(Mask)) {
10016 int NumLoHalf = std::count_if(Mask.begin(), Mask.end(), [](int M) {
10017 return (M >= 0 && M < 8) || (M >= 16 && M < 24);
10019 int NumHiHalf = std::count_if(Mask.begin(), Mask.end(), [](int M) {
10020 return (M >= 8 && M < 16) || M >= 24;
10022 int EMask[16] = {-1, -1, -1, -1, -1, -1, -1, -1,
10023 -1, -1, -1, -1, -1, -1, -1, -1};
10024 int OMask[16] = {-1, -1, -1, -1, -1, -1, -1, -1,
10025 -1, -1, -1, -1, -1, -1, -1, -1};
10026 bool UnpackLo = NumLoHalf >= NumHiHalf;
10027 MutableArrayRef<int> TargetEMask(UnpackLo ? EMask : EMask + 8, 8);
10028 MutableArrayRef<int> TargetOMask(UnpackLo ? OMask : OMask + 8, 8);
10029 for (int i = 0; i < 8; ++i) {
10030 TargetEMask[i] = Mask[2 * i];
10031 TargetOMask[i] = Mask[2 * i + 1];
10034 SDValue Evens = DAG.getVectorShuffle(MVT::v16i8, DL, V1, V2, EMask);
10035 SDValue Odds = DAG.getVectorShuffle(MVT::v16i8, DL, V1, V2, OMask);
10037 return DAG.getNode(UnpackLo ? X86ISD::UNPCKL : X86ISD::UNPCKH, DL,
10038 MVT::v16i8, Evens, Odds);
10041 // Check for SSSE3 which lets us lower all v16i8 shuffles much more directly
10042 // with PSHUFB. It is important to do this before we attempt to generate any
10043 // blends but after all of the single-input lowerings. If the single input
10044 // lowerings can find an instruction sequence that is faster than a PSHUFB, we
10045 // want to preserve that and we can DAG combine any longer sequences into
10046 // a PSHUFB in the end. But once we start blending from multiple inputs,
10047 // the complexity of DAG combining bad patterns back into PSHUFB is too high,
10048 // and there are *very* few patterns that would actually be faster than the
10049 // PSHUFB approach because of its ability to zero lanes.
10051 // FIXME: The only exceptions to the above are blends which are exact
10052 // interleavings with direct instructions supporting them. We currently don't
10053 // handle those well here.
10054 if (Subtarget->hasSSSE3()) {
10055 SDValue V1Mask[16];
10056 SDValue V2Mask[16];
10057 bool V1InUse = false;
10058 bool V2InUse = false;
10059 SmallBitVector Zeroable = computeZeroableShuffleElements(Mask, V1, V2);
10061 for (int i = 0; i < 16; ++i) {
10062 if (Mask[i] == -1) {
10063 V1Mask[i] = V2Mask[i] = DAG.getUNDEF(MVT::i8);
10065 const int ZeroMask = 0x80;
10066 int V1Idx = (Mask[i] < 16 ? Mask[i] : ZeroMask);
10067 int V2Idx = (Mask[i] < 16 ? ZeroMask : Mask[i] - 16);
10068 if (Zeroable[i])
10069 V1Idx = V2Idx = ZeroMask;
10070 V1Mask[i] = DAG.getConstant(V1Idx, MVT::i8);
10071 V2Mask[i] = DAG.getConstant(V2Idx, MVT::i8);
10072 V1InUse |= (ZeroMask != V1Idx);
10073 V2InUse |= (ZeroMask != V2Idx);
10077 // If both V1 and V2 are in use and we can use a direct blend, do so. This
10078 // avoids using blends to handle blends-with-zero which is important as
10079 // a single pshufb is significantly faster for that.
10080 if (V1InUse && V2InUse && Subtarget->hasSSE41())
10081 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v16i8, V1, V2, Mask,
10086 if (V1InUse)
10087 V1 = DAG.getNode(X86ISD::PSHUFB, DL, MVT::v16i8, V1,
10088 DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v16i8, V1Mask));
10089 if (V2InUse)
10090 V2 = DAG.getNode(X86ISD::PSHUFB, DL, MVT::v16i8, V2,
10091 DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v16i8, V2Mask));
10093 // If we need shuffled inputs from both, blend the two.
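// (Each PSHUFB mask uses the 0x80 sentinel to zero every lane that the other
// input supplies, so a plain OR is enough to merge the two results.)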
10094 if (V1InUse && V2InUse)
10095 return DAG.getNode(ISD::OR, DL, MVT::v16i8, V1, V2);
10096 if (V1InUse)
10097 return V1; // Single inputs are easy.
10098 if (V2InUse)
10099 return V2; // Single inputs are easy.
10100 // Shuffling to a zeroable vector.
10101 return getZeroVector(MVT::v16i8, Subtarget, DAG, DL);
10104 // There are special ways we can lower some single-element blends.
10105 if (NumV2Elements == 1)
10106 if (SDValue V = lowerVectorShuffleAsElementInsertion(MVT::v16i8, DL, V1, V2,
10107 Mask, Subtarget, DAG))
10110 // Check whether a compaction lowering can be done. This handles shuffles
10111 // which take every Nth element for some even N. See the helper function for
10114 // We special case these as they can be particularly efficiently handled with
10115 // the PACKUSWB instruction on x86 and they show up in common patterns of
10116 // rearranging bytes to truncate wide elements.
10117 if (int NumEvenDrops = canLowerByDroppingEvenElements(Mask)) {
10118 // NumEvenDrops is the power of two stride of the elements. Another way of
10119 // thinking about it is that we need to drop the even elements this many
10120 // times to get the original input.
10121 bool IsSingleInput = isSingleInputShuffleMask(Mask);
10123 // First we need to zero all the dropped bytes.
10124 assert(NumEvenDrops <= 3 &&
10125 "No support for dropping even elements more than 3 times.");
10126 // We use the mask type to pick which bytes are preserved based on how many
10127 // elements are dropped.
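// (Splatting 0xFF at i16/i32/i64 granularity keeps byte 0 of every 2-, 4-, or
// 8-byte group and zeroes the rest, so the saturating PACKUS below preserves
// exactly the surviving bytes.)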
10128 MVT MaskVTs[] = { MVT::v8i16, MVT::v4i32, MVT::v2i64 };
10129 SDValue ByteClearMask =
10130 DAG.getNode(ISD::BITCAST, DL, MVT::v16i8,
10131 DAG.getConstant(0xFF, MaskVTs[NumEvenDrops - 1]));
10132 V1 = DAG.getNode(ISD::AND, DL, MVT::v16i8, V1, ByteClearMask);
10133 if (!IsSingleInput)
10134 V2 = DAG.getNode(ISD::AND, DL, MVT::v16i8, V2, ByteClearMask);
10136 // Now pack things back together.
10137 V1 = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, V1);
10138 V2 = IsSingleInput ? V1 : DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, V2);
10139 SDValue Result = DAG.getNode(X86ISD::PACKUS, DL, MVT::v16i8, V1, V2);
10140 for (int i = 1; i < NumEvenDrops; ++i) {
10141 Result = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, Result);
10142 Result = DAG.getNode(X86ISD::PACKUS, DL, MVT::v16i8, Result, Result);
10143 }
10145 return Result;
10148 int V1LoBlendMask[8] = {-1, -1, -1, -1, -1, -1, -1, -1};
10149 int V1HiBlendMask[8] = {-1, -1, -1, -1, -1, -1, -1, -1};
10150 int V2LoBlendMask[8] = {-1, -1, -1, -1, -1, -1, -1, -1};
10151 int V2HiBlendMask[8] = {-1, -1, -1, -1, -1, -1, -1, -1};
10153 auto buildBlendMasks = [](MutableArrayRef<int> HalfMask,
10154 MutableArrayRef<int> V1HalfBlendMask,
10155 MutableArrayRef<int> V2HalfBlendMask) {
10156 for (int i = 0; i < 8; ++i)
10157 if (HalfMask[i] >= 0 && HalfMask[i] < 16) {
10158 V1HalfBlendMask[i] = HalfMask[i];
10159 HalfMask[i] = i;
10160 } else if (HalfMask[i] >= 16) {
10161 V2HalfBlendMask[i] = HalfMask[i] - 16;
10162 HalfMask[i] = i + 8;
10165 buildBlendMasks(LoMask, V1LoBlendMask, V2LoBlendMask);
10166 buildBlendMasks(HiMask, V1HiBlendMask, V2HiBlendMask);
10168 SDValue Zero = getZeroVector(MVT::v8i16, Subtarget, DAG, DL);
10170 auto buildLoAndHiV8s = [&](SDValue V, MutableArrayRef<int> LoBlendMask,
10171 MutableArrayRef<int> HiBlendMask) {
10172 SDValue V1, V2;
10173 // Check if any of the odd lanes in the v16i8 are used. If not, we can mask
10174 // them out and avoid using UNPCK{L,H} to extract the elements of V as
10175 // i16s.
10176 if (std::none_of(LoBlendMask.begin(), LoBlendMask.end(),
10177 [](int M) { return M >= 0 && M % 2 == 1; }) &&
10178 std::none_of(HiBlendMask.begin(), HiBlendMask.end(),
10179 [](int M) { return M >= 0 && M % 2 == 1; })) {
10180 // Use a mask to drop the high bytes.
10181 V1 = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, V);
10182 V1 = DAG.getNode(ISD::AND, DL, MVT::v8i16, V1,
10183 DAG.getConstant(0x00FF, MVT::v8i16));
10185 // This will be a single vector shuffle instead of a blend so nuke V2.
10186 V2 = DAG.getUNDEF(MVT::v8i16);
10188 // Squash the masks to point directly into V1.
10189 for (int &M : LoBlendMask)
10190 if (M >= 0)
10191 M /= 2;
10192 for (int &M : HiBlendMask)
10193 if (M >= 0)
10194 M /= 2;
10195 } else {
10196 // Otherwise just unpack the low half of V into V1 and the high half into
10197 // V2 so that we can blend them as i16s.
10198 V1 = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16,
10199 DAG.getNode(X86ISD::UNPCKL, DL, MVT::v16i8, V, Zero));
10200 V2 = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16,
10201 DAG.getNode(X86ISD::UNPCKH, DL, MVT::v16i8, V, Zero));
10204 SDValue BlendedLo = DAG.getVectorShuffle(MVT::v8i16, DL, V1, V2, LoBlendMask);
10205 SDValue BlendedHi = DAG.getVectorShuffle(MVT::v8i16, DL, V1, V2, HiBlendMask);
10206 return std::make_pair(BlendedLo, BlendedHi);
10208 SDValue V1Lo, V1Hi, V2Lo, V2Hi;
10209 std::tie(V1Lo, V1Hi) = buildLoAndHiV8s(V1, V1LoBlendMask, V1HiBlendMask);
10210 std::tie(V2Lo, V2Hi) = buildLoAndHiV8s(V2, V2LoBlendMask, V2HiBlendMask);
10212 SDValue LoV = DAG.getVectorShuffle(MVT::v8i16, DL, V1Lo, V2Lo, LoMask);
10213 SDValue HiV = DAG.getVectorShuffle(MVT::v8i16, DL, V1Hi, V2Hi, HiMask);
10215 return DAG.getNode(X86ISD::PACKUS, DL, MVT::v16i8, LoV, HiV);
10218 /// \brief Dispatching routine to lower various 128-bit x86 vector shuffles.
10220 /// This routine breaks down the specific type of 128-bit shuffle and
10221 /// dispatches to the lowering routines accordingly.
10222 static SDValue lower128BitVectorShuffle(SDValue Op, SDValue V1, SDValue V2,
10223 MVT VT, const X86Subtarget *Subtarget,
10224 SelectionDAG &DAG) {
10225 switch (VT.SimpleTy) {
10226 case MVT::v2i64:
10227 return lowerV2I64VectorShuffle(Op, V1, V2, Subtarget, DAG);
10228 case MVT::v2f64:
10229 return lowerV2F64VectorShuffle(Op, V1, V2, Subtarget, DAG);
10230 case MVT::v4i32:
10231 return lowerV4I32VectorShuffle(Op, V1, V2, Subtarget, DAG);
10232 case MVT::v4f32:
10233 return lowerV4F32VectorShuffle(Op, V1, V2, Subtarget, DAG);
10234 case MVT::v8i16:
10235 return lowerV8I16VectorShuffle(Op, V1, V2, Subtarget, DAG);
10236 case MVT::v16i8:
10237 return lowerV16I8VectorShuffle(Op, V1, V2, Subtarget, DAG);
10239 default:
10240 llvm_unreachable("Unimplemented!");
10244 /// \brief Helper function to test whether a shuffle mask could be
10245 /// simplified by widening the elements being shuffled.
10247 /// Appends the mask for wider elements in WidenedMask if valid. Otherwise
10248 /// leaves it in an unspecified state.
10250 /// NOTE: This must handle normal vector shuffle masks and *target* vector
10251 /// shuffle masks. The latter have the special property of a '-2' representing
10252 /// a zero-ed lane of a vector.
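/// As an illustration: the mask <0, 1, 6, 7> widens to <0, 3>, while
/// <0, 2, 4, 6> cannot be widened because its entries do not form adjacent,
/// aligned element pairs.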
10253 static bool canWidenShuffleElements(ArrayRef<int> Mask,
10254 SmallVectorImpl<int> &WidenedMask) {
10255 for (int i = 0, Size = Mask.size(); i < Size; i += 2) {
10256 // If both elements are undef, its trivial.
10257 if (Mask[i] == SM_SentinelUndef && Mask[i + 1] == SM_SentinelUndef) {
10258 WidenedMask.push_back(SM_SentinelUndef);
10262 // Check for an undef mask and a mask value properly aligned to fit with
10263 // a pair of values. If we find such a case, use the non-undef mask's value.
10264 if (Mask[i] == SM_SentinelUndef && Mask[i + 1] >= 0 && Mask[i + 1] % 2 == 1) {
10265 WidenedMask.push_back(Mask[i + 1] / 2);
10268 if (Mask[i + 1] == SM_SentinelUndef && Mask[i] >= 0 && Mask[i] % 2 == 0) {
10269 WidenedMask.push_back(Mask[i] / 2);
10273 // When zeroing, we need to spread the zeroing across both lanes to widen.
10274 if (Mask[i] == SM_SentinelZero || Mask[i + 1] == SM_SentinelZero) {
10275 if ((Mask[i] == SM_SentinelZero || Mask[i] == SM_SentinelUndef) &&
10276 (Mask[i + 1] == SM_SentinelZero || Mask[i + 1] == SM_SentinelUndef)) {
10277 WidenedMask.push_back(SM_SentinelZero);
10283 // Finally check if the two mask values are adjacent and aligned with
10285 if (Mask[i] != SM_SentinelUndef && Mask[i] % 2 == 0 && Mask[i] + 1 == Mask[i + 1]) {
10286 WidenedMask.push_back(Mask[i] / 2);
10290 // Otherwise we can't safely widen the elements used in this shuffle.
10291 return false;
10292 }
10293 assert(WidenedMask.size() == Mask.size() / 2 &&
10294 "Incorrect size of mask after widening the elements!");
10295 return true;
10299 /// \brief Generic routine to split vector shuffle into half-sized shuffles.
10301 /// This routine just extracts two subvectors, shuffles them independently, and
10302 /// then concatenates them back together. This should work effectively with all
10303 /// AVX vector shuffle types.
10304 static SDValue splitAndLowerVectorShuffle(SDLoc DL, MVT VT, SDValue V1,
10305 SDValue V2, ArrayRef<int> Mask,
10306 SelectionDAG &DAG) {
10307 assert(VT.getSizeInBits() >= 256 &&
10308 "Only for 256-bit or wider vector shuffles!");
10309 assert(V1.getSimpleValueType() == VT && "Bad operand type!");
10310 assert(V2.getSimpleValueType() == VT && "Bad operand type!");
10312 ArrayRef<int> LoMask = Mask.slice(0, Mask.size() / 2);
10313 ArrayRef<int> HiMask = Mask.slice(Mask.size() / 2);
10315 int NumElements = VT.getVectorNumElements();
10316 int SplitNumElements = NumElements / 2;
10317 MVT ScalarVT = VT.getScalarType();
10318 MVT SplitVT = MVT::getVectorVT(ScalarVT, NumElements / 2);
10320 // Rather than splitting build-vectors, just build two narrower build
10321 // vectors. This helps shuffling with splats and zeros.
10322 auto SplitVector = [&](SDValue V) {
10323 while (V.getOpcode() == ISD::BITCAST)
10324 V = V->getOperand(0);
10326 MVT OrigVT = V.getSimpleValueType();
10327 int OrigNumElements = OrigVT.getVectorNumElements();
10328 int OrigSplitNumElements = OrigNumElements / 2;
10329 MVT OrigScalarVT = OrigVT.getScalarType();
10330 MVT OrigSplitVT = MVT::getVectorVT(OrigScalarVT, OrigNumElements / 2);
10332 SDValue LoV, HiV;
10334 auto *BV = dyn_cast<BuildVectorSDNode>(V);
10335 if (!BV) {
10336 LoV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, OrigSplitVT, V,
10337 DAG.getIntPtrConstant(0));
10338 HiV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, OrigSplitVT, V,
10339 DAG.getIntPtrConstant(OrigSplitNumElements));
10340 } else {
10342 SmallVector<SDValue, 16> LoOps, HiOps;
10343 for (int i = 0; i < OrigSplitNumElements; ++i) {
10344 LoOps.push_back(BV->getOperand(i));
10345 HiOps.push_back(BV->getOperand(i + OrigSplitNumElements));
10347 LoV = DAG.getNode(ISD::BUILD_VECTOR, DL, OrigSplitVT, LoOps);
10348 HiV = DAG.getNode(ISD::BUILD_VECTOR, DL, OrigSplitVT, HiOps);
10350 return std::make_pair(DAG.getNode(ISD::BITCAST, DL, SplitVT, LoV),
10351 DAG.getNode(ISD::BITCAST, DL, SplitVT, HiV));
10354 SDValue LoV1, HiV1, LoV2, HiV2;
10355 std::tie(LoV1, HiV1) = SplitVector(V1);
10356 std::tie(LoV2, HiV2) = SplitVector(V2);
10358 // Now create two 4-way blends of these half-width vectors.
10359 auto HalfBlend = [&](ArrayRef<int> HalfMask) {
10360 bool UseLoV1 = false, UseHiV1 = false, UseLoV2 = false, UseHiV2 = false;
10361 SmallVector<int, 32> V1BlendMask, V2BlendMask, BlendMask;
10362 for (int i = 0; i < SplitNumElements; ++i) {
10363 int M = HalfMask[i];
10364 if (M >= NumElements) {
10365 if (M >= NumElements + SplitNumElements)
10366 UseHiV2 = true;
10367 else
10368 UseLoV2 = true;
10369 V2BlendMask.push_back(M - NumElements);
10370 V1BlendMask.push_back(-1);
10371 BlendMask.push_back(SplitNumElements + i);
10372 } else if (M >= 0) {
10373 if (M >= SplitNumElements)
10374 UseHiV1 = true;
10375 else
10376 UseLoV1 = true;
10377 V2BlendMask.push_back(-1);
10378 V1BlendMask.push_back(M);
10379 BlendMask.push_back(i);
10380 } else {
10381 V2BlendMask.push_back(-1);
10382 V1BlendMask.push_back(-1);
10383 BlendMask.push_back(-1);
10387 // Because the lowering happens after all combining takes place, we need to
10388 // manually combine these blend masks as much as possible so that we create
10389 // a minimal number of high-level vector shuffle nodes.
10391 // First try just blending the halves of V1 or V2.
10392 if (!UseLoV1 && !UseHiV1 && !UseLoV2 && !UseHiV2)
10393 return DAG.getUNDEF(SplitVT);
10394 if (!UseLoV2 && !UseHiV2)
10395 return DAG.getVectorShuffle(SplitVT, DL, LoV1, HiV1, V1BlendMask);
10396 if (!UseLoV1 && !UseHiV1)
10397 return DAG.getVectorShuffle(SplitVT, DL, LoV2, HiV2, V2BlendMask);
10399 SDValue V1Blend, V2Blend;
10400 if (UseLoV1 && UseHiV1) {
10401 V1Blend =
10402 DAG.getVectorShuffle(SplitVT, DL, LoV1, HiV1, V1BlendMask);
10403 } else {
10404 // We only use half of V1 so map the usage down into the final blend mask.
10405 V1Blend = UseLoV1 ? LoV1 : HiV1;
10406 for (int i = 0; i < SplitNumElements; ++i)
10407 if (BlendMask[i] >= 0 && BlendMask[i] < SplitNumElements)
10408 BlendMask[i] = V1BlendMask[i] - (UseLoV1 ? 0 : SplitNumElements);
10410 if (UseLoV2 && UseHiV2) {
10411 V2Blend =
10412 DAG.getVectorShuffle(SplitVT, DL, LoV2, HiV2, V2BlendMask);
10413 } else {
10414 // We only use half of V2 so map the usage down into the final blend mask.
10415 V2Blend = UseLoV2 ? LoV2 : HiV2;
10416 for (int i = 0; i < SplitNumElements; ++i)
10417 if (BlendMask[i] >= SplitNumElements)
10418 BlendMask[i] = V2BlendMask[i] + (UseLoV2 ? SplitNumElements : 0);
10420 return DAG.getVectorShuffle(SplitVT, DL, V1Blend, V2Blend, BlendMask);
10422 SDValue Lo = HalfBlend(LoMask);
10423 SDValue Hi = HalfBlend(HiMask);
10424 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Lo, Hi);
10427 /// \brief Either split a vector in halves or decompose the shuffles and the
10430 /// This is provided as a good fallback for many lowerings of non-single-input
10431 /// shuffles with more than one 128-bit lane. In those cases, we want to select
10432 /// between splitting the shuffle into 128-bit components and stitching those
10433 /// back together vs. extracting the single-input shuffles and blending those
10435 static SDValue lowerVectorShuffleAsSplitOrBlend(SDLoc DL, MVT VT, SDValue V1,
10436 SDValue V2, ArrayRef<int> Mask,
10437 SelectionDAG &DAG) {
10438 assert(!isSingleInputShuffleMask(Mask) && "This routine must not be used to "
10439 "lower single-input shuffles as it "
10440 "could then recurse on itself.");
10441 int Size = Mask.size();
10443 // If this can be modeled as a broadcast of two elements followed by a blend,
10444 // prefer that lowering. This is especially important because broadcasts can
10445 // often fold with memory operands.
10446 auto DoBothBroadcast = [&] {
10447 int V1BroadcastIdx = -1, V2BroadcastIdx = -1;
10448 for (int M : Mask)
10449 if (M >= Size) {
10450 if (V2BroadcastIdx == -1)
10451 V2BroadcastIdx = M - Size;
10452 else if (M - Size != V2BroadcastIdx)
10453 return false;
10454 } else if (M >= 0) {
10455 if (V1BroadcastIdx == -1)
10456 V1BroadcastIdx = M;
10457 else if (M != V1BroadcastIdx)
10458 return false;
10459 }
10460 return true;
10461 };
10462 if (DoBothBroadcast())
10463 return lowerVectorShuffleAsDecomposedShuffleBlend(DL, VT, V1, V2, Mask,
10466 // If the inputs all stem from a single 128-bit lane of each input, then we
10467 // split them rather than blending because the split will decompose to
10468 // unusually few instructions.
10469 int LaneCount = VT.getSizeInBits() / 128;
10470 int LaneSize = Size / LaneCount;
10471 SmallBitVector LaneInputs[2];
10472 LaneInputs[0].resize(LaneCount, false);
10473 LaneInputs[1].resize(LaneCount, false);
10474 for (int i = 0; i < Size; ++i)
10475 if (Mask[i] >= 0)
10476 LaneInputs[Mask[i] / Size][(Mask[i] % Size) / LaneSize] = true;
10477 if (LaneInputs[0].count() <= 1 && LaneInputs[1].count() <= 1)
10478 return splitAndLowerVectorShuffle(DL, VT, V1, V2, Mask, DAG);
10480 // Otherwise, just fall back to decomposed shuffles and a blend. This requires
10481 // that the decomposed single-input shuffles don't end up here.
10482 return lowerVectorShuffleAsDecomposedShuffleBlend(DL, VT, V1, V2, Mask, DAG);
10485 /// \brief Lower a vector shuffle crossing multiple 128-bit lanes as
10486 /// a permutation and blend of those lanes.
10488 /// This essentially blends the out-of-lane inputs to each lane into the lane
10489 /// from a permuted copy of the vector. This lowering strategy results in four
10490 /// instructions in the worst case for a single-input cross lane shuffle which
10491 /// is lower than any other fully general cross-lane shuffle strategy I'm aware
10492 /// of. Special cases for each particular shuffle pattern should be handled
10493 /// prior to trying this lowering.
10494 static SDValue lowerVectorShuffleAsLanePermuteAndBlend(SDLoc DL, MVT VT,
10495 SDValue V1, SDValue V2,
10496 ArrayRef<int> Mask,
10497 SelectionDAG &DAG) {
10498 // FIXME: This should probably be generalized for 512-bit vectors as well.
10499 assert(VT.getSizeInBits() == 256 && "Only for 256-bit vector shuffles!");
10500 int LaneSize = Mask.size() / 2;
10502 // If there are only inputs from one 128-bit lane, splitting will in fact be
10503 // less expensive. The flags track whether the given lane contains an element
10504 // that crosses to another lane.
10505 bool LaneCrossing[2] = {false, false};
10506 for (int i = 0, Size = Mask.size(); i < Size; ++i)
10507 if (Mask[i] >= 0 && (Mask[i] % Size) / LaneSize != i / LaneSize)
10508 LaneCrossing[(Mask[i] % Size) / LaneSize] = true;
10509 if (!LaneCrossing[0] || !LaneCrossing[1])
10510 return splitAndLowerVectorShuffle(DL, VT, V1, V2, Mask, DAG);
10512 if (isSingleInputShuffleMask(Mask)) {
10513 SmallVector<int, 32> FlippedBlendMask;
10514 for (int i = 0, Size = Mask.size(); i < Size; ++i)
10515 FlippedBlendMask.push_back(
10516 Mask[i] < 0 ? -1 : (((Mask[i] % Size) / LaneSize == i / LaneSize)
10517 ? Mask[i]
10518 : Mask[i] % LaneSize +
10519 (i / LaneSize) * LaneSize + Size));
10521 // Flip the vector, and blend the results which should now be in-lane. The
10522 // VPERM2X128 mask uses the low 2 bits for the low source and bits 4 and
10523 // 5 for the high source. The value 3 selects the high half of source 2 and
10524 // the value 2 selects the low half of source 2. We only use source 2 to
10525 // allow folding it into a memory operand.
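// (With source 1 undef and V1 passed as source 2, the immediate 3 | 2 << 4
// places source 2's high half in the low half of the result and source 2's
// low half in the high half, i.e. Flipped is V1 with its 128-bit halves
// swapped.)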
10526 unsigned PERMMask = 3 | 2 << 4;
10527 SDValue Flipped = DAG.getNode(X86ISD::VPERM2X128, DL, VT, DAG.getUNDEF(VT),
10528 V1, DAG.getConstant(PERMMask, MVT::i8));
10529 return DAG.getVectorShuffle(VT, DL, V1, Flipped, FlippedBlendMask);
10532 // This now reduces to two single-input shuffles of V1 and V2 which at worst
10533 // will be handled by the above logic and a blend of the results, much like
10534 // other patterns in AVX.
10535 return lowerVectorShuffleAsDecomposedShuffleBlend(DL, VT, V1, V2, Mask, DAG);
10538 /// \brief Handle lowering 2-lane 128-bit shuffles.
10539 static SDValue lowerV2X128VectorShuffle(SDLoc DL, MVT VT, SDValue V1,
10540 SDValue V2, ArrayRef<int> Mask,
10541 const X86Subtarget *Subtarget,
10542 SelectionDAG &DAG) {
10543 // Blends are faster and handle all the non-lane-crossing cases.
10544 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, VT, V1, V2, Mask,
10548 MVT SubVT = MVT::getVectorVT(VT.getVectorElementType(),
10549 VT.getVectorNumElements() / 2);
10550 // Check for patterns which can be matched with a single insert of a 128-bit
10551 // subvector.
10552 if (isShuffleEquivalent(V1, V2, Mask, 0, 1, 0, 1) ||
10553 isShuffleEquivalent(V1, V2, Mask, 0, 1, 4, 5)) {
10554 SDValue LoV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT, V1,
10555 DAG.getIntPtrConstant(0));
10556 SDValue HiV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT,
10557 Mask[2] < 4 ? V1 : V2, DAG.getIntPtrConstant(0));
10558 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, LoV, HiV);
10560 if (isShuffleEquivalent(V1, V2, Mask, 0, 1, 6, 7)) {
10561 SDValue LoV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT, V1,
10562 DAG.getIntPtrConstant(0));
10563 SDValue HiV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT, V2,
10564 DAG.getIntPtrConstant(2));
10565 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, LoV, HiV);
10568 // Otherwise form a 128-bit permutation.
10569 // FIXME: Detect zero-vector inputs and use the VPERM2X128 to zero that half.
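// (Mask[0] / 2 and Mask[2] / 2 each name one of the four 128-bit halves,
// 0/1 for V1 and 2/3 for V2, matching the VPERM2X128 immediate's source
// selectors for the low and high result halves.)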
10570 unsigned PermMask = Mask[0] / 2 | (Mask[2] / 2) << 4;
10571 return DAG.getNode(X86ISD::VPERM2X128, DL, VT, V1, V2,
10572 DAG.getConstant(PermMask, MVT::i8));
10575 /// \brief Lower a vector shuffle by first fixing the 128-bit lanes and then
10576 /// shuffling each lane.
10578 /// This will only succeed when the result of fixing the 128-bit lanes results
10579 /// in a single-input non-lane-crossing shuffle with a repeating shuffle mask in
10580 /// each 128-bit lane. This handles many cases where we can quickly blend away
10581 /// the lane crosses early and then use simpler shuffles within each lane.
10583 /// FIXME: It might be worthwhile at some point to support this without
10584 /// requiring the 128-bit lane-relative shuffles to be repeating, but currently
10585 /// in x86 only floating point has interesting non-repeating shuffles, and even
10586 /// those are still *marginally* more expensive.
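/// As an illustration: the v8f32 mask <13, 12, 15, 14, 5, 4, 7, 6> becomes a
/// v4f64 lane shuffle <6, 7, 2, 3> (V2's high lane, then V1's high lane)
/// followed by the repeated in-lane swap <1, 0, 3, 2, 5, 4, 7, 6>.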
10587 static SDValue lowerVectorShuffleByMerging128BitLanes(
10588 SDLoc DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
10589 const X86Subtarget *Subtarget, SelectionDAG &DAG) {
10590 assert(!isSingleInputShuffleMask(Mask) &&
10591 "This is only useful with multiple inputs.");
10593 int Size = Mask.size();
10594 int LaneSize = 128 / VT.getScalarSizeInBits();
10595 int NumLanes = Size / LaneSize;
10596 assert(NumLanes > 1 && "Only handles 256-bit and wider shuffles.");
10598 // See if we can build a hypothetical 128-bit lane-fixing shuffle mask. Also
10599 // check whether the in-128-bit lane shuffles share a repeating pattern.
10600 SmallVector<int, 4> Lanes;
10601 Lanes.resize(NumLanes, -1);
10602 SmallVector<int, 4> InLaneMask;
10603 InLaneMask.resize(LaneSize, -1);
10604 for (int i = 0; i < Size; ++i) {
10605 if (Mask[i] < 0)
10606 continue;
10608 int j = i / LaneSize;
10610 if (Lanes[j] < 0) {
10611 // First entry we've seen for this lane.
10612 Lanes[j] = Mask[i] / LaneSize;
10613 } else if (Lanes[j] != Mask[i] / LaneSize) {
10614 // This doesn't match the lane selected previously!
10615 return SDValue();
10618 // Check that within each lane we have a consistent shuffle mask.
10619 int k = i % LaneSize;
10620 if (InLaneMask[k] < 0) {
10621 InLaneMask[k] = Mask[i] % LaneSize;
10622 } else if (InLaneMask[k] != Mask[i] % LaneSize) {
10623 // This doesn't fit a repeating in-lane mask.
10624 return SDValue();
10628 // First shuffle the lanes into place.
10629 MVT LaneVT = MVT::getVectorVT(VT.isFloatingPoint() ? MVT::f64 : MVT::i64,
10630 VT.getSizeInBits() / 64);
10631 SmallVector<int, 8> LaneMask;
10632 LaneMask.resize(NumLanes * 2, -1);
10633 for (int i = 0; i < NumLanes; ++i)
10634 if (Lanes[i] >= 0) {
10635 LaneMask[2 * i + 0] = 2*Lanes[i] + 0;
10636 LaneMask[2 * i + 1] = 2*Lanes[i] + 1;
10639 V1 = DAG.getNode(ISD::BITCAST, DL, LaneVT, V1);
10640 V2 = DAG.getNode(ISD::BITCAST, DL, LaneVT, V2);
10641 SDValue LaneShuffle = DAG.getVectorShuffle(LaneVT, DL, V1, V2, LaneMask);
10643 // Cast it back to the type we actually want.
10644 LaneShuffle = DAG.getNode(ISD::BITCAST, DL, VT, LaneShuffle);
10646 // Now do a simple shuffle that isn't lane crossing.
10647 SmallVector<int, 8> NewMask;
10648 NewMask.resize(Size, -1);
10649 for (int i = 0; i < Size; ++i)
10650 if (Mask[i] >= 0)
10651 NewMask[i] = (i / LaneSize) * LaneSize + Mask[i] % LaneSize;
10652 assert(!is128BitLaneCrossingShuffleMask(VT, NewMask) &&
10653 "Must not introduce lane crosses at this point!");
10655 return DAG.getVectorShuffle(VT, DL, LaneShuffle, DAG.getUNDEF(VT), NewMask);
10658 /// \brief Test whether the specified input (0 or 1) is in-place blended by the
10661 /// This returns true if the elements from a particular input are already in the
10662 /// slots required by the given mask and require no permutation.
10663 static bool isShuffleMaskInputInPlace(int Input, ArrayRef<int> Mask) {
10664 assert((Input == 0 || Input == 1) && "Only two inputs to shuffles.");
10665 int Size = Mask.size();
10666 for (int i = 0; i < Size; ++i)
10667 if (Mask[i] >= 0 && Mask[i] / Size == Input && Mask[i] % Size != i)
10668 return false;
10670 return true;
10673 /// \brief Handle lowering of 4-lane 64-bit floating point shuffles.
10675 /// Also ends up handling lowering of 4-lane 64-bit integer shuffles when AVX2
10676 /// isn't available.
10677 static SDValue lowerV4F64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
10678 const X86Subtarget *Subtarget,
10679 SelectionDAG &DAG) {
10681 assert(V1.getSimpleValueType() == MVT::v4f64 && "Bad operand type!");
10682 assert(V2.getSimpleValueType() == MVT::v4f64 && "Bad operand type!");
10683 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
10684 ArrayRef<int> Mask = SVOp->getMask();
10685 assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");
10687 SmallVector<int, 4> WidenedMask;
10688 if (canWidenShuffleElements(Mask, WidenedMask))
10689 return lowerV2X128VectorShuffle(DL, MVT::v4f64, V1, V2, Mask, Subtarget,
10692 if (isSingleInputShuffleMask(Mask)) {
10693 // Check for being able to broadcast a single element.
10694 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v4f64, DL, V1,
10695 Mask, Subtarget, DAG))
10698 // Use low duplicate instructions for masks that match their pattern.
10699 if (isShuffleEquivalent(V1, V2, Mask, 0, 0, 2, 2))
10700 return DAG.getNode(X86ISD::MOVDDUP, DL, MVT::v4f64, V1);
10702 if (!is128BitLaneCrossingShuffleMask(MVT::v4f64, Mask)) {
10703 // Non-half-crossing single input shuffles can be lowered with an
10704 // interleaved permutation.
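// (VPERMILPD takes one selection bit per f64 element, each choosing between
// the two elements of that element's own 128-bit lane, so bit i is set
// exactly when Mask[i] names the odd element of its lane.)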
10705 unsigned VPERMILPMask = (Mask[0] == 1) | ((Mask[1] == 1) << 1) |
10706 ((Mask[2] == 3) << 2) | ((Mask[3] == 3) << 3);
10707 return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v4f64, V1,
10708 DAG.getConstant(VPERMILPMask, MVT::i8));
10711 // With AVX2 we have direct support for this permutation.
10712 if (Subtarget->hasAVX2())
10713 return DAG.getNode(X86ISD::VPERMI, DL, MVT::v4f64, V1,
10714 getV4X86ShuffleImm8ForMask(Mask, DAG));
10716 // Otherwise, fall back.
10717 return lowerVectorShuffleAsLanePermuteAndBlend(DL, MVT::v4f64, V1, V2, Mask,
10721 // X86 has dedicated unpack instructions that can handle specific blend
10722 // operations: UNPCKH and UNPCKL.
10723 if (isShuffleEquivalent(V1, V2, Mask, 0, 4, 2, 6))
10724 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v4f64, V1, V2);
10725 if (isShuffleEquivalent(V1, V2, Mask, 1, 5, 3, 7))
10726 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v4f64, V1, V2);
10728 // If we have a single input to the zero element, insert that into V1 if we
10729 // can do so cheaply.
10730 int NumV2Elements =
10731 std::count_if(Mask.begin(), Mask.end(), [](int M) { return M >= 4; });
10732 if (NumV2Elements == 1 && Mask[0] >= 4)
10733 if (SDValue Insertion = lowerVectorShuffleAsElementInsertion(
10734 MVT::v4f64, DL, V1, V2, Mask, Subtarget, DAG))
10737 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v4f64, V1, V2, Mask,
10741 // Check if the blend happens to exactly fit that of SHUFPD.
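// (SHUFPD on 256-bit vectors picks, per 128-bit lane, an element of the first
// source for the even result slot and an element of the second source for the
// odd result slot; the four immediate bits built below encode those picks.)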
10742 if ((Mask[0] == -1 || Mask[0] < 2) &&
10743 (Mask[1] == -1 || (Mask[1] >= 4 && Mask[1] < 6)) &&
10744 (Mask[2] == -1 || (Mask[2] >= 2 && Mask[2] < 4)) &&
10745 (Mask[3] == -1 || Mask[3] >= 6)) {
10746 unsigned SHUFPDMask = (Mask[0] == 1) | ((Mask[1] == 5) << 1) |
10747 ((Mask[2] == 3) << 2) | ((Mask[3] == 7) << 3);
10748 return DAG.getNode(X86ISD::SHUFP, DL, MVT::v4f64, V1, V2,
10749 DAG.getConstant(SHUFPDMask, MVT::i8));
10751 if ((Mask[0] == -1 || (Mask[0] >= 4 && Mask[0] < 6)) &&
10752 (Mask[1] == -1 || Mask[1] < 2) &&
10753 (Mask[2] == -1 || Mask[2] >= 6) &&
10754 (Mask[3] == -1 || (Mask[3] >= 2 && Mask[3] < 4))) {
10755 unsigned SHUFPDMask = (Mask[0] == 5) | ((Mask[1] == 1) << 1) |
10756 ((Mask[2] == 7) << 2) | ((Mask[3] == 3) << 3);
10757 return DAG.getNode(X86ISD::SHUFP, DL, MVT::v4f64, V2, V1,
10758 DAG.getConstant(SHUFPDMask, MVT::i8));
10761 // Try to simplify this by merging 128-bit lanes to enable a lane-based
10762 // shuffle. However, if we have AVX2 and either inputs are already in place,
10763 // we will be able to shuffle even across lanes the other input in a single
10764 // instruction so skip this pattern.
10765 if (!(Subtarget->hasAVX2() && (isShuffleMaskInputInPlace(0, Mask) ||
10766 isShuffleMaskInputInPlace(1, Mask))))
10767 if (SDValue Result = lowerVectorShuffleByMerging128BitLanes(
10768 DL, MVT::v4f64, V1, V2, Mask, Subtarget, DAG))
10771 // If we have AVX2 then we always want to lower with a blend because with v4
10772 // shuffles we can fully permute the elements.
10773 if (Subtarget->hasAVX2())
10774 return lowerVectorShuffleAsDecomposedShuffleBlend(DL, MVT::v4f64, V1, V2,
10775 Mask, DAG);
10777 // Otherwise fall back on generic lowering.
10778 return lowerVectorShuffleAsSplitOrBlend(DL, MVT::v4f64, V1, V2, Mask, DAG);
10781 /// \brief Handle lowering of 4-lane 64-bit integer shuffles.
10783 /// This routine is only called when we have AVX2 and thus a reasonable
10784 /// instruction set for v4i64 shuffling.
10785 static SDValue lowerV4I64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
10786 const X86Subtarget *Subtarget,
10787 SelectionDAG &DAG) {
10789 assert(V1.getSimpleValueType() == MVT::v4i64 && "Bad operand type!");
10790 assert(V2.getSimpleValueType() == MVT::v4i64 && "Bad operand type!");
10791 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
10792 ArrayRef<int> Mask = SVOp->getMask();
10793 assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");
10794 assert(Subtarget->hasAVX2() && "We can only lower v4i64 with AVX2!");
10796 SmallVector<int, 4> WidenedMask;
10797 if (canWidenShuffleElements(Mask, WidenedMask))
10798 return lowerV2X128VectorShuffle(DL, MVT::v4i64, V1, V2, Mask, Subtarget,
10801 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v4i64, V1, V2, Mask,
10805 // Check for being able to broadcast a single element.
10806 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v4i64, DL, V1,
10807 Mask, Subtarget, DAG))
10810 // When the shuffle is mirrored between the 128-bit lanes of the unit, we can
10811 // use lower latency instructions that will operate on both 128-bit lanes.
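// (A repeated v2i64 selector can be expressed as a v8i32 PSHUFD immediate by
// expanding each 64-bit index M into the 32-bit pair 2*M and 2*M+1, which is
// what the loop below builds.)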
10812 SmallVector<int, 2> RepeatedMask;
10813 if (is128BitLaneRepeatedShuffleMask(MVT::v4i64, Mask, RepeatedMask)) {
10814 if (isSingleInputShuffleMask(Mask)) {
10815 int PSHUFDMask[] = {-1, -1, -1, -1};
10816 for (int i = 0; i < 2; ++i)
10817 if (RepeatedMask[i] >= 0) {
10818 PSHUFDMask[2 * i] = 2 * RepeatedMask[i];
10819 PSHUFDMask[2 * i + 1] = 2 * RepeatedMask[i] + 1;
10821 return DAG.getNode(
10822 ISD::BITCAST, DL, MVT::v4i64,
10823 DAG.getNode(X86ISD::PSHUFD, DL, MVT::v8i32,
10824 DAG.getNode(ISD::BITCAST, DL, MVT::v8i32, V1),
10825 getV4X86ShuffleImm8ForMask(PSHUFDMask, DAG)));
10829 // AVX2 provides a direct instruction for permuting a single input across
10830 // lanes.
10831 if (isSingleInputShuffleMask(Mask))
10832 return DAG.getNode(X86ISD::VPERMI, DL, MVT::v4i64, V1,
10833 getV4X86ShuffleImm8ForMask(Mask, DAG));
10835 // Try to use byte shift instructions.
10836 if (SDValue Shift = lowerVectorShuffleAsByteShift(
10837 DL, MVT::v4i64, V1, V2, Mask, DAG))
10840 // Use dedicated unpack instructions for masks that match their pattern.
10841 if (isShuffleEquivalent(V1, V2, Mask, 0, 4, 2, 6))
10842 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v4i64, V1, V2);
10843 if (isShuffleEquivalent(V1, V2, Mask, 1, 5, 3, 7))
10844 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v4i64, V1, V2);
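  // For illustration: the mask <0, 4, 2, 6> above matches UNPCKL, which
  // interleaves the low quadword of each 128-bit lane, producing
  // <V1[0], V2[0], V1[2], V2[2]>.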
10846 // Try to simplify this by merging 128-bit lanes to enable a lane-based
10847 // shuffle. However, if we have AVX2 and either input is already in place, we
10848 // will be able to shuffle the other input across lanes in a single
10849 // instruction, so skip this pattern.
10850 if (!(Subtarget->hasAVX2() && (isShuffleMaskInputInPlace(0, Mask) ||
10851 isShuffleMaskInputInPlace(1, Mask))))
10852 if (SDValue Result = lowerVectorShuffleByMerging128BitLanes(
10853 DL, MVT::v4i64, V1, V2, Mask, Subtarget, DAG))
10856 // Otherwise fall back on generic blend lowering.
10857 return lowerVectorShuffleAsDecomposedShuffleBlend(DL, MVT::v4i64, V1, V2,
10861 /// \brief Handle lowering of 8-lane 32-bit floating point shuffles.
10863 /// Also ends up handling lowering of 8-lane 32-bit integer shuffles when AVX2
10864 /// isn't available.
10865 static SDValue lowerV8F32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
10866 const X86Subtarget *Subtarget,
10867 SelectionDAG &DAG) {
10869 assert(V1.getSimpleValueType() == MVT::v8f32 && "Bad operand type!");
10870 assert(V2.getSimpleValueType() == MVT::v8f32 && "Bad operand type!");
10871 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
10872 ArrayRef<int> Mask = SVOp->getMask();
10873 assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
10875 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v8f32, V1, V2, Mask,
10879 // Check for being able to broadcast a single element.
10880 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v8f32, DL, V1,
10881 Mask, Subtarget, DAG))
10884 // If the shuffle mask is repeated in each 128-bit lane, we have many more
10885 // options to efficiently lower the shuffle.
10886 SmallVector<int, 4> RepeatedMask;
10887 if (is128BitLaneRepeatedShuffleMask(MVT::v8f32, Mask, RepeatedMask)) {
10888 assert(RepeatedMask.size() == 4 &&
10889 "Repeated masks must be half the mask width!");
10891 // Use even/odd duplicate instructions for masks that match their pattern.
10892 if (isShuffleEquivalent(V1, V2, Mask, 0, 0, 2, 2, 4, 4, 6, 6))
10893 return DAG.getNode(X86ISD::MOVSLDUP, DL, MVT::v8f32, V1);
10894 if (isShuffleEquivalent(V1, V2, Mask, 1, 1, 3, 3, 5, 5, 7, 7))
10895 return DAG.getNode(X86ISD::MOVSHDUP, DL, MVT::v8f32, V1);
10897 if (isSingleInputShuffleMask(Mask))
10898 return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v8f32, V1,
10899 getV4X86ShuffleImm8ForMask(RepeatedMask, DAG));
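  // For illustration: the mask <2, 3, 0, 1, 6, 7, 4, 5> repeats <2, 3, 0, 1>
  // in both lanes, so a single VPERMILPS with immediate 0x4E handles it.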
10901 // Use dedicated unpack instructions for masks that match their pattern.
10902 if (isShuffleEquivalent(V1, V2, Mask, 0, 8, 1, 9, 4, 12, 5, 13))
10903 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v8f32, V1, V2);
10904 if (isShuffleEquivalent(V1, V2, Mask, 2, 10, 3, 11, 6, 14, 7, 15))
10905 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v8f32, V1, V2);
10907 // Otherwise, fall back to a SHUFPS sequence. Here it is important that we
10908 // have already handled any direct blends. We also need to squash the
10909 // repeated mask into a simulated v4f32 mask.
10910 for (int i = 0; i < 4; ++i)
10911 if (RepeatedMask[i] >= 8)
10912 RepeatedMask[i] -= 4;
10913 return lowerVectorShuffleWithSHUFPS(DL, MVT::v8f32, RepeatedMask, V1, V2, DAG);
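  // For illustration: repeated-mask entries of 8..11 refer to elements of V2's
  // lane, so subtracting 4 above maps them to 4..7, the range a two-input
  // v4f32 mask uses (e.g. a repeated entry of 9 becomes 5).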
10916 // If we have a single input shuffle with different shuffle patterns in the
10917 // two 128-bit lanes, use the variable-mask form of VPERMILPS.
10918 if (isSingleInputShuffleMask(Mask)) {
10919 SDValue VPermMask[8];
10920 for (int i = 0; i < 8; ++i)
10921 VPermMask[i] = Mask[i] < 0 ? DAG.getUNDEF(MVT::i32)
10922 : DAG.getConstant(Mask[i], MVT::i32);
10923 if (!is128BitLaneCrossingShuffleMask(MVT::v8f32, Mask))
10924 return DAG.getNode(
10925 X86ISD::VPERMILPV, DL, MVT::v8f32, V1,
10926 DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v8i32, VPermMask));
10928 if (Subtarget->hasAVX2())
10929 return DAG.getNode(X86ISD::VPERMV, DL, MVT::v8f32,
10930 DAG.getNode(ISD::BITCAST, DL, MVT::v8f32,
10931 DAG.getNode(ISD::BUILD_VECTOR, DL,
10932                                                  MVT::v8i32, VPermMask)),
10933                        V1);
10935 // Otherwise, fall back.
10936 return lowerVectorShuffleAsLanePermuteAndBlend(DL, MVT::v8f32, V1, V2, Mask,
10940 // Try to simplify this by merging 128-bit lanes to enable a lane-based shuffle.
10942 if (SDValue Result = lowerVectorShuffleByMerging128BitLanes(
10943 DL, MVT::v8f32, V1, V2, Mask, Subtarget, DAG))
10946 // If we have AVX2 then we always want to lower with a blend because at v8 we
10947 // can fully permute the elements.
10948 if (Subtarget->hasAVX2())
10949 return lowerVectorShuffleAsDecomposedShuffleBlend(DL, MVT::v8f32, V1, V2,
10952 // Otherwise fall back on generic lowering.
10953 return lowerVectorShuffleAsSplitOrBlend(DL, MVT::v8f32, V1, V2, Mask, DAG);
10956 /// \brief Handle lowering of 8-lane 32-bit integer shuffles.
10958 /// This routine is only called when we have AVX2 and thus a reasonable
10959 /// instruction set for v8i32 shuffling.
10960 static SDValue lowerV8I32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
10961 const X86Subtarget *Subtarget,
10962 SelectionDAG &DAG) {
10964 assert(V1.getSimpleValueType() == MVT::v8i32 && "Bad operand type!");
10965 assert(V2.getSimpleValueType() == MVT::v8i32 && "Bad operand type!");
10966 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
10967 ArrayRef<int> Mask = SVOp->getMask();
10968 assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
10969 assert(Subtarget->hasAVX2() && "We can only lower v8i32 with AVX2!");
10971 // Whenever we can lower this as a zext, that instruction is strictly faster
10972 // than any alternative. It also allows us to fold memory operands into the
10973 // shuffle in many cases.
10974 if (SDValue ZExt = lowerVectorShuffleAsZeroOrAnyExtend(DL, MVT::v8i32, V1, V2,
10975 Mask, Subtarget, DAG))
10978 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v8i32, V1, V2, Mask,
10982 // Check for being able to broadcast a single element.
10983 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v8i32, DL, V1,
10984 Mask, Subtarget, DAG))
10987 // If the shuffle mask is repeated in each 128-bit lane we can use more
10988 // efficient instructions that mirror the shuffles across the two 128-bit lanes.
10990 SmallVector<int, 4> RepeatedMask;
10991 if (is128BitLaneRepeatedShuffleMask(MVT::v8i32, Mask, RepeatedMask)) {
10992 assert(RepeatedMask.size() == 4 && "Unexpected repeated mask size!");
10993 if (isSingleInputShuffleMask(Mask))
10994 return DAG.getNode(X86ISD::PSHUFD, DL, MVT::v8i32, V1,
10995 getV4X86ShuffleImm8ForMask(RepeatedMask, DAG));
10997 // Use dedicated unpack instructions for masks that match their pattern.
10998 if (isShuffleEquivalent(V1, V2, Mask, 0, 8, 1, 9, 4, 12, 5, 13))
10999 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v8i32, V1, V2);
11000 if (isShuffleEquivalent(V1, V2, Mask, 2, 10, 3, 11, 6, 14, 7, 15))
11001 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v8i32, V1, V2);
11004 // Try to use bit shift instructions.
11005 if (SDValue Shift = lowerVectorShuffleAsBitShift(
11006 DL, MVT::v8i32, V1, V2, Mask, DAG))
11009 // Try to use byte shift instructions.
11010 if (SDValue Shift = lowerVectorShuffleAsByteShift(
11011 DL, MVT::v8i32, V1, V2, Mask, DAG))
11014 if (SDValue Rotate = lowerVectorShuffleAsByteRotate(
11015 DL, MVT::v8i32, V1, V2, Mask, Subtarget, DAG))
11018 // If the shuffle patterns aren't repeated but it is a single input, directly
11019 // generate a cross-lane VPERMD instruction.
11020 if (isSingleInputShuffleMask(Mask)) {
11021 SDValue VPermMask[8];
11022 for (int i = 0; i < 8; ++i)
11023 VPermMask[i] = Mask[i] < 0 ? DAG.getUNDEF(MVT::i32)
11024 : DAG.getConstant(Mask[i], MVT::i32);
11025 return DAG.getNode(
11026 X86ISD::VPERMV, DL, MVT::v8i32,
11027 DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v8i32, VPermMask), V1);
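  // For illustration: note the operand order of X86ISD::VPERMV above (index
  // vector first, then the data vector). A mask such as <7, 6, 5, 4, 3, 2, 1, 0>
  // reverses all eight elements with one cross-lane VPERMD.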
11030 // Try to simplify this by merging 128-bit lanes to enable a lane-based shuffle.
11032 if (SDValue Result = lowerVectorShuffleByMerging128BitLanes(
11033 DL, MVT::v8i32, V1, V2, Mask, Subtarget, DAG))
11036 // Otherwise fall back on generic blend lowering.
11037 return lowerVectorShuffleAsDecomposedShuffleBlend(DL, MVT::v8i32, V1, V2,
11041 /// \brief Handle lowering of 16-lane 16-bit integer shuffles.
11043 /// This routine is only called when we have AVX2 and thus a reasonable
11044 /// instruction set for v16i16 shuffling.
11045 static SDValue lowerV16I16VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
11046 const X86Subtarget *Subtarget,
11047 SelectionDAG &DAG) {
11049 assert(V1.getSimpleValueType() == MVT::v16i16 && "Bad operand type!");
11050 assert(V2.getSimpleValueType() == MVT::v16i16 && "Bad operand type!");
11051 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
11052 ArrayRef<int> Mask = SVOp->getMask();
11053 assert(Mask.size() == 16 && "Unexpected mask size for v16 shuffle!");
11054 assert(Subtarget->hasAVX2() && "We can only lower v16i16 with AVX2!");
11056 // Whenever we can lower this as a zext, that instruction is strictly faster
11057 // than any alternative. It also allows us to fold memory operands into the
11058 // shuffle in many cases.
11059 if (SDValue ZExt = lowerVectorShuffleAsZeroOrAnyExtend(DL, MVT::v16i16, V1, V2,
11060 Mask, Subtarget, DAG))
11063 // Check for being able to broadcast a single element.
11064 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v16i16, DL, V1,
11065 Mask, Subtarget, DAG))
11068 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v16i16, V1, V2, Mask,
11072 // Use dedicated unpack instructions for masks that match their pattern.
11073 if (isShuffleEquivalent(V1, V2, Mask,
11074 // First 128-bit lane:
11075 0, 16, 1, 17, 2, 18, 3, 19,
11076 // Second 128-bit lane:
11077 8, 24, 9, 25, 10, 26, 11, 27))
11078 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v16i16, V1, V2);
11079 if (isShuffleEquivalent(V1, V2, Mask,
11080 // First 128-bit lane:
11081 4, 20, 5, 21, 6, 22, 7, 23,
11082 // Second 128-bit lane:
11083 12, 28, 13, 29, 14, 30, 15, 31))
11084 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v16i16, V1, V2);
11086 // Try to use bit shift instructions.
11087 if (SDValue Shift = lowerVectorShuffleAsBitShift(
11088 DL, MVT::v16i16, V1, V2, Mask, DAG))
11091 // Try to use byte shift instructions.
11092 if (SDValue Shift = lowerVectorShuffleAsByteShift(
11093 DL, MVT::v16i16, V1, V2, Mask, DAG))
11096 // Try to use byte rotation instructions.
11097 if (SDValue Rotate = lowerVectorShuffleAsByteRotate(
11098 DL, MVT::v16i16, V1, V2, Mask, Subtarget, DAG))
11101 if (isSingleInputShuffleMask(Mask)) {
11102 // There are no generalized cross-lane shuffle operations available on i16 element types.
11104 if (is128BitLaneCrossingShuffleMask(MVT::v16i16, Mask))
11105 return lowerVectorShuffleAsLanePermuteAndBlend(DL, MVT::v16i16, V1, V2,
11108 SDValue PSHUFBMask[32];
11109 for (int i = 0; i < 16; ++i) {
11110 if (Mask[i] == -1) {
11111 PSHUFBMask[2 * i] = PSHUFBMask[2 * i + 1] = DAG.getUNDEF(MVT::i8);
11115 int M = i < 8 ? Mask[i] : Mask[i] - 8;
11116 assert(M >= 0 && M < 8 && "Invalid single-input mask!");
11117 PSHUFBMask[2 * i] = DAG.getConstant(2 * M, MVT::i8);
11118 PSHUFBMask[2 * i + 1] = DAG.getConstant(2 * M + 1, MVT::i8);
11120 return DAG.getNode(
11121 ISD::BITCAST, DL, MVT::v16i16,
11123 X86ISD::PSHUFB, DL, MVT::v32i8,
11124 DAG.getNode(ISD::BITCAST, DL, MVT::v32i8, V1),
11125 DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v32i8, PSHUFBMask)));
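  // For illustration: a word-mask entry of 11 in the high half becomes M = 3,
  // i.e. byte indices 6 and 7 within that 128-bit lane, since VPSHUFB indexes
  // bytes per lane.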
11128 // Try to simplify this by merging 128-bit lanes to enable a lane-based shuffle.
11130 if (SDValue Result = lowerVectorShuffleByMerging128BitLanes(
11131 DL, MVT::v16i16, V1, V2, Mask, Subtarget, DAG))
11134 // Otherwise fall back on generic lowering.
11135 return lowerVectorShuffleAsSplitOrBlend(DL, MVT::v16i16, V1, V2, Mask, DAG);
11138 /// \brief Handle lowering of 32-lane 8-bit integer shuffles.
11140 /// This routine is only called when we have AVX2 and thus a reasonable
11141 /// instruction set for v32i8 shuffling.
11142 static SDValue lowerV32I8VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
11143 const X86Subtarget *Subtarget,
11144 SelectionDAG &DAG) {
11146 assert(V1.getSimpleValueType() == MVT::v32i8 && "Bad operand type!");
11147 assert(V2.getSimpleValueType() == MVT::v32i8 && "Bad operand type!");
11148 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
11149 ArrayRef<int> Mask = SVOp->getMask();
11150 assert(Mask.size() == 32 && "Unexpected mask size for v32 shuffle!");
11151 assert(Subtarget->hasAVX2() && "We can only lower v32i8 with AVX2!");
11153 // Whenever we can lower this as a zext, that instruction is strictly faster
11154 // than any alternative. It also allows us to fold memory operands into the
11155 // shuffle in many cases.
11156 if (SDValue ZExt = lowerVectorShuffleAsZeroOrAnyExtend(DL, MVT::v32i8, V1, V2,
11157 Mask, Subtarget, DAG))
11160 // Check for being able to broadcast a single element.
11161 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v32i8, DL, V1,
11162 Mask, Subtarget, DAG))
11165 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v32i8, V1, V2, Mask,
11169 // Use dedicated unpack instructions for masks that match their pattern.
11170 // Note that these are repeated 128-bit lane unpacks, not unpacks across all 256 bits.
11172 if (isShuffleEquivalent(
11174 // First 128-bit lane:
11175 0, 32, 1, 33, 2, 34, 3, 35, 4, 36, 5, 37, 6, 38, 7, 39,
11176 // Second 128-bit lane:
11177 16, 48, 17, 49, 18, 50, 19, 51, 20, 52, 21, 53, 22, 54, 23, 55))
11178 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v32i8, V1, V2);
11179 if (isShuffleEquivalent(
11181 // First 128-bit lane:
11182 8, 40, 9, 41, 10, 42, 11, 43, 12, 44, 13, 45, 14, 46, 15, 47,
11183 // Second 128-bit lane:
11184 24, 56, 25, 57, 26, 58, 27, 59, 28, 60, 29, 61, 30, 62, 31, 63))
11185 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v32i8, V1, V2);
11187 // Try to use bit shift instructions.
11188 if (SDValue Shift = lowerVectorShuffleAsBitShift(
11189 DL, MVT::v32i8, V1, V2, Mask, DAG))
11192 // Try to use byte shift instructions.
11193 if (SDValue Shift = lowerVectorShuffleAsByteShift(
11194 DL, MVT::v32i8, V1, V2, Mask, DAG))
11197 // Try to use byte rotation instructions.
11198 if (SDValue Rotate = lowerVectorShuffleAsByteRotate(
11199 DL, MVT::v32i8, V1, V2, Mask, Subtarget, DAG))
11202 if (isSingleInputShuffleMask(Mask)) {
11203 // There are no generalized cross-lane shuffle operations available on i8 element types.
11205 if (is128BitLaneCrossingShuffleMask(MVT::v32i8, Mask))
11206 return lowerVectorShuffleAsLanePermuteAndBlend(DL, MVT::v32i8, V1, V2,
11209 SDValue PSHUFBMask[32];
11210 for (int i = 0; i < 32; ++i)
11211   PSHUFBMask[i] =
11212       Mask[i] < 0
11213           ? DAG.getUNDEF(MVT::i8)
11214 : DAG.getConstant(Mask[i] < 16 ? Mask[i] : Mask[i] - 16, MVT::i8);
11216 return DAG.getNode(
11217 X86ISD::PSHUFB, DL, MVT::v32i8, V1,
11218 DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v32i8, PSHUFBMask));
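  // For illustration: a byte-mask entry of 20 (element 4 of the high half)
  // becomes the in-lane VPSHUFB index 4, while undef entries stay undef in the
  // control vector.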
11221 // Try to simplify this by merging 128-bit lanes to enable a lane-based shuffle.
11223 if (SDValue Result = lowerVectorShuffleByMerging128BitLanes(
11224 DL, MVT::v32i8, V1, V2, Mask, Subtarget, DAG))
11227 // Otherwise fall back on generic lowering.
11228 return lowerVectorShuffleAsSplitOrBlend(DL, MVT::v32i8, V1, V2, Mask, DAG);
11231 /// \brief High-level routine to lower various 256-bit x86 vector shuffles.
11233 /// This routine either breaks down the specific type of a 256-bit x86 vector
11234 /// shuffle or splits it into two 128-bit shuffles and fuses the results back
11235 /// together based on the available instructions.
11236 static SDValue lower256BitVectorShuffle(SDValue Op, SDValue V1, SDValue V2,
11237 MVT VT, const X86Subtarget *Subtarget,
11238 SelectionDAG &DAG) {
11240 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
11241 ArrayRef<int> Mask = SVOp->getMask();
11243 // There is a really nice hard cut-over between AVX1 and AVX2 that means we can
11244 // check for those subtargets here and avoid much of the subtarget querying in
11245 // the per-vector-type lowering routines. With AVX1 we have essentially *zero*
11246 // ability to manipulate a 256-bit vector with integer types. Since we'll use
11247 // floating point types there eventually, just immediately cast everything to
11248 // a float and operate entirely in that domain.
11249 if (VT.isInteger() && !Subtarget->hasAVX2()) {
11250 int ElementBits = VT.getScalarSizeInBits();
11251 if (ElementBits < 32)
11252 // No floating point type available, decompose into 128-bit vectors.
11253 return splitAndLowerVectorShuffle(DL, VT, V1, V2, Mask, DAG);
11255 MVT FpVT = MVT::getVectorVT(MVT::getFloatingPointVT(ElementBits),
11256 VT.getVectorNumElements());
11257 V1 = DAG.getNode(ISD::BITCAST, DL, FpVT, V1);
11258 V2 = DAG.getNode(ISD::BITCAST, DL, FpVT, V2);
11259 return DAG.getNode(ISD::BITCAST, DL, VT,
11260 DAG.getVectorShuffle(FpVT, DL, V1, V2, Mask));
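  // For illustration: on an AVX1-only target a v4i64 shuffle is recast as
  // v4f64 here, shuffled in the floating-point domain, and bitcast back, while
  // v16i16/v32i8 (element bits < 32) are split into 128-bit halves above.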
11263 switch (VT.SimpleTy) {
11265 return lowerV4F64VectorShuffle(Op, V1, V2, Subtarget, DAG);
11267 return lowerV4I64VectorShuffle(Op, V1, V2, Subtarget, DAG);
11269 return lowerV8F32VectorShuffle(Op, V1, V2, Subtarget, DAG);
11271 return lowerV8I32VectorShuffle(Op, V1, V2, Subtarget, DAG);
11273 return lowerV16I16VectorShuffle(Op, V1, V2, Subtarget, DAG);
11275 return lowerV32I8VectorShuffle(Op, V1, V2, Subtarget, DAG);
11278 llvm_unreachable("Not a valid 256-bit x86 vector type!");
11282 /// \brief Handle lowering of 8-lane 64-bit floating point shuffles.
11283 static SDValue lowerV8F64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
11284 const X86Subtarget *Subtarget,
11285 SelectionDAG &DAG) {
11287 assert(V1.getSimpleValueType() == MVT::v8f64 && "Bad operand type!");
11288 assert(V2.getSimpleValueType() == MVT::v8f64 && "Bad operand type!");
11289 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
11290 ArrayRef<int> Mask = SVOp->getMask();
11291 assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
11293 // X86 has dedicated unpack instructions that can handle specific blend
11294 // operations: UNPCKH and UNPCKL.
11295 if (isShuffleEquivalent(V1, V2, Mask, 0, 8, 2, 10, 4, 12, 6, 14))
11296 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v8f64, V1, V2);
11297 if (isShuffleEquivalent(V1, V2, Mask, 1, 9, 3, 11, 5, 13, 7, 15))
11298 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v8f64, V1, V2);
11300 // FIXME: Implement direct support for this type!
11301 return splitAndLowerVectorShuffle(DL, MVT::v8f64, V1, V2, Mask, DAG);
11304 /// \brief Handle lowering of 16-lane 32-bit floating point shuffles.
11305 static SDValue lowerV16F32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
11306 const X86Subtarget *Subtarget,
11307 SelectionDAG &DAG) {
11309 assert(V1.getSimpleValueType() == MVT::v16f32 && "Bad operand type!");
11310 assert(V2.getSimpleValueType() == MVT::v16f32 && "Bad operand type!");
11311 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
11312 ArrayRef<int> Mask = SVOp->getMask();
11313 assert(Mask.size() == 16 && "Unexpected mask size for v16 shuffle!");
11315 // Use dedicated unpack instructions for masks that match their pattern.
11316 if (isShuffleEquivalent(V1, V2, Mask,
11317 0, 16, 1, 17, 4, 20, 5, 21,
11318 8, 24, 9, 25, 12, 28, 13, 29))
11319 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v16f32, V1, V2);
11320 if (isShuffleEquivalent(V1, V2, Mask,
11321 2, 18, 3, 19, 6, 22, 7, 23,
11322 10, 26, 11, 27, 14, 30, 15, 31))
11323 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v16f32, V1, V2);
11325 // FIXME: Implement direct support for this type!
11326 return splitAndLowerVectorShuffle(DL, MVT::v16f32, V1, V2, Mask, DAG);
11329 /// \brief Handle lowering of 8-lane 64-bit integer shuffles.
11330 static SDValue lowerV8I64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
11331 const X86Subtarget *Subtarget,
11332 SelectionDAG &DAG) {
11334 assert(V1.getSimpleValueType() == MVT::v8i64 && "Bad operand type!");
11335 assert(V2.getSimpleValueType() == MVT::v8i64 && "Bad operand type!");
11336 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
11337 ArrayRef<int> Mask = SVOp->getMask();
11338 assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
11340 // X86 has dedicated unpack instructions that can handle specific blend
11341 // operations: UNPCKH and UNPCKL.
11342 if (isShuffleEquivalent(V1, V2, Mask, 0, 8, 2, 10, 4, 12, 6, 14))
11343 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v8i64, V1, V2);
11344 if (isShuffleEquivalent(V1, V2, Mask, 1, 9, 3, 11, 5, 13, 7, 15))
11345 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v8i64, V1, V2);
11347 // FIXME: Implement direct support for this type!
11348 return splitAndLowerVectorShuffle(DL, MVT::v8i64, V1, V2, Mask, DAG);
11351 /// \brief Handle lowering of 16-lane 32-bit integer shuffles.
11352 static SDValue lowerV16I32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
11353 const X86Subtarget *Subtarget,
11354 SelectionDAG &DAG) {
11356 assert(V1.getSimpleValueType() == MVT::v16i32 && "Bad operand type!");
11357 assert(V2.getSimpleValueType() == MVT::v16i32 && "Bad operand type!");
11358 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
11359 ArrayRef<int> Mask = SVOp->getMask();
11360 assert(Mask.size() == 16 && "Unexpected mask size for v16 shuffle!");
11362 // Use dedicated unpack instructions for masks that match their pattern.
11363 if (isShuffleEquivalent(V1, V2, Mask,
11364 0, 16, 1, 17, 4, 20, 5, 21,
11365 8, 24, 9, 25, 12, 28, 13, 29))
11366 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v16i32, V1, V2);
11367 if (isShuffleEquivalent(V1, V2, Mask,
11368 2, 18, 3, 19, 6, 22, 7, 23,
11369 10, 26, 11, 27, 14, 30, 15, 31))
11370 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v16i32, V1, V2);
11372 // FIXME: Implement direct support for this type!
11373 return splitAndLowerVectorShuffle(DL, MVT::v16i32, V1, V2, Mask, DAG);
11376 /// \brief Handle lowering of 32-lane 16-bit integer shuffles.
11377 static SDValue lowerV32I16VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
11378 const X86Subtarget *Subtarget,
11379 SelectionDAG &DAG) {
11381 assert(V1.getSimpleValueType() == MVT::v32i16 && "Bad operand type!");
11382 assert(V2.getSimpleValueType() == MVT::v32i16 && "Bad operand type!");
11383 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
11384 ArrayRef<int> Mask = SVOp->getMask();
11385 assert(Mask.size() == 32 && "Unexpected mask size for v32 shuffle!");
11386 assert(Subtarget->hasBWI() && "We can only lower v32i16 with AVX-512-BWI!");
11388 // FIXME: Implement direct support for this type!
11389 return splitAndLowerVectorShuffle(DL, MVT::v32i16, V1, V2, Mask, DAG);
11392 /// \brief Handle lowering of 64-lane 8-bit integer shuffles.
11393 static SDValue lowerV64I8VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
11394 const X86Subtarget *Subtarget,
11395 SelectionDAG &DAG) {
11397 assert(V1.getSimpleValueType() == MVT::v64i8 && "Bad operand type!");
11398 assert(V2.getSimpleValueType() == MVT::v64i8 && "Bad operand type!");
11399 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
11400 ArrayRef<int> Mask = SVOp->getMask();
11401 assert(Mask.size() == 64 && "Unexpected mask size for v64 shuffle!");
11402 assert(Subtarget->hasBWI() && "We can only lower v64i8 with AVX-512-BWI!");
11404 // FIXME: Implement direct support for this type!
11405 return splitAndLowerVectorShuffle(DL, MVT::v64i8, V1, V2, Mask, DAG);
11408 /// \brief High-level routine to lower various 512-bit x86 vector shuffles.
11410 /// This routine either breaks down the specific type of a 512-bit x86 vector
11411 /// shuffle or splits it into two 256-bit shuffles and fuses the results back
11412 /// together based on the available instructions.
11413 static SDValue lower512BitVectorShuffle(SDValue Op, SDValue V1, SDValue V2,
11414 MVT VT, const X86Subtarget *Subtarget,
11415 SelectionDAG &DAG) {
11417 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
11418 ArrayRef<int> Mask = SVOp->getMask();
11419 assert(Subtarget->hasAVX512() &&
11420 "Cannot lower 512-bit vectors w/ basic ISA!");
11422 // Check for being able to broadcast a single element.
11423 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(VT.SimpleTy, DL, V1,
11424 Mask, Subtarget, DAG))
11427 // Dispatch to each element type for lowering. If we don't have support for
11428 // specific element type shuffles at 512 bits, immediately split them and
11429 // lower them. Each lowering routine of a given type is allowed to assume that
11430 // the requisite ISA extensions for that element type are available.
11431 switch (VT.SimpleTy) {
11433 return lowerV8F64VectorShuffle(Op, V1, V2, Subtarget, DAG);
11435 return lowerV16F32VectorShuffle(Op, V1, V2, Subtarget, DAG);
11437 return lowerV8I64VectorShuffle(Op, V1, V2, Subtarget, DAG);
11439 return lowerV16I32VectorShuffle(Op, V1, V2, Subtarget, DAG);
11441 if (Subtarget->hasBWI())
11442 return lowerV32I16VectorShuffle(Op, V1, V2, Subtarget, DAG);
11445 if (Subtarget->hasBWI())
11446 return lowerV64I8VectorShuffle(Op, V1, V2, Subtarget, DAG);
11450 llvm_unreachable("Not a valid 512-bit x86 vector type!");
11453 // Otherwise fall back on splitting.
11454 return splitAndLowerVectorShuffle(DL, VT, V1, V2, Mask, DAG);
11457 /// \brief Top-level lowering for x86 vector shuffles.
11459 /// This handles decomposition, canonicalization, and lowering of all x86
11460 /// vector shuffles. Most of the specific lowering strategies are encapsulated
11461 /// above in helper routines. The canonicalization attempts to widen shuffles
11462 /// to involve fewer lanes of wider elements, consolidate symmetric patterns
11463 /// s.t. only one of the two inputs needs to be tested, etc.
11464 static SDValue lowerVectorShuffle(SDValue Op, const X86Subtarget *Subtarget,
11465 SelectionDAG &DAG) {
11466 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
11467 ArrayRef<int> Mask = SVOp->getMask();
11468 SDValue V1 = Op.getOperand(0);
11469 SDValue V2 = Op.getOperand(1);
11470 MVT VT = Op.getSimpleValueType();
11471 int NumElements = VT.getVectorNumElements();
11474 assert(VT.getSizeInBits() != 64 && "Can't lower MMX shuffles");
11476 bool V1IsUndef = V1.getOpcode() == ISD::UNDEF;
11477 bool V2IsUndef = V2.getOpcode() == ISD::UNDEF;
11478 if (V1IsUndef && V2IsUndef)
11479 return DAG.getUNDEF(VT);
11481 // When we create a shuffle node we put the UNDEF node in the second operand,
11482 // but in some cases the first operand may be transformed to UNDEF.
11483 // In this case we should just commute the node.
11485 return DAG.getCommutedVectorShuffle(*SVOp);
11487 // Check for non-undef masks pointing at an undef vector and make the masks
11488 // undef as well. This makes it easier to match the shuffle based solely on
11489 // the mask.
11490 if (V2IsUndef)
11491   for (int M : Mask)
11492     if (M >= NumElements) {
11493 SmallVector<int, 8> NewMask(Mask.begin(), Mask.end());
11494 for (int &M : NewMask)
11495       if (M >= NumElements)
11496         M = -1;
11497 return DAG.getVectorShuffle(VT, dl, V1, V2, NewMask);
11500 // We actually see shuffles that are entirely re-arrangements of a set of
11501 // zero inputs. This mostly happens while decomposing complex shuffles into
11502 // simple ones. Directly lower these as a buildvector of zeros.
11503 SmallBitVector Zeroable = computeZeroableShuffleElements(Mask, V1, V2);
11504 if (Zeroable.all())
11505 return getZeroVector(VT, Subtarget, DAG, dl);
11507 // Try to collapse shuffles into using a vector type with fewer elements but
11508 // wider element types. We cap this to not form integers or floating point
11509 // elements wider than 64 bits, but it might be interesting to form i128
11510 // integers to handle flipping the low and high halves of AVX 256-bit vectors.
11511 SmallVector<int, 16> WidenedMask;
11512 if (VT.getScalarSizeInBits() < 64 &&
11513 canWidenShuffleElements(Mask, WidenedMask)) {
11514 MVT NewEltVT = VT.isFloatingPoint()
11515 ? MVT::getFloatingPointVT(VT.getScalarSizeInBits() * 2)
11516 : MVT::getIntegerVT(VT.getScalarSizeInBits() * 2);
11517 MVT NewVT = MVT::getVectorVT(NewEltVT, VT.getVectorNumElements() / 2);
11518 // Make sure that the new vector type is legal. For example, v2f64 isn't legal on SSE1.
11520 if (DAG.getTargetLoweringInfo().isTypeLegal(NewVT)) {
11521 V1 = DAG.getNode(ISD::BITCAST, dl, NewVT, V1);
11522 V2 = DAG.getNode(ISD::BITCAST, dl, NewVT, V2);
11523 return DAG.getNode(ISD::BITCAST, dl, VT,
11524 DAG.getVectorShuffle(NewVT, dl, V1, V2, WidenedMask));
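  // For illustration: a v4i32 mask such as <0, 1, 4, 5> widens to the v2i64
  // mask <0, 2>, halving the element count before re-dispatching.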
11528 int NumV1Elements = 0, NumUndefElements = 0, NumV2Elements = 0;
11529 for (int M : SVOp->getMask())
11530   if (M < 0)
11531     ++NumUndefElements;
11532   else if (M < NumElements)
11533     ++NumV1Elements;
11534   else
11535     ++NumV2Elements;
11537 // Commute the shuffle as needed such that more elements come from V1 than
11538 // V2. This allows us to match the shuffle pattern strictly on how many
11539 // elements come from V1 without handling the symmetric cases.
11540 if (NumV2Elements > NumV1Elements)
11541 return DAG.getCommutedVectorShuffle(*SVOp);
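  // For illustration: the v4i32 mask <4, 5, 6, 1> has three elements from V2,
  // so it is commuted to <0, 1, 2, 5> with the operands swapped.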
11543 // When the number of V1 and V2 elements is the same, try to minimize the
11544 // number of uses of V2 in the low half of the vector. When that is tied,
11545 // ensure that the sum of indices for V1 is equal to or lower than the sum of
11546 // indices for V2. When those are equal, try to ensure that the number of odd
11547 // indices for V1 is lower than the number of odd indices for V2.
11548 if (NumV1Elements == NumV2Elements) {
11549 int LowV1Elements = 0, LowV2Elements = 0;
11550 for (int M : SVOp->getMask().slice(0, NumElements / 2))
11551     if (M >= NumElements)
11552       ++LowV2Elements;
11553     else if (M >= 0)
11554       ++LowV1Elements;
11555 if (LowV2Elements > LowV1Elements) {
11556 return DAG.getCommutedVectorShuffle(*SVOp);
11557 } else if (LowV2Elements == LowV1Elements) {
11558 int SumV1Indices = 0, SumV2Indices = 0;
11559 for (int i = 0, Size = SVOp->getMask().size(); i < Size; ++i)
11560       if (SVOp->getMask()[i] >= NumElements)
11561         SumV2Indices += i;
11562       else if (SVOp->getMask()[i] >= 0)
11563         SumV1Indices += i;
11564 if (SumV2Indices < SumV1Indices) {
11565 return DAG.getCommutedVectorShuffle(*SVOp);
11566 } else if (SumV2Indices == SumV1Indices) {
11567 int NumV1OddIndices = 0, NumV2OddIndices = 0;
11568 for (int i = 0, Size = SVOp->getMask().size(); i < Size; ++i)
11569 if (SVOp->getMask()[i] >= NumElements)
11570 NumV2OddIndices += i % 2;
11571 else if (SVOp->getMask()[i] >= 0)
11572 NumV1OddIndices += i % 2;
11573 if (NumV2OddIndices < NumV1OddIndices)
11574 return DAG.getCommutedVectorShuffle(*SVOp);
11579 // For each vector width, delegate to a specialized lowering routine.
11580 if (VT.getSizeInBits() == 128)
11581 return lower128BitVectorShuffle(Op, V1, V2, VT, Subtarget, DAG);
11583 if (VT.getSizeInBits() == 256)
11584 return lower256BitVectorShuffle(Op, V1, V2, VT, Subtarget, DAG);
11586 // Force AVX-512 vectors to be scalarized for now.
11587 // FIXME: Implement AVX-512 support!
11588 if (VT.getSizeInBits() == 512)
11589 return lower512BitVectorShuffle(Op, V1, V2, VT, Subtarget, DAG);
11591 llvm_unreachable("Unimplemented!");
11595 //===----------------------------------------------------------------------===//
11596 // Legacy vector shuffle lowering
11598 // This code is the legacy code handling vector shuffles until the above
11599 // replaces its functionality and performance.
11600 //===----------------------------------------------------------------------===//
11602 static bool isBlendMask(ArrayRef<int> MaskVals, MVT VT, bool hasSSE41,
11603 bool hasInt256, unsigned *MaskOut = nullptr) {
11604 MVT EltVT = VT.getVectorElementType();
11606 // There is no blend with immediate in AVX-512.
11607 if (VT.is512BitVector())
11610 if (!hasSSE41 || EltVT == MVT::i8)
11612 if (!hasInt256 && VT == MVT::v16i16)
11615 unsigned MaskValue = 0;
11616 unsigned NumElems = VT.getVectorNumElements();
11617 // There are 2 lanes if (NumElems > 8), and 1 lane otherwise.
11618 unsigned NumLanes = (NumElems - 1) / 8 + 1;
11619 unsigned NumElemsInLane = NumElems / NumLanes;
11621 // Blend for v16i16 should be symmetric for both lanes.
11622 for (unsigned i = 0; i < NumElemsInLane; ++i) {
11624 int SndLaneEltIdx = (NumLanes == 2) ? MaskVals[i + NumElemsInLane] : -1;
11625 int EltIdx = MaskVals[i];
11627 if ((EltIdx < 0 || EltIdx == (int)i) &&
11628 (SndLaneEltIdx < 0 || SndLaneEltIdx == (int)(i + NumElemsInLane)))
11631 if (((unsigned)EltIdx == (i + NumElems)) &&
11632 (SndLaneEltIdx < 0 ||
11633 (unsigned)SndLaneEltIdx == i + NumElems + NumElemsInLane))
11634 MaskValue |= (1 << i);
11640 *MaskOut = MaskValue;
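// For illustration: in the loop above, the v4i32 mask <0, 5, 2, 7> takes
// elements 1 and 3 from the second operand, so the computed blend MaskValue
// is 0b1010.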
11644 // Try to lower a shuffle node into a simple blend instruction.
11645 // This function assumes isBlendMask returns true for this
11646 // ShuffleVectorSDNode.
11647 static SDValue LowerVECTOR_SHUFFLEtoBlend(ShuffleVectorSDNode *SVOp,
11648 unsigned MaskValue,
11649 const X86Subtarget *Subtarget,
11650 SelectionDAG &DAG) {
11651 MVT VT = SVOp->getSimpleValueType(0);
11652 MVT EltVT = VT.getVectorElementType();
11653 assert(isBlendMask(SVOp->getMask(), VT, Subtarget->hasSSE41(),
11654                    Subtarget->hasInt256()) &&
11655        "Trying to lower a VECTOR_SHUFFLE to a Blend but "
11656        "with the wrong mask");
11657 SDValue V1 = SVOp->getOperand(0);
11658 SDValue V2 = SVOp->getOperand(1);
11660 unsigned NumElems = VT.getVectorNumElements();
11662 // Convert i32 vectors to floating point if it is not AVX2.
11663 // AVX2 introduced VPBLENDD instruction for 128 and 256-bit vectors.
11665 if (EltVT == MVT::i64 || (EltVT == MVT::i32 && !Subtarget->hasInt256())) {
11666 BlendVT = MVT::getVectorVT(MVT::getFloatingPointVT(EltVT.getSizeInBits()),
11668   V1 = DAG.getNode(ISD::BITCAST, dl, BlendVT, V1);
11669   V2 = DAG.getNode(ISD::BITCAST, dl, BlendVT, V2);
11672 SDValue Ret = DAG.getNode(X86ISD::BLENDI, dl, BlendVT, V1, V2,
11673 DAG.getConstant(MaskValue, MVT::i32));
11674 return DAG.getNode(ISD::BITCAST, dl, VT, Ret);
11677 /// In vector type \p VT, return true if the element at index \p InputIdx
11678 /// falls on a different 128-bit lane than \p OutputIdx.
11679 static bool ShuffleCrosses128bitLane(MVT VT, unsigned InputIdx,
11680 unsigned OutputIdx) {
11681 unsigned EltSize = VT.getVectorElementType().getSizeInBits();
11682 return InputIdx * EltSize / 128 != OutputIdx * EltSize / 128;
11685 /// Generate a PSHUFB if possible. Selects elements from \p V1 according to
11686 /// \p MaskVals. MaskVals[OutputIdx] = InputIdx specifies that we want to
11687 /// shuffle the element at InputIdx in V1 to OutputIdx in the result. If \p
11688 /// MaskVals refers to elements outside of \p V1 or is undef (-1), insert a
11690 static SDValue getPSHUFB(ArrayRef<int> MaskVals, SDValue V1, SDLoc &dl,
11691 SelectionDAG &DAG) {
11692 MVT VT = V1.getSimpleValueType();
11693 assert(VT.is128BitVector() || VT.is256BitVector());
11695 MVT EltVT = VT.getVectorElementType();
11696 unsigned EltSizeInBytes = EltVT.getSizeInBits() / 8;
11697 unsigned NumElts = VT.getVectorNumElements();
11699 SmallVector<SDValue, 32> PshufbMask;
11700 for (unsigned OutputIdx = 0; OutputIdx < NumElts; ++OutputIdx) {
11701 int InputIdx = MaskVals[OutputIdx];
11702 unsigned InputByteIdx;
11704 if (InputIdx < 0 || NumElts <= (unsigned)InputIdx)
11705 InputByteIdx = 0x80;
11707 // Cross lane is not allowed.
11708 if (ShuffleCrosses128bitLane(VT, InputIdx, OutputIdx))
11710 InputByteIdx = InputIdx * EltSizeInBytes;
11711 // Index is a byte offset within the 128-bit lane.
11712 InputByteIdx &= 0xf;
11715 for (unsigned j = 0; j < EltSizeInBytes; ++j) {
11716 PshufbMask.push_back(DAG.getConstant(InputByteIdx, MVT::i8));
11717 if (InputByteIdx != 0x80)
11722 MVT ShufVT = MVT::getVectorVT(MVT::i8, PshufbMask.size());
11724 V1 = DAG.getNode(ISD::BITCAST, dl, ShufVT, V1);
11725 return DAG.getNode(X86ISD::PSHUFB, dl, ShufVT, V1,
11726 DAG.getNode(ISD::BUILD_VECTOR, dl, ShufVT, PshufbMask));
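// For illustration of getPSHUFB above: with a v8i16 input, a mask value of 5
// expands to byte indices 10 and 11; out-of-range or undef values become 0x80,
// which makes PSHUFB write a zero byte.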
11729 // v8i16 shuffles - Prefer shuffles in the following order:
11730 // 1. [all] pshuflw, pshufhw, optional move
11731 // 2. [ssse3] 1 x pshufb
11732 // 3. [ssse3] 2 x pshufb + 1 x por
11733 // 4. [all] mov + pshuflw + pshufhw + N x (pextrw + pinsrw)
11735 LowerVECTOR_SHUFFLEv8i16(SDValue Op, const X86Subtarget *Subtarget,
11736 SelectionDAG &DAG) {
11737 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
11738 SDValue V1 = SVOp->getOperand(0);
11739 SDValue V2 = SVOp->getOperand(1);
11741 SmallVector<int, 8> MaskVals;
11743 // Determine if more than one of the words in each of the low and high quadwords
11744 // of the result comes from the same quadword of one of the two inputs. Undef
11745 // mask values count as coming from any quadword, for better codegen.
11747 // Lo/HiQuad[i] = j indicates how many words from the ith quad of the input
11748 // feeds this quad. For i, 0 and 1 refer to V1, 2 and 3 refer to V2.
11749 unsigned LoQuad[] = { 0, 0, 0, 0 };
11750 unsigned HiQuad[] = { 0, 0, 0, 0 };
11751 // Indices of quads used.
11752 std::bitset<4> InputQuads;
11753 for (unsigned i = 0; i < 8; ++i) {
11754 unsigned *Quad = i < 4 ? LoQuad : HiQuad;
11755 int EltIdx = SVOp->getMaskElt(i);
11756 MaskVals.push_back(EltIdx);
11764 ++Quad[EltIdx / 4];
11765 InputQuads.set(EltIdx / 4);
11768 int BestLoQuad = -1;
11769 unsigned MaxQuad = 1;
11770 for (unsigned i = 0; i < 4; ++i) {
11771 if (LoQuad[i] > MaxQuad) {
11773 MaxQuad = LoQuad[i];
11777 int BestHiQuad = -1;
11779 for (unsigned i = 0; i < 4; ++i) {
11780 if (HiQuad[i] > MaxQuad) {
11782 MaxQuad = HiQuad[i];
11786 // For SSSE3, If all 8 words of the result come from only 1 quadword of each
11787 // of the two input vectors, shuffle them into one input vector so only a
11788 // single pshufb instruction is necessary. If there are more than 2 input
11789 // quads, disable the next transformation since it does not help SSSE3.
11790 bool V1Used = InputQuads[0] || InputQuads[1];
11791 bool V2Used = InputQuads[2] || InputQuads[3];
11792 if (Subtarget->hasSSSE3()) {
11793 if (InputQuads.count() == 2 && V1Used && V2Used) {
11794 BestLoQuad = InputQuads[0] ? 0 : 1;
11795 BestHiQuad = InputQuads[2] ? 2 : 3;
11797 if (InputQuads.count() > 2) {
11803 // If BestLoQuad or BestHiQuad are set, shuffle the quads together and update
11804 // the shuffle mask. If a quad is scored as -1, that means that it contains
11805 // words from all 4 input quadwords.
11807 if (BestLoQuad >= 0 || BestHiQuad >= 0) {
11808   int MaskV[] = {
11809     BestLoQuad < 0 ? 0 : BestLoQuad,
11810     BestHiQuad < 0 ? 1 : BestHiQuad
11811   };
11812 NewV = DAG.getVectorShuffle(MVT::v2i64, dl,
11813 DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, V1),
11814 DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, V2), &MaskV[0]);
11815 NewV = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, NewV);
11817 // Rewrite the MaskVals and assign NewV to V1 if NewV now contains all the
11818 // source words for the shuffle, to aid later transformations.
11819 bool AllWordsInNewV = true;
11820 bool InOrder[2] = { true, true };
11821 for (unsigned i = 0; i != 8; ++i) {
11822 int idx = MaskVals[i];
11823     if (idx != (int)i)
11824       InOrder[i/4] = false;
11825 if (idx < 0 || (idx/4) == BestLoQuad || (idx/4) == BestHiQuad)
11827 AllWordsInNewV = false;
11831 bool pshuflw = AllWordsInNewV, pshufhw = AllWordsInNewV;
11832 if (AllWordsInNewV) {
11833 for (int i = 0; i != 8; ++i) {
11834 int idx = MaskVals[i];
11837 idx = MaskVals[i] = (idx / 4) == BestLoQuad ? (idx & 3) : (idx & 3) + 4;
11838 if ((idx != i) && idx < 4)
11840 if ((idx != i) && idx > 3)
11849 // If we've eliminated the use of V2, and the new mask is a pshuflw or
11850 // pshufhw, that's as cheap as it gets. Return the new shuffle.
11851 if ((pshufhw && InOrder[0]) || (pshuflw && InOrder[1])) {
11852 unsigned Opc = pshufhw ? X86ISD::PSHUFHW : X86ISD::PSHUFLW;
11853 unsigned TargetMask = 0;
11854 NewV = DAG.getVectorShuffle(MVT::v8i16, dl, NewV,
11855 DAG.getUNDEF(MVT::v8i16), &MaskVals[0]);
11856 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(NewV.getNode());
11857 TargetMask = pshufhw ? getShufflePSHUFHWImmediate(SVOp):
11858 getShufflePSHUFLWImmediate(SVOp);
11859 V1 = NewV.getOperand(0);
11860 return getTargetShuffleNode(Opc, dl, MVT::v8i16, V1, TargetMask, DAG);
11864 // Promote splats to a larger type which usually leads to more efficient code.
11865 // FIXME: Is this true if pshufb is available?
11866 if (SVOp->isSplat())
11867 return PromoteSplat(SVOp, DAG);
11869 // If we have SSSE3, and all words of the result are from 1 input vector,
11870 // case 2 is generated, otherwise case 3 is generated. If no SSSE3
11871 // is present, fall back to case 4.
11872 if (Subtarget->hasSSSE3()) {
11873 SmallVector<SDValue,16> pshufbMask;
11875 // If we have elements from both input vectors, set the high bit of the
11876 // shuffle mask element to zero out elements that come from V2 in the V1
11877 // mask, and elements that come from V1 in the V2 mask, so that the two
11878 // results can be OR'd together.
11879 bool TwoInputs = V1Used && V2Used;
11880 V1 = getPSHUFB(MaskVals, V1, dl, DAG);
11882 return DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V1);
11884 // Calculate the shuffle mask for the second input, shuffle it, and
11885 // OR it with the first shuffled input.
11886 CommuteVectorShuffleMask(MaskVals, 8);
11887 V2 = getPSHUFB(MaskVals, V2, dl, DAG);
11888 V1 = DAG.getNode(ISD::OR, dl, MVT::v16i8, V1, V2);
11889 return DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V1);
11892 // If BestLoQuad >= 0, generate a pshuflw to put the low elements in order,
11893 // and update MaskVals with new element order.
11894 std::bitset<8> InOrder;
11895 if (BestLoQuad >= 0) {
11896 int MaskV[] = { -1, -1, -1, -1, 4, 5, 6, 7 };
11897 for (int i = 0; i != 4; ++i) {
11898 int idx = MaskVals[i];
11901 } else if ((idx / 4) == BestLoQuad) {
11902 MaskV[i] = idx & 3;
11906 NewV = DAG.getVectorShuffle(MVT::v8i16, dl, NewV, DAG.getUNDEF(MVT::v8i16),
11909 if (NewV.getOpcode() == ISD::VECTOR_SHUFFLE && Subtarget->hasSSE2()) {
11910 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(NewV.getNode());
11911 NewV = getTargetShuffleNode(X86ISD::PSHUFLW, dl, MVT::v8i16,
11912 NewV.getOperand(0),
11913 getShufflePSHUFLWImmediate(SVOp), DAG);
11917 // If BestHi >= 0, generate a pshufhw to put the high elements in order,
11918 // and update MaskVals with the new element order.
11919 if (BestHiQuad >= 0) {
11920 int MaskV[] = { 0, 1, 2, 3, -1, -1, -1, -1 };
11921 for (unsigned i = 4; i != 8; ++i) {
11922 int idx = MaskVals[i];
11925 } else if ((idx / 4) == BestHiQuad) {
11926 MaskV[i] = (idx & 3) + 4;
11930 NewV = DAG.getVectorShuffle(MVT::v8i16, dl, NewV, DAG.getUNDEF(MVT::v8i16),
11933 if (NewV.getOpcode() == ISD::VECTOR_SHUFFLE && Subtarget->hasSSE2()) {
11934 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(NewV.getNode());
11935 NewV = getTargetShuffleNode(X86ISD::PSHUFHW, dl, MVT::v8i16,
11936 NewV.getOperand(0),
11937 getShufflePSHUFHWImmediate(SVOp), DAG);
11941 // In case BestHi & BestLo were both -1, which means each quadword has a word
11942 // from each of the four input quadwords, calculate the InOrder bitvector now
11943 // before falling through to the insert/extract cleanup.
11944 if (BestLoQuad == -1 && BestHiQuad == -1) {
11946 for (int i = 0; i != 8; ++i)
11947 if (MaskVals[i] < 0 || MaskVals[i] == i)
11951 // The other elements are put in the right place using pextrw and pinsrw.
11952 for (unsigned i = 0; i != 8; ++i) {
11955 int EltIdx = MaskVals[i];
11958 SDValue ExtOp = (EltIdx < 8) ?
11959 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16, V1,
11960 DAG.getIntPtrConstant(EltIdx)) :
11961 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16, V2,
11962 DAG.getIntPtrConstant(EltIdx - 8));
11963 NewV = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v8i16, NewV, ExtOp,
11964 DAG.getIntPtrConstant(i));
11969 /// \brief v16i16 shuffles
11971 /// FIXME: We only support generation of a single pshufb currently. We can
11972 /// generalize the other applicable cases from LowerVECTOR_SHUFFLEv8i16 as
11973 /// well (e.g. 2 x pshufb + 1 x por).
11975 LowerVECTOR_SHUFFLEv16i16(SDValue Op, SelectionDAG &DAG) {
11976 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
11977 SDValue V1 = SVOp->getOperand(0);
11978 SDValue V2 = SVOp->getOperand(1);
11981 if (V2.getOpcode() != ISD::UNDEF)
11984 SmallVector<int, 16> MaskVals(SVOp->getMask().begin(), SVOp->getMask().end());
11985 return getPSHUFB(MaskVals, V1, dl, DAG);
11988 // v16i8 shuffles - Prefer shuffles in the following order:
11989 // 1. [ssse3] 1 x pshufb
11990 // 2. [ssse3] 2 x pshufb + 1 x por
11991 // 3. [all] v8i16 shuffle + N x pextrw + rotate + pinsrw
11992 static SDValue LowerVECTOR_SHUFFLEv16i8(ShuffleVectorSDNode *SVOp,
11993 const X86Subtarget* Subtarget,
11994 SelectionDAG &DAG) {
11995 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
11996 SDValue V1 = SVOp->getOperand(0);
11997 SDValue V2 = SVOp->getOperand(1);
11999 ArrayRef<int> MaskVals = SVOp->getMask();
12001 // Promote splats to a larger type which usually leads to more efficient code.
12002 // FIXME: Is this true if pshufb is available?
12003 if (SVOp->isSplat())
12004 return PromoteSplat(SVOp, DAG);
12006 // If we have SSSE3, case 1 is generated when all result bytes come from
12007 // one of the inputs. Otherwise, case 2 is generated. If no SSSE3 is
12008 // present, fall back to case 3.
12010 // If SSSE3, use 1 pshufb instruction per vector with elements in the result.
12011 if (Subtarget->hasSSSE3()) {
12012 SmallVector<SDValue,16> pshufbMask;
12014 // If all result elements are from one input vector, then only translate
12015 // undef mask values to 0x80 (zero out result) in the pshufb mask.
12017 // Otherwise, we have elements from both input vectors, and must zero out
12018 // elements that come from V2 in the first mask, and V1 in the second mask
12019 // so that we can OR them together.
12020 for (unsigned i = 0; i != 16; ++i) {
12021 int EltIdx = MaskVals[i];
12022 if (EltIdx < 0 || EltIdx >= 16)
12024 pshufbMask.push_back(DAG.getConstant(EltIdx, MVT::i8));
12026 V1 = DAG.getNode(X86ISD::PSHUFB, dl, MVT::v16i8, V1,
12027 DAG.getNode(ISD::BUILD_VECTOR, dl,
12028 MVT::v16i8, pshufbMask));
12030 // As PSHUFB will zero elements with negative indices, it's safe to ignore
12031 // the 2nd operand if it's undefined or zero.
12032 if (V2.getOpcode() == ISD::UNDEF ||
12033 ISD::isBuildVectorAllZeros(V2.getNode()))
12036 // Calculate the shuffle mask for the second input, shuffle it, and
12037 // OR it with the first shuffled input.
12038 pshufbMask.clear();
12039 for (unsigned i = 0; i != 16; ++i) {
12040 int EltIdx = MaskVals[i];
12041 EltIdx = (EltIdx < 16) ? 0x80 : EltIdx - 16;
12042 pshufbMask.push_back(DAG.getConstant(EltIdx, MVT::i8));
12044 V2 = DAG.getNode(X86ISD::PSHUFB, dl, MVT::v16i8, V2,
12045 DAG.getNode(ISD::BUILD_VECTOR, dl,
12046 MVT::v16i8, pshufbMask));
12047 return DAG.getNode(ISD::OR, dl, MVT::v16i8, V1, V2);
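  // For illustration: a mask entry of 20 becomes 0x80 in V1's control vector
  // (so that byte is zeroed) and 4 in V2's control vector, and the OR above
  // merges the two partial results.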
12050 // No SSSE3 - Calculate in place words and then fix all out of place words
12051 // With 0-16 extracts & inserts. Worst case is 16 bytes out of order from
12052 // the 16 different words that comprise the two doublequadword input vectors.
12053 V1 = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V1);
12054 V2 = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V2);
12056 for (int i = 0; i != 8; ++i) {
12057 int Elt0 = MaskVals[i*2];
12058 int Elt1 = MaskVals[i*2+1];
12060 // This word of the result is all undef, skip it.
12061 if (Elt0 < 0 && Elt1 < 0)
12064 // This word of the result is already in the correct place, skip it.
12065 if ((Elt0 == i*2) && (Elt1 == i*2+1))
12068 SDValue Elt0Src = Elt0 < 16 ? V1 : V2;
12069 SDValue Elt1Src = Elt1 < 16 ? V1 : V2;
12072 // If Elt0 and Elt1 are defined, are consecutive, and can be loaded
12073 // using a single extract together, load it and store it.
12074 if ((Elt0 >= 0) && ((Elt0 + 1) == Elt1) && ((Elt0 & 1) == 0)) {
12075 InsElt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16, Elt1Src,
12076 DAG.getIntPtrConstant(Elt1 / 2));
12077 NewV = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v8i16, NewV, InsElt,
12078 DAG.getIntPtrConstant(i));
12082 // If Elt1 is defined, extract it from the appropriate source. If the
12083 // source byte is not also odd, shift the extracted word left 8 bits;
12084 // otherwise clear the bottom 8 bits if we need to do an or.
12086 InsElt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16, Elt1Src,
12087 DAG.getIntPtrConstant(Elt1 / 2));
12088 if ((Elt1 & 1) == 0)
12089 InsElt = DAG.getNode(ISD::SHL, dl, MVT::i16, InsElt,
12091 TLI.getShiftAmountTy(InsElt.getValueType())));
12092 else if (Elt0 >= 0)
12093 InsElt = DAG.getNode(ISD::AND, dl, MVT::i16, InsElt,
12094 DAG.getConstant(0xFF00, MVT::i16));
12096 // If Elt0 is defined, extract it from the appropriate source. If the
12097 // source byte is not also even, shift the extracted word right 8 bits. If
12098 // Elt1 was also defined, OR the extracted values together before
12099 // inserting them in the result.
12101 SDValue InsElt0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16,
12102 Elt0Src, DAG.getIntPtrConstant(Elt0 / 2));
12103 if ((Elt0 & 1) != 0)
12104 InsElt0 = DAG.getNode(ISD::SRL, dl, MVT::i16, InsElt0,
12106 TLI.getShiftAmountTy(InsElt0.getValueType())));
12107 else if (Elt1 >= 0)
12108 InsElt0 = DAG.getNode(ISD::AND, dl, MVT::i16, InsElt0,
12109 DAG.getConstant(0x00FF, MVT::i16));
12110 InsElt = Elt1 >= 0 ? DAG.getNode(ISD::OR, dl, MVT::i16, InsElt, InsElt0)
12113 NewV = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v8i16, NewV, InsElt,
12114 DAG.getIntPtrConstant(i));
12116 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, NewV);
12119 // v32i8 shuffles - Translate to VPSHUFB if possible.
12121 SDValue LowerVECTOR_SHUFFLEv32i8(ShuffleVectorSDNode *SVOp,
12122 const X86Subtarget *Subtarget,
12123 SelectionDAG &DAG) {
12124 MVT VT = SVOp->getSimpleValueType(0);
12125 SDValue V1 = SVOp->getOperand(0);
12126 SDValue V2 = SVOp->getOperand(1);
12128 SmallVector<int, 32> MaskVals(SVOp->getMask().begin(), SVOp->getMask().end());
12130 bool V2IsUndef = V2.getOpcode() == ISD::UNDEF;
12131 bool V1IsAllZero = ISD::isBuildVectorAllZeros(V1.getNode());
12132 bool V2IsAllZero = ISD::isBuildVectorAllZeros(V2.getNode());
12134 // VPSHUFB may be generated if
12135 // (1) one of the input vectors is undefined or zeroinitializer.
12136 // The mask value 0x80 puts 0 in the corresponding slot of the vector.
12137 // And (2) the mask indexes don't cross the 128-bit lane.
12138 if (VT != MVT::v32i8 || !Subtarget->hasInt256() ||
12139 (!V2IsUndef && !V2IsAllZero && !V1IsAllZero))
12142 if (V1IsAllZero && !V2IsAllZero) {
12143 CommuteVectorShuffleMask(MaskVals, 32);
12146 return getPSHUFB(MaskVals, V1, dl, DAG);
12149 /// RewriteAsNarrowerShuffle - Try rewriting v8i16 and v16i8 shuffles as 4 wide
12150 /// ones, or rewriting v4i32 / v4f32 as 2 wide ones if possible. This can be
12151 /// done when every pair / quad of shuffle mask elements point to elements in
12152 /// the right sequence. e.g.
12153 /// vector_shuffle X, Y, <2, 3, | 10, 11, | 0, 1, | 14, 15>
12155 SDValue RewriteAsNarrowerShuffle(ShuffleVectorSDNode *SVOp,
12156 SelectionDAG &DAG) {
12157 MVT VT = SVOp->getSimpleValueType(0);
12159 unsigned NumElems = VT.getVectorNumElements();
12162 switch (VT.SimpleTy) {
12163 default: llvm_unreachable("Unexpected!");
12166 return SDValue(SVOp, 0);
12167 case MVT::v4f32: NewVT = MVT::v2f64; Scale = 2; break;
12168 case MVT::v4i32: NewVT = MVT::v2i64; Scale = 2; break;
12169 case MVT::v8i16: NewVT = MVT::v4i32; Scale = 2; break;
12170 case MVT::v16i8: NewVT = MVT::v4i32; Scale = 4; break;
12171 case MVT::v16i16: NewVT = MVT::v8i32; Scale = 2; break;
12172 case MVT::v32i8: NewVT = MVT::v8i32; Scale = 4; break;
12175 SmallVector<int, 8> MaskVec;
12176 for (unsigned i = 0; i != NumElems; i += Scale) {
12178 for (unsigned j = 0; j != Scale; ++j) {
12179 int EltIdx = SVOp->getMaskElt(i+j);
12183 StartIdx = (EltIdx / Scale);
12184 if (EltIdx != (int)(StartIdx*Scale + j))
12187 MaskVec.push_back(StartIdx);
12190 SDValue V1 = DAG.getNode(ISD::BITCAST, dl, NewVT, SVOp->getOperand(0));
12191 SDValue V2 = DAG.getNode(ISD::BITCAST, dl, NewVT, SVOp->getOperand(1));
12192 return DAG.getVectorShuffle(NewVT, dl, V1, V2, &MaskVec[0]);
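// For illustration, matching the example in the comment above: the v8i16 mask
// <2, 3, 10, 11, 0, 1, 14, 15> is rewritten as the v4i32 mask <1, 5, 0, 7>
// with Scale == 2.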
12195 /// getVZextMovL - Return a zero-extending vector move low node.
12197 static SDValue getVZextMovL(MVT VT, MVT OpVT,
12198 SDValue SrcOp, SelectionDAG &DAG,
12199 const X86Subtarget *Subtarget, SDLoc dl) {
12200 if (VT == MVT::v2f64 || VT == MVT::v4f32) {
12201 LoadSDNode *LD = nullptr;
12202 if (!isScalarLoadToVector(SrcOp.getNode(), &LD))
12203 LD = dyn_cast<LoadSDNode>(SrcOp);
12205 // movssrr and movsdrr do not clear top bits. Try to use movd, movq instead.
12207 MVT ExtVT = (OpVT == MVT::v2f64) ? MVT::i64 : MVT::i32;
12208 if ((ExtVT != MVT::i64 || Subtarget->is64Bit()) &&
12209 SrcOp.getOpcode() == ISD::SCALAR_TO_VECTOR &&
12210 SrcOp.getOperand(0).getOpcode() == ISD::BITCAST &&
12211 SrcOp.getOperand(0).getOperand(0).getValueType() == ExtVT) {
12213 OpVT = (OpVT == MVT::v2f64) ? MVT::v2i64 : MVT::v4i32;
12214 return DAG.getNode(ISD::BITCAST, dl, VT,
12215 DAG.getNode(X86ISD::VZEXT_MOVL, dl, OpVT,
12216 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl,
12218 SrcOp.getOperand(0)
12224 return DAG.getNode(ISD::BITCAST, dl, VT,
12225 DAG.getNode(X86ISD::VZEXT_MOVL, dl, OpVT,
12226 DAG.getNode(ISD::BITCAST, dl,
12230 /// LowerVECTOR_SHUFFLE_256 - Handle all 256-bit wide vector shuffles
12231 /// which could not be matched by any known target specific shuffle
12233 LowerVECTOR_SHUFFLE_256(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG) {
12235 SDValue NewOp = Compact8x32ShuffleNode(SVOp, DAG);
12236 if (NewOp.getNode())
12239 MVT VT = SVOp->getSimpleValueType(0);
12241 unsigned NumElems = VT.getVectorNumElements();
12242 unsigned NumLaneElems = NumElems / 2;
12245 MVT EltVT = VT.getVectorElementType();
12246 MVT NVT = MVT::getVectorVT(EltVT, NumLaneElems);
12249 SmallVector<int, 16> Mask;
12250 for (unsigned l = 0; l < 2; ++l) {
12251 // Build a shuffle mask for the output, discovering on the fly which
12252 // input vectors to use as shuffle operands (recorded in InputUsed).
12253 // If building a suitable shuffle vector proves too hard, then bail
12254 // out with UseBuildVector set.
12255 bool UseBuildVector = false;
12256 int InputUsed[2] = { -1, -1 }; // Not yet discovered.
12257 unsigned LaneStart = l * NumLaneElems;
12258 for (unsigned i = 0; i != NumLaneElems; ++i) {
12259 // The mask element. This indexes into the input.
12260 int Idx = SVOp->getMaskElt(i+LaneStart);
12261       if (Idx < 0) {
12262         // The mask element does not index into any input vector.
12263 Mask.push_back(-1);
12267 // The input vector this mask element indexes into.
12268 int Input = Idx / NumLaneElems;
12270 // Turn the index into an offset from the start of the input vector.
12271 Idx -= Input * NumLaneElems;
12273 // Find or create a shuffle vector operand to hold this input.
12275 for (OpNo = 0; OpNo < array_lengthof(InputUsed); ++OpNo) {
12276 if (InputUsed[OpNo] == Input)
12277 // This input vector is already an operand.
12279 if (InputUsed[OpNo] < 0) {
12280 // Create a new operand for this input vector.
12281 InputUsed[OpNo] = Input;
12286 if (OpNo >= array_lengthof(InputUsed)) {
12287 // More than two input vectors used! Give up on trying to create a
12288 // shuffle vector. Insert all elements into a BUILD_VECTOR instead.
12289 UseBuildVector = true;
12293 // Add the mask index for the new shuffle vector.
12294 Mask.push_back(Idx + OpNo * NumLaneElems);
12297 if (UseBuildVector) {
12298 SmallVector<SDValue, 16> SVOps;
12299 for (unsigned i = 0; i != NumLaneElems; ++i) {
12300 // The mask element. This indexes into the input.
12301 int Idx = SVOp->getMaskElt(i+LaneStart);
12302         if (Idx < 0) {
12303           SVOps.push_back(DAG.getUNDEF(EltVT));
12307 // The input vector this mask element indexes into.
12308 int Input = Idx / NumElems;
12310 // Turn the index into an offset from the start of the input vector.
12311 Idx -= Input * NumElems;
12313 // Extract the vector element by hand.
12314 SVOps.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT,
12315 SVOp->getOperand(Input),
12316 DAG.getIntPtrConstant(Idx)));
12319 // Construct the output using a BUILD_VECTOR.
12320 Output[l] = DAG.getNode(ISD::BUILD_VECTOR, dl, NVT, SVOps);
12321 } else if (InputUsed[0] < 0) {
12322 // No input vectors were used! The result is undefined.
12323 Output[l] = DAG.getUNDEF(NVT);
12325 SDValue Op0 = Extract128BitVector(SVOp->getOperand(InputUsed[0] / 2),
12326 (InputUsed[0] % 2) * NumLaneElems,
12328 // If only one input was used, use an undefined vector for the other.
12329 SDValue Op1 = (InputUsed[1] < 0) ? DAG.getUNDEF(NVT) :
12330 Extract128BitVector(SVOp->getOperand(InputUsed[1] / 2),
12331 (InputUsed[1] % 2) * NumLaneElems, DAG, dl);
12332 // At least one input vector was used. Create a new shuffle vector.
12333 Output[l] = DAG.getVectorShuffle(NVT, dl, Op0, Op1, &Mask[0]);
12339 // Concatenate the result back
12340 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Output[0], Output[1]);
12343 /// LowerVECTOR_SHUFFLE_128v4 - Handle all 128-bit wide vectors with
12344 /// 4 elements, and match them with several different shuffle types.
12346 LowerVECTOR_SHUFFLE_128v4(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG) {
12347 SDValue V1 = SVOp->getOperand(0);
12348 SDValue V2 = SVOp->getOperand(1);
12350 MVT VT = SVOp->getSimpleValueType(0);
12352 assert(VT.is128BitVector() && "Unsupported vector size");
12354 std::pair<int, int> Locs[4];
12355 int Mask1[] = { -1, -1, -1, -1 };
12356 SmallVector<int, 8> PermMask(SVOp->getMask().begin(), SVOp->getMask().end());
12358 unsigned NumHi = 0;
12359 unsigned NumLo = 0;
12360 for (unsigned i = 0; i != 4; ++i) {
12361 int Idx = PermMask[i];
12363 Locs[i] = std::make_pair(-1, -1);
12365 assert(Idx < 8 && "Invalid VECTOR_SHUFFLE index!");
12367 Locs[i] = std::make_pair(0, NumLo);
12368 Mask1[NumLo] = Idx;
12371 Locs[i] = std::make_pair(1, NumHi);
12373 Mask1[2+NumHi] = Idx;
12379 if (NumLo <= 2 && NumHi <= 2) {
12380 // If no more than two elements come from either vector, this can be
12381 // implemented with two shuffles. The first shuffle gathers the elements.
12382 // The second shuffle, which takes the first shuffle as both of its
12383 // vector operands, puts the elements into the right order.
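// For example, a <1,4,0,5> shuffle is first gathered as <1,0,4,5> (the two
// elements from each source packed into one register) and then reordered
// with the mask <0,2,5,7> applied to that single register.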
12384 V1 = DAG.getVectorShuffle(VT, dl, V1, V2, &Mask1[0]);
12386 int Mask2[] = { -1, -1, -1, -1 };
12388 for (unsigned i = 0; i != 4; ++i)
12389 if (Locs[i].first != -1) {
12390 unsigned Idx = (i < 2) ? 0 : 4;
12391 Idx += Locs[i].first * 2 + Locs[i].second;
12395 return DAG.getVectorShuffle(VT, dl, V1, V1, &Mask2[0]);
12398 if (NumLo == 3 || NumHi == 3) {
12399 // Otherwise, we must have three elements from one vector, call it X, and
12400 // one element from the other, call it Y. First, use a shufps to build an
12401 // intermediate vector with the one element from Y and the element from X
12402 // that will be in the same half in the final destination (the indexes don't
12403 // matter). Then, use a shufps to build the final vector, taking the half
12404 // containing the element from Y from the intermediate, and the other half from X.
12407 // Normalize it so the 3 elements come from V1.
12408 CommuteVectorShuffleMask(PermMask, 4);
12412 // Find the element from V2.
12414 for (HiIndex = 0; HiIndex < 3; ++HiIndex) {
12415 int Val = PermMask[HiIndex];
12422 Mask1[0] = PermMask[HiIndex];
12424 Mask1[2] = PermMask[HiIndex^1];
12426 V2 = DAG.getVectorShuffle(VT, dl, V1, V2, &Mask1[0]);
12428 if (HiIndex >= 2) {
12429 Mask1[0] = PermMask[0];
12430 Mask1[1] = PermMask[1];
12431 Mask1[2] = HiIndex & 1 ? 6 : 4;
12432 Mask1[3] = HiIndex & 1 ? 4 : 6;
12433 return DAG.getVectorShuffle(VT, dl, V1, V2, &Mask1[0]);
12436 Mask1[0] = HiIndex & 1 ? 2 : 0;
12437 Mask1[1] = HiIndex & 1 ? 0 : 2;
12438 Mask1[2] = PermMask[2];
12439 Mask1[3] = PermMask[3];
12444 return DAG.getVectorShuffle(VT, dl, V2, V1, &Mask1[0]);
12447 // Break it into (shuffle shuffle_hi, shuffle_lo).
12448 int LoMask[] = { -1, -1, -1, -1 };
12449 int HiMask[] = { -1, -1, -1, -1 };
12451 int *MaskPtr = LoMask;
12452 unsigned MaskIdx = 0;
12453 unsigned LoIdx = 0;
12454 unsigned HiIdx = 2;
12455 for (unsigned i = 0; i != 4; ++i) {
12462 int Idx = PermMask[i];
12464 Locs[i] = std::make_pair(-1, -1);
12465 } else if (Idx < 4) {
12466 Locs[i] = std::make_pair(MaskIdx, LoIdx);
12467 MaskPtr[LoIdx] = Idx;
12470 Locs[i] = std::make_pair(MaskIdx, HiIdx);
12471 MaskPtr[HiIdx] = Idx;
12476 SDValue LoShuffle = DAG.getVectorShuffle(VT, dl, V1, V2, &LoMask[0]);
12477 SDValue HiShuffle = DAG.getVectorShuffle(VT, dl, V1, V2, &HiMask[0]);
12478 int MaskOps[] = { -1, -1, -1, -1 };
12479 for (unsigned i = 0; i != 4; ++i)
12480 if (Locs[i].first != -1)
12481 MaskOps[i] = Locs[i].first * 4 + Locs[i].second;
12482 return DAG.getVectorShuffle(VT, dl, LoShuffle, HiShuffle, &MaskOps[0]);
12485 static bool MayFoldVectorLoad(SDValue V) {
12486 while (V.hasOneUse() && V.getOpcode() == ISD::BITCAST)
12487 V = V.getOperand(0);
12489 if (V.hasOneUse() && V.getOpcode() == ISD::SCALAR_TO_VECTOR)
12490 V = V.getOperand(0);
12491 if (V.hasOneUse() && V.getOpcode() == ISD::BUILD_VECTOR &&
12492 V.getNumOperands() == 2 && V.getOperand(1).getOpcode() == ISD::UNDEF)
12493 // BUILD_VECTOR (load), undef
12494 V = V.getOperand(0);
12496 return MayFoldLoad(V);
12500 SDValue getMOVDDup(SDValue &Op, SDLoc &dl, SDValue V1, SelectionDAG &DAG) {
12501 MVT VT = Op.getSimpleValueType();
12503 // Canonicalize to v2f64.
12504 V1 = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, V1);
12505 return DAG.getNode(ISD::BITCAST, dl, VT,
12506 getTargetShuffleNode(X86ISD::MOVDDUP, dl, MVT::v2f64,
12511 SDValue getMOVLowToHigh(SDValue &Op, SDLoc &dl, SelectionDAG &DAG,
12513 SDValue V1 = Op.getOperand(0);
12514 SDValue V2 = Op.getOperand(1);
12515 MVT VT = Op.getSimpleValueType();
12517 assert(VT != MVT::v2i64 && "unsupported shuffle type");
12519 if (HasSSE2 && VT == MVT::v2f64)
12520 return getTargetShuffleNode(X86ISD::MOVLHPD, dl, VT, V1, V2, DAG);
12522 // v4f32 or v4i32: canonicalize to v4f32 (which is legal for SSE1)
12523 return DAG.getNode(ISD::BITCAST, dl, VT,
12524 getTargetShuffleNode(X86ISD::MOVLHPS, dl, MVT::v4f32,
12525 DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, V1),
12526 DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, V2), DAG));
12530 SDValue getMOVHighToLow(SDValue &Op, SDLoc &dl, SelectionDAG &DAG) {
12531 SDValue V1 = Op.getOperand(0);
12532 SDValue V2 = Op.getOperand(1);
12533 MVT VT = Op.getSimpleValueType();
12535 assert((VT == MVT::v4i32 || VT == MVT::v4f32) &&
12536 "unsupported shuffle type");
12538 if (V2.getOpcode() == ISD::UNDEF)
12542 return getTargetShuffleNode(X86ISD::MOVHLPS, dl, VT, V1, V2, DAG);
12546 SDValue getMOVLP(SDValue &Op, SDLoc &dl, SelectionDAG &DAG, bool HasSSE2) {
12547 SDValue V1 = Op.getOperand(0);
12548 SDValue V2 = Op.getOperand(1);
12549 MVT VT = Op.getSimpleValueType();
12550 unsigned NumElems = VT.getVectorNumElements();
12552 // Use MOVLPS and MOVLPD in case V1 or V2 are loads. During isel, the second
12553 // operand of these instructions is only memory, so check if there's a
12554 // potential load folding here, otherwise use SHUFPS or MOVSD to match the desired form.
12556 bool CanFoldLoad = false;
12558 // Trivial case, when V2 comes from a load.
12559 if (MayFoldVectorLoad(V2))
12560 CanFoldLoad = true;
12562 // When V1 is a load, it can be folded later into a store in isel, example:
12563 // (store (v4f32 (X86Movlps (load addr:$src1), VR128:$src2)), addr:$src1)
12565 // (MOVLPSmr addr:$src1, VR128:$src2)
12566 // So, recognize this potential and also use MOVLPS or MOVLPD
12567 else if (MayFoldVectorLoad(V1) && MayFoldIntoStore(Op))
12568 CanFoldLoad = true;
12570 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
12572 if (HasSSE2 && NumElems == 2)
12573 return getTargetShuffleNode(X86ISD::MOVLPD, dl, VT, V1, V2, DAG);
12576 // If we don't care about the second element, proceed to use movss.
12577 if (SVOp->getMaskElt(1) != -1)
12578 return getTargetShuffleNode(X86ISD::MOVLPS, dl, VT, V1, V2, DAG);
12581 // movl and movlp will both match v2i64, but v2i64 is never matched by
12582 // movl earlier because we make it strict to avoid messing with the movlp load
12583 // folding logic (see the code above the getMOVLP call). Match it here then;
12584 // this is horrible, but it will stay like this until we move all shuffle
12585 // matching to x86 specific nodes. Note that for the 1st condition all
12586 // types are matched with movsd.
12588 // FIXME: isMOVLMask should be checked and matched before getMOVLP,
12589 // as to remove this logic from here, as much as possible
12590 if (NumElems == 2 || !isMOVLMask(SVOp->getMask(), VT))
12591 return getTargetShuffleNode(X86ISD::MOVSD, dl, VT, V1, V2, DAG);
12592 return getTargetShuffleNode(X86ISD::MOVSS, dl, VT, V1, V2, DAG);
12595 assert(VT != MVT::v4i32 && "unsupported shuffle type");
12597 // Invert the operand order and use SHUFPS to match it.
12598 return getTargetShuffleNode(X86ISD::SHUFP, dl, VT, V2, V1,
12599 getShuffleSHUFImmediate(SVOp), DAG);
12602 static SDValue NarrowVectorLoadToElement(LoadSDNode *Load, unsigned Index,
12603 SelectionDAG &DAG) {
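// Build a scalar load of just the requested element: the new address is the
// original address plus Index * element store size (e.g. element 2 of a
// loaded v4f32 becomes an f32 load at addr + 8).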
12605 MVT VT = Load->getSimpleValueType(0);
12606 MVT EVT = VT.getVectorElementType();
12607 SDValue Addr = Load->getOperand(1);
12608 SDValue NewAddr = DAG.getNode(
12609 ISD::ADD, dl, Addr.getSimpleValueType(), Addr,
12610 DAG.getConstant(Index * EVT.getStoreSize(), Addr.getSimpleValueType()));
12613 DAG.getLoad(EVT, dl, Load->getChain(), NewAddr,
12614 DAG.getMachineFunction().getMachineMemOperand(
12615 Load->getMemOperand(), 0, EVT.getStoreSize()));
12619 // It is only safe to call this function if isINSERTPSMask is true for
12620 // this shufflevector mask.
12621 static SDValue getINSERTPS(ShuffleVectorSDNode *SVOp, SDLoc &dl,
12622 SelectionDAG &DAG) {
12623 // Generate an insertps instruction when inserting an f32 from memory onto a
12624 // v4f32 or when copying a member from one v4f32 to another.
12625 // We also use it for transferring i32 from one register to another,
12626 // since it simply copies the same bits.
12627 // If we're transferring an i32 from memory to a specific element in a
12628 // register, we output a generic DAG that will match the PINSRD instruction.
12630 MVT VT = SVOp->getSimpleValueType(0);
12631 MVT EVT = VT.getVectorElementType();
12632 SDValue V1 = SVOp->getOperand(0);
12633 SDValue V2 = SVOp->getOperand(1);
12634 auto Mask = SVOp->getMask();
12635 assert((VT == MVT::v4f32 || VT == MVT::v4i32) &&
12636 "unsupported vector type for insertps/pinsrd");
12638 auto FromV1Predicate = [](const int &i) { return i < 4 && i > -1; };
12639 auto FromV2Predicate = [](const int &i) { return i >= 4; };
12640 int FromV1 = std::count_if(Mask.begin(), Mask.end(), FromV1Predicate);
12644 unsigned DestIndex;
12648 DestIndex = std::find_if(Mask.begin(), Mask.end(), FromV1Predicate) -
12651 // If we have 1 element from each vector, we have to check if we're
12652 // changing V1's element's place. If so, we're done. Otherwise, we
12653 // should assume we're changing V2's element's place and behave accordingly.
12655 int FromV2 = std::count_if(Mask.begin(), Mask.end(), FromV2Predicate);
12656 assert(DestIndex <= INT32_MAX && "truncated destination index");
12657 if (FromV1 == FromV2 &&
12658 static_cast<int>(DestIndex) == Mask[DestIndex] % 4) {
12662 std::find_if(Mask.begin(), Mask.end(), FromV2Predicate) - Mask.begin();
12665 assert(std::count_if(Mask.begin(), Mask.end(), FromV2Predicate) == 1 &&
12666 "More than one element from V1 and from V2, or no elements from one "
12667 "of the vectors. This case should not have returned true from "
12672 std::find_if(Mask.begin(), Mask.end(), FromV2Predicate) - Mask.begin();
12675 // Get an index into the source vector in the range [0,4) (the mask is
12676 // in the range [0,8) because it can address V1 and V2)
12677 unsigned SrcIndex = Mask[DestIndex] % 4;
12678 if (MayFoldLoad(From)) {
12679 // Trivial case, when From comes from a load and is only used by the
12680 // shuffle. Make it use insertps from the vector that we need from that load.
12683 SDValue NewLoad = NarrowVectorLoadToElement(cast<LoadSDNode>(From), SrcIndex, DAG);
12684 if (!NewLoad.getNode())
12687 if (EVT == MVT::f32) {
12688 // Create this as a scalar to vector to match the instruction pattern.
12689 SDValue LoadScalarToVector =
12690 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, NewLoad);
12691 SDValue InsertpsMask = DAG.getIntPtrConstant(DestIndex << 4);
12692 return DAG.getNode(X86ISD::INSERTPS, dl, VT, To, LoadScalarToVector,
12694 } else { // EVT == MVT::i32
12695 // If we're getting an i32 from memory, use an INSERT_VECTOR_ELT
12696 // instruction, to match the PINSRD instruction, which loads an i32 to a
12697 // certain vector element.
12698 return DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, To, NewLoad,
12699 DAG.getConstant(DestIndex, MVT::i32));
12703 // Vector-element-to-vector
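// The insertps immediate encodes the source element in bits [7:6], the
// destination element in bits [5:4], and a zero mask in bits [3:0]. For
// example, SrcIndex == 1 and DestIndex == 2 give (2 << 4) | (1 << 6) == 0x60.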
12704 SDValue InsertpsMask = DAG.getIntPtrConstant(DestIndex << 4 | SrcIndex << 6);
12705 return DAG.getNode(X86ISD::INSERTPS, dl, VT, To, From, InsertpsMask);
12708 // Reduce a vector shuffle to zext.
12709 static SDValue LowerVectorIntExtend(SDValue Op, const X86Subtarget *Subtarget,
12710 SelectionDAG &DAG) {
12711 // PMOVZX is only available from SSE41.
12712 if (!Subtarget->hasSSE41())
12715 MVT VT = Op.getSimpleValueType();
12717 // Only AVX2 supports 256-bit vector integer extending.
12718 if (!Subtarget->hasInt256() && VT.is256BitVector())
12721 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
12723 SDValue V1 = Op.getOperand(0);
12724 SDValue V2 = Op.getOperand(1);
12725 unsigned NumElems = VT.getVectorNumElements();
12727 // Extending is a unary operation, and the element type of the source vector
12728 // must be smaller than i64.
12729 if (V2.getOpcode() != ISD::UNDEF || !VT.isInteger() ||
12730 VT.getVectorElementType() == MVT::i64)
12733 // Find the expansion ratio, e.g. expanding from i8 to i32 has a ratio of 4.
12734 unsigned Shift = 1; // Start from 2, i.e. 1 << 1.
12735 while ((1U << Shift) < NumElems) {
12736 if (SVOp->getMaskElt(1U << Shift) == 1)
12739 // The maximal ratio is 8, i.e. from i8 to i64.
12744 // Check the shuffle mask.
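// For example, with NumElems == 16 and Shift == 2 (a 4x extension such as
// v16i8 -> v4i32), an acceptable mask looks like <0,-1,-1,-1, 1,-1,-1,-1,
// 2,-1,-1,-1, 3,-1,-1,-1>: every fourth entry selects the next source
// element, and the entries in between must be undef.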
12745 unsigned Mask = (1U << Shift) - 1;
12746 for (unsigned i = 0; i != NumElems; ++i) {
12747 int EltIdx = SVOp->getMaskElt(i);
12748 if ((i & Mask) != 0 && EltIdx != -1)
12750 if ((i & Mask) == 0 && (unsigned)EltIdx != (i >> Shift))
12754 unsigned NBits = VT.getVectorElementType().getSizeInBits() << Shift;
12755 MVT NeVT = MVT::getIntegerVT(NBits);
12756 MVT NVT = MVT::getVectorVT(NeVT, NumElems >> Shift);
12758 if (!DAG.getTargetLoweringInfo().isTypeLegal(NVT))
12761 return DAG.getNode(ISD::BITCAST, DL, VT,
12762 DAG.getNode(X86ISD::VZEXT, DL, NVT, V1));
12765 static SDValue NormalizeVectorShuffle(SDValue Op, const X86Subtarget *Subtarget,
12766 SelectionDAG &DAG) {
12767 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
12768 MVT VT = Op.getSimpleValueType();
12770 SDValue V1 = Op.getOperand(0);
12771 SDValue V2 = Op.getOperand(1);
12773 if (isZeroShuffle(SVOp))
12774 return getZeroVector(VT, Subtarget, DAG, dl);
12776 // Handle splat operations
12777 if (SVOp->isSplat()) {
12778 // Use vbroadcast whenever the splat comes from a foldable load
12779 SDValue Broadcast = LowerVectorBroadcast(Op, Subtarget, DAG);
12780 if (Broadcast.getNode())
12784 // Check integer expanding shuffles.
12785 SDValue NewOp = LowerVectorIntExtend(Op, Subtarget, DAG);
12786 if (NewOp.getNode())
12789 // If the shuffle can be profitably rewritten as a narrower shuffle, then do it now.
12791 if (VT == MVT::v8i16 || VT == MVT::v16i8 || VT == MVT::v16i16 ||
12792 VT == MVT::v32i8) {
12793 SDValue NewOp = RewriteAsNarrowerShuffle(SVOp, DAG);
12794 if (NewOp.getNode())
12795 return DAG.getNode(ISD::BITCAST, dl, VT, NewOp);
12796 } else if (VT.is128BitVector() && Subtarget->hasSSE2()) {
12797 // FIXME: Figure out a cleaner way to do this.
12798 if (ISD::isBuildVectorAllZeros(V2.getNode())) {
12799 SDValue NewOp = RewriteAsNarrowerShuffle(SVOp, DAG);
12800 if (NewOp.getNode()) {
12801 MVT NewVT = NewOp.getSimpleValueType();
12802 if (isCommutedMOVLMask(cast<ShuffleVectorSDNode>(NewOp)->getMask(),
12803 NewVT, true, false))
12804 return getVZextMovL(VT, NewVT, NewOp.getOperand(0), DAG, Subtarget,
12807 } else if (ISD::isBuildVectorAllZeros(V1.getNode())) {
12808 SDValue NewOp = RewriteAsNarrowerShuffle(SVOp, DAG);
12809 if (NewOp.getNode()) {
12810 MVT NewVT = NewOp.getSimpleValueType();
12811 if (isMOVLMask(cast<ShuffleVectorSDNode>(NewOp)->getMask(), NewVT))
12812 return getVZextMovL(VT, NewVT, NewOp.getOperand(1), DAG, Subtarget,
12821 X86TargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const {
12822 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
12823 SDValue V1 = Op.getOperand(0);
12824 SDValue V2 = Op.getOperand(1);
12825 MVT VT = Op.getSimpleValueType();
12827 unsigned NumElems = VT.getVectorNumElements();
12828 bool V1IsUndef = V1.getOpcode() == ISD::UNDEF;
12829 bool V2IsUndef = V2.getOpcode() == ISD::UNDEF;
12830 bool V1IsSplat = false;
12831 bool V2IsSplat = false;
12832 bool HasSSE2 = Subtarget->hasSSE2();
12833 bool HasFp256 = Subtarget->hasFp256();
12834 bool HasInt256 = Subtarget->hasInt256();
12835 MachineFunction &MF = DAG.getMachineFunction();
12837 MF.getFunction()->hasFnAttribute(Attribute::OptimizeForSize);
12839 // Check if we should use the experimental vector shuffle lowering. If so,
12840 // delegate completely to that code path.
12841 if (ExperimentalVectorShuffleLowering)
12842 return lowerVectorShuffle(Op, Subtarget, DAG);
12844 assert(VT.getSizeInBits() != 64 && "Can't lower MMX shuffles");
12846 if (V1IsUndef && V2IsUndef)
12847 return DAG.getUNDEF(VT);
12849 // When we create a shuffle node we put the UNDEF node as the second operand,
12850 // but in some cases the first operand may be transformed to UNDEF.
12851 // In this case we should just commute the node.
12853 return DAG.getCommutedVectorShuffle(*SVOp);
12855 // Vector shuffle lowering takes 3 steps:
12857 // 1) Normalize the input vectors. Here splats, zeroed vectors, profitable
12858 // narrowing and commutation of operands should be handled.
12859 // 2) Matching of shuffles with known shuffle masks to x86 target specific nodes.
12861 // 3) Rewriting of unmatched masks into new generic shuffle operations,
12862 // so the shuffle can be broken into other shuffles and the legalizer can
12863 // try the lowering again.
12865 // The general idea is that no vector_shuffle operation should be left to
12866 // be matched during isel; all of them must be converted to a target specific node.
12869 // Normalize the input vectors. Here splats, zeroed vectors, profitable
12870 // narrowing and commutation of operands should be handled. The actual code
12871 // doesn't include all of those, work in progress...
12872 SDValue NewOp = NormalizeVectorShuffle(Op, Subtarget, DAG);
12873 if (NewOp.getNode())
12876 SmallVector<int, 8> M(SVOp->getMask().begin(), SVOp->getMask().end());
12878 // NOTE: isPSHUFDMask can also match both masks below (unpckl_undef and
12879 // unpckh_undef). Only use pshufd if speed is more important than size.
12880 if (OptForSize && isUNPCKL_v_undef_Mask(M, VT, HasInt256))
12881 return getTargetShuffleNode(X86ISD::UNPCKL, dl, VT, V1, V1, DAG);
12882 if (OptForSize && isUNPCKH_v_undef_Mask(M, VT, HasInt256))
12883 return getTargetShuffleNode(X86ISD::UNPCKH, dl, VT, V1, V1, DAG);
12885 if (isMOVDDUPMask(M, VT) && Subtarget->hasSSE3() &&
12886 V2IsUndef && MayFoldVectorLoad(V1))
12887 return getMOVDDup(Op, dl, V1, DAG);
12889 if (isMOVHLPS_v_undef_Mask(M, VT))
12890 return getMOVHighToLow(Op, dl, DAG);
12892 // Use to match splats
12893 if (HasSSE2 && isUNPCKHMask(M, VT, HasInt256) && V2IsUndef &&
12894 (VT == MVT::v2f64 || VT == MVT::v2i64))
12895 return getTargetShuffleNode(X86ISD::UNPCKH, dl, VT, V1, V1, DAG);
12897 if (isPSHUFDMask(M, VT)) {
12898 // The actual implementation will match the mask in the if above and then
12899 // during isel it can match several different instructions, not only pshufd
12900 // as its name says, sad but true, emulate the behavior for now...
12901 if (isMOVDDUPMask(M, VT) && ((VT == MVT::v4f32 || VT == MVT::v2i64)))
12902 return getTargetShuffleNode(X86ISD::MOVLHPS, dl, VT, V1, V1, DAG);
12904 unsigned TargetMask = getShuffleSHUFImmediate(SVOp);
12906 if (HasSSE2 && (VT == MVT::v4f32 || VT == MVT::v4i32))
12907 return getTargetShuffleNode(X86ISD::PSHUFD, dl, VT, V1, TargetMask, DAG);
12909 if (HasFp256 && (VT == MVT::v4f32 || VT == MVT::v2f64))
12910 return getTargetShuffleNode(X86ISD::VPERMILPI, dl, VT, V1, TargetMask,
12913 return getTargetShuffleNode(X86ISD::SHUFP, dl, VT, V1, V1,
12917 if (isPALIGNRMask(M, VT, Subtarget))
12918 return getTargetShuffleNode(X86ISD::PALIGNR, dl, VT, V1, V2,
12919 getShufflePALIGNRImmediate(SVOp),
12922 if (isVALIGNMask(M, VT, Subtarget))
12923 return getTargetShuffleNode(X86ISD::VALIGN, dl, VT, V1, V2,
12924 getShuffleVALIGNImmediate(SVOp),
12927 // Check if this can be converted into a logical shift.
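// If the mask simply slides the elements up or down and fills the vacated
// positions with zero/undef (e.g. <1,2,3,-1> on v4i32), the whole shuffle
// can presumably be emitted as a single full-register byte shift; getVShift
// below receives the shift amount converted from elements to bits.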
12928 bool isLeft = false;
12929 unsigned ShAmt = 0;
12931 bool isShift = HasSSE2 && isVectorShift(SVOp, DAG, isLeft, ShVal, ShAmt);
12932 if (isShift && ShVal.hasOneUse()) {
12933 // If the shifted value has multiple uses, it may be cheaper to use
12934 // v_set0 + movlhps or movhlps, etc.
12935 MVT EltVT = VT.getVectorElementType();
12936 ShAmt *= EltVT.getSizeInBits();
12937 return getVShift(isLeft, VT, ShVal, ShAmt, DAG, *this, dl);
12940 if (isMOVLMask(M, VT)) {
12941 if (ISD::isBuildVectorAllZeros(V1.getNode()))
12942 return getVZextMovL(VT, VT, V2, DAG, Subtarget, dl);
12943 if (!isMOVLPMask(M, VT)) {
12944 if (HasSSE2 && (VT == MVT::v2i64 || VT == MVT::v2f64))
12945 return getTargetShuffleNode(X86ISD::MOVSD, dl, VT, V1, V2, DAG);
12947 if (VT == MVT::v4i32 || VT == MVT::v4f32)
12948 return getTargetShuffleNode(X86ISD::MOVSS, dl, VT, V1, V2, DAG);
12952 // FIXME: fold these into legal mask.
12953 if (isMOVLHPSMask(M, VT) && !isUNPCKLMask(M, VT, HasInt256))
12954 return getMOVLowToHigh(Op, dl, DAG, HasSSE2);
12956 if (isMOVHLPSMask(M, VT))
12957 return getMOVHighToLow(Op, dl, DAG);
12959 if (V2IsUndef && isMOVSHDUPMask(M, VT, Subtarget))
12960 return getTargetShuffleNode(X86ISD::MOVSHDUP, dl, VT, V1, DAG);
12962 if (V2IsUndef && isMOVSLDUPMask(M, VT, Subtarget))
12963 return getTargetShuffleNode(X86ISD::MOVSLDUP, dl, VT, V1, DAG);
12965 if (isMOVLPMask(M, VT))
12966 return getMOVLP(Op, dl, DAG, HasSSE2);
12968 if (ShouldXformToMOVHLPS(M, VT) ||
12969 ShouldXformToMOVLP(V1.getNode(), V2.getNode(), M, VT))
12970 return DAG.getCommutedVectorShuffle(*SVOp);
12973 // No better options. Use a vshldq / vsrldq.
12974 MVT EltVT = VT.getVectorElementType();
12975 ShAmt *= EltVT.getSizeInBits();
12976 return getVShift(isLeft, VT, ShVal, ShAmt, DAG, *this, dl);
12979 bool Commuted = false;
12980 // FIXME: This should also accept a bitcast of a splat? Be careful, not
12981 // 1,1,1,1 -> v8i16 though.
12982 BitVector UndefElements;
12983 if (auto *BVOp = dyn_cast<BuildVectorSDNode>(V1.getNode()))
12984 if (BVOp->getConstantSplatNode(&UndefElements) && UndefElements.none())
12986 if (auto *BVOp = dyn_cast<BuildVectorSDNode>(V2.getNode()))
12987 if (BVOp->getConstantSplatNode(&UndefElements) && UndefElements.none())
12990 // Canonicalize the splat or undef, if present, to be on the RHS.
12991 if (!V2IsUndef && V1IsSplat && !V2IsSplat) {
12992 CommuteVectorShuffleMask(M, NumElems);
12994 std::swap(V1IsSplat, V2IsSplat);
12998 if (isCommutedMOVLMask(M, VT, V2IsSplat, V2IsUndef)) {
12999 // Shuffling low element of v1 into undef, just return v1.
13002 // If V2 is a splat, the mask may be malformed such as <4,3,3,3>, which
13003 // the instruction selector will not match, so get a canonical MOVL with
13004 // swapped operands to undo the commute.
13005 return getMOVL(DAG, dl, VT, V2, V1);
13008 if (isUNPCKLMask(M, VT, HasInt256))
13009 return getTargetShuffleNode(X86ISD::UNPCKL, dl, VT, V1, V2, DAG);
13011 if (isUNPCKHMask(M, VT, HasInt256))
13012 return getTargetShuffleNode(X86ISD::UNPCKH, dl, VT, V1, V2, DAG);
13015 // Normalize the mask so all entries that point to V2 point to its first
13016 // element, then try to match unpck{h|l} again. If it matches, return a
13017 // new vector_shuffle with the corrected mask.
13018 SmallVector<int, 8> NewMask(M.begin(), M.end());
13019 NormalizeMask(NewMask, NumElems);
13020 if (isUNPCKLMask(NewMask, VT, HasInt256, true))
13021 return getTargetShuffleNode(X86ISD::UNPCKL, dl, VT, V1, V2, DAG);
13022 if (isUNPCKHMask(NewMask, VT, HasInt256, true))
13023 return getTargetShuffleNode(X86ISD::UNPCKH, dl, VT, V1, V2, DAG);
13028 // Commute it back and try unpck* again.
13028 // FIXME: this seems wrong.
13029 CommuteVectorShuffleMask(M, NumElems);
13031 std::swap(V1IsSplat, V2IsSplat);
13033 if (isUNPCKLMask(M, VT, HasInt256))
13034 return getTargetShuffleNode(X86ISD::UNPCKL, dl, VT, V1, V2, DAG);
13036 if (isUNPCKHMask(M, VT, HasInt256))
13037 return getTargetShuffleNode(X86ISD::UNPCKH, dl, VT, V1, V2, DAG);
13040 // Normalize the node to match x86 shuffle ops if needed
13041 if (!V2IsUndef && (isSHUFPMask(M, VT, /* Commuted */ true)))
13042 return DAG.getCommutedVectorShuffle(*SVOp);
13044 // The checks below are all present in isShuffleMaskLegal, but they are
13045 // inlined here right now to enable us to directly emit target specific
13046 // nodes, and remove one by one until they don't return Op anymore.
13048 if (ShuffleVectorSDNode::isSplatMask(&M[0], VT) &&
13049 SVOp->getSplatIndex() == 0 && V2IsUndef) {
13050 if (VT == MVT::v2f64 || VT == MVT::v2i64)
13051 return getTargetShuffleNode(X86ISD::UNPCKL, dl, VT, V1, V1, DAG);
13054 if (isPSHUFHWMask(M, VT, HasInt256))
13055 return getTargetShuffleNode(X86ISD::PSHUFHW, dl, VT, V1,
13056 getShufflePSHUFHWImmediate(SVOp),
13059 if (isPSHUFLWMask(M, VT, HasInt256))
13060 return getTargetShuffleNode(X86ISD::PSHUFLW, dl, VT, V1,
13061 getShufflePSHUFLWImmediate(SVOp),
13064 unsigned MaskValue;
13065 if (isBlendMask(M, VT, Subtarget->hasSSE41(), HasInt256, &MaskValue))
13066 return LowerVECTOR_SHUFFLEtoBlend(SVOp, MaskValue, Subtarget, DAG);
13068 if (isSHUFPMask(M, VT))
13069 return getTargetShuffleNode(X86ISD::SHUFP, dl, VT, V1, V2,
13070 getShuffleSHUFImmediate(SVOp), DAG);
13072 if (isUNPCKL_v_undef_Mask(M, VT, HasInt256))
13073 return getTargetShuffleNode(X86ISD::UNPCKL, dl, VT, V1, V1, DAG);
13074 if (isUNPCKH_v_undef_Mask(M, VT, HasInt256))
13075 return getTargetShuffleNode(X86ISD::UNPCKH, dl, VT, V1, V1, DAG);
13077 //===--------------------------------------------------------------------===//
13078 // Generate target specific nodes for 128- or 256-bit shuffles only
13079 // supported in the AVX instruction set.
13082 // Handle VMOVDDUPY permutations
13083 if (V2IsUndef && isMOVDDUPYMask(M, VT, HasFp256))
13084 return getTargetShuffleNode(X86ISD::MOVDDUP, dl, VT, V1, DAG);
13086 // Handle VPERMILPS/D* permutations
13087 if (isVPERMILPMask(M, VT)) {
13088 if ((HasInt256 && VT == MVT::v8i32) || VT == MVT::v16i32)
13089 return getTargetShuffleNode(X86ISD::PSHUFD, dl, VT, V1,
13090 getShuffleSHUFImmediate(SVOp), DAG);
13091 return getTargetShuffleNode(X86ISD::VPERMILPI, dl, VT, V1,
13092 getShuffleSHUFImmediate(SVOp), DAG);
13096 if (VT.is512BitVector() && isINSERT64x4Mask(M, VT, &Idx))
13097 return Insert256BitVector(V1, Extract256BitVector(V2, 0, DAG, dl),
13098 Idx*(NumElems/2), DAG, dl);
13100 // Handle VPERM2F128/VPERM2I128 permutations
13101 if (isVPERM2X128Mask(M, VT, HasFp256))
13102 return getTargetShuffleNode(X86ISD::VPERM2X128, dl, VT, V1,
13103 V2, getShuffleVPERM2X128Immediate(SVOp), DAG);
13105 if (Subtarget->hasSSE41() && isINSERTPSMask(M, VT))
13106 return getINSERTPS(SVOp, dl, DAG);
13109 if (V2IsUndef && HasInt256 && isPermImmMask(M, VT, Imm8))
13110 return getTargetShuffleNode(X86ISD::VPERMI, dl, VT, V1, Imm8, DAG);
13112 if ((V2IsUndef && HasInt256 && VT.is256BitVector() && NumElems == 8) ||
13113 VT.is512BitVector()) {
13114 MVT MaskEltVT = MVT::getIntegerVT(VT.getVectorElementType().getSizeInBits());
13115 MVT MaskVectorVT = MVT::getVectorVT(MaskEltVT, NumElems);
13116 SmallVector<SDValue, 16> permclMask;
13117 for (unsigned i = 0; i != NumElems; ++i) {
13118 permclMask.push_back(DAG.getConstant((M[i]>=0) ? M[i] : 0, MaskEltVT));
13121 SDValue Mask = DAG.getNode(ISD::BUILD_VECTOR, dl, MaskVectorVT, permclMask);
13123 // Bitcast is for VPERMPS since mask is v8i32 but node takes v8f32
13124 return DAG.getNode(X86ISD::VPERMV, dl, VT,
13125 DAG.getNode(ISD::BITCAST, dl, VT, Mask), V1);
13126 return DAG.getNode(X86ISD::VPERMV3, dl, VT, V1,
13127 DAG.getNode(ISD::BITCAST, dl, VT, Mask), V2);
13130 //===--------------------------------------------------------------------===//
13131 // Since no target specific shuffle was selected for this generic one,
13132 // lower it into other known shuffles. FIXME: this isn't true yet, but
13133 // this is the plan.
13136 // Handle v8i16 specifically since SSE can do byte extraction and insertion.
13137 if (VT == MVT::v8i16) {
13138 SDValue NewOp = LowerVECTOR_SHUFFLEv8i16(Op, Subtarget, DAG);
13139 if (NewOp.getNode())
13143 if (VT == MVT::v16i16 && HasInt256) {
13144 SDValue NewOp = LowerVECTOR_SHUFFLEv16i16(Op, DAG);
13145 if (NewOp.getNode())
13149 if (VT == MVT::v16i8) {
13150 SDValue NewOp = LowerVECTOR_SHUFFLEv16i8(SVOp, Subtarget, DAG);
13151 if (NewOp.getNode())
13155 if (VT == MVT::v32i8) {
13156 SDValue NewOp = LowerVECTOR_SHUFFLEv32i8(SVOp, Subtarget, DAG);
13157 if (NewOp.getNode())
13161 // Handle all 128-bit wide vectors with 4 elements, and match them with
13162 // several different shuffle types.
13163 if (NumElems == 4 && VT.is128BitVector())
13164 return LowerVECTOR_SHUFFLE_128v4(SVOp, DAG);
13166 // Handle general 256-bit shuffles
13167 if (VT.is256BitVector())
13168 return LowerVECTOR_SHUFFLE_256(SVOp, DAG);
13173 // This function assumes its argument is a BUILD_VECTOR of constants or
13174 // undef SDNodes, i.e., ISD::isBuildVectorOfConstantSDNodes(BuildVector) is true.
13176 static bool BUILD_VECTORtoBlendMask(BuildVectorSDNode *BuildVector,
13177 unsigned &MaskValue) {
13179 unsigned NumElems = BuildVector->getNumOperands();
13180 // There are 2 lanes if (NumElems > 8), and 1 lane otherwise.
13181 unsigned NumLanes = (NumElems - 1) / 8 + 1;
13182 unsigned NumElemsInLane = NumElems / NumLanes;
13184 // Blend for v16i16 should be symmetric for both lanes.
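// For example, a v4i32 condition of <true, false, true, false> selects LHS
// for elements 0 and 2 and RHS for elements 1 and 3; since the blend
// immediate uses 1 to pick the second operand, this produces
// MaskValue == 0b1010.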
13185 for (unsigned i = 0; i < NumElemsInLane; ++i) {
13186 SDValue EltCond = BuildVector->getOperand(i);
13187 SDValue SndLaneEltCond =
13188 (NumLanes == 2) ? BuildVector->getOperand(i + NumElemsInLane) : EltCond;
13190 int Lane1Cond = -1, Lane2Cond = -1;
13191 if (isa<ConstantSDNode>(EltCond))
13192 Lane1Cond = !isZero(EltCond);
13193 if (isa<ConstantSDNode>(SndLaneEltCond))
13194 Lane2Cond = !isZero(SndLaneEltCond);
13196 if (Lane1Cond == Lane2Cond || Lane2Cond < 0)
13197 // Lane1Cond != 0 means we want the first argument.
13198 // Lane1Cond == 0 means we want the second argument.
13199 // The encoding of this argument is 0 for the first argument, 1
13200 // for the second. Therefore, invert the condition.
13201 MaskValue |= !Lane1Cond << i;
13202 else if (Lane1Cond < 0)
13203 MaskValue |= !Lane2Cond << i;
13210 /// \brief Try to lower a VSELECT instruction to an immediate-controlled blend operation.
13212 static SDValue lowerVSELECTtoBLENDI(SDValue Op, const X86Subtarget *Subtarget,
13213 SelectionDAG &DAG) {
13214 SDValue Cond = Op.getOperand(0);
13215 SDValue LHS = Op.getOperand(1);
13216 SDValue RHS = Op.getOperand(2);
13218 MVT VT = Op.getSimpleValueType();
13219 MVT EltVT = VT.getVectorElementType();
13220 unsigned NumElems = VT.getVectorNumElements();
13222 // There is no blend with immediate in AVX-512.
13223 if (VT.is512BitVector())
13226 if (!Subtarget->hasSSE41() || EltVT == MVT::i8)
13228 if (!Subtarget->hasInt256() && VT == MVT::v16i16)
13231 if (!ISD::isBuildVectorOfConstantSDNodes(Cond.getNode()))
13234 // Check the mask for BLEND and build the value.
13235 unsigned MaskValue = 0;
13236 if (!BUILD_VECTORtoBlendMask(cast<BuildVectorSDNode>(Cond), MaskValue))
13239 // Convert i32 vectors to floating point if AVX2 is not available.
13240 // AVX2 introduced VPBLENDD instruction for 128 and 256-bit vectors.
13242 if (EltVT == MVT::i64 || (EltVT == MVT::i32 && !Subtarget->hasInt256())) {
13243 BlendVT = MVT::getVectorVT(MVT::getFloatingPointVT(EltVT.getSizeInBits()),
13245 LHS = DAG.getNode(ISD::BITCAST, dl, BlendVT, LHS);
13246 RHS = DAG.getNode(ISD::BITCAST, dl, BlendVT, RHS);
13249 SDValue Ret = DAG.getNode(X86ISD::BLENDI, dl, BlendVT, LHS, RHS,
13250 DAG.getConstant(MaskValue, MVT::i32));
13251 return DAG.getNode(ISD::BITCAST, dl, VT, Ret);
13254 SDValue X86TargetLowering::LowerVSELECT(SDValue Op, SelectionDAG &DAG) const {
13255 // A vselect where all conditions and data are constants can be optimized into
13256 // a single vector load by SelectionDAGLegalize::ExpandBUILD_VECTOR().
13257 if (ISD::isBuildVectorOfConstantSDNodes(Op.getOperand(0).getNode()) &&
13258 ISD::isBuildVectorOfConstantSDNodes(Op.getOperand(1).getNode()) &&
13259 ISD::isBuildVectorOfConstantSDNodes(Op.getOperand(2).getNode()))
13262 SDValue BlendOp = lowerVSELECTtoBLENDI(Op, Subtarget, DAG);
13263 if (BlendOp.getNode())
13266 // Some types for vselect were previously set to Expand, not Legal or
13267 // Custom. Return an empty SDValue so we fall through to Expand, after
13268 // the Custom lowering phase.
13269 MVT VT = Op.getSimpleValueType();
13270 switch (VT.SimpleTy) {
13275 if (Subtarget->hasBWI() && Subtarget->hasVLX())
13280 // We couldn't create a "Blend with immediate" node.
13281 // This node should still be legal, but we'll have to emit a blendv* instruction.
13286 static SDValue LowerEXTRACT_VECTOR_ELT_SSE4(SDValue Op, SelectionDAG &DAG) {
13287 MVT VT = Op.getSimpleValueType();
13290 if (!Op.getOperand(0).getSimpleValueType().is128BitVector())
13293 if (VT.getSizeInBits() == 8) {
13294 SDValue Extract = DAG.getNode(X86ISD::PEXTRB, dl, MVT::i32,
13295 Op.getOperand(0), Op.getOperand(1));
13296 SDValue Assert = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Extract,
13297 DAG.getValueType(VT));
13298 return DAG.getNode(ISD::TRUNCATE, dl, VT, Assert);
13301 if (VT.getSizeInBits() == 16) {
13302 unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
13303 // If Idx is 0, it's cheaper to do a move instead of a pextrw.
13305 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i16,
13306 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
13307 DAG.getNode(ISD::BITCAST, dl,
13310 Op.getOperand(1)));
13311 SDValue Extract = DAG.getNode(X86ISD::PEXTRW, dl, MVT::i32,
13312 Op.getOperand(0), Op.getOperand(1));
13313 SDValue Assert = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Extract,
13314 DAG.getValueType(VT));
13315 return DAG.getNode(ISD::TRUNCATE, dl, VT, Assert);
13318 if (VT == MVT::f32) {
13319 // EXTRACTPS outputs to a GPR32 register which will require a movd to copy
13320 // the result back to an FR32 register. It's only worth matching if the
13321 // result has a single use which is a store or a bitcast to i32. And in
13322 // the case of a store, it's not worth it if the index is a constant 0,
13323 // because a MOVSSmr can be used instead, which is smaller and faster.
13324 if (!Op.hasOneUse())
13326 SDNode *User = *Op.getNode()->use_begin();
13327 if ((User->getOpcode() != ISD::STORE ||
13328 (isa<ConstantSDNode>(Op.getOperand(1)) &&
13329 cast<ConstantSDNode>(Op.getOperand(1))->isNullValue())) &&
13330 (User->getOpcode() != ISD::BITCAST ||
13331 User->getValueType(0) != MVT::i32))
13333 SDValue Extract = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
13334 DAG.getNode(ISD::BITCAST, dl, MVT::v4i32,
13337 return DAG.getNode(ISD::BITCAST, dl, MVT::f32, Extract);
13340 if (VT == MVT::i32 || VT == MVT::i64) {
13341 // ExtractPS/pextrq works with constant index.
13342 if (isa<ConstantSDNode>(Op.getOperand(1)))
13348 /// Extract one bit from mask vector, like v16i1 or v8i1.
13349 /// AVX-512 feature.
13351 X86TargetLowering::ExtractBitFromMaskVector(SDValue Op, SelectionDAG &DAG) const {
13352 SDValue Vec = Op.getOperand(0);
13354 MVT VecVT = Vec.getSimpleValueType();
13355 SDValue Idx = Op.getOperand(1);
13356 MVT EltVT = Op.getSimpleValueType();
13358 assert((EltVT == MVT::i1) && "Unexpected operands in ExtractBitFromMaskVector");
13359 assert((VecVT.getVectorNumElements() <= 16 || Subtarget->hasBWI()) &&
13360 "Unexpected vector type in ExtractBitFromMaskVector");
13362 // A variable index can't be handled in mask registers,
13363 // so extend the vector to VR512.
13364 if (!isa<ConstantSDNode>(Idx)) {
13365 MVT ExtVT = (VecVT == MVT::v8i1 ? MVT::v8i64 : MVT::v16i32);
13366 SDValue Ext = DAG.getNode(ISD::ZERO_EXTEND, dl, ExtVT, Vec);
13367 SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl,
13368 ExtVT.getVectorElementType(), Ext, Idx);
13369 return DAG.getNode(ISD::TRUNCATE, dl, EltVT, Elt);
13372 unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
13373 const TargetRegisterClass* rc = getRegClassFor(VecVT);
13374 if (!Subtarget->hasDQI() && (VecVT.getVectorNumElements() <= 8))
13375 rc = getRegClassFor(MVT::v16i1);
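// The requested bit is isolated with a shift pair below: e.g. for a v16i1
// source and IdxVal == 5, shifting left by 15 - 5 == 10 moves bit 5 into the
// top bit (the higher bits are shifted out), and shifting right by 15 then
// brings it down to bit 0 with zero fill.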
13376 unsigned MaxSift = rc->getSize()*8 - 1;
13377 Vec = DAG.getNode(X86ISD::VSHLI, dl, VecVT, Vec,
13378 DAG.getConstant(MaxSift - IdxVal, MVT::i8));
13379 Vec = DAG.getNode(X86ISD::VSRLI, dl, VecVT, Vec,
13380 DAG.getConstant(MaxSift, MVT::i8));
13381 return DAG.getNode(X86ISD::VEXTRACT, dl, MVT::i1, Vec,
13382 DAG.getIntPtrConstant(0));
13386 X86TargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op,
13387 SelectionDAG &DAG) const {
13389 SDValue Vec = Op.getOperand(0);
13390 MVT VecVT = Vec.getSimpleValueType();
13391 SDValue Idx = Op.getOperand(1);
13393 if (Op.getSimpleValueType() == MVT::i1)
13394 return ExtractBitFromMaskVector(Op, DAG);
13396 if (!isa<ConstantSDNode>(Idx)) {
13397 if (VecVT.is512BitVector() ||
13398 (VecVT.is256BitVector() && Subtarget->hasInt256() &&
13399 VecVT.getVectorElementType().getSizeInBits() == 32)) {
13402 MVT::getIntegerVT(VecVT.getVectorElementType().getSizeInBits());
13403 MVT MaskVT = MVT::getVectorVT(MaskEltVT, VecVT.getSizeInBits() /
13404 MaskEltVT.getSizeInBits());
13406 Idx = DAG.getZExtOrTrunc(Idx, dl, MaskEltVT);
13407 SDValue Mask = DAG.getNode(X86ISD::VINSERT, dl, MaskVT,
13408 getZeroVector(MaskVT, Subtarget, DAG, dl),
13409 Idx, DAG.getConstant(0, getPointerTy()));
13410 SDValue Perm = DAG.getNode(X86ISD::VPERMV, dl, VecVT, Mask, Vec);
13411 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, Op.getValueType(),
13412 Perm, DAG.getConstant(0, getPointerTy()));
13417 // If this is a 256-bit vector result, first extract the 128-bit vector and
13418 // then extract the element from the 128-bit vector.
13419 if (VecVT.is256BitVector() || VecVT.is512BitVector()) {
13421 unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
13422 // Get the 128-bit vector.
13423 Vec = Extract128BitVector(Vec, IdxVal, DAG, dl);
13424 MVT EltVT = VecVT.getVectorElementType();
13426 unsigned ElemsPerChunk = 128 / EltVT.getSizeInBits();
13428 //if (IdxVal >= NumElems/2)
13429 // IdxVal -= NumElems/2;
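// For example, when extracting element 5 of a v8i32, Extract128BitVector
// above returns the 128-bit chunk holding element 5, and the index is
// rebased to 5 - 4 == 1 within that chunk.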
13430 IdxVal -= (IdxVal/ElemsPerChunk)*ElemsPerChunk;
13431 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, Op.getValueType(), Vec,
13432 DAG.getConstant(IdxVal, MVT::i32));
13435 assert(VecVT.is128BitVector() && "Unexpected vector length");
13437 if (Subtarget->hasSSE41()) {
13438 SDValue Res = LowerEXTRACT_VECTOR_ELT_SSE4(Op, DAG);
13443 MVT VT = Op.getSimpleValueType();
13444 // TODO: handle v16i8.
13445 if (VT.getSizeInBits() == 16) {
13446 SDValue Vec = Op.getOperand(0);
13447 unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
13449 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i16,
13450 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
13451 DAG.getNode(ISD::BITCAST, dl,
13453 Op.getOperand(1)));
13454 // Transform it so it matches pextrw, which produces a 32-bit result.
13455 MVT EltVT = MVT::i32;
13456 SDValue Extract = DAG.getNode(X86ISD::PEXTRW, dl, EltVT,
13457 Op.getOperand(0), Op.getOperand(1));
13458 SDValue Assert = DAG.getNode(ISD::AssertZext, dl, EltVT, Extract,
13459 DAG.getValueType(VT));
13460 return DAG.getNode(ISD::TRUNCATE, dl, VT, Assert);
13463 if (VT.getSizeInBits() == 32) {
13464 unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
13468 // SHUFPS the element to the lowest double word, then movss.
13469 int Mask[4] = { static_cast<int>(Idx), -1, -1, -1 };
13470 MVT VVT = Op.getOperand(0).getSimpleValueType();
13471 SDValue Vec = DAG.getVectorShuffle(VVT, dl, Op.getOperand(0),
13472 DAG.getUNDEF(VVT), Mask);
13473 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Vec,
13474 DAG.getIntPtrConstant(0));
13477 if (VT.getSizeInBits() == 64) {
13478 // FIXME: .td only matches this for <2 x f64>, not <2 x i64> on 32b
13479 // FIXME: seems like this should be unnecessary if mov{h,l}pd were taught
13480 // to match extract_elt for f64.
13481 unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
13485 // UNPCKHPD the element to the lowest double word, then movsd.
13486 // Note if the lower 64 bits of the result of the UNPCKHPD is then stored
13487 // to a f64mem, the whole operation is folded into a single MOVHPDmr.
13488 int Mask[2] = { 1, -1 };
13489 MVT VVT = Op.getOperand(0).getSimpleValueType();
13490 SDValue Vec = DAG.getVectorShuffle(VVT, dl, Op.getOperand(0),
13491 DAG.getUNDEF(VVT), Mask);
13492 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Vec,
13493 DAG.getIntPtrConstant(0));
13499 /// Insert one bit to mask vector, like v16i1 or v8i1.
13500 /// AVX-512 feature.
13502 X86TargetLowering::InsertBitToMaskVector(SDValue Op, SelectionDAG &DAG) const {
13504 SDValue Vec = Op.getOperand(0);
13505 SDValue Elt = Op.getOperand(1);
13506 SDValue Idx = Op.getOperand(2);
13507 MVT VecVT = Vec.getSimpleValueType();
13509 if (!isa<ConstantSDNode>(Idx)) {
13510 // Non-constant index. Extend the source and destination,
13511 // insert element and then truncate the result.
13512 MVT ExtVecVT = (VecVT == MVT::v8i1 ? MVT::v8i64 : MVT::v16i32);
13513 MVT ExtEltVT = (VecVT == MVT::v8i1 ? MVT::i64 : MVT::i32);
13514 SDValue ExtOp = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, ExtVecVT,
13515 DAG.getNode(ISD::ZERO_EXTEND, dl, ExtVecVT, Vec),
13516 DAG.getNode(ISD::ZERO_EXTEND, dl, ExtEltVT, Elt), Idx);
13517 return DAG.getNode(ISD::TRUNCATE, dl, VecVT, ExtOp);
13520 unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
13521 SDValue EltInVec = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VecVT, Elt);
13522 if (Vec.getOpcode() == ISD::UNDEF)
13523 return DAG.getNode(X86ISD::VSHLI, dl, VecVT, EltInVec,
13524 DAG.getConstant(IdxVal, MVT::i8));
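// In the general case the new bit starts in bit 0 of EltInVec; shifting it
// left to the top bit and then right by (top position - IdxVal) clears every
// other lane and lands the bit at position IdxVal, where it is ORed into
// the existing mask.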
13525 const TargetRegisterClass* rc = getRegClassFor(VecVT);
13526 unsigned MaxSift = rc->getSize()*8 - 1;
13527 EltInVec = DAG.getNode(X86ISD::VSHLI, dl, VecVT, EltInVec,
13528 DAG.getConstant(MaxSift, MVT::i8));
13529 EltInVec = DAG.getNode(X86ISD::VSRLI, dl, VecVT, EltInVec,
13530 DAG.getConstant(MaxSift - IdxVal, MVT::i8));
13531 return DAG.getNode(ISD::OR, dl, VecVT, Vec, EltInVec);
13534 SDValue X86TargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op,
13535 SelectionDAG &DAG) const {
13536 MVT VT = Op.getSimpleValueType();
13537 MVT EltVT = VT.getVectorElementType();
13539 if (EltVT == MVT::i1)
13540 return InsertBitToMaskVector(Op, DAG);
13543 SDValue N0 = Op.getOperand(0);
13544 SDValue N1 = Op.getOperand(1);
13545 SDValue N2 = Op.getOperand(2);
13546 if (!isa<ConstantSDNode>(N2))
13548 auto *N2C = cast<ConstantSDNode>(N2);
13549 unsigned IdxVal = N2C->getZExtValue();
13551 // If the vector is wider than 128 bits, extract the 128-bit subvector, insert
13552 // into that, and then insert the subvector back into the result.
13553 if (VT.is256BitVector() || VT.is512BitVector()) {
13554 // Get the desired 128-bit vector half.
13555 SDValue V = Extract128BitVector(N0, IdxVal, DAG, dl);
13557 // Insert the element into the desired half.
13558 unsigned NumEltsIn128 = 128 / EltVT.getSizeInBits();
13559 unsigned IdxIn128 = IdxVal - (IdxVal / NumEltsIn128) * NumEltsIn128;
13561 V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, V.getValueType(), V, N1,
13562 DAG.getConstant(IdxIn128, MVT::i32));
13564 // Insert the changed part back into the full-width vector.
13565 return Insert128BitVector(N0, V, IdxVal, DAG, dl);
13567 assert(VT.is128BitVector() && "Only 128-bit vector types should be left!");
13569 if (Subtarget->hasSSE41()) {
13570 if (EltVT.getSizeInBits() == 8 || EltVT.getSizeInBits() == 16) {
13572 if (VT == MVT::v8i16) {
13573 Opc = X86ISD::PINSRW;
13575 assert(VT == MVT::v16i8);
13576 Opc = X86ISD::PINSRB;
13579 // Transform it so it matches pinsr{b,w}, which expects a GR32 as its second
13581 if (N1.getValueType() != MVT::i32)
13582 N1 = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, N1);
13583 if (N2.getValueType() != MVT::i32)
13584 N2 = DAG.getIntPtrConstant(IdxVal);
13585 return DAG.getNode(Opc, dl, VT, N0, N1, N2);
13588 if (EltVT == MVT::f32) {
13589 // Bits [7:6] of the constant are the source select. This will always be
13590 // zero here. The DAG Combiner may combine an extract_elt index into these
13592 // bits. For example (insert (extract, 3), 2) could be matched by putting
13594 // the '3' into bits [7:6] of X86ISD::INSERTPS.
13595 // Bits [5:4] of the constant are the destination select. This is the
13596 // value of the incoming immediate.
13597 // Bits [3:0] of the constant are the zero mask. The DAG Combiner may
13598 // combine either bitwise AND or insert of float 0.0 to set these bits.
13599 N2 = DAG.getIntPtrConstant(IdxVal << 4);
13600 // Create this as a scalar to vector.
13601 N1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4f32, N1);
13602 return DAG.getNode(X86ISD::INSERTPS, dl, VT, N0, N1, N2);
13605 if (EltVT == MVT::i32 || EltVT == MVT::i64) {
13606 // PINSR* works with constant index.
13611 if (EltVT == MVT::i8)
13614 if (EltVT.getSizeInBits() == 16) {
13615 // Transform it so it matches pinsrw, which expects a 16-bit value in a GR32
13616 // as its second argument.
13617 if (N1.getValueType() != MVT::i32)
13618 N1 = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, N1);
13619 if (N2.getValueType() != MVT::i32)
13620 N2 = DAG.getIntPtrConstant(IdxVal);
13621 return DAG.getNode(X86ISD::PINSRW, dl, VT, N0, N1, N2);
13626 static SDValue LowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG) {
13628 MVT OpVT = Op.getSimpleValueType();
13630 // If this is a 256-bit vector result, first insert into a 128-bit
13631 // vector and then insert into the 256-bit vector.
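// For example, a v8i32 scalar_to_vector is built as a v4i32 scalar_to_vector
// and then placed into the low 128 bits of an undef v8i32.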
13632 if (!OpVT.is128BitVector()) {
13633 // Insert into a 128-bit vector.
13634 unsigned SizeFactor = OpVT.getSizeInBits()/128;
13635 MVT VT128 = MVT::getVectorVT(OpVT.getVectorElementType(),
13636 OpVT.getVectorNumElements() / SizeFactor);
13638 Op = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT128, Op.getOperand(0));
13640 // Insert the 128-bit vector.
13641 return Insert128BitVector(DAG.getUNDEF(OpVT), Op, 0, DAG, dl);
13644 if (OpVT == MVT::v1i64 &&
13645 Op.getOperand(0).getValueType() == MVT::i64)
13646 return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v1i64, Op.getOperand(0));
13648 SDValue AnyExt = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, Op.getOperand(0));
13649 assert(OpVT.is128BitVector() && "Expected an SSE type!");
13650 return DAG.getNode(ISD::BITCAST, dl, OpVT,
13651 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32,AnyExt));
13654 // Lower a node with an EXTRACT_SUBVECTOR opcode. This may result in
13655 // a simple subregister reference or explicit instructions to grab
13656 // upper bits of a vector.
13657 static SDValue LowerEXTRACT_SUBVECTOR(SDValue Op, const X86Subtarget *Subtarget,
13658 SelectionDAG &DAG) {
13660 SDValue In = Op.getOperand(0);
13661 SDValue Idx = Op.getOperand(1);
13662 unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
13663 MVT ResVT = Op.getSimpleValueType();
13664 MVT InVT = In.getSimpleValueType();
13666 if (Subtarget->hasFp256()) {
13667 if (ResVT.is128BitVector() &&
13668 (InVT.is256BitVector() || InVT.is512BitVector()) &&
13669 isa<ConstantSDNode>(Idx)) {
13670 return Extract128BitVector(In, IdxVal, DAG, dl);
13672 if (ResVT.is256BitVector() && InVT.is512BitVector() &&
13673 isa<ConstantSDNode>(Idx)) {
13674 return Extract256BitVector(In, IdxVal, DAG, dl);
13680 // Lower a node with an INSERT_SUBVECTOR opcode. This may result in a
13681 // simple superregister reference or explicit instructions to insert
13682 // the upper bits of a vector.
13683 static SDValue LowerINSERT_SUBVECTOR(SDValue Op, const X86Subtarget *Subtarget,
13684 SelectionDAG &DAG) {
13685 if (!Subtarget->hasAVX())
13689 SDValue Vec = Op.getOperand(0);
13690 SDValue SubVec = Op.getOperand(1);
13691 SDValue Idx = Op.getOperand(2);
13693 if (!isa<ConstantSDNode>(Idx))
13696 unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
13697 MVT OpVT = Op.getSimpleValueType();
13698 MVT SubVecVT = SubVec.getSimpleValueType();
13700 // Fold two 16-byte subvector loads into one 32-byte load:
13701 // (insert_subvector (insert_subvector undef, (load addr), 0),
13702 // (load addr + 16), Elts/2)
13704 if ((IdxVal == OpVT.getVectorNumElements() / 2) &&
13705 Vec.getOpcode() == ISD::INSERT_SUBVECTOR &&
13706 OpVT.is256BitVector() && SubVecVT.is128BitVector() &&
13707 !Subtarget->isUnalignedMem32Slow()) {
13708 SDValue SubVec2 = Vec.getOperand(1);
13709 if (auto *Idx2 = dyn_cast<ConstantSDNode>(Vec.getOperand(2))) {
13710 if (Idx2->getZExtValue() == 0) {
13711 SDValue Ops[] = { SubVec2, SubVec };
13712 SDValue LD = EltsFromConsecutiveLoads(OpVT, Ops, dl, DAG, false);
13719 if ((OpVT.is256BitVector() || OpVT.is512BitVector()) &&
13720 SubVecVT.is128BitVector())
13721 return Insert128BitVector(Vec, SubVec, IdxVal, DAG, dl);
13723 if (OpVT.is512BitVector() && SubVecVT.is256BitVector())
13724 return Insert256BitVector(Vec, SubVec, IdxVal, DAG, dl);
13729 // ConstantPool, JumpTable, GlobalAddress, and ExternalSymbol are lowered as
13730 // their target counterpart wrapped in the X86ISD::Wrapper node. Suppose N is
13731 // one of the above mentioned nodes. It has to be wrapped because otherwise
13732 // Select(N) returns N. So the raw TargetGlobalAddress nodes, etc. can only
13733 // be used to form an addressing mode. These wrapped nodes will be selected into MOV32ri.
13736 X86TargetLowering::LowerConstantPool(SDValue Op, SelectionDAG &DAG) const {
13737 ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
13739 // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
13740 // global base reg.
13741 unsigned char OpFlag = 0;
13742 unsigned WrapperKind = X86ISD::Wrapper;
13743 CodeModel::Model M = DAG.getTarget().getCodeModel();
13745 if (Subtarget->isPICStyleRIPRel() &&
13746 (M == CodeModel::Small || M == CodeModel::Kernel))
13747 WrapperKind = X86ISD::WrapperRIP;
13748 else if (Subtarget->isPICStyleGOT())
13749 OpFlag = X86II::MO_GOTOFF;
13750 else if (Subtarget->isPICStyleStubPIC())
13751 OpFlag = X86II::MO_PIC_BASE_OFFSET;
13753 SDValue Result = DAG.getTargetConstantPool(CP->getConstVal(), getPointerTy(),
13754 CP->getAlignment(),
13755 CP->getOffset(), OpFlag);
13757 Result = DAG.getNode(WrapperKind, DL, getPointerTy(), Result);
13758 // With PIC, the address is actually $g + Offset.
13760 Result = DAG.getNode(ISD::ADD, DL, getPointerTy(),
13761 DAG.getNode(X86ISD::GlobalBaseReg,
13762 SDLoc(), getPointerTy()),
13769 SDValue X86TargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const {
13770 JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
13772 // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
13773 // global base reg.
13774 unsigned char OpFlag = 0;
13775 unsigned WrapperKind = X86ISD::Wrapper;
13776 CodeModel::Model M = DAG.getTarget().getCodeModel();
13778 if (Subtarget->isPICStyleRIPRel() &&
13779 (M == CodeModel::Small || M == CodeModel::Kernel))
13780 WrapperKind = X86ISD::WrapperRIP;
13781 else if (Subtarget->isPICStyleGOT())
13782 OpFlag = X86II::MO_GOTOFF;
13783 else if (Subtarget->isPICStyleStubPIC())
13784 OpFlag = X86II::MO_PIC_BASE_OFFSET;
13786 SDValue Result = DAG.getTargetJumpTable(JT->getIndex(), getPointerTy(),
13789 Result = DAG.getNode(WrapperKind, DL, getPointerTy(), Result);
13791 // With PIC, the address is actually $g + Offset.
13793 Result = DAG.getNode(ISD::ADD, DL, getPointerTy(),
13794 DAG.getNode(X86ISD::GlobalBaseReg,
13795 SDLoc(), getPointerTy()),
13802 X86TargetLowering::LowerExternalSymbol(SDValue Op, SelectionDAG &DAG) const {
13803 const char *Sym = cast<ExternalSymbolSDNode>(Op)->getSymbol();
13805 // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
13806 // global base reg.
13807 unsigned char OpFlag = 0;
13808 unsigned WrapperKind = X86ISD::Wrapper;
13809 CodeModel::Model M = DAG.getTarget().getCodeModel();
13811 if (Subtarget->isPICStyleRIPRel() &&
13812 (M == CodeModel::Small || M == CodeModel::Kernel)) {
13813 if (Subtarget->isTargetDarwin() || Subtarget->isTargetELF())
13814 OpFlag = X86II::MO_GOTPCREL;
13815 WrapperKind = X86ISD::WrapperRIP;
13816 } else if (Subtarget->isPICStyleGOT()) {
13817 OpFlag = X86II::MO_GOT;
13818 } else if (Subtarget->isPICStyleStubPIC()) {
13819 OpFlag = X86II::MO_DARWIN_NONLAZY_PIC_BASE;
13820 } else if (Subtarget->isPICStyleStubNoDynamic()) {
13821 OpFlag = X86II::MO_DARWIN_NONLAZY;
13824 SDValue Result = DAG.getTargetExternalSymbol(Sym, getPointerTy(), OpFlag);
13827 Result = DAG.getNode(WrapperKind, DL, getPointerTy(), Result);
13829 // With PIC, the address is actually $g + Offset.
13830 if (DAG.getTarget().getRelocationModel() == Reloc::PIC_ &&
13831 !Subtarget->is64Bit()) {
13832 Result = DAG.getNode(ISD::ADD, DL, getPointerTy(),
13833 DAG.getNode(X86ISD::GlobalBaseReg,
13834 SDLoc(), getPointerTy()),
13838 // For symbols that require a load from a stub to get the address, emit the load.
13840 if (isGlobalStubReference(OpFlag))
13841 Result = DAG.getLoad(getPointerTy(), DL, DAG.getEntryNode(), Result,
13842 MachinePointerInfo::getGOT(), false, false, false, 0);
13848 X86TargetLowering::LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const {
13849 // Create the TargetBlockAddress node.
13850 unsigned char OpFlags =
13851 Subtarget->ClassifyBlockAddressReference();
13852 CodeModel::Model M = DAG.getTarget().getCodeModel();
13853 const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
13854 int64_t Offset = cast<BlockAddressSDNode>(Op)->getOffset();
13856 SDValue Result = DAG.getTargetBlockAddress(BA, getPointerTy(), Offset,
13859 if (Subtarget->isPICStyleRIPRel() &&
13860 (M == CodeModel::Small || M == CodeModel::Kernel))
13861 Result = DAG.getNode(X86ISD::WrapperRIP, dl, getPointerTy(), Result);
13863 Result = DAG.getNode(X86ISD::Wrapper, dl, getPointerTy(), Result);
13865 // With PIC, the address is actually $g + Offset.
13866 if (isGlobalRelativeToPICBase(OpFlags)) {
13867 Result = DAG.getNode(ISD::ADD, dl, getPointerTy(),
13868 DAG.getNode(X86ISD::GlobalBaseReg, dl, getPointerTy()),
13876 X86TargetLowering::LowerGlobalAddress(const GlobalValue *GV, SDLoc dl,
13877 int64_t Offset, SelectionDAG &DAG) const {
13878 // Create the TargetGlobalAddress node, folding in the constant
13879 // offset if it is legal.
13880 unsigned char OpFlags =
13881 Subtarget->ClassifyGlobalReference(GV, DAG.getTarget());
13882 CodeModel::Model M = DAG.getTarget().getCodeModel();
13884 if (OpFlags == X86II::MO_NO_FLAG &&
13885 X86::isOffsetSuitableForCodeModel(Offset, M)) {
13886 // A direct static reference to a global.
13887 Result = DAG.getTargetGlobalAddress(GV, dl, getPointerTy(), Offset);
13890 Result = DAG.getTargetGlobalAddress(GV, dl, getPointerTy(), 0, OpFlags);
13893 if (Subtarget->isPICStyleRIPRel() &&
13894 (M == CodeModel::Small || M == CodeModel::Kernel))
13895 Result = DAG.getNode(X86ISD::WrapperRIP, dl, getPointerTy(), Result);
13897 Result = DAG.getNode(X86ISD::Wrapper, dl, getPointerTy(), Result);
13899 // With PIC, the address is actually $g + Offset.
13900 if (isGlobalRelativeToPICBase(OpFlags)) {
13901 Result = DAG.getNode(ISD::ADD, dl, getPointerTy(),
13902 DAG.getNode(X86ISD::GlobalBaseReg, dl, getPointerTy()),
13906 // For globals that require a load from a stub to get the address, emit the load.
13908 if (isGlobalStubReference(OpFlags))
13909 Result = DAG.getLoad(getPointerTy(), dl, DAG.getEntryNode(), Result,
13910 MachinePointerInfo::getGOT(), false, false, false, 0);
13912 // If there was a non-zero offset that we didn't fold, create an explicit
13913 // addition for it.
13915 Result = DAG.getNode(ISD::ADD, dl, getPointerTy(), Result,
13916 DAG.getConstant(Offset, getPointerTy()));
13922 X86TargetLowering::LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const {
13923 const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
13924 int64_t Offset = cast<GlobalAddressSDNode>(Op)->getOffset();
13925 return LowerGlobalAddress(GV, SDLoc(Op), Offset, DAG);
13929 GetTLSADDR(SelectionDAG &DAG, SDValue Chain, GlobalAddressSDNode *GA,
13930 SDValue *InFlag, const EVT PtrVT, unsigned ReturnReg,
13931 unsigned char OperandFlags, bool LocalDynamic = false) {
13932 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
13933 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
13935 SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl,
13936 GA->getValueType(0),
13940 X86ISD::NodeType CallType = LocalDynamic ? X86ISD::TLSBASEADDR
13944 SDValue Ops[] = { Chain, TGA, *InFlag };
13945 Chain = DAG.getNode(CallType, dl, NodeTys, Ops);
13947 SDValue Ops[] = { Chain, TGA };
13948 Chain = DAG.getNode(CallType, dl, NodeTys, Ops);
13951 // TLSADDR will be codegen'ed as a call. Inform MFI that the function has calls.
13952 MFI->setAdjustsStack(true);
13953 MFI->setHasCalls(true);
13955 SDValue Flag = Chain.getValue(1);
13956 return DAG.getCopyFromReg(Chain, dl, ReturnReg, PtrVT, Flag);
13959 // Lower ISD::GlobalTLSAddress using the "general dynamic" model, 32 bit
13961 LowerToTLSGeneralDynamicModel32(GlobalAddressSDNode *GA, SelectionDAG &DAG,
13964 SDLoc dl(GA); // ? function entry point might be better
13965 SDValue Chain = DAG.getCopyToReg(DAG.getEntryNode(), dl, X86::EBX,
13966 DAG.getNode(X86ISD::GlobalBaseReg,
13967 SDLoc(), PtrVT), InFlag);
13968 InFlag = Chain.getValue(1);
13970 return GetTLSADDR(DAG, Chain, GA, &InFlag, PtrVT, X86::EAX, X86II::MO_TLSGD);
13973 // Lower ISD::GlobalTLSAddress using the "general dynamic" model, 64 bit
13975 LowerToTLSGeneralDynamicModel64(GlobalAddressSDNode *GA, SelectionDAG &DAG,
13977 return GetTLSADDR(DAG, DAG.getEntryNode(), GA, nullptr, PtrVT,
13978 X86::RAX, X86II::MO_TLSGD);
13981 static SDValue LowerToTLSLocalDynamicModel(GlobalAddressSDNode *GA,
13987 // Get the start address of the TLS block for this module.
13988 X86MachineFunctionInfo* MFI = DAG.getMachineFunction()
13989 .getInfo<X86MachineFunctionInfo>();
13990 MFI->incNumLocalDynamicTLSAccesses();
13994 Base = GetTLSADDR(DAG, DAG.getEntryNode(), GA, nullptr, PtrVT, X86::RAX,
13995 X86II::MO_TLSLD, /*LocalDynamic=*/true);
13998 SDValue Chain = DAG.getCopyToReg(DAG.getEntryNode(), dl, X86::EBX,
13999 DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), PtrVT), InFlag);
14000 InFlag = Chain.getValue(1);
14001 Base = GetTLSADDR(DAG, Chain, GA, &InFlag, PtrVT, X86::EAX,
14002 X86II::MO_TLSLDM, /*LocalDynamic=*/true);
14005   // Note: the CleanupLocalDynamicTLSPass will remove redundant computations of the TLS base address.
14009 unsigned char OperandFlags = X86II::MO_DTPOFF;
14010 unsigned WrapperKind = X86ISD::Wrapper;
14011 SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl,
14012 GA->getValueType(0),
14013 GA->getOffset(), OperandFlags);
14014 SDValue Offset = DAG.getNode(WrapperKind, dl, PtrVT, TGA);
14016 // Add x@dtpoff with the base.
14017 return DAG.getNode(ISD::ADD, dl, PtrVT, Offset, Base);
14020 // Lower ISD::GlobalTLSAddress using the "initial exec" or "local exec" model.
14021 static SDValue LowerToTLSExecModel(GlobalAddressSDNode *GA, SelectionDAG &DAG,
14022 const EVT PtrVT, TLSModel::Model model,
14023 bool is64Bit, bool isPIC) {
14026   // Get the Thread Pointer, which is %gs:0 (32-bit) or %fs:0 (64-bit).
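  // The segment base is selected through the pointer's address space: in the
  // X86 backend, address space 256 corresponds to %gs and 257 to %fs, so a
  // load from a null pointer in that address space reads from the thread
  // pointer's segment.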
14027 Value *Ptr = Constant::getNullValue(Type::getInt8PtrTy(*DAG.getContext(),
14028 is64Bit ? 257 : 256));
14030 SDValue ThreadPointer =
14031 DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), DAG.getIntPtrConstant(0),
14032 MachinePointerInfo(Ptr), false, false, false, 0);
14034 unsigned char OperandFlags = 0;
14035   // Most TLS accesses are not RIP relative, even on x86-64. One exception is the 64-bit initial-exec model, where the GOT entry is addressed RIP-relatively.
14037 unsigned WrapperKind = X86ISD::Wrapper;
14038 if (model == TLSModel::LocalExec) {
14039 OperandFlags = is64Bit ? X86II::MO_TPOFF : X86II::MO_NTPOFF;
14040 } else if (model == TLSModel::InitialExec) {
14042 OperandFlags = X86II::MO_GOTTPOFF;
14043 WrapperKind = X86ISD::WrapperRIP;
14045 OperandFlags = isPIC ? X86II::MO_GOTNTPOFF : X86II::MO_INDNTPOFF;
14048 llvm_unreachable("Unexpected model");
14051 // emit "addl x@ntpoff,%eax" (local exec)
14052 // or "addl x@indntpoff,%eax" (initial exec)
14053 // or "addl x@gotntpoff(%ebx) ,%eax" (initial exec, 32-bit pic)
14055 DAG.getTargetGlobalAddress(GA->getGlobal(), dl, GA->getValueType(0),
14056 GA->getOffset(), OperandFlags);
14057 SDValue Offset = DAG.getNode(WrapperKind, dl, PtrVT, TGA);
14059 if (model == TLSModel::InitialExec) {
14060 if (isPIC && !is64Bit) {
14061 Offset = DAG.getNode(ISD::ADD, dl, PtrVT,
14062 DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), PtrVT),
14066 Offset = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Offset,
14067 MachinePointerInfo::getGOT(), false, false, false, 0);
14070 // The address of the thread local variable is the add of the thread
14071 // pointer with the offset of the variable.
14072 return DAG.getNode(ISD::ADD, dl, PtrVT, ThreadPointer, Offset);
14076 X86TargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const {
14078 GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
14079 const GlobalValue *GV = GA->getGlobal();
14081 if (Subtarget->isTargetELF()) {
14082 TLSModel::Model model = DAG.getTarget().getTLSModel(GV);
14085 case TLSModel::GeneralDynamic:
14086 if (Subtarget->is64Bit())
14087 return LowerToTLSGeneralDynamicModel64(GA, DAG, getPointerTy());
14088 return LowerToTLSGeneralDynamicModel32(GA, DAG, getPointerTy());
14089 case TLSModel::LocalDynamic:
14090 return LowerToTLSLocalDynamicModel(GA, DAG, getPointerTy(),
14091 Subtarget->is64Bit());
14092 case TLSModel::InitialExec:
14093 case TLSModel::LocalExec:
14094 return LowerToTLSExecModel(
14095 GA, DAG, getPointerTy(), model, Subtarget->is64Bit(),
14096 DAG.getTarget().getRelocationModel() == Reloc::PIC_);
14098 llvm_unreachable("Unknown TLS model.");
14101 if (Subtarget->isTargetDarwin()) {
14102 // Darwin only has one model of TLS. Lower to that.
14103 unsigned char OpFlag = 0;
14104 unsigned WrapperKind = Subtarget->isPICStyleRIPRel() ?
14105 X86ISD::WrapperRIP : X86ISD::Wrapper;
14107 // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
14108 // global base reg.
14109 bool PIC32 = (DAG.getTarget().getRelocationModel() == Reloc::PIC_) &&
14110 !Subtarget->is64Bit();
14112 OpFlag = X86II::MO_TLVP_PIC_BASE;
14114 OpFlag = X86II::MO_TLVP;
14116 SDValue Result = DAG.getTargetGlobalAddress(GA->getGlobal(), DL,
14117 GA->getValueType(0),
14118 GA->getOffset(), OpFlag);
14119 SDValue Offset = DAG.getNode(WrapperKind, DL, getPointerTy(), Result);
14121 // With PIC32, the address is actually $g + Offset.
14123 Offset = DAG.getNode(ISD::ADD, DL, getPointerTy(),
14124 DAG.getNode(X86ISD::GlobalBaseReg,
14125 SDLoc(), getPointerTy()),
14128     // Lowering the machine ISD will make sure everything is in the right register.
14130 SDValue Chain = DAG.getEntryNode();
14131 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
14132 SDValue Args[] = { Chain, Offset };
14133 Chain = DAG.getNode(X86ISD::TLSCALL, DL, NodeTys, Args);
14135     // TLSCALL will be codegen'ed as a call. Inform MFI that the function has calls.
14136 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
14137 MFI->setAdjustsStack(true);
14139     // And our return value (tls address) is in the standard call return value register.
14141 unsigned Reg = Subtarget->is64Bit() ? X86::RAX : X86::EAX;
14142 return DAG.getCopyFromReg(Chain, DL, Reg, getPointerTy(),
14143 Chain.getValue(1));
14146 if (Subtarget->isTargetKnownWindowsMSVC() ||
14147 Subtarget->isTargetWindowsGNU()) {
14148 // Just use the implicit TLS architecture
14149     // Need to generate something similar to:
14150 // mov rdx, qword [gs:abs 58H]; Load pointer to ThreadLocalStorage
14152     // mov ecx, dword [rel _tls_index]; Load index (from C runtime)
14153 // mov rcx, qword [rdx+rcx*8]
14154 // mov eax, .tls$:tlsvar
14155 // [rax+rcx] contains the address
14156 // Windows 64bit: gs:0x58
14157 // Windows 32bit: fs:__tls_array
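    // Illustrative C-level sketch of the address computation below (64-bit
    // case, not actual emitted code; 'var' stands for the accessed variable):
    //   char **ThreadPointer = *(char ***)(gs:0x58);      // ThreadLocalStoragePointer
    //   char  *TlsBase       = ThreadPointer[_tls_index]; // this module's TLS block
    //   result               = TlsBase + SECREL(var);     // offset of var within .tls$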
14160 SDValue Chain = DAG.getEntryNode();
14162 // Get the Thread Pointer, which is %fs:__tls_array (32-bit) or
14163 // %gs:0x58 (64-bit). On MinGW, __tls_array is not available, so directly
14164 // use its literal value of 0x2C.
14165 Value *Ptr = Constant::getNullValue(Subtarget->is64Bit()
14166 ? Type::getInt8PtrTy(*DAG.getContext(),
14168 : Type::getInt32PtrTy(*DAG.getContext(),
14172 Subtarget->is64Bit()
14173 ? DAG.getIntPtrConstant(0x58)
14174 : (Subtarget->isTargetWindowsGNU()
14175 ? DAG.getIntPtrConstant(0x2C)
14176 : DAG.getExternalSymbol("_tls_array", getPointerTy()));
14178 SDValue ThreadPointer =
14179 DAG.getLoad(getPointerTy(), dl, Chain, TlsArray,
14180 MachinePointerInfo(Ptr), false, false, false, 0);
14182 // Load the _tls_index variable
14183 SDValue IDX = DAG.getExternalSymbol("_tls_index", getPointerTy());
14184 if (Subtarget->is64Bit())
14185 IDX = DAG.getExtLoad(ISD::ZEXTLOAD, dl, getPointerTy(), Chain,
14186 IDX, MachinePointerInfo(), MVT::i32,
14187 false, false, false, 0);
14189 IDX = DAG.getLoad(getPointerTy(), dl, Chain, IDX, MachinePointerInfo(),
14190 false, false, false, 0);
14192 SDValue Scale = DAG.getConstant(Log2_64_Ceil(TD->getPointerSize()),
14194 IDX = DAG.getNode(ISD::SHL, dl, getPointerTy(), IDX, Scale);
14196 SDValue res = DAG.getNode(ISD::ADD, dl, getPointerTy(), ThreadPointer, IDX);
14197 res = DAG.getLoad(getPointerTy(), dl, Chain, res, MachinePointerInfo(),
14198 false, false, false, 0);
14200     // Get the offset of the start of the .tls section.
14201 SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl,
14202 GA->getValueType(0),
14203 GA->getOffset(), X86II::MO_SECREL);
14204 SDValue Offset = DAG.getNode(X86ISD::Wrapper, dl, getPointerTy(), TGA);
14206 // The address of the thread local variable is the add of the thread
14207 // pointer with the offset of the variable.
14208 return DAG.getNode(ISD::ADD, dl, getPointerTy(), res, Offset);
14211 llvm_unreachable("TLS not implemented for this target.");
14214 /// LowerShiftParts - Lower SRA_PARTS and friends, which return two i32 values
14215 /// and take a 2 x i32 value to shift plus a shift amount.
14216 static SDValue LowerShiftParts(SDValue Op, SelectionDAG &DAG) {
14217 assert(Op.getNumOperands() == 3 && "Not a double-shift!");
14218 MVT VT = Op.getSimpleValueType();
14219 unsigned VTBits = VT.getSizeInBits();
14221 bool isSRA = Op.getOpcode() == ISD::SRA_PARTS;
14222 SDValue ShOpLo = Op.getOperand(0);
14223 SDValue ShOpHi = Op.getOperand(1);
14224 SDValue ShAmt = Op.getOperand(2);
14225   // X86ISD::SHLD and X86ISD::SHRD have defined overflow behavior but the
14226   // generic ISD nodes don't. Insert an AND to be safe; it is optimized away when not needed.
14228 SDValue SafeShAmt = DAG.getNode(ISD::AND, dl, MVT::i8, ShAmt,
14229 DAG.getConstant(VTBits - 1, MVT::i8));
14230 SDValue Tmp1 = isSRA ? DAG.getNode(ISD::SRA, dl, VT, ShOpHi,
14231 DAG.getConstant(VTBits - 1, MVT::i8))
14232 : DAG.getConstant(0, VT);
14234 SDValue Tmp2, Tmp3;
14235 if (Op.getOpcode() == ISD::SHL_PARTS) {
14236 Tmp2 = DAG.getNode(X86ISD::SHLD, dl, VT, ShOpHi, ShOpLo, ShAmt);
14237 Tmp3 = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, SafeShAmt);
14239 Tmp2 = DAG.getNode(X86ISD::SHRD, dl, VT, ShOpLo, ShOpHi, ShAmt);
14240 Tmp3 = DAG.getNode(isSRA ? ISD::SRA : ISD::SRL, dl, VT, ShOpHi, SafeShAmt);
14243   // If the shift amount is greater than or equal to the width of a part, we can't
14244 // rely on the results of shld/shrd. Insert a test and select the appropriate
14245 // values for large shift amounts.
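  // For example, with 32-bit parts, an SHL_PARTS by 40 has bit 5 of the amount
  // set, so the CMOVs below yield Hi = Lo << (40 & 31) = Lo << 8 and Lo = 0,
  // matching a true 64-bit shift left by 40.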
14246 SDValue AndNode = DAG.getNode(ISD::AND, dl, MVT::i8, ShAmt,
14247 DAG.getConstant(VTBits, MVT::i8));
14248 SDValue Cond = DAG.getNode(X86ISD::CMP, dl, MVT::i32,
14249 AndNode, DAG.getConstant(0, MVT::i8));
14252 SDValue CC = DAG.getConstant(X86::COND_NE, MVT::i8);
14253 SDValue Ops0[4] = { Tmp2, Tmp3, CC, Cond };
14254 SDValue Ops1[4] = { Tmp3, Tmp1, CC, Cond };
14256 if (Op.getOpcode() == ISD::SHL_PARTS) {
14257 Hi = DAG.getNode(X86ISD::CMOV, dl, VT, Ops0);
14258 Lo = DAG.getNode(X86ISD::CMOV, dl, VT, Ops1);
14260 Lo = DAG.getNode(X86ISD::CMOV, dl, VT, Ops0);
14261 Hi = DAG.getNode(X86ISD::CMOV, dl, VT, Ops1);
14264 SDValue Ops[2] = { Lo, Hi };
14265 return DAG.getMergeValues(Ops, dl);
14268 SDValue X86TargetLowering::LowerSINT_TO_FP(SDValue Op,
14269 SelectionDAG &DAG) const {
14270 MVT SrcVT = Op.getOperand(0).getSimpleValueType();
14273 if (SrcVT.isVector()) {
14274 if (SrcVT.getVectorElementType() == MVT::i1) {
14275 MVT IntegerVT = MVT::getVectorVT(MVT::i32, SrcVT.getVectorNumElements());
14276 return DAG.getNode(ISD::SINT_TO_FP, dl, Op.getValueType(),
14277 DAG.getNode(ISD::SIGN_EXTEND, dl, IntegerVT,
14278 Op.getOperand(0)));
14283 assert(SrcVT <= MVT::i64 && SrcVT >= MVT::i16 &&
14284 "Unknown SINT_TO_FP to lower!");
14286   // These are really Legal; return the operand so the caller accepts it as Legal.
14288 if (SrcVT == MVT::i32 && isScalarFPTypeInSSEReg(Op.getValueType()))
14290 if (SrcVT == MVT::i64 && isScalarFPTypeInSSEReg(Op.getValueType()) &&
14291 Subtarget->is64Bit()) {
14295 unsigned Size = SrcVT.getSizeInBits()/8;
14296 MachineFunction &MF = DAG.getMachineFunction();
14297 int SSFI = MF.getFrameInfo()->CreateStackObject(Size, Size, false);
14298 SDValue StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
14299 SDValue Chain = DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0),
14301 MachinePointerInfo::getFixedStack(SSFI),
14303 return BuildFILD(Op, SrcVT, Chain, StackSlot, DAG);
14306 SDValue X86TargetLowering::BuildFILD(SDValue Op, EVT SrcVT, SDValue Chain,
14308 SelectionDAG &DAG) const {
14312 bool useSSE = isScalarFPTypeInSSEReg(Op.getValueType());
14314 Tys = DAG.getVTList(MVT::f64, MVT::Other, MVT::Glue);
14316 Tys = DAG.getVTList(Op.getValueType(), MVT::Other);
14318 unsigned ByteSize = SrcVT.getSizeInBits()/8;
14320 FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(StackSlot);
14321 MachineMemOperand *MMO;
14323 int SSFI = FI->getIndex();
14325 DAG.getMachineFunction()
14326 .getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI),
14327 MachineMemOperand::MOLoad, ByteSize, ByteSize);
14329 MMO = cast<LoadSDNode>(StackSlot)->getMemOperand();
14330 StackSlot = StackSlot.getOperand(1);
14332 SDValue Ops[] = { Chain, StackSlot, DAG.getValueType(SrcVT) };
14333 SDValue Result = DAG.getMemIntrinsicNode(useSSE ? X86ISD::FILD_FLAG :
14335 Tys, Ops, SrcVT, MMO);
14338 Chain = Result.getValue(1);
14339 SDValue InFlag = Result.getValue(2);
14341 // FIXME: Currently the FST is flagged to the FILD_FLAG. This
14342 // shouldn't be necessary except that RFP cannot be live across
14343 // multiple blocks. When stackifier is fixed, they can be uncoupled.
14344 MachineFunction &MF = DAG.getMachineFunction();
14345 unsigned SSFISize = Op.getValueType().getSizeInBits()/8;
14346 int SSFI = MF.getFrameInfo()->CreateStackObject(SSFISize, SSFISize, false);
14347 SDValue StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
14348 Tys = DAG.getVTList(MVT::Other);
14350 Chain, Result, StackSlot, DAG.getValueType(Op.getValueType()), InFlag
14352 MachineMemOperand *MMO =
14353 DAG.getMachineFunction()
14354 .getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI),
14355 MachineMemOperand::MOStore, SSFISize, SSFISize);
14357 Chain = DAG.getMemIntrinsicNode(X86ISD::FST, DL, Tys,
14358 Ops, Op.getValueType(), MMO);
14359 Result = DAG.getLoad(Op.getValueType(), DL, Chain, StackSlot,
14360 MachinePointerInfo::getFixedStack(SSFI),
14361 false, false, false, 0);
14367 // LowerUINT_TO_FP_i64 - 64-bit unsigned integer to double expansion.
14368 SDValue X86TargetLowering::LowerUINT_TO_FP_i64(SDValue Op,
14369 SelectionDAG &DAG) const {
14370   // This algorithm is not obvious. Here is what we're trying to output:
14373 punpckldq (c0), %xmm0 // c0: (uint4){ 0x43300000U, 0x45300000U, 0U, 0U }
14374 subpd (c1), %xmm0 // c1: (double2){ 0x1.0p52, 0x1.0p52 * 0x1.0p32 }
14376 haddpd %xmm0, %xmm0
14378 pshufd $0x4e, %xmm0, %xmm1
14384 LLVMContext *Context = DAG.getContext();
14386 // Build some magic constants.
14387 static const uint32_t CV0[] = { 0x43300000, 0x45300000, 0, 0 };
14388 Constant *C0 = ConstantDataVector::get(*Context, CV0);
14389 SDValue CPIdx0 = DAG.getConstantPool(C0, getPointerTy(), 16);
14391 SmallVector<Constant*,2> CV1;
14393 ConstantFP::get(*Context, APFloat(APFloat::IEEEdouble,
14394 APInt(64, 0x4330000000000000ULL))));
14396 ConstantFP::get(*Context, APFloat(APFloat::IEEEdouble,
14397 APInt(64, 0x4530000000000000ULL))));
14398 Constant *C1 = ConstantVector::get(CV1);
14399 SDValue CPIdx1 = DAG.getConstantPool(C1, getPointerTy(), 16);
14401 // Load the 64-bit value into an XMM register.
14402 SDValue XR1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64,
14404 SDValue CLod0 = DAG.getLoad(MVT::v4i32, dl, DAG.getEntryNode(), CPIdx0,
14405 MachinePointerInfo::getConstantPool(),
14406 false, false, false, 16);
14407 SDValue Unpck1 = getUnpackl(DAG, dl, MVT::v4i32,
14408 DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, XR1),
14411 SDValue CLod1 = DAG.getLoad(MVT::v2f64, dl, CLod0.getValue(1), CPIdx1,
14412 MachinePointerInfo::getConstantPool(),
14413 false, false, false, 16);
14414 SDValue XR2F = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Unpck1);
14415 SDValue Sub = DAG.getNode(ISD::FSUB, dl, MVT::v2f64, XR2F, CLod1);
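  // Illustrative scalar model of what Sub now holds ("bitcast" here meaning a
  // plain reinterpretation of the integer bits as a double):
  //   Sub[0] = bitcast(0x4330000000000000 | (x & 0xffffffff)) - 0x1.0p52
  //          = (double)(x & 0xffffffff)
  //   Sub[1] = bitcast(0x4530000000000000 | (x >> 32)) - 0x1.0p84
  //          = (double)(x >> 32) * 0x1.0p32
  // so the horizontal add below produces the correctly rounded double for any
  // unsigned 64-bit x.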
14418 if (Subtarget->hasSSE3()) {
14419 // FIXME: The 'haddpd' instruction may be slower than 'movhlps + addsd'.
14420 Result = DAG.getNode(X86ISD::FHADD, dl, MVT::v2f64, Sub, Sub);
14422 SDValue S2F = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Sub);
14423 SDValue Shuffle = getTargetShuffleNode(X86ISD::PSHUFD, dl, MVT::v4i32,
14425 Result = DAG.getNode(ISD::FADD, dl, MVT::v2f64,
14426 DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Shuffle),
14430 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Result,
14431 DAG.getIntPtrConstant(0));
14434 // LowerUINT_TO_FP_i32 - 32-bit unsigned integer to float expansion.
14435 SDValue X86TargetLowering::LowerUINT_TO_FP_i32(SDValue Op,
14436 SelectionDAG &DAG) const {
14438   // FP constant to bias-correct the final result.
14439 SDValue Bias = DAG.getConstantFP(BitsToDouble(0x4330000000000000ULL),
14442 // Load the 32-bit value into an XMM register.
14443 SDValue Load = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32,
14446 // Zero out the upper parts of the register.
14447 Load = getShuffleVectorZeroOrUndef(Load, 0, true, Subtarget, DAG);
14449 Load = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64,
14450 DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Load),
14451 DAG.getIntPtrConstant(0));
14453 // Or the load with the bias.
14454 SDValue Or = DAG.getNode(ISD::OR, dl, MVT::v2i64,
14455 DAG.getNode(ISD::BITCAST, dl, MVT::v2i64,
14456 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl,
14457 MVT::v2f64, Load)),
14458 DAG.getNode(ISD::BITCAST, dl, MVT::v2i64,
14459 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl,
14460 MVT::v2f64, Bias)));
14461 Or = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64,
14462 DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Or),
14463 DAG.getIntPtrConstant(0));
14465 // Subtract the bias.
14466 SDValue Sub = DAG.getNode(ISD::FSUB, dl, MVT::f64, Or, Bias);
14468 // Handle final rounding.
14469 EVT DestVT = Op.getValueType();
14471 if (DestVT.bitsLT(MVT::f64))
14472 return DAG.getNode(ISD::FP_ROUND, dl, DestVT, Sub,
14473 DAG.getIntPtrConstant(0));
14474 if (DestVT.bitsGT(MVT::f64))
14475 return DAG.getNode(ISD::FP_EXTEND, dl, DestVT, Sub);
14477 // Handle final rounding.
14481 static SDValue lowerUINT_TO_FP_vXi32(SDValue Op, SelectionDAG &DAG,
14482 const X86Subtarget &Subtarget) {
14483 // The algorithm is the following:
14484 // #ifdef __SSE4_1__
14485 // uint4 lo = _mm_blend_epi16( v, (uint4) 0x4b000000, 0xaa);
14486 // uint4 hi = _mm_blend_epi16( _mm_srli_epi32(v,16),
14487 // (uint4) 0x53000000, 0xaa);
14489 // uint4 lo = (v & (uint4) 0xffff) | (uint4) 0x4b000000;
14490 // uint4 hi = (v >> 16) | (uint4) 0x53000000;
14492 // float4 fhi = (float4) hi - (0x1.0p39f + 0x1.0p23f);
14493 // return (float4) lo + fhi;
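  // Worked example for a single lane (illustrative): with v = 0x00012345,
  //   lo  = 0x4b002345  ->  0x1.0p23f + 0x2345
  //   hi  = 0x53000001  ->  0x1.0p39f + 0x1.0p16f
  //   fhi = hi - (0x1.0p39f + 0x1.0p23f) = 0x1.0p16f - 0x1.0p23f
  //   lo + fhi = 0x2345 + 0x10000 = 74565.0f = (float)v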
14496 SDValue V = Op->getOperand(0);
14497 EVT VecIntVT = V.getValueType();
14498 bool Is128 = VecIntVT == MVT::v4i32;
14499 EVT VecFloatVT = Is128 ? MVT::v4f32 : MVT::v8f32;
14500   // If we convert to something other than the supported type, e.g., to v4f64, bail out early.
14502 if (VecFloatVT != Op->getValueType(0))
14505 unsigned NumElts = VecIntVT.getVectorNumElements();
14506 assert((VecIntVT == MVT::v4i32 || VecIntVT == MVT::v8i32) &&
14507 "Unsupported custom type");
14508 assert(NumElts <= 8 && "The size of the constant array must be fixed");
14510   // In the #ifdef/#else code, we have in common:
14511 // - The vector of constants:
14517 // Create the splat vector for 0x4b000000.
14518 SDValue CstLow = DAG.getConstant(0x4b000000, MVT::i32);
14519 SDValue CstLowArray[] = {CstLow, CstLow, CstLow, CstLow,
14520 CstLow, CstLow, CstLow, CstLow};
14521 SDValue VecCstLow = DAG.getNode(ISD::BUILD_VECTOR, DL, VecIntVT,
14522 makeArrayRef(&CstLowArray[0], NumElts));
14523 // Create the splat vector for 0x53000000.
14524 SDValue CstHigh = DAG.getConstant(0x53000000, MVT::i32);
14525 SDValue CstHighArray[] = {CstHigh, CstHigh, CstHigh, CstHigh,
14526 CstHigh, CstHigh, CstHigh, CstHigh};
14527 SDValue VecCstHigh = DAG.getNode(ISD::BUILD_VECTOR, DL, VecIntVT,
14528 makeArrayRef(&CstHighArray[0], NumElts));
14530 // Create the right shift.
14531 SDValue CstShift = DAG.getConstant(16, MVT::i32);
14532 SDValue CstShiftArray[] = {CstShift, CstShift, CstShift, CstShift,
14533 CstShift, CstShift, CstShift, CstShift};
14534 SDValue VecCstShift = DAG.getNode(ISD::BUILD_VECTOR, DL, VecIntVT,
14535 makeArrayRef(&CstShiftArray[0], NumElts));
14536 SDValue HighShift = DAG.getNode(ISD::SRL, DL, VecIntVT, V, VecCstShift);
14539 if (Subtarget.hasSSE41()) {
14540 EVT VecI16VT = Is128 ? MVT::v8i16 : MVT::v16i16;
14541 // uint4 lo = _mm_blend_epi16( v, (uint4) 0x4b000000, 0xaa);
14542 SDValue VecCstLowBitcast =
14543 DAG.getNode(ISD::BITCAST, DL, VecI16VT, VecCstLow);
14544 SDValue VecBitcast = DAG.getNode(ISD::BITCAST, DL, VecI16VT, V);
14545     // Low will be bitcast right away, so do not bother bitcasting back to its original type.
14547 Low = DAG.getNode(X86ISD::BLENDI, DL, VecI16VT, VecBitcast,
14548 VecCstLowBitcast, DAG.getConstant(0xaa, MVT::i32));
14549 // uint4 hi = _mm_blend_epi16( _mm_srli_epi32(v,16),
14550 // (uint4) 0x53000000, 0xaa);
14551 SDValue VecCstHighBitcast =
14552 DAG.getNode(ISD::BITCAST, DL, VecI16VT, VecCstHigh);
14553 SDValue VecShiftBitcast =
14554 DAG.getNode(ISD::BITCAST, DL, VecI16VT, HighShift);
14555 // High will be bitcasted right away, so do not bother bitcasting back to
14556 // its original type.
14557 High = DAG.getNode(X86ISD::BLENDI, DL, VecI16VT, VecShiftBitcast,
14558 VecCstHighBitcast, DAG.getConstant(0xaa, MVT::i32));
14560 SDValue CstMask = DAG.getConstant(0xffff, MVT::i32);
14561 SDValue VecCstMask = DAG.getNode(ISD::BUILD_VECTOR, DL, VecIntVT, CstMask,
14562 CstMask, CstMask, CstMask);
14563 // uint4 lo = (v & (uint4) 0xffff) | (uint4) 0x4b000000;
14564 SDValue LowAnd = DAG.getNode(ISD::AND, DL, VecIntVT, V, VecCstMask);
14565 Low = DAG.getNode(ISD::OR, DL, VecIntVT, LowAnd, VecCstLow);
14567 // uint4 hi = (v >> 16) | (uint4) 0x53000000;
14568 High = DAG.getNode(ISD::OR, DL, VecIntVT, HighShift, VecCstHigh);
14571 // Create the vector constant for -(0x1.0p39f + 0x1.0p23f).
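  // (0xD3000080 is the IEEE-754 single-precision encoding of that value: sign
  // bit set, biased exponent 0xA6 giving 2^39, and mantissa 0x80 adding 2^23.)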
14572 SDValue CstFAdd = DAG.getConstantFP(
14573 APFloat(APFloat::IEEEsingle, APInt(32, 0xD3000080)), MVT::f32);
14574 SDValue CstFAddArray[] = {CstFAdd, CstFAdd, CstFAdd, CstFAdd,
14575 CstFAdd, CstFAdd, CstFAdd, CstFAdd};
14576 SDValue VecCstFAdd = DAG.getNode(ISD::BUILD_VECTOR, DL, VecFloatVT,
14577 makeArrayRef(&CstFAddArray[0], NumElts));
14579 // float4 fhi = (float4) hi - (0x1.0p39f + 0x1.0p23f);
14580 SDValue HighBitcast = DAG.getNode(ISD::BITCAST, DL, VecFloatVT, High);
14582 DAG.getNode(ISD::FADD, DL, VecFloatVT, HighBitcast, VecCstFAdd);
14583 // return (float4) lo + fhi;
14584 SDValue LowBitcast = DAG.getNode(ISD::BITCAST, DL, VecFloatVT, Low);
14585 return DAG.getNode(ISD::FADD, DL, VecFloatVT, LowBitcast, FHigh);
14588 SDValue X86TargetLowering::lowerUINT_TO_FP_vec(SDValue Op,
14589 SelectionDAG &DAG) const {
14590 SDValue N0 = Op.getOperand(0);
14591 MVT SVT = N0.getSimpleValueType();
14594 switch (SVT.SimpleTy) {
14596 llvm_unreachable("Custom UINT_TO_FP is not supported!");
14601 MVT NVT = MVT::getVectorVT(MVT::i32, SVT.getVectorNumElements());
14602 return DAG.getNode(ISD::SINT_TO_FP, dl, Op.getValueType(),
14603 DAG.getNode(ISD::ZERO_EXTEND, dl, NVT, N0));
14607 return lowerUINT_TO_FP_vXi32(Op, DAG, *Subtarget);
14609 llvm_unreachable(nullptr);
14612 SDValue X86TargetLowering::LowerUINT_TO_FP(SDValue Op,
14613 SelectionDAG &DAG) const {
14614 SDValue N0 = Op.getOperand(0);
14617 if (Op.getValueType().isVector())
14618 return lowerUINT_TO_FP_vec(Op, DAG);
14620 // Since UINT_TO_FP is legal (it's marked custom), dag combiner won't
14621 // optimize it to a SINT_TO_FP when the sign bit is known zero. Perform
14622 // the optimization here.
14623 if (DAG.SignBitIsZero(N0))
14624 return DAG.getNode(ISD::SINT_TO_FP, dl, Op.getValueType(), N0);
14626 MVT SrcVT = N0.getSimpleValueType();
14627 MVT DstVT = Op.getSimpleValueType();
14628 if (SrcVT == MVT::i64 && DstVT == MVT::f64 && X86ScalarSSEf64)
14629 return LowerUINT_TO_FP_i64(Op, DAG);
14630 if (SrcVT == MVT::i32 && X86ScalarSSEf64)
14631 return LowerUINT_TO_FP_i32(Op, DAG);
14632 if (Subtarget->is64Bit() && SrcVT == MVT::i64 && DstVT == MVT::f32)
14635 // Make a 64-bit buffer, and use it to build an FILD.
14636 SDValue StackSlot = DAG.CreateStackTemporary(MVT::i64);
14637 if (SrcVT == MVT::i32) {
14638 SDValue WordOff = DAG.getConstant(4, getPointerTy());
14639 SDValue OffsetSlot = DAG.getNode(ISD::ADD, dl,
14640 getPointerTy(), StackSlot, WordOff);
14641 SDValue Store1 = DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0),
14642 StackSlot, MachinePointerInfo(),
14644 SDValue Store2 = DAG.getStore(Store1, dl, DAG.getConstant(0, MVT::i32),
14645 OffsetSlot, MachinePointerInfo(),
14647 SDValue Fild = BuildFILD(Op, MVT::i64, Store2, StackSlot, DAG);
14651 assert(SrcVT == MVT::i64 && "Unexpected type in UINT_TO_FP");
14652 SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0),
14653 StackSlot, MachinePointerInfo(),
14655 // For i64 source, we need to add the appropriate power of 2 if the input
14656 // was negative. This is the same as the optimization in
14657   // DAGTypeLegalizer::ExpandIntOp_UINT_TO_FP, and for it to be safe here,
14658 // we must be careful to do the computation in x87 extended precision, not
14659 // in SSE. (The generic code can't know it's OK to do this, or how to.)
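  // The fudge constant loaded below, 0x5F800000, is the single-precision bit
  // pattern of 0x1.0p64f; it is added when the sign bit of the i64 input was
  // set, compensating for FILD having interpreted the stored bits as signed.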
14660 int SSFI = cast<FrameIndexSDNode>(StackSlot)->getIndex();
14661 MachineMemOperand *MMO =
14662 DAG.getMachineFunction()
14663 .getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI),
14664 MachineMemOperand::MOLoad, 8, 8);
14666 SDVTList Tys = DAG.getVTList(MVT::f80, MVT::Other);
14667 SDValue Ops[] = { Store, StackSlot, DAG.getValueType(MVT::i64) };
14668 SDValue Fild = DAG.getMemIntrinsicNode(X86ISD::FILD, dl, Tys, Ops,
14671 APInt FF(32, 0x5F800000ULL);
14673 // Check whether the sign bit is set.
14674 SDValue SignSet = DAG.getSetCC(dl,
14675 getSetCCResultType(*DAG.getContext(), MVT::i64),
14676 Op.getOperand(0), DAG.getConstant(0, MVT::i64),
14679 // Build a 64 bit pair (0, FF) in the constant pool, with FF in the lo bits.
14680 SDValue FudgePtr = DAG.getConstantPool(
14681 ConstantInt::get(*DAG.getContext(), FF.zext(64)),
14684 // Get a pointer to FF if the sign bit was set, or to 0 otherwise.
14685 SDValue Zero = DAG.getIntPtrConstant(0);
14686 SDValue Four = DAG.getIntPtrConstant(4);
14687 SDValue Offset = DAG.getNode(ISD::SELECT, dl, Zero.getValueType(), SignSet,
14689 FudgePtr = DAG.getNode(ISD::ADD, dl, getPointerTy(), FudgePtr, Offset);
14691 // Load the value out, extending it from f32 to f80.
14692 // FIXME: Avoid the extend by constructing the right constant pool?
14693 SDValue Fudge = DAG.getExtLoad(ISD::EXTLOAD, dl, MVT::f80, DAG.getEntryNode(),
14694 FudgePtr, MachinePointerInfo::getConstantPool(),
14695 MVT::f32, false, false, false, 4);
14696 // Extend everything to 80 bits to force it to be done on x87.
14697 SDValue Add = DAG.getNode(ISD::FADD, dl, MVT::f80, Fild, Fudge);
14698 return DAG.getNode(ISD::FP_ROUND, dl, DstVT, Add, DAG.getIntPtrConstant(0));
14701 std::pair<SDValue,SDValue>
14702 X86TargetLowering::FP_TO_INTHelper(SDValue Op, SelectionDAG &DAG,
14703 bool IsSigned, bool IsReplace) const {
14706 EVT DstTy = Op.getValueType();
14708 if (!IsSigned && !isIntegerTypeFTOL(DstTy)) {
14709 assert(DstTy == MVT::i32 && "Unexpected FP_TO_UINT");
14713 assert(DstTy.getSimpleVT() <= MVT::i64 &&
14714 DstTy.getSimpleVT() >= MVT::i16 &&
14715 "Unknown FP_TO_INT to lower!");
14717 // These are really Legal.
14718 if (DstTy == MVT::i32 &&
14719 isScalarFPTypeInSSEReg(Op.getOperand(0).getValueType()))
14720 return std::make_pair(SDValue(), SDValue());
14721 if (Subtarget->is64Bit() &&
14722 DstTy == MVT::i64 &&
14723 isScalarFPTypeInSSEReg(Op.getOperand(0).getValueType()))
14724 return std::make_pair(SDValue(), SDValue());
14726 // We lower FP->int64 either into FISTP64 followed by a load from a temporary
14727 // stack slot, or into the FTOL runtime function.
14728 MachineFunction &MF = DAG.getMachineFunction();
14729 unsigned MemSize = DstTy.getSizeInBits()/8;
14730 int SSFI = MF.getFrameInfo()->CreateStackObject(MemSize, MemSize, false);
14731 SDValue StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
14734 if (!IsSigned && isIntegerTypeFTOL(DstTy))
14735 Opc = X86ISD::WIN_FTOL;
14737 switch (DstTy.getSimpleVT().SimpleTy) {
14738 default: llvm_unreachable("Invalid FP_TO_SINT to lower!");
14739 case MVT::i16: Opc = X86ISD::FP_TO_INT16_IN_MEM; break;
14740 case MVT::i32: Opc = X86ISD::FP_TO_INT32_IN_MEM; break;
14741 case MVT::i64: Opc = X86ISD::FP_TO_INT64_IN_MEM; break;
14744 SDValue Chain = DAG.getEntryNode();
14745 SDValue Value = Op.getOperand(0);
14746 EVT TheVT = Op.getOperand(0).getValueType();
14747 // FIXME This causes a redundant load/store if the SSE-class value is already
14748 // in memory, such as if it is on the callstack.
14749 if (isScalarFPTypeInSSEReg(TheVT)) {
14750 assert(DstTy == MVT::i64 && "Invalid FP_TO_SINT to lower!");
14751 Chain = DAG.getStore(Chain, DL, Value, StackSlot,
14752 MachinePointerInfo::getFixedStack(SSFI),
14754 SDVTList Tys = DAG.getVTList(Op.getOperand(0).getValueType(), MVT::Other);
14756 Chain, StackSlot, DAG.getValueType(TheVT)
14759 MachineMemOperand *MMO =
14760 MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI),
14761 MachineMemOperand::MOLoad, MemSize, MemSize);
14762 Value = DAG.getMemIntrinsicNode(X86ISD::FLD, DL, Tys, Ops, DstTy, MMO);
14763 Chain = Value.getValue(1);
14764 SSFI = MF.getFrameInfo()->CreateStackObject(MemSize, MemSize, false);
14765 StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
14768 MachineMemOperand *MMO =
14769 MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI),
14770 MachineMemOperand::MOStore, MemSize, MemSize);
14772 if (Opc != X86ISD::WIN_FTOL) {
14773 // Build the FP_TO_INT*_IN_MEM
14774 SDValue Ops[] = { Chain, Value, StackSlot };
14775 SDValue FIST = DAG.getMemIntrinsicNode(Opc, DL, DAG.getVTList(MVT::Other),
14777 return std::make_pair(FIST, StackSlot);
14779 SDValue ftol = DAG.getNode(X86ISD::WIN_FTOL, DL,
14780 DAG.getVTList(MVT::Other, MVT::Glue),
14782 SDValue eax = DAG.getCopyFromReg(ftol, DL, X86::EAX,
14783 MVT::i32, ftol.getValue(1));
14784 SDValue edx = DAG.getCopyFromReg(eax.getValue(1), DL, X86::EDX,
14785 MVT::i32, eax.getValue(2));
14786 SDValue Ops[] = { eax, edx };
14787 SDValue pair = IsReplace
14788 ? DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, Ops)
14789 : DAG.getMergeValues(Ops, DL);
14790 return std::make_pair(pair, SDValue());
14794 static SDValue LowerAVXExtend(SDValue Op, SelectionDAG &DAG,
14795 const X86Subtarget *Subtarget) {
14796 MVT VT = Op->getSimpleValueType(0);
14797 SDValue In = Op->getOperand(0);
14798 MVT InVT = In.getSimpleValueType();
14801 // Optimize vectors in AVX mode:
14804 // Use vpunpcklwd for 4 lower elements v8i16 -> v4i32.
14805 // Use vpunpckhwd for 4 upper elements v8i16 -> v4i32.
14806 // Concat upper and lower parts.
14809 // Use vpunpckldq for 4 lower elements v4i32 -> v2i64.
14810 // Use vpunpckhdq for 4 upper elements v4i32 -> v2i64.
14811 // Concat upper and lower parts.
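  // For example, a v8i16 -> v8i32 zero-extension without AVX2 becomes,
  // illustratively:
  //   OpLo = punpcklwd In, zero    ; low  4 x i16 -> 4 x i32
  //   OpHi = punpckhwd In, zero    ; high 4 x i16 -> 4 x i32
  //   result = concat(OpLo, OpHi)
  // For any-extension the zero vector is replaced by undef, as done below.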
14814 if (((VT != MVT::v16i16) || (InVT != MVT::v16i8)) &&
14815 ((VT != MVT::v8i32) || (InVT != MVT::v8i16)) &&
14816 ((VT != MVT::v4i64) || (InVT != MVT::v4i32)))
14819 if (Subtarget->hasInt256())
14820 return DAG.getNode(X86ISD::VZEXT, dl, VT, In);
14822 SDValue ZeroVec = getZeroVector(InVT, Subtarget, DAG, dl);
14823 SDValue Undef = DAG.getUNDEF(InVT);
14824 bool NeedZero = Op.getOpcode() == ISD::ZERO_EXTEND;
14825 SDValue OpLo = getUnpackl(DAG, dl, InVT, In, NeedZero ? ZeroVec : Undef);
14826 SDValue OpHi = getUnpackh(DAG, dl, InVT, In, NeedZero ? ZeroVec : Undef);
14828 MVT HVT = MVT::getVectorVT(VT.getVectorElementType(),
14829 VT.getVectorNumElements()/2);
14831 OpLo = DAG.getNode(ISD::BITCAST, dl, HVT, OpLo);
14832 OpHi = DAG.getNode(ISD::BITCAST, dl, HVT, OpHi);
14834 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, OpLo, OpHi);
14837 static SDValue LowerZERO_EXTEND_AVX512(SDValue Op,
14838 SelectionDAG &DAG) {
14839 MVT VT = Op->getSimpleValueType(0);
14840 SDValue In = Op->getOperand(0);
14841 MVT InVT = In.getSimpleValueType();
14843 unsigned int NumElts = VT.getVectorNumElements();
14844 if (NumElts != 8 && NumElts != 16)
14847 if (VT.is512BitVector() && InVT.getVectorElementType() != MVT::i1)
14848 return DAG.getNode(X86ISD::VZEXT, DL, VT, In);
14850 EVT ExtVT = (NumElts == 8)? MVT::v8i64 : MVT::v16i32;
14851 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
14852   // Now only the mask extension case remains.
14853 assert(InVT.getVectorElementType() == MVT::i1);
14854 SDValue Cst = DAG.getTargetConstant(1, ExtVT.getScalarType());
14855 const Constant *C = (dyn_cast<ConstantSDNode>(Cst))->getConstantIntValue();
14856 SDValue CP = DAG.getConstantPool(C, TLI.getPointerTy());
14857 unsigned Alignment = cast<ConstantPoolSDNode>(CP)->getAlignment();
14858 SDValue Ld = DAG.getLoad(Cst.getValueType(), DL, DAG.getEntryNode(), CP,
14859 MachinePointerInfo::getConstantPool(),
14860 false, false, false, Alignment);
14862 SDValue Brcst = DAG.getNode(X86ISD::VBROADCASTM, DL, ExtVT, In, Ld);
14863 if (VT.is512BitVector())
14865 return DAG.getNode(X86ISD::VTRUNC, DL, VT, Brcst);
14868 static SDValue LowerANY_EXTEND(SDValue Op, const X86Subtarget *Subtarget,
14869 SelectionDAG &DAG) {
14870 if (Subtarget->hasFp256()) {
14871 SDValue Res = LowerAVXExtend(Op, DAG, Subtarget);
14879 static SDValue LowerZERO_EXTEND(SDValue Op, const X86Subtarget *Subtarget,
14880 SelectionDAG &DAG) {
14882 MVT VT = Op.getSimpleValueType();
14883 SDValue In = Op.getOperand(0);
14884 MVT SVT = In.getSimpleValueType();
14886 if (VT.is512BitVector() || SVT.getVectorElementType() == MVT::i1)
14887 return LowerZERO_EXTEND_AVX512(Op, DAG);
14889 if (Subtarget->hasFp256()) {
14890 SDValue Res = LowerAVXExtend(Op, DAG, Subtarget);
14895 assert(!VT.is256BitVector() || !SVT.is128BitVector() ||
14896 VT.getVectorNumElements() != SVT.getVectorNumElements());
14900 SDValue X86TargetLowering::LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const {
14902 MVT VT = Op.getSimpleValueType();
14903 SDValue In = Op.getOperand(0);
14904 MVT InVT = In.getSimpleValueType();
14906 if (VT == MVT::i1) {
14907 assert((InVT.isInteger() && (InVT.getSizeInBits() <= 64)) &&
14908 "Invalid scalar TRUNCATE operation");
14909 if (InVT.getSizeInBits() >= 32)
14911 In = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, In);
14912 return DAG.getNode(ISD::TRUNCATE, DL, VT, In);
14914 assert(VT.getVectorNumElements() == InVT.getVectorNumElements() &&
14915 "Invalid TRUNCATE operation");
14917 if (InVT.is512BitVector() || VT.getVectorElementType() == MVT::i1) {
14918 if (VT.getVectorElementType().getSizeInBits() >=8)
14919 return DAG.getNode(X86ISD::VTRUNC, DL, VT, In);
14921 assert(VT.getVectorElementType() == MVT::i1 && "Unexpected vector type");
14922 unsigned NumElts = InVT.getVectorNumElements();
14923 assert ((NumElts == 8 || NumElts == 16) && "Unexpected vector type");
14924 if (InVT.getSizeInBits() < 512) {
14925 MVT ExtVT = (NumElts == 16)? MVT::v16i32 : MVT::v8i64;
14926 In = DAG.getNode(ISD::SIGN_EXTEND, DL, ExtVT, In);
14930 SDValue Cst = DAG.getTargetConstant(1, InVT.getVectorElementType());
14931 const Constant *C = (dyn_cast<ConstantSDNode>(Cst))->getConstantIntValue();
14932 SDValue CP = DAG.getConstantPool(C, getPointerTy());
14933 unsigned Alignment = cast<ConstantPoolSDNode>(CP)->getAlignment();
14934 SDValue Ld = DAG.getLoad(Cst.getValueType(), DL, DAG.getEntryNode(), CP,
14935 MachinePointerInfo::getConstantPool(),
14936 false, false, false, Alignment);
14937 SDValue OneV = DAG.getNode(X86ISD::VBROADCAST, DL, InVT, Ld);
14938 SDValue And = DAG.getNode(ISD::AND, DL, InVT, OneV, In);
14939 return DAG.getNode(X86ISD::TESTM, DL, VT, And, And);
14942 if ((VT == MVT::v4i32) && (InVT == MVT::v4i64)) {
14943 // On AVX2, v4i64 -> v4i32 becomes VPERMD.
14944 if (Subtarget->hasInt256()) {
14945 static const int ShufMask[] = {0, 2, 4, 6, -1, -1, -1, -1};
14946 In = DAG.getNode(ISD::BITCAST, DL, MVT::v8i32, In);
14947 In = DAG.getVectorShuffle(MVT::v8i32, DL, In, DAG.getUNDEF(MVT::v8i32),
14949 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, In,
14950 DAG.getIntPtrConstant(0));
14953 SDValue OpLo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v2i64, In,
14954 DAG.getIntPtrConstant(0));
14955 SDValue OpHi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v2i64, In,
14956 DAG.getIntPtrConstant(2));
14957 OpLo = DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, OpLo);
14958 OpHi = DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, OpHi);
14959 static const int ShufMask[] = {0, 2, 4, 6};
14960 return DAG.getVectorShuffle(VT, DL, OpLo, OpHi, ShufMask);
14963 if ((VT == MVT::v8i16) && (InVT == MVT::v8i32)) {
14964     // On AVX2, v8i32 -> v8i16 becomes PSHUFB.
14965 if (Subtarget->hasInt256()) {
14966 In = DAG.getNode(ISD::BITCAST, DL, MVT::v32i8, In);
14968 SmallVector<SDValue,32> pshufbMask;
14969 for (unsigned i = 0; i < 2; ++i) {
14970 pshufbMask.push_back(DAG.getConstant(0x0, MVT::i8));
14971 pshufbMask.push_back(DAG.getConstant(0x1, MVT::i8));
14972 pshufbMask.push_back(DAG.getConstant(0x4, MVT::i8));
14973 pshufbMask.push_back(DAG.getConstant(0x5, MVT::i8));
14974 pshufbMask.push_back(DAG.getConstant(0x8, MVT::i8));
14975 pshufbMask.push_back(DAG.getConstant(0x9, MVT::i8));
14976 pshufbMask.push_back(DAG.getConstant(0xc, MVT::i8));
14977 pshufbMask.push_back(DAG.getConstant(0xd, MVT::i8));
14978 for (unsigned j = 0; j < 8; ++j)
14979 pshufbMask.push_back(DAG.getConstant(0x80, MVT::i8));
14981 SDValue BV = DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v32i8, pshufbMask);
14982 In = DAG.getNode(X86ISD::PSHUFB, DL, MVT::v32i8, In, BV);
14983 In = DAG.getNode(ISD::BITCAST, DL, MVT::v4i64, In);
14985 static const int ShufMask[] = {0, 2, -1, -1};
14986 In = DAG.getVectorShuffle(MVT::v4i64, DL, In, DAG.getUNDEF(MVT::v4i64),
14988 In = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v2i64, In,
14989 DAG.getIntPtrConstant(0));
14990 return DAG.getNode(ISD::BITCAST, DL, VT, In);
14993 SDValue OpLo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v4i32, In,
14994 DAG.getIntPtrConstant(0));
14996 SDValue OpHi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v4i32, In,
14997 DAG.getIntPtrConstant(4));
14999 OpLo = DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, OpLo);
15000 OpHi = DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, OpHi);
15002 // The PSHUFB mask:
15003 static const int ShufMask1[] = {0, 1, 4, 5, 8, 9, 12, 13,
15004 -1, -1, -1, -1, -1, -1, -1, -1};
15006 SDValue Undef = DAG.getUNDEF(MVT::v16i8);
15007 OpLo = DAG.getVectorShuffle(MVT::v16i8, DL, OpLo, Undef, ShufMask1);
15008 OpHi = DAG.getVectorShuffle(MVT::v16i8, DL, OpHi, Undef, ShufMask1);
15010 OpLo = DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, OpLo);
15011 OpHi = DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, OpHi);
15013 // The MOVLHPS Mask:
15014 static const int ShufMask2[] = {0, 1, 4, 5};
15015 SDValue res = DAG.getVectorShuffle(MVT::v4i32, DL, OpLo, OpHi, ShufMask2);
15016 return DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, res);
15019 // Handle truncation of V256 to V128 using shuffles.
15020 if (!VT.is128BitVector() || !InVT.is256BitVector())
15023 assert(Subtarget->hasFp256() && "256-bit vector without AVX!");
15025 unsigned NumElems = VT.getVectorNumElements();
15026 MVT NVT = MVT::getVectorVT(VT.getVectorElementType(), NumElems * 2);
15028 SmallVector<int, 16> MaskVec(NumElems * 2, -1);
15029 // Prepare truncation shuffle mask
15030 for (unsigned i = 0; i != NumElems; ++i)
15031 MaskVec[i] = i * 2;
15032 SDValue V = DAG.getVectorShuffle(NVT, DL,
15033 DAG.getNode(ISD::BITCAST, DL, NVT, In),
15034 DAG.getUNDEF(NVT), &MaskVec[0]);
15035 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, V,
15036 DAG.getIntPtrConstant(0));
15039 SDValue X86TargetLowering::LowerFP_TO_SINT(SDValue Op,
15040 SelectionDAG &DAG) const {
15041 assert(!Op.getSimpleValueType().isVector());
15043 std::pair<SDValue,SDValue> Vals = FP_TO_INTHelper(Op, DAG,
15044 /*IsSigned=*/ true, /*IsReplace=*/ false);
15045 SDValue FIST = Vals.first, StackSlot = Vals.second;
15046 // If FP_TO_INTHelper failed, the node is actually supposed to be Legal.
15047 if (!FIST.getNode()) return Op;
15049 if (StackSlot.getNode())
15050 // Load the result.
15051 return DAG.getLoad(Op.getValueType(), SDLoc(Op),
15052 FIST, StackSlot, MachinePointerInfo(),
15053 false, false, false, 0);
15055 // The node is the result.
15059 SDValue X86TargetLowering::LowerFP_TO_UINT(SDValue Op,
15060 SelectionDAG &DAG) const {
15061 std::pair<SDValue,SDValue> Vals = FP_TO_INTHelper(Op, DAG,
15062 /*IsSigned=*/ false, /*IsReplace=*/ false);
15063 SDValue FIST = Vals.first, StackSlot = Vals.second;
15064 assert(FIST.getNode() && "Unexpected failure");
15066 if (StackSlot.getNode())
15067 // Load the result.
15068 return DAG.getLoad(Op.getValueType(), SDLoc(Op),
15069 FIST, StackSlot, MachinePointerInfo(),
15070 false, false, false, 0);
15072 // The node is the result.
15076 static SDValue LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) {
15078 MVT VT = Op.getSimpleValueType();
15079 SDValue In = Op.getOperand(0);
15080 MVT SVT = In.getSimpleValueType();
15082 assert(SVT == MVT::v2f32 && "Only customize MVT::v2f32 type legalization!");
15084 return DAG.getNode(X86ISD::VFPEXT, DL, VT,
15085 DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v4f32,
15086 In, DAG.getUNDEF(SVT)));
15089 /// The only differences between FABS and FNEG are the mask and the logic op.
15090 /// FNEG also has a folding opportunity for FNEG(FABS(x)).
15091 static SDValue LowerFABSorFNEG(SDValue Op, SelectionDAG &DAG) {
15092 assert((Op.getOpcode() == ISD::FABS || Op.getOpcode() == ISD::FNEG) &&
15093 "Wrong opcode for lowering FABS or FNEG.");
15095 bool IsFABS = (Op.getOpcode() == ISD::FABS);
15097 // If this is a FABS and it has an FNEG user, bail out to fold the combination
15098 // into an FNABS. We'll lower the FABS after that if it is still in use.
15100 for (SDNode *User : Op->uses())
15101 if (User->getOpcode() == ISD::FNEG)
15104 SDValue Op0 = Op.getOperand(0);
15105 bool IsFNABS = !IsFABS && (Op0.getOpcode() == ISD::FABS);
15108 MVT VT = Op.getSimpleValueType();
15109 // Assume scalar op for initialization; update for vector if needed.
15110 // Note that there are no scalar bitwise logical SSE/AVX instructions, so we
15111 // generate a 16-byte vector constant and logic op even for the scalar case.
15112 // Using a 16-byte mask allows folding the load of the mask with
15113   // the logic op, which can save ~4 bytes of code size.
15115 unsigned NumElts = VT == MVT::f64 ? 2 : 4;
15116 // FIXME: Use function attribute "OptimizeForSize" and/or CodeGenOpt::Level to
15117 // decide if we should generate a 16-byte constant mask when we only need 4 or
15118 // 8 bytes for the scalar case.
15119 if (VT.isVector()) {
15120 EltVT = VT.getVectorElementType();
15121 NumElts = VT.getVectorNumElements();
15124 unsigned EltBits = EltVT.getSizeInBits();
15125 LLVMContext *Context = DAG.getContext();
15126 // For FABS, mask is 0x7f...; for FNEG, mask is 0x80...
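  // e.g. for f32: FABS = x & 0x7fffffff, FNEG = x ^ 0x80000000, and the
  // combined FNABS = x | 0x80000000 (FNABS reuses the FNEG mask with an OR).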
15128 IsFABS ? APInt::getSignedMaxValue(EltBits) : APInt::getSignBit(EltBits);
15129 Constant *C = ConstantInt::get(*Context, MaskElt);
15130 C = ConstantVector::getSplat(NumElts, C);
15131 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
15132 SDValue CPIdx = DAG.getConstantPool(C, TLI.getPointerTy());
15133 unsigned Alignment = cast<ConstantPoolSDNode>(CPIdx)->getAlignment();
15134 SDValue Mask = DAG.getLoad(VT, dl, DAG.getEntryNode(), CPIdx,
15135 MachinePointerInfo::getConstantPool(),
15136 false, false, false, Alignment);
15138 if (VT.isVector()) {
15139 // For a vector, cast operands to a vector type, perform the logic op,
15140 // and cast the result back to the original value type.
15141 MVT VecVT = MVT::getVectorVT(MVT::i64, VT.getSizeInBits() / 64);
15142 SDValue MaskCasted = DAG.getNode(ISD::BITCAST, dl, VecVT, Mask);
15143 SDValue Operand = IsFNABS ?
15144 DAG.getNode(ISD::BITCAST, dl, VecVT, Op0.getOperand(0)) :
15145 DAG.getNode(ISD::BITCAST, dl, VecVT, Op0);
15146 unsigned BitOp = IsFABS ? ISD::AND : IsFNABS ? ISD::OR : ISD::XOR;
15147 return DAG.getNode(ISD::BITCAST, dl, VT,
15148 DAG.getNode(BitOp, dl, VecVT, Operand, MaskCasted));
15151 // If not vector, then scalar.
15152 unsigned BitOp = IsFABS ? X86ISD::FAND : IsFNABS ? X86ISD::FOR : X86ISD::FXOR;
15153 SDValue Operand = IsFNABS ? Op0.getOperand(0) : Op0;
15154 return DAG.getNode(BitOp, dl, VT, Operand, Mask);
15157 static SDValue LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) {
15158 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
15159 LLVMContext *Context = DAG.getContext();
15160 SDValue Op0 = Op.getOperand(0);
15161 SDValue Op1 = Op.getOperand(1);
15163 MVT VT = Op.getSimpleValueType();
15164 MVT SrcVT = Op1.getSimpleValueType();
15166 // If second operand is smaller, extend it first.
15167 if (SrcVT.bitsLT(VT)) {
15168 Op1 = DAG.getNode(ISD::FP_EXTEND, dl, VT, Op1);
15171 // And if it is bigger, shrink it first.
15172 if (SrcVT.bitsGT(VT)) {
15173 Op1 = DAG.getNode(ISD::FP_ROUND, dl, VT, Op1, DAG.getIntPtrConstant(1));
15177 // At this point the operands and the result should have the same
15178 // type, and that won't be f80 since that is not custom lowered.
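  // The code below materializes the usual bitwise identity, shown here for f64:
  //   copysign(mag, sgn) = (mag & 0x7fffffffffffffff) | (sgn & 0x8000000000000000)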
15180 const fltSemantics &Sem =
15181 VT == MVT::f64 ? APFloat::IEEEdouble : APFloat::IEEEsingle;
15182 const unsigned SizeInBits = VT.getSizeInBits();
15184 SmallVector<Constant *, 4> CV(
15185 VT == MVT::f64 ? 2 : 4,
15186 ConstantFP::get(*Context, APFloat(Sem, APInt(SizeInBits, 0))));
15188 // First, clear all bits but the sign bit from the second operand (sign).
15189 CV[0] = ConstantFP::get(*Context,
15190 APFloat(Sem, APInt::getHighBitsSet(SizeInBits, 1)));
15191 Constant *C = ConstantVector::get(CV);
15192 SDValue CPIdx = DAG.getConstantPool(C, TLI.getPointerTy(), 16);
15193 SDValue Mask1 = DAG.getLoad(SrcVT, dl, DAG.getEntryNode(), CPIdx,
15194 MachinePointerInfo::getConstantPool(),
15195 false, false, false, 16);
15196 SDValue SignBit = DAG.getNode(X86ISD::FAND, dl, SrcVT, Op1, Mask1);
15198 // Next, clear the sign bit from the first operand (magnitude).
15199 // If it's a constant, we can clear it here.
15200 if (ConstantFPSDNode *Op0CN = dyn_cast<ConstantFPSDNode>(Op0)) {
15201 APFloat APF = Op0CN->getValueAPF();
15202 // If the magnitude is a positive zero, the sign bit alone is enough.
15203 if (APF.isPosZero())
15206 CV[0] = ConstantFP::get(*Context, APF);
15208 CV[0] = ConstantFP::get(
15210 APFloat(Sem, APInt::getLowBitsSet(SizeInBits, SizeInBits - 1)));
15212 C = ConstantVector::get(CV);
15213 CPIdx = DAG.getConstantPool(C, TLI.getPointerTy(), 16);
15214 SDValue Val = DAG.getLoad(VT, dl, DAG.getEntryNode(), CPIdx,
15215 MachinePointerInfo::getConstantPool(),
15216 false, false, false, 16);
15217 // If the magnitude operand wasn't a constant, we need to AND out the sign.
15218 if (!isa<ConstantFPSDNode>(Op0))
15219 Val = DAG.getNode(X86ISD::FAND, dl, VT, Op0, Val);
15221 // OR the magnitude value with the sign bit.
15222 return DAG.getNode(X86ISD::FOR, dl, VT, Val, SignBit);
15225 static SDValue LowerFGETSIGN(SDValue Op, SelectionDAG &DAG) {
15226 SDValue N0 = Op.getOperand(0);
15228 MVT VT = Op.getSimpleValueType();
15230 // Lower ISD::FGETSIGN to (AND (X86ISD::FGETSIGNx86 ...) 1).
15231 SDValue xFGETSIGN = DAG.getNode(X86ISD::FGETSIGNx86, dl, VT, N0,
15232 DAG.getConstant(1, VT));
15233 return DAG.getNode(ISD::AND, dl, VT, xFGETSIGN, DAG.getConstant(1, VT));
15236 // Check whether an OR'd tree is PTEST-able.
15237 static SDValue LowerVectorAllZeroTest(SDValue Op, const X86Subtarget *Subtarget,
15238 SelectionDAG &DAG) {
15239 assert(Op.getOpcode() == ISD::OR && "Only check OR'd tree.");
15241 if (!Subtarget->hasSSE41())
15244 if (!Op->hasOneUse())
15247 SDNode *N = Op.getNode();
15250 SmallVector<SDValue, 8> Opnds;
15251 DenseMap<SDValue, unsigned> VecInMap;
15252 SmallVector<SDValue, 8> VecIns;
15253 EVT VT = MVT::Other;
15255   // Recognize a special case where a vector is cast into a wide integer to test whether all of its bits are zero.
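  // A typical source of this pattern (sketch, not taken from a specific test):
  //   %wide = bitcast <4 x i32> %v to i128
  //   %cmp  = icmp eq i128 %wide, 0
  // which legalization turns into an OR tree over EXTRACT_VECTOR_ELTs compared
  // against zero; this routine folds that back into a single PTEST of %v.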
15257 Opnds.push_back(N->getOperand(0));
15258 Opnds.push_back(N->getOperand(1));
15260 for (unsigned Slot = 0, e = Opnds.size(); Slot < e; ++Slot) {
15261 SmallVectorImpl<SDValue>::const_iterator I = Opnds.begin() + Slot;
15262 // BFS traverse all OR'd operands.
15263 if (I->getOpcode() == ISD::OR) {
15264 Opnds.push_back(I->getOperand(0));
15265 Opnds.push_back(I->getOperand(1));
15266 // Re-evaluate the number of nodes to be traversed.
15267 e += 2; // 2 more nodes (LHS and RHS) are pushed.
15271     // Quit if this is not an EXTRACT_VECTOR_ELT.
15272 if (I->getOpcode() != ISD::EXTRACT_VECTOR_ELT)
15275     // Quit if the index is not a constant.
15276 SDValue Idx = I->getOperand(1);
15277 if (!isa<ConstantSDNode>(Idx))
15280 SDValue ExtractedFromVec = I->getOperand(0);
15281 DenseMap<SDValue, unsigned>::iterator M = VecInMap.find(ExtractedFromVec);
15282 if (M == VecInMap.end()) {
15283 VT = ExtractedFromVec.getValueType();
15284 // Quit if not 128/256-bit vector.
15285 if (!VT.is128BitVector() && !VT.is256BitVector())
15287 // Quit if not the same type.
15288 if (VecInMap.begin() != VecInMap.end() &&
15289 VT != VecInMap.begin()->first.getValueType())
15291 M = VecInMap.insert(std::make_pair(ExtractedFromVec, 0)).first;
15292 VecIns.push_back(ExtractedFromVec);
15294 M->second |= 1U << cast<ConstantSDNode>(Idx)->getZExtValue();
15297 assert((VT.is128BitVector() || VT.is256BitVector()) &&
15298 "Not extracted from 128-/256-bit vector.");
15300 unsigned FullMask = (1U << VT.getVectorNumElements()) - 1U;
15302 for (DenseMap<SDValue, unsigned>::const_iterator
15303 I = VecInMap.begin(), E = VecInMap.end(); I != E; ++I) {
15304 // Quit if not all elements are used.
15305 if (I->second != FullMask)
15309 EVT TestVT = VT.is128BitVector() ? MVT::v2i64 : MVT::v4i64;
15311 // Cast all vectors into TestVT for PTEST.
15312 for (unsigned i = 0, e = VecIns.size(); i < e; ++i)
15313 VecIns[i] = DAG.getNode(ISD::BITCAST, DL, TestVT, VecIns[i]);
15315   // If more than one full vector is evaluated, OR them together before the PTEST.
15316 for (unsigned Slot = 0, e = VecIns.size(); e - Slot > 1; Slot += 2, e += 1) {
15317 // Each iteration will OR 2 nodes and append the result until there is only
15318 // 1 node left, i.e. the final OR'd value of all vectors.
15319 SDValue LHS = VecIns[Slot];
15320 SDValue RHS = VecIns[Slot + 1];
15321 VecIns.push_back(DAG.getNode(ISD::OR, DL, TestVT, LHS, RHS));
15324 return DAG.getNode(X86ISD::PTEST, DL, MVT::i32,
15325 VecIns.back(), VecIns.back());
15328 /// \brief return true if \c Op has a use that doesn't just read flags.
15329 static bool hasNonFlagsUse(SDValue Op) {
15330 for (SDNode::use_iterator UI = Op->use_begin(), UE = Op->use_end(); UI != UE;
15332 SDNode *User = *UI;
15333 unsigned UOpNo = UI.getOperandNo();
15334 if (User->getOpcode() == ISD::TRUNCATE && User->hasOneUse()) {
15335       // Look past the truncate.
15336 UOpNo = User->use_begin().getOperandNo();
15337 User = *User->use_begin();
15340 if (User->getOpcode() != ISD::BRCOND && User->getOpcode() != ISD::SETCC &&
15341 !(User->getOpcode() == ISD::SELECT && UOpNo == 0))
15347 /// Emit nodes that will be selected as "test Op0,Op0", or something equivalent.
15349 SDValue X86TargetLowering::EmitTest(SDValue Op, unsigned X86CC, SDLoc dl,
15350 SelectionDAG &DAG) const {
15351 if (Op.getValueType() == MVT::i1) {
15352 SDValue ExtOp = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i8, Op);
15353 return DAG.getNode(X86ISD::CMP, dl, MVT::i32, ExtOp,
15354 DAG.getConstant(0, MVT::i8));
15356 // CF and OF aren't always set the way we want. Determine which
15357 // of these we need.
15358 bool NeedCF = false;
15359 bool NeedOF = false;
15362 case X86::COND_A: case X86::COND_AE:
15363 case X86::COND_B: case X86::COND_BE:
15366 case X86::COND_G: case X86::COND_GE:
15367 case X86::COND_L: case X86::COND_LE:
15368 case X86::COND_O: case X86::COND_NO: {
15369     // Check if we really need to set the Overflow flag. If NoSignedWrap is
15370     // present, the Overflow flag is not actually needed.
15372 switch (Op->getOpcode()) {
15377 const BinaryWithFlagsSDNode *BinNode =
15378 cast<BinaryWithFlagsSDNode>(Op.getNode());
15379 if (BinNode->hasNoSignedWrap())
15389 // See if we can use the EFLAGS value from the operand instead of
15390 // doing a separate TEST. TEST always sets OF and CF to 0, so unless
15391 // we prove that the arithmetic won't overflow, we can't use OF or CF.
15392 if (Op.getResNo() != 0 || NeedOF || NeedCF) {
15393 // Emit a CMP with 0, which is the TEST pattern.
15394 //if (Op.getValueType() == MVT::i1)
15395 // return DAG.getNode(X86ISD::CMP, dl, MVT::i1, Op,
15396 // DAG.getConstant(0, MVT::i1));
15397 return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op,
15398 DAG.getConstant(0, Op.getValueType()));
15400 unsigned Opcode = 0;
15401 unsigned NumOperands = 0;
15403 // Truncate operations may prevent the merge of the SETCC instruction
15404 // and the arithmetic instruction before it. Attempt to truncate the operands
15405 // of the arithmetic instruction and use a reduced bit-width instruction.
15406 bool NeedTruncation = false;
15407 SDValue ArithOp = Op;
15408 if (Op->getOpcode() == ISD::TRUNCATE && Op->hasOneUse()) {
15409 SDValue Arith = Op->getOperand(0);
15410 // Both the trunc and the arithmetic op need to have one user each.
15411 if (Arith->hasOneUse())
15412 switch (Arith.getOpcode()) {
15419 NeedTruncation = true;
15425 // NOTICE: In the code below we use ArithOp to hold the arithmetic operation
15426 // which may be the result of a CAST. We use the variable 'Op', which is the
15427 // non-casted variable when we check for possible users.
15428 switch (ArithOp.getOpcode()) {
15430 // Due to an isel shortcoming, be conservative if this add is likely to be
15431 // selected as part of a load-modify-store instruction. When the root node
15432 // in a match is a store, isel doesn't know how to remap non-chain non-flag
15433 // uses of other nodes in the match, such as the ADD in this case. This
15434 // leads to the ADD being left around and reselected, with the result being
15435     // two adds in the output. Alas, even if none of our users are stores, that
15436 // doesn't prove we're O.K. Ergo, if we have any parents that aren't
15437 // CopyToReg or SETCC, eschew INC/DEC. A better fix seems to require
15438     // climbing the DAG back to the root, and it doesn't seem to be worth the effort.
15440 for (SDNode::use_iterator UI = Op.getNode()->use_begin(),
15441 UE = Op.getNode()->use_end(); UI != UE; ++UI)
15442 if (UI->getOpcode() != ISD::CopyToReg &&
15443 UI->getOpcode() != ISD::SETCC &&
15444 UI->getOpcode() != ISD::STORE)
15447 if (ConstantSDNode *C =
15448 dyn_cast<ConstantSDNode>(ArithOp.getNode()->getOperand(1))) {
15449 // An add of one will be selected as an INC.
15450 if (C->getAPIntValue() == 1 && !Subtarget->slowIncDec()) {
15451 Opcode = X86ISD::INC;
15456 // An add of negative one (subtract of one) will be selected as a DEC.
15457 if (C->getAPIntValue().isAllOnesValue() && !Subtarget->slowIncDec()) {
15458 Opcode = X86ISD::DEC;
15464 // Otherwise use a regular EFLAGS-setting add.
15465 Opcode = X86ISD::ADD;
15470 // If we have a constant logical shift that's only used in a comparison
15471     // against zero, turn it into an equivalent AND. This allows turning it into
15472 // a TEST instruction later.
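    // For example, (srl X, 3) == 0 becomes (X & 0xfffffff8) == 0, which can
    // later be selected as a single 'test' against an immediate instead of a
    // shift followed by a compare.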
15473 if ((X86CC == X86::COND_E || X86CC == X86::COND_NE) && Op->hasOneUse() &&
15474 isa<ConstantSDNode>(Op->getOperand(1)) && !hasNonFlagsUse(Op)) {
15475 EVT VT = Op.getValueType();
15476 unsigned BitWidth = VT.getSizeInBits();
15477 unsigned ShAmt = Op->getConstantOperandVal(1);
15478 if (ShAmt >= BitWidth) // Avoid undefined shifts.
15480 APInt Mask = ArithOp.getOpcode() == ISD::SRL
15481 ? APInt::getHighBitsSet(BitWidth, BitWidth - ShAmt)
15482 : APInt::getLowBitsSet(BitWidth, BitWidth - ShAmt);
15483 if (!Mask.isSignedIntN(32)) // Avoid large immediates.
15485 SDValue New = DAG.getNode(ISD::AND, dl, VT, Op->getOperand(0),
15486 DAG.getConstant(Mask, VT));
15487 DAG.ReplaceAllUsesWith(Op, New);
15493     // If the primary result of the AND isn't used, don't bother using X86ISD::AND,
15494 // because a TEST instruction will be better.
15495 if (!hasNonFlagsUse(Op))
15501 // Due to the ISEL shortcoming noted above, be conservative if this op is
15502 // likely to be selected as part of a load-modify-store instruction.
15503 for (SDNode::use_iterator UI = Op.getNode()->use_begin(),
15504 UE = Op.getNode()->use_end(); UI != UE; ++UI)
15505 if (UI->getOpcode() == ISD::STORE)
15508 // Otherwise use a regular EFLAGS-setting instruction.
15509 switch (ArithOp.getOpcode()) {
15510 default: llvm_unreachable("unexpected operator!");
15511 case ISD::SUB: Opcode = X86ISD::SUB; break;
15512 case ISD::XOR: Opcode = X86ISD::XOR; break;
15513 case ISD::AND: Opcode = X86ISD::AND; break;
15515 if (!NeedTruncation && (X86CC == X86::COND_E || X86CC == X86::COND_NE)) {
15516 SDValue EFLAGS = LowerVectorAllZeroTest(Op, Subtarget, DAG);
15517 if (EFLAGS.getNode())
15520 Opcode = X86ISD::OR;
15534 return SDValue(Op.getNode(), 1);
15540   // If we found that truncation is beneficial, perform the truncation and use the narrower operation.
15542 if (NeedTruncation) {
15543 EVT VT = Op.getValueType();
15544 SDValue WideVal = Op->getOperand(0);
15545 EVT WideVT = WideVal.getValueType();
15546 unsigned ConvertedOp = 0;
15547 // Use a target machine opcode to prevent further DAGCombine
15548 // optimizations that may separate the arithmetic operations
15549 // from the setcc node.
15550 switch (WideVal.getOpcode()) {
15552 case ISD::ADD: ConvertedOp = X86ISD::ADD; break;
15553 case ISD::SUB: ConvertedOp = X86ISD::SUB; break;
15554 case ISD::AND: ConvertedOp = X86ISD::AND; break;
15555 case ISD::OR: ConvertedOp = X86ISD::OR; break;
15556 case ISD::XOR: ConvertedOp = X86ISD::XOR; break;
15560 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
15561 if (TLI.isOperationLegal(WideVal.getOpcode(), WideVT)) {
15562 SDValue V0 = DAG.getNode(ISD::TRUNCATE, dl, VT, WideVal.getOperand(0));
15563 SDValue V1 = DAG.getNode(ISD::TRUNCATE, dl, VT, WideVal.getOperand(1));
15564 Op = DAG.getNode(ConvertedOp, dl, VT, V0, V1);
15570 // Emit a CMP with 0, which is the TEST pattern.
15571 return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op,
15572 DAG.getConstant(0, Op.getValueType()));
15574 SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
15575 SmallVector<SDValue, 4> Ops;
15576 for (unsigned i = 0; i != NumOperands; ++i)
15577 Ops.push_back(Op.getOperand(i));
15579 SDValue New = DAG.getNode(Opcode, dl, VTs, Ops);
15580 DAG.ReplaceAllUsesWith(Op, New);
15581 return SDValue(New.getNode(), 1);
15584 /// Emit nodes that will be selected as "cmp Op0,Op1", or something
15585 /// equivalent.
15586 SDValue X86TargetLowering::EmitCmp(SDValue Op0, SDValue Op1, unsigned X86CC,
15587 SDLoc dl, SelectionDAG &DAG) const {
15588 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op1)) {
15589 if (C->getAPIntValue() == 0)
15590 return EmitTest(Op0, X86CC, dl, DAG);
15592 if (Op0.getValueType() == MVT::i1)
15593 llvm_unreachable("Unexpected comparison operation for MVT::i1 operands");
15596 if ((Op0.getValueType() == MVT::i8 || Op0.getValueType() == MVT::i16 ||
15597 Op0.getValueType() == MVT::i32 || Op0.getValueType() == MVT::i64)) {
15598 // Do the comparison at i32 if it's smaller, except on Atom.
15599 // This avoids subregister aliasing issues. Keep the smaller reference
15600 // if we're optimizing for size, however, as that'll allow better folding
15601 // of memory operations.
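// For example (illustrative): an i8 compare is widened to an i32 compare by
// zero- or sign-extending both operands (depending on whether the condition
// is unsigned), unless we are optimizing for size or targeting Atom.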
15602 if (Op0.getValueType() != MVT::i32 && Op0.getValueType() != MVT::i64 &&
15603 !DAG.getMachineFunction().getFunction()->hasFnAttribute(
15604 Attribute::MinSize) &&
15605 !Subtarget->isAtom()) {
15606 unsigned ExtendOp =
15607 isX86CCUnsigned(X86CC) ? ISD::ZERO_EXTEND : ISD::SIGN_EXTEND;
15608 Op0 = DAG.getNode(ExtendOp, dl, MVT::i32, Op0);
15609 Op1 = DAG.getNode(ExtendOp, dl, MVT::i32, Op1);
15611 // Use SUB instead of CMP to enable CSE between SUB and CMP.
15612 SDVTList VTs = DAG.getVTList(Op0.getValueType(), MVT::i32);
15613 SDValue Sub = DAG.getNode(X86ISD::SUB, dl, VTs,
15615 return SDValue(Sub.getNode(), 1);
15617 return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op0, Op1);
15620 /// Convert a comparison if required by the subtarget.
15621 SDValue X86TargetLowering::ConvertCmpIfNecessary(SDValue Cmp,
15622 SelectionDAG &DAG) const {
15623 // If the subtarget does not support the FUCOMI instruction, floating-point
15624 // comparisons have to be converted.
15625 if (Subtarget->hasCMov() ||
15626 Cmp.getOpcode() != X86ISD::CMP ||
15627 !Cmp.getOperand(0).getValueType().isFloatingPoint() ||
15628 !Cmp.getOperand(1).getValueType().isFloatingPoint())
15631 // The instruction selector will select an FUCOM instruction instead of
15632 // FUCOMI, which writes the comparison result to FPSW instead of EFLAGS. Hence
15633 // build an SDNode sequence that transfers the result from FPSW into EFLAGS:
15634 // (X86sahf (trunc (srl (X86fp_stsw (trunc (X86cmp ...)), 8))))
15636 SDValue TruncFPSW = DAG.getNode(ISD::TRUNCATE, dl, MVT::i16, Cmp);
15637 SDValue FNStSW = DAG.getNode(X86ISD::FNSTSW16r, dl, MVT::i16, TruncFPSW);
15638 SDValue Srl = DAG.getNode(ISD::SRL, dl, MVT::i16, FNStSW,
15639 DAG.getConstant(8, MVT::i8));
15640 SDValue TruncSrl = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Srl);
15641 return DAG.getNode(X86ISD::SAHF, dl, MVT::i32, TruncSrl);
15644 /// The minimum architected relative accuracy is 2^-12. We need one
15645 /// Newton-Raphson step to have a good float result (24 bits of precision).
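/// The refinement itself is emitted by the generic DAG combiner; one
/// conventional Newton-Raphson step for rsqrt is x1 = x0 * (1.5 - 0.5 * a * x0 * x0).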
15646 SDValue X86TargetLowering::getRsqrtEstimate(SDValue Op,
15647 DAGCombinerInfo &DCI,
15648 unsigned &RefinementSteps,
15649 bool &UseOneConstNR) const {
15650 // FIXME: We should use instruction latency models to calculate the cost of
15651 // each potential sequence, but this is very hard to do reliably because
15652 // at least Intel's Core* chips have variable timing based on the number of
15653 // significant digits in the divisor and/or sqrt operand.
15654 if (!Subtarget->useSqrtEst())
15657 EVT VT = Op.getValueType();
15659 // SSE1 has rsqrtss and rsqrtps.
15660 // TODO: Add support for AVX512 (v16f32).
15661 // It is likely not profitable to do this for f64 because a double-precision
15662 // rsqrt estimate with refinement on x86 prior to FMA requires at least 16
15663 // instructions: convert to single, rsqrtss, convert back to double, refine
15664 // (3 steps = at least 13 insts). If an 'rsqrtsd' variant was added to the ISA
15665 // along with FMA, this could be a throughput win.
15666 if ((Subtarget->hasSSE1() && (VT == MVT::f32 || VT == MVT::v4f32)) ||
15667 (Subtarget->hasAVX() && VT == MVT::v8f32)) {
15668 RefinementSteps = 1;
15669 UseOneConstNR = false;
15670 return DCI.DAG.getNode(X86ISD::FRSQRT, SDLoc(Op), VT, Op);
15675 /// The minimum architected relative accuracy is 2^-12. We need one
15676 /// Newton-Raphson step to have a good float result (24 bits of precision).
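/// Each refinement step emitted by the generic DAG combiner is conventionally
/// of the form x1 = x0 * (2 - a * x0), so RefinementSteps sets the iteration count.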
15677 SDValue X86TargetLowering::getRecipEstimate(SDValue Op,
15678 DAGCombinerInfo &DCI,
15679 unsigned &RefinementSteps) const {
15680 // FIXME: We should use instruction latency models to calculate the cost of
15681 // each potential sequence, but this is very hard to do reliably because
15682 // at least Intel's Core* chips have variable timing based on the number of
15683 // significant digits in the divisor.
15684 if (!Subtarget->useReciprocalEst())
15687 EVT VT = Op.getValueType();
15689 // SSE1 has rcpss and rcpps. AVX adds a 256-bit variant for rcpps.
15690 // TODO: Add support for AVX512 (v16f32).
15691 // It is likely not profitable to do this for f64 because a double-precision
15692 // reciprocal estimate with refinement on x86 prior to FMA requires
15693 // 15 instructions: convert to single, rcpss, convert back to double, refine
15694 // (3 steps = 12 insts). If an 'rcpsd' variant was added to the ISA
15695 // along with FMA, this could be a throughput win.
15696 if ((Subtarget->hasSSE1() && (VT == MVT::f32 || VT == MVT::v4f32)) ||
15697 (Subtarget->hasAVX() && VT == MVT::v8f32)) {
15698 RefinementSteps = ReciprocalEstimateRefinementSteps;
15699 return DCI.DAG.getNode(X86ISD::FRCP, SDLoc(Op), VT, Op);
15704 static bool isAllOnes(SDValue V) {
15705 ConstantSDNode *C = dyn_cast<ConstantSDNode>(V);
15706 return C && C->isAllOnesValue();
15709 /// LowerToBT - Result of 'and' is compared against zero. Turn it into a BT node
15710 /// if it's possible.
15711 SDValue X86TargetLowering::LowerToBT(SDValue And, ISD::CondCode CC,
15712 SDLoc dl, SelectionDAG &DAG) const {
15713 SDValue Op0 = And.getOperand(0);
15714 SDValue Op1 = And.getOperand(1);
15715 if (Op0.getOpcode() == ISD::TRUNCATE)
15716 Op0 = Op0.getOperand(0);
15717 if (Op1.getOpcode() == ISD::TRUNCATE)
15718 Op1 = Op1.getOperand(0);
15721 if (Op1.getOpcode() == ISD::SHL)
15722 std::swap(Op0, Op1);
15723 if (Op0.getOpcode() == ISD::SHL) {
15724 if (ConstantSDNode *And00C = dyn_cast<ConstantSDNode>(Op0.getOperand(0)))
15725 if (And00C->getZExtValue() == 1) {
15726 // If we looked past a truncate, check that it's only truncating away
15727 // sign bits.
15728 unsigned BitWidth = Op0.getValueSizeInBits();
15729 unsigned AndBitWidth = And.getValueSizeInBits();
15730 if (BitWidth > AndBitWidth) {
15731 APInt Zeros, Ones;
15732 DAG.computeKnownBits(Op0, Zeros, Ones);
15733 if (Zeros.countLeadingOnes() < BitWidth - AndBitWidth)
15737 RHS = Op0.getOperand(1);
15739 } else if (Op1.getOpcode() == ISD::Constant) {
15740 ConstantSDNode *AndRHS = cast<ConstantSDNode>(Op1);
15741 uint64_t AndRHSVal = AndRHS->getZExtValue();
15742 SDValue AndLHS = Op0;
15744 if (AndRHSVal == 1 && AndLHS.getOpcode() == ISD::SRL) {
15745 LHS = AndLHS.getOperand(0);
15746 RHS = AndLHS.getOperand(1);
15749 // Use BT if the immediate can't be encoded in a TEST instruction.
15750 if (!isUInt<32>(AndRHSVal) && isPowerOf2_64(AndRHSVal)) {
15751 LHS = AndLHS;
15752 RHS = DAG.getConstant(Log2_64_Ceil(AndRHSVal), LHS.getValueType());
15756 if (LHS.getNode()) {
15757 // If LHS is i8, promote it to i32 with any_extend. There is no i8 BT
15758 // instruction. Since the shift amount is in-range-or-undefined, we know
15759 // that doing a bittest on the i32 value is ok. We extend to i32 because
15760 // the encoding for the i16 version is larger than the i32 version.
15761 // Also promote i16 to i32 for performance / code size reasons.
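// (Illustrative: the 16-bit BT form needs an operand-size prefix, so e.g.
// 'bt $3, %ax' encodes one byte longer than 'bt $3, %eax'.)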
15762 if (LHS.getValueType() == MVT::i8 ||
15763 LHS.getValueType() == MVT::i16)
15764 LHS = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, LHS);
15766 // If the operand types disagree, extend the shift amount to match. Since
15767 // BT ignores high bits (like shifts) we can use anyextend.
15768 if (LHS.getValueType() != RHS.getValueType())
15769 RHS = DAG.getNode(ISD::ANY_EXTEND, dl, LHS.getValueType(), RHS);
15771 SDValue BT = DAG.getNode(X86ISD::BT, dl, MVT::i32, LHS, RHS);
15772 X86::CondCode Cond = CC == ISD::SETEQ ? X86::COND_AE : X86::COND_B;
15773 return DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
15774 DAG.getConstant(Cond, MVT::i8), BT);
15780 /// \brief - Turns an ISD::CondCode into a value suitable for SSE floating point
15781 /// mask CMPs.
15782 static int translateX86FSETCC(ISD::CondCode SetCCOpcode, SDValue &Op0,
15787 // SSE Condition code mapping:
15788 //  0 - EQ
15789 //  1 - LT
15790 //  2 - LE
15791 //  3 - UNORD
15792 //  4 - NEQ
15793 //  5 - NLT
15794 //  6 - NLE
15795 //  7 - ORD
15796 switch (SetCCOpcode) {
15797 default: llvm_unreachable("Unexpected SETCC condition");
15799 case ISD::SETEQ: SSECC = 0; break;
15801 case ISD::SETGT: Swap = true; // Fallthrough
15803 case ISD::SETOLT: SSECC = 1; break;
15805 case ISD::SETGE: Swap = true; // Fallthrough
15807 case ISD::SETOLE: SSECC = 2; break;
15808 case ISD::SETUO: SSECC = 3; break;
15810 case ISD::SETNE: SSECC = 4; break;
15811 case ISD::SETULE: Swap = true; // Fallthrough
15812 case ISD::SETUGE: SSECC = 5; break;
15813 case ISD::SETULT: Swap = true; // Fallthrough
15814 case ISD::SETUGT: SSECC = 6; break;
15815 case ISD::SETO: SSECC = 7; break;
15817 case ISD::SETONE: SSECC = 8; break;
15820 std::swap(Op0, Op1);
15825 // Lower256IntVSETCC - Break a 256-bit integer VSETCC into two new 128-bit
15826 // ones, and then concatenate the result back.
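// For example (illustrative): a v8i32 SETCC on an AVX1-only target is split into
// two v4i32 SETCCs whose results are concatenated back into a v8i32.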
15827 static SDValue Lower256IntVSETCC(SDValue Op, SelectionDAG &DAG) {
15828 MVT VT = Op.getSimpleValueType();
15830 assert(VT.is256BitVector() && Op.getOpcode() == ISD::SETCC &&
15831 "Unsupported value type for operation");
15833 unsigned NumElems = VT.getVectorNumElements();
15835 SDValue CC = Op.getOperand(2);
15837 // Extract the LHS vectors
15838 SDValue LHS = Op.getOperand(0);
15839 SDValue LHS1 = Extract128BitVector(LHS, 0, DAG, dl);
15840 SDValue LHS2 = Extract128BitVector(LHS, NumElems/2, DAG, dl);
15842 // Extract the RHS vectors
15843 SDValue RHS = Op.getOperand(1);
15844 SDValue RHS1 = Extract128BitVector(RHS, 0, DAG, dl);
15845 SDValue RHS2 = Extract128BitVector(RHS, NumElems/2, DAG, dl);
15847 // Issue the operation on the smaller types and concatenate the result back
15848 MVT EltVT = VT.getVectorElementType();
15849 MVT NewVT = MVT::getVectorVT(EltVT, NumElems/2);
15850 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT,
15851 DAG.getNode(Op.getOpcode(), dl, NewVT, LHS1, RHS1, CC),
15852 DAG.getNode(Op.getOpcode(), dl, NewVT, LHS2, RHS2, CC));
15855 static SDValue LowerIntVSETCC_AVX512(SDValue Op, SelectionDAG &DAG,
15856 const X86Subtarget *Subtarget) {
15857 SDValue Op0 = Op.getOperand(0);
15858 SDValue Op1 = Op.getOperand(1);
15859 SDValue CC = Op.getOperand(2);
15860 MVT VT = Op.getSimpleValueType();
15863 assert(Op0.getValueType().getVectorElementType().getSizeInBits() >= 8 &&
15864 Op.getValueType().getScalarType() == MVT::i1 &&
15865 "Cannot set masked compare for this operation");
15867 ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get();
15869 bool Unsigned = false;
15872 switch (SetCCOpcode) {
15873 default: llvm_unreachable("Unexpected SETCC condition");
15874 case ISD::SETNE: SSECC = 4; break;
15875 case ISD::SETEQ: Opc = X86ISD::PCMPEQM; break;
15876 case ISD::SETUGT: SSECC = 6; Unsigned = true; break;
15877 case ISD::SETLT: Swap = true; //fall-through
15878 case ISD::SETGT: Opc = X86ISD::PCMPGTM; break;
15879 case ISD::SETULT: SSECC = 1; Unsigned = true; break;
15880 case ISD::SETUGE: SSECC = 5; Unsigned = true; break; //NLT
15881 case ISD::SETGE: Swap = true; SSECC = 2; break; // LE + swap
15882 case ISD::SETULE: Unsigned = true; //fall-through
15883 case ISD::SETLE: SSECC = 2; break;
15887 std::swap(Op0, Op1);
15889 return DAG.getNode(Opc, dl, VT, Op0, Op1);
15890 Opc = Unsigned ? X86ISD::CMPMU: X86ISD::CMPM;
15891 return DAG.getNode(Opc, dl, VT, Op0, Op1,
15892 DAG.getConstant(SSECC, MVT::i8));
15895 /// \brief Try to turn a VSETULT into a VSETULE by modifying its second
15896 /// operand \p Op1. If non-trivial (for example because it's not constant)
15897 /// return an empty value.
15898 static SDValue ChangeVSETULTtoVSETULE(SDLoc dl, SDValue Op1, SelectionDAG &DAG)
15900 BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(Op1.getNode());
15904 MVT VT = Op1.getSimpleValueType();
15905 MVT EVT = VT.getVectorElementType();
15906 unsigned n = VT.getVectorNumElements();
15907 SmallVector<SDValue, 8> ULTOp1;
15909 for (unsigned i = 0; i < n; ++i) {
15910 ConstantSDNode *Elt = dyn_cast<ConstantSDNode>(BV->getOperand(i));
15911 if (!Elt || Elt->isOpaque() || Elt->getValueType(0) != EVT)
15914 // Avoid underflow.
15915 APInt Val = Elt->getAPIntValue();
15916 if (Val == 0)
15917 return SDValue();
15919 ULTOp1.push_back(DAG.getConstant(Val - 1, EVT));
15922 return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, ULTOp1);
15925 static SDValue LowerVSETCC(SDValue Op, const X86Subtarget *Subtarget,
15926 SelectionDAG &DAG) {
15927 SDValue Op0 = Op.getOperand(0);
15928 SDValue Op1 = Op.getOperand(1);
15929 SDValue CC = Op.getOperand(2);
15930 MVT VT = Op.getSimpleValueType();
15931 ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get();
15932 bool isFP = Op.getOperand(1).getSimpleValueType().isFloatingPoint();
15937 MVT EltVT = Op0.getSimpleValueType().getVectorElementType();
15938 assert(EltVT == MVT::f32 || EltVT == MVT::f64);
15941 unsigned SSECC = translateX86FSETCC(SetCCOpcode, Op0, Op1);
15942 unsigned Opc = X86ISD::CMPP;
15943 if (Subtarget->hasAVX512() && VT.getVectorElementType() == MVT::i1) {
15944 assert(VT.getVectorNumElements() <= 16);
15945 Opc = X86ISD::CMPM;
15947 // In the two special cases we can't handle, emit two comparisons.
15950 unsigned CombineOpc;
15951 if (SetCCOpcode == ISD::SETUEQ) {
15952 CC0 = 3; CC1 = 0; CombineOpc = ISD::OR;
15954 assert(SetCCOpcode == ISD::SETONE);
15955 CC0 = 7; CC1 = 4; CombineOpc = ISD::AND;
15958 SDValue Cmp0 = DAG.getNode(Opc, dl, VT, Op0, Op1,
15959 DAG.getConstant(CC0, MVT::i8));
15960 SDValue Cmp1 = DAG.getNode(Opc, dl, VT, Op0, Op1,
15961 DAG.getConstant(CC1, MVT::i8));
15962 return DAG.getNode(CombineOpc, dl, VT, Cmp0, Cmp1);
15964 // Handle all other FP comparisons here.
15965 return DAG.getNode(Opc, dl, VT, Op0, Op1,
15966 DAG.getConstant(SSECC, MVT::i8));
15969 // Break 256-bit integer vector compare into smaller ones.
15970 if (VT.is256BitVector() && !Subtarget->hasInt256())
15971 return Lower256IntVSETCC(Op, DAG);
15973 bool MaskResult = (VT.getVectorElementType() == MVT::i1);
15974 EVT OpVT = Op1.getValueType();
15975 if (Subtarget->hasAVX512()) {
15976 if (Op1.getValueType().is512BitVector() ||
15977 (Subtarget->hasBWI() && Subtarget->hasVLX()) ||
15978 (MaskResult && OpVT.getVectorElementType().getSizeInBits() >= 32))
15979 return LowerIntVSETCC_AVX512(Op, DAG, Subtarget);
15981 // In AVX-512 architecture setcc returns mask with i1 elements,
15982 // But there is no compare instruction for i8 and i16 elements in KNL.
15983 // We are not talking about 512-bit operands in this case, these
15984 // types are illegal.
15986 (OpVT.getVectorElementType().getSizeInBits() < 32 &&
15987 OpVT.getVectorElementType().getSizeInBits() >= 8))
15988 return DAG.getNode(ISD::TRUNCATE, dl, VT,
15989 DAG.getNode(ISD::SETCC, dl, OpVT, Op0, Op1, CC));
15992 // We are handling one of the integer comparisons here. Since SSE only has
15993 // GT and EQ comparisons for integer, swapping operands and multiple
15994 // operations may be required for some comparisons.
15996 bool Swap = false, Invert = false, FlipSigns = false, MinMax = false;
15997 bool Subus = false;
15999 switch (SetCCOpcode) {
16000 default: llvm_unreachable("Unexpected SETCC condition");
16001 case ISD::SETNE: Invert = true;
16002 case ISD::SETEQ: Opc = X86ISD::PCMPEQ; break;
16003 case ISD::SETLT: Swap = true;
16004 case ISD::SETGT: Opc = X86ISD::PCMPGT; break;
16005 case ISD::SETGE: Swap = true;
16006 case ISD::SETLE: Opc = X86ISD::PCMPGT;
16007 Invert = true; break;
16008 case ISD::SETULT: Swap = true;
16009 case ISD::SETUGT: Opc = X86ISD::PCMPGT;
16010 FlipSigns = true; break;
16011 case ISD::SETUGE: Swap = true;
16012 case ISD::SETULE: Opc = X86ISD::PCMPGT;
16013 FlipSigns = true; Invert = true; break;
16016 // Special case: Use min/max operations for SETULE/SETUGE
16017 MVT VET = VT.getVectorElementType();
16019 (Subtarget->hasSSE41() && (VET >= MVT::i8 && VET <= MVT::i32))
16020 || (Subtarget->hasSSE2() && (VET == MVT::i8));
16023 switch (SetCCOpcode) {
16025 case ISD::SETULE: Opc = X86ISD::UMIN; MinMax = true; break;
16026 case ISD::SETUGE: Opc = X86ISD::UMAX; MinMax = true; break;
16029 if (MinMax) { Swap = false; Invert = false; FlipSigns = false; }
16032 bool hasSubus = Subtarget->hasSSE2() && (VET == MVT::i8 || VET == MVT::i16);
16033 if (!MinMax && hasSubus) {
16034 // As another special case, use PSUBUS[BW] when it's profitable. E.g. for
16035 // a setule (Op0 u<= Op1) we can emit:
16036 // t = psubus Op0, Op1
16037 // pcmpeq t, <0..0>
16038 switch (SetCCOpcode) {
16040 case ISD::SETULT: {
16041 // If the comparison is against a constant we can turn this into a
16042 // setule. With psubus, setule does not require a swap. This is
16043 // beneficial because the constant in the register is no longer
16044 // clobbered as the destination, so it can be hoisted out of a loop.
16045 // Only do this pre-AVX since vpcmp* is no longer destructive.
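// For example (illustrative): 'x u< 4' becomes 'x u<= 3', provided no
// constant element would underflow.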
16046 if (Subtarget->hasAVX())
16048 SDValue ULEOp1 = ChangeVSETULTtoVSETULE(dl, Op1, DAG);
16049 if (ULEOp1.getNode()) {
16051 Subus = true; Invert = false; Swap = false;
16055 // Psubus is better than flip-sign because it requires no inversion.
16056 case ISD::SETUGE: Subus = true; Invert = false; Swap = true; break;
16057 case ISD::SETULE: Subus = true; Invert = false; Swap = false; break;
16061 Opc = X86ISD::SUBUS;
16067 std::swap(Op0, Op1);
16069 // Check that the operation in question is available (most are plain SSE2,
16070 // but PCMPGTQ and PCMPEQQ have different requirements).
16071 if (VT == MVT::v2i64) {
16072 if (Opc == X86ISD::PCMPGT && !Subtarget->hasSSE42()) {
16073 assert(Subtarget->hasSSE2() && "Don't know how to lower!");
16075 // First cast everything to the right type.
16076 Op0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op0);
16077 Op1 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op1);
16079 // Since SSE has no unsigned integer comparisons, we need to flip the sign
16080 // bits of the inputs before performing those operations. The lower
16081 // compare is always unsigned.
16084 SB = DAG.getConstant(0x80000000U, MVT::v4i32);
16086 SDValue Sign = DAG.getConstant(0x80000000U, MVT::i32);
16087 SDValue Zero = DAG.getConstant(0x00000000U, MVT::i32);
16088 SB = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
16089 Sign, Zero, Sign, Zero);
16091 Op0 = DAG.getNode(ISD::XOR, dl, MVT::v4i32, Op0, SB);
16092 Op1 = DAG.getNode(ISD::XOR, dl, MVT::v4i32, Op1, SB);
16094 // Emulate PCMPGTQ with (hi1 > hi2) | ((hi1 == hi2) & (lo1 > lo2))
16095 SDValue GT = DAG.getNode(X86ISD::PCMPGT, dl, MVT::v4i32, Op0, Op1);
16096 SDValue EQ = DAG.getNode(X86ISD::PCMPEQ, dl, MVT::v4i32, Op0, Op1);
16098 // Create masks for only the low parts/high parts of the 64 bit integers.
16099 static const int MaskHi[] = { 1, 1, 3, 3 };
16100 static const int MaskLo[] = { 0, 0, 2, 2 };
16101 SDValue EQHi = DAG.getVectorShuffle(MVT::v4i32, dl, EQ, EQ, MaskHi);
16102 SDValue GTLo = DAG.getVectorShuffle(MVT::v4i32, dl, GT, GT, MaskLo);
16103 SDValue GTHi = DAG.getVectorShuffle(MVT::v4i32, dl, GT, GT, MaskHi);
16105 SDValue Result = DAG.getNode(ISD::AND, dl, MVT::v4i32, EQHi, GTLo);
16106 Result = DAG.getNode(ISD::OR, dl, MVT::v4i32, Result, GTHi);
16109 Result = DAG.getNOT(dl, Result, MVT::v4i32);
16111 return DAG.getNode(ISD::BITCAST, dl, VT, Result);
16114 if (Opc == X86ISD::PCMPEQ && !Subtarget->hasSSE41()) {
16115 // If pcmpeqq is missing but pcmpeqd is available synthesize pcmpeqq with
16116 // pcmpeqd + pshufd + pand.
16117 assert(Subtarget->hasSSE2() && !FlipSigns && "Don't know how to lower!");
16119 // First cast everything to the right type.
16120 Op0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op0);
16121 Op1 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op1);
16124 SDValue Result = DAG.getNode(Opc, dl, MVT::v4i32, Op0, Op1);
16126 // Make sure the lower and upper halves are both all-ones.
16127 static const int Mask[] = { 1, 0, 3, 2 };
16128 SDValue Shuf = DAG.getVectorShuffle(MVT::v4i32, dl, Result, Result, Mask);
16129 Result = DAG.getNode(ISD::AND, dl, MVT::v4i32, Result, Shuf);
16132 Result = DAG.getNOT(dl, Result, MVT::v4i32);
16134 return DAG.getNode(ISD::BITCAST, dl, VT, Result);
16138 // Since SSE has no unsigned integer comparisons, we need to flip the sign
16139 // bits of the inputs before performing those operations.
16141 EVT EltVT = VT.getVectorElementType();
16142 SDValue SB = DAG.getConstant(APInt::getSignBit(EltVT.getSizeInBits()), VT);
16143 Op0 = DAG.getNode(ISD::XOR, dl, VT, Op0, SB);
16144 Op1 = DAG.getNode(ISD::XOR, dl, VT, Op1, SB);
16147 SDValue Result = DAG.getNode(Opc, dl, VT, Op0, Op1);
16149 // If the logical-not of the result is required, perform that now.
16151 Result = DAG.getNOT(dl, Result, VT);
16154 Result = DAG.getNode(X86ISD::PCMPEQ, dl, VT, Op0, Result);
16157 Result = DAG.getNode(X86ISD::PCMPEQ, dl, VT, Result,
16158 getZeroVector(VT, Subtarget, DAG, dl));
16163 SDValue X86TargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
16165 MVT VT = Op.getSimpleValueType();
16167 if (VT.isVector()) return LowerVSETCC(Op, Subtarget, DAG);
16169 assert(((!Subtarget->hasAVX512() && VT == MVT::i8) || (VT == MVT::i1))
16170 && "SetCC type must be 8-bit or 1-bit integer");
16171 SDValue Op0 = Op.getOperand(0);
16172 SDValue Op1 = Op.getOperand(1);
16174 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
16176 // Optimize to BT if possible.
16177 // Lower (X & (1 << N)) == 0 to BT(X, N).
16178 // Lower ((X >>u N) & 1) != 0 to BT(X, N).
16179 // Lower ((X >>s N) & 1) != 0 to BT(X, N).
16180 if (Op0.getOpcode() == ISD::AND && Op0.hasOneUse() &&
16181 Op1.getOpcode() == ISD::Constant &&
16182 cast<ConstantSDNode>(Op1)->isNullValue() &&
16183 (CC == ISD::SETEQ || CC == ISD::SETNE)) {
16184 SDValue NewSetCC = LowerToBT(Op0, CC, dl, DAG);
16185 if (NewSetCC.getNode()) {
16187 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, NewSetCC);
16192 // Look for X == 0, X == 1, X != 0, or X != 1. We can simplify some forms of
16193 // these.
16194 if (Op1.getOpcode() == ISD::Constant &&
16195 (cast<ConstantSDNode>(Op1)->getZExtValue() == 1 ||
16196 cast<ConstantSDNode>(Op1)->isNullValue()) &&
16197 (CC == ISD::SETEQ || CC == ISD::SETNE)) {
16199 // If the input is a setcc, then reuse the input setcc or use a new one with
16200 // the inverted condition.
16201 if (Op0.getOpcode() == X86ISD::SETCC) {
16202 X86::CondCode CCode = (X86::CondCode)Op0.getConstantOperandVal(0);
16203 bool Invert = (CC == ISD::SETNE) ^
16204 cast<ConstantSDNode>(Op1)->isNullValue();
16208 CCode = X86::GetOppositeBranchCondition(CCode);
16209 SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
16210 DAG.getConstant(CCode, MVT::i8),
16211 Op0.getOperand(1));
16213 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, SetCC);
16217 if ((Op0.getValueType() == MVT::i1) && (Op1.getOpcode() == ISD::Constant) &&
16218 (cast<ConstantSDNode>(Op1)->getZExtValue() == 1) &&
16219 (CC == ISD::SETEQ || CC == ISD::SETNE)) {
16221 ISD::CondCode NewCC = ISD::getSetCCInverse(CC, true);
16222 return DAG.getSetCC(dl, VT, Op0, DAG.getConstant(0, MVT::i1), NewCC);
16225 bool isFP = Op1.getSimpleValueType().isFloatingPoint();
16226 unsigned X86CC = TranslateX86CC(CC, isFP, Op0, Op1, DAG);
16227 if (X86CC == X86::COND_INVALID)
16230 SDValue EFLAGS = EmitCmp(Op0, Op1, X86CC, dl, DAG);
16231 EFLAGS = ConvertCmpIfNecessary(EFLAGS, DAG);
16232 SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
16233 DAG.getConstant(X86CC, MVT::i8), EFLAGS);
16235 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, SetCC);
16239 // isX86LogicalCmp - Return true if opcode is an X86 logical comparison.
16240 static bool isX86LogicalCmp(SDValue Op) {
16241 unsigned Opc = Op.getNode()->getOpcode();
16242 if (Opc == X86ISD::CMP || Opc == X86ISD::COMI || Opc == X86ISD::UCOMI ||
16243 Opc == X86ISD::SAHF)
16245 if (Op.getResNo() == 1 &&
16246 (Opc == X86ISD::ADD ||
16247 Opc == X86ISD::SUB ||
16248 Opc == X86ISD::ADC ||
16249 Opc == X86ISD::SBB ||
16250 Opc == X86ISD::SMUL ||
16251 Opc == X86ISD::UMUL ||
16252 Opc == X86ISD::INC ||
16253 Opc == X86ISD::DEC ||
16254 Opc == X86ISD::OR ||
16255 Opc == X86ISD::XOR ||
16256 Opc == X86ISD::AND))
16259 if (Op.getResNo() == 2 && Opc == X86ISD::UMUL)
16265 static bool isTruncWithZeroHighBitsInput(SDValue V, SelectionDAG &DAG) {
16266 if (V.getOpcode() != ISD::TRUNCATE)
16269 SDValue VOp0 = V.getOperand(0);
16270 unsigned InBits = VOp0.getValueSizeInBits();
16271 unsigned Bits = V.getValueSizeInBits();
16272 return DAG.MaskedValueIsZero(VOp0, APInt::getHighBitsSet(InBits,InBits-Bits));
16275 SDValue X86TargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
16276 bool addTest = true;
16277 SDValue Cond = Op.getOperand(0);
16278 SDValue Op1 = Op.getOperand(1);
16279 SDValue Op2 = Op.getOperand(2);
16281 EVT VT = Op1.getValueType();
16284 // Lower fp selects into a CMP/AND/ANDN/OR sequence when the necessary SSE ops
16285 // are available. Otherwise fp cmovs get lowered into a less efficient branch
16286 // sequence later on.
16287 if (Cond.getOpcode() == ISD::SETCC &&
16288 ((Subtarget->hasSSE2() && (VT == MVT::f32 || VT == MVT::f64)) ||
16289 (Subtarget->hasSSE1() && VT == MVT::f32)) &&
16290 VT == Cond.getOperand(0).getValueType() && Cond->hasOneUse()) {
16291 SDValue CondOp0 = Cond.getOperand(0), CondOp1 = Cond.getOperand(1);
16292 int SSECC = translateX86FSETCC(
16293 cast<CondCodeSDNode>(Cond.getOperand(2))->get(), CondOp0, CondOp1);
16296 if (Subtarget->hasAVX512()) {
16297 SDValue Cmp = DAG.getNode(X86ISD::FSETCC, DL, MVT::i1, CondOp0, CondOp1,
16298 DAG.getConstant(SSECC, MVT::i8));
16299 return DAG.getNode(X86ISD::SELECT, DL, VT, Cmp, Op1, Op2);
16301 SDValue Cmp = DAG.getNode(X86ISD::FSETCC, DL, VT, CondOp0, CondOp1,
16302 DAG.getConstant(SSECC, MVT::i8));
16303 SDValue AndN = DAG.getNode(X86ISD::FANDN, DL, VT, Cmp, Op2);
16304 SDValue And = DAG.getNode(X86ISD::FAND, DL, VT, Cmp, Op1);
16305 return DAG.getNode(X86ISD::FOR, DL, VT, AndN, And);
16309 if (Cond.getOpcode() == ISD::SETCC) {
16310 SDValue NewCond = LowerSETCC(Cond, DAG);
16311 if (NewCond.getNode())
16315 // (select (x == 0), -1, y) -> (sign_bit (x - 1)) | y
16316 // (select (x == 0), y, -1) -> ~(sign_bit (x - 1)) | y
16317 // (select (x != 0), y, -1) -> (sign_bit (x - 1)) | y
16318 // (select (x != 0), -1, y) -> ~(sign_bit (x - 1)) | y
16319 if (Cond.getOpcode() == X86ISD::SETCC &&
16320 Cond.getOperand(1).getOpcode() == X86ISD::CMP &&
16321 isZero(Cond.getOperand(1).getOperand(1))) {
16322 SDValue Cmp = Cond.getOperand(1);
16324 unsigned CondCode =cast<ConstantSDNode>(Cond.getOperand(0))->getZExtValue();
16326 if ((isAllOnes(Op1) || isAllOnes(Op2)) &&
16327 (CondCode == X86::COND_E || CondCode == X86::COND_NE)) {
16328 SDValue Y = isAllOnes(Op2) ? Op1 : Op2;
16330 SDValue CmpOp0 = Cmp.getOperand(0);
16331 // Apply further optimizations for special cases
16332 // (select (x != 0), -1, 0) -> neg & sbb
16333 // (select (x == 0), 0, -1) -> neg & sbb
16334 if (ConstantSDNode *YC = dyn_cast<ConstantSDNode>(Y))
16335 if (YC->isNullValue() &&
16336 (isAllOnes(Op1) == (CondCode == X86::COND_NE))) {
16337 SDVTList VTs = DAG.getVTList(CmpOp0.getValueType(), MVT::i32);
16338 SDValue Neg = DAG.getNode(X86ISD::SUB, DL, VTs,
16339 DAG.getConstant(0, CmpOp0.getValueType()),
16341 SDValue Res = DAG.getNode(X86ISD::SETCC_CARRY, DL, Op.getValueType(),
16342 DAG.getConstant(X86::COND_B, MVT::i8),
16343 SDValue(Neg.getNode(), 1));
16347 Cmp = DAG.getNode(X86ISD::CMP, DL, MVT::i32,
16348 CmpOp0, DAG.getConstant(1, CmpOp0.getValueType()));
16349 Cmp = ConvertCmpIfNecessary(Cmp, DAG);
16351 SDValue Res = // Res = 0 or -1.
16352 DAG.getNode(X86ISD::SETCC_CARRY, DL, Op.getValueType(),
16353 DAG.getConstant(X86::COND_B, MVT::i8), Cmp);
16355 if (isAllOnes(Op1) != (CondCode == X86::COND_E))
16356 Res = DAG.getNOT(DL, Res, Res.getValueType());
16358 ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(Op2);
16359 if (!N2C || !N2C->isNullValue())
16360 Res = DAG.getNode(ISD::OR, DL, Res.getValueType(), Res, Y);
16365 // Look past (and (setcc_carry (cmp ...)), 1).
16366 if (Cond.getOpcode() == ISD::AND &&
16367 Cond.getOperand(0).getOpcode() == X86ISD::SETCC_CARRY) {
16368 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Cond.getOperand(1));
16369 if (C && C->getAPIntValue() == 1)
16370 Cond = Cond.getOperand(0);
16373 // If the condition flag is set by an X86ISD::CMP, then use it as the condition
16374 // setting operand in place of the X86ISD::SETCC.
16375 unsigned CondOpcode = Cond.getOpcode();
16376 if (CondOpcode == X86ISD::SETCC ||
16377 CondOpcode == X86ISD::SETCC_CARRY) {
16378 CC = Cond.getOperand(0);
16380 SDValue Cmp = Cond.getOperand(1);
16381 unsigned Opc = Cmp.getOpcode();
16382 MVT VT = Op.getSimpleValueType();
16384 bool IllegalFPCMov = false;
16385 if (VT.isFloatingPoint() && !VT.isVector() &&
16386 !isScalarFPTypeInSSEReg(VT)) // FPStack?
16387 IllegalFPCMov = !hasFPCMov(cast<ConstantSDNode>(CC)->getSExtValue());
16389 if ((isX86LogicalCmp(Cmp) && !IllegalFPCMov) ||
16390 Opc == X86ISD::BT) { // FIXME
16394 } else if (CondOpcode == ISD::USUBO || CondOpcode == ISD::SSUBO ||
16395 CondOpcode == ISD::UADDO || CondOpcode == ISD::SADDO ||
16396 ((CondOpcode == ISD::UMULO || CondOpcode == ISD::SMULO) &&
16397 Cond.getOperand(0).getValueType() != MVT::i8)) {
16398 SDValue LHS = Cond.getOperand(0);
16399 SDValue RHS = Cond.getOperand(1);
16400 unsigned X86Opcode;
16403 switch (CondOpcode) {
16404 case ISD::UADDO: X86Opcode = X86ISD::ADD; X86Cond = X86::COND_B; break;
16405 case ISD::SADDO: X86Opcode = X86ISD::ADD; X86Cond = X86::COND_O; break;
16406 case ISD::USUBO: X86Opcode = X86ISD::SUB; X86Cond = X86::COND_B; break;
16407 case ISD::SSUBO: X86Opcode = X86ISD::SUB; X86Cond = X86::COND_O; break;
16408 case ISD::UMULO: X86Opcode = X86ISD::UMUL; X86Cond = X86::COND_O; break;
16409 case ISD::SMULO: X86Opcode = X86ISD::SMUL; X86Cond = X86::COND_O; break;
16410 default: llvm_unreachable("unexpected overflowing operator");
16412 if (CondOpcode == ISD::UMULO)
16413 VTs = DAG.getVTList(LHS.getValueType(), LHS.getValueType(),
16416 VTs = DAG.getVTList(LHS.getValueType(), MVT::i32);
16418 SDValue X86Op = DAG.getNode(X86Opcode, DL, VTs, LHS, RHS);
16420 if (CondOpcode == ISD::UMULO)
16421 Cond = X86Op.getValue(2);
16423 Cond = X86Op.getValue(1);
16425 CC = DAG.getConstant(X86Cond, MVT::i8);
16430 // Look past the truncate if the high bits are known zero.
16431 if (isTruncWithZeroHighBitsInput(Cond, DAG))
16432 Cond = Cond.getOperand(0);
16434 // We know the result of AND is compared against zero. Try to match
16436 if (Cond.getOpcode() == ISD::AND && Cond.hasOneUse()) {
16437 SDValue NewSetCC = LowerToBT(Cond, ISD::SETNE, DL, DAG);
16438 if (NewSetCC.getNode()) {
16439 CC = NewSetCC.getOperand(0);
16440 Cond = NewSetCC.getOperand(1);
16447 CC = DAG.getConstant(X86::COND_NE, MVT::i8);
16448 Cond = EmitTest(Cond, X86::COND_NE, DL, DAG);
16451 // a < b ? -1 : 0 -> RES = ~setcc_carry
16452 // a < b ? 0 : -1 -> RES = setcc_carry
16453 // a >= b ? -1 : 0 -> RES = setcc_carry
16454 // a >= b ? 0 : -1 -> RES = ~setcc_carry
16455 if (Cond.getOpcode() == X86ISD::SUB) {
16456 Cond = ConvertCmpIfNecessary(Cond, DAG);
16457 unsigned CondCode = cast<ConstantSDNode>(CC)->getZExtValue();
16459 if ((CondCode == X86::COND_AE || CondCode == X86::COND_B) &&
16460 (isAllOnes(Op1) || isAllOnes(Op2)) && (isZero(Op1) || isZero(Op2))) {
16461 SDValue Res = DAG.getNode(X86ISD::SETCC_CARRY, DL, Op.getValueType(),
16462 DAG.getConstant(X86::COND_B, MVT::i8), Cond);
16463 if (isAllOnes(Op1) != (CondCode == X86::COND_B))
16464 return DAG.getNOT(DL, Res, Res.getValueType());
16469 // X86 doesn't have an i8 cmov. If both operands are the result of a truncate
16470 // widen the cmov and push the truncate through. This avoids introducing a new
16471 // branch during isel and doesn't add any extensions.
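// For example (illustrative): select i8 (trunc i32 a), (trunc i32 b) becomes
// trunc (select i32 a, b), so a 32-bit CMOV can be used.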
16472 if (Op.getValueType() == MVT::i8 &&
16473 Op1.getOpcode() == ISD::TRUNCATE && Op2.getOpcode() == ISD::TRUNCATE) {
16474 SDValue T1 = Op1.getOperand(0), T2 = Op2.getOperand(0);
16475 if (T1.getValueType() == T2.getValueType() &&
16476 // Blacklist CopyFromReg to avoid partial register stalls.
16477 T1.getOpcode() != ISD::CopyFromReg && T2.getOpcode()!=ISD::CopyFromReg){
16478 SDVTList VTs = DAG.getVTList(T1.getValueType(), MVT::Glue);
16479 SDValue Cmov = DAG.getNode(X86ISD::CMOV, DL, VTs, T2, T1, CC, Cond);
16480 return DAG.getNode(ISD::TRUNCATE, DL, Op.getValueType(), Cmov);
16484 // X86ISD::CMOV means set the result (which is operand 1) to the RHS if
16485 // the condition is true.
16486 SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::Glue);
16487 SDValue Ops[] = { Op2, Op1, CC, Cond };
16488 return DAG.getNode(X86ISD::CMOV, DL, VTs, Ops);
16491 static SDValue LowerSIGN_EXTEND_AVX512(SDValue Op, const X86Subtarget *Subtarget,
16492 SelectionDAG &DAG) {
16493 MVT VT = Op->getSimpleValueType(0);
16494 SDValue In = Op->getOperand(0);
16495 MVT InVT = In.getSimpleValueType();
16496 MVT VTElt = VT.getVectorElementType();
16497 MVT InVTElt = InVT.getVectorElementType();
16501 if ((InVTElt == MVT::i1) &&
16502 (((Subtarget->hasBWI() && Subtarget->hasVLX() &&
16503 VT.getSizeInBits() <= 256 && VTElt.getSizeInBits() <= 16)) ||
16505 ((Subtarget->hasBWI() && VT.is512BitVector() &&
16506 VTElt.getSizeInBits() <= 16)) ||
16508 ((Subtarget->hasDQI() && Subtarget->hasVLX() &&
16509 VT.getSizeInBits() <= 256 && VTElt.getSizeInBits() >= 32)) ||
16511 ((Subtarget->hasDQI() && VT.is512BitVector() &&
16512 VTElt.getSizeInBits() >= 32))))
16513 return DAG.getNode(X86ISD::VSEXT, dl, VT, In);
16515 unsigned int NumElts = VT.getVectorNumElements();
16517 if (NumElts != 8 && NumElts != 16)
16520 if (VT.is512BitVector() && InVT.getVectorElementType() != MVT::i1) {
16521 if (In.getOpcode() == X86ISD::VSEXT || In.getOpcode() == X86ISD::VZEXT)
16522 return DAG.getNode(In.getOpcode(), dl, VT, In.getOperand(0));
16523 return DAG.getNode(X86ISD::VSEXT, dl, VT, In);
16526 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
16527 assert (InVT.getVectorElementType() == MVT::i1 && "Unexpected vector type");
16529 MVT ExtVT = (NumElts == 8) ? MVT::v8i64 : MVT::v16i32;
16530 Constant *C = ConstantInt::get(*DAG.getContext(),
16531 APInt::getAllOnesValue(ExtVT.getScalarType().getSizeInBits()));
16533 SDValue CP = DAG.getConstantPool(C, TLI.getPointerTy());
16534 unsigned Alignment = cast<ConstantPoolSDNode>(CP)->getAlignment();
16535 SDValue Ld = DAG.getLoad(ExtVT.getScalarType(), dl, DAG.getEntryNode(), CP,
16536 MachinePointerInfo::getConstantPool(),
16537 false, false, false, Alignment);
16538 SDValue Brcst = DAG.getNode(X86ISD::VBROADCASTM, dl, ExtVT, In, Ld);
16539 if (VT.is512BitVector())
16541 return DAG.getNode(X86ISD::VTRUNC, dl, VT, Brcst);
16544 static SDValue LowerSIGN_EXTEND(SDValue Op, const X86Subtarget *Subtarget,
16545 SelectionDAG &DAG) {
16546 MVT VT = Op->getSimpleValueType(0);
16547 SDValue In = Op->getOperand(0);
16548 MVT InVT = In.getSimpleValueType();
16551 if (VT.is512BitVector() || InVT.getVectorElementType() == MVT::i1)
16552 return LowerSIGN_EXTEND_AVX512(Op, Subtarget, DAG);
16554 if ((VT != MVT::v4i64 || InVT != MVT::v4i32) &&
16555 (VT != MVT::v8i32 || InVT != MVT::v8i16) &&
16556 (VT != MVT::v16i16 || InVT != MVT::v16i8))
16559 if (Subtarget->hasInt256())
16560 return DAG.getNode(X86ISD::VSEXT, dl, VT, In);
16562 // Optimize vectors in AVX mode
16563 // Sign extend v8i16 to v8i32 and
16564 //             v4i32 to v4i64
16566 // Divide input vector into two parts
16567 // for v4i32 the shuffle mask will be { 0, 1, -1, -1} {2, 3, -1, -1}
16568 // use vpmovsx instruction to extend v4i32 -> v2i64; v8i16 -> v4i32
16569 // concat the vectors to original VT
16571 unsigned NumElems = InVT.getVectorNumElements();
16572 SDValue Undef = DAG.getUNDEF(InVT);
16574 SmallVector<int,8> ShufMask1(NumElems, -1);
16575 for (unsigned i = 0; i != NumElems/2; ++i)
16578 SDValue OpLo = DAG.getVectorShuffle(InVT, dl, In, Undef, &ShufMask1[0]);
16580 SmallVector<int,8> ShufMask2(NumElems, -1);
16581 for (unsigned i = 0; i != NumElems/2; ++i)
16582 ShufMask2[i] = i + NumElems/2;
16584 SDValue OpHi = DAG.getVectorShuffle(InVT, dl, In, Undef, &ShufMask2[0]);
16586 MVT HalfVT = MVT::getVectorVT(VT.getScalarType(),
16587 VT.getVectorNumElements()/2);
16589 OpLo = DAG.getNode(X86ISD::VSEXT, dl, HalfVT, OpLo);
16590 OpHi = DAG.getNode(X86ISD::VSEXT, dl, HalfVT, OpHi);
16592 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, OpLo, OpHi);
16595 // Lower vector extended loads using a shuffle. If SSSE3 is not available we
16596 // may emit an illegal shuffle but the expansion is still better than scalar
16597 // code. We generate X86ISD::VSEXT for SEXTLOADs if it's available, otherwise
16598 // we'll emit a shuffle and an arithmetic shift.
16599 // FIXME: Is the expansion actually better than scalar code? It doesn't seem so.
16600 // TODO: It is possible to support ZExt by zeroing the undef values during
16601 // the shuffle phase or after the shuffle.
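// For example (illustrative): a v4i8 -> v4i32 sextload without SSE4.1 becomes a
// single i32 scalar load, a byte shuffle that places each element in the top
// byte of its i32 lane, and a vector arithmetic shift right by 24.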
16602 static SDValue LowerExtendedLoad(SDValue Op, const X86Subtarget *Subtarget,
16603 SelectionDAG &DAG) {
16604 MVT RegVT = Op.getSimpleValueType();
16605 assert(RegVT.isVector() && "We only custom lower vector sext loads.");
16606 assert(RegVT.isInteger() &&
16607 "We only custom lower integer vector sext loads.");
16609 // Nothing useful we can do without SSE2 shuffles.
16610 assert(Subtarget->hasSSE2() && "We only custom lower sext loads with SSE2.");
16612 LoadSDNode *Ld = cast<LoadSDNode>(Op.getNode());
16614 EVT MemVT = Ld->getMemoryVT();
16615 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
16616 unsigned RegSz = RegVT.getSizeInBits();
16618 ISD::LoadExtType Ext = Ld->getExtensionType();
16620 assert((Ext == ISD::EXTLOAD || Ext == ISD::SEXTLOAD)
16621 && "Only anyext and sext are currently implemented.");
16622 assert(MemVT != RegVT && "Cannot extend to the same type");
16623 assert(MemVT.isVector() && "Must load a vector from memory");
16625 unsigned NumElems = RegVT.getVectorNumElements();
16626 unsigned MemSz = MemVT.getSizeInBits();
16627 assert(RegSz > MemSz && "Register size must be greater than the mem size");
16629 if (Ext == ISD::SEXTLOAD && RegSz == 256 && !Subtarget->hasInt256()) {
16630 // The only way in which we have a legal 256-bit vector result but not the
16631 // integer 256-bit operations needed to directly lower a sextload is if we
16632 // have AVX1 but not AVX2. In that case, we can always emit a sextload to
16633 // a 128-bit vector and a normal sign_extend to 256-bits that should get
16634 // correctly legalized. We do this late to allow the canonical form of
16635 // sextload to persist throughout the rest of the DAG combiner -- it wants
16636 // to fold together any extensions it can, and so will fuse a sign_extend
16637 // of an sextload into a sextload targeting a wider value.
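// For example (illustrative): a v8i8 -> v8i32 sextload on AVX1 becomes a
// v8i8 -> v8i16 sextload followed by a normal sign_extend to v8i32.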
16639 if (MemSz == 128) {
16640 // Just switch this to a normal load.
16641 assert(TLI.isTypeLegal(MemVT) && "If the memory type is a 128-bit type, "
16642 "it must be a legal 128-bit vector "
16644 Load = DAG.getLoad(MemVT, dl, Ld->getChain(), Ld->getBasePtr(),
16645 Ld->getPointerInfo(), Ld->isVolatile(), Ld->isNonTemporal(),
16646 Ld->isInvariant(), Ld->getAlignment());
16648 assert(MemSz < 128 &&
16649 "Can't extend a type wider than 128 bits to a 256 bit vector!");
16650 // Do an sext load to a 128-bit vector type. We want to use the same
16651 // number of elements, but elements half as wide. This will end up being
16652 // recursively lowered by this routine, but will succeed as we definitely
16653 // have all the necessary features if we're using AVX1.
16655 EVT::getIntegerVT(*DAG.getContext(), RegVT.getScalarSizeInBits() / 2);
16656 EVT HalfVecVT = EVT::getVectorVT(*DAG.getContext(), HalfEltVT, NumElems);
16658 DAG.getExtLoad(Ext, dl, HalfVecVT, Ld->getChain(), Ld->getBasePtr(),
16659 Ld->getPointerInfo(), MemVT, Ld->isVolatile(),
16660 Ld->isNonTemporal(), Ld->isInvariant(),
16661 Ld->getAlignment());
16664 // Replace chain users with the new chain.
16665 assert(Load->getNumValues() == 2 && "Loads must carry a chain!");
16666 DAG.ReplaceAllUsesOfValueWith(SDValue(Ld, 1), Load.getValue(1));
16668 // Finally, do a normal sign-extend to the desired register.
16669 return DAG.getSExtOrTrunc(Load, dl, RegVT);
16672 // All sizes must be a power of two.
16673 assert(isPowerOf2_32(RegSz * MemSz * NumElems) &&
16674 "Non-power-of-two elements are not custom lowered!");
16676 // Attempt to load the original value using scalar loads.
16677 // Find the largest scalar type that divides the total loaded size.
16678 MVT SclrLoadTy = MVT::i8;
16679 for (MVT Tp : MVT::integer_valuetypes()) {
16680 if (TLI.isTypeLegal(Tp) && ((MemSz % Tp.getSizeInBits()) == 0)) {
16681 SclrLoadTy = Tp;
16682 }
16683 }
16685 // On 32bit systems, we can't save 64bit integers. Try bitcasting to F64.
16686 if (TLI.isTypeLegal(MVT::f64) && SclrLoadTy.getSizeInBits() < 64 &&
16688 SclrLoadTy = MVT::f64;
16690 // Calculate the number of scalar loads that we need to perform
16691 // in order to load our vector from memory.
16692 unsigned NumLoads = MemSz / SclrLoadTy.getSizeInBits();
16694 assert((Ext != ISD::SEXTLOAD || NumLoads == 1) &&
16695 "Can only lower sext loads with a single scalar load!");
16697 unsigned loadRegZize = RegSz;
16698 if (Ext == ISD::SEXTLOAD && RegSz == 256)
16699 loadRegZize = 128;
16701 // Represent our vector as a sequence of elements which are the
16702 // largest scalar that we can load.
16703 EVT LoadUnitVecVT = EVT::getVectorVT(
16704 *DAG.getContext(), SclrLoadTy, loadRegZize / SclrLoadTy.getSizeInBits());
16706 // Represent the data using the same element type that is stored in
16707 // memory. In practice, we 'widen' MemVT.
16709 EVT::getVectorVT(*DAG.getContext(), MemVT.getScalarType(),
16710 loadRegZize / MemVT.getScalarType().getSizeInBits());
16712 assert(WideVecVT.getSizeInBits() == LoadUnitVecVT.getSizeInBits() &&
16713 "Invalid vector type");
16715 // We can't shuffle using an illegal type.
16716 assert(TLI.isTypeLegal(WideVecVT) &&
16717 "We only lower types that form legal widened vector types");
16719 SmallVector<SDValue, 8> Chains;
16720 SDValue Ptr = Ld->getBasePtr();
16721 SDValue Increment =
16722 DAG.getConstant(SclrLoadTy.getSizeInBits() / 8, TLI.getPointerTy());
16723 SDValue Res = DAG.getUNDEF(LoadUnitVecVT);
16725 for (unsigned i = 0; i < NumLoads; ++i) {
16726 // Perform a single load.
16727 SDValue ScalarLoad =
16728 DAG.getLoad(SclrLoadTy, dl, Ld->getChain(), Ptr, Ld->getPointerInfo(),
16729 Ld->isVolatile(), Ld->isNonTemporal(), Ld->isInvariant(),
16730 Ld->getAlignment());
16731 Chains.push_back(ScalarLoad.getValue(1));
16732 // Create the first element type using SCALAR_TO_VECTOR in order to avoid
16733 // another round of DAGCombining.
16735 Res = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, LoadUnitVecVT, ScalarLoad);
16737 Res = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, LoadUnitVecVT, Res,
16738 ScalarLoad, DAG.getIntPtrConstant(i));
16740 Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, Increment);
16743 SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Chains);
16745 // Bitcast the loaded value to a vector of the original element type, in
16746 // the size of the target vector type.
16747 SDValue SlicedVec = DAG.getNode(ISD::BITCAST, dl, WideVecVT, Res);
16748 unsigned SizeRatio = RegSz / MemSz;
16750 if (Ext == ISD::SEXTLOAD) {
16751 // If we have SSE4.1, we can directly emit a VSEXT node.
16752 if (Subtarget->hasSSE41()) {
16753 SDValue Sext = DAG.getNode(X86ISD::VSEXT, dl, RegVT, SlicedVec);
16754 DAG.ReplaceAllUsesOfValueWith(SDValue(Ld, 1), TF);
16758 // Otherwise we'll shuffle the small elements in the high bits of the
16759 // larger type and perform an arithmetic shift. If the shift is not legal
16760 // it's better to scalarize.
16761 assert(TLI.isOperationLegalOrCustom(ISD::SRA, RegVT) &&
16762 "We can't implement a sext load without an arithmetic right shift!");
16764 // Redistribute the loaded elements into the different locations.
16765 SmallVector<int, 16> ShuffleVec(NumElems * SizeRatio, -1);
16766 for (unsigned i = 0; i != NumElems; ++i)
16767 ShuffleVec[i * SizeRatio + SizeRatio - 1] = i;
16769 SDValue Shuff = DAG.getVectorShuffle(
16770 WideVecVT, dl, SlicedVec, DAG.getUNDEF(WideVecVT), &ShuffleVec[0]);
16772 Shuff = DAG.getNode(ISD::BITCAST, dl, RegVT, Shuff);
16774 // Build the arithmetic shift.
16775 unsigned Amt = RegVT.getVectorElementType().getSizeInBits() -
16776 MemVT.getVectorElementType().getSizeInBits();
16778 DAG.getNode(ISD::SRA, dl, RegVT, Shuff, DAG.getConstant(Amt, RegVT));
16780 DAG.ReplaceAllUsesOfValueWith(SDValue(Ld, 1), TF);
16784 // Redistribute the loaded elements into the different locations.
16785 SmallVector<int, 16> ShuffleVec(NumElems * SizeRatio, -1);
16786 for (unsigned i = 0; i != NumElems; ++i)
16787 ShuffleVec[i * SizeRatio] = i;
16789 SDValue Shuff = DAG.getVectorShuffle(WideVecVT, dl, SlicedVec,
16790 DAG.getUNDEF(WideVecVT), &ShuffleVec[0]);
16792 // Bitcast to the requested type.
16793 Shuff = DAG.getNode(ISD::BITCAST, dl, RegVT, Shuff);
16794 DAG.ReplaceAllUsesOfValueWith(SDValue(Ld, 1), TF);
16798 // isAndOrOfSetCCs - Return true if the node is an ISD::AND or
16799 // ISD::OR of two X86ISD::SETCC nodes each of which has no other use apart
16800 // from the AND / OR.
16801 static bool isAndOrOfSetCCs(SDValue Op, unsigned &Opc) {
16802 Opc = Op.getOpcode();
16803 if (Opc != ISD::OR && Opc != ISD::AND)
16805 return (Op.getOperand(0).getOpcode() == X86ISD::SETCC &&
16806 Op.getOperand(0).hasOneUse() &&
16807 Op.getOperand(1).getOpcode() == X86ISD::SETCC &&
16808 Op.getOperand(1).hasOneUse());
16811 // isXor1OfSetCC - Return true if the node is an ISD::XOR of an X86ISD::SETCC
16812 // and 1, and that the SETCC node has a single use.
16813 static bool isXor1OfSetCC(SDValue Op) {
16814 if (Op.getOpcode() != ISD::XOR)
16816 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(Op.getOperand(1));
16817 if (N1C && N1C->getAPIntValue() == 1) {
16818 return Op.getOperand(0).getOpcode() == X86ISD::SETCC &&
16819 Op.getOperand(0).hasOneUse();
16824 SDValue X86TargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
16825 bool addTest = true;
16826 SDValue Chain = Op.getOperand(0);
16827 SDValue Cond = Op.getOperand(1);
16828 SDValue Dest = Op.getOperand(2);
16831 bool Inverted = false;
16833 if (Cond.getOpcode() == ISD::SETCC) {
16834 // Check for setcc([su]{add,sub,mul}o == 0).
16835 if (cast<CondCodeSDNode>(Cond.getOperand(2))->get() == ISD::SETEQ &&
16836 isa<ConstantSDNode>(Cond.getOperand(1)) &&
16837 cast<ConstantSDNode>(Cond.getOperand(1))->isNullValue() &&
16838 Cond.getOperand(0).getResNo() == 1 &&
16839 (Cond.getOperand(0).getOpcode() == ISD::SADDO ||
16840 Cond.getOperand(0).getOpcode() == ISD::UADDO ||
16841 Cond.getOperand(0).getOpcode() == ISD::SSUBO ||
16842 Cond.getOperand(0).getOpcode() == ISD::USUBO ||
16843 Cond.getOperand(0).getOpcode() == ISD::SMULO ||
16844 Cond.getOperand(0).getOpcode() == ISD::UMULO)) {
16846 Cond = Cond.getOperand(0);
16848 SDValue NewCond = LowerSETCC(Cond, DAG);
16849 if (NewCond.getNode())
16854 // FIXME: LowerXALUO doesn't handle these!!
16855 else if (Cond.getOpcode() == X86ISD::ADD ||
16856 Cond.getOpcode() == X86ISD::SUB ||
16857 Cond.getOpcode() == X86ISD::SMUL ||
16858 Cond.getOpcode() == X86ISD::UMUL)
16859 Cond = LowerXALUO(Cond, DAG);
16862 // Look past (and (setcc_carry (cmp ...)), 1).
16863 if (Cond.getOpcode() == ISD::AND &&
16864 Cond.getOperand(0).getOpcode() == X86ISD::SETCC_CARRY) {
16865 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Cond.getOperand(1));
16866 if (C && C->getAPIntValue() == 1)
16867 Cond = Cond.getOperand(0);
16870 // If the condition flag is set by an X86ISD::CMP, then use it as the condition
16871 // setting operand in place of the X86ISD::SETCC.
16872 unsigned CondOpcode = Cond.getOpcode();
16873 if (CondOpcode == X86ISD::SETCC ||
16874 CondOpcode == X86ISD::SETCC_CARRY) {
16875 CC = Cond.getOperand(0);
16877 SDValue Cmp = Cond.getOperand(1);
16878 unsigned Opc = Cmp.getOpcode();
16879 // FIXME: WHY THE SPECIAL CASING OF LogicalCmp??
16880 if (isX86LogicalCmp(Cmp) || Opc == X86ISD::BT) {
16884 switch (cast<ConstantSDNode>(CC)->getZExtValue()) {
16888 // These can only come from an arithmetic instruction with overflow,
16889 // e.g. SADDO, UADDO.
16890 Cond = Cond.getNode()->getOperand(1);
16896 CondOpcode = Cond.getOpcode();
16897 if (CondOpcode == ISD::UADDO || CondOpcode == ISD::SADDO ||
16898 CondOpcode == ISD::USUBO || CondOpcode == ISD::SSUBO ||
16899 ((CondOpcode == ISD::UMULO || CondOpcode == ISD::SMULO) &&
16900 Cond.getOperand(0).getValueType() != MVT::i8)) {
16901 SDValue LHS = Cond.getOperand(0);
16902 SDValue RHS = Cond.getOperand(1);
16903 unsigned X86Opcode;
16906 // Keep this in sync with LowerXALUO, otherwise we might create redundant
16907 // instructions that can't be removed afterwards (i.e. X86ISD::ADD and
16908 // X86ISD::SUB).
16909 switch (CondOpcode) {
16910 case ISD::UADDO: X86Opcode = X86ISD::ADD; X86Cond = X86::COND_B; break;
16912 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS))
16914 X86Opcode = X86ISD::INC; X86Cond = X86::COND_O;
16917 X86Opcode = X86ISD::ADD; X86Cond = X86::COND_O; break;
16918 case ISD::USUBO: X86Opcode = X86ISD::SUB; X86Cond = X86::COND_B; break;
16920 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS))
16922 X86Opcode = X86ISD::DEC; X86Cond = X86::COND_O;
16925 X86Opcode = X86ISD::SUB; X86Cond = X86::COND_O; break;
16926 case ISD::UMULO: X86Opcode = X86ISD::UMUL; X86Cond = X86::COND_O; break;
16927 case ISD::SMULO: X86Opcode = X86ISD::SMUL; X86Cond = X86::COND_O; break;
16928 default: llvm_unreachable("unexpected overflowing operator");
16931 X86Cond = X86::GetOppositeBranchCondition((X86::CondCode)X86Cond);
16932 if (CondOpcode == ISD::UMULO)
16933 VTs = DAG.getVTList(LHS.getValueType(), LHS.getValueType(),
16936 VTs = DAG.getVTList(LHS.getValueType(), MVT::i32);
16938 SDValue X86Op = DAG.getNode(X86Opcode, dl, VTs, LHS, RHS);
16940 if (CondOpcode == ISD::UMULO)
16941 Cond = X86Op.getValue(2);
16943 Cond = X86Op.getValue(1);
16945 CC = DAG.getConstant(X86Cond, MVT::i8);
16949 if (Cond.hasOneUse() && isAndOrOfSetCCs(Cond, CondOpc)) {
16950 SDValue Cmp = Cond.getOperand(0).getOperand(1);
16951 if (CondOpc == ISD::OR) {
16952 // Also, recognize the pattern generated by an FCMP_UNE. We can emit
16953 // two branches instead of an explicit OR instruction with a
16954 // separate test.
16955 if (Cmp == Cond.getOperand(1).getOperand(1) &&
16956 isX86LogicalCmp(Cmp)) {
16957 CC = Cond.getOperand(0).getOperand(0);
16958 Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
16959 Chain, Dest, CC, Cmp);
16960 CC = Cond.getOperand(1).getOperand(0);
16964 } else { // ISD::AND
16965 // Also, recognize the pattern generated by an FCMP_OEQ. We can emit
16966 // two branches instead of an explicit AND instruction with a
16967 // separate test. However, we only do this if this block doesn't
16968 // have a fall-through edge, because this requires an explicit
16969 // jmp when the condition is false.
16970 if (Cmp == Cond.getOperand(1).getOperand(1) &&
16971 isX86LogicalCmp(Cmp) &&
16972 Op.getNode()->hasOneUse()) {
16973 X86::CondCode CCode =
16974 (X86::CondCode)Cond.getOperand(0).getConstantOperandVal(0);
16975 CCode = X86::GetOppositeBranchCondition(CCode);
16976 CC = DAG.getConstant(CCode, MVT::i8);
16977 SDNode *User = *Op.getNode()->use_begin();
16978 // Look for an unconditional branch following this conditional branch.
16979 // We need this because we need to reverse the successors in order
16980 // to implement FCMP_OEQ.
16981 if (User->getOpcode() == ISD::BR) {
16982 SDValue FalseBB = User->getOperand(1);
16983 SDNode *NewBR =
16984 DAG.UpdateNodeOperands(User, User->getOperand(0), Dest);
16985 assert(NewBR == User);
16989 Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
16990 Chain, Dest, CC, Cmp);
16991 X86::CondCode CCode =
16992 (X86::CondCode)Cond.getOperand(1).getConstantOperandVal(0);
16993 CCode = X86::GetOppositeBranchCondition(CCode);
16994 CC = DAG.getConstant(CCode, MVT::i8);
17000 } else if (Cond.hasOneUse() && isXor1OfSetCC(Cond)) {
17001 // Recognize 'xorb (setcc), 1' patterns. The xor inverts the condition.
17002 // It should be transformed by the DAG combiner except when the condition
17003 // is set by an arithmetic-with-overflow node.
17004 X86::CondCode CCode =
17005 (X86::CondCode)Cond.getOperand(0).getConstantOperandVal(0);
17006 CCode = X86::GetOppositeBranchCondition(CCode);
17007 CC = DAG.getConstant(CCode, MVT::i8);
17008 Cond = Cond.getOperand(0).getOperand(1);
17010 } else if (Cond.getOpcode() == ISD::SETCC &&
17011 cast<CondCodeSDNode>(Cond.getOperand(2))->get() == ISD::SETOEQ) {
17012 // For FCMP_OEQ, we can emit
17013 // two branches instead of an explicit AND instruction with a
17014 // separate test. However, we only do this if this block doesn't
17015 // have a fall-through edge, because this requires an explicit
17016 // jmp when the condition is false.
17017 if (Op.getNode()->hasOneUse()) {
17018 SDNode *User = *Op.getNode()->use_begin();
17019 // Look for an unconditional branch following this conditional branch.
17020 // We need this because we need to reverse the successors in order
17021 // to implement FCMP_OEQ.
17022 if (User->getOpcode() == ISD::BR) {
17023 SDValue FalseBB = User->getOperand(1);
17024 SDNode *NewBR =
17025 DAG.UpdateNodeOperands(User, User->getOperand(0), Dest);
17026 assert(NewBR == User);
17030 SDValue Cmp = DAG.getNode(X86ISD::CMP, dl, MVT::i32,
17031 Cond.getOperand(0), Cond.getOperand(1));
17032 Cmp = ConvertCmpIfNecessary(Cmp, DAG);
17033 CC = DAG.getConstant(X86::COND_NE, MVT::i8);
17034 Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
17035 Chain, Dest, CC, Cmp);
17036 CC = DAG.getConstant(X86::COND_P, MVT::i8);
17041 } else if (Cond.getOpcode() == ISD::SETCC &&
17042 cast<CondCodeSDNode>(Cond.getOperand(2))->get() == ISD::SETUNE) {
17043 // For FCMP_UNE, we can emit
17044 // two branches instead of an explicit OR instruction with a
17045 // separate test. However, we only do this if this block doesn't
17046 // have a fall-through edge, because this requires an explicit
17047 // jmp when the condition is false.
17048 if (Op.getNode()->hasOneUse()) {
17049 SDNode *User = *Op.getNode()->use_begin();
17050 // Look for an unconditional branch following this conditional branch.
17051 // We need this because we need to reverse the successors in order
17052 // to implement FCMP_UNE.
17053 if (User->getOpcode() == ISD::BR) {
17054 SDValue FalseBB = User->getOperand(1);
17055 SDNode *NewBR =
17056 DAG.UpdateNodeOperands(User, User->getOperand(0), Dest);
17057 assert(NewBR == User);
17060 SDValue Cmp = DAG.getNode(X86ISD::CMP, dl, MVT::i32,
17061 Cond.getOperand(0), Cond.getOperand(1));
17062 Cmp = ConvertCmpIfNecessary(Cmp, DAG);
17063 CC = DAG.getConstant(X86::COND_NE, MVT::i8);
17064 Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
17065 Chain, Dest, CC, Cmp);
17066 CC = DAG.getConstant(X86::COND_NP, MVT::i8);
17077 // Look past the truncate if the high bits are known zero.
17077 if (isTruncWithZeroHighBitsInput(Cond, DAG))
17078 Cond = Cond.getOperand(0);
17080 // We know the result of AND is compared against zero. Try to match
17082 if (Cond.getOpcode() == ISD::AND && Cond.hasOneUse()) {
17083 SDValue NewSetCC = LowerToBT(Cond, ISD::SETNE, dl, DAG);
17084 if (NewSetCC.getNode()) {
17085 CC = NewSetCC.getOperand(0);
17086 Cond = NewSetCC.getOperand(1);
17093 X86::CondCode X86Cond = Inverted ? X86::COND_E : X86::COND_NE;
17094 CC = DAG.getConstant(X86Cond, MVT::i8);
17095 Cond = EmitTest(Cond, X86Cond, dl, DAG);
17097 Cond = ConvertCmpIfNecessary(Cond, DAG);
17098 return DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
17099 Chain, Dest, CC, Cond);
17102 // Lower dynamic stack allocation to an _alloca call for Cygwin/MinGW targets.
17103 // Calls to _alloca are needed to probe the stack when allocating more than 4K
17104 // bytes in one go. Touching the stack at 4K increments is necessary to ensure
17105 // that the guard pages used by the OS virtual memory manager are allocated in
17106 // the correct sequence.
17108 X86TargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
17109 SelectionDAG &DAG) const {
17110 MachineFunction &MF = DAG.getMachineFunction();
17111 bool SplitStack = MF.shouldSplitStack();
17112 bool Lower = (Subtarget->isOSWindows() && !Subtarget->isTargetMachO()) ||
17117 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
17118 SDNode* Node = Op.getNode();
17120 unsigned SPReg = TLI.getStackPointerRegisterToSaveRestore();
17121 assert(SPReg && "Target cannot require DYNAMIC_STACKALLOC expansion and"
17122 " not tell us which reg is the stack pointer!");
17123 EVT VT = Node->getValueType(0);
17124 SDValue Tmp1 = SDValue(Node, 0);
17125 SDValue Tmp2 = SDValue(Node, 1);
17126 SDValue Tmp3 = Node->getOperand(2);
17127 SDValue Chain = Tmp1.getOperand(0);
17129 // Chain the dynamic stack allocation so that it doesn't modify the stack
17130 // pointer when other instructions are using the stack.
17131 Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(0, true),
17134 SDValue Size = Tmp2.getOperand(1);
17135 SDValue SP = DAG.getCopyFromReg(Chain, dl, SPReg, VT);
17136 Chain = SP.getValue(1);
17137 unsigned Align = cast<ConstantSDNode>(Tmp3)->getZExtValue();
17138 const TargetFrameLowering &TFI = *Subtarget->getFrameLowering();
17139 unsigned StackAlign = TFI.getStackAlignment();
17140 Tmp1 = DAG.getNode(ISD::SUB, dl, VT, SP, Size); // Value
17141 if (Align > StackAlign)
17142 Tmp1 = DAG.getNode(ISD::AND, dl, VT, Tmp1,
17143 DAG.getConstant(-(uint64_t)Align, VT));
17144 Chain = DAG.getCopyToReg(Chain, dl, SPReg, Tmp1); // Output chain
17146 Tmp2 = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(0, true),
17147 DAG.getIntPtrConstant(0, true), SDValue(),
17150 SDValue Ops[2] = { Tmp1, Tmp2 };
17151 return DAG.getMergeValues(Ops, dl);
17155 SDValue Chain = Op.getOperand(0);
17156 SDValue Size = Op.getOperand(1);
17157 unsigned Align = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue();
17158 EVT VT = Op.getNode()->getValueType(0);
17160 bool Is64Bit = Subtarget->is64Bit();
17161 EVT SPTy = getPointerTy();
17164 MachineRegisterInfo &MRI = MF.getRegInfo();
17167 // The 64-bit implementation of segmented stacks needs to clobber both r10 and
17168 // r11. This makes it impossible to use it along with nested parameters.
17169 const Function *F = MF.getFunction();
17171 for (Function::const_arg_iterator I = F->arg_begin(), E = F->arg_end();
17173 if (I->hasNestAttr())
17174 report_fatal_error("Cannot use segmented stacks with functions that "
17175 "have nested arguments.");
17178 const TargetRegisterClass *AddrRegClass =
17179 getRegClassFor(getPointerTy());
17180 unsigned Vreg = MRI.createVirtualRegister(AddrRegClass);
17181 Chain = DAG.getCopyToReg(Chain, dl, Vreg, Size);
17182 SDValue Value = DAG.getNode(X86ISD::SEG_ALLOCA, dl, SPTy, Chain,
17183 DAG.getRegister(Vreg, SPTy));
17184 SDValue Ops1[2] = { Value, Chain };
17185 return DAG.getMergeValues(Ops1, dl);
17188 const unsigned Reg = (Subtarget->isTarget64BitLP64() ? X86::RAX : X86::EAX);
17190 Chain = DAG.getCopyToReg(Chain, dl, Reg, Size, Flag);
17191 Flag = Chain.getValue(1);
17192 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
17194 Chain = DAG.getNode(X86ISD::WIN_ALLOCA, dl, NodeTys, Chain, Flag);
17196 const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
17197 unsigned SPReg = RegInfo->getStackRegister();
17198 SDValue SP = DAG.getCopyFromReg(Chain, dl, SPReg, SPTy);
17199 Chain = SP.getValue(1);
17202 SP = DAG.getNode(ISD::AND, dl, VT, SP.getValue(0),
17203 DAG.getConstant(-(uint64_t)Align, VT));
17204 Chain = DAG.getCopyToReg(Chain, dl, SPReg, SP);
17207 SDValue Ops1[2] = { SP, Chain };
17208 return DAG.getMergeValues(Ops1, dl);
17212 SDValue X86TargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const {
17213 MachineFunction &MF = DAG.getMachineFunction();
17214 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
17216 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
17219 if (!Subtarget->is64Bit() || Subtarget->isTargetWin64()) {
17220 // vastart just stores the address of the VarArgsFrameIndex slot into the
17221 // memory location argument.
17222 SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(),
17224 return DAG.getStore(Op.getOperand(0), DL, FR, Op.getOperand(1),
17225 MachinePointerInfo(SV), false, false, 0);
17229 // gp_offset (0 - 6 * 8)
17230 // fp_offset (48 - 48 + 8 * 16)
17231 // overflow_arg_area (point to parameters coming in memory).
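// For reference, the four stores below fill in the SysV AMD64 va_list,
// which is laid out as:
//   struct __va_list_tag {
//     unsigned gp_offset;          // byte offset 0
//     unsigned fp_offset;          // byte offset 4
//     void    *overflow_arg_area;  // byte offset 8
//     void    *reg_save_area;      // byte offset 16
//   };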
17233 SmallVector<SDValue, 8> MemOps;
17234 SDValue FIN = Op.getOperand(1);
17236 SDValue Store = DAG.getStore(Op.getOperand(0), DL,
17237 DAG.getConstant(FuncInfo->getVarArgsGPOffset(),
17239 FIN, MachinePointerInfo(SV), false, false, 0);
17240 MemOps.push_back(Store);
17243 FIN = DAG.getNode(ISD::ADD, DL, getPointerTy(),
17244 FIN, DAG.getIntPtrConstant(4));
17245 Store = DAG.getStore(Op.getOperand(0), DL,
17246 DAG.getConstant(FuncInfo->getVarArgsFPOffset(),
17248 FIN, MachinePointerInfo(SV, 4), false, false, 0);
17249 MemOps.push_back(Store);
17251 // Store ptr to overflow_arg_area
17252 FIN = DAG.getNode(ISD::ADD, DL, getPointerTy(),
17253 FIN, DAG.getIntPtrConstant(4));
17254 SDValue OVFIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(),
17256 Store = DAG.getStore(Op.getOperand(0), DL, OVFIN, FIN,
17257 MachinePointerInfo(SV, 8),
17259 MemOps.push_back(Store);
17261 // Store ptr to reg_save_area.
17262 FIN = DAG.getNode(ISD::ADD, DL, getPointerTy(),
17263 FIN, DAG.getIntPtrConstant(8));
17264 SDValue RSFIN = DAG.getFrameIndex(FuncInfo->getRegSaveFrameIndex(),
17266 Store = DAG.getStore(Op.getOperand(0), DL, RSFIN, FIN,
17267 MachinePointerInfo(SV, 16), false, false, 0);
17268 MemOps.push_back(Store);
17269 return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOps);
17272 SDValue X86TargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG) const {
17273 assert(Subtarget->is64Bit() &&
17274 "LowerVAARG only handles 64-bit va_arg!");
17275 assert((Subtarget->isTargetLinux() ||
17276 Subtarget->isTargetDarwin()) &&
17277 "Unhandled target in LowerVAARG");
17278 assert(Op.getNode()->getNumOperands() == 4);
17279 SDValue Chain = Op.getOperand(0);
17280 SDValue SrcPtr = Op.getOperand(1);
17281 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
17282 unsigned Align = Op.getConstantOperandVal(3);
17285 EVT ArgVT = Op.getNode()->getValueType(0);
17286 Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
17287 uint32_t ArgSize = getDataLayout()->getTypeAllocSize(ArgTy);
17290 // Decide which area this value should be read from.
17291 // TODO: Implement the AMD64 ABI in its entirety. This simple
17292 // selection mechanism works only for the basic types.
17293 if (ArgVT == MVT::f80) {
17294 llvm_unreachable("va_arg for f80 not yet implemented");
17295 } else if (ArgVT.isFloatingPoint() && ArgSize <= 16 /*bytes*/) {
17296 ArgMode = 2; // Argument passed in XMM register. Use fp_offset.
17297 } else if (ArgVT.isInteger() && ArgSize <= 32 /*bytes*/) {
17298 ArgMode = 1; // Argument passed in GPR64 register(s). Use gp_offset.
17300 llvm_unreachable("Unhandled argument type in LowerVAARG");
17303 if (ArgMode == 2) {
17304 // Sanity Check: Make sure using fp_offset makes sense.
17305 assert(!DAG.getTarget().Options.UseSoftFloat &&
17306 !(DAG.getMachineFunction().getFunction()->hasFnAttribute(
17307 Attribute::NoImplicitFloat)) &&
17308 Subtarget->hasSSE1());
17311 // Insert VAARG_64 node into the DAG
17312 // VAARG_64 returns two values: Variable Argument Address, Chain
17313 SmallVector<SDValue, 11> InstOps;
17314 InstOps.push_back(Chain);
17315 InstOps.push_back(SrcPtr);
17316 InstOps.push_back(DAG.getConstant(ArgSize, MVT::i32));
17317 InstOps.push_back(DAG.getConstant(ArgMode, MVT::i8));
17318 InstOps.push_back(DAG.getConstant(Align, MVT::i32));
17319 SDVTList VTs = DAG.getVTList(getPointerTy(), MVT::Other);
17320 SDValue VAARG = DAG.getMemIntrinsicNode(X86ISD::VAARG_64, dl,
17321 VTs, InstOps, MVT::i64,
17322 MachinePointerInfo(SV),
17324 /*Volatile=*/false,
17326 /*WriteMem=*/true);
17327 Chain = VAARG.getValue(1);
17329 // Load the next argument and return it
17330 return DAG.getLoad(ArgVT, dl,
17333 MachinePointerInfo(),
17334 false, false, false, 0);
17337 static SDValue LowerVACOPY(SDValue Op, const X86Subtarget *Subtarget,
17338 SelectionDAG &DAG) {
17339 // X86-64 va_list is a struct { i32, i32, i8*, i8* }.
17340 assert(Subtarget->is64Bit() && "This code only handles 64-bit va_copy!");
17341 SDValue Chain = Op.getOperand(0);
17342 SDValue DstPtr = Op.getOperand(1);
17343 SDValue SrcPtr = Op.getOperand(2);
17344 const Value *DstSV = cast<SrcValueSDNode>(Op.getOperand(3))->getValue();
17345 const Value *SrcSV = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();
17348 return DAG.getMemcpy(Chain, DL, DstPtr, SrcPtr,
17349 DAG.getIntPtrConstant(24), 8, /*isVolatile*/false,
17351 MachinePointerInfo(DstSV), MachinePointerInfo(SrcSV));
17354 // getTargetVShiftByConstNode - Handle vector element shifts where the shift
17355 // amount is a constant. Takes the immediate form of the shift opcode as input.
17356 static SDValue getTargetVShiftByConstNode(unsigned Opc, SDLoc dl, MVT VT,
17357 SDValue SrcOp, uint64_t ShiftAmt,
17358 SelectionDAG &DAG) {
17359 MVT ElementType = VT.getVectorElementType();
17361 // Fold this packed shift into its first operand if ShiftAmt is 0.
17365 // Check for ShiftAmt >= element width
17366 if (ShiftAmt >= ElementType.getSizeInBits()) {
17367 if (Opc == X86ISD::VSRAI)
17368 ShiftAmt = ElementType.getSizeInBits() - 1;
17370 return DAG.getConstant(0, VT);
17373 assert((Opc == X86ISD::VSHLI || Opc == X86ISD::VSRLI || Opc == X86ISD::VSRAI)
17374 && "Unknown target vector shift-by-constant node");
17376 // Fold this packed vector shift into a build vector if SrcOp is a
17377 // vector of Constants or UNDEFs, and SrcOp's value type is the same as VT.
17378 if (VT == SrcOp.getSimpleValueType() &&
17379 ISD::isBuildVectorOfConstantSDNodes(SrcOp.getNode())) {
17380 SmallVector<SDValue, 8> Elts;
17381 unsigned NumElts = SrcOp->getNumOperands();
17382 ConstantSDNode *ND;
17385 default: llvm_unreachable(nullptr);
17386 case X86ISD::VSHLI:
17387 for (unsigned i=0; i!=NumElts; ++i) {
17388 SDValue CurrentOp = SrcOp->getOperand(i);
17389 if (CurrentOp->getOpcode() == ISD::UNDEF) {
17390 Elts.push_back(CurrentOp);
17393 ND = cast<ConstantSDNode>(CurrentOp);
17394 const APInt &C = ND->getAPIntValue();
17395 Elts.push_back(DAG.getConstant(C.shl(ShiftAmt), ElementType));
17398 case X86ISD::VSRLI:
17399 for (unsigned i=0; i!=NumElts; ++i) {
17400 SDValue CurrentOp = SrcOp->getOperand(i);
17401 if (CurrentOp->getOpcode() == ISD::UNDEF) {
17402 Elts.push_back(CurrentOp);
17405 ND = cast<ConstantSDNode>(CurrentOp);
17406 const APInt &C = ND->getAPIntValue();
17407 Elts.push_back(DAG.getConstant(C.lshr(ShiftAmt), ElementType));
17410 case X86ISD::VSRAI:
17411 for (unsigned i=0; i!=NumElts; ++i) {
17412 SDValue CurrentOp = SrcOp->getOperand(i);
17413 if (CurrentOp->getOpcode() == ISD::UNDEF) {
17414 Elts.push_back(CurrentOp);
17417 ND = cast<ConstantSDNode>(CurrentOp);
17418 const APInt &C = ND->getAPIntValue();
17419 Elts.push_back(DAG.getConstant(C.ashr(ShiftAmt), ElementType));
17424 return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Elts);
17427 return DAG.getNode(Opc, dl, VT, SrcOp, DAG.getConstant(ShiftAmt, MVT::i8));
17430 // getTargetVShiftNode - Handle vector element shifts where the shift amount
17431 // may or may not be a constant. Takes the immediate form of the shift opcode as input.
17432 static SDValue getTargetVShiftNode(unsigned Opc, SDLoc dl, MVT VT,
17433 SDValue SrcOp, SDValue ShAmt,
17434 SelectionDAG &DAG) {
17435 MVT SVT = ShAmt.getSimpleValueType();
17436 assert((SVT == MVT::i32 || SVT == MVT::i64) && "Unexpected value type!");
17438 // Catch shift-by-constant.
17439 if (ConstantSDNode *CShAmt = dyn_cast<ConstantSDNode>(ShAmt))
17440 return getTargetVShiftByConstNode(Opc, dl, VT, SrcOp,
17441 CShAmt->getZExtValue(), DAG);
17443 // Change opcode to non-immediate version
17445 default: llvm_unreachable("Unknown target vector shift node");
17446 case X86ISD::VSHLI: Opc = X86ISD::VSHL; break;
17447 case X86ISD::VSRLI: Opc = X86ISD::VSRL; break;
17448 case X86ISD::VSRAI: Opc = X86ISD::VSRA; break;
17451 const X86Subtarget &Subtarget =
17452 static_cast<const X86Subtarget &>(DAG.getSubtarget());
17453 if (Subtarget.hasSSE41() && ShAmt.getOpcode() == ISD::ZERO_EXTEND &&
17454 ShAmt.getOperand(0).getSimpleValueType() == MVT::i16) {
17455 // Let the shuffle legalizer expand this shift amount node.
17456 SDValue Op0 = ShAmt.getOperand(0);
17457 Op0 = DAG.getNode(ISD::SCALAR_TO_VECTOR, SDLoc(Op0), MVT::v8i16, Op0);
17458 ShAmt = getShuffleVectorZeroOrUndef(Op0, 0, true, &Subtarget, DAG);
17460 // Need to build a vector containing shift amount.
17461 // SSE/AVX packed shifts only use the lower 64 bits of the shift count.
17462 SmallVector<SDValue, 4> ShOps;
17463 ShOps.push_back(ShAmt);
17464 if (SVT == MVT::i32) {
17465 ShOps.push_back(DAG.getConstant(0, SVT));
17466 ShOps.push_back(DAG.getUNDEF(SVT));
17468 ShOps.push_back(DAG.getUNDEF(SVT));
17470 MVT BVT = SVT == MVT::i32 ? MVT::v4i32 : MVT::v2i64;
17471 ShAmt = DAG.getNode(ISD::BUILD_VECTOR, dl, BVT, ShOps);
17474 // The return type has to be a 128-bit type with the same element
17475 // type as the input type.
17476 MVT EltVT = VT.getVectorElementType();
17477 EVT ShVT = MVT::getVectorVT(EltVT, 128/EltVT.getSizeInBits());
17479 ShAmt = DAG.getNode(ISD::BITCAST, dl, ShVT, ShAmt);
17480 return DAG.getNode(Opc, dl, VT, SrcOp, ShAmt);
17483 /// \brief Return (and \p Op, \p Mask) for compare instructions or
17484 /// (vselect \p Mask, \p Op, \p PreservedSrc) for others along with the
17485 /// necessary casting for \p Mask when lowering masking intrinsics.
17486 static SDValue getVectorMaskingNode(SDValue Op, SDValue Mask,
17487 SDValue PreservedSrc,
17488 const X86Subtarget *Subtarget,
17489 SelectionDAG &DAG) {
17490 EVT VT = Op.getValueType();
17491 EVT MaskVT = EVT::getVectorVT(*DAG.getContext(),
17492 MVT::i1, VT.getVectorNumElements());
17493 EVT BitcastVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
17494 Mask.getValueType().getSizeInBits());
17497 assert(MaskVT.isSimple() && "invalid mask type");
17499 if (isAllOnes(Mask))
17502 // In the case where MaskVT equals v2i1 or v4i1, the lower 2 or 4 elements
17503 // are extracted by EXTRACT_SUBVECTOR.
17504 SDValue VMask = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MaskVT,
17505 DAG.getNode(ISD::BITCAST, dl, BitcastVT, Mask),
17506 DAG.getIntPtrConstant(0));
17508 switch (Op.getOpcode()) {
17510 case X86ISD::PCMPEQM:
17511 case X86ISD::PCMPGTM:
17513 case X86ISD::CMPMU:
17514 return DAG.getNode(ISD::AND, dl, VT, Op, VMask);
17516 if (PreservedSrc.getOpcode() == ISD::UNDEF)
17517 PreservedSrc = getZeroVector(VT, Subtarget, DAG, dl);
17518 return DAG.getNode(ISD::VSELECT, dl, VT, VMask, Op, PreservedSrc);
17521 /// \brief Creates an SDNode for a predicated scalar operation.
17522 /// \returns (X86vselect \p Mask, \p Op, \p PreservedSrc).
17523 /// The mask comes in as MVT::i8 and should be truncated
17524 /// to MVT::i1 while lowering masking intrinsics.
17525 /// The main difference between ScalarMaskingNode and VectorMaskingNode is that
17526 /// the former uses "X86select" instead of "vselect"; we cannot create a "vselect"
17527 /// node for a scalar instruction.
17528 static SDValue getScalarMaskingNode(SDValue Op, SDValue Mask,
17529 SDValue PreservedSrc,
17530 const X86Subtarget *Subtarget,
17531 SelectionDAG &DAG) {
17532 if (isAllOnes(Mask))
17535 EVT VT = Op.getValueType();
17537 // The mask should be of type MVT::i1
17538 SDValue IMask = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, Mask);
17540 if (PreservedSrc.getOpcode() == ISD::UNDEF)
17541 PreservedSrc = getZeroVector(VT, Subtarget, DAG, dl);
17542 return DAG.getNode(X86ISD::SELECT, dl, VT, IMask, Op, PreservedSrc);
17545 static SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, const X86Subtarget *Subtarget,
17546 SelectionDAG &DAG) {
17548 unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
17549 EVT VT = Op.getValueType();
17550 const IntrinsicData* IntrData = getIntrinsicWithoutChain(IntNo);
17552 switch(IntrData->Type) {
17553 case INTR_TYPE_1OP:
17554 return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(), Op.getOperand(1));
17555 case INTR_TYPE_2OP:
17556 return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(), Op.getOperand(1),
17558 case INTR_TYPE_3OP:
17559 return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(), Op.getOperand(1),
17560 Op.getOperand(2), Op.getOperand(3));
17561 case INTR_TYPE_1OP_MASK_RM: {
17562 SDValue Src = Op.getOperand(1);
17563 SDValue Src0 = Op.getOperand(2);
17564 SDValue Mask = Op.getOperand(3);
17565 SDValue RoundingMode = Op.getOperand(4);
17566 return getVectorMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT, Src,
17568 Mask, Src0, Subtarget, DAG);
17570 case INTR_TYPE_SCALAR_MASK_RM: {
17571 SDValue Src1 = Op.getOperand(1);
17572 SDValue Src2 = Op.getOperand(2);
17573 SDValue Src0 = Op.getOperand(3);
17574 SDValue Mask = Op.getOperand(4);
17575 SDValue RoundingMode = Op.getOperand(5);
17576 return getScalarMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT, Src1, Src2,
17578 Mask, Src0, Subtarget, DAG);
17580 case INTR_TYPE_2OP_MASK: {
17581 SDValue Mask = Op.getOperand(4);
17582 SDValue PassThru = Op.getOperand(3);
17583 unsigned IntrWithRoundingModeOpcode = IntrData->Opc1;
17584 if (IntrWithRoundingModeOpcode != 0) {
17585 unsigned Round = cast<ConstantSDNode>(Op.getOperand(5))->getZExtValue();
17586 if (Round != X86::STATIC_ROUNDING::CUR_DIRECTION) {
17587 return getVectorMaskingNode(DAG.getNode(IntrWithRoundingModeOpcode,
17588 dl, Op.getValueType(),
17589 Op.getOperand(1), Op.getOperand(2),
17590 Op.getOperand(3), Op.getOperand(5)),
17591 Mask, PassThru, Subtarget, DAG);
17594 return getVectorMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT,
17597 Mask, PassThru, Subtarget, DAG);
17599 case FMA_OP_MASK: {
17600 SDValue Src1 = Op.getOperand(1);
17601 SDValue Src2 = Op.getOperand(2);
17602 SDValue Src3 = Op.getOperand(3);
17603 SDValue Mask = Op.getOperand(4);
17604 unsigned IntrWithRoundingModeOpcode = IntrData->Opc1;
17605 if (IntrWithRoundingModeOpcode != 0) {
17606 SDValue Rnd = Op.getOperand(5);
17607 if (cast<ConstantSDNode>(Rnd)->getZExtValue() !=
17608 X86::STATIC_ROUNDING::CUR_DIRECTION)
17609 return getVectorMaskingNode(DAG.getNode(IntrWithRoundingModeOpcode,
17610 dl, Op.getValueType(),
17611 Src1, Src2, Src3, Rnd),
17612 Mask, Src1, Subtarget, DAG);
17614 return getVectorMaskingNode(DAG.getNode(IntrData->Opc0,
17615 dl, Op.getValueType(),
17617 Mask, Src1, Subtarget, DAG);
17620 case CMP_MASK_CC: {
17621 // Comparison intrinsics with masks.
17622 // Example of transformation:
17623 // (i8 (int_x86_avx512_mask_pcmpeq_q_128
17624 // (v2i64 %a), (v2i64 %b), (i8 %mask))) ->
17626 // (v8i1 (insert_subvector undef,
17627 // (v2i1 (and (PCMPEQM %a, %b),
17628 // (extract_subvector
17629 // (v8i1 (bitcast %mask)), 0))), 0))))
17630 EVT VT = Op.getOperand(1).getValueType();
17631 EVT MaskVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
17632 VT.getVectorNumElements());
17633 SDValue Mask = Op.getOperand((IntrData->Type == CMP_MASK_CC) ? 4 : 3);
17634 EVT BitcastVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
17635 Mask.getValueType().getSizeInBits());
17637 if (IntrData->Type == CMP_MASK_CC) {
17638 Cmp = DAG.getNode(IntrData->Opc0, dl, MaskVT, Op.getOperand(1),
17639 Op.getOperand(2), Op.getOperand(3));
17641 assert(IntrData->Type == CMP_MASK && "Unexpected intrinsic type!");
17642 Cmp = DAG.getNode(IntrData->Opc0, dl, MaskVT, Op.getOperand(1),
17645 SDValue CmpMask = getVectorMaskingNode(Cmp, Mask,
17646 DAG.getTargetConstant(0, MaskVT),
17648 SDValue Res = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, BitcastVT,
17649 DAG.getUNDEF(BitcastVT), CmpMask,
17650 DAG.getIntPtrConstant(0));
17651 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
17653 case COMI: { // Comparison intrinsics
17654 ISD::CondCode CC = (ISD::CondCode)IntrData->Opc1;
17655 SDValue LHS = Op.getOperand(1);
17656 SDValue RHS = Op.getOperand(2);
17657 unsigned X86CC = TranslateX86CC(CC, true, LHS, RHS, DAG);
17658 assert(X86CC != X86::COND_INVALID && "Unexpected illegal condition!");
17659 SDValue Cond = DAG.getNode(IntrData->Opc0, dl, MVT::i32, LHS, RHS);
17660 SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
17661 DAG.getConstant(X86CC, MVT::i8), Cond);
17662 return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC);
17665 return getTargetVShiftNode(IntrData->Opc0, dl, Op.getSimpleValueType(),
17666 Op.getOperand(1), Op.getOperand(2), DAG);
17668 return getVectorMaskingNode(getTargetVShiftNode(IntrData->Opc0, dl,
17669 Op.getSimpleValueType(),
17671 Op.getOperand(2), DAG),
17672 Op.getOperand(4), Op.getOperand(3), Subtarget,
17674 case COMPRESS_EXPAND_IN_REG: {
17675 SDValue Mask = Op.getOperand(3);
17676 SDValue DataToCompress = Op.getOperand(1);
17677 SDValue PassThru = Op.getOperand(2);
17678 if (isAllOnes(Mask)) // return data as is
17679 return Op.getOperand(1);
17680 EVT VT = Op.getValueType();
17681 EVT MaskVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
17682 VT.getVectorNumElements());
17683 EVT BitcastVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
17684 Mask.getValueType().getSizeInBits());
17686 SDValue VMask = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MaskVT,
17687 DAG.getNode(ISD::BITCAST, dl, BitcastVT, Mask),
17688 DAG.getIntPtrConstant(0));
17690 return DAG.getNode(IntrData->Opc0, dl, VT, VMask, DataToCompress,
17694 SDValue Mask = Op.getOperand(3);
17695 EVT VT = Op.getValueType();
17696 EVT MaskVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
17697 VT.getVectorNumElements());
17698 EVT BitcastVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
17699 Mask.getValueType().getSizeInBits());
17701 SDValue VMask = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MaskVT,
17702 DAG.getNode(ISD::BITCAST, dl, BitcastVT, Mask),
17703 DAG.getIntPtrConstant(0));
17704 return DAG.getNode(IntrData->Opc0, dl, VT, VMask, Op.getOperand(1),
17713 default: return SDValue(); // Don't custom lower most intrinsics.
17715 case Intrinsic::x86_avx512_mask_valign_q_512:
17716 case Intrinsic::x86_avx512_mask_valign_d_512:
17717 // Vector source operands are swapped.
17718 return getVectorMaskingNode(DAG.getNode(X86ISD::VALIGN, dl,
17719 Op.getValueType(), Op.getOperand(2),
17722 Op.getOperand(5), Op.getOperand(4),
17725 // ptest and testp intrinsics. The intrinsics these come from are designed to
17726 // return an integer value rather than just set flags, so lower them to the ptest
17727 // or testp pattern plus a setcc for the result.
17728 case Intrinsic::x86_sse41_ptestz:
17729 case Intrinsic::x86_sse41_ptestc:
17730 case Intrinsic::x86_sse41_ptestnzc:
17731 case Intrinsic::x86_avx_ptestz_256:
17732 case Intrinsic::x86_avx_ptestc_256:
17733 case Intrinsic::x86_avx_ptestnzc_256:
17734 case Intrinsic::x86_avx_vtestz_ps:
17735 case Intrinsic::x86_avx_vtestc_ps:
17736 case Intrinsic::x86_avx_vtestnzc_ps:
17737 case Intrinsic::x86_avx_vtestz_pd:
17738 case Intrinsic::x86_avx_vtestc_pd:
17739 case Intrinsic::x86_avx_vtestnzc_pd:
17740 case Intrinsic::x86_avx_vtestz_ps_256:
17741 case Intrinsic::x86_avx_vtestc_ps_256:
17742 case Intrinsic::x86_avx_vtestnzc_ps_256:
17743 case Intrinsic::x86_avx_vtestz_pd_256:
17744 case Intrinsic::x86_avx_vtestc_pd_256:
17745 case Intrinsic::x86_avx_vtestnzc_pd_256: {
17746 bool IsTestPacked = false;
17749 default: llvm_unreachable("Bad fallthrough in Intrinsic lowering.");
17750 case Intrinsic::x86_avx_vtestz_ps:
17751 case Intrinsic::x86_avx_vtestz_pd:
17752 case Intrinsic::x86_avx_vtestz_ps_256:
17753 case Intrinsic::x86_avx_vtestz_pd_256:
17754 IsTestPacked = true; // Fallthrough
17755 case Intrinsic::x86_sse41_ptestz:
17756 case Intrinsic::x86_avx_ptestz_256:
17758 X86CC = X86::COND_E;
17760 case Intrinsic::x86_avx_vtestc_ps:
17761 case Intrinsic::x86_avx_vtestc_pd:
17762 case Intrinsic::x86_avx_vtestc_ps_256:
17763 case Intrinsic::x86_avx_vtestc_pd_256:
17764 IsTestPacked = true; // Fallthrough
17765 case Intrinsic::x86_sse41_ptestc:
17766 case Intrinsic::x86_avx_ptestc_256:
17768 X86CC = X86::COND_B;
17770 case Intrinsic::x86_avx_vtestnzc_ps:
17771 case Intrinsic::x86_avx_vtestnzc_pd:
17772 case Intrinsic::x86_avx_vtestnzc_ps_256:
17773 case Intrinsic::x86_avx_vtestnzc_pd_256:
17774 IsTestPacked = true; // Fallthrough
17775 case Intrinsic::x86_sse41_ptestnzc:
17776 case Intrinsic::x86_avx_ptestnzc_256:
17778 X86CC = X86::COND_A;
17782 SDValue LHS = Op.getOperand(1);
17783 SDValue RHS = Op.getOperand(2);
17784 unsigned TestOpc = IsTestPacked ? X86ISD::TESTP : X86ISD::PTEST;
17785 SDValue Test = DAG.getNode(TestOpc, dl, MVT::i32, LHS, RHS);
17786 SDValue CC = DAG.getConstant(X86CC, MVT::i8);
17787 SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8, CC, Test);
17788 return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC);
17790 case Intrinsic::x86_avx512_kortestz_w:
17791 case Intrinsic::x86_avx512_kortestc_w: {
17792 unsigned X86CC = (IntNo == Intrinsic::x86_avx512_kortestz_w)? X86::COND_E: X86::COND_B;
17793 SDValue LHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i1, Op.getOperand(1));
17794 SDValue RHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i1, Op.getOperand(2));
17795 SDValue CC = DAG.getConstant(X86CC, MVT::i8);
17796 SDValue Test = DAG.getNode(X86ISD::KORTEST, dl, MVT::i32, LHS, RHS);
17797 SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i1, CC, Test);
17798 return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC);
17801 case Intrinsic::x86_sse42_pcmpistria128:
17802 case Intrinsic::x86_sse42_pcmpestria128:
17803 case Intrinsic::x86_sse42_pcmpistric128:
17804 case Intrinsic::x86_sse42_pcmpestric128:
17805 case Intrinsic::x86_sse42_pcmpistrio128:
17806 case Intrinsic::x86_sse42_pcmpestrio128:
17807 case Intrinsic::x86_sse42_pcmpistris128:
17808 case Intrinsic::x86_sse42_pcmpestris128:
17809 case Intrinsic::x86_sse42_pcmpistriz128:
17810 case Intrinsic::x86_sse42_pcmpestriz128: {
17814 default: llvm_unreachable("Impossible intrinsic"); // Can't reach here.
17815 case Intrinsic::x86_sse42_pcmpistria128:
17816 Opcode = X86ISD::PCMPISTRI;
17817 X86CC = X86::COND_A;
17819 case Intrinsic::x86_sse42_pcmpestria128:
17820 Opcode = X86ISD::PCMPESTRI;
17821 X86CC = X86::COND_A;
17823 case Intrinsic::x86_sse42_pcmpistric128:
17824 Opcode = X86ISD::PCMPISTRI;
17825 X86CC = X86::COND_B;
17827 case Intrinsic::x86_sse42_pcmpestric128:
17828 Opcode = X86ISD::PCMPESTRI;
17829 X86CC = X86::COND_B;
17831 case Intrinsic::x86_sse42_pcmpistrio128:
17832 Opcode = X86ISD::PCMPISTRI;
17833 X86CC = X86::COND_O;
17835 case Intrinsic::x86_sse42_pcmpestrio128:
17836 Opcode = X86ISD::PCMPESTRI;
17837 X86CC = X86::COND_O;
17839 case Intrinsic::x86_sse42_pcmpistris128:
17840 Opcode = X86ISD::PCMPISTRI;
17841 X86CC = X86::COND_S;
17843 case Intrinsic::x86_sse42_pcmpestris128:
17844 Opcode = X86ISD::PCMPESTRI;
17845 X86CC = X86::COND_S;
17847 case Intrinsic::x86_sse42_pcmpistriz128:
17848 Opcode = X86ISD::PCMPISTRI;
17849 X86CC = X86::COND_E;
17851 case Intrinsic::x86_sse42_pcmpestriz128:
17852 Opcode = X86ISD::PCMPESTRI;
17853 X86CC = X86::COND_E;
17856 SmallVector<SDValue, 5> NewOps(Op->op_begin()+1, Op->op_end());
17857 SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
17858 SDValue PCMP = DAG.getNode(Opcode, dl, VTs, NewOps);
17859 SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
17860 DAG.getConstant(X86CC, MVT::i8),
17861 SDValue(PCMP.getNode(), 1));
17862 return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC);
17865 case Intrinsic::x86_sse42_pcmpistri128:
17866 case Intrinsic::x86_sse42_pcmpestri128: {
17868 if (IntNo == Intrinsic::x86_sse42_pcmpistri128)
17869 Opcode = X86ISD::PCMPISTRI;
17871 Opcode = X86ISD::PCMPESTRI;
17873 SmallVector<SDValue, 5> NewOps(Op->op_begin()+1, Op->op_end());
17874 SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
17875 return DAG.getNode(Opcode, dl, VTs, NewOps);
17880 static SDValue getGatherNode(unsigned Opc, SDValue Op, SelectionDAG &DAG,
17881 SDValue Src, SDValue Mask, SDValue Base,
17882 SDValue Index, SDValue ScaleOp, SDValue Chain,
17883 const X86Subtarget * Subtarget) {
17885 ConstantSDNode *C = dyn_cast<ConstantSDNode>(ScaleOp);
17886 assert(C && "Invalid scale type");
17887 SDValue Scale = DAG.getTargetConstant(C->getZExtValue(), MVT::i8);
17888 EVT MaskVT = MVT::getVectorVT(MVT::i1,
17889 Index.getSimpleValueType().getVectorNumElements());
17891 ConstantSDNode *MaskC = dyn_cast<ConstantSDNode>(Mask);
17893 MaskInReg = DAG.getTargetConstant(MaskC->getSExtValue(), MaskVT);
17895 MaskInReg = DAG.getNode(ISD::BITCAST, dl, MaskVT, Mask);
17896 SDVTList VTs = DAG.getVTList(Op.getValueType(), MaskVT, MVT::Other);
17897 SDValue Disp = DAG.getTargetConstant(0, MVT::i32);
17898 SDValue Segment = DAG.getRegister(0, MVT::i32);
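// Note: the machine node built below carries the standard x86 memory
// operand quintuple (base, scale, index, displacement, segment) together
// with the source, the mask register, and the chain.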
17899 if (Src.getOpcode() == ISD::UNDEF)
17900 Src = getZeroVector(Op.getValueType(), Subtarget, DAG, dl);
17901 SDValue Ops[] = {Src, MaskInReg, Base, Scale, Index, Disp, Segment, Chain};
17902 SDNode *Res = DAG.getMachineNode(Opc, dl, VTs, Ops);
17903 SDValue RetOps[] = { SDValue(Res, 0), SDValue(Res, 2) };
17904 return DAG.getMergeValues(RetOps, dl);
17907 static SDValue getScatterNode(unsigned Opc, SDValue Op, SelectionDAG &DAG,
17908 SDValue Src, SDValue Mask, SDValue Base,
17909 SDValue Index, SDValue ScaleOp, SDValue Chain) {
17911 ConstantSDNode *C = dyn_cast<ConstantSDNode>(ScaleOp);
17912 assert(C && "Invalid scale type");
17913 SDValue Scale = DAG.getTargetConstant(C->getZExtValue(), MVT::i8);
17914 SDValue Disp = DAG.getTargetConstant(0, MVT::i32);
17915 SDValue Segment = DAG.getRegister(0, MVT::i32);
17916 EVT MaskVT = MVT::getVectorVT(MVT::i1,
17917 Index.getSimpleValueType().getVectorNumElements());
17919 ConstantSDNode *MaskC = dyn_cast<ConstantSDNode>(Mask);
17921 MaskInReg = DAG.getTargetConstant(MaskC->getSExtValue(), MaskVT);
17923 MaskInReg = DAG.getNode(ISD::BITCAST, dl, MaskVT, Mask);
17924 SDVTList VTs = DAG.getVTList(MaskVT, MVT::Other);
17925 SDValue Ops[] = {Base, Scale, Index, Disp, Segment, MaskInReg, Src, Chain};
17926 SDNode *Res = DAG.getMachineNode(Opc, dl, VTs, Ops);
17927 return SDValue(Res, 1);
17930 static SDValue getPrefetchNode(unsigned Opc, SDValue Op, SelectionDAG &DAG,
17931 SDValue Mask, SDValue Base, SDValue Index,
17932 SDValue ScaleOp, SDValue Chain) {
17934 ConstantSDNode *C = dyn_cast<ConstantSDNode>(ScaleOp);
17935 assert(C && "Invalid scale type");
17936 SDValue Scale = DAG.getTargetConstant(C->getZExtValue(), MVT::i8);
17937 SDValue Disp = DAG.getTargetConstant(0, MVT::i32);
17938 SDValue Segment = DAG.getRegister(0, MVT::i32);
17940 MVT::getVectorVT(MVT::i1, Index.getSimpleValueType().getVectorNumElements());
17942 ConstantSDNode *MaskC = dyn_cast<ConstantSDNode>(Mask);
17944 MaskInReg = DAG.getTargetConstant(MaskC->getSExtValue(), MaskVT);
17946 MaskInReg = DAG.getNode(ISD::BITCAST, dl, MaskVT, Mask);
17947 //SDVTList VTs = DAG.getVTList(MVT::Other);
17948 SDValue Ops[] = {MaskInReg, Base, Scale, Index, Disp, Segment, Chain};
17949 SDNode *Res = DAG.getMachineNode(Opc, dl, MVT::Other, Ops);
17950 return SDValue(Res, 0);
17953 // getReadPerformanceCounter - Handles the lowering of builtin intrinsics that
17954 // read performance monitor counters (x86_rdpmc).
17955 static void getReadPerformanceCounter(SDNode *N, SDLoc DL,
17956 SelectionDAG &DAG, const X86Subtarget *Subtarget,
17957 SmallVectorImpl<SDValue> &Results) {
17958 assert(N->getNumOperands() == 3 && "Unexpected number of operands!");
17959 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue);
17962 // The ECX register is used to select the index of the performance counter to read.
17964 SDValue Chain = DAG.getCopyToReg(N->getOperand(0), DL, X86::ECX,
17966 SDValue rd = DAG.getNode(X86ISD::RDPMC_DAG, DL, Tys, Chain);
17968 // Reads the content of a 64-bit performance counter and returns it in the
17969 // registers EDX:EAX.
17970 if (Subtarget->is64Bit()) {
17971 LO = DAG.getCopyFromReg(rd, DL, X86::RAX, MVT::i64, rd.getValue(1));
17972 HI = DAG.getCopyFromReg(LO.getValue(1), DL, X86::RDX, MVT::i64,
17975 LO = DAG.getCopyFromReg(rd, DL, X86::EAX, MVT::i32, rd.getValue(1));
17976 HI = DAG.getCopyFromReg(LO.getValue(1), DL, X86::EDX, MVT::i32,
17979 Chain = HI.getValue(1);
17981 if (Subtarget->is64Bit()) {
17982 // The EAX register is loaded with the low-order 32 bits. The EDX register
17983 // is loaded with the supported high-order bits of the counter.
17984 SDValue Tmp = DAG.getNode(ISD::SHL, DL, MVT::i64, HI,
17985 DAG.getConstant(32, MVT::i8));
17986 Results.push_back(DAG.getNode(ISD::OR, DL, MVT::i64, LO, Tmp));
17987 Results.push_back(Chain);
17991 // Use a buildpair to merge the two 32-bit values into a 64-bit one.
17992 SDValue Ops[] = { LO, HI };
17993 SDValue Pair = DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, Ops);
17994 Results.push_back(Pair);
17995 Results.push_back(Chain);
17998 // getReadTimeStampCounter - Handles the lowering of builtin intrinsics that
17999 // read the time stamp counter (x86_rdtsc and x86_rdtscp). This function is
18000 // also used to custom lower READCYCLECOUNTER nodes.
18001 static void getReadTimeStampCounter(SDNode *N, SDLoc DL, unsigned Opcode,
18002 SelectionDAG &DAG, const X86Subtarget *Subtarget,
18003 SmallVectorImpl<SDValue> &Results) {
18004 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue);
18005 SDValue rd = DAG.getNode(Opcode, DL, Tys, N->getOperand(0));
18008 // The processor's time-stamp counter (a 64-bit MSR) is stored into the
18009 // EDX:EAX registers. EDX is loaded with the high-order 32 bits of the MSR
18010 // and the EAX register is loaded with the low-order 32 bits.
18011 if (Subtarget->is64Bit()) {
18012 LO = DAG.getCopyFromReg(rd, DL, X86::RAX, MVT::i64, rd.getValue(1));
18013 HI = DAG.getCopyFromReg(LO.getValue(1), DL, X86::RDX, MVT::i64,
18016 LO = DAG.getCopyFromReg(rd, DL, X86::EAX, MVT::i32, rd.getValue(1));
18017 HI = DAG.getCopyFromReg(LO.getValue(1), DL, X86::EDX, MVT::i32,
18020 SDValue Chain = HI.getValue(1);
18022 if (Opcode == X86ISD::RDTSCP_DAG) {
18023 assert(N->getNumOperands() == 3 && "Unexpected number of operands!");
18025 // The RDTSCP instruction also loads the IA32_TSC_AUX MSR (address C000_0103H) into
18026 // the ECX register. Add 'ecx' explicitly to the chain.
18027 SDValue ecx = DAG.getCopyFromReg(Chain, DL, X86::ECX, MVT::i32,
18029 // Explicitly store the content of ECX to the location passed as input
18030 // to the 'rdtscp' intrinsic.
18031 Chain = DAG.getStore(ecx.getValue(1), DL, ecx, N->getOperand(2),
18032 MachinePointerInfo(), false, false, 0);
18035 if (Subtarget->is64Bit()) {
18036 // The EDX register is loaded with the high-order 32 bits of the MSR, and
18037 // the EAX register is loaded with the low-order 32 bits.
18038 SDValue Tmp = DAG.getNode(ISD::SHL, DL, MVT::i64, HI,
18039 DAG.getConstant(32, MVT::i8));
18040 Results.push_back(DAG.getNode(ISD::OR, DL, MVT::i64, LO, Tmp));
18041 Results.push_back(Chain);
18045 // Use a buildpair to merge the two 32-bit values into a 64-bit one.
18046 SDValue Ops[] = { LO, HI };
18047 SDValue Pair = DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, Ops);
18048 Results.push_back(Pair);
18049 Results.push_back(Chain);
18052 static SDValue LowerREADCYCLECOUNTER(SDValue Op, const X86Subtarget *Subtarget,
18053 SelectionDAG &DAG) {
18054 SmallVector<SDValue, 2> Results;
18056 getReadTimeStampCounter(Op.getNode(), DL, X86ISD::RDTSC_DAG, DAG, Subtarget,
18058 return DAG.getMergeValues(Results, DL);
18062 static SDValue LowerINTRINSIC_W_CHAIN(SDValue Op, const X86Subtarget *Subtarget,
18063 SelectionDAG &DAG) {
18064 unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
18066 const IntrinsicData* IntrData = getIntrinsicWithChain(IntNo);
18071 switch(IntrData->Type) {
18073 llvm_unreachable("Unknown Intrinsic Type");
18077 // Emit the node with the right value type.
18078 SDVTList VTs = DAG.getVTList(Op->getValueType(0), MVT::Glue, MVT::Other);
18079 SDValue Result = DAG.getNode(IntrData->Opc0, dl, VTs, Op.getOperand(0));
18081 // If the value returned by RDRAND/RDSEED was valid (CF=1), return 1.
18082 // Otherwise return the value from Rand, which is always 0, cast to i32.
18083 SDValue Ops[] = { DAG.getZExtOrTrunc(Result, dl, Op->getValueType(1)),
18084 DAG.getConstant(1, Op->getValueType(1)),
18085 DAG.getConstant(X86::COND_B, MVT::i32),
18086 SDValue(Result.getNode(), 1) };
18087 SDValue isValid = DAG.getNode(X86ISD::CMOV, dl,
18088 DAG.getVTList(Op->getValueType(1), MVT::Glue),
18091 // Return { result, isValid, chain }.
18092 return DAG.getNode(ISD::MERGE_VALUES, dl, Op->getVTList(), Result, isValid,
18093 SDValue(Result.getNode(), 2));
18096 // gather(v1, mask, index, base, scale);
18097 SDValue Chain = Op.getOperand(0);
18098 SDValue Src = Op.getOperand(2);
18099 SDValue Base = Op.getOperand(3);
18100 SDValue Index = Op.getOperand(4);
18101 SDValue Mask = Op.getOperand(5);
18102 SDValue Scale = Op.getOperand(6);
18103 return getGatherNode(IntrData->Opc0, Op, DAG, Src, Mask, Base, Index, Scale, Chain,
18107 // scatter(base, mask, index, v1, scale);
18108 SDValue Chain = Op.getOperand(0);
18109 SDValue Base = Op.getOperand(2);
18110 SDValue Mask = Op.getOperand(3);
18111 SDValue Index = Op.getOperand(4);
18112 SDValue Src = Op.getOperand(5);
18113 SDValue Scale = Op.getOperand(6);
18114 return getScatterNode(IntrData->Opc0, Op, DAG, Src, Mask, Base, Index, Scale, Chain);
18117 SDValue Hint = Op.getOperand(6);
18119 if (dyn_cast<ConstantSDNode>(Hint) == nullptr ||
18120 (HintVal = cast<ConstantSDNode>(Hint)->getZExtValue()) > 1)
18121 llvm_unreachable("Wrong prefetch hint in intrinsic: should be 0 or 1");
18122 unsigned Opcode = (HintVal ? IntrData->Opc1 : IntrData->Opc0);
18123 SDValue Chain = Op.getOperand(0);
18124 SDValue Mask = Op.getOperand(2);
18125 SDValue Index = Op.getOperand(3);
18126 SDValue Base = Op.getOperand(4);
18127 SDValue Scale = Op.getOperand(5);
18128 return getPrefetchNode(Opcode, Op, DAG, Mask, Base, Index, Scale, Chain);
18130 // Read Time Stamp Counter (RDTSC) and Processor ID (RDTSCP).
18132 SmallVector<SDValue, 2> Results;
18133 getReadTimeStampCounter(Op.getNode(), dl, IntrData->Opc0, DAG, Subtarget, Results);
18134 return DAG.getMergeValues(Results, dl);
18136 // Read Performance Monitoring Counters.
18138 SmallVector<SDValue, 2> Results;
18139 getReadPerformanceCounter(Op.getNode(), dl, DAG, Subtarget, Results);
18140 return DAG.getMergeValues(Results, dl);
18142 // XTEST intrinsics.
18144 SDVTList VTs = DAG.getVTList(Op->getValueType(0), MVT::Other);
18145 SDValue InTrans = DAG.getNode(IntrData->Opc0, dl, VTs, Op.getOperand(0));
18146 SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
18147 DAG.getConstant(X86::COND_NE, MVT::i8),
18149 SDValue Ret = DAG.getNode(ISD::ZERO_EXTEND, dl, Op->getValueType(0), SetCC);
18150 return DAG.getNode(ISD::MERGE_VALUES, dl, Op->getVTList(),
18151 Ret, SDValue(InTrans.getNode(), 1));
18155 SmallVector<SDValue, 2> Results;
18156 SDVTList CFVTs = DAG.getVTList(Op->getValueType(0), MVT::Other);
18157 SDVTList VTs = DAG.getVTList(Op.getOperand(3)->getValueType(0), MVT::Other);
18158 SDValue GenCF = DAG.getNode(X86ISD::ADD, dl, CFVTs, Op.getOperand(2),
18159 DAG.getConstant(-1, MVT::i8));
18160 SDValue Res = DAG.getNode(IntrData->Opc0, dl, VTs, Op.getOperand(3),
18161 Op.getOperand(4), GenCF.getValue(1));
18162 SDValue Store = DAG.getStore(Op.getOperand(0), dl, Res.getValue(0),
18163 Op.getOperand(5), MachinePointerInfo(),
18165 SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
18166 DAG.getConstant(X86::COND_B, MVT::i8),
18168 Results.push_back(SetCC);
18169 Results.push_back(Store);
18170 return DAG.getMergeValues(Results, dl);
18172 case COMPRESS_TO_MEM: {
18174 SDValue Mask = Op.getOperand(4);
18175 SDValue DataToCompress = Op.getOperand(3);
18176 SDValue Addr = Op.getOperand(2);
18177 SDValue Chain = Op.getOperand(0);
18179 if (isAllOnes(Mask)) // return just a store
18180 return DAG.getStore(Chain, dl, DataToCompress, Addr,
18181 MachinePointerInfo(), false, false, 0);
18183 EVT VT = DataToCompress.getValueType();
18184 EVT MaskVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
18185 VT.getVectorNumElements());
18186 EVT BitcastVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
18187 Mask.getValueType().getSizeInBits());
18188 SDValue VMask = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MaskVT,
18189 DAG.getNode(ISD::BITCAST, dl, BitcastVT, Mask),
18190 DAG.getIntPtrConstant(0));
18192 SDValue Compressed = DAG.getNode(IntrData->Opc0, dl, VT, VMask,
18193 DataToCompress, DAG.getUNDEF(VT));
18194 return DAG.getStore(Chain, dl, Compressed, Addr,
18195 MachinePointerInfo(), false, false, 0);
18197 case EXPAND_FROM_MEM: {
18199 SDValue Mask = Op.getOperand(4);
18200 SDValue PathThru = Op.getOperand(3);
18201 SDValue Addr = Op.getOperand(2);
18202 SDValue Chain = Op.getOperand(0);
18203 EVT VT = Op.getValueType();
18205 if (isAllOnes(Mask)) // return just a load
18206 return DAG.getLoad(VT, dl, Chain, Addr, MachinePointerInfo(), false, false,
18208 EVT MaskVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
18209 VT.getVectorNumElements());
18210 EVT BitcastVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
18211 Mask.getValueType().getSizeInBits());
18212 SDValue VMask = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MaskVT,
18213 DAG.getNode(ISD::BITCAST, dl, BitcastVT, Mask),
18214 DAG.getIntPtrConstant(0));
18216 SDValue DataToExpand = DAG.getLoad(VT, dl, Chain, Addr, MachinePointerInfo(),
18217 false, false, false, 0);
18219 SmallVector<SDValue, 2> Results;
18220 Results.push_back(DAG.getNode(IntrData->Opc0, dl, VT, VMask, DataToExpand,
18222 Results.push_back(Chain);
18223 return DAG.getMergeValues(Results, dl);
18228 SDValue X86TargetLowering::LowerRETURNADDR(SDValue Op,
18229 SelectionDAG &DAG) const {
18230 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
18231 MFI->setReturnAddressIsTaken(true);
18233 if (verifyReturnAddressArgumentIsConstant(Op, DAG))
18236 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
18238 EVT PtrVT = getPointerTy();
18241 SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
18242 const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
18243 SDValue Offset = DAG.getConstant(RegInfo->getSlotSize(), PtrVT);
18244 return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(),
18245 DAG.getNode(ISD::ADD, dl, PtrVT,
18246 FrameAddr, Offset),
18247 MachinePointerInfo(), false, false, false, 0);
18250 // Just load the return address.
18251 SDValue RetAddrFI = getReturnAddressFrameIndex(DAG);
18252 return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(),
18253 RetAddrFI, MachinePointerInfo(), false, false, false, 0);
18256 SDValue X86TargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const {
18257 MachineFunction &MF = DAG.getMachineFunction();
18258 MachineFrameInfo *MFI = MF.getFrameInfo();
18259 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
18260 const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
18261 EVT VT = Op.getValueType();
18263 MFI->setFrameAddressIsTaken(true);
18265 if (MF.getTarget().getMCAsmInfo()->usesWindowsCFI()) {
18266 // Depth > 0 makes no sense on targets which use Windows unwind codes. It
18267 // is not possible to crawl up the stack without looking at the unwind codes simultaneously.
18269 int FrameAddrIndex = FuncInfo->getFAIndex();
18270 if (!FrameAddrIndex) {
18271 // Set up a frame object for the return address.
18272 unsigned SlotSize = RegInfo->getSlotSize();
18273 FrameAddrIndex = MF.getFrameInfo()->CreateFixedObject(
18274 SlotSize, /*Offset=*/INT64_MIN, /*IsImmutable=*/false);
18275 FuncInfo->setFAIndex(FrameAddrIndex);
18277 return DAG.getFrameIndex(FrameAddrIndex, VT);
18280 unsigned FrameReg =
18281 RegInfo->getPtrSizedFrameRegister(DAG.getMachineFunction());
18282 SDLoc dl(Op); // FIXME probably not meaningful
18283 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
18284 assert(((FrameReg == X86::RBP && VT == MVT::i64) ||
18285 (FrameReg == X86::EBP && VT == MVT::i32)) &&
18286 "Invalid Frame Register!");
18287 SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, VT);
18289 FrameAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), FrameAddr,
18290 MachinePointerInfo(),
18291 false, false, false, 0);
18295 // FIXME? Maybe this could be a TableGen attribute on some registers and
18296 // this table could be generated automatically from RegInfo.
18297 unsigned X86TargetLowering::getRegisterByName(const char* RegName,
18299 unsigned Reg = StringSwitch<unsigned>(RegName)
18300 .Case("esp", X86::ESP)
18301 .Case("rsp", X86::RSP)
18305 report_fatal_error("Invalid register name global variable");
18308 SDValue X86TargetLowering::LowerFRAME_TO_ARGS_OFFSET(SDValue Op,
18309 SelectionDAG &DAG) const {
18310 const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
18311 return DAG.getIntPtrConstant(2 * RegInfo->getSlotSize());
18314 SDValue X86TargetLowering::LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const {
18315 SDValue Chain = Op.getOperand(0);
18316 SDValue Offset = Op.getOperand(1);
18317 SDValue Handler = Op.getOperand(2);
18320 EVT PtrVT = getPointerTy();
18321 const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
18322 unsigned FrameReg = RegInfo->getFrameRegister(DAG.getMachineFunction());
18323 assert(((FrameReg == X86::RBP && PtrVT == MVT::i64) ||
18324 (FrameReg == X86::EBP && PtrVT == MVT::i32)) &&
18325 "Invalid Frame Register!");
18326 SDValue Frame = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, PtrVT);
18327 unsigned StoreAddrReg = (PtrVT == MVT::i64) ? X86::RCX : X86::ECX;
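// Frame + SlotSize is the caller's return-address slot; after adding the
// caller-supplied Offset, the Handler is stored there and the slot's address
// is handed to EH_RETURN in StoreAddrReg.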
18329 SDValue StoreAddr = DAG.getNode(ISD::ADD, dl, PtrVT, Frame,
18330 DAG.getIntPtrConstant(RegInfo->getSlotSize()));
18331 StoreAddr = DAG.getNode(ISD::ADD, dl, PtrVT, StoreAddr, Offset);
18332 Chain = DAG.getStore(Chain, dl, Handler, StoreAddr, MachinePointerInfo(),
18334 Chain = DAG.getCopyToReg(Chain, dl, StoreAddrReg, StoreAddr);
18336 return DAG.getNode(X86ISD::EH_RETURN, dl, MVT::Other, Chain,
18337 DAG.getRegister(StoreAddrReg, PtrVT));
18340 SDValue X86TargetLowering::lowerEH_SJLJ_SETJMP(SDValue Op,
18341 SelectionDAG &DAG) const {
18343 return DAG.getNode(X86ISD::EH_SJLJ_SETJMP, DL,
18344 DAG.getVTList(MVT::i32, MVT::Other),
18345 Op.getOperand(0), Op.getOperand(1));
18348 SDValue X86TargetLowering::lowerEH_SJLJ_LONGJMP(SDValue Op,
18349 SelectionDAG &DAG) const {
18351 return DAG.getNode(X86ISD::EH_SJLJ_LONGJMP, DL, MVT::Other,
18352 Op.getOperand(0), Op.getOperand(1));
18355 static SDValue LowerADJUST_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) {
18356 return Op.getOperand(0);
18359 SDValue X86TargetLowering::LowerINIT_TRAMPOLINE(SDValue Op,
18360 SelectionDAG &DAG) const {
18361 SDValue Root = Op.getOperand(0);
18362 SDValue Trmp = Op.getOperand(1); // trampoline
18363 SDValue FPtr = Op.getOperand(2); // nested function
18364 SDValue Nest = Op.getOperand(3); // 'nest' parameter value
18367 const Value *TrmpAddr = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();
18368 const TargetRegisterInfo *TRI = Subtarget->getRegisterInfo();
18370 if (Subtarget->is64Bit()) {
18371 SDValue OutChains[6];
18373 // Large code-model.
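// For illustration, the bytes stored below assemble to:
//   offset  0: 49 BB <imm64>   movabsq $FPtr, %r11
//   offset 10: 49 BA <imm64>   movabsq $Nest, %r10
//   offset 20: 49 FF E3        jmpq   *%r11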
18374 const unsigned char JMP64r = 0xFF; // 64-bit jmp through register opcode.
18375 const unsigned char MOV64ri = 0xB8; // X86::MOV64ri opcode.
18377 const unsigned char N86R10 = TRI->getEncodingValue(X86::R10) & 0x7;
18378 const unsigned char N86R11 = TRI->getEncodingValue(X86::R11) & 0x7;
18380 const unsigned char REX_WB = 0x40 | 0x08 | 0x01; // REX prefix
18382 // Load the pointer to the nested function into R11.
18383 unsigned OpCode = ((MOV64ri | N86R11) << 8) | REX_WB; // movabsq r11
18384 SDValue Addr = Trmp;
18385 OutChains[0] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, MVT::i16),
18386 Addr, MachinePointerInfo(TrmpAddr),
18389 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
18390 DAG.getConstant(2, MVT::i64));
18391 OutChains[1] = DAG.getStore(Root, dl, FPtr, Addr,
18392 MachinePointerInfo(TrmpAddr, 2),
18395 // Load the 'nest' parameter value into R10.
18396 // R10 is specified in X86CallingConv.td
18397 OpCode = ((MOV64ri | N86R10) << 8) | REX_WB; // movabsq r10
18398 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
18399 DAG.getConstant(10, MVT::i64));
18400 OutChains[2] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, MVT::i16),
18401 Addr, MachinePointerInfo(TrmpAddr, 10),
18404 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
18405 DAG.getConstant(12, MVT::i64));
18406 OutChains[3] = DAG.getStore(Root, dl, Nest, Addr,
18407 MachinePointerInfo(TrmpAddr, 12),
18410 // Jump to the nested function.
18411 OpCode = (JMP64r << 8) | REX_WB; // jmpq *...
18412 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
18413 DAG.getConstant(20, MVT::i64));
18414 OutChains[4] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, MVT::i16),
18415 Addr, MachinePointerInfo(TrmpAddr, 20),
18418 unsigned char ModRM = N86R11 | (4 << 3) | (3 << 6); // ...r11
18419 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
18420 DAG.getConstant(22, MVT::i64));
18421 OutChains[5] = DAG.getStore(Root, dl, DAG.getConstant(ModRM, MVT::i8), Addr,
18422 MachinePointerInfo(TrmpAddr, 22),
18425 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
18427 const Function *Func =
18428 cast<Function>(cast<SrcValueSDNode>(Op.getOperand(5))->getValue());
18429 CallingConv::ID CC = Func->getCallingConv();
18434 llvm_unreachable("Unsupported calling convention");
18435 case CallingConv::C:
18436 case CallingConv::X86_StdCall: {
18437 // Pass 'nest' parameter in ECX.
18438 // Must be kept in sync with X86CallingConv.td
18439 NestReg = X86::ECX;
18441 // Check that ECX wasn't needed by an 'inreg' parameter.
18442 FunctionType *FTy = Func->getFunctionType();
18443 const AttributeSet &Attrs = Func->getAttributes();
18445 if (!Attrs.isEmpty() && !Func->isVarArg()) {
18446 unsigned InRegCount = 0;
18449 for (FunctionType::param_iterator I = FTy->param_begin(),
18450 E = FTy->param_end(); I != E; ++I, ++Idx)
18451 if (Attrs.hasAttribute(Idx, Attribute::InReg))
18452 // FIXME: should only count parameters that are lowered to integers.
18453 InRegCount += (TD->getTypeSizeInBits(*I) + 31) / 32;
18455 if (InRegCount > 2) {
18456 report_fatal_error("Nest register in use - reduce number of inreg"
18462 case CallingConv::X86_FastCall:
18463 case CallingConv::X86_ThisCall:
18464 case CallingConv::Fast:
18465 // Pass 'nest' parameter in EAX.
18466 // Must be kept in sync with X86CallingConv.td
18467 NestReg = X86::EAX;
18471 SDValue OutChains[4];
18472 SDValue Addr, Disp;
18474 Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
18475 DAG.getConstant(10, MVT::i32));
18476 Disp = DAG.getNode(ISD::SUB, dl, MVT::i32, FPtr, Addr);
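// Disp is measured from the end of the 10-byte trampoline: the jmp rel32
// emitted at offset 5 encodes a displacement relative to the next
// instruction, which is Trmp + 10.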
18478 // This is storing the opcode for MOV32ri.
18479 const unsigned char MOV32ri = 0xB8; // X86::MOV32ri's opcode byte.
18480 const unsigned char N86Reg = TRI->getEncodingValue(NestReg) & 0x7;
18481 OutChains[0] = DAG.getStore(Root, dl,
18482 DAG.getConstant(MOV32ri|N86Reg, MVT::i8),
18483 Trmp, MachinePointerInfo(TrmpAddr),
18486 Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
18487 DAG.getConstant(1, MVT::i32));
18488 OutChains[1] = DAG.getStore(Root, dl, Nest, Addr,
18489 MachinePointerInfo(TrmpAddr, 1),
18492 const unsigned char JMP = 0xE9; // jmp <32bit dst> opcode.
18493 Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
18494 DAG.getConstant(5, MVT::i32));
18495 OutChains[2] = DAG.getStore(Root, dl, DAG.getConstant(JMP, MVT::i8), Addr,
18496 MachinePointerInfo(TrmpAddr, 5),
18499 Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
18500 DAG.getConstant(6, MVT::i32));
18501 OutChains[3] = DAG.getStore(Root, dl, Disp, Addr,
18502 MachinePointerInfo(TrmpAddr, 6),
18505 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
18509 SDValue X86TargetLowering::LowerFLT_ROUNDS_(SDValue Op,
18510 SelectionDAG &DAG) const {
18512 The rounding mode is in bits 11:10 of FPSR, and has the following
18514 00 Round to nearest
18519 FLT_ROUNDS, on the other hand, expects the following:
18526 To perform the conversion, we do:
18527 (((((FPSR & 0x800) >> 11) | ((FPSR & 0x400) >> 9)) + 1) & 3)
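  As a sanity check (bit 11 of FPSR lands in bit 0 of the result, bit 10 in
  bit 1):
    00 (nearest)  -> (0|0) + 1 = 1           -> FLT_ROUNDS 1, round to nearest
    01 (-inf)     -> (0|2) + 1 = 3           -> FLT_ROUNDS 3, round to -inf
    10 (+inf)     -> (1|0) + 1 = 2           -> FLT_ROUNDS 2, round to +inf
    11 (zero)     -> (1|2) + 1 = 4, & 3 = 0  -> FLT_ROUNDS 0, round to zero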
18530 MachineFunction &MF = DAG.getMachineFunction();
18531 const TargetFrameLowering &TFI = *Subtarget->getFrameLowering();
18532 unsigned StackAlignment = TFI.getStackAlignment();
18533 MVT VT = Op.getSimpleValueType();
18536 // Save FP Control Word to stack slot
18537 int SSFI = MF.getFrameInfo()->CreateStackObject(2, StackAlignment, false);
18538 SDValue StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
18540 MachineMemOperand *MMO =
18541 MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI),
18542 MachineMemOperand::MOStore, 2, 2);
18544 SDValue Ops[] = { DAG.getEntryNode(), StackSlot };
18545 SDValue Chain = DAG.getMemIntrinsicNode(X86ISD::FNSTCW16m, DL,
18546 DAG.getVTList(MVT::Other),
18547 Ops, MVT::i16, MMO);
18549 // Load FP Control Word from stack slot
18550 SDValue CWD = DAG.getLoad(MVT::i16, DL, Chain, StackSlot,
18551 MachinePointerInfo(), false, false, false, 0);
18553 // Transform as necessary
18555 DAG.getNode(ISD::SRL, DL, MVT::i16,
18556 DAG.getNode(ISD::AND, DL, MVT::i16,
18557 CWD, DAG.getConstant(0x800, MVT::i16)),
18558 DAG.getConstant(11, MVT::i8));
18560 DAG.getNode(ISD::SRL, DL, MVT::i16,
18561 DAG.getNode(ISD::AND, DL, MVT::i16,
18562 CWD, DAG.getConstant(0x400, MVT::i16)),
18563 DAG.getConstant(9, MVT::i8));
18566 DAG.getNode(ISD::AND, DL, MVT::i16,
18567 DAG.getNode(ISD::ADD, DL, MVT::i16,
18568 DAG.getNode(ISD::OR, DL, MVT::i16, CWD1, CWD2),
18569 DAG.getConstant(1, MVT::i16)),
18570 DAG.getConstant(3, MVT::i16));
18572 return DAG.getNode((VT.getSizeInBits() < 16 ?
18573 ISD::TRUNCATE : ISD::ZERO_EXTEND), DL, VT, RetVal);
18576 static SDValue LowerCTLZ(SDValue Op, SelectionDAG &DAG) {
18577 MVT VT = Op.getSimpleValueType();
18579 unsigned NumBits = VT.getSizeInBits();
18582 Op = Op.getOperand(0);
18583 if (VT == MVT::i8) {
18584 // Zero-extend to i32 since there is no i8 bsr.
18586 Op = DAG.getNode(ISD::ZERO_EXTEND, dl, OpVT, Op);
18589 // Issue a bsr (scan bits in reverse) which also sets EFLAGS.
18590 SDVTList VTs = DAG.getVTList(OpVT, MVT::i32);
18591 Op = DAG.getNode(X86ISD::BSR, dl, VTs, Op);
18593 // If src is zero (i.e. bsr sets ZF), returns NumBits.
18596 DAG.getConstant(NumBits+NumBits-1, OpVT),
18597 DAG.getConstant(X86::COND_E, MVT::i8),
18600 Op = DAG.getNode(X86ISD::CMOV, dl, OpVT, Ops);
18602 // Finally xor with NumBits-1.
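// (bsr yields the index of the highest set bit, and since NumBits is a
//  power of two, (NumBits-1) - bsr(x) == bsr(x) ^ (NumBits-1). The
//  2*NumBits-1 value selected above for zero inputs likewise becomes
//  NumBits after this xor.)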
18603 Op = DAG.getNode(ISD::XOR, dl, OpVT, Op, DAG.getConstant(NumBits-1, OpVT));
18606 Op = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Op);
18610 static SDValue LowerCTLZ_ZERO_UNDEF(SDValue Op, SelectionDAG &DAG) {
18611 MVT VT = Op.getSimpleValueType();
18613 unsigned NumBits = VT.getSizeInBits();
18616 Op = Op.getOperand(0);
18617 if (VT == MVT::i8) {
18618 // Zero-extend to i32 since there is no i8 bsr.
18620 Op = DAG.getNode(ISD::ZERO_EXTEND, dl, OpVT, Op);
18623 // Issue a bsr (scan bits in reverse).
18624 SDVTList VTs = DAG.getVTList(OpVT, MVT::i32);
18625 Op = DAG.getNode(X86ISD::BSR, dl, VTs, Op);
18627 // And xor with NumBits-1.
18628 Op = DAG.getNode(ISD::XOR, dl, OpVT, Op, DAG.getConstant(NumBits-1, OpVT));
18631 Op = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Op);
18635 static SDValue LowerCTTZ(SDValue Op, SelectionDAG &DAG) {
18636 MVT VT = Op.getSimpleValueType();
18637 unsigned NumBits = VT.getSizeInBits();
18639 Op = Op.getOperand(0);
18641 // Issue a bsf (scan bits forward) which also sets EFLAGS.
18642 SDVTList VTs = DAG.getVTList(VT, MVT::i32);
18643 Op = DAG.getNode(X86ISD::BSF, dl, VTs, Op);
18645 // If src is zero (i.e. bsf sets ZF), returns NumBits.
18648 DAG.getConstant(NumBits, VT),
18649 DAG.getConstant(X86::COND_E, MVT::i8),
18652 return DAG.getNode(X86ISD::CMOV, dl, VT, Ops);
18655 // Lower256IntArith - Break a 256-bit integer operation into two new 128-bit
18656 // ones, and then concatenate the result back.
18657 static SDValue Lower256IntArith(SDValue Op, SelectionDAG &DAG) {
18658 MVT VT = Op.getSimpleValueType();
18660 assert(VT.is256BitVector() && VT.isInteger() &&
18661 "Unsupported value type for operation");
18663 unsigned NumElems = VT.getVectorNumElements();
18666 // Extract the LHS vectors
18667 SDValue LHS = Op.getOperand(0);
18668 SDValue LHS1 = Extract128BitVector(LHS, 0, DAG, dl);
18669 SDValue LHS2 = Extract128BitVector(LHS, NumElems/2, DAG, dl);
18671 // Extract the RHS vectors
18672 SDValue RHS = Op.getOperand(1);
18673 SDValue RHS1 = Extract128BitVector(RHS, 0, DAG, dl);
18674 SDValue RHS2 = Extract128BitVector(RHS, NumElems/2, DAG, dl);
18676 MVT EltVT = VT.getVectorElementType();
18677 MVT NewVT = MVT::getVectorVT(EltVT, NumElems/2);
18679 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT,
18680 DAG.getNode(Op.getOpcode(), dl, NewVT, LHS1, RHS1),
18681 DAG.getNode(Op.getOpcode(), dl, NewVT, LHS2, RHS2));
18684 static SDValue LowerADD(SDValue Op, SelectionDAG &DAG) {
18685 assert(Op.getSimpleValueType().is256BitVector() &&
18686 Op.getSimpleValueType().isInteger() &&
18687 "Only handle AVX 256-bit vector integer operation");
18688 return Lower256IntArith(Op, DAG);
18691 static SDValue LowerSUB(SDValue Op, SelectionDAG &DAG) {
18692 assert(Op.getSimpleValueType().is256BitVector() &&
18693 Op.getSimpleValueType().isInteger() &&
18694 "Only handle AVX 256-bit vector integer operation");
18695 return Lower256IntArith(Op, DAG);
18698 static SDValue LowerMUL(SDValue Op, const X86Subtarget *Subtarget,
18699 SelectionDAG &DAG) {
18701 MVT VT = Op.getSimpleValueType();
18703 // Decompose 256-bit ops into smaller 128-bit ops.
18704 if (VT.is256BitVector() && !Subtarget->hasInt256())
18705 return Lower256IntArith(Op, DAG);
18707 SDValue A = Op.getOperand(0);
18708 SDValue B = Op.getOperand(1);
18710 // Lower v4i32 mul as 2x shuffle, 2x pmuludq, 2x shuffle.
18711 if (VT == MVT::v4i32) {
18712 assert(Subtarget->hasSSE2() && !Subtarget->hasSSE41() &&
18713 "Should not custom lower when pmuldq is available!");
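// SSE2 has no 32x32->32-bit vector multiply (PMULLD is SSE4.1), so the
// product is assembled from PMULUDQ, which multiplies the even 32-bit lanes
// into 64-bit results; the odd lanes are handled by shuffling them into even
// positions first and merging the two partial products at the end.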
18715 // Extract the odd parts.
18716 static const int UnpackMask[] = { 1, -1, 3, -1 };
18717 SDValue Aodds = DAG.getVectorShuffle(VT, dl, A, A, UnpackMask);
18718 SDValue Bodds = DAG.getVectorShuffle(VT, dl, B, B, UnpackMask);
18720 // Multiply the even parts.
18721 SDValue Evens = DAG.getNode(X86ISD::PMULUDQ, dl, MVT::v2i64, A, B);
18722 // Now multiply odd parts.
18723 SDValue Odds = DAG.getNode(X86ISD::PMULUDQ, dl, MVT::v2i64, Aodds, Bodds);
18725 Evens = DAG.getNode(ISD::BITCAST, dl, VT, Evens);
18726 Odds = DAG.getNode(ISD::BITCAST, dl, VT, Odds);
18728 // Merge the two vectors back together with a shuffle. This expands into 2
18729 // shuffles.
18730 static const int ShufMask[] = { 0, 4, 2, 6 };
18731 return DAG.getVectorShuffle(VT, dl, Evens, Odds, ShufMask);
18734 assert((VT == MVT::v2i64 || VT == MVT::v4i64 || VT == MVT::v8i64) &&
18735 "Only know how to lower V2I64/V4I64/V8I64 multiply");
18737 // Ahi = psrlqi(a, 32);
18738 // Bhi = psrlqi(b, 32);
18740 // AloBlo = pmuludq(a, b);
18741 // AloBhi = pmuludq(a, Bhi);
18742 // AhiBlo = pmuludq(Ahi, b);
18744 // AloBhi = psllqi(AloBhi, 32);
18745 // AhiBlo = psllqi(AhiBlo, 32);
18746 // return AloBlo + AloBhi + AhiBlo;
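// Correctness: with a = 2^32*Ahi + Alo and b = 2^32*Bhi + Blo,
//   a*b = Alo*Blo + 2^32*(Alo*Bhi + Ahi*Blo) + 2^64*Ahi*Bhi
// and the last term vanishes modulo 2^64, so the three PMULUDQs plus two
// 32-bit left shifts below compute the full 64-bit product per lane.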
18748 SDValue Ahi = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, VT, A, 32, DAG);
18749 SDValue Bhi = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, VT, B, 32, DAG);
18751 // Bit cast to 32-bit vectors for MULUDQ
18752 EVT MulVT = (VT == MVT::v2i64) ? MVT::v4i32 :
18753 (VT == MVT::v4i64) ? MVT::v8i32 : MVT::v16i32;
18754 A = DAG.getNode(ISD::BITCAST, dl, MulVT, A);
18755 B = DAG.getNode(ISD::BITCAST, dl, MulVT, B);
18756 Ahi = DAG.getNode(ISD::BITCAST, dl, MulVT, Ahi);
18757 Bhi = DAG.getNode(ISD::BITCAST, dl, MulVT, Bhi);
18759 SDValue AloBlo = DAG.getNode(X86ISD::PMULUDQ, dl, VT, A, B);
18760 SDValue AloBhi = DAG.getNode(X86ISD::PMULUDQ, dl, VT, A, Bhi);
18761 SDValue AhiBlo = DAG.getNode(X86ISD::PMULUDQ, dl, VT, Ahi, B);
18763 AloBhi = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, VT, AloBhi, 32, DAG);
18764 AhiBlo = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, VT, AhiBlo, 32, DAG);
18766 SDValue Res = DAG.getNode(ISD::ADD, dl, VT, AloBlo, AloBhi);
18767 return DAG.getNode(ISD::ADD, dl, VT, Res, AhiBlo);
18770 SDValue X86TargetLowering::LowerWin64_i128OP(SDValue Op, SelectionDAG &DAG) const {
18771 assert(Subtarget->isTargetWin64() && "Unexpected target");
18772 EVT VT = Op.getValueType();
18773 assert(VT.isInteger() && VT.getSizeInBits() == 128 &&
18774 "Unexpected return type for lowering");
18776 RTLIB::Libcall LC;
18777 bool isSigned;
18778 switch (Op->getOpcode()) {
18779 default: llvm_unreachable("Unexpected request for libcall!");
18780 case ISD::SDIV: isSigned = true; LC = RTLIB::SDIV_I128; break;
18781 case ISD::UDIV: isSigned = false; LC = RTLIB::UDIV_I128; break;
18782 case ISD::SREM: isSigned = true; LC = RTLIB::SREM_I128; break;
18783 case ISD::UREM: isSigned = false; LC = RTLIB::UREM_I128; break;
18784 case ISD::SDIVREM: isSigned = true; LC = RTLIB::SDIVREM_I128; break;
18785 case ISD::UDIVREM: isSigned = false; LC = RTLIB::UDIVREM_I128; break;
18789 SDValue InChain = DAG.getEntryNode();
18791 TargetLowering::ArgListTy Args;
18792 TargetLowering::ArgListEntry Entry;
18793 for (unsigned i = 0, e = Op->getNumOperands(); i != e; ++i) {
18794 EVT ArgVT = Op->getOperand(i).getValueType();
18795 assert(ArgVT.isInteger() && ArgVT.getSizeInBits() == 128 &&
18796 "Unexpected argument type for lowering");
18797 SDValue StackPtr = DAG.CreateStackTemporary(ArgVT, 16);
18798 Entry.Node = StackPtr;
18799 InChain = DAG.getStore(InChain, dl, Op->getOperand(i), StackPtr, MachinePointerInfo(),
18801 Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
18802 Entry.Ty = PointerType::get(ArgTy,0);
18803 Entry.isSExt = false;
18804 Entry.isZExt = false;
18805 Args.push_back(Entry);
18808 SDValue Callee = DAG.getExternalSymbol(getLibcallName(LC),
18811 TargetLowering::CallLoweringInfo CLI(DAG);
18812 CLI.setDebugLoc(dl).setChain(InChain)
18813 .setCallee(getLibcallCallingConv(LC),
18814 static_cast<EVT>(MVT::v2i64).getTypeForEVT(*DAG.getContext()),
18815 Callee, std::move(Args), 0)
18816 .setInRegister().setSExtResult(isSigned).setZExtResult(!isSigned);
18818 std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI);
18819 return DAG.getNode(ISD::BITCAST, dl, VT, CallInfo.first);
18822 static SDValue LowerMUL_LOHI(SDValue Op, const X86Subtarget *Subtarget,
18823 SelectionDAG &DAG) {
18824 SDValue Op0 = Op.getOperand(0), Op1 = Op.getOperand(1);
18825 EVT VT = Op0.getValueType();
18828 assert((VT == MVT::v4i32 && Subtarget->hasSSE2()) ||
18829 (VT == MVT::v8i32 && Subtarget->hasInt256()));
18831 // PMULxD operations multiply each even value (starting at 0) of LHS with
18832 // the related value of RHS and produce a widened result.
18833 // E.g., PMULUDQ <4 x i32> <a|b|c|d>, <4 x i32> <e|f|g|h>
18834 // => <2 x i64> <ae|cg>
18836 // In other words, to have all the results, we need to perform two PMULxD:
18837 // 1. one with the even values.
18838 // 2. one with the odd values.
18839 // To achieve #2, we need to place the odd values at an even position.
18841 // Place the odd value at an even position (basically, shift all values 1
18842 // step to the left):
18843 const int Mask[] = {1, -1, 3, -1, 5, -1, 7, -1};
18844 // <a|b|c|d> => <b|undef|d|undef>
18845 SDValue Odd0 = DAG.getVectorShuffle(VT, dl, Op0, Op0, Mask);
18846 // <e|f|g|h> => <f|undef|h|undef>
18847 SDValue Odd1 = DAG.getVectorShuffle(VT, dl, Op1, Op1, Mask);
18849 // Emit two multiplies, one for the lower 2 ints and one for the higher 2
18851 MVT MulVT = VT == MVT::v4i32 ? MVT::v2i64 : MVT::v4i64;
18852 bool IsSigned = Op->getOpcode() == ISD::SMUL_LOHI;
18853 unsigned Opcode =
18854 (!IsSigned || !Subtarget->hasSSE41()) ? X86ISD::PMULUDQ : X86ISD::PMULDQ;
18855 // PMULUDQ <4 x i32> <a|b|c|d>, <4 x i32> <e|f|g|h>
18856 // => <2 x i64> <ae|cg>
18857 SDValue Mul1 = DAG.getNode(ISD::BITCAST, dl, VT,
18858 DAG.getNode(Opcode, dl, MulVT, Op0, Op1));
18859 // PMULUDQ <4 x i32> <b|undef|d|undef>, <4 x i32> <f|undef|h|undef>
18860 // => <2 x i64> <bf|dh>
18861 SDValue Mul2 = DAG.getNode(ISD::BITCAST, dl, VT,
18862 DAG.getNode(Opcode, dl, MulVT, Odd0, Odd1));
18864 // Shuffle it back into the right order.
18865 SDValue Highs, Lows;
18866 if (VT == MVT::v8i32) {
18867 const int HighMask[] = {1, 9, 3, 11, 5, 13, 7, 15};
18868 Highs = DAG.getVectorShuffle(VT, dl, Mul1, Mul2, HighMask);
18869 const int LowMask[] = {0, 8, 2, 10, 4, 12, 6, 14};
18870 Lows = DAG.getVectorShuffle(VT, dl, Mul1, Mul2, LowMask);
18872 const int HighMask[] = {1, 5, 3, 7};
18873 Highs = DAG.getVectorShuffle(VT, dl, Mul1, Mul2, HighMask);
18874 const int LowMask[] = {0, 4, 2, 6};
18875 Lows = DAG.getVectorShuffle(VT, dl, Mul1, Mul2, LowMask);
18878 // If we have a signed multiply but no PMULDQ, fix up the high parts of an
18879 // unsigned multiply.
18880 if (IsSigned && !Subtarget->hasSSE41()) {
18881 SDValue ShAmt =
18882 DAG.getConstant(31, DAG.getTargetLoweringInfo().getShiftAmountTy(VT));
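// This applies the identity mulhs(a, b) = mulhu(a, b) - (a < 0 ? b : 0)
//                                                     - (b < 0 ? a : 0):
// (x s>> 31) is all-ones exactly when x is negative, so the ANDs below
// select the correction terms and the final SUB folds them into the high
// halves produced by the unsigned multiply.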
18883 SDValue T1 = DAG.getNode(ISD::AND, dl, VT,
18884 DAG.getNode(ISD::SRA, dl, VT, Op0, ShAmt), Op1);
18885 SDValue T2 = DAG.getNode(ISD::AND, dl, VT,
18886 DAG.getNode(ISD::SRA, dl, VT, Op1, ShAmt), Op0);
18888 SDValue Fixup = DAG.getNode(ISD::ADD, dl, VT, T1, T2);
18889 Highs = DAG.getNode(ISD::SUB, dl, VT, Highs, Fixup);
18892 // The first result of MUL_LOHI is actually the low value, followed by the
18894 SDValue Ops[] = {Lows, Highs};
18895 return DAG.getMergeValues(Ops, dl);
18898 static SDValue LowerScalarImmediateShift(SDValue Op, SelectionDAG &DAG,
18899 const X86Subtarget *Subtarget) {
18900 MVT VT = Op.getSimpleValueType();
18902 SDValue R = Op.getOperand(0);
18903 SDValue Amt = Op.getOperand(1);
18905 // Optimize shl/srl/sra with constant shift amount.
18906 if (auto *BVAmt = dyn_cast<BuildVectorSDNode>(Amt)) {
18907 if (auto *ShiftConst = BVAmt->getConstantSplatNode()) {
18908 uint64_t ShiftAmt = ShiftConst->getZExtValue();
18910 if (VT == MVT::v2i64 || VT == MVT::v4i32 || VT == MVT::v8i16 ||
18911 (Subtarget->hasInt256() &&
18912 (VT == MVT::v4i64 || VT == MVT::v8i32 || VT == MVT::v16i16)) ||
18913 (Subtarget->hasAVX512() &&
18914 (VT == MVT::v8i64 || VT == MVT::v16i32))) {
18915 if (Op.getOpcode() == ISD::SHL)
18916 return getTargetVShiftByConstNode(X86ISD::VSHLI, dl, VT, R, ShiftAmt,
18918 if (Op.getOpcode() == ISD::SRL)
18919 return getTargetVShiftByConstNode(X86ISD::VSRLI, dl, VT, R, ShiftAmt,
18921 if (Op.getOpcode() == ISD::SRA && VT != MVT::v2i64 && VT != MVT::v4i64)
18922 return getTargetVShiftByConstNode(X86ISD::VSRAI, dl, VT, R, ShiftAmt,
18926 if (VT == MVT::v16i8) {
18927 if (Op.getOpcode() == ISD::SHL) {
18928 // Make a large shift.
18929 SDValue SHL = getTargetVShiftByConstNode(X86ISD::VSHLI, dl,
18930 MVT::v8i16, R, ShiftAmt,
18932 SHL = DAG.getNode(ISD::BITCAST, dl, VT, SHL);
18933 // Zero out the rightmost bits.
18934 SmallVector<SDValue, 16> V(16,
18935 DAG.getConstant(uint8_t(-1U << ShiftAmt),
18937 return DAG.getNode(ISD::AND, dl, VT, SHL,
18938 DAG.getNode(ISD::BUILD_VECTOR, dl, VT, V));
18940 if (Op.getOpcode() == ISD::SRL) {
18941 // Make a large shift.
18942 SDValue SRL = getTargetVShiftByConstNode(X86ISD::VSRLI, dl,
18943 MVT::v8i16, R, ShiftAmt,
18945 SRL = DAG.getNode(ISD::BITCAST, dl, VT, SRL);
18946 // Zero out the leftmost bits.
18947 SmallVector<SDValue, 16> V(16,
18948 DAG.getConstant(uint8_t(-1U) >> ShiftAmt,
18950 return DAG.getNode(ISD::AND, dl, VT, SRL,
18951 DAG.getNode(ISD::BUILD_VECTOR, dl, VT, V));
18953 if (Op.getOpcode() == ISD::SRA) {
18954 if (ShiftAmt == 7) {
18955 // R s>> 7 === R s< 0
18956 SDValue Zeros = getZeroVector(VT, Subtarget, DAG, dl);
18957 return DAG.getNode(X86ISD::PCMPGT, dl, VT, Zeros, R);
18960 // R s>> a === ((R u>> a) ^ m) - m
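// With m = (0x80 >> a), the logical shift leaves the original sign bit at
// position 7-a; (x ^ m) - m then sign-extends from that bit, which is the
// classic way to synthesize an arithmetic shift from a logical one.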
18961 SDValue Res = DAG.getNode(ISD::SRL, dl, VT, R, Amt);
18962 SmallVector<SDValue, 16> V(16, DAG.getConstant(128 >> ShiftAmt,
18964 SDValue Mask = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, V);
18965 Res = DAG.getNode(ISD::XOR, dl, VT, Res, Mask);
18966 Res = DAG.getNode(ISD::SUB, dl, VT, Res, Mask);
18969 llvm_unreachable("Unknown shift opcode.");
18972 if (Subtarget->hasInt256() && VT == MVT::v32i8) {
18973 if (Op.getOpcode() == ISD::SHL) {
18974 // Make a large shift.
18975 SDValue SHL = getTargetVShiftByConstNode(X86ISD::VSHLI, dl,
18976 MVT::v16i16, R, ShiftAmt,
18978 SHL = DAG.getNode(ISD::BITCAST, dl, VT, SHL);
18979 // Zero out the rightmost bits.
18980 SmallVector<SDValue, 32> V(32,
18981 DAG.getConstant(uint8_t(-1U << ShiftAmt),
18983 return DAG.getNode(ISD::AND, dl, VT, SHL,
18984 DAG.getNode(ISD::BUILD_VECTOR, dl, VT, V));
18986 if (Op.getOpcode() == ISD::SRL) {
18987 // Make a large shift.
18988 SDValue SRL = getTargetVShiftByConstNode(X86ISD::VSRLI, dl,
18989 MVT::v16i16, R, ShiftAmt,
18991 SRL = DAG.getNode(ISD::BITCAST, dl, VT, SRL);
18992 // Zero out the leftmost bits.
18993 SmallVector<SDValue, 32> V(32,
18994 DAG.getConstant(uint8_t(-1U) >> ShiftAmt,
18996 return DAG.getNode(ISD::AND, dl, VT, SRL,
18997 DAG.getNode(ISD::BUILD_VECTOR, dl, VT, V));
18999 if (Op.getOpcode() == ISD::SRA) {
19000 if (ShiftAmt == 7) {
19001 // R s>> 7 === R s< 0
19002 SDValue Zeros = getZeroVector(VT, Subtarget, DAG, dl);
19003 return DAG.getNode(X86ISD::PCMPGT, dl, VT, Zeros, R);
19006 // R s>> a === ((R u>> a) ^ m) - m
19007 SDValue Res = DAG.getNode(ISD::SRL, dl, VT, R, Amt);
19008 SmallVector<SDValue, 32> V(32, DAG.getConstant(128 >> ShiftAmt,
19010 SDValue Mask = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, V);
19011 Res = DAG.getNode(ISD::XOR, dl, VT, Res, Mask);
19012 Res = DAG.getNode(ISD::SUB, dl, VT, Res, Mask);
19015 llvm_unreachable("Unknown shift opcode.");
19020 // Special case in 32-bit mode, where i64 is expanded into high and low parts.
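// On 32-bit targets the i64 lanes of the shift amount are legalized into
// pairs of i32 values, so a splatted amount shows up as
// (bitcast (build_vector lo, hi, ...)). Reassemble each 64-bit amount from
// its 32-bit pieces and verify that every lane encodes the same value before
// emitting a single immediate shift.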
19021 if (!Subtarget->is64Bit() &&
19022 (VT == MVT::v2i64 || (Subtarget->hasInt256() && VT == MVT::v4i64)) &&
19023 Amt.getOpcode() == ISD::BITCAST &&
19024 Amt.getOperand(0).getOpcode() == ISD::BUILD_VECTOR) {
19025 Amt = Amt.getOperand(0);
19026 unsigned Ratio = Amt.getSimpleValueType().getVectorNumElements() /
19027 VT.getVectorNumElements();
19028 unsigned RatioInLog2 = Log2_32_Ceil(Ratio);
19029 uint64_t ShiftAmt = 0;
19030 for (unsigned i = 0; i != Ratio; ++i) {
19031 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Amt.getOperand(i));
19035 ShiftAmt |= C->getZExtValue() << (i * (1 << (6 - RatioInLog2)));
19037 // Check remaining shift amounts.
19038 for (unsigned i = Ratio; i != Amt.getNumOperands(); i += Ratio) {
19039 uint64_t ShAmt = 0;
19040 for (unsigned j = 0; j != Ratio; ++j) {
19041 ConstantSDNode *C =
19042 dyn_cast<ConstantSDNode>(Amt.getOperand(i + j));
19046 ShAmt |= C->getZExtValue() << (j * (1 << (6 - RatioInLog2)));
19048 if (ShAmt != ShiftAmt)
19051 switch (Op.getOpcode()) {
19053 llvm_unreachable("Unknown shift opcode!");
19055 return getTargetVShiftByConstNode(X86ISD::VSHLI, dl, VT, R, ShiftAmt,
19058 return getTargetVShiftByConstNode(X86ISD::VSRLI, dl, VT, R, ShiftAmt,
19061 return getTargetVShiftByConstNode(X86ISD::VSRAI, dl, VT, R, ShiftAmt,
19069 static SDValue LowerScalarVariableShift(SDValue Op, SelectionDAG &DAG,
19070 const X86Subtarget* Subtarget) {
19071 MVT VT = Op.getSimpleValueType();
19073 SDValue R = Op.getOperand(0);
19074 SDValue Amt = Op.getOperand(1);
19076 if ((VT == MVT::v2i64 && Op.getOpcode() != ISD::SRA) ||
19077 VT == MVT::v4i32 || VT == MVT::v8i16 ||
19078 (Subtarget->hasInt256() &&
19079 ((VT == MVT::v4i64 && Op.getOpcode() != ISD::SRA) ||
19080 VT == MVT::v8i32 || VT == MVT::v16i16)) ||
19081 (Subtarget->hasAVX512() && (VT == MVT::v8i64 || VT == MVT::v16i32))) {
19082 SDValue BaseShAmt;
19083 EVT EltVT = VT.getVectorElementType();
19085 if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(Amt)) {
19086 // Check if this build_vector node is doing a splat.
19087 // If so, then set BaseShAmt equal to the splat value.
19088 BaseShAmt = BV->getSplatValue();
19089 if (BaseShAmt && BaseShAmt.getOpcode() == ISD::UNDEF)
19090 BaseShAmt = SDValue();
19092 if (Amt.getOpcode() == ISD::EXTRACT_SUBVECTOR)
19093 Amt = Amt.getOperand(0);
19095 ShuffleVectorSDNode *SVN = dyn_cast<ShuffleVectorSDNode>(Amt);
19096 if (SVN && SVN->isSplat()) {
19097 unsigned SplatIdx = (unsigned)SVN->getSplatIndex();
19098 SDValue InVec = Amt.getOperand(0);
19099 if (InVec.getOpcode() == ISD::BUILD_VECTOR) {
19100 assert((SplatIdx < InVec.getValueType().getVectorNumElements()) &&
19101 "Unexpected shuffle index found!");
19102 BaseShAmt = InVec.getOperand(SplatIdx);
19103 } else if (InVec.getOpcode() == ISD::INSERT_VECTOR_ELT) {
19104 if (ConstantSDNode *C =
19105 dyn_cast<ConstantSDNode>(InVec.getOperand(2))) {
19106 if (C->getZExtValue() == SplatIdx)
19107 BaseShAmt = InVec.getOperand(1);
19112 // Avoid introducing an extract element from a shuffle.
19113 BaseShAmt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, InVec,
19114 DAG.getIntPtrConstant(SplatIdx));
19118 if (BaseShAmt.getNode()) {
19119 assert(EltVT.bitsLE(MVT::i64) && "Unexpected element type!");
19120 if (EltVT != MVT::i64 && EltVT.bitsGT(MVT::i32))
19121 BaseShAmt = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64, BaseShAmt);
19122 else if (EltVT.bitsLT(MVT::i32))
19123 BaseShAmt = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, BaseShAmt);
19125 switch (Op.getOpcode()) {
19127 llvm_unreachable("Unknown shift opcode!");
19129 switch (VT.SimpleTy) {
19130 default: return SDValue();
19139 return getTargetVShiftNode(X86ISD::VSHLI, dl, VT, R, BaseShAmt, DAG);
19142 switch (VT.SimpleTy) {
19143 default: return SDValue();
19150 return getTargetVShiftNode(X86ISD::VSRAI, dl, VT, R, BaseShAmt, DAG);
19153 switch (VT.SimpleTy) {
19154 default: return SDValue();
19163 return getTargetVShiftNode(X86ISD::VSRLI, dl, VT, R, BaseShAmt, DAG);
19169 // Special case in 32-bit mode, where i64 is expanded into high and low parts.
19170 if (!Subtarget->is64Bit() &&
19171 (VT == MVT::v2i64 || (Subtarget->hasInt256() && VT == MVT::v4i64) ||
19172 (Subtarget->hasAVX512() && VT == MVT::v8i64)) &&
19173 Amt.getOpcode() == ISD::BITCAST &&
19174 Amt.getOperand(0).getOpcode() == ISD::BUILD_VECTOR) {
19175 Amt = Amt.getOperand(0);
19176 unsigned Ratio = Amt.getSimpleValueType().getVectorNumElements() /
19177 VT.getVectorNumElements();
19178 std::vector<SDValue> Vals(Ratio);
19179 for (unsigned i = 0; i != Ratio; ++i)
19180 Vals[i] = Amt.getOperand(i);
19181 for (unsigned i = Ratio; i != Amt.getNumOperands(); i += Ratio) {
19182 for (unsigned j = 0; j != Ratio; ++j)
19183 if (Vals[j] != Amt.getOperand(i + j))
19186 switch (Op.getOpcode()) {
19188 llvm_unreachable("Unknown shift opcode!");
19190 return DAG.getNode(X86ISD::VSHL, dl, VT, R, Op.getOperand(1));
19192 return DAG.getNode(X86ISD::VSRL, dl, VT, R, Op.getOperand(1));
19194 return DAG.getNode(X86ISD::VSRA, dl, VT, R, Op.getOperand(1));
19201 static SDValue LowerShift(SDValue Op, const X86Subtarget* Subtarget,
19202 SelectionDAG &DAG) {
19203 MVT VT = Op.getSimpleValueType();
19205 SDValue R = Op.getOperand(0);
19206 SDValue Amt = Op.getOperand(1);
19209 assert(VT.isVector() && "Custom lowering only for vector shifts!");
19210 assert(Subtarget->hasSSE2() && "Only custom lower when we have SSE2!");
19212 V = LowerScalarImmediateShift(Op, DAG, Subtarget);
19216 V = LowerScalarVariableShift(Op, DAG, Subtarget);
19220 if (Subtarget->hasAVX512() && (VT == MVT::v16i32 || VT == MVT::v8i64))
19222 // AVX2 has VPSLLV/VPSRAV/VPSRLV.
19223 if (Subtarget->hasInt256()) {
19224 if (Op.getOpcode() == ISD::SRL &&
19225 (VT == MVT::v2i64 || VT == MVT::v4i32 ||
19226 VT == MVT::v4i64 || VT == MVT::v8i32))
19228 if (Op.getOpcode() == ISD::SHL &&
19229 (VT == MVT::v2i64 || VT == MVT::v4i32 ||
19230 VT == MVT::v4i64 || VT == MVT::v8i32))
19232 if (Op.getOpcode() == ISD::SRA && (VT == MVT::v4i32 || VT == MVT::v8i32))
19236 // If possible, lower this packed shift into a vector multiply instead of
19237 // expanding it into a sequence of scalar shifts.
19238 // Do this only if the vector shift count is a constant build_vector.
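// Each constant lane amount is turned into (1 << amt) below, so the shift
// becomes a single vector multiply (e.g. PMULLW for v8i16), which is cheaper
// than scalarizing the shift.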
19239 if (Op.getOpcode() == ISD::SHL &&
19240 (VT == MVT::v8i16 || VT == MVT::v4i32 ||
19241 (Subtarget->hasInt256() && VT == MVT::v16i16)) &&
19242 ISD::isBuildVectorOfConstantSDNodes(Amt.getNode())) {
19243 SmallVector<SDValue, 8> Elts;
19244 EVT SVT = VT.getScalarType();
19245 unsigned SVTBits = SVT.getSizeInBits();
19246 const APInt &One = APInt(SVTBits, 1);
19247 unsigned NumElems = VT.getVectorNumElements();
19249 for (unsigned i=0; i !=NumElems; ++i) {
19250 SDValue Op = Amt->getOperand(i);
19251 if (Op->getOpcode() == ISD::UNDEF) {
19252 Elts.push_back(Op);
19256 ConstantSDNode *ND = cast<ConstantSDNode>(Op);
19257 const APInt &C = APInt(SVTBits, ND->getAPIntValue().getZExtValue());
19258 uint64_t ShAmt = C.getZExtValue();
19259 if (ShAmt >= SVTBits) {
19260 Elts.push_back(DAG.getUNDEF(SVT));
19263 Elts.push_back(DAG.getConstant(One.shl(ShAmt), SVT));
19265 SDValue BV = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Elts);
19266 return DAG.getNode(ISD::MUL, dl, VT, R, BV);
19269 // Lower SHL with variable shift amount.
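// Build the float 2^amt per lane: the amount is shifted into the IEEE-754
// exponent field (bit 23) and added to 0x3f800000 (the encoding of 1.0f),
// so the FP_TO_SINT below produces 1 << amt and the final MUL performs the
// shift.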
19270 if (VT == MVT::v4i32 && Op->getOpcode() == ISD::SHL) {
19271 Op = DAG.getNode(ISD::SHL, dl, VT, Amt, DAG.getConstant(23, VT));
19273 Op = DAG.getNode(ISD::ADD, dl, VT, Op, DAG.getConstant(0x3f800000U, VT));
19274 Op = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, Op);
19275 Op = DAG.getNode(ISD::FP_TO_SINT, dl, VT, Op);
19276 return DAG.getNode(ISD::MUL, dl, VT, Op, R);
19279 // If possible, lower this shift as a sequence of two shifts by
19280 // constant plus a MOVSS/MOVSD instead of scalarizing it.
19282 // (v4i32 (srl A, (build_vector < X, Y, Y, Y>)))
19284 // Could be rewritten as:
19285 // (v4i32 (MOVSS (srl A, <Y,Y,Y,Y>), (srl A, <X,X,X,X>)))
19287 // The advantage is that the two shifts from the example would be
19288 // lowered as X86ISD::VSRLI nodes. This would be cheaper than scalarizing
19289 // the vector shift into four scalar shifts plus four pairs of vector
19290 // insert/extract.
19291 if ((VT == MVT::v8i16 || VT == MVT::v4i32) &&
19292 ISD::isBuildVectorOfConstantSDNodes(Amt.getNode())) {
19293 unsigned TargetOpcode = X86ISD::MOVSS;
19294 bool CanBeSimplified;
19295 // The splat value for the first packed shift (the 'X' from the example).
19296 SDValue Amt1 = Amt->getOperand(0);
19297 // The splat value for the second packed shift (the 'Y' from the example).
19298 SDValue Amt2 = (VT == MVT::v4i32) ? Amt->getOperand(1) :
19299 Amt->getOperand(2);
19301 // See if it is possible to replace this node with a sequence of
19302 // two shifts followed by a MOVSS/MOVSD
19303 if (VT == MVT::v4i32) {
19304 // Check if it is legal to use a MOVSS.
19305 CanBeSimplified = Amt2 == Amt->getOperand(2) &&
19306 Amt2 == Amt->getOperand(3);
19307 if (!CanBeSimplified) {
19308 // Otherwise, check if we can still simplify this node using a MOVSD.
19309 CanBeSimplified = Amt1 == Amt->getOperand(1) &&
19310 Amt->getOperand(2) == Amt->getOperand(3);
19311 TargetOpcode = X86ISD::MOVSD;
19312 Amt2 = Amt->getOperand(2);
19315 // Do similar checks for the case where the machine value type
19317 CanBeSimplified = Amt1 == Amt->getOperand(1);
19318 for (unsigned i=3; i != 8 && CanBeSimplified; ++i)
19319 CanBeSimplified = Amt2 == Amt->getOperand(i);
19321 if (!CanBeSimplified) {
19322 TargetOpcode = X86ISD::MOVSD;
19323 CanBeSimplified = true;
19324 Amt2 = Amt->getOperand(4);
19325 for (unsigned i=0; i != 4 && CanBeSimplified; ++i)
19326 CanBeSimplified = Amt1 == Amt->getOperand(i);
19327 for (unsigned j=4; j != 8 && CanBeSimplified; ++j)
19328 CanBeSimplified = Amt2 == Amt->getOperand(j);
19332 if (CanBeSimplified && isa<ConstantSDNode>(Amt1) &&
19333 isa<ConstantSDNode>(Amt2)) {
19334 // Replace this node with two shifts followed by a MOVSS/MOVSD.
19335 EVT CastVT = MVT::v4i32;
19336 SDValue Splat1 =
19337 DAG.getConstant(cast<ConstantSDNode>(Amt1)->getAPIntValue(), VT);
19338 SDValue Shift1 = DAG.getNode(Op->getOpcode(), dl, VT, R, Splat1);
19339 SDValue Splat2 =
19340 DAG.getConstant(cast<ConstantSDNode>(Amt2)->getAPIntValue(), VT);
19341 SDValue Shift2 = DAG.getNode(Op->getOpcode(), dl, VT, R, Splat2);
19342 if (TargetOpcode == X86ISD::MOVSD)
19343 CastVT = MVT::v2i64;
19344 SDValue BitCast1 = DAG.getNode(ISD::BITCAST, dl, CastVT, Shift1);
19345 SDValue BitCast2 = DAG.getNode(ISD::BITCAST, dl, CastVT, Shift2);
19346 SDValue Result = getTargetShuffleNode(TargetOpcode, dl, CastVT, BitCast2,
19348 return DAG.getNode(ISD::BITCAST, dl, VT, Result);
19352 if (VT == MVT::v16i8 && Op->getOpcode() == ISD::SHL) {
19353 assert(Subtarget->hasSSE2() && "Need SSE2 for pslli/pcmpeq.");
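// The byte shift amount has only 3 significant bits. After the SHL by 5
// below, those bits are examined MSB-first via the lane sign bit: each round
// conditionally applies a shift by 4, then 2, then 1, doubling 'a' (Op)
// between rounds to expose the next amount bit.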
19356 Op = DAG.getNode(ISD::SHL, dl, VT, Amt, DAG.getConstant(5, VT));
19357 Op = DAG.getNode(ISD::BITCAST, dl, VT, Op);
19359 // Turn 'a' into a mask suitable for VSELECT
19360 SDValue VSelM = DAG.getConstant(0x80, VT);
19361 SDValue OpVSel = DAG.getNode(ISD::AND, dl, VT, VSelM, Op);
19362 OpVSel = DAG.getNode(X86ISD::PCMPEQ, dl, VT, OpVSel, VSelM);
19364 SDValue CM1 = DAG.getConstant(0x0f, VT);
19365 SDValue CM2 = DAG.getConstant(0x3f, VT);
19367 // r = VSELECT(r, psllw(r & (char16)15, 4), a);
19368 SDValue M = DAG.getNode(ISD::AND, dl, VT, R, CM1);
19369 M = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, MVT::v8i16, M, 4, DAG);
19370 M = DAG.getNode(ISD::BITCAST, dl, VT, M);
19371 R = DAG.getNode(ISD::VSELECT, dl, VT, OpVSel, M, R);
19374 Op = DAG.getNode(ISD::ADD, dl, VT, Op, Op);
19375 OpVSel = DAG.getNode(ISD::AND, dl, VT, VSelM, Op);
19376 OpVSel = DAG.getNode(X86ISD::PCMPEQ, dl, VT, OpVSel, VSelM);
19378 // r = VSELECT(r, psllw(r & (char16)63, 2), a);
19379 M = DAG.getNode(ISD::AND, dl, VT, R, CM2);
19380 M = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, MVT::v8i16, M, 2, DAG);
19381 M = DAG.getNode(ISD::BITCAST, dl, VT, M);
19382 R = DAG.getNode(ISD::VSELECT, dl, VT, OpVSel, M, R);
19385 Op = DAG.getNode(ISD::ADD, dl, VT, Op, Op);
19386 OpVSel = DAG.getNode(ISD::AND, dl, VT, VSelM, Op);
19387 OpVSel = DAG.getNode(X86ISD::PCMPEQ, dl, VT, OpVSel, VSelM);
19389 // return VSELECT(r, r+r, a);
19390 R = DAG.getNode(ISD::VSELECT, dl, VT, OpVSel,
19391 DAG.getNode(ISD::ADD, dl, VT, R, R), R);
19395 // It's worth extending once and using the v8i32 shifts for 16-bit types, but
19396 // the extra overheads to get from v16i8 to v8i32 make the existing SSE
19397 // solution better.
19398 if (Subtarget->hasInt256() && VT == MVT::v8i16) {
19399 MVT NewVT = VT == MVT::v8i16 ? MVT::v8i32 : MVT::v16i16;
19401 Op.getOpcode() == ISD::SRA ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
19402 R = DAG.getNode(ExtOpc, dl, NewVT, R);
19403 Amt = DAG.getNode(ISD::ANY_EXTEND, dl, NewVT, Amt);
19404 return DAG.getNode(ISD::TRUNCATE, dl, VT,
19405 DAG.getNode(Op.getOpcode(), dl, NewVT, R, Amt));
19408 // Decompose 256-bit shifts into smaller 128-bit shifts.
19409 if (VT.is256BitVector()) {
19410 unsigned NumElems = VT.getVectorNumElements();
19411 MVT EltVT = VT.getVectorElementType();
19412 EVT NewVT = MVT::getVectorVT(EltVT, NumElems/2);
19414 // Extract the two vectors
19415 SDValue V1 = Extract128BitVector(R, 0, DAG, dl);
19416 SDValue V2 = Extract128BitVector(R, NumElems/2, DAG, dl);
19418 // Recreate the shift amount vectors
19419 SDValue Amt1, Amt2;
19420 if (Amt.getOpcode() == ISD::BUILD_VECTOR) {
19421 // Constant shift amount
19422 SmallVector<SDValue, 4> Amt1Csts;
19423 SmallVector<SDValue, 4> Amt2Csts;
19424 for (unsigned i = 0; i != NumElems/2; ++i)
19425 Amt1Csts.push_back(Amt->getOperand(i));
19426 for (unsigned i = NumElems/2; i != NumElems; ++i)
19427 Amt2Csts.push_back(Amt->getOperand(i));
19429 Amt1 = DAG.getNode(ISD::BUILD_VECTOR, dl, NewVT, Amt1Csts);
19430 Amt2 = DAG.getNode(ISD::BUILD_VECTOR, dl, NewVT, Amt2Csts);
19432 // Variable shift amount
19433 Amt1 = Extract128BitVector(Amt, 0, DAG, dl);
19434 Amt2 = Extract128BitVector(Amt, NumElems/2, DAG, dl);
19437 // Issue new vector shifts for the smaller types
19438 V1 = DAG.getNode(Op.getOpcode(), dl, NewVT, V1, Amt1);
19439 V2 = DAG.getNode(Op.getOpcode(), dl, NewVT, V2, Amt2);
19441 // Concatenate the result back
19442 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, V1, V2);
19448 static SDValue LowerXALUO(SDValue Op, SelectionDAG &DAG) {
19449 // Lower the "add/sub/mul with overflow" instruction into a regular ins plus
19450 // a "setcc" instruction that checks the overflow flag. The "brcond" lowering
19451 // looks for this combo and may remove the "setcc" instruction if the "setcc"
19452 // has only one use.
19453 SDNode *N = Op.getNode();
19454 SDValue LHS = N->getOperand(0);
19455 SDValue RHS = N->getOperand(1);
19456 unsigned BaseOp = 0;
19459 switch (Op.getOpcode()) {
19460 default: llvm_unreachable("Unknown ovf instruction!");
19462 // An add of one will be selected as an INC. Note that INC doesn't
19463 // set CF, so we can't do this for UADDO.
19464 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS))
19466 BaseOp = X86ISD::INC;
19467 Cond = X86::COND_O;
19470 BaseOp = X86ISD::ADD;
19471 Cond = X86::COND_O;
19474 BaseOp = X86ISD::ADD;
19475 Cond = X86::COND_B;
19478 // A subtract of one will be selected as a DEC. Note that DEC doesn't
19479 // set CF, so we can't do this for USUBO.
19480 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS))
19482 BaseOp = X86ISD::DEC;
19483 Cond = X86::COND_O;
19486 BaseOp = X86ISD::SUB;
19487 Cond = X86::COND_O;
19490 BaseOp = X86ISD::SUB;
19491 Cond = X86::COND_B;
19494 BaseOp = N->getValueType(0) == MVT::i8 ? X86ISD::SMUL8 : X86ISD::SMUL;
19495 Cond = X86::COND_O;
19497 case ISD::UMULO: { // i64, i8 = umulo lhs, rhs --> i64, i64, i32 umul lhs,rhs
19498 if (N->getValueType(0) == MVT::i8) {
19499 BaseOp = X86ISD::UMUL8;
19500 Cond = X86::COND_O;
19503 SDVTList VTs = DAG.getVTList(N->getValueType(0), N->getValueType(0),
19504 MVT::i32);
19505 SDValue Sum = DAG.getNode(X86ISD::UMUL, DL, VTs, LHS, RHS);
19507 SDValue SetCC =
19508 DAG.getNode(X86ISD::SETCC, DL, MVT::i8,
19509 DAG.getConstant(X86::COND_O, MVT::i32),
19510 SDValue(Sum.getNode(), 2));
19512 return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(), Sum, SetCC);
19516 // Also sets EFLAGS.
19517 SDVTList VTs = DAG.getVTList(N->getValueType(0), MVT::i32);
19518 SDValue Sum = DAG.getNode(BaseOp, DL, VTs, LHS, RHS);
19520 SDValue SetCC =
19521 DAG.getNode(X86ISD::SETCC, DL, N->getValueType(1),
19522 DAG.getConstant(Cond, MVT::i32),
19523 SDValue(Sum.getNode(), 1));
19525 return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(), Sum, SetCC);
19528 // Sign extension of the low part of vector elements. This may be used either
19529 // when sign extend instructions are not available or if the vector element
19530 // sizes already match the sign-extended size. If the vector elements are in
19531 // their pre-extended size and sign extend instructions are available, that will
19532 // be handled by LowerSIGN_EXTEND.
19533 SDValue X86TargetLowering::LowerSIGN_EXTEND_INREG(SDValue Op,
19534 SelectionDAG &DAG) const {
19536 EVT ExtraVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
19537 MVT VT = Op.getSimpleValueType();
19539 if (!Subtarget->hasSSE2() || !VT.isVector())
19542 unsigned BitsDiff = VT.getScalarType().getSizeInBits() -
19543 ExtraVT.getScalarType().getSizeInBits();
19545 switch (VT.SimpleTy) {
19546 default: return SDValue();
19549 if (!Subtarget->hasFp256())
19551 if (!Subtarget->hasInt256()) {
19552 // needs to be split
19553 unsigned NumElems = VT.getVectorNumElements();
19555 // Extract the LHS vectors
19556 SDValue LHS = Op.getOperand(0);
19557 SDValue LHS1 = Extract128BitVector(LHS, 0, DAG, dl);
19558 SDValue LHS2 = Extract128BitVector(LHS, NumElems/2, DAG, dl);
19560 MVT EltVT = VT.getVectorElementType();
19561 EVT NewVT = MVT::getVectorVT(EltVT, NumElems/2);
19563 EVT ExtraEltVT = ExtraVT.getVectorElementType();
19564 unsigned ExtraNumElems = ExtraVT.getVectorNumElements();
19565 ExtraVT = EVT::getVectorVT(*DAG.getContext(), ExtraEltVT,
19567 SDValue Extra = DAG.getValueType(ExtraVT);
19569 LHS1 = DAG.getNode(Op.getOpcode(), dl, NewVT, LHS1, Extra);
19570 LHS2 = DAG.getNode(Op.getOpcode(), dl, NewVT, LHS2, Extra);
19572 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, LHS1, LHS2);
19577 SDValue Op0 = Op.getOperand(0);
19579 // This is a sign extension of some low part of vector elements without
19580 // changing the size of the vector elements themselves:
19581 // Shift-Left + Shift-Right-Algebraic.
19582 SDValue Shl = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, VT, Op0,
19584 return getTargetVShiftByConstNode(X86ISD::VSRAI, dl, VT, Shl, BitsDiff,
19590 /// Returns true if the operand type is exactly twice the native width, and
19591 /// the corresponding cmpxchg8b or cmpxchg16b instruction is available.
19592 /// Used to know whether to use cmpxchg8/16b when expanding atomic operations
19593 /// (otherwise we leave them alone to become __sync_fetch_and_... calls).
19594 bool X86TargetLowering::needsCmpXchgNb(const Type *MemType) const {
19595 unsigned OpWidth = MemType->getPrimitiveSizeInBits();
19597 if (OpWidth == 64)
19598 return !Subtarget->is64Bit(); // FIXME this should be Subtarget.hasCmpxchg8b
19599 else if (OpWidth == 128)
19600 return Subtarget->hasCmpxchg16b();
19602 return false;
19603 }
19605 bool X86TargetLowering::shouldExpandAtomicStoreInIR(StoreInst *SI) const {
19606 return needsCmpXchgNb(SI->getValueOperand()->getType());
19609 // Note: this turns large loads into lock cmpxchg8b/16b.
19610 // FIXME: On 32-bit x86, fild/movq might be faster than lock cmpxchg8b.
19611 bool X86TargetLowering::shouldExpandAtomicLoadInIR(LoadInst *LI) const {
19612 auto PTy = cast<PointerType>(LI->getPointerOperand()->getType());
19613 return needsCmpXchgNb(PTy->getElementType());
19616 bool X86TargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
19617 unsigned NativeWidth = Subtarget->is64Bit() ? 64 : 32;
19618 const Type *MemType = AI->getType();
19620 // If the operand is too big, we must see if cmpxchg8/16b is available
19621 // and default to library calls otherwise.
19622 if (MemType->getPrimitiveSizeInBits() > NativeWidth)
19623 return needsCmpXchgNb(MemType);
19625 AtomicRMWInst::BinOp Op = AI->getOperation();
19628 llvm_unreachable("Unknown atomic operation");
19629 case AtomicRMWInst::Xchg:
19630 case AtomicRMWInst::Add:
19631 case AtomicRMWInst::Sub:
19632 // It's better to use xadd, xsub or xchg for these in all cases.
19634 case AtomicRMWInst::Or:
19635 case AtomicRMWInst::And:
19636 case AtomicRMWInst::Xor:
19637 // If the atomicrmw's result isn't actually used, we can just add a "lock"
19638 // prefix to a normal instruction for these operations.
19639 return !AI->use_empty();
19640 case AtomicRMWInst::Nand:
19641 case AtomicRMWInst::Max:
19642 case AtomicRMWInst::Min:
19643 case AtomicRMWInst::UMax:
19644 case AtomicRMWInst::UMin:
19645 // These always require a non-trivial set of data operations on x86. We must
19646 // use a cmpxchg loop.
19651 static bool hasMFENCE(const X86Subtarget& Subtarget) {
19652 // Use mfence if we have SSE2 or we're on x86-64 (even if we asked for
19653 // no-sse2). There isn't any reason to disable it if the target processor
19654 // supports it.
19655 return Subtarget.hasSSE2() || Subtarget.is64Bit();
19659 X86TargetLowering::lowerIdempotentRMWIntoFencedLoad(AtomicRMWInst *AI) const {
19660 unsigned NativeWidth = Subtarget->is64Bit() ? 64 : 32;
19661 const Type *MemType = AI->getType();
19662 // Accesses larger than the native width are turned into cmpxchg/libcalls, so
19663 // there is no benefit in turning such RMWs into loads, and it is actually
19664 // harmful as it introduces a mfence.
19665 if (MemType->getPrimitiveSizeInBits() > NativeWidth)
19668 auto Builder = IRBuilder<>(AI);
19669 Module *M = Builder.GetInsertBlock()->getParent()->getParent();
19670 auto SynchScope = AI->getSynchScope();
19671 // We must restrict the ordering to avoid generating loads with Release or
19672 // ReleaseAcquire orderings.
19673 auto Order = AtomicCmpXchgInst::getStrongestFailureOrdering(AI->getOrdering());
19674 auto Ptr = AI->getPointerOperand();
19676 // Before the load we need a fence. Here is an example lifted from
19677 // http://www.hpl.hp.com/techreports/2012/HPL-2012-68.pdf showing why a fence
19678 // is required:
19679 // Thread 0:
19680 //   x.store(1, relaxed);
19681 //   r1 = y.fetch_add(0, release);
19682 // Thread 1:
19683 //   y.fetch_add(42, acquire);
19684 // r2 = x.load(relaxed);
19685 // r1 = r2 = 0 is impossible, but becomes possible if the idempotent rmw is
19686 // lowered to just a load without a fence. A mfence flushes the store buffer,
19687 // making the optimization clearly correct.
19688 // FIXME: it is required if isAtLeastRelease(Order) but it is not clear
19689 // otherwise, we might be able to be more aggressive on relaxed idempotent
19690 // rmw. In practice, they do not look useful, so we don't try to be
19691 // especially clever.
19692 if (SynchScope == SingleThread) {
19693 // FIXME: we could just insert an X86ISD::MEMBARRIER here, except we are at
19694 // the IR level, so we must wrap it in an intrinsic.
19696 } else if (hasMFENCE(*Subtarget)) {
19697 Function *MFence = llvm::Intrinsic::getDeclaration(M,
19698 Intrinsic::x86_sse2_mfence);
19699 Builder.CreateCall(MFence);
19701 // FIXME: it might make sense to use a locked operation here but on a
19702 // different cache-line to prevent cache-line bouncing. In practice it
19703 // is probably a small win, and x86 processors without mfence are rare
19704 // enough that we do not bother.
19708 // Finally we can emit the atomic load.
19709 LoadInst *Loaded = Builder.CreateAlignedLoad(Ptr,
19710 AI->getType()->getPrimitiveSizeInBits());
19711 Loaded->setAtomic(Order, SynchScope);
19712 AI->replaceAllUsesWith(Loaded);
19713 AI->eraseFromParent();
19717 static SDValue LowerATOMIC_FENCE(SDValue Op, const X86Subtarget *Subtarget,
19718 SelectionDAG &DAG) {
19720 AtomicOrdering FenceOrdering = static_cast<AtomicOrdering>(
19721 cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue());
19722 SynchronizationScope FenceScope = static_cast<SynchronizationScope>(
19723 cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue());
19725 // The only fence that needs an instruction is a sequentially-consistent
19726 // cross-thread fence.
19727 if (FenceOrdering == SequentiallyConsistent && FenceScope == CrossThread) {
19728 if (hasMFENCE(*Subtarget))
19729 return DAG.getNode(X86ISD::MFENCE, dl, MVT::Other, Op.getOperand(0));
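// Without MFENCE (pre-SSE2, 32-bit only), fall back to a LOCK'ed OR of zero
// into the top of the stack. The locked read-modify-write drains the store
// buffer and therefore acts as a full barrier.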
19731 SDValue Chain = Op.getOperand(0);
19732 SDValue Zero = DAG.getConstant(0, MVT::i32);
19733 SDValue Ops[] = {
19734 DAG.getRegister(X86::ESP, MVT::i32), // Base
19735 DAG.getTargetConstant(1, MVT::i8), // Scale
19736 DAG.getRegister(0, MVT::i32), // Index
19737 DAG.getTargetConstant(0, MVT::i32), // Disp
19738 DAG.getRegister(0, MVT::i32), // Segment.
19739 Zero,
19740 Chain
19741 };
19742 SDNode *Res = DAG.getMachineNode(X86::OR32mrLocked, dl, MVT::Other, Ops);
19743 return SDValue(Res, 0);
19746 // MEMBARRIER is a compiler barrier; it codegens to a no-op.
19747 return DAG.getNode(X86ISD::MEMBARRIER, dl, MVT::Other, Op.getOperand(0));
19750 static SDValue LowerCMP_SWAP(SDValue Op, const X86Subtarget *Subtarget,
19751 SelectionDAG &DAG) {
19752 MVT T = Op.getSimpleValueType();
19756 switch(T.SimpleTy) {
19757 default: llvm_unreachable("Invalid value type!");
19758 case MVT::i8: Reg = X86::AL; size = 1; break;
19759 case MVT::i16: Reg = X86::AX; size = 2; break;
19760 case MVT::i32: Reg = X86::EAX; size = 4; break;
19762 assert(Subtarget->is64Bit() && "Node not type legal!");
19763 Reg = X86::RAX; size = 8;
19766 SDValue cpIn = DAG.getCopyToReg(Op.getOperand(0), DL, Reg,
19767 Op.getOperand(2), SDValue());
19768 SDValue Ops[] = { cpIn.getValue(0),
19771 DAG.getTargetConstant(size, MVT::i8),
19772 cpIn.getValue(1) };
19773 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue);
19774 MachineMemOperand *MMO = cast<AtomicSDNode>(Op)->getMemOperand();
19775 SDValue Result = DAG.getMemIntrinsicNode(X86ISD::LCMPXCHG_DAG, DL, Tys,
19779 DAG.getCopyFromReg(Result.getValue(0), DL, Reg, T, Result.getValue(1));
19780 SDValue EFLAGS = DAG.getCopyFromReg(cpOut.getValue(1), DL, X86::EFLAGS,
19781 MVT::i32, cpOut.getValue(2));
19782 SDValue Success = DAG.getNode(X86ISD::SETCC, DL, Op->getValueType(1),
19783 DAG.getConstant(X86::COND_E, MVT::i8), EFLAGS);
19785 DAG.ReplaceAllUsesOfValueWith(Op.getValue(0), cpOut);
19786 DAG.ReplaceAllUsesOfValueWith(Op.getValue(1), Success);
19787 DAG.ReplaceAllUsesOfValueWith(Op.getValue(2), EFLAGS.getValue(1));
19791 static SDValue LowerBITCAST(SDValue Op, const X86Subtarget *Subtarget,
19792 SelectionDAG &DAG) {
19793 MVT SrcVT = Op.getOperand(0).getSimpleValueType();
19794 MVT DstVT = Op.getSimpleValueType();
19796 if (SrcVT == MVT::v2i32 || SrcVT == MVT::v4i16 || SrcVT == MVT::v8i8) {
19797 assert(Subtarget->hasSSE2() && "Requires at least SSE2!");
19798 if (DstVT != MVT::f64)
19799 // This conversion needs to be expanded.
19802 SDValue InVec = Op->getOperand(0);
19804 unsigned NumElts = SrcVT.getVectorNumElements();
19805 EVT SVT = SrcVT.getVectorElementType();
19807 // Widen the vector in input in the case of MVT::v2i32.
19808 // Example: from MVT::v2i32 to MVT::v4i32.
19809 SmallVector<SDValue, 16> Elts;
19810 for (unsigned i = 0, e = NumElts; i != e; ++i)
19811 Elts.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, SVT, InVec,
19812 DAG.getIntPtrConstant(i)));
19814 // Explicitly mark the extra elements as Undef.
19815 SDValue Undef = DAG.getUNDEF(SVT);
19816 for (unsigned i = NumElts, e = NumElts * 2; i != e; ++i)
19817 Elts.push_back(Undef);
19819 EVT NewVT = EVT::getVectorVT(*DAG.getContext(), SVT, NumElts * 2);
19820 SDValue BV = DAG.getNode(ISD::BUILD_VECTOR, dl, NewVT, Elts);
19821 SDValue ToV2F64 = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, BV);
19822 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, ToV2F64,
19823 DAG.getIntPtrConstant(0));
19826 assert(Subtarget->is64Bit() && !Subtarget->hasSSE2() &&
19827 Subtarget->hasMMX() && "Unexpected custom BITCAST");
19828 assert((DstVT == MVT::i64 ||
19829 (DstVT.isVector() && DstVT.getSizeInBits()==64)) &&
19830 "Unexpected custom BITCAST");
19831 // i64 <=> MMX conversions are Legal.
19832 if (SrcVT==MVT::i64 && DstVT.isVector())
19834 if (DstVT==MVT::i64 && SrcVT.isVector())
19836 // MMX <=> MMX conversions are Legal.
19837 if (SrcVT.isVector() && DstVT.isVector())
19839 // All other conversions need to be expanded.
19843 static SDValue LowerCTPOP(SDValue Op, const X86Subtarget *Subtarget,
19844 SelectionDAG &DAG) {
19845 SDNode *Node = Op.getNode();
19848 Op = Op.getOperand(0);
19849 EVT VT = Op.getValueType();
19850 assert((VT.is128BitVector() || VT.is256BitVector()) &&
19851 "CTPOP lowering only implemented for 128/256-bit wide vector types");
19853 unsigned NumElts = VT.getVectorNumElements();
19854 EVT EltVT = VT.getVectorElementType();
19855 unsigned Len = EltVT.getSizeInBits();
19857 // This is the vectorized version of the "best" algorithm from
19858 // http://graphics.stanford.edu/~seander/bithacks.html#CountBitsSetParallel
19859 // with a minor tweak to use a series of adds + shifts instead of vector
19860 // multiplications. Implemented for the v2i64, v4i64, v4i32, v8i32 types:
19862 // v2i64, v4i64, v4i32 => Only profitable w/ popcnt disabled
19863 // v8i32 => Always profitable
19865 // FIXME: There are a couple of possible improvements:
19867 // 1) Support for i8 and i16 vectors (needs measurements if popcnt enabled).
19868 // 2) Use strategies from http://wm.ite.pl/articles/sse-popcount.html
19870 assert(EltVT.isInteger() && (Len == 32 || Len == 64) && Len % 8 == 0 &&
19871 "CTPOP not implemented for this vector element type.");
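// Scalar reference for a single 32-bit element (the vector code below follows
// the same steps, using add+shift instead of the final multiply):
//   v = v - ((v >> 1) & 0x55555555);                 // 2-bit sums
//   v = (v & 0x33333333) + ((v >> 2) & 0x33333333);  // 4-bit sums
//   v = (v + (v >> 4)) & 0x0F0F0F0F;                 // 8-bit sums
//   v += v >> 8; v += v >> 16;                       // accumulate bytes
//   return v & 0x3F;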
19873 // X86 canonicalizes ANDs to vXi64; generate the appropriate bitcasts to avoid
19874 // extra legalization.
19875 bool NeedsBitcast = EltVT == MVT::i32;
19876 MVT BitcastVT = VT.is256BitVector() ? MVT::v4i64 : MVT::v2i64;
19878 SDValue Cst55 = DAG.getConstant(APInt::getSplat(Len, APInt(8, 0x55)), EltVT);
19879 SDValue Cst33 = DAG.getConstant(APInt::getSplat(Len, APInt(8, 0x33)), EltVT);
19880 SDValue Cst0F = DAG.getConstant(APInt::getSplat(Len, APInt(8, 0x0F)), EltVT);
19882 // v = v - ((v >> 1) & 0x55555555...)
19883 SmallVector<SDValue, 8> Ones(NumElts, DAG.getConstant(1, EltVT));
19884 SDValue OnesV = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Ones);
19885 SDValue Srl = DAG.getNode(ISD::SRL, dl, VT, Op, OnesV);
19887 Srl = DAG.getNode(ISD::BITCAST, dl, BitcastVT, Srl);
19889 SmallVector<SDValue, 8> Mask55(NumElts, Cst55);
19890 SDValue M55 = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Mask55);
19892 M55 = DAG.getNode(ISD::BITCAST, dl, BitcastVT, M55);
19894 SDValue And = DAG.getNode(ISD::AND, dl, Srl.getValueType(), Srl, M55);
19895 if (VT != And.getValueType())
19896 And = DAG.getNode(ISD::BITCAST, dl, VT, And);
19897 SDValue Sub = DAG.getNode(ISD::SUB, dl, VT, Op, And);
19899 // v = (v & 0x33333333...) + ((v >> 2) & 0x33333333...)
19900 SmallVector<SDValue, 8> Mask33(NumElts, Cst33);
19901 SDValue M33 = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Mask33);
19902 SmallVector<SDValue, 8> Twos(NumElts, DAG.getConstant(2, EltVT));
19903 SDValue TwosV = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Twos);
19905 Srl = DAG.getNode(ISD::SRL, dl, VT, Sub, TwosV);
19906 if (NeedsBitcast) {
19907 Srl = DAG.getNode(ISD::BITCAST, dl, BitcastVT, Srl);
19908 M33 = DAG.getNode(ISD::BITCAST, dl, BitcastVT, M33);
19909 Sub = DAG.getNode(ISD::BITCAST, dl, BitcastVT, Sub);
19912 SDValue AndRHS = DAG.getNode(ISD::AND, dl, M33.getValueType(), Srl, M33);
19913 SDValue AndLHS = DAG.getNode(ISD::AND, dl, M33.getValueType(), Sub, M33);
19914 if (VT != AndRHS.getValueType()) {
19915 AndRHS = DAG.getNode(ISD::BITCAST, dl, VT, AndRHS);
19916 AndLHS = DAG.getNode(ISD::BITCAST, dl, VT, AndLHS);
19918 SDValue Add = DAG.getNode(ISD::ADD, dl, VT, AndLHS, AndRHS);
19920 // v = (v + (v >> 4)) & 0x0F0F0F0F...
19921 SmallVector<SDValue, 8> Fours(NumElts, DAG.getConstant(4, EltVT));
19922 SDValue FoursV = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Fours);
19923 Srl = DAG.getNode(ISD::SRL, dl, VT, Add, FoursV);
19924 Add = DAG.getNode(ISD::ADD, dl, VT, Add, Srl);
19926 SmallVector<SDValue, 8> Mask0F(NumElts, Cst0F);
19927 SDValue M0F = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Mask0F);
19928 if (NeedsBitcast) {
19929 Add = DAG.getNode(ISD::BITCAST, dl, BitcastVT, Add);
19930 M0F = DAG.getNode(ISD::BITCAST, dl, BitcastVT, M0F);
19932 And = DAG.getNode(ISD::AND, dl, M0F.getValueType(), Add, M0F);
19933 if (VT != And.getValueType())
19934 And = DAG.getNode(ISD::BITCAST, dl, VT, And);
19936 // The algorithm mentioned above uses:
19937 // v = (v * 0x01010101...) >> (Len - 8)
19939 // Change it to use vector adds + vector shifts which yield faster results on
19940 // Haswell than using vector integer multiplication.
19942 // For i32 elements:
19943 // v = v + (v >> 8)
19944 // v = v + (v >> 16)
19946 // For i64 elements:
19947 // v = v + (v >> 8)
19948 // v = v + (v >> 16)
19949 // v = v + (v >> 32)
19952 SmallVector<SDValue, 8> Csts;
19953 for (unsigned i = 8; i <= Len/2; i *= 2) {
19954 Csts.assign(NumElts, DAG.getConstant(i, EltVT));
19955 SDValue CstsV = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Csts);
19956 Srl = DAG.getNode(ISD::SRL, dl, VT, Add, CstsV);
19957 Add = DAG.getNode(ISD::ADD, dl, VT, Add, Srl);
19961 // The result is in the least significant 6 bits for i32 elements and 7 bits for i64.
19962 SDValue Cst3F = DAG.getConstant(APInt(Len, Len == 32 ? 0x3F : 0x7F), EltVT);
19963 SmallVector<SDValue, 8> Cst3FV(NumElts, Cst3F);
19964 SDValue M3F = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Cst3FV);
19965 if (NeedsBitcast) {
19966 Add = DAG.getNode(ISD::BITCAST, dl, BitcastVT, Add);
19967 M3F = DAG.getNode(ISD::BITCAST, dl, BitcastVT, M3F);
19969 And = DAG.getNode(ISD::AND, dl, M3F.getValueType(), Add, M3F);
19970 if (VT != And.getValueType())
19971 And = DAG.getNode(ISD::BITCAST, dl, VT, And);
19976 static SDValue LowerLOAD_SUB(SDValue Op, SelectionDAG &DAG) {
19977 SDNode *Node = Op.getNode();
19979 EVT T = Node->getValueType(0);
19980 SDValue negOp = DAG.getNode(ISD::SUB, dl, T,
19981 DAG.getConstant(0, T), Node->getOperand(2));
19982 return DAG.getAtomic(ISD::ATOMIC_LOAD_ADD, dl,
19983 cast<AtomicSDNode>(Node)->getMemoryVT(),
19984 Node->getOperand(0),
19985 Node->getOperand(1), negOp,
19986 cast<AtomicSDNode>(Node)->getMemOperand(),
19987 cast<AtomicSDNode>(Node)->getOrdering(),
19988 cast<AtomicSDNode>(Node)->getSynchScope());
19991 static SDValue LowerATOMIC_STORE(SDValue Op, SelectionDAG &DAG) {
19992 SDNode *Node = Op.getNode();
19994 EVT VT = cast<AtomicSDNode>(Node)->getMemoryVT();
19996 // Convert seq_cst store -> xchg
19997 // Convert wide store -> swap (-> cmpxchg8b/cmpxchg16b)
19998 // FIXME: On 32-bit, store -> fist or movq would be more efficient
19999 // (The only way to get a 16-byte store is cmpxchg16b)
20000 // FIXME: 16-byte ATOMIC_SWAP isn't actually hooked up at the moment.
20001 if (cast<AtomicSDNode>(Node)->getOrdering() == SequentiallyConsistent ||
20002 !DAG.getTargetLoweringInfo().isTypeLegal(VT)) {
20003 SDValue Swap = DAG.getAtomic(ISD::ATOMIC_SWAP, dl,
20004 cast<AtomicSDNode>(Node)->getMemoryVT(),
20005 Node->getOperand(0),
20006 Node->getOperand(1), Node->getOperand(2),
20007 cast<AtomicSDNode>(Node)->getMemOperand(),
20008 cast<AtomicSDNode>(Node)->getOrdering(),
20009 cast<AtomicSDNode>(Node)->getSynchScope());
20010 return Swap.getValue(1);
20012 // Other atomic stores have a simple pattern.
20016 static SDValue LowerADDC_ADDE_SUBC_SUBE(SDValue Op, SelectionDAG &DAG) {
20017 EVT VT = Op.getNode()->getSimpleValueType(0);
20019 // Let legalize expand this if it isn't a legal type yet.
20020 if (!DAG.getTargetLoweringInfo().isTypeLegal(VT))
20023 SDVTList VTs = DAG.getVTList(VT, MVT::i32);
20026 bool ExtraOp = false;
20027 switch (Op.getOpcode()) {
20028 default: llvm_unreachable("Invalid code");
20029 case ISD::ADDC: Opc = X86ISD::ADD; break;
20030 case ISD::ADDE: Opc = X86ISD::ADC; ExtraOp = true; break;
20031 case ISD::SUBC: Opc = X86ISD::SUB; break;
20032 case ISD::SUBE: Opc = X86ISD::SBB; ExtraOp = true; break;
20036 return DAG.getNode(Opc, SDLoc(Op), VTs, Op.getOperand(0),
20038 return DAG.getNode(Opc, SDLoc(Op), VTs, Op.getOperand(0),
20039 Op.getOperand(1), Op.getOperand(2));
20042 static SDValue LowerFSINCOS(SDValue Op, const X86Subtarget *Subtarget,
20043 SelectionDAG &DAG) {
20044 assert(Subtarget->isTargetDarwin() && Subtarget->is64Bit());
20046 // For MacOSX, we want to call an alternative entry point: __sincos_stret,
20047 // which returns the values as { float, float } (in XMM0) or
20048 // { double, double } (which is returned in XMM0, XMM1).
20050 SDValue Arg = Op.getOperand(0);
20051 EVT ArgVT = Arg.getValueType();
20052 Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
20054 TargetLowering::ArgListTy Args;
20055 TargetLowering::ArgListEntry Entry;
20057 Entry.Node = Arg;
20058 Entry.Ty = ArgTy;
20059 Entry.isSExt = false;
20060 Entry.isZExt = false;
20061 Args.push_back(Entry);
20063 bool isF64 = ArgVT == MVT::f64;
20064 // Only optimize x86_64 for now. i386 is a bit messy. For f32,
20065 // the small struct {f32, f32} is returned in (eax, edx). For f64,
20066 // the results are returned via SRet in memory.
20067 const char *LibcallName = isF64 ? "__sincos_stret" : "__sincosf_stret";
20068 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
20069 SDValue Callee = DAG.getExternalSymbol(LibcallName, TLI.getPointerTy());
20071 Type *RetTy = isF64
20072 ? (Type*)StructType::get(ArgTy, ArgTy, nullptr)
20073 : (Type*)VectorType::get(ArgTy, 4);
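// For f32 the Darwin runtime packs the {sin, cos} pair into xmm0, so the
// call is modeled as returning a v4f32 and the two results are extracted
// from lanes 0 and 1 below; for f64 the pair comes back in xmm0/xmm1 as a
// two-element struct.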
20075 TargetLowering::CallLoweringInfo CLI(DAG);
20076 CLI.setDebugLoc(dl).setChain(DAG.getEntryNode())
20077 .setCallee(CallingConv::C, RetTy, Callee, std::move(Args), 0);
20079 std::pair<SDValue, SDValue> CallResult = TLI.LowerCallTo(CLI);
20082 // Returned in xmm0 and xmm1.
20083 return CallResult.first;
20085 // Returned in bits 0:31 and 32:64 xmm0.
20086 SDValue SinVal = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, ArgVT,
20087 CallResult.first, DAG.getIntPtrConstant(0));
20088 SDValue CosVal = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, ArgVT,
20089 CallResult.first, DAG.getIntPtrConstant(1));
20090 SDVTList Tys = DAG.getVTList(ArgVT, ArgVT);
20091 return DAG.getNode(ISD::MERGE_VALUES, dl, Tys, SinVal, CosVal);
20094 /// LowerOperation - Provide custom lowering hooks for some operations.
20096 SDValue X86TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
20097 switch (Op.getOpcode()) {
20098 default: llvm_unreachable("Should not custom lower this!");
20099 case ISD::SIGN_EXTEND_INREG: return LowerSIGN_EXTEND_INREG(Op,DAG);
20100 case ISD::ATOMIC_FENCE: return LowerATOMIC_FENCE(Op, Subtarget, DAG);
20101 case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS:
20102 return LowerCMP_SWAP(Op, Subtarget, DAG);
20103 case ISD::CTPOP: return LowerCTPOP(Op, Subtarget, DAG);
20104 case ISD::ATOMIC_LOAD_SUB: return LowerLOAD_SUB(Op,DAG);
20105 case ISD::ATOMIC_STORE: return LowerATOMIC_STORE(Op,DAG);
20106 case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG);
20107 case ISD::CONCAT_VECTORS: return LowerCONCAT_VECTORS(Op, DAG);
20108 case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG);
20109 case ISD::VSELECT: return LowerVSELECT(Op, DAG);
20110 case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG);
20111 case ISD::INSERT_VECTOR_ELT: return LowerINSERT_VECTOR_ELT(Op, DAG);
20112 case ISD::EXTRACT_SUBVECTOR: return LowerEXTRACT_SUBVECTOR(Op,Subtarget,DAG);
20113 case ISD::INSERT_SUBVECTOR: return LowerINSERT_SUBVECTOR(Op, Subtarget,DAG);
20114 case ISD::SCALAR_TO_VECTOR: return LowerSCALAR_TO_VECTOR(Op, DAG);
20115 case ISD::ConstantPool: return LowerConstantPool(Op, DAG);
20116 case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG);
20117 case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG);
20118 case ISD::ExternalSymbol: return LowerExternalSymbol(Op, DAG);
20119 case ISD::BlockAddress: return LowerBlockAddress(Op, DAG);
20120 case ISD::SHL_PARTS:
20121 case ISD::SRA_PARTS:
20122 case ISD::SRL_PARTS: return LowerShiftParts(Op, DAG);
20123 case ISD::SINT_TO_FP: return LowerSINT_TO_FP(Op, DAG);
20124 case ISD::UINT_TO_FP: return LowerUINT_TO_FP(Op, DAG);
20125 case ISD::TRUNCATE: return LowerTRUNCATE(Op, DAG);
20126 case ISD::ZERO_EXTEND: return LowerZERO_EXTEND(Op, Subtarget, DAG);
20127 case ISD::SIGN_EXTEND: return LowerSIGN_EXTEND(Op, Subtarget, DAG);
20128 case ISD::ANY_EXTEND: return LowerANY_EXTEND(Op, Subtarget, DAG);
20129 case ISD::FP_TO_SINT: return LowerFP_TO_SINT(Op, DAG);
20130 case ISD::FP_TO_UINT: return LowerFP_TO_UINT(Op, DAG);
20131 case ISD::FP_EXTEND: return LowerFP_EXTEND(Op, DAG);
20132 case ISD::LOAD: return LowerExtendedLoad(Op, Subtarget, DAG);
20134 case ISD::FNEG: return LowerFABSorFNEG(Op, DAG);
20135 case ISD::FCOPYSIGN: return LowerFCOPYSIGN(Op, DAG);
20136 case ISD::FGETSIGN: return LowerFGETSIGN(Op, DAG);
20137 case ISD::SETCC: return LowerSETCC(Op, DAG);
20138 case ISD::SELECT: return LowerSELECT(Op, DAG);
20139 case ISD::BRCOND: return LowerBRCOND(Op, DAG);
20140 case ISD::JumpTable: return LowerJumpTable(Op, DAG);
20141 case ISD::VASTART: return LowerVASTART(Op, DAG);
20142 case ISD::VAARG: return LowerVAARG(Op, DAG);
20143 case ISD::VACOPY: return LowerVACOPY(Op, Subtarget, DAG);
20144 case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, Subtarget, DAG);
20145 case ISD::INTRINSIC_VOID:
20146 case ISD::INTRINSIC_W_CHAIN: return LowerINTRINSIC_W_CHAIN(Op, Subtarget, DAG);
20147 case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG);
20148 case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG);
20149 case ISD::FRAME_TO_ARGS_OFFSET:
20150 return LowerFRAME_TO_ARGS_OFFSET(Op, DAG);
20151 case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG);
20152 case ISD::EH_RETURN: return LowerEH_RETURN(Op, DAG);
20153 case ISD::EH_SJLJ_SETJMP: return lowerEH_SJLJ_SETJMP(Op, DAG);
20154 case ISD::EH_SJLJ_LONGJMP: return lowerEH_SJLJ_LONGJMP(Op, DAG);
20155 case ISD::INIT_TRAMPOLINE: return LowerINIT_TRAMPOLINE(Op, DAG);
20156 case ISD::ADJUST_TRAMPOLINE: return LowerADJUST_TRAMPOLINE(Op, DAG);
20157 case ISD::FLT_ROUNDS_: return LowerFLT_ROUNDS_(Op, DAG);
20158 case ISD::CTLZ: return LowerCTLZ(Op, DAG);
20159 case ISD::CTLZ_ZERO_UNDEF: return LowerCTLZ_ZERO_UNDEF(Op, DAG);
20160 case ISD::CTTZ: return LowerCTTZ(Op, DAG);
20161 case ISD::MUL: return LowerMUL(Op, Subtarget, DAG);
20162 case ISD::UMUL_LOHI:
20163 case ISD::SMUL_LOHI: return LowerMUL_LOHI(Op, Subtarget, DAG);
20166 case ISD::SHL: return LowerShift(Op, Subtarget, DAG);
20172 case ISD::UMULO: return LowerXALUO(Op, DAG);
20173 case ISD::READCYCLECOUNTER: return LowerREADCYCLECOUNTER(Op, Subtarget,DAG);
20174 case ISD::BITCAST: return LowerBITCAST(Op, Subtarget, DAG);
20178 case ISD::SUBE: return LowerADDC_ADDE_SUBC_SUBE(Op, DAG);
20179 case ISD::ADD: return LowerADD(Op, DAG);
20180 case ISD::SUB: return LowerSUB(Op, DAG);
20181 case ISD::FSINCOS: return LowerFSINCOS(Op, Subtarget, DAG);
20185 /// ReplaceNodeResults - Replace a node with an illegal result type
20186 /// with a new node built out of custom code.
20187 void X86TargetLowering::ReplaceNodeResults(SDNode *N,
20188 SmallVectorImpl<SDValue>&Results,
20189 SelectionDAG &DAG) const {
20191 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
20192 switch (N->getOpcode()) {
20194 llvm_unreachable("Do not know how to custom type legalize this operation!");
20195 // We might have generated v2f32 FMIN/FMAX operations. Widen them to v4f32.
20196 case X86ISD::FMINC:
20198 case X86ISD::FMAXC:
20199 case X86ISD::FMAX: {
20200 EVT VT = N->getValueType(0);
20201 if (VT != MVT::v2f32)
20202 llvm_unreachable("Unexpected type (!= v2f32) on FMIN/FMAX.");
20203 SDValue UNDEF = DAG.getUNDEF(VT);
20204 SDValue LHS = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4f32,
20205 N->getOperand(0), UNDEF);
20206 SDValue RHS = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4f32,
20207 N->getOperand(1), UNDEF);
20208 Results.push_back(DAG.getNode(N->getOpcode(), dl, MVT::v4f32, LHS, RHS));
20209 return;
20210 }
20211 case ISD::SIGN_EXTEND_INREG:
20212 case ISD::ADDC:
20213 case ISD::ADDE:
20214 case ISD::SUBC:
20215 case ISD::SUBE:
20216 // We don't want to expand or promote these.
20217 return;
20218 case ISD::SDIV:
20219 case ISD::UDIV:
20220 case ISD::SREM:
20221 case ISD::UREM:
20222 case ISD::SDIVREM:
20223 case ISD::UDIVREM: {
20224 SDValue V = LowerWin64_i128OP(SDValue(N,0), DAG);
20225 Results.push_back(V);
20226 return;
20227 }
20228 case ISD::FP_TO_SINT:
20229 case ISD::FP_TO_UINT: {
20230 bool IsSigned = N->getOpcode() == ISD::FP_TO_SINT;
20232 if (!IsSigned && !isIntegerTypeFTOL(SDValue(N, 0).getValueType()))
20233 return;
20235 std::pair<SDValue,SDValue> Vals =
20236 FP_TO_INTHelper(SDValue(N, 0), DAG, IsSigned, /*IsReplace=*/ true);
20237 SDValue FIST = Vals.first, StackSlot = Vals.second;
20238 if (FIST.getNode()) {
20239 EVT VT = N->getValueType(0);
20240 // Return a load from the stack slot.
20241 if (StackSlot.getNode())
20242 Results.push_back(DAG.getLoad(VT, dl, FIST, StackSlot,
20243 MachinePointerInfo(),
20244 false, false, false, 0));
20245 else
20246 Results.push_back(FIST);
20247 }
20248 return;
20249 }
20250 case ISD::UINT_TO_FP: {
20251 assert(Subtarget->hasSSE2() && "Requires at least SSE2!");
20252 if (N->getOperand(0).getValueType() != MVT::v2i32 ||
20253 N->getValueType(0) != MVT::v2f32)
20254 return;
20255 SDValue ZExtIn = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v2i64,
20256 N->getOperand(0));
20257 SDValue Bias = DAG.getConstantFP(BitsToDouble(0x4330000000000000ULL),
20258 MVT::f64);
20259 SDValue VBias = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v2f64, Bias, Bias);
20260 SDValue Or = DAG.getNode(ISD::OR, dl, MVT::v2i64, ZExtIn,
20261 DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, VBias));
20262 Or = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Or);
20263 SDValue Sub = DAG.getNode(ISD::FSUB, dl, MVT::v2f64, Or, VBias);
20264 Results.push_back(DAG.getNode(X86ISD::VFPROUND, dl, MVT::v4f32, Sub));
20265 return;
20266 }
20267 case ISD::FP_ROUND: {
20268 if (!TLI.isTypeLegal(N->getOperand(0).getValueType()))
20269 return;
20270 SDValue V = DAG.getNode(X86ISD::VFPROUND, dl, MVT::v4f32, N->getOperand(0));
20271 Results.push_back(V);
20272 return;
20273 }
20274 case ISD::INTRINSIC_W_CHAIN: {
20275 unsigned IntNo = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
20276 switch (IntNo) {
20277 default : llvm_unreachable("Do not know how to custom type "
20278 "legalize this intrinsic operation!");
20279 case Intrinsic::x86_rdtsc:
20280 return getReadTimeStampCounter(N, dl, X86ISD::RDTSC_DAG, DAG, Subtarget,
20281 Results);
20282 case Intrinsic::x86_rdtscp:
20283 return getReadTimeStampCounter(N, dl, X86ISD::RDTSCP_DAG, DAG, Subtarget,
20284 Results);
20285 case Intrinsic::x86_rdpmc:
20286 return getReadPerformanceCounter(N, dl, DAG, Subtarget, Results);
20287 }
20288 }
20289 case ISD::READCYCLECOUNTER: {
20290 return getReadTimeStampCounter(N, dl, X86ISD::RDTSC_DAG, DAG, Subtarget,
20291 Results);
20292 }
20293 case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS: {
20294 EVT T = N->getValueType(0);
20295 assert((T == MVT::i64 || T == MVT::i128) && "can only expand cmpxchg pair");
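// For reference (assumed CMPXCHG8B/CMPXCHG16B convention, not restated from
// this file): the expected value travels in EDX:EAX (RDX:RAX), the replacement
// in ECX:EBX (RCX:RBX), the updated memory value comes back in EDX:EAX
// (RDX:RAX), and ZF in EFLAGS reports success; the register copies below
// follow that layout.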
20296 bool Regs64bit = T == MVT::i128;
20297 EVT HalfT = Regs64bit ? MVT::i64 : MVT::i32;
20298 SDValue cpInL, cpInH;
20299 cpInL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(2),
20300 DAG.getConstant(0, HalfT));
20301 cpInH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(2),
20302 DAG.getConstant(1, HalfT));
20303 cpInL = DAG.getCopyToReg(N->getOperand(0), dl,
20304 Regs64bit ? X86::RAX : X86::EAX,
20305 cpInL, SDValue());
20306 cpInH = DAG.getCopyToReg(cpInL.getValue(0), dl,
20307 Regs64bit ? X86::RDX : X86::EDX,
20308 cpInH, cpInL.getValue(1));
20309 SDValue swapInL, swapInH;
20310 swapInL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(3),
20311 DAG.getConstant(0, HalfT));
20312 swapInH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(3),
20313 DAG.getConstant(1, HalfT));
20314 swapInL = DAG.getCopyToReg(cpInH.getValue(0), dl,
20315 Regs64bit ? X86::RBX : X86::EBX,
20316 swapInL, cpInH.getValue(1));
20317 swapInH = DAG.getCopyToReg(swapInL.getValue(0), dl,
20318 Regs64bit ? X86::RCX : X86::ECX,
20319 swapInH, swapInL.getValue(1));
20320 SDValue Ops[] = { swapInH.getValue(0),
20321 N->getOperand(1),
20322 swapInH.getValue(1) };
20323 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue);
20324 MachineMemOperand *MMO = cast<AtomicSDNode>(N)->getMemOperand();
20325 unsigned Opcode = Regs64bit ? X86ISD::LCMPXCHG16_DAG :
20326 X86ISD::LCMPXCHG8_DAG;
20327 SDValue Result = DAG.getMemIntrinsicNode(Opcode, dl, Tys, Ops, T, MMO);
20328 SDValue cpOutL = DAG.getCopyFromReg(Result.getValue(0), dl,
20329 Regs64bit ? X86::RAX : X86::EAX,
20330 HalfT, Result.getValue(1));
20331 SDValue cpOutH = DAG.getCopyFromReg(cpOutL.getValue(1), dl,
20332 Regs64bit ? X86::RDX : X86::EDX,
20333 HalfT, cpOutL.getValue(2));
20334 SDValue OpsF[] = { cpOutL.getValue(0), cpOutH.getValue(0)};
20336 SDValue EFLAGS = DAG.getCopyFromReg(cpOutH.getValue(1), dl, X86::EFLAGS,
20337 MVT::i32, cpOutH.getValue(2));
20338 SDValue Success =
20339 DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
20340 DAG.getConstant(X86::COND_E, MVT::i8), EFLAGS);
20341 Success = DAG.getZExtOrTrunc(Success, dl, N->getValueType(1));
20343 Results.push_back(DAG.getNode(ISD::BUILD_PAIR, dl, T, OpsF));
20344 Results.push_back(Success);
20345 Results.push_back(EFLAGS.getValue(1));
20346 return;
20347 }
20348 case ISD::ATOMIC_SWAP:
20349 case ISD::ATOMIC_LOAD_ADD:
20350 case ISD::ATOMIC_LOAD_SUB:
20351 case ISD::ATOMIC_LOAD_AND:
20352 case ISD::ATOMIC_LOAD_OR:
20353 case ISD::ATOMIC_LOAD_XOR:
20354 case ISD::ATOMIC_LOAD_NAND:
20355 case ISD::ATOMIC_LOAD_MIN:
20356 case ISD::ATOMIC_LOAD_MAX:
20357 case ISD::ATOMIC_LOAD_UMIN:
20358 case ISD::ATOMIC_LOAD_UMAX:
20359 case ISD::ATOMIC_LOAD: {
20360 // Delegate to generic TypeLegalization. Situations we can really handle
20361 // should have already been dealt with by AtomicExpandPass.cpp.
20362 return;
20363 }
20364 case ISD::BITCAST: {
20365 assert(Subtarget->hasSSE2() && "Requires at least SSE2!");
20366 EVT DstVT = N->getValueType(0);
20367 EVT SrcVT = N->getOperand(0)->getValueType(0);
20369 if (SrcVT != MVT::f64 ||
20370 (DstVT != MVT::v2i32 && DstVT != MVT::v4i16 && DstVT != MVT::v8i8))
20371 return;
20373 unsigned NumElts = DstVT.getVectorNumElements();
20374 EVT SVT = DstVT.getVectorElementType();
20375 EVT WiderVT = EVT::getVectorVT(*DAG.getContext(), SVT, NumElts * 2);
20376 SDValue Expanded = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl,
20377 MVT::v2f64, N->getOperand(0));
20378 SDValue ToVecInt = DAG.getNode(ISD::BITCAST, dl, WiderVT, Expanded);
20380 if (ExperimentalVectorWideningLegalization) {
20381 // If we are legalizing vectors by widening, we already have the desired
20382 // legal vector type, just return it.
20383 Results.push_back(ToVecInt);
20384 return;
20385 }
20387 SmallVector<SDValue, 8> Elts;
20388 for (unsigned i = 0, e = NumElts; i != e; ++i)
20389 Elts.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, SVT,
20390 ToVecInt, DAG.getIntPtrConstant(i)));
20392 Results.push_back(DAG.getNode(ISD::BUILD_VECTOR, dl, DstVT, Elts));
20393 }
20394 }
20395 }
20397 const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const {
20398 switch (Opcode) {
20399 default: return nullptr;
20400 case X86ISD::BSF: return "X86ISD::BSF";
20401 case X86ISD::BSR: return "X86ISD::BSR";
20402 case X86ISD::SHLD: return "X86ISD::SHLD";
20403 case X86ISD::SHRD: return "X86ISD::SHRD";
20404 case X86ISD::FAND: return "X86ISD::FAND";
20405 case X86ISD::FANDN: return "X86ISD::FANDN";
20406 case X86ISD::FOR: return "X86ISD::FOR";
20407 case X86ISD::FXOR: return "X86ISD::FXOR";
20408 case X86ISD::FSRL: return "X86ISD::FSRL";
20409 case X86ISD::FILD: return "X86ISD::FILD";
20410 case X86ISD::FILD_FLAG: return "X86ISD::FILD_FLAG";
20411 case X86ISD::FP_TO_INT16_IN_MEM: return "X86ISD::FP_TO_INT16_IN_MEM";
20412 case X86ISD::FP_TO_INT32_IN_MEM: return "X86ISD::FP_TO_INT32_IN_MEM";
20413 case X86ISD::FP_TO_INT64_IN_MEM: return "X86ISD::FP_TO_INT64_IN_MEM";
20414 case X86ISD::FLD: return "X86ISD::FLD";
20415 case X86ISD::FST: return "X86ISD::FST";
20416 case X86ISD::CALL: return "X86ISD::CALL";
20417 case X86ISD::RDTSC_DAG: return "X86ISD::RDTSC_DAG";
20418 case X86ISD::RDTSCP_DAG: return "X86ISD::RDTSCP_DAG";
20419 case X86ISD::RDPMC_DAG: return "X86ISD::RDPMC_DAG";
20420 case X86ISD::BT: return "X86ISD::BT";
20421 case X86ISD::CMP: return "X86ISD::CMP";
20422 case X86ISD::COMI: return "X86ISD::COMI";
20423 case X86ISD::UCOMI: return "X86ISD::UCOMI";
20424 case X86ISD::CMPM: return "X86ISD::CMPM";
20425 case X86ISD::CMPMU: return "X86ISD::CMPMU";
20426 case X86ISD::SETCC: return "X86ISD::SETCC";
20427 case X86ISD::SETCC_CARRY: return "X86ISD::SETCC_CARRY";
20428 case X86ISD::FSETCC: return "X86ISD::FSETCC";
20429 case X86ISD::CMOV: return "X86ISD::CMOV";
20430 case X86ISD::BRCOND: return "X86ISD::BRCOND";
20431 case X86ISD::RET_FLAG: return "X86ISD::RET_FLAG";
20432 case X86ISD::REP_STOS: return "X86ISD::REP_STOS";
20433 case X86ISD::REP_MOVS: return "X86ISD::REP_MOVS";
20434 case X86ISD::GlobalBaseReg: return "X86ISD::GlobalBaseReg";
20435 case X86ISD::Wrapper: return "X86ISD::Wrapper";
20436 case X86ISD::WrapperRIP: return "X86ISD::WrapperRIP";
20437 case X86ISD::PEXTRB: return "X86ISD::PEXTRB";
20438 case X86ISD::PEXTRW: return "X86ISD::PEXTRW";
20439 case X86ISD::INSERTPS: return "X86ISD::INSERTPS";
20440 case X86ISD::PINSRB: return "X86ISD::PINSRB";
20441 case X86ISD::PINSRW: return "X86ISD::PINSRW";
20442 case X86ISD::PSHUFB: return "X86ISD::PSHUFB";
20443 case X86ISD::ANDNP: return "X86ISD::ANDNP";
20444 case X86ISD::PSIGN: return "X86ISD::PSIGN";
20445 case X86ISD::BLENDI: return "X86ISD::BLENDI";
20446 case X86ISD::SHRUNKBLEND: return "X86ISD::SHRUNKBLEND";
20447 case X86ISD::SUBUS: return "X86ISD::SUBUS";
20448 case X86ISD::HADD: return "X86ISD::HADD";
20449 case X86ISD::HSUB: return "X86ISD::HSUB";
20450 case X86ISD::FHADD: return "X86ISD::FHADD";
20451 case X86ISD::FHSUB: return "X86ISD::FHSUB";
20452 case X86ISD::UMAX: return "X86ISD::UMAX";
20453 case X86ISD::UMIN: return "X86ISD::UMIN";
20454 case X86ISD::SMAX: return "X86ISD::SMAX";
20455 case X86ISD::SMIN: return "X86ISD::SMIN";
20456 case X86ISD::FMAX: return "X86ISD::FMAX";
20457 case X86ISD::FMIN: return "X86ISD::FMIN";
20458 case X86ISD::FMAXC: return "X86ISD::FMAXC";
20459 case X86ISD::FMINC: return "X86ISD::FMINC";
20460 case X86ISD::FRSQRT: return "X86ISD::FRSQRT";
20461 case X86ISD::FRCP: return "X86ISD::FRCP";
20462 case X86ISD::TLSADDR: return "X86ISD::TLSADDR";
20463 case X86ISD::TLSBASEADDR: return "X86ISD::TLSBASEADDR";
20464 case X86ISD::TLSCALL: return "X86ISD::TLSCALL";
20465 case X86ISD::EH_SJLJ_SETJMP: return "X86ISD::EH_SJLJ_SETJMP";
20466 case X86ISD::EH_SJLJ_LONGJMP: return "X86ISD::EH_SJLJ_LONGJMP";
20467 case X86ISD::EH_RETURN: return "X86ISD::EH_RETURN";
20468 case X86ISD::TC_RETURN: return "X86ISD::TC_RETURN";
20469 case X86ISD::FNSTCW16m: return "X86ISD::FNSTCW16m";
20470 case X86ISD::FNSTSW16r: return "X86ISD::FNSTSW16r";
20471 case X86ISD::LCMPXCHG_DAG: return "X86ISD::LCMPXCHG_DAG";
20472 case X86ISD::LCMPXCHG8_DAG: return "X86ISD::LCMPXCHG8_DAG";
20473 case X86ISD::LCMPXCHG16_DAG: return "X86ISD::LCMPXCHG16_DAG";
20474 case X86ISD::VZEXT_MOVL: return "X86ISD::VZEXT_MOVL";
20475 case X86ISD::VZEXT_LOAD: return "X86ISD::VZEXT_LOAD";
20476 case X86ISD::VZEXT: return "X86ISD::VZEXT";
20477 case X86ISD::VSEXT: return "X86ISD::VSEXT";
20478 case X86ISD::VTRUNC: return "X86ISD::VTRUNC";
20479 case X86ISD::VTRUNCM: return "X86ISD::VTRUNCM";
20480 case X86ISD::VINSERT: return "X86ISD::VINSERT";
20481 case X86ISD::VFPEXT: return "X86ISD::VFPEXT";
20482 case X86ISD::VFPROUND: return "X86ISD::VFPROUND";
20483 case X86ISD::VSHLDQ: return "X86ISD::VSHLDQ";
20484 case X86ISD::VSRLDQ: return "X86ISD::VSRLDQ";
20485 case X86ISD::VSHL: return "X86ISD::VSHL";
20486 case X86ISD::VSRL: return "X86ISD::VSRL";
20487 case X86ISD::VSRA: return "X86ISD::VSRA";
20488 case X86ISD::VSHLI: return "X86ISD::VSHLI";
20489 case X86ISD::VSRLI: return "X86ISD::VSRLI";
20490 case X86ISD::VSRAI: return "X86ISD::VSRAI";
20491 case X86ISD::CMPP: return "X86ISD::CMPP";
20492 case X86ISD::PCMPEQ: return "X86ISD::PCMPEQ";
20493 case X86ISD::PCMPGT: return "X86ISD::PCMPGT";
20494 case X86ISD::PCMPEQM: return "X86ISD::PCMPEQM";
20495 case X86ISD::PCMPGTM: return "X86ISD::PCMPGTM";
20496 case X86ISD::ADD: return "X86ISD::ADD";
20497 case X86ISD::SUB: return "X86ISD::SUB";
20498 case X86ISD::ADC: return "X86ISD::ADC";
20499 case X86ISD::SBB: return "X86ISD::SBB";
20500 case X86ISD::SMUL: return "X86ISD::SMUL";
20501 case X86ISD::UMUL: return "X86ISD::UMUL";
20502 case X86ISD::SMUL8: return "X86ISD::SMUL8";
20503 case X86ISD::UMUL8: return "X86ISD::UMUL8";
20504 case X86ISD::SDIVREM8_SEXT_HREG: return "X86ISD::SDIVREM8_SEXT_HREG";
20505 case X86ISD::UDIVREM8_ZEXT_HREG: return "X86ISD::UDIVREM8_ZEXT_HREG";
20506 case X86ISD::INC: return "X86ISD::INC";
20507 case X86ISD::DEC: return "X86ISD::DEC";
20508 case X86ISD::OR: return "X86ISD::OR";
20509 case X86ISD::XOR: return "X86ISD::XOR";
20510 case X86ISD::AND: return "X86ISD::AND";
20511 case X86ISD::BEXTR: return "X86ISD::BEXTR";
20512 case X86ISD::MUL_IMM: return "X86ISD::MUL_IMM";
20513 case X86ISD::PTEST: return "X86ISD::PTEST";
20514 case X86ISD::TESTP: return "X86ISD::TESTP";
20515 case X86ISD::TESTM: return "X86ISD::TESTM";
20516 case X86ISD::TESTNM: return "X86ISD::TESTNM";
20517 case X86ISD::KORTEST: return "X86ISD::KORTEST";
20518 case X86ISD::PACKSS: return "X86ISD::PACKSS";
20519 case X86ISD::PACKUS: return "X86ISD::PACKUS";
20520 case X86ISD::PALIGNR: return "X86ISD::PALIGNR";
20521 case X86ISD::VALIGN: return "X86ISD::VALIGN";
20522 case X86ISD::PSHUFD: return "X86ISD::PSHUFD";
20523 case X86ISD::PSHUFHW: return "X86ISD::PSHUFHW";
20524 case X86ISD::PSHUFLW: return "X86ISD::PSHUFLW";
20525 case X86ISD::SHUFP: return "X86ISD::SHUFP";
20526 case X86ISD::MOVLHPS: return "X86ISD::MOVLHPS";
20527 case X86ISD::MOVLHPD: return "X86ISD::MOVLHPD";
20528 case X86ISD::MOVHLPS: return "X86ISD::MOVHLPS";
20529 case X86ISD::MOVLPS: return "X86ISD::MOVLPS";
20530 case X86ISD::MOVLPD: return "X86ISD::MOVLPD";
20531 case X86ISD::MOVDDUP: return "X86ISD::MOVDDUP";
20532 case X86ISD::MOVSHDUP: return "X86ISD::MOVSHDUP";
20533 case X86ISD::MOVSLDUP: return "X86ISD::MOVSLDUP";
20534 case X86ISD::MOVSD: return "X86ISD::MOVSD";
20535 case X86ISD::MOVSS: return "X86ISD::MOVSS";
20536 case X86ISD::UNPCKL: return "X86ISD::UNPCKL";
20537 case X86ISD::UNPCKH: return "X86ISD::UNPCKH";
20538 case X86ISD::VBROADCAST: return "X86ISD::VBROADCAST";
20539 case X86ISD::VBROADCASTM: return "X86ISD::VBROADCASTM";
20540 case X86ISD::VEXTRACT: return "X86ISD::VEXTRACT";
20541 case X86ISD::VPERMILPI: return "X86ISD::VPERMILPI";
20542 case X86ISD::VPERM2X128: return "X86ISD::VPERM2X128";
20543 case X86ISD::VPERMV: return "X86ISD::VPERMV";
20544 case X86ISD::VPERMV3: return "X86ISD::VPERMV3";
20545 case X86ISD::VPERMIV3: return "X86ISD::VPERMIV3";
20546 case X86ISD::VPERMI: return "X86ISD::VPERMI";
20547 case X86ISD::PMULUDQ: return "X86ISD::PMULUDQ";
20548 case X86ISD::PMULDQ: return "X86ISD::PMULDQ";
20549 case X86ISD::VASTART_SAVE_XMM_REGS: return "X86ISD::VASTART_SAVE_XMM_REGS";
20550 case X86ISD::VAARG_64: return "X86ISD::VAARG_64";
20551 case X86ISD::WIN_ALLOCA: return "X86ISD::WIN_ALLOCA";
20552 case X86ISD::MEMBARRIER: return "X86ISD::MEMBARRIER";
20553 case X86ISD::SEG_ALLOCA: return "X86ISD::SEG_ALLOCA";
20554 case X86ISD::WIN_FTOL: return "X86ISD::WIN_FTOL";
20555 case X86ISD::SAHF: return "X86ISD::SAHF";
20556 case X86ISD::RDRAND: return "X86ISD::RDRAND";
20557 case X86ISD::RDSEED: return "X86ISD::RDSEED";
20558 case X86ISD::FMADD: return "X86ISD::FMADD";
20559 case X86ISD::FMSUB: return "X86ISD::FMSUB";
20560 case X86ISD::FNMADD: return "X86ISD::FNMADD";
20561 case X86ISD::FNMSUB: return "X86ISD::FNMSUB";
20562 case X86ISD::FMADDSUB: return "X86ISD::FMADDSUB";
20563 case X86ISD::FMSUBADD: return "X86ISD::FMSUBADD";
20564 case X86ISD::PCMPESTRI: return "X86ISD::PCMPESTRI";
20565 case X86ISD::PCMPISTRI: return "X86ISD::PCMPISTRI";
20566 case X86ISD::XTEST: return "X86ISD::XTEST";
20567 case X86ISD::COMPRESS: return "X86ISD::COMPRESS";
20568 case X86ISD::EXPAND: return "X86ISD::EXPAND";
20569 case X86ISD::SELECT: return "X86ISD::SELECT";
20570 case X86ISD::ADDSUB: return "X86ISD::ADDSUB";
20571 case X86ISD::RCP28: return "X86ISD::RCP28";
20572 case X86ISD::RSQRT28: return "X86ISD::RSQRT28";
20573 }
20574 }
20576 // isLegalAddressingMode - Return true if the addressing mode represented
20577 // by AM is legal for this target, for a load/store of the specified type.
20578 bool X86TargetLowering::isLegalAddressingMode(const AddrMode &AM,
20579 Type *Ty) const {
20580 // X86 supports extremely general addressing modes.
20581 CodeModel::Model M = getTargetMachine().getCodeModel();
20582 Reloc::Model R = getTargetMachine().getRelocationModel();
20584 // X86 allows a sign-extended 32-bit immediate field as a displacement.
20585 if (!X86::isOffsetSuitableForCodeModel(AM.BaseOffs, M, AM.BaseGV != nullptr))
20586 return false;
20588 if (AM.BaseGV) {
20589 unsigned GVFlags =
20590 Subtarget->ClassifyGlobalReference(AM.BaseGV, getTargetMachine());
20592 // If a reference to this global requires an extra load, we can't fold it.
20593 if (isGlobalStubReference(GVFlags))
20594 return false;
20596 // If BaseGV requires a register for the PIC base, we cannot also have a
20597 // BaseReg specified.
20598 if (AM.HasBaseReg && isGlobalRelativeToPICBase(GVFlags))
20599 return false;
20601 // If lower 4G is not available, then we must use rip-relative addressing.
20602 if ((M != CodeModel::Small || R != Reloc::Static) &&
20603 Subtarget->is64Bit() && (AM.BaseOffs || AM.Scale > 1))
20604 return false;
20605 }
20607 switch (AM.Scale) {
20608 case 0:
20609 case 1:
20610 case 2:
20611 case 4:
20612 case 8:
20613 // These scales always work.
20614 break;
20615 case 3:
20616 case 5:
20617 case 9:
20618 // These scales are formed with basereg+scalereg. Only accept if there is
20619 // no basereg yet.
20620 if (AM.HasBaseReg)
20621 return false;
20622 break;
20623 default: // Other stuff never works.
20624 return false;
20625 }
20627 return true;
20628 }
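// Worked examples (hypothetical operands, for illustration only): a mode like
// base + 4*index + 16 ("16(%rdi,%rcx,4)") hits the Scale == 4 case above,
// while Scale == 9 with no base register is what later materializes as
// "lea (%rax,%rax,8), %rax", reusing one register as both base and scaled
// index.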
20630 bool X86TargetLowering::isVectorShiftByScalarCheap(Type *Ty) const {
20631 unsigned Bits = Ty->getScalarSizeInBits();
20633 // 8-bit shifts are always expensive, but versions with a scalar amount aren't
20634 // particularly cheaper than those without.
20635 if (Bits == 8)
20636 return false;
20638 // On AVX2 there are new vpsllv[dq] instructions (and other shifts), that make
20639 // variable shifts just as cheap as scalar ones.
20640 if (Subtarget->hasInt256() && (Bits == 32 || Bits == 64))
20641 return false;
20643 // Otherwise, it's significantly cheaper to shift by a scalar amount than by a
20644 // fully general vector.
20645 return true;
20646 }
20648 bool X86TargetLowering::isTruncateFree(Type *Ty1, Type *Ty2) const {
20649 if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
20650 return false;
20651 unsigned NumBits1 = Ty1->getPrimitiveSizeInBits();
20652 unsigned NumBits2 = Ty2->getPrimitiveSizeInBits();
20653 return NumBits1 > NumBits2;
20654 }
20656 bool X86TargetLowering::allowTruncateForTailCall(Type *Ty1, Type *Ty2) const {
20657 if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
20658 return false;
20660 if (!isTypeLegal(EVT::getEVT(Ty1)))
20661 return false;
20663 assert(Ty1->getPrimitiveSizeInBits() <= 64 && "i128 is probably not a noop");
20665 // Assuming the caller doesn't have a zeroext or signext return parameter,
20666 // truncation all the way down to i1 is valid.
20667 return true;
20668 }
20670 bool X86TargetLowering::isLegalICmpImmediate(int64_t Imm) const {
20671 return isInt<32>(Imm);
20672 }
20674 bool X86TargetLowering::isLegalAddImmediate(int64_t Imm) const {
20675 // Can also use sub to handle negated immediates.
20676 return isInt<32>(Imm);
20677 }
20679 bool X86TargetLowering::isTruncateFree(EVT VT1, EVT VT2) const {
20680 if (!VT1.isInteger() || !VT2.isInteger())
20681 return false;
20682 unsigned NumBits1 = VT1.getSizeInBits();
20683 unsigned NumBits2 = VT2.getSizeInBits();
20684 return NumBits1 > NumBits2;
20685 }
20687 bool X86TargetLowering::isZExtFree(Type *Ty1, Type *Ty2) const {
20688 // x86-64 implicitly zero-extends 32-bit results in 64-bit registers.
20689 return Ty1->isIntegerTy(32) && Ty2->isIntegerTy(64) && Subtarget->is64Bit();
20690 }
20692 bool X86TargetLowering::isZExtFree(EVT VT1, EVT VT2) const {
20693 // x86-64 implicitly zero-extends 32-bit results in 64-bit registers.
20694 return VT1 == MVT::i32 && VT2 == MVT::i64 && Subtarget->is64Bit();
20695 }
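// Illustrative example (standard x86-64 behaviour): a 32-bit register write
// such as "movl %edi, %eax" already clears bits 63:32 of %rax, so the
// i32 -> i64 zero-extension treated as free above needs no extra instruction.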
20697 bool X86TargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
20698 EVT VT1 = Val.getValueType();
20699 if (isZExtFree(VT1, VT2))
20700 return true;
20702 if (Val.getOpcode() != ISD::LOAD)
20703 return false;
20705 if (!VT1.isSimple() || !VT1.isInteger() ||
20706 !VT2.isSimple() || !VT2.isInteger())
20707 return false;
20709 switch (VT1.getSimpleVT().SimpleTy) {
20710 default: break;
20711 case MVT::i8:
20712 case MVT::i16:
20713 case MVT::i32:
20714 // X86 has 8, 16, and 32-bit zero-extending loads.
20715 return true;
20716 }
20718 return false;
20719 }
20721 bool X86TargetLowering::isVectorLoadExtDesirable(SDValue) const { return true; }
20723 bool
20724 X86TargetLowering::isFMAFasterThanFMulAndFAdd(EVT VT) const {
20725 if (!(Subtarget->hasFMA() || Subtarget->hasFMA4()))
20726 return false;
20728 VT = VT.getScalarType();
20730 if (!VT.isSimple())
20731 return false;
20733 switch (VT.getSimpleVT().SimpleTy) {
20734 case MVT::f32:
20735 case MVT::f64:
20736 return true;
20737 default:
20738 break;
20739 }
20741 return false;
20742 }
20744 bool X86TargetLowering::isNarrowingProfitable(EVT VT1, EVT VT2) const {
20745 // i16 instructions are longer (0x66 prefix) and potentially slower.
20746 return !(VT1 == MVT::i32 && VT2 == MVT::i16);
20747 }
20749 /// isShuffleMaskLegal - Targets can use this to indicate that they only
20750 /// support *some* VECTOR_SHUFFLE operations, those with specific masks.
20751 /// By default, if a target supports the VECTOR_SHUFFLE node, all mask values
20752 /// are assumed to be legal.
20753 bool
20754 X86TargetLowering::isShuffleMaskLegal(const SmallVectorImpl<int> &M,
20755 EVT VT) const {
20756 if (!VT.isSimple())
20757 return false;
20759 MVT SVT = VT.getSimpleVT();
20761 // Very little shuffling can be done for 64-bit vectors right now.
20762 if (VT.getSizeInBits() == 64)
20763 return false;
20765 // This is an experimental legality test that is tailored to match the
20766 // legality test of the experimental lowering more closely. They are gated
20767 // separately to ease testing of performance differences.
20768 if (ExperimentalVectorShuffleLegality)
20769 // We only care that the types being shuffled are legal. The lowering can
20770 // handle any possible shuffle mask that results.
20771 return isTypeLegal(SVT);
20773 // If this is a single-input shuffle with no 128 bit lane crossings we can
20774 // lower it into pshufb.
20775 if ((SVT.is128BitVector() && Subtarget->hasSSSE3()) ||
20776 (SVT.is256BitVector() && Subtarget->hasInt256())) {
20777 bool isLegal = true;
20778 for (unsigned I = 0, E = M.size(); I != E; ++I) {
20779 if (M[I] >= (int)SVT.getVectorNumElements() ||
20780 ShuffleCrosses128bitLane(SVT, I, M[I])) {
20781 isLegal = false;
20782 break;
20783 }
20784 }
20785 if (isLegal)
20786 return true;
20787 }
20789 // FIXME: blends, shifts.
20790 return (SVT.getVectorNumElements() == 2 ||
20791 ShuffleVectorSDNode::isSplatMask(&M[0], VT) ||
20792 isMOVLMask(M, SVT) ||
20793 isCommutedMOVLMask(M, SVT) ||
20794 isMOVHLPSMask(M, SVT) ||
20795 isSHUFPMask(M, SVT) ||
20796 isSHUFPMask(M, SVT, /* Commuted */ true) ||
20797 isPSHUFDMask(M, SVT) ||
20798 isPSHUFDMask(M, SVT, /* SecondOperand */ true) ||
20799 isPSHUFHWMask(M, SVT, Subtarget->hasInt256()) ||
20800 isPSHUFLWMask(M, SVT, Subtarget->hasInt256()) ||
20801 isPALIGNRMask(M, SVT, Subtarget) ||
20802 isUNPCKLMask(M, SVT, Subtarget->hasInt256()) ||
20803 isUNPCKHMask(M, SVT, Subtarget->hasInt256()) ||
20804 isUNPCKL_v_undef_Mask(M, SVT, Subtarget->hasInt256()) ||
20805 isUNPCKH_v_undef_Mask(M, SVT, Subtarget->hasInt256()) ||
20806 isBlendMask(M, SVT, Subtarget->hasSSE41(), Subtarget->hasInt256()) ||
20807 (Subtarget->hasSSE41() && isINSERTPSMask(M, SVT)));
20808 }
20810 bool
20811 X86TargetLowering::isVectorClearMaskLegal(const SmallVectorImpl<int> &Mask,
20812 EVT VT) const {
20813 if (!VT.isSimple())
20814 return false;
20816 MVT SVT = VT.getSimpleVT();
20818 // This is an experimental legality test that is tailored to match the
20819 // legality test of the experimental lowering more closely. They are gated
20820 // separately to ease testing of performance differences.
20821 if (ExperimentalVectorShuffleLegality)
20822 // The new vector shuffle lowering is very good at managing zero-inputs.
20823 return isShuffleMaskLegal(Mask, VT);
20825 unsigned NumElts = SVT.getVectorNumElements();
20826 // FIXME: This collection of masks seems suspect.
20827 if (NumElts == 2)
20828 return true;
20829 if (NumElts == 4 && SVT.is128BitVector()) {
20830 return (isMOVLMask(Mask, SVT) ||
20831 isCommutedMOVLMask(Mask, SVT, true) ||
20832 isSHUFPMask(Mask, SVT) ||
20833 isSHUFPMask(Mask, SVT, /* Commuted */ true) ||
20834 isBlendMask(Mask, SVT, Subtarget->hasSSE41(),
20835 Subtarget->hasInt256()));
20836 }
20838 return false;
20839 }
20840 //===----------------------------------------------------------------------===//
20841 // X86 Scheduler Hooks
20842 //===----------------------------------------------------------------------===//
20844 /// Utility function to emit xbegin specifying the start of an RTM region.
20845 static MachineBasicBlock *EmitXBegin(MachineInstr *MI, MachineBasicBlock *MBB,
20846 const TargetInstrInfo *TII) {
20847 DebugLoc DL = MI->getDebugLoc();
20849 const BasicBlock *BB = MBB->getBasicBlock();
20850 MachineFunction::iterator I = MBB;
20851 ++I;
20853 // For the v = xbegin(), we generate
20854 //
20855 // thisMBB:
20856 //  xbegin sinkMBB
20857 //
20858 // mainMBB:
20859 //  eax = -1
20860 //
20861 // sinkMBB:
20862 //  v = eax
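// Background sketch (assumed RTM semantics, not restated from this file): if
// the transaction starts, XBEGIN falls through with EAX reading as -1; on an
// abort the hardware jumps to the fallback label (sinkMBB here) with the abort
// status already in EAX, which is why both paths simply copy EAX into the
// result.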
20864 MachineBasicBlock *thisMBB = MBB;
20865 MachineFunction *MF = MBB->getParent();
20866 MachineBasicBlock *mainMBB = MF->CreateMachineBasicBlock(BB);
20867 MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB);
20868 MF->insert(I, mainMBB);
20869 MF->insert(I, sinkMBB);
20871 // Transfer the remainder of BB and its successor edges to sinkMBB.
20872 sinkMBB->splice(sinkMBB->begin(), MBB,
20873 std::next(MachineBasicBlock::iterator(MI)), MBB->end());
20874 sinkMBB->transferSuccessorsAndUpdatePHIs(MBB);
20878 // # fallthrough to mainMBB
20879 // # abortion to sinkMBB
20880 BuildMI(thisMBB, DL, TII->get(X86::XBEGIN_4)).addMBB(sinkMBB);
20881 thisMBB->addSuccessor(mainMBB);
20882 thisMBB->addSuccessor(sinkMBB);
20886 BuildMI(mainMBB, DL, TII->get(X86::MOV32ri), X86::EAX).addImm(-1);
20887 mainMBB->addSuccessor(sinkMBB);
20890 // EAX is live into the sinkMBB
20891 sinkMBB->addLiveIn(X86::EAX);
20892 BuildMI(*sinkMBB, sinkMBB->begin(), DL,
20893 TII->get(TargetOpcode::COPY), MI->getOperand(0).getReg())
20894 .addReg(X86::EAX);
20896 MI->eraseFromParent();
20897 return sinkMBB;
20898 }
20900 // FIXME: When we get size specific XMM0 registers, i.e. XMM0_V16I8
20901 // or XMM0_V32I8 in AVX all of this code can be replaced with that
20902 // in the .td file.
20903 static MachineBasicBlock *EmitPCMPSTRM(MachineInstr *MI, MachineBasicBlock *BB,
20904 const TargetInstrInfo *TII) {
20905 unsigned Opc;
20906 switch (MI->getOpcode()) {
20907 default: llvm_unreachable("illegal opcode!");
20908 case X86::PCMPISTRM128REG: Opc = X86::PCMPISTRM128rr; break;
20909 case X86::VPCMPISTRM128REG: Opc = X86::VPCMPISTRM128rr; break;
20910 case X86::PCMPISTRM128MEM: Opc = X86::PCMPISTRM128rm; break;
20911 case X86::VPCMPISTRM128MEM: Opc = X86::VPCMPISTRM128rm; break;
20912 case X86::PCMPESTRM128REG: Opc = X86::PCMPESTRM128rr; break;
20913 case X86::VPCMPESTRM128REG: Opc = X86::VPCMPESTRM128rr; break;
20914 case X86::PCMPESTRM128MEM: Opc = X86::PCMPESTRM128rm; break;
20915 case X86::VPCMPESTRM128MEM: Opc = X86::VPCMPESTRM128rm; break;
20916 }
20918 DebugLoc dl = MI->getDebugLoc();
20919 MachineInstrBuilder MIB = BuildMI(*BB, MI, dl, TII->get(Opc));
20921 unsigned NumArgs = MI->getNumOperands();
20922 for (unsigned i = 1; i < NumArgs; ++i) {
20923 MachineOperand &Op = MI->getOperand(i);
20924 if (!(Op.isReg() && Op.isImplicit()))
20925 MIB.addOperand(Op);
20926 }
20927 if (MI->hasOneMemOperand())
20928 MIB->setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
20930 BuildMI(*BB, MI, dl,
20931 TII->get(TargetOpcode::COPY), MI->getOperand(0).getReg())
20932 .addReg(X86::XMM0);
20934 MI->eraseFromParent();
20935 return BB;
20936 }
20938 // FIXME: Custom handling because TableGen doesn't support multiple implicit
20939 // defs in an instruction pattern
20940 static MachineBasicBlock *EmitPCMPSTRI(MachineInstr *MI, MachineBasicBlock *BB,
20941 const TargetInstrInfo *TII) {
20942 unsigned Opc;
20943 switch (MI->getOpcode()) {
20944 default: llvm_unreachable("illegal opcode!");
20945 case X86::PCMPISTRIREG: Opc = X86::PCMPISTRIrr; break;
20946 case X86::VPCMPISTRIREG: Opc = X86::VPCMPISTRIrr; break;
20947 case X86::PCMPISTRIMEM: Opc = X86::PCMPISTRIrm; break;
20948 case X86::VPCMPISTRIMEM: Opc = X86::VPCMPISTRIrm; break;
20949 case X86::PCMPESTRIREG: Opc = X86::PCMPESTRIrr; break;
20950 case X86::VPCMPESTRIREG: Opc = X86::VPCMPESTRIrr; break;
20951 case X86::PCMPESTRIMEM: Opc = X86::PCMPESTRIrm; break;
20952 case X86::VPCMPESTRIMEM: Opc = X86::VPCMPESTRIrm; break;
20953 }
20955 DebugLoc dl = MI->getDebugLoc();
20956 MachineInstrBuilder MIB = BuildMI(*BB, MI, dl, TII->get(Opc));
20958 unsigned NumArgs = MI->getNumOperands(); // remove the results
20959 for (unsigned i = 1; i < NumArgs; ++i) {
20960 MachineOperand &Op = MI->getOperand(i);
20961 if (!(Op.isReg() && Op.isImplicit()))
20962 MIB.addOperand(Op);
20963 }
20964 if (MI->hasOneMemOperand())
20965 MIB->setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
20967 BuildMI(*BB, MI, dl,
20968 TII->get(TargetOpcode::COPY), MI->getOperand(0).getReg())
20969 .addReg(X86::ECX);
20971 MI->eraseFromParent();
20972 return BB;
20973 }
20975 static MachineBasicBlock *EmitMonitor(MachineInstr *MI, MachineBasicBlock *BB,
20976 const X86Subtarget *Subtarget) {
20977 DebugLoc dl = MI->getDebugLoc();
20978 const TargetInstrInfo *TII = Subtarget->getInstrInfo();
20979 // Address into RAX/EAX, other two args into ECX, EDX.
20980 unsigned MemOpc = Subtarget->is64Bit() ? X86::LEA64r : X86::LEA32r;
20981 unsigned MemReg = Subtarget->is64Bit() ? X86::RAX : X86::EAX;
20982 MachineInstrBuilder MIB = BuildMI(*BB, MI, dl, TII->get(MemOpc), MemReg);
20983 for (int i = 0; i < X86::AddrNumOperands; ++i)
20984 MIB.addOperand(MI->getOperand(i));
20986 unsigned ValOps = X86::AddrNumOperands;
20987 BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY), X86::ECX)
20988 .addReg(MI->getOperand(ValOps).getReg());
20989 BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY), X86::EDX)
20990 .addReg(MI->getOperand(ValOps+1).getReg());
20992 // The instruction doesn't actually take any operands though.
20993 BuildMI(*BB, MI, dl, TII->get(X86::MONITORrrr));
20995 MI->eraseFromParent(); // The pseudo is gone now.
20996 return BB;
20997 }
20999 MachineBasicBlock *
21000 X86TargetLowering::EmitVAARG64WithCustomInserter(MachineInstr *MI,
21001 MachineBasicBlock *MBB) const {
21002 // Emit va_arg instruction on X86-64.
21004 // Operands to this pseudo-instruction:
21005 // 0 ) Output : destination address (reg)
21006 // 1-5) Input : va_list address (addr, i64mem)
21007 // 6 ) ArgSize : Size (in bytes) of vararg type
21008 // 7 ) ArgMode : 0=overflow only, 1=use gp_offset, 2=use fp_offset
21009 // 8 ) Align : Alignment of type
21010 // 9 ) EFLAGS (implicit-def)
21012 assert(MI->getNumOperands() == 10 && "VAARG_64 should have 10 operands!");
21013 assert(X86::AddrNumOperands == 5 && "VAARG_64 assumes 5 address operands");
21015 unsigned DestReg = MI->getOperand(0).getReg();
21016 MachineOperand &Base = MI->getOperand(1);
21017 MachineOperand &Scale = MI->getOperand(2);
21018 MachineOperand &Index = MI->getOperand(3);
21019 MachineOperand &Disp = MI->getOperand(4);
21020 MachineOperand &Segment = MI->getOperand(5);
21021 unsigned ArgSize = MI->getOperand(6).getImm();
21022 unsigned ArgMode = MI->getOperand(7).getImm();
21023 unsigned Align = MI->getOperand(8).getImm();
21025 // Memory Reference
21026 assert(MI->hasOneMemOperand() && "Expected VAARG_64 to have one memoperand");
21027 MachineInstr::mmo_iterator MMOBegin = MI->memoperands_begin();
21028 MachineInstr::mmo_iterator MMOEnd = MI->memoperands_end();
21030 // Machine Information
21031 const TargetInstrInfo *TII = Subtarget->getInstrInfo();
21032 MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
21033 const TargetRegisterClass *AddrRegClass = getRegClassFor(MVT::i64);
21034 const TargetRegisterClass *OffsetRegClass = getRegClassFor(MVT::i32);
21035 DebugLoc DL = MI->getDebugLoc();
21037 // struct va_list {
21038 //   i32   gp_offset
21039 //   i32   fp_offset
21040 //   i64   overflow_area (address)
21041 //   i64   reg_save_area (address)
21042 // }
21043 // sizeof(va_list) = 24
21044 // alignment(va_list) = 8
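// For illustration (assumed SysV x86-64 layout): reg_save_area holds the six
// GP argument registers (6 * 8 bytes) followed by the eight XMM argument
// registers (8 * 16 bytes), so gp_offset ranges over [0, 48) and fp_offset
// over [48, 176), matching the MaxOffset computation below.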
21046 unsigned TotalNumIntRegs = 6;
21047 unsigned TotalNumXMMRegs = 8;
21048 bool UseGPOffset = (ArgMode == 1);
21049 bool UseFPOffset = (ArgMode == 2);
21050 unsigned MaxOffset = TotalNumIntRegs * 8 +
21051 (UseFPOffset ? TotalNumXMMRegs * 16 : 0);
21053 /* Align ArgSize to a multiple of 8 */
21054 unsigned ArgSizeA8 = (ArgSize + 7) & ~7;
21055 bool NeedsAlign = (Align > 8);
21057 MachineBasicBlock *thisMBB = MBB;
21058 MachineBasicBlock *overflowMBB;
21059 MachineBasicBlock *offsetMBB;
21060 MachineBasicBlock *endMBB;
21062 unsigned OffsetDestReg = 0; // Argument address computed by offsetMBB
21063 unsigned OverflowDestReg = 0; // Argument address computed by overflowMBB
21064 unsigned OffsetReg = 0;
21066 if (!UseGPOffset && !UseFPOffset) {
21067 // If we only pull from the overflow region, we don't create a branch.
21068 // We don't need to alter control flow.
21069 OffsetDestReg = 0; // unused
21070 OverflowDestReg = DestReg;
21072 offsetMBB = nullptr;
21073 overflowMBB = thisMBB;
21076 // First emit code to check if gp_offset (or fp_offset) is below the bound.
21077 // If so, pull the argument from reg_save_area. (branch to offsetMBB)
21078 // If not, pull from overflow_area. (branch to overflowMBB)
21083 // offsetMBB overflowMBB
21088 // Registers for the PHI in endMBB
21089 OffsetDestReg = MRI.createVirtualRegister(AddrRegClass);
21090 OverflowDestReg = MRI.createVirtualRegister(AddrRegClass);
21092 const BasicBlock *LLVM_BB = MBB->getBasicBlock();
21093 MachineFunction *MF = MBB->getParent();
21094 overflowMBB = MF->CreateMachineBasicBlock(LLVM_BB);
21095 offsetMBB = MF->CreateMachineBasicBlock(LLVM_BB);
21096 endMBB = MF->CreateMachineBasicBlock(LLVM_BB);
21098 MachineFunction::iterator MBBIter = MBB;
21101 // Insert the new basic blocks
21102 MF->insert(MBBIter, offsetMBB);
21103 MF->insert(MBBIter, overflowMBB);
21104 MF->insert(MBBIter, endMBB);
21106 // Transfer the remainder of MBB and its successor edges to endMBB.
21107 endMBB->splice(endMBB->begin(), thisMBB,
21108 std::next(MachineBasicBlock::iterator(MI)), thisMBB->end());
21109 endMBB->transferSuccessorsAndUpdatePHIs(thisMBB);
21111 // Make offsetMBB and overflowMBB successors of thisMBB
21112 thisMBB->addSuccessor(offsetMBB);
21113 thisMBB->addSuccessor(overflowMBB);
21115 // endMBB is a successor of both offsetMBB and overflowMBB
21116 offsetMBB->addSuccessor(endMBB);
21117 overflowMBB->addSuccessor(endMBB);
21119 // Load the offset value into a register
21120 OffsetReg = MRI.createVirtualRegister(OffsetRegClass);
21121 BuildMI(thisMBB, DL, TII->get(X86::MOV32rm), OffsetReg)
21125 .addDisp(Disp, UseFPOffset ? 4 : 0)
21126 .addOperand(Segment)
21127 .setMemRefs(MMOBegin, MMOEnd);
21129 // Check if there is enough room left to pull this argument.
21130 BuildMI(thisMBB, DL, TII->get(X86::CMP32ri))
21132 .addImm(MaxOffset + 8 - ArgSizeA8);
21134 // Branch to "overflowMBB" if offset >= max
21135 // Fall through to "offsetMBB" otherwise
21136 BuildMI(thisMBB, DL, TII->get(X86::GetCondBranchFromCond(X86::COND_AE)))
21137 .addMBB(overflowMBB);
21140 // In offsetMBB, emit code to use the reg_save_area.
21142 assert(OffsetReg != 0);
21144 // Read the reg_save_area address.
21145 unsigned RegSaveReg = MRI.createVirtualRegister(AddrRegClass);
21146 BuildMI(offsetMBB, DL, TII->get(X86::MOV64rm), RegSaveReg)
21151 .addOperand(Segment)
21152 .setMemRefs(MMOBegin, MMOEnd);
21154 // Zero-extend the offset
21155 unsigned OffsetReg64 = MRI.createVirtualRegister(AddrRegClass);
21156 BuildMI(offsetMBB, DL, TII->get(X86::SUBREG_TO_REG), OffsetReg64)
21159 .addImm(X86::sub_32bit);
21161 // Add the offset to the reg_save_area to get the final address.
21162 BuildMI(offsetMBB, DL, TII->get(X86::ADD64rr), OffsetDestReg)
21163 .addReg(OffsetReg64)
21164 .addReg(RegSaveReg);
21166 // Compute the offset for the next argument
21167 unsigned NextOffsetReg = MRI.createVirtualRegister(OffsetRegClass);
21168 BuildMI(offsetMBB, DL, TII->get(X86::ADD32ri), NextOffsetReg)
21170 .addImm(UseFPOffset ? 16 : 8);
21172 // Store it back into the va_list.
21173 BuildMI(offsetMBB, DL, TII->get(X86::MOV32mr))
21177 .addDisp(Disp, UseFPOffset ? 4 : 0)
21178 .addOperand(Segment)
21179 .addReg(NextOffsetReg)
21180 .setMemRefs(MMOBegin, MMOEnd);
21183 BuildMI(offsetMBB, DL, TII->get(X86::JMP_1))
21188 // Emit code to use overflow area
21191 // Load the overflow_area address into a register.
21192 unsigned OverflowAddrReg = MRI.createVirtualRegister(AddrRegClass);
21193 BuildMI(overflowMBB, DL, TII->get(X86::MOV64rm), OverflowAddrReg)
21198 .addOperand(Segment)
21199 .setMemRefs(MMOBegin, MMOEnd);
21201 // If we need to align it, do so. Otherwise, just copy the address
21202 // to OverflowDestReg.
21204 // Align the overflow address
21205 assert((Align & (Align-1)) == 0 && "Alignment must be a power of 2");
21206 unsigned TmpReg = MRI.createVirtualRegister(AddrRegClass);
21208 // aligned_addr = (addr + (align-1)) & ~(align-1)
21209 BuildMI(overflowMBB, DL, TII->get(X86::ADD64ri32), TmpReg)
21210 .addReg(OverflowAddrReg)
21213 BuildMI(overflowMBB, DL, TII->get(X86::AND64ri32), OverflowDestReg)
21215 .addImm(~(uint64_t)(Align-1));
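// Worked example (illustrative numbers): with Align == 16 and an incoming
// overflow address of 0x1008, the ADD64ri32 above yields 0x1017 and the
// AND64ri32 with ~0xf yields 0x1010, the next 16-byte aligned slot.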
21217 BuildMI(overflowMBB, DL, TII->get(TargetOpcode::COPY), OverflowDestReg)
21218 .addReg(OverflowAddrReg);
21221 // Compute the next overflow address after this argument.
21222 // (the overflow address should be kept 8-byte aligned)
21223 unsigned NextAddrReg = MRI.createVirtualRegister(AddrRegClass);
21224 BuildMI(overflowMBB, DL, TII->get(X86::ADD64ri32), NextAddrReg)
21225 .addReg(OverflowDestReg)
21226 .addImm(ArgSizeA8);
21228 // Store the new overflow address.
21229 BuildMI(overflowMBB, DL, TII->get(X86::MOV64mr))
21234 .addOperand(Segment)
21235 .addReg(NextAddrReg)
21236 .setMemRefs(MMOBegin, MMOEnd);
21238 // If we branched, emit the PHI to the front of endMBB.
21240 BuildMI(*endMBB, endMBB->begin(), DL,
21241 TII->get(X86::PHI), DestReg)
21242 .addReg(OffsetDestReg).addMBB(offsetMBB)
21243 .addReg(OverflowDestReg).addMBB(overflowMBB);
21246 // Erase the pseudo instruction
21247 MI->eraseFromParent();
21249 return endMBB;
21250 }
21252 MachineBasicBlock *
21253 X86TargetLowering::EmitVAStartSaveXMMRegsWithCustomInserter(
21255 MachineBasicBlock *MBB) const {
21256 // Emit code to save XMM registers to the stack. The ABI says that the
21257 // number of registers to save is given in %al, so it's theoretically
21258 // possible to do an indirect jump trick to avoid saving all of them,
21259 // however this code takes a simpler approach and just executes all
21260 // of the stores if %al is non-zero. It's less code, and it's probably
21261 // easier on the hardware branch predictor, and stores aren't all that
21262 // expensive anyway.
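// Background (assumed SysV x86-64 varargs convention): the caller passes an
// upper bound on the number of vector registers it used in %al, so a zero %al
// lets the test-and-branch below skip the XMM stores entirely.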
21264 // Create the new basic blocks. One block contains all the XMM stores,
21265 // and one block is the final destination regardless of whether any
21266 // stores were performed.
21267 const BasicBlock *LLVM_BB = MBB->getBasicBlock();
21268 MachineFunction *F = MBB->getParent();
21269 MachineFunction::iterator MBBIter = MBB;
21271 MachineBasicBlock *XMMSaveMBB = F->CreateMachineBasicBlock(LLVM_BB);
21272 MachineBasicBlock *EndMBB = F->CreateMachineBasicBlock(LLVM_BB);
21273 F->insert(MBBIter, XMMSaveMBB);
21274 F->insert(MBBIter, EndMBB);
21276 // Transfer the remainder of MBB and its successor edges to EndMBB.
21277 EndMBB->splice(EndMBB->begin(), MBB,
21278 std::next(MachineBasicBlock::iterator(MI)), MBB->end());
21279 EndMBB->transferSuccessorsAndUpdatePHIs(MBB);
21281 // The original block will now fall through to the XMM save block.
21282 MBB->addSuccessor(XMMSaveMBB);
21283 // The XMMSaveMBB will fall through to the end block.
21284 XMMSaveMBB->addSuccessor(EndMBB);
21286 // Now add the instructions.
21287 const TargetInstrInfo *TII = Subtarget->getInstrInfo();
21288 DebugLoc DL = MI->getDebugLoc();
21290 unsigned CountReg = MI->getOperand(0).getReg();
21291 int64_t RegSaveFrameIndex = MI->getOperand(1).getImm();
21292 int64_t VarArgsFPOffset = MI->getOperand(2).getImm();
21294 if (!Subtarget->isTargetWin64()) {
21295 // If %al is 0, branch around the XMM save block.
21296 BuildMI(MBB, DL, TII->get(X86::TEST8rr)).addReg(CountReg).addReg(CountReg);
21297 BuildMI(MBB, DL, TII->get(X86::JE_1)).addMBB(EndMBB);
21298 MBB->addSuccessor(EndMBB);
21301 // Make sure the last operand is EFLAGS, which gets clobbered by the branch
21302 // that was just emitted, but clearly shouldn't be "saved".
21303 assert((MI->getNumOperands() <= 3 ||
21304 !MI->getOperand(MI->getNumOperands() - 1).isReg() ||
21305 MI->getOperand(MI->getNumOperands() - 1).getReg() == X86::EFLAGS)
21306 && "Expected last argument to be EFLAGS");
21307 unsigned MOVOpc = Subtarget->hasFp256() ? X86::VMOVAPSmr : X86::MOVAPSmr;
21308 // In the XMM save block, save all the XMM argument registers.
21309 for (int i = 3, e = MI->getNumOperands() - 1; i != e; ++i) {
21310 int64_t Offset = (i - 3) * 16 + VarArgsFPOffset;
21311 MachineMemOperand *MMO =
21312 F->getMachineMemOperand(
21313 MachinePointerInfo::getFixedStack(RegSaveFrameIndex, Offset),
21314 MachineMemOperand::MOStore,
21315 /*Size=*/16, /*Align=*/16);
21316 BuildMI(XMMSaveMBB, DL, TII->get(MOVOpc))
21317 .addFrameIndex(RegSaveFrameIndex)
21318 .addImm(/*Scale=*/1)
21319 .addReg(/*IndexReg=*/0)
21320 .addImm(/*Disp=*/Offset)
21321 .addReg(/*Segment=*/0)
21322 .addReg(MI->getOperand(i).getReg())
21323 .addMemOperand(MMO);
21326 MI->eraseFromParent(); // The pseudo instruction is gone now.
21328 return EndMBB;
21329 }
21331 // The EFLAGS operand of SelectItr might be missing a kill marker
21332 // because there were multiple uses of EFLAGS, and ISel didn't know
21333 // which to mark. Figure out whether SelectItr should have had a
21334 // kill marker, and set it if it should. Returns the correct kill
21336 static bool checkAndUpdateEFLAGSKill(MachineBasicBlock::iterator SelectItr,
21337 MachineBasicBlock* BB,
21338 const TargetRegisterInfo* TRI) {
21339 // Scan forward through BB for a use/def of EFLAGS.
21340 MachineBasicBlock::iterator miI(std::next(SelectItr));
21341 for (MachineBasicBlock::iterator miE = BB->end(); miI != miE; ++miI) {
21342 const MachineInstr& mi = *miI;
21343 if (mi.readsRegister(X86::EFLAGS))
21344 return false;
21345 if (mi.definesRegister(X86::EFLAGS))
21346 break; // Should have kill-flag - update below.
21349 // If we hit the end of the block, check whether EFLAGS is live into a
21351 if (miI == BB->end()) {
21352 for (MachineBasicBlock::succ_iterator sItr = BB->succ_begin(),
21353 sEnd = BB->succ_end();
21354 sItr != sEnd; ++sItr) {
21355 MachineBasicBlock* succ = *sItr;
21356 if (succ->isLiveIn(X86::EFLAGS))
21357 return false;
21358 }
21359 }
21361 // We found a def, or hit the end of the basic block and EFLAGS wasn't live
21362 // out. SelectMI should have a kill flag on EFLAGS.
21363 SelectItr->addRegisterKilled(X86::EFLAGS, TRI);
21364 return true;
21365 }
21367 MachineBasicBlock *
21368 X86TargetLowering::EmitLoweredSelect(MachineInstr *MI,
21369 MachineBasicBlock *BB) const {
21370 const TargetInstrInfo *TII = Subtarget->getInstrInfo();
21371 DebugLoc DL = MI->getDebugLoc();
21373 // To "insert" a SELECT_CC instruction, we actually have to insert the
21374 // diamond control-flow pattern. The incoming instruction knows the
21375 // destination vreg to set, the condition code register to branch on, the
21376 // true/false values to select between, and a branch opcode to use.
21377 const BasicBlock *LLVM_BB = BB->getBasicBlock();
21378 MachineFunction::iterator It = BB;
21379 ++It;
21381 //  thisMBB:
21382 //  ...
21383 //   TrueVal = ...
21384 //   cmpTY ccX, r1, r2
21385 //   bCC copy1MBB
21386 //   fallthrough --> copy0MBB
21387 MachineBasicBlock *thisMBB = BB;
21388 MachineFunction *F = BB->getParent();
21389 MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB);
21390 MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
21391 F->insert(It, copy0MBB);
21392 F->insert(It, sinkMBB);
21394 // If the EFLAGS register isn't dead in the terminator, then claim that it's
21395 // live into the sink and copy blocks.
21396 const TargetRegisterInfo *TRI = Subtarget->getRegisterInfo();
21397 if (!MI->killsRegister(X86::EFLAGS) &&
21398 !checkAndUpdateEFLAGSKill(MI, BB, TRI)) {
21399 copy0MBB->addLiveIn(X86::EFLAGS);
21400 sinkMBB->addLiveIn(X86::EFLAGS);
21401 }
21403 // Transfer the remainder of BB and its successor edges to sinkMBB.
21404 sinkMBB->splice(sinkMBB->begin(), BB,
21405 std::next(MachineBasicBlock::iterator(MI)), BB->end());
21406 sinkMBB->transferSuccessorsAndUpdatePHIs(BB);
21408 // Add the true and fallthrough blocks as its successors.
21409 BB->addSuccessor(copy0MBB);
21410 BB->addSuccessor(sinkMBB);
21412 // Create the conditional branch instruction.
21413 unsigned Opc =
21414 X86::GetCondBranchFromCond((X86::CondCode)MI->getOperand(3).getImm());
21415 BuildMI(BB, DL, TII->get(Opc)).addMBB(sinkMBB);
21418 // %FalseValue = ...
21419 // # fallthrough to sinkMBB
21420 copy0MBB->addSuccessor(sinkMBB);
21423 // %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
21425 BuildMI(*sinkMBB, sinkMBB->begin(), DL,
21426 TII->get(X86::PHI), MI->getOperand(0).getReg())
21427 .addReg(MI->getOperand(1).getReg()).addMBB(copy0MBB)
21428 .addReg(MI->getOperand(2).getReg()).addMBB(thisMBB);
21430 MI->eraseFromParent(); // The pseudo instruction is gone now.
21431 return sinkMBB;
21432 }
21434 MachineBasicBlock *
21435 X86TargetLowering::EmitLoweredSegAlloca(MachineInstr *MI,
21436 MachineBasicBlock *BB) const {
21437 MachineFunction *MF = BB->getParent();
21438 const TargetInstrInfo *TII = Subtarget->getInstrInfo();
21439 DebugLoc DL = MI->getDebugLoc();
21440 const BasicBlock *LLVM_BB = BB->getBasicBlock();
21442 assert(MF->shouldSplitStack());
21444 const bool Is64Bit = Subtarget->is64Bit();
21445 const bool IsLP64 = Subtarget->isTarget64BitLP64();
21447 const unsigned TlsReg = Is64Bit ? X86::FS : X86::GS;
21448 const unsigned TlsOffset = IsLP64 ? 0x70 : Is64Bit ? 0x40 : 0x30;
21451 // ... [Till the alloca]
21452 // If stacklet is not large enough, jump to mallocMBB
21455 // Allocate by subtracting from RSP
21456 // Jump to continueMBB
21459 // Allocate by call to runtime
21463 // [rest of original BB]
21466 MachineBasicBlock *mallocMBB = MF->CreateMachineBasicBlock(LLVM_BB);
21467 MachineBasicBlock *bumpMBB = MF->CreateMachineBasicBlock(LLVM_BB);
21468 MachineBasicBlock *continueMBB = MF->CreateMachineBasicBlock(LLVM_BB);
21470 MachineRegisterInfo &MRI = MF->getRegInfo();
21471 const TargetRegisterClass *AddrRegClass =
21472 getRegClassFor(getPointerTy());
21474 unsigned mallocPtrVReg = MRI.createVirtualRegister(AddrRegClass),
21475 bumpSPPtrVReg = MRI.createVirtualRegister(AddrRegClass),
21476 tmpSPVReg = MRI.createVirtualRegister(AddrRegClass),
21477 SPLimitVReg = MRI.createVirtualRegister(AddrRegClass),
21478 sizeVReg = MI->getOperand(1).getReg(),
21479 physSPReg = IsLP64 || Subtarget->isTargetNaCl64() ? X86::RSP : X86::ESP;
21481 MachineFunction::iterator MBBIter = BB;
21484 MF->insert(MBBIter, bumpMBB);
21485 MF->insert(MBBIter, mallocMBB);
21486 MF->insert(MBBIter, continueMBB);
21488 continueMBB->splice(continueMBB->begin(), BB,
21489 std::next(MachineBasicBlock::iterator(MI)), BB->end());
21490 continueMBB->transferSuccessorsAndUpdatePHIs(BB);
21492 // Add code to the main basic block to check if the stack limit has been hit,
21493 // and if so, jump to mallocMBB otherwise to bumpMBB.
21494 BuildMI(BB, DL, TII->get(TargetOpcode::COPY), tmpSPVReg).addReg(physSPReg);
21495 BuildMI(BB, DL, TII->get(IsLP64 ? X86::SUB64rr:X86::SUB32rr), SPLimitVReg)
21496 .addReg(tmpSPVReg).addReg(sizeVReg);
21497 BuildMI(BB, DL, TII->get(IsLP64 ? X86::CMP64mr:X86::CMP32mr))
21498 .addReg(0).addImm(1).addReg(0).addImm(TlsOffset).addReg(TlsReg)
21499 .addReg(SPLimitVReg);
21500 BuildMI(BB, DL, TII->get(X86::JG_1)).addMBB(mallocMBB);
21502 // bumpMBB simply decreases the stack pointer, since we know the current
21503 // stacklet has enough space.
21504 BuildMI(bumpMBB, DL, TII->get(TargetOpcode::COPY), physSPReg)
21505 .addReg(SPLimitVReg);
21506 BuildMI(bumpMBB, DL, TII->get(TargetOpcode::COPY), bumpSPPtrVReg)
21507 .addReg(SPLimitVReg);
21508 BuildMI(bumpMBB, DL, TII->get(X86::JMP_1)).addMBB(continueMBB);
21510 // Calls into a routine in libgcc to allocate more space from the heap.
21511 const uint32_t *RegMask =
21512 Subtarget->getRegisterInfo()->getCallPreservedMask(CallingConv::C);
21513 if (IsLP64) {
21514 BuildMI(mallocMBB, DL, TII->get(X86::MOV64rr), X86::RDI)
21515 .addReg(sizeVReg);
21516 BuildMI(mallocMBB, DL, TII->get(X86::CALL64pcrel32))
21517 .addExternalSymbol("__morestack_allocate_stack_space")
21518 .addRegMask(RegMask)
21519 .addReg(X86::RDI, RegState::Implicit)
21520 .addReg(X86::RAX, RegState::ImplicitDefine);
21521 } else if (Is64Bit) {
21522 BuildMI(mallocMBB, DL, TII->get(X86::MOV32rr), X86::EDI)
21523 .addReg(sizeVReg);
21524 BuildMI(mallocMBB, DL, TII->get(X86::CALL64pcrel32))
21525 .addExternalSymbol("__morestack_allocate_stack_space")
21526 .addRegMask(RegMask)
21527 .addReg(X86::EDI, RegState::Implicit)
21528 .addReg(X86::EAX, RegState::ImplicitDefine);
21529 } else {
21530 BuildMI(mallocMBB, DL, TII->get(X86::SUB32ri), physSPReg).addReg(physSPReg)
21531 .addImm(16);
21532 BuildMI(mallocMBB, DL, TII->get(X86::PUSH32r)).addReg(sizeVReg);
21533 BuildMI(mallocMBB, DL, TII->get(X86::CALLpcrel32))
21534 .addExternalSymbol("__morestack_allocate_stack_space")
21535 .addRegMask(RegMask)
21536 .addReg(X86::EAX, RegState::ImplicitDefine);
21537 }
21539 if (!Is64Bit)
21540 BuildMI(mallocMBB, DL, TII->get(X86::ADD32ri), physSPReg).addReg(physSPReg)
21541 .addImm(16);
21543 BuildMI(mallocMBB, DL, TII->get(TargetOpcode::COPY), mallocPtrVReg)
21544 .addReg(IsLP64 ? X86::RAX : X86::EAX);
21545 BuildMI(mallocMBB, DL, TII->get(X86::JMP_1)).addMBB(continueMBB);
21547 // Set up the CFG correctly.
21548 BB->addSuccessor(bumpMBB);
21549 BB->addSuccessor(mallocMBB);
21550 mallocMBB->addSuccessor(continueMBB);
21551 bumpMBB->addSuccessor(continueMBB);
21553 // Take care of the PHI nodes.
21554 BuildMI(*continueMBB, continueMBB->begin(), DL, TII->get(X86::PHI),
21555 MI->getOperand(0).getReg())
21556 .addReg(mallocPtrVReg).addMBB(mallocMBB)
21557 .addReg(bumpSPPtrVReg).addMBB(bumpMBB);
21559 // Delete the original pseudo instruction.
21560 MI->eraseFromParent();
21563 return continueMBB;
21564 }
21566 MachineBasicBlock *
21567 X86TargetLowering::EmitLoweredWinAlloca(MachineInstr *MI,
21568 MachineBasicBlock *BB) const {
21569 DebugLoc DL = MI->getDebugLoc();
21571 assert(!Subtarget->isTargetMachO());
21573 X86FrameLowering::emitStackProbeCall(*BB->getParent(), *BB, MI, DL);
21575 MI->eraseFromParent(); // The pseudo instruction is gone now.
21577 return BB;
21578 }
21579 MachineBasicBlock *
21580 X86TargetLowering::EmitLoweredTLSCall(MachineInstr *MI,
21581 MachineBasicBlock *BB) const {
21582 // This is pretty easy. We're taking the value that we received from
21583 // our load from the relocation, sticking it in either RDI (x86-64)
21584 // or EAX and doing an indirect call. The return value will then
21585 // be in the normal return register.
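// Illustrative Darwin x86-64 sequence (assumed assembly, matching the 64-bit
// path below):
//   movq  _var@TLVP(%rip), %rdi
//   callq *(%rdi)
// with the per-thread address coming back in RAX.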
21586 MachineFunction *F = BB->getParent();
21587 const X86InstrInfo *TII = Subtarget->getInstrInfo();
21588 DebugLoc DL = MI->getDebugLoc();
21590 assert(Subtarget->isTargetDarwin() && "Darwin only instr emitted?");
21591 assert(MI->getOperand(3).isGlobal() && "This should be a global");
21593 // Get a register mask for the lowered call.
21594 // FIXME: The 32-bit calls have non-standard calling conventions. Use a
21595 // proper register mask.
21596 const uint32_t *RegMask =
21597 Subtarget->getRegisterInfo()->getCallPreservedMask(CallingConv::C);
21598 if (Subtarget->is64Bit()) {
21599 MachineInstrBuilder MIB = BuildMI(*BB, MI, DL,
21600 TII->get(X86::MOV64rm), X86::RDI)
21601 .addReg(X86::RIP)
21602 .addImm(0).addReg(0)
21603 .addGlobalAddress(MI->getOperand(3).getGlobal(), 0,
21604 MI->getOperand(3).getTargetFlags())
21605 .addReg(0);
21606 MIB = BuildMI(*BB, MI, DL, TII->get(X86::CALL64m));
21607 addDirectMem(MIB, X86::RDI);
21608 MIB.addReg(X86::RAX, RegState::ImplicitDefine).addRegMask(RegMask);
21609 } else if (F->getTarget().getRelocationModel() != Reloc::PIC_) {
21610 MachineInstrBuilder MIB = BuildMI(*BB, MI, DL,
21611 TII->get(X86::MOV32rm), X86::EAX)
21613 .addImm(0).addReg(0)
21614 .addGlobalAddress(MI->getOperand(3).getGlobal(), 0,
21615 MI->getOperand(3).getTargetFlags())
21616 .addReg(0);
21617 MIB = BuildMI(*BB, MI, DL, TII->get(X86::CALL32m));
21618 addDirectMem(MIB, X86::EAX);
21619 MIB.addReg(X86::EAX, RegState::ImplicitDefine).addRegMask(RegMask);
21621 MachineInstrBuilder MIB = BuildMI(*BB, MI, DL,
21622 TII->get(X86::MOV32rm), X86::EAX)
21623 .addReg(TII->getGlobalBaseReg(F))
21624 .addImm(0).addReg(0)
21625 .addGlobalAddress(MI->getOperand(3).getGlobal(), 0,
21626 MI->getOperand(3).getTargetFlags())
21627 .addReg(0);
21628 MIB = BuildMI(*BB, MI, DL, TII->get(X86::CALL32m));
21629 addDirectMem(MIB, X86::EAX);
21630 MIB.addReg(X86::EAX, RegState::ImplicitDefine).addRegMask(RegMask);
21633 MI->eraseFromParent(); // The pseudo instruction is gone now.
21635 return BB;
21636 }
21637 MachineBasicBlock *
21638 X86TargetLowering::emitEHSjLjSetJmp(MachineInstr *MI,
21639 MachineBasicBlock *MBB) const {
21640 DebugLoc DL = MI->getDebugLoc();
21641 MachineFunction *MF = MBB->getParent();
21642 const TargetInstrInfo *TII = Subtarget->getInstrInfo();
21643 MachineRegisterInfo &MRI = MF->getRegInfo();
21645 const BasicBlock *BB = MBB->getBasicBlock();
21646 MachineFunction::iterator I = MBB;
21647 ++I;
21649 // Memory Reference
21650 MachineInstr::mmo_iterator MMOBegin = MI->memoperands_begin();
21651 MachineInstr::mmo_iterator MMOEnd = MI->memoperands_end();
21654 unsigned MemOpndSlot = 0;
21656 unsigned CurOp = 0;
21658 DstReg = MI->getOperand(CurOp++).getReg();
21659 const TargetRegisterClass *RC = MRI.getRegClass(DstReg);
21660 assert(RC->hasType(MVT::i32) && "Invalid destination!");
21661 unsigned mainDstReg = MRI.createVirtualRegister(RC);
21662 unsigned restoreDstReg = MRI.createVirtualRegister(RC);
21664 MemOpndSlot = CurOp;
21666 MVT PVT = getPointerTy();
21667 assert((PVT == MVT::i64 || PVT == MVT::i32) &&
21668 "Invalid Pointer Size!");
21670 // For v = setjmp(buf), we generate
21673 // buf[LabelOffset] = restoreMBB
21674 // SjLjSetup restoreMBB
21680 // v = phi(main, restore)
21683 // if base pointer being used, load it from frame
21686 MachineBasicBlock *thisMBB = MBB;
21687 MachineBasicBlock *mainMBB = MF->CreateMachineBasicBlock(BB);
21688 MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB);
21689 MachineBasicBlock *restoreMBB = MF->CreateMachineBasicBlock(BB);
21690 MF->insert(I, mainMBB);
21691 MF->insert(I, sinkMBB);
21692 MF->push_back(restoreMBB);
21694 MachineInstrBuilder MIB;
21696 // Transfer the remainder of BB and its successor edges to sinkMBB.
21697 sinkMBB->splice(sinkMBB->begin(), MBB,
21698 std::next(MachineBasicBlock::iterator(MI)), MBB->end());
21699 sinkMBB->transferSuccessorsAndUpdatePHIs(MBB);
21702 unsigned PtrStoreOpc = 0;
21703 unsigned LabelReg = 0;
21704 const int64_t LabelOffset = 1 * PVT.getStoreSize();
21705 Reloc::Model RM = MF->getTarget().getRelocationModel();
21706 bool UseImmLabel = (MF->getTarget().getCodeModel() == CodeModel::Small) &&
21707 (RM == Reloc::Static || RM == Reloc::DynamicNoPIC);
21709 // Prepare IP either in reg or imm.
21710 if (!UseImmLabel) {
21711 PtrStoreOpc = (PVT == MVT::i64) ? X86::MOV64mr : X86::MOV32mr;
21712 const TargetRegisterClass *PtrRC = getRegClassFor(PVT);
21713 LabelReg = MRI.createVirtualRegister(PtrRC);
21714 if (Subtarget->is64Bit()) {
21715 MIB = BuildMI(*thisMBB, MI, DL, TII->get(X86::LEA64r), LabelReg)
21716 .addReg(X86::RIP)
21717 .addImm(0)
21718 .addReg(0)
21719 .addMBB(restoreMBB)
21720 .addReg(0);
21721 } else {
21722 const X86InstrInfo *XII = static_cast<const X86InstrInfo*>(TII);
21723 MIB = BuildMI(*thisMBB, MI, DL, TII->get(X86::LEA32r), LabelReg)
21724 .addReg(XII->getGlobalBaseReg(MF))
21725 .addImm(0)
21726 .addReg(0)
21727 .addMBB(restoreMBB, Subtarget->ClassifyBlockAddressReference())
21728 .addReg(0);
21729 }
21730 } else
21731 PtrStoreOpc = (PVT == MVT::i64) ? X86::MOV64mi32 : X86::MOV32mi;
21733 MIB = BuildMI(*thisMBB, MI, DL, TII->get(PtrStoreOpc));
21734 for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
21735 if (i == X86::AddrDisp)
21736 MIB.addDisp(MI->getOperand(MemOpndSlot + i), LabelOffset);
21737 else
21738 MIB.addOperand(MI->getOperand(MemOpndSlot + i));
21739 }
21740 if (!UseImmLabel)
21741 MIB.addReg(LabelReg);
21742 else
21743 MIB.addMBB(restoreMBB);
21744 MIB.setMemRefs(MMOBegin, MMOEnd);
21746 MIB = BuildMI(*thisMBB, MI, DL, TII->get(X86::EH_SjLj_Setup))
21747 .addMBB(restoreMBB);
21749 const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
21750 MIB.addRegMask(RegInfo->getNoPreservedMask());
21751 thisMBB->addSuccessor(mainMBB);
21752 thisMBB->addSuccessor(restoreMBB);
21756 BuildMI(mainMBB, DL, TII->get(X86::MOV32r0), mainDstReg);
21757 mainMBB->addSuccessor(sinkMBB);
21760 BuildMI(*sinkMBB, sinkMBB->begin(), DL,
21761 TII->get(X86::PHI), DstReg)
21762 .addReg(mainDstReg).addMBB(mainMBB)
21763 .addReg(restoreDstReg).addMBB(restoreMBB);
21766 if (RegInfo->hasBasePointer(*MF)) {
21767 const bool Uses64BitFramePtr =
21768 Subtarget->isTarget64BitLP64() || Subtarget->isTargetNaCl64();
21769 X86MachineFunctionInfo *X86FI = MF->getInfo<X86MachineFunctionInfo>();
21770 X86FI->setRestoreBasePointer(MF);
21771 unsigned FramePtr = RegInfo->getFrameRegister(*MF);
21772 unsigned BasePtr = RegInfo->getBaseRegister();
21773 unsigned Opm = Uses64BitFramePtr ? X86::MOV64rm : X86::MOV32rm;
21774 addRegOffset(BuildMI(restoreMBB, DL, TII->get(Opm), BasePtr),
21775 FramePtr, true, X86FI->getRestoreBasePointerOffset())
21776 .setMIFlag(MachineInstr::FrameSetup);
21777 }
21778 BuildMI(restoreMBB, DL, TII->get(X86::MOV32ri), restoreDstReg).addImm(1);
21779 BuildMI(restoreMBB, DL, TII->get(X86::JMP_1)).addMBB(sinkMBB);
21780 restoreMBB->addSuccessor(sinkMBB);
21782 MI->eraseFromParent();
21783 return sinkMBB;
21784 }
21786 MachineBasicBlock *
21787 X86TargetLowering::emitEHSjLjLongJmp(MachineInstr *MI,
21788 MachineBasicBlock *MBB) const {
21789 DebugLoc DL = MI->getDebugLoc();
21790 MachineFunction *MF = MBB->getParent();
21791 const TargetInstrInfo *TII = Subtarget->getInstrInfo();
21792 MachineRegisterInfo &MRI = MF->getRegInfo();
21794 // Memory Reference
21795 MachineInstr::mmo_iterator MMOBegin = MI->memoperands_begin();
21796 MachineInstr::mmo_iterator MMOEnd = MI->memoperands_end();
21798 MVT PVT = getPointerTy();
21799 assert((PVT == MVT::i64 || PVT == MVT::i32) &&
21800 "Invalid Pointer Size!");
21802 const TargetRegisterClass *RC =
21803 (PVT == MVT::i64) ? &X86::GR64RegClass : &X86::GR32RegClass;
21804 unsigned Tmp = MRI.createVirtualRegister(RC);
21805 // Since FP is only updated here but NOT referenced, it's treated as a GPR.
21806 const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
21807 unsigned FP = (PVT == MVT::i64) ? X86::RBP : X86::EBP;
21808 unsigned SP = RegInfo->getStackRegister();
21810 MachineInstrBuilder MIB;
21812 const int64_t LabelOffset = 1 * PVT.getStoreSize();
21813 const int64_t SPOffset = 2 * PVT.getStoreSize();
21815 unsigned PtrLoadOpc = (PVT == MVT::i64) ? X86::MOV64rm : X86::MOV32rm;
21816 unsigned IJmpOpc = (PVT == MVT::i64) ? X86::JMP64r : X86::JMP32r;
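// Reload the frame pointer from buf[0], the resume address from
// buf[LabelOffset], and the stack pointer from buf[SPOffset], then do an
// indirect jump to the resume address.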
21819 MIB = BuildMI(*MBB, MI, DL, TII->get(PtrLoadOpc), FP);
21820 for (unsigned i = 0; i < X86::AddrNumOperands; ++i)
21821 MIB.addOperand(MI->getOperand(i));
21822 MIB.setMemRefs(MMOBegin, MMOEnd);
21824 MIB = BuildMI(*MBB, MI, DL, TII->get(PtrLoadOpc), Tmp);
21825 for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
21826 if (i == X86::AddrDisp)
21827 MIB.addDisp(MI->getOperand(i), LabelOffset);
21829 MIB.addOperand(MI->getOperand(i));
21831 MIB.setMemRefs(MMOBegin, MMOEnd);
21833 MIB = BuildMI(*MBB, MI, DL, TII->get(PtrLoadOpc), SP);
21834 for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
21835 if (i == X86::AddrDisp)
21836 MIB.addDisp(MI->getOperand(i), SPOffset);
21838 MIB.addOperand(MI->getOperand(i));
21840 MIB.setMemRefs(MMOBegin, MMOEnd);
21842 BuildMI(*MBB, MI, DL, TII->get(IJmpOpc)).addReg(Tmp);
21844 MI->eraseFromParent();
21848 // Replace 213-type (isel default) FMA3 instructions with 231-type for
21849 // accumulator loops. Writing back to the accumulator allows the coalescer
21850 // to remove extra copies in the loop.
21851 MachineBasicBlock *
21852 X86TargetLowering::emitFMA3Instr(MachineInstr *MI,
21853 MachineBasicBlock *MBB) const {
21854 MachineOperand &AddendOp = MI->getOperand(3);
21856 // Bail out early if the addend isn't a register - we can't switch these.
21857 if (!AddendOp.isReg())
21860 MachineFunction &MF = *MBB->getParent();
21861 MachineRegisterInfo &MRI = MF.getRegInfo();
21863 // Check whether the addend is defined by a PHI:
21864 assert(MRI.hasOneDef(AddendOp.getReg()) && "Multiple defs in SSA?");
21865 MachineInstr &AddendDef = *MRI.def_instr_begin(AddendOp.getReg());
21866 if (!AddendDef.isPHI())
21869 // Look for the following pattern:
21871 // %addend = phi [%entry, 0], [%loop, %result]
21873 // %result<tied1> = FMA213 %m2<tied0>, %m1, %addend
21877 // %addend = phi [%entry, 0], [%loop, %result]
21879 // %result<tied1> = FMA231 %addend<tied0>, %m1, %m2
21881 for (unsigned i = 1, e = AddendDef.getNumOperands(); i < e; i += 2) {
21882 assert(AddendDef.getOperand(i).isReg());
21883 MachineOperand PHISrcOp = AddendDef.getOperand(i);
21884 MachineInstr &PHISrcInst = *MRI.def_instr_begin(PHISrcOp.getReg());
21885 if (&PHISrcInst == MI) {
21886 // Found a matching instruction.
21887 unsigned NewFMAOpc = 0;
21888 switch (MI->getOpcode()) {
21889 case X86::VFMADDPDr213r: NewFMAOpc = X86::VFMADDPDr231r; break;
21890 case X86::VFMADDPSr213r: NewFMAOpc = X86::VFMADDPSr231r; break;
21891 case X86::VFMADDSDr213r: NewFMAOpc = X86::VFMADDSDr231r; break;
21892 case X86::VFMADDSSr213r: NewFMAOpc = X86::VFMADDSSr231r; break;
21893 case X86::VFMSUBPDr213r: NewFMAOpc = X86::VFMSUBPDr231r; break;
21894 case X86::VFMSUBPSr213r: NewFMAOpc = X86::VFMSUBPSr231r; break;
21895 case X86::VFMSUBSDr213r: NewFMAOpc = X86::VFMSUBSDr231r; break;
21896 case X86::VFMSUBSSr213r: NewFMAOpc = X86::VFMSUBSSr231r; break;
21897 case X86::VFNMADDPDr213r: NewFMAOpc = X86::VFNMADDPDr231r; break;
21898 case X86::VFNMADDPSr213r: NewFMAOpc = X86::VFNMADDPSr231r; break;
21899 case X86::VFNMADDSDr213r: NewFMAOpc = X86::VFNMADDSDr231r; break;
21900 case X86::VFNMADDSSr213r: NewFMAOpc = X86::VFNMADDSSr231r; break;
21901 case X86::VFNMSUBPDr213r: NewFMAOpc = X86::VFNMSUBPDr231r; break;
21902 case X86::VFNMSUBPSr213r: NewFMAOpc = X86::VFNMSUBPSr231r; break;
21903 case X86::VFNMSUBSDr213r: NewFMAOpc = X86::VFNMSUBSDr231r; break;
21904 case X86::VFNMSUBSSr213r: NewFMAOpc = X86::VFNMSUBSSr231r; break;
21905 case X86::VFMADDSUBPDr213r: NewFMAOpc = X86::VFMADDSUBPDr231r; break;
21906 case X86::VFMADDSUBPSr213r: NewFMAOpc = X86::VFMADDSUBPSr231r; break;
21907 case X86::VFMSUBADDPDr213r: NewFMAOpc = X86::VFMSUBADDPDr231r; break;
21908 case X86::VFMSUBADDPSr213r: NewFMAOpc = X86::VFMSUBADDPSr231r; break;
21910 case X86::VFMADDPDr213rY: NewFMAOpc = X86::VFMADDPDr231rY; break;
21911 case X86::VFMADDPSr213rY: NewFMAOpc = X86::VFMADDPSr231rY; break;
21912 case X86::VFMSUBPDr213rY: NewFMAOpc = X86::VFMSUBPDr231rY; break;
21913 case X86::VFMSUBPSr213rY: NewFMAOpc = X86::VFMSUBPSr231rY; break;
21914 case X86::VFNMADDPDr213rY: NewFMAOpc = X86::VFNMADDPDr231rY; break;
21915 case X86::VFNMADDPSr213rY: NewFMAOpc = X86::VFNMADDPSr231rY; break;
21916 case X86::VFNMSUBPDr213rY: NewFMAOpc = X86::VFNMSUBPDr231rY; break;
21917 case X86::VFNMSUBPSr213rY: NewFMAOpc = X86::VFNMSUBPSr231rY; break;
21918 case X86::VFMADDSUBPDr213rY: NewFMAOpc = X86::VFMADDSUBPDr231rY; break;
21919 case X86::VFMADDSUBPSr213rY: NewFMAOpc = X86::VFMADDSUBPSr231rY; break;
21920 case X86::VFMSUBADDPDr213rY: NewFMAOpc = X86::VFMSUBADDPDr231rY; break;
21921 case X86::VFMSUBADDPSr213rY: NewFMAOpc = X86::VFMSUBADDPSr231rY; break;
21922 default: llvm_unreachable("Unrecognized FMA variant.");
21925 const TargetInstrInfo &TII = *Subtarget->getInstrInfo();
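// The 231 form computes dst = src2 * src3 + src1 with src1 tied to dst, so
// the addend (operand 3 of the 213 form) becomes the tied first source,
// followed by the two multiplicands; this keeps the accumulator in the
// destination register across loop iterations.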
21926 MachineInstrBuilder MIB =
21927 BuildMI(MF, MI->getDebugLoc(), TII.get(NewFMAOpc))
21928 .addOperand(MI->getOperand(0))
21929 .addOperand(MI->getOperand(3))
21930 .addOperand(MI->getOperand(2))
21931 .addOperand(MI->getOperand(1));
21932 MBB->insert(MachineBasicBlock::iterator(MI), MIB);
21933 MI->eraseFromParent();
21940 MachineBasicBlock *
21941 X86TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
21942 MachineBasicBlock *BB) const {
21943 switch (MI->getOpcode()) {
21944 default: llvm_unreachable("Unexpected instr type to insert");
21945 case X86::TAILJMPd64:
21946 case X86::TAILJMPr64:
21947 case X86::TAILJMPm64:
21948 case X86::TAILJMPd64_REX:
21949 case X86::TAILJMPr64_REX:
21950 case X86::TAILJMPm64_REX:
21951 llvm_unreachable("TAILJMP64 would not be touched here.");
21952 case X86::TCRETURNdi64:
21953 case X86::TCRETURNri64:
21954 case X86::TCRETURNmi64:
21956 case X86::WIN_ALLOCA:
21957 return EmitLoweredWinAlloca(MI, BB);
21958 case X86::SEG_ALLOCA_32:
21959 case X86::SEG_ALLOCA_64:
21960 return EmitLoweredSegAlloca(MI, BB);
21961 case X86::TLSCall_32:
21962 case X86::TLSCall_64:
21963 return EmitLoweredTLSCall(MI, BB);
21964 case X86::CMOV_GR8:
21965 case X86::CMOV_FR32:
21966 case X86::CMOV_FR64:
21967 case X86::CMOV_V4F32:
21968 case X86::CMOV_V2F64:
21969 case X86::CMOV_V2I64:
21970 case X86::CMOV_V8F32:
21971 case X86::CMOV_V4F64:
21972 case X86::CMOV_V4I64:
21973 case X86::CMOV_V16F32:
21974 case X86::CMOV_V8F64:
21975 case X86::CMOV_V8I64:
21976 case X86::CMOV_GR16:
21977 case X86::CMOV_GR32:
21978 case X86::CMOV_RFP32:
21979 case X86::CMOV_RFP64:
21980 case X86::CMOV_RFP80:
21981 return EmitLoweredSelect(MI, BB);
21983 case X86::FP32_TO_INT16_IN_MEM:
21984 case X86::FP32_TO_INT32_IN_MEM:
21985 case X86::FP32_TO_INT64_IN_MEM:
21986 case X86::FP64_TO_INT16_IN_MEM:
21987 case X86::FP64_TO_INT32_IN_MEM:
21988 case X86::FP64_TO_INT64_IN_MEM:
21989 case X86::FP80_TO_INT16_IN_MEM:
21990 case X86::FP80_TO_INT32_IN_MEM:
21991 case X86::FP80_TO_INT64_IN_MEM: {
21992 MachineFunction *F = BB->getParent();
21993 const TargetInstrInfo *TII = Subtarget->getInstrInfo();
21994 DebugLoc DL = MI->getDebugLoc();
21996 // Change the floating point control register to use "round towards zero"
21997 // mode when truncating to an integer value.
21998 int CWFrameIdx = F->getFrameInfo()->CreateStackObject(2, 2, false);
21999 addFrameReference(BuildMI(*BB, MI, DL,
22000 TII->get(X86::FNSTCW16m)), CWFrameIdx);
22002 // Load the old value of the high byte of the control word...
22003 unsigned OldCW =
22004 F->getRegInfo().createVirtualRegister(&X86::GR16RegClass);
22005 addFrameReference(BuildMI(*BB, MI, DL, TII->get(X86::MOV16rm), OldCW),
22008 // Set the high part to be round to zero...
22009 addFrameReference(BuildMI(*BB, MI, DL, TII->get(X86::MOV16mi)), CWFrameIdx)
22012 // Reload the modified control word now...
22013 addFrameReference(BuildMI(*BB, MI, DL,
22014 TII->get(X86::FLDCW16m)), CWFrameIdx);
22016 // Restore the memory image of control word to original value
22017 addFrameReference(BuildMI(*BB, MI, DL, TII->get(X86::MOV16mr)), CWFrameIdx)
22020 // Get the X86 opcode to use.
22021 unsigned Opc;
22022 switch (MI->getOpcode()) {
22023 default: llvm_unreachable("illegal opcode!");
22024 case X86::FP32_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m32; break;
22025 case X86::FP32_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m32; break;
22026 case X86::FP32_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m32; break;
22027 case X86::FP64_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m64; break;
22028 case X86::FP64_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m64; break;
22029 case X86::FP64_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m64; break;
22030 case X86::FP80_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m80; break;
22031 case X86::FP80_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m80; break;
22032 case X86::FP80_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m80; break;
22036 MachineOperand &Op = MI->getOperand(0);
22038 AM.BaseType = X86AddressMode::RegBase;
22039 AM.Base.Reg = Op.getReg();
22041 AM.BaseType = X86AddressMode::FrameIndexBase;
22042 AM.Base.FrameIndex = Op.getIndex();
22044 Op = MI->getOperand(1);
22046 AM.Scale = Op.getImm();
22047 Op = MI->getOperand(2);
22049 AM.IndexReg = Op.getImm();
22050 Op = MI->getOperand(3);
22051 if (Op.isGlobal()) {
22052 AM.GV = Op.getGlobal();
22054 AM.Disp = Op.getImm();
22056 addFullAddress(BuildMI(*BB, MI, DL, TII->get(Opc)), AM)
22057 .addReg(MI->getOperand(X86::AddrNumOperands).getReg());
22059 // Reload the original control word now.
22060 addFrameReference(BuildMI(*BB, MI, DL,
22061 TII->get(X86::FLDCW16m)), CWFrameIdx);
22063 MI->eraseFromParent(); // The pseudo instruction is gone now.
22066 // String/text processing lowering.
22067 case X86::PCMPISTRM128REG:
22068 case X86::VPCMPISTRM128REG:
22069 case X86::PCMPISTRM128MEM:
22070 case X86::VPCMPISTRM128MEM:
22071 case X86::PCMPESTRM128REG:
22072 case X86::VPCMPESTRM128REG:
22073 case X86::PCMPESTRM128MEM:
22074 case X86::VPCMPESTRM128MEM:
22075 assert(Subtarget->hasSSE42() &&
22076 "Target must have SSE4.2 or AVX features enabled");
22077 return EmitPCMPSTRM(MI, BB, Subtarget->getInstrInfo());
22079 // String/text processing lowering.
22080 case X86::PCMPISTRIREG:
22081 case X86::VPCMPISTRIREG:
22082 case X86::PCMPISTRIMEM:
22083 case X86::VPCMPISTRIMEM:
22084 case X86::PCMPESTRIREG:
22085 case X86::VPCMPESTRIREG:
22086 case X86::PCMPESTRIMEM:
22087 case X86::VPCMPESTRIMEM:
22088 assert(Subtarget->hasSSE42() &&
22089 "Target must have SSE4.2 or AVX features enabled");
22090 return EmitPCMPSTRI(MI, BB, Subtarget->getInstrInfo());
22092 // Thread synchronization.
22094 return EmitMonitor(MI, BB, Subtarget);
22098 return EmitXBegin(MI, BB, Subtarget->getInstrInfo());
22100 case X86::VASTART_SAVE_XMM_REGS:
22101 return EmitVAStartSaveXMMRegsWithCustomInserter(MI, BB);
22103 case X86::VAARG_64:
22104 return EmitVAARG64WithCustomInserter(MI, BB);
22106 case X86::EH_SjLj_SetJmp32:
22107 case X86::EH_SjLj_SetJmp64:
22108 return emitEHSjLjSetJmp(MI, BB);
22110 case X86::EH_SjLj_LongJmp32:
22111 case X86::EH_SjLj_LongJmp64:
22112 return emitEHSjLjLongJmp(MI, BB);
22114 case TargetOpcode::STATEPOINT:
22115 // As an implementation detail, STATEPOINT shares the STACKMAP format at
22116 // this point in the process. We diverge later.
22117 return emitPatchPoint(MI, BB);
22119 case TargetOpcode::STACKMAP:
22120 case TargetOpcode::PATCHPOINT:
22121 return emitPatchPoint(MI, BB);
22123 case X86::VFMADDPDr213r:
22124 case X86::VFMADDPSr213r:
22125 case X86::VFMADDSDr213r:
22126 case X86::VFMADDSSr213r:
22127 case X86::VFMSUBPDr213r:
22128 case X86::VFMSUBPSr213r:
22129 case X86::VFMSUBSDr213r:
22130 case X86::VFMSUBSSr213r:
22131 case X86::VFNMADDPDr213r:
22132 case X86::VFNMADDPSr213r:
22133 case X86::VFNMADDSDr213r:
22134 case X86::VFNMADDSSr213r:
22135 case X86::VFNMSUBPDr213r:
22136 case X86::VFNMSUBPSr213r:
22137 case X86::VFNMSUBSDr213r:
22138 case X86::VFNMSUBSSr213r:
22139 case X86::VFMADDSUBPDr213r:
22140 case X86::VFMADDSUBPSr213r:
22141 case X86::VFMSUBADDPDr213r:
22142 case X86::VFMSUBADDPSr213r:
22143 case X86::VFMADDPDr213rY:
22144 case X86::VFMADDPSr213rY:
22145 case X86::VFMSUBPDr213rY:
22146 case X86::VFMSUBPSr213rY:
22147 case X86::VFNMADDPDr213rY:
22148 case X86::VFNMADDPSr213rY:
22149 case X86::VFNMSUBPDr213rY:
22150 case X86::VFNMSUBPSr213rY:
22151 case X86::VFMADDSUBPDr213rY:
22152 case X86::VFMADDSUBPSr213rY:
22153 case X86::VFMSUBADDPDr213rY:
22154 case X86::VFMSUBADDPSr213rY:
22155 return emitFMA3Instr(MI, BB);
22159 //===----------------------------------------------------------------------===//
22160 // X86 Optimization Hooks
22161 //===----------------------------------------------------------------------===//
22163 void X86TargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
22166 const SelectionDAG &DAG,
22167 unsigned Depth) const {
22168 unsigned BitWidth = KnownZero.getBitWidth();
22169 unsigned Opc = Op.getOpcode();
22170 assert((Opc >= ISD::BUILTIN_OP_END ||
22171 Opc == ISD::INTRINSIC_WO_CHAIN ||
22172 Opc == ISD::INTRINSIC_W_CHAIN ||
22173 Opc == ISD::INTRINSIC_VOID) &&
22174 "Should use MaskedValueIsZero if you don't know whether Op"
22175 " is a target node!");
22177 KnownZero = KnownOne = APInt(BitWidth, 0); // Don't know anything.
22191 // These nodes' second result is a boolean.
22192 if (Op.getResNo() == 0)
22195 case X86ISD::SETCC:
22196 KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - 1);
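// X86ISD::SETCC produces only 0 or 1, so every bit above bit 0 is known zero.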
22198 case ISD::INTRINSIC_WO_CHAIN: {
22199 unsigned IntId = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
22200 unsigned NumLoBits = 0;
22203 case Intrinsic::x86_sse_movmsk_ps:
22204 case Intrinsic::x86_avx_movmsk_ps_256:
22205 case Intrinsic::x86_sse2_movmsk_pd:
22206 case Intrinsic::x86_avx_movmsk_pd_256:
22207 case Intrinsic::x86_mmx_pmovmskb:
22208 case Intrinsic::x86_sse2_pmovmskb_128:
22209 case Intrinsic::x86_avx2_pmovmskb: {
22210 // High bits of movmskp{s|d}, pmovmskb are known zero.
22212 default: llvm_unreachable("Impossible intrinsic"); // Can't reach here.
22213 case Intrinsic::x86_sse_movmsk_ps: NumLoBits = 4; break;
22214 case Intrinsic::x86_avx_movmsk_ps_256: NumLoBits = 8; break;
22215 case Intrinsic::x86_sse2_movmsk_pd: NumLoBits = 2; break;
22216 case Intrinsic::x86_avx_movmsk_pd_256: NumLoBits = 4; break;
22217 case Intrinsic::x86_mmx_pmovmskb: NumLoBits = 8; break;
22218 case Intrinsic::x86_sse2_pmovmskb_128: NumLoBits = 16; break;
22219 case Intrinsic::x86_avx2_pmovmskb: NumLoBits = 32; break;
22221 KnownZero = APInt::getHighBitsSet(BitWidth, BitWidth - NumLoBits);
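// For example, movmskps on v4f32 yields a 4-bit mask in an i32 result, so
// bits [31:4] are known zero.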
22230 unsigned X86TargetLowering::ComputeNumSignBitsForTargetNode(
22232 const SelectionDAG &,
22233 unsigned Depth) const {
22234 // SETCC_CARRY sets the dest to ~0 for true or 0 for false.
22235 if (Op.getOpcode() == X86ISD::SETCC_CARRY)
22236 return Op.getValueType().getScalarType().getSizeInBits();
22242 /// isGAPlusOffset - Returns true (and the GlobalValue and the offset) if the
22243 /// node is a GlobalAddress + offset.
22244 bool X86TargetLowering::isGAPlusOffset(SDNode *N,
22245 const GlobalValue* &GA,
22246 int64_t &Offset) const {
22247 if (N->getOpcode() == X86ISD::Wrapper) {
22248 if (isa<GlobalAddressSDNode>(N->getOperand(0))) {
22249 GA = cast<GlobalAddressSDNode>(N->getOperand(0))->getGlobal();
22250 Offset = cast<GlobalAddressSDNode>(N->getOperand(0))->getOffset();
22254 return TargetLowering::isGAPlusOffset(N, GA, Offset);
22257 /// isShuffleHigh128VectorInsertLow - Checks whether the shuffle node is the
22258 /// same as extracting the high 128-bit part of a 256-bit vector and then
22259 /// inserting the result into the low part of a new 256-bit vector.
22260 static bool isShuffleHigh128VectorInsertLow(ShuffleVectorSDNode *SVOp) {
22261 EVT VT = SVOp->getValueType(0);
22262 unsigned NumElems = VT.getVectorNumElements();
22264 // vector_shuffle <4, 5, 6, 7, u, u, u, u> or <2, 3, u, u>
22265 for (unsigned i = 0, j = NumElems/2; i != NumElems/2; ++i, ++j)
22266 if (!isUndefOrEqual(SVOp->getMaskElt(i), j) ||
22267 SVOp->getMaskElt(j) >= 0)
22273 /// isShuffleLow128VectorInsertHigh - Checks whether the shuffle node is the
22274 /// same as extracting the low 128-bit part of a 256-bit vector and then
22275 /// inserting the result into the high part of a new 256-bit vector.
22276 static bool isShuffleLow128VectorInsertHigh(ShuffleVectorSDNode *SVOp) {
22277 EVT VT = SVOp->getValueType(0);
22278 unsigned NumElems = VT.getVectorNumElements();
22280 // vector_shuffle <u, u, u, u, 0, 1, 2, 3> or <u, u, 0, 1>
22281 for (unsigned i = NumElems/2, j = 0; i != NumElems; ++i, ++j)
22282 if (!isUndefOrEqual(SVOp->getMaskElt(i), j) ||
22283 SVOp->getMaskElt(j) >= 0)
22289 /// PerformShuffleCombine256 - Performs shuffle combines for 256-bit vectors.
22290 static SDValue PerformShuffleCombine256(SDNode *N, SelectionDAG &DAG,
22291 TargetLowering::DAGCombinerInfo &DCI,
22292 const X86Subtarget* Subtarget) {
22294 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
22295 SDValue V1 = SVOp->getOperand(0);
22296 SDValue V2 = SVOp->getOperand(1);
22297 EVT VT = SVOp->getValueType(0);
22298 unsigned NumElems = VT.getVectorNumElements();
22300 if (V1.getOpcode() == ISD::CONCAT_VECTORS &&
22301 V2.getOpcode() == ISD::CONCAT_VECTORS) {
22305 //   (CONCAT_VECTORS V, UNDEF)  shuffled with  (CONCAT_VECTORS all-zeros BUILD_VECTOR, UNDEF)
22307 //                         |
22310 //   RESULT: V zero-extended into the wider vector type
22312 if (V2.getOperand(0).getOpcode() != ISD::BUILD_VECTOR ||
22313 V2.getOperand(1).getOpcode() != ISD::UNDEF ||
22314 V1.getOperand(1).getOpcode() != ISD::UNDEF)
22317 if (!ISD::isBuildVectorAllZeros(V2.getOperand(0).getNode()))
22320 // To match the shuffle mask, the first half of the mask should
22321 // be exactly the first vector, and all the rest a splat with the
22322 // first element of the second one.
22323 for (unsigned i = 0; i != NumElems/2; ++i)
22324 if (!isUndefOrEqual(SVOp->getMaskElt(i), i) ||
22325 !isUndefOrEqual(SVOp->getMaskElt(i+NumElems/2), NumElems))
22328 // If V1 is coming from a vector load then just fold to a VZEXT_LOAD.
22329 if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(V1.getOperand(0))) {
22330 if (Ld->hasNUsesOfValue(1, 0)) {
22331 SDVTList Tys = DAG.getVTList(MVT::v4i64, MVT::Other);
22332 SDValue Ops[] = { Ld->getChain(), Ld->getBasePtr() };
22334 DAG.getMemIntrinsicNode(X86ISD::VZEXT_LOAD, dl, Tys, Ops,
22336 Ld->getPointerInfo(),
22337 Ld->getAlignment(),
22338 false/*isVolatile*/, true/*ReadMem*/,
22339 false/*WriteMem*/);
22341 // Make sure the newly-created LOAD is in the same position as Ld in
22342 // terms of dependency. We create a TokenFactor for Ld and ResNode,
22343 // and update uses of Ld's output chain to use the TokenFactor.
22344 if (Ld->hasAnyUseOfValue(1)) {
22345 SDValue NewChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
22346 SDValue(Ld, 1), SDValue(ResNode.getNode(), 1));
22347 DAG.ReplaceAllUsesOfValueWith(SDValue(Ld, 1), NewChain);
22348 DAG.UpdateNodeOperands(NewChain.getNode(), SDValue(Ld, 1),
22349 SDValue(ResNode.getNode(), 1));
22352 return DAG.getNode(ISD::BITCAST, dl, VT, ResNode);
22356 // Emit a zeroed vector and insert the desired subvector on its
22358 SDValue Zeros = getZeroVector(VT, Subtarget, DAG, dl);
22359 SDValue InsV = Insert128BitVector(Zeros, V1.getOperand(0), 0, DAG, dl);
22360 return DCI.CombineTo(N, InsV);
22363 //===--------------------------------------------------------------------===//
22364 // Combine some shuffles into subvector extracts and inserts:
22367 // vector_shuffle <4, 5, 6, 7, u, u, u, u> or <2, 3, u, u>
22368 if (isShuffleHigh128VectorInsertLow(SVOp)) {
22369 SDValue V = Extract128BitVector(V1, NumElems/2, DAG, dl);
22370 SDValue InsV = Insert128BitVector(DAG.getUNDEF(VT), V, 0, DAG, dl);
22371 return DCI.CombineTo(N, InsV);
22374 // vector_shuffle <u, u, u, u, 0, 1, 2, 3> or <u, u, 0, 1>
22375 if (isShuffleLow128VectorInsertHigh(SVOp)) {
22376 SDValue V = Extract128BitVector(V1, 0, DAG, dl);
22377 SDValue InsV = Insert128BitVector(DAG.getUNDEF(VT), V, NumElems/2, DAG, dl);
22378 return DCI.CombineTo(N, InsV);
22384 /// \brief Combine an arbitrary chain of shuffles into a single instruction if
22385 /// possible.
22386 ///
22387 /// This is the leaf of the recursive combine below. When we have found some
22388 /// chain of single-use x86 shuffle instructions and accumulated the combined
22389 /// shuffle mask represented by them, this will try to pattern match that mask
22390 /// into either a single instruction if there is a special purpose instruction
22391 /// for this operation, or into a PSHUFB instruction which is a fully general
22392 /// instruction but should only be used to replace chains over a certain depth.
22393 static bool combineX86ShuffleChain(SDValue Op, SDValue Root, ArrayRef<int> Mask,
22394 int Depth, bool HasPSHUFB, SelectionDAG &DAG,
22395 TargetLowering::DAGCombinerInfo &DCI,
22396 const X86Subtarget *Subtarget) {
22397 assert(!Mask.empty() && "Cannot combine an empty shuffle mask!");
22399 // Find the operand that enters the chain. Note that multiple uses are OK
22400 // here; we're not going to remove the operand we find.
22401 SDValue Input = Op.getOperand(0);
22402 while (Input.getOpcode() == ISD::BITCAST)
22403 Input = Input.getOperand(0);
22405 MVT VT = Input.getSimpleValueType();
22406 MVT RootVT = Root.getSimpleValueType();
22409 // Just remove no-op shuffle masks.
22410 if (Mask.size() == 1) {
22411 DCI.CombineTo(Root.getNode(), DAG.getNode(ISD::BITCAST, DL, RootVT, Input),
22416 // Use the float domain if the operand type is a floating point type.
22417 bool FloatDomain = VT.isFloatingPoint();
22419 // For floating point shuffles, we don't have free copies in the shuffle
22420 // instructions or the ability to load as part of the instruction, so
22421 // canonicalize their shuffles to UNPCK or MOV variants.
22423 // Note that even with AVX we prefer the PSHUFD form of shuffle for integer
22424 // vectors because it can have a load folded into it that UNPCK cannot. This
22425 // doesn't preclude something switching to the shorter encoding post-RA.
22427 if (Mask.equals(0, 0) || Mask.equals(1, 1)) {
22428 bool Lo = Mask.equals(0, 0);
22431 // Check if we have SSE3 which will let us use MOVDDUP. That instruction
22432 // is no slower than UNPCKLPD but has the option to fold the input operand
22433 // into even an unaligned memory load.
22434 if (Lo && Subtarget->hasSSE3()) {
22435 Shuffle = X86ISD::MOVDDUP;
22436 ShuffleVT = MVT::v2f64;
22438 // We have MOVLHPS and MOVHLPS throughout SSE and they encode smaller
22439 // than the UNPCK variants.
22440 Shuffle = Lo ? X86ISD::MOVLHPS : X86ISD::MOVHLPS;
22441 ShuffleVT = MVT::v4f32;
22443 if (Depth == 1 && Root->getOpcode() == Shuffle)
22444 return false; // Nothing to do!
22445 Op = DAG.getNode(ISD::BITCAST, DL, ShuffleVT, Input);
22446 DCI.AddToWorklist(Op.getNode());
22447 if (Shuffle == X86ISD::MOVDDUP)
22448 Op = DAG.getNode(Shuffle, DL, ShuffleVT, Op);
22450 Op = DAG.getNode(Shuffle, DL, ShuffleVT, Op, Op);
22451 DCI.AddToWorklist(Op.getNode());
22452 DCI.CombineTo(Root.getNode(), DAG.getNode(ISD::BITCAST, DL, RootVT, Op),
22456 if (Subtarget->hasSSE3() &&
22457 (Mask.equals(0, 0, 2, 2) || Mask.equals(1, 1, 3, 3))) {
22458 bool Lo = Mask.equals(0, 0, 2, 2);
22459 unsigned Shuffle = Lo ? X86ISD::MOVSLDUP : X86ISD::MOVSHDUP;
22460 MVT ShuffleVT = MVT::v4f32;
22461 if (Depth == 1 && Root->getOpcode() == Shuffle)
22462 return false; // Nothing to do!
22463 Op = DAG.getNode(ISD::BITCAST, DL, ShuffleVT, Input);
22464 DCI.AddToWorklist(Op.getNode());
22465 Op = DAG.getNode(Shuffle, DL, ShuffleVT, Op);
22466 DCI.AddToWorklist(Op.getNode());
22467 DCI.CombineTo(Root.getNode(), DAG.getNode(ISD::BITCAST, DL, RootVT, Op),
22471 if (Mask.equals(0, 0, 1, 1) || Mask.equals(2, 2, 3, 3)) {
22472 bool Lo = Mask.equals(0, 0, 1, 1);
22473 unsigned Shuffle = Lo ? X86ISD::UNPCKL : X86ISD::UNPCKH;
22474 MVT ShuffleVT = MVT::v4f32;
22475 if (Depth == 1 && Root->getOpcode() == Shuffle)
22476 return false; // Nothing to do!
22477 Op = DAG.getNode(ISD::BITCAST, DL, ShuffleVT, Input);
22478 DCI.AddToWorklist(Op.getNode());
22479 Op = DAG.getNode(Shuffle, DL, ShuffleVT, Op, Op);
22480 DCI.AddToWorklist(Op.getNode());
22481 DCI.CombineTo(Root.getNode(), DAG.getNode(ISD::BITCAST, DL, RootVT, Op),
22487 // We always canonicalize the 8 x i16 and 16 x i8 shuffles into their UNPCK
22488 // variants as none of these have single-instruction variants that are
22489 // superior to the UNPCK formulation.
22490 if (!FloatDomain &&
22491 (Mask.equals(0, 0, 1, 1, 2, 2, 3, 3) ||
22492 Mask.equals(4, 4, 5, 5, 6, 6, 7, 7) ||
22493 Mask.equals(0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7) ||
22494 Mask.equals(8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13, 14, 14, 15,
22496 bool Lo = Mask[0] == 0;
22497 unsigned Shuffle = Lo ? X86ISD::UNPCKL : X86ISD::UNPCKH;
22498 if (Depth == 1 && Root->getOpcode() == Shuffle)
22499 return false; // Nothing to do!
22501 switch (Mask.size()) {
22503 ShuffleVT = MVT::v8i16;
22506 ShuffleVT = MVT::v16i8;
22509 llvm_unreachable("Impossible mask size!");
22511 Op = DAG.getNode(ISD::BITCAST, DL, ShuffleVT, Input);
22512 DCI.AddToWorklist(Op.getNode());
22513 Op = DAG.getNode(Shuffle, DL, ShuffleVT, Op, Op);
22514 DCI.AddToWorklist(Op.getNode());
22515 DCI.CombineTo(Root.getNode(), DAG.getNode(ISD::BITCAST, DL, RootVT, Op),
22520 // Don't try to re-form single instruction chains under any circumstances now
22521 // that we've done encoding canonicalization for them.
22525 // If we have 3 or more shuffle instructions or a chain involving PSHUFB, we
22526 // can replace them with a single PSHUFB instruction profitably. Intel's
22527 // manuals suggest only using PSHUFB if doing so replaces 5 instructions, but
22528 // in practice PSHUFB tends to be *very* fast so we're more aggressive.
22529 if ((Depth >= 3 || HasPSHUFB) && Subtarget->hasSSSE3()) {
22530 SmallVector<SDValue, 16> PSHUFBMask;
22531 assert(Mask.size() <= 16 && "Can't shuffle elements smaller than bytes!");
22532 int Ratio = 16 / Mask.size();
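// Expand the accumulated mask to byte granularity: each mask element covers
// Ratio consecutive bytes. Zeroed lanes become the selector 255 (bit 7 set),
// which makes PSHUFB write a zero byte; undef lanes stay undef.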
22533 for (unsigned i = 0; i < 16; ++i) {
22534 if (Mask[i / Ratio] == SM_SentinelUndef) {
22535 PSHUFBMask.push_back(DAG.getUNDEF(MVT::i8));
22538 int M = Mask[i / Ratio] != SM_SentinelZero
22539 ? Ratio * Mask[i / Ratio] + i % Ratio
22541 PSHUFBMask.push_back(DAG.getConstant(M, MVT::i8));
22543 Op = DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, Input);
22544 DCI.AddToWorklist(Op.getNode());
22545 SDValue PSHUFBMaskOp =
22546 DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v16i8, PSHUFBMask);
22547 DCI.AddToWorklist(PSHUFBMaskOp.getNode());
22548 Op = DAG.getNode(X86ISD::PSHUFB, DL, MVT::v16i8, Op, PSHUFBMaskOp);
22549 DCI.AddToWorklist(Op.getNode());
22550 DCI.CombineTo(Root.getNode(), DAG.getNode(ISD::BITCAST, DL, RootVT, Op),
22555 // Failed to find any combines.
22559 /// \brief Fully generic combining of x86 shuffle instructions.
22561 /// This should be the last combine run over the x86 shuffle instructions. Once
22562 /// they have been fully optimized, this will recursively consider all chains
22563 /// of single-use shuffle instructions, build a generic model of the cumulative
22564 /// shuffle operation, and check for simpler instructions which implement this
22565 /// operation. We use this primarily for two purposes:
22567 /// 1) Collapse generic shuffles to specialized single instructions when
22568 /// equivalent. In most cases, this is just an encoding size win, but
22569 /// sometimes we will collapse multiple generic shuffles into a single
22570 /// special-purpose shuffle.
22571 /// 2) Look for sequences of shuffle instructions with 3 or more total
22572 /// instructions, and replace them with the slightly more expensive SSSE3
22573 /// PSHUFB instruction if available. We do this as the last combining step
22574 /// to ensure we avoid using PSHUFB if we can implement the shuffle with
22575 /// a suitable short sequence of other instructions. The PSHUFB will either
22576 /// use a register or have to read from memory and so is slightly (but only
22577 /// slightly) more expensive than the other shuffle instructions.
22579 /// Because this is inherently a quadratic operation (for each shuffle in
22580 /// a chain, we recurse up the chain), the depth is limited to 8 instructions.
22581 /// This should never be an issue in practice as the shuffle lowering doesn't
22582 /// produce sequences of more than 8 instructions.
22584 /// FIXME: We will currently miss some cases where the redundant shuffling
22585 /// would simplify under the threshold for PSHUFB formation because of
22586 /// combine-ordering. To fix this, we should do the redundant instruction
22587 /// combining in this recursive walk.
22588 static bool combineX86ShufflesRecursively(SDValue Op, SDValue Root,
22589 ArrayRef<int> RootMask,
22590 int Depth, bool HasPSHUFB,
22592 TargetLowering::DAGCombinerInfo &DCI,
22593 const X86Subtarget *Subtarget) {
22594 // Bound the depth of our recursive combine because this is ultimately
22595 // quadratic in nature.
22599 // Directly rip through bitcasts to find the underlying operand.
22600 while (Op.getOpcode() == ISD::BITCAST && Op.getOperand(0).hasOneUse())
22601 Op = Op.getOperand(0);
22603 MVT VT = Op.getSimpleValueType();
22604 if (!VT.isVector())
22605 return false; // Bail if we hit a non-vector.
22606 // FIXME: This routine should be taught about 256-bit shuffles, or a 256-bit
22607 // version should be added.
22608 if (VT.getSizeInBits() != 128)
22611 assert(Root.getSimpleValueType().isVector() &&
22612 "Shuffles operate on vector types!");
22613 assert(VT.getSizeInBits() == Root.getSimpleValueType().getSizeInBits() &&
22614 "Can only combine shuffles of the same vector register size.");
22616 if (!isTargetShuffle(Op.getOpcode()))
22618 SmallVector<int, 16> OpMask;
22620 bool HaveMask = getTargetShuffleMask(Op.getNode(), VT, OpMask, IsUnary);
22621 // We can only combine unary shuffles for which we can decode the mask.
22622 if (!HaveMask || !IsUnary)
22625 assert(VT.getVectorNumElements() == OpMask.size() &&
22626 "Different mask size from vector size!");
22627 assert(((RootMask.size() > OpMask.size() &&
22628 RootMask.size() % OpMask.size() == 0) ||
22629 (OpMask.size() > RootMask.size() &&
22630 OpMask.size() % RootMask.size() == 0) ||
22631 OpMask.size() == RootMask.size()) &&
22632 "The smaller number of elements must divide the larger.");
22633 int RootRatio = std::max<int>(1, OpMask.size() / RootMask.size());
22634 int OpRatio = std::max<int>(1, RootMask.size() / OpMask.size());
22635 assert(((RootRatio == 1 && OpRatio == 1) ||
22636 (RootRatio == 1) != (OpRatio == 1)) &&
22637 "Must not have a ratio for both incoming and op masks!");
22639 SmallVector<int, 16> Mask;
22640 Mask.reserve(std::max(OpMask.size(), RootMask.size()));
22642 // Merge this shuffle operation's mask into our accumulated mask. Note that
22643 // this shuffle's mask will be the first applied to the input, followed by the
22644 // root mask to get us all the way to the root value arrangement. The reason
22645 // for this order is that we are recursing up the operation chain.
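// For example, with equal sizes (both ratios 1), OpMask = <2,3,0,1> applied
// first and RootMask = <1,0,3,2> applied on top compose to
// Mask[i] = OpMask[RootMask[i]] = <3,2,1,0>.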
22646 for (int i = 0, e = std::max(OpMask.size(), RootMask.size()); i < e; ++i) {
22647 int RootIdx = i / RootRatio;
22648 if (RootMask[RootIdx] < 0) {
22649 // This is a zero or undef lane, we're done.
22650 Mask.push_back(RootMask[RootIdx]);
22654 int RootMaskedIdx = RootMask[RootIdx] * RootRatio + i % RootRatio;
22655 int OpIdx = RootMaskedIdx / OpRatio;
22656 if (OpMask[OpIdx] < 0) {
22657 // The incoming lanes are zero or undef, it doesn't matter which ones we
22659 Mask.push_back(OpMask[OpIdx]);
22663 // Ok, we have non-zero lanes, map them through.
22664 Mask.push_back(OpMask[OpIdx] * OpRatio +
22665 RootMaskedIdx % OpRatio);
22668 // See if we can recurse into the operand to combine more things.
22669 switch (Op.getOpcode()) {
22670 case X86ISD::PSHUFB:
22672 case X86ISD::PSHUFD:
22673 case X86ISD::PSHUFHW:
22674 case X86ISD::PSHUFLW:
22675 if (Op.getOperand(0).hasOneUse() &&
22676 combineX86ShufflesRecursively(Op.getOperand(0), Root, Mask, Depth + 1,
22677 HasPSHUFB, DAG, DCI, Subtarget))
22681 case X86ISD::UNPCKL:
22682 case X86ISD::UNPCKH:
22683 assert(Op.getOperand(0) == Op.getOperand(1) && "We only combine unary shuffles!");
22684 // We can't check for a single use; we have to check that this shuffle is the only user.
22685 if (Op->isOnlyUserOf(Op.getOperand(0).getNode()) &&
22686 combineX86ShufflesRecursively(Op.getOperand(0), Root, Mask, Depth + 1,
22687 HasPSHUFB, DAG, DCI, Subtarget))
22692 // Minor canonicalization of the accumulated shuffle mask to make it easier
22693 // to match below. All this does is detect masks with sequential pairs of
22694 // elements, and shrink them to the half-width mask. It does this in a loop
22695 // so it will reduce the size of the mask to the minimal width mask which
22696 // performs an equivalent shuffle.
22697 SmallVector<int, 16> WidenedMask;
22698 while (Mask.size() > 1 && canWidenShuffleElements(Mask, WidenedMask)) {
22699 Mask = std::move(WidenedMask);
22700 WidenedMask.clear();
22703 return combineX86ShuffleChain(Op, Root, Mask, Depth, HasPSHUFB, DAG, DCI,
22707 /// \brief Get the PSHUF-style mask from PSHUF node.
22709 /// This is a very minor wrapper around getTargetShuffleMask to ease forming v4
22710 /// PSHUF-style masks that can be reused with such instructions.
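///
/// For PSHUFLW/PSHUFHW the decoded 8-element mask is reduced to the four
/// lanes the instruction actually permutes (the high-half indices are
/// rebased to 0-3) so the result can be fed to getV4X86ShuffleImm8ForMask.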
22711 static SmallVector<int, 4> getPSHUFShuffleMask(SDValue N) {
22712 SmallVector<int, 4> Mask;
22714 bool HaveMask = getTargetShuffleMask(N.getNode(), N.getSimpleValueType(), Mask, IsUnary);
22718 switch (N.getOpcode()) {
22719 case X86ISD::PSHUFD:
22721 case X86ISD::PSHUFLW:
22724 case X86ISD::PSHUFHW:
22725 Mask.erase(Mask.begin(), Mask.begin() + 4);
22726 for (int &M : Mask)
22730 llvm_unreachable("No valid shuffle instruction found!");
22734 /// \brief Search for a combinable shuffle across a chain ending in pshufd.
22736 /// We walk up the chain and look for a combinable shuffle, skipping over
22737 /// shuffles that we could hoist this shuffle's transformation past without
22738 /// altering anything.
22740 combineRedundantDWordShuffle(SDValue N, MutableArrayRef<int> Mask,
22742 TargetLowering::DAGCombinerInfo &DCI) {
22743 assert(N.getOpcode() == X86ISD::PSHUFD &&
22744 "Called with something other than an x86 128-bit half shuffle!");
22747 // Walk up a single-use chain looking for a combinable shuffle. Keep a stack
22748 // of the shuffles in the chain so that we can form a fresh chain to replace
22750 SmallVector<SDValue, 8> Chain;
22751 SDValue V = N.getOperand(0);
22752 for (; V.hasOneUse(); V = V.getOperand(0)) {
22753 switch (V.getOpcode()) {
22755 return SDValue(); // Nothing combined!
22758 // Skip bitcasts as we always know the type for the target-specific shuffles.
22762 case X86ISD::PSHUFD:
22763 // Found another dword shuffle.
22766 case X86ISD::PSHUFLW:
22767 // Check that the low words (being shuffled) are the identity in the
22768 // dword shuffle, and the high words are self-contained.
22769 if (Mask[0] != 0 || Mask[1] != 1 ||
22770 !(Mask[2] >= 2 && Mask[2] < 4 && Mask[3] >= 2 && Mask[3] < 4))
22773 Chain.push_back(V);
22776 case X86ISD::PSHUFHW:
22777 // Check that the high words (being shuffled) are the identity in the
22778 // dword shuffle, and the low words are self-contained.
22779 if (Mask[2] != 2 || Mask[3] != 3 ||
22780 !(Mask[0] >= 0 && Mask[0] < 2 && Mask[1] >= 0 && Mask[1] < 2))
22783 Chain.push_back(V);
22786 case X86ISD::UNPCKL:
22787 case X86ISD::UNPCKH:
22788 // For either i8 -> i16 or i16 -> i32 unpacks, we can combine a dword
22789 // shuffle into a preceding word shuffle.
22790 if (V.getValueType() != MVT::v16i8 && V.getValueType() != MVT::v8i16)
22793 // Search for a half-shuffle which we can combine with.
22794 unsigned CombineOp =
22795 V.getOpcode() == X86ISD::UNPCKL ? X86ISD::PSHUFLW : X86ISD::PSHUFHW;
22796 if (V.getOperand(0) != V.getOperand(1) ||
22797 !V->isOnlyUserOf(V.getOperand(0).getNode()))
22799 Chain.push_back(V);
22800 V = V.getOperand(0);
22802 switch (V.getOpcode()) {
22804 return SDValue(); // Nothing to combine.
22806 case X86ISD::PSHUFLW:
22807 case X86ISD::PSHUFHW:
22808 if (V.getOpcode() == CombineOp)
22811 Chain.push_back(V);
22815 V = V.getOperand(0);
22819 } while (V.hasOneUse());
22822 // Break out of the loop if we break out of the switch.
22826 if (!V.hasOneUse())
22827 // We fell out of the loop without finding a viable combining instruction.
22830 // Merge this node's mask and our incoming mask.
22831 SmallVector<int, 4> VMask = getPSHUFShuffleMask(V);
22832 for (int &M : Mask)
22833 M = VMask[M];
22834 V = DAG.getNode(V.getOpcode(), DL, V.getValueType(), V.getOperand(0),
22835 getV4X86ShuffleImm8ForMask(Mask, DAG));
22837 // Rebuild the chain around this new shuffle.
22838 while (!Chain.empty()) {
22839 SDValue W = Chain.pop_back_val();
22841 if (V.getValueType() != W.getOperand(0).getValueType())
22842 V = DAG.getNode(ISD::BITCAST, DL, W.getOperand(0).getValueType(), V);
22844 switch (W.getOpcode()) {
22846 llvm_unreachable("Only PSHUF and UNPCK instructions get here!");
22848 case X86ISD::UNPCKL:
22849 case X86ISD::UNPCKH:
22850 V = DAG.getNode(W.getOpcode(), DL, W.getValueType(), V, V);
22853 case X86ISD::PSHUFD:
22854 case X86ISD::PSHUFLW:
22855 case X86ISD::PSHUFHW:
22856 V = DAG.getNode(W.getOpcode(), DL, W.getValueType(), V, W.getOperand(1));
22860 if (V.getValueType() != N.getValueType())
22861 V = DAG.getNode(ISD::BITCAST, DL, N.getValueType(), V);
22863 // Return the new chain to replace N.
22867 /// \brief Search for a combinable shuffle across a chain ending in pshuflw or pshufhw.
22869 /// We walk up the chain, skipping shuffles of the other half and looking
22870 /// through shuffles which switch halves trying to find a shuffle of the same
22871 /// pair of dwords.
22872 static bool combineRedundantHalfShuffle(SDValue N, MutableArrayRef<int> Mask,
22874 TargetLowering::DAGCombinerInfo &DCI) {
22876 (N.getOpcode() == X86ISD::PSHUFLW || N.getOpcode() == X86ISD::PSHUFHW) &&
22877 "Called with something other than an x86 128-bit half shuffle!");
22879 unsigned CombineOpcode = N.getOpcode();
22881 // Walk up a single-use chain looking for a combinable shuffle.
22882 SDValue V = N.getOperand(0);
22883 for (; V.hasOneUse(); V = V.getOperand(0)) {
22884 switch (V.getOpcode()) {
22886 return false; // Nothing combined!
22889 // Skip bitcasts as we always know the type for the target-specific shuffles.
22893 case X86ISD::PSHUFLW:
22894 case X86ISD::PSHUFHW:
22895 if (V.getOpcode() == CombineOpcode)
22898 // Other-half shuffles are no-ops.
22901 // Break out of the loop if we break out of the switch.
22905 if (!V.hasOneUse())
22906 // We fell out of the loop without finding a viable combining instruction.
22909 // Combine away the bottom node as its shuffle will be accumulated into
22910 // a preceding shuffle.
22911 DCI.CombineTo(N.getNode(), N.getOperand(0), /*AddTo*/ true);
22913 // Record the old value.
22914 SDValue Old = V;
22916 // Merge this node's mask and our incoming mask (adjusted to account for all
22917 // the pshufd instructions encountered).
22918 SmallVector<int, 4> VMask = getPSHUFShuffleMask(V);
22919 for (int &M : Mask)
22920 M = VMask[M];
22921 V = DAG.getNode(V.getOpcode(), DL, MVT::v8i16, V.getOperand(0),
22922 getV4X86ShuffleImm8ForMask(Mask, DAG));
22924 // Check that the shuffles didn't cancel each other out. If not, we need to
22925 // combine to the new one.
22926 if (Old != V)
22927 // Replace the combinable shuffle with the combined one, updating all users
22928 // so that we re-evaluate the chain here.
22929 DCI.CombineTo(Old.getNode(), V, /*AddTo*/ true);
22934 /// \brief Try to combine x86 target specific shuffles.
22935 static SDValue PerformTargetShuffleCombine(SDValue N, SelectionDAG &DAG,
22936 TargetLowering::DAGCombinerInfo &DCI,
22937 const X86Subtarget *Subtarget) {
22939 MVT VT = N.getSimpleValueType();
22940 SmallVector<int, 4> Mask;
22942 switch (N.getOpcode()) {
22943 case X86ISD::PSHUFD:
22944 case X86ISD::PSHUFLW:
22945 case X86ISD::PSHUFHW:
22946 Mask = getPSHUFShuffleMask(N);
22947 assert(Mask.size() == 4);
22953 // Nuke no-op shuffles that show up after combining.
22954 if (isNoopShuffleMask(Mask))
22955 return DCI.CombineTo(N.getNode(), N.getOperand(0), /*AddTo*/ true);
22957 // Look for simplifications involving one or two shuffle instructions.
22958 SDValue V = N.getOperand(0);
22959 switch (N.getOpcode()) {
22962 case X86ISD::PSHUFLW:
22963 case X86ISD::PSHUFHW:
22964 assert(VT == MVT::v8i16);
22967 if (combineRedundantHalfShuffle(N, Mask, DAG, DCI))
22968 return SDValue(); // We combined away this shuffle, so we're done.
22970 // See if this reduces to a PSHUFD which is no more expensive and can
22971 // combine with more operations. Note that it has to at least flip the
22972 // dwords as otherwise it would have been removed as a no-op.
22973 if (Mask[0] == 2 && Mask[1] == 3 && Mask[2] == 0 && Mask[3] == 1) {
22974 int DMask[] = {0, 1, 2, 3};
22975 int DOffset = N.getOpcode() == X86ISD::PSHUFLW ? 0 : 2;
22976 DMask[DOffset + 0] = DOffset + 1;
22977 DMask[DOffset + 1] = DOffset + 0;
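// For example, PSHUFLW <2,3,0,1> on v8i16 swaps the two low word pairs,
// which is the same as PSHUFD <1,0,2,3> on the v4i32 view of the vector.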
22978 V = DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, V);
22979 DCI.AddToWorklist(V.getNode());
22980 V = DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32, V,
22981 getV4X86ShuffleImm8ForMask(DMask, DAG));
22982 DCI.AddToWorklist(V.getNode());
22983 return DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, V);
22986 // Look for shuffle patterns which can be implemented as a single unpack.
22987 // FIXME: This doesn't handle the location of the PSHUFD generically, and
22988 // only works when we have a PSHUFD followed by two half-shuffles.
22989 if (Mask[0] == Mask[1] && Mask[2] == Mask[3] &&
22990 (V.getOpcode() == X86ISD::PSHUFLW ||
22991 V.getOpcode() == X86ISD::PSHUFHW) &&
22992 V.getOpcode() != N.getOpcode() &&
22994 SDValue D = V.getOperand(0);
22995 while (D.getOpcode() == ISD::BITCAST && D.hasOneUse())
22996 D = D.getOperand(0);
22997 if (D.getOpcode() == X86ISD::PSHUFD && D.hasOneUse()) {
22998 SmallVector<int, 4> VMask = getPSHUFShuffleMask(V);
22999 SmallVector<int, 4> DMask = getPSHUFShuffleMask(D);
23000 int NOffset = N.getOpcode() == X86ISD::PSHUFLW ? 0 : 4;
23001 int VOffset = V.getOpcode() == X86ISD::PSHUFLW ? 0 : 4;
23003 for (int i = 0; i < 4; ++i) {
23004 WordMask[i + NOffset] = Mask[i] + NOffset;
23005 WordMask[i + VOffset] = VMask[i] + VOffset;
23007 // Map the word mask through the DWord mask.
23009 for (int i = 0; i < 8; ++i)
23010 MappedMask[i] = 2 * DMask[WordMask[i] / 2] + WordMask[i] % 2;
23011 const int UnpackLoMask[] = {0, 0, 1, 1, 2, 2, 3, 3};
23012 const int UnpackHiMask[] = {4, 4, 5, 5, 6, 6, 7, 7};
23013 if (std::equal(std::begin(MappedMask), std::end(MappedMask),
23014 std::begin(UnpackLoMask)) ||
23015 std::equal(std::begin(MappedMask), std::end(MappedMask),
23016 std::begin(UnpackHiMask))) {
23017 // We can replace all three shuffles with an unpack.
23018 V = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, D.getOperand(0));
23019 DCI.AddToWorklist(V.getNode());
23020 return DAG.getNode(MappedMask[0] == 0 ? X86ISD::UNPCKL
23022 DL, MVT::v8i16, V, V);
23029 case X86ISD::PSHUFD:
23030 if (SDValue NewN = combineRedundantDWordShuffle(N, Mask, DAG, DCI))
23039 /// \brief Try to combine a shuffle into a target-specific add-sub node.
23041 /// We combine this directly on the abstract vector shuffle nodes so it is
23042 /// easier to generically match. We also insert dummy vector shuffle nodes for
23043 /// the operands which explicitly discard the lanes which are unused by this
23044 /// operation to try to flow through the rest of the combiner the fact that
23045 /// they're unused.
23046 static SDValue combineShuffleToAddSub(SDNode *N, SelectionDAG &DAG) {
23048 EVT VT = N->getValueType(0);
23050 // We only handle target-independent shuffles.
23051 // FIXME: It would be easy and harmless to use the target shuffle mask
23052 // extraction tool to support more.
23053 if (N->getOpcode() != ISD::VECTOR_SHUFFLE)
23056 auto *SVN = cast<ShuffleVectorSDNode>(N);
23057 ArrayRef<int> Mask = SVN->getMask();
23058 SDValue V1 = N->getOperand(0);
23059 SDValue V2 = N->getOperand(1);
23061 // We require the first shuffle operand to be the SUB node, and the second to
23062 // be the ADD node.
23063 // FIXME: We should support the commuted patterns.
23064 if (V1->getOpcode() != ISD::FSUB || V2->getOpcode() != ISD::FADD)
23067 // If there are other uses of these operations we can't fold them.
23068 if (!V1->hasOneUse() || !V2->hasOneUse())
23071 // Ensure that both operations have the same operands. Note that we can
23072 // commute the FADD operands.
23073 SDValue LHS = V1->getOperand(0), RHS = V1->getOperand(1);
23074 if ((V2->getOperand(0) != LHS || V2->getOperand(1) != RHS) &&
23075 (V2->getOperand(0) != RHS || V2->getOperand(1) != LHS))
23078 // We're looking for blends between FADD and FSUB nodes. We insist on these
23079 // nodes being lined up in a specific expected pattern.
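// For v4f32, for instance, the mask <0,5,2,7> takes the even lanes from the
// FSUB result and the odd lanes from the FADD result, which is exactly the
// lane pattern ADDSUBPS produces.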
23080 if (!(isShuffleEquivalent(V1, V2, Mask, 0, 3) ||
23081 isShuffleEquivalent(V1, V2, Mask, 0, 5, 2, 7) ||
23082 isShuffleEquivalent(V1, V2, Mask, 0, 9, 2, 11, 4, 13, 6, 15)))
23085 // Only specific types are legal at this point, assert so we notice if and
23086 // when these change.
23087 assert((VT == MVT::v4f32 || VT == MVT::v2f64 || VT == MVT::v8f32 ||
23088 VT == MVT::v4f64) &&
23089 "Unknown vector type encountered!");
23091 return DAG.getNode(X86ISD::ADDSUB, DL, VT, LHS, RHS);
23094 /// PerformShuffleCombine - Performs several different shuffle combines.
23095 static SDValue PerformShuffleCombine(SDNode *N, SelectionDAG &DAG,
23096 TargetLowering::DAGCombinerInfo &DCI,
23097 const X86Subtarget *Subtarget) {
23099 SDValue N0 = N->getOperand(0);
23100 SDValue N1 = N->getOperand(1);
23101 EVT VT = N->getValueType(0);
23103 // Don't create instructions with illegal types after legalize types has run.
23104 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
23105 if (!DCI.isBeforeLegalize() && !TLI.isTypeLegal(VT.getVectorElementType()))
23108 // If we have legalized the vector types, look for blends of FADD and FSUB
23109 // nodes that we can fuse into an ADDSUB node.
23110 if (TLI.isTypeLegal(VT) && Subtarget->hasSSE3())
23111 if (SDValue AddSub = combineShuffleToAddSub(N, DAG))
23114 // Combine 256-bit vector shuffles. This is only profitable when in AVX mode
23115 if (Subtarget->hasFp256() && VT.is256BitVector() &&
23116 N->getOpcode() == ISD::VECTOR_SHUFFLE)
23117 return PerformShuffleCombine256(N, DAG, DCI, Subtarget);
23119 // During Type Legalization, when promoting illegal vector types,
23120 // the backend might introduce new shuffle dag nodes and bitcasts.
23122 // This code performs the following transformation:
23123 // fold: (shuffle (bitcast (BINOP A, B)), Undef, <Mask>) ->
23124 // (shuffle (BINOP (bitcast A), (bitcast B)), Undef, <Mask>)
23126 // We do this only if both the bitcast and the BINOP dag nodes have
23127 // one use. Also, perform this transformation only if the new binary
23128 // operation is legal. This is to avoid introducing dag nodes that
23129 // potentially need to be further expanded (or custom lowered) into a
23130 // less optimal sequence of dag nodes.
23131 if (!DCI.isBeforeLegalize() && DCI.isBeforeLegalizeOps() &&
23132 N1.getOpcode() == ISD::UNDEF && N0.hasOneUse() &&
23133 N0.getOpcode() == ISD::BITCAST) {
23134 SDValue BC0 = N0.getOperand(0);
23135 EVT SVT = BC0.getValueType();
23136 unsigned Opcode = BC0.getOpcode();
23137 unsigned NumElts = VT.getVectorNumElements();
23139 if (BC0.hasOneUse() && SVT.isVector() &&
23140 SVT.getVectorNumElements() * 2 == NumElts &&
23141 TLI.isOperationLegal(Opcode, VT)) {
23142 bool CanFold = false;
23154 unsigned SVTNumElts = SVT.getVectorNumElements();
23155 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
23156 for (unsigned i = 0, e = SVTNumElts; i != e && CanFold; ++i)
23157 CanFold = SVOp->getMaskElt(i) == (int)(i * 2);
23158 for (unsigned i = SVTNumElts, e = NumElts; i != e && CanFold; ++i)
23159 CanFold = SVOp->getMaskElt(i) < 0;
23162 SDValue BC00 = DAG.getNode(ISD::BITCAST, dl, VT, BC0.getOperand(0));
23163 SDValue BC01 = DAG.getNode(ISD::BITCAST, dl, VT, BC0.getOperand(1));
23164 SDValue NewBinOp = DAG.getNode(BC0.getOpcode(), dl, VT, BC00, BC01);
23165 return DAG.getVectorShuffle(VT, dl, NewBinOp, N1, &SVOp->getMask()[0]);
23170 // Only handle 128 wide vector from here on.
23171 if (!VT.is128BitVector())
23174 // Combine a vector_shuffle that is equal to build_vector load1, load2, load3,
23175 // load4, <0, 1, 2, 3> into a 128-bit load if the load addresses are
23176 // consecutive, non-overlapping, and in the right order.
23177 SmallVector<SDValue, 16> Elts;
23178 for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i)
23179 Elts.push_back(getShuffleScalarElt(N, i, DAG, 0));
23181 SDValue LD = EltsFromConsecutiveLoads(VT, Elts, dl, DAG, true);
23185 if (isTargetShuffle(N->getOpcode())) {
23187 PerformTargetShuffleCombine(SDValue(N, 0), DAG, DCI, Subtarget);
23188 if (Shuffle.getNode())
23191 // Try recursively combining arbitrary sequences of x86 shuffle
23192 // instructions into higher-order shuffles. We do this after combining
23193 // specific PSHUF instruction sequences into their minimal form so that we
23194 // can evaluate how many specialized shuffle instructions are involved in
23195 // a particular chain.
23196 SmallVector<int, 1> NonceMask; // Just a placeholder.
23197 NonceMask.push_back(0);
23198 if (combineX86ShufflesRecursively(SDValue(N, 0), SDValue(N, 0), NonceMask,
23199 /*Depth*/ 1, /*HasPSHUFB*/ false, DAG,
23201 return SDValue(); // This routine will use CombineTo to replace N.
23207 /// PerformTruncateCombine - Converts a truncate operation into
23208 /// a sequence of vector shuffle operations.
23209 /// This is possible when we truncate a 256-bit vector to a 128-bit vector.
23210 static SDValue PerformTruncateCombine(SDNode *N, SelectionDAG &DAG,
23211 TargetLowering::DAGCombinerInfo &DCI,
23212 const X86Subtarget *Subtarget) {
23216 /// XFormVExtractWithShuffleIntoLoad - Check if a vector extract from a target
23217 /// specific shuffle of a load can be folded into a single element load.
23218 /// Similar handling for VECTOR_SHUFFLE is performed by DAGCombiner, but
23219 /// shuffles have been custom lowered so we need to handle those here.
23220 static SDValue XFormVExtractWithShuffleIntoLoad(SDNode *N, SelectionDAG &DAG,
23221 TargetLowering::DAGCombinerInfo &DCI) {
23222 if (DCI.isBeforeLegalizeOps())
23225 SDValue InVec = N->getOperand(0);
23226 SDValue EltNo = N->getOperand(1);
23228 if (!isa<ConstantSDNode>(EltNo))
23231 EVT OriginalVT = InVec.getValueType();
23233 if (InVec.getOpcode() == ISD::BITCAST) {
23234 // Don't duplicate a load with other uses.
23235 if (!InVec.hasOneUse())
23237 EVT BCVT = InVec.getOperand(0).getValueType();
23238 if (BCVT.getVectorNumElements() != OriginalVT.getVectorNumElements())
23240 InVec = InVec.getOperand(0);
23243 EVT CurrentVT = InVec.getValueType();
23245 if (!isTargetShuffle(InVec.getOpcode()))
23248 // Don't duplicate a load with other uses.
23249 if (!InVec.hasOneUse())
23252 SmallVector<int, 16> ShuffleMask;
23254 if (!getTargetShuffleMask(InVec.getNode(), CurrentVT.getSimpleVT(),
23255 ShuffleMask, UnaryShuffle))
23258 // Select the input vector, guarding against an out-of-range extract index.
23259 unsigned NumElems = CurrentVT.getVectorNumElements();
23260 int Elt = cast<ConstantSDNode>(EltNo)->getZExtValue();
23261 int Idx = (Elt > (int)NumElems) ? -1 : ShuffleMask[Elt];
23262 SDValue LdNode = (Idx < (int)NumElems) ? InVec.getOperand(0)
23263 : InVec.getOperand(1);
23265 // If inputs to shuffle are the same for both ops, then allow 2 uses
23266 unsigned AllowedUses = InVec.getNumOperands() > 1 &&
23267 InVec.getOperand(0) == InVec.getOperand(1) ? 2 : 1;
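// When both shuffle inputs are the same node, that node legitimately has two
// uses coming from this shuffle alone, so two uses of the load are allowed.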
23269 if (LdNode.getOpcode() == ISD::BITCAST) {
23270 // Don't duplicate a load with other uses.
23271 if (!LdNode.getNode()->hasNUsesOfValue(AllowedUses, 0))
23274 AllowedUses = 1; // only allow 1 load use if we have a bitcast
23275 LdNode = LdNode.getOperand(0);
23278 if (!ISD::isNormalLoad(LdNode.getNode()))
23281 LoadSDNode *LN0 = cast<LoadSDNode>(LdNode);
23283 if (!LN0 ||!LN0->hasNUsesOfValue(AllowedUses, 0) || LN0->isVolatile())
23286 EVT EltVT = N->getValueType(0);
23287 // If there's a bitcast before the shuffle, check if the load type and
23288 // alignment is valid.
23289 unsigned Align = LN0->getAlignment();
23290 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
23291 unsigned NewAlign = TLI.getDataLayout()->getABITypeAlignment(
23292 EltVT.getTypeForEVT(*DAG.getContext()));
23294 if (NewAlign > Align || !TLI.isOperationLegalOrCustom(ISD::LOAD, EltVT))
23297 // All checks match so transform back to vector_shuffle so that DAG combiner
23298 // can finish the job
23301 // Create the shuffle node, taking into account the case that it's a unary shuffle.
23302 SDValue Shuffle = (UnaryShuffle) ? DAG.getUNDEF(CurrentVT)
23303 : InVec.getOperand(1);
23304 Shuffle = DAG.getVectorShuffle(CurrentVT, dl,
23305 InVec.getOperand(0), Shuffle,
23307 Shuffle = DAG.getNode(ISD::BITCAST, dl, OriginalVT, Shuffle);
23308 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, N->getValueType(0), Shuffle,
23312 /// \brief Detect bitcasts from i32 to the x86mmx low word. Since MMX types are
23313 /// special and don't usually play with other vector types, it's better to
23314 /// handle them early to be sure we emit efficient code by avoiding
23315 /// store-load conversions.
23316 static SDValue PerformBITCASTCombine(SDNode *N, SelectionDAG &DAG) {
23317 if (N->getValueType(0) != MVT::x86mmx ||
23318 N->getOperand(0)->getOpcode() != ISD::BUILD_VECTOR ||
23319 N->getOperand(0)->getValueType(0) != MVT::v2i32)
23322 SDValue V = N->getOperand(0);
23323 ConstantSDNode *C = dyn_cast<ConstantSDNode>(V.getOperand(1));
23324 if (C && C->getZExtValue() == 0 && V.getOperand(0).getValueType() == MVT::i32)
23325 return DAG.getNode(X86ISD::MMX_MOVW2D, SDLoc(V.getOperand(0)),
23326 N->getValueType(0), V.getOperand(0));
23331 /// PerformEXTRACT_VECTOR_ELTCombine - Detect vector gather/scatter index
23332 /// generation and convert it from being a bunch of shuffles and extracts
23333 /// into a somewhat faster sequence. For i686, the best sequence is apparently
23334 /// storing the value and loading scalars back, while for x64 we should
23335 /// use 64-bit extracts and shifts.
23336 static SDValue PerformEXTRACT_VECTOR_ELTCombine(SDNode *N, SelectionDAG &DAG,
23337 TargetLowering::DAGCombinerInfo &DCI) {
23338 SDValue NewOp = XFormVExtractWithShuffleIntoLoad(N, DAG, DCI);
23339 if (NewOp.getNode())
23342 SDValue InputVector = N->getOperand(0);
23344 // Detect mmx to i32 conversion through a v2i32 elt extract.
23345 if (InputVector.getOpcode() == ISD::BITCAST && InputVector.hasOneUse() &&
23346 N->getValueType(0) == MVT::i32 &&
23347 InputVector.getValueType() == MVT::v2i32) {
23349 // The bitcast source is a direct mmx result.
23350 SDValue MMXSrc = InputVector.getNode()->getOperand(0);
23351 if (MMXSrc.getValueType() == MVT::x86mmx)
23352 return DAG.getNode(X86ISD::MMX_MOVD2W, SDLoc(InputVector),
23353 N->getValueType(0),
23354 InputVector.getNode()->getOperand(0));
23356 // The mmx is indirect: (i64 extract_elt (v1i64 bitcast (x86mmx ...))).
23357 SDValue MMXSrcOp = MMXSrc.getOperand(0);
23358 if (MMXSrc.getOpcode() == ISD::EXTRACT_VECTOR_ELT && MMXSrc.hasOneUse() &&
23359 MMXSrc.getValueType() == MVT::i64 && MMXSrcOp.hasOneUse() &&
23360 MMXSrcOp.getOpcode() == ISD::BITCAST &&
23361 MMXSrcOp.getValueType() == MVT::v1i64 &&
23362 MMXSrcOp.getOperand(0).getValueType() == MVT::x86mmx)
23363 return DAG.getNode(X86ISD::MMX_MOVD2W, SDLoc(InputVector),
23364 N->getValueType(0),
23365 MMXSrcOp.getOperand(0));
23368 // Only operate on vectors of 4 elements, where the alternative shuffling
23369 // gets to be more expensive.
23370 if (InputVector.getValueType() != MVT::v4i32)
23373 // Check whether every use of InputVector is an EXTRACT_VECTOR_ELT with a
23374 // single use which is a sign-extend or zero-extend, and all elements are used.
23376 SmallVector<SDNode *, 4> Uses;
23377 unsigned ExtractedElements = 0;
23378 for (SDNode::use_iterator UI = InputVector.getNode()->use_begin(),
23379 UE = InputVector.getNode()->use_end(); UI != UE; ++UI) {
23380 if (UI.getUse().getResNo() != InputVector.getResNo())
23383 SDNode *Extract = *UI;
23384 if (Extract->getOpcode() != ISD::EXTRACT_VECTOR_ELT)
23387 if (Extract->getValueType(0) != MVT::i32)
23389 if (!Extract->hasOneUse())
23391 if (Extract->use_begin()->getOpcode() != ISD::SIGN_EXTEND &&
23392 Extract->use_begin()->getOpcode() != ISD::ZERO_EXTEND)
23394 if (!isa<ConstantSDNode>(Extract->getOperand(1)))
23397 // Record which element was extracted.
23398 ExtractedElements |=
23399 1 << cast<ConstantSDNode>(Extract->getOperand(1))->getZExtValue();
23401 Uses.push_back(Extract);
23404 // If not all the elements were used, this may not be worthwhile.
23405 if (ExtractedElements != 15)
23408 // Ok, we've now decided to do the transformation.
23409 // If 64-bit shifts are legal, use the extract-shift sequence,
23410 // otherwise bounce the vector off the cache.
23411 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
23413 SDLoc dl(InputVector);
23415 if (TLI.isOperationLegal(ISD::SRA, MVT::i64)) {
23416 SDValue Cst = DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, InputVector);
23417 EVT VecIdxTy = DAG.getTargetLoweringInfo().getVectorIdxTy();
23418 SDValue BottomHalf = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i64, Cst,
23419 DAG.getConstant(0, VecIdxTy));
23420 SDValue TopHalf = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i64, Cst,
23421 DAG.getConstant(1, VecIdxTy));
23423 SDValue ShAmt = DAG.getConstant(32,
23424 DAG.getTargetLoweringInfo().getShiftAmountTy(MVT::i64));
23425 Vals[0] = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, BottomHalf);
23426 Vals[1] = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32,
23427 DAG.getNode(ISD::SRA, dl, MVT::i64, BottomHalf, ShAmt));
23428 Vals[2] = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, TopHalf);
23429 Vals[3] = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32,
23430 DAG.getNode(ISD::SRA, dl, MVT::i64, TopHalf, ShAmt));
23432 // Store the value to a temporary stack slot.
23433 SDValue StackPtr = DAG.CreateStackTemporary(InputVector.getValueType());
23434 SDValue Ch = DAG.getStore(DAG.getEntryNode(), dl, InputVector, StackPtr,
23435 MachinePointerInfo(), false, false, 0);
23437 EVT ElementType = InputVector.getValueType().getVectorElementType();
23438 unsigned EltSize = ElementType.getSizeInBits() / 8;
23440 // Replace each use (extract) with a load of the appropriate element.
23441 for (unsigned i = 0; i < 4; ++i) {
23442 uint64_t Offset = EltSize * i;
23443 SDValue OffsetVal = DAG.getConstant(Offset, TLI.getPointerTy());
23445 SDValue ScalarAddr = DAG.getNode(ISD::ADD, dl, TLI.getPointerTy(),
23446 StackPtr, OffsetVal);
23448 // Load the scalar.
23449 Vals[i] = DAG.getLoad(ElementType, dl, Ch,
23450 ScalarAddr, MachinePointerInfo(),
23451 false, false, false, 0);
23456 // Replace the extracts
23457 for (SmallVectorImpl<SDNode *>::iterator UI = Uses.begin(),
23458 UE = Uses.end(); UI != UE; ++UI) {
23459 SDNode *Extract = *UI;
23461 SDValue Idx = Extract->getOperand(1);
23462 uint64_t IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
23463 DAG.ReplaceAllUsesOfValueWith(SDValue(Extract, 0), Vals[IdxVal]);
23466 // The replacement was made in place; don't return anything.
23470 /// \brief Matches a VSELECT onto min/max, or returns 0 if the node doesn't match.
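/// For example, (vselect (setcc X, Y, setult), X, Y) typically maps to
/// X86ISD::UMIN, while the reversed-arm form (vselect (setcc X, Y, setult),
/// Y, X) maps to X86ISD::UMAX. The returned bool asks the caller to split
/// 256-bit vectors into 128-bit halves when the target lacks AVX2.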
23471 static std::pair<unsigned, bool>
23472 matchIntegerMINMAX(SDValue Cond, EVT VT, SDValue LHS, SDValue RHS,
23473 SelectionDAG &DAG, const X86Subtarget *Subtarget) {
23474 if (!VT.isVector())
23475 return std::make_pair(0, false);
23477 bool NeedSplit = false;
23478 switch (VT.getSimpleVT().SimpleTy) {
23479 default: return std::make_pair(0, false);
23482 if (!Subtarget->hasVLX())
23483 return std::make_pair(0, false);
23487 if (!Subtarget->hasBWI())
23488 return std::make_pair(0, false);
23492 if (!Subtarget->hasAVX512())
23493 return std::make_pair(0, false);
23498 if (!Subtarget->hasAVX2())
23500 if (!Subtarget->hasAVX())
23501 return std::make_pair(0, false);
23506 if (!Subtarget->hasSSE2())
23507 return std::make_pair(0, false);
23510 // SSE2 has only a small subset of the operations.
23511 bool hasUnsigned = Subtarget->hasSSE41() ||
23512 (Subtarget->hasSSE2() && VT == MVT::v16i8);
23513 bool hasSigned = Subtarget->hasSSE41() ||
23514 (Subtarget->hasSSE2() && VT == MVT::v8i16);
23516 ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
23519 // Check for x CC y ? x : y.
23520 if (DAG.isEqualTo(LHS, Cond.getOperand(0)) &&
23521 DAG.isEqualTo(RHS, Cond.getOperand(1))) {
23526 Opc = hasUnsigned ? X86ISD::UMIN : 0; break;
23529 Opc = hasUnsigned ? X86ISD::UMAX : 0; break;
23532 Opc = hasSigned ? X86ISD::SMIN : 0; break;
23535 Opc = hasSigned ? X86ISD::SMAX : 0; break;
23537 // Check for x CC y ? y : x -- a min/max with reversed arms.
23538 } else if (DAG.isEqualTo(LHS, Cond.getOperand(1)) &&
23539 DAG.isEqualTo(RHS, Cond.getOperand(0))) {
23544 Opc = hasUnsigned ? X86ISD::UMAX : 0; break;
23547 Opc = hasUnsigned ? X86ISD::UMIN : 0; break;
23550 Opc = hasSigned ? X86ISD::SMAX : 0; break;
23553 Opc = hasSigned ? X86ISD::SMIN : 0; break;
23557 return std::make_pair(Opc, NeedSplit);
23561 transformVSELECTtoBlendVECTOR_SHUFFLE(SDNode *N, SelectionDAG &DAG,
23562 const X86Subtarget *Subtarget) {
23564 SDValue Cond = N->getOperand(0);
23565 SDValue LHS = N->getOperand(1);
23566 SDValue RHS = N->getOperand(2);
23568 if (Cond.getOpcode() == ISD::SIGN_EXTEND) {
23569 SDValue CondSrc = Cond->getOperand(0);
23570 if (CondSrc->getOpcode() == ISD::SIGN_EXTEND_INREG)
23571 Cond = CondSrc->getOperand(0);
23574 if (!ISD::isBuildVectorOfConstantSDNodes(Cond.getNode()))
23577 // A vselect where all conditions and data are constants can be optimized into
23578 // a single vector load by SelectionDAGLegalize::ExpandBUILD_VECTOR().
23579 if (ISD::isBuildVectorOfConstantSDNodes(LHS.getNode()) &&
23580 ISD::isBuildVectorOfConstantSDNodes(RHS.getNode()))
23583 unsigned MaskValue = 0;
23584 if (!BUILD_VECTORtoBlendMask(cast<BuildVectorSDNode>(Cond), MaskValue))
23587 MVT VT = N->getSimpleValueType(0);
23588 unsigned NumElems = VT.getVectorNumElements();
23589 SmallVector<int, 8> ShuffleMask(NumElems, -1);
23590 for (unsigned i = 0; i < NumElems; ++i) {
23591 // Be sure we emit undef where we can.
23592 if (Cond.getOperand(i)->getOpcode() == ISD::UNDEF)
23593 ShuffleMask[i] = -1;
23595 ShuffleMask[i] = i + NumElems * ((MaskValue >> i) & 1);
23598 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
23599 if (!TLI.isShuffleMaskLegal(ShuffleMask, VT))
23601 return DAG.getVectorShuffle(VT, dl, LHS, RHS, &ShuffleMask[0]);
23604 /// PerformSELECTCombine - Do target-specific dag combines on SELECT and VSELECT
23606 static SDValue PerformSELECTCombine(SDNode *N, SelectionDAG &DAG,
23607 TargetLowering::DAGCombinerInfo &DCI,
23608 const X86Subtarget *Subtarget) {
23610 SDValue Cond = N->getOperand(0);
23611 // Get the LHS/RHS of the select.
23612 SDValue LHS = N->getOperand(1);
23613 SDValue RHS = N->getOperand(2);
23614 EVT VT = LHS.getValueType();
23615 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
23617 // If we have SSE[12] support, try to form min/max nodes. SSE min/max
23618 // instructions match the semantics of the common C idiom x<y?x:y but not
23619 // x<=y?x:y, because of how they handle negative zero (which can be
23620 // ignored in unsafe-math mode).
23621 // We also try to create v2f32 min/max nodes, which we later widen to v4f32.
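// For instance, (select (setcc x, y, olt), x, y) on f32 operands can become
// X86ISD::FMIN. MINSS/MAXSS return the second operand when either input is
// NaN or when both inputs are zeros of either sign, which is why the checks
// below sometimes swap LHS/RHS or give up on the transform.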
23622 if (Cond.getOpcode() == ISD::SETCC && VT.isFloatingPoint() &&
23623 VT != MVT::f80 && (TLI.isTypeLegal(VT) || VT == MVT::v2f32) &&
23624 (Subtarget->hasSSE2() ||
23625 (Subtarget->hasSSE1() && VT.getScalarType() == MVT::f32))) {
23626 ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
23628 unsigned Opcode = 0;
23629 // Check for x CC y ? x : y.
23630 if (DAG.isEqualTo(LHS, Cond.getOperand(0)) &&
23631 DAG.isEqualTo(RHS, Cond.getOperand(1))) {
23635 // Converting this to a min would handle NaNs incorrectly, and swapping
23636 // the operands would cause it to handle comparisons between positive
23637 // and negative zero incorrectly.
23638 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)) {
23639 if (!DAG.getTarget().Options.UnsafeFPMath &&
23640 !(DAG.isKnownNeverZero(LHS) || DAG.isKnownNeverZero(RHS)))
23642 std::swap(LHS, RHS);
23644 Opcode = X86ISD::FMIN;
23647 // Converting this to a min would handle comparisons between positive
23648 // and negative zero incorrectly.
23649 if (!DAG.getTarget().Options.UnsafeFPMath &&
23650 !DAG.isKnownNeverZero(LHS) && !DAG.isKnownNeverZero(RHS))
23652 Opcode = X86ISD::FMIN;
23655 // Converting this to a min would handle both negative zeros and NaNs
23656 // incorrectly, but we can swap the operands to fix both.
23657 std::swap(LHS, RHS);
23661 Opcode = X86ISD::FMIN;
23665 // Converting this to a max would handle comparisons between positive
23666 // and negative zero incorrectly.
23667 if (!DAG.getTarget().Options.UnsafeFPMath &&
23668 !DAG.isKnownNeverZero(LHS) && !DAG.isKnownNeverZero(RHS))
23670 Opcode = X86ISD::FMAX;
23673 // Converting this to a max would handle NaNs incorrectly, and swapping
23674 // the operands would cause it to handle comparisons between positive
23675 // and negative zero incorrectly.
23676 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)) {
23677 if (!DAG.getTarget().Options.UnsafeFPMath &&
23678 !(DAG.isKnownNeverZero(LHS) || DAG.isKnownNeverZero(RHS)))
23680 std::swap(LHS, RHS);
23682 Opcode = X86ISD::FMAX;
23685 // Converting this to a max would handle both negative zeros and NaNs
23686 // incorrectly, but we can swap the operands to fix both.
23687 std::swap(LHS, RHS);
23691 Opcode = X86ISD::FMAX;
23694 // Check for x CC y ? y : x -- a min/max with reversed arms.
23695 } else if (DAG.isEqualTo(LHS, Cond.getOperand(1)) &&
23696 DAG.isEqualTo(RHS, Cond.getOperand(0))) {
23700 // Converting this to a min would handle comparisons between positive
23701 // and negative zero incorrectly, and swapping the operands would
23702 // cause it to handle NaNs incorrectly.
23703 if (!DAG.getTarget().Options.UnsafeFPMath &&
23704 !(DAG.isKnownNeverZero(LHS) || DAG.isKnownNeverZero(RHS))) {
23705 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS))
23707 std::swap(LHS, RHS);
23709 Opcode = X86ISD::FMIN;
23712 // Converting this to a min would handle NaNs incorrectly.
23713 if (!DAG.getTarget().Options.UnsafeFPMath &&
23714 (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)))
23716 Opcode = X86ISD::FMIN;
23719 // Converting this to a min would handle both negative zeros and NaNs
23720 // incorrectly, but we can swap the operands to fix both.
23721 std::swap(LHS, RHS);
23725 Opcode = X86ISD::FMIN;
23729 // Converting this to a max would handle NaNs incorrectly.
23730 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS))
23732 Opcode = X86ISD::FMAX;
23735 // Converting this to a max would handle comparisons between positive
23736 // and negative zero incorrectly, and swapping the operands would
23737 // cause it to handle NaNs incorrectly.
23738 if (!DAG.getTarget().Options.UnsafeFPMath &&
23739 !DAG.isKnownNeverZero(LHS) && !DAG.isKnownNeverZero(RHS)) {
23740 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS))
23742 std::swap(LHS, RHS);
23744 Opcode = X86ISD::FMAX;
23747 // Converting this to a max would handle both negative zeros and NaNs
23748 // incorrectly, but we can swap the operands to fix both.
23749 std::swap(LHS, RHS);
23753 Opcode = X86ISD::FMAX;
23759 return DAG.getNode(Opcode, DL, N->getValueType(0), LHS, RHS);
23762 EVT CondVT = Cond.getValueType();
23763 if (Subtarget->hasAVX512() && VT.isVector() && CondVT.isVector() &&
23764 CondVT.getVectorElementType() == MVT::i1) {
23765 // v16i8 (select v16i1, v16i8, v16i8) does not have a proper
23766 // lowering on KNL. In this case we convert it to
23767 // v16i8 (select v16i8, v16i8, v16i8) and use an AVX instruction.
23768 // The same applies to all 128-bit and 256-bit vectors of i8 and i16.
23769 // Starting with SKX, these selects have a proper lowering.
23770 EVT OpVT = LHS.getValueType();
23771 if ((OpVT.is128BitVector() || OpVT.is256BitVector()) &&
23772 (OpVT.getVectorElementType() == MVT::i8 ||
23773 OpVT.getVectorElementType() == MVT::i16) &&
23774 !(Subtarget->hasBWI() && Subtarget->hasVLX())) {
23775 Cond = DAG.getNode(ISD::SIGN_EXTEND, DL, OpVT, Cond);
23776 DCI.AddToWorklist(Cond.getNode());
23777 return DAG.getNode(N->getOpcode(), DL, OpVT, Cond, LHS, RHS);
23780 // If this is a select between two integer constants, try to do some optimizations.
23782 if (ConstantSDNode *TrueC = dyn_cast<ConstantSDNode>(LHS)) {
23783 if (ConstantSDNode *FalseC = dyn_cast<ConstantSDNode>(RHS))
23784 // Don't do this for crazy integer types.
23785 if (DAG.getTargetLoweringInfo().isTypeLegal(LHS.getValueType())) {
23786 // If this is efficiently invertible, canonicalize the LHSC/RHSC values
23787 // so that TrueC (the true value) is larger than FalseC.
23788 bool NeedsCondInvert = false;
23790 if (TrueC->getAPIntValue().ult(FalseC->getAPIntValue()) &&
23791 // Efficiently invertible.
23792 (Cond.getOpcode() == ISD::SETCC || // setcc -> invertible.
23793 (Cond.getOpcode() == ISD::XOR && // xor(X, C) -> invertible.
23794 isa<ConstantSDNode>(Cond.getOperand(1))))) {
23795 NeedsCondInvert = true;
23796 std::swap(TrueC, FalseC);
23799 // Optimize C ? 8 : 0 -> zext(C) << 3. Likewise for any pow2/0.
23800 if (FalseC->getAPIntValue() == 0 &&
23801 TrueC->getAPIntValue().isPowerOf2()) {
23802 if (NeedsCondInvert) // Invert the condition if needed.
23803 Cond = DAG.getNode(ISD::XOR, DL, Cond.getValueType(), Cond,
23804 DAG.getConstant(1, Cond.getValueType()));
23806 // Zero extend the condition if needed.
23807 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, LHS.getValueType(), Cond);
23809 unsigned ShAmt = TrueC->getAPIntValue().logBase2();
23810 return DAG.getNode(ISD::SHL, DL, LHS.getValueType(), Cond,
23811 DAG.getConstant(ShAmt, MVT::i8));
23814 // Optimize Cond ? cst+1 : cst -> zext(setcc(C))+cst.
23815 if (FalseC->getAPIntValue()+1 == TrueC->getAPIntValue()) {
23816 if (NeedsCondInvert) // Invert the condition if needed.
23817 Cond = DAG.getNode(ISD::XOR, DL, Cond.getValueType(), Cond,
23818 DAG.getConstant(1, Cond.getValueType()));
23820 // Zero extend the condition if needed.
23821 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL,
23822 FalseC->getValueType(0), Cond);
23823 return DAG.getNode(ISD::ADD, DL, Cond.getValueType(), Cond,
23824 SDValue(FalseC, 0));
23827 // Optimize cases that will turn into an LEA instruction. This requires
23828 // an i32 or i64 and an efficient multiplier (1, 2, 3, 4, 5, 8, 9).
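// Illustrative example: (select Cond, 7, 2) has Diff = 5, an efficient
// multiplier, so it can be emitted as 2 + 5*zext(Cond), i.e. a zero-extend
// followed by a single LEA.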
23829 if (N->getValueType(0) == MVT::i32 || N->getValueType(0) == MVT::i64) {
23830 uint64_t Diff = TrueC->getZExtValue()-FalseC->getZExtValue();
23831 if (N->getValueType(0) == MVT::i32) Diff = (unsigned)Diff;
23833 bool isFastMultiplier = false;
23835 switch ((unsigned char)Diff) {
23837 case 1: // result = add base, cond
23838 case 2: // result = lea base( , cond*2)
23839 case 3: // result = lea base(cond, cond*2)
23840 case 4: // result = lea base( , cond*4)
23841 case 5: // result = lea base(cond, cond*4)
23842 case 8: // result = lea base( , cond*8)
23843 case 9: // result = lea base(cond, cond*8)
23844 isFastMultiplier = true;
23849 if (isFastMultiplier) {
23850 APInt Diff = TrueC->getAPIntValue()-FalseC->getAPIntValue();
23851 if (NeedsCondInvert) // Invert the condition if needed.
23852 Cond = DAG.getNode(ISD::XOR, DL, Cond.getValueType(), Cond,
23853 DAG.getConstant(1, Cond.getValueType()));
23855 // Zero extend the condition if needed.
23856 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, FalseC->getValueType(0),
23858 // Scale the condition by the difference.
23860 Cond = DAG.getNode(ISD::MUL, DL, Cond.getValueType(), Cond,
23861 DAG.getConstant(Diff, Cond.getValueType()));
23863 // Add the base if non-zero.
23864 if (FalseC->getAPIntValue() != 0)
23865 Cond = DAG.getNode(ISD::ADD, DL, Cond.getValueType(), Cond,
23866 SDValue(FalseC, 0));
23873 // Canonicalize max and min:
23874 // (x > y) ? x : y -> (x >= y) ? x : y
23875 // (x < y) ? x : y -> (x <= y) ? x : y
23876 // This allows use of COND_S / COND_NS (see TranslateX86CC) which eliminates
23877 // the need for an extra compare
23878 // against zero. e.g.
23879 // ((x - y) > 0) ? (x - y) : 0 -> ((x - y) >= 0) ? (x - y) : 0
23881 // testl %edi, %edi
23883 // cmovgl %edi, %eax
23887 // cmovsl %eax, %edi
23888 if (N->getOpcode() == ISD::SELECT && Cond.getOpcode() == ISD::SETCC &&
23889 DAG.isEqualTo(LHS, Cond.getOperand(0)) &&
23890 DAG.isEqualTo(RHS, Cond.getOperand(1))) {
23891 ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
23896 ISD::CondCode NewCC = (CC == ISD::SETLT) ? ISD::SETLE : ISD::SETGE;
23897 Cond = DAG.getSetCC(SDLoc(Cond), Cond.getValueType(),
23898 Cond.getOperand(0), Cond.getOperand(1), NewCC);
23899 return DAG.getNode(ISD::SELECT, DL, VT, Cond, LHS, RHS);
23904 // Early exit check
23905 if (!TLI.isTypeLegal(VT))
23908 // Match VSELECTs into subs with unsigned saturation.
23909 if (N->getOpcode() == ISD::VSELECT && Cond.getOpcode() == ISD::SETCC &&
23910 // psubus is available in SSE2 and AVX2 for i8 and i16 vectors.
23911 ((Subtarget->hasSSE2() && (VT == MVT::v16i8 || VT == MVT::v8i16)) ||
23912 (Subtarget->hasAVX2() && (VT == MVT::v32i8 || VT == MVT::v16i16)))) {
23913 ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
23915 // Check if one of the arms of the VSELECT is a zero vector. If it's on the
23916 // left side invert the predicate to simplify logic below.
23918 if (ISD::isBuildVectorAllZeros(LHS.getNode())) {
23920 CC = ISD::getSetCCInverse(CC, true);
23921 } else if (ISD::isBuildVectorAllZeros(RHS.getNode())) {
23925 if (Other.getNode() && Other->getNumOperands() == 2 &&
23926 DAG.isEqualTo(Other->getOperand(0), Cond.getOperand(0))) {
23927 SDValue OpLHS = Other->getOperand(0), OpRHS = Other->getOperand(1);
23928 SDValue CondRHS = Cond->getOperand(1);
23930 // Look for a general sub with unsigned saturation first.
23931 // x >= y ? x-y : 0 --> subus x, y
23932 // x > y ? x-y : 0 --> subus x, y
23933 if ((CC == ISD::SETUGE || CC == ISD::SETUGT) &&
23934 Other->getOpcode() == ISD::SUB && DAG.isEqualTo(OpRHS, CondRHS))
23935 return DAG.getNode(X86ISD::SUBUS, DL, VT, OpLHS, OpRHS);
23937 if (auto *OpRHSBV = dyn_cast<BuildVectorSDNode>(OpRHS))
23938 if (auto *OpRHSConst = OpRHSBV->getConstantSplatNode()) {
23939 if (auto *CondRHSBV = dyn_cast<BuildVectorSDNode>(CondRHS))
23940 if (auto *CondRHSConst = CondRHSBV->getConstantSplatNode())
23941 // If the RHS is a constant we have to reverse the const
23942 // canonicalization.
23943 // x > C-1 ? x+(-C) : 0 --> subus x, C
23944 if (CC == ISD::SETUGT && Other->getOpcode() == ISD::ADD &&
23945 CondRHSConst->getAPIntValue() ==
23946 (-OpRHSConst->getAPIntValue() - 1))
23947 return DAG.getNode(
23948 X86ISD::SUBUS, DL, VT, OpLHS,
23949 DAG.getConstant(-OpRHSConst->getAPIntValue(), VT));
23951 // Another special case: If C was a sign bit, the sub has been
23952 // canonicalized into a xor.
23953 // FIXME: Would it be better to use computeKnownBits to determine
23954 // whether it's safe to decanonicalize the xor?
23955 // x s< 0 ? x^C : 0 --> subus x, C
23956 if (CC == ISD::SETLT && Other->getOpcode() == ISD::XOR &&
23957 ISD::isBuildVectorAllZeros(CondRHS.getNode()) &&
23958 OpRHSConst->getAPIntValue().isSignBit())
23959 // Note that we have to rebuild the RHS constant here to ensure we
23960 // don't rely on particular values of undef lanes.
23961 return DAG.getNode(
23962 X86ISD::SUBUS, DL, VT, OpLHS,
23963 DAG.getConstant(OpRHSConst->getAPIntValue(), VT));
23968 // Try to match a min/max vector operation.
23969 if (N->getOpcode() == ISD::VSELECT && Cond.getOpcode() == ISD::SETCC) {
23970 std::pair<unsigned, bool> ret = matchIntegerMINMAX(Cond, VT, LHS, RHS, DAG, Subtarget);
23971 unsigned Opc = ret.first;
23972 bool NeedSplit = ret.second;
23974 if (Opc && NeedSplit) {
23975 unsigned NumElems = VT.getVectorNumElements();
23976 // Extract the LHS vectors
23977 SDValue LHS1 = Extract128BitVector(LHS, 0, DAG, DL);
23978 SDValue LHS2 = Extract128BitVector(LHS, NumElems/2, DAG, DL);
23980 // Extract the RHS vectors
23981 SDValue RHS1 = Extract128BitVector(RHS, 0, DAG, DL);
23982 SDValue RHS2 = Extract128BitVector(RHS, NumElems/2, DAG, DL);
23984 // Create min/max for each subvector
23985 LHS = DAG.getNode(Opc, DL, LHS1.getValueType(), LHS1, RHS1);
23986 RHS = DAG.getNode(Opc, DL, LHS2.getValueType(), LHS2, RHS2);
23988 // Merge the result
23989 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, LHS, RHS);
23991 return DAG.getNode(Opc, DL, VT, LHS, RHS);
23994 // Simplify vector selection if the condition value type matches the vselect operand type.
23996 if (N->getOpcode() == ISD::VSELECT && CondVT == VT) {
23997 assert(Cond.getValueType().isVector() &&
23998 "vector select expects a vector selector!");
24000 bool TValIsAllOnes = ISD::isBuildVectorAllOnes(LHS.getNode());
24001 bool FValIsAllZeros = ISD::isBuildVectorAllZeros(RHS.getNode());
24003 // Try to invert the condition if the true value is not all 1s and the false value is not all 0s.
24005 if (!TValIsAllOnes && !FValIsAllZeros &&
24006 // Check if the selector will be produced by CMPP*/PCMP*
24007 Cond.getOpcode() == ISD::SETCC &&
24008 // Check if SETCC has already been promoted
24009 TLI.getSetCCResultType(*DAG.getContext(), VT) == CondVT) {
24010 bool TValIsAllZeros = ISD::isBuildVectorAllZeros(LHS.getNode());
24011 bool FValIsAllOnes = ISD::isBuildVectorAllOnes(RHS.getNode());
24013 if (TValIsAllZeros || FValIsAllOnes) {
24014 SDValue CC = Cond.getOperand(2);
24015 ISD::CondCode NewCC =
24016 ISD::getSetCCInverse(cast<CondCodeSDNode>(CC)->get(),
24017 Cond.getOperand(0).getValueType().isInteger());
24018 Cond = DAG.getSetCC(DL, CondVT, Cond.getOperand(0), Cond.getOperand(1), NewCC);
24019 std::swap(LHS, RHS);
24020 TValIsAllOnes = FValIsAllOnes;
24021 FValIsAllZeros = TValIsAllZeros;
24025 if (TValIsAllOnes || FValIsAllZeros) {
24028 if (TValIsAllOnes && FValIsAllZeros)
24030 else if (TValIsAllOnes)
24031 Ret = DAG.getNode(ISD::OR, DL, CondVT, Cond,
24032 DAG.getNode(ISD::BITCAST, DL, CondVT, RHS));
24033 else if (FValIsAllZeros)
24034 Ret = DAG.getNode(ISD::AND, DL, CondVT, Cond,
24035 DAG.getNode(ISD::BITCAST, DL, CondVT, LHS));
24037 return DAG.getNode(ISD::BITCAST, DL, VT, Ret);
24041 // If we know that this node is legal then we know that it is going to be
24042 // matched by one of the SSE/AVX BLEND instructions. These instructions only
24043 // depend on the highest bit in each word. Try to use SimplifyDemandedBits
24044 // to simplify previous instructions.
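// Concretely, only the most significant bit of each condition element feeds
// the BLENDV-style instructions, so the demanded mask built below is just
// the sign bit of every lane (e.g. bit 31 of each element of a v4i32 Cond).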
24045 if (N->getOpcode() == ISD::VSELECT && DCI.isBeforeLegalizeOps() &&
24046 !DCI.isBeforeLegalize() &&
24047 // We explicitly check against v8i16 and v16i16 because, although
24048 // they're marked as Custom, they might only be legal when Cond is a
24049 // build_vector of constants. This will be taken care of in a later phase.
24051 (TLI.isOperationLegalOrCustom(ISD::VSELECT, VT) && VT != MVT::v16i16 &&
24052 VT != MVT::v8i16) &&
24053 // Don't optimize vector of constants. Those are handled by
24054 // the generic code and all the bits must be properly set for
24055 // the generic optimizer.
24056 !ISD::isBuildVectorOfConstantSDNodes(Cond.getNode())) {
24057 unsigned BitWidth = Cond.getValueType().getScalarType().getSizeInBits();
24059 // Don't optimize vector selects that map to mask-registers.
24063 assert(BitWidth >= 8 && BitWidth <= 64 && "Invalid mask size");
24064 APInt DemandedMask = APInt::getHighBitsSet(BitWidth, 1);
24066 APInt KnownZero, KnownOne;
24067 TargetLowering::TargetLoweringOpt TLO(DAG, DCI.isBeforeLegalize(),
24068 DCI.isBeforeLegalizeOps());
24069 if (TLO.ShrinkDemandedConstant(Cond, DemandedMask) ||
24070 TLI.SimplifyDemandedBits(Cond, DemandedMask, KnownZero, KnownOne,
24072 // If we changed the computation somewhere in the DAG, this change
24073 // will affect all users of Cond.
24074 // Make sure it is fine and update all the nodes so that we do not
24075 // use the generic VSELECT anymore. Otherwise, we may perform
24076 // wrong optimizations as we messed up with the actual expectation
24077 // for the vector boolean values.
24078 if (Cond != TLO.Old) {
24079 // Check all uses of that condition operand to check whether it will be
24080 // consumed by non-BLEND instructions, which may depend on all bits being set.
24082 for (SDNode::use_iterator I = Cond->use_begin(), E = Cond->use_end();
24084 if (I->getOpcode() != ISD::VSELECT)
24085 // TODO: Add other opcodes eventually lowered into BLEND.
24088 // Update all the users of the condition, before committing the change,
24089 // so that the VSELECT optimizations that expect the correct vector
24090 // boolean value will not be triggered.
24091 for (SDNode::use_iterator I = Cond->use_begin(), E = Cond->use_end();
24093 DAG.ReplaceAllUsesOfValueWith(
24095 DAG.getNode(X86ISD::SHRUNKBLEND, SDLoc(*I), I->getValueType(0),
24096 Cond, I->getOperand(1), I->getOperand(2)));
24097 DCI.CommitTargetLoweringOpt(TLO);
24100 // At this point, only Cond is changed. Change the condition
24101 // just for N to keep the opportunity to optimize all other
24102 // users in their own way.
24103 DAG.ReplaceAllUsesOfValueWith(
24105 DAG.getNode(X86ISD::SHRUNKBLEND, SDLoc(N), N->getValueType(0),
24106 TLO.New, N->getOperand(1), N->getOperand(2)));
24111 // We should generate an X86ISD::BLENDI from a vselect if its argument
24112 // is a sign_extend_inreg of an any_extend of a BUILD_VECTOR of
24113 // constants. This specific pattern gets generated when we split a
24114 // selector for a 512 bit vector in a machine without AVX512 (but with
24115 // 256-bit vectors), during legalization:
24117 // (vselect (sign_extend (any_extend (BUILD_VECTOR)) i1) LHS RHS)
24119 // Iff we find this pattern and the build_vectors are built from
24120 // constants, we translate the vselect into a shuffle_vector that we
24121 // know will be matched by LowerVECTOR_SHUFFLEtoBlend.
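// Concretely (see the helper above): for each lane i the shuffle index is
// i + NumElems * ((MaskValue >> i) & 1), so lanes whose mask bit is set are
// taken from RHS and the remaining lanes from LHS -- exactly a blend.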
24122 if ((N->getOpcode() == ISD::VSELECT ||
24123 N->getOpcode() == X86ISD::SHRUNKBLEND) &&
24124 !DCI.isBeforeLegalize()) {
24125 SDValue Shuffle = transformVSELECTtoBlendVECTOR_SHUFFLE(N, DAG, Subtarget);
24126 if (Shuffle.getNode())
24133 // Check whether a boolean test is testing a boolean value generated by
24134 // X86ISD::SETCC. If so, return the operand of that SETCC and the proper condition flag.
24137 // Simplify the following patterns:
24138 // (Op (CMP (SETCC Cond EFLAGS) 1) EQ) or
24139 // (Op (CMP (SETCC Cond EFLAGS) 0) NEQ)
24140 // to (Op EFLAGS Cond)
24142 // (Op (CMP (SETCC Cond EFLAGS) 0) EQ) or
24143 // (Op (CMP (SETCC Cond EFLAGS) 1) NEQ)
24144 // to (Op EFLAGS !Cond)
24146 // where Op could be BRCOND or CMOV.
24148 static SDValue checkBoolTestSetCCCombine(SDValue Cmp, X86::CondCode &CC) {
24149 // Quit unless this is a CMP, or a SUB whose value result is unused.
24150 if (Cmp.getOpcode() != X86ISD::CMP &&
24151 (Cmp.getOpcode() != X86ISD::SUB || Cmp.getNode()->hasAnyUseOfValue(0)))
24154 // Quit if not used as a boolean value.
24155 if (CC != X86::COND_E && CC != X86::COND_NE)
24158 // Check CMP operands. One of them should be 0 or 1 and the other should be
24159 // a SetCC or extended from it.
24160 SDValue Op1 = Cmp.getOperand(0);
24161 SDValue Op2 = Cmp.getOperand(1);
24164 const ConstantSDNode* C = nullptr;
24165 bool needOppositeCond = (CC == X86::COND_E);
24166 bool checkAgainstTrue = false; // Is it a comparison against 1?
24168 if ((C = dyn_cast<ConstantSDNode>(Op1)))
24170 else if ((C = dyn_cast<ConstantSDNode>(Op2)))
24172 else // Quit if all operands are not constants.
24175 if (C->getZExtValue() == 1) {
24176 needOppositeCond = !needOppositeCond;
24177 checkAgainstTrue = true;
24178 } else if (C->getZExtValue() != 0)
24179 // Quit if the constant is neither 0 nor 1.
24182 bool truncatedToBoolWithAnd = false;
24183 // Skip (zext $x), (trunc $x), or (and $x, 1) node.
24184 while (SetCC.getOpcode() == ISD::ZERO_EXTEND ||
24185 SetCC.getOpcode() == ISD::TRUNCATE ||
24186 SetCC.getOpcode() == ISD::AND) {
24187 if (SetCC.getOpcode() == ISD::AND) {
24189 ConstantSDNode *CS;
24190 if ((CS = dyn_cast<ConstantSDNode>(SetCC.getOperand(0))) &&
24191 CS->getZExtValue() == 1)
24193 if ((CS = dyn_cast<ConstantSDNode>(SetCC.getOperand(1))) &&
24194 CS->getZExtValue() == 1)
24198 SetCC = SetCC.getOperand(OpIdx);
24199 truncatedToBoolWithAnd = true;
24201 SetCC = SetCC.getOperand(0);
24204 switch (SetCC.getOpcode()) {
24205 case X86ISD::SETCC_CARRY:
24206 // Since SETCC_CARRY gives output based on R = CF ? ~0 : 0, it's unsafe to
24207 // simplify it if the result of SETCC_CARRY is not canonicalized to 0 or 1,
24208 // i.e. it's a comparison against true but the result of SETCC_CARRY is not
24209 // truncated to i1 using 'and'.
24210 if (checkAgainstTrue && !truncatedToBoolWithAnd)
24212 assert(X86::CondCode(SetCC.getConstantOperandVal(0)) == X86::COND_B &&
24213 "Invalid use of SETCC_CARRY!");
24215 case X86ISD::SETCC:
24216 // Set the condition code or opposite one if necessary.
24217 CC = X86::CondCode(SetCC.getConstantOperandVal(0));
24218 if (needOppositeCond)
24219 CC = X86::GetOppositeBranchCondition(CC);
24220 return SetCC.getOperand(1);
24221 case X86ISD::CMOV: {
24222 // Check whether false/true value has canonical one, i.e. 0 or 1.
24223 ConstantSDNode *FVal = dyn_cast<ConstantSDNode>(SetCC.getOperand(0));
24224 ConstantSDNode *TVal = dyn_cast<ConstantSDNode>(SetCC.getOperand(1));
24225 // Quit if true value is not a constant.
24228 // Quit if false value is not a constant.
24230 SDValue Op = SetCC.getOperand(0);
24231 // Skip 'zext' or 'trunc' node.
24232 if (Op.getOpcode() == ISD::ZERO_EXTEND ||
24233 Op.getOpcode() == ISD::TRUNCATE)
24234 Op = Op.getOperand(0);
24235 // A special case for rdrand/rdseed, where 0 is set if the false cond is found.
24237 if ((Op.getOpcode() != X86ISD::RDRAND &&
24238 Op.getOpcode() != X86ISD::RDSEED) || Op.getResNo() != 0)
24241 // Quit if false value is not the constant 0 or 1.
24242 bool FValIsFalse = true;
24243 if (FVal && FVal->getZExtValue() != 0) {
24244 if (FVal->getZExtValue() != 1)
24246 // If FVal is 1, opposite cond is needed.
24247 needOppositeCond = !needOppositeCond;
24248 FValIsFalse = false;
24250 // Quit if TVal is not the constant opposite of FVal.
24251 if (FValIsFalse && TVal->getZExtValue() != 1)
24253 if (!FValIsFalse && TVal->getZExtValue() != 0)
24255 CC = X86::CondCode(SetCC.getConstantOperandVal(2));
24256 if (needOppositeCond)
24257 CC = X86::GetOppositeBranchCondition(CC);
24258 return SetCC.getOperand(3);
24265 /// Optimize X86ISD::CMOV [LHS, RHS, CONDCODE (e.g. X86::COND_NE), CONDVAL]
24266 static SDValue PerformCMOVCombine(SDNode *N, SelectionDAG &DAG,
24267 TargetLowering::DAGCombinerInfo &DCI,
24268 const X86Subtarget *Subtarget) {
24271 // If the flag operand isn't dead, don't touch this CMOV.
24272 if (N->getNumValues() == 2 && !SDValue(N, 1).use_empty())
24275 SDValue FalseOp = N->getOperand(0);
24276 SDValue TrueOp = N->getOperand(1);
24277 X86::CondCode CC = (X86::CondCode)N->getConstantOperandVal(2);
24278 SDValue Cond = N->getOperand(3);
24280 if (CC == X86::COND_E || CC == X86::COND_NE) {
24281 switch (Cond.getOpcode()) {
24285 // If the operand of BSR / BSF is proven never zero, then ZF cannot be set.
24286 if (DAG.isKnownNeverZero(Cond.getOperand(0)))
24287 return (CC == X86::COND_E) ? FalseOp : TrueOp;
24293 Flags = checkBoolTestSetCCCombine(Cond, CC);
24294 if (Flags.getNode() &&
24295 // Extra check as FCMOV only supports a subset of X86 cond.
24296 (FalseOp.getValueType() != MVT::f80 || hasFPCMov(CC))) {
24297 SDValue Ops[] = { FalseOp, TrueOp,
24298 DAG.getConstant(CC, MVT::i8), Flags };
24299 return DAG.getNode(X86ISD::CMOV, DL, N->getVTList(), Ops);
24302 // If this is a select between two integer constants, try to do some
24303 // optimizations. Note that the operands are ordered the opposite of SELECT operands.
24305 if (ConstantSDNode *TrueC = dyn_cast<ConstantSDNode>(TrueOp)) {
24306 if (ConstantSDNode *FalseC = dyn_cast<ConstantSDNode>(FalseOp)) {
24307 // Canonicalize the TrueC/FalseC values so that TrueC (the true value) is
24308 // larger than FalseC (the false value).
24309 if (TrueC->getAPIntValue().ult(FalseC->getAPIntValue())) {
24310 CC = X86::GetOppositeBranchCondition(CC);
24311 std::swap(TrueC, FalseC);
24312 std::swap(TrueOp, FalseOp);
24315 // Optimize C ? 8 : 0 -> zext(setcc(C)) << 3. Likewise for any pow2/0.
24316 // This is efficient for any integer data type (including i8/i16) and shift amount.
24318 if (FalseC->getAPIntValue() == 0 && TrueC->getAPIntValue().isPowerOf2()) {
24319 Cond = DAG.getNode(X86ISD::SETCC, DL, MVT::i8,
24320 DAG.getConstant(CC, MVT::i8), Cond);
24322 // Zero extend the condition if needed.
24323 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, TrueC->getValueType(0), Cond);
24325 unsigned ShAmt = TrueC->getAPIntValue().logBase2();
24326 Cond = DAG.getNode(ISD::SHL, DL, Cond.getValueType(), Cond,
24327 DAG.getConstant(ShAmt, MVT::i8));
24328 if (N->getNumValues() == 2) // Dead flag value?
24329 return DCI.CombineTo(N, Cond, SDValue());
24333 // Optimize Cond ? cst+1 : cst -> zext(setcc(C))+cst. This is efficient
24334 // for any integer data type, including i8/i16.
24335 if (FalseC->getAPIntValue()+1 == TrueC->getAPIntValue()) {
24336 Cond = DAG.getNode(X86ISD::SETCC, DL, MVT::i8,
24337 DAG.getConstant(CC, MVT::i8), Cond);
24339 // Zero extend the condition if needed.
24340 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL,
24341 FalseC->getValueType(0), Cond);
24342 Cond = DAG.getNode(ISD::ADD, DL, Cond.getValueType(), Cond,
24343 SDValue(FalseC, 0));
24345 if (N->getNumValues() == 2) // Dead flag value?
24346 return DCI.CombineTo(N, Cond, SDValue());
24350 // Optimize cases that will turn into an LEA instruction. This requires
24351 // an i32 or i64 and an efficient multiplier (1, 2, 3, 4, 5, 8, 9).
24352 if (N->getValueType(0) == MVT::i32 || N->getValueType(0) == MVT::i64) {
24353 uint64_t Diff = TrueC->getZExtValue()-FalseC->getZExtValue();
24354 if (N->getValueType(0) == MVT::i32) Diff = (unsigned)Diff;
24356 bool isFastMultiplier = false;
24358 switch ((unsigned char)Diff) {
24360 case 1: // result = add base, cond
24361 case 2: // result = lea base( , cond*2)
24362 case 3: // result = lea base(cond, cond*2)
24363 case 4: // result = lea base( , cond*4)
24364 case 5: // result = lea base(cond, cond*4)
24365 case 8: // result = lea base( , cond*8)
24366 case 9: // result = lea base(cond, cond*8)
24367 isFastMultiplier = true;
24372 if (isFastMultiplier) {
24373 APInt Diff = TrueC->getAPIntValue()-FalseC->getAPIntValue();
24374 Cond = DAG.getNode(X86ISD::SETCC, DL, MVT::i8,
24375 DAG.getConstant(CC, MVT::i8), Cond);
24376 // Zero extend the condition if needed.
24377 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, FalseC->getValueType(0),
24379 // Scale the condition by the difference.
24381 Cond = DAG.getNode(ISD::MUL, DL, Cond.getValueType(), Cond,
24382 DAG.getConstant(Diff, Cond.getValueType()));
24384 // Add the base if non-zero.
24385 if (FalseC->getAPIntValue() != 0)
24386 Cond = DAG.getNode(ISD::ADD, DL, Cond.getValueType(), Cond,
24387 SDValue(FalseC, 0));
24388 if (N->getNumValues() == 2) // Dead flag value?
24389 return DCI.CombineTo(N, Cond, SDValue());
24396 // Handle these cases:
24397 // (select (x != c), e, c) -> (select (x != c), e, x),
24398 // (select (x == c), c, e) -> (select (x == c), x, e)
24399 // where the c is an integer constant, and the "select" is the combination
24400 // of CMOV and CMP.
24402 // The rationale for this change is that a conditional-move from a constant
24403 // needs two instructions, whereas a conditional-move from a register needs
24404 // only one instruction.
24406 // CAVEAT: By replacing a constant with a symbolic value, it may obscure
24407 // some instruction-combining opportunities. This opt needs to be
24408 // postponed as late as possible.
24410 if (!DCI.isBeforeLegalize() && !DCI.isBeforeLegalizeOps()) {
24411 // The DCI.xxxx conditions are provided to postpone the optimization as
24412 // late as possible.
24414 ConstantSDNode *CmpAgainst = nullptr;
24415 if ((Cond.getOpcode() == X86ISD::CMP || Cond.getOpcode() == X86ISD::SUB) &&
24416 (CmpAgainst = dyn_cast<ConstantSDNode>(Cond.getOperand(1))) &&
24417 !isa<ConstantSDNode>(Cond.getOperand(0))) {
24419 if (CC == X86::COND_NE &&
24420 CmpAgainst == dyn_cast<ConstantSDNode>(FalseOp)) {
24421 CC = X86::GetOppositeBranchCondition(CC);
24422 std::swap(TrueOp, FalseOp);
24425 if (CC == X86::COND_E &&
24426 CmpAgainst == dyn_cast<ConstantSDNode>(TrueOp)) {
24427 SDValue Ops[] = { FalseOp, Cond.getOperand(0),
24428 DAG.getConstant(CC, MVT::i8), Cond };
24429 return DAG.getNode(X86ISD::CMOV, DL, N->getVTList (), Ops);
24437 static SDValue PerformINTRINSIC_WO_CHAINCombine(SDNode *N, SelectionDAG &DAG,
24438 const X86Subtarget *Subtarget) {
24439 unsigned IntNo = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
24441 default: return SDValue();
24442 // SSE/AVX/AVX2 blend intrinsics.
24443 case Intrinsic::x86_avx2_pblendvb:
24444 case Intrinsic::x86_avx2_pblendw:
24445 case Intrinsic::x86_avx2_pblendd_128:
24446 case Intrinsic::x86_avx2_pblendd_256:
24447 // Don't try to simplify this intrinsic if we don't have AVX2.
24448 if (!Subtarget->hasAVX2())
24451 case Intrinsic::x86_avx_blend_pd_256:
24452 case Intrinsic::x86_avx_blend_ps_256:
24453 case Intrinsic::x86_avx_blendv_pd_256:
24454 case Intrinsic::x86_avx_blendv_ps_256:
24455 // Don't try to simplify this intrinsic if we don't have AVX.
24456 if (!Subtarget->hasAVX())
24459 case Intrinsic::x86_sse41_pblendw:
24460 case Intrinsic::x86_sse41_blendpd:
24461 case Intrinsic::x86_sse41_blendps:
24462 case Intrinsic::x86_sse41_blendvps:
24463 case Intrinsic::x86_sse41_blendvpd:
24464 case Intrinsic::x86_sse41_pblendvb: {
24465 SDValue Op0 = N->getOperand(1);
24466 SDValue Op1 = N->getOperand(2);
24467 SDValue Mask = N->getOperand(3);
24469 // Don't try to simplify this intrinsic if we don't have SSE4.1.
24470 if (!Subtarget->hasSSE41())
24473 // fold (blend A, A, Mask) -> A
24476 // fold (blend A, B, allZeros) -> A
24477 if (ISD::isBuildVectorAllZeros(Mask.getNode()))
24479 // fold (blend A, B, allOnes) -> B
24480 if (ISD::isBuildVectorAllOnes(Mask.getNode()))
24483 // Simplify the case where the mask is a constant i32 value.
24484 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Mask)) {
24485 if (C->isNullValue())
24487 if (C->isAllOnesValue())
24494 // Packed SSE2/AVX2 arithmetic shift immediate intrinsics.
24495 case Intrinsic::x86_sse2_psrai_w:
24496 case Intrinsic::x86_sse2_psrai_d:
24497 case Intrinsic::x86_avx2_psrai_w:
24498 case Intrinsic::x86_avx2_psrai_d:
24499 case Intrinsic::x86_sse2_psra_w:
24500 case Intrinsic::x86_sse2_psra_d:
24501 case Intrinsic::x86_avx2_psra_w:
24502 case Intrinsic::x86_avx2_psra_d: {
24503 SDValue Op0 = N->getOperand(1);
24504 SDValue Op1 = N->getOperand(2);
24505 EVT VT = Op0.getValueType();
24506 assert(VT.isVector() && "Expected a vector type!");
24508 if (isa<BuildVectorSDNode>(Op1))
24509 Op1 = Op1.getOperand(0);
24511 if (!isa<ConstantSDNode>(Op1))
24514 EVT SVT = VT.getVectorElementType();
24515 unsigned SVTBits = SVT.getSizeInBits();
24517 ConstantSDNode *CND = cast<ConstantSDNode>(Op1);
24518 const APInt &C = APInt(SVTBits, CND->getAPIntValue().getZExtValue());
24519 uint64_t ShAmt = C.getZExtValue();
24521 // Don't try to convert this shift into an ISD::SRA if the shift
24522 // count is bigger than or equal to the element size.
24523 if (ShAmt >= SVTBits)
24526 // Trivial case: if the shift count is zero, then fold this
24527 // into the first operand.
24531 // Replace this packed shift intrinsic with a target independent ISD::SRA node.
24533 SDValue Splat = DAG.getConstant(C, VT);
24534 return DAG.getNode(ISD::SRA, SDLoc(N), VT, Op0, Splat);
24539 /// PerformMulCombine - Optimize a single multiply with constant into two
24540 /// in order to implement it with two cheaper instructions, e.g.
24541 /// LEA + SHL, LEA + LEA.
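/// For example (illustrative): x*45 is rewritten as (x*9)*5, i.e. two LEAs,
/// while x*40 becomes (x*5)<<3, i.e. an LEA and a SHL.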
24542 static SDValue PerformMulCombine(SDNode *N, SelectionDAG &DAG,
24543 TargetLowering::DAGCombinerInfo &DCI) {
24544 if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer())
24547 EVT VT = N->getValueType(0);
24548 if (VT != MVT::i64 && VT != MVT::i32)
24551 ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1));
24554 uint64_t MulAmt = C->getZExtValue();
24555 if (isPowerOf2_64(MulAmt) || MulAmt == 3 || MulAmt == 5 || MulAmt == 9)
24558 uint64_t MulAmt1 = 0;
24559 uint64_t MulAmt2 = 0;
24560 if ((MulAmt % 9) == 0) {
24562 MulAmt2 = MulAmt / 9;
24563 } else if ((MulAmt % 5) == 0) {
24565 MulAmt2 = MulAmt / 5;
24566 } else if ((MulAmt % 3) == 0) {
24568 MulAmt2 = MulAmt / 3;
24571 (isPowerOf2_64(MulAmt2) || MulAmt2 == 3 || MulAmt2 == 5 || MulAmt2 == 9)){
24574 if (isPowerOf2_64(MulAmt2) &&
24575 !(N->hasOneUse() && N->use_begin()->getOpcode() == ISD::ADD))
24576 // If the second multiplier is pow2, issue it first. We want the multiply by
24577 // 3, 5, or 9 to be folded into the addressing mode unless the lone use is an add.
24579 std::swap(MulAmt1, MulAmt2);
24582 if (isPowerOf2_64(MulAmt1))
24583 NewMul = DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
24584 DAG.getConstant(Log2_64(MulAmt1), MVT::i8));
24586 NewMul = DAG.getNode(X86ISD::MUL_IMM, DL, VT, N->getOperand(0),
24587 DAG.getConstant(MulAmt1, VT));
24589 if (isPowerOf2_64(MulAmt2))
24590 NewMul = DAG.getNode(ISD::SHL, DL, VT, NewMul,
24591 DAG.getConstant(Log2_64(MulAmt2), MVT::i8));
24593 NewMul = DAG.getNode(X86ISD::MUL_IMM, DL, VT, NewMul,
24594 DAG.getConstant(MulAmt2, VT));
24596 // Do not add new nodes to DAG combiner worklist.
24597 DCI.CombineTo(N, NewMul, false);
24602 static SDValue PerformSHLCombine(SDNode *N, SelectionDAG &DAG) {
24603 SDValue N0 = N->getOperand(0);
24604 SDValue N1 = N->getOperand(1);
24605 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
24606 EVT VT = N0.getValueType();
24608 // fold (shl (and (setcc_c), c1), c2) -> (and setcc_c, (c1 << c2))
24609 // since the result of setcc_c is all zero's or all ones.
24610 if (VT.isInteger() && !VT.isVector() &&
24611 N1C && N0.getOpcode() == ISD::AND &&
24612 N0.getOperand(1).getOpcode() == ISD::Constant) {
24613 SDValue N00 = N0.getOperand(0);
24614 if (N00.getOpcode() == X86ISD::SETCC_CARRY ||
24615 ((N00.getOpcode() == ISD::ANY_EXTEND ||
24616 N00.getOpcode() == ISD::ZERO_EXTEND) &&
24617 N00.getOperand(0).getOpcode() == X86ISD::SETCC_CARRY)) {
24618 APInt Mask = cast<ConstantSDNode>(N0.getOperand(1))->getAPIntValue();
24619 APInt ShAmt = N1C->getAPIntValue();
24620 Mask = Mask.shl(ShAmt);
24622 return DAG.getNode(ISD::AND, SDLoc(N), VT,
24623 N00, DAG.getConstant(Mask, VT));
24627 // Hardware support for vector shifts is sparse, which makes us scalarize the
24628 // vector operations in many cases. Also, on sandybridge ADD is faster than SHL:
24630 // (shl V, 1) -> add V,V
24631 if (auto *N1BV = dyn_cast<BuildVectorSDNode>(N1))
24632 if (auto *N1SplatC = N1BV->getConstantSplatNode()) {
24633 assert(N0.getValueType().isVector() && "Invalid vector shift type");
24634 // We shift all of the values by one. In many cases we do not have
24635 // hardware support for this operation. This is better expressed as an ADD of two values.
24637 if (N1SplatC->getZExtValue() == 1)
24638 return DAG.getNode(ISD::ADD, SDLoc(N), VT, N0, N0);
24644 /// \brief Returns a vector of 0s if the node in input is a vector logical
24645 /// shift by a constant amount which is known to be bigger than or equal
24646 /// to the vector element size in bits.
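/// For example, (srl (v4i32 X), (splat 32)) folds to a v4i32 zero vector:
/// the amount is encoded as an 8-bit immediate and any amount >= the element
/// width yields zero for these logical shifts.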
24647 static SDValue performShiftToAllZeros(SDNode *N, SelectionDAG &DAG,
24648 const X86Subtarget *Subtarget) {
24649 EVT VT = N->getValueType(0);
24651 if (VT != MVT::v2i64 && VT != MVT::v4i32 && VT != MVT::v8i16 &&
24652 (!Subtarget->hasInt256() ||
24653 (VT != MVT::v4i64 && VT != MVT::v8i32 && VT != MVT::v16i16)))
24656 SDValue Amt = N->getOperand(1);
24658 if (auto *AmtBV = dyn_cast<BuildVectorSDNode>(Amt))
24659 if (auto *AmtSplat = AmtBV->getConstantSplatNode()) {
24660 APInt ShiftAmt = AmtSplat->getAPIntValue();
24661 unsigned MaxAmount = VT.getVectorElementType().getSizeInBits();
24663 // SSE2/AVX2 logical shifts always return a vector of 0s
24664 // if the shift amount is bigger than or equal to
24665 // the element size. The constant shift amount will be
24666 // encoded as an 8-bit immediate.
24667 if (ShiftAmt.trunc(8).uge(MaxAmount))
24668 return getZeroVector(VT, Subtarget, DAG, DL);
24674 /// PerformShiftCombine - Combine shifts.
24675 static SDValue PerformShiftCombine(SDNode* N, SelectionDAG &DAG,
24676 TargetLowering::DAGCombinerInfo &DCI,
24677 const X86Subtarget *Subtarget) {
24678 if (N->getOpcode() == ISD::SHL) {
24679 SDValue V = PerformSHLCombine(N, DAG);
24680 if (V.getNode()) return V;
24683 if (N->getOpcode() != ISD::SRA) {
24684 // Try to fold this logical shift into a zero vector.
24685 SDValue V = performShiftToAllZeros(N, DAG, Subtarget);
24686 if (V.getNode()) return V;
24692 // CMPEQCombine - Recognize the distinctive (AND (setcc ...) (setcc ..))
24693 // where both setccs reference the same FP CMP, and rewrite for CMPEQSS
24694 // and friends. Likewise for OR -> CMPNEQSS.
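// For example (illustrative), with SSE2 available:
//   (and (X86ISD::SETCC E, (X86ISD::CMP a, b)),
//        (X86ISD::SETCC NP, (X86ISD::CMP a, b)))
// becomes a CMPEQSS-style X86ISD::FSETCC producing all-ones/all-zeros, whose
// low bit is then extracted with an AND and a TRUNCATE, avoiding two
// EFLAGS-based SETCCs.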
24695 static SDValue CMPEQCombine(SDNode *N, SelectionDAG &DAG,
24696 TargetLowering::DAGCombinerInfo &DCI,
24697 const X86Subtarget *Subtarget) {
24700 // SSE1 supports CMP{eq|ne}SS, and SSE2 added CMP{eq|ne}SD, but
24701 // we're requiring SSE2 for both.
24702 if (Subtarget->hasSSE2() && isAndOrOfSetCCs(SDValue(N, 0U), opcode)) {
24703 SDValue N0 = N->getOperand(0);
24704 SDValue N1 = N->getOperand(1);
24705 SDValue CMP0 = N0->getOperand(1);
24706 SDValue CMP1 = N1->getOperand(1);
24709 // The SETCCs should both refer to the same CMP.
24710 if (CMP0.getOpcode() != X86ISD::CMP || CMP0 != CMP1)
24713 SDValue CMP00 = CMP0->getOperand(0);
24714 SDValue CMP01 = CMP0->getOperand(1);
24715 EVT VT = CMP00.getValueType();
24717 if (VT == MVT::f32 || VT == MVT::f64) {
24718 bool ExpectingFlags = false;
24719 // Check for any users that want flags:
24720 for (SDNode::use_iterator UI = N->use_begin(), UE = N->use_end();
24721 !ExpectingFlags && UI != UE; ++UI)
24722 switch (UI->getOpcode()) {
24727 ExpectingFlags = true;
24729 case ISD::CopyToReg:
24730 case ISD::SIGN_EXTEND:
24731 case ISD::ZERO_EXTEND:
24732 case ISD::ANY_EXTEND:
24736 if (!ExpectingFlags) {
24737 enum X86::CondCode cc0 = (enum X86::CondCode)N0.getConstantOperandVal(0);
24738 enum X86::CondCode cc1 = (enum X86::CondCode)N1.getConstantOperandVal(0);
24740 if (cc1 == X86::COND_E || cc1 == X86::COND_NE) {
24741 X86::CondCode tmp = cc0;
24746 if ((cc0 == X86::COND_E && cc1 == X86::COND_NP) ||
24747 (cc0 == X86::COND_NE && cc1 == X86::COND_P)) {
24748 // FIXME: need symbolic constants for these magic numbers.
24749 // See X86ATTInstPrinter.cpp:printSSECC().
24750 unsigned x86cc = (cc0 == X86::COND_E) ? 0 : 4;
24751 if (Subtarget->hasAVX512()) {
24752 SDValue FSetCC = DAG.getNode(X86ISD::FSETCC, DL, MVT::i1, CMP00,
24753 CMP01, DAG.getConstant(x86cc, MVT::i8));
24754 if (N->getValueType(0) != MVT::i1)
24755 return DAG.getNode(ISD::ZERO_EXTEND, DL, N->getValueType(0),
24759 SDValue OnesOrZeroesF = DAG.getNode(X86ISD::FSETCC, DL,
24760 CMP00.getValueType(), CMP00, CMP01,
24761 DAG.getConstant(x86cc, MVT::i8));
24763 bool is64BitFP = (CMP00.getValueType() == MVT::f64);
24764 MVT IntVT = is64BitFP ? MVT::i64 : MVT::i32;
24766 if (is64BitFP && !Subtarget->is64Bit()) {
24767 // On a 32-bit target, we cannot bitcast the 64-bit float to a
24768 // 64-bit integer, since that's not a legal type. Since
24769 // OnesOrZeroesF is all ones or all zeroes, we don't need all the
24770 // bits, but can do this little dance to extract the lowest 32 bits
24771 // and work with those going forward.
24772 SDValue Vector64 = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v2f64,
24774 SDValue Vector32 = DAG.getNode(ISD::BITCAST, DL, MVT::v4f32,
24776 OnesOrZeroesF = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32,
24777 Vector32, DAG.getIntPtrConstant(0));
24781 SDValue OnesOrZeroesI = DAG.getNode(ISD::BITCAST, DL, IntVT, OnesOrZeroesF);
24782 SDValue ANDed = DAG.getNode(ISD::AND, DL, IntVT, OnesOrZeroesI,
24783 DAG.getConstant(1, IntVT));
24784 SDValue OneBitOfTruth = DAG.getNode(ISD::TRUNCATE, DL, MVT::i8, ANDed);
24785 return OneBitOfTruth;
24793 /// CanFoldXORWithAllOnes - Test whether the XOR operand is a AllOnes vector
24794 /// so it can be folded inside ANDNP.
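/// For example, (and (xor X, all-ones), Y) can then be emitted as
/// (X86ISD::ANDNP X, Y). The all-ones operand is also recognized when it is
/// assembled from two 128-bit halves via insert_subvector.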
24795 static bool CanFoldXORWithAllOnes(const SDNode *N) {
24796 EVT VT = N->getValueType(0);
24798 // Match direct AllOnes for 128 and 256-bit vectors
24799 if (ISD::isBuildVectorAllOnes(N))
24802 // Look through a bit convert.
24803 if (N->getOpcode() == ISD::BITCAST)
24804 N = N->getOperand(0).getNode();
24806 // Sometimes the operand may come from an insert_subvector building a 256-bit all-ones vector.
24808 if (VT.is256BitVector() &&
24809 N->getOpcode() == ISD::INSERT_SUBVECTOR) {
24810 SDValue V1 = N->getOperand(0);
24811 SDValue V2 = N->getOperand(1);
24813 if (V1.getOpcode() == ISD::INSERT_SUBVECTOR &&
24814 V1.getOperand(0).getOpcode() == ISD::UNDEF &&
24815 ISD::isBuildVectorAllOnes(V1.getOperand(1).getNode()) &&
24816 ISD::isBuildVectorAllOnes(V2.getNode()))
24823 // On AVX/AVX2 the type v8i1 is legalized to v8i16, which is an XMM sized
24824 // register. In most cases we actually compare or select YMM-sized registers
24825 // and mixing the two types creates horrible code. This method optimizes
24826 // some of the transition sequences.
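// For example (illustrative):
//   (v8i32 zero_extend (and (trunc W:v8i32 to v8i16), SplatC:v8i16))
// is rewritten below so the AND is performed directly on the wide v8i32
// value (with SplatC zero-extended), followed by a mask of the low 16 bits
// of each lane, keeping the computation in YMM-sized registers.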
24827 static SDValue WidenMaskArithmetic(SDNode *N, SelectionDAG &DAG,
24828 TargetLowering::DAGCombinerInfo &DCI,
24829 const X86Subtarget *Subtarget) {
24830 EVT VT = N->getValueType(0);
24831 if (!VT.is256BitVector())
24834 assert((N->getOpcode() == ISD::ANY_EXTEND ||
24835 N->getOpcode() == ISD::ZERO_EXTEND ||
24836 N->getOpcode() == ISD::SIGN_EXTEND) && "Invalid Node");
24838 SDValue Narrow = N->getOperand(0);
24839 EVT NarrowVT = Narrow->getValueType(0);
24840 if (!NarrowVT.is128BitVector())
24843 if (Narrow->getOpcode() != ISD::XOR &&
24844 Narrow->getOpcode() != ISD::AND &&
24845 Narrow->getOpcode() != ISD::OR)
24848 SDValue N0 = Narrow->getOperand(0);
24849 SDValue N1 = Narrow->getOperand(1);
24852 // The left side has to be a trunc.
24853 if (N0.getOpcode() != ISD::TRUNCATE)
24856 // The type of the truncated inputs.
24857 EVT WideVT = N0->getOperand(0)->getValueType(0);
24861 // The right side has to be a 'trunc' or a constant vector.
24862 bool RHSTrunc = N1.getOpcode() == ISD::TRUNCATE;
24863 ConstantSDNode *RHSConstSplat = nullptr;
24864 if (auto *RHSBV = dyn_cast<BuildVectorSDNode>(N1))
24865 RHSConstSplat = RHSBV->getConstantSplatNode();
24866 if (!RHSTrunc && !RHSConstSplat)
24869 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
24871 if (!TLI.isOperationLegalOrPromote(Narrow->getOpcode(), WideVT))
24874 // Set N0 and N1 to hold the inputs to the new wide operation.
24875 N0 = N0->getOperand(0);
24876 if (RHSConstSplat) {
24877 N1 = DAG.getNode(ISD::ZERO_EXTEND, DL, WideVT.getScalarType(),
24878 SDValue(RHSConstSplat, 0));
24879 SmallVector<SDValue, 8> C(WideVT.getVectorNumElements(), N1);
24880 N1 = DAG.getNode(ISD::BUILD_VECTOR, DL, WideVT, C);
24881 } else if (RHSTrunc) {
24882 N1 = N1->getOperand(0);
24885 // Generate the wide operation.
24886 SDValue Op = DAG.getNode(Narrow->getOpcode(), DL, WideVT, N0, N1);
24887 unsigned Opcode = N->getOpcode();
24889 case ISD::ANY_EXTEND:
24891 case ISD::ZERO_EXTEND: {
24892 unsigned InBits = NarrowVT.getScalarType().getSizeInBits();
24893 APInt Mask = APInt::getAllOnesValue(InBits);
24894 Mask = Mask.zext(VT.getScalarType().getSizeInBits());
24895 return DAG.getNode(ISD::AND, DL, VT,
24896 Op, DAG.getConstant(Mask, VT));
24898 case ISD::SIGN_EXTEND:
24899 return DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, VT,
24900 Op, DAG.getValueType(NarrowVT));
24902 llvm_unreachable("Unexpected opcode");
24906 static SDValue VectorZextCombine(SDNode *N, SelectionDAG &DAG,
24907 TargetLowering::DAGCombinerInfo &DCI,
24908 const X86Subtarget *Subtarget) {
24909 SDValue N0 = N->getOperand(0);
24910 SDValue N1 = N->getOperand(1);
24913 // A vector zext_in_reg may be represented as a shuffle,
24914 // feeding into a bitcast (this represents anyext) feeding into
24915 // an and with a mask.
24916 // We'd like to try to combine that into a shuffle with zero
24917 // plus a bitcast, removing the and.
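// For example (illustrative), zero-extending v16i8 lanes into v4i32:
//   (and (bitcast (shuffle <0,u,u,u,1,u,u,u,...> X, undef)), splat(0xFF))
// becomes (bitcast (shuffle <0,16,16,16,1,16,16,16,...> X, zero)), where
// index 16 selects lanes from the zero vector instead of leaving them undef.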
24918 if (N0.getOpcode() != ISD::BITCAST ||
24919 N0.getOperand(0).getOpcode() != ISD::VECTOR_SHUFFLE)
24922 // The other side of the AND should be a splat of 2^C - 1, where C
24923 // is the number of bits in the source type.
24924 if (N1.getOpcode() == ISD::BITCAST)
24925 N1 = N1.getOperand(0);
24926 if (N1.getOpcode() != ISD::BUILD_VECTOR)
24928 BuildVectorSDNode *Vector = cast<BuildVectorSDNode>(N1);
24930 ShuffleVectorSDNode *Shuffle = cast<ShuffleVectorSDNode>(N0.getOperand(0));
24931 EVT SrcType = Shuffle->getValueType(0);
24933 // We expect a single-source shuffle
24934 if (Shuffle->getOperand(1)->getOpcode() != ISD::UNDEF)
24937 unsigned SrcSize = SrcType.getScalarSizeInBits();
24939 APInt SplatValue, SplatUndef;
24940 unsigned SplatBitSize;
24942 if (!Vector->isConstantSplat(SplatValue, SplatUndef,
24943 SplatBitSize, HasAnyUndefs))
24946 unsigned ResSize = N1.getValueType().getScalarSizeInBits();
24947 // Make sure the splat matches the mask we expect
24948 if (SplatBitSize > ResSize ||
24949 (SplatValue + 1).exactLogBase2() != SrcSize)
24952 // Make sure the input and output size make sense
24953 if (SrcSize >= ResSize || ResSize % SrcSize)
24956 // We expect a shuffle of the form <0, u, u, u, 1, u, u, u...>
24957 // The number of u's between each two values depends on the ratio between
24958 // the source and dest type.
24959 unsigned ZextRatio = ResSize / SrcSize;
24960 bool IsZext = true;
24961 for (unsigned i = 0; i < SrcType.getVectorNumElements(); ++i) {
24962 if (i % ZextRatio) {
24963 if (Shuffle->getMaskElt(i) > 0) {
24969 if (Shuffle->getMaskElt(i) != (i / ZextRatio)) {
24970 // Expected element number
24980 // Ok, perform the transformation - replace the shuffle with
24981 // a shuffle of the form <0, k, k, k, 1, k, k, k> with zero
24982 // (instead of undef) where the k elements come from the zero vector.
24983 SmallVector<int, 8> Mask;
24984 unsigned NumElems = SrcType.getVectorNumElements();
24985 for (unsigned i = 0; i < NumElems; ++i)
24987 Mask.push_back(NumElems);
24989 Mask.push_back(i / ZextRatio);
24991 SDValue NewShuffle = DAG.getVectorShuffle(Shuffle->getValueType(0), DL,
24992 Shuffle->getOperand(0), DAG.getConstant(0, SrcType), Mask);
24993 return DAG.getNode(ISD::BITCAST, DL, N0.getValueType(), NewShuffle);
24996 static SDValue PerformAndCombine(SDNode *N, SelectionDAG &DAG,
24997 TargetLowering::DAGCombinerInfo &DCI,
24998 const X86Subtarget *Subtarget) {
24999 if (DCI.isBeforeLegalizeOps())
25002 SDValue Zext = VectorZextCombine(N, DAG, DCI, Subtarget);
25003 if (Zext.getNode())
25006 SDValue R = CMPEQCombine(N, DAG, DCI, Subtarget);
25010 EVT VT = N->getValueType(0);
25011 SDValue N0 = N->getOperand(0);
25012 SDValue N1 = N->getOperand(1);
25015 // Create BEXTR instructions
25016 // BEXTR is ((X >> imm) & (2**size-1))
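// Illustrative example: (and (srl X, 8), 0xff) becomes
// (X86ISD::BEXTR X, (8 | (8 << 8))), i.e. start bit 8, extract length 8.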
25017 if (VT == MVT::i32 || VT == MVT::i64) {
25018 // Check for BEXTR.
25019 if ((Subtarget->hasBMI() || Subtarget->hasTBM()) &&
25020 (N0.getOpcode() == ISD::SRA || N0.getOpcode() == ISD::SRL)) {
25021 ConstantSDNode *MaskNode = dyn_cast<ConstantSDNode>(N1);
25022 ConstantSDNode *ShiftNode = dyn_cast<ConstantSDNode>(N0.getOperand(1));
25023 if (MaskNode && ShiftNode) {
25024 uint64_t Mask = MaskNode->getZExtValue();
25025 uint64_t Shift = ShiftNode->getZExtValue();
25026 if (isMask_64(Mask)) {
25027 uint64_t MaskSize = countPopulation(Mask);
25028 if (Shift + MaskSize <= VT.getSizeInBits())
25029 return DAG.getNode(X86ISD::BEXTR, DL, VT, N0.getOperand(0),
25030 DAG.getConstant(Shift | (MaskSize << 8), VT));
25038 // Want to form ANDNP nodes:
25039 // 1) In the hopes of then easily combining them with OR and AND nodes
25040 // to form PBLEND/PSIGN.
25041 // 2) To match ANDN packed intrinsics
25042 if (VT != MVT::v2i64 && VT != MVT::v4i64)
25045 // Check LHS for vnot
25046 if (N0.getOpcode() == ISD::XOR &&
25047 //ISD::isBuildVectorAllOnes(N0.getOperand(1).getNode()))
25048 CanFoldXORWithAllOnes(N0.getOperand(1).getNode()))
25049 return DAG.getNode(X86ISD::ANDNP, DL, VT, N0.getOperand(0), N1);
25051 // Check RHS for vnot
25052 if (N1.getOpcode() == ISD::XOR &&
25053 //ISD::isBuildVectorAllOnes(N1.getOperand(1).getNode()))
25054 CanFoldXORWithAllOnes(N1.getOperand(1).getNode()))
25055 return DAG.getNode(X86ISD::ANDNP, DL, VT, N1.getOperand(0), N0);
25060 static SDValue PerformOrCombine(SDNode *N, SelectionDAG &DAG,
25061 TargetLowering::DAGCombinerInfo &DCI,
25062 const X86Subtarget *Subtarget) {
25063 if (DCI.isBeforeLegalizeOps())
25066 SDValue R = CMPEQCombine(N, DAG, DCI, Subtarget);
25070 SDValue N0 = N->getOperand(0);
25071 SDValue N1 = N->getOperand(1);
25072 EVT VT = N->getValueType(0);
25074 // look for psign/blend
25075 if (VT == MVT::v2i64 || VT == MVT::v4i64) {
25076 if (!Subtarget->hasSSSE3() ||
25077 (VT == MVT::v4i64 && !Subtarget->hasInt256()))
25080 // Canonicalize pandn to RHS
25081 if (N0.getOpcode() == X86ISD::ANDNP)
25083 // or (and (m, y), (pandn m, x))
25084 if (N0.getOpcode() == ISD::AND && N1.getOpcode() == X86ISD::ANDNP) {
25085 SDValue Mask = N1.getOperand(0);
25086 SDValue X = N1.getOperand(1);
      SDValue Y;
      if (N0.getOperand(0) == Mask)
25089 Y = N0.getOperand(1);
25090 if (N0.getOperand(1) == Mask)
25091 Y = N0.getOperand(0);
25093 // Check to see if the mask appeared in both the AND and ANDNP and
25097 // Validate that X, Y, and Mask are BIT_CONVERTS, and see through them.
25098 // Look through mask bitcast.
25099 if (Mask.getOpcode() == ISD::BITCAST)
25100 Mask = Mask.getOperand(0);
25101 if (X.getOpcode() == ISD::BITCAST)
25102 X = X.getOperand(0);
25103 if (Y.getOpcode() == ISD::BITCAST)
25104 Y = Y.getOperand(0);
25106 EVT MaskVT = Mask.getValueType();
25108 // Validate that the Mask operand is a vector sra node.
25109 // FIXME: what to do for bytes, since there is a psignb/pblendvb, but
25110 // there is no psrai.b
25111 unsigned EltBits = MaskVT.getVectorElementType().getSizeInBits();
25112 unsigned SraAmt = ~0;
25113 if (Mask.getOpcode() == ISD::SRA) {
25114 if (auto *AmtBV = dyn_cast<BuildVectorSDNode>(Mask.getOperand(1)))
25115 if (auto *AmtConst = AmtBV->getConstantSplatNode())
25116 SraAmt = AmtConst->getZExtValue();
25117 } else if (Mask.getOpcode() == X86ISD::VSRAI) {
25118 SDValue SraC = Mask.getOperand(1);
25119 SraAmt = cast<ConstantSDNode>(SraC)->getZExtValue();
25121 if ((SraAmt + 1) != EltBits)
25126 // Now we know we at least have a pblendvb with the mask val. See if
25127 // we can form a psignb/w/d.
25128 // psign = x.type == y.type == mask.type && y = sub(0, x);
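      // PSIGN(x, m) negates each element of x whose corresponding element of
      // m is negative, keeps it when m is positive, and zeroes it when m is
      // zero; here m is the pre-shift value whose sign mask was used in the
      // select.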
25129 if (Y.getOpcode() == ISD::SUB && Y.getOperand(1) == X &&
25130 ISD::isBuildVectorAllZeros(Y.getOperand(0).getNode()) &&
25131 X.getValueType() == MaskVT && Y.getValueType() == MaskVT) {
25132 assert((EltBits == 8 || EltBits == 16 || EltBits == 32) &&
25133 "Unsupported VT for PSIGN");
25134 Mask = DAG.getNode(X86ISD::PSIGN, DL, MaskVT, X, Mask.getOperand(0));
25135 return DAG.getNode(ISD::BITCAST, DL, VT, Mask);
25137 // PBLENDVB only available on SSE 4.1
25138 if (!Subtarget->hasSSE41())
25141 EVT BlendVT = (VT == MVT::v4i64) ? MVT::v32i8 : MVT::v16i8;
25143 X = DAG.getNode(ISD::BITCAST, DL, BlendVT, X);
25144 Y = DAG.getNode(ISD::BITCAST, DL, BlendVT, Y);
25145 Mask = DAG.getNode(ISD::BITCAST, DL, BlendVT, Mask);
25146 Mask = DAG.getNode(ISD::VSELECT, DL, BlendVT, Mask, Y, X);
25147 return DAG.getNode(ISD::BITCAST, DL, VT, Mask);
25151 if (VT != MVT::i16 && VT != MVT::i32 && VT != MVT::i64)
25154 // fold (or (x << c) | (y >> (64 - c))) ==> (shld64 x, y, c)
25155 MachineFunction &MF = DAG.getMachineFunction();
25157 MF.getFunction()->hasFnAttribute(Attribute::OptimizeForSize);
25159 // SHLD/SHRD instructions have lower register pressure, but on some
25160 // platforms they have higher latency than the equivalent
25161 // series of shifts/or that would otherwise be generated.
25162 // Don't fold (or (x << c) | (y >> (64 - c))) if SHLD/SHRD instructions
25163 // have higher latencies and we are not optimizing for size.
25164 if (!OptForSize && Subtarget->isSHLDSlow())
25167 if (N0.getOpcode() == ISD::SRL && N1.getOpcode() == ISD::SHL)
25169 if (N0.getOpcode() != ISD::SHL || N1.getOpcode() != ISD::SRL)
25171 if (!N0.hasOneUse() || !N1.hasOneUse())
25174 SDValue ShAmt0 = N0.getOperand(1);
25175 if (ShAmt0.getValueType() != MVT::i8)
25177 SDValue ShAmt1 = N1.getOperand(1);
25178 if (ShAmt1.getValueType() != MVT::i8)
25180 if (ShAmt0.getOpcode() == ISD::TRUNCATE)
25181 ShAmt0 = ShAmt0.getOperand(0);
25182 if (ShAmt1.getOpcode() == ISD::TRUNCATE)
25183 ShAmt1 = ShAmt1.getOperand(0);
25186 unsigned Opc = X86ISD::SHLD;
25187 SDValue Op0 = N0.getOperand(0);
25188 SDValue Op1 = N1.getOperand(0);
25189 if (ShAmt0.getOpcode() == ISD::SUB) {
25190 Opc = X86ISD::SHRD;
25191 std::swap(Op0, Op1);
25192 std::swap(ShAmt0, ShAmt1);
25195 unsigned Bits = VT.getSizeInBits();
25196 if (ShAmt1.getOpcode() == ISD::SUB) {
25197 SDValue Sum = ShAmt1.getOperand(0);
25198 if (ConstantSDNode *SumC = dyn_cast<ConstantSDNode>(Sum)) {
25199 SDValue ShAmt1Op1 = ShAmt1.getOperand(1);
25200 if (ShAmt1Op1.getNode()->getOpcode() == ISD::TRUNCATE)
25201 ShAmt1Op1 = ShAmt1Op1.getOperand(0);
25202 if (SumC->getSExtValue() == Bits && ShAmt1Op1 == ShAmt0)
25203 return DAG.getNode(Opc, DL, VT,
25205 DAG.getNode(ISD::TRUNCATE, DL,
25208 } else if (ConstantSDNode *ShAmt1C = dyn_cast<ConstantSDNode>(ShAmt1)) {
25209 ConstantSDNode *ShAmt0C = dyn_cast<ConstantSDNode>(ShAmt0);
      if (ShAmt0C &&
          ShAmt0C->getSExtValue() + ShAmt1C->getSExtValue() == Bits)
25212 return DAG.getNode(Opc, DL, VT,
25213 N0.getOperand(0), N1.getOperand(0),
25214 DAG.getNode(ISD::TRUNCATE, DL,
25221 // Generate NEG and CMOV for integer abs.
25222 static SDValue performIntegerAbsCombine(SDNode *N, SelectionDAG &DAG) {
25223 EVT VT = N->getValueType(0);
25225 // Since X86 does not have CMOV for 8-bit integer, we don't convert
25226 // 8-bit integer abs to NEG and CMOV.
25227 if (VT.isInteger() && VT.getSizeInBits() == 8)
25230 SDValue N0 = N->getOperand(0);
25231 SDValue N1 = N->getOperand(1);
25234 // Check pattern of XOR(ADD(X,Y), Y) where Y is SRA(X, size(X)-1)
25235 // and change it to SUB and CMOV.
25236 if (VT.isInteger() && N->getOpcode() == ISD::XOR &&
25237 N0.getOpcode() == ISD::ADD &&
25238 N0.getOperand(1) == N1 &&
25239 N1.getOpcode() == ISD::SRA &&
25240 N1.getOperand(0) == N0.getOperand(0))
25241 if (ConstantSDNode *Y1C = dyn_cast<ConstantSDNode>(N1.getOperand(1)))
25242 if (Y1C->getAPIntValue() == VT.getSizeInBits()-1) {
25243 // Generate SUB & CMOV.
25244 SDValue Neg = DAG.getNode(X86ISD::SUB, DL, DAG.getVTList(VT, MVT::i32),
25245 DAG.getConstant(0, VT), N0.getOperand(0));
25247 SDValue Ops[] = { N0.getOperand(0), Neg,
25248 DAG.getConstant(X86::COND_GE, MVT::i8),
25249 SDValue(Neg.getNode(), 1) };
25250 return DAG.getNode(X86ISD::CMOV, DL, DAG.getVTList(VT, MVT::Glue), Ops);
25255 // PerformXorCombine - Attempts to turn XOR nodes into BLSMSK nodes
25256 static SDValue PerformXorCombine(SDNode *N, SelectionDAG &DAG,
25257 TargetLowering::DAGCombinerInfo &DCI,
25258 const X86Subtarget *Subtarget) {
25259 if (DCI.isBeforeLegalizeOps())
25262 if (Subtarget->hasCMov()) {
25263 SDValue RV = performIntegerAbsCombine(N, DAG);
25271 /// PerformLOADCombine - Do target-specific dag combines on LOAD nodes.
25272 static SDValue PerformLOADCombine(SDNode *N, SelectionDAG &DAG,
25273 TargetLowering::DAGCombinerInfo &DCI,
25274 const X86Subtarget *Subtarget) {
25275 LoadSDNode *Ld = cast<LoadSDNode>(N);
25276 EVT RegVT = Ld->getValueType(0);
25277 EVT MemVT = Ld->getMemoryVT();
25279 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
25281 // For chips with slow 32-byte unaligned loads, break the 32-byte operation
25282 // into two 16-byte operations.
25283 ISD::LoadExtType Ext = Ld->getExtensionType();
25284 unsigned Alignment = Ld->getAlignment();
25285 bool IsAligned = Alignment == 0 || Alignment >= MemVT.getSizeInBits()/8;
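  // An alignment of zero is treated here as sufficiently aligned; only loads
  // whose recorded alignment is smaller than the 32-byte vector size are
  // split into two 16-byte halves below.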
25286 if (RegVT.is256BitVector() && Subtarget->isUnalignedMem32Slow() &&
25287 !DCI.isBeforeLegalizeOps() && !IsAligned && Ext == ISD::NON_EXTLOAD) {
25288 unsigned NumElems = RegVT.getVectorNumElements();
25292 SDValue Ptr = Ld->getBasePtr();
25293 SDValue Increment = DAG.getConstant(16, TLI.getPointerTy());
25295 EVT HalfVT = EVT::getVectorVT(*DAG.getContext(), MemVT.getScalarType(),
25297 SDValue Load1 = DAG.getLoad(HalfVT, dl, Ld->getChain(), Ptr,
25298 Ld->getPointerInfo(), Ld->isVolatile(),
25299 Ld->isNonTemporal(), Ld->isInvariant(),
25301 Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, Increment);
25302 SDValue Load2 = DAG.getLoad(HalfVT, dl, Ld->getChain(), Ptr,
25303 Ld->getPointerInfo(), Ld->isVolatile(),
25304 Ld->isNonTemporal(), Ld->isInvariant(),
25305 std::min(16U, Alignment));
25306 SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
25308 Load2.getValue(1));
25310 SDValue NewVec = DAG.getUNDEF(RegVT);
25311 NewVec = Insert128BitVector(NewVec, Load1, 0, DAG, dl);
25312 NewVec = Insert128BitVector(NewVec, Load2, NumElems/2, DAG, dl);
25313 return DCI.CombineTo(N, NewVec, TF, true);
25319 /// PerformMLOADCombine - Resolve extending loads
25320 static SDValue PerformMLOADCombine(SDNode *N, SelectionDAG &DAG,
25321 TargetLowering::DAGCombinerInfo &DCI,
25322 const X86Subtarget *Subtarget) {
25323 MaskedLoadSDNode *Mld = cast<MaskedLoadSDNode>(N);
25324 if (Mld->getExtensionType() != ISD::SEXTLOAD)
25327 EVT VT = Mld->getValueType(0);
25328 unsigned NumElems = VT.getVectorNumElements();
25329 EVT LdVT = Mld->getMemoryVT();
25332 assert(LdVT != VT && "Cannot extend to the same type");
25333 unsigned ToSz = VT.getVectorElementType().getSizeInBits();
25334 unsigned FromSz = LdVT.getVectorElementType().getSizeInBits();
25335 // From, To sizes and ElemCount must be pow of two
25336 assert (isPowerOf2_32(NumElems * FromSz * ToSz) &&
25337 "Unexpected size for extending masked load");
25339 unsigned SizeRatio = ToSz / FromSz;
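  // For example, a sign-extending masked load from memory type v8i16 to
  // result type v8i32 has FromSz = 16, ToSz = 32 and SizeRatio = 2, so the
  // shuffles below are performed on a v16i16 wide type.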
25340 assert(SizeRatio * NumElems * FromSz == VT.getSizeInBits());
25342 // Create a type on which we perform the shuffle
25343 EVT WideVecVT = EVT::getVectorVT(*DAG.getContext(),
25344 LdVT.getScalarType(), NumElems*SizeRatio);
25345 assert(WideVecVT.getSizeInBits() == VT.getSizeInBits());
25347 // Convert Src0 value
25348 SDValue WideSrc0 = DAG.getNode(ISD::BITCAST, dl, WideVecVT, Mld->getSrc0());
25349 if (Mld->getSrc0().getOpcode() != ISD::UNDEF) {
25350 SmallVector<int, 16> ShuffleVec(NumElems * SizeRatio, -1);
25351 for (unsigned i = 0; i != NumElems; ++i)
25352 ShuffleVec[i] = i * SizeRatio;
25354 // Can't shuffle using an illegal type.
25355 assert (DAG.getTargetLoweringInfo().isTypeLegal(WideVecVT)
25356 && "WideVecVT should be legal");
25357 WideSrc0 = DAG.getVectorShuffle(WideVecVT, dl, WideSrc0,
25358 DAG.getUNDEF(WideVecVT), &ShuffleVec[0]);
25360 // Prepare the new mask
  SDValue NewMask;
  SDValue Mask = Mld->getMask();
25363 if (Mask.getValueType() == VT) {
25364 // Mask and original value have the same type
25365 NewMask = DAG.getNode(ISD::BITCAST, dl, WideVecVT, Mask);
25366 SmallVector<int, 16> ShuffleVec(NumElems * SizeRatio, -1);
25367 for (unsigned i = 0; i != NumElems; ++i)
25368 ShuffleVec[i] = i * SizeRatio;
25369 for (unsigned i = NumElems; i != NumElems*SizeRatio; ++i)
25370 ShuffleVec[i] = NumElems*SizeRatio;
25371 NewMask = DAG.getVectorShuffle(WideVecVT, dl, NewMask,
25372 DAG.getConstant(0, WideVecVT),
25376 assert(Mask.getValueType().getVectorElementType() == MVT::i1);
25377 unsigned WidenNumElts = NumElems*SizeRatio;
25378 unsigned MaskNumElts = VT.getVectorNumElements();
25379 EVT NewMaskVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
25382 unsigned NumConcat = WidenNumElts / MaskNumElts;
25383 SmallVector<SDValue, 16> Ops(NumConcat);
25384 SDValue ZeroVal = DAG.getConstant(0, Mask.getValueType());
25386 for (unsigned i = 1; i != NumConcat; ++i)
25389 NewMask = DAG.getNode(ISD::CONCAT_VECTORS, dl, NewMaskVT, Ops);
25392 SDValue WideLd = DAG.getMaskedLoad(WideVecVT, dl, Mld->getChain(),
25393 Mld->getBasePtr(), NewMask, WideSrc0,
25394 Mld->getMemoryVT(), Mld->getMemOperand(),
25396 SDValue NewVec = DAG.getNode(X86ISD::VSEXT, dl, VT, WideLd);
25397 return DCI.CombineTo(N, NewVec, WideLd.getValue(1), true);
25400 /// PerformMSTORECombine - Resolve truncating stores
25401 static SDValue PerformMSTORECombine(SDNode *N, SelectionDAG &DAG,
25402 const X86Subtarget *Subtarget) {
25403 MaskedStoreSDNode *Mst = cast<MaskedStoreSDNode>(N);
25404 if (!Mst->isTruncatingStore())
25407 EVT VT = Mst->getValue().getValueType();
25408 unsigned NumElems = VT.getVectorNumElements();
25409 EVT StVT = Mst->getMemoryVT();
25412 assert(StVT != VT && "Cannot truncate to the same type");
25413 unsigned FromSz = VT.getVectorElementType().getSizeInBits();
25414 unsigned ToSz = StVT.getVectorElementType().getSizeInBits();
25416 // From, To sizes and ElemCount must be pow of two
25417 assert (isPowerOf2_32(NumElems * FromSz * ToSz) &&
25418 "Unexpected size for truncating masked store");
25419 // We are going to use the original vector elt for storing.
25420 // Accumulated smaller vector elements must be a multiple of the store size.
25421 assert (((NumElems * FromSz) % ToSz) == 0 &&
25422 "Unexpected ratio for truncating masked store");
25424 unsigned SizeRatio = FromSz / ToSz;
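  // For example, truncating a v8i32 value to a v8i16 memory type gives
  // FromSz = 32, ToSz = 16 and SizeRatio = 2, so the value is rearranged as
  // a v16i16 before being stored.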
25425 assert(SizeRatio * NumElems * ToSz == VT.getSizeInBits());
25427 // Create a type on which we perform the shuffle
25428 EVT WideVecVT = EVT::getVectorVT(*DAG.getContext(),
25429 StVT.getScalarType(), NumElems*SizeRatio);
25431 assert(WideVecVT.getSizeInBits() == VT.getSizeInBits());
25433 SDValue WideVec = DAG.getNode(ISD::BITCAST, dl, WideVecVT, Mst->getValue());
25434 SmallVector<int, 16> ShuffleVec(NumElems * SizeRatio, -1);
25435 for (unsigned i = 0; i != NumElems; ++i)
25436 ShuffleVec[i] = i * SizeRatio;
25438 // Can't shuffle using an illegal type.
25439 assert (DAG.getTargetLoweringInfo().isTypeLegal(WideVecVT)
25440 && "WideVecVT should be legal");
25442 SDValue TruncatedVal = DAG.getVectorShuffle(WideVecVT, dl, WideVec,
25443 DAG.getUNDEF(WideVecVT),
25447 SDValue Mask = Mst->getMask();
25448 if (Mask.getValueType() == VT) {
25449 // Mask and original value have the same type
25450 NewMask = DAG.getNode(ISD::BITCAST, dl, WideVecVT, Mask);
25451 for (unsigned i = 0; i != NumElems; ++i)
25452 ShuffleVec[i] = i * SizeRatio;
25453 for (unsigned i = NumElems; i != NumElems*SizeRatio; ++i)
25454 ShuffleVec[i] = NumElems*SizeRatio;
25455 NewMask = DAG.getVectorShuffle(WideVecVT, dl, NewMask,
25456 DAG.getConstant(0, WideVecVT),
25460 assert(Mask.getValueType().getVectorElementType() == MVT::i1);
25461 unsigned WidenNumElts = NumElems*SizeRatio;
25462 unsigned MaskNumElts = VT.getVectorNumElements();
25463 EVT NewMaskVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
25466 unsigned NumConcat = WidenNumElts / MaskNumElts;
25467 SmallVector<SDValue, 16> Ops(NumConcat);
25468 SDValue ZeroVal = DAG.getConstant(0, Mask.getValueType());
25470 for (unsigned i = 1; i != NumConcat; ++i)
25473 NewMask = DAG.getNode(ISD::CONCAT_VECTORS, dl, NewMaskVT, Ops);
25476 return DAG.getMaskedStore(Mst->getChain(), dl, TruncatedVal, Mst->getBasePtr(),
25477 NewMask, StVT, Mst->getMemOperand(), false);
25479 /// PerformSTORECombine - Do target-specific dag combines on STORE nodes.
25480 static SDValue PerformSTORECombine(SDNode *N, SelectionDAG &DAG,
25481 const X86Subtarget *Subtarget) {
25482 StoreSDNode *St = cast<StoreSDNode>(N);
25483 EVT VT = St->getValue().getValueType();
25484 EVT StVT = St->getMemoryVT();
25486 SDValue StoredVal = St->getOperand(1);
25487 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
25489 // If we are saving a concatenation of two XMM registers and 32-byte stores
25490 // are slow, such as on Sandy Bridge, perform two 16-byte stores.
25491 unsigned Alignment = St->getAlignment();
25492 bool IsAligned = Alignment == 0 || Alignment >= VT.getSizeInBits()/8;
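  // As in PerformLOADCombine, an alignment of zero counts as aligned, so only
  // explicitly under-aligned 256-bit stores are split in two.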
25493 if (VT.is256BitVector() && Subtarget->isUnalignedMem32Slow() &&
25494 StVT == VT && !IsAligned) {
25495 unsigned NumElems = VT.getVectorNumElements();
25499 SDValue Value0 = Extract128BitVector(StoredVal, 0, DAG, dl);
25500 SDValue Value1 = Extract128BitVector(StoredVal, NumElems/2, DAG, dl);
25502 SDValue Stride = DAG.getConstant(16, TLI.getPointerTy());
25503 SDValue Ptr0 = St->getBasePtr();
25504 SDValue Ptr1 = DAG.getNode(ISD::ADD, dl, Ptr0.getValueType(), Ptr0, Stride);
25506 SDValue Ch0 = DAG.getStore(St->getChain(), dl, Value0, Ptr0,
25507 St->getPointerInfo(), St->isVolatile(),
25508 St->isNonTemporal(), Alignment);
25509 SDValue Ch1 = DAG.getStore(St->getChain(), dl, Value1, Ptr1,
25510 St->getPointerInfo(), St->isVolatile(),
25511 St->isNonTemporal(),
25512 std::min(16U, Alignment));
25513 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Ch0, Ch1);
25516 // Optimize trunc store (of multiple scalars) to shuffle and store.
25517 // First, pack all of the elements in one place. Next, store to memory
25518 // in fewer chunks.
25519 if (St->isTruncatingStore() && VT.isVector()) {
25520 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
25521 unsigned NumElems = VT.getVectorNumElements();
25522 assert(StVT != VT && "Cannot truncate to the same type");
25523 unsigned FromSz = VT.getVectorElementType().getSizeInBits();
25524 unsigned ToSz = StVT.getVectorElementType().getSizeInBits();
25526 // From, To sizes and ElemCount must be pow of two
25527 if (!isPowerOf2_32(NumElems * FromSz * ToSz)) return SDValue();
25528 // We are going to use the original vector elt for storing.
25529 // Accumulated smaller vector elements must be a multiple of the store size.
25530 if (0 != (NumElems * FromSz) % ToSz) return SDValue();
25532 unsigned SizeRatio = FromSz / ToSz;
25534 assert(SizeRatio * NumElems * ToSz == VT.getSizeInBits());
25536 // Create a type on which we perform the shuffle
25537 EVT WideVecVT = EVT::getVectorVT(*DAG.getContext(),
25538 StVT.getScalarType(), NumElems*SizeRatio);
25540 assert(WideVecVT.getSizeInBits() == VT.getSizeInBits());
25542 SDValue WideVec = DAG.getNode(ISD::BITCAST, dl, WideVecVT, St->getValue());
25543 SmallVector<int, 8> ShuffleVec(NumElems * SizeRatio, -1);
25544 for (unsigned i = 0; i != NumElems; ++i)
25545 ShuffleVec[i] = i * SizeRatio;
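    // On little-endian x86 the low ToSz bits of source element i live in wide
    // element i * SizeRatio, so a mask like <0,2,4,...,-1,-1,...> (for a size
    // ratio of 2) packs the truncated values into the low elements.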
25547 // Can't shuffle using an illegal type.
25548 if (!TLI.isTypeLegal(WideVecVT))
25551 SDValue Shuff = DAG.getVectorShuffle(WideVecVT, dl, WideVec,
25552 DAG.getUNDEF(WideVecVT),
25554 // At this point all of the data is stored at the bottom of the
25555 // register. We now need to save it to mem.
25557 // Find the largest store unit
25558 MVT StoreType = MVT::i8;
25559 for (MVT Tp : MVT::integer_valuetypes()) {
    if (TLI.isTypeLegal(Tp) && Tp.getSizeInBits() <= NumElems * ToSz)
      StoreType = Tp;
  }
25564 // On 32-bit systems, we can't save 64-bit integers. Try bitcasting to f64.
25565 if (TLI.isTypeLegal(MVT::f64) && StoreType.getSizeInBits() < 64 &&
25566 (64 <= NumElems * ToSz))
25567 StoreType = MVT::f64;
25569 // Bitcast the original vector into a vector of store-size units
25570 EVT StoreVecVT = EVT::getVectorVT(*DAG.getContext(),
25571 StoreType, VT.getSizeInBits()/StoreType.getSizeInBits());
25572 assert(StoreVecVT.getSizeInBits() == VT.getSizeInBits());
25573 SDValue ShuffWide = DAG.getNode(ISD::BITCAST, dl, StoreVecVT, Shuff);
25574 SmallVector<SDValue, 8> Chains;
25575 SDValue Increment = DAG.getConstant(StoreType.getSizeInBits()/8,
25576 TLI.getPointerTy());
25577 SDValue Ptr = St->getBasePtr();
25579 // Perform one or more big stores into memory.
25580 for (unsigned i=0, e=(ToSz*NumElems)/StoreType.getSizeInBits(); i!=e; ++i) {
25581 SDValue SubVec = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl,
25582 StoreType, ShuffWide,
25583 DAG.getIntPtrConstant(i));
25584 SDValue Ch = DAG.getStore(St->getChain(), dl, SubVec, Ptr,
25585 St->getPointerInfo(), St->isVolatile(),
25586 St->isNonTemporal(), St->getAlignment());
25587 Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, Increment);
25588 Chains.push_back(Ch);
25591 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Chains);
25594 // Turn load->store of MMX types into GPR load/stores. This avoids clobbering
25595 // the FP state in cases where an emms may be missing.
25596 // A preferable solution to the general problem is to figure out the right
25597 // places to insert EMMS. This qualifies as a quick hack.
25599 // Similarly, turn load->store of i64 into double load/stores in 32-bit mode.
25600 if (VT.getSizeInBits() != 64)
25603 const Function *F = DAG.getMachineFunction().getFunction();
25604 bool NoImplicitFloatOps = F->hasFnAttribute(Attribute::NoImplicitFloat);
25605 bool F64IsLegal = !DAG.getTarget().Options.UseSoftFloat && !NoImplicitFloatOps
25606 && Subtarget->hasSSE2();
25607 if ((VT.isVector() ||
25608 (VT == MVT::i64 && F64IsLegal && !Subtarget->is64Bit())) &&
25609 isa<LoadSDNode>(St->getValue()) &&
25610 !cast<LoadSDNode>(St->getValue())->isVolatile() &&
25611 St->getChain().hasOneUse() && !St->isVolatile()) {
25612 SDNode* LdVal = St->getValue().getNode();
25613 LoadSDNode *Ld = nullptr;
25614 int TokenFactorIndex = -1;
25615 SmallVector<SDValue, 8> Ops;
25616 SDNode* ChainVal = St->getChain().getNode();
25617 // Must be a store of a load. We currently handle two cases: the load
25618 // is a direct child, and it's under an intervening TokenFactor. It is
25619 // possible to dig deeper under nested TokenFactors.
25620 if (ChainVal == LdVal)
25621 Ld = cast<LoadSDNode>(St->getChain());
25622 else if (St->getValue().hasOneUse() &&
25623 ChainVal->getOpcode() == ISD::TokenFactor) {
25624 for (unsigned i = 0, e = ChainVal->getNumOperands(); i != e; ++i) {
25625 if (ChainVal->getOperand(i).getNode() == LdVal) {
25626 TokenFactorIndex = i;
25627 Ld = cast<LoadSDNode>(St->getValue());
25629 Ops.push_back(ChainVal->getOperand(i));
25633 if (!Ld || !ISD::isNormalLoad(Ld))
25636 // If this is not the MMX case, i.e. we are just turning i64 load/store
25637 // into f64 load/store, avoid the transformation if there are multiple
25638 // uses of the loaded value.
25639 if (!VT.isVector() && !Ld->hasNUsesOfValue(1, 0))
25644 // If we are a 64-bit capable x86, lower to a single movq load/store pair.
25645 // Otherwise, if it's legal to use f64 SSE instructions, use f64 load/store
25647 if (Subtarget->is64Bit() || F64IsLegal) {
25648 EVT LdVT = Subtarget->is64Bit() ? MVT::i64 : MVT::f64;
25649 SDValue NewLd = DAG.getLoad(LdVT, LdDL, Ld->getChain(), Ld->getBasePtr(),
25650 Ld->getPointerInfo(), Ld->isVolatile(),
25651 Ld->isNonTemporal(), Ld->isInvariant(),
25652 Ld->getAlignment());
25653 SDValue NewChain = NewLd.getValue(1);
25654 if (TokenFactorIndex != -1) {
25655 Ops.push_back(NewChain);
25656 NewChain = DAG.getNode(ISD::TokenFactor, LdDL, MVT::Other, Ops);
25658 return DAG.getStore(NewChain, StDL, NewLd, St->getBasePtr(),
25659 St->getPointerInfo(),
25660 St->isVolatile(), St->isNonTemporal(),
25661 St->getAlignment());
25664 // Otherwise, lower to two pairs of 32-bit loads / stores.
25665 SDValue LoAddr = Ld->getBasePtr();
25666 SDValue HiAddr = DAG.getNode(ISD::ADD, LdDL, MVT::i32, LoAddr,
25667 DAG.getConstant(4, MVT::i32));
25669 SDValue LoLd = DAG.getLoad(MVT::i32, LdDL, Ld->getChain(), LoAddr,
25670 Ld->getPointerInfo(),
25671 Ld->isVolatile(), Ld->isNonTemporal(),
25672 Ld->isInvariant(), Ld->getAlignment());
25673 SDValue HiLd = DAG.getLoad(MVT::i32, LdDL, Ld->getChain(), HiAddr,
25674 Ld->getPointerInfo().getWithOffset(4),
25675 Ld->isVolatile(), Ld->isNonTemporal(),
25677 MinAlign(Ld->getAlignment(), 4));
25679 SDValue NewChain = LoLd.getValue(1);
25680 if (TokenFactorIndex != -1) {
25681 Ops.push_back(LoLd);
25682 Ops.push_back(HiLd);
25683 NewChain = DAG.getNode(ISD::TokenFactor, LdDL, MVT::Other, Ops);
25686 LoAddr = St->getBasePtr();
25687 HiAddr = DAG.getNode(ISD::ADD, StDL, MVT::i32, LoAddr,
25688 DAG.getConstant(4, MVT::i32));
25690 SDValue LoSt = DAG.getStore(NewChain, StDL, LoLd, LoAddr,
25691 St->getPointerInfo(),
25692 St->isVolatile(), St->isNonTemporal(),
25693 St->getAlignment());
25694 SDValue HiSt = DAG.getStore(NewChain, StDL, HiLd, HiAddr,
25695 St->getPointerInfo().getWithOffset(4),
25697 St->isNonTemporal(),
25698 MinAlign(St->getAlignment(), 4));
25699 return DAG.getNode(ISD::TokenFactor, StDL, MVT::Other, LoSt, HiSt);
25704 /// Return 'true' if this vector operation is "horizontal"
25705 /// and return the operands for the horizontal operation in LHS and RHS. A
25706 /// horizontal operation performs the binary operation on successive elements
25707 /// of its first operand, then on successive elements of its second operand,
25708 /// returning the resulting values in a vector. For example, if
25709 /// A = < float a0, float a1, float a2, float a3 >
25711 /// B = < float b0, float b1, float b2, float b3 >
25712 /// then the result of doing a horizontal operation on A and B is
25713 /// A horizontal-op B = < a0 op a1, a2 op a3, b0 op b1, b2 op b3 >.
25714 /// In short, LHS and RHS are inspected to see if LHS op RHS is of the form
25715 /// A horizontal-op B, for some already available A and B, and if so then LHS is
25716 /// set to A, RHS to B, and the routine returns 'true'.
25717 /// Note that the binary operation should have the property that if one of the
25718 /// operands is UNDEF then the result is UNDEF.
25719 static bool isHorizontalBinOp(SDValue &LHS, SDValue &RHS, bool IsCommutative) {
25720 // Look for the following pattern: if
25721 // A = < float a0, float a1, float a2, float a3 >
25722 // B = < float b0, float b1, float b2, float b3 >
25724 // LHS = VECTOR_SHUFFLE A, B, <0, 2, 4, 6>
25725 // RHS = VECTOR_SHUFFLE A, B, <1, 3, 5, 7>
25726 // then LHS op RHS = < a0 op a1, a2 op a3, b0 op b1, b2 op b3 >
25727 // which is A horizontal-op B.
25729 // At least one of the operands should be a vector shuffle.
25730 if (LHS.getOpcode() != ISD::VECTOR_SHUFFLE &&
25731 RHS.getOpcode() != ISD::VECTOR_SHUFFLE)
25734 MVT VT = LHS.getSimpleValueType();
25736 assert((VT.is128BitVector() || VT.is256BitVector()) &&
25737 "Unsupported vector type for horizontal add/sub");
25739 // Handle 128 and 256-bit vector lengths. AVX defines horizontal add/sub to
25740 // operate independently on 128-bit lanes.
25741 unsigned NumElts = VT.getVectorNumElements();
25742 unsigned NumLanes = VT.getSizeInBits()/128;
25743 unsigned NumLaneElts = NumElts / NumLanes;
25744 assert((NumLaneElts % 2 == 0) &&
25745 "Vector type should have an even number of elements in each lane");
25746 unsigned HalfLaneElts = NumLaneElts/2;
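  // For example, for v8f32: NumElts = 8, NumLanes = 2, NumLaneElts = 4 and
  // HalfLaneElts = 2, since each 128-bit lane of the result takes two
  // elements from LHS and two from RHS.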
25748 // View LHS in the form
25749 // LHS = VECTOR_SHUFFLE A, B, LMask
25750 // If LHS is not a shuffle then pretend it is the shuffle
25751 // LHS = VECTOR_SHUFFLE LHS, undef, <0, 1, ..., N-1>
25752 // NOTE: in what follows a default initialized SDValue represents an UNDEF of
25755 SmallVector<int, 16> LMask(NumElts);
25756 if (LHS.getOpcode() == ISD::VECTOR_SHUFFLE) {
25757 if (LHS.getOperand(0).getOpcode() != ISD::UNDEF)
25758 A = LHS.getOperand(0);
25759 if (LHS.getOperand(1).getOpcode() != ISD::UNDEF)
25760 B = LHS.getOperand(1);
25761 ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(LHS.getNode())->getMask();
25762 std::copy(Mask.begin(), Mask.end(), LMask.begin());
25764 if (LHS.getOpcode() != ISD::UNDEF)
25766 for (unsigned i = 0; i != NumElts; ++i)
25770 // Likewise, view RHS in the form
25771 // RHS = VECTOR_SHUFFLE C, D, RMask
25773 SmallVector<int, 16> RMask(NumElts);
25774 if (RHS.getOpcode() == ISD::VECTOR_SHUFFLE) {
25775 if (RHS.getOperand(0).getOpcode() != ISD::UNDEF)
25776 C = RHS.getOperand(0);
25777 if (RHS.getOperand(1).getOpcode() != ISD::UNDEF)
25778 D = RHS.getOperand(1);
25779 ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(RHS.getNode())->getMask();
25780 std::copy(Mask.begin(), Mask.end(), RMask.begin());
25782 if (RHS.getOpcode() != ISD::UNDEF)
25784 for (unsigned i = 0; i != NumElts; ++i)
25788 // Check that the shuffles are both shuffling the same vectors.
25789 if (!(A == C && B == D) && !(A == D && B == C))
25792 // If everything is UNDEF then bail out: it would be better to fold to UNDEF.
25793 if (!A.getNode() && !B.getNode())
25796 // If A and B occur in reverse order in RHS, then "swap" them (which means
25797 // rewriting the mask).
25799 CommuteVectorShuffleMask(RMask, NumElts);
25801 // At this point LHS and RHS are equivalent to
25802 // LHS = VECTOR_SHUFFLE A, B, LMask
25803 // RHS = VECTOR_SHUFFLE A, B, RMask
25804 // Check that the masks correspond to performing a horizontal operation.
25805 for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
25806 for (unsigned i = 0; i != NumLaneElts; ++i) {
25807 int LIdx = LMask[i+l], RIdx = RMask[i+l];
25809 // Ignore any UNDEF components.
25810 if (LIdx < 0 || RIdx < 0 ||
25811 (!A.getNode() && (LIdx < (int)NumElts || RIdx < (int)NumElts)) ||
25812 (!B.getNode() && (LIdx >= (int)NumElts || RIdx >= (int)NumElts)))
25815 // Check that successive elements are being operated on. If not, this is
25816 // not a horizontal operation.
25817 unsigned Src = (i/HalfLaneElts); // each lane is split between srcs
25818 int Index = 2*(i%HalfLaneElts) + NumElts*Src + l;
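      // Within a lane, the first HalfLaneElts results must pair successive
      // elements of A and the remaining results successive elements of B
      // (hence the NumElts*Src offset), i.e. LIdx/RIdx should be Index and
      // Index+1 (or swapped for a commutative op).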
25819 if (!(LIdx == Index && RIdx == Index + 1) &&
25820 !(IsCommutative && LIdx == Index + 1 && RIdx == Index))
25825 LHS = A.getNode() ? A : B; // If A is 'UNDEF', use B for it.
25826 RHS = B.getNode() ? B : A; // If B is 'UNDEF', use A for it.
25830 /// Do target-specific dag combines on floating point adds.
25831 static SDValue PerformFADDCombine(SDNode *N, SelectionDAG &DAG,
25832 const X86Subtarget *Subtarget) {
25833 EVT VT = N->getValueType(0);
25834 SDValue LHS = N->getOperand(0);
25835 SDValue RHS = N->getOperand(1);
25837 // Try to synthesize horizontal adds from adds of shuffles.
25838 if (((Subtarget->hasSSE3() && (VT == MVT::v4f32 || VT == MVT::v2f64)) ||
25839 (Subtarget->hasFp256() && (VT == MVT::v8f32 || VT == MVT::v4f64))) &&
25840 isHorizontalBinOp(LHS, RHS, true))
25841 return DAG.getNode(X86ISD::FHADD, SDLoc(N), VT, LHS, RHS);
25845 /// Do target-specific dag combines on floating point subs.
25846 static SDValue PerformFSUBCombine(SDNode *N, SelectionDAG &DAG,
25847 const X86Subtarget *Subtarget) {
25848 EVT VT = N->getValueType(0);
25849 SDValue LHS = N->getOperand(0);
25850 SDValue RHS = N->getOperand(1);
25852 // Try to synthesize horizontal subs from subs of shuffles.
25853 if (((Subtarget->hasSSE3() && (VT == MVT::v4f32 || VT == MVT::v2f64)) ||
25854 (Subtarget->hasFp256() && (VT == MVT::v8f32 || VT == MVT::v4f64))) &&
25855 isHorizontalBinOp(LHS, RHS, false))
25856 return DAG.getNode(X86ISD::FHSUB, SDLoc(N), VT, LHS, RHS);
25860 /// Do target-specific dag combines on X86ISD::FOR and X86ISD::FXOR nodes.
25861 static SDValue PerformFORCombine(SDNode *N, SelectionDAG &DAG) {
25862 assert(N->getOpcode() == X86ISD::FOR || N->getOpcode() == X86ISD::FXOR);
25864 // F[X]OR(0.0, x) -> x
25865 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(0)))
25866 if (C->getValueAPF().isPosZero())
25867 return N->getOperand(1);
25869 // F[X]OR(x, 0.0) -> x
25870 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(1)))
25871 if (C->getValueAPF().isPosZero())
25872 return N->getOperand(0);
25876 /// Do target-specific dag combines on X86ISD::FMIN and X86ISD::FMAX nodes.
25877 static SDValue PerformFMinFMaxCombine(SDNode *N, SelectionDAG &DAG) {
25878 assert(N->getOpcode() == X86ISD::FMIN || N->getOpcode() == X86ISD::FMAX);
25880 // Only perform optimizations if UnsafeMath is used.
25881 if (!DAG.getTarget().Options.UnsafeFPMath)
25884 // If we run in unsafe-math mode, then convert the FMAX and FMIN nodes
25885 // into FMINC and FMAXC, which are commutative operations.
25886 unsigned NewOp = 0;
25887 switch (N->getOpcode()) {
25888 default: llvm_unreachable("unknown opcode");
25889 case X86ISD::FMIN: NewOp = X86ISD::FMINC; break;
25890 case X86ISD::FMAX: NewOp = X86ISD::FMAXC; break;
25893 return DAG.getNode(NewOp, SDLoc(N), N->getValueType(0),
25894 N->getOperand(0), N->getOperand(1));
25897 /// Do target-specific dag combines on X86ISD::FAND nodes.
25898 static SDValue PerformFANDCombine(SDNode *N, SelectionDAG &DAG) {
25899 // FAND(0.0, x) -> 0.0
25900 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(0)))
25901 if (C->getValueAPF().isPosZero())
25902 return N->getOperand(0);
25904 // FAND(x, 0.0) -> 0.0
25905 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(1)))
25906 if (C->getValueAPF().isPosZero())
25907 return N->getOperand(1);
25912 /// Do target-specific dag combines on X86ISD::FANDN nodes
25913 static SDValue PerformFANDNCombine(SDNode *N, SelectionDAG &DAG) {
25914 // FANDN(0.0, x) -> x
25915 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(0)))
25916 if (C->getValueAPF().isPosZero())
25917 return N->getOperand(1);
25919 // FANDN(x, 0.0) -> 0.0
25920 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(1)))
25921 if (C->getValueAPF().isPosZero())
25922 return N->getOperand(1);
25927 static SDValue PerformBTCombine(SDNode *N,
25929 TargetLowering::DAGCombinerInfo &DCI) {
25930 // BT ignores high bits in the bit index operand.
25931 SDValue Op1 = N->getOperand(1);
25932 if (Op1.hasOneUse()) {
25933 unsigned BitWidth = Op1.getValueSizeInBits();
25934 APInt DemandedMask = APInt::getLowBitsSet(BitWidth, Log2_32(BitWidth));
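    // Only the low Log2(BitWidth) bits of the bit-index are demanded; e.g.
    // for a 32-bit BT only bits [4:0] of the index operand matter.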
25935 APInt KnownZero, KnownOne;
25936 TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
25937 !DCI.isBeforeLegalizeOps());
25938 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
25939 if (TLO.ShrinkDemandedConstant(Op1, DemandedMask) ||
25940 TLI.SimplifyDemandedBits(Op1, DemandedMask, KnownZero, KnownOne, TLO))
25941 DCI.CommitTargetLoweringOpt(TLO);
25946 static SDValue PerformVZEXT_MOVLCombine(SDNode *N, SelectionDAG &DAG) {
25947 SDValue Op = N->getOperand(0);
25948 if (Op.getOpcode() == ISD::BITCAST)
25949 Op = Op.getOperand(0);
25950 EVT VT = N->getValueType(0), OpVT = Op.getValueType();
25951 if (Op.getOpcode() == X86ISD::VZEXT_LOAD &&
25952 VT.getVectorElementType().getSizeInBits() ==
25953 OpVT.getVectorElementType().getSizeInBits()) {
25954 return DAG.getNode(ISD::BITCAST, SDLoc(N), VT, Op);
25959 static SDValue PerformSIGN_EXTEND_INREGCombine(SDNode *N, SelectionDAG &DAG,
25960 const X86Subtarget *Subtarget) {
25961 EVT VT = N->getValueType(0);
25962 if (!VT.isVector())
25965 SDValue N0 = N->getOperand(0);
25966 SDValue N1 = N->getOperand(1);
25967 EVT ExtraVT = cast<VTSDNode>(N1)->getVT();
25970 // The SIGN_EXTEND_INREG to v4i64 is expensive operation on the
25971 // both SSE and AVX2 since there is no sign-extended shift right
25972 // operation on a vector with 64-bit elements.
25973 //(sext_in_reg (v4i64 anyext (v4i32 x )), ExtraVT) ->
25974 // (v4i64 sext (v4i32 sext_in_reg (v4i32 x , ExtraVT)))
25975 if (VT == MVT::v4i64 && (N0.getOpcode() == ISD::ANY_EXTEND ||
25976 N0.getOpcode() == ISD::SIGN_EXTEND)) {
25977 SDValue N00 = N0.getOperand(0);
25979 // EXTLOAD has a better solution on AVX2,
25980 // it may be replaced with X86ISD::VSEXT node.
25981 if (N00.getOpcode() == ISD::LOAD && Subtarget->hasInt256())
25982 if (!ISD::isNormalLoad(N00.getNode()))
25985 if (N00.getValueType() == MVT::v4i32 && ExtraVT.getSizeInBits() < 128) {
25986 SDValue Tmp = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, MVT::v4i32,
25988 return DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i64, Tmp);
25994 static SDValue PerformSExtCombine(SDNode *N, SelectionDAG &DAG,
25995 TargetLowering::DAGCombinerInfo &DCI,
25996 const X86Subtarget *Subtarget) {
25997 SDValue N0 = N->getOperand(0);
25998 EVT VT = N->getValueType(0);
26000 // (i8,i32 sext (sdivrem (i8 x, i8 y)) ->
26001 // (i8,i32 (sdivrem_sext_hreg (i8 x, i8 y)
26002 // This exposes the sext to the sdivrem lowering, so that it directly extends
26003 // from AH (which we otherwise need to do contortions to access).
26004 if (N0.getOpcode() == ISD::SDIVREM && N0.getResNo() == 1 &&
26005 N0.getValueType() == MVT::i8 && VT == MVT::i32) {
26007 SDVTList NodeTys = DAG.getVTList(MVT::i8, VT);
26008 SDValue R = DAG.getNode(X86ISD::SDIVREM8_SEXT_HREG, dl, NodeTys,
26009 N0.getOperand(0), N0.getOperand(1));
26010 DAG.ReplaceAllUsesOfValueWith(N0.getValue(0), R.getValue(0));
26011 return R.getValue(1);
26014 if (!DCI.isBeforeLegalizeOps())
26017 if (!Subtarget->hasFp256())
26020 if (VT.isVector() && VT.getSizeInBits() == 256) {
26021 SDValue R = WidenMaskArithmetic(N, DAG, DCI, Subtarget);
26029 static SDValue PerformFMACombine(SDNode *N, SelectionDAG &DAG,
26030 const X86Subtarget* Subtarget) {
26032 EVT VT = N->getValueType(0);
26034 // Let legalize expand this if it isn't a legal type yet.
26035 if (!DAG.getTargetLoweringInfo().isTypeLegal(VT))
26038 EVT ScalarVT = VT.getScalarType();
26039 if ((ScalarVT != MVT::f32 && ScalarVT != MVT::f64) ||
26040 (!Subtarget->hasFMA() && !Subtarget->hasFMA4()))
26043 SDValue A = N->getOperand(0);
26044 SDValue B = N->getOperand(1);
26045 SDValue C = N->getOperand(2);
26047 bool NegA = (A.getOpcode() == ISD::FNEG);
26048 bool NegB = (B.getOpcode() == ISD::FNEG);
26049 bool NegC = (C.getOpcode() == ISD::FNEG);
26051 // Negative multiplication when NegA xor NegB
26052 bool NegMul = (NegA != NegB);
26054 A = A.getOperand(0);
26056 B = B.getOperand(0);
26058 C = C.getOperand(0);
  unsigned Opcode;
  if (!NegMul)
    Opcode = (!NegC) ? X86ISD::FMADD : X86ISD::FMSUB;
  else
    Opcode = (!NegC) ? X86ISD::FNMADD : X86ISD::FNMSUB;
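  // So a non-negated multiply maps to FMADD/FMSUB and a negated multiply to
  // FNMADD/FNMSUB, with the *SUB form chosen when the addend C was negated.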
26066 return DAG.getNode(Opcode, dl, VT, A, B, C);
26069 static SDValue PerformZExtCombine(SDNode *N, SelectionDAG &DAG,
26070 TargetLowering::DAGCombinerInfo &DCI,
26071 const X86Subtarget *Subtarget) {
26072 // (i32 zext (and (i8 x86isd::setcc_carry), 1)) ->
26073 // (and (i32 x86isd::setcc_carry), 1)
26074 // This eliminates the zext. This transformation is necessary because
26075 // ISD::SETCC is always legalized to i8.
26077 SDValue N0 = N->getOperand(0);
26078 EVT VT = N->getValueType(0);
26080 if (N0.getOpcode() == ISD::AND &&
26082 N0.getOperand(0).hasOneUse()) {
26083 SDValue N00 = N0.getOperand(0);
26084 if (N00.getOpcode() == X86ISD::SETCC_CARRY) {
26085 ConstantSDNode *C = dyn_cast<ConstantSDNode>(N0.getOperand(1));
26086 if (!C || C->getZExtValue() != 1)
26088 return DAG.getNode(ISD::AND, dl, VT,
26089 DAG.getNode(X86ISD::SETCC_CARRY, dl, VT,
26090 N00.getOperand(0), N00.getOperand(1)),
26091 DAG.getConstant(1, VT));
26095 if (N0.getOpcode() == ISD::TRUNCATE &&
26097 N0.getOperand(0).hasOneUse()) {
26098 SDValue N00 = N0.getOperand(0);
26099 if (N00.getOpcode() == X86ISD::SETCC_CARRY) {
26100 return DAG.getNode(ISD::AND, dl, VT,
26101 DAG.getNode(X86ISD::SETCC_CARRY, dl, VT,
26102 N00.getOperand(0), N00.getOperand(1)),
26103 DAG.getConstant(1, VT));
26106 if (VT.is256BitVector()) {
26107 SDValue R = WidenMaskArithmetic(N, DAG, DCI, Subtarget);
26112 // (i8,i32 zext (udivrem (i8 x, i8 y)) ->
26113 // (i8,i32 (udivrem_zext_hreg (i8 x, i8 y)
26114 // This exposes the zext to the udivrem lowering, so that it directly extends
26115 // from AH (which we otherwise need to do contortions to access).
26116 if (N0.getOpcode() == ISD::UDIVREM &&
26117 N0.getResNo() == 1 && N0.getValueType() == MVT::i8 &&
26118 (VT == MVT::i32 || VT == MVT::i64)) {
26119 SDVTList NodeTys = DAG.getVTList(MVT::i8, VT);
26120 SDValue R = DAG.getNode(X86ISD::UDIVREM8_ZEXT_HREG, dl, NodeTys,
26121 N0.getOperand(0), N0.getOperand(1));
26122 DAG.ReplaceAllUsesOfValueWith(N0.getValue(0), R.getValue(0));
26123 return R.getValue(1);
26129 // Optimize x == -y --> x+y == 0
26130 // x != -y --> x+y != 0
26131 static SDValue PerformISDSETCCCombine(SDNode *N, SelectionDAG &DAG,
26132 const X86Subtarget* Subtarget) {
26133 ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get();
26134 SDValue LHS = N->getOperand(0);
26135 SDValue RHS = N->getOperand(1);
26136 EVT VT = N->getValueType(0);
26139 if ((CC == ISD::SETNE || CC == ISD::SETEQ) && LHS.getOpcode() == ISD::SUB)
26140 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(LHS.getOperand(0)))
26141 if (C->getAPIntValue() == 0 && LHS.hasOneUse()) {
26142 SDValue addV = DAG.getNode(ISD::ADD, SDLoc(N),
26143 LHS.getValueType(), RHS, LHS.getOperand(1));
26144 return DAG.getSetCC(SDLoc(N), N->getValueType(0),
26145 addV, DAG.getConstant(0, addV.getValueType()), CC);
26147 if ((CC == ISD::SETNE || CC == ISD::SETEQ) && RHS.getOpcode() == ISD::SUB)
26148 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS.getOperand(0)))
26149 if (C->getAPIntValue() == 0 && RHS.hasOneUse()) {
26150 SDValue addV = DAG.getNode(ISD::ADD, SDLoc(N),
26151 RHS.getValueType(), LHS, RHS.getOperand(1));
26152 return DAG.getSetCC(SDLoc(N), N->getValueType(0),
26153 addV, DAG.getConstant(0, addV.getValueType()), CC);
26156 if (VT.getScalarType() == MVT::i1) {
26157 bool IsSEXT0 = (LHS.getOpcode() == ISD::SIGN_EXTEND) &&
26158 (LHS.getOperand(0).getValueType().getScalarType() == MVT::i1);
26159 bool IsVZero0 = ISD::isBuildVectorAllZeros(LHS.getNode());
26160 if (!IsSEXT0 && !IsVZero0)
26162 bool IsSEXT1 = (RHS.getOpcode() == ISD::SIGN_EXTEND) &&
26163 (RHS.getOperand(0).getValueType().getScalarType() == MVT::i1);
26164 bool IsVZero1 = ISD::isBuildVectorAllZeros(RHS.getNode());
26166 if (!IsSEXT1 && !IsVZero1)
26169 if (IsSEXT0 && IsVZero1) {
26170 assert(VT == LHS.getOperand(0).getValueType() && "Unexpected operand type");
26171 if (CC == ISD::SETEQ)
26172 return DAG.getNOT(DL, LHS.getOperand(0), VT);
26173 return LHS.getOperand(0);
26175 if (IsSEXT1 && IsVZero0) {
26176 assert(VT == RHS.getOperand(0).getValueType() && "Unexpected operand type");
26177 if (CC == ISD::SETEQ)
26178 return DAG.getNOT(DL, RHS.getOperand(0), VT);
26179 return RHS.getOperand(0);
26186 static SDValue PerformINSERTPSCombine(SDNode *N, SelectionDAG &DAG,
26187 const X86Subtarget *Subtarget) {
26189 MVT VT = N->getOperand(1)->getSimpleValueType(0);
26190 assert((VT == MVT::v4f32 || VT == MVT::v4i32) &&
26191 "X86insertps is only defined for v4x32");
26193 SDValue Ld = N->getOperand(1);
26194 if (MayFoldLoad(Ld)) {
26195 // Extract the countS bits from the immediate so we can get the proper
26196 // address when narrowing the vector load to a specific element.
26197 // When the second source op is a memory address, insertps doesn't use
26198 // countS and just gets an f32 from that address.
26199 unsigned DestIndex =
26200 cast<ConstantSDNode>(N->getOperand(2))->getZExtValue() >> 6;
26201 Ld = NarrowVectorLoadToElement(cast<LoadSDNode>(Ld), DestIndex, DAG);
26205 // Create this as a scalar to vector to match the instruction pattern.
26206 SDValue LoadScalarToVector = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Ld);
26207 // countS bits are ignored when loading from memory on insertps, which
26208 // means we don't need to explicitly set them to 0.
26209 return DAG.getNode(X86ISD::INSERTPS, dl, VT, N->getOperand(0),
26210 LoadScalarToVector, N->getOperand(2));
26213 // Helper function of PerformSETCCCombine. It materializes "setb reg"
26214 // as "sbb reg,reg", since that can be extended without a zext and produces
26215 // an all-ones bit which is more useful than 0/1 in some cases.
26216 static SDValue MaterializeSETB(SDLoc DL, SDValue EFLAGS, SelectionDAG &DAG,
26219 return DAG.getNode(ISD::AND, DL, VT,
26220 DAG.getNode(X86ISD::SETCC_CARRY, DL, MVT::i8,
26221 DAG.getConstant(X86::COND_B, MVT::i8), EFLAGS),
26222 DAG.getConstant(1, VT));
26223 assert (VT == MVT::i1 && "Unexpected type for SETCC node");
26224 return DAG.getNode(ISD::TRUNCATE, DL, MVT::i1,
26225 DAG.getNode(X86ISD::SETCC_CARRY, DL, MVT::i8,
26226 DAG.getConstant(X86::COND_B, MVT::i8), EFLAGS));
26229 // Optimize RES = X86ISD::SETCC CONDCODE, EFLAG_INPUT
26230 static SDValue PerformSETCCCombine(SDNode *N, SelectionDAG &DAG,
26231 TargetLowering::DAGCombinerInfo &DCI,
26232 const X86Subtarget *Subtarget) {
26234 X86::CondCode CC = X86::CondCode(N->getConstantOperandVal(0));
26235 SDValue EFLAGS = N->getOperand(1);
26237 if (CC == X86::COND_A) {
26238 // Try to convert COND_A into COND_B in an attempt to facilitate
26239 // materializing "setb reg".
26241 // Do not flip "e > c", where "c" is a constant, because Cmp instruction
26242 // cannot take an immediate as its first operand.
26244 if (EFLAGS.getOpcode() == X86ISD::SUB && EFLAGS.hasOneUse() &&
26245 EFLAGS.getValueType().isInteger() &&
26246 !isa<ConstantSDNode>(EFLAGS.getOperand(1))) {
26247 SDValue NewSub = DAG.getNode(X86ISD::SUB, SDLoc(EFLAGS),
26248 EFLAGS.getNode()->getVTList(),
26249 EFLAGS.getOperand(1), EFLAGS.getOperand(0));
26250 SDValue NewEFLAGS = SDValue(NewSub.getNode(), EFLAGS.getResNo());
26251 return MaterializeSETB(DL, NewEFLAGS, DAG, N->getSimpleValueType(0));
26255 // Materialize "setb reg" as "sbb reg,reg", since it can be extended without
26256 // a zext and produces an all-ones bit which is more useful than 0/1 in some
26258 if (CC == X86::COND_B)
26259 return MaterializeSETB(DL, EFLAGS, DAG, N->getSimpleValueType(0));
26263 Flags = checkBoolTestSetCCCombine(EFLAGS, CC);
26264 if (Flags.getNode()) {
26265 SDValue Cond = DAG.getConstant(CC, MVT::i8);
26266 return DAG.getNode(X86ISD::SETCC, DL, N->getVTList(), Cond, Flags);
26272 // Optimize branch condition evaluation.
26274 static SDValue PerformBrCondCombine(SDNode *N, SelectionDAG &DAG,
26275 TargetLowering::DAGCombinerInfo &DCI,
26276 const X86Subtarget *Subtarget) {
26278 SDValue Chain = N->getOperand(0);
26279 SDValue Dest = N->getOperand(1);
26280 SDValue EFLAGS = N->getOperand(3);
26281 X86::CondCode CC = X86::CondCode(N->getConstantOperandVal(2));
26285 Flags = checkBoolTestSetCCCombine(EFLAGS, CC);
26286 if (Flags.getNode()) {
26287 SDValue Cond = DAG.getConstant(CC, MVT::i8);
26288 return DAG.getNode(X86ISD::BRCOND, DL, N->getVTList(), Chain, Dest, Cond,
26295 static SDValue performVectorCompareAndMaskUnaryOpCombine(SDNode *N,
26296 SelectionDAG &DAG) {
26297 // Take advantage of vector comparisons producing 0 or -1 in each lane to
26298 // optimize away operation when it's from a constant.
26300 // The general transformation is:
26301 // UNARYOP(AND(VECTOR_CMP(x,y), constant)) -->
26302 // AND(VECTOR_CMP(x,y), constant2)
26303 // constant2 = UNARYOP(constant)
26305 // Early exit if this isn't a vector operation, the operand of the
26306 // unary operation isn't a bitwise AND, or if the sizes of the operations
26307 // aren't the same.
26308 EVT VT = N->getValueType(0);
26309 if (!VT.isVector() || N->getOperand(0)->getOpcode() != ISD::AND ||
26310 N->getOperand(0)->getOperand(0)->getOpcode() != ISD::SETCC ||
26311 VT.getSizeInBits() != N->getOperand(0)->getValueType(0).getSizeInBits())
26314 // Now check that the other operand of the AND is a constant. We could
26315 // make the transformation for non-constant splats as well, but it's unclear
26316 // that would be a benefit as it would not eliminate any operations, just
26317 // perform one more step in scalar code before moving to the vector unit.
26318 if (BuildVectorSDNode *BV =
26319 dyn_cast<BuildVectorSDNode>(N->getOperand(0)->getOperand(1))) {
26320 // Bail out if the vector isn't a constant.
26321 if (!BV->isConstant())
26324 // Everything checks out. Build up the new and improved node.
26326 EVT IntVT = BV->getValueType(0);
26327 // Create a new constant of the appropriate type for the transformed
26329 SDValue SourceConst = DAG.getNode(N->getOpcode(), DL, VT, SDValue(BV, 0));
26330 // The AND node needs bitcasts to/from an integer vector type around it.
26331 SDValue MaskConst = DAG.getNode(ISD::BITCAST, DL, IntVT, SourceConst);
26332 SDValue NewAnd = DAG.getNode(ISD::AND, DL, IntVT,
26333 N->getOperand(0)->getOperand(0), MaskConst);
26334 SDValue Res = DAG.getNode(ISD::BITCAST, DL, VT, NewAnd);
26341 static SDValue PerformSINT_TO_FPCombine(SDNode *N, SelectionDAG &DAG,
26342 const X86Subtarget *Subtarget) {
26343 // First try to optimize away the conversion entirely when it's
26344 // conditionally from a constant. Vectors only.
26345 SDValue Res = performVectorCompareAndMaskUnaryOpCombine(N, DAG);
26346 if (Res != SDValue())
26349 // Now move on to more general possibilities.
26350 SDValue Op0 = N->getOperand(0);
26351 EVT InVT = Op0->getValueType(0);
26353 // SINT_TO_FP(v4i8) -> SINT_TO_FP(SEXT(v4i8 to v4i32))
26354 if (InVT == MVT::v8i8 || InVT == MVT::v4i8) {
26356 MVT DstVT = InVT == MVT::v4i8 ? MVT::v4i32 : MVT::v8i32;
26357 SDValue P = DAG.getNode(ISD::SIGN_EXTEND, dl, DstVT, Op0);
26358 return DAG.getNode(ISD::SINT_TO_FP, dl, N->getValueType(0), P);
26361 // Transform (SINT_TO_FP (i64 ...)) into an x87 operation if we have
26362 // a 32-bit target where SSE doesn't support i64->FP operations.
26363 if (Op0.getOpcode() == ISD::LOAD) {
26364 LoadSDNode *Ld = cast<LoadSDNode>(Op0.getNode());
26365 EVT VT = Ld->getValueType(0);
26366 if (!Ld->isVolatile() && !N->getValueType(0).isVector() &&
26367 ISD::isNON_EXTLoad(Op0.getNode()) && Op0.hasOneUse() &&
26368 !Subtarget->is64Bit() && VT == MVT::i64) {
26369 SDValue FILDChain = Subtarget->getTargetLowering()->BuildFILD(
26370 SDValue(N, 0), Ld->getValueType(0), Ld->getChain(), Op0, DAG);
26371 DAG.ReplaceAllUsesOfValueWith(Op0.getValue(1), FILDChain.getValue(1));
26378 // Optimize RES, EFLAGS = X86ISD::ADC LHS, RHS, EFLAGS
26379 static SDValue PerformADCCombine(SDNode *N, SelectionDAG &DAG,
26380 X86TargetLowering::DAGCombinerInfo &DCI) {
26381 // If the LHS and RHS of the ADC node are zero, then it can't overflow and
26382 // the result is either zero or one (depending on the input carry bit).
26383 // Strength reduce this down to a "set on carry" aka SETCC_CARRY&1.
26384 if (X86::isZeroNode(N->getOperand(0)) &&
26385 X86::isZeroNode(N->getOperand(1)) &&
26386 // We don't have a good way to replace an EFLAGS use, so only do this when
26388 SDValue(N, 1).use_empty()) {
26390 EVT VT = N->getValueType(0);
26391 SDValue CarryOut = DAG.getConstant(0, N->getValueType(1));
26392 SDValue Res1 = DAG.getNode(ISD::AND, DL, VT,
26393 DAG.getNode(X86ISD::SETCC_CARRY, DL, VT,
26394 DAG.getConstant(X86::COND_B,MVT::i8),
26396 DAG.getConstant(1, VT));
26397 return DCI.CombineTo(N, Res1, CarryOut);
26403 // fold (add Y, (sete X, 0)) -> adc 0, Y
26404 // (add Y, (setne X, 0)) -> sbb -1, Y
26405 // (sub (sete X, 0), Y) -> sbb 0, Y
26406 // (sub (setne X, 0), Y) -> adc -1, Y
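// These folds work because CMP X, 1 sets the carry flag exactly when X == 0,
// so the ADC/SBB against the other operand applies the +/-1 adjustment that
// the zero-extended setcc would have contributed.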
26407 static SDValue OptimizeConditionalInDecrement(SDNode *N, SelectionDAG &DAG) {
26410 // Look through ZExts.
26411 SDValue Ext = N->getOperand(N->getOpcode() == ISD::SUB ? 1 : 0);
26412 if (Ext.getOpcode() != ISD::ZERO_EXTEND || !Ext.hasOneUse())
26415 SDValue SetCC = Ext.getOperand(0);
26416 if (SetCC.getOpcode() != X86ISD::SETCC || !SetCC.hasOneUse())
26419 X86::CondCode CC = (X86::CondCode)SetCC.getConstantOperandVal(0);
26420 if (CC != X86::COND_E && CC != X86::COND_NE)
26423 SDValue Cmp = SetCC.getOperand(1);
26424 if (Cmp.getOpcode() != X86ISD::CMP || !Cmp.hasOneUse() ||
26425 !X86::isZeroNode(Cmp.getOperand(1)) ||
26426 !Cmp.getOperand(0).getValueType().isInteger())
26429 SDValue CmpOp0 = Cmp.getOperand(0);
26430 SDValue NewCmp = DAG.getNode(X86ISD::CMP, DL, MVT::i32, CmpOp0,
26431 DAG.getConstant(1, CmpOp0.getValueType()));
26433 SDValue OtherVal = N->getOperand(N->getOpcode() == ISD::SUB ? 0 : 1);
26434 if (CC == X86::COND_NE)
26435 return DAG.getNode(N->getOpcode() == ISD::SUB ? X86ISD::ADC : X86ISD::SBB,
26436 DL, OtherVal.getValueType(), OtherVal,
26437 DAG.getConstant(-1ULL, OtherVal.getValueType()), NewCmp);
26438 return DAG.getNode(N->getOpcode() == ISD::SUB ? X86ISD::SBB : X86ISD::ADC,
26439 DL, OtherVal.getValueType(), OtherVal,
26440 DAG.getConstant(0, OtherVal.getValueType()), NewCmp);
26443 /// PerformADDCombine - Do target-specific dag combines on integer adds.
26444 static SDValue PerformAddCombine(SDNode *N, SelectionDAG &DAG,
26445 const X86Subtarget *Subtarget) {
26446 EVT VT = N->getValueType(0);
26447 SDValue Op0 = N->getOperand(0);
26448 SDValue Op1 = N->getOperand(1);
26450 // Try to synthesize horizontal adds from adds of shuffles.
26451 if (((Subtarget->hasSSSE3() && (VT == MVT::v8i16 || VT == MVT::v4i32)) ||
26452 (Subtarget->hasInt256() && (VT == MVT::v16i16 || VT == MVT::v8i32))) &&
26453 isHorizontalBinOp(Op0, Op1, true))
26454 return DAG.getNode(X86ISD::HADD, SDLoc(N), VT, Op0, Op1);
26456 return OptimizeConditionalInDecrement(N, DAG);
26459 static SDValue PerformSubCombine(SDNode *N, SelectionDAG &DAG,
26460 const X86Subtarget *Subtarget) {
26461 SDValue Op0 = N->getOperand(0);
26462 SDValue Op1 = N->getOperand(1);
26464 // X86 can't encode an immediate LHS of a sub. See if we can push the
26465 // negation into a preceding instruction.
26466 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op0)) {
26467 // If the RHS of the sub is a XOR with one use and a constant, invert the
26468 // immediate. Then add one to the LHS of the sub so we can turn
26469 // X-Y -> X+~Y+1, saving one register.
26470 if (Op1->hasOneUse() && Op1.getOpcode() == ISD::XOR &&
26471 isa<ConstantSDNode>(Op1.getOperand(1))) {
26472 APInt XorC = cast<ConstantSDNode>(Op1.getOperand(1))->getAPIntValue();
26473 EVT VT = Op0.getValueType();
26474 SDValue NewXor = DAG.getNode(ISD::XOR, SDLoc(Op1), VT,
26476 DAG.getConstant(~XorC, VT));
26477 return DAG.getNode(ISD::ADD, SDLoc(N), VT, NewXor,
26478 DAG.getConstant(C->getAPIntValue()+1, VT));
26482 // Try to synthesize horizontal adds from adds of shuffles.
26483 EVT VT = N->getValueType(0);
26484 if (((Subtarget->hasSSSE3() && (VT == MVT::v8i16 || VT == MVT::v4i32)) ||
26485 (Subtarget->hasInt256() && (VT == MVT::v16i16 || VT == MVT::v8i32))) &&
26486 isHorizontalBinOp(Op0, Op1, true))
26487 return DAG.getNode(X86ISD::HSUB, SDLoc(N), VT, Op0, Op1);
26489 return OptimizeConditionalInDecrement(N, DAG);
26492 /// performVZEXTCombine - Do target-specific dag combines on X86ISD::VZEXT nodes.
26493 static SDValue performVZEXTCombine(SDNode *N, SelectionDAG &DAG,
26494 TargetLowering::DAGCombinerInfo &DCI,
26495 const X86Subtarget *Subtarget) {
26497 MVT VT = N->getSimpleValueType(0);
26498 SDValue Op = N->getOperand(0);
26499 MVT OpVT = Op.getSimpleValueType();
26500 MVT OpEltVT = OpVT.getVectorElementType();
26501 unsigned InputBits = OpEltVT.getSizeInBits() * VT.getVectorNumElements();
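  // InputBits is how many low bits of the source vector the vzext actually
  // consumes, e.g. a vzext producing v4i32 from byte elements reads only
  // 4 * 8 = 32 bits of its input.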
26503 // (vzext (bitcast (vzext (x)) -> (vzext x)
26505 while (V.getOpcode() == ISD::BITCAST)
26506 V = V.getOperand(0);
26508 if (V != Op && V.getOpcode() == X86ISD::VZEXT) {
26509 MVT InnerVT = V.getSimpleValueType();
26510 MVT InnerEltVT = InnerVT.getVectorElementType();
26512 // If the element sizes match exactly, we can just do one larger vzext. This
26513 // is always an exact type match as vzext operates on integer types.
26514 if (OpEltVT == InnerEltVT) {
26515 assert(OpVT == InnerVT && "Types must match for vzext!");
26516 return DAG.getNode(X86ISD::VZEXT, DL, VT, V.getOperand(0));
26519 // The only other way we can combine them is if only a single element of the
26520 // inner vzext is used in the input to the outer vzext.
26521 if (InnerEltVT.getSizeInBits() < InputBits)
26524 // In this case, the inner vzext is completely dead because we're going to
26525 // only look at bits inside of the low element. Just do the outer vzext on
26526 // a bitcast of the input to the inner.
26527 return DAG.getNode(X86ISD::VZEXT, DL, VT,
26528 DAG.getNode(ISD::BITCAST, DL, OpVT, V));
26531 // Check if we can bypass extracting and re-inserting an element of an input
26532 // vector. Essentially:
26533 // (bitcast (sclr2vec (ext_vec_elt x))) -> (bitcast x)
26534 if (V.getOpcode() == ISD::SCALAR_TO_VECTOR &&
26535 V.getOperand(0).getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
26536 V.getOperand(0).getSimpleValueType().getSizeInBits() == InputBits) {
26537 SDValue ExtractedV = V.getOperand(0);
26538 SDValue OrigV = ExtractedV.getOperand(0);
26539 if (auto *ExtractIdx = dyn_cast<ConstantSDNode>(ExtractedV.getOperand(1)))
26540 if (ExtractIdx->getZExtValue() == 0) {
26541 MVT OrigVT = OrigV.getSimpleValueType();
26542 // Extract a subvector if necessary...
26543 if (OrigVT.getSizeInBits() > OpVT.getSizeInBits()) {
26544 int Ratio = OrigVT.getSizeInBits() / OpVT.getSizeInBits();
26545 OrigVT = MVT::getVectorVT(OrigVT.getVectorElementType(),
26546 OrigVT.getVectorNumElements() / Ratio);
26547 OrigV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, OrigVT, OrigV,
26548 DAG.getIntPtrConstant(0));
26550 Op = DAG.getNode(ISD::BITCAST, DL, OpVT, OrigV);
26551 return DAG.getNode(X86ISD::VZEXT, DL, VT, Op);
26558 SDValue X86TargetLowering::PerformDAGCombine(SDNode *N,
26559 DAGCombinerInfo &DCI) const {
26560 SelectionDAG &DAG = DCI.DAG;
26561 switch (N->getOpcode()) {
26563 case ISD::EXTRACT_VECTOR_ELT:
26564 return PerformEXTRACT_VECTOR_ELTCombine(N, DAG, DCI);
26567 case X86ISD::SHRUNKBLEND:
26568 return PerformSELECTCombine(N, DAG, DCI, Subtarget);
26569 case ISD::BITCAST: return PerformBITCASTCombine(N, DAG);
26570 case X86ISD::CMOV: return PerformCMOVCombine(N, DAG, DCI, Subtarget);
26571 case ISD::ADD: return PerformAddCombine(N, DAG, Subtarget);
26572 case ISD::SUB: return PerformSubCombine(N, DAG, Subtarget);
26573 case X86ISD::ADC: return PerformADCCombine(N, DAG, DCI);
26574 case ISD::MUL: return PerformMulCombine(N, DAG, DCI);
26577 case ISD::SRL: return PerformShiftCombine(N, DAG, DCI, Subtarget);
26578 case ISD::AND: return PerformAndCombine(N, DAG, DCI, Subtarget);
26579 case ISD::OR: return PerformOrCombine(N, DAG, DCI, Subtarget);
26580 case ISD::XOR: return PerformXorCombine(N, DAG, DCI, Subtarget);
26581 case ISD::LOAD: return PerformLOADCombine(N, DAG, DCI, Subtarget);
26582 case ISD::MLOAD: return PerformMLOADCombine(N, DAG, DCI, Subtarget);
26583 case ISD::STORE: return PerformSTORECombine(N, DAG, Subtarget);
26584 case ISD::MSTORE: return PerformMSTORECombine(N, DAG, Subtarget);
26585 case ISD::SINT_TO_FP: return PerformSINT_TO_FPCombine(N, DAG, Subtarget);
26586 case ISD::FADD: return PerformFADDCombine(N, DAG, Subtarget);
26587 case ISD::FSUB: return PerformFSUBCombine(N, DAG, Subtarget);
26589 case X86ISD::FOR: return PerformFORCombine(N, DAG);
26591 case X86ISD::FMAX: return PerformFMinFMaxCombine(N, DAG);
26592 case X86ISD::FAND: return PerformFANDCombine(N, DAG);
26593 case X86ISD::FANDN: return PerformFANDNCombine(N, DAG);
26594 case X86ISD::BT: return PerformBTCombine(N, DAG, DCI);
26595 case X86ISD::VZEXT_MOVL: return PerformVZEXT_MOVLCombine(N, DAG);
26596 case ISD::ANY_EXTEND:
26597 case ISD::ZERO_EXTEND: return PerformZExtCombine(N, DAG, DCI, Subtarget);
26598 case ISD::SIGN_EXTEND: return PerformSExtCombine(N, DAG, DCI, Subtarget);
26599 case ISD::SIGN_EXTEND_INREG:
26600 return PerformSIGN_EXTEND_INREGCombine(N, DAG, Subtarget);
26601 case ISD::TRUNCATE: return PerformTruncateCombine(N, DAG,DCI,Subtarget);
26602 case ISD::SETCC: return PerformISDSETCCCombine(N, DAG, Subtarget);
26603 case X86ISD::SETCC: return PerformSETCCCombine(N, DAG, DCI, Subtarget);
26604 case X86ISD::BRCOND: return PerformBrCondCombine(N, DAG, DCI, Subtarget);
26605 case X86ISD::VZEXT: return performVZEXTCombine(N, DAG, DCI, Subtarget);
26606 case X86ISD::SHUFP: // Handle all target specific shuffles
26607 case X86ISD::PALIGNR:
26608 case X86ISD::UNPCKH:
26609 case X86ISD::UNPCKL:
26610 case X86ISD::MOVHLPS:
26611 case X86ISD::MOVLHPS:
26612 case X86ISD::PSHUFB:
26613 case X86ISD::PSHUFD:
26614 case X86ISD::PSHUFHW:
26615 case X86ISD::PSHUFLW:
26616 case X86ISD::MOVSS:
26617 case X86ISD::MOVSD:
26618 case X86ISD::VPERMILPI:
26619 case X86ISD::VPERM2X128:
26620 case ISD::VECTOR_SHUFFLE: return PerformShuffleCombine(N, DAG, DCI,Subtarget);
26621 case ISD::FMA: return PerformFMACombine(N, DAG, Subtarget);
26622 case ISD::INTRINSIC_WO_CHAIN:
26623 return PerformINTRINSIC_WO_CHAINCombine(N, DAG, Subtarget);
26624 case X86ISD::INSERTPS: {
26625 if (getTargetMachine().getOptLevel() > CodeGenOpt::None)
26626 return PerformINSERTPSCombine(N, DAG, Subtarget);
26629 case ISD::BUILD_VECTOR: return PerformBUILD_VECTORCombine(N, DAG, Subtarget);
26635 /// isTypeDesirableForOp - Return true if the target has native support for
26636 /// the specified value type and it is 'desirable' to use the type for the
26637 /// given node type. E.g., on x86, i16 is legal but undesirable since i16
26638 /// instruction encodings are longer and some i16 instructions are slow.
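/// For instance, most i16 arithmetic needs the 0x66 operand-size prefix in
/// 32/64-bit mode, and writes to 16-bit subregisters can cause
/// partial-register stalls, so i32 is generally the better choice.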
26639 bool X86TargetLowering::isTypeDesirableForOp(unsigned Opc, EVT VT) const {
26640 if (!isTypeLegal(VT))
26642 if (VT != MVT::i16)
26649 case ISD::SIGN_EXTEND:
26650 case ISD::ZERO_EXTEND:
26651 case ISD::ANY_EXTEND:
26664 /// IsDesirableToPromoteOp - This method queries the target whether it is
26665 /// beneficial for the DAG combiner to promote the specified node. If true, it
26666 /// should return the desired promotion type by reference.
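/// For example (illustrative), an i16 shift whose result is neither stored nor
/// folded with a load is typically promoted to an i32 shift followed by a
/// truncate, while the cases below avoid promotions that would break load or
/// store folding.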
26667 bool X86TargetLowering::IsDesirableToPromoteOp(SDValue Op, EVT &PVT) const {
26668 EVT VT = Op.getValueType();
26669 if (VT != MVT::i16)
26672 bool Promote = false;
26673 bool Commute = false;
26674 switch (Op.getOpcode()) {
26677 LoadSDNode *LD = cast<LoadSDNode>(Op);
26678 // If the non-extending load has a single use and it's not live out, then it
26679 // might be folded.
26680 if (LD->getExtensionType() == ISD::NON_EXTLOAD /*&&
26681 Op.hasOneUse()*/) {
26682 for (SDNode::use_iterator UI = Op.getNode()->use_begin(),
26683 UE = Op.getNode()->use_end(); UI != UE; ++UI) {
26684 // The only case where we'd want to promote LOAD (rather than it being
26685 // promoted as an operand) is when its only use is live out.
26686 if (UI->getOpcode() != ISD::CopyToReg)
26693 case ISD::SIGN_EXTEND:
26694 case ISD::ZERO_EXTEND:
26695 case ISD::ANY_EXTEND:
26700 SDValue N0 = Op.getOperand(0);
26701 // Look out for (store (shl (load), x)).
26702 if (MayFoldLoad(N0) && MayFoldIntoStore(Op))
26715 SDValue N0 = Op.getOperand(0);
26716 SDValue N1 = Op.getOperand(1);
26717 if (!Commute && MayFoldLoad(N1))
26719 // Avoid disabling potential load folding opportunities.
26720 if (MayFoldLoad(N0) && (!isa<ConstantSDNode>(N1) || MayFoldIntoStore(Op)))
26722 if (MayFoldLoad(N1) && (!isa<ConstantSDNode>(N0) || MayFoldIntoStore(Op)))
26732 //===----------------------------------------------------------------------===//
26733 // X86 Inline Assembly Support
26734 //===----------------------------------------------------------------------===//
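// The hooks in this section recognize well-known byte-swap inline-asm idioms
// and rewrite them as llvm.bswap intrinsic calls, and map GCC-style inline-asm
// constraint letters onto x86 register classes and immediate ranges.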
26737 // Helper to match a string against a sequence of pieces separated by whitespace.
26738 bool matchAsmImpl(StringRef s, ArrayRef<const StringRef *> args) {
26739 s = s.substr(s.find_first_not_of(" \t")); // Skip leading whitespace.
26741 for (unsigned i = 0, e = args.size(); i != e; ++i) {
26742 StringRef piece(*args[i]);
26743 if (!s.startswith(piece)) // Check if the piece matches.
26746 s = s.substr(piece.size());
26747 StringRef::size_type pos = s.find_first_not_of(" \t");
26748 if (pos == 0) // We matched a prefix.
26756 const VariadicFunction1<bool, StringRef, StringRef, matchAsmImpl> matchAsm={};
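// Usage sketch: matchAsm(Piece, "bswap", "$0") is true when Piece consists of
// exactly those tokens, in order, separated only by whitespace.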
26759 static bool clobbersFlagRegisters(const SmallVector<StringRef, 4> &AsmPieces) {
26761 if (AsmPieces.size() == 3 || AsmPieces.size() == 4) {
26762 if (std::count(AsmPieces.begin(), AsmPieces.end(), "~{cc}") &&
26763 std::count(AsmPieces.begin(), AsmPieces.end(), "~{flags}") &&
26764 std::count(AsmPieces.begin(), AsmPieces.end(), "~{fpsr}")) {
26766 if (AsmPieces.size() == 3)
26768 else if (std::count(AsmPieces.begin(), AsmPieces.end(), "~{dirflag}"))
26775 bool X86TargetLowering::ExpandInlineAsm(CallInst *CI) const {
26776 InlineAsm *IA = cast<InlineAsm>(CI->getCalledValue());
26778 std::string AsmStr = IA->getAsmString();
26780 IntegerType *Ty = dyn_cast<IntegerType>(CI->getType());
26781 if (!Ty || Ty->getBitWidth() % 16 != 0)
26784 // TODO: should remove alternatives from the asmstring: "foo {a|b}" -> "foo a"
26785 SmallVector<StringRef, 4> AsmPieces;
26786 SplitString(AsmStr, AsmPieces, ";\n");
26788 switch (AsmPieces.size()) {
26789 default: return false;
26791 // FIXME: this should verify that we are targeting a 486 or better. If not,
26792 // we will turn this bswap into something that will be lowered to logical
26793 // ops instead of emitting the bswap asm. For now, we don't support 486 or
26794 // lower so don't worry about this.
26796 if (matchAsm(AsmPieces[0], "bswap", "$0") ||
26797 matchAsm(AsmPieces[0], "bswapl", "$0") ||
26798 matchAsm(AsmPieces[0], "bswapq", "$0") ||
26799 matchAsm(AsmPieces[0], "bswap", "${0:q}") ||
26800 matchAsm(AsmPieces[0], "bswapl", "${0:q}") ||
26801 matchAsm(AsmPieces[0], "bswapq", "${0:q}")) {
26802 // No need to check constraints, nothing other than the equivalent of
26803 // "=r,0" would be valid here.
26804 return IntrinsicLowering::LowerToByteSwap(CI);
26807 // rorw $$8, ${0:w} --> llvm.bswap.i16
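// A typical source-level form of this idiom (illustrative) is:
//   unsigned short v;
//   asm("rorw $8, %w0" : "=r"(v) : "0"(v) : "cc");
// which, after the clobber check below, becomes a call to llvm.bswap.i16.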
26808 if (CI->getType()->isIntegerTy(16) &&
26809 IA->getConstraintString().compare(0, 5, "=r,0,") == 0 &&
26810 (matchAsm(AsmPieces[0], "rorw", "$$8,", "${0:w}") ||
26811 matchAsm(AsmPieces[0], "rolw", "$$8,", "${0:w}"))) {
26813 const std::string &ConstraintsStr = IA->getConstraintString();
26814 SplitString(StringRef(ConstraintsStr).substr(5), AsmPieces, ",");
26815 array_pod_sort(AsmPieces.begin(), AsmPieces.end());
26816 if (clobbersFlagRegisters(AsmPieces))
26817 return IntrinsicLowering::LowerToByteSwap(CI);
26821 if (CI->getType()->isIntegerTy(32) &&
26822 IA->getConstraintString().compare(0, 5, "=r,0,") == 0 &&
26823 matchAsm(AsmPieces[0], "rorw", "$$8,", "${0:w}") &&
26824 matchAsm(AsmPieces[1], "rorl", "$$16,", "$0") &&
26825 matchAsm(AsmPieces[2], "rorw", "$$8,", "${0:w}")) {
26827 const std::string &ConstraintsStr = IA->getConstraintString();
26828 SplitString(StringRef(ConstraintsStr).substr(5), AsmPieces, ",");
26829 array_pod_sort(AsmPieces.begin(), AsmPieces.end());
26830 if (clobbersFlagRegisters(AsmPieces))
26831 return IntrinsicLowering::LowerToByteSwap(CI);
26834 if (CI->getType()->isIntegerTy(64)) {
26835 InlineAsm::ConstraintInfoVector Constraints = IA->ParseConstraints();
26836 if (Constraints.size() >= 2 &&
26837 Constraints[0].Codes.size() == 1 && Constraints[0].Codes[0] == "A" &&
26838 Constraints[1].Codes.size() == 1 && Constraints[1].Codes[0] == "0") {
26839 // bswap %eax / bswap %edx / xchgl %eax, %edx -> llvm.bswap.i64
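// That is, 32-bit code byte-swapping a 64-bit value held in the edx:eax pair
// (the "A" constraint), which is equivalent to a single llvm.bswap.i64.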
26840 if (matchAsm(AsmPieces[0], "bswap", "%eax") &&
26841 matchAsm(AsmPieces[1], "bswap", "%edx") &&
26842 matchAsm(AsmPieces[2], "xchgl", "%eax,", "%edx"))
26843 return IntrinsicLowering::LowerToByteSwap(CI);
26851 /// getConstraintType - Given a constraint letter, return the type of
26852 /// constraint it is for this target.
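/// For example, letters such as 'x' or 'y' name register classes, letters such
/// as 'a' or 'd' name specific registers, and the immediate-range letters
/// ('I', 'K', ...) are classified as "other" and validated later in
/// LowerAsmOperandForConstraint.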
26853 X86TargetLowering::ConstraintType
26854 X86TargetLowering::getConstraintType(const std::string &Constraint) const {
26855 if (Constraint.size() == 1) {
26856 switch (Constraint[0]) {
26867 return C_RegisterClass;
26891 return TargetLowering::getConstraintType(Constraint);
26894 /// Examine constraint type and operand type and determine a weight value.
26895 /// This object must already have been set up with the operand type
26896 /// and the current alternative constraint selected.
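/// For example, an 'I' constraint paired with a constant in the range 0..31
/// scores CW_Constant below, whereas an operand that cannot satisfy the
/// constraint keeps CW_Invalid so another alternative can be selected.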
26897 TargetLowering::ConstraintWeight
26898 X86TargetLowering::getSingleConstraintMatchWeight(
26899 AsmOperandInfo &info, const char *constraint) const {
26900 ConstraintWeight weight = CW_Invalid;
26901 Value *CallOperandVal = info.CallOperandVal;
26902 // If we don't have a value, we can't do a match,
26903 // but allow it at the lowest weight.
26904 if (!CallOperandVal)
26906 Type *type = CallOperandVal->getType();
26907 // Look at the constraint type.
26908 switch (*constraint) {
26910 weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
26921 if (CallOperandVal->getType()->isIntegerTy())
26922 weight = CW_SpecificReg;
26927 if (type->isFloatingPointTy())
26928 weight = CW_SpecificReg;
26931 if (type->isX86_MMXTy() && Subtarget->hasMMX())
26932 weight = CW_SpecificReg;
26936 if (((type->getPrimitiveSizeInBits() == 128) && Subtarget->hasSSE1()) ||
26937 ((type->getPrimitiveSizeInBits() == 256) && Subtarget->hasFp256()))
26938 weight = CW_Register;
26941 if (ConstantInt *C = dyn_cast<ConstantInt>(info.CallOperandVal)) {
26942 if (C->getZExtValue() <= 31)
26943 weight = CW_Constant;
26947 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
26948 if (C->getZExtValue() <= 63)
26949 weight = CW_Constant;
26953 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
26954 if ((C->getSExtValue() >= -0x80) && (C->getSExtValue() <= 0x7f))
26955 weight = CW_Constant;
26959 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
26960 if ((C->getZExtValue() == 0xff) || (C->getZExtValue() == 0xffff))
26961 weight = CW_Constant;
26965 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
26966 if (C->getZExtValue() <= 3)
26967 weight = CW_Constant;
26971 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
26972 if (C->getZExtValue() <= 0xff)
26973 weight = CW_Constant;
26978 if (dyn_cast<ConstantFP>(CallOperandVal)) {
26979 weight = CW_Constant;
26983 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
26984 if ((C->getSExtValue() >= -0x80000000LL) &&
26985 (C->getSExtValue() <= 0x7fffffffLL))
26986 weight = CW_Constant;
26990 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
26991 if (C->getZExtValue() <= 0xffffffff)
26992 weight = CW_Constant;
26999 /// LowerXConstraint - try to replace an X constraint, which matches anything,
27000 /// with another that has more specific requirements based on the type of the
27001 /// corresponding operand.
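/// For example, a floating-point operand constrained with 'X' is steered to an
/// SSE register class when SSE is available rather than to the x87 stack that
/// the generic 'f' constraint would imply.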
27002 const char *X86TargetLowering::
27003 LowerXConstraint(EVT ConstraintVT) const {
27004 // FP X constraints get lowered to SSE1/2 registers if available, otherwise
27005 // 'f' like normal targets.
27006 if (ConstraintVT.isFloatingPoint()) {
27007 if (Subtarget->hasSSE2())
27009 if (Subtarget->hasSSE1())
27013 return TargetLowering::LowerXConstraint(ConstraintVT);
27016 /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
27017 /// vector. If it is invalid, don't add anything to Ops.
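/// For example (illustrative), an operand tied to the 'I' constraint is only
/// materialized as a target constant below when its value fits in 0..31; an
/// out-of-range value leaves Ops untouched and the asm operand is rejected.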
27018 void X86TargetLowering::LowerAsmOperandForConstraint(SDValue Op,
27019 std::string &Constraint,
27020 std::vector<SDValue>&Ops,
27021 SelectionDAG &DAG) const {
27024 // Only support length 1 constraints for now.
27025 if (Constraint.length() > 1) return;
27027 char ConstraintLetter = Constraint[0];
27028 switch (ConstraintLetter) {
27031 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
27032 if (C->getZExtValue() <= 31) {
27033 Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
27039 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
27040 if (C->getZExtValue() <= 63) {
27041 Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
27047 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
27048 if (isInt<8>(C->getSExtValue())) {
27049 Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
27055 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
27056 if (C->getZExtValue() == 0xff || C->getZExtValue() == 0xffff ||
27057 (Subtarget->is64Bit() && C->getZExtValue() == 0xffffffff)) {
27058 Result = DAG.getTargetConstant(C->getSExtValue(), Op.getValueType());
27064 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
27065 if (C->getZExtValue() <= 3) {
27066 Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
27072 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
27073 if (C->getZExtValue() <= 255) {
27074 Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
27080 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
27081 if (C->getZExtValue() <= 127) {
27082 Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
27088 // 32-bit signed value
27089 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
27090 if (ConstantInt::isValueValidForType(Type::getInt32Ty(*DAG.getContext()),
27091 C->getSExtValue())) {
27092 // Widen to 64 bits here to get it sign extended.
27093 Result = DAG.getTargetConstant(C->getSExtValue(), MVT::i64);
27096 // FIXME gcc accepts some relocatable values here too, but only in certain
27097 // memory models; it's complicated.
27102 // 32-bit unsigned value
27103 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
27104 if (ConstantInt::isValueValidForType(Type::getInt32Ty(*DAG.getContext()),
27105 C->getZExtValue())) {
27106 Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
27110 // FIXME gcc accepts some relocatable values here too, but only in certain
27111 // memory models; it's complicated.
27115 // Literal immediates are always ok.
27116 if (ConstantSDNode *CST = dyn_cast<ConstantSDNode>(Op)) {
27117 // Widen to 64 bits here to get it sign extended.
27118 Result = DAG.getTargetConstant(CST->getSExtValue(), MVT::i64);
27122 // In any sort of PIC mode addresses need to be computed at runtime by
27123 // adding in a register or some sort of table lookup. These can't
27124 // be used as immediates.
27125 if (Subtarget->isPICStyleGOT() || Subtarget->isPICStyleStubPIC())
27128 // If we are in non-pic codegen mode, we allow the address of a global (with
27129 // an optional displacement) to be used with 'i'.
27130 GlobalAddressSDNode *GA = nullptr;
27131 int64_t Offset = 0;
27133 // Match either (GA), (GA+C), (GA+C1+C2), etc.
27135 if ((GA = dyn_cast<GlobalAddressSDNode>(Op))) {
27136 Offset += GA->getOffset();
27138 } else if (Op.getOpcode() == ISD::ADD) {
27139 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
27140 Offset += C->getZExtValue();
27141 Op = Op.getOperand(0);
27144 } else if (Op.getOpcode() == ISD::SUB) {
27145 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
27146 Offset += -C->getZExtValue();
27147 Op = Op.getOperand(0);
27152 // Otherwise, this isn't something we can handle; reject it.
27156 const GlobalValue *GV = GA->getGlobal();
27157 // If we require an extra load to get this address, as in PIC mode, we
27158 // can't accept it.
27159 if (isGlobalStubReference(
27160 Subtarget->ClassifyGlobalReference(GV, DAG.getTarget())))
27163 Result = DAG.getTargetGlobalAddress(GV, SDLoc(Op),
27164 GA->getValueType(0), Offset);
27169 if (Result.getNode()) {
27170 Ops.push_back(Result);
27173 return TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
27176 std::pair<unsigned, const TargetRegisterClass*>
27177 X86TargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
27179 // First, see if this is a constraint that directly corresponds to an LLVM
27181 if (Constraint.size() == 1) {
27182 // GCC Constraint Letters
27183 switch (Constraint[0]) {
27185 // TODO: Slight differences here in allocation order and leaving
27186 // RIP in the class. Do they matter any more here than they do
27187 // in the normal allocation?
27188 case 'q': // GENERAL_REGS in 64-bit mode, Q_REGS in 32-bit mode.
27189 if (Subtarget->is64Bit()) {
27190 if (VT == MVT::i32 || VT == MVT::f32)
27191 return std::make_pair(0U, &X86::GR32RegClass);
27192 if (VT == MVT::i16)
27193 return std::make_pair(0U, &X86::GR16RegClass);
27194 if (VT == MVT::i8 || VT == MVT::i1)
27195 return std::make_pair(0U, &X86::GR8RegClass);
27196 if (VT == MVT::i64 || VT == MVT::f64)
27197 return std::make_pair(0U, &X86::GR64RegClass);
27200 // 32-bit fallthrough
27201 case 'Q': // Q_REGS
27202 if (VT == MVT::i32 || VT == MVT::f32)
27203 return std::make_pair(0U, &X86::GR32_ABCDRegClass);
27204 if (VT == MVT::i16)
27205 return std::make_pair(0U, &X86::GR16_ABCDRegClass);
27206 if (VT == MVT::i8 || VT == MVT::i1)
27207 return std::make_pair(0U, &X86::GR8_ABCD_LRegClass);
27208 if (VT == MVT::i64)
27209 return std::make_pair(0U, &X86::GR64_ABCDRegClass);
27211 case 'r': // GENERAL_REGS
27212 case 'l': // INDEX_REGS
27213 if (VT == MVT::i8 || VT == MVT::i1)
27214 return std::make_pair(0U, &X86::GR8RegClass);
27215 if (VT == MVT::i16)
27216 return std::make_pair(0U, &X86::GR16RegClass);
27217 if (VT == MVT::i32 || VT == MVT::f32 || !Subtarget->is64Bit())
27218 return std::make_pair(0U, &X86::GR32RegClass);
27219 return std::make_pair(0U, &X86::GR64RegClass);
27220 case 'R': // LEGACY_REGS
27221 if (VT == MVT::i8 || VT == MVT::i1)
27222 return std::make_pair(0U, &X86::GR8_NOREXRegClass);
27223 if (VT == MVT::i16)
27224 return std::make_pair(0U, &X86::GR16_NOREXRegClass);
27225 if (VT == MVT::i32 || !Subtarget->is64Bit())
27226 return std::make_pair(0U, &X86::GR32_NOREXRegClass);
27227 return std::make_pair(0U, &X86::GR64_NOREXRegClass);
27228 case 'f': // FP Stack registers.
27229 // If SSE is enabled for this VT, use f80 to ensure the isel moves the
27230 // value to the correct fpstack register class.
27231 if (VT == MVT::f32 && !isScalarFPTypeInSSEReg(VT))
27232 return std::make_pair(0U, &X86::RFP32RegClass);
27233 if (VT == MVT::f64 && !isScalarFPTypeInSSEReg(VT))
27234 return std::make_pair(0U, &X86::RFP64RegClass);
27235 return std::make_pair(0U, &X86::RFP80RegClass);
27236 case 'y': // MMX_REGS if MMX allowed.
27237 if (!Subtarget->hasMMX()) break;
27238 return std::make_pair(0U, &X86::VR64RegClass);
27239 case 'Y': // SSE_REGS if SSE2 allowed
27240 if (!Subtarget->hasSSE2()) break;
27242 case 'x': // SSE_REGS if SSE1 allowed or AVX_REGS if AVX allowed
27243 if (!Subtarget->hasSSE1()) break;
27245 switch (VT.SimpleTy) {
27247 // Scalar SSE types.
27250 return std::make_pair(0U, &X86::FR32RegClass);
27253 return std::make_pair(0U, &X86::FR64RegClass);
27261 return std::make_pair(0U, &X86::VR128RegClass);
27269 return std::make_pair(0U, &X86::VR256RegClass);
27274 return std::make_pair(0U, &X86::VR512RegClass);
27280 // Use the default implementation in TargetLowering to convert the register
27281 // constraint into a member of a register class.
27282 std::pair<unsigned, const TargetRegisterClass*> Res;
27283 Res = TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);
27285 // Not found as a standard register?
27287 // Map st(0) -> st(7) -> ST0
27288 if (Constraint.size() == 7 && Constraint[0] == '{' &&
27289 tolower(Constraint[1]) == 's' &&
27290 tolower(Constraint[2]) == 't' &&
27291 Constraint[3] == '(' &&
27292 (Constraint[4] >= '0' && Constraint[4] <= '7') &&
27293 Constraint[5] == ')' &&
27294 Constraint[6] == '}') {
27296 Res.first = X86::FP0+Constraint[4]-'0';
27297 Res.second = &X86::RFP80RegClass;
27301 // GCC allows "st(0)" to be called just plain "st".
27302 if (StringRef("{st}").equals_lower(Constraint)) {
27303 Res.first = X86::FP0;
27304 Res.second = &X86::RFP80RegClass;
27309 if (StringRef("{flags}").equals_lower(Constraint)) {
27310 Res.first = X86::EFLAGS;
27311 Res.second = &X86::CCRRegClass;
27315 // 'A' means EAX + EDX.
27316 if (Constraint == "A") {
27317 Res.first = X86::EAX;
27318 Res.second = &X86::GR32_ADRegClass;
27324 // Otherwise, check to see if this is a register class of the wrong value
27326 // type. For example, we want to map "{ax},i32" -> {eax}; we don't want it to
27326 // turn into {ax},{dx}.
27327 if (Res.second->hasType(VT))
27328 return Res; // Correct type already, nothing to do.
27330 // All of the single-register GCC register classes map their values onto
27331 // 16-bit register pieces "ax","dx","cx","bx","si","di","bp","sp". If we
27332 // really want an 8-bit or 32-bit register, map to the appropriate register
27333 // class and return the appropriate register.
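// For example, "{ax}" used with an i32 operand first resolves to AX in GR16;
// the code below rewrites that to EAX in GR32 so the operand keeps its full
// width.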
27334 if (Res.second == &X86::GR16RegClass) {
27335 if (VT == MVT::i8 || VT == MVT::i1) {
27336 unsigned DestReg = 0;
27337 switch (Res.first) {
27339 case X86::AX: DestReg = X86::AL; break;
27340 case X86::DX: DestReg = X86::DL; break;
27341 case X86::CX: DestReg = X86::CL; break;
27342 case X86::BX: DestReg = X86::BL; break;
27345 Res.first = DestReg;
27346 Res.second = &X86::GR8RegClass;
27348 } else if (VT == MVT::i32 || VT == MVT::f32) {
27349 unsigned DestReg = 0;
27350 switch (Res.first) {
27352 case X86::AX: DestReg = X86::EAX; break;
27353 case X86::DX: DestReg = X86::EDX; break;
27354 case X86::CX: DestReg = X86::ECX; break;
27355 case X86::BX: DestReg = X86::EBX; break;
27356 case X86::SI: DestReg = X86::ESI; break;
27357 case X86::DI: DestReg = X86::EDI; break;
27358 case X86::BP: DestReg = X86::EBP; break;
27359 case X86::SP: DestReg = X86::ESP; break;
27362 Res.first = DestReg;
27363 Res.second = &X86::GR32RegClass;
27365 } else if (VT == MVT::i64 || VT == MVT::f64) {
27366 unsigned DestReg = 0;
27367 switch (Res.first) {
27369 case X86::AX: DestReg = X86::RAX; break;
27370 case X86::DX: DestReg = X86::RDX; break;
27371 case X86::CX: DestReg = X86::RCX; break;
27372 case X86::BX: DestReg = X86::RBX; break;
27373 case X86::SI: DestReg = X86::RSI; break;
27374 case X86::DI: DestReg = X86::RDI; break;
27375 case X86::BP: DestReg = X86::RBP; break;
27376 case X86::SP: DestReg = X86::RSP; break;
27379 Res.first = DestReg;
27380 Res.second = &X86::GR64RegClass;
27383 } else if (Res.second == &X86::FR32RegClass ||
27384 Res.second == &X86::FR64RegClass ||
27385 Res.second == &X86::VR128RegClass ||
27386 Res.second == &X86::VR256RegClass ||
27387 Res.second == &X86::FR32XRegClass ||
27388 Res.second == &X86::FR64XRegClass ||
27389 Res.second == &X86::VR128XRegClass ||
27390 Res.second == &X86::VR256XRegClass ||
27391 Res.second == &X86::VR512RegClass) {
27392 // Handle references to XMM physical registers that got mapped into the
27393 // wrong class. This can happen with constraints like {xmm0} where the
27394 // target-independent register mapper will just pick the first match it can
27395 // find, ignoring the required type.
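// For example, "{xmm0}" with an f64 operand can come back in a class chosen
// without regard to type; the code below forces it into FR64 so the value
// stays in the scalar double class.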
27397 if (VT == MVT::f32 || VT == MVT::i32)
27398 Res.second = &X86::FR32RegClass;
27399 else if (VT == MVT::f64 || VT == MVT::i64)
27400 Res.second = &X86::FR64RegClass;
27401 else if (X86::VR128RegClass.hasType(VT))
27402 Res.second = &X86::VR128RegClass;
27403 else if (X86::VR256RegClass.hasType(VT))
27404 Res.second = &X86::VR256RegClass;
27405 else if (X86::VR512RegClass.hasType(VT))
27406 Res.second = &X86::VR512RegClass;
27412 int X86TargetLowering::getScalingFactorCost(const AddrMode &AM,
27414 // Scaling factors are not free at all.
27415 // An indexed folded instruction, i.e., inst (reg1, reg2, scale),
27416 // will take 2 allocations in the out-of-order engine instead of 1
27417 // for plain addressing mode, i.e. inst (reg1).
27419 // vaddps (%rsi,%rdx), %ymm0, %ymm1
27420 // Requires two allocations (one for the load, one for the computation)
27422 // vaddps (%rsi), %ymm0, %ymm1
27423 // Requires just 1 allocation, i.e., freeing allocations for other operations
27424 // and having fewer micro-operations to execute.
27426 // For some X86 architectures, this is even worse because for instance for
27427 // stores, the complex addressing mode forces the instruction to use the
27428 // "load" ports instead of the dedicated "store" port.
27429 // E.g., on Haswell:
27430 // vmovaps %ymm1, (%r8, %rdi) can use port 2 or 3.
27431 // vmovaps %ymm1, (%r8) can use port 2, 3, or 7.
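// Per the TargetLowering contract, a non-negative return value means the
// addressing mode is supported (here 0 when no scaled index is used, 1 when
// one is), while unsupported modes report a negative cost.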
27432 if (isLegalAddressingMode(AM, Ty))
27433 // Scale represents reg2 * scale, so account for a cost of 1
27434 // as soon as we use a second register.
27435 return AM.Scale != 0;
27439 bool X86TargetLowering::isTargetFTOL() const {
27440 return Subtarget->isTargetKnownWindowsMSVC() && !Subtarget->is64Bit();