//===-- X86ISelLowering.cpp - X86 DAG Lowering Implementation -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that X86 uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//
15 #include "X86ISelLowering.h"
16 #include "Utils/X86ShuffleDecode.h"
17 #include "X86CallingConv.h"
18 #include "X86FrameLowering.h"
19 #include "X86InstrBuilder.h"
20 #include "X86MachineFunctionInfo.h"
21 #include "X86TargetMachine.h"
22 #include "X86TargetObjectFile.h"
23 #include "llvm/ADT/SmallBitVector.h"
24 #include "llvm/ADT/SmallSet.h"
25 #include "llvm/ADT/Statistic.h"
26 #include "llvm/ADT/StringExtras.h"
27 #include "llvm/ADT/StringSwitch.h"
28 #include "llvm/ADT/VariadicFunction.h"
29 #include "llvm/CodeGen/IntrinsicLowering.h"
30 #include "llvm/CodeGen/MachineFrameInfo.h"
31 #include "llvm/CodeGen/MachineFunction.h"
32 #include "llvm/CodeGen/MachineInstrBuilder.h"
33 #include "llvm/CodeGen/MachineJumpTableInfo.h"
34 #include "llvm/CodeGen/MachineModuleInfo.h"
35 #include "llvm/CodeGen/MachineRegisterInfo.h"
36 #include "llvm/IR/CallSite.h"
37 #include "llvm/IR/CallingConv.h"
38 #include "llvm/IR/Constants.h"
39 #include "llvm/IR/DerivedTypes.h"
40 #include "llvm/IR/Function.h"
41 #include "llvm/IR/GlobalAlias.h"
42 #include "llvm/IR/GlobalVariable.h"
43 #include "llvm/IR/Instructions.h"
44 #include "llvm/IR/Intrinsics.h"
45 #include "llvm/MC/MCAsmInfo.h"
46 #include "llvm/MC/MCContext.h"
47 #include "llvm/MC/MCExpr.h"
48 #include "llvm/MC/MCSymbol.h"
49 #include "llvm/Support/CommandLine.h"
50 #include "llvm/Support/Debug.h"
51 #include "llvm/Support/ErrorHandling.h"
52 #include "llvm/Support/MathExtras.h"
53 #include "llvm/Target/TargetOptions.h"
54 #include "X86IntrinsicsInfo.h"
#define DEBUG_TYPE "x86-isel"

STATISTIC(NumTailCalls, "Number of tail calls");

static cl::opt<bool> ExperimentalVectorWideningLegalization(
    "x86-experimental-vector-widening-legalization", cl::init(false),
    cl::desc("Enable an experimental vector type legalization through widening "
             "rather than promotion."),
    cl::Hidden);

static cl::opt<bool> ExperimentalVectorShuffleLowering(
    "x86-experimental-vector-shuffle-lowering", cl::init(true),
    cl::desc("Enable an experimental vector shuffle lowering code path."),
    cl::Hidden);

static cl::opt<bool> ExperimentalVectorShuffleLegality(
    "x86-experimental-vector-shuffle-legality", cl::init(false),
    cl::desc("Enable experimental shuffle legality based on the experimental "
             "shuffle lowering. Should only be used with the experimental "
             "shuffle lowering."),
    cl::Hidden);

static cl::opt<int> ReciprocalEstimateRefinementSteps(
    "x86-recip-refinement-steps", cl::init(1),
    cl::desc("Specify the number of Newton-Raphson iterations applied to the "
             "result of the hardware reciprocal estimate instruction."),
    cl::NotHidden);
// Forward declarations.
static SDValue getMOVL(SelectionDAG &DAG, SDLoc dl, EVT VT, SDValue V1,
                       SDValue V2);
static SDValue ExtractSubVector(SDValue Vec, unsigned IdxVal,
                                SelectionDAG &DAG, SDLoc dl,
                                unsigned vectorWidth) {
  assert((vectorWidth == 128 || vectorWidth == 256) &&
         "Unsupported vector width");
  EVT VT = Vec.getValueType();
  EVT ElVT = VT.getVectorElementType();
  unsigned Factor = VT.getSizeInBits()/vectorWidth;
  EVT ResultVT = EVT::getVectorVT(*DAG.getContext(), ElVT,
                                  VT.getVectorNumElements()/Factor);

  // Extract from UNDEF is UNDEF.
  if (Vec.getOpcode() == ISD::UNDEF)
    return DAG.getUNDEF(ResultVT);

  // Extract the relevant vectorWidth bits. Generate an EXTRACT_SUBVECTOR.
  unsigned ElemsPerChunk = vectorWidth / ElVT.getSizeInBits();

  // This is the index of the first element of the vectorWidth-bit chunk
  // we want.
  unsigned NormalizedIdxVal = (((IdxVal * ElVT.getSizeInBits()) / vectorWidth)
                               * ElemsPerChunk);

  // If the input is a buildvector just emit a smaller one.
  if (Vec.getOpcode() == ISD::BUILD_VECTOR)
    return DAG.getNode(ISD::BUILD_VECTOR, dl, ResultVT,
                       makeArrayRef(Vec->op_begin() + NormalizedIdxVal,
                                    ElemsPerChunk));

  SDValue VecIdx = DAG.getIntPtrConstant(NormalizedIdxVal);
  return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, ResultVT, Vec, VecIdx);
}
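// Worked example (illustrative): extracting the upper 128-bit half of a v8i32
// with IdxVal = 4, ElVT = i32, vectorWidth = 128:
//   ElemsPerChunk    = 128 / 32             = 4
//   NormalizedIdxVal = ((4 * 32) / 128) * 4 = 4
// so the EXTRACT_SUBVECTOR starts at element 4, i.e. the high v4i32 chunk.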
/// Generate a DAG to grab 128-bits from a vector > 128 bits. This
/// sets things up to match to an AVX VEXTRACTF128 / VEXTRACTI128
/// or AVX-512 VEXTRACTF32x4 / VEXTRACTI32x4
/// instructions or a simple subregister reference. Idx is an index in the
/// 128 bits we want. It need not be aligned to a 128-bit boundary. That makes
/// lowering EXTRACT_VECTOR_ELT operations easier.
static SDValue Extract128BitVector(SDValue Vec, unsigned IdxVal,
                                   SelectionDAG &DAG, SDLoc dl) {
  assert((Vec.getValueType().is256BitVector() ||
          Vec.getValueType().is512BitVector()) && "Unexpected vector size!");
  return ExtractSubVector(Vec, IdxVal, DAG, dl, 128);
}

/// Generate a DAG to grab 256-bits from a 512-bit vector.
static SDValue Extract256BitVector(SDValue Vec, unsigned IdxVal,
                                   SelectionDAG &DAG, SDLoc dl) {
  assert(Vec.getValueType().is512BitVector() && "Unexpected vector size!");
  return ExtractSubVector(Vec, IdxVal, DAG, dl, 256);
}
static SDValue InsertSubVector(SDValue Result, SDValue Vec,
                               unsigned IdxVal, SelectionDAG &DAG,
                               SDLoc dl, unsigned vectorWidth) {
  assert((vectorWidth == 128 || vectorWidth == 256) &&
         "Unsupported vector width");
  // Inserting UNDEF is Result
  if (Vec.getOpcode() == ISD::UNDEF)
    return Result;
  EVT VT = Vec.getValueType();
  EVT ElVT = VT.getVectorElementType();
  EVT ResultVT = Result.getValueType();

  // Insert the relevant vectorWidth bits.
  unsigned ElemsPerChunk = vectorWidth/ElVT.getSizeInBits();

  // This is the index of the first element of the vectorWidth-bit chunk
  // we want.
  unsigned NormalizedIdxVal = (((IdxVal * ElVT.getSizeInBits())/vectorWidth)
                               * ElemsPerChunk);

  SDValue VecIdx = DAG.getIntPtrConstant(NormalizedIdxVal);
  return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ResultVT, Result, Vec, VecIdx);
}

/// Generate a DAG to put 128-bits into a vector > 128 bits. This
/// sets things up to match to an AVX VINSERTF128/VINSERTI128 or
/// AVX-512 VINSERTF32x4/VINSERTI32x4 instructions or a
/// simple superregister reference. Idx is an index in the 128 bits
/// we want. It need not be aligned to a 128-bit boundary. That makes
/// lowering INSERT_VECTOR_ELT operations easier.
static SDValue Insert128BitVector(SDValue Result, SDValue Vec, unsigned IdxVal,
                                  SelectionDAG &DAG, SDLoc dl) {
  assert(Vec.getValueType().is128BitVector() && "Unexpected vector size!");
  return InsertSubVector(Result, Vec, IdxVal, DAG, dl, 128);
}

static SDValue Insert256BitVector(SDValue Result, SDValue Vec, unsigned IdxVal,
                                  SelectionDAG &DAG, SDLoc dl) {
  assert(Vec.getValueType().is256BitVector() && "Unexpected vector size!");
  return InsertSubVector(Result, Vec, IdxVal, DAG, dl, 256);
}
/// Concat two 128-bit vectors into a 256-bit vector using VINSERTF128
/// instructions. This is used because creating CONCAT_VECTOR nodes of
/// BUILD_VECTORS returns a larger BUILD_VECTOR while we're trying to lower
/// large BUILD_VECTORS.
static SDValue Concat128BitVectors(SDValue V1, SDValue V2, EVT VT,
                                   unsigned NumElems, SelectionDAG &DAG,
                                   SDLoc dl) {
  SDValue V = Insert128BitVector(DAG.getUNDEF(VT), V1, 0, DAG, dl);
  return Insert128BitVector(V, V2, NumElems/2, DAG, dl);
}

static SDValue Concat256BitVectors(SDValue V1, SDValue V2, EVT VT,
                                   unsigned NumElems, SelectionDAG &DAG,
                                   SDLoc dl) {
  SDValue V = Insert256BitVector(DAG.getUNDEF(VT), V1, 0, DAG, dl);
  return Insert256BitVector(V, V2, NumElems/2, DAG, dl);
}
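// Example use (illustrative; the values below are hypothetical): widening two
// v4i32 halves into a single v8i32 value.
//   SDValue Lo = ...;  // v4i32
//   SDValue Hi = ...;  // v4i32
//   SDValue Wide = Concat128BitVectors(Lo, Hi, MVT::v8i32, 8, DAG, dl);
// Lo ends up in elements 0-3 and Hi in elements 4-7 of the 256-bit result.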
X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
                                     const X86Subtarget &STI)
    : TargetLowering(TM), Subtarget(&STI) {
  X86ScalarSSEf64 = Subtarget->hasSSE2();
  X86ScalarSSEf32 = Subtarget->hasSSE1();
  TD = getDataLayout();

  // Set up the TargetLowering object.
  static const MVT IntVTs[] = { MVT::i8, MVT::i16, MVT::i32, MVT::i64 };

  // X86 is weird. It always uses i8 for shift amounts and setcc results.
  setBooleanContents(ZeroOrOneBooleanContent);
  // X86-SSE is even stranger. It uses -1 or 0 for vector masks.
  setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
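  // Practical consequence (informal): scalar SETCC nodes yield 0 or 1 in an
  // integer register, while vector SETCC nodes yield per-lane all-zeros or
  // all-ones masks, matching what PCMPEQ/PCMPGT style instructions produce.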
  // For 64-bit, since we have so many registers, use the ILP scheduler.
  // For 32-bit, use the register pressure specific scheduling.
  // For Atom, always use ILP scheduling.
  if (Subtarget->isAtom())
    setSchedulingPreference(Sched::ILP);
  else if (Subtarget->is64Bit())
    setSchedulingPreference(Sched::ILP);
  else
    setSchedulingPreference(Sched::RegPressure);
  const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
  setStackPointerRegisterToSaveRestore(RegInfo->getStackRegister());

  // Bypass expensive divides on Atom when compiling with O2.
  if (TM.getOptLevel() >= CodeGenOpt::Default) {
    if (Subtarget->hasSlowDivide32())
      addBypassSlowDiv(32, 8);
    if (Subtarget->hasSlowDivide64() && Subtarget->is64Bit())
      addBypassSlowDiv(64, 16);
  }
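  // Note (informal): addBypassSlowDiv(32, 8) requests the slow-division bypass
  // transform, which guards a 32-bit divide with a run-time check and falls
  // back to the much cheaper 8-bit divide when both operands fit in 8 bits.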
  if (Subtarget->isTargetKnownWindowsMSVC()) {
    // Setup Windows compiler runtime calls.
    setLibcallName(RTLIB::SDIV_I64, "_alldiv");
    setLibcallName(RTLIB::UDIV_I64, "_aulldiv");
    setLibcallName(RTLIB::SREM_I64, "_allrem");
    setLibcallName(RTLIB::UREM_I64, "_aullrem");
    setLibcallName(RTLIB::MUL_I64, "_allmul");
    setLibcallCallingConv(RTLIB::SDIV_I64, CallingConv::X86_StdCall);
    setLibcallCallingConv(RTLIB::UDIV_I64, CallingConv::X86_StdCall);
    setLibcallCallingConv(RTLIB::SREM_I64, CallingConv::X86_StdCall);
    setLibcallCallingConv(RTLIB::UREM_I64, CallingConv::X86_StdCall);
    setLibcallCallingConv(RTLIB::MUL_I64, CallingConv::X86_StdCall);

    // The _ftol2 runtime function has an unusual calling conv, which
    // is modeled by a special pseudo-instruction.
    setLibcallName(RTLIB::FPTOUINT_F64_I64, nullptr);
    setLibcallName(RTLIB::FPTOUINT_F32_I64, nullptr);
    setLibcallName(RTLIB::FPTOUINT_F64_I32, nullptr);
    setLibcallName(RTLIB::FPTOUINT_F32_I32, nullptr);
  }

  if (Subtarget->isTargetDarwin()) {
    // Darwin should use _setjmp/_longjmp instead of setjmp/longjmp.
    setUseUnderscoreSetJmp(false);
    setUseUnderscoreLongJmp(false);
  } else if (Subtarget->isTargetWindowsGNU()) {
    // MS runtime is weird: it exports _setjmp, but longjmp!
    setUseUnderscoreSetJmp(true);
    setUseUnderscoreLongJmp(false);
  } else {
    setUseUnderscoreSetJmp(true);
    setUseUnderscoreLongJmp(true);
  }
274 // Set up the register classes.
275 addRegisterClass(MVT::i8, &X86::GR8RegClass);
276 addRegisterClass(MVT::i16, &X86::GR16RegClass);
277 addRegisterClass(MVT::i32, &X86::GR32RegClass);
278 if (Subtarget->is64Bit())
279 addRegisterClass(MVT::i64, &X86::GR64RegClass);
281 for (MVT VT : MVT::integer_valuetypes())
282 setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
284 // We don't accept any truncstore of integer registers.
285 setTruncStoreAction(MVT::i64, MVT::i32, Expand);
286 setTruncStoreAction(MVT::i64, MVT::i16, Expand);
287 setTruncStoreAction(MVT::i64, MVT::i8 , Expand);
288 setTruncStoreAction(MVT::i32, MVT::i16, Expand);
289 setTruncStoreAction(MVT::i32, MVT::i8 , Expand);
290 setTruncStoreAction(MVT::i16, MVT::i8, Expand);
292 setTruncStoreAction(MVT::f64, MVT::f32, Expand);
294 // SETOEQ and SETUNE require checking two conditions.
295 setCondCodeAction(ISD::SETOEQ, MVT::f32, Expand);
296 setCondCodeAction(ISD::SETOEQ, MVT::f64, Expand);
297 setCondCodeAction(ISD::SETOEQ, MVT::f80, Expand);
298 setCondCodeAction(ISD::SETUNE, MVT::f32, Expand);
299 setCondCodeAction(ISD::SETUNE, MVT::f64, Expand);
300 setCondCodeAction(ISD::SETUNE, MVT::f80, Expand);
  // Promote all UINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have this
  // operation.
304 setOperationAction(ISD::UINT_TO_FP , MVT::i1 , Promote);
305 setOperationAction(ISD::UINT_TO_FP , MVT::i8 , Promote);
306 setOperationAction(ISD::UINT_TO_FP , MVT::i16 , Promote);
308 if (Subtarget->is64Bit()) {
309 setOperationAction(ISD::UINT_TO_FP , MVT::i32 , Promote);
310 setOperationAction(ISD::UINT_TO_FP , MVT::i64 , Custom);
311 } else if (!TM.Options.UseSoftFloat) {
312 // We have an algorithm for SSE2->double, and we turn this into a
313 // 64-bit FILD followed by conditional FADD for other targets.
314 setOperationAction(ISD::UINT_TO_FP , MVT::i64 , Custom);
315 // We have an algorithm for SSE2, and we turn this into a 64-bit
316 // FILD for other targets.
    setOperationAction(ISD::UINT_TO_FP       , MVT::i32  , Custom);
  }

  // Promote i1/i8 SINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have
  // this operation.
322 setOperationAction(ISD::SINT_TO_FP , MVT::i1 , Promote);
323 setOperationAction(ISD::SINT_TO_FP , MVT::i8 , Promote);
325 if (!TM.Options.UseSoftFloat) {
326 // SSE has no i16 to fp conversion, only i32
327 if (X86ScalarSSEf32) {
328 setOperationAction(ISD::SINT_TO_FP , MVT::i16 , Promote);
329 // f32 and f64 cases are Legal, f80 case is not
      setOperationAction(ISD::SINT_TO_FP     , MVT::i32  , Custom);
    } else {
      setOperationAction(ISD::SINT_TO_FP     , MVT::i16  , Custom);
      setOperationAction(ISD::SINT_TO_FP     , MVT::i32  , Custom);
    }
  } else {
    setOperationAction(ISD::SINT_TO_FP     , MVT::i16  , Promote);
    setOperationAction(ISD::SINT_TO_FP     , MVT::i32  , Promote);
  }
340 // In 32-bit mode these are custom lowered. In 64-bit mode F32 and F64
341 // are Legal, f80 is custom lowered.
342 setOperationAction(ISD::FP_TO_SINT , MVT::i64 , Custom);
343 setOperationAction(ISD::SINT_TO_FP , MVT::i64 , Custom);
  // Promote i1/i8 FP_TO_SINT to larger FP_TO_SINT's, as X86 doesn't have
  // this operation.
347 setOperationAction(ISD::FP_TO_SINT , MVT::i1 , Promote);
348 setOperationAction(ISD::FP_TO_SINT , MVT::i8 , Promote);
350 if (X86ScalarSSEf32) {
351 setOperationAction(ISD::FP_TO_SINT , MVT::i16 , Promote);
352 // f32 and f64 cases are Legal, f80 case is not
    setOperationAction(ISD::FP_TO_SINT       , MVT::i32  , Custom);
  } else {
    setOperationAction(ISD::FP_TO_SINT       , MVT::i16  , Custom);
    setOperationAction(ISD::FP_TO_SINT       , MVT::i32  , Custom);
  }

  // Handle FP_TO_UINT by promoting the destination to a larger signed
  // conversion.
361 setOperationAction(ISD::FP_TO_UINT , MVT::i1 , Promote);
362 setOperationAction(ISD::FP_TO_UINT , MVT::i8 , Promote);
363 setOperationAction(ISD::FP_TO_UINT , MVT::i16 , Promote);
365 if (Subtarget->is64Bit()) {
366 setOperationAction(ISD::FP_TO_UINT , MVT::i64 , Expand);
367 setOperationAction(ISD::FP_TO_UINT , MVT::i32 , Promote);
368 } else if (!TM.Options.UseSoftFloat) {
369 // Since AVX is a superset of SSE3, only check for SSE here.
370 if (Subtarget->hasSSE1() && !Subtarget->hasSSE3())
371 // Expand FP_TO_UINT into a select.
372 // FIXME: We would like to use a Custom expander here eventually to do
373 // the optimal thing for SSE vs. the default expansion in the legalizer.
      setOperationAction(ISD::FP_TO_UINT     , MVT::i32  , Expand);
    else
      // With SSE3 we can use fisttpll to convert to a signed i64; without
      // SSE, we're stuck with a fistpll.
      setOperationAction(ISD::FP_TO_UINT     , MVT::i32  , Custom);
  }
381 if (isTargetFTOL()) {
382 // Use the _ftol2 runtime function, which has a pseudo-instruction
383 // to handle its weird calling convention.
    setOperationAction(ISD::FP_TO_UINT       , MVT::i64  , Custom);
  }
387 // TODO: when we have SSE, these could be more efficient, by using movd/movq.
388 if (!X86ScalarSSEf64) {
389 setOperationAction(ISD::BITCAST , MVT::f32 , Expand);
390 setOperationAction(ISD::BITCAST , MVT::i32 , Expand);
391 if (Subtarget->is64Bit()) {
392 setOperationAction(ISD::BITCAST , MVT::f64 , Expand);
393 // Without SSE, i64->f64 goes through memory.
      setOperationAction(ISD::BITCAST        , MVT::i64  , Expand);
    }
  }
398 // Scalar integer divide and remainder are lowered to use operations that
399 // produce two results, to match the available instructions. This exposes
400 // the two-result form to trivial CSE, which is able to combine x/y and x%y
401 // into a single instruction.
403 // Scalar integer multiply-high is also lowered to use two-result
404 // operations, to match the available instructions. However, plain multiply
405 // (low) operations are left as Legal, as there are single-result
406 // instructions for this in x86. Using the two-result multiply instructions
407 // when both high and low results are needed must be arranged by dagcombine.
  for (unsigned i = 0; i != array_lengthof(IntVTs); ++i) {
    MVT VT = IntVTs[i];
410 setOperationAction(ISD::MULHS, VT, Expand);
411 setOperationAction(ISD::MULHU, VT, Expand);
412 setOperationAction(ISD::SDIV, VT, Expand);
413 setOperationAction(ISD::UDIV, VT, Expand);
414 setOperationAction(ISD::SREM, VT, Expand);
415 setOperationAction(ISD::UREM, VT, Expand);
417 // Add/Sub overflow ops with MVT::Glues are lowered to EFLAGS dependences.
418 setOperationAction(ISD::ADDC, VT, Custom);
419 setOperationAction(ISD::ADDE, VT, Custom);
420 setOperationAction(ISD::SUBC, VT, Custom);
    setOperationAction(ISD::SUBE, VT, Custom);
  }
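  // Illustrative effect of the Expand settings above: for IR like
  //   %q = sdiv i32 %x, %y
  //   %r = srem i32 %x, %y
  // both operations become ISD::SDIVREM nodes that CSE merges, so the quotient
  // and remainder come out of a single hardware divide.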
424 setOperationAction(ISD::BR_JT , MVT::Other, Expand);
425 setOperationAction(ISD::BRCOND , MVT::Other, Custom);
426 setOperationAction(ISD::BR_CC , MVT::f32, Expand);
427 setOperationAction(ISD::BR_CC , MVT::f64, Expand);
428 setOperationAction(ISD::BR_CC , MVT::f80, Expand);
429 setOperationAction(ISD::BR_CC , MVT::i8, Expand);
430 setOperationAction(ISD::BR_CC , MVT::i16, Expand);
431 setOperationAction(ISD::BR_CC , MVT::i32, Expand);
432 setOperationAction(ISD::BR_CC , MVT::i64, Expand);
433 setOperationAction(ISD::SELECT_CC , MVT::f32, Expand);
434 setOperationAction(ISD::SELECT_CC , MVT::f64, Expand);
435 setOperationAction(ISD::SELECT_CC , MVT::f80, Expand);
436 setOperationAction(ISD::SELECT_CC , MVT::i8, Expand);
437 setOperationAction(ISD::SELECT_CC , MVT::i16, Expand);
438 setOperationAction(ISD::SELECT_CC , MVT::i32, Expand);
439 setOperationAction(ISD::SELECT_CC , MVT::i64, Expand);
440 if (Subtarget->is64Bit())
441 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i32, Legal);
442 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16 , Legal);
443 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8 , Legal);
444 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1 , Expand);
445 setOperationAction(ISD::FP_ROUND_INREG , MVT::f32 , Expand);
446 setOperationAction(ISD::FREM , MVT::f32 , Expand);
447 setOperationAction(ISD::FREM , MVT::f64 , Expand);
448 setOperationAction(ISD::FREM , MVT::f80 , Expand);
449 setOperationAction(ISD::FLT_ROUNDS_ , MVT::i32 , Custom);
  // Promote the i8 variants and force them on up to i32 which has a shorter
  // encoding.
453 setOperationAction(ISD::CTTZ , MVT::i8 , Promote);
454 AddPromotedToType (ISD::CTTZ , MVT::i8 , MVT::i32);
455 setOperationAction(ISD::CTTZ_ZERO_UNDEF , MVT::i8 , Promote);
456 AddPromotedToType (ISD::CTTZ_ZERO_UNDEF , MVT::i8 , MVT::i32);
457 if (Subtarget->hasBMI()) {
458 setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i16 , Expand);
459 setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32 , Expand);
460 if (Subtarget->is64Bit())
      setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i64, Expand);
  } else {
463 setOperationAction(ISD::CTTZ , MVT::i16 , Custom);
464 setOperationAction(ISD::CTTZ , MVT::i32 , Custom);
465 if (Subtarget->is64Bit())
      setOperationAction(ISD::CTTZ           , MVT::i64  , Custom);
  }
469 if (Subtarget->hasLZCNT()) {
    // When promoting the i8 variants, force them to i32 for a shorter
    // encoding.
472 setOperationAction(ISD::CTLZ , MVT::i8 , Promote);
473 AddPromotedToType (ISD::CTLZ , MVT::i8 , MVT::i32);
474 setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i8 , Promote);
475 AddPromotedToType (ISD::CTLZ_ZERO_UNDEF, MVT::i8 , MVT::i32);
476 setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i16 , Expand);
477 setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32 , Expand);
478 if (Subtarget->is64Bit())
      setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i64, Expand);
  } else {
481 setOperationAction(ISD::CTLZ , MVT::i8 , Custom);
482 setOperationAction(ISD::CTLZ , MVT::i16 , Custom);
483 setOperationAction(ISD::CTLZ , MVT::i32 , Custom);
484 setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i8 , Custom);
485 setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i16 , Custom);
486 setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32 , Custom);
487 if (Subtarget->is64Bit()) {
488 setOperationAction(ISD::CTLZ , MVT::i64 , Custom);
      setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i64, Custom);
    }
  }
493 // Special handling for half-precision floating point conversions.
494 // If we don't have F16C support, then lower half float conversions
495 // into library calls.
496 if (TM.Options.UseSoftFloat || !Subtarget->hasF16C()) {
497 setOperationAction(ISD::FP16_TO_FP, MVT::f32, Expand);
    setOperationAction(ISD::FP_TO_FP16, MVT::f32, Expand);
  }
501 // There's never any support for operations beyond MVT::f32.
502 setOperationAction(ISD::FP16_TO_FP, MVT::f64, Expand);
503 setOperationAction(ISD::FP16_TO_FP, MVT::f80, Expand);
504 setOperationAction(ISD::FP_TO_FP16, MVT::f64, Expand);
505 setOperationAction(ISD::FP_TO_FP16, MVT::f80, Expand);
507 setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Expand);
508 setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Expand);
509 setLoadExtAction(ISD::EXTLOAD, MVT::f80, MVT::f16, Expand);
510 setTruncStoreAction(MVT::f32, MVT::f16, Expand);
511 setTruncStoreAction(MVT::f64, MVT::f16, Expand);
512 setTruncStoreAction(MVT::f80, MVT::f16, Expand);
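  // Informal note: with the settings above, scalar f16 values are always
  // widened to f32 (or narrowed from f32) before use; when F16C is not
  // available the FP16_TO_FP / FP_TO_FP16 nodes are expanded to runtime
  // conversion calls instead of VCVTPH2PS / VCVTPS2PH instructions.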
514 if (Subtarget->hasPOPCNT()) {
    setOperationAction(ISD::CTPOP            , MVT::i8   , Promote);
  } else {
517 setOperationAction(ISD::CTPOP , MVT::i8 , Expand);
518 setOperationAction(ISD::CTPOP , MVT::i16 , Expand);
519 setOperationAction(ISD::CTPOP , MVT::i32 , Expand);
520 if (Subtarget->is64Bit())
      setOperationAction(ISD::CTPOP          , MVT::i64  , Expand);
  }
524 setOperationAction(ISD::READCYCLECOUNTER , MVT::i64 , Custom);
526 if (!Subtarget->hasMOVBE())
527 setOperationAction(ISD::BSWAP , MVT::i16 , Expand);
529 // These should be promoted to a larger select which is supported.
530 setOperationAction(ISD::SELECT , MVT::i1 , Promote);
531 // X86 wants to expand cmov itself.
532 setOperationAction(ISD::SELECT , MVT::i8 , Custom);
533 setOperationAction(ISD::SELECT , MVT::i16 , Custom);
534 setOperationAction(ISD::SELECT , MVT::i32 , Custom);
535 setOperationAction(ISD::SELECT , MVT::f32 , Custom);
536 setOperationAction(ISD::SELECT , MVT::f64 , Custom);
537 setOperationAction(ISD::SELECT , MVT::f80 , Custom);
538 setOperationAction(ISD::SETCC , MVT::i8 , Custom);
539 setOperationAction(ISD::SETCC , MVT::i16 , Custom);
540 setOperationAction(ISD::SETCC , MVT::i32 , Custom);
541 setOperationAction(ISD::SETCC , MVT::f32 , Custom);
542 setOperationAction(ISD::SETCC , MVT::f64 , Custom);
543 setOperationAction(ISD::SETCC , MVT::f80 , Custom);
544 if (Subtarget->is64Bit()) {
545 setOperationAction(ISD::SELECT , MVT::i64 , Custom);
    setOperationAction(ISD::SETCC            , MVT::i64  , Custom);
  }
548 setOperationAction(ISD::EH_RETURN , MVT::Other, Custom);
549 // NOTE: EH_SJLJ_SETJMP/_LONGJMP supported here is NOT intended to support
550 // SjLj exception handling but a light-weight setjmp/longjmp replacement to
551 // support continuation, user-level threading, and etc.. As a result, no
552 // other SjLj exception interfaces are implemented and please don't build
553 // your own exception handling based on them.
554 // LLVM/Clang supports zero-cost DWARF exception handling.
555 setOperationAction(ISD::EH_SJLJ_SETJMP, MVT::i32, Custom);
556 setOperationAction(ISD::EH_SJLJ_LONGJMP, MVT::Other, Custom);
559 setOperationAction(ISD::ConstantPool , MVT::i32 , Custom);
560 setOperationAction(ISD::JumpTable , MVT::i32 , Custom);
561 setOperationAction(ISD::GlobalAddress , MVT::i32 , Custom);
562 setOperationAction(ISD::GlobalTLSAddress, MVT::i32 , Custom);
563 if (Subtarget->is64Bit())
564 setOperationAction(ISD::GlobalTLSAddress, MVT::i64, Custom);
565 setOperationAction(ISD::ExternalSymbol , MVT::i32 , Custom);
566 setOperationAction(ISD::BlockAddress , MVT::i32 , Custom);
567 if (Subtarget->is64Bit()) {
568 setOperationAction(ISD::ConstantPool , MVT::i64 , Custom);
569 setOperationAction(ISD::JumpTable , MVT::i64 , Custom);
570 setOperationAction(ISD::GlobalAddress , MVT::i64 , Custom);
571 setOperationAction(ISD::ExternalSymbol, MVT::i64 , Custom);
    setOperationAction(ISD::BlockAddress    , MVT::i64  , Custom);
  }
  // 64-bit add, sub, shl, sra, srl (iff 32-bit x86)
575 setOperationAction(ISD::SHL_PARTS , MVT::i32 , Custom);
576 setOperationAction(ISD::SRA_PARTS , MVT::i32 , Custom);
577 setOperationAction(ISD::SRL_PARTS , MVT::i32 , Custom);
578 if (Subtarget->is64Bit()) {
579 setOperationAction(ISD::SHL_PARTS , MVT::i64 , Custom);
580 setOperationAction(ISD::SRA_PARTS , MVT::i64 , Custom);
    setOperationAction(ISD::SRL_PARTS      , MVT::i64  , Custom);
  }
584 if (Subtarget->hasSSE1())
585 setOperationAction(ISD::PREFETCH , MVT::Other, Legal);
587 setOperationAction(ISD::ATOMIC_FENCE , MVT::Other, Custom);
589 // Expand certain atomics
  for (unsigned i = 0; i != array_lengthof(IntVTs); ++i) {
    MVT VT = IntVTs[i];
592 setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, VT, Custom);
593 setOperationAction(ISD::ATOMIC_LOAD_SUB, VT, Custom);
    setOperationAction(ISD::ATOMIC_STORE, VT, Custom);
  }
597 if (Subtarget->hasCmpxchg16b()) {
    setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, MVT::i128, Custom);
  }
601 // FIXME - use subtarget debug flags
602 if (!Subtarget->isTargetDarwin() && !Subtarget->isTargetELF() &&
603 !Subtarget->isTargetCygMing() && !Subtarget->isTargetWin64()) {
    setOperationAction(ISD::EH_LABEL, MVT::Other, Expand);
  }
607 if (Subtarget->is64Bit()) {
608 setExceptionPointerRegister(X86::RAX);
    setExceptionSelectorRegister(X86::RDX);
  } else {
    setExceptionPointerRegister(X86::EAX);
    setExceptionSelectorRegister(X86::EDX);
  }
614 setOperationAction(ISD::FRAME_TO_ARGS_OFFSET, MVT::i32, Custom);
615 setOperationAction(ISD::FRAME_TO_ARGS_OFFSET, MVT::i64, Custom);
617 setOperationAction(ISD::INIT_TRAMPOLINE, MVT::Other, Custom);
618 setOperationAction(ISD::ADJUST_TRAMPOLINE, MVT::Other, Custom);
620 setOperationAction(ISD::TRAP, MVT::Other, Legal);
621 setOperationAction(ISD::DEBUGTRAP, MVT::Other, Legal);
623 // VASTART needs to be custom lowered to use the VarArgsFrameIndex
624 setOperationAction(ISD::VASTART , MVT::Other, Custom);
625 setOperationAction(ISD::VAEND , MVT::Other, Expand);
626 if (Subtarget->is64Bit() && !Subtarget->isTargetWin64()) {
627 // TargetInfo::X86_64ABIBuiltinVaList
628 setOperationAction(ISD::VAARG , MVT::Other, Custom);
    setOperationAction(ISD::VACOPY          , MVT::Other, Custom);
  } else {
    // TargetInfo::CharPtrBuiltinVaList
    setOperationAction(ISD::VAARG           , MVT::Other, Expand);
    setOperationAction(ISD::VACOPY          , MVT::Other, Expand);
  }
636 setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
637 setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
639 setOperationAction(ISD::DYNAMIC_STACKALLOC, getPointerTy(), Custom);
641 if (!TM.Options.UseSoftFloat && X86ScalarSSEf64) {
642 // f32 and f64 use SSE.
643 // Set up the FP register classes.
644 addRegisterClass(MVT::f32, &X86::FR32RegClass);
645 addRegisterClass(MVT::f64, &X86::FR64RegClass);
647 // Use ANDPD to simulate FABS.
648 setOperationAction(ISD::FABS , MVT::f64, Custom);
649 setOperationAction(ISD::FABS , MVT::f32, Custom);
651 // Use XORP to simulate FNEG.
652 setOperationAction(ISD::FNEG , MVT::f64, Custom);
653 setOperationAction(ISD::FNEG , MVT::f32, Custom);
655 // Use ANDPD and ORPD to simulate FCOPYSIGN.
656 setOperationAction(ISD::FCOPYSIGN, MVT::f64, Custom);
657 setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);
659 // Lower this to FGETSIGNx86 plus an AND.
660 setOperationAction(ISD::FGETSIGN, MVT::i64, Custom);
661 setOperationAction(ISD::FGETSIGN, MVT::i32, Custom);
663 // We don't support sin/cos/fmod
664 setOperationAction(ISD::FSIN , MVT::f64, Expand);
665 setOperationAction(ISD::FCOS , MVT::f64, Expand);
666 setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
667 setOperationAction(ISD::FSIN , MVT::f32, Expand);
668 setOperationAction(ISD::FCOS , MVT::f32, Expand);
669 setOperationAction(ISD::FSINCOS, MVT::f32, Expand);
    // Expand FP immediates into loads from the stack, except for the special
    // cases we handle.
673 addLegalFPImmediate(APFloat(+0.0)); // xorpd
674 addLegalFPImmediate(APFloat(+0.0f)); // xorps
675 } else if (!TM.Options.UseSoftFloat && X86ScalarSSEf32) {
676 // Use SSE for f32, x87 for f64.
677 // Set up the FP register classes.
678 addRegisterClass(MVT::f32, &X86::FR32RegClass);
679 addRegisterClass(MVT::f64, &X86::RFP64RegClass);
681 // Use ANDPS to simulate FABS.
682 setOperationAction(ISD::FABS , MVT::f32, Custom);
684 // Use XORP to simulate FNEG.
685 setOperationAction(ISD::FNEG , MVT::f32, Custom);
687 setOperationAction(ISD::UNDEF, MVT::f64, Expand);
689 // Use ANDPS and ORPS to simulate FCOPYSIGN.
690 setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
691 setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);
693 // We don't support sin/cos/fmod
694 setOperationAction(ISD::FSIN , MVT::f32, Expand);
695 setOperationAction(ISD::FCOS , MVT::f32, Expand);
696 setOperationAction(ISD::FSINCOS, MVT::f32, Expand);
698 // Special cases we handle for FP constants.
699 addLegalFPImmediate(APFloat(+0.0f)); // xorps
700 addLegalFPImmediate(APFloat(+0.0)); // FLD0
701 addLegalFPImmediate(APFloat(+1.0)); // FLD1
702 addLegalFPImmediate(APFloat(-0.0)); // FLD0/FCHS
703 addLegalFPImmediate(APFloat(-1.0)); // FLD1/FCHS
705 if (!TM.Options.UnsafeFPMath) {
706 setOperationAction(ISD::FSIN , MVT::f64, Expand);
707 setOperationAction(ISD::FCOS , MVT::f64, Expand);
      setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
    }
710 } else if (!TM.Options.UseSoftFloat) {
711 // f32 and f64 in x87.
712 // Set up the FP register classes.
713 addRegisterClass(MVT::f64, &X86::RFP64RegClass);
714 addRegisterClass(MVT::f32, &X86::RFP32RegClass);
716 setOperationAction(ISD::UNDEF, MVT::f64, Expand);
717 setOperationAction(ISD::UNDEF, MVT::f32, Expand);
718 setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
719 setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);
721 if (!TM.Options.UnsafeFPMath) {
722 setOperationAction(ISD::FSIN , MVT::f64, Expand);
723 setOperationAction(ISD::FSIN , MVT::f32, Expand);
724 setOperationAction(ISD::FCOS , MVT::f64, Expand);
725 setOperationAction(ISD::FCOS , MVT::f32, Expand);
726 setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
      setOperationAction(ISD::FSINCOS, MVT::f32, Expand);
    }
729 addLegalFPImmediate(APFloat(+0.0)); // FLD0
730 addLegalFPImmediate(APFloat(+1.0)); // FLD1
731 addLegalFPImmediate(APFloat(-0.0)); // FLD0/FCHS
732 addLegalFPImmediate(APFloat(-1.0)); // FLD1/FCHS
733 addLegalFPImmediate(APFloat(+0.0f)); // FLD0
734 addLegalFPImmediate(APFloat(+1.0f)); // FLD1
735 addLegalFPImmediate(APFloat(-0.0f)); // FLD0/FCHS
    addLegalFPImmediate(APFloat(-1.0f)); // FLD1/FCHS
  }
739 // We don't support FMA.
740 setOperationAction(ISD::FMA, MVT::f64, Expand);
741 setOperationAction(ISD::FMA, MVT::f32, Expand);
743 // Long double always uses X87.
744 if (!TM.Options.UseSoftFloat) {
745 addRegisterClass(MVT::f80, &X86::RFP80RegClass);
746 setOperationAction(ISD::UNDEF, MVT::f80, Expand);
747 setOperationAction(ISD::FCOPYSIGN, MVT::f80, Expand);
    APFloat TmpFlt = APFloat::getZero(APFloat::x87DoubleExtended);
    addLegalFPImmediate(TmpFlt);  // FLD0
    TmpFlt.changeSign();
    addLegalFPImmediate(TmpFlt);  // FLD0/FCHS

    bool ignored;
    APFloat TmpFlt2(+1.0);
    TmpFlt2.convert(APFloat::x87DoubleExtended, APFloat::rmNearestTiesToEven,
                    &ignored);
    addLegalFPImmediate(TmpFlt2);  // FLD1
    TmpFlt2.changeSign();
    addLegalFPImmediate(TmpFlt2);  // FLD1/FCHS
763 if (!TM.Options.UnsafeFPMath) {
764 setOperationAction(ISD::FSIN , MVT::f80, Expand);
765 setOperationAction(ISD::FCOS , MVT::f80, Expand);
      setOperationAction(ISD::FSINCOS, MVT::f80, Expand);
    }
769 setOperationAction(ISD::FFLOOR, MVT::f80, Expand);
770 setOperationAction(ISD::FCEIL, MVT::f80, Expand);
771 setOperationAction(ISD::FTRUNC, MVT::f80, Expand);
772 setOperationAction(ISD::FRINT, MVT::f80, Expand);
773 setOperationAction(ISD::FNEARBYINT, MVT::f80, Expand);
    setOperationAction(ISD::FMA, MVT::f80, Expand);
  }
777 // Always use a library call for pow.
778 setOperationAction(ISD::FPOW , MVT::f32 , Expand);
779 setOperationAction(ISD::FPOW , MVT::f64 , Expand);
780 setOperationAction(ISD::FPOW , MVT::f80 , Expand);
782 setOperationAction(ISD::FLOG, MVT::f80, Expand);
783 setOperationAction(ISD::FLOG2, MVT::f80, Expand);
784 setOperationAction(ISD::FLOG10, MVT::f80, Expand);
785 setOperationAction(ISD::FEXP, MVT::f80, Expand);
786 setOperationAction(ISD::FEXP2, MVT::f80, Expand);
787 setOperationAction(ISD::FMINNUM, MVT::f80, Expand);
788 setOperationAction(ISD::FMAXNUM, MVT::f80, Expand);
790 // First set operation action for all vector types to either promote
791 // (for widening) or expand (for scalarization). Then we will selectively
792 // turn on ones that can be effectively codegen'd.
793 for (MVT VT : MVT::vector_valuetypes()) {
794 setOperationAction(ISD::ADD , VT, Expand);
795 setOperationAction(ISD::SUB , VT, Expand);
796 setOperationAction(ISD::FADD, VT, Expand);
797 setOperationAction(ISD::FNEG, VT, Expand);
798 setOperationAction(ISD::FSUB, VT, Expand);
799 setOperationAction(ISD::MUL , VT, Expand);
800 setOperationAction(ISD::FMUL, VT, Expand);
801 setOperationAction(ISD::SDIV, VT, Expand);
802 setOperationAction(ISD::UDIV, VT, Expand);
803 setOperationAction(ISD::FDIV, VT, Expand);
804 setOperationAction(ISD::SREM, VT, Expand);
805 setOperationAction(ISD::UREM, VT, Expand);
806 setOperationAction(ISD::LOAD, VT, Expand);
807 setOperationAction(ISD::VECTOR_SHUFFLE, VT, Expand);
808 setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT,Expand);
809 setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Expand);
810 setOperationAction(ISD::EXTRACT_SUBVECTOR, VT,Expand);
811 setOperationAction(ISD::INSERT_SUBVECTOR, VT,Expand);
812 setOperationAction(ISD::FABS, VT, Expand);
813 setOperationAction(ISD::FSIN, VT, Expand);
814 setOperationAction(ISD::FSINCOS, VT, Expand);
815 setOperationAction(ISD::FCOS, VT, Expand);
816 setOperationAction(ISD::FSINCOS, VT, Expand);
817 setOperationAction(ISD::FREM, VT, Expand);
818 setOperationAction(ISD::FMA, VT, Expand);
819 setOperationAction(ISD::FPOWI, VT, Expand);
820 setOperationAction(ISD::FSQRT, VT, Expand);
821 setOperationAction(ISD::FCOPYSIGN, VT, Expand);
822 setOperationAction(ISD::FFLOOR, VT, Expand);
823 setOperationAction(ISD::FCEIL, VT, Expand);
824 setOperationAction(ISD::FTRUNC, VT, Expand);
825 setOperationAction(ISD::FRINT, VT, Expand);
826 setOperationAction(ISD::FNEARBYINT, VT, Expand);
827 setOperationAction(ISD::SMUL_LOHI, VT, Expand);
828 setOperationAction(ISD::MULHS, VT, Expand);
829 setOperationAction(ISD::UMUL_LOHI, VT, Expand);
830 setOperationAction(ISD::MULHU, VT, Expand);
831 setOperationAction(ISD::SDIVREM, VT, Expand);
832 setOperationAction(ISD::UDIVREM, VT, Expand);
833 setOperationAction(ISD::FPOW, VT, Expand);
834 setOperationAction(ISD::CTPOP, VT, Expand);
835 setOperationAction(ISD::CTTZ, VT, Expand);
836 setOperationAction(ISD::CTTZ_ZERO_UNDEF, VT, Expand);
837 setOperationAction(ISD::CTLZ, VT, Expand);
838 setOperationAction(ISD::CTLZ_ZERO_UNDEF, VT, Expand);
839 setOperationAction(ISD::SHL, VT, Expand);
840 setOperationAction(ISD::SRA, VT, Expand);
841 setOperationAction(ISD::SRL, VT, Expand);
842 setOperationAction(ISD::ROTL, VT, Expand);
843 setOperationAction(ISD::ROTR, VT, Expand);
844 setOperationAction(ISD::BSWAP, VT, Expand);
845 setOperationAction(ISD::SETCC, VT, Expand);
846 setOperationAction(ISD::FLOG, VT, Expand);
847 setOperationAction(ISD::FLOG2, VT, Expand);
848 setOperationAction(ISD::FLOG10, VT, Expand);
849 setOperationAction(ISD::FEXP, VT, Expand);
850 setOperationAction(ISD::FEXP2, VT, Expand);
851 setOperationAction(ISD::FP_TO_UINT, VT, Expand);
852 setOperationAction(ISD::FP_TO_SINT, VT, Expand);
853 setOperationAction(ISD::UINT_TO_FP, VT, Expand);
854 setOperationAction(ISD::SINT_TO_FP, VT, Expand);
855 setOperationAction(ISD::SIGN_EXTEND_INREG, VT,Expand);
856 setOperationAction(ISD::TRUNCATE, VT, Expand);
857 setOperationAction(ISD::SIGN_EXTEND, VT, Expand);
858 setOperationAction(ISD::ZERO_EXTEND, VT, Expand);
859 setOperationAction(ISD::ANY_EXTEND, VT, Expand);
860 setOperationAction(ISD::VSELECT, VT, Expand);
861 setOperationAction(ISD::SELECT_CC, VT, Expand);
862 for (MVT InnerVT : MVT::vector_valuetypes()) {
863 setTruncStoreAction(InnerVT, VT, Expand);
865 setLoadExtAction(ISD::SEXTLOAD, InnerVT, VT, Expand);
866 setLoadExtAction(ISD::ZEXTLOAD, InnerVT, VT, Expand);
868 // N.b. ISD::EXTLOAD legality is basically ignored except for i1-like
869 // types, we have to deal with them whether we ask for Expansion or not.
      // Setting Expand causes its own optimisation problems though, so leave
      // it alone for now.
      if (VT.getVectorElementType() == MVT::i1)
        setLoadExtAction(ISD::EXTLOAD, InnerVT, VT, Expand);
    }
  }
877 // FIXME: In order to prevent SSE instructions being expanded to MMX ones
878 // with -msoft-float, disable use of MMX as well.
879 if (!TM.Options.UseSoftFloat && Subtarget->hasMMX()) {
880 addRegisterClass(MVT::x86mmx, &X86::VR64RegClass);
    // No operations on x86mmx supported, everything uses intrinsics.
  }
884 // MMX-sized vectors (other than x86mmx) are expected to be expanded
885 // into smaller operations.
886 setOperationAction(ISD::MULHS, MVT::v8i8, Expand);
887 setOperationAction(ISD::MULHS, MVT::v4i16, Expand);
888 setOperationAction(ISD::MULHS, MVT::v2i32, Expand);
889 setOperationAction(ISD::MULHS, MVT::v1i64, Expand);
890 setOperationAction(ISD::AND, MVT::v8i8, Expand);
891 setOperationAction(ISD::AND, MVT::v4i16, Expand);
892 setOperationAction(ISD::AND, MVT::v2i32, Expand);
893 setOperationAction(ISD::AND, MVT::v1i64, Expand);
894 setOperationAction(ISD::OR, MVT::v8i8, Expand);
895 setOperationAction(ISD::OR, MVT::v4i16, Expand);
896 setOperationAction(ISD::OR, MVT::v2i32, Expand);
897 setOperationAction(ISD::OR, MVT::v1i64, Expand);
898 setOperationAction(ISD::XOR, MVT::v8i8, Expand);
899 setOperationAction(ISD::XOR, MVT::v4i16, Expand);
900 setOperationAction(ISD::XOR, MVT::v2i32, Expand);
901 setOperationAction(ISD::XOR, MVT::v1i64, Expand);
902 setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v8i8, Expand);
903 setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i16, Expand);
904 setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v2i32, Expand);
905 setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v1i64, Expand);
906 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v1i64, Expand);
907 setOperationAction(ISD::SELECT, MVT::v8i8, Expand);
908 setOperationAction(ISD::SELECT, MVT::v4i16, Expand);
909 setOperationAction(ISD::SELECT, MVT::v2i32, Expand);
910 setOperationAction(ISD::SELECT, MVT::v1i64, Expand);
911 setOperationAction(ISD::BITCAST, MVT::v8i8, Expand);
912 setOperationAction(ISD::BITCAST, MVT::v4i16, Expand);
913 setOperationAction(ISD::BITCAST, MVT::v2i32, Expand);
914 setOperationAction(ISD::BITCAST, MVT::v1i64, Expand);
916 if (!TM.Options.UseSoftFloat && Subtarget->hasSSE1()) {
917 addRegisterClass(MVT::v4f32, &X86::VR128RegClass);
919 setOperationAction(ISD::FADD, MVT::v4f32, Legal);
920 setOperationAction(ISD::FSUB, MVT::v4f32, Legal);
921 setOperationAction(ISD::FMUL, MVT::v4f32, Legal);
922 setOperationAction(ISD::FDIV, MVT::v4f32, Legal);
923 setOperationAction(ISD::FSQRT, MVT::v4f32, Legal);
924 setOperationAction(ISD::FNEG, MVT::v4f32, Custom);
925 setOperationAction(ISD::FABS, MVT::v4f32, Custom);
926 setOperationAction(ISD::LOAD, MVT::v4f32, Legal);
927 setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom);
928 setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4f32, Custom);
929 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Custom);
930 setOperationAction(ISD::SELECT, MVT::v4f32, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::v4i32, Custom);
  }
934 if (!TM.Options.UseSoftFloat && Subtarget->hasSSE2()) {
935 addRegisterClass(MVT::v2f64, &X86::VR128RegClass);
937 // FIXME: Unfortunately, -soft-float and -no-implicit-float mean XMM
938 // registers cannot be used even for integer operations.
939 addRegisterClass(MVT::v16i8, &X86::VR128RegClass);
940 addRegisterClass(MVT::v8i16, &X86::VR128RegClass);
941 addRegisterClass(MVT::v4i32, &X86::VR128RegClass);
942 addRegisterClass(MVT::v2i64, &X86::VR128RegClass);
944 setOperationAction(ISD::ADD, MVT::v16i8, Legal);
945 setOperationAction(ISD::ADD, MVT::v8i16, Legal);
946 setOperationAction(ISD::ADD, MVT::v4i32, Legal);
947 setOperationAction(ISD::ADD, MVT::v2i64, Legal);
948 setOperationAction(ISD::MUL, MVT::v4i32, Custom);
949 setOperationAction(ISD::MUL, MVT::v2i64, Custom);
950 setOperationAction(ISD::UMUL_LOHI, MVT::v4i32, Custom);
951 setOperationAction(ISD::SMUL_LOHI, MVT::v4i32, Custom);
952 setOperationAction(ISD::MULHU, MVT::v8i16, Legal);
953 setOperationAction(ISD::MULHS, MVT::v8i16, Legal);
954 setOperationAction(ISD::SUB, MVT::v16i8, Legal);
955 setOperationAction(ISD::SUB, MVT::v8i16, Legal);
956 setOperationAction(ISD::SUB, MVT::v4i32, Legal);
957 setOperationAction(ISD::SUB, MVT::v2i64, Legal);
958 setOperationAction(ISD::MUL, MVT::v8i16, Legal);
959 setOperationAction(ISD::FADD, MVT::v2f64, Legal);
960 setOperationAction(ISD::FSUB, MVT::v2f64, Legal);
961 setOperationAction(ISD::FMUL, MVT::v2f64, Legal);
962 setOperationAction(ISD::FDIV, MVT::v2f64, Legal);
963 setOperationAction(ISD::FSQRT, MVT::v2f64, Legal);
964 setOperationAction(ISD::FNEG, MVT::v2f64, Custom);
965 setOperationAction(ISD::FABS, MVT::v2f64, Custom);
967 setOperationAction(ISD::SETCC, MVT::v2i64, Custom);
968 setOperationAction(ISD::SETCC, MVT::v16i8, Custom);
969 setOperationAction(ISD::SETCC, MVT::v8i16, Custom);
970 setOperationAction(ISD::SETCC, MVT::v4i32, Custom);
972 setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v16i8, Custom);
973 setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v8i16, Custom);
974 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i16, Custom);
975 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i32, Custom);
976 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom);
978 // Only provide customized ctpop vector bit twiddling for vector types we
979 // know to perform better than using the popcnt instructions on each vector
980 // element. If popcnt isn't supported, always provide the custom version.
981 if (!Subtarget->hasPOPCNT()) {
982 setOperationAction(ISD::CTPOP, MVT::v4i32, Custom);
      setOperationAction(ISD::CTPOP, MVT::v2i64, Custom);
    }
986 // Custom lower build_vector, vector_shuffle, and extract_vector_elt.
987 for (int i = MVT::v16i8; i != MVT::v2i64; ++i) {
988 MVT VT = (MVT::SimpleValueType)i;
989 // Do not attempt to custom lower non-power-of-2 vectors
      if (!isPowerOf2_32(VT.getVectorNumElements()))
        continue;
992 // Do not attempt to custom lower non-128-bit vectors
      if (!VT.is128BitVector())
        continue;
995 setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
996 setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
    }
1000 // We support custom legalizing of sext and anyext loads for specific
1001 // memory vector types which we can load as a scalar (or sequence of
1002 // scalars) and extend in-register to a legal 128-bit vector type. For sext
1003 // loads these must work with a single scalar load.
1004 for (MVT VT : MVT::integer_vector_valuetypes()) {
1005 setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v4i8, Custom);
1006 setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v4i16, Custom);
1007 setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v8i8, Custom);
1008 setLoadExtAction(ISD::EXTLOAD, VT, MVT::v2i8, Custom);
1009 setLoadExtAction(ISD::EXTLOAD, VT, MVT::v2i16, Custom);
1010 setLoadExtAction(ISD::EXTLOAD, VT, MVT::v2i32, Custom);
1011 setLoadExtAction(ISD::EXTLOAD, VT, MVT::v4i8, Custom);
1012 setLoadExtAction(ISD::EXTLOAD, VT, MVT::v4i16, Custom);
      setLoadExtAction(ISD::EXTLOAD, VT, MVT::v8i8, Custom);
    }
1016 setOperationAction(ISD::BUILD_VECTOR, MVT::v2f64, Custom);
1017 setOperationAction(ISD::BUILD_VECTOR, MVT::v2i64, Custom);
1018 setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2f64, Custom);
1019 setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2i64, Custom);
1020 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2f64, Custom);
1021 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Custom);
1023 if (Subtarget->is64Bit()) {
1024 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2i64, Custom);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i64, Custom);
    }
1028 // Promote v16i8, v8i16, v4i32 load, select, and, or, xor to v2i64.
1029 for (int i = MVT::v16i8; i != MVT::v2i64; ++i) {
1030 MVT VT = (MVT::SimpleValueType)i;
1032 // Do not attempt to promote non-128-bit vectors
      if (!VT.is128BitVector())
        continue;

1036 setOperationAction(ISD::AND, VT, Promote);
1037 AddPromotedToType (ISD::AND, VT, MVT::v2i64);
1038 setOperationAction(ISD::OR, VT, Promote);
1039 AddPromotedToType (ISD::OR, VT, MVT::v2i64);
1040 setOperationAction(ISD::XOR, VT, Promote);
1041 AddPromotedToType (ISD::XOR, VT, MVT::v2i64);
1042 setOperationAction(ISD::LOAD, VT, Promote);
1043 AddPromotedToType (ISD::LOAD, VT, MVT::v2i64);
1044 setOperationAction(ISD::SELECT, VT, Promote);
      AddPromotedToType (ISD::SELECT, VT, MVT::v2i64);
    }
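    // Illustrative effect of the promotions above: (and v16i8 a, b) has its
    // operands bitcast to v2i64, is combined with a single PAND, and the
    // result is bitcast back to v16i8.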
1048 // Custom lower v2i64 and v2f64 selects.
1049 setOperationAction(ISD::LOAD, MVT::v2f64, Legal);
1050 setOperationAction(ISD::LOAD, MVT::v2i64, Legal);
1051 setOperationAction(ISD::SELECT, MVT::v2f64, Custom);
1052 setOperationAction(ISD::SELECT, MVT::v2i64, Custom);
1054 setOperationAction(ISD::FP_TO_SINT, MVT::v4i32, Legal);
1055 setOperationAction(ISD::SINT_TO_FP, MVT::v4i32, Legal);
1057 setOperationAction(ISD::UINT_TO_FP, MVT::v4i8, Custom);
1058 setOperationAction(ISD::UINT_TO_FP, MVT::v4i16, Custom);
1059 // As there is no 64-bit GPR available, we need build a special custom
1060 // sequence to convert from v2i32 to v2f32.
1061 if (!Subtarget->is64Bit())
1062 setOperationAction(ISD::UINT_TO_FP, MVT::v2f32, Custom);
1064 setOperationAction(ISD::FP_EXTEND, MVT::v2f32, Custom);
1065 setOperationAction(ISD::FP_ROUND, MVT::v2f32, Custom);
1067 for (MVT VT : MVT::fp_vector_valuetypes())
1068 setLoadExtAction(ISD::EXTLOAD, VT, MVT::v2f32, Legal);
1070 setOperationAction(ISD::BITCAST, MVT::v2i32, Custom);
1071 setOperationAction(ISD::BITCAST, MVT::v4i16, Custom);
    setOperationAction(ISD::BITCAST, MVT::v8i8, Custom);
  }
1075 if (!TM.Options.UseSoftFloat && Subtarget->hasSSE41()) {
1076 setOperationAction(ISD::FFLOOR, MVT::f32, Legal);
1077 setOperationAction(ISD::FCEIL, MVT::f32, Legal);
1078 setOperationAction(ISD::FTRUNC, MVT::f32, Legal);
1079 setOperationAction(ISD::FRINT, MVT::f32, Legal);
1080 setOperationAction(ISD::FNEARBYINT, MVT::f32, Legal);
1081 setOperationAction(ISD::FFLOOR, MVT::f64, Legal);
1082 setOperationAction(ISD::FCEIL, MVT::f64, Legal);
1083 setOperationAction(ISD::FTRUNC, MVT::f64, Legal);
1084 setOperationAction(ISD::FRINT, MVT::f64, Legal);
1085 setOperationAction(ISD::FNEARBYINT, MVT::f64, Legal);
1087 setOperationAction(ISD::FFLOOR, MVT::v4f32, Legal);
1088 setOperationAction(ISD::FCEIL, MVT::v4f32, Legal);
1089 setOperationAction(ISD::FTRUNC, MVT::v4f32, Legal);
1090 setOperationAction(ISD::FRINT, MVT::v4f32, Legal);
1091 setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Legal);
1092 setOperationAction(ISD::FFLOOR, MVT::v2f64, Legal);
1093 setOperationAction(ISD::FCEIL, MVT::v2f64, Legal);
1094 setOperationAction(ISD::FTRUNC, MVT::v2f64, Legal);
1095 setOperationAction(ISD::FRINT, MVT::v2f64, Legal);
1096 setOperationAction(ISD::FNEARBYINT, MVT::v2f64, Legal);
1098 // FIXME: Do we need to handle scalar-to-vector here?
1099 setOperationAction(ISD::MUL, MVT::v4i32, Legal);
1101 setOperationAction(ISD::VSELECT, MVT::v2f64, Custom);
1102 setOperationAction(ISD::VSELECT, MVT::v2i64, Custom);
1103 setOperationAction(ISD::VSELECT, MVT::v4i32, Custom);
1104 setOperationAction(ISD::VSELECT, MVT::v4f32, Custom);
1105 setOperationAction(ISD::VSELECT, MVT::v8i16, Custom);
1106 // There is no BLENDI for byte vectors. We don't need to custom lower
1107 // some vselects for now.
1108 setOperationAction(ISD::VSELECT, MVT::v16i8, Legal);
1110 // SSE41 brings specific instructions for doing vector sign extend even in
1111 // cases where we don't have SRA.
1112 for (MVT VT : MVT::integer_vector_valuetypes()) {
1113 setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v2i8, Custom);
1114 setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v2i16, Custom);
      setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v2i32, Custom);
    }
1118 // SSE41 also has vector sign/zero extending loads, PMOV[SZ]X
1119 setLoadExtAction(ISD::SEXTLOAD, MVT::v8i16, MVT::v8i8, Legal);
1120 setLoadExtAction(ISD::SEXTLOAD, MVT::v4i32, MVT::v4i8, Legal);
1121 setLoadExtAction(ISD::SEXTLOAD, MVT::v2i64, MVT::v2i8, Legal);
1122 setLoadExtAction(ISD::SEXTLOAD, MVT::v4i32, MVT::v4i16, Legal);
1123 setLoadExtAction(ISD::SEXTLOAD, MVT::v2i64, MVT::v2i16, Legal);
1124 setLoadExtAction(ISD::SEXTLOAD, MVT::v2i64, MVT::v2i32, Legal);
1126 setLoadExtAction(ISD::ZEXTLOAD, MVT::v8i16, MVT::v8i8, Legal);
1127 setLoadExtAction(ISD::ZEXTLOAD, MVT::v4i32, MVT::v4i8, Legal);
1128 setLoadExtAction(ISD::ZEXTLOAD, MVT::v2i64, MVT::v2i8, Legal);
1129 setLoadExtAction(ISD::ZEXTLOAD, MVT::v4i32, MVT::v4i16, Legal);
1130 setLoadExtAction(ISD::ZEXTLOAD, MVT::v2i64, MVT::v2i16, Legal);
1131 setLoadExtAction(ISD::ZEXTLOAD, MVT::v2i64, MVT::v2i32, Legal);
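    // Illustrative selection (informal): with the extending loads above legal,
    // a zero-extending load from <4 x i8> memory into a v4i32 value can match
    // a single PMOVZXBD instead of a scalar load plus shuffles.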
1133 // i8 and i16 vectors are custom because the source register and source
1134 // source memory operand types are not the same width. f32 vectors are
    // custom since the immediate controlling the insert encodes additional
    // information.
1137 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v16i8, Custom);
1138 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i16, Custom);
1139 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i32, Custom);
1140 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom);
1142 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v16i8, Custom);
1143 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v8i16, Custom);
1144 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4i32, Custom);
1145 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Custom);
1147 // FIXME: these should be Legal, but that's only for the case where
1148 // the index is constant. For now custom expand to deal with that.
1149 if (Subtarget->is64Bit()) {
1150 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2i64, Custom);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i64, Custom);
    }
  }
1155 if (Subtarget->hasSSE2()) {
1156 setOperationAction(ISD::SRL, MVT::v8i16, Custom);
1157 setOperationAction(ISD::SRL, MVT::v16i8, Custom);
1159 setOperationAction(ISD::SHL, MVT::v8i16, Custom);
1160 setOperationAction(ISD::SHL, MVT::v16i8, Custom);
1162 setOperationAction(ISD::SRA, MVT::v8i16, Custom);
1163 setOperationAction(ISD::SRA, MVT::v16i8, Custom);
    // In the customized shift lowering, the legal cases in AVX2 will be
    // recognized.
1167 setOperationAction(ISD::SRL, MVT::v2i64, Custom);
1168 setOperationAction(ISD::SRL, MVT::v4i32, Custom);
1170 setOperationAction(ISD::SHL, MVT::v2i64, Custom);
1171 setOperationAction(ISD::SHL, MVT::v4i32, Custom);
    setOperationAction(ISD::SRA, MVT::v4i32, Custom);
  }
1176 if (!TM.Options.UseSoftFloat && Subtarget->hasFp256()) {
1177 addRegisterClass(MVT::v32i8, &X86::VR256RegClass);
1178 addRegisterClass(MVT::v16i16, &X86::VR256RegClass);
1179 addRegisterClass(MVT::v8i32, &X86::VR256RegClass);
1180 addRegisterClass(MVT::v8f32, &X86::VR256RegClass);
1181 addRegisterClass(MVT::v4i64, &X86::VR256RegClass);
1182 addRegisterClass(MVT::v4f64, &X86::VR256RegClass);
1184 setOperationAction(ISD::LOAD, MVT::v8f32, Legal);
1185 setOperationAction(ISD::LOAD, MVT::v4f64, Legal);
1186 setOperationAction(ISD::LOAD, MVT::v4i64, Legal);
1188 setOperationAction(ISD::FADD, MVT::v8f32, Legal);
1189 setOperationAction(ISD::FSUB, MVT::v8f32, Legal);
1190 setOperationAction(ISD::FMUL, MVT::v8f32, Legal);
1191 setOperationAction(ISD::FDIV, MVT::v8f32, Legal);
1192 setOperationAction(ISD::FSQRT, MVT::v8f32, Legal);
1193 setOperationAction(ISD::FFLOOR, MVT::v8f32, Legal);
1194 setOperationAction(ISD::FCEIL, MVT::v8f32, Legal);
1195 setOperationAction(ISD::FTRUNC, MVT::v8f32, Legal);
1196 setOperationAction(ISD::FRINT, MVT::v8f32, Legal);
1197 setOperationAction(ISD::FNEARBYINT, MVT::v8f32, Legal);
1198 setOperationAction(ISD::FNEG, MVT::v8f32, Custom);
1199 setOperationAction(ISD::FABS, MVT::v8f32, Custom);
1201 setOperationAction(ISD::FADD, MVT::v4f64, Legal);
1202 setOperationAction(ISD::FSUB, MVT::v4f64, Legal);
1203 setOperationAction(ISD::FMUL, MVT::v4f64, Legal);
1204 setOperationAction(ISD::FDIV, MVT::v4f64, Legal);
1205 setOperationAction(ISD::FSQRT, MVT::v4f64, Legal);
1206 setOperationAction(ISD::FFLOOR, MVT::v4f64, Legal);
1207 setOperationAction(ISD::FCEIL, MVT::v4f64, Legal);
1208 setOperationAction(ISD::FTRUNC, MVT::v4f64, Legal);
1209 setOperationAction(ISD::FRINT, MVT::v4f64, Legal);
1210 setOperationAction(ISD::FNEARBYINT, MVT::v4f64, Legal);
1211 setOperationAction(ISD::FNEG, MVT::v4f64, Custom);
1212 setOperationAction(ISD::FABS, MVT::v4f64, Custom);
1214 // (fp_to_int:v8i16 (v8f32 ..)) requires the result type to be promoted
1215 // even though v8i16 is a legal type.
1216 setOperationAction(ISD::FP_TO_SINT, MVT::v8i16, Promote);
1217 setOperationAction(ISD::FP_TO_UINT, MVT::v8i16, Promote);
1218 setOperationAction(ISD::FP_TO_SINT, MVT::v8i32, Legal);
1220 setOperationAction(ISD::SINT_TO_FP, MVT::v8i16, Promote);
1221 setOperationAction(ISD::SINT_TO_FP, MVT::v8i32, Legal);
1222 setOperationAction(ISD::FP_ROUND, MVT::v4f32, Legal);
1224 setOperationAction(ISD::UINT_TO_FP, MVT::v8i8, Custom);
1225 setOperationAction(ISD::UINT_TO_FP, MVT::v8i16, Custom);
1227 for (MVT VT : MVT::fp_vector_valuetypes())
1228 setLoadExtAction(ISD::EXTLOAD, VT, MVT::v4f32, Legal);
1230 setOperationAction(ISD::SRL, MVT::v16i16, Custom);
1231 setOperationAction(ISD::SRL, MVT::v32i8, Custom);
1233 setOperationAction(ISD::SHL, MVT::v16i16, Custom);
1234 setOperationAction(ISD::SHL, MVT::v32i8, Custom);
1236 setOperationAction(ISD::SRA, MVT::v16i16, Custom);
1237 setOperationAction(ISD::SRA, MVT::v32i8, Custom);
1239 setOperationAction(ISD::SETCC, MVT::v32i8, Custom);
1240 setOperationAction(ISD::SETCC, MVT::v16i16, Custom);
1241 setOperationAction(ISD::SETCC, MVT::v8i32, Custom);
1242 setOperationAction(ISD::SETCC, MVT::v4i64, Custom);
1244 setOperationAction(ISD::SELECT, MVT::v4f64, Custom);
1245 setOperationAction(ISD::SELECT, MVT::v4i64, Custom);
1246 setOperationAction(ISD::SELECT, MVT::v8f32, Custom);
1248 setOperationAction(ISD::VSELECT, MVT::v4f64, Custom);
1249 setOperationAction(ISD::VSELECT, MVT::v4i64, Custom);
1250 setOperationAction(ISD::VSELECT, MVT::v8i32, Custom);
1251 setOperationAction(ISD::VSELECT, MVT::v8f32, Custom);
1253 setOperationAction(ISD::SIGN_EXTEND, MVT::v4i64, Custom);
1254 setOperationAction(ISD::SIGN_EXTEND, MVT::v8i32, Custom);
1255 setOperationAction(ISD::SIGN_EXTEND, MVT::v16i16, Custom);
1256 setOperationAction(ISD::ZERO_EXTEND, MVT::v4i64, Custom);
1257 setOperationAction(ISD::ZERO_EXTEND, MVT::v8i32, Custom);
1258 setOperationAction(ISD::ZERO_EXTEND, MVT::v16i16, Custom);
1259 setOperationAction(ISD::ANY_EXTEND, MVT::v4i64, Custom);
1260 setOperationAction(ISD::ANY_EXTEND, MVT::v8i32, Custom);
1261 setOperationAction(ISD::ANY_EXTEND, MVT::v16i16, Custom);
1262 setOperationAction(ISD::TRUNCATE, MVT::v16i8, Custom);
1263 setOperationAction(ISD::TRUNCATE, MVT::v8i16, Custom);
1264 setOperationAction(ISD::TRUNCATE, MVT::v4i32, Custom);
1266 if (Subtarget->hasFMA() || Subtarget->hasFMA4()) {
1267 setOperationAction(ISD::FMA, MVT::v8f32, Legal);
1268 setOperationAction(ISD::FMA, MVT::v4f64, Legal);
1269 setOperationAction(ISD::FMA, MVT::v4f32, Legal);
1270 setOperationAction(ISD::FMA, MVT::v2f64, Legal);
1271 setOperationAction(ISD::FMA, MVT::f32, Legal);
      setOperationAction(ISD::FMA, MVT::f64, Legal);
    }
1275 if (Subtarget->hasInt256()) {
1276 setOperationAction(ISD::ADD, MVT::v4i64, Legal);
1277 setOperationAction(ISD::ADD, MVT::v8i32, Legal);
1278 setOperationAction(ISD::ADD, MVT::v16i16, Legal);
1279 setOperationAction(ISD::ADD, MVT::v32i8, Legal);
1281 setOperationAction(ISD::SUB, MVT::v4i64, Legal);
1282 setOperationAction(ISD::SUB, MVT::v8i32, Legal);
1283 setOperationAction(ISD::SUB, MVT::v16i16, Legal);
1284 setOperationAction(ISD::SUB, MVT::v32i8, Legal);
1286 setOperationAction(ISD::MUL, MVT::v4i64, Custom);
1287 setOperationAction(ISD::MUL, MVT::v8i32, Legal);
1288 setOperationAction(ISD::MUL, MVT::v16i16, Legal);
1289 // Don't lower v32i8 because there is no 128-bit byte mul
1291 setOperationAction(ISD::UMUL_LOHI, MVT::v8i32, Custom);
1292 setOperationAction(ISD::SMUL_LOHI, MVT::v8i32, Custom);
1293 setOperationAction(ISD::MULHU, MVT::v16i16, Legal);
1294 setOperationAction(ISD::MULHS, MVT::v16i16, Legal);
1296 setOperationAction(ISD::VSELECT, MVT::v16i16, Custom);
1297 setOperationAction(ISD::VSELECT, MVT::v32i8, Legal);
1299 // The custom lowering for UINT_TO_FP for v8i32 becomes interesting
1300 // when we have a 256-bit-wide blend with immediate.
1301 setOperationAction(ISD::UINT_TO_FP, MVT::v8i32, Custom);
1303 // Only provide customized ctpop vector bit twiddling for vector types we
1304 // know to perform better than using the popcnt instructions on each
1305 // vector element. If popcnt isn't supported, always provide the custom version.
1307 if (!Subtarget->hasPOPCNT())
1308 setOperationAction(ISD::CTPOP, MVT::v4i64, Custom);
1310 // Custom CTPOP always performs better on natively supported v8i32
1311 setOperationAction(ISD::CTPOP, MVT::v8i32, Custom);
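// For illustration: with the hooks above, IR such as
//   %c = call <8 x i32> @llvm.ctpop.v8i32(<8 x i32> %v)
// goes through the custom vector popcount lowering instead of being
// scalarized into one POPCNT (or bit-twiddling sequence) per element.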
1313 // AVX2 also has wider vector sign/zero extending loads, VPMOV[SZ]X
1314 setLoadExtAction(ISD::SEXTLOAD, MVT::v16i16, MVT::v16i8, Legal);
1315 setLoadExtAction(ISD::SEXTLOAD, MVT::v8i32, MVT::v8i8, Legal);
1316 setLoadExtAction(ISD::SEXTLOAD, MVT::v4i64, MVT::v4i8, Legal);
1317 setLoadExtAction(ISD::SEXTLOAD, MVT::v8i32, MVT::v8i16, Legal);
1318 setLoadExtAction(ISD::SEXTLOAD, MVT::v4i64, MVT::v4i16, Legal);
1319 setLoadExtAction(ISD::SEXTLOAD, MVT::v4i64, MVT::v4i32, Legal);
1321 setLoadExtAction(ISD::ZEXTLOAD, MVT::v16i16, MVT::v16i8, Legal);
1322 setLoadExtAction(ISD::ZEXTLOAD, MVT::v8i32, MVT::v8i8, Legal);
1323 setLoadExtAction(ISD::ZEXTLOAD, MVT::v4i64, MVT::v4i8, Legal);
1324 setLoadExtAction(ISD::ZEXTLOAD, MVT::v8i32, MVT::v8i16, Legal);
1325 setLoadExtAction(ISD::ZEXTLOAD, MVT::v4i64, MVT::v4i16, Legal);
1326 setLoadExtAction(ISD::ZEXTLOAD, MVT::v4i64, MVT::v4i32, Legal);
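// Marking these extending loads Legal lets instruction selection fold the
// extension into the load itself; e.g. a zero-extending load of <8 x i8>
// into <8 x i32> can become a single memory-operand VPMOVZXBD rather than a
// load followed by a separate extend.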
1328 setOperationAction(ISD::ADD, MVT::v4i64, Custom);
1329 setOperationAction(ISD::ADD, MVT::v8i32, Custom);
1330 setOperationAction(ISD::ADD, MVT::v16i16, Custom);
1331 setOperationAction(ISD::ADD, MVT::v32i8, Custom);
1333 setOperationAction(ISD::SUB, MVT::v4i64, Custom);
1334 setOperationAction(ISD::SUB, MVT::v8i32, Custom);
1335 setOperationAction(ISD::SUB, MVT::v16i16, Custom);
1336 setOperationAction(ISD::SUB, MVT::v32i8, Custom);
1338 setOperationAction(ISD::MUL, MVT::v4i64, Custom);
1339 setOperationAction(ISD::MUL, MVT::v8i32, Custom);
1340 setOperationAction(ISD::MUL, MVT::v16i16, Custom);
1341 // Don't lower v32i8 because there is no 128-bit byte mul
1344 // In the customized shift lowering, the legal cases in AVX2 will be recognized.
1346 setOperationAction(ISD::SRL, MVT::v4i64, Custom);
1347 setOperationAction(ISD::SRL, MVT::v8i32, Custom);
1349 setOperationAction(ISD::SHL, MVT::v4i64, Custom);
1350 setOperationAction(ISD::SHL, MVT::v8i32, Custom);
1352 setOperationAction(ISD::SRA, MVT::v8i32, Custom);
1354 // Custom lower several nodes for 256-bit types.
1355 for (MVT VT : MVT::vector_valuetypes()) {
1356 if (VT.getScalarSizeInBits() >= 32) {
1357 setOperationAction(ISD::MLOAD, VT, Legal);
1358 setOperationAction(ISD::MSTORE, VT, Legal);
1360 // Extract subvector is special because the value type
1361 // (result) is 128-bit but the source is 256-bit wide.
1362 if (VT.is128BitVector()) {
1363 setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
1365 // Do not attempt to custom lower other non-256-bit vectors
1366 if (!VT.is256BitVector())
1369 setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
1370 setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
1371 setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
1372 setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
1373 setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Custom);
1374 setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
1375 setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
1378 // Promote v32i8, v16i16, v8i32 select, and, or, xor to v4i64.
1379 for (int i = MVT::v32i8; i != MVT::v4i64; ++i) {
1380 MVT VT = (MVT::SimpleValueType)i;
1382 // Do not attempt to promote non-256-bit vectors
1383 if (!VT.is256BitVector())
1386 setOperationAction(ISD::AND, VT, Promote);
1387 AddPromotedToType (ISD::AND, VT, MVT::v4i64);
1388 setOperationAction(ISD::OR, VT, Promote);
1389 AddPromotedToType (ISD::OR, VT, MVT::v4i64);
1390 setOperationAction(ISD::XOR, VT, Promote);
1391 AddPromotedToType (ISD::XOR, VT, MVT::v4i64);
1392 setOperationAction(ISD::LOAD, VT, Promote);
1393 AddPromotedToType (ISD::LOAD, VT, MVT::v4i64);
1394 setOperationAction(ISD::SELECT, VT, Promote);
1395 AddPromotedToType (ISD::SELECT, VT, MVT::v4i64);
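// For illustration of the promotion above: an AND of two v8i32 values is
// bitcast to v4i64, performed as a v4i64 AND (the underlying VPAND is purely
// bitwise), and the result is bitcast back to v8i32; the promotion machinery
// inserts those bitcasts automatically.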
1399 if (!TM.Options.UseSoftFloat && Subtarget->hasAVX512()) {
1400 addRegisterClass(MVT::v16i32, &X86::VR512RegClass);
1401 addRegisterClass(MVT::v16f32, &X86::VR512RegClass);
1402 addRegisterClass(MVT::v8i64, &X86::VR512RegClass);
1403 addRegisterClass(MVT::v8f64, &X86::VR512RegClass);
1405 addRegisterClass(MVT::i1, &X86::VK1RegClass);
1406 addRegisterClass(MVT::v8i1, &X86::VK8RegClass);
1407 addRegisterClass(MVT::v16i1, &X86::VK16RegClass);
1409 for (MVT VT : MVT::fp_vector_valuetypes())
1410 setLoadExtAction(ISD::EXTLOAD, VT, MVT::v8f32, Legal);
1412 setOperationAction(ISD::BR_CC, MVT::i1, Expand);
1413 setOperationAction(ISD::SETCC, MVT::i1, Custom);
1414 setOperationAction(ISD::XOR, MVT::i1, Legal);
1415 setOperationAction(ISD::OR, MVT::i1, Legal);
1416 setOperationAction(ISD::AND, MVT::i1, Legal);
1417 setOperationAction(ISD::LOAD, MVT::v16f32, Legal);
1418 setOperationAction(ISD::LOAD, MVT::v8f64, Legal);
1419 setOperationAction(ISD::LOAD, MVT::v8i64, Legal);
1420 setOperationAction(ISD::LOAD, MVT::v16i32, Legal);
1421 setOperationAction(ISD::LOAD, MVT::v16i1, Legal);
1423 setOperationAction(ISD::FADD, MVT::v16f32, Legal);
1424 setOperationAction(ISD::FSUB, MVT::v16f32, Legal);
1425 setOperationAction(ISD::FMUL, MVT::v16f32, Legal);
1426 setOperationAction(ISD::FDIV, MVT::v16f32, Legal);
1427 setOperationAction(ISD::FSQRT, MVT::v16f32, Legal);
1428 setOperationAction(ISD::FNEG, MVT::v16f32, Custom);
1430 setOperationAction(ISD::FADD, MVT::v8f64, Legal);
1431 setOperationAction(ISD::FSUB, MVT::v8f64, Legal);
1432 setOperationAction(ISD::FMUL, MVT::v8f64, Legal);
1433 setOperationAction(ISD::FDIV, MVT::v8f64, Legal);
1434 setOperationAction(ISD::FSQRT, MVT::v8f64, Legal);
1435 setOperationAction(ISD::FNEG, MVT::v8f64, Custom);
1436 setOperationAction(ISD::FMA, MVT::v8f64, Legal);
1437 setOperationAction(ISD::FMA, MVT::v16f32, Legal);
1439 setOperationAction(ISD::FP_TO_SINT, MVT::i32, Legal);
1440 setOperationAction(ISD::FP_TO_UINT, MVT::i32, Legal);
1441 setOperationAction(ISD::SINT_TO_FP, MVT::i32, Legal);
1442 setOperationAction(ISD::UINT_TO_FP, MVT::i32, Legal);
1443 if (Subtarget->is64Bit()) {
1444 setOperationAction(ISD::FP_TO_UINT, MVT::i64, Legal);
1445 setOperationAction(ISD::FP_TO_SINT, MVT::i64, Legal);
1446 setOperationAction(ISD::SINT_TO_FP, MVT::i64, Legal);
1447 setOperationAction(ISD::UINT_TO_FP, MVT::i64, Legal);
1449 setOperationAction(ISD::FP_TO_SINT, MVT::v16i32, Legal);
1450 setOperationAction(ISD::FP_TO_UINT, MVT::v16i32, Legal);
1451 setOperationAction(ISD::FP_TO_UINT, MVT::v8i32, Legal);
1452 setOperationAction(ISD::FP_TO_UINT, MVT::v4i32, Legal);
1453 setOperationAction(ISD::SINT_TO_FP, MVT::v16i32, Legal);
1454 setOperationAction(ISD::SINT_TO_FP, MVT::v8i1, Custom);
1455 setOperationAction(ISD::SINT_TO_FP, MVT::v16i1, Custom);
1456 setOperationAction(ISD::SINT_TO_FP, MVT::v16i8, Promote);
1457 setOperationAction(ISD::SINT_TO_FP, MVT::v16i16, Promote);
1458 setOperationAction(ISD::UINT_TO_FP, MVT::v16i32, Legal);
1459 setOperationAction(ISD::UINT_TO_FP, MVT::v8i32, Legal);
1460 setOperationAction(ISD::UINT_TO_FP, MVT::v4i32, Legal);
1461 setOperationAction(ISD::FP_ROUND, MVT::v8f32, Legal);
1462 setOperationAction(ISD::FP_EXTEND, MVT::v8f32, Legal);
1464 setOperationAction(ISD::TRUNCATE, MVT::i1, Custom);
1465 setOperationAction(ISD::TRUNCATE, MVT::v16i8, Custom);
1466 setOperationAction(ISD::TRUNCATE, MVT::v8i32, Custom);
1467 setOperationAction(ISD::TRUNCATE, MVT::v8i1, Custom);
1468 setOperationAction(ISD::TRUNCATE, MVT::v16i1, Custom);
1469 setOperationAction(ISD::TRUNCATE, MVT::v16i16, Custom);
1470 setOperationAction(ISD::ZERO_EXTEND, MVT::v16i32, Custom);
1471 setOperationAction(ISD::ZERO_EXTEND, MVT::v8i64, Custom);
1472 setOperationAction(ISD::SIGN_EXTEND, MVT::v16i32, Custom);
1473 setOperationAction(ISD::SIGN_EXTEND, MVT::v8i64, Custom);
1474 setOperationAction(ISD::SIGN_EXTEND, MVT::v16i8, Custom);
1475 setOperationAction(ISD::SIGN_EXTEND, MVT::v8i16, Custom);
1476 setOperationAction(ISD::SIGN_EXTEND, MVT::v16i16, Custom);
1478 setOperationAction(ISD::CONCAT_VECTORS, MVT::v8f64, Custom);
1479 setOperationAction(ISD::CONCAT_VECTORS, MVT::v8i64, Custom);
1480 setOperationAction(ISD::CONCAT_VECTORS, MVT::v16f32, Custom);
1481 setOperationAction(ISD::CONCAT_VECTORS, MVT::v16i32, Custom);
1482 setOperationAction(ISD::CONCAT_VECTORS, MVT::v8i1, Custom);
1483 setOperationAction(ISD::CONCAT_VECTORS, MVT::v16i1, Legal);
1485 setOperationAction(ISD::SETCC, MVT::v16i1, Custom);
1486 setOperationAction(ISD::SETCC, MVT::v8i1, Custom);
1488 setOperationAction(ISD::MUL, MVT::v8i64, Custom);
1490 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v8i1, Custom);
1491 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v16i1, Custom);
1492 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v16i1, Custom);
1493 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i1, Custom);
1494 setOperationAction(ISD::BUILD_VECTOR, MVT::v8i1, Custom);
1495 setOperationAction(ISD::BUILD_VECTOR, MVT::v16i1, Custom);
1496 setOperationAction(ISD::SELECT, MVT::v8f64, Custom);
1497 setOperationAction(ISD::SELECT, MVT::v8i64, Custom);
1498 setOperationAction(ISD::SELECT, MVT::v16f32, Custom);
1500 setOperationAction(ISD::ADD, MVT::v8i64, Legal);
1501 setOperationAction(ISD::ADD, MVT::v16i32, Legal);
1503 setOperationAction(ISD::SUB, MVT::v8i64, Legal);
1504 setOperationAction(ISD::SUB, MVT::v16i32, Legal);
1506 setOperationAction(ISD::MUL, MVT::v16i32, Legal);
1508 setOperationAction(ISD::SRL, MVT::v8i64, Custom);
1509 setOperationAction(ISD::SRL, MVT::v16i32, Custom);
1511 setOperationAction(ISD::SHL, MVT::v8i64, Custom);
1512 setOperationAction(ISD::SHL, MVT::v16i32, Custom);
1514 setOperationAction(ISD::SRA, MVT::v8i64, Custom);
1515 setOperationAction(ISD::SRA, MVT::v16i32, Custom);
1517 setOperationAction(ISD::AND, MVT::v8i64, Legal);
1518 setOperationAction(ISD::OR, MVT::v8i64, Legal);
1519 setOperationAction(ISD::XOR, MVT::v8i64, Legal);
1520 setOperationAction(ISD::AND, MVT::v16i32, Legal);
1521 setOperationAction(ISD::OR, MVT::v16i32, Legal);
1522 setOperationAction(ISD::XOR, MVT::v16i32, Legal);
1524 if (Subtarget->hasCDI()) {
1525 setOperationAction(ISD::CTLZ, MVT::v8i64, Legal);
1526 setOperationAction(ISD::CTLZ, MVT::v16i32, Legal);
1529 // Custom lower several nodes.
1530 for (MVT VT : MVT::vector_valuetypes()) {
1531 unsigned EltSize = VT.getVectorElementType().getSizeInBits();
1532 // Extract subvector is special because the value type
1533 // (result) is 256/128-bit but the source is 512-bit wide.
1534 if (VT.is128BitVector() || VT.is256BitVector()) {
1535 setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
1537 if (VT.getVectorElementType() == MVT::i1)
1538 setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Legal);
1540 // Do not attempt to custom lower other non-512-bit vectors
1541 if (!VT.is512BitVector())
1544 if (EltSize >= 32) {
1545 setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
1546 setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
1547 setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
1548 setOperationAction(ISD::VSELECT, VT, Legal);
1549 setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
1550 setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Custom);
1551 setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
1552 setOperationAction(ISD::MLOAD, VT, Legal);
1553 setOperationAction(ISD::MSTORE, VT, Legal);
1556 for (int i = MVT::v32i8; i != MVT::v8i64; ++i) {
1557 MVT VT = (MVT::SimpleValueType)i;
1559 // Do not attempt to promote non-512-bit vectors.
1560 if (!VT.is512BitVector())
1563 setOperationAction(ISD::SELECT, VT, Promote);
1564 AddPromotedToType (ISD::SELECT, VT, MVT::v8i64);
1568 if (!TM.Options.UseSoftFloat && Subtarget->hasBWI()) {
1569 addRegisterClass(MVT::v32i16, &X86::VR512RegClass);
1570 addRegisterClass(MVT::v64i8, &X86::VR512RegClass);
1572 addRegisterClass(MVT::v32i1, &X86::VK32RegClass);
1573 addRegisterClass(MVT::v64i1, &X86::VK64RegClass);
1575 setOperationAction(ISD::LOAD, MVT::v32i16, Legal);
1576 setOperationAction(ISD::LOAD, MVT::v64i8, Legal);
1577 setOperationAction(ISD::SETCC, MVT::v32i1, Custom);
1578 setOperationAction(ISD::SETCC, MVT::v64i1, Custom);
1579 setOperationAction(ISD::ADD, MVT::v32i16, Legal);
1580 setOperationAction(ISD::ADD, MVT::v64i8, Legal);
1581 setOperationAction(ISD::SUB, MVT::v32i16, Legal);
1582 setOperationAction(ISD::SUB, MVT::v64i8, Legal);
1583 setOperationAction(ISD::MUL, MVT::v32i16, Legal);
1585 for (int i = MVT::v32i8; i != MVT::v8i64; ++i) {
1586 const MVT VT = (MVT::SimpleValueType)i;
1588 const unsigned EltSize = VT.getVectorElementType().getSizeInBits();
1590 // Do not attempt to promote non-512-bit vectors.
1591 if (!VT.is512BitVector())
1595 setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
1596 setOperationAction(ISD::VSELECT, VT, Legal);
1601 if (!TM.Options.UseSoftFloat && Subtarget->hasVLX()) {
1602 addRegisterClass(MVT::v4i1, &X86::VK4RegClass);
1603 addRegisterClass(MVT::v2i1, &X86::VK2RegClass);
1605 setOperationAction(ISD::SETCC, MVT::v4i1, Custom);
1606 setOperationAction(ISD::SETCC, MVT::v2i1, Custom);
1607 setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v8i1, Legal);
1609 setOperationAction(ISD::AND, MVT::v8i32, Legal);
1610 setOperationAction(ISD::OR, MVT::v8i32, Legal);
1611 setOperationAction(ISD::XOR, MVT::v8i32, Legal);
1612 setOperationAction(ISD::AND, MVT::v4i32, Legal);
1613 setOperationAction(ISD::OR, MVT::v4i32, Legal);
1614 setOperationAction(ISD::XOR, MVT::v4i32, Legal);
1617 // SIGN_EXTEND_INREGs are evaluated by the extend type. Handle the expansion
1618 // of this type with custom code.
1619 for (MVT VT : MVT::vector_valuetypes())
1620 setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Custom);
1622 // We want to custom lower some of our intrinsics.
1623 setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
1624 setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);
1625 setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);
1626 if (!Subtarget->is64Bit())
1627 setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i64, Custom);
1629 // Only custom-lower 64-bit SADDO and friends on 64-bit because we don't
1630 // handle type legalization for these operations here.
1632 // FIXME: We really should do custom legalization for addition and
1633 // subtraction on x86-32 once PR3203 is fixed. We really can't do much better
1634 // than generic legalization for 64-bit multiplication-with-overflow, though.
1635 for (unsigned i = 0, e = 3+Subtarget->is64Bit(); i != e; ++i) {
1636 // Add/Sub/Mul with overflow operations are custom lowered.
1638 setOperationAction(ISD::SADDO, VT, Custom);
1639 setOperationAction(ISD::UADDO, VT, Custom);
1640 setOperationAction(ISD::SSUBO, VT, Custom);
1641 setOperationAction(ISD::USUBO, VT, Custom);
1642 setOperationAction(ISD::SMULO, VT, Custom);
1643 setOperationAction(ISD::UMULO, VT, Custom);
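// For example, an overflow intrinsic such as
//   %r = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %a, i32 %b)
// is custom lowered so the i1 overflow result can be taken straight from
// EFLAGS (e.g. via SETO for signed-add overflow) rather than recomputed with
// separate compares.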
1647 if (!Subtarget->is64Bit()) {
1648 // These libcalls are not available in 32-bit.
1649 setLibcallName(RTLIB::SHL_I128, nullptr);
1650 setLibcallName(RTLIB::SRL_I128, nullptr);
1651 setLibcallName(RTLIB::SRA_I128, nullptr);
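// With the names cleared, the legalizer cannot emit these runtime calls and
// instead expands oversized shifts through the generic shift-parts path;
// helpers such as __ashlti3 are generally not provided by 32-bit runtimes.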
1654 // Combine sin / cos into one node or libcall if possible.
1655 if (Subtarget->hasSinCos()) {
1656 setLibcallName(RTLIB::SINCOS_F32, "sincosf");
1657 setLibcallName(RTLIB::SINCOS_F64, "sincos");
1658 if (Subtarget->isTargetDarwin()) {
1659 // For MacOSX, we don't want the normal expansion of a libcall to sincos.
1660 // We want to issue a libcall to __sincos_stret to avoid memory traffic.
1661 setOperationAction(ISD::FSINCOS, MVT::f64, Custom);
1662 setOperationAction(ISD::FSINCOS, MVT::f32, Custom);
1666 if (Subtarget->isTargetWin64()) {
1667 setOperationAction(ISD::SDIV, MVT::i128, Custom);
1668 setOperationAction(ISD::UDIV, MVT::i128, Custom);
1669 setOperationAction(ISD::SREM, MVT::i128, Custom);
1670 setOperationAction(ISD::UREM, MVT::i128, Custom);
1671 setOperationAction(ISD::SDIVREM, MVT::i128, Custom);
1672 setOperationAction(ISD::UDIVREM, MVT::i128, Custom);
1675 // We have target-specific dag combine patterns for the following nodes:
1676 setTargetDAGCombine(ISD::VECTOR_SHUFFLE);
1677 setTargetDAGCombine(ISD::EXTRACT_VECTOR_ELT);
1678 setTargetDAGCombine(ISD::VSELECT);
1679 setTargetDAGCombine(ISD::SELECT);
1680 setTargetDAGCombine(ISD::SHL);
1681 setTargetDAGCombine(ISD::SRA);
1682 setTargetDAGCombine(ISD::SRL);
1683 setTargetDAGCombine(ISD::OR);
1684 setTargetDAGCombine(ISD::AND);
1685 setTargetDAGCombine(ISD::ADD);
1686 setTargetDAGCombine(ISD::FADD);
1687 setTargetDAGCombine(ISD::FSUB);
1688 setTargetDAGCombine(ISD::FMA);
1689 setTargetDAGCombine(ISD::SUB);
1690 setTargetDAGCombine(ISD::LOAD);
1691 setTargetDAGCombine(ISD::MLOAD);
1692 setTargetDAGCombine(ISD::STORE);
1693 setTargetDAGCombine(ISD::MSTORE);
1694 setTargetDAGCombine(ISD::ZERO_EXTEND);
1695 setTargetDAGCombine(ISD::ANY_EXTEND);
1696 setTargetDAGCombine(ISD::SIGN_EXTEND);
1697 setTargetDAGCombine(ISD::SIGN_EXTEND_INREG);
1698 setTargetDAGCombine(ISD::TRUNCATE);
1699 setTargetDAGCombine(ISD::SINT_TO_FP);
1700 setTargetDAGCombine(ISD::SETCC);
1701 setTargetDAGCombine(ISD::INTRINSIC_WO_CHAIN);
1702 setTargetDAGCombine(ISD::BUILD_VECTOR);
1703 setTargetDAGCombine(ISD::MUL);
1704 setTargetDAGCombine(ISD::XOR);
1706 computeRegisterProperties();
1708 // On Darwin, -Os means optimize for size without hurting performance,
1709 // so do not reduce the limit.
1710 MaxStoresPerMemset = 16; // For @llvm.memset -> sequence of stores
1711 MaxStoresPerMemsetOptSize = Subtarget->isTargetDarwin() ? 16 : 8;
1712 MaxStoresPerMemcpy = 8; // For @llvm.memcpy -> sequence of stores
1713 MaxStoresPerMemcpyOptSize = Subtarget->isTargetDarwin() ? 8 : 4;
1714 MaxStoresPerMemmove = 8; // For @llvm.memmove -> sequence of stores
1715 MaxStoresPerMemmoveOptSize = Subtarget->isTargetDarwin() ? 8 : 4;
1716 setPrefLoopAlignment(4); // 2^4 bytes.
1718 // Predictable cmovs don't hurt on Atom because it's in-order.
1719 PredictableSelectIsExpensive = !Subtarget->isAtom();
1720 EnableExtLdPromotion = true;
1721 setPrefFunctionAlignment(4); // 2^4 bytes.
1723 verifyIntrinsicTables();
1726 // This has so far only been implemented for 64-bit MachO.
1727 bool X86TargetLowering::useLoadStackGuardNode() const {
1728 return Subtarget->isTargetMachO() && Subtarget->is64Bit();
1731 TargetLoweringBase::LegalizeTypeAction
1732 X86TargetLowering::getPreferredVectorAction(EVT VT) const {
1733 if (ExperimentalVectorWideningLegalization &&
1734 VT.getVectorNumElements() != 1 &&
1735 VT.getVectorElementType().getSimpleVT() != MVT::i1)
1736 return TypeWidenVector;
1738 return TargetLoweringBase::getPreferredVectorAction(VT);
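// Under the experimental widening flag, illegal vector types (other than
// single-element or i1-element vectors) are legalized by widening; for
// instance, <2 x i16> would be widened to <8 x i16> instead of having its
// elements promoted.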
1741 EVT X86TargetLowering::getSetCCResultType(LLVMContext &, EVT VT) const {
1743 return Subtarget->hasAVX512() ? MVT::i1: MVT::i8;
1745 const unsigned NumElts = VT.getVectorNumElements();
1746 const EVT EltVT = VT.getVectorElementType();
1747 if (VT.is512BitVector()) {
1748 if (Subtarget->hasAVX512())
1749 if (EltVT == MVT::i32 || EltVT == MVT::i64 ||
1750 EltVT == MVT::f32 || EltVT == MVT::f64)
1752 case 8: return MVT::v8i1;
1753 case 16: return MVT::v16i1;
1755 if (Subtarget->hasBWI())
1756 if (EltVT == MVT::i8 || EltVT == MVT::i16)
1758 case 32: return MVT::v32i1;
1759 case 64: return MVT::v64i1;
1763 if (VT.is256BitVector() || VT.is128BitVector()) {
1764 if (Subtarget->hasVLX())
1765 if (EltVT == MVT::i32 || EltVT == MVT::i64 ||
1766 EltVT == MVT::f32 || EltVT == MVT::f64)
1768 case 2: return MVT::v2i1;
1769 case 4: return MVT::v4i1;
1770 case 8: return MVT::v8i1;
1772 if (Subtarget->hasBWI() && Subtarget->hasVLX())
1773 if (EltVT == MVT::i8 || EltVT == MVT::i16)
1775 case 8: return MVT::v8i1;
1776 case 16: return MVT::v16i1;
1777 case 32: return MVT::v32i1;
1781 return VT.changeVectorElementTypeToInteger();
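// Net effect: scalar compares yield i1 with AVX-512 and i8 otherwise;
// 512-bit compares yield mask vectors such as v16i1, while pre-AVX-512
// vector compares yield a same-width integer vector (e.g. v4f32 -> v4i32).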
1784 /// Helper for getByValTypeAlignment to determine
1785 /// the desired ByVal argument alignment.
1786 static void getMaxByValAlign(Type *Ty, unsigned &MaxAlign) {
1789 if (VectorType *VTy = dyn_cast<VectorType>(Ty)) {
1790 if (VTy->getBitWidth() == 128)
1792 } else if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
1793 unsigned EltAlign = 0;
1794 getMaxByValAlign(ATy->getElementType(), EltAlign);
1795 if (EltAlign > MaxAlign)
1796 MaxAlign = EltAlign;
1797 } else if (StructType *STy = dyn_cast<StructType>(Ty)) {
1798 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
1799 unsigned EltAlign = 0;
1800 getMaxByValAlign(STy->getElementType(i), EltAlign);
1801 if (EltAlign > MaxAlign)
1802 MaxAlign = EltAlign;
1809 /// Return the desired alignment for ByVal aggregate
1810 /// function arguments in the caller parameter area. For X86, aggregates
1811 /// that contain SSE vectors are placed at 16-byte boundaries while the rest
1812 /// are at 4-byte boundaries.
1813 unsigned X86TargetLowering::getByValTypeAlignment(Type *Ty) const {
1814 if (Subtarget->is64Bit()) {
1815 // Max of 8 and alignment of type.
1816 unsigned TyAlign = TD->getABITypeAlignment(Ty);
1823 if (Subtarget->hasSSE1())
1824 getMaxByValAlign(Ty, Align);
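// For example, on 32-bit a byval struct containing a <4 x float> field ends
// up 16-byte aligned, while an aggregate of plain scalars stays at the
// 4-byte default.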
1828 /// Returns the target-specific optimal type for load
1829 /// and store operations as a result of memset, memcpy, and memmove
1830 /// lowering. If DstAlign is zero, the destination alignment can satisfy any
1831 /// constraint. Similarly, if SrcAlign is zero, there is no need to check it
1832 /// against an alignment requirement,
1833 /// probably because the source does not need to be loaded. If 'IsMemset' is
1834 /// true, that means it's expanding a memset. If 'ZeroMemset' is true, that
1835 /// means it's a memset of zero. 'MemcpyStrSrc' indicates whether the memcpy
1836 /// source is constant so it does not need to be loaded.
1837 /// It returns EVT::Other if the type should be determined using generic
1838 /// target-independent logic.
1840 X86TargetLowering::getOptimalMemOpType(uint64_t Size,
1841 unsigned DstAlign, unsigned SrcAlign,
1842 bool IsMemset, bool ZeroMemset,
1844 MachineFunction &MF) const {
1845 const Function *F = MF.getFunction();
1846 if ((!IsMemset || ZeroMemset) &&
1847 !F->getAttributes().hasAttribute(AttributeSet::FunctionIndex,
1848 Attribute::NoImplicitFloat)) {
1850 (Subtarget->isUnalignedMemAccessFast() ||
1851 ((DstAlign == 0 || DstAlign >= 16) &&
1852 (SrcAlign == 0 || SrcAlign >= 16)))) {
1854 if (Subtarget->hasInt256())
1856 if (Subtarget->hasFp256())
1859 if (Subtarget->hasSSE2())
1861 if (Subtarget->hasSSE1())
1863 } else if (!MemcpyStrSrc && Size >= 8 &&
1864 !Subtarget->is64Bit() &&
1865 Subtarget->hasSSE2()) {
1866 // Do not use f64 to lower memcpy if the source is a string constant. It's
1867 // better to use i32 to avoid the loads.
1871 if (Subtarget->is64Bit() && Size >= 8)
1876 bool X86TargetLowering::isSafeMemOpType(MVT VT) const {
1878 return X86ScalarSSEf32;
1879 else if (VT == MVT::f64)
1880 return X86ScalarSSEf64;
1885 X86TargetLowering::allowsMisalignedMemoryAccesses(EVT VT,
1890 *Fast = Subtarget->isUnalignedMemAccessFast();
1894 /// Return the entry encoding for a jump table in the
1895 /// current function. The returned value is a member of the
1896 /// MachineJumpTableInfo::JTEntryKind enum.
1897 unsigned X86TargetLowering::getJumpTableEncoding() const {
1898 // In GOT pic mode, each entry in the jump table is emitted as a @GOTOFF symbol.
1900 if (getTargetMachine().getRelocationModel() == Reloc::PIC_ &&
1901 Subtarget->isPICStyleGOT())
1902 return MachineJumpTableInfo::EK_Custom32;
1904 // Otherwise, use the normal jump table encoding heuristics.
1905 return TargetLowering::getJumpTableEncoding();
1909 X86TargetLowering::LowerCustomJumpTableEntry(const MachineJumpTableInfo *MJTI,
1910 const MachineBasicBlock *MBB,
1911 unsigned uid,MCContext &Ctx) const{
1912 assert(MBB->getParent()->getTarget().getRelocationModel() == Reloc::PIC_ &&
1913 Subtarget->isPICStyleGOT());
1914 // In 32-bit ELF systems, our jump table entries are formed with @GOTOFF entries.
1916 return MCSymbolRefExpr::Create(MBB->getSymbol(),
1917 MCSymbolRefExpr::VK_GOTOFF, Ctx);
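// For illustration, a jump-table entry in this mode is emitted roughly as
//   .long .LBB0_2@GOTOFF
// i.e. a 32-bit offset of the basic-block label from the GOT base.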
1920 /// Returns relocation base for the given PIC jumptable.
1921 SDValue X86TargetLowering::getPICJumpTableRelocBase(SDValue Table,
1922 SelectionDAG &DAG) const {
1923 if (!Subtarget->is64Bit())
1924 // This doesn't have SDLoc associated with it, but is not really the
1925 // same as a Register.
1926 return DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), getPointerTy());
1930 /// This returns the relocation base for the given PIC jumptable,
1931 /// the same as getPICJumpTableRelocBase, but as an MCExpr.
1932 const MCExpr *X86TargetLowering::
1933 getPICJumpTableRelocBaseExpr(const MachineFunction *MF, unsigned JTI,
1934 MCContext &Ctx) const {
1935 // X86-64 uses RIP relative addressing based on the jump table label.
1936 if (Subtarget->isPICStyleRIPRel())
1937 return TargetLowering::getPICJumpTableRelocBaseExpr(MF, JTI, Ctx);
1939 // Otherwise, the reference is relative to the PIC base.
1940 return MCSymbolRefExpr::Create(MF->getPICBaseSymbol(), Ctx);
1943 // FIXME: Why is this routine here? Move to RegInfo!
1944 std::pair<const TargetRegisterClass*, uint8_t>
1945 X86TargetLowering::findRepresentativeClass(MVT VT) const{
1946 const TargetRegisterClass *RRC = nullptr;
1948 switch (VT.SimpleTy) {
1950 return TargetLowering::findRepresentativeClass(VT);
1951 case MVT::i8: case MVT::i16: case MVT::i32: case MVT::i64:
1952 RRC = Subtarget->is64Bit() ? &X86::GR64RegClass : &X86::GR32RegClass;
1955 RRC = &X86::VR64RegClass;
1957 case MVT::f32: case MVT::f64:
1958 case MVT::v16i8: case MVT::v8i16: case MVT::v4i32: case MVT::v2i64:
1959 case MVT::v4f32: case MVT::v2f64:
1960 case MVT::v32i8: case MVT::v8i32: case MVT::v4i64: case MVT::v8f32:
1962 RRC = &X86::VR128RegClass;
1965 return std::make_pair(RRC, Cost);
1968 bool X86TargetLowering::getStackCookieLocation(unsigned &AddressSpace,
1969 unsigned &Offset) const {
1970 if (!Subtarget->isTargetLinux())
1973 if (Subtarget->is64Bit()) {
1974 // %fs:0x28, unless we're using a Kernel code model, in which case it's %gs:0x28.
1976 if (getTargetMachine().getCodeModel() == CodeModel::Kernel)
1988 bool X86TargetLowering::isNoopAddrSpaceCast(unsigned SrcAS,
1989 unsigned DestAS) const {
1990 assert(SrcAS != DestAS && "Expected different address spaces!");
1992 return SrcAS < 256 && DestAS < 256;
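// Address spaces below 256 all alias the flat x86 address space, so casts
// among them need no code; 256 and up are reserved for segment-relative
// addressing (e.g. %gs/%fs), where such a cast would not be a no-op.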
1995 //===----------------------------------------------------------------------===//
1996 // Return Value Calling Convention Implementation
1997 //===----------------------------------------------------------------------===//
1999 #include "X86GenCallingConv.inc"
2002 X86TargetLowering::CanLowerReturn(CallingConv::ID CallConv,
2003 MachineFunction &MF, bool isVarArg,
2004 const SmallVectorImpl<ISD::OutputArg> &Outs,
2005 LLVMContext &Context) const {
2006 SmallVector<CCValAssign, 16> RVLocs;
2007 CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context);
2008 return CCInfo.CheckReturn(Outs, RetCC_X86);
2011 const MCPhysReg *X86TargetLowering::getScratchRegisters(CallingConv::ID) const {
2012 static const MCPhysReg ScratchRegs[] = { X86::R11, 0 };
2017 X86TargetLowering::LowerReturn(SDValue Chain,
2018 CallingConv::ID CallConv, bool isVarArg,
2019 const SmallVectorImpl<ISD::OutputArg> &Outs,
2020 const SmallVectorImpl<SDValue> &OutVals,
2021 SDLoc dl, SelectionDAG &DAG) const {
2022 MachineFunction &MF = DAG.getMachineFunction();
2023 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
2025 SmallVector<CCValAssign, 16> RVLocs;
2026 CCState CCInfo(CallConv, isVarArg, MF, RVLocs, *DAG.getContext());
2027 CCInfo.AnalyzeReturn(Outs, RetCC_X86);
2030 SmallVector<SDValue, 6> RetOps;
2031 RetOps.push_back(Chain); // Operand #0 = Chain (updated below)
2032 // Operand #1 = Bytes To Pop
2033 RetOps.push_back(DAG.getTargetConstant(FuncInfo->getBytesToPopOnReturn(),
2036 // Copy the result values into the output registers.
2037 for (unsigned i = 0; i != RVLocs.size(); ++i) {
2038 CCValAssign &VA = RVLocs[i];
2039 assert(VA.isRegLoc() && "Can only return in registers!");
2040 SDValue ValToCopy = OutVals[i];
2041 EVT ValVT = ValToCopy.getValueType();
2043 // Promote values to the appropriate types.
2044 if (VA.getLocInfo() == CCValAssign::SExt)
2045 ValToCopy = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), ValToCopy);
2046 else if (VA.getLocInfo() == CCValAssign::ZExt)
2047 ValToCopy = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), ValToCopy);
2048 else if (VA.getLocInfo() == CCValAssign::AExt)
2049 ValToCopy = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), ValToCopy);
2050 else if (VA.getLocInfo() == CCValAssign::BCvt)
2051 ValToCopy = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), ValToCopy);
2053 assert(VA.getLocInfo() != CCValAssign::FPExt &&
2054 "Unexpected FP-extend for return value.");
2056 // If this is x86-64, and we disabled SSE, we can't return FP values,
2057 // or SSE or MMX vectors.
2058 if ((ValVT == MVT::f32 || ValVT == MVT::f64 ||
2059 VA.getLocReg() == X86::XMM0 || VA.getLocReg() == X86::XMM1) &&
2060 (Subtarget->is64Bit() && !Subtarget->hasSSE1())) {
2061 report_fatal_error("SSE register return with SSE disabled");
2063 // Likewise we can't return F64 values with SSE1 only. gcc does so, but
2064 // llvm-gcc has never done it right and no one has noticed, so this
2065 // should be OK for now.
2066 if (ValVT == MVT::f64 &&
2067 (Subtarget->is64Bit() && !Subtarget->hasSSE2()))
2068 report_fatal_error("SSE2 register return with SSE2 disabled");
2070 // Returns in ST0/ST1 are handled specially: these are pushed as operands to
2071 // the RET instruction and handled by the FP Stackifier.
2072 if (VA.getLocReg() == X86::FP0 ||
2073 VA.getLocReg() == X86::FP1) {
2074 // If this is a copy from an xmm register to ST(0), use an FPExtend to
2075 // change the value to the FP stack register class.
2076 if (isScalarFPTypeInSSEReg(VA.getValVT()))
2077 ValToCopy = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f80, ValToCopy);
2078 RetOps.push_back(ValToCopy);
2079 // Don't emit a copytoreg.
2083 // 64-bit vector (MMX) values are returned in XMM0 / XMM1 except for v1i64
2084 // which is returned in RAX / RDX.
2085 if (Subtarget->is64Bit()) {
2086 if (ValVT == MVT::x86mmx) {
2087 if (VA.getLocReg() == X86::XMM0 || VA.getLocReg() == X86::XMM1) {
2088 ValToCopy = DAG.getNode(ISD::BITCAST, dl, MVT::i64, ValToCopy);
2089 ValToCopy = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64,
2091 // If we don't have SSE2 available, convert to v4f32 so the generated
2092 // register is legal.
2093 if (!Subtarget->hasSSE2())
2094 ValToCopy = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32,ValToCopy);
2099 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), ValToCopy, Flag);
2100 Flag = Chain.getValue(1);
2101 RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
2104 // The x86-64 ABIs require that for returning structs by value we copy
2105 // the sret argument into %rax/%eax (depending on ABI) for the return.
2106 // Win32 requires us to put the sret argument to %eax as well.
2107 // We saved the argument into a virtual register in the entry block,
2108 // so now we copy the value out and into %rax/%eax.
2109 if (DAG.getMachineFunction().getFunction()->hasStructRetAttr() &&
2110 (Subtarget->is64Bit() || Subtarget->isTargetKnownWindowsMSVC())) {
2111 MachineFunction &MF = DAG.getMachineFunction();
2112 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
2113 unsigned Reg = FuncInfo->getSRetReturnReg();
2115 "SRetReturnReg should have been set in LowerFormalArguments().");
2116 SDValue Val = DAG.getCopyFromReg(Chain, dl, Reg, getPointerTy());
2119 = (Subtarget->is64Bit() && !Subtarget->isTarget64BitILP32()) ?
2120 X86::RAX : X86::EAX;
2121 Chain = DAG.getCopyToReg(Chain, dl, RetValReg, Val, Flag);
2122 Flag = Chain.getValue(1);
2124 // RAX/EAX now acts like a return value.
2125 RetOps.push_back(DAG.getRegister(RetValReg, getPointerTy()));
2128 RetOps[0] = Chain; // Update chain.
2130 // Add the flag if we have it.
2132 RetOps.push_back(Flag);
2134 return DAG.getNode(X86ISD::RET_FLAG, dl, MVT::Other, RetOps);
2137 bool X86TargetLowering::isUsedByReturnOnly(SDNode *N, SDValue &Chain) const {
2138 if (N->getNumValues() != 1)
2140 if (!N->hasNUsesOfValue(1, 0))
2143 SDValue TCChain = Chain;
2144 SDNode *Copy = *N->use_begin();
2145 if (Copy->getOpcode() == ISD::CopyToReg) {
2146 // If the copy has a glue operand, we conservatively assume it isn't safe to
2147 // perform a tail call.
2148 if (Copy->getOperand(Copy->getNumOperands()-1).getValueType() == MVT::Glue)
2150 TCChain = Copy->getOperand(0);
2151 } else if (Copy->getOpcode() != ISD::FP_EXTEND)
2154 bool HasRet = false;
2155 for (SDNode::use_iterator UI = Copy->use_begin(), UE = Copy->use_end();
2157 if (UI->getOpcode() != X86ISD::RET_FLAG)
2159 // If we are returning more than one value, we can definitely
2160 // not make a tail call; see PR19530
2161 if (UI->getNumOperands() > 4)
2163 if (UI->getNumOperands() == 4 &&
2164 UI->getOperand(UI->getNumOperands()-1).getValueType() != MVT::Glue)
2177 X86TargetLowering::getTypeForExtArgOrReturn(LLVMContext &Context, EVT VT,
2178 ISD::NodeType ExtendKind) const {
2180 // TODO: Is this also valid on 32-bit?
2181 if (Subtarget->is64Bit() && VT == MVT::i1 && ExtendKind == ISD::ZERO_EXTEND)
2182 ReturnMVT = MVT::i8;
2184 ReturnMVT = MVT::i32;
2186 EVT MinVT = getRegisterType(Context, ReturnMVT);
2187 return VT.bitsLT(MinVT) ? MinVT : VT;
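// Example: a zeroext i1 return on x86-64 is widened only to i8 (so only %al
// is guaranteed to hold the value), while other sub-i32 returns are widened
// to at least i32.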
2190 /// Lower the result values of a call into the
2191 /// appropriate copies out of appropriate physical registers.
2194 X86TargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
2195 CallingConv::ID CallConv, bool isVarArg,
2196 const SmallVectorImpl<ISD::InputArg> &Ins,
2197 SDLoc dl, SelectionDAG &DAG,
2198 SmallVectorImpl<SDValue> &InVals) const {
2200 // Assign locations to each value returned by this call.
2201 SmallVector<CCValAssign, 16> RVLocs;
2202 bool Is64Bit = Subtarget->is64Bit();
2203 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
2205 CCInfo.AnalyzeCallResult(Ins, RetCC_X86);
2207 // Copy all of the result registers out of their specified physreg.
2208 for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
2209 CCValAssign &VA = RVLocs[i];
2210 EVT CopyVT = VA.getValVT();
2212 // If this is x86-64, and we disabled SSE, we can't return FP values
2213 if ((CopyVT == MVT::f32 || CopyVT == MVT::f64) &&
2214 ((Is64Bit || Ins[i].Flags.isInReg()) && !Subtarget->hasSSE1())) {
2215 report_fatal_error("SSE register return with SSE disabled");
2218 // If we prefer to use the value in xmm registers, copy it out as f80 and
2219 // use a truncate to move it from fp stack reg to xmm reg.
2220 if ((VA.getLocReg() == X86::FP0 || VA.getLocReg() == X86::FP1) &&
2221 isScalarFPTypeInSSEReg(VA.getValVT()))
2224 Chain = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(),
2225 CopyVT, InFlag).getValue(1);
2226 SDValue Val = Chain.getValue(0);
2228 if (CopyVT != VA.getValVT())
2229 Val = DAG.getNode(ISD::FP_ROUND, dl, VA.getValVT(), Val,
2230 // This truncation won't change the value.
2231 DAG.getIntPtrConstant(1));
2233 InFlag = Chain.getValue(2);
2234 InVals.push_back(Val);
2240 //===----------------------------------------------------------------------===//
2241 // C & StdCall & Fast Calling Convention implementation
2242 //===----------------------------------------------------------------------===//
2243 // The StdCall calling convention is the standard for many Windows API
2244 // routines. It differs from the C calling convention only slightly: the
2245 // callee cleans up the stack instead of the caller, and symbols are also
2246 // decorated in some fancy way :) It doesn't support any vector arguments.
2247 // For info on fast calling convention see Fast Calling Convention (tail call)
2248 // implementation LowerX86_32FastCCCallTo.
2250 /// CallIsStructReturn - Determines whether a call uses struct return semantics.
2252 enum StructReturnType {
2257 static StructReturnType
2258 callIsStructReturn(const SmallVectorImpl<ISD::OutputArg> &Outs) {
2260 return NotStructReturn;
2262 const ISD::ArgFlagsTy &Flags = Outs[0].Flags;
2263 if (!Flags.isSRet())
2264 return NotStructReturn;
2265 if (Flags.isInReg())
2266 return RegStructReturn;
2267 return StackStructReturn;
2270 /// Determines whether a function uses struct return semantics.
2271 static StructReturnType
2272 argsAreStructReturn(const SmallVectorImpl<ISD::InputArg> &Ins) {
2274 return NotStructReturn;
2276 const ISD::ArgFlagsTy &Flags = Ins[0].Flags;
2277 if (!Flags.isSRet())
2278 return NotStructReturn;
2279 if (Flags.isInReg())
2280 return RegStructReturn;
2281 return StackStructReturn;
2284 /// Make a copy of an aggregate at address specified by "Src" to address
2285 /// "Dst" with size and alignment information specified by the specific
2286 /// parameter attribute. The copy will be passed as a byval function parameter.
2288 CreateCopyOfByValArgument(SDValue Src, SDValue Dst, SDValue Chain,
2289 ISD::ArgFlagsTy Flags, SelectionDAG &DAG,
2291 SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), MVT::i32);
2293 return DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode, Flags.getByValAlign(),
2294 /*isVolatile*/false, /*AlwaysInline=*/true,
2295 MachinePointerInfo(), MachinePointerInfo());
2298 /// Return true if the calling convention is one that
2299 /// supports tail call optimization.
2300 static bool IsTailCallConvention(CallingConv::ID CC) {
2301 return (CC == CallingConv::Fast || CC == CallingConv::GHC ||
2302 CC == CallingConv::HiPE);
2305 /// \brief Return true if the calling convention is a C calling convention.
2306 static bool IsCCallConvention(CallingConv::ID CC) {
2307 return (CC == CallingConv::C || CC == CallingConv::X86_64_Win64 ||
2308 CC == CallingConv::X86_64_SysV);
2311 bool X86TargetLowering::mayBeEmittedAsTailCall(CallInst *CI) const {
2312 if (!CI->isTailCall() || getTargetMachine().Options.DisableTailCalls)
2316 CallingConv::ID CalleeCC = CS.getCallingConv();
2317 if (!IsTailCallConvention(CalleeCC) && !IsCCallConvention(CalleeCC))
2323 /// Return true if the function is being made into
2324 /// a tailcall target by changing its ABI.
2325 static bool FuncIsMadeTailCallSafe(CallingConv::ID CC,
2326 bool GuaranteedTailCallOpt) {
2327 return GuaranteedTailCallOpt && IsTailCallConvention(CC);
2331 X86TargetLowering::LowerMemArgument(SDValue Chain,
2332 CallingConv::ID CallConv,
2333 const SmallVectorImpl<ISD::InputArg> &Ins,
2334 SDLoc dl, SelectionDAG &DAG,
2335 const CCValAssign &VA,
2336 MachineFrameInfo *MFI,
2338 // Create the nodes corresponding to a load from this parameter slot.
2339 ISD::ArgFlagsTy Flags = Ins[i].Flags;
2340 bool AlwaysUseMutable = FuncIsMadeTailCallSafe(
2341 CallConv, DAG.getTarget().Options.GuaranteedTailCallOpt);
2342 bool isImmutable = !AlwaysUseMutable && !Flags.isByVal();
2345 // If the value is passed by pointer, we have the address passed instead of the value itself.
2347 if (VA.getLocInfo() == CCValAssign::Indirect)
2348 ValVT = VA.getLocVT();
2350 ValVT = VA.getValVT();
2352 // FIXME: For now, all byval parameter objects are marked mutable. This can be
2353 // changed with more analysis.
2354 // In case of tail call optimization, mark all arguments mutable, since they
2355 // could be overwritten by the lowering of arguments during a tail call.
2356 if (Flags.isByVal()) {
2357 unsigned Bytes = Flags.getByValSize();
2358 if (Bytes == 0) Bytes = 1; // Don't create zero-sized stack objects.
2359 int FI = MFI->CreateFixedObject(Bytes, VA.getLocMemOffset(), isImmutable);
2360 return DAG.getFrameIndex(FI, getPointerTy());
2362 int FI = MFI->CreateFixedObject(ValVT.getSizeInBits()/8,
2363 VA.getLocMemOffset(), isImmutable);
2364 SDValue FIN = DAG.getFrameIndex(FI, getPointerTy());
2365 return DAG.getLoad(ValVT, dl, Chain, FIN,
2366 MachinePointerInfo::getFixedStack(FI),
2367 false, false, false, 0);
2371 // FIXME: Get this from tablegen.
2372 static ArrayRef<MCPhysReg> get64BitArgumentGPRs(CallingConv::ID CallConv,
2373 const X86Subtarget *Subtarget) {
2374 assert(Subtarget->is64Bit());
2376 if (Subtarget->isCallingConvWin64(CallConv)) {
2377 static const MCPhysReg GPR64ArgRegsWin64[] = {
2378 X86::RCX, X86::RDX, X86::R8, X86::R9
2380 return makeArrayRef(std::begin(GPR64ArgRegsWin64), std::end(GPR64ArgRegsWin64));
2383 static const MCPhysReg GPR64ArgRegs64Bit[] = {
2384 X86::RDI, X86::RSI, X86::RDX, X86::RCX, X86::R8, X86::R9
2386 return makeArrayRef(std::begin(GPR64ArgRegs64Bit), std::end(GPR64ArgRegs64Bit));
2389 // FIXME: Get this from tablegen.
2390 static ArrayRef<MCPhysReg> get64BitArgumentXMMs(MachineFunction &MF,
2391 CallingConv::ID CallConv,
2392 const X86Subtarget *Subtarget) {
2393 assert(Subtarget->is64Bit());
2394 if (Subtarget->isCallingConvWin64(CallConv)) {
2395 // The XMM registers which might contain var arg parameters are shadowed
2396 // in their paired GPRs. So we only need to save the GPRs to their home slots.
2398 // TODO: __vectorcall will change this.
2402 const Function *Fn = MF.getFunction();
2403 bool NoImplicitFloatOps = Fn->getAttributes().
2404 hasAttribute(AttributeSet::FunctionIndex, Attribute::NoImplicitFloat);
2405 assert(!(MF.getTarget().Options.UseSoftFloat && NoImplicitFloatOps) &&
2406 "SSE register cannot be used when SSE is disabled!");
2407 if (MF.getTarget().Options.UseSoftFloat || NoImplicitFloatOps ||
2408 !Subtarget->hasSSE1())
2409 // Kernel mode asks for SSE to be disabled, so there are no XMM argument registers.
2413 static const MCPhysReg XMMArgRegs64Bit[] = {
2414 X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
2415 X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7
2417 return makeArrayRef(std::begin(XMMArgRegs64Bit), std::end(XMMArgRegs64Bit));
2421 X86TargetLowering::LowerFormalArguments(SDValue Chain,
2422 CallingConv::ID CallConv,
2424 const SmallVectorImpl<ISD::InputArg> &Ins,
2427 SmallVectorImpl<SDValue> &InVals)
2429 MachineFunction &MF = DAG.getMachineFunction();
2430 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
2432 const Function* Fn = MF.getFunction();
2433 if (Fn->hasExternalLinkage() &&
2434 Subtarget->isTargetCygMing() &&
2435 Fn->getName() == "main")
2436 FuncInfo->setForceFramePointer(true);
2438 MachineFrameInfo *MFI = MF.getFrameInfo();
2439 bool Is64Bit = Subtarget->is64Bit();
2440 bool IsWin64 = Subtarget->isCallingConvWin64(CallConv);
2442 assert(!(isVarArg && IsTailCallConvention(CallConv)) &&
2443 "Var args not supported with calling convention fastcc, ghc or hipe");
2445 // Assign locations to all of the incoming arguments.
2446 SmallVector<CCValAssign, 16> ArgLocs;
2447 CCState CCInfo(CallConv, isVarArg, MF, ArgLocs, *DAG.getContext());
2449 // Allocate shadow area for Win64
2451 CCInfo.AllocateStack(32, 8);
2453 CCInfo.AnalyzeFormalArguments(Ins, CC_X86);
2455 unsigned LastVal = ~0U;
2457 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
2458 CCValAssign &VA = ArgLocs[i];
2459 // TODO: If an arg is passed in two places (e.g. reg and stack), skip later occurrences.
2461 assert(VA.getValNo() != LastVal &&
2462 "Don't support value assigned to multiple locs yet");
2464 LastVal = VA.getValNo();
2466 if (VA.isRegLoc()) {
2467 EVT RegVT = VA.getLocVT();
2468 const TargetRegisterClass *RC;
2469 if (RegVT == MVT::i32)
2470 RC = &X86::GR32RegClass;
2471 else if (Is64Bit && RegVT == MVT::i64)
2472 RC = &X86::GR64RegClass;
2473 else if (RegVT == MVT::f32)
2474 RC = &X86::FR32RegClass;
2475 else if (RegVT == MVT::f64)
2476 RC = &X86::FR64RegClass;
2477 else if (RegVT.is512BitVector())
2478 RC = &X86::VR512RegClass;
2479 else if (RegVT.is256BitVector())
2480 RC = &X86::VR256RegClass;
2481 else if (RegVT.is128BitVector())
2482 RC = &X86::VR128RegClass;
2483 else if (RegVT == MVT::x86mmx)
2484 RC = &X86::VR64RegClass;
2485 else if (RegVT == MVT::i1)
2486 RC = &X86::VK1RegClass;
2487 else if (RegVT == MVT::v8i1)
2488 RC = &X86::VK8RegClass;
2489 else if (RegVT == MVT::v16i1)
2490 RC = &X86::VK16RegClass;
2491 else if (RegVT == MVT::v32i1)
2492 RC = &X86::VK32RegClass;
2493 else if (RegVT == MVT::v64i1)
2494 RC = &X86::VK64RegClass;
2496 llvm_unreachable("Unknown argument type!");
2498 unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);
2499 ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, RegVT);
2501 // If this is an 8 or 16-bit value, it is really passed promoted to 32
2502 // bits. Insert an assert[sz]ext to capture this, then truncate to the right size.
2504 if (VA.getLocInfo() == CCValAssign::SExt)
2505 ArgValue = DAG.getNode(ISD::AssertSext, dl, RegVT, ArgValue,
2506 DAG.getValueType(VA.getValVT()));
2507 else if (VA.getLocInfo() == CCValAssign::ZExt)
2508 ArgValue = DAG.getNode(ISD::AssertZext, dl, RegVT, ArgValue,
2509 DAG.getValueType(VA.getValVT()));
2510 else if (VA.getLocInfo() == CCValAssign::BCvt)
2511 ArgValue = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), ArgValue);
2513 if (VA.isExtInLoc()) {
2514 // Handle MMX values passed in XMM regs.
2515 if (RegVT.isVector())
2516 ArgValue = DAG.getNode(X86ISD::MOVDQ2Q, dl, VA.getValVT(), ArgValue);
2518 ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue);
2521 assert(VA.isMemLoc());
2522 ArgValue = LowerMemArgument(Chain, CallConv, Ins, dl, DAG, VA, MFI, i);
2525 // If the value is passed via a pointer, do a load.
2526 if (VA.getLocInfo() == CCValAssign::Indirect)
2527 ArgValue = DAG.getLoad(VA.getValVT(), dl, Chain, ArgValue,
2528 MachinePointerInfo(), false, false, false, 0);
2530 InVals.push_back(ArgValue);
2533 if (Subtarget->is64Bit() || Subtarget->isTargetKnownWindowsMSVC()) {
2534 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
2535 // The x86-64 ABIs require that for returning structs by value we copy
2536 // the sret argument into %rax/%eax (depending on ABI) for the return.
2537 // Win32 requires us to put the sret argument to %eax as well.
2538 // Save the argument into a virtual register so that we can access it
2539 // from the return points.
2540 if (Ins[i].Flags.isSRet()) {
2541 unsigned Reg = FuncInfo->getSRetReturnReg();
2543 MVT PtrTy = getPointerTy();
2544 Reg = MF.getRegInfo().createVirtualRegister(getRegClassFor(PtrTy));
2545 FuncInfo->setSRetReturnReg(Reg);
2547 SDValue Copy = DAG.getCopyToReg(DAG.getEntryNode(), dl, Reg, InVals[i]);
2548 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Copy, Chain);
2554 unsigned StackSize = CCInfo.getNextStackOffset();
2555 // Align stack specially for tail calls.
2556 if (FuncIsMadeTailCallSafe(CallConv,
2557 MF.getTarget().Options.GuaranteedTailCallOpt))
2558 StackSize = GetAlignedArgumentStackSize(StackSize, DAG);
2560 // If the function takes a variable number of arguments, make a frame index for
2561 // the start of the first vararg value... for expansion of llvm.va_start. We
2562 // can skip this if there are no va_start calls.
2563 if (MFI->hasVAStart() &&
2564 (Is64Bit || (CallConv != CallingConv::X86_FastCall &&
2565 CallConv != CallingConv::X86_ThisCall))) {
2566 FuncInfo->setVarArgsFrameIndex(
2567 MFI->CreateFixedObject(1, StackSize, true));
2570 // Figure out if XMM registers are in use.
2571 assert(!(MF.getTarget().Options.UseSoftFloat &&
2572 Fn->getAttributes().hasAttribute(AttributeSet::FunctionIndex,
2573 Attribute::NoImplicitFloat)) &&
2574 "SSE register cannot be used when SSE is disabled!");
2576 // 64-bit calling conventions support varargs and register parameters, so we
2577 // have to do extra work to spill them in the prologue.
2578 if (Is64Bit && isVarArg && MFI->hasVAStart()) {
2579 // Find the first unallocated argument register in each register set (GPR and XMM).
2580 ArrayRef<MCPhysReg> ArgGPRs = get64BitArgumentGPRs(CallConv, Subtarget);
2581 ArrayRef<MCPhysReg> ArgXMMs = get64BitArgumentXMMs(MF, CallConv, Subtarget);
2582 unsigned NumIntRegs =
2583 CCInfo.getFirstUnallocated(ArgGPRs.data(), ArgGPRs.size());
2584 unsigned NumXMMRegs =
2585 CCInfo.getFirstUnallocated(ArgXMMs.data(), ArgXMMs.size());
2586 assert(!(NumXMMRegs && !Subtarget->hasSSE1()) &&
2587 "SSE register cannot be used when SSE is disabled!");
2589 // Gather all the live in physical registers.
2590 SmallVector<SDValue, 6> LiveGPRs;
2591 SmallVector<SDValue, 8> LiveXMMRegs;
2593 for (MCPhysReg Reg : ArgGPRs.slice(NumIntRegs)) {
2594 unsigned GPR = MF.addLiveIn(Reg, &X86::GR64RegClass);
2596 DAG.getCopyFromReg(Chain, dl, GPR, MVT::i64));
2598 if (!ArgXMMs.empty()) {
2599 unsigned AL = MF.addLiveIn(X86::AL, &X86::GR8RegClass);
2600 ALVal = DAG.getCopyFromReg(Chain, dl, AL, MVT::i8);
2601 for (MCPhysReg Reg : ArgXMMs.slice(NumXMMRegs)) {
2602 unsigned XMMReg = MF.addLiveIn(Reg, &X86::VR128RegClass);
2603 LiveXMMRegs.push_back(
2604 DAG.getCopyFromReg(Chain, dl, XMMReg, MVT::v4f32));
2609 const TargetFrameLowering &TFI = *Subtarget->getFrameLowering();
2610 // Get to the caller-allocated home save location. Add 8 to account
2611 // for the return address.
2612 int HomeOffset = TFI.getOffsetOfLocalArea() + 8;
2613 FuncInfo->setRegSaveFrameIndex(
2614 MFI->CreateFixedObject(1, NumIntRegs * 8 + HomeOffset, false));
2615 // Fixup to set vararg frame on shadow area (4 x i64).
2617 FuncInfo->setVarArgsFrameIndex(FuncInfo->getRegSaveFrameIndex());
2619 // For X86-64, if there are vararg parameters that are passed via
2620 // registers, then we must store them to their spots on the stack so
2621 // they may be loaded by dereferencing the result of va_next.
2622 FuncInfo->setVarArgsGPOffset(NumIntRegs * 8);
2623 FuncInfo->setVarArgsFPOffset(ArgGPRs.size() * 8 + NumXMMRegs * 16);
2624 FuncInfo->setRegSaveFrameIndex(MFI->CreateStackObject(
2625 ArgGPRs.size() * 8 + ArgXMMs.size() * 16, 16, false));
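// With SSE enabled, this SysV register save area is 6*8 + 8*16 = 176 bytes;
// va_arg later indexes into it via the gp_offset/fp_offset fields of the
// va_list.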
2628 // Store the integer parameter registers.
2629 SmallVector<SDValue, 8> MemOps;
2630 SDValue RSFIN = DAG.getFrameIndex(FuncInfo->getRegSaveFrameIndex(),
2632 unsigned Offset = FuncInfo->getVarArgsGPOffset();
2633 for (SDValue Val : LiveGPRs) {
2634 SDValue FIN = DAG.getNode(ISD::ADD, dl, getPointerTy(), RSFIN,
2635 DAG.getIntPtrConstant(Offset));
2637 DAG.getStore(Val.getValue(1), dl, Val, FIN,
2638 MachinePointerInfo::getFixedStack(
2639 FuncInfo->getRegSaveFrameIndex(), Offset),
2641 MemOps.push_back(Store);
2645 if (!ArgXMMs.empty() && NumXMMRegs != ArgXMMs.size()) {
2646 // Now store the XMM (fp + vector) parameter registers.
2647 SmallVector<SDValue, 12> SaveXMMOps;
2648 SaveXMMOps.push_back(Chain);
2649 SaveXMMOps.push_back(ALVal);
2650 SaveXMMOps.push_back(DAG.getIntPtrConstant(
2651 FuncInfo->getRegSaveFrameIndex()));
2652 SaveXMMOps.push_back(DAG.getIntPtrConstant(
2653 FuncInfo->getVarArgsFPOffset()));
2654 SaveXMMOps.insert(SaveXMMOps.end(), LiveXMMRegs.begin(),
2656 MemOps.push_back(DAG.getNode(X86ISD::VASTART_SAVE_XMM_REGS, dl,
2657 MVT::Other, SaveXMMOps));
2660 if (!MemOps.empty())
2661 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);
2664 if (isVarArg && MFI->hasMustTailInVarArgFunc()) {
2665 // Find the largest legal vector type.
2666 MVT VecVT = MVT::Other;
2667 // FIXME: Only some x86_32 calling conventions support AVX512.
2668 if (Subtarget->hasAVX512() &&
2669 (Is64Bit || (CallConv == CallingConv::X86_VectorCall ||
2670 CallConv == CallingConv::Intel_OCL_BI)))
2671 VecVT = MVT::v16f32;
2672 else if (Subtarget->hasAVX())
2674 else if (Subtarget->hasSSE2())
2677 // We forward some GPRs and some vector types.
2678 SmallVector<MVT, 2> RegParmTypes;
2679 MVT IntVT = Is64Bit ? MVT::i64 : MVT::i32;
2680 RegParmTypes.push_back(IntVT);
2681 if (VecVT != MVT::Other)
2682 RegParmTypes.push_back(VecVT);
2684 // Compute the set of forwarded registers. The rest are scratch.
2685 SmallVectorImpl<ForwardedRegister> &Forwards =
2686 FuncInfo->getForwardedMustTailRegParms();
2687 CCInfo.analyzeMustTailForwardedRegisters(Forwards, RegParmTypes, CC_X86);
2689 // Conservatively forward AL on x86_64, since it might be used for varargs.
2690 if (Is64Bit && !CCInfo.isAllocated(X86::AL)) {
2691 unsigned ALVReg = MF.addLiveIn(X86::AL, &X86::GR8RegClass);
2692 Forwards.push_back(ForwardedRegister(ALVReg, X86::AL, MVT::i8));
2695 // Copy all forwards from physical to virtual registers.
2696 for (ForwardedRegister &F : Forwards) {
2697 // FIXME: Can we use a less constrained schedule?
2698 SDValue RegVal = DAG.getCopyFromReg(Chain, dl, F.VReg, F.VT);
2699 F.VReg = MF.getRegInfo().createVirtualRegister(getRegClassFor(F.VT));
2700 Chain = DAG.getCopyToReg(Chain, dl, F.VReg, RegVal);
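// These copies keep the incoming argument registers alive in virtual
// registers so that a later musttail call in this vararg function can pass
// them along unchanged.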
2704 // Some CCs need callee pop.
2705 if (X86::isCalleePop(CallConv, Is64Bit, isVarArg,
2706 MF.getTarget().Options.GuaranteedTailCallOpt)) {
2707 FuncInfo->setBytesToPopOnReturn(StackSize); // Callee pops everything.
2709 FuncInfo->setBytesToPopOnReturn(0); // Callee pops nothing.
2710 // If this is an sret function, the return should pop the hidden pointer.
2711 if (!Is64Bit && !IsTailCallConvention(CallConv) &&
2712 !Subtarget->getTargetTriple().isOSMSVCRT() &&
2713 argsAreStructReturn(Ins) == StackStructReturn)
2714 FuncInfo->setBytesToPopOnReturn(4);
2718 // RegSaveFrameIndex is X86-64 only.
2719 FuncInfo->setRegSaveFrameIndex(0xAAAAAAA);
2720 if (CallConv == CallingConv::X86_FastCall ||
2721 CallConv == CallingConv::X86_ThisCall)
2722 // fastcc functions can't have varargs.
2723 FuncInfo->setVarArgsFrameIndex(0xAAAAAAA);
2726 FuncInfo->setArgumentStackSize(StackSize);
2732 X86TargetLowering::LowerMemOpCallTo(SDValue Chain,
2733 SDValue StackPtr, SDValue Arg,
2734 SDLoc dl, SelectionDAG &DAG,
2735 const CCValAssign &VA,
2736 ISD::ArgFlagsTy Flags) const {
2737 unsigned LocMemOffset = VA.getLocMemOffset();
2738 SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset);
2739 PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, PtrOff);
2740 if (Flags.isByVal())
2741 return CreateCopyOfByValArgument(Arg, PtrOff, Chain, Flags, DAG, dl);
2743 return DAG.getStore(Chain, dl, Arg, PtrOff,
2744 MachinePointerInfo::getStack(LocMemOffset),
2748 /// Emit a load of the return address if tail call
2749 /// optimization is performed and it is required.
2751 X86TargetLowering::EmitTailCallLoadRetAddr(SelectionDAG &DAG,
2752 SDValue &OutRetAddr, SDValue Chain,
2753 bool IsTailCall, bool Is64Bit,
2754 int FPDiff, SDLoc dl) const {
2755 // Adjust the Return address stack slot.
2756 EVT VT = getPointerTy();
2757 OutRetAddr = getReturnAddressFrameIndex(DAG);
2759 // Load the "old" Return address.
2760 OutRetAddr = DAG.getLoad(VT, dl, Chain, OutRetAddr, MachinePointerInfo(),
2761 false, false, false, 0);
2762 return SDValue(OutRetAddr.getNode(), 1);
2765 /// Emit a store of the return address if tail call
2766 /// optimization is performed and it is required (FPDiff!=0).
2767 static SDValue EmitTailCallStoreRetAddr(SelectionDAG &DAG, MachineFunction &MF,
2768 SDValue Chain, SDValue RetAddrFrIdx,
2769 EVT PtrVT, unsigned SlotSize,
2770 int FPDiff, SDLoc dl) {
2771 // Store the return address to the appropriate stack slot.
2772 if (!FPDiff) return Chain;
2773 // Calculate the new stack slot for the return address.
2774 int NewReturnAddrFI =
2775 MF.getFrameInfo()->CreateFixedObject(SlotSize, (int64_t)FPDiff - SlotSize,
2777 SDValue NewRetAddrFrIdx = DAG.getFrameIndex(NewReturnAddrFI, PtrVT);
2778 Chain = DAG.getStore(Chain, dl, RetAddrFrIdx, NewRetAddrFrIdx,
2779 MachinePointerInfo::getFixedStack(NewReturnAddrFI),
2785 X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
2786 SmallVectorImpl<SDValue> &InVals) const {
2787 SelectionDAG &DAG = CLI.DAG;
2789 SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
2790 SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
2791 SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
2792 SDValue Chain = CLI.Chain;
2793 SDValue Callee = CLI.Callee;
2794 CallingConv::ID CallConv = CLI.CallConv;
2795 bool &isTailCall = CLI.IsTailCall;
2796 bool isVarArg = CLI.IsVarArg;
2798 MachineFunction &MF = DAG.getMachineFunction();
2799 bool Is64Bit = Subtarget->is64Bit();
2800 bool IsWin64 = Subtarget->isCallingConvWin64(CallConv);
2801 StructReturnType SR = callIsStructReturn(Outs);
2802 bool IsSibcall = false;
2803 X86MachineFunctionInfo *X86Info = MF.getInfo<X86MachineFunctionInfo>();
2805 if (MF.getTarget().Options.DisableTailCalls)
2808 bool IsMustTail = CLI.CS && CLI.CS->isMustTailCall();
2810 // Force this to be a tail call. The verifier rules are enough to ensure
2811 // that we can lower this successfully without moving the return address around.
2814 } else if (isTailCall) {
2815 // Check if it's really possible to do a tail call.
2816 isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv,
2817 isVarArg, SR != NotStructReturn,
2818 MF.getFunction()->hasStructRetAttr(), CLI.RetTy,
2819 Outs, OutVals, Ins, DAG);
2821 // Sibcalls are automatically detected tailcalls which do not require ABI changes.
2823 if (!MF.getTarget().Options.GuaranteedTailCallOpt && isTailCall)
2830 assert(!(isVarArg && IsTailCallConvention(CallConv)) &&
2831 "Var args not supported with calling convention fastcc, ghc or hipe");
2833 // Analyze operands of the call, assigning locations to each operand.
2834 SmallVector<CCValAssign, 16> ArgLocs;
2835 CCState CCInfo(CallConv, isVarArg, MF, ArgLocs, *DAG.getContext());
2837 // Allocate shadow area for Win64
2839 CCInfo.AllocateStack(32, 8);
2841 CCInfo.AnalyzeCallOperands(Outs, CC_X86);
2843 // Get a count of how many bytes are to be pushed on the stack.
2844 unsigned NumBytes = CCInfo.getNextStackOffset();
2846 // This is a sibcall. The memory operands are already available in the
2847 // caller's incoming argument area, i.e. in its own caller's stack.
2849 else if (MF.getTarget().Options.GuaranteedTailCallOpt &&
2850 IsTailCallConvention(CallConv))
2851 NumBytes = GetAlignedArgumentStackSize(NumBytes, DAG);
2854 if (isTailCall && !IsSibcall && !IsMustTail) {
2855 // Lower arguments at fp - stackoffset + fpdiff.
2856 unsigned NumBytesCallerPushed = X86Info->getBytesToPopOnReturn();
2858 FPDiff = NumBytesCallerPushed - NumBytes;
2860 // Set the delta of movement of the return address stack slot,
2861 // but only if the new delta is smaller (i.e. larger in magnitude) than the previous one.
2862 if (FPDiff < X86Info->getTCReturnAddrDelta())
2863 X86Info->setTCReturnAddrDelta(FPDiff);
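// Illustrative example: if the caller was entered with 16 bytes of stack
// arguments to pop on return but the tail-called function needs 32 bytes of
// argument space, FPDiff = 16 - 32 = -16, i.e. the return address has to be
// relocated by 16 bytes to make room for the extra outgoing arguments.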
2866 unsigned NumBytesToPush = NumBytes;
2867 unsigned NumBytesToPop = NumBytes;
2869 // If we have an inalloca argument, all stack space has already been allocated
2870 // for us and is right at the top of the stack. We don't support multiple
2871 // arguments passed in memory when using inalloca.
2872 if (!Outs.empty() && Outs.back().Flags.isInAlloca()) {
2874 if (!ArgLocs.back().isMemLoc())
2875 report_fatal_error("cannot use inalloca attribute on a register "
2877 if (ArgLocs.back().getLocMemOffset() != 0)
2878 report_fatal_error("any parameter with the inalloca attribute must be "
2879 "the only memory argument");
2883 Chain = DAG.getCALLSEQ_START(
2884 Chain, DAG.getIntPtrConstant(NumBytesToPush, true), dl);
2886 SDValue RetAddrFrIdx;
2887 // Load return address for tail calls.
2888 if (isTailCall && FPDiff)
2889 Chain = EmitTailCallLoadRetAddr(DAG, RetAddrFrIdx, Chain, isTailCall,
2890 Is64Bit, FPDiff, dl);
2892 SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
2893 SmallVector<SDValue, 8> MemOpChains;
2896 // Walk the register/memloc assignments, inserting copies/loads. In the case
2897 // of tail call optimization, arguments are handled later.
2898 const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
2899 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
2900 // Skip inalloca arguments, they have already been written.
2901 ISD::ArgFlagsTy Flags = Outs[i].Flags;
2902 if (Flags.isInAlloca())
2905 CCValAssign &VA = ArgLocs[i];
2906 EVT RegVT = VA.getLocVT();
2907 SDValue Arg = OutVals[i];
2908 bool isByVal = Flags.isByVal();
2910 // Promote the value if needed.
2911 switch (VA.getLocInfo()) {
2912 default: llvm_unreachable("Unknown loc info!");
2913 case CCValAssign::Full: break;
2914 case CCValAssign::SExt:
2915 Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, RegVT, Arg);
2917 case CCValAssign::ZExt:
2918 Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, RegVT, Arg);
2920 case CCValAssign::AExt:
2921 if (RegVT.is128BitVector()) {
2922 // Special case: passing MMX values in XMM registers.
2923 Arg = DAG.getNode(ISD::BITCAST, dl, MVT::i64, Arg);
2924 Arg = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64, Arg);
2925 Arg = getMOVL(DAG, dl, MVT::v2i64, DAG.getUNDEF(MVT::v2i64), Arg);
2927 Arg = DAG.getNode(ISD::ANY_EXTEND, dl, RegVT, Arg);
2929 case CCValAssign::BCvt:
2930 Arg = DAG.getNode(ISD::BITCAST, dl, RegVT, Arg);
2932 case CCValAssign::Indirect: {
2933 // Store the argument.
2934 SDValue SpillSlot = DAG.CreateStackTemporary(VA.getValVT());
2935 int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex();
2936 Chain = DAG.getStore(Chain, dl, Arg, SpillSlot,
2937 MachinePointerInfo::getFixedStack(FI),
2944 if (VA.isRegLoc()) {
2945 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
2946 if (isVarArg && IsWin64) {
2947 // The Win64 ABI requires argument XMM registers to be copied to the
2948 // corresponding shadow registers if the callee is a varargs function.
2949 unsigned ShadowReg = 0;
2950 switch (VA.getLocReg()) {
2951 case X86::XMM0: ShadowReg = X86::RCX; break;
2952 case X86::XMM1: ShadowReg = X86::RDX; break;
2953 case X86::XMM2: ShadowReg = X86::R8; break;
2954 case X86::XMM3: ShadowReg = X86::R9; break;
2957 RegsToPass.push_back(std::make_pair(ShadowReg, Arg));
2959 } else if (!IsSibcall && (!isTailCall || isByVal)) {
2960 assert(VA.isMemLoc());
2961 if (!StackPtr.getNode())
2962 StackPtr = DAG.getCopyFromReg(Chain, dl, RegInfo->getStackRegister(),
2964 MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, Arg,
2965 dl, DAG, VA, Flags));
2969 if (!MemOpChains.empty())
2970 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
2972 if (Subtarget->isPICStyleGOT()) {
2973 // ELF / PIC requires GOT in the EBX register before function calls via PLT
2976 RegsToPass.push_back(std::make_pair(unsigned(X86::EBX),
2977 DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), getPointerTy())));
2979 // If we are tail calling and generating PIC/GOT style code load the
2980 // address of the callee into ECX. The value in ecx is used as target of
2981 // the tail jump. This is done to circumvent the ebx/callee-saved problem
2982 // for tail calls on PIC/GOT architectures. Normally we would just put the
2983 // address of GOT into ebx and then call target@PLT. But for tail calls
2984 // ebx would be restored (since ebx is callee saved) before jumping to the callee.
2987 // Note: The actual moving to ECX is done further down.
2988 GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee);
2989 if (G && !G->getGlobal()->hasHiddenVisibility() &&
2990 !G->getGlobal()->hasProtectedVisibility())
2991 Callee = LowerGlobalAddress(Callee, DAG);
2992 else if (isa<ExternalSymbolSDNode>(Callee))
2993 Callee = LowerExternalSymbol(Callee, DAG);
2997 if (Is64Bit && isVarArg && !IsWin64 && !IsMustTail) {
2998 // From AMD64 ABI document:
2999 // For calls that may call functions that use varargs or stdargs
3000 // (prototype-less calls or calls to functions containing ellipsis (...) in
3001 // the declaration) %al is used as hidden argument to specify the number
3002 // of SSE registers used. The contents of %al do not need to match exactly
3003 // the number of registers, but must be an upper bound on the number of SSE
3004 // registers used and is in the range 0 - 8 inclusive.
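// Illustrative sketch (the exact assembly depends on later passes): for a call
// such as printf("%f %f\n", 1.0, 2.0), two XMM registers carry the doubles, so
// this lowering emits the equivalent of 'movb $2, %al' right before the call;
// any value from 2 up to 8 would also satisfy the ABI, since %al only has to be
// an upper bound.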
3006 // Count the number of XMM registers allocated.
3007 static const MCPhysReg XMMArgRegs[] = {
3008 X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
3009 X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7
3011 unsigned NumXMMRegs = CCInfo.getFirstUnallocated(XMMArgRegs, 8);
3012 assert((Subtarget->hasSSE1() || !NumXMMRegs)
3013 && "SSE registers cannot be used when SSE is disabled");
3015 RegsToPass.push_back(std::make_pair(unsigned(X86::AL),
3016 DAG.getConstant(NumXMMRegs, MVT::i8)));
3019 if (isVarArg && IsMustTail) {
3020 const auto &Forwards = X86Info->getForwardedMustTailRegParms();
3021 for (const auto &F : Forwards) {
3022 SDValue Val = DAG.getCopyFromReg(Chain, dl, F.VReg, F.VT);
3023 RegsToPass.push_back(std::make_pair(unsigned(F.PReg), Val));
3027 // For tail calls lower the arguments to the 'real' stack slots. Sibcalls
3028 // don't need this because the eligibility check rejects calls that require
3029 // shuffling arguments passed in memory.
3030 if (!IsSibcall && isTailCall) {
3031 // Force all the incoming stack arguments to be loaded from the stack
3032 // before any new outgoing arguments are stored to the stack, because the
3033 // outgoing stack slots may alias the incoming argument stack slots, and
3034 // the alias isn't otherwise explicit. This is slightly more conservative
3035 // than necessary, because it means that each store effectively depends
3036 // on every argument instead of just those arguments it would clobber.
3037 SDValue ArgChain = DAG.getStackArgumentTokenFactor(Chain);
3039 SmallVector<SDValue, 8> MemOpChains2;
3042 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
3043 CCValAssign &VA = ArgLocs[i];
3046 assert(VA.isMemLoc());
3047 SDValue Arg = OutVals[i];
3048 ISD::ArgFlagsTy Flags = Outs[i].Flags;
3049 // Skip inalloca arguments. They don't require any work.
3050 if (Flags.isInAlloca())
3052 // Create frame index.
3053 int32_t Offset = VA.getLocMemOffset()+FPDiff;
3054 uint32_t OpSize = (VA.getLocVT().getSizeInBits()+7)/8;
3055 FI = MF.getFrameInfo()->CreateFixedObject(OpSize, Offset, true);
3056 FIN = DAG.getFrameIndex(FI, getPointerTy());
3058 if (Flags.isByVal()) {
3059 // Copy relative to framepointer.
3060 SDValue Source = DAG.getIntPtrConstant(VA.getLocMemOffset());
3061 if (!StackPtr.getNode())
3062 StackPtr = DAG.getCopyFromReg(Chain, dl,
3063 RegInfo->getStackRegister(),
3065 Source = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, Source);
3067 MemOpChains2.push_back(CreateCopyOfByValArgument(Source, FIN,
3071 // Store relative to framepointer.
3072 MemOpChains2.push_back(
3073 DAG.getStore(ArgChain, dl, Arg, FIN,
3074 MachinePointerInfo::getFixedStack(FI),
3079 if (!MemOpChains2.empty())
3080 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains2);
3082 // Store the return address to the appropriate stack slot.
3083 Chain = EmitTailCallStoreRetAddr(DAG, MF, Chain, RetAddrFrIdx,
3084 getPointerTy(), RegInfo->getSlotSize(),
3088 // Build a sequence of copy-to-reg nodes chained together with token chain
3089 // and flag operands which copy the outgoing args into registers.
3091 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
3092 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
3093 RegsToPass[i].second, InFlag);
3094 InFlag = Chain.getValue(1);
3097 if (DAG.getTarget().getCodeModel() == CodeModel::Large) {
3098 assert(Is64Bit && "Large code model is only legal in 64-bit mode.");
3099 // In the 64-bit large code model, we have to make all calls
3100 // through a register, since the call instruction's 32-bit
3101 // pc-relative offset may not be large enough to hold the whole address.
3103 } else if (Callee->getOpcode() == ISD::GlobalAddress) {
3104 // If the callee is a GlobalAddress node (quite common, every direct call
3105 // is) turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
3107 GlobalAddressSDNode* G = cast<GlobalAddressSDNode>(Callee);
3109 // We should use an extra load for direct calls to dllimported functions in PIC mode.
3111 const GlobalValue *GV = G->getGlobal();
3112 if (!GV->hasDLLImportStorageClass()) {
3113 unsigned char OpFlags = 0;
3114 bool ExtraLoad = false;
3115 unsigned WrapperKind = ISD::DELETED_NODE;
3117 // On ELF targets, in both X86-64 and X86-32 mode, direct calls to
3118 // external symbols must go through the PLT in PIC mode. If the symbol
3119 // has hidden or protected visibility, or if it is static or local, then
3120 // we don't need to use the PLT - we can directly call it.
3121 if (Subtarget->isTargetELF() &&
3122 DAG.getTarget().getRelocationModel() == Reloc::PIC_ &&
3123 GV->hasDefaultVisibility() && !GV->hasLocalLinkage()) {
3124 OpFlags = X86II::MO_PLT;
3125 } else if (Subtarget->isPICStyleStubAny() &&
3126 (GV->isDeclaration() || GV->isWeakForLinker()) &&
3127 (!Subtarget->getTargetTriple().isMacOSX() ||
3128 Subtarget->getTargetTriple().isMacOSXVersionLT(10, 5))) {
3129 // PC-relative references to external symbols should go through $stub,
3130 // unless we're building with the leopard linker or later, which
3131 // automatically synthesizes these stubs.
3132 OpFlags = X86II::MO_DARWIN_STUB;
3133 } else if (Subtarget->isPICStyleRIPRel() &&
3134 isa<Function>(GV) &&
3135 cast<Function>(GV)->getAttributes().
3136 hasAttribute(AttributeSet::FunctionIndex,
3137 Attribute::NonLazyBind)) {
3138 // If the function is marked as non-lazy, generate an indirect call
3139 // which loads from the GOT directly. This avoids runtime overhead
3140 // at the cost of eager binding (and one extra byte of encoding).
3141 OpFlags = X86II::MO_GOTPCREL;
3142 WrapperKind = X86ISD::WrapperRIP;
3146 Callee = DAG.getTargetGlobalAddress(GV, dl, getPointerTy(),
3147 G->getOffset(), OpFlags);
3149 // Add a wrapper if needed.
3150 if (WrapperKind != ISD::DELETED_NODE)
3151 Callee = DAG.getNode(X86ISD::WrapperRIP, dl, getPointerTy(), Callee);
3152 // Add extra indirection if needed.
3154 Callee = DAG.getLoad(getPointerTy(), dl, DAG.getEntryNode(), Callee,
3155 MachinePointerInfo::getGOT(),
3156 false, false, false, 0);
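// Illustrative sketch (register choice and scheduling are up to later passes):
// with MO_GOTPCREL the callee's address is loaded from the GOT and the call goes
// through a register, roughly
//   movq foo@GOTPCREL(%rip), %rax
//   callq *%rax
// trading one extra load (and eager binding) for skipping the lazy PLT stub.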
3158 } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
3159 unsigned char OpFlags = 0;
3161 // On ELF targets, in either X86-64 or X86-32 mode, direct calls to
3162 // external symbols should go through the PLT.
3163 if (Subtarget->isTargetELF() &&
3164 DAG.getTarget().getRelocationModel() == Reloc::PIC_) {
3165 OpFlags = X86II::MO_PLT;
3166 } else if (Subtarget->isPICStyleStubAny() &&
3167 (!Subtarget->getTargetTriple().isMacOSX() ||
3168 Subtarget->getTargetTriple().isMacOSXVersionLT(10, 5))) {
3169 // PC-relative references to external symbols should go through $stub,
3170 // unless we're building with the leopard linker or later, which
3171 // automatically synthesizes these stubs.
3172 OpFlags = X86II::MO_DARWIN_STUB;
3175 Callee = DAG.getTargetExternalSymbol(S->getSymbol(), getPointerTy(),
3177 } else if (Subtarget->isTarget64BitILP32() &&
3178 Callee->getValueType(0) == MVT::i32) {
3179 // Zero-extend the 32-bit Callee address into a 64-bit one, per the x32 ABI.
3180 Callee = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64, Callee);
3183 // Returns a chain & a flag for retval copy to use.
3184 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
3185 SmallVector<SDValue, 8> Ops;
3187 if (!IsSibcall && isTailCall) {
3188 Chain = DAG.getCALLSEQ_END(Chain,
3189 DAG.getIntPtrConstant(NumBytesToPop, true),
3190 DAG.getIntPtrConstant(0, true), InFlag, dl);
3191 InFlag = Chain.getValue(1);
3194 Ops.push_back(Chain);
3195 Ops.push_back(Callee);
3198 Ops.push_back(DAG.getConstant(FPDiff, MVT::i32));
3200 // Add argument registers to the end of the list so that they are known live into the call.
3202 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
3203 Ops.push_back(DAG.getRegister(RegsToPass[i].first,
3204 RegsToPass[i].second.getValueType()));
3206 // Add a register mask operand representing the call-preserved registers.
3207 const TargetRegisterInfo *TRI = Subtarget->getRegisterInfo();
3208 const uint32_t *Mask = TRI->getCallPreservedMask(CallConv);
3209 assert(Mask && "Missing call preserved mask for calling convention");
3210 Ops.push_back(DAG.getRegisterMask(Mask));
3212 if (InFlag.getNode())
3213 Ops.push_back(InFlag);
3217 //// If this is the first return lowered for this function, add the regs
3218 //// to the liveout set for the function.
3219 // This isn't right, although it's probably harmless on x86; liveouts
3220 // should be computed from returns not tail calls. Consider a void
3221 // function making a tail call to a function returning int.
3222 return DAG.getNode(X86ISD::TC_RETURN, dl, NodeTys, Ops);
3225 Chain = DAG.getNode(X86ISD::CALL, dl, NodeTys, Ops);
3226 InFlag = Chain.getValue(1);
3228 // Create the CALLSEQ_END node.
3229 unsigned NumBytesForCalleeToPop;
3230 if (X86::isCalleePop(CallConv, Is64Bit, isVarArg,
3231 DAG.getTarget().Options.GuaranteedTailCallOpt))
3232 NumBytesForCalleeToPop = NumBytes; // Callee pops everything
3233 else if (!Is64Bit && !IsTailCallConvention(CallConv) &&
3234 !Subtarget->getTargetTriple().isOSMSVCRT() &&
3235 SR == StackStructReturn)
3236 // If this is a call to a struct-return function, the callee
3237 // pops the hidden struct pointer, so we have to push it back.
3238 // This is common for Darwin/X86, Linux & Mingw32 targets.
3239 // For MSVC Win32 targets, the caller pops the hidden struct pointer.
3240 NumBytesForCalleeToPop = 4;
3242 NumBytesForCalleeToPop = 0; // Callee pops nothing.
3244 // Returns a flag for retval copy to use.
3246 Chain = DAG.getCALLSEQ_END(Chain,
3247 DAG.getIntPtrConstant(NumBytesToPop, true),
3248 DAG.getIntPtrConstant(NumBytesForCalleeToPop,
3251 InFlag = Chain.getValue(1);
3254 // Handle result values, copying them out of physregs into vregs that we return.
3256 return LowerCallResult(Chain, InFlag, CallConv, isVarArg,
3257 Ins, dl, DAG, InVals);
3260 //===----------------------------------------------------------------------===//
3261 // Fast Calling Convention (tail call) implementation
3262 //===----------------------------------------------------------------------===//
3264 // Like stdcall, the callee cleans up the arguments, except that ECX is
3265 // reserved for storing the address of the tail-called function. Only 2 registers
3266 // are free for argument passing (inreg). Tail call optimization is performed provided:
3268 // * tailcallopt is enabled
3269 // * caller/callee are fastcc
3270 // On X86_64 architecture with GOT-style position independent code only local
3271 // (within module) calls are supported at the moment.
3272 // To keep the stack aligned according to the platform ABI, the function
3273 // GetAlignedArgumentStackSize ensures that the argument delta is always a
3274 // multiple of the stack alignment. (Dynamic linkers need this - darwin's dyld for example)
3275 // If a tail-called callee has more arguments than the caller, the caller needs
3276 // to make sure that there is room to move the RETADDR to. This is
3277 // achieved by reserving an area the size of the argument delta right after the
3278 // original RETADDR, but before the saved framepointer or the spilled registers
3279 // e.g. caller(arg1, arg2) calls callee(arg1, arg2,arg3,arg4)
3291 /// GetAlignedArgumentStackSize - Align the stack size to e.g. 16n + 12 bytes
3292 /// for a 16-byte alignment requirement.
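/// Illustrative example, assuming the 32-bit case (16-byte alignment, 4-byte
/// slots, so the target residue is 12 mod 16): a StackSize of 20 has
/// 20 & 15 == 4 <= 12, so the first branch below yields 20 + (12 - 4) == 28;
/// a StackSize of 30 has 30 & 15 == 14 > 12, so the second branch yields
/// (30 & ~15) + 16 + 12 == 44. Both results are of the form 16n + 12.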
3294 X86TargetLowering::GetAlignedArgumentStackSize(unsigned StackSize,
3295 SelectionDAG& DAG) const {
3296 const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
3297 const TargetFrameLowering &TFI = *Subtarget->getFrameLowering();
3298 unsigned StackAlignment = TFI.getStackAlignment();
3299 uint64_t AlignMask = StackAlignment - 1;
3300 int64_t Offset = StackSize;
3301 unsigned SlotSize = RegInfo->getSlotSize();
3302 if ( (Offset & AlignMask) <= (StackAlignment - SlotSize) ) {
3303 // The remainder is at most 12, so just add the difference.
3304 Offset += ((StackAlignment - SlotSize) - (Offset & AlignMask));
3306 // Mask out the lower bits, add the stack alignment once plus the 12 bytes.
3307 Offset = ((~AlignMask) & Offset) + StackAlignment +
3308 (StackAlignment-SlotSize);
3313 /// MatchingStackOffset - Return true if the given stack call argument is
3314 /// already available in the same relative position in the caller's
3315 /// incoming argument stack.
3317 bool MatchingStackOffset(SDValue Arg, unsigned Offset, ISD::ArgFlagsTy Flags,
3318 MachineFrameInfo *MFI, const MachineRegisterInfo *MRI,
3319 const X86InstrInfo *TII) {
3320 unsigned Bytes = Arg.getValueType().getSizeInBits() / 8;
3322 if (Arg.getOpcode() == ISD::CopyFromReg) {
3323 unsigned VR = cast<RegisterSDNode>(Arg.getOperand(1))->getReg();
3324 if (!TargetRegisterInfo::isVirtualRegister(VR))
3326 MachineInstr *Def = MRI->getVRegDef(VR);
3329 if (!Flags.isByVal()) {
3330 if (!TII->isLoadFromStackSlot(Def, FI))
3333 unsigned Opcode = Def->getOpcode();
3334 if ((Opcode == X86::LEA32r || Opcode == X86::LEA64r ||
3335 Opcode == X86::LEA64_32r) &&
3336 Def->getOperand(1).isFI()) {
3337 FI = Def->getOperand(1).getIndex();
3338 Bytes = Flags.getByValSize();
3342 } else if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Arg)) {
3343 if (Flags.isByVal())
3344 // ByVal argument is passed in as a pointer but it's now being
3345 // dereferenced. e.g.
3346 // define @foo(%struct.X* %A) {
3347 // tail call @bar(%struct.X* byval %A)
3350 SDValue Ptr = Ld->getBasePtr();
3351 FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(Ptr);
3354 FI = FINode->getIndex();
3355 } else if (Arg.getOpcode() == ISD::FrameIndex && Flags.isByVal()) {
3356 FrameIndexSDNode *FINode = cast<FrameIndexSDNode>(Arg);
3357 FI = FINode->getIndex();
3358 Bytes = Flags.getByValSize();
3362 assert(FI != INT_MAX);
3363 if (!MFI->isFixedObjectIndex(FI))
3365 return Offset == MFI->getObjectOffset(FI) && Bytes == MFI->getObjectSize(FI);
3368 /// IsEligibleForTailCallOptimization - Check whether the call is eligible
3369 /// for tail call optimization. Targets which want to do tail call
3370 /// optimization should implement this function.
3372 X86TargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
3373 CallingConv::ID CalleeCC,
3375 bool isCalleeStructRet,
3376 bool isCallerStructRet,
3378 const SmallVectorImpl<ISD::OutputArg> &Outs,
3379 const SmallVectorImpl<SDValue> &OutVals,
3380 const SmallVectorImpl<ISD::InputArg> &Ins,
3381 SelectionDAG &DAG) const {
3382 if (!IsTailCallConvention(CalleeCC) && !IsCCallConvention(CalleeCC))
3385 // If -tailcallopt is specified, make fastcc functions tail-callable.
3386 const MachineFunction &MF = DAG.getMachineFunction();
3387 const Function *CallerF = MF.getFunction();
3389 // If the function return type is x86_fp80 and the callee return type is not,
3390 // then the FP_EXTEND of the call result is not a nop. It's not safe to
3391 // perform a tailcall optimization here.
3392 if (CallerF->getReturnType()->isX86_FP80Ty() && !RetTy->isX86_FP80Ty())
3395 CallingConv::ID CallerCC = CallerF->getCallingConv();
3396 bool CCMatch = CallerCC == CalleeCC;
3397 bool IsCalleeWin64 = Subtarget->isCallingConvWin64(CalleeCC);
3398 bool IsCallerWin64 = Subtarget->isCallingConvWin64(CallerCC);
3400 if (DAG.getTarget().Options.GuaranteedTailCallOpt) {
3401 if (IsTailCallConvention(CalleeCC) && CCMatch)
3406 // Look for obvious safe cases to perform tail call optimization that do not
3407 // require ABI changes. This is what gcc calls sibcall.
3409 // Can't do sibcall if stack needs to be dynamically re-aligned. PEI needs to
3410 // emit a special epilogue.
3411 const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
3412 if (RegInfo->needsStackRealignment(MF))
3415 // Also avoid sibcall optimization if either caller or callee uses struct
3416 // return semantics.
3417 if (isCalleeStructRet || isCallerStructRet)
3420 // A stdcall/thiscall caller is expected to clean up its arguments; the
3421 // callee isn't going to do that.
3422 // FIXME: this is more restrictive than needed. We could produce a tailcall
3423 // when the stack adjustment matches. For example, with a thiscall that takes
3424 // only one argument.
3425 if (!CCMatch && (CallerCC == CallingConv::X86_StdCall ||
3426 CallerCC == CallingConv::X86_ThisCall))
3429 // Do not sibcall optimize vararg calls unless all arguments are passed via registers.
3431 if (isVarArg && !Outs.empty()) {
3433 // Optimizing for varargs on Win64 is unlikely to be safe without
3434 // additional testing.
3435 if (IsCalleeWin64 || IsCallerWin64)
3438 SmallVector<CCValAssign, 16> ArgLocs;
3439 CCState CCInfo(CalleeCC, isVarArg, DAG.getMachineFunction(), ArgLocs,
3442 CCInfo.AnalyzeCallOperands(Outs, CC_X86);
3443 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i)
3444 if (!ArgLocs[i].isRegLoc())
3448 // If the call result is in ST0 / ST1, it needs to be popped off the x87
3449 // stack. Therefore, if it's not used by the call it is not safe to optimize
3450 // this into a sibcall.
3451 bool Unused = false;
3452 for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
3459 SmallVector<CCValAssign, 16> RVLocs;
3460 CCState CCInfo(CalleeCC, false, DAG.getMachineFunction(), RVLocs,
3462 CCInfo.AnalyzeCallResult(Ins, RetCC_X86);
3463 for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
3464 CCValAssign &VA = RVLocs[i];
3465 if (VA.getLocReg() == X86::FP0 || VA.getLocReg() == X86::FP1)
3470 // If the calling conventions do not match, then we'd better make sure the
3471 // results are returned in the same way as what the caller expects.
3473 SmallVector<CCValAssign, 16> RVLocs1;
3474 CCState CCInfo1(CalleeCC, false, DAG.getMachineFunction(), RVLocs1,
3476 CCInfo1.AnalyzeCallResult(Ins, RetCC_X86);
3478 SmallVector<CCValAssign, 16> RVLocs2;
3479 CCState CCInfo2(CallerCC, false, DAG.getMachineFunction(), RVLocs2,
3481 CCInfo2.AnalyzeCallResult(Ins, RetCC_X86);
3483 if (RVLocs1.size() != RVLocs2.size())
3485 for (unsigned i = 0, e = RVLocs1.size(); i != e; ++i) {
3486 if (RVLocs1[i].isRegLoc() != RVLocs2[i].isRegLoc())
3488 if (RVLocs1[i].getLocInfo() != RVLocs2[i].getLocInfo())
3490 if (RVLocs1[i].isRegLoc()) {
3491 if (RVLocs1[i].getLocReg() != RVLocs2[i].getLocReg())
3494 if (RVLocs1[i].getLocMemOffset() != RVLocs2[i].getLocMemOffset())
3500 // If the callee takes no arguments then go on to check the results of the call.
3502 if (!Outs.empty()) {
3503 // Check if stack adjustment is needed. For now, do not do this if any
3504 // argument is passed on the stack.
3505 SmallVector<CCValAssign, 16> ArgLocs;
3506 CCState CCInfo(CalleeCC, isVarArg, DAG.getMachineFunction(), ArgLocs,
3509 // Allocate shadow area for Win64
3511 CCInfo.AllocateStack(32, 8);
3513 CCInfo.AnalyzeCallOperands(Outs, CC_X86);
3514 if (CCInfo.getNextStackOffset()) {
3515 MachineFunction &MF = DAG.getMachineFunction();
3516 if (MF.getInfo<X86MachineFunctionInfo>()->getBytesToPopOnReturn())
3519 // Check if the arguments are already laid out in the right way as
3520 // the caller's fixed stack objects.
3521 MachineFrameInfo *MFI = MF.getFrameInfo();
3522 const MachineRegisterInfo *MRI = &MF.getRegInfo();
3523 const X86InstrInfo *TII = Subtarget->getInstrInfo();
3524 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
3525 CCValAssign &VA = ArgLocs[i];
3526 SDValue Arg = OutVals[i];
3527 ISD::ArgFlagsTy Flags = Outs[i].Flags;
3528 if (VA.getLocInfo() == CCValAssign::Indirect)
3530 if (!VA.isRegLoc()) {
3531 if (!MatchingStackOffset(Arg, VA.getLocMemOffset(), Flags,
3538 // If the tailcall address may be in a register, then make sure it's
3539 // possible to register allocate for it. In 32-bit, the call address can
3540 // only target EAX, EDX, or ECX since the tail call must be scheduled after
3541 // callee-saved registers are restored. These happen to be the same
3542 // registers used to pass 'inreg' arguments so watch out for those.
3543 if (!Subtarget->is64Bit() &&
3544 ((!isa<GlobalAddressSDNode>(Callee) &&
3545 !isa<ExternalSymbolSDNode>(Callee)) ||
3546 DAG.getTarget().getRelocationModel() == Reloc::PIC_)) {
3547 unsigned NumInRegs = 0;
3548 // In PIC we need an extra register to formulate the address computation for the callee.
3550 unsigned MaxInRegs =
3551 (DAG.getTarget().getRelocationModel() == Reloc::PIC_) ? 2 : 3;
3553 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
3554 CCValAssign &VA = ArgLocs[i];
3557 unsigned Reg = VA.getLocReg();
3560 case X86::EAX: case X86::EDX: case X86::ECX:
3561 if (++NumInRegs == MaxInRegs)
3573 X86TargetLowering::createFastISel(FunctionLoweringInfo &funcInfo,
3574 const TargetLibraryInfo *libInfo) const {
3575 return X86::createFastISel(funcInfo, libInfo);
3578 //===----------------------------------------------------------------------===//
3579 // Other Lowering Hooks
3580 //===----------------------------------------------------------------------===//
3582 static bool MayFoldLoad(SDValue Op) {
3583 return Op.hasOneUse() && ISD::isNormalLoad(Op.getNode());
3586 static bool MayFoldIntoStore(SDValue Op) {
3587 return Op.hasOneUse() && ISD::isNormalStore(*Op.getNode()->use_begin());
3590 static bool isTargetShuffle(unsigned Opcode) {
3592 default: return false;
3593 case X86ISD::BLENDI:
3594 case X86ISD::PSHUFB:
3595 case X86ISD::PSHUFD:
3596 case X86ISD::PSHUFHW:
3597 case X86ISD::PSHUFLW:
3599 case X86ISD::PALIGNR:
3600 case X86ISD::MOVLHPS:
3601 case X86ISD::MOVLHPD:
3602 case X86ISD::MOVHLPS:
3603 case X86ISD::MOVLPS:
3604 case X86ISD::MOVLPD:
3605 case X86ISD::MOVSHDUP:
3606 case X86ISD::MOVSLDUP:
3607 case X86ISD::MOVDDUP:
3610 case X86ISD::UNPCKL:
3611 case X86ISD::UNPCKH:
3612 case X86ISD::VPERMILPI:
3613 case X86ISD::VPERM2X128:
3614 case X86ISD::VPERMI:
3619 static SDValue getTargetShuffleNode(unsigned Opc, SDLoc dl, EVT VT,
3620 SDValue V1, SelectionDAG &DAG) {
3622 default: llvm_unreachable("Unknown x86 shuffle node");
3623 case X86ISD::MOVSHDUP:
3624 case X86ISD::MOVSLDUP:
3625 case X86ISD::MOVDDUP:
3626 return DAG.getNode(Opc, dl, VT, V1);
3630 static SDValue getTargetShuffleNode(unsigned Opc, SDLoc dl, EVT VT,
3631 SDValue V1, unsigned TargetMask,
3632 SelectionDAG &DAG) {
3634 default: llvm_unreachable("Unknown x86 shuffle node");
3635 case X86ISD::PSHUFD:
3636 case X86ISD::PSHUFHW:
3637 case X86ISD::PSHUFLW:
3638 case X86ISD::VPERMILPI:
3639 case X86ISD::VPERMI:
3640 return DAG.getNode(Opc, dl, VT, V1, DAG.getConstant(TargetMask, MVT::i8));
3644 static SDValue getTargetShuffleNode(unsigned Opc, SDLoc dl, EVT VT,
3645 SDValue V1, SDValue V2, unsigned TargetMask,
3646 SelectionDAG &DAG) {
3648 default: llvm_unreachable("Unknown x86 shuffle node");
3649 case X86ISD::PALIGNR:
3650 case X86ISD::VALIGN:
3652 case X86ISD::VPERM2X128:
3653 return DAG.getNode(Opc, dl, VT, V1, V2,
3654 DAG.getConstant(TargetMask, MVT::i8));
3658 static SDValue getTargetShuffleNode(unsigned Opc, SDLoc dl, EVT VT,
3659 SDValue V1, SDValue V2, SelectionDAG &DAG) {
3661 default: llvm_unreachable("Unknown x86 shuffle node");
3662 case X86ISD::MOVLHPS:
3663 case X86ISD::MOVLHPD:
3664 case X86ISD::MOVHLPS:
3665 case X86ISD::MOVLPS:
3666 case X86ISD::MOVLPD:
3669 case X86ISD::UNPCKL:
3670 case X86ISD::UNPCKH:
3671 return DAG.getNode(Opc, dl, VT, V1, V2);
3675 SDValue X86TargetLowering::getReturnAddressFrameIndex(SelectionDAG &DAG) const {
3676 MachineFunction &MF = DAG.getMachineFunction();
3677 const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
3678 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
3679 int ReturnAddrIndex = FuncInfo->getRAIndex();
3681 if (ReturnAddrIndex == 0) {
3682 // Set up a frame object for the return address.
3683 unsigned SlotSize = RegInfo->getSlotSize();
3684 ReturnAddrIndex = MF.getFrameInfo()->CreateFixedObject(SlotSize,
3687 FuncInfo->setRAIndex(ReturnAddrIndex);
3690 return DAG.getFrameIndex(ReturnAddrIndex, getPointerTy());
3693 bool X86::isOffsetSuitableForCodeModel(int64_t Offset, CodeModel::Model M,
3694 bool hasSymbolicDisplacement) {
3695 // Offset should fit into 32 bit immediate field.
3696 if (!isInt<32>(Offset))
3699 // If we don't have a symbolic displacement - we don't have any extra
3701 if (!hasSymbolicDisplacement)
3704 // FIXME: Some tweaks might be needed for medium code model.
3705 if (M != CodeModel::Small && M != CodeModel::Kernel)
3708 // For the small code model we assume that the latest object is 16MB before the
3709 // end of the 31-bit boundary. We may also accept pretty large negative constants
3710 // knowing that all objects are in the positive half of the address space.
3711 if (M == CodeModel::Small && Offset < 16*1024*1024)
3714 // For the kernel code model we know that all objects reside in the negative half
3715 // of the 32-bit address space. We cannot accept negative offsets, since they may
3716 // be just off, but we may accept pretty large positive ones.
3717 if (M == CodeModel::Kernel && Offset >= 0)
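// Illustrative examples: with a symbolic displacement under the small code
// model, an offset of 1MB is accepted (well inside the 16MB slack) while an
// offset of 32MB is not; under the kernel code model an offset of -8 is
// rejected for the reason given above.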
3723 /// isCalleePop - Determines whether the callee is required to pop its
3724 /// own arguments. Callee pop is necessary to support tail calls.
3725 bool X86::isCalleePop(CallingConv::ID CallingConv,
3726 bool is64Bit, bool IsVarArg, bool TailCallOpt) {
3727 switch (CallingConv) {
3730 case CallingConv::X86_StdCall:
3731 case CallingConv::X86_FastCall:
3732 case CallingConv::X86_ThisCall:
3734 case CallingConv::Fast:
3735 case CallingConv::GHC:
3736 case CallingConv::HiPE:
3743 /// \brief Return true if the condition is an unsigned comparison operation.
3744 static bool isX86CCUnsigned(unsigned X86CC) {
3746 default: llvm_unreachable("Invalid integer condition!");
3747 case X86::COND_E: return true;
3748 case X86::COND_G: return false;
3749 case X86::COND_GE: return false;
3750 case X86::COND_L: return false;
3751 case X86::COND_LE: return false;
3752 case X86::COND_NE: return true;
3753 case X86::COND_B: return true;
3754 case X86::COND_A: return true;
3755 case X86::COND_BE: return true;
3756 case X86::COND_AE: return true;
3758 llvm_unreachable("covered switch fell through?!");
3761 /// TranslateX86CC - Do a one-to-one translation of an ISD::CondCode to the X86
3762 /// specific condition code, returning the condition code and the LHS/RHS of the
3763 /// comparison to make.
3764 static unsigned TranslateX86CC(ISD::CondCode SetCCOpcode, bool isFP,
3765 SDValue &LHS, SDValue &RHS, SelectionDAG &DAG) {
3767 if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS)) {
3768 if (SetCCOpcode == ISD::SETGT && RHSC->isAllOnesValue()) {
3769 // X > -1 -> X == 0, jump !sign.
3770 RHS = DAG.getConstant(0, RHS.getValueType());
3771 return X86::COND_NS;
3773 if (SetCCOpcode == ISD::SETLT && RHSC->isNullValue()) {
3774 // X < 0 -> X == 0, jump on sign.
3777 if (SetCCOpcode == ISD::SETLT && RHSC->getZExtValue() == 1) {
3778 // X < 1 -> X <= 0
3779 RHS = DAG.getConstant(0, RHS.getValueType());
3780 return X86::COND_LE;
3784 switch (SetCCOpcode) {
3785 default: llvm_unreachable("Invalid integer condition!");
3786 case ISD::SETEQ: return X86::COND_E;
3787 case ISD::SETGT: return X86::COND_G;
3788 case ISD::SETGE: return X86::COND_GE;
3789 case ISD::SETLT: return X86::COND_L;
3790 case ISD::SETLE: return X86::COND_LE;
3791 case ISD::SETNE: return X86::COND_NE;
3792 case ISD::SETULT: return X86::COND_B;
3793 case ISD::SETUGT: return X86::COND_A;
3794 case ISD::SETULE: return X86::COND_BE;
3795 case ISD::SETUGE: return X86::COND_AE;
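// Illustrative example: for a signed 'x > -1' comparison the special case above
// rewrites the RHS to 0 and returns X86::COND_NS, so the final code typically
// tests the sign flag (e.g. 'test %eax, %eax' + jns/setns) instead of comparing
// against -1.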
3799 // First determine if it is required or is profitable to flip the operands.
3801 // If LHS is a foldable load, but RHS is not, flip the condition.
3802 if (ISD::isNON_EXTLoad(LHS.getNode()) &&
3803 !ISD::isNON_EXTLoad(RHS.getNode())) {
3804 SetCCOpcode = getSetCCSwappedOperands(SetCCOpcode);
3805 std::swap(LHS, RHS);
3808 switch (SetCCOpcode) {
3814 std::swap(LHS, RHS);
3818 // On a floating point condition, the flags are set as follows:
3819 //  ZF | PF | CF | op
3820 //  0 | 0 | 0 | X > Y
3821 //  0 | 0 | 1 | X < Y
3822 //  1 | 0 | 0 | X == Y
3823 //  1 | 1 | 1 | unordered
3824 switch (SetCCOpcode) {
3825 default: llvm_unreachable("Condcode should be pre-legalized away");
3827 case ISD::SETEQ: return X86::COND_E;
3828 case ISD::SETOLT: // flipped
3830 case ISD::SETGT: return X86::COND_A;
3831 case ISD::SETOLE: // flipped
3833 case ISD::SETGE: return X86::COND_AE;
3834 case ISD::SETUGT: // flipped
3836 case ISD::SETLT: return X86::COND_B;
3837 case ISD::SETUGE: // flipped
3839 case ISD::SETLE: return X86::COND_BE;
3841 case ISD::SETNE: return X86::COND_NE;
3842 case ISD::SETUO: return X86::COND_P;
3843 case ISD::SETO: return X86::COND_NP;
3845 case ISD::SETUNE: return X86::COND_INVALID;
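// Illustrative example: SETOLT is one of the 'flipped' cases - the operands are
// swapped above and it maps to X86::COND_A, because after the swap "X < Y"
// becomes "Y > X", which the table above reports as ZF == 0, PF == 0, CF == 0,
// i.e. the 'above' condition.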
3849 /// hasFPCMov - is there a floating point cmov for the specific X86 condition
3850 /// code. The current x86 ISA includes the following FP cmov instructions:
3851 /// fcmovb, fcmovbe, fcmove, fcmovu, fcmovae, fcmova, fcmovne, fcmovnu.
3852 static bool hasFPCMov(unsigned X86CC) {
3868 /// isFPImmLegal - Returns true if the target can instruction select the
3869 /// specified FP immediate natively. If false, the legalizer will
3870 /// materialize the FP immediate as a load from a constant pool.
3871 bool X86TargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const {
3872 for (unsigned i = 0, e = LegalFPImmediates.size(); i != e; ++i) {
3873 if (Imm.bitwiseIsEqual(LegalFPImmediates[i]))
3879 bool X86TargetLowering::shouldReduceLoadWidth(SDNode *Load,
3880 ISD::LoadExtType ExtTy,
3882 // "ELF Handling for Thread-Local Storage" specifies that R_X86_64_GOTTPOFF
3883 // relocation target a movq or addq instruction: don't let the load shrink.
3884 SDValue BasePtr = cast<LoadSDNode>(Load)->getBasePtr();
3885 if (BasePtr.getOpcode() == X86ISD::WrapperRIP)
3886 if (const auto *GA = dyn_cast<GlobalAddressSDNode>(BasePtr.getOperand(0)))
3887 return GA->getTargetFlags() != X86II::MO_GOTTPOFF;
3891 /// \brief Returns true if it is beneficial to convert a load of a constant
3892 /// to just the constant itself.
3893 bool X86TargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm,
3895 assert(Ty->isIntegerTy());
3897 unsigned BitSize = Ty->getPrimitiveSizeInBits();
3898 if (BitSize == 0 || BitSize > 64)
3903 bool X86TargetLowering::isExtractSubvectorCheap(EVT ResVT,
3904 unsigned Index) const {
3905 if (!isOperationLegalOrCustom(ISD::EXTRACT_SUBVECTOR, ResVT))
3908 return (Index == 0 || Index == ResVT.getVectorNumElements());
3911 bool X86TargetLowering::isCheapToSpeculateCttz() const {
3912 // Speculate cttz only if we can directly use TZCNT.
3913 return Subtarget->hasBMI();
3916 bool X86TargetLowering::isCheapToSpeculateCtlz() const {
3917 // Speculate ctlz only if we can directly use LZCNT.
3918 return Subtarget->hasLZCNT();
3921 /// isUndefOrInRange - Return true if Val is undef or if its value falls within
3922 /// the specified range (L, H].
3923 static bool isUndefOrInRange(int Val, int Low, int Hi) {
3924 return (Val < 0) || (Val >= Low && Val < Hi);
3927 /// isUndefOrEqual - Val is either less than zero (undef) or equal to the
3928 /// specified value.
3929 static bool isUndefOrEqual(int Val, int CmpVal) {
3930 return (Val < 0 || Val == CmpVal);
3933 /// isSequentialOrUndefInRange - Return true if every element in Mask, beginning
3934 /// from position Pos and ending in Pos+Size, falls within the specified
3935 /// sequential range [Low, Low+Size), or is undef.
3936 static bool isSequentialOrUndefInRange(ArrayRef<int> Mask,
3937 unsigned Pos, unsigned Size, int Low) {
3938 for (unsigned i = Pos, e = Pos+Size; i != e; ++i, ++Low)
3939 if (!isUndefOrEqual(Mask[i], Low))
3944 /// isPSHUFDMask - Return true if the node specifies a shuffle of elements that
3945 /// is suitable for input to PSHUFD. That is, it doesn't reference the other
3946 /// operand - by default it will match against the first operand.
3947 static bool isPSHUFDMask(ArrayRef<int> Mask, MVT VT,
3948 bool TestSecondOperand = false) {
3949 if (VT != MVT::v4f32 && VT != MVT::v4i32 &&
3950 VT != MVT::v2f64 && VT != MVT::v2i64)
3953 unsigned NumElems = VT.getVectorNumElements();
3954 unsigned Lo = TestSecondOperand ? NumElems : 0;
3955 unsigned Hi = Lo + NumElems;
3957 for (unsigned i = 0; i < NumElems; ++i)
3958 if (!isUndefOrInRange(Mask[i], (int)Lo, (int)Hi))
3964 /// isPSHUFHWMask - Return true if the node specifies a shuffle of elements that
3965 /// is suitable for input to PSHUFHW.
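/// Illustrative example: for v8i16 the mask <0, 1, 2, 3, 7, 6, 5, 4> is accepted
/// (low quadword in order, high elements all within [4, 8)), whereas
/// <4, 1, 2, 3, 7, 6, 5, 4> is rejected because the low quadword is no longer a
/// possibly-undef identity.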
3966 static bool isPSHUFHWMask(ArrayRef<int> Mask, MVT VT, bool HasInt256) {
3967 if (VT != MVT::v8i16 && (!HasInt256 || VT != MVT::v16i16))
3970 // Lower quadword copied in order or undef.
3971 if (!isSequentialOrUndefInRange(Mask, 0, 4, 0))
3974 // Upper quadword shuffled.
3975 for (unsigned i = 4; i != 8; ++i)
3976 if (!isUndefOrInRange(Mask[i], 4, 8))
3979 if (VT == MVT::v16i16) {
3980 // Lower quadword copied in order or undef.
3981 if (!isSequentialOrUndefInRange(Mask, 8, 4, 8))
3984 // Upper quadword shuffled.
3985 for (unsigned i = 12; i != 16; ++i)
3986 if (!isUndefOrInRange(Mask[i], 12, 16))
3993 /// isPSHUFLWMask - Return true if the node specifies a shuffle of elements that
3994 /// is suitable for input to PSHUFLW.
3995 static bool isPSHUFLWMask(ArrayRef<int> Mask, MVT VT, bool HasInt256) {
3996 if (VT != MVT::v8i16 && (!HasInt256 || VT != MVT::v16i16))
3999 // Upper quadword copied in order.
4000 if (!isSequentialOrUndefInRange(Mask, 4, 4, 4))
4003 // Lower quadword shuffled.
4004 for (unsigned i = 0; i != 4; ++i)
4005 if (!isUndefOrInRange(Mask[i], 0, 4))
4008 if (VT == MVT::v16i16) {
4009 // Upper quadword copied in order.
4010 if (!isSequentialOrUndefInRange(Mask, 12, 4, 12))
4013 // Lower quadword shuffled.
4014 for (unsigned i = 8; i != 12; ++i)
4015 if (!isUndefOrInRange(Mask[i], 8, 12))
4022 /// \brief Return true if the mask specifies a shuffle of elements that is
4023 /// suitable for input to intralane (palignr) or interlane (valign) vector shuffles.
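/// Illustrative example: for v8i16 the mask <1, 2, 3, 4, 5, 6, 7, 8> is accepted
/// as a one-element (two-byte) PALIGNR: the elements are consecutive starting at
/// 1, and element 8 - the first element of the second source - continues the run.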
4025 static bool isAlignrMask(ArrayRef<int> Mask, MVT VT, bool InterLane) {
4026 unsigned NumElts = VT.getVectorNumElements();
4027 unsigned NumLanes = InterLane ? 1: VT.getSizeInBits()/128;
4028 unsigned NumLaneElts = NumElts/NumLanes;
4030 // Do not handle 64-bit element shuffles with palignr.
4031 if (NumLaneElts == 2)
4034 for (unsigned l = 0; l != NumElts; l+=NumLaneElts) {
4036 for (i = 0; i != NumLaneElts; ++i) {
4041 // Lane is all undef, go to next lane
4042 if (i == NumLaneElts)
4045 int Start = Mask[i+l];
4047 // Make sure it's in this lane in one of the sources
4048 if (!isUndefOrInRange(Start, l, l+NumLaneElts) &&
4049 !isUndefOrInRange(Start, l+NumElts, l+NumElts+NumLaneElts))
4052 // If not lane 0, then we must match lane 0
4053 if (l != 0 && Mask[i] >= 0 && !isUndefOrEqual(Start, Mask[i]+l))
4056 // Correct second source to be contiguous with first source
4057 if (Start >= (int)NumElts)
4058 Start -= NumElts - NumLaneElts;
4060 // Make sure we're shifting in the right direction.
4061 if (Start <= (int)(i+l))
4066 // Check the rest of the elements to see if they are consecutive.
4067 for (++i; i != NumLaneElts; ++i) {
4068 int Idx = Mask[i+l];
4070 // Make sure it's in this lane
4071 if (!isUndefOrInRange(Idx, l, l+NumLaneElts) &&
4072 !isUndefOrInRange(Idx, l+NumElts, l+NumElts+NumLaneElts))
4075 // If not lane 0, then we must match lane 0
4076 if (l != 0 && Mask[i] >= 0 && !isUndefOrEqual(Idx, Mask[i]+l))
4079 if (Idx >= (int)NumElts)
4080 Idx -= NumElts - NumLaneElts;
4082 if (!isUndefOrEqual(Idx, Start+i))
4091 /// \brief Return true if the node specifies a shuffle of elements that is
4092 /// suitable for input to PALIGNR.
4093 static bool isPALIGNRMask(ArrayRef<int> Mask, MVT VT,
4094 const X86Subtarget *Subtarget) {
4095 if ((VT.is128BitVector() && !Subtarget->hasSSSE3()) ||
4096 (VT.is256BitVector() && !Subtarget->hasInt256()) ||
4097 VT.is512BitVector())
4098 // FIXME: Add AVX512BW.
4101 return isAlignrMask(Mask, VT, false);
4104 /// \brief Return true if the node specifies a shuffle of elements that is
4105 /// suitable for input to VALIGN.
4106 static bool isVALIGNMask(ArrayRef<int> Mask, MVT VT,
4107 const X86Subtarget *Subtarget) {
4108 // FIXME: Add AVX512VL.
4109 if (!VT.is512BitVector() || !Subtarget->hasAVX512())
4111 return isAlignrMask(Mask, VT, true);
4114 /// CommuteVectorShuffleMask - Change values in a shuffle permute mask assuming
4115 /// the two vector operands have swapped position.
4116 static void CommuteVectorShuffleMask(SmallVectorImpl<int> &Mask,
4117 unsigned NumElems) {
4118 for (unsigned i = 0; i != NumElems; ++i) {
4122 else if (idx < (int)NumElems)
4123 Mask[i] = idx + NumElems;
4125 Mask[i] = idx - NumElems;
4129 /// isSHUFPMask - Return true if the specified VECTOR_SHUFFLE operand
4130 /// specifies a shuffle of elements that is suitable for input to 128/256-bit
4131 /// SHUFPS and SHUFPD. If Commuted is true, then it checks for sources to be
4132 /// reverse of what x86 shuffles want.
4133 static bool isSHUFPMask(ArrayRef<int> Mask, MVT VT, bool Commuted = false) {
4135 unsigned NumElems = VT.getVectorNumElements();
4136 unsigned NumLanes = VT.getSizeInBits()/128;
4137 unsigned NumLaneElems = NumElems/NumLanes;
4139 if (NumLaneElems != 2 && NumLaneElems != 4)
4142 unsigned EltSize = VT.getVectorElementType().getSizeInBits();
4143 bool symetricMaskRequired =
4144 (VT.getSizeInBits() >= 256) && (EltSize == 32);
4146 // VSHUFPSY divides the resulting vector into 4 chunks.
4147 // The sources are also split into 4 chunks, and each destination
4148 // chunk must come from a different source chunk.
4150 // SRC1 => X7 X6 X5 X4 X3 X2 X1 X0
4151 // SRC2 => Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0
4153 // DST => Y7..Y4, Y7..Y4, X7..X4, X7..X4,
4154 // Y3..Y0, Y3..Y0, X3..X0, X3..X0
4156 // VSHUFPDY divides the resulting vector into 4 chunks.
4157 // The sources are also split into 4 chunks, and each destination
4158 // chunk must come from a different source chunk.
4160 // SRC1 => X3 X2 X1 X0
4161 // SRC2 => Y3 Y2 Y1 Y0
4163 // DST => Y3..Y2, X3..X2, Y1..Y0, X1..X0
4165 SmallVector<int, 4> MaskVal(NumLaneElems, -1);
4166 unsigned HalfLaneElems = NumLaneElems/2;
4167 for (unsigned l = 0; l != NumElems; l += NumLaneElems) {
4168 for (unsigned i = 0; i != NumLaneElems; ++i) {
4169 int Idx = Mask[i+l];
4170 unsigned RngStart = l + ((Commuted == (i<HalfLaneElems)) ? NumElems : 0);
4171 if (!isUndefOrInRange(Idx, RngStart, RngStart+NumLaneElems))
4173 // For VSHUFPSY, the mask of the second half must be the same as the
4174 // first but with the appropriate offsets. This works in the same way as
4175 // VPERMILPS works with masks.
4176 if (!symetricMaskRequired || Idx < 0)
4178 if (MaskVal[i] < 0) {
4179 MaskVal[i] = Idx - l;
4182 if ((signed)(Idx - l) != MaskVal[i])
4190 /// isMOVHLPSMask - Return true if the specified VECTOR_SHUFFLE operand
4191 /// specifies a shuffle of elements that is suitable for input to MOVHLPS.
4192 static bool isMOVHLPSMask(ArrayRef<int> Mask, MVT VT) {
4193 if (!VT.is128BitVector())
4196 unsigned NumElems = VT.getVectorNumElements();
4201 // Expect bit0 == 6, bit1 == 7, bit2 == 2, bit3 == 3
4202 return isUndefOrEqual(Mask[0], 6) &&
4203 isUndefOrEqual(Mask[1], 7) &&
4204 isUndefOrEqual(Mask[2], 2) &&
4205 isUndefOrEqual(Mask[3], 3);
4208 /// isMOVHLPS_v_undef_Mask - Special case of isMOVHLPSMask for canonical form
4209 /// of vector_shuffle v, v, <2, 3, 2, 3>, i.e. vector_shuffle v, undef,
4211 static bool isMOVHLPS_v_undef_Mask(ArrayRef<int> Mask, MVT VT) {
4212 if (!VT.is128BitVector())
4215 unsigned NumElems = VT.getVectorNumElements();
4220 return isUndefOrEqual(Mask[0], 2) &&
4221 isUndefOrEqual(Mask[1], 3) &&
4222 isUndefOrEqual(Mask[2], 2) &&
4223 isUndefOrEqual(Mask[3], 3);
4226 /// isMOVLPMask - Return true if the specified VECTOR_SHUFFLE operand
4227 /// specifies a shuffle of elements that is suitable for input to MOVLP{S|D}.
4228 static bool isMOVLPMask(ArrayRef<int> Mask, MVT VT) {
4229 if (!VT.is128BitVector())
4232 unsigned NumElems = VT.getVectorNumElements();
4234 if (NumElems != 2 && NumElems != 4)
4237 for (unsigned i = 0, e = NumElems/2; i != e; ++i)
4238 if (!isUndefOrEqual(Mask[i], i + NumElems))
4241 for (unsigned i = NumElems/2, e = NumElems; i != e; ++i)
4242 if (!isUndefOrEqual(Mask[i], i))
4248 /// isMOVLHPSMask - Return true if the specified VECTOR_SHUFFLE operand
4249 /// specifies a shuffle of elements that is suitable for input to MOVLHPS.
4250 static bool isMOVLHPSMask(ArrayRef<int> Mask, MVT VT) {
4251 if (!VT.is128BitVector())
4254 unsigned NumElems = VT.getVectorNumElements();
4256 if (NumElems != 2 && NumElems != 4)
4259 for (unsigned i = 0, e = NumElems/2; i != e; ++i)
4260 if (!isUndefOrEqual(Mask[i], i))
4263 for (unsigned i = 0, e = NumElems/2; i != e; ++i)
4264 if (!isUndefOrEqual(Mask[i + e], i + NumElems))
4270 /// isINSERTPSMask - Return true if the specified VECTOR_SHUFFLE operand
4271 /// specifies a shuffle of elements that is suitable for input to INSERTPS.
4272 /// i. e: If all but one element come from the same vector.
4273 static bool isINSERTPSMask(ArrayRef<int> Mask, MVT VT) {
4274 // TODO: Deal with AVX's VINSERTPS
4275 if (!VT.is128BitVector() || (VT != MVT::v4f32 && VT != MVT::v4i32))
4278 unsigned CorrectPosV1 = 0;
4279 unsigned CorrectPosV2 = 0;
4280 for (int i = 0, e = (int)VT.getVectorNumElements(); i != e; ++i) {
4281 if (Mask[i] == -1) {
4289 else if (Mask[i] == i + 4)
4293 if (CorrectPosV1 == 3 || CorrectPosV2 == 3)
4294 // We have 3 elements (undefs count as elements from any vector) from one
4295 // vector, and one from another.
4302 // Some special combinations that can be optimized.
4305 SDValue Compact8x32ShuffleNode(ShuffleVectorSDNode *SVOp,
4306 SelectionDAG &DAG) {
4307 MVT VT = SVOp->getSimpleValueType(0);
4310 if (VT != MVT::v8i32 && VT != MVT::v8f32)
4313 ArrayRef<int> Mask = SVOp->getMask();
4315 // These are the special masks that may be optimized.
4316 static const int MaskToOptimizeEven[] = {0, 8, 2, 10, 4, 12, 6, 14};
4317 static const int MaskToOptimizeOdd[] = {1, 9, 3, 11, 5, 13, 7, 15};
4318 bool MatchEvenMask = true;
4319 bool MatchOddMask = true;
4320 for (int i=0; i<8; ++i) {
4321 if (!isUndefOrEqual(Mask[i], MaskToOptimizeEven[i]))
4322 MatchEvenMask = false;
4323 if (!isUndefOrEqual(Mask[i], MaskToOptimizeOdd[i]))
4324 MatchOddMask = false;
4327 if (!MatchEvenMask && !MatchOddMask)
4330 SDValue UndefNode = DAG.getNode(ISD::UNDEF, dl, VT);
4332 SDValue Op0 = SVOp->getOperand(0);
4333 SDValue Op1 = SVOp->getOperand(1);
4335 if (MatchEvenMask) {
4336 // Shift the second operand right to 32 bits.
4337 static const int ShiftRightMask[] = {-1, 0, -1, 2, -1, 4, -1, 6 };
4338 Op1 = DAG.getVectorShuffle(VT, dl, Op1, UndefNode, ShiftRightMask);
4340 // Shift the first operand left to 32 bits.
4341 static const int ShiftLeftMask[] = {1, -1, 3, -1, 5, -1, 7, -1 };
4342 Op0 = DAG.getVectorShuffle(VT, dl, Op0, UndefNode, ShiftLeftMask);
4344 static const int BlendMask[] = {0, 9, 2, 11, 4, 13, 6, 15};
4345 return DAG.getVectorShuffle(VT, dl, Op0, Op1, BlendMask);
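// Illustrative trace: for the even mask <0, 8, 2, 10, 4, 12, 6, 14> with sources
// X = <X0..X7> and Y = <Y0..Y7>, the shift turns Op1 into <u, Y0, u, Y2, u, Y4,
// u, Y6>, and the blend <0, 9, 2, 11, 4, 13, 6, 15> then yields
// <X0, Y0, X2, Y2, X4, Y4, X6, Y6>, which is exactly the requested shuffle.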
4348 /// isUNPCKLMask - Return true if the specified VECTOR_SHUFFLE operand
4349 /// specifies a shuffle of elements that is suitable for input to UNPCKL.
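/// Illustrative example: for v4i32 the classic unpcklps mask <0, 4, 1, 5> is
/// accepted (it interleaves the low halves of the two sources), whereas
/// <0, 4, 2, 6> is rejected because the second pair breaks the interleave
/// pattern expected by the loop below.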
4350 static bool isUNPCKLMask(ArrayRef<int> Mask, MVT VT,
4351 bool HasInt256, bool V2IsSplat = false) {
4353 assert(VT.getSizeInBits() >= 128 &&
4354 "Unsupported vector type for unpckl");
4356 unsigned NumElts = VT.getVectorNumElements();
4357 if (VT.is256BitVector() && NumElts != 4 && NumElts != 8 &&
4358 (!HasInt256 || (NumElts != 16 && NumElts != 32)))
4361 assert((!VT.is512BitVector() || VT.getScalarType().getSizeInBits() >= 32) &&
4362 "Unsupported vector type for unpckh");
4364 // AVX defines UNPCK* to operate independently on 128-bit lanes.
4365 unsigned NumLanes = VT.getSizeInBits()/128;
4366 unsigned NumLaneElts = NumElts/NumLanes;
4368 for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
4369 for (unsigned i = 0, j = l; i != NumLaneElts; i += 2, ++j) {
4370 int BitI = Mask[l+i];
4371 int BitI1 = Mask[l+i+1];
4372 if (!isUndefOrEqual(BitI, j))
4375 if (!isUndefOrEqual(BitI1, NumElts))
4378 if (!isUndefOrEqual(BitI1, j + NumElts))
4387 /// isUNPCKHMask - Return true if the specified VECTOR_SHUFFLE operand
4388 /// specifies a shuffle of elements that is suitable for input to UNPCKH.
4389 static bool isUNPCKHMask(ArrayRef<int> Mask, MVT VT,
4390 bool HasInt256, bool V2IsSplat = false) {
4391 assert(VT.getSizeInBits() >= 128 &&
4392 "Unsupported vector type for unpckh");
4394 unsigned NumElts = VT.getVectorNumElements();
4395 if (VT.is256BitVector() && NumElts != 4 && NumElts != 8 &&
4396 (!HasInt256 || (NumElts != 16 && NumElts != 32)))
4399 assert((!VT.is512BitVector() || VT.getScalarType().getSizeInBits() >= 32) &&
4400 "Unsupported vector type for unpckh");
4402 // AVX defines UNPCK* to operate independently on 128-bit lanes.
4403 unsigned NumLanes = VT.getSizeInBits()/128;
4404 unsigned NumLaneElts = NumElts/NumLanes;
4406 for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
4407 for (unsigned i = 0, j = l+NumLaneElts/2; i != NumLaneElts; i += 2, ++j) {
4408 int BitI = Mask[l+i];
4409 int BitI1 = Mask[l+i+1];
4410 if (!isUndefOrEqual(BitI, j))
4413 if (isUndefOrEqual(BitI1, NumElts))
4416 if (!isUndefOrEqual(BitI1, j+NumElts))
4424 /// isUNPCKL_v_undef_Mask - Special case of isUNPCKLMask for canonical form
4425 /// of vector_shuffle v, v, <0, 4, 1, 5>, i.e. vector_shuffle v, undef,
4427 static bool isUNPCKL_v_undef_Mask(ArrayRef<int> Mask, MVT VT, bool HasInt256) {
4428 unsigned NumElts = VT.getVectorNumElements();
4429 bool Is256BitVec = VT.is256BitVector();
4431 if (VT.is512BitVector())
4433 assert((VT.is128BitVector() || VT.is256BitVector()) &&
4434 "Unsupported vector type for unpckh");
4436 if (Is256BitVec && NumElts != 4 && NumElts != 8 &&
4437 (!HasInt256 || (NumElts != 16 && NumElts != 32)))
4440 // For 256-bit i64/f64, use MOVDDUPY instead, so reject the matching pattern
4441 // FIXME: Need a better way to get rid of this, there's no latency difference
4442 // between UNPCKLPD and MOVDDUP, the latter should always be checked first and
4443 // the former later. We should also remove the "_undef" special mask.
4444 if (NumElts == 4 && Is256BitVec)
4447 // Handle 128 and 256-bit vector lengths. AVX defines UNPCK* to operate
4448 // independently on 128-bit lanes.
4449 unsigned NumLanes = VT.getSizeInBits()/128;
4450 unsigned NumLaneElts = NumElts/NumLanes;
4452 for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
4453 for (unsigned i = 0, j = l; i != NumLaneElts; i += 2, ++j) {
4454 int BitI = Mask[l+i];
4455 int BitI1 = Mask[l+i+1];
4457 if (!isUndefOrEqual(BitI, j))
4459 if (!isUndefOrEqual(BitI1, j))
4467 /// isUNPCKH_v_undef_Mask - Special case of isUNPCKHMask for canonical form
4468 /// of vector_shuffle v, v, <2, 6, 3, 7>, i.e. vector_shuffle v, undef,
4470 static bool isUNPCKH_v_undef_Mask(ArrayRef<int> Mask, MVT VT, bool HasInt256) {
4471 unsigned NumElts = VT.getVectorNumElements();
4473 if (VT.is512BitVector())
4476 assert((VT.is128BitVector() || VT.is256BitVector()) &&
4477 "Unsupported vector type for unpckh");
4479 if (VT.is256BitVector() && NumElts != 4 && NumElts != 8 &&
4480 (!HasInt256 || (NumElts != 16 && NumElts != 32)))
4483 // Handle 128 and 256-bit vector lengths. AVX defines UNPCK* to operate
4484 // independently on 128-bit lanes.
4485 unsigned NumLanes = VT.getSizeInBits()/128;
4486 unsigned NumLaneElts = NumElts/NumLanes;
4488 for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
4489 for (unsigned i = 0, j = l+NumLaneElts/2; i != NumLaneElts; i += 2, ++j) {
4490 int BitI = Mask[l+i];
4491 int BitI1 = Mask[l+i+1];
4492 if (!isUndefOrEqual(BitI, j))
4494 if (!isUndefOrEqual(BitI1, j))
4501 // Match the INSERTI64x4/INSERTF64x4 instructions, i.e. (src0[0], src1[0]) or
4502 // (src1[0], src0[1]) - manipulation of 256-bit sub-vectors.
4503 static bool isINSERT64x4Mask(ArrayRef<int> Mask, MVT VT, unsigned int *Imm) {
4504 if (!VT.is512BitVector())
4507 unsigned NumElts = VT.getVectorNumElements();
4508 unsigned HalfSize = NumElts/2;
4509 if (isSequentialOrUndefInRange(Mask, 0, HalfSize, 0)) {
4510 if (isSequentialOrUndefInRange(Mask, HalfSize, HalfSize, NumElts)) {
4515 if (isSequentialOrUndefInRange(Mask, 0, HalfSize, NumElts)) {
4516 if (isSequentialOrUndefInRange(Mask, HalfSize, HalfSize, HalfSize)) {
4524 /// isMOVLMask - Return true if the specified VECTOR_SHUFFLE operand
4525 /// specifies a shuffle of elements that is suitable for input to MOVSS,
4526 /// MOVSD, and MOVD, i.e. setting the lowest element.
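/// Illustrative example: for v4f32 the mask <4, 1, 2, 3> is accepted - element 0
/// comes from V2 (index NumElts == 4) and the remaining elements are the identity
/// from V1 - which is exactly the effect of MOVSS.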
4527 static bool isMOVLMask(ArrayRef<int> Mask, EVT VT) {
4528 if (VT.getVectorElementType().getSizeInBits() < 32)
4530 if (!VT.is128BitVector())
4533 unsigned NumElts = VT.getVectorNumElements();
4535 if (!isUndefOrEqual(Mask[0], NumElts))
4538 for (unsigned i = 1; i != NumElts; ++i)
4539 if (!isUndefOrEqual(Mask[i], i))
4545 /// isVPERM2X128Mask - Match 256-bit shuffles where the elements are considered
4546 /// as permutations between 128-bit chunks or halves. As an example: this
4548 /// vector_shuffle <4, 5, 6, 7, 12, 13, 14, 15>
4549 /// The first half comes from the second half of V1 and the second half from the
4550 /// second half of V2.
4551 static bool isVPERM2X128Mask(ArrayRef<int> Mask, MVT VT, bool HasFp256) {
4552 if (!HasFp256 || !VT.is256BitVector())
4555 // The shuffle result is divided into half A and half B. In total the two
4556 // sources have 4 halves, namely: C, D, E, F. The final values of A and
4557 // B must come from C, D, E or F.
4558 unsigned HalfSize = VT.getVectorNumElements()/2;
4559 bool MatchA = false, MatchB = false;
4561 // Check if A comes from one of C, D, E, F.
4562 for (unsigned Half = 0; Half != 4; ++Half) {
4563 if (isSequentialOrUndefInRange(Mask, 0, HalfSize, Half*HalfSize)) {
4569 // Check if B comes from one of C, D, E, F.
4570 for (unsigned Half = 0; Half != 4; ++Half) {
4571 if (isSequentialOrUndefInRange(Mask, HalfSize, HalfSize, Half*HalfSize)) {
4577 return MatchA && MatchB;
4580 /// getShuffleVPERM2X128Immediate - Return the appropriate immediate to shuffle
4581 /// the specified VECTOR_MASK mask with VPERM2F128/VPERM2I128 instructions.
4582 static unsigned getShuffleVPERM2X128Immediate(ShuffleVectorSDNode *SVOp) {
4583 MVT VT = SVOp->getSimpleValueType(0);
4585 unsigned HalfSize = VT.getVectorNumElements()/2;
4587 unsigned FstHalf = 0, SndHalf = 0;
4588 for (unsigned i = 0; i < HalfSize; ++i) {
4589 if (SVOp->getMaskElt(i) > 0) {
4590 FstHalf = SVOp->getMaskElt(i)/HalfSize;
4594 for (unsigned i = HalfSize; i < HalfSize*2; ++i) {
4595 if (SVOp->getMaskElt(i) > 0) {
4596 SndHalf = SVOp->getMaskElt(i)/HalfSize;
4601 return (FstHalf | (SndHalf << 4));
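// Illustrative example: for the v8i32 mask <4, 5, 6, 7, 12, 13, 14, 15> mentioned
// above, HalfSize is 4, so FstHalf = 4/4 = 1 and SndHalf = 12/4 = 3, giving an
// immediate of 1 | (3 << 4) = 0x31, i.e. "high half of source 1, high half of
// source 2".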
4604 // Symmetric in-lane mask. Each lane has 4 elements (for imm8)
4605 static bool isPermImmMask(ArrayRef<int> Mask, MVT VT, unsigned& Imm8) {
4606 unsigned EltSize = VT.getVectorElementType().getSizeInBits();
4610 unsigned NumElts = VT.getVectorNumElements();
4612 if (VT.is128BitVector() || (VT.is256BitVector() && EltSize == 64)) {
4613 for (unsigned i = 0; i != NumElts; ++i) {
4616 Imm8 |= Mask[i] << (i*2);
4621 unsigned LaneSize = 4;
4622 SmallVector<int, 4> MaskVal(LaneSize, -1);
4624 for (unsigned l = 0; l != NumElts; l += LaneSize) {
4625 for (unsigned i = 0; i != LaneSize; ++i) {
4626 if (!isUndefOrInRange(Mask[i+l], l, l+LaneSize))
4630 if (MaskVal[i] < 0) {
4631 MaskVal[i] = Mask[i+l] - l;
4632 Imm8 |= MaskVal[i] << (i*2);
4635 if (Mask[i+l] != (signed)(MaskVal[i]+l))
4642 /// isVPERMILPMask - Return true if the specified VECTOR_SHUFFLE operand
4643 /// specifies a shuffle of elements that is suitable for input to VPERMILPD*.
4644 /// Note that VPERMIL mask matching differs depending on whether the underlying
4645 /// type is 32 or 64 bits. For VPERMILPS, the high half of the mask should point
4646 /// to the same elements as the low half, but within the higher half of the source.
4647 /// For VPERMILPD, the two lanes can be shuffled independently of each other,
4648 /// with the same restriction that lanes can't be crossed. Also handles PSHUFDY.
4649 static bool isVPERMILPMask(ArrayRef<int> Mask, MVT VT) {
4650 unsigned EltSize = VT.getVectorElementType().getSizeInBits();
4651 if (VT.getSizeInBits() < 256 || EltSize < 32)
4653 bool symmetricMaskRequired = (EltSize == 32);
4654 unsigned NumElts = VT.getVectorNumElements();
4656 unsigned NumLanes = VT.getSizeInBits()/128;
4657 unsigned LaneSize = NumElts/NumLanes;
4658 // 2 or 4 elements in one lane
4660 SmallVector<int, 4> ExpectedMaskVal(LaneSize, -1);
4661 for (unsigned l = 0; l != NumElts; l += LaneSize) {
4662 for (unsigned i = 0; i != LaneSize; ++i) {
4663 if (!isUndefOrInRange(Mask[i+l], l, l+LaneSize))
4665 if (symmetricMaskRequired) {
4666 if (ExpectedMaskVal[i] < 0 && Mask[i+l] >= 0) {
4667 ExpectedMaskVal[i] = Mask[i+l] - l;
4670 if (!isUndefOrEqual(Mask[i+l], ExpectedMaskVal[i]+l))
4678 /// isCommutedMOVLMask - Returns true if the shuffle mask is the reverse of what
4679 /// x86 movss wants: the lowest element must be the lowest element of vector 2
4680 /// and the other elements must come from vector 1 in order.
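/// For example, for a v4i32 shuffle the mask <0, 5, 6, 7> matches: element 0
/// comes from V1 and elements 1-3 come from V2 in order (the commuted
/// counterpart of the <4, 1, 2, 3> MOVL mask).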
4681 static bool isCommutedMOVLMask(ArrayRef<int> Mask, MVT VT,
4682 bool V2IsSplat = false, bool V2IsUndef = false) {
4683 if (!VT.is128BitVector())
4686 unsigned NumOps = VT.getVectorNumElements();
4687 if (NumOps != 2 && NumOps != 4 && NumOps != 8 && NumOps != 16)
4690 if (!isUndefOrEqual(Mask[0], 0))
4693 for (unsigned i = 1; i != NumOps; ++i)
4694 if (!(isUndefOrEqual(Mask[i], i+NumOps) ||
4695 (V2IsUndef && isUndefOrInRange(Mask[i], NumOps, NumOps*2)) ||
4696 (V2IsSplat && isUndefOrEqual(Mask[i], NumOps))))
4702 /// isMOVSHDUPMask - Return true if the specified VECTOR_SHUFFLE operand
4703 /// specifies a shuffle of elements that is suitable for input to MOVSHDUP.
4704 /// Masks to match: <1, 1, 3, 3> or <1, 1, 3, 3, 5, 5, 7, 7>
4705 static bool isMOVSHDUPMask(ArrayRef<int> Mask, MVT VT,
4706 const X86Subtarget *Subtarget) {
4707 if (!Subtarget->hasSSE3())
4710 unsigned NumElems = VT.getVectorNumElements();
4712 if ((VT.is128BitVector() && NumElems != 4) ||
4713 (VT.is256BitVector() && NumElems != 8) ||
4714 (VT.is512BitVector() && NumElems != 16))
4717 // "i+1" is the value the indexed mask element must have
4718 for (unsigned i = 0; i != NumElems; i += 2)
4719 if (!isUndefOrEqual(Mask[i], i+1) ||
4720 !isUndefOrEqual(Mask[i+1], i+1))
4726 /// isMOVSLDUPMask - Return true if the specified VECTOR_SHUFFLE operand
4727 /// specifies a shuffle of elements that is suitable for input to MOVSLDUP.
4728 /// Masks to match: <0, 0, 2, 2> or <0, 0, 2, 2, 4, 4, 6, 6>
4729 static bool isMOVSLDUPMask(ArrayRef<int> Mask, MVT VT,
4730 const X86Subtarget *Subtarget) {
4731 if (!Subtarget->hasSSE3())
4734 unsigned NumElems = VT.getVectorNumElements();
4736 if ((VT.is128BitVector() && NumElems != 4) ||
4737 (VT.is256BitVector() && NumElems != 8) ||
4738 (VT.is512BitVector() && NumElems != 16))
4741 // "i" is the value the indexed mask element must have
4742 for (unsigned i = 0; i != NumElems; i += 2)
4743 if (!isUndefOrEqual(Mask[i], i) ||
4744 !isUndefOrEqual(Mask[i+1], i))
4750 /// isMOVDDUPYMask - Return true if the specified VECTOR_SHUFFLE operand
4751 /// specifies a shuffle of elements that is suitable for input to 256-bit
4752 /// version of MOVDDUP.
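/// For example, for a v4f64 shuffle the mask <0, 0, 2, 2> matches: the low half
/// duplicates element 0 and the high half duplicates element 2.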
4753 static bool isMOVDDUPYMask(ArrayRef<int> Mask, MVT VT, bool HasFp256) {
4754 if (!HasFp256 || !VT.is256BitVector())
4757 unsigned NumElts = VT.getVectorNumElements();
4761 for (unsigned i = 0; i != NumElts/2; ++i)
4762 if (!isUndefOrEqual(Mask[i], 0))
4764 for (unsigned i = NumElts/2; i != NumElts; ++i)
4765 if (!isUndefOrEqual(Mask[i], NumElts/2))
4770 /// isMOVDDUPMask - Return true if the specified VECTOR_SHUFFLE operand
4771 /// specifies a shuffle of elements that is suitable for input to 128-bit
4772 /// version of MOVDDUP.
4773 static bool isMOVDDUPMask(ArrayRef<int> Mask, MVT VT) {
4774 if (!VT.is128BitVector())
4777 unsigned e = VT.getVectorNumElements() / 2;
4778 for (unsigned i = 0; i != e; ++i)
4779 if (!isUndefOrEqual(Mask[i], i))
4781 for (unsigned i = 0; i != e; ++i)
4782 if (!isUndefOrEqual(Mask[e+i], i))
4787 /// isVEXTRACTIndex - Return true if the specified
4788 /// EXTRACT_SUBVECTOR operand specifies a vector extract that is
4789 /// suitable for instructions that extract 128 or 256-bit vectors
4790 static bool isVEXTRACTIndex(SDNode *N, unsigned vecWidth) {
4791 assert((vecWidth == 128 || vecWidth == 256) && "Unexpected vector width");
4792 if (!isa<ConstantSDNode>(N->getOperand(1).getNode()))
4795 // The index should be aligned on a vecWidth-bit boundary.
4797 cast<ConstantSDNode>(N->getOperand(1).getNode())->getZExtValue();
4799 MVT VT = N->getSimpleValueType(0);
4800 unsigned ElSize = VT.getVectorElementType().getSizeInBits();
4801 bool Result = (Index * ElSize) % vecWidth == 0;
4806 /// isVINSERTIndex - Return true if the specified INSERT_SUBVECTOR
4807 /// operand specifies a subvector insert that is suitable for input to
4808 /// insertion of 128 or 256-bit subvectors
4809 static bool isVINSERTIndex(SDNode *N, unsigned vecWidth) {
4810 assert((vecWidth == 128 || vecWidth == 256) && "Unexpected vector width");
4811 if (!isa<ConstantSDNode>(N->getOperand(2).getNode()))
4813 // The index should be aligned on a vecWidth-bit boundary.
4815 cast<ConstantSDNode>(N->getOperand(2).getNode())->getZExtValue();
4817 MVT VT = N->getSimpleValueType(0);
4818 unsigned ElSize = VT.getVectorElementType().getSizeInBits();
4819 bool Result = (Index * ElSize) % vecWidth == 0;
4824 bool X86::isVINSERT128Index(SDNode *N) {
4825 return isVINSERTIndex(N, 128);
4828 bool X86::isVINSERT256Index(SDNode *N) {
4829 return isVINSERTIndex(N, 256);
4832 bool X86::isVEXTRACT128Index(SDNode *N) {
4833 return isVEXTRACTIndex(N, 128);
4836 bool X86::isVEXTRACT256Index(SDNode *N) {
4837 return isVEXTRACTIndex(N, 256);
4840 /// getShuffleSHUFImmediate - Return the appropriate immediate to shuffle
4841 /// the specified VECTOR_SHUFFLE mask with PSHUF* and SHUFP* instructions.
4842 /// Handles 128-bit and 256-bit.
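/// For example, the v4i32 reverse mask <3, 2, 1, 0> encodes as
/// 3 | (2 << 2) | (1 << 4) | (0 << 6) = 0x1B, two bits per element; lanes with
/// only two elements use a single bit per element instead.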
4843 static unsigned getShuffleSHUFImmediate(ShuffleVectorSDNode *N) {
4844 MVT VT = N->getSimpleValueType(0);
4846 assert((VT.getSizeInBits() >= 128) &&
4847 "Unsupported vector type for PSHUF/SHUFP");
4849 // Handle 128 and 256-bit vector lengths. AVX defines PSHUF/SHUFP to operate
4850 // independently on 128-bit lanes.
4851 unsigned NumElts = VT.getVectorNumElements();
4852 unsigned NumLanes = VT.getSizeInBits()/128;
4853 unsigned NumLaneElts = NumElts/NumLanes;
4855 assert((NumLaneElts == 2 || NumLaneElts == 4 || NumLaneElts == 8) &&
4856 "Only supports 2, 4 or 8 elements per lane");
4858 unsigned Shift = (NumLaneElts >= 4) ? 1 : 0;
4860 for (unsigned i = 0; i != NumElts; ++i) {
4861 int Elt = N->getMaskElt(i);
4862 if (Elt < 0) continue;
4863 Elt &= NumLaneElts - 1;
4864 unsigned ShAmt = (i << Shift) % 8;
4865 Mask |= Elt << ShAmt;
4871 /// getShufflePSHUFHWImmediate - Return the appropriate immediate to shuffle
4872 /// the specified VECTOR_SHUFFLE mask with the PSHUFHW instruction.
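/// For example, for a v8i16 mask <0, 1, 2, 3, 7, 6, 5, 4> only the high four
/// elements matter; within the high half they are 3, 2, 1, 0, which encodes as
/// 3 | (2 << 2) | (1 << 4) | (0 << 6) = 0x1B.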
4873 static unsigned getShufflePSHUFHWImmediate(ShuffleVectorSDNode *N) {
4874 MVT VT = N->getSimpleValueType(0);
4876 assert((VT == MVT::v8i16 || VT == MVT::v16i16) &&
4877 "Unsupported vector type for PSHUFHW");
4879 unsigned NumElts = VT.getVectorNumElements();
4882 for (unsigned l = 0; l != NumElts; l += 8) {
4883 // 8 elements per lane, but we only care about the last 4.
4884 for (unsigned i = 0; i < 4; ++i) {
4885 int Elt = N->getMaskElt(l+i+4);
4886 if (Elt < 0) continue;
4887 Elt &= 0x3; // only 2-bits.
4888 Mask |= Elt << (i * 2);
4895 /// getShufflePSHUFLWImmediate - Return the appropriate immediate to shuffle
4896 /// the specified VECTOR_SHUFFLE mask with the PSHUFLW instruction.
4897 static unsigned getShufflePSHUFLWImmediate(ShuffleVectorSDNode *N) {
4898 MVT VT = N->getSimpleValueType(0);
4900 assert((VT == MVT::v8i16 || VT == MVT::v16i16) &&
4901 "Unsupported vector type for PSHUFHW");
4903 unsigned NumElts = VT.getVectorNumElements();
4906 for (unsigned l = 0; l != NumElts; l += 8) {
4907 // 8 elements per lane, but we only care about the first 4.
4908 for (unsigned i = 0; i < 4; ++i) {
4909 int Elt = N->getMaskElt(l+i);
4910 if (Elt < 0) continue;
4911 Elt &= 0x3; // only 2-bits
4912 Mask |= Elt << (i * 2);
4919 /// \brief Return the appropriate immediate to shuffle the specified
4920 /// VECTOR_SHUFFLE mask with the PALIGNR (if InterLane is false) or with
4921 /// VALIGN (if InterLane is true) instructions.
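/// For example, for a v8i16 mask <3, 4, 5, 6, 7, 8, 9, 10> the first defined
/// element is 3 at index 0, so PALIGNR (InterLane == false) gets the byte
/// immediate (3 - 0) * 2 = 6, while VALIGN (InterLane == true) counts whole
/// elements and gets 3.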
4922 static unsigned getShuffleAlignrImmediate(ShuffleVectorSDNode *SVOp,
4924 MVT VT = SVOp->getSimpleValueType(0);
4925 unsigned EltSize = InterLane ? 1 :
4926 VT.getVectorElementType().getSizeInBits() >> 3;
4928 unsigned NumElts = VT.getVectorNumElements();
4929 unsigned NumLanes = VT.is512BitVector() ? 1 : VT.getSizeInBits()/128;
4930 unsigned NumLaneElts = NumElts/NumLanes;
4934 for (i = 0; i != NumElts; ++i) {
4935 Val = SVOp->getMaskElt(i);
4939 if (Val >= (int)NumElts)
4940 Val -= NumElts - NumLaneElts;
4942 assert(Val - i > 0 && "PALIGNR imm should be positive");
4943 return (Val - i) * EltSize;
4946 /// \brief Return the appropriate immediate to shuffle the specified
4947 /// VECTOR_SHUFFLE mask with the PALIGNR instruction.
4948 static unsigned getShufflePALIGNRImmediate(ShuffleVectorSDNode *SVOp) {
4949 return getShuffleAlignrImmediate(SVOp, false);
4952 /// \brief Return the appropriate immediate to shuffle the specified
4953 /// VECTOR_SHUFFLE mask with the VALIGN instruction.
4954 static unsigned getShuffleVALIGNImmediate(ShuffleVectorSDNode *SVOp) {
4955 return getShuffleAlignrImmediate(SVOp, true);
4959 static unsigned getExtractVEXTRACTImmediate(SDNode *N, unsigned vecWidth) {
4960 assert((vecWidth == 128 || vecWidth == 256) && "Unsupported vector width");
4961 if (!isa<ConstantSDNode>(N->getOperand(1).getNode()))
4962 llvm_unreachable("Illegal extract subvector for VEXTRACT");
4965 cast<ConstantSDNode>(N->getOperand(1).getNode())->getZExtValue();
4967 MVT VecVT = N->getOperand(0).getSimpleValueType();
4968 MVT ElVT = VecVT.getVectorElementType();
4970 unsigned NumElemsPerChunk = vecWidth / ElVT.getSizeInBits();
4971 return Index / NumElemsPerChunk;
4974 static unsigned getInsertVINSERTImmediate(SDNode *N, unsigned vecWidth) {
4975 assert((vecWidth == 128 || vecWidth == 256) && "Unsupported vector width");
4976 if (!isa<ConstantSDNode>(N->getOperand(2).getNode()))
4977 llvm_unreachable("Illegal insert subvector for VINSERT");
4980 cast<ConstantSDNode>(N->getOperand(2).getNode())->getZExtValue();
4982 MVT VecVT = N->getSimpleValueType(0);
4983 MVT ElVT = VecVT.getVectorElementType();
4985 unsigned NumElemsPerChunk = vecWidth / ElVT.getSizeInBits();
4986 return Index / NumElemsPerChunk;
4989 /// getExtractVEXTRACT128Immediate - Return the appropriate immediate
4990 /// to extract the specified EXTRACT_SUBVECTOR index with VEXTRACTF128
4991 /// and VEXTRACTI128 instructions.
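/// For example, extracting the upper 128 bits of a v8f32 (EXTRACT_SUBVECTOR
/// index 4) yields 4 / (128 / 32) = 1 as the immediate.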
4992 unsigned X86::getExtractVEXTRACT128Immediate(SDNode *N) {
4993 return getExtractVEXTRACTImmediate(N, 128);
4996 /// getExtractVEXTRACT256Immediate - Return the appropriate immediate
4997 /// to extract the specified EXTRACT_SUBVECTOR index with VEXTRACTF64x4
4998 /// and VEXTRACTI64x4 instructions.
4999 unsigned X86::getExtractVEXTRACT256Immediate(SDNode *N) {
5000 return getExtractVEXTRACTImmediate(N, 256);
5003 /// getInsertVINSERT128Immediate - Return the appropriate immediate
5004 /// to insert at the specified INSERT_SUBVECTOR index with VINSERTF128
5005 /// and VINSERTI128 instructions.
5006 unsigned X86::getInsertVINSERT128Immediate(SDNode *N) {
5007 return getInsertVINSERTImmediate(N, 128);
5010 /// getInsertVINSERT256Immediate - Return the appropriate immediate
5011 /// to insert at the specified INSERT_SUBVECTOR index with VINSERTF64x4
5012 /// and VINSERTI64x4 instructions.
5013 unsigned X86::getInsertVINSERT256Immediate(SDNode *N) {
5014 return getInsertVINSERTImmediate(N, 256);
5017 /// isZero - Returns true if V is a constant integer zero
5018 static bool isZero(SDValue V) {
5019 ConstantSDNode *C = dyn_cast<ConstantSDNode>(V);
5020 return C && C->isNullValue();
5023 /// isZeroNode - Returns true if Elt is a constant zero or a floating point
5025 bool X86::isZeroNode(SDValue Elt) {
5028 if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Elt))
5029 return CFP->getValueAPF().isPosZero();
5033 /// ShouldXformToMOVHLPS - Return true if the node should be transformed to
5034 /// match movhlps. The lower half elements should come from the upper half of
5035 /// V1 (and in order), and the upper half elements should come from the upper
5036 /// half of V2 (and in order).
5037 static bool ShouldXformToMOVHLPS(ArrayRef<int> Mask, MVT VT) {
5038 if (!VT.is128BitVector())
5040 if (VT.getVectorNumElements() != 4)
5042 for (unsigned i = 0, e = 2; i != e; ++i)
5043 if (!isUndefOrEqual(Mask[i], i+2))
5045 for (unsigned i = 2; i != 4; ++i)
5046 if (!isUndefOrEqual(Mask[i], i+4))
5051 /// isScalarLoadToVector - Returns true if the node is a scalar load that
5052 /// is promoted to a vector. It also returns the LoadSDNode by reference if
5054 static bool isScalarLoadToVector(SDNode *N, LoadSDNode **LD = nullptr) {
5055 if (N->getOpcode() != ISD::SCALAR_TO_VECTOR)
5057 N = N->getOperand(0).getNode();
5058 if (!ISD::isNON_EXTLoad(N))
5061 *LD = cast<LoadSDNode>(N);
5065 // Test whether the given value is a vector value which will be legalized
5067 static bool WillBeConstantPoolLoad(SDNode *N) {
5068 if (N->getOpcode() != ISD::BUILD_VECTOR)
5071 // Check for any non-constant elements.
5072 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
5073 switch (N->getOperand(i).getNode()->getOpcode()) {
5075 case ISD::ConstantFP:
5082 // Vectors of all-zeros and all-ones are materialized with special
5083 // instructions rather than being loaded.
5084 return !ISD::isBuildVectorAllZeros(N) &&
5085 !ISD::isBuildVectorAllOnes(N);
5088 /// ShouldXformToMOVLP{S|D} - Return true if the node should be transformed to
5089 /// match movlp{s|d}. The lower half elements should come from the lower half of
5090 /// V1 (and in order), and the upper half elements should come from the upper
5091 /// half of V2 (and in order). And since V1 will become the source of the
5092 /// MOVLP, it must be either a vector load or a scalar load to vector.
5093 static bool ShouldXformToMOVLP(SDNode *V1, SDNode *V2,
5094 ArrayRef<int> Mask, MVT VT) {
5095 if (!VT.is128BitVector())
5098 if (!ISD::isNON_EXTLoad(V1) && !isScalarLoadToVector(V1))
5100 // If V2 is a vector load, don't do this transformation. We will try to use a
5101 // load-folding shufps op instead.
5102 if (ISD::isNON_EXTLoad(V2) || WillBeConstantPoolLoad(V2))
5105 unsigned NumElems = VT.getVectorNumElements();
5107 if (NumElems != 2 && NumElems != 4)
5109 for (unsigned i = 0, e = NumElems/2; i != e; ++i)
5110 if (!isUndefOrEqual(Mask[i], i))
5112 for (unsigned i = NumElems/2, e = NumElems; i != e; ++i)
5113 if (!isUndefOrEqual(Mask[i], i+NumElems))
5118 /// isZeroShuffle - Returns true if N is a VECTOR_SHUFFLE that can be resolved
5119 /// to a zero vector.
5120 /// FIXME: move to dag combiner / method on ShuffleVectorSDNode
5121 static bool isZeroShuffle(ShuffleVectorSDNode *N) {
5122 SDValue V1 = N->getOperand(0);
5123 SDValue V2 = N->getOperand(1);
5124 unsigned NumElems = N->getValueType(0).getVectorNumElements();
5125 for (unsigned i = 0; i != NumElems; ++i) {
5126 int Idx = N->getMaskElt(i);
5127 if (Idx >= (int)NumElems) {
5128 unsigned Opc = V2.getOpcode();
5129 if (Opc == ISD::UNDEF || ISD::isBuildVectorAllZeros(V2.getNode()))
5131 if (Opc != ISD::BUILD_VECTOR ||
5132 !X86::isZeroNode(V2.getOperand(Idx-NumElems)))
5134 } else if (Idx >= 0) {
5135 unsigned Opc = V1.getOpcode();
5136 if (Opc == ISD::UNDEF || ISD::isBuildVectorAllZeros(V1.getNode()))
5138 if (Opc != ISD::BUILD_VECTOR ||
5139 !X86::isZeroNode(V1.getOperand(Idx)))
5146 /// getZeroVector - Returns a vector of specified type with all zero elements.
5148 static SDValue getZeroVector(EVT VT, const X86Subtarget *Subtarget,
5149 SelectionDAG &DAG, SDLoc dl) {
5150 assert(VT.isVector() && "Expected a vector type");
5152 // Always build SSE zero vectors as <4 x i32> bitcasted
5153 // to their dest type. This ensures they get CSE'd.
5155 if (VT.is128BitVector()) { // SSE
5156 if (Subtarget->hasSSE2()) { // SSE2
5157 SDValue Cst = DAG.getConstant(0, MVT::i32);
5158 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Cst, Cst, Cst, Cst);
5160 SDValue Cst = DAG.getConstantFP(+0.0, MVT::f32);
5161 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4f32, Cst, Cst, Cst, Cst);
5163 } else if (VT.is256BitVector()) { // AVX
5164 if (Subtarget->hasInt256()) { // AVX2
5165 SDValue Cst = DAG.getConstant(0, MVT::i32);
5166 SDValue Ops[] = { Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst };
5167 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v8i32, Ops);
5169 // 256-bit logic and arithmetic instructions in AVX are all
5170 // floating-point, no support for integer ops. Emit fp zeroed vectors.
5171 SDValue Cst = DAG.getConstantFP(+0.0, MVT::f32);
5172 SDValue Ops[] = { Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst };
5173 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v8f32, Ops);
5175 } else if (VT.is512BitVector()) { // AVX-512
5176 SDValue Cst = DAG.getConstant(0, MVT::i32);
5177 SDValue Ops[] = { Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst,
5178 Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst };
5179 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v16i32, Ops);
5180 } else if (VT.getScalarType() == MVT::i1) {
5181 assert(VT.getVectorNumElements() <= 16 && "Unexpected vector type");
5182 SDValue Cst = DAG.getConstant(0, MVT::i1);
5183 SmallVector<SDValue, 16> Ops(VT.getVectorNumElements(), Cst);
5184 return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Ops);
5186 llvm_unreachable("Unexpected vector type");
5188 return DAG.getNode(ISD::BITCAST, dl, VT, Vec);
5191 /// getOnesVector - Returns a vector of specified type with all bits set.
5192 /// Always build ones vectors as <4 x i32> or <8 x i32>. For 256-bit types with
5193 /// no AVX2 support, use two <4 x i32> vectors inserted into an <8 x i32> appropriately.
5194 /// Then bitcast to their original type, ensuring they get CSE'd.
5195 static SDValue getOnesVector(MVT VT, bool HasInt256, SelectionDAG &DAG,
5197 assert(VT.isVector() && "Expected a vector type");
5199 SDValue Cst = DAG.getConstant(~0U, MVT::i32);
5201 if (VT.is256BitVector()) {
5202 if (HasInt256) { // AVX2
5203 SDValue Ops[] = { Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst };
5204 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v8i32, Ops);
5206 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Cst, Cst, Cst, Cst);
5207 Vec = Concat128BitVectors(Vec, Vec, MVT::v8i32, 8, DAG, dl);
5209 } else if (VT.is128BitVector()) {
5210 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Cst, Cst, Cst, Cst);
5212 llvm_unreachable("Unexpected vector type");
5214 return DAG.getNode(ISD::BITCAST, dl, VT, Vec);
5217 /// NormalizeMask - V2 is a splat; modify the mask (if needed) so all elements
5218 /// that point to V2 point to its first element.
5219 static void NormalizeMask(SmallVectorImpl<int> &Mask, unsigned NumElems) {
5220 for (unsigned i = 0; i != NumElems; ++i) {
5221 if (Mask[i] > (int)NumElems) {
5227 /// getMOVL - Returns a vector_shuffle for a movs{s|d} or movd
5228 /// operation of specified width.
5229 static SDValue getMOVL(SelectionDAG &DAG, SDLoc dl, EVT VT, SDValue V1,
5231 unsigned NumElems = VT.getVectorNumElements();
5232 SmallVector<int, 8> Mask;
5233 Mask.push_back(NumElems);
5234 for (unsigned i = 1; i != NumElems; ++i)
5236 return DAG.getVectorShuffle(VT, dl, V1, V2, &Mask[0]);
5239 /// getUnpackl - Returns a vector_shuffle node for an unpackl operation.
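/// For example, for v4i32 this produces the mask <0, 4, 1, 5>; the getUnpackh
/// counterpart below produces <2, 6, 3, 7>.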
5240 static SDValue getUnpackl(SelectionDAG &DAG, SDLoc dl, MVT VT, SDValue V1,
5242 unsigned NumElems = VT.getVectorNumElements();
5243 SmallVector<int, 8> Mask;
5244 for (unsigned i = 0, e = NumElems/2; i != e; ++i) {
5246 Mask.push_back(i + NumElems);
5248 return DAG.getVectorShuffle(VT, dl, V1, V2, &Mask[0]);
5251 /// getUnpackh - Returns a vector_shuffle node for an unpackh operation.
5252 static SDValue getUnpackh(SelectionDAG &DAG, SDLoc dl, MVT VT, SDValue V1,
5254 unsigned NumElems = VT.getVectorNumElements();
5255 SmallVector<int, 8> Mask;
5256 for (unsigned i = 0, Half = NumElems/2; i != Half; ++i) {
5257 Mask.push_back(i + Half);
5258 Mask.push_back(i + NumElems + Half);
5260 return DAG.getVectorShuffle(VT, dl, V1, V2, &Mask[0]);
5263 // PromoteSplati8i16 - i16 and i8 vector types can't be used directly by
5264 // a generic shuffle instruction because the target has no such instructions.
5265 // Generate shuffles which repeat i16 and i8 several times until they can be
5266 // represented by v4f32 and then be manipulated by target supported shuffles.
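// For example, to splat element 6 of a v8i16, one unpackh step is taken (6 lies
// in the high half) and the element index becomes 2; the repeated value can
// then be splatted as element 2 of the vector viewed as v4f32 (assuming the
// element count is halved each iteration, as the loop intends).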
5267 static SDValue PromoteSplati8i16(SDValue V, SelectionDAG &DAG, int &EltNo) {
5268 MVT VT = V.getSimpleValueType();
5269 int NumElems = VT.getVectorNumElements();
5272 while (NumElems > 4) {
5273 if (EltNo < NumElems/2) {
5274 V = getUnpackl(DAG, dl, VT, V, V);
5276 V = getUnpackh(DAG, dl, VT, V, V);
5277 EltNo -= NumElems/2;
5284 /// getLegalSplat - Generate a legal splat with supported x86 shuffles
5285 static SDValue getLegalSplat(SelectionDAG &DAG, SDValue V, int EltNo) {
5286 MVT VT = V.getSimpleValueType();
5289 if (VT.is128BitVector()) {
5290 V = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, V);
5291 int SplatMask[4] = { EltNo, EltNo, EltNo, EltNo };
5292 V = DAG.getVectorShuffle(MVT::v4f32, dl, V, DAG.getUNDEF(MVT::v4f32),
5294 } else if (VT.is256BitVector()) {
5295 // To use VPERMILPS to splat scalars, the second half of indices must
5296 // refer to the higher part, which is a duplication of the lower one,
5297 // because VPERMILPS can only handle in-lane permutations.
5298 int SplatMask[8] = { EltNo, EltNo, EltNo, EltNo,
5299 EltNo+4, EltNo+4, EltNo+4, EltNo+4 };
5301 V = DAG.getNode(ISD::BITCAST, dl, MVT::v8f32, V);
5302 V = DAG.getVectorShuffle(MVT::v8f32, dl, V, DAG.getUNDEF(MVT::v8f32),
5305 llvm_unreachable("Vector size not supported");
5307 return DAG.getNode(ISD::BITCAST, dl, VT, V);
5310 /// PromoteSplat - Splat is promoted to target supported vector shuffles.
5311 static SDValue PromoteSplat(ShuffleVectorSDNode *SV, SelectionDAG &DAG) {
5312 MVT SrcVT = SV->getSimpleValueType(0);
5313 SDValue V1 = SV->getOperand(0);
5316 int EltNo = SV->getSplatIndex();
5317 int NumElems = SrcVT.getVectorNumElements();
5318 bool Is256BitVec = SrcVT.is256BitVector();
5320 assert(((SrcVT.is128BitVector() && NumElems > 4) || Is256BitVec) &&
5321 "Unknown how to promote splat for type");
5323 // Extract the 128-bit part containing the splat element and update
5324 // the splat element index when it refers to the higher register.
5326 V1 = Extract128BitVector(V1, EltNo, DAG, dl);
5327 if (EltNo >= NumElems/2)
5328 EltNo -= NumElems/2;
5331 // i16 and i8 vector types can't be used directly by a generic shuffle
5332 // instruction because the target has no such instruction. Generate shuffles
5333 // which repeat i16 and i8 several times until they fit in i32, and then can
5334 // be manipulated by target supported shuffles.
5335 MVT EltVT = SrcVT.getVectorElementType();
5336 if (EltVT == MVT::i8 || EltVT == MVT::i16)
5337 V1 = PromoteSplati8i16(V1, DAG, EltNo);
5339 // Recreate the 256-bit vector and place the same 128-bit vector
5340 // into the low and high part. This is necessary because we want
5341 // to use VPERM* to shuffle the vectors
5343 V1 = DAG.getNode(ISD::CONCAT_VECTORS, dl, SrcVT, V1, V1);
5346 return getLegalSplat(DAG, V1, EltNo);
5349 /// getShuffleVectorZeroOrUndef - Return a vector_shuffle of the specified
5350 /// vector and a zero or undef vector. This produces a shuffle where the low
5351 /// element of V2 is swizzled into the zero/undef vector, landing at element
5352 /// Idx. This produces a shuffle mask like 4,1,2,3 (idx=0) or 0,1,2,4 (idx=3).
5353 static SDValue getShuffleVectorZeroOrUndef(SDValue V2, unsigned Idx,
5355 const X86Subtarget *Subtarget,
5356 SelectionDAG &DAG) {
5357 MVT VT = V2.getSimpleValueType();
5359 ? getZeroVector(VT, Subtarget, DAG, SDLoc(V2)) : DAG.getUNDEF(VT);
5360 unsigned NumElems = VT.getVectorNumElements();
5361 SmallVector<int, 16> MaskVec;
5362 for (unsigned i = 0; i != NumElems; ++i)
5363 // If this is the insertion idx, put the low elt of V2 here.
5364 MaskVec.push_back(i == Idx ? NumElems : i);
5365 return DAG.getVectorShuffle(VT, SDLoc(V2), V1, V2, &MaskVec[0]);
5368 /// getTargetShuffleMask - Calculates the shuffle mask corresponding to the
5369 /// target specific opcode. Returns true if the Mask could be calculated. Sets
5370 /// IsUnary to true if the shuffle only uses one source. Note that this will set IsUnary for
5371 /// shuffles which use a single input multiple times, and in those cases it will
5372 /// adjust the mask to only have indices within that single input.
5373 static bool getTargetShuffleMask(SDNode *N, MVT VT,
5374 SmallVectorImpl<int> &Mask, bool &IsUnary) {
5375 unsigned NumElems = VT.getVectorNumElements();
5379 bool IsFakeUnary = false;
5380 switch(N->getOpcode()) {
5381 case X86ISD::BLENDI:
5382 ImmN = N->getOperand(N->getNumOperands()-1);
5383 DecodeBLENDMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
5386 ImmN = N->getOperand(N->getNumOperands()-1);
5387 DecodeSHUFPMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
5388 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
5390 case X86ISD::UNPCKH:
5391 DecodeUNPCKHMask(VT, Mask);
5392 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
5394 case X86ISD::UNPCKL:
5395 DecodeUNPCKLMask(VT, Mask);
5396 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
5398 case X86ISD::MOVHLPS:
5399 DecodeMOVHLPSMask(NumElems, Mask);
5400 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
5402 case X86ISD::MOVLHPS:
5403 DecodeMOVLHPSMask(NumElems, Mask);
5404 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
5406 case X86ISD::PALIGNR:
5407 ImmN = N->getOperand(N->getNumOperands()-1);
5408 DecodePALIGNRMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
5410 case X86ISD::PSHUFD:
5411 case X86ISD::VPERMILPI:
5412 ImmN = N->getOperand(N->getNumOperands()-1);
5413 DecodePSHUFMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
5416 case X86ISD::PSHUFHW:
5417 ImmN = N->getOperand(N->getNumOperands()-1);
5418 DecodePSHUFHWMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
5421 case X86ISD::PSHUFLW:
5422 ImmN = N->getOperand(N->getNumOperands()-1);
5423 DecodePSHUFLWMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
5426 case X86ISD::PSHUFB: {
5428 SDValue MaskNode = N->getOperand(1);
5429 while (MaskNode->getOpcode() == ISD::BITCAST)
5430 MaskNode = MaskNode->getOperand(0);
5432 if (MaskNode->getOpcode() == ISD::BUILD_VECTOR) {
5433 // If we have a build-vector, then things are easy.
5434 EVT VT = MaskNode.getValueType();
5435 assert(VT.isVector() &&
5436 "Can't produce a non-vector with a build_vector!");
5437 if (!VT.isInteger())
5440 int NumBytesPerElement = VT.getVectorElementType().getSizeInBits() / 8;
5442 SmallVector<uint64_t, 32> RawMask;
5443 for (int i = 0, e = MaskNode->getNumOperands(); i < e; ++i) {
5444 SDValue Op = MaskNode->getOperand(i);
5445 if (Op->getOpcode() == ISD::UNDEF) {
5446 RawMask.push_back((uint64_t)SM_SentinelUndef);
5449 auto *CN = dyn_cast<ConstantSDNode>(Op.getNode());
5452 APInt MaskElement = CN->getAPIntValue();
5454 // We now have to decode the element which could be any integer size and
5455 // extract each byte of it.
5456 for (int j = 0; j < NumBytesPerElement; ++j) {
5457 // Note that this is x86 and so always little endian: the low byte is
5458 // the first byte of the mask.
5459 RawMask.push_back(MaskElement.getLoBits(8).getZExtValue());
5460 MaskElement = MaskElement.lshr(8);
5463 DecodePSHUFBMask(RawMask, Mask);
5467 auto *MaskLoad = dyn_cast<LoadSDNode>(MaskNode);
5471 SDValue Ptr = MaskLoad->getBasePtr();
5472 if (Ptr->getOpcode() == X86ISD::Wrapper)
5473 Ptr = Ptr->getOperand(0);
5475 auto *MaskCP = dyn_cast<ConstantPoolSDNode>(Ptr);
5476 if (!MaskCP || MaskCP->isMachineConstantPoolEntry())
5479 if (auto *C = dyn_cast<Constant>(MaskCP->getConstVal())) {
5480 DecodePSHUFBMask(C, Mask);
5486 case X86ISD::VPERMI:
5487 ImmN = N->getOperand(N->getNumOperands()-1);
5488 DecodeVPERMMask(cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
5493 DecodeScalarMoveMask(VT, /* IsLoad */ false, Mask);
5495 case X86ISD::VPERM2X128:
5496 ImmN = N->getOperand(N->getNumOperands()-1);
5497 DecodeVPERM2X128Mask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
5498 if (Mask.empty()) return false;
5500 case X86ISD::MOVSLDUP:
5501 DecodeMOVSLDUPMask(VT, Mask);
5504 case X86ISD::MOVSHDUP:
5505 DecodeMOVSHDUPMask(VT, Mask);
5508 case X86ISD::MOVDDUP:
5509 DecodeMOVDDUPMask(VT, Mask);
5512 case X86ISD::MOVLHPD:
5513 case X86ISD::MOVLPD:
5514 case X86ISD::MOVLPS:
5515 // Not yet implemented
5517 default: llvm_unreachable("unknown target shuffle node");
5520 // If we have a fake unary shuffle, the shuffle mask is spread across two
5521 // inputs that are actually the same node. Re-map the mask to always point
5522 // into the first input.
5525 if (M >= (int)Mask.size())
5531 /// getShuffleScalarElt - Returns the scalar element that will make up the ith
5532 /// element of the result of the vector shuffle.
5533 static SDValue getShuffleScalarElt(SDNode *N, unsigned Index, SelectionDAG &DAG,
5536 return SDValue(); // Limit search depth.
5538 SDValue V = SDValue(N, 0);
5539 EVT VT = V.getValueType();
5540 unsigned Opcode = V.getOpcode();
5542 // Recurse into ISD::VECTOR_SHUFFLE node to find scalars.
5543 if (const ShuffleVectorSDNode *SV = dyn_cast<ShuffleVectorSDNode>(N)) {
5544 int Elt = SV->getMaskElt(Index);
5547 return DAG.getUNDEF(VT.getVectorElementType());
5549 unsigned NumElems = VT.getVectorNumElements();
5550 SDValue NewV = (Elt < (int)NumElems) ? SV->getOperand(0)
5551 : SV->getOperand(1);
5552 return getShuffleScalarElt(NewV.getNode(), Elt % NumElems, DAG, Depth+1);
5555 // Recurse into target specific vector shuffles to find scalars.
5556 if (isTargetShuffle(Opcode)) {
5557 MVT ShufVT = V.getSimpleValueType();
5558 unsigned NumElems = ShufVT.getVectorNumElements();
5559 SmallVector<int, 16> ShuffleMask;
5562 if (!getTargetShuffleMask(N, ShufVT, ShuffleMask, IsUnary))
5565 int Elt = ShuffleMask[Index];
5567 return DAG.getUNDEF(ShufVT.getVectorElementType());
5569 SDValue NewV = (Elt < (int)NumElems) ? N->getOperand(0)
5571 return getShuffleScalarElt(NewV.getNode(), Elt % NumElems, DAG,
5575 // Actual nodes that may contain scalar elements
5576 if (Opcode == ISD::BITCAST) {
5577 V = V.getOperand(0);
5578 EVT SrcVT = V.getValueType();
5579 unsigned NumElems = VT.getVectorNumElements();
5581 if (!SrcVT.isVector() || SrcVT.getVectorNumElements() != NumElems)
5585 if (V.getOpcode() == ISD::SCALAR_TO_VECTOR)
5586 return (Index == 0) ? V.getOperand(0)
5587 : DAG.getUNDEF(VT.getVectorElementType());
5589 if (V.getOpcode() == ISD::BUILD_VECTOR)
5590 return V.getOperand(Index);
5595 /// getNumOfConsecutiveZeros - Return the number of elements of a vector
5596 /// shuffle operation which come consecutively from a zero value. The
5597 /// search can start in two different directions, from left or right.
5598 /// We count undefs as zeros until PreferredNum is reached.
5599 static unsigned getNumOfConsecutiveZeros(ShuffleVectorSDNode *SVOp,
5600 unsigned NumElems, bool ZerosFromLeft,
5602 unsigned PreferredNum = -1U) {
5603 unsigned NumZeros = 0;
5604 for (unsigned i = 0; i != NumElems; ++i) {
5605 unsigned Index = ZerosFromLeft ? i : NumElems - i - 1;
5606 SDValue Elt = getShuffleScalarElt(SVOp, Index, DAG, 0);
5610 if (X86::isZeroNode(Elt))
5612 else if (Elt.getOpcode() == ISD::UNDEF) // Undef as zero up to PreferredNum.
5613 NumZeros = std::min(NumZeros + 1, PreferredNum);
5621 /// isShuffleMaskConsecutive - Check if the shuffle mask indices [MaskI, MaskE)
5622 /// correspond consecutively to elements from one of the vector operands,
5623 /// starting from its index OpIdx. Also sets OpNum to the source vector operand used.
5625 bool isShuffleMaskConsecutive(ShuffleVectorSDNode *SVOp,
5626 unsigned MaskI, unsigned MaskE, unsigned OpIdx,
5627 unsigned NumElems, unsigned &OpNum) {
5628 bool SeenV1 = false;
5629 bool SeenV2 = false;
5631 for (unsigned i = MaskI; i != MaskE; ++i, ++OpIdx) {
5632 int Idx = SVOp->getMaskElt(i);
5633 // Ignore undef indices
5637 if (Idx < (int)NumElems)
5642 // Only accept consecutive elements from the same vector
5643 if ((Idx % NumElems != OpIdx) || (SeenV1 && SeenV2))
5647 OpNum = SeenV1 ? 0 : 1;
5651 /// isVectorShiftRight - Returns true if the shuffle can be implemented as a
5652 /// logical right shift of a vector.
5653 static bool isVectorShiftRight(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG,
5654 bool &isLeft, SDValue &ShVal, unsigned &ShAmt) {
5656 SVOp->getSimpleValueType(0).getVectorNumElements();
5657 unsigned NumZeros = getNumOfConsecutiveZeros(
5658 SVOp, NumElems, false /* check zeros from right */, DAG,
5659 SVOp->getMaskElt(0));
5665 // Considering the elements in the mask that are not consecutive zeros,
5666 // check if they consecutively come from only one of the source vectors.
5668 // V1 = {X, A, B, C} 0
5670 // vector_shuffle V1, V2 <1, 2, 3, X>
5672 if (!isShuffleMaskConsecutive(SVOp,
5673 0, // Mask Start Index
5674 NumElems-NumZeros, // Mask End Index(exclusive)
5675 NumZeros, // Where to start looking in the src vector
5676 NumElems, // Number of elements in vector
5677 OpSrc)) // Which source operand ?
5682 ShVal = SVOp->getOperand(OpSrc);
5686 /// isVectorShiftLeft - Returns true if the shuffle can be implemented as a
5687 /// logical left shift of a vector.
5688 static bool isVectorShiftLeft(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG,
5689 bool &isLeft, SDValue &ShVal, unsigned &ShAmt) {
5691 SVOp->getSimpleValueType(0).getVectorNumElements();
5692 unsigned NumZeros = getNumOfConsecutiveZeros(
5693 SVOp, NumElems, true /* check zeros from left */, DAG,
5694 NumElems - SVOp->getMaskElt(NumElems - 1) - 1);
5700 // Considering the elements in the mask that are not consecutive zeros,
5701 // check if they consecutively come from only one of the source vectors.
5703 // 0 { A, B, X, X } = V2
5705 // vector_shuffle V1, V2 <X, X, 4, 5>
5707 if (!isShuffleMaskConsecutive(SVOp,
5708 NumZeros, // Mask Start Index
5709 NumElems, // Mask End Index(exclusive)
5710 0, // Where to start looking in the src vector
5711 NumElems, // Number of elements in vector
5712 OpSrc)) // Which source operand ?
5717 ShVal = SVOp->getOperand(OpSrc);
5721 /// isVectorShift - Returns true if the shuffle can be implemented as a
5722 /// logical left or right shift of a vector.
5723 static bool isVectorShift(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG,
5724 bool &isLeft, SDValue &ShVal, unsigned &ShAmt) {
5725 // Although the logic below supports any bit width, there are no
5726 // shift instructions which handle more than 128-bit vectors.
5727 if (!SVOp->getSimpleValueType(0).is128BitVector())
5730 if (isVectorShiftLeft(SVOp, DAG, isLeft, ShVal, ShAmt) ||
5731 isVectorShiftRight(SVOp, DAG, isLeft, ShVal, ShAmt))
5737 /// LowerBuildVectorv16i8 - Custom lower build_vector of v16i8.
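/// This zero-extends each non-zero i8 to i16, combines adjacent pairs (the odd
/// element shifted left by 8 and OR'd with the even one when both are non-zero),
/// inserts each pair into lane i/2 of a v8i16, and finally bitcasts back to
/// v16i8. For example, elements 2 and 3 end up as
/// (zext(Op[3]) << 8) | zext(Op[2]) in lane 1.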
5739 static SDValue LowerBuildVectorv16i8(SDValue Op, unsigned NonZeros,
5740 unsigned NumNonZero, unsigned NumZero,
5742 const X86Subtarget* Subtarget,
5743 const TargetLowering &TLI) {
5750 for (unsigned i = 0; i < 16; ++i) {
5751 bool ThisIsNonZero = (NonZeros & (1 << i)) != 0;
5752 if (ThisIsNonZero && First) {
5754 V = getZeroVector(MVT::v8i16, Subtarget, DAG, dl);
5756 V = DAG.getUNDEF(MVT::v8i16);
5761 SDValue ThisElt, LastElt;
5762 bool LastIsNonZero = (NonZeros & (1 << (i-1))) != 0;
5763 if (LastIsNonZero) {
5764 LastElt = DAG.getNode(ISD::ZERO_EXTEND, dl,
5765 MVT::i16, Op.getOperand(i-1));
5767 if (ThisIsNonZero) {
5768 ThisElt = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, Op.getOperand(i));
5769 ThisElt = DAG.getNode(ISD::SHL, dl, MVT::i16,
5770 ThisElt, DAG.getConstant(8, MVT::i8));
5772 ThisElt = DAG.getNode(ISD::OR, dl, MVT::i16, ThisElt, LastElt);
5776 if (ThisElt.getNode())
5777 V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v8i16, V, ThisElt,
5778 DAG.getIntPtrConstant(i/2));
5782 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, V);
5785 /// LowerBuildVectorv8i16 - Custom lower build_vector of v8i16.
5787 static SDValue LowerBuildVectorv8i16(SDValue Op, unsigned NonZeros,
5788 unsigned NumNonZero, unsigned NumZero,
5790 const X86Subtarget* Subtarget,
5791 const TargetLowering &TLI) {
5798 for (unsigned i = 0; i < 8; ++i) {
5799 bool isNonZero = (NonZeros & (1 << i)) != 0;
5803 V = getZeroVector(MVT::v8i16, Subtarget, DAG, dl);
5805 V = DAG.getUNDEF(MVT::v8i16);
5808 V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl,
5809 MVT::v8i16, V, Op.getOperand(i),
5810 DAG.getIntPtrConstant(i));
5817 /// LowerBuildVectorv4x32 - Custom lower build_vector of v4i32 or v4f32.
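/// When this lowers to INSERTPS, the immediate is assembled as
/// (SrcElt << 6) | (DstLane << 4) | ZeroMask; e.g. taking element 2 of V2,
/// inserting it into lane 1 of V1 and zeroing lane 3 gives
/// (2 << 6) | (1 << 4) | (1 << 3) = 0x98.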
5818 static SDValue LowerBuildVectorv4x32(SDValue Op, SelectionDAG &DAG,
5819 const X86Subtarget *Subtarget,
5820 const TargetLowering &TLI) {
5821 // Find all zeroable elements.
5823 for (int i=0; i < 4; ++i) {
5824 SDValue Elt = Op->getOperand(i);
5825 Zeroable[i] = (Elt.getOpcode() == ISD::UNDEF || X86::isZeroNode(Elt));
5827 assert(std::count_if(&Zeroable[0], &Zeroable[4],
5828 [](bool M) { return !M; }) > 1 &&
5829 "We expect at least two non-zero elements!");
5831 // We only know how to deal with build_vector nodes where elements are either
5832 // zeroable or extract_vector_elt with constant index.
5833 SDValue FirstNonZero;
5834 unsigned FirstNonZeroIdx;
5835 for (unsigned i=0; i < 4; ++i) {
5838 SDValue Elt = Op->getOperand(i);
5839 if (Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
5840 !isa<ConstantSDNode>(Elt.getOperand(1)))
5842 // Make sure that this node is extracting from a 128-bit vector.
5843 MVT VT = Elt.getOperand(0).getSimpleValueType();
5844 if (!VT.is128BitVector())
5846 if (!FirstNonZero.getNode()) {
5848 FirstNonZeroIdx = i;
5852 assert(FirstNonZero.getNode() && "Unexpected build vector of all zeros!");
5853 SDValue V1 = FirstNonZero.getOperand(0);
5854 MVT VT = V1.getSimpleValueType();
5856 // See if this build_vector can be lowered as a blend with zero.
5858 unsigned EltMaskIdx, EltIdx;
5860 for (EltIdx = 0; EltIdx < 4; ++EltIdx) {
5861 if (Zeroable[EltIdx]) {
5862 // The zero vector will be on the right hand side.
5863 Mask[EltIdx] = EltIdx+4;
5867 Elt = Op->getOperand(EltIdx);
5868 // By construction, Elt is an EXTRACT_VECTOR_ELT with a constant index.
5869 EltMaskIdx = cast<ConstantSDNode>(Elt.getOperand(1))->getZExtValue();
5870 if (Elt.getOperand(0) != V1 || EltMaskIdx != EltIdx)
5872 Mask[EltIdx] = EltIdx;
5876 // Let the shuffle legalizer deal with blend operations.
5877 SDValue VZero = getZeroVector(VT, Subtarget, DAG, SDLoc(Op));
5878 if (V1.getSimpleValueType() != VT)
5879 V1 = DAG.getNode(ISD::BITCAST, SDLoc(V1), VT, V1);
5880 return DAG.getVectorShuffle(VT, SDLoc(V1), V1, VZero, &Mask[0]);
5883 // See if we can lower this build_vector to an INSERTPS.
5884 if (!Subtarget->hasSSE41())
5887 SDValue V2 = Elt.getOperand(0);
5888 if (Elt == FirstNonZero && EltIdx == FirstNonZeroIdx)
5891 bool CanFold = true;
5892 for (unsigned i = EltIdx + 1; i < 4 && CanFold; ++i) {
5896 SDValue Current = Op->getOperand(i);
5897 SDValue SrcVector = Current->getOperand(0);
5900 CanFold = SrcVector == V1 &&
5901 cast<ConstantSDNode>(Current.getOperand(1))->getZExtValue() == i;
5907 assert(V1.getNode() && "Expected at least two non-zero elements!");
5908 if (V1.getSimpleValueType() != MVT::v4f32)
5909 V1 = DAG.getNode(ISD::BITCAST, SDLoc(V1), MVT::v4f32, V1);
5910 if (V2.getSimpleValueType() != MVT::v4f32)
5911 V2 = DAG.getNode(ISD::BITCAST, SDLoc(V2), MVT::v4f32, V2);
5913 // Ok, we can emit an INSERTPS instruction.
5915 for (int i = 0; i < 4; ++i)
5919 unsigned InsertPSMask = EltMaskIdx << 6 | EltIdx << 4 | ZMask;
5920 assert((InsertPSMask & ~0xFFu) == 0 && "Invalid mask!");
5921 SDValue Result = DAG.getNode(X86ISD::INSERTPS, SDLoc(Op), MVT::v4f32, V1, V2,
5922 DAG.getIntPtrConstant(InsertPSMask));
5923 return DAG.getNode(ISD::BITCAST, SDLoc(Op), VT, Result);
5926 /// Return a vector logical shift node.
5927 static SDValue getVShift(bool isLeft, EVT VT, SDValue SrcOp,
5928 unsigned NumBits, SelectionDAG &DAG,
5929 const TargetLowering &TLI, SDLoc dl) {
5930 assert(VT.is128BitVector() && "Unknown type for VShift");
5931 MVT ShVT = MVT::v2i64;
5932 unsigned Opc = isLeft ? X86ISD::VSHLDQ : X86ISD::VSRLDQ;
5933 SrcOp = DAG.getNode(ISD::BITCAST, dl, ShVT, SrcOp);
5934 MVT ScalarShiftTy = TLI.getScalarShiftAmountTy(SrcOp.getValueType());
5935 SDValue ShiftVal = DAG.getConstant(NumBits, ScalarShiftTy);
5936 return DAG.getNode(ISD::BITCAST, dl, VT,
5937 DAG.getNode(Opc, dl, ShVT, SrcOp, ShiftVal));
5941 LowerAsSplatVectorLoad(SDValue SrcOp, MVT VT, SDLoc dl, SelectionDAG &DAG) {
5943 // Check if the scalar load can be widened into a vector load, and if
5944 // the address is "base + cst", see if the cst can be "absorbed" into
5945 // the shuffle mask.
5946 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(SrcOp)) {
5947 SDValue Ptr = LD->getBasePtr();
5948 if (!ISD::isNormalLoad(LD) || LD->isVolatile())
5950 EVT PVT = LD->getValueType(0);
5951 if (PVT != MVT::i32 && PVT != MVT::f32)
5956 if (FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(Ptr)) {
5957 FI = FINode->getIndex();
5959 } else if (DAG.isBaseWithConstantOffset(Ptr) &&
5960 isa<FrameIndexSDNode>(Ptr.getOperand(0))) {
5961 FI = cast<FrameIndexSDNode>(Ptr.getOperand(0))->getIndex();
5962 Offset = Ptr.getConstantOperandVal(1);
5963 Ptr = Ptr.getOperand(0);
5968 // FIXME: 256-bit vector instructions don't require a strict alignment,
5969 // improve this code to support it better.
5970 unsigned RequiredAlign = VT.getSizeInBits()/8;
5971 SDValue Chain = LD->getChain();
5972 // Make sure the stack object alignment is at least 16 or 32.
5973 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
5974 if (DAG.InferPtrAlignment(Ptr) < RequiredAlign) {
5975 if (MFI->isFixedObjectIndex(FI)) {
5976 // Can't change the alignment. FIXME: It's possible to compute
5977 // the exact stack offset and reference FI + adjust offset instead.
5978 // If someone *really* cares about this, that's the way to implement it.
5981 MFI->setObjectAlignment(FI, RequiredAlign);
5985 // (Offset % 16 or 32) must be a multiple of 4. The address is then
5986 // Ptr + (Offset & ~15).
5989 if ((Offset % RequiredAlign) & 3)
5991 int64_t StartOffset = Offset & ~(RequiredAlign-1);
5993 Ptr = DAG.getNode(ISD::ADD, SDLoc(Ptr), Ptr.getValueType(),
5994 Ptr,DAG.getConstant(StartOffset, Ptr.getValueType()));
5996 int EltNo = (Offset - StartOffset) >> 2;
5997 unsigned NumElems = VT.getVectorNumElements();
5999 EVT NVT = EVT::getVectorVT(*DAG.getContext(), PVT, NumElems);
6000 SDValue V1 = DAG.getLoad(NVT, dl, Chain, Ptr,
6001 LD->getPointerInfo().getWithOffset(StartOffset),
6002 false, false, false, 0);
6004 SmallVector<int, 8> Mask;
6005 for (unsigned i = 0; i != NumElems; ++i)
6006 Mask.push_back(EltNo);
6008 return DAG.getVectorShuffle(NVT, dl, V1, DAG.getUNDEF(NVT), &Mask[0]);
6014 /// Given the initializing elements 'Elts' of a vector of type 'VT', see if the
6015 /// elements can be replaced by a single large load which has the same value as
6016 /// a build_vector or insert_subvector whose loaded operands are 'Elts'.
6018 /// Example: <load i32 *a, load i32 *a+4, undef, undef> -> zextload a
6020 /// FIXME: we'd also like to handle the case where the last elements are zero
6021 /// rather than undef via VZEXT_LOAD, but we do not detect that case today.
6022 /// There's even a handy isZeroNode for that purpose.
6023 static SDValue EltsFromConsecutiveLoads(EVT VT, ArrayRef<SDValue> Elts,
6024 SDLoc &DL, SelectionDAG &DAG,
6025 bool isAfterLegalize) {
6026 unsigned NumElems = Elts.size();
6028 LoadSDNode *LDBase = nullptr;
6029 unsigned LastLoadedElt = -1U;
6031 // For each element in the initializer, see if we've found a load or an undef.
6032 // If we don't find an initial load element, or later load elements are
6033 // non-consecutive, bail out.
6034 for (unsigned i = 0; i < NumElems; ++i) {
6035 SDValue Elt = Elts[i];
6036 // Look through a bitcast.
6037 if (Elt.getNode() && Elt.getOpcode() == ISD::BITCAST)
6038 Elt = Elt.getOperand(0);
6039 if (!Elt.getNode() ||
6040 (Elt.getOpcode() != ISD::UNDEF && !ISD::isNON_EXTLoad(Elt.getNode())))
6043 if (Elt.getNode()->getOpcode() == ISD::UNDEF)
6045 LDBase = cast<LoadSDNode>(Elt.getNode());
6049 if (Elt.getOpcode() == ISD::UNDEF)
6052 LoadSDNode *LD = cast<LoadSDNode>(Elt);
6053 EVT LdVT = Elt.getValueType();
6054 // Each loaded element must be the correct fractional portion of the
6055 // requested vector load.
6056 if (LdVT.getSizeInBits() != VT.getSizeInBits() / NumElems)
6058 if (!DAG.isConsecutiveLoad(LD, LDBase, LdVT.getSizeInBits() / 8, i))
6063 // If we have found an entire vector of loads and undefs, then return a large
6064 // load of the entire vector width starting at the base pointer. If we found
6065 // consecutive loads for the low half, generate a vzext_load node.
6066 if (LastLoadedElt == NumElems - 1) {
6067 assert(LDBase && "Did not find base load for merging consecutive loads");
6068 EVT EltVT = LDBase->getValueType(0);
6069 // Ensure that the input vector size for the merged loads matches the
6070 // cumulative size of the input elements.
6071 if (VT.getSizeInBits() != EltVT.getSizeInBits() * NumElems)
6074 if (isAfterLegalize &&
6075 !DAG.getTargetLoweringInfo().isOperationLegal(ISD::LOAD, VT))
6078 SDValue NewLd = SDValue();
6080 NewLd = DAG.getLoad(VT, DL, LDBase->getChain(), LDBase->getBasePtr(),
6081 LDBase->getPointerInfo(), LDBase->isVolatile(),
6082 LDBase->isNonTemporal(), LDBase->isInvariant(),
6083 LDBase->getAlignment());
6085 if (LDBase->hasAnyUseOfValue(1)) {
6086 SDValue NewChain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other,
6088 SDValue(NewLd.getNode(), 1));
6089 DAG.ReplaceAllUsesOfValueWith(SDValue(LDBase, 1), NewChain);
6090 DAG.UpdateNodeOperands(NewChain.getNode(), SDValue(LDBase, 1),
6091 SDValue(NewLd.getNode(), 1));
6097 // TODO: The code below fires only for loading the low v2i32 / v2f32
6098 //of a v4i32 / v4f32. It's probably worth generalizing.
6099 EVT EltVT = VT.getVectorElementType();
6100 if (NumElems == 4 && LastLoadedElt == 1 && (EltVT.getSizeInBits() == 32) &&
6101 DAG.getTargetLoweringInfo().isTypeLegal(MVT::v2i64)) {
6102 SDVTList Tys = DAG.getVTList(MVT::v2i64, MVT::Other);
6103 SDValue Ops[] = { LDBase->getChain(), LDBase->getBasePtr() };
6105 DAG.getMemIntrinsicNode(X86ISD::VZEXT_LOAD, DL, Tys, Ops, MVT::i64,
6106 LDBase->getPointerInfo(),
6107 LDBase->getAlignment(),
6108 false/*isVolatile*/, true/*ReadMem*/,
6111 // Make sure the newly-created LOAD is in the same position as LDBase in
6112 // terms of dependency. We create a TokenFactor for LDBase and ResNode, and
6113 // update uses of LDBase's output chain to use the TokenFactor.
6114 if (LDBase->hasAnyUseOfValue(1)) {
6115 SDValue NewChain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other,
6116 SDValue(LDBase, 1), SDValue(ResNode.getNode(), 1));
6117 DAG.ReplaceAllUsesOfValueWith(SDValue(LDBase, 1), NewChain);
6118 DAG.UpdateNodeOperands(NewChain.getNode(), SDValue(LDBase, 1),
6119 SDValue(ResNode.getNode(), 1));
6122 return DAG.getNode(ISD::BITCAST, DL, VT, ResNode);
6127 /// LowerVectorBroadcast - Attempt to use the vbroadcast instruction
6128 /// to generate a splat value for the following cases:
6129 /// 1. A splat BUILD_VECTOR which uses a single scalar load, or a constant.
6130 /// 2. A splat shuffle which uses a scalar_to_vector node which comes from
6131 /// a scalar load, or a constant.
6132 /// The VBROADCAST node is returned when a pattern is found,
6133 /// or SDValue() otherwise.
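/// For example, a v8f32 BUILD_VECTOR that splats a single loaded f32 becomes one
/// X86ISD::VBROADCAST of that load (a vbroadcastss from memory), provided the
/// load is only used by this node, instead of a load plus shuffles.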
6134 static SDValue LowerVectorBroadcast(SDValue Op, const X86Subtarget* Subtarget,
6135 SelectionDAG &DAG) {
6136 // VBROADCAST requires AVX.
6137 // TODO: Splats could be generated for non-AVX CPUs using SSE
6138 // instructions, but there's less potential gain for only 128-bit vectors.
6139 if (!Subtarget->hasAVX())
6142 MVT VT = Op.getSimpleValueType();
6145 assert((VT.is128BitVector() || VT.is256BitVector() || VT.is512BitVector()) &&
6146 "Unsupported vector type for broadcast.");
6151 switch (Op.getOpcode()) {
6153 // Unknown pattern found.
6156 case ISD::BUILD_VECTOR: {
6157 auto *BVOp = cast<BuildVectorSDNode>(Op.getNode());
6158 BitVector UndefElements;
6159 SDValue Splat = BVOp->getSplatValue(&UndefElements);
6161 // We need a splat of a single value to use broadcast, and it doesn't
6162 // make any sense if the value is only in one element of the vector.
6163 if (!Splat || (VT.getVectorNumElements() - UndefElements.count()) <= 1)
6167 ConstSplatVal = (Ld.getOpcode() == ISD::Constant ||
6168 Ld.getOpcode() == ISD::ConstantFP);
6170 // Make sure that all of the users of a non-constant load are from the
6171 // BUILD_VECTOR node.
6172 if (!ConstSplatVal && !BVOp->isOnlyUserOf(Ld.getNode()))
6177 case ISD::VECTOR_SHUFFLE: {
6178 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
6180 // Shuffles must have a splat mask where the first element is
6182 if ((!SVOp->isSplat()) || SVOp->getMaskElt(0) != 0)
6185 SDValue Sc = Op.getOperand(0);
6186 if (Sc.getOpcode() != ISD::SCALAR_TO_VECTOR &&
6187 Sc.getOpcode() != ISD::BUILD_VECTOR) {
6189 if (!Subtarget->hasInt256())
6192 // Use the register form of the broadcast instruction available on AVX2.
6193 if (VT.getSizeInBits() >= 256)
6194 Sc = Extract128BitVector(Sc, 0, DAG, dl);
6195 return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Sc);
6198 Ld = Sc.getOperand(0);
6199 ConstSplatVal = (Ld.getOpcode() == ISD::Constant ||
6200 Ld.getOpcode() == ISD::ConstantFP);
6202 // The scalar_to_vector node and the suspected
6203 // load node must have exactly one user.
6204 // Constants may have multiple users.
6206 // AVX-512 has a register version of the broadcast
6207 bool hasRegVer = Subtarget->hasAVX512() && VT.is512BitVector() &&
6208 Ld.getValueType().getSizeInBits() >= 32;
6209 if (!ConstSplatVal && ((!Sc.hasOneUse() || !Ld.hasOneUse()) &&
6216 unsigned ScalarSize = Ld.getValueType().getSizeInBits();
6217 bool IsGE256 = (VT.getSizeInBits() >= 256);
6219 // When optimizing for size, generate up to 5 extra bytes for a broadcast
6220 // instruction to save 8 or more bytes of constant pool data.
6221 // TODO: If multiple splats are generated to load the same constant,
6222 // it may be detrimental to overall size. There needs to be a way to detect
6223 // that condition to know if this is truly a size win.
6224 const Function *F = DAG.getMachineFunction().getFunction();
6225 bool OptForSize = F->getAttributes().
6226 hasAttribute(AttributeSet::FunctionIndex, Attribute::OptimizeForSize);
6228 // Handle broadcasting a single constant scalar from the constant pool
6230 // On Sandybridge (no AVX2), it is still better to load a constant vector
6231 // from the constant pool and not to broadcast it from a scalar.
6232 // But override that restriction when optimizing for size.
6233 // TODO: Check if splatting is recommended for other AVX-capable CPUs.
6234 if (ConstSplatVal && (Subtarget->hasAVX2() || OptForSize)) {
6235 EVT CVT = Ld.getValueType();
6236 assert(!CVT.isVector() && "Must not broadcast a vector type");
6238 // Splat f32, i32, v4f64, v4i64 in all cases with AVX2.
6239 // For size optimization, also splat v2f64 and v2i64, and for size opt
6240 // with AVX2, also splat i8 and i16.
6241 // With pattern matching, the VBROADCAST node may become a VMOVDDUP.
6242 if (ScalarSize == 32 || (IsGE256 && ScalarSize == 64) ||
6243 (OptForSize && (ScalarSize == 64 || Subtarget->hasAVX2()))) {
6244 const Constant *C = nullptr;
6245 if (ConstantSDNode *CI = dyn_cast<ConstantSDNode>(Ld))
6246 C = CI->getConstantIntValue();
6247 else if (ConstantFPSDNode *CF = dyn_cast<ConstantFPSDNode>(Ld))
6248 C = CF->getConstantFPValue();
6250 assert(C && "Invalid constant type");
6252 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
6253 SDValue CP = DAG.getConstantPool(C, TLI.getPointerTy());
6254 unsigned Alignment = cast<ConstantPoolSDNode>(CP)->getAlignment();
6255 Ld = DAG.getLoad(CVT, dl, DAG.getEntryNode(), CP,
6256 MachinePointerInfo::getConstantPool(),
6257 false, false, false, Alignment);
6259 return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld);
6263 bool IsLoad = ISD::isNormalLoad(Ld.getNode());
6265 // Handle AVX2 in-register broadcasts.
6266 if (!IsLoad && Subtarget->hasInt256() &&
6267 (ScalarSize == 32 || (IsGE256 && ScalarSize == 64)))
6268 return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld);
6270 // The scalar source must be a normal load.
6274 if (ScalarSize == 32 || (IsGE256 && ScalarSize == 64) ||
6275 (Subtarget->hasVLX() && ScalarSize == 64))
6276 return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld);
6278 // The integer check is needed for the 64-bit into 128-bit case so it doesn't
6279 // match double, since there is no vbroadcastsd xmm.
6280 if (Subtarget->hasInt256() && Ld.getValueType().isInteger()) {
6281 if (ScalarSize == 8 || ScalarSize == 16 || ScalarSize == 64)
6282 return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld);
6285 // Unsupported broadcast.
6289 /// \brief For an EXTRACT_VECTOR_ELT with a constant index return the real
6290 /// underlying vector and index.
6292 /// Modifies \p ExtractedFromVec to the real vector and returns the real
6294 static int getUnderlyingExtractedFromVec(SDValue &ExtractedFromVec,
6296 int Idx = cast<ConstantSDNode>(ExtIdx)->getZExtValue();
6297 if (!isa<ShuffleVectorSDNode>(ExtractedFromVec))
6300 // For 256-bit vectors, LowerEXTRACT_VECTOR_ELT_SSE4 may have already
6302 // (extract_vector_elt (v8f32 %vreg1), Constant<6>)
6304 // (extract_vector_elt (vector_shuffle<2,u,u,u>
6305 // (extract_subvector (v8f32 %vreg0), Constant<4>),
6308 // In this case the vector is the extract_subvector expression and the index
6309 // is 2, as specified by the shuffle.
6310 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(ExtractedFromVec);
6311 SDValue ShuffleVec = SVOp->getOperand(0);
6312 MVT ShuffleVecVT = ShuffleVec.getSimpleValueType();
6313 assert(ShuffleVecVT.getVectorElementType() ==
6314 ExtractedFromVec.getSimpleValueType().getVectorElementType());
6316 int ShuffleIdx = SVOp->getMaskElt(Idx);
6317 if (isUndefOrInRange(ShuffleIdx, 0, ShuffleVecVT.getVectorNumElements())) {
6318 ExtractedFromVec = ShuffleVec;
6324 static SDValue buildFromShuffleMostly(SDValue Op, SelectionDAG &DAG) {
6325 MVT VT = Op.getSimpleValueType();
6327 // Skip if insert_vec_elt is not supported.
6328 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
6329 if (!TLI.isOperationLegalOrCustom(ISD::INSERT_VECTOR_ELT, VT))
6333 unsigned NumElems = Op.getNumOperands();
6337 SmallVector<unsigned, 4> InsertIndices;
6338 SmallVector<int, 8> Mask(NumElems, -1);
6340 for (unsigned i = 0; i != NumElems; ++i) {
6341 unsigned Opc = Op.getOperand(i).getOpcode();
6343 if (Opc == ISD::UNDEF)
6346 if (Opc != ISD::EXTRACT_VECTOR_ELT) {
6347 // Quit if more than 1 element needs inserting.
6348 if (InsertIndices.size() > 1)
6351 InsertIndices.push_back(i);
6355 SDValue ExtractedFromVec = Op.getOperand(i).getOperand(0);
6356 SDValue ExtIdx = Op.getOperand(i).getOperand(1);
6357 // Quit if non-constant index.
6358 if (!isa<ConstantSDNode>(ExtIdx))
6360 int Idx = getUnderlyingExtractedFromVec(ExtractedFromVec, ExtIdx);
6362 // Quit if extracted from vector of different type.
6363 if (ExtractedFromVec.getValueType() != VT)
6366 if (!VecIn1.getNode())
6367 VecIn1 = ExtractedFromVec;
6368 else if (VecIn1 != ExtractedFromVec) {
6369 if (!VecIn2.getNode())
6370 VecIn2 = ExtractedFromVec;
6371 else if (VecIn2 != ExtractedFromVec)
6372 // Quit if more than 2 vectors to shuffle
6376 if (ExtractedFromVec == VecIn1)
6378 else if (ExtractedFromVec == VecIn2)
6379 Mask[i] = Idx + NumElems;
6382 if (!VecIn1.getNode())
6385 VecIn2 = VecIn2.getNode() ? VecIn2 : DAG.getUNDEF(VT);
6386 SDValue NV = DAG.getVectorShuffle(VT, DL, VecIn1, VecIn2, &Mask[0]);
6387 for (unsigned i = 0, e = InsertIndices.size(); i != e; ++i) {
6388 unsigned Idx = InsertIndices[i];
6389 NV = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VT, NV, Op.getOperand(Idx),
6390 DAG.getIntPtrConstant(Idx));
6396 // Lower BUILD_VECTOR operation for v8i1 and v16i1 types.
6398 X86TargetLowering::LowerBUILD_VECTORvXi1(SDValue Op, SelectionDAG &DAG) const {
6400 MVT VT = Op.getSimpleValueType();
6401 assert((VT.getVectorElementType() == MVT::i1) && (VT.getSizeInBits() <= 16) &&
6402 "Unexpected type in LowerBUILD_VECTORvXi1!");
6405 if (ISD::isBuildVectorAllZeros(Op.getNode())) {
6406 SDValue Cst = DAG.getTargetConstant(0, MVT::i1);
6407 SmallVector<SDValue, 16> Ops(VT.getVectorNumElements(), Cst);
6408 return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Ops);
6411 if (ISD::isBuildVectorAllOnes(Op.getNode())) {
6412 SDValue Cst = DAG.getTargetConstant(1, MVT::i1);
6413 SmallVector<SDValue, 16> Ops(VT.getVectorNumElements(), Cst);
6414 return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Ops);
6417 bool AllContants = true;
6418 uint64_t Immediate = 0;
6419 int NonConstIdx = -1;
6420 bool IsSplat = true;
6421 unsigned NumNonConsts = 0;
6422 unsigned NumConsts = 0;
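// Scan the operands: count constants and non-constants, track whether the
// vector is a splat, and pack the known-one bits into Immediate. For example
// (illustrative), an all-constant v8i1 <1,0,1,1,0,0,0,1> packs to Immediate =
// 0b10001101 (0x8D); it is then materialized as the i16 constant 0x008D,
// bitcast to v16i1, and the low v8i1 subvector is extracted.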
6423 for (unsigned idx = 0, e = Op.getNumOperands(); idx < e; ++idx) {
6424 SDValue In = Op.getOperand(idx);
6425 if (In.getOpcode() == ISD::UNDEF)
6427 if (!isa<ConstantSDNode>(In)) {
6428 AllContants = false;
6433 if (cast<ConstantSDNode>(In)->getZExtValue())
6434 Immediate |= (1ULL << idx);
6436 if (In != Op.getOperand(0))
6441 SDValue FullMask = DAG.getNode(ISD::BITCAST, dl, MVT::v16i1,
6442 DAG.getConstant(Immediate, MVT::i16));
6443 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, FullMask,
6444 DAG.getIntPtrConstant(0));
6447 if (NumNonConsts == 1 && NonConstIdx != 0) {
6450 SDValue VecAsImm = DAG.getConstant(Immediate,
6451 MVT::getIntegerVT(VT.getSizeInBits()));
6452 DstVec = DAG.getNode(ISD::BITCAST, dl, VT, VecAsImm);
6455 DstVec = DAG.getUNDEF(VT);
6456 return DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, DstVec,
6457 Op.getOperand(NonConstIdx),
6458 DAG.getIntPtrConstant(NonConstIdx));
6460 if (!IsSplat && (NonConstIdx != 0))
6461 llvm_unreachable("Unsupported BUILD_VECTOR operation");
6462 MVT SelectVT = (VT == MVT::v16i1)? MVT::i16 : MVT::i8;
6465 Select = DAG.getNode(ISD::SELECT, dl, SelectVT, Op.getOperand(0),
6466 DAG.getConstant(-1, SelectVT),
6467 DAG.getConstant(0, SelectVT));
6469 Select = DAG.getNode(ISD::SELECT, dl, SelectVT, Op.getOperand(0),
6470 DAG.getConstant((Immediate | 1), SelectVT),
6471 DAG.getConstant(Immediate, SelectVT));
6472 return DAG.getNode(ISD::BITCAST, dl, VT, Select);
6475 /// \brief Return true if \p N implements a horizontal binop and return the
6476 /// operands for the horizontal binop into V0 and V1.
6478 /// This is a helper function of PerformBUILD_VECTORCombine.
6479 /// This function checks whether the input build_vector \p N implements a
6480 /// horizontal operation. Parameter \p Opcode defines the kind of horizontal
6481 /// operation to match.
6482 /// For example, if \p Opcode is equal to ISD::ADD, then this function
6483 /// checks if \p N implements a horizontal arithmetic add; if instead \p Opcode
6484 /// is equal to ISD::SUB, then this function checks if this is a horizontal arithmetic sub.
6487 /// This function only analyzes elements of \p N whose indices are
6488 /// in range [BaseIdx, LastIdx).
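/// For example (illustrative, with hypothetical inputs A and B), a v4f32
///   build_vector (fadd A[0], A[1]), (fadd A[2], A[3]),
///                (fadd B[0], B[1]), (fadd B[2], B[3])
/// (where A[i] stands for (extract_vector_elt A, i)) matches with V0 = A and
/// V1 = B, and the caller can lower it to a single horizontal add of A and B.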
6489 static bool isHorizontalBinOp(const BuildVectorSDNode *N, unsigned Opcode,
6491 unsigned BaseIdx, unsigned LastIdx,
6492 SDValue &V0, SDValue &V1) {
6493 EVT VT = N->getValueType(0);
6495 assert(BaseIdx * 2 <= LastIdx && "Invalid Indices in input!");
6496 assert(VT.isVector() && VT.getVectorNumElements() >= LastIdx &&
6497 "Invalid Vector in input!");
6499 bool IsCommutable = (Opcode == ISD::ADD || Opcode == ISD::FADD);
6500 bool CanFold = true;
6501 unsigned ExpectedVExtractIdx = BaseIdx;
6502 unsigned NumElts = LastIdx - BaseIdx;
6503 V0 = DAG.getUNDEF(VT);
6504 V1 = DAG.getUNDEF(VT);
6506 // Check if N implements a horizontal binop.
6507 for (unsigned i = 0, e = NumElts; i != e && CanFold; ++i) {
6508 SDValue Op = N->getOperand(i + BaseIdx);
6511 if (Op->getOpcode() == ISD::UNDEF) {
6512 // Update the expected vector extract index.
6513 if (i * 2 == NumElts)
6514 ExpectedVExtractIdx = BaseIdx;
6515 ExpectedVExtractIdx += 2;
6519 CanFold = Op->getOpcode() == Opcode && Op->hasOneUse();
6524 SDValue Op0 = Op.getOperand(0);
6525 SDValue Op1 = Op.getOperand(1);
6527 // Try to match the following pattern:
6528 // (BINOP (extract_vector_elt A, I), (extract_vector_elt A, I+1))
6529 CanFold = (Op0.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
6530 Op1.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
6531 Op0.getOperand(0) == Op1.getOperand(0) &&
6532 isa<ConstantSDNode>(Op0.getOperand(1)) &&
6533 isa<ConstantSDNode>(Op1.getOperand(1)));
6537 unsigned I0 = cast<ConstantSDNode>(Op0.getOperand(1))->getZExtValue();
6538 unsigned I1 = cast<ConstantSDNode>(Op1.getOperand(1))->getZExtValue();
6540 if (i * 2 < NumElts) {
6541 if (V0.getOpcode() == ISD::UNDEF)
6542 V0 = Op0.getOperand(0);
6544 if (V1.getOpcode() == ISD::UNDEF)
6545 V1 = Op0.getOperand(0);
6546 if (i * 2 == NumElts)
6547 ExpectedVExtractIdx = BaseIdx;
6550 SDValue Expected = (i * 2 < NumElts) ? V0 : V1;
6551 if (I0 == ExpectedVExtractIdx)
6552 CanFold = I1 == I0 + 1 && Op0.getOperand(0) == Expected;
6553 else if (IsCommutable && I1 == ExpectedVExtractIdx) {
6554 // Try to match the following dag sequence:
6555 // (BINOP (extract_vector_elt A, I+1), (extract_vector_elt A, I))
6556 CanFold = I0 == I1 + 1 && Op1.getOperand(0) == Expected;
6560 ExpectedVExtractIdx += 2;
6566 /// \brief Emit a sequence of two 128-bit horizontal add/sub followed by
6567 /// a concat_vector.
6569 /// This is a helper function of PerformBUILD_VECTORCombine.
6570 /// This function expects two 256-bit vectors called V0 and V1.
6571 /// At first, each vector is split into two separate 128-bit vectors.
6572 /// Then, the resulting 128-bit vectors are used to implement two
6573 /// horizontal binary operations.
6575 /// The kind of horizontal binary operation is defined by \p X86Opcode.
6577 /// \p Mode specifies how the 128-bit parts of V0 and V1 are passed as inputs to
6578 /// the two new horizontal binops.
6579 /// When Mode is set, the first horizontal binop dag node would take as input
6580 /// the lower 128-bit of V0 and the upper 128-bit of V0. The second
6581 /// horizontal binop dag node would take as input the lower 128-bit of V1
6582 /// and the upper 128-bit of V1.
6584 /// HADD V0_LO, V0_HI
6585 /// HADD V1_LO, V1_HI
6587 /// Otherwise, the first horizontal binop dag node takes as input the lower
6588 /// 128-bit of V0 and the lower 128-bit of V1, and the second horizontal binop
6589 /// dag node takes the upper 128-bit of V0 and the upper 128-bit of V1.
6591 /// HADD V0_LO, V1_LO
6592 /// HADD V0_HI, V1_HI
6594 /// If \p isUndefLO is set, then the algorithm propagates UNDEF to the lower
6595 /// 128-bits of the result. If \p isUndefHI is set, then UNDEF is propagated to
6596 /// the upper 128-bits of the result.
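/// In short (illustrative): for 256-bit inputs V0 and V1 this produces
///   Mode set:   concat_vectors (HOP V0_LO, V0_HI), (HOP V1_LO, V1_HI)
///   Mode unset: concat_vectors (HOP V0_LO, V1_LO), (HOP V0_HI, V1_HI)
/// where HOP is the horizontal opcode \p X86Opcode, and either half is left
/// UNDEF when \p isUndefLO or \p isUndefHI is set.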
6597 static SDValue ExpandHorizontalBinOp(const SDValue &V0, const SDValue &V1,
6598 SDLoc DL, SelectionDAG &DAG,
6599 unsigned X86Opcode, bool Mode,
6600 bool isUndefLO, bool isUndefHI) {
6601 EVT VT = V0.getValueType();
6602 assert(VT.is256BitVector() && VT == V1.getValueType() &&
6603 "Invalid nodes in input!");
6605 unsigned NumElts = VT.getVectorNumElements();
6606 SDValue V0_LO = Extract128BitVector(V0, 0, DAG, DL);
6607 SDValue V0_HI = Extract128BitVector(V0, NumElts/2, DAG, DL);
6608 SDValue V1_LO = Extract128BitVector(V1, 0, DAG, DL);
6609 SDValue V1_HI = Extract128BitVector(V1, NumElts/2, DAG, DL);
6610 EVT NewVT = V0_LO.getValueType();
6612 SDValue LO = DAG.getUNDEF(NewVT);
6613 SDValue HI = DAG.getUNDEF(NewVT);
6616 // Don't emit a horizontal binop if the result is expected to be UNDEF.
6617 if (!isUndefLO && V0->getOpcode() != ISD::UNDEF)
6618 LO = DAG.getNode(X86Opcode, DL, NewVT, V0_LO, V0_HI);
6619 if (!isUndefHI && V1->getOpcode() != ISD::UNDEF)
6620 HI = DAG.getNode(X86Opcode, DL, NewVT, V1_LO, V1_HI);
6622 // Don't emit a horizontal binop if the result is expected to be UNDEF.
6623 if (!isUndefLO && (V0_LO->getOpcode() != ISD::UNDEF ||
6624 V1_LO->getOpcode() != ISD::UNDEF))
6625 LO = DAG.getNode(X86Opcode, DL, NewVT, V0_LO, V1_LO);
6627 if (!isUndefHI && (V0_HI->getOpcode() != ISD::UNDEF ||
6628 V1_HI->getOpcode() != ISD::UNDEF))
6629 HI = DAG.getNode(X86Opcode, DL, NewVT, V0_HI, V1_HI);
6632 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, LO, HI);
6635 /// \brief Try to fold a build_vector that performs an 'addsub' into the
6636 /// sequence of 'vadd + vsub + blendi'.
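/// For example (illustrative, with hypothetical inputs A and B), a v4f32
///   build_vector (fsub A[0], B[0]), (fadd A[1], B[1]),
///                (fsub A[2], B[2]), (fadd A[3], B[3])
/// is folded to (X86ISD::ADDSUB A, B), i.e. a single addsubps.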
6637 static SDValue matchAddSub(const BuildVectorSDNode *BV, SelectionDAG &DAG,
6638 const X86Subtarget *Subtarget) {
6640 EVT VT = BV->getValueType(0);
6641 unsigned NumElts = VT.getVectorNumElements();
6642 SDValue InVec0 = DAG.getUNDEF(VT);
6643 SDValue InVec1 = DAG.getUNDEF(VT);
6645 assert((VT == MVT::v8f32 || VT == MVT::v4f64 || VT == MVT::v4f32 ||
6646 VT == MVT::v2f64) && "build_vector with an invalid type found!");
6648 // Odd-numbered elements in the input build vector are obtained from
6649 // adding two integer/float elements.
6650 // Even-numbered elements in the input build vector are obtained from
6651 // subtracting two integer/float elements.
6652 unsigned ExpectedOpcode = ISD::FSUB;
6653 unsigned NextExpectedOpcode = ISD::FADD;
6654 bool AddFound = false;
6655 bool SubFound = false;
6657 for (unsigned i = 0, e = NumElts; i != e; i++) {
6658 SDValue Op = BV->getOperand(i);
6660 // Skip 'undef' values.
6661 unsigned Opcode = Op.getOpcode();
6662 if (Opcode == ISD::UNDEF) {
6663 std::swap(ExpectedOpcode, NextExpectedOpcode);
6667 // Early exit if we found an unexpected opcode.
6668 if (Opcode != ExpectedOpcode)
6671 SDValue Op0 = Op.getOperand(0);
6672 SDValue Op1 = Op.getOperand(1);
6674 // Try to match the following pattern:
6675 // (BINOP (extract_vector_elt A, i), (extract_vector_elt B, i))
6676 // Early exit if we cannot match that sequence.
6677 if (Op0.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
6678 Op1.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
6679 !isa<ConstantSDNode>(Op0.getOperand(1)) ||
6680 !isa<ConstantSDNode>(Op1.getOperand(1)) ||
6681 Op0.getOperand(1) != Op1.getOperand(1))
6684 unsigned I0 = cast<ConstantSDNode>(Op0.getOperand(1))->getZExtValue();
6688 // We found a valid add/sub node. Update the information accordingly.
6694 // Update InVec0 and InVec1.
6695 if (InVec0.getOpcode() == ISD::UNDEF)
6696 InVec0 = Op0.getOperand(0);
6697 if (InVec1.getOpcode() == ISD::UNDEF)
6698 InVec1 = Op1.getOperand(0);
6700 // Make sure that the operands of each add/sub node always
6701 // come from the same pair of vectors.
6702 if (InVec0 != Op0.getOperand(0)) {
6703 if (ExpectedOpcode == ISD::FSUB)
6706 // FADD is commutable. Try to commute the operands
6707 // and then test again.
6708 std::swap(Op0, Op1);
6709 if (InVec0 != Op0.getOperand(0))
6713 if (InVec1 != Op1.getOperand(0))
6716 // Update the pair of expected opcodes.
6717 std::swap(ExpectedOpcode, NextExpectedOpcode);
6720 // Don't try to fold this build_vector into an ADDSUB if the inputs are undef.
6721 if (AddFound && SubFound && InVec0.getOpcode() != ISD::UNDEF &&
6722 InVec1.getOpcode() != ISD::UNDEF)
6723 return DAG.getNode(X86ISD::ADDSUB, DL, VT, InVec0, InVec1);
6728 static SDValue PerformBUILD_VECTORCombine(SDNode *N, SelectionDAG &DAG,
6729 const X86Subtarget *Subtarget) {
6731 EVT VT = N->getValueType(0);
6732 unsigned NumElts = VT.getVectorNumElements();
6733 BuildVectorSDNode *BV = cast<BuildVectorSDNode>(N);
6734 SDValue InVec0, InVec1;
6736 // Try to match an ADDSUB.
6737 if ((Subtarget->hasSSE3() && (VT == MVT::v4f32 || VT == MVT::v2f64)) ||
6738 (Subtarget->hasAVX() && (VT == MVT::v8f32 || VT == MVT::v4f64))) {
6739 SDValue Value = matchAddSub(BV, DAG, Subtarget);
6740 if (Value.getNode())
6744 // Try to match horizontal ADD/SUB.
6745 unsigned NumUndefsLO = 0;
6746 unsigned NumUndefsHI = 0;
6747 unsigned Half = NumElts/2;
6749 // Count the number of UNDEF operands in the input build_vector.
6750 for (unsigned i = 0, e = Half; i != e; ++i)
6751 if (BV->getOperand(i)->getOpcode() == ISD::UNDEF)
6754 for (unsigned i = Half, e = NumElts; i != e; ++i)
6755 if (BV->getOperand(i)->getOpcode() == ISD::UNDEF)
6758 // Early exit if this is either a build_vector of all UNDEFs or all the
6759 // operands but one are UNDEF.
6760 if (NumUndefsLO + NumUndefsHI + 1 >= NumElts)
6763 if ((VT == MVT::v4f32 || VT == MVT::v2f64) && Subtarget->hasSSE3()) {
6764 // Try to match an SSE3 float HADD/HSUB.
6765 if (isHorizontalBinOp(BV, ISD::FADD, DAG, 0, NumElts, InVec0, InVec1))
6766 return DAG.getNode(X86ISD::FHADD, DL, VT, InVec0, InVec1);
6768 if (isHorizontalBinOp(BV, ISD::FSUB, DAG, 0, NumElts, InVec0, InVec1))
6769 return DAG.getNode(X86ISD::FHSUB, DL, VT, InVec0, InVec1);
6770 } else if ((VT == MVT::v4i32 || VT == MVT::v8i16) && Subtarget->hasSSSE3()) {
6771 // Try to match an SSSE3 integer HADD/HSUB.
6772 if (isHorizontalBinOp(BV, ISD::ADD, DAG, 0, NumElts, InVec0, InVec1))
6773 return DAG.getNode(X86ISD::HADD, DL, VT, InVec0, InVec1);
6775 if (isHorizontalBinOp(BV, ISD::SUB, DAG, 0, NumElts, InVec0, InVec1))
6776 return DAG.getNode(X86ISD::HSUB, DL, VT, InVec0, InVec1);
6779 if (!Subtarget->hasAVX())
6782 if ((VT == MVT::v8f32 || VT == MVT::v4f64)) {
6783 // Try to match an AVX horizontal add/sub of packed single/double
6784 // precision floating point values from 256-bit vectors.
6785 SDValue InVec2, InVec3;
6786 if (isHorizontalBinOp(BV, ISD::FADD, DAG, 0, Half, InVec0, InVec1) &&
6787 isHorizontalBinOp(BV, ISD::FADD, DAG, Half, NumElts, InVec2, InVec3) &&
6788 ((InVec0.getOpcode() == ISD::UNDEF ||
6789 InVec2.getOpcode() == ISD::UNDEF) || InVec0 == InVec2) &&
6790 ((InVec1.getOpcode() == ISD::UNDEF ||
6791 InVec3.getOpcode() == ISD::UNDEF) || InVec1 == InVec3))
6792 return DAG.getNode(X86ISD::FHADD, DL, VT, InVec0, InVec1);
6794 if (isHorizontalBinOp(BV, ISD::FSUB, DAG, 0, Half, InVec0, InVec1) &&
6795 isHorizontalBinOp(BV, ISD::FSUB, DAG, Half, NumElts, InVec2, InVec3) &&
6796 ((InVec0.getOpcode() == ISD::UNDEF ||
6797 InVec2.getOpcode() == ISD::UNDEF) || InVec0 == InVec2) &&
6798 ((InVec1.getOpcode() == ISD::UNDEF ||
6799 InVec3.getOpcode() == ISD::UNDEF) || InVec1 == InVec3))
6800 return DAG.getNode(X86ISD::FHSUB, DL, VT, InVec0, InVec1);
6801 } else if (VT == MVT::v8i32 || VT == MVT::v16i16) {
6802 // Try to match an AVX2 horizontal add/sub of signed integers.
6803 SDValue InVec2, InVec3;
6805 bool CanFold = true;
6807 if (isHorizontalBinOp(BV, ISD::ADD, DAG, 0, Half, InVec0, InVec1) &&
6808 isHorizontalBinOp(BV, ISD::ADD, DAG, Half, NumElts, InVec2, InVec3) &&
6809 ((InVec0.getOpcode() == ISD::UNDEF ||
6810 InVec2.getOpcode() == ISD::UNDEF) || InVec0 == InVec2) &&
6811 ((InVec1.getOpcode() == ISD::UNDEF ||
6812 InVec3.getOpcode() == ISD::UNDEF) || InVec1 == InVec3))
6813 X86Opcode = X86ISD::HADD;
6814 else if (isHorizontalBinOp(BV, ISD::SUB, DAG, 0, Half, InVec0, InVec1) &&
6815 isHorizontalBinOp(BV, ISD::SUB, DAG, Half, NumElts, InVec2, InVec3) &&
6816 ((InVec0.getOpcode() == ISD::UNDEF ||
6817 InVec2.getOpcode() == ISD::UNDEF) || InVec0 == InVec2) &&
6818 ((InVec1.getOpcode() == ISD::UNDEF ||
6819 InVec3.getOpcode() == ISD::UNDEF) || InVec1 == InVec3))
6820 X86Opcode = X86ISD::HSUB;
6825 // Fold this build_vector into a single horizontal add/sub.
6826 // Do this only if the target has AVX2.
6827 if (Subtarget->hasAVX2())
6828 return DAG.getNode(X86Opcode, DL, VT, InVec0, InVec1);
6830 // Do not try to expand this build_vector into a pair of horizontal
6831 // add/sub if we can emit a pair of scalar add/sub.
6832 if (NumUndefsLO + 1 == Half || NumUndefsHI + 1 == Half)
6835 // Convert this build_vector into a pair of horizontal binops followed by a concat vector.
6837 bool isUndefLO = NumUndefsLO == Half;
6838 bool isUndefHI = NumUndefsHI == Half;
6839 return ExpandHorizontalBinOp(InVec0, InVec1, DL, DAG, X86Opcode, false,
6840 isUndefLO, isUndefHI);
6844 if ((VT == MVT::v8f32 || VT == MVT::v4f64 || VT == MVT::v8i32 ||
6845 VT == MVT::v16i16) && Subtarget->hasAVX()) {
6847 if (isHorizontalBinOp(BV, ISD::ADD, DAG, 0, NumElts, InVec0, InVec1))
6848 X86Opcode = X86ISD::HADD;
6849 else if (isHorizontalBinOp(BV, ISD::SUB, DAG, 0, NumElts, InVec0, InVec1))
6850 X86Opcode = X86ISD::HSUB;
6851 else if (isHorizontalBinOp(BV, ISD::FADD, DAG, 0, NumElts, InVec0, InVec1))
6852 X86Opcode = X86ISD::FHADD;
6853 else if (isHorizontalBinOp(BV, ISD::FSUB, DAG, 0, NumElts, InVec0, InVec1))
6854 X86Opcode = X86ISD::FHSUB;
6858 // Don't try to expand this build_vector into a pair of horizontal add/sub
6859 // if we can simply emit a pair of scalar add/sub.
6860 if (NumUndefsLO + 1 == Half || NumUndefsHI + 1 == Half)
6863 // Convert this build_vector into two horizontal add/sub followed by a concat vector.
6865 bool isUndefLO = NumUndefsLO == Half;
6866 bool isUndefHI = NumUndefsHI == Half;
6867 return ExpandHorizontalBinOp(InVec0, InVec1, DL, DAG, X86Opcode, true,
6868 isUndefLO, isUndefHI);
6875 X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const {
6878 MVT VT = Op.getSimpleValueType();
6879 MVT ExtVT = VT.getVectorElementType();
6880 unsigned NumElems = Op.getNumOperands();
6882 // Generate vectors for predicate vectors.
6883 if (VT.getScalarType() == MVT::i1 && Subtarget->hasAVX512())
6884 return LowerBUILD_VECTORvXi1(Op, DAG);
6886 // Vectors containing all zeros can be matched by pxor and xorps later
6887 if (ISD::isBuildVectorAllZeros(Op.getNode())) {
6888 // Canonicalize this to <4 x i32> to 1) ensure the zero vectors are CSE'd
6889 // and 2) ensure that i64 scalars are eliminated on x86-32 hosts.
6890 if (VT == MVT::v4i32 || VT == MVT::v8i32 || VT == MVT::v16i32)
6893 return getZeroVector(VT, Subtarget, DAG, dl);
6896 // Vectors containing all ones can be matched by pcmpeqd on 128-bit width
6897 // vectors or broken into v4i32 operations on 256-bit vectors. AVX2 can use
6898 // vpcmpeqd on 256-bit vectors.
6899 if (Subtarget->hasSSE2() && ISD::isBuildVectorAllOnes(Op.getNode())) {
6900 if (VT == MVT::v4i32 || (VT == MVT::v8i32 && Subtarget->hasInt256()))
6903 if (!VT.is512BitVector())
6904 return getOnesVector(VT, Subtarget->hasInt256(), DAG, dl);
6907 SDValue Broadcast = LowerVectorBroadcast(Op, Subtarget, DAG);
6908 if (Broadcast.getNode())
6911 unsigned EVTBits = ExtVT.getSizeInBits();
6913 unsigned NumZero = 0;
6914 unsigned NumNonZero = 0;
6915 unsigned NonZeros = 0;
6916 bool IsAllConstants = true;
6917 SmallSet<SDValue, 8> Values;
6918 for (unsigned i = 0; i < NumElems; ++i) {
6919 SDValue Elt = Op.getOperand(i);
6920 if (Elt.getOpcode() == ISD::UNDEF)
6923 if (Elt.getOpcode() != ISD::Constant &&
6924 Elt.getOpcode() != ISD::ConstantFP)
6925 IsAllConstants = false;
6926 if (X86::isZeroNode(Elt))
6929 NonZeros |= (1 << i);
6934 // All undef vector. Return an UNDEF. All zero vectors were handled above.
6935 if (NumNonZero == 0)
6936 return DAG.getUNDEF(VT);
6938 // Special case for single non-zero, non-undef, element.
6939 if (NumNonZero == 1) {
6940 unsigned Idx = countTrailingZeros(NonZeros);
6941 SDValue Item = Op.getOperand(Idx);
6943 // If this is an insertion of an i64 value on x86-32, and if the top bits of
6944 // the value are obviously zero, truncate the value to i32 and do the
6945 // insertion that way. Only do this if the value is non-constant or if the
6946 // value is a constant being inserted into element 0. It is cheaper to do
6947 // a constant pool load than it is to do a movd + shuffle.
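// For example (illustrative): building a v2i64 on x86-32 from an i64 whose
// upper 32 bits are known zero becomes a truncate to i32, a movd into a
// v4i32, a shuffle that zeroes the remaining lanes, and a bitcast back to
// v2i64.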
6948 if (ExtVT == MVT::i64 && !Subtarget->is64Bit() &&
6949 (!IsAllConstants || Idx == 0)) {
6950 if (DAG.MaskedValueIsZero(Item, APInt::getBitsSet(64, 32, 64))) {
6952 assert(VT == MVT::v2i64 && "Expected an SSE value type!");
6953 EVT VecVT = MVT::v4i32;
6954 unsigned VecElts = 4;
6956 // Truncate the value (which may itself be a constant) to i32, and
6957 // convert it to a vector with movd (S2V+shuffle to zero extend).
6958 Item = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Item);
6959 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VecVT, Item);
6961 // If using the new shuffle lowering, just directly insert this.
6962 if (ExperimentalVectorShuffleLowering)
6964 ISD::BITCAST, dl, VT,
6965 getShuffleVectorZeroOrUndef(Item, Idx * 2, true, Subtarget, DAG));
6967 Item = getShuffleVectorZeroOrUndef(Item, 0, true, Subtarget, DAG);
6969 // Now we have our 32-bit value zero extended in the low element of
6970 // a vector. If Idx != 0, swizzle it into place.
6972 SmallVector<int, 4> Mask;
6973 Mask.push_back(Idx);
6974 for (unsigned i = 1; i != VecElts; ++i)
6976 Item = DAG.getVectorShuffle(VecVT, dl, Item, DAG.getUNDEF(VecVT),
6979 return DAG.getNode(ISD::BITCAST, dl, VT, Item);
6983 // If we have a constant or non-constant insertion into the low element of
6984 // a vector, we can do this with SCALAR_TO_VECTOR + shuffle of zero into
6985 // the rest of the elements. This will be matched as movd/movq/movss/movsd
6986 // depending on what the source datatype is.
6989 return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Item);
6991 if (ExtVT == MVT::i32 || ExtVT == MVT::f32 || ExtVT == MVT::f64 ||
6992 (ExtVT == MVT::i64 && Subtarget->is64Bit())) {
6993 if (VT.is256BitVector() || VT.is512BitVector()) {
6994 SDValue ZeroVec = getZeroVector(VT, Subtarget, DAG, dl);
6995 return DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, ZeroVec,
6996 Item, DAG.getIntPtrConstant(0));
6998 assert(VT.is128BitVector() && "Expected an SSE value type!");
6999 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Item);
7000 // Turn it into a MOVL (i.e. movss, movsd, or movd) to a zero vector.
7001 return getShuffleVectorZeroOrUndef(Item, 0, true, Subtarget, DAG);
7004 if (ExtVT == MVT::i16 || ExtVT == MVT::i8) {
7005 Item = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, Item);
7006 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32, Item);
7007 if (VT.is256BitVector()) {
7008 SDValue ZeroVec = getZeroVector(MVT::v8i32, Subtarget, DAG, dl);
7009 Item = Insert128BitVector(ZeroVec, Item, 0, DAG, dl);
7011 assert(VT.is128BitVector() && "Expected an SSE value type!");
7012 Item = getShuffleVectorZeroOrUndef(Item, 0, true, Subtarget, DAG);
7014 return DAG.getNode(ISD::BITCAST, dl, VT, Item);
7018 // Is it a vector logical left shift?
7019 if (NumElems == 2 && Idx == 1 &&
7020 X86::isZeroNode(Op.getOperand(0)) &&
7021 !X86::isZeroNode(Op.getOperand(1))) {
7022 unsigned NumBits = VT.getSizeInBits();
7023 return getVShift(true, VT,
7024 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl,
7025 VT, Op.getOperand(1)),
7026 NumBits/2, DAG, *this, dl);
7029 if (IsAllConstants) // Otherwise, it's better to do a constpool load.
7032 // Otherwise, if this is a vector with i32 or f32 elements, and the element
7033 // is a non-constant being inserted into an element other than the low one,
7034 // we can't use a constant pool load. Instead, use SCALAR_TO_VECTOR (aka
7035 // movd/movss) to move this into the low element, then shuffle it into place.
7037 if (EVTBits == 32) {
7038 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Item);
7040 // If using the new shuffle lowering, just directly insert this.
7041 if (ExperimentalVectorShuffleLowering)
7042 return getShuffleVectorZeroOrUndef(Item, Idx, NumZero > 0, Subtarget, DAG);
7044 // Turn it into a shuffle of zero and zero-extended scalar to vector.
7045 Item = getShuffleVectorZeroOrUndef(Item, 0, NumZero > 0, Subtarget, DAG);
7046 SmallVector<int, 8> MaskVec;
7047 for (unsigned i = 0; i != NumElems; ++i)
7048 MaskVec.push_back(i == Idx ? 0 : 1);
7049 return DAG.getVectorShuffle(VT, dl, Item, DAG.getUNDEF(VT), &MaskVec[0]);
7053 // Splat is obviously ok. Let legalizer expand it to a shuffle.
7054 if (Values.size() == 1) {
7055 if (EVTBits == 32) {
7056 // Instead of a shuffle like this:
7057 // shuffle (scalar_to_vector (load (ptr + 4))), undef, <0, 0, 0, 0>
7058 // Check if it's possible to issue this instead.
7059 // shuffle (vload ptr), undef, <1, 1, 1, 1>
7060 unsigned Idx = countTrailingZeros(NonZeros);
7061 SDValue Item = Op.getOperand(Idx);
7062 if (Op.getNode()->isOnlyUserOf(Item.getNode()))
7063 return LowerAsSplatVectorLoad(Item, VT, dl, DAG);
7068 // A vector full of immediates; various special cases are already
7069 // handled, so this is best done with a single constant-pool load.
7073 // For AVX-length vectors, see if we can use a vector load to get all of the
7074 // elements, otherwise build the individual 128-bit pieces and use
7075 // shuffles to put them in place.
7076 if (VT.is256BitVector() || VT.is512BitVector()) {
7077 SmallVector<SDValue, 64> V;
7078 for (unsigned i = 0; i != NumElems; ++i)
7079 V.push_back(Op.getOperand(i));
7081 // Check for a build vector of consecutive loads.
7082 if (SDValue LD = EltsFromConsecutiveLoads(VT, V, dl, DAG, false))
7085 EVT HVT = EVT::getVectorVT(*DAG.getContext(), ExtVT, NumElems/2);
7087 // Build both the lower and upper subvector.
7088 SDValue Lower = DAG.getNode(ISD::BUILD_VECTOR, dl, HVT,
7089 makeArrayRef(&V[0], NumElems/2));
7090 SDValue Upper = DAG.getNode(ISD::BUILD_VECTOR, dl, HVT,
7091 makeArrayRef(&V[NumElems / 2], NumElems/2));
7093 // Recreate the wider vector with the lower and upper part.
7094 if (VT.is256BitVector())
7095 return Concat128BitVectors(Lower, Upper, VT, NumElems, DAG, dl);
7096 return Concat256BitVectors(Lower, Upper, VT, NumElems, DAG, dl);
7099 // Let legalizer expand 2-wide build_vectors.
7100 if (EVTBits == 64) {
7101 if (NumNonZero == 1) {
7102 // One half is zero or undef.
7103 unsigned Idx = countTrailingZeros(NonZeros);
7104 SDValue V2 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT,
7105 Op.getOperand(Idx));
7106 return getShuffleVectorZeroOrUndef(V2, Idx, true, Subtarget, DAG);
7111 // If element VT is < 32 bits, convert it to inserts into a zero vector.
7112 if (EVTBits == 8 && NumElems == 16) {
7113 SDValue V = LowerBuildVectorv16i8(Op, NonZeros,NumNonZero,NumZero, DAG,
7115 if (V.getNode()) return V;
7118 if (EVTBits == 16 && NumElems == 8) {
7119 SDValue V = LowerBuildVectorv8i16(Op, NonZeros,NumNonZero,NumZero, DAG,
7121 if (V.getNode()) return V;
7124 // If element VT is == 32 bits and has 4 elems, try to generate an INSERTPS
7125 if (EVTBits == 32 && NumElems == 4) {
7126 SDValue V = LowerBuildVectorv4x32(Op, DAG, Subtarget, *this);
7131 // If element VT is == 32 bits, turn it into a number of shuffles.
7132 SmallVector<SDValue, 8> V(NumElems);
7133 if (NumElems == 4 && NumZero > 0) {
7134 for (unsigned i = 0; i < 4; ++i) {
7135 bool isZero = !(NonZeros & (1 << i));
7137 V[i] = getZeroVector(VT, Subtarget, DAG, dl);
7139 V[i] = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(i));
7142 for (unsigned i = 0; i < 2; ++i) {
7143 switch ((NonZeros & (0x3 << i*2)) >> (i*2)) {
7146 V[i] = V[i*2]; // Must be a zero vector.
7149 V[i] = getMOVL(DAG, dl, VT, V[i*2+1], V[i*2]);
7152 V[i] = getMOVL(DAG, dl, VT, V[i*2], V[i*2+1]);
7155 V[i] = getUnpackl(DAG, dl, VT, V[i*2], V[i*2+1]);
7160 bool Reverse1 = (NonZeros & 0x3) == 2;
7161 bool Reverse2 = ((NonZeros & (0x3 << 2)) >> 2) == 2;
7165 static_cast<int>(Reverse2 ? NumElems+1 : NumElems),
7166 static_cast<int>(Reverse2 ? NumElems : NumElems+1)
7168 return DAG.getVectorShuffle(VT, dl, V[0], V[1], &MaskVec[0]);
7171 if (Values.size() > 1 && VT.is128BitVector()) {
7172 // Check for a build vector of consecutive loads.
7173 for (unsigned i = 0; i < NumElems; ++i)
7174 V[i] = Op.getOperand(i);
7176 // Check for elements which are consecutive loads.
7177 SDValue LD = EltsFromConsecutiveLoads(VT, V, dl, DAG, false);
7181 // Check for a build vector from mostly shuffle plus few inserting.
7182 SDValue Sh = buildFromShuffleMostly(Op, DAG);
7186 // For SSE 4.1, use insertps to put the high elements into the low element.
7187 if (Subtarget->hasSSE41()) {
7189 if (Op.getOperand(0).getOpcode() != ISD::UNDEF)
7190 Result = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(0));
7192 Result = DAG.getUNDEF(VT);
7194 for (unsigned i = 1; i < NumElems; ++i) {
7195 if (Op.getOperand(i).getOpcode() == ISD::UNDEF) continue;
7196 Result = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Result,
7197 Op.getOperand(i), DAG.getIntPtrConstant(i));
7202 // Otherwise, expand into a number of unpckl*, start by extending each of
7203 // our (non-undef) elements to the full vector width with the element in the
7204 // bottom slot of the vector (which generates no code for SSE).
7205 for (unsigned i = 0; i < NumElems; ++i) {
7206 if (Op.getOperand(i).getOpcode() != ISD::UNDEF)
7207 V[i] = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(i));
7209 V[i] = DAG.getUNDEF(VT);
7212 // Next, we iteratively mix elements, e.g. for v4f32:
7213 // Step 1: unpcklps 0, 2 ==> X: <?, ?, 2, 0>
7214 // : unpcklps 1, 3 ==> Y: <?, ?, 3, 1>
7215 // Step 2: unpcklps X, Y ==> <3, 2, 1, 0>
7216 unsigned EltStride = NumElems >> 1;
7217 while (EltStride != 0) {
7218 for (unsigned i = 0; i < EltStride; ++i) {
7219 // If V[i+EltStride] is undef and this is the first round of mixing,
7220 // then it is safe to just drop this shuffle: V[i] is already in the
7221 // right place, the one element (since it's the first round) being
7222 // inserted as undef can be dropped. This isn't safe for successive
7223 // rounds because they will permute elements within both vectors.
7224 if (V[i+EltStride].getOpcode() == ISD::UNDEF &&
7225 EltStride == NumElems/2)
7228 V[i] = getUnpackl(DAG, dl, VT, V[i], V[i + EltStride]);
7237 // LowerAVXCONCAT_VECTORS - 256-bit AVX can use the vinsertf128 instruction
7238 // to create 256-bit vectors from two other 128-bit ones.
7239 static SDValue LowerAVXCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) {
7241 MVT ResVT = Op.getSimpleValueType();
7243 assert((ResVT.is256BitVector() ||
7244 ResVT.is512BitVector()) && "Value type must be 256-/512-bit wide");
7246 SDValue V1 = Op.getOperand(0);
7247 SDValue V2 = Op.getOperand(1);
7248 unsigned NumElems = ResVT.getVectorNumElements();
7249 if (ResVT.is256BitVector())
7250 return Concat128BitVectors(V1, V2, ResVT, NumElems, DAG, dl);
7252 if (Op.getNumOperands() == 4) {
7253 MVT HalfVT = MVT::getVectorVT(ResVT.getScalarType(),
7254 ResVT.getVectorNumElements()/2);
7255 SDValue V3 = Op.getOperand(2);
7256 SDValue V4 = Op.getOperand(3);
7257 return Concat256BitVectors(Concat128BitVectors(V1, V2, HalfVT, NumElems/2, DAG, dl),
7258 Concat128BitVectors(V3, V4, HalfVT, NumElems/2, DAG, dl), ResVT, NumElems, DAG, dl);
7260 return Concat256BitVectors(V1, V2, ResVT, NumElems, DAG, dl);
7263 static SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) {
7264 MVT LLVM_ATTRIBUTE_UNUSED VT = Op.getSimpleValueType();
7265 assert((VT.is256BitVector() && Op.getNumOperands() == 2) ||
7266 (VT.is512BitVector() && (Op.getNumOperands() == 2 ||
7267 Op.getNumOperands() == 4)));
7269 // AVX can use the vinsertf128 instruction to create 256-bit vectors
7270 // from two other 128-bit ones.
7272 // 512-bit vector may contain 2 256-bit vectors or 4 128-bit vectors
7273 return LowerAVXCONCAT_VECTORS(Op, DAG);
7277 //===----------------------------------------------------------------------===//
7278 // Vector shuffle lowering
7280 // This is an experimental code path for lowering vector shuffles on x86. It is
7281 // designed to handle arbitrary vector shuffles and blends, gracefully
7282 // degrading performance as necessary. It works hard to recognize idiomatic
7283 // shuffles and lower them to optimal instruction patterns without leaving
7284 // a framework that allows reasonably efficient handling of all vector shuffle patterns.
7286 //===----------------------------------------------------------------------===//
7288 /// \brief Tiny helper function to identify a no-op mask.
7290 /// This is a somewhat boring predicate function. It checks whether the mask
7291 /// array input, which is assumed to be a single-input shuffle mask of the kind
7292 /// used by the X86 shuffle instructions (not a fully general
7293 /// ShuffleVectorSDNode mask) requires any shuffles to occur. Both undef and an
7294 /// in-place shuffle are 'no-op's.
7295 static bool isNoopShuffleMask(ArrayRef<int> Mask) {
7296 for (int i = 0, Size = Mask.size(); i < Size; ++i)
7297 if (Mask[i] != -1 && Mask[i] != i)
7302 /// \brief Helper function to classify a mask as a single-input mask.
7304 /// This isn't a generic single-input test because in the vector shuffle
7305 /// lowering we canonicalize single inputs to be the first input operand. This
7306 /// means we can more quickly test for a single input by only checking whether
7307 /// an input from the second operand exists. We also assume that the size of
7308 /// the mask corresponds to the size of the input vectors, which isn't true in the
7309 /// fully general case.
7310 static bool isSingleInputShuffleMask(ArrayRef<int> Mask) {
7312 if (M >= (int)Mask.size())
7317 /// \brief Test whether there are elements crossing 128-bit lanes in this shuffle mask.
7320 /// X86 divides up its shuffles into in-lane and cross-lane shuffle operations
7321 /// and we routinely test for these.
7322 static bool is128BitLaneCrossingShuffleMask(MVT VT, ArrayRef<int> Mask) {
7323 int LaneSize = 128 / VT.getScalarSizeInBits();
7324 int Size = Mask.size();
7325 for (int i = 0; i < Size; ++i)
7326 if (Mask[i] >= 0 && (Mask[i] % Size) / LaneSize != i / LaneSize)
7331 /// \brief Test whether a shuffle mask is equivalent within each 128-bit lane.
7333 /// This checks a shuffle mask to see if it is performing the same
7334 /// 128-bit lane-relative shuffle in each 128-bit lane. This trivially implies
7335 /// that it is also not lane-crossing. It may however involve a blend from the
7336 /// same lane of a second vector.
7338 /// The specific repeated shuffle mask is populated in \p RepeatedMask, as it is
7339 /// non-trivial to compute in the face of undef lanes. The representation is
7340 /// *not* suitable for use with existing 128-bit shuffles as it will contain
7341 /// entries from both V1 and V2 inputs to the wider mask.
7343 is128BitLaneRepeatedShuffleMask(MVT VT, ArrayRef<int> Mask,
7344 SmallVectorImpl<int> &RepeatedMask) {
7345 int LaneSize = 128 / VT.getScalarSizeInBits();
7346 RepeatedMask.resize(LaneSize, -1);
7347 int Size = Mask.size();
7348 for (int i = 0; i < Size; ++i) {
7351 if ((Mask[i] % Size) / LaneSize != i / LaneSize)
7352 // This entry crosses lanes, so there is no way to model this shuffle.
7355 // Ok, handle the in-lane shuffles by detecting if and when they repeat.
7356 if (RepeatedMask[i % LaneSize] == -1)
7357 // This is the first non-undef entry in this slot of a 128-bit lane.
7358 RepeatedMask[i % LaneSize] =
7359 Mask[i] < Size ? Mask[i] % LaneSize : Mask[i] % LaneSize + Size;
7360 else if (RepeatedMask[i % LaneSize] + (i / LaneSize) * LaneSize != Mask[i])
7361 // Found a mismatch with the repeated mask.
7367 // Hide this symbol with an anonymous namespace instead of 'static' so that MSVC
7368 // 2013 will allow us to use it as a non-type template parameter.
7371 /// \brief Implementation of the \c isShuffleEquivalent variadic functor.
7373 /// See its documentation for details.
7374 bool isShuffleEquivalentImpl(ArrayRef<int> Mask, ArrayRef<const int *> Args) {
7375 if (Mask.size() != Args.size())
7377 for (int i = 0, e = Mask.size(); i < e; ++i) {
7378 assert(*Args[i] >= 0 && "Arguments must be positive integers!");
7379 if (Mask[i] != -1 && Mask[i] != *Args[i])
7387 /// \brief Checks whether a shuffle mask is equivalent to an explicit list of arguments.
7390 /// This is a fast way to test a shuffle mask against a fixed pattern:
7392 /// if (isShuffleEquivalent(Mask, 3, 2, 1, 0)) { ... }
7394 /// It returns true if the mask is exactly as wide as the argument list, and
7395 /// each element of the mask is either -1 (signifying undef) or the value given
7396 /// in the argument.
7397 static const VariadicFunction1<
7398 bool, ArrayRef<int>, int, isShuffleEquivalentImpl> isShuffleEquivalent = {};
7400 /// \brief Get a 4-lane 8-bit shuffle immediate for a mask.
7402 /// This helper function produces an 8-bit shuffle immediate corresponding to
7403 /// the ubiquitous shuffle encoding scheme used in x86 instructions for
7404 /// shuffling 4 lanes. It can be used with most of the PSHUF instructions, for example.
7407 /// NB: We rely heavily on "undef" masks preserving the input lane.
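/// For example (illustrative), the mask <3, 2, 1, 0> encodes as
/// 3 | (2 << 2) | (1 << 4) | (0 << 6) == 0x1B, the usual "reverse the four
/// lanes" immediate; undef mask elements default to their identity lane.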
7408 static SDValue getV4X86ShuffleImm8ForMask(ArrayRef<int> Mask,
7409 SelectionDAG &DAG) {
7410 assert(Mask.size() == 4 && "Only 4-lane shuffle masks");
7411 assert(Mask[0] >= -1 && Mask[0] < 4 && "Out of bound mask element!");
7412 assert(Mask[1] >= -1 && Mask[1] < 4 && "Out of bound mask element!");
7413 assert(Mask[2] >= -1 && Mask[2] < 4 && "Out of bound mask element!");
7414 assert(Mask[3] >= -1 && Mask[3] < 4 && "Out of bound mask element!");
7417 Imm |= (Mask[0] == -1 ? 0 : Mask[0]) << 0;
7418 Imm |= (Mask[1] == -1 ? 1 : Mask[1]) << 2;
7419 Imm |= (Mask[2] == -1 ? 2 : Mask[2]) << 4;
7420 Imm |= (Mask[3] == -1 ? 3 : Mask[3]) << 6;
7421 return DAG.getConstant(Imm, MVT::i8);
7424 /// \brief Try to emit a blend instruction for a shuffle.
7426 /// This doesn't do any checks for the availability of instructions for blending
7427 /// these values. It relies on the availability of the X86ISD::BLENDI pattern to
7428 /// be matched in the backend with the type given. What it does check for is
7429 /// that the shuffle mask is in fact a blend.
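/// For example (illustrative), the v4i32 mask <0, 5, 2, 7> takes lanes 0 and 2
/// from V1 and lanes 1 and 3 from V2, so BlendMask becomes 0b1010; a mask such
/// as <1, 5, 2, 7> is rejected because V1's contribution is itself shuffled.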
7430 static SDValue lowerVectorShuffleAsBlend(SDLoc DL, MVT VT, SDValue V1,
7431 SDValue V2, ArrayRef<int> Mask,
7432 const X86Subtarget *Subtarget,
7433 SelectionDAG &DAG) {
7435 unsigned BlendMask = 0;
7436 for (int i = 0, Size = Mask.size(); i < Size; ++i) {
7437 if (Mask[i] >= Size) {
7438 if (Mask[i] != i + Size)
7439 return SDValue(); // Shuffled V2 input!
7440 BlendMask |= 1u << i;
7443 if (Mask[i] >= 0 && Mask[i] != i)
7444 return SDValue(); // Shuffled V1 input!
7446 switch (VT.SimpleTy) {
7451 return DAG.getNode(X86ISD::BLENDI, DL, VT, V1, V2,
7452 DAG.getConstant(BlendMask, MVT::i8));
7456 assert(Subtarget->hasAVX2() && "256-bit integer blends require AVX2!");
7460 // If we have AVX2 it is faster to use VPBLENDD when the shuffle fits into
7461 // that instruction.
7462 if (Subtarget->hasAVX2()) {
7463 // Scale the blend by the number of 32-bit dwords per element.
7464 int Scale = VT.getScalarSizeInBits() / 32;
7466 for (int i = 0, Size = Mask.size(); i < Size; ++i)
7467 if (Mask[i] >= Size)
7468 for (int j = 0; j < Scale; ++j)
7469 BlendMask |= 1u << (i * Scale + j);
7471 MVT BlendVT = VT.getSizeInBits() > 128 ? MVT::v8i32 : MVT::v4i32;
7472 V1 = DAG.getNode(ISD::BITCAST, DL, BlendVT, V1);
7473 V2 = DAG.getNode(ISD::BITCAST, DL, BlendVT, V2);
7474 return DAG.getNode(ISD::BITCAST, DL, VT,
7475 DAG.getNode(X86ISD::BLENDI, DL, BlendVT, V1, V2,
7476 DAG.getConstant(BlendMask, MVT::i8)));
7480 // For integer shuffles we need to expand the mask and cast the inputs to
7481 // v8i16s prior to blending.
7482 int Scale = 8 / VT.getVectorNumElements();
7484 for (int i = 0, Size = Mask.size(); i < Size; ++i)
7485 if (Mask[i] >= Size)
7486 for (int j = 0; j < Scale; ++j)
7487 BlendMask |= 1u << (i * Scale + j);
7489 V1 = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, V1);
7490 V2 = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, V2);
7491 return DAG.getNode(ISD::BITCAST, DL, VT,
7492 DAG.getNode(X86ISD::BLENDI, DL, MVT::v8i16, V1, V2,
7493 DAG.getConstant(BlendMask, MVT::i8)));
7497 assert(Subtarget->hasAVX2() && "256-bit integer blends require AVX2!");
7498 SmallVector<int, 8> RepeatedMask;
7499 if (is128BitLaneRepeatedShuffleMask(MVT::v16i16, Mask, RepeatedMask)) {
7500 // We can lower these with PBLENDW which is mirrored across 128-bit lanes.
7501 assert(RepeatedMask.size() == 8 && "Repeated mask size doesn't match!");
7503 for (int i = 0; i < 8; ++i)
7504 if (RepeatedMask[i] >= 16)
7505 BlendMask |= 1u << i;
7506 return DAG.getNode(X86ISD::BLENDI, DL, MVT::v16i16, V1, V2,
7507 DAG.getConstant(BlendMask, MVT::i8));
7512 assert(Subtarget->hasAVX2() && "256-bit integer blends require AVX2!");
7513 // Scale the blend by the number of bytes per element.
7514 int Scale = VT.getScalarSizeInBits() / 8;
7515 assert(Mask.size() * Scale == 32 && "Not a 256-bit vector!");
7517 // Compute the VSELECT mask. Note that VSELECT is really confusing in the
7518 // mix of LLVM's code generator and the x86 backend. We tell the code
7519 // generator that boolean values in the elements of an x86 vector register
7520 // are -1 for true and 0 for false. We then use the LLVM semantics of 'true'
7521 // mapping a select to operand #1, and 'false' mapping to operand #2. The
7522 // reality in x86 is that vector masks (pre-AVX-512) use only the high bit
7523 // of the element (the remaining are ignored) and 0 in that high bit would
7524 // mean operand #1 while 1 in the high bit would mean operand #2. So while
7525 // the LLVM model for boolean values in vector elements gets the relevant
7526 // bit set, it is set backwards and over constrained relative to x86's
7528 SDValue VSELECTMask[32];
7529 for (int i = 0, Size = Mask.size(); i < Size; ++i)
7530 for (int j = 0; j < Scale; ++j)
7531 VSELECTMask[Scale * i + j] =
7532 Mask[i] < 0 ? DAG.getUNDEF(MVT::i8)
7533 : DAG.getConstant(Mask[i] < Size ? -1 : 0, MVT::i8);
7535 V1 = DAG.getNode(ISD::BITCAST, DL, MVT::v32i8, V1);
7536 V2 = DAG.getNode(ISD::BITCAST, DL, MVT::v32i8, V2);
7538 ISD::BITCAST, DL, VT,
7539 DAG.getNode(ISD::VSELECT, DL, MVT::v32i8,
7540 DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v32i8, VSELECTMask),
7545 llvm_unreachable("Not a supported integer vector type!");
7549 /// \brief Generic routine to lower a shuffle and blend as a decomposed set of
7550 /// unblended shuffles followed by an unshuffled blend.
7552 /// This matches the extremely common pattern for handling combined
7553 /// shuffle+blend operations on newer X86 ISAs where we have very fast blend instructions.
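/// For example (illustrative), the v4i32 mask <2, 5, 0, 7> is decomposed into
///   V1' = shuffle V1, undef, <2, -1, 0, -1>
///   V2' = shuffle V2, undef, <-1, 1, -1, 3>
///   result = shuffle V1', V2', <0, 5, 2, 7>
/// where the final shuffle is a pure per-lane blend.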
7555 static SDValue lowerVectorShuffleAsDecomposedShuffleBlend(SDLoc DL, MVT VT,
7559 SelectionDAG &DAG) {
7560 // Shuffle the input elements into the desired positions in V1 and V2 and
7561 // blend them together.
7562 SmallVector<int, 32> V1Mask(Mask.size(), -1);
7563 SmallVector<int, 32> V2Mask(Mask.size(), -1);
7564 SmallVector<int, 32> BlendMask(Mask.size(), -1);
7565 for (int i = 0, Size = Mask.size(); i < Size; ++i)
7566 if (Mask[i] >= 0 && Mask[i] < Size) {
7567 V1Mask[i] = Mask[i];
7569 } else if (Mask[i] >= Size) {
7570 V2Mask[i] = Mask[i] - Size;
7571 BlendMask[i] = i + Size;
7574 V1 = DAG.getVectorShuffle(VT, DL, V1, DAG.getUNDEF(VT), V1Mask);
7575 V2 = DAG.getVectorShuffle(VT, DL, V2, DAG.getUNDEF(VT), V2Mask);
7576 return DAG.getVectorShuffle(VT, DL, V1, V2, BlendMask);
7579 /// \brief Try to lower a vector shuffle as a byte rotation.
7581 /// SSSE3 has a generic PALIGNR instruction in x86 that will do an arbitrary
7582 /// byte-rotation of the concatenation of two vectors; pre-SSSE3 can use
7583 /// a PSRLDQ/PSLLDQ/POR pattern to get a similar effect. This routine will
7584 /// try to generically lower a vector shuffle through such a pattern. It
7585 /// does not check for the profitability of lowering either as PALIGNR or
7586 /// PSRLDQ/PSLLDQ/POR, only whether the mask is valid to lower in that form.
7587 /// This matches shuffle vectors that look like:
7589 /// v8i16 [11, 12, 13, 14, 15, 0, 1, 2]
7591 /// Essentially it concatenates V1 and V2, shifts right by some number of
7592 /// elements, and takes the low elements as the result. Note that while this is
7593 /// specified as a *right shift* because x86 is little-endian, it is a *left
7594 /// rotate* of the vector lanes.
7596 /// Note that this only handles 128-bit vector widths currently.
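/// For example (illustrative), the v8i16 mask [11, 12, 13, 14, 15, 0, 1, 2]
/// is a rotation by 3 elements; with 2 bytes per element this becomes a
/// 6-byte PALIGNR of the two 128-bit inputs (or the PSRLDQ/PSLLDQ/POR
/// fallback below SSSE3).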
7597 static SDValue lowerVectorShuffleAsByteRotate(SDLoc DL, MVT VT, SDValue V1,
7600 const X86Subtarget *Subtarget,
7601 SelectionDAG &DAG) {
7602 assert(!isNoopShuffleMask(Mask) && "We shouldn't lower no-op shuffles!");
7604 // We need to detect various ways of spelling a rotation:
7605 // [11, 12, 13, 14, 15, 0, 1, 2]
7606 // [-1, 12, 13, 14, -1, -1, 1, -1]
7607 // [-1, -1, -1, -1, -1, -1, 1, 2]
7608 // [ 3, 4, 5, 6, 7, 8, 9, 10]
7609 // [-1, 4, 5, 6, -1, -1, 9, -1]
7610 // [-1, 4, 5, 6, -1, -1, -1, -1]
7613 for (int i = 0, Size = Mask.size(); i < Size; ++i) {
7616 assert(Mask[i] >= 0 && "Only -1 is a valid negative mask element!");
7618 // Based on the mod-Size value of this mask element determine where
7619 // a rotated vector would have started.
7620 int StartIdx = i - (Mask[i] % Size);
7622 // The identity rotation isn't interesting, stop.
7625 // If we found the tail of a vector the rotation must be the missing
7626 // front. If we found the head of a vector, it must be how much of the head.
7627 int CandidateRotation = StartIdx < 0 ? -StartIdx : Size - StartIdx;
7630 Rotation = CandidateRotation;
7631 else if (Rotation != CandidateRotation)
7632 // The rotations don't match, so we can't match this mask.
7635 // Compute which value this mask is pointing at.
7636 SDValue MaskV = Mask[i] < Size ? V1 : V2;
7638 // Compute which of the two target values this index should be assigned to.
7639 // This reflects whether the high elements are remaining or the low elements are remaining.
7641 SDValue &TargetV = StartIdx < 0 ? Hi : Lo;
7643 // Either set up this value if we've not encountered it before, or check
7644 // that it remains consistent.
7647 else if (TargetV != MaskV)
7648 // This may be a rotation, but it pulls from the inputs in some
7649 // unsupported interleaving.
7653 // Check that we successfully analyzed the mask, and normalize the results.
7654 assert(Rotation != 0 && "Failed to locate a viable rotation!");
7655 assert((Lo || Hi) && "Failed to find a rotated input vector!");
7661 assert(VT.getSizeInBits() == 128 &&
7662 "Rotate-based lowering only supports 128-bit lowering!");
7663 assert(Mask.size() <= 16 &&
7664 "Can shuffle at most 16 bytes in a 128-bit vector!");
7666 // The actual rotate instruction rotates bytes, so we need to scale the
7667 // rotation based on how many bytes are in the vector.
7668 int Scale = 16 / Mask.size();
7670 // SSSE3 targets can use the palignr instruction
7671 if (Subtarget->hasSSSE3()) {
7672 // Cast the inputs to v16i8 to match PALIGNR.
7673 Lo = DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, Lo);
7674 Hi = DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, Hi);
7676 return DAG.getNode(ISD::BITCAST, DL, VT,
7677 DAG.getNode(X86ISD::PALIGNR, DL, MVT::v16i8, Hi, Lo,
7678 DAG.getConstant(Rotation * Scale, MVT::i8)));
7681 // Default SSE2 implementation
7682 int LoByteShift = 16 - Rotation * Scale;
7683 int HiByteShift = Rotation * Scale;
7685 // Cast the inputs to v2i64 to match PSLLDQ/PSRLDQ.
7686 Lo = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, Lo);
7687 Hi = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, Hi);
7689 SDValue LoShift = DAG.getNode(X86ISD::VSHLDQ, DL, MVT::v2i64, Lo,
7690 DAG.getConstant(8 * LoByteShift, MVT::i8));
7691 SDValue HiShift = DAG.getNode(X86ISD::VSRLDQ, DL, MVT::v2i64, Hi,
7692 DAG.getConstant(8 * HiByteShift, MVT::i8));
7693 return DAG.getNode(ISD::BITCAST, DL, VT,
7694 DAG.getNode(ISD::OR, DL, MVT::v2i64, LoShift, HiShift));
7697 /// \brief Compute whether each element of a shuffle is zeroable.
7699 /// A "zeroable" vector shuffle element is one which can be lowered to zero.
7700 /// Either it is an undef element in the shuffle mask, the element of the input
7701 /// referenced is undef, or the element of the input referenced is known to be
7702 /// zero. Many x86 shuffles can zero lanes cheaply and we often want to handle
7703 /// as many lanes with this technique as possible to simplify the remaining shuffle.
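/// For example (illustrative), with V1 = build_vector x, 0, undef, y and V2 an
/// all-zeros vector, the v4i32 mask <0, 1, 2, 7> has elements 1, 2 and 3
/// zeroable: lane 1 reads a zero constant, lane 2 reads an undef element, and
/// lane 3 reads the all-zeros V2.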
7705 static SmallBitVector computeZeroableShuffleElements(ArrayRef<int> Mask,
7706 SDValue V1, SDValue V2) {
7707 SmallBitVector Zeroable(Mask.size(), false);
7709 bool V1IsZero = ISD::isBuildVectorAllZeros(V1.getNode());
7710 bool V2IsZero = ISD::isBuildVectorAllZeros(V2.getNode());
7712 for (int i = 0, Size = Mask.size(); i < Size; ++i) {
7714 // Handle the easy cases.
7715 if (M < 0 || (M >= 0 && M < Size && V1IsZero) || (M >= Size && V2IsZero)) {
7720 // If this is an index into a build_vector node, dig out the input value and check whether it is undef or a known zero.
7722 SDValue V = M < Size ? V1 : V2;
7723 if (V.getOpcode() != ISD::BUILD_VECTOR)
7726 SDValue Input = V.getOperand(M % Size);
7727 // The UNDEF opcode check really should be dead code here, but not quite
7728 // worth asserting on (it isn't invalid, just unexpected).
7729 if (Input.getOpcode() == ISD::UNDEF || X86::isZeroNode(Input))
7736 /// \brief Try to lower a vector shuffle as a byte shift (shifts in zeros).
7738 /// Attempts to match a shuffle mask against the PSRLDQ and PSLLDQ SSE2
7739 /// byte-shift instructions. The mask must consist of a shifted sequential
7740 /// shuffle from one of the input vectors and zeroable elements for the
7741 /// remaining 'shifted in' elements.
7743 /// Note that this only handles 128-bit vector widths currently.
7744 static SDValue lowerVectorShuffleAsByteShift(SDLoc DL, MVT VT, SDValue V1,
7745 SDValue V2, ArrayRef<int> Mask,
7746 SelectionDAG &DAG) {
7747 assert(!isNoopShuffleMask(Mask) && "We shouldn't lower no-op shuffles!");
7749 SmallBitVector Zeroable = computeZeroableShuffleElements(Mask, V1, V2);
7751 int Size = Mask.size();
7752 int Scale = 16 / Size;
7754 for (int Shift = 1; Shift < Size; Shift++) {
7755 int ByteShift = Shift * Scale;
7757 // PSRLDQ : (little-endian) right byte shift
7758 // [ 5, 6, 7, zz, zz, zz, zz, zz]
7759 // [ -1, 5, 6, 7, zz, zz, zz, zz]
7760 // [ 1, 2, -1, -1, -1, -1, zz, zz]
7761 bool ZeroableRight = true;
7762 for (int i = Size - Shift; i < Size; i++) {
7763 ZeroableRight &= Zeroable[i];
7766 if (ZeroableRight) {
7767 bool ValidShiftRight1 =
7768 isSequentialOrUndefInRange(Mask, 0, Size - Shift, Shift);
7769 bool ValidShiftRight2 =
7770 isSequentialOrUndefInRange(Mask, 0, Size - Shift, Size + Shift);
7772 if (ValidShiftRight1 || ValidShiftRight2) {
7773 // Cast the inputs to v2i64 to match PSRLDQ.
7774 SDValue &TargetV = ValidShiftRight1 ? V1 : V2;
7775 SDValue V = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, TargetV);
7776 SDValue Shifted = DAG.getNode(X86ISD::VSRLDQ, DL, MVT::v2i64, V,
7777 DAG.getConstant(ByteShift * 8, MVT::i8));
7778 return DAG.getNode(ISD::BITCAST, DL, VT, Shifted);
7782 // PSLLDQ : (little-endian) left byte shift
7783 // [ zz, 0, 1, 2, 3, 4, 5, 6]
7784 // [ zz, zz, -1, -1, 2, 3, 4, -1]
7785 // [ zz, zz, zz, zz, zz, zz, -1, 1]
7786 bool ZeroableLeft = true;
7787 for (int i = 0; i < Shift; i++) {
7788 ZeroableLeft &= Zeroable[i];
7792 bool ValidShiftLeft1 =
7793 isSequentialOrUndefInRange(Mask, Shift, Size - Shift, 0);
7794 bool ValidShiftLeft2 =
7795 isSequentialOrUndefInRange(Mask, Shift, Size - Shift, Size);
7797 if (ValidShiftLeft1 || ValidShiftLeft2) {
7798 // Cast the inputs to v2i64 to match PSLLDQ.
7799 SDValue &TargetV = ValidShiftLeft1 ? V1 : V2;
7800 SDValue V = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, TargetV);
7801 SDValue Shifted = DAG.getNode(X86ISD::VSHLDQ, DL, MVT::v2i64, V,
7802 DAG.getConstant(ByteShift * 8, MVT::i8));
7803 return DAG.getNode(ISD::BITCAST, DL, VT, Shifted);
7811 /// \brief Try to lower a vector shuffle as a bit shift (shifts in zeros).
7813 /// Attempts to match a shuffle mask against the PSRL(W/D/Q) and PSLL(W/D/Q)
7814 /// SSE2 and AVX2 logical bit-shift instructions. The function matches
7815 /// elements from one of the input vectors shuffled to the left or right
7816 /// with zeroable elements 'shifted in'.
7817 static SDValue lowerVectorShuffleAsBitShift(SDLoc DL, MVT VT, SDValue V1,
7818 SDValue V2, ArrayRef<int> Mask,
7819 SelectionDAG &DAG) {
7820 SmallBitVector Zeroable = computeZeroableShuffleElements(Mask, V1, V2);
7821 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
7823 int Size = Mask.size();
7824 assert(Size == VT.getVectorNumElements() && "Unexpected mask size");
7826 // PSRL : (little-endian) right bit shift.
// [  1, zz,  3, zz]
// [ -1, -1,  7, zz]
7829 // PSHL : (little-endian) left bit shift.
// [ zz, 0, zz,  2 ]
7831 // [ -1, 4, zz, -1 ]
7832 auto MatchBitShift = [&](int Shift, int Scale) -> SDValue {
7833 MVT ShiftSVT = MVT::getIntegerVT(VT.getScalarSizeInBits() * Scale);
7834 MVT ShiftVT = MVT::getVectorVT(ShiftSVT, Size / Scale);
7835 assert(TLI.isTypeLegal(ShiftVT) && "Illegal integer vector type");
7837 bool MatchLeft = true, MatchRight = true;
7838 for (int i = 0; i != Size; i += Scale) {
7839 for (int j = 0; j != Shift; j++) {
7840 MatchLeft &= Zeroable[i + j];
7842 for (int j = Scale - Shift; j != Scale; j++) {
7843 MatchRight &= Zeroable[i + j];
7846 if (!(MatchLeft || MatchRight))
7849 bool MatchV1 = true, MatchV2 = true;
7850 for (int i = 0; i != Size; i += Scale) {
7851 unsigned Pos = MatchLeft ? i + Shift : i;
7852 unsigned Low = MatchLeft ? i : i + Shift;
7853 unsigned Len = Scale - Shift;
7854 MatchV1 &= isSequentialOrUndefInRange(Mask, Pos, Len, Low);
7855 MatchV2 &= isSequentialOrUndefInRange(Mask, Pos, Len, Low + Size);
7857 if (!(MatchV1 || MatchV2))
7860 // Cast the inputs to ShiftVT to match VSRLI/VSHLI and back again.
7861 unsigned OpCode = MatchLeft ? X86ISD::VSHLI : X86ISD::VSRLI;
7862 int ShiftAmt = Shift * VT.getScalarSizeInBits();
7863 SDValue V = MatchV1 ? V1 : V2;
7864 V = DAG.getNode(ISD::BITCAST, DL, ShiftVT, V);
7865 V = DAG.getNode(OpCode, DL, ShiftVT, V, DAG.getConstant(ShiftAmt, MVT::i8));
7866 return DAG.getNode(ISD::BITCAST, DL, VT, V);
7869 // SSE/AVX supports logical shifts up to 64-bit integers - so we can just
7870 // keep doubling the size of the integer elements up to that. We can
7871 // then shift the elements of the integer vector by whole multiples of
7872 // their width within the elements of the larger integer vector. Test each
7873 // multiple to see if we can find a match with the moved element indices
7874 // and that the shifted in elements are all zeroable.
7875 for (int Scale = 2; Scale * VT.getScalarSizeInBits() <= 64; Scale *= 2)
7876 for (int Shift = 1; Shift != Scale; Shift++)
7877 if (SDValue BitShift = MatchBitShift(Shift, Scale))
7884 /// \brief Lower a vector shuffle as a zero or any extension.
7886 /// Given a specific number of elements, element bit width, and extension
7887 /// stride, produce either a zero or any extension based on the available
7888 /// features of the subtarget.
7889 static SDValue lowerVectorShuffleAsSpecificZeroOrAnyExtend(
7890 SDLoc DL, MVT VT, int Scale, bool AnyExt, SDValue InputV,
7891 const X86Subtarget *Subtarget, SelectionDAG &DAG) {
7892 assert(Scale > 1 && "Need a scale to extend.");
7893 int NumElements = VT.getVectorNumElements();
7894 int EltBits = VT.getScalarSizeInBits();
7895 assert((EltBits == 8 || EltBits == 16 || EltBits == 32) &&
7896 "Only 8, 16, and 32 bit elements can be extended.");
7897 assert(Scale * EltBits <= 64 && "Cannot zero extend past 64 bits.");
7899 // Found a valid zext mask! Try various lowering strategies based on the
7900 // input type and available ISA extensions.
7901 if (Subtarget->hasSSE41()) {
7902 MVT ExtVT = MVT::getVectorVT(MVT::getIntegerVT(EltBits * Scale),
7903 NumElements / Scale);
7904 return DAG.getNode(ISD::BITCAST, DL, VT,
7905 DAG.getNode(X86ISD::VZEXT, DL, ExtVT, InputV));
7908 // For any extends we can cheat for larger element sizes and use shuffle
7909 // instructions that can fold with a load and/or copy.
7910 if (AnyExt && EltBits == 32) {
7911 int PSHUFDMask[4] = {0, -1, 1, -1};
7913 ISD::BITCAST, DL, VT,
7914 DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32,
7915 DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, InputV),
7916 getV4X86ShuffleImm8ForMask(PSHUFDMask, DAG)));
7918 if (AnyExt && EltBits == 16 && Scale > 2) {
7919 int PSHUFDMask[4] = {0, -1, 0, -1};
7920 InputV = DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32,
7921 DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, InputV),
7922 getV4X86ShuffleImm8ForMask(PSHUFDMask, DAG));
7923 int PSHUFHWMask[4] = {1, -1, -1, -1};
7925 ISD::BITCAST, DL, VT,
7926 DAG.getNode(X86ISD::PSHUFHW, DL, MVT::v8i16,
7927 DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, InputV),
7928 getV4X86ShuffleImm8ForMask(PSHUFHWMask, DAG)));
7931 // If this would require more than 2 unpack instructions to expand, use
7932 // pshufb when available. We can only use more than 2 unpack instructions
7933 // when zero extending i8 elements which also makes it easier to use pshufb.
7934 if (Scale > 4 && EltBits == 8 && Subtarget->hasSSSE3()) {
7935 assert(NumElements == 16 && "Unexpected byte vector width!");
7936 SDValue PSHUFBMask[16];
7937 for (int i = 0; i < 16; ++i)
7939 DAG.getConstant((i % Scale == 0) ? i / Scale : 0x80, MVT::i8);
7940 InputV = DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, InputV);
7941 return DAG.getNode(ISD::BITCAST, DL, VT,
7942 DAG.getNode(X86ISD::PSHUFB, DL, MVT::v16i8, InputV,
7943 DAG.getNode(ISD::BUILD_VECTOR, DL,
7944 MVT::v16i8, PSHUFBMask)));
7947 // Otherwise emit a sequence of unpacks.
7949 MVT InputVT = MVT::getVectorVT(MVT::getIntegerVT(EltBits), NumElements);
7950 SDValue Ext = AnyExt ? DAG.getUNDEF(InputVT)
7951 : getZeroVector(InputVT, Subtarget, DAG, DL);
7952 InputV = DAG.getNode(ISD::BITCAST, DL, InputVT, InputV);
7953 InputV = DAG.getNode(X86ISD::UNPCKL, DL, InputVT, InputV, Ext);
7957 } while (Scale > 1);
7958 return DAG.getNode(ISD::BITCAST, DL, VT, InputV);
7961 /// \brief Try to lower a vector shuffle as a zero extension on any microarch.
7963 /// This routine will try to do everything in its power to cleverly lower
7964 /// a shuffle which happens to match the pattern of a zero extend. It doesn't
7965 /// check for the profitability of this lowering; it tries to aggressively
7966 /// match this pattern. It will use all of the micro-architectural details it
7967 /// can to emit an efficient lowering. It handles both blends with all-zero
7968 /// inputs to explicitly zero-extend and undef-lanes (sometimes undef due to
7969 /// masking out later).
7971 /// The reason we have dedicated lowering for zext-style shuffles is that they
7972 /// are both incredibly common and often quite performance sensitive.
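/// As an illustrative example, the v4i32 mask <0, Z, 1, Z> (Z = zeroable)
/// places source elements 0 and 1 at even positions with zeroable odd
/// positions, i.e. a zero extension of the two low i32 elements to i64
/// (a single PMOVZXDQ on SSE4.1 targets).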
7973 static SDValue lowerVectorShuffleAsZeroOrAnyExtend(
7974 SDLoc DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
7975 const X86Subtarget *Subtarget, SelectionDAG &DAG) {
7976 SmallBitVector Zeroable = computeZeroableShuffleElements(Mask, V1, V2);
7978 int Bits = VT.getSizeInBits();
7979 int NumElements = VT.getVectorNumElements();
7980 assert(VT.getScalarSizeInBits() <= 32 &&
7981 "Exceeds 32-bit integer zero extension limit");
7982 assert((int)Mask.size() == NumElements && "Unexpected shuffle mask size");
7984 // Define a helper function to check a particular ext-scale and lower to it if valid.
7986 auto Lower = [&](int Scale) -> SDValue {
7989 for (int i = 0; i < NumElements; ++i) {
7991 continue; // Valid anywhere but doesn't tell us anything.
7992 if (i % Scale != 0) {
7993 // Each of the extended elements needs to be zeroable.
7997 // We no longer are in the anyext case.
8002 // Each of the base elements needs to be consecutive indices into the
8003 // same input vector.
8004 SDValue V = Mask[i] < NumElements ? V1 : V2;
8007 else if (InputV != V)
8008 return SDValue(); // Flip-flopping inputs.
8010 if (Mask[i] % NumElements != i / Scale)
8011 return SDValue(); // Non-consecutive strided elements.
8014 // If we fail to find an input, we have a zero-shuffle which should always
8015 // have already been handled.
8016 // FIXME: Maybe handle this here in case during blending we end up with one?
8020 return lowerVectorShuffleAsSpecificZeroOrAnyExtend(
8021 DL, VT, Scale, AnyExt, InputV, Subtarget, DAG);
8024 // The widest scale possible for extending is to a 64-bit integer.
8025 assert(Bits % 64 == 0 &&
8026 "The number of bits in a vector must be divisible by 64 on x86!");
8027 int NumExtElements = Bits / 64;
8029 // Each iteration, try extending the elements half as much, but into twice as many elements.
8031 for (; NumExtElements < NumElements; NumExtElements *= 2) {
8032 assert(NumElements % NumExtElements == 0 &&
8033 "The input vector size must be divisible by the extended size.");
8034 if (SDValue V = Lower(NumElements / NumExtElements))
8038 // General extends failed, but 128-bit vectors may be able to use MOVQ.
8042 // Returns one of the source operands if the shuffle can be reduced to a
8043 // MOVQ, copying the lower 64-bits and zero-extending to the upper 64-bits.
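// As an illustrative example, the v4i32 mask <0, 1, Z, Z> copies the low
// 64 bits of V1 and zeroes the high 64 bits, which is exactly MOVQ.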
8044 auto CanZExtLowHalf = [&]() {
8045 for (int i = NumElements / 2; i != NumElements; i++)
8048 if (isSequentialOrUndefInRange(Mask, 0, NumElements / 2, 0))
8050 if (isSequentialOrUndefInRange(Mask, 0, NumElements / 2, NumElements))
8055 if (SDValue V = CanZExtLowHalf()) {
8056 V = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, V);
8057 V = DAG.getNode(X86ISD::VZEXT_MOVL, DL, MVT::v2i64, V);
8058 return DAG.getNode(ISD::BITCAST, DL, VT, V);
8061 // No viable ext lowering found.
8065 /// \brief Try to get a scalar value for a specific element of a vector.
8067 /// Looks through BUILD_VECTOR and SCALAR_TO_VECTOR nodes to find a scalar.
8068 static SDValue getScalarValueForVectorElement(SDValue V, int Idx,
8069 SelectionDAG &DAG) {
8070 MVT VT = V.getSimpleValueType();
8071 MVT EltVT = VT.getVectorElementType();
8072 while (V.getOpcode() == ISD::BITCAST)
8073 V = V.getOperand(0);
8074 // If the bitcasts shift the element size, we can't extract an equivalent element from it.
8076 MVT NewVT = V.getSimpleValueType();
8077 if (!NewVT.isVector() || NewVT.getScalarSizeInBits() != VT.getScalarSizeInBits())
8080 if (V.getOpcode() == ISD::BUILD_VECTOR ||
8081 (Idx == 0 && V.getOpcode() == ISD::SCALAR_TO_VECTOR))
8082 return DAG.getNode(ISD::BITCAST, SDLoc(V), EltVT, V.getOperand(Idx));
8087 /// \brief Helper to test for a load that can be folded with x86 shuffles.
8089 /// This is particularly important because the set of instructions varies
8090 /// significantly based on whether the operand is a load or not.
8091 static bool isShuffleFoldableLoad(SDValue V) {
8092 while (V.getOpcode() == ISD::BITCAST)
8093 V = V.getOperand(0);
8095 return ISD::isNON_EXTLoad(V.getNode());
8098 /// \brief Try to lower insertion of a single element into a zero vector.
8100 /// This is a common pattern for which we have especially efficient lowering
8101 /// patterns across all subtarget feature sets.
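/// As an illustrative example, the v4f32 mask <4, Z, Z, Z> (Z = zeroable)
/// inserts the low element of V2 into lane 0 of an otherwise zero vector,
/// which can be emitted as a single zero-extending scalar move.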
8102 static SDValue lowerVectorShuffleAsElementInsertion(
8103 MVT VT, SDLoc DL, SDValue V1, SDValue V2, ArrayRef<int> Mask,
8104 const X86Subtarget *Subtarget, SelectionDAG &DAG) {
8105 SmallBitVector Zeroable = computeZeroableShuffleElements(Mask, V1, V2);
8107 MVT EltVT = VT.getVectorElementType();
8109 int V2Index = std::find_if(Mask.begin(), Mask.end(),
8110 [&Mask](int M) { return M >= (int)Mask.size(); }) -
8112 bool IsV1Zeroable = true;
8113 for (int i = 0, Size = Mask.size(); i < Size; ++i)
8114 if (i != V2Index && !Zeroable[i]) {
8115 IsV1Zeroable = false;
8119 // Check for a single input from a SCALAR_TO_VECTOR node.
8120 // FIXME: All of this should be canonicalized into INSERT_VECTOR_ELT and
8121 // all the smarts here sunk into that routine. However, the current
8122 // lowering of BUILD_VECTOR makes that nearly impossible until the old
8123 // vector shuffle lowering is dead.
8124 if (SDValue V2S = getScalarValueForVectorElement(
8125 V2, Mask[V2Index] - Mask.size(), DAG)) {
8126 // We need to zext the scalar if it is smaller than an i32.
8127 V2S = DAG.getNode(ISD::BITCAST, DL, EltVT, V2S);
8128 if (EltVT == MVT::i8 || EltVT == MVT::i16) {
8129 // Using zext to expand a narrow element won't work for non-zero elements.
8134 // Zero-extend directly to i32.
8136 V2S = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i32, V2S);
8138 V2 = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, ExtVT, V2S);
8139 } else if (Mask[V2Index] != (int)Mask.size() || EltVT == MVT::i8 ||
8140 EltVT == MVT::i16) {
8141 // Either not inserting from the low element of the input or the input
8142 // element size is too small to use VZEXT_MOVL to clear the high bits.
8146 if (!IsV1Zeroable) {
8147 // If V1 can't be treated as a zero vector we have fewer options to lower
8148 // this. We can't support integer vectors or non-zero targets cheaply, and
8149 // the V1 elements can't be permuted in any way.
8150 assert(VT == ExtVT && "Cannot change extended type when non-zeroable!");
8151 if (!VT.isFloatingPoint() || V2Index != 0)
8153 SmallVector<int, 8> V1Mask(Mask.begin(), Mask.end());
8154 V1Mask[V2Index] = -1;
8155 if (!isNoopShuffleMask(V1Mask))
8157 // This is essentially a special case blend operation, but if we have
8158 // general purpose blend operations, they are always faster. Bail and let
8159 // the rest of the lowering handle these as blends.
8160 if (Subtarget->hasSSE41())
8163 // Otherwise, use MOVSD or MOVSS.
8164 assert((EltVT == MVT::f32 || EltVT == MVT::f64) &&
8165 "Only two types of floating point element types to handle!");
8166 return DAG.getNode(EltVT == MVT::f32 ? X86ISD::MOVSS : X86ISD::MOVSD, DL,
8170 V2 = DAG.getNode(X86ISD::VZEXT_MOVL, DL, ExtVT, V2);
8172 V2 = DAG.getNode(ISD::BITCAST, DL, VT, V2);
8175 // If we have 4 or fewer lanes we can cheaply shuffle the element into
8176 // the desired position. Otherwise it is more efficient to do a vector
8177 // shift left. We know that we can do a vector shift left because all
8178 // the inputs are zero.
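// As an illustrative example, inserting the scalar into lane 2 of an
// otherwise zero v8i16 shifts the zero-extended scalar left by two i16
// lanes, i.e. a PSLLDQ by 4 bytes.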
8179 if (VT.isFloatingPoint() || VT.getVectorNumElements() <= 4) {
8180 SmallVector<int, 4> V2Shuffle(Mask.size(), 1);
8181 V2Shuffle[V2Index] = 0;
8182 V2 = DAG.getVectorShuffle(VT, DL, V2, DAG.getUNDEF(VT), V2Shuffle);
8184 V2 = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, V2);
8186 X86ISD::VSHLDQ, DL, MVT::v2i64, V2,
8188 V2Index * EltVT.getSizeInBits(),
8189 DAG.getTargetLoweringInfo().getScalarShiftAmountTy(MVT::v2i64)));
8190 V2 = DAG.getNode(ISD::BITCAST, DL, VT, V2);
8196 /// \brief Try to lower broadcast of a single element.
8198 /// For convenience, this code also bundles all of the subtarget feature set
8199 /// filtering. While a little annoying to re-dispatch on type here, there isn't
8200 /// a convenient way to factor it out.
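/// As an illustrative example, the v4f32 mask <0, 0, 0, 0> broadcasts
/// element 0; when that element is a scalar load, this folds into a single
/// memory-operand VBROADCASTSS on AVX targets.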
8201 static SDValue lowerVectorShuffleAsBroadcast(MVT VT, SDLoc DL, SDValue V,
8203 const X86Subtarget *Subtarget,
8204 SelectionDAG &DAG) {
8205 if (!Subtarget->hasAVX())
8207 if (VT.isInteger() && !Subtarget->hasAVX2())
8210 // Check that the mask is a broadcast.
8211 int BroadcastIdx = -1;
8213 if (M >= 0 && BroadcastIdx == -1)
8215 else if (M >= 0 && M != BroadcastIdx)
8218 assert(BroadcastIdx < (int)Mask.size() && "We only expect to be called with "
8219 "a sorted mask where the broadcast "
8222 // Go up the chain of (vector) values to try and find a scalar load that
8223 // we can combine with the broadcast.
8225 switch (V.getOpcode()) {
8226 case ISD::CONCAT_VECTORS: {
8227 int OperandSize = Mask.size() / V.getNumOperands();
8228 V = V.getOperand(BroadcastIdx / OperandSize);
8229 BroadcastIdx %= OperandSize;
8233 case ISD::INSERT_SUBVECTOR: {
8234 SDValue VOuter = V.getOperand(0), VInner = V.getOperand(1);
8235 auto ConstantIdx = dyn_cast<ConstantSDNode>(V.getOperand(2));
8239 int BeginIdx = (int)ConstantIdx->getZExtValue();
8241 BeginIdx + (int)VInner.getValueType().getVectorNumElements();
8242 if (BroadcastIdx >= BeginIdx && BroadcastIdx < EndIdx) {
8243 BroadcastIdx -= BeginIdx;
8254 // Check if this is a broadcast of a scalar. We special case lowering
8255 // for scalars so that we can more effectively fold with loads.
8256 if (V.getOpcode() == ISD::BUILD_VECTOR ||
8257 (V.getOpcode() == ISD::SCALAR_TO_VECTOR && BroadcastIdx == 0)) {
8258 V = V.getOperand(BroadcastIdx);
8260 // If the scalar isn't a load we can't broadcast from it in AVX1, only with AVX2.
8262 if (!Subtarget->hasAVX2() && !isShuffleFoldableLoad(V))
8264 } else if (BroadcastIdx != 0 || !Subtarget->hasAVX2()) {
8265 // We can't broadcast from a vector register w/o AVX2, and we can only
8266 // broadcast from the zero-element of a vector register.
8270 return DAG.getNode(X86ISD::VBROADCAST, DL, VT, V);
8273 // Check for whether we can use INSERTPS to perform the shuffle. We only use
8274 // INSERTPS when the V1 elements are already in the correct locations
8275 // because otherwise we can just always use two SHUFPS instructions which
8276 // are much smaller to encode than a SHUFPS and an INSERTPS. We can also
8277 // perform INSERTPS if a single V1 element is out of place and all V2
8278 // elements are zeroable.
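// The INSERTPS immediate encodes the source element of the second operand in
// bits [7:6], the destination element in bits [5:4], and a zero mask over the
// destination lanes in bits [3:0]; this is how InsertPSMask is assembled below.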
8279 static SDValue lowerVectorShuffleAsInsertPS(SDValue Op, SDValue V1, SDValue V2,
8281 SelectionDAG &DAG) {
8282 assert(Op.getSimpleValueType() == MVT::v4f32 && "Bad shuffle type!");
8283 assert(V1.getSimpleValueType() == MVT::v4f32 && "Bad operand type!");
8284 assert(V2.getSimpleValueType() == MVT::v4f32 && "Bad operand type!");
8285 assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");
8287 SmallBitVector Zeroable = computeZeroableShuffleElements(Mask, V1, V2);
8290 int V1DstIndex = -1;
8291 int V2DstIndex = -1;
8292 bool V1UsedInPlace = false;
8294 for (int i = 0; i < 4; i++) {
8295 // Synthesize a zero mask from the zeroable elements (includes undefs).
8301 // Flag if we use any V1 inputs in place.
8303 V1UsedInPlace = true;
8307 // We can only insert a single non-zeroable element.
8308 if (V1DstIndex != -1 || V2DstIndex != -1)
8312 // V1 input out of place for insertion.
8315 // V2 input for insertion.
8320 // Don't bother if we have no (non-zeroable) element for insertion.
8321 if (V1DstIndex == -1 && V2DstIndex == -1)
8324 // Determine element insertion src/dst indices. The src index is from the
8325 // start of the inserted vector, not the start of the concatenated vector.
8326 unsigned V2SrcIndex = 0;
8327 if (V1DstIndex != -1) {
8328 // If we have a V1 input out of place, we use V1 as the V2 element insertion
8329 // and don't use the original V2 at all.
8330 V2SrcIndex = Mask[V1DstIndex];
8331 V2DstIndex = V1DstIndex;
8334 V2SrcIndex = Mask[V2DstIndex] - 4;
8337 // If no V1 inputs are used in place, then the result is created only from
8338 // the zero mask and the V2 insertion - so remove V1 dependency.
8340 V1 = DAG.getUNDEF(MVT::v4f32);
8342 unsigned InsertPSMask = V2SrcIndex << 6 | V2DstIndex << 4 | ZMask;
8343 assert((InsertPSMask & ~0xFFu) == 0 && "Invalid mask!");
8345 // Insert the V2 element into the desired position.
8347 return DAG.getNode(X86ISD::INSERTPS, DL, MVT::v4f32, V1, V2,
8348 DAG.getConstant(InsertPSMask, MVT::i8));
8351 /// \brief Handle lowering of 2-lane 64-bit floating point shuffles.
8353 /// This is the basis function for the 2-lane 64-bit shuffles as we have full
8354 /// support for floating point shuffles but not integer shuffles. These
8355 /// instructions will incur a domain crossing penalty on some chips though so
8356 /// it is better to avoid lowering through this for integer vectors where possible.
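/// As an illustrative example, the single-input mask <0, 0> lowers to MOVDDUP
/// on SSE3 targets, while <1, 1> lowers to a SHUFPD (or VPERMILPD) with
/// immediate 3.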
8358 static SDValue lowerV2F64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
8359 const X86Subtarget *Subtarget,
8360 SelectionDAG &DAG) {
8362 assert(Op.getSimpleValueType() == MVT::v2f64 && "Bad shuffle type!");
8363 assert(V1.getSimpleValueType() == MVT::v2f64 && "Bad operand type!");
8364 assert(V2.getSimpleValueType() == MVT::v2f64 && "Bad operand type!");
8365 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
8366 ArrayRef<int> Mask = SVOp->getMask();
8367 assert(Mask.size() == 2 && "Unexpected mask size for v2 shuffle!");
8369 if (isSingleInputShuffleMask(Mask)) {
8370 // Use low duplicate instructions for masks that match their pattern.
8371 if (Subtarget->hasSSE3())
8372 if (isShuffleEquivalent(Mask, 0, 0))
8373 return DAG.getNode(X86ISD::MOVDDUP, DL, MVT::v2f64, V1);
8375 // Straight shuffle of a single input vector. Simulate this by using the
8376 // single input as both of the "inputs" to this instruction.
8377 unsigned SHUFPDMask = (Mask[0] == 1) | ((Mask[1] == 1) << 1);
8379 if (Subtarget->hasAVX()) {
8380 // If we have AVX, we can use VPERMILPS which will allow folding a load
8381 // into the shuffle.
8382 return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v2f64, V1,
8383 DAG.getConstant(SHUFPDMask, MVT::i8));
8386 return DAG.getNode(X86ISD::SHUFP, SDLoc(Op), MVT::v2f64, V1, V1,
8387 DAG.getConstant(SHUFPDMask, MVT::i8));
8389 assert(Mask[0] >= 0 && Mask[0] < 2 && "Non-canonicalized blend!");
8390 assert(Mask[1] >= 2 && "Non-canonicalized blend!");
8392 // Use dedicated unpack instructions for masks that match their pattern.
8393 if (isShuffleEquivalent(Mask, 0, 2))
8394 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v2f64, V1, V2);
8395 if (isShuffleEquivalent(Mask, 1, 3))
8396 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v2f64, V1, V2);
8398 // If we have a single input, insert that into V1 if we can do so cheaply.
8399 if ((Mask[0] >= 2) + (Mask[1] >= 2) == 1) {
8400 if (SDValue Insertion = lowerVectorShuffleAsElementInsertion(
8401 MVT::v2f64, DL, V1, V2, Mask, Subtarget, DAG))
8403 // Try inverting the insertion since for v2 masks it is easy to do and we
8404 // can't reliably sort the mask one way or the other.
8405 int InverseMask[2] = {Mask[0] < 0 ? -1 : (Mask[0] ^ 2),
8406 Mask[1] < 0 ? -1 : (Mask[1] ^ 2)};
8407 if (SDValue Insertion = lowerVectorShuffleAsElementInsertion(
8408 MVT::v2f64, DL, V2, V1, InverseMask, Subtarget, DAG))
8412 // Try to use one of the special instruction patterns to handle two common
8413 // blend patterns if a zero-blend above didn't work.
8414 if (isShuffleEquivalent(Mask, 0, 3) || isShuffleEquivalent(Mask, 1, 3))
8415 if (SDValue V1S = getScalarValueForVectorElement(V1, Mask[0], DAG))
8416 // We can either use a special instruction to load over the low double or
8417 // to move just the low double.
8419 isShuffleFoldableLoad(V1S) ? X86ISD::MOVLPD : X86ISD::MOVSD,
8421 DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v2f64, V1S));
8423 if (Subtarget->hasSSE41())
8424 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v2f64, V1, V2, Mask,
8428 unsigned SHUFPDMask = (Mask[0] == 1) | (((Mask[1] - 2) == 1) << 1);
8429 return DAG.getNode(X86ISD::SHUFP, SDLoc(Op), MVT::v2f64, V1, V2,
8430 DAG.getConstant(SHUFPDMask, MVT::i8));
8433 /// \brief Handle lowering of 2-lane 64-bit integer shuffles.
8435 /// Tries to lower a 2-lane 64-bit shuffle using shuffle operations provided by
8436 /// the integer unit to minimize domain crossing penalties. However, for blends
8437 /// it falls back to the floating point shuffle operation with appropriate bit casting.
8439 static SDValue lowerV2I64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
8440 const X86Subtarget *Subtarget,
8441 SelectionDAG &DAG) {
8443 assert(Op.getSimpleValueType() == MVT::v2i64 && "Bad shuffle type!");
8444 assert(V1.getSimpleValueType() == MVT::v2i64 && "Bad operand type!");
8445 assert(V2.getSimpleValueType() == MVT::v2i64 && "Bad operand type!");
8446 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
8447 ArrayRef<int> Mask = SVOp->getMask();
8448 assert(Mask.size() == 2 && "Unexpected mask size for v2 shuffle!");
8450 if (isSingleInputShuffleMask(Mask)) {
8451 // Check for being able to broadcast a single element.
8452 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v2i64, DL, V1,
8453 Mask, Subtarget, DAG))
8456 // Straight shuffle of a single input vector. For everything from SSE2
8457 // onward this has a single fast instruction with no scary immediates.
8458 // We have to map the mask as it is actually a v4i32 shuffle instruction.
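// As an illustrative example, the v2i64 mask <1, 0> widens to the v4i32 mask
// <2, 3, 0, 1>, i.e. a PSHUFD with immediate 0x4E.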
8459 V1 = DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, V1);
8460 int WidenedMask[4] = {
8461 std::max(Mask[0], 0) * 2, std::max(Mask[0], 0) * 2 + 1,
8462 std::max(Mask[1], 0) * 2, std::max(Mask[1], 0) * 2 + 1};
8464 ISD::BITCAST, DL, MVT::v2i64,
8465 DAG.getNode(X86ISD::PSHUFD, SDLoc(Op), MVT::v4i32, V1,
8466 getV4X86ShuffleImm8ForMask(WidenedMask, DAG)));
8469 // Try to use byte shift instructions.
8470 if (SDValue Shift = lowerVectorShuffleAsByteShift(
8471 DL, MVT::v2i64, V1, V2, Mask, DAG))
8474 // If we have a single input from V2 insert that into V1 if we can do so
8476 if ((Mask[0] >= 2) + (Mask[1] >= 2) == 1) {
8477 if (SDValue Insertion = lowerVectorShuffleAsElementInsertion(
8478 MVT::v2i64, DL, V1, V2, Mask, Subtarget, DAG))
8480 // Try inverting the insertion since for v2 masks it is easy to do and we
8481 // can't reliably sort the mask one way or the other.
8482 int InverseMask[2] = {Mask[0] < 0 ? -1 : (Mask[0] ^ 2),
8483 Mask[1] < 0 ? -1 : (Mask[1] ^ 2)};
8484 if (SDValue Insertion = lowerVectorShuffleAsElementInsertion(
8485 MVT::v2i64, DL, V2, V1, InverseMask, Subtarget, DAG))
8489 // Use dedicated unpack instructions for masks that match their pattern.
8490 if (isShuffleEquivalent(Mask, 0, 2))
8491 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v2i64, V1, V2);
8492 if (isShuffleEquivalent(Mask, 1, 3))
8493 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v2i64, V1, V2);
8495 if (Subtarget->hasSSE41())
8496 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v2i64, V1, V2, Mask,
8500 // Try to use byte rotation instructions.
8501 // It's more profitable for pre-SSSE3 targets to use shuffles/unpacks.
8502 if (Subtarget->hasSSSE3())
8503 if (SDValue Rotate = lowerVectorShuffleAsByteRotate(
8504 DL, MVT::v2i64, V1, V2, Mask, Subtarget, DAG))
8507 // We implement this with SHUFPD which is pretty lame because it will likely
8508 // incur 2 cycles of stall for integer vectors on Nehalem and older chips.
8509 // However, all the alternatives are still more cycles and newer chips don't
8510 // have this problem. It would be really nice if x86 had better shuffles here.
8511 V1 = DAG.getNode(ISD::BITCAST, DL, MVT::v2f64, V1);
8512 V2 = DAG.getNode(ISD::BITCAST, DL, MVT::v2f64, V2);
8513 return DAG.getNode(ISD::BITCAST, DL, MVT::v2i64,
8514 DAG.getVectorShuffle(MVT::v2f64, DL, V1, V2, Mask));
8517 /// \brief Lower a vector shuffle using the SHUFPS instruction.
8519 /// This is a helper routine dedicated to lowering vector shuffles using SHUFPS.
8520 /// It makes no assumptions about whether this is the *best* lowering; it simply uses it.
8522 static SDValue lowerVectorShuffleWithSHUFPS(SDLoc DL, MVT VT,
8523 ArrayRef<int> Mask, SDValue V1,
8524 SDValue V2, SelectionDAG &DAG) {
8525 SDValue LowV = V1, HighV = V2;
8526 int NewMask[4] = {Mask[0], Mask[1], Mask[2], Mask[3]};
8529 std::count_if(Mask.begin(), Mask.end(), [](int M) { return M >= 4; });
8531 if (NumV2Elements == 1) {
8533 std::find_if(Mask.begin(), Mask.end(), [](int M) { return M >= 4; }) -
8536 // Compute the index adjacent to V2Index and in the same half by toggling the low bit.
8538 int V2AdjIndex = V2Index ^ 1;
8540 if (Mask[V2AdjIndex] == -1) {
8541 // Handles all the cases where we have a single V2 element and an undef.
8542 // This will only ever happen in the high lanes because we commute the
8543 // vector otherwise.
8545 std::swap(LowV, HighV);
8546 NewMask[V2Index] -= 4;
8548 // Handle the case where the V2 element ends up adjacent to a V1 element.
8549 // To make this work, blend them together as the first step.
8550 int V1Index = V2AdjIndex;
8551 int BlendMask[4] = {Mask[V2Index] - 4, 0, Mask[V1Index], 0};
8552 V2 = DAG.getNode(X86ISD::SHUFP, DL, VT, V2, V1,
8553 getV4X86ShuffleImm8ForMask(BlendMask, DAG));
8555 // Now proceed to reconstruct the final blend as we have the necessary
8556 // high or low half formed.
8563 NewMask[V1Index] = 2; // We put the V1 element in V2[2].
8564 NewMask[V2Index] = 0; // We shifted the V2 element into V2[0].
8566 } else if (NumV2Elements == 2) {
8567 if (Mask[0] < 4 && Mask[1] < 4) {
8568 // Handle the easy case where we have V1 in the low lanes and V2 in the
8572 } else if (Mask[2] < 4 && Mask[3] < 4) {
8573 // We also handle the reversed case because this utility may get called
8574 // when we detect a SHUFPS pattern but can't easily commute the shuffle to
8575 // arrange things in the right direction.
8581 // We have a mixture of V1 and V2 in both low and high lanes. Rather than
8582 // trying to place elements directly, just blend them and set up the final
8583 // shuffle to place them.
8585 // The first two blend mask elements are for V1, the second two are for V2.
8587 int BlendMask[4] = {Mask[0] < 4 ? Mask[0] : Mask[1],
8588 Mask[2] < 4 ? Mask[2] : Mask[3],
8589 (Mask[0] >= 4 ? Mask[0] : Mask[1]) - 4,
8590 (Mask[2] >= 4 ? Mask[2] : Mask[3]) - 4};
8591 V1 = DAG.getNode(X86ISD::SHUFP, DL, VT, V1, V2,
8592 getV4X86ShuffleImm8ForMask(BlendMask, DAG));
8594 // Now we do a normal shuffle of V1 by giving V1 as both operands to the shuffle.
8597 NewMask[0] = Mask[0] < 4 ? 0 : 2;
8598 NewMask[1] = Mask[0] < 4 ? 2 : 0;
8599 NewMask[2] = Mask[2] < 4 ? 1 : 3;
8600 NewMask[3] = Mask[2] < 4 ? 3 : 1;
8603 return DAG.getNode(X86ISD::SHUFP, DL, VT, LowV, HighV,
8604 getV4X86ShuffleImm8ForMask(NewMask, DAG));
8607 /// \brief Lower 4-lane 32-bit floating point shuffles.
8609 /// Uses instructions exclusively from the floating point unit to minimize
8610 /// domain crossing penalties, as these are sufficient to implement all v4f32 shuffles.
8612 static SDValue lowerV4F32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
8613 const X86Subtarget *Subtarget,
8614 SelectionDAG &DAG) {
8616 assert(Op.getSimpleValueType() == MVT::v4f32 && "Bad shuffle type!");
8617 assert(V1.getSimpleValueType() == MVT::v4f32 && "Bad operand type!");
8618 assert(V2.getSimpleValueType() == MVT::v4f32 && "Bad operand type!");
8619 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
8620 ArrayRef<int> Mask = SVOp->getMask();
8621 assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");
8624 std::count_if(Mask.begin(), Mask.end(), [](int M) { return M >= 4; });
8626 if (NumV2Elements == 0) {
8627 // Check for being able to broadcast a single element.
8628 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v4f32, DL, V1,
8629 Mask, Subtarget, DAG))
8632 // Use even/odd duplicate instructions for masks that match their pattern.
8633 if (Subtarget->hasSSE3()) {
8634 if (isShuffleEquivalent(Mask, 0, 0, 2, 2))
8635 return DAG.getNode(X86ISD::MOVSLDUP, DL, MVT::v4f32, V1);
8636 if (isShuffleEquivalent(Mask, 1, 1, 3, 3))
8637 return DAG.getNode(X86ISD::MOVSHDUP, DL, MVT::v4f32, V1);
8640 if (Subtarget->hasAVX()) {
8641 // If we have AVX, we can use VPERMILPS which will allow folding a load
8642 // into the shuffle.
8643 return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v4f32, V1,
8644 getV4X86ShuffleImm8ForMask(Mask, DAG));
8647 // Otherwise, use a straight shuffle of a single input vector. We pass the
8648 // input vector to both operands to simulate this with a SHUFPS.
8649 return DAG.getNode(X86ISD::SHUFP, DL, MVT::v4f32, V1, V1,
8650 getV4X86ShuffleImm8ForMask(Mask, DAG));
8653 // Use dedicated unpack instructions for masks that match their pattern.
8654 if (isShuffleEquivalent(Mask, 0, 4, 1, 5))
8655 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v4f32, V1, V2);
8656 if (isShuffleEquivalent(Mask, 2, 6, 3, 7))
8657 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v4f32, V1, V2);
8659 // There are special ways we can lower some single-element blends. However, we
8660 // have custom ways we can lower more complex single-element blends below that
8661 // we defer to if both this and BLENDPS fail to match, so restrict this to
8662 // when the V2 input is targeting element 0 of the mask -- that is the fast case here.
8664 if (NumV2Elements == 1 && Mask[0] >= 4)
8665 if (SDValue V = lowerVectorShuffleAsElementInsertion(MVT::v4f32, DL, V1, V2,
8666 Mask, Subtarget, DAG))
8669 if (Subtarget->hasSSE41()) {
8670 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v4f32, V1, V2, Mask,
8674 // Use INSERTPS if we can complete the shuffle efficiently.
8675 if (SDValue V = lowerVectorShuffleAsInsertPS(Op, V1, V2, Mask, DAG))
8679 // Otherwise fall back to a SHUFPS lowering strategy.
8680 return lowerVectorShuffleWithSHUFPS(DL, MVT::v4f32, Mask, V1, V2, DAG);
8683 /// \brief Lower 4-lane i32 vector shuffles.
8685 /// We try to handle these with integer-domain shuffles where we can, but for
8686 /// blends we use the floating point domain blend instructions.
8687 static SDValue lowerV4I32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
8688 const X86Subtarget *Subtarget,
8689 SelectionDAG &DAG) {
8691 assert(Op.getSimpleValueType() == MVT::v4i32 && "Bad shuffle type!");
8692 assert(V1.getSimpleValueType() == MVT::v4i32 && "Bad operand type!");
8693 assert(V2.getSimpleValueType() == MVT::v4i32 && "Bad operand type!");
8694 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
8695 ArrayRef<int> Mask = SVOp->getMask();
8696 assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");
8698 // Whenever we can lower this as a zext, that instruction is strictly faster
8699 // than any alternative. It also allows us to fold memory operands into the
8700 // shuffle in many cases.
8701 if (SDValue ZExt = lowerVectorShuffleAsZeroOrAnyExtend(DL, MVT::v4i32, V1, V2,
8702 Mask, Subtarget, DAG))
8706 std::count_if(Mask.begin(), Mask.end(), [](int M) { return M >= 4; });
8708 if (NumV2Elements == 0) {
8709 // Check for being able to broadcast a single element.
8710 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v4i32, DL, V1,
8711 Mask, Subtarget, DAG))
8714 // Straight shuffle of a single input vector. For everything from SSE2
8715 // onward this has a single fast instruction with no scary immediates.
8716 // We coerce the shuffle pattern to be compatible with UNPCK instructions
8717 // but we aren't actually going to use the UNPCK instruction because doing
8718 // so prevents folding a load into this instruction or making a copy.
8719 const int UnpackLoMask[] = {0, 0, 1, 1};
8720 const int UnpackHiMask[] = {2, 2, 3, 3};
8721 if (isShuffleEquivalent(Mask, 0, 0, 1, 1))
8722 Mask = UnpackLoMask;
8723 else if (isShuffleEquivalent(Mask, 2, 2, 3, 3))
8724 Mask = UnpackHiMask;
8726 return DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32, V1,
8727 getV4X86ShuffleImm8ForMask(Mask, DAG));
8730 // Try to use bit shift instructions.
8731 if (SDValue Shift = lowerVectorShuffleAsBitShift(
8732 DL, MVT::v4i32, V1, V2, Mask, DAG))
8735 // Try to use byte shift instructions.
8736 if (SDValue Shift = lowerVectorShuffleAsByteShift(
8737 DL, MVT::v4i32, V1, V2, Mask, DAG))
8740 // There are special ways we can lower some single-element blends.
8741 if (NumV2Elements == 1)
8742 if (SDValue V = lowerVectorShuffleAsElementInsertion(MVT::v4i32, DL, V1, V2,
8743 Mask, Subtarget, DAG))
8746 // Use dedicated unpack instructions for masks that match their pattern.
8747 if (isShuffleEquivalent(Mask, 0, 4, 1, 5))
8748 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v4i32, V1, V2);
8749 if (isShuffleEquivalent(Mask, 2, 6, 3, 7))
8750 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v4i32, V1, V2);
8752 if (Subtarget->hasSSE41())
8753 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v4i32, V1, V2, Mask,
8757 // Try to use byte rotation instructions.
8758 // It's more profitable for pre-SSSE3 targets to use shuffles/unpacks.
8759 if (Subtarget->hasSSSE3())
8760 if (SDValue Rotate = lowerVectorShuffleAsByteRotate(
8761 DL, MVT::v4i32, V1, V2, Mask, Subtarget, DAG))
8764 // We implement this with SHUFPS because it can blend from two vectors.
8765 // Because we're going to eventually use SHUFPS, we use SHUFPS even to build
8766 // up the inputs, bypassing domain shift penalties that we would incur if we
8767 // directly used PSHUFD on Nehalem and older. For newer chips, this isn't a problem.
8769 return DAG.getNode(ISD::BITCAST, DL, MVT::v4i32,
8770 DAG.getVectorShuffle(
8772 DAG.getNode(ISD::BITCAST, DL, MVT::v4f32, V1),
8773 DAG.getNode(ISD::BITCAST, DL, MVT::v4f32, V2), Mask));
8776 /// \brief Lowering of single-input v8i16 shuffles is the cornerstone of SSE2
8777 /// shuffle lowering, and the most complex part.
8779 /// The lowering strategy is to try to form pairs of input lanes which are
8780 /// targeted at the same half of the final vector, and then use a dword shuffle
8781 /// to place them onto the right half, and finally unpack the paired lanes into
8782 /// their final position.
8784 /// The exact breakdown of how to form these dword pairs and align them on the
8785 /// correct sides is really tricky. See the comments within the function for
8786 /// more of the details.
8787 static SDValue lowerV8I16SingleInputVectorShuffle(
8788 SDLoc DL, SDValue V, MutableArrayRef<int> Mask,
8789 const X86Subtarget *Subtarget, SelectionDAG &DAG) {
8790 assert(V.getSimpleValueType() == MVT::v8i16 && "Bad input type!");
8791 MutableArrayRef<int> LoMask = Mask.slice(0, 4);
8792 MutableArrayRef<int> HiMask = Mask.slice(4, 4);
8794 SmallVector<int, 4> LoInputs;
8795 std::copy_if(LoMask.begin(), LoMask.end(), std::back_inserter(LoInputs),
8796 [](int M) { return M >= 0; });
8797 std::sort(LoInputs.begin(), LoInputs.end());
8798 LoInputs.erase(std::unique(LoInputs.begin(), LoInputs.end()), LoInputs.end());
8799 SmallVector<int, 4> HiInputs;
8800 std::copy_if(HiMask.begin(), HiMask.end(), std::back_inserter(HiInputs),
8801 [](int M) { return M >= 0; });
8802 std::sort(HiInputs.begin(), HiInputs.end());
8803 HiInputs.erase(std::unique(HiInputs.begin(), HiInputs.end()), HiInputs.end());
8805 std::lower_bound(LoInputs.begin(), LoInputs.end(), 4) - LoInputs.begin();
8806 int NumHToL = LoInputs.size() - NumLToL;
8808 std::lower_bound(HiInputs.begin(), HiInputs.end(), 4) - HiInputs.begin();
8809 int NumHToH = HiInputs.size() - NumLToH;
8810 MutableArrayRef<int> LToLInputs(LoInputs.data(), NumLToL);
8811 MutableArrayRef<int> LToHInputs(HiInputs.data(), NumLToH);
8812 MutableArrayRef<int> HToLInputs(LoInputs.data() + NumLToL, NumHToL);
8813 MutableArrayRef<int> HToHInputs(HiInputs.data() + NumLToH, NumHToH);
8815 // Check for being able to broadcast a single element.
8816 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v8i16, DL, V,
8817 Mask, Subtarget, DAG))
8820 // Try to use bit shift instructions.
8821 if (SDValue Shift = lowerVectorShuffleAsBitShift(
8822 DL, MVT::v8i16, V, V, Mask, DAG))
8825 // Try to use byte shift instructions.
8826 if (SDValue Shift = lowerVectorShuffleAsByteShift(
8827 DL, MVT::v8i16, V, V, Mask, DAG))
8830 // Use dedicated unpack instructions for masks that match their pattern.
8831 if (isShuffleEquivalent(Mask, 0, 0, 1, 1, 2, 2, 3, 3))
8832 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v8i16, V, V);
8833 if (isShuffleEquivalent(Mask, 4, 4, 5, 5, 6, 6, 7, 7))
8834 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v8i16, V, V);
8836 // Try to use byte rotation instructions.
8837 if (SDValue Rotate = lowerVectorShuffleAsByteRotate(
8838 DL, MVT::v8i16, V, V, Mask, Subtarget, DAG))
8841 // Simplify the 1-into-3 and 3-into-1 cases with a single pshufd. For all
8842 // such inputs we can swap two of the dwords across the half mark and end up
8843 // with <=2 inputs to each half in each half. Once there, we can fall through
8844 // to the generic code below. For example:
8846 // Input: [a, b, c, d, e, f, g, h] -PSHUFD[0,2,1,3]-> [a, b, e, f, c, d, g, h]
8847 // Mask: [0, 1, 2, 7, 4, 5, 6, 3] -----------------> [0, 1, 4, 7, 2, 3, 6, 5]
8849 // However in some very rare cases we have a 1-into-3 or 3-into-1 on one half
8850 // and an existing 2-into-2 on the other half. In this case we may have to
8851 // pre-shuffle the 2-into-2 half to avoid turning it into a 3-into-1 or
8852 // 1-into-3 which could cause us to cycle endlessly fixing each side in turn.
8853 // Fortunately, we don't have to handle anything but a 2-into-2 pattern
8854 // because any other situation (including a 3-into-1 or 1-into-3 in the other
8855 // half than the one we target for fixing) will be fixed when we re-enter this
8856 // path. Any sequence of PSHUFD instructions that results will also be combined
8857 // into a single instruction. Here is an example of the tricky case:
8859 // Input: [a, b, c, d, e, f, g, h] -PSHUFD[0,2,1,3]-> [a, b, e, f, c, d, g, h]
8860 // Mask: [3, 7, 1, 0, 2, 7, 3, 5] -THIS-IS-BAD!!!!-> [5, 7, 1, 0, 4, 7, 5, 3]
8862 // This now has a 1-into-3 in the high half! Instead, we do two shuffles:
8864 // Input: [a, b, c, d, e, f, g, h] PSHUFHW[0,2,1,3]-> [a, b, c, d, e, g, f, h]
8865 // Mask: [3, 7, 1, 0, 2, 7, 3, 5] -----------------> [3, 7, 1, 0, 2, 7, 3, 6]
8867 // Input: [a, b, c, d, e, g, f, h] -PSHUFD[0,2,1,3]-> [a, b, e, g, c, d, f, h]
8868 // Mask: [3, 7, 1, 0, 2, 7, 3, 6] -----------------> [5, 7, 1, 0, 4, 7, 5, 6]
8870 // The result is fine to be handled by the generic logic.
8871 auto balanceSides = [&](ArrayRef<int> AToAInputs, ArrayRef<int> BToAInputs,
8872 ArrayRef<int> BToBInputs, ArrayRef<int> AToBInputs,
8873 int AOffset, int BOffset) {
8874 assert((AToAInputs.size() == 3 || AToAInputs.size() == 1) &&
8875 "Must call this with A having 3 or 1 inputs from the A half.");
8876 assert((BToAInputs.size() == 1 || BToAInputs.size() == 3) &&
8877 "Must call this with B having 1 or 3 inputs from the B half.");
8878 assert(AToAInputs.size() + BToAInputs.size() == 4 &&
8879 "Must call this with either 3:1 or 1:3 inputs (summing to 4).");
8881 // Compute the index of dword with only one word among the three inputs in
8882 // a half by taking the sum of the half with three inputs and subtracting
8883 // the sum of the actual three inputs. The difference is the remaining slot.
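// As an illustrative example, if the three inputs in the low half are words
// {0, 1, 3}, then TripleInputSum is 0+1+2+3 = 6 and the inputs sum to 4, so
// the remaining slot is word 2 and the dword holding only one of the three
// inputs is dword 1.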
8886 int &TripleDWord = AToAInputs.size() == 3 ? ADWord : BDWord;
8887 int &OneInputDWord = AToAInputs.size() == 3 ? BDWord : ADWord;
8888 int TripleInputOffset = AToAInputs.size() == 3 ? AOffset : BOffset;
8889 ArrayRef<int> TripleInputs = AToAInputs.size() == 3 ? AToAInputs : BToAInputs;
8890 int OneInput = AToAInputs.size() == 3 ? BToAInputs[0] : AToAInputs[0];
8891 int TripleInputSum = 0 + 1 + 2 + 3 + (4 * TripleInputOffset);
8892 int TripleNonInputIdx =
8893 TripleInputSum - std::accumulate(TripleInputs.begin(), TripleInputs.end(), 0);
8894 TripleDWord = TripleNonInputIdx / 2;
8896 // We use xor with one to compute the adjacent DWord to whichever one the single input is in.
8898 OneInputDWord = (OneInput / 2) ^ 1;
8900 // Check for one tricky case: We're fixing a 3<-1 or a 1<-3 shuffle for AToA
8901 // and BToA inputs. If there is also such a problem with the BToB and AToB
8902 // inputs, we don't try to fix it necessarily -- we'll recurse and see it in
8903 // the next pass. However, if we have a 2<-2 in the BToB and AToB inputs, it
8904 // is essential that we don't *create* a 3<-1 as then we might oscillate.
8905 if (BToBInputs.size() == 2 && AToBInputs.size() == 2) {
8906 // Compute how many inputs will be flipped by swapping these DWords. We need
8908 // to balance this to ensure we don't form a 3-1 shuffle in the other half.
8910 int NumFlippedAToBInputs =
8911 std::count(AToBInputs.begin(), AToBInputs.end(), 2 * ADWord) +
8912 std::count(AToBInputs.begin(), AToBInputs.end(), 2 * ADWord + 1);
8913 int NumFlippedBToBInputs =
8914 std::count(BToBInputs.begin(), BToBInputs.end(), 2 * BDWord) +
8915 std::count(BToBInputs.begin(), BToBInputs.end(), 2 * BDWord + 1);
8916 if ((NumFlippedAToBInputs == 1 &&
8917 (NumFlippedBToBInputs == 0 || NumFlippedBToBInputs == 2)) ||
8918 (NumFlippedBToBInputs == 1 &&
8919 (NumFlippedAToBInputs == 0 || NumFlippedAToBInputs == 2))) {
8920 // We choose whether to fix the A half or B half based on whether that
8921 // half has zero flipped inputs. At zero, we may not be able to fix it
8922 // with that half. We also bias towards fixing the B half because that
8923 // will more commonly be the high half, and we have to bias one way.
8924 auto FixFlippedInputs = [&V, &DL, &Mask, &DAG](int PinnedIdx, int DWord,
8925 ArrayRef<int> Inputs) {
8926 int FixIdx = PinnedIdx ^ 1; // The adjacent slot to the pinned slot.
8927 bool IsFixIdxInput = std::find(Inputs.begin(), Inputs.end(),
8928 PinnedIdx ^ 1) != Inputs.end();
8929 // Determine whether the free index is in the flipped dword or the
8930 // unflipped dword based on where the pinned index is. We use this bit
8931 // in an xor to conditionally select the adjacent dword.
8932 int FixFreeIdx = 2 * (DWord ^ (PinnedIdx / 2 == DWord));
8933 bool IsFixFreeIdxInput = std::find(Inputs.begin(), Inputs.end(),
8934 FixFreeIdx) != Inputs.end();
8935 if (IsFixIdxInput == IsFixFreeIdxInput)
8937 IsFixFreeIdxInput = std::find(Inputs.begin(), Inputs.end(),
8938 FixFreeIdx) != Inputs.end();
8939 assert(IsFixIdxInput != IsFixFreeIdxInput &&
8940 "We need to be changing the number of flipped inputs!");
8941 int PSHUFHalfMask[] = {0, 1, 2, 3};
8942 std::swap(PSHUFHalfMask[FixFreeIdx % 4], PSHUFHalfMask[FixIdx % 4]);
8943 V = DAG.getNode(FixIdx < 4 ? X86ISD::PSHUFLW : X86ISD::PSHUFHW, DL,
8945 getV4X86ShuffleImm8ForMask(PSHUFHalfMask, DAG));
8948 if (M != -1 && M == FixIdx)
8950 else if (M != -1 && M == FixFreeIdx)
8953 if (NumFlippedBToBInputs != 0) {
8955 BToAInputs.size() == 3 ? TripleNonInputIdx : OneInput;
8956 FixFlippedInputs(BPinnedIdx, BDWord, BToBInputs);
8958 assert(NumFlippedAToBInputs != 0 && "Impossible given predicates!");
8960 AToAInputs.size() == 3 ? TripleNonInputIdx : OneInput;
8961 FixFlippedInputs(APinnedIdx, ADWord, AToBInputs);
8966 int PSHUFDMask[] = {0, 1, 2, 3};
8967 PSHUFDMask[ADWord] = BDWord;
8968 PSHUFDMask[BDWord] = ADWord;
8969 V = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16,
8970 DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32,
8971 DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, V),
8972 getV4X86ShuffleImm8ForMask(PSHUFDMask, DAG)));
8974 // Adjust the mask to match the new locations of A and B.
8976 if (M != -1 && M/2 == ADWord)
8977 M = 2 * BDWord + M % 2;
8978 else if (M != -1 && M/2 == BDWord)
8979 M = 2 * ADWord + M % 2;
8981 // Recurse back into this routine to re-compute state now that this isn't
8982 // a 3 and 1 problem.
8983 return DAG.getVectorShuffle(MVT::v8i16, DL, V, DAG.getUNDEF(MVT::v8i16),
8986 if ((NumLToL == 3 && NumHToL == 1) || (NumLToL == 1 && NumHToL == 3))
8987 return balanceSides(LToLInputs, HToLInputs, HToHInputs, LToHInputs, 0, 4);
8988 else if ((NumHToH == 3 && NumLToH == 1) || (NumHToH == 1 && NumLToH == 3))
8989 return balanceSides(HToHInputs, LToHInputs, LToLInputs, HToLInputs, 4, 0);
8991 // At this point there are at most two inputs to the low and high halves from
8992 // each half. That means the inputs can always be grouped into dwords and
8993 // those dwords can then be moved to the correct half with a dword shuffle.
8994 // We use at most one low and one high word shuffle to collect these paired
8995 // inputs into dwords, and finally a dword shuffle to place them.
8996 int PSHUFLMask[4] = {-1, -1, -1, -1};
8997 int PSHUFHMask[4] = {-1, -1, -1, -1};
8998 int PSHUFDMask[4] = {-1, -1, -1, -1};
9000 // First fix the masks for all the inputs that are staying in their
9001 // original halves. This will then dictate the targets of the cross-half shuffles.
9003 auto fixInPlaceInputs =
9004 [&PSHUFDMask](ArrayRef<int> InPlaceInputs, ArrayRef<int> IncomingInputs,
9005 MutableArrayRef<int> SourceHalfMask,
9006 MutableArrayRef<int> HalfMask, int HalfOffset) {
9007 if (InPlaceInputs.empty())
9009 if (InPlaceInputs.size() == 1) {
9010 SourceHalfMask[InPlaceInputs[0] - HalfOffset] =
9011 InPlaceInputs[0] - HalfOffset;
9012 PSHUFDMask[InPlaceInputs[0] / 2] = InPlaceInputs[0] / 2;
9015 if (IncomingInputs.empty()) {
9016 // Just fix all of the in place inputs.
9017 for (int Input : InPlaceInputs) {
9018 SourceHalfMask[Input - HalfOffset] = Input - HalfOffset;
9019 PSHUFDMask[Input / 2] = Input / 2;
9024 assert(InPlaceInputs.size() == 2 && "Cannot handle 3 or 4 inputs!");
9025 SourceHalfMask[InPlaceInputs[0] - HalfOffset] =
9026 InPlaceInputs[0] - HalfOffset;
9027 // Put the second input next to the first so that they are packed into
9028 // a dword. We find the adjacent index by toggling the low bit.
9029 int AdjIndex = InPlaceInputs[0] ^ 1;
9030 SourceHalfMask[AdjIndex - HalfOffset] = InPlaceInputs[1] - HalfOffset;
9031 std::replace(HalfMask.begin(), HalfMask.end(), InPlaceInputs[1], AdjIndex);
9032 PSHUFDMask[AdjIndex / 2] = AdjIndex / 2;
9034 fixInPlaceInputs(LToLInputs, HToLInputs, PSHUFLMask, LoMask, 0);
9035 fixInPlaceInputs(HToHInputs, LToHInputs, PSHUFHMask, HiMask, 4);
9037 // Now gather the cross-half inputs and place them into a free dword of
9038 // their target half.
9039 // FIXME: This operation could almost certainly be simplified dramatically to
9040 // look more like the 3-1 fixing operation.
9041 auto moveInputsToRightHalf = [&PSHUFDMask](
9042 MutableArrayRef<int> IncomingInputs, ArrayRef<int> ExistingInputs,
9043 MutableArrayRef<int> SourceHalfMask, MutableArrayRef<int> HalfMask,
9044 MutableArrayRef<int> FinalSourceHalfMask, int SourceOffset,
9046 auto isWordClobbered = [](ArrayRef<int> SourceHalfMask, int Word) {
9047 return SourceHalfMask[Word] != -1 && SourceHalfMask[Word] != Word;
9049 auto isDWordClobbered = [&isWordClobbered](ArrayRef<int> SourceHalfMask,
9051 int LowWord = Word & ~1;
9052 int HighWord = Word | 1;
9053 return isWordClobbered(SourceHalfMask, LowWord) ||
9054 isWordClobbered(SourceHalfMask, HighWord);
9057 if (IncomingInputs.empty())
9060 if (ExistingInputs.empty()) {
9061 // Map any dwords with inputs from them into the right half.
9062 for (int Input : IncomingInputs) {
9063 // If the source half mask maps over the inputs, turn those into
9064 // swaps and use the swapped lane.
9065 if (isWordClobbered(SourceHalfMask, Input - SourceOffset)) {
9066 if (SourceHalfMask[SourceHalfMask[Input - SourceOffset]] == -1) {
9067 SourceHalfMask[SourceHalfMask[Input - SourceOffset]] =
9068 Input - SourceOffset;
9069 // We have to swap the uses in our half mask in one sweep.
9070 for (int &M : HalfMask)
9071 if (M == SourceHalfMask[Input - SourceOffset] + SourceOffset)
9073 else if (M == Input)
9074 M = SourceHalfMask[Input - SourceOffset] + SourceOffset;
9076 assert(SourceHalfMask[SourceHalfMask[Input - SourceOffset]] ==
9077 Input - SourceOffset &&
9078 "Previous placement doesn't match!");
9080 // Note that this correctly re-maps both when we do a swap and when
9081 // we observe the other side of the swap above. We rely on that to
9082 // avoid swapping the members of the input list directly.
9083 Input = SourceHalfMask[Input - SourceOffset] + SourceOffset;
9086 // Map the input's dword into the correct half.
9087 if (PSHUFDMask[(Input - SourceOffset + DestOffset) / 2] == -1)
9088 PSHUFDMask[(Input - SourceOffset + DestOffset) / 2] = Input / 2;
9090 assert(PSHUFDMask[(Input - SourceOffset + DestOffset) / 2] ==
9092 "Previous placement doesn't match!");
9095 // And just directly shift any other-half mask elements to be same-half
9096 // as we will have mirrored the dword containing the element into the
9097 // same position within that half.
9098 for (int &M : HalfMask)
9099 if (M >= SourceOffset && M < SourceOffset + 4) {
9100 M = M - SourceOffset + DestOffset;
9101 assert(M >= 0 && "This should never wrap below zero!");
9106 // Ensure we have the input in a viable dword of its current half. This
9107 // is particularly tricky because the original position may be clobbered
9108 // by inputs being moved and *staying* in that half.
9109 if (IncomingInputs.size() == 1) {
9110 if (isWordClobbered(SourceHalfMask, IncomingInputs[0] - SourceOffset)) {
9111 int InputFixed = std::find(std::begin(SourceHalfMask),
9112 std::end(SourceHalfMask), -1) -
9113 std::begin(SourceHalfMask) + SourceOffset;
9114 SourceHalfMask[InputFixed - SourceOffset] =
9115 IncomingInputs[0] - SourceOffset;
9116 std::replace(HalfMask.begin(), HalfMask.end(), IncomingInputs[0],
9118 IncomingInputs[0] = InputFixed;
9120 } else if (IncomingInputs.size() == 2) {
9121 if (IncomingInputs[0] / 2 != IncomingInputs[1] / 2 ||
9122 isDWordClobbered(SourceHalfMask, IncomingInputs[0] - SourceOffset)) {
9123 // We have two non-adjacent or clobbered inputs we need to extract from
9124 // the source half. To do this, we need to map them into some adjacent
9125 // dword slot in the source mask.
9126 int InputsFixed[2] = {IncomingInputs[0] - SourceOffset,
9127 IncomingInputs[1] - SourceOffset};
9129 // If there is a free slot in the source half mask adjacent to one of
9130 // the inputs, place the other input in it. We use (Index XOR 1) to
9131 // compute an adjacent index.
9132 if (!isWordClobbered(SourceHalfMask, InputsFixed[0]) &&
9133 SourceHalfMask[InputsFixed[0] ^ 1] == -1) {
9134 SourceHalfMask[InputsFixed[0]] = InputsFixed[0];
9135 SourceHalfMask[InputsFixed[0] ^ 1] = InputsFixed[1];
9136 InputsFixed[1] = InputsFixed[0] ^ 1;
9137 } else if (!isWordClobbered(SourceHalfMask, InputsFixed[1]) &&
9138 SourceHalfMask[InputsFixed[1] ^ 1] == -1) {
9139 SourceHalfMask[InputsFixed[1]] = InputsFixed[1];
9140 SourceHalfMask[InputsFixed[1] ^ 1] = InputsFixed[0];
9141 InputsFixed[0] = InputsFixed[1] ^ 1;
9142 } else if (SourceHalfMask[2 * ((InputsFixed[0] / 2) ^ 1)] == -1 &&
9143 SourceHalfMask[2 * ((InputsFixed[0] / 2) ^ 1) + 1] == -1) {
9144 // The two inputs are in the same DWord but it is clobbered and the
9145 // adjacent DWord isn't used at all. Move both inputs to the free slot.
9147 SourceHalfMask[2 * ((InputsFixed[0] / 2) ^ 1)] = InputsFixed[0];
9148 SourceHalfMask[2 * ((InputsFixed[0] / 2) ^ 1) + 1] = InputsFixed[1];
9149 InputsFixed[0] = 2 * ((InputsFixed[0] / 2) ^ 1);
9150 InputsFixed[1] = 2 * ((InputsFixed[0] / 2) ^ 1) + 1;
9152 // The only way we hit this point is if there is no clobbering
9153 // (because there are no off-half inputs to this half) and there is no
9154 // free slot adjacent to one of the inputs. In this case, we have to
9155 // swap an input with a non-input.
9156 for (int i = 0; i < 4; ++i)
9157 assert((SourceHalfMask[i] == -1 || SourceHalfMask[i] == i) &&
9158 "We can't handle any clobbers here!");
9159 assert(InputsFixed[1] != (InputsFixed[0] ^ 1) &&
9160 "Cannot have adjacent inputs here!");
9162 SourceHalfMask[InputsFixed[0] ^ 1] = InputsFixed[1];
9163 SourceHalfMask[InputsFixed[1]] = InputsFixed[0] ^ 1;
9165 // We also have to update the final source mask in this case because
9166 // it may need to undo the above swap.
9167 for (int &M : FinalSourceHalfMask)
9168 if (M == (InputsFixed[0] ^ 1) + SourceOffset)
9169 M = InputsFixed[1] + SourceOffset;
9170 else if (M == InputsFixed[1] + SourceOffset)
9171 M = (InputsFixed[0] ^ 1) + SourceOffset;
9173 InputsFixed[1] = InputsFixed[0] ^ 1;
9176 // Point everything at the fixed inputs.
9177 for (int &M : HalfMask)
9178 if (M == IncomingInputs[0])
9179 M = InputsFixed[0] + SourceOffset;
9180 else if (M == IncomingInputs[1])
9181 M = InputsFixed[1] + SourceOffset;
9183 IncomingInputs[0] = InputsFixed[0] + SourceOffset;
9184 IncomingInputs[1] = InputsFixed[1] + SourceOffset;
9187 llvm_unreachable("Unhandled input size!");
9190 // Now hoist the DWord down to the right half.
9191 int FreeDWord = (PSHUFDMask[DestOffset / 2] == -1 ? 0 : 1) + DestOffset / 2;
9192 assert(PSHUFDMask[FreeDWord] == -1 && "DWord not free");
9193 PSHUFDMask[FreeDWord] = IncomingInputs[0] / 2;
9194 for (int &M : HalfMask)
9195 for (int Input : IncomingInputs)
9197 M = FreeDWord * 2 + Input % 2;
9199 moveInputsToRightHalf(HToLInputs, LToLInputs, PSHUFHMask, LoMask, HiMask,
9200 /*SourceOffset*/ 4, /*DestOffset*/ 0);
9201 moveInputsToRightHalf(LToHInputs, HToHInputs, PSHUFLMask, HiMask, LoMask,
9202 /*SourceOffset*/ 0, /*DestOffset*/ 4);
9204 // Now enact all the shuffles we've computed to move the inputs into their target halves.
9206 if (!isNoopShuffleMask(PSHUFLMask))
9207 V = DAG.getNode(X86ISD::PSHUFLW, DL, MVT::v8i16, V,
9208 getV4X86ShuffleImm8ForMask(PSHUFLMask, DAG));
9209 if (!isNoopShuffleMask(PSHUFHMask))
9210 V = DAG.getNode(X86ISD::PSHUFHW, DL, MVT::v8i16, V,
9211 getV4X86ShuffleImm8ForMask(PSHUFHMask, DAG));
9212 if (!isNoopShuffleMask(PSHUFDMask))
9213 V = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16,
9214 DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32,
9215 DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, V),
9216 getV4X86ShuffleImm8ForMask(PSHUFDMask, DAG)));
9218 // At this point, each half should contain all its inputs, and we can then
9219 // just shuffle them into their final position.
9220 assert(std::count_if(LoMask.begin(), LoMask.end(),
9221 [](int M) { return M >= 4; }) == 0 &&
9222 "Failed to lift all the high half inputs to the low mask!");
9223 assert(std::count_if(HiMask.begin(), HiMask.end(),
9224 [](int M) { return M >= 0 && M < 4; }) == 0 &&
9225 "Failed to lift all the low half inputs to the high mask!");
9227 // Do a half shuffle for the low mask.
9228 if (!isNoopShuffleMask(LoMask))
9229 V = DAG.getNode(X86ISD::PSHUFLW, DL, MVT::v8i16, V,
9230 getV4X86ShuffleImm8ForMask(LoMask, DAG));
9232 // Do a half shuffle with the high mask after shifting its values down.
9233 for (int &M : HiMask)
9236 if (!isNoopShuffleMask(HiMask))
9237 V = DAG.getNode(X86ISD::PSHUFHW, DL, MVT::v8i16, V,
9238 getV4X86ShuffleImm8ForMask(HiMask, DAG));
9243 /// \brief Detect whether the mask pattern should be lowered through interleaving.
9246 /// This essentially tests whether viewing the mask as an interleaving of two
9247 /// sub-sequences reduces the cross-input traffic of a blend operation. If so,
9248 /// lowering it through interleaving is a significantly better strategy.
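/// As an illustrative example, the v8i16 mask <0, 8, 1, 9, 2, 10, 3, 11> has
/// zero cross-input results when viewed as an interleaving (the even and odd
/// sub-sequences each draw from a single input) but four when split into low
/// and high halves, so interleaving is preferred.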
9249 static bool shouldLowerAsInterleaving(ArrayRef<int> Mask) {
9250 int NumEvenInputs[2] = {0, 0};
9251 int NumOddInputs[2] = {0, 0};
9252 int NumLoInputs[2] = {0, 0};
9253 int NumHiInputs[2] = {0, 0};
9254 for (int i = 0, Size = Mask.size(); i < Size; ++i) {
9258 int InputIdx = Mask[i] >= Size;
9261 ++NumLoInputs[InputIdx];
9263 ++NumHiInputs[InputIdx];
9266 ++NumEvenInputs[InputIdx];
9268 ++NumOddInputs[InputIdx];
9271 // The minimum number of cross-input results for both the interleaved and
9272 // split cases. If interleaving results in fewer cross-input results, return true.
9274 int InterleavedCrosses = std::min(NumEvenInputs[1] + NumOddInputs[0],
9275 NumEvenInputs[0] + NumOddInputs[1]);
9276 int SplitCrosses = std::min(NumLoInputs[1] + NumHiInputs[0],
9277 NumLoInputs[0] + NumHiInputs[1]);
9278 return InterleavedCrosses < SplitCrosses;
9281 /// \brief Blend two v8i16 vectors using a naive unpack strategy.
9283 /// This strategy only works when the inputs from each vector fit into a single
9284 /// half of that vector, and generally there are not so many inputs as to leave
9285 /// the in-place shuffles required highly constrained (and thus expensive). It
9286 /// shifts all the inputs into a single side of both input vectors and then
9287 /// uses an unpack to interleave these inputs in a single vector. At that
9288 /// point, we will fall back on the generic single input shuffle lowering.
9289 static SDValue lowerV8I16BasicBlendVectorShuffle(SDLoc DL, SDValue V1,
9291 MutableArrayRef<int> Mask,
9292 const X86Subtarget *Subtarget,
9293 SelectionDAG &DAG) {
9294 assert(V1.getSimpleValueType() == MVT::v8i16 && "Bad input type!");
9295 assert(V2.getSimpleValueType() == MVT::v8i16 && "Bad input type!");
9296 SmallVector<int, 3> LoV1Inputs, HiV1Inputs, LoV2Inputs, HiV2Inputs;
9297 for (int i = 0; i < 8; ++i)
9298 if (Mask[i] >= 0 && Mask[i] < 4)
9299 LoV1Inputs.push_back(i);
9300 else if (Mask[i] >= 4 && Mask[i] < 8)
9301 HiV1Inputs.push_back(i);
9302 else if (Mask[i] >= 8 && Mask[i] < 12)
9303 LoV2Inputs.push_back(i);
9304 else if (Mask[i] >= 12)
9305 HiV2Inputs.push_back(i);
9307 int NumV1Inputs = LoV1Inputs.size() + HiV1Inputs.size();
9308 int NumV2Inputs = LoV2Inputs.size() + HiV2Inputs.size();
9311 assert(NumV1Inputs > 0 && NumV1Inputs <= 3 && "At most 3 inputs supported");
9312 assert(NumV2Inputs > 0 && NumV2Inputs <= 3 && "At most 3 inputs supported");
9313 assert(NumV1Inputs + NumV2Inputs <= 4 && "At most 4 combined inputs");
9315 bool MergeFromLo = LoV1Inputs.size() + LoV2Inputs.size() >=
9316 HiV1Inputs.size() + HiV2Inputs.size();
9318 auto moveInputsToHalf = [&](SDValue V, ArrayRef<int> LoInputs,
9319 ArrayRef<int> HiInputs, bool MoveToLo,
9321 ArrayRef<int> GoodInputs = MoveToLo ? LoInputs : HiInputs;
9322 ArrayRef<int> BadInputs = MoveToLo ? HiInputs : LoInputs;
9323 if (BadInputs.empty())
9326 int MoveMask[] = {-1, -1, -1, -1, -1, -1, -1, -1};
9327 int MoveOffset = MoveToLo ? 0 : 4;
9329 if (GoodInputs.empty()) {
9330 for (int BadInput : BadInputs) {
9331 MoveMask[Mask[BadInput] % 4 + MoveOffset] = Mask[BadInput] - MaskOffset;
9332 Mask[BadInput] = Mask[BadInput] % 4 + MoveOffset + MaskOffset;
9335 if (GoodInputs.size() == 2) {
9336 // If the low inputs are spread across two dwords, pack them into one dword.
9338 MoveMask[MoveOffset] = Mask[GoodInputs[0]] - MaskOffset;
9339 MoveMask[MoveOffset + 1] = Mask[GoodInputs[1]] - MaskOffset;
9340 Mask[GoodInputs[0]] = MoveOffset + MaskOffset;
9341 Mask[GoodInputs[1]] = MoveOffset + 1 + MaskOffset;
9343 // Otherwise pin the good inputs.
9344 for (int GoodInput : GoodInputs)
9345 MoveMask[Mask[GoodInput] - MaskOffset] = Mask[GoodInput] - MaskOffset;
9348 if (BadInputs.size() == 2) {
9349 // If we have two bad inputs then there may be either one or two good
9350 // inputs fixed in place. Find a fixed input, and then find the *other*
9351 // two adjacent indices by using modular arithmetic.
9353 std::find_if(std::begin(MoveMask) + MoveOffset, std::end(MoveMask),
9354 [](int M) { return M >= 0; }) -
9355 std::begin(MoveMask);
9357 ((((GoodMaskIdx - MoveOffset) & ~1) + 2) % 4) + MoveOffset;
9358 assert(MoveMask[MoveMaskIdx] == -1 && "Expected empty slot");
9359 assert(MoveMask[MoveMaskIdx + 1] == -1 && "Expected empty slot");
9360 MoveMask[MoveMaskIdx] = Mask[BadInputs[0]] - MaskOffset;
9361 MoveMask[MoveMaskIdx + 1] = Mask[BadInputs[1]] - MaskOffset;
9362 Mask[BadInputs[0]] = MoveMaskIdx + MaskOffset;
9363 Mask[BadInputs[1]] = MoveMaskIdx + 1 + MaskOffset;
9365 assert(BadInputs.size() == 1 && "All sizes handled");
9366 int MoveMaskIdx = std::find(std::begin(MoveMask) + MoveOffset,
9367 std::end(MoveMask), -1) -
9368 std::begin(MoveMask);
9369 MoveMask[MoveMaskIdx] = Mask[BadInputs[0]] - MaskOffset;
9370 Mask[BadInputs[0]] = MoveMaskIdx + MaskOffset;
9374 return DAG.getVectorShuffle(MVT::v8i16, DL, V, DAG.getUNDEF(MVT::v8i16),
9377 V1 = moveInputsToHalf(V1, LoV1Inputs, HiV1Inputs, MergeFromLo,
9379 V2 = moveInputsToHalf(V2, LoV2Inputs, HiV2Inputs, MergeFromLo,
9382 // FIXME: Select an interleaving of the merge of V1 and V2 that minimizes
9383 // cross-half traffic in the final shuffle.
9385 // Munge the mask to be a single-input mask after the unpack merges the results.
9389 M = 2 * (M % 4) + (M / 8);
9391 return DAG.getVectorShuffle(
9392 MVT::v8i16, DL, DAG.getNode(MergeFromLo ? X86ISD::UNPCKL : X86ISD::UNPCKH,
9393 DL, MVT::v8i16, V1, V2),
9394 DAG.getUNDEF(MVT::v8i16), Mask);
9397 /// \brief Generic lowering of 8-lane i16 shuffles.
9399 /// This handles both single-input shuffles and combined shuffle/blends with
9400 /// two inputs. The single input shuffles are immediately delegated to
9401 /// a dedicated lowering routine.
9403 /// The blends are lowered in one of three fundamental ways. If there are few
9404 /// enough inputs, it delegates to a basic UNPCK-based strategy. If the shuffle
9405 /// of the input is significantly cheaper when lowered as an interleaving of
9406 /// the two inputs, try to interleave them. Otherwise, blend the low and high
9407 /// halves of the inputs separately (making them have relatively few inputs)
9408 /// and then concatenate them.
9409 static SDValue lowerV8I16VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
9410 const X86Subtarget *Subtarget,
9411 SelectionDAG &DAG) {
SDLoc DL(Op);
9413 assert(Op.getSimpleValueType() == MVT::v8i16 && "Bad shuffle type!");
9414 assert(V1.getSimpleValueType() == MVT::v8i16 && "Bad operand type!");
9415 assert(V2.getSimpleValueType() == MVT::v8i16 && "Bad operand type!");
9416 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
9417 ArrayRef<int> OrigMask = SVOp->getMask();
9418 int MaskStorage[8] = {OrigMask[0], OrigMask[1], OrigMask[2], OrigMask[3],
9419 OrigMask[4], OrigMask[5], OrigMask[6], OrigMask[7]};
9420 MutableArrayRef<int> Mask(MaskStorage);
9422 assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
9424 // Whenever we can lower this as a zext, that instruction is strictly faster
9425 // than any alternative.
9426 if (SDValue ZExt = lowerVectorShuffleAsZeroOrAnyExtend(
9427 DL, MVT::v8i16, V1, V2, OrigMask, Subtarget, DAG))
9430 auto isV1 = [](int M) { return M >= 0 && M < 8; };
9431 auto isV2 = [](int M) { return M >= 8; };
9433 int NumV1Inputs = std::count_if(Mask.begin(), Mask.end(), isV1);
9434 int NumV2Inputs = std::count_if(Mask.begin(), Mask.end(), isV2);
9436 if (NumV2Inputs == 0)
9437 return lowerV8I16SingleInputVectorShuffle(DL, V1, Mask, Subtarget, DAG);
9439 assert(NumV1Inputs > 0 && "All single-input shuffles should be canonicalized "
9440 "to be V1-input shuffles.");
9442 // Try to use bit shift instructions.
9443 if (SDValue Shift = lowerVectorShuffleAsBitShift(
9444 DL, MVT::v8i16, V1, V2, Mask, DAG))
9447 // Try to use byte shift instructions.
9448 if (SDValue Shift = lowerVectorShuffleAsByteShift(
9449 DL, MVT::v8i16, V1, V2, Mask, DAG))
9452 // There are special ways we can lower some single-element blends.
9453 if (NumV2Inputs == 1)
9454 if (SDValue V = lowerVectorShuffleAsElementInsertion(MVT::v8i16, DL, V1, V2,
9455 Mask, Subtarget, DAG))
9458 // Use dedicated unpack instructions for masks that match their pattern.
9459 if (isShuffleEquivalent(Mask, 0, 8, 1, 9, 2, 10, 3, 11))
9460 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v8i16, V1, V2);
9461 if (isShuffleEquivalent(Mask, 4, 12, 5, 13, 6, 14, 7, 15))
9462 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v8i16, V1, V2);
9464 if (Subtarget->hasSSE41())
9465 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v8i16, V1, V2, Mask,
9469 // Try to use byte rotation instructions.
9470 if (SDValue Rotate = lowerVectorShuffleAsByteRotate(
9471 DL, MVT::v8i16, V1, V2, Mask, Subtarget, DAG))
9474 if (NumV1Inputs + NumV2Inputs <= 4)
9475 return lowerV8I16BasicBlendVectorShuffle(DL, V1, V2, Mask, Subtarget, DAG);
9477 // Check whether an interleaving lowering is likely to be more efficient.
9478 // This isn't perfect but it is a strong heuristic that tends to work well on
9479 // the kinds of shuffles that show up in practice.
9481 // FIXME: Handle 1x, 2x, and 4x interleaving.
9482 if (shouldLowerAsInterleaving(Mask)) {
9483 // FIXME: Figure out whether we should pack these into the low or high
// halves.
9486 int EMask[8], OMask[8];
9487 for (int i = 0; i < 4; ++i) {
9488 EMask[i] = Mask[2*i];
9489 OMask[i] = Mask[2*i + 1];
EMask[i + 4] = OMask[i + 4] = -1;
9494 SDValue Evens = DAG.getVectorShuffle(MVT::v8i16, DL, V1, V2, EMask);
9495 SDValue Odds = DAG.getVectorShuffle(MVT::v8i16, DL, V1, V2, OMask);
9497 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v8i16, Evens, Odds);
9500 int LoBlendMask[8] = {-1, -1, -1, -1, -1, -1, -1, -1};
9501 int HiBlendMask[8] = {-1, -1, -1, -1, -1, -1, -1, -1};
9503 for (int i = 0; i < 4; ++i) {
9504 LoBlendMask[i] = Mask[i];
9505 HiBlendMask[i] = Mask[i + 4];
9508 SDValue LoV = DAG.getVectorShuffle(MVT::v8i16, DL, V1, V2, LoBlendMask);
9509 SDValue HiV = DAG.getVectorShuffle(MVT::v8i16, DL, V1, V2, HiBlendMask);
9510 LoV = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, LoV);
9511 HiV = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, HiV);
9513 return DAG.getNode(ISD::BITCAST, DL, MVT::v8i16,
9514 DAG.getNode(X86ISD::UNPCKL, DL, MVT::v2i64, LoV, HiV));
9517 /// \brief Check whether a compaction lowering can be done by dropping even
9518 /// elements and compute how many times even elements must be dropped.
9520 /// This handles shuffles which take every Nth element where N is a power of
9521 /// two. Example shuffle masks:
9523 /// N = 1: 0, 2, 4, 6, 8, 10, 12, 14, 0, 2, 4, 6, 8, 10, 12, 14
9524 /// N = 1: 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30
9525 /// N = 2: 0, 4, 8, 12, 0, 4, 8, 12, 0, 4, 8, 12, 0, 4, 8, 12
9526 /// N = 2: 0, 4, 8, 12, 16, 20, 24, 28, 0, 4, 8, 12, 16, 20, 24, 28
9527 /// N = 3: 0, 8, 0, 8, 0, 8, 0, 8, 0, 8, 0, 8, 0, 8, 0, 8
9528 /// N = 3: 0, 8, 16, 24, 0, 8, 16, 24, 0, 8, 16, 24, 0, 8, 16, 24
9530 /// Any of these lanes can of course be undef.
9532 /// This routine only supports N <= 3.
9533 /// FIXME: Evaluate whether either AVX or AVX-512 have any opportunities here
/// for larger N.
9536 /// \returns N above, or the number of times even elements must be dropped if
9537 /// there is such a number. Otherwise returns zero.
9538 static int canLowerByDroppingEvenElements(ArrayRef<int> Mask) {
9539 // Figure out whether we're looping over two inputs or just one.
9540 bool IsSingleInput = isSingleInputShuffleMask(Mask);
9542 // The modulus for the shuffle vector entries is based on whether this is
9543 // a single input or not.
9544 int ShuffleModulus = Mask.size() * (IsSingleInput ? 1 : 2);
9545 assert(isPowerOf2_32((uint32_t)ShuffleModulus) &&
9546 "We should only be called with masks with a power-of-2 size!");
9548 uint64_t ModMask = (uint64_t)ShuffleModulus - 1;
9550 // We track whether the input is viable for all power-of-2 strides 2^1, 2^2,
9551 // and 2^3 simultaneously. This is because we may have ambiguity with
9552 // partially undef inputs.
9553 bool ViableForN[3] = {true, true, true};
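// As a concrete illustration: for a two-input v16i8 shuffle, ShuffleModulus is
// 32 and ModMask is 31, so the stride-2 case (N == 1, ViableForN[0]) requires
// each defined Mask[i] to equal (2 * i) % 32.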
9555 for (int i = 0, e = Mask.size(); i < e; ++i) {
9556 // Ignore undef lanes, we'll optimistically collapse them to the pattern we
// need.
if (Mask[i] == -1)
continue;
9561 bool IsAnyViable = false;
9562 for (unsigned j = 0; j != array_lengthof(ViableForN); ++j)
9563 if (ViableForN[j]) {
uint64_t N = j + 1;
9566 // The shuffle mask must be equal to (i * 2^N) % M.
9567 if ((uint64_t)Mask[i] == (((uint64_t)i << N) & ModMask))
IsAnyViable = true;
else
9570 ViableForN[j] = false;
}
9572 // Early exit if we exhaust the possible powers of two.
if (!IsAnyViable)
break;
}
9577 for (unsigned j = 0; j != array_lengthof(ViableForN); ++j)
if (ViableForN[j])
return j + 1;
9581 // Return 0 as there is no viable power of two.
return 0;
}
9585 /// \brief Generic lowering of v16i8 shuffles.
9587 /// This is a hybrid strategy to lower v16i8 vectors. It first attempts to
9588 /// detect any complexity reducing interleaving. If that doesn't help, it uses
9589 /// UNPCK to spread the i8 elements across two i16-element vectors, and uses
9590 /// the existing lowering for v8i16 blends on each half, finally PACK-ing them
/// back together.
9592 static SDValue lowerV16I8VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
9593 const X86Subtarget *Subtarget,
9594 SelectionDAG &DAG) {
SDLoc DL(Op);
9596 assert(Op.getSimpleValueType() == MVT::v16i8 && "Bad shuffle type!");
9597 assert(V1.getSimpleValueType() == MVT::v16i8 && "Bad operand type!");
9598 assert(V2.getSimpleValueType() == MVT::v16i8 && "Bad operand type!");
9599 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
9600 ArrayRef<int> OrigMask = SVOp->getMask();
9601 assert(OrigMask.size() == 16 && "Unexpected mask size for v16 shuffle!");
9603 // Try to use bit shift instructions.
9604 if (SDValue Shift = lowerVectorShuffleAsBitShift(
9605 DL, MVT::v16i8, V1, V2, OrigMask, DAG))
9608 // Try to use byte shift instructions.
9609 if (SDValue Shift = lowerVectorShuffleAsByteShift(
9610 DL, MVT::v16i8, V1, V2, OrigMask, DAG))
9613 // Try to use byte rotation instructions.
9614 if (SDValue Rotate = lowerVectorShuffleAsByteRotate(
9615 DL, MVT::v16i8, V1, V2, OrigMask, Subtarget, DAG))
9618 // Try to use a zext lowering.
9619 if (SDValue ZExt = lowerVectorShuffleAsZeroOrAnyExtend(
9620 DL, MVT::v16i8, V1, V2, OrigMask, Subtarget, DAG))
9623 int MaskStorage[16] = {
9624 OrigMask[0], OrigMask[1], OrigMask[2], OrigMask[3],
9625 OrigMask[4], OrigMask[5], OrigMask[6], OrigMask[7],
9626 OrigMask[8], OrigMask[9], OrigMask[10], OrigMask[11],
9627 OrigMask[12], OrigMask[13], OrigMask[14], OrigMask[15]};
9628 MutableArrayRef<int> Mask(MaskStorage);
9629 MutableArrayRef<int> LoMask = Mask.slice(0, 8);
9630 MutableArrayRef<int> HiMask = Mask.slice(8, 8);
int NumV2Elements =
9633 std::count_if(Mask.begin(), Mask.end(), [](int M) { return M >= 16; });
9635 // For single-input shuffles, there are some nicer lowering tricks we can use.
9636 if (NumV2Elements == 0) {
9637 // Check for being able to broadcast a single element.
9638 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v16i8, DL, V1,
9639 Mask, Subtarget, DAG))
9642 // Check whether we can widen this to an i16 shuffle by duplicating bytes.
9643 // Notably, this handles splat and partial-splat shuffles more efficiently.
9644 // However, it only makes sense if the pre-duplication shuffle simplifies
9645 // things significantly. Currently, this means we need to be able to
9646 // express the pre-duplication shuffle as an i16 shuffle.
9648 // FIXME: We should check for other patterns which can be widened into an
9649 // i16 shuffle as well.
9650 auto canWidenViaDuplication = [](ArrayRef<int> Mask) {
9651 for (int i = 0; i < 16; i += 2)
9652 if (Mask[i] != -1 && Mask[i + 1] != -1 && Mask[i] != Mask[i + 1])
return false;
return true;
};
9657 auto tryToWidenViaDuplication = [&]() -> SDValue {
9658 if (!canWidenViaDuplication(Mask))
9660 SmallVector<int, 4> LoInputs;
9661 std::copy_if(Mask.begin(), Mask.end(), std::back_inserter(LoInputs),
9662 [](int M) { return M >= 0 && M < 8; });
9663 std::sort(LoInputs.begin(), LoInputs.end());
9664 LoInputs.erase(std::unique(LoInputs.begin(), LoInputs.end()),
9666 SmallVector<int, 4> HiInputs;
9667 std::copy_if(Mask.begin(), Mask.end(), std::back_inserter(HiInputs),
9668 [](int M) { return M >= 8; });
9669 std::sort(HiInputs.begin(), HiInputs.end());
9670 HiInputs.erase(std::unique(HiInputs.begin(), HiInputs.end()),
9673 bool TargetLo = LoInputs.size() >= HiInputs.size();
9674 ArrayRef<int> InPlaceInputs = TargetLo ? LoInputs : HiInputs;
9675 ArrayRef<int> MovingInputs = TargetLo ? HiInputs : LoInputs;
9677 int PreDupI16Shuffle[] = {-1, -1, -1, -1, -1, -1, -1, -1};
9678 SmallDenseMap<int, int, 8> LaneMap;
9679 for (int I : InPlaceInputs) {
9680 PreDupI16Shuffle[I/2] = I/2;
LaneMap[I] = I;
}
9683 int j = TargetLo ? 0 : 4, je = j + 4;
9684 for (int i = 0, ie = MovingInputs.size(); i < ie; ++i) {
9685 // Check if j is already a shuffle of this input. This happens when
9686 // there are two adjacent bytes after we move the low one.
9687 if (PreDupI16Shuffle[j] != MovingInputs[i] / 2) {
9688 // If we haven't yet mapped the input, search for a slot into which
// we can map it.
9690 while (j < je && PreDupI16Shuffle[j] != -1)
++j;
if (j == je)
9694 // We can't place the inputs into a single half with a simple i16 shuffle, so bail.
return SDValue();
9697 // Map this input with the i16 shuffle.
9698 PreDupI16Shuffle[j] = MovingInputs[i] / 2;
9701 // Update the lane map based on the mapping we ended up with.
9702 LaneMap[MovingInputs[i]] = 2 * j + MovingInputs[i] % 2;
}
V1 = DAG.getNode(
9705 ISD::BITCAST, DL, MVT::v16i8,
9706 DAG.getVectorShuffle(MVT::v8i16, DL,
9707 DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, V1),
9708 DAG.getUNDEF(MVT::v8i16), PreDupI16Shuffle));
9710 // Unpack the bytes to form the i16s that will be shuffled into place.
9711 V1 = DAG.getNode(TargetLo ? X86ISD::UNPCKL : X86ISD::UNPCKH, DL,
9712 MVT::v16i8, V1, V1);
9714 int PostDupI16Shuffle[8] = {-1, -1, -1, -1, -1, -1, -1, -1};
9715 for (int i = 0; i < 16; ++i)
9716 if (Mask[i] != -1) {
9717 int MappedMask = LaneMap[Mask[i]] - (TargetLo ? 0 : 8);
9718 assert(MappedMask < 8 && "Invalid v8 shuffle mask!");
9719 if (PostDupI16Shuffle[i / 2] == -1)
9720 PostDupI16Shuffle[i / 2] = MappedMask;
else
9722 assert(PostDupI16Shuffle[i / 2] == MappedMask &&
"Conflicting entries in the original shuffle!");
}
return DAG.getNode(
9726 ISD::BITCAST, DL, MVT::v16i8,
9727 DAG.getVectorShuffle(MVT::v8i16, DL,
9728 DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, V1),
9729 DAG.getUNDEF(MVT::v8i16), PostDupI16Shuffle));
9731 if (SDValue V = tryToWidenViaDuplication())
9735 // Check whether an interleaving lowering is likely to be more efficient.
9736 // This isn't perfect but it is a strong heuristic that tends to work well on
9737 // the kinds of shuffles that show up in practice.
9739 // FIXME: We need to handle other interleaving widths (i16, i32, ...).
9740 if (shouldLowerAsInterleaving(Mask)) {
9741 int NumLoHalf = std::count_if(Mask.begin(), Mask.end(), [](int M) {
9742 return (M >= 0 && M < 8) || (M >= 16 && M < 24);
9744 int NumHiHalf = std::count_if(Mask.begin(), Mask.end(), [](int M) {
9745 return (M >= 8 && M < 16) || M >= 24;
9747 int EMask[16] = {-1, -1, -1, -1, -1, -1, -1, -1,
9748 -1, -1, -1, -1, -1, -1, -1, -1};
9749 int OMask[16] = {-1, -1, -1, -1, -1, -1, -1, -1,
9750 -1, -1, -1, -1, -1, -1, -1, -1};
9751 bool UnpackLo = NumLoHalf >= NumHiHalf;
9752 MutableArrayRef<int> TargetEMask(UnpackLo ? EMask : EMask + 8, 8);
9753 MutableArrayRef<int> TargetOMask(UnpackLo ? OMask : OMask + 8, 8);
9754 for (int i = 0; i < 8; ++i) {
9755 TargetEMask[i] = Mask[2 * i];
9756 TargetOMask[i] = Mask[2 * i + 1];
9759 SDValue Evens = DAG.getVectorShuffle(MVT::v16i8, DL, V1, V2, EMask);
9760 SDValue Odds = DAG.getVectorShuffle(MVT::v16i8, DL, V1, V2, OMask);
9762 return DAG.getNode(UnpackLo ? X86ISD::UNPCKL : X86ISD::UNPCKH, DL,
9763 MVT::v16i8, Evens, Odds);
9766 // Check for SSSE3 which lets us lower all v16i8 shuffles much more directly
9767 // with PSHUFB. It is important to do this before we attempt to generate any
9768 // blends but after all of the single-input lowerings. If the single input
9769 // lowerings can find an instruction sequence that is faster than a PSHUFB, we
9770 // want to preserve that and we can DAG combine any longer sequences into
9771 // a PSHUFB in the end. But once we start blending from multiple inputs,
9772 // the complexity of DAG combining bad patterns back into PSHUFB is too high,
9773 // and there are *very* few patterns that would actually be faster than the
9774 // PSHUFB approach because of its ability to zero lanes.
9776 // FIXME: The only exceptions to the above are blends which are exact
9777 // interleavings with direct instructions supporting them. We currently don't
9778 // handle those well here.
9779 if (Subtarget->hasSSSE3()) {
SDValue V1Mask[16];
SDValue V2Mask[16];
9782 bool V1InUse = false;
9783 bool V2InUse = false;
9784 SmallBitVector Zeroable = computeZeroableShuffleElements(Mask, V1, V2);
9786 for (int i = 0; i < 16; ++i) {
9787 if (Mask[i] == -1) {
9788 V1Mask[i] = V2Mask[i] = DAG.getUNDEF(MVT::i8);
} else {
9790 const int ZeroMask = 0x80;
9791 int V1Idx = (Mask[i] < 16 ? Mask[i] : ZeroMask);
9792 int V2Idx = (Mask[i] < 16 ? ZeroMask : Mask[i] - 16);
if (Zeroable[i])
9794 V1Idx = V2Idx = ZeroMask;
9795 V1Mask[i] = DAG.getConstant(V1Idx, MVT::i8);
9796 V2Mask[i] = DAG.getConstant(V2Idx, MVT::i8);
9797 V1InUse |= (ZeroMask != V1Idx);
9798 V2InUse |= (ZeroMask != V2Idx);
}
}
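// Note on the 0x80 convention used above: PSHUFB treats any control byte with
// its high bit set as "write a zero to this lane", so lanes that should come
// from the other operand (or be zero) are marked 0x80 here and the two partial
// results can simply be OR'd together below.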
9803 V1 = DAG.getNode(X86ISD::PSHUFB, DL, MVT::v16i8, V1,
9804 DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v16i8, V1Mask));
9806 V2 = DAG.getNode(X86ISD::PSHUFB, DL, MVT::v16i8, V2,
9807 DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v16i8, V2Mask));
9809 // If we need shuffled inputs from both, blend the two.
9810 if (V1InUse && V2InUse)
9811 return DAG.getNode(ISD::OR, DL, MVT::v16i8, V1, V2);
if (V1InUse)
9813 return V1; // Single inputs are easy.
if (V2InUse)
9815 return V2; // Single inputs are easy.
9816 // Shuffling to a zeroable vector.
9817 return getZeroVector(MVT::v16i8, Subtarget, DAG, DL);
9820 // There are special ways we can lower some single-element blends.
9821 if (NumV2Elements == 1)
9822 if (SDValue V = lowerVectorShuffleAsElementInsertion(MVT::v16i8, DL, V1, V2,
9823 Mask, Subtarget, DAG))
9826 // Check whether a compaction lowering can be done. This handles shuffles
9827 // which take every Nth element for some even N. See the helper function for
// details.
9830 // We special case these as they can be particularly efficiently handled with
9831 // the PACKUSWB instruction on x86 and they show up in common patterns of
9832 // rearranging bytes to truncate wide elements.
9833 if (int NumEvenDrops = canLowerByDroppingEvenElements(Mask)) {
9834 // NumEvenDrops is the power of two stride of the elements. Another way of
9835 // thinking about it is that we need to drop the even elements this many
9836 // times to get the original input.
9837 bool IsSingleInput = isSingleInputShuffleMask(Mask);
9839 // First we need to zero all the dropped bytes.
9840 assert(NumEvenDrops <= 3 &&
9841 "No support for dropping even elements more than 3 times.");
9842 // We use the mask type to pick which bytes are preserved based on how many
9843 // elements are dropped.
9844 MVT MaskVTs[] = { MVT::v8i16, MVT::v4i32, MVT::v2i64 };
9845 SDValue ByteClearMask =
9846 DAG.getNode(ISD::BITCAST, DL, MVT::v16i8,
9847 DAG.getConstant(0xFF, MaskVTs[NumEvenDrops - 1]));
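// To make the masking concrete: for NumEvenDrops == 1 the constant is a v8i16
// splat of 0x00FF, so after the AND every odd byte is zero and the PACKUS
// below emits exactly the even bytes of V1 followed by the even bytes of V2.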
9848 V1 = DAG.getNode(ISD::AND, DL, MVT::v16i8, V1, ByteClearMask);
if (!IsSingleInput)
9850 V2 = DAG.getNode(ISD::AND, DL, MVT::v16i8, V2, ByteClearMask);
9852 // Now pack things back together.
9853 V1 = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, V1);
9854 V2 = IsSingleInput ? V1 : DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, V2);
9855 SDValue Result = DAG.getNode(X86ISD::PACKUS, DL, MVT::v16i8, V1, V2);
9856 for (int i = 1; i < NumEvenDrops; ++i) {
9857 Result = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, Result);
9858 Result = DAG.getNode(X86ISD::PACKUS, DL, MVT::v16i8, Result, Result);
}
return Result;
}
9864 int V1LoBlendMask[8] = {-1, -1, -1, -1, -1, -1, -1, -1};
9865 int V1HiBlendMask[8] = {-1, -1, -1, -1, -1, -1, -1, -1};
9866 int V2LoBlendMask[8] = {-1, -1, -1, -1, -1, -1, -1, -1};
9867 int V2HiBlendMask[8] = {-1, -1, -1, -1, -1, -1, -1, -1};
9869 auto buildBlendMasks = [](MutableArrayRef<int> HalfMask,
9870 MutableArrayRef<int> V1HalfBlendMask,
9871 MutableArrayRef<int> V2HalfBlendMask) {
9872 for (int i = 0; i < 8; ++i)
9873 if (HalfMask[i] >= 0 && HalfMask[i] < 16) {
9874 V1HalfBlendMask[i] = HalfMask[i];
HalfMask[i] = i;
9876 } else if (HalfMask[i] >= 16) {
9877 V2HalfBlendMask[i] = HalfMask[i] - 16;
9878 HalfMask[i] = i + 8;
9881 buildBlendMasks(LoMask, V1LoBlendMask, V2LoBlendMask);
9882 buildBlendMasks(HiMask, V1HiBlendMask, V2HiBlendMask);
9884 SDValue Zero = getZeroVector(MVT::v8i16, Subtarget, DAG, DL);
9886 auto buildLoAndHiV8s = [&](SDValue V, MutableArrayRef<int> LoBlendMask,
9887 MutableArrayRef<int> HiBlendMask) {
SDValue V1, V2;
9889 // Check if any of the odd lanes in the v16i8 are used. If not, we can mask
9890 // them out and avoid using UNPCK{L,H} to extract the elements of V as
// i16s.
9892 if (std::none_of(LoBlendMask.begin(), LoBlendMask.end(),
9893 [](int M) { return M >= 0 && M % 2 == 1; }) &&
9894 std::none_of(HiBlendMask.begin(), HiBlendMask.end(),
9895 [](int M) { return M >= 0 && M % 2 == 1; })) {
9896 // Use a mask to drop the high bytes.
9897 V1 = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, V);
9898 V1 = DAG.getNode(ISD::AND, DL, MVT::v8i16, V1,
9899 DAG.getConstant(0x00FF, MVT::v8i16));
9901 // This will be a single vector shuffle instead of a blend so nuke V2.
9902 V2 = DAG.getUNDEF(MVT::v8i16);
9904 // Squash the masks to point directly into V1.
9905 for (int &M : LoBlendMask)
if (M >= 0)
M /= 2;
9908 for (int &M : HiBlendMask)
if (M >= 0)
M /= 2;
} else {
9912 // Otherwise just unpack the low half of V into V1 and the high half into
9913 // V2 so that we can blend them as i16s.
9914 V1 = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16,
9915 DAG.getNode(X86ISD::UNPCKL, DL, MVT::v16i8, V, Zero));
9916 V2 = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16,
9917 DAG.getNode(X86ISD::UNPCKH, DL, MVT::v16i8, V, Zero));
}
9920 SDValue BlendedLo = DAG.getVectorShuffle(MVT::v8i16, DL, V1, V2, LoBlendMask);
9921 SDValue BlendedHi = DAG.getVectorShuffle(MVT::v8i16, DL, V1, V2, HiBlendMask);
9922 return std::make_pair(BlendedLo, BlendedHi);
};
9924 SDValue V1Lo, V1Hi, V2Lo, V2Hi;
9925 std::tie(V1Lo, V1Hi) = buildLoAndHiV8s(V1, V1LoBlendMask, V1HiBlendMask);
9926 std::tie(V2Lo, V2Hi) = buildLoAndHiV8s(V2, V2LoBlendMask, V2HiBlendMask);
9928 SDValue LoV = DAG.getVectorShuffle(MVT::v8i16, DL, V1Lo, V2Lo, LoMask);
9929 SDValue HiV = DAG.getVectorShuffle(MVT::v8i16, DL, V1Hi, V2Hi, HiMask);
9931 return DAG.getNode(X86ISD::PACKUS, DL, MVT::v16i8, LoV, HiV);
9934 /// \brief Dispatching routine to lower various 128-bit x86 vector shuffles.
9936 /// This routine breaks down the specific type of 128-bit shuffle and
9937 /// dispatches to the lowering routines accordingly.
9938 static SDValue lower128BitVectorShuffle(SDValue Op, SDValue V1, SDValue V2,
9939 MVT VT, const X86Subtarget *Subtarget,
9940 SelectionDAG &DAG) {
9941 switch (VT.SimpleTy) {
case MVT::v2i64:
9943 return lowerV2I64VectorShuffle(Op, V1, V2, Subtarget, DAG);
case MVT::v2f64:
9945 return lowerV2F64VectorShuffle(Op, V1, V2, Subtarget, DAG);
case MVT::v4i32:
9947 return lowerV4I32VectorShuffle(Op, V1, V2, Subtarget, DAG);
case MVT::v4f32:
9949 return lowerV4F32VectorShuffle(Op, V1, V2, Subtarget, DAG);
case MVT::v8i16:
9951 return lowerV8I16VectorShuffle(Op, V1, V2, Subtarget, DAG);
case MVT::v16i8:
9953 return lowerV16I8VectorShuffle(Op, V1, V2, Subtarget, DAG);
default:
9956 llvm_unreachable("Unimplemented!");
}
}
9960 /// \brief Helper function to test whether a shuffle mask could be
9961 /// simplified by widening the elements being shuffled.
9963 /// Appends the mask for wider elements in WidenedMask if valid. Otherwise
9964 /// leaves it in an unspecified state.
9966 /// NOTE: This must handle normal vector shuffle masks and *target* vector
9967 /// shuffle masks. The latter have the special property of a '-2' representing
9968 /// a zero-ed lane of a vector.
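/// For example, the v4 mask <0, 1, 6, 7> widens to the v2 mask <0, 3>, while
/// <0, 2, 4, 6> cannot be widened because no pair forms an aligned adjacent
/// (even, even + 1) element of the wider type.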
9969 static bool canWidenShuffleElements(ArrayRef<int> Mask,
9970 SmallVectorImpl<int> &WidenedMask) {
9971 for (int i = 0, Size = Mask.size(); i < Size; i += 2) {
9972 // If both elements are undef, it's trivial.
9973 if (Mask[i] == SM_SentinelUndef && Mask[i + 1] == SM_SentinelUndef) {
9974 WidenedMask.push_back(SM_SentinelUndef);
9978 // Check for an undef mask and a mask value properly aligned to fit with
9979 // a pair of values. If we find such a case, use the non-undef mask's value.
9980 if (Mask[i] == SM_SentinelUndef && Mask[i + 1] >= 0 && Mask[i + 1] % 2 == 1) {
9981 WidenedMask.push_back(Mask[i + 1] / 2);
9984 if (Mask[i + 1] == SM_SentinelUndef && Mask[i] >= 0 && Mask[i] % 2 == 0) {
9985 WidenedMask.push_back(Mask[i] / 2);
9989 // When zeroing, we need to spread the zeroing across both lanes to widen.
9990 if (Mask[i] == SM_SentinelZero || Mask[i + 1] == SM_SentinelZero) {
9991 if ((Mask[i] == SM_SentinelZero || Mask[i] == SM_SentinelUndef) &&
9992 (Mask[i + 1] == SM_SentinelZero || Mask[i + 1] == SM_SentinelUndef)) {
9993 WidenedMask.push_back(SM_SentinelZero);
9999 // Finally check if the two mask values are adjacent and aligned with
// their pairing.
10001 if (Mask[i] != SM_SentinelUndef && Mask[i] % 2 == 0 && Mask[i] + 1 == Mask[i + 1]) {
10002 WidenedMask.push_back(Mask[i] / 2);
10006 // Otherwise we can't safely widen the elements used in this shuffle.
return false;
}
10009 assert(WidenedMask.size() == Mask.size() / 2 &&
10010 "Incorrect size of mask after widening the elements!");
10015 /// \brief Generic routine to split a vector shuffle into half-sized shuffles.
10017 /// This routine just extracts two subvectors, shuffles them independently, and
10018 /// then concatenates them back together. This should work effectively with all
10019 /// AVX vector shuffle types.
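/// For instance (illustrative only), a two-input v8f32 shuffle is handled as
/// two independent v4f32 shuffles over the extracted 128-bit halves, whose
/// results are then CONCAT_VECTORS'd back into a v8f32.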
10020 static SDValue splitAndLowerVectorShuffle(SDLoc DL, MVT VT, SDValue V1,
10021 SDValue V2, ArrayRef<int> Mask,
10022 SelectionDAG &DAG) {
10023 assert(VT.getSizeInBits() >= 256 &&
10024 "Only for 256-bit or wider vector shuffles!");
10025 assert(V1.getSimpleValueType() == VT && "Bad operand type!");
10026 assert(V2.getSimpleValueType() == VT && "Bad operand type!");
10028 ArrayRef<int> LoMask = Mask.slice(0, Mask.size() / 2);
10029 ArrayRef<int> HiMask = Mask.slice(Mask.size() / 2);
10031 int NumElements = VT.getVectorNumElements();
10032 int SplitNumElements = NumElements / 2;
10033 MVT ScalarVT = VT.getScalarType();
10034 MVT SplitVT = MVT::getVectorVT(ScalarVT, NumElements / 2);
10036 SDValue LoV1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SplitVT, V1,
10037 DAG.getIntPtrConstant(0));
10038 SDValue HiV1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SplitVT, V1,
10039 DAG.getIntPtrConstant(SplitNumElements));
10040 SDValue LoV2 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SplitVT, V2,
10041 DAG.getIntPtrConstant(0));
10042 SDValue HiV2 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SplitVT, V2,
10043 DAG.getIntPtrConstant(SplitNumElements));
10045 // Now create two 4-way blends of these half-width vectors.
10046 auto HalfBlend = [&](ArrayRef<int> HalfMask) {
10047 bool UseLoV1 = false, UseHiV1 = false, UseLoV2 = false, UseHiV2 = false;
10048 SmallVector<int, 32> V1BlendMask, V2BlendMask, BlendMask;
10049 for (int i = 0; i < SplitNumElements; ++i) {
10050 int M = HalfMask[i];
10051 if (M >= NumElements) {
10052 if (M >= NumElements + SplitNumElements)
UseHiV2 = true;
else
UseLoV2 = true;
10056 V2BlendMask.push_back(M - NumElements);
10057 V1BlendMask.push_back(-1);
10058 BlendMask.push_back(SplitNumElements + i);
10059 } else if (M >= 0) {
10060 if (M >= SplitNumElements)
UseHiV1 = true;
else
UseLoV1 = true;
10064 V2BlendMask.push_back(-1);
10065 V1BlendMask.push_back(M);
10066 BlendMask.push_back(i);
} else {
10068 V2BlendMask.push_back(-1);
10069 V1BlendMask.push_back(-1);
10070 BlendMask.push_back(-1);
}
}
10074 // Because the lowering happens after all combining takes place, we need to
10075 // manually combine these blend masks as much as possible so that we create
10076 // a minimal number of high-level vector shuffle nodes.
10078 // First try just blending the halves of V1 or V2.
10079 if (!UseLoV1 && !UseHiV1 && !UseLoV2 && !UseHiV2)
10080 return DAG.getUNDEF(SplitVT);
10081 if (!UseLoV2 && !UseHiV2)
10082 return DAG.getVectorShuffle(SplitVT, DL, LoV1, HiV1, V1BlendMask);
10083 if (!UseLoV1 && !UseHiV1)
10084 return DAG.getVectorShuffle(SplitVT, DL, LoV2, HiV2, V2BlendMask);
10086 SDValue V1Blend, V2Blend;
10087 if (UseLoV1 && UseHiV1) {
V1Blend =
10089 DAG.getVectorShuffle(SplitVT, DL, LoV1, HiV1, V1BlendMask);
} else {
10091 // We only use half of V1 so map the usage down into the final blend mask.
10092 V1Blend = UseLoV1 ? LoV1 : HiV1;
10093 for (int i = 0; i < SplitNumElements; ++i)
10094 if (BlendMask[i] >= 0 && BlendMask[i] < SplitNumElements)
10095 BlendMask[i] = V1BlendMask[i] - (UseLoV1 ? 0 : SplitNumElements);
}
10097 if (UseLoV2 && UseHiV2) {
V2Blend =
10099 DAG.getVectorShuffle(SplitVT, DL, LoV2, HiV2, V2BlendMask);
} else {
10101 // We only use half of V2 so map the usage down into the final blend mask.
10102 V2Blend = UseLoV2 ? LoV2 : HiV2;
10103 for (int i = 0; i < SplitNumElements; ++i)
10104 if (BlendMask[i] >= SplitNumElements)
10105 BlendMask[i] = V2BlendMask[i] + (UseLoV2 ? SplitNumElements : 0);
}
10107 return DAG.getVectorShuffle(SplitVT, DL, V1Blend, V2Blend, BlendMask);
10109 SDValue Lo = HalfBlend(LoMask);
10110 SDValue Hi = HalfBlend(HiMask);
10111 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Lo, Hi);
10114 /// \brief Either split a vector in halves or decompose the shuffles and the
/// blend.
10117 /// This is provided as a good fallback for many lowerings of non-single-input
10118 /// shuffles with more than one 128-bit lane. In those cases, we want to select
10119 /// between splitting the shuffle into 128-bit components and stitching those
10120 /// back together vs. extracting the single-input shuffles and blending those
/// results.
10122 static SDValue lowerVectorShuffleAsSplitOrBlend(SDLoc DL, MVT VT, SDValue V1,
10123 SDValue V2, ArrayRef<int> Mask,
10124 SelectionDAG &DAG) {
10125 assert(!isSingleInputShuffleMask(Mask) && "This routine must not be used to "
10126 "lower single-input shuffles as it "
10127 "could then recurse on itself.");
10128 int Size = Mask.size();
10130 // If this can be modeled as a broadcast of two elements followed by a blend,
10131 // prefer that lowering. This is especially important because broadcasts can
10132 // often fold with memory operands.
10133 auto DoBothBroadcast = [&] {
10134 int V1BroadcastIdx = -1, V2BroadcastIdx = -1;
for (int M : Mask)
if (M >= Size) {
10137 if (V2BroadcastIdx == -1)
10138 V2BroadcastIdx = M - Size;
10139 else if (M - Size != V2BroadcastIdx)
return false;
10141 } else if (M >= 0) {
10142 if (V1BroadcastIdx == -1)
10143 V1BroadcastIdx = M;
10144 else if (M != V1BroadcastIdx)
return false;
}
return true;
};
10149 if (DoBothBroadcast())
10150 return lowerVectorShuffleAsDecomposedShuffleBlend(DL, VT, V1, V2, Mask,
10153 // If the inputs all stem from a single 128-bit lane of each input, then we
10154 // split them rather than blending because the split will decompose to
10155 // unusually few instructions.
10156 int LaneCount = VT.getSizeInBits() / 128;
10157 int LaneSize = Size / LaneCount;
10158 SmallBitVector LaneInputs[2];
10159 LaneInputs[0].resize(LaneCount, false);
10160 LaneInputs[1].resize(LaneCount, false);
10161 for (int i = 0; i < Size; ++i)
if (Mask[i] >= 0)
10163 LaneInputs[Mask[i] / Size][(Mask[i] % Size) / LaneSize] = true;
10164 if (LaneInputs[0].count() <= 1 && LaneInputs[1].count() <= 1)
10165 return splitAndLowerVectorShuffle(DL, VT, V1, V2, Mask, DAG);
10167 // Otherwise, just fall back to decomposed shuffles and a blend. This requires
10168 // that the decomposed single-input shuffles don't end up here.
10169 return lowerVectorShuffleAsDecomposedShuffleBlend(DL, VT, V1, V2, Mask, DAG);
10172 /// \brief Lower a vector shuffle crossing multiple 128-bit lanes as
10173 /// a permutation and blend of those lanes.
10175 /// This essentially blends the out-of-lane inputs to each lane into the lane
10176 /// from a permuted copy of the vector. This lowering strategy results in four
10177 /// instructions in the worst case for a single-input cross lane shuffle which
10178 /// is lower than any other fully general cross-lane shuffle strategy I'm aware
10179 /// of. Special cases for each particular shuffle pattern should be handled
10180 /// prior to trying this lowering.
10181 static SDValue lowerVectorShuffleAsLanePermuteAndBlend(SDLoc DL, MVT VT,
10182 SDValue V1, SDValue V2,
10183 ArrayRef<int> Mask,
10184 SelectionDAG &DAG) {
10185 // FIXME: This should probably be generalized for 512-bit vectors as well.
10186 assert(VT.getSizeInBits() == 256 && "Only for 256-bit vector shuffles!");
10187 int LaneSize = Mask.size() / 2;
10189 // If there are only inputs from one 128-bit lane, splitting will in fact be
10190 // less expensive. The flags track whether the given lane contains an element
10191 // that crosses to another lane.
10192 bool LaneCrossing[2] = {false, false};
10193 for (int i = 0, Size = Mask.size(); i < Size; ++i)
10194 if (Mask[i] >= 0 && (Mask[i] % Size) / LaneSize != i / LaneSize)
10195 LaneCrossing[(Mask[i] % Size) / LaneSize] = true;
10196 if (!LaneCrossing[0] || !LaneCrossing[1])
10197 return splitAndLowerVectorShuffle(DL, VT, V1, V2, Mask, DAG);
10199 if (isSingleInputShuffleMask(Mask)) {
10200 SmallVector<int, 32> FlippedBlendMask;
10201 for (int i = 0, Size = Mask.size(); i < Size; ++i)
10202 FlippedBlendMask.push_back(
10203 Mask[i] < 0 ? -1 : (((Mask[i] % Size) / LaneSize == i / LaneSize)
? Mask[i]
10205 : Mask[i] % LaneSize +
10206 (i / LaneSize) * LaneSize + Size));
10208 // Flip the vector, and blend the results which should now be in-lane. The
10209 // VPERM2X128 mask uses the low 2 bits for the low source and bits 4 and
10210 // 5 for the high source. The value 3 selects the high half of source 2 and
10211 // the value 2 selects the low half of source 2. We only use source 2 to
10212 // allow folding it into a memory operand.
10213 unsigned PERMMask = 3 | 2 << 4;
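// With operands (UNDEF, V1) this immediate is 0x23: the low 128 bits of the
// result take selector 3 (the high half of source 2, i.e. V1's high lane) and
// the high 128 bits take selector 2 (V1's low lane), so Flipped is simply V1
// with its halves swapped.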
10214 SDValue Flipped = DAG.getNode(X86ISD::VPERM2X128, DL, VT, DAG.getUNDEF(VT),
10215 V1, DAG.getConstant(PERMMask, MVT::i8));
10216 return DAG.getVectorShuffle(VT, DL, V1, Flipped, FlippedBlendMask);
10219 // This now reduces to two single-input shuffles of V1 and V2 which at worst
10220 // will be handled by the above logic and a blend of the results, much like
10221 // other patterns in AVX.
10222 return lowerVectorShuffleAsDecomposedShuffleBlend(DL, VT, V1, V2, Mask, DAG);
10225 /// \brief Handle lowering 2-lane 128-bit shuffles.
10226 static SDValue lowerV2X128VectorShuffle(SDLoc DL, MVT VT, SDValue V1,
10227 SDValue V2, ArrayRef<int> Mask,
10228 const X86Subtarget *Subtarget,
10229 SelectionDAG &DAG) {
10230 // Blends are faster and handle all the non-lane-crossing cases.
10231 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, VT, V1, V2, Mask,
10235 MVT SubVT = MVT::getVectorVT(VT.getVectorElementType(),
10236 VT.getVectorNumElements() / 2);
10237 // Check for patterns which can be matched with a single insert of a 128-bit
// subvector.
10239 if (isShuffleEquivalent(Mask, 0, 1, 0, 1) ||
10240 isShuffleEquivalent(Mask, 0, 1, 4, 5)) {
10241 SDValue LoV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT, V1,
10242 DAG.getIntPtrConstant(0));
10243 SDValue HiV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT,
10244 Mask[2] < 4 ? V1 : V2, DAG.getIntPtrConstant(0));
10245 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, LoV, HiV);
10247 if (isShuffleEquivalent(Mask, 0, 1, 6, 7)) {
10248 SDValue LoV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT, V1,
10249 DAG.getIntPtrConstant(0));
10250 SDValue HiV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT, V2,
10251 DAG.getIntPtrConstant(2));
10252 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, LoV, HiV);
10255 // Otherwise form a 128-bit permutation.
10256 // FIXME: Detect zero-vector inputs and use the VPERM2X128 to zero that half.
10257 unsigned PermMask = Mask[0] / 2 | (Mask[2] / 2) << 4;
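// For instance, the widened v4 mask <2, 3, 4, 5> has Mask[0] == 2 and
// Mask[2] == 4, producing the immediate 1 | 2 << 4 = 0x21: the low lane of the
// result is V1's high half and the high lane is V2's low half.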
10258 return DAG.getNode(X86ISD::VPERM2X128, DL, VT, V1, V2,
10259 DAG.getConstant(PermMask, MVT::i8));
10262 /// \brief Lower a vector shuffle by first fixing the 128-bit lanes and then
10263 /// shuffling each lane.
10265 /// This will only succeed when the result of fixing the 128-bit lanes results
10266 /// in a single-input non-lane-crossing shuffle with a repeating shuffle mask in
10267 /// each 128-bit lane. This handles many cases where we can quickly blend away
10268 /// the lane crosses early and then use simpler shuffles within each lane.
10270 /// FIXME: It might be worthwhile at some point to support this without
10271 /// requiring the 128-bit lane-relative shuffles to be repeating, but currently
10272 /// in x86 only floating point has interesting non-repeating shuffles, and even
10273 /// those are still *marginally* more expensive.
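/// As an illustrative example, the two-input v8i32 mask
/// <9, 8, 11, 10, 5, 4, 7, 6> first becomes a v4i64 lane shuffle selecting
/// V2's low lane and V1's high lane, after which the remaining work is the
/// repeating, non-crossing in-lane mask <1, 0, 3, 2, 5, 4, 7, 6>.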
10274 static SDValue lowerVectorShuffleByMerging128BitLanes(
10275 SDLoc DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
10276 const X86Subtarget *Subtarget, SelectionDAG &DAG) {
10277 assert(!isSingleInputShuffleMask(Mask) &&
10278 "This is only useful with multiple inputs.");
10280 int Size = Mask.size();
10281 int LaneSize = 128 / VT.getScalarSizeInBits();
10282 int NumLanes = Size / LaneSize;
10283 assert(NumLanes > 1 && "Only handles 256-bit and wider shuffles.");
10285 // See if we can build a hypothetical 128-bit lane-fixing shuffle mask. Also
10286 // check whether the in-128-bit lane shuffles share a repeating pattern.
10287 SmallVector<int, 4> Lanes;
10288 Lanes.resize(NumLanes, -1);
10289 SmallVector<int, 4> InLaneMask;
10290 InLaneMask.resize(LaneSize, -1);
10291 for (int i = 0; i < Size; ++i) {
if (Mask[i] < 0)
continue;
10295 int j = i / LaneSize;
10297 if (Lanes[j] < 0) {
10298 // First entry we've seen for this lane.
10299 Lanes[j] = Mask[i] / LaneSize;
10300 } else if (Lanes[j] != Mask[i] / LaneSize) {
10301 // This doesn't match the lane selected previously!
return SDValue();
}
10305 // Check that within each lane we have a consistent shuffle mask.
10306 int k = i % LaneSize;
10307 if (InLaneMask[k] < 0) {
10308 InLaneMask[k] = Mask[i] % LaneSize;
10309 } else if (InLaneMask[k] != Mask[i] % LaneSize) {
10310 // This doesn't fit a repeating in-lane mask.
return SDValue();
}
}
10315 // First shuffle the lanes into place.
10316 MVT LaneVT = MVT::getVectorVT(VT.isFloatingPoint() ? MVT::f64 : MVT::i64,
10317 VT.getSizeInBits() / 64);
10318 SmallVector<int, 8> LaneMask;
10319 LaneMask.resize(NumLanes * 2, -1);
10320 for (int i = 0; i < NumLanes; ++i)
10321 if (Lanes[i] >= 0) {
10322 LaneMask[2 * i + 0] = 2*Lanes[i] + 0;
10323 LaneMask[2 * i + 1] = 2*Lanes[i] + 1;
10326 V1 = DAG.getNode(ISD::BITCAST, DL, LaneVT, V1);
10327 V2 = DAG.getNode(ISD::BITCAST, DL, LaneVT, V2);
10328 SDValue LaneShuffle = DAG.getVectorShuffle(LaneVT, DL, V1, V2, LaneMask);
10330 // Cast it back to the type we actually want.
10331 LaneShuffle = DAG.getNode(ISD::BITCAST, DL, VT, LaneShuffle);
10333 // Now do a simple shuffle that isn't lane crossing.
10334 SmallVector<int, 8> NewMask;
10335 NewMask.resize(Size, -1);
10336 for (int i = 0; i < Size; ++i)
10338 NewMask[i] = (i / LaneSize) * LaneSize + Mask[i] % LaneSize;
10339 assert(!is128BitLaneCrossingShuffleMask(VT, NewMask) &&
10340 "Must not introduce lane crosses at this point!");
10342 return DAG.getVectorShuffle(VT, DL, LaneShuffle, DAG.getUNDEF(VT), NewMask);
10345 /// \brief Test whether the specified input (0 or 1) is in-place blended by the
/// given mask.
10348 /// This returns true if the elements from a particular input are already in the
10349 /// slot required by the given mask and require no permutation.
10350 static bool isShuffleMaskInputInPlace(int Input, ArrayRef<int> Mask) {
10351 assert((Input == 0 || Input == 1) && "Only two inputs to shuffles.");
10352 int Size = Mask.size();
10353 for (int i = 0; i < Size; ++i)
10354 if (Mask[i] >= 0 && Mask[i] / Size == Input && Mask[i] % Size != i)
10360 /// \brief Handle lowering of 4-lane 64-bit floating point shuffles.
10362 /// Also ends up handling lowering of 4-lane 64-bit integer shuffles when AVX2
10363 /// isn't available.
10364 static SDValue lowerV4F64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
10365 const X86Subtarget *Subtarget,
10366 SelectionDAG &DAG) {
SDLoc DL(Op);
10368 assert(V1.getSimpleValueType() == MVT::v4f64 && "Bad operand type!");
10369 assert(V2.getSimpleValueType() == MVT::v4f64 && "Bad operand type!");
10370 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
10371 ArrayRef<int> Mask = SVOp->getMask();
10372 assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");
10374 SmallVector<int, 4> WidenedMask;
10375 if (canWidenShuffleElements(Mask, WidenedMask))
10376 return lowerV2X128VectorShuffle(DL, MVT::v4f64, V1, V2, Mask, Subtarget,
10379 if (isSingleInputShuffleMask(Mask)) {
10380 // Check for being able to broadcast a single element.
10381 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v4f64, DL, V1,
10382 Mask, Subtarget, DAG))
10385 // Use low duplicate instructions for masks that match their pattern.
10386 if (isShuffleEquivalent(Mask, 0, 0, 2, 2))
10387 return DAG.getNode(X86ISD::MOVDDUP, DL, MVT::v4f64, V1);
10389 if (!is128BitLaneCrossingShuffleMask(MVT::v4f64, Mask)) {
10390 // Non-half-crossing single input shuffles can be lowered with an
10391 // interleaved permutation.
10392 unsigned VPERMILPMask = (Mask[0] == 1) | ((Mask[1] == 1) << 1) |
10393 ((Mask[2] == 3) << 2) | ((Mask[3] == 3) << 3);
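// VPERMILPD for v4f64 takes one immediate bit per element, selecting the odd
// double of that element's 128-bit lane when set; e.g. the in-lane swap mask
// <1, 0, 3, 2> encodes as 1 | 0 << 1 | 1 << 2 | 0 << 3 = 0b0101.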
10394 return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v4f64, V1,
10395 DAG.getConstant(VPERMILPMask, MVT::i8));
10398 // With AVX2 we have direct support for this permutation.
10399 if (Subtarget->hasAVX2())
10400 return DAG.getNode(X86ISD::VPERMI, DL, MVT::v4f64, V1,
10401 getV4X86ShuffleImm8ForMask(Mask, DAG));
10403 // Otherwise, fall back.
10404 return lowerVectorShuffleAsLanePermuteAndBlend(DL, MVT::v4f64, V1, V2, Mask,
10408 // X86 has dedicated unpack instructions that can handle specific blend
10409 // operations: UNPCKH and UNPCKL.
10410 if (isShuffleEquivalent(Mask, 0, 4, 2, 6))
10411 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v4f64, V1, V2);
10412 if (isShuffleEquivalent(Mask, 1, 5, 3, 7))
10413 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v4f64, V1, V2);
10415 // If we have a single input to the zero element, insert that into V1 if we
10416 // can do so cheaply.
10417 int NumV2Elements =
10418 std::count_if(Mask.begin(), Mask.end(), [](int M) { return M >= 4; });
10419 if (NumV2Elements == 1 && Mask[0] >= 4)
10420 if (SDValue Insertion = lowerVectorShuffleAsElementInsertion(
10421 MVT::v4f64, DL, V1, V2, Mask, Subtarget, DAG))
10424 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v4f64, V1, V2, Mask,
10428 // Check if the blend happens to exactly fit that of SHUFPD.
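// SHUFPD's immediate carries one bit per result element, and the elements
// alternate between V1 and V2 (V1 lane 0, V2 lane 0, V1 lane 1, V2 lane 1);
// each set bit picks the odd double of the corresponding 128-bit lane, which
// is exactly what the equality checks below encode.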
10429 if ((Mask[0] == -1 || Mask[0] < 2) &&
10430 (Mask[1] == -1 || (Mask[1] >= 4 && Mask[1] < 6)) &&
10431 (Mask[2] == -1 || (Mask[2] >= 2 && Mask[2] < 4)) &&
10432 (Mask[3] == -1 || Mask[3] >= 6)) {
10433 unsigned SHUFPDMask = (Mask[0] == 1) | ((Mask[1] == 5) << 1) |
10434 ((Mask[2] == 3) << 2) | ((Mask[3] == 7) << 3);
10435 return DAG.getNode(X86ISD::SHUFP, DL, MVT::v4f64, V1, V2,
10436 DAG.getConstant(SHUFPDMask, MVT::i8));
10438 if ((Mask[0] == -1 || (Mask[0] >= 4 && Mask[0] < 6)) &&
10439 (Mask[1] == -1 || Mask[1] < 2) &&
10440 (Mask[2] == -1 || Mask[2] >= 6) &&
10441 (Mask[3] == -1 || (Mask[3] >= 2 && Mask[3] < 4))) {
10442 unsigned SHUFPDMask = (Mask[0] == 5) | ((Mask[1] == 1) << 1) |
10443 ((Mask[2] == 7) << 2) | ((Mask[3] == 3) << 3);
10444 return DAG.getNode(X86ISD::SHUFP, DL, MVT::v4f64, V2, V1,
10445 DAG.getConstant(SHUFPDMask, MVT::i8));
10448 // Try to simplify this by merging 128-bit lanes to enable a lane-based
10449 // shuffle. However, if we have AVX2 and either input is already in place,
10450 // we will be able to shuffle the other input across lanes in a single
10451 // instruction so skip this pattern.
10452 if (!(Subtarget->hasAVX2() && (isShuffleMaskInputInPlace(0, Mask) ||
10453 isShuffleMaskInputInPlace(1, Mask))))
10454 if (SDValue Result = lowerVectorShuffleByMerging128BitLanes(
10455 DL, MVT::v4f64, V1, V2, Mask, Subtarget, DAG))
10458 // If we have AVX2 then we always want to lower with a blend because at v4 we
10459 // can fully permute the elements.
10460 if (Subtarget->hasAVX2())
10461 return lowerVectorShuffleAsDecomposedShuffleBlend(DL, MVT::v4f64, V1, V2,
10464 // Otherwise fall back on generic lowering.
10465 return lowerVectorShuffleAsSplitOrBlend(DL, MVT::v4f64, V1, V2, Mask, DAG);
10468 /// \brief Handle lowering of 4-lane 64-bit integer shuffles.
10470 /// This routine is only called when we have AVX2 and thus a reasonable
10471 /// instruction set for v4i64 shuffling.
10472 static SDValue lowerV4I64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
10473 const X86Subtarget *Subtarget,
10474 SelectionDAG &DAG) {
SDLoc DL(Op);
10476 assert(V1.getSimpleValueType() == MVT::v4i64 && "Bad operand type!");
10477 assert(V2.getSimpleValueType() == MVT::v4i64 && "Bad operand type!");
10478 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
10479 ArrayRef<int> Mask = SVOp->getMask();
10480 assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");
10481 assert(Subtarget->hasAVX2() && "We can only lower v4i64 with AVX2!");
10483 SmallVector<int, 4> WidenedMask;
10484 if (canWidenShuffleElements(Mask, WidenedMask))
10485 return lowerV2X128VectorShuffle(DL, MVT::v4i64, V1, V2, Mask, Subtarget,
10488 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v4i64, V1, V2, Mask,
10492 // Check for being able to broadcast a single element.
10493 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v4i64, DL, V1,
10494 Mask, Subtarget, DAG))
10497 // When the shuffle is mirrored between the 128-bit lanes of the unit, we can
10498 // use lower latency instructions that will operate on both 128-bit lanes.
10499 SmallVector<int, 2> RepeatedMask;
10500 if (is128BitLaneRepeatedShuffleMask(MVT::v4i64, Mask, RepeatedMask)) {
10501 if (isSingleInputShuffleMask(Mask)) {
10502 int PSHUFDMask[] = {-1, -1, -1, -1};
10503 for (int i = 0; i < 2; ++i)
10504 if (RepeatedMask[i] >= 0) {
10505 PSHUFDMask[2 * i] = 2 * RepeatedMask[i];
10506 PSHUFDMask[2 * i + 1] = 2 * RepeatedMask[i] + 1;
10508 return DAG.getNode(
10509 ISD::BITCAST, DL, MVT::v4i64,
10510 DAG.getNode(X86ISD::PSHUFD, DL, MVT::v8i32,
10511 DAG.getNode(ISD::BITCAST, DL, MVT::v8i32, V1),
10512 getV4X86ShuffleImm8ForMask(PSHUFDMask, DAG)));
10515 // Use dedicated unpack instructions for masks that match their pattern.
10516 if (isShuffleEquivalent(Mask, 0, 4, 2, 6))
10517 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v4i64, V1, V2);
10518 if (isShuffleEquivalent(Mask, 1, 5, 3, 7))
10519 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v4i64, V1, V2);
10522 // AVX2 provides a direct instruction for permuting a single input across
// lanes.
10524 if (isSingleInputShuffleMask(Mask))
10525 return DAG.getNode(X86ISD::VPERMI, DL, MVT::v4i64, V1,
10526 getV4X86ShuffleImm8ForMask(Mask, DAG));
10528 // Try to simplify this by merging 128-bit lanes to enable a lane-based
10529 // shuffle. However, if we have AVX2 and either input is already in place,
10530 // we will be able to shuffle the other input across lanes in a single
10531 // instruction so skip this pattern.
10532 if (!(Subtarget->hasAVX2() && (isShuffleMaskInputInPlace(0, Mask) ||
10533 isShuffleMaskInputInPlace(1, Mask))))
10534 if (SDValue Result = lowerVectorShuffleByMerging128BitLanes(
10535 DL, MVT::v4i64, V1, V2, Mask, Subtarget, DAG))
10538 // Otherwise fall back on generic blend lowering.
10539 return lowerVectorShuffleAsDecomposedShuffleBlend(DL, MVT::v4i64, V1, V2,
10543 /// \brief Handle lowering of 8-lane 32-bit floating point shuffles.
10545 /// Also ends up handling lowering of 8-lane 32-bit integer shuffles when AVX2
10546 /// isn't available.
10547 static SDValue lowerV8F32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
10548 const X86Subtarget *Subtarget,
10549 SelectionDAG &DAG) {
SDLoc DL(Op);
10551 assert(V1.getSimpleValueType() == MVT::v8f32 && "Bad operand type!");
10552 assert(V2.getSimpleValueType() == MVT::v8f32 && "Bad operand type!");
10553 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
10554 ArrayRef<int> Mask = SVOp->getMask();
10555 assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
10557 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v8f32, V1, V2, Mask,
10561 // Check for being able to broadcast a single element.
10562 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v8f32, DL, V1,
10563 Mask, Subtarget, DAG))
10566 // If the shuffle mask is repeated in each 128-bit lane, we have many more
10567 // options to efficiently lower the shuffle.
10568 SmallVector<int, 4> RepeatedMask;
10569 if (is128BitLaneRepeatedShuffleMask(MVT::v8f32, Mask, RepeatedMask)) {
10570 assert(RepeatedMask.size() == 4 &&
10571 "Repeated masks must be half the mask width!");
10573 // Use even/odd duplicate instructions for masks that match their pattern.
10574 if (isShuffleEquivalent(Mask, 0, 0, 2, 2, 4, 4, 6, 6))
10575 return DAG.getNode(X86ISD::MOVSLDUP, DL, MVT::v8f32, V1);
10576 if (isShuffleEquivalent(Mask, 1, 1, 3, 3, 5, 5, 7, 7))
10577 return DAG.getNode(X86ISD::MOVSHDUP, DL, MVT::v8f32, V1);
10579 if (isSingleInputShuffleMask(Mask))
10580 return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v8f32, V1,
10581 getV4X86ShuffleImm8ForMask(RepeatedMask, DAG));
10583 // Use dedicated unpack instructions for masks that match their pattern.
10584 if (isShuffleEquivalent(Mask, 0, 8, 1, 9, 4, 12, 5, 13))
10585 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v8f32, V1, V2);
10586 if (isShuffleEquivalent(Mask, 2, 10, 3, 11, 6, 14, 7, 15))
10587 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v8f32, V1, V2);
10589 // Otherwise, fall back to a SHUFPS sequence. Here it is important that we
10590 // have already handled any direct blends. We also need to squash the
10591 // repeated mask into a simulated v4f32 mask.
10592 for (int i = 0; i < 4; ++i)
10593 if (RepeatedMask[i] >= 8)
10594 RepeatedMask[i] -= 4;
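// As an example of the squash: entries 8-11 (V2's elements) are remapped to
// 4-7, so a repeated per-lane mask of <1, 8, 0, 9> becomes <1, 4, 0, 5>, the
// two-input v4f32 form that lowerVectorShuffleWithSHUFPS expects.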
10595 return lowerVectorShuffleWithSHUFPS(DL, MVT::v8f32, RepeatedMask, V1, V2, DAG);
10598 // If we have a single input shuffle with different shuffle patterns in the
10599 // two 128-bit lanes use the variable mask to VPERMILPS.
10600 if (isSingleInputShuffleMask(Mask)) {
10601 SDValue VPermMask[8];
10602 for (int i = 0; i < 8; ++i)
10603 VPermMask[i] = Mask[i] < 0 ? DAG.getUNDEF(MVT::i32)
10604 : DAG.getConstant(Mask[i], MVT::i32);
10605 if (!is128BitLaneCrossingShuffleMask(MVT::v8f32, Mask))
10606 return DAG.getNode(
10607 X86ISD::VPERMILPV, DL, MVT::v8f32, V1,
10608 DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v8i32, VPermMask));
10610 if (Subtarget->hasAVX2())
10611 return DAG.getNode(X86ISD::VPERMV, DL, MVT::v8f32,
10612 DAG.getNode(ISD::BITCAST, DL, MVT::v8f32,
10613 DAG.getNode(ISD::BUILD_VECTOR, DL,
10614 MVT::v8i32, VPermMask)),
V1);
10617 // Otherwise, fall back.
10618 return lowerVectorShuffleAsLanePermuteAndBlend(DL, MVT::v8f32, V1, V2, Mask,
10622 // Try to simplify this by merging 128-bit lanes to enable a lane-based
// shuffle.
10624 if (SDValue Result = lowerVectorShuffleByMerging128BitLanes(
10625 DL, MVT::v8f32, V1, V2, Mask, Subtarget, DAG))
10628 // If we have AVX2 then we always want to lower with a blend because at v8 we
10629 // can fully permute the elements.
10630 if (Subtarget->hasAVX2())
10631 return lowerVectorShuffleAsDecomposedShuffleBlend(DL, MVT::v8f32, V1, V2,
10634 // Otherwise fall back on generic lowering.
10635 return lowerVectorShuffleAsSplitOrBlend(DL, MVT::v8f32, V1, V2, Mask, DAG);
10638 /// \brief Handle lowering of 8-lane 32-bit integer shuffles.
10640 /// This routine is only called when we have AVX2 and thus a reasonable
10641 /// instruction set for v8i32 shuffling.
10642 static SDValue lowerV8I32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
10643 const X86Subtarget *Subtarget,
10644 SelectionDAG &DAG) {
SDLoc DL(Op);
10646 assert(V1.getSimpleValueType() == MVT::v8i32 && "Bad operand type!");
10647 assert(V2.getSimpleValueType() == MVT::v8i32 && "Bad operand type!");
10648 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
10649 ArrayRef<int> Mask = SVOp->getMask();
10650 assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
10651 assert(Subtarget->hasAVX2() && "We can only lower v8i32 with AVX2!");
10653 // Whenever we can lower this as a zext, that instruction is strictly faster
10654 // than any alternative. It also allows us to fold memory operands into the
10655 // shuffle in many cases.
10656 if (SDValue ZExt = lowerVectorShuffleAsZeroOrAnyExtend(DL, MVT::v8i32, V1, V2,
10657 Mask, Subtarget, DAG))
10660 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v8i32, V1, V2, Mask,
10664 // Check for being able to broadcast a single element.
10665 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v8i32, DL, V1,
10666 Mask, Subtarget, DAG))
10669 // If the shuffle mask is repeated in each 128-bit lane we can use more
10670 // efficient instructions that mirror the shuffles across the two 128-bit
// lanes.
10672 SmallVector<int, 4> RepeatedMask;
10673 if (is128BitLaneRepeatedShuffleMask(MVT::v8i32, Mask, RepeatedMask)) {
10674 assert(RepeatedMask.size() == 4 && "Unexpected repeated mask size!");
10675 if (isSingleInputShuffleMask(Mask))
10676 return DAG.getNode(X86ISD::PSHUFD, DL, MVT::v8i32, V1,
10677 getV4X86ShuffleImm8ForMask(RepeatedMask, DAG));
10679 // Use dedicated unpack instructions for masks that match their pattern.
10680 if (isShuffleEquivalent(Mask, 0, 8, 1, 9, 4, 12, 5, 13))
10681 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v8i32, V1, V2);
10682 if (isShuffleEquivalent(Mask, 2, 10, 3, 11, 6, 14, 7, 15))
10683 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v8i32, V1, V2);
10686 // If the shuffle patterns aren't repeated but it is a single input, directly
10687 // generate a cross-lane VPERMD instruction.
10688 if (isSingleInputShuffleMask(Mask)) {
10689 SDValue VPermMask[8];
10690 for (int i = 0; i < 8; ++i)
10691 VPermMask[i] = Mask[i] < 0 ? DAG.getUNDEF(MVT::i32)
10692 : DAG.getConstant(Mask[i], MVT::i32);
10693 return DAG.getNode(
10694 X86ISD::VPERMV, DL, MVT::v8i32,
10695 DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v8i32, VPermMask), V1);
10698 // Try to use bit shift instructions.
10699 if (SDValue Shift = lowerVectorShuffleAsBitShift(
10700 DL, MVT::v8i32, V1, V2, Mask, DAG))
10703 // Try to simplify this by merging 128-bit lanes to enable a lane-based
// shuffle.
10705 if (SDValue Result = lowerVectorShuffleByMerging128BitLanes(
10706 DL, MVT::v8i32, V1, V2, Mask, Subtarget, DAG))
10709 // Otherwise fall back on generic blend lowering.
10710 return lowerVectorShuffleAsDecomposedShuffleBlend(DL, MVT::v8i32, V1, V2,
10714 /// \brief Handle lowering of 16-lane 16-bit integer shuffles.
10716 /// This routine is only called when we have AVX2 and thus a reasonable
10717 /// instruction set for v16i16 shuffling.
10718 static SDValue lowerV16I16VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
10719 const X86Subtarget *Subtarget,
10720 SelectionDAG &DAG) {
SDLoc DL(Op);
10722 assert(V1.getSimpleValueType() == MVT::v16i16 && "Bad operand type!");
10723 assert(V2.getSimpleValueType() == MVT::v16i16 && "Bad operand type!");
10724 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
10725 ArrayRef<int> Mask = SVOp->getMask();
10726 assert(Mask.size() == 16 && "Unexpected mask size for v16 shuffle!");
10727 assert(Subtarget->hasAVX2() && "We can only lower v16i16 with AVX2!");
10729 // Whenever we can lower this as a zext, that instruction is strictly faster
10730 // than any alternative. It also allows us to fold memory operands into the
10731 // shuffle in many cases.
10732 if (SDValue ZExt = lowerVectorShuffleAsZeroOrAnyExtend(DL, MVT::v16i16, V1, V2,
10733 Mask, Subtarget, DAG))
10736 // Check for being able to broadcast a single element.
10737 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v16i16, DL, V1,
10738 Mask, Subtarget, DAG))
10741 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v16i16, V1, V2, Mask,
10745 // Use dedicated unpack instructions for masks that match their pattern.
10746 if (isShuffleEquivalent(Mask,
10747 // First 128-bit lane:
10748 0, 16, 1, 17, 2, 18, 3, 19,
10749 // Second 128-bit lane:
10750 8, 24, 9, 25, 10, 26, 11, 27))
10751 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v16i16, V1, V2);
10752 if (isShuffleEquivalent(Mask,
10753 // First 128-bit lane:
10754 4, 20, 5, 21, 6, 22, 7, 23,
10755 // Second 128-bit lane:
10756 12, 28, 13, 29, 14, 30, 15, 31))
10757 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v16i16, V1, V2);
10759 if (isSingleInputShuffleMask(Mask)) {
10760 // There are no generalized cross-lane shuffle operations available on i16
// element types.
10762 if (is128BitLaneCrossingShuffleMask(MVT::v16i16, Mask))
10763 return lowerVectorShuffleAsLanePermuteAndBlend(DL, MVT::v16i16, V1, V2,
10766 SDValue PSHUFBMask[32];
10767 for (int i = 0; i < 16; ++i) {
10768 if (Mask[i] == -1) {
10769 PSHUFBMask[2 * i] = PSHUFBMask[2 * i + 1] = DAG.getUNDEF(MVT::i8);
continue;
}
10773 int M = i < 8 ? Mask[i] : Mask[i] - 8;
10774 assert(M >= 0 && M < 8 && "Invalid single-input mask!");
10775 PSHUFBMask[2 * i] = DAG.getConstant(2 * M, MVT::i8);
10776 PSHUFBMask[2 * i + 1] = DAG.getConstant(2 * M + 1, MVT::i8);
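// In other words, each 16-bit mask entry expands to a pair of byte selectors
// within its 128-bit lane; e.g. a mask value of 5 in either half becomes the
// control bytes 10 and 11, copying both bytes of word 5 of that lane.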
10778 return DAG.getNode(
10779 ISD::BITCAST, DL, MVT::v16i16,
10781 X86ISD::PSHUFB, DL, MVT::v32i8,
10782 DAG.getNode(ISD::BITCAST, DL, MVT::v32i8, V1),
10783 DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v32i8, PSHUFBMask)));
10786 // Try to use bit shift instructions.
10787 if (SDValue Shift = lowerVectorShuffleAsBitShift(
10788 DL, MVT::v16i16, V1, V2, Mask, DAG))
10791 // Try to simplify this by merging 128-bit lanes to enable a lane-based
// shuffle.
10793 if (SDValue Result = lowerVectorShuffleByMerging128BitLanes(
10794 DL, MVT::v16i16, V1, V2, Mask, Subtarget, DAG))
10797 // Otherwise fall back on generic lowering.
10798 return lowerVectorShuffleAsSplitOrBlend(DL, MVT::v16i16, V1, V2, Mask, DAG);
10801 /// \brief Handle lowering of 32-lane 8-bit integer shuffles.
10803 /// This routine is only called when we have AVX2 and thus a reasonable
10804 /// instruction set for v32i8 shuffling.
10805 static SDValue lowerV32I8VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
10806 const X86Subtarget *Subtarget,
10807 SelectionDAG &DAG) {
SDLoc DL(Op);
10809 assert(V1.getSimpleValueType() == MVT::v32i8 && "Bad operand type!");
10810 assert(V2.getSimpleValueType() == MVT::v32i8 && "Bad operand type!");
10811 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
10812 ArrayRef<int> Mask = SVOp->getMask();
10813 assert(Mask.size() == 32 && "Unexpected mask size for v32 shuffle!");
10814 assert(Subtarget->hasAVX2() && "We can only lower v32i8 with AVX2!");
10816 // Whenever we can lower this as a zext, that instruction is strictly faster
10817 // than any alternative. It also allows us to fold memory operands into the
10818 // shuffle in many cases.
10819 if (SDValue ZExt = lowerVectorShuffleAsZeroOrAnyExtend(DL, MVT::v32i8, V1, V2,
10820 Mask, Subtarget, DAG))
10823 // Check for being able to broadcast a single element.
10824 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v32i8, DL, V1,
10825 Mask, Subtarget, DAG))
10828 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v32i8, V1, V2, Mask,
10832 // Use dedicated unpack instructions for masks that match their pattern.
10833 // Note that these are repeated 128-bit lane unpacks, not unpacks across all 32 bytes of the vector.
10835 if (isShuffleEquivalent(
10837 // First 128-bit lane:
10838 0, 32, 1, 33, 2, 34, 3, 35, 4, 36, 5, 37, 6, 38, 7, 39,
10839 // Second 128-bit lane:
10840 16, 48, 17, 49, 18, 50, 19, 51, 20, 52, 21, 53, 22, 54, 23, 55))
10841 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v32i8, V1, V2);
10842 if (isShuffleEquivalent(
10844 // First 128-bit lane:
10845 8, 40, 9, 41, 10, 42, 11, 43, 12, 44, 13, 45, 14, 46, 15, 47,
10846 // Second 128-bit lane:
10847 24, 56, 25, 57, 26, 58, 27, 59, 28, 60, 29, 61, 30, 62, 31, 63))
10848 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v32i8, V1, V2);
10850 if (isSingleInputShuffleMask(Mask)) {
10851 // There are no generalized cross-lane shuffle operations available on i8 element types.
10853 if (is128BitLaneCrossingShuffleMask(MVT::v32i8, Mask))
10854 return lowerVectorShuffleAsLanePermuteAndBlend(DL, MVT::v32i8, V1, V2,
10857 SDValue PSHUFBMask[32];
10858 for (int i = 0; i < 32; ++i)
10861 ? DAG.getUNDEF(MVT::i8)
10862 : DAG.getConstant(Mask[i] < 16 ? Mask[i] : Mask[i] - 16, MVT::i8);
10864 return DAG.getNode(
10865 X86ISD::PSHUFB, DL, MVT::v32i8, V1,
10866 DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v32i8, PSHUFBMask));
10869 // Try to use bit shift instructions.
10870 if (SDValue Shift = lowerVectorShuffleAsBitShift(
10871 DL, MVT::v32i8, V1, V2, Mask, DAG))
10874 // Try to simplify this by merging 128-bit lanes to enable a lane-based
10876 if (SDValue Result = lowerVectorShuffleByMerging128BitLanes(
10877 DL, MVT::v32i8, V1, V2, Mask, Subtarget, DAG))
10880 // Otherwise fall back on generic lowering.
10881 return lowerVectorShuffleAsSplitOrBlend(DL, MVT::v32i8, V1, V2, Mask, DAG);
10884 /// \brief High-level routine to lower various 256-bit x86 vector shuffles.
10886 /// This routine either breaks down the specific type of a 256-bit x86 vector
10887 /// shuffle or splits it into two 128-bit shuffles and fuses the results back
10888 /// together based on the available instructions.
10889 static SDValue lower256BitVectorShuffle(SDValue Op, SDValue V1, SDValue V2,
10890 MVT VT, const X86Subtarget *Subtarget,
10891 SelectionDAG &DAG) {
10893 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
10894 ArrayRef<int> Mask = SVOp->getMask();
10896 // There is a really nice hard cut-over between AVX1 and AVX2 that means we can
10897 // check for those subtargets here and avoid much of the subtarget querying in
10898 // the per-vector-type lowering routines. With AVX1 we have essentially *zero*
10899 // ability to manipulate a 256-bit vector with integer types. Since we'll use
10900 // floating point types there eventually, just immediately cast everything to
10901 // a float and operate entirely in that domain.
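  // E.g. with AVX1 only, a v8i32 shuffle is bitcast to v8f32, shuffled in the
  // floating point domain (where instructions such as VSHUFPS and VPERM2F128
  // are available), and the result is bitcast back to v8i32; sub-32-bit element
  // types have no floating point equivalent and are split into 128-bit halves
  // instead.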
10902 if (VT.isInteger() && !Subtarget->hasAVX2()) {
10903 int ElementBits = VT.getScalarSizeInBits();
10904 if (ElementBits < 32)
10905 // No floating point type available, decompose into 128-bit vectors.
10906 return splitAndLowerVectorShuffle(DL, VT, V1, V2, Mask, DAG);
10908 MVT FpVT = MVT::getVectorVT(MVT::getFloatingPointVT(ElementBits),
10909 VT.getVectorNumElements());
10910 V1 = DAG.getNode(ISD::BITCAST, DL, FpVT, V1);
10911 V2 = DAG.getNode(ISD::BITCAST, DL, FpVT, V2);
10912 return DAG.getNode(ISD::BITCAST, DL, VT,
10913 DAG.getVectorShuffle(FpVT, DL, V1, V2, Mask));
10916 switch (VT.SimpleTy) {
10918 return lowerV4F64VectorShuffle(Op, V1, V2, Subtarget, DAG);
10920 return lowerV4I64VectorShuffle(Op, V1, V2, Subtarget, DAG);
10922 return lowerV8F32VectorShuffle(Op, V1, V2, Subtarget, DAG);
10924 return lowerV8I32VectorShuffle(Op, V1, V2, Subtarget, DAG);
10926 return lowerV16I16VectorShuffle(Op, V1, V2, Subtarget, DAG);
10928 return lowerV32I8VectorShuffle(Op, V1, V2, Subtarget, DAG);
10931 llvm_unreachable("Not a valid 256-bit x86 vector type!");
10935 /// \brief Handle lowering of 8-lane 64-bit floating point shuffles.
10936 static SDValue lowerV8F64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
10937 const X86Subtarget *Subtarget,
10938 SelectionDAG &DAG) {
10940 assert(V1.getSimpleValueType() == MVT::v8f64 && "Bad operand type!");
10941 assert(V2.getSimpleValueType() == MVT::v8f64 && "Bad operand type!");
10942 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
10943 ArrayRef<int> Mask = SVOp->getMask();
10944 assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
10946 // X86 has dedicated unpack instructions that can handle specific blend
10947 // operations: UNPCKH and UNPCKL.
10948 if (isShuffleEquivalent(Mask, 0, 8, 2, 10, 4, 12, 6, 14))
10949 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v8f64, V1, V2);
10950 if (isShuffleEquivalent(Mask, 1, 9, 3, 11, 5, 13, 7, 15))
10951 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v8f64, V1, V2);
10953 // FIXME: Implement direct support for this type!
10954 return splitAndLowerVectorShuffle(DL, MVT::v8f64, V1, V2, Mask, DAG);
10957 /// \brief Handle lowering of 16-lane 32-bit floating point shuffles.
10958 static SDValue lowerV16F32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
10959 const X86Subtarget *Subtarget,
10960 SelectionDAG &DAG) {
10962 assert(V1.getSimpleValueType() == MVT::v16f32 && "Bad operand type!");
10963 assert(V2.getSimpleValueType() == MVT::v16f32 && "Bad operand type!");
10964 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
10965 ArrayRef<int> Mask = SVOp->getMask();
10966 assert(Mask.size() == 16 && "Unexpected mask size for v16 shuffle!");
10968 // Use dedicated unpack instructions for masks that match their pattern.
10969 if (isShuffleEquivalent(Mask,
10970 0, 16, 1, 17, 4, 20, 5, 21,
10971 8, 24, 9, 25, 12, 28, 13, 29))
10972 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v16f32, V1, V2);
10973 if (isShuffleEquivalent(Mask,
10974 2, 18, 3, 19, 6, 22, 7, 23,
10975 10, 26, 11, 27, 14, 30, 15, 31))
10976 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v16f32, V1, V2);
10978 // FIXME: Implement direct support for this type!
10979 return splitAndLowerVectorShuffle(DL, MVT::v16f32, V1, V2, Mask, DAG);
10982 /// \brief Handle lowering of 8-lane 64-bit integer shuffles.
10983 static SDValue lowerV8I64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
10984 const X86Subtarget *Subtarget,
10985 SelectionDAG &DAG) {
10987 assert(V1.getSimpleValueType() == MVT::v8i64 && "Bad operand type!");
10988 assert(V2.getSimpleValueType() == MVT::v8i64 && "Bad operand type!");
10989 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
10990 ArrayRef<int> Mask = SVOp->getMask();
10991 assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
10993 // X86 has dedicated unpack instructions that can handle specific blend
10994 // operations: UNPCKH and UNPCKL.
10995 if (isShuffleEquivalent(Mask, 0, 8, 2, 10, 4, 12, 6, 14))
10996 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v8i64, V1, V2);
10997 if (isShuffleEquivalent(Mask, 1, 9, 3, 11, 5, 13, 7, 15))
10998 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v8i64, V1, V2);
11000 // FIXME: Implement direct support for this type!
11001 return splitAndLowerVectorShuffle(DL, MVT::v8i64, V1, V2, Mask, DAG);
11004 /// \brief Handle lowering of 16-lane 32-bit integer shuffles.
11005 static SDValue lowerV16I32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
11006 const X86Subtarget *Subtarget,
11007 SelectionDAG &DAG) {
11009 assert(V1.getSimpleValueType() == MVT::v16i32 && "Bad operand type!");
11010 assert(V2.getSimpleValueType() == MVT::v16i32 && "Bad operand type!");
11011 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
11012 ArrayRef<int> Mask = SVOp->getMask();
11013 assert(Mask.size() == 16 && "Unexpected mask size for v16 shuffle!");
11015 // Use dedicated unpack instructions for masks that match their pattern.
11016 if (isShuffleEquivalent(Mask,
11017 0, 16, 1, 17, 4, 20, 5, 21,
11018 8, 24, 9, 25, 12, 28, 13, 29))
11019 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v16i32, V1, V2);
11020 if (isShuffleEquivalent(Mask,
11021 2, 18, 3, 19, 6, 22, 7, 23,
11022 10, 26, 11, 27, 14, 30, 15, 31))
11023 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v16i32, V1, V2);
11025 // FIXME: Implement direct support for this type!
11026 return splitAndLowerVectorShuffle(DL, MVT::v16i32, V1, V2, Mask, DAG);
11029 /// \brief Handle lowering of 32-lane 16-bit integer shuffles.
11030 static SDValue lowerV32I16VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
11031 const X86Subtarget *Subtarget,
11032 SelectionDAG &DAG) {
11034 assert(V1.getSimpleValueType() == MVT::v32i16 && "Bad operand type!");
11035 assert(V2.getSimpleValueType() == MVT::v32i16 && "Bad operand type!");
11036 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
11037 ArrayRef<int> Mask = SVOp->getMask();
11038 assert(Mask.size() == 32 && "Unexpected mask size for v32 shuffle!");
11039 assert(Subtarget->hasBWI() && "We can only lower v32i16 with AVX-512-BWI!");
11041 // FIXME: Implement direct support for this type!
11042 return splitAndLowerVectorShuffle(DL, MVT::v32i16, V1, V2, Mask, DAG);
11045 /// \brief Handle lowering of 64-lane 8-bit integer shuffles.
11046 static SDValue lowerV64I8VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
11047 const X86Subtarget *Subtarget,
11048 SelectionDAG &DAG) {
11050 assert(V1.getSimpleValueType() == MVT::v64i8 && "Bad operand type!");
11051 assert(V2.getSimpleValueType() == MVT::v64i8 && "Bad operand type!");
11052 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
11053 ArrayRef<int> Mask = SVOp->getMask();
11054 assert(Mask.size() == 64 && "Unexpected mask size for v64 shuffle!");
11055 assert(Subtarget->hasBWI() && "We can only lower v64i8 with AVX-512-BWI!");
11057 // FIXME: Implement direct support for this type!
11058 return splitAndLowerVectorShuffle(DL, MVT::v64i8, V1, V2, Mask, DAG);
11061 /// \brief High-level routine to lower various 512-bit x86 vector shuffles.
11063 /// This routine either breaks down the specific type of a 512-bit x86 vector
11064 /// shuffle or splits it into two 256-bit shuffles and fuses the results back
11065 /// together based on the available instructions.
11066 static SDValue lower512BitVectorShuffle(SDValue Op, SDValue V1, SDValue V2,
11067 MVT VT, const X86Subtarget *Subtarget,
11068 SelectionDAG &DAG) {
11070 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
11071 ArrayRef<int> Mask = SVOp->getMask();
11072 assert(Subtarget->hasAVX512() &&
11073 "Cannot lower 512-bit vectors w/ basic ISA!");
11075 // Check for being able to broadcast a single element.
11076 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(VT.SimpleTy, DL, V1,
11077 Mask, Subtarget, DAG))
11080 // Dispatch to each element type for lowering. If we don't have support for
11081 // specific element type shuffles at 512 bits, immediately split them and
11082 // lower them. Each lowering routine of a given type is allowed to assume that
11083 // the requisite ISA extensions for that element type are available.
11084 switch (VT.SimpleTy) {
11086 return lowerV8F64VectorShuffle(Op, V1, V2, Subtarget, DAG);
11088 return lowerV16F32VectorShuffle(Op, V1, V2, Subtarget, DAG);
11090 return lowerV8I64VectorShuffle(Op, V1, V2, Subtarget, DAG);
11092 return lowerV16I32VectorShuffle(Op, V1, V2, Subtarget, DAG);
11094 if (Subtarget->hasBWI())
11095 return lowerV32I16VectorShuffle(Op, V1, V2, Subtarget, DAG);
11098 if (Subtarget->hasBWI())
11099 return lowerV64I8VectorShuffle(Op, V1, V2, Subtarget, DAG);
11103 llvm_unreachable("Not a valid 512-bit x86 vector type!");
11106 // Otherwise fall back on splitting.
11107 return splitAndLowerVectorShuffle(DL, VT, V1, V2, Mask, DAG);
11110 /// \brief Top-level lowering for x86 vector shuffles.
11112 /// This handles decomposition, canonicalization, and lowering of all x86
11113 /// vector shuffles. Most of the specific lowering strategies are encapsulated
11114 /// above in helper routines. The canonicalization attempts to widen shuffles
11115 /// to involve fewer lanes of wider elements, consolidate symmetric patterns
11116 /// s.t. only one of the two inputs needs to be tested, etc.
11117 static SDValue lowerVectorShuffle(SDValue Op, const X86Subtarget *Subtarget,
11118 SelectionDAG &DAG) {
11119 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
11120 ArrayRef<int> Mask = SVOp->getMask();
11121 SDValue V1 = Op.getOperand(0);
11122 SDValue V2 = Op.getOperand(1);
11123 MVT VT = Op.getSimpleValueType();
11124 int NumElements = VT.getVectorNumElements();
11127 assert(VT.getSizeInBits() != 64 && "Can't lower MMX shuffles");
11129 bool V1IsUndef = V1.getOpcode() == ISD::UNDEF;
11130 bool V2IsUndef = V2.getOpcode() == ISD::UNDEF;
11131 if (V1IsUndef && V2IsUndef)
11132 return DAG.getUNDEF(VT);
11134 // When we create a shuffle node we put the UNDEF node to second operand,
11135 // but in some cases the first operand may be transformed to UNDEF.
11136 // In this case we should just commute the node.
11138 return DAG.getCommutedVectorShuffle(*SVOp);
11140 // Check for non-undef masks pointing at an undef vector and make the masks
11141 // undef as well. This makes it easier to match the shuffle based solely on
11145 if (M >= NumElements) {
11146 SmallVector<int, 8> NewMask(Mask.begin(), Mask.end());
11147 for (int &M : NewMask)
11148 if (M >= NumElements)
11150 return DAG.getVectorShuffle(VT, dl, V1, V2, NewMask);
11153 // Try to collapse shuffles into using a vector type with fewer elements but
11154 // wider element types. We cap this to not form integers or floating point
11155 // elements wider than 64 bits, but it might be interesting to form i128
11156 // integers to handle flipping the low and high halves of AVX 256-bit vectors.
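  // E.g. a v4i32 shuffle with mask <0, 1, 4, 5> can be widened to a v2i64
  // shuffle with mask <0, 2>, provided v2i64 is legal for the subtarget.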
11157 SmallVector<int, 16> WidenedMask;
11158 if (VT.getScalarSizeInBits() < 64 &&
11159 canWidenShuffleElements(Mask, WidenedMask)) {
11160 MVT NewEltVT = VT.isFloatingPoint()
11161 ? MVT::getFloatingPointVT(VT.getScalarSizeInBits() * 2)
11162 : MVT::getIntegerVT(VT.getScalarSizeInBits() * 2);
11163 MVT NewVT = MVT::getVectorVT(NewEltVT, VT.getVectorNumElements() / 2);
11164 // Make sure that the new vector type is legal. For example, v2f64 isn't
11166 if (DAG.getTargetLoweringInfo().isTypeLegal(NewVT)) {
11167 V1 = DAG.getNode(ISD::BITCAST, dl, NewVT, V1);
11168 V2 = DAG.getNode(ISD::BITCAST, dl, NewVT, V2);
11169 return DAG.getNode(ISD::BITCAST, dl, VT,
11170 DAG.getVectorShuffle(NewVT, dl, V1, V2, WidenedMask));
11174 int NumV1Elements = 0, NumUndefElements = 0, NumV2Elements = 0;
11175 for (int M : SVOp->getMask())
11177 ++NumUndefElements;
11178 else if (M < NumElements)
11183 // Commute the shuffle as needed such that more elements come from V1 than
11184 // V2. This allows us to match the shuffle pattern strictly on how many
11185 // elements come from V1 without handling the symmetric cases.
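  // E.g. a v4 mask <4, 5, 6, 3> takes three elements from V2; commuting
  // rewrites it as <0, 1, 2, 7> with the operands swapped.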
11186 if (NumV2Elements > NumV1Elements)
11187 return DAG.getCommutedVectorShuffle(*SVOp);
11189 // When the number of V1 and V2 elements is the same, try to minimize the
11190 // number of uses of V2 in the low half of the vector. When that is tied,
11191 // ensure that the sum of indices for V1 is equal to or lower than the sum of
11192 // indices for V2. When those are equal, try to ensure that the number of odd
11193 // indices for V1 is lower than the number of odd indices for V2.
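  // E.g. the v4 mask <0, 5, 2, 7> is kept as-is while its commuted form
  // <4, 1, 6, 3> would be flipped: both tie on the earlier heuristics, but the
  // sum of result positions drawn from V1 (0 + 2) is smaller in the former.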
11194 if (NumV1Elements == NumV2Elements) {
11195 int LowV1Elements = 0, LowV2Elements = 0;
11196 for (int M : SVOp->getMask().slice(0, NumElements / 2))
11197 if (M >= NumElements)
11201 if (LowV2Elements > LowV1Elements) {
11202 return DAG.getCommutedVectorShuffle(*SVOp);
11203 } else if (LowV2Elements == LowV1Elements) {
11204 int SumV1Indices = 0, SumV2Indices = 0;
11205 for (int i = 0, Size = SVOp->getMask().size(); i < Size; ++i)
11206 if (SVOp->getMask()[i] >= NumElements)
11208 else if (SVOp->getMask()[i] >= 0)
11210 if (SumV2Indices < SumV1Indices) {
11211 return DAG.getCommutedVectorShuffle(*SVOp);
11212 } else if (SumV2Indices == SumV1Indices) {
11213 int NumV1OddIndices = 0, NumV2OddIndices = 0;
11214 for (int i = 0, Size = SVOp->getMask().size(); i < Size; ++i)
11215 if (SVOp->getMask()[i] >= NumElements)
11216 NumV2OddIndices += i % 2;
11217 else if (SVOp->getMask()[i] >= 0)
11218 NumV1OddIndices += i % 2;
11219 if (NumV2OddIndices < NumV1OddIndices)
11220 return DAG.getCommutedVectorShuffle(*SVOp);
11225 // For each vector width, delegate to a specialized lowering routine.
11226 if (VT.getSizeInBits() == 128)
11227 return lower128BitVectorShuffle(Op, V1, V2, VT, Subtarget, DAG);
11229 if (VT.getSizeInBits() == 256)
11230 return lower256BitVectorShuffle(Op, V1, V2, VT, Subtarget, DAG);
11232 // Force AVX-512 vectors to be scalarized for now.
11233 // FIXME: Implement AVX-512 support!
11234 if (VT.getSizeInBits() == 512)
11235 return lower512BitVectorShuffle(Op, V1, V2, VT, Subtarget, DAG);
11237 llvm_unreachable("Unimplemented!");
11241 //===----------------------------------------------------------------------===//
11242 // Legacy vector shuffle lowering
11244 // This code is the legacy code handling vector shuffles until the above
11245 // replaces its functionality and performance.
11246 //===----------------------------------------------------------------------===//
11248 static bool isBlendMask(ArrayRef<int> MaskVals, MVT VT, bool hasSSE41,
11249 bool hasInt256, unsigned *MaskOut = nullptr) {
11250 MVT EltVT = VT.getVectorElementType();
11252 // There is no blend with immediate in AVX-512.
11253 if (VT.is512BitVector())
11256 if (!hasSSE41 || EltVT == MVT::i8)
11258 if (!hasInt256 && VT == MVT::v16i16)
11261 unsigned MaskValue = 0;
11262 unsigned NumElems = VT.getVectorNumElements();
11263 // There are 2 lanes if (NumElems > 8), and 1 lane otherwise.
11264 unsigned NumLanes = (NumElems - 1) / 8 + 1;
11265 unsigned NumElemsInLane = NumElems / NumLanes;
11267 // Blend for v16i16 should be symmetric for both lanes.
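  // E.g. a v8i32 mask <0, 9, 2, 11, 4, 13, 6, 15> blends the odd elements in
  // from V2, so MaskValue becomes 0b10101010 (bit i is set when result element
  // i is taken from V2).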
11268 for (unsigned i = 0; i < NumElemsInLane; ++i) {
11270 int SndLaneEltIdx = (NumLanes == 2) ? MaskVals[i + NumElemsInLane] : -1;
11271 int EltIdx = MaskVals[i];
11273 if ((EltIdx < 0 || EltIdx == (int)i) &&
11274 (SndLaneEltIdx < 0 || SndLaneEltIdx == (int)(i + NumElemsInLane)))
11277 if (((unsigned)EltIdx == (i + NumElems)) &&
11278 (SndLaneEltIdx < 0 ||
11279 (unsigned)SndLaneEltIdx == i + NumElems + NumElemsInLane))
11280 MaskValue |= (1 << i);
11286 *MaskOut = MaskValue;
11290 // Try to lower a shuffle node into a simple blend instruction.
11291 // This function assumes isBlendMask returns true for this
11292 // ShuffleVectorSDNode
11293 static SDValue LowerVECTOR_SHUFFLEtoBlend(ShuffleVectorSDNode *SVOp,
11294 unsigned MaskValue,
11295 const X86Subtarget *Subtarget,
11296 SelectionDAG &DAG) {
11297 MVT VT = SVOp->getSimpleValueType(0);
11298 MVT EltVT = VT.getVectorElementType();
11299 assert(isBlendMask(SVOp->getMask(), VT, Subtarget->hasSSE41(),
11300 Subtarget->hasInt256() && "Trying to lower a "
11301 "VECTOR_SHUFFLE to a Blend but "
11302 "with the wrong mask"));
11303 SDValue V1 = SVOp->getOperand(0);
11304 SDValue V2 = SVOp->getOperand(1);
11306 unsigned NumElems = VT.getVectorNumElements();
11308   // Convert i32 vectors to floating point when AVX2 is not available.
11309   // AVX2 introduced the VPBLENDD instruction for 128 and 256-bit vectors.
11311 if (EltVT == MVT::i64 || (EltVT == MVT::i32 && !Subtarget->hasInt256())) {
11312 BlendVT = MVT::getVectorVT(MVT::getFloatingPointVT(EltVT.getSizeInBits()),
11314 V1 = DAG.getNode(ISD::BITCAST, dl, VT, V1);
11315 V2 = DAG.getNode(ISD::BITCAST, dl, VT, V2);
11318 SDValue Ret = DAG.getNode(X86ISD::BLENDI, dl, BlendVT, V1, V2,
11319 DAG.getConstant(MaskValue, MVT::i32));
11320 return DAG.getNode(ISD::BITCAST, dl, VT, Ret);
11323 /// In vector type \p VT, return true if the element at index \p InputIdx
11324 /// falls on a different 128-bit lane than \p OutputIdx.
11325 static bool ShuffleCrosses128bitLane(MVT VT, unsigned InputIdx,
11326 unsigned OutputIdx) {
11327 unsigned EltSize = VT.getVectorElementType().getSizeInBits();
11328 return InputIdx * EltSize / 128 != OutputIdx * EltSize / 128;
11331 /// Generate a PSHUFB if possible. Selects elements from \p V1 according to
11332 /// \p MaskVals. MaskVals[OutputIdx] = InputIdx specifies that we want to
11333 /// shuffle the element at InputIdx in V1 to OutputIdx in the result. If \p
11334 /// MaskVals refers to elements outside of \p V1 or is undef (-1), insert a zero.
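/// For example, with v8i16 elements, MaskVals[0] == 3 expands to the pshufb
/// control bytes {6, 7} for result bytes 0 and 1, while an undef or
/// out-of-range index expands to {0x80, 0x80}, which zeroes those bytes.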
11336 static SDValue getPSHUFB(ArrayRef<int> MaskVals, SDValue V1, SDLoc &dl,
11337 SelectionDAG &DAG) {
11338 MVT VT = V1.getSimpleValueType();
11339 assert(VT.is128BitVector() || VT.is256BitVector());
11341 MVT EltVT = VT.getVectorElementType();
11342 unsigned EltSizeInBytes = EltVT.getSizeInBits() / 8;
11343 unsigned NumElts = VT.getVectorNumElements();
11345 SmallVector<SDValue, 32> PshufbMask;
11346 for (unsigned OutputIdx = 0; OutputIdx < NumElts; ++OutputIdx) {
11347 int InputIdx = MaskVals[OutputIdx];
11348 unsigned InputByteIdx;
11350 if (InputIdx < 0 || NumElts <= (unsigned)InputIdx)
11351 InputByteIdx = 0x80;
11353 // Cross lane is not allowed.
11354 if (ShuffleCrosses128bitLane(VT, InputIdx, OutputIdx))
11356 InputByteIdx = InputIdx * EltSizeInBytes;
11357       // Index is a byte offset within the 128-bit lane.
11358 InputByteIdx &= 0xf;
11361 for (unsigned j = 0; j < EltSizeInBytes; ++j) {
11362 PshufbMask.push_back(DAG.getConstant(InputByteIdx, MVT::i8));
11363 if (InputByteIdx != 0x80)
11368 MVT ShufVT = MVT::getVectorVT(MVT::i8, PshufbMask.size());
11370 V1 = DAG.getNode(ISD::BITCAST, dl, ShufVT, V1);
11371 return DAG.getNode(X86ISD::PSHUFB, dl, ShufVT, V1,
11372 DAG.getNode(ISD::BUILD_VECTOR, dl, ShufVT, PshufbMask));
11375 // v8i16 shuffles - Prefer shuffles in the following order:
11376 // 1. [all] pshuflw, pshufhw, optional move
11377 // 2. [ssse3] 1 x pshufb
11378 // 3. [ssse3] 2 x pshufb + 1 x por
11379 // 4. [all] mov + pshuflw + pshufhw + N x (pextrw + pinsrw)
11381 LowerVECTOR_SHUFFLEv8i16(SDValue Op, const X86Subtarget *Subtarget,
11382 SelectionDAG &DAG) {
11383 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
11384 SDValue V1 = SVOp->getOperand(0);
11385 SDValue V2 = SVOp->getOperand(1);
11387 SmallVector<int, 8> MaskVals;
11389 // Determine if more than 1 of the words in each of the low and high quadwords
11390 // of the result come from the same quadword of one of the two inputs. Undef
11391 // mask values count as coming from any quadword, for better codegen.
11393 // Lo/HiQuad[i] = j indicates that j words of the low/high result quadword
11394 // come from the ith input quad. For i, 0 and 1 refer to V1, 2 and 3 refer to V2.
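  // E.g. for the mask <0, 1, 2, 3, 12, 13, 14, 15>, LoQuad ends up {4, 0, 0, 0}
  // and HiQuad ends up {0, 0, 0, 4}, giving BestLoQuad = 0 (low quad of V1) and
  // BestHiQuad = 3 (high quad of V2).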
11395 unsigned LoQuad[] = { 0, 0, 0, 0 };
11396 unsigned HiQuad[] = { 0, 0, 0, 0 };
11397 // Indices of quads used.
11398 std::bitset<4> InputQuads;
11399 for (unsigned i = 0; i < 8; ++i) {
11400 unsigned *Quad = i < 4 ? LoQuad : HiQuad;
11401 int EltIdx = SVOp->getMaskElt(i);
11402 MaskVals.push_back(EltIdx);
11410 ++Quad[EltIdx / 4];
11411 InputQuads.set(EltIdx / 4);
11414 int BestLoQuad = -1;
11415 unsigned MaxQuad = 1;
11416 for (unsigned i = 0; i < 4; ++i) {
11417 if (LoQuad[i] > MaxQuad) {
11419 MaxQuad = LoQuad[i];
11423 int BestHiQuad = -1;
11425 for (unsigned i = 0; i < 4; ++i) {
11426 if (HiQuad[i] > MaxQuad) {
11428 MaxQuad = HiQuad[i];
11432 // For SSSE3, If all 8 words of the result come from only 1 quadword of each
11433 // of the two input vectors, shuffle them into one input vector so only a
11434 // single pshufb instruction is necessary. If there are more than 2 input
11435 // quads, disable the next transformation since it does not help SSSE3.
11436 bool V1Used = InputQuads[0] || InputQuads[1];
11437 bool V2Used = InputQuads[2] || InputQuads[3];
11438 if (Subtarget->hasSSSE3()) {
11439 if (InputQuads.count() == 2 && V1Used && V2Used) {
11440 BestLoQuad = InputQuads[0] ? 0 : 1;
11441 BestHiQuad = InputQuads[2] ? 2 : 3;
11443 if (InputQuads.count() > 2) {
11449 // If BestLoQuad or BestHiQuad are set, shuffle the quads together and update
11450 // the shuffle mask. If a quad is scored as -1, that means that it contains
11451 // words from all 4 input quadwords.
11453 if (BestLoQuad >= 0 || BestHiQuad >= 0) {
11455 BestLoQuad < 0 ? 0 : BestLoQuad,
11456 BestHiQuad < 0 ? 1 : BestHiQuad
11458 NewV = DAG.getVectorShuffle(MVT::v2i64, dl,
11459 DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, V1),
11460 DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, V2), &MaskV[0]);
11461 NewV = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, NewV);
11463 // Rewrite the MaskVals and assign NewV to V1 if NewV now contains all the
11464 // source words for the shuffle, to aid later transformations.
11465 bool AllWordsInNewV = true;
11466 bool InOrder[2] = { true, true };
11467 for (unsigned i = 0; i != 8; ++i) {
11468 int idx = MaskVals[i];
11470 InOrder[i/4] = false;
11471 if (idx < 0 || (idx/4) == BestLoQuad || (idx/4) == BestHiQuad)
11473 AllWordsInNewV = false;
11477 bool pshuflw = AllWordsInNewV, pshufhw = AllWordsInNewV;
11478 if (AllWordsInNewV) {
11479 for (int i = 0; i != 8; ++i) {
11480 int idx = MaskVals[i];
11483 idx = MaskVals[i] = (idx / 4) == BestLoQuad ? (idx & 3) : (idx & 3) + 4;
11484 if ((idx != i) && idx < 4)
11486 if ((idx != i) && idx > 3)
11495 // If we've eliminated the use of V2, and the new mask is a pshuflw or
11496 // pshufhw, that's as cheap as it gets. Return the new shuffle.
11497 if ((pshufhw && InOrder[0]) || (pshuflw && InOrder[1])) {
11498 unsigned Opc = pshufhw ? X86ISD::PSHUFHW : X86ISD::PSHUFLW;
11499 unsigned TargetMask = 0;
11500 NewV = DAG.getVectorShuffle(MVT::v8i16, dl, NewV,
11501 DAG.getUNDEF(MVT::v8i16), &MaskVals[0]);
11502 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(NewV.getNode());
11503 TargetMask = pshufhw ? getShufflePSHUFHWImmediate(SVOp):
11504 getShufflePSHUFLWImmediate(SVOp);
11505 V1 = NewV.getOperand(0);
11506 return getTargetShuffleNode(Opc, dl, MVT::v8i16, V1, TargetMask, DAG);
11510 // Promote splats to a larger type which usually leads to more efficient code.
11511 // FIXME: Is this true if pshufb is available?
11512 if (SVOp->isSplat())
11513 return PromoteSplat(SVOp, DAG);
11515 // If we have SSSE3, and all words of the result are from 1 input vector,
11516 // case 2 is generated, otherwise case 3 is generated. If no SSSE3
11517 // is present, fall back to case 4.
11518 if (Subtarget->hasSSSE3()) {
11519 SmallVector<SDValue,16> pshufbMask;
11521 // If we have elements from both input vectors, set the high bit of the
11522 // shuffle mask element to zero out elements that come from V2 in the V1
11523 // mask, and elements that come from V1 in the V2 mask, so that the two
11524 // results can be OR'd together.
11525 bool TwoInputs = V1Used && V2Used;
11526 V1 = getPSHUFB(MaskVals, V1, dl, DAG);
11528 return DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V1);
11530 // Calculate the shuffle mask for the second input, shuffle it, and
11531 // OR it with the first shuffled input.
11532 CommuteVectorShuffleMask(MaskVals, 8);
11533 V2 = getPSHUFB(MaskVals, V2, dl, DAG);
11534 V1 = DAG.getNode(ISD::OR, dl, MVT::v16i8, V1, V2);
11535 return DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V1);
11538 // If BestLoQuad >= 0, generate a pshuflw to put the low elements in order,
11539 // and update MaskVals with new element order.
11540 std::bitset<8> InOrder;
11541 if (BestLoQuad >= 0) {
11542 int MaskV[] = { -1, -1, -1, -1, 4, 5, 6, 7 };
11543 for (int i = 0; i != 4; ++i) {
11544 int idx = MaskVals[i];
11547 } else if ((idx / 4) == BestLoQuad) {
11548 MaskV[i] = idx & 3;
11552 NewV = DAG.getVectorShuffle(MVT::v8i16, dl, NewV, DAG.getUNDEF(MVT::v8i16),
11555 if (NewV.getOpcode() == ISD::VECTOR_SHUFFLE && Subtarget->hasSSE2()) {
11556 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(NewV.getNode());
11557 NewV = getTargetShuffleNode(X86ISD::PSHUFLW, dl, MVT::v8i16,
11558 NewV.getOperand(0),
11559 getShufflePSHUFLWImmediate(SVOp), DAG);
11563 // If BestHi >= 0, generate a pshufhw to put the high elements in order,
11564 // and update MaskVals with the new element order.
11565 if (BestHiQuad >= 0) {
11566 int MaskV[] = { 0, 1, 2, 3, -1, -1, -1, -1 };
11567 for (unsigned i = 4; i != 8; ++i) {
11568 int idx = MaskVals[i];
11571 } else if ((idx / 4) == BestHiQuad) {
11572 MaskV[i] = (idx & 3) + 4;
11576 NewV = DAG.getVectorShuffle(MVT::v8i16, dl, NewV, DAG.getUNDEF(MVT::v8i16),
11579 if (NewV.getOpcode() == ISD::VECTOR_SHUFFLE && Subtarget->hasSSE2()) {
11580 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(NewV.getNode());
11581 NewV = getTargetShuffleNode(X86ISD::PSHUFHW, dl, MVT::v8i16,
11582 NewV.getOperand(0),
11583 getShufflePSHUFHWImmediate(SVOp), DAG);
11587 // In case BestHi & BestLo were both -1, which means each quadword has a word
11588 // from each of the four input quadwords, calculate the InOrder bitvector now
11589 // before falling through to the insert/extract cleanup.
11590 if (BestLoQuad == -1 && BestHiQuad == -1) {
11592 for (int i = 0; i != 8; ++i)
11593 if (MaskVals[i] < 0 || MaskVals[i] == i)
11597 // The other elements are put in the right place using pextrw and pinsrw.
11598 for (unsigned i = 0; i != 8; ++i) {
11601 int EltIdx = MaskVals[i];
11604 SDValue ExtOp = (EltIdx < 8) ?
11605 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16, V1,
11606 DAG.getIntPtrConstant(EltIdx)) :
11607 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16, V2,
11608 DAG.getIntPtrConstant(EltIdx - 8));
11609 NewV = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v8i16, NewV, ExtOp,
11610 DAG.getIntPtrConstant(i));
11615 /// \brief v16i16 shuffles
11617 /// FIXME: We only support generation of a single pshufb currently. We can
11618 /// generalize the other applicable cases from LowerVECTOR_SHUFFLEv8i16 as
11619 /// well (e.g. 2 x pshufb + 1 x por).
11621 LowerVECTOR_SHUFFLEv16i16(SDValue Op, SelectionDAG &DAG) {
11622 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
11623 SDValue V1 = SVOp->getOperand(0);
11624 SDValue V2 = SVOp->getOperand(1);
11627 if (V2.getOpcode() != ISD::UNDEF)
11630 SmallVector<int, 16> MaskVals(SVOp->getMask().begin(), SVOp->getMask().end());
11631 return getPSHUFB(MaskVals, V1, dl, DAG);
11634 // v16i8 shuffles - Prefer shuffles in the following order:
11635 // 1. [ssse3] 1 x pshufb
11636 // 2. [ssse3] 2 x pshufb + 1 x por
11637 // 3. [all] v8i16 shuffle + N x pextrw + rotate + pinsrw
11638 static SDValue LowerVECTOR_SHUFFLEv16i8(ShuffleVectorSDNode *SVOp,
11639 const X86Subtarget* Subtarget,
11640 SelectionDAG &DAG) {
11641 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
11642 SDValue V1 = SVOp->getOperand(0);
11643 SDValue V2 = SVOp->getOperand(1);
11645 ArrayRef<int> MaskVals = SVOp->getMask();
11647 // Promote splats to a larger type which usually leads to more efficient code.
11648 // FIXME: Is this true if pshufb is available?
11649 if (SVOp->isSplat())
11650 return PromoteSplat(SVOp, DAG);
11652 // If we have SSSE3, case 1 is generated when all result bytes come from
11653 // one of the inputs. Otherwise, case 2 is generated. If no SSSE3 is
11654 // present, fall back to case 3.
11656 // If SSSE3, use 1 pshufb instruction per vector with elements in the result.
11657 if (Subtarget->hasSSSE3()) {
11658 SmallVector<SDValue,16> pshufbMask;
11660 // If all result elements are from one input vector, then only translate
11661 // undef mask values to 0x80 (zero out result) in the pshufb mask.
11663 // Otherwise, we have elements from both input vectors, and must zero out
11664 // elements that come from V2 in the first mask, and V1 in the second mask
11665 // so that we can OR them together.
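    // E.g. if MaskVals[0] == 20 (a byte of V2), the V1 pshufb control byte for
    // slot 0 becomes 0x80 (producing zero) and the V2 control byte becomes 4,
    // so the OR keeps only V2's byte in that slot.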
11666 for (unsigned i = 0; i != 16; ++i) {
11667 int EltIdx = MaskVals[i];
11668 if (EltIdx < 0 || EltIdx >= 16)
11670 pshufbMask.push_back(DAG.getConstant(EltIdx, MVT::i8));
11672 V1 = DAG.getNode(X86ISD::PSHUFB, dl, MVT::v16i8, V1,
11673 DAG.getNode(ISD::BUILD_VECTOR, dl,
11674 MVT::v16i8, pshufbMask));
11676 // As PSHUFB will zero elements with negative indices, it's safe to ignore
11677 // the 2nd operand if it's undefined or zero.
11678 if (V2.getOpcode() == ISD::UNDEF ||
11679 ISD::isBuildVectorAllZeros(V2.getNode()))
11682 // Calculate the shuffle mask for the second input, shuffle it, and
11683 // OR it with the first shuffled input.
11684 pshufbMask.clear();
11685 for (unsigned i = 0; i != 16; ++i) {
11686 int EltIdx = MaskVals[i];
11687 EltIdx = (EltIdx < 16) ? 0x80 : EltIdx - 16;
11688 pshufbMask.push_back(DAG.getConstant(EltIdx, MVT::i8));
11690 V2 = DAG.getNode(X86ISD::PSHUFB, dl, MVT::v16i8, V2,
11691 DAG.getNode(ISD::BUILD_VECTOR, dl,
11692 MVT::v16i8, pshufbMask));
11693 return DAG.getNode(ISD::OR, dl, MVT::v16i8, V1, V2);
11696   // No SSSE3 - Calculate in-place words and then fix all out-of-place words
11697   // with 0-16 extracts & inserts. Worst case is 16 bytes out of order from
11698 // the 16 different words that comprise the two doublequadword input vectors.
11699 V1 = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V1);
11700 V2 = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V2);
11702 for (int i = 0; i != 8; ++i) {
11703 int Elt0 = MaskVals[i*2];
11704 int Elt1 = MaskVals[i*2+1];
11706 // This word of the result is all undef, skip it.
11707 if (Elt0 < 0 && Elt1 < 0)
11710 // This word of the result is already in the correct place, skip it.
11711 if ((Elt0 == i*2) && (Elt1 == i*2+1))
11714 SDValue Elt0Src = Elt0 < 16 ? V1 : V2;
11715 SDValue Elt1Src = Elt1 < 16 ? V1 : V2;
11718     // If Elt0 and Elt1 are defined, are consecutive, and can be loaded
11719     // together using a single extract, load it and store it.
11720 if ((Elt0 >= 0) && ((Elt0 + 1) == Elt1) && ((Elt0 & 1) == 0)) {
11721 InsElt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16, Elt1Src,
11722 DAG.getIntPtrConstant(Elt1 / 2));
11723 NewV = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v8i16, NewV, InsElt,
11724 DAG.getIntPtrConstant(i));
11728 // If Elt1 is defined, extract it from the appropriate source. If the
11729 // source byte is not also odd, shift the extracted word left 8 bits
11730 // otherwise clear the bottom 8 bits if we need to do an or.
11732 InsElt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16, Elt1Src,
11733 DAG.getIntPtrConstant(Elt1 / 2));
11734 if ((Elt1 & 1) == 0)
11735 InsElt = DAG.getNode(ISD::SHL, dl, MVT::i16, InsElt,
11737 TLI.getShiftAmountTy(InsElt.getValueType())));
11738 else if (Elt0 >= 0)
11739 InsElt = DAG.getNode(ISD::AND, dl, MVT::i16, InsElt,
11740 DAG.getConstant(0xFF00, MVT::i16));
11742 // If Elt0 is defined, extract it from the appropriate source. If the
11743 // source byte is not also even, shift the extracted word right 8 bits. If
11744 // Elt1 was also defined, OR the extracted values together before
11745 // inserting them in the result.
11747 SDValue InsElt0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16,
11748 Elt0Src, DAG.getIntPtrConstant(Elt0 / 2));
11749 if ((Elt0 & 1) != 0)
11750 InsElt0 = DAG.getNode(ISD::SRL, dl, MVT::i16, InsElt0,
11752 TLI.getShiftAmountTy(InsElt0.getValueType())));
11753 else if (Elt1 >= 0)
11754 InsElt0 = DAG.getNode(ISD::AND, dl, MVT::i16, InsElt0,
11755 DAG.getConstant(0x00FF, MVT::i16));
11756 InsElt = Elt1 >= 0 ? DAG.getNode(ISD::OR, dl, MVT::i16, InsElt, InsElt0)
11759 NewV = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v8i16, NewV, InsElt,
11760 DAG.getIntPtrConstant(i));
11762 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, NewV);
11765 // v32i8 shuffles - Translate to VPSHUFB if possible.
11767 SDValue LowerVECTOR_SHUFFLEv32i8(ShuffleVectorSDNode *SVOp,
11768 const X86Subtarget *Subtarget,
11769 SelectionDAG &DAG) {
11770 MVT VT = SVOp->getSimpleValueType(0);
11771 SDValue V1 = SVOp->getOperand(0);
11772 SDValue V2 = SVOp->getOperand(1);
11774 SmallVector<int, 32> MaskVals(SVOp->getMask().begin(), SVOp->getMask().end());
11776 bool V2IsUndef = V2.getOpcode() == ISD::UNDEF;
11777 bool V1IsAllZero = ISD::isBuildVectorAllZeros(V1.getNode());
11778 bool V2IsAllZero = ISD::isBuildVectorAllZeros(V2.getNode());
11780 // VPSHUFB may be generated if
11781 // (1) one of the input vectors is undefined or a zeroinitializer.
11782 // The mask value 0x80 puts 0 in the corresponding slot of the vector.
11783 // And (2) the mask indexes don't cross the 128-bit lane.
11784 if (VT != MVT::v32i8 || !Subtarget->hasInt256() ||
11785 (!V2IsUndef && !V2IsAllZero && !V1IsAllZero))
11788 if (V1IsAllZero && !V2IsAllZero) {
11789 CommuteVectorShuffleMask(MaskVals, 32);
11792 return getPSHUFB(MaskVals, V1, dl, DAG);
11795 /// RewriteAsNarrowerShuffle - Try rewriting v8i16 and v16i8 shuffles as 4 wide
11796 /// ones, or rewriting v4i32 / v4f32 as 2 wide ones if possible. This can be
11797 /// done when every pair / quad of shuffle mask elements points to elements in
11798 /// the right sequence. e.g.
11799 /// vector_shuffle X, Y, <2, 3, | 10, 11, | 0, 1, | 14, 15>
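/// is rewritten, after bitcasting the inputs to v4i32, as
///   vector_shuffle X', Y', <1, 5, 0, 7>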
11801 SDValue RewriteAsNarrowerShuffle(ShuffleVectorSDNode *SVOp,
11802 SelectionDAG &DAG) {
11803 MVT VT = SVOp->getSimpleValueType(0);
11805 unsigned NumElems = VT.getVectorNumElements();
11808 switch (VT.SimpleTy) {
11809 default: llvm_unreachable("Unexpected!");
11812 return SDValue(SVOp, 0);
11813 case MVT::v4f32: NewVT = MVT::v2f64; Scale = 2; break;
11814 case MVT::v4i32: NewVT = MVT::v2i64; Scale = 2; break;
11815 case MVT::v8i16: NewVT = MVT::v4i32; Scale = 2; break;
11816 case MVT::v16i8: NewVT = MVT::v4i32; Scale = 4; break;
11817 case MVT::v16i16: NewVT = MVT::v8i32; Scale = 2; break;
11818 case MVT::v32i8: NewVT = MVT::v8i32; Scale = 4; break;
11821 SmallVector<int, 8> MaskVec;
11822 for (unsigned i = 0; i != NumElems; i += Scale) {
11824 for (unsigned j = 0; j != Scale; ++j) {
11825 int EltIdx = SVOp->getMaskElt(i+j);
11829 StartIdx = (EltIdx / Scale);
11830 if (EltIdx != (int)(StartIdx*Scale + j))
11833 MaskVec.push_back(StartIdx);
11836 SDValue V1 = DAG.getNode(ISD::BITCAST, dl, NewVT, SVOp->getOperand(0));
11837 SDValue V2 = DAG.getNode(ISD::BITCAST, dl, NewVT, SVOp->getOperand(1));
11838 return DAG.getVectorShuffle(NewVT, dl, V1, V2, &MaskVec[0]);
11841 /// getVZextMovL - Return a zero-extending vector move low node.
11843 static SDValue getVZextMovL(MVT VT, MVT OpVT,
11844 SDValue SrcOp, SelectionDAG &DAG,
11845 const X86Subtarget *Subtarget, SDLoc dl) {
11846 if (VT == MVT::v2f64 || VT == MVT::v4f32) {
11847 LoadSDNode *LD = nullptr;
11848 if (!isScalarLoadToVector(SrcOp.getNode(), &LD))
11849 LD = dyn_cast<LoadSDNode>(SrcOp);
11851     // movssrr and movsdrr do not clear top bits. Try to use movd, movq instead.
11853 MVT ExtVT = (OpVT == MVT::v2f64) ? MVT::i64 : MVT::i32;
11854 if ((ExtVT != MVT::i64 || Subtarget->is64Bit()) &&
11855 SrcOp.getOpcode() == ISD::SCALAR_TO_VECTOR &&
11856 SrcOp.getOperand(0).getOpcode() == ISD::BITCAST &&
11857 SrcOp.getOperand(0).getOperand(0).getValueType() == ExtVT) {
11859 OpVT = (OpVT == MVT::v2f64) ? MVT::v2i64 : MVT::v4i32;
11860 return DAG.getNode(ISD::BITCAST, dl, VT,
11861 DAG.getNode(X86ISD::VZEXT_MOVL, dl, OpVT,
11862 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl,
11864 SrcOp.getOperand(0)
11870 return DAG.getNode(ISD::BITCAST, dl, VT,
11871 DAG.getNode(X86ISD::VZEXT_MOVL, dl, OpVT,
11872 DAG.getNode(ISD::BITCAST, dl,
11876 /// LowerVECTOR_SHUFFLE_256 - Handle all 256-bit wide vector shuffles
11877 /// which could not be matched by any known target specific shuffle
11879 LowerVECTOR_SHUFFLE_256(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG) {
11881 SDValue NewOp = Compact8x32ShuffleNode(SVOp, DAG);
11882 if (NewOp.getNode())
11885 MVT VT = SVOp->getSimpleValueType(0);
11887 unsigned NumElems = VT.getVectorNumElements();
11888 unsigned NumLaneElems = NumElems / 2;
11891 MVT EltVT = VT.getVectorElementType();
11892 MVT NVT = MVT::getVectorVT(EltVT, NumLaneElems);
11895 SmallVector<int, 16> Mask;
11896 for (unsigned l = 0; l < 2; ++l) {
11897 // Build a shuffle mask for the output, discovering on the fly which
11898 // input vectors to use as shuffle operands (recorded in InputUsed).
11899 // If building a suitable shuffle vector proves too hard, then bail
11900 // out with UseBuildVector set.
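    // E.g. for a v8i32 shuffle each lane holds 4 elements; a lane whose mask
    // elements all come from the low half of V1 and the high half of V2 records
    // InputUsed = {0, 3} and remaps every index into the range [0, 8).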
11901 bool UseBuildVector = false;
11902 int InputUsed[2] = { -1, -1 }; // Not yet discovered.
11903 unsigned LaneStart = l * NumLaneElems;
11904 for (unsigned i = 0; i != NumLaneElems; ++i) {
11905 // The mask element. This indexes into the input.
11906 int Idx = SVOp->getMaskElt(i+LaneStart);
11908 // the mask element does not index into any input vector.
11909 Mask.push_back(-1);
11913 // The input vector this mask element indexes into.
11914 int Input = Idx / NumLaneElems;
11916 // Turn the index into an offset from the start of the input vector.
11917 Idx -= Input * NumLaneElems;
11919 // Find or create a shuffle vector operand to hold this input.
11921 for (OpNo = 0; OpNo < array_lengthof(InputUsed); ++OpNo) {
11922 if (InputUsed[OpNo] == Input)
11923 // This input vector is already an operand.
11925 if (InputUsed[OpNo] < 0) {
11926 // Create a new operand for this input vector.
11927 InputUsed[OpNo] = Input;
11932 if (OpNo >= array_lengthof(InputUsed)) {
11933 // More than two input vectors used! Give up on trying to create a
11934 // shuffle vector. Insert all elements into a BUILD_VECTOR instead.
11935 UseBuildVector = true;
11939 // Add the mask index for the new shuffle vector.
11940 Mask.push_back(Idx + OpNo * NumLaneElems);
11943 if (UseBuildVector) {
11944 SmallVector<SDValue, 16> SVOps;
11945 for (unsigned i = 0; i != NumLaneElems; ++i) {
11946 // The mask element. This indexes into the input.
11947 int Idx = SVOp->getMaskElt(i+LaneStart);
11949 SVOps.push_back(DAG.getUNDEF(EltVT));
11953 // The input vector this mask element indexes into.
11954 int Input = Idx / NumElems;
11956 // Turn the index into an offset from the start of the input vector.
11957 Idx -= Input * NumElems;
11959 // Extract the vector element by hand.
11960 SVOps.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT,
11961 SVOp->getOperand(Input),
11962 DAG.getIntPtrConstant(Idx)));
11965 // Construct the output using a BUILD_VECTOR.
11966 Output[l] = DAG.getNode(ISD::BUILD_VECTOR, dl, NVT, SVOps);
11967 } else if (InputUsed[0] < 0) {
11968 // No input vectors were used! The result is undefined.
11969 Output[l] = DAG.getUNDEF(NVT);
11971 SDValue Op0 = Extract128BitVector(SVOp->getOperand(InputUsed[0] / 2),
11972 (InputUsed[0] % 2) * NumLaneElems,
11974 // If only one input was used, use an undefined vector for the other.
11975 SDValue Op1 = (InputUsed[1] < 0) ? DAG.getUNDEF(NVT) :
11976 Extract128BitVector(SVOp->getOperand(InputUsed[1] / 2),
11977 (InputUsed[1] % 2) * NumLaneElems, DAG, dl);
11978 // At least one input vector was used. Create a new shuffle vector.
11979 Output[l] = DAG.getVectorShuffle(NVT, dl, Op0, Op1, &Mask[0]);
11985 // Concatenate the result back
11986 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Output[0], Output[1]);
11989 /// LowerVECTOR_SHUFFLE_128v4 - Handle all 128-bit wide vectors with
11990 /// 4 elements, and match them with several different shuffle types.
11992 LowerVECTOR_SHUFFLE_128v4(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG) {
11993 SDValue V1 = SVOp->getOperand(0);
11994 SDValue V2 = SVOp->getOperand(1);
11996 MVT VT = SVOp->getSimpleValueType(0);
11998 assert(VT.is128BitVector() && "Unsupported vector size");
12000 std::pair<int, int> Locs[4];
12001 int Mask1[] = { -1, -1, -1, -1 };
12002 SmallVector<int, 8> PermMask(SVOp->getMask().begin(), SVOp->getMask().end());
12004 unsigned NumHi = 0;
12005 unsigned NumLo = 0;
12006 for (unsigned i = 0; i != 4; ++i) {
12007 int Idx = PermMask[i];
12009 Locs[i] = std::make_pair(-1, -1);
12011 assert(Idx < 8 && "Invalid VECTOR_SHUFFLE index!");
12013 Locs[i] = std::make_pair(0, NumLo);
12014 Mask1[NumLo] = Idx;
12017 Locs[i] = std::make_pair(1, NumHi);
12019 Mask1[2+NumHi] = Idx;
12025 if (NumLo <= 2 && NumHi <= 2) {
12026     // If no more than two elements come from either vector, this can be
12027     // implemented with two shuffles. The first shuffle gathers the elements.
12028     // The second shuffle, which takes the first shuffle as both of its
12029     // vector operands, puts the elements into the right order.
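    // E.g. for PermMask <0, 4, 1, 5> the gathering shuffle uses mask
    // <0, 1, 4, 5>, and the reordering shuffle then applies <0, 2, 5, 7> to the
    // gathered vector (used as both operands) to produce the requested order.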
12030 V1 = DAG.getVectorShuffle(VT, dl, V1, V2, &Mask1[0]);
12032 int Mask2[] = { -1, -1, -1, -1 };
12034 for (unsigned i = 0; i != 4; ++i)
12035 if (Locs[i].first != -1) {
12036 unsigned Idx = (i < 2) ? 0 : 4;
12037 Idx += Locs[i].first * 2 + Locs[i].second;
12041 return DAG.getVectorShuffle(VT, dl, V1, V1, &Mask2[0]);
12044 if (NumLo == 3 || NumHi == 3) {
12045 // Otherwise, we must have three elements from one vector, call it X, and
12046 // one element from the other, call it Y. First, use a shufps to build an
12047 // intermediate vector with the one element from Y and the element from X
12048 // that will be in the same half in the final destination (the indexes don't
12049 // matter). Then, use a shufps to build the final vector, taking the half
12050     // containing the element from Y from the intermediate, and the other half from X.
12053 // Normalize it so the 3 elements come from V1.
12054 CommuteVectorShuffleMask(PermMask, 4);
12058 // Find the element from V2.
12060 for (HiIndex = 0; HiIndex < 3; ++HiIndex) {
12061 int Val = PermMask[HiIndex];
12068 Mask1[0] = PermMask[HiIndex];
12070 Mask1[2] = PermMask[HiIndex^1];
12072 V2 = DAG.getVectorShuffle(VT, dl, V1, V2, &Mask1[0]);
12074 if (HiIndex >= 2) {
12075 Mask1[0] = PermMask[0];
12076 Mask1[1] = PermMask[1];
12077 Mask1[2] = HiIndex & 1 ? 6 : 4;
12078 Mask1[3] = HiIndex & 1 ? 4 : 6;
12079 return DAG.getVectorShuffle(VT, dl, V1, V2, &Mask1[0]);
12082 Mask1[0] = HiIndex & 1 ? 2 : 0;
12083 Mask1[1] = HiIndex & 1 ? 0 : 2;
12084 Mask1[2] = PermMask[2];
12085 Mask1[3] = PermMask[3];
12090 return DAG.getVectorShuffle(VT, dl, V2, V1, &Mask1[0]);
12093 // Break it into (shuffle shuffle_hi, shuffle_lo).
12094 int LoMask[] = { -1, -1, -1, -1 };
12095 int HiMask[] = { -1, -1, -1, -1 };
12097 int *MaskPtr = LoMask;
12098 unsigned MaskIdx = 0;
12099 unsigned LoIdx = 0;
12100 unsigned HiIdx = 2;
12101 for (unsigned i = 0; i != 4; ++i) {
12108 int Idx = PermMask[i];
12110 Locs[i] = std::make_pair(-1, -1);
12111 } else if (Idx < 4) {
12112 Locs[i] = std::make_pair(MaskIdx, LoIdx);
12113 MaskPtr[LoIdx] = Idx;
12116 Locs[i] = std::make_pair(MaskIdx, HiIdx);
12117 MaskPtr[HiIdx] = Idx;
12122 SDValue LoShuffle = DAG.getVectorShuffle(VT, dl, V1, V2, &LoMask[0]);
12123 SDValue HiShuffle = DAG.getVectorShuffle(VT, dl, V1, V2, &HiMask[0]);
12124 int MaskOps[] = { -1, -1, -1, -1 };
12125 for (unsigned i = 0; i != 4; ++i)
12126 if (Locs[i].first != -1)
12127 MaskOps[i] = Locs[i].first * 4 + Locs[i].second;
12128 return DAG.getVectorShuffle(VT, dl, LoShuffle, HiShuffle, &MaskOps[0]);
12131 static bool MayFoldVectorLoad(SDValue V) {
12132 while (V.hasOneUse() && V.getOpcode() == ISD::BITCAST)
12133 V = V.getOperand(0);
12135 if (V.hasOneUse() && V.getOpcode() == ISD::SCALAR_TO_VECTOR)
12136 V = V.getOperand(0);
12137 if (V.hasOneUse() && V.getOpcode() == ISD::BUILD_VECTOR &&
12138 V.getNumOperands() == 2 && V.getOperand(1).getOpcode() == ISD::UNDEF)
12139 // BUILD_VECTOR (load), undef
12140 V = V.getOperand(0);
12142 return MayFoldLoad(V);
12146 SDValue getMOVDDup(SDValue &Op, SDLoc &dl, SDValue V1, SelectionDAG &DAG) {
12147 MVT VT = Op.getSimpleValueType();
12149   // Canonicalize to v2f64.
12150 V1 = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, V1);
12151 return DAG.getNode(ISD::BITCAST, dl, VT,
12152 getTargetShuffleNode(X86ISD::MOVDDUP, dl, MVT::v2f64,
12157 SDValue getMOVLowToHigh(SDValue &Op, SDLoc &dl, SelectionDAG &DAG,
12159 SDValue V1 = Op.getOperand(0);
12160 SDValue V2 = Op.getOperand(1);
12161 MVT VT = Op.getSimpleValueType();
12163 assert(VT != MVT::v2i64 && "unsupported shuffle type");
12165 if (HasSSE2 && VT == MVT::v2f64)
12166 return getTargetShuffleNode(X86ISD::MOVLHPD, dl, VT, V1, V2, DAG);
12168   // v4f32 or v4i32: canonicalized to v4f32 (which is legal for SSE1)
12169 return DAG.getNode(ISD::BITCAST, dl, VT,
12170 getTargetShuffleNode(X86ISD::MOVLHPS, dl, MVT::v4f32,
12171 DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, V1),
12172 DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, V2), DAG));
12176 SDValue getMOVHighToLow(SDValue &Op, SDLoc &dl, SelectionDAG &DAG) {
12177 SDValue V1 = Op.getOperand(0);
12178 SDValue V2 = Op.getOperand(1);
12179 MVT VT = Op.getSimpleValueType();
12181 assert((VT == MVT::v4i32 || VT == MVT::v4f32) &&
12182 "unsupported shuffle type");
12184 if (V2.getOpcode() == ISD::UNDEF)
12188 return getTargetShuffleNode(X86ISD::MOVHLPS, dl, VT, V1, V2, DAG);
12192 SDValue getMOVLP(SDValue &Op, SDLoc &dl, SelectionDAG &DAG, bool HasSSE2) {
12193 SDValue V1 = Op.getOperand(0);
12194 SDValue V2 = Op.getOperand(1);
12195 MVT VT = Op.getSimpleValueType();
12196 unsigned NumElems = VT.getVectorNumElements();
12198 // Use MOVLPS and MOVLPD in case V1 or V2 are loads. During isel, the second
12199 // operand of these instructions is only memory, so check if there's a
12200   // potential load folding here, otherwise use SHUFPS or MOVSD to match the same masks.
12202 bool CanFoldLoad = false;
12204 // Trivial case, when V2 comes from a load.
12205 if (MayFoldVectorLoad(V2))
12206 CanFoldLoad = true;
12208 // When V1 is a load, it can be folded later into a store in isel, example:
12209 // (store (v4f32 (X86Movlps (load addr:$src1), VR128:$src2)), addr:$src1)
12211 // (MOVLPSmr addr:$src1, VR128:$src2)
12212 // So, recognize this potential and also use MOVLPS or MOVLPD
12213 else if (MayFoldVectorLoad(V1) && MayFoldIntoStore(Op))
12214 CanFoldLoad = true;
12216 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
12218 if (HasSSE2 && NumElems == 2)
12219 return getTargetShuffleNode(X86ISD::MOVLPD, dl, VT, V1, V2, DAG);
12222 // If we don't care about the second element, proceed to use movss.
12223 if (SVOp->getMaskElt(1) != -1)
12224 return getTargetShuffleNode(X86ISD::MOVLPS, dl, VT, V1, V2, DAG);
12227 // movl and movlp will both match v2i64, but v2i64 is never matched by
12228 // movl earlier because we make it strict to avoid messing with the movlp load
12229   // folding logic (see the code above the getMOVLP call). Match it here instead;
12230   // this is horrible, but it will stay like this until we move all shuffle
12231   // matching to x86-specific nodes. Note that for the 1st condition all
12232 // types are matched with movsd.
12234 // FIXME: isMOVLMask should be checked and matched before getMOVLP,
12235 // as to remove this logic from here, as much as possible
12236 if (NumElems == 2 || !isMOVLMask(SVOp->getMask(), VT))
12237 return getTargetShuffleNode(X86ISD::MOVSD, dl, VT, V1, V2, DAG);
12238 return getTargetShuffleNode(X86ISD::MOVSS, dl, VT, V1, V2, DAG);
12241 assert(VT != MVT::v4i32 && "unsupported shuffle type");
12243 // Invert the operand order and use SHUFPS to match it.
12244 return getTargetShuffleNode(X86ISD::SHUFP, dl, VT, V2, V1,
12245 getShuffleSHUFImmediate(SVOp), DAG);
12248 static SDValue NarrowVectorLoadToElement(LoadSDNode *Load, unsigned Index,
12249 SelectionDAG &DAG) {
12251 MVT VT = Load->getSimpleValueType(0);
12252 MVT EVT = VT.getVectorElementType();
12253 SDValue Addr = Load->getOperand(1);
12254 SDValue NewAddr = DAG.getNode(
12255 ISD::ADD, dl, Addr.getSimpleValueType(), Addr,
12256 DAG.getConstant(Index * EVT.getStoreSize(), Addr.getSimpleValueType()));
12259 DAG.getLoad(EVT, dl, Load->getChain(), NewAddr,
12260 DAG.getMachineFunction().getMachineMemOperand(
12261 Load->getMemOperand(), 0, EVT.getStoreSize()));
12265 // It is only safe to call this function if isINSERTPSMask is true for
12266 // this shufflevector mask.
12267 static SDValue getINSERTPS(ShuffleVectorSDNode *SVOp, SDLoc &dl,
12268 SelectionDAG &DAG) {
12269 // Generate an insertps instruction when inserting an f32 from memory onto a
12270 // v4f32 or when copying a member from one v4f32 to another.
12271 // We also use it for transferring i32 from one register to another,
12272 // since it simply copies the same bits.
12273 // If we're transferring an i32 from memory to a specific element in a
12274   // register, we output a generic DAG that will match the PINSRD instruction.
12276 MVT VT = SVOp->getSimpleValueType(0);
12277 MVT EVT = VT.getVectorElementType();
12278 SDValue V1 = SVOp->getOperand(0);
12279 SDValue V2 = SVOp->getOperand(1);
12280 auto Mask = SVOp->getMask();
12281 assert((VT == MVT::v4f32 || VT == MVT::v4i32) &&
12282 "unsupported vector type for insertps/pinsrd");
12284 auto FromV1Predicate = [](const int &i) { return i < 4 && i > -1; };
12285 auto FromV2Predicate = [](const int &i) { return i >= 4; };
12286 int FromV1 = std::count_if(Mask.begin(), Mask.end(), FromV1Predicate);
12290 unsigned DestIndex;
12294 DestIndex = std::find_if(Mask.begin(), Mask.end(), FromV1Predicate) -
12297 // If we have 1 element from each vector, we have to check if we're
12298 // changing V1's element's place. If so, we're done. Otherwise, we
12299     // should assume we're changing V2's element's place and behave accordingly.
12301 int FromV2 = std::count_if(Mask.begin(), Mask.end(), FromV2Predicate);
12302 assert(DestIndex <= INT32_MAX && "truncated destination index");
12303 if (FromV1 == FromV2 &&
12304 static_cast<int>(DestIndex) == Mask[DestIndex] % 4) {
12308 std::find_if(Mask.begin(), Mask.end(), FromV2Predicate) - Mask.begin();
12311 assert(std::count_if(Mask.begin(), Mask.end(), FromV2Predicate) == 1 &&
12312 "More than one element from V1 and from V2, or no elements from one "
12313 "of the vectors. This case should not have returned true from "
12318 std::find_if(Mask.begin(), Mask.end(), FromV2Predicate) - Mask.begin();
12321 // Get an index into the source vector in the range [0,4) (the mask is
12322 // in the range [0,8) because it can address V1 and V2)
12323 unsigned SrcIndex = Mask[DestIndex] % 4;
12324 if (MayFoldLoad(From)) {
12325 // Trivial case, when From comes from a load and is only used by the
12326     // shuffle. Make it use insertps from the vector that we need from that load.
12329 NarrowVectorLoadToElement(cast<LoadSDNode>(From), SrcIndex, DAG);
12330 if (!NewLoad.getNode())
12333 if (EVT == MVT::f32) {
12334 // Create this as a scalar to vector to match the instruction pattern.
12335 SDValue LoadScalarToVector =
12336 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, NewLoad);
12337 SDValue InsertpsMask = DAG.getIntPtrConstant(DestIndex << 4);
12338 return DAG.getNode(X86ISD::INSERTPS, dl, VT, To, LoadScalarToVector,
12340 } else { // EVT == MVT::i32
12341 // If we're getting an i32 from memory, use an INSERT_VECTOR_ELT
12342 // instruction, to match the PINSRD instruction, which loads an i32 to a
12343 // certain vector element.
12344 return DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, To, NewLoad,
12345 DAG.getConstant(DestIndex, MVT::i32));
12349 // Vector-element-to-vector
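  // The insertps immediate encodes the source element index in bits [7:6] and
  // the destination index in bits [5:4]; e.g. copying source element 2 into
  // destination element 1 uses (1 << 4) | (2 << 6) == 0x90.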
12350 SDValue InsertpsMask = DAG.getIntPtrConstant(DestIndex << 4 | SrcIndex << 6);
12351 return DAG.getNode(X86ISD::INSERTPS, dl, VT, To, From, InsertpsMask);
12354 // Reduce a vector shuffle to zext.
12355 static SDValue LowerVectorIntExtend(SDValue Op, const X86Subtarget *Subtarget,
12356 SelectionDAG &DAG) {
12357 // PMOVZX is only available from SSE41.
12358 if (!Subtarget->hasSSE41())
12361 MVT VT = Op.getSimpleValueType();
12363   // Only AVX2 supports 256-bit vector integer extension.
12364 if (!Subtarget->hasInt256() && VT.is256BitVector())
12367 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
12369 SDValue V1 = Op.getOperand(0);
12370 SDValue V2 = Op.getOperand(1);
12371 unsigned NumElems = VT.getVectorNumElements();
12373   // Extension is a unary operation, and the element type of the source vector
12374   // must not be i64 or larger.
12375 if (V2.getOpcode() != ISD::UNDEF || !VT.isInteger() ||
12376 VT.getVectorElementType() == MVT::i64)
12379 // Find the expansion ratio, e.g. expanding from i8 to i32 has a ratio of 4.
12380 unsigned Shift = 1; // Start from 2, i.e. 1 << 1.
12381 while ((1U << Shift) < NumElems) {
12382 if (SVOp->getMaskElt(1U << Shift) == 1)
12385 // The maximal ratio is 8, i.e. from i8 to i64.
12390 // Check the shuffle mask.
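  // E.g. a v8i16 shuffle with mask <0, -1, 1, -1, 2, -1, 3, -1> passes this
  // check with Shift == 1 and is lowered as a v8i16 -> v4i32 VZEXT of V1.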
12391 unsigned Mask = (1U << Shift) - 1;
12392 for (unsigned i = 0; i != NumElems; ++i) {
12393 int EltIdx = SVOp->getMaskElt(i);
12394 if ((i & Mask) != 0 && EltIdx != -1)
12396 if ((i & Mask) == 0 && (unsigned)EltIdx != (i >> Shift))
12400 unsigned NBits = VT.getVectorElementType().getSizeInBits() << Shift;
12401 MVT NeVT = MVT::getIntegerVT(NBits);
12402 MVT NVT = MVT::getVectorVT(NeVT, NumElems >> Shift);
12404 if (!DAG.getTargetLoweringInfo().isTypeLegal(NVT))
12407 return DAG.getNode(ISD::BITCAST, DL, VT,
12408 DAG.getNode(X86ISD::VZEXT, DL, NVT, V1));
12411 static SDValue NormalizeVectorShuffle(SDValue Op, const X86Subtarget *Subtarget,
12412 SelectionDAG &DAG) {
12413 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
12414 MVT VT = Op.getSimpleValueType();
12416 SDValue V1 = Op.getOperand(0);
12417 SDValue V2 = Op.getOperand(1);
12419 if (isZeroShuffle(SVOp))
12420 return getZeroVector(VT, Subtarget, DAG, dl);
12422 // Handle splat operations
12423 if (SVOp->isSplat()) {
12424 // Use vbroadcast whenever the splat comes from a foldable load
12425 SDValue Broadcast = LowerVectorBroadcast(Op, Subtarget, DAG);
12426 if (Broadcast.getNode())
12430 // Check integer expanding shuffles.
12431 SDValue NewOp = LowerVectorIntExtend(Op, Subtarget, DAG);
12432 if (NewOp.getNode())
12435 // If the shuffle can be profitably rewritten as a narrower shuffle, then
12437 if (VT == MVT::v8i16 || VT == MVT::v16i8 || VT == MVT::v16i16 ||
12438 VT == MVT::v32i8) {
12439 SDValue NewOp = RewriteAsNarrowerShuffle(SVOp, DAG);
12440 if (NewOp.getNode())
12441 return DAG.getNode(ISD::BITCAST, dl, VT, NewOp);
12442 } else if (VT.is128BitVector() && Subtarget->hasSSE2()) {
12443 // FIXME: Figure out a cleaner way to do this.
12444 if (ISD::isBuildVectorAllZeros(V2.getNode())) {
12445 SDValue NewOp = RewriteAsNarrowerShuffle(SVOp, DAG);
12446 if (NewOp.getNode()) {
12447 MVT NewVT = NewOp.getSimpleValueType();
12448 if (isCommutedMOVLMask(cast<ShuffleVectorSDNode>(NewOp)->getMask(),
12449 NewVT, true, false))
12450 return getVZextMovL(VT, NewVT, NewOp.getOperand(0), DAG, Subtarget,
12453 } else if (ISD::isBuildVectorAllZeros(V1.getNode())) {
12454 SDValue NewOp = RewriteAsNarrowerShuffle(SVOp, DAG);
12455 if (NewOp.getNode()) {
12456 MVT NewVT = NewOp.getSimpleValueType();
12457 if (isMOVLMask(cast<ShuffleVectorSDNode>(NewOp)->getMask(), NewVT))
12458 return getVZextMovL(VT, NewVT, NewOp.getOperand(1), DAG, Subtarget,
12467 X86TargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const {
12468 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
12469 SDValue V1 = Op.getOperand(0);
12470 SDValue V2 = Op.getOperand(1);
12471 MVT VT = Op.getSimpleValueType();
12473 unsigned NumElems = VT.getVectorNumElements();
12474 bool V1IsUndef = V1.getOpcode() == ISD::UNDEF;
12475 bool V2IsUndef = V2.getOpcode() == ISD::UNDEF;
12476 bool V1IsSplat = false;
12477 bool V2IsSplat = false;
12478 bool HasSSE2 = Subtarget->hasSSE2();
12479 bool HasFp256 = Subtarget->hasFp256();
12480 bool HasInt256 = Subtarget->hasInt256();
12481 MachineFunction &MF = DAG.getMachineFunction();
12482 bool OptForSize = MF.getFunction()->getAttributes().
12483 hasAttribute(AttributeSet::FunctionIndex, Attribute::OptimizeForSize);
12485 // Check if we should use the experimental vector shuffle lowering. If so,
12486 // delegate completely to that code path.
12487 if (ExperimentalVectorShuffleLowering)
12488 return lowerVectorShuffle(Op, Subtarget, DAG);
12490 assert(VT.getSizeInBits() != 64 && "Can't lower MMX shuffles");
12492 if (V1IsUndef && V2IsUndef)
12493 return DAG.getUNDEF(VT);
12495   // When we create a shuffle node we put the UNDEF node as the second operand,
12496 // but in some cases the first operand may be transformed to UNDEF.
12497 // In this case we should just commute the node.
12499 return DAG.getCommutedVectorShuffle(*SVOp);
12501 // Vector shuffle lowering takes 3 steps:
12503 // 1) Normalize the input vectors. Here splats, zeroed vectors, profitable
12504 // narrowing and commutation of operands should be handled.
12505 // 2) Matching of shuffles with known shuffle masks to x86 target specific
12507 // 3) Rewriting of unmatched masks into new generic shuffle operations,
12508 // so the shuffle can be broken into other shuffles and the legalizer can
12509 // try the lowering again.
12511 // The general idea is that no vector_shuffle operation should be left to
12512   // be matched during isel; all of them must be converted to a target specific
12515 // Normalize the input vectors. Here splats, zeroed vectors, profitable
12516 // narrowing and commutation of operands should be handled. The actual code
12517 // doesn't include all of those, work in progress...
12518 SDValue NewOp = NormalizeVectorShuffle(Op, Subtarget, DAG);
12519 if (NewOp.getNode())
12522 SmallVector<int, 8> M(SVOp->getMask().begin(), SVOp->getMask().end());
12524 // NOTE: isPSHUFDMask can also match both masks below (unpckl_undef and
12525 // unpckh_undef). Only use pshufd if speed is more important than size.
12526 if (OptForSize && isUNPCKL_v_undef_Mask(M, VT, HasInt256))
12527 return getTargetShuffleNode(X86ISD::UNPCKL, dl, VT, V1, V1, DAG);
12528 if (OptForSize && isUNPCKH_v_undef_Mask(M, VT, HasInt256))
12529 return getTargetShuffleNode(X86ISD::UNPCKH, dl, VT, V1, V1, DAG);
12531 if (isMOVDDUPMask(M, VT) && Subtarget->hasSSE3() &&
12532 V2IsUndef && MayFoldVectorLoad(V1))
12533 return getMOVDDup(Op, dl, V1, DAG);
12535 if (isMOVHLPS_v_undef_Mask(M, VT))
12536 return getMOVHighToLow(Op, dl, DAG);
12538   // Used to match splats.
12539 if (HasSSE2 && isUNPCKHMask(M, VT, HasInt256) && V2IsUndef &&
12540 (VT == MVT::v2f64 || VT == MVT::v2i64))
12541 return getTargetShuffleNode(X86ISD::UNPCKH, dl, VT, V1, V1, DAG);
12543 if (isPSHUFDMask(M, VT)) {
12544     // The mask matched by the if above can be selected into several different
12545     // instructions during isel, not only pshufd as its name suggests; sad but
12546     // true. Emulate that behavior for now...
12547 if (isMOVDDUPMask(M, VT) && ((VT == MVT::v4f32 || VT == MVT::v2i64)))
12548 return getTargetShuffleNode(X86ISD::MOVLHPS, dl, VT, V1, V1, DAG);
12550 unsigned TargetMask = getShuffleSHUFImmediate(SVOp);
12552 if (HasSSE2 && (VT == MVT::v4f32 || VT == MVT::v4i32))
12553 return getTargetShuffleNode(X86ISD::PSHUFD, dl, VT, V1, TargetMask, DAG);
12555 if (HasFp256 && (VT == MVT::v4f32 || VT == MVT::v2f64))
12556 return getTargetShuffleNode(X86ISD::VPERMILPI, dl, VT, V1, TargetMask,
12559 return getTargetShuffleNode(X86ISD::SHUFP, dl, VT, V1, V1,
12563 if (isPALIGNRMask(M, VT, Subtarget))
12564 return getTargetShuffleNode(X86ISD::PALIGNR, dl, VT, V1, V2,
12565 getShufflePALIGNRImmediate(SVOp),
12568 if (isVALIGNMask(M, VT, Subtarget))
12569 return getTargetShuffleNode(X86ISD::VALIGN, dl, VT, V1, V2,
12570 getShuffleVALIGNImmediate(SVOp),
12573 // Check if this can be converted into a logical shift.
12574 bool isLeft = false;
12575 unsigned ShAmt = 0;
12577 bool isShift = HasSSE2 && isVectorShift(SVOp, DAG, isLeft, ShVal, ShAmt);
12578 if (isShift && ShVal.hasOneUse()) {
12579 // If the shifted value has multiple uses, it may be cheaper to use
12580 // v_set0 + movlhps or movhlps, etc.
12581 MVT EltVT = VT.getVectorElementType();
12582 ShAmt *= EltVT.getSizeInBits();
12583 return getVShift(isLeft, VT, ShVal, ShAmt, DAG, *this, dl);
12586 if (isMOVLMask(M, VT)) {
12587 if (ISD::isBuildVectorAllZeros(V1.getNode()))
12588 return getVZextMovL(VT, VT, V2, DAG, Subtarget, dl);
12589 if (!isMOVLPMask(M, VT)) {
12590 if (HasSSE2 && (VT == MVT::v2i64 || VT == MVT::v2f64))
12591 return getTargetShuffleNode(X86ISD::MOVSD, dl, VT, V1, V2, DAG);
12593 if (VT == MVT::v4i32 || VT == MVT::v4f32)
12594 return getTargetShuffleNode(X86ISD::MOVSS, dl, VT, V1, V2, DAG);
12598 // FIXME: fold these into legal mask.
12599 if (isMOVLHPSMask(M, VT) && !isUNPCKLMask(M, VT, HasInt256))
12600 return getMOVLowToHigh(Op, dl, DAG, HasSSE2);
12602 if (isMOVHLPSMask(M, VT))
12603 return getMOVHighToLow(Op, dl, DAG);
12605 if (V2IsUndef && isMOVSHDUPMask(M, VT, Subtarget))
12606 return getTargetShuffleNode(X86ISD::MOVSHDUP, dl, VT, V1, DAG);
12608 if (V2IsUndef && isMOVSLDUPMask(M, VT, Subtarget))
12609 return getTargetShuffleNode(X86ISD::MOVSLDUP, dl, VT, V1, DAG);
12611 if (isMOVLPMask(M, VT))
12612 return getMOVLP(Op, dl, DAG, HasSSE2);
12614 if (ShouldXformToMOVHLPS(M, VT) ||
12615 ShouldXformToMOVLP(V1.getNode(), V2.getNode(), M, VT))
12616 return DAG.getCommutedVectorShuffle(*SVOp);
12619 // No better options. Use a vshldq / vsrldq.
12620 MVT EltVT = VT.getVectorElementType();
12621 ShAmt *= EltVT.getSizeInBits();
12622 return getVShift(isLeft, VT, ShVal, ShAmt, DAG, *this, dl);
12625 bool Commuted = false;
12626 // FIXME: This should also accept a bitcast of a splat? Be careful, not
12627 // 1,1,1,1 -> v8i16 though.
12628 BitVector UndefElements;
12629 if (auto *BVOp = dyn_cast<BuildVectorSDNode>(V1.getNode()))
12630 if (BVOp->getConstantSplatNode(&UndefElements) && UndefElements.none())
12632 if (auto *BVOp = dyn_cast<BuildVectorSDNode>(V2.getNode()))
12633 if (BVOp->getConstantSplatNode(&UndefElements) && UndefElements.none())
12636 // Canonicalize the splat or undef, if present, to be on the RHS.
12637 if (!V2IsUndef && V1IsSplat && !V2IsSplat) {
12638 CommuteVectorShuffleMask(M, NumElems);
12640 std::swap(V1IsSplat, V2IsSplat);
12644 if (isCommutedMOVLMask(M, VT, V2IsSplat, V2IsUndef)) {
12645 // Shuffling low element of v1 into undef, just return v1.
12648 // If V2 is a splat, the mask may be malformed such as <4,3,3,3>, which
12649 // the instruction selector will not match, so get a canonical MOVL with
12650 // swapped operands to undo the commute.
12651 return getMOVL(DAG, dl, VT, V2, V1);
12654 if (isUNPCKLMask(M, VT, HasInt256))
12655 return getTargetShuffleNode(X86ISD::UNPCKL, dl, VT, V1, V2, DAG);
12657 if (isUNPCKHMask(M, VT, HasInt256))
12658 return getTargetShuffleNode(X86ISD::UNPCKH, dl, VT, V1, V2, DAG);
12661   // Normalize the mask so all entries that point to V2 point to its first
12662   // element, then try to match unpck{h|l} again. If it matches, return a
12663   // new vector_shuffle with the corrected mask.
12664 SmallVector<int, 8> NewMask(M.begin(), M.end());
12665 NormalizeMask(NewMask, NumElems);
12666 if (isUNPCKLMask(NewMask, VT, HasInt256, true))
12667 return getTargetShuffleNode(X86ISD::UNPCKL, dl, VT, V1, V2, DAG);
12668 if (isUNPCKHMask(NewMask, VT, HasInt256, true))
12669 return getTargetShuffleNode(X86ISD::UNPCKH, dl, VT, V1, V2, DAG);
12673   // Commute it back and try unpck* again.
12674 // FIXME: this seems wrong.
12675 CommuteVectorShuffleMask(M, NumElems);
12677 std::swap(V1IsSplat, V2IsSplat);
12679 if (isUNPCKLMask(M, VT, HasInt256))
12680 return getTargetShuffleNode(X86ISD::UNPCKL, dl, VT, V1, V2, DAG);
12682 if (isUNPCKHMask(M, VT, HasInt256))
12683 return getTargetShuffleNode(X86ISD::UNPCKH, dl, VT, V1, V2, DAG);
12686 // Normalize the node to match x86 shuffle ops if needed
12687 if (!V2IsUndef && (isSHUFPMask(M, VT, /* Commuted */ true)))
12688 return DAG.getCommutedVectorShuffle(*SVOp);
12690 // The checks below are all present in isShuffleMaskLegal, but they are
12691 // inlined here right now to enable us to directly emit target specific
12692 // nodes, and remove one by one until they don't return Op anymore.
12694 if (ShuffleVectorSDNode::isSplatMask(&M[0], VT) &&
12695 SVOp->getSplatIndex() == 0 && V2IsUndef) {
12696 if (VT == MVT::v2f64 || VT == MVT::v2i64)
12697 return getTargetShuffleNode(X86ISD::UNPCKL, dl, VT, V1, V1, DAG);
12700 if (isPSHUFHWMask(M, VT, HasInt256))
12701 return getTargetShuffleNode(X86ISD::PSHUFHW, dl, VT, V1,
12702 getShufflePSHUFHWImmediate(SVOp),
12705 if (isPSHUFLWMask(M, VT, HasInt256))
12706 return getTargetShuffleNode(X86ISD::PSHUFLW, dl, VT, V1,
12707 getShufflePSHUFLWImmediate(SVOp),
12710 unsigned MaskValue;
12711 if (isBlendMask(M, VT, Subtarget->hasSSE41(), Subtarget->hasInt256(),
12713 return LowerVECTOR_SHUFFLEtoBlend(SVOp, MaskValue, Subtarget, DAG);
12715 if (isSHUFPMask(M, VT))
12716 return getTargetShuffleNode(X86ISD::SHUFP, dl, VT, V1, V2,
12717 getShuffleSHUFImmediate(SVOp), DAG);
12719 if (isUNPCKL_v_undef_Mask(M, VT, HasInt256))
12720 return getTargetShuffleNode(X86ISD::UNPCKL, dl, VT, V1, V1, DAG);
12721 if (isUNPCKH_v_undef_Mask(M, VT, HasInt256))
12722 return getTargetShuffleNode(X86ISD::UNPCKH, dl, VT, V1, V1, DAG);
12724 //===--------------------------------------------------------------------===//
12725 // Generate target specific nodes for 128 or 256-bit shuffles only
12726 // supported in the AVX instruction set.
12729 // Handle VMOVDDUPY permutations
12730 if (V2IsUndef && isMOVDDUPYMask(M, VT, HasFp256))
12731 return getTargetShuffleNode(X86ISD::MOVDDUP, dl, VT, V1, DAG);
12733 // Handle VPERMILPS/D* permutations
12734 if (isVPERMILPMask(M, VT)) {
12735 if ((HasInt256 && VT == MVT::v8i32) || VT == MVT::v16i32)
12736 return getTargetShuffleNode(X86ISD::PSHUFD, dl, VT, V1,
12737 getShuffleSHUFImmediate(SVOp), DAG);
12738 return getTargetShuffleNode(X86ISD::VPERMILPI, dl, VT, V1,
12739 getShuffleSHUFImmediate(SVOp), DAG);
12743 if (VT.is512BitVector() && isINSERT64x4Mask(M, VT, &Idx))
12744 return Insert256BitVector(V1, Extract256BitVector(V2, 0, DAG, dl),
12745 Idx*(NumElems/2), DAG, dl);
12747 // Handle VPERM2F128/VPERM2I128 permutations
12748 if (isVPERM2X128Mask(M, VT, HasFp256))
12749 return getTargetShuffleNode(X86ISD::VPERM2X128, dl, VT, V1,
12750 V2, getShuffleVPERM2X128Immediate(SVOp), DAG);
12752 if (Subtarget->hasSSE41() && isINSERTPSMask(M, VT))
12753 return getINSERTPS(SVOp, dl, DAG);
12756 if (V2IsUndef && HasInt256 && isPermImmMask(M, VT, Imm8))
12757 return getTargetShuffleNode(X86ISD::VPERMI, dl, VT, V1, Imm8, DAG);
12759 if ((V2IsUndef && HasInt256 && VT.is256BitVector() && NumElems == 8) ||
12760 VT.is512BitVector()) {
12761 MVT MaskEltVT = MVT::getIntegerVT(VT.getVectorElementType().getSizeInBits());
12762 MVT MaskVectorVT = MVT::getVectorVT(MaskEltVT, NumElems);
12763 SmallVector<SDValue, 16> permclMask;
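// Build the permute control vector; undef mask entries (negative) are mapped
// to element 0.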
12764 for (unsigned i = 0; i != NumElems; ++i) {
12765 permclMask.push_back(DAG.getConstant((M[i]>=0) ? M[i] : 0, MaskEltVT));
12768 SDValue Mask = DAG.getNode(ISD::BUILD_VECTOR, dl, MaskVectorVT, permclMask);
12770     // The bitcast is for VPERMPS, since the mask is v8i32 but the node takes v8f32.
12771 return DAG.getNode(X86ISD::VPERMV, dl, VT,
12772 DAG.getNode(ISD::BITCAST, dl, VT, Mask), V1);
12773 return DAG.getNode(X86ISD::VPERMV3, dl, VT, V1,
12774 DAG.getNode(ISD::BITCAST, dl, VT, Mask), V2);
12777 //===--------------------------------------------------------------------===//
12778 // Since no target specific shuffle was selected for this generic one,
12779 // lower it into other known shuffles. FIXME: this isn't true yet, but
12780 // this is the plan.
12783 // Handle v8i16 specifically since SSE can do byte extraction and insertion.
12784 if (VT == MVT::v8i16) {
12785 SDValue NewOp = LowerVECTOR_SHUFFLEv8i16(Op, Subtarget, DAG);
12786 if (NewOp.getNode())
12790 if (VT == MVT::v16i16 && Subtarget->hasInt256()) {
12791 SDValue NewOp = LowerVECTOR_SHUFFLEv16i16(Op, DAG);
12792 if (NewOp.getNode())
12796 if (VT == MVT::v16i8) {
12797 SDValue NewOp = LowerVECTOR_SHUFFLEv16i8(SVOp, Subtarget, DAG);
12798 if (NewOp.getNode())
12802 if (VT == MVT::v32i8) {
12803 SDValue NewOp = LowerVECTOR_SHUFFLEv32i8(SVOp, Subtarget, DAG);
12804 if (NewOp.getNode())
12808 // Handle all 128-bit wide vectors with 4 elements, and match them with
12809 // several different shuffle types.
12810 if (NumElems == 4 && VT.is128BitVector())
12811 return LowerVECTOR_SHUFFLE_128v4(SVOp, DAG);
12813 // Handle general 256-bit shuffles
12814 if (VT.is256BitVector())
12815 return LowerVECTOR_SHUFFLE_256(SVOp, DAG);
12820 // This function assumes its argument is a BUILD_VECTOR of constants or
12821 // undef SDNodes, i.e. ISD::isBuildVectorOfConstantSDNodes(BuildVector) is
12823 static bool BUILD_VECTORtoBlendMask(BuildVectorSDNode *BuildVector,
12824 unsigned &MaskValue) {
12826 unsigned NumElems = BuildVector->getNumOperands();
12827 // There are 2 lanes if (NumElems > 8), and 1 lane otherwise.
12828 unsigned NumLanes = (NumElems - 1) / 8 + 1;
12829 unsigned NumElemsInLane = NumElems / NumLanes;
12831   // A blend for v16i16 must be symmetric across both lanes.
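// Only NumElemsInLane bits of MaskValue are built here; the blend instruction
// applies one immediate to each 128-bit lane, so for two-lane types both lanes
// of the condition must agree element-by-element.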
12832 for (unsigned i = 0; i < NumElemsInLane; ++i) {
12833 SDValue EltCond = BuildVector->getOperand(i);
12834 SDValue SndLaneEltCond =
12835 (NumLanes == 2) ? BuildVector->getOperand(i + NumElemsInLane) : EltCond;
12837 int Lane1Cond = -1, Lane2Cond = -1;
12838 if (isa<ConstantSDNode>(EltCond))
12839 Lane1Cond = !isZero(EltCond);
12840 if (isa<ConstantSDNode>(SndLaneEltCond))
12841 Lane2Cond = !isZero(SndLaneEltCond);
12843 if (Lane1Cond == Lane2Cond || Lane2Cond < 0)
12844 // Lane1Cond != 0, means we want the first argument.
12845 // Lane1Cond == 0, means we want the second argument.
12846 // The encoding of this argument is 0 for the first argument, 1
12847 // for the second. Therefore, invert the condition.
12848 MaskValue |= !Lane1Cond << i;
12849 else if (Lane1Cond < 0)
12850 MaskValue |= !Lane2Cond << i;
12857 /// \brief Try to lower a VSELECT instruction to an immediate-controlled blend
12859 static SDValue lowerVSELECTtoBLENDI(SDValue Op, const X86Subtarget *Subtarget,
12860 SelectionDAG &DAG) {
12861 SDValue Cond = Op.getOperand(0);
12862 SDValue LHS = Op.getOperand(1);
12863 SDValue RHS = Op.getOperand(2);
12865 MVT VT = Op.getSimpleValueType();
12866 MVT EltVT = VT.getVectorElementType();
12867 unsigned NumElems = VT.getVectorNumElements();
12869 // There is no blend with immediate in AVX-512.
12870 if (VT.is512BitVector())
12873 if (!Subtarget->hasSSE41() || EltVT == MVT::i8)
12875 if (!Subtarget->hasInt256() && VT == MVT::v16i16)
12878 if (!ISD::isBuildVectorOfConstantSDNodes(Cond.getNode()))
12881 // Check the mask for BLEND and build the value.
12882 unsigned MaskValue = 0;
12883 if (!BUILD_VECTORtoBlendMask(cast<BuildVectorSDNode>(Cond), MaskValue))
12886   // Convert i32 vectors to floating point if this is not AVX2.
12887   // AVX2 introduced the VPBLENDD instruction for 128- and 256-bit vectors.
12889 if (EltVT == MVT::i64 || (EltVT == MVT::i32 && !Subtarget->hasInt256())) {
12890 BlendVT = MVT::getVectorVT(MVT::getFloatingPointVT(EltVT.getSizeInBits()),
12892 LHS = DAG.getNode(ISD::BITCAST, dl, VT, LHS);
12893 RHS = DAG.getNode(ISD::BITCAST, dl, VT, RHS);
12896 SDValue Ret = DAG.getNode(X86ISD::BLENDI, dl, BlendVT, LHS, RHS,
12897 DAG.getConstant(MaskValue, MVT::i32));
12898 return DAG.getNode(ISD::BITCAST, dl, VT, Ret);
12901 SDValue X86TargetLowering::LowerVSELECT(SDValue Op, SelectionDAG &DAG) const {
12902 // A vselect where all conditions and data are constants can be optimized into
12903 // a single vector load by SelectionDAGLegalize::ExpandBUILD_VECTOR().
12904 if (ISD::isBuildVectorOfConstantSDNodes(Op.getOperand(0).getNode()) &&
12905 ISD::isBuildVectorOfConstantSDNodes(Op.getOperand(1).getNode()) &&
12906 ISD::isBuildVectorOfConstantSDNodes(Op.getOperand(2).getNode()))
12909 SDValue BlendOp = lowerVSELECTtoBLENDI(Op, Subtarget, DAG);
12910 if (BlendOp.getNode())
12913 // Some types for vselect were previously set to Expand, not Legal or
12914 // Custom. Return an empty SDValue so we fall-through to Expand, after
12915 // the Custom lowering phase.
12916 MVT VT = Op.getSimpleValueType();
12917 switch (VT.SimpleTy) {
12922 if (Subtarget->hasBWI() && Subtarget->hasVLX())
12927 // We couldn't create a "Blend with immediate" node.
12928 // This node should still be legal, but we'll have to emit a blendv*
12933 static SDValue LowerEXTRACT_VECTOR_ELT_SSE4(SDValue Op, SelectionDAG &DAG) {
12934 MVT VT = Op.getSimpleValueType();
12937 if (!Op.getOperand(0).getSimpleValueType().is128BitVector())
12940 if (VT.getSizeInBits() == 8) {
12941 SDValue Extract = DAG.getNode(X86ISD::PEXTRB, dl, MVT::i32,
12942 Op.getOperand(0), Op.getOperand(1));
12943 SDValue Assert = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Extract,
12944 DAG.getValueType(VT));
12945 return DAG.getNode(ISD::TRUNCATE, dl, VT, Assert);
12948 if (VT.getSizeInBits() == 16) {
12949 unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
12950 // If Idx is 0, it's cheaper to do a move instead of a pextrw.
12952 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i16,
12953 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
12954 DAG.getNode(ISD::BITCAST, dl,
12957 Op.getOperand(1)));
12958 SDValue Extract = DAG.getNode(X86ISD::PEXTRW, dl, MVT::i32,
12959 Op.getOperand(0), Op.getOperand(1));
12960 SDValue Assert = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Extract,
12961 DAG.getValueType(VT));
12962 return DAG.getNode(ISD::TRUNCATE, dl, VT, Assert);
12965 if (VT == MVT::f32) {
12966 // EXTRACTPS outputs to a GPR32 register which will require a movd to copy
12967 // the result back to FR32 register. It's only worth matching if the
12968 // result has a single use which is a store or a bitcast to i32. And in
12969 // the case of a store, it's not worth it if the index is a constant 0,
12970 // because a MOVSSmr can be used instead, which is smaller and faster.
12971 if (!Op.hasOneUse())
12973 SDNode *User = *Op.getNode()->use_begin();
12974 if ((User->getOpcode() != ISD::STORE ||
12975 (isa<ConstantSDNode>(Op.getOperand(1)) &&
12976 cast<ConstantSDNode>(Op.getOperand(1))->isNullValue())) &&
12977 (User->getOpcode() != ISD::BITCAST ||
12978 User->getValueType(0) != MVT::i32))
12980 SDValue Extract = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
12981 DAG.getNode(ISD::BITCAST, dl, MVT::v4i32,
12984 return DAG.getNode(ISD::BITCAST, dl, MVT::f32, Extract);
12987 if (VT == MVT::i32 || VT == MVT::i64) {
12988 // ExtractPS/pextrq works with constant index.
12989 if (isa<ConstantSDNode>(Op.getOperand(1)))
12995 /// Extract one bit from a mask vector, such as v16i1 or v8i1.
12996 /// This is an AVX-512 feature.
12998 X86TargetLowering::ExtractBitFromMaskVector(SDValue Op, SelectionDAG &DAG) const {
12999 SDValue Vec = Op.getOperand(0);
13001 MVT VecVT = Vec.getSimpleValueType();
13002 SDValue Idx = Op.getOperand(1);
13003 MVT EltVT = Op.getSimpleValueType();
13005 assert((EltVT == MVT::i1) && "Unexpected operands in ExtractBitFromMaskVector");
13006 assert((VecVT.getVectorNumElements() <= 16 || Subtarget->hasBWI()) &&
13007 "Unexpected vector type in ExtractBitFromMaskVector");
13009   // A variable index can't be handled in mask registers;
13010   // extend the vector to VR512.
13011 if (!isa<ConstantSDNode>(Idx)) {
13012 MVT ExtVT = (VecVT == MVT::v8i1 ? MVT::v8i64 : MVT::v16i32);
13013 SDValue Ext = DAG.getNode(ISD::ZERO_EXTEND, dl, ExtVT, Vec);
13014 SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl,
13015 ExtVT.getVectorElementType(), Ext, Idx);
13016 return DAG.getNode(ISD::TRUNCATE, dl, EltVT, Elt);
13019 unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
13020 const TargetRegisterClass* rc = getRegClassFor(VecVT);
13021 if (!Subtarget->hasDQI() && (VecVT.getVectorNumElements() <= 8))
13022 rc = getRegClassFor(MVT::v16i1);
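// Shift the requested bit up to the MSB and then back down to bit 0 so the
// extract below reads a clean 0/1 value.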
13023 unsigned MaxSift = rc->getSize()*8 - 1;
13024 Vec = DAG.getNode(X86ISD::VSHLI, dl, VecVT, Vec,
13025 DAG.getConstant(MaxSift - IdxVal, MVT::i8));
13026 Vec = DAG.getNode(X86ISD::VSRLI, dl, VecVT, Vec,
13027 DAG.getConstant(MaxSift, MVT::i8));
13028 return DAG.getNode(X86ISD::VEXTRACT, dl, MVT::i1, Vec,
13029 DAG.getIntPtrConstant(0));
13033 X86TargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op,
13034 SelectionDAG &DAG) const {
13036 SDValue Vec = Op.getOperand(0);
13037 MVT VecVT = Vec.getSimpleValueType();
13038 SDValue Idx = Op.getOperand(1);
13040 if (Op.getSimpleValueType() == MVT::i1)
13041 return ExtractBitFromMaskVector(Op, DAG);
13043 if (!isa<ConstantSDNode>(Idx)) {
13044 if (VecVT.is512BitVector() ||
13045 (VecVT.is256BitVector() && Subtarget->hasInt256() &&
13046 VecVT.getVectorElementType().getSizeInBits() == 32)) {
13049 MVT::getIntegerVT(VecVT.getVectorElementType().getSizeInBits());
13050 MVT MaskVT = MVT::getVectorVT(MaskEltVT, VecVT.getSizeInBits() /
13051 MaskEltVT.getSizeInBits());
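// Place Idx in lane 0 of a zero mask and use VPERMV to move the requested
// element into lane 0, where it can be extracted with a constant index.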
13053 Idx = DAG.getZExtOrTrunc(Idx, dl, MaskEltVT);
13054 SDValue Mask = DAG.getNode(X86ISD::VINSERT, dl, MaskVT,
13055 getZeroVector(MaskVT, Subtarget, DAG, dl),
13056 Idx, DAG.getConstant(0, getPointerTy()));
13057 SDValue Perm = DAG.getNode(X86ISD::VPERMV, dl, VecVT, Mask, Vec);
13058 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, Op.getValueType(),
13059 Perm, DAG.getConstant(0, getPointerTy()));
13064   // If this is a 256-bit or 512-bit vector result, first extract the containing
13065   // 128-bit subvector and then extract the element from it.
13066 if (VecVT.is256BitVector() || VecVT.is512BitVector()) {
13068 unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
13069 // Get the 128-bit vector.
13070 Vec = Extract128BitVector(Vec, IdxVal, DAG, dl);
13071 MVT EltVT = VecVT.getVectorElementType();
13073 unsigned ElemsPerChunk = 128 / EltVT.getSizeInBits();
13075 //if (IdxVal >= NumElems/2)
13076 // IdxVal -= NumElems/2;
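// This is IdxVal % ElemsPerChunk: the element's position within its 128-bit chunk.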
13077 IdxVal -= (IdxVal/ElemsPerChunk)*ElemsPerChunk;
13078 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, Op.getValueType(), Vec,
13079 DAG.getConstant(IdxVal, MVT::i32));
13082 assert(VecVT.is128BitVector() && "Unexpected vector length");
13084 if (Subtarget->hasSSE41()) {
13085 SDValue Res = LowerEXTRACT_VECTOR_ELT_SSE4(Op, DAG);
13090 MVT VT = Op.getSimpleValueType();
13091 // TODO: handle v16i8.
13092 if (VT.getSizeInBits() == 16) {
13093 SDValue Vec = Op.getOperand(0);
13094 unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
13096 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i16,
13097 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
13098 DAG.getNode(ISD::BITCAST, dl,
13100 Op.getOperand(1)));
13101     // Transform it so it matches pextrw, which produces a 32-bit result.
13102 MVT EltVT = MVT::i32;
13103 SDValue Extract = DAG.getNode(X86ISD::PEXTRW, dl, EltVT,
13104 Op.getOperand(0), Op.getOperand(1));
13105 SDValue Assert = DAG.getNode(ISD::AssertZext, dl, EltVT, Extract,
13106 DAG.getValueType(VT));
13107 return DAG.getNode(ISD::TRUNCATE, dl, VT, Assert);
13110 if (VT.getSizeInBits() == 32) {
13111 unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
13115 // SHUFPS the element to the lowest double word, then movss.
13116 int Mask[4] = { static_cast<int>(Idx), -1, -1, -1 };
13117 MVT VVT = Op.getOperand(0).getSimpleValueType();
13118 SDValue Vec = DAG.getVectorShuffle(VVT, dl, Op.getOperand(0),
13119 DAG.getUNDEF(VVT), Mask);
13120 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Vec,
13121 DAG.getIntPtrConstant(0));
13124 if (VT.getSizeInBits() == 64) {
13125 // FIXME: .td only matches this for <2 x f64>, not <2 x i64> on 32b
13126 // FIXME: seems like this should be unnecessary if mov{h,l}pd were taught
13127 // to match extract_elt for f64.
13128 unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
13132 // UNPCKHPD the element to the lowest double word, then movsd.
13133     // Note that if the lower 64 bits of the result of the UNPCKHPD are then stored
13134     // to an f64mem, the whole operation is folded into a single MOVHPDmr.
13135 int Mask[2] = { 1, -1 };
13136 MVT VVT = Op.getOperand(0).getSimpleValueType();
13137 SDValue Vec = DAG.getVectorShuffle(VVT, dl, Op.getOperand(0),
13138 DAG.getUNDEF(VVT), Mask);
13139 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Vec,
13140 DAG.getIntPtrConstant(0));
13146 /// Insert one bit into a mask vector, such as v16i1 or v8i1.
13147 /// This is an AVX-512 feature.
13149 X86TargetLowering::InsertBitToMaskVector(SDValue Op, SelectionDAG &DAG) const {
13151 SDValue Vec = Op.getOperand(0);
13152 SDValue Elt = Op.getOperand(1);
13153 SDValue Idx = Op.getOperand(2);
13154 MVT VecVT = Vec.getSimpleValueType();
13156 if (!isa<ConstantSDNode>(Idx)) {
13157     // Non-constant index. Extend the source and destination,
13158 // insert element and then truncate the result.
13159 MVT ExtVecVT = (VecVT == MVT::v8i1 ? MVT::v8i64 : MVT::v16i32);
13160 MVT ExtEltVT = (VecVT == MVT::v8i1 ? MVT::i64 : MVT::i32);
13161 SDValue ExtOp = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, ExtVecVT,
13162 DAG.getNode(ISD::ZERO_EXTEND, dl, ExtVecVT, Vec),
13163 DAG.getNode(ISD::ZERO_EXTEND, dl, ExtEltVT, Elt), Idx);
13164 return DAG.getNode(ISD::TRUNCATE, dl, VecVT, ExtOp);
13167 unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
13168 SDValue EltInVec = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VecVT, Elt);
13169 if (Vec.getOpcode() == ISD::UNDEF)
13170 return DAG.getNode(X86ISD::VSHLI, dl, VecVT, EltInVec,
13171 DAG.getConstant(IdxVal, MVT::i8));
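// Otherwise isolate bit 0 of the element by shifting it to the MSB and back
// down to position IdxVal, then OR it into the existing mask vector.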
13172 const TargetRegisterClass* rc = getRegClassFor(VecVT);
13173 unsigned MaxSift = rc->getSize()*8 - 1;
13174 EltInVec = DAG.getNode(X86ISD::VSHLI, dl, VecVT, EltInVec,
13175 DAG.getConstant(MaxSift, MVT::i8));
13176 EltInVec = DAG.getNode(X86ISD::VSRLI, dl, VecVT, EltInVec,
13177 DAG.getConstant(MaxSift - IdxVal, MVT::i8));
13178 return DAG.getNode(ISD::OR, dl, VecVT, Vec, EltInVec);
13181 SDValue X86TargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op,
13182 SelectionDAG &DAG) const {
13183 MVT VT = Op.getSimpleValueType();
13184 MVT EltVT = VT.getVectorElementType();
13186 if (EltVT == MVT::i1)
13187 return InsertBitToMaskVector(Op, DAG);
13190 SDValue N0 = Op.getOperand(0);
13191 SDValue N1 = Op.getOperand(1);
13192 SDValue N2 = Op.getOperand(2);
13193 if (!isa<ConstantSDNode>(N2))
13195 auto *N2C = cast<ConstantSDNode>(N2);
13196 unsigned IdxVal = N2C->getZExtValue();
13198 // If the vector is wider than 128 bits, extract the 128-bit subvector, insert
13199 // into that, and then insert the subvector back into the result.
13200 if (VT.is256BitVector() || VT.is512BitVector()) {
13201 // Get the desired 128-bit vector half.
13202 SDValue V = Extract128BitVector(N0, IdxVal, DAG, dl);
13204 // Insert the element into the desired half.
13205 unsigned NumEltsIn128 = 128 / EltVT.getSizeInBits();
13206 unsigned IdxIn128 = IdxVal - (IdxVal / NumEltsIn128) * NumEltsIn128;
13208 V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, V.getValueType(), V, N1,
13209 DAG.getConstant(IdxIn128, MVT::i32));
13211 // Insert the changed part back to the 256-bit vector
13212 return Insert128BitVector(N0, V, IdxVal, DAG, dl);
13214 assert(VT.is128BitVector() && "Only 128-bit vector types should be left!");
13216 if (Subtarget->hasSSE41()) {
13217 if (EltVT.getSizeInBits() == 8 || EltVT.getSizeInBits() == 16) {
13219 if (VT == MVT::v8i16) {
13220 Opc = X86ISD::PINSRW;
13222 assert(VT == MVT::v16i8);
13223 Opc = X86ISD::PINSRB;
13226       // Transform it so it matches pinsr{b,w}, which expects a GR32 as its second
13228 if (N1.getValueType() != MVT::i32)
13229 N1 = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, N1);
13230 if (N2.getValueType() != MVT::i32)
13231 N2 = DAG.getIntPtrConstant(IdxVal);
13232 return DAG.getNode(Opc, dl, VT, N0, N1, N2);
13235 if (EltVT == MVT::f32) {
13236 // Bits [7:6] of the constant are the source select. This will always be
13237 // zero here. The DAG Combiner may combine an extract_elt index into
13239 // bits. For example (insert (extract, 3), 2) could be matched by
13241 // the '3' into bits [7:6] of X86ISD::INSERTPS.
13242 // Bits [5:4] of the constant are the destination select. This is the
13243 // value of the incoming immediate.
13244 // Bits [3:0] of the constant are the zero mask. The DAG Combiner may
13245 // combine either bitwise AND or insert of float 0.0 to set these bits.
13246 N2 = DAG.getIntPtrConstant(IdxVal << 4);
13247       // Create this as a scalar-to-vector.
13248 N1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4f32, N1);
13249 return DAG.getNode(X86ISD::INSERTPS, dl, VT, N0, N1, N2);
13252 if (EltVT == MVT::i32 || EltVT == MVT::i64) {
13253 // PINSR* works with constant index.
13258 if (EltVT == MVT::i8)
13261 if (EltVT.getSizeInBits() == 16) {
13262     // Transform it so it matches pinsrw, which expects a 16-bit value in a GR32
13263 // as its second argument.
13264 if (N1.getValueType() != MVT::i32)
13265 N1 = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, N1);
13266 if (N2.getValueType() != MVT::i32)
13267 N2 = DAG.getIntPtrConstant(IdxVal);
13268 return DAG.getNode(X86ISD::PINSRW, dl, VT, N0, N1, N2);
13273 static SDValue LowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG) {
13275 MVT OpVT = Op.getSimpleValueType();
13277 // If this is a 256-bit vector result, first insert into a 128-bit
13278 // vector and then insert into the 256-bit vector.
13279 if (!OpVT.is128BitVector()) {
13280 // Insert into a 128-bit vector.
13281 unsigned SizeFactor = OpVT.getSizeInBits()/128;
13282 MVT VT128 = MVT::getVectorVT(OpVT.getVectorElementType(),
13283 OpVT.getVectorNumElements() / SizeFactor);
13285 Op = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT128, Op.getOperand(0));
13287 // Insert the 128-bit vector.
13288 return Insert128BitVector(DAG.getUNDEF(OpVT), Op, 0, DAG, dl);
13291 if (OpVT == MVT::v1i64 &&
13292 Op.getOperand(0).getValueType() == MVT::i64)
13293 return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v1i64, Op.getOperand(0));
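// For smaller scalars, any-extend to i32, build a v4i32, and bitcast the
// result to the requested 128-bit vector type.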
13295 SDValue AnyExt = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, Op.getOperand(0));
13296 assert(OpVT.is128BitVector() && "Expected an SSE type!");
13297 return DAG.getNode(ISD::BITCAST, dl, OpVT,
13298 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32,AnyExt));
13301 // Lower a node with an EXTRACT_SUBVECTOR opcode. This may result in
13302 // a simple subregister reference or explicit instructions to grab
13303 // upper bits of a vector.
13304 static SDValue LowerEXTRACT_SUBVECTOR(SDValue Op, const X86Subtarget *Subtarget,
13305 SelectionDAG &DAG) {
13307 SDValue In = Op.getOperand(0);
13308 SDValue Idx = Op.getOperand(1);
13309 unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
13310 MVT ResVT = Op.getSimpleValueType();
13311 MVT InVT = In.getSimpleValueType();
13313 if (Subtarget->hasFp256()) {
13314 if (ResVT.is128BitVector() &&
13315 (InVT.is256BitVector() || InVT.is512BitVector()) &&
13316 isa<ConstantSDNode>(Idx)) {
13317 return Extract128BitVector(In, IdxVal, DAG, dl);
13319 if (ResVT.is256BitVector() && InVT.is512BitVector() &&
13320 isa<ConstantSDNode>(Idx)) {
13321 return Extract256BitVector(In, IdxVal, DAG, dl);
13327 // Lower a node with an INSERT_SUBVECTOR opcode. This may result in a
13328 // simple superregister reference or explicit instructions to insert
13329 // the upper bits of a vector.
13330 static SDValue LowerINSERT_SUBVECTOR(SDValue Op, const X86Subtarget *Subtarget,
13331 SelectionDAG &DAG) {
13332 if (!Subtarget->hasAVX())
13336 SDValue Vec = Op.getOperand(0);
13337 SDValue SubVec = Op.getOperand(1);
13338 SDValue Idx = Op.getOperand(2);
13340 if (!isa<ConstantSDNode>(Idx))
13343 unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
13344 MVT OpVT = Op.getSimpleValueType();
13345 MVT SubVecVT = SubVec.getSimpleValueType();
13347 // Fold two 16-byte subvector loads into one 32-byte load:
13348 // (insert_subvector (insert_subvector undef, (load addr), 0),
13349 // (load addr + 16), Elts/2)
13351 if ((IdxVal == OpVT.getVectorNumElements() / 2) &&
13352 Vec.getOpcode() == ISD::INSERT_SUBVECTOR &&
13353 OpVT.is256BitVector() && SubVecVT.is128BitVector() &&
13354 !Subtarget->isUnalignedMem32Slow()) {
13355 SDValue SubVec2 = Vec.getOperand(1);
13356 if (auto *Idx2 = dyn_cast<ConstantSDNode>(Vec.getOperand(2))) {
13357 if (Idx2->getZExtValue() == 0) {
13358 SDValue Ops[] = { SubVec2, SubVec };
13359 SDValue LD = EltsFromConsecutiveLoads(OpVT, Ops, dl, DAG, false);
13366 if ((OpVT.is256BitVector() || OpVT.is512BitVector()) &&
13367 SubVecVT.is128BitVector())
13368 return Insert128BitVector(Vec, SubVec, IdxVal, DAG, dl);
13370 if (OpVT.is512BitVector() && SubVecVT.is256BitVector())
13371 return Insert256BitVector(Vec, SubVec, IdxVal, DAG, dl);
13376 // ConstantPool, JumpTable, GlobalAddress, and ExternalSymbol are lowered as
13377 // their target counterpart wrapped in the X86ISD::Wrapper node. Suppose N is
13378 // one of the above mentioned nodes. It has to be wrapped because otherwise
13379 // Select(N) returns N. So the raw TargetGlobalAddress nodes, etc. can only
13380 // be used to form an addressing mode. These wrapped nodes will be selected
13383 X86TargetLowering::LowerConstantPool(SDValue Op, SelectionDAG &DAG) const {
13384 ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
13386 // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
13387 // global base reg.
13388 unsigned char OpFlag = 0;
13389 unsigned WrapperKind = X86ISD::Wrapper;
13390 CodeModel::Model M = DAG.getTarget().getCodeModel();
13392 if (Subtarget->isPICStyleRIPRel() &&
13393 (M == CodeModel::Small || M == CodeModel::Kernel))
13394 WrapperKind = X86ISD::WrapperRIP;
13395 else if (Subtarget->isPICStyleGOT())
13396 OpFlag = X86II::MO_GOTOFF;
13397 else if (Subtarget->isPICStyleStubPIC())
13398 OpFlag = X86II::MO_PIC_BASE_OFFSET;
13400 SDValue Result = DAG.getTargetConstantPool(CP->getConstVal(), getPointerTy(),
13401 CP->getAlignment(),
13402 CP->getOffset(), OpFlag);
13404 Result = DAG.getNode(WrapperKind, DL, getPointerTy(), Result);
13405 // With PIC, the address is actually $g + Offset.
13407 Result = DAG.getNode(ISD::ADD, DL, getPointerTy(),
13408 DAG.getNode(X86ISD::GlobalBaseReg,
13409 SDLoc(), getPointerTy()),
13416 SDValue X86TargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const {
13417 JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
13419 // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
13420 // global base reg.
13421 unsigned char OpFlag = 0;
13422 unsigned WrapperKind = X86ISD::Wrapper;
13423 CodeModel::Model M = DAG.getTarget().getCodeModel();
13425 if (Subtarget->isPICStyleRIPRel() &&
13426 (M == CodeModel::Small || M == CodeModel::Kernel))
13427 WrapperKind = X86ISD::WrapperRIP;
13428 else if (Subtarget->isPICStyleGOT())
13429 OpFlag = X86II::MO_GOTOFF;
13430 else if (Subtarget->isPICStyleStubPIC())
13431 OpFlag = X86II::MO_PIC_BASE_OFFSET;
13433 SDValue Result = DAG.getTargetJumpTable(JT->getIndex(), getPointerTy(),
13436 Result = DAG.getNode(WrapperKind, DL, getPointerTy(), Result);
13438 // With PIC, the address is actually $g + Offset.
13440 Result = DAG.getNode(ISD::ADD, DL, getPointerTy(),
13441 DAG.getNode(X86ISD::GlobalBaseReg,
13442 SDLoc(), getPointerTy()),
13449 X86TargetLowering::LowerExternalSymbol(SDValue Op, SelectionDAG &DAG) const {
13450 const char *Sym = cast<ExternalSymbolSDNode>(Op)->getSymbol();
13452 // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
13453 // global base reg.
13454 unsigned char OpFlag = 0;
13455 unsigned WrapperKind = X86ISD::Wrapper;
13456 CodeModel::Model M = DAG.getTarget().getCodeModel();
13458 if (Subtarget->isPICStyleRIPRel() &&
13459 (M == CodeModel::Small || M == CodeModel::Kernel)) {
13460 if (Subtarget->isTargetDarwin() || Subtarget->isTargetELF())
13461 OpFlag = X86II::MO_GOTPCREL;
13462 WrapperKind = X86ISD::WrapperRIP;
13463 } else if (Subtarget->isPICStyleGOT()) {
13464 OpFlag = X86II::MO_GOT;
13465 } else if (Subtarget->isPICStyleStubPIC()) {
13466 OpFlag = X86II::MO_DARWIN_NONLAZY_PIC_BASE;
13467 } else if (Subtarget->isPICStyleStubNoDynamic()) {
13468 OpFlag = X86II::MO_DARWIN_NONLAZY;
13471 SDValue Result = DAG.getTargetExternalSymbol(Sym, getPointerTy(), OpFlag);
13474 Result = DAG.getNode(WrapperKind, DL, getPointerTy(), Result);
13476 // With PIC, the address is actually $g + Offset.
13477 if (DAG.getTarget().getRelocationModel() == Reloc::PIC_ &&
13478 !Subtarget->is64Bit()) {
13479 Result = DAG.getNode(ISD::ADD, DL, getPointerTy(),
13480 DAG.getNode(X86ISD::GlobalBaseReg,
13481 SDLoc(), getPointerTy()),
13485 // For symbols that require a load from a stub to get the address, emit the
13487 if (isGlobalStubReference(OpFlag))
13488 Result = DAG.getLoad(getPointerTy(), DL, DAG.getEntryNode(), Result,
13489 MachinePointerInfo::getGOT(), false, false, false, 0);
13495 X86TargetLowering::LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const {
13496 // Create the TargetBlockAddressAddress node.
13497 unsigned char OpFlags =
13498 Subtarget->ClassifyBlockAddressReference();
13499 CodeModel::Model M = DAG.getTarget().getCodeModel();
13500 const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
13501 int64_t Offset = cast<BlockAddressSDNode>(Op)->getOffset();
13503 SDValue Result = DAG.getTargetBlockAddress(BA, getPointerTy(), Offset,
13506 if (Subtarget->isPICStyleRIPRel() &&
13507 (M == CodeModel::Small || M == CodeModel::Kernel))
13508 Result = DAG.getNode(X86ISD::WrapperRIP, dl, getPointerTy(), Result);
13510 Result = DAG.getNode(X86ISD::Wrapper, dl, getPointerTy(), Result);
13512 // With PIC, the address is actually $g + Offset.
13513 if (isGlobalRelativeToPICBase(OpFlags)) {
13514 Result = DAG.getNode(ISD::ADD, dl, getPointerTy(),
13515 DAG.getNode(X86ISD::GlobalBaseReg, dl, getPointerTy()),
13523 X86TargetLowering::LowerGlobalAddress(const GlobalValue *GV, SDLoc dl,
13524 int64_t Offset, SelectionDAG &DAG) const {
13525 // Create the TargetGlobalAddress node, folding in the constant
13526 // offset if it is legal.
13527 unsigned char OpFlags =
13528 Subtarget->ClassifyGlobalReference(GV, DAG.getTarget());
13529 CodeModel::Model M = DAG.getTarget().getCodeModel();
13531 if (OpFlags == X86II::MO_NO_FLAG &&
13532 X86::isOffsetSuitableForCodeModel(Offset, M)) {
13533 // A direct static reference to a global.
13534 Result = DAG.getTargetGlobalAddress(GV, dl, getPointerTy(), Offset);
13537 Result = DAG.getTargetGlobalAddress(GV, dl, getPointerTy(), 0, OpFlags);
13540 if (Subtarget->isPICStyleRIPRel() &&
13541 (M == CodeModel::Small || M == CodeModel::Kernel))
13542 Result = DAG.getNode(X86ISD::WrapperRIP, dl, getPointerTy(), Result);
13544 Result = DAG.getNode(X86ISD::Wrapper, dl, getPointerTy(), Result);
13546 // With PIC, the address is actually $g + Offset.
13547 if (isGlobalRelativeToPICBase(OpFlags)) {
13548 Result = DAG.getNode(ISD::ADD, dl, getPointerTy(),
13549 DAG.getNode(X86ISD::GlobalBaseReg, dl, getPointerTy()),
13553 // For globals that require a load from a stub to get the address, emit the
13555 if (isGlobalStubReference(OpFlags))
13556 Result = DAG.getLoad(getPointerTy(), dl, DAG.getEntryNode(), Result,
13557 MachinePointerInfo::getGOT(), false, false, false, 0);
13559 // If there was a non-zero offset that we didn't fold, create an explicit
13560 // addition for it.
13562 Result = DAG.getNode(ISD::ADD, dl, getPointerTy(), Result,
13563 DAG.getConstant(Offset, getPointerTy()));
13569 X86TargetLowering::LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const {
13570 const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
13571 int64_t Offset = cast<GlobalAddressSDNode>(Op)->getOffset();
13572 return LowerGlobalAddress(GV, SDLoc(Op), Offset, DAG);
13576 GetTLSADDR(SelectionDAG &DAG, SDValue Chain, GlobalAddressSDNode *GA,
13577 SDValue *InFlag, const EVT PtrVT, unsigned ReturnReg,
13578 unsigned char OperandFlags, bool LocalDynamic = false) {
13579 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
13580 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
13582 SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl,
13583 GA->getValueType(0),
13587 X86ISD::NodeType CallType = LocalDynamic ? X86ISD::TLSBASEADDR
13591 SDValue Ops[] = { Chain, TGA, *InFlag };
13592 Chain = DAG.getNode(CallType, dl, NodeTys, Ops);
13594 SDValue Ops[] = { Chain, TGA };
13595 Chain = DAG.getNode(CallType, dl, NodeTys, Ops);
13598   // TLSADDR will be codegen'ed as a call. Inform MFI that the function has calls.
13599 MFI->setAdjustsStack(true);
13600 MFI->setHasCalls(true);
13602 SDValue Flag = Chain.getValue(1);
13603 return DAG.getCopyFromReg(Chain, dl, ReturnReg, PtrVT, Flag);
13606 // Lower ISD::GlobalTLSAddress using the "general dynamic" model, 32 bit
13608 LowerToTLSGeneralDynamicModel32(GlobalAddressSDNode *GA, SelectionDAG &DAG,
13611 SDLoc dl(GA); // ? function entry point might be better
13612 SDValue Chain = DAG.getCopyToReg(DAG.getEntryNode(), dl, X86::EBX,
13613 DAG.getNode(X86ISD::GlobalBaseReg,
13614 SDLoc(), PtrVT), InFlag);
13615 InFlag = Chain.getValue(1);
13617 return GetTLSADDR(DAG, Chain, GA, &InFlag, PtrVT, X86::EAX, X86II::MO_TLSGD);
13620 // Lower ISD::GlobalTLSAddress using the "general dynamic" model, 64 bit
13622 LowerToTLSGeneralDynamicModel64(GlobalAddressSDNode *GA, SelectionDAG &DAG,
13624 return GetTLSADDR(DAG, DAG.getEntryNode(), GA, nullptr, PtrVT,
13625 X86::RAX, X86II::MO_TLSGD);
13628 static SDValue LowerToTLSLocalDynamicModel(GlobalAddressSDNode *GA,
13634 // Get the start address of the TLS block for this module.
13635 X86MachineFunctionInfo* MFI = DAG.getMachineFunction()
13636 .getInfo<X86MachineFunctionInfo>();
13637 MFI->incNumLocalDynamicTLSAccesses();
13641 Base = GetTLSADDR(DAG, DAG.getEntryNode(), GA, nullptr, PtrVT, X86::RAX,
13642 X86II::MO_TLSLD, /*LocalDynamic=*/true);
13645 SDValue Chain = DAG.getCopyToReg(DAG.getEntryNode(), dl, X86::EBX,
13646 DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), PtrVT), InFlag);
13647 InFlag = Chain.getValue(1);
13648 Base = GetTLSADDR(DAG, Chain, GA, &InFlag, PtrVT, X86::EAX,
13649 X86II::MO_TLSLDM, /*LocalDynamic=*/true);
13652 // Note: the CleanupLocalDynamicTLSPass will remove redundant computations
13656 unsigned char OperandFlags = X86II::MO_DTPOFF;
13657 unsigned WrapperKind = X86ISD::Wrapper;
13658 SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl,
13659 GA->getValueType(0),
13660 GA->getOffset(), OperandFlags);
13661 SDValue Offset = DAG.getNode(WrapperKind, dl, PtrVT, TGA);
13663   // Add x@dtpoff to the base.
13664 return DAG.getNode(ISD::ADD, dl, PtrVT, Offset, Base);
13667 // Lower ISD::GlobalTLSAddress using the "initial exec" or "local exec" model.
13668 static SDValue LowerToTLSExecModel(GlobalAddressSDNode *GA, SelectionDAG &DAG,
13669 const EVT PtrVT, TLSModel::Model model,
13670 bool is64Bit, bool isPIC) {
13673 // Get the Thread Pointer, which is %gs:0 (32-bit) or %fs:0 (64-bit).
13674 Value *Ptr = Constant::getNullValue(Type::getInt8PtrTy(*DAG.getContext(),
13675 is64Bit ? 257 : 256));
13677 SDValue ThreadPointer =
13678 DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), DAG.getIntPtrConstant(0),
13679 MachinePointerInfo(Ptr), false, false, false, 0);
13681 unsigned char OperandFlags = 0;
13682 // Most TLS accesses are not RIP relative, even on x86-64. One exception is
13684 unsigned WrapperKind = X86ISD::Wrapper;
13685 if (model == TLSModel::LocalExec) {
13686 OperandFlags = is64Bit ? X86II::MO_TPOFF : X86II::MO_NTPOFF;
13687 } else if (model == TLSModel::InitialExec) {
13689 OperandFlags = X86II::MO_GOTTPOFF;
13690 WrapperKind = X86ISD::WrapperRIP;
13692 OperandFlags = isPIC ? X86II::MO_GOTNTPOFF : X86II::MO_INDNTPOFF;
13695 llvm_unreachable("Unexpected model");
13698 // emit "addl x@ntpoff,%eax" (local exec)
13699 // or "addl x@indntpoff,%eax" (initial exec)
13700 // or "addl x@gotntpoff(%ebx) ,%eax" (initial exec, 32-bit pic)
13702 DAG.getTargetGlobalAddress(GA->getGlobal(), dl, GA->getValueType(0),
13703 GA->getOffset(), OperandFlags);
13704 SDValue Offset = DAG.getNode(WrapperKind, dl, PtrVT, TGA);
13706 if (model == TLSModel::InitialExec) {
13707 if (isPIC && !is64Bit) {
13708 Offset = DAG.getNode(ISD::ADD, dl, PtrVT,
13709 DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), PtrVT),
13713 Offset = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Offset,
13714 MachinePointerInfo::getGOT(), false, false, false, 0);
13717   // The address of the thread-local variable is the thread pointer plus the
13718   // offset of the variable.
13719 return DAG.getNode(ISD::ADD, dl, PtrVT, ThreadPointer, Offset);
13723 X86TargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const {
13725 GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
13726 const GlobalValue *GV = GA->getGlobal();
13728 if (Subtarget->isTargetELF()) {
13729 TLSModel::Model model = DAG.getTarget().getTLSModel(GV);
13732 case TLSModel::GeneralDynamic:
13733 if (Subtarget->is64Bit())
13734 return LowerToTLSGeneralDynamicModel64(GA, DAG, getPointerTy());
13735 return LowerToTLSGeneralDynamicModel32(GA, DAG, getPointerTy());
13736 case TLSModel::LocalDynamic:
13737 return LowerToTLSLocalDynamicModel(GA, DAG, getPointerTy(),
13738 Subtarget->is64Bit());
13739 case TLSModel::InitialExec:
13740 case TLSModel::LocalExec:
13741 return LowerToTLSExecModel(
13742 GA, DAG, getPointerTy(), model, Subtarget->is64Bit(),
13743 DAG.getTarget().getRelocationModel() == Reloc::PIC_);
13745 llvm_unreachable("Unknown TLS model.");
13748 if (Subtarget->isTargetDarwin()) {
13749 // Darwin only has one model of TLS. Lower to that.
13750 unsigned char OpFlag = 0;
13751 unsigned WrapperKind = Subtarget->isPICStyleRIPRel() ?
13752 X86ISD::WrapperRIP : X86ISD::Wrapper;
13754 // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
13755 // global base reg.
13756 bool PIC32 = (DAG.getTarget().getRelocationModel() == Reloc::PIC_) &&
13757 !Subtarget->is64Bit();
13759 OpFlag = X86II::MO_TLVP_PIC_BASE;
13761 OpFlag = X86II::MO_TLVP;
13763 SDValue Result = DAG.getTargetGlobalAddress(GA->getGlobal(), DL,
13764 GA->getValueType(0),
13765 GA->getOffset(), OpFlag);
13766 SDValue Offset = DAG.getNode(WrapperKind, DL, getPointerTy(), Result);
13768 // With PIC32, the address is actually $g + Offset.
13770 Offset = DAG.getNode(ISD::ADD, DL, getPointerTy(),
13771 DAG.getNode(X86ISD::GlobalBaseReg,
13772 SDLoc(), getPointerTy()),
13775 // Lowering the machine isd will make sure everything is in the right
13777 SDValue Chain = DAG.getEntryNode();
13778 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
13779 SDValue Args[] = { Chain, Offset };
13780 Chain = DAG.getNode(X86ISD::TLSCALL, DL, NodeTys, Args);
13782     // TLSCALL will be codegen'ed as a call. Inform MFI that the function has calls.
13783 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
13784 MFI->setAdjustsStack(true);
13786 // And our return value (tls address) is in the standard call return value
13788 unsigned Reg = Subtarget->is64Bit() ? X86::RAX : X86::EAX;
13789 return DAG.getCopyFromReg(Chain, DL, Reg, getPointerTy(),
13790 Chain.getValue(1));
13793 if (Subtarget->isTargetKnownWindowsMSVC() ||
13794 Subtarget->isTargetWindowsGNU()) {
13795     // Just use the implicit TLS architecture.
13796     // We need to generate something similar to:
13797 // mov rdx, qword [gs:abs 58H]; Load pointer to ThreadLocalStorage
13799 // mov ecx, dword [rel _tls_index]: Load index (from C runtime)
13800 // mov rcx, qword [rdx+rcx*8]
13801 // mov eax, .tls$:tlsvar
13802 // [rax+rcx] contains the address
13803 // Windows 64bit: gs:0x58
13804 // Windows 32bit: fs:__tls_array
13807 SDValue Chain = DAG.getEntryNode();
13809 // Get the Thread Pointer, which is %fs:__tls_array (32-bit) or
13810 // %gs:0x58 (64-bit). On MinGW, __tls_array is not available, so directly
13811 // use its literal value of 0x2C.
13812 Value *Ptr = Constant::getNullValue(Subtarget->is64Bit()
13813 ? Type::getInt8PtrTy(*DAG.getContext(),
13815 : Type::getInt32PtrTy(*DAG.getContext(),
13819 Subtarget->is64Bit()
13820 ? DAG.getIntPtrConstant(0x58)
13821 : (Subtarget->isTargetWindowsGNU()
13822 ? DAG.getIntPtrConstant(0x2C)
13823 : DAG.getExternalSymbol("_tls_array", getPointerTy()));
13825 SDValue ThreadPointer =
13826 DAG.getLoad(getPointerTy(), dl, Chain, TlsArray,
13827 MachinePointerInfo(Ptr), false, false, false, 0);
13829 // Load the _tls_index variable
13830 SDValue IDX = DAG.getExternalSymbol("_tls_index", getPointerTy());
13831 if (Subtarget->is64Bit())
13832 IDX = DAG.getExtLoad(ISD::ZEXTLOAD, dl, getPointerTy(), Chain,
13833 IDX, MachinePointerInfo(), MVT::i32,
13834 false, false, false, 0);
13836 IDX = DAG.getLoad(getPointerTy(), dl, Chain, IDX, MachinePointerInfo(),
13837 false, false, false, 0);
13839 SDValue Scale = DAG.getConstant(Log2_64_Ceil(TD->getPointerSize()),
13841 IDX = DAG.getNode(ISD::SHL, dl, getPointerTy(), IDX, Scale);
13843 SDValue res = DAG.getNode(ISD::ADD, dl, getPointerTy(), ThreadPointer, IDX);
13844 res = DAG.getLoad(getPointerTy(), dl, Chain, res, MachinePointerInfo(),
13845 false, false, false, 0);
13847     // Get the offset of the start of the .tls section.
13848 SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl,
13849 GA->getValueType(0),
13850 GA->getOffset(), X86II::MO_SECREL);
13851 SDValue Offset = DAG.getNode(X86ISD::Wrapper, dl, getPointerTy(), TGA);
13853     // The address of the thread-local variable is the thread pointer plus the
13854     // offset of the variable.
13855 return DAG.getNode(ISD::ADD, dl, getPointerTy(), res, Offset);
13858 llvm_unreachable("TLS not implemented for this target.");
13861 /// LowerShiftParts - Lower SRA_PARTS and friends, which return two i32 values
13862 /// and take a 2 x i32 value to shift plus a shift amount.
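/// For an i64 shift on a 32-bit target this emits, roughly, an SHLD/SHRD for
/// the bits that cross the part boundary, a plain shift for the other part,
/// and CMOVs keyed on bit 5 of the shift amount to handle amounts >= 32.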
13863 static SDValue LowerShiftParts(SDValue Op, SelectionDAG &DAG) {
13864 assert(Op.getNumOperands() == 3 && "Not a double-shift!");
13865 MVT VT = Op.getSimpleValueType();
13866 unsigned VTBits = VT.getSizeInBits();
13868 bool isSRA = Op.getOpcode() == ISD::SRA_PARTS;
13869 SDValue ShOpLo = Op.getOperand(0);
13870 SDValue ShOpHi = Op.getOperand(1);
13871 SDValue ShAmt = Op.getOperand(2);
13872 // X86ISD::SHLD and X86ISD::SHRD have defined overflow behavior but the
13873   // generic ISD nodes don't. Insert an AND to be safe; it's optimized away
13875 SDValue SafeShAmt = DAG.getNode(ISD::AND, dl, MVT::i8, ShAmt,
13876 DAG.getConstant(VTBits - 1, MVT::i8));
13877 SDValue Tmp1 = isSRA ? DAG.getNode(ISD::SRA, dl, VT, ShOpHi,
13878 DAG.getConstant(VTBits - 1, MVT::i8))
13879 : DAG.getConstant(0, VT);
13881 SDValue Tmp2, Tmp3;
13882 if (Op.getOpcode() == ISD::SHL_PARTS) {
13883 Tmp2 = DAG.getNode(X86ISD::SHLD, dl, VT, ShOpHi, ShOpLo, ShAmt);
13884 Tmp3 = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, SafeShAmt);
13886 Tmp2 = DAG.getNode(X86ISD::SHRD, dl, VT, ShOpLo, ShOpHi, ShAmt);
13887 Tmp3 = DAG.getNode(isSRA ? ISD::SRA : ISD::SRL, dl, VT, ShOpHi, SafeShAmt);
13890   // If the shift amount is larger than or equal to the width of a part, we can't
13891 // rely on the results of shld/shrd. Insert a test and select the appropriate
13892 // values for large shift amounts.
13893 SDValue AndNode = DAG.getNode(ISD::AND, dl, MVT::i8, ShAmt,
13894 DAG.getConstant(VTBits, MVT::i8));
13895 SDValue Cond = DAG.getNode(X86ISD::CMP, dl, MVT::i32,
13896 AndNode, DAG.getConstant(0, MVT::i8));
13899 SDValue CC = DAG.getConstant(X86::COND_NE, MVT::i8);
13900 SDValue Ops0[4] = { Tmp2, Tmp3, CC, Cond };
13901 SDValue Ops1[4] = { Tmp3, Tmp1, CC, Cond };
13903 if (Op.getOpcode() == ISD::SHL_PARTS) {
13904 Hi = DAG.getNode(X86ISD::CMOV, dl, VT, Ops0);
13905 Lo = DAG.getNode(X86ISD::CMOV, dl, VT, Ops1);
13907 Lo = DAG.getNode(X86ISD::CMOV, dl, VT, Ops0);
13908 Hi = DAG.getNode(X86ISD::CMOV, dl, VT, Ops1);
13911 SDValue Ops[2] = { Lo, Hi };
13912 return DAG.getMergeValues(Ops, dl);
13915 SDValue X86TargetLowering::LowerSINT_TO_FP(SDValue Op,
13916 SelectionDAG &DAG) const {
13917 MVT SrcVT = Op.getOperand(0).getSimpleValueType();
13920 if (SrcVT.isVector()) {
13921 if (SrcVT.getVectorElementType() == MVT::i1) {
13922 MVT IntegerVT = MVT::getVectorVT(MVT::i32, SrcVT.getVectorNumElements());
13923 return DAG.getNode(ISD::SINT_TO_FP, dl, Op.getValueType(),
13924 DAG.getNode(ISD::SIGN_EXTEND, dl, IntegerVT,
13925 Op.getOperand(0)));
13930 assert(SrcVT <= MVT::i64 && SrcVT >= MVT::i16 &&
13931 "Unknown SINT_TO_FP to lower!");
13933 // These are really Legal; return the operand so the caller accepts it as
13935 if (SrcVT == MVT::i32 && isScalarFPTypeInSSEReg(Op.getValueType()))
13937 if (SrcVT == MVT::i64 && isScalarFPTypeInSSEReg(Op.getValueType()) &&
13938 Subtarget->is64Bit()) {
13942 unsigned Size = SrcVT.getSizeInBits()/8;
13943 MachineFunction &MF = DAG.getMachineFunction();
13944 int SSFI = MF.getFrameInfo()->CreateStackObject(Size, Size, false);
13945 SDValue StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
13946 SDValue Chain = DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0),
13948 MachinePointerInfo::getFixedStack(SSFI),
13950 return BuildFILD(Op, SrcVT, Chain, StackSlot, DAG);
13953 SDValue X86TargetLowering::BuildFILD(SDValue Op, EVT SrcVT, SDValue Chain,
13955 SelectionDAG &DAG) const {
13959 bool useSSE = isScalarFPTypeInSSEReg(Op.getValueType());
13961 Tys = DAG.getVTList(MVT::f64, MVT::Other, MVT::Glue);
13963 Tys = DAG.getVTList(Op.getValueType(), MVT::Other);
13965 unsigned ByteSize = SrcVT.getSizeInBits()/8;
13967 FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(StackSlot);
13968 MachineMemOperand *MMO;
13970 int SSFI = FI->getIndex();
13972 DAG.getMachineFunction()
13973 .getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI),
13974 MachineMemOperand::MOLoad, ByteSize, ByteSize);
13976 MMO = cast<LoadSDNode>(StackSlot)->getMemOperand();
13977 StackSlot = StackSlot.getOperand(1);
13979 SDValue Ops[] = { Chain, StackSlot, DAG.getValueType(SrcVT) };
13980 SDValue Result = DAG.getMemIntrinsicNode(useSSE ? X86ISD::FILD_FLAG :
13982 Tys, Ops, SrcVT, MMO);
13985 Chain = Result.getValue(1);
13986 SDValue InFlag = Result.getValue(2);
13988 // FIXME: Currently the FST is flagged to the FILD_FLAG. This
13989 // shouldn't be necessary except that RFP cannot be live across
13990     // multiple blocks. When the stackifier is fixed, they can be uncoupled.
13991 MachineFunction &MF = DAG.getMachineFunction();
13992 unsigned SSFISize = Op.getValueType().getSizeInBits()/8;
13993 int SSFI = MF.getFrameInfo()->CreateStackObject(SSFISize, SSFISize, false);
13994 SDValue StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
13995 Tys = DAG.getVTList(MVT::Other);
13997 Chain, Result, StackSlot, DAG.getValueType(Op.getValueType()), InFlag
13999 MachineMemOperand *MMO =
14000 DAG.getMachineFunction()
14001 .getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI),
14002 MachineMemOperand::MOStore, SSFISize, SSFISize);
14004 Chain = DAG.getMemIntrinsicNode(X86ISD::FST, DL, Tys,
14005 Ops, Op.getValueType(), MMO);
14006 Result = DAG.getLoad(Op.getValueType(), DL, Chain, StackSlot,
14007 MachinePointerInfo::getFixedStack(SSFI),
14008 false, false, false, 0);
14014 // LowerUINT_TO_FP_i64 - 64-bit unsigned integer to double expansion.
14015 SDValue X86TargetLowering::LowerUINT_TO_FP_i64(SDValue Op,
14016 SelectionDAG &DAG) const {
14017 // This algorithm is not obvious. Here is what we're trying to output:
14020 punpckldq (c0), %xmm0 // c0: (uint4){ 0x43300000U, 0x45300000U, 0U, 0U }
14021 subpd (c1), %xmm0 // c1: (double2){ 0x1.0p52, 0x1.0p52 * 0x1.0p32 }
14023 haddpd %xmm0, %xmm0
14025 pshufd $0x4e, %xmm0, %xmm1
14031 LLVMContext *Context = DAG.getContext();
14033 // Build some magic constants.
14034 static const uint32_t CV0[] = { 0x43300000, 0x45300000, 0, 0 };
14035 Constant *C0 = ConstantDataVector::get(*Context, CV0);
14036 SDValue CPIdx0 = DAG.getConstantPool(C0, getPointerTy(), 16);
14038 SmallVector<Constant*,2> CV1;
14040 ConstantFP::get(*Context, APFloat(APFloat::IEEEdouble,
14041 APInt(64, 0x4330000000000000ULL))));
14043 ConstantFP::get(*Context, APFloat(APFloat::IEEEdouble,
14044 APInt(64, 0x4530000000000000ULL))));
14045 Constant *C1 = ConstantVector::get(CV1);
14046 SDValue CPIdx1 = DAG.getConstantPool(C1, getPointerTy(), 16);
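// A rough sketch of why these constants work (the usual IEEE-754 bit trick):
// unpacking the u64 { lo32, hi32 } with c0 forms two doubles whose bit
// patterns are (0x43300000 << 32) | lo32 == 2^52 + lo32 and
// (0x45300000 << 32) | hi32 == 2^84 + hi32 * 2^32. Subtracting
// c1 = { 2^52, 2^84 } then leaves exactly { (double)lo32, (double)hi32 * 2^32 },
// and the horizontal add below produces the converted value with one rounding.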
14048 // Load the 64-bit value into an XMM register.
14049 SDValue XR1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64,
14051 SDValue CLod0 = DAG.getLoad(MVT::v4i32, dl, DAG.getEntryNode(), CPIdx0,
14052 MachinePointerInfo::getConstantPool(),
14053 false, false, false, 16);
14054 SDValue Unpck1 = getUnpackl(DAG, dl, MVT::v4i32,
14055 DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, XR1),
14058 SDValue CLod1 = DAG.getLoad(MVT::v2f64, dl, CLod0.getValue(1), CPIdx1,
14059 MachinePointerInfo::getConstantPool(),
14060 false, false, false, 16);
14061 SDValue XR2F = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Unpck1);
14062 SDValue Sub = DAG.getNode(ISD::FSUB, dl, MVT::v2f64, XR2F, CLod1);
14065 if (Subtarget->hasSSE3()) {
14066 // FIXME: The 'haddpd' instruction may be slower than 'movhlps + addsd'.
14067 Result = DAG.getNode(X86ISD::FHADD, dl, MVT::v2f64, Sub, Sub);
14069 SDValue S2F = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Sub);
14070 SDValue Shuffle = getTargetShuffleNode(X86ISD::PSHUFD, dl, MVT::v4i32,
14072 Result = DAG.getNode(ISD::FADD, dl, MVT::v2f64,
14073 DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Shuffle),
14077 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Result,
14078 DAG.getIntPtrConstant(0));
14081 // LowerUINT_TO_FP_i32 - 32-bit unsigned integer to float expansion.
14082 SDValue X86TargetLowering::LowerUINT_TO_FP_i32(SDValue Op,
14083 SelectionDAG &DAG) const {
14085 // FP constant used to bias-correct the final result.
14086 SDValue Bias = DAG.getConstantFP(BitsToDouble(0x4330000000000000ULL),
14089 // Load the 32-bit value into an XMM register.
14090 SDValue Load = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32,
14093 // Zero out the upper parts of the register.
14094 Load = getShuffleVectorZeroOrUndef(Load, 0, true, Subtarget, DAG);
14096 Load = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64,
14097 DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Load),
14098 DAG.getIntPtrConstant(0));
14100 // Or the load with the bias.
14101 SDValue Or = DAG.getNode(ISD::OR, dl, MVT::v2i64,
14102 DAG.getNode(ISD::BITCAST, dl, MVT::v2i64,
14103 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl,
14104 MVT::v2f64, Load)),
14105 DAG.getNode(ISD::BITCAST, dl, MVT::v2i64,
14106 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl,
14107 MVT::v2f64, Bias)));
14108 Or = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64,
14109 DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Or),
14110 DAG.getIntPtrConstant(0));
14112 // Subtract the bias.
14113 SDValue Sub = DAG.getNode(ISD::FSUB, dl, MVT::f64, Or, Bias);
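// Why this works (a sketch): the OR above splices the zero-extended u32 into
// the low 32 mantissa bits of Bias (whose bit pattern is 0x4330000000000000,
// i.e. 2^52), so the resulting double is exactly 2^52 + x. Subtracting Bias
// then yields (double)x with no rounding, since any u32 fits in 52 mantissa bits.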
14115 // Handle final rounding.
14116 EVT DestVT = Op.getValueType();
14118 if (DestVT.bitsLT(MVT::f64))
14119 return DAG.getNode(ISD::FP_ROUND, dl, DestVT, Sub,
14120 DAG.getIntPtrConstant(0));
14121 if (DestVT.bitsGT(MVT::f64))
14122 return DAG.getNode(ISD::FP_EXTEND, dl, DestVT, Sub);
14124 // Otherwise the destination type is already f64; no rounding is needed.
14128 static SDValue lowerUINT_TO_FP_vXi32(SDValue Op, SelectionDAG &DAG,
14129 const X86Subtarget &Subtarget) {
14130 // The algorithm is the following:
14131 // #ifdef __SSE4_1__
14132 // uint4 lo = _mm_blend_epi16( v, (uint4) 0x4b000000, 0xaa);
14133 // uint4 hi = _mm_blend_epi16( _mm_srli_epi32(v,16),
14134 // (uint4) 0x53000000, 0xaa);
14136 // uint4 lo = (v & (uint4) 0xffff) | (uint4) 0x4b000000;
14137 // uint4 hi = (v >> 16) | (uint4) 0x53000000;
14139 // float4 fhi = (float4) hi - (0x1.0p39f + 0x1.0p23f);
14140 // return (float4) lo + fhi;
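// Worked through for a single lane (a sketch): interpreting lo's bits
// 0x4b000000 | (v & 0xffff) as a float gives 2^23 + (v & 0xffff), and hi's
// bits 0x53000000 | (v >> 16) give 2^39 + (v >> 16) * 2^16. Subtracting
// (2^39 + 2^23) from hi and adding lo cancels the 2^23 and 2^39 terms,
// leaving (v >> 16) * 2^16 + (v & 0xffff) == (float)v, with one final rounding.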
14143 SDValue V = Op->getOperand(0);
14144 EVT VecIntVT = V.getValueType();
14145 bool Is128 = VecIntVT == MVT::v4i32;
14146 EVT VecFloatVT = Is128 ? MVT::v4f32 : MVT::v8f32;
14147 // If we convert to something other than the supported type, e.g., to v4f64,
14149 if (VecFloatVT != Op->getValueType(0))
14152 unsigned NumElts = VecIntVT.getVectorNumElements();
14153 assert((VecIntVT == MVT::v4i32 || VecIntVT == MVT::v8i32) &&
14154 "Unsupported custom type");
14155 assert(NumElts <= 8 && "The size of the constant array must be fixed");
14157 // In the #ifdef/#else code, we have in common:
14158 // - The vector of constants:
14164 // Create the splat vector for 0x4b000000.
14165 SDValue CstLow = DAG.getConstant(0x4b000000, MVT::i32);
14166 SDValue CstLowArray[] = {CstLow, CstLow, CstLow, CstLow,
14167 CstLow, CstLow, CstLow, CstLow};
14168 SDValue VecCstLow = DAG.getNode(ISD::BUILD_VECTOR, DL, VecIntVT,
14169 makeArrayRef(&CstLowArray[0], NumElts));
14170 // Create the splat vector for 0x53000000.
14171 SDValue CstHigh = DAG.getConstant(0x53000000, MVT::i32);
14172 SDValue CstHighArray[] = {CstHigh, CstHigh, CstHigh, CstHigh,
14173 CstHigh, CstHigh, CstHigh, CstHigh};
14174 SDValue VecCstHigh = DAG.getNode(ISD::BUILD_VECTOR, DL, VecIntVT,
14175 makeArrayRef(&CstHighArray[0], NumElts));
14177 // Create the right shift.
14178 SDValue CstShift = DAG.getConstant(16, MVT::i32);
14179 SDValue CstShiftArray[] = {CstShift, CstShift, CstShift, CstShift,
14180 CstShift, CstShift, CstShift, CstShift};
14181 SDValue VecCstShift = DAG.getNode(ISD::BUILD_VECTOR, DL, VecIntVT,
14182 makeArrayRef(&CstShiftArray[0], NumElts));
14183 SDValue HighShift = DAG.getNode(ISD::SRL, DL, VecIntVT, V, VecCstShift);
14186 if (Subtarget.hasSSE41()) {
14187 EVT VecI16VT = Is128 ? MVT::v8i16 : MVT::v16i16;
14188 // uint4 lo = _mm_blend_epi16( v, (uint4) 0x4b000000, 0xaa);
14189 SDValue VecCstLowBitcast =
14190 DAG.getNode(ISD::BITCAST, DL, VecI16VT, VecCstLow);
14191 SDValue VecBitcast = DAG.getNode(ISD::BITCAST, DL, VecI16VT, V);
14192 // Low will be bitcasted right away, so do not bother bitcasting back to its original type.
14194 Low = DAG.getNode(X86ISD::BLENDI, DL, VecI16VT, VecBitcast,
14195 VecCstLowBitcast, DAG.getConstant(0xaa, MVT::i32));
14196 // uint4 hi = _mm_blend_epi16( _mm_srli_epi32(v,16),
14197 // (uint4) 0x53000000, 0xaa);
14198 SDValue VecCstHighBitcast =
14199 DAG.getNode(ISD::BITCAST, DL, VecI16VT, VecCstHigh);
14200 SDValue VecShiftBitcast =
14201 DAG.getNode(ISD::BITCAST, DL, VecI16VT, HighShift);
14202 // High will be bitcasted right away, so do not bother bitcasting back to
14203 // its original type.
14204 High = DAG.getNode(X86ISD::BLENDI, DL, VecI16VT, VecShiftBitcast,
14205 VecCstHighBitcast, DAG.getConstant(0xaa, MVT::i32));
14207 SDValue CstMask = DAG.getConstant(0xffff, MVT::i32);
14208 SDValue VecCstMask = DAG.getNode(ISD::BUILD_VECTOR, DL, VecIntVT, CstMask,
14209 CstMask, CstMask, CstMask);
14210 // uint4 lo = (v & (uint4) 0xffff) | (uint4) 0x4b000000;
14211 SDValue LowAnd = DAG.getNode(ISD::AND, DL, VecIntVT, V, VecCstMask);
14212 Low = DAG.getNode(ISD::OR, DL, VecIntVT, LowAnd, VecCstLow);
14214 // uint4 hi = (v >> 16) | (uint4) 0x53000000;
14215 High = DAG.getNode(ISD::OR, DL, VecIntVT, HighShift, VecCstHigh);
14218 // Create the vector constant for -(0x1.0p39f + 0x1.0p23f).
14219 SDValue CstFAdd = DAG.getConstantFP(
14220 APFloat(APFloat::IEEEsingle, APInt(32, 0xD3000080)), MVT::f32);
14221 SDValue CstFAddArray[] = {CstFAdd, CstFAdd, CstFAdd, CstFAdd,
14222 CstFAdd, CstFAdd, CstFAdd, CstFAdd};
14223 SDValue VecCstFAdd = DAG.getNode(ISD::BUILD_VECTOR, DL, VecFloatVT,
14224 makeArrayRef(&CstFAddArray[0], NumElts));
14226 // float4 fhi = (float4) hi - (0x1.0p39f + 0x1.0p23f);
14227 SDValue HighBitcast = DAG.getNode(ISD::BITCAST, DL, VecFloatVT, High);
14229 DAG.getNode(ISD::FADD, DL, VecFloatVT, HighBitcast, VecCstFAdd);
14230 // return (float4) lo + fhi;
14231 SDValue LowBitcast = DAG.getNode(ISD::BITCAST, DL, VecFloatVT, Low);
14232 return DAG.getNode(ISD::FADD, DL, VecFloatVT, LowBitcast, FHigh);
14235 SDValue X86TargetLowering::lowerUINT_TO_FP_vec(SDValue Op,
14236 SelectionDAG &DAG) const {
14237 SDValue N0 = Op.getOperand(0);
14238 MVT SVT = N0.getSimpleValueType();
14241 switch (SVT.SimpleTy) {
14243 llvm_unreachable("Custom UINT_TO_FP is not supported!");
14248 MVT NVT = MVT::getVectorVT(MVT::i32, SVT.getVectorNumElements());
14249 return DAG.getNode(ISD::SINT_TO_FP, dl, Op.getValueType(),
14250 DAG.getNode(ISD::ZERO_EXTEND, dl, NVT, N0));
14254 return lowerUINT_TO_FP_vXi32(Op, DAG, *Subtarget);
14256 llvm_unreachable(nullptr);
14259 SDValue X86TargetLowering::LowerUINT_TO_FP(SDValue Op,
14260 SelectionDAG &DAG) const {
14261 SDValue N0 = Op.getOperand(0);
14264 if (Op.getValueType().isVector())
14265 return lowerUINT_TO_FP_vec(Op, DAG);
14267 // Since UINT_TO_FP is legal (it's marked custom), dag combiner won't
14268 // optimize it to a SINT_TO_FP when the sign bit is known zero. Perform
14269 // the optimization here.
14270 if (DAG.SignBitIsZero(N0))
14271 return DAG.getNode(ISD::SINT_TO_FP, dl, Op.getValueType(), N0);
14273 MVT SrcVT = N0.getSimpleValueType();
14274 MVT DstVT = Op.getSimpleValueType();
14275 if (SrcVT == MVT::i64 && DstVT == MVT::f64 && X86ScalarSSEf64)
14276 return LowerUINT_TO_FP_i64(Op, DAG);
14277 if (SrcVT == MVT::i32 && X86ScalarSSEf64)
14278 return LowerUINT_TO_FP_i32(Op, DAG);
14279 if (Subtarget->is64Bit() && SrcVT == MVT::i64 && DstVT == MVT::f32)
14282 // Make a 64-bit buffer, and use it to build an FILD.
14283 SDValue StackSlot = DAG.CreateStackTemporary(MVT::i64);
14284 if (SrcVT == MVT::i32) {
14285 SDValue WordOff = DAG.getConstant(4, getPointerTy());
14286 SDValue OffsetSlot = DAG.getNode(ISD::ADD, dl,
14287 getPointerTy(), StackSlot, WordOff);
14288 SDValue Store1 = DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0),
14289 StackSlot, MachinePointerInfo(),
14291 SDValue Store2 = DAG.getStore(Store1, dl, DAG.getConstant(0, MVT::i32),
14292 OffsetSlot, MachinePointerInfo(),
14294 SDValue Fild = BuildFILD(Op, MVT::i64, Store2, StackSlot, DAG);
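// The stored pair is { x, 0 }, so on little-endian x86 the 64-bit slot holds
// zext(x), which is non-negative; the signed FILD therefore loads the exact
// integer value for any 32-bit unsigned input.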
14298 assert(SrcVT == MVT::i64 && "Unexpected type in UINT_TO_FP");
14299 SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0),
14300 StackSlot, MachinePointerInfo(),
14302 // For i64 source, we need to add the appropriate power of 2 if the input
14303 // was negative. This is the same as the optimization in
14304 // DAGTypeLegalizer::ExpandIntOp_UINT_TO_FP, and for it to be safe here,
14305 // we must be careful to do the computation in x87 extended precision, not
14306 // in SSE. (The generic code can't know it's OK to do this, or how to.)
14307 int SSFI = cast<FrameIndexSDNode>(StackSlot)->getIndex();
14308 MachineMemOperand *MMO =
14309 DAG.getMachineFunction()
14310 .getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI),
14311 MachineMemOperand::MOLoad, 8, 8);
14313 SDVTList Tys = DAG.getVTList(MVT::f80, MVT::Other);
14314 SDValue Ops[] = { Store, StackSlot, DAG.getValueType(MVT::i64) };
14315 SDValue Fild = DAG.getMemIntrinsicNode(X86ISD::FILD, dl, Tys, Ops,
14318 APInt FF(32, 0x5F800000ULL);
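// 0x5F800000 is the IEEE single-precision encoding of 2^64. If the i64 input
// had its sign bit set, FILD interpreted it as (x - 2^64), so adding this
// fudge factor below restores the intended unsigned value.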
14320 // Check whether the sign bit is set.
14321 SDValue SignSet = DAG.getSetCC(dl,
14322 getSetCCResultType(*DAG.getContext(), MVT::i64),
14323 Op.getOperand(0), DAG.getConstant(0, MVT::i64),
14326 // Build a 64 bit pair (0, FF) in the constant pool, with FF in the lo bits.
14327 SDValue FudgePtr = DAG.getConstantPool(
14328 ConstantInt::get(*DAG.getContext(), FF.zext(64)),
14331 // Get a pointer to FF if the sign bit was set, or to 0 otherwise.
14332 SDValue Zero = DAG.getIntPtrConstant(0);
14333 SDValue Four = DAG.getIntPtrConstant(4);
14334 SDValue Offset = DAG.getNode(ISD::SELECT, dl, Zero.getValueType(), SignSet,
14336 FudgePtr = DAG.getNode(ISD::ADD, dl, getPointerTy(), FudgePtr, Offset);
14338 // Load the value out, extending it from f32 to f80.
14339 // FIXME: Avoid the extend by constructing the right constant pool?
14340 SDValue Fudge = DAG.getExtLoad(ISD::EXTLOAD, dl, MVT::f80, DAG.getEntryNode(),
14341 FudgePtr, MachinePointerInfo::getConstantPool(),
14342 MVT::f32, false, false, false, 4);
14343 // Extend everything to 80 bits to force it to be done on x87.
14344 SDValue Add = DAG.getNode(ISD::FADD, dl, MVT::f80, Fild, Fudge);
14345 return DAG.getNode(ISD::FP_ROUND, dl, DstVT, Add, DAG.getIntPtrConstant(0));
14348 std::pair<SDValue,SDValue>
14349 X86TargetLowering:: FP_TO_INTHelper(SDValue Op, SelectionDAG &DAG,
14350 bool IsSigned, bool IsReplace) const {
14353 EVT DstTy = Op.getValueType();
14355 if (!IsSigned && !isIntegerTypeFTOL(DstTy)) {
14356 assert(DstTy == MVT::i32 && "Unexpected FP_TO_UINT");
14360 assert(DstTy.getSimpleVT() <= MVT::i64 &&
14361 DstTy.getSimpleVT() >= MVT::i16 &&
14362 "Unknown FP_TO_INT to lower!");
14364 // These are really Legal.
14365 if (DstTy == MVT::i32 &&
14366 isScalarFPTypeInSSEReg(Op.getOperand(0).getValueType()))
14367 return std::make_pair(SDValue(), SDValue());
14368 if (Subtarget->is64Bit() &&
14369 DstTy == MVT::i64 &&
14370 isScalarFPTypeInSSEReg(Op.getOperand(0).getValueType()))
14371 return std::make_pair(SDValue(), SDValue());
14373 // We lower FP->int64 either into FISTP64 followed by a load from a temporary
14374 // stack slot, or into the FTOL runtime function.
14375 MachineFunction &MF = DAG.getMachineFunction();
14376 unsigned MemSize = DstTy.getSizeInBits()/8;
14377 int SSFI = MF.getFrameInfo()->CreateStackObject(MemSize, MemSize, false);
14378 SDValue StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
14381 if (!IsSigned && isIntegerTypeFTOL(DstTy))
14382 Opc = X86ISD::WIN_FTOL;
14384 switch (DstTy.getSimpleVT().SimpleTy) {
14385 default: llvm_unreachable("Invalid FP_TO_SINT to lower!");
14386 case MVT::i16: Opc = X86ISD::FP_TO_INT16_IN_MEM; break;
14387 case MVT::i32: Opc = X86ISD::FP_TO_INT32_IN_MEM; break;
14388 case MVT::i64: Opc = X86ISD::FP_TO_INT64_IN_MEM; break;
14391 SDValue Chain = DAG.getEntryNode();
14392 SDValue Value = Op.getOperand(0);
14393 EVT TheVT = Op.getOperand(0).getValueType();
14394 // FIXME: This causes a redundant load/store if the SSE-class value is already
14395 // in memory, such as if it is on the call stack.
14396 if (isScalarFPTypeInSSEReg(TheVT)) {
14397 assert(DstTy == MVT::i64 && "Invalid FP_TO_SINT to lower!");
14398 Chain = DAG.getStore(Chain, DL, Value, StackSlot,
14399 MachinePointerInfo::getFixedStack(SSFI),
14401 SDVTList Tys = DAG.getVTList(Op.getOperand(0).getValueType(), MVT::Other);
14403 Chain, StackSlot, DAG.getValueType(TheVT)
14406 MachineMemOperand *MMO =
14407 MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI),
14408 MachineMemOperand::MOLoad, MemSize, MemSize);
14409 Value = DAG.getMemIntrinsicNode(X86ISD::FLD, DL, Tys, Ops, DstTy, MMO);
14410 Chain = Value.getValue(1);
14411 SSFI = MF.getFrameInfo()->CreateStackObject(MemSize, MemSize, false);
14412 StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
14415 MachineMemOperand *MMO =
14416 MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI),
14417 MachineMemOperand::MOStore, MemSize, MemSize);
14419 if (Opc != X86ISD::WIN_FTOL) {
14420 // Build the FP_TO_INT*_IN_MEM
14421 SDValue Ops[] = { Chain, Value, StackSlot };
14422 SDValue FIST = DAG.getMemIntrinsicNode(Opc, DL, DAG.getVTList(MVT::Other),
14424 return std::make_pair(FIST, StackSlot);
14426 SDValue ftol = DAG.getNode(X86ISD::WIN_FTOL, DL,
14427 DAG.getVTList(MVT::Other, MVT::Glue),
14429 SDValue eax = DAG.getCopyFromReg(ftol, DL, X86::EAX,
14430 MVT::i32, ftol.getValue(1));
14431 SDValue edx = DAG.getCopyFromReg(eax.getValue(1), DL, X86::EDX,
14432 MVT::i32, eax.getValue(2));
14433 SDValue Ops[] = { eax, edx };
14434 SDValue pair = IsReplace
14435 ? DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, Ops)
14436 : DAG.getMergeValues(Ops, DL);
14437 return std::make_pair(pair, SDValue());
14441 static SDValue LowerAVXExtend(SDValue Op, SelectionDAG &DAG,
14442 const X86Subtarget *Subtarget) {
14443 MVT VT = Op->getSimpleValueType(0);
14444 SDValue In = Op->getOperand(0);
14445 MVT InVT = In.getSimpleValueType();
14448 // Optimize vectors in AVX mode:
14451 // Use vpunpcklwd for 4 lower elements v8i16 -> v4i32.
14452 // Use vpunpckhwd for 4 upper elements v8i16 -> v4i32.
14453 // Concat upper and lower parts.
14456 // Use vpunpckldq for 4 lower elements v4i32 -> v2i64.
14457 // Use vpunpckhdq for 4 upper elements v4i32 -> v2i64.
14458 // Concat upper and lower parts.
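// For example (a sketch for the v4i32 -> v4i64 zero-extension case):
//   OpLo = unpckldq(In, Zero) -> { In[0], 0, In[1], 0 } == low two i64 results
//   OpHi = unpckhdq(In, Zero) -> { In[2], 0, In[3], 0 } == high two i64 results
//   result = concat(OpLo, OpHi)
// For ANY_EXTEND the zero vector is replaced by undef, as done below.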
14461 if (((VT != MVT::v16i16) || (InVT != MVT::v16i8)) &&
14462 ((VT != MVT::v8i32) || (InVT != MVT::v8i16)) &&
14463 ((VT != MVT::v4i64) || (InVT != MVT::v4i32)))
14466 if (Subtarget->hasInt256())
14467 return DAG.getNode(X86ISD::VZEXT, dl, VT, In);
14469 SDValue ZeroVec = getZeroVector(InVT, Subtarget, DAG, dl);
14470 SDValue Undef = DAG.getUNDEF(InVT);
14471 bool NeedZero = Op.getOpcode() == ISD::ZERO_EXTEND;
14472 SDValue OpLo = getUnpackl(DAG, dl, InVT, In, NeedZero ? ZeroVec : Undef);
14473 SDValue OpHi = getUnpackh(DAG, dl, InVT, In, NeedZero ? ZeroVec : Undef);
14475 MVT HVT = MVT::getVectorVT(VT.getVectorElementType(),
14476 VT.getVectorNumElements()/2);
14478 OpLo = DAG.getNode(ISD::BITCAST, dl, HVT, OpLo);
14479 OpHi = DAG.getNode(ISD::BITCAST, dl, HVT, OpHi);
14481 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, OpLo, OpHi);
14484 static SDValue LowerZERO_EXTEND_AVX512(SDValue Op,
14485 SelectionDAG &DAG) {
14486 MVT VT = Op->getSimpleValueType(0);
14487 SDValue In = Op->getOperand(0);
14488 MVT InVT = In.getSimpleValueType();
14490 unsigned int NumElts = VT.getVectorNumElements();
14491 if (NumElts != 8 && NumElts != 16)
14494 if (VT.is512BitVector() && InVT.getVectorElementType() != MVT::i1)
14495 return DAG.getNode(X86ISD::VZEXT, DL, VT, In);
14497 EVT ExtVT = (NumElts == 8)? MVT::v8i64 : MVT::v16i32;
14498 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
14499 // Now only the mask-extension case remains.
14500 assert(InVT.getVectorElementType() == MVT::i1);
14501 SDValue Cst = DAG.getTargetConstant(1, ExtVT.getScalarType());
14502 const Constant *C = (dyn_cast<ConstantSDNode>(Cst))->getConstantIntValue();
14503 SDValue CP = DAG.getConstantPool(C, TLI.getPointerTy());
14504 unsigned Alignment = cast<ConstantPoolSDNode>(CP)->getAlignment();
14505 SDValue Ld = DAG.getLoad(Cst.getValueType(), DL, DAG.getEntryNode(), CP,
14506 MachinePointerInfo::getConstantPool(),
14507 false, false, false, Alignment);
14509 SDValue Brcst = DAG.getNode(X86ISD::VBROADCASTM, DL, ExtVT, In, Ld);
14510 if (VT.is512BitVector())
14512 return DAG.getNode(X86ISD::VTRUNC, DL, VT, Brcst);
14515 static SDValue LowerANY_EXTEND(SDValue Op, const X86Subtarget *Subtarget,
14516 SelectionDAG &DAG) {
14517 if (Subtarget->hasFp256()) {
14518 SDValue Res = LowerAVXExtend(Op, DAG, Subtarget);
14526 static SDValue LowerZERO_EXTEND(SDValue Op, const X86Subtarget *Subtarget,
14527 SelectionDAG &DAG) {
14529 MVT VT = Op.getSimpleValueType();
14530 SDValue In = Op.getOperand(0);
14531 MVT SVT = In.getSimpleValueType();
14533 if (VT.is512BitVector() || SVT.getVectorElementType() == MVT::i1)
14534 return LowerZERO_EXTEND_AVX512(Op, DAG);
14536 if (Subtarget->hasFp256()) {
14537 SDValue Res = LowerAVXExtend(Op, DAG, Subtarget);
14542 assert(!VT.is256BitVector() || !SVT.is128BitVector() ||
14543 VT.getVectorNumElements() != SVT.getVectorNumElements());
14547 SDValue X86TargetLowering::LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const {
14549 MVT VT = Op.getSimpleValueType();
14550 SDValue In = Op.getOperand(0);
14551 MVT InVT = In.getSimpleValueType();
14553 if (VT == MVT::i1) {
14554 assert((InVT.isInteger() && (InVT.getSizeInBits() <= 64)) &&
14555 "Invalid scalar TRUNCATE operation");
14556 if (InVT.getSizeInBits() >= 32)
14558 In = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, In);
14559 return DAG.getNode(ISD::TRUNCATE, DL, VT, In);
14561 assert(VT.getVectorNumElements() == InVT.getVectorNumElements() &&
14562 "Invalid TRUNCATE operation");
14564 if (InVT.is512BitVector() || VT.getVectorElementType() == MVT::i1) {
14565 if (VT.getVectorElementType().getSizeInBits() >=8)
14566 return DAG.getNode(X86ISD::VTRUNC, DL, VT, In);
14568 assert(VT.getVectorElementType() == MVT::i1 && "Unexpected vector type");
14569 unsigned NumElts = InVT.getVectorNumElements();
14570 assert ((NumElts == 8 || NumElts == 16) && "Unexpected vector type");
14571 if (InVT.getSizeInBits() < 512) {
14572 MVT ExtVT = (NumElts == 16)? MVT::v16i32 : MVT::v8i64;
14573 In = DAG.getNode(ISD::SIGN_EXTEND, DL, ExtVT, In);
14577 SDValue Cst = DAG.getTargetConstant(1, InVT.getVectorElementType());
14578 const Constant *C = (dyn_cast<ConstantSDNode>(Cst))->getConstantIntValue();
14579 SDValue CP = DAG.getConstantPool(C, getPointerTy());
14580 unsigned Alignment = cast<ConstantPoolSDNode>(CP)->getAlignment();
14581 SDValue Ld = DAG.getLoad(Cst.getValueType(), DL, DAG.getEntryNode(), CP,
14582 MachinePointerInfo::getConstantPool(),
14583 false, false, false, Alignment);
14584 SDValue OneV = DAG.getNode(X86ISD::VBROADCAST, DL, InVT, Ld);
14585 SDValue And = DAG.getNode(ISD::AND, DL, InVT, OneV, In);
14586 return DAG.getNode(X86ISD::TESTM, DL, VT, And, And);
14589 if ((VT == MVT::v4i32) && (InVT == MVT::v4i64)) {
14590 // On AVX2, v4i64 -> v4i32 becomes VPERMD.
14591 if (Subtarget->hasInt256()) {
14592 static const int ShufMask[] = {0, 2, 4, 6, -1, -1, -1, -1};
14593 In = DAG.getNode(ISD::BITCAST, DL, MVT::v8i32, In);
14594 In = DAG.getVectorShuffle(MVT::v8i32, DL, In, DAG.getUNDEF(MVT::v8i32),
14596 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, In,
14597 DAG.getIntPtrConstant(0));
14600 SDValue OpLo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v2i64, In,
14601 DAG.getIntPtrConstant(0));
14602 SDValue OpHi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v2i64, In,
14603 DAG.getIntPtrConstant(2));
14604 OpLo = DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, OpLo);
14605 OpHi = DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, OpHi);
14606 static const int ShufMask[] = {0, 2, 4, 6};
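// Picking the even i32 elements keeps the low half of each i64, which is
// exactly the truncated value.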
14607 return DAG.getVectorShuffle(VT, DL, OpLo, OpHi, ShufMask);
14610 if ((VT == MVT::v8i16) && (InVT == MVT::v8i32)) {
14611 // On AVX2, v8i32 -> v8i16 becomes PSHUFB.
14612 if (Subtarget->hasInt256()) {
14613 In = DAG.getNode(ISD::BITCAST, DL, MVT::v32i8, In);
14615 SmallVector<SDValue,32> pshufbMask;
14616 for (unsigned i = 0; i < 2; ++i) {
14617 pshufbMask.push_back(DAG.getConstant(0x0, MVT::i8));
14618 pshufbMask.push_back(DAG.getConstant(0x1, MVT::i8));
14619 pshufbMask.push_back(DAG.getConstant(0x4, MVT::i8));
14620 pshufbMask.push_back(DAG.getConstant(0x5, MVT::i8));
14621 pshufbMask.push_back(DAG.getConstant(0x8, MVT::i8));
14622 pshufbMask.push_back(DAG.getConstant(0x9, MVT::i8));
14623 pshufbMask.push_back(DAG.getConstant(0xc, MVT::i8));
14624 pshufbMask.push_back(DAG.getConstant(0xd, MVT::i8));
14625 for (unsigned j = 0; j < 8; ++j)
14626 pshufbMask.push_back(DAG.getConstant(0x80, MVT::i8));
14628 SDValue BV = DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v32i8, pshufbMask);
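// Each 128-bit lane of the PSHUFB mask selects bytes {0,1, 4,5, 8,9, 12,13}
// (the low halves of the four i32 elements in that lane) and zeroes the rest;
// a mask byte with the high bit set (0x80) tells PSHUFB to write zero.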
14629 In = DAG.getNode(X86ISD::PSHUFB, DL, MVT::v32i8, In, BV);
14630 In = DAG.getNode(ISD::BITCAST, DL, MVT::v4i64, In);
14632 static const int ShufMask[] = {0, 2, -1, -1};
14633 In = DAG.getVectorShuffle(MVT::v4i64, DL, In, DAG.getUNDEF(MVT::v4i64),
14635 In = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v2i64, In,
14636 DAG.getIntPtrConstant(0));
14637 return DAG.getNode(ISD::BITCAST, DL, VT, In);
14640 SDValue OpLo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v4i32, In,
14641 DAG.getIntPtrConstant(0));
14643 SDValue OpHi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v4i32, In,
14644 DAG.getIntPtrConstant(4));
14646 OpLo = DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, OpLo);
14647 OpHi = DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, OpHi);
14649 // The PSHUFB mask:
14650 static const int ShufMask1[] = {0, 1, 4, 5, 8, 9, 12, 13,
14651 -1, -1, -1, -1, -1, -1, -1, -1};
14653 SDValue Undef = DAG.getUNDEF(MVT::v16i8);
14654 OpLo = DAG.getVectorShuffle(MVT::v16i8, DL, OpLo, Undef, ShufMask1);
14655 OpHi = DAG.getVectorShuffle(MVT::v16i8, DL, OpHi, Undef, ShufMask1);
14657 OpLo = DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, OpLo);
14658 OpHi = DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, OpHi);
14660 // The MOVLHPS Mask:
14661 static const int ShufMask2[] = {0, 1, 4, 5};
14662 SDValue res = DAG.getVectorShuffle(MVT::v4i32, DL, OpLo, OpHi, ShufMask2);
14663 return DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, res);
14666 // Handle truncation of V256 to V128 using shuffles.
14667 if (!VT.is128BitVector() || !InVT.is256BitVector())
14670 assert(Subtarget->hasFp256() && "256-bit vector without AVX!");
14672 unsigned NumElems = VT.getVectorNumElements();
14673 MVT NVT = MVT::getVectorVT(VT.getVectorElementType(), NumElems * 2);
14675 SmallVector<int, 16> MaskVec(NumElems * 2, -1);
14676 // Prepare truncation shuffle mask
14677 for (unsigned i = 0; i != NumElems; ++i)
14678 MaskVec[i] = i * 2;
14679 SDValue V = DAG.getVectorShuffle(NVT, DL,
14680 DAG.getNode(ISD::BITCAST, DL, NVT, In),
14681 DAG.getUNDEF(NVT), &MaskVec[0]);
14682 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, V,
14683 DAG.getIntPtrConstant(0));
14686 SDValue X86TargetLowering::LowerFP_TO_SINT(SDValue Op,
14687 SelectionDAG &DAG) const {
14688 assert(!Op.getSimpleValueType().isVector());
14690 std::pair<SDValue,SDValue> Vals = FP_TO_INTHelper(Op, DAG,
14691 /*IsSigned=*/ true, /*IsReplace=*/ false);
14692 SDValue FIST = Vals.first, StackSlot = Vals.second;
14693 // If FP_TO_INTHelper failed, the node is actually supposed to be Legal.
14694 if (!FIST.getNode()) return Op;
14696 if (StackSlot.getNode())
14697 // Load the result.
14698 return DAG.getLoad(Op.getValueType(), SDLoc(Op),
14699 FIST, StackSlot, MachinePointerInfo(),
14700 false, false, false, 0);
14702 // The node is the result.
14706 SDValue X86TargetLowering::LowerFP_TO_UINT(SDValue Op,
14707 SelectionDAG &DAG) const {
14708 std::pair<SDValue,SDValue> Vals = FP_TO_INTHelper(Op, DAG,
14709 /*IsSigned=*/ false, /*IsReplace=*/ false);
14710 SDValue FIST = Vals.first, StackSlot = Vals.second;
14711 assert(FIST.getNode() && "Unexpected failure");
14713 if (StackSlot.getNode())
14714 // Load the result.
14715 return DAG.getLoad(Op.getValueType(), SDLoc(Op),
14716 FIST, StackSlot, MachinePointerInfo(),
14717 false, false, false, 0);
14719 // The node is the result.
14723 static SDValue LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) {
14725 MVT VT = Op.getSimpleValueType();
14726 SDValue In = Op.getOperand(0);
14727 MVT SVT = In.getSimpleValueType();
14729 assert(SVT == MVT::v2f32 && "Only customize MVT::v2f32 type legalization!");
14731 return DAG.getNode(X86ISD::VFPEXT, DL, VT,
14732 DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v4f32,
14733 In, DAG.getUNDEF(SVT)));
14736 /// The only differences between FABS and FNEG are the mask and the logic op.
14737 /// FNEG also has a folding opportunity for FNEG(FABS(x)).
14738 static SDValue LowerFABSorFNEG(SDValue Op, SelectionDAG &DAG) {
14739 assert((Op.getOpcode() == ISD::FABS || Op.getOpcode() == ISD::FNEG) &&
14740 "Wrong opcode for lowering FABS or FNEG.");
14742 bool IsFABS = (Op.getOpcode() == ISD::FABS);
14744 // If this is a FABS and it has an FNEG user, bail out to fold the combination
14745 // into an FNABS. We'll lower the FABS after that if it is still in use.
14747 for (SDNode *User : Op->uses())
14748 if (User->getOpcode() == ISD::FNEG)
14751 SDValue Op0 = Op.getOperand(0);
14752 bool IsFNABS = !IsFABS && (Op0.getOpcode() == ISD::FABS);
14755 MVT VT = Op.getSimpleValueType();
14756 // Assume scalar op for initialization; update for vector if needed.
14757 // Note that there are no scalar bitwise logical SSE/AVX instructions, so we
14758 // generate a 16-byte vector constant and logic op even for the scalar case.
14759 // Using a 16-byte mask allows folding the load of the mask with
14760 // the logic op, so it can save (~4 bytes) on code size.
14762 unsigned NumElts = VT == MVT::f64 ? 2 : 4;
14763 // FIXME: Use function attribute "OptimizeForSize" and/or CodeGenOpt::Level to
14764 // decide if we should generate a 16-byte constant mask when we only need 4 or
14765 // 8 bytes for the scalar case.
14766 if (VT.isVector()) {
14767 EltVT = VT.getVectorElementType();
14768 NumElts = VT.getVectorNumElements();
14771 unsigned EltBits = EltVT.getSizeInBits();
14772 LLVMContext *Context = DAG.getContext();
14773 // For FABS, mask is 0x7f...; for FNEG, mask is 0x80...
14775 IsFABS ? APInt::getSignedMaxValue(EltBits) : APInt::getSignBit(EltBits);
14776 Constant *C = ConstantInt::get(*Context, MaskElt);
14777 C = ConstantVector::getSplat(NumElts, C);
14778 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
14779 SDValue CPIdx = DAG.getConstantPool(C, TLI.getPointerTy());
14780 unsigned Alignment = cast<ConstantPoolSDNode>(CPIdx)->getAlignment();
14781 SDValue Mask = DAG.getLoad(VT, dl, DAG.getEntryNode(), CPIdx,
14782 MachinePointerInfo::getConstantPool(),
14783 false, false, false, Alignment);
14785 if (VT.isVector()) {
14786 // For a vector, cast operands to a vector type, perform the logic op,
14787 // and cast the result back to the original value type.
14788 MVT VecVT = MVT::getVectorVT(MVT::i64, VT.getSizeInBits() / 64);
14789 SDValue MaskCasted = DAG.getNode(ISD::BITCAST, dl, VecVT, Mask);
14790 SDValue Operand = IsFNABS ?
14791 DAG.getNode(ISD::BITCAST, dl, VecVT, Op0.getOperand(0)) :
14792 DAG.getNode(ISD::BITCAST, dl, VecVT, Op0);
14793 unsigned BitOp = IsFABS ? ISD::AND : IsFNABS ? ISD::OR : ISD::XOR;
14794 return DAG.getNode(ISD::BITCAST, dl, VT,
14795 DAG.getNode(BitOp, dl, VecVT, Operand, MaskCasted));
14798 // If not vector, then scalar.
14799 unsigned BitOp = IsFABS ? X86ISD::FAND : IsFNABS ? X86ISD::FOR : X86ISD::FXOR;
14800 SDValue Operand = IsFNABS ? Op0.getOperand(0) : Op0;
14801 return DAG.getNode(BitOp, dl, VT, Operand, Mask);
14804 static SDValue LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) {
14805 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
14806 LLVMContext *Context = DAG.getContext();
14807 SDValue Op0 = Op.getOperand(0);
14808 SDValue Op1 = Op.getOperand(1);
14810 MVT VT = Op.getSimpleValueType();
14811 MVT SrcVT = Op1.getSimpleValueType();
14813 // If second operand is smaller, extend it first.
14814 if (SrcVT.bitsLT(VT)) {
14815 Op1 = DAG.getNode(ISD::FP_EXTEND, dl, VT, Op1);
14818 // And if it is bigger, shrink it first.
14819 if (SrcVT.bitsGT(VT)) {
14820 Op1 = DAG.getNode(ISD::FP_ROUND, dl, VT, Op1, DAG.getIntPtrConstant(1));
14824 // At this point the operands and the result should have the same
14825 // type, and that won't be f80 since that is not custom lowered.
14827 const fltSemantics &Sem =
14828 VT == MVT::f64 ? APFloat::IEEEdouble : APFloat::IEEEsingle;
14829 const unsigned SizeInBits = VT.getSizeInBits();
14831 SmallVector<Constant *, 4> CV(
14832 VT == MVT::f64 ? 2 : 4,
14833 ConstantFP::get(*Context, APFloat(Sem, APInt(SizeInBits, 0))));
14835 // First, clear all bits but the sign bit from the second operand (sign).
14836 CV[0] = ConstantFP::get(*Context,
14837 APFloat(Sem, APInt::getHighBitsSet(SizeInBits, 1)));
14838 Constant *C = ConstantVector::get(CV);
14839 SDValue CPIdx = DAG.getConstantPool(C, TLI.getPointerTy(), 16);
14840 SDValue Mask1 = DAG.getLoad(SrcVT, dl, DAG.getEntryNode(), CPIdx,
14841 MachinePointerInfo::getConstantPool(),
14842 false, false, false, 16);
14843 SDValue SignBit = DAG.getNode(X86ISD::FAND, dl, SrcVT, Op1, Mask1);
14845 // Next, clear the sign bit from the first operand (magnitude).
14846 // If it's a constant, we can clear it here.
14847 if (ConstantFPSDNode *Op0CN = dyn_cast<ConstantFPSDNode>(Op0)) {
14848 APFloat APF = Op0CN->getValueAPF();
14849 // If the magnitude is a positive zero, the sign bit alone is enough.
14850 if (APF.isPosZero())
14853 CV[0] = ConstantFP::get(*Context, APF);
14855 CV[0] = ConstantFP::get(
14857 APFloat(Sem, APInt::getLowBitsSet(SizeInBits, SizeInBits - 1)));
14859 C = ConstantVector::get(CV);
14860 CPIdx = DAG.getConstantPool(C, TLI.getPointerTy(), 16);
14861 SDValue Val = DAG.getLoad(VT, dl, DAG.getEntryNode(), CPIdx,
14862 MachinePointerInfo::getConstantPool(),
14863 false, false, false, 16);
14864 // If the magnitude operand wasn't a constant, we need to AND out the sign.
14865 if (!isa<ConstantFPSDNode>(Op0))
14866 Val = DAG.getNode(X86ISD::FAND, dl, VT, Op0, Val);
14868 // OR the magnitude value with the sign bit.
14869 return DAG.getNode(X86ISD::FOR, dl, VT, Val, SignBit);
14872 static SDValue LowerFGETSIGN(SDValue Op, SelectionDAG &DAG) {
14873 SDValue N0 = Op.getOperand(0);
14875 MVT VT = Op.getSimpleValueType();
14877 // Lower ISD::FGETSIGN to (AND (X86ISD::FGETSIGNx86 ...) 1).
14878 SDValue xFGETSIGN = DAG.getNode(X86ISD::FGETSIGNx86, dl, VT, N0,
14879 DAG.getConstant(1, VT));
14880 return DAG.getNode(ISD::AND, dl, VT, xFGETSIGN, DAG.getConstant(1, VT));
14883 // Check whether an OR'd tree is PTEST-able.
14884 static SDValue LowerVectorAllZeroTest(SDValue Op, const X86Subtarget *Subtarget,
14885 SelectionDAG &DAG) {
14886 assert(Op.getOpcode() == ISD::OR && "Only check OR'd tree.");
14888 if (!Subtarget->hasSSE41())
14891 if (!Op->hasOneUse())
14894 SDNode *N = Op.getNode();
14897 SmallVector<SDValue, 8> Opnds;
14898 DenseMap<SDValue, unsigned> VecInMap;
14899 SmallVector<SDValue, 8> VecIns;
14900 EVT VT = MVT::Other;
14902 // Recognize a special case where a vector is cast into a wide integer to test all 0s.
14904 Opnds.push_back(N->getOperand(0));
14905 Opnds.push_back(N->getOperand(1));
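// For example (roughly), for a v4i32 value %v:
//   (or (or (extractelt %v, 0), (extractelt %v, 1)),
//       (or (extractelt %v, 2), (extractelt %v, 3))) == 0
// covers every element of %v, so the whole tree can be tested with a single
// PTEST of %v against itself.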
14907 for (unsigned Slot = 0, e = Opnds.size(); Slot < e; ++Slot) {
14908 SmallVectorImpl<SDValue>::const_iterator I = Opnds.begin() + Slot;
14909 // BFS traverse all OR'd operands.
14910 if (I->getOpcode() == ISD::OR) {
14911 Opnds.push_back(I->getOperand(0));
14912 Opnds.push_back(I->getOperand(1));
14913 // Re-evaluate the number of nodes to be traversed.
14914 e += 2; // 2 more nodes (LHS and RHS) are pushed.
14918 // Quit if the operand is not an EXTRACT_VECTOR_ELT.
14919 if (I->getOpcode() != ISD::EXTRACT_VECTOR_ELT)
14922 // Quit if without a constant index.
14923 SDValue Idx = I->getOperand(1);
14924 if (!isa<ConstantSDNode>(Idx))
14927 SDValue ExtractedFromVec = I->getOperand(0);
14928 DenseMap<SDValue, unsigned>::iterator M = VecInMap.find(ExtractedFromVec);
14929 if (M == VecInMap.end()) {
14930 VT = ExtractedFromVec.getValueType();
14931 // Quit if not 128/256-bit vector.
14932 if (!VT.is128BitVector() && !VT.is256BitVector())
14934 // Quit if not the same type.
14935 if (VecInMap.begin() != VecInMap.end() &&
14936 VT != VecInMap.begin()->first.getValueType())
14938 M = VecInMap.insert(std::make_pair(ExtractedFromVec, 0)).first;
14939 VecIns.push_back(ExtractedFromVec);
14941 M->second |= 1U << cast<ConstantSDNode>(Idx)->getZExtValue();
14944 assert((VT.is128BitVector() || VT.is256BitVector()) &&
14945 "Not extracted from 128-/256-bit vector.");
14947 unsigned FullMask = (1U << VT.getVectorNumElements()) - 1U;
14949 for (DenseMap<SDValue, unsigned>::const_iterator
14950 I = VecInMap.begin(), E = VecInMap.end(); I != E; ++I) {
14951 // Quit if not all elements are used.
14952 if (I->second != FullMask)
14956 EVT TestVT = VT.is128BitVector() ? MVT::v2i64 : MVT::v4i64;
14958 // Cast all vectors into TestVT for PTEST.
14959 for (unsigned i = 0, e = VecIns.size(); i < e; ++i)
14960 VecIns[i] = DAG.getNode(ISD::BITCAST, DL, TestVT, VecIns[i]);
14962 // If more than one full vector is evaluated, OR them together first before the PTEST.
14963 for (unsigned Slot = 0, e = VecIns.size(); e - Slot > 1; Slot += 2, e += 1) {
14964 // Each iteration will OR 2 nodes and append the result until there is only
14965 // 1 node left, i.e. the final OR'd value of all vectors.
14966 SDValue LHS = VecIns[Slot];
14967 SDValue RHS = VecIns[Slot + 1];
14968 VecIns.push_back(DAG.getNode(ISD::OR, DL, TestVT, LHS, RHS));
14971 return DAG.getNode(X86ISD::PTEST, DL, MVT::i32,
14972 VecIns.back(), VecIns.back());
14975 /// \brief return true if \c Op has a use that doesn't just read flags.
14976 static bool hasNonFlagsUse(SDValue Op) {
14977 for (SDNode::use_iterator UI = Op->use_begin(), UE = Op->use_end(); UI != UE;
14979 SDNode *User = *UI;
14980 unsigned UOpNo = UI.getOperandNo();
14981 if (User->getOpcode() == ISD::TRUNCATE && User->hasOneUse()) {
14982 // Look past the truncate.
14983 UOpNo = User->use_begin().getOperandNo();
14984 User = *User->use_begin();
14987 if (User->getOpcode() != ISD::BRCOND && User->getOpcode() != ISD::SETCC &&
14988 !(User->getOpcode() == ISD::SELECT && UOpNo == 0))
14994 /// Emit nodes that will be selected as "test Op0,Op0", or something equivalent.
14996 SDValue X86TargetLowering::EmitTest(SDValue Op, unsigned X86CC, SDLoc dl,
14997 SelectionDAG &DAG) const {
14998 if (Op.getValueType() == MVT::i1)
14999 // KORTEST instruction should be selected
15000 return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op,
15001 DAG.getConstant(0, Op.getValueType()));
15003 // CF and OF aren't always set the way we want. Determine which
15004 // of these we need.
15005 bool NeedCF = false;
15006 bool NeedOF = false;
15009 case X86::COND_A: case X86::COND_AE:
15010 case X86::COND_B: case X86::COND_BE:
15013 case X86::COND_G: case X86::COND_GE:
15014 case X86::COND_L: case X86::COND_LE:
15015 case X86::COND_O: case X86::COND_NO: {
15016 // Check if we really need to set the
15017 // Overflow flag. If NoSignedWrap is present
15018 // that is not actually needed.
15019 switch (Op->getOpcode()) {
15024 const BinaryWithFlagsSDNode *BinNode =
15025 cast<BinaryWithFlagsSDNode>(Op.getNode());
15026 if (BinNode->hasNoSignedWrap())
15036 // See if we can use the EFLAGS value from the operand instead of
15037 // doing a separate TEST. TEST always sets OF and CF to 0, so unless
15038 // we prove that the arithmetic won't overflow, we can't use OF or CF.
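// For example, the ZF produced by (add x, y) can feed a following setcc or
// branch against zero directly, avoiding a separate TEST, but only for
// condition codes that don't read OF or CF (checked above), since ADD sets
// those flags differently than a TEST would.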
15039 if (Op.getResNo() != 0 || NeedOF || NeedCF) {
15040 // Emit a CMP with 0, which is the TEST pattern.
15041 //if (Op.getValueType() == MVT::i1)
15042 // return DAG.getNode(X86ISD::CMP, dl, MVT::i1, Op,
15043 // DAG.getConstant(0, MVT::i1));
15044 return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op,
15045 DAG.getConstant(0, Op.getValueType()));
15047 unsigned Opcode = 0;
15048 unsigned NumOperands = 0;
15050 // Truncate operations may prevent the merge of the SETCC instruction
15051 // and the arithmetic instruction before it. Attempt to truncate the operands
15052 // of the arithmetic instruction and use a reduced bit-width instruction.
15053 bool NeedTruncation = false;
15054 SDValue ArithOp = Op;
15055 if (Op->getOpcode() == ISD::TRUNCATE && Op->hasOneUse()) {
15056 SDValue Arith = Op->getOperand(0);
15057 // Both the trunc and the arithmetic op need to have one user each.
15058 if (Arith->hasOneUse())
15059 switch (Arith.getOpcode()) {
15066 NeedTruncation = true;
15072 // NOTICE: In the code below we use ArithOp to hold the arithmetic operation
15073 // which may be the result of a CAST. We use the variable 'Op', which is the
15074 // non-casted variable when we check for possible users.
15075 switch (ArithOp.getOpcode()) {
15077 // Due to an isel shortcoming, be conservative if this add is likely to be
15078 // selected as part of a load-modify-store instruction. When the root node
15079 // in a match is a store, isel doesn't know how to remap non-chain non-flag
15080 // uses of other nodes in the match, such as the ADD in this case. This
15081 // leads to the ADD being left around and reselected, with the result being
15082 // two adds in the output. Alas, even if none of our users are stores, that
15083 // doesn't prove we're O.K. Ergo, if we have any parents that aren't
15084 // CopyToReg or SETCC, eschew INC/DEC. A better fix seems to require
15085 // climbing the DAG back to the root, and it doesn't seem to be worth the effort.
15087 for (SDNode::use_iterator UI = Op.getNode()->use_begin(),
15088 UE = Op.getNode()->use_end(); UI != UE; ++UI)
15089 if (UI->getOpcode() != ISD::CopyToReg &&
15090 UI->getOpcode() != ISD::SETCC &&
15091 UI->getOpcode() != ISD::STORE)
15094 if (ConstantSDNode *C =
15095 dyn_cast<ConstantSDNode>(ArithOp.getNode()->getOperand(1))) {
15096 // An add of one will be selected as an INC.
15097 if (C->getAPIntValue() == 1 && !Subtarget->slowIncDec()) {
15098 Opcode = X86ISD::INC;
15103 // An add of negative one (subtract of one) will be selected as a DEC.
15104 if (C->getAPIntValue().isAllOnesValue() && !Subtarget->slowIncDec()) {
15105 Opcode = X86ISD::DEC;
15111 // Otherwise use a regular EFLAGS-setting add.
15112 Opcode = X86ISD::ADD;
15117 // If we have a constant logical shift that's only used in a comparison
15118 // against zero turn it into an equivalent AND. This allows turning it into
15119 // a TEST instruction later.
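// For example (a sketch): (srl x, 3) == 0 is equivalent to
// (and x, ~0b111) == 0, and the AND form can be matched as a single
// 'test' with an immediate mask rather than a shift plus a compare.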
15120 if ((X86CC == X86::COND_E || X86CC == X86::COND_NE) && Op->hasOneUse() &&
15121 isa<ConstantSDNode>(Op->getOperand(1)) && !hasNonFlagsUse(Op)) {
15122 EVT VT = Op.getValueType();
15123 unsigned BitWidth = VT.getSizeInBits();
15124 unsigned ShAmt = Op->getConstantOperandVal(1);
15125 if (ShAmt >= BitWidth) // Avoid undefined shifts.
15127 APInt Mask = ArithOp.getOpcode() == ISD::SRL
15128 ? APInt::getHighBitsSet(BitWidth, BitWidth - ShAmt)
15129 : APInt::getLowBitsSet(BitWidth, BitWidth - ShAmt);
15130 if (!Mask.isSignedIntN(32)) // Avoid large immediates.
15132 SDValue New = DAG.getNode(ISD::AND, dl, VT, Op->getOperand(0),
15133 DAG.getConstant(Mask, VT));
15134 DAG.ReplaceAllUsesWith(Op, New);
15140 // If the primary result of the AND isn't used, don't bother using X86ISD::AND,
15141 // because a TEST instruction will be better.
15142 if (!hasNonFlagsUse(Op))
15148 // Due to the ISEL shortcoming noted above, be conservative if this op is
15149 // likely to be selected as part of a load-modify-store instruction.
15150 for (SDNode::use_iterator UI = Op.getNode()->use_begin(),
15151 UE = Op.getNode()->use_end(); UI != UE; ++UI)
15152 if (UI->getOpcode() == ISD::STORE)
15155 // Otherwise use a regular EFLAGS-setting instruction.
15156 switch (ArithOp.getOpcode()) {
15157 default: llvm_unreachable("unexpected operator!");
15158 case ISD::SUB: Opcode = X86ISD::SUB; break;
15159 case ISD::XOR: Opcode = X86ISD::XOR; break;
15160 case ISD::AND: Opcode = X86ISD::AND; break;
15162 if (!NeedTruncation && (X86CC == X86::COND_E || X86CC == X86::COND_NE)) {
15163 SDValue EFLAGS = LowerVectorAllZeroTest(Op, Subtarget, DAG);
15164 if (EFLAGS.getNode())
15167 Opcode = X86ISD::OR;
15181 return SDValue(Op.getNode(), 1);
15187 // If we found that truncation is beneficial, perform the truncation and update 'Op'.
15189 if (NeedTruncation) {
15190 EVT VT = Op.getValueType();
15191 SDValue WideVal = Op->getOperand(0);
15192 EVT WideVT = WideVal.getValueType();
15193 unsigned ConvertedOp = 0;
15194 // Use a target machine opcode to prevent further DAGCombine
15195 // optimizations that may separate the arithmetic operations
15196 // from the setcc node.
15197 switch (WideVal.getOpcode()) {
15199 case ISD::ADD: ConvertedOp = X86ISD::ADD; break;
15200 case ISD::SUB: ConvertedOp = X86ISD::SUB; break;
15201 case ISD::AND: ConvertedOp = X86ISD::AND; break;
15202 case ISD::OR: ConvertedOp = X86ISD::OR; break;
15203 case ISD::XOR: ConvertedOp = X86ISD::XOR; break;
15207 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
15208 if (TLI.isOperationLegal(WideVal.getOpcode(), WideVT)) {
15209 SDValue V0 = DAG.getNode(ISD::TRUNCATE, dl, VT, WideVal.getOperand(0));
15210 SDValue V1 = DAG.getNode(ISD::TRUNCATE, dl, VT, WideVal.getOperand(1));
15211 Op = DAG.getNode(ConvertedOp, dl, VT, V0, V1);
15217 // Emit a CMP with 0, which is the TEST pattern.
15218 return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op,
15219 DAG.getConstant(0, Op.getValueType()));
15221 SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
15222 SmallVector<SDValue, 4> Ops;
15223 for (unsigned i = 0; i != NumOperands; ++i)
15224 Ops.push_back(Op.getOperand(i));
15226 SDValue New = DAG.getNode(Opcode, dl, VTs, Ops);
15227 DAG.ReplaceAllUsesWith(Op, New);
15228 return SDValue(New.getNode(), 1);
15231 /// Emit nodes that will be selected as "cmp Op0,Op1", or something equivalent.
15233 SDValue X86TargetLowering::EmitCmp(SDValue Op0, SDValue Op1, unsigned X86CC,
15234 SDLoc dl, SelectionDAG &DAG) const {
15235 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op1)) {
15236 if (C->getAPIntValue() == 0)
15237 return EmitTest(Op0, X86CC, dl, DAG);
15239 if (Op0.getValueType() == MVT::i1)
15240 llvm_unreachable("Unexpected comparison operation for MVT::i1 operands");
15243 if ((Op0.getValueType() == MVT::i8 || Op0.getValueType() == MVT::i16 ||
15244 Op0.getValueType() == MVT::i32 || Op0.getValueType() == MVT::i64)) {
15245 // Do the comparison at i32 if it's smaller, except in the Atom case.
15246 // This avoids subregister aliasing issues. Keep the smaller reference
15247 // if we're optimizing for size, however, as that'll allow better folding
15248 // of memory operations.
15249 if (Op0.getValueType() != MVT::i32 && Op0.getValueType() != MVT::i64 &&
15250 !DAG.getMachineFunction().getFunction()->getAttributes().hasAttribute(
15251 AttributeSet::FunctionIndex, Attribute::MinSize) &&
15252 !Subtarget->isAtom()) {
15253 unsigned ExtendOp =
15254 isX86CCUnsigned(X86CC) ? ISD::ZERO_EXTEND : ISD::SIGN_EXTEND;
15255 Op0 = DAG.getNode(ExtendOp, dl, MVT::i32, Op0);
15256 Op1 = DAG.getNode(ExtendOp, dl, MVT::i32, Op1);
15258 // Use SUB instead of CMP to enable CSE between SUB and CMP.
15259 SDVTList VTs = DAG.getVTList(Op0.getValueType(), MVT::i32);
15260 SDValue Sub = DAG.getNode(X86ISD::SUB, dl, VTs,
15262 return SDValue(Sub.getNode(), 1);
15264 return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op0, Op1);
15267 /// Convert a comparison if required by the subtarget.
15268 SDValue X86TargetLowering::ConvertCmpIfNecessary(SDValue Cmp,
15269 SelectionDAG &DAG) const {
15270 // If the subtarget does not support the FUCOMI instruction, floating-point
15271 // comparisons have to be converted.
15272 if (Subtarget->hasCMov() ||
15273 Cmp.getOpcode() != X86ISD::CMP ||
15274 !Cmp.getOperand(0).getValueType().isFloatingPoint() ||
15275 !Cmp.getOperand(1).getValueType().isFloatingPoint())
15278 // The instruction selector will select an FUCOM instruction instead of
15279 // FUCOMI, which writes the comparison result to FPSW instead of EFLAGS. Hence
15280 // build an SDNode sequence that transfers the result from FPSW into EFLAGS:
15281 // (X86sahf (trunc (srl (X86fp_stsw (trunc (X86cmp ...)), 8))))
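// In assembly terms this is roughly "fnstsw %ax ; sahf": SAHF copies AH (the
// upper byte of the FPU status word, where C0/C2/C3 live) into EFLAGS.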
15283 SDValue TruncFPSW = DAG.getNode(ISD::TRUNCATE, dl, MVT::i16, Cmp);
15284 SDValue FNStSW = DAG.getNode(X86ISD::FNSTSW16r, dl, MVT::i16, TruncFPSW);
15285 SDValue Srl = DAG.getNode(ISD::SRL, dl, MVT::i16, FNStSW,
15286 DAG.getConstant(8, MVT::i8));
15287 SDValue TruncSrl = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Srl);
15288 return DAG.getNode(X86ISD::SAHF, dl, MVT::i32, TruncSrl);
15291 /// The minimum architected relative accuracy is 2^-12. We need one
15292 /// Newton-Raphson step to have a good float result (24 bits of precision).
15293 SDValue X86TargetLowering::getRsqrtEstimate(SDValue Op,
15294 DAGCombinerInfo &DCI,
15295 unsigned &RefinementSteps,
15296 bool &UseOneConstNR) const {
15297 // FIXME: We should use instruction latency models to calculate the cost of
15298 // each potential sequence, but this is very hard to do reliably because
15299 // at least Intel's Core* chips have variable timing based on the number of
15300 // significant digits in the divisor and/or sqrt operand.
15301 if (!Subtarget->useSqrtEst())
15304 EVT VT = Op.getValueType();
15306 // SSE1 has rsqrtss and rsqrtps.
15307 // TODO: Add support for AVX512 (v16f32).
15308 // It is likely not profitable to do this for f64 because a double-precision
15309 // rsqrt estimate with refinement on x86 prior to FMA requires at least 16
15310 // instructions: convert to single, rsqrtss, convert back to double, refine
15311 // (3 steps = at least 13 insts). If an 'rsqrtsd' variant was added to the ISA
15312 // along with FMA, this could be a throughput win.
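// The single refinement step requested here is the standard Newton-Raphson
// iteration for 1/sqrt(a), applied by the caller of this hook:
//   x1 = x0 * (1.5 - 0.5 * a * x0 * x0)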
15313 if ((Subtarget->hasSSE1() && (VT == MVT::f32 || VT == MVT::v4f32)) ||
15314 (Subtarget->hasAVX() && VT == MVT::v8f32)) {
15315 RefinementSteps = 1;
15316 UseOneConstNR = false;
15317 return DCI.DAG.getNode(X86ISD::FRSQRT, SDLoc(Op), VT, Op);
15322 /// The minimum architected relative accuracy is 2^-12. We need one
15323 /// Newton-Raphson step to have a good float result (24 bits of precision).
15324 SDValue X86TargetLowering::getRecipEstimate(SDValue Op,
15325 DAGCombinerInfo &DCI,
15326 unsigned &RefinementSteps) const {
15327 // FIXME: We should use instruction latency models to calculate the cost of
15328 // each potential sequence, but this is very hard to do reliably because
15329 // at least Intel's Core* chips have variable timing based on the number of
15330 // significant digits in the divisor.
15331 if (!Subtarget->useReciprocalEst())
15334 EVT VT = Op.getValueType();
15336 // SSE1 has rcpss and rcpps. AVX adds a 256-bit variant for rcpps.
15337 // TODO: Add support for AVX512 (v16f32).
15338 // It is likely not profitable to do this for f64 because a double-precision
15339 // reciprocal estimate with refinement on x86 prior to FMA requires
15340 // 15 instructions: convert to single, rcpss, convert back to double, refine
15341 // (3 steps = 12 insts). If an 'rcpsd' variant was added to the ISA
15342 // along with FMA, this could be a throughput win.
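// The refinement steps requested here are the standard Newton-Raphson
// iteration for 1/a, applied by the caller of this hook:
//   x1 = x0 * (2 - a * x0)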
15343 if ((Subtarget->hasSSE1() && (VT == MVT::f32 || VT == MVT::v4f32)) ||
15344 (Subtarget->hasAVX() && VT == MVT::v8f32)) {
15345 RefinementSteps = ReciprocalEstimateRefinementSteps;
15346 return DCI.DAG.getNode(X86ISD::FRCP, SDLoc(Op), VT, Op);
15351 static bool isAllOnes(SDValue V) {
15352 ConstantSDNode *C = dyn_cast<ConstantSDNode>(V);
15353 return C && C->isAllOnesValue();
15356 /// LowerToBT - Result of 'and' is compared against zero. Turn it into a BT node
15357 /// if it's possible.
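/// For example, (and x, (shl 1, n)) != 0 and ((srl x, n) & 1) != 0 can both be
/// selected as "bt x, n" followed by a setcc on the carry flag.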
15358 SDValue X86TargetLowering::LowerToBT(SDValue And, ISD::CondCode CC,
15359 SDLoc dl, SelectionDAG &DAG) const {
15360 SDValue Op0 = And.getOperand(0);
15361 SDValue Op1 = And.getOperand(1);
15362 if (Op0.getOpcode() == ISD::TRUNCATE)
15363 Op0 = Op0.getOperand(0);
15364 if (Op1.getOpcode() == ISD::TRUNCATE)
15365 Op1 = Op1.getOperand(0);
15368 if (Op1.getOpcode() == ISD::SHL)
15369 std::swap(Op0, Op1);
15370 if (Op0.getOpcode() == ISD::SHL) {
15371 if (ConstantSDNode *And00C = dyn_cast<ConstantSDNode>(Op0.getOperand(0)))
15372 if (And00C->getZExtValue() == 1) {
15373 // If we looked past a truncate, check that it's only truncating away
15375 unsigned BitWidth = Op0.getValueSizeInBits();
15376 unsigned AndBitWidth = And.getValueSizeInBits();
15377 if (BitWidth > AndBitWidth) {
15379 DAG.computeKnownBits(Op0, Zeros, Ones);
15380 if (Zeros.countLeadingOnes() < BitWidth - AndBitWidth)
15384 RHS = Op0.getOperand(1);
15386 } else if (Op1.getOpcode() == ISD::Constant) {
15387 ConstantSDNode *AndRHS = cast<ConstantSDNode>(Op1);
15388 uint64_t AndRHSVal = AndRHS->getZExtValue();
15389 SDValue AndLHS = Op0;
15391 if (AndRHSVal == 1 && AndLHS.getOpcode() == ISD::SRL) {
15392 LHS = AndLHS.getOperand(0);
15393 RHS = AndLHS.getOperand(1);
15396 // Use BT if the immediate can't be encoded in a TEST instruction.
15397 if (!isUInt<32>(AndRHSVal) && isPowerOf2_64(AndRHSVal)) {
15399 RHS = DAG.getConstant(Log2_64_Ceil(AndRHSVal), LHS.getValueType());
15403 if (LHS.getNode()) {
15404 // If LHS is i8, promote it to i32 with any_extend. There is no i8 BT
15405 // instruction. Since the shift amount is in-range-or-undefined, we know
15406 // that doing a bittest on the i32 value is ok. We extend to i32 because
15407 // the encoding for the i16 version is larger than the i32 version.
15408 // Also promote i16 to i32 for performance / code size reason.
15409 if (LHS.getValueType() == MVT::i8 ||
15410 LHS.getValueType() == MVT::i16)
15411 LHS = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, LHS);
15413 // If the operand types disagree, extend the shift amount to match. Since
15414 // BT ignores high bits (like shifts) we can use anyextend.
15415 if (LHS.getValueType() != RHS.getValueType())
15416 RHS = DAG.getNode(ISD::ANY_EXTEND, dl, LHS.getValueType(), RHS);
15418 SDValue BT = DAG.getNode(X86ISD::BT, dl, MVT::i32, LHS, RHS);
15419 X86::CondCode Cond = CC == ISD::SETEQ ? X86::COND_AE : X86::COND_B;
15420 return DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
15421 DAG.getConstant(Cond, MVT::i8), BT);
15427 /// \brief Turns an ISD::CondCode into a value suitable for SSE floating point
15429 static int translateX86FSETCC(ISD::CondCode SetCCOpcode, SDValue &Op0,
15434 // SSE Condition code mapping:
15443 switch (SetCCOpcode) {
15444 default: llvm_unreachable("Unexpected SETCC condition");
15446 case ISD::SETEQ: SSECC = 0; break;
15448 case ISD::SETGT: Swap = true; // Fallthrough
15450 case ISD::SETOLT: SSECC = 1; break;
15452 case ISD::SETGE: Swap = true; // Fallthrough
15454 case ISD::SETOLE: SSECC = 2; break;
15455 case ISD::SETUO: SSECC = 3; break;
15457 case ISD::SETNE: SSECC = 4; break;
15458 case ISD::SETULE: Swap = true; // Fallthrough
15459 case ISD::SETUGE: SSECC = 5; break;
15460 case ISD::SETULT: Swap = true; // Fallthrough
15461 case ISD::SETUGT: SSECC = 6; break;
15462 case ISD::SETO: SSECC = 7; break;
15464 case ISD::SETONE: SSECC = 8; break;
15467 std::swap(Op0, Op1);
15472 // Lower256IntVSETCC - Break a 256-bit integer VSETCC into two new 128-bit
15473 // ones, and then concatenate the result back.
15474 static SDValue Lower256IntVSETCC(SDValue Op, SelectionDAG &DAG) {
15475 MVT VT = Op.getSimpleValueType();
15477 assert(VT.is256BitVector() && Op.getOpcode() == ISD::SETCC &&
15478 "Unsupported value type for operation");
15480 unsigned NumElems = VT.getVectorNumElements();
15482 SDValue CC = Op.getOperand(2);
15484 // Extract the LHS vectors
15485 SDValue LHS = Op.getOperand(0);
15486 SDValue LHS1 = Extract128BitVector(LHS, 0, DAG, dl);
15487 SDValue LHS2 = Extract128BitVector(LHS, NumElems/2, DAG, dl);
15489 // Extract the RHS vectors
15490 SDValue RHS = Op.getOperand(1);
15491 SDValue RHS1 = Extract128BitVector(RHS, 0, DAG, dl);
15492 SDValue RHS2 = Extract128BitVector(RHS, NumElems/2, DAG, dl);
15494 // Issue the operation on the smaller types and concatenate the result back
15495 MVT EltVT = VT.getVectorElementType();
15496 MVT NewVT = MVT::getVectorVT(EltVT, NumElems/2);
15497 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT,
15498 DAG.getNode(Op.getOpcode(), dl, NewVT, LHS1, RHS1, CC),
15499 DAG.getNode(Op.getOpcode(), dl, NewVT, LHS2, RHS2, CC));
15502 static SDValue LowerIntVSETCC_AVX512(SDValue Op, SelectionDAG &DAG,
15503 const X86Subtarget *Subtarget) {
15504 SDValue Op0 = Op.getOperand(0);
15505 SDValue Op1 = Op.getOperand(1);
15506 SDValue CC = Op.getOperand(2);
15507 MVT VT = Op.getSimpleValueType();
15510 assert(Op0.getValueType().getVectorElementType().getSizeInBits() >= 8 &&
15511 Op.getValueType().getScalarType() == MVT::i1 &&
15512 "Cannot set masked compare for this operation");
15514 ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get();
15516 bool Unsigned = false;
15519 switch (SetCCOpcode) {
15520 default: llvm_unreachable("Unexpected SETCC condition");
15521 case ISD::SETNE: SSECC = 4; break;
15522 case ISD::SETEQ: Opc = X86ISD::PCMPEQM; break;
15523 case ISD::SETUGT: SSECC = 6; Unsigned = true; break;
15524 case ISD::SETLT: Swap = true; //fall-through
15525 case ISD::SETGT: Opc = X86ISD::PCMPGTM; break;
15526 case ISD::SETULT: SSECC = 1; Unsigned = true; break;
15527 case ISD::SETUGE: SSECC = 5; Unsigned = true; break; //NLT
15528 case ISD::SETGE: Swap = true; SSECC = 2; break; // LE + swap
15529 case ISD::SETULE: Unsigned = true; //fall-through
15530 case ISD::SETLE: SSECC = 2; break;
15534 std::swap(Op0, Op1);
15536 return DAG.getNode(Opc, dl, VT, Op0, Op1);
15537 Opc = Unsigned ? X86ISD::CMPMU: X86ISD::CMPM;
15538 return DAG.getNode(Opc, dl, VT, Op0, Op1,
15539 DAG.getConstant(SSECC, MVT::i8));
15542 /// \brief Try to turn a VSETULT into a VSETULE by modifying its second
15543 /// operand \p Op1. If non-trivial (for example because it's not constant)
15544 /// return an empty value.
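/// For example, (x <u 14) can be rewritten as (x <=u 13); this is only valid
/// when every constant lane is non-zero, since (x <u 0) has no <=u equivalent.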
15545 static SDValue ChangeVSETULTtoVSETULE(SDLoc dl, SDValue Op1, SelectionDAG &DAG)
15547 BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(Op1.getNode());
15551 MVT VT = Op1.getSimpleValueType();
15552 MVT EVT = VT.getVectorElementType();
15553 unsigned n = VT.getVectorNumElements();
15554 SmallVector<SDValue, 8> ULTOp1;
15556 for (unsigned i = 0; i < n; ++i) {
15557 ConstantSDNode *Elt = dyn_cast<ConstantSDNode>(BV->getOperand(i));
15558 if (!Elt || Elt->isOpaque() || Elt->getValueType(0) != EVT)
15561 // Avoid underflow.
15562 APInt Val = Elt->getAPIntValue();
15566 ULTOp1.push_back(DAG.getConstant(Val - 1, EVT));
15569 return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, ULTOp1);
15572 static SDValue LowerVSETCC(SDValue Op, const X86Subtarget *Subtarget,
15573 SelectionDAG &DAG) {
15574 SDValue Op0 = Op.getOperand(0);
15575 SDValue Op1 = Op.getOperand(1);
15576 SDValue CC = Op.getOperand(2);
15577 MVT VT = Op.getSimpleValueType();
15578 ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get();
15579 bool isFP = Op.getOperand(1).getSimpleValueType().isFloatingPoint();
15584 MVT EltVT = Op0.getSimpleValueType().getVectorElementType();
15585 assert(EltVT == MVT::f32 || EltVT == MVT::f64);
15588 unsigned SSECC = translateX86FSETCC(SetCCOpcode, Op0, Op1);
15589 unsigned Opc = X86ISD::CMPP;
15590 if (Subtarget->hasAVX512() && VT.getVectorElementType() == MVT::i1) {
15591 assert(VT.getVectorNumElements() <= 16);
15592 Opc = X86ISD::CMPM;
15594 // In the two special cases we can't handle, emit two comparisons.
15597 unsigned CombineOpc;
15598 if (SetCCOpcode == ISD::SETUEQ) {
15599 CC0 = 3; CC1 = 0; CombineOpc = ISD::OR;
15601 assert(SetCCOpcode == ISD::SETONE);
15602 CC0 = 7; CC1 = 4; CombineOpc = ISD::AND;
15605 SDValue Cmp0 = DAG.getNode(Opc, dl, VT, Op0, Op1,
15606 DAG.getConstant(CC0, MVT::i8));
15607 SDValue Cmp1 = DAG.getNode(Opc, dl, VT, Op0, Op1,
15608 DAG.getConstant(CC1, MVT::i8));
15609 return DAG.getNode(CombineOpc, dl, VT, Cmp0, Cmp1);
15611 // Handle all other FP comparisons here.
15612 return DAG.getNode(Opc, dl, VT, Op0, Op1,
15613 DAG.getConstant(SSECC, MVT::i8));
15616 // Break 256-bit integer vector compare into smaller ones.
15617 if (VT.is256BitVector() && !Subtarget->hasInt256())
15618 return Lower256IntVSETCC(Op, DAG);
15620 bool MaskResult = (VT.getVectorElementType() == MVT::i1);
15621 EVT OpVT = Op1.getValueType();
15622 if (Subtarget->hasAVX512()) {
15623 if (Op1.getValueType().is512BitVector() ||
15624 (Subtarget->hasBWI() && Subtarget->hasVLX()) ||
15625 (MaskResult && OpVT.getVectorElementType().getSizeInBits() >= 32))
15626 return LowerIntVSETCC_AVX512(Op, DAG, Subtarget);
15628 // In the AVX-512 architecture, setcc returns a mask with i1 elements,
15629 // but there is no compare instruction for i8 and i16 elements in KNL.
15630 // We are not talking about 512-bit operands in this case; those
15631 // types are illegal.
15633 (OpVT.getVectorElementType().getSizeInBits() < 32 &&
15634 OpVT.getVectorElementType().getSizeInBits() >= 8))
15635 return DAG.getNode(ISD::TRUNCATE, dl, VT,
15636 DAG.getNode(ISD::SETCC, dl, OpVT, Op0, Op1, CC));
15639 // We are handling one of the integer comparisons here. Since SSE only has
15640 // GT and EQ comparisons for integers, swapping operands and multiple
15641 // operations may be required for some comparisons.
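// For example (illustrative): an unsigned x <u y is done by XORing both
// operands with the sign-bit constant (FlipSigns) and using the signed PCMPGT,
// possibly with the operands swapped and/or the result inverted.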
15643 bool Swap = false, Invert = false, FlipSigns = false, MinMax = false;
15644 bool Subus = false;
15646 switch (SetCCOpcode) {
15647 default: llvm_unreachable("Unexpected SETCC condition");
15648 case ISD::SETNE: Invert = true;
15649 case ISD::SETEQ: Opc = X86ISD::PCMPEQ; break;
15650 case ISD::SETLT: Swap = true;
15651 case ISD::SETGT: Opc = X86ISD::PCMPGT; break;
15652 case ISD::SETGE: Swap = true;
15653 case ISD::SETLE: Opc = X86ISD::PCMPGT;
15654 Invert = true; break;
15655 case ISD::SETULT: Swap = true;
15656 case ISD::SETUGT: Opc = X86ISD::PCMPGT;
15657 FlipSigns = true; break;
15658 case ISD::SETUGE: Swap = true;
15659 case ISD::SETULE: Opc = X86ISD::PCMPGT;
15660 FlipSigns = true; Invert = true; break;
15663 // Special case: Use min/max operations for SETULE/SETUGE
15664 MVT VET = VT.getVectorElementType();
15666 (Subtarget->hasSSE41() && (VET >= MVT::i8 && VET <= MVT::i32))
15667 || (Subtarget->hasSSE2() && (VET == MVT::i8));
15670 switch (SetCCOpcode) {
15672 case ISD::SETULE: Opc = X86ISD::UMIN; MinMax = true; break;
15673 case ISD::SETUGE: Opc = X86ISD::UMAX; MinMax = true; break;
15676 if (MinMax) { Swap = false; Invert = false; FlipSigns = false; }
15679 bool hasSubus = Subtarget->hasSSE2() && (VET == MVT::i8 || VET == MVT::i16);
15680 if (!MinMax && hasSubus) {
15681 // As another special case, use PSUBUS[BW] when it's profitable. E.g. Op0 u<= Op1 becomes
15683 //   t = psubus Op0, Op1
15684 //   pcmpeq t, <0..0>
15685 switch (SetCCOpcode) {
15687 case ISD::SETULT: {
15688 // If the comparison is against a constant we can turn this into a
15689 // setule. With psubus, setule does not require a swap. This is
15690 // beneficial because the constant in the register is no longer
15691 // clobbered as the destination, so it can be hoisted out of a loop.
15692 // Only do this pre-AVX, since AVX's vpcmp* is non-destructive anyway.
15693 if (Subtarget->hasAVX())
15695 SDValue ULEOp1 = ChangeVSETULTtoVSETULE(dl, Op1, DAG);
15696 if (ULEOp1.getNode()) {
15698 Subus = true; Invert = false; Swap = false;
15702 // Psubus is better than flip-sign because it requires no inversion.
15703 case ISD::SETUGE: Subus = true; Invert = false; Swap = true; break;
15704 case ISD::SETULE: Subus = true; Invert = false; Swap = false; break;
15708 Opc = X86ISD::SUBUS;
15714 std::swap(Op0, Op1);
15716 // Check that the operation in question is available (most are plain SSE2,
15717 // but PCMPGTQ and PCMPEQQ have different requirements).
15718 if (VT == MVT::v2i64) {
15719 if (Opc == X86ISD::PCMPGT && !Subtarget->hasSSE42()) {
15720 assert(Subtarget->hasSSE2() && "Don't know how to lower!");
15722 // First cast everything to the right type.
15723 Op0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op0);
15724 Op1 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op1);
15726 // Since SSE has no unsigned integer comparisons, we need to flip the sign
15727 // bits of the inputs before performing those operations. The lower
15728 // compare is always unsigned.
15731 SB = DAG.getConstant(0x80000000U, MVT::v4i32);
15733 SDValue Sign = DAG.getConstant(0x80000000U, MVT::i32);
15734 SDValue Zero = DAG.getConstant(0x00000000U, MVT::i32);
15735 SB = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
15736 Sign, Zero, Sign, Zero);
15738 Op0 = DAG.getNode(ISD::XOR, dl, MVT::v4i32, Op0, SB);
15739 Op1 = DAG.getNode(ISD::XOR, dl, MVT::v4i32, Op1, SB);
15741 // Emulate PCMPGTQ with (hi1 > hi2) | ((hi1 == hi2) & (lo1 > lo2))
15742 SDValue GT = DAG.getNode(X86ISD::PCMPGT, dl, MVT::v4i32, Op0, Op1);
15743 SDValue EQ = DAG.getNode(X86ISD::PCMPEQ, dl, MVT::v4i32, Op0, Op1);
15745 // Create masks for only the low parts/high parts of the 64 bit integers.
15746 static const int MaskHi[] = { 1, 1, 3, 3 };
15747 static const int MaskLo[] = { 0, 0, 2, 2 };
15748 SDValue EQHi = DAG.getVectorShuffle(MVT::v4i32, dl, EQ, EQ, MaskHi);
15749 SDValue GTLo = DAG.getVectorShuffle(MVT::v4i32, dl, GT, GT, MaskLo);
15750 SDValue GTHi = DAG.getVectorShuffle(MVT::v4i32, dl, GT, GT, MaskHi);
15752 SDValue Result = DAG.getNode(ISD::AND, dl, MVT::v4i32, EQHi, GTLo);
15753 Result = DAG.getNode(ISD::OR, dl, MVT::v4i32, Result, GTHi);
15756 Result = DAG.getNOT(dl, Result, MVT::v4i32);
15758 return DAG.getNode(ISD::BITCAST, dl, VT, Result);
15761 if (Opc == X86ISD::PCMPEQ && !Subtarget->hasSSE41()) {
15762 // If pcmpeqq is missing but pcmpeqd is available synthesize pcmpeqq with
15763 // pcmpeqd + pshufd + pand.
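// Illustrative sketch of the expansion: eq32 = pcmpeqd a, b; then
// pand eq32, pshufd(eq32, {1,0,3,2}), so a 64-bit lane is all-ones only when
// both of its 32-bit halves compared equal.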
15764 assert(Subtarget->hasSSE2() && !FlipSigns && "Don't know how to lower!");
15766 // First cast everything to the right type.
15767 Op0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op0);
15768 Op1 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op1);
15771 SDValue Result = DAG.getNode(Opc, dl, MVT::v4i32, Op0, Op1);
15773 // Make sure the lower and upper halves are both all-ones.
15774 static const int Mask[] = { 1, 0, 3, 2 };
15775 SDValue Shuf = DAG.getVectorShuffle(MVT::v4i32, dl, Result, Result, Mask);
15776 Result = DAG.getNode(ISD::AND, dl, MVT::v4i32, Result, Shuf);
15779 Result = DAG.getNOT(dl, Result, MVT::v4i32);
15781 return DAG.getNode(ISD::BITCAST, dl, VT, Result);
15785 // Since SSE has no unsigned integer comparisons, we need to flip the sign
15786 // bits of the inputs before performing those operations.
15788 EVT EltVT = VT.getVectorElementType();
15789 SDValue SB = DAG.getConstant(APInt::getSignBit(EltVT.getSizeInBits()), VT);
15790 Op0 = DAG.getNode(ISD::XOR, dl, VT, Op0, SB);
15791 Op1 = DAG.getNode(ISD::XOR, dl, VT, Op1, SB);
15794 SDValue Result = DAG.getNode(Opc, dl, VT, Op0, Op1);
15796 // If the logical-not of the result is required, perform that now.
15798 Result = DAG.getNOT(dl, Result, VT);
15801 Result = DAG.getNode(X86ISD::PCMPEQ, dl, VT, Op0, Result);
15804 Result = DAG.getNode(X86ISD::PCMPEQ, dl, VT, Result,
15805 getZeroVector(VT, Subtarget, DAG, dl));
15810 SDValue X86TargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
15812 MVT VT = Op.getSimpleValueType();
15814 if (VT.isVector()) return LowerVSETCC(Op, Subtarget, DAG);
15816 assert(((!Subtarget->hasAVX512() && VT == MVT::i8) || (VT == MVT::i1))
15817 && "SetCC type must be 8-bit or 1-bit integer");
15818 SDValue Op0 = Op.getOperand(0);
15819 SDValue Op1 = Op.getOperand(1);
15821 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
15823 // Optimize to BT if possible.
15824 // Lower (X & (1 << N)) == 0 to BT(X, N).
15825 // Lower ((X >>u N) & 1) != 0 to BT(X, N).
15826 // Lower ((X >>s N) & 1) != 0 to BT(X, N).
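// (Illustrative) BT copies bit N of X into CF, so the result is then read with
// a SETB/SETAE-style X86ISD::SETCC instead of materializing the mask.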
15827 if (Op0.getOpcode() == ISD::AND && Op0.hasOneUse() &&
15828 Op1.getOpcode() == ISD::Constant &&
15829 cast<ConstantSDNode>(Op1)->isNullValue() &&
15830 (CC == ISD::SETEQ || CC == ISD::SETNE)) {
15831 SDValue NewSetCC = LowerToBT(Op0, CC, dl, DAG);
15832 if (NewSetCC.getNode()) {
15834 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, NewSetCC);
15839 // Look for X == 0, X == 1, X != 0, or X != 1. We can simplify some forms of these.
15841 if (Op1.getOpcode() == ISD::Constant &&
15842 (cast<ConstantSDNode>(Op1)->getZExtValue() == 1 ||
15843 cast<ConstantSDNode>(Op1)->isNullValue()) &&
15844 (CC == ISD::SETEQ || CC == ISD::SETNE)) {
15846 // If the input is a setcc, then reuse the input setcc or use a new one with
15847 // the inverted condition.
15848 if (Op0.getOpcode() == X86ISD::SETCC) {
15849 X86::CondCode CCode = (X86::CondCode)Op0.getConstantOperandVal(0);
15850 bool Invert = (CC == ISD::SETNE) ^
15851 cast<ConstantSDNode>(Op1)->isNullValue();
15855 CCode = X86::GetOppositeBranchCondition(CCode);
15856 SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
15857 DAG.getConstant(CCode, MVT::i8),
15858 Op0.getOperand(1));
15860 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, SetCC);
15864 if ((Op0.getValueType() == MVT::i1) && (Op1.getOpcode() == ISD::Constant) &&
15865 (cast<ConstantSDNode>(Op1)->getZExtValue() == 1) &&
15866 (CC == ISD::SETEQ || CC == ISD::SETNE)) {
15868 ISD::CondCode NewCC = ISD::getSetCCInverse(CC, true);
15869 return DAG.getSetCC(dl, VT, Op0, DAG.getConstant(0, MVT::i1), NewCC);
15872 bool isFP = Op1.getSimpleValueType().isFloatingPoint();
15873 unsigned X86CC = TranslateX86CC(CC, isFP, Op0, Op1, DAG);
15874 if (X86CC == X86::COND_INVALID)
15877 SDValue EFLAGS = EmitCmp(Op0, Op1, X86CC, dl, DAG);
15878 EFLAGS = ConvertCmpIfNecessary(EFLAGS, DAG);
15879 SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
15880 DAG.getConstant(X86CC, MVT::i8), EFLAGS);
15882 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, SetCC);
15886 // isX86LogicalCmp - Return true if opcode is an X86 logical comparison.
15887 static bool isX86LogicalCmp(SDValue Op) {
15888 unsigned Opc = Op.getNode()->getOpcode();
15889 if (Opc == X86ISD::CMP || Opc == X86ISD::COMI || Opc == X86ISD::UCOMI ||
15890 Opc == X86ISD::SAHF)
15892 if (Op.getResNo() == 1 &&
15893 (Opc == X86ISD::ADD ||
15894 Opc == X86ISD::SUB ||
15895 Opc == X86ISD::ADC ||
15896 Opc == X86ISD::SBB ||
15897 Opc == X86ISD::SMUL ||
15898 Opc == X86ISD::UMUL ||
15899 Opc == X86ISD::INC ||
15900 Opc == X86ISD::DEC ||
15901 Opc == X86ISD::OR ||
15902 Opc == X86ISD::XOR ||
15903 Opc == X86ISD::AND))
15906 if (Op.getResNo() == 2 && Opc == X86ISD::UMUL)
15912 static bool isTruncWithZeroHighBitsInput(SDValue V, SelectionDAG &DAG) {
15913 if (V.getOpcode() != ISD::TRUNCATE)
15916 SDValue VOp0 = V.getOperand(0);
15917 unsigned InBits = VOp0.getValueSizeInBits();
15918 unsigned Bits = V.getValueSizeInBits();
15919 return DAG.MaskedValueIsZero(VOp0, APInt::getHighBitsSet(InBits,InBits-Bits));
15922 SDValue X86TargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
15923 bool addTest = true;
15924 SDValue Cond = Op.getOperand(0);
15925 SDValue Op1 = Op.getOperand(1);
15926 SDValue Op2 = Op.getOperand(2);
15928 EVT VT = Op1.getValueType();
15931 // Lower fp selects into a CMP/AND/ANDN/OR sequence when the necessary SSE ops
15932 // are available. Otherwise fp cmovs get lowered into a less efficient branch
15933 // sequence later on.
15934 if (Cond.getOpcode() == ISD::SETCC &&
15935 ((Subtarget->hasSSE2() && (VT == MVT::f32 || VT == MVT::f64)) ||
15936 (Subtarget->hasSSE1() && VT == MVT::f32)) &&
15937 VT == Cond.getOperand(0).getValueType() && Cond->hasOneUse()) {
15938 SDValue CondOp0 = Cond.getOperand(0), CondOp1 = Cond.getOperand(1);
15939 int SSECC = translateX86FSETCC(
15940 cast<CondCodeSDNode>(Cond.getOperand(2))->get(), CondOp0, CondOp1);
15943 if (Subtarget->hasAVX512()) {
15944 SDValue Cmp = DAG.getNode(X86ISD::FSETCC, DL, MVT::i1, CondOp0, CondOp1,
15945 DAG.getConstant(SSECC, MVT::i8));
15946 return DAG.getNode(X86ISD::SELECT, DL, VT, Cmp, Op1, Op2);
15948 SDValue Cmp = DAG.getNode(X86ISD::FSETCC, DL, VT, CondOp0, CondOp1,
15949 DAG.getConstant(SSECC, MVT::i8));
15950 SDValue AndN = DAG.getNode(X86ISD::FANDN, DL, VT, Cmp, Op2);
15951 SDValue And = DAG.getNode(X86ISD::FAND, DL, VT, Cmp, Op1);
15952 return DAG.getNode(X86ISD::FOR, DL, VT, AndN, And);
15956 if (Cond.getOpcode() == ISD::SETCC) {
15957 SDValue NewCond = LowerSETCC(Cond, DAG);
15958 if (NewCond.getNode())
15962 // (select (x == 0), -1, y) -> (sign_bit (x - 1)) | y
15963 // (select (x == 0), y, -1) -> ~(sign_bit (x - 1)) | y
15964 // (select (x != 0), y, -1) -> (sign_bit (x - 1)) | y
15965 // (select (x != 0), -1, y) -> ~(sign_bit (x - 1)) | y
15966 if (Cond.getOpcode() == X86ISD::SETCC &&
15967 Cond.getOperand(1).getOpcode() == X86ISD::CMP &&
15968 isZero(Cond.getOperand(1).getOperand(1))) {
15969 SDValue Cmp = Cond.getOperand(1);
15971 unsigned CondCode =cast<ConstantSDNode>(Cond.getOperand(0))->getZExtValue();
15973 if ((isAllOnes(Op1) || isAllOnes(Op2)) &&
15974 (CondCode == X86::COND_E || CondCode == X86::COND_NE)) {
15975 SDValue Y = isAllOnes(Op2) ? Op1 : Op2;
15977 SDValue CmpOp0 = Cmp.getOperand(0);
15978 // Apply further optimizations for special cases
15979 // (select (x != 0), -1, 0) -> neg & sbb
15980 // (select (x == 0), 0, -1) -> neg & sbb
15981 if (ConstantSDNode *YC = dyn_cast<ConstantSDNode>(Y))
15982 if (YC->isNullValue() &&
15983 (isAllOnes(Op1) == (CondCode == X86::COND_NE))) {
15984 SDVTList VTs = DAG.getVTList(CmpOp0.getValueType(), MVT::i32);
15985 SDValue Neg = DAG.getNode(X86ISD::SUB, DL, VTs,
15986 DAG.getConstant(0, CmpOp0.getValueType()),
15988 SDValue Res = DAG.getNode(X86ISD::SETCC_CARRY, DL, Op.getValueType(),
15989 DAG.getConstant(X86::COND_B, MVT::i8),
15990 SDValue(Neg.getNode(), 1));
15994 Cmp = DAG.getNode(X86ISD::CMP, DL, MVT::i32,
15995 CmpOp0, DAG.getConstant(1, CmpOp0.getValueType()));
15996 Cmp = ConvertCmpIfNecessary(Cmp, DAG);
15998 SDValue Res = // Res = 0 or -1.
15999 DAG.getNode(X86ISD::SETCC_CARRY, DL, Op.getValueType(),
16000 DAG.getConstant(X86::COND_B, MVT::i8), Cmp);
16002 if (isAllOnes(Op1) != (CondCode == X86::COND_E))
16003 Res = DAG.getNOT(DL, Res, Res.getValueType());
16005 ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(Op2);
16006 if (!N2C || !N2C->isNullValue())
16007 Res = DAG.getNode(ISD::OR, DL, Res.getValueType(), Res, Y);
16012 // Look past (and (setcc_carry (cmp ...)), 1).
16013 if (Cond.getOpcode() == ISD::AND &&
16014 Cond.getOperand(0).getOpcode() == X86ISD::SETCC_CARRY) {
16015 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Cond.getOperand(1));
16016 if (C && C->getAPIntValue() == 1)
16017 Cond = Cond.getOperand(0);
16020 // If the condition flag is set by an X86ISD::CMP, then use it as the condition
16021 // setting operand in place of the X86ISD::SETCC.
16022 unsigned CondOpcode = Cond.getOpcode();
16023 if (CondOpcode == X86ISD::SETCC ||
16024 CondOpcode == X86ISD::SETCC_CARRY) {
16025 CC = Cond.getOperand(0);
16027 SDValue Cmp = Cond.getOperand(1);
16028 unsigned Opc = Cmp.getOpcode();
16029 MVT VT = Op.getSimpleValueType();
16031 bool IllegalFPCMov = false;
16032 if (VT.isFloatingPoint() && !VT.isVector() &&
16033 !isScalarFPTypeInSSEReg(VT)) // FPStack?
16034 IllegalFPCMov = !hasFPCMov(cast<ConstantSDNode>(CC)->getSExtValue());
16036 if ((isX86LogicalCmp(Cmp) && !IllegalFPCMov) ||
16037 Opc == X86ISD::BT) { // FIXME
16041 } else if (CondOpcode == ISD::USUBO || CondOpcode == ISD::SSUBO ||
16042 CondOpcode == ISD::UADDO || CondOpcode == ISD::SADDO ||
16043 ((CondOpcode == ISD::UMULO || CondOpcode == ISD::SMULO) &&
16044 Cond.getOperand(0).getValueType() != MVT::i8)) {
16045 SDValue LHS = Cond.getOperand(0);
16046 SDValue RHS = Cond.getOperand(1);
16047 unsigned X86Opcode;
16050 switch (CondOpcode) {
16051 case ISD::UADDO: X86Opcode = X86ISD::ADD; X86Cond = X86::COND_B; break;
16052 case ISD::SADDO: X86Opcode = X86ISD::ADD; X86Cond = X86::COND_O; break;
16053 case ISD::USUBO: X86Opcode = X86ISD::SUB; X86Cond = X86::COND_B; break;
16054 case ISD::SSUBO: X86Opcode = X86ISD::SUB; X86Cond = X86::COND_O; break;
16055 case ISD::UMULO: X86Opcode = X86ISD::UMUL; X86Cond = X86::COND_O; break;
16056 case ISD::SMULO: X86Opcode = X86ISD::SMUL; X86Cond = X86::COND_O; break;
16057 default: llvm_unreachable("unexpected overflowing operator");
16059 if (CondOpcode == ISD::UMULO)
16060 VTs = DAG.getVTList(LHS.getValueType(), LHS.getValueType(),
16063 VTs = DAG.getVTList(LHS.getValueType(), MVT::i32);
16065 SDValue X86Op = DAG.getNode(X86Opcode, DL, VTs, LHS, RHS);
16067 if (CondOpcode == ISD::UMULO)
16068 Cond = X86Op.getValue(2);
16070 Cond = X86Op.getValue(1);
16072 CC = DAG.getConstant(X86Cond, MVT::i8);
16078 // Look past the truncate if the high bits are known zero.
16078 if (isTruncWithZeroHighBitsInput(Cond, DAG))
16079 Cond = Cond.getOperand(0);
16081 // We know the result of AND is compared against zero. Try to match it to BT.
16083 if (Cond.getOpcode() == ISD::AND && Cond.hasOneUse()) {
16084 SDValue NewSetCC = LowerToBT(Cond, ISD::SETNE, DL, DAG);
16085 if (NewSetCC.getNode()) {
16086 CC = NewSetCC.getOperand(0);
16087 Cond = NewSetCC.getOperand(1);
16094 CC = DAG.getConstant(X86::COND_NE, MVT::i8);
16095 Cond = EmitTest(Cond, X86::COND_NE, DL, DAG);
16098 // a < b ? -1 : 0 -> RES = ~setcc_carry
16099 // a < b ? 0 : -1 -> RES = setcc_carry
16100 // a >= b ? -1 : 0 -> RES = setcc_carry
16101 // a >= b ? 0 : -1 -> RES = ~setcc_carry
16102 if (Cond.getOpcode() == X86ISD::SUB) {
16103 Cond = ConvertCmpIfNecessary(Cond, DAG);
16104 unsigned CondCode = cast<ConstantSDNode>(CC)->getZExtValue();
16106 if ((CondCode == X86::COND_AE || CondCode == X86::COND_B) &&
16107 (isAllOnes(Op1) || isAllOnes(Op2)) && (isZero(Op1) || isZero(Op2))) {
16108 SDValue Res = DAG.getNode(X86ISD::SETCC_CARRY, DL, Op.getValueType(),
16109 DAG.getConstant(X86::COND_B, MVT::i8), Cond);
16110 if (isAllOnes(Op1) != (CondCode == X86::COND_B))
16111 return DAG.getNOT(DL, Res, Res.getValueType());
16116 // X86 doesn't have an i8 cmov. If both operands are the result of a truncate
16117 // widen the cmov and push the truncate through. This avoids introducing a new
16118 // branch during isel and doesn't add any extensions.
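// For example (illustrative): select cc, (trunc i32 a to i8), (trunc i32 b to i8)
// becomes (trunc (CMOV i32 a, b, cc) to i8).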
16119 if (Op.getValueType() == MVT::i8 &&
16120 Op1.getOpcode() == ISD::TRUNCATE && Op2.getOpcode() == ISD::TRUNCATE) {
16121 SDValue T1 = Op1.getOperand(0), T2 = Op2.getOperand(0);
16122 if (T1.getValueType() == T2.getValueType() &&
16123 // Blacklist CopyFromReg to avoid partial register stalls.
16124 T1.getOpcode() != ISD::CopyFromReg && T2.getOpcode()!=ISD::CopyFromReg){
16125 SDVTList VTs = DAG.getVTList(T1.getValueType(), MVT::Glue);
16126 SDValue Cmov = DAG.getNode(X86ISD::CMOV, DL, VTs, T2, T1, CC, Cond);
16127 return DAG.getNode(ISD::TRUNCATE, DL, Op.getValueType(), Cmov);
16131 // X86ISD::CMOV means set the result (which is operand 1) to the RHS if
16132 // condition is true.
16133 SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::Glue);
16134 SDValue Ops[] = { Op2, Op1, CC, Cond };
16135 return DAG.getNode(X86ISD::CMOV, DL, VTs, Ops);
16138 static SDValue LowerSIGN_EXTEND_AVX512(SDValue Op, const X86Subtarget *Subtarget,
16139 SelectionDAG &DAG) {
16140 MVT VT = Op->getSimpleValueType(0);
16141 SDValue In = Op->getOperand(0);
16142 MVT InVT = In.getSimpleValueType();
16143 MVT VTElt = VT.getVectorElementType();
16144 MVT InVTElt = InVT.getVectorElementType();
16148 if ((InVTElt == MVT::i1) &&
16149 (((Subtarget->hasBWI() && Subtarget->hasVLX() &&
16150 VT.getSizeInBits() <= 256 && VTElt.getSizeInBits() <= 16)) ||
16152 ((Subtarget->hasBWI() && VT.is512BitVector() &&
16153 VTElt.getSizeInBits() <= 16)) ||
16155 ((Subtarget->hasDQI() && Subtarget->hasVLX() &&
16156 VT.getSizeInBits() <= 256 && VTElt.getSizeInBits() >= 32)) ||
16158 ((Subtarget->hasDQI() && VT.is512BitVector() &&
16159 VTElt.getSizeInBits() >= 32))))
16160 return DAG.getNode(X86ISD::VSEXT, dl, VT, In);
16162 unsigned int NumElts = VT.getVectorNumElements();
16164 if (NumElts != 8 && NumElts != 16)
16167 if (VT.is512BitVector() && InVT.getVectorElementType() != MVT::i1) {
16168 if (In.getOpcode() == X86ISD::VSEXT || In.getOpcode() == X86ISD::VZEXT)
16169 return DAG.getNode(In.getOpcode(), dl, VT, In.getOperand(0));
16170 return DAG.getNode(X86ISD::VSEXT, dl, VT, In);
16173 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
16174 assert (InVT.getVectorElementType() == MVT::i1 && "Unexpected vector type");
16176 MVT ExtVT = (NumElts == 8) ? MVT::v8i64 : MVT::v16i32;
16177 Constant *C = ConstantInt::get(*DAG.getContext(),
16178 APInt::getAllOnesValue(ExtVT.getScalarType().getSizeInBits()));
16180 SDValue CP = DAG.getConstantPool(C, TLI.getPointerTy());
16181 unsigned Alignment = cast<ConstantPoolSDNode>(CP)->getAlignment();
16182 SDValue Ld = DAG.getLoad(ExtVT.getScalarType(), dl, DAG.getEntryNode(), CP,
16183 MachinePointerInfo::getConstantPool(),
16184 false, false, false, Alignment);
16185 SDValue Brcst = DAG.getNode(X86ISD::VBROADCASTM, dl, ExtVT, In, Ld);
16186 if (VT.is512BitVector())
16188 return DAG.getNode(X86ISD::VTRUNC, dl, VT, Brcst);
16191 static SDValue LowerSIGN_EXTEND(SDValue Op, const X86Subtarget *Subtarget,
16192 SelectionDAG &DAG) {
16193 MVT VT = Op->getSimpleValueType(0);
16194 SDValue In = Op->getOperand(0);
16195 MVT InVT = In.getSimpleValueType();
16198 if (VT.is512BitVector() || InVT.getVectorElementType() == MVT::i1)
16199 return LowerSIGN_EXTEND_AVX512(Op, Subtarget, DAG);
16201 if ((VT != MVT::v4i64 || InVT != MVT::v4i32) &&
16202 (VT != MVT::v8i32 || InVT != MVT::v8i16) &&
16203 (VT != MVT::v16i16 || InVT != MVT::v16i8))
16206 if (Subtarget->hasInt256())
16207 return DAG.getNode(X86ISD::VSEXT, dl, VT, In);
16209 // Optimize vectors in AVX mode:
16210 // sign extend v8i16 to v8i32 and v4i32 to v4i64.
16213 // Divide the input vector into two parts;
16214 // for v4i32 the shuffle masks will be { 0, 1, -1, -1 } and { 2, 3, -1, -1 },
16215 // use the vpmovsx instruction to extend v4i32 -> v2i64 and v8i16 -> v4i32,
16216 // then concat the halves back to the original VT.
16218 unsigned NumElems = InVT.getVectorNumElements();
16219 SDValue Undef = DAG.getUNDEF(InVT);
16221 SmallVector<int,8> ShufMask1(NumElems, -1);
16222 for (unsigned i = 0; i != NumElems/2; ++i)
16225 SDValue OpLo = DAG.getVectorShuffle(InVT, dl, In, Undef, &ShufMask1[0]);
16227 SmallVector<int,8> ShufMask2(NumElems, -1);
16228 for (unsigned i = 0; i != NumElems/2; ++i)
16229 ShufMask2[i] = i + NumElems/2;
16231 SDValue OpHi = DAG.getVectorShuffle(InVT, dl, In, Undef, &ShufMask2[0]);
16233 MVT HalfVT = MVT::getVectorVT(VT.getScalarType(),
16234 VT.getVectorNumElements()/2);
16236 OpLo = DAG.getNode(X86ISD::VSEXT, dl, HalfVT, OpLo);
16237 OpHi = DAG.getNode(X86ISD::VSEXT, dl, HalfVT, OpHi);
16239 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, OpLo, OpHi);
16242 // Lower vector extended loads using a shuffle. If SSSE3 is not available we
16243 // may emit an illegal shuffle but the expansion is still better than scalar
16244 // code. We generate X86ISD::VSEXT for SEXTLOADs if it's available, otherwise
16245 // we'll emit a shuffle and an arithmetic shift.
16246 // TODO: It is possible to support ZExt by zeroing the undef values during
16247 // the shuffle phase or after the shuffle.
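// Illustrative sketch for a sext load of v4i8 into v4i32: shuffle each loaded
// byte into the top byte of its 32-bit lane, then arithmetic-shift each lane
// right by 24 to replicate the sign bit.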
16248 static SDValue LowerExtendedLoad(SDValue Op, const X86Subtarget *Subtarget,
16249 SelectionDAG &DAG) {
16250 MVT RegVT = Op.getSimpleValueType();
16251 assert(RegVT.isVector() && "We only custom lower vector sext loads.");
16252 assert(RegVT.isInteger() &&
16253 "We only custom lower integer vector sext loads.");
16255 // Nothing useful we can do without SSE2 shuffles.
16256 assert(Subtarget->hasSSE2() && "We only custom lower sext loads with SSE2.");
16258 LoadSDNode *Ld = cast<LoadSDNode>(Op.getNode());
16260 EVT MemVT = Ld->getMemoryVT();
16261 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
16262 unsigned RegSz = RegVT.getSizeInBits();
16264 ISD::LoadExtType Ext = Ld->getExtensionType();
16266 assert((Ext == ISD::EXTLOAD || Ext == ISD::SEXTLOAD)
16267 && "Only anyext and sext are currently implemented.");
16268 assert(MemVT != RegVT && "Cannot extend to the same type");
16269 assert(MemVT.isVector() && "Must load a vector from memory");
16271 unsigned NumElems = RegVT.getVectorNumElements();
16272 unsigned MemSz = MemVT.getSizeInBits();
16273 assert(RegSz > MemSz && "Register size must be greater than the mem size");
16275 if (Ext == ISD::SEXTLOAD && RegSz == 256 && !Subtarget->hasInt256()) {
16276 // The only way in which we have a legal 256-bit vector result but not the
16277 // integer 256-bit operations needed to directly lower a sextload is if we
16278 // have AVX1 but not AVX2. In that case, we can always emit a sextload to
16279 // a 128-bit vector and a normal sign_extend to 256-bits that should get
16280 // correctly legalized. We do this late to allow the canonical form of
16281 // sextload to persist throughout the rest of the DAG combiner -- it wants
16282 // to fold together any extensions it can, and so will fuse a sign_extend
16283 // of an sextload into a sextload targeting a wider value.
16285 if (MemSz == 128) {
16286 // Just switch this to a normal load.
16287 assert(TLI.isTypeLegal(MemVT) && "If the memory type is a 128-bit type, "
16288 "it must be a legal 128-bit vector "
16290 Load = DAG.getLoad(MemVT, dl, Ld->getChain(), Ld->getBasePtr(),
16291 Ld->getPointerInfo(), Ld->isVolatile(), Ld->isNonTemporal(),
16292 Ld->isInvariant(), Ld->getAlignment());
16294 assert(MemSz < 128 &&
16295 "Can't extend a type wider than 128 bits to a 256 bit vector!");
16296 // Do an sext load to a 128-bit vector type. We want to use the same
16297 // number of elements, but elements half as wide. This will end up being
16298 // recursively lowered by this routine, but will succeed as we definitely
16299 // have all the necessary features if we're using AVX1.
16301 EVT::getIntegerVT(*DAG.getContext(), RegVT.getScalarSizeInBits() / 2);
16302 EVT HalfVecVT = EVT::getVectorVT(*DAG.getContext(), HalfEltVT, NumElems);
16304 DAG.getExtLoad(Ext, dl, HalfVecVT, Ld->getChain(), Ld->getBasePtr(),
16305 Ld->getPointerInfo(), MemVT, Ld->isVolatile(),
16306 Ld->isNonTemporal(), Ld->isInvariant(),
16307 Ld->getAlignment());
16310 // Replace chain users with the new chain.
16311 assert(Load->getNumValues() == 2 && "Loads must carry a chain!");
16312 DAG.ReplaceAllUsesOfValueWith(SDValue(Ld, 1), Load.getValue(1));
16314 // Finally, do a normal sign-extend to the desired register.
16315 return DAG.getSExtOrTrunc(Load, dl, RegVT);
16318 // All sizes must be a power of two.
16319 assert(isPowerOf2_32(RegSz * MemSz * NumElems) &&
16320 "Non-power-of-two elements are not custom lowered!");
16322 // Attempt to load the original value using scalar loads.
16323 // Find the largest scalar type that divides the total loaded size.
16324 MVT SclrLoadTy = MVT::i8;
16325 for (MVT Tp : MVT::integer_valuetypes()) {
16326 if (TLI.isTypeLegal(Tp) && ((MemSz % Tp.getSizeInBits()) == 0)) {
16331 // On 32bit systems, we can't save 64bit integers. Try bitcasting to F64.
16332 if (TLI.isTypeLegal(MVT::f64) && SclrLoadTy.getSizeInBits() < 64 &&
16334 SclrLoadTy = MVT::f64;
16336 // Calculate the number of scalar loads that we need to perform
16337 // in order to load our vector from memory.
16338 unsigned NumLoads = MemSz / SclrLoadTy.getSizeInBits();
16340 assert((Ext != ISD::SEXTLOAD || NumLoads == 1) &&
16341 "Can only lower sext loads with a single scalar load!");
16343 unsigned loadRegZize = RegSz;
16344 if (Ext == ISD::SEXTLOAD && RegSz == 256)
16347 // Represent our vector as a sequence of elements which are the
16348 // largest scalar that we can load.
16349 EVT LoadUnitVecVT = EVT::getVectorVT(
16350 *DAG.getContext(), SclrLoadTy, loadRegZize / SclrLoadTy.getSizeInBits());
16352 // Represent the data using the same element type that is stored in
16353 // memory. In practice, we "widen" MemVT.
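// For example (illustrative): loading a v4i8 memory type into a v4i32 register
// uses a LoadUnitVecVT based on the widest legal scalar load and WideVecVT = v16i8,
// i.e. the memory element type widened to the full register width.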
16355 EVT::getVectorVT(*DAG.getContext(), MemVT.getScalarType(),
16356 loadRegZize / MemVT.getScalarType().getSizeInBits());
16358 assert(WideVecVT.getSizeInBits() == LoadUnitVecVT.getSizeInBits() &&
16359 "Invalid vector type");
16361 // We can't shuffle using an illegal type.
16362 assert(TLI.isTypeLegal(WideVecVT) &&
16363 "We only lower types that form legal widened vector types");
16365 SmallVector<SDValue, 8> Chains;
16366 SDValue Ptr = Ld->getBasePtr();
16367 SDValue Increment =
16368 DAG.getConstant(SclrLoadTy.getSizeInBits() / 8, TLI.getPointerTy());
16369 SDValue Res = DAG.getUNDEF(LoadUnitVecVT);
16371 for (unsigned i = 0; i < NumLoads; ++i) {
16372 // Perform a single load.
16373 SDValue ScalarLoad =
16374 DAG.getLoad(SclrLoadTy, dl, Ld->getChain(), Ptr, Ld->getPointerInfo(),
16375 Ld->isVolatile(), Ld->isNonTemporal(), Ld->isInvariant(),
16376 Ld->getAlignment());
16377 Chains.push_back(ScalarLoad.getValue(1));
16378 // Create the first element type using SCALAR_TO_VECTOR in order to avoid
16379 // another round of DAGCombining.
16381 Res = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, LoadUnitVecVT, ScalarLoad);
16383 Res = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, LoadUnitVecVT, Res,
16384 ScalarLoad, DAG.getIntPtrConstant(i));
16386 Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, Increment);
16389 SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Chains);
16391 // Bitcast the loaded value to a vector of the original element type, in
16392 // the size of the target vector type.
16393 SDValue SlicedVec = DAG.getNode(ISD::BITCAST, dl, WideVecVT, Res);
16394 unsigned SizeRatio = RegSz / MemSz;
16396 if (Ext == ISD::SEXTLOAD) {
16397 // If we have SSE4.1, we can directly emit a VSEXT node.
16398 if (Subtarget->hasSSE41()) {
16399 SDValue Sext = DAG.getNode(X86ISD::VSEXT, dl, RegVT, SlicedVec);
16400 DAG.ReplaceAllUsesOfValueWith(SDValue(Ld, 1), TF);
16404 // Otherwise we'll shuffle the small elements in the high bits of the
16405 // larger type and perform an arithmetic shift. If the shift is not legal
16406 // it's better to scalarize.
16407 assert(TLI.isOperationLegalOrCustom(ISD::SRA, RegVT) &&
16408 "We can't implement a sext load without an arithmetic right shift!");
16410 // Redistribute the loaded elements into the different locations.
16411 SmallVector<int, 16> ShuffleVec(NumElems * SizeRatio, -1);
16412 for (unsigned i = 0; i != NumElems; ++i)
16413 ShuffleVec[i * SizeRatio + SizeRatio - 1] = i;
16415 SDValue Shuff = DAG.getVectorShuffle(
16416 WideVecVT, dl, SlicedVec, DAG.getUNDEF(WideVecVT), &ShuffleVec[0]);
16418 Shuff = DAG.getNode(ISD::BITCAST, dl, RegVT, Shuff);
16420 // Build the arithmetic shift.
16421 unsigned Amt = RegVT.getVectorElementType().getSizeInBits() -
16422 MemVT.getVectorElementType().getSizeInBits();
16424 DAG.getNode(ISD::SRA, dl, RegVT, Shuff, DAG.getConstant(Amt, RegVT));
16426 DAG.ReplaceAllUsesOfValueWith(SDValue(Ld, 1), TF);
16430 // Redistribute the loaded elements into the different locations.
16431 SmallVector<int, 16> ShuffleVec(NumElems * SizeRatio, -1);
16432 for (unsigned i = 0; i != NumElems; ++i)
16433 ShuffleVec[i * SizeRatio] = i;
16435 SDValue Shuff = DAG.getVectorShuffle(WideVecVT, dl, SlicedVec,
16436 DAG.getUNDEF(WideVecVT), &ShuffleVec[0]);
16438 // Bitcast to the requested type.
16439 Shuff = DAG.getNode(ISD::BITCAST, dl, RegVT, Shuff);
16440 DAG.ReplaceAllUsesOfValueWith(SDValue(Ld, 1), TF);
16444 // isAndOrOfSetCCs - Return true if node is an ISD::AND or
16445 // ISD::OR of two X86ISD::SETCC nodes, each of which has no other use apart
16446 // from the AND / OR.
16447 static bool isAndOrOfSetCCs(SDValue Op, unsigned &Opc) {
16448 Opc = Op.getOpcode();
16449 if (Opc != ISD::OR && Opc != ISD::AND)
16451 return (Op.getOperand(0).getOpcode() == X86ISD::SETCC &&
16452 Op.getOperand(0).hasOneUse() &&
16453 Op.getOperand(1).getOpcode() == X86ISD::SETCC &&
16454 Op.getOperand(1).hasOneUse());
16457 // isXor1OfSetCC - Return true if node is an ISD::XOR of an X86ISD::SETCC and
16458 // 1 and that the SETCC node has a single use.
16459 static bool isXor1OfSetCC(SDValue Op) {
16460 if (Op.getOpcode() != ISD::XOR)
16462 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(Op.getOperand(1));
16463 if (N1C && N1C->getAPIntValue() == 1) {
16464 return Op.getOperand(0).getOpcode() == X86ISD::SETCC &&
16465 Op.getOperand(0).hasOneUse();
16470 SDValue X86TargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
16471 bool addTest = true;
16472 SDValue Chain = Op.getOperand(0);
16473 SDValue Cond = Op.getOperand(1);
16474 SDValue Dest = Op.getOperand(2);
16477 bool Inverted = false;
16479 if (Cond.getOpcode() == ISD::SETCC) {
16480 // Check for setcc([su]{add,sub,mul}o == 0).
16481 if (cast<CondCodeSDNode>(Cond.getOperand(2))->get() == ISD::SETEQ &&
16482 isa<ConstantSDNode>(Cond.getOperand(1)) &&
16483 cast<ConstantSDNode>(Cond.getOperand(1))->isNullValue() &&
16484 Cond.getOperand(0).getResNo() == 1 &&
16485 (Cond.getOperand(0).getOpcode() == ISD::SADDO ||
16486 Cond.getOperand(0).getOpcode() == ISD::UADDO ||
16487 Cond.getOperand(0).getOpcode() == ISD::SSUBO ||
16488 Cond.getOperand(0).getOpcode() == ISD::USUBO ||
16489 Cond.getOperand(0).getOpcode() == ISD::SMULO ||
16490 Cond.getOperand(0).getOpcode() == ISD::UMULO)) {
16492 Cond = Cond.getOperand(0);
16494 SDValue NewCond = LowerSETCC(Cond, DAG);
16495 if (NewCond.getNode())
16500 // FIXME: LowerXALUO doesn't handle these!!
16501 else if (Cond.getOpcode() == X86ISD::ADD ||
16502 Cond.getOpcode() == X86ISD::SUB ||
16503 Cond.getOpcode() == X86ISD::SMUL ||
16504 Cond.getOpcode() == X86ISD::UMUL)
16505 Cond = LowerXALUO(Cond, DAG);
16508 // Look past (and (setcc_carry (cmp ...)), 1).
16509 if (Cond.getOpcode() == ISD::AND &&
16510 Cond.getOperand(0).getOpcode() == X86ISD::SETCC_CARRY) {
16511 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Cond.getOperand(1));
16512 if (C && C->getAPIntValue() == 1)
16513 Cond = Cond.getOperand(0);
16516 // If the condition flag is set by an X86ISD::CMP, then use it as the condition
16517 // setting operand in place of the X86ISD::SETCC.
16518 unsigned CondOpcode = Cond.getOpcode();
16519 if (CondOpcode == X86ISD::SETCC ||
16520 CondOpcode == X86ISD::SETCC_CARRY) {
16521 CC = Cond.getOperand(0);
16523 SDValue Cmp = Cond.getOperand(1);
16524 unsigned Opc = Cmp.getOpcode();
16525 // FIXME: WHY THE SPECIAL CASING OF LogicalCmp??
16526 if (isX86LogicalCmp(Cmp) || Opc == X86ISD::BT) {
16530 switch (cast<ConstantSDNode>(CC)->getZExtValue()) {
16534 // These can only come from an arithmetic instruction with overflow,
16535 // e.g. SADDO, UADDO.
16536 Cond = Cond.getNode()->getOperand(1);
16542 CondOpcode = Cond.getOpcode();
16543 if (CondOpcode == ISD::UADDO || CondOpcode == ISD::SADDO ||
16544 CondOpcode == ISD::USUBO || CondOpcode == ISD::SSUBO ||
16545 ((CondOpcode == ISD::UMULO || CondOpcode == ISD::SMULO) &&
16546 Cond.getOperand(0).getValueType() != MVT::i8)) {
16547 SDValue LHS = Cond.getOperand(0);
16548 SDValue RHS = Cond.getOperand(1);
16549 unsigned X86Opcode;
16552 // Keep this in sync with LowerXALUO, otherwise we might create redundant
16553 // instructions that can't be removed afterwards (i.e. X86ISD::ADD and X86ISD::INC).
16555 switch (CondOpcode) {
16556 case ISD::UADDO: X86Opcode = X86ISD::ADD; X86Cond = X86::COND_B; break;
16558 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS))
16560 X86Opcode = X86ISD::INC; X86Cond = X86::COND_O;
16563 X86Opcode = X86ISD::ADD; X86Cond = X86::COND_O; break;
16564 case ISD::USUBO: X86Opcode = X86ISD::SUB; X86Cond = X86::COND_B; break;
16566 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS))
16568 X86Opcode = X86ISD::DEC; X86Cond = X86::COND_O;
16571 X86Opcode = X86ISD::SUB; X86Cond = X86::COND_O; break;
16572 case ISD::UMULO: X86Opcode = X86ISD::UMUL; X86Cond = X86::COND_O; break;
16573 case ISD::SMULO: X86Opcode = X86ISD::SMUL; X86Cond = X86::COND_O; break;
16574 default: llvm_unreachable("unexpected overflowing operator");
16577 X86Cond = X86::GetOppositeBranchCondition((X86::CondCode)X86Cond);
16578 if (CondOpcode == ISD::UMULO)
16579 VTs = DAG.getVTList(LHS.getValueType(), LHS.getValueType(),
16582 VTs = DAG.getVTList(LHS.getValueType(), MVT::i32);
16584 SDValue X86Op = DAG.getNode(X86Opcode, dl, VTs, LHS, RHS);
16586 if (CondOpcode == ISD::UMULO)
16587 Cond = X86Op.getValue(2);
16589 Cond = X86Op.getValue(1);
16591 CC = DAG.getConstant(X86Cond, MVT::i8);
16595 if (Cond.hasOneUse() && isAndOrOfSetCCs(Cond, CondOpc)) {
16596 SDValue Cmp = Cond.getOperand(0).getOperand(1);
16597 if (CondOpc == ISD::OR) {
16598 // Also, recognize the pattern generated by an FCMP_UNE. We can emit
16599 // two branches instead of an explicit OR instruction with a separate test.
16601 if (Cmp == Cond.getOperand(1).getOperand(1) &&
16602 isX86LogicalCmp(Cmp)) {
16603 CC = Cond.getOperand(0).getOperand(0);
16604 Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
16605 Chain, Dest, CC, Cmp);
16606 CC = Cond.getOperand(1).getOperand(0);
16610 } else { // ISD::AND
16611 // Also, recognize the pattern generated by an FCMP_OEQ. We can emit
16612 // two branches instead of an explicit AND instruction with a
16613 // separate test. However, we only do this if this block doesn't
16614 // have a fall-through edge, because this requires an explicit
16615 // jmp when the condition is false.
16616 if (Cmp == Cond.getOperand(1).getOperand(1) &&
16617 isX86LogicalCmp(Cmp) &&
16618 Op.getNode()->hasOneUse()) {
16619 X86::CondCode CCode =
16620 (X86::CondCode)Cond.getOperand(0).getConstantOperandVal(0);
16621 CCode = X86::GetOppositeBranchCondition(CCode);
16622 CC = DAG.getConstant(CCode, MVT::i8);
16623 SDNode *User = *Op.getNode()->use_begin();
16624 // Look for an unconditional branch following this conditional branch.
16625 // We need this because we need to reverse the successors in order
16626 // to implement FCMP_OEQ.
16627 if (User->getOpcode() == ISD::BR) {
16628 SDValue FalseBB = User->getOperand(1);
16630 DAG.UpdateNodeOperands(User, User->getOperand(0), Dest);
16631 assert(NewBR == User);
16635 Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
16636 Chain, Dest, CC, Cmp);
16637 X86::CondCode CCode =
16638 (X86::CondCode)Cond.getOperand(1).getConstantOperandVal(0);
16639 CCode = X86::GetOppositeBranchCondition(CCode);
16640 CC = DAG.getConstant(CCode, MVT::i8);
16646 } else if (Cond.hasOneUse() && isXor1OfSetCC(Cond)) {
16647 // Recognize the xorb (setcc), 1 pattern; the xor inverts the condition.
16648 // It should be transformed by the DAG combiner, except when the condition
16649 // is set by an arithmetic-with-overflow node.
16650 X86::CondCode CCode =
16651 (X86::CondCode)Cond.getOperand(0).getConstantOperandVal(0);
16652 CCode = X86::GetOppositeBranchCondition(CCode);
16653 CC = DAG.getConstant(CCode, MVT::i8);
16654 Cond = Cond.getOperand(0).getOperand(1);
16656 } else if (Cond.getOpcode() == ISD::SETCC &&
16657 cast<CondCodeSDNode>(Cond.getOperand(2))->get() == ISD::SETOEQ) {
16658 // For FCMP_OEQ, we can emit
16659 // two branches instead of an explicit AND instruction with a
16660 // separate test. However, we only do this if this block doesn't
16661 // have a fall-through edge, because this requires an explicit
16662 // jmp when the condition is false.
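// (Illustrative) After ucomiss, ordered-equal means ZF=1 and PF=0, so the false
// block is taken on COND_NE or on COND_P, and the true block is reached only by
// falling through to the rewritten unconditional branch.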
16663 if (Op.getNode()->hasOneUse()) {
16664 SDNode *User = *Op.getNode()->use_begin();
16665 // Look for an unconditional branch following this conditional branch.
16666 // We need this because we need to reverse the successors in order
16667 // to implement FCMP_OEQ.
16668 if (User->getOpcode() == ISD::BR) {
16669 SDValue FalseBB = User->getOperand(1);
16671 DAG.UpdateNodeOperands(User, User->getOperand(0), Dest);
16672 assert(NewBR == User);
16676 SDValue Cmp = DAG.getNode(X86ISD::CMP, dl, MVT::i32,
16677 Cond.getOperand(0), Cond.getOperand(1));
16678 Cmp = ConvertCmpIfNecessary(Cmp, DAG);
16679 CC = DAG.getConstant(X86::COND_NE, MVT::i8);
16680 Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
16681 Chain, Dest, CC, Cmp);
16682 CC = DAG.getConstant(X86::COND_P, MVT::i8);
16687 } else if (Cond.getOpcode() == ISD::SETCC &&
16688 cast<CondCodeSDNode>(Cond.getOperand(2))->get() == ISD::SETUNE) {
16689 // For FCMP_UNE, we can emit
16690 // two branches instead of an explicit AND instruction with a
16691 // separate test. However, we only do this if this block doesn't
16692 // have a fall-through edge, because this requires an explicit
16693 // jmp when the condition is false.
16694 if (Op.getNode()->hasOneUse()) {
16695 SDNode *User = *Op.getNode()->use_begin();
16696 // Look for an unconditional branch following this conditional branch.
16697 // We need this because we need to reverse the successors in order
16698 // to implement FCMP_UNE.
16699 if (User->getOpcode() == ISD::BR) {
16700 SDValue FalseBB = User->getOperand(1);
16702 DAG.UpdateNodeOperands(User, User->getOperand(0), Dest);
16703 assert(NewBR == User);
16706 SDValue Cmp = DAG.getNode(X86ISD::CMP, dl, MVT::i32,
16707 Cond.getOperand(0), Cond.getOperand(1));
16708 Cmp = ConvertCmpIfNecessary(Cmp, DAG);
16709 CC = DAG.getConstant(X86::COND_NE, MVT::i8);
16710 Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
16711 Chain, Dest, CC, Cmp);
16712 CC = DAG.getConstant(X86::COND_NP, MVT::i8);
16723 // Look past the truncate if the high bits are known zero.
16723 if (isTruncWithZeroHighBitsInput(Cond, DAG))
16724 Cond = Cond.getOperand(0);
16726 // We know the result of AND is compared against zero. Try to match it to BT.
16728 if (Cond.getOpcode() == ISD::AND && Cond.hasOneUse()) {
16729 SDValue NewSetCC = LowerToBT(Cond, ISD::SETNE, dl, DAG);
16730 if (NewSetCC.getNode()) {
16731 CC = NewSetCC.getOperand(0);
16732 Cond = NewSetCC.getOperand(1);
16739 X86::CondCode X86Cond = Inverted ? X86::COND_E : X86::COND_NE;
16740 CC = DAG.getConstant(X86Cond, MVT::i8);
16741 Cond = EmitTest(Cond, X86Cond, dl, DAG);
16743 Cond = ConvertCmpIfNecessary(Cond, DAG);
16744 return DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
16745 Chain, Dest, CC, Cond);
16748 // Lower dynamic stack allocation to _alloca call for Cygwin/Mingw targets.
16749 // Calls to _alloca are needed to probe the stack when allocating more than 4k
16750 // bytes in one go. Touching the stack at 4K increments is necessary to ensure
16751 // that the guard pages used by the OS virtual memory manager are allocated in
16752 // correct sequence.
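// For example (illustrative): allocating 16 KB in one go must touch SP-4K,
// SP-8K, ... in order, which is what the probe call guarantees; a single large
// SUB of the stack pointer could skip past the guard page.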
16754 X86TargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
16755 SelectionDAG &DAG) const {
16756 MachineFunction &MF = DAG.getMachineFunction();
16757 bool SplitStack = MF.shouldSplitStack();
16758 bool Lower = (Subtarget->isOSWindows() && !Subtarget->isTargetMachO()) ||
16763 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
16764 SDNode* Node = Op.getNode();
16766 unsigned SPReg = TLI.getStackPointerRegisterToSaveRestore();
16767 assert(SPReg && "Target cannot require DYNAMIC_STACKALLOC expansion and"
16768 " not tell us which reg is the stack pointer!");
16769 EVT VT = Node->getValueType(0);
16770 SDValue Tmp1 = SDValue(Node, 0);
16771 SDValue Tmp2 = SDValue(Node, 1);
16772 SDValue Tmp3 = Node->getOperand(2);
16773 SDValue Chain = Tmp1.getOperand(0);
16775 // Chain the dynamic stack allocation so that it doesn't modify the stack
16776 // pointer when other instructions are using the stack.
16777 Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(0, true),
16780 SDValue Size = Tmp2.getOperand(1);
16781 SDValue SP = DAG.getCopyFromReg(Chain, dl, SPReg, VT);
16782 Chain = SP.getValue(1);
16783 unsigned Align = cast<ConstantSDNode>(Tmp3)->getZExtValue();
16784 const TargetFrameLowering &TFI = *Subtarget->getFrameLowering();
16785 unsigned StackAlign = TFI.getStackAlignment();
16786 Tmp1 = DAG.getNode(ISD::SUB, dl, VT, SP, Size); // Value
16787 if (Align > StackAlign)
16788 Tmp1 = DAG.getNode(ISD::AND, dl, VT, Tmp1,
16789 DAG.getConstant(-(uint64_t)Align, VT));
16790 Chain = DAG.getCopyToReg(Chain, dl, SPReg, Tmp1); // Output chain
16792 Tmp2 = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(0, true),
16793 DAG.getIntPtrConstant(0, true), SDValue(),
16796 SDValue Ops[2] = { Tmp1, Tmp2 };
16797 return DAG.getMergeValues(Ops, dl);
16801 SDValue Chain = Op.getOperand(0);
16802 SDValue Size = Op.getOperand(1);
16803 unsigned Align = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue();
16804 EVT VT = Op.getNode()->getValueType(0);
16806 bool Is64Bit = Subtarget->is64Bit();
16807 EVT SPTy = getPointerTy();
16810 MachineRegisterInfo &MRI = MF.getRegInfo();
16813 // The 64-bit implementation of segmented stacks needs to clobber both r10
16814 // and r11. This makes it impossible to use it along with nested parameters.
16815 const Function *F = MF.getFunction();
16817 for (Function::const_arg_iterator I = F->arg_begin(), E = F->arg_end();
16819 if (I->hasNestAttr())
16820 report_fatal_error("Cannot use segmented stacks with functions that "
16821 "have nested arguments.");
16824 const TargetRegisterClass *AddrRegClass =
16825 getRegClassFor(getPointerTy());
16826 unsigned Vreg = MRI.createVirtualRegister(AddrRegClass);
16827 Chain = DAG.getCopyToReg(Chain, dl, Vreg, Size);
16828 SDValue Value = DAG.getNode(X86ISD::SEG_ALLOCA, dl, SPTy, Chain,
16829 DAG.getRegister(Vreg, SPTy));
16830 SDValue Ops1[2] = { Value, Chain };
16831 return DAG.getMergeValues(Ops1, dl);
16834 const unsigned Reg = (Subtarget->isTarget64BitLP64() ? X86::RAX : X86::EAX);
16836 Chain = DAG.getCopyToReg(Chain, dl, Reg, Size, Flag);
16837 Flag = Chain.getValue(1);
16838 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
16840 Chain = DAG.getNode(X86ISD::WIN_ALLOCA, dl, NodeTys, Chain, Flag);
16842 const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
16843 unsigned SPReg = RegInfo->getStackRegister();
16844 SDValue SP = DAG.getCopyFromReg(Chain, dl, SPReg, SPTy);
16845 Chain = SP.getValue(1);
16848 SP = DAG.getNode(ISD::AND, dl, VT, SP.getValue(0),
16849 DAG.getConstant(-(uint64_t)Align, VT));
16850 Chain = DAG.getCopyToReg(Chain, dl, SPReg, SP);
16853 SDValue Ops1[2] = { SP, Chain };
16854 return DAG.getMergeValues(Ops1, dl);
16858 SDValue X86TargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const {
16859 MachineFunction &MF = DAG.getMachineFunction();
16860 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
16862 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
16865 if (!Subtarget->is64Bit() || Subtarget->isTargetWin64()) {
16866 // vastart just stores the address of the VarArgsFrameIndex slot into the
16867 // memory location argument.
16868 SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(),
16870 return DAG.getStore(Op.getOperand(0), DL, FR, Op.getOperand(1),
16871 MachinePointerInfo(SV), false, false, 0);
16875 // gp_offset (0 - 6 * 8)
16876 // fp_offset (48 - 48 + 8 * 16)
16877 // overflow_arg_area (points to parameters passed in memory).
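// For reference (illustrative), the SysV x86-64 __va_list_tag layout is:
//   struct __va_list_tag {
//     unsigned gp_offset;         // offset 0
//     unsigned fp_offset;         // offset 4
//     void    *overflow_arg_area; // offset 8
//     void    *reg_save_area;     // offset 16
//   };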
16879 SmallVector<SDValue, 8> MemOps;
16880 SDValue FIN = Op.getOperand(1);
16882 SDValue Store = DAG.getStore(Op.getOperand(0), DL,
16883 DAG.getConstant(FuncInfo->getVarArgsGPOffset(),
16885 FIN, MachinePointerInfo(SV), false, false, 0);
16886 MemOps.push_back(Store);
16889 FIN = DAG.getNode(ISD::ADD, DL, getPointerTy(),
16890 FIN, DAG.getIntPtrConstant(4));
16891 Store = DAG.getStore(Op.getOperand(0), DL,
16892 DAG.getConstant(FuncInfo->getVarArgsFPOffset(),
16894 FIN, MachinePointerInfo(SV, 4), false, false, 0);
16895 MemOps.push_back(Store);
16897 // Store ptr to overflow_arg_area
16898 FIN = DAG.getNode(ISD::ADD, DL, getPointerTy(),
16899 FIN, DAG.getIntPtrConstant(4));
16900 SDValue OVFIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(),
16902 Store = DAG.getStore(Op.getOperand(0), DL, OVFIN, FIN,
16903 MachinePointerInfo(SV, 8),
16905 MemOps.push_back(Store);
16907 // Store ptr to reg_save_area.
16908 FIN = DAG.getNode(ISD::ADD, DL, getPointerTy(),
16909 FIN, DAG.getIntPtrConstant(8));
16910 SDValue RSFIN = DAG.getFrameIndex(FuncInfo->getRegSaveFrameIndex(),
16912 Store = DAG.getStore(Op.getOperand(0), DL, RSFIN, FIN,
16913 MachinePointerInfo(SV, 16), false, false, 0);
16914 MemOps.push_back(Store);
16915 return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOps);
16918 SDValue X86TargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG) const {
16919 assert(Subtarget->is64Bit() &&
16920 "LowerVAARG only handles 64-bit va_arg!");
16921 assert((Subtarget->isTargetLinux() ||
16922 Subtarget->isTargetDarwin()) &&
16923 "Unhandled target in LowerVAARG");
16924 assert(Op.getNode()->getNumOperands() == 4);
16925 SDValue Chain = Op.getOperand(0);
16926 SDValue SrcPtr = Op.getOperand(1);
16927 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
16928 unsigned Align = Op.getConstantOperandVal(3);
16931 EVT ArgVT = Op.getNode()->getValueType(0);
16932 Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
16933 uint32_t ArgSize = getDataLayout()->getTypeAllocSize(ArgTy);
16936 // Decide which area this value should be read from.
16937 // TODO: Implement the AMD64 ABI in its entirety. This simple
16938 // selection mechanism works only for the basic types.
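// For example (illustrative): an i32 or i64 argument takes the gp_offset path
// (ArgMode 1), a float or double takes the fp_offset / XMM path (ArgMode 2),
// and f80 is not handled here.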
16939 if (ArgVT == MVT::f80) {
16940 llvm_unreachable("va_arg for f80 not yet implemented");
16941 } else if (ArgVT.isFloatingPoint() && ArgSize <= 16 /*bytes*/) {
16942 ArgMode = 2; // Argument passed in XMM register. Use fp_offset.
16943 } else if (ArgVT.isInteger() && ArgSize <= 32 /*bytes*/) {
16944 ArgMode = 1; // Argument passed in GPR64 register(s). Use gp_offset.
16946 llvm_unreachable("Unhandled argument type in LowerVAARG");
16949 if (ArgMode == 2) {
16950 // Sanity Check: Make sure using fp_offset makes sense.
16951 assert(!DAG.getTarget().Options.UseSoftFloat &&
16952 !(DAG.getMachineFunction()
16953 .getFunction()->getAttributes()
16954 .hasAttribute(AttributeSet::FunctionIndex,
16955 Attribute::NoImplicitFloat)) &&
16956 Subtarget->hasSSE1());
16959 // Insert VAARG_64 node into the DAG
16960 // VAARG_64 returns two values: Variable Argument Address, Chain
16961 SmallVector<SDValue, 11> InstOps;
16962 InstOps.push_back(Chain);
16963 InstOps.push_back(SrcPtr);
16964 InstOps.push_back(DAG.getConstant(ArgSize, MVT::i32));
16965 InstOps.push_back(DAG.getConstant(ArgMode, MVT::i8));
16966 InstOps.push_back(DAG.getConstant(Align, MVT::i32));
16967 SDVTList VTs = DAG.getVTList(getPointerTy(), MVT::Other);
16968 SDValue VAARG = DAG.getMemIntrinsicNode(X86ISD::VAARG_64, dl,
16969 VTs, InstOps, MVT::i64,
16970 MachinePointerInfo(SV),
16972 /*Volatile=*/false,
16974 /*WriteMem=*/true);
16975 Chain = VAARG.getValue(1);
16977 // Load the next argument and return it
16978 return DAG.getLoad(ArgVT, dl,
16981 MachinePointerInfo(),
16982 false, false, false, 0);
16985 static SDValue LowerVACOPY(SDValue Op, const X86Subtarget *Subtarget,
16986 SelectionDAG &DAG) {
16987 // X86-64 va_list is a struct { i32, i32, i8*, i8* }.
16988 assert(Subtarget->is64Bit() && "This code only handles 64-bit va_copy!");
16989 SDValue Chain = Op.getOperand(0);
16990 SDValue DstPtr = Op.getOperand(1);
16991 SDValue SrcPtr = Op.getOperand(2);
16992 const Value *DstSV = cast<SrcValueSDNode>(Op.getOperand(3))->getValue();
16993 const Value *SrcSV = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();
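// The memcpy below copies sizeof(__va_list_tag) == 24 bytes (4 + 4 + 8 + 8)
// with 8-byte alignment, matching the struct layout noted above.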
16996 return DAG.getMemcpy(Chain, DL, DstPtr, SrcPtr,
16997 DAG.getIntPtrConstant(24), 8, /*isVolatile*/false,
16999 MachinePointerInfo(DstSV), MachinePointerInfo(SrcSV));
17002 // getTargetVShiftByConstNode - Handle vector element shifts where the shift
17003 // amount is a constant. Takes immediate version of shift as input.
17004 static SDValue getTargetVShiftByConstNode(unsigned Opc, SDLoc dl, MVT VT,
17005 SDValue SrcOp, uint64_t ShiftAmt,
17006 SelectionDAG &DAG) {
17007 MVT ElementType = VT.getVectorElementType();
17009 // Fold this packed shift into its first operand if ShiftAmt is 0.
17013 // Check for ShiftAmt >= element width
17014 if (ShiftAmt >= ElementType.getSizeInBits()) {
17015 if (Opc == X86ISD::VSRAI)
17016 ShiftAmt = ElementType.getSizeInBits() - 1;
17018 return DAG.getConstant(0, VT);
17021 assert((Opc == X86ISD::VSHLI || Opc == X86ISD::VSRLI || Opc == X86ISD::VSRAI)
17022 && "Unknown target vector shift-by-constant node");
17024 // Fold this packed vector shift into a build vector if SrcOp is a
17025 // vector of Constants or UNDEFs, and SrcOp's value type is the same as VT.
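// For example (illustrative): (VSHLI <i32 1, i32 2, undef, i32 4>, 3) folds to
// the build_vector <i32 8, i32 16, undef, i32 32>.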
17026 if (VT == SrcOp.getSimpleValueType() &&
17027 ISD::isBuildVectorOfConstantSDNodes(SrcOp.getNode())) {
17028 SmallVector<SDValue, 8> Elts;
17029 unsigned NumElts = SrcOp->getNumOperands();
17030 ConstantSDNode *ND;
17033 default: llvm_unreachable(nullptr);
17034 case X86ISD::VSHLI:
17035 for (unsigned i=0; i!=NumElts; ++i) {
17036 SDValue CurrentOp = SrcOp->getOperand(i);
17037 if (CurrentOp->getOpcode() == ISD::UNDEF) {
17038 Elts.push_back(CurrentOp);
17041 ND = cast<ConstantSDNode>(CurrentOp);
17042 const APInt &C = ND->getAPIntValue();
17043 Elts.push_back(DAG.getConstant(C.shl(ShiftAmt), ElementType));
17046 case X86ISD::VSRLI:
17047 for (unsigned i=0; i!=NumElts; ++i) {
17048 SDValue CurrentOp = SrcOp->getOperand(i);
17049 if (CurrentOp->getOpcode() == ISD::UNDEF) {
17050 Elts.push_back(CurrentOp);
17053 ND = cast<ConstantSDNode>(CurrentOp);
17054 const APInt &C = ND->getAPIntValue();
17055 Elts.push_back(DAG.getConstant(C.lshr(ShiftAmt), ElementType));
17058 case X86ISD::VSRAI:
17059 for (unsigned i=0; i!=NumElts; ++i) {
17060 SDValue CurrentOp = SrcOp->getOperand(i);
17061 if (CurrentOp->getOpcode() == ISD::UNDEF) {
17062 Elts.push_back(CurrentOp);
17065 ND = cast<ConstantSDNode>(CurrentOp);
17066 const APInt &C = ND->getAPIntValue();
17067 Elts.push_back(DAG.getConstant(C.ashr(ShiftAmt), ElementType));
17072 return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Elts);
17075 return DAG.getNode(Opc, dl, VT, SrcOp, DAG.getConstant(ShiftAmt, MVT::i8));
17078 // getTargetVShiftNode - Handle vector element shifts where the shift amount
17079 // may or may not be a constant. Takes immediate version of shift as input.
17080 static SDValue getTargetVShiftNode(unsigned Opc, SDLoc dl, MVT VT,
17081 SDValue SrcOp, SDValue ShAmt,
17082 SelectionDAG &DAG) {
17083 MVT SVT = ShAmt.getSimpleValueType();
17084 assert((SVT == MVT::i32 || SVT == MVT::i64) && "Unexpected value type!");
17086 // Catch shift-by-constant.
17087 if (ConstantSDNode *CShAmt = dyn_cast<ConstantSDNode>(ShAmt))
17088 return getTargetVShiftByConstNode(Opc, dl, VT, SrcOp,
17089 CShAmt->getZExtValue(), DAG);
17091 // Change opcode to non-immediate version
17093 default: llvm_unreachable("Unknown target vector shift node");
17094 case X86ISD::VSHLI: Opc = X86ISD::VSHL; break;
17095 case X86ISD::VSRLI: Opc = X86ISD::VSRL; break;
17096 case X86ISD::VSRAI: Opc = X86ISD::VSRA; break;
17099 const X86Subtarget &Subtarget =
17100 static_cast<const X86Subtarget &>(DAG.getSubtarget());
17101 if (Subtarget.hasSSE41() && ShAmt.getOpcode() == ISD::ZERO_EXTEND &&
17102 ShAmt.getOperand(0).getSimpleValueType() == MVT::i16) {
17103 // Let the shuffle legalizer expand this shift amount node.
17104 SDValue Op0 = ShAmt.getOperand(0);
17105 Op0 = DAG.getNode(ISD::SCALAR_TO_VECTOR, SDLoc(Op0), MVT::v8i16, Op0);
17106 ShAmt = getShuffleVectorZeroOrUndef(Op0, 0, true, &Subtarget, DAG);
17108 // Need to build a vector containing shift amount.
17109 // SSE/AVX packed shifts only use the lower 64 bits of the shift count.
17110 SmallVector<SDValue, 4> ShOps;
17111 ShOps.push_back(ShAmt);
17112 if (SVT == MVT::i32) {
17113 ShOps.push_back(DAG.getConstant(0, SVT));
17114 ShOps.push_back(DAG.getUNDEF(SVT));
17116 ShOps.push_back(DAG.getUNDEF(SVT));
17118 MVT BVT = SVT == MVT::i32 ? MVT::v4i32 : MVT::v2i64;
17119 ShAmt = DAG.getNode(ISD::BUILD_VECTOR, dl, BVT, ShOps);
17122 // The return type has to be a 128-bit type with the same element
17123 // type as the input type.
17124 MVT EltVT = VT.getVectorElementType();
17125 EVT ShVT = MVT::getVectorVT(EltVT, 128/EltVT.getSizeInBits());
17127 ShAmt = DAG.getNode(ISD::BITCAST, dl, ShVT, ShAmt);
17128 return DAG.getNode(Opc, dl, VT, SrcOp, ShAmt);
/// \brief Return (and \p Op, \p Mask) for compare instructions or
/// (vselect \p Mask, \p Op, \p PreservedSrc) for others along with the
/// necessary casting for \p Mask when lowering masking intrinsics.
static SDValue getVectorMaskingNode(SDValue Op, SDValue Mask,
                                    SDValue PreservedSrc,
                                    const X86Subtarget *Subtarget,
                                    SelectionDAG &DAG) {
  EVT VT = Op.getValueType();
  EVT MaskVT = EVT::getVectorVT(*DAG.getContext(),
                                MVT::i1, VT.getVectorNumElements());
  EVT BitcastVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
                                   Mask.getValueType().getSizeInBits());
  SDLoc dl(Op);

  assert(MaskVT.isSimple() && "invalid mask type");

  if (isAllOnes(Mask))
    return Op;

  // When MaskVT is v2i1 or v4i1, only the low 2 or 4 elements are
  // extracted by EXTRACT_SUBVECTOR.
  SDValue VMask = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MaskVT,
                              DAG.getNode(ISD::BITCAST, dl, BitcastVT, Mask),
                              DAG.getIntPtrConstant(0));

  switch (Op.getOpcode()) {
  default: break;
  case X86ISD::PCMPEQM:
  case X86ISD::PCMPGTM:
  case X86ISD::CMPM:
  case X86ISD::CMPMU:
    return DAG.getNode(ISD::AND, dl, VT, Op, VMask);
  }
  if (PreservedSrc.getOpcode() == ISD::UNDEF)
    PreservedSrc = getZeroVector(VT, Subtarget, DAG, dl);
  return DAG.getNode(ISD::VSELECT, dl, VT, VMask, Op, PreservedSrc);
}
/// \brief Creates an SDNode for a predicated scalar operation.
/// \returns (X86vselect \p Mask, \p Op, \p PreservedSrc).
/// The mask is coming in as MVT::i8 and it should be truncated
/// to MVT::i1 while lowering masking intrinsics.
/// The main difference between ScalarMaskingNode and VectorMaskingNode is that
/// the former uses "X86select" instead of "vselect"; we just can't create the
/// "vselect" node for a scalar instruction.
static SDValue getScalarMaskingNode(SDValue Op, SDValue Mask,
                                    SDValue PreservedSrc,
                                    const X86Subtarget *Subtarget,
                                    SelectionDAG &DAG) {
  if (isAllOnes(Mask))
    return Op;

  EVT VT = Op.getValueType();
  SDLoc dl(Op);
  // The mask should be of type MVT::i1.
  SDValue IMask = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, Mask);

  if (PreservedSrc.getOpcode() == ISD::UNDEF)
    PreservedSrc = getZeroVector(VT, Subtarget, DAG, dl);
  return DAG.getNode(X86ISD::SELECT, dl, VT, IMask, Op, PreservedSrc);
}
17193 static SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, const X86Subtarget *Subtarget,
17194 SelectionDAG &DAG) {
17196 unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
17197 EVT VT = Op.getValueType();
17198 const IntrinsicData* IntrData = getIntrinsicWithoutChain(IntNo);
17200 switch(IntrData->Type) {
17201 case INTR_TYPE_1OP:
17202 return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(), Op.getOperand(1));
17203 case INTR_TYPE_2OP:
17204 return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(), Op.getOperand(1),
17206 case INTR_TYPE_3OP:
17207 return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(), Op.getOperand(1),
17208 Op.getOperand(2), Op.getOperand(3));
17209 case INTR_TYPE_1OP_MASK_RM: {
17210 SDValue Src = Op.getOperand(1);
17211 SDValue Src0 = Op.getOperand(2);
17212 SDValue Mask = Op.getOperand(3);
17213 SDValue RoundingMode = Op.getOperand(4);
17214 return getVectorMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT, Src,
17216 Mask, Src0, Subtarget, DAG);
17218 case INTR_TYPE_SCALAR_MASK_RM: {
17219 SDValue Src1 = Op.getOperand(1);
17220 SDValue Src2 = Op.getOperand(2);
17221 SDValue Src0 = Op.getOperand(3);
17222 SDValue Mask = Op.getOperand(4);
17223 SDValue RoundingMode = Op.getOperand(5);
17224 return getScalarMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT, Src1, Src2,
17226 Mask, Src0, Subtarget, DAG);
17228 case INTR_TYPE_2OP_MASK: {
17229 SDValue Mask = Op.getOperand(4);
17230 SDValue PassThru = Op.getOperand(3);
17231 unsigned IntrWithRoundingModeOpcode = IntrData->Opc1;
17232 if (IntrWithRoundingModeOpcode != 0) {
17233 unsigned Round = cast<ConstantSDNode>(Op.getOperand(5))->getZExtValue();
17234 if (Round != X86::STATIC_ROUNDING::CUR_DIRECTION) {
17235 return getVectorMaskingNode(DAG.getNode(IntrWithRoundingModeOpcode,
17236 dl, Op.getValueType(),
17237 Op.getOperand(1), Op.getOperand(2),
17238 Op.getOperand(3), Op.getOperand(5)),
17239 Mask, PassThru, Subtarget, DAG);
17242 return getVectorMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT,
17245 Mask, PassThru, Subtarget, DAG);
17247 case FMA_OP_MASK: {
17248 SDValue Src1 = Op.getOperand(1);
17249 SDValue Src2 = Op.getOperand(2);
17250 SDValue Src3 = Op.getOperand(3);
17251 SDValue Mask = Op.getOperand(4);
17252 unsigned IntrWithRoundingModeOpcode = IntrData->Opc1;
17253 if (IntrWithRoundingModeOpcode != 0) {
17254 SDValue Rnd = Op.getOperand(5);
17255 if (cast<ConstantSDNode>(Rnd)->getZExtValue() !=
17256 X86::STATIC_ROUNDING::CUR_DIRECTION)
17257 return getVectorMaskingNode(DAG.getNode(IntrWithRoundingModeOpcode,
17258 dl, Op.getValueType(),
17259 Src1, Src2, Src3, Rnd),
17260 Mask, Src1, Subtarget, DAG);
17262 return getVectorMaskingNode(DAG.getNode(IntrData->Opc0,
17263 dl, Op.getValueType(),
17265 Mask, Src1, Subtarget, DAG);
17268 case CMP_MASK_CC: {
17269 // Comparison intrinsics with masks.
17270 // Example of transformation:
17271 // (i8 (int_x86_avx512_mask_pcmpeq_q_128
17272 // (v2i64 %a), (v2i64 %b), (i8 %mask))) ->
17274 // (v8i1 (insert_subvector undef,
17275 // (v2i1 (and (PCMPEQM %a, %b),
17276 // (extract_subvector
17277 // (v8i1 (bitcast %mask)), 0))), 0))))
17278 EVT VT = Op.getOperand(1).getValueType();
17279 EVT MaskVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
17280 VT.getVectorNumElements());
17281 SDValue Mask = Op.getOperand((IntrData->Type == CMP_MASK_CC) ? 4 : 3);
17282 EVT BitcastVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
17283 Mask.getValueType().getSizeInBits());
17285 if (IntrData->Type == CMP_MASK_CC) {
17286 Cmp = DAG.getNode(IntrData->Opc0, dl, MaskVT, Op.getOperand(1),
17287 Op.getOperand(2), Op.getOperand(3));
17289 assert(IntrData->Type == CMP_MASK && "Unexpected intrinsic type!");
17290 Cmp = DAG.getNode(IntrData->Opc0, dl, MaskVT, Op.getOperand(1),
17293 SDValue CmpMask = getVectorMaskingNode(Cmp, Mask,
17294 DAG.getTargetConstant(0, MaskVT),
17296 SDValue Res = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, BitcastVT,
17297 DAG.getUNDEF(BitcastVT), CmpMask,
17298 DAG.getIntPtrConstant(0));
17299 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
17301 case COMI: { // Comparison intrinsics
17302 ISD::CondCode CC = (ISD::CondCode)IntrData->Opc1;
17303 SDValue LHS = Op.getOperand(1);
17304 SDValue RHS = Op.getOperand(2);
17305 unsigned X86CC = TranslateX86CC(CC, true, LHS, RHS, DAG);
17306 assert(X86CC != X86::COND_INVALID && "Unexpected illegal condition!");
17307 SDValue Cond = DAG.getNode(IntrData->Opc0, dl, MVT::i32, LHS, RHS);
17308 SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
17309 DAG.getConstant(X86CC, MVT::i8), Cond);
17310 return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC);
17313 return getTargetVShiftNode(IntrData->Opc0, dl, Op.getSimpleValueType(),
17314 Op.getOperand(1), Op.getOperand(2), DAG);
17316 return getVectorMaskingNode(getTargetVShiftNode(IntrData->Opc0, dl,
17317 Op.getSimpleValueType(),
17319 Op.getOperand(2), DAG),
17320 Op.getOperand(4), Op.getOperand(3), Subtarget,
17322 case COMPRESS_EXPAND_IN_REG: {
17323 SDValue Mask = Op.getOperand(3);
17324 SDValue DataToCompress = Op.getOperand(1);
17325 SDValue PassThru = Op.getOperand(2);
17326 if (isAllOnes(Mask)) // return data as is
17327 return Op.getOperand(1);
17328 EVT VT = Op.getValueType();
17329 EVT MaskVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
17330 VT.getVectorNumElements());
17331 EVT BitcastVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
17332 Mask.getValueType().getSizeInBits());
17334 SDValue VMask = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MaskVT,
17335 DAG.getNode(ISD::BITCAST, dl, BitcastVT, Mask),
17336 DAG.getIntPtrConstant(0));
17338 return DAG.getNode(IntrData->Opc0, dl, VT, VMask, DataToCompress,
17342 SDValue Mask = Op.getOperand(3);
17343 EVT VT = Op.getValueType();
17344 EVT MaskVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
17345 VT.getVectorNumElements());
17346 EVT BitcastVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
17347 Mask.getValueType().getSizeInBits());
17349 SDValue VMask = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MaskVT,
17350 DAG.getNode(ISD::BITCAST, dl, BitcastVT, Mask),
17351 DAG.getIntPtrConstant(0));
17352 return DAG.getNode(IntrData->Opc0, dl, VT, VMask, Op.getOperand(1),
17361 default: return SDValue(); // Don't custom lower most intrinsics.
17363 case Intrinsic::x86_avx512_mask_valign_q_512:
17364 case Intrinsic::x86_avx512_mask_valign_d_512:
17365 // Vector source operands are swapped.
17366 return getVectorMaskingNode(DAG.getNode(X86ISD::VALIGN, dl,
17367 Op.getValueType(), Op.getOperand(2),
17370 Op.getOperand(5), Op.getOperand(4),
  // ptest and testp intrinsics. The intrinsics these come from are designed to
  // return an integer value, not just an instruction, so lower them to the
  // ptest or testp pattern and a setcc for the result.
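  // The condition codes chosen below mirror the flag each variant tests:
  // the *z forms check ZF (COND_E), the *c forms check CF (COND_B), and the
  // *nzc forms check that neither flag is set (COND_A).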
17376 case Intrinsic::x86_sse41_ptestz:
17377 case Intrinsic::x86_sse41_ptestc:
17378 case Intrinsic::x86_sse41_ptestnzc:
17379 case Intrinsic::x86_avx_ptestz_256:
17380 case Intrinsic::x86_avx_ptestc_256:
17381 case Intrinsic::x86_avx_ptestnzc_256:
17382 case Intrinsic::x86_avx_vtestz_ps:
17383 case Intrinsic::x86_avx_vtestc_ps:
17384 case Intrinsic::x86_avx_vtestnzc_ps:
17385 case Intrinsic::x86_avx_vtestz_pd:
17386 case Intrinsic::x86_avx_vtestc_pd:
17387 case Intrinsic::x86_avx_vtestnzc_pd:
17388 case Intrinsic::x86_avx_vtestz_ps_256:
17389 case Intrinsic::x86_avx_vtestc_ps_256:
17390 case Intrinsic::x86_avx_vtestnzc_ps_256:
17391 case Intrinsic::x86_avx_vtestz_pd_256:
17392 case Intrinsic::x86_avx_vtestc_pd_256:
17393 case Intrinsic::x86_avx_vtestnzc_pd_256: {
17394 bool IsTestPacked = false;
17397 default: llvm_unreachable("Bad fallthrough in Intrinsic lowering.");
17398 case Intrinsic::x86_avx_vtestz_ps:
17399 case Intrinsic::x86_avx_vtestz_pd:
17400 case Intrinsic::x86_avx_vtestz_ps_256:
17401 case Intrinsic::x86_avx_vtestz_pd_256:
17402 IsTestPacked = true; // Fallthrough
17403 case Intrinsic::x86_sse41_ptestz:
17404 case Intrinsic::x86_avx_ptestz_256:
17406 X86CC = X86::COND_E;
17408 case Intrinsic::x86_avx_vtestc_ps:
17409 case Intrinsic::x86_avx_vtestc_pd:
17410 case Intrinsic::x86_avx_vtestc_ps_256:
17411 case Intrinsic::x86_avx_vtestc_pd_256:
17412 IsTestPacked = true; // Fallthrough
17413 case Intrinsic::x86_sse41_ptestc:
17414 case Intrinsic::x86_avx_ptestc_256:
17416 X86CC = X86::COND_B;
17418 case Intrinsic::x86_avx_vtestnzc_ps:
17419 case Intrinsic::x86_avx_vtestnzc_pd:
17420 case Intrinsic::x86_avx_vtestnzc_ps_256:
17421 case Intrinsic::x86_avx_vtestnzc_pd_256:
17422 IsTestPacked = true; // Fallthrough
17423 case Intrinsic::x86_sse41_ptestnzc:
17424 case Intrinsic::x86_avx_ptestnzc_256:
17426 X86CC = X86::COND_A;
17430 SDValue LHS = Op.getOperand(1);
17431 SDValue RHS = Op.getOperand(2);
17432 unsigned TestOpc = IsTestPacked ? X86ISD::TESTP : X86ISD::PTEST;
17433 SDValue Test = DAG.getNode(TestOpc, dl, MVT::i32, LHS, RHS);
17434 SDValue CC = DAG.getConstant(X86CC, MVT::i8);
17435 SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8, CC, Test);
17436 return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC);
17438 case Intrinsic::x86_avx512_kortestz_w:
17439 case Intrinsic::x86_avx512_kortestc_w: {
17440 unsigned X86CC = (IntNo == Intrinsic::x86_avx512_kortestz_w)? X86::COND_E: X86::COND_B;
17441 SDValue LHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i1, Op.getOperand(1));
17442 SDValue RHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i1, Op.getOperand(2));
17443 SDValue CC = DAG.getConstant(X86CC, MVT::i8);
17444 SDValue Test = DAG.getNode(X86ISD::KORTEST, dl, MVT::i32, LHS, RHS);
17445 SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i1, CC, Test);
17446 return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC);
17449 case Intrinsic::x86_sse42_pcmpistria128:
17450 case Intrinsic::x86_sse42_pcmpestria128:
17451 case Intrinsic::x86_sse42_pcmpistric128:
17452 case Intrinsic::x86_sse42_pcmpestric128:
17453 case Intrinsic::x86_sse42_pcmpistrio128:
17454 case Intrinsic::x86_sse42_pcmpestrio128:
17455 case Intrinsic::x86_sse42_pcmpistris128:
17456 case Intrinsic::x86_sse42_pcmpestris128:
17457 case Intrinsic::x86_sse42_pcmpistriz128:
17458 case Intrinsic::x86_sse42_pcmpestriz128: {
17462 default: llvm_unreachable("Impossible intrinsic"); // Can't reach here.
17463 case Intrinsic::x86_sse42_pcmpistria128:
17464 Opcode = X86ISD::PCMPISTRI;
17465 X86CC = X86::COND_A;
17467 case Intrinsic::x86_sse42_pcmpestria128:
17468 Opcode = X86ISD::PCMPESTRI;
17469 X86CC = X86::COND_A;
17471 case Intrinsic::x86_sse42_pcmpistric128:
17472 Opcode = X86ISD::PCMPISTRI;
17473 X86CC = X86::COND_B;
17475 case Intrinsic::x86_sse42_pcmpestric128:
17476 Opcode = X86ISD::PCMPESTRI;
17477 X86CC = X86::COND_B;
17479 case Intrinsic::x86_sse42_pcmpistrio128:
17480 Opcode = X86ISD::PCMPISTRI;
17481 X86CC = X86::COND_O;
17483 case Intrinsic::x86_sse42_pcmpestrio128:
17484 Opcode = X86ISD::PCMPESTRI;
17485 X86CC = X86::COND_O;
17487 case Intrinsic::x86_sse42_pcmpistris128:
17488 Opcode = X86ISD::PCMPISTRI;
17489 X86CC = X86::COND_S;
17491 case Intrinsic::x86_sse42_pcmpestris128:
17492 Opcode = X86ISD::PCMPESTRI;
17493 X86CC = X86::COND_S;
17495 case Intrinsic::x86_sse42_pcmpistriz128:
17496 Opcode = X86ISD::PCMPISTRI;
17497 X86CC = X86::COND_E;
17499 case Intrinsic::x86_sse42_pcmpestriz128:
17500 Opcode = X86ISD::PCMPESTRI;
17501 X86CC = X86::COND_E;
17504 SmallVector<SDValue, 5> NewOps(Op->op_begin()+1, Op->op_end());
17505 SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
17506 SDValue PCMP = DAG.getNode(Opcode, dl, VTs, NewOps);
17507 SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
17508 DAG.getConstant(X86CC, MVT::i8),
17509 SDValue(PCMP.getNode(), 1));
17510 return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC);
17513 case Intrinsic::x86_sse42_pcmpistri128:
17514 case Intrinsic::x86_sse42_pcmpestri128: {
17516 if (IntNo == Intrinsic::x86_sse42_pcmpistri128)
17517 Opcode = X86ISD::PCMPISTRI;
17519 Opcode = X86ISD::PCMPESTRI;
17521 SmallVector<SDValue, 5> NewOps(Op->op_begin()+1, Op->op_end());
17522 SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
17523 return DAG.getNode(Opcode, dl, VTs, NewOps);
17528 static SDValue getGatherNode(unsigned Opc, SDValue Op, SelectionDAG &DAG,
17529 SDValue Src, SDValue Mask, SDValue Base,
17530 SDValue Index, SDValue ScaleOp, SDValue Chain,
17531 const X86Subtarget * Subtarget) {
17533 ConstantSDNode *C = dyn_cast<ConstantSDNode>(ScaleOp);
17534 assert(C && "Invalid scale type");
17535 SDValue Scale = DAG.getTargetConstant(C->getZExtValue(), MVT::i8);
17536 EVT MaskVT = MVT::getVectorVT(MVT::i1,
17537 Index.getSimpleValueType().getVectorNumElements());
17539 ConstantSDNode *MaskC = dyn_cast<ConstantSDNode>(Mask);
17541 MaskInReg = DAG.getTargetConstant(MaskC->getSExtValue(), MaskVT);
17543 MaskInReg = DAG.getNode(ISD::BITCAST, dl, MaskVT, Mask);
17544 SDVTList VTs = DAG.getVTList(Op.getValueType(), MaskVT, MVT::Other);
17545 SDValue Disp = DAG.getTargetConstant(0, MVT::i32);
17546 SDValue Segment = DAG.getRegister(0, MVT::i32);
17547 if (Src.getOpcode() == ISD::UNDEF)
17548 Src = getZeroVector(Op.getValueType(), Subtarget, DAG, dl);
17549 SDValue Ops[] = {Src, MaskInReg, Base, Scale, Index, Disp, Segment, Chain};
17550 SDNode *Res = DAG.getMachineNode(Opc, dl, VTs, Ops);
17551 SDValue RetOps[] = { SDValue(Res, 0), SDValue(Res, 2) };
17552 return DAG.getMergeValues(RetOps, dl);
17555 static SDValue getScatterNode(unsigned Opc, SDValue Op, SelectionDAG &DAG,
17556 SDValue Src, SDValue Mask, SDValue Base,
17557 SDValue Index, SDValue ScaleOp, SDValue Chain) {
17559 ConstantSDNode *C = dyn_cast<ConstantSDNode>(ScaleOp);
17560 assert(C && "Invalid scale type");
17561 SDValue Scale = DAG.getTargetConstant(C->getZExtValue(), MVT::i8);
17562 SDValue Disp = DAG.getTargetConstant(0, MVT::i32);
17563 SDValue Segment = DAG.getRegister(0, MVT::i32);
17564 EVT MaskVT = MVT::getVectorVT(MVT::i1,
17565 Index.getSimpleValueType().getVectorNumElements());
17567 ConstantSDNode *MaskC = dyn_cast<ConstantSDNode>(Mask);
17569 MaskInReg = DAG.getTargetConstant(MaskC->getSExtValue(), MaskVT);
17571 MaskInReg = DAG.getNode(ISD::BITCAST, dl, MaskVT, Mask);
17572 SDVTList VTs = DAG.getVTList(MaskVT, MVT::Other);
17573 SDValue Ops[] = {Base, Scale, Index, Disp, Segment, MaskInReg, Src, Chain};
17574 SDNode *Res = DAG.getMachineNode(Opc, dl, VTs, Ops);
17575 return SDValue(Res, 1);
17578 static SDValue getPrefetchNode(unsigned Opc, SDValue Op, SelectionDAG &DAG,
17579 SDValue Mask, SDValue Base, SDValue Index,
17580 SDValue ScaleOp, SDValue Chain) {
17582 ConstantSDNode *C = dyn_cast<ConstantSDNode>(ScaleOp);
17583 assert(C && "Invalid scale type");
17584 SDValue Scale = DAG.getTargetConstant(C->getZExtValue(), MVT::i8);
17585 SDValue Disp = DAG.getTargetConstant(0, MVT::i32);
17586 SDValue Segment = DAG.getRegister(0, MVT::i32);
17588 MVT::getVectorVT(MVT::i1, Index.getSimpleValueType().getVectorNumElements());
17590 ConstantSDNode *MaskC = dyn_cast<ConstantSDNode>(Mask);
17592 MaskInReg = DAG.getTargetConstant(MaskC->getSExtValue(), MaskVT);
17594 MaskInReg = DAG.getNode(ISD::BITCAST, dl, MaskVT, Mask);
17595 //SDVTList VTs = DAG.getVTList(MVT::Other);
17596 SDValue Ops[] = {MaskInReg, Base, Scale, Index, Disp, Segment, Chain};
17597 SDNode *Res = DAG.getMachineNode(Opc, dl, MVT::Other, Ops);
17598 return SDValue(Res, 0);
17601 // getReadPerformanceCounter - Handles the lowering of builtin intrinsics that
17602 // read performance monitor counters (x86_rdpmc).
17603 static void getReadPerformanceCounter(SDNode *N, SDLoc DL,
17604 SelectionDAG &DAG, const X86Subtarget *Subtarget,
17605 SmallVectorImpl<SDValue> &Results) {
17606 assert(N->getNumOperands() == 3 && "Unexpected number of operands!");
17607 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue);
  // The ECX register is used to select the index of the performance counter
  // to read.
17612 SDValue Chain = DAG.getCopyToReg(N->getOperand(0), DL, X86::ECX,
17614 SDValue rd = DAG.getNode(X86ISD::RDPMC_DAG, DL, Tys, Chain);
17616 // Reads the content of a 64-bit performance counter and returns it in the
17617 // registers EDX:EAX.
17618 if (Subtarget->is64Bit()) {
17619 LO = DAG.getCopyFromReg(rd, DL, X86::RAX, MVT::i64, rd.getValue(1));
17620 HI = DAG.getCopyFromReg(LO.getValue(1), DL, X86::RDX, MVT::i64,
17623 LO = DAG.getCopyFromReg(rd, DL, X86::EAX, MVT::i32, rd.getValue(1));
17624 HI = DAG.getCopyFromReg(LO.getValue(1), DL, X86::EDX, MVT::i32,
17627 Chain = HI.getValue(1);
17629 if (Subtarget->is64Bit()) {
17630 // The EAX register is loaded with the low-order 32 bits. The EDX register
17631 // is loaded with the supported high-order bits of the counter.
17632 SDValue Tmp = DAG.getNode(ISD::SHL, DL, MVT::i64, HI,
17633 DAG.getConstant(32, MVT::i8));
17634 Results.push_back(DAG.getNode(ISD::OR, DL, MVT::i64, LO, Tmp));
17635 Results.push_back(Chain);
17639 // Use a buildpair to merge the two 32-bit values into a 64-bit one.
17640 SDValue Ops[] = { LO, HI };
17641 SDValue Pair = DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, Ops);
17642 Results.push_back(Pair);
17643 Results.push_back(Chain);
17646 // getReadTimeStampCounter - Handles the lowering of builtin intrinsics that
17647 // read the time stamp counter (x86_rdtsc and x86_rdtscp). This function is
17648 // also used to custom lower READCYCLECOUNTER nodes.
17649 static void getReadTimeStampCounter(SDNode *N, SDLoc DL, unsigned Opcode,
17650 SelectionDAG &DAG, const X86Subtarget *Subtarget,
17651 SmallVectorImpl<SDValue> &Results) {
17652 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue);
17653 SDValue rd = DAG.getNode(Opcode, DL, Tys, N->getOperand(0));
17656 // The processor's time-stamp counter (a 64-bit MSR) is stored into the
17657 // EDX:EAX registers. EDX is loaded with the high-order 32 bits of the MSR
17658 // and the EAX register is loaded with the low-order 32 bits.
17659 if (Subtarget->is64Bit()) {
17660 LO = DAG.getCopyFromReg(rd, DL, X86::RAX, MVT::i64, rd.getValue(1));
17661 HI = DAG.getCopyFromReg(LO.getValue(1), DL, X86::RDX, MVT::i64,
17664 LO = DAG.getCopyFromReg(rd, DL, X86::EAX, MVT::i32, rd.getValue(1));
17665 HI = DAG.getCopyFromReg(LO.getValue(1), DL, X86::EDX, MVT::i32,
17668 SDValue Chain = HI.getValue(1);
17670 if (Opcode == X86ISD::RDTSCP_DAG) {
17671 assert(N->getNumOperands() == 3 && "Unexpected number of operands!");
    // Instruction RDTSCP loads the IA32_TSC_AUX MSR (address C000_0103H) into
    // the ECX register. Add 'ecx' explicitly to the chain.
17675 SDValue ecx = DAG.getCopyFromReg(Chain, DL, X86::ECX, MVT::i32,
17677 // Explicitly store the content of ECX at the location passed in input
17678 // to the 'rdtscp' intrinsic.
17679 Chain = DAG.getStore(ecx.getValue(1), DL, ecx, N->getOperand(2),
17680 MachinePointerInfo(), false, false, 0);
17683 if (Subtarget->is64Bit()) {
17684 // The EDX register is loaded with the high-order 32 bits of the MSR, and
17685 // the EAX register is loaded with the low-order 32 bits.
17686 SDValue Tmp = DAG.getNode(ISD::SHL, DL, MVT::i64, HI,
17687 DAG.getConstant(32, MVT::i8));
17688 Results.push_back(DAG.getNode(ISD::OR, DL, MVT::i64, LO, Tmp));
17689 Results.push_back(Chain);
17693 // Use a buildpair to merge the two 32-bit values into a 64-bit one.
17694 SDValue Ops[] = { LO, HI };
17695 SDValue Pair = DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, Ops);
17696 Results.push_back(Pair);
17697 Results.push_back(Chain);
17700 static SDValue LowerREADCYCLECOUNTER(SDValue Op, const X86Subtarget *Subtarget,
17701 SelectionDAG &DAG) {
17702 SmallVector<SDValue, 2> Results;
17704 getReadTimeStampCounter(Op.getNode(), DL, X86ISD::RDTSC_DAG, DAG, Subtarget,
17706 return DAG.getMergeValues(Results, DL);
17710 static SDValue LowerINTRINSIC_W_CHAIN(SDValue Op, const X86Subtarget *Subtarget,
17711 SelectionDAG &DAG) {
17712 unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
17714 const IntrinsicData* IntrData = getIntrinsicWithChain(IntNo);
17719 switch(IntrData->Type) {
17721 llvm_unreachable("Unknown Intrinsic Type");
17725 // Emit the node with the right value type.
17726 SDVTList VTs = DAG.getVTList(Op->getValueType(0), MVT::Glue, MVT::Other);
17727 SDValue Result = DAG.getNode(IntrData->Opc0, dl, VTs, Op.getOperand(0));
    // If the value returned by RDRAND/RDSEED was valid (CF=1), return 1.
    // Otherwise return the value from Rand, which is always 0, cast to i32.
17731 SDValue Ops[] = { DAG.getZExtOrTrunc(Result, dl, Op->getValueType(1)),
17732 DAG.getConstant(1, Op->getValueType(1)),
17733 DAG.getConstant(X86::COND_B, MVT::i32),
17734 SDValue(Result.getNode(), 1) };
17735 SDValue isValid = DAG.getNode(X86ISD::CMOV, dl,
17736 DAG.getVTList(Op->getValueType(1), MVT::Glue),
17739 // Return { result, isValid, chain }.
17740 return DAG.getNode(ISD::MERGE_VALUES, dl, Op->getVTList(), Result, isValid,
17741 SDValue(Result.getNode(), 2));
17744 //gather(v1, mask, index, base, scale);
17745 SDValue Chain = Op.getOperand(0);
17746 SDValue Src = Op.getOperand(2);
17747 SDValue Base = Op.getOperand(3);
17748 SDValue Index = Op.getOperand(4);
17749 SDValue Mask = Op.getOperand(5);
17750 SDValue Scale = Op.getOperand(6);
17751 return getGatherNode(IntrData->Opc0, Op, DAG, Src, Mask, Base, Index, Scale, Chain,
17755 //scatter(base, mask, index, v1, scale);
17756 SDValue Chain = Op.getOperand(0);
17757 SDValue Base = Op.getOperand(2);
17758 SDValue Mask = Op.getOperand(3);
17759 SDValue Index = Op.getOperand(4);
17760 SDValue Src = Op.getOperand(5);
17761 SDValue Scale = Op.getOperand(6);
17762 return getScatterNode(IntrData->Opc0, Op, DAG, Src, Mask, Base, Index, Scale, Chain);
17765 SDValue Hint = Op.getOperand(6);
17767 if (dyn_cast<ConstantSDNode> (Hint) == nullptr ||
17768 (HintVal = dyn_cast<ConstantSDNode> (Hint)->getZExtValue()) > 1)
17769 llvm_unreachable("Wrong prefetch hint in intrinsic: should be 0 or 1");
17770 unsigned Opcode = (HintVal ? IntrData->Opc1 : IntrData->Opc0);
17771 SDValue Chain = Op.getOperand(0);
17772 SDValue Mask = Op.getOperand(2);
17773 SDValue Index = Op.getOperand(3);
17774 SDValue Base = Op.getOperand(4);
17775 SDValue Scale = Op.getOperand(5);
17776 return getPrefetchNode(Opcode, Op, DAG, Mask, Base, Index, Scale, Chain);
17778 // Read Time Stamp Counter (RDTSC) and Processor ID (RDTSCP).
17780 SmallVector<SDValue, 2> Results;
17781 getReadTimeStampCounter(Op.getNode(), dl, IntrData->Opc0, DAG, Subtarget, Results);
17782 return DAG.getMergeValues(Results, dl);
17784 // Read Performance Monitoring Counters.
17786 SmallVector<SDValue, 2> Results;
17787 getReadPerformanceCounter(Op.getNode(), dl, DAG, Subtarget, Results);
17788 return DAG.getMergeValues(Results, dl);
17790 // XTEST intrinsics.
17792 SDVTList VTs = DAG.getVTList(Op->getValueType(0), MVT::Other);
17793 SDValue InTrans = DAG.getNode(IntrData->Opc0, dl, VTs, Op.getOperand(0));
17794 SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
17795 DAG.getConstant(X86::COND_NE, MVT::i8),
17797 SDValue Ret = DAG.getNode(ISD::ZERO_EXTEND, dl, Op->getValueType(0), SetCC);
17798 return DAG.getNode(ISD::MERGE_VALUES, dl, Op->getVTList(),
17799 Ret, SDValue(InTrans.getNode(), 1));
17803 SmallVector<SDValue, 2> Results;
17804 SDVTList CFVTs = DAG.getVTList(Op->getValueType(0), MVT::Other);
17805 SDVTList VTs = DAG.getVTList(Op.getOperand(3)->getValueType(0), MVT::Other);
17806 SDValue GenCF = DAG.getNode(X86ISD::ADD, dl, CFVTs, Op.getOperand(2),
17807 DAG.getConstant(-1, MVT::i8));
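    // Adding -1 (0xFF) to the i8 carry-in wraps exactly when the carry-in is
    // nonzero, so GenCF's flag result has CF set iff a carry was passed in;
    // that flag then feeds the adc-style node built below.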
17808 SDValue Res = DAG.getNode(IntrData->Opc0, dl, VTs, Op.getOperand(3),
17809 Op.getOperand(4), GenCF.getValue(1));
17810 SDValue Store = DAG.getStore(Op.getOperand(0), dl, Res.getValue(0),
17811 Op.getOperand(5), MachinePointerInfo(),
17813 SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
17814 DAG.getConstant(X86::COND_B, MVT::i8),
17816 Results.push_back(SetCC);
17817 Results.push_back(Store);
17818 return DAG.getMergeValues(Results, dl);
17820 case COMPRESS_TO_MEM: {
17822 SDValue Mask = Op.getOperand(4);
17823 SDValue DataToCompress = Op.getOperand(3);
17824 SDValue Addr = Op.getOperand(2);
17825 SDValue Chain = Op.getOperand(0);
17827 if (isAllOnes(Mask)) // return just a store
17828 return DAG.getStore(Chain, dl, DataToCompress, Addr,
17829 MachinePointerInfo(), false, false, 0);
17831 EVT VT = DataToCompress.getValueType();
17832 EVT MaskVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
17833 VT.getVectorNumElements());
17834 EVT BitcastVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
17835 Mask.getValueType().getSizeInBits());
17836 SDValue VMask = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MaskVT,
17837 DAG.getNode(ISD::BITCAST, dl, BitcastVT, Mask),
17838 DAG.getIntPtrConstant(0));
17840 SDValue Compressed = DAG.getNode(IntrData->Opc0, dl, VT, VMask,
17841 DataToCompress, DAG.getUNDEF(VT));
17842 return DAG.getStore(Chain, dl, Compressed, Addr,
17843 MachinePointerInfo(), false, false, 0);
17845 case EXPAND_FROM_MEM: {
17847 SDValue Mask = Op.getOperand(4);
17848 SDValue PathThru = Op.getOperand(3);
17849 SDValue Addr = Op.getOperand(2);
17850 SDValue Chain = Op.getOperand(0);
17851 EVT VT = Op.getValueType();
17853 if (isAllOnes(Mask)) // return just a load
17854 return DAG.getLoad(VT, dl, Chain, Addr, MachinePointerInfo(), false, false,
17856 EVT MaskVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
17857 VT.getVectorNumElements());
17858 EVT BitcastVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
17859 Mask.getValueType().getSizeInBits());
17860 SDValue VMask = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MaskVT,
17861 DAG.getNode(ISD::BITCAST, dl, BitcastVT, Mask),
17862 DAG.getIntPtrConstant(0));
17864 SDValue DataToExpand = DAG.getLoad(VT, dl, Chain, Addr, MachinePointerInfo(),
17865 false, false, false, 0);
17867 SmallVector<SDValue, 2> Results;
17868 Results.push_back(DAG.getNode(IntrData->Opc0, dl, VT, VMask, DataToExpand,
17870 Results.push_back(Chain);
17871 return DAG.getMergeValues(Results, dl);
17876 SDValue X86TargetLowering::LowerRETURNADDR(SDValue Op,
17877 SelectionDAG &DAG) const {
17878 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
17879 MFI->setReturnAddressIsTaken(true);
17881 if (verifyReturnAddressArgumentIsConstant(Op, DAG))
17884 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
17886 EVT PtrVT = getPointerTy();
17889 SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
17890 const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
17891 SDValue Offset = DAG.getConstant(RegInfo->getSlotSize(), PtrVT);
17892 return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(),
17893 DAG.getNode(ISD::ADD, dl, PtrVT,
17894 FrameAddr, Offset),
17895 MachinePointerInfo(), false, false, false, 0);
17898 // Just load the return address.
17899 SDValue RetAddrFI = getReturnAddressFrameIndex(DAG);
17900 return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(),
17901 RetAddrFI, MachinePointerInfo(), false, false, false, 0);
17904 SDValue X86TargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const {
17905 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
17906 MFI->setFrameAddressIsTaken(true);
17908 EVT VT = Op.getValueType();
17909 SDLoc dl(Op); // FIXME probably not meaningful
17910 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
17911 const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
17912 unsigned FrameReg = RegInfo->getPtrSizedFrameRegister(
17913 DAG.getMachineFunction());
17914 assert(((FrameReg == X86::RBP && VT == MVT::i64) ||
17915 (FrameReg == X86::EBP && VT == MVT::i32)) &&
17916 "Invalid Frame Register!");
17917 SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, VT);
17919 FrameAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), FrameAddr,
17920 MachinePointerInfo(),
17921 false, false, false, 0);
17925 // FIXME? Maybe this could be a TableGen attribute on some registers and
17926 // this table could be generated automatically from RegInfo.
17927 unsigned X86TargetLowering::getRegisterByName(const char* RegName,
17929 unsigned Reg = StringSwitch<unsigned>(RegName)
17930 .Case("esp", X86::ESP)
17931 .Case("rsp", X86::RSP)
17935 report_fatal_error("Invalid register name global variable");
17938 SDValue X86TargetLowering::LowerFRAME_TO_ARGS_OFFSET(SDValue Op,
17939 SelectionDAG &DAG) const {
17940 const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
17941 return DAG.getIntPtrConstant(2 * RegInfo->getSlotSize());
17944 SDValue X86TargetLowering::LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const {
17945 SDValue Chain = Op.getOperand(0);
17946 SDValue Offset = Op.getOperand(1);
17947 SDValue Handler = Op.getOperand(2);
17950 EVT PtrVT = getPointerTy();
17951 const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
17952 unsigned FrameReg = RegInfo->getFrameRegister(DAG.getMachineFunction());
17953 assert(((FrameReg == X86::RBP && PtrVT == MVT::i64) ||
17954 (FrameReg == X86::EBP && PtrVT == MVT::i32)) &&
17955 "Invalid Frame Register!");
17956 SDValue Frame = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, PtrVT);
17957 unsigned StoreAddrReg = (PtrVT == MVT::i64) ? X86::RCX : X86::ECX;
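  // Store the handler address into the (Offset-adjusted) return-address slot
  // of the caller's frame, then hand that slot's address to the EH_RETURN
  // pseudo in RCX/ECX.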
17959 SDValue StoreAddr = DAG.getNode(ISD::ADD, dl, PtrVT, Frame,
17960 DAG.getIntPtrConstant(RegInfo->getSlotSize()));
17961 StoreAddr = DAG.getNode(ISD::ADD, dl, PtrVT, StoreAddr, Offset);
17962 Chain = DAG.getStore(Chain, dl, Handler, StoreAddr, MachinePointerInfo(),
17964 Chain = DAG.getCopyToReg(Chain, dl, StoreAddrReg, StoreAddr);
17966 return DAG.getNode(X86ISD::EH_RETURN, dl, MVT::Other, Chain,
17967 DAG.getRegister(StoreAddrReg, PtrVT));
17970 SDValue X86TargetLowering::lowerEH_SJLJ_SETJMP(SDValue Op,
17971 SelectionDAG &DAG) const {
17973 return DAG.getNode(X86ISD::EH_SJLJ_SETJMP, DL,
17974 DAG.getVTList(MVT::i32, MVT::Other),
17975 Op.getOperand(0), Op.getOperand(1));
17978 SDValue X86TargetLowering::lowerEH_SJLJ_LONGJMP(SDValue Op,
17979 SelectionDAG &DAG) const {
17981 return DAG.getNode(X86ISD::EH_SJLJ_LONGJMP, DL, MVT::Other,
17982 Op.getOperand(0), Op.getOperand(1));
17985 static SDValue LowerADJUST_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) {
17986 return Op.getOperand(0);
17989 SDValue X86TargetLowering::LowerINIT_TRAMPOLINE(SDValue Op,
17990 SelectionDAG &DAG) const {
17991 SDValue Root = Op.getOperand(0);
17992 SDValue Trmp = Op.getOperand(1); // trampoline
17993 SDValue FPtr = Op.getOperand(2); // nested function
17994 SDValue Nest = Op.getOperand(3); // 'nest' parameter value
17997 const Value *TrmpAddr = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();
17998 const TargetRegisterInfo *TRI = Subtarget->getRegisterInfo();
18000 if (Subtarget->is64Bit()) {
18001 SDValue OutChains[6];
18003 // Large code-model.
18004 const unsigned char JMP64r = 0xFF; // 64-bit jmp through register opcode.
18005 const unsigned char MOV64ri = 0xB8; // X86::MOV64ri opcode.
18007 const unsigned char N86R10 = TRI->getEncodingValue(X86::R10) & 0x7;
18008 const unsigned char N86R11 = TRI->getEncodingValue(X86::R11) & 0x7;
18010 const unsigned char REX_WB = 0x40 | 0x08 | 0x01; // REX prefix
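    // The stores below assemble a 23-byte trampoline (byte offsets):
    //    0: 49 BB           movabsq ..., %r11   (imm64 = FPtr at offset 2)
    //   10: 49 BA           movabsq ..., %r10   (imm64 = Nest at offset 12)
    //   20: 49 FF E3        jmpq    *%r11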
18012 // Load the pointer to the nested function into R11.
18013 unsigned OpCode = ((MOV64ri | N86R11) << 8) | REX_WB; // movabsq r11
18014 SDValue Addr = Trmp;
18015 OutChains[0] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, MVT::i16),
18016 Addr, MachinePointerInfo(TrmpAddr),
18019 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
18020 DAG.getConstant(2, MVT::i64));
18021 OutChains[1] = DAG.getStore(Root, dl, FPtr, Addr,
18022 MachinePointerInfo(TrmpAddr, 2),
18025 // Load the 'nest' parameter value into R10.
18026 // R10 is specified in X86CallingConv.td
18027 OpCode = ((MOV64ri | N86R10) << 8) | REX_WB; // movabsq r10
18028 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
18029 DAG.getConstant(10, MVT::i64));
18030 OutChains[2] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, MVT::i16),
18031 Addr, MachinePointerInfo(TrmpAddr, 10),
18034 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
18035 DAG.getConstant(12, MVT::i64));
18036 OutChains[3] = DAG.getStore(Root, dl, Nest, Addr,
18037 MachinePointerInfo(TrmpAddr, 12),
18040 // Jump to the nested function.
18041 OpCode = (JMP64r << 8) | REX_WB; // jmpq *...
18042 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
18043 DAG.getConstant(20, MVT::i64));
18044 OutChains[4] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, MVT::i16),
18045 Addr, MachinePointerInfo(TrmpAddr, 20),
18048 unsigned char ModRM = N86R11 | (4 << 3) | (3 << 6); // ...r11
18049 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
18050 DAG.getConstant(22, MVT::i64));
18051 OutChains[5] = DAG.getStore(Root, dl, DAG.getConstant(ModRM, MVT::i8), Addr,
18052 MachinePointerInfo(TrmpAddr, 22),
18055 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
18057 const Function *Func =
18058 cast<Function>(cast<SrcValueSDNode>(Op.getOperand(5))->getValue());
18059 CallingConv::ID CC = Func->getCallingConv();
18064 llvm_unreachable("Unsupported calling convention");
18065 case CallingConv::C:
18066 case CallingConv::X86_StdCall: {
18067 // Pass 'nest' parameter in ECX.
18068 // Must be kept in sync with X86CallingConv.td
18069 NestReg = X86::ECX;
18071 // Check that ECX wasn't needed by an 'inreg' parameter.
18072 FunctionType *FTy = Func->getFunctionType();
18073 const AttributeSet &Attrs = Func->getAttributes();
18075 if (!Attrs.isEmpty() && !Func->isVarArg()) {
18076 unsigned InRegCount = 0;
18079 for (FunctionType::param_iterator I = FTy->param_begin(),
18080 E = FTy->param_end(); I != E; ++I, ++Idx)
18081 if (Attrs.hasAttribute(Idx, Attribute::InReg))
18082 // FIXME: should only count parameters that are lowered to integers.
18083 InRegCount += (TD->getTypeSizeInBits(*I) + 31) / 32;
18085 if (InRegCount > 2) {
18086 report_fatal_error("Nest register in use - reduce number of inreg"
18092 case CallingConv::X86_FastCall:
18093 case CallingConv::X86_ThisCall:
18094 case CallingConv::Fast:
18095 // Pass 'nest' parameter in EAX.
18096 // Must be kept in sync with X86CallingConv.td
18097 NestReg = X86::EAX;
18101 SDValue OutChains[4];
18102 SDValue Addr, Disp;
18104 Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
18105 DAG.getConstant(10, MVT::i32));
18106 Disp = DAG.getNode(ISD::SUB, dl, MVT::i32, FPtr, Addr);
18108 // This is storing the opcode for MOV32ri.
18109 const unsigned char MOV32ri = 0xB8; // X86::MOV32ri's opcode byte.
18110 const unsigned char N86Reg = TRI->getEncodingValue(NestReg) & 0x7;
18111 OutChains[0] = DAG.getStore(Root, dl,
18112 DAG.getConstant(MOV32ri|N86Reg, MVT::i8),
18113 Trmp, MachinePointerInfo(TrmpAddr),
18116 Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
18117 DAG.getConstant(1, MVT::i32));
18118 OutChains[1] = DAG.getStore(Root, dl, Nest, Addr,
18119 MachinePointerInfo(TrmpAddr, 1),
18122 const unsigned char JMP = 0xE9; // jmp <32bit dst> opcode.
18123 Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
18124 DAG.getConstant(5, MVT::i32));
18125 OutChains[2] = DAG.getStore(Root, dl, DAG.getConstant(JMP, MVT::i8), Addr,
18126 MachinePointerInfo(TrmpAddr, 5),
18129 Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
18130 DAG.getConstant(6, MVT::i32));
18131 OutChains[3] = DAG.getStore(Root, dl, Disp, Addr,
18132 MachinePointerInfo(TrmpAddr, 6),
18135 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
18139 SDValue X86TargetLowering::LowerFLT_ROUNDS_(SDValue Op,
18140 SelectionDAG &DAG) const {
  /*
   The rounding mode is in bits 11:10 of FPSR, and has the following
   settings:
     00 Round to nearest
     01 Round to -inf
     10 Round to +inf
     11 Round to 0

   FLT_ROUNDS, on the other hand, expects the following:
    -1 Undefined
     0 Round to 0
     1 Round to nearest
     2 Round to +inf
     3 Round to -inf

   To perform the conversion, we do:
     (((((FPSR & 0x800) >> 11) | ((FPSR & 0x400) >> 9)) + 1) & 3)
  */
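  // Quick check of that formula (bit 11 = high RC bit, bit 10 = low RC bit):
  //   RC=00 -> (0|0)+1 = 1 (nearest)    RC=01 -> (0|2)+1 = 3 (-inf)
  //   RC=10 -> (1|0)+1 = 2 (+inf)       RC=11 -> ((1|2)+1)&3 = 0 (to 0)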
18160 MachineFunction &MF = DAG.getMachineFunction();
18161 const TargetFrameLowering &TFI = *Subtarget->getFrameLowering();
18162 unsigned StackAlignment = TFI.getStackAlignment();
18163 MVT VT = Op.getSimpleValueType();
18166 // Save FP Control Word to stack slot
18167 int SSFI = MF.getFrameInfo()->CreateStackObject(2, StackAlignment, false);
18168 SDValue StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
18170 MachineMemOperand *MMO =
18171 MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI),
18172 MachineMemOperand::MOStore, 2, 2);
18174 SDValue Ops[] = { DAG.getEntryNode(), StackSlot };
18175 SDValue Chain = DAG.getMemIntrinsicNode(X86ISD::FNSTCW16m, DL,
18176 DAG.getVTList(MVT::Other),
18177 Ops, MVT::i16, MMO);
18179 // Load FP Control Word from stack slot
18180 SDValue CWD = DAG.getLoad(MVT::i16, DL, Chain, StackSlot,
18181 MachinePointerInfo(), false, false, false, 0);
18183 // Transform as necessary
18185 DAG.getNode(ISD::SRL, DL, MVT::i16,
18186 DAG.getNode(ISD::AND, DL, MVT::i16,
18187 CWD, DAG.getConstant(0x800, MVT::i16)),
18188 DAG.getConstant(11, MVT::i8));
18190 DAG.getNode(ISD::SRL, DL, MVT::i16,
18191 DAG.getNode(ISD::AND, DL, MVT::i16,
18192 CWD, DAG.getConstant(0x400, MVT::i16)),
18193 DAG.getConstant(9, MVT::i8));
18196 DAG.getNode(ISD::AND, DL, MVT::i16,
18197 DAG.getNode(ISD::ADD, DL, MVT::i16,
18198 DAG.getNode(ISD::OR, DL, MVT::i16, CWD1, CWD2),
18199 DAG.getConstant(1, MVT::i16)),
18200 DAG.getConstant(3, MVT::i16));
18202 return DAG.getNode((VT.getSizeInBits() < 16 ?
18203 ISD::TRUNCATE : ISD::ZERO_EXTEND), DL, VT, RetVal);
18206 static SDValue LowerCTLZ(SDValue Op, SelectionDAG &DAG) {
18207 MVT VT = Op.getSimpleValueType();
18209 unsigned NumBits = VT.getSizeInBits();
18212 Op = Op.getOperand(0);
18213 if (VT == MVT::i8) {
18214 // Zero extend to i32 since there is not an i8 bsr.
18216 Op = DAG.getNode(ISD::ZERO_EXTEND, dl, OpVT, Op);
18219 // Issue a bsr (scan bits in reverse) which also sets EFLAGS.
18220 SDVTList VTs = DAG.getVTList(OpVT, MVT::i32);
18221 Op = DAG.getNode(X86ISD::BSR, dl, VTs, Op);
18223 // If src is zero (i.e. bsr sets ZF), returns NumBits.
18226 DAG.getConstant(NumBits+NumBits-1, OpVT),
18227 DAG.getConstant(X86::COND_E, MVT::i8),
18230 Op = DAG.getNode(X86ISD::CMOV, dl, OpVT, Ops);
18232 // Finally xor with NumBits-1.
18233 Op = DAG.getNode(ISD::XOR, dl, OpVT, Op, DAG.getConstant(NumBits-1, OpVT));
18236 Op = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Op);
18240 static SDValue LowerCTLZ_ZERO_UNDEF(SDValue Op, SelectionDAG &DAG) {
18241 MVT VT = Op.getSimpleValueType();
18243 unsigned NumBits = VT.getSizeInBits();
18246 Op = Op.getOperand(0);
18247 if (VT == MVT::i8) {
18248 // Zero extend to i32 since there is not an i8 bsr.
18250 Op = DAG.getNode(ISD::ZERO_EXTEND, dl, OpVT, Op);
18253 // Issue a bsr (scan bits in reverse).
18254 SDVTList VTs = DAG.getVTList(OpVT, MVT::i32);
18255 Op = DAG.getNode(X86ISD::BSR, dl, VTs, Op);
18257 // And xor with NumBits-1.
18258 Op = DAG.getNode(ISD::XOR, dl, OpVT, Op, DAG.getConstant(NumBits-1, OpVT));
18261 Op = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Op);
18265 static SDValue LowerCTTZ(SDValue Op, SelectionDAG &DAG) {
18266 MVT VT = Op.getSimpleValueType();
18267 unsigned NumBits = VT.getSizeInBits();
18269 Op = Op.getOperand(0);
18271 // Issue a bsf (scan bits forward) which also sets EFLAGS.
18272 SDVTList VTs = DAG.getVTList(VT, MVT::i32);
18273 Op = DAG.getNode(X86ISD::BSF, dl, VTs, Op);
18275 // If src is zero (i.e. bsf sets ZF), returns NumBits.
18278 DAG.getConstant(NumBits, VT),
18279 DAG.getConstant(X86::COND_E, MVT::i8),
18282 return DAG.getNode(X86ISD::CMOV, dl, VT, Ops);
18285 // Lower256IntArith - Break a 256-bit integer operation into two new 128-bit
18286 // ones, and then concatenate the result back.
18287 static SDValue Lower256IntArith(SDValue Op, SelectionDAG &DAG) {
18288 MVT VT = Op.getSimpleValueType();
18290 assert(VT.is256BitVector() && VT.isInteger() &&
18291 "Unsupported value type for operation");
18293 unsigned NumElems = VT.getVectorNumElements();
18296 // Extract the LHS vectors
18297 SDValue LHS = Op.getOperand(0);
18298 SDValue LHS1 = Extract128BitVector(LHS, 0, DAG, dl);
18299 SDValue LHS2 = Extract128BitVector(LHS, NumElems/2, DAG, dl);
18301 // Extract the RHS vectors
18302 SDValue RHS = Op.getOperand(1);
18303 SDValue RHS1 = Extract128BitVector(RHS, 0, DAG, dl);
18304 SDValue RHS2 = Extract128BitVector(RHS, NumElems/2, DAG, dl);
18306 MVT EltVT = VT.getVectorElementType();
18307 MVT NewVT = MVT::getVectorVT(EltVT, NumElems/2);
18309 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT,
18310 DAG.getNode(Op.getOpcode(), dl, NewVT, LHS1, RHS1),
18311 DAG.getNode(Op.getOpcode(), dl, NewVT, LHS2, RHS2));
18314 static SDValue LowerADD(SDValue Op, SelectionDAG &DAG) {
18315 assert(Op.getSimpleValueType().is256BitVector() &&
18316 Op.getSimpleValueType().isInteger() &&
18317 "Only handle AVX 256-bit vector integer operation");
18318 return Lower256IntArith(Op, DAG);
18321 static SDValue LowerSUB(SDValue Op, SelectionDAG &DAG) {
18322 assert(Op.getSimpleValueType().is256BitVector() &&
18323 Op.getSimpleValueType().isInteger() &&
18324 "Only handle AVX 256-bit vector integer operation");
18325 return Lower256IntArith(Op, DAG);
18328 static SDValue LowerMUL(SDValue Op, const X86Subtarget *Subtarget,
18329 SelectionDAG &DAG) {
18331 MVT VT = Op.getSimpleValueType();
18333 // Decompose 256-bit ops into smaller 128-bit ops.
18334 if (VT.is256BitVector() && !Subtarget->hasInt256())
18335 return Lower256IntArith(Op, DAG);
18337 SDValue A = Op.getOperand(0);
18338 SDValue B = Op.getOperand(1);
18340 // Lower v4i32 mul as 2x shuffle, 2x pmuludq, 2x shuffle.
18341 if (VT == MVT::v4i32) {
18342 assert(Subtarget->hasSSE2() && !Subtarget->hasSSE41() &&
18343 "Should not custom lower when pmuldq is available!");
18345 // Extract the odd parts.
18346 static const int UnpackMask[] = { 1, -1, 3, -1 };
18347 SDValue Aodds = DAG.getVectorShuffle(VT, dl, A, A, UnpackMask);
18348 SDValue Bodds = DAG.getVectorShuffle(VT, dl, B, B, UnpackMask);
18350 // Multiply the even parts.
18351 SDValue Evens = DAG.getNode(X86ISD::PMULUDQ, dl, MVT::v2i64, A, B);
18352 // Now multiply odd parts.
18353 SDValue Odds = DAG.getNode(X86ISD::PMULUDQ, dl, MVT::v2i64, Aodds, Bodds);
18355 Evens = DAG.getNode(ISD::BITCAST, dl, VT, Evens);
18356 Odds = DAG.getNode(ISD::BITCAST, dl, VT, Odds);
18358 // Merge the two vectors back together with a shuffle. This expands into 2
18360 static const int ShufMask[] = { 0, 4, 2, 6 };
18361 return DAG.getVectorShuffle(VT, dl, Evens, Odds, ShufMask);
18364 assert((VT == MVT::v2i64 || VT == MVT::v4i64 || VT == MVT::v8i64) &&
18365 "Only know how to lower V2I64/V4I64/V8I64 multiply");
18367 // Ahi = psrlqi(a, 32);
18368 // Bhi = psrlqi(b, 32);
18370 // AloBlo = pmuludq(a, b);
18371 // AloBhi = pmuludq(a, Bhi);
18372 // AhiBlo = pmuludq(Ahi, b);
18374 // AloBhi = psllqi(AloBhi, 32);
18375 // AhiBlo = psllqi(AhiBlo, 32);
18376 // return AloBlo + AloBhi + AhiBlo;
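  //
  // This is 64-bit long multiplication done 32 bits at a time: with
  // a = 2^32*Ahi + Alo and b = 2^32*Bhi + Blo,
  //   a*b = AloBlo + 2^32*(AloBhi + AhiBlo) + 2^64*AhiBhi,
  // and the 2^64*AhiBhi term is dropped since it cannot affect the low 64 bits.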
18378 SDValue Ahi = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, VT, A, 32, DAG);
18379 SDValue Bhi = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, VT, B, 32, DAG);
18381 // Bit cast to 32-bit vectors for MULUDQ
18382 EVT MulVT = (VT == MVT::v2i64) ? MVT::v4i32 :
18383 (VT == MVT::v4i64) ? MVT::v8i32 : MVT::v16i32;
18384 A = DAG.getNode(ISD::BITCAST, dl, MulVT, A);
18385 B = DAG.getNode(ISD::BITCAST, dl, MulVT, B);
18386 Ahi = DAG.getNode(ISD::BITCAST, dl, MulVT, Ahi);
18387 Bhi = DAG.getNode(ISD::BITCAST, dl, MulVT, Bhi);
18389 SDValue AloBlo = DAG.getNode(X86ISD::PMULUDQ, dl, VT, A, B);
18390 SDValue AloBhi = DAG.getNode(X86ISD::PMULUDQ, dl, VT, A, Bhi);
18391 SDValue AhiBlo = DAG.getNode(X86ISD::PMULUDQ, dl, VT, Ahi, B);
18393 AloBhi = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, VT, AloBhi, 32, DAG);
18394 AhiBlo = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, VT, AhiBlo, 32, DAG);
18396 SDValue Res = DAG.getNode(ISD::ADD, dl, VT, AloBlo, AloBhi);
18397 return DAG.getNode(ISD::ADD, dl, VT, Res, AhiBlo);
18400 SDValue X86TargetLowering::LowerWin64_i128OP(SDValue Op, SelectionDAG &DAG) const {
18401 assert(Subtarget->isTargetWin64() && "Unexpected target");
18402 EVT VT = Op.getValueType();
18403 assert(VT.isInteger() && VT.getSizeInBits() == 128 &&
18404 "Unexpected return type for lowering");
18408 switch (Op->getOpcode()) {
18409 default: llvm_unreachable("Unexpected request for libcall!");
18410 case ISD::SDIV: isSigned = true; LC = RTLIB::SDIV_I128; break;
18411 case ISD::UDIV: isSigned = false; LC = RTLIB::UDIV_I128; break;
18412 case ISD::SREM: isSigned = true; LC = RTLIB::SREM_I128; break;
18413 case ISD::UREM: isSigned = false; LC = RTLIB::UREM_I128; break;
18414 case ISD::SDIVREM: isSigned = true; LC = RTLIB::SDIVREM_I128; break;
18415 case ISD::UDIVREM: isSigned = false; LC = RTLIB::UDIVREM_I128; break;
18419 SDValue InChain = DAG.getEntryNode();
18421 TargetLowering::ArgListTy Args;
18422 TargetLowering::ArgListEntry Entry;
18423 for (unsigned i = 0, e = Op->getNumOperands(); i != e; ++i) {
18424 EVT ArgVT = Op->getOperand(i).getValueType();
18425 assert(ArgVT.isInteger() && ArgVT.getSizeInBits() == 128 &&
18426 "Unexpected argument type for lowering");
18427 SDValue StackPtr = DAG.CreateStackTemporary(ArgVT, 16);
18428 Entry.Node = StackPtr;
18429 InChain = DAG.getStore(InChain, dl, Op->getOperand(i), StackPtr, MachinePointerInfo(),
18431 Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
18432 Entry.Ty = PointerType::get(ArgTy,0);
18433 Entry.isSExt = false;
18434 Entry.isZExt = false;
18435 Args.push_back(Entry);
18438 SDValue Callee = DAG.getExternalSymbol(getLibcallName(LC),
18441 TargetLowering::CallLoweringInfo CLI(DAG);
18442 CLI.setDebugLoc(dl).setChain(InChain)
18443 .setCallee(getLibcallCallingConv(LC),
18444 static_cast<EVT>(MVT::v2i64).getTypeForEVT(*DAG.getContext()),
18445 Callee, std::move(Args), 0)
18446 .setInRegister().setSExtResult(isSigned).setZExtResult(!isSigned);
18448 std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI);
18449 return DAG.getNode(ISD::BITCAST, dl, VT, CallInfo.first);
18452 static SDValue LowerMUL_LOHI(SDValue Op, const X86Subtarget *Subtarget,
18453 SelectionDAG &DAG) {
18454 SDValue Op0 = Op.getOperand(0), Op1 = Op.getOperand(1);
18455 EVT VT = Op0.getValueType();
18458 assert((VT == MVT::v4i32 && Subtarget->hasSSE2()) ||
18459 (VT == MVT::v8i32 && Subtarget->hasInt256()));
  // PMULxD operations multiply each even value (starting at 0) of LHS with
  // the corresponding value of RHS and produce a widened result.
  // E.g., PMULUDQ <4 x i32> <a|b|c|d>, <4 x i32> <e|f|g|h>
  // => <2 x i64> <ae|cg>
  //
  // In other words, to have all the results, we need to perform two PMULxD:
  // 1. one with the even values.
  // 2. one with the odd values.
  // To achieve #2, we need to place the odd values at an even position.
  //
  // Place the odd value at an even position (basically, shift all values 1
  // step to the left):
18473 const int Mask[] = {1, -1, 3, -1, 5, -1, 7, -1};
18474 // <a|b|c|d> => <b|undef|d|undef>
18475 SDValue Odd0 = DAG.getVectorShuffle(VT, dl, Op0, Op0, Mask);
18476 // <e|f|g|h> => <f|undef|h|undef>
18477 SDValue Odd1 = DAG.getVectorShuffle(VT, dl, Op1, Op1, Mask);
18479 // Emit two multiplies, one for the lower 2 ints and one for the higher 2
18481 MVT MulVT = VT == MVT::v4i32 ? MVT::v2i64 : MVT::v4i64;
18482 bool IsSigned = Op->getOpcode() == ISD::SMUL_LOHI;
18484 (!IsSigned || !Subtarget->hasSSE41()) ? X86ISD::PMULUDQ : X86ISD::PMULDQ;
18485 // PMULUDQ <4 x i32> <a|b|c|d>, <4 x i32> <e|f|g|h>
18486 // => <2 x i64> <ae|cg>
18487 SDValue Mul1 = DAG.getNode(ISD::BITCAST, dl, VT,
18488 DAG.getNode(Opcode, dl, MulVT, Op0, Op1));
18489 // PMULUDQ <4 x i32> <b|undef|d|undef>, <4 x i32> <f|undef|h|undef>
18490 // => <2 x i64> <bf|dh>
18491 SDValue Mul2 = DAG.getNode(ISD::BITCAST, dl, VT,
18492 DAG.getNode(Opcode, dl, MulVT, Odd0, Odd1));
18494 // Shuffle it back into the right order.
18495 SDValue Highs, Lows;
18496 if (VT == MVT::v8i32) {
18497 const int HighMask[] = {1, 9, 3, 11, 5, 13, 7, 15};
18498 Highs = DAG.getVectorShuffle(VT, dl, Mul1, Mul2, HighMask);
18499 const int LowMask[] = {0, 8, 2, 10, 4, 12, 6, 14};
18500 Lows = DAG.getVectorShuffle(VT, dl, Mul1, Mul2, LowMask);
18502 const int HighMask[] = {1, 5, 3, 7};
18503 Highs = DAG.getVectorShuffle(VT, dl, Mul1, Mul2, HighMask);
18504 const int LowMask[] = {0, 4, 2, 6};
18505 Lows = DAG.getVectorShuffle(VT, dl, Mul1, Mul2, LowMask);
  // If we have a signed multiply but no PMULDQ, fix up the high parts of an
  // unsigned multiply.
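  // For 32-bit lanes the identity is:
  //   hi_signed(a,b) = hi_unsigned(a,b) - (a < 0 ? b : 0) - (b < 0 ? a : 0)
  // and the arithmetic shifts by 31 below build exactly those two masked terms.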
18510 if (IsSigned && !Subtarget->hasSSE41()) {
18512 DAG.getConstant(31, DAG.getTargetLoweringInfo().getShiftAmountTy(VT));
18513 SDValue T1 = DAG.getNode(ISD::AND, dl, VT,
18514 DAG.getNode(ISD::SRA, dl, VT, Op0, ShAmt), Op1);
18515 SDValue T2 = DAG.getNode(ISD::AND, dl, VT,
18516 DAG.getNode(ISD::SRA, dl, VT, Op1, ShAmt), Op0);
18518 SDValue Fixup = DAG.getNode(ISD::ADD, dl, VT, T1, T2);
18519 Highs = DAG.getNode(ISD::SUB, dl, VT, Highs, Fixup);
  // The first result of MUL_LOHI is actually the low value, followed by the
  // high value.
18524 SDValue Ops[] = {Lows, Highs};
18525 return DAG.getMergeValues(Ops, dl);
18528 static SDValue LowerScalarImmediateShift(SDValue Op, SelectionDAG &DAG,
18529 const X86Subtarget *Subtarget) {
18530 MVT VT = Op.getSimpleValueType();
18532 SDValue R = Op.getOperand(0);
18533 SDValue Amt = Op.getOperand(1);
18535 // Optimize shl/srl/sra with constant shift amount.
18536 if (auto *BVAmt = dyn_cast<BuildVectorSDNode>(Amt)) {
18537 if (auto *ShiftConst = BVAmt->getConstantSplatNode()) {
18538 uint64_t ShiftAmt = ShiftConst->getZExtValue();
18540 if (VT == MVT::v2i64 || VT == MVT::v4i32 || VT == MVT::v8i16 ||
18541 (Subtarget->hasInt256() &&
18542 (VT == MVT::v4i64 || VT == MVT::v8i32 || VT == MVT::v16i16)) ||
18543 (Subtarget->hasAVX512() &&
18544 (VT == MVT::v8i64 || VT == MVT::v16i32))) {
18545 if (Op.getOpcode() == ISD::SHL)
18546 return getTargetVShiftByConstNode(X86ISD::VSHLI, dl, VT, R, ShiftAmt,
18548 if (Op.getOpcode() == ISD::SRL)
18549 return getTargetVShiftByConstNode(X86ISD::VSRLI, dl, VT, R, ShiftAmt,
18551 if (Op.getOpcode() == ISD::SRA && VT != MVT::v2i64 && VT != MVT::v4i64)
18552 return getTargetVShiftByConstNode(X86ISD::VSRAI, dl, VT, R, ShiftAmt,
18556 if (VT == MVT::v16i8) {
18557 if (Op.getOpcode() == ISD::SHL) {
18558 // Make a large shift.
18559 SDValue SHL = getTargetVShiftByConstNode(X86ISD::VSHLI, dl,
18560 MVT::v8i16, R, ShiftAmt,
18562 SHL = DAG.getNode(ISD::BITCAST, dl, VT, SHL);
18563 // Zero out the rightmost bits.
18564 SmallVector<SDValue, 16> V(16,
18565 DAG.getConstant(uint8_t(-1U << ShiftAmt),
18567 return DAG.getNode(ISD::AND, dl, VT, SHL,
18568 DAG.getNode(ISD::BUILD_VECTOR, dl, VT, V));
18570 if (Op.getOpcode() == ISD::SRL) {
18571 // Make a large shift.
18572 SDValue SRL = getTargetVShiftByConstNode(X86ISD::VSRLI, dl,
18573 MVT::v8i16, R, ShiftAmt,
18575 SRL = DAG.getNode(ISD::BITCAST, dl, VT, SRL);
18576 // Zero out the leftmost bits.
18577 SmallVector<SDValue, 16> V(16,
18578 DAG.getConstant(uint8_t(-1U) >> ShiftAmt,
18580 return DAG.getNode(ISD::AND, dl, VT, SRL,
18581 DAG.getNode(ISD::BUILD_VECTOR, dl, VT, V));
18583 if (Op.getOpcode() == ISD::SRA) {
18584 if (ShiftAmt == 7) {
18585 // R s>> 7 === R s< 0
18586 SDValue Zeros = getZeroVector(VT, Subtarget, DAG, dl);
18587 return DAG.getNode(X86ISD::PCMPGT, dl, VT, Zeros, R);
18590 // R s>> a === ((R u>> a) ^ m) - m
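        // where m has only the original sign bit of each byte, shifted right
        // by 'a' (built below as a splat of 128 >> ShiftAmt); the xor/sub pair
        // sign-extends the bits that the logical shift brought in as zeros.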
18591 SDValue Res = DAG.getNode(ISD::SRL, dl, VT, R, Amt);
18592 SmallVector<SDValue, 16> V(16, DAG.getConstant(128 >> ShiftAmt,
18594 SDValue Mask = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, V);
18595 Res = DAG.getNode(ISD::XOR, dl, VT, Res, Mask);
18596 Res = DAG.getNode(ISD::SUB, dl, VT, Res, Mask);
18599 llvm_unreachable("Unknown shift opcode.");
18602 if (Subtarget->hasInt256() && VT == MVT::v32i8) {
18603 if (Op.getOpcode() == ISD::SHL) {
18604 // Make a large shift.
18605 SDValue SHL = getTargetVShiftByConstNode(X86ISD::VSHLI, dl,
18606 MVT::v16i16, R, ShiftAmt,
18608 SHL = DAG.getNode(ISD::BITCAST, dl, VT, SHL);
18609 // Zero out the rightmost bits.
18610 SmallVector<SDValue, 32> V(32,
18611 DAG.getConstant(uint8_t(-1U << ShiftAmt),
18613 return DAG.getNode(ISD::AND, dl, VT, SHL,
18614 DAG.getNode(ISD::BUILD_VECTOR, dl, VT, V));
18616 if (Op.getOpcode() == ISD::SRL) {
18617 // Make a large shift.
18618 SDValue SRL = getTargetVShiftByConstNode(X86ISD::VSRLI, dl,
18619 MVT::v16i16, R, ShiftAmt,
18621 SRL = DAG.getNode(ISD::BITCAST, dl, VT, SRL);
18622 // Zero out the leftmost bits.
18623 SmallVector<SDValue, 32> V(32,
18624 DAG.getConstant(uint8_t(-1U) >> ShiftAmt,
18626 return DAG.getNode(ISD::AND, dl, VT, SRL,
18627 DAG.getNode(ISD::BUILD_VECTOR, dl, VT, V));
18629 if (Op.getOpcode() == ISD::SRA) {
18630 if (ShiftAmt == 7) {
18631 // R s>> 7 === R s< 0
18632 SDValue Zeros = getZeroVector(VT, Subtarget, DAG, dl);
18633 return DAG.getNode(X86ISD::PCMPGT, dl, VT, Zeros, R);
18636 // R s>> a === ((R u>> a) ^ m) - m
18637 SDValue Res = DAG.getNode(ISD::SRL, dl, VT, R, Amt);
18638 SmallVector<SDValue, 32> V(32, DAG.getConstant(128 >> ShiftAmt,
18640 SDValue Mask = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, V);
18641 Res = DAG.getNode(ISD::XOR, dl, VT, Res, Mask);
18642 Res = DAG.getNode(ISD::SUB, dl, VT, Res, Mask);
18645 llvm_unreachable("Unknown shift opcode.");
18650 // Special case in 32-bit mode, where i64 is expanded into high and low parts.
18651 if (!Subtarget->is64Bit() &&
18652 (VT == MVT::v2i64 || (Subtarget->hasInt256() && VT == MVT::v4i64)) &&
18653 Amt.getOpcode() == ISD::BITCAST &&
18654 Amt.getOperand(0).getOpcode() == ISD::BUILD_VECTOR) {
18655 Amt = Amt.getOperand(0);
18656 unsigned Ratio = Amt.getSimpleValueType().getVectorNumElements() /
18657 VT.getVectorNumElements();
18658 unsigned RatioInLog2 = Log2_32_Ceil(Ratio);
18659 uint64_t ShiftAmt = 0;
18660 for (unsigned i = 0; i != Ratio; ++i) {
18661 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Amt.getOperand(i));
18665 ShiftAmt |= C->getZExtValue() << (i * (1 << (6 - RatioInLog2)));
18667 // Check remaining shift amounts.
18668 for (unsigned i = Ratio; i != Amt.getNumOperands(); i += Ratio) {
18669 uint64_t ShAmt = 0;
18670 for (unsigned j = 0; j != Ratio; ++j) {
18671 ConstantSDNode *C =
18672 dyn_cast<ConstantSDNode>(Amt.getOperand(i + j));
18676 ShAmt |= C->getZExtValue() << (j * (1 << (6 - RatioInLog2)));
18678 if (ShAmt != ShiftAmt)
18681 switch (Op.getOpcode()) {
18683 llvm_unreachable("Unknown shift opcode!");
18685 return getTargetVShiftByConstNode(X86ISD::VSHLI, dl, VT, R, ShiftAmt,
18688 return getTargetVShiftByConstNode(X86ISD::VSRLI, dl, VT, R, ShiftAmt,
18691 return getTargetVShiftByConstNode(X86ISD::VSRAI, dl, VT, R, ShiftAmt,
18699 static SDValue LowerScalarVariableShift(SDValue Op, SelectionDAG &DAG,
18700 const X86Subtarget* Subtarget) {
18701 MVT VT = Op.getSimpleValueType();
18703 SDValue R = Op.getOperand(0);
18704 SDValue Amt = Op.getOperand(1);
18706 if ((VT == MVT::v2i64 && Op.getOpcode() != ISD::SRA) ||
18707 VT == MVT::v4i32 || VT == MVT::v8i16 ||
18708 (Subtarget->hasInt256() &&
18709 ((VT == MVT::v4i64 && Op.getOpcode() != ISD::SRA) ||
18710 VT == MVT::v8i32 || VT == MVT::v16i16)) ||
18711 (Subtarget->hasAVX512() && (VT == MVT::v8i64 || VT == MVT::v16i32))) {
18713 EVT EltVT = VT.getVectorElementType();
18715 if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(Amt)) {
18716 // Check if this build_vector node is doing a splat.
18717 // If so, then set BaseShAmt equal to the splat value.
18718 BaseShAmt = BV->getSplatValue();
18719 if (BaseShAmt && BaseShAmt.getOpcode() == ISD::UNDEF)
18720 BaseShAmt = SDValue();
18722 if (Amt.getOpcode() == ISD::EXTRACT_SUBVECTOR)
18723 Amt = Amt.getOperand(0);
18725 ShuffleVectorSDNode *SVN = dyn_cast<ShuffleVectorSDNode>(Amt);
18726 if (SVN && SVN->isSplat()) {
18727 unsigned SplatIdx = (unsigned)SVN->getSplatIndex();
18728 SDValue InVec = Amt.getOperand(0);
18729 if (InVec.getOpcode() == ISD::BUILD_VECTOR) {
18730 assert((SplatIdx < InVec.getValueType().getVectorNumElements()) &&
18731 "Unexpected shuffle index found!");
18732 BaseShAmt = InVec.getOperand(SplatIdx);
18733 } else if (InVec.getOpcode() == ISD::INSERT_VECTOR_ELT) {
18734 if (ConstantSDNode *C =
18735 dyn_cast<ConstantSDNode>(InVec.getOperand(2))) {
18736 if (C->getZExtValue() == SplatIdx)
18737 BaseShAmt = InVec.getOperand(1);
18742 // Avoid introducing an extract element from a shuffle.
18743 BaseShAmt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, InVec,
18744 DAG.getIntPtrConstant(SplatIdx));
18748 if (BaseShAmt.getNode()) {
18749 assert(EltVT.bitsLE(MVT::i64) && "Unexpected element type!");
18750 if (EltVT != MVT::i64 && EltVT.bitsGT(MVT::i32))
18751 BaseShAmt = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64, BaseShAmt);
18752 else if (EltVT.bitsLT(MVT::i32))
18753 BaseShAmt = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, BaseShAmt);
18755 switch (Op.getOpcode()) {
18757 llvm_unreachable("Unknown shift opcode!");
18759 switch (VT.SimpleTy) {
18760 default: return SDValue();
18769 return getTargetVShiftNode(X86ISD::VSHLI, dl, VT, R, BaseShAmt, DAG);
18772 switch (VT.SimpleTy) {
18773 default: return SDValue();
18780 return getTargetVShiftNode(X86ISD::VSRAI, dl, VT, R, BaseShAmt, DAG);
18783 switch (VT.SimpleTy) {
18784 default: return SDValue();
18793 return getTargetVShiftNode(X86ISD::VSRLI, dl, VT, R, BaseShAmt, DAG);
18799 // Special case in 32-bit mode, where i64 is expanded into high and low parts.
18800 if (!Subtarget->is64Bit() &&
18801 (VT == MVT::v2i64 || (Subtarget->hasInt256() && VT == MVT::v4i64) ||
18802 (Subtarget->hasAVX512() && VT == MVT::v8i64)) &&
18803 Amt.getOpcode() == ISD::BITCAST &&
18804 Amt.getOperand(0).getOpcode() == ISD::BUILD_VECTOR) {
18805 Amt = Amt.getOperand(0);
18806 unsigned Ratio = Amt.getSimpleValueType().getVectorNumElements() /
18807 VT.getVectorNumElements();
18808 std::vector<SDValue> Vals(Ratio);
18809 for (unsigned i = 0; i != Ratio; ++i)
18810 Vals[i] = Amt.getOperand(i);
18811 for (unsigned i = Ratio; i != Amt.getNumOperands(); i += Ratio) {
18812 for (unsigned j = 0; j != Ratio; ++j)
18813 if (Vals[j] != Amt.getOperand(i + j))
18816 switch (Op.getOpcode()) {
18818 llvm_unreachable("Unknown shift opcode!");
18820 return DAG.getNode(X86ISD::VSHL, dl, VT, R, Op.getOperand(1));
18822 return DAG.getNode(X86ISD::VSRL, dl, VT, R, Op.getOperand(1));
18824 return DAG.getNode(X86ISD::VSRA, dl, VT, R, Op.getOperand(1));
18831 static SDValue LowerShift(SDValue Op, const X86Subtarget* Subtarget,
18832 SelectionDAG &DAG) {
18833 MVT VT = Op.getSimpleValueType();
18835 SDValue R = Op.getOperand(0);
18836 SDValue Amt = Op.getOperand(1);
18839 assert(VT.isVector() && "Custom lowering only for vector shifts!");
18840 assert(Subtarget->hasSSE2() && "Only custom lower when we have SSE2!");
18842 V = LowerScalarImmediateShift(Op, DAG, Subtarget);
18846 V = LowerScalarVariableShift(Op, DAG, Subtarget);
18850 if (Subtarget->hasAVX512() && (VT == MVT::v16i32 || VT == MVT::v8i64))
18852 // AVX2 has VPSLLV/VPSRAV/VPSRLV.
18853 if (Subtarget->hasInt256()) {
18854 if (Op.getOpcode() == ISD::SRL &&
18855 (VT == MVT::v2i64 || VT == MVT::v4i32 ||
18856 VT == MVT::v4i64 || VT == MVT::v8i32))
18858 if (Op.getOpcode() == ISD::SHL &&
18859 (VT == MVT::v2i64 || VT == MVT::v4i32 ||
18860 VT == MVT::v4i64 || VT == MVT::v8i32))
18862 if (Op.getOpcode() == ISD::SRA && (VT == MVT::v4i32 || VT == MVT::v8i32))
18866 // If possible, lower this packed shift into a vector multiply instead of
18867 // expanding it into a sequence of scalar shifts.
18868 // Do this only if the vector shift count is a constant build_vector.
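// For example, (v4i32 shl A, <1, 2, 3, 4>) becomes
// (v4i32 mul A, <2, 4, 8, 16>), i.e. each lane is multiplied by 1 << amount.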
18869 if (Op.getOpcode() == ISD::SHL &&
18870 (VT == MVT::v8i16 || VT == MVT::v4i32 ||
18871 (Subtarget->hasInt256() && VT == MVT::v16i16)) &&
18872 ISD::isBuildVectorOfConstantSDNodes(Amt.getNode())) {
18873 SmallVector<SDValue, 8> Elts;
18874 EVT SVT = VT.getScalarType();
18875 unsigned SVTBits = SVT.getSizeInBits();
18876 const APInt &One = APInt(SVTBits, 1);
18877 unsigned NumElems = VT.getVectorNumElements();
18879 for (unsigned i=0; i !=NumElems; ++i) {
18880 SDValue Op = Amt->getOperand(i);
18881 if (Op->getOpcode() == ISD::UNDEF) {
18882 Elts.push_back(Op);
18886 ConstantSDNode *ND = cast<ConstantSDNode>(Op);
18887 const APInt &C = APInt(SVTBits, ND->getAPIntValue().getZExtValue());
18888 uint64_t ShAmt = C.getZExtValue();
18889 if (ShAmt >= SVTBits) {
18890 Elts.push_back(DAG.getUNDEF(SVT));
18893 Elts.push_back(DAG.getConstant(One.shl(ShAmt), SVT));
18895 SDValue BV = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Elts);
18896 return DAG.getNode(ISD::MUL, dl, VT, R, BV);
18899 // Lower SHL with variable shift amount.
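// The sequence below builds 2^Amt per lane via the FP exponent field:
// (Amt << 23) + 0x3f800000 is the IEEE-754 bit pattern of 1.0f * 2^Amt
// (e.g. Amt = 3 gives 0x41000000 = 8.0f), so converting back to integer
// yields the power-of-two multiplier used by the final MUL.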
18900 if (VT == MVT::v4i32 && Op->getOpcode() == ISD::SHL) {
18901 Op = DAG.getNode(ISD::SHL, dl, VT, Amt, DAG.getConstant(23, VT));
18903 Op = DAG.getNode(ISD::ADD, dl, VT, Op, DAG.getConstant(0x3f800000U, VT));
18904 Op = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, Op);
18905 Op = DAG.getNode(ISD::FP_TO_SINT, dl, VT, Op);
18906 return DAG.getNode(ISD::MUL, dl, VT, Op, R);
18909 // If possible, lower this shift as a sequence of two shifts by
18910 // constant plus a MOVSS/MOVSD instead of scalarizing it.
18912 // (v4i32 (srl A, (build_vector < X, Y, Y, Y>)))
18914 // Could be rewritten as:
18915 // (v4i32 (MOVSS (srl A, <Y,Y,Y,Y>), (srl A, <X,X,X,X>)))
18917 // The advantage is that the two shifts from the example would be
18918 // lowered as X86ISD::VSRLI nodes. This would be cheaper than scalarizing
// the vector shift into four scalar shifts plus four pairs of vector
// insert/extract.
18921 if ((VT == MVT::v8i16 || VT == MVT::v4i32) &&
18922 ISD::isBuildVectorOfConstantSDNodes(Amt.getNode())) {
18923 unsigned TargetOpcode = X86ISD::MOVSS;
18924 bool CanBeSimplified;
18925 // The splat value for the first packed shift (the 'X' from the example).
18926 SDValue Amt1 = Amt->getOperand(0);
18927 // The splat value for the second packed shift (the 'Y' from the example).
18928 SDValue Amt2 = (VT == MVT::v4i32) ? Amt->getOperand(1) :
18929 Amt->getOperand(2);
18931 // See if it is possible to replace this node with a sequence of
18932 // two shifts followed by a MOVSS/MOVSD
18933 if (VT == MVT::v4i32) {
18934 // Check if it is legal to use a MOVSS.
18935 CanBeSimplified = Amt2 == Amt->getOperand(2) &&
18936 Amt2 == Amt->getOperand(3);
18937 if (!CanBeSimplified) {
18938 // Otherwise, check if we can still simplify this node using a MOVSD.
18939 CanBeSimplified = Amt1 == Amt->getOperand(1) &&
18940 Amt->getOperand(2) == Amt->getOperand(3);
18941 TargetOpcode = X86ISD::MOVSD;
18942 Amt2 = Amt->getOperand(2);
// Do similar checks for the case where the machine value type
// is MVT::v8i16.
18947 CanBeSimplified = Amt1 == Amt->getOperand(1);
18948 for (unsigned i=3; i != 8 && CanBeSimplified; ++i)
18949 CanBeSimplified = Amt2 == Amt->getOperand(i);
18951 if (!CanBeSimplified) {
18952 TargetOpcode = X86ISD::MOVSD;
18953 CanBeSimplified = true;
18954 Amt2 = Amt->getOperand(4);
18955 for (unsigned i=0; i != 4 && CanBeSimplified; ++i)
18956 CanBeSimplified = Amt1 == Amt->getOperand(i);
18957 for (unsigned j=4; j != 8 && CanBeSimplified; ++j)
18958 CanBeSimplified = Amt2 == Amt->getOperand(j);
18962 if (CanBeSimplified && isa<ConstantSDNode>(Amt1) &&
18963 isa<ConstantSDNode>(Amt2)) {
18964 // Replace this node with two shifts followed by a MOVSS/MOVSD.
18965 EVT CastVT = MVT::v4i32;
18967 DAG.getConstant(cast<ConstantSDNode>(Amt1)->getAPIntValue(), VT);
18968 SDValue Shift1 = DAG.getNode(Op->getOpcode(), dl, VT, R, Splat1);
18970 DAG.getConstant(cast<ConstantSDNode>(Amt2)->getAPIntValue(), VT);
18971 SDValue Shift2 = DAG.getNode(Op->getOpcode(), dl, VT, R, Splat2);
18972 if (TargetOpcode == X86ISD::MOVSD)
18973 CastVT = MVT::v2i64;
18974 SDValue BitCast1 = DAG.getNode(ISD::BITCAST, dl, CastVT, Shift1);
18975 SDValue BitCast2 = DAG.getNode(ISD::BITCAST, dl, CastVT, Shift2);
18976 SDValue Result = getTargetShuffleNode(TargetOpcode, dl, CastVT, BitCast2,
18978 return DAG.getNode(ISD::BITCAST, dl, VT, Result);
18982 if (VT == MVT::v16i8 && Op->getOpcode() == ISD::SHL) {
18983 assert(Subtarget->hasSSE2() && "Need SSE2 for pslli/pcmpeq.");
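// The v16i8 variable shift is built bit by bit: the amount's bits are
// examined from bit 2 down to bit 0 by first shifting it left by 5 and then
// doubling it each round; the top bit is tested with AND + PCMPEQ to form a
// VSELECT mask that conditionally shifts the data by 4, then 2, then 1.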
18986 Op = DAG.getNode(ISD::SHL, dl, VT, Amt, DAG.getConstant(5, VT));
18987 Op = DAG.getNode(ISD::BITCAST, dl, VT, Op);
18989 // Turn 'a' into a mask suitable for VSELECT
18990 SDValue VSelM = DAG.getConstant(0x80, VT);
18991 SDValue OpVSel = DAG.getNode(ISD::AND, dl, VT, VSelM, Op);
18992 OpVSel = DAG.getNode(X86ISD::PCMPEQ, dl, VT, OpVSel, VSelM);
18994 SDValue CM1 = DAG.getConstant(0x0f, VT);
18995 SDValue CM2 = DAG.getConstant(0x3f, VT);
18997 // r = VSELECT(r, psllw(r & (char16)15, 4), a);
18998 SDValue M = DAG.getNode(ISD::AND, dl, VT, R, CM1);
18999 M = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, MVT::v8i16, M, 4, DAG);
19000 M = DAG.getNode(ISD::BITCAST, dl, VT, M);
19001 R = DAG.getNode(ISD::VSELECT, dl, VT, OpVSel, M, R);
19004 Op = DAG.getNode(ISD::ADD, dl, VT, Op, Op);
19005 OpVSel = DAG.getNode(ISD::AND, dl, VT, VSelM, Op);
19006 OpVSel = DAG.getNode(X86ISD::PCMPEQ, dl, VT, OpVSel, VSelM);
19008 // r = VSELECT(r, psllw(r & (char16)63, 2), a);
19009 M = DAG.getNode(ISD::AND, dl, VT, R, CM2);
19010 M = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, MVT::v8i16, M, 2, DAG);
19011 M = DAG.getNode(ISD::BITCAST, dl, VT, M);
19012 R = DAG.getNode(ISD::VSELECT, dl, VT, OpVSel, M, R);
19015 Op = DAG.getNode(ISD::ADD, dl, VT, Op, Op);
19016 OpVSel = DAG.getNode(ISD::AND, dl, VT, VSelM, Op);
19017 OpVSel = DAG.getNode(X86ISD::PCMPEQ, dl, VT, OpVSel, VSelM);
19019 // return VSELECT(r, r+r, a);
19020 R = DAG.getNode(ISD::VSELECT, dl, VT, OpVSel,
19021 DAG.getNode(ISD::ADD, dl, VT, R, R), R);
19025 // It's worth extending once and using the v8i32 shifts for 16-bit types, but
19026 // the extra overheads to get from v16i8 to v8i32 make the existing SSE
19027 // solution better.
19028 if (Subtarget->hasInt256() && VT == MVT::v8i16) {
19029 MVT NewVT = VT == MVT::v8i16 ? MVT::v8i32 : MVT::v16i16;
19031 Op.getOpcode() == ISD::SRA ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
19032 R = DAG.getNode(ExtOpc, dl, NewVT, R);
19033 Amt = DAG.getNode(ISD::ANY_EXTEND, dl, NewVT, Amt);
19034 return DAG.getNode(ISD::TRUNCATE, dl, VT,
19035 DAG.getNode(Op.getOpcode(), dl, NewVT, R, Amt));
19038 // Decompose 256-bit shifts into smaller 128-bit shifts.
19039 if (VT.is256BitVector()) {
19040 unsigned NumElems = VT.getVectorNumElements();
19041 MVT EltVT = VT.getVectorElementType();
19042 EVT NewVT = MVT::getVectorVT(EltVT, NumElems/2);
19044 // Extract the two vectors
19045 SDValue V1 = Extract128BitVector(R, 0, DAG, dl);
19046 SDValue V2 = Extract128BitVector(R, NumElems/2, DAG, dl);
19048 // Recreate the shift amount vectors
19049 SDValue Amt1, Amt2;
19050 if (Amt.getOpcode() == ISD::BUILD_VECTOR) {
19051 // Constant shift amount
19052 SmallVector<SDValue, 4> Amt1Csts;
19053 SmallVector<SDValue, 4> Amt2Csts;
19054 for (unsigned i = 0; i != NumElems/2; ++i)
19055 Amt1Csts.push_back(Amt->getOperand(i));
19056 for (unsigned i = NumElems/2; i != NumElems; ++i)
19057 Amt2Csts.push_back(Amt->getOperand(i));
19059 Amt1 = DAG.getNode(ISD::BUILD_VECTOR, dl, NewVT, Amt1Csts);
19060 Amt2 = DAG.getNode(ISD::BUILD_VECTOR, dl, NewVT, Amt2Csts);
19062 // Variable shift amount
19063 Amt1 = Extract128BitVector(Amt, 0, DAG, dl);
19064 Amt2 = Extract128BitVector(Amt, NumElems/2, DAG, dl);
19067 // Issue new vector shifts for the smaller types
19068 V1 = DAG.getNode(Op.getOpcode(), dl, NewVT, V1, Amt1);
19069 V2 = DAG.getNode(Op.getOpcode(), dl, NewVT, V2, Amt2);
19071 // Concatenate the result back
19072 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, V1, V2);
19078 static SDValue LowerXALUO(SDValue Op, SelectionDAG &DAG) {
19079 // Lower the "add/sub/mul with overflow" instruction into a regular ins plus
19080 // a "setcc" instruction that checks the overflow flag. The "brcond" lowering
19081 // looks for this combo and may remove the "setcc" instruction if the "setcc"
19082 // has only one use.
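// For example, (i32, i1) = uaddo(a, b) becomes Sum = X86ISD::ADD(a, b), which
// also produces EFLAGS, plus a SETCC(COND_B) on that flags result.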
19083 SDNode *N = Op.getNode();
19084 SDValue LHS = N->getOperand(0);
19085 SDValue RHS = N->getOperand(1);
19086 unsigned BaseOp = 0;
19089 switch (Op.getOpcode()) {
19090 default: llvm_unreachable("Unknown ovf instruction!");
// An add of one will be selected as an INC. Note that INC doesn't
19093 // set CF, so we can't do this for UADDO.
19094 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS))
19096 BaseOp = X86ISD::INC;
19097 Cond = X86::COND_O;
19100 BaseOp = X86ISD::ADD;
19101 Cond = X86::COND_O;
19104 BaseOp = X86ISD::ADD;
19105 Cond = X86::COND_B;
19108 // A subtract of one will be selected as a DEC. Note that DEC doesn't
19109 // set CF, so we can't do this for USUBO.
19110 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS))
19112 BaseOp = X86ISD::DEC;
19113 Cond = X86::COND_O;
19116 BaseOp = X86ISD::SUB;
19117 Cond = X86::COND_O;
19120 BaseOp = X86ISD::SUB;
19121 Cond = X86::COND_B;
19124 BaseOp = N->getValueType(0) == MVT::i8 ? X86ISD::SMUL8 : X86ISD::SMUL;
19125 Cond = X86::COND_O;
19127 case ISD::UMULO: { // i64, i8 = umulo lhs, rhs --> i64, i64, i32 umul lhs,rhs
19128 if (N->getValueType(0) == MVT::i8) {
19129 BaseOp = X86ISD::UMUL8;
19130 Cond = X86::COND_O;
19133 SDVTList VTs = DAG.getVTList(N->getValueType(0), N->getValueType(0),
19135 SDValue Sum = DAG.getNode(X86ISD::UMUL, DL, VTs, LHS, RHS);
19138 DAG.getNode(X86ISD::SETCC, DL, MVT::i8,
19139 DAG.getConstant(X86::COND_O, MVT::i32),
19140 SDValue(Sum.getNode(), 2));
19142 return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(), Sum, SetCC);
19146 // Also sets EFLAGS.
19147 SDVTList VTs = DAG.getVTList(N->getValueType(0), MVT::i32);
19148 SDValue Sum = DAG.getNode(BaseOp, DL, VTs, LHS, RHS);
19151 DAG.getNode(X86ISD::SETCC, DL, N->getValueType(1),
19152 DAG.getConstant(Cond, MVT::i32),
19153 SDValue(Sum.getNode(), 1));
19155 return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(), Sum, SetCC);
19158 // Sign extension of the low part of vector elements. This may be used either
19159 // when sign extend instructions are not available or if the vector element
19160 // sizes already match the sign-extended size. If the vector elements are in
19161 // their pre-extended size and sign extend instructions are available, that will
19162 // be handled by LowerSIGN_EXTEND.
19163 SDValue X86TargetLowering::LowerSIGN_EXTEND_INREG(SDValue Op,
19164 SelectionDAG &DAG) const {
19166 EVT ExtraVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
19167 MVT VT = Op.getSimpleValueType();
19169 if (!Subtarget->hasSSE2() || !VT.isVector())
19172 unsigned BitsDiff = VT.getScalarType().getSizeInBits() -
19173 ExtraVT.getScalarType().getSizeInBits();
19175 switch (VT.SimpleTy) {
19176 default: return SDValue();
19179 if (!Subtarget->hasFp256())
19181 if (!Subtarget->hasInt256()) {
19182 // needs to be split
19183 unsigned NumElems = VT.getVectorNumElements();
19185 // Extract the LHS vectors
19186 SDValue LHS = Op.getOperand(0);
19187 SDValue LHS1 = Extract128BitVector(LHS, 0, DAG, dl);
19188 SDValue LHS2 = Extract128BitVector(LHS, NumElems/2, DAG, dl);
19190 MVT EltVT = VT.getVectorElementType();
19191 EVT NewVT = MVT::getVectorVT(EltVT, NumElems/2);
19193 EVT ExtraEltVT = ExtraVT.getVectorElementType();
19194 unsigned ExtraNumElems = ExtraVT.getVectorNumElements();
19195 ExtraVT = EVT::getVectorVT(*DAG.getContext(), ExtraEltVT,
19197 SDValue Extra = DAG.getValueType(ExtraVT);
19199 LHS1 = DAG.getNode(Op.getOpcode(), dl, NewVT, LHS1, Extra);
19200 LHS2 = DAG.getNode(Op.getOpcode(), dl, NewVT, LHS2, Extra);
19202 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, LHS1, LHS2);
19207 SDValue Op0 = Op.getOperand(0);
19209 // This is a sign extension of some low part of vector elements without
19210 // changing the size of the vector elements themselves:
19211 // Shift-Left + Shift-Right-Algebraic.
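// For example, sign-extending the low 8 bits of each i32 lane uses
// BitsDiff = 32 - 8 = 24: (x << 24) s>> 24.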
19212 SDValue Shl = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, VT, Op0,
19214 return getTargetVShiftByConstNode(X86ISD::VSRAI, dl, VT, Shl, BitsDiff,
19220 /// Returns true if the operand type is exactly twice the native width, and
19221 /// the corresponding cmpxchg8b or cmpxchg16b instruction is available.
19222 /// Used to know whether to use cmpxchg8/16b when expanding atomic operations
19223 /// (otherwise we leave them alone to become __sync_fetch_and_... calls).
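/// For example, an i64 atomic on a 32-bit target requires cmpxchg8b, and an
/// i128 atomic on x86-64 requires cmpxchg16b.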
19224 bool X86TargetLowering::needsCmpXchgNb(const Type *MemType) const {
19225 unsigned OpWidth = MemType->getPrimitiveSizeInBits();
19228 return !Subtarget->is64Bit(); // FIXME this should be Subtarget.hasCmpxchg8b
19229 else if (OpWidth == 128)
19230 return Subtarget->hasCmpxchg16b();
19235 bool X86TargetLowering::shouldExpandAtomicStoreInIR(StoreInst *SI) const {
19236 return needsCmpXchgNb(SI->getValueOperand()->getType());
19239 // Note: this turns large loads into lock cmpxchg8b/16b.
19240 // FIXME: On 32 bits x86, fild/movq might be faster than lock cmpxchg8b.
19241 bool X86TargetLowering::shouldExpandAtomicLoadInIR(LoadInst *LI) const {
19242 auto PTy = cast<PointerType>(LI->getPointerOperand()->getType());
19243 return needsCmpXchgNb(PTy->getElementType());
19246 bool X86TargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
19247 unsigned NativeWidth = Subtarget->is64Bit() ? 64 : 32;
19248 const Type *MemType = AI->getType();
19250 // If the operand is too big, we must see if cmpxchg8/16b is available
19251 // and default to library calls otherwise.
19252 if (MemType->getPrimitiveSizeInBits() > NativeWidth)
19253 return needsCmpXchgNb(MemType);
19255 AtomicRMWInst::BinOp Op = AI->getOperation();
19258 llvm_unreachable("Unknown atomic operation");
19259 case AtomicRMWInst::Xchg:
19260 case AtomicRMWInst::Add:
19261 case AtomicRMWInst::Sub:
19262 // It's better to use xadd, xsub or xchg for these in all cases.
19264 case AtomicRMWInst::Or:
19265 case AtomicRMWInst::And:
19266 case AtomicRMWInst::Xor:
19267 // If the atomicrmw's result isn't actually used, we can just add a "lock"
19268 // prefix to a normal instruction for these operations.
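// For example, an atomicrmw 'or' whose result is unused can be emitted as a
// single "lock or" against memory instead of a cmpxchg loop.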
19269 return !AI->use_empty();
19270 case AtomicRMWInst::Nand:
19271 case AtomicRMWInst::Max:
19272 case AtomicRMWInst::Min:
19273 case AtomicRMWInst::UMax:
19274 case AtomicRMWInst::UMin:
19275 // These always require a non-trivial set of data operations on x86. We must
19276 // use a cmpxchg loop.
19281 static bool hasMFENCE(const X86Subtarget& Subtarget) {
19282 // Use mfence if we have SSE2 or we're on x86-64 (even if we asked for
// no-sse2). There isn't any reason to disable it if the target processor
// supports it.
19285 return Subtarget.hasSSE2() || Subtarget.is64Bit();
19289 X86TargetLowering::lowerIdempotentRMWIntoFencedLoad(AtomicRMWInst *AI) const {
19290 unsigned NativeWidth = Subtarget->is64Bit() ? 64 : 32;
19291 const Type *MemType = AI->getType();
19292 // Accesses larger than the native width are turned into cmpxchg/libcalls, so
19293 // there is no benefit in turning such RMWs into loads, and it is actually
// harmful as it introduces an mfence.
19295 if (MemType->getPrimitiveSizeInBits() > NativeWidth)
19298 auto Builder = IRBuilder<>(AI);
19299 Module *M = Builder.GetInsertBlock()->getParent()->getParent();
19300 auto SynchScope = AI->getSynchScope();
19301 // We must restrict the ordering to avoid generating loads with Release or
19302 // ReleaseAcquire orderings.
19303 auto Order = AtomicCmpXchgInst::getStrongestFailureOrdering(AI->getOrdering());
19304 auto Ptr = AI->getPointerOperand();
19306 // Before the load we need a fence. Here is an example lifted from
// http://www.hpl.hp.com/techreports/2012/HPL-2012-68.pdf showing why a fence
// is required:
// Thread 0:
//   x.store(1, relaxed);
//   r1 = y.fetch_add(0, release);
// Thread 1:
//   y.fetch_add(42, acquire);
19314 // r2 = x.load(relaxed);
19315 // r1 = r2 = 0 is impossible, but becomes possible if the idempotent rmw is
// lowered to just a load without a fence. An mfence flushes the store buffer,
// making the optimization clearly correct.
// FIXME: it is required if isAtLeastRelease(Order), but it is not clear
// otherwise; we might be able to be more aggressive on relaxed idempotent
19320 // rmw. In practice, they do not look useful, so we don't try to be
19321 // especially clever.
19322 if (SynchScope == SingleThread) {
19323 // FIXME: we could just insert an X86ISD::MEMBARRIER here, except we are at
19324 // the IR level, so we must wrap it in an intrinsic.
19326 } else if (hasMFENCE(*Subtarget)) {
19327 Function *MFence = llvm::Intrinsic::getDeclaration(M,
19328 Intrinsic::x86_sse2_mfence);
19329 Builder.CreateCall(MFence);
19331 // FIXME: it might make sense to use a locked operation here but on a
19332 // different cache-line to prevent cache-line bouncing. In practice it
19333 // is probably a small win, and x86 processors without mfence are rare
19334 // enough that we do not bother.
19338 // Finally we can emit the atomic load.
19339 LoadInst *Loaded = Builder.CreateAlignedLoad(Ptr,
19340 AI->getType()->getPrimitiveSizeInBits());
19341 Loaded->setAtomic(Order, SynchScope);
19342 AI->replaceAllUsesWith(Loaded);
19343 AI->eraseFromParent();
19347 static SDValue LowerATOMIC_FENCE(SDValue Op, const X86Subtarget *Subtarget,
19348 SelectionDAG &DAG) {
19350 AtomicOrdering FenceOrdering = static_cast<AtomicOrdering>(
19351 cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue());
19352 SynchronizationScope FenceScope = static_cast<SynchronizationScope>(
19353 cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue());
19355 // The only fence that needs an instruction is a sequentially-consistent
19356 // cross-thread fence.
19357 if (FenceOrdering == SequentiallyConsistent && FenceScope == CrossThread) {
19358 if (hasMFENCE(*Subtarget))
19359 return DAG.getNode(X86ISD::MFENCE, dl, MVT::Other, Op.getOperand(0));
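// Without MFENCE, fall back to a serializing locked memory operation: a
// locked "or" of zero into [esp] acts as a full fence.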
19361 SDValue Chain = Op.getOperand(0);
19362 SDValue Zero = DAG.getConstant(0, MVT::i32);
19364 DAG.getRegister(X86::ESP, MVT::i32), // Base
19365 DAG.getTargetConstant(1, MVT::i8), // Scale
19366 DAG.getRegister(0, MVT::i32), // Index
19367 DAG.getTargetConstant(0, MVT::i32), // Disp
19368 DAG.getRegister(0, MVT::i32), // Segment.
19372 SDNode *Res = DAG.getMachineNode(X86::OR32mrLocked, dl, MVT::Other, Ops);
19373 return SDValue(Res, 0);
19376 // MEMBARRIER is a compiler barrier; it codegens to a no-op.
19377 return DAG.getNode(X86ISD::MEMBARRIER, dl, MVT::Other, Op.getOperand(0));
19380 static SDValue LowerCMP_SWAP(SDValue Op, const X86Subtarget *Subtarget,
19381 SelectionDAG &DAG) {
19382 MVT T = Op.getSimpleValueType();
19386 switch(T.SimpleTy) {
19387 default: llvm_unreachable("Invalid value type!");
19388 case MVT::i8: Reg = X86::AL; size = 1; break;
19389 case MVT::i16: Reg = X86::AX; size = 2; break;
19390 case MVT::i32: Reg = X86::EAX; size = 4; break;
19392 assert(Subtarget->is64Bit() && "Node not type legal!");
19393 Reg = X86::RAX; size = 8;
19396 SDValue cpIn = DAG.getCopyToReg(Op.getOperand(0), DL, Reg,
19397 Op.getOperand(2), SDValue());
19398 SDValue Ops[] = { cpIn.getValue(0),
19401 DAG.getTargetConstant(size, MVT::i8),
19402 cpIn.getValue(1) };
19403 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue);
19404 MachineMemOperand *MMO = cast<AtomicSDNode>(Op)->getMemOperand();
19405 SDValue Result = DAG.getMemIntrinsicNode(X86ISD::LCMPXCHG_DAG, DL, Tys,
19409 DAG.getCopyFromReg(Result.getValue(0), DL, Reg, T, Result.getValue(1));
19410 SDValue EFLAGS = DAG.getCopyFromReg(cpOut.getValue(1), DL, X86::EFLAGS,
19411 MVT::i32, cpOut.getValue(2));
19412 SDValue Success = DAG.getNode(X86ISD::SETCC, DL, Op->getValueType(1),
19413 DAG.getConstant(X86::COND_E, MVT::i8), EFLAGS);
19415 DAG.ReplaceAllUsesOfValueWith(Op.getValue(0), cpOut);
19416 DAG.ReplaceAllUsesOfValueWith(Op.getValue(1), Success);
19417 DAG.ReplaceAllUsesOfValueWith(Op.getValue(2), EFLAGS.getValue(1));
19421 static SDValue LowerBITCAST(SDValue Op, const X86Subtarget *Subtarget,
19422 SelectionDAG &DAG) {
19423 MVT SrcVT = Op.getOperand(0).getSimpleValueType();
19424 MVT DstVT = Op.getSimpleValueType();
19426 if (SrcVT == MVT::v2i32 || SrcVT == MVT::v4i16 || SrcVT == MVT::v8i8) {
19427 assert(Subtarget->hasSSE2() && "Requires at least SSE2!");
19428 if (DstVT != MVT::f64)
19429 // This conversion needs to be expanded.
19432 SDValue InVec = Op->getOperand(0);
19434 unsigned NumElts = SrcVT.getVectorNumElements();
19435 EVT SVT = SrcVT.getVectorElementType();
// Widen the input vector in the case of MVT::v2i32.
19438 // Example: from MVT::v2i32 to MVT::v4i32.
19439 SmallVector<SDValue, 16> Elts;
19440 for (unsigned i = 0, e = NumElts; i != e; ++i)
19441 Elts.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, SVT, InVec,
19442 DAG.getIntPtrConstant(i)));
19444 // Explicitly mark the extra elements as Undef.
19445 SDValue Undef = DAG.getUNDEF(SVT);
19446 for (unsigned i = NumElts, e = NumElts * 2; i != e; ++i)
19447 Elts.push_back(Undef);
19449 EVT NewVT = EVT::getVectorVT(*DAG.getContext(), SVT, NumElts * 2);
19450 SDValue BV = DAG.getNode(ISD::BUILD_VECTOR, dl, NewVT, Elts);
19451 SDValue ToV2F64 = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, BV);
19452 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, ToV2F64,
19453 DAG.getIntPtrConstant(0));
19456 assert(Subtarget->is64Bit() && !Subtarget->hasSSE2() &&
19457 Subtarget->hasMMX() && "Unexpected custom BITCAST");
19458 assert((DstVT == MVT::i64 ||
19459 (DstVT.isVector() && DstVT.getSizeInBits()==64)) &&
19460 "Unexpected custom BITCAST");
19461 // i64 <=> MMX conversions are Legal.
19462 if (SrcVT==MVT::i64 && DstVT.isVector())
19464 if (DstVT==MVT::i64 && SrcVT.isVector())
19466 // MMX <=> MMX conversions are Legal.
19467 if (SrcVT.isVector() && DstVT.isVector())
19469 // All other conversions need to be expanded.
19473 static SDValue LowerCTPOP(SDValue Op, const X86Subtarget *Subtarget,
19474 SelectionDAG &DAG) {
19475 SDNode *Node = Op.getNode();
19478 Op = Op.getOperand(0);
19479 EVT VT = Op.getValueType();
19480 assert((VT.is128BitVector() || VT.is256BitVector()) &&
19481 "CTPOP lowering only implemented for 128/256-bit wide vector types");
19483 unsigned NumElts = VT.getVectorNumElements();
19484 EVT EltVT = VT.getVectorElementType();
19485 unsigned Len = EltVT.getSizeInBits();
19487 // This is the vectorized version of the "best" algorithm from
19488 // http://graphics.stanford.edu/~seander/bithacks.html#CountBitsSetParallel
19489 // with a minor tweak to use a series of adds + shifts instead of vector
19490 // multiplications. Implemented for the v2i64, v4i64, v4i32, v8i32 types:
19492 // v2i64, v4i64, v4i32 => Only profitable w/ popcnt disabled
19493 // v8i32 => Always profitable
// FIXME: There are a couple of possible improvements:
19497 // 1) Support for i8 and i16 vectors (needs measurements if popcnt enabled).
19498 // 2) Use strategies from http://wm.ite.pl/articles/sse-popcount.html
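// For a single 32-bit lane the steps below compute, in order:
//   v = v - ((v >> 1) & 0x55555555)                 // 2-bit partial counts
//   v = (v & 0x33333333) + ((v >> 2) & 0x33333333)  // 4-bit partial counts
//   v = (v + (v >> 4)) & 0x0F0F0F0F                 // 8-bit partial counts
// followed by v += v >> 8; v += v >> 16; and a final mask of the low bits.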
19500 assert(EltVT.isInteger() && (Len == 32 || Len == 64) && Len % 8 == 0 &&
19501 "CTPOP not implemented for this vector element type.");
// X86 canonicalizes ANDs to vXi64; generate the appropriate bitcasts to avoid
19504 // extra legalization.
19505 bool NeedsBitcast = EltVT == MVT::i32;
19506 MVT BitcastVT = VT.is256BitVector() ? MVT::v4i64 : MVT::v2i64;
19508 SDValue Cst55 = DAG.getConstant(APInt::getSplat(Len, APInt(8, 0x55)), EltVT);
19509 SDValue Cst33 = DAG.getConstant(APInt::getSplat(Len, APInt(8, 0x33)), EltVT);
19510 SDValue Cst0F = DAG.getConstant(APInt::getSplat(Len, APInt(8, 0x0F)), EltVT);
19512 // v = v - ((v >> 1) & 0x55555555...)
19513 SmallVector<SDValue, 8> Ones(NumElts, DAG.getConstant(1, EltVT));
19514 SDValue OnesV = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Ones);
19515 SDValue Srl = DAG.getNode(ISD::SRL, dl, VT, Op, OnesV);
19517 Srl = DAG.getNode(ISD::BITCAST, dl, BitcastVT, Srl);
19519 SmallVector<SDValue, 8> Mask55(NumElts, Cst55);
19520 SDValue M55 = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Mask55);
19522 M55 = DAG.getNode(ISD::BITCAST, dl, BitcastVT, M55);
19524 SDValue And = DAG.getNode(ISD::AND, dl, Srl.getValueType(), Srl, M55);
19525 if (VT != And.getValueType())
19526 And = DAG.getNode(ISD::BITCAST, dl, VT, And);
19527 SDValue Sub = DAG.getNode(ISD::SUB, dl, VT, Op, And);
19529 // v = (v & 0x33333333...) + ((v >> 2) & 0x33333333...)
19530 SmallVector<SDValue, 8> Mask33(NumElts, Cst33);
19531 SDValue M33 = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Mask33);
19532 SmallVector<SDValue, 8> Twos(NumElts, DAG.getConstant(2, EltVT));
19533 SDValue TwosV = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Twos);
19535 Srl = DAG.getNode(ISD::SRL, dl, VT, Sub, TwosV);
19536 if (NeedsBitcast) {
19537 Srl = DAG.getNode(ISD::BITCAST, dl, BitcastVT, Srl);
19538 M33 = DAG.getNode(ISD::BITCAST, dl, BitcastVT, M33);
19539 Sub = DAG.getNode(ISD::BITCAST, dl, BitcastVT, Sub);
19542 SDValue AndRHS = DAG.getNode(ISD::AND, dl, M33.getValueType(), Srl, M33);
19543 SDValue AndLHS = DAG.getNode(ISD::AND, dl, M33.getValueType(), Sub, M33);
19544 if (VT != AndRHS.getValueType()) {
19545 AndRHS = DAG.getNode(ISD::BITCAST, dl, VT, AndRHS);
19546 AndLHS = DAG.getNode(ISD::BITCAST, dl, VT, AndLHS);
19548 SDValue Add = DAG.getNode(ISD::ADD, dl, VT, AndLHS, AndRHS);
19550 // v = (v + (v >> 4)) & 0x0F0F0F0F...
19551 SmallVector<SDValue, 8> Fours(NumElts, DAG.getConstant(4, EltVT));
19552 SDValue FoursV = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Fours);
19553 Srl = DAG.getNode(ISD::SRL, dl, VT, Add, FoursV);
19554 Add = DAG.getNode(ISD::ADD, dl, VT, Add, Srl);
19556 SmallVector<SDValue, 8> Mask0F(NumElts, Cst0F);
19557 SDValue M0F = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Mask0F);
19558 if (NeedsBitcast) {
19559 Add = DAG.getNode(ISD::BITCAST, dl, BitcastVT, Add);
19560 M0F = DAG.getNode(ISD::BITCAST, dl, BitcastVT, M0F);
19562 And = DAG.getNode(ISD::AND, dl, M0F.getValueType(), Add, M0F);
19563 if (VT != And.getValueType())
19564 And = DAG.getNode(ISD::BITCAST, dl, VT, And);
19566 // The algorithm mentioned above uses:
19567 // v = (v * 0x01010101...) >> (Len - 8)
19569 // Change it to use vector adds + vector shifts which yield faster results on
19570 // Haswell than using vector integer multiplication.
19572 // For i32 elements:
19573 // v = v + (v >> 8)
19574 // v = v + (v >> 16)
19576 // For i64 elements:
19577 // v = v + (v >> 8)
19578 // v = v + (v >> 16)
19579 // v = v + (v >> 32)
19582 SmallVector<SDValue, 8> Csts;
19583 for (unsigned i = 8; i <= Len/2; i *= 2) {
19584 Csts.assign(NumElts, DAG.getConstant(i, EltVT));
19585 SDValue CstsV = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Csts);
19586 Srl = DAG.getNode(ISD::SRL, dl, VT, Add, CstsV);
19587 Add = DAG.getNode(ISD::ADD, dl, VT, Add, Srl);
// The result is in the least significant 6 bits for i32 elements and 7 bits for i64.
19592 SDValue Cst3F = DAG.getConstant(APInt(Len, Len == 32 ? 0x3F : 0x7F), EltVT);
19593 SmallVector<SDValue, 8> Cst3FV(NumElts, Cst3F);
19594 SDValue M3F = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Cst3FV);
19595 if (NeedsBitcast) {
19596 Add = DAG.getNode(ISD::BITCAST, dl, BitcastVT, Add);
19597 M3F = DAG.getNode(ISD::BITCAST, dl, BitcastVT, M3F);
19599 And = DAG.getNode(ISD::AND, dl, M3F.getValueType(), Add, M3F);
19600 if (VT != And.getValueType())
19601 And = DAG.getNode(ISD::BITCAST, dl, VT, And);
19606 static SDValue LowerLOAD_SUB(SDValue Op, SelectionDAG &DAG) {
19607 SDNode *Node = Op.getNode();
19609 EVT T = Node->getValueType(0);
19610 SDValue negOp = DAG.getNode(ISD::SUB, dl, T,
19611 DAG.getConstant(0, T), Node->getOperand(2));
19612 return DAG.getAtomic(ISD::ATOMIC_LOAD_ADD, dl,
19613 cast<AtomicSDNode>(Node)->getMemoryVT(),
19614 Node->getOperand(0),
19615 Node->getOperand(1), negOp,
19616 cast<AtomicSDNode>(Node)->getMemOperand(),
19617 cast<AtomicSDNode>(Node)->getOrdering(),
19618 cast<AtomicSDNode>(Node)->getSynchScope());
19621 static SDValue LowerATOMIC_STORE(SDValue Op, SelectionDAG &DAG) {
19622 SDNode *Node = Op.getNode();
19624 EVT VT = cast<AtomicSDNode>(Node)->getMemoryVT();
19626 // Convert seq_cst store -> xchg
19627 // Convert wide store -> swap (-> cmpxchg8b/cmpxchg16b)
19628 // FIXME: On 32-bit, store -> fist or movq would be more efficient
19629 // (The only way to get a 16-byte store is cmpxchg16b)
19630 // FIXME: 16-byte ATOMIC_SWAP isn't actually hooked up at the moment.
19631 if (cast<AtomicSDNode>(Node)->getOrdering() == SequentiallyConsistent ||
19632 !DAG.getTargetLoweringInfo().isTypeLegal(VT)) {
19633 SDValue Swap = DAG.getAtomic(ISD::ATOMIC_SWAP, dl,
19634 cast<AtomicSDNode>(Node)->getMemoryVT(),
19635 Node->getOperand(0),
19636 Node->getOperand(1), Node->getOperand(2),
19637 cast<AtomicSDNode>(Node)->getMemOperand(),
19638 cast<AtomicSDNode>(Node)->getOrdering(),
19639 cast<AtomicSDNode>(Node)->getSynchScope());
19640 return Swap.getValue(1);
19642 // Other atomic stores have a simple pattern.
19646 static SDValue LowerADDC_ADDE_SUBC_SUBE(SDValue Op, SelectionDAG &DAG) {
19647 EVT VT = Op.getNode()->getSimpleValueType(0);
19649 // Let legalize expand this if it isn't a legal type yet.
19650 if (!DAG.getTargetLoweringInfo().isTypeLegal(VT))
19653 SDVTList VTs = DAG.getVTList(VT, MVT::i32);
19656 bool ExtraOp = false;
19657 switch (Op.getOpcode()) {
19658 default: llvm_unreachable("Invalid code");
19659 case ISD::ADDC: Opc = X86ISD::ADD; break;
19660 case ISD::ADDE: Opc = X86ISD::ADC; ExtraOp = true; break;
19661 case ISD::SUBC: Opc = X86ISD::SUB; break;
19662 case ISD::SUBE: Opc = X86ISD::SBB; ExtraOp = true; break;
19666 return DAG.getNode(Opc, SDLoc(Op), VTs, Op.getOperand(0),
19668 return DAG.getNode(Opc, SDLoc(Op), VTs, Op.getOperand(0),
19669 Op.getOperand(1), Op.getOperand(2));
19672 static SDValue LowerFSINCOS(SDValue Op, const X86Subtarget *Subtarget,
19673 SelectionDAG &DAG) {
19674 assert(Subtarget->isTargetDarwin() && Subtarget->is64Bit());
19676 // For MacOSX, we want to call an alternative entry point: __sincos_stret,
19677 // which returns the values as { float, float } (in XMM0) or
19678 // { double, double } (which is returned in XMM0, XMM1).
19680 SDValue Arg = Op.getOperand(0);
19681 EVT ArgVT = Arg.getValueType();
19682 Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
19684 TargetLowering::ArgListTy Args;
19685 TargetLowering::ArgListEntry Entry;
19689 Entry.isSExt = false;
19690 Entry.isZExt = false;
19691 Args.push_back(Entry);
19693 bool isF64 = ArgVT == MVT::f64;
19694 // Only optimize x86_64 for now. i386 is a bit messy. For f32,
19695 // the small struct {f32, f32} is returned in (eax, edx). For f64,
19696 // the results are returned via SRet in memory.
19697 const char *LibcallName = isF64 ? "__sincos_stret" : "__sincosf_stret";
19698 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
19699 SDValue Callee = DAG.getExternalSymbol(LibcallName, TLI.getPointerTy());
19701 Type *RetTy = isF64
19702 ? (Type*)StructType::get(ArgTy, ArgTy, nullptr)
19703 : (Type*)VectorType::get(ArgTy, 4);
19705 TargetLowering::CallLoweringInfo CLI(DAG);
19706 CLI.setDebugLoc(dl).setChain(DAG.getEntryNode())
19707 .setCallee(CallingConv::C, RetTy, Callee, std::move(Args), 0);
19709 std::pair<SDValue, SDValue> CallResult = TLI.LowerCallTo(CLI);
19712 // Returned in xmm0 and xmm1.
19713 return CallResult.first;
// Returned in bits 0:31 and 32:63 of xmm0.
19716 SDValue SinVal = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, ArgVT,
19717 CallResult.first, DAG.getIntPtrConstant(0));
19718 SDValue CosVal = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, ArgVT,
19719 CallResult.first, DAG.getIntPtrConstant(1));
19720 SDVTList Tys = DAG.getVTList(ArgVT, ArgVT);
19721 return DAG.getNode(ISD::MERGE_VALUES, dl, Tys, SinVal, CosVal);
19724 /// LowerOperation - Provide custom lowering hooks for some operations.
19726 SDValue X86TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
19727 switch (Op.getOpcode()) {
19728 default: llvm_unreachable("Should not custom lower this!");
19729 case ISD::SIGN_EXTEND_INREG: return LowerSIGN_EXTEND_INREG(Op,DAG);
19730 case ISD::ATOMIC_FENCE: return LowerATOMIC_FENCE(Op, Subtarget, DAG);
19731 case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS:
19732 return LowerCMP_SWAP(Op, Subtarget, DAG);
19733 case ISD::CTPOP: return LowerCTPOP(Op, Subtarget, DAG);
19734 case ISD::ATOMIC_LOAD_SUB: return LowerLOAD_SUB(Op,DAG);
19735 case ISD::ATOMIC_STORE: return LowerATOMIC_STORE(Op,DAG);
19736 case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG);
19737 case ISD::CONCAT_VECTORS: return LowerCONCAT_VECTORS(Op, DAG);
19738 case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG);
19739 case ISD::VSELECT: return LowerVSELECT(Op, DAG);
19740 case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG);
19741 case ISD::INSERT_VECTOR_ELT: return LowerINSERT_VECTOR_ELT(Op, DAG);
19742 case ISD::EXTRACT_SUBVECTOR: return LowerEXTRACT_SUBVECTOR(Op,Subtarget,DAG);
19743 case ISD::INSERT_SUBVECTOR: return LowerINSERT_SUBVECTOR(Op, Subtarget,DAG);
19744 case ISD::SCALAR_TO_VECTOR: return LowerSCALAR_TO_VECTOR(Op, DAG);
19745 case ISD::ConstantPool: return LowerConstantPool(Op, DAG);
19746 case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG);
19747 case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG);
19748 case ISD::ExternalSymbol: return LowerExternalSymbol(Op, DAG);
19749 case ISD::BlockAddress: return LowerBlockAddress(Op, DAG);
19750 case ISD::SHL_PARTS:
19751 case ISD::SRA_PARTS:
19752 case ISD::SRL_PARTS: return LowerShiftParts(Op, DAG);
19753 case ISD::SINT_TO_FP: return LowerSINT_TO_FP(Op, DAG);
19754 case ISD::UINT_TO_FP: return LowerUINT_TO_FP(Op, DAG);
19755 case ISD::TRUNCATE: return LowerTRUNCATE(Op, DAG);
19756 case ISD::ZERO_EXTEND: return LowerZERO_EXTEND(Op, Subtarget, DAG);
19757 case ISD::SIGN_EXTEND: return LowerSIGN_EXTEND(Op, Subtarget, DAG);
19758 case ISD::ANY_EXTEND: return LowerANY_EXTEND(Op, Subtarget, DAG);
19759 case ISD::FP_TO_SINT: return LowerFP_TO_SINT(Op, DAG);
19760 case ISD::FP_TO_UINT: return LowerFP_TO_UINT(Op, DAG);
19761 case ISD::FP_EXTEND: return LowerFP_EXTEND(Op, DAG);
19762 case ISD::LOAD: return LowerExtendedLoad(Op, Subtarget, DAG);
19764 case ISD::FNEG: return LowerFABSorFNEG(Op, DAG);
19765 case ISD::FCOPYSIGN: return LowerFCOPYSIGN(Op, DAG);
19766 case ISD::FGETSIGN: return LowerFGETSIGN(Op, DAG);
19767 case ISD::SETCC: return LowerSETCC(Op, DAG);
19768 case ISD::SELECT: return LowerSELECT(Op, DAG);
19769 case ISD::BRCOND: return LowerBRCOND(Op, DAG);
19770 case ISD::JumpTable: return LowerJumpTable(Op, DAG);
19771 case ISD::VASTART: return LowerVASTART(Op, DAG);
19772 case ISD::VAARG: return LowerVAARG(Op, DAG);
19773 case ISD::VACOPY: return LowerVACOPY(Op, Subtarget, DAG);
19774 case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, Subtarget, DAG);
19775 case ISD::INTRINSIC_VOID:
19776 case ISD::INTRINSIC_W_CHAIN: return LowerINTRINSIC_W_CHAIN(Op, Subtarget, DAG);
19777 case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG);
19778 case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG);
19779 case ISD::FRAME_TO_ARGS_OFFSET:
19780 return LowerFRAME_TO_ARGS_OFFSET(Op, DAG);
19781 case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG);
19782 case ISD::EH_RETURN: return LowerEH_RETURN(Op, DAG);
19783 case ISD::EH_SJLJ_SETJMP: return lowerEH_SJLJ_SETJMP(Op, DAG);
19784 case ISD::EH_SJLJ_LONGJMP: return lowerEH_SJLJ_LONGJMP(Op, DAG);
19785 case ISD::INIT_TRAMPOLINE: return LowerINIT_TRAMPOLINE(Op, DAG);
19786 case ISD::ADJUST_TRAMPOLINE: return LowerADJUST_TRAMPOLINE(Op, DAG);
19787 case ISD::FLT_ROUNDS_: return LowerFLT_ROUNDS_(Op, DAG);
19788 case ISD::CTLZ: return LowerCTLZ(Op, DAG);
19789 case ISD::CTLZ_ZERO_UNDEF: return LowerCTLZ_ZERO_UNDEF(Op, DAG);
19790 case ISD::CTTZ: return LowerCTTZ(Op, DAG);
19791 case ISD::MUL: return LowerMUL(Op, Subtarget, DAG);
19792 case ISD::UMUL_LOHI:
19793 case ISD::SMUL_LOHI: return LowerMUL_LOHI(Op, Subtarget, DAG);
19796 case ISD::SHL: return LowerShift(Op, Subtarget, DAG);
19802 case ISD::UMULO: return LowerXALUO(Op, DAG);
19803 case ISD::READCYCLECOUNTER: return LowerREADCYCLECOUNTER(Op, Subtarget,DAG);
19804 case ISD::BITCAST: return LowerBITCAST(Op, Subtarget, DAG);
19808 case ISD::SUBE: return LowerADDC_ADDE_SUBC_SUBE(Op, DAG);
19809 case ISD::ADD: return LowerADD(Op, DAG);
19810 case ISD::SUB: return LowerSUB(Op, DAG);
19811 case ISD::FSINCOS: return LowerFSINCOS(Op, Subtarget, DAG);
19815 /// ReplaceNodeResults - Replace a node with an illegal result type
19816 /// with a new node built out of custom code.
19817 void X86TargetLowering::ReplaceNodeResults(SDNode *N,
19818 SmallVectorImpl<SDValue>&Results,
19819 SelectionDAG &DAG) const {
19821 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
19822 switch (N->getOpcode()) {
19824 llvm_unreachable("Do not know how to custom type legalize this operation!");
19825 // We might have generated v2f32 FMIN/FMAX operations. Widen them to v4f32.
19826 case X86ISD::FMINC:
19828 case X86ISD::FMAXC:
19829 case X86ISD::FMAX: {
19830 EVT VT = N->getValueType(0);
19831 if (VT != MVT::v2f32)
19832 llvm_unreachable("Unexpected type (!= v2f32) on FMIN/FMAX.");
19833 SDValue UNDEF = DAG.getUNDEF(VT);
19834 SDValue LHS = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4f32,
19835 N->getOperand(0), UNDEF);
19836 SDValue RHS = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4f32,
19837 N->getOperand(1), UNDEF);
19838 Results.push_back(DAG.getNode(N->getOpcode(), dl, MVT::v4f32, LHS, RHS));
19841 case ISD::SIGN_EXTEND_INREG:
19846 // We don't want to expand or promote these.
19853 case ISD::UDIVREM: {
19854 SDValue V = LowerWin64_i128OP(SDValue(N,0), DAG);
19855 Results.push_back(V);
19858 case ISD::FP_TO_SINT:
19859 case ISD::FP_TO_UINT: {
19860 bool IsSigned = N->getOpcode() == ISD::FP_TO_SINT;
19862 if (!IsSigned && !isIntegerTypeFTOL(SDValue(N, 0).getValueType()))
19865 std::pair<SDValue,SDValue> Vals =
19866 FP_TO_INTHelper(SDValue(N, 0), DAG, IsSigned, /*IsReplace=*/ true);
19867 SDValue FIST = Vals.first, StackSlot = Vals.second;
19868 if (FIST.getNode()) {
19869 EVT VT = N->getValueType(0);
19870 // Return a load from the stack slot.
19871 if (StackSlot.getNode())
19872 Results.push_back(DAG.getLoad(VT, dl, FIST, StackSlot,
19873 MachinePointerInfo(),
19874 false, false, false, 0));
19876 Results.push_back(FIST);
19880 case ISD::UINT_TO_FP: {
19881 assert(Subtarget->hasSSE2() && "Requires at least SSE2!");
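// v2i32 -> v2f32 uses the classic bias trick: OR-ing the zero-extended i32
// into the mantissa of the double 2^52 (0x4330000000000000) yields the exact
// value 2^52 + x, so subtracting the bias recovers x as a double, which is
// then rounded down to f32.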
19882 if (N->getOperand(0).getValueType() != MVT::v2i32 ||
19883 N->getValueType(0) != MVT::v2f32)
19885 SDValue ZExtIn = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v2i64,
19887 SDValue Bias = DAG.getConstantFP(BitsToDouble(0x4330000000000000ULL),
19889 SDValue VBias = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v2f64, Bias, Bias);
19890 SDValue Or = DAG.getNode(ISD::OR, dl, MVT::v2i64, ZExtIn,
19891 DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, VBias));
19892 Or = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Or);
19893 SDValue Sub = DAG.getNode(ISD::FSUB, dl, MVT::v2f64, Or, VBias);
19894 Results.push_back(DAG.getNode(X86ISD::VFPROUND, dl, MVT::v4f32, Sub));
19897 case ISD::FP_ROUND: {
19898 if (!TLI.isTypeLegal(N->getOperand(0).getValueType()))
19900 SDValue V = DAG.getNode(X86ISD::VFPROUND, dl, MVT::v4f32, N->getOperand(0));
19901 Results.push_back(V);
19904 case ISD::INTRINSIC_W_CHAIN: {
19905 unsigned IntNo = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
19907 default : llvm_unreachable("Do not know how to custom type "
19908 "legalize this intrinsic operation!");
19909 case Intrinsic::x86_rdtsc:
19910 return getReadTimeStampCounter(N, dl, X86ISD::RDTSC_DAG, DAG, Subtarget,
19912 case Intrinsic::x86_rdtscp:
19913 return getReadTimeStampCounter(N, dl, X86ISD::RDTSCP_DAG, DAG, Subtarget,
19915 case Intrinsic::x86_rdpmc:
19916 return getReadPerformanceCounter(N, dl, DAG, Subtarget, Results);
19919 case ISD::READCYCLECOUNTER: {
19920 return getReadTimeStampCounter(N, dl, X86ISD::RDTSC_DAG, DAG, Subtarget,
19923 case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS: {
19924 EVT T = N->getValueType(0);
19925 assert((T == MVT::i64 || T == MVT::i128) && "can only expand cmpxchg pair");
19926 bool Regs64bit = T == MVT::i128;
19927 EVT HalfT = Regs64bit ? MVT::i64 : MVT::i32;
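// cmpxchg8b/cmpxchg16b expects the comparand in EDX:EAX (RDX:RAX) and the
// desired value in ECX:EBX (RCX:RBX), and returns the previous value in
// EDX:EAX (RDX:RAX) with ZF set on success; the register copies below set
// that up from the two halves of each operand.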
19928 SDValue cpInL, cpInH;
19929 cpInL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(2),
19930 DAG.getConstant(0, HalfT));
19931 cpInH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(2),
19932 DAG.getConstant(1, HalfT));
19933 cpInL = DAG.getCopyToReg(N->getOperand(0), dl,
19934 Regs64bit ? X86::RAX : X86::EAX,
19936 cpInH = DAG.getCopyToReg(cpInL.getValue(0), dl,
19937 Regs64bit ? X86::RDX : X86::EDX,
19938 cpInH, cpInL.getValue(1));
19939 SDValue swapInL, swapInH;
19940 swapInL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(3),
19941 DAG.getConstant(0, HalfT));
19942 swapInH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(3),
19943 DAG.getConstant(1, HalfT));
19944 swapInL = DAG.getCopyToReg(cpInH.getValue(0), dl,
19945 Regs64bit ? X86::RBX : X86::EBX,
19946 swapInL, cpInH.getValue(1));
19947 swapInH = DAG.getCopyToReg(swapInL.getValue(0), dl,
19948 Regs64bit ? X86::RCX : X86::ECX,
19949 swapInH, swapInL.getValue(1));
19950 SDValue Ops[] = { swapInH.getValue(0),
19952 swapInH.getValue(1) };
19953 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue);
19954 MachineMemOperand *MMO = cast<AtomicSDNode>(N)->getMemOperand();
19955 unsigned Opcode = Regs64bit ? X86ISD::LCMPXCHG16_DAG :
19956 X86ISD::LCMPXCHG8_DAG;
19957 SDValue Result = DAG.getMemIntrinsicNode(Opcode, dl, Tys, Ops, T, MMO);
19958 SDValue cpOutL = DAG.getCopyFromReg(Result.getValue(0), dl,
19959 Regs64bit ? X86::RAX : X86::EAX,
19960 HalfT, Result.getValue(1));
19961 SDValue cpOutH = DAG.getCopyFromReg(cpOutL.getValue(1), dl,
19962 Regs64bit ? X86::RDX : X86::EDX,
19963 HalfT, cpOutL.getValue(2));
19964 SDValue OpsF[] = { cpOutL.getValue(0), cpOutH.getValue(0)};
19966 SDValue EFLAGS = DAG.getCopyFromReg(cpOutH.getValue(1), dl, X86::EFLAGS,
19967 MVT::i32, cpOutH.getValue(2));
19969 DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
19970 DAG.getConstant(X86::COND_E, MVT::i8), EFLAGS);
19971 Success = DAG.getZExtOrTrunc(Success, dl, N->getValueType(1));
19973 Results.push_back(DAG.getNode(ISD::BUILD_PAIR, dl, T, OpsF));
19974 Results.push_back(Success);
19975 Results.push_back(EFLAGS.getValue(1));
19978 case ISD::ATOMIC_SWAP:
19979 case ISD::ATOMIC_LOAD_ADD:
19980 case ISD::ATOMIC_LOAD_SUB:
19981 case ISD::ATOMIC_LOAD_AND:
19982 case ISD::ATOMIC_LOAD_OR:
19983 case ISD::ATOMIC_LOAD_XOR:
19984 case ISD::ATOMIC_LOAD_NAND:
19985 case ISD::ATOMIC_LOAD_MIN:
19986 case ISD::ATOMIC_LOAD_MAX:
19987 case ISD::ATOMIC_LOAD_UMIN:
19988 case ISD::ATOMIC_LOAD_UMAX:
19989 case ISD::ATOMIC_LOAD: {
19990 // Delegate to generic TypeLegalization. Situations we can really handle
19991 // should have already been dealt with by AtomicExpandPass.cpp.
19994 case ISD::BITCAST: {
19995 assert(Subtarget->hasSSE2() && "Requires at least SSE2!");
19996 EVT DstVT = N->getValueType(0);
19997 EVT SrcVT = N->getOperand(0)->getValueType(0);
19999 if (SrcVT != MVT::f64 ||
20000 (DstVT != MVT::v2i32 && DstVT != MVT::v4i16 && DstVT != MVT::v8i8))
20003 unsigned NumElts = DstVT.getVectorNumElements();
20004 EVT SVT = DstVT.getVectorElementType();
20005 EVT WiderVT = EVT::getVectorVT(*DAG.getContext(), SVT, NumElts * 2);
20006 SDValue Expanded = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl,
20007 MVT::v2f64, N->getOperand(0));
20008 SDValue ToVecInt = DAG.getNode(ISD::BITCAST, dl, WiderVT, Expanded);
20010 if (ExperimentalVectorWideningLegalization) {
20011 // If we are legalizing vectors by widening, we already have the desired
20012 // legal vector type, just return it.
20013 Results.push_back(ToVecInt);
20017 SmallVector<SDValue, 8> Elts;
20018 for (unsigned i = 0, e = NumElts; i != e; ++i)
20019 Elts.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, SVT,
20020 ToVecInt, DAG.getIntPtrConstant(i)));
20022 Results.push_back(DAG.getNode(ISD::BUILD_VECTOR, dl, DstVT, Elts));
20027 const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const {
20029 default: return nullptr;
20030 case X86ISD::BSF: return "X86ISD::BSF";
20031 case X86ISD::BSR: return "X86ISD::BSR";
20032 case X86ISD::SHLD: return "X86ISD::SHLD";
20033 case X86ISD::SHRD: return "X86ISD::SHRD";
20034 case X86ISD::FAND: return "X86ISD::FAND";
20035 case X86ISD::FANDN: return "X86ISD::FANDN";
20036 case X86ISD::FOR: return "X86ISD::FOR";
20037 case X86ISD::FXOR: return "X86ISD::FXOR";
20038 case X86ISD::FSRL: return "X86ISD::FSRL";
20039 case X86ISD::FILD: return "X86ISD::FILD";
20040 case X86ISD::FILD_FLAG: return "X86ISD::FILD_FLAG";
20041 case X86ISD::FP_TO_INT16_IN_MEM: return "X86ISD::FP_TO_INT16_IN_MEM";
20042 case X86ISD::FP_TO_INT32_IN_MEM: return "X86ISD::FP_TO_INT32_IN_MEM";
20043 case X86ISD::FP_TO_INT64_IN_MEM: return "X86ISD::FP_TO_INT64_IN_MEM";
20044 case X86ISD::FLD: return "X86ISD::FLD";
20045 case X86ISD::FST: return "X86ISD::FST";
20046 case X86ISD::CALL: return "X86ISD::CALL";
20047 case X86ISD::RDTSC_DAG: return "X86ISD::RDTSC_DAG";
20048 case X86ISD::RDTSCP_DAG: return "X86ISD::RDTSCP_DAG";
20049 case X86ISD::RDPMC_DAG: return "X86ISD::RDPMC_DAG";
20050 case X86ISD::BT: return "X86ISD::BT";
20051 case X86ISD::CMP: return "X86ISD::CMP";
20052 case X86ISD::COMI: return "X86ISD::COMI";
20053 case X86ISD::UCOMI: return "X86ISD::UCOMI";
20054 case X86ISD::CMPM: return "X86ISD::CMPM";
20055 case X86ISD::CMPMU: return "X86ISD::CMPMU";
20056 case X86ISD::SETCC: return "X86ISD::SETCC";
20057 case X86ISD::SETCC_CARRY: return "X86ISD::SETCC_CARRY";
20058 case X86ISD::FSETCC: return "X86ISD::FSETCC";
20059 case X86ISD::CMOV: return "X86ISD::CMOV";
20060 case X86ISD::BRCOND: return "X86ISD::BRCOND";
20061 case X86ISD::RET_FLAG: return "X86ISD::RET_FLAG";
20062 case X86ISD::REP_STOS: return "X86ISD::REP_STOS";
20063 case X86ISD::REP_MOVS: return "X86ISD::REP_MOVS";
20064 case X86ISD::GlobalBaseReg: return "X86ISD::GlobalBaseReg";
20065 case X86ISD::Wrapper: return "X86ISD::Wrapper";
20066 case X86ISD::WrapperRIP: return "X86ISD::WrapperRIP";
20067 case X86ISD::PEXTRB: return "X86ISD::PEXTRB";
20068 case X86ISD::PEXTRW: return "X86ISD::PEXTRW";
20069 case X86ISD::INSERTPS: return "X86ISD::INSERTPS";
20070 case X86ISD::PINSRB: return "X86ISD::PINSRB";
20071 case X86ISD::PINSRW: return "X86ISD::PINSRW";
20072 case X86ISD::PSHUFB: return "X86ISD::PSHUFB";
20073 case X86ISD::ANDNP: return "X86ISD::ANDNP";
20074 case X86ISD::PSIGN: return "X86ISD::PSIGN";
20075 case X86ISD::BLENDI: return "X86ISD::BLENDI";
20076 case X86ISD::SHRUNKBLEND: return "X86ISD::SHRUNKBLEND";
20077 case X86ISD::SUBUS: return "X86ISD::SUBUS";
20078 case X86ISD::HADD: return "X86ISD::HADD";
20079 case X86ISD::HSUB: return "X86ISD::HSUB";
20080 case X86ISD::FHADD: return "X86ISD::FHADD";
20081 case X86ISD::FHSUB: return "X86ISD::FHSUB";
20082 case X86ISD::UMAX: return "X86ISD::UMAX";
20083 case X86ISD::UMIN: return "X86ISD::UMIN";
20084 case X86ISD::SMAX: return "X86ISD::SMAX";
20085 case X86ISD::SMIN: return "X86ISD::SMIN";
20086 case X86ISD::FMAX: return "X86ISD::FMAX";
20087 case X86ISD::FMIN: return "X86ISD::FMIN";
20088 case X86ISD::FMAXC: return "X86ISD::FMAXC";
20089 case X86ISD::FMINC: return "X86ISD::FMINC";
20090 case X86ISD::FRSQRT: return "X86ISD::FRSQRT";
20091 case X86ISD::FRCP: return "X86ISD::FRCP";
20092 case X86ISD::TLSADDR: return "X86ISD::TLSADDR";
20093 case X86ISD::TLSBASEADDR: return "X86ISD::TLSBASEADDR";
20094 case X86ISD::TLSCALL: return "X86ISD::TLSCALL";
20095 case X86ISD::EH_SJLJ_SETJMP: return "X86ISD::EH_SJLJ_SETJMP";
20096 case X86ISD::EH_SJLJ_LONGJMP: return "X86ISD::EH_SJLJ_LONGJMP";
20097 case X86ISD::EH_RETURN: return "X86ISD::EH_RETURN";
20098 case X86ISD::TC_RETURN: return "X86ISD::TC_RETURN";
20099 case X86ISD::FNSTCW16m: return "X86ISD::FNSTCW16m";
20100 case X86ISD::FNSTSW16r: return "X86ISD::FNSTSW16r";
20101 case X86ISD::LCMPXCHG_DAG: return "X86ISD::LCMPXCHG_DAG";
20102 case X86ISD::LCMPXCHG8_DAG: return "X86ISD::LCMPXCHG8_DAG";
20103 case X86ISD::LCMPXCHG16_DAG: return "X86ISD::LCMPXCHG16_DAG";
20104 case X86ISD::VZEXT_MOVL: return "X86ISD::VZEXT_MOVL";
20105 case X86ISD::VZEXT_LOAD: return "X86ISD::VZEXT_LOAD";
20106 case X86ISD::VZEXT: return "X86ISD::VZEXT";
20107 case X86ISD::VSEXT: return "X86ISD::VSEXT";
20108 case X86ISD::VTRUNC: return "X86ISD::VTRUNC";
20109 case X86ISD::VTRUNCM: return "X86ISD::VTRUNCM";
20110 case X86ISD::VINSERT: return "X86ISD::VINSERT";
20111 case X86ISD::VFPEXT: return "X86ISD::VFPEXT";
20112 case X86ISD::VFPROUND: return "X86ISD::VFPROUND";
20113 case X86ISD::VSHLDQ: return "X86ISD::VSHLDQ";
20114 case X86ISD::VSRLDQ: return "X86ISD::VSRLDQ";
20115 case X86ISD::VSHL: return "X86ISD::VSHL";
20116 case X86ISD::VSRL: return "X86ISD::VSRL";
20117 case X86ISD::VSRA: return "X86ISD::VSRA";
20118 case X86ISD::VSHLI: return "X86ISD::VSHLI";
20119 case X86ISD::VSRLI: return "X86ISD::VSRLI";
20120 case X86ISD::VSRAI: return "X86ISD::VSRAI";
20121 case X86ISD::CMPP: return "X86ISD::CMPP";
20122 case X86ISD::PCMPEQ: return "X86ISD::PCMPEQ";
20123 case X86ISD::PCMPGT: return "X86ISD::PCMPGT";
20124 case X86ISD::PCMPEQM: return "X86ISD::PCMPEQM";
20125 case X86ISD::PCMPGTM: return "X86ISD::PCMPGTM";
20126 case X86ISD::ADD: return "X86ISD::ADD";
20127 case X86ISD::SUB: return "X86ISD::SUB";
20128 case X86ISD::ADC: return "X86ISD::ADC";
20129 case X86ISD::SBB: return "X86ISD::SBB";
20130 case X86ISD::SMUL: return "X86ISD::SMUL";
20131 case X86ISD::UMUL: return "X86ISD::UMUL";
20132 case X86ISD::SMUL8: return "X86ISD::SMUL8";
20133 case X86ISD::UMUL8: return "X86ISD::UMUL8";
20134 case X86ISD::SDIVREM8_SEXT_HREG: return "X86ISD::SDIVREM8_SEXT_HREG";
20135 case X86ISD::UDIVREM8_ZEXT_HREG: return "X86ISD::UDIVREM8_ZEXT_HREG";
20136 case X86ISD::INC: return "X86ISD::INC";
20137 case X86ISD::DEC: return "X86ISD::DEC";
20138 case X86ISD::OR: return "X86ISD::OR";
20139 case X86ISD::XOR: return "X86ISD::XOR";
20140 case X86ISD::AND: return "X86ISD::AND";
20141 case X86ISD::BEXTR: return "X86ISD::BEXTR";
20142 case X86ISD::MUL_IMM: return "X86ISD::MUL_IMM";
20143 case X86ISD::PTEST: return "X86ISD::PTEST";
20144 case X86ISD::TESTP: return "X86ISD::TESTP";
20145 case X86ISD::TESTM: return "X86ISD::TESTM";
20146 case X86ISD::TESTNM: return "X86ISD::TESTNM";
20147 case X86ISD::KORTEST: return "X86ISD::KORTEST";
20148 case X86ISD::PACKSS: return "X86ISD::PACKSS";
20149 case X86ISD::PACKUS: return "X86ISD::PACKUS";
20150 case X86ISD::PALIGNR: return "X86ISD::PALIGNR";
20151 case X86ISD::VALIGN: return "X86ISD::VALIGN";
20152 case X86ISD::PSHUFD: return "X86ISD::PSHUFD";
20153 case X86ISD::PSHUFHW: return "X86ISD::PSHUFHW";
20154 case X86ISD::PSHUFLW: return "X86ISD::PSHUFLW";
20155 case X86ISD::SHUFP: return "X86ISD::SHUFP";
20156 case X86ISD::MOVLHPS: return "X86ISD::MOVLHPS";
20157 case X86ISD::MOVLHPD: return "X86ISD::MOVLHPD";
20158 case X86ISD::MOVHLPS: return "X86ISD::MOVHLPS";
20159 case X86ISD::MOVLPS: return "X86ISD::MOVLPS";
20160 case X86ISD::MOVLPD: return "X86ISD::MOVLPD";
20161 case X86ISD::MOVDDUP: return "X86ISD::MOVDDUP";
20162 case X86ISD::MOVSHDUP: return "X86ISD::MOVSHDUP";
20163 case X86ISD::MOVSLDUP: return "X86ISD::MOVSLDUP";
20164 case X86ISD::MOVSD: return "X86ISD::MOVSD";
20165 case X86ISD::MOVSS: return "X86ISD::MOVSS";
20166 case X86ISD::UNPCKL: return "X86ISD::UNPCKL";
20167 case X86ISD::UNPCKH: return "X86ISD::UNPCKH";
20168 case X86ISD::VBROADCAST: return "X86ISD::VBROADCAST";
20169 case X86ISD::VBROADCASTM: return "X86ISD::VBROADCASTM";
20170 case X86ISD::VEXTRACT: return "X86ISD::VEXTRACT";
20171 case X86ISD::VPERMILPI: return "X86ISD::VPERMILPI";
20172 case X86ISD::VPERM2X128: return "X86ISD::VPERM2X128";
20173 case X86ISD::VPERMV: return "X86ISD::VPERMV";
20174 case X86ISD::VPERMV3: return "X86ISD::VPERMV3";
20175 case X86ISD::VPERMIV3: return "X86ISD::VPERMIV3";
20176 case X86ISD::VPERMI: return "X86ISD::VPERMI";
20177 case X86ISD::PMULUDQ: return "X86ISD::PMULUDQ";
20178 case X86ISD::PMULDQ: return "X86ISD::PMULDQ";
20179 case X86ISD::VASTART_SAVE_XMM_REGS: return "X86ISD::VASTART_SAVE_XMM_REGS";
20180 case X86ISD::VAARG_64: return "X86ISD::VAARG_64";
20181 case X86ISD::WIN_ALLOCA: return "X86ISD::WIN_ALLOCA";
20182 case X86ISD::MEMBARRIER: return "X86ISD::MEMBARRIER";
20183 case X86ISD::SEG_ALLOCA: return "X86ISD::SEG_ALLOCA";
20184 case X86ISD::WIN_FTOL: return "X86ISD::WIN_FTOL";
20185 case X86ISD::SAHF: return "X86ISD::SAHF";
20186 case X86ISD::RDRAND: return "X86ISD::RDRAND";
20187 case X86ISD::RDSEED: return "X86ISD::RDSEED";
20188 case X86ISD::FMADD: return "X86ISD::FMADD";
20189 case X86ISD::FMSUB: return "X86ISD::FMSUB";
20190 case X86ISD::FNMADD: return "X86ISD::FNMADD";
20191 case X86ISD::FNMSUB: return "X86ISD::FNMSUB";
20192 case X86ISD::FMADDSUB: return "X86ISD::FMADDSUB";
20193 case X86ISD::FMSUBADD: return "X86ISD::FMSUBADD";
20194 case X86ISD::PCMPESTRI: return "X86ISD::PCMPESTRI";
20195 case X86ISD::PCMPISTRI: return "X86ISD::PCMPISTRI";
20196 case X86ISD::XTEST: return "X86ISD::XTEST";
20197 case X86ISD::COMPRESS: return "X86ISD::COMPRESS";
20198 case X86ISD::EXPAND: return "X86ISD::EXPAND";
20199 case X86ISD::SELECT: return "X86ISD::SELECT";
20200 case X86ISD::ADDSUB: return "X86ISD::ADDSUB";
20201 case X86ISD::RCP28: return "X86ISD::RCP28";
20202 case X86ISD::RSQRT28: return "X86ISD::RSQRT28";
20206 // isLegalAddressingMode - Return true if the addressing mode represented
20207 // by AM is legal for this target, for a load/store of the specified type.
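// For example, an access of the form [BaseReg + IndexReg*4 + 1234] is legal,
// while a scale of 3 is only representable as BaseReg + IndexReg*2 and so is
// accepted only when the base-register slot is still free.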
20208 bool X86TargetLowering::isLegalAddressingMode(const AddrMode &AM,
20210 // X86 supports extremely general addressing modes.
20211 CodeModel::Model M = getTargetMachine().getCodeModel();
20212 Reloc::Model R = getTargetMachine().getRelocationModel();
20214 // X86 allows a sign-extended 32-bit immediate field as a displacement.
20215 if (!X86::isOffsetSuitableForCodeModel(AM.BaseOffs, M, AM.BaseGV != nullptr))
20220 Subtarget->ClassifyGlobalReference(AM.BaseGV, getTargetMachine());
20222 // If a reference to this global requires an extra load, we can't fold it.
20223 if (isGlobalStubReference(GVFlags))
20226 // If BaseGV requires a register for the PIC base, we cannot also have a
20227 // BaseReg specified.
20228 if (AM.HasBaseReg && isGlobalRelativeToPICBase(GVFlags))
20231 // If lower 4G is not available, then we must use rip-relative addressing.
20232 if ((M != CodeModel::Small || R != Reloc::Static) &&
20233 Subtarget->is64Bit() && (AM.BaseOffs || AM.Scale > 1))
20237 switch (AM.Scale) {
20243 // These scales always work.
20248 // These scales are formed with basereg+scalereg. Only accept if there is
20253 default: // Other stuff never works.
20260 bool X86TargetLowering::isVectorShiftByScalarCheap(Type *Ty) const {
20261 unsigned Bits = Ty->getScalarSizeInBits();
  // 8-bit shifts are always expensive, and versions with a scalar shift
  // amount aren't noticeably cheaper than those without.
20268 // On AVX2 there are new vpsllv[dq] instructions (and other shifts), that make
20269 // variable shifts just as cheap as scalar ones.
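  // (On AVX2 these are VPSLLVD/VPSRLVD/VPSRAVD for i32 elements and
  // VPSLLVQ/VPSRLVQ for i64 elements.)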
20270 if (Subtarget->hasInt256() && (Bits == 32 || Bits == 64))
20273 // Otherwise, it's significantly cheaper to shift by a scalar amount than by a
20274 // fully general vector.
20278 bool X86TargetLowering::isTruncateFree(Type *Ty1, Type *Ty2) const {
20279 if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
20281 unsigned NumBits1 = Ty1->getPrimitiveSizeInBits();
20282 unsigned NumBits2 = Ty2->getPrimitiveSizeInBits();
20283 return NumBits1 > NumBits2;
20286 bool X86TargetLowering::allowTruncateForTailCall(Type *Ty1, Type *Ty2) const {
20287 if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
20290 if (!isTypeLegal(EVT::getEVT(Ty1)))
20293 assert(Ty1->getPrimitiveSizeInBits() <= 64 && "i128 is probably not a noop");
20295 // Assuming the caller doesn't have a zeroext or signext return parameter,
20296 // truncation all the way down to i1 is valid.
20300 bool X86TargetLowering::isLegalICmpImmediate(int64_t Imm) const {
20301 return isInt<32>(Imm);
20304 bool X86TargetLowering::isLegalAddImmediate(int64_t Imm) const {
20305 // Can also use sub to handle negated immediates.
20306 return isInt<32>(Imm);
20309 bool X86TargetLowering::isTruncateFree(EVT VT1, EVT VT2) const {
20310 if (!VT1.isInteger() || !VT2.isInteger())
20312 unsigned NumBits1 = VT1.getSizeInBits();
20313 unsigned NumBits2 = VT2.getSizeInBits();
20314 return NumBits1 > NumBits2;
20317 bool X86TargetLowering::isZExtFree(Type *Ty1, Type *Ty2) const {
20318 // x86-64 implicitly zero-extends 32-bit results in 64-bit registers.
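  // E.g. any instruction that writes EAX also clears bits 63:32 of RAX, so no
  // separate zero-extension instruction is needed.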
20319 return Ty1->isIntegerTy(32) && Ty2->isIntegerTy(64) && Subtarget->is64Bit();
20322 bool X86TargetLowering::isZExtFree(EVT VT1, EVT VT2) const {
20323 // x86-64 implicitly zero-extends 32-bit results in 64-bit registers.
20324 return VT1 == MVT::i32 && VT2 == MVT::i64 && Subtarget->is64Bit();
20327 bool X86TargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
20328 EVT VT1 = Val.getValueType();
20329 if (isZExtFree(VT1, VT2))
20332 if (Val.getOpcode() != ISD::LOAD)
20335 if (!VT1.isSimple() || !VT1.isInteger() ||
20336 !VT2.isSimple() || !VT2.isInteger())
20339 switch (VT1.getSimpleVT().SimpleTy) {
20344 // X86 has 8, 16, and 32-bit zero-extending loads.
20352 X86TargetLowering::isFMAFasterThanFMulAndFAdd(EVT VT) const {
20353 if (!(Subtarget->hasFMA() || Subtarget->hasFMA4()))
20356 VT = VT.getScalarType();
20358 if (!VT.isSimple())
20361 switch (VT.getSimpleVT().SimpleTy) {
20372 bool X86TargetLowering::isNarrowingProfitable(EVT VT1, EVT VT2) const {
20373 // i16 instructions are longer (0x66 prefix) and potentially slower.
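  // (The operand-size override prefix can also cause length-changing-prefix
  // decode stalls on some microarchitectures.)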
20374 return !(VT1 == MVT::i32 && VT2 == MVT::i16);
20377 /// isShuffleMaskLegal - Targets can use this to indicate that they only
20378 /// support *some* VECTOR_SHUFFLE operations, those with specific masks.
20379 /// By default, if a target supports the VECTOR_SHUFFLE node, all mask values
20380 /// are assumed to be legal.
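/// For example, on SSE2 a v4i32 mask such as <2,3,0,1> is always legal because
/// it maps directly to a single PSHUFD immediate.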
20382 X86TargetLowering::isShuffleMaskLegal(const SmallVectorImpl<int> &M,
20384 if (!VT.isSimple())
20387 MVT SVT = VT.getSimpleVT();
20389 // Very little shuffling can be done for 64-bit vectors right now.
20390 if (VT.getSizeInBits() == 64)
20393 // This is an experimental legality test that is tailored to match the
20394 // legality test of the experimental lowering more closely. They are gated
20395 // separately to ease testing of performance differences.
20396 if (ExperimentalVectorShuffleLegality)
20397 // We only care that the types being shuffled are legal. The lowering can
20398 // handle any possible shuffle mask that results.
20399 return isTypeLegal(SVT);
  // If this is a single-input shuffle with no 128-bit lane crossings, we can
  // lower it into PSHUFB.
20403 if ((SVT.is128BitVector() && Subtarget->hasSSSE3()) ||
20404 (SVT.is256BitVector() && Subtarget->hasInt256())) {
20405 bool isLegal = true;
20406 for (unsigned I = 0, E = M.size(); I != E; ++I) {
20407 if (M[I] >= (int)SVT.getVectorNumElements() ||
20408 ShuffleCrosses128bitLane(SVT, I, M[I])) {
20417 // FIXME: blends, shifts.
20418 return (SVT.getVectorNumElements() == 2 ||
20419 ShuffleVectorSDNode::isSplatMask(&M[0], VT) ||
20420 isMOVLMask(M, SVT) ||
20421 isCommutedMOVLMask(M, SVT) ||
20422 isMOVHLPSMask(M, SVT) ||
20423 isSHUFPMask(M, SVT) ||
20424 isSHUFPMask(M, SVT, /* Commuted */ true) ||
20425 isPSHUFDMask(M, SVT) ||
20426 isPSHUFDMask(M, SVT, /* SecondOperand */ true) ||
20427 isPSHUFHWMask(M, SVT, Subtarget->hasInt256()) ||
20428 isPSHUFLWMask(M, SVT, Subtarget->hasInt256()) ||
20429 isPALIGNRMask(M, SVT, Subtarget) ||
20430 isUNPCKLMask(M, SVT, Subtarget->hasInt256()) ||
20431 isUNPCKHMask(M, SVT, Subtarget->hasInt256()) ||
20432 isUNPCKL_v_undef_Mask(M, SVT, Subtarget->hasInt256()) ||
20433 isUNPCKH_v_undef_Mask(M, SVT, Subtarget->hasInt256()) ||
20434 isBlendMask(M, SVT, Subtarget->hasSSE41(), Subtarget->hasInt256()) ||
20435 (Subtarget->hasSSE41() && isINSERTPSMask(M, SVT)));
20439 X86TargetLowering::isVectorClearMaskLegal(const SmallVectorImpl<int> &Mask,
20441 if (!VT.isSimple())
20444 MVT SVT = VT.getSimpleVT();
20446 // This is an experimental legality test that is tailored to match the
20447 // legality test of the experimental lowering more closely. They are gated
20448 // separately to ease testing of performance differences.
20449 if (ExperimentalVectorShuffleLegality)
20450 // The new vector shuffle lowering is very good at managing zero-inputs.
20451 return isShuffleMaskLegal(Mask, VT);
20453 unsigned NumElts = SVT.getVectorNumElements();
20454 // FIXME: This collection of masks seems suspect.
20457 if (NumElts == 4 && SVT.is128BitVector()) {
20458 return (isMOVLMask(Mask, SVT) ||
20459 isCommutedMOVLMask(Mask, SVT, true) ||
20460 isSHUFPMask(Mask, SVT) ||
20461 isSHUFPMask(Mask, SVT, /* Commuted */ true) ||
20462 isBlendMask(Mask, SVT, Subtarget->hasSSE41(),
20463 Subtarget->hasInt256()));
20468 //===----------------------------------------------------------------------===//
20469 // X86 Scheduler Hooks
20470 //===----------------------------------------------------------------------===//
20472 /// Utility function to emit xbegin specifying the start of an RTM region.
20473 static MachineBasicBlock *EmitXBegin(MachineInstr *MI, MachineBasicBlock *MBB,
20474 const TargetInstrInfo *TII) {
20475 DebugLoc DL = MI->getDebugLoc();
20477 const BasicBlock *BB = MBB->getBasicBlock();
20478 MachineFunction::iterator I = MBB;
  // For the v = xbegin(), we generate:
  //   thisMBB:  xbegin sinkMBB
  //   mainMBB:  eax = -1
  //   sinkMBB:  v = eax
20492 MachineBasicBlock *thisMBB = MBB;
20493 MachineFunction *MF = MBB->getParent();
20494 MachineBasicBlock *mainMBB = MF->CreateMachineBasicBlock(BB);
20495 MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB);
20496 MF->insert(I, mainMBB);
20497 MF->insert(I, sinkMBB);
20499 // Transfer the remainder of BB and its successor edges to sinkMBB.
20500 sinkMBB->splice(sinkMBB->begin(), MBB,
20501 std::next(MachineBasicBlock::iterator(MI)), MBB->end());
20502 sinkMBB->transferSuccessorsAndUpdatePHIs(MBB);
  // thisMBB:
  //  xbegin sinkMBB
  //  # fallthrough to mainMBB
  //  # on abort, jump to sinkMBB
20508 BuildMI(thisMBB, DL, TII->get(X86::XBEGIN_4)).addMBB(sinkMBB);
20509 thisMBB->addSuccessor(mainMBB);
20510 thisMBB->addSuccessor(sinkMBB);
20514 BuildMI(mainMBB, DL, TII->get(X86::MOV32ri), X86::EAX).addImm(-1);
20515 mainMBB->addSuccessor(sinkMBB);
20518 // EAX is live into the sinkMBB
20519 sinkMBB->addLiveIn(X86::EAX);
20520 BuildMI(*sinkMBB, sinkMBB->begin(), DL,
20521 TII->get(TargetOpcode::COPY), MI->getOperand(0).getReg())
20524 MI->eraseFromParent();
// FIXME: When we get size-specific XMM0 registers, i.e. XMM0_V16I8
// or XMM0_V32I8 in AVX, all of this code can be replaced with that
// in the .td file.
20531 static MachineBasicBlock *EmitPCMPSTRM(MachineInstr *MI, MachineBasicBlock *BB,
20532 const TargetInstrInfo *TII) {
20534 switch (MI->getOpcode()) {
20535 default: llvm_unreachable("illegal opcode!");
20536 case X86::PCMPISTRM128REG: Opc = X86::PCMPISTRM128rr; break;
20537 case X86::VPCMPISTRM128REG: Opc = X86::VPCMPISTRM128rr; break;
20538 case X86::PCMPISTRM128MEM: Opc = X86::PCMPISTRM128rm; break;
20539 case X86::VPCMPISTRM128MEM: Opc = X86::VPCMPISTRM128rm; break;
20540 case X86::PCMPESTRM128REG: Opc = X86::PCMPESTRM128rr; break;
20541 case X86::VPCMPESTRM128REG: Opc = X86::VPCMPESTRM128rr; break;
20542 case X86::PCMPESTRM128MEM: Opc = X86::PCMPESTRM128rm; break;
20543 case X86::VPCMPESTRM128MEM: Opc = X86::VPCMPESTRM128rm; break;
20546 DebugLoc dl = MI->getDebugLoc();
20547 MachineInstrBuilder MIB = BuildMI(*BB, MI, dl, TII->get(Opc));
20549 unsigned NumArgs = MI->getNumOperands();
20550 for (unsigned i = 1; i < NumArgs; ++i) {
20551 MachineOperand &Op = MI->getOperand(i);
20552 if (!(Op.isReg() && Op.isImplicit()))
20553 MIB.addOperand(Op);
20555 if (MI->hasOneMemOperand())
20556 MIB->setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
20558 BuildMI(*BB, MI, dl,
20559 TII->get(TargetOpcode::COPY), MI->getOperand(0).getReg())
20560 .addReg(X86::XMM0);
20562 MI->eraseFromParent();
20566 // FIXME: Custom handling because TableGen doesn't support multiple implicit
20567 // defs in an instruction pattern
20568 static MachineBasicBlock *EmitPCMPSTRI(MachineInstr *MI, MachineBasicBlock *BB,
20569 const TargetInstrInfo *TII) {
20571 switch (MI->getOpcode()) {
20572 default: llvm_unreachable("illegal opcode!");
20573 case X86::PCMPISTRIREG: Opc = X86::PCMPISTRIrr; break;
20574 case X86::VPCMPISTRIREG: Opc = X86::VPCMPISTRIrr; break;
20575 case X86::PCMPISTRIMEM: Opc = X86::PCMPISTRIrm; break;
20576 case X86::VPCMPISTRIMEM: Opc = X86::VPCMPISTRIrm; break;
20577 case X86::PCMPESTRIREG: Opc = X86::PCMPESTRIrr; break;
20578 case X86::VPCMPESTRIREG: Opc = X86::VPCMPESTRIrr; break;
20579 case X86::PCMPESTRIMEM: Opc = X86::PCMPESTRIrm; break;
20580 case X86::VPCMPESTRIMEM: Opc = X86::VPCMPESTRIrm; break;
20583 DebugLoc dl = MI->getDebugLoc();
20584 MachineInstrBuilder MIB = BuildMI(*BB, MI, dl, TII->get(Opc));
20586 unsigned NumArgs = MI->getNumOperands(); // remove the results
20587 for (unsigned i = 1; i < NumArgs; ++i) {
20588 MachineOperand &Op = MI->getOperand(i);
20589 if (!(Op.isReg() && Op.isImplicit()))
20590 MIB.addOperand(Op);
20592 if (MI->hasOneMemOperand())
20593 MIB->setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
20595 BuildMI(*BB, MI, dl,
20596 TII->get(TargetOpcode::COPY), MI->getOperand(0).getReg())
20599 MI->eraseFromParent();
20603 static MachineBasicBlock *EmitMonitor(MachineInstr *MI, MachineBasicBlock *BB,
20604 const X86Subtarget *Subtarget) {
20605 DebugLoc dl = MI->getDebugLoc();
20606 const TargetInstrInfo *TII = Subtarget->getInstrInfo();
20607 // Address into RAX/EAX, other two args into ECX, EDX.
20608 unsigned MemOpc = Subtarget->is64Bit() ? X86::LEA64r : X86::LEA32r;
20609 unsigned MemReg = Subtarget->is64Bit() ? X86::RAX : X86::EAX;
20610 MachineInstrBuilder MIB = BuildMI(*BB, MI, dl, TII->get(MemOpc), MemReg);
20611 for (int i = 0; i < X86::AddrNumOperands; ++i)
20612 MIB.addOperand(MI->getOperand(i));
20614 unsigned ValOps = X86::AddrNumOperands;
20615 BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY), X86::ECX)
20616 .addReg(MI->getOperand(ValOps).getReg());
20617 BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY), X86::EDX)
20618 .addReg(MI->getOperand(ValOps+1).getReg());
20620 // The instruction doesn't actually take any operands though.
20621 BuildMI(*BB, MI, dl, TII->get(X86::MONITORrrr));
20623 MI->eraseFromParent(); // The pseudo is gone now.
20627 MachineBasicBlock *
20628 X86TargetLowering::EmitVAARG64WithCustomInserter(MachineInstr *MI,
20629 MachineBasicBlock *MBB) const {
20630 // Emit va_arg instruction on X86-64.
20632 // Operands to this pseudo-instruction:
20633 // 0 ) Output : destination address (reg)
20634 // 1-5) Input : va_list address (addr, i64mem)
20635 // 6 ) ArgSize : Size (in bytes) of vararg type
20636 // 7 ) ArgMode : 0=overflow only, 1=use gp_offset, 2=use fp_offset
20637 // 8 ) Align : Alignment of type
20638 // 9 ) EFLAGS (implicit-def)
20640 assert(MI->getNumOperands() == 10 && "VAARG_64 should have 10 operands!");
20641 assert(X86::AddrNumOperands == 5 && "VAARG_64 assumes 5 address operands");
20643 unsigned DestReg = MI->getOperand(0).getReg();
20644 MachineOperand &Base = MI->getOperand(1);
20645 MachineOperand &Scale = MI->getOperand(2);
20646 MachineOperand &Index = MI->getOperand(3);
20647 MachineOperand &Disp = MI->getOperand(4);
20648 MachineOperand &Segment = MI->getOperand(5);
20649 unsigned ArgSize = MI->getOperand(6).getImm();
20650 unsigned ArgMode = MI->getOperand(7).getImm();
20651 unsigned Align = MI->getOperand(8).getImm();
20653 // Memory Reference
20654 assert(MI->hasOneMemOperand() && "Expected VAARG_64 to have one memoperand");
20655 MachineInstr::mmo_iterator MMOBegin = MI->memoperands_begin();
20656 MachineInstr::mmo_iterator MMOEnd = MI->memoperands_end();
20658 // Machine Information
20659 const TargetInstrInfo *TII = Subtarget->getInstrInfo();
20660 MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
20661 const TargetRegisterClass *AddrRegClass = getRegClassFor(MVT::i64);
20662 const TargetRegisterClass *OffsetRegClass = getRegClassFor(MVT::i32);
20663 DebugLoc DL = MI->getDebugLoc();
  // struct va_list {
  //   i32   gp_offset
  //   i32   fp_offset
  //   i64   overflow_area (address)
  //   i64   reg_save_area (address)
  // }
  // sizeof(va_list) = 24
  // alignment(va_list) = 8
20674 unsigned TotalNumIntRegs = 6;
20675 unsigned TotalNumXMMRegs = 8;
20676 bool UseGPOffset = (ArgMode == 1);
20677 bool UseFPOffset = (ArgMode == 2);
20678 unsigned MaxOffset = TotalNumIntRegs * 8 +
20679 (UseFPOffset ? TotalNumXMMRegs * 16 : 0);
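  // With fp_offset in use this is 6*8 + 8*16 = 176 bytes of reg_save_area;
  // with gp_offset alone it is 48 bytes.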
  // Align ArgSize to a multiple of 8.
20682 unsigned ArgSizeA8 = (ArgSize + 7) & ~7;
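  // E.g. ArgSize == 12 rounds up to ArgSizeA8 == 16.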
20683 bool NeedsAlign = (Align > 8);
20685 MachineBasicBlock *thisMBB = MBB;
20686 MachineBasicBlock *overflowMBB;
20687 MachineBasicBlock *offsetMBB;
20688 MachineBasicBlock *endMBB;
20690 unsigned OffsetDestReg = 0; // Argument address computed by offsetMBB
20691 unsigned OverflowDestReg = 0; // Argument address computed by overflowMBB
20692 unsigned OffsetReg = 0;
20694 if (!UseGPOffset && !UseFPOffset) {
20695 // If we only pull from the overflow region, we don't create a branch.
20696 // We don't need to alter control flow.
20697 OffsetDestReg = 0; // unused
20698 OverflowDestReg = DestReg;
20700 offsetMBB = nullptr;
20701 overflowMBB = thisMBB;
20704 // First emit code to check if gp_offset (or fp_offset) is below the bound.
20705 // If so, pull the argument from reg_save_area. (branch to offsetMBB)
20706 // If not, pull from overflow_area. (branch to overflowMBB)
  // thisMBB branches to either offsetMBB or overflowMBB; both rejoin at endMBB.
20716 // Registers for the PHI in endMBB
20717 OffsetDestReg = MRI.createVirtualRegister(AddrRegClass);
20718 OverflowDestReg = MRI.createVirtualRegister(AddrRegClass);
20720 const BasicBlock *LLVM_BB = MBB->getBasicBlock();
20721 MachineFunction *MF = MBB->getParent();
20722 overflowMBB = MF->CreateMachineBasicBlock(LLVM_BB);
20723 offsetMBB = MF->CreateMachineBasicBlock(LLVM_BB);
20724 endMBB = MF->CreateMachineBasicBlock(LLVM_BB);
20726 MachineFunction::iterator MBBIter = MBB;
20729 // Insert the new basic blocks
20730 MF->insert(MBBIter, offsetMBB);
20731 MF->insert(MBBIter, overflowMBB);
20732 MF->insert(MBBIter, endMBB);
20734 // Transfer the remainder of MBB and its successor edges to endMBB.
20735 endMBB->splice(endMBB->begin(), thisMBB,
20736 std::next(MachineBasicBlock::iterator(MI)), thisMBB->end());
20737 endMBB->transferSuccessorsAndUpdatePHIs(thisMBB);
20739 // Make offsetMBB and overflowMBB successors of thisMBB
20740 thisMBB->addSuccessor(offsetMBB);
20741 thisMBB->addSuccessor(overflowMBB);
20743 // endMBB is a successor of both offsetMBB and overflowMBB
20744 offsetMBB->addSuccessor(endMBB);
20745 overflowMBB->addSuccessor(endMBB);
20747 // Load the offset value into a register
20748 OffsetReg = MRI.createVirtualRegister(OffsetRegClass);
20749 BuildMI(thisMBB, DL, TII->get(X86::MOV32rm), OffsetReg)
20753 .addDisp(Disp, UseFPOffset ? 4 : 0)
20754 .addOperand(Segment)
20755 .setMemRefs(MMOBegin, MMOEnd);
20757 // Check if there is enough room left to pull this argument.
20758 BuildMI(thisMBB, DL, TII->get(X86::CMP32ri))
20760 .addImm(MaxOffset + 8 - ArgSizeA8);
20762 // Branch to "overflowMBB" if offset >= max
20763 // Fall through to "offsetMBB" otherwise
20764 BuildMI(thisMBB, DL, TII->get(X86::GetCondBranchFromCond(X86::COND_AE)))
20765 .addMBB(overflowMBB);
20768 // In offsetMBB, emit code to use the reg_save_area.
20770 assert(OffsetReg != 0);
20772 // Read the reg_save_area address.
20773 unsigned RegSaveReg = MRI.createVirtualRegister(AddrRegClass);
20774 BuildMI(offsetMBB, DL, TII->get(X86::MOV64rm), RegSaveReg)
20779 .addOperand(Segment)
20780 .setMemRefs(MMOBegin, MMOEnd);
20782 // Zero-extend the offset
20783 unsigned OffsetReg64 = MRI.createVirtualRegister(AddrRegClass);
20784 BuildMI(offsetMBB, DL, TII->get(X86::SUBREG_TO_REG), OffsetReg64)
20787 .addImm(X86::sub_32bit);
20789 // Add the offset to the reg_save_area to get the final address.
20790 BuildMI(offsetMBB, DL, TII->get(X86::ADD64rr), OffsetDestReg)
20791 .addReg(OffsetReg64)
20792 .addReg(RegSaveReg);
20794 // Compute the offset for the next argument
20795 unsigned NextOffsetReg = MRI.createVirtualRegister(OffsetRegClass);
20796 BuildMI(offsetMBB, DL, TII->get(X86::ADD32ri), NextOffsetReg)
20798 .addImm(UseFPOffset ? 16 : 8);
20800 // Store it back into the va_list.
20801 BuildMI(offsetMBB, DL, TII->get(X86::MOV32mr))
20805 .addDisp(Disp, UseFPOffset ? 4 : 0)
20806 .addOperand(Segment)
20807 .addReg(NextOffsetReg)
20808 .setMemRefs(MMOBegin, MMOEnd);
20811 BuildMI(offsetMBB, DL, TII->get(X86::JMP_1))
20816 // Emit code to use overflow area
20819 // Load the overflow_area address into a register.
20820 unsigned OverflowAddrReg = MRI.createVirtualRegister(AddrRegClass);
20821 BuildMI(overflowMBB, DL, TII->get(X86::MOV64rm), OverflowAddrReg)
20826 .addOperand(Segment)
20827 .setMemRefs(MMOBegin, MMOEnd);
20829 // If we need to align it, do so. Otherwise, just copy the address
20830 // to OverflowDestReg.
20832 // Align the overflow address
20833 assert((Align & (Align-1)) == 0 && "Alignment must be a power of 2");
20834 unsigned TmpReg = MRI.createVirtualRegister(AddrRegClass);
20836 // aligned_addr = (addr + (align-1)) & ~(align-1)
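    // E.g. addr == 0x1003 with align == 16 yields aligned_addr == 0x1010.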
    BuildMI(overflowMBB, DL, TII->get(X86::ADD64ri32), TmpReg)
      .addReg(OverflowAddrReg)
      .addImm(Align - 1);

    BuildMI(overflowMBB, DL, TII->get(X86::AND64ri32), OverflowDestReg)
      .addReg(TmpReg)
      .addImm(~(uint64_t)(Align - 1));
20845 BuildMI(overflowMBB, DL, TII->get(TargetOpcode::COPY), OverflowDestReg)
20846 .addReg(OverflowAddrReg);
20849 // Compute the next overflow address after this argument.
20850 // (the overflow address should be kept 8-byte aligned)
20851 unsigned NextAddrReg = MRI.createVirtualRegister(AddrRegClass);
20852 BuildMI(overflowMBB, DL, TII->get(X86::ADD64ri32), NextAddrReg)
20853 .addReg(OverflowDestReg)
20854 .addImm(ArgSizeA8);
20856 // Store the new overflow address.
20857 BuildMI(overflowMBB, DL, TII->get(X86::MOV64mr))
20862 .addOperand(Segment)
20863 .addReg(NextAddrReg)
20864 .setMemRefs(MMOBegin, MMOEnd);
20866 // If we branched, emit the PHI to the front of endMBB.
20868 BuildMI(*endMBB, endMBB->begin(), DL,
20869 TII->get(X86::PHI), DestReg)
20870 .addReg(OffsetDestReg).addMBB(offsetMBB)
20871 .addReg(OverflowDestReg).addMBB(overflowMBB);
20874 // Erase the pseudo instruction
20875 MI->eraseFromParent();
20880 MachineBasicBlock *
20881 X86TargetLowering::EmitVAStartSaveXMMRegsWithCustomInserter(
20883 MachineBasicBlock *MBB) const {
20884 // Emit code to save XMM registers to the stack. The ABI says that the
20885 // number of registers to save is given in %al, so it's theoretically
  // possible to do an indirect jump trick to avoid saving all of them;
  // however, this code takes a simpler approach and just executes all
20888 // of the stores if %al is non-zero. It's less code, and it's probably
20889 // easier on the hardware branch predictor, and stores aren't all that
20890 // expensive anyway.
20892 // Create the new basic blocks. One block contains all the XMM stores,
20893 // and one block is the final destination regardless of whether any
20894 // stores were performed.
20895 const BasicBlock *LLVM_BB = MBB->getBasicBlock();
20896 MachineFunction *F = MBB->getParent();
20897 MachineFunction::iterator MBBIter = MBB;
20899 MachineBasicBlock *XMMSaveMBB = F->CreateMachineBasicBlock(LLVM_BB);
20900 MachineBasicBlock *EndMBB = F->CreateMachineBasicBlock(LLVM_BB);
20901 F->insert(MBBIter, XMMSaveMBB);
20902 F->insert(MBBIter, EndMBB);
20904 // Transfer the remainder of MBB and its successor edges to EndMBB.
20905 EndMBB->splice(EndMBB->begin(), MBB,
20906 std::next(MachineBasicBlock::iterator(MI)), MBB->end());
20907 EndMBB->transferSuccessorsAndUpdatePHIs(MBB);
20909 // The original block will now fall through to the XMM save block.
20910 MBB->addSuccessor(XMMSaveMBB);
20911 // The XMMSaveMBB will fall through to the end block.
20912 XMMSaveMBB->addSuccessor(EndMBB);
20914 // Now add the instructions.
20915 const TargetInstrInfo *TII = Subtarget->getInstrInfo();
20916 DebugLoc DL = MI->getDebugLoc();
20918 unsigned CountReg = MI->getOperand(0).getReg();
20919 int64_t RegSaveFrameIndex = MI->getOperand(1).getImm();
20920 int64_t VarArgsFPOffset = MI->getOperand(2).getImm();
20922 if (!Subtarget->isTargetWin64()) {
20923 // If %al is 0, branch around the XMM save block.
20924 BuildMI(MBB, DL, TII->get(X86::TEST8rr)).addReg(CountReg).addReg(CountReg);
20925 BuildMI(MBB, DL, TII->get(X86::JE_1)).addMBB(EndMBB);
20926 MBB->addSuccessor(EndMBB);
20929 // Make sure the last operand is EFLAGS, which gets clobbered by the branch
20930 // that was just emitted, but clearly shouldn't be "saved".
20931 assert((MI->getNumOperands() <= 3 ||
20932 !MI->getOperand(MI->getNumOperands() - 1).isReg() ||
20933 MI->getOperand(MI->getNumOperands() - 1).getReg() == X86::EFLAGS)
20934 && "Expected last argument to be EFLAGS");
20935 unsigned MOVOpc = Subtarget->hasFp256() ? X86::VMOVAPSmr : X86::MOVAPSmr;
20936 // In the XMM save block, save all the XMM argument registers.
20937 for (int i = 3, e = MI->getNumOperands() - 1; i != e; ++i) {
20938 int64_t Offset = (i - 3) * 16 + VarArgsFPOffset;
20939 MachineMemOperand *MMO =
20940 F->getMachineMemOperand(
20941 MachinePointerInfo::getFixedStack(RegSaveFrameIndex, Offset),
20942 MachineMemOperand::MOStore,
20943 /*Size=*/16, /*Align=*/16);
20944 BuildMI(XMMSaveMBB, DL, TII->get(MOVOpc))
20945 .addFrameIndex(RegSaveFrameIndex)
20946 .addImm(/*Scale=*/1)
20947 .addReg(/*IndexReg=*/0)
20948 .addImm(/*Disp=*/Offset)
20949 .addReg(/*Segment=*/0)
20950 .addReg(MI->getOperand(i).getReg())
20951 .addMemOperand(MMO);
20954 MI->eraseFromParent(); // The pseudo instruction is gone now.
20959 // The EFLAGS operand of SelectItr might be missing a kill marker
20960 // because there were multiple uses of EFLAGS, and ISel didn't know
20961 // which to mark. Figure out whether SelectItr should have had a
// kill marker, and set it if it should. Returns the correct kill
// marker flag value.
20964 static bool checkAndUpdateEFLAGSKill(MachineBasicBlock::iterator SelectItr,
20965 MachineBasicBlock* BB,
20966 const TargetRegisterInfo* TRI) {
20967 // Scan forward through BB for a use/def of EFLAGS.
20968 MachineBasicBlock::iterator miI(std::next(SelectItr));
20969 for (MachineBasicBlock::iterator miE = BB->end(); miI != miE; ++miI) {
20970 const MachineInstr& mi = *miI;
20971 if (mi.readsRegister(X86::EFLAGS))
20973 if (mi.definesRegister(X86::EFLAGS))
20974 break; // Should have kill-flag - update below.
  // If we hit the end of the block, check whether EFLAGS is live into a
  // successor.
20979 if (miI == BB->end()) {
20980 for (MachineBasicBlock::succ_iterator sItr = BB->succ_begin(),
20981 sEnd = BB->succ_end();
20982 sItr != sEnd; ++sItr) {
20983 MachineBasicBlock* succ = *sItr;
20984 if (succ->isLiveIn(X86::EFLAGS))
20989 // We found a def, or hit the end of the basic block and EFLAGS wasn't live
20990 // out. SelectMI should have a kill flag on EFLAGS.
20991 SelectItr->addRegisterKilled(X86::EFLAGS, TRI);
20995 MachineBasicBlock *
20996 X86TargetLowering::EmitLoweredSelect(MachineInstr *MI,
20997 MachineBasicBlock *BB) const {
20998 const TargetInstrInfo *TII = Subtarget->getInstrInfo();
20999 DebugLoc DL = MI->getDebugLoc();
21001 // To "insert" a SELECT_CC instruction, we actually have to insert the
21002 // diamond control-flow pattern. The incoming instruction knows the
21003 // destination vreg to set, the condition code register to branch on, the
21004 // true/false values to select between, and a branch opcode to use.
21005 const BasicBlock *LLVM_BB = BB->getBasicBlock();
21006 MachineFunction::iterator It = BB;
  //   TrueVal = ...
  //   cmpTY ccX, r1, r2
  //   bCC sinkMBB
  //   fallthrough --> copy0MBB
21015 MachineBasicBlock *thisMBB = BB;
21016 MachineFunction *F = BB->getParent();
21017 MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB);
21018 MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
21019 F->insert(It, copy0MBB);
21020 F->insert(It, sinkMBB);
21022 // If the EFLAGS register isn't dead in the terminator, then claim that it's
21023 // live into the sink and copy blocks.
21024 const TargetRegisterInfo *TRI = Subtarget->getRegisterInfo();
21025 if (!MI->killsRegister(X86::EFLAGS) &&
21026 !checkAndUpdateEFLAGSKill(MI, BB, TRI)) {
21027 copy0MBB->addLiveIn(X86::EFLAGS);
21028 sinkMBB->addLiveIn(X86::EFLAGS);
21031 // Transfer the remainder of BB and its successor edges to sinkMBB.
21032 sinkMBB->splice(sinkMBB->begin(), BB,
21033 std::next(MachineBasicBlock::iterator(MI)), BB->end());
21034 sinkMBB->transferSuccessorsAndUpdatePHIs(BB);
21036 // Add the true and fallthrough blocks as its successors.
21037 BB->addSuccessor(copy0MBB);
21038 BB->addSuccessor(sinkMBB);
21040 // Create the conditional branch instruction.
21042 X86::GetCondBranchFromCond((X86::CondCode)MI->getOperand(3).getImm());
21043 BuildMI(BB, DL, TII->get(Opc)).addMBB(sinkMBB);
21046 // %FalseValue = ...
21047 // # fallthrough to sinkMBB
21048 copy0MBB->addSuccessor(sinkMBB);
21051 // %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
21053 BuildMI(*sinkMBB, sinkMBB->begin(), DL,
21054 TII->get(X86::PHI), MI->getOperand(0).getReg())
21055 .addReg(MI->getOperand(1).getReg()).addMBB(copy0MBB)
21056 .addReg(MI->getOperand(2).getReg()).addMBB(thisMBB);
21058 MI->eraseFromParent(); // The pseudo instruction is gone now.
21062 MachineBasicBlock *
21063 X86TargetLowering::EmitLoweredSegAlloca(MachineInstr *MI,
21064 MachineBasicBlock *BB) const {
21065 MachineFunction *MF = BB->getParent();
21066 const TargetInstrInfo *TII = Subtarget->getInstrInfo();
21067 DebugLoc DL = MI->getDebugLoc();
21068 const BasicBlock *LLVM_BB = BB->getBasicBlock();
21070 assert(MF->shouldSplitStack());
21072 const bool Is64Bit = Subtarget->is64Bit();
21073 const bool IsLP64 = Subtarget->isTarget64BitLP64();
21075 const unsigned TlsReg = Is64Bit ? X86::FS : X86::GS;
21076 const unsigned TlsOffset = IsLP64 ? 0x70 : Is64Bit ? 0x40 : 0x30;
  // BB:
  //   ... [till the alloca]
  //   If the stacklet is not large enough, jump to mallocMBB.
  // bumpMBB:
  //   Allocate by subtracting from RSP; jump to continueMBB.
  // mallocMBB:
  //   Allocate by a call into the runtime.
  // continueMBB:
  //   [rest of original BB]
21094 MachineBasicBlock *mallocMBB = MF->CreateMachineBasicBlock(LLVM_BB);
21095 MachineBasicBlock *bumpMBB = MF->CreateMachineBasicBlock(LLVM_BB);
21096 MachineBasicBlock *continueMBB = MF->CreateMachineBasicBlock(LLVM_BB);
21098 MachineRegisterInfo &MRI = MF->getRegInfo();
21099 const TargetRegisterClass *AddrRegClass =
21100 getRegClassFor(getPointerTy());
21102 unsigned mallocPtrVReg = MRI.createVirtualRegister(AddrRegClass),
21103 bumpSPPtrVReg = MRI.createVirtualRegister(AddrRegClass),
21104 tmpSPVReg = MRI.createVirtualRegister(AddrRegClass),
21105 SPLimitVReg = MRI.createVirtualRegister(AddrRegClass),
21106 sizeVReg = MI->getOperand(1).getReg(),
21107 physSPReg = IsLP64 || Subtarget->isTargetNaCl64() ? X86::RSP : X86::ESP;
21109 MachineFunction::iterator MBBIter = BB;
21112 MF->insert(MBBIter, bumpMBB);
21113 MF->insert(MBBIter, mallocMBB);
21114 MF->insert(MBBIter, continueMBB);
21116 continueMBB->splice(continueMBB->begin(), BB,
21117 std::next(MachineBasicBlock::iterator(MI)), BB->end());
21118 continueMBB->transferSuccessorsAndUpdatePHIs(BB);
21120 // Add code to the main basic block to check if the stack limit has been hit,
21121 // and if so, jump to mallocMBB otherwise to bumpMBB.
21122 BuildMI(BB, DL, TII->get(TargetOpcode::COPY), tmpSPVReg).addReg(physSPReg);
21123 BuildMI(BB, DL, TII->get(IsLP64 ? X86::SUB64rr:X86::SUB32rr), SPLimitVReg)
21124 .addReg(tmpSPVReg).addReg(sizeVReg);
21125 BuildMI(BB, DL, TII->get(IsLP64 ? X86::CMP64mr:X86::CMP32mr))
21126 .addReg(0).addImm(1).addReg(0).addImm(TlsOffset).addReg(TlsReg)
21127 .addReg(SPLimitVReg);
21128 BuildMI(BB, DL, TII->get(X86::JG_1)).addMBB(mallocMBB);
21130 // bumpMBB simply decreases the stack pointer, since we know the current
21131 // stacklet has enough space.
21132 BuildMI(bumpMBB, DL, TII->get(TargetOpcode::COPY), physSPReg)
21133 .addReg(SPLimitVReg);
21134 BuildMI(bumpMBB, DL, TII->get(TargetOpcode::COPY), bumpSPPtrVReg)
21135 .addReg(SPLimitVReg);
21136 BuildMI(bumpMBB, DL, TII->get(X86::JMP_1)).addMBB(continueMBB);
21138 // Calls into a routine in libgcc to allocate more space from the heap.
21139 const uint32_t *RegMask =
21140 Subtarget->getRegisterInfo()->getCallPreservedMask(CallingConv::C);
21142 BuildMI(mallocMBB, DL, TII->get(X86::MOV64rr), X86::RDI)
21144 BuildMI(mallocMBB, DL, TII->get(X86::CALL64pcrel32))
21145 .addExternalSymbol("__morestack_allocate_stack_space")
21146 .addRegMask(RegMask)
21147 .addReg(X86::RDI, RegState::Implicit)
21148 .addReg(X86::RAX, RegState::ImplicitDefine);
21149 } else if (Is64Bit) {
21150 BuildMI(mallocMBB, DL, TII->get(X86::MOV32rr), X86::EDI)
21152 BuildMI(mallocMBB, DL, TII->get(X86::CALL64pcrel32))
21153 .addExternalSymbol("__morestack_allocate_stack_space")
21154 .addRegMask(RegMask)
21155 .addReg(X86::EDI, RegState::Implicit)
21156 .addReg(X86::EAX, RegState::ImplicitDefine);
21158 BuildMI(mallocMBB, DL, TII->get(X86::SUB32ri), physSPReg).addReg(physSPReg)
21160 BuildMI(mallocMBB, DL, TII->get(X86::PUSH32r)).addReg(sizeVReg);
21161 BuildMI(mallocMBB, DL, TII->get(X86::CALLpcrel32))
21162 .addExternalSymbol("__morestack_allocate_stack_space")
21163 .addRegMask(RegMask)
21164 .addReg(X86::EAX, RegState::ImplicitDefine);
21168 BuildMI(mallocMBB, DL, TII->get(X86::ADD32ri), physSPReg).addReg(physSPReg)
21171 BuildMI(mallocMBB, DL, TII->get(TargetOpcode::COPY), mallocPtrVReg)
21172 .addReg(IsLP64 ? X86::RAX : X86::EAX);
21173 BuildMI(mallocMBB, DL, TII->get(X86::JMP_1)).addMBB(continueMBB);
21175 // Set up the CFG correctly.
21176 BB->addSuccessor(bumpMBB);
21177 BB->addSuccessor(mallocMBB);
21178 mallocMBB->addSuccessor(continueMBB);
21179 bumpMBB->addSuccessor(continueMBB);
21181 // Take care of the PHI nodes.
21182 BuildMI(*continueMBB, continueMBB->begin(), DL, TII->get(X86::PHI),
21183 MI->getOperand(0).getReg())
21184 .addReg(mallocPtrVReg).addMBB(mallocMBB)
21185 .addReg(bumpSPPtrVReg).addMBB(bumpMBB);
21187 // Delete the original pseudo instruction.
21188 MI->eraseFromParent();
21191 return continueMBB;
21194 MachineBasicBlock *
21195 X86TargetLowering::EmitLoweredWinAlloca(MachineInstr *MI,
21196 MachineBasicBlock *BB) const {
21197 DebugLoc DL = MI->getDebugLoc();
21199 assert(!Subtarget->isTargetMachO());
21201 X86FrameLowering::emitStackProbeCall(*BB->getParent(), *BB, MI, DL);
21203 MI->eraseFromParent(); // The pseudo instruction is gone now.
21207 MachineBasicBlock *
21208 X86TargetLowering::EmitLoweredTLSCall(MachineInstr *MI,
21209 MachineBasicBlock *BB) const {
21210 // This is pretty easy. We're taking the value that we received from
21211 // our load from the relocation, sticking it in either RDI (x86-64)
21212 // or EAX and doing an indirect call. The return value will then
21213 // be in the normal return register.
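  // For a hypothetical variable _var on x86-64, the emitted sequence is
  // roughly:
  //   movq _var@TLVP(%rip), %rdi
  //   callq *(%rdi)          # the variable's address comes back in %rax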
21214 MachineFunction *F = BB->getParent();
21215 const X86InstrInfo *TII = Subtarget->getInstrInfo();
21216 DebugLoc DL = MI->getDebugLoc();
21218 assert(Subtarget->isTargetDarwin() && "Darwin only instr emitted?");
21219 assert(MI->getOperand(3).isGlobal() && "This should be a global");
21221 // Get a register mask for the lowered call.
21222 // FIXME: The 32-bit calls have non-standard calling conventions. Use a
21223 // proper register mask.
21224 const uint32_t *RegMask =
21225 Subtarget->getRegisterInfo()->getCallPreservedMask(CallingConv::C);
21226 if (Subtarget->is64Bit()) {
21227 MachineInstrBuilder MIB = BuildMI(*BB, MI, DL,
21228 TII->get(X86::MOV64rm), X86::RDI)
21230 .addImm(0).addReg(0)
21231 .addGlobalAddress(MI->getOperand(3).getGlobal(), 0,
21232 MI->getOperand(3).getTargetFlags())
21234 MIB = BuildMI(*BB, MI, DL, TII->get(X86::CALL64m));
21235 addDirectMem(MIB, X86::RDI);
21236 MIB.addReg(X86::RAX, RegState::ImplicitDefine).addRegMask(RegMask);
21237 } else if (F->getTarget().getRelocationModel() != Reloc::PIC_) {
21238 MachineInstrBuilder MIB = BuildMI(*BB, MI, DL,
21239 TII->get(X86::MOV32rm), X86::EAX)
21241 .addImm(0).addReg(0)
21242 .addGlobalAddress(MI->getOperand(3).getGlobal(), 0,
21243 MI->getOperand(3).getTargetFlags())
21245 MIB = BuildMI(*BB, MI, DL, TII->get(X86::CALL32m));
21246 addDirectMem(MIB, X86::EAX);
21247 MIB.addReg(X86::EAX, RegState::ImplicitDefine).addRegMask(RegMask);
21249 MachineInstrBuilder MIB = BuildMI(*BB, MI, DL,
21250 TII->get(X86::MOV32rm), X86::EAX)
21251 .addReg(TII->getGlobalBaseReg(F))
21252 .addImm(0).addReg(0)
21253 .addGlobalAddress(MI->getOperand(3).getGlobal(), 0,
21254 MI->getOperand(3).getTargetFlags())
21256 MIB = BuildMI(*BB, MI, DL, TII->get(X86::CALL32m));
21257 addDirectMem(MIB, X86::EAX);
21258 MIB.addReg(X86::EAX, RegState::ImplicitDefine).addRegMask(RegMask);
21261 MI->eraseFromParent(); // The pseudo instruction is gone now.
21265 MachineBasicBlock *
21266 X86TargetLowering::emitEHSjLjSetJmp(MachineInstr *MI,
21267 MachineBasicBlock *MBB) const {
21268 DebugLoc DL = MI->getDebugLoc();
21269 MachineFunction *MF = MBB->getParent();
21270 const TargetInstrInfo *TII = Subtarget->getInstrInfo();
21271 MachineRegisterInfo &MRI = MF->getRegInfo();
21273 const BasicBlock *BB = MBB->getBasicBlock();
21274 MachineFunction::iterator I = MBB;
21277 // Memory Reference
21278 MachineInstr::mmo_iterator MMOBegin = MI->memoperands_begin();
21279 MachineInstr::mmo_iterator MMOEnd = MI->memoperands_end();
21282 unsigned MemOpndSlot = 0;
21284 unsigned CurOp = 0;
21286 DstReg = MI->getOperand(CurOp++).getReg();
21287 const TargetRegisterClass *RC = MRI.getRegClass(DstReg);
21288 assert(RC->hasType(MVT::i32) && "Invalid destination!");
21289 unsigned mainDstReg = MRI.createVirtualRegister(RC);
21290 unsigned restoreDstReg = MRI.createVirtualRegister(RC);
21292 MemOpndSlot = CurOp;
21294 MVT PVT = getPointerTy();
21295 assert((PVT == MVT::i64 || PVT == MVT::i32) &&
21296 "Invalid Pointer Size!");
  // For v = setjmp(buf), we generate:
  //   thisMBB:    buf[LabelOffset] = restoreMBB
  //               SjLjSetup restoreMBB
  //   mainMBB:    v_main = 0
  //   sinkMBB:    v = phi(v_main, v_restore)
  //   restoreMBB: if a base pointer is in use, reload it from the frame
  //               v_restore = 1
21314 MachineBasicBlock *thisMBB = MBB;
21315 MachineBasicBlock *mainMBB = MF->CreateMachineBasicBlock(BB);
21316 MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB);
21317 MachineBasicBlock *restoreMBB = MF->CreateMachineBasicBlock(BB);
21318 MF->insert(I, mainMBB);
21319 MF->insert(I, sinkMBB);
21320 MF->push_back(restoreMBB);
21322 MachineInstrBuilder MIB;
21324 // Transfer the remainder of BB and its successor edges to sinkMBB.
21325 sinkMBB->splice(sinkMBB->begin(), MBB,
21326 std::next(MachineBasicBlock::iterator(MI)), MBB->end());
21327 sinkMBB->transferSuccessorsAndUpdatePHIs(MBB);
21330 unsigned PtrStoreOpc = 0;
21331 unsigned LabelReg = 0;
21332 const int64_t LabelOffset = 1 * PVT.getStoreSize();
21333 Reloc::Model RM = MF->getTarget().getRelocationModel();
21334 bool UseImmLabel = (MF->getTarget().getCodeModel() == CodeModel::Small) &&
21335 (RM == Reloc::Static || RM == Reloc::DynamicNoPIC);
21337 // Prepare IP either in reg or imm.
21338 if (!UseImmLabel) {
21339 PtrStoreOpc = (PVT == MVT::i64) ? X86::MOV64mr : X86::MOV32mr;
21340 const TargetRegisterClass *PtrRC = getRegClassFor(PVT);
21341 LabelReg = MRI.createVirtualRegister(PtrRC);
21342 if (Subtarget->is64Bit()) {
21343 MIB = BuildMI(*thisMBB, MI, DL, TII->get(X86::LEA64r), LabelReg)
21347 .addMBB(restoreMBB)
21350 const X86InstrInfo *XII = static_cast<const X86InstrInfo*>(TII);
21351 MIB = BuildMI(*thisMBB, MI, DL, TII->get(X86::LEA32r), LabelReg)
21352 .addReg(XII->getGlobalBaseReg(MF))
21355 .addMBB(restoreMBB, Subtarget->ClassifyBlockAddressReference())
21359 PtrStoreOpc = (PVT == MVT::i64) ? X86::MOV64mi32 : X86::MOV32mi;
21361 MIB = BuildMI(*thisMBB, MI, DL, TII->get(PtrStoreOpc));
21362 for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
21363 if (i == X86::AddrDisp)
21364 MIB.addDisp(MI->getOperand(MemOpndSlot + i), LabelOffset);
21366 MIB.addOperand(MI->getOperand(MemOpndSlot + i));
21369 MIB.addReg(LabelReg);
21371 MIB.addMBB(restoreMBB);
21372 MIB.setMemRefs(MMOBegin, MMOEnd);
21374 MIB = BuildMI(*thisMBB, MI, DL, TII->get(X86::EH_SjLj_Setup))
21375 .addMBB(restoreMBB);
21377 const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
21378 MIB.addRegMask(RegInfo->getNoPreservedMask());
21379 thisMBB->addSuccessor(mainMBB);
21380 thisMBB->addSuccessor(restoreMBB);
21384 BuildMI(mainMBB, DL, TII->get(X86::MOV32r0), mainDstReg);
21385 mainMBB->addSuccessor(sinkMBB);
21388 BuildMI(*sinkMBB, sinkMBB->begin(), DL,
21389 TII->get(X86::PHI), DstReg)
21390 .addReg(mainDstReg).addMBB(mainMBB)
21391 .addReg(restoreDstReg).addMBB(restoreMBB);
21394 if (RegInfo->hasBasePointer(*MF)) {
21395 const bool Uses64BitFramePtr =
21396 Subtarget->isTarget64BitLP64() || Subtarget->isTargetNaCl64();
21397 X86MachineFunctionInfo *X86FI = MF->getInfo<X86MachineFunctionInfo>();
21398 X86FI->setRestoreBasePointer(MF);
21399 unsigned FramePtr = RegInfo->getFrameRegister(*MF);
21400 unsigned BasePtr = RegInfo->getBaseRegister();
21401 unsigned Opm = Uses64BitFramePtr ? X86::MOV64rm : X86::MOV32rm;
21402 addRegOffset(BuildMI(restoreMBB, DL, TII->get(Opm), BasePtr),
21403 FramePtr, true, X86FI->getRestoreBasePointerOffset())
21404 .setMIFlag(MachineInstr::FrameSetup);
21406 BuildMI(restoreMBB, DL, TII->get(X86::MOV32ri), restoreDstReg).addImm(1);
21407 BuildMI(restoreMBB, DL, TII->get(X86::JMP_1)).addMBB(sinkMBB);
21408 restoreMBB->addSuccessor(sinkMBB);
21410 MI->eraseFromParent();
21414 MachineBasicBlock *
21415 X86TargetLowering::emitEHSjLjLongJmp(MachineInstr *MI,
21416 MachineBasicBlock *MBB) const {
21417 DebugLoc DL = MI->getDebugLoc();
21418 MachineFunction *MF = MBB->getParent();
21419 const TargetInstrInfo *TII = Subtarget->getInstrInfo();
21420 MachineRegisterInfo &MRI = MF->getRegInfo();
21422 // Memory Reference
21423 MachineInstr::mmo_iterator MMOBegin = MI->memoperands_begin();
21424 MachineInstr::mmo_iterator MMOEnd = MI->memoperands_end();
21426 MVT PVT = getPointerTy();
21427 assert((PVT == MVT::i64 || PVT == MVT::i32) &&
21428 "Invalid Pointer Size!");
21430 const TargetRegisterClass *RC =
21431 (PVT == MVT::i64) ? &X86::GR64RegClass : &X86::GR32RegClass;
21432 unsigned Tmp = MRI.createVirtualRegister(RC);
21433 // Since FP is only updated here but NOT referenced, it's treated as GPR.
21434 const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
21435 unsigned FP = (PVT == MVT::i64) ? X86::RBP : X86::EBP;
21436 unsigned SP = RegInfo->getStackRegister();
21438 MachineInstrBuilder MIB;
21440 const int64_t LabelOffset = 1 * PVT.getStoreSize();
21441 const int64_t SPOffset = 2 * PVT.getStoreSize();
21443 unsigned PtrLoadOpc = (PVT == MVT::i64) ? X86::MOV64rm : X86::MOV32rm;
21444 unsigned IJmpOpc = (PVT == MVT::i64) ? X86::JMP64r : X86::JMP32r;
21447 MIB = BuildMI(*MBB, MI, DL, TII->get(PtrLoadOpc), FP);
21448 for (unsigned i = 0; i < X86::AddrNumOperands; ++i)
21449 MIB.addOperand(MI->getOperand(i));
21450 MIB.setMemRefs(MMOBegin, MMOEnd);
21452 MIB = BuildMI(*MBB, MI, DL, TII->get(PtrLoadOpc), Tmp);
21453 for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
21454 if (i == X86::AddrDisp)
21455 MIB.addDisp(MI->getOperand(i), LabelOffset);
21457 MIB.addOperand(MI->getOperand(i));
21459 MIB.setMemRefs(MMOBegin, MMOEnd);
21461 MIB = BuildMI(*MBB, MI, DL, TII->get(PtrLoadOpc), SP);
21462 for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
21463 if (i == X86::AddrDisp)
21464 MIB.addDisp(MI->getOperand(i), SPOffset);
21466 MIB.addOperand(MI->getOperand(i));
21468 MIB.setMemRefs(MMOBegin, MMOEnd);
21470 BuildMI(*MBB, MI, DL, TII->get(IJmpOpc)).addReg(Tmp);
21472 MI->eraseFromParent();
21476 // Replace 213-type (isel default) FMA3 instructions with 231-type for
21477 // accumulator loops. Writing back to the accumulator allows the coalescer
21478 // to remove extra copies in the loop.
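// In the 213 form the addend is the third source (dst = src2 * dst + src3);
// in the 231 form the addend is the destination itself (dst = src2 * src3 +
// dst), so the loop-carried accumulator can stay in one register with no copy.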
21479 MachineBasicBlock *
21480 X86TargetLowering::emitFMA3Instr(MachineInstr *MI,
21481 MachineBasicBlock *MBB) const {
21482 MachineOperand &AddendOp = MI->getOperand(3);
21484 // Bail out early if the addend isn't a register - we can't switch these.
21485 if (!AddendOp.isReg())
21488 MachineFunction &MF = *MBB->getParent();
21489 MachineRegisterInfo &MRI = MF.getRegInfo();
21491 // Check whether the addend is defined by a PHI:
21492 assert(MRI.hasOneDef(AddendOp.getReg()) && "Multiple defs in SSA?");
21493 MachineInstr &AddendDef = *MRI.def_instr_begin(AddendOp.getReg());
21494 if (!AddendDef.isPHI())
  // Look for the following pattern:
  //   loop:
  //     %addend = phi [%entry, 0], [%loop, %result]
  //     ...
  //     %result<tied1> = FMA213 %m2<tied0>, %m1, %addend
  // and replace it with:
  //   loop:
  //     %addend = phi [%entry, 0], [%loop, %result]
  //     ...
  //     %result<tied1> = FMA231 %addend<tied0>, %m1, %m2
21509 for (unsigned i = 1, e = AddendDef.getNumOperands(); i < e; i += 2) {
21510 assert(AddendDef.getOperand(i).isReg());
21511 MachineOperand PHISrcOp = AddendDef.getOperand(i);
21512 MachineInstr &PHISrcInst = *MRI.def_instr_begin(PHISrcOp.getReg());
21513 if (&PHISrcInst == MI) {
21514 // Found a matching instruction.
21515 unsigned NewFMAOpc = 0;
21516 switch (MI->getOpcode()) {
21517 case X86::VFMADDPDr213r: NewFMAOpc = X86::VFMADDPDr231r; break;
21518 case X86::VFMADDPSr213r: NewFMAOpc = X86::VFMADDPSr231r; break;
21519 case X86::VFMADDSDr213r: NewFMAOpc = X86::VFMADDSDr231r; break;
21520 case X86::VFMADDSSr213r: NewFMAOpc = X86::VFMADDSSr231r; break;
21521 case X86::VFMSUBPDr213r: NewFMAOpc = X86::VFMSUBPDr231r; break;
21522 case X86::VFMSUBPSr213r: NewFMAOpc = X86::VFMSUBPSr231r; break;
21523 case X86::VFMSUBSDr213r: NewFMAOpc = X86::VFMSUBSDr231r; break;
21524 case X86::VFMSUBSSr213r: NewFMAOpc = X86::VFMSUBSSr231r; break;
21525 case X86::VFNMADDPDr213r: NewFMAOpc = X86::VFNMADDPDr231r; break;
21526 case X86::VFNMADDPSr213r: NewFMAOpc = X86::VFNMADDPSr231r; break;
21527 case X86::VFNMADDSDr213r: NewFMAOpc = X86::VFNMADDSDr231r; break;
21528 case X86::VFNMADDSSr213r: NewFMAOpc = X86::VFNMADDSSr231r; break;
21529 case X86::VFNMSUBPDr213r: NewFMAOpc = X86::VFNMSUBPDr231r; break;
21530 case X86::VFNMSUBPSr213r: NewFMAOpc = X86::VFNMSUBPSr231r; break;
21531 case X86::VFNMSUBSDr213r: NewFMAOpc = X86::VFNMSUBSDr231r; break;
21532 case X86::VFNMSUBSSr213r: NewFMAOpc = X86::VFNMSUBSSr231r; break;
21533 case X86::VFMADDSUBPDr213r: NewFMAOpc = X86::VFMADDSUBPDr231r; break;
21534 case X86::VFMADDSUBPSr213r: NewFMAOpc = X86::VFMADDSUBPSr231r; break;
21535 case X86::VFMSUBADDPDr213r: NewFMAOpc = X86::VFMSUBADDPDr231r; break;
21536 case X86::VFMSUBADDPSr213r: NewFMAOpc = X86::VFMSUBADDPSr231r; break;
21538 case X86::VFMADDPDr213rY: NewFMAOpc = X86::VFMADDPDr231rY; break;
21539 case X86::VFMADDPSr213rY: NewFMAOpc = X86::VFMADDPSr231rY; break;
21540 case X86::VFMSUBPDr213rY: NewFMAOpc = X86::VFMSUBPDr231rY; break;
21541 case X86::VFMSUBPSr213rY: NewFMAOpc = X86::VFMSUBPSr231rY; break;
21542 case X86::VFNMADDPDr213rY: NewFMAOpc = X86::VFNMADDPDr231rY; break;
21543 case X86::VFNMADDPSr213rY: NewFMAOpc = X86::VFNMADDPSr231rY; break;
21544 case X86::VFNMSUBPDr213rY: NewFMAOpc = X86::VFNMSUBPDr231rY; break;
21545 case X86::VFNMSUBPSr213rY: NewFMAOpc = X86::VFNMSUBPSr231rY; break;
21546 case X86::VFMADDSUBPDr213rY: NewFMAOpc = X86::VFMADDSUBPDr231rY; break;
21547 case X86::VFMADDSUBPSr213rY: NewFMAOpc = X86::VFMADDSUBPSr231rY; break;
21548 case X86::VFMSUBADDPDr213rY: NewFMAOpc = X86::VFMSUBADDPDr231rY; break;
21549 case X86::VFMSUBADDPSr213rY: NewFMAOpc = X86::VFMSUBADDPSr231rY; break;
21550 default: llvm_unreachable("Unrecognized FMA variant.");
21553 const TargetInstrInfo &TII = *Subtarget->getInstrInfo();
21554 MachineInstrBuilder MIB =
21555 BuildMI(MF, MI->getDebugLoc(), TII.get(NewFMAOpc))
21556 .addOperand(MI->getOperand(0))
21557 .addOperand(MI->getOperand(3))
21558 .addOperand(MI->getOperand(2))
21559 .addOperand(MI->getOperand(1));
21560 MBB->insert(MachineBasicBlock::iterator(MI), MIB);
21561 MI->eraseFromParent();
21568 MachineBasicBlock *
21569 X86TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
21570 MachineBasicBlock *BB) const {
21571 switch (MI->getOpcode()) {
21572 default: llvm_unreachable("Unexpected instr type to insert");
21573 case X86::TAILJMPd64:
21574 case X86::TAILJMPr64:
21575 case X86::TAILJMPm64:
21576 case X86::TAILJMPd64_REX:
21577 case X86::TAILJMPr64_REX:
21578 case X86::TAILJMPm64_REX:
21579 llvm_unreachable("TAILJMP64 would not be touched here.");
21580 case X86::TCRETURNdi64:
21581 case X86::TCRETURNri64:
21582 case X86::TCRETURNmi64:
21584 case X86::WIN_ALLOCA:
21585 return EmitLoweredWinAlloca(MI, BB);
21586 case X86::SEG_ALLOCA_32:
21587 case X86::SEG_ALLOCA_64:
21588 return EmitLoweredSegAlloca(MI, BB);
21589 case X86::TLSCall_32:
21590 case X86::TLSCall_64:
21591 return EmitLoweredTLSCall(MI, BB);
21592 case X86::CMOV_GR8:
21593 case X86::CMOV_FR32:
21594 case X86::CMOV_FR64:
21595 case X86::CMOV_V4F32:
21596 case X86::CMOV_V2F64:
21597 case X86::CMOV_V2I64:
21598 case X86::CMOV_V8F32:
21599 case X86::CMOV_V4F64:
21600 case X86::CMOV_V4I64:
21601 case X86::CMOV_V16F32:
21602 case X86::CMOV_V8F64:
21603 case X86::CMOV_V8I64:
21604 case X86::CMOV_GR16:
21605 case X86::CMOV_GR32:
21606 case X86::CMOV_RFP32:
21607 case X86::CMOV_RFP64:
21608 case X86::CMOV_RFP80:
21609 return EmitLoweredSelect(MI, BB);
21611 case X86::FP32_TO_INT16_IN_MEM:
21612 case X86::FP32_TO_INT32_IN_MEM:
21613 case X86::FP32_TO_INT64_IN_MEM:
21614 case X86::FP64_TO_INT16_IN_MEM:
21615 case X86::FP64_TO_INT32_IN_MEM:
21616 case X86::FP64_TO_INT64_IN_MEM:
21617 case X86::FP80_TO_INT16_IN_MEM:
21618 case X86::FP80_TO_INT32_IN_MEM:
21619 case X86::FP80_TO_INT64_IN_MEM: {
21620 MachineFunction *F = BB->getParent();
21621 const TargetInstrInfo *TII = Subtarget->getInstrInfo();
21622 DebugLoc DL = MI->getDebugLoc();
21624 // Change the floating point control register to use "round towards zero"
21625 // mode when truncating to an integer value.
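    // (Rounding control is bits 11:10 of the x87 control word; 0b11 selects
    // round toward zero, matching C's truncating float-to-int conversion.)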
21626 int CWFrameIdx = F->getFrameInfo()->CreateStackObject(2, 2, false);
21627 addFrameReference(BuildMI(*BB, MI, DL,
21628 TII->get(X86::FNSTCW16m)), CWFrameIdx);
21630 // Load the old value of the high byte of the control word...
    unsigned OldCW =
      F->getRegInfo().createVirtualRegister(&X86::GR16RegClass);
21633 addFrameReference(BuildMI(*BB, MI, DL, TII->get(X86::MOV16rm), OldCW),
21636 // Set the high part to be round to zero...
    addFrameReference(BuildMI(*BB, MI, DL, TII->get(X86::MOV16mi)), CWFrameIdx)
      .addImm(0xC7F);
21640 // Reload the modified control word now...
21641 addFrameReference(BuildMI(*BB, MI, DL,
21642 TII->get(X86::FLDCW16m)), CWFrameIdx);
21644 // Restore the memory image of control word to original value
    addFrameReference(BuildMI(*BB, MI, DL, TII->get(X86::MOV16mr)), CWFrameIdx)
      .addReg(OldCW);
21648 // Get the X86 opcode to use.
21650 switch (MI->getOpcode()) {
21651 default: llvm_unreachable("illegal opcode!");
21652 case X86::FP32_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m32; break;
21653 case X86::FP32_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m32; break;
21654 case X86::FP32_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m32; break;
21655 case X86::FP64_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m64; break;
21656 case X86::FP64_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m64; break;
21657 case X86::FP64_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m64; break;
21658 case X86::FP80_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m80; break;
21659 case X86::FP80_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m80; break;
21660 case X86::FP80_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m80; break;
21664 MachineOperand &Op = MI->getOperand(0);
21666 AM.BaseType = X86AddressMode::RegBase;
21667 AM.Base.Reg = Op.getReg();
21669 AM.BaseType = X86AddressMode::FrameIndexBase;
21670 AM.Base.FrameIndex = Op.getIndex();
21672 Op = MI->getOperand(1);
21674 AM.Scale = Op.getImm();
21675 Op = MI->getOperand(2);
21677 AM.IndexReg = Op.getImm();
21678 Op = MI->getOperand(3);
21679 if (Op.isGlobal()) {
21680 AM.GV = Op.getGlobal();
21682 AM.Disp = Op.getImm();
21684 addFullAddress(BuildMI(*BB, MI, DL, TII->get(Opc)), AM)
21685 .addReg(MI->getOperand(X86::AddrNumOperands).getReg());
21687 // Reload the original control word now.
21688 addFrameReference(BuildMI(*BB, MI, DL,
21689 TII->get(X86::FLDCW16m)), CWFrameIdx);
21691 MI->eraseFromParent(); // The pseudo instruction is gone now.
21694 // String/text processing lowering.
21695 case X86::PCMPISTRM128REG:
21696 case X86::VPCMPISTRM128REG:
21697 case X86::PCMPISTRM128MEM:
21698 case X86::VPCMPISTRM128MEM:
21699 case X86::PCMPESTRM128REG:
21700 case X86::VPCMPESTRM128REG:
21701 case X86::PCMPESTRM128MEM:
21702 case X86::VPCMPESTRM128MEM:
21703 assert(Subtarget->hasSSE42() &&
21704 "Target must have SSE4.2 or AVX features enabled");
21705 return EmitPCMPSTRM(MI, BB, Subtarget->getInstrInfo());
21707 // String/text processing lowering.
21708 case X86::PCMPISTRIREG:
21709 case X86::VPCMPISTRIREG:
21710 case X86::PCMPISTRIMEM:
21711 case X86::VPCMPISTRIMEM:
21712 case X86::PCMPESTRIREG:
21713 case X86::VPCMPESTRIREG:
21714 case X86::PCMPESTRIMEM:
21715 case X86::VPCMPESTRIMEM:
21716 assert(Subtarget->hasSSE42() &&
21717 "Target must have SSE4.2 or AVX features enabled");
21718 return EmitPCMPSTRI(MI, BB, Subtarget->getInstrInfo());
21720 // Thread synchronization.
21722 return EmitMonitor(MI, BB, Subtarget);
21726 return EmitXBegin(MI, BB, Subtarget->getInstrInfo());
21728 case X86::VASTART_SAVE_XMM_REGS:
21729 return EmitVAStartSaveXMMRegsWithCustomInserter(MI, BB);
21731 case X86::VAARG_64:
21732 return EmitVAARG64WithCustomInserter(MI, BB);
21734 case X86::EH_SjLj_SetJmp32:
21735 case X86::EH_SjLj_SetJmp64:
21736 return emitEHSjLjSetJmp(MI, BB);
21738 case X86::EH_SjLj_LongJmp32:
21739 case X86::EH_SjLj_LongJmp64:
21740 return emitEHSjLjLongJmp(MI, BB);
21742 case TargetOpcode::STATEPOINT:
21743 // As an implementation detail, STATEPOINT shares the STACKMAP format at
21744 // this point in the process. We diverge later.
21745 return emitPatchPoint(MI, BB);
21747 case TargetOpcode::STACKMAP:
21748 case TargetOpcode::PATCHPOINT:
21749 return emitPatchPoint(MI, BB);
21751 case X86::VFMADDPDr213r:
21752 case X86::VFMADDPSr213r:
21753 case X86::VFMADDSDr213r:
21754 case X86::VFMADDSSr213r:
21755 case X86::VFMSUBPDr213r:
21756 case X86::VFMSUBPSr213r:
21757 case X86::VFMSUBSDr213r:
21758 case X86::VFMSUBSSr213r:
21759 case X86::VFNMADDPDr213r:
21760 case X86::VFNMADDPSr213r:
21761 case X86::VFNMADDSDr213r:
21762 case X86::VFNMADDSSr213r:
21763 case X86::VFNMSUBPDr213r:
21764 case X86::VFNMSUBPSr213r:
21765 case X86::VFNMSUBSDr213r:
21766 case X86::VFNMSUBSSr213r:
21767 case X86::VFMADDSUBPDr213r:
21768 case X86::VFMADDSUBPSr213r:
21769 case X86::VFMSUBADDPDr213r:
21770 case X86::VFMSUBADDPSr213r:
21771 case X86::VFMADDPDr213rY:
21772 case X86::VFMADDPSr213rY:
21773 case X86::VFMSUBPDr213rY:
21774 case X86::VFMSUBPSr213rY:
21775 case X86::VFNMADDPDr213rY:
21776 case X86::VFNMADDPSr213rY:
21777 case X86::VFNMSUBPDr213rY:
21778 case X86::VFNMSUBPSr213rY:
21779 case X86::VFMADDSUBPDr213rY:
21780 case X86::VFMADDSUBPSr213rY:
21781 case X86::VFMSUBADDPDr213rY:
21782 case X86::VFMSUBADDPSr213rY:
21783 return emitFMA3Instr(MI, BB);
21787 //===----------------------------------------------------------------------===//
21788 // X86 Optimization Hooks
21789 //===----------------------------------------------------------------------===//
21791 void X86TargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
21794 const SelectionDAG &DAG,
21795 unsigned Depth) const {
21796 unsigned BitWidth = KnownZero.getBitWidth();
21797 unsigned Opc = Op.getOpcode();
21798 assert((Opc >= ISD::BUILTIN_OP_END ||
21799 Opc == ISD::INTRINSIC_WO_CHAIN ||
21800 Opc == ISD::INTRINSIC_W_CHAIN ||
21801 Opc == ISD::INTRINSIC_VOID) &&
21802 "Should use MaskedValueIsZero if you don't know whether Op"
21803 " is a target node!");
21805 KnownZero = KnownOne = APInt(BitWidth, 0); // Don't know anything.
21819 // These nodes' second result is a boolean.
21820 if (Op.getResNo() == 0)
21823 case X86ISD::SETCC:
21824 KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - 1);
21826 case ISD::INTRINSIC_WO_CHAIN: {
21827 unsigned IntId = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
21828 unsigned NumLoBits = 0;
21831 case Intrinsic::x86_sse_movmsk_ps:
21832 case Intrinsic::x86_avx_movmsk_ps_256:
21833 case Intrinsic::x86_sse2_movmsk_pd:
21834 case Intrinsic::x86_avx_movmsk_pd_256:
21835 case Intrinsic::x86_mmx_pmovmskb:
21836 case Intrinsic::x86_sse2_pmovmskb_128:
21837 case Intrinsic::x86_avx2_pmovmskb: {
21838 // High bits of movmskp{s|d}, pmovmskb are known zero.
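      // E.g. MOVMSKPS on a 128-bit vector produces only a 4-bit mask, so bits
      // 31:4 of the i32 result are known to be zero.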
21840 default: llvm_unreachable("Impossible intrinsic"); // Can't reach here.
21841 case Intrinsic::x86_sse_movmsk_ps: NumLoBits = 4; break;
21842 case Intrinsic::x86_avx_movmsk_ps_256: NumLoBits = 8; break;
21843 case Intrinsic::x86_sse2_movmsk_pd: NumLoBits = 2; break;
21844 case Intrinsic::x86_avx_movmsk_pd_256: NumLoBits = 4; break;
21845 case Intrinsic::x86_mmx_pmovmskb: NumLoBits = 8; break;
21846 case Intrinsic::x86_sse2_pmovmskb_128: NumLoBits = 16; break;
21847 case Intrinsic::x86_avx2_pmovmskb: NumLoBits = 32; break;
21849 KnownZero = APInt::getHighBitsSet(BitWidth, BitWidth - NumLoBits);
21858 unsigned X86TargetLowering::ComputeNumSignBitsForTargetNode(
21860 const SelectionDAG &,
21861 unsigned Depth) const {
21862 // SETCC_CARRY sets the dest to ~0 for true or 0 for false.
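// Every bit of an all-ones or all-zero value is a copy of the sign bit, so
// e.g. an i32 SETCC_CARRY is reported as having 32 sign bits.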
21863 if (Op.getOpcode() == X86ISD::SETCC_CARRY)
21864 return Op.getValueType().getScalarType().getSizeInBits();
21870 /// isGAPlusOffset - Returns true (and the GlobalValue and the offset) if the
21871 /// node is a GlobalAddress + offset.
21872 bool X86TargetLowering::isGAPlusOffset(SDNode *N,
21873 const GlobalValue* &GA,
21874 int64_t &Offset) const {
21875 if (N->getOpcode() == X86ISD::Wrapper) {
21876 if (isa<GlobalAddressSDNode>(N->getOperand(0))) {
21877 GA = cast<GlobalAddressSDNode>(N->getOperand(0))->getGlobal();
21878 Offset = cast<GlobalAddressSDNode>(N->getOperand(0))->getOffset();
21882 return TargetLowering::isGAPlusOffset(N, GA, Offset);
21885 /// isShuffleHigh128VectorInsertLow - Checks whether the shuffle node is the
21886 /// same as extracting the high 128-bit part of a 256-bit vector and then
21887 /// inserting the result into the low part of a new 256-bit vector.
21888 static bool isShuffleHigh128VectorInsertLow(ShuffleVectorSDNode *SVOp) {
21889 EVT VT = SVOp->getValueType(0);
21890 unsigned NumElems = VT.getVectorNumElements();
21892 // vector_shuffle <4, 5, 6, 7, u, u, u, u> or <2, 3, u, u>
21893 for (unsigned i = 0, j = NumElems/2; i != NumElems/2; ++i, ++j)
21894 if (!isUndefOrEqual(SVOp->getMaskElt(i), j) ||
21895 SVOp->getMaskElt(j) >= 0)
21901 /// isShuffleLow128VectorInsertHigh - Checks whether the shuffle node is the
21902 /// same as extracting the low 128-bit part of a 256-bit vector and then
21903 /// inserting the result into the high part of a new 256-bit vector.
21904 static bool isShuffleLow128VectorInsertHigh(ShuffleVectorSDNode *SVOp) {
21905 EVT VT = SVOp->getValueType(0);
21906 unsigned NumElems = VT.getVectorNumElements();
21908 // vector_shuffle <u, u, u, u, 0, 1, 2, 3> or <u, u, 0, 1>
21909 for (unsigned i = NumElems/2, j = 0; i != NumElems; ++i, ++j)
21910 if (!isUndefOrEqual(SVOp->getMaskElt(i), j) ||
21911 SVOp->getMaskElt(j) >= 0)
21917 /// PerformShuffleCombine256 - Performs shuffle combines for 256-bit vectors.
21918 static SDValue PerformShuffleCombine256(SDNode *N, SelectionDAG &DAG,
21919 TargetLowering::DAGCombinerInfo &DCI,
21920 const X86Subtarget* Subtarget) {
21922 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
21923 SDValue V1 = SVOp->getOperand(0);
21924 SDValue V2 = SVOp->getOperand(1);
21925 EVT VT = SVOp->getValueType(0);
21926 unsigned NumElems = VT.getVectorNumElements();
21928 if (V1.getOpcode() == ISD::CONCAT_VECTORS &&
21929 V2.getOpcode() == ISD::CONCAT_VECTORS) {
21933 // V UNDEF BUILD_VECTOR UNDEF
21935 // CONCAT_VECTOR CONCAT_VECTOR
21938 // RESULT: V + zero extended
21940 if (V2.getOperand(0).getOpcode() != ISD::BUILD_VECTOR ||
21941 V2.getOperand(1).getOpcode() != ISD::UNDEF ||
21942 V1.getOperand(1).getOpcode() != ISD::UNDEF)
21945 if (!ISD::isBuildVectorAllZeros(V2.getOperand(0).getNode()))
21948 // To match the shuffle mask, the first half of the mask should
21949 // be exactly the first vector, and all the rest a splat with the
21950 // first element of the second one.
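// For a v8i32 shuffle, for example, this means a mask of the form
// <0, 1, 2, 3, 8, 8, 8, 8>, where element 8 is the first element of V2 (the
// all-zeros build_vector).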
21951 for (unsigned i = 0; i != NumElems/2; ++i)
21952 if (!isUndefOrEqual(SVOp->getMaskElt(i), i) ||
21953 !isUndefOrEqual(SVOp->getMaskElt(i+NumElems/2), NumElems))
21956 // If V1 is coming from a vector load then just fold to a VZEXT_LOAD.
21957 if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(V1.getOperand(0))) {
21958 if (Ld->hasNUsesOfValue(1, 0)) {
21959 SDVTList Tys = DAG.getVTList(MVT::v4i64, MVT::Other);
21960 SDValue Ops[] = { Ld->getChain(), Ld->getBasePtr() };
21962 DAG.getMemIntrinsicNode(X86ISD::VZEXT_LOAD, dl, Tys, Ops,
21964 Ld->getPointerInfo(),
21965 Ld->getAlignment(),
21966 false/*isVolatile*/, true/*ReadMem*/,
21967 false/*WriteMem*/);
21969 // Make sure the newly-created LOAD is in the same position as Ld in
21970 // terms of dependency. We create a TokenFactor for Ld and ResNode,
21971 // and update uses of Ld's output chain to use the TokenFactor.
21972 if (Ld->hasAnyUseOfValue(1)) {
21973 SDValue NewChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
21974 SDValue(Ld, 1), SDValue(ResNode.getNode(), 1));
21975 DAG.ReplaceAllUsesOfValueWith(SDValue(Ld, 1), NewChain);
21976 DAG.UpdateNodeOperands(NewChain.getNode(), SDValue(Ld, 1),
21977 SDValue(ResNode.getNode(), 1));
21980 return DAG.getNode(ISD::BITCAST, dl, VT, ResNode);
21984 // Emit a zeroed vector and insert the desired subvector on its original position.
21986 SDValue Zeros = getZeroVector(VT, Subtarget, DAG, dl);
21987 SDValue InsV = Insert128BitVector(Zeros, V1.getOperand(0), 0, DAG, dl);
21988 return DCI.CombineTo(N, InsV);
21991 //===--------------------------------------------------------------------===//
21992 // Combine some shuffles into subvector extracts and inserts:
21995 // vector_shuffle <4, 5, 6, 7, u, u, u, u> or <2, 3, u, u>
21996 if (isShuffleHigh128VectorInsertLow(SVOp)) {
21997 SDValue V = Extract128BitVector(V1, NumElems/2, DAG, dl);
21998 SDValue InsV = Insert128BitVector(DAG.getUNDEF(VT), V, 0, DAG, dl);
21999 return DCI.CombineTo(N, InsV);
22002 // vector_shuffle <u, u, u, u, 0, 1, 2, 3> or <u, u, 0, 1>
22003 if (isShuffleLow128VectorInsertHigh(SVOp)) {
22004 SDValue V = Extract128BitVector(V1, 0, DAG, dl);
22005 SDValue InsV = Insert128BitVector(DAG.getUNDEF(VT), V, NumElems/2, DAG, dl);
22006 return DCI.CombineTo(N, InsV);
22012 /// \brief Combine an arbitrary chain of shuffles into a single instruction if
22015 /// This is the leaf of the recursive combine below. When we have found some
22016 /// chain of single-use x86 shuffle instructions and accumulated the combined
22017 /// shuffle mask represented by them, this will try to pattern match that mask
22018 /// into either a single instruction if there is a special purpose instruction
22019 /// for this operation, or into a PSHUFB instruction which is a fully general
22020 /// instruction but should only be used to replace chains over a certain depth.
22021 static bool combineX86ShuffleChain(SDValue Op, SDValue Root, ArrayRef<int> Mask,
22022 int Depth, bool HasPSHUFB, SelectionDAG &DAG,
22023 TargetLowering::DAGCombinerInfo &DCI,
22024 const X86Subtarget *Subtarget) {
22025 assert(!Mask.empty() && "Cannot combine an empty shuffle mask!");
22027 // Find the operand that enters the chain. Note that multiple uses are OK
22028 // here, we're not going to remove the operand we find.
22029 SDValue Input = Op.getOperand(0);
22030 while (Input.getOpcode() == ISD::BITCAST)
22031 Input = Input.getOperand(0);
22033 MVT VT = Input.getSimpleValueType();
22034 MVT RootVT = Root.getSimpleValueType();
22037 // Just remove no-op shuffle masks.
22038 if (Mask.size() == 1) {
22039 DCI.CombineTo(Root.getNode(), DAG.getNode(ISD::BITCAST, DL, RootVT, Input),
22044 // Use the float domain if the operand type is a floating point type.
22045 bool FloatDomain = VT.isFloatingPoint();
22047 // For floating point shuffles, we don't have free copies in the shuffle
22048 // instructions or the ability to load as part of the instruction, so
22049 // canonicalize their shuffles to UNPCK or MOV variants.
22051 // Note that even with AVX we prefer the PSHUFD form of shuffle for integer
22052 // vectors because it can have a load folded into it that UNPCK cannot. This
22053 // doesn't preclude something switching to the shorter encoding post-RA.
22055 if (Mask.equals(0, 0) || Mask.equals(1, 1)) {
22056 bool Lo = Mask.equals(0, 0);
22059 // Check if we have SSE3 which will let us use MOVDDUP. That instruction
22060 // is no slower than UNPCKLPD but has the option to fold the input operand
22061 // into even an unaligned memory load.
22062 if (Lo && Subtarget->hasSSE3()) {
22063 Shuffle = X86ISD::MOVDDUP;
22064 ShuffleVT = MVT::v2f64;
22066 // We have MOVLHPS and MOVHLPS throughout SSE and they encode smaller
22067 // than the UNPCK variants.
22068 Shuffle = Lo ? X86ISD::MOVLHPS : X86ISD::MOVHLPS;
22069 ShuffleVT = MVT::v4f32;
22071 if (Depth == 1 && Root->getOpcode() == Shuffle)
22072 return false; // Nothing to do!
22073 Op = DAG.getNode(ISD::BITCAST, DL, ShuffleVT, Input);
22074 DCI.AddToWorklist(Op.getNode());
22075 if (Shuffle == X86ISD::MOVDDUP)
22076 Op = DAG.getNode(Shuffle, DL, ShuffleVT, Op);
22078 Op = DAG.getNode(Shuffle, DL, ShuffleVT, Op, Op);
22079 DCI.AddToWorklist(Op.getNode());
22080 DCI.CombineTo(Root.getNode(), DAG.getNode(ISD::BITCAST, DL, RootVT, Op),
22084 if (Subtarget->hasSSE3() &&
22085 (Mask.equals(0, 0, 2, 2) || Mask.equals(1, 1, 3, 3))) {
22086 bool Lo = Mask.equals(0, 0, 2, 2);
22087 unsigned Shuffle = Lo ? X86ISD::MOVSLDUP : X86ISD::MOVSHDUP;
22088 MVT ShuffleVT = MVT::v4f32;
22089 if (Depth == 1 && Root->getOpcode() == Shuffle)
22090 return false; // Nothing to do!
22091 Op = DAG.getNode(ISD::BITCAST, DL, ShuffleVT, Input);
22092 DCI.AddToWorklist(Op.getNode());
22093 Op = DAG.getNode(Shuffle, DL, ShuffleVT, Op);
22094 DCI.AddToWorklist(Op.getNode());
22095 DCI.CombineTo(Root.getNode(), DAG.getNode(ISD::BITCAST, DL, RootVT, Op),
22099 if (Mask.equals(0, 0, 1, 1) || Mask.equals(2, 2, 3, 3)) {
22100 bool Lo = Mask.equals(0, 0, 1, 1);
22101 unsigned Shuffle = Lo ? X86ISD::UNPCKL : X86ISD::UNPCKH;
22102 MVT ShuffleVT = MVT::v4f32;
22103 if (Depth == 1 && Root->getOpcode() == Shuffle)
22104 return false; // Nothing to do!
22105 Op = DAG.getNode(ISD::BITCAST, DL, ShuffleVT, Input);
22106 DCI.AddToWorklist(Op.getNode());
22107 Op = DAG.getNode(Shuffle, DL, ShuffleVT, Op, Op);
22108 DCI.AddToWorklist(Op.getNode());
22109 DCI.CombineTo(Root.getNode(), DAG.getNode(ISD::BITCAST, DL, RootVT, Op),
22115 // We always canonicalize the 8 x i16 and 16 x i8 shuffles into their UNPCK
22116 // variants as none of these have single-instruction variants that are
22117 // superior to the UNPCK formulation.
22118 if (!FloatDomain &&
22119 (Mask.equals(0, 0, 1, 1, 2, 2, 3, 3) ||
22120 Mask.equals(4, 4, 5, 5, 6, 6, 7, 7) ||
22121 Mask.equals(0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7) ||
22122 Mask.equals(8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13, 14, 14, 15,
22124 bool Lo = Mask[0] == 0;
22125 unsigned Shuffle = Lo ? X86ISD::UNPCKL : X86ISD::UNPCKH;
22126 if (Depth == 1 && Root->getOpcode() == Shuffle)
22127 return false; // Nothing to do!
22129 switch (Mask.size()) {
22131 ShuffleVT = MVT::v8i16;
22134 ShuffleVT = MVT::v16i8;
22137 llvm_unreachable("Impossible mask size!");
22139 Op = DAG.getNode(ISD::BITCAST, DL, ShuffleVT, Input);
22140 DCI.AddToWorklist(Op.getNode());
22141 Op = DAG.getNode(Shuffle, DL, ShuffleVT, Op, Op);
22142 DCI.AddToWorklist(Op.getNode());
22143 DCI.CombineTo(Root.getNode(), DAG.getNode(ISD::BITCAST, DL, RootVT, Op),
22148 // Don't try to re-form single instruction chains under any circumstances now
22149 // that we've done encoding canonicalization for them.
22153 // If we have 3 or more shuffle instructions or a chain involving PSHUFB, we
22154 // can replace them with a single PSHUFB instruction profitably. Intel's
22155 // manuals suggest only using PSHUFB if doing so replaces 5 instructions, but
22156 // in practice PSHUFB tends to be *very* fast so we're more aggressive.
22157 if ((Depth >= 3 || HasPSHUFB) && Subtarget->hasSSSE3()) {
22158 SmallVector<SDValue, 16> PSHUFBMask;
22159 assert(Mask.size() <= 16 && "Can't shuffle elements smaller than bytes!");
22160 int Ratio = 16 / Mask.size();
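// Each mask element expands to Ratio consecutive byte indices. For example,
// a 4-element (dword) mask <2, 3, 0, 1> has Ratio == 4 and becomes the byte
// mask <8,9,10,11, 12,13,14,15, 0,1,2,3, 4,5,6,7>.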
22161 for (unsigned i = 0; i < 16; ++i) {
22162 if (Mask[i / Ratio] == SM_SentinelUndef) {
22163 PSHUFBMask.push_back(DAG.getUNDEF(MVT::i8));
22166 int M = Mask[i / Ratio] != SM_SentinelZero
22167 ? Ratio * Mask[i / Ratio] + i % Ratio
22169 PSHUFBMask.push_back(DAG.getConstant(M, MVT::i8));
22171 Op = DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, Input);
22172 DCI.AddToWorklist(Op.getNode());
22173 SDValue PSHUFBMaskOp =
22174 DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v16i8, PSHUFBMask);
22175 DCI.AddToWorklist(PSHUFBMaskOp.getNode());
22176 Op = DAG.getNode(X86ISD::PSHUFB, DL, MVT::v16i8, Op, PSHUFBMaskOp);
22177 DCI.AddToWorklist(Op.getNode());
22178 DCI.CombineTo(Root.getNode(), DAG.getNode(ISD::BITCAST, DL, RootVT, Op),
22183 // Failed to find any combines.
22187 /// \brief Fully generic combining of x86 shuffle instructions.
22189 /// This should be the last combine run over the x86 shuffle instructions. Once
22190 /// they have been fully optimized, this will recursively consider all chains
22191 /// of single-use shuffle instructions, build a generic model of the cumulative
22192 /// shuffle operation, and check for simpler instructions which implement this
22193 /// operation. We use this primarily for two purposes:
22195 /// 1) Collapse generic shuffles to specialized single instructions when
22196 /// equivalent. In most cases, this is just an encoding size win, but
22197 /// sometimes we will collapse multiple generic shuffles into a single
22198 /// special-purpose shuffle.
22199 /// 2) Look for sequences of shuffle instructions with 3 or more total
22200 /// instructions, and replace them with the slightly more expensive SSSE3
22201 /// PSHUFB instruction if available. We do this as the last combining step
22202 /// to ensure we avoid using PSHUFB if we can implement the shuffle with
22203 /// a suitable short sequence of other instructions. The PSHUFB will either
22204 /// use a register or have to read from memory and so is slightly (but only
22205 /// slightly) more expensive than the other shuffle instructions.
22207 /// Because this is inherently a quadratic operation (for each shuffle in
22208 /// a chain, we recurse up the chain), the depth is limited to 8 instructions.
22209 /// This should never be an issue in practice as the shuffle lowering doesn't
22210 /// produce sequences of more than 8 instructions.
22212 /// FIXME: We will currently miss some cases where the redundant shuffling
22213 /// would simplify under the threshold for PSHUFB formation because of
22214 /// combine-ordering. To fix this, we should do the redundant instruction
22215 /// combining in this recursive walk.
22216 static bool combineX86ShufflesRecursively(SDValue Op, SDValue Root,
22217 ArrayRef<int> RootMask,
22218 int Depth, bool HasPSHUFB,
22220 TargetLowering::DAGCombinerInfo &DCI,
22221 const X86Subtarget *Subtarget) {
22222 // Bound the depth of our recursive combine because this is ultimately
22223 // quadratic in nature.
22227 // Directly rip through bitcasts to find the underlying operand.
22228 while (Op.getOpcode() == ISD::BITCAST && Op.getOperand(0).hasOneUse())
22229 Op = Op.getOperand(0);
22231 MVT VT = Op.getSimpleValueType();
22232 if (!VT.isVector())
22233 return false; // Bail if we hit a non-vector.
22234 // FIXME: This routine should be taught about 256-bit shuffles, or a 256-bit
22235 // version should be added.
22236 if (VT.getSizeInBits() != 128)
22239 assert(Root.getSimpleValueType().isVector() &&
22240 "Shuffles operate on vector types!");
22241 assert(VT.getSizeInBits() == Root.getSimpleValueType().getSizeInBits() &&
22242 "Can only combine shuffles of the same vector register size.");
22244 if (!isTargetShuffle(Op.getOpcode()))
22246 SmallVector<int, 16> OpMask;
22248 bool HaveMask = getTargetShuffleMask(Op.getNode(), VT, OpMask, IsUnary);
22249 // We can only combine unary shuffles whose mask we can decode.
22250 if (!HaveMask || !IsUnary)
22253 assert(VT.getVectorNumElements() == OpMask.size() &&
22254 "Different mask size from vector size!");
22255 assert(((RootMask.size() > OpMask.size() &&
22256 RootMask.size() % OpMask.size() == 0) ||
22257 (OpMask.size() > RootMask.size() &&
22258 OpMask.size() % RootMask.size() == 0) ||
22259 OpMask.size() == RootMask.size()) &&
22260 "The smaller number of elements must divide the larger.");
22261 int RootRatio = std::max<int>(1, OpMask.size() / RootMask.size());
22262 int OpRatio = std::max<int>(1, RootMask.size() / OpMask.size());
22263 assert(((RootRatio == 1 && OpRatio == 1) ||
22264 (RootRatio == 1) != (OpRatio == 1)) &&
22265 "Must not have a ratio for both incoming and op masks!");
22267 SmallVector<int, 16> Mask;
22268 Mask.reserve(std::max(OpMask.size(), RootMask.size()));
22270 // Merge this shuffle operation's mask into our accumulated mask. Note that
22271 // this shuffle's mask will be the first applied to the input, followed by the
22272 // root mask to get us all the way to the root value arrangement. The reason
22273 // for this order is that we are recursing up the operation chain.
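// For example, merging a root mask <1, 0> (a v2i64 view) with an op mask
// <2, 3, 0, 1> (a v4i32 view) gives RootRatio == 2, OpRatio == 1 and the
// combined mask <0, 1, 2, 3>, i.e. the two shuffles cancel out.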
22274 for (int i = 0, e = std::max(OpMask.size(), RootMask.size()); i < e; ++i) {
22275 int RootIdx = i / RootRatio;
22276 if (RootMask[RootIdx] < 0) {
22277 // This is a zero or undef lane, we're done.
22278 Mask.push_back(RootMask[RootIdx]);
22282 int RootMaskedIdx = RootMask[RootIdx] * RootRatio + i % RootRatio;
22283 int OpIdx = RootMaskedIdx / OpRatio;
22284 if (OpMask[OpIdx] < 0) {
22285 // The incoming lanes are zero or undef, it doesn't matter which ones we use.
22287 Mask.push_back(OpMask[OpIdx]);
22291 // Ok, we have non-zero lanes, map them through.
22292 Mask.push_back(OpMask[OpIdx] * OpRatio +
22293 RootMaskedIdx % OpRatio);
22296 // See if we can recurse into the operand to combine more things.
22297 switch (Op.getOpcode()) {
22298 case X86ISD::PSHUFB:
22300 case X86ISD::PSHUFD:
22301 case X86ISD::PSHUFHW:
22302 case X86ISD::PSHUFLW:
22303 if (Op.getOperand(0).hasOneUse() &&
22304 combineX86ShufflesRecursively(Op.getOperand(0), Root, Mask, Depth + 1,
22305 HasPSHUFB, DAG, DCI, Subtarget))
22309 case X86ISD::UNPCKL:
22310 case X86ISD::UNPCKH:
22311 assert(Op.getOperand(0) == Op.getOperand(1) && "We only combine unary shuffles!");
22312 // We can't check for a single use; we have to check that this shuffle is the only user.
22313 if (Op->isOnlyUserOf(Op.getOperand(0).getNode()) &&
22314 combineX86ShufflesRecursively(Op.getOperand(0), Root, Mask, Depth + 1,
22315 HasPSHUFB, DAG, DCI, Subtarget))
22320 // Minor canonicalization of the accumulated shuffle mask to make it easier
22321 // to match below. All this does is detect masks with sequential pairs of
22322 // elements, and shrink them to the half-width mask. It does this in a loop
22323 // so it will reduce the size of the mask to the minimal width mask which
22324 // performs an equivalent shuffle.
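// For example, <4, 5, 6, 7, 0, 1, 2, 3> widens to <2, 3, 0, 1> and then to
// <1, 0>, at which point the elements no longer pair up and widening stops.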
22325 SmallVector<int, 16> WidenedMask;
22326 while (Mask.size() > 1 && canWidenShuffleElements(Mask, WidenedMask)) {
22327 Mask = std::move(WidenedMask);
22328 WidenedMask.clear();
22331 return combineX86ShuffleChain(Op, Root, Mask, Depth, HasPSHUFB, DAG, DCI,
22335 /// \brief Get the PSHUF-style mask from PSHUF node.
22337 /// This is a very minor wrapper around getTargetShuffleMask to ease forming v4
22338 /// PSHUF-style masks that can be reused with such instructions.
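/// For example, a PSHUFHW node whose full 8 x i16 mask is
/// <0, 1, 2, 3, 7, 6, 5, 4> yields the 4-element mask <3, 2, 1, 0>.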
22339 static SmallVector<int, 4> getPSHUFShuffleMask(SDValue N) {
22340 SmallVector<int, 4> Mask;
22342 bool HaveMask = getTargetShuffleMask(N.getNode(), N.getSimpleValueType(), Mask, IsUnary);
22346 switch (N.getOpcode()) {
22347 case X86ISD::PSHUFD:
22349 case X86ISD::PSHUFLW:
22352 case X86ISD::PSHUFHW:
22353 Mask.erase(Mask.begin(), Mask.begin() + 4);
22354 for (int &M : Mask)
22358 llvm_unreachable("No valid shuffle instruction found!");
22362 /// \brief Search for a combinable shuffle across a chain ending in pshufd.
22364 /// We walk up the chain and look for a combinable shuffle, skipping over
22365 /// shuffles that we could hoist this shuffle's transformation past without
22366 /// altering anything.
22368 combineRedundantDWordShuffle(SDValue N, MutableArrayRef<int> Mask,
22370 TargetLowering::DAGCombinerInfo &DCI) {
22371 assert(N.getOpcode() == X86ISD::PSHUFD &&
22372 "Called with something other than an x86 128-bit half shuffle!");
22375 // Walk up a single-use chain looking for a combinable shuffle. Keep a stack
22376 // of the shuffles in the chain so that we can form a fresh chain to replace this chain.
22378 SmallVector<SDValue, 8> Chain;
22379 SDValue V = N.getOperand(0);
22380 for (; V.hasOneUse(); V = V.getOperand(0)) {
22381 switch (V.getOpcode()) {
22383 return SDValue(); // Nothing combined!
22386 // Skip bitcasts as we always know the type for the target specific shuffles.
22390 case X86ISD::PSHUFD:
22391 // Found another dword shuffle.
22394 case X86ISD::PSHUFLW:
22395 // Check that the low words (being shuffled) are the identity in the
22396 // dword shuffle, and the high words are self-contained.
22397 if (Mask[0] != 0 || Mask[1] != 1 ||
22398 !(Mask[2] >= 2 && Mask[2] < 4 && Mask[3] >= 2 && Mask[3] < 4))
22401 Chain.push_back(V);
22404 case X86ISD::PSHUFHW:
22405 // Check that the high words (being shuffled) are the identity in the
22406 // dword shuffle, and the low words are self-contained.
22407 if (Mask[2] != 2 || Mask[3] != 3 ||
22408 !(Mask[0] >= 0 && Mask[0] < 2 && Mask[1] >= 0 && Mask[1] < 2))
22411 Chain.push_back(V);
22414 case X86ISD::UNPCKL:
22415 case X86ISD::UNPCKH:
22416 // For either i8 -> i16 or i16 -> i32 unpacks, we can combine a dword
22417 // shuffle into a preceding word shuffle.
22418 if (V.getValueType() != MVT::v16i8 && V.getValueType() != MVT::v8i16)
22421 // Search for a half-shuffle which we can combine with.
22422 unsigned CombineOp =
22423 V.getOpcode() == X86ISD::UNPCKL ? X86ISD::PSHUFLW : X86ISD::PSHUFHW;
22424 if (V.getOperand(0) != V.getOperand(1) ||
22425 !V->isOnlyUserOf(V.getOperand(0).getNode()))
22427 Chain.push_back(V);
22428 V = V.getOperand(0);
22430 switch (V.getOpcode()) {
22432 return SDValue(); // Nothing to combine.
22434 case X86ISD::PSHUFLW:
22435 case X86ISD::PSHUFHW:
22436 if (V.getOpcode() == CombineOp)
22439 Chain.push_back(V);
22443 V = V.getOperand(0);
22447 } while (V.hasOneUse());
22450 // Break out of the loop if we break out of the switch.
22454 if (!V.hasOneUse())
22455 // We fell out of the loop without finding a viable combining instruction.
22458 // Merge this node's mask and our incoming mask.
22459 SmallVector<int, 4> VMask = getPSHUFShuffleMask(V);
22460 for (int &M : Mask)
22462 V = DAG.getNode(V.getOpcode(), DL, V.getValueType(), V.getOperand(0),
22463 getV4X86ShuffleImm8ForMask(Mask, DAG));
22465 // Rebuild the chain around this new shuffle.
22466 while (!Chain.empty()) {
22467 SDValue W = Chain.pop_back_val();
22469 if (V.getValueType() != W.getOperand(0).getValueType())
22470 V = DAG.getNode(ISD::BITCAST, DL, W.getOperand(0).getValueType(), V);
22472 switch (W.getOpcode()) {
22474 llvm_unreachable("Only PSHUF and UNPCK instructions get here!");
22476 case X86ISD::UNPCKL:
22477 case X86ISD::UNPCKH:
22478 V = DAG.getNode(W.getOpcode(), DL, W.getValueType(), V, V);
22481 case X86ISD::PSHUFD:
22482 case X86ISD::PSHUFLW:
22483 case X86ISD::PSHUFHW:
22484 V = DAG.getNode(W.getOpcode(), DL, W.getValueType(), V, W.getOperand(1));
22488 if (V.getValueType() != N.getValueType())
22489 V = DAG.getNode(ISD::BITCAST, DL, N.getValueType(), V);
22491 // Return the new chain to replace N.
22495 /// \brief Search for a combinable shuffle across a chain ending in pshuflw or pshufhw.
22497 /// We walk up the chain, skipping shuffles of the other half and looking
22498 /// through shuffles which switch halves trying to find a shuffle of the same
22499 /// pair of dwords.
22500 static bool combineRedundantHalfShuffle(SDValue N, MutableArrayRef<int> Mask,
22502 TargetLowering::DAGCombinerInfo &DCI) {
22504 (N.getOpcode() == X86ISD::PSHUFLW || N.getOpcode() == X86ISD::PSHUFHW) &&
22505 "Called with something other than an x86 128-bit half shuffle!");
22507 unsigned CombineOpcode = N.getOpcode();
22509 // Walk up a single-use chain looking for a combinable shuffle.
22510 SDValue V = N.getOperand(0);
22511 for (; V.hasOneUse(); V = V.getOperand(0)) {
22512 switch (V.getOpcode()) {
22514 return false; // Nothing combined!
22517 // Skip bitcasts as we always know the type for the target specific shuffles.
22521 case X86ISD::PSHUFLW:
22522 case X86ISD::PSHUFHW:
22523 if (V.getOpcode() == CombineOpcode)
22526 // Other-half shuffles are no-ops.
22529 // Break out of the loop if we break out of the switch.
22533 if (!V.hasOneUse())
22534 // We fell out of the loop without finding a viable combining instruction.
22537 // Combine away the bottom node as its shuffle will be accumulated into
22538 // a preceding shuffle.
22539 DCI.CombineTo(N.getNode(), N.getOperand(0), /*AddTo*/ true);
22541 // Record the old value.
22544 // Merge this node's mask and our incoming mask (adjusted to account for all
22545 // the pshufd instructions encountered).
22546 SmallVector<int, 4> VMask = getPSHUFShuffleMask(V);
22547 for (int &M : Mask)
22549 V = DAG.getNode(V.getOpcode(), DL, MVT::v8i16, V.getOperand(0),
22550 getV4X86ShuffleImm8ForMask(Mask, DAG));
22552 // Check that the shuffles didn't cancel each other out. If not, we need to
22553 // combine to the new one.
22555 // Replace the combinable shuffle with the combined one, updating all users
22556 // so that we re-evaluate the chain here.
22557 DCI.CombineTo(Old.getNode(), V, /*AddTo*/ true);
22562 /// \brief Try to combine x86 target specific shuffles.
22563 static SDValue PerformTargetShuffleCombine(SDValue N, SelectionDAG &DAG,
22564 TargetLowering::DAGCombinerInfo &DCI,
22565 const X86Subtarget *Subtarget) {
22567 MVT VT = N.getSimpleValueType();
22568 SmallVector<int, 4> Mask;
22570 switch (N.getOpcode()) {
22571 case X86ISD::PSHUFD:
22572 case X86ISD::PSHUFLW:
22573 case X86ISD::PSHUFHW:
22574 Mask = getPSHUFShuffleMask(N);
22575 assert(Mask.size() == 4);
22581 // Nuke no-op shuffles that show up after combining.
22582 if (isNoopShuffleMask(Mask))
22583 return DCI.CombineTo(N.getNode(), N.getOperand(0), /*AddTo*/ true);
22585 // Look for simplifications involving one or two shuffle instructions.
22586 SDValue V = N.getOperand(0);
22587 switch (N.getOpcode()) {
22590 case X86ISD::PSHUFLW:
22591 case X86ISD::PSHUFHW:
22592 assert(VT == MVT::v8i16);
22595 if (combineRedundantHalfShuffle(N, Mask, DAG, DCI))
22596 return SDValue(); // We combined away this shuffle, so we're done.
22598 // See if this reduces to a PSHUFD which is no more expensive and can
22599 // combine with more operations. Note that it has to at least flip the
22600 // dwords as otherwise it would have been removed as a no-op.
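// For example, pshuflw with word mask <2, 3, 0, 1> performs the same
// permutation as pshufd with dword mask <1, 0, 2, 3>.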
22601 if (Mask[0] == 2 && Mask[1] == 3 && Mask[2] == 0 && Mask[3] == 1) {
22602 int DMask[] = {0, 1, 2, 3};
22603 int DOffset = N.getOpcode() == X86ISD::PSHUFLW ? 0 : 2;
22604 DMask[DOffset + 0] = DOffset + 1;
22605 DMask[DOffset + 1] = DOffset + 0;
22606 V = DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, V);
22607 DCI.AddToWorklist(V.getNode());
22608 V = DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32, V,
22609 getV4X86ShuffleImm8ForMask(DMask, DAG));
22610 DCI.AddToWorklist(V.getNode());
22611 return DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, V);
22614 // Look for shuffle patterns which can be implemented as a single unpack.
22615 // FIXME: This doesn't handle the location of the PSHUFD generically, and
22616 // only works when we have a PSHUFD followed by two half-shuffles.
22617 if (Mask[0] == Mask[1] && Mask[2] == Mask[3] &&
22618 (V.getOpcode() == X86ISD::PSHUFLW ||
22619 V.getOpcode() == X86ISD::PSHUFHW) &&
22620 V.getOpcode() != N.getOpcode() &&
22622 SDValue D = V.getOperand(0);
22623 while (D.getOpcode() == ISD::BITCAST && D.hasOneUse())
22624 D = D.getOperand(0);
22625 if (D.getOpcode() == X86ISD::PSHUFD && D.hasOneUse()) {
22626 SmallVector<int, 4> VMask = getPSHUFShuffleMask(V);
22627 SmallVector<int, 4> DMask = getPSHUFShuffleMask(D);
22628 int NOffset = N.getOpcode() == X86ISD::PSHUFLW ? 0 : 4;
22629 int VOffset = V.getOpcode() == X86ISD::PSHUFLW ? 0 : 4;
22631 for (int i = 0; i < 4; ++i) {
22632 WordMask[i + NOffset] = Mask[i] + NOffset;
22633 WordMask[i + VOffset] = VMask[i] + VOffset;
22635 // Map the word mask through the DWord mask.
22637 for (int i = 0; i < 8; ++i)
22638 MappedMask[i] = 2 * DMask[WordMask[i] / 2] + WordMask[i] % 2;
22639 const int UnpackLoMask[] = {0, 0, 1, 1, 2, 2, 3, 3};
22640 const int UnpackHiMask[] = {4, 4, 5, 5, 6, 6, 7, 7};
22641 if (std::equal(std::begin(MappedMask), std::end(MappedMask),
22642 std::begin(UnpackLoMask)) ||
22643 std::equal(std::begin(MappedMask), std::end(MappedMask),
22644 std::begin(UnpackHiMask))) {
22645 // We can replace all three shuffles with an unpack.
22646 V = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, D.getOperand(0));
22647 DCI.AddToWorklist(V.getNode());
22648 return DAG.getNode(MappedMask[0] == 0 ? X86ISD::UNPCKL
22650 DL, MVT::v8i16, V, V);
22657 case X86ISD::PSHUFD:
22658 if (SDValue NewN = combineRedundantDWordShuffle(N, Mask, DAG, DCI))
22667 /// \brief Try to combine a shuffle into a target-specific add-sub node.
22669 /// We combine this directly on the abstract vector shuffle nodes so it is
22670 /// easier to match generically. We also insert dummy vector shuffle nodes for
22671 /// the operands which explicitly discard the lanes that are unused by this
22672 /// operation, so that the fact that they're unused flows through the rest of
22673 /// the combiner.
22674 static SDValue combineShuffleToAddSub(SDNode *N, SelectionDAG &DAG) {
22676 EVT VT = N->getValueType(0);
22678 // We only handle target-independent shuffles.
22679 // FIXME: It would be easy and harmless to use the target shuffle mask
22680 // extraction tool to support more.
22681 if (N->getOpcode() != ISD::VECTOR_SHUFFLE)
22684 auto *SVN = cast<ShuffleVectorSDNode>(N);
22685 ArrayRef<int> Mask = SVN->getMask();
22686 SDValue V1 = N->getOperand(0);
22687 SDValue V2 = N->getOperand(1);
22689 // We require the first shuffle operand to be the SUB node, and the second to
22690 // be the ADD node.
22691 // FIXME: We should support the commuted patterns.
22692 if (V1->getOpcode() != ISD::FSUB || V2->getOpcode() != ISD::FADD)
22695 // If there are other uses of these operations we can't fold them.
22696 if (!V1->hasOneUse() || !V2->hasOneUse())
22699 // Ensure that both operations have the same operands. Note that we can
22700 // commute the FADD operands.
22701 SDValue LHS = V1->getOperand(0), RHS = V1->getOperand(1);
22702 if ((V2->getOperand(0) != LHS || V2->getOperand(1) != RHS) &&
22703 (V2->getOperand(0) != RHS || V2->getOperand(1) != LHS))
22706 // We're looking for blends between FADD and FSUB nodes. We insist on these
22707 // nodes being lined up in a specific expected pattern.
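// For example, for v4f32 the mask <0, 5, 2, 7> takes lanes 0 and 2 from the
// FSUB node and lanes 1 and 3 from the FADD node, which is exactly the
// per-lane behavior of ADDSUBPS.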
22708 if (!(isShuffleEquivalent(Mask, 0, 3) ||
22709 isShuffleEquivalent(Mask, 0, 5, 2, 7) ||
22710 isShuffleEquivalent(Mask, 0, 9, 2, 11, 4, 13, 6, 15)))
22713 // Only specific types are legal at this point, assert so we notice if and
22714 // when these change.
22715 assert((VT == MVT::v4f32 || VT == MVT::v2f64 || VT == MVT::v8f32 ||
22716 VT == MVT::v4f64) &&
22717 "Unknown vector type encountered!");
22719 return DAG.getNode(X86ISD::ADDSUB, DL, VT, LHS, RHS);
22722 /// PerformShuffleCombine - Performs several different shuffle combines.
22723 static SDValue PerformShuffleCombine(SDNode *N, SelectionDAG &DAG,
22724 TargetLowering::DAGCombinerInfo &DCI,
22725 const X86Subtarget *Subtarget) {
22727 SDValue N0 = N->getOperand(0);
22728 SDValue N1 = N->getOperand(1);
22729 EVT VT = N->getValueType(0);
22731 // Don't create instructions with illegal types after legalize types has run.
22732 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
22733 if (!DCI.isBeforeLegalize() && !TLI.isTypeLegal(VT.getVectorElementType()))
22736 // If we have legalized the vector types, look for blends of FADD and FSUB
22737 // nodes that we can fuse into an ADDSUB node.
22738 if (TLI.isTypeLegal(VT) && Subtarget->hasSSE3())
22739 if (SDValue AddSub = combineShuffleToAddSub(N, DAG))
22742 // Combine 256-bit vector shuffles. This is only profitable when in AVX mode
22743 if (Subtarget->hasFp256() && VT.is256BitVector() &&
22744 N->getOpcode() == ISD::VECTOR_SHUFFLE)
22745 return PerformShuffleCombine256(N, DAG, DCI, Subtarget);
22747 // During Type Legalization, when promoting illegal vector types,
22748 // the backend might introduce new shuffle dag nodes and bitcasts.
22750 // This code performs the following transformation:
22751 // fold: (shuffle (bitcast (BINOP A, B)), Undef, <Mask>) ->
22752 // (shuffle (BINOP (bitcast A), (bitcast B)), Undef, <Mask>)
22754 // We do this only if both the bitcast and the BINOP dag nodes have
22755 // one use. Also, perform this transformation only if the new binary
22756 // operation is legal. This is to avoid introducing dag nodes that
22757 // potentially need to be further expanded (or custom lowered) into a
22758 // less optimal sequence of dag nodes.
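// For example, with VT v8i16 bitcast from a v4i32 BINOP, the mask
// <0, 2, 4, 6, u, u, u, u> keeps only the low word of each dword, so for
// opcodes where the per-lane results agree (the CanFold check below) the
// BINOP can be performed directly on the bitcast v8i16 operands.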
22759 if (!DCI.isBeforeLegalize() && DCI.isBeforeLegalizeOps() &&
22760 N1.getOpcode() == ISD::UNDEF && N0.hasOneUse() &&
22761 N0.getOpcode() == ISD::BITCAST) {
22762 SDValue BC0 = N0.getOperand(0);
22763 EVT SVT = BC0.getValueType();
22764 unsigned Opcode = BC0.getOpcode();
22765 unsigned NumElts = VT.getVectorNumElements();
22767 if (BC0.hasOneUse() && SVT.isVector() &&
22768 SVT.getVectorNumElements() * 2 == NumElts &&
22769 TLI.isOperationLegal(Opcode, VT)) {
22770 bool CanFold = false;
22782 unsigned SVTNumElts = SVT.getVectorNumElements();
22783 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
22784 for (unsigned i = 0, e = SVTNumElts; i != e && CanFold; ++i)
22785 CanFold = SVOp->getMaskElt(i) == (int)(i * 2);
22786 for (unsigned i = SVTNumElts, e = NumElts; i != e && CanFold; ++i)
22787 CanFold = SVOp->getMaskElt(i) < 0;
22790 SDValue BC00 = DAG.getNode(ISD::BITCAST, dl, VT, BC0.getOperand(0));
22791 SDValue BC01 = DAG.getNode(ISD::BITCAST, dl, VT, BC0.getOperand(1));
22792 SDValue NewBinOp = DAG.getNode(BC0.getOpcode(), dl, VT, BC00, BC01);
22793 return DAG.getVectorShuffle(VT, dl, NewBinOp, N1, &SVOp->getMask()[0]);
22798 // Only handle 128-bit wide vectors from here on.
22799 if (!VT.is128BitVector())
22802 // Combine a vector_shuffle that is equal to build_vector load1, load2, load3,
22803 // load4, <0, 1, 2, 3> into a 128-bit load if the load addresses are
22804 // consecutive, non-overlapping, and in the right order.
22805 SmallVector<SDValue, 16> Elts;
22806 for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i)
22807 Elts.push_back(getShuffleScalarElt(N, i, DAG, 0));
22809 SDValue LD = EltsFromConsecutiveLoads(VT, Elts, dl, DAG, true);
22813 if (isTargetShuffle(N->getOpcode())) {
22815 PerformTargetShuffleCombine(SDValue(N, 0), DAG, DCI, Subtarget);
22816 if (Shuffle.getNode())
22819 // Try recursively combining arbitrary sequences of x86 shuffle
22820 // instructions into higher-order shuffles. We do this after combining
22821 // specific PSHUF instruction sequences into their minimal form so that we
22822 // can evaluate how many specialized shuffle instructions are involved in
22823 // a particular chain.
22824 SmallVector<int, 1> NonceMask; // Just a placeholder.
22825 NonceMask.push_back(0);
22826 if (combineX86ShufflesRecursively(SDValue(N, 0), SDValue(N, 0), NonceMask,
22827 /*Depth*/ 1, /*HasPSHUFB*/ false, DAG,
22829 return SDValue(); // This routine will use CombineTo to replace N.
22835 /// PerformTruncateCombine - Converts a truncate operation to
22836 /// a sequence of vector shuffle operations.
22837 /// This is possible when we truncate a 256-bit vector to a 128-bit vector.
22838 static SDValue PerformTruncateCombine(SDNode *N, SelectionDAG &DAG,
22839 TargetLowering::DAGCombinerInfo &DCI,
22840 const X86Subtarget *Subtarget) {
22844 /// XFormVExtractWithShuffleIntoLoad - Check if a vector extract from a target
22845 /// specific shuffle of a load can be folded into a single element load.
22846 /// Similar handling for VECTOR_SHUFFLE is performed by DAGCombiner, but
22847 /// shuffles have been custom lowered so we need to handle those here.
22848 static SDValue XFormVExtractWithShuffleIntoLoad(SDNode *N, SelectionDAG &DAG,
22849 TargetLowering::DAGCombinerInfo &DCI) {
22850 if (DCI.isBeforeLegalizeOps())
22853 SDValue InVec = N->getOperand(0);
22854 SDValue EltNo = N->getOperand(1);
22856 if (!isa<ConstantSDNode>(EltNo))
22859 EVT OriginalVT = InVec.getValueType();
22861 if (InVec.getOpcode() == ISD::BITCAST) {
22862 // Don't duplicate a load with other uses.
22863 if (!InVec.hasOneUse())
22865 EVT BCVT = InVec.getOperand(0).getValueType();
22866 if (BCVT.getVectorNumElements() != OriginalVT.getVectorNumElements())
22868 InVec = InVec.getOperand(0);
22871 EVT CurrentVT = InVec.getValueType();
22873 if (!isTargetShuffle(InVec.getOpcode()))
22876 // Don't duplicate a load with other uses.
22877 if (!InVec.hasOneUse())
22880 SmallVector<int, 16> ShuffleMask;
22882 if (!getTargetShuffleMask(InVec.getNode(), CurrentVT.getSimpleVT(),
22883 ShuffleMask, UnaryShuffle))
22886 // Select the input vector, guarding against an out-of-range extract index.
22887 unsigned NumElems = CurrentVT.getVectorNumElements();
22888 int Elt = cast<ConstantSDNode>(EltNo)->getZExtValue();
22889 int Idx = (Elt > (int)NumElems) ? -1 : ShuffleMask[Elt];
22890 SDValue LdNode = (Idx < (int)NumElems) ? InVec.getOperand(0)
22891 : InVec.getOperand(1);
22893 // If inputs to shuffle are the same for both ops, then allow 2 uses
22894 unsigned AllowedUses = InVec.getNumOperands() > 1 &&
22895 InVec.getOperand(0) == InVec.getOperand(1) ? 2 : 1;
22897 if (LdNode.getOpcode() == ISD::BITCAST) {
22898 // Don't duplicate a load with other uses.
22899 if (!LdNode.getNode()->hasNUsesOfValue(AllowedUses, 0))
22902 AllowedUses = 1; // only allow 1 load use if we have a bitcast
22903 LdNode = LdNode.getOperand(0);
22906 if (!ISD::isNormalLoad(LdNode.getNode()))
22909 LoadSDNode *LN0 = cast<LoadSDNode>(LdNode);
22911 if (!LN0 ||!LN0->hasNUsesOfValue(AllowedUses, 0) || LN0->isVolatile())
22914 EVT EltVT = N->getValueType(0);
22915 // If there's a bitcast before the shuffle, check if the load type and
22916 // alignment are valid.
22917 unsigned Align = LN0->getAlignment();
22918 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
22919 unsigned NewAlign = TLI.getDataLayout()->getABITypeAlignment(
22920 EltVT.getTypeForEVT(*DAG.getContext()));
22922 if (NewAlign > Align || !TLI.isOperationLegalOrCustom(ISD::LOAD, EltVT))
22925 // All checks match so transform back to vector_shuffle so that DAG combiner
22926 // can finish the job
22929 // Create a shuffle node, taking into account the case that it's a unary shuffle.
22930 SDValue Shuffle = (UnaryShuffle) ? DAG.getUNDEF(CurrentVT)
22931 : InVec.getOperand(1);
22932 Shuffle = DAG.getVectorShuffle(CurrentVT, dl,
22933 InVec.getOperand(0), Shuffle,
22935 Shuffle = DAG.getNode(ISD::BITCAST, dl, OriginalVT, Shuffle);
22936 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, N->getValueType(0), Shuffle,
22940 /// PerformEXTRACT_VECTOR_ELTCombine - Detect vector gather/scatter index
22941 /// generation and convert it from being a bunch of shuffles and extracts
22942 /// into a somewhat faster sequence. For i686, the best sequence is apparently
22943 /// storing the value and loading scalars back, while for x64 we should
22944 /// use 64-bit extracts and shifts.
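/// For a v4i32 input on x86-64 this means bitcasting to v2i64, extracting the
/// two 64-bit halves, and recovering each i32 with a truncate plus a 32-bit
/// right shift, as implemented below.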
22945 static SDValue PerformEXTRACT_VECTOR_ELTCombine(SDNode *N, SelectionDAG &DAG,
22946 TargetLowering::DAGCombinerInfo &DCI) {
22947 SDValue NewOp = XFormVExtractWithShuffleIntoLoad(N, DAG, DCI);
22948 if (NewOp.getNode())
22951 SDValue InputVector = N->getOperand(0);
22953 // Detect mmx to i32 conversion through a v2i32 elt extract.
22954 if (InputVector.getOpcode() == ISD::BITCAST && InputVector.hasOneUse() &&
22955 N->getValueType(0) == MVT::i32 &&
22956 InputVector.getValueType() == MVT::v2i32) {
22958 // The bitcast source is a direct mmx result.
22959 SDValue MMXSrc = InputVector.getNode()->getOperand(0);
22960 if (MMXSrc.getValueType() == MVT::x86mmx)
22961 return DAG.getNode(X86ISD::MMX_MOVD2W, SDLoc(InputVector),
22962 N->getValueType(0),
22963 InputVector.getNode()->getOperand(0));
22965 // The mmx is indirect: (i64 extract_elt (v1i64 bitcast (x86mmx ...))).
22966 SDValue MMXSrcOp = MMXSrc.getOperand(0);
22967 if (MMXSrc.getOpcode() == ISD::EXTRACT_VECTOR_ELT && MMXSrc.hasOneUse() &&
22968 MMXSrc.getValueType() == MVT::i64 && MMXSrcOp.hasOneUse() &&
22969 MMXSrcOp.getOpcode() == ISD::BITCAST &&
22970 MMXSrcOp.getValueType() == MVT::v1i64 &&
22971 MMXSrcOp.getOperand(0).getValueType() == MVT::x86mmx)
22972 return DAG.getNode(X86ISD::MMX_MOVD2W, SDLoc(InputVector),
22973 N->getValueType(0),
22974 MMXSrcOp.getOperand(0));
22977 // Only operate on vectors of 4 elements, where the alternative shuffling
22978 // gets to be more expensive.
22979 if (InputVector.getValueType() != MVT::v4i32)
22982 // Check whether every use of InputVector is an EXTRACT_VECTOR_ELT with a
22983 // single use which is a sign-extend or zero-extend, and all elements are used.
22985 SmallVector<SDNode *, 4> Uses;
22986 unsigned ExtractedElements = 0;
22987 for (SDNode::use_iterator UI = InputVector.getNode()->use_begin(),
22988 UE = InputVector.getNode()->use_end(); UI != UE; ++UI) {
22989 if (UI.getUse().getResNo() != InputVector.getResNo())
22992 SDNode *Extract = *UI;
22993 if (Extract->getOpcode() != ISD::EXTRACT_VECTOR_ELT)
22996 if (Extract->getValueType(0) != MVT::i32)
22998 if (!Extract->hasOneUse())
23000 if (Extract->use_begin()->getOpcode() != ISD::SIGN_EXTEND &&
23001 Extract->use_begin()->getOpcode() != ISD::ZERO_EXTEND)
23003 if (!isa<ConstantSDNode>(Extract->getOperand(1)))
23006 // Record which element was extracted.
23007 ExtractedElements |=
23008 1 << cast<ConstantSDNode>(Extract->getOperand(1))->getZExtValue();
23010 Uses.push_back(Extract);
23013 // If not all the elements were used, this may not be worthwhile.
23014 if (ExtractedElements != 15)
23017 // Ok, we've now decided to do the transformation.
23018 // If 64-bit shifts are legal, use the extract-shift sequence,
23019 // otherwise bounce the vector off the cache.
23020 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
23022 SDLoc dl(InputVector);
23024 if (TLI.isOperationLegal(ISD::SRA, MVT::i64)) {
23025 SDValue Cst = DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, InputVector);
23026 EVT VecIdxTy = DAG.getTargetLoweringInfo().getVectorIdxTy();
23027 SDValue BottomHalf = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i64, Cst,
23028 DAG.getConstant(0, VecIdxTy));
23029 SDValue TopHalf = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i64, Cst,
23030 DAG.getConstant(1, VecIdxTy));
23032 SDValue ShAmt = DAG.getConstant(32,
23033 DAG.getTargetLoweringInfo().getShiftAmountTy(MVT::i64));
23034 Vals[0] = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, BottomHalf);
23035 Vals[1] = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32,
23036 DAG.getNode(ISD::SRA, dl, MVT::i64, BottomHalf, ShAmt));
23037 Vals[2] = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, TopHalf);
23038 Vals[3] = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32,
23039 DAG.getNode(ISD::SRA, dl, MVT::i64, TopHalf, ShAmt));
23041 // Store the value to a temporary stack slot.
23042 SDValue StackPtr = DAG.CreateStackTemporary(InputVector.getValueType());
23043 SDValue Ch = DAG.getStore(DAG.getEntryNode(), dl, InputVector, StackPtr,
23044 MachinePointerInfo(), false, false, 0);
23046 EVT ElementType = InputVector.getValueType().getVectorElementType();
23047 unsigned EltSize = ElementType.getSizeInBits() / 8;
23049 // Replace each use (extract) with a load of the appropriate element.
23050 for (unsigned i = 0; i < 4; ++i) {
23051 uint64_t Offset = EltSize * i;
23052 SDValue OffsetVal = DAG.getConstant(Offset, TLI.getPointerTy());
23054 SDValue ScalarAddr = DAG.getNode(ISD::ADD, dl, TLI.getPointerTy(),
23055 StackPtr, OffsetVal);
23057 // Load the scalar.
23058 Vals[i] = DAG.getLoad(ElementType, dl, Ch,
23059 ScalarAddr, MachinePointerInfo(),
23060 false, false, false, 0);
23065 // Replace the extracts
23066 for (SmallVectorImpl<SDNode *>::iterator UI = Uses.begin(),
23067 UE = Uses.end(); UI != UE; ++UI) {
23068 SDNode *Extract = *UI;
23070 SDValue Idx = Extract->getOperand(1);
23071 uint64_t IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
23072 DAG.ReplaceAllUsesOfValueWith(SDValue(Extract, 0), Vals[IdxVal]);
23075 // The replacement was made in place; don't return anything.
23079 /// \brief Matches a VSELECT onto min/max or returns 0 if the node doesn't match.
23080 static std::pair<unsigned, bool>
23081 matchIntegerMINMAX(SDValue Cond, EVT VT, SDValue LHS, SDValue RHS,
23082 SelectionDAG &DAG, const X86Subtarget *Subtarget) {
23083 if (!VT.isVector())
23084 return std::make_pair(0, false);
23086 bool NeedSplit = false;
23087 switch (VT.getSimpleVT().SimpleTy) {
23088 default: return std::make_pair(0, false);
23091 if (!Subtarget->hasVLX())
23092 return std::make_pair(0, false);
23096 if (!Subtarget->hasBWI())
23097 return std::make_pair(0, false);
23101 if (!Subtarget->hasAVX512())
23102 return std::make_pair(0, false);
23107 if (!Subtarget->hasAVX2())
23109 if (!Subtarget->hasAVX())
23110 return std::make_pair(0, false);
23115 if (!Subtarget->hasSSE2())
23116 return std::make_pair(0, false);
23119 // SSE2 has only a small subset of the operations.
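// Specifically, plain SSE2 provides only PMINUB/PMAXUB (unsigned v16i8) and
// PMINSW/PMAXSW (signed v8i16); SSE4.1 adds the remaining width/signedness
// combinations.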
23120 bool hasUnsigned = Subtarget->hasSSE41() ||
23121 (Subtarget->hasSSE2() && VT == MVT::v16i8);
23122 bool hasSigned = Subtarget->hasSSE41() ||
23123 (Subtarget->hasSSE2() && VT == MVT::v8i16);
23125 ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
23128 // Check for x CC y ? x : y.
23129 if (DAG.isEqualTo(LHS, Cond.getOperand(0)) &&
23130 DAG.isEqualTo(RHS, Cond.getOperand(1))) {
23135 Opc = hasUnsigned ? X86ISD::UMIN : 0; break;
23138 Opc = hasUnsigned ? X86ISD::UMAX : 0; break;
23141 Opc = hasSigned ? X86ISD::SMIN : 0; break;
23144 Opc = hasSigned ? X86ISD::SMAX : 0; break;
23146 // Check for x CC y ? y : x -- a min/max with reversed arms.
23147 } else if (DAG.isEqualTo(LHS, Cond.getOperand(1)) &&
23148 DAG.isEqualTo(RHS, Cond.getOperand(0))) {
23153 Opc = hasUnsigned ? X86ISD::UMAX : 0; break;
23156 Opc = hasUnsigned ? X86ISD::UMIN : 0; break;
23159 Opc = hasSigned ? X86ISD::SMAX : 0; break;
23162 Opc = hasSigned ? X86ISD::SMIN : 0; break;
23166 return std::make_pair(Opc, NeedSplit);
23170 transformVSELECTtoBlendVECTOR_SHUFFLE(SDNode *N, SelectionDAG &DAG,
23171 const X86Subtarget *Subtarget) {
23173 SDValue Cond = N->getOperand(0);
23174 SDValue LHS = N->getOperand(1);
23175 SDValue RHS = N->getOperand(2);
23177 if (Cond.getOpcode() == ISD::SIGN_EXTEND) {
23178 SDValue CondSrc = Cond->getOperand(0);
23179 if (CondSrc->getOpcode() == ISD::SIGN_EXTEND_INREG)
23180 Cond = CondSrc->getOperand(0);
23183 if (!ISD::isBuildVectorOfConstantSDNodes(Cond.getNode()))
23186 // A vselect where all conditions and data are constants can be optimized into
23187 // a single vector load by SelectionDAGLegalize::ExpandBUILD_VECTOR().
23188 if (ISD::isBuildVectorOfConstantSDNodes(LHS.getNode()) &&
23189 ISD::isBuildVectorOfConstantSDNodes(RHS.getNode()))
23192 unsigned MaskValue = 0;
23193 if (!BUILD_VECTORtoBlendMask(cast<BuildVectorSDNode>(Cond), MaskValue))
23196 MVT VT = N->getSimpleValueType(0);
23197 unsigned NumElems = VT.getVectorNumElements();
23198 SmallVector<int, 8> ShuffleMask(NumElems, -1);
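// Bit i of MaskValue picks which input supplies element i: for example, with
// NumElems == 4 and MaskValue == 0b0110 the shuffle mask becomes <0, 5, 6, 3>.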
23199 for (unsigned i = 0; i < NumElems; ++i) {
23200 // Be sure we emit undef where we can.
23201 if (Cond.getOperand(i)->getOpcode() == ISD::UNDEF)
23202 ShuffleMask[i] = -1;
23204 ShuffleMask[i] = i + NumElems * ((MaskValue >> i) & 1);
23207 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
23208 if (!TLI.isShuffleMaskLegal(ShuffleMask, VT))
23210 return DAG.getVectorShuffle(VT, dl, LHS, RHS, &ShuffleMask[0]);
23213 /// PerformSELECTCombine - Do target-specific dag combines on SELECT and VSELECT
23215 static SDValue PerformSELECTCombine(SDNode *N, SelectionDAG &DAG,
23216 TargetLowering::DAGCombinerInfo &DCI,
23217 const X86Subtarget *Subtarget) {
23219 SDValue Cond = N->getOperand(0);
23220 // Get the LHS/RHS of the select.
23221 SDValue LHS = N->getOperand(1);
23222 SDValue RHS = N->getOperand(2);
23223 EVT VT = LHS.getValueType();
23224 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
23226 // If we have SSE[12] support, try to form min/max nodes. SSE min/max
23227 // instructions match the semantics of the common C idiom x<y?x:y but not
23228 // x<=y?x:y, because of how they handle negative zero (which can be
23229 // ignored in unsafe-math mode).
23230 // We also try to create v2f32 min/max nodes, which we later widen to v4f32.
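// Concretely, MINPS/MINSS compute 'a < b ? a : b' and return the second
// operand when either input is NaN or when comparing +0.0 with -0.0, which is
// why the operand order matters in the cases below.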
23231 if (Cond.getOpcode() == ISD::SETCC && VT.isFloatingPoint() &&
23232 VT != MVT::f80 && (TLI.isTypeLegal(VT) || VT == MVT::v2f32) &&
23233 (Subtarget->hasSSE2() ||
23234 (Subtarget->hasSSE1() && VT.getScalarType() == MVT::f32))) {
23235 ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
23237 unsigned Opcode = 0;
23238 // Check for x CC y ? x : y.
23239 if (DAG.isEqualTo(LHS, Cond.getOperand(0)) &&
23240 DAG.isEqualTo(RHS, Cond.getOperand(1))) {
23244 // Converting this to a min would handle NaNs incorrectly, and swapping
23245 // the operands would cause it to handle comparisons between positive
23246 // and negative zero incorrectly.
23247 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)) {
23248 if (!DAG.getTarget().Options.UnsafeFPMath &&
23249 !(DAG.isKnownNeverZero(LHS) || DAG.isKnownNeverZero(RHS)))
23251 std::swap(LHS, RHS);
23253 Opcode = X86ISD::FMIN;
23256 // Converting this to a min would handle comparisons between positive
23257 // and negative zero incorrectly.
23258 if (!DAG.getTarget().Options.UnsafeFPMath &&
23259 !DAG.isKnownNeverZero(LHS) && !DAG.isKnownNeverZero(RHS))
23261 Opcode = X86ISD::FMIN;
23264 // Converting this to a min would handle both negative zeros and NaNs
23265 // incorrectly, but we can swap the operands to fix both.
23266 std::swap(LHS, RHS);
23270 Opcode = X86ISD::FMIN;
23274 // Converting this to a max would handle comparisons between positive
23275 // and negative zero incorrectly.
23276 if (!DAG.getTarget().Options.UnsafeFPMath &&
23277 !DAG.isKnownNeverZero(LHS) && !DAG.isKnownNeverZero(RHS))
23279 Opcode = X86ISD::FMAX;
23282 // Converting this to a max would handle NaNs incorrectly, and swapping
23283 // the operands would cause it to handle comparisons between positive
23284 // and negative zero incorrectly.
23285 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)) {
23286 if (!DAG.getTarget().Options.UnsafeFPMath &&
23287 !(DAG.isKnownNeverZero(LHS) || DAG.isKnownNeverZero(RHS)))
23289 std::swap(LHS, RHS);
23291 Opcode = X86ISD::FMAX;
23294 // Converting this to a max would handle both negative zeros and NaNs
23295 // incorrectly, but we can swap the operands to fix both.
23296 std::swap(LHS, RHS);
23300 Opcode = X86ISD::FMAX;
23303 // Check for x CC y ? y : x -- a min/max with reversed arms.
23304 } else if (DAG.isEqualTo(LHS, Cond.getOperand(1)) &&
23305 DAG.isEqualTo(RHS, Cond.getOperand(0))) {
23309 // Converting this to a min would handle comparisons between positive
23310 // and negative zero incorrectly, and swapping the operands would
23311 // cause it to handle NaNs incorrectly.
23312 if (!DAG.getTarget().Options.UnsafeFPMath &&
23313 !(DAG.isKnownNeverZero(LHS) || DAG.isKnownNeverZero(RHS))) {
23314 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS))
23316 std::swap(LHS, RHS);
23318 Opcode = X86ISD::FMIN;
23321 // Converting this to a min would handle NaNs incorrectly.
23322 if (!DAG.getTarget().Options.UnsafeFPMath &&
23323 (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)))
23325 Opcode = X86ISD::FMIN;
23328 // Converting this to a min would handle both negative zeros and NaNs
23329 // incorrectly, but we can swap the operands to fix both.
23330 std::swap(LHS, RHS);
23334 Opcode = X86ISD::FMIN;
23338 // Converting this to a max would handle NaNs incorrectly.
23339 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS))
23341 Opcode = X86ISD::FMAX;
23344 // Converting this to a max would handle comparisons between positive
23345 // and negative zero incorrectly, and swapping the operands would
23346 // cause it to handle NaNs incorrectly.
23347 if (!DAG.getTarget().Options.UnsafeFPMath &&
23348 !DAG.isKnownNeverZero(LHS) && !DAG.isKnownNeverZero(RHS)) {
23349 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS))
23351 std::swap(LHS, RHS);
23353 Opcode = X86ISD::FMAX;
23356 // Converting this to a max would handle both negative zeros and NaNs
23357 // incorrectly, but we can swap the operands to fix both.
23358 std::swap(LHS, RHS);
23362 Opcode = X86ISD::FMAX;
23368 return DAG.getNode(Opcode, DL, N->getValueType(0), LHS, RHS);
23371 EVT CondVT = Cond.getValueType();
23372 if (Subtarget->hasAVX512() && VT.isVector() && CondVT.isVector() &&
23373 CondVT.getVectorElementType() == MVT::i1) {
23374 // v16i8 (select v16i1, v16i8, v16i8) does not have a proper
23375 // lowering on KNL. In this case we convert it to
23376 // v16i8 (select v16i8, v16i8, v16i8) and use an AVX instruction.
23377 // The same applies to all 128-bit and 256-bit vectors of i8 and i16.
23378 // Starting with SKX these selects have a proper lowering.
23379 EVT OpVT = LHS.getValueType();
23380 if ((OpVT.is128BitVector() || OpVT.is256BitVector()) &&
23381 (OpVT.getVectorElementType() == MVT::i8 ||
23382 OpVT.getVectorElementType() == MVT::i16) &&
23383 !(Subtarget->hasBWI() && Subtarget->hasVLX())) {
23384 Cond = DAG.getNode(ISD::SIGN_EXTEND, DL, OpVT, Cond);
23385 DCI.AddToWorklist(Cond.getNode());
23386 return DAG.getNode(N->getOpcode(), DL, OpVT, Cond, LHS, RHS);
23389 // If this is a select between two integer constants, try to do some optimizations.
23391 if (ConstantSDNode *TrueC = dyn_cast<ConstantSDNode>(LHS)) {
23392 if (ConstantSDNode *FalseC = dyn_cast<ConstantSDNode>(RHS))
23393 // Don't do this for crazy integer types.
23394 if (DAG.getTargetLoweringInfo().isTypeLegal(LHS.getValueType())) {
23395 // If this is efficiently invertible, canonicalize the LHSC/RHSC values
23396 // so that TrueC (the true value) is larger than FalseC.
23397 bool NeedsCondInvert = false;
23399 if (TrueC->getAPIntValue().ult(FalseC->getAPIntValue()) &&
23400 // Efficiently invertible.
23401 (Cond.getOpcode() == ISD::SETCC || // setcc -> invertible.
23402 (Cond.getOpcode() == ISD::XOR && // xor(X, C) -> invertible.
23403 isa<ConstantSDNode>(Cond.getOperand(1))))) {
23404 NeedsCondInvert = true;
23405 std::swap(TrueC, FalseC);
23408 // Optimize C ? 8 : 0 -> zext(C) << 3. Likewise for any pow2/0.
23409 if (FalseC->getAPIntValue() == 0 &&
23410 TrueC->getAPIntValue().isPowerOf2()) {
23411 if (NeedsCondInvert) // Invert the condition if needed.
23412 Cond = DAG.getNode(ISD::XOR, DL, Cond.getValueType(), Cond,
23413 DAG.getConstant(1, Cond.getValueType()));
23415 // Zero extend the condition if needed.
23416 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, LHS.getValueType(), Cond);
23418 unsigned ShAmt = TrueC->getAPIntValue().logBase2();
23419 return DAG.getNode(ISD::SHL, DL, LHS.getValueType(), Cond,
23420 DAG.getConstant(ShAmt, MVT::i8));
23423 // Optimize Cond ? cst+1 : cst -> zext(setcc(C)) + cst.
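// e.g. Cond ? 5 : 4 becomes (add (zext Cond), 4).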
23424 if (FalseC->getAPIntValue()+1 == TrueC->getAPIntValue()) {
23425 if (NeedsCondInvert) // Invert the condition if needed.
23426 Cond = DAG.getNode(ISD::XOR, DL, Cond.getValueType(), Cond,
23427 DAG.getConstant(1, Cond.getValueType()));
23429 // Zero extend the condition if needed.
23430 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL,
23431 FalseC->getValueType(0), Cond);
23432 return DAG.getNode(ISD::ADD, DL, Cond.getValueType(), Cond,
23433 SDValue(FalseC, 0));
23436 // Optimize cases that will turn into an LEA instruction. This requires
23437 // an i32 or i64 and an efficient multiplier (1, 2, 3, 4, 5, 8, 9).
23438 if (N->getValueType(0) == MVT::i32 || N->getValueType(0) == MVT::i64) {
23439 uint64_t Diff = TrueC->getZExtValue()-FalseC->getZExtValue();
23440 if (N->getValueType(0) == MVT::i32) Diff = (unsigned)Diff;
23442 bool isFastMultiplier = false;
23444 switch ((unsigned char)Diff) {
23446 case 1: // result = add base, cond
23447 case 2: // result = lea base( , cond*2)
23448 case 3: // result = lea base(cond, cond*2)
23449 case 4: // result = lea base( , cond*4)
23450 case 5: // result = lea base(cond, cond*4)
23451 case 8: // result = lea base( , cond*8)
23452 case 9: // result = lea base(cond, cond*8)
23453 isFastMultiplier = true;
23458 if (isFastMultiplier) {
23459 APInt Diff = TrueC->getAPIntValue()-FalseC->getAPIntValue();
23460 if (NeedsCondInvert) // Invert the condition if needed.
23461 Cond = DAG.getNode(ISD::XOR, DL, Cond.getValueType(), Cond,
23462 DAG.getConstant(1, Cond.getValueType()));
23464 // Zero extend the condition if needed.
23465 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, FalseC->getValueType(0),
23467 // Scale the condition by the difference.
23469 Cond = DAG.getNode(ISD::MUL, DL, Cond.getValueType(), Cond,
23470 DAG.getConstant(Diff, Cond.getValueType()));
23472 // Add the base if non-zero.
23473 if (FalseC->getAPIntValue() != 0)
23474 Cond = DAG.getNode(ISD::ADD, DL, Cond.getValueType(), Cond,
23475 SDValue(FalseC, 0));
23482 // Canonicalize max and min:
23483 // (x > y) ? x : y -> (x >= y) ? x : y
23484 // (x < y) ? x : y -> (x <= y) ? x : y
23485 // This allows use of COND_S / COND_NS (see TranslateX86CC) which eliminates
23486 // the need for an extra compare
23487 // against zero. e.g.
23488 // (x - y) > 0 ? (x - y) : 0 -> (x - y) >= 0 ? (x - y) : 0
23489 // subl   %esi, %edi
23490 // testl  %edi, %edi
23491 // movl   $0, %eax
23492 // cmovgl %edi, %eax
23493 // =>
23494 // xorl   %eax, %eax
23495 // subl   %esi, %edi
23496 // cmovsl %eax, %edi
23497 if (N->getOpcode() == ISD::SELECT && Cond.getOpcode() == ISD::SETCC &&
23498 DAG.isEqualTo(LHS, Cond.getOperand(0)) &&
23499 DAG.isEqualTo(RHS, Cond.getOperand(1))) {
23500 ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
23505 ISD::CondCode NewCC = (CC == ISD::SETLT) ? ISD::SETLE : ISD::SETGE;
23506 Cond = DAG.getSetCC(SDLoc(Cond), Cond.getValueType(),
23507 Cond.getOperand(0), Cond.getOperand(1), NewCC);
23508 return DAG.getNode(ISD::SELECT, DL, VT, Cond, LHS, RHS);
23513 // Early exit check
23514 if (!TLI.isTypeLegal(VT))
23517 // Match VSELECTs into subs with unsigned saturation.
23518 if (N->getOpcode() == ISD::VSELECT && Cond.getOpcode() == ISD::SETCC &&
23519 // psubus is available in SSE2 and AVX2 for i8 and i16 vectors.
23520 ((Subtarget->hasSSE2() && (VT == MVT::v16i8 || VT == MVT::v8i16)) ||
23521 (Subtarget->hasAVX2() && (VT == MVT::v32i8 || VT == MVT::v16i16)))) {
23522 ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
23524 // Check if one of the arms of the VSELECT is a zero vector. If it's on the
23525 // left side, invert the predicate to simplify the logic below.
23527 if (ISD::isBuildVectorAllZeros(LHS.getNode())) {
23529 CC = ISD::getSetCCInverse(CC, true);
23530 } else if (ISD::isBuildVectorAllZeros(RHS.getNode())) {
23534 if (Other.getNode() && Other->getNumOperands() == 2 &&
23535 DAG.isEqualTo(Other->getOperand(0), Cond.getOperand(0))) {
23536 SDValue OpLHS = Other->getOperand(0), OpRHS = Other->getOperand(1);
23537 SDValue CondRHS = Cond->getOperand(1);
23539 // Look for a general sub with unsigned saturation first.
23540 // x >= y ? x-y : 0 --> subus x, y
23541 // x > y ? x-y : 0 --> subus x, y
23542 if ((CC == ISD::SETUGE || CC == ISD::SETUGT) &&
23543 Other->getOpcode() == ISD::SUB && DAG.isEqualTo(OpRHS, CondRHS))
23544 return DAG.getNode(X86ISD::SUBUS, DL, VT, OpLHS, OpRHS);
23546 if (auto *OpRHSBV = dyn_cast<BuildVectorSDNode>(OpRHS))
23547 if (auto *OpRHSConst = OpRHSBV->getConstantSplatNode()) {
23548 if (auto *CondRHSBV = dyn_cast<BuildVectorSDNode>(CondRHS))
23549 if (auto *CondRHSConst = CondRHSBV->getConstantSplatNode())
23550 // If the RHS is a constant we have to reverse the const
23551 // canonicalization.
23552 // x > C-1 ? x+-C : 0 --> subus x, C
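// For instance (hypothetical constants), x u> 41 ? x + (-42) : 0 is really
// x u>= 42 ? x - 42 : 0, i.e. a saturating subtract of 42.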
23553 if (CC == ISD::SETUGT && Other->getOpcode() == ISD::ADD &&
23554 CondRHSConst->getAPIntValue() ==
23555 (-OpRHSConst->getAPIntValue() - 1))
23556 return DAG.getNode(
23557 X86ISD::SUBUS, DL, VT, OpLHS,
23558 DAG.getConstant(-OpRHSConst->getAPIntValue(), VT));
23560 // Another special case: If C was a sign bit, the sub has been
23561 // canonicalized into a xor.
23562 // FIXME: Would it be better to use computeKnownBits to determine
23563 // whether it's safe to decanonicalize the xor?
23564 // x s< 0 ? x^C : 0 --> subus x, C
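// For v16i8 this means C is 0x80: x ^ 0x80 equals x - 128 for any x with the
// sign bit set, and x s< 0 is the same as x u>= 128, so the whole select is
// subus(x, 128).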
23565 if (CC == ISD::SETLT && Other->getOpcode() == ISD::XOR &&
23566 ISD::isBuildVectorAllZeros(CondRHS.getNode()) &&
23567 OpRHSConst->getAPIntValue().isSignBit())
23568 // Note that we have to rebuild the RHS constant here to ensure we
23569 // don't rely on particular values of undef lanes.
23570 return DAG.getNode(
23571 X86ISD::SUBUS, DL, VT, OpLHS,
23572 DAG.getConstant(OpRHSConst->getAPIntValue(), VT));
23577 // Try to match a min/max vector operation.
23578 if (N->getOpcode() == ISD::VSELECT && Cond.getOpcode() == ISD::SETCC) {
23579 std::pair<unsigned, bool> ret = matchIntegerMINMAX(Cond, VT, LHS, RHS, DAG, Subtarget);
23580 unsigned Opc = ret.first;
23581 bool NeedSplit = ret.second;
23583 if (Opc && NeedSplit) {
23584 unsigned NumElems = VT.getVectorNumElements();
23585 // Extract the LHS vectors
23586 SDValue LHS1 = Extract128BitVector(LHS, 0, DAG, DL);
23587 SDValue LHS2 = Extract128BitVector(LHS, NumElems/2, DAG, DL);
23589 // Extract the RHS vectors
23590 SDValue RHS1 = Extract128BitVector(RHS, 0, DAG, DL);
23591 SDValue RHS2 = Extract128BitVector(RHS, NumElems/2, DAG, DL);
23593 // Create min/max for each subvector
23594 LHS = DAG.getNode(Opc, DL, LHS1.getValueType(), LHS1, RHS1);
23595 RHS = DAG.getNode(Opc, DL, LHS2.getValueType(), LHS2, RHS2);
23597 // Merge the result
23598 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, LHS, RHS);
23600 return DAG.getNode(Opc, DL, VT, LHS, RHS);
23603 // Simplify vector selection if condition value type matches vselect
23605 if (N->getOpcode() == ISD::VSELECT && CondVT == VT) {
23606 assert(Cond.getValueType().isVector() &&
23607 "vector select expects a vector selector!");
23609 bool TValIsAllOnes = ISD::isBuildVectorAllOnes(LHS.getNode());
23610 bool FValIsAllZeros = ISD::isBuildVectorAllZeros(RHS.getNode());
23612 // Try to invert the condition if the true value is not all 1s and the false value is not all 0s.
23614 if (!TValIsAllOnes && !FValIsAllZeros &&
23615 // Check if the selector will be produced by CMPP*/PCMP*
23616 Cond.getOpcode() == ISD::SETCC &&
23617 // Check if SETCC has already been promoted
23618 TLI.getSetCCResultType(*DAG.getContext(), VT) == CondVT) {
23619 bool TValIsAllZeros = ISD::isBuildVectorAllZeros(LHS.getNode());
23620 bool FValIsAllOnes = ISD::isBuildVectorAllOnes(RHS.getNode());
23622 if (TValIsAllZeros || FValIsAllOnes) {
23623 SDValue CC = Cond.getOperand(2);
23624 ISD::CondCode NewCC =
23625 ISD::getSetCCInverse(cast<CondCodeSDNode>(CC)->get(),
23626 Cond.getOperand(0).getValueType().isInteger());
23627 Cond = DAG.getSetCC(DL, CondVT, Cond.getOperand(0), Cond.getOperand(1), NewCC);
23628 std::swap(LHS, RHS);
23629 TValIsAllOnes = FValIsAllOnes;
23630 FValIsAllZeros = TValIsAllZeros;
23634 if (TValIsAllOnes || FValIsAllZeros) {
23637 if (TValIsAllOnes && FValIsAllZeros)
23639 else if (TValIsAllOnes)
23640 Ret = DAG.getNode(ISD::OR, DL, CondVT, Cond,
23641 DAG.getNode(ISD::BITCAST, DL, CondVT, RHS));
23642 else if (FValIsAllZeros)
23643 Ret = DAG.getNode(ISD::AND, DL, CondVT, Cond,
23644 DAG.getNode(ISD::BITCAST, DL, CondVT, LHS));
23646 return DAG.getNode(ISD::BITCAST, DL, VT, Ret);
23650 // If we know that this node is legal then we know that it is going to be
23651 // matched by one of the SSE/AVX BLEND instructions. These instructions only
23652 // depend on the highest bit in each word. Try to use SimplifyDemandedBits
23653 // to simplify previous instructions.
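// Only the sign bit of each condition element matters to those BLEND
// instructions, so the DemandedMask built below keeps just the top bit of
// every element.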
23654 if (N->getOpcode() == ISD::VSELECT && DCI.isBeforeLegalizeOps() &&
23655 !DCI.isBeforeLegalize() &&
23656 // We explicitly check against v8i16 and v16i16 because, although
23657 // they're marked as Custom, they might only be legal when Cond is a
23658 // build_vector of constants. That case will be taken care of by a later instruction.
23660 (TLI.isOperationLegalOrCustom(ISD::VSELECT, VT) && VT != MVT::v16i16 &&
23661 VT != MVT::v8i16) &&
23662 // Don't optimize vector of constants. Those are handled by
23663 // the generic code and all the bits must be properly set for
23664 // the generic optimizer.
23665 !ISD::isBuildVectorOfConstantSDNodes(Cond.getNode())) {
23666 unsigned BitWidth = Cond.getValueType().getScalarType().getSizeInBits();
23668 // Don't optimize vector selects that map to mask-registers.
23672 assert(BitWidth >= 8 && BitWidth <= 64 && "Invalid mask size");
23673 APInt DemandedMask = APInt::getHighBitsSet(BitWidth, 1);
23675 APInt KnownZero, KnownOne;
23676 TargetLowering::TargetLoweringOpt TLO(DAG, DCI.isBeforeLegalize(),
23677 DCI.isBeforeLegalizeOps());
23678 if (TLO.ShrinkDemandedConstant(Cond, DemandedMask) ||
23679 TLI.SimplifyDemandedBits(Cond, DemandedMask, KnownZero, KnownOne,
23681 // If we changed the computation somewhere in the DAG, this change
23682 // will affect all users of Cond.
23683 // Make sure the change is safe, and update all affected nodes so that
23684 // we no longer go through the generic VSELECT. Otherwise, we may perform
23685 // wrong optimizations because the actual expectations about the vector
23686 // boolean values would have been violated.
23687 if (Cond != TLO.Old) {
23688 // Check all uses of that condition operand to check whether it will be
23689 // consumed by non-BLEND instructions, which may depend on all the bits being set correctly.
23691 for (SDNode::use_iterator I = Cond->use_begin(), E = Cond->use_end();
23693 if (I->getOpcode() != ISD::VSELECT)
23694 // TODO: Add other opcodes eventually lowered into BLEND.
23697 // Update all the users of the condition, before committing the change,
23698 // so that the VSELECT optimizations that expect the correct vector
23699 // boolean value will not be triggered.
23700 for (SDNode::use_iterator I = Cond->use_begin(), E = Cond->use_end();
23702 DAG.ReplaceAllUsesOfValueWith(
23704 DAG.getNode(X86ISD::SHRUNKBLEND, SDLoc(*I), I->getValueType(0),
23705 Cond, I->getOperand(1), I->getOperand(2)));
23706 DCI.CommitTargetLoweringOpt(TLO);
23709 // At this point, only Cond is changed. Change the condition
23710 // just for N so that all other users keep the opportunity
23711 // to be optimized in their own way.
23712 DAG.ReplaceAllUsesOfValueWith(
23714 DAG.getNode(X86ISD::SHRUNKBLEND, SDLoc(N), N->getValueType(0),
23715 TLO.New, N->getOperand(1), N->getOperand(2)));
23720 // We should generate an X86ISD::BLENDI from a vselect if its argument
23721 // is a sign_extend_inreg of an any_extend of a BUILD_VECTOR of
23722 // constants. This specific pattern gets generated when we split a
23723 // selector for a 512 bit vector in a machine without AVX512 (but with
23724 // 256-bit vectors), during legalization:
23726 // (vselect (sign_extend (any_extend (BUILD_VECTOR)) i1) LHS RHS)
23728 // Iff we find this pattern and the build_vectors are built from
23729 // constants, we translate the vselect into a shuffle_vector that we
23730 // know will be matched by LowerVECTOR_SHUFFLEtoBlend.
23731 if ((N->getOpcode() == ISD::VSELECT ||
23732 N->getOpcode() == X86ISD::SHRUNKBLEND) &&
23733 !DCI.isBeforeLegalize()) {
23734 SDValue Shuffle = transformVSELECTtoBlendVECTOR_SHUFFLE(N, DAG, Subtarget);
23735 if (Shuffle.getNode())
23742 // Check whether a boolean test is testing a boolean value generated by
23743 // X86ISD::SETCC. If so, return the operand of that SETCC and the proper condition code.
23746 // Simplify the following patterns:
23747 // (Op (CMP (SETCC Cond EFLAGS) 1) EQ) or
23748 // (Op (CMP (SETCC Cond EFLAGS) 0) NEQ)
23749 // to (Op EFLAGS Cond)
23751 // (Op (CMP (SETCC Cond EFLAGS) 0) EQ) or
23752 // (Op (CMP (SETCC Cond EFLAGS) 1) NEQ)
23753 // to (Op EFLAGS !Cond)
23755 // where Op could be BRCOND or CMOV.
23757 static SDValue checkBoolTestSetCCCombine(SDValue Cmp, X86::CondCode &CC) {
23758 // Quit unless this is a CMP, or a SUB whose value result is unused.
23759 if (Cmp.getOpcode() != X86ISD::CMP &&
23760 (Cmp.getOpcode() != X86ISD::SUB || Cmp.getNode()->hasAnyUseOfValue(0)))
23763 // Quit if not used as a boolean value.
23764 if (CC != X86::COND_E && CC != X86::COND_NE)
23767 // Check CMP operands. One of them should be 0 or 1 and the other should be
23768 // a SetCC or a value extended from it.
23769 SDValue Op1 = Cmp.getOperand(0);
23770 SDValue Op2 = Cmp.getOperand(1);
23773 const ConstantSDNode* C = nullptr;
23774 bool needOppositeCond = (CC == X86::COND_E);
23775 bool checkAgainstTrue = false; // Is it a comparison against 1?
23777 if ((C = dyn_cast<ConstantSDNode>(Op1)))
23779 else if ((C = dyn_cast<ConstantSDNode>(Op2)))
23781 else // Quit if neither operand is a constant.
23784 if (C->getZExtValue() == 1) {
23785 needOppositeCond = !needOppositeCond;
23786 checkAgainstTrue = true;
23787 } else if (C->getZExtValue() != 0)
23788 // Quit if the constant is neither 0 nor 1.
23791 bool truncatedToBoolWithAnd = false;
23792 // Skip (zext $x), (trunc $x), or (and $x, 1) node.
23793 while (SetCC.getOpcode() == ISD::ZERO_EXTEND ||
23794 SetCC.getOpcode() == ISD::TRUNCATE ||
23795 SetCC.getOpcode() == ISD::AND) {
23796 if (SetCC.getOpcode() == ISD::AND) {
23798 ConstantSDNode *CS;
23799 if ((CS = dyn_cast<ConstantSDNode>(SetCC.getOperand(0))) &&
23800 CS->getZExtValue() == 1)
23802 if ((CS = dyn_cast<ConstantSDNode>(SetCC.getOperand(1))) &&
23803 CS->getZExtValue() == 1)
23807 SetCC = SetCC.getOperand(OpIdx);
23808 truncatedToBoolWithAnd = true;
23810 SetCC = SetCC.getOperand(0);
23813 switch (SetCC.getOpcode()) {
23814 case X86ISD::SETCC_CARRY:
23815 // Since SETCC_CARRY gives output based on R = CF ? ~0 : 0, it's unsafe to
23816 // simplify it if the result of SETCC_CARRY is not canonicalized to 0 or 1,
23817 // i.e. it's a comparison against true but the result of SETCC_CARRY is not
23818 // truncated to i1 using 'and'.
23819 if (checkAgainstTrue && !truncatedToBoolWithAnd)
23821 assert(X86::CondCode(SetCC.getConstantOperandVal(0)) == X86::COND_B &&
23822 "Invalid use of SETCC_CARRY!");
23824 case X86ISD::SETCC:
23825 // Set the condition code or opposite one if necessary.
23826 CC = X86::CondCode(SetCC.getConstantOperandVal(0));
23827 if (needOppositeCond)
23828 CC = X86::GetOppositeBranchCondition(CC);
23829 return SetCC.getOperand(1);
23830 case X86ISD::CMOV: {
23831 // Check whether the false/true values are canonical, i.e. 0 or 1.
23832 ConstantSDNode *FVal = dyn_cast<ConstantSDNode>(SetCC.getOperand(0));
23833 ConstantSDNode *TVal = dyn_cast<ConstantSDNode>(SetCC.getOperand(1));
23834 // Quit if true value is not a constant.
23837 // Quit if false value is not a constant.
23839 SDValue Op = SetCC.getOperand(0);
23840 // Skip 'zext' or 'trunc' node.
23841 if (Op.getOpcode() == ISD::ZERO_EXTEND ||
23842 Op.getOpcode() == ISD::TRUNCATE)
23843 Op = Op.getOperand(0);
23844 // A special case for rdrand/rdseed, where 0 is set if the false cond is found.
23846 if ((Op.getOpcode() != X86ISD::RDRAND &&
23847 Op.getOpcode() != X86ISD::RDSEED) || Op.getResNo() != 0)
23850 // Quit if false value is not the constant 0 or 1.
23851 bool FValIsFalse = true;
23852 if (FVal && FVal->getZExtValue() != 0) {
23853 if (FVal->getZExtValue() != 1)
23855 // If FVal is 1, opposite cond is needed.
23856 needOppositeCond = !needOppositeCond;
23857 FValIsFalse = false;
23859 // Quit if TVal is not the constant opposite of FVal.
23860 if (FValIsFalse && TVal->getZExtValue() != 1)
23862 if (!FValIsFalse && TVal->getZExtValue() != 0)
23864 CC = X86::CondCode(SetCC.getConstantOperandVal(2));
23865 if (needOppositeCond)
23866 CC = X86::GetOppositeBranchCondition(CC);
23867 return SetCC.getOperand(3);
23874 /// Optimize X86ISD::CMOV [LHS, RHS, CONDCODE (e.g. X86::COND_NE), CONDVAL]
23875 static SDValue PerformCMOVCombine(SDNode *N, SelectionDAG &DAG,
23876 TargetLowering::DAGCombinerInfo &DCI,
23877 const X86Subtarget *Subtarget) {
23880 // If the flag operand isn't dead, don't touch this CMOV.
23881 if (N->getNumValues() == 2 && !SDValue(N, 1).use_empty())
23884 SDValue FalseOp = N->getOperand(0);
23885 SDValue TrueOp = N->getOperand(1);
23886 X86::CondCode CC = (X86::CondCode)N->getConstantOperandVal(2);
23887 SDValue Cond = N->getOperand(3);
23889 if (CC == X86::COND_E || CC == X86::COND_NE) {
23890 switch (Cond.getOpcode()) {
23894 // If the operand of BSR / BSF is proven never zero, then ZF cannot be set.
23895 if (DAG.isKnownNeverZero(Cond.getOperand(0)))
23896 return (CC == X86::COND_E) ? FalseOp : TrueOp;
23902 Flags = checkBoolTestSetCCCombine(Cond, CC);
23903 if (Flags.getNode() &&
23904 // Extra check as FCMOV only supports a subset of X86 cond.
23905 (FalseOp.getValueType() != MVT::f80 || hasFPCMov(CC))) {
23906 SDValue Ops[] = { FalseOp, TrueOp,
23907 DAG.getConstant(CC, MVT::i8), Flags };
23908 return DAG.getNode(X86ISD::CMOV, DL, N->getVTList(), Ops);
23911 // If this is a select between two integer constants, try to do some
23912 // optimizations. Note that the operands are ordered the opposite of SELECT operands.
23914 if (ConstantSDNode *TrueC = dyn_cast<ConstantSDNode>(TrueOp)) {
23915 if (ConstantSDNode *FalseC = dyn_cast<ConstantSDNode>(FalseOp)) {
23916 // Canonicalize the TrueC/FalseC values so that TrueC (the true value) is
23917 // larger than FalseC (the false value).
23918 if (TrueC->getAPIntValue().ult(FalseC->getAPIntValue())) {
23919 CC = X86::GetOppositeBranchCondition(CC);
23920 std::swap(TrueC, FalseC);
23921 std::swap(TrueOp, FalseOp);
23924 // Optimize C ? 8 : 0 -> zext(setcc(C)) << 3. Likewise for any pow2/0.
23925 // This is efficient for any integer data type (including i8/i16) and shift amount.
23927 if (FalseC->getAPIntValue() == 0 && TrueC->getAPIntValue().isPowerOf2()) {
23928 Cond = DAG.getNode(X86ISD::SETCC, DL, MVT::i8,
23929 DAG.getConstant(CC, MVT::i8), Cond);
23931 // Zero extend the condition if needed.
23932 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, TrueC->getValueType(0), Cond);
23934 unsigned ShAmt = TrueC->getAPIntValue().logBase2();
23935 Cond = DAG.getNode(ISD::SHL, DL, Cond.getValueType(), Cond,
23936 DAG.getConstant(ShAmt, MVT::i8));
23937 if (N->getNumValues() == 2) // Dead flag value?
23938 return DCI.CombineTo(N, Cond, SDValue());
23942 // Optimize Cond ? cst+1 : cst -> zext(setcc(C))+cst. This is efficient
23943 // for any integer data type, including i8/i16.
23944 if (FalseC->getAPIntValue()+1 == TrueC->getAPIntValue()) {
23945 Cond = DAG.getNode(X86ISD::SETCC, DL, MVT::i8,
23946 DAG.getConstant(CC, MVT::i8), Cond);
23948 // Zero extend the condition if needed.
23949 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL,
23950 FalseC->getValueType(0), Cond);
23951 Cond = DAG.getNode(ISD::ADD, DL, Cond.getValueType(), Cond,
23952 SDValue(FalseC, 0));
23954 if (N->getNumValues() == 2) // Dead flag value?
23955 return DCI.CombineTo(N, Cond, SDValue());
23959 // Optimize cases that will turn into an LEA instruction. This requires
23960 // an i32 or i64 and an efficient multiplier (1, 2, 3, 4, 5, 8, 9).
23961 if (N->getValueType(0) == MVT::i32 || N->getValueType(0) == MVT::i64) {
23962 uint64_t Diff = TrueC->getZExtValue()-FalseC->getZExtValue();
23963 if (N->getValueType(0) == MVT::i32) Diff = (unsigned)Diff;
23965 bool isFastMultiplier = false;
23967 switch ((unsigned char)Diff) {
23969 case 1: // result = add base, cond
23970 case 2: // result = lea base( , cond*2)
23971 case 3: // result = lea base(cond, cond*2)
23972 case 4: // result = lea base( , cond*4)
23973 case 5: // result = lea base(cond, cond*4)
23974 case 8: // result = lea base( , cond*8)
23975 case 9: // result = lea base(cond, cond*8)
23976 isFastMultiplier = true;
23981 if (isFastMultiplier) {
23982 APInt Diff = TrueC->getAPIntValue()-FalseC->getAPIntValue();
23983 Cond = DAG.getNode(X86ISD::SETCC, DL, MVT::i8,
23984 DAG.getConstant(CC, MVT::i8), Cond);
23985 // Zero extend the condition if needed.
23986 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, FalseC->getValueType(0),
23988 // Scale the condition by the difference.
23990 Cond = DAG.getNode(ISD::MUL, DL, Cond.getValueType(), Cond,
23991 DAG.getConstant(Diff, Cond.getValueType()));
23993 // Add the base if non-zero.
23994 if (FalseC->getAPIntValue() != 0)
23995 Cond = DAG.getNode(ISD::ADD, DL, Cond.getValueType(), Cond,
23996 SDValue(FalseC, 0));
23997 if (N->getNumValues() == 2) // Dead flag value?
23998 return DCI.CombineTo(N, Cond, SDValue());
24005 // Handle these cases:
24006 // (select (x != c), e, c) -> (select (x != c), e, x),
24007 // (select (x == c), c, e) -> (select (x == c), x, e)
24008 // where c is an integer constant, and the "select" is the combination
24009 // of CMOV and CMP.
24011 // The rationale for this change is that the conditional-move from a constant
24012 // needs two instructions, however, conditional-move from a register needs
24013 // only one instruction.
24015 // CAVEAT: By replacing a constant with a symbolic value, it may obscure
24016 // some instruction-combining opportunities. This opt needs to be
24017 // postponed as late as possible.
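// Example (hypothetical constant): in (x != 10) ? e : 10 the false value can
// be replaced by x itself, because whenever the condition is false x is known
// to equal 10, saving the materialization of the constant.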
24019 if (!DCI.isBeforeLegalize() && !DCI.isBeforeLegalizeOps()) {
24020 // The DCI.xxxx conditions are provided to postpone the optimization as
24021 // late as possible.
24023 ConstantSDNode *CmpAgainst = nullptr;
24024 if ((Cond.getOpcode() == X86ISD::CMP || Cond.getOpcode() == X86ISD::SUB) &&
24025 (CmpAgainst = dyn_cast<ConstantSDNode>(Cond.getOperand(1))) &&
24026 !isa<ConstantSDNode>(Cond.getOperand(0))) {
24028 if (CC == X86::COND_NE &&
24029 CmpAgainst == dyn_cast<ConstantSDNode>(FalseOp)) {
24030 CC = X86::GetOppositeBranchCondition(CC);
24031 std::swap(TrueOp, FalseOp);
24034 if (CC == X86::COND_E &&
24035 CmpAgainst == dyn_cast<ConstantSDNode>(TrueOp)) {
24036 SDValue Ops[] = { FalseOp, Cond.getOperand(0),
24037 DAG.getConstant(CC, MVT::i8), Cond };
24038 return DAG.getNode(X86ISD::CMOV, DL, N->getVTList (), Ops);
24046 static SDValue PerformINTRINSIC_WO_CHAINCombine(SDNode *N, SelectionDAG &DAG,
24047 const X86Subtarget *Subtarget) {
24048 unsigned IntNo = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
24050 default: return SDValue();
24051 // SSE/AVX/AVX2 blend intrinsics.
24052 case Intrinsic::x86_avx2_pblendvb:
24053 case Intrinsic::x86_avx2_pblendw:
24054 case Intrinsic::x86_avx2_pblendd_128:
24055 case Intrinsic::x86_avx2_pblendd_256:
24056 // Don't try to simplify this intrinsic if we don't have AVX2.
24057 if (!Subtarget->hasAVX2())
24060 case Intrinsic::x86_avx_blend_pd_256:
24061 case Intrinsic::x86_avx_blend_ps_256:
24062 case Intrinsic::x86_avx_blendv_pd_256:
24063 case Intrinsic::x86_avx_blendv_ps_256:
24064 // Don't try to simplify this intrinsic if we don't have AVX.
24065 if (!Subtarget->hasAVX())
24068 case Intrinsic::x86_sse41_pblendw:
24069 case Intrinsic::x86_sse41_blendpd:
24070 case Intrinsic::x86_sse41_blendps:
24071 case Intrinsic::x86_sse41_blendvps:
24072 case Intrinsic::x86_sse41_blendvpd:
24073 case Intrinsic::x86_sse41_pblendvb: {
24074 SDValue Op0 = N->getOperand(1);
24075 SDValue Op1 = N->getOperand(2);
24076 SDValue Mask = N->getOperand(3);
24078 // Don't try to simplify this intrinsic if we don't have SSE4.1.
24079 if (!Subtarget->hasSSE41())
24082 // fold (blend A, A, Mask) -> A
24085 // fold (blend A, B, allZeros) -> A
24086 if (ISD::isBuildVectorAllZeros(Mask.getNode()))
24088 // fold (blend A, B, allOnes) -> B
24089 if (ISD::isBuildVectorAllOnes(Mask.getNode()))
24092 // Simplify the case where the mask is a constant i32 value.
24093 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Mask)) {
24094 if (C->isNullValue())
24096 if (C->isAllOnesValue())
24103 // Packed SSE2/AVX2 arithmetic shift intrinsics (immediate and vector-count forms).
24104 case Intrinsic::x86_sse2_psrai_w:
24105 case Intrinsic::x86_sse2_psrai_d:
24106 case Intrinsic::x86_avx2_psrai_w:
24107 case Intrinsic::x86_avx2_psrai_d:
24108 case Intrinsic::x86_sse2_psra_w:
24109 case Intrinsic::x86_sse2_psra_d:
24110 case Intrinsic::x86_avx2_psra_w:
24111 case Intrinsic::x86_avx2_psra_d: {
24112 SDValue Op0 = N->getOperand(1);
24113 SDValue Op1 = N->getOperand(2);
24114 EVT VT = Op0.getValueType();
24115 assert(VT.isVector() && "Expected a vector type!");
24117 if (isa<BuildVectorSDNode>(Op1))
24118 Op1 = Op1.getOperand(0);
24120 if (!isa<ConstantSDNode>(Op1))
24123 EVT SVT = VT.getVectorElementType();
24124 unsigned SVTBits = SVT.getSizeInBits();
24126 ConstantSDNode *CND = cast<ConstantSDNode>(Op1);
24127 const APInt &C = APInt(SVTBits, CND->getAPIntValue().getZExtValue());
24128 uint64_t ShAmt = C.getZExtValue();
24130 // Don't try to convert this shift into an ISD::SRA if the shift
24131 // count is bigger than or equal to the element size.
24132 if (ShAmt >= SVTBits)
24135 // Trivial case: if the shift count is zero, then fold this
24136 // into the first operand.
24140 // Replace this packed shift intrinsic with a target-independent ISD::SRA node.
24142 SDValue Splat = DAG.getConstant(C, VT);
24143 return DAG.getNode(ISD::SRA, SDLoc(N), VT, Op0, Splat);
24148 /// PerformMulCombine - Split a single multiply by a constant into two multiplies
24149 /// so that it can be implemented with two cheaper instructions, e.g.
24150 /// LEA + SHL, LEA + LEA.
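/// For instance, x * 45 can be split as (x * 9) * 5 (two LEAs), and x * 40 as
/// (x * 5) << 3 (LEA + SHL); both decompositions are illustrative.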
24151 static SDValue PerformMulCombine(SDNode *N, SelectionDAG &DAG,
24152 TargetLowering::DAGCombinerInfo &DCI) {
24153 if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer())
24156 EVT VT = N->getValueType(0);
24157 if (VT != MVT::i64 && VT != MVT::i32)
24160 ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1));
24163 uint64_t MulAmt = C->getZExtValue();
24164 if (isPowerOf2_64(MulAmt) || MulAmt == 3 || MulAmt == 5 || MulAmt == 9)
24167 uint64_t MulAmt1 = 0;
24168 uint64_t MulAmt2 = 0;
24169 if ((MulAmt % 9) == 0) {
24171 MulAmt2 = MulAmt / 9;
24172 } else if ((MulAmt % 5) == 0) {
24174 MulAmt2 = MulAmt / 5;
24175 } else if ((MulAmt % 3) == 0) {
24177 MulAmt2 = MulAmt / 3;
24180 (isPowerOf2_64(MulAmt2) || MulAmt2 == 3 || MulAmt2 == 5 || MulAmt2 == 9)){
24183 if (isPowerOf2_64(MulAmt2) &&
24184 !(N->hasOneUse() && N->use_begin()->getOpcode() == ISD::ADD))
24185 // If the second multiplier is a power of two, issue it first. We want the multiply by
24186 // 3, 5, or 9 to be folded into the addressing mode unless the lone use is an add.
24188 std::swap(MulAmt1, MulAmt2);
24191 if (isPowerOf2_64(MulAmt1))
24192 NewMul = DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
24193 DAG.getConstant(Log2_64(MulAmt1), MVT::i8));
24195 NewMul = DAG.getNode(X86ISD::MUL_IMM, DL, VT, N->getOperand(0),
24196 DAG.getConstant(MulAmt1, VT));
24198 if (isPowerOf2_64(MulAmt2))
24199 NewMul = DAG.getNode(ISD::SHL, DL, VT, NewMul,
24200 DAG.getConstant(Log2_64(MulAmt2), MVT::i8));
24202 NewMul = DAG.getNode(X86ISD::MUL_IMM, DL, VT, NewMul,
24203 DAG.getConstant(MulAmt2, VT));
24205 // Do not add new nodes to DAG combiner worklist.
24206 DCI.CombineTo(N, NewMul, false);
24211 static SDValue PerformSHLCombine(SDNode *N, SelectionDAG &DAG) {
24212 SDValue N0 = N->getOperand(0);
24213 SDValue N1 = N->getOperand(1);
24214 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
24215 EVT VT = N0.getValueType();
24217 // fold (shl (and (setcc_c), c1), c2) -> (and setcc_c, (c1 << c2))
24218 // since the result of setcc_c is all zeros or all ones.
24219 if (VT.isInteger() && !VT.isVector() &&
24220 N1C && N0.getOpcode() == ISD::AND &&
24221 N0.getOperand(1).getOpcode() == ISD::Constant) {
24222 SDValue N00 = N0.getOperand(0);
24223 if (N00.getOpcode() == X86ISD::SETCC_CARRY ||
24224 ((N00.getOpcode() == ISD::ANY_EXTEND ||
24225 N00.getOpcode() == ISD::ZERO_EXTEND) &&
24226 N00.getOperand(0).getOpcode() == X86ISD::SETCC_CARRY)) {
24227 APInt Mask = cast<ConstantSDNode>(N0.getOperand(1))->getAPIntValue();
24228 APInt ShAmt = N1C->getAPIntValue();
24229 Mask = Mask.shl(ShAmt);
24231 return DAG.getNode(ISD::AND, SDLoc(N), VT,
24232 N00, DAG.getConstant(Mask, VT));
24236 // Hardware support for vector shifts is sparse which makes us scalarize the
24237 // vector operations in many cases. Also, on Sandy Bridge ADD is faster than SHL:
24239 // (shl V, 1) -> add V,V
24240 if (auto *N1BV = dyn_cast<BuildVectorSDNode>(N1))
24241 if (auto *N1SplatC = N1BV->getConstantSplatNode()) {
24242 assert(N0.getValueType().isVector() && "Invalid vector shift type");
24243 // We shift all of the values by one. In many cases we do not have
24244 // hardware support for this operation. This is better expressed as an ADD of two values.
24246 if (N1SplatC->getZExtValue() == 1)
24247 return DAG.getNode(ISD::ADD, SDLoc(N), VT, N0, N0);
24253 /// \brief Returns a vector of 0s if the input node is a vector logical
24254 /// shift by a constant amount which is known to be greater than or equal
24255 /// to the vector element size in bits.
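/// For example, a splatted shift amount of 32 applied to a v4i32 value always
/// yields zero with the SSE2/AVX2 logical shift instructions, so the whole
/// node folds to a zero vector.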
24256 static SDValue performShiftToAllZeros(SDNode *N, SelectionDAG &DAG,
24257 const X86Subtarget *Subtarget) {
24258 EVT VT = N->getValueType(0);
24260 if (VT != MVT::v2i64 && VT != MVT::v4i32 && VT != MVT::v8i16 &&
24261 (!Subtarget->hasInt256() ||
24262 (VT != MVT::v4i64 && VT != MVT::v8i32 && VT != MVT::v16i16)))
24265 SDValue Amt = N->getOperand(1);
24267 if (auto *AmtBV = dyn_cast<BuildVectorSDNode>(Amt))
24268 if (auto *AmtSplat = AmtBV->getConstantSplatNode()) {
24269 APInt ShiftAmt = AmtSplat->getAPIntValue();
24270 unsigned MaxAmount = VT.getVectorElementType().getSizeInBits();
24272 // SSE2/AVX2 logical shifts always return a vector of 0s
24273 // if the shift amount is greater than or equal to
24274 // the element size. The constant shift amount will be
24275 // encoded as an 8-bit immediate.
24276 if (ShiftAmt.trunc(8).uge(MaxAmount))
24277 return getZeroVector(VT, Subtarget, DAG, DL);
24283 /// PerformShiftCombine - Combine shifts.
24284 static SDValue PerformShiftCombine(SDNode* N, SelectionDAG &DAG,
24285 TargetLowering::DAGCombinerInfo &DCI,
24286 const X86Subtarget *Subtarget) {
24287 if (N->getOpcode() == ISD::SHL) {
24288 SDValue V = PerformSHLCombine(N, DAG);
24289 if (V.getNode()) return V;
24292 if (N->getOpcode() != ISD::SRA) {
24293 // Try to fold this logical shift into a zero vector.
24294 SDValue V = performShiftToAllZeros(N, DAG, Subtarget);
24295 if (V.getNode()) return V;
24301 // CMPEQCombine - Recognize the distinctive (AND (setcc ...) (setcc ..))
24302 // where both setccs reference the same FP CMP, and rewrite for CMPEQSS
24303 // and friends. Likewise for OR -> CMPNEQSS.
24304 static SDValue CMPEQCombine(SDNode *N, SelectionDAG &DAG,
24305 TargetLowering::DAGCombinerInfo &DCI,
24306 const X86Subtarget *Subtarget) {
24309 // SSE1 supports CMP{eq|ne}SS, and SSE2 added CMP{eq|ne}SD, but
24310 // we're requiring SSE2 for both.
24311 if (Subtarget->hasSSE2() && isAndOrOfSetCCs(SDValue(N, 0U), opcode)) {
24312 SDValue N0 = N->getOperand(0);
24313 SDValue N1 = N->getOperand(1);
24314 SDValue CMP0 = N0->getOperand(1);
24315 SDValue CMP1 = N1->getOperand(1);
24318 // The SETCCs should both refer to the same CMP.
24319 if (CMP0.getOpcode() != X86ISD::CMP || CMP0 != CMP1)
24322 SDValue CMP00 = CMP0->getOperand(0);
24323 SDValue CMP01 = CMP0->getOperand(1);
24324 EVT VT = CMP00.getValueType();
24326 if (VT == MVT::f32 || VT == MVT::f64) {
24327 bool ExpectingFlags = false;
24328 // Check for any users that want flags:
24329 for (SDNode::use_iterator UI = N->use_begin(), UE = N->use_end();
24330 !ExpectingFlags && UI != UE; ++UI)
24331 switch (UI->getOpcode()) {
24336 ExpectingFlags = true;
24338 case ISD::CopyToReg:
24339 case ISD::SIGN_EXTEND:
24340 case ISD::ZERO_EXTEND:
24341 case ISD::ANY_EXTEND:
24345 if (!ExpectingFlags) {
24346 enum X86::CondCode cc0 = (enum X86::CondCode)N0.getConstantOperandVal(0);
24347 enum X86::CondCode cc1 = (enum X86::CondCode)N1.getConstantOperandVal(0);
24349 if (cc1 == X86::COND_E || cc1 == X86::COND_NE) {
24350 X86::CondCode tmp = cc0;
24355 if ((cc0 == X86::COND_E && cc1 == X86::COND_NP) ||
24356 (cc0 == X86::COND_NE && cc1 == X86::COND_P)) {
24357 // FIXME: need symbolic constants for these magic numbers.
24358 // See X86ATTInstPrinter.cpp:printSSECC().
24359 unsigned x86cc = (cc0 == X86::COND_E) ? 0 : 4;
24360 if (Subtarget->hasAVX512()) {
24361 SDValue FSetCC = DAG.getNode(X86ISD::FSETCC, DL, MVT::i1, CMP00,
24362 CMP01, DAG.getConstant(x86cc, MVT::i8));
24363 if (N->getValueType(0) != MVT::i1)
24364 return DAG.getNode(ISD::ZERO_EXTEND, DL, N->getValueType(0),
24368 SDValue OnesOrZeroesF = DAG.getNode(X86ISD::FSETCC, DL,
24369 CMP00.getValueType(), CMP00, CMP01,
24370 DAG.getConstant(x86cc, MVT::i8));
24372 bool is64BitFP = (CMP00.getValueType() == MVT::f64);
24373 MVT IntVT = is64BitFP ? MVT::i64 : MVT::i32;
24375 if (is64BitFP && !Subtarget->is64Bit()) {
24376 // On a 32-bit target, we cannot bitcast the 64-bit float to a
24377 // 64-bit integer, since that's not a legal type. Since
24378 // OnesOrZeroesF is all ones or all zeroes, we don't need all the
24379 // bits, but can do this little dance to extract the lowest 32 bits
24380 // and work with those going forward.
24381 SDValue Vector64 = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v2f64,
24383 SDValue Vector32 = DAG.getNode(ISD::BITCAST, DL, MVT::v4f32,
24385 OnesOrZeroesF = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32,
24386 Vector32, DAG.getIntPtrConstant(0));
24390 SDValue OnesOrZeroesI = DAG.getNode(ISD::BITCAST, DL, IntVT, OnesOrZeroesF);
24391 SDValue ANDed = DAG.getNode(ISD::AND, DL, IntVT, OnesOrZeroesI,
24392 DAG.getConstant(1, IntVT));
24393 SDValue OneBitOfTruth = DAG.getNode(ISD::TRUNCATE, DL, MVT::i8, ANDed);
24394 return OneBitOfTruth;
24402 /// CanFoldXORWithAllOnes - Test whether the XOR operand is an all-ones vector
24403 /// so it can be folded inside ANDNP.
24404 static bool CanFoldXORWithAllOnes(const SDNode *N) {
24405 EVT VT = N->getValueType(0);
24407 // Match direct AllOnes for 128 and 256-bit vectors
24408 if (ISD::isBuildVectorAllOnes(N))
24411 // Look through a bit convert.
24412 if (N->getOpcode() == ISD::BITCAST)
24413 N = N->getOperand(0).getNode();
24415 // Sometimes the operand may come from an insert_subvector building a 256-bit all-ones vector.
24417 if (VT.is256BitVector() &&
24418 N->getOpcode() == ISD::INSERT_SUBVECTOR) {
24419 SDValue V1 = N->getOperand(0);
24420 SDValue V2 = N->getOperand(1);
24422 if (V1.getOpcode() == ISD::INSERT_SUBVECTOR &&
24423 V1.getOperand(0).getOpcode() == ISD::UNDEF &&
24424 ISD::isBuildVectorAllOnes(V1.getOperand(1).getNode()) &&
24425 ISD::isBuildVectorAllOnes(V2.getNode()))
24432 // On AVX/AVX2 the type v8i1 is legalized to v8i16, which is an XMM sized
24433 // register. In most cases we actually compare or select YMM-sized registers
24434 // and mixing the two types creates horrible code. This method optimizes
24435 // some of the transition sequences.
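// For example, (sign_extend (xor (trunc A), (trunc B))) with 256-bit A and B
// can be rewritten as a single 256-bit xor followed by a sign_extend_inreg,
// avoiding the narrow 128-bit detour.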
24436 static SDValue WidenMaskArithmetic(SDNode *N, SelectionDAG &DAG,
24437 TargetLowering::DAGCombinerInfo &DCI,
24438 const X86Subtarget *Subtarget) {
24439 EVT VT = N->getValueType(0);
24440 if (!VT.is256BitVector())
24443 assert((N->getOpcode() == ISD::ANY_EXTEND ||
24444 N->getOpcode() == ISD::ZERO_EXTEND ||
24445 N->getOpcode() == ISD::SIGN_EXTEND) && "Invalid Node");
24447 SDValue Narrow = N->getOperand(0);
24448 EVT NarrowVT = Narrow->getValueType(0);
24449 if (!NarrowVT.is128BitVector())
24452 if (Narrow->getOpcode() != ISD::XOR &&
24453 Narrow->getOpcode() != ISD::AND &&
24454 Narrow->getOpcode() != ISD::OR)
24457 SDValue N0 = Narrow->getOperand(0);
24458 SDValue N1 = Narrow->getOperand(1);
24461 // The left side has to be a trunc.
24462 if (N0.getOpcode() != ISD::TRUNCATE)
24465 // The type of the truncated inputs.
24466 EVT WideVT = N0->getOperand(0)->getValueType(0);
24470 // The right side has to be a 'trunc' or a constant vector.
24471 bool RHSTrunc = N1.getOpcode() == ISD::TRUNCATE;
24472 ConstantSDNode *RHSConstSplat = nullptr;
24473 if (auto *RHSBV = dyn_cast<BuildVectorSDNode>(N1))
24474 RHSConstSplat = RHSBV->getConstantSplatNode();
24475 if (!RHSTrunc && !RHSConstSplat)
24478 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
24480 if (!TLI.isOperationLegalOrPromote(Narrow->getOpcode(), WideVT))
24483 // Set N0 and N1 to hold the inputs to the new wide operation.
24484 N0 = N0->getOperand(0);
24485 if (RHSConstSplat) {
24486 N1 = DAG.getNode(ISD::ZERO_EXTEND, DL, WideVT.getScalarType(),
24487 SDValue(RHSConstSplat, 0));
24488 SmallVector<SDValue, 8> C(WideVT.getVectorNumElements(), N1);
24489 N1 = DAG.getNode(ISD::BUILD_VECTOR, DL, WideVT, C);
24490 } else if (RHSTrunc) {
24491 N1 = N1->getOperand(0);
24494 // Generate the wide operation.
24495 SDValue Op = DAG.getNode(Narrow->getOpcode(), DL, WideVT, N0, N1);
24496 unsigned Opcode = N->getOpcode();
24498 case ISD::ANY_EXTEND:
24500 case ISD::ZERO_EXTEND: {
24501 unsigned InBits = NarrowVT.getScalarType().getSizeInBits();
24502 APInt Mask = APInt::getAllOnesValue(InBits);
24503 Mask = Mask.zext(VT.getScalarType().getSizeInBits());
24504 return DAG.getNode(ISD::AND, DL, VT,
24505 Op, DAG.getConstant(Mask, VT));
24507 case ISD::SIGN_EXTEND:
24508 return DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, VT,
24509 Op, DAG.getValueType(NarrowVT));
24511 llvm_unreachable("Unexpected opcode");
24515 static SDValue PerformAndCombine(SDNode *N, SelectionDAG &DAG,
24516 TargetLowering::DAGCombinerInfo &DCI,
24517 const X86Subtarget *Subtarget) {
24518 EVT VT = N->getValueType(0);
24519 if (DCI.isBeforeLegalizeOps())
24522 SDValue R = CMPEQCombine(N, DAG, DCI, Subtarget);
24526 // Create BEXTR instructions
24527 // BEXTR is ((X >> imm) & (2**size-1))
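// e.g. (and (srl X, 4), 0xFF) extracts the 8-bit field starting at bit 4; the
// BEXTR control operand packs the start bit in bits 7:0 and the field length
// in bits 15:8, hence the Shift | (MaskSize << 8) constant built below.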
24528 if (VT == MVT::i32 || VT == MVT::i64) {
24529 SDValue N0 = N->getOperand(0);
24530 SDValue N1 = N->getOperand(1);
24533 // Check for BEXTR.
24534 if ((Subtarget->hasBMI() || Subtarget->hasTBM()) &&
24535 (N0.getOpcode() == ISD::SRA || N0.getOpcode() == ISD::SRL)) {
24536 ConstantSDNode *MaskNode = dyn_cast<ConstantSDNode>(N1);
24537 ConstantSDNode *ShiftNode = dyn_cast<ConstantSDNode>(N0.getOperand(1));
24538 if (MaskNode && ShiftNode) {
24539 uint64_t Mask = MaskNode->getZExtValue();
24540 uint64_t Shift = ShiftNode->getZExtValue();
24541 if (isMask_64(Mask)) {
24542 uint64_t MaskSize = CountPopulation_64(Mask);
24543 if (Shift + MaskSize <= VT.getSizeInBits())
24544 return DAG.getNode(X86ISD::BEXTR, DL, VT, N0.getOperand(0),
24545 DAG.getConstant(Shift | (MaskSize << 8), VT));
24553 // Want to form ANDNP nodes:
24554 // 1) In the hopes of then easily combining them with OR and AND nodes
24555 // to form PBLEND/PSIGN.
24556 // 2) To match ANDN packed intrinsics
24557 if (VT != MVT::v2i64 && VT != MVT::v4i64)
24560 SDValue N0 = N->getOperand(0);
24561 SDValue N1 = N->getOperand(1);
24564 // Check LHS for vnot
24565 if (N0.getOpcode() == ISD::XOR &&
24566 //ISD::isBuildVectorAllOnes(N0.getOperand(1).getNode()))
24567 CanFoldXORWithAllOnes(N0.getOperand(1).getNode()))
24568 return DAG.getNode(X86ISD::ANDNP, DL, VT, N0.getOperand(0), N1);
24570 // Check RHS for vnot
24571 if (N1.getOpcode() == ISD::XOR &&
24572 //ISD::isBuildVectorAllOnes(N1.getOperand(1).getNode()))
24573 CanFoldXORWithAllOnes(N1.getOperand(1).getNode()))
24574 return DAG.getNode(X86ISD::ANDNP, DL, VT, N1.getOperand(0), N0);
24579 static SDValue PerformOrCombine(SDNode *N, SelectionDAG &DAG,
24580 TargetLowering::DAGCombinerInfo &DCI,
24581 const X86Subtarget *Subtarget) {
24582 if (DCI.isBeforeLegalizeOps())
24585 SDValue R = CMPEQCombine(N, DAG, DCI, Subtarget);
24589 SDValue N0 = N->getOperand(0);
24590 SDValue N1 = N->getOperand(1);
24591 EVT VT = N->getValueType(0);
24593 // look for psign/blend
24594 if (VT == MVT::v2i64 || VT == MVT::v4i64) {
24595 if (!Subtarget->hasSSSE3() ||
24596 (VT == MVT::v4i64 && !Subtarget->hasInt256()))
24599 // Canonicalize pandn to RHS
24600 if (N0.getOpcode() == X86ISD::ANDNP)
24602 // or (and (m, y), (pandn m, x))
24603 if (N0.getOpcode() == ISD::AND && N1.getOpcode() == X86ISD::ANDNP) {
24604 SDValue Mask = N1.getOperand(0);
24605 SDValue X = N1.getOperand(1);
24607 if (N0.getOperand(0) == Mask)
24608 Y = N0.getOperand(1);
24609 if (N0.getOperand(1) == Mask)
24610 Y = N0.getOperand(0);
24612 // Check to see if the mask appeared in both the AND and the ANDNP.
24616 // Validate that X, Y, and Mask are BIT_CONVERTS, and see through them.
24617 // Look through mask bitcast.
24618 if (Mask.getOpcode() == ISD::BITCAST)
24619 Mask = Mask.getOperand(0);
24620 if (X.getOpcode() == ISD::BITCAST)
24621 X = X.getOperand(0);
24622 if (Y.getOpcode() == ISD::BITCAST)
24623 Y = Y.getOperand(0);
24625 EVT MaskVT = Mask.getValueType();
24627 // Validate that the Mask operand is a vector sra node.
24628 // FIXME: what to do for bytes, since there is a psignb/pblendvb, but
24629 // there is no psrai.b
24630 unsigned EltBits = MaskVT.getVectorElementType().getSizeInBits();
24631 unsigned SraAmt = ~0;
24632 if (Mask.getOpcode() == ISD::SRA) {
24633 if (auto *AmtBV = dyn_cast<BuildVectorSDNode>(Mask.getOperand(1)))
24634 if (auto *AmtConst = AmtBV->getConstantSplatNode())
24635 SraAmt = AmtConst->getZExtValue();
24636 } else if (Mask.getOpcode() == X86ISD::VSRAI) {
24637 SDValue SraC = Mask.getOperand(1);
24638 SraAmt = cast<ConstantSDNode>(SraC)->getZExtValue();
24640 if ((SraAmt + 1) != EltBits)
24645 // Now we know we at least have a pblendvb with the mask val. See if
24646 // we can form a psignb/w/d.
24647 // psign = x.type == y.type == mask.type && y = sub(0, x);
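// The SRA-by-(EltBits-1) check above guarantees every mask lane is a splat of
// the sign bit of the pre-shift value, i.e. each lane is all-ones or all-zeros.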
24648 if (Y.getOpcode() == ISD::SUB && Y.getOperand(1) == X &&
24649 ISD::isBuildVectorAllZeros(Y.getOperand(0).getNode()) &&
24650 X.getValueType() == MaskVT && Y.getValueType() == MaskVT) {
24651 assert((EltBits == 8 || EltBits == 16 || EltBits == 32) &&
24652 "Unsupported VT for PSIGN");
24653 Mask = DAG.getNode(X86ISD::PSIGN, DL, MaskVT, X, Mask.getOperand(0));
24654 return DAG.getNode(ISD::BITCAST, DL, VT, Mask);
24656 // PBLENDVB only available on SSE 4.1
24657 if (!Subtarget->hasSSE41())
24660 EVT BlendVT = (VT == MVT::v4i64) ? MVT::v32i8 : MVT::v16i8;
24662 X = DAG.getNode(ISD::BITCAST, DL, BlendVT, X);
24663 Y = DAG.getNode(ISD::BITCAST, DL, BlendVT, Y);
24664 Mask = DAG.getNode(ISD::BITCAST, DL, BlendVT, Mask);
24665 Mask = DAG.getNode(ISD::VSELECT, DL, BlendVT, Mask, Y, X);
24666 return DAG.getNode(ISD::BITCAST, DL, VT, Mask);
24670 if (VT != MVT::i16 && VT != MVT::i32 && VT != MVT::i64)
24673 // fold (or (x << c) | (y >> (64 - c))) ==> (shld64 x, y, c)
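// e.g. for i64, (or (shl x, 12), (srl y, 52)) maps to shld x, y, 12 because
// the two shift amounts sum to the bit width.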
24674 MachineFunction &MF = DAG.getMachineFunction();
24675 bool OptForSize = MF.getFunction()->getAttributes().
24676 hasAttribute(AttributeSet::FunctionIndex, Attribute::OptimizeForSize);
24678 // SHLD/SHRD instructions have lower register pressure, but on some
24679 // platforms they have higher latency than the equivalent
24680 // series of shift and OR instructions that would otherwise be generated.
24681 // Don't fold (or (x << c) | (y >> (64 - c))) if SHLD/SHRD instructions
24682 // have higher latencies and we are not optimizing for size.
24683 if (!OptForSize && Subtarget->isSHLDSlow())
24686 if (N0.getOpcode() == ISD::SRL && N1.getOpcode() == ISD::SHL)
24688 if (N0.getOpcode() != ISD::SHL || N1.getOpcode() != ISD::SRL)
24690 if (!N0.hasOneUse() || !N1.hasOneUse())
24693 SDValue ShAmt0 = N0.getOperand(1);
24694 if (ShAmt0.getValueType() != MVT::i8)
24696 SDValue ShAmt1 = N1.getOperand(1);
24697 if (ShAmt1.getValueType() != MVT::i8)
24699 if (ShAmt0.getOpcode() == ISD::TRUNCATE)
24700 ShAmt0 = ShAmt0.getOperand(0);
24701 if (ShAmt1.getOpcode() == ISD::TRUNCATE)
24702 ShAmt1 = ShAmt1.getOperand(0);
24705 unsigned Opc = X86ISD::SHLD;
24706 SDValue Op0 = N0.getOperand(0);
24707 SDValue Op1 = N1.getOperand(0);
24708 if (ShAmt0.getOpcode() == ISD::SUB) {
24709 Opc = X86ISD::SHRD;
24710 std::swap(Op0, Op1);
24711 std::swap(ShAmt0, ShAmt1);
24714 unsigned Bits = VT.getSizeInBits();
24715 if (ShAmt1.getOpcode() == ISD::SUB) {
24716 SDValue Sum = ShAmt1.getOperand(0);
24717 if (ConstantSDNode *SumC = dyn_cast<ConstantSDNode>(Sum)) {
24718 SDValue ShAmt1Op1 = ShAmt1.getOperand(1);
24719 if (ShAmt1Op1.getNode()->getOpcode() == ISD::TRUNCATE)
24720 ShAmt1Op1 = ShAmt1Op1.getOperand(0);
24721 if (SumC->getSExtValue() == Bits && ShAmt1Op1 == ShAmt0)
24722 return DAG.getNode(Opc, DL, VT,
24724 DAG.getNode(ISD::TRUNCATE, DL,
24727 } else if (ConstantSDNode *ShAmt1C = dyn_cast<ConstantSDNode>(ShAmt1)) {
24728 ConstantSDNode *ShAmt0C = dyn_cast<ConstantSDNode>(ShAmt0);
24730 ShAmt0C->getSExtValue() + ShAmt1C->getSExtValue() == Bits)
24731 return DAG.getNode(Opc, DL, VT,
24732 N0.getOperand(0), N1.getOperand(0),
24733 DAG.getNode(ISD::TRUNCATE, DL,
24740 // Generate NEG and CMOV for integer abs.
24741 static SDValue performIntegerAbsCombine(SDNode *N, SelectionDAG &DAG) {
24742 EVT VT = N->getValueType(0);
24744 // Since X86 does not have CMOV for 8-bit integer, we don't convert
24745 // 8-bit integer abs to NEG and CMOV.
24746 if (VT.isInteger() && VT.getSizeInBits() == 8)
24749 SDValue N0 = N->getOperand(0);
24750 SDValue N1 = N->getOperand(1);
24753 // Check pattern of XOR(ADD(X,Y), Y) where Y is SRA(X, size(X)-1)
24754 // and change it to SUB and CMOV.
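// This is the classic branchless abs: with Y = X >> (BitWidth-1) every bit of
// Y is X's sign bit, so (X + Y) ^ Y equals |X|; below it is re-expressed as a
// NEG followed by a CMOV on the resulting flags.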
24755 if (VT.isInteger() && N->getOpcode() == ISD::XOR &&
24756 N0.getOpcode() == ISD::ADD &&
24757 N0.getOperand(1) == N1 &&
24758 N1.getOpcode() == ISD::SRA &&
24759 N1.getOperand(0) == N0.getOperand(0))
24760 if (ConstantSDNode *Y1C = dyn_cast<ConstantSDNode>(N1.getOperand(1)))
24761 if (Y1C->getAPIntValue() == VT.getSizeInBits()-1) {
24762 // Generate SUB & CMOV.
24763 SDValue Neg = DAG.getNode(X86ISD::SUB, DL, DAG.getVTList(VT, MVT::i32),
24764 DAG.getConstant(0, VT), N0.getOperand(0));
24766 SDValue Ops[] = { N0.getOperand(0), Neg,
24767 DAG.getConstant(X86::COND_GE, MVT::i8),
24768 SDValue(Neg.getNode(), 1) };
24769 return DAG.getNode(X86ISD::CMOV, DL, DAG.getVTList(VT, MVT::Glue), Ops);
24774 // PerformXorCombine - Attempts to turn XOR nodes into BLSMSK nodes
24775 static SDValue PerformXorCombine(SDNode *N, SelectionDAG &DAG,
24776 TargetLowering::DAGCombinerInfo &DCI,
24777 const X86Subtarget *Subtarget) {
24778 if (DCI.isBeforeLegalizeOps())
24781 if (Subtarget->hasCMov()) {
24782 SDValue RV = performIntegerAbsCombine(N, DAG);
24790 /// PerformLOADCombine - Do target-specific dag combines on LOAD nodes.
24791 static SDValue PerformLOADCombine(SDNode *N, SelectionDAG &DAG,
24792 TargetLowering::DAGCombinerInfo &DCI,
24793 const X86Subtarget *Subtarget) {
24794 LoadSDNode *Ld = cast<LoadSDNode>(N);
24795 EVT RegVT = Ld->getValueType(0);
24796 EVT MemVT = Ld->getMemoryVT();
24798 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
24800 // For chips with slow 32-byte unaligned loads, break the 32-byte operation
24801 // into two 16-byte operations.
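// e.g. a 32-byte vector load on such chips becomes two 16-byte loads whose
// chains are joined with a TokenFactor and whose values are inserted back
// into a single 256-bit register.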
24802 ISD::LoadExtType Ext = Ld->getExtensionType();
24803 unsigned Alignment = Ld->getAlignment();
24804 bool IsAligned = Alignment == 0 || Alignment >= MemVT.getSizeInBits()/8;
24805 if (RegVT.is256BitVector() && Subtarget->isUnalignedMem32Slow() &&
24806 !DCI.isBeforeLegalizeOps() && !IsAligned && Ext == ISD::NON_EXTLOAD) {
24807 unsigned NumElems = RegVT.getVectorNumElements();
24811 SDValue Ptr = Ld->getBasePtr();
24812 SDValue Increment = DAG.getConstant(16, TLI.getPointerTy());
24814 EVT HalfVT = EVT::getVectorVT(*DAG.getContext(), MemVT.getScalarType(),
24816 SDValue Load1 = DAG.getLoad(HalfVT, dl, Ld->getChain(), Ptr,
24817 Ld->getPointerInfo(), Ld->isVolatile(),
24818 Ld->isNonTemporal(), Ld->isInvariant(),
24820 Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, Increment);
24821 SDValue Load2 = DAG.getLoad(HalfVT, dl, Ld->getChain(), Ptr,
24822 Ld->getPointerInfo(), Ld->isVolatile(),
24823 Ld->isNonTemporal(), Ld->isInvariant(),
24824 std::min(16U, Alignment));
24825 SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
24827 Load2.getValue(1));
24829 SDValue NewVec = DAG.getUNDEF(RegVT);
24830 NewVec = Insert128BitVector(NewVec, Load1, 0, DAG, dl);
24831 NewVec = Insert128BitVector(NewVec, Load2, NumElems/2, DAG, dl);
24832 return DCI.CombineTo(N, NewVec, TF, true);
24838 /// PerformMLOADCombine - Resolve extending masked loads.
24839 static SDValue PerformMLOADCombine(SDNode *N, SelectionDAG &DAG,
24840 TargetLowering::DAGCombinerInfo &DCI,
24841 const X86Subtarget *Subtarget) {
24842 MaskedLoadSDNode *Mld = cast<MaskedLoadSDNode>(N);
24843 if (Mld->getExtensionType() != ISD::SEXTLOAD)
24846 EVT VT = Mld->getValueType(0);
24847 unsigned NumElems = VT.getVectorNumElements();
24848 EVT LdVT = Mld->getMemoryVT();
24851 assert(LdVT != VT && "Cannot extend to the same type");
24852 unsigned ToSz = VT.getVectorElementType().getSizeInBits();
24853 unsigned FromSz = LdVT.getVectorElementType().getSizeInBits();
24854 // From, To sizes and ElemCount must be pow of two
24855 assert (isPowerOf2_32(NumElems * FromSz * ToSz) &&
24856 "Unexpected size for extending masked load");
24858 unsigned SizeRatio = ToSz / FromSz;
24859 assert(SizeRatio * NumElems * FromSz == VT.getSizeInBits());
24861 // Create a type on which we perform the shuffle
24862 EVT WideVecVT = EVT::getVectorVT(*DAG.getContext(),
24863 LdVT.getScalarType(), NumElems*SizeRatio);
24864 assert(WideVecVT.getSizeInBits() == VT.getSizeInBits());
24866 // Convert Src0 value
24867 SDValue WideSrc0 = DAG.getNode(ISD::BITCAST, dl, WideVecVT, Mld->getSrc0());
24868 if (Mld->getSrc0().getOpcode() != ISD::UNDEF) {
24869 SmallVector<int, 16> ShuffleVec(NumElems * SizeRatio, -1);
24870 for (unsigned i = 0; i != NumElems; ++i)
24871 ShuffleVec[i] = i * SizeRatio;
24873 // Can't shuffle using an illegal type.
24874 assert (DAG.getTargetLoweringInfo().isTypeLegal(WideVecVT)
24875 && "WideVecVT should be legal");
24876 WideSrc0 = DAG.getVectorShuffle(WideVecVT, dl, WideSrc0,
24877 DAG.getUNDEF(WideVecVT), &ShuffleVec[0]);
24879 // Prepare the new mask
24881 SDValue Mask = Mld->getMask();
24882 if (Mask.getValueType() == VT) {
24883 // Mask and original value have the same type
24884 NewMask = DAG.getNode(ISD::BITCAST, dl, WideVecVT, Mask);
24885 SmallVector<int, 16> ShuffleVec(NumElems * SizeRatio, -1);
24886 for (unsigned i = 0; i != NumElems; ++i)
24887 ShuffleVec[i] = i * SizeRatio;
24888 for (unsigned i = NumElems; i != NumElems*SizeRatio; ++i)
24889 ShuffleVec[i] = NumElems*SizeRatio;
24890 NewMask = DAG.getVectorShuffle(WideVecVT, dl, NewMask,
24891 DAG.getConstant(0, WideVecVT),
24895 assert(Mask.getValueType().getVectorElementType() == MVT::i1);
24896 unsigned WidenNumElts = NumElems*SizeRatio;
24897 unsigned MaskNumElts = VT.getVectorNumElements();
24898 EVT NewMaskVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
24901 unsigned NumConcat = WidenNumElts / MaskNumElts;
24902 SmallVector<SDValue, 16> Ops(NumConcat);
24903 SDValue ZeroVal = DAG.getConstant(0, Mask.getValueType());
24905 for (unsigned i = 1; i != NumConcat; ++i)
24908 NewMask = DAG.getNode(ISD::CONCAT_VECTORS, dl, NewMaskVT, Ops);
24911 SDValue WideLd = DAG.getMaskedLoad(WideVecVT, dl, Mld->getChain(),
24912 Mld->getBasePtr(), NewMask, WideSrc0,
24913 Mld->getMemoryVT(), Mld->getMemOperand(),
24915 SDValue NewVec = DAG.getNode(X86ISD::VSEXT, dl, VT, WideLd);
24916 return DCI.CombineTo(N, NewVec, WideLd.getValue(1), true);
24919 /// PerformMSTORECombine - Resolve truncating masked stores.
24920 static SDValue PerformMSTORECombine(SDNode *N, SelectionDAG &DAG,
24921 const X86Subtarget *Subtarget) {
24922 MaskedStoreSDNode *Mst = cast<MaskedStoreSDNode>(N);
24923 if (!Mst->isTruncatingStore())
24926 EVT VT = Mst->getValue().getValueType();
24927 unsigned NumElems = VT.getVectorNumElements();
24928 EVT StVT = Mst->getMemoryVT();
24931 assert(StVT != VT && "Cannot truncate to the same type");
24932 unsigned FromSz = VT.getVectorElementType().getSizeInBits();
24933 unsigned ToSz = StVT.getVectorElementType().getSizeInBits();
24935 // From, To sizes and ElemCount must be pow of two
24936 assert (isPowerOf2_32(NumElems * FromSz * ToSz) &&
24937 "Unexpected size for truncating masked store");
24938 // We are going to use the original vector elt for storing.
24939 // Accumulated smaller vector elements must be a multiple of the store size.
24940 assert (((NumElems * FromSz) % ToSz) == 0 &&
24941 "Unexpected ratio for truncating masked store");
24943 unsigned SizeRatio = FromSz / ToSz;
24944 assert(SizeRatio * NumElems * ToSz == VT.getSizeInBits());
24946 // Create a type on which we perform the shuffle
24947 EVT WideVecVT = EVT::getVectorVT(*DAG.getContext(),
24948 StVT.getScalarType(), NumElems*SizeRatio);
24950 assert(WideVecVT.getSizeInBits() == VT.getSizeInBits());
24952 SDValue WideVec = DAG.getNode(ISD::BITCAST, dl, WideVecVT, Mst->getValue());
24953 SmallVector<int, 16> ShuffleVec(NumElems * SizeRatio, -1);
24954 for (unsigned i = 0; i != NumElems; ++i)
24955 ShuffleVec[i] = i * SizeRatio;
24957 // Can't shuffle using an illegal type.
24958 assert (DAG.getTargetLoweringInfo().isTypeLegal(WideVecVT)
24959 && "WideVecVT should be legal");
24961 SDValue TruncatedVal = DAG.getVectorShuffle(WideVecVT, dl, WideVec,
24962 DAG.getUNDEF(WideVecVT),
24966 SDValue Mask = Mst->getMask();
24967 if (Mask.getValueType() == VT) {
24968 // Mask and original value have the same type
24969 NewMask = DAG.getNode(ISD::BITCAST, dl, WideVecVT, Mask);
24970 for (unsigned i = 0; i != NumElems; ++i)
24971 ShuffleVec[i] = i * SizeRatio;
24972 for (unsigned i = NumElems; i != NumElems*SizeRatio; ++i)
24973 ShuffleVec[i] = NumElems*SizeRatio;
24974 NewMask = DAG.getVectorShuffle(WideVecVT, dl, NewMask,
24975 DAG.getConstant(0, WideVecVT),
24979 assert(Mask.getValueType().getVectorElementType() == MVT::i1);
24980 unsigned WidenNumElts = NumElems*SizeRatio;
24981 unsigned MaskNumElts = VT.getVectorNumElements();
24982 EVT NewMaskVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
24985 unsigned NumConcat = WidenNumElts / MaskNumElts;
24986 SmallVector<SDValue, 16> Ops(NumConcat);
24987 SDValue ZeroVal = DAG.getConstant(0, Mask.getValueType());
24989 for (unsigned i = 1; i != NumConcat; ++i)
24992 NewMask = DAG.getNode(ISD::CONCAT_VECTORS, dl, NewMaskVT, Ops);
24995 return DAG.getMaskedStore(Mst->getChain(), dl, TruncatedVal, Mst->getBasePtr(),
24996 NewMask, StVT, Mst->getMemOperand(), false);
24998 /// PerformSTORECombine - Do target-specific dag combines on STORE nodes.
24999 static SDValue PerformSTORECombine(SDNode *N, SelectionDAG &DAG,
25000 const X86Subtarget *Subtarget) {
25001 StoreSDNode *St = cast<StoreSDNode>(N);
25002 EVT VT = St->getValue().getValueType();
25003 EVT StVT = St->getMemoryVT();
25005 SDValue StoredVal = St->getOperand(1);
25006 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
25008 // If we are saving a concatenation of two XMM registers and 32-byte stores
25009 // are slow, such as on Sandy Bridge, perform two 16-byte stores.
25010 unsigned Alignment = St->getAlignment();
25011 bool IsAligned = Alignment == 0 || Alignment >= VT.getSizeInBits()/8;
25012 if (VT.is256BitVector() && Subtarget->isUnalignedMem32Slow() &&
25013 StVT == VT && !IsAligned) {
25014 unsigned NumElems = VT.getVectorNumElements();
25018 SDValue Value0 = Extract128BitVector(StoredVal, 0, DAG, dl);
25019 SDValue Value1 = Extract128BitVector(StoredVal, NumElems/2, DAG, dl);
25021 SDValue Stride = DAG.getConstant(16, TLI.getPointerTy());
25022 SDValue Ptr0 = St->getBasePtr();
25023 SDValue Ptr1 = DAG.getNode(ISD::ADD, dl, Ptr0.getValueType(), Ptr0, Stride);
25025 SDValue Ch0 = DAG.getStore(St->getChain(), dl, Value0, Ptr0,
25026 St->getPointerInfo(), St->isVolatile(),
25027 St->isNonTemporal(), Alignment);
25028 SDValue Ch1 = DAG.getStore(St->getChain(), dl, Value1, Ptr1,
25029 St->getPointerInfo(), St->isVolatile(),
25030 St->isNonTemporal(),
25031 std::min(16U, Alignment));
25032 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Ch0, Ch1);
25035 // Optimize trunc store (of multiple scalars) to shuffle and store.
25036 // First, pack all of the elements in one place. Next, store to memory
25037 // in fewer chunks.
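// Roughly: bitcast the source to a vector with the narrow element type,
// shuffle the truncated lanes down to the low end of the register, then emit
// one or more stores of the widest legal integer (or f64) unit.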
25038 if (St->isTruncatingStore() && VT.isVector()) {
25039 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
25040 unsigned NumElems = VT.getVectorNumElements();
25041 assert(StVT != VT && "Cannot truncate to the same type");
25042 unsigned FromSz = VT.getVectorElementType().getSizeInBits();
25043 unsigned ToSz = StVT.getVectorElementType().getSizeInBits();
25045 // From, To sizes and ElemCount must be pow of two
25046 if (!isPowerOf2_32(NumElems * FromSz * ToSz)) return SDValue();
25047 // We are going to use the original vector elt for storing.
25048 // Accumulated smaller vector elements must be a multiple of the store size.
25049 if (0 != (NumElems * FromSz) % ToSz) return SDValue();
25051 unsigned SizeRatio = FromSz / ToSz;
25053 assert(SizeRatio * NumElems * ToSz == VT.getSizeInBits());
25055 // Create a type on which we perform the shuffle
25056 EVT WideVecVT = EVT::getVectorVT(*DAG.getContext(),
25057 StVT.getScalarType(), NumElems*SizeRatio);
25059 assert(WideVecVT.getSizeInBits() == VT.getSizeInBits());
25061 SDValue WideVec = DAG.getNode(ISD::BITCAST, dl, WideVecVT, St->getValue());
25062 SmallVector<int, 8> ShuffleVec(NumElems * SizeRatio, -1);
25063 for (unsigned i = 0; i != NumElems; ++i)
25064 ShuffleVec[i] = i * SizeRatio;
25066 // Can't shuffle using an illegal type.
25067 if (!TLI.isTypeLegal(WideVecVT))
25070 SDValue Shuff = DAG.getVectorShuffle(WideVecVT, dl, WideVec,
25071 DAG.getUNDEF(WideVecVT),
25073 // At this point all of the data is stored at the bottom of the
25074 // register. We now need to save it to mem.
25076 // Find the largest store unit
25077 MVT StoreType = MVT::i8;
25078 for (MVT Tp : MVT::integer_valuetypes()) {
25079 if (TLI.isTypeLegal(Tp) && Tp.getSizeInBits() <= NumElems * ToSz)
25083 // On 32-bit systems, we can't store 64-bit integers. Try bitcasting to f64.
25084 if (TLI.isTypeLegal(MVT::f64) && StoreType.getSizeInBits() < 64 &&
25085 (64 <= NumElems * ToSz))
25086 StoreType = MVT::f64;
25088 // Bitcast the original vector into a vector of store-size units
25089 EVT StoreVecVT = EVT::getVectorVT(*DAG.getContext(),
25090 StoreType, VT.getSizeInBits()/StoreType.getSizeInBits());
25091 assert(StoreVecVT.getSizeInBits() == VT.getSizeInBits());
25092 SDValue ShuffWide = DAG.getNode(ISD::BITCAST, dl, StoreVecVT, Shuff);
25093 SmallVector<SDValue, 8> Chains;
25094 SDValue Increment = DAG.getConstant(StoreType.getSizeInBits()/8,
25095 TLI.getPointerTy());
25096 SDValue Ptr = St->getBasePtr();
25098 // Perform one or more big stores into memory.
25099 for (unsigned i=0, e=(ToSz*NumElems)/StoreType.getSizeInBits(); i!=e; ++i) {
25100 SDValue SubVec = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl,
25101 StoreType, ShuffWide,
25102 DAG.getIntPtrConstant(i));
25103 SDValue Ch = DAG.getStore(St->getChain(), dl, SubVec, Ptr,
25104 St->getPointerInfo(), St->isVolatile(),
25105 St->isNonTemporal(), St->getAlignment());
25106 Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, Increment);
25107 Chains.push_back(Ch);
25108 }
25110 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Chains);
25111 }
25113 // Turn load->store of MMX types into GPR load/stores. This avoids clobbering
25114 // the FP state in cases where an emms may be missing.
25115 // A preferable solution to the general problem is to figure out the right
25116 // places to insert EMMS. This qualifies as a quick hack.
25118 // Similarly, turn load->store of i64 into double load/stores in 32-bit mode.
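// For example (illustrative only): on a 32-bit target with SSE2, an i64 load
// that only feeds an i64 store can be done as one f64 (movq/movsd-style)
// load/store pair; without SSE2 it is split into two i32 load/store pairs at
// offsets 0 and 4, which is what the code below emits.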
25119 if (VT.getSizeInBits() != 64)
25120 return SDValue();
25122 const Function *F = DAG.getMachineFunction().getFunction();
25123 bool NoImplicitFloatOps = F->getAttributes().
25124 hasAttribute(AttributeSet::FunctionIndex, Attribute::NoImplicitFloat);
25125 bool F64IsLegal = !DAG.getTarget().Options.UseSoftFloat && !NoImplicitFloatOps
25126 && Subtarget->hasSSE2();
25127 if ((VT.isVector() ||
25128 (VT == MVT::i64 && F64IsLegal && !Subtarget->is64Bit())) &&
25129 isa<LoadSDNode>(St->getValue()) &&
25130 !cast<LoadSDNode>(St->getValue())->isVolatile() &&
25131 St->getChain().hasOneUse() && !St->isVolatile()) {
25132 SDNode* LdVal = St->getValue().getNode();
25133 LoadSDNode *Ld = nullptr;
25134 int TokenFactorIndex = -1;
25135 SmallVector<SDValue, 8> Ops;
25136 SDNode* ChainVal = St->getChain().getNode();
25137 // Must be a store of a load. We currently handle two cases: the load
25138 // is a direct child, and it's under an intervening TokenFactor. It is
25139 // possible to dig deeper under nested TokenFactors.
25140 if (ChainVal == LdVal)
25141 Ld = cast<LoadSDNode>(St->getChain());
25142 else if (St->getValue().hasOneUse() &&
25143 ChainVal->getOpcode() == ISD::TokenFactor) {
25144 for (unsigned i = 0, e = ChainVal->getNumOperands(); i != e; ++i) {
25145 if (ChainVal->getOperand(i).getNode() == LdVal) {
25146 TokenFactorIndex = i;
25147 Ld = cast<LoadSDNode>(St->getValue());
25148 } else
25149 Ops.push_back(ChainVal->getOperand(i));
25150 }
25151 }
25153 if (!Ld || !ISD::isNormalLoad(Ld))
25154 return SDValue();
25156 // If this is not the MMX case, i.e. we are just turning i64 load/store
25157 // into f64 load/store, avoid the transformation if there are multiple
25158 // uses of the loaded value.
25159 if (!VT.isVector() && !Ld->hasNUsesOfValue(1, 0))
25160 return SDValue();
25162 SDLoc LdDL(Ld), StDL(N);
25164 // If we are a 64-bit capable x86, lower to a single movq load/store pair.
25165 // Otherwise, if it's legal to use f64 SSE instructions, use f64 load/store
25166 // pair instead.
25167 if (Subtarget->is64Bit() || F64IsLegal) {
25168 EVT LdVT = Subtarget->is64Bit() ? MVT::i64 : MVT::f64;
25169 SDValue NewLd = DAG.getLoad(LdVT, LdDL, Ld->getChain(), Ld->getBasePtr(),
25170 Ld->getPointerInfo(), Ld->isVolatile(),
25171 Ld->isNonTemporal(), Ld->isInvariant(),
25172 Ld->getAlignment());
25173 SDValue NewChain = NewLd.getValue(1);
25174 if (TokenFactorIndex != -1) {
25175 Ops.push_back(NewChain);
25176 NewChain = DAG.getNode(ISD::TokenFactor, LdDL, MVT::Other, Ops);
25177 }
25178 return DAG.getStore(NewChain, StDL, NewLd, St->getBasePtr(),
25179 St->getPointerInfo(),
25180 St->isVolatile(), St->isNonTemporal(),
25181 St->getAlignment());
25182 }
25184 // Otherwise, lower to two pairs of 32-bit loads / stores.
25185 SDValue LoAddr = Ld->getBasePtr();
25186 SDValue HiAddr = DAG.getNode(ISD::ADD, LdDL, MVT::i32, LoAddr,
25187 DAG.getConstant(4, MVT::i32));
25189 SDValue LoLd = DAG.getLoad(MVT::i32, LdDL, Ld->getChain(), LoAddr,
25190 Ld->getPointerInfo(),
25191 Ld->isVolatile(), Ld->isNonTemporal(),
25192 Ld->isInvariant(), Ld->getAlignment());
25193 SDValue HiLd = DAG.getLoad(MVT::i32, LdDL, Ld->getChain(), HiAddr,
25194 Ld->getPointerInfo().getWithOffset(4),
25195 Ld->isVolatile(), Ld->isNonTemporal(),
25196 Ld->isInvariant(),
25197 MinAlign(Ld->getAlignment(), 4));
25199 SDValue NewChain = LoLd.getValue(1);
25200 if (TokenFactorIndex != -1) {
25201 Ops.push_back(LoLd);
25202 Ops.push_back(HiLd);
25203 NewChain = DAG.getNode(ISD::TokenFactor, LdDL, MVT::Other, Ops);
25204 }
25206 LoAddr = St->getBasePtr();
25207 HiAddr = DAG.getNode(ISD::ADD, StDL, MVT::i32, LoAddr,
25208 DAG.getConstant(4, MVT::i32));
25210 SDValue LoSt = DAG.getStore(NewChain, StDL, LoLd, LoAddr,
25211 St->getPointerInfo(),
25212 St->isVolatile(), St->isNonTemporal(),
25213 St->getAlignment());
25214 SDValue HiSt = DAG.getStore(NewChain, StDL, HiLd, HiAddr,
25215 St->getPointerInfo().getWithOffset(4),
25216 St->isVolatile(),
25217 St->isNonTemporal(),
25218 MinAlign(St->getAlignment(), 4));
25219 return DAG.getNode(ISD::TokenFactor, StDL, MVT::Other, LoSt, HiSt);
25220 }
25221 return SDValue();
25222 }
25224 /// Return 'true' if this vector operation is "horizontal"
25225 /// and return the operands for the horizontal operation in LHS and RHS. A
25226 /// horizontal operation performs the binary operation on successive elements
25227 /// of its first operand, then on successive elements of its second operand,
25228 /// returning the resulting values in a vector. For example, if
25229 /// A = < float a0, float a1, float a2, float a3 >
25231 /// B = < float b0, float b1, float b2, float b3 >
25232 /// then the result of doing a horizontal operation on A and B is
25233 /// A horizontal-op B = < a0 op a1, a2 op a3, b0 op b1, b2 op b3 >.
25234 /// In short, LHS and RHS are inspected to see if LHS op RHS is of the form
25235 /// A horizontal-op B, for some already available A and B, and if so then LHS is
25236 /// set to A, RHS to B, and the routine returns 'true'.
25237 /// Note that the binary operation should have the property that if one of the
25238 /// operands is UNDEF then the result is UNDEF.
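/// For example (illustrative), with SSE3 this is exactly what HADDPS computes:
/// HADDPS A, B = < a0+a1, a2+a3, b0+b1, b2+b3 >, so an fadd of two such
/// shuffles can be emitted as a single haddps instruction.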
25239 static bool isHorizontalBinOp(SDValue &LHS, SDValue &RHS, bool IsCommutative) {
25240 // Look for the following pattern: if
25241 // A = < float a0, float a1, float a2, float a3 >
25242 // B = < float b0, float b1, float b2, float b3 >
25244 // LHS = VECTOR_SHUFFLE A, B, <0, 2, 4, 6>
25245 // RHS = VECTOR_SHUFFLE A, B, <1, 3, 5, 7>
25246 // then LHS op RHS = < a0 op a1, a2 op a3, b0 op b1, b2 op b3 >
25247 // which is A horizontal-op B.
25249 // At least one of the operands should be a vector shuffle.
25250 if (LHS.getOpcode() != ISD::VECTOR_SHUFFLE &&
25251 RHS.getOpcode() != ISD::VECTOR_SHUFFLE)
25252 return false;
25254 MVT VT = LHS.getSimpleValueType();
25256 assert((VT.is128BitVector() || VT.is256BitVector()) &&
25257 "Unsupported vector type for horizontal add/sub");
25259 // Handle 128 and 256-bit vector lengths. AVX defines horizontal add/sub to
25260 // operate independently on 128-bit lanes.
25261 unsigned NumElts = VT.getVectorNumElements();
25262 unsigned NumLanes = VT.getSizeInBits()/128;
25263 unsigned NumLaneElts = NumElts / NumLanes;
25264 assert((NumLaneElts % 2 == 0) &&
25265 "Vector type should have an even number of elements in each lane");
25266 unsigned HalfLaneElts = NumLaneElts/2;
25268 // View LHS in the form
25269 // LHS = VECTOR_SHUFFLE A, B, LMask
25270 // If LHS is not a shuffle then pretend it is the shuffle
25271 // LHS = VECTOR_SHUFFLE LHS, undef, <0, 1, ..., N-1>
25272 // NOTE: in what follows a default initialized SDValue represents an UNDEF of
25273 // type VT.
25274 SDValue A, B;
25275 SmallVector<int, 16> LMask(NumElts);
25276 if (LHS.getOpcode() == ISD::VECTOR_SHUFFLE) {
25277 if (LHS.getOperand(0).getOpcode() != ISD::UNDEF)
25278 A = LHS.getOperand(0);
25279 if (LHS.getOperand(1).getOpcode() != ISD::UNDEF)
25280 B = LHS.getOperand(1);
25281 ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(LHS.getNode())->getMask();
25282 std::copy(Mask.begin(), Mask.end(), LMask.begin());
25283 } else {
25284 if (LHS.getOpcode() != ISD::UNDEF)
25285 return false;
25286 for (unsigned i = 0; i != NumElts; ++i)
25287 LMask[i] = i;
25288 }
25290 // Likewise, view RHS in the form
25291 // RHS = VECTOR_SHUFFLE C, D, RMask
25292 SDValue C, D;
25293 SmallVector<int, 16> RMask(NumElts);
25294 if (RHS.getOpcode() == ISD::VECTOR_SHUFFLE) {
25295 if (RHS.getOperand(0).getOpcode() != ISD::UNDEF)
25296 C = RHS.getOperand(0);
25297 if (RHS.getOperand(1).getOpcode() != ISD::UNDEF)
25298 D = RHS.getOperand(1);
25299 ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(RHS.getNode())->getMask();
25300 std::copy(Mask.begin(), Mask.end(), RMask.begin());
25301 } else {
25302 if (RHS.getOpcode() != ISD::UNDEF)
25303 return false;
25304 for (unsigned i = 0; i != NumElts; ++i)
25305 RMask[i] = i;
25306 }
25308 // Check that the shuffles are both shuffling the same vectors.
25309 if (!(A == C && B == D) && !(A == D && B == C))
25310 return false;
25312 // If everything is UNDEF then bail out: it would be better to fold to UNDEF.
25313 if (!A.getNode() && !B.getNode())
25314 return false;
25316 // If A and B occur in reverse order in RHS, then "swap" them (which means
25317 // rewriting the mask).
25318 if (A != C)
25319 CommuteVectorShuffleMask(RMask, NumElts);
25321 // At this point LHS and RHS are equivalent to
25322 // LHS = VECTOR_SHUFFLE A, B, LMask
25323 // RHS = VECTOR_SHUFFLE A, B, RMask
25324 // Check that the masks correspond to performing a horizontal operation.
25325 for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
25326 for (unsigned i = 0; i != NumLaneElts; ++i) {
25327 int LIdx = LMask[i+l], RIdx = RMask[i+l];
25329 // Ignore any UNDEF components.
25330 if (LIdx < 0 || RIdx < 0 ||
25331 (!A.getNode() && (LIdx < (int)NumElts || RIdx < (int)NumElts)) ||
25332 (!B.getNode() && (LIdx >= (int)NumElts || RIdx >= (int)NumElts)))
25333 continue;
25335 // Check that successive elements are being operated on. If not, this is
25336 // not a horizontal operation.
25337 unsigned Src = (i/HalfLaneElts); // each lane is split between srcs
25338 int Index = 2*(i%HalfLaneElts) + NumElts*Src + l;
25339 if (!(LIdx == Index && RIdx == Index + 1) &&
25340 !(IsCommutative && LIdx == Index + 1 && RIdx == Index))
25341 return false;
25342 }
25343 }
25345 LHS = A.getNode() ? A : B; // If A is 'UNDEF', use B for it.
25346 RHS = B.getNode() ? B : A; // If B is 'UNDEF', use A for it.
25347 return true;
25348 }
25350 /// Do target-specific dag combines on floating point adds.
25351 static SDValue PerformFADDCombine(SDNode *N, SelectionDAG &DAG,
25352 const X86Subtarget *Subtarget) {
25353 EVT VT = N->getValueType(0);
25354 SDValue LHS = N->getOperand(0);
25355 SDValue RHS = N->getOperand(1);
25357 // Try to synthesize horizontal adds from adds of shuffles.
25358 if (((Subtarget->hasSSE3() && (VT == MVT::v4f32 || VT == MVT::v2f64)) ||
25359 (Subtarget->hasFp256() && (VT == MVT::v8f32 || VT == MVT::v4f64))) &&
25360 isHorizontalBinOp(LHS, RHS, true))
25361 return DAG.getNode(X86ISD::FHADD, SDLoc(N), VT, LHS, RHS);
25363 return SDValue();
25364 }
25365 /// Do target-specific dag combines on floating point subs.
25366 static SDValue PerformFSUBCombine(SDNode *N, SelectionDAG &DAG,
25367 const X86Subtarget *Subtarget) {
25368 EVT VT = N->getValueType(0);
25369 SDValue LHS = N->getOperand(0);
25370 SDValue RHS = N->getOperand(1);
25372 // Try to synthesize horizontal subs from subs of shuffles.
25373 if (((Subtarget->hasSSE3() && (VT == MVT::v4f32 || VT == MVT::v2f64)) ||
25374 (Subtarget->hasFp256() && (VT == MVT::v8f32 || VT == MVT::v4f64))) &&
25375 isHorizontalBinOp(LHS, RHS, false))
25376 return DAG.getNode(X86ISD::FHSUB, SDLoc(N), VT, LHS, RHS);
25378 return SDValue();
25379 }
25380 /// Do target-specific dag combines on X86ISD::FOR and X86ISD::FXOR nodes.
25381 static SDValue PerformFORCombine(SDNode *N, SelectionDAG &DAG) {
25382 assert(N->getOpcode() == X86ISD::FOR || N->getOpcode() == X86ISD::FXOR);
25383 // F[X]OR(0.0, x) -> x
25384 // F[X]OR(x, 0.0) -> x
25385 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(0)))
25386 if (C->getValueAPF().isPosZero())
25387 return N->getOperand(1);
25388 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(1)))
25389 if (C->getValueAPF().isPosZero())
25390 return N->getOperand(0);
25391 return SDValue();
25392 }
25394 /// Do target-specific dag combines on X86ISD::FMIN and X86ISD::FMAX nodes.
25395 static SDValue PerformFMinFMaxCombine(SDNode *N, SelectionDAG &DAG) {
25396 assert(N->getOpcode() == X86ISD::FMIN || N->getOpcode() == X86ISD::FMAX);
25398 // Only perform optimizations if UnsafeMath is used.
25399 if (!DAG.getTarget().Options.UnsafeFPMath)
25400 return SDValue();
25402 // If we run in unsafe-math mode, then convert the FMAX and FMIN nodes
25403 // into FMINC and FMAXC, which are Commutative operations.
25404 unsigned NewOp = 0;
25405 switch (N->getOpcode()) {
25406 default: llvm_unreachable("unknown opcode");
25407 case X86ISD::FMIN: NewOp = X86ISD::FMINC; break;
25408 case X86ISD::FMAX: NewOp = X86ISD::FMAXC; break;
25409 }
25411 return DAG.getNode(NewOp, SDLoc(N), N->getValueType(0),
25412 N->getOperand(0), N->getOperand(1));
25413 }
25415 /// Do target-specific dag combines on X86ISD::FAND nodes.
25416 static SDValue PerformFANDCombine(SDNode *N, SelectionDAG &DAG) {
25417 // FAND(0.0, x) -> 0.0
25418 // FAND(x, 0.0) -> 0.0
25419 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(0)))
25420 if (C->getValueAPF().isPosZero())
25421 return N->getOperand(0);
25422 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(1)))
25423 if (C->getValueAPF().isPosZero())
25424 return N->getOperand(1);
25425 return SDValue();
25426 }
25428 /// Do target-specific dag combines on X86ISD::FANDN nodes
25429 static SDValue PerformFANDNCombine(SDNode *N, SelectionDAG &DAG) {
25430 // FANDN(x, 0.0) -> 0.0
25431 // FANDN(0.0, x) -> x
25432 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(0)))
25433 if (C->getValueAPF().isPosZero())
25434 return N->getOperand(1);
25435 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(1)))
25436 if (C->getValueAPF().isPosZero())
25437 return N->getOperand(1);
25438 return SDValue();
25439 }
25441 static SDValue PerformBTCombine(SDNode *N,
25442 SelectionDAG &DAG,
25443 TargetLowering::DAGCombinerInfo &DCI) {
25444 // BT ignores high bits in the bit index operand.
25445 SDValue Op1 = N->getOperand(1);
25446 if (Op1.hasOneUse()) {
25447 unsigned BitWidth = Op1.getValueSizeInBits();
25448 APInt DemandedMask = APInt::getLowBitsSet(BitWidth, Log2_32(BitWidth));
25449 APInt KnownZero, KnownOne;
25450 TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
25451 !DCI.isBeforeLegalizeOps());
25452 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
25453 if (TLO.ShrinkDemandedConstant(Op1, DemandedMask) ||
25454 TLI.SimplifyDemandedBits(Op1, DemandedMask, KnownZero, KnownOne, TLO))
25455 DCI.CommitTargetLoweringOpt(TLO);
25456 }
25458 return SDValue();
25459 }
25460 static SDValue PerformVZEXT_MOVLCombine(SDNode *N, SelectionDAG &DAG) {
25461 SDValue Op = N->getOperand(0);
25462 if (Op.getOpcode() == ISD::BITCAST)
25463 Op = Op.getOperand(0);
25464 EVT VT = N->getValueType(0), OpVT = Op.getValueType();
25465 if (Op.getOpcode() == X86ISD::VZEXT_LOAD &&
25466 VT.getVectorElementType().getSizeInBits() ==
25467 OpVT.getVectorElementType().getSizeInBits()) {
25468 return DAG.getNode(ISD::BITCAST, SDLoc(N), VT, Op);
25469 }
25471 return SDValue();
25472 }
25473 static SDValue PerformSIGN_EXTEND_INREGCombine(SDNode *N, SelectionDAG &DAG,
25474 const X86Subtarget *Subtarget) {
25475 EVT VT = N->getValueType(0);
25476 if (!VT.isVector())
25477 return SDValue();
25479 SDValue N0 = N->getOperand(0);
25480 SDValue N1 = N->getOperand(1);
25481 EVT ExtraVT = cast<VTSDNode>(N1)->getVT();
25482 SDLoc dl(N);
25484 // The SIGN_EXTEND_INREG to v4i64 is expensive operation on the
25485 // both SSE and AVX2 since there is no sign-extended shift right
25486 // operation on a vector with 64-bit elements.
25487 //(sext_in_reg (v4i64 anyext (v4i32 x )), ExtraVT) ->
25488 // (v4i64 sext (v4i32 sext_in_reg (v4i32 x , ExtraVT)))
25489 if (VT == MVT::v4i64 && (N0.getOpcode() == ISD::ANY_EXTEND ||
25490 N0.getOpcode() == ISD::SIGN_EXTEND)) {
25491 SDValue N00 = N0.getOperand(0);
25493 // EXTLOAD has a better solution on AVX2,
25494 // it may be replaced with X86ISD::VSEXT node.
25495 if (N00.getOpcode() == ISD::LOAD && Subtarget->hasInt256())
25496 if (!ISD::isNormalLoad(N00.getNode()))
25497 return SDValue();
25499 if (N00.getValueType() == MVT::v4i32 && ExtraVT.getSizeInBits() < 128) {
25500 SDValue Tmp = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, MVT::v4i32,
25501 N00, N1);
25502 return DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i64, Tmp);
25503 }
25504 }
25506 return SDValue();
25507 }
25508 static SDValue PerformSExtCombine(SDNode *N, SelectionDAG &DAG,
25509 TargetLowering::DAGCombinerInfo &DCI,
25510 const X86Subtarget *Subtarget) {
25511 SDValue N0 = N->getOperand(0);
25512 EVT VT = N->getValueType(0);
25514 // (i8,i32 sext (sdivrem (i8 x, i8 y)) ->
25515 // (i8,i32 (sdivrem_sext_hreg (i8 x, i8 y)
25516 // This exposes the sext to the sdivrem lowering, so that it directly extends
25517 // from AH (which we otherwise need to do contortions to access).
25518 if (N0.getOpcode() == ISD::SDIVREM && N0.getResNo() == 1 &&
25519 N0.getValueType() == MVT::i8 && VT == MVT::i32) {
25520 SDLoc dl(N);
25521 SDVTList NodeTys = DAG.getVTList(MVT::i8, VT);
25522 SDValue R = DAG.getNode(X86ISD::SDIVREM8_SEXT_HREG, dl, NodeTys,
25523 N0.getOperand(0), N0.getOperand(1));
25524 DAG.ReplaceAllUsesOfValueWith(N0.getValue(0), R.getValue(0));
25525 return R.getValue(1);
25526 }
25528 if (!DCI.isBeforeLegalizeOps())
25529 return SDValue();
25531 if (!Subtarget->hasFp256())
25532 return SDValue();
25534 if (VT.isVector() && VT.getSizeInBits() == 256) {
25535 SDValue R = WidenMaskArithmetic(N, DAG, DCI, Subtarget);
25536 if (R.getNode())
25537 return R;
25538 }
25540 return SDValue();
25541 }
25543 static SDValue PerformFMACombine(SDNode *N, SelectionDAG &DAG,
25544 const X86Subtarget* Subtarget) {
25545 SDLoc dl(N);
25546 EVT VT = N->getValueType(0);
25548 // Let legalize expand this if it isn't a legal type yet.
25549 if (!DAG.getTargetLoweringInfo().isTypeLegal(VT))
25550 return SDValue();
25552 EVT ScalarVT = VT.getScalarType();
25553 if ((ScalarVT != MVT::f32 && ScalarVT != MVT::f64) ||
25554 (!Subtarget->hasFMA() && !Subtarget->hasFMA4()))
25555 return SDValue();
25557 SDValue A = N->getOperand(0);
25558 SDValue B = N->getOperand(1);
25559 SDValue C = N->getOperand(2);
25561 bool NegA = (A.getOpcode() == ISD::FNEG);
25562 bool NegB = (B.getOpcode() == ISD::FNEG);
25563 bool NegC = (C.getOpcode() == ISD::FNEG);
25565 // Negative multiplication when NegA xor NegB
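// Illustrative mapping (assuming the FNEG operands are stripped below):
// fma (fneg A), B, C -> FNMADD A, B, C
// fma A, B, (fneg C) -> FMSUB A, B, C
// fma (fneg A), B, (fneg C) -> FNMSUB A, B, C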
25566 bool NegMul = (NegA != NegB);
25567 if (NegA)
25568 A = A.getOperand(0);
25569 if (NegB)
25570 B = B.getOperand(0);
25571 if (NegC)
25572 C = C.getOperand(0);
25574 unsigned Opcode;
25575 if (!NegMul)
25576 Opcode = (!NegC) ? X86ISD::FMADD : X86ISD::FMSUB;
25577 else
25578 Opcode = (!NegC) ? X86ISD::FNMADD : X86ISD::FNMSUB;
25580 return DAG.getNode(Opcode, dl, VT, A, B, C);
25581 }
25583 static SDValue PerformZExtCombine(SDNode *N, SelectionDAG &DAG,
25584 TargetLowering::DAGCombinerInfo &DCI,
25585 const X86Subtarget *Subtarget) {
25586 // (i32 zext (and (i8 x86isd::setcc_carry), 1)) ->
25587 // (and (i32 x86isd::setcc_carry), 1)
25588 // This eliminates the zext. This transformation is necessary because
25589 // ISD::SETCC is always legalized to i8.
25590 SDLoc dl(N);
25591 SDValue N0 = N->getOperand(0);
25592 EVT VT = N->getValueType(0);
25594 if (N0.getOpcode() == ISD::AND &&
25595 N0.hasOneUse() &&
25596 N0.getOperand(0).hasOneUse()) {
25597 SDValue N00 = N0.getOperand(0);
25598 if (N00.getOpcode() == X86ISD::SETCC_CARRY) {
25599 ConstantSDNode *C = dyn_cast<ConstantSDNode>(N0.getOperand(1));
25600 if (!C || C->getZExtValue() != 1)
25601 return SDValue();
25602 return DAG.getNode(ISD::AND, dl, VT,
25603 DAG.getNode(X86ISD::SETCC_CARRY, dl, VT,
25604 N00.getOperand(0), N00.getOperand(1)),
25605 DAG.getConstant(1, VT));
25606 }
25607 }
25609 if (N0.getOpcode() == ISD::TRUNCATE &&
25610 N0.hasOneUse() &&
25611 N0.getOperand(0).hasOneUse()) {
25612 SDValue N00 = N0.getOperand(0);
25613 if (N00.getOpcode() == X86ISD::SETCC_CARRY) {
25614 return DAG.getNode(ISD::AND, dl, VT,
25615 DAG.getNode(X86ISD::SETCC_CARRY, dl, VT,
25616 N00.getOperand(0), N00.getOperand(1)),
25617 DAG.getConstant(1, VT));
25618 }
25619 }
25620 if (VT.is256BitVector()) {
25621 SDValue R = WidenMaskArithmetic(N, DAG, DCI, Subtarget);
25622 if (R.getNode())
25623 return R;
25624 }
25626 // (i8,i32 zext (udivrem (i8 x, i8 y)) ->
25627 // (i8,i32 (udivrem_zext_hreg (i8 x, i8 y)
25628 // This exposes the zext to the udivrem lowering, so that it directly extends
25629 // from AH (which we otherwise need to do contortions to access).
25630 if (N0.getOpcode() == ISD::UDIVREM &&
25631 N0.getResNo() == 1 && N0.getValueType() == MVT::i8 &&
25632 (VT == MVT::i32 || VT == MVT::i64)) {
25633 SDVTList NodeTys = DAG.getVTList(MVT::i8, VT);
25634 SDValue R = DAG.getNode(X86ISD::UDIVREM8_ZEXT_HREG, dl, NodeTys,
25635 N0.getOperand(0), N0.getOperand(1));
25636 DAG.ReplaceAllUsesOfValueWith(N0.getValue(0), R.getValue(0));
25637 return R.getValue(1);
25638 }
25640 return SDValue();
25641 }
25643 // Optimize x == -y --> x+y == 0
25644 // x != -y --> x+y != 0
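// For example (illustrative): (seteq X, (sub 0, Y)) becomes
// (seteq (add X, Y), 0), avoiding materialization of the negated value.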
25645 static SDValue PerformISDSETCCCombine(SDNode *N, SelectionDAG &DAG,
25646 const X86Subtarget* Subtarget) {
25647 ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get();
25648 SDValue LHS = N->getOperand(0);
25649 SDValue RHS = N->getOperand(1);
25650 EVT VT = N->getValueType(0);
25651 SDLoc DL(N);
25653 if ((CC == ISD::SETNE || CC == ISD::SETEQ) && LHS.getOpcode() == ISD::SUB)
25654 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(LHS.getOperand(0)))
25655 if (C->getAPIntValue() == 0 && LHS.hasOneUse()) {
25656 SDValue addV = DAG.getNode(ISD::ADD, SDLoc(N),
25657 LHS.getValueType(), RHS, LHS.getOperand(1));
25658 return DAG.getSetCC(SDLoc(N), N->getValueType(0),
25659 addV, DAG.getConstant(0, addV.getValueType()), CC);
25660 }
25661 if ((CC == ISD::SETNE || CC == ISD::SETEQ) && RHS.getOpcode() == ISD::SUB)
25662 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS.getOperand(0)))
25663 if (C->getAPIntValue() == 0 && RHS.hasOneUse()) {
25664 SDValue addV = DAG.getNode(ISD::ADD, SDLoc(N),
25665 RHS.getValueType(), LHS, RHS.getOperand(1));
25666 return DAG.getSetCC(SDLoc(N), N->getValueType(0),
25667 addV, DAG.getConstant(0, addV.getValueType()), CC);
25668 }
25670 if (VT.getScalarType() == MVT::i1) {
25671 bool IsSEXT0 = (LHS.getOpcode() == ISD::SIGN_EXTEND) &&
25672 (LHS.getOperand(0).getValueType().getScalarType() == MVT::i1);
25673 bool IsVZero0 = ISD::isBuildVectorAllZeros(LHS.getNode());
25674 if (!IsSEXT0 && !IsVZero0)
25675 return SDValue();
25676 bool IsSEXT1 = (RHS.getOpcode() == ISD::SIGN_EXTEND) &&
25677 (RHS.getOperand(0).getValueType().getScalarType() == MVT::i1);
25678 bool IsVZero1 = ISD::isBuildVectorAllZeros(RHS.getNode());
25680 if (!IsSEXT1 && !IsVZero1)
25681 return SDValue();
25683 if (IsSEXT0 && IsVZero1) {
25684 assert(VT == LHS.getOperand(0).getValueType() && "Unexpected operand type");
25685 if (CC == ISD::SETEQ)
25686 return DAG.getNOT(DL, LHS.getOperand(0), VT);
25687 return LHS.getOperand(0);
25688 }
25689 if (IsSEXT1 && IsVZero0) {
25690 assert(VT == RHS.getOperand(0).getValueType() && "Unexpected operand type");
25691 if (CC == ISD::SETEQ)
25692 return DAG.getNOT(DL, RHS.getOperand(0), VT);
25693 return RHS.getOperand(0);
25694 }
25695 }
25697 return SDValue();
25698 }
25700 static SDValue PerformINSERTPSCombine(SDNode *N, SelectionDAG &DAG,
25701 const X86Subtarget *Subtarget) {
25702 SDLoc dl(N);
25703 MVT VT = N->getOperand(1)->getSimpleValueType(0);
25704 assert((VT == MVT::v4f32 || VT == MVT::v4i32) &&
25705 "X86insertps is only defined for v4x32");
25707 SDValue Ld = N->getOperand(1);
25708 if (MayFoldLoad(Ld)) {
25709 // Extract the countS bits from the immediate so we can get the proper
25710 // address when narrowing the vector load to a specific element.
25711 // When the second source op is a memory address, insertps doesn't use
25712 // countS and just gets an f32 from that address.
25713 unsigned DestIndex =
25714 cast<ConstantSDNode>(N->getOperand(2))->getZExtValue() >> 6;
25715 Ld = NarrowVectorLoadToElement(cast<LoadSDNode>(Ld), DestIndex, DAG);
25716 } else
25717 return SDValue();
25719 // Create this as a scalar to vector to match the instruction pattern.
25720 SDValue LoadScalarToVector = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Ld);
25721 // countS bits are ignored when loading from memory on insertps, which
25722 // means we don't need to explicitly set them to 0.
25723 return DAG.getNode(X86ISD::INSERTPS, dl, VT, N->getOperand(0),
25724 LoadScalarToVector, N->getOperand(2));
25725 }
25727 // Helper function of PerformSETCCCombine. It materializes "setb reg"
25728 // as "sbb reg,reg", since it can be extended without zext and produces
25729 // an all-ones bit which is more useful than 0/1 in some cases.
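// Roughly (illustrative): the carry is materialized as "sbb reg,reg", which
// yields all-ones when CF is set, and is then masked with "and reg,1"; unlike
// a plain setb result, the sbb form extends to wider types without a zext.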
25730 static SDValue MaterializeSETB(SDLoc DL, SDValue EFLAGS, SelectionDAG &DAG,
25731 MVT VT) {
25732 if (VT == MVT::i8)
25733 return DAG.getNode(ISD::AND, DL, VT,
25734 DAG.getNode(X86ISD::SETCC_CARRY, DL, MVT::i8,
25735 DAG.getConstant(X86::COND_B, MVT::i8), EFLAGS),
25736 DAG.getConstant(1, VT));
25737 assert (VT == MVT::i1 && "Unexpected type for SETCC node");
25738 return DAG.getNode(ISD::TRUNCATE, DL, MVT::i1,
25739 DAG.getNode(X86ISD::SETCC_CARRY, DL, MVT::i8,
25740 DAG.getConstant(X86::COND_B, MVT::i8), EFLAGS));
25741 }
25743 // Optimize RES = X86ISD::SETCC CONDCODE, EFLAG_INPUT
25744 static SDValue PerformSETCCCombine(SDNode *N, SelectionDAG &DAG,
25745 TargetLowering::DAGCombinerInfo &DCI,
25746 const X86Subtarget *Subtarget) {
25747 SDLoc DL(N);
25748 X86::CondCode CC = X86::CondCode(N->getConstantOperandVal(0));
25749 SDValue EFLAGS = N->getOperand(1);
25751 if (CC == X86::COND_A) {
25752 // Try to convert COND_A into COND_B in an attempt to facilitate
25753 // materializing "setb reg".
25755 // Do not flip "e > c", where "c" is a constant, because Cmp instruction
25756 // cannot take an immediate as its first operand.
25758 if (EFLAGS.getOpcode() == X86ISD::SUB && EFLAGS.hasOneUse() &&
25759 EFLAGS.getValueType().isInteger() &&
25760 !isa<ConstantSDNode>(EFLAGS.getOperand(1))) {
25761 SDValue NewSub = DAG.getNode(X86ISD::SUB, SDLoc(EFLAGS),
25762 EFLAGS.getNode()->getVTList(),
25763 EFLAGS.getOperand(1), EFLAGS.getOperand(0));
25764 SDValue NewEFLAGS = SDValue(NewSub.getNode(), EFLAGS.getResNo());
25765 return MaterializeSETB(DL, NewEFLAGS, DAG, N->getSimpleValueType(0));
25766 }
25767 }
25769 // Materialize "setb reg" as "sbb reg,reg", since it can be extended without
25770 // a zext and produces an all-ones bit which is more useful than 0/1 in some
25771 // cases.
25772 if (CC == X86::COND_B)
25773 return MaterializeSETB(DL, EFLAGS, DAG, N->getSimpleValueType(0));
25775 SDValue Flags;
25777 Flags = checkBoolTestSetCCCombine(EFLAGS, CC);
25778 if (Flags.getNode()) {
25779 SDValue Cond = DAG.getConstant(CC, MVT::i8);
25780 return DAG.getNode(X86ISD::SETCC, DL, N->getVTList(), Cond, Flags);
25781 }
25783 return SDValue();
25784 }
25786 // Optimize branch condition evaluation.
25788 static SDValue PerformBrCondCombine(SDNode *N, SelectionDAG &DAG,
25789 TargetLowering::DAGCombinerInfo &DCI,
25790 const X86Subtarget *Subtarget) {
25791 SDLoc DL(N);
25792 SDValue Chain = N->getOperand(0);
25793 SDValue Dest = N->getOperand(1);
25794 SDValue EFLAGS = N->getOperand(3);
25795 X86::CondCode CC = X86::CondCode(N->getConstantOperandVal(2));
25797 SDValue Flags;
25799 Flags = checkBoolTestSetCCCombine(EFLAGS, CC);
25800 if (Flags.getNode()) {
25801 SDValue Cond = DAG.getConstant(CC, MVT::i8);
25802 return DAG.getNode(X86ISD::BRCOND, DL, N->getVTList(), Chain, Dest, Cond,
25803 Flags);
25804 }
25806 return SDValue();
25807 }
25809 static SDValue performVectorCompareAndMaskUnaryOpCombine(SDNode *N,
25810 SelectionDAG &DAG) {
25811 // Take advantage of vector comparisons producing 0 or -1 in each lane to
25812 // optimize away operation when it's from a constant.
25814 // The general transformation is:
25815 // UNARYOP(AND(VECTOR_CMP(x,y), constant)) -->
25816 // AND(VECTOR_CMP(x,y), constant2)
25817 // constant2 = UNARYOP(constant)
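// Illustrative instance (assumption, not taken from the source):
//   sitofp (and (setcc x, y), <4 x i32> <1, 1, 1, 1>)
// becomes, after converting the constant up front,
//   bitcast (and (setcc x, y), bitcast (<4 x float> <1.0, 1.0, 1.0, 1.0>))
// so no per-element int-to-fp conversion remains at run time.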
25819 // Early exit if this isn't a vector operation, the operand of the
25820 // unary operation isn't a bitwise AND, or if the sizes of the operations
25821 // aren't the same.
25822 EVT VT = N->getValueType(0);
25823 if (!VT.isVector() || N->getOperand(0)->getOpcode() != ISD::AND ||
25824 N->getOperand(0)->getOperand(0)->getOpcode() != ISD::SETCC ||
25825 VT.getSizeInBits() != N->getOperand(0)->getValueType(0).getSizeInBits())
25826 return SDValue();
25828 // Now check that the other operand of the AND is a constant. We could
25829 // make the transformation for non-constant splats as well, but it's unclear
25830 // that would be a benefit as it would not eliminate any operations, just
25831 // perform one more step in scalar code before moving to the vector unit.
25832 if (BuildVectorSDNode *BV =
25833 dyn_cast<BuildVectorSDNode>(N->getOperand(0)->getOperand(1))) {
25834 // Bail out if the vector isn't a constant.
25835 if (!BV->isConstant())
25836 return SDValue();
25838 // Everything checks out. Build up the new and improved node.
25839 SDLoc DL(N);
25840 EVT IntVT = BV->getValueType(0);
25841 // Create a new constant of the appropriate type for the transformed
25842 // DAG node.
25843 SDValue SourceConst = DAG.getNode(N->getOpcode(), DL, VT, SDValue(BV, 0));
25844 // The AND node needs bitcasts to/from an integer vector type around it.
25845 SDValue MaskConst = DAG.getNode(ISD::BITCAST, DL, IntVT, SourceConst);
25846 SDValue NewAnd = DAG.getNode(ISD::AND, DL, IntVT,
25847 N->getOperand(0)->getOperand(0), MaskConst);
25848 SDValue Res = DAG.getNode(ISD::BITCAST, DL, VT, NewAnd);
25849 return Res;
25850 }
25852 return SDValue();
25853 }
25855 static SDValue PerformSINT_TO_FPCombine(SDNode *N, SelectionDAG &DAG,
25856 const X86Subtarget *Subtarget) {
25857 // First try to optimize away the conversion entirely when it's
25858 // conditionally from a constant. Vectors only.
25859 SDValue Res = performVectorCompareAndMaskUnaryOpCombine(N, DAG);
25860 if (Res != SDValue())
25861 return Res;
25863 // Now move on to more general possibilities.
25864 SDValue Op0 = N->getOperand(0);
25865 EVT InVT = Op0->getValueType(0);
25867 // SINT_TO_FP(v4i8) -> SINT_TO_FP(SEXT(v4i8 to v4i32))
25868 if (InVT == MVT::v8i8 || InVT == MVT::v4i8) {
25869 SDLoc dl(N);
25870 MVT DstVT = InVT == MVT::v4i8 ? MVT::v4i32 : MVT::v8i32;
25871 SDValue P = DAG.getNode(ISD::SIGN_EXTEND, dl, DstVT, Op0);
25872 return DAG.getNode(ISD::SINT_TO_FP, dl, N->getValueType(0), P);
25873 }
25875 // Transform (SINT_TO_FP (i64 ...)) into an x87 operation if we have
25876 // a 32-bit target where SSE doesn't support i64->FP operations.
25877 if (Op0.getOpcode() == ISD::LOAD) {
25878 LoadSDNode *Ld = cast<LoadSDNode>(Op0.getNode());
25879 EVT VT = Ld->getValueType(0);
25880 if (!Ld->isVolatile() && !N->getValueType(0).isVector() &&
25881 ISD::isNON_EXTLoad(Op0.getNode()) && Op0.hasOneUse() &&
25882 !Subtarget->is64Bit() && VT == MVT::i64) {
25883 SDValue FILDChain = Subtarget->getTargetLowering()->BuildFILD(
25884 SDValue(N, 0), Ld->getValueType(0), Ld->getChain(), Op0, DAG);
25885 DAG.ReplaceAllUsesOfValueWith(Op0.getValue(1), FILDChain.getValue(1));
25886 return FILDChain;
25887 }
25888 }
25890 return SDValue();
25891 }
25892 // Optimize RES, EFLAGS = X86ISD::ADC LHS, RHS, EFLAGS
25893 static SDValue PerformADCCombine(SDNode *N, SelectionDAG &DAG,
25894 X86TargetLowering::DAGCombinerInfo &DCI) {
25895 // If the LHS and RHS of the ADC node are zero, then it can't overflow and
25896 // the result is either zero or one (depending on the input carry bit).
25897 // Strength reduce this down to a "set on carry" aka SETCC_CARRY&1.
25898 if (X86::isZeroNode(N->getOperand(0)) &&
25899 X86::isZeroNode(N->getOperand(1)) &&
25900 // We don't have a good way to replace an EFLAGS use, so only do this when
25901 // the result is dead.
25902 SDValue(N, 1).use_empty()) {
25903 SDLoc DL(N);
25904 EVT VT = N->getValueType(0);
25905 SDValue CarryOut = DAG.getConstant(0, N->getValueType(1));
25906 SDValue Res1 = DAG.getNode(ISD::AND, DL, VT,
25907 DAG.getNode(X86ISD::SETCC_CARRY, DL, VT,
25908 DAG.getConstant(X86::COND_B,MVT::i8),
25909 N->getOperand(2)),
25910 DAG.getConstant(1, VT));
25911 return DCI.CombineTo(N, Res1, CarryOut);
25912 }
25914 return SDValue();
25915 }
25917 // fold (add Y, (sete X, 0)) -> adc 0, Y
25918 // (add Y, (setne X, 0)) -> sbb -1, Y
25919 // (sub (sete X, 0), Y) -> sbb 0, Y
25920 // (sub (setne X, 0), Y) -> adc -1, Y
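// For example (illustrative): "add Y, (sete X, 0)" is emitted as
// "cmp $1, X; adc $0, Y" -- the compare sets the carry flag exactly when X
// was zero, and the adc folds that carry into Y.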
25921 static SDValue OptimizeConditionalInDecrement(SDNode *N, SelectionDAG &DAG) {
25922 SDLoc DL(N);
25924 // Look through ZExts.
25925 SDValue Ext = N->getOperand(N->getOpcode() == ISD::SUB ? 1 : 0);
25926 if (Ext.getOpcode() != ISD::ZERO_EXTEND || !Ext.hasOneUse())
25927 return SDValue();
25929 SDValue SetCC = Ext.getOperand(0);
25930 if (SetCC.getOpcode() != X86ISD::SETCC || !SetCC.hasOneUse())
25931 return SDValue();
25933 X86::CondCode CC = (X86::CondCode)SetCC.getConstantOperandVal(0);
25934 if (CC != X86::COND_E && CC != X86::COND_NE)
25935 return SDValue();
25937 SDValue Cmp = SetCC.getOperand(1);
25938 if (Cmp.getOpcode() != X86ISD::CMP || !Cmp.hasOneUse() ||
25939 !X86::isZeroNode(Cmp.getOperand(1)) ||
25940 !Cmp.getOperand(0).getValueType().isInteger())
25941 return SDValue();
25943 SDValue CmpOp0 = Cmp.getOperand(0);
25944 SDValue NewCmp = DAG.getNode(X86ISD::CMP, DL, MVT::i32, CmpOp0,
25945 DAG.getConstant(1, CmpOp0.getValueType()));
25947 SDValue OtherVal = N->getOperand(N->getOpcode() == ISD::SUB ? 0 : 1);
25948 if (CC == X86::COND_NE)
25949 return DAG.getNode(N->getOpcode() == ISD::SUB ? X86ISD::ADC : X86ISD::SBB,
25950 DL, OtherVal.getValueType(), OtherVal,
25951 DAG.getConstant(-1ULL, OtherVal.getValueType()), NewCmp);
25952 return DAG.getNode(N->getOpcode() == ISD::SUB ? X86ISD::SBB : X86ISD::ADC,
25953 DL, OtherVal.getValueType(), OtherVal,
25954 DAG.getConstant(0, OtherVal.getValueType()), NewCmp);
25955 }
25957 /// PerformADDCombine - Do target-specific dag combines on integer adds.
25958 static SDValue PerformAddCombine(SDNode *N, SelectionDAG &DAG,
25959 const X86Subtarget *Subtarget) {
25960 EVT VT = N->getValueType(0);
25961 SDValue Op0 = N->getOperand(0);
25962 SDValue Op1 = N->getOperand(1);
25964 // Try to synthesize horizontal adds from adds of shuffles.
25965 if (((Subtarget->hasSSSE3() && (VT == MVT::v8i16 || VT == MVT::v4i32)) ||
25966 (Subtarget->hasInt256() && (VT == MVT::v16i16 || VT == MVT::v8i32))) &&
25967 isHorizontalBinOp(Op0, Op1, true))
25968 return DAG.getNode(X86ISD::HADD, SDLoc(N), VT, Op0, Op1);
25970 return OptimizeConditionalInDecrement(N, DAG);
25971 }
25973 static SDValue PerformSubCombine(SDNode *N, SelectionDAG &DAG,
25974 const X86Subtarget *Subtarget) {
25975 SDValue Op0 = N->getOperand(0);
25976 SDValue Op1 = N->getOperand(1);
25978 // X86 can't encode an immediate LHS of a sub. See if we can push the
25979 // negation into a preceding instruction.
25980 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op0)) {
25981 // If the RHS of the sub is a XOR with one use and a constant, invert the
25982 // immediate. Then add one to the LHS of the sub so we can turn
25983 // X-Y -> X+~Y+1, saving one register.
25984 if (Op1->hasOneUse() && Op1.getOpcode() == ISD::XOR &&
25985 isa<ConstantSDNode>(Op1.getOperand(1))) {
25986 APInt XorC = cast<ConstantSDNode>(Op1.getOperand(1))->getAPIntValue();
25987 EVT VT = Op0.getValueType();
25988 SDValue NewXor = DAG.getNode(ISD::XOR, SDLoc(Op1), VT,
25989 Op1.getOperand(0),
25990 DAG.getConstant(~XorC, VT));
25991 return DAG.getNode(ISD::ADD, SDLoc(N), VT, NewXor,
25992 DAG.getConstant(C->getAPIntValue()+1, VT));
25993 }
25994 }
25996 // Try to synthesize horizontal subs from subs of shuffles.
25997 EVT VT = N->getValueType(0);
25998 if (((Subtarget->hasSSSE3() && (VT == MVT::v8i16 || VT == MVT::v4i32)) ||
25999 (Subtarget->hasInt256() && (VT == MVT::v16i16 || VT == MVT::v8i32))) &&
26000 isHorizontalBinOp(Op0, Op1, true))
26001 return DAG.getNode(X86ISD::HSUB, SDLoc(N), VT, Op0, Op1);
26003 return OptimizeConditionalInDecrement(N, DAG);
26004 }
26006 /// performVZEXTCombine - Performs combines on X86ISD::VZEXT nodes.
26007 static SDValue performVZEXTCombine(SDNode *N, SelectionDAG &DAG,
26008 TargetLowering::DAGCombinerInfo &DCI,
26009 const X86Subtarget *Subtarget) {
26010 SDLoc DL(N);
26011 MVT VT = N->getSimpleValueType(0);
26012 SDValue Op = N->getOperand(0);
26013 MVT OpVT = Op.getSimpleValueType();
26014 MVT OpEltVT = OpVT.getVectorElementType();
26015 unsigned InputBits = OpEltVT.getSizeInBits() * VT.getVectorNumElements();
26017 // (vzext (bitcast (vzext (x)) -> (vzext x)
26018 SDValue V = Op;
26019 while (V.getOpcode() == ISD::BITCAST)
26020 V = V.getOperand(0);
26022 if (V != Op && V.getOpcode() == X86ISD::VZEXT) {
26023 MVT InnerVT = V.getSimpleValueType();
26024 MVT InnerEltVT = InnerVT.getVectorElementType();
26026 // If the element sizes match exactly, we can just do one larger vzext. This
26027 // is always an exact type match as vzext operates on integer types.
26028 if (OpEltVT == InnerEltVT) {
26029 assert(OpVT == InnerVT && "Types must match for vzext!");
26030 return DAG.getNode(X86ISD::VZEXT, DL, VT, V.getOperand(0));
26031 }
26033 // The only other way we can combine them is if only a single element of the
26034 // inner vzext is used in the input to the outer vzext.
26035 if (InnerEltVT.getSizeInBits() < InputBits)
26036 return SDValue();
26038 // In this case, the inner vzext is completely dead because we're going to
26039 // only look at bits inside of the low element. Just do the outer vzext on
26040 // a bitcast of the input to the inner.
26041 return DAG.getNode(X86ISD::VZEXT, DL, VT,
26042 DAG.getNode(ISD::BITCAST, DL, OpVT, V));
26043 }
26045 // Check if we can bypass extracting and re-inserting an element of an input
26046 // vector. Essentially:
26047 // (bitcast (sclr2vec (ext_vec_elt x))) -> (bitcast x)
26048 if (V.getOpcode() == ISD::SCALAR_TO_VECTOR &&
26049 V.getOperand(0).getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
26050 V.getOperand(0).getSimpleValueType().getSizeInBits() == InputBits) {
26051 SDValue ExtractedV = V.getOperand(0);
26052 SDValue OrigV = ExtractedV.getOperand(0);
26053 if (auto *ExtractIdx = dyn_cast<ConstantSDNode>(ExtractedV.getOperand(1)))
26054 if (ExtractIdx->getZExtValue() == 0) {
26055 MVT OrigVT = OrigV.getSimpleValueType();
26056 // Extract a subvector if necessary...
26057 if (OrigVT.getSizeInBits() > OpVT.getSizeInBits()) {
26058 int Ratio = OrigVT.getSizeInBits() / OpVT.getSizeInBits();
26059 OrigVT = MVT::getVectorVT(OrigVT.getVectorElementType(),
26060 OrigVT.getVectorNumElements() / Ratio);
26061 OrigV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, OrigVT, OrigV,
26062 DAG.getIntPtrConstant(0));
26063 }
26064 Op = DAG.getNode(ISD::BITCAST, DL, OpVT, OrigV);
26065 return DAG.getNode(X86ISD::VZEXT, DL, VT, Op);
26066 }
26067 }
26069 return SDValue();
26070 }
26072 SDValue X86TargetLowering::PerformDAGCombine(SDNode *N,
26073 DAGCombinerInfo &DCI) const {
26074 SelectionDAG &DAG = DCI.DAG;
26075 switch (N->getOpcode()) {
26076 default: break;
26077 case ISD::EXTRACT_VECTOR_ELT:
26078 return PerformEXTRACT_VECTOR_ELTCombine(N, DAG, DCI);
26079 case ISD::VSELECT:
26080 case ISD::SELECT:
26081 case X86ISD::SHRUNKBLEND:
26082 return PerformSELECTCombine(N, DAG, DCI, Subtarget);
26083 case X86ISD::CMOV: return PerformCMOVCombine(N, DAG, DCI, Subtarget);
26084 case ISD::ADD: return PerformAddCombine(N, DAG, Subtarget);
26085 case ISD::SUB: return PerformSubCombine(N, DAG, Subtarget);
26086 case X86ISD::ADC: return PerformADCCombine(N, DAG, DCI);
26087 case ISD::MUL: return PerformMulCombine(N, DAG, DCI);
26088 case ISD::SHL:
26089 case ISD::SRA:
26090 case ISD::SRL: return PerformShiftCombine(N, DAG, DCI, Subtarget);
26091 case ISD::AND: return PerformAndCombine(N, DAG, DCI, Subtarget);
26092 case ISD::OR: return PerformOrCombine(N, DAG, DCI, Subtarget);
26093 case ISD::XOR: return PerformXorCombine(N, DAG, DCI, Subtarget);
26094 case ISD::LOAD: return PerformLOADCombine(N, DAG, DCI, Subtarget);
26095 case ISD::MLOAD: return PerformMLOADCombine(N, DAG, DCI, Subtarget);
26096 case ISD::STORE: return PerformSTORECombine(N, DAG, Subtarget);
26097 case ISD::MSTORE: return PerformMSTORECombine(N, DAG, Subtarget);
26098 case ISD::SINT_TO_FP: return PerformSINT_TO_FPCombine(N, DAG, Subtarget);
26099 case ISD::FADD: return PerformFADDCombine(N, DAG, Subtarget);
26100 case ISD::FSUB: return PerformFSUBCombine(N, DAG, Subtarget);
26101 case X86ISD::FXOR:
26102 case X86ISD::FOR: return PerformFORCombine(N, DAG);
26103 case X86ISD::FMIN:
26104 case X86ISD::FMAX: return PerformFMinFMaxCombine(N, DAG);
26105 case X86ISD::FAND: return PerformFANDCombine(N, DAG);
26106 case X86ISD::FANDN: return PerformFANDNCombine(N, DAG);
26107 case X86ISD::BT: return PerformBTCombine(N, DAG, DCI);
26108 case X86ISD::VZEXT_MOVL: return PerformVZEXT_MOVLCombine(N, DAG);
26109 case ISD::ANY_EXTEND:
26110 case ISD::ZERO_EXTEND: return PerformZExtCombine(N, DAG, DCI, Subtarget);
26111 case ISD::SIGN_EXTEND: return PerformSExtCombine(N, DAG, DCI, Subtarget);
26112 case ISD::SIGN_EXTEND_INREG:
26113 return PerformSIGN_EXTEND_INREGCombine(N, DAG, Subtarget);
26114 case ISD::TRUNCATE: return PerformTruncateCombine(N, DAG,DCI,Subtarget);
26115 case ISD::SETCC: return PerformISDSETCCCombine(N, DAG, Subtarget);
26116 case X86ISD::SETCC: return PerformSETCCCombine(N, DAG, DCI, Subtarget);
26117 case X86ISD::BRCOND: return PerformBrCondCombine(N, DAG, DCI, Subtarget);
26118 case X86ISD::VZEXT: return performVZEXTCombine(N, DAG, DCI, Subtarget);
26119 case X86ISD::SHUFP: // Handle all target specific shuffles
26120 case X86ISD::PALIGNR:
26121 case X86ISD::UNPCKH:
26122 case X86ISD::UNPCKL:
26123 case X86ISD::MOVHLPS:
26124 case X86ISD::MOVLHPS:
26125 case X86ISD::PSHUFB:
26126 case X86ISD::PSHUFD:
26127 case X86ISD::PSHUFHW:
26128 case X86ISD::PSHUFLW:
26129 case X86ISD::MOVSS:
26130 case X86ISD::MOVSD:
26131 case X86ISD::VPERMILPI:
26132 case X86ISD::VPERM2X128:
26133 case ISD::VECTOR_SHUFFLE: return PerformShuffleCombine(N, DAG, DCI,Subtarget);
26134 case ISD::FMA: return PerformFMACombine(N, DAG, Subtarget);
26135 case ISD::INTRINSIC_WO_CHAIN:
26136 return PerformINTRINSIC_WO_CHAINCombine(N, DAG, Subtarget);
26137 case X86ISD::INSERTPS: {
26138 if (getTargetMachine().getOptLevel() > CodeGenOpt::None)
26139 return PerformINSERTPSCombine(N, DAG, Subtarget);
26140 break;
26141 }
26142 case ISD::BUILD_VECTOR: return PerformBUILD_VECTORCombine(N, DAG, Subtarget);
26143 }
26145 return SDValue();
26146 }
26148 /// isTypeDesirableForOp - Return true if the target has native support for
26149 /// the specified value type and it is 'desirable' to use the type for the
26150 /// given node type. e.g. On x86 i16 is legal, but undesirable since i16
26151 /// instruction encodings are longer and some i16 instructions are slow.
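/// For instance, a 16-bit add needs the 0x66 operand-size prefix, so promoting
/// an i16 add to i32 usually gives a shorter and faster encoding (illustrative
/// example of the rationale above).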
26152 bool X86TargetLowering::isTypeDesirableForOp(unsigned Opc, EVT VT) const {
26153 if (!isTypeLegal(VT))
26154 return false;
26155 if (VT != MVT::i16)
26156 return true;
26158 switch (Opc) {
26159 default:
26160 return true;
26161 case ISD::LOAD:
26162 case ISD::SIGN_EXTEND:
26163 case ISD::ZERO_EXTEND:
26164 case ISD::ANY_EXTEND:
26165 case ISD::SHL:
26166 case ISD::SRL:
26167 case ISD::SUB:
26168 case ISD::ADD:
26169 case ISD::MUL:
26170 case ISD::AND:
26171 case ISD::OR:
26172 case ISD::XOR:
26173 return false;
26174 }
26175 }
26177 /// IsDesirableToPromoteOp - This method queries the target whether it is
26178 /// beneficial for dag combiner to promote the specified node. If true, it
26179 /// should return the desired promotion type by reference.
26180 bool X86TargetLowering::IsDesirableToPromoteOp(SDValue Op, EVT &PVT) const {
26181 EVT VT = Op.getValueType();
26182 if (VT != MVT::i16)
26183 return false;
26185 bool Promote = false;
26186 bool Commute = false;
26187 switch (Op.getOpcode()) {
26188 default: break;
26189 case ISD::LOAD: {
26190 LoadSDNode *LD = cast<LoadSDNode>(Op);
26191 // If the non-extending load has a single use and it's not live out, then it
26192 // might be folded.
26193 if (LD->getExtensionType() == ISD::NON_EXTLOAD /*&&
26194 Op.hasOneUse()*/) {
26195 for (SDNode::use_iterator UI = Op.getNode()->use_begin(),
26196 UE = Op.getNode()->use_end(); UI != UE; ++UI) {
26197 // The only case where we'd want to promote LOAD (rather than it being
26198 // promoted as an operand) is when its only use is liveout.
26199 if (UI->getOpcode() != ISD::CopyToReg)
26200 return false;
26201 }
26202 }
26203 Promote = true;
26204 break;
26205 }
26206 case ISD::SIGN_EXTEND:
26207 case ISD::ZERO_EXTEND:
26208 case ISD::ANY_EXTEND:
26209 Promote = true;
26210 break;
26211 case ISD::SHL:
26212 case ISD::SRL: {
26213 SDValue N0 = Op.getOperand(0);
26214 // Look out for (store (shl (load), x)).
26215 if (MayFoldLoad(N0) && MayFoldIntoStore(Op))
26216 return false;
26217 Promote = true;
26218 break;
26219 }
26220 case ISD::ADD:
26221 case ISD::MUL:
26222 case ISD::AND:
26223 case ISD::OR:
26224 case ISD::XOR:
26225 Commute = true;
26226 // fallthrough
26227 case ISD::SUB: {
26228 SDValue N0 = Op.getOperand(0);
26229 SDValue N1 = Op.getOperand(1);
26230 if (!Commute && MayFoldLoad(N1))
26231 return false;
26232 // Avoid disabling potential load folding opportunities.
26233 if (MayFoldLoad(N0) && (!isa<ConstantSDNode>(N1) || MayFoldIntoStore(Op)))
26234 return false;
26235 if (MayFoldLoad(N1) && (!isa<ConstantSDNode>(N0) || MayFoldIntoStore(Op)))
26236 return false;
26237 Promote = true;
26238 }
26239 }
26241 PVT = MVT::i32;
26242 return Promote;
26243 }
26245 //===----------------------------------------------------------------------===//
26246 // X86 Inline Assembly Support
26247 //===----------------------------------------------------------------------===//
26250 // Helper to match a string separated by whitespace.
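// Illustrative use (hypothetical input): matchAsm(AsmPieces[0], "bswap", "$0")
// matches the piece "bswap $0" (and "bswap   $0"), since arguments only need
// to match up to runs of whitespace and the whole piece must be consumed.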
26251 bool matchAsmImpl(StringRef s, ArrayRef<const StringRef *> args) {
26252 s = s.substr(s.find_first_not_of(" \t")); // Skip leading whitespace.
26254 for (unsigned i = 0, e = args.size(); i != e; ++i) {
26255 StringRef piece(*args[i]);
26256 if (!s.startswith(piece)) // Check if the piece matches.
26257 return false;
26259 s = s.substr(piece.size());
26260 StringRef::size_type pos = s.find_first_not_of(" \t");
26261 if (pos == 0) // We matched a prefix.
26262 return false;
26264 s = s.substr(pos);
26265 }
26267 return s.empty();
26268 }
26269 const VariadicFunction1<bool, StringRef, StringRef, matchAsmImpl> matchAsm={};
26272 static bool clobbersFlagRegisters(const SmallVector<StringRef, 4> &AsmPieces) {
26274 if (AsmPieces.size() == 3 || AsmPieces.size() == 4) {
26275 if (std::count(AsmPieces.begin(), AsmPieces.end(), "~{cc}") &&
26276 std::count(AsmPieces.begin(), AsmPieces.end(), "~{flags}") &&
26277 std::count(AsmPieces.begin(), AsmPieces.end(), "~{fpsr}")) {
26279 if (AsmPieces.size() == 3)
26280 return true;
26281 else if (std::count(AsmPieces.begin(), AsmPieces.end(), "~{dirflag}"))
26282 return true;
26283 }
26284 }
26286 return false;
26287 }
26288 bool X86TargetLowering::ExpandInlineAsm(CallInst *CI) const {
26289 InlineAsm *IA = cast<InlineAsm>(CI->getCalledValue());
26291 std::string AsmStr = IA->getAsmString();
26293 IntegerType *Ty = dyn_cast<IntegerType>(CI->getType());
26294 if (!Ty || Ty->getBitWidth() % 16 != 0)
26295 return false;
26297 // TODO: should remove alternatives from the asmstring: "foo {a|b}" -> "foo a"
26298 SmallVector<StringRef, 4> AsmPieces;
26299 SplitString(AsmStr, AsmPieces, ";\n");
26301 switch (AsmPieces.size()) {
26302 default: return false;
26303 case 1:
26304 // FIXME: this should verify that we are targeting a 486 or better. If not,
26305 // we will turn this bswap into something that will be lowered to logical
26306 // ops instead of emitting the bswap asm. For now, we don't support 486 or
26307 // lower so don't worry about this.
26309 if (matchAsm(AsmPieces[0], "bswap", "$0") ||
26310 matchAsm(AsmPieces[0], "bswapl", "$0") ||
26311 matchAsm(AsmPieces[0], "bswapq", "$0") ||
26312 matchAsm(AsmPieces[0], "bswap", "${0:q}") ||
26313 matchAsm(AsmPieces[0], "bswapl", "${0:q}") ||
26314 matchAsm(AsmPieces[0], "bswapq", "${0:q}")) {
26315 // No need to check constraints, nothing other than the equivalent of
26316 // "=r,0" would be valid here.
26317 return IntrinsicLowering::LowerToByteSwap(CI);
26318 }
26320 // rorw $$8, ${0:w} --> llvm.bswap.i16
26321 if (CI->getType()->isIntegerTy(16) &&
26322 IA->getConstraintString().compare(0, 5, "=r,0,") == 0 &&
26323 (matchAsm(AsmPieces[0], "rorw", "$$8,", "${0:w}") ||
26324 matchAsm(AsmPieces[0], "rolw", "$$8,", "${0:w}"))) {
26325 AsmPieces.clear();
26326 const std::string &ConstraintsStr = IA->getConstraintString();
26327 SplitString(StringRef(ConstraintsStr).substr(5), AsmPieces, ",");
26328 array_pod_sort(AsmPieces.begin(), AsmPieces.end());
26329 if (clobbersFlagRegisters(AsmPieces))
26330 return IntrinsicLowering::LowerToByteSwap(CI);
26331 }
26332 break;
26333 case 3:
26334 if (CI->getType()->isIntegerTy(32) &&
26335 IA->getConstraintString().compare(0, 5, "=r,0,") == 0 &&
26336 matchAsm(AsmPieces[0], "rorw", "$$8,", "${0:w}") &&
26337 matchAsm(AsmPieces[1], "rorl", "$$16,", "$0") &&
26338 matchAsm(AsmPieces[2], "rorw", "$$8,", "${0:w}")) {
26339 AsmPieces.clear();
26340 const std::string &ConstraintsStr = IA->getConstraintString();
26341 SplitString(StringRef(ConstraintsStr).substr(5), AsmPieces, ",");
26342 array_pod_sort(AsmPieces.begin(), AsmPieces.end());
26343 if (clobbersFlagRegisters(AsmPieces))
26344 return IntrinsicLowering::LowerToByteSwap(CI);
26345 }
26347 if (CI->getType()->isIntegerTy(64)) {
26348 InlineAsm::ConstraintInfoVector Constraints = IA->ParseConstraints();
26349 if (Constraints.size() >= 2 &&
26350 Constraints[0].Codes.size() == 1 && Constraints[0].Codes[0] == "A" &&
26351 Constraints[1].Codes.size() == 1 && Constraints[1].Codes[0] == "0") {
26352 // bswap %eax / bswap %edx / xchgl %eax, %edx -> llvm.bswap.i64
26353 if (matchAsm(AsmPieces[0], "bswap", "%eax") &&
26354 matchAsm(AsmPieces[1], "bswap", "%edx") &&
26355 matchAsm(AsmPieces[2], "xchgl", "%eax,", "%edx"))
26356 return IntrinsicLowering::LowerToByteSwap(CI);
26357 }
26358 }
26359 break;
26360 }
26362 return false;
26363 }
26364 /// getConstraintType - Given a constraint letter, return the type of
26365 /// constraint it is for this target.
26366 X86TargetLowering::ConstraintType
26367 X86TargetLowering::getConstraintType(const std::string &Constraint) const {
26368 if (Constraint.size() == 1) {
26369 switch (Constraint[0]) {
26380 return C_RegisterClass;
26404 return TargetLowering::getConstraintType(Constraint);
26405 }
26407 /// Examine constraint type and operand type and determine a weight value.
26408 /// This object must already have been set up with the operand type
26409 /// and the current alternative constraint selected.
26410 TargetLowering::ConstraintWeight
26411 X86TargetLowering::getSingleConstraintMatchWeight(
26412 AsmOperandInfo &info, const char *constraint) const {
26413 ConstraintWeight weight = CW_Invalid;
26414 Value *CallOperandVal = info.CallOperandVal;
26415 // If we don't have a value, we can't do a match,
26416 // but allow it at the lowest weight.
26417 if (!CallOperandVal)
26418 return CW_Default;
26419 Type *type = CallOperandVal->getType();
26420 // Look at the constraint type.
26421 switch (*constraint) {
26423 weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
26434 if (CallOperandVal->getType()->isIntegerTy())
26435 weight = CW_SpecificReg;
26440 if (type->isFloatingPointTy())
26441 weight = CW_SpecificReg;
26444 if (type->isX86_MMXTy() && Subtarget->hasMMX())
26445 weight = CW_SpecificReg;
26449 if (((type->getPrimitiveSizeInBits() == 128) && Subtarget->hasSSE1()) ||
26450 ((type->getPrimitiveSizeInBits() == 256) && Subtarget->hasFp256()))
26451 weight = CW_Register;
26454 if (ConstantInt *C = dyn_cast<ConstantInt>(info.CallOperandVal)) {
26455 if (C->getZExtValue() <= 31)
26456 weight = CW_Constant;
26460 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
26461 if (C->getZExtValue() <= 63)
26462 weight = CW_Constant;
26466 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
26467 if ((C->getSExtValue() >= -0x80) && (C->getSExtValue() <= 0x7f))
26468 weight = CW_Constant;
26472 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
26473 if ((C->getZExtValue() == 0xff) || (C->getZExtValue() == 0xffff))
26474 weight = CW_Constant;
26478 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
26479 if (C->getZExtValue() <= 3)
26480 weight = CW_Constant;
26484 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
26485 if (C->getZExtValue() <= 0xff)
26486 weight = CW_Constant;
26491 if (dyn_cast<ConstantFP>(CallOperandVal)) {
26492 weight = CW_Constant;
26496 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
26497 if ((C->getSExtValue() >= -0x80000000LL) &&
26498 (C->getSExtValue() <= 0x7fffffffLL))
26499 weight = CW_Constant;
26503 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
26504 if (C->getZExtValue() <= 0xffffffff)
26505 weight = CW_Constant;
26506 }
26507 break;
26508 }
26509 return weight;
26510 }
26512 /// LowerXConstraint - try to replace an X constraint, which matches anything,
26513 /// with another that has more specific requirements based on the type of the
26514 /// corresponding operand.
26515 const char *X86TargetLowering::
26516 LowerXConstraint(EVT ConstraintVT) const {
26517 // FP X constraints get lowered to SSE1/2 registers if available, otherwise
26518 // 'f' like normal targets.
26519 if (ConstraintVT.isFloatingPoint()) {
26520 if (Subtarget->hasSSE2())
26521 return "Y";
26522 if (Subtarget->hasSSE1())
26523 return "x";
26524 }
26526 return TargetLowering::LowerXConstraint(ConstraintVT);
26527 }
26529 /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
26530 /// vector. If it is invalid, don't add anything to Ops.
26531 void X86TargetLowering::LowerAsmOperandForConstraint(SDValue Op,
26532 std::string &Constraint,
26533 std::vector<SDValue>&Ops,
26534 SelectionDAG &DAG) const {
26535 SDValue Result;
26537 // Only support length 1 constraints for now.
26538 if (Constraint.length() > 1) return;
26540 char ConstraintLetter = Constraint[0];
26541 switch (ConstraintLetter) {
26544 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
26545 if (C->getZExtValue() <= 31) {
26546 Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
26552 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
26553 if (C->getZExtValue() <= 63) {
26554 Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
26560 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
26561 if (isInt<8>(C->getSExtValue())) {
26562 Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
26568 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
26569 if (C->getZExtValue() == 0xff || C->getZExtValue() == 0xffff ||
26570 (Subtarget->is64Bit() && C->getZExtValue() == 0xffffffff)) {
26571 Result = DAG.getTargetConstant(C->getSExtValue(), Op.getValueType());
26577 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
26578 if (C->getZExtValue() <= 3) {
26579 Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
26585 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
26586 if (C->getZExtValue() <= 255) {
26587 Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
26593 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
26594 if (C->getZExtValue() <= 127) {
26595 Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
26601 // 32-bit signed value
26602 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
26603 if (ConstantInt::isValueValidForType(Type::getInt32Ty(*DAG.getContext()),
26604 C->getSExtValue())) {
26605 // Widen to 64 bits here to get it sign extended.
26606 Result = DAG.getTargetConstant(C->getSExtValue(), MVT::i64);
26609 // FIXME gcc accepts some relocatable values here too, but only in certain
26610 // memory models; it's complicated.
26615 // 32-bit unsigned value
26616 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
26617 if (ConstantInt::isValueValidForType(Type::getInt32Ty(*DAG.getContext()),
26618 C->getZExtValue())) {
26619 Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
26623 // FIXME gcc accepts some relocatable values here too, but only in certain
26624 // memory models; it's complicated.
26628 // Literal immediates are always ok.
26629 if (ConstantSDNode *CST = dyn_cast<ConstantSDNode>(Op)) {
26630 // Widen to 64 bits here to get it sign extended.
26631 Result = DAG.getTargetConstant(CST->getSExtValue(), MVT::i64);
26635 // In any sort of PIC mode addresses need to be computed at runtime by
26636 // adding in a register or some sort of table lookup. These can't
26637 // be used as immediates.
26638 if (Subtarget->isPICStyleGOT() || Subtarget->isPICStyleStubPIC())
26639 return;
26641 // If we are in non-pic codegen mode, we allow the address of a global (with
26642 // an optional displacement) to be used with 'i'.
26643 GlobalAddressSDNode *GA = nullptr;
26644 int64_t Offset = 0;
26646 // Match either (GA), (GA+C), (GA+C1+C2), etc.
26647 while (1) {
26648 if ((GA = dyn_cast<GlobalAddressSDNode>(Op))) {
26649 Offset += GA->getOffset();
26650 break;
26651 } else if (Op.getOpcode() == ISD::ADD) {
26652 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
26653 Offset += C->getZExtValue();
26654 Op = Op.getOperand(0);
26655 continue;
26656 }
26657 } else if (Op.getOpcode() == ISD::SUB) {
26658 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
26659 Offset += -C->getZExtValue();
26660 Op = Op.getOperand(0);
26661 continue;
26662 }
26663 }
26665 // Otherwise, this isn't something we can handle, reject it.
26666 return;
26667 }
26669 const GlobalValue *GV = GA->getGlobal();
26670 // If we require an extra load to get this address, as in PIC mode, we
26671 // can't accept it.
26672 if (isGlobalStubReference(
26673 Subtarget->ClassifyGlobalReference(GV, DAG.getTarget())))
26674 return;
26676 Result = DAG.getTargetGlobalAddress(GV, SDLoc(Op),
26677 GA->getValueType(0), Offset);
26678 break;
26679 }
26680 }
26682 if (Result.getNode()) {
26683 Ops.push_back(Result);
26684 return;
26685 }
26686 return TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
26687 }
26689 std::pair<unsigned, const TargetRegisterClass*>
26690 X86TargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
26691 MVT VT) const {
26692 // First, see if this is a constraint that directly corresponds to an LLVM
26694 if (Constraint.size() == 1) {
26695 // GCC Constraint Letters
26696 switch (Constraint[0]) {
26697 default: break;
26698 // TODO: Slight differences here in allocation order and leaving
26699 // RIP in the class. Do they matter any more here than they do
26700 // in the normal allocation?
26701 case 'q': // GENERAL_REGS in 64-bit mode, Q_REGS in 32-bit mode.
26702 if (Subtarget->is64Bit()) {
26703 if (VT == MVT::i32 || VT == MVT::f32)
26704 return std::make_pair(0U, &X86::GR32RegClass);
26705 if (VT == MVT::i16)
26706 return std::make_pair(0U, &X86::GR16RegClass);
26707 if (VT == MVT::i8 || VT == MVT::i1)
26708 return std::make_pair(0U, &X86::GR8RegClass);
26709 if (VT == MVT::i64 || VT == MVT::f64)
26710 return std::make_pair(0U, &X86::GR64RegClass);
26711 break;
26712 }
26713 // 32-bit fallthrough
26714 case 'Q': // Q_REGS
26715 if (VT == MVT::i32 || VT == MVT::f32)
26716 return std::make_pair(0U, &X86::GR32_ABCDRegClass);
26717 if (VT == MVT::i16)
26718 return std::make_pair(0U, &X86::GR16_ABCDRegClass);
26719 if (VT == MVT::i8 || VT == MVT::i1)
26720 return std::make_pair(0U, &X86::GR8_ABCD_LRegClass);
26721 if (VT == MVT::i64)
26722 return std::make_pair(0U, &X86::GR64_ABCDRegClass);
26723 break;
26724 case 'r': // GENERAL_REGS
26725 case 'l': // INDEX_REGS
26726 if (VT == MVT::i8 || VT == MVT::i1)
26727 return std::make_pair(0U, &X86::GR8RegClass);
26728 if (VT == MVT::i16)
26729 return std::make_pair(0U, &X86::GR16RegClass);
26730 if (VT == MVT::i32 || VT == MVT::f32 || !Subtarget->is64Bit())
26731 return std::make_pair(0U, &X86::GR32RegClass);
26732 return std::make_pair(0U, &X86::GR64RegClass);
26733 case 'R': // LEGACY_REGS
26734 if (VT == MVT::i8 || VT == MVT::i1)
26735 return std::make_pair(0U, &X86::GR8_NOREXRegClass);
26736 if (VT == MVT::i16)
26737 return std::make_pair(0U, &X86::GR16_NOREXRegClass);
26738 if (VT == MVT::i32 || !Subtarget->is64Bit())
26739 return std::make_pair(0U, &X86::GR32_NOREXRegClass);
26740 return std::make_pair(0U, &X86::GR64_NOREXRegClass);
    case 'f':   // FP Stack registers.
      // If SSE is enabled for this VT, use f80 to ensure the isel moves the
      // value to the correct fpstack register class.
      if (VT == MVT::f32 && !isScalarFPTypeInSSEReg(VT))
        return std::make_pair(0U, &X86::RFP32RegClass);
      if (VT == MVT::f64 && !isScalarFPTypeInSSEReg(VT))
        return std::make_pair(0U, &X86::RFP64RegClass);
      return std::make_pair(0U, &X86::RFP80RegClass);
    case 'y':   // MMX_REGS if MMX allowed.
      if (!Subtarget->hasMMX()) break;
      return std::make_pair(0U, &X86::VR64RegClass);
    case 'Y':   // SSE_REGS if SSE2 allowed
      if (!Subtarget->hasSSE2()) break;
      // FALL THROUGH.
    case 'x':   // SSE_REGS if SSE1 allowed or AVX_REGS if AVX allowed
      if (!Subtarget->hasSSE1()) break;

      switch (VT.SimpleTy) {
      default: break;
      // Scalar SSE types.
      case MVT::f32:
      case MVT::i32:
        return std::make_pair(0U, &X86::FR32RegClass);
      case MVT::f64:
      case MVT::i64:
        return std::make_pair(0U, &X86::FR64RegClass);
      // Vector types.
      case MVT::v16i8:
      case MVT::v8i16:
      case MVT::v4i32:
      case MVT::v2i64:
      case MVT::v4f32:
      case MVT::v2f64:
        return std::make_pair(0U, &X86::VR128RegClass);
      // AVX types.
      case MVT::v32i8:
      case MVT::v16i16:
      case MVT::v8i32:
      case MVT::v4i64:
      case MVT::v8f32:
      case MVT::v4f64:
        return std::make_pair(0U, &X86::VR256RegClass);
      case MVT::v8f64:
      case MVT::v16f32:
      case MVT::v16i32:
      case MVT::v8i64:
        return std::make_pair(0U, &X86::VR512RegClass);
      }
      break;
    }
  }

  // Use the default implementation in TargetLowering to convert the register
  // constraint into a member of a register class.
  std::pair<unsigned, const TargetRegisterClass*> Res;
  Res = TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);

  // Not found as a standard register?
  if (!Res.second) {
    // Map st(0) -> st(7) -> ST0
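    // For example (illustrative only): "{st(3)}" yields X86::FP3 with the
    // RFP80 register class.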
    if (Constraint.size() == 7 && Constraint[0] == '{' &&
        tolower(Constraint[1]) == 's' &&
        tolower(Constraint[2]) == 't' &&
        Constraint[3] == '(' &&
        (Constraint[4] >= '0' && Constraint[4] <= '7') &&
        Constraint[5] == ')' &&
        Constraint[6] == '}') {
      Res.first = X86::FP0 + Constraint[4] - '0';
      Res.second = &X86::RFP80RegClass;
      return Res;
    }

    // GCC allows "st(0)" to be called just plain "st".
    if (StringRef("{st}").equals_lower(Constraint)) {
      Res.first = X86::FP0;
      Res.second = &X86::RFP80RegClass;
      return Res;
    }
26822 if (StringRef("{flags}").equals_lower(Constraint)) {
26823 Res.first = X86::EFLAGS;
26824 Res.second = &X86::CCRRegClass;

    // 'A' means EAX + EDX.
    if (Constraint == "A") {
      Res.first = X86::EAX;
      Res.second = &X86::GR32_ADRegClass;
      return Res;
    }
    return Res;
  }

  // Otherwise, check to see if this is a register class of the wrong value
  // type. For example, we want to map "{ax},i32" -> {eax}, we don't want it to
  // turn into {ax},{dx}.
  if (Res.second->hasType(VT))
    return Res;   // Correct type already, nothing to do.

  // All of the single-register GCC register classes map their values onto
  // 16-bit register pieces "ax","dx","cx","bx","si","di","bp","sp". If we
  // really want an 8-bit or 32-bit register, map to the appropriate register
  // class and return the appropriate register.
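  // For example (illustrative only): "{si}" paired with an i64 operand is
  // rewritten to RSI in the GR64 class by the i64/f64 branch below.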
26847 if (Res.second == &X86::GR16RegClass) {
26848 if (VT == MVT::i8 || VT == MVT::i1) {
26849 unsigned DestReg = 0;
26850 switch (Res.first) {
26852 case X86::AX: DestReg = X86::AL; break;
26853 case X86::DX: DestReg = X86::DL; break;
26854 case X86::CX: DestReg = X86::CL; break;
26855 case X86::BX: DestReg = X86::BL; break;
26858 Res.first = DestReg;
26859 Res.second = &X86::GR8RegClass;
    } else if (VT == MVT::i32 || VT == MVT::f32) {
      unsigned DestReg = 0;
      switch (Res.first) {
      default: break;
      case X86::AX: DestReg = X86::EAX; break;
      case X86::DX: DestReg = X86::EDX; break;
      case X86::CX: DestReg = X86::ECX; break;
      case X86::BX: DestReg = X86::EBX; break;
      case X86::SI: DestReg = X86::ESI; break;
      case X86::DI: DestReg = X86::EDI; break;
      case X86::BP: DestReg = X86::EBP; break;
      case X86::SP: DestReg = X86::ESP; break;
      }
      if (DestReg) {
        Res.first = DestReg;
        Res.second = &X86::GR32RegClass;
      }
    } else if (VT == MVT::i64 || VT == MVT::f64) {
      unsigned DestReg = 0;
      switch (Res.first) {
      default: break;
      case X86::AX: DestReg = X86::RAX; break;
      case X86::DX: DestReg = X86::RDX; break;
      case X86::CX: DestReg = X86::RCX; break;
      case X86::BX: DestReg = X86::RBX; break;
      case X86::SI: DestReg = X86::RSI; break;
      case X86::DI: DestReg = X86::RDI; break;
      case X86::BP: DestReg = X86::RBP; break;
      case X86::SP: DestReg = X86::RSP; break;
      }
      if (DestReg) {
        Res.first = DestReg;
        Res.second = &X86::GR64RegClass;
      }
    }
26896 } else if (Res.second == &X86::FR32RegClass ||
26897 Res.second == &X86::FR64RegClass ||
26898 Res.second == &X86::VR128RegClass ||
26899 Res.second == &X86::VR256RegClass ||
26900 Res.second == &X86::FR32XRegClass ||
26901 Res.second == &X86::FR64XRegClass ||
26902 Res.second == &X86::VR128XRegClass ||
26903 Res.second == &X86::VR256XRegClass ||
26904 Res.second == &X86::VR512RegClass) {
26905 // Handle references to XMM physical registers that got mapped into the
26906 // wrong class. This can happen with constraints like {xmm0} where the
26907 // target independent register mapper will just pick the first match it can
26908 // find, ignoring the required type.
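    // For example (illustrative only): "{xmm0}" used with a v8f32 operand
    // keeps the XMM0 register chosen by the generic mapper but has its class
    // corrected to VR256 below.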
    if (VT == MVT::f32 || VT == MVT::i32)
      Res.second = &X86::FR32RegClass;
    else if (VT == MVT::f64 || VT == MVT::i64)
      Res.second = &X86::FR64RegClass;
    else if (X86::VR128RegClass.hasType(VT))
      Res.second = &X86::VR128RegClass;
    else if (X86::VR256RegClass.hasType(VT))
      Res.second = &X86::VR256RegClass;
    else if (X86::VR512RegClass.hasType(VT))
      Res.second = &X86::VR512RegClass;
  }

  return Res;
}

int X86TargetLowering::getScalingFactorCost(const AddrMode &AM,
                                            Type *Ty) const {
  // Scaling factors are not free at all.
  // An indexed folded instruction, i.e., inst (reg1, reg2, scale),
  // will take 2 allocations in the out of order engine instead of 1
  // for plain addressing mode, i.e. inst (reg1).
  // E.g.,
  // vaddps (%rsi,%rdx), %ymm0, %ymm1
  // Requires two allocations (one for the load, one for the computation)
  // whereas:
  // vaddps (%rsi), %ymm0, %ymm1
  // Requires just one allocation, i.e., freeing allocations for other
  // operations and having fewer micro operations to execute.
  //
  // For some X86 architectures, this is even worse because for instance for
  // stores, the complex addressing mode forces the instruction to use the
  // "load" ports instead of the dedicated "store" port.
  // E.g., on Haswell:
  // vmovaps %ymm1, (%r8, %rdi) can use port 2 or 3.
  // vmovaps %ymm1, (%r8) can use port 2, 3, or 7.
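  // Summarizing the returns below (illustrative): a legal mode with no index
  // register (e.g. (%rsi)) costs 0, a legal mode with an index register
  // (e.g. (%rsi,%rdx,4)) costs 1, and an illegal addressing mode reports a
  // negative cost.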
  if (isLegalAddressingMode(AM, Ty))
    // Scale represents reg2 * scale, thus account for 1
    // as soon as we use a second register.
    return AM.Scale != 0;
  return -1;
}
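
// Report whether FP-to-integer conversions should be lowered through the MSVC
// _ftol2 runtime helper; this only applies to 32-bit Windows/MSVC targets.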
bool X86TargetLowering::isTargetFTOL() const {
  return Subtarget->isTargetKnownWindowsMSVC() && !Subtarget->is64Bit();