1 //===-- X86ISelLowering.cpp - X86 DAG Lowering Implementation -------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file defines the interfaces that X86 uses to lower LLVM code into a
11 // selection DAG.
12 //
13 //===----------------------------------------------------------------------===//
15 #include "X86ISelLowering.h"
16 #include "Utils/X86ShuffleDecode.h"
17 #include "X86CallingConv.h"
18 #include "X86FrameLowering.h"
19 #include "X86InstrBuilder.h"
20 #include "X86MachineFunctionInfo.h"
21 #include "X86TargetMachine.h"
22 #include "X86TargetObjectFile.h"
23 #include "llvm/ADT/SmallBitVector.h"
24 #include "llvm/ADT/SmallSet.h"
25 #include "llvm/ADT/Statistic.h"
26 #include "llvm/ADT/StringExtras.h"
27 #include "llvm/ADT/StringSwitch.h"
28 #include "llvm/ADT/VariadicFunction.h"
29 #include "llvm/CodeGen/IntrinsicLowering.h"
30 #include "llvm/CodeGen/MachineFrameInfo.h"
31 #include "llvm/CodeGen/MachineFunction.h"
32 #include "llvm/CodeGen/MachineInstrBuilder.h"
33 #include "llvm/CodeGen/MachineJumpTableInfo.h"
34 #include "llvm/CodeGen/MachineModuleInfo.h"
35 #include "llvm/CodeGen/MachineRegisterInfo.h"
36 #include "llvm/IR/CallSite.h"
37 #include "llvm/IR/CallingConv.h"
38 #include "llvm/IR/Constants.h"
39 #include "llvm/IR/DerivedTypes.h"
40 #include "llvm/IR/Function.h"
41 #include "llvm/IR/GlobalAlias.h"
42 #include "llvm/IR/GlobalVariable.h"
43 #include "llvm/IR/Instructions.h"
44 #include "llvm/IR/Intrinsics.h"
45 #include "llvm/MC/MCAsmInfo.h"
46 #include "llvm/MC/MCContext.h"
47 #include "llvm/MC/MCExpr.h"
48 #include "llvm/MC/MCSymbol.h"
49 #include "llvm/Support/CommandLine.h"
50 #include "llvm/Support/Debug.h"
51 #include "llvm/Support/ErrorHandling.h"
52 #include "llvm/Support/MathExtras.h"
53 #include "llvm/Target/TargetOptions.h"
54 #include "X86IntrinsicsInfo.h"
58 using namespace llvm;
60 #define DEBUG_TYPE "x86-isel"
62 STATISTIC(NumTailCalls, "Number of tail calls");
64 static cl::opt<bool> ExperimentalVectorWideningLegalization(
65 "x86-experimental-vector-widening-legalization", cl::init(false),
66 cl::desc("Enable an experimental vector type legalization through widening "
67 "rather than promotion."),
70 static cl::opt<bool> ExperimentalVectorShuffleLowering(
71 "x86-experimental-vector-shuffle-lowering", cl::init(true),
72 cl::desc("Enable an experimental vector shuffle lowering code path."),
75 static cl::opt<bool> ExperimentalVectorShuffleLegality(
76 "x86-experimental-vector-shuffle-legality", cl::init(false),
77 cl::desc("Enable experimental shuffle legality based on the experimental "
78 "shuffle lowering. Should only be used with the experimental "
82 static cl::opt<int> ReciprocalEstimateRefinementSteps(
83 "x86-recip-refinement-steps", cl::init(1),
84 cl::desc("Specify the number of Newton-Raphson iterations applied to the "
85 "result of the hardware reciprocal estimate instruction."),
88 // Forward declarations.
89 static SDValue getMOVL(SelectionDAG &DAG, SDLoc dl, EVT VT, SDValue V1,
90 SDValue V2, ArrayRef<int> Mask);
92 static SDValue ExtractSubVector(SDValue Vec, unsigned IdxVal,
93 SelectionDAG &DAG, SDLoc dl,
94 unsigned vectorWidth) {
95 assert((vectorWidth == 128 || vectorWidth == 256) &&
96 "Unsupported vector width");
97 EVT VT = Vec.getValueType();
98 EVT ElVT = VT.getVectorElementType();
99 unsigned Factor = VT.getSizeInBits()/vectorWidth;
100 EVT ResultVT = EVT::getVectorVT(*DAG.getContext(), ElVT,
101 VT.getVectorNumElements()/Factor);
103 // Extract from UNDEF is UNDEF.
104 if (Vec.getOpcode() == ISD::UNDEF)
105 return DAG.getUNDEF(ResultVT);
107 // Extract the relevant vectorWidth bits. Generate an EXTRACT_SUBVECTOR
108 unsigned ElemsPerChunk = vectorWidth / ElVT.getSizeInBits();
110 // This is the index of the first element of the vectorWidth-bit chunk
111 // we want.
112 unsigned NormalizedIdxVal = (((IdxVal * ElVT.getSizeInBits()) / vectorWidth)
113 * ElemsPerChunk);
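// For example, with a 256-bit v8i32 input, IdxVal = 6 and vectorWidth = 128:
// ElemsPerChunk = 128/32 = 4 and NormalizedIdxVal = ((6*32)/128)*4 = 4, i.e.
// the extract is aligned down to the 128-bit chunk holding elements 4..7.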
115 // If the input is a buildvector just emit a smaller one.
116 if (Vec.getOpcode() == ISD::BUILD_VECTOR)
117 return DAG.getNode(ISD::BUILD_VECTOR, dl, ResultVT,
118 makeArrayRef(Vec->op_begin() + NormalizedIdxVal,
119 ElemsPerChunk));
121 SDValue VecIdx = DAG.getIntPtrConstant(NormalizedIdxVal);
122 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, ResultVT, Vec, VecIdx);
123 }
125 /// Generate a DAG to grab 128-bits from a vector > 128 bits. This
126 /// sets things up to match to an AVX VEXTRACTF128 / VEXTRACTI128
127 /// or AVX-512 VEXTRACTF32x4 / VEXTRACTI32x4
128 /// instructions or a simple subregister reference. Idx is an index in the
129 /// 128 bits we want. It need not be aligned to a 128-bit boundary. That makes
130 /// lowering EXTRACT_VECTOR_ELT operations easier.
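/// For example, grabbing the upper half of a v8f32 as a v4f32 can pass any
/// IdxVal in 4..7; the helper rounds it down to the containing 128-bit chunk.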
131 static SDValue Extract128BitVector(SDValue Vec, unsigned IdxVal,
132 SelectionDAG &DAG, SDLoc dl) {
133 assert((Vec.getValueType().is256BitVector() ||
134 Vec.getValueType().is512BitVector()) && "Unexpected vector size!");
135 return ExtractSubVector(Vec, IdxVal, DAG, dl, 128);
136 }
138 /// Generate a DAG to grab 256-bits from a 512-bit vector.
139 static SDValue Extract256BitVector(SDValue Vec, unsigned IdxVal,
140 SelectionDAG &DAG, SDLoc dl) {
141 assert(Vec.getValueType().is512BitVector() && "Unexpected vector size!");
142 return ExtractSubVector(Vec, IdxVal, DAG, dl, 256);
143 }
145 static SDValue InsertSubVector(SDValue Result, SDValue Vec,
146 unsigned IdxVal, SelectionDAG &DAG,
147 SDLoc dl, unsigned vectorWidth) {
148 assert((vectorWidth == 128 || vectorWidth == 256) &&
149 "Unsupported vector width");
150 // Inserting UNDEF is Result
151 if (Vec.getOpcode() == ISD::UNDEF)
152 return Result;
153 EVT VT = Vec.getValueType();
154 EVT ElVT = VT.getVectorElementType();
155 EVT ResultVT = Result.getValueType();
157 // Insert the relevant vectorWidth bits.
158 unsigned ElemsPerChunk = vectorWidth/ElVT.getSizeInBits();
160 // This is the index of the first element of the vectorWidth-bit chunk
161 // we want.
162 unsigned NormalizedIdxVal = (((IdxVal * ElVT.getSizeInBits())/vectorWidth)
163 * ElemsPerChunk);
165 SDValue VecIdx = DAG.getIntPtrConstant(NormalizedIdxVal);
166 return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ResultVT, Result, Vec, VecIdx);
167 }
169 /// Generate a DAG to put 128-bits into a vector > 128 bits. This
170 /// sets things up to match to an AVX VINSERTF128/VINSERTI128 or
171 /// AVX-512 VINSERTF32x4/VINSERTI32x4 instructions or a
172 /// simple superregister reference. Idx is an index in the 128 bits
173 /// we want. It need not be aligned to a 128-bit boundary. That makes
174 /// lowering INSERT_VECTOR_ELT operations easier.
175 static SDValue Insert128BitVector(SDValue Result, SDValue Vec, unsigned IdxVal,
176 SelectionDAG &DAG,SDLoc dl) {
177 assert(Vec.getValueType().is128BitVector() && "Unexpected vector size!");
178 return InsertSubVector(Result, Vec, IdxVal, DAG, dl, 128);
179 }
181 static SDValue Insert256BitVector(SDValue Result, SDValue Vec, unsigned IdxVal,
182 SelectionDAG &DAG, SDLoc dl) {
183 assert(Vec.getValueType().is256BitVector() && "Unexpected vector size!");
184 return InsertSubVector(Result, Vec, IdxVal, DAG, dl, 256);
185 }
187 /// Concat two 128-bit vectors into a 256 bit vector using VINSERTF128
188 /// instructions. This is used because creating CONCAT_VECTOR nodes of
189 /// BUILD_VECTORS returns a larger BUILD_VECTOR while we're trying to lower
190 /// large BUILD_VECTORS.
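/// For example, two v4i32 BUILD_VECTOR halves of a v8i32 are combined by
/// inserting the low half into an undef vector at element 0 and the high
/// half at element NumElems/2 = 4.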
191 static SDValue Concat128BitVectors(SDValue V1, SDValue V2, EVT VT,
192 unsigned NumElems, SelectionDAG &DAG,
193 SDLoc dl) {
194 SDValue V = Insert128BitVector(DAG.getUNDEF(VT), V1, 0, DAG, dl);
195 return Insert128BitVector(V, V2, NumElems/2, DAG, dl);
196 }
198 static SDValue Concat256BitVectors(SDValue V1, SDValue V2, EVT VT,
199 unsigned NumElems, SelectionDAG &DAG,
200 SDLoc dl) {
201 SDValue V = Insert256BitVector(DAG.getUNDEF(VT), V1, 0, DAG, dl);
202 return Insert256BitVector(V, V2, NumElems/2, DAG, dl);
203 }
205 X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
206 const X86Subtarget &STI)
207 : TargetLowering(TM), Subtarget(&STI) {
208 X86ScalarSSEf64 = Subtarget->hasSSE2();
209 X86ScalarSSEf32 = Subtarget->hasSSE1();
210 TD = getDataLayout();
212 // Set up the TargetLowering object.
213 static const MVT IntVTs[] = { MVT::i8, MVT::i16, MVT::i32, MVT::i64 };
215 // X86 is weird. It always uses i8 for shift amounts and setcc results.
216 setBooleanContents(ZeroOrOneBooleanContent);
217 // X86-SSE is even stranger. It uses -1 or 0 for vector masks.
218 setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
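// For example, a scalar SETCC materializes 0 or 1 in an i8 register, while
// an SSE compare such as PCMPEQD produces all-zeros or all-ones lanes.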
220 // For 64-bit, since we have so many registers, use the ILP scheduler.
221 // For 32-bit, use the register pressure specific scheduling.
222 // For Atom, always use ILP scheduling.
223 if (Subtarget->isAtom())
224 setSchedulingPreference(Sched::ILP);
225 else if (Subtarget->is64Bit())
226 setSchedulingPreference(Sched::ILP);
227 else
228 setSchedulingPreference(Sched::RegPressure);
229 const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
230 setStackPointerRegisterToSaveRestore(RegInfo->getStackRegister());
232 // Bypass expensive divides on Atom when compiling with O2.
233 if (TM.getOptLevel() >= CodeGenOpt::Default) {
234 if (Subtarget->hasSlowDivide32())
235 addBypassSlowDiv(32, 8);
236 if (Subtarget->hasSlowDivide64() && Subtarget->is64Bit())
237 addBypassSlowDiv(64, 16);
238 }
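// That is, a 32-bit divide is routed through an 8-bit divide (and a 64-bit
// divide through a 16-bit one) when a runtime check shows the operands fit
// in the narrower width.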
240 if (Subtarget->isTargetKnownWindowsMSVC()) {
241 // Setup Windows compiler runtime calls.
242 setLibcallName(RTLIB::SDIV_I64, "_alldiv");
243 setLibcallName(RTLIB::UDIV_I64, "_aulldiv");
244 setLibcallName(RTLIB::SREM_I64, "_allrem");
245 setLibcallName(RTLIB::UREM_I64, "_aullrem");
246 setLibcallName(RTLIB::MUL_I64, "_allmul");
247 setLibcallCallingConv(RTLIB::SDIV_I64, CallingConv::X86_StdCall);
248 setLibcallCallingConv(RTLIB::UDIV_I64, CallingConv::X86_StdCall);
249 setLibcallCallingConv(RTLIB::SREM_I64, CallingConv::X86_StdCall);
250 setLibcallCallingConv(RTLIB::UREM_I64, CallingConv::X86_StdCall);
251 setLibcallCallingConv(RTLIB::MUL_I64, CallingConv::X86_StdCall);
253 // The _ftol2 runtime function has an unusual calling conv, which
254 // is modeled by a special pseudo-instruction.
255 setLibcallName(RTLIB::FPTOUINT_F64_I64, nullptr);
256 setLibcallName(RTLIB::FPTOUINT_F32_I64, nullptr);
257 setLibcallName(RTLIB::FPTOUINT_F64_I32, nullptr);
258 setLibcallName(RTLIB::FPTOUINT_F32_I32, nullptr);
259 }
261 if (Subtarget->isTargetDarwin()) {
262 // Darwin should use _setjmp/_longjmp instead of setjmp/longjmp.
263 setUseUnderscoreSetJmp(false);
264 setUseUnderscoreLongJmp(false);
265 } else if (Subtarget->isTargetWindowsGNU()) {
266 // MS runtime is weird: it exports _setjmp, but longjmp with no underscore!
267 setUseUnderscoreSetJmp(true);
268 setUseUnderscoreLongJmp(false);
269 } else {
270 setUseUnderscoreSetJmp(true);
271 setUseUnderscoreLongJmp(true);
272 }
274 // Set up the register classes.
275 addRegisterClass(MVT::i8, &X86::GR8RegClass);
276 addRegisterClass(MVT::i16, &X86::GR16RegClass);
277 addRegisterClass(MVT::i32, &X86::GR32RegClass);
278 if (Subtarget->is64Bit())
279 addRegisterClass(MVT::i64, &X86::GR64RegClass);
281 for (MVT VT : MVT::integer_valuetypes())
282 setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
284 // We don't accept any truncstore of integer registers.
285 setTruncStoreAction(MVT::i64, MVT::i32, Expand);
286 setTruncStoreAction(MVT::i64, MVT::i16, Expand);
287 setTruncStoreAction(MVT::i64, MVT::i8 , Expand);
288 setTruncStoreAction(MVT::i32, MVT::i16, Expand);
289 setTruncStoreAction(MVT::i32, MVT::i8 , Expand);
290 setTruncStoreAction(MVT::i16, MVT::i8, Expand);
292 setTruncStoreAction(MVT::f64, MVT::f32, Expand);
294 // SETOEQ and SETUNE require checking two conditions.
295 setCondCodeAction(ISD::SETOEQ, MVT::f32, Expand);
296 setCondCodeAction(ISD::SETOEQ, MVT::f64, Expand);
297 setCondCodeAction(ISD::SETOEQ, MVT::f80, Expand);
298 setCondCodeAction(ISD::SETUNE, MVT::f32, Expand);
299 setCondCodeAction(ISD::SETUNE, MVT::f64, Expand);
300 setCondCodeAction(ISD::SETUNE, MVT::f80, Expand);
302 // Promote all UINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have this
303 // operation.
304 setOperationAction(ISD::UINT_TO_FP , MVT::i1 , Promote);
305 setOperationAction(ISD::UINT_TO_FP , MVT::i8 , Promote);
306 setOperationAction(ISD::UINT_TO_FP , MVT::i16 , Promote);
308 if (Subtarget->is64Bit()) {
309 setOperationAction(ISD::UINT_TO_FP , MVT::i32 , Promote);
310 setOperationAction(ISD::UINT_TO_FP , MVT::i64 , Custom);
311 } else if (!TM.Options.UseSoftFloat) {
312 // We have an algorithm for SSE2->double, and we turn this into a
313 // 64-bit FILD followed by conditional FADD for other targets.
314 setOperationAction(ISD::UINT_TO_FP , MVT::i64 , Custom);
315 // We have an algorithm for SSE2, and we turn this into a 64-bit
316 // FILD for other targets.
317 setOperationAction(ISD::UINT_TO_FP , MVT::i32 , Custom);
318 }
320 // Promote i1/i8 SINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have
321 // this operation.
322 setOperationAction(ISD::SINT_TO_FP , MVT::i1 , Promote);
323 setOperationAction(ISD::SINT_TO_FP , MVT::i8 , Promote);
325 if (!TM.Options.UseSoftFloat) {
326 // SSE has no i16 to fp conversion, only i32
327 if (X86ScalarSSEf32) {
328 setOperationAction(ISD::SINT_TO_FP , MVT::i16 , Promote);
329 // f32 and f64 cases are Legal, f80 case is not
330 setOperationAction(ISD::SINT_TO_FP , MVT::i32 , Custom);
331 } else {
332 setOperationAction(ISD::SINT_TO_FP , MVT::i16 , Custom);
333 setOperationAction(ISD::SINT_TO_FP , MVT::i32 , Custom);
334 }
335 } else {
336 setOperationAction(ISD::SINT_TO_FP , MVT::i16 , Promote);
337 setOperationAction(ISD::SINT_TO_FP , MVT::i32 , Promote);
338 }
340 // In 32-bit mode these are custom lowered. In 64-bit mode F32 and F64
341 // are Legal, f80 is custom lowered.
342 setOperationAction(ISD::FP_TO_SINT , MVT::i64 , Custom);
343 setOperationAction(ISD::SINT_TO_FP , MVT::i64 , Custom);
345 // Promote i1/i8 FP_TO_SINT to larger FP_TO_SINTs, as X86 doesn't have
346 // this operation.
347 setOperationAction(ISD::FP_TO_SINT , MVT::i1 , Promote);
348 setOperationAction(ISD::FP_TO_SINT , MVT::i8 , Promote);
350 if (X86ScalarSSEf32) {
351 setOperationAction(ISD::FP_TO_SINT , MVT::i16 , Promote);
352 // f32 and f64 cases are Legal, f80 case is not
353 setOperationAction(ISD::FP_TO_SINT , MVT::i32 , Custom);
354 } else {
355 setOperationAction(ISD::FP_TO_SINT , MVT::i16 , Custom);
356 setOperationAction(ISD::FP_TO_SINT , MVT::i32 , Custom);
357 }
359 // Handle FP_TO_UINT by promoting the destination to a larger signed
360 // conversion.
361 setOperationAction(ISD::FP_TO_UINT , MVT::i1 , Promote);
362 setOperationAction(ISD::FP_TO_UINT , MVT::i8 , Promote);
363 setOperationAction(ISD::FP_TO_UINT , MVT::i16 , Promote);
365 if (Subtarget->is64Bit()) {
366 setOperationAction(ISD::FP_TO_UINT , MVT::i64 , Expand);
367 setOperationAction(ISD::FP_TO_UINT , MVT::i32 , Promote);
368 } else if (!TM.Options.UseSoftFloat) {
369 // Since AVX is a superset of SSE3, only check for SSE here.
370 if (Subtarget->hasSSE1() && !Subtarget->hasSSE3())
371 // Expand FP_TO_UINT into a select.
372 // FIXME: We would like to use a Custom expander here eventually to do
373 // the optimal thing for SSE vs. the default expansion in the legalizer.
374 setOperationAction(ISD::FP_TO_UINT , MVT::i32 , Expand);
375 else
376 // With SSE3 we can use fisttpll to convert to a signed i64; without
377 // SSE, we're stuck with a fistpll.
378 setOperationAction(ISD::FP_TO_UINT , MVT::i32 , Custom);
379 }
381 if (isTargetFTOL()) {
382 // Use the _ftol2 runtime function, which has a pseudo-instruction
383 // to handle its weird calling convention.
384 setOperationAction(ISD::FP_TO_UINT , MVT::i64 , Custom);
385 }
387 // TODO: when we have SSE, these could be more efficient, by using movd/movq.
388 if (!X86ScalarSSEf64) {
389 setOperationAction(ISD::BITCAST , MVT::f32 , Expand);
390 setOperationAction(ISD::BITCAST , MVT::i32 , Expand);
391 if (Subtarget->is64Bit()) {
392 setOperationAction(ISD::BITCAST , MVT::f64 , Expand);
393 // Without SSE, i64->f64 goes through memory.
394 setOperationAction(ISD::BITCAST , MVT::i64 , Expand);
395 }
396 }
398 // Scalar integer divide and remainder are lowered to use operations that
399 // produce two results, to match the available instructions. This exposes
400 // the two-result form to trivial CSE, which is able to combine x/y and x%y
401 // into a single instruction.
403 // Scalar integer multiply-high is also lowered to use two-result
404 // operations, to match the available instructions. However, plain multiply
405 // (low) operations are left as Legal, as there are single-result
406 // instructions for this in x86. Using the two-result multiply instructions
407 // when both high and low results are needed must be arranged by dagcombine.
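// Roughly: with SDIV/SREM marked Expand, both x/y and x%y legalize to a
// two-result ISD::SDIVREM node, and CSE keeps a single such node feeding
// both uses, which then selects to one hardware divide.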
408 for (unsigned i = 0; i != array_lengthof(IntVTs); ++i) {
409 MVT VT = IntVTs[i];
410 setOperationAction(ISD::MULHS, VT, Expand);
411 setOperationAction(ISD::MULHU, VT, Expand);
412 setOperationAction(ISD::SDIV, VT, Expand);
413 setOperationAction(ISD::UDIV, VT, Expand);
414 setOperationAction(ISD::SREM, VT, Expand);
415 setOperationAction(ISD::UREM, VT, Expand);
417 // Add/Sub overflow ops with MVT::Glues are lowered to EFLAGS dependences.
418 setOperationAction(ISD::ADDC, VT, Custom);
419 setOperationAction(ISD::ADDE, VT, Custom);
420 setOperationAction(ISD::SUBC, VT, Custom);
421 setOperationAction(ISD::SUBE, VT, Custom);
422 }
424 setOperationAction(ISD::BR_JT , MVT::Other, Expand);
425 setOperationAction(ISD::BRCOND , MVT::Other, Custom);
426 setOperationAction(ISD::BR_CC , MVT::f32, Expand);
427 setOperationAction(ISD::BR_CC , MVT::f64, Expand);
428 setOperationAction(ISD::BR_CC , MVT::f80, Expand);
429 setOperationAction(ISD::BR_CC , MVT::i8, Expand);
430 setOperationAction(ISD::BR_CC , MVT::i16, Expand);
431 setOperationAction(ISD::BR_CC , MVT::i32, Expand);
432 setOperationAction(ISD::BR_CC , MVT::i64, Expand);
433 setOperationAction(ISD::SELECT_CC , MVT::f32, Expand);
434 setOperationAction(ISD::SELECT_CC , MVT::f64, Expand);
435 setOperationAction(ISD::SELECT_CC , MVT::f80, Expand);
436 setOperationAction(ISD::SELECT_CC , MVT::i8, Expand);
437 setOperationAction(ISD::SELECT_CC , MVT::i16, Expand);
438 setOperationAction(ISD::SELECT_CC , MVT::i32, Expand);
439 setOperationAction(ISD::SELECT_CC , MVT::i64, Expand);
440 if (Subtarget->is64Bit())
441 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i32, Legal);
442 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16 , Legal);
443 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8 , Legal);
444 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1 , Expand);
445 setOperationAction(ISD::FP_ROUND_INREG , MVT::f32 , Expand);
446 setOperationAction(ISD::FREM , MVT::f32 , Expand);
447 setOperationAction(ISD::FREM , MVT::f64 , Expand);
448 setOperationAction(ISD::FREM , MVT::f80 , Expand);
449 setOperationAction(ISD::FLT_ROUNDS_ , MVT::i32 , Custom);
451 // Promote the i8 variants and force them on up to i32 which has a shorter
452 // encoding.
453 setOperationAction(ISD::CTTZ , MVT::i8 , Promote);
454 AddPromotedToType (ISD::CTTZ , MVT::i8 , MVT::i32);
455 setOperationAction(ISD::CTTZ_ZERO_UNDEF , MVT::i8 , Promote);
456 AddPromotedToType (ISD::CTTZ_ZERO_UNDEF , MVT::i8 , MVT::i32);
457 if (Subtarget->hasBMI()) {
458 setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i16 , Expand);
459 setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32 , Expand);
460 if (Subtarget->is64Bit())
461 setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i64, Expand);
462 } else {
463 setOperationAction(ISD::CTTZ , MVT::i16 , Custom);
464 setOperationAction(ISD::CTTZ , MVT::i32 , Custom);
465 if (Subtarget->is64Bit())
466 setOperationAction(ISD::CTTZ , MVT::i64 , Custom);
467 }
469 if (Subtarget->hasLZCNT()) {
470 // When promoting the i8 variants, force them to i32 for a shorter
471 // encoding.
472 setOperationAction(ISD::CTLZ , MVT::i8 , Promote);
473 AddPromotedToType (ISD::CTLZ , MVT::i8 , MVT::i32);
474 setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i8 , Promote);
475 AddPromotedToType (ISD::CTLZ_ZERO_UNDEF, MVT::i8 , MVT::i32);
476 setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i16 , Expand);
477 setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32 , Expand);
478 if (Subtarget->is64Bit())
479 setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i64, Expand);
480 } else {
481 setOperationAction(ISD::CTLZ , MVT::i8 , Custom);
482 setOperationAction(ISD::CTLZ , MVT::i16 , Custom);
483 setOperationAction(ISD::CTLZ , MVT::i32 , Custom);
484 setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i8 , Custom);
485 setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i16 , Custom);
486 setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32 , Custom);
487 if (Subtarget->is64Bit()) {
488 setOperationAction(ISD::CTLZ , MVT::i64 , Custom);
489 setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i64, Custom);
490 }
491 }
493 // Special handling for half-precision floating point conversions.
494 // If we don't have F16C support, then lower half float conversions
495 // into library calls.
496 if (TM.Options.UseSoftFloat || !Subtarget->hasF16C()) {
497 setOperationAction(ISD::FP16_TO_FP, MVT::f32, Expand);
498 setOperationAction(ISD::FP_TO_FP16, MVT::f32, Expand);
499 }
501 // There's never any support for operations beyond MVT::f32.
502 setOperationAction(ISD::FP16_TO_FP, MVT::f64, Expand);
503 setOperationAction(ISD::FP16_TO_FP, MVT::f80, Expand);
504 setOperationAction(ISD::FP_TO_FP16, MVT::f64, Expand);
505 setOperationAction(ISD::FP_TO_FP16, MVT::f80, Expand);
507 setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Expand);
508 setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Expand);
509 setLoadExtAction(ISD::EXTLOAD, MVT::f80, MVT::f16, Expand);
510 setTruncStoreAction(MVT::f32, MVT::f16, Expand);
511 setTruncStoreAction(MVT::f64, MVT::f16, Expand);
512 setTruncStoreAction(MVT::f80, MVT::f16, Expand);
514 if (Subtarget->hasPOPCNT()) {
515 setOperationAction(ISD::CTPOP , MVT::i8 , Promote);
516 } else {
517 setOperationAction(ISD::CTPOP , MVT::i8 , Expand);
518 setOperationAction(ISD::CTPOP , MVT::i16 , Expand);
519 setOperationAction(ISD::CTPOP , MVT::i32 , Expand);
520 if (Subtarget->is64Bit())
521 setOperationAction(ISD::CTPOP , MVT::i64 , Expand);
522 }
524 setOperationAction(ISD::READCYCLECOUNTER , MVT::i64 , Custom);
526 if (!Subtarget->hasMOVBE())
527 setOperationAction(ISD::BSWAP , MVT::i16 , Expand);
529 // These should be promoted to a larger select which is supported.
530 setOperationAction(ISD::SELECT , MVT::i1 , Promote);
531 // X86 wants to expand cmov itself.
532 setOperationAction(ISD::SELECT , MVT::i8 , Custom);
533 setOperationAction(ISD::SELECT , MVT::i16 , Custom);
534 setOperationAction(ISD::SELECT , MVT::i32 , Custom);
535 setOperationAction(ISD::SELECT , MVT::f32 , Custom);
536 setOperationAction(ISD::SELECT , MVT::f64 , Custom);
537 setOperationAction(ISD::SELECT , MVT::f80 , Custom);
538 setOperationAction(ISD::SETCC , MVT::i8 , Custom);
539 setOperationAction(ISD::SETCC , MVT::i16 , Custom);
540 setOperationAction(ISD::SETCC , MVT::i32 , Custom);
541 setOperationAction(ISD::SETCC , MVT::f32 , Custom);
542 setOperationAction(ISD::SETCC , MVT::f64 , Custom);
543 setOperationAction(ISD::SETCC , MVT::f80 , Custom);
544 if (Subtarget->is64Bit()) {
545 setOperationAction(ISD::SELECT , MVT::i64 , Custom);
546 setOperationAction(ISD::SETCC , MVT::i64 , Custom);
547 }
548 setOperationAction(ISD::EH_RETURN , MVT::Other, Custom);
549 // NOTE: EH_SJLJ_SETJMP/_LONGJMP supported here is NOT intended to support
550 // SjLj exception handling but a light-weight setjmp/longjmp replacement to
551 // support continuation, user-level threading, and so on. As a result, no
552 // other SjLj exception interfaces are implemented, so please don't build
553 // your own exception handling based on them.
554 // LLVM/Clang supports zero-cost DWARF exception handling.
555 setOperationAction(ISD::EH_SJLJ_SETJMP, MVT::i32, Custom);
556 setOperationAction(ISD::EH_SJLJ_LONGJMP, MVT::Other, Custom);
559 setOperationAction(ISD::ConstantPool , MVT::i32 , Custom);
560 setOperationAction(ISD::JumpTable , MVT::i32 , Custom);
561 setOperationAction(ISD::GlobalAddress , MVT::i32 , Custom);
562 setOperationAction(ISD::GlobalTLSAddress, MVT::i32 , Custom);
563 if (Subtarget->is64Bit())
564 setOperationAction(ISD::GlobalTLSAddress, MVT::i64, Custom);
565 setOperationAction(ISD::ExternalSymbol , MVT::i32 , Custom);
566 setOperationAction(ISD::BlockAddress , MVT::i32 , Custom);
567 if (Subtarget->is64Bit()) {
568 setOperationAction(ISD::ConstantPool , MVT::i64 , Custom);
569 setOperationAction(ISD::JumpTable , MVT::i64 , Custom);
570 setOperationAction(ISD::GlobalAddress , MVT::i64 , Custom);
571 setOperationAction(ISD::ExternalSymbol, MVT::i64 , Custom);
572 setOperationAction(ISD::BlockAddress , MVT::i64 , Custom);
573 }
574 // 64-bit add, sub, shl, sra, srl (iff 32-bit x86)
575 setOperationAction(ISD::SHL_PARTS , MVT::i32 , Custom);
576 setOperationAction(ISD::SRA_PARTS , MVT::i32 , Custom);
577 setOperationAction(ISD::SRL_PARTS , MVT::i32 , Custom);
578 if (Subtarget->is64Bit()) {
579 setOperationAction(ISD::SHL_PARTS , MVT::i64 , Custom);
580 setOperationAction(ISD::SRA_PARTS , MVT::i64 , Custom);
581 setOperationAction(ISD::SRL_PARTS , MVT::i64 , Custom);
582 }
584 if (Subtarget->hasSSE1())
585 setOperationAction(ISD::PREFETCH , MVT::Other, Legal);
587 setOperationAction(ISD::ATOMIC_FENCE , MVT::Other, Custom);
589 // Expand certain atomics
590 for (unsigned i = 0; i != array_lengthof(IntVTs); ++i) {
591 MVT VT = IntVTs[i];
592 setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, VT, Custom);
593 setOperationAction(ISD::ATOMIC_LOAD_SUB, VT, Custom);
594 setOperationAction(ISD::ATOMIC_STORE, VT, Custom);
595 }
597 if (Subtarget->hasCmpxchg16b()) {
598 setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, MVT::i128, Custom);
599 }
601 // FIXME - use subtarget debug flags
602 if (!Subtarget->isTargetDarwin() && !Subtarget->isTargetELF() &&
603 !Subtarget->isTargetCygMing() && !Subtarget->isTargetWin64()) {
604 setOperationAction(ISD::EH_LABEL, MVT::Other, Expand);
605 }
607 if (Subtarget->is64Bit()) {
608 setExceptionPointerRegister(X86::RAX);
609 setExceptionSelectorRegister(X86::RDX);
610 } else {
611 setExceptionPointerRegister(X86::EAX);
612 setExceptionSelectorRegister(X86::EDX);
613 }
614 setOperationAction(ISD::FRAME_TO_ARGS_OFFSET, MVT::i32, Custom);
615 setOperationAction(ISD::FRAME_TO_ARGS_OFFSET, MVT::i64, Custom);
617 setOperationAction(ISD::INIT_TRAMPOLINE, MVT::Other, Custom);
618 setOperationAction(ISD::ADJUST_TRAMPOLINE, MVT::Other, Custom);
620 setOperationAction(ISD::TRAP, MVT::Other, Legal);
621 setOperationAction(ISD::DEBUGTRAP, MVT::Other, Legal);
623 // VASTART needs to be custom lowered to use the VarArgsFrameIndex
624 setOperationAction(ISD::VASTART , MVT::Other, Custom);
625 setOperationAction(ISD::VAEND , MVT::Other, Expand);
626 if (Subtarget->is64Bit() && !Subtarget->isTargetWin64()) {
627 // TargetInfo::X86_64ABIBuiltinVaList
628 setOperationAction(ISD::VAARG , MVT::Other, Custom);
629 setOperationAction(ISD::VACOPY , MVT::Other, Custom);
630 } else {
631 // TargetInfo::CharPtrBuiltinVaList
632 setOperationAction(ISD::VAARG , MVT::Other, Expand);
633 setOperationAction(ISD::VACOPY , MVT::Other, Expand);
634 }
636 setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
637 setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
639 setOperationAction(ISD::DYNAMIC_STACKALLOC, getPointerTy(), Custom);
641 if (!TM.Options.UseSoftFloat && X86ScalarSSEf64) {
642 // f32 and f64 use SSE.
643 // Set up the FP register classes.
644 addRegisterClass(MVT::f32, &X86::FR32RegClass);
645 addRegisterClass(MVT::f64, &X86::FR64RegClass);
647 // Use ANDPD to simulate FABS.
648 setOperationAction(ISD::FABS , MVT::f64, Custom);
649 setOperationAction(ISD::FABS , MVT::f32, Custom);
651 // Use XORP to simulate FNEG.
652 setOperationAction(ISD::FNEG , MVT::f64, Custom);
653 setOperationAction(ISD::FNEG , MVT::f32, Custom);
655 // Use ANDPD and ORPD to simulate FCOPYSIGN.
656 setOperationAction(ISD::FCOPYSIGN, MVT::f64, Custom);
657 setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);
659 // Lower this to FGETSIGNx86 plus an AND.
660 setOperationAction(ISD::FGETSIGN, MVT::i64, Custom);
661 setOperationAction(ISD::FGETSIGN, MVT::i32, Custom);
663 // We don't support sin/cos/fmod
664 setOperationAction(ISD::FSIN , MVT::f64, Expand);
665 setOperationAction(ISD::FCOS , MVT::f64, Expand);
666 setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
667 setOperationAction(ISD::FSIN , MVT::f32, Expand);
668 setOperationAction(ISD::FCOS , MVT::f32, Expand);
669 setOperationAction(ISD::FSINCOS, MVT::f32, Expand);
671 // Expand FP immediates into loads from the stack, except for the special
672 // cases we handle.
673 addLegalFPImmediate(APFloat(+0.0)); // xorpd
674 addLegalFPImmediate(APFloat(+0.0f)); // xorps
675 } else if (!TM.Options.UseSoftFloat && X86ScalarSSEf32) {
676 // Use SSE for f32, x87 for f64.
677 // Set up the FP register classes.
678 addRegisterClass(MVT::f32, &X86::FR32RegClass);
679 addRegisterClass(MVT::f64, &X86::RFP64RegClass);
681 // Use ANDPS to simulate FABS.
682 setOperationAction(ISD::FABS , MVT::f32, Custom);
684 // Use XORP to simulate FNEG.
685 setOperationAction(ISD::FNEG , MVT::f32, Custom);
687 setOperationAction(ISD::UNDEF, MVT::f64, Expand);
689 // Use ANDPS and ORPS to simulate FCOPYSIGN.
690 setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
691 setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);
693 // We don't support sin/cos/fmod
694 setOperationAction(ISD::FSIN , MVT::f32, Expand);
695 setOperationAction(ISD::FCOS , MVT::f32, Expand);
696 setOperationAction(ISD::FSINCOS, MVT::f32, Expand);
698 // Special cases we handle for FP constants.
699 addLegalFPImmediate(APFloat(+0.0f)); // xorps
700 addLegalFPImmediate(APFloat(+0.0)); // FLD0
701 addLegalFPImmediate(APFloat(+1.0)); // FLD1
702 addLegalFPImmediate(APFloat(-0.0)); // FLD0/FCHS
703 addLegalFPImmediate(APFloat(-1.0)); // FLD1/FCHS
705 if (!TM.Options.UnsafeFPMath) {
706 setOperationAction(ISD::FSIN , MVT::f64, Expand);
707 setOperationAction(ISD::FCOS , MVT::f64, Expand);
708 setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
709 }
710 } else if (!TM.Options.UseSoftFloat) {
711 // f32 and f64 in x87.
712 // Set up the FP register classes.
713 addRegisterClass(MVT::f64, &X86::RFP64RegClass);
714 addRegisterClass(MVT::f32, &X86::RFP32RegClass);
716 setOperationAction(ISD::UNDEF, MVT::f64, Expand);
717 setOperationAction(ISD::UNDEF, MVT::f32, Expand);
718 setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
719 setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);
721 if (!TM.Options.UnsafeFPMath) {
722 setOperationAction(ISD::FSIN , MVT::f64, Expand);
723 setOperationAction(ISD::FSIN , MVT::f32, Expand);
724 setOperationAction(ISD::FCOS , MVT::f64, Expand);
725 setOperationAction(ISD::FCOS , MVT::f32, Expand);
726 setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
727 setOperationAction(ISD::FSINCOS, MVT::f32, Expand);
728 }
729 addLegalFPImmediate(APFloat(+0.0)); // FLD0
730 addLegalFPImmediate(APFloat(+1.0)); // FLD1
731 addLegalFPImmediate(APFloat(-0.0)); // FLD0/FCHS
732 addLegalFPImmediate(APFloat(-1.0)); // FLD1/FCHS
733 addLegalFPImmediate(APFloat(+0.0f)); // FLD0
734 addLegalFPImmediate(APFloat(+1.0f)); // FLD1
735 addLegalFPImmediate(APFloat(-0.0f)); // FLD0/FCHS
736 addLegalFPImmediate(APFloat(-1.0f)); // FLD1/FCHS
737 }
739 // We don't support FMA.
740 setOperationAction(ISD::FMA, MVT::f64, Expand);
741 setOperationAction(ISD::FMA, MVT::f32, Expand);
743 // Long double always uses X87.
744 if (!TM.Options.UseSoftFloat) {
745 addRegisterClass(MVT::f80, &X86::RFP80RegClass);
746 setOperationAction(ISD::UNDEF, MVT::f80, Expand);
747 setOperationAction(ISD::FCOPYSIGN, MVT::f80, Expand);
748 {
749 APFloat TmpFlt = APFloat::getZero(APFloat::x87DoubleExtended);
750 addLegalFPImmediate(TmpFlt); // FLD0
751 TmpFlt.changeSign();
752 addLegalFPImmediate(TmpFlt); // FLD0/FCHS
754 bool ignored;
755 APFloat TmpFlt2(+1.0);
756 TmpFlt2.convert(APFloat::x87DoubleExtended, APFloat::rmNearestTiesToEven,
757 &ignored);
758 addLegalFPImmediate(TmpFlt2); // FLD1
759 TmpFlt2.changeSign();
760 addLegalFPImmediate(TmpFlt2); // FLD1/FCHS
761 }
763 if (!TM.Options.UnsafeFPMath) {
764 setOperationAction(ISD::FSIN , MVT::f80, Expand);
765 setOperationAction(ISD::FCOS , MVT::f80, Expand);
766 setOperationAction(ISD::FSINCOS, MVT::f80, Expand);
767 }
769 setOperationAction(ISD::FFLOOR, MVT::f80, Expand);
770 setOperationAction(ISD::FCEIL, MVT::f80, Expand);
771 setOperationAction(ISD::FTRUNC, MVT::f80, Expand);
772 setOperationAction(ISD::FRINT, MVT::f80, Expand);
773 setOperationAction(ISD::FNEARBYINT, MVT::f80, Expand);
774 setOperationAction(ISD::FMA, MVT::f80, Expand);
775 }
777 // Always use a library call for pow.
778 setOperationAction(ISD::FPOW , MVT::f32 , Expand);
779 setOperationAction(ISD::FPOW , MVT::f64 , Expand);
780 setOperationAction(ISD::FPOW , MVT::f80 , Expand);
782 setOperationAction(ISD::FLOG, MVT::f80, Expand);
783 setOperationAction(ISD::FLOG2, MVT::f80, Expand);
784 setOperationAction(ISD::FLOG10, MVT::f80, Expand);
785 setOperationAction(ISD::FEXP, MVT::f80, Expand);
786 setOperationAction(ISD::FEXP2, MVT::f80, Expand);
787 setOperationAction(ISD::FMINNUM, MVT::f80, Expand);
788 setOperationAction(ISD::FMAXNUM, MVT::f80, Expand);
790 // First set operation action for all vector types to either promote
791 // (for widening) or expand (for scalarization). Then we will selectively
792 // turn on ones that can be effectively codegen'd.
793 for (MVT VT : MVT::vector_valuetypes()) {
794 setOperationAction(ISD::ADD , VT, Expand);
795 setOperationAction(ISD::SUB , VT, Expand);
796 setOperationAction(ISD::FADD, VT, Expand);
797 setOperationAction(ISD::FNEG, VT, Expand);
798 setOperationAction(ISD::FSUB, VT, Expand);
799 setOperationAction(ISD::MUL , VT, Expand);
800 setOperationAction(ISD::FMUL, VT, Expand);
801 setOperationAction(ISD::SDIV, VT, Expand);
802 setOperationAction(ISD::UDIV, VT, Expand);
803 setOperationAction(ISD::FDIV, VT, Expand);
804 setOperationAction(ISD::SREM, VT, Expand);
805 setOperationAction(ISD::UREM, VT, Expand);
806 setOperationAction(ISD::LOAD, VT, Expand);
807 setOperationAction(ISD::VECTOR_SHUFFLE, VT, Expand);
808 setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT,Expand);
809 setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Expand);
810 setOperationAction(ISD::EXTRACT_SUBVECTOR, VT,Expand);
811 setOperationAction(ISD::INSERT_SUBVECTOR, VT,Expand);
812 setOperationAction(ISD::FABS, VT, Expand);
813 setOperationAction(ISD::FSIN, VT, Expand);
814 setOperationAction(ISD::FSINCOS, VT, Expand);
815 setOperationAction(ISD::FCOS, VT, Expand);
816 setOperationAction(ISD::FSINCOS, VT, Expand);
817 setOperationAction(ISD::FREM, VT, Expand);
818 setOperationAction(ISD::FMA, VT, Expand);
819 setOperationAction(ISD::FPOWI, VT, Expand);
820 setOperationAction(ISD::FSQRT, VT, Expand);
821 setOperationAction(ISD::FCOPYSIGN, VT, Expand);
822 setOperationAction(ISD::FFLOOR, VT, Expand);
823 setOperationAction(ISD::FCEIL, VT, Expand);
824 setOperationAction(ISD::FTRUNC, VT, Expand);
825 setOperationAction(ISD::FRINT, VT, Expand);
826 setOperationAction(ISD::FNEARBYINT, VT, Expand);
827 setOperationAction(ISD::SMUL_LOHI, VT, Expand);
828 setOperationAction(ISD::MULHS, VT, Expand);
829 setOperationAction(ISD::UMUL_LOHI, VT, Expand);
830 setOperationAction(ISD::MULHU, VT, Expand);
831 setOperationAction(ISD::SDIVREM, VT, Expand);
832 setOperationAction(ISD::UDIVREM, VT, Expand);
833 setOperationAction(ISD::FPOW, VT, Expand);
834 setOperationAction(ISD::CTPOP, VT, Expand);
835 setOperationAction(ISD::CTTZ, VT, Expand);
836 setOperationAction(ISD::CTTZ_ZERO_UNDEF, VT, Expand);
837 setOperationAction(ISD::CTLZ, VT, Expand);
838 setOperationAction(ISD::CTLZ_ZERO_UNDEF, VT, Expand);
839 setOperationAction(ISD::SHL, VT, Expand);
840 setOperationAction(ISD::SRA, VT, Expand);
841 setOperationAction(ISD::SRL, VT, Expand);
842 setOperationAction(ISD::ROTL, VT, Expand);
843 setOperationAction(ISD::ROTR, VT, Expand);
844 setOperationAction(ISD::BSWAP, VT, Expand);
845 setOperationAction(ISD::SETCC, VT, Expand);
846 setOperationAction(ISD::FLOG, VT, Expand);
847 setOperationAction(ISD::FLOG2, VT, Expand);
848 setOperationAction(ISD::FLOG10, VT, Expand);
849 setOperationAction(ISD::FEXP, VT, Expand);
850 setOperationAction(ISD::FEXP2, VT, Expand);
851 setOperationAction(ISD::FP_TO_UINT, VT, Expand);
852 setOperationAction(ISD::FP_TO_SINT, VT, Expand);
853 setOperationAction(ISD::UINT_TO_FP, VT, Expand);
854 setOperationAction(ISD::SINT_TO_FP, VT, Expand);
855 setOperationAction(ISD::SIGN_EXTEND_INREG, VT,Expand);
856 setOperationAction(ISD::TRUNCATE, VT, Expand);
857 setOperationAction(ISD::SIGN_EXTEND, VT, Expand);
858 setOperationAction(ISD::ZERO_EXTEND, VT, Expand);
859 setOperationAction(ISD::ANY_EXTEND, VT, Expand);
860 setOperationAction(ISD::VSELECT, VT, Expand);
861 setOperationAction(ISD::SELECT_CC, VT, Expand);
862 for (MVT InnerVT : MVT::vector_valuetypes()) {
863 setTruncStoreAction(InnerVT, VT, Expand);
865 setLoadExtAction(ISD::SEXTLOAD, InnerVT, VT, Expand);
866 setLoadExtAction(ISD::ZEXTLOAD, InnerVT, VT, Expand);
868 // N.b. ISD::EXTLOAD legality is basically ignored except for i1-like
869 // types; we have to deal with them whether we ask for Expansion or not.
870 // Setting Expand causes its own optimisation problems though, so leave
871 // them legal.
872 if (VT.getVectorElementType() == MVT::i1)
873 setLoadExtAction(ISD::EXTLOAD, InnerVT, VT, Expand);
874 }
875 }
877 // FIXME: In order to prevent SSE instructions being expanded to MMX ones
878 // with -msoft-float, disable use of MMX as well.
879 if (!TM.Options.UseSoftFloat && Subtarget->hasMMX()) {
880 addRegisterClass(MVT::x86mmx, &X86::VR64RegClass);
881 // No operations on x86mmx supported, everything uses intrinsics.
882 }
884 // MMX-sized vectors (other than x86mmx) are expected to be expanded
885 // into smaller operations.
886 setOperationAction(ISD::MULHS, MVT::v8i8, Expand);
887 setOperationAction(ISD::MULHS, MVT::v4i16, Expand);
888 setOperationAction(ISD::MULHS, MVT::v2i32, Expand);
889 setOperationAction(ISD::MULHS, MVT::v1i64, Expand);
890 setOperationAction(ISD::AND, MVT::v8i8, Expand);
891 setOperationAction(ISD::AND, MVT::v4i16, Expand);
892 setOperationAction(ISD::AND, MVT::v2i32, Expand);
893 setOperationAction(ISD::AND, MVT::v1i64, Expand);
894 setOperationAction(ISD::OR, MVT::v8i8, Expand);
895 setOperationAction(ISD::OR, MVT::v4i16, Expand);
896 setOperationAction(ISD::OR, MVT::v2i32, Expand);
897 setOperationAction(ISD::OR, MVT::v1i64, Expand);
898 setOperationAction(ISD::XOR, MVT::v8i8, Expand);
899 setOperationAction(ISD::XOR, MVT::v4i16, Expand);
900 setOperationAction(ISD::XOR, MVT::v2i32, Expand);
901 setOperationAction(ISD::XOR, MVT::v1i64, Expand);
902 setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v8i8, Expand);
903 setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i16, Expand);
904 setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v2i32, Expand);
905 setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v1i64, Expand);
906 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v1i64, Expand);
907 setOperationAction(ISD::SELECT, MVT::v8i8, Expand);
908 setOperationAction(ISD::SELECT, MVT::v4i16, Expand);
909 setOperationAction(ISD::SELECT, MVT::v2i32, Expand);
910 setOperationAction(ISD::SELECT, MVT::v1i64, Expand);
911 setOperationAction(ISD::BITCAST, MVT::v8i8, Expand);
912 setOperationAction(ISD::BITCAST, MVT::v4i16, Expand);
913 setOperationAction(ISD::BITCAST, MVT::v2i32, Expand);
914 setOperationAction(ISD::BITCAST, MVT::v1i64, Expand);
916 if (!TM.Options.UseSoftFloat && Subtarget->hasSSE1()) {
917 addRegisterClass(MVT::v4f32, &X86::VR128RegClass);
919 setOperationAction(ISD::FADD, MVT::v4f32, Legal);
920 setOperationAction(ISD::FSUB, MVT::v4f32, Legal);
921 setOperationAction(ISD::FMUL, MVT::v4f32, Legal);
922 setOperationAction(ISD::FDIV, MVT::v4f32, Legal);
923 setOperationAction(ISD::FSQRT, MVT::v4f32, Legal);
924 setOperationAction(ISD::FNEG, MVT::v4f32, Custom);
925 setOperationAction(ISD::FABS, MVT::v4f32, Custom);
926 setOperationAction(ISD::LOAD, MVT::v4f32, Legal);
927 setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom);
928 setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4f32, Custom);
929 setOperationAction(ISD::VSELECT, MVT::v4f32, Custom);
930 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Custom);
931 setOperationAction(ISD::SELECT, MVT::v4f32, Custom);
932 setOperationAction(ISD::UINT_TO_FP, MVT::v4i32, Custom);
933 }
935 if (!TM.Options.UseSoftFloat && Subtarget->hasSSE2()) {
936 addRegisterClass(MVT::v2f64, &X86::VR128RegClass);
938 // FIXME: Unfortunately, -soft-float and -no-implicit-float mean XMM
939 // registers cannot be used even for integer operations.
940 addRegisterClass(MVT::v16i8, &X86::VR128RegClass);
941 addRegisterClass(MVT::v8i16, &X86::VR128RegClass);
942 addRegisterClass(MVT::v4i32, &X86::VR128RegClass);
943 addRegisterClass(MVT::v2i64, &X86::VR128RegClass);
945 setOperationAction(ISD::ADD, MVT::v16i8, Legal);
946 setOperationAction(ISD::ADD, MVT::v8i16, Legal);
947 setOperationAction(ISD::ADD, MVT::v4i32, Legal);
948 setOperationAction(ISD::ADD, MVT::v2i64, Legal);
949 setOperationAction(ISD::MUL, MVT::v4i32, Custom);
950 setOperationAction(ISD::MUL, MVT::v2i64, Custom);
951 setOperationAction(ISD::UMUL_LOHI, MVT::v4i32, Custom);
952 setOperationAction(ISD::SMUL_LOHI, MVT::v4i32, Custom);
953 setOperationAction(ISD::MULHU, MVT::v8i16, Legal);
954 setOperationAction(ISD::MULHS, MVT::v8i16, Legal);
955 setOperationAction(ISD::SUB, MVT::v16i8, Legal);
956 setOperationAction(ISD::SUB, MVT::v8i16, Legal);
957 setOperationAction(ISD::SUB, MVT::v4i32, Legal);
958 setOperationAction(ISD::SUB, MVT::v2i64, Legal);
959 setOperationAction(ISD::MUL, MVT::v8i16, Legal);
960 setOperationAction(ISD::FADD, MVT::v2f64, Legal);
961 setOperationAction(ISD::FSUB, MVT::v2f64, Legal);
962 setOperationAction(ISD::FMUL, MVT::v2f64, Legal);
963 setOperationAction(ISD::FDIV, MVT::v2f64, Legal);
964 setOperationAction(ISD::FSQRT, MVT::v2f64, Legal);
965 setOperationAction(ISD::FNEG, MVT::v2f64, Custom);
966 setOperationAction(ISD::FABS, MVT::v2f64, Custom);
968 setOperationAction(ISD::SETCC, MVT::v2i64, Custom);
969 setOperationAction(ISD::SETCC, MVT::v16i8, Custom);
970 setOperationAction(ISD::SETCC, MVT::v8i16, Custom);
971 setOperationAction(ISD::SETCC, MVT::v4i32, Custom);
973 setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v16i8, Custom);
974 setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v8i16, Custom);
975 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i16, Custom);
976 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i32, Custom);
977 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom);
979 // Only provide customized ctpop vector bit twiddling for vector types we
980 // know to perform better than using the popcnt instructions on each vector
981 // element. If popcnt isn't supported, always provide the custom version.
982 if (!Subtarget->hasPOPCNT()) {
983 setOperationAction(ISD::CTPOP, MVT::v4i32, Custom);
984 setOperationAction(ISD::CTPOP, MVT::v2i64, Custom);
985 }
987 // Custom lower build_vector, vector_shuffle, and extract_vector_elt.
988 for (int i = MVT::v16i8; i != MVT::v2i64; ++i) {
989 MVT VT = (MVT::SimpleValueType)i;
990 // Do not attempt to custom lower non-power-of-2 vectors
991 if (!isPowerOf2_32(VT.getVectorNumElements()))
992 continue;
993 // Do not attempt to custom lower non-128-bit vectors
994 if (!VT.is128BitVector())
995 continue;
996 setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
997 setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
998 setOperationAction(ISD::VSELECT, VT, Custom);
999 setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
1000 }
1002 // We support custom legalizing of sext and anyext loads for specific
1003 // memory vector types which we can load as a scalar (or sequence of
1004 // scalars) and extend in-register to a legal 128-bit vector type. For sext
1005 // loads these must work with a single scalar load.
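// For example, a sign-extending load of v4i8 can be done as one 32-bit
// scalar load followed by an in-register sign extension to v4i32.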
1006 for (MVT VT : MVT::integer_vector_valuetypes()) {
1007 setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v4i8, Custom);
1008 setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v4i16, Custom);
1009 setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v8i8, Custom);
1010 setLoadExtAction(ISD::EXTLOAD, VT, MVT::v2i8, Custom);
1011 setLoadExtAction(ISD::EXTLOAD, VT, MVT::v2i16, Custom);
1012 setLoadExtAction(ISD::EXTLOAD, VT, MVT::v2i32, Custom);
1013 setLoadExtAction(ISD::EXTLOAD, VT, MVT::v4i8, Custom);
1014 setLoadExtAction(ISD::EXTLOAD, VT, MVT::v4i16, Custom);
1015 setLoadExtAction(ISD::EXTLOAD, VT, MVT::v8i8, Custom);
1016 }
1018 setOperationAction(ISD::BUILD_VECTOR, MVT::v2f64, Custom);
1019 setOperationAction(ISD::BUILD_VECTOR, MVT::v2i64, Custom);
1020 setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2f64, Custom);
1021 setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2i64, Custom);
1022 setOperationAction(ISD::VSELECT, MVT::v2f64, Custom);
1023 setOperationAction(ISD::VSELECT, MVT::v2i64, Custom);
1024 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2f64, Custom);
1025 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Custom);
1027 if (Subtarget->is64Bit()) {
1028 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2i64, Custom);
1029 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i64, Custom);
1030 }
1032 // Promote v16i8, v8i16, v4i32 load, select, and, or, xor to v2i64.
1033 for (int i = MVT::v16i8; i != MVT::v2i64; ++i) {
1034 MVT VT = (MVT::SimpleValueType)i;
1036 // Do not attempt to promote non-128-bit vectors
1037 if (!VT.is128BitVector())
1038 continue;
1040 setOperationAction(ISD::AND, VT, Promote);
1041 AddPromotedToType (ISD::AND, VT, MVT::v2i64);
1042 setOperationAction(ISD::OR, VT, Promote);
1043 AddPromotedToType (ISD::OR, VT, MVT::v2i64);
1044 setOperationAction(ISD::XOR, VT, Promote);
1045 AddPromotedToType (ISD::XOR, VT, MVT::v2i64);
1046 setOperationAction(ISD::LOAD, VT, Promote);
1047 AddPromotedToType (ISD::LOAD, VT, MVT::v2i64);
1048 setOperationAction(ISD::SELECT, VT, Promote);
1049 AddPromotedToType (ISD::SELECT, VT, MVT::v2i64);
1050 }
1052 // Custom lower v2i64 and v2f64 selects.
1053 setOperationAction(ISD::LOAD, MVT::v2f64, Legal);
1054 setOperationAction(ISD::LOAD, MVT::v2i64, Legal);
1055 setOperationAction(ISD::SELECT, MVT::v2f64, Custom);
1056 setOperationAction(ISD::SELECT, MVT::v2i64, Custom);
1058 setOperationAction(ISD::FP_TO_SINT, MVT::v4i32, Legal);
1059 setOperationAction(ISD::SINT_TO_FP, MVT::v4i32, Legal);
1061 setOperationAction(ISD::UINT_TO_FP, MVT::v4i8, Custom);
1062 setOperationAction(ISD::UINT_TO_FP, MVT::v4i16, Custom);
1063 // As there is no 64-bit GPR available, we need to build a special custom
1064 // sequence to convert from v2i32 to v2f32.
1065 if (!Subtarget->is64Bit())
1066 setOperationAction(ISD::UINT_TO_FP, MVT::v2f32, Custom);
1068 setOperationAction(ISD::FP_EXTEND, MVT::v2f32, Custom);
1069 setOperationAction(ISD::FP_ROUND, MVT::v2f32, Custom);
1071 for (MVT VT : MVT::fp_vector_valuetypes())
1072 setLoadExtAction(ISD::EXTLOAD, VT, MVT::v2f32, Legal);
1074 setOperationAction(ISD::BITCAST, MVT::v2i32, Custom);
1075 setOperationAction(ISD::BITCAST, MVT::v4i16, Custom);
1076 setOperationAction(ISD::BITCAST, MVT::v8i8, Custom);
1077 }
1079 if (!TM.Options.UseSoftFloat && Subtarget->hasSSE41()) {
1080 setOperationAction(ISD::FFLOOR, MVT::f32, Legal);
1081 setOperationAction(ISD::FCEIL, MVT::f32, Legal);
1082 setOperationAction(ISD::FTRUNC, MVT::f32, Legal);
1083 setOperationAction(ISD::FRINT, MVT::f32, Legal);
1084 setOperationAction(ISD::FNEARBYINT, MVT::f32, Legal);
1085 setOperationAction(ISD::FFLOOR, MVT::f64, Legal);
1086 setOperationAction(ISD::FCEIL, MVT::f64, Legal);
1087 setOperationAction(ISD::FTRUNC, MVT::f64, Legal);
1088 setOperationAction(ISD::FRINT, MVT::f64, Legal);
1089 setOperationAction(ISD::FNEARBYINT, MVT::f64, Legal);
1091 setOperationAction(ISD::FFLOOR, MVT::v4f32, Legal);
1092 setOperationAction(ISD::FCEIL, MVT::v4f32, Legal);
1093 setOperationAction(ISD::FTRUNC, MVT::v4f32, Legal);
1094 setOperationAction(ISD::FRINT, MVT::v4f32, Legal);
1095 setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Legal);
1096 setOperationAction(ISD::FFLOOR, MVT::v2f64, Legal);
1097 setOperationAction(ISD::FCEIL, MVT::v2f64, Legal);
1098 setOperationAction(ISD::FTRUNC, MVT::v2f64, Legal);
1099 setOperationAction(ISD::FRINT, MVT::v2f64, Legal);
1100 setOperationAction(ISD::FNEARBYINT, MVT::v2f64, Legal);
1102 // FIXME: Do we need to handle scalar-to-vector here?
1103 setOperationAction(ISD::MUL, MVT::v4i32, Legal);
1105 // We directly match byte blends in the backend as they match the VSELECT
1106 // condition form.
1107 setOperationAction(ISD::VSELECT, MVT::v16i8, Legal);
1109 // SSE41 brings specific instructions for doing vector sign extend even in
1110 // cases where we don't have SRA.
1111 for (MVT VT : MVT::integer_vector_valuetypes()) {
1112 setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v2i8, Custom);
1113 setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v2i16, Custom);
1114 setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v2i32, Custom);
1115 }
1117 // SSE41 also has vector sign/zero extending loads, PMOV[SZ]X
1118 setLoadExtAction(ISD::SEXTLOAD, MVT::v8i16, MVT::v8i8, Legal);
1119 setLoadExtAction(ISD::SEXTLOAD, MVT::v4i32, MVT::v4i8, Legal);
1120 setLoadExtAction(ISD::SEXTLOAD, MVT::v2i64, MVT::v2i8, Legal);
1121 setLoadExtAction(ISD::SEXTLOAD, MVT::v4i32, MVT::v4i16, Legal);
1122 setLoadExtAction(ISD::SEXTLOAD, MVT::v2i64, MVT::v2i16, Legal);
1123 setLoadExtAction(ISD::SEXTLOAD, MVT::v2i64, MVT::v2i32, Legal);
1125 setLoadExtAction(ISD::ZEXTLOAD, MVT::v8i16, MVT::v8i8, Legal);
1126 setLoadExtAction(ISD::ZEXTLOAD, MVT::v4i32, MVT::v4i8, Legal);
1127 setLoadExtAction(ISD::ZEXTLOAD, MVT::v2i64, MVT::v2i8, Legal);
1128 setLoadExtAction(ISD::ZEXTLOAD, MVT::v4i32, MVT::v4i16, Legal);
1129 setLoadExtAction(ISD::ZEXTLOAD, MVT::v2i64, MVT::v2i16, Legal);
1130 setLoadExtAction(ISD::ZEXTLOAD, MVT::v2i64, MVT::v2i32, Legal);
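// For example, a sign-extending load from v8i8 memory into a v8i16 register
// maps directly onto PMOVSXBW, and the zero-extending form onto PMOVZXBW.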
1132 // i8 and i16 vectors are custom because the source register and source
1133 // memory operand types are not the same width. f32 vectors are
1134 // custom since the immediate controlling the insert encodes additional
1135 // information.
1136 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v16i8, Custom);
1137 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i16, Custom);
1138 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i32, Custom);
1139 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom);
1141 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v16i8, Custom);
1142 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v8i16, Custom);
1143 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4i32, Custom);
1144 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Custom);
1146 // FIXME: these should be Legal, but that's only for the case where
1147 // the index is constant. For now custom expand to deal with that.
1148 if (Subtarget->is64Bit()) {
1149 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2i64, Custom);
1150 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i64, Custom);
1151 }
1152 }
1154 if (Subtarget->hasSSE2()) {
1155 setOperationAction(ISD::SRL, MVT::v8i16, Custom);
1156 setOperationAction(ISD::SRL, MVT::v16i8, Custom);
1158 setOperationAction(ISD::SHL, MVT::v8i16, Custom);
1159 setOperationAction(ISD::SHL, MVT::v16i8, Custom);
1161 setOperationAction(ISD::SRA, MVT::v8i16, Custom);
1162 setOperationAction(ISD::SRA, MVT::v16i8, Custom);
1164 // In the customized shift lowering, the legal cases in AVX2 will be
1165 // recognized.
1166 setOperationAction(ISD::SRL, MVT::v2i64, Custom);
1167 setOperationAction(ISD::SRL, MVT::v4i32, Custom);
1169 setOperationAction(ISD::SHL, MVT::v2i64, Custom);
1170 setOperationAction(ISD::SHL, MVT::v4i32, Custom);
1172 setOperationAction(ISD::SRA, MVT::v4i32, Custom);
1173 }
1175 if (!TM.Options.UseSoftFloat && Subtarget->hasFp256()) {
1176 addRegisterClass(MVT::v32i8, &X86::VR256RegClass);
1177 addRegisterClass(MVT::v16i16, &X86::VR256RegClass);
1178 addRegisterClass(MVT::v8i32, &X86::VR256RegClass);
1179 addRegisterClass(MVT::v8f32, &X86::VR256RegClass);
1180 addRegisterClass(MVT::v4i64, &X86::VR256RegClass);
1181 addRegisterClass(MVT::v4f64, &X86::VR256RegClass);
1183 setOperationAction(ISD::LOAD, MVT::v8f32, Legal);
1184 setOperationAction(ISD::LOAD, MVT::v4f64, Legal);
1185 setOperationAction(ISD::LOAD, MVT::v4i64, Legal);
1187 setOperationAction(ISD::FADD, MVT::v8f32, Legal);
1188 setOperationAction(ISD::FSUB, MVT::v8f32, Legal);
1189 setOperationAction(ISD::FMUL, MVT::v8f32, Legal);
1190 setOperationAction(ISD::FDIV, MVT::v8f32, Legal);
1191 setOperationAction(ISD::FSQRT, MVT::v8f32, Legal);
1192 setOperationAction(ISD::FFLOOR, MVT::v8f32, Legal);
1193 setOperationAction(ISD::FCEIL, MVT::v8f32, Legal);
1194 setOperationAction(ISD::FTRUNC, MVT::v8f32, Legal);
1195 setOperationAction(ISD::FRINT, MVT::v8f32, Legal);
1196 setOperationAction(ISD::FNEARBYINT, MVT::v8f32, Legal);
1197 setOperationAction(ISD::FNEG, MVT::v8f32, Custom);
1198 setOperationAction(ISD::FABS, MVT::v8f32, Custom);
1200 setOperationAction(ISD::FADD, MVT::v4f64, Legal);
1201 setOperationAction(ISD::FSUB, MVT::v4f64, Legal);
1202 setOperationAction(ISD::FMUL, MVT::v4f64, Legal);
1203 setOperationAction(ISD::FDIV, MVT::v4f64, Legal);
1204 setOperationAction(ISD::FSQRT, MVT::v4f64, Legal);
1205 setOperationAction(ISD::FFLOOR, MVT::v4f64, Legal);
1206 setOperationAction(ISD::FCEIL, MVT::v4f64, Legal);
1207 setOperationAction(ISD::FTRUNC, MVT::v4f64, Legal);
1208 setOperationAction(ISD::FRINT, MVT::v4f64, Legal);
1209 setOperationAction(ISD::FNEARBYINT, MVT::v4f64, Legal);
1210 setOperationAction(ISD::FNEG, MVT::v4f64, Custom);
1211 setOperationAction(ISD::FABS, MVT::v4f64, Custom);
1213 // (fp_to_int:v8i16 (v8f32 ..)) requires the result type to be promoted
1214 // even though v8i16 is a legal type.
1215 setOperationAction(ISD::FP_TO_SINT, MVT::v8i16, Promote);
1216 setOperationAction(ISD::FP_TO_UINT, MVT::v8i16, Promote);
1217 setOperationAction(ISD::FP_TO_SINT, MVT::v8i32, Legal);
1219 setOperationAction(ISD::SINT_TO_FP, MVT::v8i16, Promote);
1220 setOperationAction(ISD::SINT_TO_FP, MVT::v8i32, Legal);
1221 setOperationAction(ISD::FP_ROUND, MVT::v4f32, Legal);
1223 setOperationAction(ISD::UINT_TO_FP, MVT::v8i8, Custom);
1224 setOperationAction(ISD::UINT_TO_FP, MVT::v8i16, Custom);
1226 for (MVT VT : MVT::fp_vector_valuetypes())
1227 setLoadExtAction(ISD::EXTLOAD, VT, MVT::v4f32, Legal);
1229 setOperationAction(ISD::SRL, MVT::v16i16, Custom);
1230 setOperationAction(ISD::SRL, MVT::v32i8, Custom);
1232 setOperationAction(ISD::SHL, MVT::v16i16, Custom);
1233 setOperationAction(ISD::SHL, MVT::v32i8, Custom);
1235 setOperationAction(ISD::SRA, MVT::v16i16, Custom);
1236 setOperationAction(ISD::SRA, MVT::v32i8, Custom);
1238 setOperationAction(ISD::SETCC, MVT::v32i8, Custom);
1239 setOperationAction(ISD::SETCC, MVT::v16i16, Custom);
1240 setOperationAction(ISD::SETCC, MVT::v8i32, Custom);
1241 setOperationAction(ISD::SETCC, MVT::v4i64, Custom);
1243 setOperationAction(ISD::SELECT, MVT::v4f64, Custom);
1244 setOperationAction(ISD::SELECT, MVT::v4i64, Custom);
1245 setOperationAction(ISD::SELECT, MVT::v8f32, Custom);
1247 setOperationAction(ISD::SIGN_EXTEND, MVT::v4i64, Custom);
1248 setOperationAction(ISD::SIGN_EXTEND, MVT::v8i32, Custom);
1249 setOperationAction(ISD::SIGN_EXTEND, MVT::v16i16, Custom);
1250 setOperationAction(ISD::ZERO_EXTEND, MVT::v4i64, Custom);
1251 setOperationAction(ISD::ZERO_EXTEND, MVT::v8i32, Custom);
1252 setOperationAction(ISD::ZERO_EXTEND, MVT::v16i16, Custom);
1253 setOperationAction(ISD::ANY_EXTEND, MVT::v4i64, Custom);
1254 setOperationAction(ISD::ANY_EXTEND, MVT::v8i32, Custom);
1255 setOperationAction(ISD::ANY_EXTEND, MVT::v16i16, Custom);
1256 setOperationAction(ISD::TRUNCATE, MVT::v16i8, Custom);
1257 setOperationAction(ISD::TRUNCATE, MVT::v8i16, Custom);
1258 setOperationAction(ISD::TRUNCATE, MVT::v4i32, Custom);
1260 if (Subtarget->hasFMA() || Subtarget->hasFMA4()) {
1261 setOperationAction(ISD::FMA, MVT::v8f32, Legal);
1262 setOperationAction(ISD::FMA, MVT::v4f64, Legal);
1263 setOperationAction(ISD::FMA, MVT::v4f32, Legal);
1264 setOperationAction(ISD::FMA, MVT::v2f64, Legal);
1265 setOperationAction(ISD::FMA, MVT::f32, Legal);
1266 setOperationAction(ISD::FMA, MVT::f64, Legal);
1267 }
1269 if (Subtarget->hasInt256()) {
1270 setOperationAction(ISD::ADD, MVT::v4i64, Legal);
1271 setOperationAction(ISD::ADD, MVT::v8i32, Legal);
1272 setOperationAction(ISD::ADD, MVT::v16i16, Legal);
1273 setOperationAction(ISD::ADD, MVT::v32i8, Legal);
1275 setOperationAction(ISD::SUB, MVT::v4i64, Legal);
1276 setOperationAction(ISD::SUB, MVT::v8i32, Legal);
1277 setOperationAction(ISD::SUB, MVT::v16i16, Legal);
1278 setOperationAction(ISD::SUB, MVT::v32i8, Legal);
1280 setOperationAction(ISD::MUL, MVT::v4i64, Custom);
1281 setOperationAction(ISD::MUL, MVT::v8i32, Legal);
1282 setOperationAction(ISD::MUL, MVT::v16i16, Legal);
1283 // Don't lower v32i8 because there is no 128-bit byte mul
1285 setOperationAction(ISD::UMUL_LOHI, MVT::v8i32, Custom);
1286 setOperationAction(ISD::SMUL_LOHI, MVT::v8i32, Custom);
1287 setOperationAction(ISD::MULHU, MVT::v16i16, Legal);
1288 setOperationAction(ISD::MULHS, MVT::v16i16, Legal);
1290 // The custom lowering for UINT_TO_FP for v8i32 becomes interesting
1291 // when we have a 256bit-wide blend with immediate.
1292 setOperationAction(ISD::UINT_TO_FP, MVT::v8i32, Custom);
1294 // Only provide customized ctpop vector bit twiddling for vector types we
1295 // know to perform better than using the popcnt instructions on each
1296 // vector element. If popcnt isn't supported, always provide the custom
1297 // version.
1298 if (!Subtarget->hasPOPCNT())
1299 setOperationAction(ISD::CTPOP, MVT::v4i64, Custom);
1301 // Custom CTPOP always performs better on natively supported v8i32
1302 setOperationAction(ISD::CTPOP, MVT::v8i32, Custom);
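// For reference, the usual bit-twiddling popcount this relies on looks
// roughly like the following per 32-bit lane (illustrative only):
//   x -= (x >> 1) & 0x55555555;
//   x  = (x & 0x33333333) + ((x >> 2) & 0x33333333);
//   x  = (x + (x >> 4)) & 0x0F0F0F0F;
//   popcount = (x * 0x01010101) >> 24;
// executed with ordinary vector and/shift/add/mul nodes across all lanes.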
1304 // AVX2 also has wider vector sign/zero extending loads, VPMOV[SZ]X
1305 setLoadExtAction(ISD::SEXTLOAD, MVT::v16i16, MVT::v16i8, Legal);
1306 setLoadExtAction(ISD::SEXTLOAD, MVT::v8i32, MVT::v8i8, Legal);
1307 setLoadExtAction(ISD::SEXTLOAD, MVT::v4i64, MVT::v4i8, Legal);
1308 setLoadExtAction(ISD::SEXTLOAD, MVT::v8i32, MVT::v8i16, Legal);
1309 setLoadExtAction(ISD::SEXTLOAD, MVT::v4i64, MVT::v4i16, Legal);
1310 setLoadExtAction(ISD::SEXTLOAD, MVT::v4i64, MVT::v4i32, Legal);
1312 setLoadExtAction(ISD::ZEXTLOAD, MVT::v16i16, MVT::v16i8, Legal);
1313 setLoadExtAction(ISD::ZEXTLOAD, MVT::v8i32, MVT::v8i8, Legal);
1314 setLoadExtAction(ISD::ZEXTLOAD, MVT::v4i64, MVT::v4i8, Legal);
1315 setLoadExtAction(ISD::ZEXTLOAD, MVT::v8i32, MVT::v8i16, Legal);
1316 setLoadExtAction(ISD::ZEXTLOAD, MVT::v4i64, MVT::v4i16, Legal);
1317 setLoadExtAction(ISD::ZEXTLOAD, MVT::v4i64, MVT::v4i32, Legal);
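// Example of what the Legal entries above allow (assuming AVX2 codegen): a
// sign-extending load from <4 x i8> in memory to <4 x i64> can be selected
// to a single instruction, roughly
//   vpmovsxbq (%rdi), %ymm0
// instead of a narrow load followed by separate extension shuffles.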
1319 setOperationAction(ISD::ADD, MVT::v4i64, Custom);
1320 setOperationAction(ISD::ADD, MVT::v8i32, Custom);
1321 setOperationAction(ISD::ADD, MVT::v16i16, Custom);
1322 setOperationAction(ISD::ADD, MVT::v32i8, Custom);
1324 setOperationAction(ISD::SUB, MVT::v4i64, Custom);
1325 setOperationAction(ISD::SUB, MVT::v8i32, Custom);
1326 setOperationAction(ISD::SUB, MVT::v16i16, Custom);
1327 setOperationAction(ISD::SUB, MVT::v32i8, Custom);
1329 setOperationAction(ISD::MUL, MVT::v4i64, Custom);
1330 setOperationAction(ISD::MUL, MVT::v8i32, Custom);
1331 setOperationAction(ISD::MUL, MVT::v16i16, Custom);
1332 // Don't lower v32i8 because there is no 128-bit byte mul
1335 // In the customized shift lowering, the legal cases in AVX2 will be recognized.
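// For instance, a per-element shift such as shl <4 x i64> %x, %y maps
// directly to VPSLLVQ on AVX2, while forms AVX2 cannot do in one instruction
// fall back to the custom expansion (informal example).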
1337 setOperationAction(ISD::SRL, MVT::v4i64, Custom);
1338 setOperationAction(ISD::SRL, MVT::v8i32, Custom);
1340 setOperationAction(ISD::SHL, MVT::v4i64, Custom);
1341 setOperationAction(ISD::SHL, MVT::v8i32, Custom);
1343 setOperationAction(ISD::SRA, MVT::v8i32, Custom);
1345 // Custom lower several nodes for 256-bit types.
1346 for (MVT VT : MVT::vector_valuetypes()) {
1347 if (VT.getScalarSizeInBits() >= 32) {
1348 setOperationAction(ISD::MLOAD, VT, Legal);
1349 setOperationAction(ISD::MSTORE, VT, Legal);
1351 // Extract subvector is special because the value type
1352 // (result) is 128-bit but the source is 256-bit wide.
1353 if (VT.is128BitVector()) {
1354 setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
1356 // Do not attempt to custom lower other non-256-bit vectors
1357 if (!VT.is256BitVector())
1360 setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
1361 setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
1362 setOperationAction(ISD::VSELECT, VT, Custom);
1363 setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
1364 setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
1365 setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Custom);
1366 setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
1367 setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
1370 if (Subtarget->hasInt256())
1371 setOperationAction(ISD::VSELECT, MVT::v32i8, Legal);
1374 // Promote v32i8, v16i16, v8i32 select, and, or, xor to v4i64.
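// Promotion keeps the same 256 bits and only changes the type the operation
// is performed in. For example (illustration only), an AND of two v32i8
// values is bitcast to v4i64, executed as one 256-bit VPAND, and bitcast
// back; the bit pattern is unchanged, so this is purely a re-typing.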
1375 for (int i = MVT::v32i8; i != MVT::v4i64; ++i) {
1376 MVT VT = (MVT::SimpleValueType)i;
1378 // Do not attempt to promote non-256-bit vectors
1379 if (!VT.is256BitVector())
1382 setOperationAction(ISD::AND, VT, Promote);
1383 AddPromotedToType (ISD::AND, VT, MVT::v4i64);
1384 setOperationAction(ISD::OR, VT, Promote);
1385 AddPromotedToType (ISD::OR, VT, MVT::v4i64);
1386 setOperationAction(ISD::XOR, VT, Promote);
1387 AddPromotedToType (ISD::XOR, VT, MVT::v4i64);
1388 setOperationAction(ISD::LOAD, VT, Promote);
1389 AddPromotedToType (ISD::LOAD, VT, MVT::v4i64);
1390 setOperationAction(ISD::SELECT, VT, Promote);
1391 AddPromotedToType (ISD::SELECT, VT, MVT::v4i64);
1395 if (!TM.Options.UseSoftFloat && Subtarget->hasAVX512()) {
1396 addRegisterClass(MVT::v16i32, &X86::VR512RegClass);
1397 addRegisterClass(MVT::v16f32, &X86::VR512RegClass);
1398 addRegisterClass(MVT::v8i64, &X86::VR512RegClass);
1399 addRegisterClass(MVT::v8f64, &X86::VR512RegClass);
1401 addRegisterClass(MVT::i1, &X86::VK1RegClass);
1402 addRegisterClass(MVT::v8i1, &X86::VK8RegClass);
1403 addRegisterClass(MVT::v16i1, &X86::VK16RegClass);
1405 for (MVT VT : MVT::fp_vector_valuetypes())
1406 setLoadExtAction(ISD::EXTLOAD, VT, MVT::v8f32, Legal);
1408 setOperationAction(ISD::BR_CC, MVT::i1, Expand);
1409 setOperationAction(ISD::SETCC, MVT::i1, Custom);
1410 setOperationAction(ISD::XOR, MVT::i1, Legal);
1411 setOperationAction(ISD::OR, MVT::i1, Legal);
1412 setOperationAction(ISD::AND, MVT::i1, Legal);
1413 setOperationAction(ISD::LOAD, MVT::v16f32, Legal);
1414 setOperationAction(ISD::LOAD, MVT::v8f64, Legal);
1415 setOperationAction(ISD::LOAD, MVT::v8i64, Legal);
1416 setOperationAction(ISD::LOAD, MVT::v16i32, Legal);
1417 setOperationAction(ISD::LOAD, MVT::v16i1, Legal);
1419 setOperationAction(ISD::FADD, MVT::v16f32, Legal);
1420 setOperationAction(ISD::FSUB, MVT::v16f32, Legal);
1421 setOperationAction(ISD::FMUL, MVT::v16f32, Legal);
1422 setOperationAction(ISD::FDIV, MVT::v16f32, Legal);
1423 setOperationAction(ISD::FSQRT, MVT::v16f32, Legal);
1424 setOperationAction(ISD::FNEG, MVT::v16f32, Custom);
1426 setOperationAction(ISD::FADD, MVT::v8f64, Legal);
1427 setOperationAction(ISD::FSUB, MVT::v8f64, Legal);
1428 setOperationAction(ISD::FMUL, MVT::v8f64, Legal);
1429 setOperationAction(ISD::FDIV, MVT::v8f64, Legal);
1430 setOperationAction(ISD::FSQRT, MVT::v8f64, Legal);
1431 setOperationAction(ISD::FNEG, MVT::v8f64, Custom);
1432 setOperationAction(ISD::FMA, MVT::v8f64, Legal);
1433 setOperationAction(ISD::FMA, MVT::v16f32, Legal);
1435 setOperationAction(ISD::FP_TO_SINT, MVT::i32, Legal);
1436 setOperationAction(ISD::FP_TO_UINT, MVT::i32, Legal);
1437 setOperationAction(ISD::SINT_TO_FP, MVT::i32, Legal);
1438 setOperationAction(ISD::UINT_TO_FP, MVT::i32, Legal);
1439 if (Subtarget->is64Bit()) {
1440 setOperationAction(ISD::FP_TO_UINT, MVT::i64, Legal);
1441 setOperationAction(ISD::FP_TO_SINT, MVT::i64, Legal);
1442 setOperationAction(ISD::SINT_TO_FP, MVT::i64, Legal);
1443 setOperationAction(ISD::UINT_TO_FP, MVT::i64, Legal);
1445 setOperationAction(ISD::FP_TO_SINT, MVT::v16i32, Legal);
1446 setOperationAction(ISD::FP_TO_UINT, MVT::v16i32, Legal);
1447 setOperationAction(ISD::FP_TO_UINT, MVT::v8i32, Legal);
1448 setOperationAction(ISD::FP_TO_UINT, MVT::v4i32, Legal);
1449 setOperationAction(ISD::SINT_TO_FP, MVT::v16i32, Legal);
1450 setOperationAction(ISD::SINT_TO_FP, MVT::v8i1, Custom);
1451 setOperationAction(ISD::SINT_TO_FP, MVT::v16i1, Custom);
1452 setOperationAction(ISD::SINT_TO_FP, MVT::v16i8, Promote);
1453 setOperationAction(ISD::SINT_TO_FP, MVT::v16i16, Promote);
1454 setOperationAction(ISD::UINT_TO_FP, MVT::v16i32, Legal);
1455 setOperationAction(ISD::UINT_TO_FP, MVT::v8i32, Legal);
1456 setOperationAction(ISD::UINT_TO_FP, MVT::v4i32, Legal);
1457 setOperationAction(ISD::FP_ROUND, MVT::v8f32, Legal);
1458 setOperationAction(ISD::FP_EXTEND, MVT::v8f32, Legal);
1460 setOperationAction(ISD::TRUNCATE, MVT::i1, Custom);
1461 setOperationAction(ISD::TRUNCATE, MVT::v16i8, Custom);
1462 setOperationAction(ISD::TRUNCATE, MVT::v8i32, Custom);
1463 setOperationAction(ISD::TRUNCATE, MVT::v8i1, Custom);
1464 setOperationAction(ISD::TRUNCATE, MVT::v16i1, Custom);
1465 setOperationAction(ISD::TRUNCATE, MVT::v16i16, Custom);
1466 setOperationAction(ISD::ZERO_EXTEND, MVT::v16i32, Custom);
1467 setOperationAction(ISD::ZERO_EXTEND, MVT::v8i64, Custom);
1468 setOperationAction(ISD::SIGN_EXTEND, MVT::v16i32, Custom);
1469 setOperationAction(ISD::SIGN_EXTEND, MVT::v8i64, Custom);
1470 setOperationAction(ISD::SIGN_EXTEND, MVT::v16i8, Custom);
1471 setOperationAction(ISD::SIGN_EXTEND, MVT::v8i16, Custom);
1472 setOperationAction(ISD::SIGN_EXTEND, MVT::v16i16, Custom);
1474 setOperationAction(ISD::FFLOOR, MVT::v16f32, Legal);
1475 setOperationAction(ISD::FFLOOR, MVT::v8f64, Legal);
1476 setOperationAction(ISD::FCEIL, MVT::v16f32, Legal);
1477 setOperationAction(ISD::FCEIL, MVT::v8f64, Legal);
1478 setOperationAction(ISD::FTRUNC, MVT::v16f32, Legal);
1479 setOperationAction(ISD::FTRUNC, MVT::v8f64, Legal);
1480 setOperationAction(ISD::FRINT, MVT::v16f32, Legal);
1481 setOperationAction(ISD::FRINT, MVT::v8f64, Legal);
1482 setOperationAction(ISD::FNEARBYINT, MVT::v16f32, Legal);
1483 setOperationAction(ISD::FNEARBYINT, MVT::v8f64, Legal);
1485 setOperationAction(ISD::CONCAT_VECTORS, MVT::v8f64, Custom);
1486 setOperationAction(ISD::CONCAT_VECTORS, MVT::v8i64, Custom);
1487 setOperationAction(ISD::CONCAT_VECTORS, MVT::v16f32, Custom);
1488 setOperationAction(ISD::CONCAT_VECTORS, MVT::v16i32, Custom);
1489 setOperationAction(ISD::CONCAT_VECTORS, MVT::v8i1, Custom);
1490 setOperationAction(ISD::CONCAT_VECTORS, MVT::v16i1, Legal);
1492 setOperationAction(ISD::SETCC, MVT::v16i1, Custom);
1493 setOperationAction(ISD::SETCC, MVT::v8i1, Custom);
1495 setOperationAction(ISD::MUL, MVT::v8i64, Custom);
1497 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v8i1, Custom);
1498 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v16i1, Custom);
1499 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v16i1, Custom);
1500 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i1, Custom);
1501 setOperationAction(ISD::BUILD_VECTOR, MVT::v8i1, Custom);
1502 setOperationAction(ISD::BUILD_VECTOR, MVT::v16i1, Custom);
1503 setOperationAction(ISD::SELECT, MVT::v8f64, Custom);
1504 setOperationAction(ISD::SELECT, MVT::v8i64, Custom);
1505 setOperationAction(ISD::SELECT, MVT::v16f32, Custom);
1507 setOperationAction(ISD::ADD, MVT::v8i64, Legal);
1508 setOperationAction(ISD::ADD, MVT::v16i32, Legal);
1510 setOperationAction(ISD::SUB, MVT::v8i64, Legal);
1511 setOperationAction(ISD::SUB, MVT::v16i32, Legal);
1513 setOperationAction(ISD::MUL, MVT::v16i32, Legal);
1515 setOperationAction(ISD::SRL, MVT::v8i64, Custom);
1516 setOperationAction(ISD::SRL, MVT::v16i32, Custom);
1518 setOperationAction(ISD::SHL, MVT::v8i64, Custom);
1519 setOperationAction(ISD::SHL, MVT::v16i32, Custom);
1521 setOperationAction(ISD::SRA, MVT::v8i64, Custom);
1522 setOperationAction(ISD::SRA, MVT::v16i32, Custom);
1524 setOperationAction(ISD::AND, MVT::v8i64, Legal);
1525 setOperationAction(ISD::OR, MVT::v8i64, Legal);
1526 setOperationAction(ISD::XOR, MVT::v8i64, Legal);
1527 setOperationAction(ISD::AND, MVT::v16i32, Legal);
1528 setOperationAction(ISD::OR, MVT::v16i32, Legal);
1529 setOperationAction(ISD::XOR, MVT::v16i32, Legal);
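// Informal note: with AVX-512 these logic ops map directly onto the
// VPANDD/VPANDQ, VPORD/VPORQ and VPXORD/VPXORQ forms, so the 512-bit types
// need no custom handling here.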
1531 if (Subtarget->hasCDI()) {
1532 setOperationAction(ISD::CTLZ, MVT::v8i64, Legal);
1533 setOperationAction(ISD::CTLZ, MVT::v16i32, Legal);
1536 // Custom lower several nodes.
1537 for (MVT VT : MVT::vector_valuetypes()) {
1538 unsigned EltSize = VT.getVectorElementType().getSizeInBits();
1539 // Extract subvector is special because the value type
1540 // (result) is 256/128-bit but the source is 512-bit wide.
1541 if (VT.is128BitVector() || VT.is256BitVector()) {
1542 setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
1544 if (VT.getVectorElementType() == MVT::i1)
1545 setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Legal);
1547 // Do not attempt to custom lower other non-512-bit vectors
1548 if (!VT.is512BitVector())
1551 if (EltSize >= 32) {
1552 setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
1553 setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
1554 setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
1555 setOperationAction(ISD::VSELECT, VT, Legal);
1556 setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
1557 setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Custom);
1558 setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
1559 setOperationAction(ISD::MLOAD, VT, Legal);
1560 setOperationAction(ISD::MSTORE, VT, Legal);
1563 for (int i = MVT::v32i8; i != MVT::v8i64; ++i) {
1564 MVT VT = (MVT::SimpleValueType)i;
1566 // Do not attempt to promote non-512-bit vectors.
1567 if (!VT.is512BitVector())
1570 setOperationAction(ISD::SELECT, VT, Promote);
1571 AddPromotedToType (ISD::SELECT, VT, MVT::v8i64);
1575 if (!TM.Options.UseSoftFloat && Subtarget->hasBWI()) {
1576 addRegisterClass(MVT::v32i16, &X86::VR512RegClass);
1577 addRegisterClass(MVT::v64i8, &X86::VR512RegClass);
1579 addRegisterClass(MVT::v32i1, &X86::VK32RegClass);
1580 addRegisterClass(MVT::v64i1, &X86::VK64RegClass);
1582 setOperationAction(ISD::LOAD, MVT::v32i16, Legal);
1583 setOperationAction(ISD::LOAD, MVT::v64i8, Legal);
1584 setOperationAction(ISD::SETCC, MVT::v32i1, Custom);
1585 setOperationAction(ISD::SETCC, MVT::v64i1, Custom);
1586 setOperationAction(ISD::ADD, MVT::v32i16, Legal);
1587 setOperationAction(ISD::ADD, MVT::v64i8, Legal);
1588 setOperationAction(ISD::SUB, MVT::v32i16, Legal);
1589 setOperationAction(ISD::SUB, MVT::v64i8, Legal);
1590 setOperationAction(ISD::MUL, MVT::v32i16, Legal);
1592 for (int i = MVT::v32i8; i != MVT::v8i64; ++i) {
1593 const MVT VT = (MVT::SimpleValueType)i;
1595 const unsigned EltSize = VT.getVectorElementType().getSizeInBits();
1597 // Do not attempt to promote non-512-bit vectors.
1598 if (!VT.is512BitVector())
1602 setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
1603 setOperationAction(ISD::VSELECT, VT, Legal);
1608 if (!TM.Options.UseSoftFloat && Subtarget->hasVLX()) {
1609 addRegisterClass(MVT::v4i1, &X86::VK4RegClass);
1610 addRegisterClass(MVT::v2i1, &X86::VK2RegClass);
1612 setOperationAction(ISD::SETCC, MVT::v4i1, Custom);
1613 setOperationAction(ISD::SETCC, MVT::v2i1, Custom);
1614 setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v8i1, Legal);
1616 setOperationAction(ISD::AND, MVT::v8i32, Legal);
1617 setOperationAction(ISD::OR, MVT::v8i32, Legal);
1618 setOperationAction(ISD::XOR, MVT::v8i32, Legal);
1619 setOperationAction(ISD::AND, MVT::v4i32, Legal);
1620 setOperationAction(ISD::OR, MVT::v4i32, Legal);
1621 setOperationAction(ISD::XOR, MVT::v4i32, Legal);
1624 // SIGN_EXTEND_INREGs are evaluated by the extend type. Handle the expansion
1625 // of this type with custom code.
1626 for (MVT VT : MVT::vector_valuetypes())
1627 setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Custom);
1629 // We want to custom lower some of our intrinsics.
1630 setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
1631 setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);
1632 setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);
1633 if (!Subtarget->is64Bit())
1634 setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i64, Custom);
1636 // Only custom-lower 64-bit SADDO and friends on 64-bit because we don't
1637 // handle type legalization for these operations here.
1639 // FIXME: We really should do custom legalization for addition and
1640 // subtraction on x86-32 once PR3203 is fixed. We really can't do much better
1641 // than generic legalization for 64-bit multiplication-with-overflow, though.
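// Rough illustration of what "custom lowered" means for these: an
// @llvm.sadd.with.overflow.i32 is typically turned into an ADD that also
// produces EFLAGS plus a read of the overflow flag (SETO-style), rather than
// a compare against a widened result (sketch; see the actual lowering for
// the exact nodes).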
1642 for (unsigned i = 0, e = 3+Subtarget->is64Bit(); i != e; ++i) {
1643 // Add/Sub/Mul with overflow operations are custom lowered.
1645 setOperationAction(ISD::SADDO, VT, Custom);
1646 setOperationAction(ISD::UADDO, VT, Custom);
1647 setOperationAction(ISD::SSUBO, VT, Custom);
1648 setOperationAction(ISD::USUBO, VT, Custom);
1649 setOperationAction(ISD::SMULO, VT, Custom);
1650 setOperationAction(ISD::UMULO, VT, Custom);
1654 if (!Subtarget->is64Bit()) {
1655 // These libcalls are not available in 32-bit.
1656 setLibcallName(RTLIB::SHL_I128, nullptr);
1657 setLibcallName(RTLIB::SRL_I128, nullptr);
1658 setLibcallName(RTLIB::SRA_I128, nullptr);
1661 // Combine sin / cos into one node or libcall if possible.
1662 if (Subtarget->hasSinCos()) {
1663 setLibcallName(RTLIB::SINCOS_F32, "sincosf");
1664 setLibcallName(RTLIB::SINCOS_F64, "sincos");
1665 if (Subtarget->isTargetDarwin()) {
1666 // For MacOSX, we don't want the normal expansion of a libcall to sincos.
1667 // We want to issue a libcall to __sincos_stret to avoid memory traffic.
1668 setOperationAction(ISD::FSINCOS, MVT::f64, Custom);
1669 setOperationAction(ISD::FSINCOS, MVT::f32, Custom);
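// Informal example (the register detail is an assumption, see the lowering
// code): __sincos_stret returns both results from one call; for double the
// {sin, cos} pair typically comes back in XMM0/XMM1 on x86-64, so a single
// call replaces a separate sin and cos pair.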
1673 if (Subtarget->isTargetWin64()) {
1674 setOperationAction(ISD::SDIV, MVT::i128, Custom);
1675 setOperationAction(ISD::UDIV, MVT::i128, Custom);
1676 setOperationAction(ISD::SREM, MVT::i128, Custom);
1677 setOperationAction(ISD::UREM, MVT::i128, Custom);
1678 setOperationAction(ISD::SDIVREM, MVT::i128, Custom);
1679 setOperationAction(ISD::UDIVREM, MVT::i128, Custom);
1682 // We have target-specific dag combine patterns for the following nodes:
1683 setTargetDAGCombine(ISD::VECTOR_SHUFFLE);
1684 setTargetDAGCombine(ISD::EXTRACT_VECTOR_ELT);
1685 setTargetDAGCombine(ISD::BITCAST);
1686 setTargetDAGCombine(ISD::VSELECT);
1687 setTargetDAGCombine(ISD::SELECT);
1688 setTargetDAGCombine(ISD::SHL);
1689 setTargetDAGCombine(ISD::SRA);
1690 setTargetDAGCombine(ISD::SRL);
1691 setTargetDAGCombine(ISD::OR);
1692 setTargetDAGCombine(ISD::AND);
1693 setTargetDAGCombine(ISD::ADD);
1694 setTargetDAGCombine(ISD::FADD);
1695 setTargetDAGCombine(ISD::FSUB);
1696 setTargetDAGCombine(ISD::FMA);
1697 setTargetDAGCombine(ISD::SUB);
1698 setTargetDAGCombine(ISD::LOAD);
1699 setTargetDAGCombine(ISD::MLOAD);
1700 setTargetDAGCombine(ISD::STORE);
1701 setTargetDAGCombine(ISD::MSTORE);
1702 setTargetDAGCombine(ISD::ZERO_EXTEND);
1703 setTargetDAGCombine(ISD::ANY_EXTEND);
1704 setTargetDAGCombine(ISD::SIGN_EXTEND);
1705 setTargetDAGCombine(ISD::SIGN_EXTEND_INREG);
1706 setTargetDAGCombine(ISD::TRUNCATE);
1707 setTargetDAGCombine(ISD::SINT_TO_FP);
1708 setTargetDAGCombine(ISD::SETCC);
1709 setTargetDAGCombine(ISD::INTRINSIC_WO_CHAIN);
1710 setTargetDAGCombine(ISD::BUILD_VECTOR);
1711 setTargetDAGCombine(ISD::MUL);
1712 setTargetDAGCombine(ISD::XOR);
1714 computeRegisterProperties();
1716 // On Darwin, -Os means optimize for size without hurting performance, so
1717 // do not reduce the limit.
1718 MaxStoresPerMemset = 16; // For @llvm.memset -> sequence of stores
1719 MaxStoresPerMemsetOptSize = Subtarget->isTargetDarwin() ? 16 : 8;
1720 MaxStoresPerMemcpy = 8; // For @llvm.memcpy -> sequence of stores
1721 MaxStoresPerMemcpyOptSize = Subtarget->isTargetDarwin() ? 8 : 4;
1722 MaxStoresPerMemmove = 8; // For @llvm.memmove -> sequence of stores
1723 MaxStoresPerMemmoveOptSize = Subtarget->isTargetDarwin() ? 8 : 4;
1724 setPrefLoopAlignment(4); // 2^4 bytes.
1726 // Predictable cmovs don't hurt on Atom because it's in-order.
1727 PredictableSelectIsExpensive = !Subtarget->isAtom();
1728 EnableExtLdPromotion = true;
1729 setPrefFunctionAlignment(4); // 2^4 bytes.
1731 verifyIntrinsicTables();
1734 // This has so far only been implemented for 64-bit MachO.
1735 bool X86TargetLowering::useLoadStackGuardNode() const {
1736 return Subtarget->isTargetMachO() && Subtarget->is64Bit();
1739 TargetLoweringBase::LegalizeTypeAction
1740 X86TargetLowering::getPreferredVectorAction(EVT VT) const {
1741 if (ExperimentalVectorWideningLegalization &&
1742 VT.getVectorNumElements() != 1 &&
1743 VT.getVectorElementType().getSimpleVT() != MVT::i1)
1744 return TypeWidenVector;
1746 return TargetLoweringBase::getPreferredVectorAction(VT);
1749 EVT X86TargetLowering::getSetCCResultType(LLVMContext &, EVT VT) const {
1751 return Subtarget->hasAVX512() ? MVT::i1: MVT::i8;
1753 const unsigned NumElts = VT.getVectorNumElements();
1754 const EVT EltVT = VT.getVectorElementType();
1755 if (VT.is512BitVector()) {
1756 if (Subtarget->hasAVX512())
1757 if (EltVT == MVT::i32 || EltVT == MVT::i64 ||
1758 EltVT == MVT::f32 || EltVT == MVT::f64)
1760 case 8: return MVT::v8i1;
1761 case 16: return MVT::v16i1;
1763 if (Subtarget->hasBWI())
1764 if (EltVT == MVT::i8 || EltVT == MVT::i16)
1766 case 32: return MVT::v32i1;
1767 case 64: return MVT::v64i1;
1771 if (VT.is256BitVector() || VT.is128BitVector()) {
1772 if (Subtarget->hasVLX())
1773 if (EltVT == MVT::i32 || EltVT == MVT::i64 ||
1774 EltVT == MVT::f32 || EltVT == MVT::f64)
1776 case 2: return MVT::v2i1;
1777 case 4: return MVT::v4i1;
1778 case 8: return MVT::v8i1;
1780 if (Subtarget->hasBWI() && Subtarget->hasVLX())
1781 if (EltVT == MVT::i8 || EltVT == MVT::i16)
1783 case 8: return MVT::v8i1;
1784 case 16: return MVT::v16i1;
1785 case 32: return MVT::v32i1;
1789 return VT.changeVectorElementTypeToInteger();
1792 /// Helper for getByValTypeAlignment to determine
1793 /// the desired ByVal argument alignment.
1794 static void getMaxByValAlign(Type *Ty, unsigned &MaxAlign) {
1797 if (VectorType *VTy = dyn_cast<VectorType>(Ty)) {
1798 if (VTy->getBitWidth() == 128)
1800 } else if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
1801 unsigned EltAlign = 0;
1802 getMaxByValAlign(ATy->getElementType(), EltAlign);
1803 if (EltAlign > MaxAlign)
1804 MaxAlign = EltAlign;
1805 } else if (StructType *STy = dyn_cast<StructType>(Ty)) {
1806 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
1807 unsigned EltAlign = 0;
1808 getMaxByValAlign(STy->getElementType(i), EltAlign);
1809 if (EltAlign > MaxAlign)
1810 MaxAlign = EltAlign;
1817 /// Return the desired alignment for ByVal aggregate
1818 /// function arguments in the caller parameter area. For X86, aggregates
1819 /// that contain SSE vectors are placed at 16-byte boundaries while the rest
1820 /// are at 4-byte boundaries.
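/// For example, a byval struct containing a <4 x float> member is placed at
/// a 16-byte boundary when SSE1 is available, whereas a plain pair of ints
/// keeps the 4-byte default on 32-bit targets (illustrative case).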
1821 unsigned X86TargetLowering::getByValTypeAlignment(Type *Ty) const {
1822 if (Subtarget->is64Bit()) {
1823 // Max of 8 and alignment of type.
1824 unsigned TyAlign = TD->getABITypeAlignment(Ty);
1831 if (Subtarget->hasSSE1())
1832 getMaxByValAlign(Ty, Align);
1836 /// Returns the target specific optimal type for load
1837 /// and store operations as a result of memset, memcpy, and memmove
1838 /// lowering. If DstAlign is zero, the destination alignment can satisfy any
1839 /// constraint. Similarly, if SrcAlign is zero there is no need to check it
1840 /// against an alignment requirement,
1841 /// probably because the source does not need to be loaded. If 'IsMemset' is
1842 /// true, that means it's expanding a memset. If 'ZeroMemset' is true, that
1843 /// means it's a memset of zero. 'MemcpyStrSrc' indicates whether the memcpy
1844 /// source is constant so it does not need to be loaded.
1845 /// It returns EVT::Other if the type should be determined using generic
1846 /// target-independent logic.
1848 X86TargetLowering::getOptimalMemOpType(uint64_t Size,
1849 unsigned DstAlign, unsigned SrcAlign,
1850 bool IsMemset, bool ZeroMemset,
1852 MachineFunction &MF) const {
1853 const Function *F = MF.getFunction();
1854 if ((!IsMemset || ZeroMemset) &&
1855 !F->hasFnAttribute(Attribute::NoImplicitFloat)) {
1857 (Subtarget->isUnalignedMemAccessFast() ||
1858 ((DstAlign == 0 || DstAlign >= 16) &&
1859 (SrcAlign == 0 || SrcAlign >= 16)))) {
1861 if (Subtarget->hasInt256())
1863 if (Subtarget->hasFp256())
1866 if (Subtarget->hasSSE2())
1868 if (Subtarget->hasSSE1())
1870 } else if (!MemcpyStrSrc && Size >= 8 &&
1871 !Subtarget->is64Bit() &&
1872 Subtarget->hasSSE2()) {
1873 // Do not use f64 to lower memcpy if the source is a string constant. It's
1874 // better to use i32 to avoid the loads.
1878 if (Subtarget->is64Bit() && Size >= 8)
1883 bool X86TargetLowering::isSafeMemOpType(MVT VT) const {
1885 return X86ScalarSSEf32;
1886 else if (VT == MVT::f64)
1887 return X86ScalarSSEf64;
1892 X86TargetLowering::allowsMisalignedMemoryAccesses(EVT VT,
1897 *Fast = Subtarget->isUnalignedMemAccessFast();
1901 /// Return the entry encoding for a jump table in the
1902 /// current function. The returned value is a member of the
1903 /// MachineJumpTableInfo::JTEntryKind enum.
1904 unsigned X86TargetLowering::getJumpTableEncoding() const {
1905 // In GOT pic mode, each entry in the jump table is emitted as a @GOTOFF symbol.
1907 if (getTargetMachine().getRelocationModel() == Reloc::PIC_ &&
1908 Subtarget->isPICStyleGOT())
1909 return MachineJumpTableInfo::EK_Custom32;
1911 // Otherwise, use the normal jump table encoding heuristics.
1912 return TargetLowering::getJumpTableEncoding();
1916 X86TargetLowering::LowerCustomJumpTableEntry(const MachineJumpTableInfo *MJTI,
1917 const MachineBasicBlock *MBB,
1918 unsigned uid,MCContext &Ctx) const{
1919 assert(MBB->getParent()->getTarget().getRelocationModel() == Reloc::PIC_ &&
1920 Subtarget->isPICStyleGOT());
1921 // In 32-bit ELF systems, our jump table entries are formed with @GOTOFF entries.
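// As a rough example, each emitted table slot then looks like
//   .long .LBB0_7@GOTOFF
// and the jump recovers the absolute address by adding the PIC/GOT base
// register back in at run time (label name is illustrative).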
1923 return MCSymbolRefExpr::Create(MBB->getSymbol(),
1924 MCSymbolRefExpr::VK_GOTOFF, Ctx);
1927 /// Returns relocation base for the given PIC jumptable.
1928 SDValue X86TargetLowering::getPICJumpTableRelocBase(SDValue Table,
1929 SelectionDAG &DAG) const {
1930 if (!Subtarget->is64Bit())
1931 // This doesn't have SDLoc associated with it, but is not really the
1932 // same as a Register.
1933 return DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), getPointerTy());
1937 /// This returns the relocation base for the given PIC jumptable,
1938 /// the same as getPICJumpTableRelocBase, but as an MCExpr.
1939 const MCExpr *X86TargetLowering::
1940 getPICJumpTableRelocBaseExpr(const MachineFunction *MF, unsigned JTI,
1941 MCContext &Ctx) const {
1942 // X86-64 uses RIP relative addressing based on the jump table label.
1943 if (Subtarget->isPICStyleRIPRel())
1944 return TargetLowering::getPICJumpTableRelocBaseExpr(MF, JTI, Ctx);
1946 // Otherwise, the reference is relative to the PIC base.
1947 return MCSymbolRefExpr::Create(MF->getPICBaseSymbol(), Ctx);
1950 // FIXME: Why is this routine here? Move to RegInfo!
1951 std::pair<const TargetRegisterClass*, uint8_t>
1952 X86TargetLowering::findRepresentativeClass(MVT VT) const{
1953 const TargetRegisterClass *RRC = nullptr;
1955 switch (VT.SimpleTy) {
1957 return TargetLowering::findRepresentativeClass(VT);
1958 case MVT::i8: case MVT::i16: case MVT::i32: case MVT::i64:
1959 RRC = Subtarget->is64Bit() ? &X86::GR64RegClass : &X86::GR32RegClass;
1962 RRC = &X86::VR64RegClass;
1964 case MVT::f32: case MVT::f64:
1965 case MVT::v16i8: case MVT::v8i16: case MVT::v4i32: case MVT::v2i64:
1966 case MVT::v4f32: case MVT::v2f64:
1967 case MVT::v32i8: case MVT::v8i32: case MVT::v4i64: case MVT::v8f32:
1969 RRC = &X86::VR128RegClass;
1972 return std::make_pair(RRC, Cost);
1975 bool X86TargetLowering::getStackCookieLocation(unsigned &AddressSpace,
1976 unsigned &Offset) const {
1977 if (!Subtarget->isTargetLinux())
1980 if (Subtarget->is64Bit()) {
1981 // %fs:0x28, unless we're using a Kernel code model, in which case it's %gs:
1983 if (getTargetMachine().getCodeModel() == CodeModel::Kernel)
1995 bool X86TargetLowering::isNoopAddrSpaceCast(unsigned SrcAS,
1996 unsigned DestAS) const {
1997 assert(SrcAS != DestAS && "Expected different address spaces!");
1999 return SrcAS < 256 && DestAS < 256;
2002 //===----------------------------------------------------------------------===//
2003 // Return Value Calling Convention Implementation
2004 //===----------------------------------------------------------------------===//
2006 #include "X86GenCallingConv.inc"
2009 X86TargetLowering::CanLowerReturn(CallingConv::ID CallConv,
2010 MachineFunction &MF, bool isVarArg,
2011 const SmallVectorImpl<ISD::OutputArg> &Outs,
2012 LLVMContext &Context) const {
2013 SmallVector<CCValAssign, 16> RVLocs;
2014 CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context);
2015 return CCInfo.CheckReturn(Outs, RetCC_X86);
2018 const MCPhysReg *X86TargetLowering::getScratchRegisters(CallingConv::ID) const {
2019 static const MCPhysReg ScratchRegs[] = { X86::R11, 0 };
2024 X86TargetLowering::LowerReturn(SDValue Chain,
2025 CallingConv::ID CallConv, bool isVarArg,
2026 const SmallVectorImpl<ISD::OutputArg> &Outs,
2027 const SmallVectorImpl<SDValue> &OutVals,
2028 SDLoc dl, SelectionDAG &DAG) const {
2029 MachineFunction &MF = DAG.getMachineFunction();
2030 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
2032 SmallVector<CCValAssign, 16> RVLocs;
2033 CCState CCInfo(CallConv, isVarArg, MF, RVLocs, *DAG.getContext());
2034 CCInfo.AnalyzeReturn(Outs, RetCC_X86);
2037 SmallVector<SDValue, 6> RetOps;
2038 RetOps.push_back(Chain); // Operand #0 = Chain (updated below)
2039 // Operand #1 = Bytes To Pop
2040 RetOps.push_back(DAG.getTargetConstant(FuncInfo->getBytesToPopOnReturn(),
2043 // Copy the result values into the output registers.
2044 for (unsigned i = 0; i != RVLocs.size(); ++i) {
2045 CCValAssign &VA = RVLocs[i];
2046 assert(VA.isRegLoc() && "Can only return in registers!");
2047 SDValue ValToCopy = OutVals[i];
2048 EVT ValVT = ValToCopy.getValueType();
2050 // Promote values to the appropriate types.
2051 if (VA.getLocInfo() == CCValAssign::SExt)
2052 ValToCopy = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), ValToCopy);
2053 else if (VA.getLocInfo() == CCValAssign::ZExt)
2054 ValToCopy = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), ValToCopy);
2055 else if (VA.getLocInfo() == CCValAssign::AExt)
2056 ValToCopy = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), ValToCopy);
2057 else if (VA.getLocInfo() == CCValAssign::BCvt)
2058 ValToCopy = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), ValToCopy);
2060 assert(VA.getLocInfo() != CCValAssign::FPExt &&
2061 "Unexpected FP-extend for return value.");
2063 // If this is x86-64, and we disabled SSE, we can't return FP values,
2064 // or SSE or MMX vectors.
2065 if ((ValVT == MVT::f32 || ValVT == MVT::f64 ||
2066 VA.getLocReg() == X86::XMM0 || VA.getLocReg() == X86::XMM1) &&
2067 (Subtarget->is64Bit() && !Subtarget->hasSSE1())) {
2068 report_fatal_error("SSE register return with SSE disabled");
2070 // Likewise we can't return F64 values with SSE1 only. gcc does so, but
2071 // llvm-gcc has never done it right and no one has noticed, so this
2072 // should be OK for now.
2073 if (ValVT == MVT::f64 &&
2074 (Subtarget->is64Bit() && !Subtarget->hasSSE2()))
2075 report_fatal_error("SSE2 register return with SSE2 disabled");
2077 // Returns in ST0/ST1 are handled specially: these are pushed as operands to
2078 // the RET instruction and handled by the FP Stackifier.
2079 if (VA.getLocReg() == X86::FP0 ||
2080 VA.getLocReg() == X86::FP1) {
2081 // If this is a copy from an xmm register to ST(0), use an FPExtend to
2082 // change the value to the FP stack register class.
2083 if (isScalarFPTypeInSSEReg(VA.getValVT()))
2084 ValToCopy = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f80, ValToCopy);
2085 RetOps.push_back(ValToCopy);
2086 // Don't emit a copytoreg.
2090 // 64-bit vector (MMX) values are returned in XMM0 / XMM1 except for v1i64
2091 // which is returned in RAX / RDX.
2092 if (Subtarget->is64Bit()) {
2093 if (ValVT == MVT::x86mmx) {
2094 if (VA.getLocReg() == X86::XMM0 || VA.getLocReg() == X86::XMM1) {
2095 ValToCopy = DAG.getNode(ISD::BITCAST, dl, MVT::i64, ValToCopy);
2096 ValToCopy = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64,
2098 // If we don't have SSE2 available, convert to v4f32 so the generated
2099 // register is legal.
2100 if (!Subtarget->hasSSE2())
2101 ValToCopy = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32,ValToCopy);
2106 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), ValToCopy, Flag);
2107 Flag = Chain.getValue(1);
2108 RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
2111 // The x86-64 ABIs require that for returning structs by value we copy
2112 // the sret argument into %rax/%eax (depending on ABI) for the return.
2113 // Win32 requires us to put the sret argument into %eax as well.
2114 // We saved the argument into a virtual register in the entry block,
2115 // so now we copy the value out and into %rax/%eax.
2117 // Checking Function.hasStructRetAttr() here is insufficient because the IR
2118 // may not have an explicit sret argument. If FuncInfo.CanLowerReturn is
2119 // false, then an sret argument may be implicitly inserted in the SelDAG. In
2120 // either case FuncInfo->setSRetReturnReg() will have been called.
2121 if (unsigned SRetReg = FuncInfo->getSRetReturnReg()) {
2122 assert((Subtarget->is64Bit() || Subtarget->isTargetKnownWindowsMSVC()) &&
2123 "No need for an sret register");
2124 SDValue Val = DAG.getCopyFromReg(Chain, dl, SRetReg, getPointerTy());
2127 = (Subtarget->is64Bit() && !Subtarget->isTarget64BitILP32()) ?
2128 X86::RAX : X86::EAX;
2129 Chain = DAG.getCopyToReg(Chain, dl, RetValReg, Val, Flag);
2130 Flag = Chain.getValue(1);
2132 // RAX/EAX now acts like a return value.
2133 RetOps.push_back(DAG.getRegister(RetValReg, getPointerTy()));
2136 RetOps[0] = Chain; // Update chain.
2138 // Add the flag if we have it.
2140 RetOps.push_back(Flag);
2142 return DAG.getNode(X86ISD::RET_FLAG, dl, MVT::Other, RetOps);
2145 bool X86TargetLowering::isUsedByReturnOnly(SDNode *N, SDValue &Chain) const {
2146 if (N->getNumValues() != 1)
2148 if (!N->hasNUsesOfValue(1, 0))
2151 SDValue TCChain = Chain;
2152 SDNode *Copy = *N->use_begin();
2153 if (Copy->getOpcode() == ISD::CopyToReg) {
2154 // If the copy has a glue operand, we conservatively assume it isn't safe to
2155 // perform a tail call.
2156 if (Copy->getOperand(Copy->getNumOperands()-1).getValueType() == MVT::Glue)
2158 TCChain = Copy->getOperand(0);
2159 } else if (Copy->getOpcode() != ISD::FP_EXTEND)
2162 bool HasRet = false;
2163 for (SDNode::use_iterator UI = Copy->use_begin(), UE = Copy->use_end();
2165 if (UI->getOpcode() != X86ISD::RET_FLAG)
2167 // If we are returning more than one value, we can definitely
2168 // not make a tail call; see PR19530.
2169 if (UI->getNumOperands() > 4)
2171 if (UI->getNumOperands() == 4 &&
2172 UI->getOperand(UI->getNumOperands()-1).getValueType() != MVT::Glue)
2185 X86TargetLowering::getTypeForExtArgOrReturn(LLVMContext &Context, EVT VT,
2186 ISD::NodeType ExtendKind) const {
2188 // TODO: Is this also valid on 32-bit?
2189 if (Subtarget->is64Bit() && VT == MVT::i1 && ExtendKind == ISD::ZERO_EXTEND)
2190 ReturnMVT = MVT::i8;
2192 ReturnMVT = MVT::i32;
2194 EVT MinVT = getRegisterType(Context, ReturnMVT);
2195 return VT.bitsLT(MinVT) ? MinVT : VT;
2198 /// Lower the result values of a call into the
2199 /// appropriate copies out of appropriate physical registers.
2202 X86TargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
2203 CallingConv::ID CallConv, bool isVarArg,
2204 const SmallVectorImpl<ISD::InputArg> &Ins,
2205 SDLoc dl, SelectionDAG &DAG,
2206 SmallVectorImpl<SDValue> &InVals) const {
2208 // Assign locations to each value returned by this call.
2209 SmallVector<CCValAssign, 16> RVLocs;
2210 bool Is64Bit = Subtarget->is64Bit();
2211 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
2213 CCInfo.AnalyzeCallResult(Ins, RetCC_X86);
2215 // Copy all of the result registers out of their specified physreg.
2216 for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
2217 CCValAssign &VA = RVLocs[i];
2218 EVT CopyVT = VA.getValVT();
2220 // If this is x86-64, and we disabled SSE, we can't return FP values
2221 if ((CopyVT == MVT::f32 || CopyVT == MVT::f64) &&
2222 ((Is64Bit || Ins[i].Flags.isInReg()) && !Subtarget->hasSSE1())) {
2223 report_fatal_error("SSE register return with SSE disabled");
2226 // If we prefer to use the value in xmm registers, copy it out as f80 and
2227 // use a truncate to move it from fp stack reg to xmm reg.
2228 if ((VA.getLocReg() == X86::FP0 || VA.getLocReg() == X86::FP1) &&
2229 isScalarFPTypeInSSEReg(VA.getValVT()))
2232 Chain = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(),
2233 CopyVT, InFlag).getValue(1);
2234 SDValue Val = Chain.getValue(0);
2236 if (CopyVT != VA.getValVT())
2237 Val = DAG.getNode(ISD::FP_ROUND, dl, VA.getValVT(), Val,
2238 // This truncation won't change the value.
2239 DAG.getIntPtrConstant(1));
2241 InFlag = Chain.getValue(2);
2242 InVals.push_back(Val);
2248 //===----------------------------------------------------------------------===//
2249 // C & StdCall & Fast Calling Convention implementation
2250 //===----------------------------------------------------------------------===//
2251 // The StdCall calling convention seems to be standard for many Windows API
2252 // routines. It differs from the C calling convention just a little: the
2253 // callee should clean up the stack, not the caller. Symbols should also be
2254 // decorated in some fancy way :) It doesn't support any vector arguments.
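// For example (informal): a stdcall function taking two i32 arguments
// returns with "ret 8", popping its own arguments, whereas the cdecl
// equivalent returns with a plain "ret" and leaves the cleanup to the caller.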
2255 // For info on fast calling convention see Fast Calling Convention (tail call)
2256 // implementation LowerX86_32FastCCCallTo.
2258 /// CallIsStructReturn - Determines whether a call uses struct return semantics.
2260 enum StructReturnType {
2265 static StructReturnType
2266 callIsStructReturn(const SmallVectorImpl<ISD::OutputArg> &Outs) {
2268 return NotStructReturn;
2270 const ISD::ArgFlagsTy &Flags = Outs[0].Flags;
2271 if (!Flags.isSRet())
2272 return NotStructReturn;
2273 if (Flags.isInReg())
2274 return RegStructReturn;
2275 return StackStructReturn;
2278 /// Determines whether a function uses struct return semantics.
2279 static StructReturnType
2280 argsAreStructReturn(const SmallVectorImpl<ISD::InputArg> &Ins) {
2282 return NotStructReturn;
2284 const ISD::ArgFlagsTy &Flags = Ins[0].Flags;
2285 if (!Flags.isSRet())
2286 return NotStructReturn;
2287 if (Flags.isInReg())
2288 return RegStructReturn;
2289 return StackStructReturn;
2292 /// Make a copy of an aggregate at the address specified by "Src" to the
2293 /// address "Dst", with size and alignment information specified by the
2294 /// specific parameter attribute. The copy will be passed as a byval function parameter.
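/// For instance, passing a 24-byte struct byval ends up here as a single
/// 24-byte inline memcpy into the outgoing argument slot; the size and
/// alignment come from Flags.getByValSize()/getByValAlign() below (the size
/// is illustrative).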
2296 CreateCopyOfByValArgument(SDValue Src, SDValue Dst, SDValue Chain,
2297 ISD::ArgFlagsTy Flags, SelectionDAG &DAG,
2299 SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), MVT::i32);
2301 return DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode, Flags.getByValAlign(),
2302 /*isVolatile*/false, /*AlwaysInline=*/true,
2303 MachinePointerInfo(), MachinePointerInfo());
2306 /// Return true if the calling convention is one that
2307 /// supports tail call optimization.
2308 static bool IsTailCallConvention(CallingConv::ID CC) {
2309 return (CC == CallingConv::Fast || CC == CallingConv::GHC ||
2310 CC == CallingConv::HiPE);
2313 /// \brief Return true if the calling convention is a C calling convention.
2314 static bool IsCCallConvention(CallingConv::ID CC) {
2315 return (CC == CallingConv::C || CC == CallingConv::X86_64_Win64 ||
2316 CC == CallingConv::X86_64_SysV);
2319 bool X86TargetLowering::mayBeEmittedAsTailCall(CallInst *CI) const {
2320 if (!CI->isTailCall() || getTargetMachine().Options.DisableTailCalls)
2324 CallingConv::ID CalleeCC = CS.getCallingConv();
2325 if (!IsTailCallConvention(CalleeCC) && !IsCCallConvention(CalleeCC))
2331 /// Return true if the function is being made into
2332 /// a tailcall target by changing its ABI.
2333 static bool FuncIsMadeTailCallSafe(CallingConv::ID CC,
2334 bool GuaranteedTailCallOpt) {
2335 return GuaranteedTailCallOpt && IsTailCallConvention(CC);
2339 X86TargetLowering::LowerMemArgument(SDValue Chain,
2340 CallingConv::ID CallConv,
2341 const SmallVectorImpl<ISD::InputArg> &Ins,
2342 SDLoc dl, SelectionDAG &DAG,
2343 const CCValAssign &VA,
2344 MachineFrameInfo *MFI,
2346 // Create the nodes corresponding to a load from this parameter slot.
2347 ISD::ArgFlagsTy Flags = Ins[i].Flags;
2348 bool AlwaysUseMutable = FuncIsMadeTailCallSafe(
2349 CallConv, DAG.getTarget().Options.GuaranteedTailCallOpt);
2350 bool isImmutable = !AlwaysUseMutable && !Flags.isByVal();
2353 // If the value is passed by pointer, we have the address passed instead of the value itself.
2355 if (VA.getLocInfo() == CCValAssign::Indirect)
2356 ValVT = VA.getLocVT();
2358 ValVT = VA.getValVT();
2360 // FIXME: For now, all byval parameter objects are marked mutable. This can be
2361 // changed with more analysis.
2362 // In case of tail call optimization, mark all arguments mutable, since they
2363 // could be overwritten by the lowering of arguments in case of a tail call.
2364 if (Flags.isByVal()) {
2365 unsigned Bytes = Flags.getByValSize();
2366 if (Bytes == 0) Bytes = 1; // Don't create zero-sized stack objects.
2367 int FI = MFI->CreateFixedObject(Bytes, VA.getLocMemOffset(), isImmutable);
2368 return DAG.getFrameIndex(FI, getPointerTy());
2370 int FI = MFI->CreateFixedObject(ValVT.getSizeInBits()/8,
2371 VA.getLocMemOffset(), isImmutable);
2372 SDValue FIN = DAG.getFrameIndex(FI, getPointerTy());
2373 return DAG.getLoad(ValVT, dl, Chain, FIN,
2374 MachinePointerInfo::getFixedStack(FI),
2375 false, false, false, 0);
2379 // FIXME: Get this from tablegen.
2380 static ArrayRef<MCPhysReg> get64BitArgumentGPRs(CallingConv::ID CallConv,
2381 const X86Subtarget *Subtarget) {
2382 assert(Subtarget->is64Bit());
2384 if (Subtarget->isCallingConvWin64(CallConv)) {
2385 static const MCPhysReg GPR64ArgRegsWin64[] = {
2386 X86::RCX, X86::RDX, X86::R8, X86::R9
2388 return makeArrayRef(std::begin(GPR64ArgRegsWin64), std::end(GPR64ArgRegsWin64));
2391 static const MCPhysReg GPR64ArgRegs64Bit[] = {
2392 X86::RDI, X86::RSI, X86::RDX, X86::RCX, X86::R8, X86::R9
2394 return makeArrayRef(std::begin(GPR64ArgRegs64Bit), std::end(GPR64ArgRegs64Bit));
2397 // FIXME: Get this from tablegen.
2398 static ArrayRef<MCPhysReg> get64BitArgumentXMMs(MachineFunction &MF,
2399 CallingConv::ID CallConv,
2400 const X86Subtarget *Subtarget) {
2401 assert(Subtarget->is64Bit());
2402 if (Subtarget->isCallingConvWin64(CallConv)) {
2403 // The XMM registers which might contain var arg parameters are shadowed
2404 // in their paired GPR. So we only need to save the GPRs to their home slots.
2406 // TODO: __vectorcall will change this.
2410 const Function *Fn = MF.getFunction();
2411 bool NoImplicitFloatOps = Fn->hasFnAttribute(Attribute::NoImplicitFloat);
2412 assert(!(MF.getTarget().Options.UseSoftFloat && NoImplicitFloatOps) &&
2413 "SSE register cannot be used when SSE is disabled!");
2414 if (MF.getTarget().Options.UseSoftFloat || NoImplicitFloatOps ||
2415 !Subtarget->hasSSE1())
2416 // Kernel mode asks for SSE to be disabled, so there are no XMM argument registers.
2420 static const MCPhysReg XMMArgRegs64Bit[] = {
2421 X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
2422 X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7
2424 return makeArrayRef(std::begin(XMMArgRegs64Bit), std::end(XMMArgRegs64Bit));
2428 X86TargetLowering::LowerFormalArguments(SDValue Chain,
2429 CallingConv::ID CallConv,
2431 const SmallVectorImpl<ISD::InputArg> &Ins,
2434 SmallVectorImpl<SDValue> &InVals)
2436 MachineFunction &MF = DAG.getMachineFunction();
2437 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
2439 const Function* Fn = MF.getFunction();
2440 if (Fn->hasExternalLinkage() &&
2441 Subtarget->isTargetCygMing() &&
2442 Fn->getName() == "main")
2443 FuncInfo->setForceFramePointer(true);
2445 MachineFrameInfo *MFI = MF.getFrameInfo();
2446 bool Is64Bit = Subtarget->is64Bit();
2447 bool IsWin64 = Subtarget->isCallingConvWin64(CallConv);
2449 assert(!(isVarArg && IsTailCallConvention(CallConv)) &&
2450 "Var args not supported with calling convention fastcc, ghc or hipe");
2452 // Assign locations to all of the incoming arguments.
2453 SmallVector<CCValAssign, 16> ArgLocs;
2454 CCState CCInfo(CallConv, isVarArg, MF, ArgLocs, *DAG.getContext());
2456 // Allocate shadow area for Win64
2458 CCInfo.AllocateStack(32, 8);
2460 CCInfo.AnalyzeFormalArguments(Ins, CC_X86);
2462 unsigned LastVal = ~0U;
2464 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
2465 CCValAssign &VA = ArgLocs[i];
2466 // TODO: If an arg is passed in two places (e.g. reg and stack), skip later
2468 assert(VA.getValNo() != LastVal &&
2469 "Don't support value assigned to multiple locs yet");
2471 LastVal = VA.getValNo();
2473 if (VA.isRegLoc()) {
2474 EVT RegVT = VA.getLocVT();
2475 const TargetRegisterClass *RC;
2476 if (RegVT == MVT::i32)
2477 RC = &X86::GR32RegClass;
2478 else if (Is64Bit && RegVT == MVT::i64)
2479 RC = &X86::GR64RegClass;
2480 else if (RegVT == MVT::f32)
2481 RC = &X86::FR32RegClass;
2482 else if (RegVT == MVT::f64)
2483 RC = &X86::FR64RegClass;
2484 else if (RegVT.is512BitVector())
2485 RC = &X86::VR512RegClass;
2486 else if (RegVT.is256BitVector())
2487 RC = &X86::VR256RegClass;
2488 else if (RegVT.is128BitVector())
2489 RC = &X86::VR128RegClass;
2490 else if (RegVT == MVT::x86mmx)
2491 RC = &X86::VR64RegClass;
2492 else if (RegVT == MVT::i1)
2493 RC = &X86::VK1RegClass;
2494 else if (RegVT == MVT::v8i1)
2495 RC = &X86::VK8RegClass;
2496 else if (RegVT == MVT::v16i1)
2497 RC = &X86::VK16RegClass;
2498 else if (RegVT == MVT::v32i1)
2499 RC = &X86::VK32RegClass;
2500 else if (RegVT == MVT::v64i1)
2501 RC = &X86::VK64RegClass;
2503 llvm_unreachable("Unknown argument type!");
2505 unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);
2506 ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, RegVT);
2508 // If this is an 8 or 16-bit value, it is really passed promoted to 32
2509 // bits. Insert an assert[sz]ext to capture this, then truncate to the right size.
2511 if (VA.getLocInfo() == CCValAssign::SExt)
2512 ArgValue = DAG.getNode(ISD::AssertSext, dl, RegVT, ArgValue,
2513 DAG.getValueType(VA.getValVT()));
2514 else if (VA.getLocInfo() == CCValAssign::ZExt)
2515 ArgValue = DAG.getNode(ISD::AssertZext, dl, RegVT, ArgValue,
2516 DAG.getValueType(VA.getValVT()));
2517 else if (VA.getLocInfo() == CCValAssign::BCvt)
2518 ArgValue = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), ArgValue);
2520 if (VA.isExtInLoc()) {
2521 // Handle MMX values passed in XMM regs.
2522 if (RegVT.isVector())
2523 ArgValue = DAG.getNode(X86ISD::MOVDQ2Q, dl, VA.getValVT(), ArgValue);
2525 ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue);
2528 assert(VA.isMemLoc());
2529 ArgValue = LowerMemArgument(Chain, CallConv, Ins, dl, DAG, VA, MFI, i);
2532 // If the value is passed via a pointer, do a load.
2533 if (VA.getLocInfo() == CCValAssign::Indirect)
2534 ArgValue = DAG.getLoad(VA.getValVT(), dl, Chain, ArgValue,
2535 MachinePointerInfo(), false, false, false, 0);
2537 InVals.push_back(ArgValue);
2540 if (Subtarget->is64Bit() || Subtarget->isTargetKnownWindowsMSVC()) {
2541 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
2542 // The x86-64 ABIs require that for returning structs by value we copy
2543 // the sret argument into %rax/%eax (depending on ABI) for the return.
2544 // Win32 requires us to put the sret argument into %eax as well.
2545 // Save the argument into a virtual register so that we can access it
2546 // from the return points.
2547 if (Ins[i].Flags.isSRet()) {
2548 unsigned Reg = FuncInfo->getSRetReturnReg();
2550 MVT PtrTy = getPointerTy();
2551 Reg = MF.getRegInfo().createVirtualRegister(getRegClassFor(PtrTy));
2552 FuncInfo->setSRetReturnReg(Reg);
2554 SDValue Copy = DAG.getCopyToReg(DAG.getEntryNode(), dl, Reg, InVals[i]);
2555 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Copy, Chain);
2561 unsigned StackSize = CCInfo.getNextStackOffset();
2562 // Align stack specially for tail calls.
2563 if (FuncIsMadeTailCallSafe(CallConv,
2564 MF.getTarget().Options.GuaranteedTailCallOpt))
2565 StackSize = GetAlignedArgumentStackSize(StackSize, DAG);
2567 // If the function takes a variable number of arguments, make a frame index for
2568 // the start of the first vararg value... for expansion of llvm.va_start. We
2569 // can skip this if there are no va_start calls.
2570 if (MFI->hasVAStart() &&
2571 (Is64Bit || (CallConv != CallingConv::X86_FastCall &&
2572 CallConv != CallingConv::X86_ThisCall))) {
2573 FuncInfo->setVarArgsFrameIndex(
2574 MFI->CreateFixedObject(1, StackSize, true));
2577 // Figure out if XMM registers are in use.
2578 assert(!(MF.getTarget().Options.UseSoftFloat &&
2579 Fn->hasFnAttribute(Attribute::NoImplicitFloat)) &&
2580 "SSE register cannot be used when SSE is disabled!");
2582 // 64-bit calling conventions support varargs and register parameters, so we
2583 // have to do extra work to spill them in the prologue.
2584 if (Is64Bit && isVarArg && MFI->hasVAStart()) {
2585 // Find the first unallocated argument registers.
2586 ArrayRef<MCPhysReg> ArgGPRs = get64BitArgumentGPRs(CallConv, Subtarget);
2587 ArrayRef<MCPhysReg> ArgXMMs = get64BitArgumentXMMs(MF, CallConv, Subtarget);
2588 unsigned NumIntRegs =
2589 CCInfo.getFirstUnallocated(ArgGPRs.data(), ArgGPRs.size());
2590 unsigned NumXMMRegs =
2591 CCInfo.getFirstUnallocated(ArgXMMs.data(), ArgXMMs.size());
2592 assert(!(NumXMMRegs && !Subtarget->hasSSE1()) &&
2593 "SSE register cannot be used when SSE is disabled!");
2595 // Gather all the live in physical registers.
2596 SmallVector<SDValue, 6> LiveGPRs;
2597 SmallVector<SDValue, 8> LiveXMMRegs;
2599 for (MCPhysReg Reg : ArgGPRs.slice(NumIntRegs)) {
2600 unsigned GPR = MF.addLiveIn(Reg, &X86::GR64RegClass);
2602 DAG.getCopyFromReg(Chain, dl, GPR, MVT::i64));
2604 if (!ArgXMMs.empty()) {
2605 unsigned AL = MF.addLiveIn(X86::AL, &X86::GR8RegClass);
2606 ALVal = DAG.getCopyFromReg(Chain, dl, AL, MVT::i8);
2607 for (MCPhysReg Reg : ArgXMMs.slice(NumXMMRegs)) {
2608 unsigned XMMReg = MF.addLiveIn(Reg, &X86::VR128RegClass);
2609 LiveXMMRegs.push_back(
2610 DAG.getCopyFromReg(Chain, dl, XMMReg, MVT::v4f32));
2615 const TargetFrameLowering &TFI = *Subtarget->getFrameLowering();
2616 // Get to the caller-allocated home save location. Add 8 to account
2617 // for the return address.
2618 int HomeOffset = TFI.getOffsetOfLocalArea() + 8;
2619 FuncInfo->setRegSaveFrameIndex(
2620 MFI->CreateFixedObject(1, NumIntRegs * 8 + HomeOffset, false));
2621 // Fixup to set vararg frame on shadow area (4 x i64).
2623 FuncInfo->setVarArgsFrameIndex(FuncInfo->getRegSaveFrameIndex());
2625 // For X86-64, if there are vararg parameters that are passed via
2626 // registers, then we must store them to their spots on the stack so
2627 // they may be loaded by dereferencing the result of va_next.
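// Rough layout note (SysV x86-64, illustrative): the register save area
// built below is 6 GPRs * 8 bytes plus 8 XMM registers * 16 bytes =
// 176 bytes, and va_arg walks it using the gp_offset/fp_offset va_list
// fields that correspond to the offsets recorded here.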
2628 FuncInfo->setVarArgsGPOffset(NumIntRegs * 8);
2629 FuncInfo->setVarArgsFPOffset(ArgGPRs.size() * 8 + NumXMMRegs * 16);
2630 FuncInfo->setRegSaveFrameIndex(MFI->CreateStackObject(
2631 ArgGPRs.size() * 8 + ArgXMMs.size() * 16, 16, false));
2634 // Store the integer parameter registers.
2635 SmallVector<SDValue, 8> MemOps;
2636 SDValue RSFIN = DAG.getFrameIndex(FuncInfo->getRegSaveFrameIndex(),
2638 unsigned Offset = FuncInfo->getVarArgsGPOffset();
2639 for (SDValue Val : LiveGPRs) {
2640 SDValue FIN = DAG.getNode(ISD::ADD, dl, getPointerTy(), RSFIN,
2641 DAG.getIntPtrConstant(Offset));
2643 DAG.getStore(Val.getValue(1), dl, Val, FIN,
2644 MachinePointerInfo::getFixedStack(
2645 FuncInfo->getRegSaveFrameIndex(), Offset),
2647 MemOps.push_back(Store);
2651 if (!ArgXMMs.empty() && NumXMMRegs != ArgXMMs.size()) {
2652 // Now store the XMM (fp + vector) parameter registers.
2653 SmallVector<SDValue, 12> SaveXMMOps;
2654 SaveXMMOps.push_back(Chain);
2655 SaveXMMOps.push_back(ALVal);
2656 SaveXMMOps.push_back(DAG.getIntPtrConstant(
2657 FuncInfo->getRegSaveFrameIndex()));
2658 SaveXMMOps.push_back(DAG.getIntPtrConstant(
2659 FuncInfo->getVarArgsFPOffset()));
2660 SaveXMMOps.insert(SaveXMMOps.end(), LiveXMMRegs.begin(),
2662 MemOps.push_back(DAG.getNode(X86ISD::VASTART_SAVE_XMM_REGS, dl,
2663 MVT::Other, SaveXMMOps));
2666 if (!MemOps.empty())
2667 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);
2670 if (isVarArg && MFI->hasMustTailInVarArgFunc()) {
2671 // Find the largest legal vector type.
2672 MVT VecVT = MVT::Other;
2673 // FIXME: Only some x86_32 calling conventions support AVX512.
2674 if (Subtarget->hasAVX512() &&
2675 (Is64Bit || (CallConv == CallingConv::X86_VectorCall ||
2676 CallConv == CallingConv::Intel_OCL_BI)))
2677 VecVT = MVT::v16f32;
2678 else if (Subtarget->hasAVX())
2680 else if (Subtarget->hasSSE2())
2683 // We forward some GPRs and some vector types.
2684 SmallVector<MVT, 2> RegParmTypes;
2685 MVT IntVT = Is64Bit ? MVT::i64 : MVT::i32;
2686 RegParmTypes.push_back(IntVT);
2687 if (VecVT != MVT::Other)
2688 RegParmTypes.push_back(VecVT);
2690 // Compute the set of forwarded registers. The rest are scratch.
2691 SmallVectorImpl<ForwardedRegister> &Forwards =
2692 FuncInfo->getForwardedMustTailRegParms();
2693 CCInfo.analyzeMustTailForwardedRegisters(Forwards, RegParmTypes, CC_X86);
2695 // Conservatively forward AL on x86_64, since it might be used for varargs.
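// (Background, informal: in the SysV x86-64 varargs convention AL carries an
// upper bound on the number of vector registers used, so callers emit e.g.
// "movb $2, %al" before a varargs call passing two floating-point arguments
// in registers.)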
2696 if (Is64Bit && !CCInfo.isAllocated(X86::AL)) {
2697 unsigned ALVReg = MF.addLiveIn(X86::AL, &X86::GR8RegClass);
2698 Forwards.push_back(ForwardedRegister(ALVReg, X86::AL, MVT::i8));
2701 // Copy all forwards from physical to virtual registers.
2702 for (ForwardedRegister &F : Forwards) {
2703 // FIXME: Can we use a less constrained schedule?
2704 SDValue RegVal = DAG.getCopyFromReg(Chain, dl, F.VReg, F.VT);
2705 F.VReg = MF.getRegInfo().createVirtualRegister(getRegClassFor(F.VT));
2706 Chain = DAG.getCopyToReg(Chain, dl, F.VReg, RegVal);
2710 // Some CCs need callee pop.
2711 if (X86::isCalleePop(CallConv, Is64Bit, isVarArg,
2712 MF.getTarget().Options.GuaranteedTailCallOpt)) {
2713 FuncInfo->setBytesToPopOnReturn(StackSize); // Callee pops everything.
2715 FuncInfo->setBytesToPopOnReturn(0); // Callee pops nothing.
2716 // If this is an sret function, the return should pop the hidden pointer.
2717 if (!Is64Bit && !IsTailCallConvention(CallConv) &&
2718 !Subtarget->getTargetTriple().isOSMSVCRT() &&
2719 argsAreStructReturn(Ins) == StackStructReturn)
2720 FuncInfo->setBytesToPopOnReturn(4);
2724 // RegSaveFrameIndex is X86-64 only.
2725 FuncInfo->setRegSaveFrameIndex(0xAAAAAAA);
2726 if (CallConv == CallingConv::X86_FastCall ||
2727 CallConv == CallingConv::X86_ThisCall)
2728 // fastcc functions can't have varargs.
2729 FuncInfo->setVarArgsFrameIndex(0xAAAAAAA);
2732 FuncInfo->setArgumentStackSize(StackSize);
2738 X86TargetLowering::LowerMemOpCallTo(SDValue Chain,
2739 SDValue StackPtr, SDValue Arg,
2740 SDLoc dl, SelectionDAG &DAG,
2741 const CCValAssign &VA,
2742 ISD::ArgFlagsTy Flags) const {
2743 unsigned LocMemOffset = VA.getLocMemOffset();
2744 SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset);
2745 PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, PtrOff);
2746 if (Flags.isByVal())
2747 return CreateCopyOfByValArgument(Arg, PtrOff, Chain, Flags, DAG, dl);
2749 return DAG.getStore(Chain, dl, Arg, PtrOff,
2750 MachinePointerInfo::getStack(LocMemOffset),
2754 /// Emit a load of return address if tail call
2755 /// optimization is performed and it is required.
2757 X86TargetLowering::EmitTailCallLoadRetAddr(SelectionDAG &DAG,
2758 SDValue &OutRetAddr, SDValue Chain,
2759 bool IsTailCall, bool Is64Bit,
2760 int FPDiff, SDLoc dl) const {
2761 // Adjust the Return address stack slot.
2762 EVT VT = getPointerTy();
2763 OutRetAddr = getReturnAddressFrameIndex(DAG);
2765 // Load the "old" Return address.
2766 OutRetAddr = DAG.getLoad(VT, dl, Chain, OutRetAddr, MachinePointerInfo(),
2767 false, false, false, 0);
2768 return SDValue(OutRetAddr.getNode(), 1);
2771 /// Emit a store of the return address if tail call
2772 /// optimization is performed and it is required (FPDiff!=0).
2773 static SDValue EmitTailCallStoreRetAddr(SelectionDAG &DAG, MachineFunction &MF,
2774 SDValue Chain, SDValue RetAddrFrIdx,
2775 EVT PtrVT, unsigned SlotSize,
2776 int FPDiff, SDLoc dl) {
2777 // Store the return address to the appropriate stack slot.
2778 if (!FPDiff) return Chain;
2779 // Calculate the new stack slot for the return address.
2780 int NewReturnAddrFI =
2781 MF.getFrameInfo()->CreateFixedObject(SlotSize, (int64_t)FPDiff - SlotSize,
2783 SDValue NewRetAddrFrIdx = DAG.getFrameIndex(NewReturnAddrFI, PtrVT);
2784 Chain = DAG.getStore(Chain, dl, RetAddrFrIdx, NewRetAddrFrIdx,
2785 MachinePointerInfo::getFixedStack(NewReturnAddrFI),
2791 X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
2792 SmallVectorImpl<SDValue> &InVals) const {
2793 SelectionDAG &DAG = CLI.DAG;
2795 SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
2796 SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
2797 SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
2798 SDValue Chain = CLI.Chain;
2799 SDValue Callee = CLI.Callee;
2800 CallingConv::ID CallConv = CLI.CallConv;
2801 bool &isTailCall = CLI.IsTailCall;
2802 bool isVarArg = CLI.IsVarArg;
2804 MachineFunction &MF = DAG.getMachineFunction();
2805 bool Is64Bit = Subtarget->is64Bit();
2806 bool IsWin64 = Subtarget->isCallingConvWin64(CallConv);
2807 StructReturnType SR = callIsStructReturn(Outs);
2808 bool IsSibcall = false;
2809 X86MachineFunctionInfo *X86Info = MF.getInfo<X86MachineFunctionInfo>();
2811 if (MF.getTarget().Options.DisableTailCalls)
2814 bool IsMustTail = CLI.CS && CLI.CS->isMustTailCall();
2816 // Force this to be a tail call. The verifier rules are enough to ensure
2817 // that we can lower this successfully without moving the return address around.
2820 } else if (isTailCall) {
2821 // Check if it's really possible to do a tail call.
2822 isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv,
2823 isVarArg, SR != NotStructReturn,
2824 MF.getFunction()->hasStructRetAttr(), CLI.RetTy,
2825 Outs, OutVals, Ins, DAG);
2827 // Sibcalls are automatically detected tailcalls which do not require ABI changes.
2829 if (!MF.getTarget().Options.GuaranteedTailCallOpt && isTailCall)
2836 assert(!(isVarArg && IsTailCallConvention(CallConv)) &&
2837 "Var args not supported with calling convention fastcc, ghc or hipe");
2839 // Analyze operands of the call, assigning locations to each operand.
2840 SmallVector<CCValAssign, 16> ArgLocs;
2841 CCState CCInfo(CallConv, isVarArg, MF, ArgLocs, *DAG.getContext());
2843 // Allocate shadow area for Win64
2845 CCInfo.AllocateStack(32, 8);
2847 CCInfo.AnalyzeCallOperands(Outs, CC_X86);
2849 // Get a count of how many bytes are to be pushed on the stack.
2850 unsigned NumBytes = CCInfo.getNextStackOffset();
2852 // This is a sibcall. The memory operands are already available in the
2853 // caller's own caller's stack.
2855 else if (MF.getTarget().Options.GuaranteedTailCallOpt &&
2856 IsTailCallConvention(CallConv))
2857 NumBytes = GetAlignedArgumentStackSize(NumBytes, DAG);
2860 if (isTailCall && !IsSibcall && !IsMustTail) {
2861 // Lower arguments at fp - stackoffset + fpdiff.
2862 unsigned NumBytesCallerPushed = X86Info->getBytesToPopOnReturn();
2864 FPDiff = NumBytesCallerPushed - NumBytes;
2866 // Record the delta by which the return address stack slot has to move,
2867 // but only if it moves the slot further (a more negative FPDiff) than before.
2868 if (FPDiff < X86Info->getTCReturnAddrDelta())
2869 X86Info->setTCReturnAddrDelta(FPDiff);
2872 unsigned NumBytesToPush = NumBytes;
2873 unsigned NumBytesToPop = NumBytes;
2875 // If we have an inalloca argument, all stack space has already been allocated
2876 // for us and is right at the top of the stack. We don't support multiple
2877 // arguments passed in memory when using inalloca.
2878 if (!Outs.empty() && Outs.back().Flags.isInAlloca()) {
2880 if (!ArgLocs.back().isMemLoc())
2881 report_fatal_error("cannot use inalloca attribute on a register "
2883 if (ArgLocs.back().getLocMemOffset() != 0)
2884 report_fatal_error("any parameter with the inalloca attribute must be "
2885 "the only memory argument");
2889 Chain = DAG.getCALLSEQ_START(
2890 Chain, DAG.getIntPtrConstant(NumBytesToPush, true), dl);
2892 SDValue RetAddrFrIdx;
2893 // Load return address for tail calls.
2894 if (isTailCall && FPDiff)
2895 Chain = EmitTailCallLoadRetAddr(DAG, RetAddrFrIdx, Chain, isTailCall,
2896 Is64Bit, FPDiff, dl);
2898 SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
2899 SmallVector<SDValue, 8> MemOpChains;
2902 // Walk the register/memloc assignments, inserting copies/loads. In the case
2903 // of tail call optimization, arguments are handled later.
2904 const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
2905 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
2906 // Skip inalloca arguments, they have already been written.
2907 ISD::ArgFlagsTy Flags = Outs[i].Flags;
2908 if (Flags.isInAlloca())
2911 CCValAssign &VA = ArgLocs[i];
2912 EVT RegVT = VA.getLocVT();
2913 SDValue Arg = OutVals[i];
2914 bool isByVal = Flags.isByVal();
2916 // Promote the value if needed.
2917 switch (VA.getLocInfo()) {
2918 default: llvm_unreachable("Unknown loc info!");
2919 case CCValAssign::Full: break;
2920 case CCValAssign::SExt:
2921 Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, RegVT, Arg);
2923 case CCValAssign::ZExt:
2924 Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, RegVT, Arg);
2926 case CCValAssign::AExt:
2927 if (RegVT.is128BitVector()) {
2928 // Special case: passing MMX values in XMM registers.
2929 Arg = DAG.getNode(ISD::BITCAST, dl, MVT::i64, Arg);
2930 Arg = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64, Arg);
2931 Arg = getMOVL(DAG, dl, MVT::v2i64, DAG.getUNDEF(MVT::v2i64), Arg);
2933 Arg = DAG.getNode(ISD::ANY_EXTEND, dl, RegVT, Arg);
2935 case CCValAssign::BCvt:
2936 Arg = DAG.getNode(ISD::BITCAST, dl, RegVT, Arg);
2938 case CCValAssign::Indirect: {
2939 // Store the argument.
2940 SDValue SpillSlot = DAG.CreateStackTemporary(VA.getValVT());
2941 int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex();
2942 Chain = DAG.getStore(Chain, dl, Arg, SpillSlot,
2943 MachinePointerInfo::getFixedStack(FI),
2950 if (VA.isRegLoc()) {
2951 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
2952 if (isVarArg && IsWin64) {
2953 // The Win64 ABI requires an argument XMM register to be copied to the
2954 // corresponding shadow register if the callee is a varargs function.
2955 unsigned ShadowReg = 0;
2956 switch (VA.getLocReg()) {
2957 case X86::XMM0: ShadowReg = X86::RCX; break;
2958 case X86::XMM1: ShadowReg = X86::RDX; break;
2959 case X86::XMM2: ShadowReg = X86::R8; break;
2960 case X86::XMM3: ShadowReg = X86::R9; break;
2963 RegsToPass.push_back(std::make_pair(ShadowReg, Arg));
2965 } else if (!IsSibcall && (!isTailCall || isByVal)) {
2966 assert(VA.isMemLoc());
2967 if (!StackPtr.getNode())
2968 StackPtr = DAG.getCopyFromReg(Chain, dl, RegInfo->getStackRegister(),
2970 MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, Arg,
2971 dl, DAG, VA, Flags));
2975 if (!MemOpChains.empty())
2976 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
2978 if (Subtarget->isPICStyleGOT()) {
2979 // ELF / PIC requires the GOT pointer to be in the EBX register before function calls via the PLT.
2982 RegsToPass.push_back(std::make_pair(unsigned(X86::EBX),
2983 DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), getPointerTy())));
2985 // If we are tail calling and generating PIC/GOT style code load the
2986 // address of the callee into ECX. The value in ecx is used as target of
2987 // the tail jump. This is done to circumvent the ebx/callee-saved problem
2988 // for tail calls on PIC/GOT architectures. Normally we would just put the
2989 // address of GOT into ebx and then call target@PLT. But for tail calls
2990 // ebx would be restored (since ebx is callee saved) before jumping to the
2993 // Note: The actual moving to ECX is done further down.
2994 GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee);
2995 if (G && !G->getGlobal()->hasHiddenVisibility() &&
2996 !G->getGlobal()->hasProtectedVisibility())
2997 Callee = LowerGlobalAddress(Callee, DAG);
2998 else if (isa<ExternalSymbolSDNode>(Callee))
2999 Callee = LowerExternalSymbol(Callee, DAG);
3003 if (Is64Bit && isVarArg && !IsWin64 && !IsMustTail) {
3004 // From AMD64 ABI document:
3005 // For calls that may call functions that use varargs or stdargs
3006 // (prototype-less calls or calls to functions containing ellipsis (...) in
3007 // the declaration) %al is used as hidden argument to specify the number
3008 // of SSE registers used. The contents of %al do not need to match exactly
3009 // the number of registers, but must be an upper bound on the number of SSE
3010 // registers used and is in the range 0 - 8 inclusive.
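// For example, a prototype-less call that passes one double in %xmm0 needs
// %al >= 1; %al = 8 is always a conservatively correct value.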
3012 // Count the number of XMM registers allocated.
3013 static const MCPhysReg XMMArgRegs[] = {
3014 X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
3015 X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7
3017 unsigned NumXMMRegs = CCInfo.getFirstUnallocated(XMMArgRegs, 8);
3018 assert((Subtarget->hasSSE1() || !NumXMMRegs)
3019 && "SSE registers cannot be used when SSE is disabled");
3021 RegsToPass.push_back(std::make_pair(unsigned(X86::AL),
3022 DAG.getConstant(NumXMMRegs, MVT::i8)));
3025 if (isVarArg && IsMustTail) {
3026 const auto &Forwards = X86Info->getForwardedMustTailRegParms();
3027 for (const auto &F : Forwards) {
3028 SDValue Val = DAG.getCopyFromReg(Chain, dl, F.VReg, F.VT);
3029 RegsToPass.push_back(std::make_pair(unsigned(F.PReg), Val));
3033 // For tail calls lower the arguments to the 'real' stack slots. Sibcalls
3034 // don't need this because the eligibility check rejects calls that require
3035 // shuffling arguments passed in memory.
3036 if (!IsSibcall && isTailCall) {
3037 // Force all the incoming stack arguments to be loaded from the stack
3038 // before any new outgoing arguments are stored to the stack, because the
3039 // outgoing stack slots may alias the incoming argument stack slots, and
3040 // the alias isn't otherwise explicit. This is slightly more conservative
3041 // than necessary, because it means that each store effectively depends
3042 // on every argument instead of just those arguments it would clobber.
3043 SDValue ArgChain = DAG.getStackArgumentTokenFactor(Chain);
3045 SmallVector<SDValue, 8> MemOpChains2;
3048 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
3049 CCValAssign &VA = ArgLocs[i];
3052 assert(VA.isMemLoc());
3053 SDValue Arg = OutVals[i];
3054 ISD::ArgFlagsTy Flags = Outs[i].Flags;
3055 // Skip inalloca arguments. They don't require any work.
3056 if (Flags.isInAlloca())
3058 // Create frame index.
3059 int32_t Offset = VA.getLocMemOffset()+FPDiff;
3060 uint32_t OpSize = (VA.getLocVT().getSizeInBits()+7)/8;
3061 FI = MF.getFrameInfo()->CreateFixedObject(OpSize, Offset, true);
3062 FIN = DAG.getFrameIndex(FI, getPointerTy());
3064 if (Flags.isByVal()) {
3065 // Copy relative to framepointer.
3066 SDValue Source = DAG.getIntPtrConstant(VA.getLocMemOffset());
3067 if (!StackPtr.getNode())
3068 StackPtr = DAG.getCopyFromReg(Chain, dl,
3069 RegInfo->getStackRegister(),
3071 Source = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, Source);
3073 MemOpChains2.push_back(CreateCopyOfByValArgument(Source, FIN,
3077 // Store relative to framepointer.
3078 MemOpChains2.push_back(
3079 DAG.getStore(ArgChain, dl, Arg, FIN,
3080 MachinePointerInfo::getFixedStack(FI),
3085 if (!MemOpChains2.empty())
3086 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains2);
3088 // Store the return address to the appropriate stack slot.
3089 Chain = EmitTailCallStoreRetAddr(DAG, MF, Chain, RetAddrFrIdx,
3090 getPointerTy(), RegInfo->getSlotSize(),
3094 // Build a sequence of copy-to-reg nodes chained together with token chain
3095 // and flag operands which copy the outgoing args into registers.
3097 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
3098 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
3099 RegsToPass[i].second, InFlag);
3100 InFlag = Chain.getValue(1);
3103 if (DAG.getTarget().getCodeModel() == CodeModel::Large) {
3104 assert(Is64Bit && "Large code model is only legal in 64-bit mode.");
3105 // In the 64-bit large code model, we have to make all calls
3106 // through a register, since the call instruction's 32-bit
3107 // pc-relative offset may not be large enough to hold the whole
3109 } else if (Callee->getOpcode() == ISD::GlobalAddress) {
3110 // If the callee is a GlobalAddress node (quite common, every direct call
3111 // is) turn it into a TargetGlobalAddress node so that legalize doesn't hack
3113 GlobalAddressSDNode* G = cast<GlobalAddressSDNode>(Callee);
3115 // We should use an extra load for direct calls to dllimported functions in
3117 const GlobalValue *GV = G->getGlobal();
3118 if (!GV->hasDLLImportStorageClass()) {
3119 unsigned char OpFlags = 0;
3120 bool ExtraLoad = false;
3121 unsigned WrapperKind = ISD::DELETED_NODE;
3123 // On ELF targets, in both X86-64 and X86-32 mode, direct calls to
3124 // external symbols must go through the PLT in PIC mode. If the symbol
3125 // has hidden or protected visibility, or if it is static or local, then
3126 // we don't need to use the PLT - we can directly call it.
3127 if (Subtarget->isTargetELF() &&
3128 DAG.getTarget().getRelocationModel() == Reloc::PIC_ &&
3129 GV->hasDefaultVisibility() && !GV->hasLocalLinkage()) {
3130 OpFlags = X86II::MO_PLT;
3131 } else if (Subtarget->isPICStyleStubAny() &&
3132 (GV->isDeclaration() || GV->isWeakForLinker()) &&
3133 (!Subtarget->getTargetTriple().isMacOSX() ||
3134 Subtarget->getTargetTriple().isMacOSXVersionLT(10, 5))) {
3135 // PC-relative references to external symbols should go through $stub,
3136 // unless we're building with the leopard linker or later, which
3137 // automatically synthesizes these stubs.
3138 OpFlags = X86II::MO_DARWIN_STUB;
3139 } else if (Subtarget->isPICStyleRIPRel() && isa<Function>(GV) &&
3140 cast<Function>(GV)->hasFnAttribute(Attribute::NonLazyBind)) {
3141 // If the function is marked as non-lazy, generate an indirect call
3142 // which loads from the GOT directly. This avoids runtime overhead
3143 // at the cost of eager binding (and one extra byte of encoding).
3144 OpFlags = X86II::MO_GOTPCREL;
3145 WrapperKind = X86ISD::WrapperRIP;
3149 Callee = DAG.getTargetGlobalAddress(GV, dl, getPointerTy(),
3150 G->getOffset(), OpFlags);
3152 // Add a wrapper if needed.
3153 if (WrapperKind != ISD::DELETED_NODE)
3154 Callee = DAG.getNode(X86ISD::WrapperRIP, dl, getPointerTy(), Callee);
3155 // Add extra indirection if needed.
3157 Callee = DAG.getLoad(getPointerTy(), dl, DAG.getEntryNode(), Callee,
3158 MachinePointerInfo::getGOT(),
3159 false, false, false, 0);
3161 } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
3162 unsigned char OpFlags = 0;
3164 // On ELF targets, in either X86-64 or X86-32 mode, direct calls to
3165 // external symbols should go through the PLT.
3166 if (Subtarget->isTargetELF() &&
3167 DAG.getTarget().getRelocationModel() == Reloc::PIC_) {
3168 OpFlags = X86II::MO_PLT;
3169 } else if (Subtarget->isPICStyleStubAny() &&
3170 (!Subtarget->getTargetTriple().isMacOSX() ||
3171 Subtarget->getTargetTriple().isMacOSXVersionLT(10, 5))) {
3172 // PC-relative references to external symbols should go through $stub,
3173 // unless we're building with the leopard linker or later, which
3174 // automatically synthesizes these stubs.
3175 OpFlags = X86II::MO_DARWIN_STUB;
3178 Callee = DAG.getTargetExternalSymbol(S->getSymbol(), getPointerTy(),
3180 } else if (Subtarget->isTarget64BitILP32() &&
3181 Callee->getValueType(0) == MVT::i32) {
3182 // Zero-extend the 32-bit Callee address to 64 bits, as required by the x32 ABI.
3183 Callee = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64, Callee);
3186 // Returns a chain & a flag for retval copy to use.
3187 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
3188 SmallVector<SDValue, 8> Ops;
3190 if (!IsSibcall && isTailCall) {
3191 Chain = DAG.getCALLSEQ_END(Chain,
3192 DAG.getIntPtrConstant(NumBytesToPop, true),
3193 DAG.getIntPtrConstant(0, true), InFlag, dl);
3194 InFlag = Chain.getValue(1);
3197 Ops.push_back(Chain);
3198 Ops.push_back(Callee);
3201 Ops.push_back(DAG.getConstant(FPDiff, MVT::i32));
3203 // Add argument registers to the end of the list so that they are known live
3205 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
3206 Ops.push_back(DAG.getRegister(RegsToPass[i].first,
3207 RegsToPass[i].second.getValueType()));
3209 // Add a register mask operand representing the call-preserved registers.
3210 const TargetRegisterInfo *TRI = Subtarget->getRegisterInfo();
3211 const uint32_t *Mask = TRI->getCallPreservedMask(CallConv);
3212 assert(Mask && "Missing call preserved mask for calling convention");
3213 Ops.push_back(DAG.getRegisterMask(Mask));
3215 if (InFlag.getNode())
3216 Ops.push_back(InFlag);
3220 //// If this is the first return lowered for this function, add the regs
3221 //// to the liveout set for the function.
3222 // This isn't right, although it's probably harmless on x86; liveouts
3223 // should be computed from returns not tail calls. Consider a void
3224 // function making a tail call to a function returning int.
3225 return DAG.getNode(X86ISD::TC_RETURN, dl, NodeTys, Ops);
3228 Chain = DAG.getNode(X86ISD::CALL, dl, NodeTys, Ops);
3229 InFlag = Chain.getValue(1);
3231 // Create the CALLSEQ_END node.
3232 unsigned NumBytesForCalleeToPop;
3233 if (X86::isCalleePop(CallConv, Is64Bit, isVarArg,
3234 DAG.getTarget().Options.GuaranteedTailCallOpt))
3235 NumBytesForCalleeToPop = NumBytes; // Callee pops everything
3236 else if (!Is64Bit && !IsTailCallConvention(CallConv) &&
3237 !Subtarget->getTargetTriple().isOSMSVCRT() &&
3238 SR == StackStructReturn)
3239 // If this is a call to a struct-return function, the callee
3240 // pops the hidden struct pointer, so we have to push it back.
3241 // This is common for Darwin/X86, Linux & Mingw32 targets.
3242 // For MSVC Win32 targets, the caller pops the hidden struct pointer.
3243 NumBytesForCalleeToPop = 4;
3245 NumBytesForCalleeToPop = 0; // Callee pops nothing.
3247 // Returns a flag for retval copy to use.
3249 Chain = DAG.getCALLSEQ_END(Chain,
3250 DAG.getIntPtrConstant(NumBytesToPop, true),
3251 DAG.getIntPtrConstant(NumBytesForCalleeToPop,
3254 InFlag = Chain.getValue(1);
3257 // Handle result values, copying them out of physregs into vregs that we
3259 return LowerCallResult(Chain, InFlag, CallConv, isVarArg,
3260 Ins, dl, DAG, InVals);
3263 //===----------------------------------------------------------------------===//
3264 // Fast Calling Convention (tail call) implementation
3265 //===----------------------------------------------------------------------===//
3267 // Like stdcall (the callee cleans up its arguments), except that ECX is
3268 // reserved for storing the address of the tail-called function. Only 2 registers
3269 // are free for argument passing (inreg). Tail call optimization is performed when:
3271 // * tailcallopt is enabled
3272 // * caller/callee are fastcc
3273 // On X86_64 architecture with GOT-style position independent code only local
3274 // (within module) calls are supported at the moment.
3275 // To keep the stack aligned according to the platform ABI, the function
3276 // GetAlignedArgumentStackSize ensures that the argument delta is always a
3277 // multiple of the stack alignment. (Dynamic linkers need this - darwin's dyld
3278 // for example.) If a tail-called callee has more arguments than the caller, the
3279 // caller needs to make sure that there is room to move the RETADDR to. This is
3280 // achieved by reserving an area the size of the argument delta right after the
3281 // original RETADDR, but before the saved frame pointer or the spilled registers,
3282 // e.g. caller(arg1, arg2) calls callee(arg1, arg2,arg3,arg4)
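// For example, if the caller pushed 8 bytes of arguments but the callee needs
// 16 bytes, FPDiff = 8 - 16 = -8 in LowerCall above, so the return address slot
// has to move 8 bytes further down the stack; the most negative delta seen is
// recorded via setTCReturnAddrDelta so the required space can be reserved.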
3294 /// GetAlignedArgumentStackSize - Round the stack size up so that it is aligned
3295 /// to e.g. 16n + 12 bytes for a 16 byte alignment requirement.
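/// For example, with a 16 byte stack alignment and an 8 byte slot size the
/// result has the form 16n + 8: a 20 byte argument area is rounded up to 24
/// bytes and a 30 byte area up to 40 bytes.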
3297 X86TargetLowering::GetAlignedArgumentStackSize(unsigned StackSize,
3298 SelectionDAG& DAG) const {
3299 const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
3300 const TargetFrameLowering &TFI = *Subtarget->getFrameLowering();
3301 unsigned StackAlignment = TFI.getStackAlignment();
3302 uint64_t AlignMask = StackAlignment - 1;
3303 int64_t Offset = StackSize;
3304 unsigned SlotSize = RegInfo->getSlotSize();
3305 if ( (Offset & AlignMask) <= (StackAlignment - SlotSize) ) {
3306 // The remainder still fits below (StackAlignment - SlotSize), so just add the difference.
3307 Offset += ((StackAlignment - SlotSize) - (Offset & AlignMask));
3309 // Mask out the lower bits and add StackAlignment plus the (StackAlignment - SlotSize) residue.
3310 Offset = ((~AlignMask) & Offset) + StackAlignment +
3311 (StackAlignment-SlotSize);
3316 /// MatchingStackOffset - Return true if the given stack call argument is
3317 /// already available in the same position (relatively) of the caller's
3318 /// incoming argument stack.
3320 bool MatchingStackOffset(SDValue Arg, unsigned Offset, ISD::ArgFlagsTy Flags,
3321 MachineFrameInfo *MFI, const MachineRegisterInfo *MRI,
3322 const X86InstrInfo *TII) {
3323 unsigned Bytes = Arg.getValueType().getSizeInBits() / 8;
3325 if (Arg.getOpcode() == ISD::CopyFromReg) {
3326 unsigned VR = cast<RegisterSDNode>(Arg.getOperand(1))->getReg();
3327 if (!TargetRegisterInfo::isVirtualRegister(VR))
3329 MachineInstr *Def = MRI->getVRegDef(VR);
3332 if (!Flags.isByVal()) {
3333 if (!TII->isLoadFromStackSlot(Def, FI))
3336 unsigned Opcode = Def->getOpcode();
3337 if ((Opcode == X86::LEA32r || Opcode == X86::LEA64r ||
3338 Opcode == X86::LEA64_32r) &&
3339 Def->getOperand(1).isFI()) {
3340 FI = Def->getOperand(1).getIndex();
3341 Bytes = Flags.getByValSize();
3345 } else if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Arg)) {
3346 if (Flags.isByVal())
3347 // ByVal argument is passed in as a pointer but it's now being
3348 // dereferenced. e.g.
3349 // define @foo(%struct.X* %A) {
3350 // tail call @bar(%struct.X* byval %A)
3353 SDValue Ptr = Ld->getBasePtr();
3354 FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(Ptr);
3357 FI = FINode->getIndex();
3358 } else if (Arg.getOpcode() == ISD::FrameIndex && Flags.isByVal()) {
3359 FrameIndexSDNode *FINode = cast<FrameIndexSDNode>(Arg);
3360 FI = FINode->getIndex();
3361 Bytes = Flags.getByValSize();
3365 assert(FI != INT_MAX);
3366 if (!MFI->isFixedObjectIndex(FI))
3368 return Offset == MFI->getObjectOffset(FI) && Bytes == MFI->getObjectSize(FI);
3371 /// IsEligibleForTailCallOptimization - Check whether the call is eligible
3372 /// for tail call optimization. Targets which want to do tail call
3373 /// optimization should implement this function.
3375 X86TargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
3376 CallingConv::ID CalleeCC,
3378 bool isCalleeStructRet,
3379 bool isCallerStructRet,
3381 const SmallVectorImpl<ISD::OutputArg> &Outs,
3382 const SmallVectorImpl<SDValue> &OutVals,
3383 const SmallVectorImpl<ISD::InputArg> &Ins,
3384 SelectionDAG &DAG) const {
3385 if (!IsTailCallConvention(CalleeCC) && !IsCCallConvention(CalleeCC))
3388 // If -tailcallopt is specified, make fastcc functions tail-callable.
3389 const MachineFunction &MF = DAG.getMachineFunction();
3390 const Function *CallerF = MF.getFunction();
3392 // If the function return type is x86_fp80 and the callee return type is not,
3393 // then the FP_EXTEND of the call result is not a nop. It's not safe to
3394 // perform a tailcall optimization here.
3395 if (CallerF->getReturnType()->isX86_FP80Ty() && !RetTy->isX86_FP80Ty())
3398 CallingConv::ID CallerCC = CallerF->getCallingConv();
3399 bool CCMatch = CallerCC == CalleeCC;
3400 bool IsCalleeWin64 = Subtarget->isCallingConvWin64(CalleeCC);
3401 bool IsCallerWin64 = Subtarget->isCallingConvWin64(CallerCC);
3403 if (DAG.getTarget().Options.GuaranteedTailCallOpt) {
3404 if (IsTailCallConvention(CalleeCC) && CCMatch)
3409 // Look for obvious safe cases to perform tail call optimization that do not
3410 // require ABI changes. This is what gcc calls sibcall.
3412 // Can't do sibcall if stack needs to be dynamically re-aligned. PEI needs to
3413 // emit a special epilogue.
3414 const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
3415 if (RegInfo->needsStackRealignment(MF))
3418 // Also avoid sibcall optimization if either caller or callee uses struct
3419 // return semantics.
3420 if (isCalleeStructRet || isCallerStructRet)
3423 // An stdcall/thiscall caller is expected to clean up its arguments; the
3424 // callee isn't going to do that.
3425 // FIXME: this is more restrictive than needed. We could produce a tailcall
3426 // when the stack adjustment matches. For example, with a thiscall that takes
3427 // only one argument.
3428 if (!CCMatch && (CallerCC == CallingConv::X86_StdCall ||
3429 CallerCC == CallingConv::X86_ThisCall))
3432 // Do not sibcall optimize vararg calls unless all arguments are passed via
3434 if (isVarArg && !Outs.empty()) {
3436 // Optimizing for varargs on Win64 is unlikely to be safe without
3437 // additional testing.
3438 if (IsCalleeWin64 || IsCallerWin64)
3441 SmallVector<CCValAssign, 16> ArgLocs;
3442 CCState CCInfo(CalleeCC, isVarArg, DAG.getMachineFunction(), ArgLocs,
3445 CCInfo.AnalyzeCallOperands(Outs, CC_X86);
3446 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i)
3447 if (!ArgLocs[i].isRegLoc())
3451 // If the call result is in ST0 / ST1, it needs to be popped off the x87
3452 // stack. Therefore, if it's not used by the call it is not safe to optimize
3453 // this into a sibcall.
3454 bool Unused = false;
3455 for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
3462 SmallVector<CCValAssign, 16> RVLocs;
3463 CCState CCInfo(CalleeCC, false, DAG.getMachineFunction(), RVLocs,
3465 CCInfo.AnalyzeCallResult(Ins, RetCC_X86);
3466 for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
3467 CCValAssign &VA = RVLocs[i];
3468 if (VA.getLocReg() == X86::FP0 || VA.getLocReg() == X86::FP1)
3473 // If the calling conventions do not match, then we'd better make sure the
3474 // results are returned in the same way as what the caller expects.
3476 SmallVector<CCValAssign, 16> RVLocs1;
3477 CCState CCInfo1(CalleeCC, false, DAG.getMachineFunction(), RVLocs1,
3479 CCInfo1.AnalyzeCallResult(Ins, RetCC_X86);
3481 SmallVector<CCValAssign, 16> RVLocs2;
3482 CCState CCInfo2(CallerCC, false, DAG.getMachineFunction(), RVLocs2,
3484 CCInfo2.AnalyzeCallResult(Ins, RetCC_X86);
3486 if (RVLocs1.size() != RVLocs2.size())
3488 for (unsigned i = 0, e = RVLocs1.size(); i != e; ++i) {
3489 if (RVLocs1[i].isRegLoc() != RVLocs2[i].isRegLoc())
3491 if (RVLocs1[i].getLocInfo() != RVLocs2[i].getLocInfo())
3493 if (RVLocs1[i].isRegLoc()) {
3494 if (RVLocs1[i].getLocReg() != RVLocs2[i].getLocReg())
3497 if (RVLocs1[i].getLocMemOffset() != RVLocs2[i].getLocMemOffset())
3503 // If the callee takes no arguments then go on to check the results of the
3505 if (!Outs.empty()) {
3506 // Check if stack adjustment is needed. For now, do not do this if any
3507 // argument is passed on the stack.
3508 SmallVector<CCValAssign, 16> ArgLocs;
3509 CCState CCInfo(CalleeCC, isVarArg, DAG.getMachineFunction(), ArgLocs,
3512 // Allocate shadow area for Win64
3514 CCInfo.AllocateStack(32, 8);
3516 CCInfo.AnalyzeCallOperands(Outs, CC_X86);
3517 if (CCInfo.getNextStackOffset()) {
3518 MachineFunction &MF = DAG.getMachineFunction();
3519 if (MF.getInfo<X86MachineFunctionInfo>()->getBytesToPopOnReturn())
3522 // Check if the arguments are already laid out in the right way as
3523 // the caller's fixed stack objects.
3524 MachineFrameInfo *MFI = MF.getFrameInfo();
3525 const MachineRegisterInfo *MRI = &MF.getRegInfo();
3526 const X86InstrInfo *TII = Subtarget->getInstrInfo();
3527 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
3528 CCValAssign &VA = ArgLocs[i];
3529 SDValue Arg = OutVals[i];
3530 ISD::ArgFlagsTy Flags = Outs[i].Flags;
3531 if (VA.getLocInfo() == CCValAssign::Indirect)
3533 if (!VA.isRegLoc()) {
3534 if (!MatchingStackOffset(Arg, VA.getLocMemOffset(), Flags,
3541 // If the tailcall address may be in a register, then make sure it's
3542 // possible to register allocate for it. In 32-bit, the call address can
3543 // only target EAX, EDX, or ECX since the tail call must be scheduled after
3544 // callee-saved registers are restored. These happen to be the same
3545 // registers used to pass 'inreg' arguments so watch out for those.
3546 if (!Subtarget->is64Bit() &&
3547 ((!isa<GlobalAddressSDNode>(Callee) &&
3548 !isa<ExternalSymbolSDNode>(Callee)) ||
3549 DAG.getTarget().getRelocationModel() == Reloc::PIC_)) {
3550 unsigned NumInRegs = 0;
3551 // In PIC we need an extra register to formulate the address computation
3553 unsigned MaxInRegs =
3554 (DAG.getTarget().getRelocationModel() == Reloc::PIC_) ? 2 : 3;
3556 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
3557 CCValAssign &VA = ArgLocs[i];
3560 unsigned Reg = VA.getLocReg();
3563 case X86::EAX: case X86::EDX: case X86::ECX:
3564 if (++NumInRegs == MaxInRegs)
3576 X86TargetLowering::createFastISel(FunctionLoweringInfo &funcInfo,
3577 const TargetLibraryInfo *libInfo) const {
3578 return X86::createFastISel(funcInfo, libInfo);
3581 //===----------------------------------------------------------------------===//
3582 // Other Lowering Hooks
3583 //===----------------------------------------------------------------------===//
3585 static bool MayFoldLoad(SDValue Op) {
3586 return Op.hasOneUse() && ISD::isNormalLoad(Op.getNode());
3589 static bool MayFoldIntoStore(SDValue Op) {
3590 return Op.hasOneUse() && ISD::isNormalStore(*Op.getNode()->use_begin());
3593 static bool isTargetShuffle(unsigned Opcode) {
3595 default: return false;
3596 case X86ISD::BLENDI:
3597 case X86ISD::PSHUFB:
3598 case X86ISD::PSHUFD:
3599 case X86ISD::PSHUFHW:
3600 case X86ISD::PSHUFLW:
3602 case X86ISD::PALIGNR:
3603 case X86ISD::MOVLHPS:
3604 case X86ISD::MOVLHPD:
3605 case X86ISD::MOVHLPS:
3606 case X86ISD::MOVLPS:
3607 case X86ISD::MOVLPD:
3608 case X86ISD::MOVSHDUP:
3609 case X86ISD::MOVSLDUP:
3610 case X86ISD::MOVDDUP:
3613 case X86ISD::UNPCKL:
3614 case X86ISD::UNPCKH:
3615 case X86ISD::VPERMILPI:
3616 case X86ISD::VPERM2X128:
3617 case X86ISD::VPERMI:
3622 static SDValue getTargetShuffleNode(unsigned Opc, SDLoc dl, EVT VT,
3623 SDValue V1, SelectionDAG &DAG) {
3625 default: llvm_unreachable("Unknown x86 shuffle node");
3626 case X86ISD::MOVSHDUP:
3627 case X86ISD::MOVSLDUP:
3628 case X86ISD::MOVDDUP:
3629 return DAG.getNode(Opc, dl, VT, V1);
3633 static SDValue getTargetShuffleNode(unsigned Opc, SDLoc dl, EVT VT,
3634 SDValue V1, unsigned TargetMask,
3635 SelectionDAG &DAG) {
3637 default: llvm_unreachable("Unknown x86 shuffle node");
3638 case X86ISD::PSHUFD:
3639 case X86ISD::PSHUFHW:
3640 case X86ISD::PSHUFLW:
3641 case X86ISD::VPERMILPI:
3642 case X86ISD::VPERMI:
3643 return DAG.getNode(Opc, dl, VT, V1, DAG.getConstant(TargetMask, MVT::i8));
3647 static SDValue getTargetShuffleNode(unsigned Opc, SDLoc dl, EVT VT,
3648 SDValue V1, SDValue V2, unsigned TargetMask,
3649 SelectionDAG &DAG) {
3651 default: llvm_unreachable("Unknown x86 shuffle node");
3652 case X86ISD::PALIGNR:
3653 case X86ISD::VALIGN:
3655 case X86ISD::VPERM2X128:
3656 return DAG.getNode(Opc, dl, VT, V1, V2,
3657 DAG.getConstant(TargetMask, MVT::i8));
3661 static SDValue getTargetShuffleNode(unsigned Opc, SDLoc dl, EVT VT,
3662 SDValue V1, SDValue V2, SelectionDAG &DAG) {
3664 default: llvm_unreachable("Unknown x86 shuffle node");
3665 case X86ISD::MOVLHPS:
3666 case X86ISD::MOVLHPD:
3667 case X86ISD::MOVHLPS:
3668 case X86ISD::MOVLPS:
3669 case X86ISD::MOVLPD:
3672 case X86ISD::UNPCKL:
3673 case X86ISD::UNPCKH:
3674 return DAG.getNode(Opc, dl, VT, V1, V2);
3678 SDValue X86TargetLowering::getReturnAddressFrameIndex(SelectionDAG &DAG) const {
3679 MachineFunction &MF = DAG.getMachineFunction();
3680 const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
3681 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
3682 int ReturnAddrIndex = FuncInfo->getRAIndex();
3684 if (ReturnAddrIndex == 0) {
3685 // Set up a frame object for the return address.
3686 unsigned SlotSize = RegInfo->getSlotSize();
3687 ReturnAddrIndex = MF.getFrameInfo()->CreateFixedObject(SlotSize,
3690 FuncInfo->setRAIndex(ReturnAddrIndex);
3693 return DAG.getFrameIndex(ReturnAddrIndex, getPointerTy());
3696 bool X86::isOffsetSuitableForCodeModel(int64_t Offset, CodeModel::Model M,
3697 bool hasSymbolicDisplacement) {
3698 // Offset should fit into 32 bit immediate field.
3699 if (!isInt<32>(Offset))
3702 // If we don't have a symbolic displacement - we don't have any extra
3704 if (!hasSymbolicDisplacement)
3707 // FIXME: Some tweaks might be needed for medium code model.
3708 if (M != CodeModel::Small && M != CodeModel::Kernel)
3711 // For the small code model we assume that the last object ends at least 16MB
3712 // before the end of the 31-bit address range. We may also accept pretty large
3713 // negative constants, knowing that all objects are in the positive half of the address space.
3714 if (M == CodeModel::Small && Offset < 16*1024*1024)
3717 // For the kernel code model we know that all objects reside in the negative
3718 // half of the 32-bit address space. We do not accept negative offsets, since they
3719 // might take the address just out of range, but we may accept pretty large positive ones.
3720 if (M == CodeModel::Kernel && Offset >= 0)
3726 /// isCalleePop - Determines whether the callee is required to pop its
3727 /// own arguments. Callee pop is necessary to support tail calls.
3728 bool X86::isCalleePop(CallingConv::ID CallingConv,
3729 bool is64Bit, bool IsVarArg, bool TailCallOpt) {
3730 switch (CallingConv) {
3733 case CallingConv::X86_StdCall:
3734 case CallingConv::X86_FastCall:
3735 case CallingConv::X86_ThisCall:
3737 case CallingConv::Fast:
3738 case CallingConv::GHC:
3739 case CallingConv::HiPE:
3746 /// \brief Return true if the condition is an unsigned comparison operation.
3747 static bool isX86CCUnsigned(unsigned X86CC) {
3749 default: llvm_unreachable("Invalid integer condition!");
3750 case X86::COND_E: return true;
3751 case X86::COND_G: return false;
3752 case X86::COND_GE: return false;
3753 case X86::COND_L: return false;
3754 case X86::COND_LE: return false;
3755 case X86::COND_NE: return true;
3756 case X86::COND_B: return true;
3757 case X86::COND_A: return true;
3758 case X86::COND_BE: return true;
3759 case X86::COND_AE: return true;
3761 llvm_unreachable("covered switch fell through?!");
3764 /// TranslateX86CC - Do a one-to-one translation of an ISD::CondCode to the X86
3765 /// specific condition code, returning the condition code and the LHS/RHS of the
3766 /// comparison to make.
3767 static unsigned TranslateX86CC(ISD::CondCode SetCCOpcode, bool isFP,
3768 SDValue &LHS, SDValue &RHS, SelectionDAG &DAG) {
3770 if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS)) {
3771 if (SetCCOpcode == ISD::SETGT && RHSC->isAllOnesValue()) {
3772 // X > -1 -> X == 0, jump !sign.
3773 RHS = DAG.getConstant(0, RHS.getValueType());
3774 return X86::COND_NS;
3776 if (SetCCOpcode == ISD::SETLT && RHSC->isNullValue()) {
3777 // X < 0 -> X == 0, jump on sign.
3780 if (SetCCOpcode == ISD::SETLT && RHSC->getZExtValue() == 1) {
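// X < 1 -> X <= 0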
3782 RHS = DAG.getConstant(0, RHS.getValueType());
3783 return X86::COND_LE;
3787 switch (SetCCOpcode) {
3788 default: llvm_unreachable("Invalid integer condition!");
3789 case ISD::SETEQ: return X86::COND_E;
3790 case ISD::SETGT: return X86::COND_G;
3791 case ISD::SETGE: return X86::COND_GE;
3792 case ISD::SETLT: return X86::COND_L;
3793 case ISD::SETLE: return X86::COND_LE;
3794 case ISD::SETNE: return X86::COND_NE;
3795 case ISD::SETULT: return X86::COND_B;
3796 case ISD::SETUGT: return X86::COND_A;
3797 case ISD::SETULE: return X86::COND_BE;
3798 case ISD::SETUGE: return X86::COND_AE;
3802 // First determine if it is required or is profitable to flip the operands.
3804 // If LHS is a foldable load, but RHS is not, flip the condition.
3805 if (ISD::isNON_EXTLoad(LHS.getNode()) &&
3806 !ISD::isNON_EXTLoad(RHS.getNode())) {
3807 SetCCOpcode = getSetCCSwappedOperands(SetCCOpcode);
3808 std::swap(LHS, RHS);
3811 switch (SetCCOpcode) {
3817 std::swap(LHS, RHS);
3821 // On a floating point condition, the flags are set as follows:
//  ZF  PF  CF   op
3823 //  0 | 0 | 0 | X > Y
3824 //  0 | 0 | 1 | X < Y
3825 //  1 | 0 | 0 | X == Y
3826 //  1 | 1 | 1 | unordered
3827 switch (SetCCOpcode) {
3828 default: llvm_unreachable("Condcode should be pre-legalized away");
3830 case ISD::SETEQ: return X86::COND_E;
3831 case ISD::SETOLT: // flipped
3833 case ISD::SETGT: return X86::COND_A;
3834 case ISD::SETOLE: // flipped
3836 case ISD::SETGE: return X86::COND_AE;
3837 case ISD::SETUGT: // flipped
3839 case ISD::SETLT: return X86::COND_B;
3840 case ISD::SETUGE: // flipped
3842 case ISD::SETLE: return X86::COND_BE;
3844 case ISD::SETNE: return X86::COND_NE;
3845 case ISD::SETUO: return X86::COND_P;
3846 case ISD::SETO: return X86::COND_NP;
3848 case ISD::SETUNE: return X86::COND_INVALID;
3852 /// hasFPCMov - is there a floating point cmov for the specific X86 condition
3853 /// code. The current x86 ISA includes the following FP cmov instructions:
3854 /// fcmovb, fcmovbe, fcmove, fcmovu, fcmovae, fcmova, fcmovne, fcmovnu.
3855 static bool hasFPCMov(unsigned X86CC) {
3871 /// isFPImmLegal - Returns true if the target can instruction select the
3872 /// specified FP immediate natively. If false, the legalizer will
3873 /// materialize the FP immediate as a load from a constant pool.
3874 bool X86TargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const {
3875 for (unsigned i = 0, e = LegalFPImmediates.size(); i != e; ++i) {
3876 if (Imm.bitwiseIsEqual(LegalFPImmediates[i]))
3882 bool X86TargetLowering::shouldReduceLoadWidth(SDNode *Load,
3883 ISD::LoadExtType ExtTy,
3885 // "ELF Handling for Thread-Local Storage" specifies that R_X86_64_GOTTPOFF
3886 // relocations must target a movq or addq instruction: don't let the load shrink.
3887 SDValue BasePtr = cast<LoadSDNode>(Load)->getBasePtr();
3888 if (BasePtr.getOpcode() == X86ISD::WrapperRIP)
3889 if (const auto *GA = dyn_cast<GlobalAddressSDNode>(BasePtr.getOperand(0)))
3890 return GA->getTargetFlags() != X86II::MO_GOTTPOFF;
3894 /// \brief Returns true if it is beneficial to convert a load of a constant
3895 /// to just the constant itself.
3896 bool X86TargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm,
3898 assert(Ty->isIntegerTy());
3900 unsigned BitSize = Ty->getPrimitiveSizeInBits();
3901 if (BitSize == 0 || BitSize > 64)
3906 bool X86TargetLowering::isExtractSubvectorCheap(EVT ResVT,
3907 unsigned Index) const {
3908 if (!isOperationLegalOrCustom(ISD::EXTRACT_SUBVECTOR, ResVT))
3911 return (Index == 0 || Index == ResVT.getVectorNumElements());
3914 bool X86TargetLowering::isCheapToSpeculateCttz() const {
3915 // Speculate cttz only if we can directly use TZCNT.
3916 return Subtarget->hasBMI();
3919 bool X86TargetLowering::isCheapToSpeculateCtlz() const {
3920 // Speculate ctlz only if we can directly use LZCNT.
3921 return Subtarget->hasLZCNT();
3924 /// isUndefOrInRange - Return true if Val is undef or if its value falls within
3925 /// the specified range [Low, Hi).
3926 static bool isUndefOrInRange(int Val, int Low, int Hi) {
3927 return (Val < 0) || (Val >= Low && Val < Hi);
3930 /// isUndefOrEqual - Val is either less than zero (undef) or equal to the
3931 /// specified value.
3932 static bool isUndefOrEqual(int Val, int CmpVal) {
3933 return (Val < 0 || Val == CmpVal);
3936 /// isSequentialOrUndefInRange - Return true if every element in Mask, beginning
3937 /// at position Pos and ending at Pos+Size, falls within the specified
3938 /// sequential range [Low, Low+Size) or is undef.
3939 static bool isSequentialOrUndefInRange(ArrayRef<int> Mask,
3940 unsigned Pos, unsigned Size, int Low) {
3941 for (unsigned i = Pos, e = Pos+Size; i != e; ++i, ++Low)
3942 if (!isUndefOrEqual(Mask[i], Low))
3947 /// isPSHUFDMask - Return true if the node specifies a shuffle of elements that
3948 /// is suitable for input to PSHUFD. That is, it doesn't reference the other
3949 /// operand - by default it matches against the first operand.
3950 static bool isPSHUFDMask(ArrayRef<int> Mask, MVT VT,
3951 bool TestSecondOperand = false) {
3952 if (VT != MVT::v4f32 && VT != MVT::v4i32 &&
3953 VT != MVT::v2f64 && VT != MVT::v2i64)
3956 unsigned NumElems = VT.getVectorNumElements();
3957 unsigned Lo = TestSecondOperand ? NumElems : 0;
3958 unsigned Hi = Lo + NumElems;
3960 for (unsigned i = 0; i < NumElems; ++i)
3961 if (!isUndefOrInRange(Mask[i], (int)Lo, (int)Hi))
3967 /// isPSHUFHWMask - Return true if the node specifies a shuffle of elements that
3968 /// is suitable for input to PSHUFHW.
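/// For example, <0, 1, 2, 3, 7, 6, 5, 4> is a valid v8i16 PSHUFHW mask.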
3969 static bool isPSHUFHWMask(ArrayRef<int> Mask, MVT VT, bool HasInt256) {
3970 if (VT != MVT::v8i16 && (!HasInt256 || VT != MVT::v16i16))
3973 // Lower quadword copied in order or undef.
3974 if (!isSequentialOrUndefInRange(Mask, 0, 4, 0))
3977 // Upper quadword shuffled.
3978 for (unsigned i = 4; i != 8; ++i)
3979 if (!isUndefOrInRange(Mask[i], 4, 8))
3982 if (VT == MVT::v16i16) {
3983 // Lower quadword copied in order or undef.
3984 if (!isSequentialOrUndefInRange(Mask, 8, 4, 8))
3987 // Upper quadword shuffled.
3988 for (unsigned i = 12; i != 16; ++i)
3989 if (!isUndefOrInRange(Mask[i], 12, 16))
3996 /// isPSHUFLWMask - Return true if the node specifies a shuffle of elements that
3997 /// is suitable for input to PSHUFLW.
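/// For example, <3, 2, 1, 0, 4, 5, 6, 7> is a valid v8i16 PSHUFLW mask.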
3998 static bool isPSHUFLWMask(ArrayRef<int> Mask, MVT VT, bool HasInt256) {
3999 if (VT != MVT::v8i16 && (!HasInt256 || VT != MVT::v16i16))
4002 // Upper quadword copied in order.
4003 if (!isSequentialOrUndefInRange(Mask, 4, 4, 4))
4006 // Lower quadword shuffled.
4007 for (unsigned i = 0; i != 4; ++i)
4008 if (!isUndefOrInRange(Mask[i], 0, 4))
4011 if (VT == MVT::v16i16) {
4012 // Upper quadword copied in order.
4013 if (!isSequentialOrUndefInRange(Mask, 12, 4, 12))
4016 // Lower quadword shuffled.
4017 for (unsigned i = 8; i != 12; ++i)
4018 if (!isUndefOrInRange(Mask[i], 8, 12))
4025 /// \brief Return true if the mask specifies a shuffle of elements that is
4026 /// suitable for input to an intralane (palignr) or interlane (valign) vector right-shift.
4028 static bool isAlignrMask(ArrayRef<int> Mask, MVT VT, bool InterLane) {
4029 unsigned NumElts = VT.getVectorNumElements();
4030 unsigned NumLanes = InterLane ? 1: VT.getSizeInBits()/128;
4031 unsigned NumLaneElts = NumElts/NumLanes;
4033 // Do not handle 64-bit element shuffles with palignr.
4034 if (NumLaneElts == 2)
4037 for (unsigned l = 0; l != NumElts; l+=NumLaneElts) {
4039 for (i = 0; i != NumLaneElts; ++i) {
4044 // Lane is all undef, go to next lane
4045 if (i == NumLaneElts)
4048 int Start = Mask[i+l];
4050 // Make sure it's in this lane in one of the sources
4051 if (!isUndefOrInRange(Start, l, l+NumLaneElts) &&
4052 !isUndefOrInRange(Start, l+NumElts, l+NumElts+NumLaneElts))
4055 // If not lane 0, then we must match lane 0
4056 if (l != 0 && Mask[i] >= 0 && !isUndefOrEqual(Start, Mask[i]+l))
4059 // Correct second source to be contiguous with first source
4060 if (Start >= (int)NumElts)
4061 Start -= NumElts - NumLaneElts;
4063 // Make sure we're shifting in the right direction.
4064 if (Start <= (int)(i+l))
4069 // Check the rest of the elements to see if they are consecutive.
4070 for (++i; i != NumLaneElts; ++i) {
4071 int Idx = Mask[i+l];
4073 // Make sure it's in this lane
4074 if (!isUndefOrInRange(Idx, l, l+NumLaneElts) &&
4075 !isUndefOrInRange(Idx, l+NumElts, l+NumElts+NumLaneElts))
4078 // If not lane 0, then we must match lane 0
4079 if (l != 0 && Mask[i] >= 0 && !isUndefOrEqual(Idx, Mask[i]+l))
4082 if (Idx >= (int)NumElts)
4083 Idx -= NumElts - NumLaneElts;
4085 if (!isUndefOrEqual(Idx, Start+i))
4094 /// \brief Return true if the node specifies a shuffle of elements that is
4095 /// suitable for input to PALIGNR.
4096 static bool isPALIGNRMask(ArrayRef<int> Mask, MVT VT,
4097 const X86Subtarget *Subtarget) {
4098 if ((VT.is128BitVector() && !Subtarget->hasSSSE3()) ||
4099 (VT.is256BitVector() && !Subtarget->hasInt256()) ||
4100 VT.is512BitVector())
4101 // FIXME: Add AVX512BW.
4104 return isAlignrMask(Mask, VT, false);
4107 /// \brief Return true if the node specifies a shuffle of elements that is
4108 /// suitable for input to VALIGN.
4109 static bool isVALIGNMask(ArrayRef<int> Mask, MVT VT,
4110 const X86Subtarget *Subtarget) {
4111 // FIXME: Add AVX512VL.
4112 if (!VT.is512BitVector() || !Subtarget->hasAVX512())
4114 return isAlignrMask(Mask, VT, true);
4117 /// CommuteVectorShuffleMask - Change values in a shuffle permute mask assuming
4118 /// the two vector operands have swapped position.
4119 static void CommuteVectorShuffleMask(SmallVectorImpl<int> &Mask,
4120 unsigned NumElems) {
4121 for (unsigned i = 0; i != NumElems; ++i) {
4125 else if (idx < (int)NumElems)
4126 Mask[i] = idx + NumElems;
4128 Mask[i] = idx - NumElems;
4132 /// isSHUFPMask - Return true if the specified VECTOR_SHUFFLE operand
4133 /// specifies a shuffle of elements that is suitable for input to 128/256-bit
4134 /// SHUFPS and SHUFPD. If Commuted is true, then it checks for sources to be
4135 /// reverse of what x86 shuffles want.
4136 static bool isSHUFPMask(ArrayRef<int> Mask, MVT VT, bool Commuted = false) {
4138 unsigned NumElems = VT.getVectorNumElements();
4139 unsigned NumLanes = VT.getSizeInBits()/128;
4140 unsigned NumLaneElems = NumElems/NumLanes;
4142 if (NumLaneElems != 2 && NumLaneElems != 4)
4145 unsigned EltSize = VT.getVectorElementType().getSizeInBits();
4146 bool symmetricMaskRequired =
4147 (VT.getSizeInBits() >= 256) && (EltSize == 32);
4149 // VSHUFPSY divides the resulting vector into 4 chunks.
4150 // The sources are also split into 4 chunks, and each destination
4151 // chunk must come from a different source chunk.
4153 // SRC1 => X7 X6 X5 X4 X3 X2 X1 X0
4154 // SRC2 => Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0
4156 // DST => Y7..Y4, Y7..Y4, X7..X4, X7..X4,
4157 // Y3..Y0, Y3..Y0, X3..X0, X3..X0
4159 // VSHUFPDY divides the resulting vector into 4 chunks.
4160 // The sources are also split into 4 chunks, and each destination
4161 // chunk must come from a different source chunk.
4163 // SRC1 => X3 X2 X1 X0
4164 // SRC2 => Y3 Y2 Y1 Y0
4166 // DST => Y3..Y2, X3..X2, Y1..Y0, X1..X0
4168 SmallVector<int, 4> MaskVal(NumLaneElems, -1);
4169 unsigned HalfLaneElems = NumLaneElems/2;
4170 for (unsigned l = 0; l != NumElems; l += NumLaneElems) {
4171 for (unsigned i = 0; i != NumLaneElems; ++i) {
4172 int Idx = Mask[i+l];
4173 unsigned RngStart = l + ((Commuted == (i<HalfLaneElems)) ? NumElems : 0);
4174 if (!isUndefOrInRange(Idx, RngStart, RngStart+NumLaneElems))
4176 // For VSHUFPSY, the mask of the second half must be the same as the
4177 // first but with the appropriate offsets. This works in the same way as
4178 // VPERMILPS works with masks.
4179 if (!symmetricMaskRequired || Idx < 0)
4181 if (MaskVal[i] < 0) {
4182 MaskVal[i] = Idx - l;
4185 if ((signed)(Idx - l) != MaskVal[i])
4193 /// isMOVHLPSMask - Return true if the specified VECTOR_SHUFFLE operand
4194 /// specifies a shuffle of elements that is suitable for input to MOVHLPS.
4195 static bool isMOVHLPSMask(ArrayRef<int> Mask, MVT VT) {
4196 if (!VT.is128BitVector())
4199 unsigned NumElems = VT.getVectorNumElements();
4204 // Expect bit0 == 6, bit1 == 7, bit2 == 2, bit3 == 3
4205 return isUndefOrEqual(Mask[0], 6) &&
4206 isUndefOrEqual(Mask[1], 7) &&
4207 isUndefOrEqual(Mask[2], 2) &&
4208 isUndefOrEqual(Mask[3], 3);
4211 /// isMOVHLPS_v_undef_Mask - Special case of isMOVHLPSMask for canonical form
4212 /// of vector_shuffle v, v, <2, 3, 2, 3>, i.e. vector_shuffle v, undef,
4214 static bool isMOVHLPS_v_undef_Mask(ArrayRef<int> Mask, MVT VT) {
4215 if (!VT.is128BitVector())
4218 unsigned NumElems = VT.getVectorNumElements();
4223 return isUndefOrEqual(Mask[0], 2) &&
4224 isUndefOrEqual(Mask[1], 3) &&
4225 isUndefOrEqual(Mask[2], 2) &&
4226 isUndefOrEqual(Mask[3], 3);
4229 /// isMOVLPMask - Return true if the specified VECTOR_SHUFFLE operand
4230 /// specifies a shuffle of elements that is suitable for input to MOVLP{S|D}.
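/// For example, <4, 5, 2, 3> is a valid v4f32 MOVLPS mask.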
4231 static bool isMOVLPMask(ArrayRef<int> Mask, MVT VT) {
4232 if (!VT.is128BitVector())
4235 unsigned NumElems = VT.getVectorNumElements();
4237 if (NumElems != 2 && NumElems != 4)
4240 for (unsigned i = 0, e = NumElems/2; i != e; ++i)
4241 if (!isUndefOrEqual(Mask[i], i + NumElems))
4244 for (unsigned i = NumElems/2, e = NumElems; i != e; ++i)
4245 if (!isUndefOrEqual(Mask[i], i))
4251 /// isMOVLHPSMask - Return true if the specified VECTOR_SHUFFLE operand
4252 /// specifies a shuffle of elements that is suitable for input to MOVLHPS.
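/// For example, <0, 1, 4, 5> is a valid v4f32 MOVLHPS mask.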
4253 static bool isMOVLHPSMask(ArrayRef<int> Mask, MVT VT) {
4254 if (!VT.is128BitVector())
4257 unsigned NumElems = VT.getVectorNumElements();
4259 if (NumElems != 2 && NumElems != 4)
4262 for (unsigned i = 0, e = NumElems/2; i != e; ++i)
4263 if (!isUndefOrEqual(Mask[i], i))
4266 for (unsigned i = 0, e = NumElems/2; i != e; ++i)
4267 if (!isUndefOrEqual(Mask[i + e], i + NumElems))
4273 /// isINSERTPSMask - Return true if the specified VECTOR_SHUFFLE operand
4274 /// specifies a shuffle of elements that is suitable for input to INSERTPS.
4275 /// i.e. if all but one element come from the same vector.
4276 static bool isINSERTPSMask(ArrayRef<int> Mask, MVT VT) {
4277 // TODO: Deal with AVX's VINSERTPS
4278 if (!VT.is128BitVector() || (VT != MVT::v4f32 && VT != MVT::v4i32))
4281 unsigned CorrectPosV1 = 0;
4282 unsigned CorrectPosV2 = 0;
4283 for (int i = 0, e = (int)VT.getVectorNumElements(); i != e; ++i) {
4284 if (Mask[i] == -1) {
4292 else if (Mask[i] == i + 4)
4296 if (CorrectPosV1 == 3 || CorrectPosV2 == 3)
4297 // We have 3 elements (undefs count as elements from any vector) from one
4298 // vector, and one from another.
4305 // Some special combinations that can be optimized.
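// For example, interleaving the even (or odd) elements of two v8i32/v8f32
// operands can be lowered as a one-element shift of one operand followed by a
// blend; Compact8x32ShuffleNode below recognizes exactly those masks.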
4308 SDValue Compact8x32ShuffleNode(ShuffleVectorSDNode *SVOp,
4309 SelectionDAG &DAG) {
4310 MVT VT = SVOp->getSimpleValueType(0);
4313 if (VT != MVT::v8i32 && VT != MVT::v8f32)
4316 ArrayRef<int> Mask = SVOp->getMask();
4318 // These are the special masks that may be optimized.
4319 static const int MaskToOptimizeEven[] = {0, 8, 2, 10, 4, 12, 6, 14};
4320 static const int MaskToOptimizeOdd[] = {1, 9, 3, 11, 5, 13, 7, 15};
4321 bool MatchEvenMask = true;
4322 bool MatchOddMask = true;
4323 for (int i=0; i<8; ++i) {
4324 if (!isUndefOrEqual(Mask[i], MaskToOptimizeEven[i]))
4325 MatchEvenMask = false;
4326 if (!isUndefOrEqual(Mask[i], MaskToOptimizeOdd[i]))
4327 MatchOddMask = false;
4330 if (!MatchEvenMask && !MatchOddMask)
4333 SDValue UndefNode = DAG.getNode(ISD::UNDEF, dl, VT);
4335 SDValue Op0 = SVOp->getOperand(0);
4336 SDValue Op1 = SVOp->getOperand(1);
4338 if (MatchEvenMask) {
4339 // Shift the second operand right to 32 bits.
4340 static const int ShiftRightMask[] = {-1, 0, -1, 2, -1, 4, -1, 6 };
4341 Op1 = DAG.getVectorShuffle(VT, dl, Op1, UndefNode, ShiftRightMask);
4343 // Shift the first operand left to 32 bits.
4344 static const int ShiftLeftMask[] = {1, -1, 3, -1, 5, -1, 7, -1 };
4345 Op0 = DAG.getVectorShuffle(VT, dl, Op0, UndefNode, ShiftLeftMask);
4347 static const int BlendMask[] = {0, 9, 2, 11, 4, 13, 6, 15};
4348 return DAG.getVectorShuffle(VT, dl, Op0, Op1, BlendMask);
4351 /// isUNPCKLMask - Return true if the specified VECTOR_SHUFFLE operand
4352 /// specifies a shuffle of elements that is suitable for input to UNPCKL.
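/// For example, <0, 4, 1, 5> is a valid v4f32 UNPCKL mask.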
4353 static bool isUNPCKLMask(ArrayRef<int> Mask, MVT VT,
4354 bool HasInt256, bool V2IsSplat = false) {
4356 assert(VT.getSizeInBits() >= 128 &&
4357 "Unsupported vector type for unpckl");
4359 unsigned NumElts = VT.getVectorNumElements();
4360 if (VT.is256BitVector() && NumElts != 4 && NumElts != 8 &&
4361 (!HasInt256 || (NumElts != 16 && NumElts != 32)))
4364 assert((!VT.is512BitVector() || VT.getScalarType().getSizeInBits() >= 32) &&
4365 "Unsupported vector type for unpckh");
4367 // AVX defines UNPCK* to operate independently on 128-bit lanes.
4368 unsigned NumLanes = VT.getSizeInBits()/128;
4369 unsigned NumLaneElts = NumElts/NumLanes;
4371 for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
4372 for (unsigned i = 0, j = l; i != NumLaneElts; i += 2, ++j) {
4373 int BitI = Mask[l+i];
4374 int BitI1 = Mask[l+i+1];
4375 if (!isUndefOrEqual(BitI, j))
4378 if (!isUndefOrEqual(BitI1, NumElts))
4381 if (!isUndefOrEqual(BitI1, j + NumElts))
4390 /// isUNPCKHMask - Return true if the specified VECTOR_SHUFFLE operand
4391 /// specifies a shuffle of elements that is suitable for input to UNPCKH.
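/// For example, <2, 6, 3, 7> is a valid v4f32 UNPCKH mask.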
4392 static bool isUNPCKHMask(ArrayRef<int> Mask, MVT VT,
4393 bool HasInt256, bool V2IsSplat = false) {
4394 assert(VT.getSizeInBits() >= 128 &&
4395 "Unsupported vector type for unpckh");
4397 unsigned NumElts = VT.getVectorNumElements();
4398 if (VT.is256BitVector() && NumElts != 4 && NumElts != 8 &&
4399 (!HasInt256 || (NumElts != 16 && NumElts != 32)))
4402 assert((!VT.is512BitVector() || VT.getScalarType().getSizeInBits() >= 32) &&
4403 "Unsupported vector type for unpckh");
4405 // AVX defines UNPCK* to operate independently on 128-bit lanes.
4406 unsigned NumLanes = VT.getSizeInBits()/128;
4407 unsigned NumLaneElts = NumElts/NumLanes;
4409 for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
4410 for (unsigned i = 0, j = l+NumLaneElts/2; i != NumLaneElts; i += 2, ++j) {
4411 int BitI = Mask[l+i];
4412 int BitI1 = Mask[l+i+1];
4413 if (!isUndefOrEqual(BitI, j))
4416 if (isUndefOrEqual(BitI1, NumElts))
4419 if (!isUndefOrEqual(BitI1, j+NumElts))
4427 /// isUNPCKL_v_undef_Mask - Special case of isUNPCKLMask for canonical form
4428 /// of vector_shuffle v, v, <0, 4, 1, 5>, i.e. vector_shuffle v, undef,
4430 static bool isUNPCKL_v_undef_Mask(ArrayRef<int> Mask, MVT VT, bool HasInt256) {
4431 unsigned NumElts = VT.getVectorNumElements();
4432 bool Is256BitVec = VT.is256BitVector();
4434 if (VT.is512BitVector())
4436 assert((VT.is128BitVector() || VT.is256BitVector()) &&
4437 "Unsupported vector type for unpckh");
4439 if (Is256BitVec && NumElts != 4 && NumElts != 8 &&
4440 (!HasInt256 || (NumElts != 16 && NumElts != 32)))
4443 // For 256-bit i64/f64, use MOVDDUPY instead, so reject the matching pattern
4444 // FIXME: Need a better way to get rid of this, there's no latency difference
4445 // between UNPCKLPD and MOVDDUP; the latter should always be checked first and
4446 // the former later. We should also remove the "_undef" special mask.
4447 if (NumElts == 4 && Is256BitVec)
4450 // Handle 128 and 256-bit vector lengths. AVX defines UNPCK* to operate
4451 // independently on 128-bit lanes.
4452 unsigned NumLanes = VT.getSizeInBits()/128;
4453 unsigned NumLaneElts = NumElts/NumLanes;
4455 for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
4456 for (unsigned i = 0, j = l; i != NumLaneElts; i += 2, ++j) {
4457 int BitI = Mask[l+i];
4458 int BitI1 = Mask[l+i+1];
4460 if (!isUndefOrEqual(BitI, j))
4462 if (!isUndefOrEqual(BitI1, j))
4470 /// isUNPCKH_v_undef_Mask - Special case of isUNPCKHMask for canonical form
4471 /// of vector_shuffle v, v, <2, 6, 3, 7>, i.e. vector_shuffle v, undef,
4473 static bool isUNPCKH_v_undef_Mask(ArrayRef<int> Mask, MVT VT, bool HasInt256) {
4474 unsigned NumElts = VT.getVectorNumElements();
4476 if (VT.is512BitVector())
4479 assert((VT.is128BitVector() || VT.is256BitVector()) &&
4480 "Unsupported vector type for unpckh");
4482 if (VT.is256BitVector() && NumElts != 4 && NumElts != 8 &&
4483 (!HasInt256 || (NumElts != 16 && NumElts != 32)))
4486 // Handle 128 and 256-bit vector lengths. AVX defines UNPCK* to operate
4487 // independently on 128-bit lanes.
4488 unsigned NumLanes = VT.getSizeInBits()/128;
4489 unsigned NumLaneElts = NumElts/NumLanes;
4491 for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
4492 for (unsigned i = 0, j = l+NumLaneElts/2; i != NumLaneElts; i += 2, ++j) {
4493 int BitI = Mask[l+i];
4494 int BitI1 = Mask[l+i+1];
4495 if (!isUndefOrEqual(BitI, j))
4497 if (!isUndefOrEqual(BitI1, j))
4504 // Match for INSERTI64x4/INSERTF64x4 instructions, i.e. (src0[0], src1[0]) or
4505 // (src1[0], src0[1]): manipulations of the 256-bit sub-vectors of a 512-bit vector.
4506 static bool isINSERT64x4Mask(ArrayRef<int> Mask, MVT VT, unsigned int *Imm) {
4507 if (!VT.is512BitVector())
4510 unsigned NumElts = VT.getVectorNumElements();
4511 unsigned HalfSize = NumElts/2;
4512 if (isSequentialOrUndefInRange(Mask, 0, HalfSize, 0)) {
4513 if (isSequentialOrUndefInRange(Mask, HalfSize, HalfSize, NumElts)) {
4518 if (isSequentialOrUndefInRange(Mask, 0, HalfSize, NumElts)) {
4519 if (isSequentialOrUndefInRange(Mask, HalfSize, HalfSize, HalfSize)) {
4527 /// isMOVLMask - Return true if the specified VECTOR_SHUFFLE operand
4528 /// specifies a shuffle of elements that is suitable for input to MOVSS,
4529 /// MOVSD, and MOVD, i.e. setting the lowest element.
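/// For example, <4, 1, 2, 3> is a valid v4f32 MOVSS mask.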
4530 static bool isMOVLMask(ArrayRef<int> Mask, EVT VT) {
4531 if (VT.getVectorElementType().getSizeInBits() < 32)
4533 if (!VT.is128BitVector())
4536 unsigned NumElts = VT.getVectorNumElements();
4538 if (!isUndefOrEqual(Mask[0], NumElts))
4541 for (unsigned i = 1; i != NumElts; ++i)
4542 if (!isUndefOrEqual(Mask[i], i))
4548 /// isVPERM2X128Mask - Match 256-bit shuffles where the elements are considered
4549 /// as permutations between 128-bit chunks or halves. As an example: this
4551 /// vector_shuffle <4, 5, 6, 7, 12, 13, 14, 15>
4552 /// The first half comes from the second half of V1 and the second half from
4553 /// the second half of V2.
4554 static bool isVPERM2X128Mask(ArrayRef<int> Mask, MVT VT, bool HasFp256) {
4555 if (!HasFp256 || !VT.is256BitVector())
4558 // The shuffle result is divided into half A and half B. In total the two
4559 // sources have 4 halves, namely: C, D, E, F. The final values of A and
4560 // B must come from C, D, E or F.
4561 unsigned HalfSize = VT.getVectorNumElements()/2;
4562 bool MatchA = false, MatchB = false;
4564 // Check if A comes from one of C, D, E, F.
4565 for (unsigned Half = 0; Half != 4; ++Half) {
4566 if (isSequentialOrUndefInRange(Mask, 0, HalfSize, Half*HalfSize)) {
4572 // Check if B comes from one of C, D, E, F.
4573 for (unsigned Half = 0; Half != 4; ++Half) {
4574 if (isSequentialOrUndefInRange(Mask, HalfSize, HalfSize, Half*HalfSize)) {
4580 return MatchA && MatchB;
4583 /// getShuffleVPERM2X128Immediate - Return the appropriate immediate to shuffle
4584 /// the specified VECTOR_SHUFFLE mask with the VPERM2F128/VPERM2I128 instructions.
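/// For the v8i32 mask <4, 5, 6, 7, 12, 13, 14, 15> above, FstHalf is 1 and
/// SndHalf is 3, giving an immediate of 0x31 (low half of the result from V1's
/// high half, high half of the result from V2's high half).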
4585 static unsigned getShuffleVPERM2X128Immediate(ShuffleVectorSDNode *SVOp) {
4586 MVT VT = SVOp->getSimpleValueType(0);
4588 unsigned HalfSize = VT.getVectorNumElements()/2;
4590 unsigned FstHalf = 0, SndHalf = 0;
4591 for (unsigned i = 0; i < HalfSize; ++i) {
4592 if (SVOp->getMaskElt(i) > 0) {
4593 FstHalf = SVOp->getMaskElt(i)/HalfSize;
4597 for (unsigned i = HalfSize; i < HalfSize*2; ++i) {
4598 if (SVOp->getMaskElt(i) > 0) {
4599 SndHalf = SVOp->getMaskElt(i)/HalfSize;
4604 return (FstHalf | (SndHalf << 4));
4607 // Symmetric in-lane mask. Each lane has 4 elements (for imm8)
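// e.g. the v8f32 mask <1, 0, 3, 2, 5, 4, 7, 6> repeats <1, 0, 3, 2> in both
// lanes and encodes as Imm8 = (1 << 0) | (0 << 2) | (3 << 4) | (2 << 6) = 0xB1.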
4608 static bool isPermImmMask(ArrayRef<int> Mask, MVT VT, unsigned& Imm8) {
4609 unsigned EltSize = VT.getVectorElementType().getSizeInBits();
4613 unsigned NumElts = VT.getVectorNumElements();
4615 if (VT.is128BitVector() || (VT.is256BitVector() && EltSize == 64)) {
4616 for (unsigned i = 0; i != NumElts; ++i) {
4619 Imm8 |= Mask[i] << (i*2);
4624 unsigned LaneSize = 4;
4625 SmallVector<int, 4> MaskVal(LaneSize, -1);
4627 for (unsigned l = 0; l != NumElts; l += LaneSize) {
4628 for (unsigned i = 0; i != LaneSize; ++i) {
4629 if (!isUndefOrInRange(Mask[i+l], l, l+LaneSize))
4633 if (MaskVal[i] < 0) {
4634 MaskVal[i] = Mask[i+l] - l;
4635 Imm8 |= MaskVal[i] << (i*2);
4638 if (Mask[i+l] != (signed)(MaskVal[i]+l))
4645 /// isVPERMILPMask - Return true if the specified VECTOR_SHUFFLE operand
4646 /// specifies a shuffle of elements that is suitable for input to VPERMILPD*.
/// Note that VPERMIL mask matching is different depending on whether the
/// underlying type is 32- or 64-bit. For VPERMILPS the high half of the mask
/// should point to the same elements as the low half, but within the higher
/// half of the source. For VPERMILPD the two lanes can be shuffled
/// independently of each other, with the same restriction that lanes can't be
/// crossed. Also handles PSHUFDY.
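/// For example, for v8f32 the mask <1, 0, 3, 2, 5, 4, 7, 6> is valid: both
/// 128-bit lanes use the same in-lane pattern <1, 0, 3, 2>.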
4652 static bool isVPERMILPMask(ArrayRef<int> Mask, MVT VT) {
4653 unsigned EltSize = VT.getVectorElementType().getSizeInBits();
4654 if (VT.getSizeInBits() < 256 || EltSize < 32)
4656 bool symmetricMaskRequired = (EltSize == 32);
4657 unsigned NumElts = VT.getVectorNumElements();
4659 unsigned NumLanes = VT.getSizeInBits()/128;
4660 unsigned LaneSize = NumElts/NumLanes;
4661 // 2 or 4 elements in one lane
4663 SmallVector<int, 4> ExpectedMaskVal(LaneSize, -1);
4664 for (unsigned l = 0; l != NumElts; l += LaneSize) {
4665 for (unsigned i = 0; i != LaneSize; ++i) {
4666 if (!isUndefOrInRange(Mask[i+l], l, l+LaneSize))
4668 if (symmetricMaskRequired) {
4669 if (ExpectedMaskVal[i] < 0 && Mask[i+l] >= 0) {
4670 ExpectedMaskVal[i] = Mask[i+l] - l;
4673 if (!isUndefOrEqual(Mask[i+l], ExpectedMaskVal[i]+l))
/// isCommutedMOVLMask - Returns true if the shuffle mask is the reverse of
/// what x86 MOVSS/MOVSD wants: MOVL requires the lowest element to be the
/// lowest element of vector 2 and the other elements to come from vector 1 in
/// order, so the commuted form takes the lowest element from vector 1 and the
/// remaining elements from vector 2 in order.
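/// For example, <0, 5, 6, 7> matches for a v4i32 shuffle.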
4684 static bool isCommutedMOVLMask(ArrayRef<int> Mask, MVT VT,
4685 bool V2IsSplat = false, bool V2IsUndef = false) {
4686 if (!VT.is128BitVector())
4689 unsigned NumOps = VT.getVectorNumElements();
4690 if (NumOps != 2 && NumOps != 4 && NumOps != 8 && NumOps != 16)
4693 if (!isUndefOrEqual(Mask[0], 0))
4696 for (unsigned i = 1; i != NumOps; ++i)
4697 if (!(isUndefOrEqual(Mask[i], i+NumOps) ||
4698 (V2IsUndef && isUndefOrInRange(Mask[i], NumOps, NumOps*2)) ||
4699 (V2IsSplat && isUndefOrEqual(Mask[i], NumOps))))
4705 /// isMOVSHDUPMask - Return true if the specified VECTOR_SHUFFLE operand
4706 /// specifies a shuffle of elements that is suitable for input to MOVSHDUP.
4707 /// Masks to match: <1, 1, 3, 3> or <1, 1, 3, 3, 5, 5, 7, 7>
4708 static bool isMOVSHDUPMask(ArrayRef<int> Mask, MVT VT,
4709 const X86Subtarget *Subtarget) {
4710 if (!Subtarget->hasSSE3())
4713 unsigned NumElems = VT.getVectorNumElements();
4715 if ((VT.is128BitVector() && NumElems != 4) ||
4716 (VT.is256BitVector() && NumElems != 8) ||
4717 (VT.is512BitVector() && NumElems != 16))
4720 // "i+1" is the value the indexed mask element must have
4721 for (unsigned i = 0; i != NumElems; i += 2)
4722 if (!isUndefOrEqual(Mask[i], i+1) ||
4723 !isUndefOrEqual(Mask[i+1], i+1))
4729 /// isMOVSLDUPMask - Return true if the specified VECTOR_SHUFFLE operand
4730 /// specifies a shuffle of elements that is suitable for input to MOVSLDUP.
4731 /// Masks to match: <0, 0, 2, 2> or <0, 0, 2, 2, 4, 4, 6, 6>
4732 static bool isMOVSLDUPMask(ArrayRef<int> Mask, MVT VT,
4733 const X86Subtarget *Subtarget) {
4734 if (!Subtarget->hasSSE3())
4737 unsigned NumElems = VT.getVectorNumElements();
4739 if ((VT.is128BitVector() && NumElems != 4) ||
4740 (VT.is256BitVector() && NumElems != 8) ||
4741 (VT.is512BitVector() && NumElems != 16))
4744 // "i" is the value the indexed mask element must have
4745 for (unsigned i = 0; i != NumElems; i += 2)
4746 if (!isUndefOrEqual(Mask[i], i) ||
4747 !isUndefOrEqual(Mask[i+1], i))
4753 /// isMOVDDUPYMask - Return true if the specified VECTOR_SHUFFLE operand
4754 /// specifies a shuffle of elements that is suitable for input to 256-bit
4755 /// version of MOVDDUP.
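/// For example, <0, 0, 2, 2> matches for v4f64.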
4756 static bool isMOVDDUPYMask(ArrayRef<int> Mask, MVT VT, bool HasFp256) {
4757 if (!HasFp256 || !VT.is256BitVector())
4760 unsigned NumElts = VT.getVectorNumElements();
4764 for (unsigned i = 0; i != NumElts/2; ++i)
4765 if (!isUndefOrEqual(Mask[i], 0))
4767 for (unsigned i = NumElts/2; i != NumElts; ++i)
4768 if (!isUndefOrEqual(Mask[i], NumElts/2))
4773 /// isMOVDDUPMask - Return true if the specified VECTOR_SHUFFLE operand
4774 /// specifies a shuffle of elements that is suitable for input to 128-bit
4775 /// version of MOVDDUP.
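/// For example, <0, 0> matches for v2f64.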
4776 static bool isMOVDDUPMask(ArrayRef<int> Mask, MVT VT) {
4777 if (!VT.is128BitVector())
4780 unsigned e = VT.getVectorNumElements() / 2;
4781 for (unsigned i = 0; i != e; ++i)
4782 if (!isUndefOrEqual(Mask[i], i))
4784 for (unsigned i = 0; i != e; ++i)
4785 if (!isUndefOrEqual(Mask[e+i], i))
4790 /// isVEXTRACTIndex - Return true if the specified
4791 /// EXTRACT_SUBVECTOR operand specifies a vector extract that is
4792 /// suitable for instruction that extract 128 or 256 bit vectors
4793 static bool isVEXTRACTIndex(SDNode *N, unsigned vecWidth) {
4794 assert((vecWidth == 128 || vecWidth == 256) && "Unexpected vector width");
4795 if (!isa<ConstantSDNode>(N->getOperand(1).getNode()))
4798 // The index should be aligned on a vecWidth-bit boundary.
4800 cast<ConstantSDNode>(N->getOperand(1).getNode())->getZExtValue();
4802 MVT VT = N->getSimpleValueType(0);
4803 unsigned ElSize = VT.getVectorElementType().getSizeInBits();
4804 bool Result = (Index * ElSize) % vecWidth == 0;
4809 /// isVINSERTIndex - Return true if the specified INSERT_SUBVECTOR
4810 /// operand specifies a subvector insert that is suitable for input to
4811 /// insertion of 128 or 256-bit subvectors
4812 static bool isVINSERTIndex(SDNode *N, unsigned vecWidth) {
4813 assert((vecWidth == 128 || vecWidth == 256) && "Unexpected vector width");
4814 if (!isa<ConstantSDNode>(N->getOperand(2).getNode()))
4816 // The index should be aligned on a vecWidth-bit boundary.
4818 cast<ConstantSDNode>(N->getOperand(2).getNode())->getZExtValue();
4820 MVT VT = N->getSimpleValueType(0);
4821 unsigned ElSize = VT.getVectorElementType().getSizeInBits();
4822 bool Result = (Index * ElSize) % vecWidth == 0;
4827 bool X86::isVINSERT128Index(SDNode *N) {
4828 return isVINSERTIndex(N, 128);
4831 bool X86::isVINSERT256Index(SDNode *N) {
4832 return isVINSERTIndex(N, 256);
4835 bool X86::isVEXTRACT128Index(SDNode *N) {
4836 return isVEXTRACTIndex(N, 128);
4839 bool X86::isVEXTRACT256Index(SDNode *N) {
4840 return isVEXTRACTIndex(N, 256);
4843 /// getShuffleSHUFImmediate - Return the appropriate immediate to shuffle
4844 /// the specified VECTOR_SHUFFLE mask with PSHUF* and SHUFP* instructions.
4845 /// Handles 128-bit and 256-bit.
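/// For example, a v4f32 mask <3, 2, 1, 0> encodes as 0b00011011 = 0x1B.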
4846 static unsigned getShuffleSHUFImmediate(ShuffleVectorSDNode *N) {
4847 MVT VT = N->getSimpleValueType(0);
4849 assert((VT.getSizeInBits() >= 128) &&
4850 "Unsupported vector type for PSHUF/SHUFP");
4852 // Handle 128 and 256-bit vector lengths. AVX defines PSHUF/SHUFP to operate
4853 // independently on 128-bit lanes.
4854 unsigned NumElts = VT.getVectorNumElements();
4855 unsigned NumLanes = VT.getSizeInBits()/128;
4856 unsigned NumLaneElts = NumElts/NumLanes;
4858 assert((NumLaneElts == 2 || NumLaneElts == 4 || NumLaneElts == 8) &&
4859 "Only supports 2, 4 or 8 elements per lane");
4861 unsigned Shift = (NumLaneElts >= 4) ? 1 : 0;
4863 for (unsigned i = 0; i != NumElts; ++i) {
4864 int Elt = N->getMaskElt(i);
4865 if (Elt < 0) continue;
4866 Elt &= NumLaneElts - 1;
4867 unsigned ShAmt = (i << Shift) % 8;
4868 Mask |= Elt << ShAmt;
4874 /// getShufflePSHUFHWImmediate - Return the appropriate immediate to shuffle
4875 /// the specified VECTOR_SHUFFLE mask with the PSHUFHW instruction.
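/// For example, a v8i16 mask <0, 1, 2, 3, 7, 6, 5, 4> (reverse the high quad)
/// encodes as 0x1B.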
4876 static unsigned getShufflePSHUFHWImmediate(ShuffleVectorSDNode *N) {
4877 MVT VT = N->getSimpleValueType(0);
4879 assert((VT == MVT::v8i16 || VT == MVT::v16i16) &&
4880 "Unsupported vector type for PSHUFHW");
4882 unsigned NumElts = VT.getVectorNumElements();
4885 for (unsigned l = 0; l != NumElts; l += 8) {
// 8 elements per lane, but we only care about the last 4.
4887 for (unsigned i = 0; i < 4; ++i) {
4888 int Elt = N->getMaskElt(l+i+4);
4889 if (Elt < 0) continue;
4890 Elt &= 0x3; // only 2-bits.
4891 Mask |= Elt << (i * 2);
4898 /// getShufflePSHUFLWImmediate - Return the appropriate immediate to shuffle
4899 /// the specified VECTOR_SHUFFLE mask with the PSHUFLW instruction.
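/// For example, a v8i16 mask <3, 2, 1, 0, 4, 5, 6, 7> (reverse the low quad)
/// encodes as 0x1B.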
4900 static unsigned getShufflePSHUFLWImmediate(ShuffleVectorSDNode *N) {
4901 MVT VT = N->getSimpleValueType(0);
4903 assert((VT == MVT::v8i16 || VT == MVT::v16i16) &&
4904 "Unsupported vector type for PSHUFHW");
4906 unsigned NumElts = VT.getVectorNumElements();
4909 for (unsigned l = 0; l != NumElts; l += 8) {
// 8 elements per lane, but we only care about the first 4.
4911 for (unsigned i = 0; i < 4; ++i) {
4912 int Elt = N->getMaskElt(l+i);
4913 if (Elt < 0) continue;
4914 Elt &= 0x3; // only 2-bits
4915 Mask |= Elt << (i * 2);
4922 /// \brief Return the appropriate immediate to shuffle the specified
4923 /// VECTOR_SHUFFLE mask with the PALIGNR (if InterLane is false) or with
/// VALIGN (if InterLane is true) instructions.
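/// For example, a v8i16 mask <1, 2, 3, 4, 5, 6, 7, 8> yields a PALIGNR
/// immediate of (1 - 0) * 2 = 2 bytes.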
4925 static unsigned getShuffleAlignrImmediate(ShuffleVectorSDNode *SVOp,
4927 MVT VT = SVOp->getSimpleValueType(0);
4928 unsigned EltSize = InterLane ? 1 :
4929 VT.getVectorElementType().getSizeInBits() >> 3;
4931 unsigned NumElts = VT.getVectorNumElements();
4932 unsigned NumLanes = VT.is512BitVector() ? 1 : VT.getSizeInBits()/128;
4933 unsigned NumLaneElts = NumElts/NumLanes;
4937 for (i = 0; i != NumElts; ++i) {
4938 Val = SVOp->getMaskElt(i);
4942 if (Val >= (int)NumElts)
4943 Val -= NumElts - NumLaneElts;
4945 assert(Val - i > 0 && "PALIGNR imm should be positive");
4946 return (Val - i) * EltSize;
4949 /// \brief Return the appropriate immediate to shuffle the specified
4950 /// VECTOR_SHUFFLE mask with the PALIGNR instruction.
4951 static unsigned getShufflePALIGNRImmediate(ShuffleVectorSDNode *SVOp) {
4952 return getShuffleAlignrImmediate(SVOp, false);
4955 /// \brief Return the appropriate immediate to shuffle the specified
4956 /// VECTOR_SHUFFLE mask with the VALIGN instruction.
4957 static unsigned getShuffleVALIGNImmediate(ShuffleVectorSDNode *SVOp) {
4958 return getShuffleAlignrImmediate(SVOp, true);
4962 static unsigned getExtractVEXTRACTImmediate(SDNode *N, unsigned vecWidth) {
4963 assert((vecWidth == 128 || vecWidth == 256) && "Unsupported vector width");
4964 if (!isa<ConstantSDNode>(N->getOperand(1).getNode()))
4965 llvm_unreachable("Illegal extract subvector for VEXTRACT");
4968 cast<ConstantSDNode>(N->getOperand(1).getNode())->getZExtValue();
4970 MVT VecVT = N->getOperand(0).getSimpleValueType();
4971 MVT ElVT = VecVT.getVectorElementType();
4973 unsigned NumElemsPerChunk = vecWidth / ElVT.getSizeInBits();
4974 return Index / NumElemsPerChunk;
4977 static unsigned getInsertVINSERTImmediate(SDNode *N, unsigned vecWidth) {
4978 assert((vecWidth == 128 || vecWidth == 256) && "Unsupported vector width");
4979 if (!isa<ConstantSDNode>(N->getOperand(2).getNode()))
4980 llvm_unreachable("Illegal insert subvector for VINSERT");
4983 cast<ConstantSDNode>(N->getOperand(2).getNode())->getZExtValue();
4985 MVT VecVT = N->getSimpleValueType(0);
4986 MVT ElVT = VecVT.getVectorElementType();
4988 unsigned NumElemsPerChunk = vecWidth / ElVT.getSizeInBits();
4989 return Index / NumElemsPerChunk;
4992 /// getExtractVEXTRACT128Immediate - Return the appropriate immediate
4993 /// to extract the specified EXTRACT_SUBVECTOR index with VEXTRACTF128
/// and VEXTRACTI128 instructions.
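/// For example, extracting elements [4, 8) of a v8f32 yields immediate 1.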
4995 unsigned X86::getExtractVEXTRACT128Immediate(SDNode *N) {
4996 return getExtractVEXTRACTImmediate(N, 128);
4999 /// getExtractVEXTRACT256Immediate - Return the appropriate immediate
5000 /// to extract the specified EXTRACT_SUBVECTOR index with VEXTRACTF64x4
/// and VEXTRACTI64x4 instructions.
5002 unsigned X86::getExtractVEXTRACT256Immediate(SDNode *N) {
5003 return getExtractVEXTRACTImmediate(N, 256);
5006 /// getInsertVINSERT128Immediate - Return the appropriate immediate
5007 /// to insert at the specified INSERT_SUBVECTOR index with VINSERTF128
5008 /// and VINSERTI128 instructions.
5009 unsigned X86::getInsertVINSERT128Immediate(SDNode *N) {
5010 return getInsertVINSERTImmediate(N, 128);
5013 /// getInsertVINSERT256Immediate - Return the appropriate immediate
/// to insert at the specified INSERT_SUBVECTOR index with VINSERTF64x4
5015 /// and VINSERTI64x4 instructions.
5016 unsigned X86::getInsertVINSERT256Immediate(SDNode *N) {
5017 return getInsertVINSERTImmediate(N, 256);
/// isZero - Returns true if V is a constant integer zero
5021 static bool isZero(SDValue V) {
5022 ConstantSDNode *C = dyn_cast<ConstantSDNode>(V);
5023 return C && C->isNullValue();
/// isZeroNode - Returns true if Elt is a constant integer zero or a floating
/// point constant +0.0.
5028 bool X86::isZeroNode(SDValue Elt) {
5031 if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Elt))
5032 return CFP->getValueAPF().isPosZero();
5036 /// ShouldXformToMOVHLPS - Return true if the node should be transformed to
5037 /// match movhlps. The lower half elements should come from upper half of
5038 /// V1 (and in order), and the upper half elements should come from the upper
5039 /// half of V2 (and in order).
5040 static bool ShouldXformToMOVHLPS(ArrayRef<int> Mask, MVT VT) {
5041 if (!VT.is128BitVector())
5043 if (VT.getVectorNumElements() != 4)
5045 for (unsigned i = 0, e = 2; i != e; ++i)
5046 if (!isUndefOrEqual(Mask[i], i+2))
5048 for (unsigned i = 2; i != 4; ++i)
5049 if (!isUndefOrEqual(Mask[i], i+4))
5054 /// isScalarLoadToVector - Returns true if the node is a scalar load that
/// is promoted to a vector. It also returns the LoadSDNode by reference if
/// required.
5057 static bool isScalarLoadToVector(SDNode *N, LoadSDNode **LD = nullptr) {
5058 if (N->getOpcode() != ISD::SCALAR_TO_VECTOR)
5060 N = N->getOperand(0).getNode();
5061 if (!ISD::isNON_EXTLoad(N))
5064 *LD = cast<LoadSDNode>(N);
// Test whether the given value is a vector value which will be legalized
// into a load from the constant pool.
5070 static bool WillBeConstantPoolLoad(SDNode *N) {
5071 if (N->getOpcode() != ISD::BUILD_VECTOR)
5074 // Check for any non-constant elements.
5075 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
5076 switch (N->getOperand(i).getNode()->getOpcode()) {
5078 case ISD::ConstantFP:
5085 // Vectors of all-zeros and all-ones are materialized with special
5086 // instructions rather than being loaded.
5087 return !ISD::isBuildVectorAllZeros(N) &&
5088 !ISD::isBuildVectorAllOnes(N);
5091 /// ShouldXformToMOVLP{S|D} - Return true if the node should be transformed to
5092 /// match movlp{s|d}. The lower half elements should come from lower half of
5093 /// V1 (and in order), and the upper half elements should come from the upper
5094 /// half of V2 (and in order). And since V1 will become the source of the
5095 /// MOVLP, it must be either a vector load or a scalar load to vector.
5096 static bool ShouldXformToMOVLP(SDNode *V1, SDNode *V2,
5097 ArrayRef<int> Mask, MVT VT) {
5098 if (!VT.is128BitVector())
5101 if (!ISD::isNON_EXTLoad(V1) && !isScalarLoadToVector(V1))
// If V2 is a vector load, don't do this transformation. We will try to use a
// load-folding shufps op instead.
5105 if (ISD::isNON_EXTLoad(V2) || WillBeConstantPoolLoad(V2))
5108 unsigned NumElems = VT.getVectorNumElements();
5110 if (NumElems != 2 && NumElems != 4)
5112 for (unsigned i = 0, e = NumElems/2; i != e; ++i)
5113 if (!isUndefOrEqual(Mask[i], i))
5115 for (unsigned i = NumElems/2, e = NumElems; i != e; ++i)
5116 if (!isUndefOrEqual(Mask[i], i+NumElems))
5121 /// isZeroShuffle - Returns true if N is a VECTOR_SHUFFLE that can be resolved
/// to a zero vector.
5123 /// FIXME: move to dag combiner / method on ShuffleVectorSDNode
5124 static bool isZeroShuffle(ShuffleVectorSDNode *N) {
5125 SDValue V1 = N->getOperand(0);
5126 SDValue V2 = N->getOperand(1);
5127 unsigned NumElems = N->getValueType(0).getVectorNumElements();
5128 for (unsigned i = 0; i != NumElems; ++i) {
5129 int Idx = N->getMaskElt(i);
5130 if (Idx >= (int)NumElems) {
5131 unsigned Opc = V2.getOpcode();
5132 if (Opc == ISD::UNDEF || ISD::isBuildVectorAllZeros(V2.getNode()))
5134 if (Opc != ISD::BUILD_VECTOR ||
5135 !X86::isZeroNode(V2.getOperand(Idx-NumElems)))
5137 } else if (Idx >= 0) {
5138 unsigned Opc = V1.getOpcode();
5139 if (Opc == ISD::UNDEF || ISD::isBuildVectorAllZeros(V1.getNode()))
5141 if (Opc != ISD::BUILD_VECTOR ||
5142 !X86::isZeroNode(V1.getOperand(Idx)))
5149 /// getZeroVector - Returns a vector of specified type with all zero elements.
5151 static SDValue getZeroVector(EVT VT, const X86Subtarget *Subtarget,
5152 SelectionDAG &DAG, SDLoc dl) {
5153 assert(VT.isVector() && "Expected a vector type");
5155 // Always build SSE zero vectors as <4 x i32> bitcasted
5156 // to their dest type. This ensures they get CSE'd.
5158 if (VT.is128BitVector()) { // SSE
5159 if (Subtarget->hasSSE2()) { // SSE2
5160 SDValue Cst = DAG.getConstant(0, MVT::i32);
5161 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Cst, Cst, Cst, Cst);
5163 SDValue Cst = DAG.getConstantFP(+0.0, MVT::f32);
5164 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4f32, Cst, Cst, Cst, Cst);
5166 } else if (VT.is256BitVector()) { // AVX
5167 if (Subtarget->hasInt256()) { // AVX2
5168 SDValue Cst = DAG.getConstant(0, MVT::i32);
5169 SDValue Ops[] = { Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst };
5170 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v8i32, Ops);
5172 // 256-bit logic and arithmetic instructions in AVX are all
5173 // floating-point, no support for integer ops. Emit fp zeroed vectors.
5174 SDValue Cst = DAG.getConstantFP(+0.0, MVT::f32);
5175 SDValue Ops[] = { Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst };
5176 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v8f32, Ops);
5178 } else if (VT.is512BitVector()) { // AVX-512
5179 SDValue Cst = DAG.getConstant(0, MVT::i32);
5180 SDValue Ops[] = { Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst,
5181 Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst };
5182 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v16i32, Ops);
5183 } else if (VT.getScalarType() == MVT::i1) {
5184 assert(VT.getVectorNumElements() <= 16 && "Unexpected vector type");
5185 SDValue Cst = DAG.getConstant(0, MVT::i1);
5186 SmallVector<SDValue, 16> Ops(VT.getVectorNumElements(), Cst);
5187 return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Ops);
5189 llvm_unreachable("Unexpected vector type");
5191 return DAG.getNode(ISD::BITCAST, dl, VT, Vec);
5194 /// getOnesVector - Returns a vector of specified type with all bits set.
/// Always build ones vectors as <4 x i32> or <8 x i32>. For 256-bit types with
/// no AVX2 support, use two <4 x i32>s inserted into an <8 x i32> appropriately.
5197 /// Then bitcast to their original type, ensuring they get CSE'd.
5198 static SDValue getOnesVector(MVT VT, bool HasInt256, SelectionDAG &DAG,
5200 assert(VT.isVector() && "Expected a vector type");
5202 SDValue Cst = DAG.getConstant(~0U, MVT::i32);
5204 if (VT.is256BitVector()) {
5205 if (HasInt256) { // AVX2
5206 SDValue Ops[] = { Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst };
5207 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v8i32, Ops);
5209 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Cst, Cst, Cst, Cst);
5210 Vec = Concat128BitVectors(Vec, Vec, MVT::v8i32, 8, DAG, dl);
5212 } else if (VT.is128BitVector()) {
5213 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Cst, Cst, Cst, Cst);
5215 llvm_unreachable("Unexpected vector type");
5217 return DAG.getNode(ISD::BITCAST, dl, VT, Vec);
/// NormalizeMask - V2 is a splat; modify the mask (if needed) so all elements
/// that point to V2 point to its first element.
5222 static void NormalizeMask(SmallVectorImpl<int> &Mask, unsigned NumElems) {
5223 for (unsigned i = 0; i != NumElems; ++i) {
5224 if (Mask[i] > (int)NumElems) {
5230 /// getMOVLMask - Returns a vector_shuffle mask for an movs{s|d}, movd
5231 /// operation of specified width.
5232 static SDValue getMOVL(SelectionDAG &DAG, SDLoc dl, EVT VT, SDValue V1,
5234 unsigned NumElems = VT.getVectorNumElements();
5235 SmallVector<int, 8> Mask;
5236 Mask.push_back(NumElems);
5237 for (unsigned i = 1; i != NumElems; ++i)
5239 return DAG.getVectorShuffle(VT, dl, V1, V2, &Mask[0]);
5242 /// getUnpackl - Returns a vector_shuffle node for an unpackl operation.
5243 static SDValue getUnpackl(SelectionDAG &DAG, SDLoc dl, MVT VT, SDValue V1,
5245 unsigned NumElems = VT.getVectorNumElements();
5246 SmallVector<int, 8> Mask;
5247 for (unsigned i = 0, e = NumElems/2; i != e; ++i) {
5249 Mask.push_back(i + NumElems);
5251 return DAG.getVectorShuffle(VT, dl, V1, V2, &Mask[0]);
5254 /// getUnpackh - Returns a vector_shuffle node for an unpackh operation.
5255 static SDValue getUnpackh(SelectionDAG &DAG, SDLoc dl, MVT VT, SDValue V1,
5257 unsigned NumElems = VT.getVectorNumElements();
5258 SmallVector<int, 8> Mask;
5259 for (unsigned i = 0, Half = NumElems/2; i != Half; ++i) {
5260 Mask.push_back(i + Half);
5261 Mask.push_back(i + NumElems + Half);
5263 return DAG.getVectorShuffle(VT, dl, V1, V2, &Mask[0]);
5266 // PromoteSplati8i16 - All i16 and i8 vector types can't be used directly by
5267 // a generic shuffle instruction because the target has no such instructions.
5268 // Generate shuffles which repeat i16 and i8 several times until they can be
// represented by v4f32 and then be manipulated by target supported shuffles.
5270 static SDValue PromoteSplati8i16(SDValue V, SelectionDAG &DAG, int &EltNo) {
5271 MVT VT = V.getSimpleValueType();
5272 int NumElems = VT.getVectorNumElements();
5275 while (NumElems > 4) {
5276 if (EltNo < NumElems/2) {
5277 V = getUnpackl(DAG, dl, VT, V, V);
5279 V = getUnpackh(DAG, dl, VT, V, V);
5280 EltNo -= NumElems/2;
5287 /// getLegalSplat - Generate a legal splat with supported x86 shuffles
5288 static SDValue getLegalSplat(SelectionDAG &DAG, SDValue V, int EltNo) {
5289 MVT VT = V.getSimpleValueType();
5292 if (VT.is128BitVector()) {
5293 V = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, V);
5294 int SplatMask[4] = { EltNo, EltNo, EltNo, EltNo };
5295 V = DAG.getVectorShuffle(MVT::v4f32, dl, V, DAG.getUNDEF(MVT::v4f32),
5297 } else if (VT.is256BitVector()) {
// To use VPERMILPS to splat scalars, the second half of indices must
5299 // refer to the higher part, which is a duplication of the lower one,
5300 // because VPERMILPS can only handle in-lane permutations.
5301 int SplatMask[8] = { EltNo, EltNo, EltNo, EltNo,
5302 EltNo+4, EltNo+4, EltNo+4, EltNo+4 };
5304 V = DAG.getNode(ISD::BITCAST, dl, MVT::v8f32, V);
5305 V = DAG.getVectorShuffle(MVT::v8f32, dl, V, DAG.getUNDEF(MVT::v8f32),
5308 llvm_unreachable("Vector size not supported");
5310 return DAG.getNode(ISD::BITCAST, dl, VT, V);
5313 /// PromoteSplat - Splat is promoted to target supported vector shuffles.
5314 static SDValue PromoteSplat(ShuffleVectorSDNode *SV, SelectionDAG &DAG) {
5315 MVT SrcVT = SV->getSimpleValueType(0);
5316 SDValue V1 = SV->getOperand(0);
5319 int EltNo = SV->getSplatIndex();
5320 int NumElems = SrcVT.getVectorNumElements();
5321 bool Is256BitVec = SrcVT.is256BitVector();
5323 assert(((SrcVT.is128BitVector() && NumElems > 4) || Is256BitVec) &&
5324 "Unknown how to promote splat for type");
5326 // Extract the 128-bit part containing the splat element and update
5327 // the splat element index when it refers to the higher register.
5329 V1 = Extract128BitVector(V1, EltNo, DAG, dl);
5330 if (EltNo >= NumElems/2)
5331 EltNo -= NumElems/2;
5334 // All i16 and i8 vector types can't be used directly by a generic shuffle
5335 // instruction because the target has no such instruction. Generate shuffles
5336 // which repeat i16 and i8 several times until they fit in i32, and then can
// be manipulated by target supported shuffles.
5338 MVT EltVT = SrcVT.getVectorElementType();
5339 if (EltVT == MVT::i8 || EltVT == MVT::i16)
5340 V1 = PromoteSplati8i16(V1, DAG, EltNo);
5342 // Recreate the 256-bit vector and place the same 128-bit vector
5343 // into the low and high part. This is necessary because we want
5344 // to use VPERM* to shuffle the vectors
5346 V1 = DAG.getNode(ISD::CONCAT_VECTORS, dl, SrcVT, V1, V1);
5349 return getLegalSplat(DAG, V1, EltNo);
5352 /// getShuffleVectorZeroOrUndef - Return a vector_shuffle of the specified
/// vector and a zero or undef vector. This produces a shuffle where the low
5354 /// element of V2 is swizzled into the zero/undef vector, landing at element
5355 /// Idx. This produces a shuffle mask like 4,1,2,3 (idx=0) or 0,1,2,4 (idx=3).
5356 static SDValue getShuffleVectorZeroOrUndef(SDValue V2, unsigned Idx,
5358 const X86Subtarget *Subtarget,
5359 SelectionDAG &DAG) {
5360 MVT VT = V2.getSimpleValueType();
5362 ? getZeroVector(VT, Subtarget, DAG, SDLoc(V2)) : DAG.getUNDEF(VT);
5363 unsigned NumElems = VT.getVectorNumElements();
5364 SmallVector<int, 16> MaskVec;
5365 for (unsigned i = 0; i != NumElems; ++i)
5366 // If this is the insertion idx, put the low elt of V2 here.
5367 MaskVec.push_back(i == Idx ? NumElems : i);
5368 return DAG.getVectorShuffle(VT, SDLoc(V2), V1, V2, &MaskVec[0]);
5371 /// getTargetShuffleMask - Calculates the shuffle mask corresponding to the
5372 /// target specific opcode. Returns true if the Mask could be calculated. Sets
/// IsUnary to true if it only uses one source. Note that this will set IsUnary for
5374 /// shuffles which use a single input multiple times, and in those cases it will
5375 /// adjust the mask to only have indices within that single input.
5376 static bool getTargetShuffleMask(SDNode *N, MVT VT,
5377 SmallVectorImpl<int> &Mask, bool &IsUnary) {
5378 unsigned NumElems = VT.getVectorNumElements();
5382 bool IsFakeUnary = false;
5383 switch(N->getOpcode()) {
5384 case X86ISD::BLENDI:
5385 ImmN = N->getOperand(N->getNumOperands()-1);
5386 DecodeBLENDMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
5389 ImmN = N->getOperand(N->getNumOperands()-1);
5390 DecodeSHUFPMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
5391 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
5393 case X86ISD::UNPCKH:
5394 DecodeUNPCKHMask(VT, Mask);
5395 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
5397 case X86ISD::UNPCKL:
5398 DecodeUNPCKLMask(VT, Mask);
5399 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
5401 case X86ISD::MOVHLPS:
5402 DecodeMOVHLPSMask(NumElems, Mask);
5403 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
5405 case X86ISD::MOVLHPS:
5406 DecodeMOVLHPSMask(NumElems, Mask);
5407 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
5409 case X86ISD::PALIGNR:
5410 ImmN = N->getOperand(N->getNumOperands()-1);
5411 DecodePALIGNRMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
5413 case X86ISD::PSHUFD:
5414 case X86ISD::VPERMILPI:
5415 ImmN = N->getOperand(N->getNumOperands()-1);
5416 DecodePSHUFMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
5419 case X86ISD::PSHUFHW:
5420 ImmN = N->getOperand(N->getNumOperands()-1);
5421 DecodePSHUFHWMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
5424 case X86ISD::PSHUFLW:
5425 ImmN = N->getOperand(N->getNumOperands()-1);
5426 DecodePSHUFLWMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
5429 case X86ISD::PSHUFB: {
5431 SDValue MaskNode = N->getOperand(1);
5432 while (MaskNode->getOpcode() == ISD::BITCAST)
5433 MaskNode = MaskNode->getOperand(0);
5435 if (MaskNode->getOpcode() == ISD::BUILD_VECTOR) {
5436 // If we have a build-vector, then things are easy.
5437 EVT VT = MaskNode.getValueType();
5438 assert(VT.isVector() &&
5439 "Can't produce a non-vector with a build_vector!");
5440 if (!VT.isInteger())
5443 int NumBytesPerElement = VT.getVectorElementType().getSizeInBits() / 8;
5445 SmallVector<uint64_t, 32> RawMask;
5446 for (int i = 0, e = MaskNode->getNumOperands(); i < e; ++i) {
5447 SDValue Op = MaskNode->getOperand(i);
5448 if (Op->getOpcode() == ISD::UNDEF) {
5449 RawMask.push_back((uint64_t)SM_SentinelUndef);
5452 auto *CN = dyn_cast<ConstantSDNode>(Op.getNode());
5455 APInt MaskElement = CN->getAPIntValue();
5457 // We now have to decode the element which could be any integer size and
5458 // extract each byte of it.
5459 for (int j = 0; j < NumBytesPerElement; ++j) {
5460 // Note that this is x86 and so always little endian: the low byte is
5461 // the first byte of the mask.
5462 RawMask.push_back(MaskElement.getLoBits(8).getZExtValue());
5463 MaskElement = MaskElement.lshr(8);
5466 DecodePSHUFBMask(RawMask, Mask);
5470 auto *MaskLoad = dyn_cast<LoadSDNode>(MaskNode);
5474 SDValue Ptr = MaskLoad->getBasePtr();
5475 if (Ptr->getOpcode() == X86ISD::Wrapper)
5476 Ptr = Ptr->getOperand(0);
5478 auto *MaskCP = dyn_cast<ConstantPoolSDNode>(Ptr);
5479 if (!MaskCP || MaskCP->isMachineConstantPoolEntry())
5482 if (auto *C = dyn_cast<Constant>(MaskCP->getConstVal())) {
5483 DecodePSHUFBMask(C, Mask);
5491 case X86ISD::VPERMI:
5492 ImmN = N->getOperand(N->getNumOperands()-1);
5493 DecodeVPERMMask(cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
5498 DecodeScalarMoveMask(VT, /* IsLoad */ false, Mask);
5500 case X86ISD::VPERM2X128:
5501 ImmN = N->getOperand(N->getNumOperands()-1);
5502 DecodeVPERM2X128Mask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
5503 if (Mask.empty()) return false;
5505 case X86ISD::MOVSLDUP:
5506 DecodeMOVSLDUPMask(VT, Mask);
5509 case X86ISD::MOVSHDUP:
5510 DecodeMOVSHDUPMask(VT, Mask);
5513 case X86ISD::MOVDDUP:
5514 DecodeMOVDDUPMask(VT, Mask);
5517 case X86ISD::MOVLHPD:
5518 case X86ISD::MOVLPD:
5519 case X86ISD::MOVLPS:
5520 // Not yet implemented
5522 default: llvm_unreachable("unknown target shuffle node");
5525 // If we have a fake unary shuffle, the shuffle mask is spread across two
5526 // inputs that are actually the same node. Re-map the mask to always point
5527 // into the first input.
5530 if (M >= (int)Mask.size())
5536 /// getShuffleScalarElt - Returns the scalar element that will make up the ith
5537 /// element of the result of the vector shuffle.
5538 static SDValue getShuffleScalarElt(SDNode *N, unsigned Index, SelectionDAG &DAG,
5541 return SDValue(); // Limit search depth.
5543 SDValue V = SDValue(N, 0);
5544 EVT VT = V.getValueType();
5545 unsigned Opcode = V.getOpcode();
5547 // Recurse into ISD::VECTOR_SHUFFLE node to find scalars.
5548 if (const ShuffleVectorSDNode *SV = dyn_cast<ShuffleVectorSDNode>(N)) {
5549 int Elt = SV->getMaskElt(Index);
5552 return DAG.getUNDEF(VT.getVectorElementType());
5554 unsigned NumElems = VT.getVectorNumElements();
5555 SDValue NewV = (Elt < (int)NumElems) ? SV->getOperand(0)
5556 : SV->getOperand(1);
5557 return getShuffleScalarElt(NewV.getNode(), Elt % NumElems, DAG, Depth+1);
5560 // Recurse into target specific vector shuffles to find scalars.
5561 if (isTargetShuffle(Opcode)) {
5562 MVT ShufVT = V.getSimpleValueType();
5563 unsigned NumElems = ShufVT.getVectorNumElements();
5564 SmallVector<int, 16> ShuffleMask;
5567 if (!getTargetShuffleMask(N, ShufVT, ShuffleMask, IsUnary))
5570 int Elt = ShuffleMask[Index];
5572 return DAG.getUNDEF(ShufVT.getVectorElementType());
5574 SDValue NewV = (Elt < (int)NumElems) ? N->getOperand(0)
5576 return getShuffleScalarElt(NewV.getNode(), Elt % NumElems, DAG,
5580 // Actual nodes that may contain scalar elements
5581 if (Opcode == ISD::BITCAST) {
5582 V = V.getOperand(0);
5583 EVT SrcVT = V.getValueType();
5584 unsigned NumElems = VT.getVectorNumElements();
5586 if (!SrcVT.isVector() || SrcVT.getVectorNumElements() != NumElems)
5590 if (V.getOpcode() == ISD::SCALAR_TO_VECTOR)
5591 return (Index == 0) ? V.getOperand(0)
5592 : DAG.getUNDEF(VT.getVectorElementType());
5594 if (V.getOpcode() == ISD::BUILD_VECTOR)
5595 return V.getOperand(Index);
/// getNumOfConsecutiveZeros - Return the number of elements of a vector
/// shuffle operation which consecutively come from zero elements. The
5602 /// search can start in two different directions, from left or right.
5603 /// We count undefs as zeros until PreferredNum is reached.
5604 static unsigned getNumOfConsecutiveZeros(ShuffleVectorSDNode *SVOp,
5605 unsigned NumElems, bool ZerosFromLeft,
5607 unsigned PreferredNum = -1U) {
5608 unsigned NumZeros = 0;
5609 for (unsigned i = 0; i != NumElems; ++i) {
5610 unsigned Index = ZerosFromLeft ? i : NumElems - i - 1;
5611 SDValue Elt = getShuffleScalarElt(SVOp, Index, DAG, 0);
5615 if (X86::isZeroNode(Elt))
5617 else if (Elt.getOpcode() == ISD::UNDEF) // Undef as zero up to PreferredNum.
5618 NumZeros = std::min(NumZeros + 1, PreferredNum);
/// isShuffleMaskConsecutive - Check if the shuffle mask indices [MaskI, MaskE)
/// correspond consecutively to elements from one of the vector operands,
/// starting from its index OpIdx. OpNum is set to the source operand that was used.
5630 bool isShuffleMaskConsecutive(ShuffleVectorSDNode *SVOp,
5631 unsigned MaskI, unsigned MaskE, unsigned OpIdx,
5632 unsigned NumElems, unsigned &OpNum) {
5633 bool SeenV1 = false;
5634 bool SeenV2 = false;
5636 for (unsigned i = MaskI; i != MaskE; ++i, ++OpIdx) {
5637 int Idx = SVOp->getMaskElt(i);
// Ignore undef indices
5642 if (Idx < (int)NumElems)
5647 // Only accept consecutive elements from the same vector
5648 if ((Idx % NumElems != OpIdx) || (SeenV1 && SeenV2))
5652 OpNum = SeenV1 ? 0 : 1;
5656 /// isVectorShiftRight - Returns true if the shuffle can be implemented as a
/// logical right shift of a vector.
5658 static bool isVectorShiftRight(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG,
5659 bool &isLeft, SDValue &ShVal, unsigned &ShAmt) {
5661 SVOp->getSimpleValueType(0).getVectorNumElements();
5662 unsigned NumZeros = getNumOfConsecutiveZeros(
5663 SVOp, NumElems, false /* check zeros from right */, DAG,
5664 SVOp->getMaskElt(0));
5670 // Considering the elements in the mask that are not consecutive zeros,
5671 // check if they consecutively come from only one of the source vectors.
// Example: with V1 = {X, A, B, C} and the zero coming from the right,
//   vector_shuffle V1, V2 <1, 2, 3, X>
// keeps the consecutive elements A, B, C from V1 and fills the last position
// with a zero, i.e. V1 logically shifted right by one element.
5677 if (!isShuffleMaskConsecutive(SVOp,
5678 0, // Mask Start Index
5679 NumElems-NumZeros, // Mask End Index(exclusive)
5680 NumZeros, // Where to start looking in the src vector
5681 NumElems, // Number of elements in vector
5682 OpSrc)) // Which source operand ?
5687 ShVal = SVOp->getOperand(OpSrc);
5691 /// isVectorShiftLeft - Returns true if the shuffle can be implemented as a
5692 /// logical left shift of a vector.
5693 static bool isVectorShiftLeft(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG,
5694 bool &isLeft, SDValue &ShVal, unsigned &ShAmt) {
5696 SVOp->getSimpleValueType(0).getVectorNumElements();
5697 unsigned NumZeros = getNumOfConsecutiveZeros(
5698 SVOp, NumElems, true /* check zeros from left */, DAG,
5699 NumElems - SVOp->getMaskElt(NumElems - 1) - 1);
5705 // Considering the elements in the mask that are not consecutive zeros,
5706 // check if they consecutively come from only one of the source vectors.
// Example: with V2 = {A, B, X, X} and the zeros coming from the left,
//   vector_shuffle V1, V2 <X, X, 4, 5>
// places A and B from V2 in the upper positions and zeros in the lower ones,
// i.e. V2 logically shifted left by two elements.
5712 if (!isShuffleMaskConsecutive(SVOp,
5713 NumZeros, // Mask Start Index
5714 NumElems, // Mask End Index(exclusive)
5715 0, // Where to start looking in the src vector
5716 NumElems, // Number of elements in vector
5717 OpSrc)) // Which source operand ?
5722 ShVal = SVOp->getOperand(OpSrc);
5726 /// isVectorShift - Returns true if the shuffle can be implemented as a
5727 /// logical left or right shift of a vector.
5728 static bool isVectorShift(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG,
5729 bool &isLeft, SDValue &ShVal, unsigned &ShAmt) {
// Although the logic below supports any bitwidth size, there are no
5731 // shift instructions which handle more than 128-bit vectors.
5732 if (!SVOp->getSimpleValueType(0).is128BitVector())
5735 if (isVectorShiftLeft(SVOp, DAG, isLeft, ShVal, ShAmt) ||
5736 isVectorShiftRight(SVOp, DAG, isLeft, ShVal, ShAmt))
5742 /// LowerBuildVectorv16i8 - Custom lower build_vector of v16i8.
5744 static SDValue LowerBuildVectorv16i8(SDValue Op, unsigned NonZeros,
5745 unsigned NumNonZero, unsigned NumZero,
5747 const X86Subtarget* Subtarget,
5748 const TargetLowering &TLI) {
5755 for (unsigned i = 0; i < 16; ++i) {
5756 bool ThisIsNonZero = (NonZeros & (1 << i)) != 0;
5757 if (ThisIsNonZero && First) {
5759 V = getZeroVector(MVT::v8i16, Subtarget, DAG, dl);
5761 V = DAG.getUNDEF(MVT::v8i16);
5766 SDValue ThisElt, LastElt;
5767 bool LastIsNonZero = (NonZeros & (1 << (i-1))) != 0;
5768 if (LastIsNonZero) {
5769 LastElt = DAG.getNode(ISD::ZERO_EXTEND, dl,
5770 MVT::i16, Op.getOperand(i-1));
5772 if (ThisIsNonZero) {
5773 ThisElt = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, Op.getOperand(i));
5774 ThisElt = DAG.getNode(ISD::SHL, dl, MVT::i16,
5775 ThisElt, DAG.getConstant(8, MVT::i8));
5777 ThisElt = DAG.getNode(ISD::OR, dl, MVT::i16, ThisElt, LastElt);
5781 if (ThisElt.getNode())
5782 V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v8i16, V, ThisElt,
5783 DAG.getIntPtrConstant(i/2));
5787 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, V);
5790 /// LowerBuildVectorv8i16 - Custom lower build_vector of v8i16.
5792 static SDValue LowerBuildVectorv8i16(SDValue Op, unsigned NonZeros,
5793 unsigned NumNonZero, unsigned NumZero,
5795 const X86Subtarget* Subtarget,
5796 const TargetLowering &TLI) {
5803 for (unsigned i = 0; i < 8; ++i) {
5804 bool isNonZero = (NonZeros & (1 << i)) != 0;
5808 V = getZeroVector(MVT::v8i16, Subtarget, DAG, dl);
5810 V = DAG.getUNDEF(MVT::v8i16);
5813 V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl,
5814 MVT::v8i16, V, Op.getOperand(i),
5815 DAG.getIntPtrConstant(i));
5822 /// LowerBuildVectorv4x32 - Custom lower build_vector of v4i32 or v4f32.
5823 static SDValue LowerBuildVectorv4x32(SDValue Op, SelectionDAG &DAG,
5824 const X86Subtarget *Subtarget,
5825 const TargetLowering &TLI) {
5826 // Find all zeroable elements.
5827 std::bitset<4> Zeroable;
5828 for (int i=0; i < 4; ++i) {
5829 SDValue Elt = Op->getOperand(i);
5830 Zeroable[i] = (Elt.getOpcode() == ISD::UNDEF || X86::isZeroNode(Elt));
5832 assert(Zeroable.size() - Zeroable.count() > 1 &&
5833 "We expect at least two non-zero elements!");
5835 // We only know how to deal with build_vector nodes where elements are either
5836 // zeroable or extract_vector_elt with constant index.
5837 SDValue FirstNonZero;
5838 unsigned FirstNonZeroIdx;
5839 for (unsigned i=0; i < 4; ++i) {
5842 SDValue Elt = Op->getOperand(i);
5843 if (Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
5844 !isa<ConstantSDNode>(Elt.getOperand(1)))
5846 // Make sure that this node is extracting from a 128-bit vector.
5847 MVT VT = Elt.getOperand(0).getSimpleValueType();
5848 if (!VT.is128BitVector())
5850 if (!FirstNonZero.getNode()) {
5852 FirstNonZeroIdx = i;
5856 assert(FirstNonZero.getNode() && "Unexpected build vector of all zeros!");
5857 SDValue V1 = FirstNonZero.getOperand(0);
5858 MVT VT = V1.getSimpleValueType();
5860 // See if this build_vector can be lowered as a blend with zero.
5862 unsigned EltMaskIdx, EltIdx;
5864 for (EltIdx = 0; EltIdx < 4; ++EltIdx) {
5865 if (Zeroable[EltIdx]) {
5866 // The zero vector will be on the right hand side.
5867 Mask[EltIdx] = EltIdx+4;
5871 Elt = Op->getOperand(EltIdx);
5872 // By construction, Elt is a EXTRACT_VECTOR_ELT with constant index.
5873 EltMaskIdx = cast<ConstantSDNode>(Elt.getOperand(1))->getZExtValue();
5874 if (Elt.getOperand(0) != V1 || EltMaskIdx != EltIdx)
5876 Mask[EltIdx] = EltIdx;
5880 // Let the shuffle legalizer deal with blend operations.
5881 SDValue VZero = getZeroVector(VT, Subtarget, DAG, SDLoc(Op));
5882 if (V1.getSimpleValueType() != VT)
5883 V1 = DAG.getNode(ISD::BITCAST, SDLoc(V1), VT, V1);
5884 return DAG.getVectorShuffle(VT, SDLoc(V1), V1, VZero, &Mask[0]);
5887 // See if we can lower this build_vector to a INSERTPS.
5888 if (!Subtarget->hasSSE41())
5891 SDValue V2 = Elt.getOperand(0);
5892 if (Elt == FirstNonZero && EltIdx == FirstNonZeroIdx)
5895 bool CanFold = true;
5896 for (unsigned i = EltIdx + 1; i < 4 && CanFold; ++i) {
5900 SDValue Current = Op->getOperand(i);
5901 SDValue SrcVector = Current->getOperand(0);
5904 CanFold = SrcVector == V1 &&
5905 cast<ConstantSDNode>(Current.getOperand(1))->getZExtValue() == i;
5911 assert(V1.getNode() && "Expected at least two non-zero elements!");
5912 if (V1.getSimpleValueType() != MVT::v4f32)
5913 V1 = DAG.getNode(ISD::BITCAST, SDLoc(V1), MVT::v4f32, V1);
5914 if (V2.getSimpleValueType() != MVT::v4f32)
5915 V2 = DAG.getNode(ISD::BITCAST, SDLoc(V2), MVT::v4f32, V2);
5917 // Ok, we can emit an INSERTPS instruction.
5918 unsigned ZMask = Zeroable.to_ulong();
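// The INSERTPS immediate encodes the source element in bits [7:6], the
// destination element in bits [5:4], and the zero mask in bits [3:0].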
5920 unsigned InsertPSMask = EltMaskIdx << 6 | EltIdx << 4 | ZMask;
5921 assert((InsertPSMask & ~0xFFu) == 0 && "Invalid mask!");
5922 SDValue Result = DAG.getNode(X86ISD::INSERTPS, SDLoc(Op), MVT::v4f32, V1, V2,
5923 DAG.getIntPtrConstant(InsertPSMask));
5924 return DAG.getNode(ISD::BITCAST, SDLoc(Op), VT, Result);
5927 /// Return a vector logical shift node.
5928 static SDValue getVShift(bool isLeft, EVT VT, SDValue SrcOp,
5929 unsigned NumBits, SelectionDAG &DAG,
5930 const TargetLowering &TLI, SDLoc dl) {
5931 assert(VT.is128BitVector() && "Unknown type for VShift");
5932 MVT ShVT = MVT::v2i64;
5933 unsigned Opc = isLeft ? X86ISD::VSHLDQ : X86ISD::VSRLDQ;
5934 SrcOp = DAG.getNode(ISD::BITCAST, dl, ShVT, SrcOp);
5935 MVT ScalarShiftTy = TLI.getScalarShiftAmountTy(SrcOp.getValueType());
5936 assert(NumBits % 8 == 0 && "Only support byte sized shifts");
5937 SDValue ShiftVal = DAG.getConstant(NumBits/8, ScalarShiftTy);
5938 return DAG.getNode(ISD::BITCAST, dl, VT,
5939 DAG.getNode(Opc, dl, ShVT, SrcOp, ShiftVal));
5943 LowerAsSplatVectorLoad(SDValue SrcOp, MVT VT, SDLoc dl, SelectionDAG &DAG) {
// Check if the scalar load can be widened into a vector load, and if
// the address is "base + cst", see if the cst can be "absorbed" into
5947 // the shuffle mask.
5948 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(SrcOp)) {
5949 SDValue Ptr = LD->getBasePtr();
5950 if (!ISD::isNormalLoad(LD) || LD->isVolatile())
5952 EVT PVT = LD->getValueType(0);
5953 if (PVT != MVT::i32 && PVT != MVT::f32)
5958 if (FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(Ptr)) {
5959 FI = FINode->getIndex();
5961 } else if (DAG.isBaseWithConstantOffset(Ptr) &&
5962 isa<FrameIndexSDNode>(Ptr.getOperand(0))) {
5963 FI = cast<FrameIndexSDNode>(Ptr.getOperand(0))->getIndex();
5964 Offset = Ptr.getConstantOperandVal(1);
5965 Ptr = Ptr.getOperand(0);
5970 // FIXME: 256-bit vector instructions don't require a strict alignment,
5971 // improve this code to support it better.
5972 unsigned RequiredAlign = VT.getSizeInBits()/8;
5973 SDValue Chain = LD->getChain();
5974 // Make sure the stack object alignment is at least 16 or 32.
5975 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
5976 if (DAG.InferPtrAlignment(Ptr) < RequiredAlign) {
5977 if (MFI->isFixedObjectIndex(FI)) {
5978 // Can't change the alignment. FIXME: It's possible to compute
5979 // the exact stack offset and reference FI + adjust offset instead.
5980 // If someone *really* cares about this. That's the way to implement it.
5983 MFI->setObjectAlignment(FI, RequiredAlign);
// (Offset % 16 or 32) must be a multiple of 4. The address is then
// Ptr + (Offset & ~15).
5991 if ((Offset % RequiredAlign) & 3)
5993 int64_t StartOffset = Offset & ~(RequiredAlign-1);
5995 Ptr = DAG.getNode(ISD::ADD, SDLoc(Ptr), Ptr.getValueType(),
5996 Ptr,DAG.getConstant(StartOffset, Ptr.getValueType()));
5998 int EltNo = (Offset - StartOffset) >> 2;
5999 unsigned NumElems = VT.getVectorNumElements();
6001 EVT NVT = EVT::getVectorVT(*DAG.getContext(), PVT, NumElems);
6002 SDValue V1 = DAG.getLoad(NVT, dl, Chain, Ptr,
6003 LD->getPointerInfo().getWithOffset(StartOffset),
6004 false, false, false, 0);
6006 SmallVector<int, 8> Mask(NumElems, EltNo);
6008 return DAG.getVectorShuffle(NVT, dl, V1, DAG.getUNDEF(NVT), &Mask[0]);
6014 /// Given the initializing elements 'Elts' of a vector of type 'VT', see if the
6015 /// elements can be replaced by a single large load which has the same value as
6016 /// a build_vector or insert_subvector whose loaded operands are 'Elts'.
6018 /// Example: <load i32 *a, load i32 *a+4, undef, undef> -> zextload a
6020 /// FIXME: we'd also like to handle the case where the last elements are zero
6021 /// rather than undef via VZEXT_LOAD, but we do not detect that case today.
6022 /// There's even a handy isZeroNode for that purpose.
6023 static SDValue EltsFromConsecutiveLoads(EVT VT, ArrayRef<SDValue> Elts,
6024 SDLoc &DL, SelectionDAG &DAG,
6025 bool isAfterLegalize) {
6026 unsigned NumElems = Elts.size();
6028 LoadSDNode *LDBase = nullptr;
6029 unsigned LastLoadedElt = -1U;
6031 // For each element in the initializer, see if we've found a load or an undef.
6032 // If we don't find an initial load element, or later load elements are
6033 // non-consecutive, bail out.
6034 for (unsigned i = 0; i < NumElems; ++i) {
6035 SDValue Elt = Elts[i];
6036 // Look through a bitcast.
6037 if (Elt.getNode() && Elt.getOpcode() == ISD::BITCAST)
6038 Elt = Elt.getOperand(0);
6039 if (!Elt.getNode() ||
6040 (Elt.getOpcode() != ISD::UNDEF && !ISD::isNON_EXTLoad(Elt.getNode())))
6043 if (Elt.getNode()->getOpcode() == ISD::UNDEF)
6045 LDBase = cast<LoadSDNode>(Elt.getNode());
6049 if (Elt.getOpcode() == ISD::UNDEF)
6052 LoadSDNode *LD = cast<LoadSDNode>(Elt);
6053 EVT LdVT = Elt.getValueType();
6054 // Each loaded element must be the correct fractional portion of the
6055 // requested vector load.
6056 if (LdVT.getSizeInBits() != VT.getSizeInBits() / NumElems)
6058 if (!DAG.isConsecutiveLoad(LD, LDBase, LdVT.getSizeInBits() / 8, i))
6063 // If we have found an entire vector of loads and undefs, then return a large
6064 // load of the entire vector width starting at the base pointer. If we found
6065 // consecutive loads for the low half, generate a vzext_load node.
6066 if (LastLoadedElt == NumElems - 1) {
6067 assert(LDBase && "Did not find base load for merging consecutive loads");
6068 EVT EltVT = LDBase->getValueType(0);
6069 // Ensure that the input vector size for the merged loads matches the
6070 // cumulative size of the input elements.
6071 if (VT.getSizeInBits() != EltVT.getSizeInBits() * NumElems)
6074 if (isAfterLegalize &&
6075 !DAG.getTargetLoweringInfo().isOperationLegal(ISD::LOAD, VT))
6078 SDValue NewLd = SDValue();
6080 NewLd = DAG.getLoad(VT, DL, LDBase->getChain(), LDBase->getBasePtr(),
6081 LDBase->getPointerInfo(), LDBase->isVolatile(),
6082 LDBase->isNonTemporal(), LDBase->isInvariant(),
6083 LDBase->getAlignment());
6085 if (LDBase->hasAnyUseOfValue(1)) {
6086 SDValue NewChain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other,
6088 SDValue(NewLd.getNode(), 1));
6089 DAG.ReplaceAllUsesOfValueWith(SDValue(LDBase, 1), NewChain);
6090 DAG.UpdateNodeOperands(NewChain.getNode(), SDValue(LDBase, 1),
6091 SDValue(NewLd.getNode(), 1));
// TODO: The code below fires only for loading the low v2i32 / v2f32
// of a v4i32 / v4f32. It's probably worth generalizing.
6099 EVT EltVT = VT.getVectorElementType();
6100 if (NumElems == 4 && LastLoadedElt == 1 && (EltVT.getSizeInBits() == 32) &&
6101 DAG.getTargetLoweringInfo().isTypeLegal(MVT::v2i64)) {
6102 SDVTList Tys = DAG.getVTList(MVT::v2i64, MVT::Other);
6103 SDValue Ops[] = { LDBase->getChain(), LDBase->getBasePtr() };
6105 DAG.getMemIntrinsicNode(X86ISD::VZEXT_LOAD, DL, Tys, Ops, MVT::i64,
6106 LDBase->getPointerInfo(),
6107 LDBase->getAlignment(),
6108 false/*isVolatile*/, true/*ReadMem*/,
6111 // Make sure the newly-created LOAD is in the same position as LDBase in
6112 // terms of dependency. We create a TokenFactor for LDBase and ResNode, and
6113 // update uses of LDBase's output chain to use the TokenFactor.
6114 if (LDBase->hasAnyUseOfValue(1)) {
6115 SDValue NewChain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other,
6116 SDValue(LDBase, 1), SDValue(ResNode.getNode(), 1));
6117 DAG.ReplaceAllUsesOfValueWith(SDValue(LDBase, 1), NewChain);
6118 DAG.UpdateNodeOperands(NewChain.getNode(), SDValue(LDBase, 1),
6119 SDValue(ResNode.getNode(), 1));
6122 return DAG.getNode(ISD::BITCAST, DL, VT, ResNode);
6127 /// LowerVectorBroadcast - Attempt to use the vbroadcast instruction
6128 /// to generate a splat value for the following cases:
6129 /// 1. A splat BUILD_VECTOR which uses a single scalar load, or a constant.
6130 /// 2. A splat shuffle which uses a scalar_to_vector node which comes from
6131 /// a scalar load, or a constant.
6132 /// The VBROADCAST node is returned when a pattern is found,
6133 /// or SDValue() otherwise.
6134 static SDValue LowerVectorBroadcast(SDValue Op, const X86Subtarget* Subtarget,
6135 SelectionDAG &DAG) {
6136 // VBROADCAST requires AVX.
6137 // TODO: Splats could be generated for non-AVX CPUs using SSE
6138 // instructions, but there's less potential gain for only 128-bit vectors.
6139 if (!Subtarget->hasAVX())
6142 MVT VT = Op.getSimpleValueType();
6145 assert((VT.is128BitVector() || VT.is256BitVector() || VT.is512BitVector()) &&
6146 "Unsupported vector type for broadcast.");
6151 switch (Op.getOpcode()) {
6153 // Unknown pattern found.
6156 case ISD::BUILD_VECTOR: {
6157 auto *BVOp = cast<BuildVectorSDNode>(Op.getNode());
6158 BitVector UndefElements;
6159 SDValue Splat = BVOp->getSplatValue(&UndefElements);
6161 // We need a splat of a single value to use broadcast, and it doesn't
6162 // make any sense if the value is only in one element of the vector.
6163 if (!Splat || (VT.getVectorNumElements() - UndefElements.count()) <= 1)
6167 ConstSplatVal = (Ld.getOpcode() == ISD::Constant ||
6168 Ld.getOpcode() == ISD::ConstantFP);
6170 // Make sure that all of the users of a non-constant load are from the
6171 // BUILD_VECTOR node.
6172 if (!ConstSplatVal && !BVOp->isOnlyUserOf(Ld.getNode()))
6177 case ISD::VECTOR_SHUFFLE: {
6178 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
// Shuffles must have a splat mask where the splatted element is element 0 of
// the source.
6182 if ((!SVOp->isSplat()) || SVOp->getMaskElt(0) != 0)
6185 SDValue Sc = Op.getOperand(0);
6186 if (Sc.getOpcode() != ISD::SCALAR_TO_VECTOR &&
6187 Sc.getOpcode() != ISD::BUILD_VECTOR) {
6189 if (!Subtarget->hasInt256())
6192 // Use the register form of the broadcast instruction available on AVX2.
6193 if (VT.getSizeInBits() >= 256)
6194 Sc = Extract128BitVector(Sc, 0, DAG, dl);
6195 return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Sc);
6198 Ld = Sc.getOperand(0);
6199 ConstSplatVal = (Ld.getOpcode() == ISD::Constant ||
6200 Ld.getOpcode() == ISD::ConstantFP);
6202 // The scalar_to_vector node and the suspected
6203 // load node must have exactly one user.
6204 // Constants may have multiple users.
// AVX-512 has a register version of the broadcast.
6207 bool hasRegVer = Subtarget->hasAVX512() && VT.is512BitVector() &&
6208 Ld.getValueType().getSizeInBits() >= 32;
6209 if (!ConstSplatVal && ((!Sc.hasOneUse() || !Ld.hasOneUse()) &&
6216 unsigned ScalarSize = Ld.getValueType().getSizeInBits();
6217 bool IsGE256 = (VT.getSizeInBits() >= 256);
6219 // When optimizing for size, generate up to 5 extra bytes for a broadcast
6220 // instruction to save 8 or more bytes of constant pool data.
6221 // TODO: If multiple splats are generated to load the same constant,
6222 // it may be detrimental to overall size. There needs to be a way to detect
6223 // that condition to know if this is truly a size win.
6224 const Function *F = DAG.getMachineFunction().getFunction();
6225 bool OptForSize = F->hasFnAttribute(Attribute::OptimizeForSize);
// Handle broadcasting a single constant scalar from the constant pool
// into a vector.
6229 // On Sandybridge (no AVX2), it is still better to load a constant vector
6230 // from the constant pool and not to broadcast it from a scalar.
6231 // But override that restriction when optimizing for size.
6232 // TODO: Check if splatting is recommended for other AVX-capable CPUs.
6233 if (ConstSplatVal && (Subtarget->hasAVX2() || OptForSize)) {
6234 EVT CVT = Ld.getValueType();
6235 assert(!CVT.isVector() && "Must not broadcast a vector type");
6237 // Splat f32, i32, v4f64, v4i64 in all cases with AVX2.
6238 // For size optimization, also splat v2f64 and v2i64, and for size opt
6239 // with AVX2, also splat i8 and i16.
6240 // With pattern matching, the VBROADCAST node may become a VMOVDDUP.
6241 if (ScalarSize == 32 || (IsGE256 && ScalarSize == 64) ||
6242 (OptForSize && (ScalarSize == 64 || Subtarget->hasAVX2()))) {
6243 const Constant *C = nullptr;
6244 if (ConstantSDNode *CI = dyn_cast<ConstantSDNode>(Ld))
6245 C = CI->getConstantIntValue();
6246 else if (ConstantFPSDNode *CF = dyn_cast<ConstantFPSDNode>(Ld))
6247 C = CF->getConstantFPValue();
6249 assert(C && "Invalid constant type");
6251 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
6252 SDValue CP = DAG.getConstantPool(C, TLI.getPointerTy());
6253 unsigned Alignment = cast<ConstantPoolSDNode>(CP)->getAlignment();
6254 Ld = DAG.getLoad(CVT, dl, DAG.getEntryNode(), CP,
6255 MachinePointerInfo::getConstantPool(),
6256 false, false, false, Alignment);
6258 return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld);
6262 bool IsLoad = ISD::isNormalLoad(Ld.getNode());
6264 // Handle AVX2 in-register broadcasts.
6265 if (!IsLoad && Subtarget->hasInt256() &&
6266 (ScalarSize == 32 || (IsGE256 && ScalarSize == 64)))
6267 return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld);
6269 // The scalar source must be a normal load.
6273 if (ScalarSize == 32 || (IsGE256 && ScalarSize == 64) ||
6274 (Subtarget->hasVLX() && ScalarSize == 64))
6275 return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld);
// The integer check is needed for broadcasting 64-bit values into 128-bit
// vectors: it must not match double, since there is no vbroadcastsd xmm.
6279 if (Subtarget->hasInt256() && Ld.getValueType().isInteger()) {
6280 if (ScalarSize == 8 || ScalarSize == 16 || ScalarSize == 64)
6281 return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld);
6284 // Unsupported broadcast.
6288 /// \brief For an EXTRACT_VECTOR_ELT with a constant index return the real
6289 /// underlying vector and index.
/// Modifies \p ExtractedFromVec to the real vector and returns the real
/// index.
6293 static int getUnderlyingExtractedFromVec(SDValue &ExtractedFromVec,
6295 int Idx = cast<ConstantSDNode>(ExtIdx)->getZExtValue();
6296 if (!isa<ShuffleVectorSDNode>(ExtractedFromVec))
// For 256-bit vectors, LowerEXTRACT_VECTOR_ELT_SSE4 may have already lowered
//   (extract_vector_elt (v8f32 %vreg1), Constant<6>)
// into something like
//   (extract_vector_elt (vector_shuffle<2,u,u,u>
//                           (extract_subvector (v8f32 %vreg0), Constant<4>),
//                           undef),
//                       Constant<0>)
// In this case the vector is the extract_subvector expression and the index
// is 2, as specified by the shuffle.
6309 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(ExtractedFromVec);
6310 SDValue ShuffleVec = SVOp->getOperand(0);
6311 MVT ShuffleVecVT = ShuffleVec.getSimpleValueType();
6312 assert(ShuffleVecVT.getVectorElementType() ==
6313 ExtractedFromVec.getSimpleValueType().getVectorElementType());
6315 int ShuffleIdx = SVOp->getMaskElt(Idx);
6316 if (isUndefOrInRange(ShuffleIdx, 0, ShuffleVecVT.getVectorNumElements())) {
6317 ExtractedFromVec = ShuffleVec;
6323 static SDValue buildFromShuffleMostly(SDValue Op, SelectionDAG &DAG) {
6324 MVT VT = Op.getSimpleValueType();
6326 // Skip if insert_vec_elt is not supported.
6327 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
6328 if (!TLI.isOperationLegalOrCustom(ISD::INSERT_VECTOR_ELT, VT))
6332 unsigned NumElems = Op.getNumOperands();
6336 SmallVector<unsigned, 4> InsertIndices;
6337 SmallVector<int, 8> Mask(NumElems, -1);
6339 for (unsigned i = 0; i != NumElems; ++i) {
6340 unsigned Opc = Op.getOperand(i).getOpcode();
6342 if (Opc == ISD::UNDEF)
6345 if (Opc != ISD::EXTRACT_VECTOR_ELT) {
// Quit if more than one element needs inserting.
6347 if (InsertIndices.size() > 1)
6350 InsertIndices.push_back(i);
6354 SDValue ExtractedFromVec = Op.getOperand(i).getOperand(0);
6355 SDValue ExtIdx = Op.getOperand(i).getOperand(1);
6356 // Quit if non-constant index.
6357 if (!isa<ConstantSDNode>(ExtIdx))
6359 int Idx = getUnderlyingExtractedFromVec(ExtractedFromVec, ExtIdx);
6361 // Quit if extracted from vector of different type.
6362 if (ExtractedFromVec.getValueType() != VT)
6365 if (!VecIn1.getNode())
6366 VecIn1 = ExtractedFromVec;
6367 else if (VecIn1 != ExtractedFromVec) {
6368 if (!VecIn2.getNode())
6369 VecIn2 = ExtractedFromVec;
6370 else if (VecIn2 != ExtractedFromVec)
6371 // Quit if more than 2 vectors to shuffle
6375 if (ExtractedFromVec == VecIn1)
6377 else if (ExtractedFromVec == VecIn2)
6378 Mask[i] = Idx + NumElems;
6381 if (!VecIn1.getNode())
6384 VecIn2 = VecIn2.getNode() ? VecIn2 : DAG.getUNDEF(VT);
6385 SDValue NV = DAG.getVectorShuffle(VT, DL, VecIn1, VecIn2, &Mask[0]);
6386 for (unsigned i = 0, e = InsertIndices.size(); i != e; ++i) {
6387 unsigned Idx = InsertIndices[i];
6388 NV = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VT, NV, Op.getOperand(Idx),
6389 DAG.getIntPtrConstant(Idx));
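// Editorial worked example (not part of the original source): given a v4i32
// build_vector whose operands are
//   (extract_vector_elt %A, 1), (extract_vector_elt %A, 0),
//   (extract_vector_elt %A, 3), %fresh_scalar
// the loop above records Mask = <1, 0, 3, -1> with VecIn1 = %A and a single
// insert index of 3, so the result becomes
//   insert_vector_elt (vector_shuffle<1,0,3,u> %A, undef), %fresh_scalar, 3
// Names such as %A and %fresh_scalar are illustrative only.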
6395 // Lower BUILD_VECTOR operation for v8i1 and v16i1 types.
6397 X86TargetLowering::LowerBUILD_VECTORvXi1(SDValue Op, SelectionDAG &DAG) const {
6399 MVT VT = Op.getSimpleValueType();
6400 assert((VT.getVectorElementType() == MVT::i1) && (VT.getSizeInBits() <= 16) &&
6401 "Unexpected type in LowerBUILD_VECTORvXi1!");
6404 if (ISD::isBuildVectorAllZeros(Op.getNode())) {
6405 SDValue Cst = DAG.getTargetConstant(0, MVT::i1);
6406 SmallVector<SDValue, 16> Ops(VT.getVectorNumElements(), Cst);
6407 return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Ops);
6410 if (ISD::isBuildVectorAllOnes(Op.getNode())) {
6411 SDValue Cst = DAG.getTargetConstant(1, MVT::i1);
6412 SmallVector<SDValue, 16> Ops(VT.getVectorNumElements(), Cst);
6413 return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Ops);
6416 bool AllConstants = true;
6417 uint64_t Immediate = 0;
6418 int NonConstIdx = -1;
6419 bool IsSplat = true;
6420 unsigned NumNonConsts = 0;
6421 unsigned NumConsts = 0;
6422 for (unsigned idx = 0, e = Op.getNumOperands(); idx < e; ++idx) {
6423 SDValue In = Op.getOperand(idx);
6424 if (In.getOpcode() == ISD::UNDEF)
6426 if (!isa<ConstantSDNode>(In)) {
6427 AllConstants = false;
6432 if (cast<ConstantSDNode>(In)->getZExtValue())
6433 Immediate |= (1ULL << idx);
6435 if (In != Op.getOperand(0))
6440 SDValue FullMask = DAG.getNode(ISD::BITCAST, dl, MVT::v16i1,
6441 DAG.getConstant(Immediate, MVT::i16));
6442 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, FullMask,
6443 DAG.getIntPtrConstant(0));
6446 if (NumNonConsts == 1 && NonConstIdx != 0) {
6449 SDValue VecAsImm = DAG.getConstant(Immediate,
6450 MVT::getIntegerVT(VT.getSizeInBits()));
6451 DstVec = DAG.getNode(ISD::BITCAST, dl, VT, VecAsImm);
6454 DstVec = DAG.getUNDEF(VT);
6455 return DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, DstVec,
6456 Op.getOperand(NonConstIdx),
6457 DAG.getIntPtrConstant(NonConstIdx));
6459 if (!IsSplat && (NonConstIdx != 0))
6460 llvm_unreachable("Unsupported BUILD_VECTOR operation");
6461 MVT SelectVT = (VT == MVT::v16i1)? MVT::i16 : MVT::i8;
6464 Select = DAG.getNode(ISD::SELECT, dl, SelectVT, Op.getOperand(0),
6465 DAG.getConstant(-1, SelectVT),
6466 DAG.getConstant(0, SelectVT));
6468 Select = DAG.getNode(ISD::SELECT, dl, SelectVT, Op.getOperand(0),
6469 DAG.getConstant((Immediate | 1), SelectVT),
6470 DAG.getConstant(Immediate, SelectVT));
6471 return DAG.getNode(ISD::BITCAST, dl, VT, Select);
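// Editorial worked example (not part of the original source): an all-constant
// v8i1 build_vector <1,0,1,1,0,0,0,1> accumulates Immediate = 0x8D on the
// all-constant path above (bit idx is set for each true operand), which is
// then materialized as a bitcast of the i16 constant 0x008D to v16i1 followed
// by an extract_subvector of the low 8 lanes.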
6474 /// \brief Return true if \p N implements a horizontal binop and return the
6475 /// operands for the horizontal binop into V0 and V1.
6477 /// This is a helper function of PerformBUILD_VECTORCombine.
6478 /// This function checks whether the input build_vector \p N implements a
6479 /// horizontal operation. Parameter \p Opcode defines the kind of horizontal
6480 /// operation to match.
6481 /// For example, if \p Opcode is equal to ISD::ADD, then this function
6482 /// checks if \p N implements a horizontal arithmetic add; if instead \p Opcode
6483 /// is equal to ISD::SUB, then this function checks if this is a horizontal arithmetic sub.
6486 /// This function only analyzes elements of \p N whose indices are
6487 /// in range [BaseIdx, LastIdx).
6488 static bool isHorizontalBinOp(const BuildVectorSDNode *N, unsigned Opcode,
6490 unsigned BaseIdx, unsigned LastIdx,
6491 SDValue &V0, SDValue &V1) {
6492 EVT VT = N->getValueType(0);
6494 assert(BaseIdx * 2 <= LastIdx && "Invalid Indices in input!");
6495 assert(VT.isVector() && VT.getVectorNumElements() >= LastIdx &&
6496 "Invalid Vector in input!");
6498 bool IsCommutable = (Opcode == ISD::ADD || Opcode == ISD::FADD);
6499 bool CanFold = true;
6500 unsigned ExpectedVExtractIdx = BaseIdx;
6501 unsigned NumElts = LastIdx - BaseIdx;
6502 V0 = DAG.getUNDEF(VT);
6503 V1 = DAG.getUNDEF(VT);
6505 // Check if N implements a horizontal binop.
6506 for (unsigned i = 0, e = NumElts; i != e && CanFold; ++i) {
6507 SDValue Op = N->getOperand(i + BaseIdx);
6510 if (Op->getOpcode() == ISD::UNDEF) {
6511 // Update the expected vector extract index.
6512 if (i * 2 == NumElts)
6513 ExpectedVExtractIdx = BaseIdx;
6514 ExpectedVExtractIdx += 2;
6518 CanFold = Op->getOpcode() == Opcode && Op->hasOneUse();
6523 SDValue Op0 = Op.getOperand(0);
6524 SDValue Op1 = Op.getOperand(1);
6526 // Try to match the following pattern:
6527 // (BINOP (extract_vector_elt A, I), (extract_vector_elt A, I+1))
6528 CanFold = (Op0.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
6529 Op1.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
6530 Op0.getOperand(0) == Op1.getOperand(0) &&
6531 isa<ConstantSDNode>(Op0.getOperand(1)) &&
6532 isa<ConstantSDNode>(Op1.getOperand(1)));
6536 unsigned I0 = cast<ConstantSDNode>(Op0.getOperand(1))->getZExtValue();
6537 unsigned I1 = cast<ConstantSDNode>(Op1.getOperand(1))->getZExtValue();
6539 if (i * 2 < NumElts) {
6540 if (V0.getOpcode() == ISD::UNDEF)
6541 V0 = Op0.getOperand(0);
6543 if (V1.getOpcode() == ISD::UNDEF)
6544 V1 = Op0.getOperand(0);
6545 if (i * 2 == NumElts)
6546 ExpectedVExtractIdx = BaseIdx;
6549 SDValue Expected = (i * 2 < NumElts) ? V0 : V1;
6550 if (I0 == ExpectedVExtractIdx)
6551 CanFold = I1 == I0 + 1 && Op0.getOperand(0) == Expected;
6552 else if (IsCommutable && I1 == ExpectedVExtractIdx) {
6553 // Try to match the following dag sequence:
6554 // (BINOP (extract_vector_elt A, I+1), (extract_vector_elt A, I))
6555 CanFold = I0 == I1 + 1 && Op1.getOperand(0) == Expected;
6559 ExpectedVExtractIdx += 2;
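// Editorial worked example (not part of the original source): for a v4f32
// build_vector whose operands are
//   (fadd (extract_vector_elt %A, 0), (extract_vector_elt %A, 1)),
//   (fadd (extract_vector_elt %A, 2), (extract_vector_elt %A, 3)),
//   (fadd (extract_vector_elt %B, 0), (extract_vector_elt %B, 1)),
//   (fadd (extract_vector_elt %B, 2), (extract_vector_elt %B, 3))
// a call with Opcode == ISD::FADD, BaseIdx == 0 and LastIdx == 4 succeeds
// with V0 = %A and V1 = %B, which the caller below turns into a single
// X86ISD::FHADD (haddps). %A and %B are illustrative names.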
6565 /// \brief Emit a sequence of two 128-bit horizontal add/sub followed by
6566 /// a concat_vector.
6568 /// This is a helper function of PerformBUILD_VECTORCombine.
6569 /// This function expects two 256-bit vectors called V0 and V1.
6570 /// At first, each vector is split into two separate 128-bit vectors.
6571 /// Then, the resulting 128-bit vectors are used to implement two
6572 /// horizontal binary operations.
6574 /// The kind of horizontal binary operation is defined by \p X86Opcode.
6576 /// \p Mode specifies how the 128-bit parts of V0 and V1 are passed as input to
6577 /// the two new horizontal binops.
6578 /// When Mode is set, the first horizontal binop dag node would take as input
6579 /// the lower 128-bit of V0 and the upper 128-bit of V0. The second
6580 /// horizontal binop dag node would take as input the lower 128-bit of V1
6581 /// and the upper 128-bit of V1.
6583 /// HADD V0_LO, V0_HI
6584 /// HADD V1_LO, V1_HI
6586 /// Otherwise, the first horizontal binop dag node takes as input the lower
6587 /// 128-bit of V0 and the lower 128-bit of V1, and the second horizontal binop
6588 /// dag node takes the upper 128-bit of V0 and the upper 128-bit of V1.
6590 /// HADD V0_LO, V1_LO
6591 /// HADD V0_HI, V1_HI
6593 /// If \p isUndefLO is set, then the algorithm propagates UNDEF to the lower
6594 /// 128-bits of the result. If \p isUndefHI is set, then UNDEF is propagated to
6595 /// the upper 128-bits of the result.
6596 static SDValue ExpandHorizontalBinOp(const SDValue &V0, const SDValue &V1,
6597 SDLoc DL, SelectionDAG &DAG,
6598 unsigned X86Opcode, bool Mode,
6599 bool isUndefLO, bool isUndefHI) {
6600 EVT VT = V0.getValueType();
6601 assert(VT.is256BitVector() && VT == V1.getValueType() &&
6602 "Invalid nodes in input!");
6604 unsigned NumElts = VT.getVectorNumElements();
6605 SDValue V0_LO = Extract128BitVector(V0, 0, DAG, DL);
6606 SDValue V0_HI = Extract128BitVector(V0, NumElts/2, DAG, DL);
6607 SDValue V1_LO = Extract128BitVector(V1, 0, DAG, DL);
6608 SDValue V1_HI = Extract128BitVector(V1, NumElts/2, DAG, DL);
6609 EVT NewVT = V0_LO.getValueType();
6611 SDValue LO = DAG.getUNDEF(NewVT);
6612 SDValue HI = DAG.getUNDEF(NewVT);
if (Mode) {
6615 // Don't emit a horizontal binop if the result is expected to be UNDEF.
6616 if (!isUndefLO && V0->getOpcode() != ISD::UNDEF)
6617 LO = DAG.getNode(X86Opcode, DL, NewVT, V0_LO, V0_HI);
6618 if (!isUndefHI && V1->getOpcode() != ISD::UNDEF)
6619 HI = DAG.getNode(X86Opcode, DL, NewVT, V1_LO, V1_HI);
} else {
6621 // Don't emit a horizontal binop if the result is expected to be UNDEF.
6622 if (!isUndefLO && (V0_LO->getOpcode() != ISD::UNDEF ||
6623 V1_LO->getOpcode() != ISD::UNDEF))
6624 LO = DAG.getNode(X86Opcode, DL, NewVT, V0_LO, V1_LO);
6626 if (!isUndefHI && (V0_HI->getOpcode() != ISD::UNDEF ||
6627 V1_HI->getOpcode() != ISD::UNDEF))
6628 HI = DAG.getNode(X86Opcode, DL, NewVT, V0_HI, V1_HI);
}
6631 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, LO, HI);
6634 /// \brief Try to fold a build_vector that performs an 'addsub' into the
6635 /// sequence of 'vadd + vsub + blendi'.
6636 static SDValue matchAddSub(const BuildVectorSDNode *BV, SelectionDAG &DAG,
6637 const X86Subtarget *Subtarget) {
6639 EVT VT = BV->getValueType(0);
6640 unsigned NumElts = VT.getVectorNumElements();
6641 SDValue InVec0 = DAG.getUNDEF(VT);
6642 SDValue InVec1 = DAG.getUNDEF(VT);
6644 assert((VT == MVT::v8f32 || VT == MVT::v4f64 || VT == MVT::v4f32 ||
6645 VT == MVT::v2f64) && "build_vector with an invalid type found!");
6647 // Odd-numbered elements in the input build vector are obtained from
6648 // adding two integer/float elements.
6649 // Even-numbered elements in the input build vector are obtained from
6650 // subtracting two integer/float elements.
6651 unsigned ExpectedOpcode = ISD::FSUB;
6652 unsigned NextExpectedOpcode = ISD::FADD;
6653 bool AddFound = false;
6654 bool SubFound = false;
6656 for (unsigned i = 0, e = NumElts; i != e; ++i) {
6657 SDValue Op = BV->getOperand(i);
6659 // Skip 'undef' values.
6660 unsigned Opcode = Op.getOpcode();
6661 if (Opcode == ISD::UNDEF) {
6662 std::swap(ExpectedOpcode, NextExpectedOpcode);
6666 // Early exit if we found an unexpected opcode.
6667 if (Opcode != ExpectedOpcode)
6670 SDValue Op0 = Op.getOperand(0);
6671 SDValue Op1 = Op.getOperand(1);
6673 // Try to match the following pattern:
6674 // (BINOP (extract_vector_elt A, i), (extract_vector_elt B, i))
6675 // Early exit if we cannot match that sequence.
6676 if (Op0.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
6677 Op1.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
6678 !isa<ConstantSDNode>(Op0.getOperand(1)) ||
6679 !isa<ConstantSDNode>(Op1.getOperand(1)) ||
6680 Op0.getOperand(1) != Op1.getOperand(1))
6683 unsigned I0 = cast<ConstantSDNode>(Op0.getOperand(1))->getZExtValue();
6687 // We found a valid add/sub node. Update the information accordingly.
6693 // Update InVec0 and InVec1.
6694 if (InVec0.getOpcode() == ISD::UNDEF)
6695 InVec0 = Op0.getOperand(0);
6696 if (InVec1.getOpcode() == ISD::UNDEF)
6697 InVec1 = Op1.getOperand(0);
6699 // Make sure that the operands of each add/sub node always
6700 // come from the same pair of vectors.
6701 if (InVec0 != Op0.getOperand(0)) {
6702 if (ExpectedOpcode == ISD::FSUB)
6705 // FADD is commutable. Try to commute the operands
6706 // and then test again.
6707 std::swap(Op0, Op1);
6708 if (InVec0 != Op0.getOperand(0))
6712 if (InVec1 != Op1.getOperand(0))
6715 // Update the pair of expected opcodes.
6716 std::swap(ExpectedOpcode, NextExpectedOpcode);
6719 // Don't try to fold this build_vector into an ADDSUB if the inputs are undef.
6720 if (AddFound && SubFound && InVec0.getOpcode() != ISD::UNDEF &&
6721 InVec1.getOpcode() != ISD::UNDEF)
6722 return DAG.getNode(X86ISD::ADDSUB, DL, VT, InVec0, InVec1);
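// Editorial worked example (not part of the original source): a v4f32
// build_vector of
//   (fsub (extract_vector_elt %A, 0), (extract_vector_elt %B, 0)),
//   (fadd (extract_vector_elt %A, 1), (extract_vector_elt %B, 1)),
//   (fsub (extract_vector_elt %A, 2), (extract_vector_elt %B, 2)),
//   (fadd (extract_vector_elt %A, 3), (extract_vector_elt %B, 3))
// alternates FSUB/FADD as expected and folds to (X86ISD::ADDSUB %A, %B),
// i.e. a single addsubps. %A and %B are illustrative names.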
6727 static SDValue PerformBUILD_VECTORCombine(SDNode *N, SelectionDAG &DAG,
6728 const X86Subtarget *Subtarget) {
6730 EVT VT = N->getValueType(0);
6731 unsigned NumElts = VT.getVectorNumElements();
6732 BuildVectorSDNode *BV = cast<BuildVectorSDNode>(N);
6733 SDValue InVec0, InVec1;
6735 // Try to match an ADDSUB.
6736 if ((Subtarget->hasSSE3() && (VT == MVT::v4f32 || VT == MVT::v2f64)) ||
6737 (Subtarget->hasAVX() && (VT == MVT::v8f32 || VT == MVT::v4f64))) {
6738 SDValue Value = matchAddSub(BV, DAG, Subtarget);
6739 if (Value.getNode())
6743 // Try to match horizontal ADD/SUB.
6744 unsigned NumUndefsLO = 0;
6745 unsigned NumUndefsHI = 0;
6746 unsigned Half = NumElts/2;
6748 // Count the number of UNDEF operands in the input build_vector.
6749 for (unsigned i = 0, e = Half; i != e; ++i)
6750 if (BV->getOperand(i)->getOpcode() == ISD::UNDEF)
6753 for (unsigned i = Half, e = NumElts; i != e; ++i)
6754 if (BV->getOperand(i)->getOpcode() == ISD::UNDEF)
6757 // Early exit if this is either a build_vector of all UNDEFs or all the
6758 // operands but one are UNDEF.
6759 if (NumUndefsLO + NumUndefsHI + 1 >= NumElts)
6762 if ((VT == MVT::v4f32 || VT == MVT::v2f64) && Subtarget->hasSSE3()) {
6763 // Try to match an SSE3 float HADD/HSUB.
6764 if (isHorizontalBinOp(BV, ISD::FADD, DAG, 0, NumElts, InVec0, InVec1))
6765 return DAG.getNode(X86ISD::FHADD, DL, VT, InVec0, InVec1);
6767 if (isHorizontalBinOp(BV, ISD::FSUB, DAG, 0, NumElts, InVec0, InVec1))
6768 return DAG.getNode(X86ISD::FHSUB, DL, VT, InVec0, InVec1);
6769 } else if ((VT == MVT::v4i32 || VT == MVT::v8i16) && Subtarget->hasSSSE3()) {
6770 // Try to match an SSSE3 integer HADD/HSUB.
6771 if (isHorizontalBinOp(BV, ISD::ADD, DAG, 0, NumElts, InVec0, InVec1))
6772 return DAG.getNode(X86ISD::HADD, DL, VT, InVec0, InVec1);
6774 if (isHorizontalBinOp(BV, ISD::SUB, DAG, 0, NumElts, InVec0, InVec1))
6775 return DAG.getNode(X86ISD::HSUB, DL, VT, InVec0, InVec1);
6778 if (!Subtarget->hasAVX())
6781 if ((VT == MVT::v8f32 || VT == MVT::v4f64)) {
6782 // Try to match an AVX horizontal add/sub of packed single/double
6783 // precision floating point values from 256-bit vectors.
6784 SDValue InVec2, InVec3;
6785 if (isHorizontalBinOp(BV, ISD::FADD, DAG, 0, Half, InVec0, InVec1) &&
6786 isHorizontalBinOp(BV, ISD::FADD, DAG, Half, NumElts, InVec2, InVec3) &&
6787 ((InVec0.getOpcode() == ISD::UNDEF ||
6788 InVec2.getOpcode() == ISD::UNDEF) || InVec0 == InVec2) &&
6789 ((InVec1.getOpcode() == ISD::UNDEF ||
6790 InVec3.getOpcode() == ISD::UNDEF) || InVec1 == InVec3))
6791 return DAG.getNode(X86ISD::FHADD, DL, VT, InVec0, InVec1);
6793 if (isHorizontalBinOp(BV, ISD::FSUB, DAG, 0, Half, InVec0, InVec1) &&
6794 isHorizontalBinOp(BV, ISD::FSUB, DAG, Half, NumElts, InVec2, InVec3) &&
6795 ((InVec0.getOpcode() == ISD::UNDEF ||
6796 InVec2.getOpcode() == ISD::UNDEF) || InVec0 == InVec2) &&
6797 ((InVec1.getOpcode() == ISD::UNDEF ||
6798 InVec3.getOpcode() == ISD::UNDEF) || InVec1 == InVec3))
6799 return DAG.getNode(X86ISD::FHSUB, DL, VT, InVec0, InVec1);
6800 } else if (VT == MVT::v8i32 || VT == MVT::v16i16) {
6801 // Try to match an AVX2 horizontal add/sub of signed integers.
6802 SDValue InVec2, InVec3;
6804 bool CanFold = true;
6806 if (isHorizontalBinOp(BV, ISD::ADD, DAG, 0, Half, InVec0, InVec1) &&
6807 isHorizontalBinOp(BV, ISD::ADD, DAG, Half, NumElts, InVec2, InVec3) &&
6808 ((InVec0.getOpcode() == ISD::UNDEF ||
6809 InVec2.getOpcode() == ISD::UNDEF) || InVec0 == InVec2) &&
6810 ((InVec1.getOpcode() == ISD::UNDEF ||
6811 InVec3.getOpcode() == ISD::UNDEF) || InVec1 == InVec3))
6812 X86Opcode = X86ISD::HADD;
6813 else if (isHorizontalBinOp(BV, ISD::SUB, DAG, 0, Half, InVec0, InVec1) &&
6814 isHorizontalBinOp(BV, ISD::SUB, DAG, Half, NumElts, InVec2, InVec3) &&
6815 ((InVec0.getOpcode() == ISD::UNDEF ||
6816 InVec2.getOpcode() == ISD::UNDEF) || InVec0 == InVec2) &&
6817 ((InVec1.getOpcode() == ISD::UNDEF ||
6818 InVec3.getOpcode() == ISD::UNDEF) || InVec1 == InVec3))
6819 X86Opcode = X86ISD::HSUB;
6824 // Fold this build_vector into a single horizontal add/sub.
6825 // Do this only if the target has AVX2.
6826 if (Subtarget->hasAVX2())
6827 return DAG.getNode(X86Opcode, DL, VT, InVec0, InVec1);
6829 // Do not try to expand this build_vector into a pair of horizontal
6830 // add/sub if we can emit a pair of scalar add/sub.
6831 if (NumUndefsLO + 1 == Half || NumUndefsHI + 1 == Half)
6834 // Convert this build_vector into a pair of horizontal binops followed by a concat vector.
6836 bool isUndefLO = NumUndefsLO == Half;
6837 bool isUndefHI = NumUndefsHI == Half;
6838 return ExpandHorizontalBinOp(InVec0, InVec1, DL, DAG, X86Opcode, false,
6839 isUndefLO, isUndefHI);
6843 if ((VT == MVT::v8f32 || VT == MVT::v4f64 || VT == MVT::v8i32 ||
6844 VT == MVT::v16i16) && Subtarget->hasAVX()) {
6846 if (isHorizontalBinOp(BV, ISD::ADD, DAG, 0, NumElts, InVec0, InVec1))
6847 X86Opcode = X86ISD::HADD;
6848 else if (isHorizontalBinOp(BV, ISD::SUB, DAG, 0, NumElts, InVec0, InVec1))
6849 X86Opcode = X86ISD::HSUB;
6850 else if (isHorizontalBinOp(BV, ISD::FADD, DAG, 0, NumElts, InVec0, InVec1))
6851 X86Opcode = X86ISD::FHADD;
6852 else if (isHorizontalBinOp(BV, ISD::FSUB, DAG, 0, NumElts, InVec0, InVec1))
6853 X86Opcode = X86ISD::FHSUB;
6857 // Don't try to expand this build_vector into a pair of horizontal add/sub
6858 // if we can simply emit a pair of scalar add/sub.
6859 if (NumUndefsLO + 1 == Half || NumUndefsHI + 1 == Half)
6862 // Convert this build_vector into two horizontal add/sub followed by a concat vector.
6864 bool isUndefLO = NumUndefsLO == Half;
6865 bool isUndefHI = NumUndefsHI == Half;
6866 return ExpandHorizontalBinOp(InVec0, InVec1, DL, DAG, X86Opcode, true,
6867 isUndefLO, isUndefHI);
6874 X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const {
6877 MVT VT = Op.getSimpleValueType();
6878 MVT ExtVT = VT.getVectorElementType();
6879 unsigned NumElems = Op.getNumOperands();
6881 // Generate vectors for predicate vectors.
6882 if (VT.getScalarType() == MVT::i1 && Subtarget->hasAVX512())
6883 return LowerBUILD_VECTORvXi1(Op, DAG);
6885 // Vectors containing all zeros can be matched by pxor and xorps later
6886 if (ISD::isBuildVectorAllZeros(Op.getNode())) {
6887 // Canonicalize this to <4 x i32> to 1) ensure the zero vectors are CSE'd
6888 // and 2) ensure that i64 scalars are eliminated on x86-32 hosts.
6889 if (VT == MVT::v4i32 || VT == MVT::v8i32 || VT == MVT::v16i32)
6892 return getZeroVector(VT, Subtarget, DAG, dl);
6895 // Vectors containing all ones can be matched by pcmpeqd on 128-bit width
6896 // vectors or broken into v4i32 operations on 256-bit vectors. AVX2 can use
6897 // vpcmpeqd on 256-bit vectors.
6898 if (Subtarget->hasSSE2() && ISD::isBuildVectorAllOnes(Op.getNode())) {
6899 if (VT == MVT::v4i32 || (VT == MVT::v8i32 && Subtarget->hasInt256()))
6902 if (!VT.is512BitVector())
6903 return getOnesVector(VT, Subtarget->hasInt256(), DAG, dl);
6906 SDValue Broadcast = LowerVectorBroadcast(Op, Subtarget, DAG);
6907 if (Broadcast.getNode())
6910 unsigned EVTBits = ExtVT.getSizeInBits();
6912 unsigned NumZero = 0;
6913 unsigned NumNonZero = 0;
6914 unsigned NonZeros = 0;
6915 bool IsAllConstants = true;
6916 SmallSet<SDValue, 8> Values;
6917 for (unsigned i = 0; i < NumElems; ++i) {
6918 SDValue Elt = Op.getOperand(i);
6919 if (Elt.getOpcode() == ISD::UNDEF)
6922 if (Elt.getOpcode() != ISD::Constant &&
6923 Elt.getOpcode() != ISD::ConstantFP)
6924 IsAllConstants = false;
6925 if (X86::isZeroNode(Elt))
6928 NonZeros |= (1 << i);
6933 // All undef vector. Return an UNDEF. All zero vectors were handled above.
6934 if (NumNonZero == 0)
6935 return DAG.getUNDEF(VT);
6937 // Special case for a single non-zero, non-undef element.
6938 if (NumNonZero == 1) {
6939 unsigned Idx = countTrailingZeros(NonZeros);
6940 SDValue Item = Op.getOperand(Idx);
6942 // If this is an insertion of an i64 value on x86-32, and if the top bits of
6943 // the value are obviously zero, truncate the value to i32 and do the
6944 // insertion that way. Only do this if the value is non-constant or if the
6945 // value is a constant being inserted into element 0. It is cheaper to do
6946 // a constant pool load than it is to do a movd + shuffle.
6947 if (ExtVT == MVT::i64 && !Subtarget->is64Bit() &&
6948 (!IsAllConstants || Idx == 0)) {
6949 if (DAG.MaskedValueIsZero(Item, APInt::getBitsSet(64, 32, 64))) {
6951 assert(VT == MVT::v2i64 && "Expected an SSE value type!");
6952 EVT VecVT = MVT::v4i32;
6953 unsigned VecElts = 4;
6955 // Truncate the value (which may itself be a constant) to i32, and
6956 // convert it to a vector with movd (S2V+shuffle to zero extend).
6957 Item = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Item);
6958 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VecVT, Item);
6960 // If using the new shuffle lowering, just directly insert this.
6961 if (ExperimentalVectorShuffleLowering)
6963 ISD::BITCAST, dl, VT,
6964 getShuffleVectorZeroOrUndef(Item, Idx * 2, true, Subtarget, DAG));
6966 Item = getShuffleVectorZeroOrUndef(Item, 0, true, Subtarget, DAG);
6968 // Now we have our 32-bit value zero extended in the low element of
6969 // a vector. If Idx != 0, swizzle it into place.
6971 SmallVector<int, 4> Mask;
6972 Mask.push_back(Idx);
6973 for (unsigned i = 1; i != VecElts; ++i)
6975 Item = DAG.getVectorShuffle(VecVT, dl, Item, DAG.getUNDEF(VecVT),
6978 return DAG.getNode(ISD::BITCAST, dl, VT, Item);
6982 // If we have a constant or non-constant insertion into the low element of
6983 // a vector, we can do this with SCALAR_TO_VECTOR + shuffle of zero into
6984 // the rest of the elements. This will be matched as movd/movq/movss/movsd
6985 // depending on what the source datatype is.
6988 return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Item);
6990 if (ExtVT == MVT::i32 || ExtVT == MVT::f32 || ExtVT == MVT::f64 ||
6991 (ExtVT == MVT::i64 && Subtarget->is64Bit())) {
6992 if (VT.is256BitVector() || VT.is512BitVector()) {
6993 SDValue ZeroVec = getZeroVector(VT, Subtarget, DAG, dl);
6994 return DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, ZeroVec,
6995 Item, DAG.getIntPtrConstant(0));
6997 assert(VT.is128BitVector() && "Expected an SSE value type!");
6998 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Item);
6999 // Turn it into a MOVL (i.e. movss, movsd, or movd) to a zero vector.
7000 return getShuffleVectorZeroOrUndef(Item, 0, true, Subtarget, DAG);
7003 if (ExtVT == MVT::i16 || ExtVT == MVT::i8) {
7004 Item = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, Item);
7005 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32, Item);
7006 if (VT.is256BitVector()) {
7007 SDValue ZeroVec = getZeroVector(MVT::v8i32, Subtarget, DAG, dl);
7008 Item = Insert128BitVector(ZeroVec, Item, 0, DAG, dl);
7010 assert(VT.is128BitVector() && "Expected an SSE value type!");
7011 Item = getShuffleVectorZeroOrUndef(Item, 0, true, Subtarget, DAG);
7013 return DAG.getNode(ISD::BITCAST, dl, VT, Item);
7017 // Is it a vector logical left shift?
7018 if (NumElems == 2 && Idx == 1 &&
7019 X86::isZeroNode(Op.getOperand(0)) &&
7020 !X86::isZeroNode(Op.getOperand(1))) {
7021 unsigned NumBits = VT.getSizeInBits();
7022 return getVShift(true, VT,
7023 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl,
7024 VT, Op.getOperand(1)),
7025 NumBits/2, DAG, *this, dl);
7028 if (IsAllConstants) // Otherwise, it's better to do a constpool load.
7031 // Otherwise, if this is a vector with i32 or f32 elements, and the element
7032 // is a non-constant being inserted into an element other than the low one,
7033 // we can't use a constant pool load. Instead, use SCALAR_TO_VECTOR (aka
7034 // movd/movss) to move this into the low element, then shuffle it into place.
7036 if (EVTBits == 32) {
7037 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Item);
7039 // If using the new shuffle lowering, just directly insert this.
7040 if (ExperimentalVectorShuffleLowering)
7041 return getShuffleVectorZeroOrUndef(Item, Idx, NumZero > 0, Subtarget, DAG);
7043 // Turn it into a shuffle of zero and zero-extended scalar to vector.
7044 Item = getShuffleVectorZeroOrUndef(Item, 0, NumZero > 0, Subtarget, DAG);
7045 SmallVector<int, 8> MaskVec;
7046 for (unsigned i = 0; i != NumElems; ++i)
7047 MaskVec.push_back(i == Idx ? 0 : 1);
7048 return DAG.getVectorShuffle(VT, dl, Item, DAG.getUNDEF(VT), &MaskVec[0]);
7052 // Splat is obviously ok. Let legalizer expand it to a shuffle.
7053 if (Values.size() == 1) {
7054 if (EVTBits == 32) {
7055 // Instead of a shuffle like this:
7056 // shuffle (scalar_to_vector (load (ptr + 4))), undef, <0, 0, 0, 0>
7057 // Check if it's possible to issue this instead.
7058 // shuffle (vload ptr), undef, <1, 1, 1, 1>
7059 unsigned Idx = countTrailingZeros(NonZeros);
7060 SDValue Item = Op.getOperand(Idx);
7061 if (Op.getNode()->isOnlyUserOf(Item.getNode()))
7062 return LowerAsSplatVectorLoad(Item, VT, dl, DAG);
7067 // A vector full of immediates; various special cases are already
7068 // handled, so this is best done with a single constant-pool load.
7072 // For AVX-length vectors, see if we can use a vector load to get all of the
7073 // elements, otherwise build the individual 128-bit pieces and use
7074 // shuffles to put them in place.
7075 if (VT.is256BitVector() || VT.is512BitVector()) {
7076 SmallVector<SDValue, 64> V(Op->op_begin(), Op->op_begin() + NumElems);
7078 // Check for a build vector of consecutive loads.
7079 if (SDValue LD = EltsFromConsecutiveLoads(VT, V, dl, DAG, false))
7082 EVT HVT = EVT::getVectorVT(*DAG.getContext(), ExtVT, NumElems/2);
7084 // Build both the lower and upper subvector.
7085 SDValue Lower = DAG.getNode(ISD::BUILD_VECTOR, dl, HVT,
7086 makeArrayRef(&V[0], NumElems/2));
7087 SDValue Upper = DAG.getNode(ISD::BUILD_VECTOR, dl, HVT,
7088 makeArrayRef(&V[NumElems / 2], NumElems/2));
7090 // Recreate the wider vector with the lower and upper part.
7091 if (VT.is256BitVector())
7092 return Concat128BitVectors(Lower, Upper, VT, NumElems, DAG, dl);
7093 return Concat256BitVectors(Lower, Upper, VT, NumElems, DAG, dl);
7096 // Let legalizer expand 2-wide build_vectors.
7097 if (EVTBits == 64) {
7098 if (NumNonZero == 1) {
7099 // One half is zero or undef.
7100 unsigned Idx = countTrailingZeros(NonZeros);
7101 SDValue V2 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT,
7102 Op.getOperand(Idx));
7103 return getShuffleVectorZeroOrUndef(V2, Idx, true, Subtarget, DAG);
7108 // If element VT is < 32 bits, convert it to inserts into a zero vector.
7109 if (EVTBits == 8 && NumElems == 16) {
7110 SDValue V = LowerBuildVectorv16i8(Op, NonZeros,NumNonZero,NumZero, DAG,
7112 if (V.getNode()) return V;
7115 if (EVTBits == 16 && NumElems == 8) {
7116 SDValue V = LowerBuildVectorv8i16(Op, NonZeros,NumNonZero,NumZero, DAG,
7118 if (V.getNode()) return V;
7121 // If element VT is == 32 bits and has 4 elems, try to generate an INSERTPS
7122 if (EVTBits == 32 && NumElems == 4) {
7123 SDValue V = LowerBuildVectorv4x32(Op, DAG, Subtarget, *this);
7128 // If element VT is == 32 bits, turn it into a number of shuffles.
7129 SmallVector<SDValue, 8> V(NumElems);
7130 if (NumElems == 4 && NumZero > 0) {
7131 for (unsigned i = 0; i < 4; ++i) {
7132 bool isZero = !(NonZeros & (1 << i));
7134 V[i] = getZeroVector(VT, Subtarget, DAG, dl);
7136 V[i] = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(i));
7139 for (unsigned i = 0; i < 2; ++i) {
7140 switch ((NonZeros & (0x3 << i*2)) >> (i*2)) {
7143 V[i] = V[i*2]; // Must be a zero vector.
7146 V[i] = getMOVL(DAG, dl, VT, V[i*2+1], V[i*2]);
7149 V[i] = getMOVL(DAG, dl, VT, V[i*2], V[i*2+1]);
7152 V[i] = getUnpackl(DAG, dl, VT, V[i*2], V[i*2+1]);
7157 bool Reverse1 = (NonZeros & 0x3) == 2;
7158 bool Reverse2 = ((NonZeros & (0x3 << 2)) >> 2) == 2;
7162 static_cast<int>(Reverse2 ? NumElems+1 : NumElems),
7163 static_cast<int>(Reverse2 ? NumElems : NumElems+1)
7165 return DAG.getVectorShuffle(VT, dl, V[0], V[1], &MaskVec[0]);
7168 if (Values.size() > 1 && VT.is128BitVector()) {
7169 // Check for a build vector of consecutive loads.
7170 for (unsigned i = 0; i < NumElems; ++i)
7171 V[i] = Op.getOperand(i);
7173 // Check for elements which are consecutive loads.
7174 SDValue LD = EltsFromConsecutiveLoads(VT, V, dl, DAG, false);
7178 // Check for a build_vector that is mostly a shuffle plus a few element insertions.
7179 SDValue Sh = buildFromShuffleMostly(Op, DAG);
7183 // For SSE 4.1, use insertps to put the high elements into the low element.
7184 if (Subtarget->hasSSE41()) {
7186 if (Op.getOperand(0).getOpcode() != ISD::UNDEF)
7187 Result = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(0));
7189 Result = DAG.getUNDEF(VT);
7191 for (unsigned i = 1; i < NumElems; ++i) {
7192 if (Op.getOperand(i).getOpcode() == ISD::UNDEF) continue;
7193 Result = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Result,
7194 Op.getOperand(i), DAG.getIntPtrConstant(i));
7199 // Otherwise, expand into a number of unpckl*, start by extending each of
7200 // our (non-undef) elements to the full vector width with the element in the
7201 // bottom slot of the vector (which generates no code for SSE).
7202 for (unsigned i = 0; i < NumElems; ++i) {
7203 if (Op.getOperand(i).getOpcode() != ISD::UNDEF)
7204 V[i] = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(i));
7206 V[i] = DAG.getUNDEF(VT);
7209 // Next, we iteratively mix elements, e.g. for v4f32:
7210 // Step 1: unpcklps 0, 2 ==> X: <?, ?, 2, 0>
7211 // : unpcklps 1, 3 ==> Y: <?, ?, 3, 1>
7212 // Step 2: unpcklps X, Y ==> <3, 2, 1, 0>
7213 unsigned EltStride = NumElems >> 1;
7214 while (EltStride != 0) {
7215 for (unsigned i = 0; i < EltStride; ++i) {
7216 // If V[i+EltStride] is undef and this is the first round of mixing,
7217 // then it is safe to just drop this shuffle: V[i] is already in the
7218 // right place, the one element (since it's the first round) being
7219 // inserted as undef can be dropped. This isn't safe for successive
7220 // rounds because they will permute elements within both vectors.
7221 if (V[i+EltStride].getOpcode() == ISD::UNDEF &&
7222 EltStride == NumElems/2)
7225 V[i] = getUnpackl(DAG, dl, VT, V[i], V[i + EltStride]);
7234 // LowerAVXCONCAT_VECTORS - 256-bit AVX can use the vinsertf128 instruction
7235 // to create 256-bit vectors from two other 128-bit ones.
7236 static SDValue LowerAVXCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) {
7238 MVT ResVT = Op.getSimpleValueType();
7240 assert((ResVT.is256BitVector() ||
7241 ResVT.is512BitVector()) && "Value type must be 256-/512-bit wide");
7243 SDValue V1 = Op.getOperand(0);
7244 SDValue V2 = Op.getOperand(1);
7245 unsigned NumElems = ResVT.getVectorNumElements();
7246 if(ResVT.is256BitVector())
7247 return Concat128BitVectors(V1, V2, ResVT, NumElems, DAG, dl);
7249 if (Op.getNumOperands() == 4) {
7250 MVT HalfVT = MVT::getVectorVT(ResVT.getScalarType(),
7251 ResVT.getVectorNumElements()/2);
7252 SDValue V3 = Op.getOperand(2);
7253 SDValue V4 = Op.getOperand(3);
7254 return Concat256BitVectors(Concat128BitVectors(V1, V2, HalfVT, NumElems/2, DAG, dl),
7255 Concat128BitVectors(V3, V4, HalfVT, NumElems/2, DAG, dl), ResVT, NumElems, DAG, dl);
7257 return Concat256BitVectors(V1, V2, ResVT, NumElems, DAG, dl);
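// Editorial worked example (not part of the original source): a v16f32
// CONCAT_VECTORS with four v4f32 operands V1..V4 takes the four-operand path
// above: V1/V2 and V3/V4 are each combined into a v8f32 with
// Concat128BitVectors (an insert_subvector, i.e. vinsertf128), and the two
// 256-bit halves are then joined into the 512-bit result with
// Concat256BitVectors.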
7260 static SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) {
7261 MVT LLVM_ATTRIBUTE_UNUSED VT = Op.getSimpleValueType();
7262 assert((VT.is256BitVector() && Op.getNumOperands() == 2) ||
7263 (VT.is512BitVector() && (Op.getNumOperands() == 2 ||
7264 Op.getNumOperands() == 4)));
7266 // AVX can use the vinsertf128 instruction to create 256-bit vectors
7267 // from two other 128-bit ones.
7269 // 512-bit vector may contain 2 256-bit vectors or 4 128-bit vectors
7270 return LowerAVXCONCAT_VECTORS(Op, DAG);
7274 //===----------------------------------------------------------------------===//
7275 // Vector shuffle lowering
7277 // This is an experimental code path for lowering vector shuffles on x86. It is
7278 // designed to handle arbitrary vector shuffles and blends, gracefully
7279 // degrading performance as necessary. It works hard to recognize idiomatic
7280 // shuffles and lower them to optimal instruction patterns without leaving
7281 // a framework that allows reasonably efficient handling of all vector shuffle operations.
7283 //===----------------------------------------------------------------------===//
7285 /// \brief Tiny helper function to identify a no-op mask.
7287 /// This is a somewhat boring predicate function. It checks whether the mask
7288 /// array input, which is assumed to be a single-input shuffle mask of the kind
7289 /// used by the X86 shuffle instructions (not a fully general
7290 /// ShuffleVectorSDNode mask) requires any shuffles to occur. Both undef and an
7291 /// in-place shuffle are 'no-op's.
7292 static bool isNoopShuffleMask(ArrayRef<int> Mask) {
7293 for (int i = 0, Size = Mask.size(); i < Size; ++i)
7294 if (Mask[i] != -1 && Mask[i] != i)
7299 /// \brief Helper function to classify a mask as a single-input mask.
7301 /// This isn't a generic single-input test because in the vector shuffle
7302 /// lowering we canonicalize single inputs to be the first input operand. This
7303 /// means we can more quickly test for a single input by only checking whether
7304 /// an input from the second operand exists. We also assume that the size of
7305 /// mask corresponds to the size of the input vectors which isn't true in the
7306 /// fully general case.
7307 static bool isSingleInputShuffleMask(ArrayRef<int> Mask) {
for (int M : Mask)
7309 if (M >= (int)Mask.size())
return false;
return true;
}
7314 /// \brief Test whether there are elements crossing 128-bit lanes in this
7317 /// X86 divides up its shuffles into in-lane and cross-lane shuffle operations
7318 /// and we routinely test for these.
7319 static bool is128BitLaneCrossingShuffleMask(MVT VT, ArrayRef<int> Mask) {
7320 int LaneSize = 128 / VT.getScalarSizeInBits();
7321 int Size = Mask.size();
7322 for (int i = 0; i < Size; ++i)
7323 if (Mask[i] >= 0 && (Mask[i] % Size) / LaneSize != i / LaneSize)
7328 /// \brief Test whether a shuffle mask is equivalent within each 128-bit lane.
7330 /// This checks a shuffle mask to see if it is performing the same
7331 /// 128-bit lane-relative shuffle in each 128-bit lane. This trivially implies
7332 /// that it is also not lane-crossing. It may however involve a blend from the
7333 /// same lane of a second vector.
7335 /// The specific repeated shuffle mask is populated in \p RepeatedMask, as it is
7336 /// non-trivial to compute in the face of undef lanes. The representation is
7337 /// *not* suitable for use with existing 128-bit shuffles as it will contain
7338 /// entries from both V1 and V2 inputs to the wider mask.
7340 is128BitLaneRepeatedShuffleMask(MVT VT, ArrayRef<int> Mask,
7341 SmallVectorImpl<int> &RepeatedMask) {
7342 int LaneSize = 128 / VT.getScalarSizeInBits();
7343 RepeatedMask.resize(LaneSize, -1);
7344 int Size = Mask.size();
7345 for (int i = 0; i < Size; ++i) {
7348 if ((Mask[i] % Size) / LaneSize != i / LaneSize)
7349 // This entry crosses lanes, so there is no way to model this shuffle.
7352 // Ok, handle the in-lane shuffles by detecting if and when they repeat.
7353 if (RepeatedMask[i % LaneSize] == -1)
7354 // This is the first non-undef entry in this slot of a 128-bit lane.
7355 RepeatedMask[i % LaneSize] =
7356 Mask[i] < Size ? Mask[i] % LaneSize : Mask[i] % LaneSize + Size;
7357 else if (RepeatedMask[i % LaneSize] + (i / LaneSize) * LaneSize != Mask[i])
7358 // Found a mismatch with the repeated mask.
7364 /// \brief Base case helper for testing a single mask element.
7365 static bool isShuffleEquivalentImpl(SDValue V1, SDValue V2,
7366 BuildVectorSDNode *BV1,
7367 BuildVectorSDNode *BV2, ArrayRef<int> Mask,
7369 int Size = Mask.size();
7370 if (Mask[i] != -1 && Mask[i] != Arg) {
7371 auto *MaskBV = Mask[i] < Size ? BV1 : BV2;
7372 auto *ArgsBV = Arg < Size ? BV1 : BV2;
7373 if (!MaskBV || !ArgsBV ||
7374 MaskBV->getOperand(Mask[i] % Size) != ArgsBV->getOperand(Arg % Size))
7380 /// \brief Recursive helper to peel off and test each mask element.
7381 template <typename... Ts>
7382 static bool isShuffleEquivalentImpl(SDValue V1, SDValue V2,
7383 BuildVectorSDNode *BV1,
7384 BuildVectorSDNode *BV2, ArrayRef<int> Mask,
7385 int i, int Arg, Ts... Args) {
7386 if (!isShuffleEquivalentImpl(V1, V2, BV1, BV2, Mask, i, Arg))
7389 return isShuffleEquivalentImpl(V1, V2, BV1, BV2, Mask, i + 1, Args...);
7392 /// \brief Checks whether a shuffle mask is equivalent to an explicit list of
7395 /// This is a fast way to test a shuffle mask against a fixed pattern:
7397 /// if (isShuffleEquivalent(Mask, 3, 2, 1, 0)) { ... }
7399 /// It returns true if the mask is exactly as wide as the argument list, and
7400 /// each element of the mask is either -1 (signifying undef) or the value given
7401 /// in the argument.
7402 template <typename... Ts>
7403 static bool isShuffleEquivalent(SDValue V1, SDValue V2, ArrayRef<int> Mask,
7405 if (Mask.size() != sizeof...(Args))
7408 // If the values are build vectors, we can look through them to find
7409 // equivalent inputs that make the shuffles equivalent.
7410 auto *BV1 = dyn_cast<BuildVectorSDNode>(V1);
7411 auto *BV2 = dyn_cast<BuildVectorSDNode>(V2);
7413 // Recursively peel off arguments and test them against the mask.
7414 return isShuffleEquivalentImpl(V1, V2, BV1, BV2, Mask, 0, Args...);
7417 /// \brief Get a 4-lane 8-bit shuffle immediate for a mask.
7419 /// This helper function produces an 8-bit shuffle immediate corresponding to
7420 /// the ubiquitous shuffle encoding scheme used in x86 instructions for
7421 /// shuffling 4 lanes. It can be used with most of the PSHUF instructions for
7424 /// NB: We rely heavily on "undef" masks preserving the input lane.
7425 static SDValue getV4X86ShuffleImm8ForMask(ArrayRef<int> Mask,
7426 SelectionDAG &DAG) {
7427 assert(Mask.size() == 4 && "Only 4-lane shuffle masks");
7428 assert(Mask[0] >= -1 && Mask[0] < 4 && "Out of bound mask element!");
7429 assert(Mask[1] >= -1 && Mask[1] < 4 && "Out of bound mask element!");
7430 assert(Mask[2] >= -1 && Mask[2] < 4 && "Out of bound mask element!");
7431 assert(Mask[3] >= -1 && Mask[3] < 4 && "Out of bound mask element!");
7434 Imm |= (Mask[0] == -1 ? 0 : Mask[0]) << 0;
7435 Imm |= (Mask[1] == -1 ? 1 : Mask[1]) << 2;
7436 Imm |= (Mask[2] == -1 ? 2 : Mask[2]) << 4;
7437 Imm |= (Mask[3] == -1 ? 3 : Mask[3]) << 6;
7438 return DAG.getConstant(Imm, MVT::i8);
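// Editorial sketch (not part of the original source): the same 2-bit-per-lane
// encoding written out with plain integers, handy for checking immediates by
// hand. The helper name is illustrative only. For example the reversal mask
// <3, 2, 1, 0> encodes as 0b00011011 == 0x1B, the familiar "pshufd $0x1b".
static unsigned encodeV4ShuffleImm8(int M0, int M1, int M2, int M3) {
  // Two bits per lane, element 0 in the lowest bits; undef (-1) entries would
  // be replaced by their identity index before calling this.
  return (unsigned)M0 | ((unsigned)M1 << 2) | ((unsigned)M2 << 4) |
         ((unsigned)M3 << 6);
}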
7441 /// \brief Try to emit a blend instruction for a shuffle using bit math.
7443 /// This is used as a fallback approach when first class blend instructions are
7444 /// unavailable. Currently it is only suitable for integer vectors, but could
7445 /// be generalized for floating point vectors if desirable.
7446 static SDValue lowerVectorShuffleAsBitBlend(SDLoc DL, MVT VT, SDValue V1,
7447 SDValue V2, ArrayRef<int> Mask,
7448 SelectionDAG &DAG) {
7449 assert(VT.isInteger() && "Only supports integer vector types!");
7450 MVT EltVT = VT.getScalarType();
7451 int NumEltBits = EltVT.getSizeInBits();
7452 SDValue Zero = DAG.getConstant(0, EltVT);
7453 SDValue AllOnes = DAG.getConstant(APInt::getAllOnesValue(NumEltBits), EltVT);
7454 SmallVector<SDValue, 16> MaskOps;
7455 for (int i = 0, Size = Mask.size(); i < Size; ++i) {
7456 if (Mask[i] != -1 && Mask[i] != i && Mask[i] != i + Size)
7457 return SDValue(); // Shuffled input!
7458 MaskOps.push_back(Mask[i] < Size ? AllOnes : Zero);
7461 SDValue V1Mask = DAG.getNode(ISD::BUILD_VECTOR, DL, VT, MaskOps);
7462 V1 = DAG.getNode(ISD::AND, DL, VT, V1, V1Mask);
7463 // We have to cast V2 around.
7464 MVT MaskVT = MVT::getVectorVT(MVT::i64, VT.getSizeInBits() / 64);
7465 V2 = DAG.getNode(ISD::BITCAST, DL, VT,
7466 DAG.getNode(X86ISD::ANDNP, DL, MaskVT,
7467 DAG.getNode(ISD::BITCAST, DL, MaskVT, V1Mask),
7468 DAG.getNode(ISD::BITCAST, DL, MaskVT, V2)));
7469 return DAG.getNode(ISD::OR, DL, VT, V1, V2);
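// Editorial sketch (not part of the original source): the same bit-math blend
// in scalar form for a v4i32 shuffle with mask <0, 5, 2, 7>. The helper name
// and the fixed mask are illustrative only; lanes whose mask entry selects V1
// get an all-ones word, the rest go through the and-not path, mirroring the
// AND / ANDNP / OR node sequence above.
static void bitBlendV4I32Example(const unsigned V1[4], const unsigned V2[4],
                                 unsigned Out[4]) {
  static const int Mask[4] = {0, 5, 2, 7};
  for (int i = 0; i < 4; ++i) {
    unsigned LaneMask = Mask[i] < 4 ? 0xFFFFFFFFu : 0u;
    Out[i] = (V1[i] & LaneMask) | (V2[i] & ~LaneMask);
  }
}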
7472 /// \brief Try to emit a blend instruction for a shuffle.
7474 /// This doesn't do any checks for the availability of instructions for blending
7475 /// these values. It relies on the availability of the X86ISD::BLENDI pattern to
7476 /// be matched in the backend with the type given. What it does check for is
7477 /// that the shuffle mask is in fact a blend.
7478 static SDValue lowerVectorShuffleAsBlend(SDLoc DL, MVT VT, SDValue V1,
7479 SDValue V2, ArrayRef<int> Mask,
7480 const X86Subtarget *Subtarget,
7481 SelectionDAG &DAG) {
7482 unsigned BlendMask = 0;
7483 for (int i = 0, Size = Mask.size(); i < Size; ++i) {
7484 if (Mask[i] >= Size) {
7485 if (Mask[i] != i + Size)
7486 return SDValue(); // Shuffled V2 input!
7487 BlendMask |= 1u << i;
7490 if (Mask[i] >= 0 && Mask[i] != i)
7491 return SDValue(); // Shuffled V1 input!
7493 switch (VT.SimpleTy) {
7498 return DAG.getNode(X86ISD::BLENDI, DL, VT, V1, V2,
7499 DAG.getConstant(BlendMask, MVT::i8));
7503 assert(Subtarget->hasAVX2() && "256-bit integer blends require AVX2!");
7507 // If we have AVX2 it is faster to use VPBLENDD when the shuffle fits into
7508 // that instruction.
7509 if (Subtarget->hasAVX2()) {
7510 // Scale the blend by the number of 32-bit dwords per element.
7511 int Scale = VT.getScalarSizeInBits() / 32;
7513 for (int i = 0, Size = Mask.size(); i < Size; ++i)
7514 if (Mask[i] >= Size)
7515 for (int j = 0; j < Scale; ++j)
7516 BlendMask |= 1u << (i * Scale + j);
7518 MVT BlendVT = VT.getSizeInBits() > 128 ? MVT::v8i32 : MVT::v4i32;
7519 V1 = DAG.getNode(ISD::BITCAST, DL, BlendVT, V1);
7520 V2 = DAG.getNode(ISD::BITCAST, DL, BlendVT, V2);
7521 return DAG.getNode(ISD::BITCAST, DL, VT,
7522 DAG.getNode(X86ISD::BLENDI, DL, BlendVT, V1, V2,
7523 DAG.getConstant(BlendMask, MVT::i8)));
7527 // For integer shuffles we need to expand the mask and cast the inputs to
7528 // v8i16s prior to blending.
7529 int Scale = 8 / VT.getVectorNumElements();
7531 for (int i = 0, Size = Mask.size(); i < Size; ++i)
7532 if (Mask[i] >= Size)
7533 for (int j = 0; j < Scale; ++j)
7534 BlendMask |= 1u << (i * Scale + j);
7536 V1 = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, V1);
7537 V2 = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, V2);
7538 return DAG.getNode(ISD::BITCAST, DL, VT,
7539 DAG.getNode(X86ISD::BLENDI, DL, MVT::v8i16, V1, V2,
7540 DAG.getConstant(BlendMask, MVT::i8)));
7544 assert(Subtarget->hasAVX2() && "256-bit integer blends require AVX2!");
7545 SmallVector<int, 8> RepeatedMask;
7546 if (is128BitLaneRepeatedShuffleMask(MVT::v16i16, Mask, RepeatedMask)) {
7547 // We can lower these with PBLENDW which is mirrored across 128-bit lanes.
7548 assert(RepeatedMask.size() == 8 && "Repeated mask size doesn't match!");
7550 for (int i = 0; i < 8; ++i)
7551 if (RepeatedMask[i] >= 16)
7552 BlendMask |= 1u << i;
7553 return DAG.getNode(X86ISD::BLENDI, DL, MVT::v16i16, V1, V2,
7554 DAG.getConstant(BlendMask, MVT::i8));
7560 // Scale the blend by the number of bytes per element.
7561 int Scale = VT.getScalarSizeInBits() / 8;
7563 // This form of blend is always done on bytes. Compute the byte vector type.
7565 MVT BlendVT = MVT::getVectorVT(MVT::i8, VT.getSizeInBits() / 8);
7567 // Compute the VSELECT mask. Note that VSELECT is really confusing in the
7568 // mix of LLVM's code generator and the x86 backend. We tell the code
7569 // generator that boolean values in the elements of an x86 vector register
7570 // are -1 for true and 0 for false. We then use the LLVM semantics of 'true'
7571 // mapping a select to operand #1, and 'false' mapping to operand #2. The
7572 // reality in x86 is that vector masks (pre-AVX-512) use only the high bit
7573 // of the element (the remaining are ignored) and 0 in that high bit would
7574 // mean operand #1 while 1 in the high bit would mean operand #2. So while
7575 // the LLVM model for boolean values in vector elements gets the relevant
7576 // bit set, it is set backwards and over-constrained relative to x86's model.
7578 SmallVector<SDValue, 32> VSELECTMask;
7579 for (int i = 0, Size = Mask.size(); i < Size; ++i)
7580 for (int j = 0; j < Scale; ++j)
7581 VSELECTMask.push_back(
7582 Mask[i] < 0 ? DAG.getUNDEF(MVT::i8)
7583 : DAG.getConstant(Mask[i] < Size ? -1 : 0, MVT::i8));
7585 V1 = DAG.getNode(ISD::BITCAST, DL, BlendVT, V1);
7586 V2 = DAG.getNode(ISD::BITCAST, DL, BlendVT, V2);
7588 ISD::BITCAST, DL, VT,
7589 DAG.getNode(ISD::VSELECT, DL, BlendVT,
7590 DAG.getNode(ISD::BUILD_VECTOR, DL, BlendVT, VSELECTMask),
7595 llvm_unreachable("Not a supported integer vector type!");
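// Editorial sketch (not part of the original source): computing the PBLENDW
// immediate for a v8i16 blend with plain integers, assuming the mask has
// already been verified to be a blend (each lane i is either i or i + 8).
// The helper name is illustrative only. The mask <0, 9, 2, 11, 4, 13, 6, 15>
// keeps V1 in the even lanes and V2 in the odd lanes and yields 0xAA.
static unsigned computeV8I16BlendImm(const int Mask[8]) {
  unsigned Imm = 0;
  for (int i = 0; i < 8; ++i)
    if (Mask[i] >= 8) // Lane taken from the second input.
      Imm |= 1u << i;
  return Imm;
}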
7599 /// \brief Try to lower as a blend of elements from two inputs followed by
7600 /// a single-input permutation.
7602 /// This matches the pattern where we can blend elements from two inputs and
7603 /// then reduce the shuffle to a single-input permutation.
7604 static SDValue lowerVectorShuffleAsBlendAndPermute(SDLoc DL, MVT VT, SDValue V1,
7607 SelectionDAG &DAG) {
7608 // We build up the blend mask while checking whether a blend is a viable way
7609 // to reduce the shuffle.
7610 SmallVector<int, 32> BlendMask(Mask.size(), -1);
7611 SmallVector<int, 32> PermuteMask(Mask.size(), -1);
7613 for (int i = 0, Size = Mask.size(); i < Size; ++i) {
7617 assert(Mask[i] < Size * 2 && "Shuffle input is out of bounds.");
7619 if (BlendMask[Mask[i] % Size] == -1)
7620 BlendMask[Mask[i] % Size] = Mask[i];
7621 else if (BlendMask[Mask[i] % Size] != Mask[i])
7622 return SDValue(); // Can't blend in the needed input!
7624 PermuteMask[i] = Mask[i] % Size;
7627 SDValue V = DAG.getVectorShuffle(VT, DL, V1, V2, BlendMask);
7628 return DAG.getVectorShuffle(VT, DL, V, DAG.getUNDEF(VT), PermuteMask);
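// Editorial worked example (not part of the original source): a v4i32 shuffle
// with mask <6, 0, 5, 3> first blends lane-for-lane into <0, 5, 6, 3> (each
// output lane j of the blend takes element j of V1 or element j of V2) and is
// then fixed up with the single-input permute mask <2, 0, 1, 3>.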
7631 /// \brief Generic routine to decompose a shuffle and blend into independent
7632 /// blends and permutes.
7634 /// This matches the extremely common pattern for handling combined
7635 /// shuffle+blend operations on newer X86 ISAs where we have very fast blend
7636 /// operations. It will try to pick the best arrangement of shuffles and blends.
7638 static SDValue lowerVectorShuffleAsDecomposedShuffleBlend(SDLoc DL, MVT VT,
7642 SelectionDAG &DAG) {
7643 // Shuffle the input elements into the desired positions in V1 and V2 and
7644 // blend them together.
7645 SmallVector<int, 32> V1Mask(Mask.size(), -1);
7646 SmallVector<int, 32> V2Mask(Mask.size(), -1);
7647 SmallVector<int, 32> BlendMask(Mask.size(), -1);
7648 for (int i = 0, Size = Mask.size(); i < Size; ++i)
7649 if (Mask[i] >= 0 && Mask[i] < Size) {
7650 V1Mask[i] = Mask[i];
7652 } else if (Mask[i] >= Size) {
7653 V2Mask[i] = Mask[i] - Size;
7654 BlendMask[i] = i + Size;
7657 // Try to lower with the simpler initial blend strategy unless one of the
7658 // input shuffles would be a no-op. We prefer to shuffle inputs as the
7659 // shuffle may be able to fold with a load or other benefit. However, when
7660 // we'll have to do 2x as many shuffles in order to achieve this, blending
7661 // first is a better strategy.
7662 if (!isNoopShuffleMask(V1Mask) && !isNoopShuffleMask(V2Mask))
7663 if (SDValue BlendPerm =
7664 lowerVectorShuffleAsBlendAndPermute(DL, VT, V1, V2, Mask, DAG))
7667 V1 = DAG.getVectorShuffle(VT, DL, V1, DAG.getUNDEF(VT), V1Mask);
7668 V2 = DAG.getVectorShuffle(VT, DL, V2, DAG.getUNDEF(VT), V2Mask);
7669 return DAG.getVectorShuffle(VT, DL, V1, V2, BlendMask);
7672 /// \brief Try to lower a vector shuffle as a byte rotation.
7674 /// SSSE3 has a generic PALIGNR instruction in x86 that will do an arbitrary
7675 /// byte-rotation of the concatenation of two vectors; pre-SSSE3 can use
7676 /// a PSRLDQ/PSLLDQ/POR pattern to get a similar effect. This routine will
7677 /// try to generically lower a vector shuffle through such a pattern. It
7678 /// does not check for the profitability of lowering either as PALIGNR or
7679 /// PSRLDQ/PSLLDQ/POR, only whether the mask is valid to lower in that form.
7680 /// This matches shuffle vectors that look like:
7682 /// v8i16 [11, 12, 13, 14, 15, 0, 1, 2]
7684 /// Essentially it concatenates V1 and V2, shifts right by some number of
7685 /// elements, and takes the low elements as the result. Note that while this is
7686 /// specified as a *right shift* because x86 is little-endian, it is a *left
7687 /// rotate* of the vector lanes.
7688 static SDValue lowerVectorShuffleAsByteRotate(SDLoc DL, MVT VT, SDValue V1,
7691 const X86Subtarget *Subtarget,
7692 SelectionDAG &DAG) {
7693 assert(!isNoopShuffleMask(Mask) && "We shouldn't lower no-op shuffles!");
7695 int NumElts = Mask.size();
7696 int NumLanes = VT.getSizeInBits() / 128;
7697 int NumLaneElts = NumElts / NumLanes;
7699 // We need to detect various ways of spelling a rotation:
7700 // [11, 12, 13, 14, 15, 0, 1, 2]
7701 // [-1, 12, 13, 14, -1, -1, 1, -1]
7702 // [-1, -1, -1, -1, -1, -1, 1, 2]
7703 // [ 3, 4, 5, 6, 7, 8, 9, 10]
7704 // [-1, 4, 5, 6, -1, -1, 9, -1]
7705 // [-1, 4, 5, 6, -1, -1, -1, -1]
7708 for (int l = 0; l < NumElts; l += NumLaneElts) {
7709 for (int i = 0; i < NumLaneElts; ++i) {
7710 if (Mask[l + i] == -1)
7712 assert(Mask[l + i] >= 0 && "Only -1 is a valid negative mask element!");
7714 // Get the mod-Size index and lane correct it.
7715 int LaneIdx = (Mask[l + i] % NumElts) - l;
7716 // Make sure it was in this lane.
7717 if (LaneIdx < 0 || LaneIdx >= NumLaneElts)
7720 // Determine where a rotated vector would have started.
7721 int StartIdx = i - LaneIdx;
7723 // The identity rotation isn't interesting, stop.
7726 // If we found the tail of a vector the rotation must be the missing
7727 // front. If we found the head of a vector, it must be how much of the head.
7729 int CandidateRotation = StartIdx < 0 ? -StartIdx : NumLaneElts - StartIdx;
7732 Rotation = CandidateRotation;
7733 else if (Rotation != CandidateRotation)
7734 // The rotations don't match, so we can't match this mask.
7737 // Compute which value this mask is pointing at.
7738 SDValue MaskV = Mask[l + i] < NumElts ? V1 : V2;
7740 // Compute which of the two target values this index should be assigned
7741 // to. This reflects whether the high elements are remaining or the low
7742 // elements are remaining.
7743 SDValue &TargetV = StartIdx < 0 ? Hi : Lo;
7745 // Either set up this value if we've not encountered it before, or check
7746 // that it remains consistent.
7749 else if (TargetV != MaskV)
7750 // This may be a rotation, but it pulls from the inputs in some
7751 // unsupported interleaving.
7756 // Check that we successfully analyzed the mask, and normalize the results.
7757 assert(Rotation != 0 && "Failed to locate a viable rotation!");
7758 assert((Lo || Hi) && "Failed to find a rotated input vector!");
7764 // The actual rotate instruction rotates bytes, so we need to scale the
7765 // rotation based on how many bytes are in the vector lane.
7766 int Scale = 16 / NumLaneElts;
7768 // SSSE3 targets can use the palignr instruction.
7769 if (Subtarget->hasSSSE3()) {
7770 // Cast the inputs to i8 vector of correct length to match PALIGNR.
7771 MVT AlignVT = MVT::getVectorVT(MVT::i8, 16 * NumLanes);
7772 Lo = DAG.getNode(ISD::BITCAST, DL, AlignVT, Lo);
7773 Hi = DAG.getNode(ISD::BITCAST, DL, AlignVT, Hi);
7775 return DAG.getNode(ISD::BITCAST, DL, VT,
7776 DAG.getNode(X86ISD::PALIGNR, DL, AlignVT, Hi, Lo,
7777 DAG.getConstant(Rotation * Scale, MVT::i8)));
7780 assert(VT.getSizeInBits() == 128 &&
7781 "Rotate-based lowering only supports 128-bit lowering!");
7782 assert(Mask.size() <= 16 &&
7783 "Can shuffle at most 16 bytes in a 128-bit vector!");
7785 // Default SSE2 implementation
7786 int LoByteShift = 16 - Rotation * Scale;
7787 int HiByteShift = Rotation * Scale;
7789 // Cast the inputs to v2i64 to match PSLLDQ/PSRLDQ.
7790 Lo = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, Lo);
7791 Hi = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, Hi);
7793 SDValue LoShift = DAG.getNode(X86ISD::VSHLDQ, DL, MVT::v2i64, Lo,
7794 DAG.getConstant(LoByteShift, MVT::i8));
7795 SDValue HiShift = DAG.getNode(X86ISD::VSRLDQ, DL, MVT::v2i64, Hi,
7796 DAG.getConstant(HiByteShift, MVT::i8));
7797 return DAG.getNode(ISD::BITCAST, DL, VT,
7798 DAG.getNode(ISD::OR, DL, MVT::v2i64, LoShift, HiShift));
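// Editorial worked example (not part of the original source): for the v8i16
// mask [11, 12, 13, 14, 15, 0, 1, 2] quoted in the comment above, the loop
// settles on Rotation = 3 elements; with 2 bytes per element that is a
// 6-byte PALIGNR immediate on SSSE3, or LoByteShift = 10 / HiByteShift = 6
// for the PSLLDQ/PSRLDQ/POR fallback.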
7801 /// \brief Compute whether each element of a shuffle is zeroable.
7803 /// A "zeroable" vector shuffle element is one which can be lowered to zero.
7804 /// Either it is an undef element in the shuffle mask, the element of the input
7805 /// referenced is undef, or the element of the input referenced is known to be
7806 /// zero. Many x86 shuffles can zero lanes cheaply and we often want to handle
7807 /// as many lanes with this technique as possible to simplify the remaining shuffle.
7809 static SmallBitVector computeZeroableShuffleElements(ArrayRef<int> Mask,
7810 SDValue V1, SDValue V2) {
7811 SmallBitVector Zeroable(Mask.size(), false);
7813 while (V1.getOpcode() == ISD::BITCAST)
7814 V1 = V1->getOperand(0);
7815 while (V2.getOpcode() == ISD::BITCAST)
7816 V2 = V2->getOperand(0);
7818 bool V1IsZero = ISD::isBuildVectorAllZeros(V1.getNode());
7819 bool V2IsZero = ISD::isBuildVectorAllZeros(V2.getNode());
7821 for (int i = 0, Size = Mask.size(); i < Size; ++i) {
7823 // Handle the easy cases.
7824 if (M < 0 || (M >= 0 && M < Size && V1IsZero) || (M >= Size && V2IsZero)) {
7829 // If this is an index into a build_vector node (which has the same number
7830 // of elements), dig out the input value and use it.
7831 SDValue V = M < Size ? V1 : V2;
7832 if (V.getOpcode() != ISD::BUILD_VECTOR || Size != (int)V.getNumOperands())
7835 SDValue Input = V.getOperand(M % Size);
7836 // The UNDEF opcode check really should be dead code here, but not quite
7837 // worth asserting on (it isn't invalid, just unexpected).
7838 if (Input.getOpcode() == ISD::UNDEF || X86::isZeroNode(Input))
7845 /// \brief Try to emit a bitmask instruction for a shuffle.
7847 /// This handles cases where we can model a blend exactly as a bitmask due to
7848 /// one of the inputs being zeroable.
7849 static SDValue lowerVectorShuffleAsBitMask(SDLoc DL, MVT VT, SDValue V1,
7850 SDValue V2, ArrayRef<int> Mask,
7851 SelectionDAG &DAG) {
7852 MVT EltVT = VT.getScalarType();
7853 int NumEltBits = EltVT.getSizeInBits();
7854 MVT IntEltVT = MVT::getIntegerVT(NumEltBits);
7855 SDValue Zero = DAG.getConstant(0, IntEltVT);
7856 SDValue AllOnes = DAG.getConstant(APInt::getAllOnesValue(NumEltBits), IntEltVT);
7857 if (EltVT.isFloatingPoint()) {
7858 Zero = DAG.getNode(ISD::BITCAST, DL, EltVT, Zero);
7859 AllOnes = DAG.getNode(ISD::BITCAST, DL, EltVT, AllOnes);
7861 SmallVector<SDValue, 16> VMaskOps(Mask.size(), Zero);
7862 SmallBitVector Zeroable = computeZeroableShuffleElements(Mask, V1, V2);
7864 for (int i = 0, Size = Mask.size(); i < Size; ++i) {
7867 if (Mask[i] % Size != i)
7868 return SDValue(); // Not a blend.
7870 V = Mask[i] < Size ? V1 : V2;
7871 else if (V != (Mask[i] < Size ? V1 : V2))
7872 return SDValue(); // Can only let one input through the mask.
7874 VMaskOps[i] = AllOnes;
7877 return SDValue(); // No non-zeroable elements!
7879 SDValue VMask = DAG.getNode(ISD::BUILD_VECTOR, DL, VT, VMaskOps);
7880 V = DAG.getNode(VT.isFloatingPoint()
7881 ? (unsigned) X86ISD::FAND : (unsigned) ISD::AND,
7886 /// \brief Try to lower a vector shuffle as a bit shift (shifts in zeros).
7888 /// Attempts to match a shuffle mask against the PSLL(W/D/Q/DQ) and
7889 /// PSRL(W/D/Q/DQ) SSE2 and AVX2 logical bit-shift instructions. The function
7890 /// matches elements from one of the input vectors shuffled to the left or
7891 /// right with zeroable elements 'shifted in'. It handles both the strictly
7892 /// bit-wise element shifts and the byte shift across an entire 128-bit double quad word lane.
7895 /// PSLL : (little-endian) left bit shift.
7896 /// [ zz, 0, zz, 2 ]
7897 /// [ -1, 4, zz, -1 ]
7898 /// PSRL : (little-endian) right bit shift.
7900 /// [ -1, -1, 7, zz]
7901 /// PSLLDQ : (little-endian) left byte shift
7902 /// [ zz, 0, 1, 2, 3, 4, 5, 6]
7903 /// [ zz, zz, -1, -1, 2, 3, 4, -1]
7904 /// [ zz, zz, zz, zz, zz, zz, -1, 1]
7905 /// PSRLDQ : (little-endian) right byte shift
7906 /// [ 5, 6, 7, zz, zz, zz, zz, zz]
7907 /// [ -1, 5, 6, 7, zz, zz, zz, zz]
7908 /// [ 1, 2, -1, -1, -1, -1, zz, zz]
7909 static SDValue lowerVectorShuffleAsShift(SDLoc DL, MVT VT, SDValue V1,
7910 SDValue V2, ArrayRef<int> Mask,
7911 SelectionDAG &DAG) {
7912 SmallBitVector Zeroable = computeZeroableShuffleElements(Mask, V1, V2);
7914 int Size = Mask.size();
7915 assert(Size == (int)VT.getVectorNumElements() && "Unexpected mask size");
7917 auto CheckZeros = [&](int Shift, int Scale, bool Left) {
7918 for (int i = 0; i < Size; i += Scale)
7919 for (int j = 0; j < Shift; ++j)
7920 if (!Zeroable[i + j + (Left ? 0 : (Scale - Shift))])
7926 auto MatchShift = [&](int Shift, int Scale, bool Left, SDValue V) {
7927 for (int i = 0; i != Size; i += Scale) {
7928 unsigned Pos = Left ? i + Shift : i;
7929 unsigned Low = Left ? i : i + Shift;
7930 unsigned Len = Scale - Shift;
7931 if (!isSequentialOrUndefInRange(Mask, Pos, Len,
7932 Low + (V == V1 ? 0 : Size)))
7936 int ShiftEltBits = VT.getScalarSizeInBits() * Scale;
7937 bool ByteShift = ShiftEltBits > 64;
7938 unsigned OpCode = Left ? (ByteShift ? X86ISD::VSHLDQ : X86ISD::VSHLI)
7939 : (ByteShift ? X86ISD::VSRLDQ : X86ISD::VSRLI);
7940 int ShiftAmt = Shift * VT.getScalarSizeInBits() / (ByteShift ? 8 : 1);
7942 // Normalize the scale for byte shifts to still produce an i64 element type.
7944 Scale = ByteShift ? Scale / 2 : Scale;
7946 // We need to round trip through the appropriate type for the shift.
7947 MVT ShiftSVT = MVT::getIntegerVT(VT.getScalarSizeInBits() * Scale);
7948 MVT ShiftVT = MVT::getVectorVT(ShiftSVT, Size / Scale);
7949 assert(DAG.getTargetLoweringInfo().isTypeLegal(ShiftVT) &&
7950 "Illegal integer vector type");
7951 V = DAG.getNode(ISD::BITCAST, DL, ShiftVT, V);
7953 V = DAG.getNode(OpCode, DL, ShiftVT, V, DAG.getConstant(ShiftAmt, MVT::i8));
7954 return DAG.getNode(ISD::BITCAST, DL, VT, V);
7955 };
7957 // SSE/AVX supports logical shifts up to 64-bit integers - so we can just
7958 // keep doubling the size of the integer elements up to that. We can
7959 // then shift the elements of the integer vector by whole multiples of
7960 // their width within the elements of the larger integer vector. Test each
7961 // multiple to see if we can find a match with the moved element indices
7962 // and that the shifted in elements are all zeroable.
7963 for (int Scale = 2; Scale * VT.getScalarSizeInBits() <= 128; Scale *= 2)
7964 for (int Shift = 1; Shift != Scale; ++Shift)
7965 for (bool Left : {true, false})
7966 if (CheckZeros(Shift, Scale, Left))
7967 for (SDValue V : {V1, V2})
7968 if (SDValue Match = MatchShift(Shift, Scale, Left, V))
7969 return Match;
7971 // no match
7972 return SDValue();
7973 }
7975 /// \brief Lower a vector shuffle as a zero or any extension.
7977 /// Given a specific number of elements, element bit width, and extension
7978 /// stride, produce either a zero or any extension based on the available
7979 /// features of the subtarget.
7980 static SDValue lowerVectorShuffleAsSpecificZeroOrAnyExtend(
7981 SDLoc DL, MVT VT, int Scale, bool AnyExt, SDValue InputV,
7982 const X86Subtarget *Subtarget, SelectionDAG &DAG) {
7983 assert(Scale > 1 && "Need a scale to extend.");
7984 int NumElements = VT.getVectorNumElements();
7985 int EltBits = VT.getScalarSizeInBits();
7986 assert((EltBits == 8 || EltBits == 16 || EltBits == 32) &&
7987 "Only 8, 16, and 32 bit elements can be extended.");
7988 assert(Scale * EltBits <= 64 && "Cannot zero extend past 64 bits.");
7990 // Found a valid zext mask! Try various lowering strategies based on the
7991 // input type and available ISA extensions.
7992 if (Subtarget->hasSSE41()) {
7993 MVT ExtVT = MVT::getVectorVT(MVT::getIntegerVT(EltBits * Scale),
7994 NumElements / Scale);
7995 return DAG.getNode(ISD::BITCAST, DL, VT,
7996 DAG.getNode(X86ISD::VZEXT, DL, ExtVT, InputV));
7997 }
7999 // For any extends we can cheat for larger element sizes and use shuffle
8000 // instructions that can fold with a load and/or copy.
8001 if (AnyExt && EltBits == 32) {
8002 int PSHUFDMask[4] = {0, -1, 1, -1};
8003 return DAG.getNode(
8004 ISD::BITCAST, DL, VT,
8005 DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32,
8006 DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, InputV),
8007 getV4X86ShuffleImm8ForMask(PSHUFDMask, DAG)));
8008 }
8009 if (AnyExt && EltBits == 16 && Scale > 2) {
8010 int PSHUFDMask[4] = {0, -1, 0, -1};
8011 InputV = DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32,
8012 DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, InputV),
8013 getV4X86ShuffleImm8ForMask(PSHUFDMask, DAG));
8014 int PSHUFHWMask[4] = {1, -1, -1, -1};
8015 return DAG.getNode(
8016 ISD::BITCAST, DL, VT,
8017 DAG.getNode(X86ISD::PSHUFHW, DL, MVT::v8i16,
8018 DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, InputV),
8019 getV4X86ShuffleImm8ForMask(PSHUFHWMask, DAG)));
8020 }
8022 // If this would require more than 2 unpack instructions to expand, use
8023 // pshufb when available. We can only use more than 2 unpack instructions
8024 // when zero extending i8 elements which also makes it easier to use pshufb.
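// For example, with Scale == 4 the PSHUFB mask built below is
// <0, 0x80, 0x80, 0x80, 1, 0x80, 0x80, 0x80, 2, ...>: each source byte is
// spread to every fourth position and the 0x80 entries zero the rest.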
8025 if (Scale > 4 && EltBits == 8 && Subtarget->hasSSSE3()) {
8026 assert(NumElements == 16 && "Unexpected byte vector width!");
8027 SDValue PSHUFBMask[16];
8028 for (int i = 0; i < 16; ++i)
8029 PSHUFBMask[i] =
8030 DAG.getConstant((i % Scale == 0) ? i / Scale : 0x80, MVT::i8);
8031 InputV = DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, InputV);
8032 return DAG.getNode(ISD::BITCAST, DL, VT,
8033 DAG.getNode(X86ISD::PSHUFB, DL, MVT::v16i8, InputV,
8034 DAG.getNode(ISD::BUILD_VECTOR, DL,
8035 MVT::v16i8, PSHUFBMask)));
8036 }
8038 // Otherwise emit a sequence of unpacks.
8039 do {
8040 MVT InputVT = MVT::getVectorVT(MVT::getIntegerVT(EltBits), NumElements);
8041 SDValue Ext = AnyExt ? DAG.getUNDEF(InputVT)
8042 : getZeroVector(InputVT, Subtarget, DAG, DL);
8043 InputV = DAG.getNode(ISD::BITCAST, DL, InputVT, InputV);
8044 InputV = DAG.getNode(X86ISD::UNPCKL, DL, InputVT, InputV, Ext);
8045 Scale /= 2;
8046 EltBits *= 2;
8047 NumElements /= 2;
8048 } while (Scale > 1);
8049 return DAG.getNode(ISD::BITCAST, DL, VT, InputV);
8050 }
8052 /// \brief Try to lower a vector shuffle as a zero extension on any microarch.
8054 /// This routine will try to do everything in its power to cleverly lower
8055 /// a shuffle which happens to match the pattern of a zero extend. It doesn't
8056 /// check for the profitability of this lowering, it tries to aggressively
8057 /// match this pattern. It will use all of the micro-architectural details it
8058 /// can to emit an efficient lowering. It handles both blends with all-zero
8059 /// inputs to explicitly zero-extend and undef-lanes (sometimes undef due to
8060 /// masking out later).
8062 /// The reason we have dedicated lowering for zext-style shuffles is that they
8063 /// are both incredibly common and often quite performance sensitive.
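// For example, a v4i32 mask <0, zz, 1, zz> (zz = zeroable) zero-extends the
// two low 32-bit elements of V1 to 64 bits; with SSE4.1 this becomes a
// single X86ISD::VZEXT (PMOVZXDQ) of V1.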
8064 static SDValue lowerVectorShuffleAsZeroOrAnyExtend(
8065 SDLoc DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
8066 const X86Subtarget *Subtarget, SelectionDAG &DAG) {
8067 SmallBitVector Zeroable = computeZeroableShuffleElements(Mask, V1, V2);
8069 int Bits = VT.getSizeInBits();
8070 int NumElements = VT.getVectorNumElements();
8071 assert(VT.getScalarSizeInBits() <= 32 &&
8072 "Exceeds 32-bit integer zero extension limit");
8073 assert((int)Mask.size() == NumElements && "Unexpected shuffle mask size");
8075 // Define a helper function to check a particular ext-scale and lower to it if
8076 // valid.
8077 auto Lower = [&](int Scale) -> SDValue {
8078 SDValue InputV;
8079 bool AnyExt = true;
8080 for (int i = 0; i < NumElements; ++i) {
8081 if (Mask[i] == -1)
8082 continue; // Valid anywhere but doesn't tell us anything.
8083 if (i % Scale != 0) {
8084 // Each of the extended elements need to be zeroable.
8085 if (!Zeroable[i])
8086 return SDValue();
8088 // We no longer are in the anyext case.
8089 AnyExt = false;
8090 continue;
8091 }
8093 // Each of the base elements needs to be consecutive indices into the
8094 // same input vector.
8095 SDValue V = Mask[i] < NumElements ? V1 : V2;
8096 if (!InputV)
8097 InputV = V;
8098 else if (InputV != V)
8099 return SDValue(); // Flip-flopping inputs.
8101 if (Mask[i] % NumElements != i / Scale)
8102 return SDValue(); // Non-consecutive strided elements.
8103 }
8105 // If we fail to find an input, we have a zero-shuffle which should always
8106 // have already been handled.
8107 // FIXME: Maybe handle this here in case during blending we end up with one?
8108 if (!InputV)
8109 return SDValue();
8111 return lowerVectorShuffleAsSpecificZeroOrAnyExtend(
8112 DL, VT, Scale, AnyExt, InputV, Subtarget, DAG);
8113 };
8115 // The widest scale possible for extending is to a 64-bit integer.
8116 assert(Bits % 64 == 0 &&
8117 "The number of bits in a vector must be divisible by 64 on x86!");
8118 int NumExtElements = Bits / 64;
8120 // Each iteration, try extending the elements half as much, but into twice as
8121 // many elements.
8122 for (; NumExtElements < NumElements; NumExtElements *= 2) {
8123 assert(NumElements % NumExtElements == 0 &&
8124 "The input vector size must be divisible by the extended size.");
8125 if (SDValue V = Lower(NumElements / NumExtElements))
8126 return V;
8127 }
8129 // General extends failed, but 128-bit vectors may be able to use MOVQ.
8130 if (Bits != 128)
8131 return SDValue();
8133 // Returns one of the source operands if the shuffle can be reduced to a
8134 // MOVQ, copying the lower 64-bits and zero-extending to the upper 64-bits.
8135 auto CanZExtLowHalf = [&]() {
8136 for (int i = NumElements / 2; i != NumElements; ++i)
8137 if (!Zeroable[i])
8138 return SDValue();
8139 if (isSequentialOrUndefInRange(Mask, 0, NumElements / 2, 0))
8140 return V1;
8141 if (isSequentialOrUndefInRange(Mask, 0, NumElements / 2, NumElements))
8142 return V2;
8143 return SDValue();
8144 };
8146 if (SDValue V = CanZExtLowHalf()) {
8147 V = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, V);
8148 V = DAG.getNode(X86ISD::VZEXT_MOVL, DL, MVT::v2i64, V);
8149 return DAG.getNode(ISD::BITCAST, DL, VT, V);
8150 }
8152 // No viable ext lowering found.
8153 return SDValue();
8154 }
8156 /// \brief Try to get a scalar value for a specific element of a vector.
8158 /// Looks through BUILD_VECTOR and SCALAR_TO_VECTOR nodes to find a scalar.
8159 static SDValue getScalarValueForVectorElement(SDValue V, int Idx,
8160 SelectionDAG &DAG) {
8161 MVT VT = V.getSimpleValueType();
8162 MVT EltVT = VT.getVectorElementType();
8163 while (V.getOpcode() == ISD::BITCAST)
8164 V = V.getOperand(0);
8165 // If the bitcasts shift the element size, we can't extract an equivalent
8166 // element from it.
8167 MVT NewVT = V.getSimpleValueType();
8168 if (!NewVT.isVector() || NewVT.getScalarSizeInBits() != VT.getScalarSizeInBits())
8169 return SDValue();
8171 if (V.getOpcode() == ISD::BUILD_VECTOR ||
8172 (Idx == 0 && V.getOpcode() == ISD::SCALAR_TO_VECTOR))
8173 return DAG.getNode(ISD::BITCAST, SDLoc(V), EltVT, V.getOperand(Idx));
8175 return SDValue();
8176 }
8178 /// \brief Helper to test for a load that can be folded with x86 shuffles.
8180 /// This is particularly important because the set of instructions varies
8181 /// significantly based on whether the operand is a load or not.
8182 static bool isShuffleFoldableLoad(SDValue V) {
8183 while (V.getOpcode() == ISD::BITCAST)
8184 V = V.getOperand(0);
8186 return ISD::isNON_EXTLoad(V.getNode());
8187 }
8189 /// \brief Try to lower insertion of a single element into a zero vector.
8191 /// This is a common pattern that we have especially efficient patterns to lower
8192 /// across all subtarget feature sets.
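// For example, a v4i32 mask <4, zz, zz, zz> (zz = zeroable) where V2's low
// element comes from a scalar inserts that scalar into lane 0 of an all-zero
// vector with a single X86ISD::VZEXT_MOVL node.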
8193 static SDValue lowerVectorShuffleAsElementInsertion(
8194 MVT VT, SDLoc DL, SDValue V1, SDValue V2, ArrayRef<int> Mask,
8195 const X86Subtarget *Subtarget, SelectionDAG &DAG) {
8196 SmallBitVector Zeroable = computeZeroableShuffleElements(Mask, V1, V2);
8197 MVT ExtVT = VT;
8198 MVT EltVT = VT.getVectorElementType();
8200 int V2Index = std::find_if(Mask.begin(), Mask.end(),
8201 [&Mask](int M) { return M >= (int)Mask.size(); }) -
8202 Mask.begin();
8203 bool IsV1Zeroable = true;
8204 for (int i = 0, Size = Mask.size(); i < Size; ++i)
8205 if (i != V2Index && !Zeroable[i]) {
8206 IsV1Zeroable = false;
8207 break;
8208 }
8210 // Check for a single input from a SCALAR_TO_VECTOR node.
8211 // FIXME: All of this should be canonicalized into INSERT_VECTOR_ELT and
8212 // all the smarts here sunk into that routine. However, the current
8213 // lowering of BUILD_VECTOR makes that nearly impossible until the old
8214 // vector shuffle lowering is dead.
8215 if (SDValue V2S = getScalarValueForVectorElement(
8216 V2, Mask[V2Index] - Mask.size(), DAG)) {
8217 // We need to zext the scalar if it is smaller than an i32.
8218 V2S = DAG.getNode(ISD::BITCAST, DL, EltVT, V2S);
8219 if (EltVT == MVT::i8 || EltVT == MVT::i16) {
8220 // Using zext to expand a narrow element won't work for non-zero
8221 // elements.
8222 if (!IsV1Zeroable)
8223 return SDValue();
8225 // Zero-extend directly to i32.
8226 ExtVT = MVT::v4i32;
8227 V2S = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i32, V2S);
8228 }
8229 V2 = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, ExtVT, V2S);
8230 } else if (Mask[V2Index] != (int)Mask.size() || EltVT == MVT::i8 ||
8231 EltVT == MVT::i16) {
8232 // Either not inserting from the low element of the input or the input
8233 // element size is too small to use VZEXT_MOVL to clear the high bits.
8234 return SDValue();
8235 }
8237 if (!IsV1Zeroable) {
8238 // If V1 can't be treated as a zero vector we have fewer options to lower
8239 // this. We can't support integer vectors or non-zero targets cheaply, and
8240 // the V1 elements can't be permuted in any way.
8241 assert(VT == ExtVT && "Cannot change extended type when non-zeroable!");
8242 if (!VT.isFloatingPoint() || V2Index != 0)
8243 return SDValue();
8244 SmallVector<int, 8> V1Mask(Mask.begin(), Mask.end());
8245 V1Mask[V2Index] = -1;
8246 if (!isNoopShuffleMask(V1Mask))
8247 return SDValue();
8248 // This is essentially a special case blend operation, but if we have
8249 // general purpose blend operations, they are always faster. Bail and let
8250 // the rest of the lowering handle these as blends.
8251 if (Subtarget->hasSSE41())
8252 return SDValue();
8254 // Otherwise, use MOVSD or MOVSS.
8255 assert((EltVT == MVT::f32 || EltVT == MVT::f64) &&
8256 "Only two types of floating point element types to handle!");
8257 return DAG.getNode(EltVT == MVT::f32 ? X86ISD::MOVSS : X86ISD::MOVSD, DL,
8258 ExtVT, V1, V2);
8259 }
8261 // This lowering only works for the low element with floating point vectors.
8262 if (VT.isFloatingPoint() && V2Index != 0)
8263 return SDValue();
8265 V2 = DAG.getNode(X86ISD::VZEXT_MOVL, DL, ExtVT, V2);
8266 if (ExtVT != VT)
8267 V2 = DAG.getNode(ISD::BITCAST, DL, VT, V2);
8269 if (V2Index != 0) {
8270 // If we have 4 or fewer lanes we can cheaply shuffle the element into
8271 // the desired position. Otherwise it is more efficient to do a vector
8272 // shift left. We know that we can do a vector shift left because all
8273 // the inputs are zero.
8274 if (VT.isFloatingPoint() || VT.getVectorNumElements() <= 4) {
8275 SmallVector<int, 4> V2Shuffle(Mask.size(), 1);
8276 V2Shuffle[V2Index] = 0;
8277 V2 = DAG.getVectorShuffle(VT, DL, V2, DAG.getUNDEF(VT), V2Shuffle);
8278 } else {
8279 V2 = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, V2);
8280 V2 = DAG.getNode(
8281 X86ISD::VSHLDQ, DL, MVT::v2i64, V2,
8282 DAG.getConstant(
8283 V2Index * EltVT.getSizeInBits()/8,
8284 DAG.getTargetLoweringInfo().getScalarShiftAmountTy(MVT::v2i64)));
8285 V2 = DAG.getNode(ISD::BITCAST, DL, VT, V2);
8286 }
8287 }
8288 return V2;
8289 }
8291 /// \brief Try to lower broadcast of a single element.
8293 /// For convenience, this code also bundles all of the subtarget feature set
8294 /// filtering. While a little annoying to re-dispatch on type here, there isn't
8295 /// a convenient way to factor it out.
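// For example, a v8f32 splat of element 0 becomes a single X86ISD::VBROADCAST
// (vbroadcastss). Without AVX2 this is only formed when the element traces
// back to a scalar load, since AVX1 can only broadcast from memory.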
8296 static SDValue lowerVectorShuffleAsBroadcast(MVT VT, SDLoc DL, SDValue V,
8297 ArrayRef<int> Mask,
8298 const X86Subtarget *Subtarget,
8299 SelectionDAG &DAG) {
8300 if (!Subtarget->hasAVX())
8301 return SDValue();
8302 if (VT.isInteger() && !Subtarget->hasAVX2())
8303 return SDValue();
8305 // Check that the mask is a broadcast.
8306 int BroadcastIdx = -1;
8307 for (int M : Mask)
8308 if (M >= 0 && BroadcastIdx == -1)
8309 BroadcastIdx = M;
8310 else if (M >= 0 && M != BroadcastIdx)
8311 return SDValue();
8313 assert(BroadcastIdx < (int)Mask.size() && "We only expect to be called with "
8314 "a sorted mask where the broadcast "
8315 "comes from V1.");
8317 // Go up the chain of (vector) values to try and find a scalar load that
8318 // we can combine with the broadcast.
8319 for (;;) {
8320 switch (V.getOpcode()) {
8321 case ISD::CONCAT_VECTORS: {
8322 int OperandSize = Mask.size() / V.getNumOperands();
8323 V = V.getOperand(BroadcastIdx / OperandSize);
8324 BroadcastIdx %= OperandSize;
8328 case ISD::INSERT_SUBVECTOR: {
8329 SDValue VOuter = V.getOperand(0), VInner = V.getOperand(1);
8330 auto ConstantIdx = dyn_cast<ConstantSDNode>(V.getOperand(2));
8331 if (!ConstantIdx)
8332 break;
8334 int BeginIdx = (int)ConstantIdx->getZExtValue();
8335 int EndIdx =
8336 BeginIdx + (int)VInner.getValueType().getVectorNumElements();
8337 if (BroadcastIdx >= BeginIdx && BroadcastIdx < EndIdx) {
8338 BroadcastIdx -= BeginIdx;
8339 V = VInner;
8340 } else {
8341 V = VOuter;
8342 }
8343 continue;
8344 }
8345 }
8346 break;
8347 }
8349 // Check if this is a broadcast of a scalar. We special case lowering
8350 // for scalars so that we can more effectively fold with loads.
8351 if (V.getOpcode() == ISD::BUILD_VECTOR ||
8352 (V.getOpcode() == ISD::SCALAR_TO_VECTOR && BroadcastIdx == 0)) {
8353 V = V.getOperand(BroadcastIdx);
8355 // If the scalar isn't a load we can't broadcast from it in AVX1, only with
8356 // AVX2.
8357 if (!Subtarget->hasAVX2() && !isShuffleFoldableLoad(V))
8358 return SDValue();
8359 } else if (BroadcastIdx != 0 || !Subtarget->hasAVX2()) {
8360 // We can't broadcast from a vector register w/o AVX2, and we can only
8361 // broadcast from the zero-element of a vector register.
8362 return SDValue();
8363 }
8365 return DAG.getNode(X86ISD::VBROADCAST, DL, VT, V);
8366 }
8368 // Check for whether we can use INSERTPS to perform the shuffle. We only use
8369 // INSERTPS when the V1 elements are already in the correct locations
8370 // because otherwise we can just always use two SHUFPS instructions which
8371 // are much smaller to encode than a SHUFPS and an INSERTPS. We can also
8372 // perform INSERTPS if a single V1 element is out of place and all V2
8373 // elements are zeroable.
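// The 8-bit INSERTPS immediate built below packs the V2 source lane into
// bits [7:6], the destination lane into bits [5:4], and the zero mask into
// bits [3:0].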
8374 static SDValue lowerVectorShuffleAsInsertPS(SDValue Op, SDValue V1, SDValue V2,
8375 ArrayRef<int> Mask,
8376 SelectionDAG &DAG) {
8377 assert(Op.getSimpleValueType() == MVT::v4f32 && "Bad shuffle type!");
8378 assert(V1.getSimpleValueType() == MVT::v4f32 && "Bad operand type!");
8379 assert(V2.getSimpleValueType() == MVT::v4f32 && "Bad operand type!");
8380 assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");
8382 SmallBitVector Zeroable = computeZeroableShuffleElements(Mask, V1, V2);
8384 unsigned ZMask = 0;
8385 int V1DstIndex = -1;
8386 int V2DstIndex = -1;
8387 bool V1UsedInPlace = false;
8389 for (int i = 0; i < 4; ++i) {
8390 // Synthesize a zero mask from the zeroable elements (includes undefs).
8391 if (Zeroable[i]) {
8392 ZMask |= 1 << i;
8393 continue;
8394 }
8396 // Flag if we use any V1 inputs in place.
8397 if (Mask[i] == i) {
8398 V1UsedInPlace = true;
8399 continue;
8400 }
8402 // We can only insert a single non-zeroable element.
8403 if (V1DstIndex != -1 || V2DstIndex != -1)
8404 return SDValue();
8406 if (Mask[i] < 4) {
8407 // V1 input out of place for insertion.
8408 V1DstIndex = i;
8409 } else {
8410 // V2 input for insertion.
8411 V2DstIndex = i;
8412 }
8413 }
8415 // Don't bother if we have no (non-zeroable) element for insertion.
8416 if (V1DstIndex == -1 && V2DstIndex == -1)
8417 return SDValue();
8419 // Determine element insertion src/dst indices. The src index is from the
8420 // start of the inserted vector, not the start of the concatenated vector.
8421 unsigned V2SrcIndex = 0;
8422 if (V1DstIndex != -1) {
8423 // If we have a V1 input out of place, we use V1 as the V2 element insertion
8424 // and don't use the original V2 at all.
8425 V2SrcIndex = Mask[V1DstIndex];
8426 V2DstIndex = V1DstIndex;
8427 V2 = V1;
8428 } else {
8429 V2SrcIndex = Mask[V2DstIndex] - 4;
8430 }
8432 // If no V1 inputs are used in place, then the result is created only from
8433 // the zero mask and the V2 insertion - so remove V1 dependency.
8434 if (!V1UsedInPlace)
8435 V1 = DAG.getUNDEF(MVT::v4f32);
8437 unsigned InsertPSMask = V2SrcIndex << 6 | V2DstIndex << 4 | ZMask;
8438 assert((InsertPSMask & ~0xFFu) == 0 && "Invalid mask!");
8440 // Insert the V2 element into the desired position.
8441 SDLoc DL(Op);
8442 return DAG.getNode(X86ISD::INSERTPS, DL, MVT::v4f32, V1, V2,
8443 DAG.getConstant(InsertPSMask, MVT::i8));
8444 }
8446 /// \brief Try to lower a shuffle as a permute of the inputs followed by an
8447 /// UNPCK instruction.
8449 /// This specifically targets cases where we end up with alternating between
8450 /// the two inputs, and so can permute them into something that feeds a single
8451 /// UNPCK instruction. Note that this routine only targets integer vectors
8452 /// because for floating point vectors we have a generalized SHUFPS lowering
8453 /// strategy that handles everything that doesn't *exactly* match an unpack,
8454 /// making this clever lowering unnecessary.
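// For example, a v4i32 mask <0, 4, 2, 6> is handled by shuffling V1 and V2
// each to <0, 2, u, u> of their own elements and then combining the two
// permuted vectors with a single UNPCKL.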
8455 static SDValue lowerVectorShuffleAsUnpack(MVT VT, SDLoc DL, SDValue V1,
8456 SDValue V2, ArrayRef<int> Mask,
8457 SelectionDAG &DAG) {
8458 assert(!VT.isFloatingPoint() &&
8459 "This routine only supports integer vectors.");
8460 assert(!isSingleInputShuffleMask(Mask) &&
8461 "This routine should only be used when blending two inputs.");
8462 assert(Mask.size() >= 2 && "Single element masks are invalid.");
8464 int Size = Mask.size();
8466 int NumLoInputs = std::count_if(Mask.begin(), Mask.end(), [Size](int M) {
8467 return M >= 0 && M % Size < Size / 2;
8469 int NumHiInputs = std::count_if(
8470 Mask.begin(), Mask.end(), [Size](int M) { return M % Size > Size / 2; });
8472 bool UnpackLo = NumLoInputs >= NumHiInputs;
8474 auto TryUnpack = [&](MVT UnpackVT, int Scale) {
8475 SmallVector<int, 32> V1Mask(Mask.size(), -1);
8476 SmallVector<int, 32> V2Mask(Mask.size(), -1);
8478 for (int i = 0; i < Size; ++i) {
8482 // Each element of the unpack contains Scale elements from this mask.
8483 int UnpackIdx = i / Scale;
8485 // We only handle the case where V1 feeds the first slots of the unpack.
8486 // We rely on canonicalization to ensure this is the case.
8487 if ((UnpackIdx % 2 == 0) != (Mask[i] < Size))
8490 // Setup the mask for this input. The indexing is tricky as we have to
8491 // handle the unpack stride.
8492 SmallVectorImpl<int> &VMask = (UnpackIdx % 2 == 0) ? V1Mask : V2Mask;
8493 VMask[(UnpackIdx / 2) * Scale + i % Scale + (UnpackLo ? 0 : Size / 2)] =
8497 // Shuffle the inputs into place.
8498 V1 = DAG.getVectorShuffle(VT, DL, V1, DAG.getUNDEF(VT), V1Mask);
8499 V2 = DAG.getVectorShuffle(VT, DL, V2, DAG.getUNDEF(VT), V2Mask);
8501 // Cast the inputs to the type we will use to unpack them.
8502 V1 = DAG.getNode(ISD::BITCAST, DL, UnpackVT, V1);
8503 V2 = DAG.getNode(ISD::BITCAST, DL, UnpackVT, V2);
8505 // Unpack the inputs and cast the result back to the desired type.
8506 return DAG.getNode(ISD::BITCAST, DL, VT,
8507 DAG.getNode(UnpackLo ? X86ISD::UNPCKL : X86ISD::UNPCKH,
8508 DL, UnpackVT, V1, V2));
8509 };
8511 // We try each unpack from the largest to the smallest to try and find one
8512 // that fits this mask.
8513 int OrigNumElements = VT.getVectorNumElements();
8514 int OrigScalarSize = VT.getScalarSizeInBits();
8515 for (int ScalarSize = 64; ScalarSize >= OrigScalarSize; ScalarSize /= 2) {
8516 int Scale = ScalarSize / OrigScalarSize;
8517 int NumElements = OrigNumElements / Scale;
8518 MVT UnpackVT = MVT::getVectorVT(MVT::getIntegerVT(ScalarSize), NumElements);
8519 if (SDValue Unpack = TryUnpack(UnpackVT, Scale))
8520 return Unpack;
8521 }
8523 return SDValue();
8524 }
8526 /// \brief Handle lowering of 2-lane 64-bit floating point shuffles.
8528 /// This is the basis function for the 2-lane 64-bit shuffles as we have full
8529 /// support for floating point shuffles but not integer shuffles. These
8530 /// instructions will incur a domain crossing penalty on some chips though so
8531 /// it is better to avoid lowering through this for integer vectors where
8532 /// possible.
8533 static SDValue lowerV2F64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
8534 const X86Subtarget *Subtarget,
8535 SelectionDAG &DAG) {
8536 SDLoc DL(Op);
8537 assert(Op.getSimpleValueType() == MVT::v2f64 && "Bad shuffle type!");
8538 assert(V1.getSimpleValueType() == MVT::v2f64 && "Bad operand type!");
8539 assert(V2.getSimpleValueType() == MVT::v2f64 && "Bad operand type!");
8540 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
8541 ArrayRef<int> Mask = SVOp->getMask();
8542 assert(Mask.size() == 2 && "Unexpected mask size for v2 shuffle!");
8544 if (isSingleInputShuffleMask(Mask)) {
8545 // Use low duplicate instructions for masks that match their pattern.
8546 if (Subtarget->hasSSE3())
8547 if (isShuffleEquivalent(V1, V2, Mask, 0, 0))
8548 return DAG.getNode(X86ISD::MOVDDUP, DL, MVT::v2f64, V1);
8550 // Straight shuffle of a single input vector. Simulate this by using the
8551 // single input as both of the "inputs" to this instruction..
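// In the SHUFPD immediate, bit 0 picks the element of the first operand that
// goes into lane 0 and bit 1 picks the element of the second operand that
// goes into lane 1; e.g. the single-input mask <1, 0> yields immediate 0b01.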
8552 unsigned SHUFPDMask = (Mask[0] == 1) | ((Mask[1] == 1) << 1);
8554 if (Subtarget->hasAVX()) {
8555 // If we have AVX, we can use VPERMILPS which will allow folding a load
8556 // into the shuffle.
8557 return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v2f64, V1,
8558 DAG.getConstant(SHUFPDMask, MVT::i8));
8559 }
8561 return DAG.getNode(X86ISD::SHUFP, SDLoc(Op), MVT::v2f64, V1, V1,
8562 DAG.getConstant(SHUFPDMask, MVT::i8));
8563 }
8564 assert(Mask[0] >= 0 && Mask[0] < 2 && "Non-canonicalized blend!");
8565 assert(Mask[1] >= 2 && "Non-canonicalized blend!");
8567 // If we have a single input, insert that into V1 if we can do so cheaply.
8568 if ((Mask[0] >= 2) + (Mask[1] >= 2) == 1) {
8569 if (SDValue Insertion = lowerVectorShuffleAsElementInsertion(
8570 MVT::v2f64, DL, V1, V2, Mask, Subtarget, DAG))
8572 // Try inverting the insertion since for v2 masks it is easy to do and we
8573 // can't reliably sort the mask one way or the other.
8574 int InverseMask[2] = {Mask[0] < 0 ? -1 : (Mask[0] ^ 2),
8575 Mask[1] < 0 ? -1 : (Mask[1] ^ 2)};
8576 if (SDValue Insertion = lowerVectorShuffleAsElementInsertion(
8577 MVT::v2f64, DL, V2, V1, InverseMask, Subtarget, DAG))
8581 // Try to use one of the special instruction patterns to handle two common
8582 // blend patterns if a zero-blend above didn't work.
8583 if (isShuffleEquivalent(V1, V2, Mask, 0, 3) || isShuffleEquivalent(V1, V2, Mask, 1, 3))
8584 if (SDValue V1S = getScalarValueForVectorElement(V1, Mask[0], DAG))
8585 // We can either use a special instruction to load over the low double or
8586 // to move just the low double.
8587 return DAG.getNode(
8588 isShuffleFoldableLoad(V1S) ? X86ISD::MOVLPD : X86ISD::MOVSD,
8589 DL, MVT::v2f64, V2,
8590 DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v2f64, V1S));
8592 if (Subtarget->hasSSE41())
8593 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v2f64, V1, V2, Mask,
8594 Subtarget, DAG))
8595 return Blend;
8597 // Use dedicated unpack instructions for masks that match their pattern.
8598 if (isShuffleEquivalent(V1, V2, Mask, 0, 2))
8599 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v2f64, V1, V2);
8600 if (isShuffleEquivalent(V1, V2, Mask, 1, 3))
8601 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v2f64, V1, V2);
8603 unsigned SHUFPDMask = (Mask[0] == 1) | (((Mask[1] - 2) == 1) << 1);
8604 return DAG.getNode(X86ISD::SHUFP, SDLoc(Op), MVT::v2f64, V1, V2,
8605 DAG.getConstant(SHUFPDMask, MVT::i8));
8606 }
8608 /// \brief Handle lowering of 2-lane 64-bit integer shuffles.
8610 /// Tries to lower a 2-lane 64-bit shuffle using shuffle operations provided by
8611 /// the integer unit to minimize domain crossing penalties. However, for blends
8612 /// it falls back to the floating point shuffle operation with appropriate bit
8613 /// casting.
8614 static SDValue lowerV2I64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
8615 const X86Subtarget *Subtarget,
8616 SelectionDAG &DAG) {
8617 SDLoc DL(Op);
8618 assert(Op.getSimpleValueType() == MVT::v2i64 && "Bad shuffle type!");
8619 assert(V1.getSimpleValueType() == MVT::v2i64 && "Bad operand type!");
8620 assert(V2.getSimpleValueType() == MVT::v2i64 && "Bad operand type!");
8621 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
8622 ArrayRef<int> Mask = SVOp->getMask();
8623 assert(Mask.size() == 2 && "Unexpected mask size for v2 shuffle!");
8625 if (isSingleInputShuffleMask(Mask)) {
8626 // Check for being able to broadcast a single element.
8627 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v2i64, DL, V1,
8628 Mask, Subtarget, DAG))
8631 // Straight shuffle of a single input vector. For everything from SSE2
8632 // onward this has a single fast instruction with no scary immediates.
8633 // We have to map the mask as it is actually a v4i32 shuffle instruction.
8634 V1 = DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, V1);
8635 int WidenedMask[4] = {
8636 std::max(Mask[0], 0) * 2, std::max(Mask[0], 0) * 2 + 1,
8637 std::max(Mask[1], 0) * 2, std::max(Mask[1], 0) * 2 + 1};
8638 return DAG.getNode(
8639 ISD::BITCAST, DL, MVT::v2i64,
8640 DAG.getNode(X86ISD::PSHUFD, SDLoc(Op), MVT::v4i32, V1,
8641 getV4X86ShuffleImm8ForMask(WidenedMask, DAG)));
8642 }
8643 assert(Mask[0] != -1 && "No undef lanes in multi-input v2 shuffles!");
8644 assert(Mask[1] != -1 && "No undef lanes in multi-input v2 shuffles!");
8645 assert(Mask[0] < 2 && "We sort V1 to be the first input.");
8646 assert(Mask[1] >= 2 && "We sort V2 to be the second input.");
8648 // Try to use shift instructions.
8649 if (SDValue Shift =
8650 lowerVectorShuffleAsShift(DL, MVT::v2i64, V1, V2, Mask, DAG))
8651 return Shift;
8653 // When loading a scalar and then shuffling it into a vector we can often do
8654 // the insertion cheaply.
8655 if (SDValue Insertion = lowerVectorShuffleAsElementInsertion(
8656 MVT::v2i64, DL, V1, V2, Mask, Subtarget, DAG))
8658 // Try inverting the insertion since for v2 masks it is easy to do and we
8659 // can't reliably sort the mask one way or the other.
8660 int InverseMask[2] = {Mask[0] ^ 2, Mask[1] ^ 2};
8661 if (SDValue Insertion = lowerVectorShuffleAsElementInsertion(
8662 MVT::v2i64, DL, V2, V1, InverseMask, Subtarget, DAG))
8665 // We have different paths for blend lowering, but they all must use the
8666 // *exact* same predicate.
8667 bool IsBlendSupported = Subtarget->hasSSE41();
8668 if (IsBlendSupported)
8669 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v2i64, V1, V2, Mask,
8670 Subtarget, DAG))
8671 return Blend;
8673 // Use dedicated unpack instructions for masks that match their pattern.
8674 if (isShuffleEquivalent(V1, V2, Mask, 0, 2))
8675 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v2i64, V1, V2);
8676 if (isShuffleEquivalent(V1, V2, Mask, 1, 3))
8677 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v2i64, V1, V2);
8679 // Try to use byte rotation instructions.
8680 // Its more profitable for pre-SSSE3 to use shuffles/unpacks.
8681 if (Subtarget->hasSSSE3())
8682 if (SDValue Rotate = lowerVectorShuffleAsByteRotate(
8683 DL, MVT::v2i64, V1, V2, Mask, Subtarget, DAG))
8686 // If we have direct support for blends, we should lower by decomposing into
8687 // a permute. That will be faster than the domain cross.
8688 if (IsBlendSupported)
8689 return lowerVectorShuffleAsDecomposedShuffleBlend(DL, MVT::v2i64, V1, V2,
8690 Mask, DAG);
8692 // We implement this with SHUFPD which is pretty lame because it will likely
8693 // incur 2 cycles of stall for integer vectors on Nehalem and older chips.
8694 // However, all the alternatives are still more cycles and newer chips don't
8695 // have this problem. It would be really nice if x86 had better shuffles here.
8696 V1 = DAG.getNode(ISD::BITCAST, DL, MVT::v2f64, V1);
8697 V2 = DAG.getNode(ISD::BITCAST, DL, MVT::v2f64, V2);
8698 return DAG.getNode(ISD::BITCAST, DL, MVT::v2i64,
8699 DAG.getVectorShuffle(MVT::v2f64, DL, V1, V2, Mask));
8700 }
8702 /// \brief Test whether this can be lowered with a single SHUFPS instruction.
8704 /// This is used to disable more specialized lowerings when the shufps lowering
8705 /// will happen to be efficient.
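// For example, <0, 1, 6, 7> can be done with one SHUFPS (low half from V1,
// high half from V2), while <0, 4, 1, 5> cannot because its low half needs
// elements from both inputs.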
8706 static bool isSingleSHUFPSMask(ArrayRef<int> Mask) {
8707 // This routine only handles 128-bit shufps.
8708 assert(Mask.size() == 4 && "Unsupported mask size!");
8710 // To lower with a single SHUFPS we need to have the low half and high half
8711 // each requiring a single input.
8712 if (Mask[0] != -1 && Mask[1] != -1 && (Mask[0] < 4) != (Mask[1] < 4))
8713 return false;
8714 if (Mask[2] != -1 && Mask[3] != -1 && (Mask[2] < 4) != (Mask[3] < 4))
8715 return false;
8717 return true;
8718 }
8720 /// \brief Lower a vector shuffle using the SHUFPS instruction.
8722 /// This is a helper routine dedicated to lowering vector shuffles using SHUFPS.
8723 /// It makes no assumptions about whether this is the *best* lowering, it simply
8724 /// uses it.
8725 static SDValue lowerVectorShuffleWithSHUFPS(SDLoc DL, MVT VT,
8726 ArrayRef<int> Mask, SDValue V1,
8727 SDValue V2, SelectionDAG &DAG) {
8728 SDValue LowV = V1, HighV = V2;
8729 int NewMask[4] = {Mask[0], Mask[1], Mask[2], Mask[3]};
8731 int NumV2Elements =
8732 std::count_if(Mask.begin(), Mask.end(), [](int M) { return M >= 4; });
8734 if (NumV2Elements == 1) {
8735 int V2Index =
8736 std::find_if(Mask.begin(), Mask.end(), [](int M) { return M >= 4; }) -
8737 Mask.begin();
8739 // Compute the index adjacent to V2Index and in the same half by toggling
8740 // the low bit.
8741 int V2AdjIndex = V2Index ^ 1;
8743 if (Mask[V2AdjIndex] == -1) {
8744 // Handles all the cases where we have a single V2 element and an undef.
8745 // This will only ever happen in the high lanes because we commute the
8746 // vector otherwise.
8747 if (V2Index < 2)
8748 std::swap(LowV, HighV);
8749 NewMask[V2Index] -= 4;
8750 } else {
8751 // Handle the case where the V2 element ends up adjacent to a V1 element.
8752 // To make this work, blend them together as the first step.
8753 int V1Index = V2AdjIndex;
8754 int BlendMask[4] = {Mask[V2Index] - 4, 0, Mask[V1Index], 0};
8755 V2 = DAG.getNode(X86ISD::SHUFP, DL, VT, V2, V1,
8756 getV4X86ShuffleImm8ForMask(BlendMask, DAG));
8758 // Now proceed to reconstruct the final blend as we have the necessary
8759 // high or low half formed.
8760 if (V2Index < 2) {
8761 LowV = V2;
8762 HighV = V1;
8763 } else {
8764 HighV = V2;
8765 }
8766 NewMask[V1Index] = 2; // We put the V1 element in V2[2].
8767 NewMask[V2Index] = 0; // We shifted the V2 element into V2[0].
8768 }
8769 } else if (NumV2Elements == 2) {
8770 if (Mask[0] < 4 && Mask[1] < 4) {
8771 // Handle the easy case where we have V1 in the low lanes and V2 in the
8772 // high lanes.
8773 NewMask[2] -= 4;
8774 NewMask[3] -= 4;
8775 } else if (Mask[2] < 4 && Mask[3] < 4) {
8776 // We also handle the reversed case because this utility may get called
8777 // when we detect a SHUFPS pattern but can't easily commute the shuffle to
8778 // arrange things in the right direction.
8779 NewMask[0] -= 4;
8780 NewMask[1] -= 4;
8781 HighV = V1;
8782 LowV = V2;
8783 } else {
8784 // We have a mixture of V1 and V2 in both low and high lanes. Rather than
8785 // trying to place elements directly, just blend them and set up the final
8786 // shuffle to place them.
8788 // The first two blend mask elements are for V1, the second two are for
8789 // V2.
8790 int BlendMask[4] = {Mask[0] < 4 ? Mask[0] : Mask[1],
8791 Mask[2] < 4 ? Mask[2] : Mask[3],
8792 (Mask[0] >= 4 ? Mask[0] : Mask[1]) - 4,
8793 (Mask[2] >= 4 ? Mask[2] : Mask[3]) - 4};
8794 V1 = DAG.getNode(X86ISD::SHUFP, DL, VT, V1, V2,
8795 getV4X86ShuffleImm8ForMask(BlendMask, DAG));
8797 // Now we do a normal shuffle of V1 by giving V1 as both operands to
8798 // the shuffle.
8799 LowV = HighV = V1;
8800 NewMask[0] = Mask[0] < 4 ? 0 : 2;
8801 NewMask[1] = Mask[0] < 4 ? 2 : 0;
8802 NewMask[2] = Mask[2] < 4 ? 1 : 3;
8803 NewMask[3] = Mask[2] < 4 ? 3 : 1;
8804 }
8806 return DAG.getNode(X86ISD::SHUFP, DL, VT, LowV, HighV,
8807 getV4X86ShuffleImm8ForMask(NewMask, DAG));
8808 }
8810 /// \brief Lower 4-lane 32-bit floating point shuffles.
8812 /// Uses instructions exclusively from the floating point unit to minimize
8813 /// domain crossing penalties, as these are sufficient to implement all v4f32
8814 /// shuffles.
8815 static SDValue lowerV4F32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
8816 const X86Subtarget *Subtarget,
8817 SelectionDAG &DAG) {
8818 SDLoc DL(Op);
8819 assert(Op.getSimpleValueType() == MVT::v4f32 && "Bad shuffle type!");
8820 assert(V1.getSimpleValueType() == MVT::v4f32 && "Bad operand type!");
8821 assert(V2.getSimpleValueType() == MVT::v4f32 && "Bad operand type!");
8822 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
8823 ArrayRef<int> Mask = SVOp->getMask();
8824 assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");
8826 int NumV2Elements =
8827 std::count_if(Mask.begin(), Mask.end(), [](int M) { return M >= 4; });
8829 if (NumV2Elements == 0) {
8830 // Check for being able to broadcast a single element.
8831 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v4f32, DL, V1,
8832 Mask, Subtarget, DAG))
8835 // Use even/odd duplicate instructions for masks that match their pattern.
8836 if (Subtarget->hasSSE3()) {
8837 if (isShuffleEquivalent(V1, V2, Mask, 0, 0, 2, 2))
8838 return DAG.getNode(X86ISD::MOVSLDUP, DL, MVT::v4f32, V1);
8839 if (isShuffleEquivalent(V1, V2, Mask, 1, 1, 3, 3))
8840 return DAG.getNode(X86ISD::MOVSHDUP, DL, MVT::v4f32, V1);
8843 if (Subtarget->hasAVX()) {
8844 // If we have AVX, we can use VPERMILPS which will allow folding a load
8845 // into the shuffle.
8846 return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v4f32, V1,
8847 getV4X86ShuffleImm8ForMask(Mask, DAG));
8850 // Otherwise, use a straight shuffle of a single input vector. We pass the
8851 // input vector to both operands to simulate this with a SHUFPS.
8852 return DAG.getNode(X86ISD::SHUFP, DL, MVT::v4f32, V1, V1,
8853 getV4X86ShuffleImm8ForMask(Mask, DAG));
8856 // There are special ways we can lower some single-element blends. However, we
8857 // have custom ways we can lower more complex single-element blends below that
8858 // we defer to if both this and BLENDPS fail to match, so restrict this to
8859 // when the V2 input is targeting element 0 of the mask -- that is the fast
8860 // case here.
8861 if (NumV2Elements == 1 && Mask[0] >= 4)
8862 if (SDValue V = lowerVectorShuffleAsElementInsertion(MVT::v4f32, DL, V1, V2,
8863 Mask, Subtarget, DAG))
8866 if (Subtarget->hasSSE41()) {
8867 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v4f32, V1, V2, Mask,
8868 Subtarget, DAG))
8869 return Blend;
8871 // Use INSERTPS if we can complete the shuffle efficiently.
8872 if (SDValue V = lowerVectorShuffleAsInsertPS(Op, V1, V2, Mask, DAG))
8875 if (!isSingleSHUFPSMask(Mask))
8876 if (SDValue BlendPerm = lowerVectorShuffleAsBlendAndPermute(
8877 DL, MVT::v4f32, V1, V2, Mask, DAG))
8881 // Use dedicated unpack instructions for masks that match their pattern.
8882 if (isShuffleEquivalent(V1, V2, Mask, 0, 4, 1, 5))
8883 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v4f32, V1, V2);
8884 if (isShuffleEquivalent(V1, V2, Mask, 2, 6, 3, 7))
8885 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v4f32, V1, V2);
8886 if (isShuffleEquivalent(V1, V2, Mask, 4, 0, 5, 1))
8887 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v4f32, V2, V1);
8888 if (isShuffleEquivalent(V1, V2, Mask, 6, 2, 7, 3))
8889 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v4f32, V2, V1);
8891 // Otherwise fall back to a SHUFPS lowering strategy.
8892 return lowerVectorShuffleWithSHUFPS(DL, MVT::v4f32, Mask, V1, V2, DAG);
8895 /// \brief Lower 4-lane i32 vector shuffles.
8897 /// We try to handle these with integer-domain shuffles where we can, but for
8898 /// blends we use the floating point domain blend instructions.
8899 static SDValue lowerV4I32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
8900 const X86Subtarget *Subtarget,
8901 SelectionDAG &DAG) {
8902 SDLoc DL(Op);
8903 assert(Op.getSimpleValueType() == MVT::v4i32 && "Bad shuffle type!");
8904 assert(V1.getSimpleValueType() == MVT::v4i32 && "Bad operand type!");
8905 assert(V2.getSimpleValueType() == MVT::v4i32 && "Bad operand type!");
8906 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
8907 ArrayRef<int> Mask = SVOp->getMask();
8908 assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");
8910 // Whenever we can lower this as a zext, that instruction is strictly faster
8911 // than any alternative. It also allows us to fold memory operands into the
8912 // shuffle in many cases.
8913 if (SDValue ZExt = lowerVectorShuffleAsZeroOrAnyExtend(DL, MVT::v4i32, V1, V2,
8914 Mask, Subtarget, DAG))
8915 return ZExt;
8917 int NumV2Elements =
8918 std::count_if(Mask.begin(), Mask.end(), [](int M) { return M >= 4; });
8920 if (NumV2Elements == 0) {
8921 // Check for being able to broadcast a single element.
8922 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v4i32, DL, V1,
8923 Mask, Subtarget, DAG))
8926 // Straight shuffle of a single input vector. For everything from SSE2
8927 // onward this has a single fast instruction with no scary immediates.
8928 // We coerce the shuffle pattern to be compatible with UNPCK instructions
8929 // but we aren't actually going to use the UNPCK instruction because doing
8930 // so prevents folding a load into this instruction or making a copy.
8931 const int UnpackLoMask[] = {0, 0, 1, 1};
8932 const int UnpackHiMask[] = {2, 2, 3, 3};
8933 if (isShuffleEquivalent(V1, V2, Mask, 0, 0, 1, 1))
8934 Mask = UnpackLoMask;
8935 else if (isShuffleEquivalent(V1, V2, Mask, 2, 2, 3, 3))
8936 Mask = UnpackHiMask;
8938 return DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32, V1,
8939 getV4X86ShuffleImm8ForMask(Mask, DAG));
8940 }
8942 // Try to use shift instructions.
8943 if (SDValue Shift =
8944 lowerVectorShuffleAsShift(DL, MVT::v4i32, V1, V2, Mask, DAG))
8945 return Shift;
8947 // There are special ways we can lower some single-element blends.
8948 if (NumV2Elements == 1)
8949 if (SDValue V = lowerVectorShuffleAsElementInsertion(MVT::v4i32, DL, V1, V2,
8950 Mask, Subtarget, DAG))
8953 // We have different paths for blend lowering, but they all must use the
8954 // *exact* same predicate.
8955 bool IsBlendSupported = Subtarget->hasSSE41();
8956 if (IsBlendSupported)
8957 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v4i32, V1, V2, Mask,
8958 Subtarget, DAG))
8959 return Blend;
8961 if (SDValue Masked =
8962 lowerVectorShuffleAsBitMask(DL, MVT::v4i32, V1, V2, Mask, DAG))
8963 return Masked;
8965 // Use dedicated unpack instructions for masks that match their pattern.
8966 if (isShuffleEquivalent(V1, V2, Mask, 0, 4, 1, 5))
8967 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v4i32, V1, V2);
8968 if (isShuffleEquivalent(V1, V2, Mask, 2, 6, 3, 7))
8969 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v4i32, V1, V2);
8970 if (isShuffleEquivalent(V1, V2, Mask, 4, 0, 5, 1))
8971 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v4i32, V2, V1);
8972 if (isShuffleEquivalent(V1, V2, Mask, 6, 2, 7, 3))
8973 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v4i32, V2, V1);
8975 // Try to use byte rotation instructions.
8976 // Its more profitable for pre-SSSE3 to use shuffles/unpacks.
8977 if (Subtarget->hasSSSE3())
8978 if (SDValue Rotate = lowerVectorShuffleAsByteRotate(
8979 DL, MVT::v4i32, V1, V2, Mask, Subtarget, DAG))
8982 // If we have direct support for blends, we should lower by decomposing into
8983 // a permute. That will be faster than the domain cross.
8984 if (IsBlendSupported)
8985 return lowerVectorShuffleAsDecomposedShuffleBlend(DL, MVT::v4i32, V1, V2,
8986 Mask, DAG);
8988 // Try to lower by permuting the inputs into an unpack instruction.
8989 if (SDValue Unpack =
8990 lowerVectorShuffleAsUnpack(MVT::v4i32, DL, V1, V2, Mask, DAG))
8993 // We implement this with SHUFPS because it can blend from two vectors.
8994 // Because we're going to eventually use SHUFPS, we use SHUFPS even to build
8995 // up the inputs, bypassing domain shift penalties that we would encur if we
8996 // directly used PSHUFD on Nehalem and older. For newer chips, this isn't
8997 // relevant.
8998 return DAG.getNode(ISD::BITCAST, DL, MVT::v4i32,
8999 DAG.getVectorShuffle(
9000 MVT::v4f32, DL,
9001 DAG.getNode(ISD::BITCAST, DL, MVT::v4f32, V1),
9002 DAG.getNode(ISD::BITCAST, DL, MVT::v4f32, V2), Mask));
9003 }
9005 /// \brief Lowering of single-input v8i16 shuffles is the cornerstone of SSE2
9006 /// shuffle lowering, and the most complex part.
9008 /// The lowering strategy is to try to form pairs of input lanes which are
9009 /// targeted at the same half of the final vector, and then use a dword shuffle
9010 /// to place them onto the right half, and finally unpack the paired lanes into
9011 /// their final position.
9013 /// The exact breakdown of how to form these dword pairs and align them on the
9014 /// correct sides is really tricky. See the comments within the function for
9015 /// more of the details.
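// For example, for the mask <0, 2, 1, 3, 4, 6, 5, 7> every input already
// lies in its destination half, so the cross-half fixing below is a no-op
// and only the final PSHUFLW/PSHUFHW placement step does any work.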
9016 static SDValue lowerV8I16SingleInputVectorShuffle(
9017 SDLoc DL, SDValue V, MutableArrayRef<int> Mask,
9018 const X86Subtarget *Subtarget, SelectionDAG &DAG) {
9019 assert(V.getSimpleValueType() == MVT::v8i16 && "Bad input type!");
9020 MutableArrayRef<int> LoMask = Mask.slice(0, 4);
9021 MutableArrayRef<int> HiMask = Mask.slice(4, 4);
9023 SmallVector<int, 4> LoInputs;
9024 std::copy_if(LoMask.begin(), LoMask.end(), std::back_inserter(LoInputs),
9025 [](int M) { return M >= 0; });
9026 std::sort(LoInputs.begin(), LoInputs.end());
9027 LoInputs.erase(std::unique(LoInputs.begin(), LoInputs.end()), LoInputs.end());
9028 SmallVector<int, 4> HiInputs;
9029 std::copy_if(HiMask.begin(), HiMask.end(), std::back_inserter(HiInputs),
9030 [](int M) { return M >= 0; });
9031 std::sort(HiInputs.begin(), HiInputs.end());
9032 HiInputs.erase(std::unique(HiInputs.begin(), HiInputs.end()), HiInputs.end());
9033 int NumLToL =
9034 std::lower_bound(LoInputs.begin(), LoInputs.end(), 4) - LoInputs.begin();
9035 int NumHToL = LoInputs.size() - NumLToL;
9036 int NumLToH =
9037 std::lower_bound(HiInputs.begin(), HiInputs.end(), 4) - HiInputs.begin();
9038 int NumHToH = HiInputs.size() - NumLToH;
9039 MutableArrayRef<int> LToLInputs(LoInputs.data(), NumLToL);
9040 MutableArrayRef<int> LToHInputs(HiInputs.data(), NumLToH);
9041 MutableArrayRef<int> HToLInputs(LoInputs.data() + NumLToL, NumHToL);
9042 MutableArrayRef<int> HToHInputs(HiInputs.data() + NumLToH, NumHToH);
9044 // Check for being able to broadcast a single element.
9045 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v8i16, DL, V,
9046 Mask, Subtarget, DAG))
9049 // Try to use shift instructions.
9051 lowerVectorShuffleAsShift(DL, MVT::v8i16, V, V, Mask, DAG))
9054 // Use dedicated unpack instructions for masks that match their pattern.
9055 if (isShuffleEquivalent(V, V, Mask, 0, 0, 1, 1, 2, 2, 3, 3))
9056 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v8i16, V, V);
9057 if (isShuffleEquivalent(V, V, Mask, 4, 4, 5, 5, 6, 6, 7, 7))
9058 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v8i16, V, V);
9060 // Try to use byte rotation instructions.
9061 if (SDValue Rotate = lowerVectorShuffleAsByteRotate(
9062 DL, MVT::v8i16, V, V, Mask, Subtarget, DAG))
9065 // Simplify the 1-into-3 and 3-into-1 cases with a single pshufd. For all
9066 // such inputs we can swap two of the dwords across the half mark and end up
9067 // with <=2 inputs to each half in each half. Once there, we can fall through
9068 // to the generic code below. For example:
9070 // Input: [a, b, c, d, e, f, g, h] -PSHUFD[0,2,1,3]-> [a, b, e, f, c, d, g, h]
9071 // Mask: [0, 1, 2, 7, 4, 5, 6, 3] -----------------> [0, 1, 4, 7, 2, 3, 6, 5]
9073 // However in some very rare cases we have a 1-into-3 or 3-into-1 on one half
9074 // and an existing 2-into-2 on the other half. In this case we may have to
9075 // pre-shuffle the 2-into-2 half to avoid turning it into a 3-into-1 or
9076 // 1-into-3 which could cause us to cycle endlessly fixing each side in turn.
9077 // Fortunately, we don't have to handle anything but a 2-into-2 pattern
9078 // because any other situation (including a 3-into-1 or 1-into-3 in the other
9079 // half than the one we target for fixing) will be fixed when we re-enter this
9080 // path. We will also combine away any sequence of PSHUFD instructions that
9081 // result into a single instruction. Here is an example of the tricky case:
9083 // Input: [a, b, c, d, e, f, g, h] -PSHUFD[0,2,1,3]-> [a, b, e, f, c, d, g, h]
9084 // Mask: [3, 7, 1, 0, 2, 7, 3, 5] -THIS-IS-BAD!!!!-> [5, 7, 1, 0, 4, 7, 5, 3]
9086 // This now has a 1-into-3 in the high half! Instead, we do two shuffles:
9088 // Input: [a, b, c, d, e, f, g, h] PSHUFHW[0,2,1,3]-> [a, b, c, d, e, g, f, h]
9089 // Mask: [3, 7, 1, 0, 2, 7, 3, 5] -----------------> [3, 7, 1, 0, 2, 7, 3, 6]
9091 // Input: [a, b, c, d, e, g, f, h] -PSHUFD[0,2,1,3]-> [a, b, e, g, c, d, f, h]
9092 // Mask: [3, 7, 1, 0, 2, 7, 3, 6] -----------------> [5, 7, 1, 0, 4, 7, 5, 6]
9094 // The result is fine to be handled by the generic logic.
9095 auto balanceSides = [&](ArrayRef<int> AToAInputs, ArrayRef<int> BToAInputs,
9096 ArrayRef<int> BToBInputs, ArrayRef<int> AToBInputs,
9097 int AOffset, int BOffset) {
9098 assert((AToAInputs.size() == 3 || AToAInputs.size() == 1) &&
9099 "Must call this with A having 3 or 1 inputs from the A half.");
9100 assert((BToAInputs.size() == 1 || BToAInputs.size() == 3) &&
9101 "Must call this with B having 1 or 3 inputs from the B half.");
9102 assert(AToAInputs.size() + BToAInputs.size() == 4 &&
9103 "Must call this with either 3:1 or 1:3 inputs (summing to 4).");
9105 // Compute the index of dword with only one word among the three inputs in
9106 // a half by taking the sum of the half with three inputs and subtracting
9107 // the sum of the actual three inputs. The difference is the remaining
9108 // slot.
9109 int ADWord, BDWord;
9110 int &TripleDWord = AToAInputs.size() == 3 ? ADWord : BDWord;
9111 int &OneInputDWord = AToAInputs.size() == 3 ? BDWord : ADWord;
9112 int TripleInputOffset = AToAInputs.size() == 3 ? AOffset : BOffset;
9113 ArrayRef<int> TripleInputs = AToAInputs.size() == 3 ? AToAInputs : BToAInputs;
9114 int OneInput = AToAInputs.size() == 3 ? BToAInputs[0] : AToAInputs[0];
9115 int TripleInputSum = 0 + 1 + 2 + 3 + (4 * TripleInputOffset);
9116 int TripleNonInputIdx =
9117 TripleInputSum - std::accumulate(TripleInputs.begin(), TripleInputs.end(), 0);
9118 TripleDWord = TripleNonInputIdx / 2;
9120 // We use xor with one to compute the adjacent DWord to whichever one the
9121 // OneInput is in.
9122 OneInputDWord = (OneInput / 2) ^ 1;
9124 // Check for one tricky case: We're fixing a 3<-1 or a 1<-3 shuffle for AToA
9125 // and BToA inputs. If there is also such a problem with the BToB and AToB
9126 // inputs, we don't try to fix it necessarily -- we'll recurse and see it in
9127 // the next pass. However, if we have a 2<-2 in the BToB and AToB inputs, it
9128 // is essential that we don't *create* a 3<-1 as then we might oscillate.
9129 if (BToBInputs.size() == 2 && AToBInputs.size() == 2) {
9130 // Compute how many inputs will be flipped by swapping these DWords. We need
9132 // to balance this to ensure we don't form a 3-1 shuffle in the other half.
9134 int NumFlippedAToBInputs =
9135 std::count(AToBInputs.begin(), AToBInputs.end(), 2 * ADWord) +
9136 std::count(AToBInputs.begin(), AToBInputs.end(), 2 * ADWord + 1);
9137 int NumFlippedBToBInputs =
9138 std::count(BToBInputs.begin(), BToBInputs.end(), 2 * BDWord) +
9139 std::count(BToBInputs.begin(), BToBInputs.end(), 2 * BDWord + 1);
9140 if ((NumFlippedAToBInputs == 1 &&
9141 (NumFlippedBToBInputs == 0 || NumFlippedBToBInputs == 2)) ||
9142 (NumFlippedBToBInputs == 1 &&
9143 (NumFlippedAToBInputs == 0 || NumFlippedAToBInputs == 2))) {
9144 // We choose whether to fix the A half or B half based on whether that
9145 // half has zero flipped inputs. At zero, we may not be able to fix it
9146 // with that half. We also bias towards fixing the B half because that
9147 // will more commonly be the high half, and we have to bias one way.
9148 auto FixFlippedInputs = [&V, &DL, &Mask, &DAG](int PinnedIdx, int DWord,
9149 ArrayRef<int> Inputs) {
9150 int FixIdx = PinnedIdx ^ 1; // The adjacent slot to the pinned slot.
9151 bool IsFixIdxInput = std::find(Inputs.begin(), Inputs.end(),
9152 PinnedIdx ^ 1) != Inputs.end();
9153 // Determine whether the free index is in the flipped dword or the
9154 // unflipped dword based on where the pinned index is. We use this bit
9155 // in an xor to conditionally select the adjacent dword.
9156 int FixFreeIdx = 2 * (DWord ^ (PinnedIdx / 2 == DWord));
9157 bool IsFixFreeIdxInput = std::find(Inputs.begin(), Inputs.end(),
9158 FixFreeIdx) != Inputs.end();
9159 if (IsFixIdxInput == IsFixFreeIdxInput)
9160 FixFreeIdx += 1;
9161 IsFixFreeIdxInput = std::find(Inputs.begin(), Inputs.end(),
9162 FixFreeIdx) != Inputs.end();
9163 assert(IsFixIdxInput != IsFixFreeIdxInput &&
9164 "We need to be changing the number of flipped inputs!");
9165 int PSHUFHalfMask[] = {0, 1, 2, 3};
9166 std::swap(PSHUFHalfMask[FixFreeIdx % 4], PSHUFHalfMask[FixIdx % 4]);
9167 V = DAG.getNode(FixIdx < 4 ? X86ISD::PSHUFLW : X86ISD::PSHUFHW, DL,
9168 MVT::v8i16, V,
9169 getV4X86ShuffleImm8ForMask(PSHUFHalfMask, DAG));
9171 for (int &M : Mask)
9172 if (M != -1 && M == FixIdx)
9173 M = FixFreeIdx;
9174 else if (M != -1 && M == FixFreeIdx)
9175 M = FixIdx;
9176 };
9177 if (NumFlippedBToBInputs != 0) {
9179 BToAInputs.size() == 3 ? TripleNonInputIdx : OneInput;
9180 FixFlippedInputs(BPinnedIdx, BDWord, BToBInputs);
9182 assert(NumFlippedAToBInputs != 0 && "Impossible given predicates!");
9184 AToAInputs.size() == 3 ? TripleNonInputIdx : OneInput;
9185 FixFlippedInputs(APinnedIdx, ADWord, AToBInputs);
9190 int PSHUFDMask[] = {0, 1, 2, 3};
9191 PSHUFDMask[ADWord] = BDWord;
9192 PSHUFDMask[BDWord] = ADWord;
9193 V = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16,
9194 DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32,
9195 DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, V),
9196 getV4X86ShuffleImm8ForMask(PSHUFDMask, DAG)));
9198 // Adjust the mask to match the new locations of A and B.
9199 for (int &M : Mask)
9200 if (M != -1 && M/2 == ADWord)
9201 M = 2 * BDWord + M % 2;
9202 else if (M != -1 && M/2 == BDWord)
9203 M = 2 * ADWord + M % 2;
9205 // Recurse back into this routine to re-compute state now that this isn't
9206 // a 3 and 1 problem.
9207 return DAG.getVectorShuffle(MVT::v8i16, DL, V, DAG.getUNDEF(MVT::v8i16),
9208 Mask);
9209 };
9210 if ((NumLToL == 3 && NumHToL == 1) || (NumLToL == 1 && NumHToL == 3))
9211 return balanceSides(LToLInputs, HToLInputs, HToHInputs, LToHInputs, 0, 4);
9212 else if ((NumHToH == 3 && NumLToH == 1) || (NumHToH == 1 && NumLToH == 3))
9213 return balanceSides(HToHInputs, LToHInputs, LToLInputs, HToLInputs, 4, 0);
9215 // At this point there are at most two inputs to the low and high halves from
9216 // each half. That means the inputs can always be grouped into dwords and
9217 // those dwords can then be moved to the correct half with a dword shuffle.
9218 // We use at most one low and one high word shuffle to collect these paired
9219 // inputs into dwords, and finally a dword shuffle to place them.
9220 int PSHUFLMask[4] = {-1, -1, -1, -1};
9221 int PSHUFHMask[4] = {-1, -1, -1, -1};
9222 int PSHUFDMask[4] = {-1, -1, -1, -1};
9224 // First fix the masks for all the inputs that are staying in their
9225 // original halves. This will then dictate the targets of the cross-half
9226 // shuffles.
9227 auto fixInPlaceInputs =
9228 [&PSHUFDMask](ArrayRef<int> InPlaceInputs, ArrayRef<int> IncomingInputs,
9229 MutableArrayRef<int> SourceHalfMask,
9230 MutableArrayRef<int> HalfMask, int HalfOffset) {
9231 if (InPlaceInputs.empty())
9233 if (InPlaceInputs.size() == 1) {
9234 SourceHalfMask[InPlaceInputs[0] - HalfOffset] =
9235 InPlaceInputs[0] - HalfOffset;
9236 PSHUFDMask[InPlaceInputs[0] / 2] = InPlaceInputs[0] / 2;
9239 if (IncomingInputs.empty()) {
9240 // Just fix all of the in place inputs.
9241 for (int Input : InPlaceInputs) {
9242 SourceHalfMask[Input - HalfOffset] = Input - HalfOffset;
9243 PSHUFDMask[Input / 2] = Input / 2;
9248 assert(InPlaceInputs.size() == 2 && "Cannot handle 3 or 4 inputs!");
9249 SourceHalfMask[InPlaceInputs[0] - HalfOffset] =
9250 InPlaceInputs[0] - HalfOffset;
9251 // Put the second input next to the first so that they are packed into
9252 // a dword. We find the adjacent index by toggling the low bit.
9253 int AdjIndex = InPlaceInputs[0] ^ 1;
9254 SourceHalfMask[AdjIndex - HalfOffset] = InPlaceInputs[1] - HalfOffset;
9255 std::replace(HalfMask.begin(), HalfMask.end(), InPlaceInputs[1], AdjIndex);
9256 PSHUFDMask[AdjIndex / 2] = AdjIndex / 2;
9258 fixInPlaceInputs(LToLInputs, HToLInputs, PSHUFLMask, LoMask, 0);
9259 fixInPlaceInputs(HToHInputs, LToHInputs, PSHUFHMask, HiMask, 4);
9261 // Now gather the cross-half inputs and place them into a free dword of
9262 // their target half.
9263 // FIXME: This operation could almost certainly be simplified dramatically to
9264 // look more like the 3-1 fixing operation.
9265 auto moveInputsToRightHalf = [&PSHUFDMask](
9266 MutableArrayRef<int> IncomingInputs, ArrayRef<int> ExistingInputs,
9267 MutableArrayRef<int> SourceHalfMask, MutableArrayRef<int> HalfMask,
9268 MutableArrayRef<int> FinalSourceHalfMask, int SourceOffset,
9269 int DestOffset) {
9270 auto isWordClobbered = [](ArrayRef<int> SourceHalfMask, int Word) {
9271 return SourceHalfMask[Word] != -1 && SourceHalfMask[Word] != Word;
9272 };
9273 auto isDWordClobbered = [&isWordClobbered](ArrayRef<int> SourceHalfMask,
9274 int Word) {
9275 int LowWord = Word & ~1;
9276 int HighWord = Word | 1;
9277 return isWordClobbered(SourceHalfMask, LowWord) ||
9278 isWordClobbered(SourceHalfMask, HighWord);
9281 if (IncomingInputs.empty())
9284 if (ExistingInputs.empty()) {
9285 // Map any dwords with inputs from them into the right half.
9286 for (int Input : IncomingInputs) {
9287 // If the source half mask maps over the inputs, turn those into
9288 // swaps and use the swapped lane.
9289 if (isWordClobbered(SourceHalfMask, Input - SourceOffset)) {
9290 if (SourceHalfMask[SourceHalfMask[Input - SourceOffset]] == -1) {
9291 SourceHalfMask[SourceHalfMask[Input - SourceOffset]] =
9292 Input - SourceOffset;
9293 // We have to swap the uses in our half mask in one sweep.
9294 for (int &M : HalfMask)
9295 if (M == SourceHalfMask[Input - SourceOffset] + SourceOffset)
9297 else if (M == Input)
9298 M = SourceHalfMask[Input - SourceOffset] + SourceOffset;
9300 assert(SourceHalfMask[SourceHalfMask[Input - SourceOffset]] ==
9301 Input - SourceOffset &&
9302 "Previous placement doesn't match!");
9304 // Note that this correctly re-maps both when we do a swap and when
9305 // we observe the other side of the swap above. We rely on that to
9306 // avoid swapping the members of the input list directly.
9307 Input = SourceHalfMask[Input - SourceOffset] + SourceOffset;
9310 // Map the input's dword into the correct half.
9311 if (PSHUFDMask[(Input - SourceOffset + DestOffset) / 2] == -1)
9312 PSHUFDMask[(Input - SourceOffset + DestOffset) / 2] = Input / 2;
9314 assert(PSHUFDMask[(Input - SourceOffset + DestOffset) / 2] ==
9316 "Previous placement doesn't match!");
9319 // And just directly shift any other-half mask elements to be same-half
9320 // as we will have mirrored the dword containing the element into the
9321 // same position within that half.
9322 for (int &M : HalfMask)
9323 if (M >= SourceOffset && M < SourceOffset + 4) {
9324 M = M - SourceOffset + DestOffset;
9325 assert(M >= 0 && "This should never wrap below zero!");
9330 // Ensure we have the input in a viable dword of its current half. This
9331 // is particularly tricky because the original position may be clobbered
9332 // by inputs being moved and *staying* in that half.
9333 if (IncomingInputs.size() == 1) {
9334 if (isWordClobbered(SourceHalfMask, IncomingInputs[0] - SourceOffset)) {
9335 int InputFixed = std::find(std::begin(SourceHalfMask),
9336 std::end(SourceHalfMask), -1) -
9337 std::begin(SourceHalfMask) + SourceOffset;
9338 SourceHalfMask[InputFixed - SourceOffset] =
9339 IncomingInputs[0] - SourceOffset;
9340 std::replace(HalfMask.begin(), HalfMask.end(), IncomingInputs[0],
9342 IncomingInputs[0] = InputFixed;
9344 } else if (IncomingInputs.size() == 2) {
9345 if (IncomingInputs[0] / 2 != IncomingInputs[1] / 2 ||
9346 isDWordClobbered(SourceHalfMask, IncomingInputs[0] - SourceOffset)) {
9347 // We have two non-adjacent or clobbered inputs we need to extract from
9348 // the source half. To do this, we need to map them into some adjacent
9349 // dword slot in the source mask.
9350 int InputsFixed[2] = {IncomingInputs[0] - SourceOffset,
9351 IncomingInputs[1] - SourceOffset};
9353 // If there is a free slot in the source half mask adjacent to one of
9354 // the inputs, place the other input in it. We use (Index XOR 1) to
9355 // compute an adjacent index.
9356 if (!isWordClobbered(SourceHalfMask, InputsFixed[0]) &&
9357 SourceHalfMask[InputsFixed[0] ^ 1] == -1) {
9358 SourceHalfMask[InputsFixed[0]] = InputsFixed[0];
9359 SourceHalfMask[InputsFixed[0] ^ 1] = InputsFixed[1];
9360 InputsFixed[1] = InputsFixed[0] ^ 1;
9361 } else if (!isWordClobbered(SourceHalfMask, InputsFixed[1]) &&
9362 SourceHalfMask[InputsFixed[1] ^ 1] == -1) {
9363 SourceHalfMask[InputsFixed[1]] = InputsFixed[1];
9364 SourceHalfMask[InputsFixed[1] ^ 1] = InputsFixed[0];
9365 InputsFixed[0] = InputsFixed[1] ^ 1;
9366 } else if (SourceHalfMask[2 * ((InputsFixed[0] / 2) ^ 1)] == -1 &&
9367 SourceHalfMask[2 * ((InputsFixed[0] / 2) ^ 1) + 1] == -1) {
9368 // The two inputs are in the same DWord but it is clobbered and the
9369 // adjacent DWord isn't used at all. Move both inputs to the free
9370 // slot.
9371 SourceHalfMask[2 * ((InputsFixed[0] / 2) ^ 1)] = InputsFixed[0];
9372 SourceHalfMask[2 * ((InputsFixed[0] / 2) ^ 1) + 1] = InputsFixed[1];
9373 InputsFixed[0] = 2 * ((InputsFixed[0] / 2) ^ 1);
9374 InputsFixed[1] = InputsFixed[0] + 1;
9375 } else {
9376 // The only way we hit this point is if there is no clobbering
9377 // (because there are no off-half inputs to this half) and there is no
9378 // free slot adjacent to one of the inputs. In this case, we have to
9379 // swap an input with a non-input.
9380 for (int i = 0; i < 4; ++i)
9381 assert((SourceHalfMask[i] == -1 || SourceHalfMask[i] == i) &&
9382 "We can't handle any clobbers here!");
9383 assert(InputsFixed[1] != (InputsFixed[0] ^ 1) &&
9384 "Cannot have adjacent inputs here!");
9386 SourceHalfMask[InputsFixed[0] ^ 1] = InputsFixed[1];
9387 SourceHalfMask[InputsFixed[1]] = InputsFixed[0] ^ 1;
9389 // We also have to update the final source mask in this case because
9390 // it may need to undo the above swap.
9391 for (int &M : FinalSourceHalfMask)
9392 if (M == (InputsFixed[0] ^ 1) + SourceOffset)
9393 M = InputsFixed[1] + SourceOffset;
9394 else if (M == InputsFixed[1] + SourceOffset)
9395 M = (InputsFixed[0] ^ 1) + SourceOffset;
9397 InputsFixed[1] = InputsFixed[0] ^ 1;
9400 // Point everything at the fixed inputs.
9401 for (int &M : HalfMask)
9402 if (M == IncomingInputs[0])
9403 M = InputsFixed[0] + SourceOffset;
9404 else if (M == IncomingInputs[1])
9405 M = InputsFixed[1] + SourceOffset;
9407 IncomingInputs[0] = InputsFixed[0] + SourceOffset;
9408 IncomingInputs[1] = InputsFixed[1] + SourceOffset;
9409 } else {
9411 llvm_unreachable("Unhandled input size!");
9412 }
9414 // Now hoist the DWord down to the right half.
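// (The destination half owns PSHUFD slots DestOffset/2 and DestOffset/2 + 1;
// whichever of those two slots is still unassigned receives the incoming DWord.)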
9415 int FreeDWord = (PSHUFDMask[DestOffset / 2] == -1 ? 0 : 1) + DestOffset / 2;
9416 assert(PSHUFDMask[FreeDWord] == -1 && "DWord not free");
9417 PSHUFDMask[FreeDWord] = IncomingInputs[0] / 2;
9418 for (int &M : HalfMask)
9419 for (int Input : IncomingInputs)
9420 if (M == Input)
9421 M = FreeDWord * 2 + Input % 2;
9422 };
9423 moveInputsToRightHalf(HToLInputs, LToLInputs, PSHUFHMask, LoMask, HiMask,
9424 /*SourceOffset*/ 4, /*DestOffset*/ 0);
9425 moveInputsToRightHalf(LToHInputs, HToHInputs, PSHUFLMask, HiMask, LoMask,
9426 /*SourceOffset*/ 0, /*DestOffset*/ 4);
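// Illustrative example: for a mask such as <0, 5, 1, 7, u, u, u, u> the first
// call has to pull words 5 and 7 out of the high half. It packs them into a
// single DWord of the high half via PSHUFHMask, records that DWord's move into
// the low half in PSHUFDMask, and rewrites LoMask to the words' final
// positions; the second call does the mirror-image work for any low-half words
// needed by the high half.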
9428 // Now enact all the shuffles we've computed to move the inputs into their
9429 // target half.
9430 if (!isNoopShuffleMask(PSHUFLMask))
9431 V = DAG.getNode(X86ISD::PSHUFLW, DL, MVT::v8i16, V,
9432 getV4X86ShuffleImm8ForMask(PSHUFLMask, DAG));
9433 if (!isNoopShuffleMask(PSHUFHMask))
9434 V = DAG.getNode(X86ISD::PSHUFHW, DL, MVT::v8i16, V,
9435 getV4X86ShuffleImm8ForMask(PSHUFHMask, DAG));
9436 if (!isNoopShuffleMask(PSHUFDMask))
9437 V = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16,
9438 DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32,
9439 DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, V),
9440 getV4X86ShuffleImm8ForMask(PSHUFDMask, DAG)));
9442 // At this point, each half should contain all its inputs, and we can then
9443 // just shuffle them into their final position.
9444 assert(std::count_if(LoMask.begin(), LoMask.end(),
9445 [](int M) { return M >= 4; }) == 0 &&
9446 "Failed to lift all the high half inputs to the low mask!");
9447 assert(std::count_if(HiMask.begin(), HiMask.end(),
9448 [](int M) { return M >= 0 && M < 4; }) == 0 &&
9449 "Failed to lift all the low half inputs to the high mask!");
9451 // Do a half shuffle for the low mask.
9452 if (!isNoopShuffleMask(LoMask))
9453 V = DAG.getNode(X86ISD::PSHUFLW, DL, MVT::v8i16, V,
9454 getV4X86ShuffleImm8ForMask(LoMask, DAG));
9456 // Do a half shuffle with the high mask after shifting its values down.
9457 for (int &M : HiMask)
9458 if (M >= 0)
9459 M -= 4;
9460 if (!isNoopShuffleMask(HiMask))
9461 V = DAG.getNode(X86ISD::PSHUFHW, DL, MVT::v8i16, V,
9462 getV4X86ShuffleImm8ForMask(HiMask, DAG));
9464 return V;
9465 }
9467 /// \brief Detect whether the mask pattern should be lowered through
9468 /// interleaving.
9469 ///
9470 /// This essentially tests whether viewing the mask as an interleaving of two
9471 /// sub-sequences reduces the cross-input traffic of a blend operation. If so,
9472 /// lowering it through interleaving is a significantly better strategy.
9473 static bool shouldLowerAsInterleaving(ArrayRef<int> Mask) {
9474 int NumEvenInputs[2] = {0, 0};
9475 int NumOddInputs[2] = {0, 0};
9476 int NumLoInputs[2] = {0, 0};
9477 int NumHiInputs[2] = {0, 0};
9478 for (int i = 0, Size = Mask.size(); i < Size; ++i) {
9479 if (Mask[i] < 0)
9480 continue;
9482 int InputIdx = Mask[i] >= Size;
9484 if (i < Size / 2)
9485 ++NumLoInputs[InputIdx];
9486 else
9487 ++NumHiInputs[InputIdx];
9489 if (i % 2 == 0)
9490 ++NumEvenInputs[InputIdx];
9491 else
9492 ++NumOddInputs[InputIdx];
9493 }
9495 // The minimum number of cross-input results for both the interleaved and
9496 // split cases. If interleaving results in fewer cross-input results, return
9497 // true.
9498 int InterleavedCrosses = std::min(NumEvenInputs[1] + NumOddInputs[0],
9499 NumEvenInputs[0] + NumOddInputs[1]);
9500 int SplitCrosses = std::min(NumLoInputs[1] + NumHiInputs[0],
9501 NumLoInputs[0] + NumHiInputs[1]);
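// E.g. for <0, 8, 1, 9, 2, 10, 3, 11> every even result element comes from V1
// and every odd one from V2, so InterleavedCrosses is 0 while SplitCrosses is
// 4, and interleaving is preferred.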
9502 return InterleavedCrosses < SplitCrosses;
9503 }
9505 /// \brief Blend two v8i16 vectors using a naive unpack strategy.
9507 /// This strategy only works when the inputs from each vector fit into a single
9508 /// half of that vector, and generally there are not so many inputs as to leave
9509 /// the in-place shuffles required highly constrained (and thus expensive). It
9510 /// shifts all the inputs into a single side of both input vectors and then
9511 /// uses an unpack to interleave these inputs in a single vector. At that
9512 /// point, we will fall back on the generic single input shuffle lowering.
9513 static SDValue lowerV8I16BasicBlendVectorShuffle(SDLoc DL, SDValue V1,
9514 SDValue V2,
9515 MutableArrayRef<int> Mask,
9516 const X86Subtarget *Subtarget,
9517 SelectionDAG &DAG) {
9518 assert(V1.getSimpleValueType() == MVT::v8i16 && "Bad input type!");
9519 assert(V2.getSimpleValueType() == MVT::v8i16 && "Bad input type!");
9520 SmallVector<int, 3> LoV1Inputs, HiV1Inputs, LoV2Inputs, HiV2Inputs;
9521 for (int i = 0; i < 8; ++i)
9522 if (Mask[i] >= 0 && Mask[i] < 4)
9523 LoV1Inputs.push_back(i);
9524 else if (Mask[i] >= 4 && Mask[i] < 8)
9525 HiV1Inputs.push_back(i);
9526 else if (Mask[i] >= 8 && Mask[i] < 12)
9527 LoV2Inputs.push_back(i);
9528 else if (Mask[i] >= 12)
9529 HiV2Inputs.push_back(i);
9531 int NumV1Inputs = LoV1Inputs.size() + HiV1Inputs.size();
9532 int NumV2Inputs = LoV2Inputs.size() + HiV2Inputs.size();
9535 assert(NumV1Inputs > 0 && NumV1Inputs <= 3 && "At most 3 inputs supported");
9536 assert(NumV2Inputs > 0 && NumV2Inputs <= 3 && "At most 3 inputs supported");
9537 assert(NumV1Inputs + NumV2Inputs <= 4 && "At most 4 combined inputs");
9539 bool MergeFromLo = LoV1Inputs.size() + LoV2Inputs.size() >=
9540 HiV1Inputs.size() + HiV2Inputs.size();
9542 auto moveInputsToHalf = [&](SDValue V, ArrayRef<int> LoInputs,
9543 ArrayRef<int> HiInputs, bool MoveToLo,
9544 int MaskOffset) {
9545 ArrayRef<int> GoodInputs = MoveToLo ? LoInputs : HiInputs;
9546 ArrayRef<int> BadInputs = MoveToLo ? HiInputs : LoInputs;
9547 if (BadInputs.empty())
9548 return V;
9550 int MoveMask[] = {-1, -1, -1, -1, -1, -1, -1, -1};
9551 int MoveOffset = MoveToLo ? 0 : 4;
9553 if (GoodInputs.empty()) {
9554 for (int BadInput : BadInputs) {
9555 MoveMask[Mask[BadInput] % 4 + MoveOffset] = Mask[BadInput] - MaskOffset;
9556 Mask[BadInput] = Mask[BadInput] % 4 + MoveOffset + MaskOffset;
9557 }
9558 } else {
9559 if (GoodInputs.size() == 2) {
9560 // If the low inputs are spread across two dwords, pack them into
9561 // a single dword.
9562 MoveMask[MoveOffset] = Mask[GoodInputs[0]] - MaskOffset;
9563 MoveMask[MoveOffset + 1] = Mask[GoodInputs[1]] - MaskOffset;
9564 Mask[GoodInputs[0]] = MoveOffset + MaskOffset;
9565 Mask[GoodInputs[1]] = MoveOffset + 1 + MaskOffset;
9566 } else {
9567 // Otherwise pin the good inputs.
9568 for (int GoodInput : GoodInputs)
9569 MoveMask[Mask[GoodInput] - MaskOffset] = Mask[GoodInput] - MaskOffset;
9572 if (BadInputs.size() == 2) {
9573 // If we have two bad inputs then there may be either one or two good
9574 // inputs fixed in place. Find a fixed input, and then find the *other*
9575 // two adjacent indices by using modular arithmetic.
9576 int GoodMaskIdx =
9577 std::find_if(std::begin(MoveMask) + MoveOffset, std::end(MoveMask),
9578 [](int M) { return M >= 0; }) -
9579 std::begin(MoveMask);
9580 int MoveMaskIdx =
9581 ((((GoodMaskIdx - MoveOffset) & ~1) + 2) % 4) + MoveOffset;
9582 assert(MoveMask[MoveMaskIdx] == -1 && "Expected empty slot");
9583 assert(MoveMask[MoveMaskIdx + 1] == -1 && "Expected empty slot");
9584 MoveMask[MoveMaskIdx] = Mask[BadInputs[0]] - MaskOffset;
9585 MoveMask[MoveMaskIdx + 1] = Mask[BadInputs[1]] - MaskOffset;
9586 Mask[BadInputs[0]] = MoveMaskIdx + MaskOffset;
9587 Mask[BadInputs[1]] = MoveMaskIdx + 1 + MaskOffset;
9588 } else {
9589 assert(BadInputs.size() == 1 && "All sizes handled");
9590 int MoveMaskIdx = std::find(std::begin(MoveMask) + MoveOffset,
9591 std::end(MoveMask), -1) -
9592 std::begin(MoveMask);
9593 MoveMask[MoveMaskIdx] = Mask[BadInputs[0]] - MaskOffset;
9594 Mask[BadInputs[0]] = MoveMaskIdx + MaskOffset;
9595 }
9596 }
9598 return DAG.getVectorShuffle(MVT::v8i16, DL, V, DAG.getUNDEF(MVT::v8i16),
9599 MoveMask);
9600 };
9601 V1 = moveInputsToHalf(V1, LoV1Inputs, HiV1Inputs, MergeFromLo,
9603 V2 = moveInputsToHalf(V2, LoV2Inputs, HiV2Inputs, MergeFromLo,
9606 // FIXME: Select an interleaving of the merge of V1 and V2 that minimizes
9607 // cross-half traffic in the final shuffle.
9609 // Munge the mask to be a single-input mask after the unpack merges the
9610 // halves.
9611 for (int &M : Mask)
9612 if (M != -1)
9613 M = 2 * (M % 4) + (M / 8);
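// After the unpack, word M%4 of V1's packed half sits at result word 2*(M%4)
// and the corresponding V2 word at 2*(M%4)+1; M/8 is 1 exactly for V2
// elements, which is what the remapping above encodes.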
9615 return DAG.getVectorShuffle(
9616 MVT::v8i16, DL, DAG.getNode(MergeFromLo ? X86ISD::UNPCKL : X86ISD::UNPCKH,
9617 DL, MVT::v8i16, V1, V2),
9618 DAG.getUNDEF(MVT::v8i16), Mask);
9621 /// \brief Generic lowering of 8-lane i16 shuffles.
9623 /// This handles both single-input shuffles and combined shuffle/blends with
9624 /// two inputs. The single input shuffles are immediately delegated to
9625 /// a dedicated lowering routine.
9627 /// The blends are lowered in one of three fundamental ways. If there are few
9628 /// enough inputs, it delegates to a basic UNPCK-based strategy. If the shuffle
9629 /// of the input is significantly cheaper when lowered as an interleaving of
9630 /// the two inputs, try to interleave them. Otherwise, blend the low and high
9631 /// halves of the inputs separately (making them have relatively few inputs)
9632 /// and then concatenate them.
9633 static SDValue lowerV8I16VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
9634 const X86Subtarget *Subtarget,
9635 SelectionDAG &DAG) {
9636 SDLoc DL(Op);
9637 assert(Op.getSimpleValueType() == MVT::v8i16 && "Bad shuffle type!");
9638 assert(V1.getSimpleValueType() == MVT::v8i16 && "Bad operand type!");
9639 assert(V2.getSimpleValueType() == MVT::v8i16 && "Bad operand type!");
9640 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
9641 ArrayRef<int> OrigMask = SVOp->getMask();
9642 int MaskStorage[8] = {OrigMask[0], OrigMask[1], OrigMask[2], OrigMask[3],
9643 OrigMask[4], OrigMask[5], OrigMask[6], OrigMask[7]};
9644 MutableArrayRef<int> Mask(MaskStorage);
9646 assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
9648 // Whenever we can lower this as a zext, that instruction is strictly faster
9649 // than any alternative.
9650 if (SDValue ZExt = lowerVectorShuffleAsZeroOrAnyExtend(
9651 DL, MVT::v8i16, V1, V2, OrigMask, Subtarget, DAG))
9654 auto isV1 = [](int M) { return M >= 0 && M < 8; };
9655 auto isV2 = [](int M) { return M >= 8; };
9657 int NumV1Inputs = std::count_if(Mask.begin(), Mask.end(), isV1);
9658 int NumV2Inputs = std::count_if(Mask.begin(), Mask.end(), isV2);
9660 if (NumV2Inputs == 0)
9661 return lowerV8I16SingleInputVectorShuffle(DL, V1, Mask, Subtarget, DAG);
9663 assert(NumV1Inputs > 0 && "All single-input shuffles should be canonicalized "
9664 "to be V1-input shuffles.");
9666 // Try to use shift instructions.
9667 if (SDValue Shift =
9668 lowerVectorShuffleAsShift(DL, MVT::v8i16, V1, V2, Mask, DAG))
9669 return Shift;
9671 // There are special ways we can lower some single-element blends.
9672 if (NumV2Inputs == 1)
9673 if (SDValue V = lowerVectorShuffleAsElementInsertion(MVT::v8i16, DL, V1, V2,
9674 Mask, Subtarget, DAG))
9677 // We have different paths for blend lowering, but they all must use the
9678 // *exact* same predicate.
9679 bool IsBlendSupported = Subtarget->hasSSE41();
9680 if (IsBlendSupported)
9681 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v8i16, V1, V2, Mask,
9682 Subtarget, DAG))
9683 return Blend;
9685 if (SDValue Masked =
9686 lowerVectorShuffleAsBitMask(DL, MVT::v8i16, V1, V2, Mask, DAG))
9689 // Use dedicated unpack instructions for masks that match their pattern.
9690 if (isShuffleEquivalent(V1, V2, Mask, 0, 8, 1, 9, 2, 10, 3, 11))
9691 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v8i16, V1, V2);
9692 if (isShuffleEquivalent(V1, V2, Mask, 4, 12, 5, 13, 6, 14, 7, 15))
9693 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v8i16, V1, V2);
9695 // Try to use byte rotation instructions.
9696 if (SDValue Rotate = lowerVectorShuffleAsByteRotate(
9697 DL, MVT::v8i16, V1, V2, Mask, Subtarget, DAG))
9700 if (SDValue BitBlend =
9701 lowerVectorShuffleAsBitBlend(DL, MVT::v8i16, V1, V2, Mask, DAG))
9704 if (NumV1Inputs + NumV2Inputs <= 4)
9705 return lowerV8I16BasicBlendVectorShuffle(DL, V1, V2, Mask, Subtarget, DAG);
9707 // Check whether an interleaving lowering is likely to be more efficient.
9708 // This isn't perfect but it is a strong heuristic that tends to work well on
9709 // the kinds of shuffles that show up in practice.
9711 // FIXME: Handle 1x, 2x, and 4x interleaving.
9712 if (shouldLowerAsInterleaving(Mask)) {
9713 // FIXME: Figure out whether we should pack these into the low or high
9714 // halves.
9716 int EMask[8], OMask[8];
9717 for (int i = 0; i < 4; ++i) {
9718 EMask[i] = Mask[2*i];
9719 OMask[i] = Mask[2*i + 1];
9720 EMask[i + 4] = -1;
9721 OMask[i + 4] = -1;
9722 }
9724 SDValue Evens = DAG.getVectorShuffle(MVT::v8i16, DL, V1, V2, EMask);
9725 SDValue Odds = DAG.getVectorShuffle(MVT::v8i16, DL, V1, V2, OMask);
9727 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v8i16, Evens, Odds);
9730 // If we have direct support for blends, we should lower by decomposing into
9731 // permutes and a blend.
9732 if (IsBlendSupported)
9733 return lowerVectorShuffleAsDecomposedShuffleBlend(DL, MVT::v8i16, V1, V2,
9736 // Try to lower by permuting the inputs into an unpack instruction.
9737 if (SDValue Unpack =
9738 lowerVectorShuffleAsUnpack(MVT::v8i16, DL, V1, V2, Mask, DAG))
9741 int LoBlendMask[8] = {-1, -1, -1, -1, -1, -1, -1, -1};
9742 int HiBlendMask[8] = {-1, -1, -1, -1, -1, -1, -1, -1};
9744 for (int i = 0; i < 4; ++i) {
9745 LoBlendMask[i] = Mask[i];
9746 HiBlendMask[i] = Mask[i + 4];
9749 SDValue LoV = DAG.getVectorShuffle(MVT::v8i16, DL, V1, V2, LoBlendMask);
9750 SDValue HiV = DAG.getVectorShuffle(MVT::v8i16, DL, V1, V2, HiBlendMask);
9751 LoV = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, LoV);
9752 HiV = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, HiV);
9754 return DAG.getNode(ISD::BITCAST, DL, MVT::v8i16,
9755 DAG.getNode(X86ISD::UNPCKL, DL, MVT::v2i64, LoV, HiV));
9758 /// \brief Check whether a compaction lowering can be done by dropping even
9759 /// elements and compute how many times even elements must be dropped.
9761 /// This handles shuffles which take every Nth element where N is a power of
9762 /// two. Example shuffle masks:
9764 /// N = 1: 0, 2, 4, 6, 8, 10, 12, 14, 0, 2, 4, 6, 8, 10, 12, 14
9765 /// N = 1: 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30
9766 /// N = 2: 0, 4, 8, 12, 0, 4, 8, 12, 0, 4, 8, 12, 0, 4, 8, 12
9767 /// N = 2: 0, 4, 8, 12, 16, 20, 24, 28, 0, 4, 8, 12, 16, 20, 24, 28
9768 /// N = 3: 0, 8, 0, 8, 0, 8, 0, 8, 0, 8, 0, 8, 0, 8, 0, 8
9769 /// N = 3: 0, 8, 16, 24, 0, 8, 16, 24, 0, 8, 16, 24, 0, 8, 16, 24
9771 /// Any of these lanes can of course be undef.
9773 /// This routine only supports N <= 3.
9774 /// FIXME: Evaluate whether either AVX or AVX-512 have any opportunities here
9775 /// for larger vectors.
9777 /// \returns N above, or the number of times even elements must be dropped if
9778 /// there is such a number. Otherwise returns zero.
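///
/// For instance, the N = 2 pattern above is what an i32-to-i8 truncation looks
/// like as a byte shuffle: every fourth byte survives dropping the even
/// elements twice.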
9779 static int canLowerByDroppingEvenElements(ArrayRef<int> Mask) {
9780 // Figure out whether we're looping over two inputs or just one.
9781 bool IsSingleInput = isSingleInputShuffleMask(Mask);
9783 // The modulus for the shuffle vector entries is based on whether this is
9784 // a single input or not.
9785 int ShuffleModulus = Mask.size() * (IsSingleInput ? 1 : 2);
9786 assert(isPowerOf2_32((uint32_t)ShuffleModulus) &&
9787 "We should only be called with masks with a power-of-2 size!");
9789 uint64_t ModMask = (uint64_t)ShuffleModulus - 1;
9791 // We track whether the input is viable for all power-of-2 strides 2^1, 2^2,
9792 // and 2^3 simultaneously. This is because we may have ambiguity with
9793 // partially undef inputs.
9794 bool ViableForN[3] = {true, true, true};
9796 for (int i = 0, e = Mask.size(); i < e; ++i) {
9797 // Ignore undef lanes, we'll optimistically collapse them to the pattern we
9798 // want.
9799 if (Mask[i] == -1)
9800 continue;
9802 bool IsAnyViable = false;
9803 for (unsigned j = 0; j != array_lengthof(ViableForN); ++j)
9804 if (ViableForN[j]) {
9805 uint64_t N = j + 1;
9807 // The shuffle mask must be equal to (i * 2^N) % M.
9808 if ((uint64_t)Mask[i] == (((uint64_t)i << N) & ModMask))
9809 IsAnyViable = true;
9810 else
9811 ViableForN[j] = false;
9812 }
9813 // Early exit if we exhaust the possible powers of two.
9814 if (!IsAnyViable)
9815 break;
9816 }
9818 for (unsigned j = 0; j != array_lengthof(ViableForN); ++j)
9819 if (ViableForN[j])
9820 return j + 1;
9822 // Return 0 as there is no viable power of two.
9823 return 0;
9824 }
9826 /// \brief Generic lowering of v16i8 shuffles.
9828 /// This is a hybrid strategy to lower v16i8 vectors. It first attempts to
9829 /// detect any complexity reducing interleaving. If that doesn't help, it uses
9830 /// UNPCK to spread the i8 elements across two i16-element vectors, and uses
9831 /// the existing lowering for v8i16 blends on each half, finally PACK-ing them
9832 /// back together.
9833 static SDValue lowerV16I8VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
9834 const X86Subtarget *Subtarget,
9835 SelectionDAG &DAG) {
9836 SDLoc DL(Op);
9837 assert(Op.getSimpleValueType() == MVT::v16i8 && "Bad shuffle type!");
9838 assert(V1.getSimpleValueType() == MVT::v16i8 && "Bad operand type!");
9839 assert(V2.getSimpleValueType() == MVT::v16i8 && "Bad operand type!");
9840 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
9841 ArrayRef<int> OrigMask = SVOp->getMask();
9842 assert(OrigMask.size() == 16 && "Unexpected mask size for v16 shuffle!");
9844 // Try to use shift instructions.
9845 if (SDValue Shift =
9846 lowerVectorShuffleAsShift(DL, MVT::v16i8, V1, V2, OrigMask, DAG))
9847 return Shift;
9849 // Try to use byte rotation instructions.
9850 if (SDValue Rotate = lowerVectorShuffleAsByteRotate(
9851 DL, MVT::v16i8, V1, V2, OrigMask, Subtarget, DAG))
9854 // Try to use a zext lowering.
9855 if (SDValue ZExt = lowerVectorShuffleAsZeroOrAnyExtend(
9856 DL, MVT::v16i8, V1, V2, OrigMask, Subtarget, DAG))
9859 int MaskStorage[16] = {
9860 OrigMask[0], OrigMask[1], OrigMask[2], OrigMask[3],
9861 OrigMask[4], OrigMask[5], OrigMask[6], OrigMask[7],
9862 OrigMask[8], OrigMask[9], OrigMask[10], OrigMask[11],
9863 OrigMask[12], OrigMask[13], OrigMask[14], OrigMask[15]};
9864 MutableArrayRef<int> Mask(MaskStorage);
9865 MutableArrayRef<int> LoMask = Mask.slice(0, 8);
9866 MutableArrayRef<int> HiMask = Mask.slice(8, 8);
9868 int NumV2Elements =
9869 std::count_if(Mask.begin(), Mask.end(), [](int M) { return M >= 16; });
9871 // For single-input shuffles, there are some nicer lowering tricks we can use.
9872 if (NumV2Elements == 0) {
9873 // Check for being able to broadcast a single element.
9874 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v16i8, DL, V1,
9875 Mask, Subtarget, DAG))
9878 // Check whether we can widen this to an i16 shuffle by duplicating bytes.
9879 // Notably, this handles splat and partial-splat shuffles more efficiently.
9880 // However, it only makes sense if the pre-duplication shuffle simplifies
9881 // things significantly. Currently, this means we need to be able to
9882 // express the pre-duplication shuffle as an i16 shuffle.
9884 // FIXME: We should check for other patterns which can be widened into an
9885 // i16 shuffle as well.
9886 auto canWidenViaDuplication = [](ArrayRef<int> Mask) {
9887 for (int i = 0; i < 16; i += 2)
9888 if (Mask[i] != -1 && Mask[i + 1] != -1 && Mask[i] != Mask[i + 1])
9889 return false;
9891 return true;
9892 };
9893 auto tryToWidenViaDuplication = [&]() -> SDValue {
9894 if (!canWidenViaDuplication(Mask))
9895 return SDValue();
9896 SmallVector<int, 4> LoInputs;
9897 std::copy_if(Mask.begin(), Mask.end(), std::back_inserter(LoInputs),
9898 [](int M) { return M >= 0 && M < 8; });
9899 std::sort(LoInputs.begin(), LoInputs.end());
9900 LoInputs.erase(std::unique(LoInputs.begin(), LoInputs.end()),
9901 LoInputs.end());
9902 SmallVector<int, 4> HiInputs;
9903 std::copy_if(Mask.begin(), Mask.end(), std::back_inserter(HiInputs),
9904 [](int M) { return M >= 8; });
9905 std::sort(HiInputs.begin(), HiInputs.end());
9906 HiInputs.erase(std::unique(HiInputs.begin(), HiInputs.end()),
9907 HiInputs.end());
9909 bool TargetLo = LoInputs.size() >= HiInputs.size();
9910 ArrayRef<int> InPlaceInputs = TargetLo ? LoInputs : HiInputs;
9911 ArrayRef<int> MovingInputs = TargetLo ? HiInputs : LoInputs;
9913 int PreDupI16Shuffle[] = {-1, -1, -1, -1, -1, -1, -1, -1};
9914 SmallDenseMap<int, int, 8> LaneMap;
9915 for (int I : InPlaceInputs) {
9916 PreDupI16Shuffle[I/2] = I/2;
9917 LaneMap[I] = I;
9918 }
9919 int j = TargetLo ? 0 : 4, je = j + 4;
9920 for (int i = 0, ie = MovingInputs.size(); i < ie; ++i) {
9921 // Check if j is already a shuffle of this input. This happens when
9922 // there are two adjacent bytes after we move the low one.
9923 if (PreDupI16Shuffle[j] != MovingInputs[i] / 2) {
9924 // If we haven't yet mapped the input, search for a slot into which
9925 // we can map it.
9926 while (j < je && PreDupI16Shuffle[j] != -1)
9927 ++j;
9929 if (j == je)
9930 // We can't place the inputs into a single half with a simple i16 shuffle, so bail.
9931 return SDValue();
9933 // Map this input with the i16 shuffle.
9934 PreDupI16Shuffle[j] = MovingInputs[i] / 2;
9935 }
9937 // Update the lane map based on the mapping we ended up with.
9938 LaneMap[MovingInputs[i]] = 2 * j + MovingInputs[i] % 2;
9939 }
9940 V1 = DAG.getNode(
9941 ISD::BITCAST, DL, MVT::v16i8,
9942 DAG.getVectorShuffle(MVT::v8i16, DL,
9943 DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, V1),
9944 DAG.getUNDEF(MVT::v8i16), PreDupI16Shuffle));
9946 // Unpack the bytes to form the i16s that will be shuffled into place.
9947 V1 = DAG.getNode(TargetLo ? X86ISD::UNPCKL : X86ISD::UNPCKH, DL,
9948 MVT::v16i8, V1, V1);
9950 int PostDupI16Shuffle[8] = {-1, -1, -1, -1, -1, -1, -1, -1};
9951 for (int i = 0; i < 16; ++i)
9952 if (Mask[i] != -1) {
9953 int MappedMask = LaneMap[Mask[i]] - (TargetLo ? 0 : 8);
9954 assert(MappedMask < 8 && "Invalid v8 shuffle mask!");
9955 if (PostDupI16Shuffle[i / 2] == -1)
9956 PostDupI16Shuffle[i / 2] = MappedMask;
9957 else
9958 assert(PostDupI16Shuffle[i / 2] == MappedMask &&
9959 "Conflicting entries in the original shuffle!");
9960 }
9961 return DAG.getNode(
9962 ISD::BITCAST, DL, MVT::v16i8,
9963 DAG.getVectorShuffle(MVT::v8i16, DL,
9964 DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, V1),
9965 DAG.getUNDEF(MVT::v8i16), PostDupI16Shuffle));
9967 if (SDValue V = tryToWidenViaDuplication())
9968 return V;
9969 }
9971 // Check whether an interleaving lowering is likely to be more efficient.
9972 // This isn't perfect but it is a strong heuristic that tends to work well on
9973 // the kinds of shuffles that show up in practice.
9975 // FIXME: We need to handle other interleaving widths (i16, i32, ...).
9976 if (shouldLowerAsInterleaving(Mask)) {
9977 int NumLoHalf = std::count_if(Mask.begin(), Mask.end(), [](int M) {
9978 return (M >= 0 && M < 8) || (M >= 16 && M < 24);
9980 int NumHiHalf = std::count_if(Mask.begin(), Mask.end(), [](int M) {
9981 return (M >= 8 && M < 16) || M >= 24;
9983 int EMask[16] = {-1, -1, -1, -1, -1, -1, -1, -1,
9984 -1, -1, -1, -1, -1, -1, -1, -1};
9985 int OMask[16] = {-1, -1, -1, -1, -1, -1, -1, -1,
9986 -1, -1, -1, -1, -1, -1, -1, -1};
9987 bool UnpackLo = NumLoHalf >= NumHiHalf;
9988 MutableArrayRef<int> TargetEMask(UnpackLo ? EMask : EMask + 8, 8);
9989 MutableArrayRef<int> TargetOMask(UnpackLo ? OMask : OMask + 8, 8);
9990 for (int i = 0; i < 8; ++i) {
9991 TargetEMask[i] = Mask[2 * i];
9992 TargetOMask[i] = Mask[2 * i + 1];
9995 SDValue Evens = DAG.getVectorShuffle(MVT::v16i8, DL, V1, V2, EMask);
9996 SDValue Odds = DAG.getVectorShuffle(MVT::v16i8, DL, V1, V2, OMask);
9998 return DAG.getNode(UnpackLo ? X86ISD::UNPCKL : X86ISD::UNPCKH, DL,
9999 MVT::v16i8, Evens, Odds);
10002 // Check for SSSE3 which lets us lower all v16i8 shuffles much more directly
10003 // with PSHUFB. It is important to do this before we attempt to generate any
10004 // blends but after all of the single-input lowerings. If the single input
10005 // lowerings can find an instruction sequence that is faster than a PSHUFB, we
10006 // want to preserve that and we can DAG combine any longer sequences into
10007 // a PSHUFB in the end. But once we start blending from multiple inputs,
10008 // the complexity of DAG combining bad patterns back into PSHUFB is too high,
10009 // and there are *very* few patterns that would actually be faster than the
10010 // PSHUFB approach because of its ability to zero lanes.
10012 // FIXME: The only exceptions to the above are blends which are exact
10013 // interleavings with direct instructions supporting them. We currently don't
10014 // handle those well here.
10015 if (Subtarget->hasSSSE3()) {
10016 SDValue V1Mask[16];
10017 SDValue V2Mask[16];
10018 bool V1InUse = false;
10019 bool V2InUse = false;
10020 SmallBitVector Zeroable = computeZeroableShuffleElements(Mask, V1, V2);
10022 for (int i = 0; i < 16; ++i) {
10023 if (Mask[i] == -1) {
10024 V1Mask[i] = V2Mask[i] = DAG.getUNDEF(MVT::i8);
10025 } else {
10026 const int ZeroMask = 0x80;
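// PSHUFB zeroes a destination byte whenever bit 7 of its control byte is set,
// so 0x80 marks lanes that should come from the other source or stay zero.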
10027 int V1Idx = (Mask[i] < 16 ? Mask[i] : ZeroMask);
10028 int V2Idx = (Mask[i] < 16 ? ZeroMask : Mask[i] - 16);
10029 if (Zeroable[i])
10030 V1Idx = V2Idx = ZeroMask;
10031 V1Mask[i] = DAG.getConstant(V1Idx, MVT::i8);
10032 V2Mask[i] = DAG.getConstant(V2Idx, MVT::i8);
10033 V1InUse |= (ZeroMask != V1Idx);
10034 V2InUse |= (ZeroMask != V2Idx);
10035 }
10036 }
10038 // If both V1 and V2 are in use and we can use a direct blend, do so. This
10039 // avoids using blends to handle blends-with-zero which is important as
10040 // a single pshufb is significantly faster for that.
10041 if (V1InUse && V2InUse && Subtarget->hasSSE41())
10042 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v16i8, V1, V2, Mask,
10043 Subtarget, DAG))
10044 return Blend;
10047 if (V1InUse)
10048 V1 = DAG.getNode(X86ISD::PSHUFB, DL, MVT::v16i8, V1,
10049 DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v16i8, V1Mask));
10050 if (V2InUse)
10051 V2 = DAG.getNode(X86ISD::PSHUFB, DL, MVT::v16i8, V2,
10052 DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v16i8, V2Mask));
10054 // If we need shuffled inputs from both, blend the two.
10055 if (V1InUse && V2InUse)
10056 return DAG.getNode(ISD::OR, DL, MVT::v16i8, V1, V2);
10057 if (V1InUse)
10058 return V1; // Single inputs are easy.
10059 if (V2InUse)
10060 return V2; // Single inputs are easy.
10061 // Shuffling to a zeroable vector.
10062 return getZeroVector(MVT::v16i8, Subtarget, DAG, DL);
10063 }
10065 // There are special ways we can lower some single-element blends.
10066 if (NumV2Elements == 1)
10067 if (SDValue V = lowerVectorShuffleAsElementInsertion(MVT::v16i8, DL, V1, V2,
10068 Mask, Subtarget, DAG))
10071 if (SDValue BitBlend =
10072 lowerVectorShuffleAsBitBlend(DL, MVT::v16i8, V1, V2, Mask, DAG))
10075 // Check whether a compaction lowering can be done. This handles shuffles
10076 // which take every Nth element for some even N. See the helper function for
10079 // We special case these as they can be particularly efficiently handled with
10080 // the PACKUSWB instruction on x86 and they show up in common patterns of
10081 // rearranging bytes to truncate wide elements.
10082 if (int NumEvenDrops = canLowerByDroppingEvenElements(Mask)) {
10083 // NumEvenDrops is the power of two stride of the elements. Another way of
10084 // thinking about it is that we need to drop the even elements this many
10085 // times to get the original input.
10086 bool IsSingleInput = isSingleInputShuffleMask(Mask);
10088 // First we need to zero all the dropped bytes.
10089 assert(NumEvenDrops <= 3 &&
10090 "No support for dropping even elements more than 3 times.");
10091 // We use the mask type to pick which bytes are preserved based on how many
10092 // elements are dropped.
10093 MVT MaskVTs[] = { MVT::v8i16, MVT::v4i32, MVT::v2i64 };
10094 SDValue ByteClearMask =
10095 DAG.getNode(ISD::BITCAST, DL, MVT::v16i8,
10096 DAG.getConstant(0xFF, MaskVTs[NumEvenDrops - 1]));
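// E.g. with NumEvenDrops == 1 this is a v8i16 splat of 0x00FF, so the ANDs
// below clear every odd byte and the PACKUS keeps the surviving even bytes.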
10097 V1 = DAG.getNode(ISD::AND, DL, MVT::v16i8, V1, ByteClearMask);
10098 if (!IsSingleInput)
10099 V2 = DAG.getNode(ISD::AND, DL, MVT::v16i8, V2, ByteClearMask);
10101 // Now pack things back together.
10102 V1 = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, V1);
10103 V2 = IsSingleInput ? V1 : DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, V2);
10104 SDValue Result = DAG.getNode(X86ISD::PACKUS, DL, MVT::v16i8, V1, V2);
10105 for (int i = 1; i < NumEvenDrops; ++i) {
10106 Result = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, Result);
10107 Result = DAG.getNode(X86ISD::PACKUS, DL, MVT::v16i8, Result, Result);
10108 }
10110 return Result;
10111 }
10113 int V1LoBlendMask[8] = {-1, -1, -1, -1, -1, -1, -1, -1};
10114 int V1HiBlendMask[8] = {-1, -1, -1, -1, -1, -1, -1, -1};
10115 int V2LoBlendMask[8] = {-1, -1, -1, -1, -1, -1, -1, -1};
10116 int V2HiBlendMask[8] = {-1, -1, -1, -1, -1, -1, -1, -1};
10118 auto buildBlendMasks = [](MutableArrayRef<int> HalfMask,
10119 MutableArrayRef<int> V1HalfBlendMask,
10120 MutableArrayRef<int> V2HalfBlendMask) {
10121 for (int i = 0; i < 8; ++i)
10122 if (HalfMask[i] >= 0 && HalfMask[i] < 16) {
10123 V1HalfBlendMask[i] = HalfMask[i];
10124 HalfMask[i] = i;
10125 } else if (HalfMask[i] >= 16) {
10126 V2HalfBlendMask[i] = HalfMask[i] - 16;
10127 HalfMask[i] = i + 8;
10128 }
10129 };
10130 buildBlendMasks(LoMask, V1LoBlendMask, V2LoBlendMask);
10131 buildBlendMasks(HiMask, V1HiBlendMask, V2HiBlendMask);
10133 SDValue Zero = getZeroVector(MVT::v8i16, Subtarget, DAG, DL);
10135 auto buildLoAndHiV8s = [&](SDValue V, MutableArrayRef<int> LoBlendMask,
10136 MutableArrayRef<int> HiBlendMask) {
10137 SDValue V1, V2;
10138 // Check if any of the odd lanes in the v16i8 are used. If not, we can mask
10139 // them out and avoid using UNPCK{L,H} to extract the elements of V as
10140 // i16s.
10141 if (std::none_of(LoBlendMask.begin(), LoBlendMask.end(),
10142 [](int M) { return M >= 0 && M % 2 == 1; }) &&
10143 std::none_of(HiBlendMask.begin(), HiBlendMask.end(),
10144 [](int M) { return M >= 0 && M % 2 == 1; })) {
10145 // Use a mask to drop the high bytes.
10146 V1 = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, V);
10147 V1 = DAG.getNode(ISD::AND, DL, MVT::v8i16, V1,
10148 DAG.getConstant(0x00FF, MVT::v8i16));
10150 // This will be a single vector shuffle instead of a blend so nuke V2.
10151 V2 = DAG.getUNDEF(MVT::v8i16);
10153 // Squash the masks to point directly into V1.
10154 for (int &M : LoBlendMask)
10155 if (M >= 0)
10156 M /= 2;
10157 for (int &M : HiBlendMask)
10158 if (M >= 0)
10159 M /= 2;
10160 } else {
10161 // Otherwise just unpack the low half of V into V1 and the high half into
10162 // V2 so that we can blend them as i16s.
10163 V1 = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16,
10164 DAG.getNode(X86ISD::UNPCKL, DL, MVT::v16i8, V, Zero));
10165 V2 = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16,
10166 DAG.getNode(X86ISD::UNPCKH, DL, MVT::v16i8, V, Zero));
10169 SDValue BlendedLo = DAG.getVectorShuffle(MVT::v8i16, DL, V1, V2, LoBlendMask);
10170 SDValue BlendedHi = DAG.getVectorShuffle(MVT::v8i16, DL, V1, V2, HiBlendMask);
10171 return std::make_pair(BlendedLo, BlendedHi);
10172 };
10173 SDValue V1Lo, V1Hi, V2Lo, V2Hi;
10174 std::tie(V1Lo, V1Hi) = buildLoAndHiV8s(V1, V1LoBlendMask, V1HiBlendMask);
10175 std::tie(V2Lo, V2Hi) = buildLoAndHiV8s(V2, V2LoBlendMask, V2HiBlendMask);
10177 SDValue LoV = DAG.getVectorShuffle(MVT::v8i16, DL, V1Lo, V2Lo, LoMask);
10178 SDValue HiV = DAG.getVectorShuffle(MVT::v8i16, DL, V1Hi, V2Hi, HiMask);
10180 return DAG.getNode(X86ISD::PACKUS, DL, MVT::v16i8, LoV, HiV);
10183 /// \brief Dispatching routine to lower various 128-bit x86 vector shuffles.
10185 /// This routine breaks down the specific type of 128-bit shuffle and
10186 /// dispatches to the lowering routines accordingly.
10187 static SDValue lower128BitVectorShuffle(SDValue Op, SDValue V1, SDValue V2,
10188 MVT VT, const X86Subtarget *Subtarget,
10189 SelectionDAG &DAG) {
10190 switch (VT.SimpleTy) {
10191 case MVT::v2i64:
10192 return lowerV2I64VectorShuffle(Op, V1, V2, Subtarget, DAG);
10193 case MVT::v2f64:
10194 return lowerV2F64VectorShuffle(Op, V1, V2, Subtarget, DAG);
10195 case MVT::v4i32:
10196 return lowerV4I32VectorShuffle(Op, V1, V2, Subtarget, DAG);
10197 case MVT::v4f32:
10198 return lowerV4F32VectorShuffle(Op, V1, V2, Subtarget, DAG);
10199 case MVT::v8i16:
10200 return lowerV8I16VectorShuffle(Op, V1, V2, Subtarget, DAG);
10201 case MVT::v16i8:
10202 return lowerV16I8VectorShuffle(Op, V1, V2, Subtarget, DAG);
10204 default:
10205 llvm_unreachable("Unimplemented!");
10206 }
10207 }
10209 /// \brief Helper function to test whether a shuffle mask could be
10210 /// simplified by widening the elements being shuffled.
10212 /// Appends the mask for wider elements in WidenedMask if valid. Otherwise
10213 /// leaves it in an unspecified state.
10215 /// NOTE: This must handle normal vector shuffle masks and *target* vector
10216 /// shuffle masks. The latter have the special property of a '-2' representing
10217 /// a zero-ed lane of a vector.
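///
/// For example, the v4 mask <2, 3, 6, 7> widens to the v2 mask <1, 3>: each
/// aligned pair of narrow elements collapses onto one wider element.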
10218 static bool canWidenShuffleElements(ArrayRef<int> Mask,
10219 SmallVectorImpl<int> &WidenedMask) {
10220 for (int i = 0, Size = Mask.size(); i < Size; i += 2) {
10221 // If both elements are undef, its trivial.
10222 if (Mask[i] == SM_SentinelUndef && Mask[i + 1] == SM_SentinelUndef) {
10223 WidenedMask.push_back(SM_SentinelUndef);
10227 // Check for an undef mask and a mask value properly aligned to fit with
10228 // a pair of values. If we find such a case, use the non-undef mask's value.
10229 if (Mask[i] == SM_SentinelUndef && Mask[i + 1] >= 0 && Mask[i + 1] % 2 == 1) {
10230 WidenedMask.push_back(Mask[i + 1] / 2);
10233 if (Mask[i + 1] == SM_SentinelUndef && Mask[i] >= 0 && Mask[i] % 2 == 0) {
10234 WidenedMask.push_back(Mask[i] / 2);
10238 // When zeroing, we need to spread the zeroing across both lanes to widen.
10239 if (Mask[i] == SM_SentinelZero || Mask[i + 1] == SM_SentinelZero) {
10240 if ((Mask[i] == SM_SentinelZero || Mask[i] == SM_SentinelUndef) &&
10241 (Mask[i + 1] == SM_SentinelZero || Mask[i + 1] == SM_SentinelUndef)) {
10242 WidenedMask.push_back(SM_SentinelZero);
10243 continue;
10244 }
10245 return false;
10246 }
10248 // Finally check if the two mask values are adjacent and aligned with
10249 // their pair width to allow widening them.
10250 if (Mask[i] != SM_SentinelUndef && Mask[i] % 2 == 0 && Mask[i] + 1 == Mask[i + 1]) {
10251 WidenedMask.push_back(Mask[i] / 2);
10252 continue;
10253 }
10255 // Otherwise we can't safely widen the elements used in this shuffle.
10256 return false;
10257 }
10258 assert(WidenedMask.size() == Mask.size() / 2 &&
10259 "Incorrect size of mask after widening the elements!");
10261 return true;
10262 }
10264 /// \brief Generic routine to split vector shuffle into half-sized shuffles.
10266 /// This routine just extracts two subvectors, shuffles them independently, and
10267 /// then concatenates them back together. This should work effectively with all
10268 /// AVX vector shuffle types.
10269 static SDValue splitAndLowerVectorShuffle(SDLoc DL, MVT VT, SDValue V1,
10270 SDValue V2, ArrayRef<int> Mask,
10271 SelectionDAG &DAG) {
10272 assert(VT.getSizeInBits() >= 256 &&
10273 "Only for 256-bit or wider vector shuffles!");
10274 assert(V1.getSimpleValueType() == VT && "Bad operand type!");
10275 assert(V2.getSimpleValueType() == VT && "Bad operand type!");
10277 ArrayRef<int> LoMask = Mask.slice(0, Mask.size() / 2);
10278 ArrayRef<int> HiMask = Mask.slice(Mask.size() / 2);
10280 int NumElements = VT.getVectorNumElements();
10281 int SplitNumElements = NumElements / 2;
10282 MVT ScalarVT = VT.getScalarType();
10283 MVT SplitVT = MVT::getVectorVT(ScalarVT, NumElements / 2);
10285 // Rather than splitting build-vectors, just build two narrower build
10286 // vectors. This helps shuffling with splats and zeros.
10287 auto SplitVector = [&](SDValue V) {
10288 while (V.getOpcode() == ISD::BITCAST)
10289 V = V->getOperand(0);
10291 MVT OrigVT = V.getSimpleValueType();
10292 int OrigNumElements = OrigVT.getVectorNumElements();
10293 int OrigSplitNumElements = OrigNumElements / 2;
10294 MVT OrigScalarVT = OrigVT.getScalarType();
10295 MVT OrigSplitVT = MVT::getVectorVT(OrigScalarVT, OrigNumElements / 2);
10297 SDValue LoV, HiV;
10299 auto *BV = dyn_cast<BuildVectorSDNode>(V);
10300 if (!BV) {
10301 LoV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, OrigSplitVT, V,
10302 DAG.getIntPtrConstant(0));
10303 HiV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, OrigSplitVT, V,
10304 DAG.getIntPtrConstant(OrigSplitNumElements));
10305 } else {
10307 SmallVector<SDValue, 16> LoOps, HiOps;
10308 for (int i = 0; i < OrigSplitNumElements; ++i) {
10309 LoOps.push_back(BV->getOperand(i));
10310 HiOps.push_back(BV->getOperand(i + OrigSplitNumElements));
10312 LoV = DAG.getNode(ISD::BUILD_VECTOR, DL, OrigSplitVT, LoOps);
10313 HiV = DAG.getNode(ISD::BUILD_VECTOR, DL, OrigSplitVT, HiOps);
10315 return std::make_pair(DAG.getNode(ISD::BITCAST, DL, SplitVT, LoV),
10316 DAG.getNode(ISD::BITCAST, DL, SplitVT, HiV));
10319 SDValue LoV1, HiV1, LoV2, HiV2;
10320 std::tie(LoV1, HiV1) = SplitVector(V1);
10321 std::tie(LoV2, HiV2) = SplitVector(V2);
10323 // Now create two 4-way blends of these half-width vectors.
10324 auto HalfBlend = [&](ArrayRef<int> HalfMask) {
10325 bool UseLoV1 = false, UseHiV1 = false, UseLoV2 = false, UseHiV2 = false;
10326 SmallVector<int, 32> V1BlendMask, V2BlendMask, BlendMask;
10327 for (int i = 0; i < SplitNumElements; ++i) {
10328 int M = HalfMask[i];
10329 if (M >= NumElements) {
10330 if (M >= NumElements + SplitNumElements)
10331 UseHiV2 = true;
10332 else
10333 UseLoV2 = true;
10334 V2BlendMask.push_back(M - NumElements);
10335 V1BlendMask.push_back(-1);
10336 BlendMask.push_back(SplitNumElements + i);
10337 } else if (M >= 0) {
10338 if (M >= SplitNumElements)
10339 UseHiV1 = true;
10340 else
10341 UseLoV1 = true;
10342 V2BlendMask.push_back(-1);
10343 V1BlendMask.push_back(M);
10344 BlendMask.push_back(i);
10345 } else {
10346 V2BlendMask.push_back(-1);
10347 V1BlendMask.push_back(-1);
10348 BlendMask.push_back(-1);
10352 // Because the lowering happens after all combining takes place, we need to
10353 // manually combine these blend masks as much as possible so that we create
10354 // a minimal number of high-level vector shuffle nodes.
10356 // First try just blending the halves of V1 or V2.
10357 if (!UseLoV1 && !UseHiV1 && !UseLoV2 && !UseHiV2)
10358 return DAG.getUNDEF(SplitVT);
10359 if (!UseLoV2 && !UseHiV2)
10360 return DAG.getVectorShuffle(SplitVT, DL, LoV1, HiV1, V1BlendMask);
10361 if (!UseLoV1 && !UseHiV1)
10362 return DAG.getVectorShuffle(SplitVT, DL, LoV2, HiV2, V2BlendMask);
10364 SDValue V1Blend, V2Blend;
10365 if (UseLoV1 && UseHiV1) {
10366 V1Blend =
10367 DAG.getVectorShuffle(SplitVT, DL, LoV1, HiV1, V1BlendMask);
10368 } else {
10369 // We only use half of V1 so map the usage down into the final blend mask.
10370 V1Blend = UseLoV1 ? LoV1 : HiV1;
10371 for (int i = 0; i < SplitNumElements; ++i)
10372 if (BlendMask[i] >= 0 && BlendMask[i] < SplitNumElements)
10373 BlendMask[i] = V1BlendMask[i] - (UseLoV1 ? 0 : SplitNumElements);
10375 if (UseLoV2 && UseHiV2) {
10376 V2Blend =
10377 DAG.getVectorShuffle(SplitVT, DL, LoV2, HiV2, V2BlendMask);
10378 } else {
10379 // We only use half of V2 so map the usage down into the final blend mask.
10380 V2Blend = UseLoV2 ? LoV2 : HiV2;
10381 for (int i = 0; i < SplitNumElements; ++i)
10382 if (BlendMask[i] >= SplitNumElements)
10383 BlendMask[i] = V2BlendMask[i] + (UseLoV2 ? SplitNumElements : 0);
10385 return DAG.getVectorShuffle(SplitVT, DL, V1Blend, V2Blend, BlendMask);
10387 SDValue Lo = HalfBlend(LoMask);
10388 SDValue Hi = HalfBlend(HiMask);
10389 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Lo, Hi);
10392 /// \brief Either split a vector in halves or decompose the shuffles and the
10393 /// blend.
10394 ///
10395 /// This is provided as a good fallback for many lowerings of non-single-input
10396 /// shuffles with more than one 128-bit lane. In those cases, we want to select
10397 /// between splitting the shuffle into 128-bit components and stitching those
10398 /// back together vs. extracting the single-input shuffles and blending those
10399 /// results.
10400 static SDValue lowerVectorShuffleAsSplitOrBlend(SDLoc DL, MVT VT, SDValue V1,
10401 SDValue V2, ArrayRef<int> Mask,
10402 SelectionDAG &DAG) {
10403 assert(!isSingleInputShuffleMask(Mask) && "This routine must not be used to "
10404 "lower single-input shuffles as it "
10405 "could then recurse on itself.");
10406 int Size = Mask.size();
10408 // If this can be modeled as a broadcast of two elements followed by a blend,
10409 // prefer that lowering. This is especially important because broadcasts can
10410 // often fold with memory operands.
10411 auto DoBothBroadcast = [&] {
10412 int V1BroadcastIdx = -1, V2BroadcastIdx = -1;
10413 for (int M : Mask)
10414 if (M >= Size) {
10415 if (V2BroadcastIdx == -1)
10416 V2BroadcastIdx = M - Size;
10417 else if (M - Size != V2BroadcastIdx)
10418 return false;
10419 } else if (M >= 0) {
10420 if (V1BroadcastIdx == -1)
10421 V1BroadcastIdx = M;
10422 else if (M != V1BroadcastIdx)
10423 return false;
10424 }
10425 return true;
10426 };
10427 if (DoBothBroadcast())
10428 return lowerVectorShuffleAsDecomposedShuffleBlend(DL, VT, V1, V2, Mask,
10431 // If the inputs all stem from a single 128-bit lane of each input, then we
10432 // split them rather than blending because the split will decompose to
10433 // unusually few instructions.
10434 int LaneCount = VT.getSizeInBits() / 128;
10435 int LaneSize = Size / LaneCount;
10436 SmallBitVector LaneInputs[2];
10437 LaneInputs[0].resize(LaneCount, false);
10438 LaneInputs[1].resize(LaneCount, false);
10439 for (int i = 0; i < Size; ++i)
10440 if (Mask[i] >= 0)
10441 LaneInputs[Mask[i] / Size][(Mask[i] % Size) / LaneSize] = true;
10442 if (LaneInputs[0].count() <= 1 && LaneInputs[1].count() <= 1)
10443 return splitAndLowerVectorShuffle(DL, VT, V1, V2, Mask, DAG);
10445 // Otherwise, just fall back to decomposed shuffles and a blend. This requires
10446 // that the decomposed single-input shuffles don't end up here.
10447 return lowerVectorShuffleAsDecomposedShuffleBlend(DL, VT, V1, V2, Mask, DAG);
10450 /// \brief Lower a vector shuffle crossing multiple 128-bit lanes as
10451 /// a permutation and blend of those lanes.
10453 /// This essentially blends the out-of-lane inputs to each lane into the lane
10454 /// from a permuted copy of the vector. This lowering strategy results in four
10455 /// instructions in the worst case for a single-input cross lane shuffle which
10456 /// is lower than any other fully general cross-lane shuffle strategy I'm aware
10457 /// of. Special cases for each particular shuffle pattern should be handled
10458 /// prior to trying this lowering.
10459 static SDValue lowerVectorShuffleAsLanePermuteAndBlend(SDLoc DL, MVT VT,
10460 SDValue V1, SDValue V2,
10461 ArrayRef<int> Mask,
10462 SelectionDAG &DAG) {
10463 // FIXME: This should probably be generalized for 512-bit vectors as well.
10464 assert(VT.getSizeInBits() == 256 && "Only for 256-bit vector shuffles!");
10465 int LaneSize = Mask.size() / 2;
10467 // If there are only inputs from one 128-bit lane, splitting will in fact be
10468 // less expensive. The flags track whether the given lane contains an element
10469 // that crosses to another lane.
10470 bool LaneCrossing[2] = {false, false};
10471 for (int i = 0, Size = Mask.size(); i < Size; ++i)
10472 if (Mask[i] >= 0 && (Mask[i] % Size) / LaneSize != i / LaneSize)
10473 LaneCrossing[(Mask[i] % Size) / LaneSize] = true;
10474 if (!LaneCrossing[0] || !LaneCrossing[1])
10475 return splitAndLowerVectorShuffle(DL, VT, V1, V2, Mask, DAG);
10477 if (isSingleInputShuffleMask(Mask)) {
10478 SmallVector<int, 32> FlippedBlendMask;
10479 for (int i = 0, Size = Mask.size(); i < Size; ++i)
10480 FlippedBlendMask.push_back(
10481 Mask[i] < 0 ? -1 : (((Mask[i] % Size) / LaneSize == i / LaneSize)
10482 ? Mask[i]
10483 : Mask[i] % LaneSize +
10484 (i / LaneSize) * LaneSize + Size));
10486 // Flip the vector, and blend the results which should now be in-lane. The
10487 // VPERM2X128 mask uses the low 2 bits for the low source and bits 4 and
10488 // 5 for the high source. The value 3 selects the high half of source 2 and
10489 // the value 2 selects the low half of source 2. We only use source 2 to
10490 // allow folding it into a memory operand.
10491 unsigned PERMMask = 3 | 2 << 4;
10492 SDValue Flipped = DAG.getNode(X86ISD::VPERM2X128, DL, VT, DAG.getUNDEF(VT),
10493 V1, DAG.getConstant(PERMMask, MVT::i8));
10494 return DAG.getVectorShuffle(VT, DL, V1, Flipped, FlippedBlendMask);
10497 // This now reduces to two single-input shuffles of V1 and V2 which at worst
10498 // will be handled by the above logic and a blend of the results, much like
10499 // other patterns in AVX.
10500 return lowerVectorShuffleAsDecomposedShuffleBlend(DL, VT, V1, V2, Mask, DAG);
10503 /// \brief Handle lowering 2-lane 128-bit shuffles.
10504 static SDValue lowerV2X128VectorShuffle(SDLoc DL, MVT VT, SDValue V1,
10505 SDValue V2, ArrayRef<int> Mask,
10506 const X86Subtarget *Subtarget,
10507 SelectionDAG &DAG) {
10508 // Blends are faster and handle all the non-lane-crossing cases.
10509 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, VT, V1, V2, Mask,
10510 Subtarget, DAG))
10511 return Blend;
10513 MVT SubVT = MVT::getVectorVT(VT.getVectorElementType(),
10514 VT.getVectorNumElements() / 2);
10515 // Check for patterns which can be matched with a single insert of a 128-bit
10516 // subvector.
10517 if (isShuffleEquivalent(V1, V2, Mask, 0, 1, 0, 1) ||
10518 isShuffleEquivalent(V1, V2, Mask, 0, 1, 4, 5)) {
10519 SDValue LoV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT, V1,
10520 DAG.getIntPtrConstant(0));
10521 SDValue HiV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT,
10522 Mask[2] < 4 ? V1 : V2, DAG.getIntPtrConstant(0));
10523 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, LoV, HiV);
10525 if (isShuffleEquivalent(V1, V2, Mask, 0, 1, 6, 7)) {
10526 SDValue LoV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT, V1,
10527 DAG.getIntPtrConstant(0));
10528 SDValue HiV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT, V2,
10529 DAG.getIntPtrConstant(2));
10530 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, LoV, HiV);
10533 // Otherwise form a 128-bit permutation.
10534 // FIXME: Detect zero-vector inputs and use the VPERM2X128 to zero that half.
10535 unsigned PermMask = Mask[0] / 2 | (Mask[2] / 2) << 4;
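// For example, the v4f64 mask <2, 3, 4, 5> yields PermMask 0x21: the low half
// of the result is V1's high 128 bits and the high half is V2's low 128 bits.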
10536 return DAG.getNode(X86ISD::VPERM2X128, DL, VT, V1, V2,
10537 DAG.getConstant(PermMask, MVT::i8));
10540 /// \brief Lower a vector shuffle by first fixing the 128-bit lanes and then
10541 /// shuffling each lane.
10543 /// This will only succeed when the result of fixing the 128-bit lanes results
10544 /// in a single-input non-lane-crossing shuffle with a repeating shuffle mask in
10545 /// each 128-bit lane. This handles many cases where we can quickly blend away
10546 /// the lane crosses early and then use simpler shuffles within each lane.
10548 /// FIXME: It might be worthwhile at some point to support this without
10549 /// requiring the 128-bit lane-relative shuffles to be repeating, but currently
10550 /// in x86 only floating point has interesting non-repeating shuffles, and even
10551 /// those are still *marginally* more expensive.
10552 static SDValue lowerVectorShuffleByMerging128BitLanes(
10553 SDLoc DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
10554 const X86Subtarget *Subtarget, SelectionDAG &DAG) {
10555 assert(!isSingleInputShuffleMask(Mask) &&
10556 "This is only useful with multiple inputs.");
10558 int Size = Mask.size();
10559 int LaneSize = 128 / VT.getScalarSizeInBits();
10560 int NumLanes = Size / LaneSize;
10561 assert(NumLanes > 1 && "Only handles 256-bit and wider shuffles.");
10563 // See if we can build a hypothetical 128-bit lane-fixing shuffle mask. Also
10564 // check whether the in-128-bit lane shuffles share a repeating pattern.
10565 SmallVector<int, 4> Lanes;
10566 Lanes.resize(NumLanes, -1);
10567 SmallVector<int, 4> InLaneMask;
10568 InLaneMask.resize(LaneSize, -1);
10569 for (int i = 0; i < Size; ++i) {
10570 if (Mask[i] < 0)
10571 continue;
10573 int j = i / LaneSize;
10575 if (Lanes[j] < 0) {
10576 // First entry we've seen for this lane.
10577 Lanes[j] = Mask[i] / LaneSize;
10578 } else if (Lanes[j] != Mask[i] / LaneSize) {
10579 // This doesn't match the lane selected previously!
10580 return SDValue();
10581 }
10583 // Check that within each lane we have a consistent shuffle mask.
10584 int k = i % LaneSize;
10585 if (InLaneMask[k] < 0) {
10586 InLaneMask[k] = Mask[i] % LaneSize;
10587 } else if (InLaneMask[k] != Mask[i] % LaneSize) {
10588 // This doesn't fit a repeating in-lane mask.
10589 return SDValue();
10590 }
10591 }
10593 // First shuffle the lanes into place.
10594 MVT LaneVT = MVT::getVectorVT(VT.isFloatingPoint() ? MVT::f64 : MVT::i64,
10595 VT.getSizeInBits() / 64);
10596 SmallVector<int, 8> LaneMask;
10597 LaneMask.resize(NumLanes * 2, -1);
10598 for (int i = 0; i < NumLanes; ++i)
10599 if (Lanes[i] >= 0) {
10600 LaneMask[2 * i + 0] = 2*Lanes[i] + 0;
10601 LaneMask[2 * i + 1] = 2*Lanes[i] + 1;
10604 V1 = DAG.getNode(ISD::BITCAST, DL, LaneVT, V1);
10605 V2 = DAG.getNode(ISD::BITCAST, DL, LaneVT, V2);
10606 SDValue LaneShuffle = DAG.getVectorShuffle(LaneVT, DL, V1, V2, LaneMask);
10608 // Cast it back to the type we actually want.
10609 LaneShuffle = DAG.getNode(ISD::BITCAST, DL, VT, LaneShuffle);
10611 // Now do a simple shuffle that isn't lane crossing.
10612 SmallVector<int, 8> NewMask;
10613 NewMask.resize(Size, -1);
10614 for (int i = 0; i < Size; ++i)
10615 if (Mask[i] >= 0)
10616 NewMask[i] = (i / LaneSize) * LaneSize + Mask[i] % LaneSize;
10617 assert(!is128BitLaneCrossingShuffleMask(VT, NewMask) &&
10618 "Must not introduce lane crosses at this point!");
10620 return DAG.getVectorShuffle(VT, DL, LaneShuffle, DAG.getUNDEF(VT), NewMask);
10623 /// \brief Test whether the specified input (0 or 1) is in-place blended by the
10624 /// given mask.
10625 ///
10626 /// This returns true if the elements from a particular input are already in the
10627 /// slot required by the given mask and require no permutation.
10628 static bool isShuffleMaskInputInPlace(int Input, ArrayRef<int> Mask) {
10629 assert((Input == 0 || Input == 1) && "Only two inputs to shuffles.");
10630 int Size = Mask.size();
10631 for (int i = 0; i < Size; ++i)
10632 if (Mask[i] >= 0 && Mask[i] / Size == Input && Mask[i] % Size != i)
10633 return false;
10635 return true;
10636 }
10638 /// \brief Handle lowering of 4-lane 64-bit floating point shuffles.
10640 /// Also ends up handling lowering of 4-lane 64-bit integer shuffles when AVX2
10641 /// isn't available.
10642 static SDValue lowerV4F64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
10643 const X86Subtarget *Subtarget,
10644 SelectionDAG &DAG) {
10645 SDLoc DL(Op);
10646 assert(V1.getSimpleValueType() == MVT::v4f64 && "Bad operand type!");
10647 assert(V2.getSimpleValueType() == MVT::v4f64 && "Bad operand type!");
10648 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
10649 ArrayRef<int> Mask = SVOp->getMask();
10650 assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");
10652 SmallVector<int, 4> WidenedMask;
10653 if (canWidenShuffleElements(Mask, WidenedMask))
10654 return lowerV2X128VectorShuffle(DL, MVT::v4f64, V1, V2, Mask, Subtarget,
10657 if (isSingleInputShuffleMask(Mask)) {
10658 // Check for being able to broadcast a single element.
10659 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v4f64, DL, V1,
10660 Mask, Subtarget, DAG))
10663 // Use low duplicate instructions for masks that match their pattern.
10664 if (isShuffleEquivalent(V1, V2, Mask, 0, 0, 2, 2))
10665 return DAG.getNode(X86ISD::MOVDDUP, DL, MVT::v4f64, V1);
10667 if (!is128BitLaneCrossingShuffleMask(MVT::v4f64, Mask)) {
10668 // Non-half-crossing single input shuffles can be lowered with an
10669 // interleaved permutation.
10670 unsigned VPERMILPMask = (Mask[0] == 1) | ((Mask[1] == 1) << 1) |
10671 ((Mask[2] == 3) << 2) | ((Mask[3] == 3) << 3);
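// E.g. the mask <1, 0, 3, 2> gives an immediate of 0b0101, swapping the two
// doubles within each 128-bit lane.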
10672 return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v4f64, V1,
10673 DAG.getConstant(VPERMILPMask, MVT::i8));
10676 // With AVX2 we have direct support for this permutation.
10677 if (Subtarget->hasAVX2())
10678 return DAG.getNode(X86ISD::VPERMI, DL, MVT::v4f64, V1,
10679 getV4X86ShuffleImm8ForMask(Mask, DAG));
10681 // Otherwise, fall back.
10682 return lowerVectorShuffleAsLanePermuteAndBlend(DL, MVT::v4f64, V1, V2, Mask,
10683 DAG);
10684 }
10686 // X86 has dedicated unpack instructions that can handle specific blend
10687 // operations: UNPCKH and UNPCKL.
10688 if (isShuffleEquivalent(V1, V2, Mask, 0, 4, 2, 6))
10689 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v4f64, V1, V2);
10690 if (isShuffleEquivalent(V1, V2, Mask, 1, 5, 3, 7))
10691 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v4f64, V1, V2);
10692 if (isShuffleEquivalent(V1, V2, Mask, 4, 0, 6, 2))
10693 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v4f64, V2, V1);
10694 if (isShuffleEquivalent(V1, V2, Mask, 5, 1, 7, 3))
10695 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v4f64, V2, V1);
10697 // If we have a single input to the zero element, insert that into V1 if we
10698 // can do so cheaply.
10699 int NumV2Elements =
10700 std::count_if(Mask.begin(), Mask.end(), [](int M) { return M >= 4; });
10701 if (NumV2Elements == 1 && Mask[0] >= 4)
10702 if (SDValue Insertion = lowerVectorShuffleAsElementInsertion(
10703 MVT::v4f64, DL, V1, V2, Mask, Subtarget, DAG))
10706 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v4f64, V1, V2, Mask,
10707 Subtarget, DAG))
10708 return Blend;
10710 // Check if the blend happens to exactly fit that of SHUFPD.
10711 if ((Mask[0] == -1 || Mask[0] < 2) &&
10712 (Mask[1] == -1 || (Mask[1] >= 4 && Mask[1] < 6)) &&
10713 (Mask[2] == -1 || (Mask[2] >= 2 && Mask[2] < 4)) &&
10714 (Mask[3] == -1 || Mask[3] >= 6)) {
10715 unsigned SHUFPDMask = (Mask[0] == 1) | ((Mask[1] == 5) << 1) |
10716 ((Mask[2] == 3) << 2) | ((Mask[3] == 7) << 3);
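// E.g. <1, 5, 3, 7> gives SHUFPDMask 0b1111: take the odd (high) element of
// each input within each 128-bit lane.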
10717 return DAG.getNode(X86ISD::SHUFP, DL, MVT::v4f64, V1, V2,
10718 DAG.getConstant(SHUFPDMask, MVT::i8));
10720 if ((Mask[0] == -1 || (Mask[0] >= 4 && Mask[0] < 6)) &&
10721 (Mask[1] == -1 || Mask[1] < 2) &&
10722 (Mask[2] == -1 || Mask[2] >= 6) &&
10723 (Mask[3] == -1 || (Mask[3] >= 2 && Mask[3] < 4))) {
10724 unsigned SHUFPDMask = (Mask[0] == 5) | ((Mask[1] == 1) << 1) |
10725 ((Mask[2] == 7) << 2) | ((Mask[3] == 3) << 3);
10726 return DAG.getNode(X86ISD::SHUFP, DL, MVT::v4f64, V2, V1,
10727 DAG.getConstant(SHUFPDMask, MVT::i8));
10730 // Try to simplify this by merging 128-bit lanes to enable a lane-based
10731 // shuffle. However, if we have AVX2 and either inputs are already in place,
10732 // we will be able to shuffle even across lanes the other input in a single
10733 // instruction so skip this pattern.
10734 if (!(Subtarget->hasAVX2() && (isShuffleMaskInputInPlace(0, Mask) ||
10735 isShuffleMaskInputInPlace(1, Mask))))
10736 if (SDValue Result = lowerVectorShuffleByMerging128BitLanes(
10737 DL, MVT::v4f64, V1, V2, Mask, Subtarget, DAG))
10740 // If we have AVX2 then we always want to lower with a blend because, for v4f64, we
10741 // can fully permute the elements.
10742 if (Subtarget->hasAVX2())
10743 return lowerVectorShuffleAsDecomposedShuffleBlend(DL, MVT::v4f64, V1, V2,
10746 // Otherwise fall back on generic lowering.
10747 return lowerVectorShuffleAsSplitOrBlend(DL, MVT::v4f64, V1, V2, Mask, DAG);
10750 /// \brief Handle lowering of 4-lane 64-bit integer shuffles.
10752 /// This routine is only called when we have AVX2 and thus a reasonable
10753 /// instruction set for v4i64 shuffling.
10754 static SDValue lowerV4I64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
10755 const X86Subtarget *Subtarget,
10756 SelectionDAG &DAG) {
10757 SDLoc DL(Op);
10758 assert(V1.getSimpleValueType() == MVT::v4i64 && "Bad operand type!");
10759 assert(V2.getSimpleValueType() == MVT::v4i64 && "Bad operand type!");
10760 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
10761 ArrayRef<int> Mask = SVOp->getMask();
10762 assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");
10763 assert(Subtarget->hasAVX2() && "We can only lower v4i64 with AVX2!");
10765 SmallVector<int, 4> WidenedMask;
10766 if (canWidenShuffleElements(Mask, WidenedMask))
10767 return lowerV2X128VectorShuffle(DL, MVT::v4i64, V1, V2, Mask, Subtarget,
10770 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v4i64, V1, V2, Mask,
10771 Subtarget, DAG))
10772 return Blend;
10774 // Check for being able to broadcast a single element.
10775 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v4i64, DL, V1,
10776 Mask, Subtarget, DAG))
10779 // When the shuffle is mirrored between the 128-bit lanes of the unit, we can
10780 // use lower latency instructions that will operate on both 128-bit lanes.
10781 SmallVector<int, 2> RepeatedMask;
10782 if (is128BitLaneRepeatedShuffleMask(MVT::v4i64, Mask, RepeatedMask)) {
10783 if (isSingleInputShuffleMask(Mask)) {
10784 int PSHUFDMask[] = {-1, -1, -1, -1};
10785 for (int i = 0; i < 2; ++i)
10786 if (RepeatedMask[i] >= 0) {
10787 PSHUFDMask[2 * i] = 2 * RepeatedMask[i];
10788 PSHUFDMask[2 * i + 1] = 2 * RepeatedMask[i] + 1;
10789 }
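// For example, a repeated mask of <1, 0> expands to the v8i32 PSHUFD mask
// <2, 3, 0, 1>, i.e. it swaps the two i64 halves within each 128-bit lane.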
10790 return DAG.getNode(
10791 ISD::BITCAST, DL, MVT::v4i64,
10792 DAG.getNode(X86ISD::PSHUFD, DL, MVT::v8i32,
10793 DAG.getNode(ISD::BITCAST, DL, MVT::v8i32, V1),
10794 getV4X86ShuffleImm8ForMask(PSHUFDMask, DAG)));
10798 // AVX2 provides a direct instruction for permuting a single input across
10799 // lanes.
10800 if (isSingleInputShuffleMask(Mask))
10801 return DAG.getNode(X86ISD::VPERMI, DL, MVT::v4i64, V1,
10802 getV4X86ShuffleImm8ForMask(Mask, DAG));
10804 // Try to use shift instructions.
10805 if (SDValue Shift =
10806 lowerVectorShuffleAsShift(DL, MVT::v4i64, V1, V2, Mask, DAG))
10809 // Use dedicated unpack instructions for masks that match their pattern.
10810 if (isShuffleEquivalent(V1, V2, Mask, 0, 4, 2, 6))
10811 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v4i64, V1, V2);
10812 if (isShuffleEquivalent(V1, V2, Mask, 1, 5, 3, 7))
10813 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v4i64, V1, V2);
10814 if (isShuffleEquivalent(V1, V2, Mask, 4, 0, 6, 2))
10815 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v4i64, V2, V1);
10816 if (isShuffleEquivalent(V1, V2, Mask, 5, 1, 7, 3))
10817 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v4i64, V2, V1);
10819 // Try to simplify this by merging 128-bit lanes to enable a lane-based
10820 // shuffle. However, if we have AVX2 and either inputs are already in place,
10821 // we will be able to shuffle even across lanes the other input in a single
10822 // instruction so skip this pattern.
10823 if (!(Subtarget->hasAVX2() && (isShuffleMaskInputInPlace(0, Mask) ||
10824 isShuffleMaskInputInPlace(1, Mask))))
10825 if (SDValue Result = lowerVectorShuffleByMerging128BitLanes(
10826 DL, MVT::v4i64, V1, V2, Mask, Subtarget, DAG))
10829 // Otherwise fall back on generic blend lowering.
10830 return lowerVectorShuffleAsDecomposedShuffleBlend(DL, MVT::v4i64, V1, V2,
10834 /// \brief Handle lowering of 8-lane 32-bit floating point shuffles.
10836 /// Also ends up handling lowering of 8-lane 32-bit integer shuffles when AVX2
10837 /// isn't available.
10838 static SDValue lowerV8F32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
10839 const X86Subtarget *Subtarget,
10840 SelectionDAG &DAG) {
10842 assert(V1.getSimpleValueType() == MVT::v8f32 && "Bad operand type!");
10843 assert(V2.getSimpleValueType() == MVT::v8f32 && "Bad operand type!");
10844 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
10845 ArrayRef<int> Mask = SVOp->getMask();
10846 assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
10848 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v8f32, V1, V2, Mask,
10852 // Check for being able to broadcast a single element.
10853 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v8f32, DL, V1,
10854 Mask, Subtarget, DAG))
10857 // If the shuffle mask is repeated in each 128-bit lane, we have many more
10858 // options to efficiently lower the shuffle.
10859 SmallVector<int, 4> RepeatedMask;
10860 if (is128BitLaneRepeatedShuffleMask(MVT::v8f32, Mask, RepeatedMask)) {
10861 assert(RepeatedMask.size() == 4 &&
10862 "Repeated masks must be half the mask width!");
10864 // Use even/odd duplicate instructions for masks that match their pattern.
10865 if (isShuffleEquivalent(V1, V2, Mask, 0, 0, 2, 2, 4, 4, 6, 6))
10866 return DAG.getNode(X86ISD::MOVSLDUP, DL, MVT::v8f32, V1);
10867 if (isShuffleEquivalent(V1, V2, Mask, 1, 1, 3, 3, 5, 5, 7, 7))
10868 return DAG.getNode(X86ISD::MOVSHDUP, DL, MVT::v8f32, V1);
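// With a single input and a mask that repeats across both 128-bit lanes,
// VPERMILPS can apply the same 4-element permutation to both lanes using only
// an immediate.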
10870 if (isSingleInputShuffleMask(Mask))
10871 return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v8f32, V1,
10872 getV4X86ShuffleImm8ForMask(RepeatedMask, DAG));
10874 // Use dedicated unpack instructions for masks that match their pattern.
10875 if (isShuffleEquivalent(V1, V2, Mask, 0, 8, 1, 9, 4, 12, 5, 13))
10876 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v8f32, V1, V2);
10877 if (isShuffleEquivalent(V1, V2, Mask, 2, 10, 3, 11, 6, 14, 7, 15))
10878 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v8f32, V1, V2);
10879 if (isShuffleEquivalent(V1, V2, Mask, 8, 0, 9, 1, 12, 4, 13, 5))
10880 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v8f32, V2, V1);
10881 if (isShuffleEquivalent(V1, V2, Mask, 10, 2, 11, 3, 14, 6, 15, 7))
10882 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v8f32, V2, V1);
10884 // Otherwise, fall back to a SHUFPS sequence. Here it is important that we
10885 // have already handled any direct blends. We also need to squash the
10886 // repeated mask into a simulated v4f32 mask.
10887 for (int i = 0; i < 4; ++i)
10888 if (RepeatedMask[i] >= 8)
10889 RepeatedMask[i] -= 4;
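// For example, a repeated mask <2, 8, 1, 11> (8 and 11 referring to V2) is
// squashed to the v4f32-style mask <2, 4, 1, 7> that
// lowerVectorShuffleWithSHUFPS expects.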
10890 return lowerVectorShuffleWithSHUFPS(DL, MVT::v8f32, RepeatedMask, V1, V2, DAG);
10893 // If we have a single input shuffle with different shuffle patterns in the
10894 // two 128-bit lanes use the variable mask to VPERMILPS.
10895 if (isSingleInputShuffleMask(Mask)) {
10896 SDValue VPermMask[8];
10897 for (int i = 0; i < 8; ++i)
10898 VPermMask[i] = Mask[i] < 0 ? DAG.getUNDEF(MVT::i32)
10899 : DAG.getConstant(Mask[i], MVT::i32);
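// Unlike the immediate-controlled forms above, these variable-mask permutes
// take the shuffle indices as a vector operand, so undef mask elements simply
// become undef lanes of the constant index vector.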
10900 if (!is128BitLaneCrossingShuffleMask(MVT::v8f32, Mask))
10901 return DAG.getNode(
10902 X86ISD::VPERMILPV, DL, MVT::v8f32, V1,
10903 DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v8i32, VPermMask));
10905 if (Subtarget->hasAVX2())
10906 return DAG.getNode(X86ISD::VPERMV, DL, MVT::v8f32,
10907 DAG.getNode(ISD::BITCAST, DL, MVT::v8f32,
10908 DAG.getNode(ISD::BUILD_VECTOR, DL,
10909 MVT::v8i32, VPermMask)),
10912 // Otherwise, fall back.
10913 return lowerVectorShuffleAsLanePermuteAndBlend(DL, MVT::v8f32, V1, V2, Mask,
10917 // Try to simplify this by merging 128-bit lanes to enable a lane-based
10919 if (SDValue Result = lowerVectorShuffleByMerging128BitLanes(
10920 DL, MVT::v8f32, V1, V2, Mask, Subtarget, DAG))
10923 // If we have AVX2 then we always want to lower with a blend because at v8 we
10924 // can fully permute the elements.
10925 if (Subtarget->hasAVX2())
10926 return lowerVectorShuffleAsDecomposedShuffleBlend(DL, MVT::v8f32, V1, V2,
10929 // Otherwise fall back on generic lowering.
10930 return lowerVectorShuffleAsSplitOrBlend(DL, MVT::v8f32, V1, V2, Mask, DAG);
10933 /// \brief Handle lowering of 8-lane 32-bit integer shuffles.
10935 /// This routine is only called when we have AVX2 and thus a reasonable
10936 /// instruction set for v8i32 shuffling.
10937 static SDValue lowerV8I32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
10938 const X86Subtarget *Subtarget,
10939 SelectionDAG &DAG) {
10941 assert(V1.getSimpleValueType() == MVT::v8i32 && "Bad operand type!");
10942 assert(V2.getSimpleValueType() == MVT::v8i32 && "Bad operand type!");
10943 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
10944 ArrayRef<int> Mask = SVOp->getMask();
10945 assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
10946 assert(Subtarget->hasAVX2() && "We can only lower v8i32 with AVX2!");
10948 // Whenever we can lower this as a zext, that instruction is strictly faster
10949 // than any alternative. It also allows us to fold memory operands into the
10950 // shuffle in many cases.
10951 if (SDValue ZExt = lowerVectorShuffleAsZeroOrAnyExtend(DL, MVT::v8i32, V1, V2,
10952 Mask, Subtarget, DAG))
10955 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v8i32, V1, V2, Mask,
10959 // Check for being able to broadcast a single element.
10960 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v8i32, DL, V1,
10961 Mask, Subtarget, DAG))
10964 // If the shuffle mask is repeated in each 128-bit lane we can use more
10965 // efficient instructions that mirror the shuffles across the two 128-bit
10966 // lanes.
10967 SmallVector<int, 4> RepeatedMask;
10968 if (is128BitLaneRepeatedShuffleMask(MVT::v8i32, Mask, RepeatedMask)) {
10969 assert(RepeatedMask.size() == 4 && "Unexpected repeated mask size!");
10970 if (isSingleInputShuffleMask(Mask))
10971 return DAG.getNode(X86ISD::PSHUFD, DL, MVT::v8i32, V1,
10972 getV4X86ShuffleImm8ForMask(RepeatedMask, DAG));
10974 // Use dedicated unpack instructions for masks that match their pattern.
10975 if (isShuffleEquivalent(V1, V2, Mask, 0, 8, 1, 9, 4, 12, 5, 13))
10976 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v8i32, V1, V2);
10977 if (isShuffleEquivalent(V1, V2, Mask, 2, 10, 3, 11, 6, 14, 7, 15))
10978 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v8i32, V1, V2);
10979 if (isShuffleEquivalent(V1, V2, Mask, 8, 0, 9, 1, 12, 4, 13, 5))
10980 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v8i32, V2, V1);
10981 if (isShuffleEquivalent(V1, V2, Mask, 10, 2, 11, 3, 14, 6, 15, 7))
10982 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v8i32, V2, V1);
10985 // Try to use shift instructions.
10986 if (SDValue Shift =
10987 lowerVectorShuffleAsShift(DL, MVT::v8i32, V1, V2, Mask, DAG))
10990 if (SDValue Rotate = lowerVectorShuffleAsByteRotate(
10991 DL, MVT::v8i32, V1, V2, Mask, Subtarget, DAG))
10994 // If the shuffle patterns aren't repeated but it is a single input, directly
10995 // generate a cross-lane VPERMD instruction.
10996 if (isSingleInputShuffleMask(Mask)) {
10997 SDValue VPermMask[8];
10998 for (int i = 0; i < 8; ++i)
10999 VPermMask[i] = Mask[i] < 0 ? DAG.getUNDEF(MVT::i32)
11000 : DAG.getConstant(Mask[i], MVT::i32);
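// Note that the VPERMV node (VPERMD instruction) takes the index vector as
// its first operand and the data vector as its second, as built below.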
11001 return DAG.getNode(
11002 X86ISD::VPERMV, DL, MVT::v8i32,
11003 DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v8i32, VPermMask), V1);
11006 // Try to simplify this by merging 128-bit lanes to enable a lane-based
11008 if (SDValue Result = lowerVectorShuffleByMerging128BitLanes(
11009 DL, MVT::v8i32, V1, V2, Mask, Subtarget, DAG))
11012 // Otherwise fall back on generic blend lowering.
11013 return lowerVectorShuffleAsDecomposedShuffleBlend(DL, MVT::v8i32, V1, V2,
11017 /// \brief Handle lowering of 16-lane 16-bit integer shuffles.
11019 /// This routine is only called when we have AVX2 and thus a reasonable
11020 /// instruction set for v16i16 shuffling.
11021 static SDValue lowerV16I16VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
11022 const X86Subtarget *Subtarget,
11023 SelectionDAG &DAG) {
11025 assert(V1.getSimpleValueType() == MVT::v16i16 && "Bad operand type!");
11026 assert(V2.getSimpleValueType() == MVT::v16i16 && "Bad operand type!");
11027 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
11028 ArrayRef<int> Mask = SVOp->getMask();
11029 assert(Mask.size() == 16 && "Unexpected mask size for v16 shuffle!");
11030 assert(Subtarget->hasAVX2() && "We can only lower v16i16 with AVX2!");
11032 // Whenever we can lower this as a zext, that instruction is strictly faster
11033 // than any alternative. It also allows us to fold memory operands into the
11034 // shuffle in many cases.
11035 if (SDValue ZExt = lowerVectorShuffleAsZeroOrAnyExtend(DL, MVT::v16i16, V1, V2,
11036 Mask, Subtarget, DAG))
11039 // Check for being able to broadcast a single element.
11040 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v16i16, DL, V1,
11041 Mask, Subtarget, DAG))
11044 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v16i16, V1, V2, Mask,
11048 // Use dedicated unpack instructions for masks that match their pattern.
11049 if (isShuffleEquivalent(V1, V2, Mask,
11050 // First 128-bit lane:
11051 0, 16, 1, 17, 2, 18, 3, 19,
11052 // Second 128-bit lane:
11053 8, 24, 9, 25, 10, 26, 11, 27))
11054 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v16i16, V1, V2);
11055 if (isShuffleEquivalent(V1, V2, Mask,
11056 // First 128-bit lane:
11057 4, 20, 5, 21, 6, 22, 7, 23,
11058 // Second 128-bit lane:
11059 12, 28, 13, 29, 14, 30, 15, 31))
11060 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v16i16, V1, V2);
11062 // Try to use shift instructions.
11063 if (SDValue Shift =
11064 lowerVectorShuffleAsShift(DL, MVT::v16i16, V1, V2, Mask, DAG))
11067 // Try to use byte rotation instructions.
11068 if (SDValue Rotate = lowerVectorShuffleAsByteRotate(
11069 DL, MVT::v16i16, V1, V2, Mask, Subtarget, DAG))
11072 if (isSingleInputShuffleMask(Mask)) {
11073 // There are no generalized cross-lane shuffle operations available on i16
11074 // element types.
11075 if (is128BitLaneCrossingShuffleMask(MVT::v16i16, Mask))
11076 return lowerVectorShuffleAsLanePermuteAndBlend(DL, MVT::v16i16, V1, V2,
11079 SDValue PSHUFBMask[32];
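// Each 16-bit mask element M expands below into the byte pair
// (2 * M, 2 * M + 1) of an in-lane v32i8 PSHUFB mask; undef elements expand
// into a pair of undef bytes.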
11080 for (int i = 0; i < 16; ++i) {
11081 if (Mask[i] == -1) {
11082   PSHUFBMask[2 * i] = PSHUFBMask[2 * i + 1] = DAG.getUNDEF(MVT::i8);
11083   continue;
11084 }
11086 int M = i < 8 ? Mask[i] : Mask[i] - 8;
11087 assert(M >= 0 && M < 8 && "Invalid single-input mask!");
11088 PSHUFBMask[2 * i] = DAG.getConstant(2 * M, MVT::i8);
11089 PSHUFBMask[2 * i + 1] = DAG.getConstant(2 * M + 1, MVT::i8);
11091 return DAG.getNode(
11092 ISD::BITCAST, DL, MVT::v16i16,
11093 DAG.getNode(
11094 X86ISD::PSHUFB, DL, MVT::v32i8,
11095 DAG.getNode(ISD::BITCAST, DL, MVT::v32i8, V1),
11096 DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v32i8, PSHUFBMask)));
11099 // Try to simplify this by merging 128-bit lanes to enable a lane-based
11101 if (SDValue Result = lowerVectorShuffleByMerging128BitLanes(
11102 DL, MVT::v16i16, V1, V2, Mask, Subtarget, DAG))
11105 // Otherwise fall back on generic lowering.
11106 return lowerVectorShuffleAsSplitOrBlend(DL, MVT::v16i16, V1, V2, Mask, DAG);
11109 /// \brief Handle lowering of 32-lane 8-bit integer shuffles.
11111 /// This routine is only called when we have AVX2 and thus a reasonable
11112 /// instruction set for v32i8 shuffling.
11113 static SDValue lowerV32I8VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
11114 const X86Subtarget *Subtarget,
11115 SelectionDAG &DAG) {
11117 assert(V1.getSimpleValueType() == MVT::v32i8 && "Bad operand type!");
11118 assert(V2.getSimpleValueType() == MVT::v32i8 && "Bad operand type!");
11119 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
11120 ArrayRef<int> Mask = SVOp->getMask();
11121 assert(Mask.size() == 32 && "Unexpected mask size for v32 shuffle!");
11122 assert(Subtarget->hasAVX2() && "We can only lower v32i8 with AVX2!");
11124 // Whenever we can lower this as a zext, that instruction is strictly faster
11125 // than any alternative. It also allows us to fold memory operands into the
11126 // shuffle in many cases.
11127 if (SDValue ZExt = lowerVectorShuffleAsZeroOrAnyExtend(DL, MVT::v32i8, V1, V2,
11128 Mask, Subtarget, DAG))
11131 // Check for being able to broadcast a single element.
11132 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v32i8, DL, V1,
11133 Mask, Subtarget, DAG))
11136 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v32i8, V1, V2, Mask,
11140 // Use dedicated unpack instructions for masks that match their pattern.
11141 // Note that these are repeated 128-bit lane unpacks, not unpacks across all
11142 // 256 bits of the vector.
11143 if (isShuffleEquivalent(
11144         V1, V2, Mask,
11145 // First 128-bit lane:
11146 0, 32, 1, 33, 2, 34, 3, 35, 4, 36, 5, 37, 6, 38, 7, 39,
11147 // Second 128-bit lane:
11148 16, 48, 17, 49, 18, 50, 19, 51, 20, 52, 21, 53, 22, 54, 23, 55))
11149 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v32i8, V1, V2);
11150 if (isShuffleEquivalent(
11151         V1, V2, Mask,
11152 // First 128-bit lane:
11153 8, 40, 9, 41, 10, 42, 11, 43, 12, 44, 13, 45, 14, 46, 15, 47,
11154 // Second 128-bit lane:
11155 24, 56, 25, 57, 26, 58, 27, 59, 28, 60, 29, 61, 30, 62, 31, 63))
11156 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v32i8, V1, V2);
11158 // Try to use shift instructions.
11159 if (SDValue Shift =
11160 lowerVectorShuffleAsShift(DL, MVT::v32i8, V1, V2, Mask, DAG))
11163 // Try to use byte rotation instructions.
11164 if (SDValue Rotate = lowerVectorShuffleAsByteRotate(
11165 DL, MVT::v32i8, V1, V2, Mask, Subtarget, DAG))
11168 if (isSingleInputShuffleMask(Mask)) {
11169 // There are no generalized cross-lane shuffle operations available on i8
11170 // element types.
11171 if (is128BitLaneCrossingShuffleMask(MVT::v32i8, Mask))
11172 return lowerVectorShuffleAsLanePermuteAndBlend(DL, MVT::v32i8, V1, V2,
11175 SDValue PSHUFBMask[32];
11176 for (int i = 0; i < 32; ++i)
11177   PSHUFBMask[i] =
11178       Mask[i] < 0
11179 ? DAG.getUNDEF(MVT::i8)
11180 : DAG.getConstant(Mask[i] < 16 ? Mask[i] : Mask[i] - 16, MVT::i8);
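// PSHUFB only indexes within its own 128-bit lane, so indices that refer to
// the high half are reduced by 16 above and undef elements become undef
// bytes in the mask vector.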
11182 return DAG.getNode(
11183 X86ISD::PSHUFB, DL, MVT::v32i8, V1,
11184 DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v32i8, PSHUFBMask));
11187 // Try to simplify this by merging 128-bit lanes to enable a lane-based
11189 if (SDValue Result = lowerVectorShuffleByMerging128BitLanes(
11190 DL, MVT::v32i8, V1, V2, Mask, Subtarget, DAG))
11193 // Otherwise fall back on generic lowering.
11194 return lowerVectorShuffleAsSplitOrBlend(DL, MVT::v32i8, V1, V2, Mask, DAG);
11197 /// \brief High-level routine to lower various 256-bit x86 vector shuffles.
11199 /// This routine either breaks down the specific type of a 256-bit x86 vector
11200 /// shuffle or splits it into two 128-bit shuffles and fuses the results back
11201 /// together based on the available instructions.
11202 static SDValue lower256BitVectorShuffle(SDValue Op, SDValue V1, SDValue V2,
11203 MVT VT, const X86Subtarget *Subtarget,
11204 SelectionDAG &DAG) {
11206 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
11207 ArrayRef<int> Mask = SVOp->getMask();
11209 // There is a really nice hard cut-over between AVX1 and AVX2 that means we can
11210 // check for those subtargets here and avoid much of the subtarget querying in
11211 // the per-vector-type lowering routines. With AVX1 we have essentially *zero*
11212 // ability to manipulate a 256-bit vector with integer types. Since we'll use
11213 // floating point types there eventually, just immediately cast everything to
11214 // a float and operate entirely in that domain.
11215 if (VT.isInteger() && !Subtarget->hasAVX2()) {
11216 int ElementBits = VT.getScalarSizeInBits();
11217 if (ElementBits < 32)
11218 // No floating point type available, decompose into 128-bit vectors.
11219 return splitAndLowerVectorShuffle(DL, VT, V1, V2, Mask, DAG);
11221 MVT FpVT = MVT::getVectorVT(MVT::getFloatingPointVT(ElementBits),
11222 VT.getVectorNumElements());
11223 V1 = DAG.getNode(ISD::BITCAST, DL, FpVT, V1);
11224 V2 = DAG.getNode(ISD::BITCAST, DL, FpVT, V2);
11225 return DAG.getNode(ISD::BITCAST, DL, VT,
11226 DAG.getVectorShuffle(FpVT, DL, V1, V2, Mask));
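// For example, with only AVX1 a v4i64 shuffle is performed here as a v4f64
// shuffle of bitcast operands and the result is bitcast back afterwards.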
11229 switch (VT.SimpleTy) {
11230 case MVT::v4f64:
11231   return lowerV4F64VectorShuffle(Op, V1, V2, Subtarget, DAG);
11232 case MVT::v4i64:
11233   return lowerV4I64VectorShuffle(Op, V1, V2, Subtarget, DAG);
11234 case MVT::v8f32:
11235   return lowerV8F32VectorShuffle(Op, V1, V2, Subtarget, DAG);
11236 case MVT::v8i32:
11237   return lowerV8I32VectorShuffle(Op, V1, V2, Subtarget, DAG);
11238 case MVT::v16i16:
11239   return lowerV16I16VectorShuffle(Op, V1, V2, Subtarget, DAG);
11240 case MVT::v32i8:
11241   return lowerV32I8VectorShuffle(Op, V1, V2, Subtarget, DAG);
11243 default:
11244   llvm_unreachable("Not a valid 256-bit x86 vector type!");
11248 /// \brief Handle lowering of 8-lane 64-bit floating point shuffles.
11249 static SDValue lowerV8F64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
11250 const X86Subtarget *Subtarget,
11251 SelectionDAG &DAG) {
11253 assert(V1.getSimpleValueType() == MVT::v8f64 && "Bad operand type!");
11254 assert(V2.getSimpleValueType() == MVT::v8f64 && "Bad operand type!");
11255 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
11256 ArrayRef<int> Mask = SVOp->getMask();
11257 assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
11259 // X86 has dedicated unpack instructions that can handle specific blend
11260 // operations: UNPCKH and UNPCKL.
11261 if (isShuffleEquivalent(V1, V2, Mask, 0, 8, 2, 10, 4, 12, 6, 14))
11262 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v8f64, V1, V2);
11263 if (isShuffleEquivalent(V1, V2, Mask, 1, 9, 3, 11, 5, 13, 7, 15))
11264 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v8f64, V1, V2);
11266 // FIXME: Implement direct support for this type!
11267 return splitAndLowerVectorShuffle(DL, MVT::v8f64, V1, V2, Mask, DAG);
11270 /// \brief Handle lowering of 16-lane 32-bit floating point shuffles.
11271 static SDValue lowerV16F32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
11272 const X86Subtarget *Subtarget,
11273 SelectionDAG &DAG) {
11275 assert(V1.getSimpleValueType() == MVT::v16f32 && "Bad operand type!");
11276 assert(V2.getSimpleValueType() == MVT::v16f32 && "Bad operand type!");
11277 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
11278 ArrayRef<int> Mask = SVOp->getMask();
11279 assert(Mask.size() == 16 && "Unexpected mask size for v16 shuffle!");
11281 // Use dedicated unpack instructions for masks that match their pattern.
11282 if (isShuffleEquivalent(V1, V2, Mask,
11283 0, 16, 1, 17, 4, 20, 5, 21,
11284 8, 24, 9, 25, 12, 28, 13, 29))
11285 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v16f32, V1, V2);
11286 if (isShuffleEquivalent(V1, V2, Mask,
11287 2, 18, 3, 19, 6, 22, 7, 23,
11288 10, 26, 11, 27, 14, 30, 15, 31))
11289 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v16f32, V1, V2);
11291 // FIXME: Implement direct support for this type!
11292 return splitAndLowerVectorShuffle(DL, MVT::v16f32, V1, V2, Mask, DAG);
11295 /// \brief Handle lowering of 8-lane 64-bit integer shuffles.
11296 static SDValue lowerV8I64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
11297 const X86Subtarget *Subtarget,
11298 SelectionDAG &DAG) {
11300 assert(V1.getSimpleValueType() == MVT::v8i64 && "Bad operand type!");
11301 assert(V2.getSimpleValueType() == MVT::v8i64 && "Bad operand type!");
11302 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
11303 ArrayRef<int> Mask = SVOp->getMask();
11304 assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
11306 // X86 has dedicated unpack instructions that can handle specific blend
11307 // operations: UNPCKH and UNPCKL.
11308 if (isShuffleEquivalent(V1, V2, Mask, 0, 8, 2, 10, 4, 12, 6, 14))
11309 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v8i64, V1, V2);
11310 if (isShuffleEquivalent(V1, V2, Mask, 1, 9, 3, 11, 5, 13, 7, 15))
11311 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v8i64, V1, V2);
11313 // FIXME: Implement direct support for this type!
11314 return splitAndLowerVectorShuffle(DL, MVT::v8i64, V1, V2, Mask, DAG);
11317 /// \brief Handle lowering of 16-lane 32-bit integer shuffles.
11318 static SDValue lowerV16I32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
11319 const X86Subtarget *Subtarget,
11320 SelectionDAG &DAG) {
11322 assert(V1.getSimpleValueType() == MVT::v16i32 && "Bad operand type!");
11323 assert(V2.getSimpleValueType() == MVT::v16i32 && "Bad operand type!");
11324 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
11325 ArrayRef<int> Mask = SVOp->getMask();
11326 assert(Mask.size() == 16 && "Unexpected mask size for v16 shuffle!");
11328 // Use dedicated unpack instructions for masks that match their pattern.
11329 if (isShuffleEquivalent(V1, V2, Mask,
11330 0, 16, 1, 17, 4, 20, 5, 21,
11331 8, 24, 9, 25, 12, 28, 13, 29))
11332 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v16i32, V1, V2);
11333 if (isShuffleEquivalent(V1, V2, Mask,
11334 2, 18, 3, 19, 6, 22, 7, 23,
11335 10, 26, 11, 27, 14, 30, 15, 31))
11336 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v16i32, V1, V2);
11338 // FIXME: Implement direct support for this type!
11339 return splitAndLowerVectorShuffle(DL, MVT::v16i32, V1, V2, Mask, DAG);
11342 /// \brief Handle lowering of 32-lane 16-bit integer shuffles.
11343 static SDValue lowerV32I16VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
11344 const X86Subtarget *Subtarget,
11345 SelectionDAG &DAG) {
11347 assert(V1.getSimpleValueType() == MVT::v32i16 && "Bad operand type!");
11348 assert(V2.getSimpleValueType() == MVT::v32i16 && "Bad operand type!");
11349 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
11350 ArrayRef<int> Mask = SVOp->getMask();
11351 assert(Mask.size() == 32 && "Unexpected mask size for v32 shuffle!");
11352 assert(Subtarget->hasBWI() && "We can only lower v32i16 with AVX-512-BWI!");
11354 // FIXME: Implement direct support for this type!
11355 return splitAndLowerVectorShuffle(DL, MVT::v32i16, V1, V2, Mask, DAG);
11358 /// \brief Handle lowering of 64-lane 8-bit integer shuffles.
11359 static SDValue lowerV64I8VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
11360 const X86Subtarget *Subtarget,
11361 SelectionDAG &DAG) {
11363 assert(V1.getSimpleValueType() == MVT::v64i8 && "Bad operand type!");
11364 assert(V2.getSimpleValueType() == MVT::v64i8 && "Bad operand type!");
11365 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
11366 ArrayRef<int> Mask = SVOp->getMask();
11367 assert(Mask.size() == 64 && "Unexpected mask size for v64 shuffle!");
11368 assert(Subtarget->hasBWI() && "We can only lower v64i8 with AVX-512-BWI!");
11370 // FIXME: Implement direct support for this type!
11371 return splitAndLowerVectorShuffle(DL, MVT::v64i8, V1, V2, Mask, DAG);
11374 /// \brief High-level routine to lower various 512-bit x86 vector shuffles.
11376 /// This routine either breaks down the specific type of a 512-bit x86 vector
11377 /// shuffle or splits it into two 256-bit shuffles and fuses the results back
11378 /// together based on the available instructions.
11379 static SDValue lower512BitVectorShuffle(SDValue Op, SDValue V1, SDValue V2,
11380 MVT VT, const X86Subtarget *Subtarget,
11381 SelectionDAG &DAG) {
11383 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
11384 ArrayRef<int> Mask = SVOp->getMask();
11385 assert(Subtarget->hasAVX512() &&
11386 "Cannot lower 512-bit vectors w/ basic ISA!");
11388 // Check for being able to broadcast a single element.
11389 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(VT.SimpleTy, DL, V1,
11390 Mask, Subtarget, DAG))
11393 // Dispatch to each element type for lowering. If we don't have support for
11394 // specific element type shuffles at 512 bits, immediately split them and
11395 // lower them. Each lowering routine of a given type is allowed to assume that
11396 // the requisite ISA extensions for that element type are available.
11397 switch (VT.SimpleTy) {
11398 case MVT::v8f64:
11399   return lowerV8F64VectorShuffle(Op, V1, V2, Subtarget, DAG);
11400 case MVT::v16f32:
11401   return lowerV16F32VectorShuffle(Op, V1, V2, Subtarget, DAG);
11402 case MVT::v8i64:
11403   return lowerV8I64VectorShuffle(Op, V1, V2, Subtarget, DAG);
11404 case MVT::v16i32:
11405   return lowerV16I32VectorShuffle(Op, V1, V2, Subtarget, DAG);
11406 case MVT::v32i16:
11407   if (Subtarget->hasBWI())
11408     return lowerV32I16VectorShuffle(Op, V1, V2, Subtarget, DAG);
11409   break;
11410 case MVT::v64i8:
11411   if (Subtarget->hasBWI())
11412     return lowerV64I8VectorShuffle(Op, V1, V2, Subtarget, DAG);
11413   break;
11415 default:
11416   llvm_unreachable("Not a valid 512-bit x86 vector type!");
11417 }
11419 // Otherwise fall back on splitting.
11420 return splitAndLowerVectorShuffle(DL, VT, V1, V2, Mask, DAG);
11423 /// \brief Top-level lowering for x86 vector shuffles.
11425 /// This handles decomposition, canonicalization, and lowering of all x86
11426 /// vector shuffles. Most of the specific lowering strategies are encapsulated
11427 /// above in helper routines. The canonicalization attempts to widen shuffles
11428 /// to involve fewer lanes of wider elements, consolidate symmetric patterns
11429 /// s.t. only one of the two inputs needs to be tested, etc.
11430 static SDValue lowerVectorShuffle(SDValue Op, const X86Subtarget *Subtarget,
11431 SelectionDAG &DAG) {
11432 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
11433 ArrayRef<int> Mask = SVOp->getMask();
11434 SDValue V1 = Op.getOperand(0);
11435 SDValue V2 = Op.getOperand(1);
11436 MVT VT = Op.getSimpleValueType();
11437 int NumElements = VT.getVectorNumElements();
11440 assert(VT.getSizeInBits() != 64 && "Can't lower MMX shuffles");
11442 bool V1IsUndef = V1.getOpcode() == ISD::UNDEF;
11443 bool V2IsUndef = V2.getOpcode() == ISD::UNDEF;
11444 if (V1IsUndef && V2IsUndef)
11445 return DAG.getUNDEF(VT);
11447 // When we create a shuffle node we put the UNDEF node as the second operand,
11448 // but in some cases the first operand may be transformed to UNDEF.
11449 // In this case we should just commute the node.
11450 if (V1IsUndef)
11451   return DAG.getCommutedVectorShuffle(*SVOp);
11453 // Check for non-undef masks pointing at an undef vector and make the masks
11454 // undef as well. This makes it easier to match the shuffle based solely on
11455 // the mask.
11456 if (V2IsUndef)
11457   for (int M : Mask)
11458     if (M >= NumElements) {
11459       SmallVector<int, 8> NewMask(Mask.begin(), Mask.end());
11460       for (int &M : NewMask)
11461         if (M >= NumElements)
11462           M = -1;
11463       return DAG.getVectorShuffle(VT, dl, V1, V2, NewMask);
11464     }
11466 // We actually see shuffles that are entirely re-arrangements of a set of
11467 // zero inputs. This mostly happens while decomposing complex shuffles into
11468 // simple ones. Directly lower these as a buildvector of zeros.
11469 SmallBitVector Zeroable = computeZeroableShuffleElements(Mask, V1, V2);
11470 if (Zeroable.all())
11471 return getZeroVector(VT, Subtarget, DAG, dl);
11473 // Try to collapse shuffles into using a vector type with fewer elements but
11474 // wider element types. We cap this to not form integers or floating point
11475 // elements wider than 64 bits, but it might be interesting to form i128
11476 // integers to handle flipping the low and high halves of AVX 256-bit vectors.
11477 SmallVector<int, 16> WidenedMask;
11478 if (VT.getScalarSizeInBits() < 64 &&
11479 canWidenShuffleElements(Mask, WidenedMask)) {
11480 MVT NewEltVT = VT.isFloatingPoint()
11481 ? MVT::getFloatingPointVT(VT.getScalarSizeInBits() * 2)
11482 : MVT::getIntegerVT(VT.getScalarSizeInBits() * 2);
11483 MVT NewVT = MVT::getVectorVT(NewEltVT, VT.getVectorNumElements() / 2);
11484 // Make sure that the new vector type is legal. For example, v2f64 isn't
11485 // legal on SSE1.
11486 if (DAG.getTargetLoweringInfo().isTypeLegal(NewVT)) {
11487 V1 = DAG.getNode(ISD::BITCAST, dl, NewVT, V1);
11488 V2 = DAG.getNode(ISD::BITCAST, dl, NewVT, V2);
11489 return DAG.getNode(ISD::BITCAST, dl, VT,
11490 DAG.getVectorShuffle(NewVT, dl, V1, V2, WidenedMask));
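// For example, a v4i32 mask of <0, 1, 6, 7> widens to the v2i64 mask <0, 3>.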
11494 int NumV1Elements = 0, NumUndefElements = 0, NumV2Elements = 0;
11495 for (int M : SVOp->getMask())
11496   if (M < 0)
11497     ++NumUndefElements;
11498   else if (M < NumElements)
11499     ++NumV1Elements;
11500   else
11501     ++NumV2Elements;
11503 // Commute the shuffle as needed such that more elements come from V1 than
11504 // V2. This allows us to match the shuffle pattern strictly on how many
11505 // elements come from V1 without handling the symmetric cases.
11506 if (NumV2Elements > NumV1Elements)
11507 return DAG.getCommutedVectorShuffle(*SVOp);
11509 // When the number of V1 and V2 elements is the same, try to minimize the
11510 // number of uses of V2 in the low half of the vector. When that is tied,
11511 // ensure that the sum of indices for V1 is equal to or lower than the sum of
11512 // indices for V2. When those are equal, try to ensure that the number of odd
11513 // indices for V1 is lower than the number of odd indices for V2.
11514 if (NumV1Elements == NumV2Elements) {
11515 int LowV1Elements = 0, LowV2Elements = 0;
11516 for (int M : SVOp->getMask().slice(0, NumElements / 2))
11517   if (M >= NumElements)
11518     ++LowV2Elements;
11519   else if (M >= 0)
11520     ++LowV1Elements;
11521 if (LowV2Elements > LowV1Elements) {
11522 return DAG.getCommutedVectorShuffle(*SVOp);
11523 } else if (LowV2Elements == LowV1Elements) {
11524 int SumV1Indices = 0, SumV2Indices = 0;
11525 for (int i = 0, Size = SVOp->getMask().size(); i < Size; ++i)
11526 if (SVOp->getMask()[i] >= NumElements)
11527   SumV2Indices += i;
11528 else if (SVOp->getMask()[i] >= 0)
11529   SumV1Indices += i;
11530 if (SumV2Indices < SumV1Indices) {
11531 return DAG.getCommutedVectorShuffle(*SVOp);
11532 } else if (SumV2Indices == SumV1Indices) {
11533 int NumV1OddIndices = 0, NumV2OddIndices = 0;
11534 for (int i = 0, Size = SVOp->getMask().size(); i < Size; ++i)
11535 if (SVOp->getMask()[i] >= NumElements)
11536 NumV2OddIndices += i % 2;
11537 else if (SVOp->getMask()[i] >= 0)
11538 NumV1OddIndices += i % 2;
11539 if (NumV2OddIndices < NumV1OddIndices)
11540 return DAG.getCommutedVectorShuffle(*SVOp);
11545 // For each vector width, delegate to a specialized lowering routine.
11546 if (VT.getSizeInBits() == 128)
11547 return lower128BitVectorShuffle(Op, V1, V2, VT, Subtarget, DAG);
11549 if (VT.getSizeInBits() == 256)
11550 return lower256BitVectorShuffle(Op, V1, V2, VT, Subtarget, DAG);
11552 // Force AVX-512 vectors to be scalarized for now.
11553 // FIXME: Implement AVX-512 support!
11554 if (VT.getSizeInBits() == 512)
11555 return lower512BitVectorShuffle(Op, V1, V2, VT, Subtarget, DAG);
11557 llvm_unreachable("Unimplemented!");
11561 //===----------------------------------------------------------------------===//
11562 // Legacy vector shuffle lowering
11564 // This code is the legacy code handling vector shuffles until the above
11565 // replaces its functionality and performance.
11566 //===----------------------------------------------------------------------===//
11568 static bool isBlendMask(ArrayRef<int> MaskVals, MVT VT, bool hasSSE41,
11569 bool hasInt256, unsigned *MaskOut = nullptr) {
11570 MVT EltVT = VT.getVectorElementType();
11572 // There is no blend with immediate in AVX-512.
11573 if (VT.is512BitVector())
11574   return false;
11576 if (!hasSSE41 || EltVT == MVT::i8)
11577   return false;
11578 if (!hasInt256 && VT == MVT::v16i16)
11579   return false;
11581 unsigned MaskValue = 0;
11582 unsigned NumElems = VT.getVectorNumElements();
11583 // There are 2 lanes if (NumElems > 8), and 1 lane otherwise.
11584 unsigned NumLanes = (NumElems - 1) / 8 + 1;
11585 unsigned NumElemsInLane = NumElems / NumLanes;
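// MaskValue accumulates one bit per element position within a lane: a set bit
// means that position (and its mirror in the second lane, if any) is taken
// from the second input.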
11587 // Blend for v16i16 should be symmetric for both lanes.
11588 for (unsigned i = 0; i < NumElemsInLane; ++i) {
11590 int SndLaneEltIdx = (NumLanes == 2) ? MaskVals[i + NumElemsInLane] : -1;
11591 int EltIdx = MaskVals[i];
11593 if ((EltIdx < 0 || EltIdx == (int)i) &&
11594 (SndLaneEltIdx < 0 || SndLaneEltIdx == (int)(i + NumElemsInLane)))
11597 if (((unsigned)EltIdx == (i + NumElems)) &&
11598 (SndLaneEltIdx < 0 ||
11599 (unsigned)SndLaneEltIdx == i + NumElems + NumElemsInLane))
11600 MaskValue |= (1 << i);
11606 *MaskOut = MaskValue;
11610 // Try to lower a shuffle node into a simple blend instruction.
11611 // This function assumes isBlendMask returns true for this
11612 // ShuffleVectorSDNode.
11613 static SDValue LowerVECTOR_SHUFFLEtoBlend(ShuffleVectorSDNode *SVOp,
11614 unsigned MaskValue,
11615 const X86Subtarget *Subtarget,
11616 SelectionDAG &DAG) {
11617 MVT VT = SVOp->getSimpleValueType(0);
11618 MVT EltVT = VT.getVectorElementType();
11619 assert(isBlendMask(SVOp->getMask(), VT, Subtarget->hasSSE41(),
11620                    Subtarget->hasInt256()) &&
11621        "Trying to lower a VECTOR_SHUFFLE to a Blend but "
11622        "with the wrong mask");
11623 SDValue V1 = SVOp->getOperand(0);
11624 SDValue V2 = SVOp->getOperand(1);
11626 unsigned NumElems = VT.getVectorNumElements();
11628 // Convert i32 vectors to floating point if it is not AVX2.
11629 // AVX2 introduced VPBLENDD instruction for 128 and 256-bit vectors.
11630 MVT BlendVT = VT;
11631 if (EltVT == MVT::i64 || (EltVT == MVT::i32 && !Subtarget->hasInt256())) {
11632   BlendVT = MVT::getVectorVT(MVT::getFloatingPointVT(EltVT.getSizeInBits()),
11633                              NumElems);
11634   V1 = DAG.getNode(ISD::BITCAST, dl, BlendVT, V1);
11635   V2 = DAG.getNode(ISD::BITCAST, dl, BlendVT, V2);
11638 SDValue Ret = DAG.getNode(X86ISD::BLENDI, dl, BlendVT, V1, V2,
11639 DAG.getConstant(MaskValue, MVT::i32));
11640 return DAG.getNode(ISD::BITCAST, dl, VT, Ret);
11643 /// In vector type \p VT, return true if the element at index \p InputIdx
11644 /// falls on a different 128-bit lane than \p OutputIdx.
11645 static bool ShuffleCrosses128bitLane(MVT VT, unsigned InputIdx,
11646 unsigned OutputIdx) {
11647 unsigned EltSize = VT.getVectorElementType().getSizeInBits();
11648 return InputIdx * EltSize / 128 != OutputIdx * EltSize / 128;
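// For example, with v8i16 (16-bit elements) input index 5 and output index 2
// both land in the low 128-bit lane (5*16/128 == 2*16/128 == 0), so this
// returns false.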
11651 /// Generate a PSHUFB if possible. Selects elements from \p V1 according to
11652 /// \p MaskVals. MaskVals[OutputIdx] = InputIdx specifies that we want to
11653 /// shuffle the element at InputIdx in V1 to OutputIdx in the result. If \p
11654 /// MaskVals refers to elements outside of \p V1 or is undef (-1), insert a
11656 static SDValue getPSHUFB(ArrayRef<int> MaskVals, SDValue V1, SDLoc &dl,
11657 SelectionDAG &DAG) {
11658 MVT VT = V1.getSimpleValueType();
11659 assert(VT.is128BitVector() || VT.is256BitVector());
11661 MVT EltVT = VT.getVectorElementType();
11662 unsigned EltSizeInBytes = EltVT.getSizeInBits() / 8;
11663 unsigned NumElts = VT.getVectorNumElements();
11665 SmallVector<SDValue, 32> PshufbMask;
11666 for (unsigned OutputIdx = 0; OutputIdx < NumElts; ++OutputIdx) {
11667 int InputIdx = MaskVals[OutputIdx];
11668 unsigned InputByteIdx;
11670 if (InputIdx < 0 || NumElts <= (unsigned)InputIdx)
11671 InputByteIdx = 0x80;
11673 // Cross lane is not allowed.
11674 if (ShuffleCrosses128bitLane(VT, InputIdx, OutputIdx))
11676 InputByteIdx = InputIdx * EltSizeInBytes;
11677 // Index is a byte offset within the 128-bit lane.
11678 InputByteIdx &= 0xf;
11681 for (unsigned j = 0; j < EltSizeInBytes; ++j) {
11682 PshufbMask.push_back(DAG.getConstant(InputByteIdx, MVT::i8));
11683 if (InputByteIdx != 0x80)
11688 MVT ShufVT = MVT::getVectorVT(MVT::i8, PshufbMask.size());
11690 V1 = DAG.getNode(ISD::BITCAST, dl, ShufVT, V1);
11691 return DAG.getNode(X86ISD::PSHUFB, dl, ShufVT, V1,
11692 DAG.getNode(ISD::BUILD_VECTOR, dl, ShufVT, PshufbMask));
11695 // v8i16 shuffles - Prefer shuffles in the following order:
11696 // 1. [all] pshuflw, pshufhw, optional move
11697 // 2. [ssse3] 1 x pshufb
11698 // 3. [ssse3] 2 x pshufb + 1 x por
11699 // 4. [all] mov + pshuflw + pshufhw + N x (pextrw + pinsrw)
11700 static SDValue
11701 LowerVECTOR_SHUFFLEv8i16(SDValue Op, const X86Subtarget *Subtarget,
11702 SelectionDAG &DAG) {
11703 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
11704 SDValue V1 = SVOp->getOperand(0);
11705 SDValue V2 = SVOp->getOperand(1);
11707 SmallVector<int, 8> MaskVals;
11709 // Determine if more than 1 of the words in each of the low and high quadwords
11710 // of the result come from the same quadword of one of the two inputs. Undef
11711 // mask values count as coming from any quadword, for better codegen.
11713 // Lo/HiQuad[i] = j indicates how many words from the ith quad of the input
11714 // feeds this quad. For i, 0 and 1 refer to V1, 2 and 3 refer to V2.
11715 unsigned LoQuad[] = { 0, 0, 0, 0 };
11716 unsigned HiQuad[] = { 0, 0, 0, 0 };
11717 // Indices of quads used.
11718 std::bitset<4> InputQuads;
11719 for (unsigned i = 0; i < 8; ++i) {
11720 unsigned *Quad = i < 4 ? LoQuad : HiQuad;
11721 int EltIdx = SVOp->getMaskElt(i);
11722 MaskVals.push_back(EltIdx);
11730 ++Quad[EltIdx / 4];
11731 InputQuads.set(EltIdx / 4);
11734 int BestLoQuad = -1;
11735 unsigned MaxQuad = 1;
11736 for (unsigned i = 0; i < 4; ++i) {
11737 if (LoQuad[i] > MaxQuad) {
11739 MaxQuad = LoQuad[i];
11743 int BestHiQuad = -1;
11745 for (unsigned i = 0; i < 4; ++i) {
11746 if (HiQuad[i] > MaxQuad) {
11748 MaxQuad = HiQuad[i];
11752 // For SSSE3, If all 8 words of the result come from only 1 quadword of each
11753 // of the two input vectors, shuffle them into one input vector so only a
11754 // single pshufb instruction is necessary. If there are more than 2 input
11755 // quads, disable the next transformation since it does not help SSSE3.
11756 bool V1Used = InputQuads[0] || InputQuads[1];
11757 bool V2Used = InputQuads[2] || InputQuads[3];
11758 if (Subtarget->hasSSSE3()) {
11759 if (InputQuads.count() == 2 && V1Used && V2Used) {
11760 BestLoQuad = InputQuads[0] ? 0 : 1;
11761 BestHiQuad = InputQuads[2] ? 2 : 3;
11763 if (InputQuads.count() > 2) {
11769 // If BestLoQuad or BestHiQuad are set, shuffle the quads together and update
11770 // the shuffle mask. If a quad is scored as -1, that means that it contains
11771 // words from all 4 input quadwords.
11773 if (BestLoQuad >= 0 || BestHiQuad >= 0) {
11774   int MaskV[] = {
11775     BestLoQuad < 0 ? 0 : BestLoQuad,
11776     BestHiQuad < 0 ? 1 : BestHiQuad
11777   };
11778 NewV = DAG.getVectorShuffle(MVT::v2i64, dl,
11779 DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, V1),
11780 DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, V2), &MaskV[0]);
11781 NewV = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, NewV);
11783 // Rewrite the MaskVals and assign NewV to V1 if NewV now contains all the
11784 // source words for the shuffle, to aid later transformations.
11785 bool AllWordsInNewV = true;
11786 bool InOrder[2] = { true, true };
11787 for (unsigned i = 0; i != 8; ++i) {
11788 int idx = MaskVals[i];
11790 InOrder[i/4] = false;
11791 if (idx < 0 || (idx/4) == BestLoQuad || (idx/4) == BestHiQuad)
11793 AllWordsInNewV = false;
11797 bool pshuflw = AllWordsInNewV, pshufhw = AllWordsInNewV;
11798 if (AllWordsInNewV) {
11799 for (int i = 0; i != 8; ++i) {
11800 int idx = MaskVals[i];
11803 idx = MaskVals[i] = (idx / 4) == BestLoQuad ? (idx & 3) : (idx & 3) + 4;
11804 if ((idx != i) && idx < 4)
11806 if ((idx != i) && idx > 3)
11815 // If we've eliminated the use of V2, and the new mask is a pshuflw or
11816 // pshufhw, that's as cheap as it gets. Return the new shuffle.
11817 if ((pshufhw && InOrder[0]) || (pshuflw && InOrder[1])) {
11818 unsigned Opc = pshufhw ? X86ISD::PSHUFHW : X86ISD::PSHUFLW;
11819 unsigned TargetMask = 0;
11820 NewV = DAG.getVectorShuffle(MVT::v8i16, dl, NewV,
11821 DAG.getUNDEF(MVT::v8i16), &MaskVals[0]);
11822 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(NewV.getNode());
11823 TargetMask = pshufhw ? getShufflePSHUFHWImmediate(SVOp):
11824 getShufflePSHUFLWImmediate(SVOp);
11825 V1 = NewV.getOperand(0);
11826 return getTargetShuffleNode(Opc, dl, MVT::v8i16, V1, TargetMask, DAG);
11830 // Promote splats to a larger type which usually leads to more efficient code.
11831 // FIXME: Is this true if pshufb is available?
11832 if (SVOp->isSplat())
11833 return PromoteSplat(SVOp, DAG);
11835 // If we have SSSE3, and all words of the result are from 1 input vector,
11836 // case 2 is generated, otherwise case 3 is generated. If no SSSE3
11837 // is present, fall back to case 4.
11838 if (Subtarget->hasSSSE3()) {
11839 SmallVector<SDValue,16> pshufbMask;
11841 // If we have elements from both input vectors, set the high bit of the
11842 // shuffle mask element to zero out elements that come from V2 in the V1
11843 // mask, and elements that come from V1 in the V2 mask, so that the two
11844 // results can be OR'd together.
11845 bool TwoInputs = V1Used && V2Used;
11846 V1 = getPSHUFB(MaskVals, V1, dl, DAG);
11848 return DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V1);
11850 // Calculate the shuffle mask for the second input, shuffle it, and
11851 // OR it with the first shuffled input.
11852 CommuteVectorShuffleMask(MaskVals, 8);
11853 V2 = getPSHUFB(MaskVals, V2, dl, DAG);
11854 V1 = DAG.getNode(ISD::OR, dl, MVT::v16i8, V1, V2);
11855 return DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V1);
11858 // If BestLoQuad >= 0, generate a pshuflw to put the low elements in order,
11859 // and update MaskVals with new element order.
11860 std::bitset<8> InOrder;
11861 if (BestLoQuad >= 0) {
11862 int MaskV[] = { -1, -1, -1, -1, 4, 5, 6, 7 };
11863 for (int i = 0; i != 4; ++i) {
11864 int idx = MaskVals[i];
11867 } else if ((idx / 4) == BestLoQuad) {
11868 MaskV[i] = idx & 3;
11872 NewV = DAG.getVectorShuffle(MVT::v8i16, dl, NewV, DAG.getUNDEF(MVT::v8i16),
11875 if (NewV.getOpcode() == ISD::VECTOR_SHUFFLE && Subtarget->hasSSE2()) {
11876 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(NewV.getNode());
11877 NewV = getTargetShuffleNode(X86ISD::PSHUFLW, dl, MVT::v8i16,
11878 NewV.getOperand(0),
11879 getShufflePSHUFLWImmediate(SVOp), DAG);
11883 // If BestHi >= 0, generate a pshufhw to put the high elements in order,
11884 // and update MaskVals with the new element order.
11885 if (BestHiQuad >= 0) {
11886 int MaskV[] = { 0, 1, 2, 3, -1, -1, -1, -1 };
11887 for (unsigned i = 4; i != 8; ++i) {
11888 int idx = MaskVals[i];
11891 } else if ((idx / 4) == BestHiQuad) {
11892 MaskV[i] = (idx & 3) + 4;
11896 NewV = DAG.getVectorShuffle(MVT::v8i16, dl, NewV, DAG.getUNDEF(MVT::v8i16),
11899 if (NewV.getOpcode() == ISD::VECTOR_SHUFFLE && Subtarget->hasSSE2()) {
11900 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(NewV.getNode());
11901 NewV = getTargetShuffleNode(X86ISD::PSHUFHW, dl, MVT::v8i16,
11902 NewV.getOperand(0),
11903 getShufflePSHUFHWImmediate(SVOp), DAG);
11907 // In case BestHi & BestLo were both -1, which means each quadword has a word
11908 // from each of the four input quadwords, calculate the InOrder bitvector now
11909 // before falling through to the insert/extract cleanup.
11910 if (BestLoQuad == -1 && BestHiQuad == -1) {
11912 for (int i = 0; i != 8; ++i)
11913 if (MaskVals[i] < 0 || MaskVals[i] == i)
11917 // The other elements are put in the right place using pextrw and pinsrw.
11918 for (unsigned i = 0; i != 8; ++i) {
11921 int EltIdx = MaskVals[i];
11924 SDValue ExtOp = (EltIdx < 8) ?
11925 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16, V1,
11926 DAG.getIntPtrConstant(EltIdx)) :
11927 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16, V2,
11928 DAG.getIntPtrConstant(EltIdx - 8));
11929 NewV = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v8i16, NewV, ExtOp,
11930 DAG.getIntPtrConstant(i));
11935 /// \brief v16i16 shuffles
11937 /// FIXME: We only support generation of a single pshufb currently. We can
11938 /// generalize the other applicable cases from LowerVECTOR_SHUFFLEv8i16 as
11939 /// well (e.g 2 x pshufb + 1 x por).
11940 static SDValue
11941 LowerVECTOR_SHUFFLEv16i16(SDValue Op, SelectionDAG &DAG) {
11942 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
11943 SDValue V1 = SVOp->getOperand(0);
11944 SDValue V2 = SVOp->getOperand(1);
11947 if (V2.getOpcode() != ISD::UNDEF)
11950 SmallVector<int, 16> MaskVals(SVOp->getMask().begin(), SVOp->getMask().end());
11951 return getPSHUFB(MaskVals, V1, dl, DAG);
11954 // v16i8 shuffles - Prefer shuffles in the following order:
11955 // 1. [ssse3] 1 x pshufb
11956 // 2. [ssse3] 2 x pshufb + 1 x por
11957 // 3. [all] v8i16 shuffle + N x pextrw + rotate + pinsrw
11958 static SDValue LowerVECTOR_SHUFFLEv16i8(ShuffleVectorSDNode *SVOp,
11959 const X86Subtarget* Subtarget,
11960 SelectionDAG &DAG) {
11961 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
11962 SDValue V1 = SVOp->getOperand(0);
11963 SDValue V2 = SVOp->getOperand(1);
11965 ArrayRef<int> MaskVals = SVOp->getMask();
11967 // Promote splats to a larger type which usually leads to more efficient code.
11968 // FIXME: Is this true if pshufb is available?
11969 if (SVOp->isSplat())
11970 return PromoteSplat(SVOp, DAG);
11972 // If we have SSSE3, case 1 is generated when all result bytes come from
11973 // one of the inputs. Otherwise, case 2 is generated. If no SSSE3 is
11974 // present, fall back to case 3.
11976 // If SSSE3, use 1 pshufb instruction per vector with elements in the result.
11977 if (Subtarget->hasSSSE3()) {
11978 SmallVector<SDValue,16> pshufbMask;
11980 // If all result elements are from one input vector, then only translate
11981 // undef mask values to 0x80 (zero out result) in the pshufb mask.
11983 // Otherwise, we have elements from both input vectors, and must zero out
11984 // elements that come from V2 in the first mask, and V1 in the second mask
11985 // so that we can OR them together.
11986 for (unsigned i = 0; i != 16; ++i) {
11987 int EltIdx = MaskVals[i];
11988 if (EltIdx < 0 || EltIdx >= 16)
11990 pshufbMask.push_back(DAG.getConstant(EltIdx, MVT::i8));
11992 V1 = DAG.getNode(X86ISD::PSHUFB, dl, MVT::v16i8, V1,
11993 DAG.getNode(ISD::BUILD_VECTOR, dl,
11994 MVT::v16i8, pshufbMask));
11996 // As PSHUFB will zero elements with negative indices, it's safe to ignore
11997 // the 2nd operand if it's undefined or zero.
11998 if (V2.getOpcode() == ISD::UNDEF ||
11999 ISD::isBuildVectorAllZeros(V2.getNode()))
12002 // Calculate the shuffle mask for the second input, shuffle it, and
12003 // OR it with the first shuffled input.
12004 pshufbMask.clear();
12005 for (unsigned i = 0; i != 16; ++i) {
12006 int EltIdx = MaskVals[i];
12007 EltIdx = (EltIdx < 16) ? 0x80 : EltIdx - 16;
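// A mask byte of 0x80 makes PSHUFB zero the destination byte, so positions
// already produced by the first shuffled input are cleared here before the
// two results are OR'd together.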
12008 pshufbMask.push_back(DAG.getConstant(EltIdx, MVT::i8));
12010 V2 = DAG.getNode(X86ISD::PSHUFB, dl, MVT::v16i8, V2,
12011 DAG.getNode(ISD::BUILD_VECTOR, dl,
12012 MVT::v16i8, pshufbMask));
12013 return DAG.getNode(ISD::OR, dl, MVT::v16i8, V1, V2);
12016 // No SSSE3 - Calculate in place words and then fix all out of place words
12017 // With 0-16 extracts & inserts. Worst case is 16 bytes out of order from
12018 // the 16 different words that comprise the two doublequadword input vectors.
12019 V1 = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V1);
12020 V2 = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V2);
12022 for (int i = 0; i != 8; ++i) {
12023 int Elt0 = MaskVals[i*2];
12024 int Elt1 = MaskVals[i*2+1];
12026 // This word of the result is all undef, skip it.
12027 if (Elt0 < 0 && Elt1 < 0)
12030 // This word of the result is already in the correct place, skip it.
12031 if ((Elt0 == i*2) && (Elt1 == i*2+1))
12034 SDValue Elt0Src = Elt0 < 16 ? V1 : V2;
12035 SDValue Elt1Src = Elt1 < 16 ? V1 : V2;
12038 // If Elt0 and Elt1 are defined, are consecutive, and can be loaded
12039 // together using a single extract, extract them and insert the combined word.
12040 if ((Elt0 >= 0) && ((Elt0 + 1) == Elt1) && ((Elt0 & 1) == 0)) {
12041 InsElt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16, Elt1Src,
12042 DAG.getIntPtrConstant(Elt1 / 2));
12043 NewV = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v8i16, NewV, InsElt,
12044 DAG.getIntPtrConstant(i));
12048 // If Elt1 is defined, extract it from the appropriate source. If the
12049 // source byte is not also odd, shift the extracted word left 8 bits
12050 // otherwise clear the bottom 8 bits if we need to do an or.
12052 InsElt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16, Elt1Src,
12053 DAG.getIntPtrConstant(Elt1 / 2));
12054 if ((Elt1 & 1) == 0)
12055 InsElt = DAG.getNode(ISD::SHL, dl, MVT::i16, InsElt,
12057 TLI.getShiftAmountTy(InsElt.getValueType())));
12058 else if (Elt0 >= 0)
12059 InsElt = DAG.getNode(ISD::AND, dl, MVT::i16, InsElt,
12060 DAG.getConstant(0xFF00, MVT::i16));
12062 // If Elt0 is defined, extract it from the appropriate source. If the
12063 // source byte is not also even, shift the extracted word right 8 bits. If
12064 // Elt1 was also defined, OR the extracted values together before
12065 // inserting them in the result.
12067 SDValue InsElt0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16,
12068 Elt0Src, DAG.getIntPtrConstant(Elt0 / 2));
12069 if ((Elt0 & 1) != 0)
12070 InsElt0 = DAG.getNode(ISD::SRL, dl, MVT::i16, InsElt0,
12072 TLI.getShiftAmountTy(InsElt0.getValueType())));
12073 else if (Elt1 >= 0)
12074 InsElt0 = DAG.getNode(ISD::AND, dl, MVT::i16, InsElt0,
12075 DAG.getConstant(0x00FF, MVT::i16));
12076 InsElt = Elt1 >= 0 ? DAG.getNode(ISD::OR, dl, MVT::i16, InsElt, InsElt0)
12079 NewV = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v8i16, NewV, InsElt,
12080 DAG.getIntPtrConstant(i));
12082 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, NewV);
12085 // v32i8 shuffles - Translate to VPSHUFB if possible.
12086 static
12087 SDValue LowerVECTOR_SHUFFLEv32i8(ShuffleVectorSDNode *SVOp,
12088 const X86Subtarget *Subtarget,
12089 SelectionDAG &DAG) {
12090 MVT VT = SVOp->getSimpleValueType(0);
12091 SDValue V1 = SVOp->getOperand(0);
12092 SDValue V2 = SVOp->getOperand(1);
12094 SmallVector<int, 32> MaskVals(SVOp->getMask().begin(), SVOp->getMask().end());
12096 bool V2IsUndef = V2.getOpcode() == ISD::UNDEF;
12097 bool V1IsAllZero = ISD::isBuildVectorAllZeros(V1.getNode());
12098 bool V2IsAllZero = ISD::isBuildVectorAllZeros(V2.getNode());
12100 // VPSHUFB may be generated if
12101 // (1) one of input vector is undefined or zeroinitializer.
12102 // The mask value 0x80 puts 0 in the corresponding slot of the vector.
12103 // And (2) the mask indexes don't cross the 128-bit lane.
12104 if (VT != MVT::v32i8 || !Subtarget->hasInt256() ||
12105 (!V2IsUndef && !V2IsAllZero && !V1IsAllZero))
12108 if (V1IsAllZero && !V2IsAllZero) {
12109 CommuteVectorShuffleMask(MaskVals, 32);
12112 return getPSHUFB(MaskVals, V1, dl, DAG);
12115 /// RewriteAsNarrowerShuffle - Try rewriting v8i16 and v16i8 shuffles as 4 wide
12116 /// ones, or rewriting v4i32 / v4f32 as 2 wide ones if possible. This can be
12117 /// done when every pair / quad of shuffle mask elements points to elements in
12118 /// the right sequence. e.g.
12119 /// vector_shuffle X, Y, <2, 3, | 10, 11, | 0, 1, | 14, 15>
12120 static
12121 SDValue RewriteAsNarrowerShuffle(ShuffleVectorSDNode *SVOp,
12122 SelectionDAG &DAG) {
12123 MVT VT = SVOp->getSimpleValueType(0);
12125 unsigned NumElems = VT.getVectorNumElements();
12128 switch (VT.SimpleTy) {
12129 default: llvm_unreachable("Unexpected!");
12132 return SDValue(SVOp, 0);
12133 case MVT::v4f32: NewVT = MVT::v2f64; Scale = 2; break;
12134 case MVT::v4i32: NewVT = MVT::v2i64; Scale = 2; break;
12135 case MVT::v8i16: NewVT = MVT::v4i32; Scale = 2; break;
12136 case MVT::v16i8: NewVT = MVT::v4i32; Scale = 4; break;
12137 case MVT::v16i16: NewVT = MVT::v8i32; Scale = 2; break;
12138 case MVT::v32i8: NewVT = MVT::v8i32; Scale = 4; break;
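// For the example in the comment above, the v8i16 mask
// <2, 3, 10, 11, 0, 1, 14, 15> (Scale = 2) becomes the v4i32 mask <1, 5, 0, 7>.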
12141 SmallVector<int, 8> MaskVec;
12142 for (unsigned i = 0; i != NumElems; i += Scale) {
12144 for (unsigned j = 0; j != Scale; ++j) {
12145 int EltIdx = SVOp->getMaskElt(i+j);
12149 StartIdx = (EltIdx / Scale);
12150 if (EltIdx != (int)(StartIdx*Scale + j))
12153 MaskVec.push_back(StartIdx);
12156 SDValue V1 = DAG.getNode(ISD::BITCAST, dl, NewVT, SVOp->getOperand(0));
12157 SDValue V2 = DAG.getNode(ISD::BITCAST, dl, NewVT, SVOp->getOperand(1));
12158 return DAG.getVectorShuffle(NewVT, dl, V1, V2, &MaskVec[0]);
12161 /// getVZextMovL - Return a zero-extending vector move low node.
12163 static SDValue getVZextMovL(MVT VT, MVT OpVT,
12164 SDValue SrcOp, SelectionDAG &DAG,
12165 const X86Subtarget *Subtarget, SDLoc dl) {
12166 if (VT == MVT::v2f64 || VT == MVT::v4f32) {
12167 LoadSDNode *LD = nullptr;
12168 if (!isScalarLoadToVector(SrcOp.getNode(), &LD))
12169 LD = dyn_cast<LoadSDNode>(SrcOp);
12171 // movssrr and movsdrr do not clear top bits. Try to use movd, movq
12172 // instead.
12173 MVT ExtVT = (OpVT == MVT::v2f64) ? MVT::i64 : MVT::i32;
12174 if ((ExtVT != MVT::i64 || Subtarget->is64Bit()) &&
12175 SrcOp.getOpcode() == ISD::SCALAR_TO_VECTOR &&
12176 SrcOp.getOperand(0).getOpcode() == ISD::BITCAST &&
12177 SrcOp.getOperand(0).getOperand(0).getValueType() == ExtVT) {
12179 OpVT = (OpVT == MVT::v2f64) ? MVT::v2i64 : MVT::v4i32;
12180 return DAG.getNode(ISD::BITCAST, dl, VT,
12181 DAG.getNode(X86ISD::VZEXT_MOVL, dl, OpVT,
12182 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl,
12184 SrcOp.getOperand(0)
12190 return DAG.getNode(ISD::BITCAST, dl, VT,
12191 DAG.getNode(X86ISD::VZEXT_MOVL, dl, OpVT,
12192 DAG.getNode(ISD::BITCAST, dl,
12196 /// LowerVECTOR_SHUFFLE_256 - Handle all 256-bit wide vector shuffles
12197 /// which could not be matched by any known target specific shuffle
12198 static SDValue
12199 LowerVECTOR_SHUFFLE_256(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG) {
12201 SDValue NewOp = Compact8x32ShuffleNode(SVOp, DAG);
12202 if (NewOp.getNode())
12205 MVT VT = SVOp->getSimpleValueType(0);
12207 unsigned NumElems = VT.getVectorNumElements();
12208 unsigned NumLaneElems = NumElems / 2;
12211 MVT EltVT = VT.getVectorElementType();
12212 MVT NVT = MVT::getVectorVT(EltVT, NumLaneElems);
12215 SmallVector<int, 16> Mask;
12216 for (unsigned l = 0; l < 2; ++l) {
12217 // Build a shuffle mask for the output, discovering on the fly which
12218 // input vectors to use as shuffle operands (recorded in InputUsed).
12219 // If building a suitable shuffle vector proves too hard, then bail
12220 // out with UseBuildVector set.
12221 bool UseBuildVector = false;
12222 int InputUsed[2] = { -1, -1 }; // Not yet discovered.
12223 unsigned LaneStart = l * NumLaneElems;
12224 for (unsigned i = 0; i != NumLaneElems; ++i) {
12225 // The mask element. This indexes into the input.
12226 int Idx = SVOp->getMaskElt(i+LaneStart);
12227 if (Idx < 0) {
12228   // The mask element does not index into any input vector.
12229   Mask.push_back(-1);
12230   continue;
12231 }
12233 // The input vector this mask element indexes into.
12234 int Input = Idx / NumLaneElems;
12236 // Turn the index into an offset from the start of the input vector.
12237 Idx -= Input * NumLaneElems;
12239 // Find or create a shuffle vector operand to hold this input.
12241 for (OpNo = 0; OpNo < array_lengthof(InputUsed); ++OpNo) {
12242 if (InputUsed[OpNo] == Input)
12243 // This input vector is already an operand.
12245 if (InputUsed[OpNo] < 0) {
12246 // Create a new operand for this input vector.
12247 InputUsed[OpNo] = Input;
12252 if (OpNo >= array_lengthof(InputUsed)) {
12253 // More than two input vectors used! Give up on trying to create a
12254 // shuffle vector. Insert all elements into a BUILD_VECTOR instead.
12255 UseBuildVector = true;
12259 // Add the mask index for the new shuffle vector.
12260 Mask.push_back(Idx + OpNo * NumLaneElems);
12263 if (UseBuildVector) {
12264 SmallVector<SDValue, 16> SVOps;
12265 for (unsigned i = 0; i != NumLaneElems; ++i) {
12266 // The mask element. This indexes into the input.
12267 int Idx = SVOp->getMaskElt(i+LaneStart);
12269 SVOps.push_back(DAG.getUNDEF(EltVT));
12273 // The input vector this mask element indexes into.
12274 int Input = Idx / NumElems;
12276 // Turn the index into an offset from the start of the input vector.
12277 Idx -= Input * NumElems;
12279 // Extract the vector element by hand.
12280 SVOps.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT,
12281 SVOp->getOperand(Input),
12282 DAG.getIntPtrConstant(Idx)));
12285 // Construct the output using a BUILD_VECTOR.
12286 Output[l] = DAG.getNode(ISD::BUILD_VECTOR, dl, NVT, SVOps);
12287 } else if (InputUsed[0] < 0) {
12288 // No input vectors were used! The result is undefined.
12289 Output[l] = DAG.getUNDEF(NVT);
12291 SDValue Op0 = Extract128BitVector(SVOp->getOperand(InputUsed[0] / 2),
12292 (InputUsed[0] % 2) * NumLaneElems,
12294 // If only one input was used, use an undefined vector for the other.
12295 SDValue Op1 = (InputUsed[1] < 0) ? DAG.getUNDEF(NVT) :
12296 Extract128BitVector(SVOp->getOperand(InputUsed[1] / 2),
12297 (InputUsed[1] % 2) * NumLaneElems, DAG, dl);
12298 // At least one input vector was used. Create a new shuffle vector.
12299 Output[l] = DAG.getVectorShuffle(NVT, dl, Op0, Op1, &Mask[0]);
12305 // Concatenate the result back
12306 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Output[0], Output[1]);
12309 /// LowerVECTOR_SHUFFLE_128v4 - Handle all 128-bit wide vectors with
12310 /// 4 elements, and match them with several different shuffle types.
12311 static SDValue
12312 LowerVECTOR_SHUFFLE_128v4(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG) {
12313 SDValue V1 = SVOp->getOperand(0);
12314 SDValue V2 = SVOp->getOperand(1);
12316 MVT VT = SVOp->getSimpleValueType(0);
12318 assert(VT.is128BitVector() && "Unsupported vector size");
12320 std::pair<int, int> Locs[4];
12321 int Mask1[] = { -1, -1, -1, -1 };
12322 SmallVector<int, 8> PermMask(SVOp->getMask().begin(), SVOp->getMask().end());
12324 unsigned NumHi = 0;
12325 unsigned NumLo = 0;
12326 for (unsigned i = 0; i != 4; ++i) {
12327 int Idx = PermMask[i];
12329 Locs[i] = std::make_pair(-1, -1);
12331 assert(Idx < 8 && "Invalid VECTOR_SHUFFLE index!");
12333 Locs[i] = std::make_pair(0, NumLo);
12334 Mask1[NumLo] = Idx;
12337 Locs[i] = std::make_pair(1, NumHi);
12339 Mask1[2+NumHi] = Idx;
12345 if (NumLo <= 2 && NumHi <= 2) {
12346 // If no more than two elements come from either vector, this can be
12347 // implemented with two shuffles. The first shuffle gathers the elements.
12348 // The second shuffle, which takes the first shuffle as both of its
12349 // vector operands, puts the elements into the right order.
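// As a concrete illustration of the two-shuffle strategy (example mask chosen
// arbitrarily): for a v4i32 mask <0,4,1,5>, the first shuffle uses mask
// <0,1,4,5> to gather the needed elements (both V1 elements into the low
// half, both V2 elements into the high half), and the second shuffle reorders
// them with mask <0,2,5,7>, where both operands are the first shuffle's
// result.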
12350 V1 = DAG.getVectorShuffle(VT, dl, V1, V2, &Mask1[0]);
12352 int Mask2[] = { -1, -1, -1, -1 };
12354 for (unsigned i = 0; i != 4; ++i)
12355 if (Locs[i].first != -1) {
12356 unsigned Idx = (i < 2) ? 0 : 4;
12357 Idx += Locs[i].first * 2 + Locs[i].second;
12361 return DAG.getVectorShuffle(VT, dl, V1, V1, &Mask2[0]);
12364 if (NumLo == 3 || NumHi == 3) {
12365 // Otherwise, we must have three elements from one vector, call it X, and
12366 // one element from the other, call it Y. First, use a shufps to build an
12367 // intermediate vector with the one element from Y and the element from X
12368 // that will be in the same half in the final destination (the indexes don't
12369 // matter). Then, use a shufps to build the final vector, taking the half
12370 // containing the element from Y from the intermediate, and the other half
12373 // Normalize it so the 3 elements come from V1.
12374 CommuteVectorShuffleMask(PermMask, 4);
12378 // Find the element from V2.
12380 for (HiIndex = 0; HiIndex < 3; ++HiIndex) {
12381 int Val = PermMask[HiIndex];
12388 Mask1[0] = PermMask[HiIndex];
12390 Mask1[2] = PermMask[HiIndex^1];
12392 V2 = DAG.getVectorShuffle(VT, dl, V1, V2, &Mask1[0]);
12394 if (HiIndex >= 2) {
12395 Mask1[0] = PermMask[0];
12396 Mask1[1] = PermMask[1];
12397 Mask1[2] = HiIndex & 1 ? 6 : 4;
12398 Mask1[3] = HiIndex & 1 ? 4 : 6;
12399 return DAG.getVectorShuffle(VT, dl, V1, V2, &Mask1[0]);
12402 Mask1[0] = HiIndex & 1 ? 2 : 0;
12403 Mask1[1] = HiIndex & 1 ? 0 : 2;
12404 Mask1[2] = PermMask[2];
12405 Mask1[3] = PermMask[3];
12410 return DAG.getVectorShuffle(VT, dl, V2, V1, &Mask1[0]);
12413 // Break it into (shuffle shuffle_hi, shuffle_lo).
12414 int LoMask[] = { -1, -1, -1, -1 };
12415 int HiMask[] = { -1, -1, -1, -1 };
12417 int *MaskPtr = LoMask;
12418 unsigned MaskIdx = 0;
12419 unsigned LoIdx = 0;
12420 unsigned HiIdx = 2;
12421 for (unsigned i = 0; i != 4; ++i) {
12428 int Idx = PermMask[i];
12430 Locs[i] = std::make_pair(-1, -1);
12431 } else if (Idx < 4) {
12432 Locs[i] = std::make_pair(MaskIdx, LoIdx);
12433 MaskPtr[LoIdx] = Idx;
12436 Locs[i] = std::make_pair(MaskIdx, HiIdx);
12437 MaskPtr[HiIdx] = Idx;
12442 SDValue LoShuffle = DAG.getVectorShuffle(VT, dl, V1, V2, &LoMask[0]);
12443 SDValue HiShuffle = DAG.getVectorShuffle(VT, dl, V1, V2, &HiMask[0]);
12444 int MaskOps[] = { -1, -1, -1, -1 };
12445 for (unsigned i = 0; i != 4; ++i)
12446 if (Locs[i].first != -1)
12447 MaskOps[i] = Locs[i].first * 4 + Locs[i].second;
12448 return DAG.getVectorShuffle(VT, dl, LoShuffle, HiShuffle, &MaskOps[0]);
12451 static bool MayFoldVectorLoad(SDValue V) {
12452 while (V.hasOneUse() && V.getOpcode() == ISD::BITCAST)
12453 V = V.getOperand(0);
12455 if (V.hasOneUse() && V.getOpcode() == ISD::SCALAR_TO_VECTOR)
12456 V = V.getOperand(0);
12457 if (V.hasOneUse() && V.getOpcode() == ISD::BUILD_VECTOR &&
12458 V.getNumOperands() == 2 && V.getOperand(1).getOpcode() == ISD::UNDEF)
12459 // BUILD_VECTOR (load), undef
12460 V = V.getOperand(0);
12462 return MayFoldLoad(V);
12466 SDValue getMOVDDup(SDValue &Op, SDLoc &dl, SDValue V1, SelectionDAG &DAG) {
12467 MVT VT = Op.getSimpleValueType();
12469 // Canonicalize to v2f64.
12470 V1 = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, V1);
12471 return DAG.getNode(ISD::BITCAST, dl, VT,
12472 getTargetShuffleNode(X86ISD::MOVDDUP, dl, MVT::v2f64,
12477 SDValue getMOVLowToHigh(SDValue &Op, SDLoc &dl, SelectionDAG &DAG,
12479 SDValue V1 = Op.getOperand(0);
12480 SDValue V2 = Op.getOperand(1);
12481 MVT VT = Op.getSimpleValueType();
12483 assert(VT != MVT::v2i64 && "unsupported shuffle type");
12485 if (HasSSE2 && VT == MVT::v2f64)
12486 return getTargetShuffleNode(X86ISD::MOVLHPD, dl, VT, V1, V2, DAG);
12488 // v4f32 or v4i32: canonicalize to v4f32 (which is legal for SSE1)
12489 return DAG.getNode(ISD::BITCAST, dl, VT,
12490 getTargetShuffleNode(X86ISD::MOVLHPS, dl, MVT::v4f32,
12491 DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, V1),
12492 DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, V2), DAG));
12496 SDValue getMOVHighToLow(SDValue &Op, SDLoc &dl, SelectionDAG &DAG) {
12497 SDValue V1 = Op.getOperand(0);
12498 SDValue V2 = Op.getOperand(1);
12499 MVT VT = Op.getSimpleValueType();
12501 assert((VT == MVT::v4i32 || VT == MVT::v4f32) &&
12502 "unsupported shuffle type");
12504 if (V2.getOpcode() == ISD::UNDEF)
12508 return getTargetShuffleNode(X86ISD::MOVHLPS, dl, VT, V1, V2, DAG);
12512 SDValue getMOVLP(SDValue &Op, SDLoc &dl, SelectionDAG &DAG, bool HasSSE2) {
12513 SDValue V1 = Op.getOperand(0);
12514 SDValue V2 = Op.getOperand(1);
12515 MVT VT = Op.getSimpleValueType();
12516 unsigned NumElems = VT.getVectorNumElements();
12518 // Use MOVLPS and MOVLPD in case V1 or V2 are loads. During isel, the second
12519 // operand of these instructions is only memory, so check if there's a
12520 // potential load folding here, otherwise use SHUFPS or MOVSD to match the
12522 bool CanFoldLoad = false;
12524 // Trivial case, when V2 comes from a load.
12525 if (MayFoldVectorLoad(V2))
12526 CanFoldLoad = true;
12528 // When V1 is a load, it can be folded later into a store in isel, example:
12529 // (store (v4f32 (X86Movlps (load addr:$src1), VR128:$src2)), addr:$src1)
12531 // (MOVLPSmr addr:$src1, VR128:$src2)
12532 // So, recognize this potential and also use MOVLPS or MOVLPD
12533 else if (MayFoldVectorLoad(V1) && MayFoldIntoStore(Op))
12534 CanFoldLoad = true;
12536 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
12538 if (HasSSE2 && NumElems == 2)
12539 return getTargetShuffleNode(X86ISD::MOVLPD, dl, VT, V1, V2, DAG);
12542 // If we don't care about the second element, proceed to use movss.
12543 if (SVOp->getMaskElt(1) != -1)
12544 return getTargetShuffleNode(X86ISD::MOVLPS, dl, VT, V1, V2, DAG);
12547 // movl and movlp will both match v2i64, but v2i64 is never matched by
12548 // movl earlier because we make it strict to avoid messing with the movlp load
12549 // folding logic (see the code above the getMOVLP call). So match it here;
12550 // this is horrible, but it will stay like this until we move all shuffle
12551 // matching to x86-specific nodes. Note that for the 1st condition all
12552 // types are matched with movsd.
12554 // FIXME: isMOVLMask should be checked and matched before getMOVLP,
12555 // as to remove this logic from here, as much as possible
12556 if (NumElems == 2 || !isMOVLMask(SVOp->getMask(), VT))
12557 return getTargetShuffleNode(X86ISD::MOVSD, dl, VT, V1, V2, DAG);
12558 return getTargetShuffleNode(X86ISD::MOVSS, dl, VT, V1, V2, DAG);
12561 assert(VT != MVT::v4i32 && "unsupported shuffle type");
12563 // Invert the operand order and use SHUFPS to match it.
12564 return getTargetShuffleNode(X86ISD::SHUFP, dl, VT, V2, V1,
12565 getShuffleSHUFImmediate(SVOp), DAG);
12568 static SDValue NarrowVectorLoadToElement(LoadSDNode *Load, unsigned Index,
12569 SelectionDAG &DAG) {
12571 MVT VT = Load->getSimpleValueType(0);
12572 MVT EVT = VT.getVectorElementType();
12573 SDValue Addr = Load->getOperand(1);
12574 SDValue NewAddr = DAG.getNode(
12575 ISD::ADD, dl, Addr.getSimpleValueType(), Addr,
12576 DAG.getConstant(Index * EVT.getStoreSize(), Addr.getSimpleValueType()));
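// For example: narrowing element 2 of a v4f32 load produces an f32 load at
// the original address plus 8 bytes (Index * the element's store size).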
12579 DAG.getLoad(EVT, dl, Load->getChain(), NewAddr,
12580 DAG.getMachineFunction().getMachineMemOperand(
12581 Load->getMemOperand(), 0, EVT.getStoreSize()));
12585 // It is only safe to call this function if isINSERTPSMask is true for
12586 // this shufflevector mask.
12587 static SDValue getINSERTPS(ShuffleVectorSDNode *SVOp, SDLoc &dl,
12588 SelectionDAG &DAG) {
12589 // Generate an insertps instruction when inserting an f32 from memory onto a
12590 // v4f32 or when copying a member from one v4f32 to another.
12591 // We also use it for transferring i32 from one register to another,
12592 // since it simply copies the same bits.
12593 // If we're transferring an i32 from memory to a specific element in a
12594 // register, we output a generic DAG that will match the PINSRD
12596 MVT VT = SVOp->getSimpleValueType(0);
12597 MVT EVT = VT.getVectorElementType();
12598 SDValue V1 = SVOp->getOperand(0);
12599 SDValue V2 = SVOp->getOperand(1);
12600 auto Mask = SVOp->getMask();
12601 assert((VT == MVT::v4f32 || VT == MVT::v4i32) &&
12602 "unsupported vector type for insertps/pinsrd");
12604 auto FromV1Predicate = [](const int &i) { return i < 4 && i > -1; };
12605 auto FromV2Predicate = [](const int &i) { return i >= 4; };
12606 int FromV1 = std::count_if(Mask.begin(), Mask.end(), FromV1Predicate);
12610 unsigned DestIndex;
12614 DestIndex = std::find_if(Mask.begin(), Mask.end(), FromV1Predicate) -
12617 // If we have 1 element from each vector, we have to check if we're
12618 // changing V1's element's place. If so, we're done. Otherwise, we
12619 // should assume we're changing V2's element's place and behave
12621 int FromV2 = std::count_if(Mask.begin(), Mask.end(), FromV2Predicate);
12622 assert(DestIndex <= INT32_MAX && "truncated destination index");
12623 if (FromV1 == FromV2 &&
12624 static_cast<int>(DestIndex) == Mask[DestIndex] % 4) {
12628 std::find_if(Mask.begin(), Mask.end(), FromV2Predicate) - Mask.begin();
12631 assert(std::count_if(Mask.begin(), Mask.end(), FromV2Predicate) == 1 &&
12632 "More than one element from V1 and from V2, or no elements from one "
12633 "of the vectors. This case should not have returned true from "
12638 std::find_if(Mask.begin(), Mask.end(), FromV2Predicate) - Mask.begin();
12641 // Get an index into the source vector in the range [0,4) (the mask is
12642 // in the range [0,8) because it can address V1 and V2)
12643 unsigned SrcIndex = Mask[DestIndex] % 4;
12644 if (MayFoldLoad(From)) {
12645 // Trivial case, when From comes from a load and is only used by the
12646 // shuffle. Make it use insertps from the vector that we need from that
12649 NarrowVectorLoadToElement(cast<LoadSDNode>(From), SrcIndex, DAG);
12650 if (!NewLoad.getNode())
12653 if (EVT == MVT::f32) {
12654 // Create this as a scalar to vector to match the instruction pattern.
12655 SDValue LoadScalarToVector =
12656 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, NewLoad);
12657 SDValue InsertpsMask = DAG.getIntPtrConstant(DestIndex << 4);
12658 return DAG.getNode(X86ISD::INSERTPS, dl, VT, To, LoadScalarToVector,
12660 } else { // EVT == MVT::i32
12661 // If we're getting an i32 from memory, use an INSERT_VECTOR_ELT
12662 // instruction, to match the PINSRD instruction, which loads an i32 to a
12663 // certain vector element.
12664 return DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, To, NewLoad,
12665 DAG.getConstant(DestIndex, MVT::i32));
12669 // Vector-element-to-vector
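// A worked example of the immediate built below (illustrative values):
// insertps packs the source element index into bits [7:6] and the destination
// index into bits [5:4], so DestIndex = 2 and SrcIndex = 1 encode as
// (2 << 4) | (1 << 6) = 0x60.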
12670 SDValue InsertpsMask = DAG.getIntPtrConstant(DestIndex << 4 | SrcIndex << 6);
12671 return DAG.getNode(X86ISD::INSERTPS, dl, VT, To, From, InsertpsMask);
12674 // Reduce a vector shuffle to zext.
12675 static SDValue LowerVectorIntExtend(SDValue Op, const X86Subtarget *Subtarget,
12676 SelectionDAG &DAG) {
12677 // PMOVZX is only available from SSE41.
12678 if (!Subtarget->hasSSE41())
12681 MVT VT = Op.getSimpleValueType();
12683 // Only AVX2 supports 256-bit vector integer extending.
12684 if (!Subtarget->hasInt256() && VT.is256BitVector())
12687 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
12689 SDValue V1 = Op.getOperand(0);
12690 SDValue V2 = Op.getOperand(1);
12691 unsigned NumElems = VT.getVectorNumElements();
12693 // Extending is a unary operation and the element type of the source vector
12694 // won't be equal to or larger than i64.
12695 if (V2.getOpcode() != ISD::UNDEF || !VT.isInteger() ||
12696 VT.getVectorElementType() == MVT::i64)
12699 // Find the expansion ratio, e.g. expanding from i8 to i32 has a ratio of 4.
12700 unsigned Shift = 1; // Start from 2, i.e. 1 << 1.
12701 while ((1U << Shift) < NumElems) {
12702 if (SVOp->getMaskElt(1U << Shift) == 1)
12705 // The maximal ratio is 8, i.e. from i8 to i64.
12710 // Check the shuffle mask.
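// For example (illustrative): widening v8i16 to v4i32 has Shift = 1, so a
// matching mask looks like <0, -1, 1, -1, 2, -1, 3, -1>: every odd element
// must be undef and every even element i must equal i >> Shift.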
12711 unsigned Mask = (1U << Shift) - 1;
12712 for (unsigned i = 0; i != NumElems; ++i) {
12713 int EltIdx = SVOp->getMaskElt(i);
12714 if ((i & Mask) != 0 && EltIdx != -1)
12716 if ((i & Mask) == 0 && (unsigned)EltIdx != (i >> Shift))
12720 unsigned NBits = VT.getVectorElementType().getSizeInBits() << Shift;
12721 MVT NeVT = MVT::getIntegerVT(NBits);
12722 MVT NVT = MVT::getVectorVT(NeVT, NumElems >> Shift);
12724 if (!DAG.getTargetLoweringInfo().isTypeLegal(NVT))
12727 return DAG.getNode(ISD::BITCAST, DL, VT,
12728 DAG.getNode(X86ISD::VZEXT, DL, NVT, V1));
12731 static SDValue NormalizeVectorShuffle(SDValue Op, const X86Subtarget *Subtarget,
12732 SelectionDAG &DAG) {
12733 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
12734 MVT VT = Op.getSimpleValueType();
12736 SDValue V1 = Op.getOperand(0);
12737 SDValue V2 = Op.getOperand(1);
12739 if (isZeroShuffle(SVOp))
12740 return getZeroVector(VT, Subtarget, DAG, dl);
12742 // Handle splat operations
12743 if (SVOp->isSplat()) {
12744 // Use vbroadcast whenever the splat comes from a foldable load
12745 SDValue Broadcast = LowerVectorBroadcast(Op, Subtarget, DAG);
12746 if (Broadcast.getNode())
12750 // Check integer expanding shuffles.
12751 SDValue NewOp = LowerVectorIntExtend(Op, Subtarget, DAG);
12752 if (NewOp.getNode())
12755 // If the shuffle can be profitably rewritten as a narrower shuffle, then
12757 if (VT == MVT::v8i16 || VT == MVT::v16i8 || VT == MVT::v16i16 ||
12758 VT == MVT::v32i8) {
12759 SDValue NewOp = RewriteAsNarrowerShuffle(SVOp, DAG);
12760 if (NewOp.getNode())
12761 return DAG.getNode(ISD::BITCAST, dl, VT, NewOp);
12762 } else if (VT.is128BitVector() && Subtarget->hasSSE2()) {
12763 // FIXME: Figure out a cleaner way to do this.
12764 if (ISD::isBuildVectorAllZeros(V2.getNode())) {
12765 SDValue NewOp = RewriteAsNarrowerShuffle(SVOp, DAG);
12766 if (NewOp.getNode()) {
12767 MVT NewVT = NewOp.getSimpleValueType();
12768 if (isCommutedMOVLMask(cast<ShuffleVectorSDNode>(NewOp)->getMask(),
12769 NewVT, true, false))
12770 return getVZextMovL(VT, NewVT, NewOp.getOperand(0), DAG, Subtarget,
12773 } else if (ISD::isBuildVectorAllZeros(V1.getNode())) {
12774 SDValue NewOp = RewriteAsNarrowerShuffle(SVOp, DAG);
12775 if (NewOp.getNode()) {
12776 MVT NewVT = NewOp.getSimpleValueType();
12777 if (isMOVLMask(cast<ShuffleVectorSDNode>(NewOp)->getMask(), NewVT))
12778 return getVZextMovL(VT, NewVT, NewOp.getOperand(1), DAG, Subtarget,
12787 X86TargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const {
12788 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
12789 SDValue V1 = Op.getOperand(0);
12790 SDValue V2 = Op.getOperand(1);
12791 MVT VT = Op.getSimpleValueType();
12793 unsigned NumElems = VT.getVectorNumElements();
12794 bool V1IsUndef = V1.getOpcode() == ISD::UNDEF;
12795 bool V2IsUndef = V2.getOpcode() == ISD::UNDEF;
12796 bool V1IsSplat = false;
12797 bool V2IsSplat = false;
12798 bool HasSSE2 = Subtarget->hasSSE2();
12799 bool HasFp256 = Subtarget->hasFp256();
12800 bool HasInt256 = Subtarget->hasInt256();
12801 MachineFunction &MF = DAG.getMachineFunction();
12803 bool OptForSize = MF.getFunction()->hasFnAttribute(Attribute::OptimizeForSize);
12805 // Check if we should use the experimental vector shuffle lowering. If so,
12806 // delegate completely to that code path.
12807 if (ExperimentalVectorShuffleLowering)
12808 return lowerVectorShuffle(Op, Subtarget, DAG);
12810 assert(VT.getSizeInBits() != 64 && "Can't lower MMX shuffles");
12812 if (V1IsUndef && V2IsUndef)
12813 return DAG.getUNDEF(VT);
12815 // When we create a shuffle node we put the UNDEF node to second operand,
12816 // but in some cases the first operand may be transformed to UNDEF.
12817 // In this case we should just commute the node.
12819 return DAG.getCommutedVectorShuffle(*SVOp);
12821 // Vector shuffle lowering takes 3 steps:
12823 // 1) Normalize the input vectors. Here splats, zeroed vectors, profitable
12824 // narrowing and commutation of operands should be handled.
12825 // 2) Matching of shuffles with known shuffle masks to x86 target specific
12827 // 3) Rewriting of unmatched masks into new generic shuffle operations,
12828 // so the shuffle can be broken into other shuffles and the legalizer can
12829 // try the lowering again.
12831 // The general idea is that no vector_shuffle operation should be left to
12832 // be matched during isel, all of them must be converted to a target specific
12835 // Normalize the input vectors. Here splats, zeroed vectors, profitable
12836 // narrowing and commutation of operands should be handled. The actual code
12837 // doesn't include all of those, work in progress...
12838 SDValue NewOp = NormalizeVectorShuffle(Op, Subtarget, DAG);
12839 if (NewOp.getNode())
12842 SmallVector<int, 8> M(SVOp->getMask().begin(), SVOp->getMask().end());
12844 // NOTE: isPSHUFDMask can also match both masks below (unpckl_undef and
12845 // unpckh_undef). Only use pshufd if speed is more important than size.
12846 if (OptForSize && isUNPCKL_v_undef_Mask(M, VT, HasInt256))
12847 return getTargetShuffleNode(X86ISD::UNPCKL, dl, VT, V1, V1, DAG);
12848 if (OptForSize && isUNPCKH_v_undef_Mask(M, VT, HasInt256))
12849 return getTargetShuffleNode(X86ISD::UNPCKH, dl, VT, V1, V1, DAG);
12851 if (isMOVDDUPMask(M, VT) && Subtarget->hasSSE3() &&
12852 V2IsUndef && MayFoldVectorLoad(V1))
12853 return getMOVDDup(Op, dl, V1, DAG);
12855 if (isMOVHLPS_v_undef_Mask(M, VT))
12856 return getMOVHighToLow(Op, dl, DAG);
12858 // Used to match splats
12859 if (HasSSE2 && isUNPCKHMask(M, VT, HasInt256) && V2IsUndef &&
12860 (VT == MVT::v2f64 || VT == MVT::v2i64))
12861 return getTargetShuffleNode(X86ISD::UNPCKH, dl, VT, V1, V1, DAG);
12863 if (isPSHUFDMask(M, VT)) {
12864 // The actual implementation will match the mask in the if above, and then
12865 // during isel it can match several different instructions, not only pshufd
12866 // as its name suggests. Sad but true; emulate the behavior for now...
12867 if (isMOVDDUPMask(M, VT) && ((VT == MVT::v4f32 || VT == MVT::v2i64)))
12868 return getTargetShuffleNode(X86ISD::MOVLHPS, dl, VT, V1, V1, DAG);
12870 unsigned TargetMask = getShuffleSHUFImmediate(SVOp);
12872 if (HasSSE2 && (VT == MVT::v4f32 || VT == MVT::v4i32))
12873 return getTargetShuffleNode(X86ISD::PSHUFD, dl, VT, V1, TargetMask, DAG);
12875 if (HasFp256 && (VT == MVT::v4f32 || VT == MVT::v2f64))
12876 return getTargetShuffleNode(X86ISD::VPERMILPI, dl, VT, V1, TargetMask,
12879 return getTargetShuffleNode(X86ISD::SHUFP, dl, VT, V1, V1,
12883 if (isPALIGNRMask(M, VT, Subtarget))
12884 return getTargetShuffleNode(X86ISD::PALIGNR, dl, VT, V1, V2,
12885 getShufflePALIGNRImmediate(SVOp),
12888 if (isVALIGNMask(M, VT, Subtarget))
12889 return getTargetShuffleNode(X86ISD::VALIGN, dl, VT, V1, V2,
12890 getShuffleVALIGNImmediate(SVOp),
12893 // Check if this can be converted into a logical shift.
12894 bool isLeft = false;
12895 unsigned ShAmt = 0;
12897 bool isShift = HasSSE2 && isVectorShift(SVOp, DAG, isLeft, ShVal, ShAmt);
12898 if (isShift && ShVal.hasOneUse()) {
12899 // If the shifted value has multiple uses, it may be cheaper to use
12900 // v_set0 + movlhps or movhlps, etc.
12901 MVT EltVT = VT.getVectorElementType();
12902 ShAmt *= EltVT.getSizeInBits();
12903 return getVShift(isLeft, VT, ShVal, ShAmt, DAG, *this, dl);
12906 if (isMOVLMask(M, VT)) {
12907 if (ISD::isBuildVectorAllZeros(V1.getNode()))
12908 return getVZextMovL(VT, VT, V2, DAG, Subtarget, dl);
12909 if (!isMOVLPMask(M, VT)) {
12910 if (HasSSE2 && (VT == MVT::v2i64 || VT == MVT::v2f64))
12911 return getTargetShuffleNode(X86ISD::MOVSD, dl, VT, V1, V2, DAG);
12913 if (VT == MVT::v4i32 || VT == MVT::v4f32)
12914 return getTargetShuffleNode(X86ISD::MOVSS, dl, VT, V1, V2, DAG);
12918 // FIXME: fold these into legal mask.
12919 if (isMOVLHPSMask(M, VT) && !isUNPCKLMask(M, VT, HasInt256))
12920 return getMOVLowToHigh(Op, dl, DAG, HasSSE2);
12922 if (isMOVHLPSMask(M, VT))
12923 return getMOVHighToLow(Op, dl, DAG);
12925 if (V2IsUndef && isMOVSHDUPMask(M, VT, Subtarget))
12926 return getTargetShuffleNode(X86ISD::MOVSHDUP, dl, VT, V1, DAG);
12928 if (V2IsUndef && isMOVSLDUPMask(M, VT, Subtarget))
12929 return getTargetShuffleNode(X86ISD::MOVSLDUP, dl, VT, V1, DAG);
12931 if (isMOVLPMask(M, VT))
12932 return getMOVLP(Op, dl, DAG, HasSSE2);
12934 if (ShouldXformToMOVHLPS(M, VT) ||
12935 ShouldXformToMOVLP(V1.getNode(), V2.getNode(), M, VT))
12936 return DAG.getCommutedVectorShuffle(*SVOp);
12939 // No better options. Use a vshldq / vsrldq.
12940 MVT EltVT = VT.getVectorElementType();
12941 ShAmt *= EltVT.getSizeInBits();
12942 return getVShift(isLeft, VT, ShVal, ShAmt, DAG, *this, dl);
12945 bool Commuted = false;
12946 // FIXME: This should also accept a bitcast of a splat? Be careful, not
12947 // 1,1,1,1 -> v8i16 though.
12948 BitVector UndefElements;
12949 if (auto *BVOp = dyn_cast<BuildVectorSDNode>(V1.getNode()))
12950 if (BVOp->getConstantSplatNode(&UndefElements) && UndefElements.none())
12952 if (auto *BVOp = dyn_cast<BuildVectorSDNode>(V2.getNode()))
12953 if (BVOp->getConstantSplatNode(&UndefElements) && UndefElements.none())
12956 // Canonicalize the splat or undef, if present, to be on the RHS.
12957 if (!V2IsUndef && V1IsSplat && !V2IsSplat) {
12958 CommuteVectorShuffleMask(M, NumElems);
12960 std::swap(V1IsSplat, V2IsSplat);
12964 if (isCommutedMOVLMask(M, VT, V2IsSplat, V2IsUndef)) {
12965 // Shuffling low element of v1 into undef, just return v1.
12968 // If V2 is a splat, the mask may be malformed such as <4,3,3,3>, which
12969 // the instruction selector will not match, so get a canonical MOVL with
12970 // swapped operands to undo the commute.
12971 return getMOVL(DAG, dl, VT, V2, V1);
12974 if (isUNPCKLMask(M, VT, HasInt256))
12975 return getTargetShuffleNode(X86ISD::UNPCKL, dl, VT, V1, V2, DAG);
12977 if (isUNPCKHMask(M, VT, HasInt256))
12978 return getTargetShuffleNode(X86ISD::UNPCKH, dl, VT, V1, V2, DAG);
12981 // Normalize the mask so all entries that point to V2 point to its first
12982 // element, then try to match unpck{h|l} again. If they match, return a
12983 // new vector_shuffle with the corrected mask.
12984 SmallVector<int, 8> NewMask(M.begin(), M.end());
12985 NormalizeMask(NewMask, NumElems);
12986 if (isUNPCKLMask(NewMask, VT, HasInt256, true))
12987 return getTargetShuffleNode(X86ISD::UNPCKL, dl, VT, V1, V2, DAG);
12988 if (isUNPCKHMask(NewMask, VT, HasInt256, true))
12989 return getTargetShuffleNode(X86ISD::UNPCKH, dl, VT, V1, V2, DAG);
12993 // Commute it back and try unpck* again.
12994 // FIXME: this seems wrong.
12995 CommuteVectorShuffleMask(M, NumElems);
12997 std::swap(V1IsSplat, V2IsSplat);
12999 if (isUNPCKLMask(M, VT, HasInt256))
13000 return getTargetShuffleNode(X86ISD::UNPCKL, dl, VT, V1, V2, DAG);
13002 if (isUNPCKHMask(M, VT, HasInt256))
13003 return getTargetShuffleNode(X86ISD::UNPCKH, dl, VT, V1, V2, DAG);
13006 // Normalize the node to match x86 shuffle ops if needed
13007 if (!V2IsUndef && (isSHUFPMask(M, VT, /* Commuted */ true)))
13008 return DAG.getCommutedVectorShuffle(*SVOp);
13010 // The checks below are all present in isShuffleMaskLegal, but they are
13011 // inlined here right now to enable us to directly emit target specific
13012 // nodes, and remove one by one until they don't return Op anymore.
13014 if (ShuffleVectorSDNode::isSplatMask(&M[0], VT) &&
13015 SVOp->getSplatIndex() == 0 && V2IsUndef) {
13016 if (VT == MVT::v2f64 || VT == MVT::v2i64)
13017 return getTargetShuffleNode(X86ISD::UNPCKL, dl, VT, V1, V1, DAG);
13020 if (isPSHUFHWMask(M, VT, HasInt256))
13021 return getTargetShuffleNode(X86ISD::PSHUFHW, dl, VT, V1,
13022 getShufflePSHUFHWImmediate(SVOp),
13025 if (isPSHUFLWMask(M, VT, HasInt256))
13026 return getTargetShuffleNode(X86ISD::PSHUFLW, dl, VT, V1,
13027 getShufflePSHUFLWImmediate(SVOp),
13030 unsigned MaskValue;
13031 if (isBlendMask(M, VT, Subtarget->hasSSE41(), HasInt256, &MaskValue))
13032 return LowerVECTOR_SHUFFLEtoBlend(SVOp, MaskValue, Subtarget, DAG);
13034 if (isSHUFPMask(M, VT))
13035 return getTargetShuffleNode(X86ISD::SHUFP, dl, VT, V1, V2,
13036 getShuffleSHUFImmediate(SVOp), DAG);
13038 if (isUNPCKL_v_undef_Mask(M, VT, HasInt256))
13039 return getTargetShuffleNode(X86ISD::UNPCKL, dl, VT, V1, V1, DAG);
13040 if (isUNPCKH_v_undef_Mask(M, VT, HasInt256))
13041 return getTargetShuffleNode(X86ISD::UNPCKH, dl, VT, V1, V1, DAG);
13043 //===--------------------------------------------------------------------===//
13044 // Generate target specific nodes for 128 or 256-bit shuffles only
13045 // supported in the AVX instruction set.
13048 // Handle VMOVDDUPY permutations
13049 if (V2IsUndef && isMOVDDUPYMask(M, VT, HasFp256))
13050 return getTargetShuffleNode(X86ISD::MOVDDUP, dl, VT, V1, DAG);
13052 // Handle VPERMILPS/D* permutations
13053 if (isVPERMILPMask(M, VT)) {
13054 if ((HasInt256 && VT == MVT::v8i32) || VT == MVT::v16i32)
13055 return getTargetShuffleNode(X86ISD::PSHUFD, dl, VT, V1,
13056 getShuffleSHUFImmediate(SVOp), DAG);
13057 return getTargetShuffleNode(X86ISD::VPERMILPI, dl, VT, V1,
13058 getShuffleSHUFImmediate(SVOp), DAG);
13062 if (VT.is512BitVector() && isINSERT64x4Mask(M, VT, &Idx))
13063 return Insert256BitVector(V1, Extract256BitVector(V2, 0, DAG, dl),
13064 Idx*(NumElems/2), DAG, dl);
13066 // Handle VPERM2F128/VPERM2I128 permutations
13067 if (isVPERM2X128Mask(M, VT, HasFp256))
13068 return getTargetShuffleNode(X86ISD::VPERM2X128, dl, VT, V1,
13069 V2, getShuffleVPERM2X128Immediate(SVOp), DAG);
13071 if (Subtarget->hasSSE41() && isINSERTPSMask(M, VT))
13072 return getINSERTPS(SVOp, dl, DAG);
13075 if (V2IsUndef && HasInt256 && isPermImmMask(M, VT, Imm8))
13076 return getTargetShuffleNode(X86ISD::VPERMI, dl, VT, V1, Imm8, DAG);
13078 if ((V2IsUndef && HasInt256 && VT.is256BitVector() && NumElems == 8) ||
13079 VT.is512BitVector()) {
13080 MVT MaskEltVT = MVT::getIntegerVT(VT.getVectorElementType().getSizeInBits());
13081 MVT MaskVectorVT = MVT::getVectorVT(MaskEltVT, NumElems);
13082 SmallVector<SDValue, 16> permclMask;
13083 for (unsigned i = 0; i != NumElems; ++i) {
13084 permclMask.push_back(DAG.getConstant((M[i]>=0) ? M[i] : 0, MaskEltVT));
13087 SDValue Mask = DAG.getNode(ISD::BUILD_VECTOR, dl, MaskVectorVT, permclMask);
13089 // Bitcast is for VPERMPS since mask is v8i32 but node takes v8f32
13090 return DAG.getNode(X86ISD::VPERMV, dl, VT,
13091 DAG.getNode(ISD::BITCAST, dl, VT, Mask), V1);
13092 return DAG.getNode(X86ISD::VPERMV3, dl, VT, V1,
13093 DAG.getNode(ISD::BITCAST, dl, VT, Mask), V2);
13096 //===--------------------------------------------------------------------===//
13097 // Since no target specific shuffle was selected for this generic one,
13098 // lower it into other known shuffles. FIXME: this isn't true yet, but
13099 // this is the plan.
13102 // Handle v8i16 specifically since SSE can do byte extraction and insertion.
13103 if (VT == MVT::v8i16) {
13104 SDValue NewOp = LowerVECTOR_SHUFFLEv8i16(Op, Subtarget, DAG);
13105 if (NewOp.getNode())
13109 if (VT == MVT::v16i16 && HasInt256) {
13110 SDValue NewOp = LowerVECTOR_SHUFFLEv16i16(Op, DAG);
13111 if (NewOp.getNode())
13115 if (VT == MVT::v16i8) {
13116 SDValue NewOp = LowerVECTOR_SHUFFLEv16i8(SVOp, Subtarget, DAG);
13117 if (NewOp.getNode())
13121 if (VT == MVT::v32i8) {
13122 SDValue NewOp = LowerVECTOR_SHUFFLEv32i8(SVOp, Subtarget, DAG);
13123 if (NewOp.getNode())
13127 // Handle all 128-bit wide vectors with 4 elements, and match them with
13128 // several different shuffle types.
13129 if (NumElems == 4 && VT.is128BitVector())
13130 return LowerVECTOR_SHUFFLE_128v4(SVOp, DAG);
13132 // Handle general 256-bit shuffles
13133 if (VT.is256BitVector())
13134 return LowerVECTOR_SHUFFLE_256(SVOp, DAG);
13139 // This function assumes its argument is a BUILD_VECTOR of constants or
13140 // undef SDNodes. i.e: ISD::isBuildVectorOfConstantSDNodes(BuildVector) is
13142 static bool BUILD_VECTORtoBlendMask(BuildVectorSDNode *BuildVector,
13143 unsigned &MaskValue) {
13145 unsigned NumElems = BuildVector->getNumOperands();
13146 // There are 2 lanes if (NumElems > 8), and 1 lane otherwise.
13147 unsigned NumLanes = (NumElems - 1) / 8 + 1;
13148 unsigned NumElemsInLane = NumElems / NumLanes;
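// For example: a v16i16 blend condition has NumElems = 16, giving
// NumLanes = 2 and NumElemsInLane = 8, so bit i of MaskValue describes both
// element i and element i + 8.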
13150 // Blend for v16i16 should be symmetric for both lanes.
13151 for (unsigned i = 0; i < NumElemsInLane; ++i) {
13152 SDValue EltCond = BuildVector->getOperand(i);
13153 SDValue SndLaneEltCond =
13154 (NumLanes == 2) ? BuildVector->getOperand(i + NumElemsInLane) : EltCond;
13156 int Lane1Cond = -1, Lane2Cond = -1;
13157 if (isa<ConstantSDNode>(EltCond))
13158 Lane1Cond = !isZero(EltCond);
13159 if (isa<ConstantSDNode>(SndLaneEltCond))
13160 Lane2Cond = !isZero(SndLaneEltCond);
13162 if (Lane1Cond == Lane2Cond || Lane2Cond < 0)
13163 // Lane1Cond != 0, means we want the first argument.
13164 // Lane1Cond == 0, means we want the second argument.
13165 // The encoding of this argument is 0 for the first argument, 1
13166 // for the second. Therefore, invert the condition.
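// For example: a v4i32 condition <-1, 0, -1, 0> (true, false, true, false)
// produces MaskValue = 0b1010, i.e. elements 1 and 3 take the second
// blend operand.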
13167 MaskValue |= !Lane1Cond << i;
13168 else if (Lane1Cond < 0)
13169 MaskValue |= !Lane2Cond << i;
13176 /// \brief Try to lower a VSELECT instruction to a vector shuffle.
13177 static SDValue lowerVSELECTtoVectorShuffle(SDValue Op,
13178 const X86Subtarget *Subtarget,
13179 SelectionDAG &DAG) {
13180 SDValue Cond = Op.getOperand(0);
13181 SDValue LHS = Op.getOperand(1);
13182 SDValue RHS = Op.getOperand(2);
13184 MVT VT = Op.getSimpleValueType();
13186 if (!ISD::isBuildVectorOfConstantSDNodes(Cond.getNode()))
13188 auto *CondBV = cast<BuildVectorSDNode>(Cond);
13190 // Only non-legal VSELECTs reach this lowering, convert those into generic
13191 // shuffles and re-use the shuffle lowering path for blends.
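// For example: a v4i32 VSELECT whose condition is the constant vector
// <-1, 0, -1, 0> becomes the shuffle mask <0, 5, 2, 7>, taking elements 0
// and 2 from LHS and elements 1 and 3 from RHS.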
13192 SmallVector<int, 32> Mask;
13193 for (int i = 0, Size = VT.getVectorNumElements(); i < Size; ++i) {
13194 SDValue CondElt = CondBV->getOperand(i);
13196 isa<ConstantSDNode>(CondElt) ? i + (isZero(CondElt) ? Size : 0) : -1);
13198 return DAG.getVectorShuffle(VT, dl, LHS, RHS, Mask);
13201 SDValue X86TargetLowering::LowerVSELECT(SDValue Op, SelectionDAG &DAG) const {
13202 // A vselect where all conditions and data are constants can be optimized into
13203 // a single vector load by SelectionDAGLegalize::ExpandBUILD_VECTOR().
13204 if (ISD::isBuildVectorOfConstantSDNodes(Op.getOperand(0).getNode()) &&
13205 ISD::isBuildVectorOfConstantSDNodes(Op.getOperand(1).getNode()) &&
13206 ISD::isBuildVectorOfConstantSDNodes(Op.getOperand(2).getNode()))
13209 // Try to lower this to a blend-style vector shuffle. This can handle all
13210 // constant condition cases.
13211 SDValue BlendOp = lowerVSELECTtoVectorShuffle(Op, Subtarget, DAG);
13212 if (BlendOp.getNode())
13215 // Variable blends are only legal from SSE4.1 onward.
13216 if (!Subtarget->hasSSE41())
13219 // Some types for vselect were previously set to Expand, not Legal or
13220 // Custom. Return an empty SDValue so we fall-through to Expand, after
13221 // the Custom lowering phase.
13222 MVT VT = Op.getSimpleValueType();
13223 switch (VT.SimpleTy) {
13228 if (Subtarget->hasBWI() && Subtarget->hasVLX())
13233 // We couldn't create a "Blend with immediate" node.
13234 // This node should still be legal, but we'll have to emit a blendv*
13239 static SDValue LowerEXTRACT_VECTOR_ELT_SSE4(SDValue Op, SelectionDAG &DAG) {
13240 MVT VT = Op.getSimpleValueType();
13243 if (!Op.getOperand(0).getSimpleValueType().is128BitVector())
13246 if (VT.getSizeInBits() == 8) {
13247 SDValue Extract = DAG.getNode(X86ISD::PEXTRB, dl, MVT::i32,
13248 Op.getOperand(0), Op.getOperand(1));
13249 SDValue Assert = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Extract,
13250 DAG.getValueType(VT));
13251 return DAG.getNode(ISD::TRUNCATE, dl, VT, Assert);
13254 if (VT.getSizeInBits() == 16) {
13255 unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
13256 // If Idx is 0, it's cheaper to do a move instead of a pextrw.
13258 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i16,
13259 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
13260 DAG.getNode(ISD::BITCAST, dl,
13263 Op.getOperand(1)));
13264 SDValue Extract = DAG.getNode(X86ISD::PEXTRW, dl, MVT::i32,
13265 Op.getOperand(0), Op.getOperand(1));
13266 SDValue Assert = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Extract,
13267 DAG.getValueType(VT));
13268 return DAG.getNode(ISD::TRUNCATE, dl, VT, Assert);
13271 if (VT == MVT::f32) {
13272 // EXTRACTPS outputs to a GPR32 register which will require a movd to copy
13273 // the result back to FR32 register. It's only worth matching if the
13274 // result has a single use which is a store or a bitcast to i32. And in
13275 // the case of a store, it's not worth it if the index is a constant 0,
13276 // because a MOVSSmr can be used instead, which is smaller and faster.
13277 if (!Op.hasOneUse())
13279 SDNode *User = *Op.getNode()->use_begin();
13280 if ((User->getOpcode() != ISD::STORE ||
13281 (isa<ConstantSDNode>(Op.getOperand(1)) &&
13282 cast<ConstantSDNode>(Op.getOperand(1))->isNullValue())) &&
13283 (User->getOpcode() != ISD::BITCAST ||
13284 User->getValueType(0) != MVT::i32))
13286 SDValue Extract = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
13287 DAG.getNode(ISD::BITCAST, dl, MVT::v4i32,
13290 return DAG.getNode(ISD::BITCAST, dl, MVT::f32, Extract);
13293 if (VT == MVT::i32 || VT == MVT::i64) {
13294 // ExtractPS/pextrq works with constant index.
13295 if (isa<ConstantSDNode>(Op.getOperand(1)))
13301 /// Extract one bit from mask vector, like v16i1 or v8i1.
13302 /// AVX-512 feature.
13304 X86TargetLowering::ExtractBitFromMaskVector(SDValue Op, SelectionDAG &DAG) const {
13305 SDValue Vec = Op.getOperand(0);
13307 MVT VecVT = Vec.getSimpleValueType();
13308 SDValue Idx = Op.getOperand(1);
13309 MVT EltVT = Op.getSimpleValueType();
13311 assert((EltVT == MVT::i1) && "Unexpected operands in ExtractBitFromMaskVector");
13312 assert((VecVT.getVectorNumElements() <= 16 || Subtarget->hasBWI()) &&
13313 "Unexpected vector type in ExtractBitFromMaskVector");
13315 // A variable index can't be handled in mask registers,
13316 // so extend the vector to VR512.
13317 if (!isa<ConstantSDNode>(Idx)) {
13318 MVT ExtVT = (VecVT == MVT::v8i1 ? MVT::v8i64 : MVT::v16i32);
13319 SDValue Ext = DAG.getNode(ISD::ZERO_EXTEND, dl, ExtVT, Vec);
13320 SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl,
13321 ExtVT.getVectorElementType(), Ext, Idx);
13322 return DAG.getNode(ISD::TRUNCATE, dl, EltVT, Elt);
13325 unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
13326 const TargetRegisterClass* rc = getRegClassFor(VecVT);
13327 if (!Subtarget->hasDQI() && (VecVT.getVectorNumElements() <= 8))
13328 rc = getRegClassFor(MVT::v16i1);
13329 unsigned MaxSift = rc->getSize()*8 - 1;
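// The two shifts below move the requested bit to the top element and then
// down to element 0. For example, with a 16-bit mask register class (so
// MaxSift = 15) and IdxVal = 3, the vector is shifted left by 12 and then
// right by 15, leaving only the requested bit in element 0.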
13330 Vec = DAG.getNode(X86ISD::VSHLI, dl, VecVT, Vec,
13331 DAG.getConstant(MaxSift - IdxVal, MVT::i8));
13332 Vec = DAG.getNode(X86ISD::VSRLI, dl, VecVT, Vec,
13333 DAG.getConstant(MaxSift, MVT::i8));
13334 return DAG.getNode(X86ISD::VEXTRACT, dl, MVT::i1, Vec,
13335 DAG.getIntPtrConstant(0));
13339 X86TargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op,
13340 SelectionDAG &DAG) const {
13342 SDValue Vec = Op.getOperand(0);
13343 MVT VecVT = Vec.getSimpleValueType();
13344 SDValue Idx = Op.getOperand(1);
13346 if (Op.getSimpleValueType() == MVT::i1)
13347 return ExtractBitFromMaskVector(Op, DAG);
13349 if (!isa<ConstantSDNode>(Idx)) {
13350 if (VecVT.is512BitVector() ||
13351 (VecVT.is256BitVector() && Subtarget->hasInt256() &&
13352 VecVT.getVectorElementType().getSizeInBits() == 32)) {
13355 MVT::getIntegerVT(VecVT.getVectorElementType().getSizeInBits());
13356 MVT MaskVT = MVT::getVectorVT(MaskEltVT, VecVT.getSizeInBits() /
13357 MaskEltVT.getSizeInBits());
13359 Idx = DAG.getZExtOrTrunc(Idx, dl, MaskEltVT);
13360 SDValue Mask = DAG.getNode(X86ISD::VINSERT, dl, MaskVT,
13361 getZeroVector(MaskVT, Subtarget, DAG, dl),
13362 Idx, DAG.getConstant(0, getPointerTy()));
13363 SDValue Perm = DAG.getNode(X86ISD::VPERMV, dl, VecVT, Mask, Vec);
13364 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, Op.getValueType(),
13365 Perm, DAG.getConstant(0, getPointerTy()));
13370 // If this is a 256-bit vector result, first extract the 128-bit vector and
13371 // then extract the element from the 128-bit vector.
13372 if (VecVT.is256BitVector() || VecVT.is512BitVector()) {
13374 unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
13375 // Get the 128-bit vector.
13376 Vec = Extract128BitVector(Vec, IdxVal, DAG, dl);
13377 MVT EltVT = VecVT.getVectorElementType();
13379 unsigned ElemsPerChunk = 128 / EltVT.getSizeInBits();
13381 //if (IdxVal >= NumElems/2)
13382 // IdxVal -= NumElems/2;
13383 IdxVal -= (IdxVal/ElemsPerChunk)*ElemsPerChunk;
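// For example: extracting element 11 of a v16i32 pulls out the third
// 128-bit chunk (ElemsPerChunk = 4) and then extracts element 11 % 4 = 3
// from it.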
13384 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, Op.getValueType(), Vec,
13385 DAG.getConstant(IdxVal, MVT::i32));
13388 assert(VecVT.is128BitVector() && "Unexpected vector length");
13390 if (Subtarget->hasSSE41()) {
13391 SDValue Res = LowerEXTRACT_VECTOR_ELT_SSE4(Op, DAG);
13396 MVT VT = Op.getSimpleValueType();
13397 // TODO: handle v16i8.
13398 if (VT.getSizeInBits() == 16) {
13399 SDValue Vec = Op.getOperand(0);
13400 unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
13402 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i16,
13403 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
13404 DAG.getNode(ISD::BITCAST, dl,
13406 Op.getOperand(1)));
13407 // Transform it so it matches pextrw, which produces a 32-bit result.
13408 MVT EltVT = MVT::i32;
13409 SDValue Extract = DAG.getNode(X86ISD::PEXTRW, dl, EltVT,
13410 Op.getOperand(0), Op.getOperand(1));
13411 SDValue Assert = DAG.getNode(ISD::AssertZext, dl, EltVT, Extract,
13412 DAG.getValueType(VT));
13413 return DAG.getNode(ISD::TRUNCATE, dl, VT, Assert);
13416 if (VT.getSizeInBits() == 32) {
13417 unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
13421 // SHUFPS the element to the lowest double word, then movss.
13422 int Mask[4] = { static_cast<int>(Idx), -1, -1, -1 };
13423 MVT VVT = Op.getOperand(0).getSimpleValueType();
13424 SDValue Vec = DAG.getVectorShuffle(VVT, dl, Op.getOperand(0),
13425 DAG.getUNDEF(VVT), Mask);
13426 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Vec,
13427 DAG.getIntPtrConstant(0));
13430 if (VT.getSizeInBits() == 64) {
13431 // FIXME: .td only matches this for <2 x f64>, not <2 x i64> on 32b
13432 // FIXME: seems like this should be unnecessary if mov{h,l}pd were taught
13433 // to match extract_elt for f64.
13434 unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
13438 // UNPCKHPD the element to the lowest double word, then movsd.
13439 // Note if the lower 64 bits of the result of the UNPCKHPD is then stored
13440 // to a f64mem, the whole operation is folded into a single MOVHPDmr.
13441 int Mask[2] = { 1, -1 };
13442 MVT VVT = Op.getOperand(0).getSimpleValueType();
13443 SDValue Vec = DAG.getVectorShuffle(VVT, dl, Op.getOperand(0),
13444 DAG.getUNDEF(VVT), Mask);
13445 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Vec,
13446 DAG.getIntPtrConstant(0));
13452 /// Insert one bit to mask vector, like v16i1 or v8i1.
13453 /// AVX-512 feature.
13455 X86TargetLowering::InsertBitToMaskVector(SDValue Op, SelectionDAG &DAG) const {
13457 SDValue Vec = Op.getOperand(0);
13458 SDValue Elt = Op.getOperand(1);
13459 SDValue Idx = Op.getOperand(2);
13460 MVT VecVT = Vec.getSimpleValueType();
13462 if (!isa<ConstantSDNode>(Idx)) {
13463 // Non constant index. Extend source and destination,
13464 // insert element and then truncate the result.
13465 MVT ExtVecVT = (VecVT == MVT::v8i1 ? MVT::v8i64 : MVT::v16i32);
13466 MVT ExtEltVT = (VecVT == MVT::v8i1 ? MVT::i64 : MVT::i32);
13467 SDValue ExtOp = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, ExtVecVT,
13468 DAG.getNode(ISD::ZERO_EXTEND, dl, ExtVecVT, Vec),
13469 DAG.getNode(ISD::ZERO_EXTEND, dl, ExtEltVT, Elt), Idx);
13470 return DAG.getNode(ISD::TRUNCATE, dl, VecVT, ExtOp);
13473 unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
13474 SDValue EltInVec = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VecVT, Elt);
13475 if (Vec.getOpcode() == ISD::UNDEF)
13476 return DAG.getNode(X86ISD::VSHLI, dl, VecVT, EltInVec,
13477 DAG.getConstant(IdxVal, MVT::i8));
13478 const TargetRegisterClass* rc = getRegClassFor(VecVT);
13479 unsigned MaxSift = rc->getSize()*8 - 1;
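// The shift pair below places the inserted bit at element position IdxVal
// before ORing it into Vec. For example, with MaxSift = 15 and IdxVal = 3,
// the element is shifted left by 15 and then right by 12, landing at
// position 3.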
13480 EltInVec = DAG.getNode(X86ISD::VSHLI, dl, VecVT, EltInVec,
13481 DAG.getConstant(MaxSift, MVT::i8));
13482 EltInVec = DAG.getNode(X86ISD::VSRLI, dl, VecVT, EltInVec,
13483 DAG.getConstant(MaxSift - IdxVal, MVT::i8));
13484 return DAG.getNode(ISD::OR, dl, VecVT, Vec, EltInVec);
13487 SDValue X86TargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op,
13488 SelectionDAG &DAG) const {
13489 MVT VT = Op.getSimpleValueType();
13490 MVT EltVT = VT.getVectorElementType();
13492 if (EltVT == MVT::i1)
13493 return InsertBitToMaskVector(Op, DAG);
13496 SDValue N0 = Op.getOperand(0);
13497 SDValue N1 = Op.getOperand(1);
13498 SDValue N2 = Op.getOperand(2);
13499 if (!isa<ConstantSDNode>(N2))
13501 auto *N2C = cast<ConstantSDNode>(N2);
13502 unsigned IdxVal = N2C->getZExtValue();
13504 // If the vector is wider than 128 bits, extract the 128-bit subvector, insert
13505 // into that, and then insert the subvector back into the result.
13506 if (VT.is256BitVector() || VT.is512BitVector()) {
13507 // Get the desired 128-bit vector half.
13508 SDValue V = Extract128BitVector(N0, IdxVal, DAG, dl);
13510 // Insert the element into the desired half.
13511 unsigned NumEltsIn128 = 128 / EltVT.getSizeInBits();
13512 unsigned IdxIn128 = IdxVal - (IdxVal / NumEltsIn128) * NumEltsIn128;
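// For example: inserting into element 5 of a v8f32 extracts the upper
// 128-bit half (NumEltsIn128 = 4) and inserts at IdxIn128 = 1 within it.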
13514 V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, V.getValueType(), V, N1,
13515 DAG.getConstant(IdxIn128, MVT::i32));
13517 // Insert the changed part back to the 256-bit vector
13518 return Insert128BitVector(N0, V, IdxVal, DAG, dl);
13520 assert(VT.is128BitVector() && "Only 128-bit vector types should be left!");
13522 if (Subtarget->hasSSE41()) {
13523 if (EltVT.getSizeInBits() == 8 || EltVT.getSizeInBits() == 16) {
13525 if (VT == MVT::v8i16) {
13526 Opc = X86ISD::PINSRW;
13528 assert(VT == MVT::v16i8);
13529 Opc = X86ISD::PINSRB;
13532 // Transform it so it matches pinsr{b,w}, which expects a GR32 as its second
13534 if (N1.getValueType() != MVT::i32)
13535 N1 = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, N1);
13536 if (N2.getValueType() != MVT::i32)
13537 N2 = DAG.getIntPtrConstant(IdxVal);
13538 return DAG.getNode(Opc, dl, VT, N0, N1, N2);
13541 if (EltVT == MVT::f32) {
13542 // Bits [7:6] of the constant are the source select. This will always be
13543 // zero here. The DAG Combiner may combine an extract_elt index into
13545 // bits. For example (insert (extract, 3), 2) could be matched by
13547 // the '3' into bits [7:6] of X86ISD::INSERTPS.
13548 // Bits [5:4] of the constant are the destination select. This is the
13549 // value of the incoming immediate.
13550 // Bits [3:0] of the constant are the zero mask. The DAG Combiner may
13551 // combine either bitwise AND or insert of float 0.0 to set these bits.
13552 N2 = DAG.getIntPtrConstant(IdxVal << 4);
13553 // Create this as a scalar to vector.
13554 N1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4f32, N1);
13555 return DAG.getNode(X86ISD::INSERTPS, dl, VT, N0, N1, N2);
13558 if (EltVT == MVT::i32 || EltVT == MVT::i64) {
13559 // PINSR* works with constant index.
13564 if (EltVT == MVT::i8)
13567 if (EltVT.getSizeInBits() == 16) {
13568 // Transform it so it matches pinsrw, which expects a 16-bit value in a GR32
13569 // as its second argument.
13570 if (N1.getValueType() != MVT::i32)
13571 N1 = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, N1);
13572 if (N2.getValueType() != MVT::i32)
13573 N2 = DAG.getIntPtrConstant(IdxVal);
13574 return DAG.getNode(X86ISD::PINSRW, dl, VT, N0, N1, N2);
13579 static SDValue LowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG) {
13581 MVT OpVT = Op.getSimpleValueType();
13583 // If this is a 256-bit vector result, first insert into a 128-bit
13584 // vector and then insert into the 256-bit vector.
13585 if (!OpVT.is128BitVector()) {
13586 // Insert into a 128-bit vector.
13587 unsigned SizeFactor = OpVT.getSizeInBits()/128;
13588 MVT VT128 = MVT::getVectorVT(OpVT.getVectorElementType(),
13589 OpVT.getVectorNumElements() / SizeFactor);
13591 Op = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT128, Op.getOperand(0));
13593 // Insert the 128-bit vector.
13594 return Insert128BitVector(DAG.getUNDEF(OpVT), Op, 0, DAG, dl);
13597 if (OpVT == MVT::v1i64 &&
13598 Op.getOperand(0).getValueType() == MVT::i64)
13599 return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v1i64, Op.getOperand(0));
13601 SDValue AnyExt = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, Op.getOperand(0));
13602 assert(OpVT.is128BitVector() && "Expected an SSE type!");
13603 return DAG.getNode(ISD::BITCAST, dl, OpVT,
13604 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32,AnyExt));
13607 // Lower a node with an EXTRACT_SUBVECTOR opcode. This may result in
13608 // a simple subregister reference or explicit instructions to grab
13609 // upper bits of a vector.
13610 static SDValue LowerEXTRACT_SUBVECTOR(SDValue Op, const X86Subtarget *Subtarget,
13611 SelectionDAG &DAG) {
13613 SDValue In = Op.getOperand(0);
13614 SDValue Idx = Op.getOperand(1);
13615 unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
13616 MVT ResVT = Op.getSimpleValueType();
13617 MVT InVT = In.getSimpleValueType();
13619 if (Subtarget->hasFp256()) {
13620 if (ResVT.is128BitVector() &&
13621 (InVT.is256BitVector() || InVT.is512BitVector()) &&
13622 isa<ConstantSDNode>(Idx)) {
13623 return Extract128BitVector(In, IdxVal, DAG, dl);
13625 if (ResVT.is256BitVector() && InVT.is512BitVector() &&
13626 isa<ConstantSDNode>(Idx)) {
13627 return Extract256BitVector(In, IdxVal, DAG, dl);
13633 // Lower a node with an INSERT_SUBVECTOR opcode. This may result in a
13634 // simple superregister reference or explicit instructions to insert
13635 // the upper bits of a vector.
13636 static SDValue LowerINSERT_SUBVECTOR(SDValue Op, const X86Subtarget *Subtarget,
13637 SelectionDAG &DAG) {
13638 if (!Subtarget->hasAVX())
13642 SDValue Vec = Op.getOperand(0);
13643 SDValue SubVec = Op.getOperand(1);
13644 SDValue Idx = Op.getOperand(2);
13646 if (!isa<ConstantSDNode>(Idx))
13649 unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
13650 MVT OpVT = Op.getSimpleValueType();
13651 MVT SubVecVT = SubVec.getSimpleValueType();
13653 // Fold two 16-byte subvector loads into one 32-byte load:
13654 // (insert_subvector (insert_subvector undef, (load addr), 0),
13655 // (load addr + 16), Elts/2)
13657 if ((IdxVal == OpVT.getVectorNumElements() / 2) &&
13658 Vec.getOpcode() == ISD::INSERT_SUBVECTOR &&
13659 OpVT.is256BitVector() && SubVecVT.is128BitVector() &&
13660 !Subtarget->isUnalignedMem32Slow()) {
13661 SDValue SubVec2 = Vec.getOperand(1);
13662 if (auto *Idx2 = dyn_cast<ConstantSDNode>(Vec.getOperand(2))) {
13663 if (Idx2->getZExtValue() == 0) {
13664 SDValue Ops[] = { SubVec2, SubVec };
13665 SDValue LD = EltsFromConsecutiveLoads(OpVT, Ops, dl, DAG, false);
13672 if ((OpVT.is256BitVector() || OpVT.is512BitVector()) &&
13673 SubVecVT.is128BitVector())
13674 return Insert128BitVector(Vec, SubVec, IdxVal, DAG, dl);
13676 if (OpVT.is512BitVector() && SubVecVT.is256BitVector())
13677 return Insert256BitVector(Vec, SubVec, IdxVal, DAG, dl);
13682 // ConstantPool, JumpTable, GlobalAddress, and ExternalSymbol are lowered as
13683 // their target counterpart wrapped in the X86ISD::Wrapper node. Suppose N is
13684 // one of the above-mentioned nodes. It has to be wrapped because otherwise
13685 // Select(N) returns N. So the raw TargetGlobalAddress nodes, etc. can only
13686 // be used to form an addressing mode. These wrapped nodes will be selected
13689 X86TargetLowering::LowerConstantPool(SDValue Op, SelectionDAG &DAG) const {
13690 ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
13692 // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
13693 // global base reg.
13694 unsigned char OpFlag = 0;
13695 unsigned WrapperKind = X86ISD::Wrapper;
13696 CodeModel::Model M = DAG.getTarget().getCodeModel();
13698 if (Subtarget->isPICStyleRIPRel() &&
13699 (M == CodeModel::Small || M == CodeModel::Kernel))
13700 WrapperKind = X86ISD::WrapperRIP;
13701 else if (Subtarget->isPICStyleGOT())
13702 OpFlag = X86II::MO_GOTOFF;
13703 else if (Subtarget->isPICStyleStubPIC())
13704 OpFlag = X86II::MO_PIC_BASE_OFFSET;
13706 SDValue Result = DAG.getTargetConstantPool(CP->getConstVal(), getPointerTy(),
13707 CP->getAlignment(),
13708 CP->getOffset(), OpFlag);
13710 Result = DAG.getNode(WrapperKind, DL, getPointerTy(), Result);
13711 // With PIC, the address is actually $g + Offset.
13713 Result = DAG.getNode(ISD::ADD, DL, getPointerTy(),
13714 DAG.getNode(X86ISD::GlobalBaseReg,
13715 SDLoc(), getPointerTy()),
13722 SDValue X86TargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const {
13723 JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
13725 // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
13726 // global base reg.
13727 unsigned char OpFlag = 0;
13728 unsigned WrapperKind = X86ISD::Wrapper;
13729 CodeModel::Model M = DAG.getTarget().getCodeModel();
13731 if (Subtarget->isPICStyleRIPRel() &&
13732 (M == CodeModel::Small || M == CodeModel::Kernel))
13733 WrapperKind = X86ISD::WrapperRIP;
13734 else if (Subtarget->isPICStyleGOT())
13735 OpFlag = X86II::MO_GOTOFF;
13736 else if (Subtarget->isPICStyleStubPIC())
13737 OpFlag = X86II::MO_PIC_BASE_OFFSET;
13739 SDValue Result = DAG.getTargetJumpTable(JT->getIndex(), getPointerTy(),
13742 Result = DAG.getNode(WrapperKind, DL, getPointerTy(), Result);
13744 // With PIC, the address is actually $g + Offset.
13746 Result = DAG.getNode(ISD::ADD, DL, getPointerTy(),
13747 DAG.getNode(X86ISD::GlobalBaseReg,
13748 SDLoc(), getPointerTy()),
13755 X86TargetLowering::LowerExternalSymbol(SDValue Op, SelectionDAG &DAG) const {
13756 const char *Sym = cast<ExternalSymbolSDNode>(Op)->getSymbol();
13758 // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
13759 // global base reg.
13760 unsigned char OpFlag = 0;
13761 unsigned WrapperKind = X86ISD::Wrapper;
13762 CodeModel::Model M = DAG.getTarget().getCodeModel();
13764 if (Subtarget->isPICStyleRIPRel() &&
13765 (M == CodeModel::Small || M == CodeModel::Kernel)) {
13766 if (Subtarget->isTargetDarwin() || Subtarget->isTargetELF())
13767 OpFlag = X86II::MO_GOTPCREL;
13768 WrapperKind = X86ISD::WrapperRIP;
13769 } else if (Subtarget->isPICStyleGOT()) {
13770 OpFlag = X86II::MO_GOT;
13771 } else if (Subtarget->isPICStyleStubPIC()) {
13772 OpFlag = X86II::MO_DARWIN_NONLAZY_PIC_BASE;
13773 } else if (Subtarget->isPICStyleStubNoDynamic()) {
13774 OpFlag = X86II::MO_DARWIN_NONLAZY;
13777 SDValue Result = DAG.getTargetExternalSymbol(Sym, getPointerTy(), OpFlag);
13780 Result = DAG.getNode(WrapperKind, DL, getPointerTy(), Result);
13782 // With PIC, the address is actually $g + Offset.
13783 if (DAG.getTarget().getRelocationModel() == Reloc::PIC_ &&
13784 !Subtarget->is64Bit()) {
13785 Result = DAG.getNode(ISD::ADD, DL, getPointerTy(),
13786 DAG.getNode(X86ISD::GlobalBaseReg,
13787 SDLoc(), getPointerTy()),
13791 // For symbols that require a load from a stub to get the address, emit the
13793 if (isGlobalStubReference(OpFlag))
13794 Result = DAG.getLoad(getPointerTy(), DL, DAG.getEntryNode(), Result,
13795 MachinePointerInfo::getGOT(), false, false, false, 0);
13801 X86TargetLowering::LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const {
13802 // Create the TargetBlockAddress node.
13803 unsigned char OpFlags =
13804 Subtarget->ClassifyBlockAddressReference();
13805 CodeModel::Model M = DAG.getTarget().getCodeModel();
13806 const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
13807 int64_t Offset = cast<BlockAddressSDNode>(Op)->getOffset();
13809 SDValue Result = DAG.getTargetBlockAddress(BA, getPointerTy(), Offset,
13812 if (Subtarget->isPICStyleRIPRel() &&
13813 (M == CodeModel::Small || M == CodeModel::Kernel))
13814 Result = DAG.getNode(X86ISD::WrapperRIP, dl, getPointerTy(), Result);
13816 Result = DAG.getNode(X86ISD::Wrapper, dl, getPointerTy(), Result);
13818 // With PIC, the address is actually $g + Offset.
13819 if (isGlobalRelativeToPICBase(OpFlags)) {
13820 Result = DAG.getNode(ISD::ADD, dl, getPointerTy(),
13821 DAG.getNode(X86ISD::GlobalBaseReg, dl, getPointerTy()),
13829 X86TargetLowering::LowerGlobalAddress(const GlobalValue *GV, SDLoc dl,
13830 int64_t Offset, SelectionDAG &DAG) const {
13831 // Create the TargetGlobalAddress node, folding in the constant
13832 // offset if it is legal.
13833 unsigned char OpFlags =
13834 Subtarget->ClassifyGlobalReference(GV, DAG.getTarget());
13835 CodeModel::Model M = DAG.getTarget().getCodeModel();
13837 if (OpFlags == X86II::MO_NO_FLAG &&
13838 X86::isOffsetSuitableForCodeModel(Offset, M)) {
13839 // A direct static reference to a global.
13840 Result = DAG.getTargetGlobalAddress(GV, dl, getPointerTy(), Offset);
13843 Result = DAG.getTargetGlobalAddress(GV, dl, getPointerTy(), 0, OpFlags);
13846 if (Subtarget->isPICStyleRIPRel() &&
13847 (M == CodeModel::Small || M == CodeModel::Kernel))
13848 Result = DAG.getNode(X86ISD::WrapperRIP, dl, getPointerTy(), Result);
13850 Result = DAG.getNode(X86ISD::Wrapper, dl, getPointerTy(), Result);
13852 // With PIC, the address is actually $g + Offset.
13853 if (isGlobalRelativeToPICBase(OpFlags)) {
13854 Result = DAG.getNode(ISD::ADD, dl, getPointerTy(),
13855 DAG.getNode(X86ISD::GlobalBaseReg, dl, getPointerTy()),
13859 // For globals that require a load from a stub to get the address, emit the
13861 if (isGlobalStubReference(OpFlags))
13862 Result = DAG.getLoad(getPointerTy(), dl, DAG.getEntryNode(), Result,
13863 MachinePointerInfo::getGOT(), false, false, false, 0);
13865 // If there was a non-zero offset that we didn't fold, create an explicit
13866 // addition for it.
13868 Result = DAG.getNode(ISD::ADD, dl, getPointerTy(), Result,
13869 DAG.getConstant(Offset, getPointerTy()));
13875 X86TargetLowering::LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const {
13876 const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
13877 int64_t Offset = cast<GlobalAddressSDNode>(Op)->getOffset();
13878 return LowerGlobalAddress(GV, SDLoc(Op), Offset, DAG);
13882 GetTLSADDR(SelectionDAG &DAG, SDValue Chain, GlobalAddressSDNode *GA,
13883 SDValue *InFlag, const EVT PtrVT, unsigned ReturnReg,
13884 unsigned char OperandFlags, bool LocalDynamic = false) {
13885 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
13886 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
13888 SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl,
13889 GA->getValueType(0),
13893 X86ISD::NodeType CallType = LocalDynamic ? X86ISD::TLSBASEADDR
13897 SDValue Ops[] = { Chain, TGA, *InFlag };
13898 Chain = DAG.getNode(CallType, dl, NodeTys, Ops);
13900 SDValue Ops[] = { Chain, TGA };
13901 Chain = DAG.getNode(CallType, dl, NodeTys, Ops);
13904 // TLSADDR will be codegen'ed as a call. Inform MFI that the function has calls.
13905 MFI->setAdjustsStack(true);
13906 MFI->setHasCalls(true);
13908 SDValue Flag = Chain.getValue(1);
13909 return DAG.getCopyFromReg(Chain, dl, ReturnReg, PtrVT, Flag);
13912 // Lower ISD::GlobalTLSAddress using the "general dynamic" model, 32 bit
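// Sketch of what this produces (assuming the usual GNU-style TLS sequence):
// the GOT base is copied into EBX, the TLSADDR node expands to roughly
// "leal x@TLSGD(,%ebx,1), %eax; call ___tls_get_addr@PLT", and the variable's
// address is read back from EAX.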
13914 LowerToTLSGeneralDynamicModel32(GlobalAddressSDNode *GA, SelectionDAG &DAG,
13917 SDLoc dl(GA); // ? function entry point might be better
13918 SDValue Chain = DAG.getCopyToReg(DAG.getEntryNode(), dl, X86::EBX,
13919 DAG.getNode(X86ISD::GlobalBaseReg,
13920 SDLoc(), PtrVT), InFlag);
13921 InFlag = Chain.getValue(1);
13923 return GetTLSADDR(DAG, Chain, GA, &InFlag, PtrVT, X86::EAX, X86II::MO_TLSGD);
13926 // Lower ISD::GlobalTLSAddress using the "general dynamic" model, 64 bit
13928 LowerToTLSGeneralDynamicModel64(GlobalAddressSDNode *GA, SelectionDAG &DAG,
13930 return GetTLSADDR(DAG, DAG.getEntryNode(), GA, nullptr, PtrVT,
13931 X86::RAX, X86II::MO_TLSGD);
13934 static SDValue LowerToTLSLocalDynamicModel(GlobalAddressSDNode *GA,
13940 // Get the start address of the TLS block for this module.
13941 X86MachineFunctionInfo* MFI = DAG.getMachineFunction()
13942 .getInfo<X86MachineFunctionInfo>();
13943 MFI->incNumLocalDynamicTLSAccesses();
13947 Base = GetTLSADDR(DAG, DAG.getEntryNode(), GA, nullptr, PtrVT, X86::RAX,
13948 X86II::MO_TLSLD, /*LocalDynamic=*/true);
13951 SDValue Chain = DAG.getCopyToReg(DAG.getEntryNode(), dl, X86::EBX,
13952 DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), PtrVT), InFlag);
13953 InFlag = Chain.getValue(1);
13954 Base = GetTLSADDR(DAG, Chain, GA, &InFlag, PtrVT, X86::EAX,
13955 X86II::MO_TLSLDM, /*LocalDynamic=*/true);
13958 // Note: the CleanupLocalDynamicTLSPass will remove redundant computations
13962 unsigned char OperandFlags = X86II::MO_DTPOFF;
13963 unsigned WrapperKind = X86ISD::Wrapper;
13964 SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl,
13965 GA->getValueType(0),
13966 GA->getOffset(), OperandFlags);
13967 SDValue Offset = DAG.getNode(WrapperKind, dl, PtrVT, TGA);
13969 // Add x@dtpoff with the base.
13970 return DAG.getNode(ISD::ADD, dl, PtrVT, Offset, Base);
13973 // Lower ISD::GlobalTLSAddress using the "initial exec" or "local exec" model.
13974 static SDValue LowerToTLSExecModel(GlobalAddressSDNode *GA, SelectionDAG &DAG,
13975 const EVT PtrVT, TLSModel::Model model,
13976 bool is64Bit, bool isPIC) {
13979 // Get the Thread Pointer, which is %gs:0 (32-bit) or %fs:0 (64-bit).
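// In the X86 backend, address space 256 selects the %gs segment and 257 selects
// %fs, so dereferencing a null pointer in the right address space reads the
// thread pointer directly from the segment base.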
13980 Value *Ptr = Constant::getNullValue(Type::getInt8PtrTy(*DAG.getContext(),
13981 is64Bit ? 257 : 256));
13983 SDValue ThreadPointer =
13984 DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), DAG.getIntPtrConstant(0),
13985 MachinePointerInfo(Ptr), false, false, false, 0);
13987 unsigned char OperandFlags = 0;
13988 // Most TLS accesses are not RIP relative, even on x86-64. One exception is
13989 // initialexec.
13990 unsigned WrapperKind = X86ISD::Wrapper;
13991 if (model == TLSModel::LocalExec) {
13992 OperandFlags = is64Bit ? X86II::MO_TPOFF : X86II::MO_NTPOFF;
13993 } else if (model == TLSModel::InitialExec) {
13995 OperandFlags = X86II::MO_GOTTPOFF;
13996 WrapperKind = X86ISD::WrapperRIP;
13998 OperandFlags = isPIC ? X86II::MO_GOTNTPOFF : X86II::MO_INDNTPOFF;
14001 llvm_unreachable("Unexpected model");
14004 // emit "addl x@ntpoff,%eax" (local exec)
14005 // or "addl x@indntpoff,%eax" (initial exec)
14006 // or "addl x@gotntpoff(%ebx),%eax" (initial exec, 32-bit pic)
14007 SDValue TGA =
14008 DAG.getTargetGlobalAddress(GA->getGlobal(), dl, GA->getValueType(0),
14009 GA->getOffset(), OperandFlags);
14010 SDValue Offset = DAG.getNode(WrapperKind, dl, PtrVT, TGA);
14012 if (model == TLSModel::InitialExec) {
14013 if (isPIC && !is64Bit) {
14014 Offset = DAG.getNode(ISD::ADD, dl, PtrVT,
14015 DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), PtrVT),
14016 Offset);
14017 }
14019 Offset = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Offset,
14020 MachinePointerInfo::getGOT(), false, false, false, 0);
14021 }
14023 // The address of the thread local variable is the add of the thread
14024 // pointer with the offset of the variable.
14025 return DAG.getNode(ISD::ADD, dl, PtrVT, ThreadPointer, Offset);
14026 }
14028 SDValue
14029 X86TargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const {
14031 GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
14032 const GlobalValue *GV = GA->getGlobal();
14034 if (Subtarget->isTargetELF()) {
14035 TLSModel::Model model = DAG.getTarget().getTLSModel(GV);
14037 switch (model) {
14038 case TLSModel::GeneralDynamic:
14039 if (Subtarget->is64Bit())
14040 return LowerToTLSGeneralDynamicModel64(GA, DAG, getPointerTy());
14041 return LowerToTLSGeneralDynamicModel32(GA, DAG, getPointerTy());
14042 case TLSModel::LocalDynamic:
14043 return LowerToTLSLocalDynamicModel(GA, DAG, getPointerTy(),
14044 Subtarget->is64Bit());
14045 case TLSModel::InitialExec:
14046 case TLSModel::LocalExec:
14047 return LowerToTLSExecModel(
14048 GA, DAG, getPointerTy(), model, Subtarget->is64Bit(),
14049 DAG.getTarget().getRelocationModel() == Reloc::PIC_);
14050 }
14051 llvm_unreachable("Unknown TLS model.");
14052 }
14054 if (Subtarget->isTargetDarwin()) {
14055 // Darwin only has one model of TLS. Lower to that.
14056 unsigned char OpFlag = 0;
14057 unsigned WrapperKind = Subtarget->isPICStyleRIPRel() ?
14058 X86ISD::WrapperRIP : X86ISD::Wrapper;
14060 // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
14061 // global base reg.
14062 bool PIC32 = (DAG.getTarget().getRelocationModel() == Reloc::PIC_) &&
14063 !Subtarget->is64Bit();
14064 if (PIC32)
14065 OpFlag = X86II::MO_TLVP_PIC_BASE;
14066 else
14067 OpFlag = X86II::MO_TLVP;
14068 SDLoc DL(Op);
14069 SDValue Result = DAG.getTargetGlobalAddress(GA->getGlobal(), DL,
14070 GA->getValueType(0),
14071 GA->getOffset(), OpFlag);
14072 SDValue Offset = DAG.getNode(WrapperKind, DL, getPointerTy(), Result);
14074 // With PIC32, the address is actually $g + Offset.
14075 if (PIC32)
14076 Offset = DAG.getNode(ISD::ADD, DL, getPointerTy(),
14077 DAG.getNode(X86ISD::GlobalBaseReg,
14078 SDLoc(), getPointerTy()),
14079 Offset);
14081 // Lowering the machine ISD node will make sure everything is in the right
14082 // place.
14083 SDValue Chain = DAG.getEntryNode();
14084 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
14085 SDValue Args[] = { Chain, Offset };
14086 Chain = DAG.getNode(X86ISD::TLSCALL, DL, NodeTys, Args);
14088 // TLSCALL will be codegen'ed as call. Inform MFI that function has calls.
14089 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
14090 MFI->setAdjustsStack(true);
14092 // And our return value (tls address) is in the standard call return value
14093 // location.
14094 unsigned Reg = Subtarget->is64Bit() ? X86::RAX : X86::EAX;
14095 return DAG.getCopyFromReg(Chain, DL, Reg, getPointerTy(),
14096 Chain.getValue(1));
14099 if (Subtarget->isTargetKnownWindowsMSVC() ||
14100 Subtarget->isTargetWindowsGNU()) {
14101 // Just use the implicit TLS architecture
14102 // Need to generate something similar to:
14103 // mov rdx, qword [gs:abs 58H]; Load pointer to ThreadLocalStorage
14105 // mov ecx, dword [rel _tls_index]: Load index (from C runtime)
14106 // mov rcx, qword [rdx+rcx*8]
14107 // mov eax, .tls$:tlsvar
14108 // [rax+rcx] contains the address
14109 // Windows 64bit: gs:0x58
14110 // Windows 32bit: fs:__tls_array
14113 SDValue Chain = DAG.getEntryNode();
14115 // Get the Thread Pointer, which is %fs:__tls_array (32-bit) or
14116 // %gs:0x58 (64-bit). On MinGW, __tls_array is not available, so directly
14117 // use its literal value of 0x2C.
14118 Value *Ptr = Constant::getNullValue(Subtarget->is64Bit()
14119 ? Type::getInt8PtrTy(*DAG.getContext(),
14120 256)
14121 : Type::getInt32PtrTy(*DAG.getContext(),
14122 257));
14124 SDValue TlsArray =
14125 Subtarget->is64Bit()
14126 ? DAG.getIntPtrConstant(0x58)
14127 : (Subtarget->isTargetWindowsGNU()
14128 ? DAG.getIntPtrConstant(0x2C)
14129 : DAG.getExternalSymbol("_tls_array", getPointerTy()));
14131 SDValue ThreadPointer =
14132 DAG.getLoad(getPointerTy(), dl, Chain, TlsArray,
14133 MachinePointerInfo(Ptr), false, false, false, 0);
14135 // Load the _tls_index variable
14136 SDValue IDX = DAG.getExternalSymbol("_tls_index", getPointerTy());
14137 if (Subtarget->is64Bit())
14138 IDX = DAG.getExtLoad(ISD::ZEXTLOAD, dl, getPointerTy(), Chain,
14139 IDX, MachinePointerInfo(), MVT::i32,
14140 false, false, false, 0);
14141 else
14142 IDX = DAG.getLoad(getPointerTy(), dl, Chain, IDX, MachinePointerInfo(),
14143 false, false, false, 0);
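// ThreadLocalStoragePointer (gs:[0x58] on Win64, fs:[0x2C] on Win32) points to an
// array of per-module TLS blocks; _tls_index selects this module's slot, so the
// index is scaled by the pointer size below and used to load the block's base.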
14145 SDValue Scale = DAG.getConstant(Log2_64_Ceil(TD->getPointerSize()),
14146 getPointerTy());
14147 IDX = DAG.getNode(ISD::SHL, dl, getPointerTy(), IDX, Scale);
14149 SDValue res = DAG.getNode(ISD::ADD, dl, getPointerTy(), ThreadPointer, IDX);
14150 res = DAG.getLoad(getPointerTy(), dl, Chain, res, MachinePointerInfo(),
14151 false, false, false, 0);
14153 // Get the offset of start of .tls section
14154 SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl,
14155 GA->getValueType(0),
14156 GA->getOffset(), X86II::MO_SECREL);
14157 SDValue Offset = DAG.getNode(X86ISD::Wrapper, dl, getPointerTy(), TGA);
14159 // The address of the thread local variable is the add of the thread
14160 // pointer with the offset of the variable.
14161 return DAG.getNode(ISD::ADD, dl, getPointerTy(), res, Offset);
14164 llvm_unreachable("TLS not implemented for this target.");
14167 /// LowerShiftParts - Lower SRA_PARTS and friends, which return two i32 values
14168 /// and take a 2 x i32 value to shift plus a shift amount.
14169 static SDValue LowerShiftParts(SDValue Op, SelectionDAG &DAG) {
14170 assert(Op.getNumOperands() == 3 && "Not a double-shift!");
14171 MVT VT = Op.getSimpleValueType();
14172 unsigned VTBits = VT.getSizeInBits();
14174 bool isSRA = Op.getOpcode() == ISD::SRA_PARTS;
14175 SDValue ShOpLo = Op.getOperand(0);
14176 SDValue ShOpHi = Op.getOperand(1);
14177 SDValue ShAmt = Op.getOperand(2);
14178 // X86ISD::SHLD and X86ISD::SHRD have defined overflow behavior but the
14179 // generic ISD nodes don't. Insert an AND to be safe; it's optimized away.
14181 SDValue SafeShAmt = DAG.getNode(ISD::AND, dl, MVT::i8, ShAmt,
14182 DAG.getConstant(VTBits - 1, MVT::i8));
14183 SDValue Tmp1 = isSRA ? DAG.getNode(ISD::SRA, dl, VT, ShOpHi,
14184 DAG.getConstant(VTBits - 1, MVT::i8))
14185 : DAG.getConstant(0, VT);
14187 SDValue Tmp2, Tmp3;
14188 if (Op.getOpcode() == ISD::SHL_PARTS) {
14189 Tmp2 = DAG.getNode(X86ISD::SHLD, dl, VT, ShOpHi, ShOpLo, ShAmt);
14190 Tmp3 = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, SafeShAmt);
14192 Tmp2 = DAG.getNode(X86ISD::SHRD, dl, VT, ShOpLo, ShOpHi, ShAmt);
14193 Tmp3 = DAG.getNode(isSRA ? ISD::SRA : ISD::SRL, dl, VT, ShOpHi, SafeShAmt);
14196 // If the shift amount is larger than or equal to the width of a part, we can't
14197 // rely on the results of shld/shrd. Insert a test and select the appropriate
14198 // values for large shift amounts.
14199 SDValue AndNode = DAG.getNode(ISD::AND, dl, MVT::i8, ShAmt,
14200 DAG.getConstant(VTBits, MVT::i8));
14201 SDValue Cond = DAG.getNode(X86ISD::CMP, dl, MVT::i32,
14202 AndNode, DAG.getConstant(0, MVT::i8));
14204 SDValue Hi, Lo;
14205 SDValue CC = DAG.getConstant(X86::COND_NE, MVT::i8);
14206 SDValue Ops0[4] = { Tmp2, Tmp3, CC, Cond };
14207 SDValue Ops1[4] = { Tmp3, Tmp1, CC, Cond };
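// When the tested bit of the shift amount is set (amount >= VTBits), the SHLD/SHRD
// results are invalid: the CMOVs below then pick Tmp3 (the single-register shift)
// for one half and Tmp1 (zero, or the sign-replicated high part for SRA) for the other.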
14209 if (Op.getOpcode() == ISD::SHL_PARTS) {
14210 Hi = DAG.getNode(X86ISD::CMOV, dl, VT, Ops0);
14211 Lo = DAG.getNode(X86ISD::CMOV, dl, VT, Ops1);
14213 Lo = DAG.getNode(X86ISD::CMOV, dl, VT, Ops0);
14214 Hi = DAG.getNode(X86ISD::CMOV, dl, VT, Ops1);
14217 SDValue Ops[2] = { Lo, Hi };
14218 return DAG.getMergeValues(Ops, dl);
14221 SDValue X86TargetLowering::LowerSINT_TO_FP(SDValue Op,
14222 SelectionDAG &DAG) const {
14223 MVT SrcVT = Op.getOperand(0).getSimpleValueType();
14226 if (SrcVT.isVector()) {
14227 if (SrcVT.getVectorElementType() == MVT::i1) {
14228 MVT IntegerVT = MVT::getVectorVT(MVT::i32, SrcVT.getVectorNumElements());
14229 return DAG.getNode(ISD::SINT_TO_FP, dl, Op.getValueType(),
14230 DAG.getNode(ISD::SIGN_EXTEND, dl, IntegerVT,
14231 Op.getOperand(0)));
14236 assert(SrcVT <= MVT::i64 && SrcVT >= MVT::i16 &&
14237 "Unknown SINT_TO_FP to lower!");
14239 // These are really Legal; return the operand so the caller accepts it as
14240 // Legal.
14241 if (SrcVT == MVT::i32 && isScalarFPTypeInSSEReg(Op.getValueType()))
14242 return Op;
14243 if (SrcVT == MVT::i64 && isScalarFPTypeInSSEReg(Op.getValueType()) &&
14244 Subtarget->is64Bit()) {
14245 return Op;
14246 }
14248 unsigned Size = SrcVT.getSizeInBits()/8;
14249 MachineFunction &MF = DAG.getMachineFunction();
14250 int SSFI = MF.getFrameInfo()->CreateStackObject(Size, Size, false);
14251 SDValue StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
14252 SDValue Chain = DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0),
14254 MachinePointerInfo::getFixedStack(SSFI),
14256 return BuildFILD(Op, SrcVT, Chain, StackSlot, DAG);
14259 SDValue X86TargetLowering::BuildFILD(SDValue Op, EVT SrcVT, SDValue Chain,
14261 SelectionDAG &DAG) const {
14265 bool useSSE = isScalarFPTypeInSSEReg(Op.getValueType());
14267 Tys = DAG.getVTList(MVT::f64, MVT::Other, MVT::Glue);
14269 Tys = DAG.getVTList(Op.getValueType(), MVT::Other);
14271 unsigned ByteSize = SrcVT.getSizeInBits()/8;
14273 FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(StackSlot);
14274 MachineMemOperand *MMO;
14276 int SSFI = FI->getIndex();
14278 DAG.getMachineFunction()
14279 .getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI),
14280 MachineMemOperand::MOLoad, ByteSize, ByteSize);
14282 MMO = cast<LoadSDNode>(StackSlot)->getMemOperand();
14283 StackSlot = StackSlot.getOperand(1);
14285 SDValue Ops[] = { Chain, StackSlot, DAG.getValueType(SrcVT) };
14286 SDValue Result = DAG.getMemIntrinsicNode(useSSE ? X86ISD::FILD_FLAG :
14288 Tys, Ops, SrcVT, MMO);
14291 Chain = Result.getValue(1);
14292 SDValue InFlag = Result.getValue(2);
14294 // FIXME: Currently the FST is flagged to the FILD_FLAG. This
14295 // shouldn't be necessary except that RFP cannot be live across
14296 // multiple blocks. When stackifier is fixed, they can be uncoupled.
14297 MachineFunction &MF = DAG.getMachineFunction();
14298 unsigned SSFISize = Op.getValueType().getSizeInBits()/8;
14299 int SSFI = MF.getFrameInfo()->CreateStackObject(SSFISize, SSFISize, false);
14300 SDValue StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
14301 Tys = DAG.getVTList(MVT::Other);
14303 Chain, Result, StackSlot, DAG.getValueType(Op.getValueType()), InFlag
14305 MachineMemOperand *MMO =
14306 DAG.getMachineFunction()
14307 .getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI),
14308 MachineMemOperand::MOStore, SSFISize, SSFISize);
14310 Chain = DAG.getMemIntrinsicNode(X86ISD::FST, DL, Tys,
14311 Ops, Op.getValueType(), MMO);
14312 Result = DAG.getLoad(Op.getValueType(), DL, Chain, StackSlot,
14313 MachinePointerInfo::getFixedStack(SSFI),
14314 false, false, false, 0);
14320 // LowerUINT_TO_FP_i64 - 64-bit unsigned integer to double expansion.
14321 SDValue X86TargetLowering::LowerUINT_TO_FP_i64(SDValue Op,
14322 SelectionDAG &DAG) const {
14323 // This algorithm is not obvious. Here is what we're trying to output:
14324 /*
14325 movq %rax, %xmm0
14326 punpckldq (c0), %xmm0 // c0: (uint4){ 0x43300000U, 0x45300000U, 0U, 0U }
14327 subpd (c1), %xmm0 // c1: (double2){ 0x1.0p52, 0x1.0p52 * 0x1.0p32 }
14328 #ifdef __SSE3__
14329 haddpd %xmm0, %xmm0
14330 #else
14331 pshufd $0x4e, %xmm0, %xmm1
14332 addpd %xmm1, %xmm0
14333 #endif
14334 */
14336 SDLoc dl(Op);
14337 LLVMContext *Context = DAG.getContext();
14339 // Build some magic constants.
14340 static const uint32_t CV0[] = { 0x43300000, 0x45300000, 0, 0 };
14341 Constant *C0 = ConstantDataVector::get(*Context, CV0);
14342 SDValue CPIdx0 = DAG.getConstantPool(C0, getPointerTy(), 16);
14344 SmallVector<Constant*,2> CV1;
14346 ConstantFP::get(*Context, APFloat(APFloat::IEEEdouble,
14347 APInt(64, 0x4330000000000000ULL))));
14349 ConstantFP::get(*Context, APFloat(APFloat::IEEEdouble,
14350 APInt(64, 0x4530000000000000ULL))));
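// The bit patterns above encode 2^52 and 2^84. Interleaving the two 32-bit halves
// of the input with 0x43300000/0x45300000 builds the doubles 2^52 + lo and
// 2^84 + hi * 2^32 exactly; subtracting {2^52, 2^84} then strips the biases so the
// final add yields lo + hi * 2^32, i.e. the original unsigned 64-bit value.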
14351 Constant *C1 = ConstantVector::get(CV1);
14352 SDValue CPIdx1 = DAG.getConstantPool(C1, getPointerTy(), 16);
14354 // Load the 64-bit value into an XMM register.
14355 SDValue XR1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64,
14357 SDValue CLod0 = DAG.getLoad(MVT::v4i32, dl, DAG.getEntryNode(), CPIdx0,
14358 MachinePointerInfo::getConstantPool(),
14359 false, false, false, 16);
14360 SDValue Unpck1 = getUnpackl(DAG, dl, MVT::v4i32,
14361 DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, XR1),
14364 SDValue CLod1 = DAG.getLoad(MVT::v2f64, dl, CLod0.getValue(1), CPIdx1,
14365 MachinePointerInfo::getConstantPool(),
14366 false, false, false, 16);
14367 SDValue XR2F = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Unpck1);
14368 SDValue Sub = DAG.getNode(ISD::FSUB, dl, MVT::v2f64, XR2F, CLod1);
14371 if (Subtarget->hasSSE3()) {
14372 // FIXME: The 'haddpd' instruction may be slower than 'movhlps + addsd'.
14373 Result = DAG.getNode(X86ISD::FHADD, dl, MVT::v2f64, Sub, Sub);
14375 SDValue S2F = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Sub);
14376 SDValue Shuffle = getTargetShuffleNode(X86ISD::PSHUFD, dl, MVT::v4i32,
14378 Result = DAG.getNode(ISD::FADD, dl, MVT::v2f64,
14379 DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Shuffle),
14383 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Result,
14384 DAG.getIntPtrConstant(0));
14387 // LowerUINT_TO_FP_i32 - 32-bit unsigned integer to float expansion.
14388 SDValue X86TargetLowering::LowerUINT_TO_FP_i32(SDValue Op,
14389 SelectionDAG &DAG) const {
14391 // FP constant to bias correct the final result.
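// 0x4330000000000000 is the double 2^52; OR-ing the zero-extended 32-bit input into
// its low mantissa bits produces exactly 2^52 + x, so subtracting the bias recovers x.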
14392 SDValue Bias = DAG.getConstantFP(BitsToDouble(0x4330000000000000ULL),
14395 // Load the 32-bit value into an XMM register.
14396 SDValue Load = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32,
14399 // Zero out the upper parts of the register.
14400 Load = getShuffleVectorZeroOrUndef(Load, 0, true, Subtarget, DAG);
14402 Load = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64,
14403 DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Load),
14404 DAG.getIntPtrConstant(0));
14406 // Or the load with the bias.
14407 SDValue Or = DAG.getNode(ISD::OR, dl, MVT::v2i64,
14408 DAG.getNode(ISD::BITCAST, dl, MVT::v2i64,
14409 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl,
14410 MVT::v2f64, Load)),
14411 DAG.getNode(ISD::BITCAST, dl, MVT::v2i64,
14412 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl,
14413 MVT::v2f64, Bias)));
14414 Or = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64,
14415 DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Or),
14416 DAG.getIntPtrConstant(0));
14418 // Subtract the bias.
14419 SDValue Sub = DAG.getNode(ISD::FSUB, dl, MVT::f64, Or, Bias);
14421 // Handle final rounding.
14422 EVT DestVT = Op.getValueType();
14424 if (DestVT.bitsLT(MVT::f64))
14425 return DAG.getNode(ISD::FP_ROUND, dl, DestVT, Sub,
14426 DAG.getIntPtrConstant(0));
14427 if (DestVT.bitsGT(MVT::f64))
14428 return DAG.getNode(ISD::FP_EXTEND, dl, DestVT, Sub);
14430 // Handle final rounding.
14431 return Sub;
14432 }
14434 static SDValue lowerUINT_TO_FP_vXi32(SDValue Op, SelectionDAG &DAG,
14435 const X86Subtarget &Subtarget) {
14436 // The algorithm is the following:
14437 // #ifdef __SSE4_1__
14438 // uint4 lo = _mm_blend_epi16( v, (uint4) 0x4b000000, 0xaa);
14439 // uint4 hi = _mm_blend_epi16( _mm_srli_epi32(v,16),
14440 // (uint4) 0x53000000, 0xaa);
14441 // #else
14442 // uint4 lo = (v & (uint4) 0xffff) | (uint4) 0x4b000000;
14443 // uint4 hi = (v >> 16) | (uint4) 0x53000000;
14444 // #endif
14445 // float4 fhi = (float4) hi - (0x1.0p39f + 0x1.0p23f);
14446 // return (float4) lo + fhi;
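// 0x4b000000 and 0x53000000 are the floats 2^23 and 2^39: OR-ing a 16-bit half into
// their low mantissa bits yields 2^23 + low16 and 2^39 + high16 * 2^16 exactly, and
// subtracting (2^39 + 2^23) from the high part before the final add cancels both biases.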
14449 SDValue V = Op->getOperand(0);
14450 EVT VecIntVT = V.getValueType();
14451 bool Is128 = VecIntVT == MVT::v4i32;
14452 EVT VecFloatVT = Is128 ? MVT::v4f32 : MVT::v8f32;
14453 // If we convert to something else than the supported type, e.g., to v4f64,
14455 if (VecFloatVT != Op->getValueType(0))
14458 unsigned NumElts = VecIntVT.getVectorNumElements();
14459 assert((VecIntVT == MVT::v4i32 || VecIntVT == MVT::v8i32) &&
14460 "Unsupported custom type");
14461 assert(NumElts <= 8 && "The size of the constant array must be fixed");
14463 // In the #ifdef/#else code, we have in common:
14464 // - The vector of constants:
14470 // Create the splat vector for 0x4b000000.
14471 SDValue CstLow = DAG.getConstant(0x4b000000, MVT::i32);
14472 SDValue CstLowArray[] = {CstLow, CstLow, CstLow, CstLow,
14473 CstLow, CstLow, CstLow, CstLow};
14474 SDValue VecCstLow = DAG.getNode(ISD::BUILD_VECTOR, DL, VecIntVT,
14475 makeArrayRef(&CstLowArray[0], NumElts));
14476 // Create the splat vector for 0x53000000.
14477 SDValue CstHigh = DAG.getConstant(0x53000000, MVT::i32);
14478 SDValue CstHighArray[] = {CstHigh, CstHigh, CstHigh, CstHigh,
14479 CstHigh, CstHigh, CstHigh, CstHigh};
14480 SDValue VecCstHigh = DAG.getNode(ISD::BUILD_VECTOR, DL, VecIntVT,
14481 makeArrayRef(&CstHighArray[0], NumElts));
14483 // Create the right shift.
14484 SDValue CstShift = DAG.getConstant(16, MVT::i32);
14485 SDValue CstShiftArray[] = {CstShift, CstShift, CstShift, CstShift,
14486 CstShift, CstShift, CstShift, CstShift};
14487 SDValue VecCstShift = DAG.getNode(ISD::BUILD_VECTOR, DL, VecIntVT,
14488 makeArrayRef(&CstShiftArray[0], NumElts));
14489 SDValue HighShift = DAG.getNode(ISD::SRL, DL, VecIntVT, V, VecCstShift);
14492 if (Subtarget.hasSSE41()) {
14493 EVT VecI16VT = Is128 ? MVT::v8i16 : MVT::v16i16;
14494 // uint4 lo = _mm_blend_epi16( v, (uint4) 0x4b000000, 0xaa);
14495 SDValue VecCstLowBitcast =
14496 DAG.getNode(ISD::BITCAST, DL, VecI16VT, VecCstLow);
14497 SDValue VecBitcast = DAG.getNode(ISD::BITCAST, DL, VecI16VT, V);
14498 // Low will be bitcasted right away, so do not bother bitcasting back to its
14499 // original type.
14500 Low = DAG.getNode(X86ISD::BLENDI, DL, VecI16VT, VecBitcast,
14501 VecCstLowBitcast, DAG.getConstant(0xaa, MVT::i32));
14502 // uint4 hi = _mm_blend_epi16( _mm_srli_epi32(v,16),
14503 // (uint4) 0x53000000, 0xaa);
14504 SDValue VecCstHighBitcast =
14505 DAG.getNode(ISD::BITCAST, DL, VecI16VT, VecCstHigh);
14506 SDValue VecShiftBitcast =
14507 DAG.getNode(ISD::BITCAST, DL, VecI16VT, HighShift);
14508 // High will be bitcasted right away, so do not bother bitcasting back to
14509 // its original type.
14510 High = DAG.getNode(X86ISD::BLENDI, DL, VecI16VT, VecShiftBitcast,
14511 VecCstHighBitcast, DAG.getConstant(0xaa, MVT::i32));
14513 SDValue CstMask = DAG.getConstant(0xffff, MVT::i32);
14514 SDValue VecCstMask = DAG.getNode(ISD::BUILD_VECTOR, DL, VecIntVT, CstMask,
14515 CstMask, CstMask, CstMask);
14516 // uint4 lo = (v & (uint4) 0xffff) | (uint4) 0x4b000000;
14517 SDValue LowAnd = DAG.getNode(ISD::AND, DL, VecIntVT, V, VecCstMask);
14518 Low = DAG.getNode(ISD::OR, DL, VecIntVT, LowAnd, VecCstLow);
14520 // uint4 hi = (v >> 16) | (uint4) 0x53000000;
14521 High = DAG.getNode(ISD::OR, DL, VecIntVT, HighShift, VecCstHigh);
14524 // Create the vector constant for -(0x1.0p39f + 0x1.0p23f).
14525 SDValue CstFAdd = DAG.getConstantFP(
14526 APFloat(APFloat::IEEEsingle, APInt(32, 0xD3000080)), MVT::f32);
14527 SDValue CstFAddArray[] = {CstFAdd, CstFAdd, CstFAdd, CstFAdd,
14528 CstFAdd, CstFAdd, CstFAdd, CstFAdd};
14529 SDValue VecCstFAdd = DAG.getNode(ISD::BUILD_VECTOR, DL, VecFloatVT,
14530 makeArrayRef(&CstFAddArray[0], NumElts));
14532 // float4 fhi = (float4) hi - (0x1.0p39f + 0x1.0p23f);
14533 SDValue HighBitcast = DAG.getNode(ISD::BITCAST, DL, VecFloatVT, High);
14535 DAG.getNode(ISD::FADD, DL, VecFloatVT, HighBitcast, VecCstFAdd);
14536 // return (float4) lo + fhi;
14537 SDValue LowBitcast = DAG.getNode(ISD::BITCAST, DL, VecFloatVT, Low);
14538 return DAG.getNode(ISD::FADD, DL, VecFloatVT, LowBitcast, FHigh);
14541 SDValue X86TargetLowering::lowerUINT_TO_FP_vec(SDValue Op,
14542 SelectionDAG &DAG) const {
14543 SDValue N0 = Op.getOperand(0);
14544 MVT SVT = N0.getSimpleValueType();
14547 switch (SVT.SimpleTy) {
14548 default:
14549 llvm_unreachable("Custom UINT_TO_FP is not supported!");
14550 case MVT::v4i8:
14551 case MVT::v4i16:
14552 case MVT::v8i8:
14553 case MVT::v8i16: {
14554 MVT NVT = MVT::getVectorVT(MVT::i32, SVT.getVectorNumElements());
14555 return DAG.getNode(ISD::SINT_TO_FP, dl, Op.getValueType(),
14556 DAG.getNode(ISD::ZERO_EXTEND, dl, NVT, N0));
14557 }
14558 case MVT::v4i32:
14559 case MVT::v8i32:
14560 return lowerUINT_TO_FP_vXi32(Op, DAG, *Subtarget);
14561 }
14562 llvm_unreachable(nullptr);
14563 }
14565 SDValue X86TargetLowering::LowerUINT_TO_FP(SDValue Op,
14566 SelectionDAG &DAG) const {
14567 SDValue N0 = Op.getOperand(0);
14570 if (Op.getValueType().isVector())
14571 return lowerUINT_TO_FP_vec(Op, DAG);
14573 // Since UINT_TO_FP is legal (it's marked custom), dag combiner won't
14574 // optimize it to a SINT_TO_FP when the sign bit is known zero. Perform
14575 // the optimization here.
14576 if (DAG.SignBitIsZero(N0))
14577 return DAG.getNode(ISD::SINT_TO_FP, dl, Op.getValueType(), N0);
14579 MVT SrcVT = N0.getSimpleValueType();
14580 MVT DstVT = Op.getSimpleValueType();
14581 if (SrcVT == MVT::i64 && DstVT == MVT::f64 && X86ScalarSSEf64)
14582 return LowerUINT_TO_FP_i64(Op, DAG);
14583 if (SrcVT == MVT::i32 && X86ScalarSSEf64)
14584 return LowerUINT_TO_FP_i32(Op, DAG);
14585 if (Subtarget->is64Bit() && SrcVT == MVT::i64 && DstVT == MVT::f32)
14588 // Make a 64-bit buffer, and use it to build an FILD.
14589 SDValue StackSlot = DAG.CreateStackTemporary(MVT::i64);
14590 if (SrcVT == MVT::i32) {
14591 SDValue WordOff = DAG.getConstant(4, getPointerTy());
14592 SDValue OffsetSlot = DAG.getNode(ISD::ADD, dl,
14593 getPointerTy(), StackSlot, WordOff);
14594 SDValue Store1 = DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0),
14595 StackSlot, MachinePointerInfo(),
14597 SDValue Store2 = DAG.getStore(Store1, dl, DAG.getConstant(0, MVT::i32),
14598 OffsetSlot, MachinePointerInfo(),
14600 SDValue Fild = BuildFILD(Op, MVT::i64, Store2, StackSlot, DAG);
14604 assert(SrcVT == MVT::i64 && "Unexpected type in UINT_TO_FP");
14605 SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0),
14606 StackSlot, MachinePointerInfo(),
14608 // For i64 source, we need to add the appropriate power of 2 if the input
14609 // was negative. This is the same as the optimization in
14610 // DAGTypeLegalizer::ExpandIntOp_UINT_TO_FP, and for it to be safe here,
14611 // we must be careful to do the computation in x87 extended precision, not
14612 // in SSE. (The generic code can't know it's OK to do this, or how to.)
14613 int SSFI = cast<FrameIndexSDNode>(StackSlot)->getIndex();
14614 MachineMemOperand *MMO =
14615 DAG.getMachineFunction()
14616 .getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI),
14617 MachineMemOperand::MOLoad, 8, 8);
14619 SDVTList Tys = DAG.getVTList(MVT::f80, MVT::Other);
14620 SDValue Ops[] = { Store, StackSlot, DAG.getValueType(MVT::i64) };
14621 SDValue Fild = DAG.getMemIntrinsicNode(X86ISD::FILD, dl, Tys, Ops,
14624 APInt FF(32, 0x5F800000ULL);
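// 0x5F800000 is the IEEE-754 single-precision encoding of 2^64. FILD reads the stored
// i64 as a signed value, so if the sign bit was set the result is 2^64 too small and
// the fudge factor selected below adds it back.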
14626 // Check whether the sign bit is set.
14627 SDValue SignSet = DAG.getSetCC(dl,
14628 getSetCCResultType(*DAG.getContext(), MVT::i64),
14629 Op.getOperand(0), DAG.getConstant(0, MVT::i64),
14632 // Build a 64 bit pair (0, FF) in the constant pool, with FF in the lo bits.
14633 SDValue FudgePtr = DAG.getConstantPool(
14634 ConstantInt::get(*DAG.getContext(), FF.zext(64)),
14637 // Get a pointer to FF if the sign bit was set, or to 0 otherwise.
14638 SDValue Zero = DAG.getIntPtrConstant(0);
14639 SDValue Four = DAG.getIntPtrConstant(4);
14640 SDValue Offset = DAG.getNode(ISD::SELECT, dl, Zero.getValueType(), SignSet,
14641 Zero, Four);
14642 FudgePtr = DAG.getNode(ISD::ADD, dl, getPointerTy(), FudgePtr, Offset);
14644 // Load the value out, extending it from f32 to f80.
14645 // FIXME: Avoid the extend by constructing the right constant pool?
14646 SDValue Fudge = DAG.getExtLoad(ISD::EXTLOAD, dl, MVT::f80, DAG.getEntryNode(),
14647 FudgePtr, MachinePointerInfo::getConstantPool(),
14648 MVT::f32, false, false, false, 4);
14649 // Extend everything to 80 bits to force it to be done on x87.
14650 SDValue Add = DAG.getNode(ISD::FADD, dl, MVT::f80, Fild, Fudge);
14651 return DAG.getNode(ISD::FP_ROUND, dl, DstVT, Add, DAG.getIntPtrConstant(0));
14654 std::pair<SDValue,SDValue>
14655 X86TargetLowering:: FP_TO_INTHelper(SDValue Op, SelectionDAG &DAG,
14656 bool IsSigned, bool IsReplace) const {
14659 EVT DstTy = Op.getValueType();
14661 if (!IsSigned && !isIntegerTypeFTOL(DstTy)) {
14662 assert(DstTy == MVT::i32 && "Unexpected FP_TO_UINT");
14666 assert(DstTy.getSimpleVT() <= MVT::i64 &&
14667 DstTy.getSimpleVT() >= MVT::i16 &&
14668 "Unknown FP_TO_INT to lower!");
14670 // These are really Legal.
14671 if (DstTy == MVT::i32 &&
14672 isScalarFPTypeInSSEReg(Op.getOperand(0).getValueType()))
14673 return std::make_pair(SDValue(), SDValue());
14674 if (Subtarget->is64Bit() &&
14675 DstTy == MVT::i64 &&
14676 isScalarFPTypeInSSEReg(Op.getOperand(0).getValueType()))
14677 return std::make_pair(SDValue(), SDValue());
14679 // We lower FP->int64 either into FISTP64 followed by a load from a temporary
14680 // stack slot, or into the FTOL runtime function.
14681 MachineFunction &MF = DAG.getMachineFunction();
14682 unsigned MemSize = DstTy.getSizeInBits()/8;
14683 int SSFI = MF.getFrameInfo()->CreateStackObject(MemSize, MemSize, false);
14684 SDValue StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
14687 if (!IsSigned && isIntegerTypeFTOL(DstTy))
14688 Opc = X86ISD::WIN_FTOL;
14690 switch (DstTy.getSimpleVT().SimpleTy) {
14691 default: llvm_unreachable("Invalid FP_TO_SINT to lower!");
14692 case MVT::i16: Opc = X86ISD::FP_TO_INT16_IN_MEM; break;
14693 case MVT::i32: Opc = X86ISD::FP_TO_INT32_IN_MEM; break;
14694 case MVT::i64: Opc = X86ISD::FP_TO_INT64_IN_MEM; break;
14697 SDValue Chain = DAG.getEntryNode();
14698 SDValue Value = Op.getOperand(0);
14699 EVT TheVT = Op.getOperand(0).getValueType();
14700 // FIXME This causes a redundant load/store if the SSE-class value is already
14701 // in memory, such as if it is on the callstack.
14702 if (isScalarFPTypeInSSEReg(TheVT)) {
14703 assert(DstTy == MVT::i64 && "Invalid FP_TO_SINT to lower!");
14704 Chain = DAG.getStore(Chain, DL, Value, StackSlot,
14705 MachinePointerInfo::getFixedStack(SSFI),
14707 SDVTList Tys = DAG.getVTList(Op.getOperand(0).getValueType(), MVT::Other);
14709 Chain, StackSlot, DAG.getValueType(TheVT)
14712 MachineMemOperand *MMO =
14713 MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI),
14714 MachineMemOperand::MOLoad, MemSize, MemSize);
14715 Value = DAG.getMemIntrinsicNode(X86ISD::FLD, DL, Tys, Ops, DstTy, MMO);
14716 Chain = Value.getValue(1);
14717 SSFI = MF.getFrameInfo()->CreateStackObject(MemSize, MemSize, false);
14718 StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
14721 MachineMemOperand *MMO =
14722 MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI),
14723 MachineMemOperand::MOStore, MemSize, MemSize);
14725 if (Opc != X86ISD::WIN_FTOL) {
14726 // Build the FP_TO_INT*_IN_MEM
14727 SDValue Ops[] = { Chain, Value, StackSlot };
14728 SDValue FIST = DAG.getMemIntrinsicNode(Opc, DL, DAG.getVTList(MVT::Other),
14730 return std::make_pair(FIST, StackSlot);
14732 SDValue ftol = DAG.getNode(X86ISD::WIN_FTOL, DL,
14733 DAG.getVTList(MVT::Other, MVT::Glue),
14735 SDValue eax = DAG.getCopyFromReg(ftol, DL, X86::EAX,
14736 MVT::i32, ftol.getValue(1));
14737 SDValue edx = DAG.getCopyFromReg(eax.getValue(1), DL, X86::EDX,
14738 MVT::i32, eax.getValue(2));
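// The MSVC _ftol2 runtime routine (which WIN_FTOL lowers to) returns the converted
// 64-bit integer in EDX:EAX; the two glued CopyFromReg nodes above read it out so it
// can be reassembled below.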
14739 SDValue Ops[] = { eax, edx };
14740 SDValue pair = IsReplace
14741 ? DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, Ops)
14742 : DAG.getMergeValues(Ops, DL);
14743 return std::make_pair(pair, SDValue());
14747 static SDValue LowerAVXExtend(SDValue Op, SelectionDAG &DAG,
14748 const X86Subtarget *Subtarget) {
14749 MVT VT = Op->getSimpleValueType(0);
14750 SDValue In = Op->getOperand(0);
14751 MVT InVT = In.getSimpleValueType();
14754 // Optimize vectors in AVX mode:
14757 // Use vpunpcklwd for 4 lower elements v8i16 -> v4i32.
14758 // Use vpunpckhwd for 4 upper elements v8i16 -> v4i32.
14759 // Concat upper and lower parts.
14762 // Use vpunpckldq for 4 lower elements v4i32 -> v2i64.
14763 // Use vpunpckhdq for 4 upper elements v4i32 -> v2i64.
14764 // Concat upper and lower parts.
14767 if (((VT != MVT::v16i16) || (InVT != MVT::v16i8)) &&
14768 ((VT != MVT::v8i32) || (InVT != MVT::v8i16)) &&
14769 ((VT != MVT::v4i64) || (InVT != MVT::v4i32)))
14772 if (Subtarget->hasInt256())
14773 return DAG.getNode(X86ISD::VZEXT, dl, VT, In);
14775 SDValue ZeroVec = getZeroVector(InVT, Subtarget, DAG, dl);
14776 SDValue Undef = DAG.getUNDEF(InVT);
14777 bool NeedZero = Op.getOpcode() == ISD::ZERO_EXTEND;
14778 SDValue OpLo = getUnpackl(DAG, dl, InVT, In, NeedZero ? ZeroVec : Undef);
14779 SDValue OpHi = getUnpackh(DAG, dl, InVT, In, NeedZero ? ZeroVec : Undef);
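// Interleaving the source with zero (for ZERO_EXTEND) or undef (for ANY_EXTEND)
// doubles each element's width; the two unpacked halves are bitcast to the
// half-count result type and concatenated into the full-width result below.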
14781 MVT HVT = MVT::getVectorVT(VT.getVectorElementType(),
14782 VT.getVectorNumElements()/2);
14784 OpLo = DAG.getNode(ISD::BITCAST, dl, HVT, OpLo);
14785 OpHi = DAG.getNode(ISD::BITCAST, dl, HVT, OpHi);
14787 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, OpLo, OpHi);
14790 static SDValue LowerZERO_EXTEND_AVX512(SDValue Op,
14791 SelectionDAG &DAG) {
14792 MVT VT = Op->getSimpleValueType(0);
14793 SDValue In = Op->getOperand(0);
14794 MVT InVT = In.getSimpleValueType();
14796 unsigned int NumElts = VT.getVectorNumElements();
14797 if (NumElts != 8 && NumElts != 16)
14800 if (VT.is512BitVector() && InVT.getVectorElementType() != MVT::i1)
14801 return DAG.getNode(X86ISD::VZEXT, DL, VT, In);
14803 EVT ExtVT = (NumElts == 8)? MVT::v8i64 : MVT::v16i32;
14804 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
14805 // Now we have only mask extension
14806 assert(InVT.getVectorElementType() == MVT::i1);
14807 SDValue Cst = DAG.getTargetConstant(1, ExtVT.getScalarType());
14808 const Constant *C = (dyn_cast<ConstantSDNode>(Cst))->getConstantIntValue();
14809 SDValue CP = DAG.getConstantPool(C, TLI.getPointerTy());
14810 unsigned Alignment = cast<ConstantPoolSDNode>(CP)->getAlignment();
14811 SDValue Ld = DAG.getLoad(Cst.getValueType(), DL, DAG.getEntryNode(), CP,
14812 MachinePointerInfo::getConstantPool(),
14813 false, false, false, Alignment);
14815 SDValue Brcst = DAG.getNode(X86ISD::VBROADCASTM, DL, ExtVT, In, Ld);
14816 if (VT.is512BitVector())
14817 return Brcst;
14818 return DAG.getNode(X86ISD::VTRUNC, DL, VT, Brcst);
14819 }
14821 static SDValue LowerANY_EXTEND(SDValue Op, const X86Subtarget *Subtarget,
14822 SelectionDAG &DAG) {
14823 if (Subtarget->hasFp256()) {
14824 SDValue Res = LowerAVXExtend(Op, DAG, Subtarget);
14825 if (Res.getNode())
14826 return Res;
14827 }
14829 return SDValue();
14830 }
14832 static SDValue LowerZERO_EXTEND(SDValue Op, const X86Subtarget *Subtarget,
14833 SelectionDAG &DAG) {
14835 MVT VT = Op.getSimpleValueType();
14836 SDValue In = Op.getOperand(0);
14837 MVT SVT = In.getSimpleValueType();
14839 if (VT.is512BitVector() || SVT.getVectorElementType() == MVT::i1)
14840 return LowerZERO_EXTEND_AVX512(Op, DAG);
14842 if (Subtarget->hasFp256()) {
14843 SDValue Res = LowerAVXExtend(Op, DAG, Subtarget);
14844 if (Res.getNode())
14845 return Res;
14846 }
14848 assert(!VT.is256BitVector() || !SVT.is128BitVector() ||
14849 VT.getVectorNumElements() != SVT.getVectorNumElements());
14850 return SDValue();
14851 }
14853 SDValue X86TargetLowering::LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const {
14855 MVT VT = Op.getSimpleValueType();
14856 SDValue In = Op.getOperand(0);
14857 MVT InVT = In.getSimpleValueType();
14859 if (VT == MVT::i1) {
14860 assert((InVT.isInteger() && (InVT.getSizeInBits() <= 64)) &&
14861 "Invalid scalar TRUNCATE operation");
14862 if (InVT.getSizeInBits() >= 32)
14863 return SDValue();
14864 In = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, In);
14865 return DAG.getNode(ISD::TRUNCATE, DL, VT, In);
14866 }
14867 assert(VT.getVectorNumElements() == InVT.getVectorNumElements() &&
14868 "Invalid TRUNCATE operation");
14870 if (InVT.is512BitVector() || VT.getVectorElementType() == MVT::i1) {
14871 if (VT.getVectorElementType().getSizeInBits() >=8)
14872 return DAG.getNode(X86ISD::VTRUNC, DL, VT, In);
14874 assert(VT.getVectorElementType() == MVT::i1 && "Unexpected vector type");
14875 unsigned NumElts = InVT.getVectorNumElements();
14876 assert ((NumElts == 8 || NumElts == 16) && "Unexpected vector type");
14877 if (InVT.getSizeInBits() < 512) {
14878 MVT ExtVT = (NumElts == 16)? MVT::v16i32 : MVT::v8i64;
14879 In = DAG.getNode(ISD::SIGN_EXTEND, DL, ExtVT, In);
14880 InVT = ExtVT;
14881 }
14883 SDValue Cst = DAG.getTargetConstant(1, InVT.getVectorElementType());
14884 const Constant *C = (dyn_cast<ConstantSDNode>(Cst))->getConstantIntValue();
14885 SDValue CP = DAG.getConstantPool(C, getPointerTy());
14886 unsigned Alignment = cast<ConstantPoolSDNode>(CP)->getAlignment();
14887 SDValue Ld = DAG.getLoad(Cst.getValueType(), DL, DAG.getEntryNode(), CP,
14888 MachinePointerInfo::getConstantPool(),
14889 false, false, false, Alignment);
14890 SDValue OneV = DAG.getNode(X86ISD::VBROADCAST, DL, InVT, Ld);
14891 SDValue And = DAG.getNode(ISD::AND, DL, InVT, OneV, In);
14892 return DAG.getNode(X86ISD::TESTM, DL, VT, And, And);
14895 if ((VT == MVT::v4i32) && (InVT == MVT::v4i64)) {
14896 // On AVX2, v4i64 -> v4i32 becomes VPERMD.
14897 if (Subtarget->hasInt256()) {
14898 static const int ShufMask[] = {0, 2, 4, 6, -1, -1, -1, -1};
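// Viewed as v8i32, elements 0, 2, 4 and 6 hold the low 32-bit halves of the four
// i64 elements; the upper four lanes of the VPERMD result are don't-care and are
// dropped by the EXTRACT_SUBVECTOR below.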
14899 In = DAG.getNode(ISD::BITCAST, DL, MVT::v8i32, In);
14900 In = DAG.getVectorShuffle(MVT::v8i32, DL, In, DAG.getUNDEF(MVT::v8i32),
14902 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, In,
14903 DAG.getIntPtrConstant(0));
14906 SDValue OpLo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v2i64, In,
14907 DAG.getIntPtrConstant(0));
14908 SDValue OpHi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v2i64, In,
14909 DAG.getIntPtrConstant(2));
14910 OpLo = DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, OpLo);
14911 OpHi = DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, OpHi);
14912 static const int ShufMask[] = {0, 2, 4, 6};
14913 return DAG.getVectorShuffle(VT, DL, OpLo, OpHi, ShufMask);
14916 if ((VT == MVT::v8i16) && (InVT == MVT::v8i32)) {
14917 // On AVX2, v8i32 -> v8i16 becomes PSHUFB.
14918 if (Subtarget->hasInt256()) {
14919 In = DAG.getNode(ISD::BITCAST, DL, MVT::v32i8, In);
14921 SmallVector<SDValue,32> pshufbMask;
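// Build a PSHUFB control mask that, within each 128-bit lane, gathers bytes
// {0,1}, {4,5}, {8,9}, {12,13} (the low 16 bits of each i32) into the low half and
// writes 0x80 (which PSHUFB turns into a zero byte) everywhere else.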
14922 for (unsigned i = 0; i < 2; ++i) {
14923 pshufbMask.push_back(DAG.getConstant(0x0, MVT::i8));
14924 pshufbMask.push_back(DAG.getConstant(0x1, MVT::i8));
14925 pshufbMask.push_back(DAG.getConstant(0x4, MVT::i8));
14926 pshufbMask.push_back(DAG.getConstant(0x5, MVT::i8));
14927 pshufbMask.push_back(DAG.getConstant(0x8, MVT::i8));
14928 pshufbMask.push_back(DAG.getConstant(0x9, MVT::i8));
14929 pshufbMask.push_back(DAG.getConstant(0xc, MVT::i8));
14930 pshufbMask.push_back(DAG.getConstant(0xd, MVT::i8));
14931 for (unsigned j = 0; j < 8; ++j)
14932 pshufbMask.push_back(DAG.getConstant(0x80, MVT::i8));
14934 SDValue BV = DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v32i8, pshufbMask);
14935 In = DAG.getNode(X86ISD::PSHUFB, DL, MVT::v32i8, In, BV);
14936 In = DAG.getNode(ISD::BITCAST, DL, MVT::v4i64, In);
14938 static const int ShufMask[] = {0, 2, -1, -1};
14939 In = DAG.getVectorShuffle(MVT::v4i64, DL, In, DAG.getUNDEF(MVT::v4i64),
14941 In = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v2i64, In,
14942 DAG.getIntPtrConstant(0));
14943 return DAG.getNode(ISD::BITCAST, DL, VT, In);
14946 SDValue OpLo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v4i32, In,
14947 DAG.getIntPtrConstant(0));
14949 SDValue OpHi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v4i32, In,
14950 DAG.getIntPtrConstant(4));
14952 OpLo = DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, OpLo);
14953 OpHi = DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, OpHi);
14955 // The PSHUFB mask:
14956 static const int ShufMask1[] = {0, 1, 4, 5, 8, 9, 12, 13,
14957 -1, -1, -1, -1, -1, -1, -1, -1};
14959 SDValue Undef = DAG.getUNDEF(MVT::v16i8);
14960 OpLo = DAG.getVectorShuffle(MVT::v16i8, DL, OpLo, Undef, ShufMask1);
14961 OpHi = DAG.getVectorShuffle(MVT::v16i8, DL, OpHi, Undef, ShufMask1);
14963 OpLo = DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, OpLo);
14964 OpHi = DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, OpHi);
14966 // The MOVLHPS Mask:
14967 static const int ShufMask2[] = {0, 1, 4, 5};
14968 SDValue res = DAG.getVectorShuffle(MVT::v4i32, DL, OpLo, OpHi, ShufMask2);
14969 return DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, res);
14972 // Handle truncation of V256 to V128 using shuffles.
14973 if (!VT.is128BitVector() || !InVT.is256BitVector())
14976 assert(Subtarget->hasFp256() && "256-bit vector without AVX!");
14978 unsigned NumElems = VT.getVectorNumElements();
14979 MVT NVT = MVT::getVectorVT(VT.getVectorElementType(), NumElems * 2);
14981 SmallVector<int, 16> MaskVec(NumElems * 2, -1);
14982 // Prepare truncation shuffle mask
14983 for (unsigned i = 0; i != NumElems; ++i)
14984 MaskVec[i] = i * 2;
14985 SDValue V = DAG.getVectorShuffle(NVT, DL,
14986 DAG.getNode(ISD::BITCAST, DL, NVT, In),
14987 DAG.getUNDEF(NVT), &MaskVec[0]);
14988 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, V,
14989 DAG.getIntPtrConstant(0));
14992 SDValue X86TargetLowering::LowerFP_TO_SINT(SDValue Op,
14993 SelectionDAG &DAG) const {
14994 assert(!Op.getSimpleValueType().isVector());
14996 std::pair<SDValue,SDValue> Vals = FP_TO_INTHelper(Op, DAG,
14997 /*IsSigned=*/ true, /*IsReplace=*/ false);
14998 SDValue FIST = Vals.first, StackSlot = Vals.second;
14999 // If FP_TO_INTHelper failed, the node is actually supposed to be Legal.
15000 if (!FIST.getNode()) return Op;
15002 if (StackSlot.getNode())
15003 // Load the result.
15004 return DAG.getLoad(Op.getValueType(), SDLoc(Op),
15005 FIST, StackSlot, MachinePointerInfo(),
15006 false, false, false, 0);
15008 // The node is the result.
15012 SDValue X86TargetLowering::LowerFP_TO_UINT(SDValue Op,
15013 SelectionDAG &DAG) const {
15014 std::pair<SDValue,SDValue> Vals = FP_TO_INTHelper(Op, DAG,
15015 /*IsSigned=*/ false, /*IsReplace=*/ false);
15016 SDValue FIST = Vals.first, StackSlot = Vals.second;
15017 assert(FIST.getNode() && "Unexpected failure");
15019 if (StackSlot.getNode())
15020 // Load the result.
15021 return DAG.getLoad(Op.getValueType(), SDLoc(Op),
15022 FIST, StackSlot, MachinePointerInfo(),
15023 false, false, false, 0);
15025 // The node is the result.
15029 static SDValue LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) {
15031 MVT VT = Op.getSimpleValueType();
15032 SDValue In = Op.getOperand(0);
15033 MVT SVT = In.getSimpleValueType();
15035 assert(SVT == MVT::v2f32 && "Only customize MVT::v2f32 type legalization!");
15037 return DAG.getNode(X86ISD::VFPEXT, DL, VT,
15038 DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v4f32,
15039 In, DAG.getUNDEF(SVT)));
15042 /// The only differences between FABS and FNEG are the mask and the logic op.
15043 /// FNEG also has a folding opportunity for FNEG(FABS(x)).
15044 static SDValue LowerFABSorFNEG(SDValue Op, SelectionDAG &DAG) {
15045 assert((Op.getOpcode() == ISD::FABS || Op.getOpcode() == ISD::FNEG) &&
15046 "Wrong opcode for lowering FABS or FNEG.");
15048 bool IsFABS = (Op.getOpcode() == ISD::FABS);
15050 // If this is a FABS and it has an FNEG user, bail out to fold the combination
15051 // into an FNABS. We'll lower the FABS after that if it is still in use.
15053 for (SDNode *User : Op->uses())
15054 if (User->getOpcode() == ISD::FNEG)
15057 SDValue Op0 = Op.getOperand(0);
15058 bool IsFNABS = !IsFABS && (Op0.getOpcode() == ISD::FABS);
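// FNEG(FABS(x)) is -|x|, i.e. FNABS: it is handled here by OR-ing the sign-bit mask
// into the operand of the inner FABS instead of emitting two logic ops.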
15061 MVT VT = Op.getSimpleValueType();
15062 // Assume scalar op for initialization; update for vector if needed.
15063 // Note that there are no scalar bitwise logical SSE/AVX instructions, so we
15064 // generate a 16-byte vector constant and logic op even for the scalar case.
15065 // Using a 16-byte mask allows folding the load of the mask with
15066 // the logic op, so it can save (~4 bytes) on code size.
15068 unsigned NumElts = VT == MVT::f64 ? 2 : 4;
15069 // FIXME: Use function attribute "OptimizeForSize" and/or CodeGenOpt::Level to
15070 // decide if we should generate a 16-byte constant mask when we only need 4 or
15071 // 8 bytes for the scalar case.
15072 if (VT.isVector()) {
15073 EltVT = VT.getVectorElementType();
15074 NumElts = VT.getVectorNumElements();
15077 unsigned EltBits = EltVT.getSizeInBits();
15078 LLVMContext *Context = DAG.getContext();
15079 // For FABS, mask is 0x7f...; for FNEG, mask is 0x80...
15081 IsFABS ? APInt::getSignedMaxValue(EltBits) : APInt::getSignBit(EltBits);
15082 Constant *C = ConstantInt::get(*Context, MaskElt);
15083 C = ConstantVector::getSplat(NumElts, C);
15084 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
15085 SDValue CPIdx = DAG.getConstantPool(C, TLI.getPointerTy());
15086 unsigned Alignment = cast<ConstantPoolSDNode>(CPIdx)->getAlignment();
15087 SDValue Mask = DAG.getLoad(VT, dl, DAG.getEntryNode(), CPIdx,
15088 MachinePointerInfo::getConstantPool(),
15089 false, false, false, Alignment);
15091 if (VT.isVector()) {
15092 // For a vector, cast operands to a vector type, perform the logic op,
15093 // and cast the result back to the original value type.
15094 MVT VecVT = MVT::getVectorVT(MVT::i64, VT.getSizeInBits() / 64);
15095 SDValue MaskCasted = DAG.getNode(ISD::BITCAST, dl, VecVT, Mask);
15096 SDValue Operand = IsFNABS ?
15097 DAG.getNode(ISD::BITCAST, dl, VecVT, Op0.getOperand(0)) :
15098 DAG.getNode(ISD::BITCAST, dl, VecVT, Op0);
15099 unsigned BitOp = IsFABS ? ISD::AND : IsFNABS ? ISD::OR : ISD::XOR;
15100 return DAG.getNode(ISD::BITCAST, dl, VT,
15101 DAG.getNode(BitOp, dl, VecVT, Operand, MaskCasted));
15104 // If not vector, then scalar.
15105 unsigned BitOp = IsFABS ? X86ISD::FAND : IsFNABS ? X86ISD::FOR : X86ISD::FXOR;
15106 SDValue Operand = IsFNABS ? Op0.getOperand(0) : Op0;
15107 return DAG.getNode(BitOp, dl, VT, Operand, Mask);
15110 static SDValue LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) {
15111 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
15112 LLVMContext *Context = DAG.getContext();
15113 SDValue Op0 = Op.getOperand(0);
15114 SDValue Op1 = Op.getOperand(1);
15116 MVT VT = Op.getSimpleValueType();
15117 MVT SrcVT = Op1.getSimpleValueType();
15119 // If second operand is smaller, extend it first.
15120 if (SrcVT.bitsLT(VT)) {
15121 Op1 = DAG.getNode(ISD::FP_EXTEND, dl, VT, Op1);
15124 // And if it is bigger, shrink it first.
15125 if (SrcVT.bitsGT(VT)) {
15126 Op1 = DAG.getNode(ISD::FP_ROUND, dl, VT, Op1, DAG.getIntPtrConstant(1));
15130 // At this point the operands and the result should have the same
15131 // type, and that won't be f80 since that is not custom lowered.
15133 const fltSemantics &Sem =
15134 VT == MVT::f64 ? APFloat::IEEEdouble : APFloat::IEEEsingle;
15135 const unsigned SizeInBits = VT.getSizeInBits();
15137 SmallVector<Constant *, 4> CV(
15138 VT == MVT::f64 ? 2 : 4,
15139 ConstantFP::get(*Context, APFloat(Sem, APInt(SizeInBits, 0))));
15141 // First, clear all bits but the sign bit from the second operand (sign).
15142 CV[0] = ConstantFP::get(*Context,
15143 APFloat(Sem, APInt::getHighBitsSet(SizeInBits, 1)));
15144 Constant *C = ConstantVector::get(CV);
15145 SDValue CPIdx = DAG.getConstantPool(C, TLI.getPointerTy(), 16);
15146 SDValue Mask1 = DAG.getLoad(SrcVT, dl, DAG.getEntryNode(), CPIdx,
15147 MachinePointerInfo::getConstantPool(),
15148 false, false, false, 16);
15149 SDValue SignBit = DAG.getNode(X86ISD::FAND, dl, SrcVT, Op1, Mask1);
15151 // Next, clear the sign bit from the first operand (magnitude).
15152 // If it's a constant, we can clear it here.
15153 if (ConstantFPSDNode *Op0CN = dyn_cast<ConstantFPSDNode>(Op0)) {
15154 APFloat APF = Op0CN->getValueAPF();
15155 // If the magnitude is a positive zero, the sign bit alone is enough.
15156 if (APF.isPosZero())
15159 CV[0] = ConstantFP::get(*Context, APF);
15161 CV[0] = ConstantFP::get(
15163 APFloat(Sem, APInt::getLowBitsSet(SizeInBits, SizeInBits - 1)));
15165 C = ConstantVector::get(CV);
15166 CPIdx = DAG.getConstantPool(C, TLI.getPointerTy(), 16);
15167 SDValue Val = DAG.getLoad(VT, dl, DAG.getEntryNode(), CPIdx,
15168 MachinePointerInfo::getConstantPool(),
15169 false, false, false, 16);
15170 // If the magnitude operand wasn't a constant, we need to AND out the sign.
15171 if (!isa<ConstantFPSDNode>(Op0))
15172 Val = DAG.getNode(X86ISD::FAND, dl, VT, Op0, Val);
15174 // OR the magnitude value with the sign bit.
15175 return DAG.getNode(X86ISD::FOR, dl, VT, Val, SignBit);
15178 static SDValue LowerFGETSIGN(SDValue Op, SelectionDAG &DAG) {
15179 SDValue N0 = Op.getOperand(0);
15181 MVT VT = Op.getSimpleValueType();
15183 // Lower ISD::FGETSIGN to (AND (X86ISD::FGETSIGNx86 ...) 1).
15184 SDValue xFGETSIGN = DAG.getNode(X86ISD::FGETSIGNx86, dl, VT, N0,
15185 DAG.getConstant(1, VT));
15186 return DAG.getNode(ISD::AND, dl, VT, xFGETSIGN, DAG.getConstant(1, VT));
15189 // Check whether an OR'd tree is PTEST-able.
15190 static SDValue LowerVectorAllZeroTest(SDValue Op, const X86Subtarget *Subtarget,
15191 SelectionDAG &DAG) {
15192 assert(Op.getOpcode() == ISD::OR && "Only check OR'd tree.");
15194 if (!Subtarget->hasSSE41())
15197 if (!Op->hasOneUse())
15200 SDNode *N = Op.getNode();
15203 SmallVector<SDValue, 8> Opnds;
15204 DenseMap<SDValue, unsigned> VecInMap;
15205 SmallVector<SDValue, 8> VecIns;
15206 EVT VT = MVT::Other;
15208 // Recognize a special case where a vector is casted into wide integer to
15210 Opnds.push_back(N->getOperand(0));
15211 Opnds.push_back(N->getOperand(1));
15213 for (unsigned Slot = 0, e = Opnds.size(); Slot < e; ++Slot) {
15214 SmallVectorImpl<SDValue>::const_iterator I = Opnds.begin() + Slot;
15215 // BFS traverse all OR'd operands.
15216 if (I->getOpcode() == ISD::OR) {
15217 Opnds.push_back(I->getOperand(0));
15218 Opnds.push_back(I->getOperand(1));
15219 // Re-evaluate the number of nodes to be traversed.
15220 e += 2; // 2 more nodes (LHS and RHS) are pushed.
15224 // Quit if a non-EXTRACT_VECTOR_ELT
15225 if (I->getOpcode() != ISD::EXTRACT_VECTOR_ELT)
15228 // Quit if without a constant index.
15229 SDValue Idx = I->getOperand(1);
15230 if (!isa<ConstantSDNode>(Idx))
15233 SDValue ExtractedFromVec = I->getOperand(0);
15234 DenseMap<SDValue, unsigned>::iterator M = VecInMap.find(ExtractedFromVec);
15235 if (M == VecInMap.end()) {
15236 VT = ExtractedFromVec.getValueType();
15237 // Quit if not 128/256-bit vector.
15238 if (!VT.is128BitVector() && !VT.is256BitVector())
15240 // Quit if not the same type.
15241 if (VecInMap.begin() != VecInMap.end() &&
15242 VT != VecInMap.begin()->first.getValueType())
15244 M = VecInMap.insert(std::make_pair(ExtractedFromVec, 0)).first;
15245 VecIns.push_back(ExtractedFromVec);
15247 M->second |= 1U << cast<ConstantSDNode>(Idx)->getZExtValue();
15250 assert((VT.is128BitVector() || VT.is256BitVector()) &&
15251 "Not extracted from 128-/256-bit vector.");
15253 unsigned FullMask = (1U << VT.getVectorNumElements()) - 1U;
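// Every element of each source vector must have been extracted and OR'd together;
// otherwise a whole-register PTEST would also test lanes the original scalar OR
// tree never looked at.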
15255 for (DenseMap<SDValue, unsigned>::const_iterator
15256 I = VecInMap.begin(), E = VecInMap.end(); I != E; ++I) {
15257 // Quit if not all elements are used.
15258 if (I->second != FullMask)
15262 EVT TestVT = VT.is128BitVector() ? MVT::v2i64 : MVT::v4i64;
15264 // Cast all vectors into TestVT for PTEST.
15265 for (unsigned i = 0, e = VecIns.size(); i < e; ++i)
15266 VecIns[i] = DAG.getNode(ISD::BITCAST, DL, TestVT, VecIns[i]);
15268 // If more than one full vector is evaluated, OR them first before PTEST.
15269 for (unsigned Slot = 0, e = VecIns.size(); e - Slot > 1; Slot += 2, e += 1) {
15270 // Each iteration will OR 2 nodes and append the result until there is only
15271 // 1 node left, i.e. the final OR'd value of all vectors.
15272 SDValue LHS = VecIns[Slot];
15273 SDValue RHS = VecIns[Slot + 1];
15274 VecIns.push_back(DAG.getNode(ISD::OR, DL, TestVT, LHS, RHS));
15277 return DAG.getNode(X86ISD::PTEST, DL, MVT::i32,
15278 VecIns.back(), VecIns.back());
15281 /// \brief return true if \c Op has a use that doesn't just read flags.
15282 static bool hasNonFlagsUse(SDValue Op) {
15283 for (SDNode::use_iterator UI = Op->use_begin(), UE = Op->use_end(); UI != UE;
15285 SDNode *User = *UI;
15286 unsigned UOpNo = UI.getOperandNo();
15287 if (User->getOpcode() == ISD::TRUNCATE && User->hasOneUse()) {
15288 // Look past the truncate.
15289 UOpNo = User->use_begin().getOperandNo();
15290 User = *User->use_begin();
15293 if (User->getOpcode() != ISD::BRCOND && User->getOpcode() != ISD::SETCC &&
15294 !(User->getOpcode() == ISD::SELECT && UOpNo == 0))
15300 /// Emit nodes that will be selected as "test Op0,Op0", or something
15302 SDValue X86TargetLowering::EmitTest(SDValue Op, unsigned X86CC, SDLoc dl,
15303 SelectionDAG &DAG) const {
15304 if (Op.getValueType() == MVT::i1) {
15305 SDValue ExtOp = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i8, Op);
15306 return DAG.getNode(X86ISD::CMP, dl, MVT::i32, ExtOp,
15307 DAG.getConstant(0, MVT::i8));
15309 // CF and OF aren't always set the way we want. Determine which
15310 // of these we need.
15311 bool NeedCF = false;
15312 bool NeedOF = false;
15315 case X86::COND_A: case X86::COND_AE:
15316 case X86::COND_B: case X86::COND_BE:
15319 case X86::COND_G: case X86::COND_GE:
15320 case X86::COND_L: case X86::COND_LE:
15321 case X86::COND_O: case X86::COND_NO: {
15322 // Check if we really need to set the
15323 // Overflow flag. If NoSignedWrap is present
15324 // that is not actually needed.
15325 switch (Op->getOpcode()) {
15330 const BinaryWithFlagsSDNode *BinNode =
15331 cast<BinaryWithFlagsSDNode>(Op.getNode());
15332 if (BinNode->hasNoSignedWrap())
15342 // See if we can use the EFLAGS value from the operand instead of
15343 // doing a separate TEST. TEST always sets OF and CF to 0, so unless
15344 // we prove that the arithmetic won't overflow, we can't use OF or CF.
15345 if (Op.getResNo() != 0 || NeedOF || NeedCF) {
15346 // Emit a CMP with 0, which is the TEST pattern.
15347 //if (Op.getValueType() == MVT::i1)
15348 // return DAG.getNode(X86ISD::CMP, dl, MVT::i1, Op,
15349 // DAG.getConstant(0, MVT::i1));
15350 return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op,
15351 DAG.getConstant(0, Op.getValueType()));
15353 unsigned Opcode = 0;
15354 unsigned NumOperands = 0;
15356 // Truncate operations may prevent the merge of the SETCC instruction
15357 // and the arithmetic instruction before it. Attempt to truncate the operands
15358 // of the arithmetic instruction and use a reduced bit-width instruction.
15359 bool NeedTruncation = false;
15360 SDValue ArithOp = Op;
15361 if (Op->getOpcode() == ISD::TRUNCATE && Op->hasOneUse()) {
15362 SDValue Arith = Op->getOperand(0);
15363 // Both the trunc and the arithmetic op need to have one user each.
15364 if (Arith->hasOneUse())
15365 switch (Arith.getOpcode()) {
15372 NeedTruncation = true;
15378 // NOTICE: In the code below we use ArithOp to hold the arithmetic operation
15379 // which may be the result of a CAST. We use the variable 'Op', which is the
15380 // non-casted variable when we check for possible users.
15381 switch (ArithOp.getOpcode()) {
15383 // Due to an isel shortcoming, be conservative if this add is likely to be
15384 // selected as part of a load-modify-store instruction. When the root node
15385 // in a match is a store, isel doesn't know how to remap non-chain non-flag
15386 // uses of other nodes in the match, such as the ADD in this case. This
15387 // leads to the ADD being left around and reselected, with the result being
15388 // two adds in the output. Alas, even if none of our users are stores, that
15389 // doesn't prove we're O.K. Ergo, if we have any parents that aren't
15390 // CopyToReg or SETCC, eschew INC/DEC. A better fix seems to require
15391 // climbing the DAG back to the root, and it doesn't seem to be worth the
15392 // effort.
15393 for (SDNode::use_iterator UI = Op.getNode()->use_begin(),
15394 UE = Op.getNode()->use_end(); UI != UE; ++UI)
15395 if (UI->getOpcode() != ISD::CopyToReg &&
15396 UI->getOpcode() != ISD::SETCC &&
15397 UI->getOpcode() != ISD::STORE)
15400 if (ConstantSDNode *C =
15401 dyn_cast<ConstantSDNode>(ArithOp.getNode()->getOperand(1))) {
15402 // An add of one will be selected as an INC.
15403 if (C->getAPIntValue() == 1 && !Subtarget->slowIncDec()) {
15404 Opcode = X86ISD::INC;
15409 // An add of negative one (subtract of one) will be selected as a DEC.
15410 if (C->getAPIntValue().isAllOnesValue() && !Subtarget->slowIncDec()) {
15411 Opcode = X86ISD::DEC;
15417 // Otherwise use a regular EFLAGS-setting add.
15418 Opcode = X86ISD::ADD;
15423 // If we have a constant logical shift that's only used in a comparison
15424 // against zero turn it into an equivalent AND. This allows turning it into
15425 // a TEST instruction later.
15426 if ((X86CC == X86::COND_E || X86CC == X86::COND_NE) && Op->hasOneUse() &&
15427 isa<ConstantSDNode>(Op->getOperand(1)) && !hasNonFlagsUse(Op)) {
15428 EVT VT = Op.getValueType();
15429 unsigned BitWidth = VT.getSizeInBits();
15430 unsigned ShAmt = Op->getConstantOperandVal(1);
15431 if (ShAmt >= BitWidth) // Avoid undefined shifts.
15433 APInt Mask = ArithOp.getOpcode() == ISD::SRL
15434 ? APInt::getHighBitsSet(BitWidth, BitWidth - ShAmt)
15435 : APInt::getLowBitsSet(BitWidth, BitWidth - ShAmt);
15436 if (!Mask.isSignedIntN(32)) // Avoid large immediates.
15438 SDValue New = DAG.getNode(ISD::AND, dl, VT, Op->getOperand(0),
15439 DAG.getConstant(Mask, VT));
15440 DAG.ReplaceAllUsesWith(Op, New);
15446 // If the primary 'and' result isn't used, don't bother using X86ISD::AND,
15447 // because a TEST instruction will be better.
15448 if (!hasNonFlagsUse(Op))
15454 // Due to the ISEL shortcoming noted above, be conservative if this op is
15455 // likely to be selected as part of a load-modify-store instruction.
15456 for (SDNode::use_iterator UI = Op.getNode()->use_begin(),
15457 UE = Op.getNode()->use_end(); UI != UE; ++UI)
15458 if (UI->getOpcode() == ISD::STORE)
15461 // Otherwise use a regular EFLAGS-setting instruction.
15462 switch (ArithOp.getOpcode()) {
15463 default: llvm_unreachable("unexpected operator!");
15464 case ISD::SUB: Opcode = X86ISD::SUB; break;
15465 case ISD::XOR: Opcode = X86ISD::XOR; break;
15466 case ISD::AND: Opcode = X86ISD::AND; break;
15468 if (!NeedTruncation && (X86CC == X86::COND_E || X86CC == X86::COND_NE)) {
15469 SDValue EFLAGS = LowerVectorAllZeroTest(Op, Subtarget, DAG);
15470 if (EFLAGS.getNode())
15473 Opcode = X86ISD::OR;
15487 return SDValue(Op.getNode(), 1);
15493 // If we found that truncation is beneficial, perform the truncation and
15495 if (NeedTruncation) {
15496 EVT VT = Op.getValueType();
15497 SDValue WideVal = Op->getOperand(0);
15498 EVT WideVT = WideVal.getValueType();
15499 unsigned ConvertedOp = 0;
15500 // Use a target machine opcode to prevent further DAGCombine
15501 // optimizations that may separate the arithmetic operations
15502 // from the setcc node.
15503 switch (WideVal.getOpcode()) {
15505 case ISD::ADD: ConvertedOp = X86ISD::ADD; break;
15506 case ISD::SUB: ConvertedOp = X86ISD::SUB; break;
15507 case ISD::AND: ConvertedOp = X86ISD::AND; break;
15508 case ISD::OR: ConvertedOp = X86ISD::OR; break;
15509 case ISD::XOR: ConvertedOp = X86ISD::XOR; break;
15513 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
15514 if (TLI.isOperationLegal(WideVal.getOpcode(), WideVT)) {
15515 SDValue V0 = DAG.getNode(ISD::TRUNCATE, dl, VT, WideVal.getOperand(0));
15516 SDValue V1 = DAG.getNode(ISD::TRUNCATE, dl, VT, WideVal.getOperand(1));
15517 Op = DAG.getNode(ConvertedOp, dl, VT, V0, V1);
15523 // Emit a CMP with 0, which is the TEST pattern.
15524 return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op,
15525 DAG.getConstant(0, Op.getValueType()));
15527 SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
15528 SmallVector<SDValue, 4> Ops(Op->op_begin(), Op->op_begin() + NumOperands);
15530 SDValue New = DAG.getNode(Opcode, dl, VTs, Ops);
15531 DAG.ReplaceAllUsesWith(Op, New);
15532 return SDValue(New.getNode(), 1);
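// Illustrative note: on the paths above, the original arithmetic node is
// rebuilt as an X86ISD opcode with an extra i32 (EFLAGS) result, and result
// number 1 of the new node is what callers use as the flag value.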
15535 /// Emit nodes that will be selected as "cmp Op0,Op1", or something equivalent.
15537 SDValue X86TargetLowering::EmitCmp(SDValue Op0, SDValue Op1, unsigned X86CC,
15538 SDLoc dl, SelectionDAG &DAG) const {
15539 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op1)) {
15540 if (C->getAPIntValue() == 0)
15541 return EmitTest(Op0, X86CC, dl, DAG);
15543 if (Op0.getValueType() == MVT::i1)
15544 llvm_unreachable("Unexpected comparison operation for MVT::i1 operands");
15547 if ((Op0.getValueType() == MVT::i8 || Op0.getValueType() == MVT::i16 ||
15548 Op0.getValueType() == MVT::i32 || Op0.getValueType() == MVT::i64)) {
15549 // Do the comparison at i32 if it's smaller, except in the Atom case.
15550 // This avoids subregister aliasing issues. Keep the smaller reference
15551 // if we're optimizing for size, however, as that'll allow better folding
15552 // of memory operations.
15553 if (Op0.getValueType() != MVT::i32 && Op0.getValueType() != MVT::i64 &&
15554 !DAG.getMachineFunction().getFunction()->hasFnAttribute(
15555 Attribute::MinSize) &&
15556 !Subtarget->isAtom()) {
15557 unsigned ExtendOp =
15558 isX86CCUnsigned(X86CC) ? ISD::ZERO_EXTEND : ISD::SIGN_EXTEND;
15559 Op0 = DAG.getNode(ExtendOp, dl, MVT::i32, Op0);
15560 Op1 = DAG.getNode(ExtendOp, dl, MVT::i32, Op1);
15562 // Use SUB instead of CMP to enable CSE between SUB and CMP.
15563 SDVTList VTs = DAG.getVTList(Op0.getValueType(), MVT::i32);
15564 SDValue Sub = DAG.getNode(X86ISD::SUB, dl, VTs, Op0, Op1);
15566 return SDValue(Sub.getNode(), 1);
15568 return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op0, Op1);
15571 /// Convert a comparison if required by the subtarget.
15572 SDValue X86TargetLowering::ConvertCmpIfNecessary(SDValue Cmp,
15573 SelectionDAG &DAG) const {
15574 // If the subtarget does not support the FUCOMI instruction, floating-point
15575 // comparisons have to be converted.
15576 if (Subtarget->hasCMov() ||
15577 Cmp.getOpcode() != X86ISD::CMP ||
15578 !Cmp.getOperand(0).getValueType().isFloatingPoint() ||
15579 !Cmp.getOperand(1).getValueType().isFloatingPoint())
15582 // The instruction selector will select an FUCOM instruction instead of
15583 // FUCOMI, which writes the comparison result to FPSW instead of EFLAGS. Hence
15584 // build an SDNode sequence that transfers the result from FPSW into EFLAGS:
15585 // (X86sahf (trunc (srl (X86fp_stsw (trunc (X86cmp ...)), 8))))
15587 SDValue TruncFPSW = DAG.getNode(ISD::TRUNCATE, dl, MVT::i16, Cmp);
15588 SDValue FNStSW = DAG.getNode(X86ISD::FNSTSW16r, dl, MVT::i16, TruncFPSW);
15589 SDValue Srl = DAG.getNode(ISD::SRL, dl, MVT::i16, FNStSW,
15590 DAG.getConstant(8, MVT::i8));
15591 SDValue TruncSrl = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Srl);
15592 return DAG.getNode(X86ISD::SAHF, dl, MVT::i32, TruncSrl);
15595 /// The minimum architected relative accuracy is 2^-12. We need one
15596 /// Newton-Raphson step to have a good float result (24 bits of precision).
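/// (One Newton-Raphson step for 1/sqrt(a) is x1 = x0 * (1.5 - 0.5 * a * x0 * x0),
/// which roughly doubles the number of accurate bits.)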
15597 SDValue X86TargetLowering::getRsqrtEstimate(SDValue Op,
15598 DAGCombinerInfo &DCI,
15599 unsigned &RefinementSteps,
15600 bool &UseOneConstNR) const {
15601 // FIXME: We should use instruction latency models to calculate the cost of
15602 // each potential sequence, but this is very hard to do reliably because
15603 // at least Intel's Core* chips have variable timing based on the number of
15604 // significant digits in the divisor and/or sqrt operand.
15605 if (!Subtarget->useSqrtEst())
15608 EVT VT = Op.getValueType();
15610 // SSE1 has rsqrtss and rsqrtps.
15611 // TODO: Add support for AVX512 (v16f32).
15612 // It is likely not profitable to do this for f64 because a double-precision
15613 // rsqrt estimate with refinement on x86 prior to FMA requires at least 16
15614 // instructions: convert to single, rsqrtss, convert back to double, refine
15615 // (3 steps = at least 13 insts). If an 'rsqrtsd' variant was added to the ISA
15616 // along with FMA, this could be a throughput win.
15617 if ((Subtarget->hasSSE1() && (VT == MVT::f32 || VT == MVT::v4f32)) ||
15618 (Subtarget->hasAVX() && VT == MVT::v8f32)) {
15619 RefinementSteps = 1;
15620 UseOneConstNR = false;
15621 return DCI.DAG.getNode(X86ISD::FRSQRT, SDLoc(Op), VT, Op);
15626 /// The minimum architected relative accuracy is 2^-12. We need one
15627 /// Newton-Raphson step to have a good float result (24 bits of precision).
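/// (One Newton-Raphson step for 1/a is x1 = x0 * (2 - a * x0), which roughly
/// doubles the number of accurate bits per iteration.)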
15628 SDValue X86TargetLowering::getRecipEstimate(SDValue Op,
15629 DAGCombinerInfo &DCI,
15630 unsigned &RefinementSteps) const {
15631 // FIXME: We should use instruction latency models to calculate the cost of
15632 // each potential sequence, but this is very hard to do reliably because
15633 // at least Intel's Core* chips have variable timing based on the number of
15634 // significant digits in the divisor.
15635 if (!Subtarget->useReciprocalEst())
15638 EVT VT = Op.getValueType();
15640 // SSE1 has rcpss and rcpps. AVX adds a 256-bit variant for rcpps.
15641 // TODO: Add support for AVX512 (v16f32).
15642 // It is likely not profitable to do this for f64 because a double-precision
15643 // reciprocal estimate with refinement on x86 prior to FMA requires
15644 // 15 instructions: convert to single, rcpss, convert back to double, refine
15645 // (3 steps = 12 insts). If an 'rcpsd' variant was added to the ISA
15646 // along with FMA, this could be a throughput win.
15647 if ((Subtarget->hasSSE1() && (VT == MVT::f32 || VT == MVT::v4f32)) ||
15648 (Subtarget->hasAVX() && VT == MVT::v8f32)) {
15649 RefinementSteps = ReciprocalEstimateRefinementSteps;
15650 return DCI.DAG.getNode(X86ISD::FRCP, SDLoc(Op), VT, Op);
15655 static bool isAllOnes(SDValue V) {
15656 ConstantSDNode *C = dyn_cast<ConstantSDNode>(V);
15657 return C && C->isAllOnesValue();
15660 /// LowerToBT - Result of 'and' is compared against zero. Turn it into a BT node
15661 /// if it's possible.
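/// For example, ((X >> N) & 1) != 0 is turned into (X86ISD::BT X, N) and read
/// back through the carry flag: SETNE maps to COND_B and SETEQ to COND_AE.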
15662 SDValue X86TargetLowering::LowerToBT(SDValue And, ISD::CondCode CC,
15663 SDLoc dl, SelectionDAG &DAG) const {
15664 SDValue Op0 = And.getOperand(0);
15665 SDValue Op1 = And.getOperand(1);
15666 if (Op0.getOpcode() == ISD::TRUNCATE)
15667 Op0 = Op0.getOperand(0);
15668 if (Op1.getOpcode() == ISD::TRUNCATE)
15669 Op1 = Op1.getOperand(0);
15672 if (Op1.getOpcode() == ISD::SHL)
15673 std::swap(Op0, Op1);
15674 if (Op0.getOpcode() == ISD::SHL) {
15675 if (ConstantSDNode *And00C = dyn_cast<ConstantSDNode>(Op0.getOperand(0)))
15676 if (And00C->getZExtValue() == 1) {
15677 // If we looked past a truncate, check that it's only truncating away known-zero bits.
15679 unsigned BitWidth = Op0.getValueSizeInBits();
15680 unsigned AndBitWidth = And.getValueSizeInBits();
15681 if (BitWidth > AndBitWidth) {
APInt Zeros, Ones;
15683 DAG.computeKnownBits(Op0, Zeros, Ones);
15684 if (Zeros.countLeadingOnes() < BitWidth - AndBitWidth)
15688 RHS = Op0.getOperand(1);
15690 } else if (Op1.getOpcode() == ISD::Constant) {
15691 ConstantSDNode *AndRHS = cast<ConstantSDNode>(Op1);
15692 uint64_t AndRHSVal = AndRHS->getZExtValue();
15693 SDValue AndLHS = Op0;
15695 if (AndRHSVal == 1 && AndLHS.getOpcode() == ISD::SRL) {
15696 LHS = AndLHS.getOperand(0);
15697 RHS = AndLHS.getOperand(1);
15700 // Use BT if the immediate can't be encoded in a TEST instruction.
15701 if (!isUInt<32>(AndRHSVal) && isPowerOf2_64(AndRHSVal)) {
15703 RHS = DAG.getConstant(Log2_64_Ceil(AndRHSVal), LHS.getValueType());
15707 if (LHS.getNode()) {
15708 // If LHS is i8, promote it to i32 with any_extend. There is no i8 BT
15709 // instruction. Since the shift amount is in-range-or-undefined, we know
15710 // that doing a bittest on the i32 value is ok. We extend to i32 because
15711 // the encoding for the i16 version is larger than the i32 version.
15712 // Also promote i16 to i32 for performance / code size reasons.
15713 if (LHS.getValueType() == MVT::i8 ||
15714 LHS.getValueType() == MVT::i16)
15715 LHS = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, LHS);
15717 // If the operand types disagree, extend the shift amount to match. Since
15718 // BT ignores high bits (like shifts) we can use anyextend.
15719 if (LHS.getValueType() != RHS.getValueType())
15720 RHS = DAG.getNode(ISD::ANY_EXTEND, dl, LHS.getValueType(), RHS);
15722 SDValue BT = DAG.getNode(X86ISD::BT, dl, MVT::i32, LHS, RHS);
15723 X86::CondCode Cond = CC == ISD::SETEQ ? X86::COND_AE : X86::COND_B;
15724 return DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
15725 DAG.getConstant(Cond, MVT::i8), BT);
15731 /// \brief Turns an ISD::CondCode into a value suitable for SSE floating point mask CMPs.
15733 static int translateX86FSETCC(ISD::CondCode SetCCOpcode, SDValue &Op0,
15738 // SSE Condition code mapping:
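//   0 - EQ,  1 - LT,  2 - LE,  3 - UNORD,
//   4 - NEQ, 5 - NLT, 6 - NLE, 7 - ORD
// (This is the standard CMPPS/CMPPD predicate encoding; the value 8 used
// below is not a hardware predicate, it marks conditions such as SETONE that
// are emitted as two compares.)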
15747 switch (SetCCOpcode) {
15748 default: llvm_unreachable("Unexpected SETCC condition");
15750 case ISD::SETEQ: SSECC = 0; break;
15752 case ISD::SETGT: Swap = true; // Fallthrough
15754 case ISD::SETOLT: SSECC = 1; break;
15756 case ISD::SETGE: Swap = true; // Fallthrough
15758 case ISD::SETOLE: SSECC = 2; break;
15759 case ISD::SETUO: SSECC = 3; break;
15761 case ISD::SETNE: SSECC = 4; break;
15762 case ISD::SETULE: Swap = true; // Fallthrough
15763 case ISD::SETUGE: SSECC = 5; break;
15764 case ISD::SETULT: Swap = true; // Fallthrough
15765 case ISD::SETUGT: SSECC = 6; break;
15766 case ISD::SETO: SSECC = 7; break;
15768 case ISD::SETONE: SSECC = 8; break;
15771 std::swap(Op0, Op1);
15776 // Lower256IntVSETCC - Break a 256-bit integer VSETCC into two new 128-bit
15777 // ones, and then concatenate the result back.
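// For example, a v8i32 compare without AVX2 becomes two v4i32 compares on the
// extracted 128-bit halves, reassembled with CONCAT_VECTORS.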
15778 static SDValue Lower256IntVSETCC(SDValue Op, SelectionDAG &DAG) {
15779 MVT VT = Op.getSimpleValueType();
15781 assert(VT.is256BitVector() && Op.getOpcode() == ISD::SETCC &&
15782 "Unsupported value type for operation");
15784 unsigned NumElems = VT.getVectorNumElements();
15786 SDValue CC = Op.getOperand(2);
15788 // Extract the LHS vectors
15789 SDValue LHS = Op.getOperand(0);
15790 SDValue LHS1 = Extract128BitVector(LHS, 0, DAG, dl);
15791 SDValue LHS2 = Extract128BitVector(LHS, NumElems/2, DAG, dl);
15793 // Extract the RHS vectors
15794 SDValue RHS = Op.getOperand(1);
15795 SDValue RHS1 = Extract128BitVector(RHS, 0, DAG, dl);
15796 SDValue RHS2 = Extract128BitVector(RHS, NumElems/2, DAG, dl);
15798 // Issue the operation on the smaller types and concatenate the result back
15799 MVT EltVT = VT.getVectorElementType();
15800 MVT NewVT = MVT::getVectorVT(EltVT, NumElems/2);
15801 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT,
15802 DAG.getNode(Op.getOpcode(), dl, NewVT, LHS1, RHS1, CC),
15803 DAG.getNode(Op.getOpcode(), dl, NewVT, LHS2, RHS2, CC));
15806 static SDValue LowerIntVSETCC_AVX512(SDValue Op, SelectionDAG &DAG,
15807 const X86Subtarget *Subtarget) {
15808 SDValue Op0 = Op.getOperand(0);
15809 SDValue Op1 = Op.getOperand(1);
15810 SDValue CC = Op.getOperand(2);
15811 MVT VT = Op.getSimpleValueType();
15814 assert(Op0.getValueType().getVectorElementType().getSizeInBits() >= 8 &&
15815 Op.getValueType().getScalarType() == MVT::i1 &&
15816 "Cannot set masked compare for this operation");
15818 ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get();
15820 bool Unsigned = false;
15823 switch (SetCCOpcode) {
15824 default: llvm_unreachable("Unexpected SETCC condition");
15825 case ISD::SETNE: SSECC = 4; break;
15826 case ISD::SETEQ: Opc = X86ISD::PCMPEQM; break;
15827 case ISD::SETUGT: SSECC = 6; Unsigned = true; break;
15828 case ISD::SETLT: Swap = true; //fall-through
15829 case ISD::SETGT: Opc = X86ISD::PCMPGTM; break;
15830 case ISD::SETULT: SSECC = 1; Unsigned = true; break;
15831 case ISD::SETUGE: SSECC = 5; Unsigned = true; break; //NLT
15832 case ISD::SETGE: Swap = true; SSECC = 2; break; // LE + swap
15833 case ISD::SETULE: Unsigned = true; //fall-through
15834 case ISD::SETLE: SSECC = 2; break;
15838 std::swap(Op0, Op1);
15840 return DAG.getNode(Opc, dl, VT, Op0, Op1);
15841 Opc = Unsigned ? X86ISD::CMPMU: X86ISD::CMPM;
15842 return DAG.getNode(Opc, dl, VT, Op0, Op1,
15843 DAG.getConstant(SSECC, MVT::i8));
15846 /// \brief Try to turn a VSETULT into a VSETULE by modifying its second
15847 /// operand \p Op1. If non-trivial (for example because it's not constant)
15848 /// return an empty value.
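/// E.g. (setult X, <4,4,4,4>) can be rewritten as (setule X, <3,3,3,3>), as
/// long as no constant element is zero (which would underflow).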
15849 static SDValue ChangeVSETULTtoVSETULE(SDLoc dl, SDValue Op1, SelectionDAG &DAG)
15851 BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(Op1.getNode());
15855 MVT VT = Op1.getSimpleValueType();
15856 MVT EVT = VT.getVectorElementType();
15857 unsigned n = VT.getVectorNumElements();
15858 SmallVector<SDValue, 8> ULTOp1;
15860 for (unsigned i = 0; i < n; ++i) {
15861 ConstantSDNode *Elt = dyn_cast<ConstantSDNode>(BV->getOperand(i));
15862 if (!Elt || Elt->isOpaque() || Elt->getValueType(0) != EVT)
15865 // Avoid underflow.
15866 APInt Val = Elt->getAPIntValue();
15870 ULTOp1.push_back(DAG.getConstant(Val - 1, EVT));
15873 return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, ULTOp1);
15876 static SDValue LowerVSETCC(SDValue Op, const X86Subtarget *Subtarget,
15877 SelectionDAG &DAG) {
15878 SDValue Op0 = Op.getOperand(0);
15879 SDValue Op1 = Op.getOperand(1);
15880 SDValue CC = Op.getOperand(2);
15881 MVT VT = Op.getSimpleValueType();
15882 ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get();
15883 bool isFP = Op.getOperand(1).getSimpleValueType().isFloatingPoint();
15888 MVT EltVT = Op0.getSimpleValueType().getVectorElementType();
15889 assert(EltVT == MVT::f32 || EltVT == MVT::f64);
15892 unsigned SSECC = translateX86FSETCC(SetCCOpcode, Op0, Op1);
15893 unsigned Opc = X86ISD::CMPP;
15894 if (Subtarget->hasAVX512() && VT.getVectorElementType() == MVT::i1) {
15895 assert(VT.getVectorNumElements() <= 16);
15896 Opc = X86ISD::CMPM;
15898 // In the two special cases we can't handle, emit two comparisons.
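// SETUEQ is built as (cmp unord) OR (cmp eq), i.e. predicates 3 and 0, and
// SETONE as (cmp ord) AND (cmp neq), i.e. predicates 7 and 4.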
15901 unsigned CombineOpc;
15902 if (SetCCOpcode == ISD::SETUEQ) {
15903 CC0 = 3; CC1 = 0; CombineOpc = ISD::OR;
15905 assert(SetCCOpcode == ISD::SETONE);
15906 CC0 = 7; CC1 = 4; CombineOpc = ISD::AND;
15909 SDValue Cmp0 = DAG.getNode(Opc, dl, VT, Op0, Op1,
15910 DAG.getConstant(CC0, MVT::i8));
15911 SDValue Cmp1 = DAG.getNode(Opc, dl, VT, Op0, Op1,
15912 DAG.getConstant(CC1, MVT::i8));
15913 return DAG.getNode(CombineOpc, dl, VT, Cmp0, Cmp1);
15915 // Handle all other FP comparisons here.
15916 return DAG.getNode(Opc, dl, VT, Op0, Op1,
15917 DAG.getConstant(SSECC, MVT::i8));
15920 // Break 256-bit integer vector compare into smaller ones.
15921 if (VT.is256BitVector() && !Subtarget->hasInt256())
15922 return Lower256IntVSETCC(Op, DAG);
15924 bool MaskResult = (VT.getVectorElementType() == MVT::i1);
15925 EVT OpVT = Op1.getValueType();
15926 if (Subtarget->hasAVX512()) {
15927 if (Op1.getValueType().is512BitVector() ||
15928 (Subtarget->hasBWI() && Subtarget->hasVLX()) ||
15929 (MaskResult && OpVT.getVectorElementType().getSizeInBits() >= 32))
15930 return LowerIntVSETCC_AVX512(Op, DAG, Subtarget);
15932 // In the AVX-512 architecture, setcc returns a mask with i1 elements,
15933 // but there is no compare instruction for i8 and i16 elements in KNL.
15934 // We are not talking about 512-bit operands in this case; those
15935 // types are illegal.
15937 (OpVT.getVectorElementType().getSizeInBits() < 32 &&
15938 OpVT.getVectorElementType().getSizeInBits() >= 8))
15939 return DAG.getNode(ISD::TRUNCATE, dl, VT,
15940 DAG.getNode(ISD::SETCC, dl, OpVT, Op0, Op1, CC));
15943 // We are handling one of the integer comparisons here. Since SSE only has
15944 // GT and EQ comparisons for integers, swapping operands and multiple
15945 // operations may be required for some comparisons.
15947 bool Swap = false, Invert = false, FlipSigns = false, MinMax = false;
15948 bool Subus = false;
15950 switch (SetCCOpcode) {
15951 default: llvm_unreachable("Unexpected SETCC condition");
15952 case ISD::SETNE: Invert = true;
15953 case ISD::SETEQ: Opc = X86ISD::PCMPEQ; break;
15954 case ISD::SETLT: Swap = true;
15955 case ISD::SETGT: Opc = X86ISD::PCMPGT; break;
15956 case ISD::SETGE: Swap = true;
15957 case ISD::SETLE: Opc = X86ISD::PCMPGT;
15958 Invert = true; break;
15959 case ISD::SETULT: Swap = true;
15960 case ISD::SETUGT: Opc = X86ISD::PCMPGT;
15961 FlipSigns = true; break;
15962 case ISD::SETUGE: Swap = true;
15963 case ISD::SETULE: Opc = X86ISD::PCMPGT;
15964 FlipSigns = true; Invert = true; break;
15967 // Special case: Use min/max operations for SETULE/SETUGE
15968 MVT VET = VT.getVectorElementType();
15970 (Subtarget->hasSSE41() && (VET >= MVT::i8 && VET <= MVT::i32))
15971 || (Subtarget->hasSSE2() && (VET == MVT::i8));
15974 switch (SetCCOpcode) {
15976 case ISD::SETULE: Opc = X86ISD::UMIN; MinMax = true; break;
15977 case ISD::SETUGE: Opc = X86ISD::UMAX; MinMax = true; break;
15980 if (MinMax) { Swap = false; Invert = false; FlipSigns = false; }
15983 bool hasSubus = Subtarget->hasSSE2() && (VET == MVT::i8 || VET == MVT::i16);
15984 if (!MinMax && hasSubus) {
15985 // As another special case, use PSUBUS[BW] when it's profitable. E.g. for Op0 u<= Op1 (SETULE):
15987 // t = psubus Op0, Op1
15988 // pcmpeq t, <0..0>
15989 switch (SetCCOpcode) {
15991 case ISD::SETULT: {
15992 // If the comparison is against a constant we can turn this into a
15993 // setule. With psubus, setule does not require a swap. This is
15994 // beneficial because the constant in the register is no longer
15995 // clobbered as the destination, so it can be hoisted out of a loop.
15996 // Only do this pre-AVX since vpcmp* is no longer destructive.
15997 if (Subtarget->hasAVX())
15999 SDValue ULEOp1 = ChangeVSETULTtoVSETULE(dl, Op1, DAG);
16000 if (ULEOp1.getNode()) {
16002 Subus = true; Invert = false; Swap = false;
16006 // Psubus is better than flip-sign because it requires no inversion.
16007 case ISD::SETUGE: Subus = true; Invert = false; Swap = true; break;
16008 case ISD::SETULE: Subus = true; Invert = false; Swap = false; break;
16012 Opc = X86ISD::SUBUS;
16018 std::swap(Op0, Op1);
16020 // Check that the operation in question is available (most are plain SSE2,
16021 // but PCMPGTQ and PCMPEQQ have different requirements).
16022 if (VT == MVT::v2i64) {
16023 if (Opc == X86ISD::PCMPGT && !Subtarget->hasSSE42()) {
16024 assert(Subtarget->hasSSE2() && "Don't know how to lower!");
16026 // First cast everything to the right type.
16027 Op0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op0);
16028 Op1 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op1);
16030 // Since SSE has no unsigned integer comparisons, we need to flip the sign
16031 // bits of the inputs before performing those operations. The lower
16032 // compare is always unsigned.
16035 SB = DAG.getConstant(0x80000000U, MVT::v4i32);
16037 SDValue Sign = DAG.getConstant(0x80000000U, MVT::i32);
16038 SDValue Zero = DAG.getConstant(0x00000000U, MVT::i32);
16039 SB = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
16040 Sign, Zero, Sign, Zero);
16042 Op0 = DAG.getNode(ISD::XOR, dl, MVT::v4i32, Op0, SB);
16043 Op1 = DAG.getNode(ISD::XOR, dl, MVT::v4i32, Op1, SB);
16045 // Emulate PCMPGTQ with (hi1 > hi2) | ((hi1 == hi2) & (lo1 > lo2))
16046 SDValue GT = DAG.getNode(X86ISD::PCMPGT, dl, MVT::v4i32, Op0, Op1);
16047 SDValue EQ = DAG.getNode(X86ISD::PCMPEQ, dl, MVT::v4i32, Op0, Op1);
16049 // Create masks for only the low parts/high parts of the 64 bit integers.
16050 static const int MaskHi[] = { 1, 1, 3, 3 };
16051 static const int MaskLo[] = { 0, 0, 2, 2 };
16052 SDValue EQHi = DAG.getVectorShuffle(MVT::v4i32, dl, EQ, EQ, MaskHi);
16053 SDValue GTLo = DAG.getVectorShuffle(MVT::v4i32, dl, GT, GT, MaskLo);
16054 SDValue GTHi = DAG.getVectorShuffle(MVT::v4i32, dl, GT, GT, MaskHi);
16056 SDValue Result = DAG.getNode(ISD::AND, dl, MVT::v4i32, EQHi, GTLo);
16057 Result = DAG.getNode(ISD::OR, dl, MVT::v4i32, Result, GTHi);
16060 Result = DAG.getNOT(dl, Result, MVT::v4i32);
16062 return DAG.getNode(ISD::BITCAST, dl, VT, Result);
16065 if (Opc == X86ISD::PCMPEQ && !Subtarget->hasSSE41()) {
16066 // If pcmpeqq is missing but pcmpeqd is available, synthesize pcmpeqq with
16067 // pcmpeqd + pshufd + pand.
16068 assert(Subtarget->hasSSE2() && !FlipSigns && "Don't know how to lower!");
16070 // First cast everything to the right type.
16071 Op0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op0);
16072 Op1 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op1);
16075 SDValue Result = DAG.getNode(Opc, dl, MVT::v4i32, Op0, Op1);
16077 // Make sure the lower and upper halves are both all-ones.
16078 static const int Mask[] = { 1, 0, 3, 2 };
16079 SDValue Shuf = DAG.getVectorShuffle(MVT::v4i32, dl, Result, Result, Mask);
16080 Result = DAG.getNode(ISD::AND, dl, MVT::v4i32, Result, Shuf);
16083 Result = DAG.getNOT(dl, Result, MVT::v4i32);
16085 return DAG.getNode(ISD::BITCAST, dl, VT, Result);
16089 // Since SSE has no unsigned integer comparisons, we need to flip the sign
16090 // bits of the inputs before performing those operations.
16092 EVT EltVT = VT.getVectorElementType();
16093 SDValue SB = DAG.getConstant(APInt::getSignBit(EltVT.getSizeInBits()), VT);
16094 Op0 = DAG.getNode(ISD::XOR, dl, VT, Op0, SB);
16095 Op1 = DAG.getNode(ISD::XOR, dl, VT, Op1, SB);
16098 SDValue Result = DAG.getNode(Opc, dl, VT, Op0, Op1);
16100 // If the logical-not of the result is required, perform that now.
16102 Result = DAG.getNOT(dl, Result, VT);
16105 Result = DAG.getNode(X86ISD::PCMPEQ, dl, VT, Op0, Result);
16108 Result = DAG.getNode(X86ISD::PCMPEQ, dl, VT, Result,
16109 getZeroVector(VT, Subtarget, DAG, dl));
16114 SDValue X86TargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
16116 MVT VT = Op.getSimpleValueType();
16118 if (VT.isVector()) return LowerVSETCC(Op, Subtarget, DAG);
16120 assert(((!Subtarget->hasAVX512() && VT == MVT::i8) || (VT == MVT::i1))
16121 && "SetCC type must be 8-bit or 1-bit integer");
16122 SDValue Op0 = Op.getOperand(0);
16123 SDValue Op1 = Op.getOperand(1);
16125 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
16127 // Optimize to BT if possible.
16128 // Lower (X & (1 << N)) == 0 to BT(X, N).
16129 // Lower ((X >>u N) & 1) != 0 to BT(X, N).
16130 // Lower ((X >>s N) & 1) != 0 to BT(X, N).
16131 if (Op0.getOpcode() == ISD::AND && Op0.hasOneUse() &&
16132 Op1.getOpcode() == ISD::Constant &&
16133 cast<ConstantSDNode>(Op1)->isNullValue() &&
16134 (CC == ISD::SETEQ || CC == ISD::SETNE)) {
16135 SDValue NewSetCC = LowerToBT(Op0, CC, dl, DAG);
16136 if (NewSetCC.getNode()) {
16138 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, NewSetCC);
16143 // Look for X == 0, X == 1, X != 0, or X != 1. We can simplify some forms of these.
16145 if (Op1.getOpcode() == ISD::Constant &&
16146 (cast<ConstantSDNode>(Op1)->getZExtValue() == 1 ||
16147 cast<ConstantSDNode>(Op1)->isNullValue()) &&
16148 (CC == ISD::SETEQ || CC == ISD::SETNE)) {
16150 // If the input is a setcc, then reuse the input setcc or use a new one with
16151 // the inverted condition.
16152 if (Op0.getOpcode() == X86ISD::SETCC) {
16153 X86::CondCode CCode = (X86::CondCode)Op0.getConstantOperandVal(0);
16154 bool Invert = (CC == ISD::SETNE) ^
16155 cast<ConstantSDNode>(Op1)->isNullValue();
16159 CCode = X86::GetOppositeBranchCondition(CCode);
16160 SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
16161 DAG.getConstant(CCode, MVT::i8),
16162 Op0.getOperand(1));
16164 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, SetCC);
16168 if ((Op0.getValueType() == MVT::i1) && (Op1.getOpcode() == ISD::Constant) &&
16169 (cast<ConstantSDNode>(Op1)->getZExtValue() == 1) &&
16170 (CC == ISD::SETEQ || CC == ISD::SETNE)) {
16172 ISD::CondCode NewCC = ISD::getSetCCInverse(CC, true);
16173 return DAG.getSetCC(dl, VT, Op0, DAG.getConstant(0, MVT::i1), NewCC);
16176 bool isFP = Op1.getSimpleValueType().isFloatingPoint();
16177 unsigned X86CC = TranslateX86CC(CC, isFP, Op0, Op1, DAG);
16178 if (X86CC == X86::COND_INVALID)
16181 SDValue EFLAGS = EmitCmp(Op0, Op1, X86CC, dl, DAG);
16182 EFLAGS = ConvertCmpIfNecessary(EFLAGS, DAG);
16183 SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
16184 DAG.getConstant(X86CC, MVT::i8), EFLAGS);
16186 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, SetCC);
16190 // isX86LogicalCmp - Return true if opcode is an X86 logical comparison.
16191 static bool isX86LogicalCmp(SDValue Op) {
16192 unsigned Opc = Op.getNode()->getOpcode();
16193 if (Opc == X86ISD::CMP || Opc == X86ISD::COMI || Opc == X86ISD::UCOMI ||
16194 Opc == X86ISD::SAHF)
16196 if (Op.getResNo() == 1 &&
16197 (Opc == X86ISD::ADD ||
16198 Opc == X86ISD::SUB ||
16199 Opc == X86ISD::ADC ||
16200 Opc == X86ISD::SBB ||
16201 Opc == X86ISD::SMUL ||
16202 Opc == X86ISD::UMUL ||
16203 Opc == X86ISD::INC ||
16204 Opc == X86ISD::DEC ||
16205 Opc == X86ISD::OR ||
16206 Opc == X86ISD::XOR ||
16207 Opc == X86ISD::AND))
16210 if (Op.getResNo() == 2 && Opc == X86ISD::UMUL)
16216 static bool isTruncWithZeroHighBitsInput(SDValue V, SelectionDAG &DAG) {
16217 if (V.getOpcode() != ISD::TRUNCATE)
16220 SDValue VOp0 = V.getOperand(0);
16221 unsigned InBits = VOp0.getValueSizeInBits();
16222 unsigned Bits = V.getValueSizeInBits();
16223 return DAG.MaskedValueIsZero(VOp0, APInt::getHighBitsSet(InBits,InBits-Bits));
16226 SDValue X86TargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
16227 bool addTest = true;
16228 SDValue Cond = Op.getOperand(0);
16229 SDValue Op1 = Op.getOperand(1);
16230 SDValue Op2 = Op.getOperand(2);
16232 EVT VT = Op1.getValueType();
16235 // Lower fp selects into a CMP/AND/ANDN/OR sequence when the necessary SSE ops
16236 // are available. Otherwise fp cmovs get lowered into a less efficient branch
16237 // sequence later on.
16238 if (Cond.getOpcode() == ISD::SETCC &&
16239 ((Subtarget->hasSSE2() && (VT == MVT::f32 || VT == MVT::f64)) ||
16240 (Subtarget->hasSSE1() && VT == MVT::f32)) &&
16241 VT == Cond.getOperand(0).getValueType() && Cond->hasOneUse()) {
16242 SDValue CondOp0 = Cond.getOperand(0), CondOp1 = Cond.getOperand(1);
16243 int SSECC = translateX86FSETCC(
16244 cast<CondCodeSDNode>(Cond.getOperand(2))->get(), CondOp0, CondOp1);
16247 if (Subtarget->hasAVX512()) {
16248 SDValue Cmp = DAG.getNode(X86ISD::FSETCC, DL, MVT::i1, CondOp0, CondOp1,
16249 DAG.getConstant(SSECC, MVT::i8));
16250 return DAG.getNode(X86ISD::SELECT, DL, VT, Cmp, Op1, Op2);
16252 SDValue Cmp = DAG.getNode(X86ISD::FSETCC, DL, VT, CondOp0, CondOp1,
16253 DAG.getConstant(SSECC, MVT::i8));
16254 SDValue AndN = DAG.getNode(X86ISD::FANDN, DL, VT, Cmp, Op2);
16255 SDValue And = DAG.getNode(X86ISD::FAND, DL, VT, Cmp, Op1);
16256 return DAG.getNode(X86ISD::FOR, DL, VT, AndN, And);
16260 if (Cond.getOpcode() == ISD::SETCC) {
16261 SDValue NewCond = LowerSETCC(Cond, DAG);
16262 if (NewCond.getNode())
16266 // (select (x == 0), -1, y) -> (sign_bit (x - 1)) | y
16267 // (select (x == 0), y, -1) -> ~(sign_bit (x - 1)) | y
16268 // (select (x != 0), y, -1) -> (sign_bit (x - 1)) | y
16269 // (select (x != 0), -1, y) -> ~(sign_bit (x - 1)) | y
16270 if (Cond.getOpcode() == X86ISD::SETCC &&
16271 Cond.getOperand(1).getOpcode() == X86ISD::CMP &&
16272 isZero(Cond.getOperand(1).getOperand(1))) {
16273 SDValue Cmp = Cond.getOperand(1);
16275 unsigned CondCode =cast<ConstantSDNode>(Cond.getOperand(0))->getZExtValue();
16277 if ((isAllOnes(Op1) || isAllOnes(Op2)) &&
16278 (CondCode == X86::COND_E || CondCode == X86::COND_NE)) {
16279 SDValue Y = isAllOnes(Op2) ? Op1 : Op2;
16281 SDValue CmpOp0 = Cmp.getOperand(0);
16282 // Apply further optimizations for special cases
16283 // (select (x != 0), -1, 0) -> neg & sbb
16284 // (select (x == 0), 0, -1) -> neg & sbb
16285 if (ConstantSDNode *YC = dyn_cast<ConstantSDNode>(Y))
16286 if (YC->isNullValue() &&
16287 (isAllOnes(Op1) == (CondCode == X86::COND_NE))) {
16288 SDVTList VTs = DAG.getVTList(CmpOp0.getValueType(), MVT::i32);
16289 SDValue Neg = DAG.getNode(X86ISD::SUB, DL, VTs,
16290 DAG.getConstant(0, CmpOp0.getValueType()),
16292 SDValue Res = DAG.getNode(X86ISD::SETCC_CARRY, DL, Op.getValueType(),
16293 DAG.getConstant(X86::COND_B, MVT::i8),
16294 SDValue(Neg.getNode(), 1));
16298 Cmp = DAG.getNode(X86ISD::CMP, DL, MVT::i32,
16299 CmpOp0, DAG.getConstant(1, CmpOp0.getValueType()));
16300 Cmp = ConvertCmpIfNecessary(Cmp, DAG);
16302 SDValue Res = // Res = 0 or -1.
16303 DAG.getNode(X86ISD::SETCC_CARRY, DL, Op.getValueType(),
16304 DAG.getConstant(X86::COND_B, MVT::i8), Cmp);
16306 if (isAllOnes(Op1) != (CondCode == X86::COND_E))
16307 Res = DAG.getNOT(DL, Res, Res.getValueType());
16309 ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(Op2);
16310 if (!N2C || !N2C->isNullValue())
16311 Res = DAG.getNode(ISD::OR, DL, Res.getValueType(), Res, Y);
16316 // Look past (and (setcc_carry (cmp ...)), 1).
16317 if (Cond.getOpcode() == ISD::AND &&
16318 Cond.getOperand(0).getOpcode() == X86ISD::SETCC_CARRY) {
16319 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Cond.getOperand(1));
16320 if (C && C->getAPIntValue() == 1)
16321 Cond = Cond.getOperand(0);
16324 // If the condition flag is set by an X86ISD::CMP, then use it as the condition
16325 // setting operand in place of the X86ISD::SETCC.
16326 unsigned CondOpcode = Cond.getOpcode();
16327 if (CondOpcode == X86ISD::SETCC ||
16328 CondOpcode == X86ISD::SETCC_CARRY) {
16329 CC = Cond.getOperand(0);
16331 SDValue Cmp = Cond.getOperand(1);
16332 unsigned Opc = Cmp.getOpcode();
16333 MVT VT = Op.getSimpleValueType();
16335 bool IllegalFPCMov = false;
16336 if (VT.isFloatingPoint() && !VT.isVector() &&
16337 !isScalarFPTypeInSSEReg(VT)) // FPStack?
16338 IllegalFPCMov = !hasFPCMov(cast<ConstantSDNode>(CC)->getSExtValue());
16340 if ((isX86LogicalCmp(Cmp) && !IllegalFPCMov) ||
16341 Opc == X86ISD::BT) { // FIXME
16345 } else if (CondOpcode == ISD::USUBO || CondOpcode == ISD::SSUBO ||
16346 CondOpcode == ISD::UADDO || CondOpcode == ISD::SADDO ||
16347 ((CondOpcode == ISD::UMULO || CondOpcode == ISD::SMULO) &&
16348 Cond.getOperand(0).getValueType() != MVT::i8)) {
16349 SDValue LHS = Cond.getOperand(0);
16350 SDValue RHS = Cond.getOperand(1);
16351 unsigned X86Opcode;
16354 switch (CondOpcode) {
16355 case ISD::UADDO: X86Opcode = X86ISD::ADD; X86Cond = X86::COND_B; break;
16356 case ISD::SADDO: X86Opcode = X86ISD::ADD; X86Cond = X86::COND_O; break;
16357 case ISD::USUBO: X86Opcode = X86ISD::SUB; X86Cond = X86::COND_B; break;
16358 case ISD::SSUBO: X86Opcode = X86ISD::SUB; X86Cond = X86::COND_O; break;
16359 case ISD::UMULO: X86Opcode = X86ISD::UMUL; X86Cond = X86::COND_O; break;
16360 case ISD::SMULO: X86Opcode = X86ISD::SMUL; X86Cond = X86::COND_O; break;
16361 default: llvm_unreachable("unexpected overflowing operator");
16363 if (CondOpcode == ISD::UMULO)
16364 VTs = DAG.getVTList(LHS.getValueType(), LHS.getValueType(),
16367 VTs = DAG.getVTList(LHS.getValueType(), MVT::i32);
16369 SDValue X86Op = DAG.getNode(X86Opcode, DL, VTs, LHS, RHS);
16371 if (CondOpcode == ISD::UMULO)
16372 Cond = X86Op.getValue(2);
16374 Cond = X86Op.getValue(1);
16376 CC = DAG.getConstant(X86Cond, MVT::i8);
16381 // Look past the truncate if the high bits are known zero.
16382 if (isTruncWithZeroHighBitsInput(Cond, DAG))
16383 Cond = Cond.getOperand(0);
16385 // We know the result of AND is compared against zero. Try to match it to BT.
16387 if (Cond.getOpcode() == ISD::AND && Cond.hasOneUse()) {
16388 SDValue NewSetCC = LowerToBT(Cond, ISD::SETNE, DL, DAG);
16389 if (NewSetCC.getNode()) {
16390 CC = NewSetCC.getOperand(0);
16391 Cond = NewSetCC.getOperand(1);
16398 CC = DAG.getConstant(X86::COND_NE, MVT::i8);
16399 Cond = EmitTest(Cond, X86::COND_NE, DL, DAG);
16402 // a < b ? -1 : 0 -> RES = ~setcc_carry
16403 // a < b ? 0 : -1 -> RES = setcc_carry
16404 // a >= b ? -1 : 0 -> RES = setcc_carry
16405 // a >= b ? 0 : -1 -> RES = ~setcc_carry
16406 if (Cond.getOpcode() == X86ISD::SUB) {
16407 Cond = ConvertCmpIfNecessary(Cond, DAG);
16408 unsigned CondCode = cast<ConstantSDNode>(CC)->getZExtValue();
16410 if ((CondCode == X86::COND_AE || CondCode == X86::COND_B) &&
16411 (isAllOnes(Op1) || isAllOnes(Op2)) && (isZero(Op1) || isZero(Op2))) {
16412 SDValue Res = DAG.getNode(X86ISD::SETCC_CARRY, DL, Op.getValueType(),
16413 DAG.getConstant(X86::COND_B, MVT::i8), Cond);
16414 if (isAllOnes(Op1) != (CondCode == X86::COND_B))
16415 return DAG.getNOT(DL, Res, Res.getValueType());
16420 // X86 doesn't have an i8 cmov. If both operands are the result of a truncate,
16421 // widen the cmov and push the truncate through. This avoids introducing a new
16422 // branch during isel and doesn't add any extensions.
16423 if (Op.getValueType() == MVT::i8 &&
16424 Op1.getOpcode() == ISD::TRUNCATE && Op2.getOpcode() == ISD::TRUNCATE) {
16425 SDValue T1 = Op1.getOperand(0), T2 = Op2.getOperand(0);
16426 if (T1.getValueType() == T2.getValueType() &&
16427 // Blacklist CopyFromReg to avoid partial register stalls.
16428 T1.getOpcode() != ISD::CopyFromReg && T2.getOpcode()!=ISD::CopyFromReg){
16429 SDVTList VTs = DAG.getVTList(T1.getValueType(), MVT::Glue);
16430 SDValue Cmov = DAG.getNode(X86ISD::CMOV, DL, VTs, T2, T1, CC, Cond);
16431 return DAG.getNode(ISD::TRUNCATE, DL, Op.getValueType(), Cmov);
16435 // X86ISD::CMOV means set the result (which is operand 1) to the RHS if
16436 // condition is true.
16437 SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::Glue);
16438 SDValue Ops[] = { Op2, Op1, CC, Cond };
16439 return DAG.getNode(X86ISD::CMOV, DL, VTs, Ops);
16442 static SDValue LowerSIGN_EXTEND_AVX512(SDValue Op, const X86Subtarget *Subtarget,
16443 SelectionDAG &DAG) {
16444 MVT VT = Op->getSimpleValueType(0);
16445 SDValue In = Op->getOperand(0);
16446 MVT InVT = In.getSimpleValueType();
16447 MVT VTElt = VT.getVectorElementType();
16448 MVT InVTElt = InVT.getVectorElementType();
16452 if ((InVTElt == MVT::i1) &&
16453 (((Subtarget->hasBWI() && Subtarget->hasVLX() &&
16454 VT.getSizeInBits() <= 256 && VTElt.getSizeInBits() <= 16)) ||
16456 ((Subtarget->hasBWI() && VT.is512BitVector() &&
16457 VTElt.getSizeInBits() <= 16)) ||
16459 ((Subtarget->hasDQI() && Subtarget->hasVLX() &&
16460 VT.getSizeInBits() <= 256 && VTElt.getSizeInBits() >= 32)) ||
16462 ((Subtarget->hasDQI() && VT.is512BitVector() &&
16463 VTElt.getSizeInBits() >= 32))))
16464 return DAG.getNode(X86ISD::VSEXT, dl, VT, In);
16466 unsigned int NumElts = VT.getVectorNumElements();
16468 if (NumElts != 8 && NumElts != 16)
16471 if (VT.is512BitVector() && InVT.getVectorElementType() != MVT::i1) {
16472 if (In.getOpcode() == X86ISD::VSEXT || In.getOpcode() == X86ISD::VZEXT)
16473 return DAG.getNode(In.getOpcode(), dl, VT, In.getOperand(0));
16474 return DAG.getNode(X86ISD::VSEXT, dl, VT, In);
16477 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
16478 assert (InVT.getVectorElementType() == MVT::i1 && "Unexpected vector type");
16480 MVT ExtVT = (NumElts == 8) ? MVT::v8i64 : MVT::v16i32;
16481 Constant *C = ConstantInt::get(*DAG.getContext(),
16482 APInt::getAllOnesValue(ExtVT.getScalarType().getSizeInBits()));
16484 SDValue CP = DAG.getConstantPool(C, TLI.getPointerTy());
16485 unsigned Alignment = cast<ConstantPoolSDNode>(CP)->getAlignment();
16486 SDValue Ld = DAG.getLoad(ExtVT.getScalarType(), dl, DAG.getEntryNode(), CP,
16487 MachinePointerInfo::getConstantPool(),
16488 false, false, false, Alignment);
16489 SDValue Brcst = DAG.getNode(X86ISD::VBROADCASTM, dl, ExtVT, In, Ld);
16490 if (VT.is512BitVector())
16492 return DAG.getNode(X86ISD::VTRUNC, dl, VT, Brcst);
16495 static SDValue LowerSIGN_EXTEND(SDValue Op, const X86Subtarget *Subtarget,
16496 SelectionDAG &DAG) {
16497 MVT VT = Op->getSimpleValueType(0);
16498 SDValue In = Op->getOperand(0);
16499 MVT InVT = In.getSimpleValueType();
16502 if (VT.is512BitVector() || InVT.getVectorElementType() == MVT::i1)
16503 return LowerSIGN_EXTEND_AVX512(Op, Subtarget, DAG);
16505 if ((VT != MVT::v4i64 || InVT != MVT::v4i32) &&
16506 (VT != MVT::v8i32 || InVT != MVT::v8i16) &&
16507 (VT != MVT::v16i16 || InVT != MVT::v16i8))
16510 if (Subtarget->hasInt256())
16511 return DAG.getNode(X86ISD::VSEXT, dl, VT, In);
16513 // Optimize vectors in AVX mode
16514 // Sign extend v8i16 to v8i32 and v4i32 to v4i64.
16517 // Divide input vector into two parts
16518 // for v4i32 the shuffle mask will be { 0, 1, -1, -1} {2, 3, -1, -1}
16519 // use vpmovsx instruction to extend v4i32 -> v2i64; v8i16 -> v4i32
16520 // concat the vectors to original VT
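// E.g. (illustrative) v8i16 -> v8i32 on AVX1: the {0..3} half and the {4..7}
// half are each sign-extended with VSEXT to v4i32 and then concatenated back
// into the full v8i32 result.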
16522 unsigned NumElems = InVT.getVectorNumElements();
16523 SDValue Undef = DAG.getUNDEF(InVT);
16525 SmallVector<int,8> ShufMask1(NumElems, -1);
16526 for (unsigned i = 0; i != NumElems/2; ++i)
16529 SDValue OpLo = DAG.getVectorShuffle(InVT, dl, In, Undef, &ShufMask1[0]);
16531 SmallVector<int,8> ShufMask2(NumElems, -1);
16532 for (unsigned i = 0; i != NumElems/2; ++i)
16533 ShufMask2[i] = i + NumElems/2;
16535 SDValue OpHi = DAG.getVectorShuffle(InVT, dl, In, Undef, &ShufMask2[0]);
16537 MVT HalfVT = MVT::getVectorVT(VT.getScalarType(),
16538 VT.getVectorNumElements()/2);
16540 OpLo = DAG.getNode(X86ISD::VSEXT, dl, HalfVT, OpLo);
16541 OpHi = DAG.getNode(X86ISD::VSEXT, dl, HalfVT, OpHi);
16543 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, OpLo, OpHi);
16546 // Lower vector extended loads using a shuffle. If SSSE3 is not available, we
16547 // may emit an illegal shuffle but the expansion is still better than scalar
16548 // code. We generate X86ISD::VSEXT for SEXTLOADs if it's available, otherwise
16549 // we'll emit a shuffle and an arithmetic shift.
16550 // FIXME: Is the expansion actually better than scalar code? It doesn't seem so.
16551 // TODO: It is possible to support ZExt by zeroing the undef values during
16552 // the shuffle phase or after the shuffle.
16553 static SDValue LowerExtendedLoad(SDValue Op, const X86Subtarget *Subtarget,
16554 SelectionDAG &DAG) {
16555 MVT RegVT = Op.getSimpleValueType();
16556 assert(RegVT.isVector() && "We only custom lower vector sext loads.");
16557 assert(RegVT.isInteger() &&
16558 "We only custom lower integer vector sext loads.");
16560 // Nothing useful we can do without SSE2 shuffles.
16561 assert(Subtarget->hasSSE2() && "We only custom lower sext loads with SSE2.");
16563 LoadSDNode *Ld = cast<LoadSDNode>(Op.getNode());
16565 EVT MemVT = Ld->getMemoryVT();
16566 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
16567 unsigned RegSz = RegVT.getSizeInBits();
16569 ISD::LoadExtType Ext = Ld->getExtensionType();
16571 assert((Ext == ISD::EXTLOAD || Ext == ISD::SEXTLOAD)
16572 && "Only anyext and sext are currently implemented.");
16573 assert(MemVT != RegVT && "Cannot extend to the same type");
16574 assert(MemVT.isVector() && "Must load a vector from memory");
16576 unsigned NumElems = RegVT.getVectorNumElements();
16577 unsigned MemSz = MemVT.getSizeInBits();
16578 assert(RegSz > MemSz && "Register size must be greater than the mem size");
16580 if (Ext == ISD::SEXTLOAD && RegSz == 256 && !Subtarget->hasInt256()) {
16581 // The only way in which we have a legal 256-bit vector result but not the
16582 // integer 256-bit operations needed to directly lower a sextload is if we
16583 // have AVX1 but not AVX2. In that case, we can always emit a sextload to
16584 // a 128-bit vector and a normal sign_extend to 256-bits that should get
16585 // correctly legalized. We do this late to allow the canonical form of
16586 // sextload to persist throughout the rest of the DAG combiner -- it wants
16587 // to fold together any extensions it can, and so will fuse a sign_extend
16588 // of an sextload into a sextload targeting a wider value.
16590 if (MemSz == 128) {
16591 // Just switch this to a normal load.
16592 assert(TLI.isTypeLegal(MemVT) && "If the memory type is a 128-bit type, "
16593 "it must be a legal 128-bit vector "
16595 Load = DAG.getLoad(MemVT, dl, Ld->getChain(), Ld->getBasePtr(),
16596 Ld->getPointerInfo(), Ld->isVolatile(), Ld->isNonTemporal(),
16597 Ld->isInvariant(), Ld->getAlignment());
16599 assert(MemSz < 128 &&
16600 "Can't extend a type wider than 128 bits to a 256 bit vector!");
16601 // Do an sext load to a 128-bit vector type. We want to use the same
16602 // number of elements, but elements half as wide. This will end up being
16603 // recursively lowered by this routine, but will succeed as we definitely
16604 // have all the necessary features if we're using AVX1.
16606 EVT::getIntegerVT(*DAG.getContext(), RegVT.getScalarSizeInBits() / 2);
16607 EVT HalfVecVT = EVT::getVectorVT(*DAG.getContext(), HalfEltVT, NumElems);
16609 DAG.getExtLoad(Ext, dl, HalfVecVT, Ld->getChain(), Ld->getBasePtr(),
16610 Ld->getPointerInfo(), MemVT, Ld->isVolatile(),
16611 Ld->isNonTemporal(), Ld->isInvariant(),
16612 Ld->getAlignment());
16615 // Replace chain users with the new chain.
16616 assert(Load->getNumValues() == 2 && "Loads must carry a chain!");
16617 DAG.ReplaceAllUsesOfValueWith(SDValue(Ld, 1), Load.getValue(1));
16619 // Finally, do a normal sign-extend to the desired register.
16620 return DAG.getSExtOrTrunc(Load, dl, RegVT);
16623 // All sizes must be a power of two.
16624 assert(isPowerOf2_32(RegSz * MemSz * NumElems) &&
16625 "Non-power-of-two elements are not custom lowered!");
16627 // Attempt to load the original value using scalar loads.
16628 // Find the largest scalar type that divides the total loaded size.
16629 MVT SclrLoadTy = MVT::i8;
16630 for (MVT Tp : MVT::integer_valuetypes()) {
16631 if (TLI.isTypeLegal(Tp) && ((MemSz % Tp.getSizeInBits()) == 0)) {
16636 // On 32-bit systems we can't use 64-bit integer loads. Try bitcasting to F64.
16637 if (TLI.isTypeLegal(MVT::f64) && SclrLoadTy.getSizeInBits() < 64 &&
16639 SclrLoadTy = MVT::f64;
16641 // Calculate the number of scalar loads that we need to perform
16642 // in order to load our vector from memory.
16643 unsigned NumLoads = MemSz / SclrLoadTy.getSizeInBits();
16645 assert((Ext != ISD::SEXTLOAD || NumLoads == 1) &&
16646 "Can only lower sext loads with a single scalar load!");
16648 unsigned loadRegZize = RegSz;
16649 if (Ext == ISD::SEXTLOAD && RegSz == 256)
16652 // Represent our vector as a sequence of elements which are the
16653 // largest scalar that we can load.
16654 EVT LoadUnitVecVT = EVT::getVectorVT(
16655 *DAG.getContext(), SclrLoadTy, loadRegZize / SclrLoadTy.getSizeInBits());
16657 // Represent the data using the same element type that is stored in
16658 // memory. In practice, we 'widen' MemVT.
16660 EVT::getVectorVT(*DAG.getContext(), MemVT.getScalarType(),
16661 loadRegZize / MemVT.getScalarType().getSizeInBits());
16663 assert(WideVecVT.getSizeInBits() == LoadUnitVecVT.getSizeInBits() &&
16664 "Invalid vector type");
16666 // We can't shuffle using an illegal type.
16667 assert(TLI.isTypeLegal(WideVecVT) &&
16668 "We only lower types that form legal widened vector types");
16670 SmallVector<SDValue, 8> Chains;
16671 SDValue Ptr = Ld->getBasePtr();
16672 SDValue Increment =
16673 DAG.getConstant(SclrLoadTy.getSizeInBits() / 8, TLI.getPointerTy());
16674 SDValue Res = DAG.getUNDEF(LoadUnitVecVT);
16676 for (unsigned i = 0; i < NumLoads; ++i) {
16677 // Perform a single load.
16678 SDValue ScalarLoad =
16679 DAG.getLoad(SclrLoadTy, dl, Ld->getChain(), Ptr, Ld->getPointerInfo(),
16680 Ld->isVolatile(), Ld->isNonTemporal(), Ld->isInvariant(),
16681 Ld->getAlignment());
16682 Chains.push_back(ScalarLoad.getValue(1));
16683 // Create the first element type using SCALAR_TO_VECTOR in order to avoid
16684 // another round of DAGCombining.
16686 Res = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, LoadUnitVecVT, ScalarLoad);
16688 Res = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, LoadUnitVecVT, Res,
16689 ScalarLoad, DAG.getIntPtrConstant(i));
16691 Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, Increment);
16694 SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Chains);
16696 // Bitcast the loaded value to a vector of the original element type, in
16697 // the size of the target vector type.
16698 SDValue SlicedVec = DAG.getNode(ISD::BITCAST, dl, WideVecVT, Res);
16699 unsigned SizeRatio = RegSz / MemSz;
16701 if (Ext == ISD::SEXTLOAD) {
16702 // If we have SSE4.1, we can directly emit a VSEXT node.
16703 if (Subtarget->hasSSE41()) {
16704 SDValue Sext = DAG.getNode(X86ISD::VSEXT, dl, RegVT, SlicedVec);
16705 DAG.ReplaceAllUsesOfValueWith(SDValue(Ld, 1), TF);
16709 // Otherwise we'll shuffle the small elements in the high bits of the
16710 // larger type and perform an arithmetic shift. If the shift is not legal
16711 // it's better to scalarize.
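// E.g. (illustrative) a sextload from v4i8 to v4i32 without SSE4.1: each
// loaded byte is shuffled into the most significant byte of its i32 lane and
// the whole vector is then arithmetically shifted right by 24.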
16712 assert(TLI.isOperationLegalOrCustom(ISD::SRA, RegVT) &&
16713 "We can't implement a sext load without an arithmetic right shift!");
16715 // Redistribute the loaded elements into the different locations.
16716 SmallVector<int, 16> ShuffleVec(NumElems * SizeRatio, -1);
16717 for (unsigned i = 0; i != NumElems; ++i)
16718 ShuffleVec[i * SizeRatio + SizeRatio - 1] = i;
16720 SDValue Shuff = DAG.getVectorShuffle(
16721 WideVecVT, dl, SlicedVec, DAG.getUNDEF(WideVecVT), &ShuffleVec[0]);
16723 Shuff = DAG.getNode(ISD::BITCAST, dl, RegVT, Shuff);
16725 // Build the arithmetic shift.
16726 unsigned Amt = RegVT.getVectorElementType().getSizeInBits() -
16727 MemVT.getVectorElementType().getSizeInBits();
16729 DAG.getNode(ISD::SRA, dl, RegVT, Shuff, DAG.getConstant(Amt, RegVT));
16731 DAG.ReplaceAllUsesOfValueWith(SDValue(Ld, 1), TF);
16735 // Redistribute the loaded elements into the different locations.
16736 SmallVector<int, 16> ShuffleVec(NumElems * SizeRatio, -1);
16737 for (unsigned i = 0; i != NumElems; ++i)
16738 ShuffleVec[i * SizeRatio] = i;
16740 SDValue Shuff = DAG.getVectorShuffle(WideVecVT, dl, SlicedVec,
16741 DAG.getUNDEF(WideVecVT), &ShuffleVec[0]);
16743 // Bitcast to the requested type.
16744 Shuff = DAG.getNode(ISD::BITCAST, dl, RegVT, Shuff);
16745 DAG.ReplaceAllUsesOfValueWith(SDValue(Ld, 1), TF);
16749 // isAndOrOfSetCCs - Return true if node is an ISD::AND or
16750 // ISD::OR of two X86ISD::SETCC nodes each of which has no other use apart
16751 // from the AND / OR.
16752 static bool isAndOrOfSetCCs(SDValue Op, unsigned &Opc) {
16753 Opc = Op.getOpcode();
16754 if (Opc != ISD::OR && Opc != ISD::AND)
16756 return (Op.getOperand(0).getOpcode() == X86ISD::SETCC &&
16757 Op.getOperand(0).hasOneUse() &&
16758 Op.getOperand(1).getOpcode() == X86ISD::SETCC &&
16759 Op.getOperand(1).hasOneUse());
16762 // isXor1OfSetCC - Return true if node is an ISD::XOR of an X86ISD::SETCC and
16763 // 1, and that the SETCC node has a single use.
16764 static bool isXor1OfSetCC(SDValue Op) {
16765 if (Op.getOpcode() != ISD::XOR)
16767 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(Op.getOperand(1));
16768 if (N1C && N1C->getAPIntValue() == 1) {
16769 return Op.getOperand(0).getOpcode() == X86ISD::SETCC &&
16770 Op.getOperand(0).hasOneUse();
16775 SDValue X86TargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
16776 bool addTest = true;
16777 SDValue Chain = Op.getOperand(0);
16778 SDValue Cond = Op.getOperand(1);
16779 SDValue Dest = Op.getOperand(2);
16782 bool Inverted = false;
16784 if (Cond.getOpcode() == ISD::SETCC) {
16785 // Check for setcc([su]{add,sub,mul}o == 0).
16786 if (cast<CondCodeSDNode>(Cond.getOperand(2))->get() == ISD::SETEQ &&
16787 isa<ConstantSDNode>(Cond.getOperand(1)) &&
16788 cast<ConstantSDNode>(Cond.getOperand(1))->isNullValue() &&
16789 Cond.getOperand(0).getResNo() == 1 &&
16790 (Cond.getOperand(0).getOpcode() == ISD::SADDO ||
16791 Cond.getOperand(0).getOpcode() == ISD::UADDO ||
16792 Cond.getOperand(0).getOpcode() == ISD::SSUBO ||
16793 Cond.getOperand(0).getOpcode() == ISD::USUBO ||
16794 Cond.getOperand(0).getOpcode() == ISD::SMULO ||
16795 Cond.getOperand(0).getOpcode() == ISD::UMULO)) {
16797 Cond = Cond.getOperand(0);
16799 SDValue NewCond = LowerSETCC(Cond, DAG);
16800 if (NewCond.getNode())
16805 // FIXME: LowerXALUO doesn't handle these!!
16806 else if (Cond.getOpcode() == X86ISD::ADD ||
16807 Cond.getOpcode() == X86ISD::SUB ||
16808 Cond.getOpcode() == X86ISD::SMUL ||
16809 Cond.getOpcode() == X86ISD::UMUL)
16810 Cond = LowerXALUO(Cond, DAG);
16813 // Look past (and (setcc_carry (cmp ...)), 1).
16814 if (Cond.getOpcode() == ISD::AND &&
16815 Cond.getOperand(0).getOpcode() == X86ISD::SETCC_CARRY) {
16816 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Cond.getOperand(1));
16817 if (C && C->getAPIntValue() == 1)
16818 Cond = Cond.getOperand(0);
16821 // If the condition flag is set by an X86ISD::CMP, then use it as the condition
16822 // setting operand in place of the X86ISD::SETCC.
16823 unsigned CondOpcode = Cond.getOpcode();
16824 if (CondOpcode == X86ISD::SETCC ||
16825 CondOpcode == X86ISD::SETCC_CARRY) {
16826 CC = Cond.getOperand(0);
16828 SDValue Cmp = Cond.getOperand(1);
16829 unsigned Opc = Cmp.getOpcode();
16830 // FIXME: WHY THE SPECIAL CASING OF LogicalCmp??
16831 if (isX86LogicalCmp(Cmp) || Opc == X86ISD::BT) {
16835 switch (cast<ConstantSDNode>(CC)->getZExtValue()) {
16839 // These can only come from an arithmetic instruction with overflow,
16840 // e.g. SADDO, UADDO.
16841 Cond = Cond.getNode()->getOperand(1);
16847 CondOpcode = Cond.getOpcode();
16848 if (CondOpcode == ISD::UADDO || CondOpcode == ISD::SADDO ||
16849 CondOpcode == ISD::USUBO || CondOpcode == ISD::SSUBO ||
16850 ((CondOpcode == ISD::UMULO || CondOpcode == ISD::SMULO) &&
16851 Cond.getOperand(0).getValueType() != MVT::i8)) {
16852 SDValue LHS = Cond.getOperand(0);
16853 SDValue RHS = Cond.getOperand(1);
16854 unsigned X86Opcode;
16857 // Keep this in sync with LowerXALUO, otherwise we might create redundant
16858 // instructions that can't be removed afterwards (i.e. X86ISD::ADD and
16860 switch (CondOpcode) {
16861 case ISD::UADDO: X86Opcode = X86ISD::ADD; X86Cond = X86::COND_B; break;
16863 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS))
16865 X86Opcode = X86ISD::INC; X86Cond = X86::COND_O;
16868 X86Opcode = X86ISD::ADD; X86Cond = X86::COND_O; break;
16869 case ISD::USUBO: X86Opcode = X86ISD::SUB; X86Cond = X86::COND_B; break;
16871 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS))
16873 X86Opcode = X86ISD::DEC; X86Cond = X86::COND_O;
16876 X86Opcode = X86ISD::SUB; X86Cond = X86::COND_O; break;
16877 case ISD::UMULO: X86Opcode = X86ISD::UMUL; X86Cond = X86::COND_O; break;
16878 case ISD::SMULO: X86Opcode = X86ISD::SMUL; X86Cond = X86::COND_O; break;
16879 default: llvm_unreachable("unexpected overflowing operator");
16882 X86Cond = X86::GetOppositeBranchCondition((X86::CondCode)X86Cond);
16883 if (CondOpcode == ISD::UMULO)
16884 VTs = DAG.getVTList(LHS.getValueType(), LHS.getValueType(),
16887 VTs = DAG.getVTList(LHS.getValueType(), MVT::i32);
16889 SDValue X86Op = DAG.getNode(X86Opcode, dl, VTs, LHS, RHS);
16891 if (CondOpcode == ISD::UMULO)
16892 Cond = X86Op.getValue(2);
16894 Cond = X86Op.getValue(1);
16896 CC = DAG.getConstant(X86Cond, MVT::i8);
16900 if (Cond.hasOneUse() && isAndOrOfSetCCs(Cond, CondOpc)) {
16901 SDValue Cmp = Cond.getOperand(0).getOperand(1);
16902 if (CondOpc == ISD::OR) {
16903 // Also, recognize the pattern generated by an FCMP_UNE. We can emit
16904 // two branches instead of an explicit OR instruction with a separate test.
16906 if (Cmp == Cond.getOperand(1).getOperand(1) &&
16907 isX86LogicalCmp(Cmp)) {
16908 CC = Cond.getOperand(0).getOperand(0);
16909 Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
16910 Chain, Dest, CC, Cmp);
16911 CC = Cond.getOperand(1).getOperand(0);
16915 } else { // ISD::AND
16916 // Also, recognize the pattern generated by an FCMP_OEQ. We can emit
16917 // two branches instead of an explicit AND instruction with a
16918 // separate test. However, we only do this if this block doesn't
16919 // have a fall-through edge, because this requires an explicit
16920 // jmp when the condition is false.
16921 if (Cmp == Cond.getOperand(1).getOperand(1) &&
16922 isX86LogicalCmp(Cmp) &&
16923 Op.getNode()->hasOneUse()) {
16924 X86::CondCode CCode =
16925 (X86::CondCode)Cond.getOperand(0).getConstantOperandVal(0);
16926 CCode = X86::GetOppositeBranchCondition(CCode);
16927 CC = DAG.getConstant(CCode, MVT::i8);
16928 SDNode *User = *Op.getNode()->use_begin();
16929 // Look for an unconditional branch following this conditional branch.
16930 // We need this because we need to reverse the successors in order
16931 // to implement FCMP_OEQ.
16932 if (User->getOpcode() == ISD::BR) {
16933 SDValue FalseBB = User->getOperand(1);
SDNode *NewBR =
16935 DAG.UpdateNodeOperands(User, User->getOperand(0), Dest);
16936 assert(NewBR == User);
16940 Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
16941 Chain, Dest, CC, Cmp);
16942 X86::CondCode CCode =
16943 (X86::CondCode)Cond.getOperand(1).getConstantOperandVal(0);
16944 CCode = X86::GetOppositeBranchCondition(CCode);
16945 CC = DAG.getConstant(CCode, MVT::i8);
16951 } else if (Cond.hasOneUse() && isXor1OfSetCC(Cond)) {
16952 // Recognize (xor (setcc), 1) patterns. The xor inverts the condition.
16953 // It should be transformed during DAG combining, except when the condition
16954 // is set by an arithmetic-with-overflow node.
16955 X86::CondCode CCode =
16956 (X86::CondCode)Cond.getOperand(0).getConstantOperandVal(0);
16957 CCode = X86::GetOppositeBranchCondition(CCode);
16958 CC = DAG.getConstant(CCode, MVT::i8);
16959 Cond = Cond.getOperand(0).getOperand(1);
16961 } else if (Cond.getOpcode() == ISD::SETCC &&
16962 cast<CondCodeSDNode>(Cond.getOperand(2))->get() == ISD::SETOEQ) {
16963 // For FCMP_OEQ, we can emit
16964 // two branches instead of an explicit AND instruction with a
16965 // separate test. However, we only do this if this block doesn't
16966 // have a fall-through edge, because this requires an explicit
16967 // jmp when the condition is false.
16968 if (Op.getNode()->hasOneUse()) {
16969 SDNode *User = *Op.getNode()->use_begin();
16970 // Look for an unconditional branch following this conditional branch.
16971 // We need this because we need to reverse the successors in order
16972 // to implement FCMP_OEQ.
16973 if (User->getOpcode() == ISD::BR) {
16974 SDValue FalseBB = User->getOperand(1);
SDNode *NewBR =
16976 DAG.UpdateNodeOperands(User, User->getOperand(0), Dest);
16977 assert(NewBR == User);
16981 SDValue Cmp = DAG.getNode(X86ISD::CMP, dl, MVT::i32,
16982 Cond.getOperand(0), Cond.getOperand(1));
16983 Cmp = ConvertCmpIfNecessary(Cmp, DAG);
16984 CC = DAG.getConstant(X86::COND_NE, MVT::i8);
16985 Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
16986 Chain, Dest, CC, Cmp);
16987 CC = DAG.getConstant(X86::COND_P, MVT::i8);
16992 } else if (Cond.getOpcode() == ISD::SETCC &&
16993 cast<CondCodeSDNode>(Cond.getOperand(2))->get() == ISD::SETUNE) {
16994 // For FCMP_UNE, we can emit
16995 // two branches instead of an explicit AND instruction with a
16996 // separate test. However, we only do this if this block doesn't
16997 // have a fall-through edge, because this requires an explicit
16998 // jmp when the condition is false.
16999 if (Op.getNode()->hasOneUse()) {
17000 SDNode *User = *Op.getNode()->use_begin();
17001 // Look for an unconditional branch following this conditional branch.
17002 // We need this because we need to reverse the successors in order
17003 // to implement FCMP_UNE.
17004 if (User->getOpcode() == ISD::BR) {
17005 SDValue FalseBB = User->getOperand(1);
SDNode *NewBR =
17007 DAG.UpdateNodeOperands(User, User->getOperand(0), Dest);
17008 assert(NewBR == User);
17011 SDValue Cmp = DAG.getNode(X86ISD::CMP, dl, MVT::i32,
17012 Cond.getOperand(0), Cond.getOperand(1));
17013 Cmp = ConvertCmpIfNecessary(Cmp, DAG);
17014 CC = DAG.getConstant(X86::COND_NE, MVT::i8);
17015 Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
17016 Chain, Dest, CC, Cmp);
17017 CC = DAG.getConstant(X86::COND_NP, MVT::i8);
17028 // Look past the truncate if the high bits are known zero.
17028 if (isTruncWithZeroHighBitsInput(Cond, DAG))
17029 Cond = Cond.getOperand(0);
17031 // We know the result of AND is compared against zero. Try to match it to BT.
17033 if (Cond.getOpcode() == ISD::AND && Cond.hasOneUse()) {
17034 SDValue NewSetCC = LowerToBT(Cond, ISD::SETNE, dl, DAG);
17035 if (NewSetCC.getNode()) {
17036 CC = NewSetCC.getOperand(0);
17037 Cond = NewSetCC.getOperand(1);
17044 X86::CondCode X86Cond = Inverted ? X86::COND_E : X86::COND_NE;
17045 CC = DAG.getConstant(X86Cond, MVT::i8);
17046 Cond = EmitTest(Cond, X86Cond, dl, DAG);
17048 Cond = ConvertCmpIfNecessary(Cond, DAG);
17049 return DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
17050 Chain, Dest, CC, Cond);
17053 // Lower dynamic stack allocation to _alloca call for Cygwin/Mingw targets.
17054 // Calls to _alloca are needed to probe the stack when allocating more than 4k
17055 // bytes in one go. Touching the stack at 4K increments is necessary to ensure
17056 // that the guard pages used by the OS virtual memory manager are allocated in
17057 // correct sequence.
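// For example, a single "alloca(16384)" on Win32 is lowered to a call to the
// probe routine rather than a plain "sub $16384, %esp"; the probe touches each
// intervening 4K page so the guard page is hit (and committed) in order.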
17059 X86TargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
17060 SelectionDAG &DAG) const {
17061 MachineFunction &MF = DAG.getMachineFunction();
17062 bool SplitStack = MF.shouldSplitStack();
17063 bool Lower = (Subtarget->isOSWindows() && !Subtarget->isTargetMachO()) ||
17068 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
17069 SDNode* Node = Op.getNode();
17071 unsigned SPReg = TLI.getStackPointerRegisterToSaveRestore();
17072 assert(SPReg && "Target cannot require DYNAMIC_STACKALLOC expansion and"
17073 " not tell us which reg is the stack pointer!");
17074 EVT VT = Node->getValueType(0);
17075 SDValue Tmp1 = SDValue(Node, 0);
17076 SDValue Tmp2 = SDValue(Node, 1);
17077 SDValue Tmp3 = Node->getOperand(2);
17078 SDValue Chain = Tmp1.getOperand(0);
17080 // Chain the dynamic stack allocation so that it doesn't modify the stack
17081 // pointer when other instructions are using the stack.
17082 Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(0, true),
17085 SDValue Size = Tmp2.getOperand(1);
17086 SDValue SP = DAG.getCopyFromReg(Chain, dl, SPReg, VT);
17087 Chain = SP.getValue(1);
17088 unsigned Align = cast<ConstantSDNode>(Tmp3)->getZExtValue();
17089 const TargetFrameLowering &TFI = *Subtarget->getFrameLowering();
17090 unsigned StackAlign = TFI.getStackAlignment();
17091 Tmp1 = DAG.getNode(ISD::SUB, dl, VT, SP, Size); // Value
17092 if (Align > StackAlign)
17093 Tmp1 = DAG.getNode(ISD::AND, dl, VT, Tmp1,
17094 DAG.getConstant(-(uint64_t)Align, VT));
17095 Chain = DAG.getCopyToReg(Chain, dl, SPReg, Tmp1); // Output chain
17097 Tmp2 = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(0, true),
17098 DAG.getIntPtrConstant(0, true), SDValue(),
17101 SDValue Ops[2] = { Tmp1, Tmp2 };
17102 return DAG.getMergeValues(Ops, dl);
17106 SDValue Chain = Op.getOperand(0);
17107 SDValue Size = Op.getOperand(1);
17108 unsigned Align = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue();
17109 EVT VT = Op.getNode()->getValueType(0);
17111 bool Is64Bit = Subtarget->is64Bit();
17112 EVT SPTy = getPointerTy();
17115 MachineRegisterInfo &MRI = MF.getRegInfo();
17118   // The 64-bit implementation of segmented stacks needs to clobber both r10
17119   // and r11. This makes it impossible to use it along with nested parameters.
17120 const Function *F = MF.getFunction();
17122 for (Function::const_arg_iterator I = F->arg_begin(), E = F->arg_end();
17124 if (I->hasNestAttr())
17125 report_fatal_error("Cannot use segmented stacks with functions that "
17126 "have nested arguments.");
17129 const TargetRegisterClass *AddrRegClass =
17130 getRegClassFor(getPointerTy());
17131 unsigned Vreg = MRI.createVirtualRegister(AddrRegClass);
17132 Chain = DAG.getCopyToReg(Chain, dl, Vreg, Size);
17133 SDValue Value = DAG.getNode(X86ISD::SEG_ALLOCA, dl, SPTy, Chain,
17134 DAG.getRegister(Vreg, SPTy));
17135 SDValue Ops1[2] = { Value, Chain };
17136 return DAG.getMergeValues(Ops1, dl);
17139 const unsigned Reg = (Subtarget->isTarget64BitLP64() ? X86::RAX : X86::EAX);
17141 Chain = DAG.getCopyToReg(Chain, dl, Reg, Size, Flag);
17142 Flag = Chain.getValue(1);
17143 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
17145 Chain = DAG.getNode(X86ISD::WIN_ALLOCA, dl, NodeTys, Chain, Flag);
17147 const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
17148 unsigned SPReg = RegInfo->getStackRegister();
17149 SDValue SP = DAG.getCopyFromReg(Chain, dl, SPReg, SPTy);
17150 Chain = SP.getValue(1);
17153 SP = DAG.getNode(ISD::AND, dl, VT, SP.getValue(0),
17154 DAG.getConstant(-(uint64_t)Align, VT));
17155 Chain = DAG.getCopyToReg(Chain, dl, SPReg, SP);
17158 SDValue Ops1[2] = { SP, Chain };
17159 return DAG.getMergeValues(Ops1, dl);
17163 SDValue X86TargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const {
17164 MachineFunction &MF = DAG.getMachineFunction();
17165 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
17167 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
17170 if (!Subtarget->is64Bit() || Subtarget->isTargetWin64()) {
17171 // vastart just stores the address of the VarArgsFrameIndex slot into the
17172 // memory location argument.
17173 SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(),
17175 return DAG.getStore(Op.getOperand(0), DL, FR, Op.getOperand(1),
17176 MachinePointerInfo(SV), false, false, 0);
17180 // gp_offset (0 - 6 * 8)
17181 // fp_offset (48 - 48 + 8 * 16)
17182   // overflow_arg_area (points to parameters coming in memory).
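  // The in-memory layout written below (offsets relative to the va_list) is:
  //   0: i32 gp_offset, 4: i32 fp_offset, 8: i8* overflow_arg_area,
  //   16: i8* reg_save_area.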
17184 SmallVector<SDValue, 8> MemOps;
17185 SDValue FIN = Op.getOperand(1);
17187 SDValue Store = DAG.getStore(Op.getOperand(0), DL,
17188 DAG.getConstant(FuncInfo->getVarArgsGPOffset(),
17190 FIN, MachinePointerInfo(SV), false, false, 0);
17191 MemOps.push_back(Store);
17194 FIN = DAG.getNode(ISD::ADD, DL, getPointerTy(),
17195 FIN, DAG.getIntPtrConstant(4));
17196 Store = DAG.getStore(Op.getOperand(0), DL,
17197 DAG.getConstant(FuncInfo->getVarArgsFPOffset(),
17199 FIN, MachinePointerInfo(SV, 4), false, false, 0);
17200 MemOps.push_back(Store);
17202 // Store ptr to overflow_arg_area
17203 FIN = DAG.getNode(ISD::ADD, DL, getPointerTy(),
17204 FIN, DAG.getIntPtrConstant(4));
17205 SDValue OVFIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(),
17207 Store = DAG.getStore(Op.getOperand(0), DL, OVFIN, FIN,
17208 MachinePointerInfo(SV, 8),
17210 MemOps.push_back(Store);
17212 // Store ptr to reg_save_area.
17213 FIN = DAG.getNode(ISD::ADD, DL, getPointerTy(),
17214 FIN, DAG.getIntPtrConstant(8));
17215 SDValue RSFIN = DAG.getFrameIndex(FuncInfo->getRegSaveFrameIndex(),
17217 Store = DAG.getStore(Op.getOperand(0), DL, RSFIN, FIN,
17218 MachinePointerInfo(SV, 16), false, false, 0);
17219 MemOps.push_back(Store);
17220 return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOps);
17223 SDValue X86TargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG) const {
17224 assert(Subtarget->is64Bit() &&
17225 "LowerVAARG only handles 64-bit va_arg!");
17226 assert((Subtarget->isTargetLinux() ||
17227 Subtarget->isTargetDarwin()) &&
17228 "Unhandled target in LowerVAARG");
17229 assert(Op.getNode()->getNumOperands() == 4);
17230 SDValue Chain = Op.getOperand(0);
17231 SDValue SrcPtr = Op.getOperand(1);
17232 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
17233 unsigned Align = Op.getConstantOperandVal(3);
17236 EVT ArgVT = Op.getNode()->getValueType(0);
17237 Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
17238 uint32_t ArgSize = getDataLayout()->getTypeAllocSize(ArgTy);
17241 // Decide which area this value should be read from.
17242 // TODO: Implement the AMD64 ABI in its entirety. This simple
17243 // selection mechanism works only for the basic types.
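  // For example, "va_arg(ap, int)" is read via gp_offset (ArgMode 1 below) and
  // "va_arg(ap, double)" via fp_offset (ArgMode 2); x87 f80 is not handled.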
17244 if (ArgVT == MVT::f80) {
17245 llvm_unreachable("va_arg for f80 not yet implemented");
17246 } else if (ArgVT.isFloatingPoint() && ArgSize <= 16 /*bytes*/) {
17247 ArgMode = 2; // Argument passed in XMM register. Use fp_offset.
17248 } else if (ArgVT.isInteger() && ArgSize <= 32 /*bytes*/) {
17249 ArgMode = 1; // Argument passed in GPR64 register(s). Use gp_offset.
17251 llvm_unreachable("Unhandled argument type in LowerVAARG");
17254 if (ArgMode == 2) {
17255 // Sanity Check: Make sure using fp_offset makes sense.
17256 assert(!DAG.getTarget().Options.UseSoftFloat &&
17257 !(DAG.getMachineFunction().getFunction()->hasFnAttribute(
17258 Attribute::NoImplicitFloat)) &&
17259 Subtarget->hasSSE1());
17262 // Insert VAARG_64 node into the DAG
17263 // VAARG_64 returns two values: Variable Argument Address, Chain
17264 SmallVector<SDValue, 11> InstOps;
17265 InstOps.push_back(Chain);
17266 InstOps.push_back(SrcPtr);
17267 InstOps.push_back(DAG.getConstant(ArgSize, MVT::i32));
17268 InstOps.push_back(DAG.getConstant(ArgMode, MVT::i8));
17269 InstOps.push_back(DAG.getConstant(Align, MVT::i32));
17270 SDVTList VTs = DAG.getVTList(getPointerTy(), MVT::Other);
17271 SDValue VAARG = DAG.getMemIntrinsicNode(X86ISD::VAARG_64, dl,
17272 VTs, InstOps, MVT::i64,
17273 MachinePointerInfo(SV),
17275 /*Volatile=*/false,
17277 /*WriteMem=*/true);
17278 Chain = VAARG.getValue(1);
17280 // Load the next argument and return it
17281 return DAG.getLoad(ArgVT, dl,
17284 MachinePointerInfo(),
17285 false, false, false, 0);
17288 static SDValue LowerVACOPY(SDValue Op, const X86Subtarget *Subtarget,
17289 SelectionDAG &DAG) {
17290 // X86-64 va_list is a struct { i32, i32, i8*, i8* }.
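  // That struct is 4 + 4 + 8 + 8 = 24 bytes, so va_copy degenerates to the
  // 24-byte, 8-byte-aligned memcpy emitted below.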
17291 assert(Subtarget->is64Bit() && "This code only handles 64-bit va_copy!");
17292 SDValue Chain = Op.getOperand(0);
17293 SDValue DstPtr = Op.getOperand(1);
17294 SDValue SrcPtr = Op.getOperand(2);
17295 const Value *DstSV = cast<SrcValueSDNode>(Op.getOperand(3))->getValue();
17296 const Value *SrcSV = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();
17299 return DAG.getMemcpy(Chain, DL, DstPtr, SrcPtr,
17300 DAG.getIntPtrConstant(24), 8, /*isVolatile*/false,
17302 MachinePointerInfo(DstSV), MachinePointerInfo(SrcSV));
17305 // getTargetVShiftByConstNode - Handle vector element shifts where the shift
17306 // amount is a constant. Takes immediate version of shift as input.
17307 static SDValue getTargetVShiftByConstNode(unsigned Opc, SDLoc dl, MVT VT,
17308 SDValue SrcOp, uint64_t ShiftAmt,
17309 SelectionDAG &DAG) {
17310 MVT ElementType = VT.getVectorElementType();
17312 // Fold this packed shift into its first operand if ShiftAmt is 0.
17316 // Check for ShiftAmt >= element width
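  // e.g. a logical shift of a 32-bit lane by 32 or more always yields 0, while
  // an arithmetic shift is clamped to 31 so every lane becomes its sign bits.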
17317 if (ShiftAmt >= ElementType.getSizeInBits()) {
17318 if (Opc == X86ISD::VSRAI)
17319 ShiftAmt = ElementType.getSizeInBits() - 1;
17321 return DAG.getConstant(0, VT);
17324 assert((Opc == X86ISD::VSHLI || Opc == X86ISD::VSRLI || Opc == X86ISD::VSRAI)
17325 && "Unknown target vector shift-by-constant node");
17327 // Fold this packed vector shift into a build vector if SrcOp is a
17328   // vector of Constants or UNDEFs, and SrcOp's value type is the same as VT.
17329 if (VT == SrcOp.getSimpleValueType() &&
17330 ISD::isBuildVectorOfConstantSDNodes(SrcOp.getNode())) {
17331 SmallVector<SDValue, 8> Elts;
17332 unsigned NumElts = SrcOp->getNumOperands();
17333 ConstantSDNode *ND;
17336 default: llvm_unreachable(nullptr);
17337 case X86ISD::VSHLI:
17338 for (unsigned i=0; i!=NumElts; ++i) {
17339 SDValue CurrentOp = SrcOp->getOperand(i);
17340 if (CurrentOp->getOpcode() == ISD::UNDEF) {
17341 Elts.push_back(CurrentOp);
17344 ND = cast<ConstantSDNode>(CurrentOp);
17345 const APInt &C = ND->getAPIntValue();
17346 Elts.push_back(DAG.getConstant(C.shl(ShiftAmt), ElementType));
17349 case X86ISD::VSRLI:
17350 for (unsigned i=0; i!=NumElts; ++i) {
17351 SDValue CurrentOp = SrcOp->getOperand(i);
17352 if (CurrentOp->getOpcode() == ISD::UNDEF) {
17353 Elts.push_back(CurrentOp);
17356 ND = cast<ConstantSDNode>(CurrentOp);
17357 const APInt &C = ND->getAPIntValue();
17358 Elts.push_back(DAG.getConstant(C.lshr(ShiftAmt), ElementType));
17361 case X86ISD::VSRAI:
17362 for (unsigned i=0; i!=NumElts; ++i) {
17363 SDValue CurrentOp = SrcOp->getOperand(i);
17364 if (CurrentOp->getOpcode() == ISD::UNDEF) {
17365 Elts.push_back(CurrentOp);
17368 ND = cast<ConstantSDNode>(CurrentOp);
17369 const APInt &C = ND->getAPIntValue();
17370 Elts.push_back(DAG.getConstant(C.ashr(ShiftAmt), ElementType));
17375 return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Elts);
17378 return DAG.getNode(Opc, dl, VT, SrcOp, DAG.getConstant(ShiftAmt, MVT::i8));
17381 // getTargetVShiftNode - Handle vector element shifts where the shift amount
17382 // may or may not be a constant. Takes immediate version of shift as input.
17383 static SDValue getTargetVShiftNode(unsigned Opc, SDLoc dl, MVT VT,
17384 SDValue SrcOp, SDValue ShAmt,
17385 SelectionDAG &DAG) {
17386 MVT SVT = ShAmt.getSimpleValueType();
17387 assert((SVT == MVT::i32 || SVT == MVT::i64) && "Unexpected value type!");
17389 // Catch shift-by-constant.
17390 if (ConstantSDNode *CShAmt = dyn_cast<ConstantSDNode>(ShAmt))
17391 return getTargetVShiftByConstNode(Opc, dl, VT, SrcOp,
17392 CShAmt->getZExtValue(), DAG);
17394 // Change opcode to non-immediate version
17396 default: llvm_unreachable("Unknown target vector shift node");
17397 case X86ISD::VSHLI: Opc = X86ISD::VSHL; break;
17398 case X86ISD::VSRLI: Opc = X86ISD::VSRL; break;
17399 case X86ISD::VSRAI: Opc = X86ISD::VSRA; break;
17402 const X86Subtarget &Subtarget =
17403 static_cast<const X86Subtarget &>(DAG.getSubtarget());
17404 if (Subtarget.hasSSE41() && ShAmt.getOpcode() == ISD::ZERO_EXTEND &&
17405 ShAmt.getOperand(0).getSimpleValueType() == MVT::i16) {
17406 // Let the shuffle legalizer expand this shift amount node.
17407 SDValue Op0 = ShAmt.getOperand(0);
17408 Op0 = DAG.getNode(ISD::SCALAR_TO_VECTOR, SDLoc(Op0), MVT::v8i16, Op0);
17409 ShAmt = getShuffleVectorZeroOrUndef(Op0, 0, true, &Subtarget, DAG);
17411     // Need to build a vector containing the shift amount.
17412     // SSE/AVX packed shifts only use the lower 64 bits of the shift count.
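    // e.g. an i32 shift amount "c" is built into the v4i32 <c, 0, undef, undef>,
    // whose low 64 bits are just zext(c), which is all the instruction reads.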
17413 SmallVector<SDValue, 4> ShOps;
17414 ShOps.push_back(ShAmt);
17415 if (SVT == MVT::i32) {
17416 ShOps.push_back(DAG.getConstant(0, SVT));
17417 ShOps.push_back(DAG.getUNDEF(SVT));
17419 ShOps.push_back(DAG.getUNDEF(SVT));
17421 MVT BVT = SVT == MVT::i32 ? MVT::v4i32 : MVT::v2i64;
17422 ShAmt = DAG.getNode(ISD::BUILD_VECTOR, dl, BVT, ShOps);
17425 // The return type has to be a 128-bit type with the same element
17426 // type as the input type.
17427 MVT EltVT = VT.getVectorElementType();
17428 EVT ShVT = MVT::getVectorVT(EltVT, 128/EltVT.getSizeInBits());
17430 ShAmt = DAG.getNode(ISD::BITCAST, dl, ShVT, ShAmt);
17431 return DAG.getNode(Opc, dl, VT, SrcOp, ShAmt);
17434 /// \brief Return (and \p Op, \p Mask) for compare instructions or
17435 /// (vselect \p Mask, \p Op, \p PreservedSrc) for others along with the
17436 /// necessary casting for \p Mask when lowering masking intrinsics.
17437 static SDValue getVectorMaskingNode(SDValue Op, SDValue Mask,
17438 SDValue PreservedSrc,
17439 const X86Subtarget *Subtarget,
17440 SelectionDAG &DAG) {
17441 EVT VT = Op.getValueType();
17442 EVT MaskVT = EVT::getVectorVT(*DAG.getContext(),
17443 MVT::i1, VT.getVectorNumElements());
17444 EVT BitcastVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
17445 Mask.getValueType().getSizeInBits());
17448 assert(MaskVT.isSimple() && "invalid mask type");
17450 if (isAllOnes(Mask))
17453   // When MaskVT equals v2i1 or v4i1, the low 2 or 4 elements
17454   // are extracted by EXTRACT_SUBVECTOR.
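  // e.g. a v2i1 mask supplied as an i8 operand is bitcast to v8i1 and its low
  // two lanes are then taken with an EXTRACT_SUBVECTOR at index 0.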
17455 SDValue VMask = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MaskVT,
17456 DAG.getNode(ISD::BITCAST, dl, BitcastVT, Mask),
17457 DAG.getIntPtrConstant(0));
17459 switch (Op.getOpcode()) {
17461 case X86ISD::PCMPEQM:
17462 case X86ISD::PCMPGTM:
17464 case X86ISD::CMPMU:
17465 return DAG.getNode(ISD::AND, dl, VT, Op, VMask);
17467 if (PreservedSrc.getOpcode() == ISD::UNDEF)
17468 PreservedSrc = getZeroVector(VT, Subtarget, DAG, dl);
17469 return DAG.getNode(ISD::VSELECT, dl, VT, VMask, Op, PreservedSrc);
17472 /// \brief Creates an SDNode for a predicated scalar operation.
17473 /// \returns (X86vselect \p Mask, \p Op, \p PreservedSrc).
17474 /// The mask is coming as MVT::i8 and it should be truncated
17475 /// to MVT::i1 while lowering masking intrinsics.
17476 /// The main difference between ScalarMaskingNode and VectorMaskingNode is using
17477 /// "X86select" instead of "vselect". We just can't create the "vselect" node for
17478 /// a scalar instruction.
17479 static SDValue getScalarMaskingNode(SDValue Op, SDValue Mask,
17480 SDValue PreservedSrc,
17481 const X86Subtarget *Subtarget,
17482 SelectionDAG &DAG) {
17483 if (isAllOnes(Mask))
17486 EVT VT = Op.getValueType();
17488 // The mask should be of type MVT::i1
17489 SDValue IMask = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, Mask);
17491 if (PreservedSrc.getOpcode() == ISD::UNDEF)
17492 PreservedSrc = getZeroVector(VT, Subtarget, DAG, dl);
17493 return DAG.getNode(X86ISD::SELECT, dl, VT, IMask, Op, PreservedSrc);
17496 static SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, const X86Subtarget *Subtarget,
17497 SelectionDAG &DAG) {
17499 unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
17500 EVT VT = Op.getValueType();
17501 const IntrinsicData* IntrData = getIntrinsicWithoutChain(IntNo);
17503 switch(IntrData->Type) {
17504 case INTR_TYPE_1OP:
17505 return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(), Op.getOperand(1));
17506 case INTR_TYPE_2OP:
17507 return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(), Op.getOperand(1),
17509 case INTR_TYPE_3OP:
17510 return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(), Op.getOperand(1),
17511 Op.getOperand(2), Op.getOperand(3));
17512 case INTR_TYPE_1OP_MASK_RM: {
17513 SDValue Src = Op.getOperand(1);
17514 SDValue Src0 = Op.getOperand(2);
17515 SDValue Mask = Op.getOperand(3);
17516 SDValue RoundingMode = Op.getOperand(4);
17517 return getVectorMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT, Src,
17519 Mask, Src0, Subtarget, DAG);
17521 case INTR_TYPE_SCALAR_MASK_RM: {
17522 SDValue Src1 = Op.getOperand(1);
17523 SDValue Src2 = Op.getOperand(2);
17524 SDValue Src0 = Op.getOperand(3);
17525 SDValue Mask = Op.getOperand(4);
17526 SDValue RoundingMode = Op.getOperand(5);
17527 return getScalarMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT, Src1, Src2,
17529 Mask, Src0, Subtarget, DAG);
17531 case INTR_TYPE_2OP_MASK: {
17532 SDValue Src1 = Op.getOperand(1);
17533 SDValue Src2 = Op.getOperand(2);
17534 SDValue PassThru = Op.getOperand(3);
17535 SDValue Mask = Op.getOperand(4);
17536     // We specify 2 possible opcodes for intrinsics with rounding modes.
17537     // First, we check if the intrinsic may have a non-default rounding mode
17538     // (IntrData->Opc1 != 0), and then we check the rounding mode operand.
17539 unsigned IntrWithRoundingModeOpcode = IntrData->Opc1;
17540 if (IntrWithRoundingModeOpcode != 0) {
17541 SDValue Rnd = Op.getOperand(5);
17542 unsigned Round = cast<ConstantSDNode>(Rnd)->getZExtValue();
17543 if (Round != X86::STATIC_ROUNDING::CUR_DIRECTION) {
17544 return getVectorMaskingNode(DAG.getNode(IntrWithRoundingModeOpcode,
17545 dl, Op.getValueType(),
17547 Mask, PassThru, Subtarget, DAG);
17550 return getVectorMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT,
17552 Mask, PassThru, Subtarget, DAG);
17554 case FMA_OP_MASK: {
17555 SDValue Src1 = Op.getOperand(1);
17556 SDValue Src2 = Op.getOperand(2);
17557 SDValue Src3 = Op.getOperand(3);
17558 SDValue Mask = Op.getOperand(4);
17559     // We specify 2 possible opcodes for intrinsics with rounding modes.
17560     // First, we check if the intrinsic may have a non-default rounding mode
17561     // (IntrData->Opc1 != 0), and then we check the rounding mode operand.
17562 unsigned IntrWithRoundingModeOpcode = IntrData->Opc1;
17563 if (IntrWithRoundingModeOpcode != 0) {
17564 SDValue Rnd = Op.getOperand(5);
17565 if (cast<ConstantSDNode>(Rnd)->getZExtValue() !=
17566 X86::STATIC_ROUNDING::CUR_DIRECTION)
17567 return getVectorMaskingNode(DAG.getNode(IntrWithRoundingModeOpcode,
17568 dl, Op.getValueType(),
17569 Src1, Src2, Src3, Rnd),
17570 Mask, Src1, Subtarget, DAG);
17572 return getVectorMaskingNode(DAG.getNode(IntrData->Opc0,
17573 dl, Op.getValueType(),
17575 Mask, Src1, Subtarget, DAG);
17578 case CMP_MASK_CC: {
17579 // Comparison intrinsics with masks.
17580 // Example of transformation:
17581 // (i8 (int_x86_avx512_mask_pcmpeq_q_128
17582 // (v2i64 %a), (v2i64 %b), (i8 %mask))) ->
17584 // (v8i1 (insert_subvector undef,
17585 // (v2i1 (and (PCMPEQM %a, %b),
17586 // (extract_subvector
17587 // (v8i1 (bitcast %mask)), 0))), 0))))
17588 EVT VT = Op.getOperand(1).getValueType();
17589 EVT MaskVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
17590 VT.getVectorNumElements());
17591 SDValue Mask = Op.getOperand((IntrData->Type == CMP_MASK_CC) ? 4 : 3);
17592 EVT BitcastVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
17593 Mask.getValueType().getSizeInBits());
17595 if (IntrData->Type == CMP_MASK_CC) {
17596 Cmp = DAG.getNode(IntrData->Opc0, dl, MaskVT, Op.getOperand(1),
17597 Op.getOperand(2), Op.getOperand(3));
17599 assert(IntrData->Type == CMP_MASK && "Unexpected intrinsic type!");
17600 Cmp = DAG.getNode(IntrData->Opc0, dl, MaskVT, Op.getOperand(1),
17603 SDValue CmpMask = getVectorMaskingNode(Cmp, Mask,
17604 DAG.getTargetConstant(0, MaskVT),
17606 SDValue Res = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, BitcastVT,
17607 DAG.getUNDEF(BitcastVT), CmpMask,
17608 DAG.getIntPtrConstant(0));
17609 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
17611 case COMI: { // Comparison intrinsics
17612 ISD::CondCode CC = (ISD::CondCode)IntrData->Opc1;
17613 SDValue LHS = Op.getOperand(1);
17614 SDValue RHS = Op.getOperand(2);
17615 unsigned X86CC = TranslateX86CC(CC, true, LHS, RHS, DAG);
17616 assert(X86CC != X86::COND_INVALID && "Unexpected illegal condition!");
17617 SDValue Cond = DAG.getNode(IntrData->Opc0, dl, MVT::i32, LHS, RHS);
17618 SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
17619 DAG.getConstant(X86CC, MVT::i8), Cond);
17620 return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC);
17623 return getTargetVShiftNode(IntrData->Opc0, dl, Op.getSimpleValueType(),
17624 Op.getOperand(1), Op.getOperand(2), DAG);
17626 return getVectorMaskingNode(getTargetVShiftNode(IntrData->Opc0, dl,
17627 Op.getSimpleValueType(),
17629 Op.getOperand(2), DAG),
17630 Op.getOperand(4), Op.getOperand(3), Subtarget,
17632 case COMPRESS_EXPAND_IN_REG: {
17633 SDValue Mask = Op.getOperand(3);
17634 SDValue DataToCompress = Op.getOperand(1);
17635 SDValue PassThru = Op.getOperand(2);
17636 if (isAllOnes(Mask)) // return data as is
17637 return Op.getOperand(1);
17638 EVT VT = Op.getValueType();
17639 EVT MaskVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
17640 VT.getVectorNumElements());
17641 EVT BitcastVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
17642 Mask.getValueType().getSizeInBits());
17644 SDValue VMask = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MaskVT,
17645 DAG.getNode(ISD::BITCAST, dl, BitcastVT, Mask),
17646 DAG.getIntPtrConstant(0));
17648 return DAG.getNode(IntrData->Opc0, dl, VT, VMask, DataToCompress,
17652 SDValue Mask = Op.getOperand(3);
17653 EVT VT = Op.getValueType();
17654 EVT MaskVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
17655 VT.getVectorNumElements());
17656 EVT BitcastVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
17657 Mask.getValueType().getSizeInBits());
17659 SDValue VMask = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MaskVT,
17660 DAG.getNode(ISD::BITCAST, dl, BitcastVT, Mask),
17661 DAG.getIntPtrConstant(0));
17662 return DAG.getNode(IntrData->Opc0, dl, VT, VMask, Op.getOperand(1),
17671 default: return SDValue(); // Don't custom lower most intrinsics.
17673 case Intrinsic::x86_avx512_mask_valign_q_512:
17674 case Intrinsic::x86_avx512_mask_valign_d_512:
17675 // Vector source operands are swapped.
17676 return getVectorMaskingNode(DAG.getNode(X86ISD::VALIGN, dl,
17677 Op.getValueType(), Op.getOperand(2),
17680 Op.getOperand(5), Op.getOperand(4),
17683   // ptest and testp intrinsics. The intrinsics these come from are designed to
17684   // return an integer value, not just an instruction, so lower them to the ptest
17685   // or testp pattern and a setcc for the result.
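  // e.g. the ptestz intrinsics return 1 when (LHS & RHS) == 0, so they become
  // a PTEST node followed by a SETCC on COND_E, zero-extended to i32 below.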
17686 case Intrinsic::x86_sse41_ptestz:
17687 case Intrinsic::x86_sse41_ptestc:
17688 case Intrinsic::x86_sse41_ptestnzc:
17689 case Intrinsic::x86_avx_ptestz_256:
17690 case Intrinsic::x86_avx_ptestc_256:
17691 case Intrinsic::x86_avx_ptestnzc_256:
17692 case Intrinsic::x86_avx_vtestz_ps:
17693 case Intrinsic::x86_avx_vtestc_ps:
17694 case Intrinsic::x86_avx_vtestnzc_ps:
17695 case Intrinsic::x86_avx_vtestz_pd:
17696 case Intrinsic::x86_avx_vtestc_pd:
17697 case Intrinsic::x86_avx_vtestnzc_pd:
17698 case Intrinsic::x86_avx_vtestz_ps_256:
17699 case Intrinsic::x86_avx_vtestc_ps_256:
17700 case Intrinsic::x86_avx_vtestnzc_ps_256:
17701 case Intrinsic::x86_avx_vtestz_pd_256:
17702 case Intrinsic::x86_avx_vtestc_pd_256:
17703 case Intrinsic::x86_avx_vtestnzc_pd_256: {
17704 bool IsTestPacked = false;
17707 default: llvm_unreachable("Bad fallthrough in Intrinsic lowering.");
17708 case Intrinsic::x86_avx_vtestz_ps:
17709 case Intrinsic::x86_avx_vtestz_pd:
17710 case Intrinsic::x86_avx_vtestz_ps_256:
17711 case Intrinsic::x86_avx_vtestz_pd_256:
17712 IsTestPacked = true; // Fallthrough
17713 case Intrinsic::x86_sse41_ptestz:
17714 case Intrinsic::x86_avx_ptestz_256:
17716 X86CC = X86::COND_E;
17718 case Intrinsic::x86_avx_vtestc_ps:
17719 case Intrinsic::x86_avx_vtestc_pd:
17720 case Intrinsic::x86_avx_vtestc_ps_256:
17721 case Intrinsic::x86_avx_vtestc_pd_256:
17722 IsTestPacked = true; // Fallthrough
17723 case Intrinsic::x86_sse41_ptestc:
17724 case Intrinsic::x86_avx_ptestc_256:
17726 X86CC = X86::COND_B;
17728 case Intrinsic::x86_avx_vtestnzc_ps:
17729 case Intrinsic::x86_avx_vtestnzc_pd:
17730 case Intrinsic::x86_avx_vtestnzc_ps_256:
17731 case Intrinsic::x86_avx_vtestnzc_pd_256:
17732 IsTestPacked = true; // Fallthrough
17733 case Intrinsic::x86_sse41_ptestnzc:
17734 case Intrinsic::x86_avx_ptestnzc_256:
17736 X86CC = X86::COND_A;
17740 SDValue LHS = Op.getOperand(1);
17741 SDValue RHS = Op.getOperand(2);
17742 unsigned TestOpc = IsTestPacked ? X86ISD::TESTP : X86ISD::PTEST;
17743 SDValue Test = DAG.getNode(TestOpc, dl, MVT::i32, LHS, RHS);
17744 SDValue CC = DAG.getConstant(X86CC, MVT::i8);
17745 SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8, CC, Test);
17746 return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC);
17748 case Intrinsic::x86_avx512_kortestz_w:
17749 case Intrinsic::x86_avx512_kortestc_w: {
17750 unsigned X86CC = (IntNo == Intrinsic::x86_avx512_kortestz_w)? X86::COND_E: X86::COND_B;
17751 SDValue LHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i1, Op.getOperand(1));
17752 SDValue RHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i1, Op.getOperand(2));
17753 SDValue CC = DAG.getConstant(X86CC, MVT::i8);
17754 SDValue Test = DAG.getNode(X86ISD::KORTEST, dl, MVT::i32, LHS, RHS);
17755 SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i1, CC, Test);
17756 return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC);
17759 case Intrinsic::x86_sse42_pcmpistria128:
17760 case Intrinsic::x86_sse42_pcmpestria128:
17761 case Intrinsic::x86_sse42_pcmpistric128:
17762 case Intrinsic::x86_sse42_pcmpestric128:
17763 case Intrinsic::x86_sse42_pcmpistrio128:
17764 case Intrinsic::x86_sse42_pcmpestrio128:
17765 case Intrinsic::x86_sse42_pcmpistris128:
17766 case Intrinsic::x86_sse42_pcmpestris128:
17767 case Intrinsic::x86_sse42_pcmpistriz128:
17768 case Intrinsic::x86_sse42_pcmpestriz128: {
17772 default: llvm_unreachable("Impossible intrinsic"); // Can't reach here.
17773 case Intrinsic::x86_sse42_pcmpistria128:
17774 Opcode = X86ISD::PCMPISTRI;
17775 X86CC = X86::COND_A;
17777 case Intrinsic::x86_sse42_pcmpestria128:
17778 Opcode = X86ISD::PCMPESTRI;
17779 X86CC = X86::COND_A;
17781 case Intrinsic::x86_sse42_pcmpistric128:
17782 Opcode = X86ISD::PCMPISTRI;
17783 X86CC = X86::COND_B;
17785 case Intrinsic::x86_sse42_pcmpestric128:
17786 Opcode = X86ISD::PCMPESTRI;
17787 X86CC = X86::COND_B;
17789 case Intrinsic::x86_sse42_pcmpistrio128:
17790 Opcode = X86ISD::PCMPISTRI;
17791 X86CC = X86::COND_O;
17793 case Intrinsic::x86_sse42_pcmpestrio128:
17794 Opcode = X86ISD::PCMPESTRI;
17795 X86CC = X86::COND_O;
17797 case Intrinsic::x86_sse42_pcmpistris128:
17798 Opcode = X86ISD::PCMPISTRI;
17799 X86CC = X86::COND_S;
17801 case Intrinsic::x86_sse42_pcmpestris128:
17802 Opcode = X86ISD::PCMPESTRI;
17803 X86CC = X86::COND_S;
17805 case Intrinsic::x86_sse42_pcmpistriz128:
17806 Opcode = X86ISD::PCMPISTRI;
17807 X86CC = X86::COND_E;
17809 case Intrinsic::x86_sse42_pcmpestriz128:
17810 Opcode = X86ISD::PCMPESTRI;
17811 X86CC = X86::COND_E;
17814 SmallVector<SDValue, 5> NewOps(Op->op_begin()+1, Op->op_end());
17815 SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
17816 SDValue PCMP = DAG.getNode(Opcode, dl, VTs, NewOps);
17817 SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
17818 DAG.getConstant(X86CC, MVT::i8),
17819 SDValue(PCMP.getNode(), 1));
17820 return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC);
17823 case Intrinsic::x86_sse42_pcmpistri128:
17824 case Intrinsic::x86_sse42_pcmpestri128: {
17826 if (IntNo == Intrinsic::x86_sse42_pcmpistri128)
17827 Opcode = X86ISD::PCMPISTRI;
17829 Opcode = X86ISD::PCMPESTRI;
17831 SmallVector<SDValue, 5> NewOps(Op->op_begin()+1, Op->op_end());
17832 SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
17833 return DAG.getNode(Opcode, dl, VTs, NewOps);
17838 static SDValue getGatherNode(unsigned Opc, SDValue Op, SelectionDAG &DAG,
17839 SDValue Src, SDValue Mask, SDValue Base,
17840 SDValue Index, SDValue ScaleOp, SDValue Chain,
17841 const X86Subtarget * Subtarget) {
17843 ConstantSDNode *C = dyn_cast<ConstantSDNode>(ScaleOp);
17844 assert(C && "Invalid scale type");
17845 SDValue Scale = DAG.getTargetConstant(C->getZExtValue(), MVT::i8);
17846 EVT MaskVT = MVT::getVectorVT(MVT::i1,
17847 Index.getSimpleValueType().getVectorNumElements());
17849 ConstantSDNode *MaskC = dyn_cast<ConstantSDNode>(Mask);
17851 MaskInReg = DAG.getTargetConstant(MaskC->getSExtValue(), MaskVT);
17853 MaskInReg = DAG.getNode(ISD::BITCAST, dl, MaskVT, Mask);
17854 SDVTList VTs = DAG.getVTList(Op.getValueType(), MaskVT, MVT::Other);
17855 SDValue Disp = DAG.getTargetConstant(0, MVT::i32);
17856 SDValue Segment = DAG.getRegister(0, MVT::i32);
17857 if (Src.getOpcode() == ISD::UNDEF)
17858 Src = getZeroVector(Op.getValueType(), Subtarget, DAG, dl);
17859 SDValue Ops[] = {Src, MaskInReg, Base, Scale, Index, Disp, Segment, Chain};
17860 SDNode *Res = DAG.getMachineNode(Opc, dl, VTs, Ops);
17861 SDValue RetOps[] = { SDValue(Res, 0), SDValue(Res, 2) };
17862 return DAG.getMergeValues(RetOps, dl);
17865 static SDValue getScatterNode(unsigned Opc, SDValue Op, SelectionDAG &DAG,
17866 SDValue Src, SDValue Mask, SDValue Base,
17867 SDValue Index, SDValue ScaleOp, SDValue Chain) {
17869 ConstantSDNode *C = dyn_cast<ConstantSDNode>(ScaleOp);
17870 assert(C && "Invalid scale type");
17871 SDValue Scale = DAG.getTargetConstant(C->getZExtValue(), MVT::i8);
17872 SDValue Disp = DAG.getTargetConstant(0, MVT::i32);
17873 SDValue Segment = DAG.getRegister(0, MVT::i32);
17874 EVT MaskVT = MVT::getVectorVT(MVT::i1,
17875 Index.getSimpleValueType().getVectorNumElements());
17877 ConstantSDNode *MaskC = dyn_cast<ConstantSDNode>(Mask);
17879 MaskInReg = DAG.getTargetConstant(MaskC->getSExtValue(), MaskVT);
17881 MaskInReg = DAG.getNode(ISD::BITCAST, dl, MaskVT, Mask);
17882 SDVTList VTs = DAG.getVTList(MaskVT, MVT::Other);
17883 SDValue Ops[] = {Base, Scale, Index, Disp, Segment, MaskInReg, Src, Chain};
17884 SDNode *Res = DAG.getMachineNode(Opc, dl, VTs, Ops);
17885 return SDValue(Res, 1);
17888 static SDValue getPrefetchNode(unsigned Opc, SDValue Op, SelectionDAG &DAG,
17889 SDValue Mask, SDValue Base, SDValue Index,
17890 SDValue ScaleOp, SDValue Chain) {
17892 ConstantSDNode *C = dyn_cast<ConstantSDNode>(ScaleOp);
17893 assert(C && "Invalid scale type");
17894 SDValue Scale = DAG.getTargetConstant(C->getZExtValue(), MVT::i8);
17895 SDValue Disp = DAG.getTargetConstant(0, MVT::i32);
17896 SDValue Segment = DAG.getRegister(0, MVT::i32);
17898 MVT::getVectorVT(MVT::i1, Index.getSimpleValueType().getVectorNumElements());
17900 ConstantSDNode *MaskC = dyn_cast<ConstantSDNode>(Mask);
17902 MaskInReg = DAG.getTargetConstant(MaskC->getSExtValue(), MaskVT);
17904 MaskInReg = DAG.getNode(ISD::BITCAST, dl, MaskVT, Mask);
17905 //SDVTList VTs = DAG.getVTList(MVT::Other);
17906 SDValue Ops[] = {MaskInReg, Base, Scale, Index, Disp, Segment, Chain};
17907 SDNode *Res = DAG.getMachineNode(Opc, dl, MVT::Other, Ops);
17908 return SDValue(Res, 0);
17911 // getReadPerformanceCounter - Handles the lowering of builtin intrinsics that
17912 // read performance monitor counters (x86_rdpmc).
17913 static void getReadPerformanceCounter(SDNode *N, SDLoc DL,
17914 SelectionDAG &DAG, const X86Subtarget *Subtarget,
17915 SmallVectorImpl<SDValue> &Results) {
17916 assert(N->getNumOperands() == 3 && "Unexpected number of operands!");
17917 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue);
17920   // The ECX register is used to select the index of the performance counter to read.
17922 SDValue Chain = DAG.getCopyToReg(N->getOperand(0), DL, X86::ECX,
17924 SDValue rd = DAG.getNode(X86ISD::RDPMC_DAG, DL, Tys, Chain);
17926 // Reads the content of a 64-bit performance counter and returns it in the
17927 // registers EDX:EAX.
17928 if (Subtarget->is64Bit()) {
17929 LO = DAG.getCopyFromReg(rd, DL, X86::RAX, MVT::i64, rd.getValue(1));
17930 HI = DAG.getCopyFromReg(LO.getValue(1), DL, X86::RDX, MVT::i64,
17933 LO = DAG.getCopyFromReg(rd, DL, X86::EAX, MVT::i32, rd.getValue(1));
17934 HI = DAG.getCopyFromReg(LO.getValue(1), DL, X86::EDX, MVT::i32,
17937 Chain = HI.getValue(1);
17939 if (Subtarget->is64Bit()) {
17940 // The EAX register is loaded with the low-order 32 bits. The EDX register
17941 // is loaded with the supported high-order bits of the counter.
17942 SDValue Tmp = DAG.getNode(ISD::SHL, DL, MVT::i64, HI,
17943 DAG.getConstant(32, MVT::i8));
17944 Results.push_back(DAG.getNode(ISD::OR, DL, MVT::i64, LO, Tmp));
17945 Results.push_back(Chain);
17949 // Use a buildpair to merge the two 32-bit values into a 64-bit one.
17950 SDValue Ops[] = { LO, HI };
17951 SDValue Pair = DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, Ops);
17952 Results.push_back(Pair);
17953 Results.push_back(Chain);
17956 // getReadTimeStampCounter - Handles the lowering of builtin intrinsics that
17957 // read the time stamp counter (x86_rdtsc and x86_rdtscp). This function is
17958 // also used to custom lower READCYCLECOUNTER nodes.
17959 static void getReadTimeStampCounter(SDNode *N, SDLoc DL, unsigned Opcode,
17960 SelectionDAG &DAG, const X86Subtarget *Subtarget,
17961 SmallVectorImpl<SDValue> &Results) {
17962 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue);
17963 SDValue rd = DAG.getNode(Opcode, DL, Tys, N->getOperand(0));
17966 // The processor's time-stamp counter (a 64-bit MSR) is stored into the
17967 // EDX:EAX registers. EDX is loaded with the high-order 32 bits of the MSR
17968 // and the EAX register is loaded with the low-order 32 bits.
17969 if (Subtarget->is64Bit()) {
17970 LO = DAG.getCopyFromReg(rd, DL, X86::RAX, MVT::i64, rd.getValue(1));
17971 HI = DAG.getCopyFromReg(LO.getValue(1), DL, X86::RDX, MVT::i64,
17974 LO = DAG.getCopyFromReg(rd, DL, X86::EAX, MVT::i32, rd.getValue(1));
17975 HI = DAG.getCopyFromReg(LO.getValue(1), DL, X86::EDX, MVT::i32,
17978 SDValue Chain = HI.getValue(1);
17980 if (Opcode == X86ISD::RDTSCP_DAG) {
17981 assert(N->getNumOperands() == 3 && "Unexpected number of operands!");
17983     // The RDTSCP instruction loads the IA32_TSC_AUX MSR (address C000_0103H) into
17984     // the ECX register. Add 'ecx' explicitly to the chain.
17985 SDValue ecx = DAG.getCopyFromReg(Chain, DL, X86::ECX, MVT::i32,
17987     // Explicitly store the content of ECX at the location passed as input
17988     // to the 'rdtscp' intrinsic.
17989 Chain = DAG.getStore(ecx.getValue(1), DL, ecx, N->getOperand(2),
17990 MachinePointerInfo(), false, false, 0);
17993 if (Subtarget->is64Bit()) {
17994 // The EDX register is loaded with the high-order 32 bits of the MSR, and
17995 // the EAX register is loaded with the low-order 32 bits.
17996 SDValue Tmp = DAG.getNode(ISD::SHL, DL, MVT::i64, HI,
17997 DAG.getConstant(32, MVT::i8));
17998 Results.push_back(DAG.getNode(ISD::OR, DL, MVT::i64, LO, Tmp));
17999 Results.push_back(Chain);
18003 // Use a buildpair to merge the two 32-bit values into a 64-bit one.
18004 SDValue Ops[] = { LO, HI };
18005 SDValue Pair = DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, Ops);
18006 Results.push_back(Pair);
18007 Results.push_back(Chain);
18010 static SDValue LowerREADCYCLECOUNTER(SDValue Op, const X86Subtarget *Subtarget,
18011 SelectionDAG &DAG) {
18012 SmallVector<SDValue, 2> Results;
18014 getReadTimeStampCounter(Op.getNode(), DL, X86ISD::RDTSC_DAG, DAG, Subtarget,
18016 return DAG.getMergeValues(Results, DL);
18020 static SDValue LowerINTRINSIC_W_CHAIN(SDValue Op, const X86Subtarget *Subtarget,
18021 SelectionDAG &DAG) {
18022 unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
18024 const IntrinsicData* IntrData = getIntrinsicWithChain(IntNo);
18029 switch(IntrData->Type) {
18031 llvm_unreachable("Unknown Intrinsic Type");
18035 // Emit the node with the right value type.
18036 SDVTList VTs = DAG.getVTList(Op->getValueType(0), MVT::Glue, MVT::Other);
18037 SDValue Result = DAG.getNode(IntrData->Opc0, dl, VTs, Op.getOperand(0));
18039 // If the value returned by RDRAND/RDSEED was valid (CF=1), return 1.
18040 // Otherwise return the value from Rand, which is always 0, casted to i32.
18041 SDValue Ops[] = { DAG.getZExtOrTrunc(Result, dl, Op->getValueType(1)),
18042 DAG.getConstant(1, Op->getValueType(1)),
18043 DAG.getConstant(X86::COND_B, MVT::i32),
18044 SDValue(Result.getNode(), 1) };
18045 SDValue isValid = DAG.getNode(X86ISD::CMOV, dl,
18046 DAG.getVTList(Op->getValueType(1), MVT::Glue),
18049 // Return { result, isValid, chain }.
18050 return DAG.getNode(ISD::MERGE_VALUES, dl, Op->getVTList(), Result, isValid,
18051 SDValue(Result.getNode(), 2));
18054     // gather(v1, mask, index, base, scale);
18055 SDValue Chain = Op.getOperand(0);
18056 SDValue Src = Op.getOperand(2);
18057 SDValue Base = Op.getOperand(3);
18058 SDValue Index = Op.getOperand(4);
18059 SDValue Mask = Op.getOperand(5);
18060 SDValue Scale = Op.getOperand(6);
18061 return getGatherNode(IntrData->Opc0, Op, DAG, Src, Mask, Base, Index, Scale, Chain,
18065     // scatter(base, mask, index, v1, scale);
18066 SDValue Chain = Op.getOperand(0);
18067 SDValue Base = Op.getOperand(2);
18068 SDValue Mask = Op.getOperand(3);
18069 SDValue Index = Op.getOperand(4);
18070 SDValue Src = Op.getOperand(5);
18071 SDValue Scale = Op.getOperand(6);
18072 return getScatterNode(IntrData->Opc0, Op, DAG, Src, Mask, Base, Index, Scale, Chain);
18075 SDValue Hint = Op.getOperand(6);
18077 if (dyn_cast<ConstantSDNode> (Hint) == nullptr ||
18078 (HintVal = dyn_cast<ConstantSDNode> (Hint)->getZExtValue()) > 1)
18079 llvm_unreachable("Wrong prefetch hint in intrinsic: should be 0 or 1");
18080 unsigned Opcode = (HintVal ? IntrData->Opc1 : IntrData->Opc0);
18081 SDValue Chain = Op.getOperand(0);
18082 SDValue Mask = Op.getOperand(2);
18083 SDValue Index = Op.getOperand(3);
18084 SDValue Base = Op.getOperand(4);
18085 SDValue Scale = Op.getOperand(5);
18086 return getPrefetchNode(Opcode, Op, DAG, Mask, Base, Index, Scale, Chain);
18088 // Read Time Stamp Counter (RDTSC) and Processor ID (RDTSCP).
18090 SmallVector<SDValue, 2> Results;
18091 getReadTimeStampCounter(Op.getNode(), dl, IntrData->Opc0, DAG, Subtarget, Results);
18092 return DAG.getMergeValues(Results, dl);
18094 // Read Performance Monitoring Counters.
18096 SmallVector<SDValue, 2> Results;
18097 getReadPerformanceCounter(Op.getNode(), dl, DAG, Subtarget, Results);
18098 return DAG.getMergeValues(Results, dl);
18100 // XTEST intrinsics.
18102 SDVTList VTs = DAG.getVTList(Op->getValueType(0), MVT::Other);
18103 SDValue InTrans = DAG.getNode(IntrData->Opc0, dl, VTs, Op.getOperand(0));
18104 SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
18105 DAG.getConstant(X86::COND_NE, MVT::i8),
18107 SDValue Ret = DAG.getNode(ISD::ZERO_EXTEND, dl, Op->getValueType(0), SetCC);
18108 return DAG.getNode(ISD::MERGE_VALUES, dl, Op->getVTList(),
18109 Ret, SDValue(InTrans.getNode(), 1));
18113 SmallVector<SDValue, 2> Results;
18114 SDVTList CFVTs = DAG.getVTList(Op->getValueType(0), MVT::Other);
18115 SDVTList VTs = DAG.getVTList(Op.getOperand(3)->getValueType(0), MVT::Other);
18116 SDValue GenCF = DAG.getNode(X86ISD::ADD, dl, CFVTs, Op.getOperand(2),
18117 DAG.getConstant(-1, MVT::i8));
18118 SDValue Res = DAG.getNode(IntrData->Opc0, dl, VTs, Op.getOperand(3),
18119 Op.getOperand(4), GenCF.getValue(1));
18120 SDValue Store = DAG.getStore(Op.getOperand(0), dl, Res.getValue(0),
18121 Op.getOperand(5), MachinePointerInfo(),
18123 SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
18124 DAG.getConstant(X86::COND_B, MVT::i8),
18126 Results.push_back(SetCC);
18127 Results.push_back(Store);
18128 return DAG.getMergeValues(Results, dl);
18130 case COMPRESS_TO_MEM: {
18132 SDValue Mask = Op.getOperand(4);
18133 SDValue DataToCompress = Op.getOperand(3);
18134 SDValue Addr = Op.getOperand(2);
18135 SDValue Chain = Op.getOperand(0);
18137 if (isAllOnes(Mask)) // return just a store
18138 return DAG.getStore(Chain, dl, DataToCompress, Addr,
18139 MachinePointerInfo(), false, false, 0);
18141 EVT VT = DataToCompress.getValueType();
18142 EVT MaskVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
18143 VT.getVectorNumElements());
18144 EVT BitcastVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
18145 Mask.getValueType().getSizeInBits());
18146 SDValue VMask = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MaskVT,
18147 DAG.getNode(ISD::BITCAST, dl, BitcastVT, Mask),
18148 DAG.getIntPtrConstant(0));
18150 SDValue Compressed = DAG.getNode(IntrData->Opc0, dl, VT, VMask,
18151 DataToCompress, DAG.getUNDEF(VT));
18152 return DAG.getStore(Chain, dl, Compressed, Addr,
18153 MachinePointerInfo(), false, false, 0);
18155 case EXPAND_FROM_MEM: {
18157 SDValue Mask = Op.getOperand(4);
18158 SDValue PathThru = Op.getOperand(3);
18159 SDValue Addr = Op.getOperand(2);
18160 SDValue Chain = Op.getOperand(0);
18161 EVT VT = Op.getValueType();
18163 if (isAllOnes(Mask)) // return just a load
18164 return DAG.getLoad(VT, dl, Chain, Addr, MachinePointerInfo(), false, false,
18166 EVT MaskVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
18167 VT.getVectorNumElements());
18168 EVT BitcastVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
18169 Mask.getValueType().getSizeInBits());
18170 SDValue VMask = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MaskVT,
18171 DAG.getNode(ISD::BITCAST, dl, BitcastVT, Mask),
18172 DAG.getIntPtrConstant(0));
18174 SDValue DataToExpand = DAG.getLoad(VT, dl, Chain, Addr, MachinePointerInfo(),
18175 false, false, false, 0);
18177 SmallVector<SDValue, 2> Results;
18178 Results.push_back(DAG.getNode(IntrData->Opc0, dl, VT, VMask, DataToExpand,
18180 Results.push_back(Chain);
18181 return DAG.getMergeValues(Results, dl);
18186 SDValue X86TargetLowering::LowerRETURNADDR(SDValue Op,
18187 SelectionDAG &DAG) const {
18188 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
18189 MFI->setReturnAddressIsTaken(true);
18191 if (verifyReturnAddressArgumentIsConstant(Op, DAG))
18194 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
18196 EVT PtrVT = getPointerTy();
18199 SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
18200 const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
18201 SDValue Offset = DAG.getConstant(RegInfo->getSlotSize(), PtrVT);
18202 return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(),
18203 DAG.getNode(ISD::ADD, dl, PtrVT,
18204 FrameAddr, Offset),
18205 MachinePointerInfo(), false, false, false, 0);
18208 // Just load the return address.
18209 SDValue RetAddrFI = getReturnAddressFrameIndex(DAG);
18210 return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(),
18211 RetAddrFI, MachinePointerInfo(), false, false, false, 0);
18214 SDValue X86TargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const {
18215 MachineFunction &MF = DAG.getMachineFunction();
18216 MachineFrameInfo *MFI = MF.getFrameInfo();
18217 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
18218 const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
18219 EVT VT = Op.getValueType();
18221 MFI->setFrameAddressIsTaken(true);
18223 if (MF.getTarget().getMCAsmInfo()->usesWindowsCFI()) {
18224     // Depth > 0 makes no sense on targets which use Windows unwind codes. It
18225     // is not possible to crawl up the stack without looking at the unwind codes.
18227 int FrameAddrIndex = FuncInfo->getFAIndex();
18228 if (!FrameAddrIndex) {
18229 // Set up a frame object for the return address.
18230 unsigned SlotSize = RegInfo->getSlotSize();
18231 FrameAddrIndex = MF.getFrameInfo()->CreateFixedObject(
18232 SlotSize, /*Offset=*/INT64_MIN, /*IsImmutable=*/false);
18233 FuncInfo->setFAIndex(FrameAddrIndex);
18235 return DAG.getFrameIndex(FrameAddrIndex, VT);
18238 unsigned FrameReg =
18239 RegInfo->getPtrSizedFrameRegister(DAG.getMachineFunction());
18240 SDLoc dl(Op); // FIXME probably not meaningful
18241 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
18242 assert(((FrameReg == X86::RBP && VT == MVT::i64) ||
18243 (FrameReg == X86::EBP && VT == MVT::i32)) &&
18244 "Invalid Frame Register!");
18245 SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, VT);
18247 FrameAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), FrameAddr,
18248 MachinePointerInfo(),
18249 false, false, false, 0);
18253 // FIXME? Maybe this could be a TableGen attribute on some registers and
18254 // this table could be generated automatically from RegInfo.
18255 unsigned X86TargetLowering::getRegisterByName(const char* RegName,
18257 unsigned Reg = StringSwitch<unsigned>(RegName)
18258 .Case("esp", X86::ESP)
18259 .Case("rsp", X86::RSP)
18263 report_fatal_error("Invalid register name global variable");
18266 SDValue X86TargetLowering::LowerFRAME_TO_ARGS_OFFSET(SDValue Op,
18267 SelectionDAG &DAG) const {
18268 const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
18269 return DAG.getIntPtrConstant(2 * RegInfo->getSlotSize());
18272 SDValue X86TargetLowering::LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const {
18273 SDValue Chain = Op.getOperand(0);
18274 SDValue Offset = Op.getOperand(1);
18275 SDValue Handler = Op.getOperand(2);
18278 EVT PtrVT = getPointerTy();
18279 const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
18280 unsigned FrameReg = RegInfo->getFrameRegister(DAG.getMachineFunction());
18281 assert(((FrameReg == X86::RBP && PtrVT == MVT::i64) ||
18282 (FrameReg == X86::EBP && PtrVT == MVT::i32)) &&
18283 "Invalid Frame Register!");
18284 SDValue Frame = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, PtrVT);
18285 unsigned StoreAddrReg = (PtrVT == MVT::i64) ? X86::RCX : X86::ECX;
18287 SDValue StoreAddr = DAG.getNode(ISD::ADD, dl, PtrVT, Frame,
18288 DAG.getIntPtrConstant(RegInfo->getSlotSize()));
18289 StoreAddr = DAG.getNode(ISD::ADD, dl, PtrVT, StoreAddr, Offset);
18290 Chain = DAG.getStore(Chain, dl, Handler, StoreAddr, MachinePointerInfo(),
18292 Chain = DAG.getCopyToReg(Chain, dl, StoreAddrReg, StoreAddr);
18294 return DAG.getNode(X86ISD::EH_RETURN, dl, MVT::Other, Chain,
18295 DAG.getRegister(StoreAddrReg, PtrVT));
18298 SDValue X86TargetLowering::lowerEH_SJLJ_SETJMP(SDValue Op,
18299 SelectionDAG &DAG) const {
18301 return DAG.getNode(X86ISD::EH_SJLJ_SETJMP, DL,
18302 DAG.getVTList(MVT::i32, MVT::Other),
18303 Op.getOperand(0), Op.getOperand(1));
18306 SDValue X86TargetLowering::lowerEH_SJLJ_LONGJMP(SDValue Op,
18307 SelectionDAG &DAG) const {
18309 return DAG.getNode(X86ISD::EH_SJLJ_LONGJMP, DL, MVT::Other,
18310 Op.getOperand(0), Op.getOperand(1));
18313 static SDValue LowerADJUST_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) {
18314 return Op.getOperand(0);
18317 SDValue X86TargetLowering::LowerINIT_TRAMPOLINE(SDValue Op,
18318 SelectionDAG &DAG) const {
18319 SDValue Root = Op.getOperand(0);
18320 SDValue Trmp = Op.getOperand(1); // trampoline
18321 SDValue FPtr = Op.getOperand(2); // nested function
18322 SDValue Nest = Op.getOperand(3); // 'nest' parameter value
18325 const Value *TrmpAddr = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();
18326 const TargetRegisterInfo *TRI = Subtarget->getRegisterInfo();
18328 if (Subtarget->is64Bit()) {
18329 SDValue OutChains[6];
18331 // Large code-model.
18332 const unsigned char JMP64r = 0xFF; // 64-bit jmp through register opcode.
18333 const unsigned char MOV64ri = 0xB8; // X86::MOV64ri opcode.
18335 const unsigned char N86R10 = TRI->getEncodingValue(X86::R10) & 0x7;
18336 const unsigned char N86R11 = TRI->getEncodingValue(X86::R11) & 0x7;
18338 const unsigned char REX_WB = 0x40 | 0x08 | 0x01; // REX prefix
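    // The bytes stored below form, roughly, the 23-byte trampoline
    //   offset  0: 49 BB <FPtr, 8 bytes>   movabsq $FPtr, %r11
    //   offset 10: 49 BA <Nest, 8 bytes>   movabsq $Nest, %r10
    //   offset 20: 49 FF E3                jmpq    *%r11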
18340 // Load the pointer to the nested function into R11.
18341 unsigned OpCode = ((MOV64ri | N86R11) << 8) | REX_WB; // movabsq r11
18342 SDValue Addr = Trmp;
18343 OutChains[0] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, MVT::i16),
18344 Addr, MachinePointerInfo(TrmpAddr),
18347 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
18348 DAG.getConstant(2, MVT::i64));
18349 OutChains[1] = DAG.getStore(Root, dl, FPtr, Addr,
18350 MachinePointerInfo(TrmpAddr, 2),
18353 // Load the 'nest' parameter value into R10.
18354 // R10 is specified in X86CallingConv.td
18355 OpCode = ((MOV64ri | N86R10) << 8) | REX_WB; // movabsq r10
18356 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
18357 DAG.getConstant(10, MVT::i64));
18358 OutChains[2] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, MVT::i16),
18359 Addr, MachinePointerInfo(TrmpAddr, 10),
18362 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
18363 DAG.getConstant(12, MVT::i64));
18364 OutChains[3] = DAG.getStore(Root, dl, Nest, Addr,
18365 MachinePointerInfo(TrmpAddr, 12),
18368 // Jump to the nested function.
18369 OpCode = (JMP64r << 8) | REX_WB; // jmpq *...
18370 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
18371 DAG.getConstant(20, MVT::i64));
18372 OutChains[4] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, MVT::i16),
18373 Addr, MachinePointerInfo(TrmpAddr, 20),
18376 unsigned char ModRM = N86R11 | (4 << 3) | (3 << 6); // ...r11
18377 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
18378 DAG.getConstant(22, MVT::i64));
18379 OutChains[5] = DAG.getStore(Root, dl, DAG.getConstant(ModRM, MVT::i8), Addr,
18380 MachinePointerInfo(TrmpAddr, 22),
18383 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
18385 const Function *Func =
18386 cast<Function>(cast<SrcValueSDNode>(Op.getOperand(5))->getValue());
18387 CallingConv::ID CC = Func->getCallingConv();
18392 llvm_unreachable("Unsupported calling convention");
18393 case CallingConv::C:
18394 case CallingConv::X86_StdCall: {
18395 // Pass 'nest' parameter in ECX.
18396 // Must be kept in sync with X86CallingConv.td
18397 NestReg = X86::ECX;
18399 // Check that ECX wasn't needed by an 'inreg' parameter.
18400 FunctionType *FTy = Func->getFunctionType();
18401 const AttributeSet &Attrs = Func->getAttributes();
18403 if (!Attrs.isEmpty() && !Func->isVarArg()) {
18404 unsigned InRegCount = 0;
18407 for (FunctionType::param_iterator I = FTy->param_begin(),
18408 E = FTy->param_end(); I != E; ++I, ++Idx)
18409 if (Attrs.hasAttribute(Idx, Attribute::InReg))
18410 // FIXME: should only count parameters that are lowered to integers.
18411 InRegCount += (TD->getTypeSizeInBits(*I) + 31) / 32;
18413 if (InRegCount > 2) {
18414 report_fatal_error("Nest register in use - reduce number of inreg"
18420 case CallingConv::X86_FastCall:
18421 case CallingConv::X86_ThisCall:
18422 case CallingConv::Fast:
18423 // Pass 'nest' parameter in EAX.
18424 // Must be kept in sync with X86CallingConv.td
18425 NestReg = X86::EAX;
18429 SDValue OutChains[4];
18430 SDValue Addr, Disp;
18432 Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
18433 DAG.getConstant(10, MVT::i32));
18434 Disp = DAG.getNode(ISD::SUB, dl, MVT::i32, FPtr, Addr);
18436 // This is storing the opcode for MOV32ri.
18437 const unsigned char MOV32ri = 0xB8; // X86::MOV32ri's opcode byte.
18438 const unsigned char N86Reg = TRI->getEncodingValue(NestReg) & 0x7;
18439 OutChains[0] = DAG.getStore(Root, dl,
18440 DAG.getConstant(MOV32ri|N86Reg, MVT::i8),
18441 Trmp, MachinePointerInfo(TrmpAddr),
18444 Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
18445 DAG.getConstant(1, MVT::i32));
18446 OutChains[1] = DAG.getStore(Root, dl, Nest, Addr,
18447 MachinePointerInfo(TrmpAddr, 1),
18450 const unsigned char JMP = 0xE9; // jmp <32bit dst> opcode.
18451 Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
18452 DAG.getConstant(5, MVT::i32));
18453 OutChains[2] = DAG.getStore(Root, dl, DAG.getConstant(JMP, MVT::i8), Addr,
18454 MachinePointerInfo(TrmpAddr, 5),
18457 Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
18458 DAG.getConstant(6, MVT::i32));
18459 OutChains[3] = DAG.getStore(Root, dl, Disp, Addr,
18460 MachinePointerInfo(TrmpAddr, 6),
18463 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
18467 SDValue X86TargetLowering::LowerFLT_ROUNDS_(SDValue Op,
18468 SelectionDAG &DAG) const {
18469   /*
18470    The rounding mode is in bits 11:10 of FPSR, and has the following
18471    settings:
18472      00 Round to nearest
18473      01 Round to -inf
18474      10 Round to +inf
18475      11 Round to 0
18476
18477   FLT_ROUNDS, on the other hand, expects the following:
18478     -1 Undefined
18479      0 Round to 0
18480      1 Round to nearest
18481      2 Round to +inf
18482      3 Round to -inf
18483
18484   To perform the conversion, we do:
18485     (((((FPSR & 0x800) >> 11) | ((FPSR & 0x400) >> 9)) + 1) & 3)
18486   */
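  // Worked example: with RC = 01 (x87 "round toward -inf") only bit 10 is set,
  // so the expression is ((0x400 >> 9) + 1) & 3 = 3, FLT_ROUNDS' encoding of
  // "toward negative infinity".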
18488 MachineFunction &MF = DAG.getMachineFunction();
18489 const TargetFrameLowering &TFI = *Subtarget->getFrameLowering();
18490 unsigned StackAlignment = TFI.getStackAlignment();
18491 MVT VT = Op.getSimpleValueType();
18494 // Save FP Control Word to stack slot
18495 int SSFI = MF.getFrameInfo()->CreateStackObject(2, StackAlignment, false);
18496 SDValue StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
18498 MachineMemOperand *MMO =
18499 MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI),
18500 MachineMemOperand::MOStore, 2, 2);
18502 SDValue Ops[] = { DAG.getEntryNode(), StackSlot };
18503 SDValue Chain = DAG.getMemIntrinsicNode(X86ISD::FNSTCW16m, DL,
18504 DAG.getVTList(MVT::Other),
18505 Ops, MVT::i16, MMO);
18507 // Load FP Control Word from stack slot
18508 SDValue CWD = DAG.getLoad(MVT::i16, DL, Chain, StackSlot,
18509 MachinePointerInfo(), false, false, false, 0);
18511 // Transform as necessary
18513 DAG.getNode(ISD::SRL, DL, MVT::i16,
18514 DAG.getNode(ISD::AND, DL, MVT::i16,
18515 CWD, DAG.getConstant(0x800, MVT::i16)),
18516 DAG.getConstant(11, MVT::i8));
18518 DAG.getNode(ISD::SRL, DL, MVT::i16,
18519 DAG.getNode(ISD::AND, DL, MVT::i16,
18520 CWD, DAG.getConstant(0x400, MVT::i16)),
18521 DAG.getConstant(9, MVT::i8));
18524 DAG.getNode(ISD::AND, DL, MVT::i16,
18525 DAG.getNode(ISD::ADD, DL, MVT::i16,
18526 DAG.getNode(ISD::OR, DL, MVT::i16, CWD1, CWD2),
18527 DAG.getConstant(1, MVT::i16)),
18528 DAG.getConstant(3, MVT::i16));
18530 return DAG.getNode((VT.getSizeInBits() < 16 ?
18531 ISD::TRUNCATE : ISD::ZERO_EXTEND), DL, VT, RetVal);
18534 static SDValue LowerCTLZ(SDValue Op, SelectionDAG &DAG) {
18535 MVT VT = Op.getSimpleValueType();
18537 unsigned NumBits = VT.getSizeInBits();
18540 Op = Op.getOperand(0);
18541 if (VT == MVT::i8) {
18542     // Zero extend to i32 since there is no i8 bsr.
18544 Op = DAG.getNode(ISD::ZERO_EXTEND, dl, OpVT, Op);
18547 // Issue a bsr (scan bits in reverse) which also sets EFLAGS.
18548 SDVTList VTs = DAG.getVTList(OpVT, MVT::i32);
18549 Op = DAG.getNode(X86ISD::BSR, dl, VTs, Op);
18551 // If src is zero (i.e. bsr sets ZF), returns NumBits.
18554 DAG.getConstant(NumBits+NumBits-1, OpVT),
18555 DAG.getConstant(X86::COND_E, MVT::i8),
18558 Op = DAG.getNode(X86ISD::CMOV, dl, OpVT, Ops);
18560 // Finally xor with NumBits-1.
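  // e.g. for the i32 input 0x00000010, BSR produces 4 (the index of the highest
  // set bit) and 4 ^ 31 = 27, which is exactly ctlz(0x10).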
18561 Op = DAG.getNode(ISD::XOR, dl, OpVT, Op, DAG.getConstant(NumBits-1, OpVT));
18564 Op = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Op);
18568 static SDValue LowerCTLZ_ZERO_UNDEF(SDValue Op, SelectionDAG &DAG) {
18569 MVT VT = Op.getSimpleValueType();
18571 unsigned NumBits = VT.getSizeInBits();
18574 Op = Op.getOperand(0);
18575 if (VT == MVT::i8) {
18576     // Zero extend to i32 since there is no i8 bsr.
18578 Op = DAG.getNode(ISD::ZERO_EXTEND, dl, OpVT, Op);
18581 // Issue a bsr (scan bits in reverse).
18582 SDVTList VTs = DAG.getVTList(OpVT, MVT::i32);
18583 Op = DAG.getNode(X86ISD::BSR, dl, VTs, Op);
18585 // And xor with NumBits-1.
18586 Op = DAG.getNode(ISD::XOR, dl, OpVT, Op, DAG.getConstant(NumBits-1, OpVT));
18589 Op = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Op);
18593 static SDValue LowerCTTZ(SDValue Op, SelectionDAG &DAG) {
18594 MVT VT = Op.getSimpleValueType();
18595 unsigned NumBits = VT.getSizeInBits();
18597 Op = Op.getOperand(0);
18599 // Issue a bsf (scan bits forward) which also sets EFLAGS.
18600 SDVTList VTs = DAG.getVTList(VT, MVT::i32);
18601 Op = DAG.getNode(X86ISD::BSF, dl, VTs, Op);
18603 // If src is zero (i.e. bsf sets ZF), returns NumBits.
18606 DAG.getConstant(NumBits, VT),
18607 DAG.getConstant(X86::COND_E, MVT::i8),
18610 return DAG.getNode(X86ISD::CMOV, dl, VT, Ops);
18613 // Lower256IntArith - Break a 256-bit integer operation into two new 128-bit
18614 // ones, and then concatenate the result back.
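// e.g. a v8i32 add on an AVX1-only target becomes two v4i32 adds on the low and
// high 128-bit halves, glued back together with a CONCAT_VECTORS.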
18615 static SDValue Lower256IntArith(SDValue Op, SelectionDAG &DAG) {
18616 MVT VT = Op.getSimpleValueType();
18618 assert(VT.is256BitVector() && VT.isInteger() &&
18619 "Unsupported value type for operation");
18621 unsigned NumElems = VT.getVectorNumElements();
18624 // Extract the LHS vectors
18625 SDValue LHS = Op.getOperand(0);
18626 SDValue LHS1 = Extract128BitVector(LHS, 0, DAG, dl);
18627 SDValue LHS2 = Extract128BitVector(LHS, NumElems/2, DAG, dl);
18629 // Extract the RHS vectors
18630 SDValue RHS = Op.getOperand(1);
18631 SDValue RHS1 = Extract128BitVector(RHS, 0, DAG, dl);
18632 SDValue RHS2 = Extract128BitVector(RHS, NumElems/2, DAG, dl);
18634 MVT EltVT = VT.getVectorElementType();
18635 MVT NewVT = MVT::getVectorVT(EltVT, NumElems/2);
18637 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT,
18638 DAG.getNode(Op.getOpcode(), dl, NewVT, LHS1, RHS1),
18639 DAG.getNode(Op.getOpcode(), dl, NewVT, LHS2, RHS2));
18642 static SDValue LowerADD(SDValue Op, SelectionDAG &DAG) {
18643 assert(Op.getSimpleValueType().is256BitVector() &&
18644 Op.getSimpleValueType().isInteger() &&
18645 "Only handle AVX 256-bit vector integer operation");
18646 return Lower256IntArith(Op, DAG);
18649 static SDValue LowerSUB(SDValue Op, SelectionDAG &DAG) {
18650 assert(Op.getSimpleValueType().is256BitVector() &&
18651 Op.getSimpleValueType().isInteger() &&
18652 "Only handle AVX 256-bit vector integer operation");
18653 return Lower256IntArith(Op, DAG);
18656 static SDValue LowerMUL(SDValue Op, const X86Subtarget *Subtarget,
18657 SelectionDAG &DAG) {
18659 MVT VT = Op.getSimpleValueType();
18661 // Decompose 256-bit ops into smaller 128-bit ops.
18662 if (VT.is256BitVector() && !Subtarget->hasInt256())
18663 return Lower256IntArith(Op, DAG);
18665 SDValue A = Op.getOperand(0);
18666 SDValue B = Op.getOperand(1);
18668 // Lower v4i32 mul as 2x shuffle, 2x pmuludq, 2x shuffle.
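// PMULUDQ multiplies only the even-indexed 32-bit lanes (0 and 2) into 64-bit
// products, so one multiply covers lanes 0/2 while a second, fed with the odd
// lanes shuffled into even positions, covers lanes 1/3; the final {0,4,2,6}
// shuffle then collects the low 32 bits of each product.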
18669 if (VT == MVT::v4i32) {
18670 assert(Subtarget->hasSSE2() && !Subtarget->hasSSE41() &&
18671 "Should not custom lower when pmuldq is available!");
18673 // Extract the odd parts.
18674 static const int UnpackMask[] = { 1, -1, 3, -1 };
18675 SDValue Aodds = DAG.getVectorShuffle(VT, dl, A, A, UnpackMask);
18676 SDValue Bodds = DAG.getVectorShuffle(VT, dl, B, B, UnpackMask);
18678 // Multiply the even parts.
18679 SDValue Evens = DAG.getNode(X86ISD::PMULUDQ, dl, MVT::v2i64, A, B);
18680 // Now multiply odd parts.
18681 SDValue Odds = DAG.getNode(X86ISD::PMULUDQ, dl, MVT::v2i64, Aodds, Bodds);
18683 Evens = DAG.getNode(ISD::BITCAST, dl, VT, Evens);
18684 Odds = DAG.getNode(ISD::BITCAST, dl, VT, Odds);
18686 // Merge the two vectors back together with a shuffle. This expands into 2
18687 // instructions.
18688 static const int ShufMask[] = { 0, 4, 2, 6 };
18689 return DAG.getVectorShuffle(VT, dl, Evens, Odds, ShufMask);
18692 assert((VT == MVT::v2i64 || VT == MVT::v4i64 || VT == MVT::v8i64) &&
18693 "Only know how to lower V2I64/V4I64/V8I64 multiply");
18695 // Ahi = psrlqi(a, 32);
18696 // Bhi = psrlqi(b, 32);
18698 // AloBlo = pmuludq(a, b);
18699 // AloBhi = pmuludq(a, Bhi);
18700 // AhiBlo = pmuludq(Ahi, b);
18702 // AloBhi = psllqi(AloBhi, 32);
18703 // AhiBlo = psllqi(AhiBlo, 32);
18704 // return AloBlo + AloBhi + AhiBlo;
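//
// This is ordinary 64x64->64 multiplication by 32-bit halves, with the
// Ahi*Bhi*2^64 term dropped because it wraps away modulo 2^64. A scalar
// sketch of the same decomposition, for reference only:
//
//   uint64_t mul64(uint64_t a, uint64_t b) {
//     uint64_t alo = a & 0xffffffff, ahi = a >> 32;
//     uint64_t blo = b & 0xffffffff, bhi = b >> 32;
//     return alo * blo + ((alo * bhi + ahi * blo) << 32);
//   }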
18706 SDValue Ahi = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, VT, A, 32, DAG);
18707 SDValue Bhi = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, VT, B, 32, DAG);
18709 // Bit cast to 32-bit vectors for MULUDQ
18710 EVT MulVT = (VT == MVT::v2i64) ? MVT::v4i32 :
18711 (VT == MVT::v4i64) ? MVT::v8i32 : MVT::v16i32;
18712 A = DAG.getNode(ISD::BITCAST, dl, MulVT, A);
18713 B = DAG.getNode(ISD::BITCAST, dl, MulVT, B);
18714 Ahi = DAG.getNode(ISD::BITCAST, dl, MulVT, Ahi);
18715 Bhi = DAG.getNode(ISD::BITCAST, dl, MulVT, Bhi);
18717 SDValue AloBlo = DAG.getNode(X86ISD::PMULUDQ, dl, VT, A, B);
18718 SDValue AloBhi = DAG.getNode(X86ISD::PMULUDQ, dl, VT, A, Bhi);
18719 SDValue AhiBlo = DAG.getNode(X86ISD::PMULUDQ, dl, VT, Ahi, B);
18721 AloBhi = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, VT, AloBhi, 32, DAG);
18722 AhiBlo = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, VT, AhiBlo, 32, DAG);
18724 SDValue Res = DAG.getNode(ISD::ADD, dl, VT, AloBlo, AloBhi);
18725 return DAG.getNode(ISD::ADD, dl, VT, Res, AhiBlo);
18728 SDValue X86TargetLowering::LowerWin64_i128OP(SDValue Op, SelectionDAG &DAG) const {
18729 assert(Subtarget->isTargetWin64() && "Unexpected target");
18730 EVT VT = Op.getValueType();
18731 assert(VT.isInteger() && VT.getSizeInBits() == 128 &&
18732 "Unexpected return type for lowering");
18734 RTLIB::Libcall LC;
18735 bool isSigned;
18736 switch (Op->getOpcode()) {
18737 default: llvm_unreachable("Unexpected request for libcall!");
18738 case ISD::SDIV: isSigned = true; LC = RTLIB::SDIV_I128; break;
18739 case ISD::UDIV: isSigned = false; LC = RTLIB::UDIV_I128; break;
18740 case ISD::SREM: isSigned = true; LC = RTLIB::SREM_I128; break;
18741 case ISD::UREM: isSigned = false; LC = RTLIB::UREM_I128; break;
18742 case ISD::SDIVREM: isSigned = true; LC = RTLIB::SDIVREM_I128; break;
18743 case ISD::UDIVREM: isSigned = false; LC = RTLIB::UDIVREM_I128; break;
18747 SDValue InChain = DAG.getEntryNode();
18749 TargetLowering::ArgListTy Args;
18750 TargetLowering::ArgListEntry Entry;
18751 for (unsigned i = 0, e = Op->getNumOperands(); i != e; ++i) {
18752 EVT ArgVT = Op->getOperand(i).getValueType();
18753 assert(ArgVT.isInteger() && ArgVT.getSizeInBits() == 128 &&
18754 "Unexpected argument type for lowering");
18755 SDValue StackPtr = DAG.CreateStackTemporary(ArgVT, 16);
18756 Entry.Node = StackPtr;
18757 InChain = DAG.getStore(InChain, dl, Op->getOperand(i), StackPtr, MachinePointerInfo(),
18759 Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
18760 Entry.Ty = PointerType::get(ArgTy,0);
18761 Entry.isSExt = false;
18762 Entry.isZExt = false;
18763 Args.push_back(Entry);
18766 SDValue Callee = DAG.getExternalSymbol(getLibcallName(LC),
18769 TargetLowering::CallLoweringInfo CLI(DAG);
18770 CLI.setDebugLoc(dl).setChain(InChain)
18771 .setCallee(getLibcallCallingConv(LC),
18772 static_cast<EVT>(MVT::v2i64).getTypeForEVT(*DAG.getContext()),
18773 Callee, std::move(Args), 0)
18774 .setInRegister().setSExtResult(isSigned).setZExtResult(!isSigned);
18776 std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI);
18777 return DAG.getNode(ISD::BITCAST, dl, VT, CallInfo.first);
18780 static SDValue LowerMUL_LOHI(SDValue Op, const X86Subtarget *Subtarget,
18781 SelectionDAG &DAG) {
18782 SDValue Op0 = Op.getOperand(0), Op1 = Op.getOperand(1);
18783 EVT VT = Op0.getValueType();
18786 assert((VT == MVT::v4i32 && Subtarget->hasSSE2()) ||
18787 (VT == MVT::v8i32 && Subtarget->hasInt256()));
18789 // PMULxD operations multiply each even value (starting at 0) of LHS with
18790 // the related value of RHS and produce a widened result.
18791 // E.g., PMULUDQ <4 x i32> <a|b|c|d>, <4 x i32> <e|f|g|h>
18792 // => <2 x i64> <ae|cg>
18794 // In other words, to have all the results, we need to perform two PMULxD:
18795 // 1. one with the even values.
18796 // 2. one with the odd values.
18797 // To achieve #2, we need to place the odd values at an even position.
18799 // Place the odd value at an even position (basically, shift all values 1
18800 // step to the left):
18801 const int Mask[] = {1, -1, 3, -1, 5, -1, 7, -1};
18802 // <a|b|c|d> => <b|undef|d|undef>
18803 SDValue Odd0 = DAG.getVectorShuffle(VT, dl, Op0, Op0, Mask);
18804 // <e|f|g|h> => <f|undef|h|undef>
18805 SDValue Odd1 = DAG.getVectorShuffle(VT, dl, Op1, Op1, Mask);
18807 // Emit two multiplies, one for the lower 2 ints and one for the higher 2
18808 // ints.
18809 MVT MulVT = VT == MVT::v4i32 ? MVT::v2i64 : MVT::v4i64;
18810 bool IsSigned = Op->getOpcode() == ISD::SMUL_LOHI;
18811 unsigned Opcode =
18812 (!IsSigned || !Subtarget->hasSSE41()) ? X86ISD::PMULUDQ : X86ISD::PMULDQ;
18813 // PMULUDQ <4 x i32> <a|b|c|d>, <4 x i32> <e|f|g|h>
18814 // => <2 x i64> <ae|cg>
18815 SDValue Mul1 = DAG.getNode(ISD::BITCAST, dl, VT,
18816 DAG.getNode(Opcode, dl, MulVT, Op0, Op1));
18817 // PMULUDQ <4 x i32> <b|undef|d|undef>, <4 x i32> <f|undef|h|undef>
18818 // => <2 x i64> <bf|dh>
18819 SDValue Mul2 = DAG.getNode(ISD::BITCAST, dl, VT,
18820 DAG.getNode(Opcode, dl, MulVT, Odd0, Odd1));
18822 // Shuffle it back into the right order.
18823 SDValue Highs, Lows;
18824 if (VT == MVT::v8i32) {
18825 const int HighMask[] = {1, 9, 3, 11, 5, 13, 7, 15};
18826 Highs = DAG.getVectorShuffle(VT, dl, Mul1, Mul2, HighMask);
18827 const int LowMask[] = {0, 8, 2, 10, 4, 12, 6, 14};
18828 Lows = DAG.getVectorShuffle(VT, dl, Mul1, Mul2, LowMask);
18830 const int HighMask[] = {1, 5, 3, 7};
18831 Highs = DAG.getVectorShuffle(VT, dl, Mul1, Mul2, HighMask);
18832 const int LowMask[] = {0, 4, 2, 6};
18833 Lows = DAG.getVectorShuffle(VT, dl, Mul1, Mul2, LowMask);
18836 // If we have a signed multiply but no PMULDQ, fix up the high parts of an
18837 // unsigned multiply.
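// The identity used is mulhs(a, b) = mulhu(a, b) - (a < 0 ? b : 0)
//                                                - (b < 0 ? a : 0),
// with the (x < 0 ? y : 0) terms materialized below as (x >> 31) & y using an
// arithmetic shift.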
18838 if (IsSigned && !Subtarget->hasSSE41()) {
18839 SDValue ShAmt =
18840 DAG.getConstant(31, DAG.getTargetLoweringInfo().getShiftAmountTy(VT));
18841 SDValue T1 = DAG.getNode(ISD::AND, dl, VT,
18842 DAG.getNode(ISD::SRA, dl, VT, Op0, ShAmt), Op1);
18843 SDValue T2 = DAG.getNode(ISD::AND, dl, VT,
18844 DAG.getNode(ISD::SRA, dl, VT, Op1, ShAmt), Op0);
18846 SDValue Fixup = DAG.getNode(ISD::ADD, dl, VT, T1, T2);
18847 Highs = DAG.getNode(ISD::SUB, dl, VT, Highs, Fixup);
18850 // The first result of MUL_LOHI is actually the low value, followed by the
18851 // high value.
18852 SDValue Ops[] = {Lows, Highs};
18853 return DAG.getMergeValues(Ops, dl);
18856 static SDValue LowerScalarImmediateShift(SDValue Op, SelectionDAG &DAG,
18857 const X86Subtarget *Subtarget) {
18858 MVT VT = Op.getSimpleValueType();
18860 SDValue R = Op.getOperand(0);
18861 SDValue Amt = Op.getOperand(1);
18863 // Optimize shl/srl/sra with constant shift amount.
18864 if (auto *BVAmt = dyn_cast<BuildVectorSDNode>(Amt)) {
18865 if (auto *ShiftConst = BVAmt->getConstantSplatNode()) {
18866 uint64_t ShiftAmt = ShiftConst->getZExtValue();
18868 if (VT == MVT::v2i64 || VT == MVT::v4i32 || VT == MVT::v8i16 ||
18869 (Subtarget->hasInt256() &&
18870 (VT == MVT::v4i64 || VT == MVT::v8i32 || VT == MVT::v16i16)) ||
18871 (Subtarget->hasAVX512() &&
18872 (VT == MVT::v8i64 || VT == MVT::v16i32))) {
18873 if (Op.getOpcode() == ISD::SHL)
18874 return getTargetVShiftByConstNode(X86ISD::VSHLI, dl, VT, R, ShiftAmt,
18876 if (Op.getOpcode() == ISD::SRL)
18877 return getTargetVShiftByConstNode(X86ISD::VSRLI, dl, VT, R, ShiftAmt,
18879 if (Op.getOpcode() == ISD::SRA && VT != MVT::v2i64 && VT != MVT::v4i64)
18880 return getTargetVShiftByConstNode(X86ISD::VSRAI, dl, VT, R, ShiftAmt,
18884 if (VT == MVT::v16i8) {
18885 if (Op.getOpcode() == ISD::SHL) {
18886 // Make a large shift.
18887 SDValue SHL = getTargetVShiftByConstNode(X86ISD::VSHLI, dl,
18888 MVT::v8i16, R, ShiftAmt,
18890 SHL = DAG.getNode(ISD::BITCAST, dl, VT, SHL);
18891 // Zero out the rightmost bits.
18892 SmallVector<SDValue, 16> V(16,
18893 DAG.getConstant(uint8_t(-1U << ShiftAmt),
18895 return DAG.getNode(ISD::AND, dl, VT, SHL,
18896 DAG.getNode(ISD::BUILD_VECTOR, dl, VT, V));
18898 if (Op.getOpcode() == ISD::SRL) {
18899 // Make a large shift.
18900 SDValue SRL = getTargetVShiftByConstNode(X86ISD::VSRLI, dl,
18901 MVT::v8i16, R, ShiftAmt,
18903 SRL = DAG.getNode(ISD::BITCAST, dl, VT, SRL);
18904 // Zero out the leftmost bits.
18905 SmallVector<SDValue, 16> V(16,
18906 DAG.getConstant(uint8_t(-1U) >> ShiftAmt,
18908 return DAG.getNode(ISD::AND, dl, VT, SRL,
18909 DAG.getNode(ISD::BUILD_VECTOR, dl, VT, V));
18911 if (Op.getOpcode() == ISD::SRA) {
18912 if (ShiftAmt == 7) {
18913 // R s>> 7 === R s< 0
18914 SDValue Zeros = getZeroVector(VT, Subtarget, DAG, dl);
18915 return DAG.getNode(X86ISD::PCMPGT, dl, VT, Zeros, R);
18918 // R s>> a === ((R u>> a) ^ m) - m
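// Here m = (0x80 >> a) has only the shifted-in sign-bit position set; the
// xor/subtract pair propagates that bit through the upper 'a' bits, turning
// the logical shift into an arithmetic one.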
18919 SDValue Res = DAG.getNode(ISD::SRL, dl, VT, R, Amt);
18920 SmallVector<SDValue, 16> V(16, DAG.getConstant(128 >> ShiftAmt,
18922 SDValue Mask = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, V);
18923 Res = DAG.getNode(ISD::XOR, dl, VT, Res, Mask);
18924 Res = DAG.getNode(ISD::SUB, dl, VT, Res, Mask);
18927 llvm_unreachable("Unknown shift opcode.");
18930 if (Subtarget->hasInt256() && VT == MVT::v32i8) {
18931 if (Op.getOpcode() == ISD::SHL) {
18932 // Make a large shift.
18933 SDValue SHL = getTargetVShiftByConstNode(X86ISD::VSHLI, dl,
18934 MVT::v16i16, R, ShiftAmt,
18936 SHL = DAG.getNode(ISD::BITCAST, dl, VT, SHL);
18937 // Zero out the rightmost bits.
18938 SmallVector<SDValue, 32> V(32,
18939 DAG.getConstant(uint8_t(-1U << ShiftAmt),
18941 return DAG.getNode(ISD::AND, dl, VT, SHL,
18942 DAG.getNode(ISD::BUILD_VECTOR, dl, VT, V));
18944 if (Op.getOpcode() == ISD::SRL) {
18945 // Make a large shift.
18946 SDValue SRL = getTargetVShiftByConstNode(X86ISD::VSRLI, dl,
18947 MVT::v16i16, R, ShiftAmt,
18949 SRL = DAG.getNode(ISD::BITCAST, dl, VT, SRL);
18950 // Zero out the leftmost bits.
18951 SmallVector<SDValue, 32> V(32,
18952 DAG.getConstant(uint8_t(-1U) >> ShiftAmt,
18954 return DAG.getNode(ISD::AND, dl, VT, SRL,
18955 DAG.getNode(ISD::BUILD_VECTOR, dl, VT, V));
18957 if (Op.getOpcode() == ISD::SRA) {
18958 if (ShiftAmt == 7) {
18959 // R s>> 7 === R s< 0
18960 SDValue Zeros = getZeroVector(VT, Subtarget, DAG, dl);
18961 return DAG.getNode(X86ISD::PCMPGT, dl, VT, Zeros, R);
18964 // R s>> a === ((R u>> a) ^ m) - m
18965 SDValue Res = DAG.getNode(ISD::SRL, dl, VT, R, Amt);
18966 SmallVector<SDValue, 32> V(32, DAG.getConstant(128 >> ShiftAmt,
18968 SDValue Mask = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, V);
18969 Res = DAG.getNode(ISD::XOR, dl, VT, Res, Mask);
18970 Res = DAG.getNode(ISD::SUB, dl, VT, Res, Mask);
18973 llvm_unreachable("Unknown shift opcode.");
18978 // Special case in 32-bit mode, where i64 is expanded into high and low parts.
18979 if (!Subtarget->is64Bit() &&
18980 (VT == MVT::v2i64 || (Subtarget->hasInt256() && VT == MVT::v4i64)) &&
18981 Amt.getOpcode() == ISD::BITCAST &&
18982 Amt.getOperand(0).getOpcode() == ISD::BUILD_VECTOR) {
18983 Amt = Amt.getOperand(0);
18984 unsigned Ratio = Amt.getSimpleValueType().getVectorNumElements() /
18985 VT.getVectorNumElements();
18986 unsigned RatioInLog2 = Log2_32_Ceil(Ratio);
18987 uint64_t ShiftAmt = 0;
18988 for (unsigned i = 0; i != Ratio; ++i) {
18989 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Amt.getOperand(i));
18993 ShiftAmt |= C->getZExtValue() << (i * (1 << (6 - RatioInLog2)));
18995 // Check remaining shift amounts.
18996 for (unsigned i = Ratio; i != Amt.getNumOperands(); i += Ratio) {
18997 uint64_t ShAmt = 0;
18998 for (unsigned j = 0; j != Ratio; ++j) {
18999 ConstantSDNode *C =
19000 dyn_cast<ConstantSDNode>(Amt.getOperand(i + j));
19004 ShAmt |= C->getZExtValue() << (j * (1 << (6 - RatioInLog2)));
19006 if (ShAmt != ShiftAmt)
19009 switch (Op.getOpcode()) {
19011 llvm_unreachable("Unknown shift opcode!");
19013 return getTargetVShiftByConstNode(X86ISD::VSHLI, dl, VT, R, ShiftAmt,
19016 return getTargetVShiftByConstNode(X86ISD::VSRLI, dl, VT, R, ShiftAmt,
19019 return getTargetVShiftByConstNode(X86ISD::VSRAI, dl, VT, R, ShiftAmt,
19027 static SDValue LowerScalarVariableShift(SDValue Op, SelectionDAG &DAG,
19028 const X86Subtarget* Subtarget) {
19029 MVT VT = Op.getSimpleValueType();
19031 SDValue R = Op.getOperand(0);
19032 SDValue Amt = Op.getOperand(1);
19034 if ((VT == MVT::v2i64 && Op.getOpcode() != ISD::SRA) ||
19035 VT == MVT::v4i32 || VT == MVT::v8i16 ||
19036 (Subtarget->hasInt256() &&
19037 ((VT == MVT::v4i64 && Op.getOpcode() != ISD::SRA) ||
19038 VT == MVT::v8i32 || VT == MVT::v16i16)) ||
19039 (Subtarget->hasAVX512() && (VT == MVT::v8i64 || VT == MVT::v16i32))) {
19040 SDValue BaseShAmt;
19041 EVT EltVT = VT.getVectorElementType();
19043 if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(Amt)) {
19044 // Check if this build_vector node is doing a splat.
19045 // If so, then set BaseShAmt equal to the splat value.
19046 BaseShAmt = BV->getSplatValue();
19047 if (BaseShAmt && BaseShAmt.getOpcode() == ISD::UNDEF)
19048 BaseShAmt = SDValue();
19050 if (Amt.getOpcode() == ISD::EXTRACT_SUBVECTOR)
19051 Amt = Amt.getOperand(0);
19053 ShuffleVectorSDNode *SVN = dyn_cast<ShuffleVectorSDNode>(Amt);
19054 if (SVN && SVN->isSplat()) {
19055 unsigned SplatIdx = (unsigned)SVN->getSplatIndex();
19056 SDValue InVec = Amt.getOperand(0);
19057 if (InVec.getOpcode() == ISD::BUILD_VECTOR) {
19058 assert((SplatIdx < InVec.getValueType().getVectorNumElements()) &&
19059 "Unexpected shuffle index found!");
19060 BaseShAmt = InVec.getOperand(SplatIdx);
19061 } else if (InVec.getOpcode() == ISD::INSERT_VECTOR_ELT) {
19062 if (ConstantSDNode *C =
19063 dyn_cast<ConstantSDNode>(InVec.getOperand(2))) {
19064 if (C->getZExtValue() == SplatIdx)
19065 BaseShAmt = InVec.getOperand(1);
19070 // Avoid introducing an extract element from a shuffle.
19071 BaseShAmt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, InVec,
19072 DAG.getIntPtrConstant(SplatIdx));
19076 if (BaseShAmt.getNode()) {
19077 assert(EltVT.bitsLE(MVT::i64) && "Unexpected element type!");
19078 if (EltVT != MVT::i64 && EltVT.bitsGT(MVT::i32))
19079 BaseShAmt = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64, BaseShAmt);
19080 else if (EltVT.bitsLT(MVT::i32))
19081 BaseShAmt = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, BaseShAmt);
19083 switch (Op.getOpcode()) {
19085 llvm_unreachable("Unknown shift opcode!");
19087 switch (VT.SimpleTy) {
19088 default: return SDValue();
19097 return getTargetVShiftNode(X86ISD::VSHLI, dl, VT, R, BaseShAmt, DAG);
19100 switch (VT.SimpleTy) {
19101 default: return SDValue();
19108 return getTargetVShiftNode(X86ISD::VSRAI, dl, VT, R, BaseShAmt, DAG);
19111 switch (VT.SimpleTy) {
19112 default: return SDValue();
19121 return getTargetVShiftNode(X86ISD::VSRLI, dl, VT, R, BaseShAmt, DAG);
19127 // Special case in 32-bit mode, where i64 is expanded into high and low parts.
19128 if (!Subtarget->is64Bit() &&
19129 (VT == MVT::v2i64 || (Subtarget->hasInt256() && VT == MVT::v4i64) ||
19130 (Subtarget->hasAVX512() && VT == MVT::v8i64)) &&
19131 Amt.getOpcode() == ISD::BITCAST &&
19132 Amt.getOperand(0).getOpcode() == ISD::BUILD_VECTOR) {
19133 Amt = Amt.getOperand(0);
19134 unsigned Ratio = Amt.getSimpleValueType().getVectorNumElements() /
19135 VT.getVectorNumElements();
19136 std::vector<SDValue> Vals(Ratio);
19137 for (unsigned i = 0; i != Ratio; ++i)
19138 Vals[i] = Amt.getOperand(i);
19139 for (unsigned i = Ratio; i != Amt.getNumOperands(); i += Ratio) {
19140 for (unsigned j = 0; j != Ratio; ++j)
19141 if (Vals[j] != Amt.getOperand(i + j))
19144 switch (Op.getOpcode()) {
19146 llvm_unreachable("Unknown shift opcode!");
19148 return DAG.getNode(X86ISD::VSHL, dl, VT, R, Op.getOperand(1));
19150 return DAG.getNode(X86ISD::VSRL, dl, VT, R, Op.getOperand(1));
19152 return DAG.getNode(X86ISD::VSRA, dl, VT, R, Op.getOperand(1));
19159 static SDValue LowerShift(SDValue Op, const X86Subtarget* Subtarget,
19160 SelectionDAG &DAG) {
19161 MVT VT = Op.getSimpleValueType();
19163 SDValue R = Op.getOperand(0);
19164 SDValue Amt = Op.getOperand(1);
19167 assert(VT.isVector() && "Custom lowering only for vector shifts!");
19168 assert(Subtarget->hasSSE2() && "Only custom lower when we have SSE2!");
19170 V = LowerScalarImmediateShift(Op, DAG, Subtarget);
19174 V = LowerScalarVariableShift(Op, DAG, Subtarget);
19178 if (Subtarget->hasAVX512() && (VT == MVT::v16i32 || VT == MVT::v8i64))
19180 // AVX2 has VPSLLV/VPSRAV/VPSRLV.
19181 if (Subtarget->hasInt256()) {
19182 if (Op.getOpcode() == ISD::SRL &&
19183 (VT == MVT::v2i64 || VT == MVT::v4i32 ||
19184 VT == MVT::v4i64 || VT == MVT::v8i32))
19186 if (Op.getOpcode() == ISD::SHL &&
19187 (VT == MVT::v2i64 || VT == MVT::v4i32 ||
19188 VT == MVT::v4i64 || VT == MVT::v8i32))
19190 if (Op.getOpcode() == ISD::SRA && (VT == MVT::v4i32 || VT == MVT::v8i32))
19194 // If possible, lower this packed shift into a vector multiply instead of
19195 // expanding it into a sequence of scalar shifts.
19196 // Do this only if the vector shift count is a constant build_vector.
19197 if (Op.getOpcode() == ISD::SHL &&
19198 (VT == MVT::v8i16 || VT == MVT::v4i32 ||
19199 (Subtarget->hasInt256() && VT == MVT::v16i16)) &&
19200 ISD::isBuildVectorOfConstantSDNodes(Amt.getNode())) {
19201 SmallVector<SDValue, 8> Elts;
19202 EVT SVT = VT.getScalarType();
19203 unsigned SVTBits = SVT.getSizeInBits();
19204 const APInt &One = APInt(SVTBits, 1);
19205 unsigned NumElems = VT.getVectorNumElements();
19207 for (unsigned i=0; i !=NumElems; ++i) {
19208 SDValue Op = Amt->getOperand(i);
19209 if (Op->getOpcode() == ISD::UNDEF) {
19210 Elts.push_back(Op);
19214 ConstantSDNode *ND = cast<ConstantSDNode>(Op);
19215 const APInt &C = APInt(SVTBits, ND->getAPIntValue().getZExtValue());
19216 uint64_t ShAmt = C.getZExtValue();
19217 if (ShAmt >= SVTBits) {
19218 Elts.push_back(DAG.getUNDEF(SVT));
19221 Elts.push_back(DAG.getConstant(One.shl(ShAmt), SVT));
19223 SDValue BV = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Elts);
19224 return DAG.getNode(ISD::MUL, dl, VT, R, BV);
19227 // Lower SHL with variable shift amount.
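// The per-lane (1 << amt) factor is built with float arithmetic: shifting the
// amount left by 23 places it in the f32 exponent field, adding 0x3f800000
// (the bit pattern of 1.0f) produces a float equal to 2^amt, and converting
// back to integer yields 1 << amt, so a single vector multiply completes the
// shift.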
19228 if (VT == MVT::v4i32 && Op->getOpcode() == ISD::SHL) {
19229 Op = DAG.getNode(ISD::SHL, dl, VT, Amt, DAG.getConstant(23, VT));
19231 Op = DAG.getNode(ISD::ADD, dl, VT, Op, DAG.getConstant(0x3f800000U, VT));
19232 Op = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, Op);
19233 Op = DAG.getNode(ISD::FP_TO_SINT, dl, VT, Op);
19234 return DAG.getNode(ISD::MUL, dl, VT, Op, R);
19237 // If possible, lower this shift as a sequence of two shifts by
19238 // constant plus a MOVSS/MOVSD instead of scalarizing it.
19240 // (v4i32 (srl A, (build_vector < X, Y, Y, Y>)))
19242 // Could be rewritten as:
19243 // (v4i32 (MOVSS (srl A, <Y,Y,Y,Y>), (srl A, <X,X,X,X>)))
19245 // The advantage is that the two shifts from the example would be
19246 // lowered as X86ISD::VSRLI nodes. This would be cheaper than scalarizing
19247 // the vector shift into four scalar shifts plus four pairs of vector
19248 // insert/extract.
19249 if ((VT == MVT::v8i16 || VT == MVT::v4i32) &&
19250 ISD::isBuildVectorOfConstantSDNodes(Amt.getNode())) {
19251 unsigned TargetOpcode = X86ISD::MOVSS;
19252 bool CanBeSimplified;
19253 // The splat value for the first packed shift (the 'X' from the example).
19254 SDValue Amt1 = Amt->getOperand(0);
19255 // The splat value for the second packed shift (the 'Y' from the example).
19256 SDValue Amt2 = (VT == MVT::v4i32) ? Amt->getOperand(1) :
19257 Amt->getOperand(2);
19259 // See if it is possible to replace this node with a sequence of
19260 // two shifts followed by a MOVSS/MOVSD
19261 if (VT == MVT::v4i32) {
19262 // Check if it is legal to use a MOVSS.
19263 CanBeSimplified = Amt2 == Amt->getOperand(2) &&
19264 Amt2 == Amt->getOperand(3);
19265 if (!CanBeSimplified) {
19266 // Otherwise, check if we can still simplify this node using a MOVSD.
19267 CanBeSimplified = Amt1 == Amt->getOperand(1) &&
19268 Amt->getOperand(2) == Amt->getOperand(3);
19269 TargetOpcode = X86ISD::MOVSD;
19270 Amt2 = Amt->getOperand(2);
19273 // Do similar checks for the case where the machine value type
19274 // is MVT::v8i16.
19275 CanBeSimplified = Amt1 == Amt->getOperand(1);
19276 for (unsigned i=3; i != 8 && CanBeSimplified; ++i)
19277 CanBeSimplified = Amt2 == Amt->getOperand(i);
19279 if (!CanBeSimplified) {
19280 TargetOpcode = X86ISD::MOVSD;
19281 CanBeSimplified = true;
19282 Amt2 = Amt->getOperand(4);
19283 for (unsigned i=0; i != 4 && CanBeSimplified; ++i)
19284 CanBeSimplified = Amt1 == Amt->getOperand(i);
19285 for (unsigned j=4; j != 8 && CanBeSimplified; ++j)
19286 CanBeSimplified = Amt2 == Amt->getOperand(j);
19290 if (CanBeSimplified && isa<ConstantSDNode>(Amt1) &&
19291 isa<ConstantSDNode>(Amt2)) {
19292 // Replace this node with two shifts followed by a MOVSS/MOVSD.
19293 EVT CastVT = MVT::v4i32;
19294 SDValue Splat1 =
19295 DAG.getConstant(cast<ConstantSDNode>(Amt1)->getAPIntValue(), VT);
19296 SDValue Shift1 = DAG.getNode(Op->getOpcode(), dl, VT, R, Splat1);
19297 SDValue Splat2 =
19298 DAG.getConstant(cast<ConstantSDNode>(Amt2)->getAPIntValue(), VT);
19299 SDValue Shift2 = DAG.getNode(Op->getOpcode(), dl, VT, R, Splat2);
19300 if (TargetOpcode == X86ISD::MOVSD)
19301 CastVT = MVT::v2i64;
19302 SDValue BitCast1 = DAG.getNode(ISD::BITCAST, dl, CastVT, Shift1);
19303 SDValue BitCast2 = DAG.getNode(ISD::BITCAST, dl, CastVT, Shift2);
19304 SDValue Result = getTargetShuffleNode(TargetOpcode, dl, CastVT, BitCast2,
19306 return DAG.getNode(ISD::BITCAST, dl, VT, Result);
19310 if (VT == MVT::v16i8 && Op->getOpcode() == ISD::SHL) {
19311 assert(Subtarget->hasSSE2() && "Need SSE2 for pslli/pcmpeq.");
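// There is no native per-byte shift, so the 3-bit shift amount is applied one
// bit at a time: shifting the amount left by 5 moves its bit 2 into each
// byte's sign bit, PCMPEQ against 0x80 turns that into a per-byte select
// mask, and the conditionally selected shifts (by 4, then 2, then 1 via an
// add) use the wider psllw together with an AND (0x0f / 0x3f) so that no bits
// spill into the neighbouring byte.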
19314 Op = DAG.getNode(ISD::SHL, dl, VT, Amt, DAG.getConstant(5, VT));
19315 Op = DAG.getNode(ISD::BITCAST, dl, VT, Op);
19317 // Turn 'a' into a mask suitable for VSELECT
19318 SDValue VSelM = DAG.getConstant(0x80, VT);
19319 SDValue OpVSel = DAG.getNode(ISD::AND, dl, VT, VSelM, Op);
19320 OpVSel = DAG.getNode(X86ISD::PCMPEQ, dl, VT, OpVSel, VSelM);
19322 SDValue CM1 = DAG.getConstant(0x0f, VT);
19323 SDValue CM2 = DAG.getConstant(0x3f, VT);
19325 // r = VSELECT(r, psllw(r & (char16)15, 4), a);
19326 SDValue M = DAG.getNode(ISD::AND, dl, VT, R, CM1);
19327 M = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, MVT::v8i16, M, 4, DAG);
19328 M = DAG.getNode(ISD::BITCAST, dl, VT, M);
19329 R = DAG.getNode(ISD::VSELECT, dl, VT, OpVSel, M, R);
19332 Op = DAG.getNode(ISD::ADD, dl, VT, Op, Op);
19333 OpVSel = DAG.getNode(ISD::AND, dl, VT, VSelM, Op);
19334 OpVSel = DAG.getNode(X86ISD::PCMPEQ, dl, VT, OpVSel, VSelM);
19336 // r = VSELECT(r, psllw(r & (char16)63, 2), a);
19337 M = DAG.getNode(ISD::AND, dl, VT, R, CM2);
19338 M = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, MVT::v8i16, M, 2, DAG);
19339 M = DAG.getNode(ISD::BITCAST, dl, VT, M);
19340 R = DAG.getNode(ISD::VSELECT, dl, VT, OpVSel, M, R);
19343 Op = DAG.getNode(ISD::ADD, dl, VT, Op, Op);
19344 OpVSel = DAG.getNode(ISD::AND, dl, VT, VSelM, Op);
19345 OpVSel = DAG.getNode(X86ISD::PCMPEQ, dl, VT, OpVSel, VSelM);
19347 // return VSELECT(r, r+r, a);
19348 R = DAG.getNode(ISD::VSELECT, dl, VT, OpVSel,
19349 DAG.getNode(ISD::ADD, dl, VT, R, R), R);
19353 // It's worth extending once and using the v8i32 shifts for 16-bit types, but
19354 // the extra overheads to get from v16i8 to v8i32 make the existing SSE
19355 // solution better.
19356 if (Subtarget->hasInt256() && VT == MVT::v8i16) {
19357 MVT NewVT = VT == MVT::v8i16 ? MVT::v8i32 : MVT::v16i16;
19358 unsigned ExtOpc =
19359 Op.getOpcode() == ISD::SRA ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
19360 R = DAG.getNode(ExtOpc, dl, NewVT, R);
19361 Amt = DAG.getNode(ISD::ANY_EXTEND, dl, NewVT, Amt);
19362 return DAG.getNode(ISD::TRUNCATE, dl, VT,
19363 DAG.getNode(Op.getOpcode(), dl, NewVT, R, Amt));
19366 // Decompose 256-bit shifts into smaller 128-bit shifts.
19367 if (VT.is256BitVector()) {
19368 unsigned NumElems = VT.getVectorNumElements();
19369 MVT EltVT = VT.getVectorElementType();
19370 EVT NewVT = MVT::getVectorVT(EltVT, NumElems/2);
19372 // Extract the two vectors
19373 SDValue V1 = Extract128BitVector(R, 0, DAG, dl);
19374 SDValue V2 = Extract128BitVector(R, NumElems/2, DAG, dl);
19376 // Recreate the shift amount vectors
19377 SDValue Amt1, Amt2;
19378 if (Amt.getOpcode() == ISD::BUILD_VECTOR) {
19379 // Constant shift amount
19380 SmallVector<SDValue, 4> Amt1Csts;
19381 SmallVector<SDValue, 4> Amt2Csts;
19382 for (unsigned i = 0; i != NumElems/2; ++i)
19383 Amt1Csts.push_back(Amt->getOperand(i));
19384 for (unsigned i = NumElems/2; i != NumElems; ++i)
19385 Amt2Csts.push_back(Amt->getOperand(i));
19387 Amt1 = DAG.getNode(ISD::BUILD_VECTOR, dl, NewVT, Amt1Csts);
19388 Amt2 = DAG.getNode(ISD::BUILD_VECTOR, dl, NewVT, Amt2Csts);
19390 // Variable shift amount
19391 Amt1 = Extract128BitVector(Amt, 0, DAG, dl);
19392 Amt2 = Extract128BitVector(Amt, NumElems/2, DAG, dl);
19395 // Issue new vector shifts for the smaller types
19396 V1 = DAG.getNode(Op.getOpcode(), dl, NewVT, V1, Amt1);
19397 V2 = DAG.getNode(Op.getOpcode(), dl, NewVT, V2, Amt2);
19399 // Concatenate the result back
19400 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, V1, V2);
19406 static SDValue LowerXALUO(SDValue Op, SelectionDAG &DAG) {
19407 // Lower the "add/sub/mul with overflow" instruction into a regular instruction plus
19408 // a "setcc" instruction that checks the overflow flag. The "brcond" lowering
19409 // looks for this combo and may remove the "setcc" instruction if the "setcc"
19410 // has only one use.
19411 SDNode *N = Op.getNode();
19412 SDValue LHS = N->getOperand(0);
19413 SDValue RHS = N->getOperand(1);
19414 unsigned BaseOp = 0;
19417 switch (Op.getOpcode()) {
19418 default: llvm_unreachable("Unknown ovf instruction!");
19420 // An add of one will be selected as an INC. Note that INC doesn't
19421 // set CF, so we can't do this for UADDO.
19422 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS))
19424 BaseOp = X86ISD::INC;
19425 Cond = X86::COND_O;
19428 BaseOp = X86ISD::ADD;
19429 Cond = X86::COND_O;
19432 BaseOp = X86ISD::ADD;
19433 Cond = X86::COND_B;
19436 // A subtract of one will be selected as a DEC. Note that DEC doesn't
19437 // set CF, so we can't do this for USUBO.
19438 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS))
19440 BaseOp = X86ISD::DEC;
19441 Cond = X86::COND_O;
19444 BaseOp = X86ISD::SUB;
19445 Cond = X86::COND_O;
19448 BaseOp = X86ISD::SUB;
19449 Cond = X86::COND_B;
19452 BaseOp = N->getValueType(0) == MVT::i8 ? X86ISD::SMUL8 : X86ISD::SMUL;
19453 Cond = X86::COND_O;
19455 case ISD::UMULO: { // i64, i8 = umulo lhs, rhs --> i64, i64, i32 umul lhs,rhs
19456 if (N->getValueType(0) == MVT::i8) {
19457 BaseOp = X86ISD::UMUL8;
19458 Cond = X86::COND_O;
19461 SDVTList VTs = DAG.getVTList(N->getValueType(0), N->getValueType(0),
19463 SDValue Sum = DAG.getNode(X86ISD::UMUL, DL, VTs, LHS, RHS);
19465 SDValue SetCC =
19466 DAG.getNode(X86ISD::SETCC, DL, MVT::i8,
19467 DAG.getConstant(X86::COND_O, MVT::i32),
19468 SDValue(Sum.getNode(), 2));
19470 return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(), Sum, SetCC);
19474 // Also sets EFLAGS.
19475 SDVTList VTs = DAG.getVTList(N->getValueType(0), MVT::i32);
19476 SDValue Sum = DAG.getNode(BaseOp, DL, VTs, LHS, RHS);
19478 SDValue SetCC =
19479 DAG.getNode(X86ISD::SETCC, DL, N->getValueType(1),
19480 DAG.getConstant(Cond, MVT::i32),
19481 SDValue(Sum.getNode(), 1));
19483 return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(), Sum, SetCC);
19486 // Sign extension of the low part of vector elements. This may be used either
19487 // when sign extend instructions are not available or if the vector element
19488 // sizes already match the sign-extended size. If the vector elements are in
19489 // their pre-extended size and sign extend instructions are available, that will
19490 // be handled by LowerSIGN_EXTEND.
19491 SDValue X86TargetLowering::LowerSIGN_EXTEND_INREG(SDValue Op,
19492 SelectionDAG &DAG) const {
19494 EVT ExtraVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
19495 MVT VT = Op.getSimpleValueType();
19497 if (!Subtarget->hasSSE2() || !VT.isVector())
19500 unsigned BitsDiff = VT.getScalarType().getSizeInBits() -
19501 ExtraVT.getScalarType().getSizeInBits();
19503 switch (VT.SimpleTy) {
19504 default: return SDValue();
19507 if (!Subtarget->hasFp256())
19509 if (!Subtarget->hasInt256()) {
19510 // needs to be split
19511 unsigned NumElems = VT.getVectorNumElements();
19513 // Extract the LHS vectors
19514 SDValue LHS = Op.getOperand(0);
19515 SDValue LHS1 = Extract128BitVector(LHS, 0, DAG, dl);
19516 SDValue LHS2 = Extract128BitVector(LHS, NumElems/2, DAG, dl);
19518 MVT EltVT = VT.getVectorElementType();
19519 EVT NewVT = MVT::getVectorVT(EltVT, NumElems/2);
19521 EVT ExtraEltVT = ExtraVT.getVectorElementType();
19522 unsigned ExtraNumElems = ExtraVT.getVectorNumElements();
19523 ExtraVT = EVT::getVectorVT(*DAG.getContext(), ExtraEltVT,
19525 SDValue Extra = DAG.getValueType(ExtraVT);
19527 LHS1 = DAG.getNode(Op.getOpcode(), dl, NewVT, LHS1, Extra);
19528 LHS2 = DAG.getNode(Op.getOpcode(), dl, NewVT, LHS2, Extra);
19530 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, LHS1, LHS2);
19535 SDValue Op0 = Op.getOperand(0);
19537 // This is a sign extension of some low part of vector elements without
19538 // changing the size of the vector elements themselves:
19539 // Shift-Left + Shift-Right-Algebraic.
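// E.g. sign-extending an i8 value kept in an i32 lane uses BitsDiff = 24 and
// computes (int32_t)(x << 24) >> 24.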
19540 SDValue Shl = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, VT, Op0,
19542 return getTargetVShiftByConstNode(X86ISD::VSRAI, dl, VT, Shl, BitsDiff,
19548 /// Returns true if the operand type is exactly twice the native width, and
19549 /// the corresponding cmpxchg8b or cmpxchg16b instruction is available.
19550 /// Used to know whether to use cmpxchg8/16b when expanding atomic operations
19551 /// (otherwise we leave them alone to become __sync_fetch_and_... calls).
19552 bool X86TargetLowering::needsCmpXchgNb(const Type *MemType) const {
19553 unsigned OpWidth = MemType->getPrimitiveSizeInBits();
19555 if (OpWidth == 64)
19556 return !Subtarget->is64Bit(); // FIXME this should be Subtarget.hasCmpxchg8b
19557 else if (OpWidth == 128)
19558 return Subtarget->hasCmpxchg16b();
19563 bool X86TargetLowering::shouldExpandAtomicStoreInIR(StoreInst *SI) const {
19564 return needsCmpXchgNb(SI->getValueOperand()->getType());
19567 // Note: this turns large loads into lock cmpxchg8b/16b.
19568 // FIXME: On 32 bits x86, fild/movq might be faster than lock cmpxchg8b.
19569 bool X86TargetLowering::shouldExpandAtomicLoadInIR(LoadInst *LI) const {
19570 auto PTy = cast<PointerType>(LI->getPointerOperand()->getType());
19571 return needsCmpXchgNb(PTy->getElementType());
19574 bool X86TargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
19575 unsigned NativeWidth = Subtarget->is64Bit() ? 64 : 32;
19576 const Type *MemType = AI->getType();
19578 // If the operand is too big, we must see if cmpxchg8/16b is available
19579 // and default to library calls otherwise.
19580 if (MemType->getPrimitiveSizeInBits() > NativeWidth)
19581 return needsCmpXchgNb(MemType);
19583 AtomicRMWInst::BinOp Op = AI->getOperation();
19586 llvm_unreachable("Unknown atomic operation");
19587 case AtomicRMWInst::Xchg:
19588 case AtomicRMWInst::Add:
19589 case AtomicRMWInst::Sub:
19590 // It's better to use xadd, xsub or xchg for these in all cases.
19592 case AtomicRMWInst::Or:
19593 case AtomicRMWInst::And:
19594 case AtomicRMWInst::Xor:
19595 // If the atomicrmw's result isn't actually used, we can just add a "lock"
19596 // prefix to a normal instruction for these operations.
19597 return !AI->use_empty();
19598 case AtomicRMWInst::Nand:
19599 case AtomicRMWInst::Max:
19600 case AtomicRMWInst::Min:
19601 case AtomicRMWInst::UMax:
19602 case AtomicRMWInst::UMin:
19603 // These always require a non-trivial set of data operations on x86. We must
19604 // use a cmpxchg loop.
19609 static bool hasMFENCE(const X86Subtarget& Subtarget) {
19610 // Use mfence if we have SSE2 or we're on x86-64 (even if we asked for
19611 // no-sse2). There isn't any reason to disable it if the target processor
19612 // supports it.
19613 return Subtarget.hasSSE2() || Subtarget.is64Bit();
19617 X86TargetLowering::lowerIdempotentRMWIntoFencedLoad(AtomicRMWInst *AI) const {
19618 unsigned NativeWidth = Subtarget->is64Bit() ? 64 : 32;
19619 const Type *MemType = AI->getType();
19620 // Accesses larger than the native width are turned into cmpxchg/libcalls, so
19621 // there is no benefit in turning such RMWs into loads, and it is actually
19622 // harmful as it introduces a mfence.
19623 if (MemType->getPrimitiveSizeInBits() > NativeWidth)
19626 auto Builder = IRBuilder<>(AI);
19627 Module *M = Builder.GetInsertBlock()->getParent()->getParent();
19628 auto SynchScope = AI->getSynchScope();
19629 // We must restrict the ordering to avoid generating loads with Release or
19630 // ReleaseAcquire orderings.
19631 auto Order = AtomicCmpXchgInst::getStrongestFailureOrdering(AI->getOrdering());
19632 auto Ptr = AI->getPointerOperand();
19634 // Before the load we need a fence. Here is an example lifted from
19635 // http://www.hpl.hp.com/techreports/2012/HPL-2012-68.pdf showing why a fence
19636 // is required:
19637 // Thread 0:
19638 //  x.store(1, relaxed);
19639 //  r1 = y.fetch_add(0, release);
19640 // Thread 1:
19641 //  y.fetch_add(42, acquire);
19642 //  r2 = x.load(relaxed);
19643 // r1 = r2 = 0 is impossible, but becomes possible if the idempotent rmw is
19644 // lowered to just a load without a fence. A mfence flushes the store buffer,
19645 // making the optimization clearly correct.
19646 // FIXME: it is required if isAtLeastRelease(Order) but it is not clear
19647 // otherwise, we might be able to be more aggressive on relaxed idempotent
19648 // rmw. In practice, they do not look useful, so we don't try to be
19649 // especially clever.
19650 if (SynchScope == SingleThread) {
19651 // FIXME: we could just insert an X86ISD::MEMBARRIER here, except we are at
19652 // the IR level, so we must wrap it in an intrinsic.
19654 } else if (hasMFENCE(*Subtarget)) {
19655 Function *MFence = llvm::Intrinsic::getDeclaration(M,
19656 Intrinsic::x86_sse2_mfence);
19657 Builder.CreateCall(MFence);
19659 // FIXME: it might make sense to use a locked operation here but on a
19660 // different cache-line to prevent cache-line bouncing. In practice it
19661 // is probably a small win, and x86 processors without mfence are rare
19662 // enough that we do not bother.
19666 // Finally we can emit the atomic load.
19667 LoadInst *Loaded = Builder.CreateAlignedLoad(Ptr,
19668 AI->getType()->getPrimitiveSizeInBits());
19669 Loaded->setAtomic(Order, SynchScope);
19670 AI->replaceAllUsesWith(Loaded);
19671 AI->eraseFromParent();
19675 static SDValue LowerATOMIC_FENCE(SDValue Op, const X86Subtarget *Subtarget,
19676 SelectionDAG &DAG) {
19678 AtomicOrdering FenceOrdering = static_cast<AtomicOrdering>(
19679 cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue());
19680 SynchronizationScope FenceScope = static_cast<SynchronizationScope>(
19681 cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue());
19683 // The only fence that needs an instruction is a sequentially-consistent
19684 // cross-thread fence.
19685 if (FenceOrdering == SequentiallyConsistent && FenceScope == CrossThread) {
19686 if (hasMFENCE(*Subtarget))
19687 return DAG.getNode(X86ISD::MFENCE, dl, MVT::Other, Op.getOperand(0));
19689 SDValue Chain = Op.getOperand(0);
19690 SDValue Zero = DAG.getConstant(0, MVT::i32);
19691 SDValue Ops[] = {
19692 DAG.getRegister(X86::ESP, MVT::i32), // Base
19693 DAG.getTargetConstant(1, MVT::i8), // Scale
19694 DAG.getRegister(0, MVT::i32), // Index
19695 DAG.getTargetConstant(0, MVT::i32), // Disp
19696 DAG.getRegister(0, MVT::i32), // Segment.
19697 Zero,
19698 Chain
19699 };
19700 SDNode *Res = DAG.getMachineNode(X86::OR32mrLocked, dl, MVT::Other, Ops);
19701 return SDValue(Res, 0);
19704 // MEMBARRIER is a compiler barrier; it codegens to a no-op.
19705 return DAG.getNode(X86ISD::MEMBARRIER, dl, MVT::Other, Op.getOperand(0));
19708 static SDValue LowerCMP_SWAP(SDValue Op, const X86Subtarget *Subtarget,
19709 SelectionDAG &DAG) {
19710 MVT T = Op.getSimpleValueType();
19714 switch(T.SimpleTy) {
19715 default: llvm_unreachable("Invalid value type!");
19716 case MVT::i8: Reg = X86::AL; size = 1; break;
19717 case MVT::i16: Reg = X86::AX; size = 2; break;
19718 case MVT::i32: Reg = X86::EAX; size = 4; break;
19720 assert(Subtarget->is64Bit() && "Node not type legal!");
19721 Reg = X86::RAX; size = 8;
19724 SDValue cpIn = DAG.getCopyToReg(Op.getOperand(0), DL, Reg,
19725 Op.getOperand(2), SDValue());
19726 SDValue Ops[] = { cpIn.getValue(0),
19729 DAG.getTargetConstant(size, MVT::i8),
19730 cpIn.getValue(1) };
19731 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue);
19732 MachineMemOperand *MMO = cast<AtomicSDNode>(Op)->getMemOperand();
19733 SDValue Result = DAG.getMemIntrinsicNode(X86ISD::LCMPXCHG_DAG, DL, Tys,
19734 Ops, T, MMO);
19736 SDValue cpOut =
19737 DAG.getCopyFromReg(Result.getValue(0), DL, Reg, T, Result.getValue(1));
19738 SDValue EFLAGS = DAG.getCopyFromReg(cpOut.getValue(1), DL, X86::EFLAGS,
19739 MVT::i32, cpOut.getValue(2));
19740 SDValue Success = DAG.getNode(X86ISD::SETCC, DL, Op->getValueType(1),
19741 DAG.getConstant(X86::COND_E, MVT::i8), EFLAGS);
19743 DAG.ReplaceAllUsesOfValueWith(Op.getValue(0), cpOut);
19744 DAG.ReplaceAllUsesOfValueWith(Op.getValue(1), Success);
19745 DAG.ReplaceAllUsesOfValueWith(Op.getValue(2), EFLAGS.getValue(1));
19749 static SDValue LowerBITCAST(SDValue Op, const X86Subtarget *Subtarget,
19750 SelectionDAG &DAG) {
19751 MVT SrcVT = Op.getOperand(0).getSimpleValueType();
19752 MVT DstVT = Op.getSimpleValueType();
19754 if (SrcVT == MVT::v2i32 || SrcVT == MVT::v4i16 || SrcVT == MVT::v8i8) {
19755 assert(Subtarget->hasSSE2() && "Requires at least SSE2!");
19756 if (DstVT != MVT::f64)
19757 // This conversion needs to be expanded.
19760 SDValue InVec = Op->getOperand(0);
19762 unsigned NumElts = SrcVT.getVectorNumElements();
19763 EVT SVT = SrcVT.getVectorElementType();
19765 // Widen the input vector in the case of MVT::v2i32.
19766 // Example: from MVT::v2i32 to MVT::v4i32.
19767 SmallVector<SDValue, 16> Elts;
19768 for (unsigned i = 0, e = NumElts; i != e; ++i)
19769 Elts.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, SVT, InVec,
19770 DAG.getIntPtrConstant(i)));
19772 // Explicitly mark the extra elements as Undef.
19773 Elts.append(NumElts, DAG.getUNDEF(SVT));
19775 EVT NewVT = EVT::getVectorVT(*DAG.getContext(), SVT, NumElts * 2);
19776 SDValue BV = DAG.getNode(ISD::BUILD_VECTOR, dl, NewVT, Elts);
19777 SDValue ToV2F64 = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, BV);
19778 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, ToV2F64,
19779 DAG.getIntPtrConstant(0));
19782 assert(Subtarget->is64Bit() && !Subtarget->hasSSE2() &&
19783 Subtarget->hasMMX() && "Unexpected custom BITCAST");
19784 assert((DstVT == MVT::i64 ||
19785 (DstVT.isVector() && DstVT.getSizeInBits()==64)) &&
19786 "Unexpected custom BITCAST");
19787 // i64 <=> MMX conversions are Legal.
19788 if (SrcVT==MVT::i64 && DstVT.isVector())
19790 if (DstVT==MVT::i64 && SrcVT.isVector())
19792 // MMX <=> MMX conversions are Legal.
19793 if (SrcVT.isVector() && DstVT.isVector())
19795 // All other conversions need to be expanded.
19799 static SDValue LowerCTPOP(SDValue Op, const X86Subtarget *Subtarget,
19800 SelectionDAG &DAG) {
19801 SDNode *Node = Op.getNode();
19804 Op = Op.getOperand(0);
19805 EVT VT = Op.getValueType();
19806 assert((VT.is128BitVector() || VT.is256BitVector()) &&
19807 "CTPOP lowering only implemented for 128/256-bit wide vector types");
19809 unsigned NumElts = VT.getVectorNumElements();
19810 EVT EltVT = VT.getVectorElementType();
19811 unsigned Len = EltVT.getSizeInBits();
19813 // This is the vectorized version of the "best" algorithm from
19814 // http://graphics.stanford.edu/~seander/bithacks.html#CountBitsSetParallel
19815 // with a minor tweak to use a series of adds + shifts instead of vector
19816 // multiplications. Implemented for the v2i64, v4i64, v4i32, v8i32 types:
19818 // v2i64, v4i64, v4i32 => Only profitable w/ popcnt disabled
19819 // v8i32 => Always profitable
19821 // FIXME: There are a couple of possible improvements:
19823 // 1) Support for i8 and i16 vectors (needs measurements if popcnt enabled).
19824 // 2) Use strategies from http://wm.ite.pl/articles/sse-popcount.html
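//
// For reference, a scalar sketch of the same steps for the 32-bit case, with
// the multiply already replaced by the adds + shifts described further down:
//
//   uint32_t popcount32(uint32_t v) {
//     v = v - ((v >> 1) & 0x55555555);                 // 2-bit partial sums
//     v = (v & 0x33333333) + ((v >> 2) & 0x33333333);  // 4-bit partial sums
//     v = (v + (v >> 4)) & 0x0F0F0F0F;                 // 8-bit partial sums
//     v = v + (v >> 8);
//     v = v + (v >> 16);
//     return v & 0x3F;
//   }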
19826 assert(EltVT.isInteger() && (Len == 32 || Len == 64) && Len % 8 == 0 &&
19827 "CTPOP not implemented for this vector element type.");
19829 // X86 canonicalizes ANDs to vXi64, so generate the appropriate bitcasts to avoid
19830 // extra legalization.
19831 bool NeedsBitcast = EltVT == MVT::i32;
19832 MVT BitcastVT = VT.is256BitVector() ? MVT::v4i64 : MVT::v2i64;
19834 SDValue Cst55 = DAG.getConstant(APInt::getSplat(Len, APInt(8, 0x55)), EltVT);
19835 SDValue Cst33 = DAG.getConstant(APInt::getSplat(Len, APInt(8, 0x33)), EltVT);
19836 SDValue Cst0F = DAG.getConstant(APInt::getSplat(Len, APInt(8, 0x0F)), EltVT);
19838 // v = v - ((v >> 1) & 0x55555555...)
19839 SmallVector<SDValue, 8> Ones(NumElts, DAG.getConstant(1, EltVT));
19840 SDValue OnesV = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Ones);
19841 SDValue Srl = DAG.getNode(ISD::SRL, dl, VT, Op, OnesV);
19843 Srl = DAG.getNode(ISD::BITCAST, dl, BitcastVT, Srl);
19845 SmallVector<SDValue, 8> Mask55(NumElts, Cst55);
19846 SDValue M55 = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Mask55);
19848 M55 = DAG.getNode(ISD::BITCAST, dl, BitcastVT, M55);
19850 SDValue And = DAG.getNode(ISD::AND, dl, Srl.getValueType(), Srl, M55);
19851 if (VT != And.getValueType())
19852 And = DAG.getNode(ISD::BITCAST, dl, VT, And);
19853 SDValue Sub = DAG.getNode(ISD::SUB, dl, VT, Op, And);
19855 // v = (v & 0x33333333...) + ((v >> 2) & 0x33333333...)
19856 SmallVector<SDValue, 8> Mask33(NumElts, Cst33);
19857 SDValue M33 = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Mask33);
19858 SmallVector<SDValue, 8> Twos(NumElts, DAG.getConstant(2, EltVT));
19859 SDValue TwosV = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Twos);
19861 Srl = DAG.getNode(ISD::SRL, dl, VT, Sub, TwosV);
19862 if (NeedsBitcast) {
19863 Srl = DAG.getNode(ISD::BITCAST, dl, BitcastVT, Srl);
19864 M33 = DAG.getNode(ISD::BITCAST, dl, BitcastVT, M33);
19865 Sub = DAG.getNode(ISD::BITCAST, dl, BitcastVT, Sub);
19868 SDValue AndRHS = DAG.getNode(ISD::AND, dl, M33.getValueType(), Srl, M33);
19869 SDValue AndLHS = DAG.getNode(ISD::AND, dl, M33.getValueType(), Sub, M33);
19870 if (VT != AndRHS.getValueType()) {
19871 AndRHS = DAG.getNode(ISD::BITCAST, dl, VT, AndRHS);
19872 AndLHS = DAG.getNode(ISD::BITCAST, dl, VT, AndLHS);
19874 SDValue Add = DAG.getNode(ISD::ADD, dl, VT, AndLHS, AndRHS);
19876 // v = (v + (v >> 4)) & 0x0F0F0F0F...
19877 SmallVector<SDValue, 8> Fours(NumElts, DAG.getConstant(4, EltVT));
19878 SDValue FoursV = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Fours);
19879 Srl = DAG.getNode(ISD::SRL, dl, VT, Add, FoursV);
19880 Add = DAG.getNode(ISD::ADD, dl, VT, Add, Srl);
19882 SmallVector<SDValue, 8> Mask0F(NumElts, Cst0F);
19883 SDValue M0F = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Mask0F);
19884 if (NeedsBitcast) {
19885 Add = DAG.getNode(ISD::BITCAST, dl, BitcastVT, Add);
19886 M0F = DAG.getNode(ISD::BITCAST, dl, BitcastVT, M0F);
19888 And = DAG.getNode(ISD::AND, dl, M0F.getValueType(), Add, M0F);
19889 if (VT != And.getValueType())
19890 And = DAG.getNode(ISD::BITCAST, dl, VT, And);
19892 // The algorithm mentioned above uses:
19893 // v = (v * 0x01010101...) >> (Len - 8)
19895 // Change it to use vector adds + vector shifts which yield faster results on
19896 // Haswell than using vector integer multiplication.
19898 // For i32 elements:
19899 // v = v + (v >> 8)
19900 // v = v + (v >> 16)
19902 // For i64 elements:
19903 // v = v + (v >> 8)
19904 // v = v + (v >> 16)
19905 // v = v + (v >> 32)
19908 SmallVector<SDValue, 8> Csts;
19909 for (unsigned i = 8; i <= Len/2; i *= 2) {
19910 Csts.assign(NumElts, DAG.getConstant(i, EltVT));
19911 SDValue CstsV = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Csts);
19912 Srl = DAG.getNode(ISD::SRL, dl, VT, Add, CstsV);
19913 Add = DAG.getNode(ISD::ADD, dl, VT, Add, Srl);
19917 // The result is in the least significant 6 bits for i32 and 7 bits for i64.
19918 SDValue Cst3F = DAG.getConstant(APInt(Len, Len == 32 ? 0x3F : 0x7F), EltVT);
19919 SmallVector<SDValue, 8> Cst3FV(NumElts, Cst3F);
19920 SDValue M3F = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Cst3FV);
19921 if (NeedsBitcast) {
19922 Add = DAG.getNode(ISD::BITCAST, dl, BitcastVT, Add);
19923 M3F = DAG.getNode(ISD::BITCAST, dl, BitcastVT, M3F);
19925 And = DAG.getNode(ISD::AND, dl, M3F.getValueType(), Add, M3F);
19926 if (VT != And.getValueType())
19927 And = DAG.getNode(ISD::BITCAST, dl, VT, And);
19932 static SDValue LowerLOAD_SUB(SDValue Op, SelectionDAG &DAG) {
19933 SDNode *Node = Op.getNode();
19935 EVT T = Node->getValueType(0);
19936 SDValue negOp = DAG.getNode(ISD::SUB, dl, T,
19937 DAG.getConstant(0, T), Node->getOperand(2));
19938 return DAG.getAtomic(ISD::ATOMIC_LOAD_ADD, dl,
19939 cast<AtomicSDNode>(Node)->getMemoryVT(),
19940 Node->getOperand(0),
19941 Node->getOperand(1), negOp,
19942 cast<AtomicSDNode>(Node)->getMemOperand(),
19943 cast<AtomicSDNode>(Node)->getOrdering(),
19944 cast<AtomicSDNode>(Node)->getSynchScope());
19947 static SDValue LowerATOMIC_STORE(SDValue Op, SelectionDAG &DAG) {
19948 SDNode *Node = Op.getNode();
19950 EVT VT = cast<AtomicSDNode>(Node)->getMemoryVT();
19952 // Convert seq_cst store -> xchg
19953 // Convert wide store -> swap (-> cmpxchg8b/cmpxchg16b)
19954 // FIXME: On 32-bit, store -> fist or movq would be more efficient
19955 // (The only way to get a 16-byte store is cmpxchg16b)
19956 // FIXME: 16-byte ATOMIC_SWAP isn't actually hooked up at the moment.
19957 if (cast<AtomicSDNode>(Node)->getOrdering() == SequentiallyConsistent ||
19958 !DAG.getTargetLoweringInfo().isTypeLegal(VT)) {
19959 SDValue Swap = DAG.getAtomic(ISD::ATOMIC_SWAP, dl,
19960 cast<AtomicSDNode>(Node)->getMemoryVT(),
19961 Node->getOperand(0),
19962 Node->getOperand(1), Node->getOperand(2),
19963 cast<AtomicSDNode>(Node)->getMemOperand(),
19964 cast<AtomicSDNode>(Node)->getOrdering(),
19965 cast<AtomicSDNode>(Node)->getSynchScope());
19966 return Swap.getValue(1);
19968 // Other atomic stores have a simple pattern.
19972 static SDValue LowerADDC_ADDE_SUBC_SUBE(SDValue Op, SelectionDAG &DAG) {
19973 EVT VT = Op.getNode()->getSimpleValueType(0);
19975 // Let legalize expand this if it isn't a legal type yet.
19976 if (!DAG.getTargetLoweringInfo().isTypeLegal(VT))
19979 SDVTList VTs = DAG.getVTList(VT, MVT::i32);
19982 bool ExtraOp = false;
19983 switch (Op.getOpcode()) {
19984 default: llvm_unreachable("Invalid code");
19985 case ISD::ADDC: Opc = X86ISD::ADD; break;
19986 case ISD::ADDE: Opc = X86ISD::ADC; ExtraOp = true; break;
19987 case ISD::SUBC: Opc = X86ISD::SUB; break;
19988 case ISD::SUBE: Opc = X86ISD::SBB; ExtraOp = true; break;
19992 return DAG.getNode(Opc, SDLoc(Op), VTs, Op.getOperand(0),
19994 return DAG.getNode(Opc, SDLoc(Op), VTs, Op.getOperand(0),
19995 Op.getOperand(1), Op.getOperand(2));
19998 static SDValue LowerFSINCOS(SDValue Op, const X86Subtarget *Subtarget,
19999 SelectionDAG &DAG) {
20000 assert(Subtarget->isTargetDarwin() && Subtarget->is64Bit());
20002 // For MacOSX, we want to call an alternative entry point: __sincos_stret,
20003 // which returns the values as { float, float } (in XMM0) or
20004 // { double, double } (which is returned in XMM0, XMM1).
20006 SDValue Arg = Op.getOperand(0);
20007 EVT ArgVT = Arg.getValueType();
20008 Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
20010 TargetLowering::ArgListTy Args;
20011 TargetLowering::ArgListEntry Entry;
20015 Entry.isSExt = false;
20016 Entry.isZExt = false;
20017 Args.push_back(Entry);
20019 bool isF64 = ArgVT == MVT::f64;
20020 // Only optimize x86_64 for now. i386 is a bit messy. For f32,
20021 // the small struct {f32, f32} is returned in (eax, edx). For f64,
20022 // the results are returned via SRet in memory.
20023 const char *LibcallName = isF64 ? "__sincos_stret" : "__sincosf_stret";
20024 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
20025 SDValue Callee = DAG.getExternalSymbol(LibcallName, TLI.getPointerTy());
20027 Type *RetTy = isF64
20028 ? (Type*)StructType::get(ArgTy, ArgTy, nullptr)
20029 : (Type*)VectorType::get(ArgTy, 4);
20031 TargetLowering::CallLoweringInfo CLI(DAG);
20032 CLI.setDebugLoc(dl).setChain(DAG.getEntryNode())
20033 .setCallee(CallingConv::C, RetTy, Callee, std::move(Args), 0);
20035 std::pair<SDValue, SDValue> CallResult = TLI.LowerCallTo(CLI);
20038 // Returned in xmm0 and xmm1.
20039 return CallResult.first;
20041 // Returned in bits 0:31 and 32:63 of xmm0.
20042 SDValue SinVal = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, ArgVT,
20043 CallResult.first, DAG.getIntPtrConstant(0));
20044 SDValue CosVal = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, ArgVT,
20045 CallResult.first, DAG.getIntPtrConstant(1));
20046 SDVTList Tys = DAG.getVTList(ArgVT, ArgVT);
20047 return DAG.getNode(ISD::MERGE_VALUES, dl, Tys, SinVal, CosVal);
20050 /// LowerOperation - Provide custom lowering hooks for some operations.
20052 SDValue X86TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
20053 switch (Op.getOpcode()) {
20054 default: llvm_unreachable("Should not custom lower this!");
20055 case ISD::SIGN_EXTEND_INREG: return LowerSIGN_EXTEND_INREG(Op,DAG);
20056 case ISD::ATOMIC_FENCE: return LowerATOMIC_FENCE(Op, Subtarget, DAG);
20057 case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS:
20058 return LowerCMP_SWAP(Op, Subtarget, DAG);
20059 case ISD::CTPOP: return LowerCTPOP(Op, Subtarget, DAG);
20060 case ISD::ATOMIC_LOAD_SUB: return LowerLOAD_SUB(Op,DAG);
20061 case ISD::ATOMIC_STORE: return LowerATOMIC_STORE(Op,DAG);
20062 case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG);
20063 case ISD::CONCAT_VECTORS: return LowerCONCAT_VECTORS(Op, DAG);
20064 case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG);
20065 case ISD::VSELECT: return LowerVSELECT(Op, DAG);
20066 case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG);
20067 case ISD::INSERT_VECTOR_ELT: return LowerINSERT_VECTOR_ELT(Op, DAG);
20068 case ISD::EXTRACT_SUBVECTOR: return LowerEXTRACT_SUBVECTOR(Op,Subtarget,DAG);
20069 case ISD::INSERT_SUBVECTOR: return LowerINSERT_SUBVECTOR(Op, Subtarget,DAG);
20070 case ISD::SCALAR_TO_VECTOR: return LowerSCALAR_TO_VECTOR(Op, DAG);
20071 case ISD::ConstantPool: return LowerConstantPool(Op, DAG);
20072 case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG);
20073 case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG);
20074 case ISD::ExternalSymbol: return LowerExternalSymbol(Op, DAG);
20075 case ISD::BlockAddress: return LowerBlockAddress(Op, DAG);
20076 case ISD::SHL_PARTS:
20077 case ISD::SRA_PARTS:
20078 case ISD::SRL_PARTS: return LowerShiftParts(Op, DAG);
20079 case ISD::SINT_TO_FP: return LowerSINT_TO_FP(Op, DAG);
20080 case ISD::UINT_TO_FP: return LowerUINT_TO_FP(Op, DAG);
20081 case ISD::TRUNCATE: return LowerTRUNCATE(Op, DAG);
20082 case ISD::ZERO_EXTEND: return LowerZERO_EXTEND(Op, Subtarget, DAG);
20083 case ISD::SIGN_EXTEND: return LowerSIGN_EXTEND(Op, Subtarget, DAG);
20084 case ISD::ANY_EXTEND: return LowerANY_EXTEND(Op, Subtarget, DAG);
20085 case ISD::FP_TO_SINT: return LowerFP_TO_SINT(Op, DAG);
20086 case ISD::FP_TO_UINT: return LowerFP_TO_UINT(Op, DAG);
20087 case ISD::FP_EXTEND: return LowerFP_EXTEND(Op, DAG);
20088 case ISD::LOAD: return LowerExtendedLoad(Op, Subtarget, DAG);
20090 case ISD::FNEG: return LowerFABSorFNEG(Op, DAG);
20091 case ISD::FCOPYSIGN: return LowerFCOPYSIGN(Op, DAG);
20092 case ISD::FGETSIGN: return LowerFGETSIGN(Op, DAG);
20093 case ISD::SETCC: return LowerSETCC(Op, DAG);
20094 case ISD::SELECT: return LowerSELECT(Op, DAG);
20095 case ISD::BRCOND: return LowerBRCOND(Op, DAG);
20096 case ISD::JumpTable: return LowerJumpTable(Op, DAG);
20097 case ISD::VASTART: return LowerVASTART(Op, DAG);
20098 case ISD::VAARG: return LowerVAARG(Op, DAG);
20099 case ISD::VACOPY: return LowerVACOPY(Op, Subtarget, DAG);
20100 case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, Subtarget, DAG);
20101 case ISD::INTRINSIC_VOID:
20102 case ISD::INTRINSIC_W_CHAIN: return LowerINTRINSIC_W_CHAIN(Op, Subtarget, DAG);
20103 case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG);
20104 case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG);
20105 case ISD::FRAME_TO_ARGS_OFFSET:
20106 return LowerFRAME_TO_ARGS_OFFSET(Op, DAG);
20107 case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG);
20108 case ISD::EH_RETURN: return LowerEH_RETURN(Op, DAG);
20109 case ISD::EH_SJLJ_SETJMP: return lowerEH_SJLJ_SETJMP(Op, DAG);
20110 case ISD::EH_SJLJ_LONGJMP: return lowerEH_SJLJ_LONGJMP(Op, DAG);
20111 case ISD::INIT_TRAMPOLINE: return LowerINIT_TRAMPOLINE(Op, DAG);
20112 case ISD::ADJUST_TRAMPOLINE: return LowerADJUST_TRAMPOLINE(Op, DAG);
20113 case ISD::FLT_ROUNDS_: return LowerFLT_ROUNDS_(Op, DAG);
20114 case ISD::CTLZ: return LowerCTLZ(Op, DAG);
20115 case ISD::CTLZ_ZERO_UNDEF: return LowerCTLZ_ZERO_UNDEF(Op, DAG);
20116 case ISD::CTTZ: return LowerCTTZ(Op, DAG);
20117 case ISD::MUL: return LowerMUL(Op, Subtarget, DAG);
20118 case ISD::UMUL_LOHI:
20119 case ISD::SMUL_LOHI: return LowerMUL_LOHI(Op, Subtarget, DAG);
20122 case ISD::SHL: return LowerShift(Op, Subtarget, DAG);
20128 case ISD::UMULO: return LowerXALUO(Op, DAG);
20129 case ISD::READCYCLECOUNTER: return LowerREADCYCLECOUNTER(Op, Subtarget,DAG);
20130 case ISD::BITCAST: return LowerBITCAST(Op, Subtarget, DAG);
20134 case ISD::SUBE: return LowerADDC_ADDE_SUBC_SUBE(Op, DAG);
20135 case ISD::ADD: return LowerADD(Op, DAG);
20136 case ISD::SUB: return LowerSUB(Op, DAG);
20137 case ISD::FSINCOS: return LowerFSINCOS(Op, Subtarget, DAG);
20141 /// ReplaceNodeResults - Replace a node with an illegal result type
20142 /// with a new node built out of custom code.
20143 void X86TargetLowering::ReplaceNodeResults(SDNode *N,
20144 SmallVectorImpl<SDValue>&Results,
20145 SelectionDAG &DAG) const {
20147 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
20148 switch (N->getOpcode()) {
20150 llvm_unreachable("Do not know how to custom type legalize this operation!");
20151 // We might have generated v2f32 FMIN/FMAX operations. Widen them to v4f32.
20152 case X86ISD::FMINC:
20154 case X86ISD::FMAXC:
20155 case X86ISD::FMAX: {
20156 EVT VT = N->getValueType(0);
20157 if (VT != MVT::v2f32)
20158 llvm_unreachable("Unexpected type (!= v2f32) on FMIN/FMAX.");
20159 SDValue UNDEF = DAG.getUNDEF(VT);
20160 SDValue LHS = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4f32,
20161 N->getOperand(0), UNDEF);
20162 SDValue RHS = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4f32,
20163 N->getOperand(1), UNDEF);
20164 Results.push_back(DAG.getNode(N->getOpcode(), dl, MVT::v4f32, LHS, RHS));
20167 case ISD::SIGN_EXTEND_INREG:
20172 // We don't want to expand or promote these.
20179 case ISD::UDIVREM: {
20180 SDValue V = LowerWin64_i128OP(SDValue(N,0), DAG);
20181 Results.push_back(V);
20184 case ISD::FP_TO_SINT:
20185 case ISD::FP_TO_UINT: {
20186 bool IsSigned = N->getOpcode() == ISD::FP_TO_SINT;
20188 if (!IsSigned && !isIntegerTypeFTOL(SDValue(N, 0).getValueType()))
20191 std::pair<SDValue,SDValue> Vals =
20192 FP_TO_INTHelper(SDValue(N, 0), DAG, IsSigned, /*IsReplace=*/ true);
20193 SDValue FIST = Vals.first, StackSlot = Vals.second;
20194 if (FIST.getNode()) {
20195 EVT VT = N->getValueType(0);
20196 // Return a load from the stack slot.
20197 if (StackSlot.getNode())
20198 Results.push_back(DAG.getLoad(VT, dl, FIST, StackSlot,
20199 MachinePointerInfo(),
20200 false, false, false, 0));
20202 Results.push_back(FIST);
20206 case ISD::UINT_TO_FP: {
20207 assert(Subtarget->hasSSE2() && "Requires at least SSE2!");
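// v2i32 -> v2f32 uint_to_fp is lowered with the 2^52 bias trick: zero-extend
// each i32 lane to i64, OR in the bit pattern of the double 2^52
// (0x4330000000000000), which places the integer exactly in the mantissa of
// 2^52 + x, then subtract 2^52 to recover x as a double and narrow the
// v2f64 result to v2f32 with VFPROUND.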
20208 if (N->getOperand(0).getValueType() != MVT::v2i32 ||
20209 N->getValueType(0) != MVT::v2f32)
20211 SDValue ZExtIn = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v2i64,
20213 SDValue Bias = DAG.getConstantFP(BitsToDouble(0x4330000000000000ULL),
20215 SDValue VBias = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v2f64, Bias, Bias);
20216 SDValue Or = DAG.getNode(ISD::OR, dl, MVT::v2i64, ZExtIn,
20217 DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, VBias));
20218 Or = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Or);
20219 SDValue Sub = DAG.getNode(ISD::FSUB, dl, MVT::v2f64, Or, VBias);
20220 Results.push_back(DAG.getNode(X86ISD::VFPROUND, dl, MVT::v4f32, Sub));
20223 case ISD::FP_ROUND: {
20224 if (!TLI.isTypeLegal(N->getOperand(0).getValueType()))
20226 SDValue V = DAG.getNode(X86ISD::VFPROUND, dl, MVT::v4f32, N->getOperand(0));
20227 Results.push_back(V);
20230 case ISD::INTRINSIC_W_CHAIN: {
20231 unsigned IntNo = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
switch (IntNo) {
20233 default : llvm_unreachable("Do not know how to custom type "
20234 "legalize this intrinsic operation!");
20235 case Intrinsic::x86_rdtsc:
20236 return getReadTimeStampCounter(N, dl, X86ISD::RDTSC_DAG, DAG, Subtarget,
20238 case Intrinsic::x86_rdtscp:
20239 return getReadTimeStampCounter(N, dl, X86ISD::RDTSCP_DAG, DAG, Subtarget,
20241 case Intrinsic::x86_rdpmc:
20242 return getReadPerformanceCounter(N, dl, DAG, Subtarget, Results);
20245 case ISD::READCYCLECOUNTER: {
20246 return getReadTimeStampCounter(N, dl, X86ISD::RDTSC_DAG, DAG, Subtarget,
20249 case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS: {
20250 EVT T = N->getValueType(0);
20251 assert((T == MVT::i64 || T == MVT::i128) && "can only expand cmpxchg pair");
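// Wide compare-and-swap is expanded to an LCMPXCHG8_DAG/LCMPXCHG16_DAG node
// (i.e. CMPXCHG8B/CMPXCHG16B), whose fixed register protocol is: expected
// value in EDX:EAX (RDX:RAX for i128), desired value in ECX:EBX (RCX:RBX),
// the original memory value returned in EDX:EAX (RDX:RAX), and ZF set iff
// the exchange succeeded. The code below marshals the two halves into those
// registers, emits the memory intrinsic, and rebuilds the wide result plus
// the success flag from EFLAGS.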
20252 bool Regs64bit = T == MVT::i128;
20253 EVT HalfT = Regs64bit ? MVT::i64 : MVT::i32;
20254 SDValue cpInL, cpInH;
20255 cpInL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(2),
20256 DAG.getConstant(0, HalfT));
20257 cpInH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(2),
20258 DAG.getConstant(1, HalfT));
20259 cpInL = DAG.getCopyToReg(N->getOperand(0), dl,
20260 Regs64bit ? X86::RAX : X86::EAX,
20262 cpInH = DAG.getCopyToReg(cpInL.getValue(0), dl,
20263 Regs64bit ? X86::RDX : X86::EDX,
20264 cpInH, cpInL.getValue(1));
20265 SDValue swapInL, swapInH;
20266 swapInL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(3),
20267 DAG.getConstant(0, HalfT));
20268 swapInH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(3),
20269 DAG.getConstant(1, HalfT));
20270 swapInL = DAG.getCopyToReg(cpInH.getValue(0), dl,
20271 Regs64bit ? X86::RBX : X86::EBX,
20272 swapInL, cpInH.getValue(1));
20273 swapInH = DAG.getCopyToReg(swapInL.getValue(0), dl,
20274 Regs64bit ? X86::RCX : X86::ECX,
20275 swapInH, swapInL.getValue(1));
20276 SDValue Ops[] = { swapInH.getValue(0),
20278 swapInH.getValue(1) };
20279 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue);
20280 MachineMemOperand *MMO = cast<AtomicSDNode>(N)->getMemOperand();
20281 unsigned Opcode = Regs64bit ? X86ISD::LCMPXCHG16_DAG :
20282 X86ISD::LCMPXCHG8_DAG;
20283 SDValue Result = DAG.getMemIntrinsicNode(Opcode, dl, Tys, Ops, T, MMO);
20284 SDValue cpOutL = DAG.getCopyFromReg(Result.getValue(0), dl,
20285 Regs64bit ? X86::RAX : X86::EAX,
20286 HalfT, Result.getValue(1));
20287 SDValue cpOutH = DAG.getCopyFromReg(cpOutL.getValue(1), dl,
20288 Regs64bit ? X86::RDX : X86::EDX,
20289 HalfT, cpOutL.getValue(2));
20290 SDValue OpsF[] = { cpOutL.getValue(0), cpOutH.getValue(0)};
20292 SDValue EFLAGS = DAG.getCopyFromReg(cpOutH.getValue(1), dl, X86::EFLAGS,
20293 MVT::i32, cpOutH.getValue(2));
20295 DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
20296 DAG.getConstant(X86::COND_E, MVT::i8), EFLAGS);
20297 Success = DAG.getZExtOrTrunc(Success, dl, N->getValueType(1));
20299 Results.push_back(DAG.getNode(ISD::BUILD_PAIR, dl, T, OpsF));
20300 Results.push_back(Success);
20301 Results.push_back(EFLAGS.getValue(1));
20304 case ISD::ATOMIC_SWAP:
20305 case ISD::ATOMIC_LOAD_ADD:
20306 case ISD::ATOMIC_LOAD_SUB:
20307 case ISD::ATOMIC_LOAD_AND:
20308 case ISD::ATOMIC_LOAD_OR:
20309 case ISD::ATOMIC_LOAD_XOR:
20310 case ISD::ATOMIC_LOAD_NAND:
20311 case ISD::ATOMIC_LOAD_MIN:
20312 case ISD::ATOMIC_LOAD_MAX:
20313 case ISD::ATOMIC_LOAD_UMIN:
20314 case ISD::ATOMIC_LOAD_UMAX:
20315 case ISD::ATOMIC_LOAD: {
20316 // Delegate to generic TypeLegalization. Situations we can really handle
20317 // should have already been dealt with by AtomicExpandPass.cpp.
20320 case ISD::BITCAST: {
20321 assert(Subtarget->hasSSE2() && "Requires at least SSE2!");
20322 EVT DstVT = N->getValueType(0);
20323 EVT SrcVT = N->getOperand(0)->getValueType(0);
20325 if (SrcVT != MVT::f64 ||
20326 (DstVT != MVT::v2i32 && DstVT != MVT::v4i16 && DstVT != MVT::v8i8))
20329 unsigned NumElts = DstVT.getVectorNumElements();
20330 EVT SVT = DstVT.getVectorElementType();
20331 EVT WiderVT = EVT::getVectorVT(*DAG.getContext(), SVT, NumElts * 2);
20332 SDValue Expanded = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl,
20333 MVT::v2f64, N->getOperand(0));
20334 SDValue ToVecInt = DAG.getNode(ISD::BITCAST, dl, WiderVT, Expanded);
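// At this point ToVecInt holds the bitcast result as a legal integer vector
// of twice the requested width. Depending on the legalization strategy we
// either return it directly (widening) or extract the low NumElts elements
// and rebuild the requested narrow vector type below.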
20336 if (ExperimentalVectorWideningLegalization) {
20337 // If we are legalizing vectors by widening, we already have the desired
20338 // legal vector type, just return it.
20339 Results.push_back(ToVecInt);
20343 SmallVector<SDValue, 8> Elts;
20344 for (unsigned i = 0, e = NumElts; i != e; ++i)
20345 Elts.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, SVT,
20346 ToVecInt, DAG.getIntPtrConstant(i)));
20348 Results.push_back(DAG.getNode(ISD::BUILD_VECTOR, dl, DstVT, Elts));
20353 const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const {
switch (Opcode) {
20355 default: return nullptr;
20356 case X86ISD::BSF: return "X86ISD::BSF";
20357 case X86ISD::BSR: return "X86ISD::BSR";
20358 case X86ISD::SHLD: return "X86ISD::SHLD";
20359 case X86ISD::SHRD: return "X86ISD::SHRD";
20360 case X86ISD::FAND: return "X86ISD::FAND";
20361 case X86ISD::FANDN: return "X86ISD::FANDN";
20362 case X86ISD::FOR: return "X86ISD::FOR";
20363 case X86ISD::FXOR: return "X86ISD::FXOR";
20364 case X86ISD::FSRL: return "X86ISD::FSRL";
20365 case X86ISD::FILD: return "X86ISD::FILD";
20366 case X86ISD::FILD_FLAG: return "X86ISD::FILD_FLAG";
20367 case X86ISD::FP_TO_INT16_IN_MEM: return "X86ISD::FP_TO_INT16_IN_MEM";
20368 case X86ISD::FP_TO_INT32_IN_MEM: return "X86ISD::FP_TO_INT32_IN_MEM";
20369 case X86ISD::FP_TO_INT64_IN_MEM: return "X86ISD::FP_TO_INT64_IN_MEM";
20370 case X86ISD::FLD: return "X86ISD::FLD";
20371 case X86ISD::FST: return "X86ISD::FST";
20372 case X86ISD::CALL: return "X86ISD::CALL";
20373 case X86ISD::RDTSC_DAG: return "X86ISD::RDTSC_DAG";
20374 case X86ISD::RDTSCP_DAG: return "X86ISD::RDTSCP_DAG";
20375 case X86ISD::RDPMC_DAG: return "X86ISD::RDPMC_DAG";
20376 case X86ISD::BT: return "X86ISD::BT";
20377 case X86ISD::CMP: return "X86ISD::CMP";
20378 case X86ISD::COMI: return "X86ISD::COMI";
20379 case X86ISD::UCOMI: return "X86ISD::UCOMI";
20380 case X86ISD::CMPM: return "X86ISD::CMPM";
20381 case X86ISD::CMPMU: return "X86ISD::CMPMU";
20382 case X86ISD::SETCC: return "X86ISD::SETCC";
20383 case X86ISD::SETCC_CARRY: return "X86ISD::SETCC_CARRY";
20384 case X86ISD::FSETCC: return "X86ISD::FSETCC";
20385 case X86ISD::CMOV: return "X86ISD::CMOV";
20386 case X86ISD::BRCOND: return "X86ISD::BRCOND";
20387 case X86ISD::RET_FLAG: return "X86ISD::RET_FLAG";
20388 case X86ISD::REP_STOS: return "X86ISD::REP_STOS";
20389 case X86ISD::REP_MOVS: return "X86ISD::REP_MOVS";
20390 case X86ISD::GlobalBaseReg: return "X86ISD::GlobalBaseReg";
20391 case X86ISD::Wrapper: return "X86ISD::Wrapper";
20392 case X86ISD::WrapperRIP: return "X86ISD::WrapperRIP";
20393 case X86ISD::PEXTRB: return "X86ISD::PEXTRB";
20394 case X86ISD::PEXTRW: return "X86ISD::PEXTRW";
20395 case X86ISD::INSERTPS: return "X86ISD::INSERTPS";
20396 case X86ISD::PINSRB: return "X86ISD::PINSRB";
20397 case X86ISD::PINSRW: return "X86ISD::PINSRW";
20398 case X86ISD::PSHUFB: return "X86ISD::PSHUFB";
20399 case X86ISD::ANDNP: return "X86ISD::ANDNP";
20400 case X86ISD::PSIGN: return "X86ISD::PSIGN";
20401 case X86ISD::BLENDI: return "X86ISD::BLENDI";
20402 case X86ISD::SHRUNKBLEND: return "X86ISD::SHRUNKBLEND";
20403 case X86ISD::SUBUS: return "X86ISD::SUBUS";
20404 case X86ISD::HADD: return "X86ISD::HADD";
20405 case X86ISD::HSUB: return "X86ISD::HSUB";
20406 case X86ISD::FHADD: return "X86ISD::FHADD";
20407 case X86ISD::FHSUB: return "X86ISD::FHSUB";
20408 case X86ISD::UMAX: return "X86ISD::UMAX";
20409 case X86ISD::UMIN: return "X86ISD::UMIN";
20410 case X86ISD::SMAX: return "X86ISD::SMAX";
20411 case X86ISD::SMIN: return "X86ISD::SMIN";
20412 case X86ISD::FMAX: return "X86ISD::FMAX";
20413 case X86ISD::FMIN: return "X86ISD::FMIN";
20414 case X86ISD::FMAXC: return "X86ISD::FMAXC";
20415 case X86ISD::FMINC: return "X86ISD::FMINC";
20416 case X86ISD::FRSQRT: return "X86ISD::FRSQRT";
20417 case X86ISD::FRCP: return "X86ISD::FRCP";
20418 case X86ISD::TLSADDR: return "X86ISD::TLSADDR";
20419 case X86ISD::TLSBASEADDR: return "X86ISD::TLSBASEADDR";
20420 case X86ISD::TLSCALL: return "X86ISD::TLSCALL";
20421 case X86ISD::EH_SJLJ_SETJMP: return "X86ISD::EH_SJLJ_SETJMP";
20422 case X86ISD::EH_SJLJ_LONGJMP: return "X86ISD::EH_SJLJ_LONGJMP";
20423 case X86ISD::EH_RETURN: return "X86ISD::EH_RETURN";
20424 case X86ISD::TC_RETURN: return "X86ISD::TC_RETURN";
20425 case X86ISD::FNSTCW16m: return "X86ISD::FNSTCW16m";
20426 case X86ISD::FNSTSW16r: return "X86ISD::FNSTSW16r";
20427 case X86ISD::LCMPXCHG_DAG: return "X86ISD::LCMPXCHG_DAG";
20428 case X86ISD::LCMPXCHG8_DAG: return "X86ISD::LCMPXCHG8_DAG";
20429 case X86ISD::LCMPXCHG16_DAG: return "X86ISD::LCMPXCHG16_DAG";
20430 case X86ISD::VZEXT_MOVL: return "X86ISD::VZEXT_MOVL";
20431 case X86ISD::VZEXT_LOAD: return "X86ISD::VZEXT_LOAD";
20432 case X86ISD::VZEXT: return "X86ISD::VZEXT";
20433 case X86ISD::VSEXT: return "X86ISD::VSEXT";
20434 case X86ISD::VTRUNC: return "X86ISD::VTRUNC";
20435 case X86ISD::VTRUNCM: return "X86ISD::VTRUNCM";
20436 case X86ISD::VINSERT: return "X86ISD::VINSERT";
20437 case X86ISD::VFPEXT: return "X86ISD::VFPEXT";
20438 case X86ISD::VFPROUND: return "X86ISD::VFPROUND";
20439 case X86ISD::VSHLDQ: return "X86ISD::VSHLDQ";
20440 case X86ISD::VSRLDQ: return "X86ISD::VSRLDQ";
20441 case X86ISD::VSHL: return "X86ISD::VSHL";
20442 case X86ISD::VSRL: return "X86ISD::VSRL";
20443 case X86ISD::VSRA: return "X86ISD::VSRA";
20444 case X86ISD::VSHLI: return "X86ISD::VSHLI";
20445 case X86ISD::VSRLI: return "X86ISD::VSRLI";
20446 case X86ISD::VSRAI: return "X86ISD::VSRAI";
20447 case X86ISD::CMPP: return "X86ISD::CMPP";
20448 case X86ISD::PCMPEQ: return "X86ISD::PCMPEQ";
20449 case X86ISD::PCMPGT: return "X86ISD::PCMPGT";
20450 case X86ISD::PCMPEQM: return "X86ISD::PCMPEQM";
20451 case X86ISD::PCMPGTM: return "X86ISD::PCMPGTM";
20452 case X86ISD::ADD: return "X86ISD::ADD";
20453 case X86ISD::SUB: return "X86ISD::SUB";
20454 case X86ISD::ADC: return "X86ISD::ADC";
20455 case X86ISD::SBB: return "X86ISD::SBB";
20456 case X86ISD::SMUL: return "X86ISD::SMUL";
20457 case X86ISD::UMUL: return "X86ISD::UMUL";
20458 case X86ISD::SMUL8: return "X86ISD::SMUL8";
20459 case X86ISD::UMUL8: return "X86ISD::UMUL8";
20460 case X86ISD::SDIVREM8_SEXT_HREG: return "X86ISD::SDIVREM8_SEXT_HREG";
20461 case X86ISD::UDIVREM8_ZEXT_HREG: return "X86ISD::UDIVREM8_ZEXT_HREG";
20462 case X86ISD::INC: return "X86ISD::INC";
20463 case X86ISD::DEC: return "X86ISD::DEC";
20464 case X86ISD::OR: return "X86ISD::OR";
20465 case X86ISD::XOR: return "X86ISD::XOR";
20466 case X86ISD::AND: return "X86ISD::AND";
20467 case X86ISD::BEXTR: return "X86ISD::BEXTR";
20468 case X86ISD::MUL_IMM: return "X86ISD::MUL_IMM";
20469 case X86ISD::PTEST: return "X86ISD::PTEST";
20470 case X86ISD::TESTP: return "X86ISD::TESTP";
20471 case X86ISD::TESTM: return "X86ISD::TESTM";
20472 case X86ISD::TESTNM: return "X86ISD::TESTNM";
20473 case X86ISD::KORTEST: return "X86ISD::KORTEST";
20474 case X86ISD::PACKSS: return "X86ISD::PACKSS";
20475 case X86ISD::PACKUS: return "X86ISD::PACKUS";
20476 case X86ISD::PALIGNR: return "X86ISD::PALIGNR";
20477 case X86ISD::VALIGN: return "X86ISD::VALIGN";
20478 case X86ISD::PSHUFD: return "X86ISD::PSHUFD";
20479 case X86ISD::PSHUFHW: return "X86ISD::PSHUFHW";
20480 case X86ISD::PSHUFLW: return "X86ISD::PSHUFLW";
20481 case X86ISD::SHUFP: return "X86ISD::SHUFP";
20482 case X86ISD::MOVLHPS: return "X86ISD::MOVLHPS";
20483 case X86ISD::MOVLHPD: return "X86ISD::MOVLHPD";
20484 case X86ISD::MOVHLPS: return "X86ISD::MOVHLPS";
20485 case X86ISD::MOVLPS: return "X86ISD::MOVLPS";
20486 case X86ISD::MOVLPD: return "X86ISD::MOVLPD";
20487 case X86ISD::MOVDDUP: return "X86ISD::MOVDDUP";
20488 case X86ISD::MOVSHDUP: return "X86ISD::MOVSHDUP";
20489 case X86ISD::MOVSLDUP: return "X86ISD::MOVSLDUP";
20490 case X86ISD::MOVSD: return "X86ISD::MOVSD";
20491 case X86ISD::MOVSS: return "X86ISD::MOVSS";
20492 case X86ISD::UNPCKL: return "X86ISD::UNPCKL";
20493 case X86ISD::UNPCKH: return "X86ISD::UNPCKH";
20494 case X86ISD::VBROADCAST: return "X86ISD::VBROADCAST";
20495 case X86ISD::VBROADCASTM: return "X86ISD::VBROADCASTM";
20496 case X86ISD::VEXTRACT: return "X86ISD::VEXTRACT";
20497 case X86ISD::VPERMILPI: return "X86ISD::VPERMILPI";
20498 case X86ISD::VPERM2X128: return "X86ISD::VPERM2X128";
20499 case X86ISD::VPERMV: return "X86ISD::VPERMV";
20500 case X86ISD::VPERMV3: return "X86ISD::VPERMV3";
20501 case X86ISD::VPERMIV3: return "X86ISD::VPERMIV3";
20502 case X86ISD::VPERMI: return "X86ISD::VPERMI";
20503 case X86ISD::PMULUDQ: return "X86ISD::PMULUDQ";
20504 case X86ISD::PMULDQ: return "X86ISD::PMULDQ";
20505 case X86ISD::VASTART_SAVE_XMM_REGS: return "X86ISD::VASTART_SAVE_XMM_REGS";
20506 case X86ISD::VAARG_64: return "X86ISD::VAARG_64";
20507 case X86ISD::WIN_ALLOCA: return "X86ISD::WIN_ALLOCA";
20508 case X86ISD::MEMBARRIER: return "X86ISD::MEMBARRIER";
20509 case X86ISD::SEG_ALLOCA: return "X86ISD::SEG_ALLOCA";
20510 case X86ISD::WIN_FTOL: return "X86ISD::WIN_FTOL";
20511 case X86ISD::SAHF: return "X86ISD::SAHF";
20512 case X86ISD::RDRAND: return "X86ISD::RDRAND";
20513 case X86ISD::RDSEED: return "X86ISD::RDSEED";
20514 case X86ISD::FMADD: return "X86ISD::FMADD";
20515 case X86ISD::FMSUB: return "X86ISD::FMSUB";
20516 case X86ISD::FNMADD: return "X86ISD::FNMADD";
20517 case X86ISD::FNMSUB: return "X86ISD::FNMSUB";
20518 case X86ISD::FMADDSUB: return "X86ISD::FMADDSUB";
20519 case X86ISD::FMSUBADD: return "X86ISD::FMSUBADD";
20520 case X86ISD::PCMPESTRI: return "X86ISD::PCMPESTRI";
20521 case X86ISD::PCMPISTRI: return "X86ISD::PCMPISTRI";
20522 case X86ISD::XTEST: return "X86ISD::XTEST";
20523 case X86ISD::COMPRESS: return "X86ISD::COMPRESS";
20524 case X86ISD::EXPAND: return "X86ISD::EXPAND";
20525 case X86ISD::SELECT: return "X86ISD::SELECT";
20526 case X86ISD::ADDSUB: return "X86ISD::ADDSUB";
20527 case X86ISD::RCP28: return "X86ISD::RCP28";
20528 case X86ISD::RSQRT28: return "X86ISD::RSQRT28";
20529 case X86ISD::FADD_RND: return "X86ISD::FADD_RND";
20530 case X86ISD::FSUB_RND: return "X86ISD::FSUB_RND";
20531 case X86ISD::FMUL_RND: return "X86ISD::FMUL_RND";
20532 case X86ISD::FDIV_RND: return "X86ISD::FDIV_RND";
20536 // isLegalAddressingMode - Return true if the addressing mode represented
20537 // by AM is legal for this target, for a load/store of the specified type.
20538 bool X86TargetLowering::isLegalAddressingMode(const AddrMode &AM,
20540 // X86 supports extremely general addressing modes.
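// The most general form accepted here is
//   BaseGV + BaseReg + IndexReg * Scale + Disp32,
// where the hardware scales 1, 2, 4, and 8 always work and scales 3, 5, and
// 9 can be accepted by folding the base register, subject to the code-model
// and PIC restrictions checked below.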
20541 CodeModel::Model M = getTargetMachine().getCodeModel();
20542 Reloc::Model R = getTargetMachine().getRelocationModel();
20544 // X86 allows a sign-extended 32-bit immediate field as a displacement.
20545 if (!X86::isOffsetSuitableForCodeModel(AM.BaseOffs, M, AM.BaseGV != nullptr))
20550 Subtarget->ClassifyGlobalReference(AM.BaseGV, getTargetMachine());
20552 // If a reference to this global requires an extra load, we can't fold it.
20553 if (isGlobalStubReference(GVFlags))
20556 // If BaseGV requires a register for the PIC base, we cannot also have a
20557 // BaseReg specified.
20558 if (AM.HasBaseReg && isGlobalRelativeToPICBase(GVFlags))
20561 // If lower 4G is not available, then we must use rip-relative addressing.
20562 if ((M != CodeModel::Small || R != Reloc::Static) &&
20563 Subtarget->is64Bit() && (AM.BaseOffs || AM.Scale > 1))
20567 switch (AM.Scale) {
20573 // These scales always work.
20578 // These scales are formed with basereg+scalereg. Only accept if there is
20583 default: // Other stuff never works.
20590 bool X86TargetLowering::isVectorShiftByScalarCheap(Type *Ty) const {
20591 unsigned Bits = Ty->getScalarSizeInBits();
20593 // 8-bit shifts are always expensive, but versions with a scalar amount aren't
20594 // particularly cheaper than those without.
20598 // On AVX2 there are new vpsllv[dq] instructions (and other shifts), that make
20599 // variable shifts just as cheap as scalar ones.
20600 if (Subtarget->hasInt256() && (Bits == 32 || Bits == 64))
20603 // Otherwise, it's significantly cheaper to shift by a scalar amount than by a
20604 // fully general vector.
20608 bool X86TargetLowering::isTruncateFree(Type *Ty1, Type *Ty2) const {
20609 if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
20611 unsigned NumBits1 = Ty1->getPrimitiveSizeInBits();
20612 unsigned NumBits2 = Ty2->getPrimitiveSizeInBits();
20613 return NumBits1 > NumBits2;
20616 bool X86TargetLowering::allowTruncateForTailCall(Type *Ty1, Type *Ty2) const {
20617 if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
20620 if (!isTypeLegal(EVT::getEVT(Ty1)))
20623 assert(Ty1->getPrimitiveSizeInBits() <= 64 && "i128 is probably not a noop");
20625 // Assuming the caller doesn't have a zeroext or signext return parameter,
20626 // truncation all the way down to i1 is valid.
20630 bool X86TargetLowering::isLegalICmpImmediate(int64_t Imm) const {
20631 return isInt<32>(Imm);
20634 bool X86TargetLowering::isLegalAddImmediate(int64_t Imm) const {
20635 // Can also use sub to handle negated immediates.
20636 return isInt<32>(Imm);
20639 bool X86TargetLowering::isTruncateFree(EVT VT1, EVT VT2) const {
20640 if (!VT1.isInteger() || !VT2.isInteger())
20642 unsigned NumBits1 = VT1.getSizeInBits();
20643 unsigned NumBits2 = VT2.getSizeInBits();
20644 return NumBits1 > NumBits2;
20647 bool X86TargetLowering::isZExtFree(Type *Ty1, Type *Ty2) const {
20648 // x86-64 implicitly zero-extends 32-bit results in 64-bit registers.
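// For example, "movl %edi, %eax" already clears bits 63:32 of %rax, so no
// separate zero-extension instruction is needed.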
20649 return Ty1->isIntegerTy(32) && Ty2->isIntegerTy(64) && Subtarget->is64Bit();
20652 bool X86TargetLowering::isZExtFree(EVT VT1, EVT VT2) const {
20653 // x86-64 implicitly zero-extends 32-bit results in 64-bit registers.
20654 return VT1 == MVT::i32 && VT2 == MVT::i64 && Subtarget->is64Bit();
20657 bool X86TargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
20658 EVT VT1 = Val.getValueType();
20659 if (isZExtFree(VT1, VT2))
20662 if (Val.getOpcode() != ISD::LOAD)
20665 if (!VT1.isSimple() || !VT1.isInteger() ||
20666 !VT2.isSimple() || !VT2.isInteger())
20669 switch (VT1.getSimpleVT().SimpleTy) {
20674 // X86 has 8, 16, and 32-bit zero-extending loads.
20681 bool X86TargetLowering::isVectorLoadExtDesirable(SDValue) const { return true; }
20684 X86TargetLowering::isFMAFasterThanFMulAndFAdd(EVT VT) const {
20685 if (!(Subtarget->hasFMA() || Subtarget->hasFMA4()))
20688 VT = VT.getScalarType();
20690 if (!VT.isSimple())
20693 switch (VT.getSimpleVT().SimpleTy) {
20704 bool X86TargetLowering::isNarrowingProfitable(EVT VT1, EVT VT2) const {
20705 // i16 instructions are longer (0x66 prefix) and potentially slower.
20706 return !(VT1 == MVT::i32 && VT2 == MVT::i16);
20709 /// isShuffleMaskLegal - Targets can use this to indicate that they only
20710 /// support *some* VECTOR_SHUFFLE operations, those with specific masks.
20711 /// By default, if a target supports the VECTOR_SHUFFLE node, all mask values
20712 /// are assumed to be legal.
20714 X86TargetLowering::isShuffleMaskLegal(const SmallVectorImpl<int> &M,
20716 if (!VT.isSimple())
20719 MVT SVT = VT.getSimpleVT();
20721 // Very little shuffling can be done for 64-bit vectors right now.
20722 if (VT.getSizeInBits() == 64)
20725 // This is an experimental legality test that is tailored to match the
20726 // legality test of the experimental lowering more closely. They are gated
20727 // separately to ease testing of performance differences.
20728 if (ExperimentalVectorShuffleLegality)
20729 // We only care that the types being shuffled are legal. The lowering can
20730 // handle any possible shuffle mask that results.
20731 return isTypeLegal(SVT);
20733 // If this is a single-input shuffle with no 128-bit lane crossings, we can
20734 // lower it into pshufb.
20735 if ((SVT.is128BitVector() && Subtarget->hasSSSE3()) ||
20736 (SVT.is256BitVector() && Subtarget->hasInt256())) {
20737 bool isLegal = true;
20738 for (unsigned I = 0, E = M.size(); I != E; ++I) {
20739 if (M[I] >= (int)SVT.getVectorNumElements() ||
20740 ShuffleCrosses128bitLane(SVT, I, M[I])) {
20749 // FIXME: blends, shifts.
20750 return (SVT.getVectorNumElements() == 2 ||
20751 ShuffleVectorSDNode::isSplatMask(&M[0], VT) ||
20752 isMOVLMask(M, SVT) ||
20753 isCommutedMOVLMask(M, SVT) ||
20754 isMOVHLPSMask(M, SVT) ||
20755 isSHUFPMask(M, SVT) ||
20756 isSHUFPMask(M, SVT, /* Commuted */ true) ||
20757 isPSHUFDMask(M, SVT) ||
20758 isPSHUFDMask(M, SVT, /* SecondOperand */ true) ||
20759 isPSHUFHWMask(M, SVT, Subtarget->hasInt256()) ||
20760 isPSHUFLWMask(M, SVT, Subtarget->hasInt256()) ||
20761 isPALIGNRMask(M, SVT, Subtarget) ||
20762 isUNPCKLMask(M, SVT, Subtarget->hasInt256()) ||
20763 isUNPCKHMask(M, SVT, Subtarget->hasInt256()) ||
20764 isUNPCKL_v_undef_Mask(M, SVT, Subtarget->hasInt256()) ||
20765 isUNPCKH_v_undef_Mask(M, SVT, Subtarget->hasInt256()) ||
20766 isBlendMask(M, SVT, Subtarget->hasSSE41(), Subtarget->hasInt256()) ||
20767 (Subtarget->hasSSE41() && isINSERTPSMask(M, SVT)));
20771 X86TargetLowering::isVectorClearMaskLegal(const SmallVectorImpl<int> &Mask,
20773 if (!VT.isSimple())
20776 MVT SVT = VT.getSimpleVT();
20778 // This is an experimental legality test that is tailored to match the
20779 // legality test of the experimental lowering more closely. They are gated
20780 // separately to ease testing of performance differences.
20781 if (ExperimentalVectorShuffleLegality)
20782 // The new vector shuffle lowering is very good at managing zero-inputs.
20783 return isShuffleMaskLegal(Mask, VT);
20785 unsigned NumElts = SVT.getVectorNumElements();
20786 // FIXME: This collection of masks seems suspect.
20789 if (NumElts == 4 && SVT.is128BitVector()) {
20790 return (isMOVLMask(Mask, SVT) ||
20791 isCommutedMOVLMask(Mask, SVT, true) ||
20792 isSHUFPMask(Mask, SVT) ||
20793 isSHUFPMask(Mask, SVT, /* Commuted */ true) ||
20794 isBlendMask(Mask, SVT, Subtarget->hasSSE41(),
20795 Subtarget->hasInt256()));
20800 //===----------------------------------------------------------------------===//
20801 // X86 Scheduler Hooks
20802 //===----------------------------------------------------------------------===//
20804 /// Utility function to emit xbegin specifying the start of an RTM region.
20805 static MachineBasicBlock *EmitXBegin(MachineInstr *MI, MachineBasicBlock *MBB,
20806 const TargetInstrInfo *TII) {
20807 DebugLoc DL = MI->getDebugLoc();
20809 const BasicBlock *BB = MBB->getBasicBlock();
20810 MachineFunction::iterator I = MBB;
20813 // For the v = xbegin(), we generate
20824 MachineBasicBlock *thisMBB = MBB;
20825 MachineFunction *MF = MBB->getParent();
20826 MachineBasicBlock *mainMBB = MF->CreateMachineBasicBlock(BB);
20827 MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB);
20828 MF->insert(I, mainMBB);
20829 MF->insert(I, sinkMBB);
20831 // Transfer the remainder of BB and its successor edges to sinkMBB.
20832 sinkMBB->splice(sinkMBB->begin(), MBB,
20833 std::next(MachineBasicBlock::iterator(MI)), MBB->end());
20834 sinkMBB->transferSuccessorsAndUpdatePHIs(MBB);
// thisMBB:
//  xbegin sinkMBB
20838 // # fallthrough to mainMBB
20839 // # abort jumps to sinkMBB
20840 BuildMI(thisMBB, DL, TII->get(X86::XBEGIN_4)).addMBB(sinkMBB);
20841 thisMBB->addSuccessor(mainMBB);
20842 thisMBB->addSuccessor(sinkMBB);
20846 BuildMI(mainMBB, DL, TII->get(X86::MOV32ri), X86::EAX).addImm(-1);
20847 mainMBB->addSuccessor(sinkMBB);
20850 // EAX is live into the sinkMBB
20851 sinkMBB->addLiveIn(X86::EAX);
20852 BuildMI(*sinkMBB, sinkMBB->begin(), DL,
20853 TII->get(TargetOpcode::COPY), MI->getOperand(0).getReg())
20856 MI->eraseFromParent();
20860 // FIXME: When we get size specific XMM0 registers, i.e. XMM0_V16I8
20861 // or XMM0_V32I8 in AVX all of this code can be replaced with that
20862 // in the .td file.
20863 static MachineBasicBlock *EmitPCMPSTRM(MachineInstr *MI, MachineBasicBlock *BB,
20864 const TargetInstrInfo *TII) {
20866 switch (MI->getOpcode()) {
20867 default: llvm_unreachable("illegal opcode!");
20868 case X86::PCMPISTRM128REG: Opc = X86::PCMPISTRM128rr; break;
20869 case X86::VPCMPISTRM128REG: Opc = X86::VPCMPISTRM128rr; break;
20870 case X86::PCMPISTRM128MEM: Opc = X86::PCMPISTRM128rm; break;
20871 case X86::VPCMPISTRM128MEM: Opc = X86::VPCMPISTRM128rm; break;
20872 case X86::PCMPESTRM128REG: Opc = X86::PCMPESTRM128rr; break;
20873 case X86::VPCMPESTRM128REG: Opc = X86::VPCMPESTRM128rr; break;
20874 case X86::PCMPESTRM128MEM: Opc = X86::PCMPESTRM128rm; break;
20875 case X86::VPCMPESTRM128MEM: Opc = X86::VPCMPESTRM128rm; break;
20878 DebugLoc dl = MI->getDebugLoc();
20879 MachineInstrBuilder MIB = BuildMI(*BB, MI, dl, TII->get(Opc));
20881 unsigned NumArgs = MI->getNumOperands();
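// Copy every explicit operand of the pseudo onto the real (V)PCMPxSTRM
// instruction, skipping implicit register operands; the comparison result is
// defined implicitly in XMM0 and is copied into the pseudo's destination
// register afterwards.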
20882 for (unsigned i = 1; i < NumArgs; ++i) {
20883 MachineOperand &Op = MI->getOperand(i);
20884 if (!(Op.isReg() && Op.isImplicit()))
20885 MIB.addOperand(Op);
20887 if (MI->hasOneMemOperand())
20888 MIB->setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
20890 BuildMI(*BB, MI, dl,
20891 TII->get(TargetOpcode::COPY), MI->getOperand(0).getReg())
20892 .addReg(X86::XMM0);
20894 MI->eraseFromParent();
20898 // FIXME: Custom handling because TableGen doesn't support multiple implicit
20899 // defs in an instruction pattern
20900 static MachineBasicBlock *EmitPCMPSTRI(MachineInstr *MI, MachineBasicBlock *BB,
20901 const TargetInstrInfo *TII) {
20903 switch (MI->getOpcode()) {
20904 default: llvm_unreachable("illegal opcode!");
20905 case X86::PCMPISTRIREG: Opc = X86::PCMPISTRIrr; break;
20906 case X86::VPCMPISTRIREG: Opc = X86::VPCMPISTRIrr; break;
20907 case X86::PCMPISTRIMEM: Opc = X86::PCMPISTRIrm; break;
20908 case X86::VPCMPISTRIMEM: Opc = X86::VPCMPISTRIrm; break;
20909 case X86::PCMPESTRIREG: Opc = X86::PCMPESTRIrr; break;
20910 case X86::VPCMPESTRIREG: Opc = X86::VPCMPESTRIrr; break;
20911 case X86::PCMPESTRIMEM: Opc = X86::PCMPESTRIrm; break;
20912 case X86::VPCMPESTRIMEM: Opc = X86::VPCMPESTRIrm; break;
20915 DebugLoc dl = MI->getDebugLoc();
20916 MachineInstrBuilder MIB = BuildMI(*BB, MI, dl, TII->get(Opc));
20918 unsigned NumArgs = MI->getNumOperands(); // remove the results
20919 for (unsigned i = 1; i < NumArgs; ++i) {
20920 MachineOperand &Op = MI->getOperand(i);
20921 if (!(Op.isReg() && Op.isImplicit()))
20922 MIB.addOperand(Op);
20924 if (MI->hasOneMemOperand())
20925 MIB->setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
20927 BuildMI(*BB, MI, dl,
20928 TII->get(TargetOpcode::COPY), MI->getOperand(0).getReg())
20931 MI->eraseFromParent();
20935 static MachineBasicBlock *EmitMonitor(MachineInstr *MI, MachineBasicBlock *BB,
20936 const X86Subtarget *Subtarget) {
20937 DebugLoc dl = MI->getDebugLoc();
20938 const TargetInstrInfo *TII = Subtarget->getInstrInfo();
20939 // Address into RAX/EAX, other two args into ECX, EDX.
20940 unsigned MemOpc = Subtarget->is64Bit() ? X86::LEA64r : X86::LEA32r;
20941 unsigned MemReg = Subtarget->is64Bit() ? X86::RAX : X86::EAX;
20942 MachineInstrBuilder MIB = BuildMI(*BB, MI, dl, TII->get(MemOpc), MemReg);
20943 for (int i = 0; i < X86::AddrNumOperands; ++i)
20944 MIB.addOperand(MI->getOperand(i));
20946 unsigned ValOps = X86::AddrNumOperands;
20947 BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY), X86::ECX)
20948 .addReg(MI->getOperand(ValOps).getReg());
20949 BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY), X86::EDX)
20950 .addReg(MI->getOperand(ValOps+1).getReg());
20952 // The instruction doesn't actually take any operands though.
20953 BuildMI(*BB, MI, dl, TII->get(X86::MONITORrrr));
20955 MI->eraseFromParent(); // The pseudo is gone now.
20959 MachineBasicBlock *
20960 X86TargetLowering::EmitVAARG64WithCustomInserter(MachineInstr *MI,
20961 MachineBasicBlock *MBB) const {
20962 // Emit va_arg instruction on X86-64.
20964 // Operands to this pseudo-instruction:
20965 // 0 ) Output : destination address (reg)
20966 // 1-5) Input : va_list address (addr, i64mem)
20967 // 6 ) ArgSize : Size (in bytes) of vararg type
20968 // 7 ) ArgMode : 0=overflow only, 1=use gp_offset, 2=use fp_offset
20969 // 8 ) Align : Alignment of type
20970 // 9 ) EFLAGS (implicit-def)
20972 assert(MI->getNumOperands() == 10 && "VAARG_64 should have 10 operands!");
20973 assert(X86::AddrNumOperands == 5 && "VAARG_64 assumes 5 address operands");
20975 unsigned DestReg = MI->getOperand(0).getReg();
20976 MachineOperand &Base = MI->getOperand(1);
20977 MachineOperand &Scale = MI->getOperand(2);
20978 MachineOperand &Index = MI->getOperand(3);
20979 MachineOperand &Disp = MI->getOperand(4);
20980 MachineOperand &Segment = MI->getOperand(5);
20981 unsigned ArgSize = MI->getOperand(6).getImm();
20982 unsigned ArgMode = MI->getOperand(7).getImm();
20983 unsigned Align = MI->getOperand(8).getImm();
20985 // Memory Reference
20986 assert(MI->hasOneMemOperand() && "Expected VAARG_64 to have one memoperand");
20987 MachineInstr::mmo_iterator MMOBegin = MI->memoperands_begin();
20988 MachineInstr::mmo_iterator MMOEnd = MI->memoperands_end();
20990 // Machine Information
20991 const TargetInstrInfo *TII = Subtarget->getInstrInfo();
20992 MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
20993 const TargetRegisterClass *AddrRegClass = getRegClassFor(MVT::i64);
20994 const TargetRegisterClass *OffsetRegClass = getRegClassFor(MVT::i32);
20995 DebugLoc DL = MI->getDebugLoc();
20997 // struct va_list {
//   i32 gp_offset
//   i32 fp_offset
21000 // i64 overflow_area (address)
21001 // i64 reg_save_area (address)
// }
21003 // sizeof(va_list) = 24
21004 // alignment(va_list) = 8
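// Lowering strategy: when ArgMode selects gp_offset or fp_offset, compare the
// saved offset against the end of the register save area; if the argument
// still fits, load it from reg_save_area and bump the offset (offsetMBB),
// otherwise fall back to overflow_area, aligning it first if the type needs
// more than 8-byte alignment (overflowMBB). A PHI in endMBB merges the two
// computed argument addresses. ArgMode == 0 skips the branch entirely and
// uses only the overflow area.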
21006 unsigned TotalNumIntRegs = 6;
21007 unsigned TotalNumXMMRegs = 8;
21008 bool UseGPOffset = (ArgMode == 1);
21009 bool UseFPOffset = (ArgMode == 2);
21010 unsigned MaxOffset = TotalNumIntRegs * 8 +
21011 (UseFPOffset ? TotalNumXMMRegs * 16 : 0);
21013 /* Align ArgSize to a multiple of 8 */
21014 unsigned ArgSizeA8 = (ArgSize + 7) & ~7;
21015 bool NeedsAlign = (Align > 8);
21017 MachineBasicBlock *thisMBB = MBB;
21018 MachineBasicBlock *overflowMBB;
21019 MachineBasicBlock *offsetMBB;
21020 MachineBasicBlock *endMBB;
21022 unsigned OffsetDestReg = 0; // Argument address computed by offsetMBB
21023 unsigned OverflowDestReg = 0; // Argument address computed by overflowMBB
21024 unsigned OffsetReg = 0;
21026 if (!UseGPOffset && !UseFPOffset) {
21027 // If we only pull from the overflow region, we don't create a branch.
21028 // We don't need to alter control flow.
21029 OffsetDestReg = 0; // unused
21030 OverflowDestReg = DestReg;
21032 offsetMBB = nullptr;
21033 overflowMBB = thisMBB;
21036 // First emit code to check if gp_offset (or fp_offset) is below the bound.
21037 // If so, pull the argument from reg_save_area. (branch to offsetMBB)
21038 // If not, pull from overflow_area. (branch to overflowMBB)
21043 // offsetMBB overflowMBB
21048 // Registers for the PHI in endMBB
21049 OffsetDestReg = MRI.createVirtualRegister(AddrRegClass);
21050 OverflowDestReg = MRI.createVirtualRegister(AddrRegClass);
21052 const BasicBlock *LLVM_BB = MBB->getBasicBlock();
21053 MachineFunction *MF = MBB->getParent();
21054 overflowMBB = MF->CreateMachineBasicBlock(LLVM_BB);
21055 offsetMBB = MF->CreateMachineBasicBlock(LLVM_BB);
21056 endMBB = MF->CreateMachineBasicBlock(LLVM_BB);
21058 MachineFunction::iterator MBBIter = MBB;
21061 // Insert the new basic blocks
21062 MF->insert(MBBIter, offsetMBB);
21063 MF->insert(MBBIter, overflowMBB);
21064 MF->insert(MBBIter, endMBB);
21066 // Transfer the remainder of MBB and its successor edges to endMBB.
21067 endMBB->splice(endMBB->begin(), thisMBB,
21068 std::next(MachineBasicBlock::iterator(MI)), thisMBB->end());
21069 endMBB->transferSuccessorsAndUpdatePHIs(thisMBB);
21071 // Make offsetMBB and overflowMBB successors of thisMBB
21072 thisMBB->addSuccessor(offsetMBB);
21073 thisMBB->addSuccessor(overflowMBB);
21075 // endMBB is a successor of both offsetMBB and overflowMBB
21076 offsetMBB->addSuccessor(endMBB);
21077 overflowMBB->addSuccessor(endMBB);
21079 // Load the offset value into a register
21080 OffsetReg = MRI.createVirtualRegister(OffsetRegClass);
21081 BuildMI(thisMBB, DL, TII->get(X86::MOV32rm), OffsetReg)
21085 .addDisp(Disp, UseFPOffset ? 4 : 0)
21086 .addOperand(Segment)
21087 .setMemRefs(MMOBegin, MMOEnd);
21089 // Check if there is enough room left to pull this argument.
21090 BuildMI(thisMBB, DL, TII->get(X86::CMP32ri))
21092 .addImm(MaxOffset + 8 - ArgSizeA8);
21094 // Branch to "overflowMBB" if offset >= max
21095 // Fall through to "offsetMBB" otherwise
21096 BuildMI(thisMBB, DL, TII->get(X86::GetCondBranchFromCond(X86::COND_AE)))
21097 .addMBB(overflowMBB);
21100 // In offsetMBB, emit code to use the reg_save_area.
21102 assert(OffsetReg != 0);
21104 // Read the reg_save_area address.
21105 unsigned RegSaveReg = MRI.createVirtualRegister(AddrRegClass);
21106 BuildMI(offsetMBB, DL, TII->get(X86::MOV64rm), RegSaveReg)
21111 .addOperand(Segment)
21112 .setMemRefs(MMOBegin, MMOEnd);
21114 // Zero-extend the offset
21115 unsigned OffsetReg64 = MRI.createVirtualRegister(AddrRegClass);
21116 BuildMI(offsetMBB, DL, TII->get(X86::SUBREG_TO_REG), OffsetReg64)
21119 .addImm(X86::sub_32bit);
21121 // Add the offset to the reg_save_area to get the final address.
21122 BuildMI(offsetMBB, DL, TII->get(X86::ADD64rr), OffsetDestReg)
21123 .addReg(OffsetReg64)
21124 .addReg(RegSaveReg);
21126 // Compute the offset for the next argument
21127 unsigned NextOffsetReg = MRI.createVirtualRegister(OffsetRegClass);
21128 BuildMI(offsetMBB, DL, TII->get(X86::ADD32ri), NextOffsetReg)
21130 .addImm(UseFPOffset ? 16 : 8);
21132 // Store it back into the va_list.
21133 BuildMI(offsetMBB, DL, TII->get(X86::MOV32mr))
21137 .addDisp(Disp, UseFPOffset ? 4 : 0)
21138 .addOperand(Segment)
21139 .addReg(NextOffsetReg)
21140 .setMemRefs(MMOBegin, MMOEnd);
21143 BuildMI(offsetMBB, DL, TII->get(X86::JMP_1))
21148 // Emit code to use overflow area
21151 // Load the overflow_area address into a register.
21152 unsigned OverflowAddrReg = MRI.createVirtualRegister(AddrRegClass);
21153 BuildMI(overflowMBB, DL, TII->get(X86::MOV64rm), OverflowAddrReg)
21158 .addOperand(Segment)
21159 .setMemRefs(MMOBegin, MMOEnd);
21161 // If we need to align it, do so. Otherwise, just copy the address
21162 // to OverflowDestReg.
21164 // Align the overflow address
21165 assert((Align & (Align-1)) == 0 && "Alignment must be a power of 2");
21166 unsigned TmpReg = MRI.createVirtualRegister(AddrRegClass);
21168 // aligned_addr = (addr + (align-1)) & ~(align-1)
21169 BuildMI(overflowMBB, DL, TII->get(X86::ADD64ri32), TmpReg)
21170 .addReg(OverflowAddrReg)
21173 BuildMI(overflowMBB, DL, TII->get(X86::AND64ri32), OverflowDestReg)
21175 .addImm(~(uint64_t)(Align-1));
21177 BuildMI(overflowMBB, DL, TII->get(TargetOpcode::COPY), OverflowDestReg)
21178 .addReg(OverflowAddrReg);
21181 // Compute the next overflow address after this argument.
21182 // (the overflow address should be kept 8-byte aligned)
21183 unsigned NextAddrReg = MRI.createVirtualRegister(AddrRegClass);
21184 BuildMI(overflowMBB, DL, TII->get(X86::ADD64ri32), NextAddrReg)
21185 .addReg(OverflowDestReg)
21186 .addImm(ArgSizeA8);
21188 // Store the new overflow address.
21189 BuildMI(overflowMBB, DL, TII->get(X86::MOV64mr))
21194 .addOperand(Segment)
21195 .addReg(NextAddrReg)
21196 .setMemRefs(MMOBegin, MMOEnd);
21198 // If we branched, emit the PHI to the front of endMBB.
21200 BuildMI(*endMBB, endMBB->begin(), DL,
21201 TII->get(X86::PHI), DestReg)
21202 .addReg(OffsetDestReg).addMBB(offsetMBB)
21203 .addReg(OverflowDestReg).addMBB(overflowMBB);
21206 // Erase the pseudo instruction
21207 MI->eraseFromParent();
21212 MachineBasicBlock *
21213 X86TargetLowering::EmitVAStartSaveXMMRegsWithCustomInserter(
21215 MachineBasicBlock *MBB) const {
21216 // Emit code to save XMM registers to the stack. The ABI says that the
21217 // number of registers to save is given in %al, so it's theoretically
21218 // possible to do an indirect jump trick to avoid saving all of them;
21219 // however, this code takes a simpler approach and just executes all
21220 // of the stores if %al is non-zero. It's less code, and it's probably
21221 // easier on the hardware branch predictor, and stores aren't all that
21222 // expensive anyway.
21224 // Create the new basic blocks. One block contains all the XMM stores,
21225 // and one block is the final destination regardless of whether any
21226 // stores were performed.
21227 const BasicBlock *LLVM_BB = MBB->getBasicBlock();
21228 MachineFunction *F = MBB->getParent();
21229 MachineFunction::iterator MBBIter = MBB;
21231 MachineBasicBlock *XMMSaveMBB = F->CreateMachineBasicBlock(LLVM_BB);
21232 MachineBasicBlock *EndMBB = F->CreateMachineBasicBlock(LLVM_BB);
21233 F->insert(MBBIter, XMMSaveMBB);
21234 F->insert(MBBIter, EndMBB);
21236 // Transfer the remainder of MBB and its successor edges to EndMBB.
21237 EndMBB->splice(EndMBB->begin(), MBB,
21238 std::next(MachineBasicBlock::iterator(MI)), MBB->end());
21239 EndMBB->transferSuccessorsAndUpdatePHIs(MBB);
21241 // The original block will now fall through to the XMM save block.
21242 MBB->addSuccessor(XMMSaveMBB);
21243 // The XMMSaveMBB will fall through to the end block.
21244 XMMSaveMBB->addSuccessor(EndMBB);
21246 // Now add the instructions.
21247 const TargetInstrInfo *TII = Subtarget->getInstrInfo();
21248 DebugLoc DL = MI->getDebugLoc();
21250 unsigned CountReg = MI->getOperand(0).getReg();
21251 int64_t RegSaveFrameIndex = MI->getOperand(1).getImm();
21252 int64_t VarArgsFPOffset = MI->getOperand(2).getImm();
21254 if (!Subtarget->isTargetWin64()) {
21255 // If %al is 0, branch around the XMM save block.
21256 BuildMI(MBB, DL, TII->get(X86::TEST8rr)).addReg(CountReg).addReg(CountReg);
21257 BuildMI(MBB, DL, TII->get(X86::JE_1)).addMBB(EndMBB);
21258 MBB->addSuccessor(EndMBB);
21261 // Make sure the last operand is EFLAGS, which gets clobbered by the branch
21262 // that was just emitted, but clearly shouldn't be "saved".
21263 assert((MI->getNumOperands() <= 3 ||
21264 !MI->getOperand(MI->getNumOperands() - 1).isReg() ||
21265 MI->getOperand(MI->getNumOperands() - 1).getReg() == X86::EFLAGS)
21266 && "Expected last argument to be EFLAGS");
21267 unsigned MOVOpc = Subtarget->hasFp256() ? X86::VMOVAPSmr : X86::MOVAPSmr;
21268 // In the XMM save block, save all the XMM argument registers.
21269 for (int i = 3, e = MI->getNumOperands() - 1; i != e; ++i) {
21270 int64_t Offset = (i - 3) * 16 + VarArgsFPOffset;
21271 MachineMemOperand *MMO =
21272 F->getMachineMemOperand(
21273 MachinePointerInfo::getFixedStack(RegSaveFrameIndex, Offset),
21274 MachineMemOperand::MOStore,
21275 /*Size=*/16, /*Align=*/16);
21276 BuildMI(XMMSaveMBB, DL, TII->get(MOVOpc))
21277 .addFrameIndex(RegSaveFrameIndex)
21278 .addImm(/*Scale=*/1)
21279 .addReg(/*IndexReg=*/0)
21280 .addImm(/*Disp=*/Offset)
21281 .addReg(/*Segment=*/0)
21282 .addReg(MI->getOperand(i).getReg())
21283 .addMemOperand(MMO);
21286 MI->eraseFromParent(); // The pseudo instruction is gone now.
21291 // The EFLAGS operand of SelectItr might be missing a kill marker
21292 // because there were multiple uses of EFLAGS, and ISel didn't know
21293 // which to mark. Figure out whether SelectItr should have had a
21294 // kill marker, and set it if it should. Returns the correct kill
// marker value.
21296 static bool checkAndUpdateEFLAGSKill(MachineBasicBlock::iterator SelectItr,
21297 MachineBasicBlock* BB,
21298 const TargetRegisterInfo* TRI) {
21299 // Scan forward through BB for a use/def of EFLAGS.
21300 MachineBasicBlock::iterator miI(std::next(SelectItr));
21301 for (MachineBasicBlock::iterator miE = BB->end(); miI != miE; ++miI) {
21302 const MachineInstr& mi = *miI;
21303 if (mi.readsRegister(X86::EFLAGS))
21305 if (mi.definesRegister(X86::EFLAGS))
21306 break; // Should have kill-flag - update below.
21309 // If we hit the end of the block, check whether EFLAGS is live into a
21311 if (miI == BB->end()) {
21312 for (MachineBasicBlock::succ_iterator sItr = BB->succ_begin(),
21313 sEnd = BB->succ_end();
21314 sItr != sEnd; ++sItr) {
21315 MachineBasicBlock* succ = *sItr;
21316 if (succ->isLiveIn(X86::EFLAGS))
21321 // We found a def, or hit the end of the basic block and EFLAGS wasn't live
21322 // out. SelectMI should have a kill flag on EFLAGS.
21323 SelectItr->addRegisterKilled(X86::EFLAGS, TRI);
21327 MachineBasicBlock *
21328 X86TargetLowering::EmitLoweredSelect(MachineInstr *MI,
21329 MachineBasicBlock *BB) const {
21330 const TargetInstrInfo *TII = Subtarget->getInstrInfo();
21331 DebugLoc DL = MI->getDebugLoc();
21333 // To "insert" a SELECT_CC instruction, we actually have to insert the
21334 // diamond control-flow pattern. The incoming instruction knows the
21335 // destination vreg to set, the condition code register to branch on, the
21336 // true/false values to select between, and a branch opcode to use.
21337 const BasicBlock *LLVM_BB = BB->getBasicBlock();
21338 MachineFunction::iterator It = BB;
21344 // cmpTY ccX, r1, r2
21346 // fallthrough --> copy0MBB
21347 MachineBasicBlock *thisMBB = BB;
21348 MachineFunction *F = BB->getParent();
21349 MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB);
21350 MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
21351 F->insert(It, copy0MBB);
21352 F->insert(It, sinkMBB);
21354 // If the EFLAGS register isn't dead in the terminator, then claim that it's
21355 // live into the sink and copy blocks.
21356 const TargetRegisterInfo *TRI = Subtarget->getRegisterInfo();
21357 if (!MI->killsRegister(X86::EFLAGS) &&
21358 !checkAndUpdateEFLAGSKill(MI, BB, TRI)) {
21359 copy0MBB->addLiveIn(X86::EFLAGS);
21360 sinkMBB->addLiveIn(X86::EFLAGS);
21363 // Transfer the remainder of BB and its successor edges to sinkMBB.
21364 sinkMBB->splice(sinkMBB->begin(), BB,
21365 std::next(MachineBasicBlock::iterator(MI)), BB->end());
21366 sinkMBB->transferSuccessorsAndUpdatePHIs(BB);
21368 // Add the true and fallthrough blocks as its successors.
21369 BB->addSuccessor(copy0MBB);
21370 BB->addSuccessor(sinkMBB);
21372 // Create the conditional branch instruction.
21374 X86::GetCondBranchFromCond((X86::CondCode)MI->getOperand(3).getImm());
21375 BuildMI(BB, DL, TII->get(Opc)).addMBB(sinkMBB);
// copy0MBB:
21378 // %FalseValue = ...
21379 // # fallthrough to sinkMBB
21380 copy0MBB->addSuccessor(sinkMBB);
21383 // %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
21385 BuildMI(*sinkMBB, sinkMBB->begin(), DL,
21386 TII->get(X86::PHI), MI->getOperand(0).getReg())
21387 .addReg(MI->getOperand(1).getReg()).addMBB(copy0MBB)
21388 .addReg(MI->getOperand(2).getReg()).addMBB(thisMBB);
21390 MI->eraseFromParent(); // The pseudo instruction is gone now.
21394 MachineBasicBlock *
21395 X86TargetLowering::EmitLoweredSegAlloca(MachineInstr *MI,
21396 MachineBasicBlock *BB) const {
21397 MachineFunction *MF = BB->getParent();
21398 const TargetInstrInfo *TII = Subtarget->getInstrInfo();
21399 DebugLoc DL = MI->getDebugLoc();
21400 const BasicBlock *LLVM_BB = BB->getBasicBlock();
21402 assert(MF->shouldSplitStack());
21404 const bool Is64Bit = Subtarget->is64Bit();
21405 const bool IsLP64 = Subtarget->isTarget64BitLP64();
21407 const unsigned TlsReg = Is64Bit ? X86::FS : X86::GS;
21408 const unsigned TlsOffset = IsLP64 ? 0x70 : Is64Bit ? 0x40 : 0x30;
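// For segmented stacks the current stacklet's limit lives in thread-local
// storage at a fixed offset: %fs:0x70 on LP64, %fs:0x40 on x32, and
// %gs:0x30 on 32-bit targets, which is exactly what TlsReg/TlsOffset above
// encode.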
// BB:
21411 // ... [Till the alloca]
21412 // If stacklet is not large enough, jump to mallocMBB
//
// bumpMBB:
21415 // Allocate by subtracting from RSP
21416 // Jump to continueMBB
//
// mallocMBB:
21419 // Allocate by call to runtime
//
// continueMBB:
21423 // [rest of original BB]
21426 MachineBasicBlock *mallocMBB = MF->CreateMachineBasicBlock(LLVM_BB);
21427 MachineBasicBlock *bumpMBB = MF->CreateMachineBasicBlock(LLVM_BB);
21428 MachineBasicBlock *continueMBB = MF->CreateMachineBasicBlock(LLVM_BB);
21430 MachineRegisterInfo &MRI = MF->getRegInfo();
21431 const TargetRegisterClass *AddrRegClass =
21432 getRegClassFor(getPointerTy());
21434 unsigned mallocPtrVReg = MRI.createVirtualRegister(AddrRegClass),
21435 bumpSPPtrVReg = MRI.createVirtualRegister(AddrRegClass),
21436 tmpSPVReg = MRI.createVirtualRegister(AddrRegClass),
21437 SPLimitVReg = MRI.createVirtualRegister(AddrRegClass),
21438 sizeVReg = MI->getOperand(1).getReg(),
21439 physSPReg = IsLP64 || Subtarget->isTargetNaCl64() ? X86::RSP : X86::ESP;
21441 MachineFunction::iterator MBBIter = BB;
21444 MF->insert(MBBIter, bumpMBB);
21445 MF->insert(MBBIter, mallocMBB);
21446 MF->insert(MBBIter, continueMBB);
21448 continueMBB->splice(continueMBB->begin(), BB,
21449 std::next(MachineBasicBlock::iterator(MI)), BB->end());
21450 continueMBB->transferSuccessorsAndUpdatePHIs(BB);
21452 // Add code to the main basic block to check if the stack limit has been hit,
21453 // and if so, jump to mallocMBB otherwise to bumpMBB.
21454 BuildMI(BB, DL, TII->get(TargetOpcode::COPY), tmpSPVReg).addReg(physSPReg);
21455 BuildMI(BB, DL, TII->get(IsLP64 ? X86::SUB64rr:X86::SUB32rr), SPLimitVReg)
21456 .addReg(tmpSPVReg).addReg(sizeVReg);
21457 BuildMI(BB, DL, TII->get(IsLP64 ? X86::CMP64mr:X86::CMP32mr))
21458 .addReg(0).addImm(1).addReg(0).addImm(TlsOffset).addReg(TlsReg)
21459 .addReg(SPLimitVReg);
21460 BuildMI(BB, DL, TII->get(X86::JG_1)).addMBB(mallocMBB);
21462 // bumpMBB simply decreases the stack pointer, since we know the current
21463 // stacklet has enough space.
21464 BuildMI(bumpMBB, DL, TII->get(TargetOpcode::COPY), physSPReg)
21465 .addReg(SPLimitVReg);
21466 BuildMI(bumpMBB, DL, TII->get(TargetOpcode::COPY), bumpSPPtrVReg)
21467 .addReg(SPLimitVReg);
21468 BuildMI(bumpMBB, DL, TII->get(X86::JMP_1)).addMBB(continueMBB);
21470 // Calls into a routine in libgcc to allocate more space from the heap.
21471 const uint32_t *RegMask =
21472 Subtarget->getRegisterInfo()->getCallPreservedMask(CallingConv::C);
21474 BuildMI(mallocMBB, DL, TII->get(X86::MOV64rr), X86::RDI)
21476 BuildMI(mallocMBB, DL, TII->get(X86::CALL64pcrel32))
21477 .addExternalSymbol("__morestack_allocate_stack_space")
21478 .addRegMask(RegMask)
21479 .addReg(X86::RDI, RegState::Implicit)
21480 .addReg(X86::RAX, RegState::ImplicitDefine);
21481 } else if (Is64Bit) {
21482 BuildMI(mallocMBB, DL, TII->get(X86::MOV32rr), X86::EDI)
21484 BuildMI(mallocMBB, DL, TII->get(X86::CALL64pcrel32))
21485 .addExternalSymbol("__morestack_allocate_stack_space")
21486 .addRegMask(RegMask)
21487 .addReg(X86::EDI, RegState::Implicit)
21488 .addReg(X86::EAX, RegState::ImplicitDefine);
21490 BuildMI(mallocMBB, DL, TII->get(X86::SUB32ri), physSPReg).addReg(physSPReg)
21492 BuildMI(mallocMBB, DL, TII->get(X86::PUSH32r)).addReg(sizeVReg);
21493 BuildMI(mallocMBB, DL, TII->get(X86::CALLpcrel32))
21494 .addExternalSymbol("__morestack_allocate_stack_space")
21495 .addRegMask(RegMask)
21496 .addReg(X86::EAX, RegState::ImplicitDefine);
21500 BuildMI(mallocMBB, DL, TII->get(X86::ADD32ri), physSPReg).addReg(physSPReg)
21503 BuildMI(mallocMBB, DL, TII->get(TargetOpcode::COPY), mallocPtrVReg)
21504 .addReg(IsLP64 ? X86::RAX : X86::EAX);
21505 BuildMI(mallocMBB, DL, TII->get(X86::JMP_1)).addMBB(continueMBB);
21507 // Set up the CFG correctly.
21508 BB->addSuccessor(bumpMBB);
21509 BB->addSuccessor(mallocMBB);
21510 mallocMBB->addSuccessor(continueMBB);
21511 bumpMBB->addSuccessor(continueMBB);
21513 // Take care of the PHI nodes.
21514 BuildMI(*continueMBB, continueMBB->begin(), DL, TII->get(X86::PHI),
21515 MI->getOperand(0).getReg())
21516 .addReg(mallocPtrVReg).addMBB(mallocMBB)
21517 .addReg(bumpSPPtrVReg).addMBB(bumpMBB);
21519 // Delete the original pseudo instruction.
21520 MI->eraseFromParent();
21523 return continueMBB;
21526 MachineBasicBlock *
21527 X86TargetLowering::EmitLoweredWinAlloca(MachineInstr *MI,
21528 MachineBasicBlock *BB) const {
21529 DebugLoc DL = MI->getDebugLoc();
21531 assert(!Subtarget->isTargetMachO());
21533 X86FrameLowering::emitStackProbeCall(*BB->getParent(), *BB, MI, DL);
21535 MI->eraseFromParent(); // The pseudo instruction is gone now.
21539 MachineBasicBlock *
21540 X86TargetLowering::EmitLoweredTLSCall(MachineInstr *MI,
21541 MachineBasicBlock *BB) const {
21542 // This is pretty easy. We're taking the value that we received from
21543 // our load from the relocation, sticking it in either RDI (x86-64)
21544 // or EAX and doing an indirect call. The return value will then
21545 // be in the normal return register.
21546 MachineFunction *F = BB->getParent();
21547 const X86InstrInfo *TII = Subtarget->getInstrInfo();
21548 DebugLoc DL = MI->getDebugLoc();
21550 assert(Subtarget->isTargetDarwin() && "Darwin only instr emitted?");
21551 assert(MI->getOperand(3).isGlobal() && "This should be a global");
21553 // Get a register mask for the lowered call.
21554 // FIXME: The 32-bit calls have non-standard calling conventions. Use a
21555 // proper register mask.
21556 const uint32_t *RegMask =
21557 Subtarget->getRegisterInfo()->getCallPreservedMask(CallingConv::C);
21558 if (Subtarget->is64Bit()) {
21559 MachineInstrBuilder MIB = BuildMI(*BB, MI, DL,
21560 TII->get(X86::MOV64rm), X86::RDI)
21562 .addImm(0).addReg(0)
21563 .addGlobalAddress(MI->getOperand(3).getGlobal(), 0,
21564 MI->getOperand(3).getTargetFlags())
21566 MIB = BuildMI(*BB, MI, DL, TII->get(X86::CALL64m));
21567 addDirectMem(MIB, X86::RDI);
21568 MIB.addReg(X86::RAX, RegState::ImplicitDefine).addRegMask(RegMask);
21569 } else if (F->getTarget().getRelocationModel() != Reloc::PIC_) {
21570 MachineInstrBuilder MIB = BuildMI(*BB, MI, DL,
21571 TII->get(X86::MOV32rm), X86::EAX)
21573 .addImm(0).addReg(0)
21574 .addGlobalAddress(MI->getOperand(3).getGlobal(), 0,
21575 MI->getOperand(3).getTargetFlags())
21577 MIB = BuildMI(*BB, MI, DL, TII->get(X86::CALL32m));
21578 addDirectMem(MIB, X86::EAX);
21579 MIB.addReg(X86::EAX, RegState::ImplicitDefine).addRegMask(RegMask);
21581 MachineInstrBuilder MIB = BuildMI(*BB, MI, DL,
21582 TII->get(X86::MOV32rm), X86::EAX)
21583 .addReg(TII->getGlobalBaseReg(F))
21584 .addImm(0).addReg(0)
21585 .addGlobalAddress(MI->getOperand(3).getGlobal(), 0,
21586 MI->getOperand(3).getTargetFlags())
21588 MIB = BuildMI(*BB, MI, DL, TII->get(X86::CALL32m));
21589 addDirectMem(MIB, X86::EAX);
21590 MIB.addReg(X86::EAX, RegState::ImplicitDefine).addRegMask(RegMask);
21593 MI->eraseFromParent(); // The pseudo instruction is gone now.
21597 MachineBasicBlock *
21598 X86TargetLowering::emitEHSjLjSetJmp(MachineInstr *MI,
21599 MachineBasicBlock *MBB) const {
21600 DebugLoc DL = MI->getDebugLoc();
21601 MachineFunction *MF = MBB->getParent();
21602 const TargetInstrInfo *TII = Subtarget->getInstrInfo();
21603 MachineRegisterInfo &MRI = MF->getRegInfo();
21605 const BasicBlock *BB = MBB->getBasicBlock();
21606 MachineFunction::iterator I = MBB;
21609 // Memory Reference
21610 MachineInstr::mmo_iterator MMOBegin = MI->memoperands_begin();
21611 MachineInstr::mmo_iterator MMOEnd = MI->memoperands_end();
21614 unsigned MemOpndSlot = 0;
21616 unsigned CurOp = 0;
21618 DstReg = MI->getOperand(CurOp++).getReg();
21619 const TargetRegisterClass *RC = MRI.getRegClass(DstReg);
21620 assert(RC->hasType(MVT::i32) && "Invalid destination!");
21621 unsigned mainDstReg = MRI.createVirtualRegister(RC);
21622 unsigned restoreDstReg = MRI.createVirtualRegister(RC);
21624 MemOpndSlot = CurOp;
21626 MVT PVT = getPointerTy();
21627 assert((PVT == MVT::i64 || PVT == MVT::i32) &&
21628 "Invalid Pointer Size!");
21630 // For v = setjmp(buf), we generate
//
// thisMBB:
21633 // buf[LabelOffset] = restoreMBB
21634 // SjLjSetup restoreMBB
//
// mainMBB:
//  v_main = 0
//
// sinkMBB:
21640 // v = phi(main, restore)
//
// restoreMBB:
21643 // if base pointer being used, load it from frame
//  v_restore = 1
21646 MachineBasicBlock *thisMBB = MBB;
21647 MachineBasicBlock *mainMBB = MF->CreateMachineBasicBlock(BB);
21648 MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB);
21649 MachineBasicBlock *restoreMBB = MF->CreateMachineBasicBlock(BB);
21650 MF->insert(I, mainMBB);
21651 MF->insert(I, sinkMBB);
21652 MF->push_back(restoreMBB);
21654 MachineInstrBuilder MIB;
21656 // Transfer the remainder of BB and its successor edges to sinkMBB.
21657 sinkMBB->splice(sinkMBB->begin(), MBB,
21658 std::next(MachineBasicBlock::iterator(MI)), MBB->end());
21659 sinkMBB->transferSuccessorsAndUpdatePHIs(MBB);
21662 unsigned PtrStoreOpc = 0;
21663 unsigned LabelReg = 0;
21664 const int64_t LabelOffset = 1 * PVT.getStoreSize();
21665 Reloc::Model RM = MF->getTarget().getRelocationModel();
21666 bool UseImmLabel = (MF->getTarget().getCodeModel() == CodeModel::Small) &&
21667 (RM == Reloc::Static || RM == Reloc::DynamicNoPIC);
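// Slot 1 of the jump buffer (LabelOffset = one pointer) receives the address
// of restoreMBB. With the small code model and a non-PIC relocation model it
// can be stored as an immediate; otherwise it is materialized into LabelReg
// with LEA (RIP-relative on x86-64, PIC-base-relative on 32-bit) and stored
// through a register.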
21669 // Prepare IP either in reg or imm.
21670 if (!UseImmLabel) {
21671 PtrStoreOpc = (PVT == MVT::i64) ? X86::MOV64mr : X86::MOV32mr;
21672 const TargetRegisterClass *PtrRC = getRegClassFor(PVT);
21673 LabelReg = MRI.createVirtualRegister(PtrRC);
21674 if (Subtarget->is64Bit()) {
21675 MIB = BuildMI(*thisMBB, MI, DL, TII->get(X86::LEA64r), LabelReg)
21679 .addMBB(restoreMBB)
21682 const X86InstrInfo *XII = static_cast<const X86InstrInfo*>(TII);
21683 MIB = BuildMI(*thisMBB, MI, DL, TII->get(X86::LEA32r), LabelReg)
21684 .addReg(XII->getGlobalBaseReg(MF))
21687 .addMBB(restoreMBB, Subtarget->ClassifyBlockAddressReference())
21691 PtrStoreOpc = (PVT == MVT::i64) ? X86::MOV64mi32 : X86::MOV32mi;
21693 MIB = BuildMI(*thisMBB, MI, DL, TII->get(PtrStoreOpc));
21694 for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
21695 if (i == X86::AddrDisp)
21696 MIB.addDisp(MI->getOperand(MemOpndSlot + i), LabelOffset);
21698 MIB.addOperand(MI->getOperand(MemOpndSlot + i));
21701 MIB.addReg(LabelReg);
21703 MIB.addMBB(restoreMBB);
21704 MIB.setMemRefs(MMOBegin, MMOEnd);
21706 MIB = BuildMI(*thisMBB, MI, DL, TII->get(X86::EH_SjLj_Setup))
21707 .addMBB(restoreMBB);
21709 const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
21710 MIB.addRegMask(RegInfo->getNoPreservedMask());
21711 thisMBB->addSuccessor(mainMBB);
21712 thisMBB->addSuccessor(restoreMBB);
21716 BuildMI(mainMBB, DL, TII->get(X86::MOV32r0), mainDstReg);
21717 mainMBB->addSuccessor(sinkMBB);
21720 BuildMI(*sinkMBB, sinkMBB->begin(), DL,
21721 TII->get(X86::PHI), DstReg)
21722 .addReg(mainDstReg).addMBB(mainMBB)
21723 .addReg(restoreDstReg).addMBB(restoreMBB);
21726 if (RegInfo->hasBasePointer(*MF)) {
21727 const bool Uses64BitFramePtr =
21728 Subtarget->isTarget64BitLP64() || Subtarget->isTargetNaCl64();
21729 X86MachineFunctionInfo *X86FI = MF->getInfo<X86MachineFunctionInfo>();
21730 X86FI->setRestoreBasePointer(MF);
21731 unsigned FramePtr = RegInfo->getFrameRegister(*MF);
21732 unsigned BasePtr = RegInfo->getBaseRegister();
21733 unsigned Opm = Uses64BitFramePtr ? X86::MOV64rm : X86::MOV32rm;
21734 addRegOffset(BuildMI(restoreMBB, DL, TII->get(Opm), BasePtr),
21735 FramePtr, true, X86FI->getRestoreBasePointerOffset())
21736 .setMIFlag(MachineInstr::FrameSetup);
21738 BuildMI(restoreMBB, DL, TII->get(X86::MOV32ri), restoreDstReg).addImm(1);
21739 BuildMI(restoreMBB, DL, TII->get(X86::JMP_1)).addMBB(sinkMBB);
21740 restoreMBB->addSuccessor(sinkMBB);
21742 MI->eraseFromParent();
21746 MachineBasicBlock *
21747 X86TargetLowering::emitEHSjLjLongJmp(MachineInstr *MI,
21748 MachineBasicBlock *MBB) const {
21749 DebugLoc DL = MI->getDebugLoc();
21750 MachineFunction *MF = MBB->getParent();
21751 const TargetInstrInfo *TII = Subtarget->getInstrInfo();
21752 MachineRegisterInfo &MRI = MF->getRegInfo();
21754 // Memory Reference
21755 MachineInstr::mmo_iterator MMOBegin = MI->memoperands_begin();
21756 MachineInstr::mmo_iterator MMOEnd = MI->memoperands_end();
21758 MVT PVT = getPointerTy();
21759 assert((PVT == MVT::i64 || PVT == MVT::i32) &&
21760 "Invalid Pointer Size!");
21762 const TargetRegisterClass *RC =
21763 (PVT == MVT::i64) ? &X86::GR64RegClass : &X86::GR32RegClass;
21764 unsigned Tmp = MRI.createVirtualRegister(RC);
21765 // Since FP is only updated here but NOT referenced, it's treated as GPR.
21766 const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
21767 unsigned FP = (PVT == MVT::i64) ? X86::RBP : X86::EBP;
21768 unsigned SP = RegInfo->getStackRegister();
21770 MachineInstrBuilder MIB;
21772 const int64_t LabelOffset = 1 * PVT.getStoreSize();
21773 const int64_t SPOffset = 2 * PVT.getStoreSize();
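// Buffer layout assumed by the three loads below: slot 0 holds the frame
// pointer, slot 1 (LabelOffset) the resume address, and slot 2 (SPOffset) the
// stack pointer.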
21775 unsigned PtrLoadOpc = (PVT == MVT::i64) ? X86::MOV64rm : X86::MOV32rm;
21776 unsigned IJmpOpc = (PVT == MVT::i64) ? X86::JMP64r : X86::JMP32r;
21779 MIB = BuildMI(*MBB, MI, DL, TII->get(PtrLoadOpc), FP);
21780 for (unsigned i = 0; i < X86::AddrNumOperands; ++i)
21781 MIB.addOperand(MI->getOperand(i));
21782 MIB.setMemRefs(MMOBegin, MMOEnd);
21784 MIB = BuildMI(*MBB, MI, DL, TII->get(PtrLoadOpc), Tmp);
21785 for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
21786 if (i == X86::AddrDisp)
21787 MIB.addDisp(MI->getOperand(i), LabelOffset);
21789 MIB.addOperand(MI->getOperand(i));
21791 MIB.setMemRefs(MMOBegin, MMOEnd);
21793 MIB = BuildMI(*MBB, MI, DL, TII->get(PtrLoadOpc), SP);
21794 for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
21795 if (i == X86::AddrDisp)
21796 MIB.addDisp(MI->getOperand(i), SPOffset);
21798 MIB.addOperand(MI->getOperand(i));
21800 MIB.setMemRefs(MMOBegin, MMOEnd);
21802 BuildMI(*MBB, MI, DL, TII->get(IJmpOpc)).addReg(Tmp);
21804 MI->eraseFromParent();
21808 // Replace 213-type (isel default) FMA3 instructions with 231-type for
21809 // accumulator loops. Writing back to the accumulator allows the coalescer
21810 // to remove extra copies in the loop.
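// Rough sketch of the operand semantics involved (per the FMA3 naming
// convention): the 213 form computes op1 = op2*op1 + op3, so the accumulator
// must arrive in op3 while the result is written to op1, forcing a copy each
// loop iteration; the 231 form computes op1 = op2*op3 + op1, so the
// accumulator is both the addend and the destination register.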
21811 MachineBasicBlock *
21812 X86TargetLowering::emitFMA3Instr(MachineInstr *MI,
21813 MachineBasicBlock *MBB) const {
21814 MachineOperand &AddendOp = MI->getOperand(3);
21816 // Bail out early if the addend isn't a register - we can't switch these.
21817 if (!AddendOp.isReg())
21820 MachineFunction &MF = *MBB->getParent();
21821 MachineRegisterInfo &MRI = MF.getRegInfo();
21823 // Check whether the addend is defined by a PHI:
21824 assert(MRI.hasOneDef(AddendOp.getReg()) && "Multiple defs in SSA?");
21825 MachineInstr &AddendDef = *MRI.def_instr_begin(AddendOp.getReg());
21826 if (!AddendDef.isPHI())
21829 // Look for the following pattern:
21831 // %addend = phi [%entry, 0], [%loop, %result]
21833 // %result<tied1> = FMA213 %m2<tied0>, %m1, %addend
21837 // %addend = phi [%entry, 0], [%loop, %result]
21839 // %result<tied1> = FMA231 %addend<tied0>, %m1, %m2
21841 for (unsigned i = 1, e = AddendDef.getNumOperands(); i < e; i += 2) {
21842 assert(AddendDef.getOperand(i).isReg());
21843 MachineOperand PHISrcOp = AddendDef.getOperand(i);
21844 MachineInstr &PHISrcInst = *MRI.def_instr_begin(PHISrcOp.getReg());
21845 if (&PHISrcInst == MI) {
21846 // Found a matching instruction.
21847 unsigned NewFMAOpc = 0;
21848 switch (MI->getOpcode()) {
21849 case X86::VFMADDPDr213r: NewFMAOpc = X86::VFMADDPDr231r; break;
21850 case X86::VFMADDPSr213r: NewFMAOpc = X86::VFMADDPSr231r; break;
21851 case X86::VFMADDSDr213r: NewFMAOpc = X86::VFMADDSDr231r; break;
21852 case X86::VFMADDSSr213r: NewFMAOpc = X86::VFMADDSSr231r; break;
21853 case X86::VFMSUBPDr213r: NewFMAOpc = X86::VFMSUBPDr231r; break;
21854 case X86::VFMSUBPSr213r: NewFMAOpc = X86::VFMSUBPSr231r; break;
21855 case X86::VFMSUBSDr213r: NewFMAOpc = X86::VFMSUBSDr231r; break;
21856 case X86::VFMSUBSSr213r: NewFMAOpc = X86::VFMSUBSSr231r; break;
21857 case X86::VFNMADDPDr213r: NewFMAOpc = X86::VFNMADDPDr231r; break;
21858 case X86::VFNMADDPSr213r: NewFMAOpc = X86::VFNMADDPSr231r; break;
21859 case X86::VFNMADDSDr213r: NewFMAOpc = X86::VFNMADDSDr231r; break;
21860 case X86::VFNMADDSSr213r: NewFMAOpc = X86::VFNMADDSSr231r; break;
21861 case X86::VFNMSUBPDr213r: NewFMAOpc = X86::VFNMSUBPDr231r; break;
21862 case X86::VFNMSUBPSr213r: NewFMAOpc = X86::VFNMSUBPSr231r; break;
21863 case X86::VFNMSUBSDr213r: NewFMAOpc = X86::VFNMSUBSDr231r; break;
21864 case X86::VFNMSUBSSr213r: NewFMAOpc = X86::VFNMSUBSSr231r; break;
21865 case X86::VFMADDSUBPDr213r: NewFMAOpc = X86::VFMADDSUBPDr231r; break;
21866 case X86::VFMADDSUBPSr213r: NewFMAOpc = X86::VFMADDSUBPSr231r; break;
21867 case X86::VFMSUBADDPDr213r: NewFMAOpc = X86::VFMSUBADDPDr231r; break;
21868 case X86::VFMSUBADDPSr213r: NewFMAOpc = X86::VFMSUBADDPSr231r; break;
21870 case X86::VFMADDPDr213rY: NewFMAOpc = X86::VFMADDPDr231rY; break;
21871 case X86::VFMADDPSr213rY: NewFMAOpc = X86::VFMADDPSr231rY; break;
21872 case X86::VFMSUBPDr213rY: NewFMAOpc = X86::VFMSUBPDr231rY; break;
21873 case X86::VFMSUBPSr213rY: NewFMAOpc = X86::VFMSUBPSr231rY; break;
21874 case X86::VFNMADDPDr213rY: NewFMAOpc = X86::VFNMADDPDr231rY; break;
21875 case X86::VFNMADDPSr213rY: NewFMAOpc = X86::VFNMADDPSr231rY; break;
21876 case X86::VFNMSUBPDr213rY: NewFMAOpc = X86::VFNMSUBPDr231rY; break;
21877 case X86::VFNMSUBPSr213rY: NewFMAOpc = X86::VFNMSUBPSr231rY; break;
21878 case X86::VFMADDSUBPDr213rY: NewFMAOpc = X86::VFMADDSUBPDr231rY; break;
21879 case X86::VFMADDSUBPSr213rY: NewFMAOpc = X86::VFMADDSUBPSr231rY; break;
21880 case X86::VFMSUBADDPDr213rY: NewFMAOpc = X86::VFMSUBADDPDr231rY; break;
21881 case X86::VFMSUBADDPSr213rY: NewFMAOpc = X86::VFMSUBADDPSr231rY; break;
21882 default: llvm_unreachable("Unrecognized FMA variant.");
21885 const TargetInstrInfo &TII = *Subtarget->getInstrInfo();
21886 MachineInstrBuilder MIB =
21887 BuildMI(MF, MI->getDebugLoc(), TII.get(NewFMAOpc))
21888 .addOperand(MI->getOperand(0))
21889 .addOperand(MI->getOperand(3))
21890 .addOperand(MI->getOperand(2))
21891 .addOperand(MI->getOperand(1));
21892 MBB->insert(MachineBasicBlock::iterator(MI), MIB);
21893 MI->eraseFromParent();
21900 MachineBasicBlock *
21901 X86TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
21902 MachineBasicBlock *BB) const {
21903 switch (MI->getOpcode()) {
21904 default: llvm_unreachable("Unexpected instr type to insert");
21905 case X86::TAILJMPd64:
21906 case X86::TAILJMPr64:
21907 case X86::TAILJMPm64:
21908 case X86::TAILJMPd64_REX:
21909 case X86::TAILJMPr64_REX:
21910 case X86::TAILJMPm64_REX:
21911 llvm_unreachable("TAILJMP64 would not be touched here.");
21912 case X86::TCRETURNdi64:
21913 case X86::TCRETURNri64:
21914 case X86::TCRETURNmi64:
21916 case X86::WIN_ALLOCA:
21917 return EmitLoweredWinAlloca(MI, BB);
21918 case X86::SEG_ALLOCA_32:
21919 case X86::SEG_ALLOCA_64:
21920 return EmitLoweredSegAlloca(MI, BB);
21921 case X86::TLSCall_32:
21922 case X86::TLSCall_64:
21923 return EmitLoweredTLSCall(MI, BB);
21924 case X86::CMOV_GR8:
21925 case X86::CMOV_FR32:
21926 case X86::CMOV_FR64:
21927 case X86::CMOV_V4F32:
21928 case X86::CMOV_V2F64:
21929 case X86::CMOV_V2I64:
21930 case X86::CMOV_V8F32:
21931 case X86::CMOV_V4F64:
21932 case X86::CMOV_V4I64:
21933 case X86::CMOV_V16F32:
21934 case X86::CMOV_V8F64:
21935 case X86::CMOV_V8I64:
21936 case X86::CMOV_GR16:
21937 case X86::CMOV_GR32:
21938 case X86::CMOV_RFP32:
21939 case X86::CMOV_RFP64:
21940 case X86::CMOV_RFP80:
21941 return EmitLoweredSelect(MI, BB);
21943 case X86::FP32_TO_INT16_IN_MEM:
21944 case X86::FP32_TO_INT32_IN_MEM:
21945 case X86::FP32_TO_INT64_IN_MEM:
21946 case X86::FP64_TO_INT16_IN_MEM:
21947 case X86::FP64_TO_INT32_IN_MEM:
21948 case X86::FP64_TO_INT64_IN_MEM:
21949 case X86::FP80_TO_INT16_IN_MEM:
21950 case X86::FP80_TO_INT32_IN_MEM:
21951 case X86::FP80_TO_INT64_IN_MEM: {
21952 MachineFunction *F = BB->getParent();
21953 const TargetInstrInfo *TII = Subtarget->getInstrInfo();
21954 DebugLoc DL = MI->getDebugLoc();
21956 // Change the floating point control register to use "round towards zero"
21957 // mode when truncating to an integer value.
21958 int CWFrameIdx = F->getFrameInfo()->CreateStackObject(2, 2, false);
21959 addFrameReference(BuildMI(*BB, MI, DL,
21960 TII->get(X86::FNSTCW16m)), CWFrameIdx);
21962 // Load the old value of the high byte of the control word...
21964 unsigned OldCW = F->getRegInfo().createVirtualRegister(&X86::GR16RegClass);
21965 addFrameReference(BuildMI(*BB, MI, DL, TII->get(X86::MOV16rm), OldCW),
21968 // Set the high part to be round to zero...
21969 addFrameReference(BuildMI(*BB, MI, DL, TII->get(X86::MOV16mi)), CWFrameIdx)
21972 // Reload the modified control word now...
21973 addFrameReference(BuildMI(*BB, MI, DL,
21974 TII->get(X86::FLDCW16m)), CWFrameIdx);
21976 // Restore the memory image of control word to original value
21977 addFrameReference(BuildMI(*BB, MI, DL, TII->get(X86::MOV16mr)), CWFrameIdx)
21980 // Get the X86 opcode to use.
unsigned Opc;
21982 switch (MI->getOpcode()) {
21983 default: llvm_unreachable("illegal opcode!");
21984 case X86::FP32_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m32; break;
21985 case X86::FP32_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m32; break;
21986 case X86::FP32_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m32; break;
21987 case X86::FP64_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m64; break;
21988 case X86::FP64_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m64; break;
21989 case X86::FP64_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m64; break;
21990 case X86::FP80_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m80; break;
21991 case X86::FP80_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m80; break;
21992 case X86::FP80_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m80; break;
21996 MachineOperand &Op = MI->getOperand(0);
21998 AM.BaseType = X86AddressMode::RegBase;
21999 AM.Base.Reg = Op.getReg();
22001 AM.BaseType = X86AddressMode::FrameIndexBase;
22002 AM.Base.FrameIndex = Op.getIndex();
22004 Op = MI->getOperand(1);
22006 AM.Scale = Op.getImm();
22007 Op = MI->getOperand(2);
22009 AM.IndexReg = Op.getImm();
22010 Op = MI->getOperand(3);
22011 if (Op.isGlobal()) {
22012 AM.GV = Op.getGlobal();
22014 AM.Disp = Op.getImm();
22016 addFullAddress(BuildMI(*BB, MI, DL, TII->get(Opc)), AM)
22017 .addReg(MI->getOperand(X86::AddrNumOperands).getReg());
22019 // Reload the original control word now.
22020 addFrameReference(BuildMI(*BB, MI, DL,
22021 TII->get(X86::FLDCW16m)), CWFrameIdx);
22023 MI->eraseFromParent(); // The pseudo instruction is gone now.
22026 // String/text processing lowering.
22027 case X86::PCMPISTRM128REG:
22028 case X86::VPCMPISTRM128REG:
22029 case X86::PCMPISTRM128MEM:
22030 case X86::VPCMPISTRM128MEM:
22031 case X86::PCMPESTRM128REG:
22032 case X86::VPCMPESTRM128REG:
22033 case X86::PCMPESTRM128MEM:
22034 case X86::VPCMPESTRM128MEM:
22035 assert(Subtarget->hasSSE42() &&
22036 "Target must have SSE4.2 or AVX features enabled");
22037 return EmitPCMPSTRM(MI, BB, Subtarget->getInstrInfo());
22039 // String/text processing lowering.
22040 case X86::PCMPISTRIREG:
22041 case X86::VPCMPISTRIREG:
22042 case X86::PCMPISTRIMEM:
22043 case X86::VPCMPISTRIMEM:
22044 case X86::PCMPESTRIREG:
22045 case X86::VPCMPESTRIREG:
22046 case X86::PCMPESTRIMEM:
22047 case X86::VPCMPESTRIMEM:
22048 assert(Subtarget->hasSSE42() &&
22049 "Target must have SSE4.2 or AVX features enabled");
22050 return EmitPCMPSTRI(MI, BB, Subtarget->getInstrInfo());
22052 // Thread synchronization.
22054 return EmitMonitor(MI, BB, Subtarget);
22058 return EmitXBegin(MI, BB, Subtarget->getInstrInfo());
22060 case X86::VASTART_SAVE_XMM_REGS:
22061 return EmitVAStartSaveXMMRegsWithCustomInserter(MI, BB);
22063 case X86::VAARG_64:
22064 return EmitVAARG64WithCustomInserter(MI, BB);
22066 case X86::EH_SjLj_SetJmp32:
22067 case X86::EH_SjLj_SetJmp64:
22068 return emitEHSjLjSetJmp(MI, BB);
22070 case X86::EH_SjLj_LongJmp32:
22071 case X86::EH_SjLj_LongJmp64:
22072 return emitEHSjLjLongJmp(MI, BB);
22074 case TargetOpcode::STATEPOINT:
22075 // As an implementation detail, STATEPOINT shares the STACKMAP format at
22076 // this point in the process. We diverge later.
22077 return emitPatchPoint(MI, BB);
22079 case TargetOpcode::STACKMAP:
22080 case TargetOpcode::PATCHPOINT:
22081 return emitPatchPoint(MI, BB);
22083 case X86::VFMADDPDr213r:
22084 case X86::VFMADDPSr213r:
22085 case X86::VFMADDSDr213r:
22086 case X86::VFMADDSSr213r:
22087 case X86::VFMSUBPDr213r:
22088 case X86::VFMSUBPSr213r:
22089 case X86::VFMSUBSDr213r:
22090 case X86::VFMSUBSSr213r:
22091 case X86::VFNMADDPDr213r:
22092 case X86::VFNMADDPSr213r:
22093 case X86::VFNMADDSDr213r:
22094 case X86::VFNMADDSSr213r:
22095 case X86::VFNMSUBPDr213r:
22096 case X86::VFNMSUBPSr213r:
22097 case X86::VFNMSUBSDr213r:
22098 case X86::VFNMSUBSSr213r:
22099 case X86::VFMADDSUBPDr213r:
22100 case X86::VFMADDSUBPSr213r:
22101 case X86::VFMSUBADDPDr213r:
22102 case X86::VFMSUBADDPSr213r:
22103 case X86::VFMADDPDr213rY:
22104 case X86::VFMADDPSr213rY:
22105 case X86::VFMSUBPDr213rY:
22106 case X86::VFMSUBPSr213rY:
22107 case X86::VFNMADDPDr213rY:
22108 case X86::VFNMADDPSr213rY:
22109 case X86::VFNMSUBPDr213rY:
22110 case X86::VFNMSUBPSr213rY:
22111 case X86::VFMADDSUBPDr213rY:
22112 case X86::VFMADDSUBPSr213rY:
22113 case X86::VFMSUBADDPDr213rY:
22114 case X86::VFMSUBADDPSr213rY:
22115 return emitFMA3Instr(MI, BB);
22119 //===----------------------------------------------------------------------===//
22120 // X86 Optimization Hooks
22121 //===----------------------------------------------------------------------===//
22123 void X86TargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
APInt &KnownZero,
APInt &KnownOne,
22126 const SelectionDAG &DAG,
22127 unsigned Depth) const {
22128 unsigned BitWidth = KnownZero.getBitWidth();
22129 unsigned Opc = Op.getOpcode();
22130 assert((Opc >= ISD::BUILTIN_OP_END ||
22131 Opc == ISD::INTRINSIC_WO_CHAIN ||
22132 Opc == ISD::INTRINSIC_W_CHAIN ||
22133 Opc == ISD::INTRINSIC_VOID) &&
22134 "Should use MaskedValueIsZero if you don't know whether Op"
22135 " is a target node!");
22137 KnownZero = KnownOne = APInt(BitWidth, 0); // Don't know anything.
22151 // These nodes' second result is a boolean.
22152 if (Op.getResNo() == 0)
22155 case X86ISD::SETCC:
22156 KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - 1);
22158 case ISD::INTRINSIC_WO_CHAIN: {
22159 unsigned IntId = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
22160 unsigned NumLoBits = 0;
switch (IntId) {
default: break;
22163 case Intrinsic::x86_sse_movmsk_ps:
22164 case Intrinsic::x86_avx_movmsk_ps_256:
22165 case Intrinsic::x86_sse2_movmsk_pd:
22166 case Intrinsic::x86_avx_movmsk_pd_256:
22167 case Intrinsic::x86_mmx_pmovmskb:
22168 case Intrinsic::x86_sse2_pmovmskb_128:
22169 case Intrinsic::x86_avx2_pmovmskb: {
22170 // High bits of movmskp{s|d}, pmovmskb are known zero.
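// For example, movmskps on v4f32 produces only a 4-bit mask in a 32-bit
// result, so bits [31:4] are guaranteed zero; the switch below records the
// mask width for each variant.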
switch (IntId) {
22172 default: llvm_unreachable("Impossible intrinsic"); // Can't reach here.
22173 case Intrinsic::x86_sse_movmsk_ps: NumLoBits = 4; break;
22174 case Intrinsic::x86_avx_movmsk_ps_256: NumLoBits = 8; break;
22175 case Intrinsic::x86_sse2_movmsk_pd: NumLoBits = 2; break;
22176 case Intrinsic::x86_avx_movmsk_pd_256: NumLoBits = 4; break;
22177 case Intrinsic::x86_mmx_pmovmskb: NumLoBits = 8; break;
22178 case Intrinsic::x86_sse2_pmovmskb_128: NumLoBits = 16; break;
22179 case Intrinsic::x86_avx2_pmovmskb: NumLoBits = 32; break;
22181 KnownZero = APInt::getHighBitsSet(BitWidth, BitWidth - NumLoBits);
22190 unsigned X86TargetLowering::ComputeNumSignBitsForTargetNode(
SDValue Op,
22192 const SelectionDAG &,
22193 unsigned Depth) const {
22194 // SETCC_CARRY sets the dest to ~0 for true or 0 for false.
22195 if (Op.getOpcode() == X86ISD::SETCC_CARRY)
22196 return Op.getValueType().getScalarType().getSizeInBits();

return 1;
}
22202 /// isGAPlusOffset - Returns true (and the GlobalValue and the offset) if the
22203 /// node is a GlobalAddress + offset.
22204 bool X86TargetLowering::isGAPlusOffset(SDNode *N,
22205 const GlobalValue* &GA,
22206 int64_t &Offset) const {
22207 if (N->getOpcode() == X86ISD::Wrapper) {
22208 if (isa<GlobalAddressSDNode>(N->getOperand(0))) {
22209 GA = cast<GlobalAddressSDNode>(N->getOperand(0))->getGlobal();
22210 Offset = cast<GlobalAddressSDNode>(N->getOperand(0))->getOffset();
22214 return TargetLowering::isGAPlusOffset(N, GA, Offset);
22217 /// isShuffleHigh128VectorInsertLow - Checks whether the shuffle node is the
22218 /// same as extracting the high 128-bit part of 256-bit vector and then
22219 /// inserting the result into the low part of a new 256-bit vector
22220 static bool isShuffleHigh128VectorInsertLow(ShuffleVectorSDNode *SVOp) {
22221 EVT VT = SVOp->getValueType(0);
22222 unsigned NumElems = VT.getVectorNumElements();
22224 // vector_shuffle <4, 5, 6, 7, u, u, u, u> or <2, 3, u, u>
22225 for (unsigned i = 0, j = NumElems/2; i != NumElems/2; ++i, ++j)
22226 if (!isUndefOrEqual(SVOp->getMaskElt(i), j) ||
22227 SVOp->getMaskElt(j) >= 0)
22233 /// isShuffleLow128VectorInsertHigh - Checks whether the shuffle node is the
22234 /// same as extracting the low 128-bit part of 256-bit vector and then
22235 /// inserting the result into the high part of a new 256-bit vector
22236 static bool isShuffleLow128VectorInsertHigh(ShuffleVectorSDNode *SVOp) {
22237 EVT VT = SVOp->getValueType(0);
22238 unsigned NumElems = VT.getVectorNumElements();
22240 // vector_shuffle <u, u, u, u, 0, 1, 2, 3> or <u, u, 0, 1>
22241 for (unsigned i = NumElems/2, j = 0; i != NumElems; ++i, ++j)
22242 if (!isUndefOrEqual(SVOp->getMaskElt(i), j) ||
22243 SVOp->getMaskElt(j) >= 0)
22249 /// PerformShuffleCombine256 - Performs shuffle combines for 256-bit vectors.
22250 static SDValue PerformShuffleCombine256(SDNode *N, SelectionDAG &DAG,
22251 TargetLowering::DAGCombinerInfo &DCI,
22252 const X86Subtarget* Subtarget) {
22254 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
22255 SDValue V1 = SVOp->getOperand(0);
22256 SDValue V2 = SVOp->getOperand(1);
22257 EVT VT = SVOp->getValueType(0);
22258 unsigned NumElems = VT.getVectorNumElements();
22260 if (V1.getOpcode() == ISD::CONCAT_VECTORS &&
22261 V2.getOpcode() == ISD::CONCAT_VECTORS) {
22265 // V UNDEF BUILD_VECTOR UNDEF
22267 // CONCAT_VECTOR CONCAT_VECTOR
22270 // RESULT: V + zero extended
22272 if (V2.getOperand(0).getOpcode() != ISD::BUILD_VECTOR ||
22273 V2.getOperand(1).getOpcode() != ISD::UNDEF ||
22274 V1.getOperand(1).getOpcode() != ISD::UNDEF)
22277 if (!ISD::isBuildVectorAllZeros(V2.getOperand(0).getNode()))
22280 // To match the shuffle mask, the first half of the mask should
22281 // be exactly the first vector, and all the rest a splat with the
22282 // first element of the second one.
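// For example, with v8f32 operands this accepts a mask such as
// <0, 1, 2, 3, 8, 8, 8, 8>: the first half is the identity of V1 and every
// second-half element refers to element NumElems, i.e. the first (zero)
// element of V2.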
22283 for (unsigned i = 0; i != NumElems/2; ++i)
22284 if (!isUndefOrEqual(SVOp->getMaskElt(i), i) ||
22285 !isUndefOrEqual(SVOp->getMaskElt(i+NumElems/2), NumElems))
22288 // If V1 is coming from a vector load then just fold to a VZEXT_LOAD.
22289 if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(V1.getOperand(0))) {
22290 if (Ld->hasNUsesOfValue(1, 0)) {
22291 SDVTList Tys = DAG.getVTList(MVT::v4i64, MVT::Other);
22292 SDValue Ops[] = { Ld->getChain(), Ld->getBasePtr() };
22294 DAG.getMemIntrinsicNode(X86ISD::VZEXT_LOAD, dl, Tys, Ops,
22296 Ld->getPointerInfo(),
22297 Ld->getAlignment(),
22298 false/*isVolatile*/, true/*ReadMem*/,
22299 false/*WriteMem*/);
22301 // Make sure the newly-created LOAD is in the same position as Ld in
22302 // terms of dependency. We create a TokenFactor for Ld and ResNode,
22303 // and update uses of Ld's output chain to use the TokenFactor.
22304 if (Ld->hasAnyUseOfValue(1)) {
22305 SDValue NewChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
22306 SDValue(Ld, 1), SDValue(ResNode.getNode(), 1));
22307 DAG.ReplaceAllUsesOfValueWith(SDValue(Ld, 1), NewChain);
22308 DAG.UpdateNodeOperands(NewChain.getNode(), SDValue(Ld, 1),
22309 SDValue(ResNode.getNode(), 1));
22312 return DAG.getNode(ISD::BITCAST, dl, VT, ResNode);
22316 // Emit a zeroed vector and insert the desired subvector on its
22318 SDValue Zeros = getZeroVector(VT, Subtarget, DAG, dl);
22319 SDValue InsV = Insert128BitVector(Zeros, V1.getOperand(0), 0, DAG, dl);
22320 return DCI.CombineTo(N, InsV);
22323 //===--------------------------------------------------------------------===//
22324 // Combine some shuffles into subvector extracts and inserts:
22327 // vector_shuffle <4, 5, 6, 7, u, u, u, u> or <2, 3, u, u>
22328 if (isShuffleHigh128VectorInsertLow(SVOp)) {
22329 SDValue V = Extract128BitVector(V1, NumElems/2, DAG, dl);
22330 SDValue InsV = Insert128BitVector(DAG.getUNDEF(VT), V, 0, DAG, dl);
22331 return DCI.CombineTo(N, InsV);
22334 // vector_shuffle <u, u, u, u, 0, 1, 2, 3> or <u, u, 0, 1>
22335 if (isShuffleLow128VectorInsertHigh(SVOp)) {
22336 SDValue V = Extract128BitVector(V1, 0, DAG, dl);
22337 SDValue InsV = Insert128BitVector(DAG.getUNDEF(VT), V, NumElems/2, DAG, dl);
22338 return DCI.CombineTo(N, InsV);
22344 /// \brief Combine an arbitrary chain of shuffles into a single instruction if
22347 /// This is the leaf of the recursive combine below. When we have found some
22348 /// chain of single-use x86 shuffle instructions and accumulated the combined
22349 /// shuffle mask represented by them, this will try to pattern match that mask
22350 /// into either a single instruction if there is a special purpose instruction
22351 /// for this operation, or into a PSHUFB instruction which is a fully general
22352 /// instruction but should only be used to replace chains over a certain depth.
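/// For example (matching the cases below), an accumulated v2f64 mask of
/// <0, 0> becomes a single MOVDDUP on SSE3 (or MOVLHPS otherwise), while a
/// deep enough chain falls back to a single PSHUFB.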
22353 static bool combineX86ShuffleChain(SDValue Op, SDValue Root, ArrayRef<int> Mask,
22354 int Depth, bool HasPSHUFB, SelectionDAG &DAG,
22355 TargetLowering::DAGCombinerInfo &DCI,
22356 const X86Subtarget *Subtarget) {
22357 assert(!Mask.empty() && "Cannot combine an empty shuffle mask!");
22359 // Find the operand that enters the chain. Note that multiple uses are OK
22360 // here, we're not going to remove the operand we find.
22361 SDValue Input = Op.getOperand(0);
22362 while (Input.getOpcode() == ISD::BITCAST)
22363 Input = Input.getOperand(0);
22365 MVT VT = Input.getSimpleValueType();
22366 MVT RootVT = Root.getSimpleValueType();
22369 // Just remove no-op shuffle masks.
22370 if (Mask.size() == 1) {
22371 DCI.CombineTo(Root.getNode(), DAG.getNode(ISD::BITCAST, DL, RootVT, Input),
22376 // Use the float domain if the operand type is a floating point type.
22377 bool FloatDomain = VT.isFloatingPoint();
22379 // For floating point shuffles, we don't have free copies in the shuffle
22380 // instructions or the ability to load as part of the instruction, so
22381 // canonicalize their shuffles to UNPCK or MOV variants.
22383 // Note that even with AVX we prefer the PSHUFD form of shuffle for integer
22384 // vectors because it can have a load folded into it that UNPCK cannot. This
22385 // doesn't preclude something switching to the shorter encoding post-RA.
22387 if (Mask.equals(0, 0) || Mask.equals(1, 1)) {
22388 bool Lo = Mask.equals(0, 0);
22391 // Check if we have SSE3 which will let us use MOVDDUP. That instruction
22392 // is no slower than UNPCKLPD but has the option to fold the input operand
22393 // into even an unaligned memory load.
22394 if (Lo && Subtarget->hasSSE3()) {
22395 Shuffle = X86ISD::MOVDDUP;
22396 ShuffleVT = MVT::v2f64;
22398 // We have MOVLHPS and MOVHLPS throughout SSE and they encode smaller
22399 // than the UNPCK variants.
22400 Shuffle = Lo ? X86ISD::MOVLHPS : X86ISD::MOVHLPS;
22401 ShuffleVT = MVT::v4f32;
22403 if (Depth == 1 && Root->getOpcode() == Shuffle)
22404 return false; // Nothing to do!
22405 Op = DAG.getNode(ISD::BITCAST, DL, ShuffleVT, Input);
22406 DCI.AddToWorklist(Op.getNode());
22407 if (Shuffle == X86ISD::MOVDDUP)
22408 Op = DAG.getNode(Shuffle, DL, ShuffleVT, Op);
22410 Op = DAG.getNode(Shuffle, DL, ShuffleVT, Op, Op);
22411 DCI.AddToWorklist(Op.getNode());
22412 DCI.CombineTo(Root.getNode(), DAG.getNode(ISD::BITCAST, DL, RootVT, Op),
22416 if (Subtarget->hasSSE3() &&
22417 (Mask.equals(0, 0, 2, 2) || Mask.equals(1, 1, 3, 3))) {
22418 bool Lo = Mask.equals(0, 0, 2, 2);
22419 unsigned Shuffle = Lo ? X86ISD::MOVSLDUP : X86ISD::MOVSHDUP;
22420 MVT ShuffleVT = MVT::v4f32;
22421 if (Depth == 1 && Root->getOpcode() == Shuffle)
22422 return false; // Nothing to do!
22423 Op = DAG.getNode(ISD::BITCAST, DL, ShuffleVT, Input);
22424 DCI.AddToWorklist(Op.getNode());
22425 Op = DAG.getNode(Shuffle, DL, ShuffleVT, Op);
22426 DCI.AddToWorklist(Op.getNode());
22427 DCI.CombineTo(Root.getNode(), DAG.getNode(ISD::BITCAST, DL, RootVT, Op),
22431 if (Mask.equals(0, 0, 1, 1) || Mask.equals(2, 2, 3, 3)) {
22432 bool Lo = Mask.equals(0, 0, 1, 1);
22433 unsigned Shuffle = Lo ? X86ISD::UNPCKL : X86ISD::UNPCKH;
22434 MVT ShuffleVT = MVT::v4f32;
22435 if (Depth == 1 && Root->getOpcode() == Shuffle)
22436 return false; // Nothing to do!
22437 Op = DAG.getNode(ISD::BITCAST, DL, ShuffleVT, Input);
22438 DCI.AddToWorklist(Op.getNode());
22439 Op = DAG.getNode(Shuffle, DL, ShuffleVT, Op, Op);
22440 DCI.AddToWorklist(Op.getNode());
22441 DCI.CombineTo(Root.getNode(), DAG.getNode(ISD::BITCAST, DL, RootVT, Op),
22447 // We always canonicalize the 8 x i16 and 16 x i8 shuffles into their UNPCK
22448 // variants as none of these have single-instruction variants that are
22449 // superior to the UNPCK formulation.
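// E.g. an accumulated v8i16 mask of <0, 0, 1, 1, 2, 2, 3, 3> is exactly an
// unpack of the low words of the input with itself (PUNPCKLWD), matched below.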
22450 if (!FloatDomain &&
22451 (Mask.equals(0, 0, 1, 1, 2, 2, 3, 3) ||
22452 Mask.equals(4, 4, 5, 5, 6, 6, 7, 7) ||
22453 Mask.equals(0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7) ||
22454 Mask.equals(8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13, 14, 14, 15,
22456 bool Lo = Mask[0] == 0;
22457 unsigned Shuffle = Lo ? X86ISD::UNPCKL : X86ISD::UNPCKH;
22458 if (Depth == 1 && Root->getOpcode() == Shuffle)
22459 return false; // Nothing to do!
22461 switch (Mask.size()) {
22463 ShuffleVT = MVT::v8i16;
22466 ShuffleVT = MVT::v16i8;
22469 llvm_unreachable("Impossible mask size!");
22471 Op = DAG.getNode(ISD::BITCAST, DL, ShuffleVT, Input);
22472 DCI.AddToWorklist(Op.getNode());
22473 Op = DAG.getNode(Shuffle, DL, ShuffleVT, Op, Op);
22474 DCI.AddToWorklist(Op.getNode());
22475 DCI.CombineTo(Root.getNode(), DAG.getNode(ISD::BITCAST, DL, RootVT, Op),
22480 // Don't try to re-form single instruction chains under any circumstances now
22481 // that we've done encoding canonicalization for them.
22485 // If we have 3 or more shuffle instructions or a chain involving PSHUFB, we
22486 // can replace them with a single PSHUFB instruction profitably. Intel's
22487 // manuals suggest only using PSHUFB if doing so replaces 5 instructions, but
22488 // in practice PSHUFB tends to be *very* fast so we're more aggressive.
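// Worked example for the byte-mask construction below: with a v4i32 mask of
// <2, 0, 1, 3>, Ratio is 4 and the resulting PSHUFB byte mask is
// <8,9,10,11, 0,1,2,3, 4,5,6,7, 12,13,14,15>.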
22489 if ((Depth >= 3 || HasPSHUFB) && Subtarget->hasSSSE3()) {
22490 SmallVector<SDValue, 16> PSHUFBMask;
22491 assert(Mask.size() <= 16 && "Can't shuffle elements smaller than bytes!");
22492 int Ratio = 16 / Mask.size();
22493 for (unsigned i = 0; i < 16; ++i) {
22494 if (Mask[i / Ratio] == SM_SentinelUndef) {
22495 PSHUFBMask.push_back(DAG.getUNDEF(MVT::i8));
22498 int M = Mask[i / Ratio] != SM_SentinelZero
22499 ? Ratio * Mask[i / Ratio] + i % Ratio
22501 PSHUFBMask.push_back(DAG.getConstant(M, MVT::i8));
22503 Op = DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, Input);
22504 DCI.AddToWorklist(Op.getNode());
22505 SDValue PSHUFBMaskOp =
22506 DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v16i8, PSHUFBMask);
22507 DCI.AddToWorklist(PSHUFBMaskOp.getNode());
22508 Op = DAG.getNode(X86ISD::PSHUFB, DL, MVT::v16i8, Op, PSHUFBMaskOp);
22509 DCI.AddToWorklist(Op.getNode());
22510 DCI.CombineTo(Root.getNode(), DAG.getNode(ISD::BITCAST, DL, RootVT, Op),
22515 // Failed to find any combines.
22519 /// \brief Fully generic combining of x86 shuffle instructions.
22521 /// This should be the last combine run over the x86 shuffle instructions. Once
22522 /// they have been fully optimized, this will recursively consider all chains
22523 /// of single-use shuffle instructions, build a generic model of the cumulative
22524 /// shuffle operation, and check for simpler instructions which implement this
22525 /// operation. We use this primarily for two purposes:
22527 /// 1) Collapse generic shuffles to specialized single instructions when
22528 /// equivalent. In most cases, this is just an encoding size win, but
22529 /// sometimes we will collapse multiple generic shuffles into a single
22530 /// special-purpose shuffle.
22531 /// 2) Look for sequences of shuffle instructions with 3 or more total
22532 /// instructions, and replace them with the slightly more expensive SSSE3
22533 /// PSHUFB instruction if available. We do this as the last combining step
22534 /// to ensure we avoid using PSHUFB if we can implement the shuffle with
22535 /// a suitable short sequence of other instructions. The PSHUFB will either
22536 /// use a register or have to read from memory and so is slightly (but only
22537 /// slightly) more expensive than the other shuffle instructions.
22539 /// Because this is inherently a quadratic operation (for each shuffle in
22540 /// a chain, we recurse up the chain), the depth is limited to 8 instructions.
22541 /// This should never be an issue in practice as the shuffle lowering doesn't
22542 /// produce sequences of more than 8 instructions.
22544 /// FIXME: We will currently miss some cases where the redundant shuffling
22545 /// would simplify under the threshold for PSHUFB formation because of
22546 /// combine-ordering. To fix this, we should do the redundant instruction
22547 /// combining in this recursive walk.
22548 static bool combineX86ShufflesRecursively(SDValue Op, SDValue Root,
22549 ArrayRef<int> RootMask,
22550 int Depth, bool HasPSHUFB,
22552 TargetLowering::DAGCombinerInfo &DCI,
22553 const X86Subtarget *Subtarget) {
22554 // Bound the depth of our recursive combine because this is ultimately
22555 // quadratic in nature.
22559 // Directly rip through bitcasts to find the underlying operand.
22560 while (Op.getOpcode() == ISD::BITCAST && Op.getOperand(0).hasOneUse())
22561 Op = Op.getOperand(0);
22563 MVT VT = Op.getSimpleValueType();
22564 if (!VT.isVector())
22565 return false; // Bail if we hit a non-vector.
22566 // FIXME: This routine should be taught about 256-bit shuffles, or a 256-bit
22567 // version should be added.
22568 if (VT.getSizeInBits() != 128)
22571 assert(Root.getSimpleValueType().isVector() &&
22572 "Shuffles operate on vector types!");
22573 assert(VT.getSizeInBits() == Root.getSimpleValueType().getSizeInBits() &&
22574 "Can only combine shuffles of the same vector register size.");
22576 if (!isTargetShuffle(Op.getOpcode()))
22578 SmallVector<int, 16> OpMask;
bool IsUnary;
22580 bool HaveMask = getTargetShuffleMask(Op.getNode(), VT, OpMask, IsUnary);
22581 // We can only combine unary shuffles whose masks we can decode.
22582 if (!HaveMask || !IsUnary)
22585 assert(VT.getVectorNumElements() == OpMask.size() &&
22586 "Different mask size from vector size!");
22587 assert(((RootMask.size() > OpMask.size() &&
22588 RootMask.size() % OpMask.size() == 0) ||
22589 (OpMask.size() > RootMask.size() &&
22590 OpMask.size() % RootMask.size() == 0) ||
22591 OpMask.size() == RootMask.size()) &&
22592 "The smaller number of elements must divide the larger.");
22593 int RootRatio = std::max<int>(1, OpMask.size() / RootMask.size());
22594 int OpRatio = std::max<int>(1, RootMask.size() / OpMask.size());
22595 assert(((RootRatio == 1 && OpRatio == 1) ||
22596 (RootRatio == 1) != (OpRatio == 1)) &&
22597 "Must not have a ratio for both incoming and op masks!");
22599 SmallVector<int, 16> Mask;
22600 Mask.reserve(std::max(OpMask.size(), RootMask.size()));
22602 // Merge this shuffle operation's mask into our accumulated mask. Note that
22603 // this shuffle's mask will be the first applied to the input, followed by the
22604 // root mask to get us all the way to the root value arrangement. The reason
22605 // for this order is that we are recursing up the operation chain.
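// Small example of the composition (both ratios 1): with RootMask = <0,0,1,1>
// and OpMask = <2,3,0,1>, result lane i reads op-input lane
// OpMask[RootMask[i]], so the merged mask is <2,2,3,3>.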
22606 for (int i = 0, e = std::max(OpMask.size(), RootMask.size()); i < e; ++i) {
22607 int RootIdx = i / RootRatio;
22608 if (RootMask[RootIdx] < 0) {
22609 // This is a zero or undef lane, we're done.
22610 Mask.push_back(RootMask[RootIdx]);
continue;
}
22614 int RootMaskedIdx = RootMask[RootIdx] * RootRatio + i % RootRatio;
22615 int OpIdx = RootMaskedIdx / OpRatio;
22616 if (OpMask[OpIdx] < 0) {
22617 // The incoming lanes are zero or undef, it doesn't matter which ones we
// are actually using.
22619 Mask.push_back(OpMask[OpIdx]);
continue;
}
22623 // Ok, we have non-zero lanes, map them through.
22624 Mask.push_back(OpMask[OpIdx] * OpRatio +
22625 RootMaskedIdx % OpRatio);
}
22628 // See if we can recurse into the operand to combine more things.
22629 switch (Op.getOpcode()) {
22630 case X86ISD::PSHUFB:
22632 case X86ISD::PSHUFD:
22633 case X86ISD::PSHUFHW:
22634 case X86ISD::PSHUFLW:
22635 if (Op.getOperand(0).hasOneUse() &&
22636 combineX86ShufflesRecursively(Op.getOperand(0), Root, Mask, Depth + 1,
22637 HasPSHUFB, DAG, DCI, Subtarget))
22641 case X86ISD::UNPCKL:
22642 case X86ISD::UNPCKH:
22643 assert(Op.getOperand(0) == Op.getOperand(1) && "We only combine unary shuffles!");
22644 // We can't check for single use, we have to check that this shuffle is the only user.
22645 if (Op->isOnlyUserOf(Op.getOperand(0).getNode()) &&
22646 combineX86ShufflesRecursively(Op.getOperand(0), Root, Mask, Depth + 1,
22647 HasPSHUFB, DAG, DCI, Subtarget))
22652 // Minor canonicalization of the accumulated shuffle mask to make it easier
22653 // to match below. All this does is detect masks with sequential pairs of
22654 // elements, and shrink them to the half-width mask. It does this in a loop
22655 // so it will reduce the size of the mask to the minimal width mask which
22656 // performs an equivalent shuffle.
22657 SmallVector<int, 16> WidenedMask;
22658 while (Mask.size() > 1 && canWidenShuffleElements(Mask, WidenedMask)) {
22659 Mask = std::move(WidenedMask);
22660 WidenedMask.clear();
22663 return combineX86ShuffleChain(Op, Root, Mask, Depth, HasPSHUFB, DAG, DCI,
22667 /// \brief Get the PSHUF-style mask from PSHUF node.
22669 /// This is a very minor wrapper around getTargetShuffleMask to ease forming v4
22670 /// PSHUF-style masks that can be reused with such instructions.
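/// For example, a PSHUFHW whose 8-wide mask is <0,1,2,3, 5,4,7,6> yields the
/// 4-wide mask <1,0,3,2> after dropping the identity low half and rebasing the
/// remaining indices by 4.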
22671 static SmallVector<int, 4> getPSHUFShuffleMask(SDValue N) {
22672 SmallVector<int, 4> Mask;
bool IsUnary;
22674 bool HaveMask = getTargetShuffleMask(N.getNode(), N.getSimpleValueType(), Mask, IsUnary);
(void)HaveMask;
assert(HaveMask);
22678 switch (N.getOpcode()) {
22679 case X86ISD::PSHUFD:
return Mask;
22681 case X86ISD::PSHUFLW:
Mask.resize(4);
return Mask;
22684 case X86ISD::PSHUFHW:
22685 Mask.erase(Mask.begin(), Mask.begin() + 4);
22686 for (int &M : Mask)
M -= 4;
return Mask;

default:
22690 llvm_unreachable("No valid shuffle instruction found!");
22694 /// \brief Search for a combinable shuffle across a chain ending in pshufd.
22696 /// We walk up the chain and look for a combinable shuffle, skipping over
22697 /// shuffles that we could hoist this shuffle's transformation past without
22698 /// altering anything.
22700 combineRedundantDWordShuffle(SDValue N, MutableArrayRef<int> Mask,
22702 TargetLowering::DAGCombinerInfo &DCI) {
22703 assert(N.getOpcode() == X86ISD::PSHUFD &&
22704 "Called with something other than an x86 128-bit half shuffle!");
22707 // Walk up a single-use chain looking for a combinable shuffle. Keep a stack
22708 // of the shuffles in the chain so that we can form a fresh chain to replace
22710 SmallVector<SDValue, 8> Chain;
22711 SDValue V = N.getOperand(0);
22712 for (; V.hasOneUse(); V = V.getOperand(0)) {
22713 switch (V.getOpcode()) {
default:
22715 return SDValue(); // Nothing combined!

case ISD::BITCAST:
22718 // Skip bitcasts as we always know the type for the target specific
// shuffles we encounter.
continue;
22722 case X86ISD::PSHUFD:
22723 // Found another dword shuffle.
22726 case X86ISD::PSHUFLW:
22727 // Check that the low words (being shuffled) are the identity in the
22728 // dword shuffle, and the high words are self-contained.
22729 if (Mask[0] != 0 || Mask[1] != 1 ||
22730 !(Mask[2] >= 2 && Mask[2] < 4 && Mask[3] >= 2 && Mask[3] < 4))
22733 Chain.push_back(V);
22736 case X86ISD::PSHUFHW:
22737 // Check that the high words (being shuffled) are the identity in the
22738 // dword shuffle, and the low words are self-contained.
22739 if (Mask[2] != 2 || Mask[3] != 3 ||
22740 !(Mask[0] >= 0 && Mask[0] < 2 && Mask[1] >= 0 && Mask[1] < 2))
22743 Chain.push_back(V);
22746 case X86ISD::UNPCKL:
22747 case X86ISD::UNPCKH:
22748 // For either i8 -> i16 or i16 -> i32 unpacks, we can combine a dword
22749 // shuffle into a preceding word shuffle.
22750 if (V.getValueType() != MVT::v16i8 && V.getValueType() != MVT::v8i16)
22753 // Search for a half-shuffle which we can combine with.
22754 unsigned CombineOp =
22755 V.getOpcode() == X86ISD::UNPCKL ? X86ISD::PSHUFLW : X86ISD::PSHUFHW;
22756 if (V.getOperand(0) != V.getOperand(1) ||
22757 !V->isOnlyUserOf(V.getOperand(0).getNode()))
22759 Chain.push_back(V);
22760 V = V.getOperand(0);
22762 switch (V.getOpcode()) {
22764 return SDValue(); // Nothing to combine.
22766 case X86ISD::PSHUFLW:
22767 case X86ISD::PSHUFHW:
22768 if (V.getOpcode() == CombineOp)
22771 Chain.push_back(V);
22775 V = V.getOperand(0);
22779 } while (V.hasOneUse());
22782 // Break out of the loop if we break out of the switch.
22786 if (!V.hasOneUse())
22787 // We fell out of the loop without finding a viable combining instruction.
22790 // Merge this node's mask and our incoming mask.
22791 SmallVector<int, 4> VMask = getPSHUFShuffleMask(V);
22792 for (int &M : Mask)
22794 V = DAG.getNode(V.getOpcode(), DL, V.getValueType(), V.getOperand(0),
22795 getV4X86ShuffleImm8ForMask(Mask, DAG));
22797 // Rebuild the chain around this new shuffle.
22798 while (!Chain.empty()) {
22799 SDValue W = Chain.pop_back_val();
22801 if (V.getValueType() != W.getOperand(0).getValueType())
22802 V = DAG.getNode(ISD::BITCAST, DL, W.getOperand(0).getValueType(), V);
22804 switch (W.getOpcode()) {
22806 llvm_unreachable("Only PSHUF and UNPCK instructions get here!");
22808 case X86ISD::UNPCKL:
22809 case X86ISD::UNPCKH:
22810 V = DAG.getNode(W.getOpcode(), DL, W.getValueType(), V, V);
22813 case X86ISD::PSHUFD:
22814 case X86ISD::PSHUFLW:
22815 case X86ISD::PSHUFHW:
22816 V = DAG.getNode(W.getOpcode(), DL, W.getValueType(), V, W.getOperand(1));
22820 if (V.getValueType() != N.getValueType())
22821 V = DAG.getNode(ISD::BITCAST, DL, N.getValueType(), V);
22823 // Return the new chain to replace N.
22827 /// \brief Search for a combinable shuffle across a chain ending in pshuflw or pshufhw.
22829 /// We walk up the chain, skipping shuffles of the other half and looking
22830 /// through shuffles which switch halves trying to find a shuffle of the same
22831 /// pair of dwords.
22832 static bool combineRedundantHalfShuffle(SDValue N, MutableArrayRef<int> Mask,
22834 TargetLowering::DAGCombinerInfo &DCI) {
22836 (N.getOpcode() == X86ISD::PSHUFLW || N.getOpcode() == X86ISD::PSHUFHW) &&
22837 "Called with something other than an x86 128-bit half shuffle!");
22839 unsigned CombineOpcode = N.getOpcode();
22841 // Walk up a single-use chain looking for a combinable shuffle.
22842 SDValue V = N.getOperand(0);
22843 for (; V.hasOneUse(); V = V.getOperand(0)) {
22844 switch (V.getOpcode()) {
default:
22846 return false; // Nothing combined!

case ISD::BITCAST:
22849 // Skip bitcasts as we always know the type for the target specific
// shuffles we encounter.
continue;
22853 case X86ISD::PSHUFLW:
22854 case X86ISD::PSHUFHW:
22855 if (V.getOpcode() == CombineOpcode)
22858 // Other-half shuffles are no-ops.
22861 // Break out of the loop if we break out of the switch.
22865 if (!V.hasOneUse())
22866 // We fell out of the loop without finding a viable combining instruction.
22869 // Combine away the bottom node as its shuffle will be accumulated into
22870 // a preceding shuffle.
22871 DCI.CombineTo(N.getNode(), N.getOperand(0), /*AddTo*/ true);
22873 // Record the old value.
22876 // Merge this node's mask and our incoming mask (adjusted to account for all
22877 // the pshufd instructions encountered).
22878 SmallVector<int, 4> VMask = getPSHUFShuffleMask(V);
22879 for (int &M : Mask)
22881 V = DAG.getNode(V.getOpcode(), DL, MVT::v8i16, V.getOperand(0),
22882 getV4X86ShuffleImm8ForMask(Mask, DAG));
22884 // Check that the shuffles didn't cancel each other out. If not, we need to
22885 // combine to the new one.
22887 // Replace the combinable shuffle with the combined one, updating all users
22888 // so that we re-evaluate the chain here.
22889 DCI.CombineTo(Old.getNode(), V, /*AddTo*/ true);
22894 /// \brief Try to combine x86 target specific shuffles.
22895 static SDValue PerformTargetShuffleCombine(SDValue N, SelectionDAG &DAG,
22896 TargetLowering::DAGCombinerInfo &DCI,
22897 const X86Subtarget *Subtarget) {
22899 MVT VT = N.getSimpleValueType();
22900 SmallVector<int, 4> Mask;
22902 switch (N.getOpcode()) {
22903 case X86ISD::PSHUFD:
22904 case X86ISD::PSHUFLW:
22905 case X86ISD::PSHUFHW:
22906 Mask = getPSHUFShuffleMask(N);
22907 assert(Mask.size() == 4);
22913 // Nuke no-op shuffles that show up after combining.
22914 if (isNoopShuffleMask(Mask))
22915 return DCI.CombineTo(N.getNode(), N.getOperand(0), /*AddTo*/ true);
22917 // Look for simplifications involving one or two shuffle instructions.
22918 SDValue V = N.getOperand(0);
22919 switch (N.getOpcode()) {
22922 case X86ISD::PSHUFLW:
22923 case X86ISD::PSHUFHW:
22924 assert(VT == MVT::v8i16);
22927 if (combineRedundantHalfShuffle(N, Mask, DAG, DCI))
22928 return SDValue(); // We combined away this shuffle, so we're done.
22930 // See if this reduces to a PSHUFD which is no more expensive and can
22931 // combine with more operations. Note that it has to at least flip the
22932 // dwords as otherwise it would have been removed as a no-op.
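// E.g. a PSHUFLW with mask <2,3,0,1> swaps the two dwords of the low half,
// which the PSHUFD built below expresses directly.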
22933 if (Mask[0] == 2 && Mask[1] == 3 && Mask[2] == 0 && Mask[3] == 1) {
22934 int DMask[] = {0, 1, 2, 3};
22935 int DOffset = N.getOpcode() == X86ISD::PSHUFLW ? 0 : 2;
22936 DMask[DOffset + 0] = DOffset + 1;
22937 DMask[DOffset + 1] = DOffset + 0;
22938 V = DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, V);
22939 DCI.AddToWorklist(V.getNode());
22940 V = DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32, V,
22941 getV4X86ShuffleImm8ForMask(DMask, DAG));
22942 DCI.AddToWorklist(V.getNode());
22943 return DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, V);
22946 // Look for shuffle patterns which can be implemented as a single unpack.
22947 // FIXME: This doesn't handle the location of the PSHUFD generically, and
22948 // only works when we have a PSHUFD followed by two half-shuffles.
22949 if (Mask[0] == Mask[1] && Mask[2] == Mask[3] &&
22950 (V.getOpcode() == X86ISD::PSHUFLW ||
22951 V.getOpcode() == X86ISD::PSHUFHW) &&
22952 V.getOpcode() != N.getOpcode() &&
V.hasOneUse()) {
22954 SDValue D = V.getOperand(0);
22955 while (D.getOpcode() == ISD::BITCAST && D.hasOneUse())
22956 D = D.getOperand(0);
22957 if (D.getOpcode() == X86ISD::PSHUFD && D.hasOneUse()) {
22958 SmallVector<int, 4> VMask = getPSHUFShuffleMask(V);
22959 SmallVector<int, 4> DMask = getPSHUFShuffleMask(D);
22960 int NOffset = N.getOpcode() == X86ISD::PSHUFLW ? 0 : 4;
22961 int VOffset = V.getOpcode() == X86ISD::PSHUFLW ? 0 : 4;
int WordMask[8];
22963 for (int i = 0; i < 4; ++i) {
22964 WordMask[i + NOffset] = Mask[i] + NOffset;
22965 WordMask[i + VOffset] = VMask[i] + VOffset;
22967 // Map the word mask through the DWord mask.
int MappedMask[8];
22969 for (int i = 0; i < 8; ++i)
22970 MappedMask[i] = 2 * DMask[WordMask[i] / 2] + WordMask[i] % 2;
22971 const int UnpackLoMask[] = {0, 0, 1, 1, 2, 2, 3, 3};
22972 const int UnpackHiMask[] = {4, 4, 5, 5, 6, 6, 7, 7};
22973 if (std::equal(std::begin(MappedMask), std::end(MappedMask),
22974 std::begin(UnpackLoMask)) ||
22975 std::equal(std::begin(MappedMask), std::end(MappedMask),
22976 std::begin(UnpackHiMask))) {
22977 // We can replace all three shuffles with an unpack.
22978 V = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, D.getOperand(0));
22979 DCI.AddToWorklist(V.getNode());
22980 return DAG.getNode(MappedMask[0] == 0 ? X86ISD::UNPCKL
: X86ISD::UNPCKH,
22982 DL, MVT::v8i16, V, V);
22989 case X86ISD::PSHUFD:
22990 if (SDValue NewN = combineRedundantDWordShuffle(N, Mask, DAG, DCI))
return NewN;
break;
}

return SDValue();
}
22999 /// \brief Try to combine a shuffle into a target-specific add-sub node.
23001 /// We combine this directly on the abstract vector shuffle nodes so it is
23002 /// easier to generically match. We also insert dummy vector shuffle nodes for
23003 /// the operands which explicitly discard the lanes which are unused by this
23004 /// operation, so that the fact that those lanes are unused can flow through
23005 /// the rest of the combiner.
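/// For example, on v4f32 the blend (shuffle (fsub A, B), (fadd A, B),
/// <0, 5, 2, 7>) takes the subtraction in the even lanes and the addition in
/// the odd lanes, which is exactly X86ISD::ADDSUB A, B.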
23006 static SDValue combineShuffleToAddSub(SDNode *N, SelectionDAG &DAG) {
SDLoc DL(N);
23008 EVT VT = N->getValueType(0);
23010 // We only handle target-independent shuffles.
23011 // FIXME: It would be easy and harmless to use the target shuffle mask
23012 // extraction tool to support more.
23013 if (N->getOpcode() != ISD::VECTOR_SHUFFLE)
return SDValue();
23016 auto *SVN = cast<ShuffleVectorSDNode>(N);
23017 ArrayRef<int> Mask = SVN->getMask();
23018 SDValue V1 = N->getOperand(0);
23019 SDValue V2 = N->getOperand(1);
23021 // We require the first shuffle operand to be the SUB node, and the second to
23022 // be the ADD node.
23023 // FIXME: We should support the commuted patterns.
23024 if (V1->getOpcode() != ISD::FSUB || V2->getOpcode() != ISD::FADD)
return SDValue();
23027 // If there are other uses of these operations we can't fold them.
23028 if (!V1->hasOneUse() || !V2->hasOneUse())
return SDValue();
23031 // Ensure that both operations have the same operands. Note that we can
23032 // commute the FADD operands.
23033 SDValue LHS = V1->getOperand(0), RHS = V1->getOperand(1);
23034 if ((V2->getOperand(0) != LHS || V2->getOperand(1) != RHS) &&
23035 (V2->getOperand(0) != RHS || V2->getOperand(1) != LHS))
return SDValue();
23038 // We're looking for blends between FADD and FSUB nodes. We insist on these
23039 // nodes being lined up in a specific expected pattern.
23040 if (!(isShuffleEquivalent(V1, V2, Mask, 0, 3) ||
23041 isShuffleEquivalent(V1, V2, Mask, 0, 5, 2, 7) ||
23042 isShuffleEquivalent(V1, V2, Mask, 0, 9, 2, 11, 4, 13, 6, 15)))
return SDValue();
23045 // Only specific types are legal at this point, assert so we notice if and
23046 // when these change.
23047 assert((VT == MVT::v4f32 || VT == MVT::v2f64 || VT == MVT::v8f32 ||
23048 VT == MVT::v4f64) &&
23049 "Unknown vector type encountered!");
23051 return DAG.getNode(X86ISD::ADDSUB, DL, VT, LHS, RHS);
23054 /// PerformShuffleCombine - Performs several different shuffle combines.
23055 static SDValue PerformShuffleCombine(SDNode *N, SelectionDAG &DAG,
23056 TargetLowering::DAGCombinerInfo &DCI,
23057 const X86Subtarget *Subtarget) {
23059 SDValue N0 = N->getOperand(0);
23060 SDValue N1 = N->getOperand(1);
23061 EVT VT = N->getValueType(0);
23063 // Don't create instructions with illegal types after legalize types has run.
23064 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
23065 if (!DCI.isBeforeLegalize() && !TLI.isTypeLegal(VT.getVectorElementType()))
23068 // If we have legalized the vector types, look for blends of FADD and FSUB
23069 // nodes that we can fuse into an ADDSUB node.
23070 if (TLI.isTypeLegal(VT) && Subtarget->hasSSE3())
23071 if (SDValue AddSub = combineShuffleToAddSub(N, DAG))
23074 // Combine 256-bit vector shuffles. This is only profitable when in AVX mode
23075 if (Subtarget->hasFp256() && VT.is256BitVector() &&
23076 N->getOpcode() == ISD::VECTOR_SHUFFLE)
23077 return PerformShuffleCombine256(N, DAG, DCI, Subtarget);
23079 // During Type Legalization, when promoting illegal vector types,
23080 // the backend might introduce new shuffle dag nodes and bitcasts.
23082 // This code performs the following transformation:
23083 // fold: (shuffle (bitcast (BINOP A, B)), Undef, <Mask>) ->
23084 // (shuffle (BINOP (bitcast A), (bitcast B)), Undef, <Mask>)
23086 // We do this only if both the bitcast and the BINOP dag nodes have
23087 // one use. Also, perform this transformation only if the new binary
23088 // operation is legal. This is to avoid introducing dag nodes that
23089 // potentially need to be further expanded (or custom lowered) into a
23090 // less optimal sequence of dag nodes.
23091 if (!DCI.isBeforeLegalize() && DCI.isBeforeLegalizeOps() &&
23092 N1.getOpcode() == ISD::UNDEF && N0.hasOneUse() &&
23093 N0.getOpcode() == ISD::BITCAST) {
23094 SDValue BC0 = N0.getOperand(0);
23095 EVT SVT = BC0.getValueType();
23096 unsigned Opcode = BC0.getOpcode();
23097 unsigned NumElts = VT.getVectorNumElements();
23099 if (BC0.hasOneUse() && SVT.isVector() &&
23100 SVT.getVectorNumElements() * 2 == NumElts &&
23101 TLI.isOperationLegal(Opcode, VT)) {
23102 bool CanFold = false;
23114 unsigned SVTNumElts = SVT.getVectorNumElements();
23115 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
23116 for (unsigned i = 0, e = SVTNumElts; i != e && CanFold; ++i)
23117 CanFold = SVOp->getMaskElt(i) == (int)(i * 2);
23118 for (unsigned i = SVTNumElts, e = NumElts; i != e && CanFold; ++i)
23119 CanFold = SVOp->getMaskElt(i) < 0;
23122 SDValue BC00 = DAG.getNode(ISD::BITCAST, dl, VT, BC0.getOperand(0));
23123 SDValue BC01 = DAG.getNode(ISD::BITCAST, dl, VT, BC0.getOperand(1));
23124 SDValue NewBinOp = DAG.getNode(BC0.getOpcode(), dl, VT, BC00, BC01);
23125 return DAG.getVectorShuffle(VT, dl, NewBinOp, N1, &SVOp->getMask()[0]);
23130 // Only handle 128 wide vector from here on.
23131 if (!VT.is128BitVector())
23134 // Combine a vector_shuffle that is equal to build_vector load1, load2, load3,
23135 // load4, <0, 1, 2, 3> into a 128-bit load if the load addresses are
23136 // consecutive, non-overlapping, and in the right order.
23137 SmallVector<SDValue, 16> Elts;
23138 for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i)
23139 Elts.push_back(getShuffleScalarElt(N, i, DAG, 0));
23141 SDValue LD = EltsFromConsecutiveLoads(VT, Elts, dl, DAG, true);
if (LD.getNode())
return LD;
23145 if (isTargetShuffle(N->getOpcode())) {
SDValue Shuffle =
23147 PerformTargetShuffleCombine(SDValue(N, 0), DAG, DCI, Subtarget);
23148 if (Shuffle.getNode())
return Shuffle;
23151 // Try recursively combining arbitrary sequences of x86 shuffle
23152 // instructions into higher-order shuffles. We do this after combining
23153 // specific PSHUF instruction sequences into their minimal form so that we
23154 // can evaluate how many specialized shuffle instructions are involved in
23155 // a particular chain.
23156 SmallVector<int, 1> NonceMask; // Just a placeholder.
23157 NonceMask.push_back(0);
23158 if (combineX86ShufflesRecursively(SDValue(N, 0), SDValue(N, 0), NonceMask,
23159 /*Depth*/ 1, /*HasPSHUFB*/ false, DAG,
DCI, Subtarget))
23161 return SDValue(); // This routine will use CombineTo to replace N.
}

return SDValue();
}
23167 /// PerformTruncateCombine - Converts truncate operation to
23168 /// a sequence of vector shuffle operations.
23169 /// This is possible when we truncate a 256-bit vector to a 128-bit vector.
23170 static SDValue PerformTruncateCombine(SDNode *N, SelectionDAG &DAG,
23171 TargetLowering::DAGCombinerInfo &DCI,
23172 const X86Subtarget *Subtarget) {
return SDValue();
}
23176 /// XFormVExtractWithShuffleIntoLoad - Check if a vector extract from a target
23177 /// specific shuffle of a load can be folded into a single element load.
23178 /// Similar handling for VECTOR_SHUFFLE is performed by DAGCombiner, but
23179 /// shuffles have been custom lowered so we need to handle those here.
23180 static SDValue XFormVExtractWithShuffleIntoLoad(SDNode *N, SelectionDAG &DAG,
23181 TargetLowering::DAGCombinerInfo &DCI) {
23182 if (DCI.isBeforeLegalizeOps())
23185 SDValue InVec = N->getOperand(0);
23186 SDValue EltNo = N->getOperand(1);
23188 if (!isa<ConstantSDNode>(EltNo))
23191 EVT OriginalVT = InVec.getValueType();
23193 if (InVec.getOpcode() == ISD::BITCAST) {
23194 // Don't duplicate a load with other uses.
23195 if (!InVec.hasOneUse())
23197 EVT BCVT = InVec.getOperand(0).getValueType();
23198 if (BCVT.getVectorNumElements() != OriginalVT.getVectorNumElements())
23200 InVec = InVec.getOperand(0);
23203 EVT CurrentVT = InVec.getValueType();
23205 if (!isTargetShuffle(InVec.getOpcode()))
23208 // Don't duplicate a load with other uses.
23209 if (!InVec.hasOneUse())
23212 SmallVector<int, 16> ShuffleMask;
bool UnaryShuffle;
23214 if (!getTargetShuffleMask(InVec.getNode(), CurrentVT.getSimpleVT(),
23215 ShuffleMask, UnaryShuffle))
return SDValue();
23218 // Select the input vector, guarding against out of range extract vector.
23219 unsigned NumElems = CurrentVT.getVectorNumElements();
23220 int Elt = cast<ConstantSDNode>(EltNo)->getZExtValue();
23221 int Idx = (Elt > (int)NumElems) ? -1 : ShuffleMask[Elt];
23222 SDValue LdNode = (Idx < (int)NumElems) ? InVec.getOperand(0)
23223 : InVec.getOperand(1);
23225 // If inputs to shuffle are the same for both ops, then allow 2 uses
23226 unsigned AllowedUses = InVec.getNumOperands() > 1 &&
23227 InVec.getOperand(0) == InVec.getOperand(1) ? 2 : 1;
23229 if (LdNode.getOpcode() == ISD::BITCAST) {
23230 // Don't duplicate a load with other uses.
23231 if (!LdNode.getNode()->hasNUsesOfValue(AllowedUses, 0))
23234 AllowedUses = 1; // only allow 1 load use if we have a bitcast
23235 LdNode = LdNode.getOperand(0);
23238 if (!ISD::isNormalLoad(LdNode.getNode()))
23241 LoadSDNode *LN0 = cast<LoadSDNode>(LdNode);
23243 if (!LN0 ||!LN0->hasNUsesOfValue(AllowedUses, 0) || LN0->isVolatile())
23246 EVT EltVT = N->getValueType(0);
23247 // If there's a bitcast before the shuffle, check if the load type and
23248 // alignment is valid.
23249 unsigned Align = LN0->getAlignment();
23250 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
23251 unsigned NewAlign = TLI.getDataLayout()->getABITypeAlignment(
23252 EltVT.getTypeForEVT(*DAG.getContext()));
23254 if (NewAlign > Align || !TLI.isOperationLegalOrCustom(ISD::LOAD, EltVT))
23257 // All checks match so transform back to vector_shuffle so that DAG combiner
23258 // can finish the job
23261 // Create shuffle node taking into account the case that it's a unary shuffle
23262 SDValue Shuffle = (UnaryShuffle) ? DAG.getUNDEF(CurrentVT)
23263 : InVec.getOperand(1);
23264 Shuffle = DAG.getVectorShuffle(CurrentVT, dl,
23265 InVec.getOperand(0), Shuffle,
23267 Shuffle = DAG.getNode(ISD::BITCAST, dl, OriginalVT, Shuffle);
23268 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, N->getValueType(0), Shuffle,
23272 /// \brief Detect bitcasts between i32 to x86mmx low word. Since MMX types are
23273 /// special and don't usually play with other vector types, it's better to
23274 /// handle them early to be sure we emit efficient code by avoiding
23275 /// store-load conversions.
23276 static SDValue PerformBITCASTCombine(SDNode *N, SelectionDAG &DAG) {
23277 if (N->getValueType(0) != MVT::x86mmx ||
23278 N->getOperand(0)->getOpcode() != ISD::BUILD_VECTOR ||
23279 N->getOperand(0)->getValueType(0) != MVT::v2i32)
23282 SDValue V = N->getOperand(0);
23283 ConstantSDNode *C = dyn_cast<ConstantSDNode>(V.getOperand(1));
23284 if (C && C->getZExtValue() == 0 && V.getOperand(0).getValueType() == MVT::i32)
23285 return DAG.getNode(X86ISD::MMX_MOVW2D, SDLoc(V.getOperand(0)),
23286 N->getValueType(0), V.getOperand(0));
23291 /// PerformEXTRACT_VECTOR_ELTCombine - Detect vector gather/scatter index
23292 /// generation and convert it from being a bunch of shuffles and extracts
23293 /// into a somewhat faster sequence. For i686, the best sequence is apparently
23294 /// storing the value and loading scalars back, while for x64 we should
23295 /// use 64-bit extracts and shifts.
23296 static SDValue PerformEXTRACT_VECTOR_ELTCombine(SDNode *N, SelectionDAG &DAG,
23297 TargetLowering::DAGCombinerInfo &DCI) {
23298 SDValue NewOp = XFormVExtractWithShuffleIntoLoad(N, DAG, DCI);
23299 if (NewOp.getNode())
23302 SDValue InputVector = N->getOperand(0);
23304 // Detect mmx to i32 conversion through a v2i32 elt extract.
23305 if (InputVector.getOpcode() == ISD::BITCAST && InputVector.hasOneUse() &&
23306 N->getValueType(0) == MVT::i32 &&
23307 InputVector.getValueType() == MVT::v2i32) {
23309 // The bitcast source is a direct mmx result.
23310 SDValue MMXSrc = InputVector.getNode()->getOperand(0);
23311 if (MMXSrc.getValueType() == MVT::x86mmx)
23312 return DAG.getNode(X86ISD::MMX_MOVD2W, SDLoc(InputVector),
23313 N->getValueType(0),
23314 InputVector.getNode()->getOperand(0));
23316 // The mmx is indirect: (i64 extract_elt (v1i64 bitcast (x86mmx ...))).
23317 SDValue MMXSrcOp = MMXSrc.getOperand(0);
23318 if (MMXSrc.getOpcode() == ISD::EXTRACT_VECTOR_ELT && MMXSrc.hasOneUse() &&
23319 MMXSrc.getValueType() == MVT::i64 && MMXSrcOp.hasOneUse() &&
23320 MMXSrcOp.getOpcode() == ISD::BITCAST &&
23321 MMXSrcOp.getValueType() == MVT::v1i64 &&
23322 MMXSrcOp.getOperand(0).getValueType() == MVT::x86mmx)
23323 return DAG.getNode(X86ISD::MMX_MOVD2W, SDLoc(InputVector),
23324 N->getValueType(0),
23325 MMXSrcOp.getOperand(0));
23328 // Only operate on vectors of 4 elements, where the alternative shuffling
23329 // gets to be more expensive.
23330 if (InputVector.getValueType() != MVT::v4i32)
23333 // Check whether every use of InputVector is an EXTRACT_VECTOR_ELT with a
23334 // single use which is a sign-extend or zero-extend, and all elements are
23336 SmallVector<SDNode *, 4> Uses;
23337 unsigned ExtractedElements = 0;
23338 for (SDNode::use_iterator UI = InputVector.getNode()->use_begin(),
23339 UE = InputVector.getNode()->use_end(); UI != UE; ++UI) {
23340 if (UI.getUse().getResNo() != InputVector.getResNo())
23343 SDNode *Extract = *UI;
23344 if (Extract->getOpcode() != ISD::EXTRACT_VECTOR_ELT)
23347 if (Extract->getValueType(0) != MVT::i32)
23349 if (!Extract->hasOneUse())
23351 if (Extract->use_begin()->getOpcode() != ISD::SIGN_EXTEND &&
23352 Extract->use_begin()->getOpcode() != ISD::ZERO_EXTEND)
23354 if (!isa<ConstantSDNode>(Extract->getOperand(1)))
23357 // Record which element was extracted.
23358 ExtractedElements |=
23359 1 << cast<ConstantSDNode>(Extract->getOperand(1))->getZExtValue();
23361 Uses.push_back(Extract);
23364 // If not all the elements were used, this may not be worthwhile.
23365 if (ExtractedElements != 15)
23368 // Ok, we've now decided to do the transformation.
23369 // If 64-bit shifts are legal, use the extract-shift sequence,
23370 // otherwise bounce the vector off the cache.
23371 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
23373 SDLoc dl(InputVector);
23375 if (TLI.isOperationLegal(ISD::SRA, MVT::i64)) {
23376 SDValue Cst = DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, InputVector);
23377 EVT VecIdxTy = DAG.getTargetLoweringInfo().getVectorIdxTy();
23378 SDValue BottomHalf = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i64, Cst,
23379 DAG.getConstant(0, VecIdxTy));
23380 SDValue TopHalf = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i64, Cst,
23381 DAG.getConstant(1, VecIdxTy));
23383 SDValue ShAmt = DAG.getConstant(32,
23384 DAG.getTargetLoweringInfo().getShiftAmountTy(MVT::i64));
23385 Vals[0] = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, BottomHalf);
23386 Vals[1] = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32,
23387 DAG.getNode(ISD::SRA, dl, MVT::i64, BottomHalf, ShAmt));
23388 Vals[2] = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, TopHalf);
23389 Vals[3] = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32,
23390 DAG.getNode(ISD::SRA, dl, MVT::i64, TopHalf, ShAmt));
23392 // Store the value to a temporary stack slot.
23393 SDValue StackPtr = DAG.CreateStackTemporary(InputVector.getValueType());
23394 SDValue Ch = DAG.getStore(DAG.getEntryNode(), dl, InputVector, StackPtr,
23395 MachinePointerInfo(), false, false, 0);
23397 EVT ElementType = InputVector.getValueType().getVectorElementType();
23398 unsigned EltSize = ElementType.getSizeInBits() / 8;
23400 // Replace each use (extract) with a load of the appropriate element.
23401 for (unsigned i = 0; i < 4; ++i) {
23402 uint64_t Offset = EltSize * i;
23403 SDValue OffsetVal = DAG.getConstant(Offset, TLI.getPointerTy());
23405 SDValue ScalarAddr = DAG.getNode(ISD::ADD, dl, TLI.getPointerTy(),
23406 StackPtr, OffsetVal);
23408 // Load the scalar.
23409 Vals[i] = DAG.getLoad(ElementType, dl, Ch,
23410 ScalarAddr, MachinePointerInfo(),
23411 false, false, false, 0);
23416 // Replace the extracts
23417 for (SmallVectorImpl<SDNode *>::iterator UI = Uses.begin(),
23418 UE = Uses.end(); UI != UE; ++UI) {
23419 SDNode *Extract = *UI;
23421 SDValue Idx = Extract->getOperand(1);
23422 uint64_t IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
23423 DAG.ReplaceAllUsesOfValueWith(SDValue(Extract, 0), Vals[IdxVal]);
23426 // The replacement was made in place; don't return anything.
23430 /// \brief Matches a VSELECT onto min/max, or returns 0 if the node doesn't match.
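/// For example (assuming SSE4.1), the v4i32 pattern
///   (vselect (setcc X, Y, setlt), X, Y)
/// is matched to X86ISD::SMIN, which selects to a single PMINSD.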
23431 static std::pair<unsigned, bool>
23432 matchIntegerMINMAX(SDValue Cond, EVT VT, SDValue LHS, SDValue RHS,
23433 SelectionDAG &DAG, const X86Subtarget *Subtarget) {
23434 if (!VT.isVector())
23435 return std::make_pair(0, false);
23437 bool NeedSplit = false;
23438 switch (VT.getSimpleVT().SimpleTy) {
23439 default: return std::make_pair(0, false);
23442 if (!Subtarget->hasVLX())
23443 return std::make_pair(0, false);
23447 if (!Subtarget->hasBWI())
23448 return std::make_pair(0, false);
23452 if (!Subtarget->hasAVX512())
23453 return std::make_pair(0, false);
23458 if (!Subtarget->hasAVX2())
23460 if (!Subtarget->hasAVX())
23461 return std::make_pair(0, false);
23466 if (!Subtarget->hasSSE2())
23467 return std::make_pair(0, false);
23470 // SSE2 has only a small subset of the operations.
23471 bool hasUnsigned = Subtarget->hasSSE41() ||
23472 (Subtarget->hasSSE2() && VT == MVT::v16i8);
23473 bool hasSigned = Subtarget->hasSSE41() ||
23474 (Subtarget->hasSSE2() && VT == MVT::v8i16);
23476 ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
23479 // Check for x CC y ? x : y.
23480 if (DAG.isEqualTo(LHS, Cond.getOperand(0)) &&
23481 DAG.isEqualTo(RHS, Cond.getOperand(1))) {
23486 Opc = hasUnsigned ? X86ISD::UMIN : 0; break;
23489 Opc = hasUnsigned ? X86ISD::UMAX : 0; break;
23492 Opc = hasSigned ? X86ISD::SMIN : 0; break;
23495 Opc = hasSigned ? X86ISD::SMAX : 0; break;
23497 // Check for x CC y ? y : x -- a min/max with reversed arms.
23498 } else if (DAG.isEqualTo(LHS, Cond.getOperand(1)) &&
23499 DAG.isEqualTo(RHS, Cond.getOperand(0))) {
23504 Opc = hasUnsigned ? X86ISD::UMAX : 0; break;
23507 Opc = hasUnsigned ? X86ISD::UMIN : 0; break;
23510 Opc = hasSigned ? X86ISD::SMAX : 0; break;
23513 Opc = hasSigned ? X86ISD::SMIN : 0; break;
23517 return std::make_pair(Opc, NeedSplit);
23520 static SDValue
23521 transformVSELECTtoBlendVECTOR_SHUFFLE(SDNode *N, SelectionDAG &DAG,
23522 const X86Subtarget *Subtarget) {
23524 SDValue Cond = N->getOperand(0);
23525 SDValue LHS = N->getOperand(1);
23526 SDValue RHS = N->getOperand(2);
23528 if (Cond.getOpcode() == ISD::SIGN_EXTEND) {
23529 SDValue CondSrc = Cond->getOperand(0);
23530 if (CondSrc->getOpcode() == ISD::SIGN_EXTEND_INREG)
23531 Cond = CondSrc->getOperand(0);
23534 if (!ISD::isBuildVectorOfConstantSDNodes(Cond.getNode()))
23537 // A vselect where all conditions and data are constants can be optimized into
23538 // a single vector load by SelectionDAGLegalize::ExpandBUILD_VECTOR().
23539 if (ISD::isBuildVectorOfConstantSDNodes(LHS.getNode()) &&
23540 ISD::isBuildVectorOfConstantSDNodes(RHS.getNode()))
23543 unsigned MaskValue = 0;
23544 if (!BUILD_VECTORtoBlendMask(cast<BuildVectorSDNode>(Cond), MaskValue))
23547 MVT VT = N->getSimpleValueType(0);
23548 unsigned NumElems = VT.getVectorNumElements();
23549 SmallVector<int, 8> ShuffleMask(NumElems, -1);
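// For reference, a v4i32 shuffle mask of <0, 5, 6, 3> built here takes
// lanes 0 and 3 from LHS and lanes 1 and 2 from RHS (indices >= NumElems
// address RHS), which is exactly a BLENDPS/PBLENDW-style immediate blend.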
23550 for (unsigned i = 0; i < NumElems; ++i) {
23551 // Be sure we emit undef where we can.
23552 if (Cond.getOperand(i)->getOpcode() == ISD::UNDEF)
23553 ShuffleMask[i] = -1;
23555 ShuffleMask[i] = i + NumElems * ((MaskValue >> i) & 1);
23558 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
23559 if (!TLI.isShuffleMaskLegal(ShuffleMask, VT))
23561 return DAG.getVectorShuffle(VT, dl, LHS, RHS, &ShuffleMask[0]);
23564 /// PerformSELECTCombine - Do target-specific dag combines on SELECT and VSELECT
23566 static SDValue PerformSELECTCombine(SDNode *N, SelectionDAG &DAG,
23567 TargetLowering::DAGCombinerInfo &DCI,
23568 const X86Subtarget *Subtarget) {
23570 SDValue Cond = N->getOperand(0);
23571 // Get the LHS/RHS of the select.
23572 SDValue LHS = N->getOperand(1);
23573 SDValue RHS = N->getOperand(2);
23574 EVT VT = LHS.getValueType();
23575 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
23577 // If we have SSE[12] support, try to form min/max nodes. SSE min/max
23578 // instructions match the semantics of the common C idiom x<y?x:y but not
23579 // x<=y?x:y, because of how they handle negative zero (which can be
23580 // ignored in unsafe-math mode).
23581 // We also try to create v2f32 min/max nodes, which we later widen to v4f32.
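// For example, (select (setcc X, Y, setolt), X, Y) on f32 becomes
// (X86ISD::FMIN X, Y), i.e. a single MINSS; MINSS returns its second
// operand for NaNs and for the -0.0/+0.0 tie, which matches the
// x < y ? x : y idiom exactly.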
23582 if (Cond.getOpcode() == ISD::SETCC && VT.isFloatingPoint() &&
23583 VT != MVT::f80 && (TLI.isTypeLegal(VT) || VT == MVT::v2f32) &&
23584 (Subtarget->hasSSE2() ||
23585 (Subtarget->hasSSE1() && VT.getScalarType() == MVT::f32))) {
23586 ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
23588 unsigned Opcode = 0;
23589 // Check for x CC y ? x : y.
23590 if (DAG.isEqualTo(LHS, Cond.getOperand(0)) &&
23591 DAG.isEqualTo(RHS, Cond.getOperand(1))) {
23595 // Converting this to a min would handle NaNs incorrectly, and swapping
23596 // the operands would cause it to handle comparisons between positive
23597 // and negative zero incorrectly.
23598 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)) {
23599 if (!DAG.getTarget().Options.UnsafeFPMath &&
23600 !(DAG.isKnownNeverZero(LHS) || DAG.isKnownNeverZero(RHS)))
23602 std::swap(LHS, RHS);
23604 Opcode = X86ISD::FMIN;
23607 // Converting this to a min would handle comparisons between positive
23608 // and negative zero incorrectly.
23609 if (!DAG.getTarget().Options.UnsafeFPMath &&
23610 !DAG.isKnownNeverZero(LHS) && !DAG.isKnownNeverZero(RHS))
23612 Opcode = X86ISD::FMIN;
23615 // Converting this to a min would handle both negative zeros and NaNs
23616 // incorrectly, but we can swap the operands to fix both.
23617 std::swap(LHS, RHS);
23621 Opcode = X86ISD::FMIN;
23625 // Converting this to a max would handle comparisons between positive
23626 // and negative zero incorrectly.
23627 if (!DAG.getTarget().Options.UnsafeFPMath &&
23628 !DAG.isKnownNeverZero(LHS) && !DAG.isKnownNeverZero(RHS))
23630 Opcode = X86ISD::FMAX;
23633 // Converting this to a max would handle NaNs incorrectly, and swapping
23634 // the operands would cause it to handle comparisons between positive
23635 // and negative zero incorrectly.
23636 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)) {
23637 if (!DAG.getTarget().Options.UnsafeFPMath &&
23638 !(DAG.isKnownNeverZero(LHS) || DAG.isKnownNeverZero(RHS)))
23640 std::swap(LHS, RHS);
23642 Opcode = X86ISD::FMAX;
23645 // Converting this to a max would handle both negative zeros and NaNs
23646 // incorrectly, but we can swap the operands to fix both.
23647 std::swap(LHS, RHS);
23651 Opcode = X86ISD::FMAX;
23654 // Check for x CC y ? y : x -- a min/max with reversed arms.
23655 } else if (DAG.isEqualTo(LHS, Cond.getOperand(1)) &&
23656 DAG.isEqualTo(RHS, Cond.getOperand(0))) {
23660 // Converting this to a min would handle comparisons between positive
23661 // and negative zero incorrectly, and swapping the operands would
23662 // cause it to handle NaNs incorrectly.
23663 if (!DAG.getTarget().Options.UnsafeFPMath &&
23664 !(DAG.isKnownNeverZero(LHS) || DAG.isKnownNeverZero(RHS))) {
23665 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS))
23667 std::swap(LHS, RHS);
23669 Opcode = X86ISD::FMIN;
23672 // Converting this to a min would handle NaNs incorrectly.
23673 if (!DAG.getTarget().Options.UnsafeFPMath &&
23674 (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)))
23676 Opcode = X86ISD::FMIN;
23679 // Converting this to a min would handle both negative zeros and NaNs
23680 // incorrectly, but we can swap the operands to fix both.
23681 std::swap(LHS, RHS);
23685 Opcode = X86ISD::FMIN;
23689 // Converting this to a max would handle NaNs incorrectly.
23690 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS))
23692 Opcode = X86ISD::FMAX;
23695 // Converting this to a max would handle comparisons between positive
23696 // and negative zero incorrectly, and swapping the operands would
23697 // cause it to handle NaNs incorrectly.
23698 if (!DAG.getTarget().Options.UnsafeFPMath &&
23699 !DAG.isKnownNeverZero(LHS) && !DAG.isKnownNeverZero(RHS)) {
23700 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS))
23702 std::swap(LHS, RHS);
23704 Opcode = X86ISD::FMAX;
23707 // Converting this to a max would handle both negative zeros and NaNs
23708 // incorrectly, but we can swap the operands to fix both.
23709 std::swap(LHS, RHS);
23713 Opcode = X86ISD::FMAX;
23719 return DAG.getNode(Opcode, DL, N->getValueType(0), LHS, RHS);
23722 EVT CondVT = Cond.getValueType();
23723 if (Subtarget->hasAVX512() && VT.isVector() && CondVT.isVector() &&
23724 CondVT.getVectorElementType() == MVT::i1) {
23725 // v16i8 (select v16i1, v16i8, v16i8) does not have a proper
23726 // lowering on KNL. In this case we convert it to
23727 // v16i8 (select v16i8, v16i8, v16i8) and use an AVX blend instruction.
23728 // The same applies to all 128- and 256-bit vectors of i8 and i16.
23729 // Starting with SKX, these selects have a proper lowering.
23730 EVT OpVT = LHS.getValueType();
23731 if ((OpVT.is128BitVector() || OpVT.is256BitVector()) &&
23732 (OpVT.getVectorElementType() == MVT::i8 ||
23733 OpVT.getVectorElementType() == MVT::i16) &&
23734 !(Subtarget->hasBWI() && Subtarget->hasVLX())) {
23735 Cond = DAG.getNode(ISD::SIGN_EXTEND, DL, OpVT, Cond);
23736 DCI.AddToWorklist(Cond.getNode());
23737 return DAG.getNode(N->getOpcode(), DL, OpVT, Cond, LHS, RHS);
23740 // If this is a select between two integer constants, try to do some
23741 // optimizations.
23742 if (ConstantSDNode *TrueC = dyn_cast<ConstantSDNode>(LHS)) {
23743 if (ConstantSDNode *FalseC = dyn_cast<ConstantSDNode>(RHS))
23744 // Don't do this for crazy integer types.
23745 if (DAG.getTargetLoweringInfo().isTypeLegal(LHS.getValueType())) {
23746 // If this is efficiently invertible, canonicalize the LHSC/RHSC values
23747 // so that TrueC (the true value) is larger than FalseC.
23748 bool NeedsCondInvert = false;
23750 if (TrueC->getAPIntValue().ult(FalseC->getAPIntValue()) &&
23751 // Efficiently invertible.
23752 (Cond.getOpcode() == ISD::SETCC || // setcc -> invertible.
23753 (Cond.getOpcode() == ISD::XOR && // xor(X, C) -> invertible.
23754 isa<ConstantSDNode>(Cond.getOperand(1))))) {
23755 NeedsCondInvert = true;
23756 std::swap(TrueC, FalseC);
23759 // Optimize C ? 8 : 0 -> zext(C) << 3. Likewise for any pow2/0.
23760 if (FalseC->getAPIntValue() == 0 &&
23761 TrueC->getAPIntValue().isPowerOf2()) {
23762 if (NeedsCondInvert) // Invert the condition if needed.
23763 Cond = DAG.getNode(ISD::XOR, DL, Cond.getValueType(), Cond,
23764 DAG.getConstant(1, Cond.getValueType()));
23766 // Zero extend the condition if needed.
23767 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, LHS.getValueType(), Cond);
23769 unsigned ShAmt = TrueC->getAPIntValue().logBase2();
23770 return DAG.getNode(ISD::SHL, DL, LHS.getValueType(), Cond,
23771 DAG.getConstant(ShAmt, MVT::i8));
23774 // Optimize Cond ? cst+1 : cst -> zext(setcc(C))+cst.
23775 if (FalseC->getAPIntValue()+1 == TrueC->getAPIntValue()) {
23776 if (NeedsCondInvert) // Invert the condition if needed.
23777 Cond = DAG.getNode(ISD::XOR, DL, Cond.getValueType(), Cond,
23778 DAG.getConstant(1, Cond.getValueType()));
23780 // Zero extend the condition if needed.
23781 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL,
23782 FalseC->getValueType(0), Cond);
23783 return DAG.getNode(ISD::ADD, DL, Cond.getValueType(), Cond,
23784 SDValue(FalseC, 0));
23787 // Optimize cases that will turn into an LEA instruction. This requires
23788 // an i32 or i64 and an efficient multiplier (1, 2, 3, 4, 5, 8, 9).
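// For instance, (select C, 10, 1) has a difference of 9, so it can be
// lowered as roughly 'lea r, [c + c*8]; add r, 1' (or a single
// three-operand LEA) instead of a cmov between two constant
// materializations.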
23789 if (N->getValueType(0) == MVT::i32 || N->getValueType(0) == MVT::i64) {
23790 uint64_t Diff = TrueC->getZExtValue()-FalseC->getZExtValue();
23791 if (N->getValueType(0) == MVT::i32) Diff = (unsigned)Diff;
23793 bool isFastMultiplier = false;
23795 switch ((unsigned char)Diff) {
23797 case 1: // result = add base, cond
23798 case 2: // result = lea base( , cond*2)
23799 case 3: // result = lea base(cond, cond*2)
23800 case 4: // result = lea base( , cond*4)
23801 case 5: // result = lea base(cond, cond*4)
23802 case 8: // result = lea base( , cond*8)
23803 case 9: // result = lea base(cond, cond*8)
23804 isFastMultiplier = true;
23809 if (isFastMultiplier) {
23810 APInt Diff = TrueC->getAPIntValue()-FalseC->getAPIntValue();
23811 if (NeedsCondInvert) // Invert the condition if needed.
23812 Cond = DAG.getNode(ISD::XOR, DL, Cond.getValueType(), Cond,
23813 DAG.getConstant(1, Cond.getValueType()));
23815 // Zero extend the condition if needed.
23816 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, FalseC->getValueType(0),
23818 // Scale the condition by the difference.
23820 Cond = DAG.getNode(ISD::MUL, DL, Cond.getValueType(), Cond,
23821 DAG.getConstant(Diff, Cond.getValueType()));
23823 // Add the base if non-zero.
23824 if (FalseC->getAPIntValue() != 0)
23825 Cond = DAG.getNode(ISD::ADD, DL, Cond.getValueType(), Cond,
23826 SDValue(FalseC, 0));
23833 // Canonicalize max and min:
23834 // (x > y) ? x : y -> (x >= y) ? x : y
23835 // (x < y) ? x : y -> (x <= y) ? x : y
23836 // This allows use of COND_S / COND_NS (see TranslateX86CC) which eliminates
23837 // the need for an extra compare
23838 // against zero. e.g.
23839 // (x - y) > 0 ? (x - y) : 0 -> (x - y) >= 0 ? (x - y) : 0
23840 // subl   %esi, %edi
23841 // testl  %edi, %edi
23842 // movl   $0, %eax
23843 // cmovgl %edi, %eax
23844 // =>
23845 // xorl   %eax, %eax
23846 // subl   %esi, %edi
23847 // cmovsl %eax, %edi
23848 if (N->getOpcode() == ISD::SELECT && Cond.getOpcode() == ISD::SETCC &&
23849 DAG.isEqualTo(LHS, Cond.getOperand(0)) &&
23850 DAG.isEqualTo(RHS, Cond.getOperand(1))) {
23851 ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
23856 ISD::CondCode NewCC = (CC == ISD::SETLT) ? ISD::SETLE : ISD::SETGE;
23857 Cond = DAG.getSetCC(SDLoc(Cond), Cond.getValueType(),
23858 Cond.getOperand(0), Cond.getOperand(1), NewCC);
23859 return DAG.getNode(ISD::SELECT, DL, VT, Cond, LHS, RHS);
23864 // Early exit check
23865 if (!TLI.isTypeLegal(VT))
23868 // Match VSELECTs into subs with unsigned saturation.
23869 if (N->getOpcode() == ISD::VSELECT && Cond.getOpcode() == ISD::SETCC &&
23870 // psubus is available in SSE2 and AVX2 for i8 and i16 vectors.
23871 ((Subtarget->hasSSE2() && (VT == MVT::v16i8 || VT == MVT::v8i16)) ||
23872 (Subtarget->hasAVX2() && (VT == MVT::v32i8 || VT == MVT::v16i16)))) {
23873 ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
23875 // Check if one of the arms of the VSELECT is a zero vector. If it's on the
23876 // left side invert the predicate to simplify logic below.
23878 if (ISD::isBuildVectorAllZeros(LHS.getNode())) {
23880 CC = ISD::getSetCCInverse(CC, true);
23881 } else if (ISD::isBuildVectorAllZeros(RHS.getNode())) {
23885 if (Other.getNode() && Other->getNumOperands() == 2 &&
23886 DAG.isEqualTo(Other->getOperand(0), Cond.getOperand(0))) {
23887 SDValue OpLHS = Other->getOperand(0), OpRHS = Other->getOperand(1);
23888 SDValue CondRHS = Cond->getOperand(1);
23890 // Look for a general sub with unsigned saturation first.
23891 // x >= y ? x-y : 0 --> subus x, y
23892 // x > y ? x-y : 0 --> subus x, y
23893 if ((CC == ISD::SETUGE || CC == ISD::SETUGT) &&
23894 Other->getOpcode() == ISD::SUB && DAG.isEqualTo(OpRHS, CondRHS))
23895 return DAG.getNode(X86ISD::SUBUS, DL, VT, OpLHS, OpRHS);
23897 if (auto *OpRHSBV = dyn_cast<BuildVectorSDNode>(OpRHS))
23898 if (auto *OpRHSConst = OpRHSBV->getConstantSplatNode()) {
23899 if (auto *CondRHSBV = dyn_cast<BuildVectorSDNode>(CondRHS))
23900 if (auto *CondRHSConst = CondRHSBV->getConstantSplatNode())
23901 // If the RHS is a constant we have to reverse the const
23902 // canonicalization.
23903 // x > C-1 ? x+(-C) : 0 --> subus x, C
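// e.g. (illustrative constants) for v16i8:
//   (vselect (setugt X, splat(41)), (add X, splat(-42)), splat(0))
// becomes (X86ISD::SUBUS X, splat(42)), i.e. a single PSUBUSB.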
23904 if (CC == ISD::SETUGT && Other->getOpcode() == ISD::ADD &&
23905 CondRHSConst->getAPIntValue() ==
23906 (-OpRHSConst->getAPIntValue() - 1))
23907 return DAG.getNode(
23908 X86ISD::SUBUS, DL, VT, OpLHS,
23909 DAG.getConstant(-OpRHSConst->getAPIntValue(), VT));
23911 // Another special case: If C was a sign bit, the sub has been
23912 // canonicalized into a xor.
23913 // FIXME: Would it be better to use computeKnownBits to determine
23914 // whether it's safe to decanonicalize the xor?
23915 // x s< 0 ? x^C : 0 --> subus x, C
23916 if (CC == ISD::SETLT && Other->getOpcode() == ISD::XOR &&
23917 ISD::isBuildVectorAllZeros(CondRHS.getNode()) &&
23918 OpRHSConst->getAPIntValue().isSignBit())
23919 // Note that we have to rebuild the RHS constant here to ensure we
23920 // don't rely on particular values of undef lanes.
23921 return DAG.getNode(
23922 X86ISD::SUBUS, DL, VT, OpLHS,
23923 DAG.getConstant(OpRHSConst->getAPIntValue(), VT));
23928 // Try to match a min/max vector operation.
23929 if (N->getOpcode() == ISD::VSELECT && Cond.getOpcode() == ISD::SETCC) {
23930 std::pair<unsigned, bool> ret = matchIntegerMINMAX(Cond, VT, LHS, RHS, DAG, Subtarget);
23931 unsigned Opc = ret.first;
23932 bool NeedSplit = ret.second;
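// e.g. on AVX without AVX2, a v32i8 unsigned-min pattern comes back as
// UMIN with NeedSplit set; the 256-bit operands are split below into two
// v16i8 halves (two PMINUBs) and re-concatenated.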
23934 if (Opc && NeedSplit) {
23935 unsigned NumElems = VT.getVectorNumElements();
23936 // Extract the LHS vectors
23937 SDValue LHS1 = Extract128BitVector(LHS, 0, DAG, DL);
23938 SDValue LHS2 = Extract128BitVector(LHS, NumElems/2, DAG, DL);
23940 // Extract the RHS vectors
23941 SDValue RHS1 = Extract128BitVector(RHS, 0, DAG, DL);
23942 SDValue RHS2 = Extract128BitVector(RHS, NumElems/2, DAG, DL);
23944 // Create min/max for each subvector
23945 LHS = DAG.getNode(Opc, DL, LHS1.getValueType(), LHS1, RHS1);
23946 RHS = DAG.getNode(Opc, DL, LHS2.getValueType(), LHS2, RHS2);
23948 // Merge the result
23949 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, LHS, RHS);
23951 return DAG.getNode(Opc, DL, VT, LHS, RHS);
23954 // Simplify vector selection if condition value type matches vselect
23955 // operand value type.
23956 if (N->getOpcode() == ISD::VSELECT && CondVT == VT) {
23957 assert(Cond.getValueType().isVector() &&
23958 "vector select expects a vector selector!");
23960 bool TValIsAllOnes = ISD::isBuildVectorAllOnes(LHS.getNode());
23961 bool FValIsAllZeros = ISD::isBuildVectorAllZeros(RHS.getNode());
23963 // Try to invert the condition if the true value is not all 1s and the
23964 // false value is not all 0s.
23965 if (!TValIsAllOnes && !FValIsAllZeros &&
23966 // Check if the selector will be produced by CMPP*/PCMP*
23967 Cond.getOpcode() == ISD::SETCC &&
23968 // Check if SETCC has already been promoted
23969 TLI.getSetCCResultType(*DAG.getContext(), VT) == CondVT) {
23970 bool TValIsAllZeros = ISD::isBuildVectorAllZeros(LHS.getNode());
23971 bool FValIsAllOnes = ISD::isBuildVectorAllOnes(RHS.getNode());
23973 if (TValIsAllZeros || FValIsAllOnes) {
23974 SDValue CC = Cond.getOperand(2);
23975 ISD::CondCode NewCC =
23976 ISD::getSetCCInverse(cast<CondCodeSDNode>(CC)->get(),
23977 Cond.getOperand(0).getValueType().isInteger());
23978 Cond = DAG.getSetCC(DL, CondVT, Cond.getOperand(0), Cond.getOperand(1), NewCC);
23979 std::swap(LHS, RHS);
23980 TValIsAllOnes = FValIsAllOnes;
23981 FValIsAllZeros = TValIsAllZeros;
23985 if (TValIsAllOnes || FValIsAllZeros) {
23988 if (TValIsAllOnes && FValIsAllZeros)
23990 else if (TValIsAllOnes)
23991 Ret = DAG.getNode(ISD::OR, DL, CondVT, Cond,
23992 DAG.getNode(ISD::BITCAST, DL, CondVT, RHS));
23993 else if (FValIsAllZeros)
23994 Ret = DAG.getNode(ISD::AND, DL, CondVT, Cond,
23995 DAG.getNode(ISD::BITCAST, DL, CondVT, LHS));
23997 return DAG.getNode(ISD::BITCAST, DL, VT, Ret);
24001 // If we know that this node is legal then we know that it is going to be
24002 // matched by one of the SSE/AVX BLEND instructions. These instructions only
24003 // depend on the highest bit in each word. Try to use SimplifyDemandedBits
24004 // to simplify previous instructions.
24005 if (N->getOpcode() == ISD::VSELECT && DCI.isBeforeLegalizeOps() &&
24006 !DCI.isBeforeLegalize() &&
24007 // We explicitly check against v8i16 and v16i16 because, although
24008 // they're marked as Custom, they might only be legal when Cond is a
24009 // build_vector of constants. This will be taken care of by a later
24010 // condition.
24011 (TLI.isOperationLegalOrCustom(ISD::VSELECT, VT) && VT != MVT::v16i16 &&
24012 VT != MVT::v8i16) &&
24013 // Don't optimize vector of constants. Those are handled by
24014 // the generic code and all the bits must be properly set for
24015 // the generic optimizer.
24016 !ISD::isBuildVectorOfConstantSDNodes(Cond.getNode())) {
24017 unsigned BitWidth = Cond.getValueType().getScalarType().getSizeInBits();
24019 // Don't optimize vector selects that map to mask-registers.
24020 if (BitWidth == 1)
24021 return SDValue();
24023 assert(BitWidth >= 8 && BitWidth <= 64 && "Invalid mask size");
24024 APInt DemandedMask = APInt::getHighBitsSet(BitWidth, 1);
24026 APInt KnownZero, KnownOne;
24027 TargetLowering::TargetLoweringOpt TLO(DAG, DCI.isBeforeLegalize(),
24028 DCI.isBeforeLegalizeOps());
24029 if (TLO.ShrinkDemandedConstant(Cond, DemandedMask) ||
24030 TLI.SimplifyDemandedBits(Cond, DemandedMask, KnownZero, KnownOne,
24032 // If we changed the computation somewhere in the DAG, this change
24033 // will affect all users of Cond.
24034 // Make sure it is fine and update all the nodes so that we do not
24035 // use the generic VSELECT anymore. Otherwise, we may perform
24036 // wrong optimizations as we messed up with the actual expectation
24037 // for the vector boolean values.
24038 if (Cond != TLO.Old) {
24039 // Check all uses of that condition operand to check whether it will be
24040 // consumed by non-BLEND instructions, which may depend on all bits being
24041 // set properly.
24042 for (SDNode::use_iterator I = Cond->use_begin(), E = Cond->use_end();
24044 if (I->getOpcode() != ISD::VSELECT)
24045 // TODO: Add other opcodes eventually lowered into BLEND.
24048 // Update all the users of the condition, before committing the change,
24049 // so that the VSELECT optimizations that expect the correct vector
24050 // boolean value will not be triggered.
24051 for (SDNode::use_iterator I = Cond->use_begin(), E = Cond->use_end();
24053 DAG.ReplaceAllUsesOfValueWith(
24055 DAG.getNode(X86ISD::SHRUNKBLEND, SDLoc(*I), I->getValueType(0),
24056 Cond, I->getOperand(1), I->getOperand(2)));
24057 DCI.CommitTargetLoweringOpt(TLO);
24060 // At this point, only Cond is changed. Change the condition
24061 // just for N to keep the opportunity to optimize all other
24062 // users their own way.
24063 DAG.ReplaceAllUsesOfValueWith(
24065 DAG.getNode(X86ISD::SHRUNKBLEND, SDLoc(N), N->getValueType(0),
24066 TLO.New, N->getOperand(1), N->getOperand(2)));
24071 // We should generate an X86ISD::BLENDI from a vselect if its argument
24072 // is a sign_extend_inreg of an any_extend of a BUILD_VECTOR of
24073 // constants. This specific pattern gets generated when we split a
24074 // selector for a 512-bit vector on a machine without AVX512 (but with
24075 // 256-bit vectors), during legalization:
24077 // (vselect (sign_extend (any_extend (BUILD_VECTOR)) i1) LHS RHS)
24079 // Iff we find this pattern and the build_vectors are built from
24080 // constants, we translate the vselect into a shuffle_vector that we
24081 // know will be matched by LowerVECTOR_SHUFFLEtoBlend.
24082 if ((N->getOpcode() == ISD::VSELECT ||
24083 N->getOpcode() == X86ISD::SHRUNKBLEND) &&
24084 !DCI.isBeforeLegalize()) {
24085 SDValue Shuffle = transformVSELECTtoBlendVECTOR_SHUFFLE(N, DAG, Subtarget);
24086 if (Shuffle.getNode())
24093 // Check whether a boolean test is testing a boolean value generated by
24094 // X86ISD::SETCC. If so, return the operand of that SETCC and the proper
24095 // condition code.
24097 // Simplify the following patterns:
24098 // (Op (CMP (SETCC Cond EFLAGS) 1) EQ) or
24099 // (Op (CMP (SETCC Cond EFLAGS) 0) NEQ)
24100 // to (Op EFLAGS Cond)
24102 // (Op (CMP (SETCC Cond EFLAGS) 0) EQ) or
24103 // (Op (CMP (SETCC Cond EFLAGS) 1) NEQ)
24104 // to (Op EFLAGS !Cond)
24106 // where Op could be BRCOND or CMOV.
24108 static SDValue checkBoolTestSetCCCombine(SDValue Cmp, X86::CondCode &CC) {
24109 // Quit unless this is a CMP, or a SUB whose value result is unused.
24110 if (Cmp.getOpcode() != X86ISD::CMP &&
24111 (Cmp.getOpcode() != X86ISD::SUB || Cmp.getNode()->hasAnyUseOfValue(0)))
24114 // Quit if not used as a boolean value.
24115 if (CC != X86::COND_E && CC != X86::COND_NE)
24118 // Check CMP operands. One of them should be 0 or 1 and the other should be
24119 // a SetCC or extended from it.
24120 SDValue Op1 = Cmp.getOperand(0);
24121 SDValue Op2 = Cmp.getOperand(1);
24124 const ConstantSDNode* C = nullptr;
24125 bool needOppositeCond = (CC == X86::COND_E);
24126 bool checkAgainstTrue = false; // Is it a comparison against 1?
24128 if ((C = dyn_cast<ConstantSDNode>(Op1)))
24130 else if ((C = dyn_cast<ConstantSDNode>(Op2)))
24132 else // Quit if neither operand is a constant.
24135 if (C->getZExtValue() == 1) {
24136 needOppositeCond = !needOppositeCond;
24137 checkAgainstTrue = true;
24138 } else if (C->getZExtValue() != 0)
24139 // Quit if the constant is neither 0 nor 1.
24142 bool truncatedToBoolWithAnd = false;
24143 // Skip (zext $x), (trunc $x), or (and $x, 1) node.
24144 while (SetCC.getOpcode() == ISD::ZERO_EXTEND ||
24145 SetCC.getOpcode() == ISD::TRUNCATE ||
24146 SetCC.getOpcode() == ISD::AND) {
24147 if (SetCC.getOpcode() == ISD::AND) {
24149 ConstantSDNode *CS;
24150 if ((CS = dyn_cast<ConstantSDNode>(SetCC.getOperand(0))) &&
24151 CS->getZExtValue() == 1)
24153 if ((CS = dyn_cast<ConstantSDNode>(SetCC.getOperand(1))) &&
24154 CS->getZExtValue() == 1)
24158 SetCC = SetCC.getOperand(OpIdx);
24159 truncatedToBoolWithAnd = true;
24161 SetCC = SetCC.getOperand(0);
24164 switch (SetCC.getOpcode()) {
24165 case X86ISD::SETCC_CARRY:
24166 // Since SETCC_CARRY gives output based on R = CF ? ~0 : 0, it's unsafe to
24167 // simplify it if the result of SETCC_CARRY is not canonicalized to 0 or 1,
24168 // i.e. it's a comparison against true but the result of SETCC_CARRY is not
24169 // truncated to i1 using 'and'.
24170 if (checkAgainstTrue && !truncatedToBoolWithAnd)
24172 assert(X86::CondCode(SetCC.getConstantOperandVal(0)) == X86::COND_B &&
24173 "Invalid use of SETCC_CARRY!");
24175 case X86ISD::SETCC:
24176 // Set the condition code or opposite one if necessary.
24177 CC = X86::CondCode(SetCC.getConstantOperandVal(0));
24178 if (needOppositeCond)
24179 CC = X86::GetOppositeBranchCondition(CC);
24180 return SetCC.getOperand(1);
24181 case X86ISD::CMOV: {
24182 // Check whether the false/true values are canonical, i.e. 0 or 1.
24183 ConstantSDNode *FVal = dyn_cast<ConstantSDNode>(SetCC.getOperand(0));
24184 ConstantSDNode *TVal = dyn_cast<ConstantSDNode>(SetCC.getOperand(1));
24185 // Quit if true value is not a constant.
24188 // Quit if false value is not a constant.
24190 SDValue Op = SetCC.getOperand(0);
24191 // Skip 'zext' or 'trunc' node.
24192 if (Op.getOpcode() == ISD::ZERO_EXTEND ||
24193 Op.getOpcode() == ISD::TRUNCATE)
24194 Op = Op.getOperand(0);
24195 // A special case for rdrand/rdseed, where 0 is set if the false cond is
24196 // found.
24197 if ((Op.getOpcode() != X86ISD::RDRAND &&
24198 Op.getOpcode() != X86ISD::RDSEED) || Op.getResNo() != 0)
24201 // Quit if false value is not the constant 0 or 1.
24202 bool FValIsFalse = true;
24203 if (FVal && FVal->getZExtValue() != 0) {
24204 if (FVal->getZExtValue() != 1)
24206 // If FVal is 1, opposite cond is needed.
24207 needOppositeCond = !needOppositeCond;
24208 FValIsFalse = false;
24210 // Quit if TVal is not the constant opposite of FVal.
24211 if (FValIsFalse && TVal->getZExtValue() != 1)
24213 if (!FValIsFalse && TVal->getZExtValue() != 0)
24215 CC = X86::CondCode(SetCC.getConstantOperandVal(2));
24216 if (needOppositeCond)
24217 CC = X86::GetOppositeBranchCondition(CC);
24218 return SetCC.getOperand(3);
24225 /// Optimize X86ISD::CMOV [LHS, RHS, CONDCODE (e.g. X86::COND_NE), CONDVAL]
24226 static SDValue PerformCMOVCombine(SDNode *N, SelectionDAG &DAG,
24227 TargetLowering::DAGCombinerInfo &DCI,
24228 const X86Subtarget *Subtarget) {
24231 // If the flag operand isn't dead, don't touch this CMOV.
24232 if (N->getNumValues() == 2 && !SDValue(N, 1).use_empty())
24235 SDValue FalseOp = N->getOperand(0);
24236 SDValue TrueOp = N->getOperand(1);
24237 X86::CondCode CC = (X86::CondCode)N->getConstantOperandVal(2);
24238 SDValue Cond = N->getOperand(3);
24240 if (CC == X86::COND_E || CC == X86::COND_NE) {
24241 switch (Cond.getOpcode()) {
24245 // If the operand of BSR / BSF is proven never zero, then ZF cannot be set.
24246 if (DAG.isKnownNeverZero(Cond.getOperand(0)))
24247 return (CC == X86::COND_E) ? FalseOp : TrueOp;
24253 Flags = checkBoolTestSetCCCombine(Cond, CC);
24254 if (Flags.getNode() &&
24255 // Extra check as FCMOV only supports a subset of X86 cond.
24256 (FalseOp.getValueType() != MVT::f80 || hasFPCMov(CC))) {
24257 SDValue Ops[] = { FalseOp, TrueOp,
24258 DAG.getConstant(CC, MVT::i8), Flags };
24259 return DAG.getNode(X86ISD::CMOV, DL, N->getVTList(), Ops);
24262 // If this is a select between two integer constants, try to do some
24263 // optimizations. Note that the operands are ordered the opposite of SELECT
24264 // operands.
24265 if (ConstantSDNode *TrueC = dyn_cast<ConstantSDNode>(TrueOp)) {
24266 if (ConstantSDNode *FalseC = dyn_cast<ConstantSDNode>(FalseOp)) {
24267 // Canonicalize the TrueC/FalseC values so that TrueC (the true value) is
24268 // larger than FalseC (the false value).
24269 if (TrueC->getAPIntValue().ult(FalseC->getAPIntValue())) {
24270 CC = X86::GetOppositeBranchCondition(CC);
24271 std::swap(TrueC, FalseC);
24272 std::swap(TrueOp, FalseOp);
24275 // Optimize C ? 8 : 0 -> zext(setcc(C)) << 3. Likewise for any pow2/0.
24276 // This is efficient for any integer data type (including i8/i16) and
24277 // shift amount.
24278 if (FalseC->getAPIntValue() == 0 && TrueC->getAPIntValue().isPowerOf2()) {
24279 Cond = DAG.getNode(X86ISD::SETCC, DL, MVT::i8,
24280 DAG.getConstant(CC, MVT::i8), Cond);
24282 // Zero extend the condition if needed.
24283 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, TrueC->getValueType(0), Cond);
24285 unsigned ShAmt = TrueC->getAPIntValue().logBase2();
24286 Cond = DAG.getNode(ISD::SHL, DL, Cond.getValueType(), Cond,
24287 DAG.getConstant(ShAmt, MVT::i8));
24288 if (N->getNumValues() == 2) // Dead flag value?
24289 return DCI.CombineTo(N, Cond, SDValue());
24293 // Optimize Cond ? cst+1 : cst -> zext(setcc(C))+cst. This is efficient
24294 // for any integer data type, including i8/i16.
24295 if (FalseC->getAPIntValue()+1 == TrueC->getAPIntValue()) {
24296 Cond = DAG.getNode(X86ISD::SETCC, DL, MVT::i8,
24297 DAG.getConstant(CC, MVT::i8), Cond);
24299 // Zero extend the condition if needed.
24300 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL,
24301 FalseC->getValueType(0), Cond);
24302 Cond = DAG.getNode(ISD::ADD, DL, Cond.getValueType(), Cond,
24303 SDValue(FalseC, 0));
24305 if (N->getNumValues() == 2) // Dead flag value?
24306 return DCI.CombineTo(N, Cond, SDValue());
24310 // Optimize cases that will turn into an LEA instruction. This requires
24311 // an i32 or i64 and an efficient multiplier (1, 2, 3, 4, 5, 8, 9).
24312 if (N->getValueType(0) == MVT::i32 || N->getValueType(0) == MVT::i64) {
24313 uint64_t Diff = TrueC->getZExtValue()-FalseC->getZExtValue();
24314 if (N->getValueType(0) == MVT::i32) Diff = (unsigned)Diff;
24316 bool isFastMultiplier = false;
24318 switch ((unsigned char)Diff) {
24320 case 1: // result = add base, cond
24321 case 2: // result = lea base( , cond*2)
24322 case 3: // result = lea base(cond, cond*2)
24323 case 4: // result = lea base( , cond*4)
24324 case 5: // result = lea base(cond, cond*4)
24325 case 8: // result = lea base( , cond*8)
24326 case 9: // result = lea base(cond, cond*8)
24327 isFastMultiplier = true;
24332 if (isFastMultiplier) {
24333 APInt Diff = TrueC->getAPIntValue()-FalseC->getAPIntValue();
24334 Cond = DAG.getNode(X86ISD::SETCC, DL, MVT::i8,
24335 DAG.getConstant(CC, MVT::i8), Cond);
24336 // Zero extend the condition if needed.
24337 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, FalseC->getValueType(0),
24339 // Scale the condition by the difference.
24341 Cond = DAG.getNode(ISD::MUL, DL, Cond.getValueType(), Cond,
24342 DAG.getConstant(Diff, Cond.getValueType()));
24344 // Add the base if non-zero.
24345 if (FalseC->getAPIntValue() != 0)
24346 Cond = DAG.getNode(ISD::ADD, DL, Cond.getValueType(), Cond,
24347 SDValue(FalseC, 0));
24348 if (N->getNumValues() == 2) // Dead flag value?
24349 return DCI.CombineTo(N, Cond, SDValue());
24356 // Handle these cases:
24357 // (select (x != c), e, c) -> select (x != c), e, x),
24358 // (select (x == c), c, e) -> select (x == c), x, e)
24359 // where the c is an integer constant, and the "select" is the combination
24360 // of CMOV and CMP.
24362 // The rationale for this change is that the conditional-move from a constant
24363 // needs two instructions, whereas a conditional-move from a register needs
24364 // only one instruction.
24366 // CAVEAT: By replacing a constant with a symbolic value, it may obscure
24367 // some instruction-combining opportunities. This opt needs to be
24368 // postponed as late as possible.
24370 if (!DCI.isBeforeLegalize() && !DCI.isBeforeLegalizeOps()) {
24371 // the DCI.xxxx conditions are provided to postpone the optimization as
24372 // late as possible.
24374 ConstantSDNode *CmpAgainst = nullptr;
24375 if ((Cond.getOpcode() == X86ISD::CMP || Cond.getOpcode() == X86ISD::SUB) &&
24376 (CmpAgainst = dyn_cast<ConstantSDNode>(Cond.getOperand(1))) &&
24377 !isa<ConstantSDNode>(Cond.getOperand(0))) {
24379 if (CC == X86::COND_NE &&
24380 CmpAgainst == dyn_cast<ConstantSDNode>(FalseOp)) {
24381 CC = X86::GetOppositeBranchCondition(CC);
24382 std::swap(TrueOp, FalseOp);
24385 if (CC == X86::COND_E &&
24386 CmpAgainst == dyn_cast<ConstantSDNode>(TrueOp)) {
24387 SDValue Ops[] = { FalseOp, Cond.getOperand(0),
24388 DAG.getConstant(CC, MVT::i8), Cond };
24389 return DAG.getNode(X86ISD::CMOV, DL, N->getVTList (), Ops);
24397 static SDValue PerformINTRINSIC_WO_CHAINCombine(SDNode *N, SelectionDAG &DAG,
24398 const X86Subtarget *Subtarget) {
24399 unsigned IntNo = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
24401 default: return SDValue();
24402 // SSE/AVX/AVX2 blend intrinsics.
24403 case Intrinsic::x86_avx2_pblendvb:
24404 case Intrinsic::x86_avx2_pblendw:
24405 case Intrinsic::x86_avx2_pblendd_128:
24406 case Intrinsic::x86_avx2_pblendd_256:
24407 // Don't try to simplify this intrinsic if we don't have AVX2.
24408 if (!Subtarget->hasAVX2())
24411 case Intrinsic::x86_avx_blend_pd_256:
24412 case Intrinsic::x86_avx_blend_ps_256:
24413 case Intrinsic::x86_avx_blendv_pd_256:
24414 case Intrinsic::x86_avx_blendv_ps_256:
24415 // Don't try to simplify this intrinsic if we don't have AVX.
24416 if (!Subtarget->hasAVX())
24419 case Intrinsic::x86_sse41_pblendw:
24420 case Intrinsic::x86_sse41_blendpd:
24421 case Intrinsic::x86_sse41_blendps:
24422 case Intrinsic::x86_sse41_blendvps:
24423 case Intrinsic::x86_sse41_blendvpd:
24424 case Intrinsic::x86_sse41_pblendvb: {
24425 SDValue Op0 = N->getOperand(1);
24426 SDValue Op1 = N->getOperand(2);
24427 SDValue Mask = N->getOperand(3);
24429 // Don't try to simplify this intrinsic if we don't have SSE4.1.
24430 if (!Subtarget->hasSSE41())
24433 // fold (blend A, A, Mask) -> A
24436 // fold (blend A, B, allZeros) -> A
24437 if (ISD::isBuildVectorAllZeros(Mask.getNode()))
24439 // fold (blend A, B, allOnes) -> B
24440 if (ISD::isBuildVectorAllOnes(Mask.getNode()))
24443 // Simplify the case where the mask is a constant i32 value.
24444 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Mask)) {
24445 if (C->isNullValue())
24447 if (C->isAllOnesValue())
24454 // Packed SSE2/AVX2 arithmetic shift immediate intrinsics.
24455 case Intrinsic::x86_sse2_psrai_w:
24456 case Intrinsic::x86_sse2_psrai_d:
24457 case Intrinsic::x86_avx2_psrai_w:
24458 case Intrinsic::x86_avx2_psrai_d:
24459 case Intrinsic::x86_sse2_psra_w:
24460 case Intrinsic::x86_sse2_psra_d:
24461 case Intrinsic::x86_avx2_psra_w:
24462 case Intrinsic::x86_avx2_psra_d: {
24463 SDValue Op0 = N->getOperand(1);
24464 SDValue Op1 = N->getOperand(2);
24465 EVT VT = Op0.getValueType();
24466 assert(VT.isVector() && "Expected a vector type!");
24468 if (isa<BuildVectorSDNode>(Op1))
24469 Op1 = Op1.getOperand(0);
24471 if (!isa<ConstantSDNode>(Op1))
24474 EVT SVT = VT.getVectorElementType();
24475 unsigned SVTBits = SVT.getSizeInBits();
24477 ConstantSDNode *CND = cast<ConstantSDNode>(Op1);
24478 const APInt &C = APInt(SVTBits, CND->getAPIntValue().getZExtValue());
24479 uint64_t ShAmt = C.getZExtValue();
24481 // Don't try to convert this shift into an ISD::SRA if the shift
24482 // count is bigger than or equal to the element size.
24483 if (ShAmt >= SVTBits)
24486 // Trivial case: if the shift count is zero, then fold this
24487 // into the first operand.
24491 // Replace this packed shift intrinsic with a target-independent ISD::SRA node.
24493 SDValue Splat = DAG.getConstant(C, VT);
24494 return DAG.getNode(ISD::SRA, SDLoc(N), VT, Op0, Splat);
24499 /// PerformMulCombine - Optimize a single multiply by a constant into two
24500 /// operations in order to implement it with two cheaper instructions, e.g.
24501 /// LEA + SHL or LEA + LEA.
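/// For example (an illustrative constant), x * 45 = (x * 9) * 5 can be
/// emitted as two LEAs:
///   lea t, [x + x*8]   ; t = x * 9
///   lea r, [t + t*4]   ; r = t * 5
/// which is usually cheaper than a full imul.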
24502 static SDValue PerformMulCombine(SDNode *N, SelectionDAG &DAG,
24503 TargetLowering::DAGCombinerInfo &DCI) {
24504 if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer())
24507 EVT VT = N->getValueType(0);
24508 if (VT != MVT::i64 && VT != MVT::i32)
24511 ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1));
24514 uint64_t MulAmt = C->getZExtValue();
24515 if (isPowerOf2_64(MulAmt) || MulAmt == 3 || MulAmt == 5 || MulAmt == 9)
24518 uint64_t MulAmt1 = 0;
24519 uint64_t MulAmt2 = 0;
24520 if ((MulAmt % 9) == 0) {
24522 MulAmt2 = MulAmt / 9;
24523 } else if ((MulAmt % 5) == 0) {
24525 MulAmt2 = MulAmt / 5;
24526 } else if ((MulAmt % 3) == 0) {
24528 MulAmt2 = MulAmt / 3;
24531 (isPowerOf2_64(MulAmt2) || MulAmt2 == 3 || MulAmt2 == 5 || MulAmt2 == 9)){
24534 if (isPowerOf2_64(MulAmt2) &&
24535 !(N->hasOneUse() && N->use_begin()->getOpcode() == ISD::ADD))
24536 // If the second multiplier is pow2, issue it first. We want the multiply by
24537 // 3, 5, or 9 to be folded into the addressing mode unless the lone use
24538 // is an add.
24539 std::swap(MulAmt1, MulAmt2);
24542 if (isPowerOf2_64(MulAmt1))
24543 NewMul = DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
24544 DAG.getConstant(Log2_64(MulAmt1), MVT::i8));
24546 NewMul = DAG.getNode(X86ISD::MUL_IMM, DL, VT, N->getOperand(0),
24547 DAG.getConstant(MulAmt1, VT));
24549 if (isPowerOf2_64(MulAmt2))
24550 NewMul = DAG.getNode(ISD::SHL, DL, VT, NewMul,
24551 DAG.getConstant(Log2_64(MulAmt2), MVT::i8));
24553 NewMul = DAG.getNode(X86ISD::MUL_IMM, DL, VT, NewMul,
24554 DAG.getConstant(MulAmt2, VT));
24556 // Do not add new nodes to DAG combiner worklist.
24557 DCI.CombineTo(N, NewMul, false);
24562 static SDValue PerformSHLCombine(SDNode *N, SelectionDAG &DAG) {
24563 SDValue N0 = N->getOperand(0);
24564 SDValue N1 = N->getOperand(1);
24565 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
24566 EVT VT = N0.getValueType();
24568 // fold (shl (and (setcc_c), c1), c2) -> (and setcc_c, (c1 << c2))
24569 // since the result of setcc_c is all zeros or all ones.
24570 if (VT.isInteger() && !VT.isVector() &&
24571 N1C && N0.getOpcode() == ISD::AND &&
24572 N0.getOperand(1).getOpcode() == ISD::Constant) {
24573 SDValue N00 = N0.getOperand(0);
24574 if (N00.getOpcode() == X86ISD::SETCC_CARRY ||
24575 ((N00.getOpcode() == ISD::ANY_EXTEND ||
24576 N00.getOpcode() == ISD::ZERO_EXTEND) &&
24577 N00.getOperand(0).getOpcode() == X86ISD::SETCC_CARRY)) {
24578 APInt Mask = cast<ConstantSDNode>(N0.getOperand(1))->getAPIntValue();
24579 APInt ShAmt = N1C->getAPIntValue();
24580 Mask = Mask.shl(ShAmt);
24582 return DAG.getNode(ISD::AND, SDLoc(N), VT,
24583 N00, DAG.getConstant(Mask, VT));
24587 // Hardware support for vector shifts is sparse which makes us scalarize the
24588 // vector operations in many cases. Also, on Sandy Bridge ADD is faster than
24589 // SHL.
24590 // (shl V, 1) -> add V,V
24591 if (auto *N1BV = dyn_cast<BuildVectorSDNode>(N1))
24592 if (auto *N1SplatC = N1BV->getConstantSplatNode()) {
24593 assert(N0.getValueType().isVector() && "Invalid vector shift type");
24594 // We shift all of the values by one. In many cases we do not have
24595 // hardware support for this operation. This is better expressed as an ADD
24596 // of two values.
24597 if (N1SplatC->getZExtValue() == 1)
24598 return DAG.getNode(ISD::ADD, SDLoc(N), VT, N0, N0);
24604 /// \brief Returns a vector of 0s if the input node is a vector logical
24605 /// shift by a constant amount which is known to be bigger than or equal
24606 /// to the vector element size in bits.
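/// e.g. (v4i32 srl X, splat(32)) is folded to the all-zeros vector, since
/// PSRLD with a shift count >= 32 produces zero in every element.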
24607 static SDValue performShiftToAllZeros(SDNode *N, SelectionDAG &DAG,
24608 const X86Subtarget *Subtarget) {
24609 EVT VT = N->getValueType(0);
24611 if (VT != MVT::v2i64 && VT != MVT::v4i32 && VT != MVT::v8i16 &&
24612 (!Subtarget->hasInt256() ||
24613 (VT != MVT::v4i64 && VT != MVT::v8i32 && VT != MVT::v16i16)))
24616 SDValue Amt = N->getOperand(1);
24618 if (auto *AmtBV = dyn_cast<BuildVectorSDNode>(Amt))
24619 if (auto *AmtSplat = AmtBV->getConstantSplatNode()) {
24620 APInt ShiftAmt = AmtSplat->getAPIntValue();
24621 unsigned MaxAmount = VT.getVectorElementType().getSizeInBits();
24623 // SSE2/AVX2 logical shifts always return a vector of 0s
24624 // if the shift amount is bigger than or equal to
24625 // the element size. The constant shift amount will be
24626 // encoded as an 8-bit immediate.
24627 if (ShiftAmt.trunc(8).uge(MaxAmount))
24628 return getZeroVector(VT, Subtarget, DAG, DL);
24634 /// PerformShiftCombine - Combine shifts.
24635 static SDValue PerformShiftCombine(SDNode* N, SelectionDAG &DAG,
24636 TargetLowering::DAGCombinerInfo &DCI,
24637 const X86Subtarget *Subtarget) {
24638 if (N->getOpcode() == ISD::SHL) {
24639 SDValue V = PerformSHLCombine(N, DAG);
24640 if (V.getNode()) return V;
24643 if (N->getOpcode() != ISD::SRA) {
24644 // Try to fold this logical shift into a zero vector.
24645 SDValue V = performShiftToAllZeros(N, DAG, Subtarget);
24646 if (V.getNode()) return V;
24652 // CMPEQCombine - Recognize the distinctive (AND (setcc ...) (setcc ..))
24653 // where both setccs reference the same FP CMP, and rewrite for CMPEQSS
24654 // and friends. Likewise for OR -> CMPNEQSS.
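// For illustration: the ordered-equality idiom "sete && setnp" of a single
// f32 UCOMIS-style compare is rewritten below as (X86ISD::FSETCC a, b, 0),
// i.e. CMPEQSS, whose all-ones/all-zeros result is then masked down to one
// bit; the exact instruction mix depends on the subtarget.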
24655 static SDValue CMPEQCombine(SDNode *N, SelectionDAG &DAG,
24656 TargetLowering::DAGCombinerInfo &DCI,
24657 const X86Subtarget *Subtarget) {
24660 // SSE1 supports CMP{eq|ne}SS, and SSE2 added CMP{eq|ne}SD, but
24661 // we're requiring SSE2 for both.
24662 if (Subtarget->hasSSE2() && isAndOrOfSetCCs(SDValue(N, 0U), opcode)) {
24663 SDValue N0 = N->getOperand(0);
24664 SDValue N1 = N->getOperand(1);
24665 SDValue CMP0 = N0->getOperand(1);
24666 SDValue CMP1 = N1->getOperand(1);
24669 // The SETCCs should both refer to the same CMP.
24670 if (CMP0.getOpcode() != X86ISD::CMP || CMP0 != CMP1)
24673 SDValue CMP00 = CMP0->getOperand(0);
24674 SDValue CMP01 = CMP0->getOperand(1);
24675 EVT VT = CMP00.getValueType();
24677 if (VT == MVT::f32 || VT == MVT::f64) {
24678 bool ExpectingFlags = false;
24679 // Check for any users that want flags:
24680 for (SDNode::use_iterator UI = N->use_begin(), UE = N->use_end();
24681 !ExpectingFlags && UI != UE; ++UI)
24682 switch (UI->getOpcode()) {
24687 ExpectingFlags = true;
24689 case ISD::CopyToReg:
24690 case ISD::SIGN_EXTEND:
24691 case ISD::ZERO_EXTEND:
24692 case ISD::ANY_EXTEND:
24696 if (!ExpectingFlags) {
24697 enum X86::CondCode cc0 = (enum X86::CondCode)N0.getConstantOperandVal(0);
24698 enum X86::CondCode cc1 = (enum X86::CondCode)N1.getConstantOperandVal(0);
24700 if (cc1 == X86::COND_E || cc1 == X86::COND_NE) {
24701 X86::CondCode tmp = cc0;
24706 if ((cc0 == X86::COND_E && cc1 == X86::COND_NP) ||
24707 (cc0 == X86::COND_NE && cc1 == X86::COND_P)) {
24708 // FIXME: need symbolic constants for these magic numbers.
24709 // See X86ATTInstPrinter.cpp:printSSECC().
24710 unsigned x86cc = (cc0 == X86::COND_E) ? 0 : 4;
24711 if (Subtarget->hasAVX512()) {
24712 SDValue FSetCC = DAG.getNode(X86ISD::FSETCC, DL, MVT::i1, CMP00,
24713 CMP01, DAG.getConstant(x86cc, MVT::i8));
24714 if (N->getValueType(0) != MVT::i1)
24715 return DAG.getNode(ISD::ZERO_EXTEND, DL, N->getValueType(0),
24719 SDValue OnesOrZeroesF = DAG.getNode(X86ISD::FSETCC, DL,
24720 CMP00.getValueType(), CMP00, CMP01,
24721 DAG.getConstant(x86cc, MVT::i8));
24723 bool is64BitFP = (CMP00.getValueType() == MVT::f64);
24724 MVT IntVT = is64BitFP ? MVT::i64 : MVT::i32;
24726 if (is64BitFP && !Subtarget->is64Bit()) {
24727 // On a 32-bit target, we cannot bitcast the 64-bit float to a
24728 // 64-bit integer, since that's not a legal type. Since
24729 // OnesOrZeroesF is all ones of all zeroes, we don't need all the
24730 // bits, but can do this little dance to extract the lowest 32 bits
24731 // and work with those going forward.
24732 SDValue Vector64 = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v2f64,
24734 SDValue Vector32 = DAG.getNode(ISD::BITCAST, DL, MVT::v4f32,
24736 OnesOrZeroesF = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32,
24737 Vector32, DAG.getIntPtrConstant(0));
24741 SDValue OnesOrZeroesI = DAG.getNode(ISD::BITCAST, DL, IntVT, OnesOrZeroesF);
24742 SDValue ANDed = DAG.getNode(ISD::AND, DL, IntVT, OnesOrZeroesI,
24743 DAG.getConstant(1, IntVT));
24744 SDValue OneBitOfTruth = DAG.getNode(ISD::TRUNCATE, DL, MVT::i8, ANDed);
24745 return OneBitOfTruth;
24753 /// CanFoldXORWithAllOnes - Test whether the XOR operand is an all-ones vector
24754 /// so it can be folded inside ANDNP.
24755 static bool CanFoldXORWithAllOnes(const SDNode *N) {
24756 EVT VT = N->getValueType(0);
24758 // Match direct AllOnes for 128 and 256-bit vectors
24759 if (ISD::isBuildVectorAllOnes(N))
24762 // Look through a bit convert.
24763 if (N->getOpcode() == ISD::BITCAST)
24764 N = N->getOperand(0).getNode();
24766 // Sometimes the operand may come from an insert_subvector building a 256-bit
24767 // all-ones vector.
24768 if (VT.is256BitVector() &&
24769 N->getOpcode() == ISD::INSERT_SUBVECTOR) {
24770 SDValue V1 = N->getOperand(0);
24771 SDValue V2 = N->getOperand(1);
24773 if (V1.getOpcode() == ISD::INSERT_SUBVECTOR &&
24774 V1.getOperand(0).getOpcode() == ISD::UNDEF &&
24775 ISD::isBuildVectorAllOnes(V1.getOperand(1).getNode()) &&
24776 ISD::isBuildVectorAllOnes(V2.getNode()))
24783 // On AVX/AVX2 the type v8i1 is legalized to v8i16, which is an XMM sized
24784 // register. In most cases we actually compare or select YMM-sized registers
24785 // and mixing the two types creates horrible code. This method optimizes
24786 // some of the transition sequences.
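// Roughly: (v8i32 sign_extend (and (trunc A), (trunc B))), with A and B
// already 256-bit, is rewritten as (sign_extend_inreg (and A, B)) so the
// AND stays in the wide YMM domain (assuming the wide AND is legal).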
24787 static SDValue WidenMaskArithmetic(SDNode *N, SelectionDAG &DAG,
24788 TargetLowering::DAGCombinerInfo &DCI,
24789 const X86Subtarget *Subtarget) {
24790 EVT VT = N->getValueType(0);
24791 if (!VT.is256BitVector())
24794 assert((N->getOpcode() == ISD::ANY_EXTEND ||
24795 N->getOpcode() == ISD::ZERO_EXTEND ||
24796 N->getOpcode() == ISD::SIGN_EXTEND) && "Invalid Node");
24798 SDValue Narrow = N->getOperand(0);
24799 EVT NarrowVT = Narrow->getValueType(0);
24800 if (!NarrowVT.is128BitVector())
24803 if (Narrow->getOpcode() != ISD::XOR &&
24804 Narrow->getOpcode() != ISD::AND &&
24805 Narrow->getOpcode() != ISD::OR)
24808 SDValue N0 = Narrow->getOperand(0);
24809 SDValue N1 = Narrow->getOperand(1);
24812 // The Left side has to be a trunc.
24813 if (N0.getOpcode() != ISD::TRUNCATE)
24816 // The type of the truncated inputs.
24817 EVT WideVT = N0->getOperand(0)->getValueType(0);
24821 // The right side has to be a 'trunc' or a constant vector.
24822 bool RHSTrunc = N1.getOpcode() == ISD::TRUNCATE;
24823 ConstantSDNode *RHSConstSplat = nullptr;
24824 if (auto *RHSBV = dyn_cast<BuildVectorSDNode>(N1))
24825 RHSConstSplat = RHSBV->getConstantSplatNode();
24826 if (!RHSTrunc && !RHSConstSplat)
24829 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
24831 if (!TLI.isOperationLegalOrPromote(Narrow->getOpcode(), WideVT))
24834 // Set N0 and N1 to hold the inputs to the new wide operation.
24835 N0 = N0->getOperand(0);
24836 if (RHSConstSplat) {
24837 N1 = DAG.getNode(ISD::ZERO_EXTEND, DL, WideVT.getScalarType(),
24838 SDValue(RHSConstSplat, 0));
24839 SmallVector<SDValue, 8> C(WideVT.getVectorNumElements(), N1);
24840 N1 = DAG.getNode(ISD::BUILD_VECTOR, DL, WideVT, C);
24841 } else if (RHSTrunc) {
24842 N1 = N1->getOperand(0);
24845 // Generate the wide operation.
24846 SDValue Op = DAG.getNode(Narrow->getOpcode(), DL, WideVT, N0, N1);
24847 unsigned Opcode = N->getOpcode();
24849 case ISD::ANY_EXTEND:
24851 case ISD::ZERO_EXTEND: {
24852 unsigned InBits = NarrowVT.getScalarType().getSizeInBits();
24853 APInt Mask = APInt::getAllOnesValue(InBits);
24854 Mask = Mask.zext(VT.getScalarType().getSizeInBits());
24855 return DAG.getNode(ISD::AND, DL, VT,
24856 Op, DAG.getConstant(Mask, VT));
24858 case ISD::SIGN_EXTEND:
24859 return DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, VT,
24860 Op, DAG.getValueType(NarrowVT));
24862 llvm_unreachable("Unexpected opcode");
24866 static SDValue VectorZextCombine(SDNode *N, SelectionDAG &DAG,
24867 TargetLowering::DAGCombinerInfo &DCI,
24868 const X86Subtarget *Subtarget) {
24869 SDValue N0 = N->getOperand(0);
24870 SDValue N1 = N->getOperand(1);
24873 // A vector zext_in_reg may be represented as a shuffle,
24874 // feeding into a bitcast (this represents anyext) feeding into
24875 // an and with a mask.
24876 // We'd like to try to combine that into a shuffle with zero
24877 // plus a bitcast, removing the and.
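// e.g. zero-extending four i8 lanes of X (v16i8) to i32 may appear as
//   (and (v4i32 bitcast (shuffle <0,u,u,u,1,u,u,u,...> X, undef)), splat(0xFF))
// and is rewritten below as
//   (bitcast (shuffle <0,16,16,16,1,16,16,16,...> X, zero))
// where indices >= 16 pull zeros from the second (all-zeros) operand.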
24878 if (N0.getOpcode() != ISD::BITCAST ||
24879 N0.getOperand(0).getOpcode() != ISD::VECTOR_SHUFFLE)
24882 // The other side of the AND should be a splat of 2^C - 1 (an all-ones
24883 // mask), where C is the number of bits in the source element type.
24884 if (N1.getOpcode() == ISD::BITCAST)
24885 N1 = N1.getOperand(0);
24886 if (N1.getOpcode() != ISD::BUILD_VECTOR)
24888 BuildVectorSDNode *Vector = cast<BuildVectorSDNode>(N1);
24890 ShuffleVectorSDNode *Shuffle = cast<ShuffleVectorSDNode>(N0.getOperand(0));
24891 EVT SrcType = Shuffle->getValueType(0);
24893 // We expect a single-source shuffle
24894 if (Shuffle->getOperand(1)->getOpcode() != ISD::UNDEF)
24897 unsigned SrcSize = SrcType.getScalarSizeInBits();
24899 APInt SplatValue, SplatUndef;
24900 unsigned SplatBitSize;
24902 if (!Vector->isConstantSplat(SplatValue, SplatUndef,
24903 SplatBitSize, HasAnyUndefs))
24906 unsigned ResSize = N1.getValueType().getScalarSizeInBits();
24907 // Make sure the splat matches the mask we expect
24908 if (SplatBitSize > ResSize ||
24909 (SplatValue + 1).exactLogBase2() != (int)SrcSize)
24912 // Make sure the input and output size make sense
24913 if (SrcSize >= ResSize || ResSize % SrcSize)
24916 // We expect a shuffle of the form <0, u, u, u, 1, u, u, u...>
24917 // The number of u's between consecutive values depends on the ratio between
24918 // the source and dest type.
24919 unsigned ZextRatio = ResSize / SrcSize;
24920 bool IsZext = true;
24921 for (unsigned i = 0; i < SrcType.getVectorNumElements(); ++i) {
24922 if (i % ZextRatio) {
24923 if (Shuffle->getMaskElt(i) > 0) {
24929 if (Shuffle->getMaskElt(i) != (int)(i / ZextRatio)) {
24930 // Expected element number
24940 // Ok, perform the transformation - replace the shuffle with
24941 // a shuffle of the form <0, k, k, k, 1, k, k, k> with zero
24942 // (instead of undef) where the k elements come from the zero vector.
24943 SmallVector<int, 8> Mask;
24944 unsigned NumElems = SrcType.getVectorNumElements();
24945 for (unsigned i = 0; i < NumElems; ++i)
24947 Mask.push_back(NumElems);
24949 Mask.push_back(i / ZextRatio);
24951 SDValue NewShuffle = DAG.getVectorShuffle(Shuffle->getValueType(0), DL,
24952 Shuffle->getOperand(0), DAG.getConstant(0, SrcType), Mask);
24953 return DAG.getNode(ISD::BITCAST, DL, N0.getValueType(), NewShuffle);
24956 static SDValue PerformAndCombine(SDNode *N, SelectionDAG &DAG,
24957 TargetLowering::DAGCombinerInfo &DCI,
24958 const X86Subtarget *Subtarget) {
24959 if (DCI.isBeforeLegalizeOps())
24962 SDValue Zext = VectorZextCombine(N, DAG, DCI, Subtarget);
24963 if (Zext.getNode())
24966 SDValue R = CMPEQCombine(N, DAG, DCI, Subtarget);
24970 EVT VT = N->getValueType(0);
24971 SDValue N0 = N->getOperand(0);
24972 SDValue N1 = N->getOperand(1);
24975 // Create BEXTR instructions
24976 // BEXTR is ((X >> imm) & (2**size-1))
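// e.g. ((x >> 4) & 0xFFF) becomes (X86ISD::BEXTR x, 0x0C04): start = 4 in
// bits 7:0 and length = 12 in bits 15:8 of the control operand, assuming
// BMI or TBM is available.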
24977 if (VT == MVT::i32 || VT == MVT::i64) {
24978 // Check for BEXTR.
24979 if ((Subtarget->hasBMI() || Subtarget->hasTBM()) &&
24980 (N0.getOpcode() == ISD::SRA || N0.getOpcode() == ISD::SRL)) {
24981 ConstantSDNode *MaskNode = dyn_cast<ConstantSDNode>(N1);
24982 ConstantSDNode *ShiftNode = dyn_cast<ConstantSDNode>(N0.getOperand(1));
24983 if (MaskNode && ShiftNode) {
24984 uint64_t Mask = MaskNode->getZExtValue();
24985 uint64_t Shift = ShiftNode->getZExtValue();
24986 if (isMask_64(Mask)) {
24987 uint64_t MaskSize = countPopulation(Mask);
24988 if (Shift + MaskSize <= VT.getSizeInBits())
24989 return DAG.getNode(X86ISD::BEXTR, DL, VT, N0.getOperand(0),
24990 DAG.getConstant(Shift | (MaskSize << 8), VT));
24998 // Want to form ANDNP nodes:
24999 // 1) In the hopes of then easily combining them with OR and AND nodes
25000 // to form PBLEND/PSIGN.
25001 // 2) To match ANDN packed intrinsics
25002 if (VT != MVT::v2i64 && VT != MVT::v4i64)
25005 // Check LHS for vnot
25006 if (N0.getOpcode() == ISD::XOR &&
25007 //ISD::isBuildVectorAllOnes(N0.getOperand(1).getNode()))
25008 CanFoldXORWithAllOnes(N0.getOperand(1).getNode()))
25009 return DAG.getNode(X86ISD::ANDNP, DL, VT, N0.getOperand(0), N1);
25011 // Check RHS for vnot
25012 if (N1.getOpcode() == ISD::XOR &&
25013 //ISD::isBuildVectorAllOnes(N1.getOperand(1).getNode()))
25014 CanFoldXORWithAllOnes(N1.getOperand(1).getNode()))
25015 return DAG.getNode(X86ISD::ANDNP, DL, VT, N1.getOperand(0), N0);
25020 static SDValue PerformOrCombine(SDNode *N, SelectionDAG &DAG,
25021 TargetLowering::DAGCombinerInfo &DCI,
25022 const X86Subtarget *Subtarget) {
25023 if (DCI.isBeforeLegalizeOps())
25026 SDValue R = CMPEQCombine(N, DAG, DCI, Subtarget);
25030 SDValue N0 = N->getOperand(0);
25031 SDValue N1 = N->getOperand(1);
25032 EVT VT = N->getValueType(0);
25034 // look for psign/blend
25035 if (VT == MVT::v2i64 || VT == MVT::v4i64) {
25036 if (!Subtarget->hasSSSE3() ||
25037 (VT == MVT::v4i64 && !Subtarget->hasInt256()))
25040 // Canonicalize pandn to RHS
25041 if (N0.getOpcode() == X86ISD::ANDNP)
25043 // or (and (m, y), (pandn m, x))
25044 if (N0.getOpcode() == ISD::AND && N1.getOpcode() == X86ISD::ANDNP) {
25045 SDValue Mask = N1.getOperand(0);
25046 SDValue X = N1.getOperand(1);
SDValue Y;
25048 if (N0.getOperand(0) == Mask)
25049 Y = N0.getOperand(1);
25050 if (N0.getOperand(1) == Mask)
25051 Y = N0.getOperand(0);
25053 // Check to see if the mask appeared in both the AND and the ANDNP.
if (!Y.getNode())
return SDValue();
25057 // Validate that X, Y, and Mask are BIT_CONVERTS, and see through them.
25058 // Look through mask bitcast.
25059 if (Mask.getOpcode() == ISD::BITCAST)
25060 Mask = Mask.getOperand(0);
25061 if (X.getOpcode() == ISD::BITCAST)
25062 X = X.getOperand(0);
25063 if (Y.getOpcode() == ISD::BITCAST)
25064 Y = Y.getOperand(0);
25066 EVT MaskVT = Mask.getValueType();
25068 // Validate that the Mask operand is a vector sra node.
25069 // FIXME: what to do for bytes, since there is a psignb/pblendvb, but
25070 // there is no psrai.b
25071 unsigned EltBits = MaskVT.getVectorElementType().getSizeInBits();
25072 unsigned SraAmt = ~0;
25073 if (Mask.getOpcode() == ISD::SRA) {
25074 if (auto *AmtBV = dyn_cast<BuildVectorSDNode>(Mask.getOperand(1)))
25075 if (auto *AmtConst = AmtBV->getConstantSplatNode())
25076 SraAmt = AmtConst->getZExtValue();
25077 } else if (Mask.getOpcode() == X86ISD::VSRAI) {
25078 SDValue SraC = Mask.getOperand(1);
25079 SraAmt = cast<ConstantSDNode>(SraC)->getZExtValue();
25081 if ((SraAmt + 1) != EltBits)
25086 // Now we know we at least have a pblendvb with the mask val. See if
25087 // we can form a psignb/w/d.
25088 // psign = x.type == y.type == mask.type && y = sub(0, x);
25089 if (Y.getOpcode() == ISD::SUB && Y.getOperand(1) == X &&
25090 ISD::isBuildVectorAllZeros(Y.getOperand(0).getNode()) &&
25091 X.getValueType() == MaskVT && Y.getValueType() == MaskVT) {
25092 assert((EltBits == 8 || EltBits == 16 || EltBits == 32) &&
25093 "Unsupported VT for PSIGN");
25094 Mask = DAG.getNode(X86ISD::PSIGN, DL, MaskVT, X, Mask.getOperand(0));
25095 return DAG.getNode(ISD::BITCAST, DL, VT, Mask);
25097 // PBLENDVB only available on SSE 4.1
25098 if (!Subtarget->hasSSE41())
25101 EVT BlendVT = (VT == MVT::v4i64) ? MVT::v32i8 : MVT::v16i8;
25103 X = DAG.getNode(ISD::BITCAST, DL, BlendVT, X);
25104 Y = DAG.getNode(ISD::BITCAST, DL, BlendVT, Y);
25105 Mask = DAG.getNode(ISD::BITCAST, DL, BlendVT, Mask);
25106 Mask = DAG.getNode(ISD::VSELECT, DL, BlendVT, Mask, Y, X);
25107 return DAG.getNode(ISD::BITCAST, DL, VT, Mask);
25111 if (VT != MVT::i16 && VT != MVT::i32 && VT != MVT::i64)
25114 // fold (or (x << c) | (y >> (64 - c))) ==> (shld64 x, y, c)
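// Illustrative examples: for i32, (x << 8) | (y >> 24) becomes SHLD x, y, 8,
// while the variable-amount form (x << (32 - c)) | (y >> c) becomes
// SHRD y, x, c (handled via the ISD::SUB shift-amount case below).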
25115 MachineFunction &MF = DAG.getMachineFunction();
25117 MF.getFunction()->hasFnAttribute(Attribute::OptimizeForSize);
25119 // SHLD/SHRD instructions have lower register pressure, but on some
25120 // platforms they have higher latency than the equivalent
25121 // series of shifts/or that would otherwise be generated.
25122 // Don't fold (or (x << c) | (y >> (64 - c))) if SHLD/SHRD instructions
25123 // have higher latencies and we are not optimizing for size.
25124 if (!OptForSize && Subtarget->isSHLDSlow())
25127 if (N0.getOpcode() == ISD::SRL && N1.getOpcode() == ISD::SHL)
25129 if (N0.getOpcode() != ISD::SHL || N1.getOpcode() != ISD::SRL)
25131 if (!N0.hasOneUse() || !N1.hasOneUse())
25134 SDValue ShAmt0 = N0.getOperand(1);
25135 if (ShAmt0.getValueType() != MVT::i8)
25137 SDValue ShAmt1 = N1.getOperand(1);
25138 if (ShAmt1.getValueType() != MVT::i8)
25140 if (ShAmt0.getOpcode() == ISD::TRUNCATE)
25141 ShAmt0 = ShAmt0.getOperand(0);
25142 if (ShAmt1.getOpcode() == ISD::TRUNCATE)
25143 ShAmt1 = ShAmt1.getOperand(0);
25146 unsigned Opc = X86ISD::SHLD;
25147 SDValue Op0 = N0.getOperand(0);
25148 SDValue Op1 = N1.getOperand(0);
25149 if (ShAmt0.getOpcode() == ISD::SUB) {
25150 Opc = X86ISD::SHRD;
25151 std::swap(Op0, Op1);
25152 std::swap(ShAmt0, ShAmt1);
25155 unsigned Bits = VT.getSizeInBits();
25156 if (ShAmt1.getOpcode() == ISD::SUB) {
25157 SDValue Sum = ShAmt1.getOperand(0);
25158 if (ConstantSDNode *SumC = dyn_cast<ConstantSDNode>(Sum)) {
25159 SDValue ShAmt1Op1 = ShAmt1.getOperand(1);
25160 if (ShAmt1Op1.getNode()->getOpcode() == ISD::TRUNCATE)
25161 ShAmt1Op1 = ShAmt1Op1.getOperand(0);
25162 if (SumC->getSExtValue() == Bits && ShAmt1Op1 == ShAmt0)
25163 return DAG.getNode(Opc, DL, VT,
25165 DAG.getNode(ISD::TRUNCATE, DL,
25168 } else if (ConstantSDNode *ShAmt1C = dyn_cast<ConstantSDNode>(ShAmt1)) {
25169 ConstantSDNode *ShAmt0C = dyn_cast<ConstantSDNode>(ShAmt0);
25171 ShAmt0C->getSExtValue() + ShAmt1C->getSExtValue() == Bits)
25172 return DAG.getNode(Opc, DL, VT,
25173 N0.getOperand(0), N1.getOperand(0),
25174 DAG.getNode(ISD::TRUNCATE, DL,
25181 // Generate NEG and CMOV for integer abs.
25182 static SDValue performIntegerAbsCombine(SDNode *N, SelectionDAG &DAG) {
25183 EVT VT = N->getValueType(0);
25185 // Since X86 does not have CMOV for 8-bit integer, we don't convert
25186 // 8-bit integer abs to NEG and CMOV.
25187 if (VT.isInteger() && VT.getSizeInBits() == 8)
25190 SDValue N0 = N->getOperand(0);
25191 SDValue N1 = N->getOperand(1);
25194 // Check pattern of XOR(ADD(X,Y), Y) where Y is SRA(X, size(X)-1)
25195 // and change it to SUB and CMOV.
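// Illustrative example: for i32, abs(x) is commonly emitted as
//   y = x >> 31 (arithmetic);  abs = (x + y) ^ y
// which is exactly the XOR(ADD(X,Y), Y) form matched below; it is rewritten to
// a SUB computing 0 - x plus a CMOV that selects between x and 0 - x using the
// flags of that subtraction.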
25196 if (VT.isInteger() && N->getOpcode() == ISD::XOR &&
25197 N0.getOpcode() == ISD::ADD &&
25198 N0.getOperand(1) == N1 &&
25199 N1.getOpcode() == ISD::SRA &&
25200 N1.getOperand(0) == N0.getOperand(0))
25201 if (ConstantSDNode *Y1C = dyn_cast<ConstantSDNode>(N1.getOperand(1)))
25202 if (Y1C->getAPIntValue() == VT.getSizeInBits()-1) {
25203 // Generate SUB & CMOV.
25204 SDValue Neg = DAG.getNode(X86ISD::SUB, DL, DAG.getVTList(VT, MVT::i32),
25205 DAG.getConstant(0, VT), N0.getOperand(0));
25207 SDValue Ops[] = { N0.getOperand(0), Neg,
25208 DAG.getConstant(X86::COND_GE, MVT::i8),
25209 SDValue(Neg.getNode(), 1) };
25210 return DAG.getNode(X86ISD::CMOV, DL, DAG.getVTList(VT, MVT::Glue), Ops);
25215 // PerformXorCombine - Attempts to turn XOR nodes into BLSMSK nodes
25216 static SDValue PerformXorCombine(SDNode *N, SelectionDAG &DAG,
25217 TargetLowering::DAGCombinerInfo &DCI,
25218 const X86Subtarget *Subtarget) {
25219 if (DCI.isBeforeLegalizeOps())
25222 if (Subtarget->hasCMov()) {
25223 SDValue RV = performIntegerAbsCombine(N, DAG);
25231 /// PerformLOADCombine - Do target-specific dag combines on LOAD nodes.
25232 static SDValue PerformLOADCombine(SDNode *N, SelectionDAG &DAG,
25233 TargetLowering::DAGCombinerInfo &DCI,
25234 const X86Subtarget *Subtarget) {
25235 LoadSDNode *Ld = cast<LoadSDNode>(N);
25236 EVT RegVT = Ld->getValueType(0);
25237 EVT MemVT = Ld->getMemoryVT();
25239 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
25241 // For chips with slow 32-byte unaligned loads, break the 32-byte operation
25242 // into two 16-byte operations.
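// Illustrative example: an unaligned 256-bit load (e.g. v8f32) is split below
// into two 16-byte loads at Ptr and Ptr+16, reassembled with
// Insert128BitVector, and the two load chains are merged with a TokenFactor.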
25243 ISD::LoadExtType Ext = Ld->getExtensionType();
25244 unsigned Alignment = Ld->getAlignment();
25245 bool IsAligned = Alignment == 0 || Alignment >= MemVT.getSizeInBits()/8;
25246 if (RegVT.is256BitVector() && Subtarget->isUnalignedMem32Slow() &&
25247 !DCI.isBeforeLegalizeOps() && !IsAligned && Ext == ISD::NON_EXTLOAD) {
25248 unsigned NumElems = RegVT.getVectorNumElements();
25252 SDValue Ptr = Ld->getBasePtr();
25253 SDValue Increment = DAG.getConstant(16, TLI.getPointerTy());
25255 EVT HalfVT = EVT::getVectorVT(*DAG.getContext(), MemVT.getScalarType(),
25257 SDValue Load1 = DAG.getLoad(HalfVT, dl, Ld->getChain(), Ptr,
25258 Ld->getPointerInfo(), Ld->isVolatile(),
25259 Ld->isNonTemporal(), Ld->isInvariant(),
25261 Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, Increment);
25262 SDValue Load2 = DAG.getLoad(HalfVT, dl, Ld->getChain(), Ptr,
25263 Ld->getPointerInfo(), Ld->isVolatile(),
25264 Ld->isNonTemporal(), Ld->isInvariant(),
25265 std::min(16U, Alignment));
25266 SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
25268 Load2.getValue(1));
25270 SDValue NewVec = DAG.getUNDEF(RegVT);
25271 NewVec = Insert128BitVector(NewVec, Load1, 0, DAG, dl);
25272 NewVec = Insert128BitVector(NewVec, Load2, NumElems/2, DAG, dl);
25273 return DCI.CombineTo(N, NewVec, TF, true);
25279 /// PerformMLOADCombine - Resolve extending loads
25280 static SDValue PerformMLOADCombine(SDNode *N, SelectionDAG &DAG,
25281 TargetLowering::DAGCombinerInfo &DCI,
25282 const X86Subtarget *Subtarget) {
25283 MaskedLoadSDNode *Mld = cast<MaskedLoadSDNode>(N);
25284 if (Mld->getExtensionType() != ISD::SEXTLOAD)
25287 EVT VT = Mld->getValueType(0);
25288 unsigned NumElems = VT.getVectorNumElements();
25289 EVT LdVT = Mld->getMemoryVT();
25292 assert(LdVT != VT && "Cannot extend to the same type");
25293 unsigned ToSz = VT.getVectorElementType().getSizeInBits();
25294 unsigned FromSz = LdVT.getVectorElementType().getSizeInBits();
25295 // From, To sizes and ElemCount must be pow of two
25296 assert (isPowerOf2_32(NumElems * FromSz * ToSz) &&
25297 "Unexpected size for extending masked load");
25299 unsigned SizeRatio = ToSz / FromSz;
25300 assert(SizeRatio * NumElems * FromSz == VT.getSizeInBits());
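// Illustrative example: a sign-extending masked load from v8i16 memory to a
// v8i32 result has SizeRatio == 2 and WideVecVT == v16i16; the operation is
// rewritten as a v16i16 masked load (with Src0 and the mask widened below)
// whose result is then sign-extended via X86ISD::VSEXT.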
25302 // Create a type on which we perform the shuffle
25303 EVT WideVecVT = EVT::getVectorVT(*DAG.getContext(),
25304 LdVT.getScalarType(), NumElems*SizeRatio);
25305 assert(WideVecVT.getSizeInBits() == VT.getSizeInBits());
25307 // Convert Src0 value
25308 SDValue WideSrc0 = DAG.getNode(ISD::BITCAST, dl, WideVecVT, Mld->getSrc0());
25309 if (Mld->getSrc0().getOpcode() != ISD::UNDEF) {
25310 SmallVector<int, 16> ShuffleVec(NumElems * SizeRatio, -1);
25311 for (unsigned i = 0; i != NumElems; ++i)
25312 ShuffleVec[i] = i * SizeRatio;
25314 // Can't shuffle using an illegal type.
25315 assert (DAG.getTargetLoweringInfo().isTypeLegal(WideVecVT)
25316 && "WideVecVT should be legal");
25317 WideSrc0 = DAG.getVectorShuffle(WideVecVT, dl, WideSrc0,
25318 DAG.getUNDEF(WideVecVT), &ShuffleVec[0]);
25320 // Prepare the new mask
SDValue NewMask;
25322 SDValue Mask = Mld->getMask();
25323 if (Mask.getValueType() == VT) {
25324 // Mask and original value have the same type
25325 NewMask = DAG.getNode(ISD::BITCAST, dl, WideVecVT, Mask);
25326 SmallVector<int, 16> ShuffleVec(NumElems * SizeRatio, -1);
25327 for (unsigned i = 0; i != NumElems; ++i)
25328 ShuffleVec[i] = i * SizeRatio;
25329 for (unsigned i = NumElems; i != NumElems*SizeRatio; ++i)
25330 ShuffleVec[i] = NumElems*SizeRatio;
25331 NewMask = DAG.getVectorShuffle(WideVecVT, dl, NewMask,
25332 DAG.getConstant(0, WideVecVT),
25336 assert(Mask.getValueType().getVectorElementType() == MVT::i1);
25337 unsigned WidenNumElts = NumElems*SizeRatio;
25338 unsigned MaskNumElts = VT.getVectorNumElements();
25339 EVT NewMaskVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
25342 unsigned NumConcat = WidenNumElts / MaskNumElts;
25343 SmallVector<SDValue, 16> Ops(NumConcat);
25344 SDValue ZeroVal = DAG.getConstant(0, Mask.getValueType());
25346 for (unsigned i = 1; i != NumConcat; ++i)
25349 NewMask = DAG.getNode(ISD::CONCAT_VECTORS, dl, NewMaskVT, Ops);
25352 SDValue WideLd = DAG.getMaskedLoad(WideVecVT, dl, Mld->getChain(),
25353 Mld->getBasePtr(), NewMask, WideSrc0,
25354 Mld->getMemoryVT(), Mld->getMemOperand(),
25356 SDValue NewVec = DAG.getNode(X86ISD::VSEXT, dl, VT, WideLd);
25357 return DCI.CombineTo(N, NewVec, WideLd.getValue(1), true);
25360 /// PerformMSTORECombine - Resolve truncating stores
25361 static SDValue PerformMSTORECombine(SDNode *N, SelectionDAG &DAG,
25362 const X86Subtarget *Subtarget) {
25363 MaskedStoreSDNode *Mst = cast<MaskedStoreSDNode>(N);
25364 if (!Mst->isTruncatingStore())
25367 EVT VT = Mst->getValue().getValueType();
25368 unsigned NumElems = VT.getVectorNumElements();
25369 EVT StVT = Mst->getMemoryVT();
25372 assert(StVT != VT && "Cannot truncate to the same type");
25373 unsigned FromSz = VT.getVectorElementType().getSizeInBits();
25374 unsigned ToSz = StVT.getVectorElementType().getSizeInBits();
25376 // From, To sizes and ElemCount must be pow of two
25377 assert (isPowerOf2_32(NumElems * FromSz * ToSz) &&
25378 "Unexpected size for truncating masked store");
25379 // We are going to use the original vector elt for storing.
25380 // Accumulated smaller vector elements must be a multiple of the store size.
25381 assert (((NumElems * FromSz) % ToSz) == 0 &&
25382 "Unexpected ratio for truncating masked store");
25384 unsigned SizeRatio = FromSz / ToSz;
25385 assert(SizeRatio * NumElems * ToSz == VT.getSizeInBits());
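// Illustrative example: a truncating masked store of a v8i32 value to v8i16
// memory has SizeRatio == 2 and WideVecVT == v16i16; the value is bitcast and
// shuffled so the truncated halves land in the low lanes, the mask is widened
// with zeros, and a non-truncating masked store is emitted.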
25387 // Create a type on which we perform the shuffle
25388 EVT WideVecVT = EVT::getVectorVT(*DAG.getContext(),
25389 StVT.getScalarType(), NumElems*SizeRatio);
25391 assert(WideVecVT.getSizeInBits() == VT.getSizeInBits());
25393 SDValue WideVec = DAG.getNode(ISD::BITCAST, dl, WideVecVT, Mst->getValue());
25394 SmallVector<int, 16> ShuffleVec(NumElems * SizeRatio, -1);
25395 for (unsigned i = 0; i != NumElems; ++i)
25396 ShuffleVec[i] = i * SizeRatio;
25398 // Can't shuffle using an illegal type.
25399 assert (DAG.getTargetLoweringInfo().isTypeLegal(WideVecVT)
25400 && "WideVecVT should be legal");
25402 SDValue TruncatedVal = DAG.getVectorShuffle(WideVecVT, dl, WideVec,
25403 DAG.getUNDEF(WideVecVT),
&ShuffleVec[0]);
SDValue NewMask;
25407 SDValue Mask = Mst->getMask();
25408 if (Mask.getValueType() == VT) {
25409 // Mask and original value have the same type
25410 NewMask = DAG.getNode(ISD::BITCAST, dl, WideVecVT, Mask);
25411 for (unsigned i = 0; i != NumElems; ++i)
25412 ShuffleVec[i] = i * SizeRatio;
25413 for (unsigned i = NumElems; i != NumElems*SizeRatio; ++i)
25414 ShuffleVec[i] = NumElems*SizeRatio;
25415 NewMask = DAG.getVectorShuffle(WideVecVT, dl, NewMask,
25416 DAG.getConstant(0, WideVecVT),
25420 assert(Mask.getValueType().getVectorElementType() == MVT::i1);
25421 unsigned WidenNumElts = NumElems*SizeRatio;
25422 unsigned MaskNumElts = VT.getVectorNumElements();
25423 EVT NewMaskVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
25426 unsigned NumConcat = WidenNumElts / MaskNumElts;
25427 SmallVector<SDValue, 16> Ops(NumConcat);
25428 SDValue ZeroVal = DAG.getConstant(0, Mask.getValueType());
25430 for (unsigned i = 1; i != NumConcat; ++i)
25433 NewMask = DAG.getNode(ISD::CONCAT_VECTORS, dl, NewMaskVT, Ops);
25436 return DAG.getMaskedStore(Mst->getChain(), dl, TruncatedVal, Mst->getBasePtr(),
25437 NewMask, StVT, Mst->getMemOperand(), false);
25439 /// PerformSTORECombine - Do target-specific dag combines on STORE nodes.
25440 static SDValue PerformSTORECombine(SDNode *N, SelectionDAG &DAG,
25441 const X86Subtarget *Subtarget) {
25442 StoreSDNode *St = cast<StoreSDNode>(N);
25443 EVT VT = St->getValue().getValueType();
25444 EVT StVT = St->getMemoryVT();
25446 SDValue StoredVal = St->getOperand(1);
25447 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
25449 // If we are saving a concatenation of two XMM registers and 32-byte stores
25450 // are slow, such as on Sandy Bridge, perform two 16-byte stores.
25451 unsigned Alignment = St->getAlignment();
25452 bool IsAligned = Alignment == 0 || Alignment >= VT.getSizeInBits()/8;
25453 if (VT.is256BitVector() && Subtarget->isUnalignedMem32Slow() &&
25454 StVT == VT && !IsAligned) {
25455 unsigned NumElems = VT.getVectorNumElements();
25459 SDValue Value0 = Extract128BitVector(StoredVal, 0, DAG, dl);
25460 SDValue Value1 = Extract128BitVector(StoredVal, NumElems/2, DAG, dl);
25462 SDValue Stride = DAG.getConstant(16, TLI.getPointerTy());
25463 SDValue Ptr0 = St->getBasePtr();
25464 SDValue Ptr1 = DAG.getNode(ISD::ADD, dl, Ptr0.getValueType(), Ptr0, Stride);
25466 SDValue Ch0 = DAG.getStore(St->getChain(), dl, Value0, Ptr0,
25467 St->getPointerInfo(), St->isVolatile(),
25468 St->isNonTemporal(), Alignment);
25469 SDValue Ch1 = DAG.getStore(St->getChain(), dl, Value1, Ptr1,
25470 St->getPointerInfo(), St->isVolatile(),
25471 St->isNonTemporal(),
25472 std::min(16U, Alignment));
25473 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Ch0, Ch1);
25476 // Optimize trunc store (of multiple scalars) to shuffle and store.
25477 // First, pack all of the elements in one place. Next, store to memory
25478 // in fewer chunks.
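// Illustrative example: a truncating store of v8i32 to v8i16 is rewritten below
// by bitcasting the value to v16i16, shuffling the low halves of each element
// into lanes 0..7, and then storing that packed data in the widest legal
// integer chunks (e.g. two i64 stores, falling back to f64 or i32 chunks when
// 64-bit integer stores are not available).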
25479 if (St->isTruncatingStore() && VT.isVector()) {
25480 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
25481 unsigned NumElems = VT.getVectorNumElements();
25482 assert(StVT != VT && "Cannot truncate to the same type");
25483 unsigned FromSz = VT.getVectorElementType().getSizeInBits();
25484 unsigned ToSz = StVT.getVectorElementType().getSizeInBits();
25486 // From, To sizes and ElemCount must be pow of two
25487 if (!isPowerOf2_32(NumElems * FromSz * ToSz)) return SDValue();
25488 // We are going to use the original vector elt for storing.
25489 // Accumulated smaller vector elements must be a multiple of the store size.
25490 if (0 != (NumElems * FromSz) % ToSz) return SDValue();
25492 unsigned SizeRatio = FromSz / ToSz;
25494 assert(SizeRatio * NumElems * ToSz == VT.getSizeInBits());
25496 // Create a type on which we perform the shuffle
25497 EVT WideVecVT = EVT::getVectorVT(*DAG.getContext(),
25498 StVT.getScalarType(), NumElems*SizeRatio);
25500 assert(WideVecVT.getSizeInBits() == VT.getSizeInBits());
25502 SDValue WideVec = DAG.getNode(ISD::BITCAST, dl, WideVecVT, St->getValue());
25503 SmallVector<int, 8> ShuffleVec(NumElems * SizeRatio, -1);
25504 for (unsigned i = 0; i != NumElems; ++i)
25505 ShuffleVec[i] = i * SizeRatio;
25507 // Can't shuffle using an illegal type.
25508 if (!TLI.isTypeLegal(WideVecVT))
25511 SDValue Shuff = DAG.getVectorShuffle(WideVecVT, dl, WideVec,
25512 DAG.getUNDEF(WideVecVT),
25514 // At this point all of the data is stored at the bottom of the
25515 // register. We now need to save it to mem.
25517 // Find the largest store unit
25518 MVT StoreType = MVT::i8;
25519 for (MVT Tp : MVT::integer_valuetypes()) {
25520 if (TLI.isTypeLegal(Tp) && Tp.getSizeInBits() <= NumElems * ToSz)
StoreType = Tp;
25524 // On 32-bit systems, we can't save 64-bit integers. Try bitcasting to f64.
25525 if (TLI.isTypeLegal(MVT::f64) && StoreType.getSizeInBits() < 64 &&
25526 (64 <= NumElems * ToSz))
25527 StoreType = MVT::f64;
25529 // Bitcast the original vector into a vector of store-size units
25530 EVT StoreVecVT = EVT::getVectorVT(*DAG.getContext(),
25531 StoreType, VT.getSizeInBits()/StoreType.getSizeInBits());
25532 assert(StoreVecVT.getSizeInBits() == VT.getSizeInBits());
25533 SDValue ShuffWide = DAG.getNode(ISD::BITCAST, dl, StoreVecVT, Shuff);
25534 SmallVector<SDValue, 8> Chains;
25535 SDValue Increment = DAG.getConstant(StoreType.getSizeInBits()/8,
25536 TLI.getPointerTy());
25537 SDValue Ptr = St->getBasePtr();
25539 // Perform one or more big stores into memory.
25540 for (unsigned i=0, e=(ToSz*NumElems)/StoreType.getSizeInBits(); i!=e; ++i) {
25541 SDValue SubVec = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl,
25542 StoreType, ShuffWide,
25543 DAG.getIntPtrConstant(i));
25544 SDValue Ch = DAG.getStore(St->getChain(), dl, SubVec, Ptr,
25545 St->getPointerInfo(), St->isVolatile(),
25546 St->isNonTemporal(), St->getAlignment());
25547 Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, Increment);
25548 Chains.push_back(Ch);
25551 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Chains);
25554 // Turn load->store of MMX types into GPR load/stores. This avoids clobbering
25555 // the FP state in cases where an emms may be missing.
25556 // A preferable solution to the general problem is to figure out the right
25557 // places to insert EMMS. This qualifies as a quick hack.
25559 // Similarly, turn load->store of i64 into double load/stores in 32-bit mode.
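// Illustrative example: a simple 64-bit copy such as "*dst = *src" of an MMX
// vector (or of an i64 in 32-bit mode) is rewritten below as one i64 or f64
// load/store pair when that is legal, so no MMX/x87 register is touched; if
// neither is available it becomes two 32-bit load/store pairs.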
25560 if (VT.getSizeInBits() != 64)
25563 const Function *F = DAG.getMachineFunction().getFunction();
25564 bool NoImplicitFloatOps = F->hasFnAttribute(Attribute::NoImplicitFloat);
25565 bool F64IsLegal = !DAG.getTarget().Options.UseSoftFloat && !NoImplicitFloatOps
25566 && Subtarget->hasSSE2();
25567 if ((VT.isVector() ||
25568 (VT == MVT::i64 && F64IsLegal && !Subtarget->is64Bit())) &&
25569 isa<LoadSDNode>(St->getValue()) &&
25570 !cast<LoadSDNode>(St->getValue())->isVolatile() &&
25571 St->getChain().hasOneUse() && !St->isVolatile()) {
25572 SDNode* LdVal = St->getValue().getNode();
25573 LoadSDNode *Ld = nullptr;
25574 int TokenFactorIndex = -1;
25575 SmallVector<SDValue, 8> Ops;
25576 SDNode* ChainVal = St->getChain().getNode();
25577 // Must be a store of a load. We currently handle two cases: the load
25578 // is a direct child, or it's under an intervening TokenFactor. It is
25579 // possible to dig deeper under nested TokenFactors.
25580 if (ChainVal == LdVal)
25581 Ld = cast<LoadSDNode>(St->getChain());
25582 else if (St->getValue().hasOneUse() &&
25583 ChainVal->getOpcode() == ISD::TokenFactor) {
25584 for (unsigned i = 0, e = ChainVal->getNumOperands(); i != e; ++i) {
25585 if (ChainVal->getOperand(i).getNode() == LdVal) {
25586 TokenFactorIndex = i;
25587 Ld = cast<LoadSDNode>(St->getValue());
25589 Ops.push_back(ChainVal->getOperand(i));
25593 if (!Ld || !ISD::isNormalLoad(Ld))
25596 // If this is not the MMX case, i.e. we are just turning i64 load/store
25597 // into f64 load/store, avoid the transformation if there are multiple
25598 // uses of the loaded value.
25599 if (!VT.isVector() && !Ld->hasNUsesOfValue(1, 0))
25604 // If we are a 64-bit capable x86, lower to a single movq load/store pair.
25605 // Otherwise, if it's legal to use f64 SSE instructions, use f64 load/store
25607 if (Subtarget->is64Bit() || F64IsLegal) {
25608 EVT LdVT = Subtarget->is64Bit() ? MVT::i64 : MVT::f64;
25609 SDValue NewLd = DAG.getLoad(LdVT, LdDL, Ld->getChain(), Ld->getBasePtr(),
25610 Ld->getPointerInfo(), Ld->isVolatile(),
25611 Ld->isNonTemporal(), Ld->isInvariant(),
25612 Ld->getAlignment());
25613 SDValue NewChain = NewLd.getValue(1);
25614 if (TokenFactorIndex != -1) {
25615 Ops.push_back(NewChain);
25616 NewChain = DAG.getNode(ISD::TokenFactor, LdDL, MVT::Other, Ops);
25618 return DAG.getStore(NewChain, StDL, NewLd, St->getBasePtr(),
25619 St->getPointerInfo(),
25620 St->isVolatile(), St->isNonTemporal(),
25621 St->getAlignment());
25624 // Otherwise, lower to two pairs of 32-bit loads / stores.
25625 SDValue LoAddr = Ld->getBasePtr();
25626 SDValue HiAddr = DAG.getNode(ISD::ADD, LdDL, MVT::i32, LoAddr,
25627 DAG.getConstant(4, MVT::i32));
25629 SDValue LoLd = DAG.getLoad(MVT::i32, LdDL, Ld->getChain(), LoAddr,
25630 Ld->getPointerInfo(),
25631 Ld->isVolatile(), Ld->isNonTemporal(),
25632 Ld->isInvariant(), Ld->getAlignment());
25633 SDValue HiLd = DAG.getLoad(MVT::i32, LdDL, Ld->getChain(), HiAddr,
25634 Ld->getPointerInfo().getWithOffset(4),
25635 Ld->isVolatile(), Ld->isNonTemporal(),
25637 MinAlign(Ld->getAlignment(), 4));
25639 SDValue NewChain = LoLd.getValue(1);
25640 if (TokenFactorIndex != -1) {
25641 Ops.push_back(LoLd);
25642 Ops.push_back(HiLd);
25643 NewChain = DAG.getNode(ISD::TokenFactor, LdDL, MVT::Other, Ops);
25646 LoAddr = St->getBasePtr();
25647 HiAddr = DAG.getNode(ISD::ADD, StDL, MVT::i32, LoAddr,
25648 DAG.getConstant(4, MVT::i32));
25650 SDValue LoSt = DAG.getStore(NewChain, StDL, LoLd, LoAddr,
25651 St->getPointerInfo(),
25652 St->isVolatile(), St->isNonTemporal(),
25653 St->getAlignment());
25654 SDValue HiSt = DAG.getStore(NewChain, StDL, HiLd, HiAddr,
25655 St->getPointerInfo().getWithOffset(4),
25657 St->isNonTemporal(),
25658 MinAlign(St->getAlignment(), 4));
25659 return DAG.getNode(ISD::TokenFactor, StDL, MVT::Other, LoSt, HiSt);
25664 /// Return 'true' if this vector operation is "horizontal"
25665 /// and return the operands for the horizontal operation in LHS and RHS. A
25666 /// horizontal operation performs the binary operation on successive elements
25667 /// of its first operand, then on successive elements of its second operand,
25668 /// returning the resulting values in a vector. For example, if
25669 /// A = < float a0, float a1, float a2, float a3 >
25671 /// B = < float b0, float b1, float b2, float b3 >
25672 /// then the result of doing a horizontal operation on A and B is
25673 /// A horizontal-op B = < a0 op a1, a2 op a3, b0 op b1, b2 op b3 >.
25674 /// In short, LHS and RHS are inspected to see if LHS op RHS is of the form
25675 /// A horizontal-op B, for some already available A and B, and if so then LHS is
25676 /// set to A, RHS to B, and the routine returns 'true'.
25677 /// Note that the binary operation should have the property that if one of the
25678 /// operands is UNDEF then the result is UNDEF.
25679 static bool isHorizontalBinOp(SDValue &LHS, SDValue &RHS, bool IsCommutative) {
25680 // Look for the following pattern: if
25681 // A = < float a0, float a1, float a2, float a3 >
25682 // B = < float b0, float b1, float b2, float b3 >
25684 // LHS = VECTOR_SHUFFLE A, B, <0, 2, 4, 6>
25685 // RHS = VECTOR_SHUFFLE A, B, <1, 3, 5, 7>
25686 // then LHS op RHS = < a0 op a1, a2 op a3, b0 op b1, b2 op b3 >
25687 // which is A horizontal-op B.
25689 // At least one of the operands should be a vector shuffle.
25690 if (LHS.getOpcode() != ISD::VECTOR_SHUFFLE &&
25691 RHS.getOpcode() != ISD::VECTOR_SHUFFLE)
25694 MVT VT = LHS.getSimpleValueType();
25696 assert((VT.is128BitVector() || VT.is256BitVector()) &&
25697 "Unsupported vector type for horizontal add/sub");
25699 // Handle 128 and 256-bit vector lengths. AVX defines horizontal add/sub to
25700 // operate independently on 128-bit lanes.
25701 unsigned NumElts = VT.getVectorNumElements();
25702 unsigned NumLanes = VT.getSizeInBits()/128;
25703 unsigned NumLaneElts = NumElts / NumLanes;
25704 assert((NumLaneElts % 2 == 0) &&
25705 "Vector type should have an even number of elements in each lane");
25706 unsigned HalfLaneElts = NumLaneElts/2;
25708 // View LHS in the form
25709 // LHS = VECTOR_SHUFFLE A, B, LMask
25710 // If LHS is not a shuffle then pretend it is the shuffle
25711 // LHS = VECTOR_SHUFFLE LHS, undef, <0, 1, ..., N-1>
25712 // NOTE: in what follows a default initialized SDValue represents an UNDEF of
// type VT.
SDValue A, B;
25715 SmallVector<int, 16> LMask(NumElts);
25716 if (LHS.getOpcode() == ISD::VECTOR_SHUFFLE) {
25717 if (LHS.getOperand(0).getOpcode() != ISD::UNDEF)
25718 A = LHS.getOperand(0);
25719 if (LHS.getOperand(1).getOpcode() != ISD::UNDEF)
25720 B = LHS.getOperand(1);
25721 ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(LHS.getNode())->getMask();
25722 std::copy(Mask.begin(), Mask.end(), LMask.begin());
25724 if (LHS.getOpcode() != ISD::UNDEF)
25726 for (unsigned i = 0; i != NumElts; ++i)
25730 // Likewise, view RHS in the form
25731 // RHS = VECTOR_SHUFFLE C, D, RMask
SDValue C, D;
25733 SmallVector<int, 16> RMask(NumElts);
25734 if (RHS.getOpcode() == ISD::VECTOR_SHUFFLE) {
25735 if (RHS.getOperand(0).getOpcode() != ISD::UNDEF)
25736 C = RHS.getOperand(0);
25737 if (RHS.getOperand(1).getOpcode() != ISD::UNDEF)
25738 D = RHS.getOperand(1);
25739 ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(RHS.getNode())->getMask();
25740 std::copy(Mask.begin(), Mask.end(), RMask.begin());
25742 if (RHS.getOpcode() != ISD::UNDEF)
25744 for (unsigned i = 0; i != NumElts; ++i)
25748 // Check that the shuffles are both shuffling the same vectors.
25749 if (!(A == C && B == D) && !(A == D && B == C))
25752 // If everything is UNDEF then bail out: it would be better to fold to UNDEF.
25753 if (!A.getNode() && !B.getNode())
25756 // If A and B occur in reverse order in RHS, then "swap" them (which means
25757 // rewriting the mask).
25759 CommuteVectorShuffleMask(RMask, NumElts);
25761 // At this point LHS and RHS are equivalent to
25762 // LHS = VECTOR_SHUFFLE A, B, LMask
25763 // RHS = VECTOR_SHUFFLE A, B, RMask
25764 // Check that the masks correspond to performing a horizontal operation.
25765 for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
25766 for (unsigned i = 0; i != NumLaneElts; ++i) {
25767 int LIdx = LMask[i+l], RIdx = RMask[i+l];
25769 // Ignore any UNDEF components.
25770 if (LIdx < 0 || RIdx < 0 ||
25771 (!A.getNode() && (LIdx < (int)NumElts || RIdx < (int)NumElts)) ||
25772 (!B.getNode() && (LIdx >= (int)NumElts || RIdx >= (int)NumElts)))
25775 // Check that successive elements are being operated on. If not, this is
25776 // not a horizontal operation.
25777 unsigned Src = (i/HalfLaneElts); // each lane is split between srcs
25778 int Index = 2*(i%HalfLaneElts) + NumElts*Src + l;
25779 if (!(LIdx == Index && RIdx == Index + 1) &&
25780 !(IsCommutative && LIdx == Index + 1 && RIdx == Index))
25785 LHS = A.getNode() ? A : B; // If A is 'UNDEF', use B for it.
25786 RHS = B.getNode() ? B : A; // If B is 'UNDEF', use A for it.
25790 /// Do target-specific dag combines on floating point adds.
25791 static SDValue PerformFADDCombine(SDNode *N, SelectionDAG &DAG,
25792 const X86Subtarget *Subtarget) {
25793 EVT VT = N->getValueType(0);
25794 SDValue LHS = N->getOperand(0);
25795 SDValue RHS = N->getOperand(1);
25797 // Try to synthesize horizontal adds from adds of shuffles.
25798 if (((Subtarget->hasSSE3() && (VT == MVT::v4f32 || VT == MVT::v2f64)) ||
25799 (Subtarget->hasFp256() && (VT == MVT::v8f32 || VT == MVT::v4f64))) &&
25800 isHorizontalBinOp(LHS, RHS, true))
25801 return DAG.getNode(X86ISD::FHADD, SDLoc(N), VT, LHS, RHS);
25805 /// Do target-specific dag combines on floating point subs.
25806 static SDValue PerformFSUBCombine(SDNode *N, SelectionDAG &DAG,
25807 const X86Subtarget *Subtarget) {
25808 EVT VT = N->getValueType(0);
25809 SDValue LHS = N->getOperand(0);
25810 SDValue RHS = N->getOperand(1);
25812 // Try to synthesize horizontal subs from subs of shuffles.
25813 if (((Subtarget->hasSSE3() && (VT == MVT::v4f32 || VT == MVT::v2f64)) ||
25814 (Subtarget->hasFp256() && (VT == MVT::v8f32 || VT == MVT::v4f64))) &&
25815 isHorizontalBinOp(LHS, RHS, false))
25816 return DAG.getNode(X86ISD::FHSUB, SDLoc(N), VT, LHS, RHS);
25820 /// Do target-specific dag combines on X86ISD::FOR and X86ISD::FXOR nodes.
25821 static SDValue PerformFORCombine(SDNode *N, SelectionDAG &DAG) {
25822 assert(N->getOpcode() == X86ISD::FOR || N->getOpcode() == X86ISD::FXOR);
25824 // F[X]OR(0.0, x) -> x
25825 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(0)))
25826 if (C->getValueAPF().isPosZero())
25827 return N->getOperand(1);
25829 // F[X]OR(x, 0.0) -> x
25830 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(1)))
25831 if (C->getValueAPF().isPosZero())
25832 return N->getOperand(0);
25836 /// Do target-specific dag combines on X86ISD::FMIN and X86ISD::FMAX nodes.
25837 static SDValue PerformFMinFMaxCombine(SDNode *N, SelectionDAG &DAG) {
25838 assert(N->getOpcode() == X86ISD::FMIN || N->getOpcode() == X86ISD::FMAX);
25840 // Only perform optimizations if UnsafeMath is used.
25841 if (!DAG.getTarget().Options.UnsafeFPMath)
25844 // If we run in unsafe-math mode, then convert the FMAX and FMIN nodes
25845 // into FMINC and FMAXC, which are Commutative operations.
25846 unsigned NewOp = 0;
25847 switch (N->getOpcode()) {
25848 default: llvm_unreachable("unknown opcode");
25849 case X86ISD::FMIN: NewOp = X86ISD::FMINC; break;
25850 case X86ISD::FMAX: NewOp = X86ISD::FMAXC; break;
25853 return DAG.getNode(NewOp, SDLoc(N), N->getValueType(0),
25854 N->getOperand(0), N->getOperand(1));
25857 /// Do target-specific dag combines on X86ISD::FAND nodes.
25858 static SDValue PerformFANDCombine(SDNode *N, SelectionDAG &DAG) {
25859 // FAND(0.0, x) -> 0.0
25860 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(0)))
25861 if (C->getValueAPF().isPosZero())
25862 return N->getOperand(0);
25864 // FAND(x, 0.0) -> 0.0
25865 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(1)))
25866 if (C->getValueAPF().isPosZero())
25867 return N->getOperand(1);
25872 /// Do target-specific dag combines on X86ISD::FANDN nodes
25873 static SDValue PerformFANDNCombine(SDNode *N, SelectionDAG &DAG) {
25874 // FANDN(0.0, x) -> x
25875 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(0)))
25876 if (C->getValueAPF().isPosZero())
25877 return N->getOperand(1);
25879 // FANDN(x, 0.0) -> 0.0
25880 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(1)))
25881 if (C->getValueAPF().isPosZero())
25882 return N->getOperand(1);
25887 static SDValue PerformBTCombine(SDNode *N,
25889 TargetLowering::DAGCombinerInfo &DCI) {
25890 // BT ignores high bits in the bit index operand.
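// Illustrative example: for an i32 bit index only the low Log2_32(32) == 5 bits
// are demanded, so an explicit "and $31, %idx" feeding BT can usually be
// simplified away by the demanded-bits call below.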
25891 SDValue Op1 = N->getOperand(1);
25892 if (Op1.hasOneUse()) {
25893 unsigned BitWidth = Op1.getValueSizeInBits();
25894 APInt DemandedMask = APInt::getLowBitsSet(BitWidth, Log2_32(BitWidth));
25895 APInt KnownZero, KnownOne;
25896 TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
25897 !DCI.isBeforeLegalizeOps());
25898 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
25899 if (TLO.ShrinkDemandedConstant(Op1, DemandedMask) ||
25900 TLI.SimplifyDemandedBits(Op1, DemandedMask, KnownZero, KnownOne, TLO))
25901 DCI.CommitTargetLoweringOpt(TLO);
25906 static SDValue PerformVZEXT_MOVLCombine(SDNode *N, SelectionDAG &DAG) {
25907 SDValue Op = N->getOperand(0);
25908 if (Op.getOpcode() == ISD::BITCAST)
25909 Op = Op.getOperand(0);
25910 EVT VT = N->getValueType(0), OpVT = Op.getValueType();
25911 if (Op.getOpcode() == X86ISD::VZEXT_LOAD &&
25912 VT.getVectorElementType().getSizeInBits() ==
25913 OpVT.getVectorElementType().getSizeInBits()) {
25914 return DAG.getNode(ISD::BITCAST, SDLoc(N), VT, Op);
25919 static SDValue PerformSIGN_EXTEND_INREGCombine(SDNode *N, SelectionDAG &DAG,
25920 const X86Subtarget *Subtarget) {
25921 EVT VT = N->getValueType(0);
25922 if (!VT.isVector())
25925 SDValue N0 = N->getOperand(0);
25926 SDValue N1 = N->getOperand(1);
25927 EVT ExtraVT = cast<VTSDNode>(N1)->getVT();
25930 // SIGN_EXTEND_INREG to v4i64 is an expensive operation on both SSE and
25931 // AVX2, since there is no sign-extending shift-right operation on
25932 // vectors with 64-bit elements.
25933 //(sext_in_reg (v4i64 anyext (v4i32 x )), ExtraVT) ->
25934 // (v4i64 sext (v4i32 sext_in_reg (v4i32 x , ExtraVT)))
25935 if (VT == MVT::v4i64 && (N0.getOpcode() == ISD::ANY_EXTEND ||
25936 N0.getOpcode() == ISD::SIGN_EXTEND)) {
25937 SDValue N00 = N0.getOperand(0);
25939 // EXTLOAD has a better solution on AVX2,
25940 // it may be replaced with X86ISD::VSEXT node.
25941 if (N00.getOpcode() == ISD::LOAD && Subtarget->hasInt256())
25942 if (!ISD::isNormalLoad(N00.getNode()))
25945 if (N00.getValueType() == MVT::v4i32 && ExtraVT.getSizeInBits() < 128) {
25946 SDValue Tmp = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, MVT::v4i32,
25948 return DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i64, Tmp);
25954 static SDValue PerformSExtCombine(SDNode *N, SelectionDAG &DAG,
25955 TargetLowering::DAGCombinerInfo &DCI,
25956 const X86Subtarget *Subtarget) {
25957 SDValue N0 = N->getOperand(0);
25958 EVT VT = N->getValueType(0);
25960 // (i8,i32 sext (sdivrem (i8 x, i8 y)) ->
25961 // (i8,i32 (sdivrem_sext_hreg (i8 x, i8 y)
25962 // This exposes the sext to the sdivrem lowering, so that it directly extends
25963 // from AH (which we otherwise need to do contortions to access).
25964 if (N0.getOpcode() == ISD::SDIVREM && N0.getResNo() == 1 &&
25965 N0.getValueType() == MVT::i8 && VT == MVT::i32) {
25967 SDVTList NodeTys = DAG.getVTList(MVT::i8, VT);
25968 SDValue R = DAG.getNode(X86ISD::SDIVREM8_SEXT_HREG, dl, NodeTys,
25969 N0.getOperand(0), N0.getOperand(1));
25970 DAG.ReplaceAllUsesOfValueWith(N0.getValue(0), R.getValue(0));
25971 return R.getValue(1);
25974 if (!DCI.isBeforeLegalizeOps())
25977 if (!Subtarget->hasFp256())
25980 if (VT.isVector() && VT.getSizeInBits() == 256) {
25981 SDValue R = WidenMaskArithmetic(N, DAG, DCI, Subtarget);
25989 static SDValue PerformFMACombine(SDNode *N, SelectionDAG &DAG,
25990 const X86Subtarget* Subtarget) {
25992 EVT VT = N->getValueType(0);
25994 // Let legalize expand this if it isn't a legal type yet.
25995 if (!DAG.getTargetLoweringInfo().isTypeLegal(VT))
25998 EVT ScalarVT = VT.getScalarType();
25999 if ((ScalarVT != MVT::f32 && ScalarVT != MVT::f64) ||
26000 (!Subtarget->hasFMA() && !Subtarget->hasFMA4()))
26003 SDValue A = N->getOperand(0);
26004 SDValue B = N->getOperand(1);
26005 SDValue C = N->getOperand(2);
26007 bool NegA = (A.getOpcode() == ISD::FNEG);
26008 bool NegB = (B.getOpcode() == ISD::FNEG);
26009 bool NegC = (C.getOpcode() == ISD::FNEG);
26011 // Negative multiplication when NegA xor NegB
26012 bool NegMul = (NegA != NegB);
26014 A = A.getOperand(0);
26016 B = B.getOperand(0);
26018 C = C.getOperand(0);
26022 Opcode = (!NegC) ? X86ISD::FMADD : X86ISD::FMSUB;
26024 Opcode = (!NegC) ? X86ISD::FNMADD : X86ISD::FNMSUB;
26026 return DAG.getNode(Opcode, dl, VT, A, B, C);
26029 static SDValue PerformZExtCombine(SDNode *N, SelectionDAG &DAG,
26030 TargetLowering::DAGCombinerInfo &DCI,
26031 const X86Subtarget *Subtarget) {
26032 // (i32 zext (and (i8 x86isd::setcc_carry), 1)) ->
26033 // (and (i32 x86isd::setcc_carry), 1)
26034 // This eliminates the zext. This transformation is necessary because
26035 // ISD::SETCC is always legalized to i8.
26037 SDValue N0 = N->getOperand(0);
26038 EVT VT = N->getValueType(0);
26040 if (N0.getOpcode() == ISD::AND &&
26042 N0.getOperand(0).hasOneUse()) {
26043 SDValue N00 = N0.getOperand(0);
26044 if (N00.getOpcode() == X86ISD::SETCC_CARRY) {
26045 ConstantSDNode *C = dyn_cast<ConstantSDNode>(N0.getOperand(1));
26046 if (!C || C->getZExtValue() != 1)
26048 return DAG.getNode(ISD::AND, dl, VT,
26049 DAG.getNode(X86ISD::SETCC_CARRY, dl, VT,
26050 N00.getOperand(0), N00.getOperand(1)),
26051 DAG.getConstant(1, VT));
26055 if (N0.getOpcode() == ISD::TRUNCATE &&
26057 N0.getOperand(0).hasOneUse()) {
26058 SDValue N00 = N0.getOperand(0);
26059 if (N00.getOpcode() == X86ISD::SETCC_CARRY) {
26060 return DAG.getNode(ISD::AND, dl, VT,
26061 DAG.getNode(X86ISD::SETCC_CARRY, dl, VT,
26062 N00.getOperand(0), N00.getOperand(1)),
26063 DAG.getConstant(1, VT));
26066 if (VT.is256BitVector()) {
26067 SDValue R = WidenMaskArithmetic(N, DAG, DCI, Subtarget);
26072 // (i8,i32 zext (udivrem (i8 x, i8 y)) ->
26073 // (i8,i32 (udivrem_zext_hreg (i8 x, i8 y)
26074 // This exposes the zext to the udivrem lowering, so that it directly extends
26075 // from AH (which we otherwise need to do contortions to access).
26076 if (N0.getOpcode() == ISD::UDIVREM &&
26077 N0.getResNo() == 1 && N0.getValueType() == MVT::i8 &&
26078 (VT == MVT::i32 || VT == MVT::i64)) {
26079 SDVTList NodeTys = DAG.getVTList(MVT::i8, VT);
26080 SDValue R = DAG.getNode(X86ISD::UDIVREM8_ZEXT_HREG, dl, NodeTys,
26081 N0.getOperand(0), N0.getOperand(1));
26082 DAG.ReplaceAllUsesOfValueWith(N0.getValue(0), R.getValue(0));
26083 return R.getValue(1);
26089 // Optimize x == -y --> x+y == 0
26090 // x != -y --> x+y != 0
26091 static SDValue PerformISDSETCCCombine(SDNode *N, SelectionDAG &DAG,
26092 const X86Subtarget* Subtarget) {
26093 ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get();
26094 SDValue LHS = N->getOperand(0);
26095 SDValue RHS = N->getOperand(1);
26096 EVT VT = N->getValueType(0);
26099 if ((CC == ISD::SETNE || CC == ISD::SETEQ) && LHS.getOpcode() == ISD::SUB)
26100 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(LHS.getOperand(0)))
26101 if (C->getAPIntValue() == 0 && LHS.hasOneUse()) {
26102 SDValue addV = DAG.getNode(ISD::ADD, SDLoc(N),
26103 LHS.getValueType(), RHS, LHS.getOperand(1));
26104 return DAG.getSetCC(SDLoc(N), N->getValueType(0),
26105 addV, DAG.getConstant(0, addV.getValueType()), CC);
26107 if ((CC == ISD::SETNE || CC == ISD::SETEQ) && RHS.getOpcode() == ISD::SUB)
26108 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS.getOperand(0)))
26109 if (C->getAPIntValue() == 0 && RHS.hasOneUse()) {
26110 SDValue addV = DAG.getNode(ISD::ADD, SDLoc(N),
26111 RHS.getValueType(), LHS, RHS.getOperand(1));
26112 return DAG.getSetCC(SDLoc(N), N->getValueType(0),
26113 addV, DAG.getConstant(0, addV.getValueType()), CC);
26116 if (VT.getScalarType() == MVT::i1) {
26117 bool IsSEXT0 = (LHS.getOpcode() == ISD::SIGN_EXTEND) &&
26118 (LHS.getOperand(0).getValueType().getScalarType() == MVT::i1);
26119 bool IsVZero0 = ISD::isBuildVectorAllZeros(LHS.getNode());
26120 if (!IsSEXT0 && !IsVZero0)
26122 bool IsSEXT1 = (RHS.getOpcode() == ISD::SIGN_EXTEND) &&
26123 (RHS.getOperand(0).getValueType().getScalarType() == MVT::i1);
26124 bool IsVZero1 = ISD::isBuildVectorAllZeros(RHS.getNode());
26126 if (!IsSEXT1 && !IsVZero1)
26129 if (IsSEXT0 && IsVZero1) {
26130 assert(VT == LHS.getOperand(0).getValueType() && "Unexpected operand type");
26131 if (CC == ISD::SETEQ)
26132 return DAG.getNOT(DL, LHS.getOperand(0), VT);
26133 return LHS.getOperand(0);
26135 if (IsSEXT1 && IsVZero0) {
26136 assert(VT == RHS.getOperand(0).getValueType() && "Unexpected operand type");
26137 if (CC == ISD::SETEQ)
26138 return DAG.getNOT(DL, RHS.getOperand(0), VT);
26139 return RHS.getOperand(0);
26146 static SDValue PerformINSERTPSCombine(SDNode *N, SelectionDAG &DAG,
26147 const X86Subtarget *Subtarget) {
26149 MVT VT = N->getOperand(1)->getSimpleValueType(0);
26150 assert((VT == MVT::v4f32 || VT == MVT::v4i32) &&
26151 "X86insertps is only defined for v4x32");
26153 SDValue Ld = N->getOperand(1);
26154 if (MayFoldLoad(Ld)) {
26155 // Extract the countS bits from the immediate so we can get the proper
26156 // address when narrowing the vector load to a specific element.
26157 // When the second source op is a memory address, insertps doesn't use
26158 // countS and just gets an f32 from that address.
26159 unsigned DestIndex =
26160 cast<ConstantSDNode>(N->getOperand(2))->getZExtValue() >> 6;
26161 Ld = NarrowVectorLoadToElement(cast<LoadSDNode>(Ld), DestIndex, DAG);
26165 // Create this as a scalar to vector to match the instruction pattern.
26166 SDValue LoadScalarToVector = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Ld);
26167 // countS bits are ignored when loading from memory on insertps, which
26168 // means we don't need to explicitly set them to 0.
26169 return DAG.getNode(X86ISD::INSERTPS, dl, VT, N->getOperand(0),
26170 LoadScalarToVector, N->getOperand(2));
26173 // Helper function of PerformSETCCCombine. It materializes "setb reg"
26174 // as "sbb reg,reg", since it can be extended without zext and produces
26175 // an all-ones bit which is more useful than 0/1 in some cases.
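// Illustrative note: "sbb %eax, %eax" computes EAX - EAX - CF, i.e. 0 when the
// carry flag is clear and -1 (all ones) when it is set; masking that with 1 (as
// done below for non-i1 types) recovers the usual 0/1 setb result.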
26176 static SDValue MaterializeSETB(SDLoc DL, SDValue EFLAGS, SelectionDAG &DAG,
26179 return DAG.getNode(ISD::AND, DL, VT,
26180 DAG.getNode(X86ISD::SETCC_CARRY, DL, MVT::i8,
26181 DAG.getConstant(X86::COND_B, MVT::i8), EFLAGS),
26182 DAG.getConstant(1, VT));
26183 assert (VT == MVT::i1 && "Unexpected type for SETCC node");
26184 return DAG.getNode(ISD::TRUNCATE, DL, MVT::i1,
26185 DAG.getNode(X86ISD::SETCC_CARRY, DL, MVT::i8,
26186 DAG.getConstant(X86::COND_B, MVT::i8), EFLAGS));
26189 // Optimize RES = X86ISD::SETCC CONDCODE, EFLAG_INPUT
26190 static SDValue PerformSETCCCombine(SDNode *N, SelectionDAG &DAG,
26191 TargetLowering::DAGCombinerInfo &DCI,
26192 const X86Subtarget *Subtarget) {
26194 X86::CondCode CC = X86::CondCode(N->getConstantOperandVal(0));
26195 SDValue EFLAGS = N->getOperand(1);
26197 if (CC == X86::COND_A) {
26198 // Try to convert COND_A into COND_B in an attempt to facilitate
26199 // materializing "setb reg".
26201 // Do not flip "e > c", where "c" is a constant, because Cmp instruction
26202 // cannot take an immediate as its first operand.
26204 if (EFLAGS.getOpcode() == X86ISD::SUB && EFLAGS.hasOneUse() &&
26205 EFLAGS.getValueType().isInteger() &&
26206 !isa<ConstantSDNode>(EFLAGS.getOperand(1))) {
26207 SDValue NewSub = DAG.getNode(X86ISD::SUB, SDLoc(EFLAGS),
26208 EFLAGS.getNode()->getVTList(),
26209 EFLAGS.getOperand(1), EFLAGS.getOperand(0));
26210 SDValue NewEFLAGS = SDValue(NewSub.getNode(), EFLAGS.getResNo());
26211 return MaterializeSETB(DL, NewEFLAGS, DAG, N->getSimpleValueType(0));
26215 // Materialize "setb reg" as "sbb reg,reg", since it can be extended without
26216 // a zext and produces an all-ones bit which is more useful than 0/1 in some
26218 if (CC == X86::COND_B)
26219 return MaterializeSETB(DL, EFLAGS, DAG, N->getSimpleValueType(0));
SDValue Flags;
26223 Flags = checkBoolTestSetCCCombine(EFLAGS, CC);
26224 if (Flags.getNode()) {
26225 SDValue Cond = DAG.getConstant(CC, MVT::i8);
26226 return DAG.getNode(X86ISD::SETCC, DL, N->getVTList(), Cond, Flags);
26232 // Optimize branch condition evaluation.
26234 static SDValue PerformBrCondCombine(SDNode *N, SelectionDAG &DAG,
26235 TargetLowering::DAGCombinerInfo &DCI,
26236 const X86Subtarget *Subtarget) {
26238 SDValue Chain = N->getOperand(0);
26239 SDValue Dest = N->getOperand(1);
26240 SDValue EFLAGS = N->getOperand(3);
26241 X86::CondCode CC = X86::CondCode(N->getConstantOperandVal(2));
SDValue Flags;
26245 Flags = checkBoolTestSetCCCombine(EFLAGS, CC);
26246 if (Flags.getNode()) {
26247 SDValue Cond = DAG.getConstant(CC, MVT::i8);
26248 return DAG.getNode(X86ISD::BRCOND, DL, N->getVTList(), Chain, Dest, Cond,
26255 static SDValue performVectorCompareAndMaskUnaryOpCombine(SDNode *N,
26256 SelectionDAG &DAG) {
26257 // Take advantage of vector comparisons producing 0 or -1 in each lane to
26258 // optimize away operation when it's from a constant.
26260 // The general transformation is:
26261 // UNARYOP(AND(VECTOR_CMP(x,y), constant)) -->
26262 // AND(VECTOR_CMP(x,y), constant2)
26263 // constant2 = UNARYOP(constant)
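// Illustrative example: for UNARYOP == sint_to_fp with a splat-of-1 constant,
// sint_to_fp(and(vsetcc(x,y), <1,1,1,1>)) becomes
// and(vsetcc(x,y), bitcast(<1.0,1.0,1.0,1.0>)); since every compare lane is
// all-zeros or all-ones, the AND produces exactly 0.0 or 1.0 per lane and the
// run-time int-to-fp conversion disappears.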
26265 // Early exit if this isn't a vector operation, the operand of the
26266 // unary operation isn't a bitwise AND, or if the sizes of the operations
26267 // aren't the same.
26268 EVT VT = N->getValueType(0);
26269 if (!VT.isVector() || N->getOperand(0)->getOpcode() != ISD::AND ||
26270 N->getOperand(0)->getOperand(0)->getOpcode() != ISD::SETCC ||
26271 VT.getSizeInBits() != N->getOperand(0)->getValueType(0).getSizeInBits())
26274 // Now check that the other operand of the AND is a constant. We could
26275 // make the transformation for non-constant splats as well, but it's unclear
26276 // that would be a benefit as it would not eliminate any operations, just
26277 // perform one more step in scalar code before moving to the vector unit.
26278 if (BuildVectorSDNode *BV =
26279 dyn_cast<BuildVectorSDNode>(N->getOperand(0)->getOperand(1))) {
26280 // Bail out if the vector isn't a constant.
26281 if (!BV->isConstant())
26284 // Everything checks out. Build up the new and improved node.
26286 EVT IntVT = BV->getValueType(0);
26287 // Create a new constant of the appropriate type for the transformed node.
26289 SDValue SourceConst = DAG.getNode(N->getOpcode(), DL, VT, SDValue(BV, 0));
26290 // The AND node needs bitcasts to/from an integer vector type around it.
26291 SDValue MaskConst = DAG.getNode(ISD::BITCAST, DL, IntVT, SourceConst);
26292 SDValue NewAnd = DAG.getNode(ISD::AND, DL, IntVT,
26293 N->getOperand(0)->getOperand(0), MaskConst);
26294 SDValue Res = DAG.getNode(ISD::BITCAST, DL, VT, NewAnd);
26301 static SDValue PerformSINT_TO_FPCombine(SDNode *N, SelectionDAG &DAG,
26302 const X86Subtarget *Subtarget) {
26303 // First try to optimize away the conversion entirely when it's
26304 // conditionally from a constant. Vectors only.
26305 SDValue Res = performVectorCompareAndMaskUnaryOpCombine(N, DAG);
26306 if (Res != SDValue())
26309 // Now move on to more general possibilities.
26310 SDValue Op0 = N->getOperand(0);
26311 EVT InVT = Op0->getValueType(0);
26313 // SINT_TO_FP(v4i8) -> SINT_TO_FP(SEXT(v4i8 to v4i32))
26314 if (InVT == MVT::v8i8 || InVT == MVT::v4i8) {
26316 MVT DstVT = InVT == MVT::v4i8 ? MVT::v4i32 : MVT::v8i32;
26317 SDValue P = DAG.getNode(ISD::SIGN_EXTEND, dl, DstVT, Op0);
26318 return DAG.getNode(ISD::SINT_TO_FP, dl, N->getValueType(0), P);
26321 // Transform (SINT_TO_FP (i64 ...)) into an x87 operation if we have
26322 // a 32-bit target where SSE doesn't support i64->FP operations.
26323 if (Op0.getOpcode() == ISD::LOAD) {
26324 LoadSDNode *Ld = cast<LoadSDNode>(Op0.getNode());
26325 EVT VT = Ld->getValueType(0);
26326 if (!Ld->isVolatile() && !N->getValueType(0).isVector() &&
26327 ISD::isNON_EXTLoad(Op0.getNode()) && Op0.hasOneUse() &&
26328 !Subtarget->is64Bit() && VT == MVT::i64) {
26329 SDValue FILDChain = Subtarget->getTargetLowering()->BuildFILD(
26330 SDValue(N, 0), Ld->getValueType(0), Ld->getChain(), Op0, DAG);
26331 DAG.ReplaceAllUsesOfValueWith(Op0.getValue(1), FILDChain.getValue(1));
26338 // Optimize RES, EFLAGS = X86ISD::ADC LHS, RHS, EFLAGS
26339 static SDValue PerformADCCombine(SDNode *N, SelectionDAG &DAG,
26340 X86TargetLowering::DAGCombinerInfo &DCI) {
26341 // If the LHS and RHS of the ADC node are zero, then it can't overflow and
26342 // the result is either zero or one (depending on the input carry bit).
26343 // Strength reduce this down to a "set on carry" aka SETCC_CARRY&1.
26344 if (X86::isZeroNode(N->getOperand(0)) &&
26345 X86::isZeroNode(N->getOperand(1)) &&
26346 // We don't have a good way to replace an EFLAGS use, so only do this when the EFLAGS result is unused.
26348 SDValue(N, 1).use_empty()) {
26350 EVT VT = N->getValueType(0);
26351 SDValue CarryOut = DAG.getConstant(0, N->getValueType(1));
26352 SDValue Res1 = DAG.getNode(ISD::AND, DL, VT,
26353 DAG.getNode(X86ISD::SETCC_CARRY, DL, VT,
26354 DAG.getConstant(X86::COND_B,MVT::i8),
26356 DAG.getConstant(1, VT));
26357 return DCI.CombineTo(N, Res1, CarryOut);
26363 // fold (add Y, (sete X, 0)) -> adc 0, Y
26364 // (add Y, (setne X, 0)) -> sbb -1, Y
26365 // (sub (sete X, 0), Y) -> sbb 0, Y
26366 // (sub (setne X, 0), Y) -> adc -1, Y
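// Illustrative note: the CMP built below compares X against 1, so the carry
// flag is set exactly when X == 0 (unsigned X < 1); "adc $0, Y" then adds that
// carry, which reproduces add Y, (sete X, 0), and the remaining forms follow
// with sbb and/or a -1 immediate.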
26367 static SDValue OptimizeConditionalInDecrement(SDNode *N, SelectionDAG &DAG) {
26370 // Look through ZExts.
26371 SDValue Ext = N->getOperand(N->getOpcode() == ISD::SUB ? 1 : 0);
26372 if (Ext.getOpcode() != ISD::ZERO_EXTEND || !Ext.hasOneUse())
26375 SDValue SetCC = Ext.getOperand(0);
26376 if (SetCC.getOpcode() != X86ISD::SETCC || !SetCC.hasOneUse())
26379 X86::CondCode CC = (X86::CondCode)SetCC.getConstantOperandVal(0);
26380 if (CC != X86::COND_E && CC != X86::COND_NE)
26383 SDValue Cmp = SetCC.getOperand(1);
26384 if (Cmp.getOpcode() != X86ISD::CMP || !Cmp.hasOneUse() ||
26385 !X86::isZeroNode(Cmp.getOperand(1)) ||
26386 !Cmp.getOperand(0).getValueType().isInteger())
26389 SDValue CmpOp0 = Cmp.getOperand(0);
26390 SDValue NewCmp = DAG.getNode(X86ISD::CMP, DL, MVT::i32, CmpOp0,
26391 DAG.getConstant(1, CmpOp0.getValueType()));
26393 SDValue OtherVal = N->getOperand(N->getOpcode() == ISD::SUB ? 0 : 1);
26394 if (CC == X86::COND_NE)
26395 return DAG.getNode(N->getOpcode() == ISD::SUB ? X86ISD::ADC : X86ISD::SBB,
26396 DL, OtherVal.getValueType(), OtherVal,
26397 DAG.getConstant(-1ULL, OtherVal.getValueType()), NewCmp);
26398 return DAG.getNode(N->getOpcode() == ISD::SUB ? X86ISD::SBB : X86ISD::ADC,
26399 DL, OtherVal.getValueType(), OtherVal,
26400 DAG.getConstant(0, OtherVal.getValueType()), NewCmp);
26403 /// PerformAddCombine - Do target-specific dag combines on integer adds.
26404 static SDValue PerformAddCombine(SDNode *N, SelectionDAG &DAG,
26405 const X86Subtarget *Subtarget) {
26406 EVT VT = N->getValueType(0);
26407 SDValue Op0 = N->getOperand(0);
26408 SDValue Op1 = N->getOperand(1);
26410 // Try to synthesize horizontal adds from adds of shuffles.
26411 if (((Subtarget->hasSSSE3() && (VT == MVT::v8i16 || VT == MVT::v4i32)) ||
26412 (Subtarget->hasInt256() && (VT == MVT::v16i16 || VT == MVT::v8i32))) &&
26413 isHorizontalBinOp(Op0, Op1, true))
26414 return DAG.getNode(X86ISD::HADD, SDLoc(N), VT, Op0, Op1);
26416 return OptimizeConditionalInDecrement(N, DAG);
26419 static SDValue PerformSubCombine(SDNode *N, SelectionDAG &DAG,
26420 const X86Subtarget *Subtarget) {
26421 SDValue Op0 = N->getOperand(0);
26422 SDValue Op1 = N->getOperand(1);
26424 // X86 can't encode an immediate LHS of a sub. See if we can push the
26425 // negation into a preceding instruction.
26426 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op0)) {
26427 // If the RHS of the sub is a XOR with one use and a constant, invert the
26428 // immediate. Then add one to the LHS of the sub so we can turn
26429 // X-Y -> X+~Y+1, saving one register.
26430 if (Op1->hasOneUse() && Op1.getOpcode() == ISD::XOR &&
26431 isa<ConstantSDNode>(Op1.getOperand(1))) {
26432 APInt XorC = cast<ConstantSDNode>(Op1.getOperand(1))->getAPIntValue();
26433 EVT VT = Op0.getValueType();
26434 SDValue NewXor = DAG.getNode(ISD::XOR, SDLoc(Op1), VT,
26436 DAG.getConstant(~XorC, VT));
26437 return DAG.getNode(ISD::ADD, SDLoc(N), VT, NewXor,
26438 DAG.getConstant(C->getAPIntValue()+1, VT));
26442 // Try to synthesize horizontal adds from adds of shuffles.
26443 EVT VT = N->getValueType(0);
26444 if (((Subtarget->hasSSSE3() && (VT == MVT::v8i16 || VT == MVT::v4i32)) ||
26445 (Subtarget->hasInt256() && (VT == MVT::v16i16 || VT == MVT::v8i32))) &&
26446 isHorizontalBinOp(Op0, Op1, true))
26447 return DAG.getNode(X86ISD::HSUB, SDLoc(N), VT, Op0, Op1);
26449 return OptimizeConditionalInDecrement(N, DAG);
26452 /// performVZEXTCombine - Do target-specific dag combines on X86ISD::VZEXT nodes.
26453 static SDValue performVZEXTCombine(SDNode *N, SelectionDAG &DAG,
26454 TargetLowering::DAGCombinerInfo &DCI,
26455 const X86Subtarget *Subtarget) {
26457 MVT VT = N->getSimpleValueType(0);
26458 SDValue Op = N->getOperand(0);
26459 MVT OpVT = Op.getSimpleValueType();
26460 MVT OpEltVT = OpVT.getVectorElementType();
26461 unsigned InputBits = OpEltVT.getSizeInBits() * VT.getVectorNumElements();
26463 // (vzext (bitcast (vzext (x)) -> (vzext x)
26465 while (V.getOpcode() == ISD::BITCAST)
26466 V = V.getOperand(0);
26468 if (V != Op && V.getOpcode() == X86ISD::VZEXT) {
26469 MVT InnerVT = V.getSimpleValueType();
26470 MVT InnerEltVT = InnerVT.getVectorElementType();
26472 // If the element sizes match exactly, we can just do one larger vzext. This
26473 // is always an exact type match as vzext operates on integer types.
26474 if (OpEltVT == InnerEltVT) {
26475 assert(OpVT == InnerVT && "Types must match for vzext!");
26476 return DAG.getNode(X86ISD::VZEXT, DL, VT, V.getOperand(0));
26479 // The only other way we can combine them is if only a single element of the
26480 // inner vzext is used in the input to the outer vzext.
26481 if (InnerEltVT.getSizeInBits() < InputBits)
26484 // In this case, the inner vzext is completely dead because we're going to
26485 // only look at bits inside of the low element. Just do the outer vzext on
26486 // a bitcast of the input to the inner.
26487 return DAG.getNode(X86ISD::VZEXT, DL, VT,
26488 DAG.getNode(ISD::BITCAST, DL, OpVT, V));
26491 // Check if we can bypass extracting and re-inserting an element of an input
26492 // vector. Essentially:
26493 // (bitcast (sclr2vec (ext_vec_elt x))) -> (bitcast x)
26494 if (V.getOpcode() == ISD::SCALAR_TO_VECTOR &&
26495 V.getOperand(0).getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
26496 V.getOperand(0).getSimpleValueType().getSizeInBits() == InputBits) {
26497 SDValue ExtractedV = V.getOperand(0);
26498 SDValue OrigV = ExtractedV.getOperand(0);
26499 if (auto *ExtractIdx = dyn_cast<ConstantSDNode>(ExtractedV.getOperand(1)))
26500 if (ExtractIdx->getZExtValue() == 0) {
26501 MVT OrigVT = OrigV.getSimpleValueType();
26502 // Extract a subvector if necessary...
26503 if (OrigVT.getSizeInBits() > OpVT.getSizeInBits()) {
26504 int Ratio = OrigVT.getSizeInBits() / OpVT.getSizeInBits();
26505 OrigVT = MVT::getVectorVT(OrigVT.getVectorElementType(),
26506 OrigVT.getVectorNumElements() / Ratio);
26507 OrigV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, OrigVT, OrigV,
26508 DAG.getIntPtrConstant(0));
26510 Op = DAG.getNode(ISD::BITCAST, DL, OpVT, OrigV);
26511 return DAG.getNode(X86ISD::VZEXT, DL, VT, Op);
26518 SDValue X86TargetLowering::PerformDAGCombine(SDNode *N,
26519 DAGCombinerInfo &DCI) const {
26520 SelectionDAG &DAG = DCI.DAG;
26521 switch (N->getOpcode()) {
26523 case ISD::EXTRACT_VECTOR_ELT:
26524 return PerformEXTRACT_VECTOR_ELTCombine(N, DAG, DCI);
26527 case X86ISD::SHRUNKBLEND:
26528 return PerformSELECTCombine(N, DAG, DCI, Subtarget);
26529 case ISD::BITCAST: return PerformBITCASTCombine(N, DAG);
26530 case X86ISD::CMOV: return PerformCMOVCombine(N, DAG, DCI, Subtarget);
26531 case ISD::ADD: return PerformAddCombine(N, DAG, Subtarget);
26532 case ISD::SUB: return PerformSubCombine(N, DAG, Subtarget);
26533 case X86ISD::ADC: return PerformADCCombine(N, DAG, DCI);
26534 case ISD::MUL: return PerformMulCombine(N, DAG, DCI);
26537 case ISD::SRL: return PerformShiftCombine(N, DAG, DCI, Subtarget);
26538 case ISD::AND: return PerformAndCombine(N, DAG, DCI, Subtarget);
26539 case ISD::OR: return PerformOrCombine(N, DAG, DCI, Subtarget);
26540 case ISD::XOR: return PerformXorCombine(N, DAG, DCI, Subtarget);
26541 case ISD::LOAD: return PerformLOADCombine(N, DAG, DCI, Subtarget);
26542 case ISD::MLOAD: return PerformMLOADCombine(N, DAG, DCI, Subtarget);
26543 case ISD::STORE: return PerformSTORECombine(N, DAG, Subtarget);
26544 case ISD::MSTORE: return PerformMSTORECombine(N, DAG, Subtarget);
26545 case ISD::SINT_TO_FP: return PerformSINT_TO_FPCombine(N, DAG, Subtarget);
26546 case ISD::FADD: return PerformFADDCombine(N, DAG, Subtarget);
26547 case ISD::FSUB: return PerformFSUBCombine(N, DAG, Subtarget);
26549 case X86ISD::FOR: return PerformFORCombine(N, DAG);
26551 case X86ISD::FMAX: return PerformFMinFMaxCombine(N, DAG);
26552 case X86ISD::FAND: return PerformFANDCombine(N, DAG);
26553 case X86ISD::FANDN: return PerformFANDNCombine(N, DAG);
26554 case X86ISD::BT: return PerformBTCombine(N, DAG, DCI);
26555 case X86ISD::VZEXT_MOVL: return PerformVZEXT_MOVLCombine(N, DAG);
26556 case ISD::ANY_EXTEND:
26557 case ISD::ZERO_EXTEND: return PerformZExtCombine(N, DAG, DCI, Subtarget);
26558 case ISD::SIGN_EXTEND: return PerformSExtCombine(N, DAG, DCI, Subtarget);
26559 case ISD::SIGN_EXTEND_INREG:
26560 return PerformSIGN_EXTEND_INREGCombine(N, DAG, Subtarget);
26561 case ISD::TRUNCATE: return PerformTruncateCombine(N, DAG,DCI,Subtarget);
26562 case ISD::SETCC: return PerformISDSETCCCombine(N, DAG, Subtarget);
26563 case X86ISD::SETCC: return PerformSETCCCombine(N, DAG, DCI, Subtarget);
26564 case X86ISD::BRCOND: return PerformBrCondCombine(N, DAG, DCI, Subtarget);
26565 case X86ISD::VZEXT: return performVZEXTCombine(N, DAG, DCI, Subtarget);
26566 case X86ISD::SHUFP: // Handle all target specific shuffles
26567 case X86ISD::PALIGNR:
26568 case X86ISD::UNPCKH:
26569 case X86ISD::UNPCKL:
26570 case X86ISD::MOVHLPS:
26571 case X86ISD::MOVLHPS:
26572 case X86ISD::PSHUFB:
26573 case X86ISD::PSHUFD:
26574 case X86ISD::PSHUFHW:
26575 case X86ISD::PSHUFLW:
26576 case X86ISD::MOVSS:
26577 case X86ISD::MOVSD:
26578 case X86ISD::VPERMILPI:
26579 case X86ISD::VPERM2X128:
26580 case ISD::VECTOR_SHUFFLE: return PerformShuffleCombine(N, DAG, DCI,Subtarget);
26581 case ISD::FMA: return PerformFMACombine(N, DAG, Subtarget);
26582 case ISD::INTRINSIC_WO_CHAIN:
26583 return PerformINTRINSIC_WO_CHAINCombine(N, DAG, Subtarget);
26584 case X86ISD::INSERTPS: {
26585 if (getTargetMachine().getOptLevel() > CodeGenOpt::None)
26586 return PerformINSERTPSCombine(N, DAG, Subtarget);
26589 case ISD::BUILD_VECTOR: return PerformBUILD_VECTORCombine(N, DAG, Subtarget);
26595 /// isTypeDesirableForOp - Return true if the target has native support for
26596 /// the specified value type and it is 'desirable' to use the type for the
26597 /// given node type. For example, on x86 i16 is legal but undesirable, since i16
26598 /// instruction encodings are longer and some i16 instructions are slow.
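/// For example (illustrative): a plain 16-bit add such as "addw %ax, %cx"
/// needs a 0x66 operand-size prefix in 32/64-bit mode, so performing the
/// arithmetic in 32 bits and truncating the result is usually preferable.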
26599 bool X86TargetLowering::isTypeDesirableForOp(unsigned Opc, EVT VT) const {
26600 if (!isTypeLegal(VT))
26602 if (VT != MVT::i16)
26609 case ISD::SIGN_EXTEND:
26610 case ISD::ZERO_EXTEND:
26611 case ISD::ANY_EXTEND:
26624 /// IsDesirableToPromoteOp - This method queries the target whether it is
26625 /// beneficial for the DAG combiner to promote the specified node. If true, it
26626 /// should return the desired promotion type by reference.
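/// For example (illustrative): an i16 shift that is both fed by a foldable
/// load and consumed by a store, i.e. (store (shl (load), x)), is kept as
/// i16 so the memory operands can still be folded, while ordinary i16
/// arithmetic is typically promoted with PVT = MVT::i32.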
26627 bool X86TargetLowering::IsDesirableToPromoteOp(SDValue Op, EVT &PVT) const {
26628 EVT VT = Op.getValueType();
26629 if (VT != MVT::i16)
26632 bool Promote = false;
26633 bool Commute = false;
26634 switch (Op.getOpcode()) {
26637 LoadSDNode *LD = cast<LoadSDNode>(Op);
26638 // If the non-extending load has a single use and it's not live out, then it
26639 // might be folded.
26640 if (LD->getExtensionType() == ISD::NON_EXTLOAD /*&&
26641 Op.hasOneUse()*/) {
26642 for (SDNode::use_iterator UI = Op.getNode()->use_begin(),
26643 UE = Op.getNode()->use_end(); UI != UE; ++UI) {
26644 // The only case where we'd want to promote LOAD (rather than it being
26645 // promoted as an operand) is when its only use is live-out.
26646 if (UI->getOpcode() != ISD::CopyToReg)
26653 case ISD::SIGN_EXTEND:
26654 case ISD::ZERO_EXTEND:
26655 case ISD::ANY_EXTEND:
26660 SDValue N0 = Op.getOperand(0);
26661 // Look out for (store (shl (load), x)).
26662 if (MayFoldLoad(N0) && MayFoldIntoStore(Op))
26675 SDValue N0 = Op.getOperand(0);
26676 SDValue N1 = Op.getOperand(1);
26677 if (!Commute && MayFoldLoad(N1))
26679 // Avoid disabling potential load folding opportunities.
26680 if (MayFoldLoad(N0) && (!isa<ConstantSDNode>(N1) || MayFoldIntoStore(Op)))
26682 if (MayFoldLoad(N1) && (!isa<ConstantSDNode>(N0) || MayFoldIntoStore(Op)))
26692 //===----------------------------------------------------------------------===//
26693 // X86 Inline Assembly Support
26694 //===----------------------------------------------------------------------===//
26697 // Helper to match a string against a sequence of whitespace-separated pieces.
26698 bool matchAsmImpl(StringRef s, ArrayRef<const StringRef *> args) {
26699 s = s.substr(s.find_first_not_of(" \t")); // Skip leading whitespace.
26701 for (unsigned i = 0, e = args.size(); i != e; ++i) {
26702 StringRef piece(*args[i]);
26703 if (!s.startswith(piece)) // Check if the piece matches.
26706 s = s.substr(piece.size());
26707 StringRef::size_type pos = s.find_first_not_of(" \t");
26708 if (pos == 0) // We matched a prefix.
26716 const VariadicFunction1<bool, StringRef, StringRef, matchAsmImpl> matchAsm={};
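// Illustrative calls (inputs assumed, not from a test):
//   matchAsm("bswap $0", "bswap", "$0")         --> true
//   matchAsm("  bswapl   $0", "bswapl", "$0")   --> true  (whitespace-insensitive)
//   matchAsm("bswapl $0", "bswap", "$0")        --> false (only a prefix matched)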
26719 static bool clobbersFlagRegisters(const SmallVector<StringRef, 4> &AsmPieces) {
26721 if (AsmPieces.size() == 3 || AsmPieces.size() == 4) {
26722 if (std::count(AsmPieces.begin(), AsmPieces.end(), "~{cc}") &&
26723 std::count(AsmPieces.begin(), AsmPieces.end(), "~{flags}") &&
26724 std::count(AsmPieces.begin(), AsmPieces.end(), "~{fpsr}")) {
26726 if (AsmPieces.size() == 3)
26728 else if (std::count(AsmPieces.begin(), AsmPieces.end(), "~{dirflag}"))
26735 bool X86TargetLowering::ExpandInlineAsm(CallInst *CI) const {
26736 InlineAsm *IA = cast<InlineAsm>(CI->getCalledValue());
26738 std::string AsmStr = IA->getAsmString();
26740 IntegerType *Ty = dyn_cast<IntegerType>(CI->getType());
26741 if (!Ty || Ty->getBitWidth() % 16 != 0)
26744 // TODO: should remove alternatives from the asmstring: "foo {a|b}" -> "foo a"
26745 SmallVector<StringRef, 4> AsmPieces;
26746 SplitString(AsmStr, AsmPieces, ";\n");
26748 switch (AsmPieces.size()) {
26749 default: return false;
26751 // FIXME: this should verify that we are targeting a 486 or better. If not,
26752 // we will turn this bswap into something that will be lowered to logical
26753 // ops instead of emitting the bswap asm. For now, we don't support 486 or
26754 // lower so don't worry about this.
26756 if (matchAsm(AsmPieces[0], "bswap", "$0") ||
26757 matchAsm(AsmPieces[0], "bswapl", "$0") ||
26758 matchAsm(AsmPieces[0], "bswapq", "$0") ||
26759 matchAsm(AsmPieces[0], "bswap", "${0:q}") ||
26760 matchAsm(AsmPieces[0], "bswapl", "${0:q}") ||
26761 matchAsm(AsmPieces[0], "bswapq", "${0:q}")) {
26762 // No need to check constraints, nothing other than the equivalent of
26763 // "=r,0" would be valid here.
26764 return IntrinsicLowering::LowerToByteSwap(CI);
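// Illustrative IR (hand-written, not from the source tree):
//   %r = call i32 asm "bswap $0", "=r,0"(i32 %x)
// is rewritten by LowerToByteSwap into
//   %r = call i32 @llvm.bswap.i32(i32 %x)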
26767 // rorw $$8, ${0:w} --> llvm.bswap.i16
26768 if (CI->getType()->isIntegerTy(16) &&
26769 IA->getConstraintString().compare(0, 5, "=r,0,") == 0 &&
26770 (matchAsm(AsmPieces[0], "rorw", "$$8,", "${0:w}") ||
26771 matchAsm(AsmPieces[0], "rolw", "$$8,", "${0:w}"))) {
26773 const std::string &ConstraintsStr = IA->getConstraintString();
26774 SplitString(StringRef(ConstraintsStr).substr(5), AsmPieces, ",");
26775 array_pod_sort(AsmPieces.begin(), AsmPieces.end());
26776 if (clobbersFlagRegisters(AsmPieces))
26777 return IntrinsicLowering::LowerToByteSwap(CI);
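// Illustrative IR (hand-written): a 16-bit byte swap spelled as a rotate,
//   %r = call i16 asm "rorw $$8, ${0:w}",
//            "=r,0,~{cc},~{dirflag},~{fpsr},~{flags}"(i16 %x)
// becomes a call to @llvm.bswap.i16(i16 %x) once clobbersFlagRegisters
// confirms the flag registers are in the clobber list.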
26781 if (CI->getType()->isIntegerTy(32) &&
26782 IA->getConstraintString().compare(0, 5, "=r,0,") == 0 &&
26783 matchAsm(AsmPieces[0], "rorw", "$$8,", "${0:w}") &&
26784 matchAsm(AsmPieces[1], "rorl", "$$16,", "$0") &&
26785 matchAsm(AsmPieces[2], "rorw", "$$8,", "${0:w}")) {
26787 const std::string &ConstraintsStr = IA->getConstraintString();
26788 SplitString(StringRef(ConstraintsStr).substr(5), AsmPieces, ",");
26789 array_pod_sort(AsmPieces.begin(), AsmPieces.end());
26790 if (clobbersFlagRegisters(AsmPieces))
26791 return IntrinsicLowering::LowerToByteSwap(CI);
26794 if (CI->getType()->isIntegerTy(64)) {
26795 InlineAsm::ConstraintInfoVector Constraints = IA->ParseConstraints();
26796 if (Constraints.size() >= 2 &&
26797 Constraints[0].Codes.size() == 1 && Constraints[0].Codes[0] == "A" &&
26798 Constraints[1].Codes.size() == 1 && Constraints[1].Codes[0] == "0") {
26799 // bswap %eax / bswap %edx / xchgl %eax, %edx -> llvm.bswap.i64
26800 if (matchAsm(AsmPieces[0], "bswap", "%eax") &&
26801 matchAsm(AsmPieces[1], "bswap", "%edx") &&
26802 matchAsm(AsmPieces[2], "xchgl", "%eax,", "%edx"))
26803 return IntrinsicLowering::LowerToByteSwap(CI);
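// Illustrative IR (hand-written): a 64-bit swap spelled for x86-32,
//   %r = call i64 asm "bswap %eax\0Abswap %edx\0Axchgl %eax, %edx", "=A,0"(i64 %x)
// matches the three pieces above and is lowered to @llvm.bswap.i64(i64 %x).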
26811 /// getConstraintType - Given a constraint letter, return the type of
26812 /// constraint it is for this target.
26813 X86TargetLowering::ConstraintType
26814 X86TargetLowering::getConstraintType(const std::string &Constraint) const {
26815 if (Constraint.size() == 1) {
26816 switch (Constraint[0]) {
26827 return C_RegisterClass;
26851 return TargetLowering::getConstraintType(Constraint);
26854 /// Examine constraint type and operand type and determine a weight value.
26855 /// This object must already have been set up with the operand type
26856 /// and the current alternative constraint selected.
26857 TargetLowering::ConstraintWeight
26858 X86TargetLowering::getSingleConstraintMatchWeight(
26859 AsmOperandInfo &info, const char *constraint) const {
26860 ConstraintWeight weight = CW_Invalid;
26861 Value *CallOperandVal = info.CallOperandVal;
26862 // If we don't have a value, we can't do a match,
26863 // but allow it at the lowest weight.
26864 if (!CallOperandVal)
26866 Type *type = CallOperandVal->getType();
26867 // Look at the constraint type.
26868 switch (*constraint) {
26870 weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
26881 if (CallOperandVal->getType()->isIntegerTy())
26882 weight = CW_SpecificReg;
26887 if (type->isFloatingPointTy())
26888 weight = CW_SpecificReg;
26891 if (type->isX86_MMXTy() && Subtarget->hasMMX())
26892 weight = CW_SpecificReg;
26896 if (((type->getPrimitiveSizeInBits() == 128) && Subtarget->hasSSE1()) ||
26897 ((type->getPrimitiveSizeInBits() == 256) && Subtarget->hasFp256()))
26898 weight = CW_Register;
26901 if (ConstantInt *C = dyn_cast<ConstantInt>(info.CallOperandVal)) {
26902 if (C->getZExtValue() <= 31)
26903 weight = CW_Constant;
26907 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
26908 if (C->getZExtValue() <= 63)
26909 weight = CW_Constant;
26913 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
26914 if ((C->getSExtValue() >= -0x80) && (C->getSExtValue() <= 0x7f))
26915 weight = CW_Constant;
26919 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
26920 if ((C->getZExtValue() == 0xff) || (C->getZExtValue() == 0xffff))
26921 weight = CW_Constant;
26925 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
26926 if (C->getZExtValue() <= 3)
26927 weight = CW_Constant;
26931 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
26932 if (C->getZExtValue() <= 0xff)
26933 weight = CW_Constant;
26938 if (dyn_cast<ConstantFP>(CallOperandVal)) {
26939 weight = CW_Constant;
26943 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
26944 if ((C->getSExtValue() >= -0x80000000LL) &&
26945 (C->getSExtValue() <= 0x7fffffffLL))
26946 weight = CW_Constant;
26950 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
26951 if (C->getZExtValue() <= 0xffffffff)
26952 weight = CW_Constant;
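// Illustrative example: for the 'I' constraint (an immediate in [0, 31],
// cf. the <= 31 check above), a ConstantInt call operand of 7 scores
// CW_Constant, while a non-constant operand keeps the default weight.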
26959 /// LowerXConstraint - try to replace an X constraint, which matches anything,
26960 /// with another that has more specific requirements based on the type of the
26961 /// corresponding operand.
26962 const char *X86TargetLowering::
26963 LowerXConstraint(EVT ConstraintVT) const {
26964 // FP X constraints get lowered to SSE1/2 registers if available, otherwise
26965 // 'f' like normal targets.
26966 if (ConstraintVT.isFloatingPoint()) {
26967 if (Subtarget->hasSSE2())
26969 if (Subtarget->hasSSE1())
26973 return TargetLowering::LowerXConstraint(ConstraintVT);
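// Illustrative example: for an inline-asm operand constrained as
//   %r = call double asm "...", "=X"()
// an SSE2-capable target narrows 'X' to an SSE register constraint, while a
// target without SSE keeps the x87 'f' constraint, as described in the
// comment above.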
26976 /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
26977 /// vector. If it is invalid, don't add anything to Ops.
26978 void X86TargetLowering::LowerAsmOperandForConstraint(SDValue Op,
26979 std::string &Constraint,
26980 std::vector<SDValue>&Ops,
26981 SelectionDAG &DAG) const {
26984 // Only support length 1 constraints for now.
26985 if (Constraint.length() > 1) return;
26987 char ConstraintLetter = Constraint[0];
26988 switch (ConstraintLetter) {
26991 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
26992 if (C->getZExtValue() <= 31) {
26993 Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
26999 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
27000 if (C->getZExtValue() <= 63) {
27001 Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
27007 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
27008 if (isInt<8>(C->getSExtValue())) {
27009 Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
27015 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
27016 if (C->getZExtValue() == 0xff || C->getZExtValue() == 0xffff ||
27017 (Subtarget->is64Bit() && C->getZExtValue() == 0xffffffff)) {
27018 Result = DAG.getTargetConstant(C->getSExtValue(), Op.getValueType());
27024 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
27025 if (C->getZExtValue() <= 3) {
27026 Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
27032 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
27033 if (C->getZExtValue() <= 255) {
27034 Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
27040 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
27041 if (C->getZExtValue() <= 127) {
27042 Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
27048 // 32-bit signed value
27049 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
27050 if (ConstantInt::isValueValidForType(Type::getInt32Ty(*DAG.getContext()),
27051 C->getSExtValue())) {
27052 // Widen to 64 bits here to get it sign extended.
27053 Result = DAG.getTargetConstant(C->getSExtValue(), MVT::i64);
27056 // FIXME gcc accepts some relocatable values here too, but only in certain
27057 // memory models; it's complicated.
27062 // 32-bit unsigned value
27063 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
27064 if (ConstantInt::isValueValidForType(Type::getInt32Ty(*DAG.getContext()),
27065 C->getZExtValue())) {
27066 Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
27070 // FIXME gcc accepts some relocatable values here too, but only in certain
27071 // memory models; it's complicated.
27075 // Literal immediates are always ok.
27076 if (ConstantSDNode *CST = dyn_cast<ConstantSDNode>(Op)) {
27077 // Widen to 64 bits here to get it sign extended.
27078 Result = DAG.getTargetConstant(CST->getSExtValue(), MVT::i64);
27082 // In any sort of PIC mode, addresses need to be computed at runtime by
27083 // adding in a register or some sort of table lookup. These can't
27084 // be used as immediates.
27085 if (Subtarget->isPICStyleGOT() || Subtarget->isPICStyleStubPIC())
27088 // If we are in non-pic codegen mode, we allow the address of a global (with
27089 // an optional displacement) to be used with 'i'.
27090 GlobalAddressSDNode *GA = nullptr;
27091 int64_t Offset = 0;
27093 // Match either (GA), (GA+C), (GA+C1+C2), etc.
27095 if ((GA = dyn_cast<GlobalAddressSDNode>(Op))) {
27096 Offset += GA->getOffset();
27098 } else if (Op.getOpcode() == ISD::ADD) {
27099 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
27100 Offset += C->getZExtValue();
27101 Op = Op.getOperand(0);
27104 } else if (Op.getOpcode() == ISD::SUB) {
27105 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
27106 Offset += -C->getZExtValue();
27107 Op = Op.getOperand(0);
27112 // Otherwise, this isn't something we can handle, reject it.
27116 const GlobalValue *GV = GA->getGlobal();
27117 // If we require an extra load to get this address, as in PIC mode, we
27118 // can't accept it.
27119 if (isGlobalStubReference(
27120 Subtarget->ClassifyGlobalReference(GV, DAG.getTarget())))
27123 Result = DAG.getTargetGlobalAddress(GV, SDLoc(Op),
27124 GA->getValueType(0), Offset);
27129 if (Result.getNode()) {
27130 Ops.push_back(Result);
27133 return TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
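// Illustrative example (assumed IR, non-PIC codegen): an 'i' operand such as
//   call void asm sideeffect "...", "i"(i32* getelementptr ([8 x i32],
//                                        [8 x i32]* @g, i32 0, i32 4))
// is accepted here: the global and its byte offset (16) are collected and a
// TargetGlobalAddress for @g + 16 is pushed as the immediate operand.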
27136 std::pair<unsigned, const TargetRegisterClass*>
27137 X86TargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
27139 // First, see if this is a constraint that directly corresponds to an LLVM
27141 if (Constraint.size() == 1) {
27142 // GCC Constraint Letters
27143 switch (Constraint[0]) {
27145 // TODO: Slight differences here in allocation order and leaving
27146 // RIP in the class. Do they matter any more here than they do
27147 // in the normal allocation?
27148 case 'q': // GENERAL_REGS in 64-bit mode, Q_REGS in 32-bit mode.
27149 if (Subtarget->is64Bit()) {
27150 if (VT == MVT::i32 || VT == MVT::f32)
27151 return std::make_pair(0U, &X86::GR32RegClass);
27152 if (VT == MVT::i16)
27153 return std::make_pair(0U, &X86::GR16RegClass);
27154 if (VT == MVT::i8 || VT == MVT::i1)
27155 return std::make_pair(0U, &X86::GR8RegClass);
27156 if (VT == MVT::i64 || VT == MVT::f64)
27157 return std::make_pair(0U, &X86::GR64RegClass);
27160 // 32-bit fallthrough
27161 case 'Q': // Q_REGS
27162 if (VT == MVT::i32 || VT == MVT::f32)
27163 return std::make_pair(0U, &X86::GR32_ABCDRegClass);
27164 if (VT == MVT::i16)
27165 return std::make_pair(0U, &X86::GR16_ABCDRegClass);
27166 if (VT == MVT::i8 || VT == MVT::i1)
27167 return std::make_pair(0U, &X86::GR8_ABCD_LRegClass);
27168 if (VT == MVT::i64)
27169 return std::make_pair(0U, &X86::GR64_ABCDRegClass);
27171 case 'r': // GENERAL_REGS
27172 case 'l': // INDEX_REGS
27173 if (VT == MVT::i8 || VT == MVT::i1)
27174 return std::make_pair(0U, &X86::GR8RegClass);
27175 if (VT == MVT::i16)
27176 return std::make_pair(0U, &X86::GR16RegClass);
27177 if (VT == MVT::i32 || VT == MVT::f32 || !Subtarget->is64Bit())
27178 return std::make_pair(0U, &X86::GR32RegClass);
27179 return std::make_pair(0U, &X86::GR64RegClass);
27180 case 'R': // LEGACY_REGS
27181 if (VT == MVT::i8 || VT == MVT::i1)
27182 return std::make_pair(0U, &X86::GR8_NOREXRegClass);
27183 if (VT == MVT::i16)
27184 return std::make_pair(0U, &X86::GR16_NOREXRegClass);
27185 if (VT == MVT::i32 || !Subtarget->is64Bit())
27186 return std::make_pair(0U, &X86::GR32_NOREXRegClass);
27187 return std::make_pair(0U, &X86::GR64_NOREXRegClass);
27188 case 'f': // FP Stack registers.
27189 // If SSE is enabled for this VT, use f80 to ensure the isel moves the
27190 // value to the correct fpstack register class.
27191 if (VT == MVT::f32 && !isScalarFPTypeInSSEReg(VT))
27192 return std::make_pair(0U, &X86::RFP32RegClass);
27193 if (VT == MVT::f64 && !isScalarFPTypeInSSEReg(VT))
27194 return std::make_pair(0U, &X86::RFP64RegClass);
27195 return std::make_pair(0U, &X86::RFP80RegClass);
27196 case 'y': // MMX_REGS if MMX allowed.
27197 if (!Subtarget->hasMMX()) break;
27198 return std::make_pair(0U, &X86::VR64RegClass);
27199 case 'Y': // SSE_REGS if SSE2 allowed
27200 if (!Subtarget->hasSSE2()) break;
27202 case 'x': // SSE_REGS if SSE1 allowed or AVX_REGS if AVX allowed
27203 if (!Subtarget->hasSSE1()) break;
27205 switch (VT.SimpleTy) {
27207 // Scalar SSE types.
27210 return std::make_pair(0U, &X86::FR32RegClass);
27213 return std::make_pair(0U, &X86::FR64RegClass);
27221 return std::make_pair(0U, &X86::VR128RegClass);
27229 return std::make_pair(0U, &X86::VR256RegClass);
27234 return std::make_pair(0U, &X86::VR512RegClass);
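// Illustrative mappings for the 'x' constraint, assuming the required ISA
// features are present: f32 -> FR32, f64 -> FR64, <4 x float> -> VR128,
// <8 x float> -> VR256, <16 x float> -> VR512.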
27240 // Use the default implementation in TargetLowering to convert the register
27241 // constraint into a member of a register class.
27242 std::pair<unsigned, const TargetRegisterClass*> Res;
27243 Res = TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);
27245 // Not found as a standard register?
27247 // Map st(0) -> st(7) -> ST0
27248 if (Constraint.size() == 7 && Constraint[0] == '{' &&
27249 tolower(Constraint[1]) == 's' &&
27250 tolower(Constraint[2]) == 't' &&
27251 Constraint[3] == '(' &&
27252 (Constraint[4] >= '0' && Constraint[4] <= '7') &&
27253 Constraint[5] == ')' &&
27254 Constraint[6] == '}') {
27256 Res.first = X86::FP0+Constraint[4]-'0';
27257 Res.second = &X86::RFP80RegClass;
27261 // GCC allows "st(0)" to be called just plain "st".
27262 if (StringRef("{st}").equals_lower(Constraint)) {
27263 Res.first = X86::FP0;
27264 Res.second = &X86::RFP80RegClass;
27269 if (StringRef("{flags}").equals_lower(Constraint)) {
27270 Res.first = X86::EFLAGS;
27271 Res.second = &X86::CCRRegClass;
27275 // 'A' means EAX + EDX.
27276 if (Constraint == "A") {
27277 Res.first = X86::EAX;
27278 Res.second = &X86::GR32_ADRegClass;
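// Illustrative example: an i64 result constrained as "=A" on a 32-bit
// target (the EDX:EAX pair, e.g. from rdtsc-style asm) starts from EAX in
// the GR32_AD class selected here.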
27284 // Otherwise, check to see if this is a register class of the wrong value
27285 // type. For example, we want to map "{ax},i32" -> {eax}, we don't want it to
27286 // turn into {ax},{dx}.
27287 if (Res.second->hasType(VT))
27288 return Res; // Correct type already, nothing to do.
27290 // All of the single-register GCC register classes map their values onto
27291 // 16-bit register pieces "ax","dx","cx","bx","si","di","bp","sp". If we
27292 // really want an 8-bit or 32-bit register, map to the appropriate register
27293 // class and return the appropriate register.
27294 if (Res.second == &X86::GR16RegClass) {
27295 if (VT == MVT::i8 || VT == MVT::i1) {
27296 unsigned DestReg = 0;
27297 switch (Res.first) {
27299 case X86::AX: DestReg = X86::AL; break;
27300 case X86::DX: DestReg = X86::DL; break;
27301 case X86::CX: DestReg = X86::CL; break;
27302 case X86::BX: DestReg = X86::BL; break;
27305 Res.first = DestReg;
27306 Res.second = &X86::GR8RegClass;
27308 } else if (VT == MVT::i32 || VT == MVT::f32) {
27309 unsigned DestReg = 0;
27310 switch (Res.first) {
27312 case X86::AX: DestReg = X86::EAX; break;
27313 case X86::DX: DestReg = X86::EDX; break;
27314 case X86::CX: DestReg = X86::ECX; break;
27315 case X86::BX: DestReg = X86::EBX; break;
27316 case X86::SI: DestReg = X86::ESI; break;
27317 case X86::DI: DestReg = X86::EDI; break;
27318 case X86::BP: DestReg = X86::EBP; break;
27319 case X86::SP: DestReg = X86::ESP; break;
27322 Res.first = DestReg;
27323 Res.second = &X86::GR32RegClass;
27325 } else if (VT == MVT::i64 || VT == MVT::f64) {
27326 unsigned DestReg = 0;
27327 switch (Res.first) {
27329 case X86::AX: DestReg = X86::RAX; break;
27330 case X86::DX: DestReg = X86::RDX; break;
27331 case X86::CX: DestReg = X86::RCX; break;
27332 case X86::BX: DestReg = X86::RBX; break;
27333 case X86::SI: DestReg = X86::RSI; break;
27334 case X86::DI: DestReg = X86::RDI; break;
27335 case X86::BP: DestReg = X86::RBP; break;
27336 case X86::SP: DestReg = X86::RSP; break;
27339 Res.first = DestReg;
27340 Res.second = &X86::GR64RegClass;
27343 } else if (Res.second == &X86::FR32RegClass ||
27344 Res.second == &X86::FR64RegClass ||
27345 Res.second == &X86::VR128RegClass ||
27346 Res.second == &X86::VR256RegClass ||
27347 Res.second == &X86::FR32XRegClass ||
27348 Res.second == &X86::FR64XRegClass ||
27349 Res.second == &X86::VR128XRegClass ||
27350 Res.second == &X86::VR256XRegClass ||
27351 Res.second == &X86::VR512RegClass) {
27352 // Handle references to XMM physical registers that got mapped into the
27353 // wrong class. This can happen with constraints like {xmm0} where the
27354 // target independent register mapper will just pick the first match it can
27355 // find, ignoring the required type.
27357 if (VT == MVT::f32 || VT == MVT::i32)
27358 Res.second = &X86::FR32RegClass;
27359 else if (VT == MVT::f64 || VT == MVT::i64)
27360 Res.second = &X86::FR64RegClass;
27361 else if (X86::VR128RegClass.hasType(VT))
27362 Res.second = &X86::VR128RegClass;
27363 else if (X86::VR256RegClass.hasType(VT))
27364 Res.second = &X86::VR256RegClass;
27365 else if (X86::VR512RegClass.hasType(VT))
27366 Res.second = &X86::VR512RegClass;
27372 int X86TargetLowering::getScalingFactorCost(const AddrMode &AM,
27374 // Scaling factors are not free at all.
27375 // An indexed folded instruction, i.e., inst (reg1, reg2, scale),
27376 // will take 2 allocations in the out-of-order engine instead of 1
27377 // for a plain addressing mode, i.e., inst (reg1).
27379 // vaddps (%rsi,%rdx), %ymm0, %ymm1
27380 // Requires two allocations (one for the load, one for the computation)
27382 // vaddps (%rsi), %ymm0, %ymm1
27383 // Requires just 1 allocation, i.e., freeing allocations for other operations
27384 // and having less micro operations to execute.
27386 // For some X86 architectures, this is even worse because for instance for
27387 // stores, the complex addressing mode forces the instruction to use the
27388 // "load" ports instead of the dedicated "store" port.
27389 // E.g., on Haswell:
27390 // vmovaps %ymm1, (%r8, %rdi) can use port 2 or 3.
27391 // vmovaps %ymm1, (%r8) can use port 2, 3, or 7.
27392 if (isLegalAddressingMode(AM, Ty))
27393 // Scale represents reg2 * scale, so account for a cost of 1
27394 // as soon as a second register is used.
27395 return AM.Scale != 0;
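// Illustrative costs, assuming the addressing mode is legal for Ty:
//   inst (%rsi)          -> AM.Scale == 0 -> cost 0
//   inst (%rsi,%rdx,4)   -> AM.Scale != 0 -> cost 1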
27399 bool X86TargetLowering::isTargetFTOL() const {
27400 return Subtarget->isTargetKnownWindowsMSVC() && !Subtarget->is64Bit();