//===-- X86ISelLowering.cpp - X86 DAG Lowering Implementation ------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that X86 uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#include "X86ISelLowering.h"
#include "Utils/X86ShuffleDecode.h"
#include "X86CallingConv.h"
#include "X86FrameLowering.h"
#include "X86InstrBuilder.h"
#include "X86MachineFunctionInfo.h"
#include "X86TargetMachine.h"
#include "X86TargetObjectFile.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/VariadicFunction.h"
#include "llvm/CodeGen/IntrinsicLowering.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetOptions.h"
#include "X86IntrinsicsInfo.h"
#include <bitset>
#include <numeric>
#include <cctype>
using namespace llvm;

#define DEBUG_TYPE "x86-isel"

STATISTIC(NumTailCalls, "Number of tail calls");

static cl::opt<bool> ExperimentalVectorWideningLegalization(
    "x86-experimental-vector-widening-legalization", cl::init(false),
    cl::desc("Enable an experimental vector type legalization through widening "
             "rather than promotion."),
    cl::Hidden);

static cl::opt<bool> ExperimentalVectorShuffleLowering(
    "x86-experimental-vector-shuffle-lowering", cl::init(true),
    cl::desc("Enable an experimental vector shuffle lowering code path."),
    cl::Hidden);

static cl::opt<bool> ExperimentalVectorShuffleLegality(
    "x86-experimental-vector-shuffle-legality", cl::init(false),
    cl::desc("Enable experimental shuffle legality based on the experimental "
             "shuffle lowering. Should only be used with the experimental "
             "shuffle lowering."),
    cl::Hidden);

static cl::opt<int> ReciprocalEstimateRefinementSteps(
    "x86-recip-refinement-steps", cl::init(1),
    cl::desc("Specify the number of Newton-Raphson iterations applied to the "
             "result of the hardware reciprocal estimate instruction."),
    cl::NotHidden);

// Forward declarations.
static SDValue getMOVL(SelectionDAG &DAG, SDLoc dl, EVT VT, SDValue V1,
                       SDValue V2);

static SDValue ExtractSubVector(SDValue Vec, unsigned IdxVal,
                                SelectionDAG &DAG, SDLoc dl,
                                unsigned vectorWidth) {
  assert((vectorWidth == 128 || vectorWidth == 256) &&
         "Unsupported vector width");
  EVT VT = Vec.getValueType();
  EVT ElVT = VT.getVectorElementType();
  unsigned Factor = VT.getSizeInBits()/vectorWidth;
  EVT ResultVT = EVT::getVectorVT(*DAG.getContext(), ElVT,
                                  VT.getVectorNumElements()/Factor);

  // Extract from UNDEF is UNDEF.
  if (Vec.getOpcode() == ISD::UNDEF)
    return DAG.getUNDEF(ResultVT);

  // Extract the relevant vectorWidth bits. Generate an EXTRACT_SUBVECTOR
  unsigned ElemsPerChunk = vectorWidth / ElVT.getSizeInBits();

  // This is the index of the first element of the vectorWidth-bit chunk
  // we want.
  unsigned NormalizedIdxVal = (((IdxVal * ElVT.getSizeInBits()) / vectorWidth)
                               * ElemsPerChunk);
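  // Worked example (illustration only): extracting the 128-bit chunk that
  // contains element 5 of a v8i32 gives ElemsPerChunk = 128/32 = 4 and
  // NormalizedIdxVal = ((5 * 32) / 128) * 4 = 4, i.e. the upper half
  // starting at element 4.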

  // If the input is a buildvector just emit a smaller one.
  if (Vec.getOpcode() == ISD::BUILD_VECTOR)
    return DAG.getNode(ISD::BUILD_VECTOR, dl, ResultVT,
                       makeArrayRef(Vec->op_begin() + NormalizedIdxVal,
                                    ElemsPerChunk));

  SDValue VecIdx = DAG.getIntPtrConstant(NormalizedIdxVal);
  return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, ResultVT, Vec, VecIdx);
}

/// Generate a DAG to grab 128-bits from a vector > 128 bits. This
/// sets things up to match to an AVX VEXTRACTF128 / VEXTRACTI128
/// or AVX-512 VEXTRACTF32x4 / VEXTRACTI32x4
/// instructions or a simple subregister reference. Idx is an index in the
/// 128 bits we want. It need not be aligned to a 128-bit boundary. That makes
/// lowering EXTRACT_VECTOR_ELT operations easier.
static SDValue Extract128BitVector(SDValue Vec, unsigned IdxVal,
                                   SelectionDAG &DAG, SDLoc dl) {
  assert((Vec.getValueType().is256BitVector() ||
          Vec.getValueType().is512BitVector()) && "Unexpected vector size!");
  return ExtractSubVector(Vec, IdxVal, DAG, dl, 128);
}
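// Illustrative use (not part of the original source): for a 256-bit v8i32
// value Vec, Extract128BitVector(Vec, 4, DAG, dl) yields the upper v4i32
// half as an EXTRACT_SUBVECTOR starting at element 4.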

/// Generate a DAG to grab 256-bits from a 512-bit vector.
static SDValue Extract256BitVector(SDValue Vec, unsigned IdxVal,
                                   SelectionDAG &DAG, SDLoc dl) {
  assert(Vec.getValueType().is512BitVector() && "Unexpected vector size!");
  return ExtractSubVector(Vec, IdxVal, DAG, dl, 256);
}

static SDValue InsertSubVector(SDValue Result, SDValue Vec,
                               unsigned IdxVal, SelectionDAG &DAG,
                               SDLoc dl, unsigned vectorWidth) {
  assert((vectorWidth == 128 || vectorWidth == 256) &&
         "Unsupported vector width");
  // Inserting UNDEF is Result
  if (Vec.getOpcode() == ISD::UNDEF)
    return Result;
  EVT VT = Vec.getValueType();
  EVT ElVT = VT.getVectorElementType();
  EVT ResultVT = Result.getValueType();

  // Insert the relevant vectorWidth bits.
  unsigned ElemsPerChunk = vectorWidth/ElVT.getSizeInBits();

  // This is the index of the first element of the vectorWidth-bit chunk
  // we want.
  unsigned NormalizedIdxVal = (((IdxVal * ElVT.getSizeInBits())/vectorWidth)
                               * ElemsPerChunk);

  SDValue VecIdx = DAG.getIntPtrConstant(NormalizedIdxVal);
  return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ResultVT, Result, Vec, VecIdx);
}

/// Generate a DAG to put 128-bits into a vector > 128 bits. This
/// sets things up to match to an AVX VINSERTF128/VINSERTI128 or
/// AVX-512 VINSERTF32x4/VINSERTI32x4 instructions or a
/// simple superregister reference. Idx is an index in the 128 bits
/// we want. It need not be aligned to a 128-bit boundary. That makes
/// lowering INSERT_VECTOR_ELT operations easier.
static SDValue Insert128BitVector(SDValue Result, SDValue Vec, unsigned IdxVal,
                                  SelectionDAG &DAG, SDLoc dl) {
  assert(Vec.getValueType().is128BitVector() && "Unexpected vector size!");
  return InsertSubVector(Result, Vec, IdxVal, DAG, dl, 128);
}

static SDValue Insert256BitVector(SDValue Result, SDValue Vec, unsigned IdxVal,
                                  SelectionDAG &DAG, SDLoc dl) {
  assert(Vec.getValueType().is256BitVector() && "Unexpected vector size!");
  return InsertSubVector(Result, Vec, IdxVal, DAG, dl, 256);
}

/// Concat two 128-bit vectors into a 256 bit vector using VINSERTF128
/// instructions. This is used because creating CONCAT_VECTOR nodes of
/// BUILD_VECTORS returns a larger BUILD_VECTOR while we're trying to lower
/// large BUILD_VECTORS.
static SDValue Concat128BitVectors(SDValue V1, SDValue V2, EVT VT,
                                   unsigned NumElems, SelectionDAG &DAG,
                                   SDLoc dl) {
  SDValue V = Insert128BitVector(DAG.getUNDEF(VT), V1, 0, DAG, dl);
  return Insert128BitVector(V, V2, NumElems/2, DAG, dl);
}
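// Illustrative use (not part of the original source):
// Concat128BitVectors(Lo, Hi, MVT::v8i32, 8, DAG, dl) inserts Lo at element 0
// and Hi at element NumElems/2 = 4 of an undef v8i32, matching a VINSERTF128
// of the upper half.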

static SDValue Concat256BitVectors(SDValue V1, SDValue V2, EVT VT,
                                   unsigned NumElems, SelectionDAG &DAG,
                                   SDLoc dl) {
  SDValue V = Insert256BitVector(DAG.getUNDEF(VT), V1, 0, DAG, dl);
  return Insert256BitVector(V, V2, NumElems/2, DAG, dl);
}

X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
                                     const X86Subtarget &STI)
    : TargetLowering(TM), Subtarget(&STI) {
  X86ScalarSSEf64 = Subtarget->hasSSE2();
  X86ScalarSSEf32 = Subtarget->hasSSE1();
  TD = getDataLayout();

  // Set up the TargetLowering object.
  static const MVT IntVTs[] = { MVT::i8, MVT::i16, MVT::i32, MVT::i64 };

  // X86 is weird. It always uses i8 for shift amounts and setcc results.
  setBooleanContents(ZeroOrOneBooleanContent);
  // X86-SSE is even stranger. It uses -1 or 0 for vector masks.
  setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);

  // For 64-bit, since we have so many registers, use the ILP scheduler.
  // For 32-bit, use the register pressure specific scheduling.
  // For Atom, always use ILP scheduling.
  if (Subtarget->isAtom())
    setSchedulingPreference(Sched::ILP);
  else if (Subtarget->is64Bit())
    setSchedulingPreference(Sched::ILP);
  else
    setSchedulingPreference(Sched::RegPressure);

  const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
  setStackPointerRegisterToSaveRestore(RegInfo->getStackRegister());

  // Bypass expensive divides on Atom when compiling with O2.
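  // (A note on the parameters: addBypassSlowDiv(32, 8) allows a 32-bit divide
  // to be performed as an 8-bit divide when the operands are known at run
  // time to fit in 8 bits; likewise for the 64/16 pair below.)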
  if (TM.getOptLevel() >= CodeGenOpt::Default) {
    if (Subtarget->hasSlowDivide32())
      addBypassSlowDiv(32, 8);
    if (Subtarget->hasSlowDivide64() && Subtarget->is64Bit())
      addBypassSlowDiv(64, 16);
  }

  if (Subtarget->isTargetKnownWindowsMSVC()) {
    // Setup Windows compiler runtime calls.
    setLibcallName(RTLIB::SDIV_I64, "_alldiv");
    setLibcallName(RTLIB::UDIV_I64, "_aulldiv");
    setLibcallName(RTLIB::SREM_I64, "_allrem");
    setLibcallName(RTLIB::UREM_I64, "_aullrem");
    setLibcallName(RTLIB::MUL_I64, "_allmul");
    setLibcallCallingConv(RTLIB::SDIV_I64, CallingConv::X86_StdCall);
    setLibcallCallingConv(RTLIB::UDIV_I64, CallingConv::X86_StdCall);
    setLibcallCallingConv(RTLIB::SREM_I64, CallingConv::X86_StdCall);
    setLibcallCallingConv(RTLIB::UREM_I64, CallingConv::X86_StdCall);
    setLibcallCallingConv(RTLIB::MUL_I64, CallingConv::X86_StdCall);

    // The _ftol2 runtime function has an unusual calling conv, which
    // is modeled by a special pseudo-instruction.
    setLibcallName(RTLIB::FPTOUINT_F64_I64, nullptr);
    setLibcallName(RTLIB::FPTOUINT_F32_I64, nullptr);
    setLibcallName(RTLIB::FPTOUINT_F64_I32, nullptr);
    setLibcallName(RTLIB::FPTOUINT_F32_I32, nullptr);
  }

  if (Subtarget->isTargetDarwin()) {
    // Darwin should use _setjmp/_longjmp instead of setjmp/longjmp.
    setUseUnderscoreSetJmp(false);
    setUseUnderscoreLongJmp(false);
  } else if (Subtarget->isTargetWindowsGNU()) {
    // MS runtime is weird: it exports _setjmp, but longjmp!
    setUseUnderscoreSetJmp(true);
    setUseUnderscoreLongJmp(false);
  } else {
    setUseUnderscoreSetJmp(true);
    setUseUnderscoreLongJmp(true);
  }

  // Set up the register classes.
  addRegisterClass(MVT::i8, &X86::GR8RegClass);
  addRegisterClass(MVT::i16, &X86::GR16RegClass);
  addRegisterClass(MVT::i32, &X86::GR32RegClass);
  if (Subtarget->is64Bit())
    addRegisterClass(MVT::i64, &X86::GR64RegClass);

  for (MVT VT : MVT::integer_valuetypes())
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);

  // We don't accept any truncstore of integer registers.
  setTruncStoreAction(MVT::i64, MVT::i32, Expand);
  setTruncStoreAction(MVT::i64, MVT::i16, Expand);
  setTruncStoreAction(MVT::i64, MVT::i8, Expand);
  setTruncStoreAction(MVT::i32, MVT::i16, Expand);
  setTruncStoreAction(MVT::i32, MVT::i8, Expand);
  setTruncStoreAction(MVT::i16, MVT::i8, Expand);

  setTruncStoreAction(MVT::f64, MVT::f32, Expand);

  // SETOEQ and SETUNE require checking two conditions.
  setCondCodeAction(ISD::SETOEQ, MVT::f32, Expand);
  setCondCodeAction(ISD::SETOEQ, MVT::f64, Expand);
  setCondCodeAction(ISD::SETOEQ, MVT::f80, Expand);
  setCondCodeAction(ISD::SETUNE, MVT::f32, Expand);
  setCondCodeAction(ISD::SETUNE, MVT::f64, Expand);
  setCondCodeAction(ISD::SETUNE, MVT::f80, Expand);

  // Promote all UINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have this
  // operation.
  setOperationAction(ISD::UINT_TO_FP, MVT::i1, Promote);
  setOperationAction(ISD::UINT_TO_FP, MVT::i8, Promote);
  setOperationAction(ISD::UINT_TO_FP, MVT::i16, Promote);

  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::UINT_TO_FP, MVT::i32, Promote);
    setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);
  } else if (!TM.Options.UseSoftFloat) {
    // We have an algorithm for SSE2->double, and we turn this into a
    // 64-bit FILD followed by conditional FADD for other targets.
    setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);
    // We have an algorithm for SSE2, and we turn this into a 64-bit
    // FILD for other targets.
    setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
  }

  // Promote i1/i8 SINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have
  // this operation.
  setOperationAction(ISD::SINT_TO_FP, MVT::i1, Promote);
  setOperationAction(ISD::SINT_TO_FP, MVT::i8, Promote);

  if (!TM.Options.UseSoftFloat) {
    // SSE has no i16 to fp conversion, only i32
    if (X86ScalarSSEf32) {
      setOperationAction(ISD::SINT_TO_FP, MVT::i16, Promote);
      // f32 and f64 cases are Legal, f80 case is not
      setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
    } else {
      setOperationAction(ISD::SINT_TO_FP, MVT::i16, Custom);
      setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
    }
  } else {
    setOperationAction(ISD::SINT_TO_FP, MVT::i16, Promote);
    setOperationAction(ISD::SINT_TO_FP, MVT::i32, Promote);
  }

  // In 32-bit mode these are custom lowered. In 64-bit mode F32 and F64
  // are Legal, f80 is custom lowered.
  setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
  setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);

  // Promote i1/i8 FP_TO_SINT to larger FP_TO_SINT's, as X86 doesn't have
  // this operation.
  setOperationAction(ISD::FP_TO_SINT, MVT::i1, Promote);
  setOperationAction(ISD::FP_TO_SINT, MVT::i8, Promote);

  if (X86ScalarSSEf32) {
    setOperationAction(ISD::FP_TO_SINT, MVT::i16, Promote);
    // f32 and f64 cases are Legal, f80 case is not
    setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
  } else {
    setOperationAction(ISD::FP_TO_SINT, MVT::i16, Custom);
    setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
  }

  // Handle FP_TO_UINT by promoting the destination to a larger signed
  // conversion.
  setOperationAction(ISD::FP_TO_UINT, MVT::i1, Promote);
  setOperationAction(ISD::FP_TO_UINT, MVT::i8, Promote);
  setOperationAction(ISD::FP_TO_UINT, MVT::i16, Promote);

  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::FP_TO_UINT, MVT::i64, Expand);
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Promote);
  } else if (!TM.Options.UseSoftFloat) {
    // Since AVX is a superset of SSE3, only check for SSE here.
    if (Subtarget->hasSSE1() && !Subtarget->hasSSE3())
      // Expand FP_TO_UINT into a select.
      // FIXME: We would like to use a Custom expander here eventually to do
      // the optimal thing for SSE vs. the default expansion in the legalizer.
      setOperationAction(ISD::FP_TO_UINT, MVT::i32, Expand);
    else
      // With SSE3 we can use fisttpll to convert to a signed i64; without
      // SSE, we're stuck with a fistpll.
      setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
  }

  if (isTargetFTOL()) {
    // Use the _ftol2 runtime function, which has a pseudo-instruction
    // to handle its weird calling convention.
    setOperationAction(ISD::FP_TO_UINT, MVT::i64, Custom);
  }

  // TODO: when we have SSE, these could be more efficient, by using movd/movq.
  if (!X86ScalarSSEf64) {
    setOperationAction(ISD::BITCAST, MVT::f32, Expand);
    setOperationAction(ISD::BITCAST, MVT::i32, Expand);
    if (Subtarget->is64Bit()) {
      setOperationAction(ISD::BITCAST, MVT::f64, Expand);
      // Without SSE, i64->f64 goes through memory.
      setOperationAction(ISD::BITCAST, MVT::i64, Expand);
    }
  }

  // Scalar integer divide and remainder are lowered to use operations that
  // produce two results, to match the available instructions. This exposes
  // the two-result form to trivial CSE, which is able to combine x/y and x%y
  // into a single instruction.
  //
  // Scalar integer multiply-high is also lowered to use two-result
  // operations, to match the available instructions. However, plain multiply
  // (low) operations are left as Legal, as there are single-result
  // instructions for this in x86. Using the two-result multiply instructions
  // when both high and low results are needed must be arranged by dagcombine.
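  // (For example, a 32-bit DIV/IDIV leaves the quotient in EAX and the
  // remainder in EDX, so an x/y and x%y pair in the same block can be served
  // by one divide instruction once the two-result form is used.)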
  for (unsigned i = 0; i != array_lengthof(IntVTs); ++i) {
    MVT VT = IntVTs[i];
    setOperationAction(ISD::MULHS, VT, Expand);
    setOperationAction(ISD::MULHU, VT, Expand);
    setOperationAction(ISD::SDIV, VT, Expand);
    setOperationAction(ISD::UDIV, VT, Expand);
    setOperationAction(ISD::SREM, VT, Expand);
    setOperationAction(ISD::UREM, VT, Expand);

    // Add/Sub overflow ops with MVT::Glues are lowered to EFLAGS dependences.
    setOperationAction(ISD::ADDC, VT, Custom);
    setOperationAction(ISD::ADDE, VT, Custom);
    setOperationAction(ISD::SUBC, VT, Custom);
    setOperationAction(ISD::SUBE, VT, Custom);
  }

  setOperationAction(ISD::BR_JT, MVT::Other, Expand);
  setOperationAction(ISD::BRCOND, MVT::Other, Custom);
  setOperationAction(ISD::BR_CC, MVT::f32, Expand);
  setOperationAction(ISD::BR_CC, MVT::f64, Expand);
  setOperationAction(ISD::BR_CC, MVT::f80, Expand);
  setOperationAction(ISD::BR_CC, MVT::i8, Expand);
  setOperationAction(ISD::BR_CC, MVT::i16, Expand);
  setOperationAction(ISD::BR_CC, MVT::i32, Expand);
  setOperationAction(ISD::BR_CC, MVT::i64, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::f32, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::f64, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::f80, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::i8, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::i16, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::i32, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::i64, Expand);
  if (Subtarget->is64Bit())
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i32, Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8, Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
  setOperationAction(ISD::FP_ROUND_INREG, MVT::f32, Expand);
  setOperationAction(ISD::FREM, MVT::f32, Expand);
  setOperationAction(ISD::FREM, MVT::f64, Expand);
  setOperationAction(ISD::FREM, MVT::f80, Expand);
  setOperationAction(ISD::FLT_ROUNDS_, MVT::i32, Custom);

  // Promote the i8 variants and force them on up to i32 which has a shorter
  // encoding.
  setOperationAction(ISD::CTTZ, MVT::i8, Promote);
  AddPromotedToType (ISD::CTTZ, MVT::i8, MVT::i32);
  setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i8, Promote);
  AddPromotedToType (ISD::CTTZ_ZERO_UNDEF, MVT::i8, MVT::i32);
  if (Subtarget->hasBMI()) {
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i16, Expand);
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32, Expand);
    if (Subtarget->is64Bit())
      setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i64, Expand);
  } else {
    setOperationAction(ISD::CTTZ, MVT::i16, Custom);
    setOperationAction(ISD::CTTZ, MVT::i32, Custom);
    if (Subtarget->is64Bit())
      setOperationAction(ISD::CTTZ, MVT::i64, Custom);
  }

  if (Subtarget->hasLZCNT()) {
    // When promoting the i8 variants, force them to i32 for a shorter
    // encoding.
    setOperationAction(ISD::CTLZ, MVT::i8, Promote);
    AddPromotedToType (ISD::CTLZ, MVT::i8, MVT::i32);
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i8, Promote);
    AddPromotedToType (ISD::CTLZ_ZERO_UNDEF, MVT::i8, MVT::i32);
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i16, Expand);
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Expand);
    if (Subtarget->is64Bit())
      setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i64, Expand);
  } else {
    setOperationAction(ISD::CTLZ, MVT::i8, Custom);
    setOperationAction(ISD::CTLZ, MVT::i16, Custom);
    setOperationAction(ISD::CTLZ, MVT::i32, Custom);
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i8, Custom);
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i16, Custom);
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Custom);
    if (Subtarget->is64Bit()) {
      setOperationAction(ISD::CTLZ, MVT::i64, Custom);
      setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i64, Custom);
    }
  }

  // Special handling for half-precision floating point conversions.
  // If we don't have F16C support, then lower half float conversions
  // into library calls.
  if (TM.Options.UseSoftFloat || !Subtarget->hasF16C()) {
    setOperationAction(ISD::FP16_TO_FP, MVT::f32, Expand);
    setOperationAction(ISD::FP_TO_FP16, MVT::f32, Expand);
  }

  // There's never any support for operations beyond MVT::f32.
  setOperationAction(ISD::FP16_TO_FP, MVT::f64, Expand);
  setOperationAction(ISD::FP16_TO_FP, MVT::f80, Expand);
  setOperationAction(ISD::FP_TO_FP16, MVT::f64, Expand);
  setOperationAction(ISD::FP_TO_FP16, MVT::f80, Expand);

  setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::f80, MVT::f16, Expand);
  setTruncStoreAction(MVT::f32, MVT::f16, Expand);
  setTruncStoreAction(MVT::f64, MVT::f16, Expand);
  setTruncStoreAction(MVT::f80, MVT::f16, Expand);

  if (Subtarget->hasPOPCNT()) {
    setOperationAction(ISD::CTPOP, MVT::i8, Promote);
  } else {
    setOperationAction(ISD::CTPOP, MVT::i8, Expand);
    setOperationAction(ISD::CTPOP, MVT::i16, Expand);
    setOperationAction(ISD::CTPOP, MVT::i32, Expand);
    if (Subtarget->is64Bit())
      setOperationAction(ISD::CTPOP, MVT::i64, Expand);
  }

  setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, Custom);

  if (!Subtarget->hasMOVBE())
    setOperationAction(ISD::BSWAP, MVT::i16, Expand);

  // These should be promoted to a larger select which is supported.
  setOperationAction(ISD::SELECT, MVT::i1, Promote);
  // X86 wants to expand cmov itself.
  setOperationAction(ISD::SELECT, MVT::i8, Custom);
  setOperationAction(ISD::SELECT, MVT::i16, Custom);
  setOperationAction(ISD::SELECT, MVT::i32, Custom);
  setOperationAction(ISD::SELECT, MVT::f32, Custom);
  setOperationAction(ISD::SELECT, MVT::f64, Custom);
  setOperationAction(ISD::SELECT, MVT::f80, Custom);
  setOperationAction(ISD::SETCC, MVT::i8, Custom);
  setOperationAction(ISD::SETCC, MVT::i16, Custom);
  setOperationAction(ISD::SETCC, MVT::i32, Custom);
  setOperationAction(ISD::SETCC, MVT::f32, Custom);
  setOperationAction(ISD::SETCC, MVT::f64, Custom);
  setOperationAction(ISD::SETCC, MVT::f80, Custom);
  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::SELECT, MVT::i64, Custom);
    setOperationAction(ISD::SETCC, MVT::i64, Custom);
  }
  setOperationAction(ISD::EH_RETURN, MVT::Other, Custom);
  // NOTE: EH_SJLJ_SETJMP/_LONGJMP supported here is NOT intended to support
  // SjLj exception handling but a light-weight setjmp/longjmp replacement to
  // support continuation, user-level threading, and so on. As a result, no
  // other SjLj exception interfaces are implemented and please don't build
  // your own exception handling based on them.
  // LLVM/Clang supports zero-cost DWARF exception handling.
  setOperationAction(ISD::EH_SJLJ_SETJMP, MVT::i32, Custom);
  setOperationAction(ISD::EH_SJLJ_LONGJMP, MVT::Other, Custom);

  setOperationAction(ISD::ConstantPool, MVT::i32, Custom);
  setOperationAction(ISD::JumpTable, MVT::i32, Custom);
  setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom);
  if (Subtarget->is64Bit())
    setOperationAction(ISD::GlobalTLSAddress, MVT::i64, Custom);
  setOperationAction(ISD::ExternalSymbol, MVT::i32, Custom);
  setOperationAction(ISD::BlockAddress, MVT::i32, Custom);
  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::ConstantPool, MVT::i64, Custom);
    setOperationAction(ISD::JumpTable, MVT::i64, Custom);
    setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);
    setOperationAction(ISD::ExternalSymbol, MVT::i64, Custom);
    setOperationAction(ISD::BlockAddress, MVT::i64, Custom);
  }
  // 64-bit add, sub, shl, sra, srl (iff 32-bit x86)
  setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom);
  setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom);
  setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom);
  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::SHL_PARTS, MVT::i64, Custom);
    setOperationAction(ISD::SRA_PARTS, MVT::i64, Custom);
    setOperationAction(ISD::SRL_PARTS, MVT::i64, Custom);
  }

  if (Subtarget->hasSSE1())
    setOperationAction(ISD::PREFETCH, MVT::Other, Legal);

  setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Custom);

  // Expand certain atomics
  for (unsigned i = 0; i != array_lengthof(IntVTs); ++i) {
    MVT VT = IntVTs[i];
    setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, VT, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_SUB, VT, Custom);
    setOperationAction(ISD::ATOMIC_STORE, VT, Custom);
  }

  if (Subtarget->hasCmpxchg16b()) {
    setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, MVT::i128, Custom);
  }

  // FIXME - use subtarget debug flags
  if (!Subtarget->isTargetDarwin() && !Subtarget->isTargetELF() &&
      !Subtarget->isTargetCygMing() && !Subtarget->isTargetWin64()) {
    setOperationAction(ISD::EH_LABEL, MVT::Other, Expand);
  }

  if (Subtarget->is64Bit()) {
    setExceptionPointerRegister(X86::RAX);
    setExceptionSelectorRegister(X86::RDX);
  } else {
    setExceptionPointerRegister(X86::EAX);
    setExceptionSelectorRegister(X86::EDX);
  }
  setOperationAction(ISD::FRAME_TO_ARGS_OFFSET, MVT::i32, Custom);
  setOperationAction(ISD::FRAME_TO_ARGS_OFFSET, MVT::i64, Custom);

  setOperationAction(ISD::INIT_TRAMPOLINE, MVT::Other, Custom);
  setOperationAction(ISD::ADJUST_TRAMPOLINE, MVT::Other, Custom);

  setOperationAction(ISD::TRAP, MVT::Other, Legal);
  setOperationAction(ISD::DEBUGTRAP, MVT::Other, Legal);

  // VASTART needs to be custom lowered to use the VarArgsFrameIndex
  setOperationAction(ISD::VASTART, MVT::Other, Custom);
  setOperationAction(ISD::VAEND, MVT::Other, Expand);
  if (Subtarget->is64Bit() && !Subtarget->isTargetWin64()) {
    // TargetInfo::X86_64ABIBuiltinVaList
    setOperationAction(ISD::VAARG, MVT::Other, Custom);
    setOperationAction(ISD::VACOPY, MVT::Other, Custom);
  } else {
    // TargetInfo::CharPtrBuiltinVaList
    setOperationAction(ISD::VAARG, MVT::Other, Expand);
    setOperationAction(ISD::VACOPY, MVT::Other, Expand);
  }

  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);

  setOperationAction(ISD::DYNAMIC_STACKALLOC, getPointerTy(), Custom);

  if (!TM.Options.UseSoftFloat && X86ScalarSSEf64) {
    // f32 and f64 use SSE.
    // Set up the FP register classes.
    addRegisterClass(MVT::f32, &X86::FR32RegClass);
    addRegisterClass(MVT::f64, &X86::FR64RegClass);

    // Use ANDPD to simulate FABS.
    setOperationAction(ISD::FABS, MVT::f64, Custom);
    setOperationAction(ISD::FABS, MVT::f32, Custom);

    // Use XORP to simulate FNEG.
    setOperationAction(ISD::FNEG, MVT::f64, Custom);
    setOperationAction(ISD::FNEG, MVT::f32, Custom);

    // Use ANDPD and ORPD to simulate FCOPYSIGN.
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Custom);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);

    // Lower this to FGETSIGNx86 plus an AND.
    setOperationAction(ISD::FGETSIGN, MVT::i64, Custom);
    setOperationAction(ISD::FGETSIGN, MVT::i32, Custom);

    // We don't support sin/cos/fmod
    setOperationAction(ISD::FSIN, MVT::f64, Expand);
    setOperationAction(ISD::FCOS, MVT::f64, Expand);
    setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
    setOperationAction(ISD::FSIN, MVT::f32, Expand);
    setOperationAction(ISD::FCOS, MVT::f32, Expand);
    setOperationAction(ISD::FSINCOS, MVT::f32, Expand);

    // Expand FP immediates into loads from the stack, except for the special
    // cases we handle.
    addLegalFPImmediate(APFloat(+0.0)); // xorpd
    addLegalFPImmediate(APFloat(+0.0f)); // xorps
  } else if (!TM.Options.UseSoftFloat && X86ScalarSSEf32) {
    // Use SSE for f32, x87 for f64.
    // Set up the FP register classes.
    addRegisterClass(MVT::f32, &X86::FR32RegClass);
    addRegisterClass(MVT::f64, &X86::RFP64RegClass);

    // Use ANDPS to simulate FABS.
    setOperationAction(ISD::FABS, MVT::f32, Custom);

    // Use XORP to simulate FNEG.
    setOperationAction(ISD::FNEG, MVT::f32, Custom);

    setOperationAction(ISD::UNDEF, MVT::f64, Expand);

    // Use ANDPS and ORPS to simulate FCOPYSIGN.
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);

    // We don't support sin/cos/fmod
    setOperationAction(ISD::FSIN, MVT::f32, Expand);
    setOperationAction(ISD::FCOS, MVT::f32, Expand);
    setOperationAction(ISD::FSINCOS, MVT::f32, Expand);

    // Special cases we handle for FP constants.
    addLegalFPImmediate(APFloat(+0.0f)); // xorps
    addLegalFPImmediate(APFloat(+0.0)); // FLD0
    addLegalFPImmediate(APFloat(+1.0)); // FLD1
    addLegalFPImmediate(APFloat(-0.0)); // FLD0/FCHS
    addLegalFPImmediate(APFloat(-1.0)); // FLD1/FCHS

    if (!TM.Options.UnsafeFPMath) {
      setOperationAction(ISD::FSIN, MVT::f64, Expand);
      setOperationAction(ISD::FCOS, MVT::f64, Expand);
      setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
    }
  } else if (!TM.Options.UseSoftFloat) {
    // f32 and f64 in x87.
    // Set up the FP register classes.
    addRegisterClass(MVT::f64, &X86::RFP64RegClass);
    addRegisterClass(MVT::f32, &X86::RFP32RegClass);

    setOperationAction(ISD::UNDEF, MVT::f64, Expand);
    setOperationAction(ISD::UNDEF, MVT::f32, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);

    if (!TM.Options.UnsafeFPMath) {
      setOperationAction(ISD::FSIN, MVT::f64, Expand);
      setOperationAction(ISD::FSIN, MVT::f32, Expand);
      setOperationAction(ISD::FCOS, MVT::f64, Expand);
      setOperationAction(ISD::FCOS, MVT::f32, Expand);
      setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
      setOperationAction(ISD::FSINCOS, MVT::f32, Expand);
    }
    addLegalFPImmediate(APFloat(+0.0)); // FLD0
    addLegalFPImmediate(APFloat(+1.0)); // FLD1
    addLegalFPImmediate(APFloat(-0.0)); // FLD0/FCHS
    addLegalFPImmediate(APFloat(-1.0)); // FLD1/FCHS
    addLegalFPImmediate(APFloat(+0.0f)); // FLD0
    addLegalFPImmediate(APFloat(+1.0f)); // FLD1
    addLegalFPImmediate(APFloat(-0.0f)); // FLD0/FCHS
    addLegalFPImmediate(APFloat(-1.0f)); // FLD1/FCHS
  }

  // We don't support FMA.
  setOperationAction(ISD::FMA, MVT::f64, Expand);
  setOperationAction(ISD::FMA, MVT::f32, Expand);

  // Long double always uses X87.
  if (!TM.Options.UseSoftFloat) {
    addRegisterClass(MVT::f80, &X86::RFP80RegClass);
    setOperationAction(ISD::UNDEF, MVT::f80, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f80, Expand);
    {
      APFloat TmpFlt = APFloat::getZero(APFloat::x87DoubleExtended);
      addLegalFPImmediate(TmpFlt); // FLD0
      TmpFlt.changeSign();
      addLegalFPImmediate(TmpFlt); // FLD0/FCHS

      bool ignored;
      APFloat TmpFlt2(+1.0);
      TmpFlt2.convert(APFloat::x87DoubleExtended, APFloat::rmNearestTiesToEven,
                      &ignored);
      addLegalFPImmediate(TmpFlt2); // FLD1
      TmpFlt2.changeSign();
      addLegalFPImmediate(TmpFlt2); // FLD1/FCHS
    }

    if (!TM.Options.UnsafeFPMath) {
      setOperationAction(ISD::FSIN, MVT::f80, Expand);
      setOperationAction(ISD::FCOS, MVT::f80, Expand);
      setOperationAction(ISD::FSINCOS, MVT::f80, Expand);
    }

    setOperationAction(ISD::FFLOOR, MVT::f80, Expand);
    setOperationAction(ISD::FCEIL, MVT::f80, Expand);
    setOperationAction(ISD::FTRUNC, MVT::f80, Expand);
    setOperationAction(ISD::FRINT, MVT::f80, Expand);
    setOperationAction(ISD::FNEARBYINT, MVT::f80, Expand);
    setOperationAction(ISD::FMA, MVT::f80, Expand);
  }

  // Always use a library call for pow.
  setOperationAction(ISD::FPOW, MVT::f32, Expand);
  setOperationAction(ISD::FPOW, MVT::f64, Expand);
  setOperationAction(ISD::FPOW, MVT::f80, Expand);

  setOperationAction(ISD::FLOG, MVT::f80, Expand);
  setOperationAction(ISD::FLOG2, MVT::f80, Expand);
  setOperationAction(ISD::FLOG10, MVT::f80, Expand);
  setOperationAction(ISD::FEXP, MVT::f80, Expand);
  setOperationAction(ISD::FEXP2, MVT::f80, Expand);
  setOperationAction(ISD::FMINNUM, MVT::f80, Expand);
  setOperationAction(ISD::FMAXNUM, MVT::f80, Expand);

  // First set operation action for all vector types to either promote
  // (for widening) or expand (for scalarization). Then we will selectively
  // turn on ones that can be effectively codegen'd.
  for (MVT VT : MVT::vector_valuetypes()) {
    setOperationAction(ISD::ADD, VT, Expand);
    setOperationAction(ISD::SUB, VT, Expand);
    setOperationAction(ISD::FADD, VT, Expand);
    setOperationAction(ISD::FNEG, VT, Expand);
    setOperationAction(ISD::FSUB, VT, Expand);
    setOperationAction(ISD::MUL, VT, Expand);
    setOperationAction(ISD::FMUL, VT, Expand);
    setOperationAction(ISD::SDIV, VT, Expand);
    setOperationAction(ISD::UDIV, VT, Expand);
    setOperationAction(ISD::FDIV, VT, Expand);
    setOperationAction(ISD::SREM, VT, Expand);
    setOperationAction(ISD::UREM, VT, Expand);
    setOperationAction(ISD::LOAD, VT, Expand);
    setOperationAction(ISD::VECTOR_SHUFFLE, VT, Expand);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Expand);
    setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Expand);
    setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Expand);
    setOperationAction(ISD::INSERT_SUBVECTOR, VT, Expand);
    setOperationAction(ISD::FABS, VT, Expand);
    setOperationAction(ISD::FSIN, VT, Expand);
    setOperationAction(ISD::FSINCOS, VT, Expand);
    setOperationAction(ISD::FCOS, VT, Expand);
    setOperationAction(ISD::FSINCOS, VT, Expand);
    setOperationAction(ISD::FREM, VT, Expand);
    setOperationAction(ISD::FMA, VT, Expand);
    setOperationAction(ISD::FPOWI, VT, Expand);
    setOperationAction(ISD::FSQRT, VT, Expand);
    setOperationAction(ISD::FCOPYSIGN, VT, Expand);
    setOperationAction(ISD::FFLOOR, VT, Expand);
    setOperationAction(ISD::FCEIL, VT, Expand);
    setOperationAction(ISD::FTRUNC, VT, Expand);
    setOperationAction(ISD::FRINT, VT, Expand);
    setOperationAction(ISD::FNEARBYINT, VT, Expand);
    setOperationAction(ISD::SMUL_LOHI, VT, Expand);
    setOperationAction(ISD::MULHS, VT, Expand);
    setOperationAction(ISD::UMUL_LOHI, VT, Expand);
    setOperationAction(ISD::MULHU, VT, Expand);
    setOperationAction(ISD::SDIVREM, VT, Expand);
    setOperationAction(ISD::UDIVREM, VT, Expand);
    setOperationAction(ISD::FPOW, VT, Expand);
    setOperationAction(ISD::CTPOP, VT, Expand);
    setOperationAction(ISD::CTTZ, VT, Expand);
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, VT, Expand);
    setOperationAction(ISD::CTLZ, VT, Expand);
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, VT, Expand);
    setOperationAction(ISD::SHL, VT, Expand);
    setOperationAction(ISD::SRA, VT, Expand);
    setOperationAction(ISD::SRL, VT, Expand);
    setOperationAction(ISD::ROTL, VT, Expand);
    setOperationAction(ISD::ROTR, VT, Expand);
    setOperationAction(ISD::BSWAP, VT, Expand);
    setOperationAction(ISD::SETCC, VT, Expand);
    setOperationAction(ISD::FLOG, VT, Expand);
    setOperationAction(ISD::FLOG2, VT, Expand);
    setOperationAction(ISD::FLOG10, VT, Expand);
    setOperationAction(ISD::FEXP, VT, Expand);
    setOperationAction(ISD::FEXP2, VT, Expand);
    setOperationAction(ISD::FP_TO_UINT, VT, Expand);
    setOperationAction(ISD::FP_TO_SINT, VT, Expand);
    setOperationAction(ISD::UINT_TO_FP, VT, Expand);
    setOperationAction(ISD::SINT_TO_FP, VT, Expand);
    setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Expand);
    setOperationAction(ISD::TRUNCATE, VT, Expand);
    setOperationAction(ISD::SIGN_EXTEND, VT, Expand);
    setOperationAction(ISD::ZERO_EXTEND, VT, Expand);
    setOperationAction(ISD::ANY_EXTEND, VT, Expand);
    setOperationAction(ISD::VSELECT, VT, Expand);
    setOperationAction(ISD::SELECT_CC, VT, Expand);
    for (MVT InnerVT : MVT::vector_valuetypes()) {
      setTruncStoreAction(InnerVT, VT, Expand);

      setLoadExtAction(ISD::SEXTLOAD, InnerVT, VT, Expand);
      setLoadExtAction(ISD::ZEXTLOAD, InnerVT, VT, Expand);

      // N.b. ISD::EXTLOAD legality is basically ignored except for i1-like
      // types, we have to deal with them whether we ask for Expansion or not.
      // Setting Expand causes its own optimisation problems though, so leave
      // them legal.
      if (VT.getVectorElementType() == MVT::i1)
        setLoadExtAction(ISD::EXTLOAD, InnerVT, VT, Expand);
    }
  }

  // FIXME: In order to prevent SSE instructions being expanded to MMX ones
  // with -msoft-float, disable use of MMX as well.
  if (!TM.Options.UseSoftFloat && Subtarget->hasMMX()) {
    addRegisterClass(MVT::x86mmx, &X86::VR64RegClass);
    // No operations on x86mmx supported, everything uses intrinsics.
  }

  // MMX-sized vectors (other than x86mmx) are expected to be expanded
  // into smaller operations.
  setOperationAction(ISD::MULHS, MVT::v8i8, Expand);
  setOperationAction(ISD::MULHS, MVT::v4i16, Expand);
  setOperationAction(ISD::MULHS, MVT::v2i32, Expand);
  setOperationAction(ISD::MULHS, MVT::v1i64, Expand);
  setOperationAction(ISD::AND, MVT::v8i8, Expand);
  setOperationAction(ISD::AND, MVT::v4i16, Expand);
  setOperationAction(ISD::AND, MVT::v2i32, Expand);
  setOperationAction(ISD::AND, MVT::v1i64, Expand);
  setOperationAction(ISD::OR, MVT::v8i8, Expand);
  setOperationAction(ISD::OR, MVT::v4i16, Expand);
  setOperationAction(ISD::OR, MVT::v2i32, Expand);
  setOperationAction(ISD::OR, MVT::v1i64, Expand);
  setOperationAction(ISD::XOR, MVT::v8i8, Expand);
  setOperationAction(ISD::XOR, MVT::v4i16, Expand);
  setOperationAction(ISD::XOR, MVT::v2i32, Expand);
  setOperationAction(ISD::XOR, MVT::v1i64, Expand);
  setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v8i8, Expand);
  setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i16, Expand);
  setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v2i32, Expand);
  setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v1i64, Expand);
  setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v1i64, Expand);
  setOperationAction(ISD::SELECT, MVT::v8i8, Expand);
  setOperationAction(ISD::SELECT, MVT::v4i16, Expand);
  setOperationAction(ISD::SELECT, MVT::v2i32, Expand);
  setOperationAction(ISD::SELECT, MVT::v1i64, Expand);
  setOperationAction(ISD::BITCAST, MVT::v8i8, Expand);
  setOperationAction(ISD::BITCAST, MVT::v4i16, Expand);
  setOperationAction(ISD::BITCAST, MVT::v2i32, Expand);
  setOperationAction(ISD::BITCAST, MVT::v1i64, Expand);

  if (!TM.Options.UseSoftFloat && Subtarget->hasSSE1()) {
    addRegisterClass(MVT::v4f32, &X86::VR128RegClass);

    setOperationAction(ISD::FADD, MVT::v4f32, Legal);
    setOperationAction(ISD::FSUB, MVT::v4f32, Legal);
    setOperationAction(ISD::FMUL, MVT::v4f32, Legal);
    setOperationAction(ISD::FDIV, MVT::v4f32, Legal);
    setOperationAction(ISD::FSQRT, MVT::v4f32, Legal);
    setOperationAction(ISD::FNEG, MVT::v4f32, Custom);
    setOperationAction(ISD::FABS, MVT::v4f32, Custom);
    setOperationAction(ISD::LOAD, MVT::v4f32, Legal);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4f32, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Custom);
    setOperationAction(ISD::SELECT, MVT::v4f32, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::v4i32, Custom);
  }

  if (!TM.Options.UseSoftFloat && Subtarget->hasSSE2()) {
    addRegisterClass(MVT::v2f64, &X86::VR128RegClass);

    // FIXME: Unfortunately, -soft-float and -no-implicit-float mean XMM
    // registers cannot be used even for integer operations.
    addRegisterClass(MVT::v16i8, &X86::VR128RegClass);
    addRegisterClass(MVT::v8i16, &X86::VR128RegClass);
    addRegisterClass(MVT::v4i32, &X86::VR128RegClass);
    addRegisterClass(MVT::v2i64, &X86::VR128RegClass);

    setOperationAction(ISD::ADD, MVT::v16i8, Legal);
    setOperationAction(ISD::ADD, MVT::v8i16, Legal);
    setOperationAction(ISD::ADD, MVT::v4i32, Legal);
    setOperationAction(ISD::ADD, MVT::v2i64, Legal);
    setOperationAction(ISD::MUL, MVT::v4i32, Custom);
    setOperationAction(ISD::MUL, MVT::v2i64, Custom);
    setOperationAction(ISD::UMUL_LOHI, MVT::v4i32, Custom);
    setOperationAction(ISD::SMUL_LOHI, MVT::v4i32, Custom);
    setOperationAction(ISD::MULHU, MVT::v8i16, Legal);
    setOperationAction(ISD::MULHS, MVT::v8i16, Legal);
    setOperationAction(ISD::SUB, MVT::v16i8, Legal);
    setOperationAction(ISD::SUB, MVT::v8i16, Legal);
    setOperationAction(ISD::SUB, MVT::v4i32, Legal);
    setOperationAction(ISD::SUB, MVT::v2i64, Legal);
    setOperationAction(ISD::MUL, MVT::v8i16, Legal);
    setOperationAction(ISD::FADD, MVT::v2f64, Legal);
    setOperationAction(ISD::FSUB, MVT::v2f64, Legal);
    setOperationAction(ISD::FMUL, MVT::v2f64, Legal);
    setOperationAction(ISD::FDIV, MVT::v2f64, Legal);
    setOperationAction(ISD::FSQRT, MVT::v2f64, Legal);
    setOperationAction(ISD::FNEG, MVT::v2f64, Custom);
    setOperationAction(ISD::FABS, MVT::v2f64, Custom);

    setOperationAction(ISD::SETCC, MVT::v2i64, Custom);
    setOperationAction(ISD::SETCC, MVT::v16i8, Custom);
    setOperationAction(ISD::SETCC, MVT::v8i16, Custom);
    setOperationAction(ISD::SETCC, MVT::v4i32, Custom);

    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v16i8, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v8i16, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i16, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i32, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom);

    // Only provide customized ctpop vector bit twiddling for vector types we
    // know to perform better than using the popcnt instructions on each vector
    // element. If popcnt isn't supported, always provide the custom version.
    if (!Subtarget->hasPOPCNT()) {
      setOperationAction(ISD::CTPOP, MVT::v4i32, Custom);
      setOperationAction(ISD::CTPOP, MVT::v2i64, Custom);
    }

    // Custom lower build_vector, vector_shuffle, and extract_vector_elt.
    for (int i = MVT::v16i8; i != MVT::v2i64; ++i) {
      MVT VT = (MVT::SimpleValueType)i;
      // Do not attempt to custom lower non-power-of-2 vectors
      if (!isPowerOf2_32(VT.getVectorNumElements()))
        continue;
      // Do not attempt to custom lower non-128-bit vectors
      if (!VT.is128BitVector())
        continue;
      setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
      setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
    }

    // We support custom legalizing of sext and anyext loads for specific
    // memory vector types which we can load as a scalar (or sequence of
    // scalars) and extend in-register to a legal 128-bit vector type. For sext
    // loads these must work with a single scalar load.
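    // (For instance, a v4i8 sextload can be done as one i32 scalar load whose
    // bytes are then sign-extended in-register to a v4i32.)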
    for (MVT VT : MVT::integer_vector_valuetypes()) {
      setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v4i8, Custom);
      setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v4i16, Custom);
      setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v8i8, Custom);
      setLoadExtAction(ISD::EXTLOAD, VT, MVT::v2i8, Custom);
      setLoadExtAction(ISD::EXTLOAD, VT, MVT::v2i16, Custom);
      setLoadExtAction(ISD::EXTLOAD, VT, MVT::v2i32, Custom);
      setLoadExtAction(ISD::EXTLOAD, VT, MVT::v4i8, Custom);
      setLoadExtAction(ISD::EXTLOAD, VT, MVT::v4i16, Custom);
      setLoadExtAction(ISD::EXTLOAD, VT, MVT::v8i8, Custom);
    }

    setOperationAction(ISD::BUILD_VECTOR, MVT::v2f64, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v2i64, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2f64, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2i64, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2f64, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Custom);

    if (Subtarget->is64Bit()) {
      setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2i64, Custom);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i64, Custom);
    }

    // Promote v16i8, v8i16, v4i32 load, select, and, or, xor to v2i64.
    for (int i = MVT::v16i8; i != MVT::v2i64; ++i) {
      MVT VT = (MVT::SimpleValueType)i;

      // Do not attempt to promote non-128-bit vectors
      if (!VT.is128BitVector())
        continue;

      setOperationAction(ISD::AND, VT, Promote);
      AddPromotedToType (ISD::AND, VT, MVT::v2i64);
      setOperationAction(ISD::OR, VT, Promote);
      AddPromotedToType (ISD::OR, VT, MVT::v2i64);
      setOperationAction(ISD::XOR, VT, Promote);
      AddPromotedToType (ISD::XOR, VT, MVT::v2i64);
      setOperationAction(ISD::LOAD, VT, Promote);
      AddPromotedToType (ISD::LOAD, VT, MVT::v2i64);
      setOperationAction(ISD::SELECT, VT, Promote);
      AddPromotedToType (ISD::SELECT, VT, MVT::v2i64);
    }

    // Custom lower v2i64 and v2f64 selects.
    setOperationAction(ISD::LOAD, MVT::v2f64, Legal);
    setOperationAction(ISD::LOAD, MVT::v2i64, Legal);
    setOperationAction(ISD::SELECT, MVT::v2f64, Custom);
    setOperationAction(ISD::SELECT, MVT::v2i64, Custom);

    setOperationAction(ISD::FP_TO_SINT, MVT::v4i32, Legal);
    setOperationAction(ISD::SINT_TO_FP, MVT::v4i32, Legal);

    setOperationAction(ISD::UINT_TO_FP, MVT::v4i8, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::v4i16, Custom);
    // As there is no 64-bit GPR available, we need to build a special custom
    // sequence to convert from v2i32 to v2f32.
    if (!Subtarget->is64Bit())
      setOperationAction(ISD::UINT_TO_FP, MVT::v2f32, Custom);

    setOperationAction(ISD::FP_EXTEND, MVT::v2f32, Custom);
    setOperationAction(ISD::FP_ROUND, MVT::v2f32, Custom);

    for (MVT VT : MVT::fp_vector_valuetypes())
      setLoadExtAction(ISD::EXTLOAD, VT, MVT::v2f32, Legal);

    setOperationAction(ISD::BITCAST, MVT::v2i32, Custom);
    setOperationAction(ISD::BITCAST, MVT::v4i16, Custom);
    setOperationAction(ISD::BITCAST, MVT::v8i8, Custom);
  }

  if (!TM.Options.UseSoftFloat && Subtarget->hasSSE41()) {
    setOperationAction(ISD::FFLOOR, MVT::f32, Legal);
    setOperationAction(ISD::FCEIL, MVT::f32, Legal);
    setOperationAction(ISD::FTRUNC, MVT::f32, Legal);
    setOperationAction(ISD::FRINT, MVT::f32, Legal);
    setOperationAction(ISD::FNEARBYINT, MVT::f32, Legal);
    setOperationAction(ISD::FFLOOR, MVT::f64, Legal);
    setOperationAction(ISD::FCEIL, MVT::f64, Legal);
    setOperationAction(ISD::FTRUNC, MVT::f64, Legal);
    setOperationAction(ISD::FRINT, MVT::f64, Legal);
    setOperationAction(ISD::FNEARBYINT, MVT::f64, Legal);

    setOperationAction(ISD::FFLOOR, MVT::v4f32, Legal);
    setOperationAction(ISD::FCEIL, MVT::v4f32, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v4f32, Legal);
    setOperationAction(ISD::FRINT, MVT::v4f32, Legal);
    setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Legal);
    setOperationAction(ISD::FFLOOR, MVT::v2f64, Legal);
    setOperationAction(ISD::FCEIL, MVT::v2f64, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v2f64, Legal);
    setOperationAction(ISD::FRINT, MVT::v2f64, Legal);
    setOperationAction(ISD::FNEARBYINT, MVT::v2f64, Legal);

    // FIXME: Do we need to handle scalar-to-vector here?
    setOperationAction(ISD::MUL, MVT::v4i32, Legal);

    setOperationAction(ISD::VSELECT, MVT::v2f64, Custom);
    setOperationAction(ISD::VSELECT, MVT::v2i64, Custom);
    setOperationAction(ISD::VSELECT, MVT::v4i32, Custom);
    setOperationAction(ISD::VSELECT, MVT::v4f32, Custom);
    setOperationAction(ISD::VSELECT, MVT::v8i16, Custom);
    // There is no BLENDI for byte vectors. We don't need to custom lower
    // some vselects for now.
    setOperationAction(ISD::VSELECT, MVT::v16i8, Legal);

    // SSE41 brings specific instructions for doing vector sign extend even in
    // cases where we don't have SRA.
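    // (For example, PMOVSXBQ can widen i8 elements straight to i64 even
    // though there is no native vector SRA for v2i64.)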
    for (MVT VT : MVT::integer_vector_valuetypes()) {
      setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v2i8, Custom);
      setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v2i16, Custom);
      setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v2i32, Custom);
    }

    // SSE41 also has vector sign/zero extending loads, PMOV[SZ]X
    setLoadExtAction(ISD::SEXTLOAD, MVT::v8i16, MVT::v8i8, Legal);
    setLoadExtAction(ISD::SEXTLOAD, MVT::v4i32, MVT::v4i8, Legal);
    setLoadExtAction(ISD::SEXTLOAD, MVT::v2i64, MVT::v2i8, Legal);
    setLoadExtAction(ISD::SEXTLOAD, MVT::v4i32, MVT::v4i16, Legal);
    setLoadExtAction(ISD::SEXTLOAD, MVT::v2i64, MVT::v2i16, Legal);
    setLoadExtAction(ISD::SEXTLOAD, MVT::v2i64, MVT::v2i32, Legal);

    setLoadExtAction(ISD::ZEXTLOAD, MVT::v8i16, MVT::v8i8, Legal);
    setLoadExtAction(ISD::ZEXTLOAD, MVT::v4i32, MVT::v4i8, Legal);
    setLoadExtAction(ISD::ZEXTLOAD, MVT::v2i64, MVT::v2i8, Legal);
    setLoadExtAction(ISD::ZEXTLOAD, MVT::v4i32, MVT::v4i16, Legal);
    setLoadExtAction(ISD::ZEXTLOAD, MVT::v2i64, MVT::v2i16, Legal);
    setLoadExtAction(ISD::ZEXTLOAD, MVT::v2i64, MVT::v2i32, Legal);

    // i8 and i16 vectors are custom because the source register and source
    // memory operand types are not the same width. f32 vectors are
    // custom since the immediate controlling the insert encodes additional
    // information.
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v16i8, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i16, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i32, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom);

    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v16i8, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v8i16, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4i32, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Custom);

    // FIXME: these should be Legal, but that's only for the case where
    // the index is constant. For now custom expand to deal with that.
    if (Subtarget->is64Bit()) {
      setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2i64, Custom);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i64, Custom);
    }
  }

  if (Subtarget->hasSSE2()) {
    setOperationAction(ISD::SRL, MVT::v8i16, Custom);
    setOperationAction(ISD::SRL, MVT::v16i8, Custom);

    setOperationAction(ISD::SHL, MVT::v8i16, Custom);
    setOperationAction(ISD::SHL, MVT::v16i8, Custom);

    setOperationAction(ISD::SRA, MVT::v8i16, Custom);
    setOperationAction(ISD::SRA, MVT::v16i8, Custom);

    // In the customized shift lowering, the legal cases in AVX2 will be
    // recognized.
    setOperationAction(ISD::SRL, MVT::v2i64, Custom);
    setOperationAction(ISD::SRL, MVT::v4i32, Custom);

    setOperationAction(ISD::SHL, MVT::v2i64, Custom);
    setOperationAction(ISD::SHL, MVT::v4i32, Custom);

    setOperationAction(ISD::SRA, MVT::v4i32, Custom);
  }

  if (!TM.Options.UseSoftFloat && Subtarget->hasFp256()) {
    addRegisterClass(MVT::v32i8, &X86::VR256RegClass);
    addRegisterClass(MVT::v16i16, &X86::VR256RegClass);
    addRegisterClass(MVT::v8i32, &X86::VR256RegClass);
    addRegisterClass(MVT::v8f32, &X86::VR256RegClass);
    addRegisterClass(MVT::v4i64, &X86::VR256RegClass);
    addRegisterClass(MVT::v4f64, &X86::VR256RegClass);

    setOperationAction(ISD::LOAD, MVT::v8f32, Legal);
    setOperationAction(ISD::LOAD, MVT::v4f64, Legal);
    setOperationAction(ISD::LOAD, MVT::v4i64, Legal);

    setOperationAction(ISD::FADD, MVT::v8f32, Legal);
    setOperationAction(ISD::FSUB, MVT::v8f32, Legal);
    setOperationAction(ISD::FMUL, MVT::v8f32, Legal);
    setOperationAction(ISD::FDIV, MVT::v8f32, Legal);
    setOperationAction(ISD::FSQRT, MVT::v8f32, Legal);
    setOperationAction(ISD::FFLOOR, MVT::v8f32, Legal);
    setOperationAction(ISD::FCEIL, MVT::v8f32, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v8f32, Legal);
    setOperationAction(ISD::FRINT, MVT::v8f32, Legal);
    setOperationAction(ISD::FNEARBYINT, MVT::v8f32, Legal);
    setOperationAction(ISD::FNEG, MVT::v8f32, Custom);
    setOperationAction(ISD::FABS, MVT::v8f32, Custom);

    setOperationAction(ISD::FADD, MVT::v4f64, Legal);
    setOperationAction(ISD::FSUB, MVT::v4f64, Legal);
    setOperationAction(ISD::FMUL, MVT::v4f64, Legal);
    setOperationAction(ISD::FDIV, MVT::v4f64, Legal);
    setOperationAction(ISD::FSQRT, MVT::v4f64, Legal);
    setOperationAction(ISD::FFLOOR, MVT::v4f64, Legal);
    setOperationAction(ISD::FCEIL, MVT::v4f64, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v4f64, Legal);
    setOperationAction(ISD::FRINT, MVT::v4f64, Legal);
    setOperationAction(ISD::FNEARBYINT, MVT::v4f64, Legal);
    setOperationAction(ISD::FNEG, MVT::v4f64, Custom);
    setOperationAction(ISD::FABS, MVT::v4f64, Custom);

    // (fp_to_int:v8i16 (v8f32 ..)) requires the result type to be promoted
    // even though v8i16 is a legal type.
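    // (With the result promoted, the conversion is performed as
    // v8f32 -> v8i32 and the result is then truncated to v8i16.)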
    setOperationAction(ISD::FP_TO_SINT, MVT::v8i16, Promote);
    setOperationAction(ISD::FP_TO_UINT, MVT::v8i16, Promote);
    setOperationAction(ISD::FP_TO_SINT, MVT::v8i32, Legal);

    setOperationAction(ISD::SINT_TO_FP, MVT::v8i16, Promote);
    setOperationAction(ISD::SINT_TO_FP, MVT::v8i32, Legal);
    setOperationAction(ISD::FP_ROUND, MVT::v4f32, Legal);

    setOperationAction(ISD::UINT_TO_FP, MVT::v8i8, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::v8i16, Custom);

    for (MVT VT : MVT::fp_vector_valuetypes())
      setLoadExtAction(ISD::EXTLOAD, VT, MVT::v4f32, Legal);

    setOperationAction(ISD::SRL, MVT::v16i16, Custom);
    setOperationAction(ISD::SRL, MVT::v32i8, Custom);

    setOperationAction(ISD::SHL, MVT::v16i16, Custom);
    setOperationAction(ISD::SHL, MVT::v32i8, Custom);

    setOperationAction(ISD::SRA, MVT::v16i16, Custom);
    setOperationAction(ISD::SRA, MVT::v32i8, Custom);

    setOperationAction(ISD::SETCC, MVT::v32i8, Custom);
    setOperationAction(ISD::SETCC, MVT::v16i16, Custom);
    setOperationAction(ISD::SETCC, MVT::v8i32, Custom);
    setOperationAction(ISD::SETCC, MVT::v4i64, Custom);

    setOperationAction(ISD::SELECT, MVT::v4f64, Custom);
    setOperationAction(ISD::SELECT, MVT::v4i64, Custom);
    setOperationAction(ISD::SELECT, MVT::v8f32, Custom);

    setOperationAction(ISD::VSELECT, MVT::v4f64, Custom);
    setOperationAction(ISD::VSELECT, MVT::v4i64, Custom);
    setOperationAction(ISD::VSELECT, MVT::v8i32, Custom);
    setOperationAction(ISD::VSELECT, MVT::v8f32, Custom);

    setOperationAction(ISD::SIGN_EXTEND, MVT::v4i64, Custom);
    setOperationAction(ISD::SIGN_EXTEND, MVT::v8i32, Custom);
    setOperationAction(ISD::SIGN_EXTEND, MVT::v16i16, Custom);
    setOperationAction(ISD::ZERO_EXTEND, MVT::v4i64, Custom);
    setOperationAction(ISD::ZERO_EXTEND, MVT::v8i32, Custom);
    setOperationAction(ISD::ZERO_EXTEND, MVT::v16i16, Custom);
    setOperationAction(ISD::ANY_EXTEND, MVT::v4i64, Custom);
    setOperationAction(ISD::ANY_EXTEND, MVT::v8i32, Custom);
    setOperationAction(ISD::ANY_EXTEND, MVT::v16i16, Custom);
    setOperationAction(ISD::TRUNCATE, MVT::v16i8, Custom);
    setOperationAction(ISD::TRUNCATE, MVT::v8i16, Custom);
    setOperationAction(ISD::TRUNCATE, MVT::v4i32, Custom);

    if (Subtarget->hasFMA() || Subtarget->hasFMA4()) {
      setOperationAction(ISD::FMA, MVT::v8f32, Legal);
      setOperationAction(ISD::FMA, MVT::v4f64, Legal);
      setOperationAction(ISD::FMA, MVT::v4f32, Legal);
      setOperationAction(ISD::FMA, MVT::v2f64, Legal);
      setOperationAction(ISD::FMA, MVT::f32, Legal);
      setOperationAction(ISD::FMA, MVT::f64, Legal);
    }
1275 if (Subtarget->hasInt256()) {
1276 setOperationAction(ISD::ADD, MVT::v4i64, Legal);
1277 setOperationAction(ISD::ADD, MVT::v8i32, Legal);
1278 setOperationAction(ISD::ADD, MVT::v16i16, Legal);
1279 setOperationAction(ISD::ADD, MVT::v32i8, Legal);
1281 setOperationAction(ISD::SUB, MVT::v4i64, Legal);
1282 setOperationAction(ISD::SUB, MVT::v8i32, Legal);
1283 setOperationAction(ISD::SUB, MVT::v16i16, Legal);
1284 setOperationAction(ISD::SUB, MVT::v32i8, Legal);
1286 setOperationAction(ISD::MUL, MVT::v4i64, Custom);
1287 setOperationAction(ISD::MUL, MVT::v8i32, Legal);
1288 setOperationAction(ISD::MUL, MVT::v16i16, Legal);
1289 // Don't lower v32i8 because there is no 128-bit byte mul
1291 setOperationAction(ISD::UMUL_LOHI, MVT::v8i32, Custom);
1292 setOperationAction(ISD::SMUL_LOHI, MVT::v8i32, Custom);
1293 setOperationAction(ISD::MULHU, MVT::v16i16, Legal);
1294 setOperationAction(ISD::MULHS, MVT::v16i16, Legal);
1296 setOperationAction(ISD::VSELECT, MVT::v16i16, Custom);
1297 setOperationAction(ISD::VSELECT, MVT::v32i8, Legal);
1299 // The custom lowering for UINT_TO_FP for v8i32 becomes interesting
1300 // when we have a 256bit-wide blend with immediate.
1301 setOperationAction(ISD::UINT_TO_FP, MVT::v8i32, Custom);
1303 // Only provide customized ctpop vector bit twiddling for vector types we
1304 // know to perform better than using the popcnt instructions on each
1305 // vector element. If popcnt isn't supported, always provide the custom
1306 // version.
1307 if (!Subtarget->hasPOPCNT())
1308 setOperationAction(ISD::CTPOP, MVT::v4i64, Custom);
1310 // Custom CTPOP always performs better on natively supported v8i32
1311 setOperationAction(ISD::CTPOP, MVT::v8i32, Custom);
1313 // AVX2 also has wider vector sign/zero extending loads, VPMOV[SZ]X
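// As an illustration, a sign-extending load such as
//   %e = sext <8 x i8> %v to <8 x i32>   ; %v loaded from memory
// can normally be matched to a single VPMOVSXBD with a memory operand
// rather than a separate load followed by an extend.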
1314 setLoadExtAction(ISD::SEXTLOAD, MVT::v16i16, MVT::v16i8, Legal);
1315 setLoadExtAction(ISD::SEXTLOAD, MVT::v8i32, MVT::v8i8, Legal);
1316 setLoadExtAction(ISD::SEXTLOAD, MVT::v4i64, MVT::v4i8, Legal);
1317 setLoadExtAction(ISD::SEXTLOAD, MVT::v8i32, MVT::v8i16, Legal);
1318 setLoadExtAction(ISD::SEXTLOAD, MVT::v4i64, MVT::v4i16, Legal);
1319 setLoadExtAction(ISD::SEXTLOAD, MVT::v4i64, MVT::v4i32, Legal);
1321 setLoadExtAction(ISD::ZEXTLOAD, MVT::v16i16, MVT::v16i8, Legal);
1322 setLoadExtAction(ISD::ZEXTLOAD, MVT::v8i32, MVT::v8i8, Legal);
1323 setLoadExtAction(ISD::ZEXTLOAD, MVT::v4i64, MVT::v4i8, Legal);
1324 setLoadExtAction(ISD::ZEXTLOAD, MVT::v8i32, MVT::v8i16, Legal);
1325 setLoadExtAction(ISD::ZEXTLOAD, MVT::v4i64, MVT::v4i16, Legal);
1326 setLoadExtAction(ISD::ZEXTLOAD, MVT::v4i64, MVT::v4i32, Legal);
1327 } else {
1328 setOperationAction(ISD::ADD, MVT::v4i64, Custom);
1329 setOperationAction(ISD::ADD, MVT::v8i32, Custom);
1330 setOperationAction(ISD::ADD, MVT::v16i16, Custom);
1331 setOperationAction(ISD::ADD, MVT::v32i8, Custom);
1333 setOperationAction(ISD::SUB, MVT::v4i64, Custom);
1334 setOperationAction(ISD::SUB, MVT::v8i32, Custom);
1335 setOperationAction(ISD::SUB, MVT::v16i16, Custom);
1336 setOperationAction(ISD::SUB, MVT::v32i8, Custom);
1338 setOperationAction(ISD::MUL, MVT::v4i64, Custom);
1339 setOperationAction(ISD::MUL, MVT::v8i32, Custom);
1340 setOperationAction(ISD::MUL, MVT::v16i16, Custom);
1341 // Don't lower v32i8 because there is no 128-bit byte mul
1344 // In the customized shift lowering, the legal cases in AVX2 will be
1345 // recognized.
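// As an example, a per-element variable shift like
//   %r = shl <8 x i32> %a, %b
// can be selected to a single VPSLLVD under AVX2, while cases with no native
// instruction (e.g. an arithmetic right shift of <4 x i64>) go through the
// custom expansion instead.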
1346 setOperationAction(ISD::SRL, MVT::v4i64, Custom);
1347 setOperationAction(ISD::SRL, MVT::v8i32, Custom);
1349 setOperationAction(ISD::SHL, MVT::v4i64, Custom);
1350 setOperationAction(ISD::SHL, MVT::v8i32, Custom);
1352 setOperationAction(ISD::SRA, MVT::v8i32, Custom);
1354 // Custom lower several nodes for 256-bit types.
1355 for (MVT VT : MVT::vector_valuetypes()) {
1356 if (VT.getScalarSizeInBits() >= 32) {
1357 setOperationAction(ISD::MLOAD, VT, Legal);
1358 setOperationAction(ISD::MSTORE, VT, Legal);
1360 // Extract subvector is special because the value type
1361 // (result) is 128-bit but the source is 256-bit wide.
1362 if (VT.is128BitVector()) {
1363 setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
1365 // Do not attempt to custom lower other non-256-bit vectors
1366 if (!VT.is256BitVector())
1367 continue;
1369 setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
1370 setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
1371 setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
1372 setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
1373 setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Custom);
1374 setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
1375 setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
1378 // Promote v32i8, v16i16, v8i32 select, and, or, xor to v4i64.
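// For example, "and <32 x i8> %a, %b" is bitcast to v4i64 and emitted as a
// single 256-bit VPAND; the element type does not matter for plain bitwise
// operations, so one promoted pattern covers all of them.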
1379 for (int i = MVT::v32i8; i != MVT::v4i64; ++i) {
1380 MVT VT = (MVT::SimpleValueType)i;
1382 // Do not attempt to promote non-256-bit vectors
1383 if (!VT.is256BitVector())
1384 continue;
1386 setOperationAction(ISD::AND, VT, Promote);
1387 AddPromotedToType (ISD::AND, VT, MVT::v4i64);
1388 setOperationAction(ISD::OR, VT, Promote);
1389 AddPromotedToType (ISD::OR, VT, MVT::v4i64);
1390 setOperationAction(ISD::XOR, VT, Promote);
1391 AddPromotedToType (ISD::XOR, VT, MVT::v4i64);
1392 setOperationAction(ISD::LOAD, VT, Promote);
1393 AddPromotedToType (ISD::LOAD, VT, MVT::v4i64);
1394 setOperationAction(ISD::SELECT, VT, Promote);
1395 AddPromotedToType (ISD::SELECT, VT, MVT::v4i64);
1399 if (!TM.Options.UseSoftFloat && Subtarget->hasAVX512()) {
1400 addRegisterClass(MVT::v16i32, &X86::VR512RegClass);
1401 addRegisterClass(MVT::v16f32, &X86::VR512RegClass);
1402 addRegisterClass(MVT::v8i64, &X86::VR512RegClass);
1403 addRegisterClass(MVT::v8f64, &X86::VR512RegClass);
1405 addRegisterClass(MVT::i1, &X86::VK1RegClass);
1406 addRegisterClass(MVT::v8i1, &X86::VK8RegClass);
1407 addRegisterClass(MVT::v16i1, &X86::VK16RegClass);
1409 for (MVT VT : MVT::fp_vector_valuetypes())
1410 setLoadExtAction(ISD::EXTLOAD, VT, MVT::v8f32, Legal);
1412 setOperationAction(ISD::BR_CC, MVT::i1, Expand);
1413 setOperationAction(ISD::SETCC, MVT::i1, Custom);
1414 setOperationAction(ISD::XOR, MVT::i1, Legal);
1415 setOperationAction(ISD::OR, MVT::i1, Legal);
1416 setOperationAction(ISD::AND, MVT::i1, Legal);
1417 setOperationAction(ISD::LOAD, MVT::v16f32, Legal);
1418 setOperationAction(ISD::LOAD, MVT::v8f64, Legal);
1419 setOperationAction(ISD::LOAD, MVT::v8i64, Legal);
1420 setOperationAction(ISD::LOAD, MVT::v16i32, Legal);
1421 setOperationAction(ISD::LOAD, MVT::v16i1, Legal);
1423 setOperationAction(ISD::FADD, MVT::v16f32, Legal);
1424 setOperationAction(ISD::FSUB, MVT::v16f32, Legal);
1425 setOperationAction(ISD::FMUL, MVT::v16f32, Legal);
1426 setOperationAction(ISD::FDIV, MVT::v16f32, Legal);
1427 setOperationAction(ISD::FSQRT, MVT::v16f32, Legal);
1428 setOperationAction(ISD::FNEG, MVT::v16f32, Custom);
1430 setOperationAction(ISD::FADD, MVT::v8f64, Legal);
1431 setOperationAction(ISD::FSUB, MVT::v8f64, Legal);
1432 setOperationAction(ISD::FMUL, MVT::v8f64, Legal);
1433 setOperationAction(ISD::FDIV, MVT::v8f64, Legal);
1434 setOperationAction(ISD::FSQRT, MVT::v8f64, Legal);
1435 setOperationAction(ISD::FNEG, MVT::v8f64, Custom);
1436 setOperationAction(ISD::FMA, MVT::v8f64, Legal);
1437 setOperationAction(ISD::FMA, MVT::v16f32, Legal);
1439 setOperationAction(ISD::FP_TO_SINT, MVT::i32, Legal);
1440 setOperationAction(ISD::FP_TO_UINT, MVT::i32, Legal);
1441 setOperationAction(ISD::SINT_TO_FP, MVT::i32, Legal);
1442 setOperationAction(ISD::UINT_TO_FP, MVT::i32, Legal);
1443 if (Subtarget->is64Bit()) {
1444 setOperationAction(ISD::FP_TO_UINT, MVT::i64, Legal);
1445 setOperationAction(ISD::FP_TO_SINT, MVT::i64, Legal);
1446 setOperationAction(ISD::SINT_TO_FP, MVT::i64, Legal);
1447 setOperationAction(ISD::UINT_TO_FP, MVT::i64, Legal);
1449 setOperationAction(ISD::FP_TO_SINT, MVT::v16i32, Legal);
1450 setOperationAction(ISD::FP_TO_UINT, MVT::v16i32, Legal);
1451 setOperationAction(ISD::FP_TO_UINT, MVT::v8i32, Legal);
1452 setOperationAction(ISD::FP_TO_UINT, MVT::v4i32, Legal);
1453 setOperationAction(ISD::SINT_TO_FP, MVT::v16i32, Legal);
1454 setOperationAction(ISD::SINT_TO_FP, MVT::v8i1, Custom);
1455 setOperationAction(ISD::SINT_TO_FP, MVT::v16i1, Custom);
1456 setOperationAction(ISD::SINT_TO_FP, MVT::v16i8, Promote);
1457 setOperationAction(ISD::SINT_TO_FP, MVT::v16i16, Promote);
1458 setOperationAction(ISD::UINT_TO_FP, MVT::v16i32, Legal);
1459 setOperationAction(ISD::UINT_TO_FP, MVT::v8i32, Legal);
1460 setOperationAction(ISD::UINT_TO_FP, MVT::v4i32, Legal);
1461 setOperationAction(ISD::FP_ROUND, MVT::v8f32, Legal);
1462 setOperationAction(ISD::FP_EXTEND, MVT::v8f32, Legal);
1464 setOperationAction(ISD::TRUNCATE, MVT::i1, Custom);
1465 setOperationAction(ISD::TRUNCATE, MVT::v16i8, Custom);
1466 setOperationAction(ISD::TRUNCATE, MVT::v8i32, Custom);
1467 setOperationAction(ISD::TRUNCATE, MVT::v8i1, Custom);
1468 setOperationAction(ISD::TRUNCATE, MVT::v16i1, Custom);
1469 setOperationAction(ISD::TRUNCATE, MVT::v16i16, Custom);
1470 setOperationAction(ISD::ZERO_EXTEND, MVT::v16i32, Custom);
1471 setOperationAction(ISD::ZERO_EXTEND, MVT::v8i64, Custom);
1472 setOperationAction(ISD::SIGN_EXTEND, MVT::v16i32, Custom);
1473 setOperationAction(ISD::SIGN_EXTEND, MVT::v8i64, Custom);
1474 setOperationAction(ISD::SIGN_EXTEND, MVT::v16i8, Custom);
1475 setOperationAction(ISD::SIGN_EXTEND, MVT::v8i16, Custom);
1476 setOperationAction(ISD::SIGN_EXTEND, MVT::v16i16, Custom);
1478 setOperationAction(ISD::CONCAT_VECTORS, MVT::v8f64, Custom);
1479 setOperationAction(ISD::CONCAT_VECTORS, MVT::v8i64, Custom);
1480 setOperationAction(ISD::CONCAT_VECTORS, MVT::v16f32, Custom);
1481 setOperationAction(ISD::CONCAT_VECTORS, MVT::v16i32, Custom);
1482 setOperationAction(ISD::CONCAT_VECTORS, MVT::v8i1, Custom);
1483 setOperationAction(ISD::CONCAT_VECTORS, MVT::v16i1, Legal);
1485 setOperationAction(ISD::SETCC, MVT::v16i1, Custom);
1486 setOperationAction(ISD::SETCC, MVT::v8i1, Custom);
1488 setOperationAction(ISD::MUL, MVT::v8i64, Custom);
1490 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v8i1, Custom);
1491 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v16i1, Custom);
1492 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v16i1, Custom);
1493 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i1, Custom);
1494 setOperationAction(ISD::BUILD_VECTOR, MVT::v8i1, Custom);
1495 setOperationAction(ISD::BUILD_VECTOR, MVT::v16i1, Custom);
1496 setOperationAction(ISD::SELECT, MVT::v8f64, Custom);
1497 setOperationAction(ISD::SELECT, MVT::v8i64, Custom);
1498 setOperationAction(ISD::SELECT, MVT::v16f32, Custom);
1500 setOperationAction(ISD::ADD, MVT::v8i64, Legal);
1501 setOperationAction(ISD::ADD, MVT::v16i32, Legal);
1503 setOperationAction(ISD::SUB, MVT::v8i64, Legal);
1504 setOperationAction(ISD::SUB, MVT::v16i32, Legal);
1506 setOperationAction(ISD::MUL, MVT::v16i32, Legal);
1508 setOperationAction(ISD::SRL, MVT::v8i64, Custom);
1509 setOperationAction(ISD::SRL, MVT::v16i32, Custom);
1511 setOperationAction(ISD::SHL, MVT::v8i64, Custom);
1512 setOperationAction(ISD::SHL, MVT::v16i32, Custom);
1514 setOperationAction(ISD::SRA, MVT::v8i64, Custom);
1515 setOperationAction(ISD::SRA, MVT::v16i32, Custom);
1517 setOperationAction(ISD::AND, MVT::v8i64, Legal);
1518 setOperationAction(ISD::OR, MVT::v8i64, Legal);
1519 setOperationAction(ISD::XOR, MVT::v8i64, Legal);
1520 setOperationAction(ISD::AND, MVT::v16i32, Legal);
1521 setOperationAction(ISD::OR, MVT::v16i32, Legal);
1522 setOperationAction(ISD::XOR, MVT::v16i32, Legal);
1524 if (Subtarget->hasCDI()) {
1525 setOperationAction(ISD::CTLZ, MVT::v8i64, Legal);
1526 setOperationAction(ISD::CTLZ, MVT::v16i32, Legal);
1529 // Custom lower several nodes.
1530 for (MVT VT : MVT::vector_valuetypes()) {
1531 unsigned EltSize = VT.getVectorElementType().getSizeInBits();
1532 // Extract subvector is special because the value type
1533 // (result) is 256/128-bit but the source is 512-bit wide.
1534 if (VT.is128BitVector() || VT.is256BitVector()) {
1535 setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
1537 if (VT.getVectorElementType() == MVT::i1)
1538 setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Legal);
1540 // Do not attempt to custom lower other non-512-bit vectors
1541 if (!VT.is512BitVector())
1542 continue;
1544 if (EltSize >= 32) {
1545 setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
1546 setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
1547 setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
1548 setOperationAction(ISD::VSELECT, VT, Legal);
1549 setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
1550 setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Custom);
1551 setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
1552 setOperationAction(ISD::MLOAD, VT, Legal);
1553 setOperationAction(ISD::MSTORE, VT, Legal);
1556 for (int i = MVT::v32i8; i != MVT::v8i64; ++i) {
1557 MVT VT = (MVT::SimpleValueType)i;
1559 // Do not attempt to promote non-512-bit vectors.
1560 if (!VT.is512BitVector())
1561 continue;
1563 setOperationAction(ISD::SELECT, VT, Promote);
1564 AddPromotedToType (ISD::SELECT, VT, MVT::v8i64);
1568 if (!TM.Options.UseSoftFloat && Subtarget->hasBWI()) {
1569 addRegisterClass(MVT::v32i16, &X86::VR512RegClass);
1570 addRegisterClass(MVT::v64i8, &X86::VR512RegClass);
1572 addRegisterClass(MVT::v32i1, &X86::VK32RegClass);
1573 addRegisterClass(MVT::v64i1, &X86::VK64RegClass);
1575 setOperationAction(ISD::LOAD, MVT::v32i16, Legal);
1576 setOperationAction(ISD::LOAD, MVT::v64i8, Legal);
1577 setOperationAction(ISD::SETCC, MVT::v32i1, Custom);
1578 setOperationAction(ISD::SETCC, MVT::v64i1, Custom);
1579 setOperationAction(ISD::ADD, MVT::v32i16, Legal);
1580 setOperationAction(ISD::ADD, MVT::v64i8, Legal);
1581 setOperationAction(ISD::SUB, MVT::v32i16, Legal);
1582 setOperationAction(ISD::SUB, MVT::v64i8, Legal);
1583 setOperationAction(ISD::MUL, MVT::v32i16, Legal);
1585 for (int i = MVT::v32i8; i != MVT::v8i64; ++i) {
1586 const MVT VT = (MVT::SimpleValueType)i;
1588 const unsigned EltSize = VT.getVectorElementType().getSizeInBits();
1590 // Do not attempt to promote non-512-bit vectors.
1591 if (!VT.is512BitVector())
1592 continue;
1595 setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
1596 setOperationAction(ISD::VSELECT, VT, Legal);
1601 if (!TM.Options.UseSoftFloat && Subtarget->hasVLX()) {
1602 addRegisterClass(MVT::v4i1, &X86::VK4RegClass);
1603 addRegisterClass(MVT::v2i1, &X86::VK2RegClass);
1605 setOperationAction(ISD::SETCC, MVT::v4i1, Custom);
1606 setOperationAction(ISD::SETCC, MVT::v2i1, Custom);
1607 setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v8i1, Legal);
1609 setOperationAction(ISD::AND, MVT::v8i32, Legal);
1610 setOperationAction(ISD::OR, MVT::v8i32, Legal);
1611 setOperationAction(ISD::XOR, MVT::v8i32, Legal);
1612 setOperationAction(ISD::AND, MVT::v4i32, Legal);
1613 setOperationAction(ISD::OR, MVT::v4i32, Legal);
1614 setOperationAction(ISD::XOR, MVT::v4i32, Legal);
1617 // SIGN_EXTEND_INREGs are evaluated by the extend type. Handle the expansion
1618 // of this type with custom code.
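// e.g. sign_extend_inreg from i16 within a v4i32 value is typically emitted
// as a shift-left by 16 followed by an arithmetic shift-right by 16 when no
// dedicated extend instruction applies.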
1619 for (MVT VT : MVT::vector_valuetypes())
1620 setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Custom);
1622 // We want to custom lower some of our intrinsics.
1623 setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
1624 setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);
1625 setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);
1626 if (!Subtarget->is64Bit())
1627 setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i64, Custom);
1629 // Only custom-lower 64-bit SADDO and friends on 64-bit because we don't
1630 // handle type legalization for these operations here.
1632 // FIXME: We really should do custom legalization for addition and
1633 // subtraction on x86-32 once PR3203 is fixed. We really can't do much better
1634 // than generic legalization for 64-bit multiplication-with-overflow, though.
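// For example, a call to @llvm.sadd.with.overflow.i32 becomes an ADD that
// also defines EFLAGS, with the overflow bit read back through a SETO-style
// condition rather than a separate comparison.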
1635 for (unsigned i = 0, e = 3+Subtarget->is64Bit(); i != e; ++i) {
1636 // Add/Sub/Mul with overflow operations are custom lowered.
1637 MVT VT = IntVTs[i];
1638 setOperationAction(ISD::SADDO, VT, Custom);
1639 setOperationAction(ISD::UADDO, VT, Custom);
1640 setOperationAction(ISD::SSUBO, VT, Custom);
1641 setOperationAction(ISD::USUBO, VT, Custom);
1642 setOperationAction(ISD::SMULO, VT, Custom);
1643 setOperationAction(ISD::UMULO, VT, Custom);
1647 if (!Subtarget->is64Bit()) {
1648 // These libcalls are not available in 32-bit.
1649 setLibcallName(RTLIB::SHL_I128, nullptr);
1650 setLibcallName(RTLIB::SRL_I128, nullptr);
1651 setLibcallName(RTLIB::SRA_I128, nullptr);
1654 // Combine sin / cos into one node or libcall if possible.
1655 if (Subtarget->hasSinCos()) {
1656 setLibcallName(RTLIB::SINCOS_F32, "sincosf");
1657 setLibcallName(RTLIB::SINCOS_F64, "sincos");
1658 if (Subtarget->isTargetDarwin()) {
1659 // For MacOSX, we don't want the normal expansion of a libcall to sincos.
1660 // We want to issue a libcall to __sincos_stret to avoid memory traffic.
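// Roughly, "s = sinf(x); c = cosf(x);" then becomes one __sincos_stret call
// that produces both results at once instead of two separate libcalls.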
1661 setOperationAction(ISD::FSINCOS, MVT::f64, Custom);
1662 setOperationAction(ISD::FSINCOS, MVT::f32, Custom);
1666 if (Subtarget->isTargetWin64()) {
1667 setOperationAction(ISD::SDIV, MVT::i128, Custom);
1668 setOperationAction(ISD::UDIV, MVT::i128, Custom);
1669 setOperationAction(ISD::SREM, MVT::i128, Custom);
1670 setOperationAction(ISD::UREM, MVT::i128, Custom);
1671 setOperationAction(ISD::SDIVREM, MVT::i128, Custom);
1672 setOperationAction(ISD::UDIVREM, MVT::i128, Custom);
1675 // We have target-specific dag combine patterns for the following nodes:
1676 setTargetDAGCombine(ISD::VECTOR_SHUFFLE);
1677 setTargetDAGCombine(ISD::EXTRACT_VECTOR_ELT);
1678 setTargetDAGCombine(ISD::BITCAST);
1679 setTargetDAGCombine(ISD::VSELECT);
1680 setTargetDAGCombine(ISD::SELECT);
1681 setTargetDAGCombine(ISD::SHL);
1682 setTargetDAGCombine(ISD::SRA);
1683 setTargetDAGCombine(ISD::SRL);
1684 setTargetDAGCombine(ISD::OR);
1685 setTargetDAGCombine(ISD::AND);
1686 setTargetDAGCombine(ISD::ADD);
1687 setTargetDAGCombine(ISD::FADD);
1688 setTargetDAGCombine(ISD::FSUB);
1689 setTargetDAGCombine(ISD::FMA);
1690 setTargetDAGCombine(ISD::SUB);
1691 setTargetDAGCombine(ISD::LOAD);
1692 setTargetDAGCombine(ISD::MLOAD);
1693 setTargetDAGCombine(ISD::STORE);
1694 setTargetDAGCombine(ISD::MSTORE);
1695 setTargetDAGCombine(ISD::ZERO_EXTEND);
1696 setTargetDAGCombine(ISD::ANY_EXTEND);
1697 setTargetDAGCombine(ISD::SIGN_EXTEND);
1698 setTargetDAGCombine(ISD::SIGN_EXTEND_INREG);
1699 setTargetDAGCombine(ISD::TRUNCATE);
1700 setTargetDAGCombine(ISD::SINT_TO_FP);
1701 setTargetDAGCombine(ISD::SETCC);
1702 setTargetDAGCombine(ISD::INTRINSIC_WO_CHAIN);
1703 setTargetDAGCombine(ISD::BUILD_VECTOR);
1704 setTargetDAGCombine(ISD::MUL);
1705 setTargetDAGCombine(ISD::XOR);
1707 computeRegisterProperties();
1709 // On Darwin, -Os means optimize for size without hurting performance,
1710 // do not reduce the limit.
1711 MaxStoresPerMemset = 16; // For @llvm.memset -> sequence of stores
1712 MaxStoresPerMemsetOptSize = Subtarget->isTargetDarwin() ? 16 : 8;
1713 MaxStoresPerMemcpy = 8; // For @llvm.memcpy -> sequence of stores
1714 MaxStoresPerMemcpyOptSize = Subtarget->isTargetDarwin() ? 8 : 4;
1715 MaxStoresPerMemmove = 8; // For @llvm.memmove -> sequence of stores
1716 MaxStoresPerMemmoveOptSize = Subtarget->isTargetDarwin() ? 8 : 4;
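// e.g. with these limits a 64-byte memset can still be emitted inline as
// four 16-byte SSE stores instead of a library call.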
1717 setPrefLoopAlignment(4); // 2^4 bytes.
1719 // Predictable cmov don't hurt on atom because it's in-order.
1720 PredictableSelectIsExpensive = !Subtarget->isAtom();
1721 EnableExtLdPromotion = true;
1722 setPrefFunctionAlignment(4); // 2^4 bytes.
1724 verifyIntrinsicTables();
1727 // This has so far only been implemented for 64-bit MachO.
1728 bool X86TargetLowering::useLoadStackGuardNode() const {
1729 return Subtarget->isTargetMachO() && Subtarget->is64Bit();
1732 TargetLoweringBase::LegalizeTypeAction
1733 X86TargetLowering::getPreferredVectorAction(EVT VT) const {
1734 if (ExperimentalVectorWideningLegalization &&
1735 VT.getVectorNumElements() != 1 &&
1736 VT.getVectorElementType().getSimpleVT() != MVT::i1)
1737 return TypeWidenVector;
1739 return TargetLoweringBase::getPreferredVectorAction(VT);
1742 EVT X86TargetLowering::getSetCCResultType(LLVMContext &, EVT VT) const {
1743 if (!VT.isVector())
1744 return Subtarget->hasAVX512() ? MVT::i1 : MVT::i8;
1746 const unsigned NumElts = VT.getVectorNumElements();
1747 const EVT EltVT = VT.getVectorElementType();
1748 if (VT.is512BitVector()) {
1749 if (Subtarget->hasAVX512())
1750 if (EltVT == MVT::i32 || EltVT == MVT::i64 ||
1751 EltVT == MVT::f32 || EltVT == MVT::f64)
1752 switch (NumElts) {
1753 case 8: return MVT::v8i1;
1754 case 16: return MVT::v16i1;
1756 if (Subtarget->hasBWI())
1757 if (EltVT == MVT::i8 || EltVT == MVT::i16)
1758 switch (NumElts) {
1759 case 32: return MVT::v32i1;
1760 case 64: return MVT::v64i1;
1764 if (VT.is256BitVector() || VT.is128BitVector()) {
1765 if (Subtarget->hasVLX())
1766 if (EltVT == MVT::i32 || EltVT == MVT::i64 ||
1767 EltVT == MVT::f32 || EltVT == MVT::f64)
1768 switch (NumElts) {
1769 case 2: return MVT::v2i1;
1770 case 4: return MVT::v4i1;
1771 case 8: return MVT::v8i1;
1773 if (Subtarget->hasBWI() && Subtarget->hasVLX())
1774 if (EltVT == MVT::i8 || EltVT == MVT::i16)
1775 switch (NumElts) {
1776 case 8: return MVT::v8i1;
1777 case 16: return MVT::v16i1;
1778 case 32: return MVT::v32i1;
1782 return VT.changeVectorElementTypeToInteger();
1785 /// Helper for getByValTypeAlignment to determine
1786 /// the desired ByVal argument alignment.
1787 static void getMaxByValAlign(Type *Ty, unsigned &MaxAlign) {
1790 if (VectorType *VTy = dyn_cast<VectorType>(Ty)) {
1791 if (VTy->getBitWidth() == 128)
1792 MaxAlign = 16;
1793 } else if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
1794 unsigned EltAlign = 0;
1795 getMaxByValAlign(ATy->getElementType(), EltAlign);
1796 if (EltAlign > MaxAlign)
1797 MaxAlign = EltAlign;
1798 } else if (StructType *STy = dyn_cast<StructType>(Ty)) {
1799 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
1800 unsigned EltAlign = 0;
1801 getMaxByValAlign(STy->getElementType(i), EltAlign);
1802 if (EltAlign > MaxAlign)
1803 MaxAlign = EltAlign;
1810 /// Return the desired alignment for ByVal aggregate
1811 /// function arguments in the caller parameter area. For X86, aggregates
1812 /// that contain SSE vectors are placed at 16-byte boundaries while the rest
1813 /// are at 4-byte boundaries.
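/// For example, a 32-bit byval struct that contains a <4 x float> member is
/// placed at a 16-byte boundary, while a struct of plain ints keeps the
/// default 4-byte alignment.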
1814 unsigned X86TargetLowering::getByValTypeAlignment(Type *Ty) const {
1815 if (Subtarget->is64Bit()) {
1816 // Max of 8 and alignment of type.
1817 unsigned TyAlign = TD->getABITypeAlignment(Ty);
1818 if (TyAlign > 8)
1819 return TyAlign;
1820 return 8;
1821 }
1823 unsigned Align = 4;
1824 if (Subtarget->hasSSE1())
1825 getMaxByValAlign(Ty, Align);
1826 return Align;
1827 }
1829 /// Returns the target specific optimal type for load
1830 /// and store operations as a result of memset, memcpy, and memmove
1831 /// lowering. If DstAlign is zero, it means the destination can be given any
1832 /// alignment (any constraint can be satisfied). Similarly, if SrcAlign is zero
1833 /// it means there is no need to check it against an alignment requirement,
1834 /// probably because the source does not need to be loaded. If 'IsMemset' is
1835 /// true, that means it's expanding a memset. If 'ZeroMemset' is true, that
1836 /// means it's a memset of zero. 'MemcpyStrSrc' indicates whether the memcpy
1837 /// source is constant so it does not need to be loaded.
1838 /// It returns EVT::Other if the type should be determined using generic
1839 /// target-independent logic.
1840 EVT
1841 X86TargetLowering::getOptimalMemOpType(uint64_t Size,
1842 unsigned DstAlign, unsigned SrcAlign,
1843 bool IsMemset, bool ZeroMemset,
1844 bool MemcpyStrSrc,
1845 MachineFunction &MF) const {
1846 const Function *F = MF.getFunction();
1847 if ((!IsMemset || ZeroMemset) &&
1848 !F->hasFnAttribute(Attribute::NoImplicitFloat)) {
1850 (Subtarget->isUnalignedMemAccessFast() ||
1851 ((DstAlign == 0 || DstAlign >= 16) &&
1852 (SrcAlign == 0 || SrcAlign >= 16)))) {
1854 if (Subtarget->hasInt256())
1856 if (Subtarget->hasFp256())
1859 if (Subtarget->hasSSE2())
1861 if (Subtarget->hasSSE1())
1863 } else if (!MemcpyStrSrc && Size >= 8 &&
1864 !Subtarget->is64Bit() &&
1865 Subtarget->hasSSE2()) {
1866 // Do not use f64 to lower memcpy if source is string constant. It's
1867 // better to use i32 to avoid the loads.
1871 if (Subtarget->is64Bit() && Size >= 8)
1876 bool X86TargetLowering::isSafeMemOpType(MVT VT) const {
1877 if (VT == MVT::f32)
1878 return X86ScalarSSEf32;
1879 else if (VT == MVT::f64)
1880 return X86ScalarSSEf64;
1881 return true;
1882 }
1885 X86TargetLowering::allowsMisalignedMemoryAccesses(EVT VT,
1890 *Fast = Subtarget->isUnalignedMemAccessFast();
1894 /// Return the entry encoding for a jump table in the
1895 /// current function. The returned value is a member of the
1896 /// MachineJumpTableInfo::JTEntryKind enum.
1897 unsigned X86TargetLowering::getJumpTableEncoding() const {
1898 // In GOT pic mode, each entry in the jump table is emitted as a @GOTOFF
1900 if (getTargetMachine().getRelocationModel() == Reloc::PIC_ &&
1901 Subtarget->isPICStyleGOT())
1902 return MachineJumpTableInfo::EK_Custom32;
1904 // Otherwise, use the normal jump table encoding heuristics.
1905 return TargetLowering::getJumpTableEncoding();
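// A custom (EK_Custom32) entry is a 32-bit @GOTOFF offset, roughly
//   .long .LBB0_2@GOTOFF
// so the indirect branch adds the PIC base register to the loaded entry.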
1909 X86TargetLowering::LowerCustomJumpTableEntry(const MachineJumpTableInfo *MJTI,
1910 const MachineBasicBlock *MBB,
1911 unsigned uid, MCContext &Ctx) const {
1912 assert(MBB->getParent()->getTarget().getRelocationModel() == Reloc::PIC_ &&
1913 Subtarget->isPICStyleGOT());
1914 // In 32-bit ELF systems, our jump table entries are formed with @GOTOFF
1916 return MCSymbolRefExpr::Create(MBB->getSymbol(),
1917 MCSymbolRefExpr::VK_GOTOFF, Ctx);
1920 /// Returns relocation base for the given PIC jumptable.
1921 SDValue X86TargetLowering::getPICJumpTableRelocBase(SDValue Table,
1922 SelectionDAG &DAG) const {
1923 if (!Subtarget->is64Bit())
1924 // This doesn't have SDLoc associated with it, but is not really the
1925 // same as a Register.
1926 return DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), getPointerTy());
1930 /// This returns the relocation base for the given PIC jumptable,
1931 /// the same as getPICJumpTableRelocBase, but as an MCExpr.
1932 const MCExpr *X86TargetLowering::
1933 getPICJumpTableRelocBaseExpr(const MachineFunction *MF, unsigned JTI,
1934 MCContext &Ctx) const {
1935 // X86-64 uses RIP relative addressing based on the jump table label.
1936 if (Subtarget->isPICStyleRIPRel())
1937 return TargetLowering::getPICJumpTableRelocBaseExpr(MF, JTI, Ctx);
1939 // Otherwise, the reference is relative to the PIC base.
1940 return MCSymbolRefExpr::Create(MF->getPICBaseSymbol(), Ctx);
1943 // FIXME: Why this routine is here? Move to RegInfo!
1944 std::pair<const TargetRegisterClass*, uint8_t>
1945 X86TargetLowering::findRepresentativeClass(MVT VT) const{
1946 const TargetRegisterClass *RRC = nullptr;
1947 uint8_t Cost = 1;
1948 switch (VT.SimpleTy) {
1949 default:
1950 return TargetLowering::findRepresentativeClass(VT);
1951 case MVT::i8: case MVT::i16: case MVT::i32: case MVT::i64:
1952 RRC = Subtarget->is64Bit() ? &X86::GR64RegClass : &X86::GR32RegClass;
1953 break;
1954 case MVT::x86mmx:
1955 RRC = &X86::VR64RegClass;
1957 case MVT::f32: case MVT::f64:
1958 case MVT::v16i8: case MVT::v8i16: case MVT::v4i32: case MVT::v2i64:
1959 case MVT::v4f32: case MVT::v2f64:
1960 case MVT::v32i8: case MVT::v8i32: case MVT::v4i64: case MVT::v8f32:
1962 RRC = &X86::VR128RegClass;
1965 return std::make_pair(RRC, Cost);
1968 bool X86TargetLowering::getStackCookieLocation(unsigned &AddressSpace,
1969 unsigned &Offset) const {
1970 if (!Subtarget->isTargetLinux())
1971 return false;
1973 if (Subtarget->is64Bit()) {
1974 // %fs:0x28, unless we're using a Kernel code model, in which case it's %gs:
1976 if (getTargetMachine().getCodeModel() == CodeModel::Kernel)
1988 bool X86TargetLowering::isNoopAddrSpaceCast(unsigned SrcAS,
1989 unsigned DestAS) const {
1990 assert(SrcAS != DestAS && "Expected different address spaces!");
1992 return SrcAS < 256 && DestAS < 256;
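// e.g. casting between addrspace(0) and addrspace(1) pointers is a no-op,
// while addrspace(256)/(257) (the gs/fs segment spaces) are excluded because
// they change how the pointer is dereferenced.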
1995 //===----------------------------------------------------------------------===//
1996 // Return Value Calling Convention Implementation
1997 //===----------------------------------------------------------------------===//
1999 #include "X86GenCallingConv.inc"
2002 X86TargetLowering::CanLowerReturn(CallingConv::ID CallConv,
2003 MachineFunction &MF, bool isVarArg,
2004 const SmallVectorImpl<ISD::OutputArg> &Outs,
2005 LLVMContext &Context) const {
2006 SmallVector<CCValAssign, 16> RVLocs;
2007 CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context);
2008 return CCInfo.CheckReturn(Outs, RetCC_X86);
2011 const MCPhysReg *X86TargetLowering::getScratchRegisters(CallingConv::ID) const {
2012 static const MCPhysReg ScratchRegs[] = { X86::R11, 0 };
2017 X86TargetLowering::LowerReturn(SDValue Chain,
2018 CallingConv::ID CallConv, bool isVarArg,
2019 const SmallVectorImpl<ISD::OutputArg> &Outs,
2020 const SmallVectorImpl<SDValue> &OutVals,
2021 SDLoc dl, SelectionDAG &DAG) const {
2022 MachineFunction &MF = DAG.getMachineFunction();
2023 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
2025 SmallVector<CCValAssign, 16> RVLocs;
2026 CCState CCInfo(CallConv, isVarArg, MF, RVLocs, *DAG.getContext());
2027 CCInfo.AnalyzeReturn(Outs, RetCC_X86);
2029 SDValue Flag;
2030 SmallVector<SDValue, 6> RetOps;
2031 RetOps.push_back(Chain); // Operand #0 = Chain (updated below)
2032 // Operand #1 = Bytes To Pop
2033 RetOps.push_back(DAG.getTargetConstant(FuncInfo->getBytesToPopOnReturn(),
2036 // Copy the result values into the output registers.
2037 for (unsigned i = 0; i != RVLocs.size(); ++i) {
2038 CCValAssign &VA = RVLocs[i];
2039 assert(VA.isRegLoc() && "Can only return in registers!");
2040 SDValue ValToCopy = OutVals[i];
2041 EVT ValVT = ValToCopy.getValueType();
2043 // Promote values to the appropriate types.
2044 if (VA.getLocInfo() == CCValAssign::SExt)
2045 ValToCopy = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), ValToCopy);
2046 else if (VA.getLocInfo() == CCValAssign::ZExt)
2047 ValToCopy = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), ValToCopy);
2048 else if (VA.getLocInfo() == CCValAssign::AExt)
2049 ValToCopy = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), ValToCopy);
2050 else if (VA.getLocInfo() == CCValAssign::BCvt)
2051 ValToCopy = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), ValToCopy);
2053 assert(VA.getLocInfo() != CCValAssign::FPExt &&
2054 "Unexpected FP-extend for return value.");
2056 // If this is x86-64, and we disabled SSE, we can't return FP values,
2057 // or SSE or MMX vectors.
2058 if ((ValVT == MVT::f32 || ValVT == MVT::f64 ||
2059 VA.getLocReg() == X86::XMM0 || VA.getLocReg() == X86::XMM1) &&
2060 (Subtarget->is64Bit() && !Subtarget->hasSSE1())) {
2061 report_fatal_error("SSE register return with SSE disabled");
2063 // Likewise we can't return F64 values with SSE1 only. gcc does so, but
2064 // llvm-gcc has never done it right and no one has noticed, so this
2065 // should be OK for now.
2066 if (ValVT == MVT::f64 &&
2067 (Subtarget->is64Bit() && !Subtarget->hasSSE2()))
2068 report_fatal_error("SSE2 register return with SSE2 disabled");
2070 // Returns in ST0/ST1 are handled specially: these are pushed as operands to
2071 // the RET instruction and handled by the FP Stackifier.
2072 if (VA.getLocReg() == X86::FP0 ||
2073 VA.getLocReg() == X86::FP1) {
2074 // If this is a copy from an xmm register to ST(0), use an FPExtend to
2075 // change the value to the FP stack register class.
2076 if (isScalarFPTypeInSSEReg(VA.getValVT()))
2077 ValToCopy = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f80, ValToCopy);
2078 RetOps.push_back(ValToCopy);
2079 // Don't emit a copytoreg.
2080 continue;
2081 }
2083 // 64-bit vector (MMX) values are returned in XMM0 / XMM1 except for v1i64
2084 // which is returned in RAX / RDX.
2085 if (Subtarget->is64Bit()) {
2086 if (ValVT == MVT::x86mmx) {
2087 if (VA.getLocReg() == X86::XMM0 || VA.getLocReg() == X86::XMM1) {
2088 ValToCopy = DAG.getNode(ISD::BITCAST, dl, MVT::i64, ValToCopy);
2089 ValToCopy = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64,
2090 ValToCopy);
2091 // If we don't have SSE2 available, convert to v4f32 so the generated
2092 // register is legal.
2093 if (!Subtarget->hasSSE2())
2094 ValToCopy = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, ValToCopy);
2099 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), ValToCopy, Flag);
2100 Flag = Chain.getValue(1);
2101 RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
2104 // The x86-64 ABIs require that for returning structs by value we copy
2105 // the sret argument into %rax/%eax (depending on ABI) for the return.
2106 // Win32 requires us to put the sret argument to %eax as well.
2107 // We saved the argument into a virtual register in the entry block,
2108 // so now we copy the value out and into %rax/%eax.
2110 // Checking Function.hasStructRetAttr() here is insufficient because the IR
2111 // may not have an explicit sret argument. If FuncInfo.CanLowerReturn is
2112 // false, then an sret argument may be implicitly inserted in the SelDAG. In
2113 // either case FuncInfo->setSRetReturnReg() will have been called.
2114 if (unsigned SRetReg = FuncInfo->getSRetReturnReg()) {
2115 assert((Subtarget->is64Bit() || Subtarget->isTargetKnownWindowsMSVC()) &&
2116 "No need for an sret register");
2117 SDValue Val = DAG.getCopyFromReg(Chain, dl, SRetReg, getPointerTy());
2119 unsigned RetValReg
2120 = (Subtarget->is64Bit() && !Subtarget->isTarget64BitILP32()) ?
2121 X86::RAX : X86::EAX;
2122 Chain = DAG.getCopyToReg(Chain, dl, RetValReg, Val, Flag);
2123 Flag = Chain.getValue(1);
2125 // RAX/EAX now acts like a return value.
2126 RetOps.push_back(DAG.getRegister(RetValReg, getPointerTy()));
2129 RetOps[0] = Chain; // Update chain.
2131 // Add the flag if we have it.
2132 if (Flag.getNode())
2133 RetOps.push_back(Flag);
2135 return DAG.getNode(X86ISD::RET_FLAG, dl, MVT::Other, RetOps);
2138 bool X86TargetLowering::isUsedByReturnOnly(SDNode *N, SDValue &Chain) const {
2139 if (N->getNumValues() != 1)
2140 return false;
2141 if (!N->hasNUsesOfValue(1, 0))
2142 return false;
2144 SDValue TCChain = Chain;
2145 SDNode *Copy = *N->use_begin();
2146 if (Copy->getOpcode() == ISD::CopyToReg) {
2147 // If the copy has a glue operand, we conservatively assume it isn't safe to
2148 // perform a tail call.
2149 if (Copy->getOperand(Copy->getNumOperands()-1).getValueType() == MVT::Glue)
2150 return false;
2151 TCChain = Copy->getOperand(0);
2152 } else if (Copy->getOpcode() != ISD::FP_EXTEND)
2153 return false;
2155 bool HasRet = false;
2156 for (SDNode::use_iterator UI = Copy->use_begin(), UE = Copy->use_end();
2158 if (UI->getOpcode() != X86ISD::RET_FLAG)
2160 // If we are returning more than one value, we can definitely
2161 // not make a tail call see PR19530
2162 if (UI->getNumOperands() > 4)
2164 if (UI->getNumOperands() == 4 &&
2165 UI->getOperand(UI->getNumOperands()-1).getValueType() != MVT::Glue)
2178 X86TargetLowering::getTypeForExtArgOrReturn(LLVMContext &Context, EVT VT,
2179 ISD::NodeType ExtendKind) const {
2181 // TODO: Is this also valid on 32-bit?
2182 if (Subtarget->is64Bit() && VT == MVT::i1 && ExtendKind == ISD::ZERO_EXTEND)
2183 ReturnMVT = MVT::i8;
2185 ReturnMVT = MVT::i32;
2187 EVT MinVT = getRegisterType(Context, ReturnMVT);
2188 return VT.bitsLT(MinVT) ? MinVT : VT;
2191 /// Lower the result values of a call into the
2192 /// appropriate copies out of appropriate physical registers.
2195 X86TargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
2196 CallingConv::ID CallConv, bool isVarArg,
2197 const SmallVectorImpl<ISD::InputArg> &Ins,
2198 SDLoc dl, SelectionDAG &DAG,
2199 SmallVectorImpl<SDValue> &InVals) const {
2201 // Assign locations to each value returned by this call.
2202 SmallVector<CCValAssign, 16> RVLocs;
2203 bool Is64Bit = Subtarget->is64Bit();
2204 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
2205 *DAG.getContext());
2206 CCInfo.AnalyzeCallResult(Ins, RetCC_X86);
2208 // Copy all of the result registers out of their specified physreg.
2209 for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
2210 CCValAssign &VA = RVLocs[i];
2211 EVT CopyVT = VA.getValVT();
2213 // If this is x86-64, and we disabled SSE, we can't return FP values
2214 if ((CopyVT == MVT::f32 || CopyVT == MVT::f64) &&
2215 ((Is64Bit || Ins[i].Flags.isInReg()) && !Subtarget->hasSSE1())) {
2216 report_fatal_error("SSE register return with SSE disabled");
2219 // If we prefer to use the value in xmm registers, copy it out as f80 and
2220 // use a truncate to move it from fp stack reg to xmm reg.
2221 if ((VA.getLocReg() == X86::FP0 || VA.getLocReg() == X86::FP1) &&
2222 isScalarFPTypeInSSEReg(VA.getValVT()))
2223 CopyVT = MVT::f80;
2225 Chain = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(),
2226 CopyVT, InFlag).getValue(1);
2227 SDValue Val = Chain.getValue(0);
2229 if (CopyVT != VA.getValVT())
2230 Val = DAG.getNode(ISD::FP_ROUND, dl, VA.getValVT(), Val,
2231 // This truncation won't change the value.
2232 DAG.getIntPtrConstant(1));
2234 InFlag = Chain.getValue(2);
2235 InVals.push_back(Val);
2236 }
2238 return Chain;
2239 }
2241 //===----------------------------------------------------------------------===//
2242 // C & StdCall & Fast Calling Convention implementation
2243 //===----------------------------------------------------------------------===//
2244 // The StdCall calling convention is the standard for many Windows API
2245 // routines. It differs from the C calling convention only slightly: the
2246 // callee cleans up the stack instead of the caller, and symbols are
2247 // decorated (e.g. _name@N). It doesn't support any vector arguments.
2248 // For info on fast calling convention see Fast Calling Convention (tail call)
2249 // implementation LowerX86_32FastCCCallTo.
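// For example, a 32-bit function declared as
//   int __stdcall Sum(int a, int b);
// is decorated as _Sum@8 and returns with "ret 8", popping its own eight
// bytes of arguments.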
2251 /// CallIsStructReturn - Determines whether a call uses struct return
2252 /// semantics.
2253 enum StructReturnType {
2254 NotStructReturn,
2255 RegStructReturn,
2256 StackStructReturn
2257 };
2258 static StructReturnType
2259 callIsStructReturn(const SmallVectorImpl<ISD::OutputArg> &Outs) {
2260 if (Outs.empty())
2261 return NotStructReturn;
2263 const ISD::ArgFlagsTy &Flags = Outs[0].Flags;
2264 if (!Flags.isSRet())
2265 return NotStructReturn;
2266 if (Flags.isInReg())
2267 return RegStructReturn;
2268 return StackStructReturn;
2271 /// Determines whether a function uses struct return semantics.
2272 static StructReturnType
2273 argsAreStructReturn(const SmallVectorImpl<ISD::InputArg> &Ins) {
2274 if (Ins.empty())
2275 return NotStructReturn;
2277 const ISD::ArgFlagsTy &Flags = Ins[0].Flags;
2278 if (!Flags.isSRet())
2279 return NotStructReturn;
2280 if (Flags.isInReg())
2281 return RegStructReturn;
2282 return StackStructReturn;
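// e.g. when a frontend returns a large struct by value it rewrites the
// function to take a hidden first argument marked 'sret', and that flag is
// what the two helpers above inspect.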
2285 /// Make a copy of an aggregate at address specified by "Src" to address
2286 /// "Dst" with size and alignment information specified by the specific
2287 /// parameter attribute. The copy will be passed as a byval function parameter.
2289 CreateCopyOfByValArgument(SDValue Src, SDValue Dst, SDValue Chain,
2290 ISD::ArgFlagsTy Flags, SelectionDAG &DAG,
2292 SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), MVT::i32);
2294 return DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode, Flags.getByValAlign(),
2295 /*isVolatile*/false, /*AlwaysInline=*/true,
2296 MachinePointerInfo(), MachinePointerInfo());
2299 /// Return true if the calling convention is one that
2300 /// supports tail call optimization.
2301 static bool IsTailCallConvention(CallingConv::ID CC) {
2302 return (CC == CallingConv::Fast || CC == CallingConv::GHC ||
2303 CC == CallingConv::HiPE);
2306 /// \brief Return true if the calling convention is a C calling convention.
2307 static bool IsCCallConvention(CallingConv::ID CC) {
2308 return (CC == CallingConv::C || CC == CallingConv::X86_64_Win64 ||
2309 CC == CallingConv::X86_64_SysV);
2312 bool X86TargetLowering::mayBeEmittedAsTailCall(CallInst *CI) const {
2313 if (!CI->isTailCall() || getTargetMachine().Options.DisableTailCalls)
2314 return false;
2316 CallSite CS(CI);
2317 CallingConv::ID CalleeCC = CS.getCallingConv();
2318 if (!IsTailCallConvention(CalleeCC) && !IsCCallConvention(CalleeCC))
2319 return false;
2321 return true;
2322 }
2324 /// Return true if the function is being made into
2325 /// a tailcall target by changing its ABI.
2326 static bool FuncIsMadeTailCallSafe(CallingConv::ID CC,
2327 bool GuaranteedTailCallOpt) {
2328 return GuaranteedTailCallOpt && IsTailCallConvention(CC);
2332 X86TargetLowering::LowerMemArgument(SDValue Chain,
2333 CallingConv::ID CallConv,
2334 const SmallVectorImpl<ISD::InputArg> &Ins,
2335 SDLoc dl, SelectionDAG &DAG,
2336 const CCValAssign &VA,
2337 MachineFrameInfo *MFI,
2338 unsigned i) const {
2339 // Create the nodes corresponding to a load from this parameter slot.
2340 ISD::ArgFlagsTy Flags = Ins[i].Flags;
2341 bool AlwaysUseMutable = FuncIsMadeTailCallSafe(
2342 CallConv, DAG.getTarget().Options.GuaranteedTailCallOpt);
2343 bool isImmutable = !AlwaysUseMutable && !Flags.isByVal();
2345 EVT ValVT;
2346 // If value is passed by pointer we have address passed instead of the value
2347 // itself.
2348 if (VA.getLocInfo() == CCValAssign::Indirect)
2349 ValVT = VA.getLocVT();
2351 ValVT = VA.getValVT();
2353 // FIXME: For now, all byval parameter objects are marked mutable. This can be
2354 // changed with more analysis.
2355 // In case of tail call optimization mark all arguments mutable. Since they
2356 // could be overwritten by lowering of arguments in case of a tail call.
2357 if (Flags.isByVal()) {
2358 unsigned Bytes = Flags.getByValSize();
2359 if (Bytes == 0) Bytes = 1; // Don't create zero-sized stack objects.
2360 int FI = MFI->CreateFixedObject(Bytes, VA.getLocMemOffset(), isImmutable);
2361 return DAG.getFrameIndex(FI, getPointerTy());
2362 } else {
2363 int FI = MFI->CreateFixedObject(ValVT.getSizeInBits()/8,
2364 VA.getLocMemOffset(), isImmutable);
2365 SDValue FIN = DAG.getFrameIndex(FI, getPointerTy());
2366 return DAG.getLoad(ValVT, dl, Chain, FIN,
2367 MachinePointerInfo::getFixedStack(FI),
2368 false, false, false, 0);
2372 // FIXME: Get this from tablegen.
2373 static ArrayRef<MCPhysReg> get64BitArgumentGPRs(CallingConv::ID CallConv,
2374 const X86Subtarget *Subtarget) {
2375 assert(Subtarget->is64Bit());
2377 if (Subtarget->isCallingConvWin64(CallConv)) {
2378 static const MCPhysReg GPR64ArgRegsWin64[] = {
2379 X86::RCX, X86::RDX, X86::R8, X86::R9
2381 return makeArrayRef(std::begin(GPR64ArgRegsWin64), std::end(GPR64ArgRegsWin64));
2384 static const MCPhysReg GPR64ArgRegs64Bit[] = {
2385 X86::RDI, X86::RSI, X86::RDX, X86::RCX, X86::R8, X86::R9
2387 return makeArrayRef(std::begin(GPR64ArgRegs64Bit), std::end(GPR64ArgRegs64Bit));
2390 // FIXME: Get this from tablegen.
2391 static ArrayRef<MCPhysReg> get64BitArgumentXMMs(MachineFunction &MF,
2392 CallingConv::ID CallConv,
2393 const X86Subtarget *Subtarget) {
2394 assert(Subtarget->is64Bit());
2395 if (Subtarget->isCallingConvWin64(CallConv)) {
2396 // The XMM registers which might contain var arg parameters are shadowed
2397 // in their paired GPR. So we only need to save the GPR to their home
2398 // slots.
2399 // TODO: __vectorcall will change this.
2400 return None;
2401 }
2403 const Function *Fn = MF.getFunction();
2404 bool NoImplicitFloatOps = Fn->hasFnAttribute(Attribute::NoImplicitFloat);
2405 assert(!(MF.getTarget().Options.UseSoftFloat && NoImplicitFloatOps) &&
2406 "SSE register cannot be used when SSE is disabled!");
2407 if (MF.getTarget().Options.UseSoftFloat || NoImplicitFloatOps ||
2408 !Subtarget->hasSSE1())
2409 // Kernel mode asks for SSE to be disabled, so there are no XMM argument
2410 // registers.
2411 return None;
2413 static const MCPhysReg XMMArgRegs64Bit[] = {
2414 X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
2415 X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7
2417 return makeArrayRef(std::begin(XMMArgRegs64Bit), std::end(XMMArgRegs64Bit));
2421 X86TargetLowering::LowerFormalArguments(SDValue Chain,
2422 CallingConv::ID CallConv,
2423 bool isVarArg,
2424 const SmallVectorImpl<ISD::InputArg> &Ins,
2425 SDLoc dl,
2426 SelectionDAG &DAG,
2427 SmallVectorImpl<SDValue> &InVals)
2428 const {
2429 MachineFunction &MF = DAG.getMachineFunction();
2430 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
2432 const Function* Fn = MF.getFunction();
2433 if (Fn->hasExternalLinkage() &&
2434 Subtarget->isTargetCygMing() &&
2435 Fn->getName() == "main")
2436 FuncInfo->setForceFramePointer(true);
2438 MachineFrameInfo *MFI = MF.getFrameInfo();
2439 bool Is64Bit = Subtarget->is64Bit();
2440 bool IsWin64 = Subtarget->isCallingConvWin64(CallConv);
2442 assert(!(isVarArg && IsTailCallConvention(CallConv)) &&
2443 "Var args not supported with calling convention fastcc, ghc or hipe");
2445 // Assign locations to all of the incoming arguments.
2446 SmallVector<CCValAssign, 16> ArgLocs;
2447 CCState CCInfo(CallConv, isVarArg, MF, ArgLocs, *DAG.getContext());
2449 // Allocate shadow area for Win64
2451 CCInfo.AllocateStack(32, 8);
2453 CCInfo.AnalyzeFormalArguments(Ins, CC_X86);
2455 unsigned LastVal = ~0U;
2457 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
2458 CCValAssign &VA = ArgLocs[i];
2459 // TODO: If an arg is passed in two places (e.g. reg and stack), skip later
2461 assert(VA.getValNo() != LastVal &&
2462 "Don't support value assigned to multiple locs yet");
2464 LastVal = VA.getValNo();
2466 if (VA.isRegLoc()) {
2467 EVT RegVT = VA.getLocVT();
2468 const TargetRegisterClass *RC;
2469 if (RegVT == MVT::i32)
2470 RC = &X86::GR32RegClass;
2471 else if (Is64Bit && RegVT == MVT::i64)
2472 RC = &X86::GR64RegClass;
2473 else if (RegVT == MVT::f32)
2474 RC = &X86::FR32RegClass;
2475 else if (RegVT == MVT::f64)
2476 RC = &X86::FR64RegClass;
2477 else if (RegVT.is512BitVector())
2478 RC = &X86::VR512RegClass;
2479 else if (RegVT.is256BitVector())
2480 RC = &X86::VR256RegClass;
2481 else if (RegVT.is128BitVector())
2482 RC = &X86::VR128RegClass;
2483 else if (RegVT == MVT::x86mmx)
2484 RC = &X86::VR64RegClass;
2485 else if (RegVT == MVT::i1)
2486 RC = &X86::VK1RegClass;
2487 else if (RegVT == MVT::v8i1)
2488 RC = &X86::VK8RegClass;
2489 else if (RegVT == MVT::v16i1)
2490 RC = &X86::VK16RegClass;
2491 else if (RegVT == MVT::v32i1)
2492 RC = &X86::VK32RegClass;
2493 else if (RegVT == MVT::v64i1)
2494 RC = &X86::VK64RegClass;
2496 llvm_unreachable("Unknown argument type!");
2498 unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);
2499 ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, RegVT);
2501 // If this is an 8 or 16-bit value, it is really passed promoted to 32
2502 // bits. Insert an assert[sz]ext to capture this, then truncate to the
2504 if (VA.getLocInfo() == CCValAssign::SExt)
2505 ArgValue = DAG.getNode(ISD::AssertSext, dl, RegVT, ArgValue,
2506 DAG.getValueType(VA.getValVT()));
2507 else if (VA.getLocInfo() == CCValAssign::ZExt)
2508 ArgValue = DAG.getNode(ISD::AssertZext, dl, RegVT, ArgValue,
2509 DAG.getValueType(VA.getValVT()));
2510 else if (VA.getLocInfo() == CCValAssign::BCvt)
2511 ArgValue = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), ArgValue);
2513 if (VA.isExtInLoc()) {
2514 // Handle MMX values passed in XMM regs.
2515 if (RegVT.isVector())
2516 ArgValue = DAG.getNode(X86ISD::MOVDQ2Q, dl, VA.getValVT(), ArgValue);
2518 ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue);
2521 assert(VA.isMemLoc());
2522 ArgValue = LowerMemArgument(Chain, CallConv, Ins, dl, DAG, VA, MFI, i);
2525 // If value is passed via pointer - do a load.
2526 if (VA.getLocInfo() == CCValAssign::Indirect)
2527 ArgValue = DAG.getLoad(VA.getValVT(), dl, Chain, ArgValue,
2528 MachinePointerInfo(), false, false, false, 0);
2530 InVals.push_back(ArgValue);
2533 if (Subtarget->is64Bit() || Subtarget->isTargetKnownWindowsMSVC()) {
2534 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
2535 // The x86-64 ABIs require that for returning structs by value we copy
2536 // the sret argument into %rax/%eax (depending on ABI) for the return.
2537 // Win32 requires us to put the sret argument to %eax as well.
2538 // Save the argument into a virtual register so that we can access it
2539 // from the return points.
2540 if (Ins[i].Flags.isSRet()) {
2541 unsigned Reg = FuncInfo->getSRetReturnReg();
2542 if (Reg == 0) {
2543 MVT PtrTy = getPointerTy();
2544 Reg = MF.getRegInfo().createVirtualRegister(getRegClassFor(PtrTy));
2545 FuncInfo->setSRetReturnReg(Reg);
2546 }
2547 SDValue Copy = DAG.getCopyToReg(DAG.getEntryNode(), dl, Reg, InVals[i]);
2548 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Copy, Chain);
2554 unsigned StackSize = CCInfo.getNextStackOffset();
2555 // Align stack specially for tail calls.
2556 if (FuncIsMadeTailCallSafe(CallConv,
2557 MF.getTarget().Options.GuaranteedTailCallOpt))
2558 StackSize = GetAlignedArgumentStackSize(StackSize, DAG);
2560 // If the function takes variable number of arguments, make a frame index for
2561 // the start of the first vararg value... for expansion of llvm.va_start. We
2562 // can skip this if there are no va_start calls.
2563 if (MFI->hasVAStart() &&
2564 (Is64Bit || (CallConv != CallingConv::X86_FastCall &&
2565 CallConv != CallingConv::X86_ThisCall))) {
2566 FuncInfo->setVarArgsFrameIndex(
2567 MFI->CreateFixedObject(1, StackSize, true));
2570 // Figure out if XMM registers are in use.
2571 assert(!(MF.getTarget().Options.UseSoftFloat &&
2572 Fn->hasFnAttribute(Attribute::NoImplicitFloat)) &&
2573 "SSE register cannot be used when SSE is disabled!");
2575 // 64-bit calling conventions support varargs and register parameters, so we
2576 // have to do extra work to spill them in the prologue.
2577 if (Is64Bit && isVarArg && MFI->hasVAStart()) {
2578 // Find the first unallocated argument registers.
2579 ArrayRef<MCPhysReg> ArgGPRs = get64BitArgumentGPRs(CallConv, Subtarget);
2580 ArrayRef<MCPhysReg> ArgXMMs = get64BitArgumentXMMs(MF, CallConv, Subtarget);
2581 unsigned NumIntRegs =
2582 CCInfo.getFirstUnallocated(ArgGPRs.data(), ArgGPRs.size());
2583 unsigned NumXMMRegs =
2584 CCInfo.getFirstUnallocated(ArgXMMs.data(), ArgXMMs.size());
2585 assert(!(NumXMMRegs && !Subtarget->hasSSE1()) &&
2586 "SSE register cannot be used when SSE is disabled!");
2588 // Gather all the live in physical registers.
2589 SmallVector<SDValue, 6> LiveGPRs;
2590 SmallVector<SDValue, 8> LiveXMMRegs;
2591 SDValue ALVal;
2592 for (MCPhysReg Reg : ArgGPRs.slice(NumIntRegs)) {
2593 unsigned GPR = MF.addLiveIn(Reg, &X86::GR64RegClass);
2594 LiveGPRs.push_back(
2595 DAG.getCopyFromReg(Chain, dl, GPR, MVT::i64));
2597 if (!ArgXMMs.empty()) {
2598 unsigned AL = MF.addLiveIn(X86::AL, &X86::GR8RegClass);
2599 ALVal = DAG.getCopyFromReg(Chain, dl, AL, MVT::i8);
2600 for (MCPhysReg Reg : ArgXMMs.slice(NumXMMRegs)) {
2601 unsigned XMMReg = MF.addLiveIn(Reg, &X86::VR128RegClass);
2602 LiveXMMRegs.push_back(
2603 DAG.getCopyFromReg(Chain, dl, XMMReg, MVT::v4f32));
2604 }
2605 }
2607 if (IsWin64) {
2608 const TargetFrameLowering &TFI = *Subtarget->getFrameLowering();
2609 // Get to the caller-allocated home save location. Add 8 to account
2610 // for the return address.
2611 int HomeOffset = TFI.getOffsetOfLocalArea() + 8;
2612 FuncInfo->setRegSaveFrameIndex(
2613 MFI->CreateFixedObject(1, NumIntRegs * 8 + HomeOffset, false));
2614 // Fixup to set vararg frame on shadow area (4 x i64).
2615 if (NumIntRegs < 4)
2616 FuncInfo->setVarArgsFrameIndex(FuncInfo->getRegSaveFrameIndex());
2617 } else {
2618 // For X86-64, if there are vararg parameters that are passed via
2619 // registers, then we must store them to their spots on the stack so
2620 // they may be loaded by dereferencing the result of va_next.
2621 FuncInfo->setVarArgsGPOffset(NumIntRegs * 8);
2622 FuncInfo->setVarArgsFPOffset(ArgGPRs.size() * 8 + NumXMMRegs * 16);
2623 FuncInfo->setRegSaveFrameIndex(MFI->CreateStackObject(
2624 ArgGPRs.size() * 8 + ArgXMMs.size() * 16, 16, false));
2627 // Store the integer parameter registers.
2628 SmallVector<SDValue, 8> MemOps;
2629 SDValue RSFIN = DAG.getFrameIndex(FuncInfo->getRegSaveFrameIndex(),
2630 getPointerTy());
2631 unsigned Offset = FuncInfo->getVarArgsGPOffset();
2632 for (SDValue Val : LiveGPRs) {
2633 SDValue FIN = DAG.getNode(ISD::ADD, dl, getPointerTy(), RSFIN,
2634 DAG.getIntPtrConstant(Offset));
2635 SDValue Store =
2636 DAG.getStore(Val.getValue(1), dl, Val, FIN,
2637 MachinePointerInfo::getFixedStack(
2638 FuncInfo->getRegSaveFrameIndex(), Offset),
2639 false, false, 0);
2640 MemOps.push_back(Store);
2641 Offset += 8;
2642 }
2644 if (!ArgXMMs.empty() && NumXMMRegs != ArgXMMs.size()) {
2645 // Now store the XMM (fp + vector) parameter registers.
2646 SmallVector<SDValue, 12> SaveXMMOps;
2647 SaveXMMOps.push_back(Chain);
2648 SaveXMMOps.push_back(ALVal);
2649 SaveXMMOps.push_back(DAG.getIntPtrConstant(
2650 FuncInfo->getRegSaveFrameIndex()));
2651 SaveXMMOps.push_back(DAG.getIntPtrConstant(
2652 FuncInfo->getVarArgsFPOffset()));
2653 SaveXMMOps.insert(SaveXMMOps.end(), LiveXMMRegs.begin(),
2654 LiveXMMRegs.end());
2655 MemOps.push_back(DAG.getNode(X86ISD::VASTART_SAVE_XMM_REGS, dl,
2656 MVT::Other, SaveXMMOps));
2659 if (!MemOps.empty())
2660 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);
2663 if (isVarArg && MFI->hasMustTailInVarArgFunc()) {
2664 // Find the largest legal vector type.
2665 MVT VecVT = MVT::Other;
2666 // FIXME: Only some x86_32 calling conventions support AVX512.
2667 if (Subtarget->hasAVX512() &&
2668 (Is64Bit || (CallConv == CallingConv::X86_VectorCall ||
2669 CallConv == CallingConv::Intel_OCL_BI)))
2670 VecVT = MVT::v16f32;
2671 else if (Subtarget->hasAVX())
2672 VecVT = MVT::v8f32;
2673 else if (Subtarget->hasSSE2())
2674 VecVT = MVT::v4f32;
2676 // We forward some GPRs and some vector types.
2677 SmallVector<MVT, 2> RegParmTypes;
2678 MVT IntVT = Is64Bit ? MVT::i64 : MVT::i32;
2679 RegParmTypes.push_back(IntVT);
2680 if (VecVT != MVT::Other)
2681 RegParmTypes.push_back(VecVT);
2683 // Compute the set of forwarded registers. The rest are scratch.
2684 SmallVectorImpl<ForwardedRegister> &Forwards =
2685 FuncInfo->getForwardedMustTailRegParms();
2686 CCInfo.analyzeMustTailForwardedRegisters(Forwards, RegParmTypes, CC_X86);
2688 // Conservatively forward AL on x86_64, since it might be used for varargs.
2689 if (Is64Bit && !CCInfo.isAllocated(X86::AL)) {
2690 unsigned ALVReg = MF.addLiveIn(X86::AL, &X86::GR8RegClass);
2691 Forwards.push_back(ForwardedRegister(ALVReg, X86::AL, MVT::i8));
2694 // Copy all forwards from physical to virtual registers.
2695 for (ForwardedRegister &F : Forwards) {
2696 // FIXME: Can we use a less constrained schedule?
2697 SDValue RegVal = DAG.getCopyFromReg(Chain, dl, F.VReg, F.VT);
2698 F.VReg = MF.getRegInfo().createVirtualRegister(getRegClassFor(F.VT));
2699 Chain = DAG.getCopyToReg(Chain, dl, F.VReg, RegVal);
2703 // Some CCs need callee pop.
2704 if (X86::isCalleePop(CallConv, Is64Bit, isVarArg,
2705 MF.getTarget().Options.GuaranteedTailCallOpt)) {
2706 FuncInfo->setBytesToPopOnReturn(StackSize); // Callee pops everything.
2707 } else {
2708 FuncInfo->setBytesToPopOnReturn(0); // Callee pops nothing.
2709 // If this is an sret function, the return should pop the hidden pointer.
2710 if (!Is64Bit && !IsTailCallConvention(CallConv) &&
2711 !Subtarget->getTargetTriple().isOSMSVCRT() &&
2712 argsAreStructReturn(Ins) == StackStructReturn)
2713 FuncInfo->setBytesToPopOnReturn(4);
2714 }
2716 if (!Is64Bit) {
2717 // RegSaveFrameIndex is X86-64 only.
2718 FuncInfo->setRegSaveFrameIndex(0xAAAAAAA);
2719 if (CallConv == CallingConv::X86_FastCall ||
2720 CallConv == CallingConv::X86_ThisCall)
2721 // fastcc functions can't have varargs.
2722 FuncInfo->setVarArgsFrameIndex(0xAAAAAAA);
2725 FuncInfo->setArgumentStackSize(StackSize);
2731 X86TargetLowering::LowerMemOpCallTo(SDValue Chain,
2732 SDValue StackPtr, SDValue Arg,
2733 SDLoc dl, SelectionDAG &DAG,
2734 const CCValAssign &VA,
2735 ISD::ArgFlagsTy Flags) const {
2736 unsigned LocMemOffset = VA.getLocMemOffset();
2737 SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset);
2738 PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, PtrOff);
2739 if (Flags.isByVal())
2740 return CreateCopyOfByValArgument(Arg, PtrOff, Chain, Flags, DAG, dl);
2742 return DAG.getStore(Chain, dl, Arg, PtrOff,
2743 MachinePointerInfo::getStack(LocMemOffset),
2747 /// Emit a load of return address if tail call
2748 /// optimization is performed and it is required.
2750 X86TargetLowering::EmitTailCallLoadRetAddr(SelectionDAG &DAG,
2751 SDValue &OutRetAddr, SDValue Chain,
2752 bool IsTailCall, bool Is64Bit,
2753 int FPDiff, SDLoc dl) const {
2754 // Adjust the Return address stack slot.
2755 EVT VT = getPointerTy();
2756 OutRetAddr = getReturnAddressFrameIndex(DAG);
2758 // Load the "old" Return address.
2759 OutRetAddr = DAG.getLoad(VT, dl, Chain, OutRetAddr, MachinePointerInfo(),
2760 false, false, false, 0);
2761 return SDValue(OutRetAddr.getNode(), 1);
2764 /// Emit a store of the return address if tail call
2765 /// optimization is performed and it is required (FPDiff!=0).
2766 static SDValue EmitTailCallStoreRetAddr(SelectionDAG &DAG, MachineFunction &MF,
2767 SDValue Chain, SDValue RetAddrFrIdx,
2768 EVT PtrVT, unsigned SlotSize,
2769 int FPDiff, SDLoc dl) {
2770 // Store the return address to the appropriate stack slot.
2771 if (!FPDiff) return Chain;
2772 // Calculate the new stack slot for the return address.
2773 int NewReturnAddrFI =
2774 MF.getFrameInfo()->CreateFixedObject(SlotSize, (int64_t)FPDiff - SlotSize,
2776 SDValue NewRetAddrFrIdx = DAG.getFrameIndex(NewReturnAddrFI, PtrVT);
2777 Chain = DAG.getStore(Chain, dl, RetAddrFrIdx, NewRetAddrFrIdx,
2778 MachinePointerInfo::getFixedStack(NewReturnAddrFI),
2784 X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
2785 SmallVectorImpl<SDValue> &InVals) const {
2786 SelectionDAG &DAG = CLI.DAG;
2788 SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
2789 SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
2790 SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
2791 SDValue Chain = CLI.Chain;
2792 SDValue Callee = CLI.Callee;
2793 CallingConv::ID CallConv = CLI.CallConv;
2794 bool &isTailCall = CLI.IsTailCall;
2795 bool isVarArg = CLI.IsVarArg;
2797 MachineFunction &MF = DAG.getMachineFunction();
2798 bool Is64Bit = Subtarget->is64Bit();
2799 bool IsWin64 = Subtarget->isCallingConvWin64(CallConv);
2800 StructReturnType SR = callIsStructReturn(Outs);
2801 bool IsSibcall = false;
2802 X86MachineFunctionInfo *X86Info = MF.getInfo<X86MachineFunctionInfo>();
2804 if (MF.getTarget().Options.DisableTailCalls)
2805 isTailCall = false;
2807 bool IsMustTail = CLI.CS && CLI.CS->isMustTailCall();
2808 if (IsMustTail) {
2809 // Force this to be a tail call. The verifier rules are enough to ensure
2810 // that we can lower this successfully without moving the return address
2811 // around.
2812 isTailCall = true;
2813 } else if (isTailCall) {
2814 // Check if it's really possible to do a tail call.
2815 isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv,
2816 isVarArg, SR != NotStructReturn,
2817 MF.getFunction()->hasStructRetAttr(), CLI.RetTy,
2818 Outs, OutVals, Ins, DAG);
2820 // Sibcalls are automatically detected tailcalls which do not require
2821 // ABI changes.
2822 if (!MF.getTarget().Options.GuaranteedTailCallOpt && isTailCall)
2823 IsSibcall = true;
2825 if (isTailCall)
2826 ++NumTailCalls;
2827 }
2829 assert(!(isVarArg && IsTailCallConvention(CallConv)) &&
2830 "Var args not supported with calling convention fastcc, ghc or hipe");
2832 // Analyze operands of the call, assigning locations to each operand.
2833 SmallVector<CCValAssign, 16> ArgLocs;
2834 CCState CCInfo(CallConv, isVarArg, MF, ArgLocs, *DAG.getContext());
2836 // Allocate shadow area for Win64
2837 if (IsWin64)
2838 CCInfo.AllocateStack(32, 8);
2840 CCInfo.AnalyzeCallOperands(Outs, CC_X86);
2842 // Get a count of how many bytes are to be pushed on the stack.
2843 unsigned NumBytes = CCInfo.getNextStackOffset();
2844 if (IsSibcall)
2845 // This is a sibcall. The memory operands are already in place in the
2846 // caller's incoming argument area (i.e. its own caller's stack).
2847 NumBytes = 0;
2848 else if (MF.getTarget().Options.GuaranteedTailCallOpt &&
2849 IsTailCallConvention(CallConv))
2850 NumBytes = GetAlignedArgumentStackSize(NumBytes, DAG);
2853 if (isTailCall && !IsSibcall && !IsMustTail) {
2854 // Lower arguments at fp - stackoffset + fpdiff.
2855 unsigned NumBytesCallerPushed = X86Info->getBytesToPopOnReturn();
2857 FPDiff = NumBytesCallerPushed - NumBytes;
2859 // Set the delta of movement of the returnaddr stackslot.
2860 // But only update it if the new delta is larger in magnitude (more negative) than the previous one.
2861 if (FPDiff < X86Info->getTCReturnAddrDelta())
2862 X86Info->setTCReturnAddrDelta(FPDiff);
2865 unsigned NumBytesToPush = NumBytes;
2866 unsigned NumBytesToPop = NumBytes;
2868 // If we have an inalloca argument, all stack space has already been allocated
2869 // for us and will be right at the top of the stack. We don't support multiple
2870 // arguments passed in memory when using inalloca.
2871 if (!Outs.empty() && Outs.back().Flags.isInAlloca()) {
2873 if (!ArgLocs.back().isMemLoc())
2874 report_fatal_error("cannot use inalloca attribute on a register "
2876 if (ArgLocs.back().getLocMemOffset() != 0)
2877 report_fatal_error("any parameter with the inalloca attribute must be "
2878 "the only memory argument");
2882 Chain = DAG.getCALLSEQ_START(
2883 Chain, DAG.getIntPtrConstant(NumBytesToPush, true), dl);
2885 SDValue RetAddrFrIdx;
2886 // Load return address for tail calls.
2887 if (isTailCall && FPDiff)
2888 Chain = EmitTailCallLoadRetAddr(DAG, RetAddrFrIdx, Chain, isTailCall,
2889 Is64Bit, FPDiff, dl);
2891 SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
2892 SmallVector<SDValue, 8> MemOpChains;
2895 // Walk the register/memloc assignments, inserting copies/loads. In the case
2896 // of tail call optimization, arguments are handled later.
2897 const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
2898 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
2899 // Skip inalloca arguments, they have already been written.
2900 ISD::ArgFlagsTy Flags = Outs[i].Flags;
2901 if (Flags.isInAlloca())
2904 CCValAssign &VA = ArgLocs[i];
2905 EVT RegVT = VA.getLocVT();
2906 SDValue Arg = OutVals[i];
2907 bool isByVal = Flags.isByVal();
2909 // Promote the value if needed.
2910 switch (VA.getLocInfo()) {
2911 default: llvm_unreachable("Unknown loc info!");
2912 case CCValAssign::Full: break;
2913 case CCValAssign::SExt:
2914 Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, RegVT, Arg);
2916 case CCValAssign::ZExt:
2917 Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, RegVT, Arg);
2919 case CCValAssign::AExt:
2920 if (RegVT.is128BitVector()) {
2921 // Special case: passing MMX values in XMM registers.
2922 Arg = DAG.getNode(ISD::BITCAST, dl, MVT::i64, Arg);
2923 Arg = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64, Arg);
2924 Arg = getMOVL(DAG, dl, MVT::v2i64, DAG.getUNDEF(MVT::v2i64), Arg);
2926 Arg = DAG.getNode(ISD::ANY_EXTEND, dl, RegVT, Arg);
2928 case CCValAssign::BCvt:
2929 Arg = DAG.getNode(ISD::BITCAST, dl, RegVT, Arg);
2931 case CCValAssign::Indirect: {
2932 // Store the argument.
2933 SDValue SpillSlot = DAG.CreateStackTemporary(VA.getValVT());
2934 int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex();
2935 Chain = DAG.getStore(Chain, dl, Arg, SpillSlot,
2936 MachinePointerInfo::getFixedStack(FI),
2943 if (VA.isRegLoc()) {
2944 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
2945 if (isVarArg && IsWin64) {
2946 // Win64 ABI requires argument XMM reg to be copied to the corresponding
2947 // shadow reg if callee is a varargs function.
2948 unsigned ShadowReg = 0;
2949 switch (VA.getLocReg()) {
2950 case X86::XMM0: ShadowReg = X86::RCX; break;
2951 case X86::XMM1: ShadowReg = X86::RDX; break;
2952 case X86::XMM2: ShadowReg = X86::R8; break;
2953 case X86::XMM3: ShadowReg = X86::R9; break;
2956 RegsToPass.push_back(std::make_pair(ShadowReg, Arg));
2958 } else if (!IsSibcall && (!isTailCall || isByVal)) {
2959 assert(VA.isMemLoc());
2960 if (!StackPtr.getNode())
2961 StackPtr = DAG.getCopyFromReg(Chain, dl, RegInfo->getStackRegister(),
2963 MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, Arg,
2964 dl, DAG, VA, Flags));
2968 if (!MemOpChains.empty())
2969 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
2971 if (Subtarget->isPICStyleGOT()) {
2972 // ELF / PIC requires GOT in the EBX register before function calls via PLT
2975 RegsToPass.push_back(std::make_pair(unsigned(X86::EBX),
2976 DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), getPointerTy())));
2978 // If we are tail calling and generating PIC/GOT style code load the
2979 // address of the callee into ECX. The value in ecx is used as target of
2980 // the tail jump. This is done to circumvent the ebx/callee-saved problem
2981 // for tail calls on PIC/GOT architectures. Normally we would just put the
2982 // address of GOT into ebx and then call target@PLT. But for tail calls
2983 // ebx would be restored (since ebx is callee saved) before jumping to the callee.
2986 // Note: The actual moving to ECX is done further down.
2987 GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee);
2988 if (G && !G->getGlobal()->hasHiddenVisibility() &&
2989 !G->getGlobal()->hasProtectedVisibility())
2990 Callee = LowerGlobalAddress(Callee, DAG);
2991 else if (isa<ExternalSymbolSDNode>(Callee))
2992 Callee = LowerExternalSymbol(Callee, DAG);
2996 if (Is64Bit && isVarArg && !IsWin64 && !IsMustTail) {
2997 // From AMD64 ABI document:
2998 // For calls that may call functions that use varargs or stdargs
2999 // (prototype-less calls or calls to functions containing ellipsis (...) in
3000 // the declaration) %al is used as hidden argument to specify the number
3001 // of SSE registers used. The contents of %al do not need to match exactly
3002 // the number of registers, but must be an upper bound on the number of SSE
3003 // registers used and is in the range 0 - 8 inclusive.
3005 // Count the number of XMM registers allocated.
3006 static const MCPhysReg XMMArgRegs[] = {
3007 X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
3008 X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7
3010 unsigned NumXMMRegs = CCInfo.getFirstUnallocated(XMMArgRegs, 8);
3011 assert((Subtarget->hasSSE1() || !NumXMMRegs)
3012 && "SSE registers cannot be used when SSE is disabled");
3014 RegsToPass.push_back(std::make_pair(unsigned(X86::AL),
3015 DAG.getConstant(NumXMMRegs, MVT::i8)));
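// For instance, a varargs call that passes a single double in XMM0 reaches this
// point with NumXMMRegs == 1 and stores 1 into AL; a call with no SSE arguments
// stores 0.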
3018 if (isVarArg && IsMustTail) {
3019 const auto &Forwards = X86Info->getForwardedMustTailRegParms();
3020 for (const auto &F : Forwards) {
3021 SDValue Val = DAG.getCopyFromReg(Chain, dl, F.VReg, F.VT);
3022 RegsToPass.push_back(std::make_pair(unsigned(F.PReg), Val));
3026 // For tail calls lower the arguments to the 'real' stack slots. Sibcalls
3027 // don't need this because the eligibility check rejects calls that require
3028 // shuffling arguments passed in memory.
3029 if (!IsSibcall && isTailCall) {
3030 // Force all the incoming stack arguments to be loaded from the stack
3031 // before any new outgoing arguments are stored to the stack, because the
3032 // outgoing stack slots may alias the incoming argument stack slots, and
3033 // the alias isn't otherwise explicit. This is slightly more conservative
3034 // than necessary, because it means that each store effectively depends
3035 // on every argument instead of just those arguments it would clobber.
3036 SDValue ArgChain = DAG.getStackArgumentTokenFactor(Chain);
3038 SmallVector<SDValue, 8> MemOpChains2;
3041 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
3042 CCValAssign &VA = ArgLocs[i];
3045 assert(VA.isMemLoc());
3046 SDValue Arg = OutVals[i];
3047 ISD::ArgFlagsTy Flags = Outs[i].Flags;
3048 // Skip inalloca arguments. They don't require any work.
3049 if (Flags.isInAlloca())
3051 // Create frame index.
3052 int32_t Offset = VA.getLocMemOffset()+FPDiff;
3053 uint32_t OpSize = (VA.getLocVT().getSizeInBits()+7)/8;
3054 FI = MF.getFrameInfo()->CreateFixedObject(OpSize, Offset, true);
3055 FIN = DAG.getFrameIndex(FI, getPointerTy());
3057 if (Flags.isByVal()) {
3058 // Copy relative to framepointer.
3059 SDValue Source = DAG.getIntPtrConstant(VA.getLocMemOffset());
3060 if (!StackPtr.getNode())
3061 StackPtr = DAG.getCopyFromReg(Chain, dl,
3062 RegInfo->getStackRegister(),
3064 Source = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, Source);
3066 MemOpChains2.push_back(CreateCopyOfByValArgument(Source, FIN,
3070 // Store relative to framepointer.
3071 MemOpChains2.push_back(
3072 DAG.getStore(ArgChain, dl, Arg, FIN,
3073 MachinePointerInfo::getFixedStack(FI),
3078 if (!MemOpChains2.empty())
3079 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains2);
3081 // Store the return address to the appropriate stack slot.
3082 Chain = EmitTailCallStoreRetAddr(DAG, MF, Chain, RetAddrFrIdx,
3083 getPointerTy(), RegInfo->getSlotSize(),
3087 // Build a sequence of copy-to-reg nodes chained together with token chain
3088 // and flag operands which copy the outgoing args into registers.
3090 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
3091 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
3092 RegsToPass[i].second, InFlag);
3093 InFlag = Chain.getValue(1);
3096 if (DAG.getTarget().getCodeModel() == CodeModel::Large) {
3097 assert(Is64Bit && "Large code model is only legal in 64-bit mode.");
3098 // In the 64-bit large code model, we have to make all calls
3099 // through a register, since the call instruction's 32-bit
3100 // pc-relative offset may not be large enough to hold the whole address.
3102 } else if (Callee->getOpcode() == ISD::GlobalAddress) {
3103 // If the callee is a GlobalAddress node (quite common, every direct call
3104 // is) turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
3106 GlobalAddressSDNode* G = cast<GlobalAddressSDNode>(Callee);
3108 // We should use an extra load for direct calls to dllimported functions.
3110 const GlobalValue *GV = G->getGlobal();
3111 if (!GV->hasDLLImportStorageClass()) {
3112 unsigned char OpFlags = 0;
3113 bool ExtraLoad = false;
3114 unsigned WrapperKind = ISD::DELETED_NODE;
3116 // On ELF targets, in both X86-64 and X86-32 mode, direct calls to
3117 // external symbols must go through the PLT in PIC mode. If the symbol
3118 // has hidden or protected visibility, or if it is static or local, then
3119 // we don't need to use the PLT - we can directly call it.
3120 if (Subtarget->isTargetELF() &&
3121 DAG.getTarget().getRelocationModel() == Reloc::PIC_ &&
3122 GV->hasDefaultVisibility() && !GV->hasLocalLinkage()) {
3123 OpFlags = X86II::MO_PLT;
3124 } else if (Subtarget->isPICStyleStubAny() &&
3125 (GV->isDeclaration() || GV->isWeakForLinker()) &&
3126 (!Subtarget->getTargetTriple().isMacOSX() ||
3127 Subtarget->getTargetTriple().isMacOSXVersionLT(10, 5))) {
3128 // PC-relative references to external symbols should go through $stub,
3129 // unless we're building with the leopard linker or later, which
3130 // automatically synthesizes these stubs.
3131 OpFlags = X86II::MO_DARWIN_STUB;
3132 } else if (Subtarget->isPICStyleRIPRel() && isa<Function>(GV) &&
3133 cast<Function>(GV)->hasFnAttribute(Attribute::NonLazyBind)) {
3134 // If the function is marked as non-lazy, generate an indirect call
3135 // which loads from the GOT directly. This avoids runtime overhead
3136 // at the cost of eager binding (and one extra byte of encoding).
3137 OpFlags = X86II::MO_GOTPCREL;
3138 WrapperKind = X86ISD::WrapperRIP;
3142 Callee = DAG.getTargetGlobalAddress(GV, dl, getPointerTy(),
3143 G->getOffset(), OpFlags);
3145 // Add a wrapper if needed.
3146 if (WrapperKind != ISD::DELETED_NODE)
3147 Callee = DAG.getNode(X86ISD::WrapperRIP, dl, getPointerTy(), Callee);
3148 // Add extra indirection if needed.
3149 if (ExtraLoad)
3150 Callee = DAG.getLoad(getPointerTy(), dl, DAG.getEntryNode(), Callee,
3151 MachinePointerInfo::getGOT(),
3152 false, false, false, 0);
3154 } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
3155 unsigned char OpFlags = 0;
3157 // On ELF targets, in either X86-64 or X86-32 mode, direct calls to
3158 // external symbols should go through the PLT.
3159 if (Subtarget->isTargetELF() &&
3160 DAG.getTarget().getRelocationModel() == Reloc::PIC_) {
3161 OpFlags = X86II::MO_PLT;
3162 } else if (Subtarget->isPICStyleStubAny() &&
3163 (!Subtarget->getTargetTriple().isMacOSX() ||
3164 Subtarget->getTargetTriple().isMacOSXVersionLT(10, 5))) {
3165 // PC-relative references to external symbols should go through $stub,
3166 // unless we're building with the leopard linker or later, which
3167 // automatically synthesizes these stubs.
3168 OpFlags = X86II::MO_DARWIN_STUB;
3171 Callee = DAG.getTargetExternalSymbol(S->getSymbol(), getPointerTy(),
3173 } else if (Subtarget->isTarget64BitILP32() &&
3174 Callee->getValueType(0) == MVT::i32) {
3175 // Zero-extend the 32-bit Callee address into a 64-bit one, according to the x32 ABI
3176 Callee = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64, Callee);
3179 // Returns a chain & a flag for retval copy to use.
3180 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
3181 SmallVector<SDValue, 8> Ops;
3183 if (!IsSibcall && isTailCall) {
3184 Chain = DAG.getCALLSEQ_END(Chain,
3185 DAG.getIntPtrConstant(NumBytesToPop, true),
3186 DAG.getIntPtrConstant(0, true), InFlag, dl);
3187 InFlag = Chain.getValue(1);
3190 Ops.push_back(Chain);
3191 Ops.push_back(Callee);
3194 Ops.push_back(DAG.getConstant(FPDiff, MVT::i32));
3196 // Add argument registers to the end of the list so that they are known live
3198 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
3199 Ops.push_back(DAG.getRegister(RegsToPass[i].first,
3200 RegsToPass[i].second.getValueType()));
3202 // Add a register mask operand representing the call-preserved registers.
3203 const TargetRegisterInfo *TRI = Subtarget->getRegisterInfo();
3204 const uint32_t *Mask = TRI->getCallPreservedMask(CallConv);
3205 assert(Mask && "Missing call preserved mask for calling convention");
3206 Ops.push_back(DAG.getRegisterMask(Mask));
3208 if (InFlag.getNode())
3209 Ops.push_back(InFlag);
3213 //// If this is the first return lowered for this function, add the regs
3214 //// to the liveout set for the function.
3215 // This isn't right, although it's probably harmless on x86; liveouts
3216 // should be computed from returns not tail calls. Consider a void
3217 // function making a tail call to a function returning int.
3218 return DAG.getNode(X86ISD::TC_RETURN, dl, NodeTys, Ops);
3221 Chain = DAG.getNode(X86ISD::CALL, dl, NodeTys, Ops);
3222 InFlag = Chain.getValue(1);
3224 // Create the CALLSEQ_END node.
3225 unsigned NumBytesForCalleeToPop;
3226 if (X86::isCalleePop(CallConv, Is64Bit, isVarArg,
3227 DAG.getTarget().Options.GuaranteedTailCallOpt))
3228 NumBytesForCalleeToPop = NumBytes; // Callee pops everything
3229 else if (!Is64Bit && !IsTailCallConvention(CallConv) &&
3230 !Subtarget->getTargetTriple().isOSMSVCRT() &&
3231 SR == StackStructReturn)
3232 // If this is a call to a struct-return function, the callee
3233 // pops the hidden struct pointer, so we have to push it back.
3234 // This is common for Darwin/X86, Linux & Mingw32 targets.
3235 // For MSVC Win32 targets, the caller pops the hidden struct pointer.
3236 NumBytesForCalleeToPop = 4;
3238 NumBytesForCalleeToPop = 0; // Callee pops nothing.
3240 // Returns a flag for retval copy to use.
3242 Chain = DAG.getCALLSEQ_END(Chain,
3243 DAG.getIntPtrConstant(NumBytesToPop, true),
3244 DAG.getIntPtrConstant(NumBytesForCalleeToPop,
3247 InFlag = Chain.getValue(1);
3250 // Handle result values, copying them out of physregs into vregs that we
3252 return LowerCallResult(Chain, InFlag, CallConv, isVarArg,
3253 Ins, dl, DAG, InVals);
3256 //===----------------------------------------------------------------------===//
3257 // Fast Calling Convention (tail call) implementation
3258 //===----------------------------------------------------------------------===//
3260 // Like stdcall, the callee cleans up the arguments, except that ECX is
3261 // reserved for storing the address of the tail-called function. Only 2 registers
3262 // are free for argument passing (inreg). Tail call optimization is performed
3263 // provided:
3264 // * tailcallopt is enabled
3265 // * caller/callee are fastcc
3266 // On X86_64 architecture with GOT-style position independent code only local
3267 // (within module) calls are supported at the moment.
3268 // To keep the stack aligned according to the platform ABI, the function
3269 // GetAlignedArgumentStackSize ensures that the argument delta is always a multiple
3270 // of stack alignment. (Dynamic linkers need this - darwin's dyld for example)
3271 // If the callee of a tail call has more arguments than the caller, the
3272 // caller needs to make sure that there is room to move the RETADDR to. This is
3273 // achieved by reserving an area the size of the argument delta right after the
3274 // original RETADDR, but before the saved framepointer or the spilled registers
3275 // e.g. caller(arg1, arg2) calls callee(arg1, arg2,arg3,arg4)
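// In that example the callee needs more argument bytes than the caller provided,
// so FPDiff (computed in LowerCall above as NumBytesCallerPushed - NumBytes) is
// negative, and EmitTailCallLoadRetAddr/EmitTailCallStoreRetAddr move the return
// address by that delta before the tail jump.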
3287 /// GetAlignedArgumentStackSize - Align the stack size so that it is e.g. 16n + 12
3288 /// aligned for a 16-byte alignment requirement.
3290 X86TargetLowering::GetAlignedArgumentStackSize(unsigned StackSize,
3291 SelectionDAG& DAG) const {
3292 const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
3293 const TargetFrameLowering &TFI = *Subtarget->getFrameLowering();
3294 unsigned StackAlignment = TFI.getStackAlignment();
3295 uint64_t AlignMask = StackAlignment - 1;
3296 int64_t Offset = StackSize;
3297 unsigned SlotSize = RegInfo->getSlotSize();
3298 if ( (Offset & AlignMask) <= (StackAlignment - SlotSize) ) {
3299 // The low bits are already no larger than (StackAlignment - SlotSize), so just add the difference.
3300 Offset += ((StackAlignment - SlotSize) - (Offset & AlignMask));
3301 } else {
3302 // Mask out the lower bits and add StackAlignment once plus (StackAlignment - SlotSize).
3303 Offset = ((~AlignMask) & Offset) + StackAlignment +
3304 (StackAlignment-SlotSize);
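// Worked example (illustrative), assuming StackAlignment == 16 and SlotSize == 8:
// StackSize 20 -> (20 & 15) == 4 <= 8, so the result is 20 + (8 - 4) == 24;
// StackSize 30 -> (30 & 15) == 14 > 8, so the result is (30 & ~15) + 16 + 8 == 40.
// Either way the result has the form 16n + (StackAlignment - SlotSize).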
3309 /// MatchingStackOffset - Return true if the given stack call argument is
3310 /// already available in the same position (relatively) of the caller's
3311 /// incoming argument stack.
3313 bool MatchingStackOffset(SDValue Arg, unsigned Offset, ISD::ArgFlagsTy Flags,
3314 MachineFrameInfo *MFI, const MachineRegisterInfo *MRI,
3315 const X86InstrInfo *TII) {
3316 unsigned Bytes = Arg.getValueType().getSizeInBits() / 8;
3318 if (Arg.getOpcode() == ISD::CopyFromReg) {
3319 unsigned VR = cast<RegisterSDNode>(Arg.getOperand(1))->getReg();
3320 if (!TargetRegisterInfo::isVirtualRegister(VR))
3322 MachineInstr *Def = MRI->getVRegDef(VR);
3325 if (!Flags.isByVal()) {
3326 if (!TII->isLoadFromStackSlot(Def, FI))
3329 unsigned Opcode = Def->getOpcode();
3330 if ((Opcode == X86::LEA32r || Opcode == X86::LEA64r ||
3331 Opcode == X86::LEA64_32r) &&
3332 Def->getOperand(1).isFI()) {
3333 FI = Def->getOperand(1).getIndex();
3334 Bytes = Flags.getByValSize();
3338 } else if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Arg)) {
3339 if (Flags.isByVal())
3340 // ByVal argument is passed in as a pointer but it's now being
3341 // dereferenced. e.g.
3342 // define @foo(%struct.X* %A) {
3343 // tail call @bar(%struct.X* byval %A)
3346 SDValue Ptr = Ld->getBasePtr();
3347 FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(Ptr);
3350 FI = FINode->getIndex();
3351 } else if (Arg.getOpcode() == ISD::FrameIndex && Flags.isByVal()) {
3352 FrameIndexSDNode *FINode = cast<FrameIndexSDNode>(Arg);
3353 FI = FINode->getIndex();
3354 Bytes = Flags.getByValSize();
3358 assert(FI != INT_MAX);
3359 if (!MFI->isFixedObjectIndex(FI))
3361 return Offset == MFI->getObjectOffset(FI) && Bytes == MFI->getObjectSize(FI);
3364 /// IsEligibleForTailCallOptimization - Check whether the call is eligible
3365 /// for tail call optimization. Targets which want to do tail call
3366 /// optimization should implement this function.
3368 X86TargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
3369 CallingConv::ID CalleeCC,
3371 bool isCalleeStructRet,
3372 bool isCallerStructRet,
3374 const SmallVectorImpl<ISD::OutputArg> &Outs,
3375 const SmallVectorImpl<SDValue> &OutVals,
3376 const SmallVectorImpl<ISD::InputArg> &Ins,
3377 SelectionDAG &DAG) const {
3378 if (!IsTailCallConvention(CalleeCC) && !IsCCallConvention(CalleeCC))
3381 // If -tailcallopt is specified, make fastcc functions tail-callable.
3382 const MachineFunction &MF = DAG.getMachineFunction();
3383 const Function *CallerF = MF.getFunction();
3385 // If the function return type is x86_fp80 and the callee return type is not,
3386 // then the FP_EXTEND of the call result is not a nop. It's not safe to
3387 // perform a tailcall optimization here.
3388 if (CallerF->getReturnType()->isX86_FP80Ty() && !RetTy->isX86_FP80Ty())
3391 CallingConv::ID CallerCC = CallerF->getCallingConv();
3392 bool CCMatch = CallerCC == CalleeCC;
3393 bool IsCalleeWin64 = Subtarget->isCallingConvWin64(CalleeCC);
3394 bool IsCallerWin64 = Subtarget->isCallingConvWin64(CallerCC);
3396 if (DAG.getTarget().Options.GuaranteedTailCallOpt) {
3397 if (IsTailCallConvention(CalleeCC) && CCMatch)
3402 // Look for obvious safe cases to perform tail call optimization that do not
3403 // require ABI changes. This is what gcc calls sibcall.
3405 // Can't do sibcall if stack needs to be dynamically re-aligned. PEI needs to
3406 // emit a special epilogue.
3407 const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
3408 if (RegInfo->needsStackRealignment(MF))
3411 // Also avoid sibcall optimization if either caller or callee uses struct
3412 // return semantics.
3413 if (isCalleeStructRet || isCallerStructRet)
3416 // An stdcall/thiscall caller is expected to clean up its arguments; the
3417 // callee isn't going to do that.
3418 // FIXME: this is more restrictive than needed. We could produce a tailcall
3419 // when the stack adjustment matches. For example, with a thiscall that takes
3420 // only one argument.
3421 if (!CCMatch && (CallerCC == CallingConv::X86_StdCall ||
3422 CallerCC == CallingConv::X86_ThisCall))
3425 // Do not sibcall optimize vararg calls unless all arguments are passed via registers.
3427 if (isVarArg && !Outs.empty()) {
3429 // Optimizing for varargs on Win64 is unlikely to be safe without
3430 // additional testing.
3431 if (IsCalleeWin64 || IsCallerWin64)
3434 SmallVector<CCValAssign, 16> ArgLocs;
3435 CCState CCInfo(CalleeCC, isVarArg, DAG.getMachineFunction(), ArgLocs,
3438 CCInfo.AnalyzeCallOperands(Outs, CC_X86);
3439 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i)
3440 if (!ArgLocs[i].isRegLoc())
3444 // If the call result is in ST0 / ST1, it needs to be popped off the x87
3445 // stack. Therefore, if it's not used by the call it is not safe to optimize
3446 // this into a sibcall.
3447 bool Unused = false;
3448 for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
3455 SmallVector<CCValAssign, 16> RVLocs;
3456 CCState CCInfo(CalleeCC, false, DAG.getMachineFunction(), RVLocs,
3458 CCInfo.AnalyzeCallResult(Ins, RetCC_X86);
3459 for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
3460 CCValAssign &VA = RVLocs[i];
3461 if (VA.getLocReg() == X86::FP0 || VA.getLocReg() == X86::FP1)
3466 // If the calling conventions do not match, then we'd better make sure the
3467 // results are returned in the same way as what the caller expects.
3469 SmallVector<CCValAssign, 16> RVLocs1;
3470 CCState CCInfo1(CalleeCC, false, DAG.getMachineFunction(), RVLocs1,
3472 CCInfo1.AnalyzeCallResult(Ins, RetCC_X86);
3474 SmallVector<CCValAssign, 16> RVLocs2;
3475 CCState CCInfo2(CallerCC, false, DAG.getMachineFunction(), RVLocs2,
3477 CCInfo2.AnalyzeCallResult(Ins, RetCC_X86);
3479 if (RVLocs1.size() != RVLocs2.size())
3481 for (unsigned i = 0, e = RVLocs1.size(); i != e; ++i) {
3482 if (RVLocs1[i].isRegLoc() != RVLocs2[i].isRegLoc())
3484 if (RVLocs1[i].getLocInfo() != RVLocs2[i].getLocInfo())
3486 if (RVLocs1[i].isRegLoc()) {
3487 if (RVLocs1[i].getLocReg() != RVLocs2[i].getLocReg())
3490 if (RVLocs1[i].getLocMemOffset() != RVLocs2[i].getLocMemOffset())
3496 // If the callee takes no arguments then go on to check the results of the call.
3498 if (!Outs.empty()) {
3499 // Check if stack adjustment is needed. For now, do not do this if any
3500 // argument is passed on the stack.
3501 SmallVector<CCValAssign, 16> ArgLocs;
3502 CCState CCInfo(CalleeCC, isVarArg, DAG.getMachineFunction(), ArgLocs,
3505 // Allocate shadow area for Win64
3506 if (IsCalleeWin64)
3507 CCInfo.AllocateStack(32, 8);
3509 CCInfo.AnalyzeCallOperands(Outs, CC_X86);
3510 if (CCInfo.getNextStackOffset()) {
3511 MachineFunction &MF = DAG.getMachineFunction();
3512 if (MF.getInfo<X86MachineFunctionInfo>()->getBytesToPopOnReturn())
3515 // Check if the arguments are already laid out in the same way as
3516 // the caller's fixed stack objects.
3517 MachineFrameInfo *MFI = MF.getFrameInfo();
3518 const MachineRegisterInfo *MRI = &MF.getRegInfo();
3519 const X86InstrInfo *TII = Subtarget->getInstrInfo();
3520 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
3521 CCValAssign &VA = ArgLocs[i];
3522 SDValue Arg = OutVals[i];
3523 ISD::ArgFlagsTy Flags = Outs[i].Flags;
3524 if (VA.getLocInfo() == CCValAssign::Indirect)
3526 if (!VA.isRegLoc()) {
3527 if (!MatchingStackOffset(Arg, VA.getLocMemOffset(), Flags,
3534 // If the tailcall address may be in a register, then make sure it's
3535 // possible to register allocate for it. In 32-bit, the call address can
3536 // only target EAX, EDX, or ECX since the tail call must be scheduled after
3537 // callee-saved registers are restored. These happen to be the same
3538 // registers used to pass 'inreg' arguments so watch out for those.
3539 if (!Subtarget->is64Bit() &&
3540 ((!isa<GlobalAddressSDNode>(Callee) &&
3541 !isa<ExternalSymbolSDNode>(Callee)) ||
3542 DAG.getTarget().getRelocationModel() == Reloc::PIC_)) {
3543 unsigned NumInRegs = 0;
3544 // In PIC we need an extra register to formulate the address computation
3545 // for the callee.
3546 unsigned MaxInRegs =
3547 (DAG.getTarget().getRelocationModel() == Reloc::PIC_) ? 2 : 3;
3549 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
3550 CCValAssign &VA = ArgLocs[i];
3553 unsigned Reg = VA.getLocReg();
3556 case X86::EAX: case X86::EDX: case X86::ECX:
3557 if (++NumInRegs == MaxInRegs)
3569 X86TargetLowering::createFastISel(FunctionLoweringInfo &funcInfo,
3570 const TargetLibraryInfo *libInfo) const {
3571 return X86::createFastISel(funcInfo, libInfo);
3574 //===----------------------------------------------------------------------===//
3575 // Other Lowering Hooks
3576 //===----------------------------------------------------------------------===//
3578 static bool MayFoldLoad(SDValue Op) {
3579 return Op.hasOneUse() && ISD::isNormalLoad(Op.getNode());
3582 static bool MayFoldIntoStore(SDValue Op) {
3583 return Op.hasOneUse() && ISD::isNormalStore(*Op.getNode()->use_begin());
3586 static bool isTargetShuffle(unsigned Opcode) {
3588 default: return false;
3589 case X86ISD::BLENDI:
3590 case X86ISD::PSHUFB:
3591 case X86ISD::PSHUFD:
3592 case X86ISD::PSHUFHW:
3593 case X86ISD::PSHUFLW:
3595 case X86ISD::PALIGNR:
3596 case X86ISD::MOVLHPS:
3597 case X86ISD::MOVLHPD:
3598 case X86ISD::MOVHLPS:
3599 case X86ISD::MOVLPS:
3600 case X86ISD::MOVLPD:
3601 case X86ISD::MOVSHDUP:
3602 case X86ISD::MOVSLDUP:
3603 case X86ISD::MOVDDUP:
3606 case X86ISD::UNPCKL:
3607 case X86ISD::UNPCKH:
3608 case X86ISD::VPERMILPI:
3609 case X86ISD::VPERM2X128:
3610 case X86ISD::VPERMI:
3615 static SDValue getTargetShuffleNode(unsigned Opc, SDLoc dl, EVT VT,
3616 SDValue V1, SelectionDAG &DAG) {
3618 default: llvm_unreachable("Unknown x86 shuffle node");
3619 case X86ISD::MOVSHDUP:
3620 case X86ISD::MOVSLDUP:
3621 case X86ISD::MOVDDUP:
3622 return DAG.getNode(Opc, dl, VT, V1);
3626 static SDValue getTargetShuffleNode(unsigned Opc, SDLoc dl, EVT VT,
3627 SDValue V1, unsigned TargetMask,
3628 SelectionDAG &DAG) {
3630 default: llvm_unreachable("Unknown x86 shuffle node");
3631 case X86ISD::PSHUFD:
3632 case X86ISD::PSHUFHW:
3633 case X86ISD::PSHUFLW:
3634 case X86ISD::VPERMILPI:
3635 case X86ISD::VPERMI:
3636 return DAG.getNode(Opc, dl, VT, V1, DAG.getConstant(TargetMask, MVT::i8));
3640 static SDValue getTargetShuffleNode(unsigned Opc, SDLoc dl, EVT VT,
3641 SDValue V1, SDValue V2, unsigned TargetMask,
3642 SelectionDAG &DAG) {
3644 default: llvm_unreachable("Unknown x86 shuffle node");
3645 case X86ISD::PALIGNR:
3646 case X86ISD::VALIGN:
3648 case X86ISD::VPERM2X128:
3649 return DAG.getNode(Opc, dl, VT, V1, V2,
3650 DAG.getConstant(TargetMask, MVT::i8));
3654 static SDValue getTargetShuffleNode(unsigned Opc, SDLoc dl, EVT VT,
3655 SDValue V1, SDValue V2, SelectionDAG &DAG) {
3657 default: llvm_unreachable("Unknown x86 shuffle node");
3658 case X86ISD::MOVLHPS:
3659 case X86ISD::MOVLHPD:
3660 case X86ISD::MOVHLPS:
3661 case X86ISD::MOVLPS:
3662 case X86ISD::MOVLPD:
3665 case X86ISD::UNPCKL:
3666 case X86ISD::UNPCKH:
3667 return DAG.getNode(Opc, dl, VT, V1, V2);
3671 SDValue X86TargetLowering::getReturnAddressFrameIndex(SelectionDAG &DAG) const {
3672 MachineFunction &MF = DAG.getMachineFunction();
3673 const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
3674 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
3675 int ReturnAddrIndex = FuncInfo->getRAIndex();
3677 if (ReturnAddrIndex == 0) {
3678 // Set up a frame object for the return address.
3679 unsigned SlotSize = RegInfo->getSlotSize();
3680 ReturnAddrIndex = MF.getFrameInfo()->CreateFixedObject(SlotSize,
3683 FuncInfo->setRAIndex(ReturnAddrIndex);
3686 return DAG.getFrameIndex(ReturnAddrIndex, getPointerTy());
3689 bool X86::isOffsetSuitableForCodeModel(int64_t Offset, CodeModel::Model M,
3690 bool hasSymbolicDisplacement) {
3691 // Offset should fit into 32 bit immediate field.
3692 if (!isInt<32>(Offset))
3695 // If we don't have a symbolic displacement - we don't have any extra
3697 if (!hasSymbolicDisplacement)
3700 // FIXME: Some tweaks might be needed for medium code model.
3701 if (M != CodeModel::Small && M != CodeModel::Kernel)
3704 // For the small code model we assume that the latest object is 16MB below the
3705 // end of the 31-bit boundary. We may also accept pretty large negative constants
3706 // knowing that all objects are in the positive half of the address space.
3707 if (M == CodeModel::Small && Offset < 16*1024*1024)
3710 // For the kernel code model we know that all objects reside in the negative half
3711 // of the 32-bit address space. We must not accept negative offsets, since they
3712 // may be just off, but we may accept pretty large positive ones.
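// For example, with the small code model an offset of 1MB is accepted (well under
// the 16MB slack), while with the kernel code model only offsets >= 0 are accepted.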
3713 if (M == CodeModel::Kernel && Offset >= 0)
3719 /// isCalleePop - Determines whether the callee is required to pop its
3720 /// own arguments. Callee pop is necessary to support tail calls.
3721 bool X86::isCalleePop(CallingConv::ID CallingConv,
3722 bool is64Bit, bool IsVarArg, bool TailCallOpt) {
3723 switch (CallingConv) {
3726 case CallingConv::X86_StdCall:
3727 case CallingConv::X86_FastCall:
3728 case CallingConv::X86_ThisCall:
3730 case CallingConv::Fast:
3731 case CallingConv::GHC:
3732 case CallingConv::HiPE:
3739 /// \brief Return true if the condition is an unsigned comparison operation.
3740 static bool isX86CCUnsigned(unsigned X86CC) {
3742 default: llvm_unreachable("Invalid integer condition!");
3743 case X86::COND_E: return true;
3744 case X86::COND_G: return false;
3745 case X86::COND_GE: return false;
3746 case X86::COND_L: return false;
3747 case X86::COND_LE: return false;
3748 case X86::COND_NE: return true;
3749 case X86::COND_B: return true;
3750 case X86::COND_A: return true;
3751 case X86::COND_BE: return true;
3752 case X86::COND_AE: return true;
3754 llvm_unreachable("covered switch fell through?!");
3757 /// TranslateX86CC - do a one-to-one translation of an ISD::CondCode to the X86
3758 /// specific condition code, returning the condition code and the LHS/RHS of the
3759 /// comparison to make.
3760 static unsigned TranslateX86CC(ISD::CondCode SetCCOpcode, bool isFP,
3761 SDValue &LHS, SDValue &RHS, SelectionDAG &DAG) {
3763 if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS)) {
3764 if (SetCCOpcode == ISD::SETGT && RHSC->isAllOnesValue()) {
3765 // X > -1 -> X == 0, jump !sign.
3766 RHS = DAG.getConstant(0, RHS.getValueType());
3767 return X86::COND_NS;
3769 if (SetCCOpcode == ISD::SETLT && RHSC->isNullValue()) {
3770 // X < 0 -> X == 0, jump on sign.
3773 if (SetCCOpcode == ISD::SETLT && RHSC->getZExtValue() == 1) {
3775 RHS = DAG.getConstant(0, RHS.getValueType());
3776 return X86::COND_LE;
3780 switch (SetCCOpcode) {
3781 default: llvm_unreachable("Invalid integer condition!");
3782 case ISD::SETEQ: return X86::COND_E;
3783 case ISD::SETGT: return X86::COND_G;
3784 case ISD::SETGE: return X86::COND_GE;
3785 case ISD::SETLT: return X86::COND_L;
3786 case ISD::SETLE: return X86::COND_LE;
3787 case ISD::SETNE: return X86::COND_NE;
3788 case ISD::SETULT: return X86::COND_B;
3789 case ISD::SETUGT: return X86::COND_A;
3790 case ISD::SETULE: return X86::COND_BE;
3791 case ISD::SETUGE: return X86::COND_AE;
3795 // First determine if it is required or is profitable to flip the operands.
3797 // If LHS is a foldable load, but RHS is not, flip the condition.
3798 if (ISD::isNON_EXTLoad(LHS.getNode()) &&
3799 !ISD::isNON_EXTLoad(RHS.getNode())) {
3800 SetCCOpcode = getSetCCSwappedOperands(SetCCOpcode);
3801 std::swap(LHS, RHS);
3804 switch (SetCCOpcode) {
3810 std::swap(LHS, RHS);
3814 // On a floating point condition, the flags are set as follows:
3815 // ZF | PF | CF | op
3816 // 0 | 0 | 0 | X > Y
3817 // 0 | 0 | 1 | X < Y
3818 // 1 | 0 | 0 | X == Y
3819 // 1 | 1 | 1 | unordered
3820 switch (SetCCOpcode) {
3821 default: llvm_unreachable("Condcode should be pre-legalized away");
3823 case ISD::SETEQ: return X86::COND_E;
3824 case ISD::SETOLT: // flipped
3826 case ISD::SETGT: return X86::COND_A;
3827 case ISD::SETOLE: // flipped
3829 case ISD::SETGE: return X86::COND_AE;
3830 case ISD::SETUGT: // flipped
3832 case ISD::SETLT: return X86::COND_B;
3833 case ISD::SETUGE: // flipped
3835 case ISD::SETLE: return X86::COND_BE;
3837 case ISD::SETNE: return X86::COND_NE;
3838 case ISD::SETUO: return X86::COND_P;
3839 case ISD::SETO: return X86::COND_NP;
3841 case ISD::SETUNE: return X86::COND_INVALID;
3845 /// hasFPCMov - is there a floating point cmov for the specific X86 condition
3846 /// code. The current x86 ISA includes the following FP cmov instructions:
3847 /// fcmovb, fcmovbe, fcmove, fcmovu, fcmovae, fcmova, fcmovne, fcmovnu.
3848 static bool hasFPCMov(unsigned X86CC) {
3864 /// isFPImmLegal - Returns true if the target can instruction select the
3865 /// specified FP immediate natively. If false, the legalizer will
3866 /// materialize the FP immediate as a load from a constant pool.
3867 bool X86TargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const {
3868 for (unsigned i = 0, e = LegalFPImmediates.size(); i != e; ++i) {
3869 if (Imm.bitwiseIsEqual(LegalFPImmediates[i]))
3875 bool X86TargetLowering::shouldReduceLoadWidth(SDNode *Load,
3876 ISD::LoadExtType ExtTy,
3878 // "ELF Handling for Thread-Local Storage" specifies that R_X86_64_GOTTPOFF
3879 // relocations must target a movq or addq instruction: don't let the load shrink.
3880 SDValue BasePtr = cast<LoadSDNode>(Load)->getBasePtr();
3881 if (BasePtr.getOpcode() == X86ISD::WrapperRIP)
3882 if (const auto *GA = dyn_cast<GlobalAddressSDNode>(BasePtr.getOperand(0)))
3883 return GA->getTargetFlags() != X86II::MO_GOTTPOFF;
3887 /// \brief Returns true if it is beneficial to convert a load of a constant
3888 /// to just the constant itself.
3889 bool X86TargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm,
3891 assert(Ty->isIntegerTy());
3893 unsigned BitSize = Ty->getPrimitiveSizeInBits();
3894 if (BitSize == 0 || BitSize > 64)
3899 bool X86TargetLowering::isExtractSubvectorCheap(EVT ResVT,
3900 unsigned Index) const {
3901 if (!isOperationLegalOrCustom(ISD::EXTRACT_SUBVECTOR, ResVT))
3904 return (Index == 0 || Index == ResVT.getVectorNumElements());
3907 bool X86TargetLowering::isCheapToSpeculateCttz() const {
3908 // Speculate cttz only if we can directly use TZCNT.
3909 return Subtarget->hasBMI();
3912 bool X86TargetLowering::isCheapToSpeculateCtlz() const {
3913 // Speculate ctlz only if we can directly use LZCNT.
3914 return Subtarget->hasLZCNT();
3917 /// isUndefOrInRange - Return true if Val is undef or if its value falls within
3918 /// the specified range [Low, Hi).
3919 static bool isUndefOrInRange(int Val, int Low, int Hi) {
3920 return (Val < 0) || (Val >= Low && Val < Hi);
3923 /// isUndefOrEqual - Val is either less than zero (undef) or equal to the
3924 /// specified value.
3925 static bool isUndefOrEqual(int Val, int CmpVal) {
3926 return (Val < 0 || Val == CmpVal);
3929 /// isSequentialOrUndefInRange - Return true if every element in Mask, beginning
3930 /// at position Pos and ending at Pos+Size, falls within the specified
3931 /// sequential range [Low, Low+Size) or is undef.
3932 static bool isSequentialOrUndefInRange(ArrayRef<int> Mask,
3933 unsigned Pos, unsigned Size, int Low) {
3934 for (unsigned i = Pos, e = Pos+Size; i != e; ++i, ++Low)
3935 if (!isUndefOrEqual(Mask[i], Low))
3940 /// isPSHUFDMask - Return true if the node specifies a shuffle of elements that
3941 /// is suitable for input to PSHUFD. That is, it doesn't reference the other
3942 /// operand; by default it matches against the first operand.
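/// For example, for v4i32 the mask <2, 1, -1, 0> qualifies (every index refers to
/// the first operand or is undef), while <0, 5, 2, 3> does not.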
3943 static bool isPSHUFDMask(ArrayRef<int> Mask, MVT VT,
3944 bool TestSecondOperand = false) {
3945 if (VT != MVT::v4f32 && VT != MVT::v4i32 &&
3946 VT != MVT::v2f64 && VT != MVT::v2i64)
3949 unsigned NumElems = VT.getVectorNumElements();
3950 unsigned Lo = TestSecondOperand ? NumElems : 0;
3951 unsigned Hi = Lo + NumElems;
3953 for (unsigned i = 0; i < NumElems; ++i)
3954 if (!isUndefOrInRange(Mask[i], (int)Lo, (int)Hi))
3960 /// isPSHUFHWMask - Return true if the node specifies a shuffle of elements that
3961 /// is suitable for input to PSHUFHW.
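/// For example, <0, 1, 2, 3, 7, 6, 5, 4> qualifies for v8i16: the low quadword is
/// copied in order and the high quadword is shuffled only within itself.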
3962 static bool isPSHUFHWMask(ArrayRef<int> Mask, MVT VT, bool HasInt256) {
3963 if (VT != MVT::v8i16 && (!HasInt256 || VT != MVT::v16i16))
3966 // Lower quadword copied in order or undef.
3967 if (!isSequentialOrUndefInRange(Mask, 0, 4, 0))
3970 // Upper quadword shuffled.
3971 for (unsigned i = 4; i != 8; ++i)
3972 if (!isUndefOrInRange(Mask[i], 4, 8))
3975 if (VT == MVT::v16i16) {
3976 // Lower quadword copied in order or undef.
3977 if (!isSequentialOrUndefInRange(Mask, 8, 4, 8))
3980 // Upper quadword shuffled.
3981 for (unsigned i = 12; i != 16; ++i)
3982 if (!isUndefOrInRange(Mask[i], 12, 16))
3989 /// isPSHUFLWMask - Return true if the node specifies a shuffle of elements that
3990 /// is suitable for input to PSHUFLW.
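/// For example, <3, 2, 1, 0, 4, 5, 6, 7> qualifies for v8i16: the high quadword is
/// copied in order and the low quadword is shuffled only within itself.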
3991 static bool isPSHUFLWMask(ArrayRef<int> Mask, MVT VT, bool HasInt256) {
3992 if (VT != MVT::v8i16 && (!HasInt256 || VT != MVT::v16i16))
3995 // Upper quadword copied in order.
3996 if (!isSequentialOrUndefInRange(Mask, 4, 4, 4))
3999 // Lower quadword shuffled.
4000 for (unsigned i = 0; i != 4; ++i)
4001 if (!isUndefOrInRange(Mask[i], 0, 4))
4004 if (VT == MVT::v16i16) {
4005 // Upper quadword copied in order.
4006 if (!isSequentialOrUndefInRange(Mask, 12, 4, 12))
4009 // Lower quadword shuffled.
4010 for (unsigned i = 8; i != 12; ++i)
4011 if (!isUndefOrInRange(Mask[i], 8, 12))
4018 /// \brief Return true if the mask specifies a shuffle of elements that is
4019 /// suitable for input to an intralane (palignr) or interlane (valign) vector
4020 /// right-shift.
4021 static bool isAlignrMask(ArrayRef<int> Mask, MVT VT, bool InterLane) {
4022 unsigned NumElts = VT.getVectorNumElements();
4023 unsigned NumLanes = InterLane ? 1: VT.getSizeInBits()/128;
4024 unsigned NumLaneElts = NumElts/NumLanes;
4026 // Do not handle 64-bit element shuffles with palignr.
4027 if (NumLaneElts == 2)
4030 for (unsigned l = 0; l != NumElts; l+=NumLaneElts) {
4032 for (i = 0; i != NumLaneElts; ++i) {
4037 // Lane is all undef, go to next lane
4038 if (i == NumLaneElts)
4041 int Start = Mask[i+l];
4043 // Make sure its in this lane in one of the sources
4044 if (!isUndefOrInRange(Start, l, l+NumLaneElts) &&
4045 !isUndefOrInRange(Start, l+NumElts, l+NumElts+NumLaneElts))
4048 // If not lane 0, then we must match lane 0
4049 if (l != 0 && Mask[i] >= 0 && !isUndefOrEqual(Start, Mask[i]+l))
4052 // Correct second source to be contiguous with first source
4053 if (Start >= (int)NumElts)
4054 Start -= NumElts - NumLaneElts;
4056 // Make sure we're shifting in the right direction.
4057 if (Start <= (int)(i+l))
4062 // Check the rest of the elements to see if they are consecutive.
4063 for (++i; i != NumLaneElts; ++i) {
4064 int Idx = Mask[i+l];
4066 // Make sure its in this lane
4067 if (!isUndefOrInRange(Idx, l, l+NumLaneElts) &&
4068 !isUndefOrInRange(Idx, l+NumElts, l+NumElts+NumLaneElts))
4071 // If not lane 0, then we must match lane 0
4072 if (l != 0 && Mask[i] >= 0 && !isUndefOrEqual(Idx, Mask[i]+l))
4075 if (Idx >= (int)NumElts)
4076 Idx -= NumElts - NumLaneElts;
4078 if (!isUndefOrEqual(Idx, Start+i))
4087 /// \brief Return true if the node specifies a shuffle of elements that is
4088 /// suitable for input to PALIGNR.
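/// For example, for v4i32 the mask <1, 2, 3, 4> qualifies: elements 1..3 of the
/// first source followed by element 0 of the second, i.e. a contiguous window
/// across the two concatenated sources.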
4089 static bool isPALIGNRMask(ArrayRef<int> Mask, MVT VT,
4090 const X86Subtarget *Subtarget) {
4091 if ((VT.is128BitVector() && !Subtarget->hasSSSE3()) ||
4092 (VT.is256BitVector() && !Subtarget->hasInt256()) ||
4093 VT.is512BitVector())
4094 // FIXME: Add AVX512BW.
4097 return isAlignrMask(Mask, VT, false);
4100 /// \brief Return true if the node specifies a shuffle of elements that is
4101 /// suitable for input to VALIGN.
4102 static bool isVALIGNMask(ArrayRef<int> Mask, MVT VT,
4103 const X86Subtarget *Subtarget) {
4104 // FIXME: Add AVX512VL.
4105 if (!VT.is512BitVector() || !Subtarget->hasAVX512())
4107 return isAlignrMask(Mask, VT, true);
4110 /// CommuteVectorShuffleMask - Change values in a shuffle permute mask assuming
4111 /// the two vector operands have swapped position.
4112 static void CommuteVectorShuffleMask(SmallVectorImpl<int> &Mask,
4113 unsigned NumElems) {
4114 for (unsigned i = 0; i != NumElems; ++i) {
4118 else if (idx < (int)NumElems)
4119 Mask[i] = idx + NumElems;
4121 Mask[i] = idx - NumElems;
4125 /// isSHUFPMask - Return true if the specified VECTOR_SHUFFLE operand
4126 /// specifies a shuffle of elements that is suitable for input to 128/256-bit
4127 /// SHUFPS and SHUFPD. If Commuted is true, then it checks for sources to be
4128 /// reverse of what x86 shuffles want.
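/// For example, for v4f32 the mask <0, 2, 4, 6> qualifies: the low two result
/// elements come from the first source and the high two from the second.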
4129 static bool isSHUFPMask(ArrayRef<int> Mask, MVT VT, bool Commuted = false) {
4131 unsigned NumElems = VT.getVectorNumElements();
4132 unsigned NumLanes = VT.getSizeInBits()/128;
4133 unsigned NumLaneElems = NumElems/NumLanes;
4135 if (NumLaneElems != 2 && NumLaneElems != 4)
4138 unsigned EltSize = VT.getVectorElementType().getSizeInBits();
4139 bool symmetricMaskRequired =
4140 (VT.getSizeInBits() >= 256) && (EltSize == 32);
4142 // VSHUFPSY divides the resulting vector into 4 chunks.
4143 // The sources are also split into 4 chunks, and each destination
4144 // chunk must come from a different source chunk.
4146 // SRC1 => X7 X6 X5 X4 X3 X2 X1 X0
4147 // SRC2 => Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0
4149 // DST => Y7..Y4, Y7..Y4, X7..X4, X7..X4,
4150 // Y3..Y0, Y3..Y0, X3..X0, X3..X0
4152 // VSHUFPDY divides the resulting vector into 4 chunks.
4153 // The sources are also split into 4 chunks, and each destination
4154 // chunk must come from a different source chunk.
4156 // SRC1 => X3 X2 X1 X0
4157 // SRC2 => Y3 Y2 Y1 Y0
4159 // DST => Y3..Y2, X3..X2, Y1..Y0, X1..X0
4161 SmallVector<int, 4> MaskVal(NumLaneElems, -1);
4162 unsigned HalfLaneElems = NumLaneElems/2;
4163 for (unsigned l = 0; l != NumElems; l += NumLaneElems) {
4164 for (unsigned i = 0; i != NumLaneElems; ++i) {
4165 int Idx = Mask[i+l];
4166 unsigned RngStart = l + ((Commuted == (i<HalfLaneElems)) ? NumElems : 0);
4167 if (!isUndefOrInRange(Idx, RngStart, RngStart+NumLaneElems))
4169 // For VSHUFPSY, the mask of the second half must be the same as the
4170 // first but with the appropriate offsets. This works in the same way as
4171 // VPERMILPS works with masks.
4172 if (!symmetricMaskRequired || Idx < 0)
4174 if (MaskVal[i] < 0) {
4175 MaskVal[i] = Idx - l;
4178 if ((signed)(Idx - l) != MaskVal[i])
4186 /// isMOVHLPSMask - Return true if the specified VECTOR_SHUFFLE operand
4187 /// specifies a shuffle of elements that is suitable for input to MOVHLPS.
4188 static bool isMOVHLPSMask(ArrayRef<int> Mask, MVT VT) {
4189 if (!VT.is128BitVector())
4192 unsigned NumElems = VT.getVectorNumElements();
4197 // Expect bit0 == 6, bit1 == 7, bit2 == 2, bit3 == 3
4198 return isUndefOrEqual(Mask[0], 6) &&
4199 isUndefOrEqual(Mask[1], 7) &&
4200 isUndefOrEqual(Mask[2], 2) &&
4201 isUndefOrEqual(Mask[3], 3);
4204 /// isMOVHLPS_v_undef_Mask - Special case of isMOVHLPSMask for canonical form
4205 /// of vector_shuffle v, v, <2, 3, 2, 3>, i.e. vector_shuffle v, undef,
4207 static bool isMOVHLPS_v_undef_Mask(ArrayRef<int> Mask, MVT VT) {
4208 if (!VT.is128BitVector())
4211 unsigned NumElems = VT.getVectorNumElements();
4216 return isUndefOrEqual(Mask[0], 2) &&
4217 isUndefOrEqual(Mask[1], 3) &&
4218 isUndefOrEqual(Mask[2], 2) &&
4219 isUndefOrEqual(Mask[3], 3);
4222 /// isMOVLPMask - Return true if the specified VECTOR_SHUFFLE operand
4223 /// specifies a shuffle of elements that is suitable for input to MOVLP{S|D}.
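/// For example, <4, 5, 2, 3> qualifies for v4f32: the low half is taken from the
/// second operand and the high half is passed through from the first.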
4224 static bool isMOVLPMask(ArrayRef<int> Mask, MVT VT) {
4225 if (!VT.is128BitVector())
4228 unsigned NumElems = VT.getVectorNumElements();
4230 if (NumElems != 2 && NumElems != 4)
4233 for (unsigned i = 0, e = NumElems/2; i != e; ++i)
4234 if (!isUndefOrEqual(Mask[i], i + NumElems))
4237 for (unsigned i = NumElems/2, e = NumElems; i != e; ++i)
4238 if (!isUndefOrEqual(Mask[i], i))
4244 /// isMOVLHPSMask - Return true if the specified VECTOR_SHUFFLE operand
4245 /// specifies a shuffle of elements that is suitable for input to MOVLHPS.
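/// For example, <0, 1, 4, 5> qualifies for v4f32: the low halves of the two
/// operands are concatenated.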
4246 static bool isMOVLHPSMask(ArrayRef<int> Mask, MVT VT) {
4247 if (!VT.is128BitVector())
4250 unsigned NumElems = VT.getVectorNumElements();
4252 if (NumElems != 2 && NumElems != 4)
4255 for (unsigned i = 0, e = NumElems/2; i != e; ++i)
4256 if (!isUndefOrEqual(Mask[i], i))
4259 for (unsigned i = 0, e = NumElems/2; i != e; ++i)
4260 if (!isUndefOrEqual(Mask[i + e], i + NumElems))
4266 /// isINSERTPSMask - Return true if the specified VECTOR_SHUFFLE operand
4267 /// specifies a shuffle of elements that is suitable for input to INSERTPS.
4268 /// i.e. if all but one of the elements come from the same vector.
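/// For example, <4, 1, 2, 3> qualifies for v4f32: three elements stay in place
/// from the first vector and one is inserted from the second.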
4269 static bool isINSERTPSMask(ArrayRef<int> Mask, MVT VT) {
4270 // TODO: Deal with AVX's VINSERTPS
4271 if (!VT.is128BitVector() || (VT != MVT::v4f32 && VT != MVT::v4i32))
4274 unsigned CorrectPosV1 = 0;
4275 unsigned CorrectPosV2 = 0;
4276 for (int i = 0, e = (int)VT.getVectorNumElements(); i != e; ++i) {
4277 if (Mask[i] == -1) {
4285 else if (Mask[i] == i + 4)
4289 if (CorrectPosV1 == 3 || CorrectPosV2 == 3)
4290 // We have 3 elements (undefs count as elements from any vector) from one
4291 // vector, and one from another.
4298 // Some special combinations that can be optimized.
4301 SDValue Compact8x32ShuffleNode(ShuffleVectorSDNode *SVOp,
4302 SelectionDAG &DAG) {
4303 MVT VT = SVOp->getSimpleValueType(0);
4306 if (VT != MVT::v8i32 && VT != MVT::v8f32)
4309 ArrayRef<int> Mask = SVOp->getMask();
4311 // These are the special masks that may be optimized.
4312 static const int MaskToOptimizeEven[] = {0, 8, 2, 10, 4, 12, 6, 14};
4313 static const int MaskToOptimizeOdd[] = {1, 9, 3, 11, 5, 13, 7, 15};
4314 bool MatchEvenMask = true;
4315 bool MatchOddMask = true;
4316 for (int i=0; i<8; ++i) {
4317 if (!isUndefOrEqual(Mask[i], MaskToOptimizeEven[i]))
4318 MatchEvenMask = false;
4319 if (!isUndefOrEqual(Mask[i], MaskToOptimizeOdd[i]))
4320 MatchOddMask = false;
4323 if (!MatchEvenMask && !MatchOddMask)
4326 SDValue UndefNode = DAG.getNode(ISD::UNDEF, dl, VT);
4328 SDValue Op0 = SVOp->getOperand(0);
4329 SDValue Op1 = SVOp->getOperand(1);
4331 if (MatchEvenMask) {
4332 // Shift the second operand right to 32 bits.
4333 static const int ShiftRightMask[] = {-1, 0, -1, 2, -1, 4, -1, 6 };
4334 Op1 = DAG.getVectorShuffle(VT, dl, Op1, UndefNode, ShiftRightMask);
4336 // Shift the first operand left to 32 bits.
4337 static const int ShiftLeftMask[] = {1, -1, 3, -1, 5, -1, 7, -1 };
4338 Op0 = DAG.getVectorShuffle(VT, dl, Op0, UndefNode, ShiftLeftMask);
4340 static const int BlendMask[] = {0, 9, 2, 11, 4, 13, 6, 15};
4341 return DAG.getVectorShuffle(VT, dl, Op0, Op1, BlendMask);
4344 /// isUNPCKLMask - Return true if the specified VECTOR_SHUFFLE operand
4345 /// specifies a shuffle of elements that is suitable for input to UNPCKL.
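/// For example, <0, 4, 1, 5> qualifies for v4i32: the low halves of the two
/// sources are interleaved.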
4346 static bool isUNPCKLMask(ArrayRef<int> Mask, MVT VT,
4347 bool HasInt256, bool V2IsSplat = false) {
4349 assert(VT.getSizeInBits() >= 128 &&
4350 "Unsupported vector type for unpckl");
4352 unsigned NumElts = VT.getVectorNumElements();
4353 if (VT.is256BitVector() && NumElts != 4 && NumElts != 8 &&
4354 (!HasInt256 || (NumElts != 16 && NumElts != 32)))
4357 assert((!VT.is512BitVector() || VT.getScalarType().getSizeInBits() >= 32) &&
4358 "Unsupported vector type for unpckh");
4360 // AVX defines UNPCK* to operate independently on 128-bit lanes.
4361 unsigned NumLanes = VT.getSizeInBits()/128;
4362 unsigned NumLaneElts = NumElts/NumLanes;
4364 for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
4365 for (unsigned i = 0, j = l; i != NumLaneElts; i += 2, ++j) {
4366 int BitI = Mask[l+i];
4367 int BitI1 = Mask[l+i+1];
4368 if (!isUndefOrEqual(BitI, j))
4371 if (!isUndefOrEqual(BitI1, NumElts))
4374 if (!isUndefOrEqual(BitI1, j + NumElts))
4383 /// isUNPCKHMask - Return true if the specified VECTOR_SHUFFLE operand
4384 /// specifies a shuffle of elements that is suitable for input to UNPCKH.
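/// For example, <2, 6, 3, 7> qualifies for v4i32: the high halves of the two
/// sources are interleaved.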
4385 static bool isUNPCKHMask(ArrayRef<int> Mask, MVT VT,
4386 bool HasInt256, bool V2IsSplat = false) {
4387 assert(VT.getSizeInBits() >= 128 &&
4388 "Unsupported vector type for unpckh");
4390 unsigned NumElts = VT.getVectorNumElements();
4391 if (VT.is256BitVector() && NumElts != 4 && NumElts != 8 &&
4392 (!HasInt256 || (NumElts != 16 && NumElts != 32)))
4395 assert((!VT.is512BitVector() || VT.getScalarType().getSizeInBits() >= 32) &&
4396 "Unsupported vector type for unpckh");
4398 // AVX defines UNPCK* to operate independently on 128-bit lanes.
4399 unsigned NumLanes = VT.getSizeInBits()/128;
4400 unsigned NumLaneElts = NumElts/NumLanes;
4402 for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
4403 for (unsigned i = 0, j = l+NumLaneElts/2; i != NumLaneElts; i += 2, ++j) {
4404 int BitI = Mask[l+i];
4405 int BitI1 = Mask[l+i+1];
4406 if (!isUndefOrEqual(BitI, j))
4409 if (isUndefOrEqual(BitI1, NumElts))
4412 if (!isUndefOrEqual(BitI1, j+NumElts))
4420 /// isUNPCKL_v_undef_Mask - Special case of isUNPCKLMask for canonical form
4421 /// of vector_shuffle v, v, <0, 4, 1, 5>, i.e. vector_shuffle v, undef,
4423 static bool isUNPCKL_v_undef_Mask(ArrayRef<int> Mask, MVT VT, bool HasInt256) {
4424 unsigned NumElts = VT.getVectorNumElements();
4425 bool Is256BitVec = VT.is256BitVector();
4427 if (VT.is512BitVector())
4429 assert((VT.is128BitVector() || VT.is256BitVector()) &&
4430 "Unsupported vector type for unpckh");
4432 if (Is256BitVec && NumElts != 4 && NumElts != 8 &&
4433 (!HasInt256 || (NumElts != 16 && NumElts != 32)))
4436 // For 256-bit i64/f64, use MOVDDUPY instead, so reject the matching pattern
4437 // FIXME: Need a better way to get rid of this, there's no latency difference
4438 // between UNPCKLPD and MOVDDUP; the latter should always be checked first and
4439 // the former later. We should also remove the "_undef" special mask.
4440 if (NumElts == 4 && Is256BitVec)
4443 // Handle 128 and 256-bit vector lengths. AVX defines UNPCK* to operate
4444 // independently on 128-bit lanes.
4445 unsigned NumLanes = VT.getSizeInBits()/128;
4446 unsigned NumLaneElts = NumElts/NumLanes;
4448 for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
4449 for (unsigned i = 0, j = l; i != NumLaneElts; i += 2, ++j) {
4450 int BitI = Mask[l+i];
4451 int BitI1 = Mask[l+i+1];
4453 if (!isUndefOrEqual(BitI, j))
4455 if (!isUndefOrEqual(BitI1, j))
4463 /// isUNPCKH_v_undef_Mask - Special case of isUNPCKHMask for canonical form
4464 /// of vector_shuffle v, v, <2, 6, 3, 7>, i.e. vector_shuffle v, undef,
4466 static bool isUNPCKH_v_undef_Mask(ArrayRef<int> Mask, MVT VT, bool HasInt256) {
4467 unsigned NumElts = VT.getVectorNumElements();
4469 if (VT.is512BitVector())
4472 assert((VT.is128BitVector() || VT.is256BitVector()) &&
4473 "Unsupported vector type for unpckh");
4475 if (VT.is256BitVector() && NumElts != 4 && NumElts != 8 &&
4476 (!HasInt256 || (NumElts != 16 && NumElts != 32)))
4479 // Handle 128 and 256-bit vector lengths. AVX defines UNPCK* to operate
4480 // independently on 128-bit lanes.
4481 unsigned NumLanes = VT.getSizeInBits()/128;
4482 unsigned NumLaneElts = NumElts/NumLanes;
4484 for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
4485 for (unsigned i = 0, j = l+NumLaneElts/2; i != NumLaneElts; i += 2, ++j) {
4486 int BitI = Mask[l+i];
4487 int BitI1 = Mask[l+i+1];
4488 if (!isUndefOrEqual(BitI, j))
4490 if (!isUndefOrEqual(BitI1, j))
4497 // Match for INSERTI64x4/INSERTF64x4 instructions: (src0[0], src1[0]) or
4498 // (src1[0], src0[1]), i.e. manipulation of 256-bit sub-vectors.
4499 static bool isINSERT64x4Mask(ArrayRef<int> Mask, MVT VT, unsigned int *Imm) {
4500 if (!VT.is512BitVector())
4503 unsigned NumElts = VT.getVectorNumElements();
4504 unsigned HalfSize = NumElts/2;
4505 if (isSequentialOrUndefInRange(Mask, 0, HalfSize, 0)) {
4506 if (isSequentialOrUndefInRange(Mask, HalfSize, HalfSize, NumElts)) {
4511 if (isSequentialOrUndefInRange(Mask, 0, HalfSize, NumElts)) {
4512 if (isSequentialOrUndefInRange(Mask, HalfSize, HalfSize, HalfSize)) {
4520 /// isMOVLMask - Return true if the specified VECTOR_SHUFFLE operand
4521 /// specifies a shuffle of elements that is suitable for input to MOVSS,
4522 /// MOVSD, and MOVD, i.e. setting the lowest element.
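/// For example, <4, 1, 2, 3> qualifies for v4i32: the lowest element is taken
/// from the second operand and the remaining elements are passed through.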
4523 static bool isMOVLMask(ArrayRef<int> Mask, EVT VT) {
4524 if (VT.getVectorElementType().getSizeInBits() < 32)
4526 if (!VT.is128BitVector())
4529 unsigned NumElts = VT.getVectorNumElements();
4531 if (!isUndefOrEqual(Mask[0], NumElts))
4534 for (unsigned i = 1; i != NumElts; ++i)
4535 if (!isUndefOrEqual(Mask[i], i))
4541 /// isVPERM2X128Mask - Match 256-bit shuffles where the elements are considered
4542 /// as permutations between 128-bit chunks or halves. As an example, in this
4543 /// shuffle:
4544 /// vector_shuffle <4, 5, 6, 7, 12, 13, 14, 15>
4545 /// the first half comes from the second half of V1 and the second half from
4546 /// the second half of V2.
4547 static bool isVPERM2X128Mask(ArrayRef<int> Mask, MVT VT, bool HasFp256) {
4548 if (!HasFp256 || !VT.is256BitVector())
4551 // The shuffle result is divided into half A and half B. In total the two
4552 // sources have 4 halves, namely: C, D, E, F. The final values of A and
4553 // B must come from C, D, E or F.
4554 unsigned HalfSize = VT.getVectorNumElements()/2;
4555 bool MatchA = false, MatchB = false;
4557 // Check if A comes from one of C, D, E, F.
4558 for (unsigned Half = 0; Half != 4; ++Half) {
4559 if (isSequentialOrUndefInRange(Mask, 0, HalfSize, Half*HalfSize)) {
4565 // Check if B comes from one of C, D, E, F.
4566 for (unsigned Half = 0; Half != 4; ++Half) {
4567 if (isSequentialOrUndefInRange(Mask, HalfSize, HalfSize, Half*HalfSize)) {
4573 return MatchA && MatchB;
4576 /// getShuffleVPERM2X128Immediate - Return the appropriate immediate to shuffle
4577 /// the specified VECTOR_SHUFFLE mask with VPERM2F128/VPERM2I128 instructions.
4578 static unsigned getShuffleVPERM2X128Immediate(ShuffleVectorSDNode *SVOp) {
4579 MVT VT = SVOp->getSimpleValueType(0);
4581 unsigned HalfSize = VT.getVectorNumElements()/2;
4583 unsigned FstHalf = 0, SndHalf = 0;
4584 for (unsigned i = 0; i < HalfSize; ++i) {
4585 if (SVOp->getMaskElt(i) > 0) {
4586 FstHalf = SVOp->getMaskElt(i)/HalfSize;
4590 for (unsigned i = HalfSize; i < HalfSize*2; ++i) {
4591 if (SVOp->getMaskElt(i) > 0) {
4592 SndHalf = SVOp->getMaskElt(i)/HalfSize;
4597 return (FstHalf | (SndHalf << 4));
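// For example, for the v8i32 mask <4, 5, 6, 7, 12, 13, 14, 15> mentioned above,
// FstHalf == 1 and SndHalf == 3, giving the immediate 0x31.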
4600 // Symmetric in-lane mask. Each lane has 4 elements (for imm8)
4601 static bool isPermImmMask(ArrayRef<int> Mask, MVT VT, unsigned& Imm8) {
4602 unsigned EltSize = VT.getVectorElementType().getSizeInBits();
4606 unsigned NumElts = VT.getVectorNumElements();
4608 if (VT.is128BitVector() || (VT.is256BitVector() && EltSize == 64)) {
4609 for (unsigned i = 0; i != NumElts; ++i) {
4612 Imm8 |= Mask[i] << (i*2);
4617 unsigned LaneSize = 4;
4618 SmallVector<int, 4> MaskVal(LaneSize, -1);
4620 for (unsigned l = 0; l != NumElts; l += LaneSize) {
4621 for (unsigned i = 0; i != LaneSize; ++i) {
4622 if (!isUndefOrInRange(Mask[i+l], l, l+LaneSize))
4626 if (MaskVal[i] < 0) {
4627 MaskVal[i] = Mask[i+l] - l;
4628 Imm8 |= MaskVal[i] << (i*2);
4631 if (Mask[i+l] != (signed)(MaskVal[i]+l))
4638 /// isVPERMILPMask - Return true if the specified VECTOR_SHUFFLE operand
4639 /// specifies a shuffle of elements that is suitable for input to VPERMILPD*.
4640 /// Note that VPERMIL mask matching differs depending on whether the underlying
4641 /// type is 32- or 64-bit. For VPERMILPS the high half of the mask must point
4642 /// to the same elements as the low half, but within the higher half of the source.
4643 /// In VPERMILPD the two lanes could be shuffled independently of each other
4644 /// with the same restriction that lanes can't be crossed. Also handles PSHUFDY.
4645 static bool isVPERMILPMask(ArrayRef<int> Mask, MVT VT) {
4646 unsigned EltSize = VT.getVectorElementType().getSizeInBits();
4647 if (VT.getSizeInBits() < 256 || EltSize < 32)
4649 bool symmetricMaskRequired = (EltSize == 32);
4650 unsigned NumElts = VT.getVectorNumElements();
4652 unsigned NumLanes = VT.getSizeInBits()/128;
4653 unsigned LaneSize = NumElts/NumLanes;
4654 // 2 or 4 elements in one lane
4656 SmallVector<int, 4> ExpectedMaskVal(LaneSize, -1);
4657 for (unsigned l = 0; l != NumElts; l += LaneSize) {
4658 for (unsigned i = 0; i != LaneSize; ++i) {
4659 if (!isUndefOrInRange(Mask[i+l], l, l+LaneSize))
4661 if (symmetricMaskRequired) {
4662 if (ExpectedMaskVal[i] < 0 && Mask[i+l] >= 0) {
4663 ExpectedMaskVal[i] = Mask[i+l] - l;
4666 if (!isUndefOrEqual(Mask[i+l], ExpectedMaskVal[i]+l))
4674 /// isCommutedMOVLMask - Returns true if the shuffle mask is the commuted form
4675 /// of what x86 MOVSS wants: MOVSS takes the lowest element from vector 2 and
4676 /// the remaining elements from vector 1 in order.
4677 static bool isCommutedMOVLMask(ArrayRef<int> Mask, MVT VT,
4678 bool V2IsSplat = false, bool V2IsUndef = false) {
4679 if (!VT.is128BitVector())
4682 unsigned NumOps = VT.getVectorNumElements();
4683 if (NumOps != 2 && NumOps != 4 && NumOps != 8 && NumOps != 16)
4686 if (!isUndefOrEqual(Mask[0], 0))
4689 for (unsigned i = 1; i != NumOps; ++i)
4690 if (!(isUndefOrEqual(Mask[i], i+NumOps) ||
4691 (V2IsUndef && isUndefOrInRange(Mask[i], NumOps, NumOps*2)) ||
4692 (V2IsSplat && isUndefOrEqual(Mask[i], NumOps))))
4698 /// isMOVSHDUPMask - Return true if the specified VECTOR_SHUFFLE operand
4699 /// specifies a shuffle of elements that is suitable for input to MOVSHDUP.
4700 /// Masks to match: <1, 1, 3, 3> or <1, 1, 3, 3, 5, 5, 7, 7>
4701 static bool isMOVSHDUPMask(ArrayRef<int> Mask, MVT VT,
4702 const X86Subtarget *Subtarget) {
4703 if (!Subtarget->hasSSE3())
4706 unsigned NumElems = VT.getVectorNumElements();
4708 if ((VT.is128BitVector() && NumElems != 4) ||
4709 (VT.is256BitVector() && NumElems != 8) ||
4710 (VT.is512BitVector() && NumElems != 16))
4713 // "i+1" is the value the indexed mask element must have
4714 for (unsigned i = 0; i != NumElems; i += 2)
4715 if (!isUndefOrEqual(Mask[i], i+1) ||
4716 !isUndefOrEqual(Mask[i+1], i+1))
4722 /// isMOVSLDUPMask - Return true if the specified VECTOR_SHUFFLE operand
4723 /// specifies a shuffle of elements that is suitable for input to MOVSLDUP.
4724 /// Masks to match: <0, 0, 2, 2> or <0, 0, 2, 2, 4, 4, 6, 6>
4725 static bool isMOVSLDUPMask(ArrayRef<int> Mask, MVT VT,
4726 const X86Subtarget *Subtarget) {
4727 if (!Subtarget->hasSSE3())
4730 unsigned NumElems = VT.getVectorNumElements();
4732 if ((VT.is128BitVector() && NumElems != 4) ||
4733 (VT.is256BitVector() && NumElems != 8) ||
4734 (VT.is512BitVector() && NumElems != 16))
4737 // "i" is the value the indexed mask element must have
4738 for (unsigned i = 0; i != NumElems; i += 2)
4739 if (!isUndefOrEqual(Mask[i], i) ||
4740 !isUndefOrEqual(Mask[i+1], i))
4746 /// isMOVDDUPYMask - Return true if the specified VECTOR_SHUFFLE operand
4747 /// specifies a shuffle of elements that is suitable for input to 256-bit
4748 /// version of MOVDDUP.
4749 static bool isMOVDDUPYMask(ArrayRef<int> Mask, MVT VT, bool HasFp256) {
4750 if (!HasFp256 || !VT.is256BitVector())
4753 unsigned NumElts = VT.getVectorNumElements();
4757 for (unsigned i = 0; i != NumElts/2; ++i)
4758 if (!isUndefOrEqual(Mask[i], 0))
4760 for (unsigned i = NumElts/2; i != NumElts; ++i)
4761 if (!isUndefOrEqual(Mask[i], NumElts/2))
4766 /// isMOVDDUPMask - Return true if the specified VECTOR_SHUFFLE operand
4767 /// specifies a shuffle of elements that is suitable for input to 128-bit
4768 /// version of MOVDDUP.
4769 static bool isMOVDDUPMask(ArrayRef<int> Mask, MVT VT) {
4770 if (!VT.is128BitVector())
4773 unsigned e = VT.getVectorNumElements() / 2;
4774 for (unsigned i = 0; i != e; ++i)
4775 if (!isUndefOrEqual(Mask[i], i))
4777 for (unsigned i = 0; i != e; ++i)
4778 if (!isUndefOrEqual(Mask[e+i], i))
4783 /// isVEXTRACTIndex - Return true if the specified
4784 /// EXTRACT_SUBVECTOR operand specifies a vector extract that is
4785 /// suitable for instructions that extract 128- or 256-bit subvectors.
4786 static bool isVEXTRACTIndex(SDNode *N, unsigned vecWidth) {
4787 assert((vecWidth == 128 || vecWidth == 256) && "Unexpected vector width");
4788 if (!isa<ConstantSDNode>(N->getOperand(1).getNode()))
4791 // The index should be aligned on a vecWidth-bit boundary.
4793 cast<ConstantSDNode>(N->getOperand(1).getNode())->getZExtValue();
4795 MVT VT = N->getSimpleValueType(0);
4796 unsigned ElSize = VT.getVectorElementType().getSizeInBits();
4797 bool Result = (Index * ElSize) % vecWidth == 0;
4802 /// isVINSERTIndex - Return true if the specified INSERT_SUBVECTOR
4803 /// operand specifies a subvector insert that is suitable for the
4804 /// 128-bit or 256-bit subvector insertion instructions.
4805 static bool isVINSERTIndex(SDNode *N, unsigned vecWidth) {
4806 assert((vecWidth == 128 || vecWidth == 256) && "Unexpected vector width");
4807 if (!isa<ConstantSDNode>(N->getOperand(2).getNode()))
4809 // The index should be aligned on a vecWidth-bit boundary.
4811 cast<ConstantSDNode>(N->getOperand(2).getNode())->getZExtValue();
4813 MVT VT = N->getSimpleValueType(0);
4814 unsigned ElSize = VT.getVectorElementType().getSizeInBits();
4815 bool Result = (Index * ElSize) % vecWidth == 0;
4820 bool X86::isVINSERT128Index(SDNode *N) {
4821 return isVINSERTIndex(N, 128);
4824 bool X86::isVINSERT256Index(SDNode *N) {
4825 return isVINSERTIndex(N, 256);
4828 bool X86::isVEXTRACT128Index(SDNode *N) {
4829 return isVEXTRACTIndex(N, 128);
4832 bool X86::isVEXTRACT256Index(SDNode *N) {
4833 return isVEXTRACTIndex(N, 256);
4836 /// getShuffleSHUFImmediate - Return the appropriate immediate to shuffle
4837 /// the specified VECTOR_SHUFFLE mask with PSHUF* and SHUFP* instructions.
4838 /// Handles 128-bit and 256-bit.
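/// Worked example (illustration only, not from the original source): for a
/// v4i32 shuffle with mask <3, 1, 2, 0>, element i contributes two bits at bit
/// position 2*i, giving the immediate 0b00100111 == 0x27.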
4839 static unsigned getShuffleSHUFImmediate(ShuffleVectorSDNode *N) {
4840 MVT VT = N->getSimpleValueType(0);
4842 assert((VT.getSizeInBits() >= 128) &&
4843 "Unsupported vector type for PSHUF/SHUFP");
4845 // Handle 128 and 256-bit vector lengths. AVX defines PSHUF/SHUFP to operate
4846 // independently on 128-bit lanes.
4847 unsigned NumElts = VT.getVectorNumElements();
4848 unsigned NumLanes = VT.getSizeInBits()/128;
4849 unsigned NumLaneElts = NumElts/NumLanes;
4851 assert((NumLaneElts == 2 || NumLaneElts == 4 || NumLaneElts == 8) &&
4852 "Only supports 2, 4 or 8 elements per lane");
4854 unsigned Shift = (NumLaneElts >= 4) ? 1 : 0;
4856 for (unsigned i = 0; i != NumElts; ++i) {
4857 int Elt = N->getMaskElt(i);
4858 if (Elt < 0) continue;
4859 Elt &= NumLaneElts - 1;
4860 unsigned ShAmt = (i << Shift) % 8;
4861 Mask |= Elt << ShAmt;
4867 /// getShufflePSHUFHWImmediate - Return the appropriate immediate to shuffle
4868 /// the specified VECTOR_SHUFFLE mask with the PSHUFHW instruction.
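/// For example (illustration only): if the high four elements of a v8i16 mask
/// are <5, 4, 7, 6>, each contributes (Elt & 3) at bit position 2*i, so the
/// immediate is 0b10110001 == 0xB1.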
4869 static unsigned getShufflePSHUFHWImmediate(ShuffleVectorSDNode *N) {
4870 MVT VT = N->getSimpleValueType(0);
4872 assert((VT == MVT::v8i16 || VT == MVT::v16i16) &&
4873 "Unsupported vector type for PSHUFHW");
4875 unsigned NumElts = VT.getVectorNumElements();
4878 for (unsigned l = 0; l != NumElts; l += 8) {
4879 // 8 elements per lane, but we only care about the last 4.
4880 for (unsigned i = 0; i < 4; ++i) {
4881 int Elt = N->getMaskElt(l+i+4);
4882 if (Elt < 0) continue;
4883 Elt &= 0x3; // only 2-bits.
4884 Mask |= Elt << (i * 2);
4891 /// getShufflePSHUFLWImmediate - Return the appropriate immediate to shuffle
4892 /// the specified VECTOR_SHUFFLE mask with the PSHUFLW instruction.
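/// For example (illustration only): if the low four elements of a v8i16 mask
/// are <2, 3, 0, 1>, the immediate is 2 | (3 << 2) | (0 << 4) | (1 << 6) == 0x4E.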
4893 static unsigned getShufflePSHUFLWImmediate(ShuffleVectorSDNode *N) {
4894 MVT VT = N->getSimpleValueType(0);
4896 assert((VT == MVT::v8i16 || VT == MVT::v16i16) &&
4897 "Unsupported vector type for PSHUFHW");
4899 unsigned NumElts = VT.getVectorNumElements();
4902 for (unsigned l = 0; l != NumElts; l += 8) {
4903 // 8 elements per lane, but we only care about the first 4.
4904 for (unsigned i = 0; i < 4; ++i) {
4905 int Elt = N->getMaskElt(l+i);
4906 if (Elt < 0) continue;
4907 Elt &= 0x3; // only 2-bits
4908 Mask |= Elt << (i * 2);
4915 /// \brief Return the appropriate immediate to shuffle the specified
4916 /// VECTOR_SHUFFLE mask with the PALIGNR (if InterLane is false) or with the
4917 /// VALIGN (if InterLane is true) instruction.
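/// Rough example (illustrative values): for a v8i16 PALIGNR whose mask starts
/// with element 3 (i.e. <3, 4, 5, ...>), the byte rotation amount is
/// 3 * sizeof(i16) == 6; for VALIGN the amount is counted in elements instead.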
4918 static unsigned getShuffleAlignrImmediate(ShuffleVectorSDNode *SVOp,
4920 MVT VT = SVOp->getSimpleValueType(0);
4921 unsigned EltSize = InterLane ? 1 :
4922 VT.getVectorElementType().getSizeInBits() >> 3;
4924 unsigned NumElts = VT.getVectorNumElements();
4925 unsigned NumLanes = VT.is512BitVector() ? 1 : VT.getSizeInBits()/128;
4926 unsigned NumLaneElts = NumElts/NumLanes;
4930 for (i = 0; i != NumElts; ++i) {
4931 Val = SVOp->getMaskElt(i);
4935 if (Val >= (int)NumElts)
4936 Val -= NumElts - NumLaneElts;
4938 assert(Val - i > 0 && "PALIGNR imm should be positive");
4939 return (Val - i) * EltSize;
4942 /// \brief Return the appropriate immediate to shuffle the specified
4943 /// VECTOR_SHUFFLE mask with the PALIGNR instruction.
4944 static unsigned getShufflePALIGNRImmediate(ShuffleVectorSDNode *SVOp) {
4945 return getShuffleAlignrImmediate(SVOp, false);
4948 /// \brief Return the appropriate immediate to shuffle the specified
4949 /// VECTOR_SHUFFLE mask with the VALIGN instruction.
4950 static unsigned getShuffleVALIGNImmediate(ShuffleVectorSDNode *SVOp) {
4951 return getShuffleAlignrImmediate(SVOp, true);
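// The helpers below turn an EXTRACT_SUBVECTOR / INSERT_SUBVECTOR element index
// into the chunk index expected by the VEXTRACT*/VINSERT* instructions.
// For example (illustration only): extracting elements <4..7> of a v8f32
// (element index 4) with vecWidth 128 gives 4 / (128 / 32) == 1, i.e. the
// upper 128-bit half.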
4955 static unsigned getExtractVEXTRACTImmediate(SDNode *N, unsigned vecWidth) {
4956 assert((vecWidth == 128 || vecWidth == 256) && "Unsupported vector width");
4957 if (!isa<ConstantSDNode>(N->getOperand(1).getNode()))
4958 llvm_unreachable("Illegal extract subvector for VEXTRACT");
4961 cast<ConstantSDNode>(N->getOperand(1).getNode())->getZExtValue();
4963 MVT VecVT = N->getOperand(0).getSimpleValueType();
4964 MVT ElVT = VecVT.getVectorElementType();
4966 unsigned NumElemsPerChunk = vecWidth / ElVT.getSizeInBits();
4967 return Index / NumElemsPerChunk;
4970 static unsigned getInsertVINSERTImmediate(SDNode *N, unsigned vecWidth) {
4971 assert((vecWidth == 128 || vecWidth == 256) && "Unsupported vector width");
4972 if (!isa<ConstantSDNode>(N->getOperand(2).getNode()))
4973 llvm_unreachable("Illegal insert subvector for VINSERT");
4976 cast<ConstantSDNode>(N->getOperand(2).getNode())->getZExtValue();
4978 MVT VecVT = N->getSimpleValueType(0);
4979 MVT ElVT = VecVT.getVectorElementType();
4981 unsigned NumElemsPerChunk = vecWidth / ElVT.getSizeInBits();
4982 return Index / NumElemsPerChunk;
4985 /// getExtractVEXTRACT128Immediate - Return the appropriate immediate
4986 /// to extract the specified EXTRACT_SUBVECTOR index with VEXTRACTF128
4987 /// and VEXTRACTI128 instructions.
4988 unsigned X86::getExtractVEXTRACT128Immediate(SDNode *N) {
4989 return getExtractVEXTRACTImmediate(N, 128);
4992 /// getExtractVEXTRACT256Immediate - Return the appropriate immediate
4993 /// to extract the specified EXTRACT_SUBVECTOR index with VEXTRACTF64x4
4994 /// and VEXTRACTI64x4 instructions.
4995 unsigned X86::getExtractVEXTRACT256Immediate(SDNode *N) {
4996 return getExtractVEXTRACTImmediate(N, 256);
4999 /// getInsertVINSERT128Immediate - Return the appropriate immediate
5000 /// to insert at the specified INSERT_SUBVECTOR index with VINSERTF128
5001 /// and VINSERTI128 instructions.
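/// For example (illustration only): inserting a v4i32 sub-vector at element
/// index 4 of a v8i32 yields 4 / (128 / 32) == 1, selecting the upper half.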
5002 unsigned X86::getInsertVINSERT128Immediate(SDNode *N) {
5003 return getInsertVINSERTImmediate(N, 128);
5006 /// getInsertVINSERT256Immediate - Return the appropriate immediate
5007 /// to insert at the specified INSERT_SUBVECTOR index with VINSERTF64x4
5008 /// and VINSERTI64x4 instructions.
5009 unsigned X86::getInsertVINSERT256Immediate(SDNode *N) {
5010 return getInsertVINSERTImmediate(N, 256);
5013 /// isZero - Returns true if Elt is a constant integer zero
5014 static bool isZero(SDValue V) {
5015 ConstantSDNode *C = dyn_cast<ConstantSDNode>(V);
5016 return C && C->isNullValue();
5019 /// isZeroNode - Returns true if Elt is a constant zero or a floating point constant +0.0.
5021 bool X86::isZeroNode(SDValue Elt) {
5024 if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Elt))
5025 return CFP->getValueAPF().isPosZero();
5029 /// ShouldXformToMOVHLPS - Return true if the node should be transformed to
5030 /// match movhlps. The lower half elements should come from the upper half of
5031 /// V1 (and in order), and the upper half elements should come from the upper
5032 /// half of V2 (and in order).
5033 static bool ShouldXformToMOVHLPS(ArrayRef<int> Mask, MVT VT) {
5034 if (!VT.is128BitVector())
5036 if (VT.getVectorNumElements() != 4)
5038 for (unsigned i = 0, e = 2; i != e; ++i)
5039 if (!isUndefOrEqual(Mask[i], i+2))
5041 for (unsigned i = 2; i != 4; ++i)
5042 if (!isUndefOrEqual(Mask[i], i+4))
5047 /// isScalarLoadToVector - Returns true if the node is a scalar load that
5048 /// is promoted to a vector. It also returns the LoadSDNode by reference if required.
5050 static bool isScalarLoadToVector(SDNode *N, LoadSDNode **LD = nullptr) {
5051 if (N->getOpcode() != ISD::SCALAR_TO_VECTOR)
5053 N = N->getOperand(0).getNode();
5054 if (!ISD::isNON_EXTLoad(N))
5057 *LD = cast<LoadSDNode>(N);
5061 // Test whether the given value is a vector value which will be legalized into a load.
5063 static bool WillBeConstantPoolLoad(SDNode *N) {
5064 if (N->getOpcode() != ISD::BUILD_VECTOR)
5067 // Check for any non-constant elements.
5068 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
5069 switch (N->getOperand(i).getNode()->getOpcode()) {
5071 case ISD::ConstantFP:
5078 // Vectors of all-zeros and all-ones are materialized with special
5079 // instructions rather than being loaded.
5080 return !ISD::isBuildVectorAllZeros(N) &&
5081 !ISD::isBuildVectorAllOnes(N);
5084 /// ShouldXformToMOVLP{S|D} - Return true if the node should be transformed to
5085 /// match movlp{s|d}. The lower half elements should come from the lower half of
5086 /// V1 (and in order), and the upper half elements should come from the upper
5087 /// half of V2 (and in order). And since V1 will become the source of the
5088 /// MOVLP, it must be either a vector load or a scalar load to vector.
5089 static bool ShouldXformToMOVLP(SDNode *V1, SDNode *V2,
5090 ArrayRef<int> Mask, MVT VT) {
5091 if (!VT.is128BitVector())
5094 if (!ISD::isNON_EXTLoad(V1) && !isScalarLoadToVector(V1))
5096 // If V2 is a vector load, don't do this transformation; we will try to fold
5097 // the load into a SHUFPS op instead.
5098 if (ISD::isNON_EXTLoad(V2) || WillBeConstantPoolLoad(V2))
5101 unsigned NumElems = VT.getVectorNumElements();
5103 if (NumElems != 2 && NumElems != 4)
5105 for (unsigned i = 0, e = NumElems/2; i != e; ++i)
5106 if (!isUndefOrEqual(Mask[i], i))
5108 for (unsigned i = NumElems/2, e = NumElems; i != e; ++i)
5109 if (!isUndefOrEqual(Mask[i], i+NumElems))
5114 /// isZeroShuffle - Returns true if N is a VECTOR_SHUFFLE that can be resolved
5115 /// to a zero vector.
5116 /// FIXME: move to dag combiner / method on ShuffleVectorSDNode
5117 static bool isZeroShuffle(ShuffleVectorSDNode *N) {
5118 SDValue V1 = N->getOperand(0);
5119 SDValue V2 = N->getOperand(1);
5120 unsigned NumElems = N->getValueType(0).getVectorNumElements();
5121 for (unsigned i = 0; i != NumElems; ++i) {
5122 int Idx = N->getMaskElt(i);
5123 if (Idx >= (int)NumElems) {
5124 unsigned Opc = V2.getOpcode();
5125 if (Opc == ISD::UNDEF || ISD::isBuildVectorAllZeros(V2.getNode()))
5127 if (Opc != ISD::BUILD_VECTOR ||
5128 !X86::isZeroNode(V2.getOperand(Idx-NumElems)))
5130 } else if (Idx >= 0) {
5131 unsigned Opc = V1.getOpcode();
5132 if (Opc == ISD::UNDEF || ISD::isBuildVectorAllZeros(V1.getNode()))
5134 if (Opc != ISD::BUILD_VECTOR ||
5135 !X86::isZeroNode(V1.getOperand(Idx)))
5142 /// getZeroVector - Returns a vector of specified type with all zero elements.
5144 static SDValue getZeroVector(EVT VT, const X86Subtarget *Subtarget,
5145 SelectionDAG &DAG, SDLoc dl) {
5146 assert(VT.isVector() && "Expected a vector type");
5148 // Always build SSE zero vectors as <4 x i32> bitcasted
5149 // to their dest type. This ensures they get CSE'd.
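// For example, a v2f64 zero on SSE2 is emitted as a bitcast of a v4i32
// all-zero BUILD_VECTOR, so every 128-bit zero vector shares one node.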
5151 if (VT.is128BitVector()) { // SSE
5152 if (Subtarget->hasSSE2()) { // SSE2
5153 SDValue Cst = DAG.getConstant(0, MVT::i32);
5154 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Cst, Cst, Cst, Cst);
5156 SDValue Cst = DAG.getConstantFP(+0.0, MVT::f32);
5157 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4f32, Cst, Cst, Cst, Cst);
5159 } else if (VT.is256BitVector()) { // AVX
5160 if (Subtarget->hasInt256()) { // AVX2
5161 SDValue Cst = DAG.getConstant(0, MVT::i32);
5162 SDValue Ops[] = { Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst };
5163 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v8i32, Ops);
5165 // 256-bit logic and arithmetic instructions in AVX are all
5166 // floating-point, no support for integer ops. Emit fp zeroed vectors.
5167 SDValue Cst = DAG.getConstantFP(+0.0, MVT::f32);
5168 SDValue Ops[] = { Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst };
5169 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v8f32, Ops);
5171 } else if (VT.is512BitVector()) { // AVX-512
5172 SDValue Cst = DAG.getConstant(0, MVT::i32);
5173 SDValue Ops[] = { Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst,
5174 Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst };
5175 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v16i32, Ops);
5176 } else if (VT.getScalarType() == MVT::i1) {
5177 assert(VT.getVectorNumElements() <= 16 && "Unexpected vector type");
5178 SDValue Cst = DAG.getConstant(0, MVT::i1);
5179 SmallVector<SDValue, 16> Ops(VT.getVectorNumElements(), Cst);
5180 return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Ops);
5182 llvm_unreachable("Unexpected vector type");
5184 return DAG.getNode(ISD::BITCAST, dl, VT, Vec);
5187 /// getOnesVector - Returns a vector of specified type with all bits set.
5188 /// Always build ones vectors as <4 x i32> or <8 x i32>. For 256-bit types with
5189 /// no AVX2 support, use two <4 x i32>s inserted into an <8 x i32> appropriately.
5190 /// Then bitcast to their original type, ensuring they get CSE'd.
5191 static SDValue getOnesVector(MVT VT, bool HasInt256, SelectionDAG &DAG,
5193 assert(VT.isVector() && "Expected a vector type");
5195 SDValue Cst = DAG.getConstant(~0U, MVT::i32);
5197 if (VT.is256BitVector()) {
5198 if (HasInt256) { // AVX2
5199 SDValue Ops[] = { Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst };
5200 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v8i32, Ops);
5202 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Cst, Cst, Cst, Cst);
5203 Vec = Concat128BitVectors(Vec, Vec, MVT::v8i32, 8, DAG, dl);
5205 } else if (VT.is128BitVector()) {
5206 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Cst, Cst, Cst, Cst);
5208 llvm_unreachable("Unexpected vector type");
5210 return DAG.getNode(ISD::BITCAST, dl, VT, Vec);
5213 /// NormalizeMask - V2 is a splat, modify the mask (if needed) so all elements
5214 /// that point to V2 point to its first element.
5215 static void NormalizeMask(SmallVectorImpl<int> &Mask, unsigned NumElems) {
5216 for (unsigned i = 0; i != NumElems; ++i) {
5217 if (Mask[i] > (int)NumElems) {
5223 /// getMOVLMask - Returns a vector_shuffle mask for an movs{s|d}, movd
5224 /// operation of specified width.
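/// For example (illustration only): for a v4f32, the generated mask is
/// <4, 1, 2, 3>, i.e. element 0 comes from V2 and the rest from V1.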
5225 static SDValue getMOVL(SelectionDAG &DAG, SDLoc dl, EVT VT, SDValue V1,
5227 unsigned NumElems = VT.getVectorNumElements();
5228 SmallVector<int, 8> Mask;
5229 Mask.push_back(NumElems);
5230 for (unsigned i = 1; i != NumElems; ++i)
5232 return DAG.getVectorShuffle(VT, dl, V1, V2, &Mask[0]);
5235 /// getUnpackl - Returns a vector_shuffle node for an unpackl operation.
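/// For example (illustration only): for a v4i32, getUnpackl builds the mask
/// <0, 4, 1, 5>, and getUnpackh below builds <2, 6, 3, 7>.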
5236 static SDValue getUnpackl(SelectionDAG &DAG, SDLoc dl, MVT VT, SDValue V1,
5238 unsigned NumElems = VT.getVectorNumElements();
5239 SmallVector<int, 8> Mask;
5240 for (unsigned i = 0, e = NumElems/2; i != e; ++i) {
5242 Mask.push_back(i + NumElems);
5244 return DAG.getVectorShuffle(VT, dl, V1, V2, &Mask[0]);
5247 /// getUnpackh - Returns a vector_shuffle node for an unpackh operation.
5248 static SDValue getUnpackh(SelectionDAG &DAG, SDLoc dl, MVT VT, SDValue V1,
5250 unsigned NumElems = VT.getVectorNumElements();
5251 SmallVector<int, 8> Mask;
5252 for (unsigned i = 0, Half = NumElems/2; i != Half; ++i) {
5253 Mask.push_back(i + Half);
5254 Mask.push_back(i + NumElems + Half);
5256 return DAG.getVectorShuffle(VT, dl, V1, V2, &Mask[0]);
5259 // PromoteSplati8i16 - i16 and i8 vector types can't be used directly by
5260 // a generic shuffle instruction because the target has no such instructions.
5261 // Generate shuffles which repeat i16 and i8 several times until they can be
5262 // represented by v4f32 and then be manipulated by target supported shuffles.
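// For example (illustration only): to splat element 5 of a v8i16, one unpckh
// duplicates the high half (<4, 4, 5, 5, 6, 6, 7, 7>), after which the desired
// scalar occupies 32-bit chunk 1 and can be splatted with a v4f32 shuffle.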
5263 static SDValue PromoteSplati8i16(SDValue V, SelectionDAG &DAG, int &EltNo) {
5264 MVT VT = V.getSimpleValueType();
5265 int NumElems = VT.getVectorNumElements();
5268 while (NumElems > 4) {
5269 if (EltNo < NumElems/2) {
5270 V = getUnpackl(DAG, dl, VT, V, V);
5272 V = getUnpackh(DAG, dl, VT, V, V);
5273 EltNo -= NumElems/2;
5280 /// getLegalSplat - Generate a legal splat with supported x86 shuffles
5281 static SDValue getLegalSplat(SelectionDAG &DAG, SDValue V, int EltNo) {
5282 MVT VT = V.getSimpleValueType();
5285 if (VT.is128BitVector()) {
5286 V = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, V);
5287 int SplatMask[4] = { EltNo, EltNo, EltNo, EltNo };
5288 V = DAG.getVectorShuffle(MVT::v4f32, dl, V, DAG.getUNDEF(MVT::v4f32),
5290 } else if (VT.is256BitVector()) {
5291 // To use VPERMILPS to splat scalars, the second half of indices must
5292 // refer to the higher part, which is a duplication of the lower one,
5293 // because VPERMILPS can only handle in-lane permutations.
5294 int SplatMask[8] = { EltNo, EltNo, EltNo, EltNo,
5295 EltNo+4, EltNo+4, EltNo+4, EltNo+4 };
5297 V = DAG.getNode(ISD::BITCAST, dl, MVT::v8f32, V);
5298 V = DAG.getVectorShuffle(MVT::v8f32, dl, V, DAG.getUNDEF(MVT::v8f32),
5301 llvm_unreachable("Vector size not supported");
5303 return DAG.getNode(ISD::BITCAST, dl, VT, V);
5306 /// PromoteSplat - Splat is promoted to target supported vector shuffles.
5307 static SDValue PromoteSplat(ShuffleVectorSDNode *SV, SelectionDAG &DAG) {
5308 MVT SrcVT = SV->getSimpleValueType(0);
5309 SDValue V1 = SV->getOperand(0);
5312 int EltNo = SV->getSplatIndex();
5313 int NumElems = SrcVT.getVectorNumElements();
5314 bool Is256BitVec = SrcVT.is256BitVector();
5316 assert(((SrcVT.is128BitVector() && NumElems > 4) || Is256BitVec) &&
5317 "Unknown how to promote splat for type");
5319 // Extract the 128-bit part containing the splat element and update
5320 // the splat element index when it refers to the higher register.
5322 V1 = Extract128BitVector(V1, EltNo, DAG, dl);
5323 if (EltNo >= NumElems/2)
5324 EltNo -= NumElems/2;
5327 // i16 and i8 vector types can't be used directly by a generic shuffle
5328 // instruction because the target has no such instruction. Generate shuffles
5329 // which repeat i16 and i8 several times until they fit in i32, and then can
5330 // be manipulated by target supported shuffles.
5331 MVT EltVT = SrcVT.getVectorElementType();
5332 if (EltVT == MVT::i8 || EltVT == MVT::i16)
5333 V1 = PromoteSplati8i16(V1, DAG, EltNo);
5335 // Recreate the 256-bit vector and place the same 128-bit vector
5336 // into the low and high part. This is necessary because we want
5337 // to use VPERM* to shuffle the vectors
5339 V1 = DAG.getNode(ISD::CONCAT_VECTORS, dl, SrcVT, V1, V1);
5342 return getLegalSplat(DAG, V1, EltNo);
5345 /// getShuffleVectorZeroOrUndef - Return a vector_shuffle of the specified
5346 /// vector and a zero or undef vector. This produces a shuffle where the low
5347 /// element of V2 is swizzled into the zero/undef vector, landing at element
5348 /// Idx. This produces a shuffle mask like 4,1,2,3 (idx=0) or 0,1,2,4 (idx=3).
5349 static SDValue getShuffleVectorZeroOrUndef(SDValue V2, unsigned Idx,
5351 const X86Subtarget *Subtarget,
5352 SelectionDAG &DAG) {
5353 MVT VT = V2.getSimpleValueType();
5355 ? getZeroVector(VT, Subtarget, DAG, SDLoc(V2)) : DAG.getUNDEF(VT);
5356 unsigned NumElems = VT.getVectorNumElements();
5357 SmallVector<int, 16> MaskVec;
5358 for (unsigned i = 0; i != NumElems; ++i)
5359 // If this is the insertion idx, put the low elt of V2 here.
5360 MaskVec.push_back(i == Idx ? NumElems : i);
5361 return DAG.getVectorShuffle(VT, SDLoc(V2), V1, V2, &MaskVec[0]);
5364 /// getTargetShuffleMask - Calculates the shuffle mask corresponding to the
5365 /// target specific opcode. Returns true if the Mask could be calculated. Sets
5366 /// IsUnary to true if the node only uses one source. Note that this will set IsUnary for
5367 /// shuffles which use a single input multiple times, and in those cases it will
5368 /// adjust the mask to only have indices within that single input.
5369 static bool getTargetShuffleMask(SDNode *N, MVT VT,
5370 SmallVectorImpl<int> &Mask, bool &IsUnary) {
5371 unsigned NumElems = VT.getVectorNumElements();
5375 bool IsFakeUnary = false;
5376 switch(N->getOpcode()) {
5377 case X86ISD::BLENDI:
5378 ImmN = N->getOperand(N->getNumOperands()-1);
5379 DecodeBLENDMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
5382 ImmN = N->getOperand(N->getNumOperands()-1);
5383 DecodeSHUFPMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
5384 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
5386 case X86ISD::UNPCKH:
5387 DecodeUNPCKHMask(VT, Mask);
5388 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
5390 case X86ISD::UNPCKL:
5391 DecodeUNPCKLMask(VT, Mask);
5392 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
5394 case X86ISD::MOVHLPS:
5395 DecodeMOVHLPSMask(NumElems, Mask);
5396 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
5398 case X86ISD::MOVLHPS:
5399 DecodeMOVLHPSMask(NumElems, Mask);
5400 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
5402 case X86ISD::PALIGNR:
5403 ImmN = N->getOperand(N->getNumOperands()-1);
5404 DecodePALIGNRMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
5406 case X86ISD::PSHUFD:
5407 case X86ISD::VPERMILPI:
5408 ImmN = N->getOperand(N->getNumOperands()-1);
5409 DecodePSHUFMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
5412 case X86ISD::PSHUFHW:
5413 ImmN = N->getOperand(N->getNumOperands()-1);
5414 DecodePSHUFHWMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
5417 case X86ISD::PSHUFLW:
5418 ImmN = N->getOperand(N->getNumOperands()-1);
5419 DecodePSHUFLWMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
5422 case X86ISD::PSHUFB: {
5424 SDValue MaskNode = N->getOperand(1);
5425 while (MaskNode->getOpcode() == ISD::BITCAST)
5426 MaskNode = MaskNode->getOperand(0);
5428 if (MaskNode->getOpcode() == ISD::BUILD_VECTOR) {
5429 // If we have a build-vector, then things are easy.
5430 EVT VT = MaskNode.getValueType();
5431 assert(VT.isVector() &&
5432 "Can't produce a non-vector with a build_vector!");
5433 if (!VT.isInteger())
5436 int NumBytesPerElement = VT.getVectorElementType().getSizeInBits() / 8;
5438 SmallVector<uint64_t, 32> RawMask;
5439 for (int i = 0, e = MaskNode->getNumOperands(); i < e; ++i) {
5440 SDValue Op = MaskNode->getOperand(i);
5441 if (Op->getOpcode() == ISD::UNDEF) {
5442 RawMask.push_back((uint64_t)SM_SentinelUndef);
5445 auto *CN = dyn_cast<ConstantSDNode>(Op.getNode());
5448 APInt MaskElement = CN->getAPIntValue();
5450 // We now have to decode the element which could be any integer size and
5451 // extract each byte of it.
5452 for (int j = 0; j < NumBytesPerElement; ++j) {
5453 // Note that this is x86 and so always little endian: the low byte is
5454 // the first byte of the mask.
5455 RawMask.push_back(MaskElement.getLoBits(8).getZExtValue());
5456 MaskElement = MaskElement.lshr(8);
5459 DecodePSHUFBMask(RawMask, Mask);
5463 auto *MaskLoad = dyn_cast<LoadSDNode>(MaskNode);
5467 SDValue Ptr = MaskLoad->getBasePtr();
5468 if (Ptr->getOpcode() == X86ISD::Wrapper)
5469 Ptr = Ptr->getOperand(0);
5471 auto *MaskCP = dyn_cast<ConstantPoolSDNode>(Ptr);
5472 if (!MaskCP || MaskCP->isMachineConstantPoolEntry())
5475 if (auto *C = dyn_cast<Constant>(MaskCP->getConstVal())) {
5476 DecodePSHUFBMask(C, Mask);
5484 case X86ISD::VPERMI:
5485 ImmN = N->getOperand(N->getNumOperands()-1);
5486 DecodeVPERMMask(cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
5491 DecodeScalarMoveMask(VT, /* IsLoad */ false, Mask);
5493 case X86ISD::VPERM2X128:
5494 ImmN = N->getOperand(N->getNumOperands()-1);
5495 DecodeVPERM2X128Mask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
5496 if (Mask.empty()) return false;
5498 case X86ISD::MOVSLDUP:
5499 DecodeMOVSLDUPMask(VT, Mask);
5502 case X86ISD::MOVSHDUP:
5503 DecodeMOVSHDUPMask(VT, Mask);
5506 case X86ISD::MOVDDUP:
5507 DecodeMOVDDUPMask(VT, Mask);
5510 case X86ISD::MOVLHPD:
5511 case X86ISD::MOVLPD:
5512 case X86ISD::MOVLPS:
5513 // Not yet implemented
5515 default: llvm_unreachable("unknown target shuffle node");
5518 // If we have a fake unary shuffle, the shuffle mask is spread across two
5519 // inputs that are actually the same node. Re-map the mask to always point
5520 // into the first input.
5523 if (M >= (int)Mask.size())
5529 /// getShuffleScalarElt - Returns the scalar element that will make up the ith
5530 /// element of the result of the vector shuffle.
5531 static SDValue getShuffleScalarElt(SDNode *N, unsigned Index, SelectionDAG &DAG,
5534 return SDValue(); // Limit search depth.
5536 SDValue V = SDValue(N, 0);
5537 EVT VT = V.getValueType();
5538 unsigned Opcode = V.getOpcode();
5540 // Recurse into ISD::VECTOR_SHUFFLE node to find scalars.
5541 if (const ShuffleVectorSDNode *SV = dyn_cast<ShuffleVectorSDNode>(N)) {
5542 int Elt = SV->getMaskElt(Index);
5545 return DAG.getUNDEF(VT.getVectorElementType());
5547 unsigned NumElems = VT.getVectorNumElements();
5548 SDValue NewV = (Elt < (int)NumElems) ? SV->getOperand(0)
5549 : SV->getOperand(1);
5550 return getShuffleScalarElt(NewV.getNode(), Elt % NumElems, DAG, Depth+1);
5553 // Recurse into target specific vector shuffles to find scalars.
5554 if (isTargetShuffle(Opcode)) {
5555 MVT ShufVT = V.getSimpleValueType();
5556 unsigned NumElems = ShufVT.getVectorNumElements();
5557 SmallVector<int, 16> ShuffleMask;
5560 if (!getTargetShuffleMask(N, ShufVT, ShuffleMask, IsUnary))
5563 int Elt = ShuffleMask[Index];
5565 return DAG.getUNDEF(ShufVT.getVectorElementType());
5567 SDValue NewV = (Elt < (int)NumElems) ? N->getOperand(0)
5569 return getShuffleScalarElt(NewV.getNode(), Elt % NumElems, DAG,
5573 // Actual nodes that may contain scalar elements
5574 if (Opcode == ISD::BITCAST) {
5575 V = V.getOperand(0);
5576 EVT SrcVT = V.getValueType();
5577 unsigned NumElems = VT.getVectorNumElements();
5579 if (!SrcVT.isVector() || SrcVT.getVectorNumElements() != NumElems)
5583 if (V.getOpcode() == ISD::SCALAR_TO_VECTOR)
5584 return (Index == 0) ? V.getOperand(0)
5585 : DAG.getUNDEF(VT.getVectorElementType());
5587 if (V.getOpcode() == ISD::BUILD_VECTOR)
5588 return V.getOperand(Index);
5593 /// getNumOfConsecutiveZeros - Return the number of elements of a vector
5594 /// shuffle operation that are consecutively zero. The
5595 /// search can start in two different directions, from left or right.
5596 /// We count undefs as zeros until PreferredNum is reached.
5597 static unsigned getNumOfConsecutiveZeros(ShuffleVectorSDNode *SVOp,
5598 unsigned NumElems, bool ZerosFromLeft,
5600 unsigned PreferredNum = -1U) {
5601 unsigned NumZeros = 0;
5602 for (unsigned i = 0; i != NumElems; ++i) {
5603 unsigned Index = ZerosFromLeft ? i : NumElems - i - 1;
5604 SDValue Elt = getShuffleScalarElt(SVOp, Index, DAG, 0);
5608 if (X86::isZeroNode(Elt))
5610 else if (Elt.getOpcode() == ISD::UNDEF) // Undef as zero up to PreferredNum.
5611 NumZeros = std::min(NumZeros + 1, PreferredNum);
5619 /// isShuffleMaskConsecutive - Check if the shuffle mask indices [MaskI, MaskE)
5620 /// correspond consecutively to elements from one of the vector operands,
5621 /// starting from its index OpIdx. Also set OpNum to the source vector operand used.
5623 bool isShuffleMaskConsecutive(ShuffleVectorSDNode *SVOp,
5624 unsigned MaskI, unsigned MaskE, unsigned OpIdx,
5625 unsigned NumElems, unsigned &OpNum) {
5626 bool SeenV1 = false;
5627 bool SeenV2 = false;
5629 for (unsigned i = MaskI; i != MaskE; ++i, ++OpIdx) {
5630 int Idx = SVOp->getMaskElt(i);
5631 // Ignore undef indices
5635 if (Idx < (int)NumElems)
5640 // Only accept consecutive elements from the same vector
5641 if ((Idx % NumElems != OpIdx) || (SeenV1 && SeenV2))
5645 OpNum = SeenV1 ? 0 : 1;
5649 /// isVectorShiftRight - Returns true if the shuffle can be implemented as a
5650 /// logical right shift of a vector.
5651 static bool isVectorShiftRight(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG,
5652 bool &isLeft, SDValue &ShVal, unsigned &ShAmt) {
5654 SVOp->getSimpleValueType(0).getVectorNumElements();
5655 unsigned NumZeros = getNumOfConsecutiveZeros(
5656 SVOp, NumElems, false /* check zeros from right */, DAG,
5657 SVOp->getMaskElt(0));
5663 // Considering the elements in the mask that are not consecutive zeros,
5664 // check if they consecutively come from only one of the source vectors.
5666 // V1 = {X, A, B, C} 0
5668 // vector_shuffle V1, V2 <1, 2, 3, X>
5670 if (!isShuffleMaskConsecutive(SVOp,
5671 0, // Mask Start Index
5672 NumElems-NumZeros, // Mask End Index(exclusive)
5673 NumZeros, // Where to start looking in the src vector
5674 NumElems, // Number of elements in vector
5675 OpSrc)) // Which source operand ?
5680 ShVal = SVOp->getOperand(OpSrc);
5684 /// isVectorShiftLeft - Returns true if the shuffle can be implemented as a
5685 /// logical left shift of a vector.
5686 static bool isVectorShiftLeft(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG,
5687 bool &isLeft, SDValue &ShVal, unsigned &ShAmt) {
5689 SVOp->getSimpleValueType(0).getVectorNumElements();
5690 unsigned NumZeros = getNumOfConsecutiveZeros(
5691 SVOp, NumElems, true /* check zeros from left */, DAG,
5692 NumElems - SVOp->getMaskElt(NumElems - 1) - 1);
5698 // Considering the elements in the mask that are not consecutive zeros,
5699 // check if they consecutively come from only one of the source vectors.
5701 // 0 { A, B, X, X } = V2
5703 // vector_shuffle V1, V2 <X, X, 4, 5>
5705 if (!isShuffleMaskConsecutive(SVOp,
5706 NumZeros, // Mask Start Index
5707 NumElems, // Mask End Index(exclusive)
5708 0, // Where to start looking in the src vector
5709 NumElems, // Number of elements in vector
5710 OpSrc)) // Which source operand ?
5715 ShVal = SVOp->getOperand(OpSrc);
5719 /// isVectorShift - Returns true if the shuffle can be implemented as a
5720 /// logical left or right shift of a vector.
5721 static bool isVectorShift(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG,
5722 bool &isLeft, SDValue &ShVal, unsigned &ShAmt) {
5723 // Although the logic below supports any bitwidth size, there are no
5724 // shift instructions which handle more than 128-bit vectors.
5725 if (!SVOp->getSimpleValueType(0).is128BitVector())
5728 if (isVectorShiftLeft(SVOp, DAG, isLeft, ShVal, ShAmt) ||
5729 isVectorShiftRight(SVOp, DAG, isLeft, ShVal, ShAmt))
5735 /// LowerBuildVectorv16i8 - Custom lower build_vector of v16i8.
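/// A sketch of the approach used below: adjacent pairs of i8 elements are
/// zero-extended to i16, the odd element is shifted left by 8 and OR'ed into
/// the even one, the combined i16 is inserted at position i/2 of a v8i16, and
/// the result is finally bitcast back to v16i8.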
5737 static SDValue LowerBuildVectorv16i8(SDValue Op, unsigned NonZeros,
5738 unsigned NumNonZero, unsigned NumZero,
5740 const X86Subtarget* Subtarget,
5741 const TargetLowering &TLI) {
5748 for (unsigned i = 0; i < 16; ++i) {
5749 bool ThisIsNonZero = (NonZeros & (1 << i)) != 0;
5750 if (ThisIsNonZero && First) {
5752 V = getZeroVector(MVT::v8i16, Subtarget, DAG, dl);
5754 V = DAG.getUNDEF(MVT::v8i16);
5759 SDValue ThisElt, LastElt;
5760 bool LastIsNonZero = (NonZeros & (1 << (i-1))) != 0;
5761 if (LastIsNonZero) {
5762 LastElt = DAG.getNode(ISD::ZERO_EXTEND, dl,
5763 MVT::i16, Op.getOperand(i-1));
5765 if (ThisIsNonZero) {
5766 ThisElt = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, Op.getOperand(i));
5767 ThisElt = DAG.getNode(ISD::SHL, dl, MVT::i16,
5768 ThisElt, DAG.getConstant(8, MVT::i8));
5770 ThisElt = DAG.getNode(ISD::OR, dl, MVT::i16, ThisElt, LastElt);
5774 if (ThisElt.getNode())
5775 V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v8i16, V, ThisElt,
5776 DAG.getIntPtrConstant(i/2));
5780 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, V);
5783 /// LowerBuildVectorv8i16 - Custom lower build_vector of v8i16.
5785 static SDValue LowerBuildVectorv8i16(SDValue Op, unsigned NonZeros,
5786 unsigned NumNonZero, unsigned NumZero,
5788 const X86Subtarget* Subtarget,
5789 const TargetLowering &TLI) {
5796 for (unsigned i = 0; i < 8; ++i) {
5797 bool isNonZero = (NonZeros & (1 << i)) != 0;
5801 V = getZeroVector(MVT::v8i16, Subtarget, DAG, dl);
5803 V = DAG.getUNDEF(MVT::v8i16);
5806 V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl,
5807 MVT::v8i16, V, Op.getOperand(i),
5808 DAG.getIntPtrConstant(i));
5815 /// LowerBuildVectorv4x32 - Custom lower build_vector of v4i32 or v4f32.
5816 static SDValue LowerBuildVectorv4x32(SDValue Op, SelectionDAG &DAG,
5817 const X86Subtarget *Subtarget,
5818 const TargetLowering &TLI) {
5819 // Find all zeroable elements.
5821 for (int i=0; i < 4; ++i) {
5822 SDValue Elt = Op->getOperand(i);
5823 Zeroable[i] = (Elt.getOpcode() == ISD::UNDEF || X86::isZeroNode(Elt));
5825 assert(std::count_if(&Zeroable[0], &Zeroable[4],
5826 [](bool M) { return !M; }) > 1 &&
5827 "We expect at least two non-zero elements!");
5829 // We only know how to deal with build_vector nodes where elements are either
5830 // zeroable or extract_vector_elt with constant index.
5831 SDValue FirstNonZero;
5832 unsigned FirstNonZeroIdx;
5833 for (unsigned i=0; i < 4; ++i) {
5836 SDValue Elt = Op->getOperand(i);
5837 if (Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
5838 !isa<ConstantSDNode>(Elt.getOperand(1)))
5840 // Make sure that this node is extracting from a 128-bit vector.
5841 MVT VT = Elt.getOperand(0).getSimpleValueType();
5842 if (!VT.is128BitVector())
5844 if (!FirstNonZero.getNode()) {
5846 FirstNonZeroIdx = i;
5850 assert(FirstNonZero.getNode() && "Unexpected build vector of all zeros!");
5851 SDValue V1 = FirstNonZero.getOperand(0);
5852 MVT VT = V1.getSimpleValueType();
5854 // See if this build_vector can be lowered as a blend with zero.
5856 unsigned EltMaskIdx, EltIdx;
5858 for (EltIdx = 0; EltIdx < 4; ++EltIdx) {
5859 if (Zeroable[EltIdx]) {
5860 // The zero vector will be on the right hand side.
5861 Mask[EltIdx] = EltIdx+4;
5865 Elt = Op->getOperand(EltIdx);
5866 // By construction, Elt is an EXTRACT_VECTOR_ELT with a constant index.
5867 EltMaskIdx = cast<ConstantSDNode>(Elt.getOperand(1))->getZExtValue();
5868 if (Elt.getOperand(0) != V1 || EltMaskIdx != EltIdx)
5870 Mask[EltIdx] = EltIdx;
5874 // Let the shuffle legalizer deal with blend operations.
5875 SDValue VZero = getZeroVector(VT, Subtarget, DAG, SDLoc(Op));
5876 if (V1.getSimpleValueType() != VT)
5877 V1 = DAG.getNode(ISD::BITCAST, SDLoc(V1), VT, V1);
5878 return DAG.getVectorShuffle(VT, SDLoc(V1), V1, VZero, &Mask[0]);
5881 // See if we can lower this build_vector to a INSERTPS.
5882 if (!Subtarget->hasSSE41())
5885 SDValue V2 = Elt.getOperand(0);
5886 if (Elt == FirstNonZero && EltIdx == FirstNonZeroIdx)
5889 bool CanFold = true;
5890 for (unsigned i = EltIdx + 1; i < 4 && CanFold; ++i) {
5894 SDValue Current = Op->getOperand(i);
5895 SDValue SrcVector = Current->getOperand(0);
5898 CanFold = SrcVector == V1 &&
5899 cast<ConstantSDNode>(Current.getOperand(1))->getZExtValue() == i;
5905 assert(V1.getNode() && "Expected at least two non-zero elements!");
5906 if (V1.getSimpleValueType() != MVT::v4f32)
5907 V1 = DAG.getNode(ISD::BITCAST, SDLoc(V1), MVT::v4f32, V1);
5908 if (V2.getSimpleValueType() != MVT::v4f32)
5909 V2 = DAG.getNode(ISD::BITCAST, SDLoc(V2), MVT::v4f32, V2);
5911 // Ok, we can emit an INSERTPS instruction.
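// The INSERTPS immediate encodes the source element in bits [7:6], the
// destination element in bits [5:4], and the zero mask in bits [3:0].
// For example (illustrative values): EltMaskIdx == 2, EltIdx == 1 and an
// empty zero mask give the immediate (2 << 6) | (1 << 4) == 0x90.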
5913 for (int i = 0; i < 4; ++i)
5917 unsigned InsertPSMask = EltMaskIdx << 6 | EltIdx << 4 | ZMask;
5918 assert((InsertPSMask & ~0xFFu) == 0 && "Invalid mask!");
5919 SDValue Result = DAG.getNode(X86ISD::INSERTPS, SDLoc(Op), MVT::v4f32, V1, V2,
5920 DAG.getIntPtrConstant(InsertPSMask));
5921 return DAG.getNode(ISD::BITCAST, SDLoc(Op), VT, Result);
5924 /// Return a vector logical shift node.
5925 static SDValue getVShift(bool isLeft, EVT VT, SDValue SrcOp,
5926 unsigned NumBits, SelectionDAG &DAG,
5927 const TargetLowering &TLI, SDLoc dl) {
5928 assert(VT.is128BitVector() && "Unknown type for VShift");
5929 MVT ShVT = MVT::v2i64;
5930 unsigned Opc = isLeft ? X86ISD::VSHLDQ : X86ISD::VSRLDQ;
5931 SrcOp = DAG.getNode(ISD::BITCAST, dl, ShVT, SrcOp);
5932 MVT ScalarShiftTy = TLI.getScalarShiftAmountTy(SrcOp.getValueType());
5933 SDValue ShiftVal = DAG.getConstant(NumBits, ScalarShiftTy);
5934 return DAG.getNode(ISD::BITCAST, dl, VT,
5935 DAG.getNode(Opc, dl, ShVT, SrcOp, ShiftVal));
5939 LowerAsSplatVectorLoad(SDValue SrcOp, MVT VT, SDLoc dl, SelectionDAG &DAG) {
5941 // Check if the scalar load can be widened into a vector load. And if
5942 // the address is "base + cst" see if the cst can be "absorbed" into
5943 // the shuffle mask.
5944 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(SrcOp)) {
5945 SDValue Ptr = LD->getBasePtr();
5946 if (!ISD::isNormalLoad(LD) || LD->isVolatile())
5948 EVT PVT = LD->getValueType(0);
5949 if (PVT != MVT::i32 && PVT != MVT::f32)
5954 if (FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(Ptr)) {
5955 FI = FINode->getIndex();
5957 } else if (DAG.isBaseWithConstantOffset(Ptr) &&
5958 isa<FrameIndexSDNode>(Ptr.getOperand(0))) {
5959 FI = cast<FrameIndexSDNode>(Ptr.getOperand(0))->getIndex();
5960 Offset = Ptr.getConstantOperandVal(1);
5961 Ptr = Ptr.getOperand(0);
5966 // FIXME: 256-bit vector instructions don't require a strict alignment,
5967 // improve this code to support it better.
5968 unsigned RequiredAlign = VT.getSizeInBits()/8;
5969 SDValue Chain = LD->getChain();
5970 // Make sure the stack object alignment is at least 16 or 32.
5971 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
5972 if (DAG.InferPtrAlignment(Ptr) < RequiredAlign) {
5973 if (MFI->isFixedObjectIndex(FI)) {
5974 // Can't change the alignment. FIXME: It's possible to compute
5975 // the exact stack offset and reference FI + adjust offset instead.
5976 // If someone *really* cares about this. That's the way to implement it.
5979 MFI->setObjectAlignment(FI, RequiredAlign);
5983 // (Offset % 16 or 32) must be a multiple of 4. The address is then
5984 // Ptr + (Offset & ~15).
5987 if ((Offset % RequiredAlign) & 3)
5989 int64_t StartOffset = Offset & ~(RequiredAlign-1);
5991 Ptr = DAG.getNode(ISD::ADD, SDLoc(Ptr), Ptr.getValueType(),
5992 Ptr,DAG.getConstant(StartOffset, Ptr.getValueType()));
5994 int EltNo = (Offset - StartOffset) >> 2;
5995 unsigned NumElems = VT.getVectorNumElements();
5997 EVT NVT = EVT::getVectorVT(*DAG.getContext(), PVT, NumElems);
5998 SDValue V1 = DAG.getLoad(NVT, dl, Chain, Ptr,
5999 LD->getPointerInfo().getWithOffset(StartOffset),
6000 false, false, false, 0);
6002 SmallVector<int, 8> Mask;
6003 for (unsigned i = 0; i != NumElems; ++i)
6004 Mask.push_back(EltNo);
6006 return DAG.getVectorShuffle(NVT, dl, V1, DAG.getUNDEF(NVT), &Mask[0]);
6012 /// Given the initializing elements 'Elts' of a vector of type 'VT', see if the
6013 /// elements can be replaced by a single large load which has the same value as
6014 /// a build_vector or insert_subvector whose loaded operands are 'Elts'.
6016 /// Example: <load i32 *a, load i32 *a+4, undef, undef> -> zextload a
6018 /// FIXME: we'd also like to handle the case where the last elements are zero
6019 /// rather than undef via VZEXT_LOAD, but we do not detect that case today.
6020 /// There's even a handy isZeroNode for that purpose.
6021 static SDValue EltsFromConsecutiveLoads(EVT VT, ArrayRef<SDValue> Elts,
6022 SDLoc &DL, SelectionDAG &DAG,
6023 bool isAfterLegalize) {
6024 unsigned NumElems = Elts.size();
6026 LoadSDNode *LDBase = nullptr;
6027 unsigned LastLoadedElt = -1U;
6029 // For each element in the initializer, see if we've found a load or an undef.
6030 // If we don't find an initial load element, or later load elements are
6031 // non-consecutive, bail out.
6032 for (unsigned i = 0; i < NumElems; ++i) {
6033 SDValue Elt = Elts[i];
6034 // Look through a bitcast.
6035 if (Elt.getNode() && Elt.getOpcode() == ISD::BITCAST)
6036 Elt = Elt.getOperand(0);
6037 if (!Elt.getNode() ||
6038 (Elt.getOpcode() != ISD::UNDEF && !ISD::isNON_EXTLoad(Elt.getNode())))
6041 if (Elt.getNode()->getOpcode() == ISD::UNDEF)
6043 LDBase = cast<LoadSDNode>(Elt.getNode());
6047 if (Elt.getOpcode() == ISD::UNDEF)
6050 LoadSDNode *LD = cast<LoadSDNode>(Elt);
6051 EVT LdVT = Elt.getValueType();
6052 // Each loaded element must be the correct fractional portion of the
6053 // requested vector load.
6054 if (LdVT.getSizeInBits() != VT.getSizeInBits() / NumElems)
6056 if (!DAG.isConsecutiveLoad(LD, LDBase, LdVT.getSizeInBits() / 8, i))
6061 // If we have found an entire vector of loads and undefs, then return a large
6062 // load of the entire vector width starting at the base pointer. If we found
6063 // consecutive loads for the low half, generate a vzext_load node.
6064 if (LastLoadedElt == NumElems - 1) {
6065 assert(LDBase && "Did not find base load for merging consecutive loads");
6066 EVT EltVT = LDBase->getValueType(0);
6067 // Ensure that the input vector size for the merged loads matches the
6068 // cumulative size of the input elements.
6069 if (VT.getSizeInBits() != EltVT.getSizeInBits() * NumElems)
6072 if (isAfterLegalize &&
6073 !DAG.getTargetLoweringInfo().isOperationLegal(ISD::LOAD, VT))
6076 SDValue NewLd = SDValue();
6078 NewLd = DAG.getLoad(VT, DL, LDBase->getChain(), LDBase->getBasePtr(),
6079 LDBase->getPointerInfo(), LDBase->isVolatile(),
6080 LDBase->isNonTemporal(), LDBase->isInvariant(),
6081 LDBase->getAlignment());
6083 if (LDBase->hasAnyUseOfValue(1)) {
6084 SDValue NewChain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other,
6086 SDValue(NewLd.getNode(), 1));
6087 DAG.ReplaceAllUsesOfValueWith(SDValue(LDBase, 1), NewChain);
6088 DAG.UpdateNodeOperands(NewChain.getNode(), SDValue(LDBase, 1),
6089 SDValue(NewLd.getNode(), 1));
6095 // TODO: The code below fires only for loading the low v2i32 / v2f32
6096 // of a v4i32 / v4f32. It's probably worth generalizing.
6097 EVT EltVT = VT.getVectorElementType();
6098 if (NumElems == 4 && LastLoadedElt == 1 && (EltVT.getSizeInBits() == 32) &&
6099 DAG.getTargetLoweringInfo().isTypeLegal(MVT::v2i64)) {
6100 SDVTList Tys = DAG.getVTList(MVT::v2i64, MVT::Other);
6101 SDValue Ops[] = { LDBase->getChain(), LDBase->getBasePtr() };
6103 DAG.getMemIntrinsicNode(X86ISD::VZEXT_LOAD, DL, Tys, Ops, MVT::i64,
6104 LDBase->getPointerInfo(),
6105 LDBase->getAlignment(),
6106 false/*isVolatile*/, true/*ReadMem*/,
6109 // Make sure the newly-created LOAD is in the same position as LDBase in
6110 // terms of dependency. We create a TokenFactor for LDBase and ResNode, and
6111 // update uses of LDBase's output chain to use the TokenFactor.
6112 if (LDBase->hasAnyUseOfValue(1)) {
6113 SDValue NewChain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other,
6114 SDValue(LDBase, 1), SDValue(ResNode.getNode(), 1));
6115 DAG.ReplaceAllUsesOfValueWith(SDValue(LDBase, 1), NewChain);
6116 DAG.UpdateNodeOperands(NewChain.getNode(), SDValue(LDBase, 1),
6117 SDValue(ResNode.getNode(), 1));
6120 return DAG.getNode(ISD::BITCAST, DL, VT, ResNode);
6125 /// LowerVectorBroadcast - Attempt to use the vbroadcast instruction
6126 /// to generate a splat value for the following cases:
6127 /// 1. A splat BUILD_VECTOR which uses a single scalar load, or a constant.
6128 /// 2. A splat shuffle which uses a scalar_to_vector node which comes from
6129 /// a scalar load, or a constant.
6130 /// The VBROADCAST node is returned when a pattern is found,
6131 /// or SDValue() otherwise.
6132 static SDValue LowerVectorBroadcast(SDValue Op, const X86Subtarget* Subtarget,
6133 SelectionDAG &DAG) {
6134 // VBROADCAST requires AVX.
6135 // TODO: Splats could be generated for non-AVX CPUs using SSE
6136 // instructions, but there's less potential gain for only 128-bit vectors.
6137 if (!Subtarget->hasAVX())
6140 MVT VT = Op.getSimpleValueType();
6143 assert((VT.is128BitVector() || VT.is256BitVector() || VT.is512BitVector()) &&
6144 "Unsupported vector type for broadcast.");
6149 switch (Op.getOpcode()) {
6151 // Unknown pattern found.
6154 case ISD::BUILD_VECTOR: {
6155 auto *BVOp = cast<BuildVectorSDNode>(Op.getNode());
6156 BitVector UndefElements;
6157 SDValue Splat = BVOp->getSplatValue(&UndefElements);
6159 // We need a splat of a single value to use broadcast, and it doesn't
6160 // make any sense if the value is only in one element of the vector.
6161 if (!Splat || (VT.getVectorNumElements() - UndefElements.count()) <= 1)
6165 ConstSplatVal = (Ld.getOpcode() == ISD::Constant ||
6166 Ld.getOpcode() == ISD::ConstantFP);
6168 // Make sure that all of the users of a non-constant load are from the
6169 // BUILD_VECTOR node.
6170 if (!ConstSplatVal && !BVOp->isOnlyUserOf(Ld.getNode()))
6175 case ISD::VECTOR_SHUFFLE: {
6176 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
6178 // Shuffles must have a splat mask where the first element is broadcasted.
6180 if ((!SVOp->isSplat()) || SVOp->getMaskElt(0) != 0)
6183 SDValue Sc = Op.getOperand(0);
6184 if (Sc.getOpcode() != ISD::SCALAR_TO_VECTOR &&
6185 Sc.getOpcode() != ISD::BUILD_VECTOR) {
6187 if (!Subtarget->hasInt256())
6190 // Use the register form of the broadcast instruction available on AVX2.
6191 if (VT.getSizeInBits() >= 256)
6192 Sc = Extract128BitVector(Sc, 0, DAG, dl);
6193 return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Sc);
6196 Ld = Sc.getOperand(0);
6197 ConstSplatVal = (Ld.getOpcode() == ISD::Constant ||
6198 Ld.getOpcode() == ISD::ConstantFP);
6200 // The scalar_to_vector node and the suspected
6201 // load node must have exactly one user.
6202 // Constants may have multiple users.
6204 // AVX-512 has a register version of the broadcast instruction.
6205 bool hasRegVer = Subtarget->hasAVX512() && VT.is512BitVector() &&
6206 Ld.getValueType().getSizeInBits() >= 32;
6207 if (!ConstSplatVal && ((!Sc.hasOneUse() || !Ld.hasOneUse()) &&
6214 unsigned ScalarSize = Ld.getValueType().getSizeInBits();
6215 bool IsGE256 = (VT.getSizeInBits() >= 256);
6217 // When optimizing for size, generate up to 5 extra bytes for a broadcast
6218 // instruction to save 8 or more bytes of constant pool data.
6219 // TODO: If multiple splats are generated to load the same constant,
6220 // it may be detrimental to overall size. There needs to be a way to detect
6221 // that condition to know if this is truly a size win.
6222 const Function *F = DAG.getMachineFunction().getFunction();
6223 bool OptForSize = F->hasFnAttribute(Attribute::OptimizeForSize);
6225 // Handle broadcasting a single constant scalar from the constant pool into a vector.
6227 // On Sandybridge (no AVX2), it is still better to load a constant vector
6228 // from the constant pool and not to broadcast it from a scalar.
6229 // But override that restriction when optimizing for size.
6230 // TODO: Check if splatting is recommended for other AVX-capable CPUs.
6231 if (ConstSplatVal && (Subtarget->hasAVX2() || OptForSize)) {
6232 EVT CVT = Ld.getValueType();
6233 assert(!CVT.isVector() && "Must not broadcast a vector type");
6235 // Splat f32, i32, v4f64, v4i64 in all cases with AVX2.
6236 // For size optimization, also splat v2f64 and v2i64, and for size opt
6237 // with AVX2, also splat i8 and i16.
6238 // With pattern matching, the VBROADCAST node may become a VMOVDDUP.
6239 if (ScalarSize == 32 || (IsGE256 && ScalarSize == 64) ||
6240 (OptForSize && (ScalarSize == 64 || Subtarget->hasAVX2()))) {
6241 const Constant *C = nullptr;
6242 if (ConstantSDNode *CI = dyn_cast<ConstantSDNode>(Ld))
6243 C = CI->getConstantIntValue();
6244 else if (ConstantFPSDNode *CF = dyn_cast<ConstantFPSDNode>(Ld))
6245 C = CF->getConstantFPValue();
6247 assert(C && "Invalid constant type");
6249 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
6250 SDValue CP = DAG.getConstantPool(C, TLI.getPointerTy());
6251 unsigned Alignment = cast<ConstantPoolSDNode>(CP)->getAlignment();
6252 Ld = DAG.getLoad(CVT, dl, DAG.getEntryNode(), CP,
6253 MachinePointerInfo::getConstantPool(),
6254 false, false, false, Alignment);
6256 return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld);
6260 bool IsLoad = ISD::isNormalLoad(Ld.getNode());
6262 // Handle AVX2 in-register broadcasts.
6263 if (!IsLoad && Subtarget->hasInt256() &&
6264 (ScalarSize == 32 || (IsGE256 && ScalarSize == 64)))
6265 return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld);
6267 // The scalar source must be a normal load.
6271 if (ScalarSize == 32 || (IsGE256 && ScalarSize == 64) ||
6272 (Subtarget->hasVLX() && ScalarSize == 64))
6273 return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld);
6275 // The integer check is needed for the 64-bit element into a 128-bit vector
6276 // case, so this doesn't match double: there is no vbroadcastsd with an xmm destination.
6277 if (Subtarget->hasInt256() && Ld.getValueType().isInteger()) {
6278 if (ScalarSize == 8 || ScalarSize == 16 || ScalarSize == 64)
6279 return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld);
6282 // Unsupported broadcast.
6286 /// \brief For an EXTRACT_VECTOR_ELT with a constant index return the real
6287 /// underlying vector and index.
6289 /// Modifies \p ExtractedFromVec to the real vector and returns the real index.
6291 static int getUnderlyingExtractedFromVec(SDValue &ExtractedFromVec,
6293 int Idx = cast<ConstantSDNode>(ExtIdx)->getZExtValue();
6294 if (!isa<ShuffleVectorSDNode>(ExtractedFromVec))
6297 // For 256-bit vectors, LowerEXTRACT_VECTOR_ELT_SSE4 may have already
6299 // (extract_vector_elt (v8f32 %vreg1), Constant<6>)
6301 // (extract_vector_elt (vector_shuffle<2,u,u,u>
6302 // (extract_subvector (v8f32 %vreg0), Constant<4>),
6305 // In this case the vector is the extract_subvector expression and the index
6306 // is 2, as specified by the shuffle.
6307 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(ExtractedFromVec);
6308 SDValue ShuffleVec = SVOp->getOperand(0);
6309 MVT ShuffleVecVT = ShuffleVec.getSimpleValueType();
6310 assert(ShuffleVecVT.getVectorElementType() ==
6311 ExtractedFromVec.getSimpleValueType().getVectorElementType());
6313 int ShuffleIdx = SVOp->getMaskElt(Idx);
6314 if (isUndefOrInRange(ShuffleIdx, 0, ShuffleVecVT.getVectorNumElements())) {
6315 ExtractedFromVec = ShuffleVec;
6321 static SDValue buildFromShuffleMostly(SDValue Op, SelectionDAG &DAG) {
6322 MVT VT = Op.getSimpleValueType();
6324 // Skip if insert_vec_elt is not supported.
6325 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
6326 if (!TLI.isOperationLegalOrCustom(ISD::INSERT_VECTOR_ELT, VT))
6330 unsigned NumElems = Op.getNumOperands();
6334 SmallVector<unsigned, 4> InsertIndices;
6335 SmallVector<int, 8> Mask(NumElems, -1);
6337 for (unsigned i = 0; i != NumElems; ++i) {
6338 unsigned Opc = Op.getOperand(i).getOpcode();
6340 if (Opc == ISD::UNDEF)
6343 if (Opc != ISD::EXTRACT_VECTOR_ELT) {
6344 // Quit if more than 1 element needs inserting.
6345 if (InsertIndices.size() > 1)
6348 InsertIndices.push_back(i);
6352 SDValue ExtractedFromVec = Op.getOperand(i).getOperand(0);
6353 SDValue ExtIdx = Op.getOperand(i).getOperand(1);
6354 // Quit if non-constant index.
6355 if (!isa<ConstantSDNode>(ExtIdx))
6357 int Idx = getUnderlyingExtractedFromVec(ExtractedFromVec, ExtIdx);
6359 // Quit if extracted from vector of different type.
6360 if (ExtractedFromVec.getValueType() != VT)
6363 if (!VecIn1.getNode())
6364 VecIn1 = ExtractedFromVec;
6365 else if (VecIn1 != ExtractedFromVec) {
6366 if (!VecIn2.getNode())
6367 VecIn2 = ExtractedFromVec;
6368 else if (VecIn2 != ExtractedFromVec)
6369 // Quit if more than 2 vectors to shuffle
6373 if (ExtractedFromVec == VecIn1)
6375 else if (ExtractedFromVec == VecIn2)
6376 Mask[i] = Idx + NumElems;
6379 if (!VecIn1.getNode())
6382 VecIn2 = VecIn2.getNode() ? VecIn2 : DAG.getUNDEF(VT);
6383 SDValue NV = DAG.getVectorShuffle(VT, DL, VecIn1, VecIn2, &Mask[0]);
6384 for (unsigned i = 0, e = InsertIndices.size(); i != e; ++i) {
6385 unsigned Idx = InsertIndices[i];
6386 NV = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VT, NV, Op.getOperand(Idx),
6387 DAG.getIntPtrConstant(Idx));
6393 // Lower BUILD_VECTOR operation for v8i1 and v16i1 types.
6395 X86TargetLowering::LowerBUILD_VECTORvXi1(SDValue Op, SelectionDAG &DAG) const {
6397 MVT VT = Op.getSimpleValueType();
6398 assert((VT.getVectorElementType() == MVT::i1) && (VT.getSizeInBits() <= 16) &&
6399 "Unexpected type in LowerBUILD_VECTORvXi1!");
6402 if (ISD::isBuildVectorAllZeros(Op.getNode())) {
6403 SDValue Cst = DAG.getTargetConstant(0, MVT::i1);
6404 SmallVector<SDValue, 16> Ops(VT.getVectorNumElements(), Cst);
6405 return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Ops);
6408 if (ISD::isBuildVectorAllOnes(Op.getNode())) {
6409 SDValue Cst = DAG.getTargetConstant(1, MVT::i1);
6410 SmallVector<SDValue, 16> Ops(VT.getVectorNumElements(), Cst);
6411 return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Ops);
6414 bool AllContants = true;
6415 uint64_t Immediate = 0;
6416 int NonConstIdx = -1;
6417 bool IsSplat = true;
6418 unsigned NumNonConsts = 0;
6419 unsigned NumConsts = 0;
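  // Illustrative note: Immediate collects the known constant bits of the mask,
  // one bit per element, so e.g. a build_vector starting <i1 1, i1 0, i1 1,
  // i1 0, ...> contributes 0b0101 to Immediate.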
6420 for (unsigned idx = 0, e = Op.getNumOperands(); idx < e; ++idx) {
6421 SDValue In = Op.getOperand(idx);
6422 if (In.getOpcode() == ISD::UNDEF)
6424 if (!isa<ConstantSDNode>(In)) {
6425 AllContants = false;
6430 if (cast<ConstantSDNode>(In)->getZExtValue())
6431 Immediate |= (1ULL << idx);
6433 if (In != Op.getOperand(0))
6438 SDValue FullMask = DAG.getNode(ISD::BITCAST, dl, MVT::v16i1,
6439 DAG.getConstant(Immediate, MVT::i16));
6440 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, FullMask,
6441 DAG.getIntPtrConstant(0));
6444 if (NumNonConsts == 1 && NonConstIdx != 0) {
6447 SDValue VecAsImm = DAG.getConstant(Immediate,
6448 MVT::getIntegerVT(VT.getSizeInBits()));
6449 DstVec = DAG.getNode(ISD::BITCAST, dl, VT, VecAsImm);
6452 DstVec = DAG.getUNDEF(VT);
6453 return DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, DstVec,
6454 Op.getOperand(NonConstIdx),
6455 DAG.getIntPtrConstant(NonConstIdx));
6457 if (!IsSplat && (NonConstIdx != 0))
6458 llvm_unreachable("Unsupported BUILD_VECTOR operation");
6459 MVT SelectVT = (VT == MVT::v16i1)? MVT::i16 : MVT::i8;
6462 Select = DAG.getNode(ISD::SELECT, dl, SelectVT, Op.getOperand(0),
6463 DAG.getConstant(-1, SelectVT),
6464 DAG.getConstant(0, SelectVT));
6466 Select = DAG.getNode(ISD::SELECT, dl, SelectVT, Op.getOperand(0),
6467 DAG.getConstant((Immediate | 1), SelectVT),
6468 DAG.getConstant(Immediate, SelectVT));
6469 return DAG.getNode(ISD::BITCAST, dl, VT, Select);
6472 /// \brief Return true if \p N implements a horizontal binop and return the
6473 /// operands for the horizontal binop into V0 and V1.
6475 /// This is a helper function of PerformBUILD_VECTORCombine.
6476 /// This function checks whether the input build_vector \p N implements a
6477 /// horizontal operation. Parameter \p Opcode defines the kind of horizontal
6478 /// operation to match.
6479 /// For example, if \p Opcode is equal to ISD::ADD, then this function
6480 /// checks if \p N implements a horizontal arithmetic add; if instead \p Opcode
6481 /// is equal to ISD::SUB, then this function checks if this is a horizontal arithmetic sub.
6484 /// This function only analyzes elements of \p N whose indices are
6485 /// in range [BaseIdx, LastIdx).
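///
/// For illustration (not part of the original comment), a v4f32 build_vector
/// of the form
///   (build_vector (fadd (extractelt A, 0), (extractelt A, 1)),
///                 (fadd (extractelt A, 2), (extractelt A, 3)),
///                 (fadd (extractelt B, 0), (extractelt B, 1)),
///                 (fadd (extractelt B, 2), (extractelt B, 3)))
/// matches with V0 = A and V1 = B, which is the element arrangement produced
/// by HADDPS with A as the first operand and B as the second.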
6486 static bool isHorizontalBinOp(const BuildVectorSDNode *N, unsigned Opcode,
6488 unsigned BaseIdx, unsigned LastIdx,
6489 SDValue &V0, SDValue &V1) {
6490 EVT VT = N->getValueType(0);
6492 assert(BaseIdx * 2 <= LastIdx && "Invalid Indices in input!");
6493 assert(VT.isVector() && VT.getVectorNumElements() >= LastIdx &&
6494 "Invalid Vector in input!");
6496 bool IsCommutable = (Opcode == ISD::ADD || Opcode == ISD::FADD);
6497 bool CanFold = true;
6498 unsigned ExpectedVExtractIdx = BaseIdx;
6499 unsigned NumElts = LastIdx - BaseIdx;
6500 V0 = DAG.getUNDEF(VT);
6501 V1 = DAG.getUNDEF(VT);
6503 // Check if N implements a horizontal binop.
6504 for (unsigned i = 0, e = NumElts; i != e && CanFold; ++i) {
6505 SDValue Op = N->getOperand(i + BaseIdx);
6508 if (Op->getOpcode() == ISD::UNDEF) {
6509 // Update the expected vector extract index.
6510 if (i * 2 == NumElts)
6511 ExpectedVExtractIdx = BaseIdx;
6512 ExpectedVExtractIdx += 2;
6516 CanFold = Op->getOpcode() == Opcode && Op->hasOneUse();
6521 SDValue Op0 = Op.getOperand(0);
6522 SDValue Op1 = Op.getOperand(1);
6524 // Try to match the following pattern:
6525 // (BINOP (extract_vector_elt A, I), (extract_vector_elt A, I+1))
6526 CanFold = (Op0.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
6527 Op1.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
6528 Op0.getOperand(0) == Op1.getOperand(0) &&
6529 isa<ConstantSDNode>(Op0.getOperand(1)) &&
6530 isa<ConstantSDNode>(Op1.getOperand(1)));
6534 unsigned I0 = cast<ConstantSDNode>(Op0.getOperand(1))->getZExtValue();
6535 unsigned I1 = cast<ConstantSDNode>(Op1.getOperand(1))->getZExtValue();
6537 if (i * 2 < NumElts) {
6538 if (V0.getOpcode() == ISD::UNDEF)
6539 V0 = Op0.getOperand(0);
6541 if (V1.getOpcode() == ISD::UNDEF)
6542 V1 = Op0.getOperand(0);
6543 if (i * 2 == NumElts)
6544 ExpectedVExtractIdx = BaseIdx;
6547 SDValue Expected = (i * 2 < NumElts) ? V0 : V1;
6548 if (I0 == ExpectedVExtractIdx)
6549 CanFold = I1 == I0 + 1 && Op0.getOperand(0) == Expected;
6550 else if (IsCommutable && I1 == ExpectedVExtractIdx) {
6551 // Try to match the following dag sequence:
6552 // (BINOP (extract_vector_elt A, I+1), (extract_vector_elt A, I))
6553 CanFold = I0 == I1 + 1 && Op1.getOperand(0) == Expected;
6557 ExpectedVExtractIdx += 2;
6563 /// \brief Emit a sequence of two 128-bit horizontal add/sub followed by
6564 /// a concat_vector.
6566 /// This is a helper function of PerformBUILD_VECTORCombine.
6567 /// This function expects two 256-bit vectors called V0 and V1.
6568 /// At first, each vector is split into two separate 128-bit vectors.
6569 /// Then, the resulting 128-bit vectors are used to implement two
6570 /// horizontal binary operations.
6572 /// The kind of horizontal binary operation is defined by \p X86Opcode.
6574 /// \p Mode specifies how the 128-bit parts of V0 and V1 are passed as input to
6575 /// the two new horizontal binops.
6576 /// When Mode is set, the first horizontal binop node takes as input
6577 /// the lower 128 bits of V0 and the upper 128 bits of V0. The second
6578 /// horizontal binop node takes as input the lower 128 bits of V1
6579 /// and the upper 128 bits of V1:
6581 /// HADD V0_LO, V0_HI
6582 /// HADD V1_LO, V1_HI
6584 /// Otherwise, the first horizontal binop node takes as input the lower
6585 /// 128 bits of V0 and the lower 128 bits of V1, and the second horizontal binop
6586 /// node takes the upper 128 bits of V0 and the upper 128 bits of V1:
6588 /// HADD V0_LO, V1_LO
6589 /// HADD V0_HI, V1_HI
6591 /// If \p isUndefLO is set, then the algorithm propagates UNDEF to the lower
6592 /// 128-bits of the result. If \p isUndefHI is set, then UNDEF is propagated to
6593 /// the upper 128-bits of the result.
6594 static SDValue ExpandHorizontalBinOp(const SDValue &V0, const SDValue &V1,
6595 SDLoc DL, SelectionDAG &DAG,
6596 unsigned X86Opcode, bool Mode,
6597 bool isUndefLO, bool isUndefHI) {
6598 EVT VT = V0.getValueType();
6599 assert(VT.is256BitVector() && VT == V1.getValueType() &&
6600 "Invalid nodes in input!");
6602 unsigned NumElts = VT.getVectorNumElements();
6603 SDValue V0_LO = Extract128BitVector(V0, 0, DAG, DL);
6604 SDValue V0_HI = Extract128BitVector(V0, NumElts/2, DAG, DL);
6605 SDValue V1_LO = Extract128BitVector(V1, 0, DAG, DL);
6606 SDValue V1_HI = Extract128BitVector(V1, NumElts/2, DAG, DL);
6607 EVT NewVT = V0_LO.getValueType();
6609 SDValue LO = DAG.getUNDEF(NewVT);
6610 SDValue HI = DAG.getUNDEF(NewVT);
6613 // Don't emit a horizontal binop if the result is expected to be UNDEF.
6614 if (!isUndefLO && V0->getOpcode() != ISD::UNDEF)
6615 LO = DAG.getNode(X86Opcode, DL, NewVT, V0_LO, V0_HI);
6616 if (!isUndefHI && V1->getOpcode() != ISD::UNDEF)
6617 HI = DAG.getNode(X86Opcode, DL, NewVT, V1_LO, V1_HI);
6619 // Don't emit a horizontal binop if the result is expected to be UNDEF.
6620 if (!isUndefLO && (V0_LO->getOpcode() != ISD::UNDEF ||
6621 V1_LO->getOpcode() != ISD::UNDEF))
6622 LO = DAG.getNode(X86Opcode, DL, NewVT, V0_LO, V1_LO);
6624 if (!isUndefHI && (V0_HI->getOpcode() != ISD::UNDEF ||
6625 V1_HI->getOpcode() != ISD::UNDEF))
6626 HI = DAG.getNode(X86Opcode, DL, NewVT, V0_HI, V1_HI);
6629 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, LO, HI);
6632 /// \brief Try to fold a build_vector that performs an 'addsub' into the
6633 /// sequence of 'vadd + vsub + blendi'.
6634 static SDValue matchAddSub(const BuildVectorSDNode *BV, SelectionDAG &DAG,
6635 const X86Subtarget *Subtarget) {
6637 EVT VT = BV->getValueType(0);
6638 unsigned NumElts = VT.getVectorNumElements();
6639 SDValue InVec0 = DAG.getUNDEF(VT);
6640 SDValue InVec1 = DAG.getUNDEF(VT);
6642 assert((VT == MVT::v8f32 || VT == MVT::v4f64 || VT == MVT::v4f32 ||
6643 VT == MVT::v2f64) && "build_vector with an invalid type found!");
6645 // Odd-numbered elements in the input build vector are obtained from
6646 // adding two integer/float elements.
6647 // Even-numbered elements in the input build vector are obtained from
6648 // subtracting two integer/float elements.
6649 unsigned ExpectedOpcode = ISD::FSUB;
6650 unsigned NextExpectedOpcode = ISD::FADD;
6651 bool AddFound = false;
6652 bool SubFound = false;
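  // For illustration, a v4f32 build_vector that matches ADDSUBPS looks like:
  //   (build_vector (fsub (extractelt A, 0), (extractelt B, 0)),
  //                 (fadd (extractelt A, 1), (extractelt B, 1)),
  //                 (fsub (extractelt A, 2), (extractelt B, 2)),
  //                 (fadd (extractelt A, 3), (extractelt B, 3)))
  // i.e. even elements subtract and odd elements add, using the same lane of
  // the two source vectors.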
6654 for (unsigned i = 0, e = NumElts; i != e; ++i) {
6655 SDValue Op = BV->getOperand(i);
6657 // Skip 'undef' values.
6658 unsigned Opcode = Op.getOpcode();
6659 if (Opcode == ISD::UNDEF) {
6660 std::swap(ExpectedOpcode, NextExpectedOpcode);
6664 // Early exit if we found an unexpected opcode.
6665 if (Opcode != ExpectedOpcode)
6668 SDValue Op0 = Op.getOperand(0);
6669 SDValue Op1 = Op.getOperand(1);
6671 // Try to match the following pattern:
6672 // (BINOP (extract_vector_elt A, i), (extract_vector_elt B, i))
6673 // Early exit if we cannot match that sequence.
6674 if (Op0.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
6675 Op1.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
6676 !isa<ConstantSDNode>(Op0.getOperand(1)) ||
6677 !isa<ConstantSDNode>(Op1.getOperand(1)) ||
6678 Op0.getOperand(1) != Op1.getOperand(1))
6681 unsigned I0 = cast<ConstantSDNode>(Op0.getOperand(1))->getZExtValue();
6685 // We found a valid add/sub node. Update the information accordingly.
6691 // Update InVec0 and InVec1.
6692 if (InVec0.getOpcode() == ISD::UNDEF)
6693 InVec0 = Op0.getOperand(0);
6694 if (InVec1.getOpcode() == ISD::UNDEF)
6695 InVec1 = Op1.getOperand(0);
6697     // Make sure that the operands of each add/sub node always come
6698     // from the same pair of vectors.
6699 if (InVec0 != Op0.getOperand(0)) {
6700 if (ExpectedOpcode == ISD::FSUB)
6703 // FADD is commutable. Try to commute the operands
6704 // and then test again.
6705 std::swap(Op0, Op1);
6706 if (InVec0 != Op0.getOperand(0))
6710 if (InVec1 != Op1.getOperand(0))
6713 // Update the pair of expected opcodes.
6714 std::swap(ExpectedOpcode, NextExpectedOpcode);
6717 // Don't try to fold this build_vector into an ADDSUB if the inputs are undef.
6718 if (AddFound && SubFound && InVec0.getOpcode() != ISD::UNDEF &&
6719 InVec1.getOpcode() != ISD::UNDEF)
6720 return DAG.getNode(X86ISD::ADDSUB, DL, VT, InVec0, InVec1);
6725 static SDValue PerformBUILD_VECTORCombine(SDNode *N, SelectionDAG &DAG,
6726 const X86Subtarget *Subtarget) {
6728 EVT VT = N->getValueType(0);
6729 unsigned NumElts = VT.getVectorNumElements();
6730 BuildVectorSDNode *BV = cast<BuildVectorSDNode>(N);
6731 SDValue InVec0, InVec1;
6733 // Try to match an ADDSUB.
6734 if ((Subtarget->hasSSE3() && (VT == MVT::v4f32 || VT == MVT::v2f64)) ||
6735 (Subtarget->hasAVX() && (VT == MVT::v8f32 || VT == MVT::v4f64))) {
6736 SDValue Value = matchAddSub(BV, DAG, Subtarget);
6737 if (Value.getNode())
6741 // Try to match horizontal ADD/SUB.
6742 unsigned NumUndefsLO = 0;
6743 unsigned NumUndefsHI = 0;
6744 unsigned Half = NumElts/2;
6746   // Count the number of UNDEF operands in the input build_vector.
6747 for (unsigned i = 0, e = Half; i != e; ++i)
6748 if (BV->getOperand(i)->getOpcode() == ISD::UNDEF)
6751 for (unsigned i = Half, e = NumElts; i != e; ++i)
6752 if (BV->getOperand(i)->getOpcode() == ISD::UNDEF)
6755 // Early exit if this is either a build_vector of all UNDEFs or all the
6756 // operands but one are UNDEF.
6757 if (NumUndefsLO + NumUndefsHI + 1 >= NumElts)
6760 if ((VT == MVT::v4f32 || VT == MVT::v2f64) && Subtarget->hasSSE3()) {
6761 // Try to match an SSE3 float HADD/HSUB.
6762 if (isHorizontalBinOp(BV, ISD::FADD, DAG, 0, NumElts, InVec0, InVec1))
6763 return DAG.getNode(X86ISD::FHADD, DL, VT, InVec0, InVec1);
6765 if (isHorizontalBinOp(BV, ISD::FSUB, DAG, 0, NumElts, InVec0, InVec1))
6766 return DAG.getNode(X86ISD::FHSUB, DL, VT, InVec0, InVec1);
6767 } else if ((VT == MVT::v4i32 || VT == MVT::v8i16) && Subtarget->hasSSSE3()) {
6768 // Try to match an SSSE3 integer HADD/HSUB.
6769 if (isHorizontalBinOp(BV, ISD::ADD, DAG, 0, NumElts, InVec0, InVec1))
6770 return DAG.getNode(X86ISD::HADD, DL, VT, InVec0, InVec1);
6772 if (isHorizontalBinOp(BV, ISD::SUB, DAG, 0, NumElts, InVec0, InVec1))
6773 return DAG.getNode(X86ISD::HSUB, DL, VT, InVec0, InVec1);
6776 if (!Subtarget->hasAVX())
6779 if ((VT == MVT::v8f32 || VT == MVT::v4f64)) {
6780 // Try to match an AVX horizontal add/sub of packed single/double
6781 // precision floating point values from 256-bit vectors.
6782 SDValue InVec2, InVec3;
6783 if (isHorizontalBinOp(BV, ISD::FADD, DAG, 0, Half, InVec0, InVec1) &&
6784 isHorizontalBinOp(BV, ISD::FADD, DAG, Half, NumElts, InVec2, InVec3) &&
6785 ((InVec0.getOpcode() == ISD::UNDEF ||
6786 InVec2.getOpcode() == ISD::UNDEF) || InVec0 == InVec2) &&
6787 ((InVec1.getOpcode() == ISD::UNDEF ||
6788 InVec3.getOpcode() == ISD::UNDEF) || InVec1 == InVec3))
6789 return DAG.getNode(X86ISD::FHADD, DL, VT, InVec0, InVec1);
6791 if (isHorizontalBinOp(BV, ISD::FSUB, DAG, 0, Half, InVec0, InVec1) &&
6792 isHorizontalBinOp(BV, ISD::FSUB, DAG, Half, NumElts, InVec2, InVec3) &&
6793 ((InVec0.getOpcode() == ISD::UNDEF ||
6794 InVec2.getOpcode() == ISD::UNDEF) || InVec0 == InVec2) &&
6795 ((InVec1.getOpcode() == ISD::UNDEF ||
6796 InVec3.getOpcode() == ISD::UNDEF) || InVec1 == InVec3))
6797 return DAG.getNode(X86ISD::FHSUB, DL, VT, InVec0, InVec1);
6798 } else if (VT == MVT::v8i32 || VT == MVT::v16i16) {
6799 // Try to match an AVX2 horizontal add/sub of signed integers.
6800 SDValue InVec2, InVec3;
6802 bool CanFold = true;
6804 if (isHorizontalBinOp(BV, ISD::ADD, DAG, 0, Half, InVec0, InVec1) &&
6805 isHorizontalBinOp(BV, ISD::ADD, DAG, Half, NumElts, InVec2, InVec3) &&
6806 ((InVec0.getOpcode() == ISD::UNDEF ||
6807 InVec2.getOpcode() == ISD::UNDEF) || InVec0 == InVec2) &&
6808 ((InVec1.getOpcode() == ISD::UNDEF ||
6809 InVec3.getOpcode() == ISD::UNDEF) || InVec1 == InVec3))
6810 X86Opcode = X86ISD::HADD;
6811 else if (isHorizontalBinOp(BV, ISD::SUB, DAG, 0, Half, InVec0, InVec1) &&
6812 isHorizontalBinOp(BV, ISD::SUB, DAG, Half, NumElts, InVec2, InVec3) &&
6813 ((InVec0.getOpcode() == ISD::UNDEF ||
6814 InVec2.getOpcode() == ISD::UNDEF) || InVec0 == InVec2) &&
6815 ((InVec1.getOpcode() == ISD::UNDEF ||
6816 InVec3.getOpcode() == ISD::UNDEF) || InVec1 == InVec3))
6817 X86Opcode = X86ISD::HSUB;
6822 // Fold this build_vector into a single horizontal add/sub.
6823 // Do this only if the target has AVX2.
6824 if (Subtarget->hasAVX2())
6825 return DAG.getNode(X86Opcode, DL, VT, InVec0, InVec1);
6827 // Do not try to expand this build_vector into a pair of horizontal
6828 // add/sub if we can emit a pair of scalar add/sub.
6829 if (NumUndefsLO + 1 == Half || NumUndefsHI + 1 == Half)
6832       // Convert this build_vector into a pair of horizontal binops followed by a concat vector.
6834 bool isUndefLO = NumUndefsLO == Half;
6835 bool isUndefHI = NumUndefsHI == Half;
6836 return ExpandHorizontalBinOp(InVec0, InVec1, DL, DAG, X86Opcode, false,
6837 isUndefLO, isUndefHI);
6841 if ((VT == MVT::v8f32 || VT == MVT::v4f64 || VT == MVT::v8i32 ||
6842 VT == MVT::v16i16) && Subtarget->hasAVX()) {
6844 if (isHorizontalBinOp(BV, ISD::ADD, DAG, 0, NumElts, InVec0, InVec1))
6845 X86Opcode = X86ISD::HADD;
6846 else if (isHorizontalBinOp(BV, ISD::SUB, DAG, 0, NumElts, InVec0, InVec1))
6847 X86Opcode = X86ISD::HSUB;
6848 else if (isHorizontalBinOp(BV, ISD::FADD, DAG, 0, NumElts, InVec0, InVec1))
6849 X86Opcode = X86ISD::FHADD;
6850 else if (isHorizontalBinOp(BV, ISD::FSUB, DAG, 0, NumElts, InVec0, InVec1))
6851 X86Opcode = X86ISD::FHSUB;
6855 // Don't try to expand this build_vector into a pair of horizontal add/sub
6856 // if we can simply emit a pair of scalar add/sub.
6857 if (NumUndefsLO + 1 == Half || NumUndefsHI + 1 == Half)
6860     // Convert this build_vector into two horizontal add/sub followed by a concat vector.
6862 bool isUndefLO = NumUndefsLO == Half;
6863 bool isUndefHI = NumUndefsHI == Half;
6864 return ExpandHorizontalBinOp(InVec0, InVec1, DL, DAG, X86Opcode, true,
6865 isUndefLO, isUndefHI);
6872 X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const {
6875 MVT VT = Op.getSimpleValueType();
6876 MVT ExtVT = VT.getVectorElementType();
6877 unsigned NumElems = Op.getNumOperands();
6879 // Generate vectors for predicate vectors.
6880 if (VT.getScalarType() == MVT::i1 && Subtarget->hasAVX512())
6881 return LowerBUILD_VECTORvXi1(Op, DAG);
6883 // Vectors containing all zeros can be matched by pxor and xorps later
6884 if (ISD::isBuildVectorAllZeros(Op.getNode())) {
6885 // Canonicalize this to <4 x i32> to 1) ensure the zero vectors are CSE'd
6886 // and 2) ensure that i64 scalars are eliminated on x86-32 hosts.
6887 if (VT == MVT::v4i32 || VT == MVT::v8i32 || VT == MVT::v16i32)
6890 return getZeroVector(VT, Subtarget, DAG, dl);
6893 // Vectors containing all ones can be matched by pcmpeqd on 128-bit width
6894 // vectors or broken into v4i32 operations on 256-bit vectors. AVX2 can use
6895 // vpcmpeqd on 256-bit vectors.
6896 if (Subtarget->hasSSE2() && ISD::isBuildVectorAllOnes(Op.getNode())) {
6897 if (VT == MVT::v4i32 || (VT == MVT::v8i32 && Subtarget->hasInt256()))
6900 if (!VT.is512BitVector())
6901 return getOnesVector(VT, Subtarget->hasInt256(), DAG, dl);
6904 SDValue Broadcast = LowerVectorBroadcast(Op, Subtarget, DAG);
6905 if (Broadcast.getNode())
6908 unsigned EVTBits = ExtVT.getSizeInBits();
6910 unsigned NumZero = 0;
6911 unsigned NumNonZero = 0;
6912 unsigned NonZeros = 0;
6913 bool IsAllConstants = true;
6914 SmallSet<SDValue, 8> Values;
6915 for (unsigned i = 0; i < NumElems; ++i) {
6916 SDValue Elt = Op.getOperand(i);
6917 if (Elt.getOpcode() == ISD::UNDEF)
6920 if (Elt.getOpcode() != ISD::Constant &&
6921 Elt.getOpcode() != ISD::ConstantFP)
6922 IsAllConstants = false;
6923 if (X86::isZeroNode(Elt))
6926 NonZeros |= (1 << i);
6931 // All undef vector. Return an UNDEF. All zero vectors were handled above.
6932 if (NumNonZero == 0)
6933 return DAG.getUNDEF(VT);
6935 // Special case for single non-zero, non-undef, element.
6936 if (NumNonZero == 1) {
6937 unsigned Idx = countTrailingZeros(NonZeros);
6938 SDValue Item = Op.getOperand(Idx);
6940 // If this is an insertion of an i64 value on x86-32, and if the top bits of
6941 // the value are obviously zero, truncate the value to i32 and do the
6942 // insertion that way. Only do this if the value is non-constant or if the
6943 // value is a constant being inserted into element 0. It is cheaper to do
6944 // a constant pool load than it is to do a movd + shuffle.
6945 if (ExtVT == MVT::i64 && !Subtarget->is64Bit() &&
6946 (!IsAllConstants || Idx == 0)) {
6947 if (DAG.MaskedValueIsZero(Item, APInt::getBitsSet(64, 32, 64))) {
6949 assert(VT == MVT::v2i64 && "Expected an SSE value type!");
6950 EVT VecVT = MVT::v4i32;
6951 unsigned VecElts = 4;
6953 // Truncate the value (which may itself be a constant) to i32, and
6954 // convert it to a vector with movd (S2V+shuffle to zero extend).
6955 Item = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Item);
6956 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VecVT, Item);
6958 // If using the new shuffle lowering, just directly insert this.
6959 if (ExperimentalVectorShuffleLowering)
6961 ISD::BITCAST, dl, VT,
6962 getShuffleVectorZeroOrUndef(Item, Idx * 2, true, Subtarget, DAG));
6964 Item = getShuffleVectorZeroOrUndef(Item, 0, true, Subtarget, DAG);
6966 // Now we have our 32-bit value zero extended in the low element of
6967 // a vector. If Idx != 0, swizzle it into place.
6969 SmallVector<int, 4> Mask;
6970 Mask.push_back(Idx);
6971 for (unsigned i = 1; i != VecElts; ++i)
6973 Item = DAG.getVectorShuffle(VecVT, dl, Item, DAG.getUNDEF(VecVT),
6976 return DAG.getNode(ISD::BITCAST, dl, VT, Item);
6980 // If we have a constant or non-constant insertion into the low element of
6981 // a vector, we can do this with SCALAR_TO_VECTOR + shuffle of zero into
6982 // the rest of the elements. This will be matched as movd/movq/movss/movsd
6983 // depending on what the source datatype is.
6986 return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Item);
6988 if (ExtVT == MVT::i32 || ExtVT == MVT::f32 || ExtVT == MVT::f64 ||
6989 (ExtVT == MVT::i64 && Subtarget->is64Bit())) {
6990 if (VT.is256BitVector() || VT.is512BitVector()) {
6991 SDValue ZeroVec = getZeroVector(VT, Subtarget, DAG, dl);
6992 return DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, ZeroVec,
6993 Item, DAG.getIntPtrConstant(0));
6995 assert(VT.is128BitVector() && "Expected an SSE value type!");
6996 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Item);
6997 // Turn it into a MOVL (i.e. movss, movsd, or movd) to a zero vector.
6998 return getShuffleVectorZeroOrUndef(Item, 0, true, Subtarget, DAG);
7001 if (ExtVT == MVT::i16 || ExtVT == MVT::i8) {
7002 Item = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, Item);
7003 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32, Item);
7004 if (VT.is256BitVector()) {
7005 SDValue ZeroVec = getZeroVector(MVT::v8i32, Subtarget, DAG, dl);
7006 Item = Insert128BitVector(ZeroVec, Item, 0, DAG, dl);
7008 assert(VT.is128BitVector() && "Expected an SSE value type!");
7009 Item = getShuffleVectorZeroOrUndef(Item, 0, true, Subtarget, DAG);
7011 return DAG.getNode(ISD::BITCAST, dl, VT, Item);
7015 // Is it a vector logical left shift?
7016 if (NumElems == 2 && Idx == 1 &&
7017 X86::isZeroNode(Op.getOperand(0)) &&
7018 !X86::isZeroNode(Op.getOperand(1))) {
7019 unsigned NumBits = VT.getSizeInBits();
7020 return getVShift(true, VT,
7021 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl,
7022 VT, Op.getOperand(1)),
7023 NumBits/2, DAG, *this, dl);
7026 if (IsAllConstants) // Otherwise, it's better to do a constpool load.
7029 // Otherwise, if this is a vector with i32 or f32 elements, and the element
7030 // is a non-constant being inserted into an element other than the low one,
7031 // we can't use a constant pool load. Instead, use SCALAR_TO_VECTOR (aka
7032     // movd/movss) to move this into the low element, then shuffle it into place.
7034 if (EVTBits == 32) {
7035 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Item);
7037 // If using the new shuffle lowering, just directly insert this.
7038 if (ExperimentalVectorShuffleLowering)
7039 return getShuffleVectorZeroOrUndef(Item, Idx, NumZero > 0, Subtarget, DAG);
7041 // Turn it into a shuffle of zero and zero-extended scalar to vector.
7042 Item = getShuffleVectorZeroOrUndef(Item, 0, NumZero > 0, Subtarget, DAG);
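      // The mask built below routes lane 0 of Item (the scalar) to position
      // Idx and lane 1 of Item (zero, or undef when NumZero == 0) to every
      // other position.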
7043 SmallVector<int, 8> MaskVec;
7044 for (unsigned i = 0; i != NumElems; ++i)
7045 MaskVec.push_back(i == Idx ? 0 : 1);
7046 return DAG.getVectorShuffle(VT, dl, Item, DAG.getUNDEF(VT), &MaskVec[0]);
7050 // Splat is obviously ok. Let legalizer expand it to a shuffle.
7051 if (Values.size() == 1) {
7052 if (EVTBits == 32) {
7053 // Instead of a shuffle like this:
7054 // shuffle (scalar_to_vector (load (ptr + 4))), undef, <0, 0, 0, 0>
7055 // Check if it's possible to issue this instead.
7056       // shuffle (vload ptr), undef, <1, 1, 1, 1>
7057 unsigned Idx = countTrailingZeros(NonZeros);
7058 SDValue Item = Op.getOperand(Idx);
7059 if (Op.getNode()->isOnlyUserOf(Item.getNode()))
7060 return LowerAsSplatVectorLoad(Item, VT, dl, DAG);
7065 // A vector full of immediates; various special cases are already
7066 // handled, so this is best done with a single constant-pool load.
7070 // For AVX-length vectors, see if we can use a vector load to get all of the
7071 // elements, otherwise build the individual 128-bit pieces and use
7072 // shuffles to put them in place.
7073 if (VT.is256BitVector() || VT.is512BitVector()) {
7074 SmallVector<SDValue, 64> V;
7075 for (unsigned i = 0; i != NumElems; ++i)
7076 V.push_back(Op.getOperand(i));
7078 // Check for a build vector of consecutive loads.
7079 if (SDValue LD = EltsFromConsecutiveLoads(VT, V, dl, DAG, false))
7082 EVT HVT = EVT::getVectorVT(*DAG.getContext(), ExtVT, NumElems/2);
7084 // Build both the lower and upper subvector.
7085 SDValue Lower = DAG.getNode(ISD::BUILD_VECTOR, dl, HVT,
7086 makeArrayRef(&V[0], NumElems/2));
7087 SDValue Upper = DAG.getNode(ISD::BUILD_VECTOR, dl, HVT,
7088 makeArrayRef(&V[NumElems / 2], NumElems/2));
7090 // Recreate the wider vector with the lower and upper part.
7091 if (VT.is256BitVector())
7092 return Concat128BitVectors(Lower, Upper, VT, NumElems, DAG, dl);
7093 return Concat256BitVectors(Lower, Upper, VT, NumElems, DAG, dl);
7096 // Let legalizer expand 2-wide build_vectors.
7097 if (EVTBits == 64) {
7098 if (NumNonZero == 1) {
7099 // One half is zero or undef.
7100 unsigned Idx = countTrailingZeros(NonZeros);
7101 SDValue V2 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT,
7102 Op.getOperand(Idx));
7103 return getShuffleVectorZeroOrUndef(V2, Idx, true, Subtarget, DAG);
7108 // If element VT is < 32 bits, convert it to inserts into a zero vector.
7109 if (EVTBits == 8 && NumElems == 16) {
7110 SDValue V = LowerBuildVectorv16i8(Op, NonZeros,NumNonZero,NumZero, DAG,
7112 if (V.getNode()) return V;
7115 if (EVTBits == 16 && NumElems == 8) {
7116 SDValue V = LowerBuildVectorv8i16(Op, NonZeros,NumNonZero,NumZero, DAG,
7118 if (V.getNode()) return V;
7121   // If the element type is 32 bits and the vector has 4 elements, try to generate an INSERTPS.
7122 if (EVTBits == 32 && NumElems == 4) {
7123 SDValue V = LowerBuildVectorv4x32(Op, DAG, Subtarget, *this);
7128 // If element VT is == 32 bits, turn it into a number of shuffles.
7129 SmallVector<SDValue, 8> V(NumElems);
7130 if (NumElems == 4 && NumZero > 0) {
7131 for (unsigned i = 0; i < 4; ++i) {
7132 bool isZero = !(NonZeros & (1 << i));
7134 V[i] = getZeroVector(VT, Subtarget, DAG, dl);
7136 V[i] = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(i));
7139 for (unsigned i = 0; i < 2; ++i) {
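      // The two-bit field of NonZeros for this pair selects how the two
      // single-element vectors are combined: keep a zero vector, MOVL the one
      // non-zero element over a zero vector, or unpack two non-zero elements.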
7140 switch ((NonZeros & (0x3 << i*2)) >> (i*2)) {
7143 V[i] = V[i*2]; // Must be a zero vector.
7146 V[i] = getMOVL(DAG, dl, VT, V[i*2+1], V[i*2]);
7149 V[i] = getMOVL(DAG, dl, VT, V[i*2], V[i*2+1]);
7152 V[i] = getUnpackl(DAG, dl, VT, V[i*2], V[i*2+1]);
7157 bool Reverse1 = (NonZeros & 0x3) == 2;
7158 bool Reverse2 = ((NonZeros & (0x3 << 2)) >> 2) == 2;
7162 static_cast<int>(Reverse2 ? NumElems+1 : NumElems),
7163 static_cast<int>(Reverse2 ? NumElems : NumElems+1)
7165 return DAG.getVectorShuffle(VT, dl, V[0], V[1], &MaskVec[0]);
7168 if (Values.size() > 1 && VT.is128BitVector()) {
7169 // Check for a build vector of consecutive loads.
7170 for (unsigned i = 0; i < NumElems; ++i)
7171 V[i] = Op.getOperand(i);
7173 // Check for elements which are consecutive loads.
7174 SDValue LD = EltsFromConsecutiveLoads(VT, V, dl, DAG, false);
7178   // Check for a build_vector built mostly from a shuffle plus a few inserts.
7179 SDValue Sh = buildFromShuffleMostly(Op, DAG);
7183 // For SSE 4.1, use insertps to put the high elements into the low element.
7184 if (Subtarget->hasSSE41()) {
7186 if (Op.getOperand(0).getOpcode() != ISD::UNDEF)
7187 Result = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(0));
7189 Result = DAG.getUNDEF(VT);
7191 for (unsigned i = 1; i < NumElems; ++i) {
7192 if (Op.getOperand(i).getOpcode() == ISD::UNDEF) continue;
7193 Result = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Result,
7194 Op.getOperand(i), DAG.getIntPtrConstant(i));
7199   // Otherwise, expand into a number of unpckl*; start by extending each of
7200 // our (non-undef) elements to the full vector width with the element in the
7201 // bottom slot of the vector (which generates no code for SSE).
7202 for (unsigned i = 0; i < NumElems; ++i) {
7203 if (Op.getOperand(i).getOpcode() != ISD::UNDEF)
7204 V[i] = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(i));
7206 V[i] = DAG.getUNDEF(VT);
7209 // Next, we iteratively mix elements, e.g. for v4f32:
7210 // Step 1: unpcklps 0, 2 ==> X: <?, ?, 2, 0>
7211 // : unpcklps 1, 3 ==> Y: <?, ?, 3, 1>
7212 // Step 2: unpcklps X, Y ==> <3, 2, 1, 0>
7213 unsigned EltStride = NumElems >> 1;
7214 while (EltStride != 0) {
7215 for (unsigned i = 0; i < EltStride; ++i) {
7216 // If V[i+EltStride] is undef and this is the first round of mixing,
7217 // then it is safe to just drop this shuffle: V[i] is already in the
7218 // right place, the one element (since it's the first round) being
7219 // inserted as undef can be dropped. This isn't safe for successive
7220 // rounds because they will permute elements within both vectors.
7221 if (V[i+EltStride].getOpcode() == ISD::UNDEF &&
7222 EltStride == NumElems/2)
7225 V[i] = getUnpackl(DAG, dl, VT, V[i], V[i + EltStride]);
7234 // LowerAVXCONCAT_VECTORS - 256-bit AVX can use the vinsertf128 instruction
7235 // to create 256-bit vectors from two other 128-bit ones.
7236 static SDValue LowerAVXCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) {
7238 MVT ResVT = Op.getSimpleValueType();
7240 assert((ResVT.is256BitVector() ||
7241 ResVT.is512BitVector()) && "Value type must be 256-/512-bit wide");
7243 SDValue V1 = Op.getOperand(0);
7244 SDValue V2 = Op.getOperand(1);
7245 unsigned NumElems = ResVT.getVectorNumElements();
7246   if (ResVT.is256BitVector())
7247 return Concat128BitVectors(V1, V2, ResVT, NumElems, DAG, dl);
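  // A 512-bit result may be built either from two 256-bit operands or from
  // four 128-bit operands; in the latter case, concatenate pairwise into two
  // 256-bit halves first.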
7249 if (Op.getNumOperands() == 4) {
7250 MVT HalfVT = MVT::getVectorVT(ResVT.getScalarType(),
7251 ResVT.getVectorNumElements()/2);
7252 SDValue V3 = Op.getOperand(2);
7253 SDValue V4 = Op.getOperand(3);
7254 return Concat256BitVectors(Concat128BitVectors(V1, V2, HalfVT, NumElems/2, DAG, dl),
7255 Concat128BitVectors(V3, V4, HalfVT, NumElems/2, DAG, dl), ResVT, NumElems, DAG, dl);
7257 return Concat256BitVectors(V1, V2, ResVT, NumElems, DAG, dl);
7260 static SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) {
7261 MVT LLVM_ATTRIBUTE_UNUSED VT = Op.getSimpleValueType();
7262 assert((VT.is256BitVector() && Op.getNumOperands() == 2) ||
7263 (VT.is512BitVector() && (Op.getNumOperands() == 2 ||
7264 Op.getNumOperands() == 4)));
7266 // AVX can use the vinsertf128 instruction to create 256-bit vectors
7267 // from two other 128-bit ones.
7269 // 512-bit vector may contain 2 256-bit vectors or 4 128-bit vectors
7270 return LowerAVXCONCAT_VECTORS(Op, DAG);
7274 //===----------------------------------------------------------------------===//
7275 // Vector shuffle lowering
7277 // This is an experimental code path for lowering vector shuffles on x86. It is
7278 // designed to handle arbitrary vector shuffles and blends, gracefully
7279 // degrading performance as necessary. It works hard to recognize idiomatic
7280 // shuffles and lower them to optimal instruction patterns without leaving
7281 // a framework that allows reasonably efficient handling of all vector shuffle patterns.
7283 //===----------------------------------------------------------------------===//
7285 /// \brief Tiny helper function to identify a no-op mask.
7287 /// This is a somewhat boring predicate function. It checks whether the mask
7288 /// array input, which is assumed to be a single-input shuffle mask of the kind
7289 /// used by the X86 shuffle instructions (not a fully general
7290 /// ShuffleVectorSDNode mask) requires any shuffles to occur. Both undef and an
7291 /// in-place shuffle are 'no-op's.
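///
/// For example, <-1, 1, 2, -1> is a no-op mask for a 4-element shuffle, while
/// <0, 0, 2, 3> is not (element 1 moves).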
7292 static bool isNoopShuffleMask(ArrayRef<int> Mask) {
7293 for (int i = 0, Size = Mask.size(); i < Size; ++i)
7294 if (Mask[i] != -1 && Mask[i] != i)
7299 /// \brief Helper function to classify a mask as a single-input mask.
7301 /// This isn't a generic single-input test because in the vector shuffle
7302 /// lowering we canonicalize single inputs to be the first input operand. This
7303 /// means we can more quickly test for a single input by only checking whether
7304 /// an input from the second operand exists. We also assume that the size of
7305 /// the mask corresponds to the size of the input vectors, which isn't true in
7306 /// the fully general case.
7307 static bool isSingleInputShuffleMask(ArrayRef<int> Mask) {
7309 if (M >= (int)Mask.size())
7314 /// \brief Test whether there are elements crossing 128-bit lanes in this shuffle mask.
7317 /// X86 divides up its shuffles into in-lane and cross-lane shuffle operations
7318 /// and we routinely test for these.
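///
/// For example, in a v8f32 shuffle, a mask entry of 4 in result position 0
/// crosses from the upper 128-bit lane into the lower one.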
7319 static bool is128BitLaneCrossingShuffleMask(MVT VT, ArrayRef<int> Mask) {
7320 int LaneSize = 128 / VT.getScalarSizeInBits();
7321 int Size = Mask.size();
7322 for (int i = 0; i < Size; ++i)
7323 if (Mask[i] >= 0 && (Mask[i] % Size) / LaneSize != i / LaneSize)
7328 /// \brief Test whether a shuffle mask is equivalent within each 128-bit lane.
7330 /// This checks a shuffle mask to see if it is performing the same
7331 /// 128-bit lane-relative shuffle in each 128-bit lane. This trivially implies
7332 /// that it is also not lane-crossing. It may however involve a blend from the
7333 /// same lane of a second vector.
7335 /// The specific repeated shuffle mask is populated in \p RepeatedMask, as it is
7336 /// non-trivial to compute in the face of undef lanes. The representation is
7337 /// *not* suitable for use with existing 128-bit shuffles as it will contain
7338 /// entries from both V1 and V2 inputs to the wider mask.
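///
/// For example, the v8f32 mask <1, 0, 3, 2, 5, 4, 7, 6> repeats within each
/// 128-bit lane and produces RepeatedMask = <1, 0, 3, 2>.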
7340 is128BitLaneRepeatedShuffleMask(MVT VT, ArrayRef<int> Mask,
7341 SmallVectorImpl<int> &RepeatedMask) {
7342 int LaneSize = 128 / VT.getScalarSizeInBits();
7343 RepeatedMask.resize(LaneSize, -1);
7344 int Size = Mask.size();
7345 for (int i = 0; i < Size; ++i) {
7348 if ((Mask[i] % Size) / LaneSize != i / LaneSize)
7349 // This entry crosses lanes, so there is no way to model this shuffle.
7352 // Ok, handle the in-lane shuffles by detecting if and when they repeat.
7353 if (RepeatedMask[i % LaneSize] == -1)
7354 // This is the first non-undef entry in this slot of a 128-bit lane.
7355 RepeatedMask[i % LaneSize] =
7356 Mask[i] < Size ? Mask[i] % LaneSize : Mask[i] % LaneSize + Size;
7357 else if (RepeatedMask[i % LaneSize] + (i / LaneSize) * LaneSize != Mask[i])
7358 // Found a mismatch with the repeated mask.
7364 /// \brief Base case helper for testing a single mask element.
7365 static bool isShuffleEquivalentImpl(SDValue V1, SDValue V2,
7366 BuildVectorSDNode *BV1,
7367 BuildVectorSDNode *BV2, ArrayRef<int> Mask,
7369 int Size = Mask.size();
7370 if (Mask[i] != -1 && Mask[i] != Arg) {
7371 auto *MaskBV = Mask[i] < Size ? BV1 : BV2;
7372 auto *ArgsBV = Arg < Size ? BV1 : BV2;
7373 if (!MaskBV || !ArgsBV ||
7374 MaskBV->getOperand(Mask[i] % Size) != ArgsBV->getOperand(Arg % Size))
7380 /// \brief Recursive helper to peel off and test each mask element.
7381 template <typename... Ts>
7382 static bool isShuffleEquivalentImpl(SDValue V1, SDValue V2,
7383 BuildVectorSDNode *BV1,
7384 BuildVectorSDNode *BV2, ArrayRef<int> Mask,
7385 int i, int Arg, Ts... Args) {
7386 if (!isShuffleEquivalentImpl(V1, V2, BV1, BV2, Mask, i, Arg))
7389 return isShuffleEquivalentImpl(V1, V2, BV1, BV2, Mask, i + 1, Args...);
7392 /// \brief Checks whether a shuffle mask is equivalent to an explicit list of arguments.
7395 /// This is a fast way to test a shuffle mask against a fixed pattern:
7397 ///   if (isShuffleEquivalent(V1, V2, Mask, 3, 2, 1, 0)) { ... }
7399 /// It returns true if the mask is exactly as wide as the argument list, and
7400 /// each element of the mask is either -1 (signifying undef) or the value given
7401 /// in the argument.
7402 template <typename... Ts>
7403 static bool isShuffleEquivalent(SDValue V1, SDValue V2, ArrayRef<int> Mask,
7405 if (Mask.size() != sizeof...(Args))
7408 // If the values are build vectors, we can look through them to find
7409 // equivalent inputs that make the shuffles equivalent.
7410 auto *BV1 = dyn_cast<BuildVectorSDNode>(V1);
7411 auto *BV2 = dyn_cast<BuildVectorSDNode>(V2);
7413 // Recursively peel off arguments and test them against the mask.
7414 return isShuffleEquivalentImpl(V1, V2, BV1, BV2, Mask, 0, Args...);
7417 /// \brief Get a 4-lane 8-bit shuffle immediate for a mask.
7419 /// This helper function produces an 8-bit shuffle immediate corresponding to
7420 /// the ubiquitous shuffle encoding scheme used in x86 instructions for
7421 /// shuffling 4 lanes. It can be used with most of the PSHUF instructions, for example.
7424 /// NB: We rely heavily on "undef" masks preserving the input lane.
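///
/// For example, the mask <3, 2, 1, 0> encodes to 0b00011011 (0x1B): bits [1:0]
/// select the source of result element 0, bits [3:2] select element 1, and so
/// on.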
7425 static SDValue getV4X86ShuffleImm8ForMask(ArrayRef<int> Mask,
7426 SelectionDAG &DAG) {
7427 assert(Mask.size() == 4 && "Only 4-lane shuffle masks");
7428 assert(Mask[0] >= -1 && Mask[0] < 4 && "Out of bound mask element!");
7429 assert(Mask[1] >= -1 && Mask[1] < 4 && "Out of bound mask element!");
7430 assert(Mask[2] >= -1 && Mask[2] < 4 && "Out of bound mask element!");
7431 assert(Mask[3] >= -1 && Mask[3] < 4 && "Out of bound mask element!");
7434 Imm |= (Mask[0] == -1 ? 0 : Mask[0]) << 0;
7435 Imm |= (Mask[1] == -1 ? 1 : Mask[1]) << 2;
7436 Imm |= (Mask[2] == -1 ? 2 : Mask[2]) << 4;
7437 Imm |= (Mask[3] == -1 ? 3 : Mask[3]) << 6;
7438 return DAG.getConstant(Imm, MVT::i8);
7441 /// \brief Try to emit a blend instruction for a shuffle.
7443 /// This doesn't do any checks for the availability of instructions for blending
7444 /// these values. It relies on the availability of the X86ISD::BLENDI pattern to
7445 /// be matched in the backend with the type given. What it does check for is
7446 /// that the shuffle mask is in fact a blend.
7447 static SDValue lowerVectorShuffleAsBlend(SDLoc DL, MVT VT, SDValue V1,
7448 SDValue V2, ArrayRef<int> Mask,
7449 const X86Subtarget *Subtarget,
7450 SelectionDAG &DAG) {
7452 unsigned BlendMask = 0;
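  // Each bit of BlendMask selects the source of one result element: bit i set
  // means element i comes from V2, clear means it comes from V1, matching the
  // BLENDI immediate encoding.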
7453 for (int i = 0, Size = Mask.size(); i < Size; ++i) {
7454 if (Mask[i] >= Size) {
7455 if (Mask[i] != i + Size)
7456 return SDValue(); // Shuffled V2 input!
7457 BlendMask |= 1u << i;
7460 if (Mask[i] >= 0 && Mask[i] != i)
7461 return SDValue(); // Shuffled V1 input!
7463 switch (VT.SimpleTy) {
7468 return DAG.getNode(X86ISD::BLENDI, DL, VT, V1, V2,
7469 DAG.getConstant(BlendMask, MVT::i8));
7473 assert(Subtarget->hasAVX2() && "256-bit integer blends require AVX2!");
7477 // If we have AVX2 it is faster to use VPBLENDD when the shuffle fits into
7478 // that instruction.
7479 if (Subtarget->hasAVX2()) {
7480 // Scale the blend by the number of 32-bit dwords per element.
7481 int Scale = VT.getScalarSizeInBits() / 32;
7483 for (int i = 0, Size = Mask.size(); i < Size; ++i)
7484 if (Mask[i] >= Size)
7485 for (int j = 0; j < Scale; ++j)
7486 BlendMask |= 1u << (i * Scale + j);
7488 MVT BlendVT = VT.getSizeInBits() > 128 ? MVT::v8i32 : MVT::v4i32;
7489 V1 = DAG.getNode(ISD::BITCAST, DL, BlendVT, V1);
7490 V2 = DAG.getNode(ISD::BITCAST, DL, BlendVT, V2);
7491 return DAG.getNode(ISD::BITCAST, DL, VT,
7492 DAG.getNode(X86ISD::BLENDI, DL, BlendVT, V1, V2,
7493 DAG.getConstant(BlendMask, MVT::i8)));
7497 // For integer shuffles we need to expand the mask and cast the inputs to
7498 // v8i16s prior to blending.
7499 int Scale = 8 / VT.getVectorNumElements();
7501 for (int i = 0, Size = Mask.size(); i < Size; ++i)
7502 if (Mask[i] >= Size)
7503 for (int j = 0; j < Scale; ++j)
7504 BlendMask |= 1u << (i * Scale + j);
7506 V1 = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, V1);
7507 V2 = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, V2);
7508 return DAG.getNode(ISD::BITCAST, DL, VT,
7509 DAG.getNode(X86ISD::BLENDI, DL, MVT::v8i16, V1, V2,
7510 DAG.getConstant(BlendMask, MVT::i8)));
7514 assert(Subtarget->hasAVX2() && "256-bit integer blends require AVX2!");
7515 SmallVector<int, 8> RepeatedMask;
7516 if (is128BitLaneRepeatedShuffleMask(MVT::v16i16, Mask, RepeatedMask)) {
7517 // We can lower these with PBLENDW which is mirrored across 128-bit lanes.
7518 assert(RepeatedMask.size() == 8 && "Repeated mask size doesn't match!");
7520 for (int i = 0; i < 8; ++i)
7521 if (RepeatedMask[i] >= 16)
7522 BlendMask |= 1u << i;
7523 return DAG.getNode(X86ISD::BLENDI, DL, MVT::v16i16, V1, V2,
7524 DAG.getConstant(BlendMask, MVT::i8));
7530 // Scale the blend by the number of bytes per element.
7531 int Scale = VT.getScalarSizeInBits() / 8;
7533     // This form of blend is always done on bytes. Compute the byte vector type.
7535 MVT BlendVT = MVT::getVectorVT(MVT::i8, VT.getSizeInBits() / 8);
7537 // Compute the VSELECT mask. Note that VSELECT is really confusing in the
7538 // mix of LLVM's code generator and the x86 backend. We tell the code
7539 // generator that boolean values in the elements of an x86 vector register
7540 // are -1 for true and 0 for false. We then use the LLVM semantics of 'true'
7541 // mapping a select to operand #1, and 'false' mapping to operand #2. The
7542 // reality in x86 is that vector masks (pre-AVX-512) use only the high bit
7543 // of the element (the remaining are ignored) and 0 in that high bit would
7544 // mean operand #1 while 1 in the high bit would mean operand #2. So while
7545 // the LLVM model for boolean values in vector elements gets the relevant
7546     // bit set, it is set backwards and over constrained relative to x86's actual semantics.
7548 SmallVector<SDValue, 32> VSELECTMask;
7549 for (int i = 0, Size = Mask.size(); i < Size; ++i)
7550 for (int j = 0; j < Scale; ++j)
7551 VSELECTMask.push_back(
7552 Mask[i] < 0 ? DAG.getUNDEF(MVT::i8)
7553 : DAG.getConstant(Mask[i] < Size ? -1 : 0, MVT::i8));
7555 V1 = DAG.getNode(ISD::BITCAST, DL, BlendVT, V1);
7556 V2 = DAG.getNode(ISD::BITCAST, DL, BlendVT, V2);
7558 ISD::BITCAST, DL, VT,
7559 DAG.getNode(ISD::VSELECT, DL, BlendVT,
7560 DAG.getNode(ISD::BUILD_VECTOR, DL, BlendVT, VSELECTMask),
7565 llvm_unreachable("Not a supported integer vector type!");
7569 /// \brief Try to lower as a blend of elements from two inputs followed by
7570 /// a single-input permutation.
7572 /// This matches the pattern where we can blend elements from two inputs and
7573 /// then reduce the shuffle to a single-input permutation.
7574 static SDValue lowerVectorShuffleAsBlendAndPermute(SDLoc DL, MVT VT, SDValue V1,
7577 SelectionDAG &DAG) {
7578 // We build up the blend mask while checking whether a blend is a viable way
7579 // to reduce the shuffle.
7580 SmallVector<int, 32> BlendMask(Mask.size(), -1);
7581 SmallVector<int, 32> PermuteMask(Mask.size(), -1);
7583 for (int i = 0, Size = Mask.size(); i < Size; ++i) {
7587 assert(Mask[i] < Size * 2 && "Shuffle input is out of bounds.");
7589 if (BlendMask[Mask[i] % Size] == -1)
7590 BlendMask[Mask[i] % Size] = Mask[i];
7591 else if (BlendMask[Mask[i] % Size] != Mask[i])
7592 return SDValue(); // Can't blend in the needed input!
7594 PermuteMask[i] = Mask[i] % Size;
7597 SDValue V = DAG.getVectorShuffle(VT, DL, V1, V2, BlendMask);
7598 return DAG.getVectorShuffle(VT, DL, V, DAG.getUNDEF(VT), PermuteMask);
7601 /// \brief Generic routine to decompose a shuffle and blend into independent
7602 /// blends and permutes.
7604 /// This matches the extremely common pattern for handling combined
7605 /// shuffle+blend operations on newer X86 ISAs where we have very fast blend
7606 /// operations. It will try to pick the best arrangement of shuffles and blends.
7608 static SDValue lowerVectorShuffleAsDecomposedShuffleBlend(SDLoc DL, MVT VT,
7612 SelectionDAG &DAG) {
7613 // Shuffle the input elements into the desired positions in V1 and V2 and
7614 // blend them together.
7615 SmallVector<int, 32> V1Mask(Mask.size(), -1);
7616 SmallVector<int, 32> V2Mask(Mask.size(), -1);
7617 SmallVector<int, 32> BlendMask(Mask.size(), -1);
7618 for (int i = 0, Size = Mask.size(); i < Size; ++i)
7619 if (Mask[i] >= 0 && Mask[i] < Size) {
7620 V1Mask[i] = Mask[i];
7622 } else if (Mask[i] >= Size) {
7623 V2Mask[i] = Mask[i] - Size;
7624 BlendMask[i] = i + Size;
7627 // Try to lower with the simpler initial blend strategy unless one of the
7628 // input shuffles would be a no-op. We prefer to shuffle inputs as the
7629 // shuffle may be able to fold with a load or other benefit. However, when
7630 // we'll have to do 2x as many shuffles in order to achieve this, blending
7631 // first is a better strategy.
7632 if (!isNoopShuffleMask(V1Mask) && !isNoopShuffleMask(V2Mask))
7633 if (SDValue BlendPerm =
7634 lowerVectorShuffleAsBlendAndPermute(DL, VT, V1, V2, Mask, DAG))
7637 V1 = DAG.getVectorShuffle(VT, DL, V1, DAG.getUNDEF(VT), V1Mask);
7638 V2 = DAG.getVectorShuffle(VT, DL, V2, DAG.getUNDEF(VT), V2Mask);
7639 return DAG.getVectorShuffle(VT, DL, V1, V2, BlendMask);
7642 /// \brief Try to lower a vector shuffle as a byte rotation.
7644 /// SSSE3 has a generic PALIGNR instruction in x86 that will do an arbitrary
7645 /// byte-rotation of the concatenation of two vectors; pre-SSSE3 can use
7646 /// a PSRLDQ/PSLLDQ/POR pattern to get a similar effect. This routine will
7647 /// try to generically lower a vector shuffle through such a pattern. It
7648 /// does not check for the profitability of lowering either as PALIGNR or
7649 /// PSRLDQ/PSLLDQ/POR, only whether the mask is valid to lower in that form.
7650 /// This matches shuffle vectors that look like:
7652 /// v8i16 [11, 12, 13, 14, 15, 0, 1, 2]
7654 /// Essentially it concatenates V1 and V2, shifts right by some number of
7655 /// elements, and takes the low elements as the result. Note that while this is
7656 /// specified as a *right shift* because x86 is little-endian, it is a *left
7657 /// rotate* of the vector lanes.
7658 static SDValue lowerVectorShuffleAsByteRotate(SDLoc DL, MVT VT, SDValue V1,
7661 const X86Subtarget *Subtarget,
7662 SelectionDAG &DAG) {
7663 assert(!isNoopShuffleMask(Mask) && "We shouldn't lower no-op shuffles!");
7665 int NumElts = Mask.size();
7666 int NumLanes = VT.getSizeInBits() / 128;
7667 int NumLaneElts = NumElts / NumLanes;
7669 // We need to detect various ways of spelling a rotation:
7670 // [11, 12, 13, 14, 15, 0, 1, 2]
7671 // [-1, 12, 13, 14, -1, -1, 1, -1]
7672 // [-1, -1, -1, -1, -1, -1, 1, 2]
7673 // [ 3, 4, 5, 6, 7, 8, 9, 10]
7674 // [-1, 4, 5, 6, -1, -1, 9, -1]
7675 // [-1, 4, 5, 6, -1, -1, -1, -1]
7678 for (int l = 0; l < NumElts; l += NumLaneElts) {
7679 for (int i = 0; i < NumLaneElts; ++i) {
7680 if (Mask[l + i] == -1)
7682 assert(Mask[l + i] >= 0 && "Only -1 is a valid negative mask element!");
7684       // Get the mod-NumElts index and lane correct it.
7685 int LaneIdx = (Mask[l + i] % NumElts) - l;
7686 // Make sure it was in this lane.
7687 if (LaneIdx < 0 || LaneIdx >= NumLaneElts)
7690 // Determine where a rotated vector would have started.
7691 int StartIdx = i - LaneIdx;
7693 // The identity rotation isn't interesting, stop.
7696       // If we found the tail of a vector, the rotation must be the number of
7697       // missing front elements; if we found the head, it must be how many of
7698       // its leading elements made it into the result.
7699 int CandidateRotation = StartIdx < 0 ? -StartIdx : NumLaneElts - StartIdx;
7702 Rotation = CandidateRotation;
7703 else if (Rotation != CandidateRotation)
7704 // The rotations don't match, so we can't match this mask.
7707 // Compute which value this mask is pointing at.
7708 SDValue MaskV = Mask[l + i] < NumElts ? V1 : V2;
7710 // Compute which of the two target values this index should be assigned
7711 // to. This reflects whether the high elements are remaining or the low
7712 // elements are remaining.
7713 SDValue &TargetV = StartIdx < 0 ? Hi : Lo;
7715 // Either set up this value if we've not encountered it before, or check
7716 // that it remains consistent.
7719 else if (TargetV != MaskV)
7720 // This may be a rotation, but it pulls from the inputs in some
7721 // unsupported interleaving.
7726 // Check that we successfully analyzed the mask, and normalize the results.
7727 assert(Rotation != 0 && "Failed to locate a viable rotation!");
7728 assert((Lo || Hi) && "Failed to find a rotated input vector!");
7734 // The actual rotate instruction rotates bytes, so we need to scale the
7735 // rotation based on how many bytes are in the vector lane.
7736 int Scale = 16 / NumLaneElts;
7738 // SSSE3 targets can use the palignr instruction.
7739 if (Subtarget->hasSSSE3()) {
7740 // Cast the inputs to i8 vector of correct length to match PALIGNR.
7741 MVT AlignVT = MVT::getVectorVT(MVT::i8, 16 * NumLanes);
7742 Lo = DAG.getNode(ISD::BITCAST, DL, AlignVT, Lo);
7743 Hi = DAG.getNode(ISD::BITCAST, DL, AlignVT, Hi);
7745 return DAG.getNode(ISD::BITCAST, DL, VT,
7746 DAG.getNode(X86ISD::PALIGNR, DL, AlignVT, Hi, Lo,
7747 DAG.getConstant(Rotation * Scale, MVT::i8)));
7750 assert(VT.getSizeInBits() == 128 &&
7751 "Rotate-based lowering only supports 128-bit lowering!");
7752 assert(Mask.size() <= 16 &&
7753 "Can shuffle at most 16 bytes in a 128-bit vector!");
7755 // Default SSE2 implementation
7756 int LoByteShift = 16 - Rotation * Scale;
7757 int HiByteShift = Rotation * Scale;
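  // Emulate the rotate with two byte shifts and an OR: the high input
  // contributes its upper bytes to the low end of the result, the low input
  // contributes its low bytes to the high end, and the two shifted values
  // occupy disjoint byte positions.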
7759 // Cast the inputs to v2i64 to match PSLLDQ/PSRLDQ.
7760 Lo = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, Lo);
7761 Hi = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, Hi);
7763 SDValue LoShift = DAG.getNode(X86ISD::VSHLDQ, DL, MVT::v2i64, Lo,
7764 DAG.getConstant(8 * LoByteShift, MVT::i8));
7765 SDValue HiShift = DAG.getNode(X86ISD::VSRLDQ, DL, MVT::v2i64, Hi,
7766 DAG.getConstant(8 * HiByteShift, MVT::i8));
7767 return DAG.getNode(ISD::BITCAST, DL, VT,
7768 DAG.getNode(ISD::OR, DL, MVT::v2i64, LoShift, HiShift));
7771 /// \brief Compute whether each element of a shuffle is zeroable.
7773 /// A "zeroable" vector shuffle element is one which can be lowered to zero.
7774 /// Either it is an undef element in the shuffle mask, the element of the input
7775 /// referenced is undef, or the element of the input referenced is known to be
7776 /// zero. Many x86 shuffles can zero lanes cheaply and we often want to handle
7777 /// as many lanes with this technique as possible to simplify the remaining shuffle.
7779 static SmallBitVector computeZeroableShuffleElements(ArrayRef<int> Mask,
7780 SDValue V1, SDValue V2) {
7781 SmallBitVector Zeroable(Mask.size(), false);
7783 while (V1.getOpcode() == ISD::BITCAST)
7784 V1 = V1->getOperand(0);
7785 while (V2.getOpcode() == ISD::BITCAST)
7786 V2 = V2->getOperand(0);
7788 bool V1IsZero = ISD::isBuildVectorAllZeros(V1.getNode());
7789 bool V2IsZero = ISD::isBuildVectorAllZeros(V2.getNode());
7791 for (int i = 0, Size = Mask.size(); i < Size; ++i) {
7793 // Handle the easy cases.
7794 if (M < 0 || (M >= 0 && M < Size && V1IsZero) || (M >= Size && V2IsZero)) {
7799 // If this is an index into a build_vector node (which has the same number
7800 // of elements), dig out the input value and use it.
7801 SDValue V = M < Size ? V1 : V2;
7802 if (V.getOpcode() != ISD::BUILD_VECTOR || Size != (int)V.getNumOperands())
7805 SDValue Input = V.getOperand(M % Size);
7806 // The UNDEF opcode check really should be dead code here, but not quite
7807 // worth asserting on (it isn't invalid, just unexpected).
7808 if (Input.getOpcode() == ISD::UNDEF || X86::isZeroNode(Input))
7815 /// \brief Try to emit a bitmask instruction for a shuffle.
7817 /// This handles cases where we can model a blend exactly as a bitmask due to
7818 /// one of the inputs being zeroable.
7819 static SDValue lowerVectorShuffleAsBitMask(SDLoc DL, MVT VT, SDValue V1,
7820 SDValue V2, ArrayRef<int> Mask,
7821 SelectionDAG &DAG) {
7822 MVT EltVT = VT.getScalarType();
7823 int NumEltBits = EltVT.getSizeInBits();
7824 MVT IntEltVT = MVT::getIntegerVT(NumEltBits);
7825 SDValue Zero = DAG.getConstant(0, IntEltVT);
7826 SDValue AllOnes = DAG.getConstant(APInt::getAllOnesValue(NumEltBits), IntEltVT);
7827 if (EltVT.isFloatingPoint()) {
7828 Zero = DAG.getNode(ISD::BITCAST, DL, EltVT, Zero);
7829 AllOnes = DAG.getNode(ISD::BITCAST, DL, EltVT, AllOnes);
7831 SmallVector<SDValue, 16> VMaskOps(Mask.size(), Zero);
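  // VMaskOps starts out all-zero; each lane that must pass a value through is
  // switched to all-ones below, so the final AND keeps exactly those lanes.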
7832 SmallBitVector Zeroable = computeZeroableShuffleElements(Mask, V1, V2);
7834 for (int i = 0, Size = Mask.size(); i < Size; ++i) {
7837 if (Mask[i] % Size != i)
7838 return SDValue(); // Not a blend.
7840 V = Mask[i] < Size ? V1 : V2;
7841 else if (V != (Mask[i] < Size ? V1 : V2))
7842 return SDValue(); // Can only let one input through the mask.
7844 VMaskOps[i] = AllOnes;
7847 return SDValue(); // No non-zeroable elements!
7849 SDValue VMask = DAG.getNode(ISD::BUILD_VECTOR, DL, VT, VMaskOps);
7850 V = DAG.getNode(VT.isFloatingPoint()
7851 ? (unsigned) X86ISD::FAND : (unsigned) ISD::AND,
7856 /// \brief Try to lower a vector shuffle as a byte shift (shifts in zeros).
7858 /// Attempts to match a shuffle mask against the PSRLDQ and PSLLDQ
7859 /// byte-shift instructions. The mask must consist of a shifted sequential
7860 /// shuffle from one of the input vectors and zeroable elements for the
7861 /// remaining 'shifted in' elements.
7862 static SDValue lowerVectorShuffleAsByteShift(SDLoc DL, MVT VT, SDValue V1,
7863 SDValue V2, ArrayRef<int> Mask,
7864 SelectionDAG &DAG) {
7865 assert(!isNoopShuffleMask(Mask) && "We shouldn't lower no-op shuffles!");
7867 SmallBitVector Zeroable = computeZeroableShuffleElements(Mask, V1, V2);
7869 int NumElts = VT.getVectorNumElements();
7870 int NumLanes = VT.getSizeInBits() / 128;
7871 int NumLaneElts = NumElts / NumLanes;
7872 int Scale = 16 / NumLaneElts;
7873 MVT ShiftVT = MVT::getVectorVT(MVT::i64, 2 * NumLanes);
7875 // PSLLDQ : (little-endian) left byte shift
7876 // [ zz, 0, 1, 2, 3, 4, 5, 6]
7877 // [ zz, zz, -1, -1, 2, 3, 4, -1]
7878 // [ zz, zz, zz, zz, zz, zz, -1, 1]
7879 // PSRLDQ : (little-endian) right byte shift
7880 // [ 5, 6, 7, zz, zz, zz, zz, zz]
7881 // [ -1, 5, 6, 7, zz, zz, zz, zz]
7882 // [ 1, 2, -1, -1, -1, -1, zz, zz]
7883 auto MatchByteShift = [&](int Shift) -> SDValue {
7884 bool MatchLeft = true, MatchRight = true;
7885 for (int l = 0; l < NumElts; l += NumLaneElts) {
7886 for (int i = 0; i < Shift; ++i)
7887 MatchLeft &= Zeroable[l + i];
7888 for (int i = NumLaneElts - Shift; i < NumLaneElts; ++i)
7889 MatchRight &= Zeroable[l + i];
7891 if (!(MatchLeft || MatchRight))
7894 bool MatchV1 = true, MatchV2 = true;
7895 for (int l = 0; l < NumElts; l += NumLaneElts) {
7896 unsigned Pos = MatchLeft ? Shift + l : l;
7897 unsigned Low = MatchLeft ? l : Shift + l;
7898 unsigned Len = NumLaneElts - Shift;
7899 MatchV1 &= isSequentialOrUndefInRange(Mask, Pos, Len, Low);
7900 MatchV2 &= isSequentialOrUndefInRange(Mask, Pos, Len, Low + NumElts);
7902 if (!(MatchV1 || MatchV2))
7905 int ByteShift = Shift * Scale;
7906 unsigned Op = MatchRight ? X86ISD::VSRLDQ : X86ISD::VSHLDQ;
7907 SDValue V = MatchV1 ? V1 : V2;
7908 V = DAG.getNode(ISD::BITCAST, DL, ShiftVT, V);
7909 V = DAG.getNode(Op, DL, ShiftVT, V,
7910 DAG.getConstant(ByteShift * 8, MVT::i8));
7911 return DAG.getNode(ISD::BITCAST, DL, VT, V);
7914 for (int Shift = 1; Shift < NumLaneElts; ++Shift)
7915 if (SDValue S = MatchByteShift(Shift))
7922 /// \brief Try to lower a vector shuffle as a bit shift (shifts in zeros).
7924 /// Attempts to match a shuffle mask against the PSRL(W/D/Q) and PSLL(W/D/Q)
7925 /// SSE2 and AVX2 logical bit-shift instructions. The function matches
7926 /// elements from one of the input vectors shuffled to the left or right
7927 /// with zeroable elements 'shifted in'.
7928 static SDValue lowerVectorShuffleAsBitShift(SDLoc DL, MVT VT, SDValue V1,
7929 SDValue V2, ArrayRef<int> Mask,
7930 SelectionDAG &DAG) {
7931 SmallBitVector Zeroable = computeZeroableShuffleElements(Mask, V1, V2);
7933 int Size = Mask.size();
7934 assert(Size == (int)VT.getVectorNumElements() && "Unexpected mask size");
7936 // PSRL : (little-endian) right bit shift.
7939   // PSLL : (little-endian) left bit shift.
7941 // [ -1, 4, zz, -1 ]
7942 auto MatchBitShift = [&](int Shift, int Scale) -> SDValue {
7943 MVT ShiftSVT = MVT::getIntegerVT(VT.getScalarSizeInBits() * Scale);
7944 MVT ShiftVT = MVT::getVectorVT(ShiftSVT, Size / Scale);
7945 assert(DAG.getTargetLoweringInfo().isTypeLegal(ShiftVT) &&
7946 "Illegal integer vector type");
7948 bool MatchLeft = true, MatchRight = true;
7949 for (int i = 0; i != Size; i += Scale) {
7950 for (int j = 0; j != Shift; ++j) {
7951 MatchLeft &= Zeroable[i + j];
7953 for (int j = Scale - Shift; j != Scale; ++j) {
7954 MatchRight &= Zeroable[i + j];
7957 if (!(MatchLeft || MatchRight))
7960 bool MatchV1 = true, MatchV2 = true;
7961 for (int i = 0; i != Size; i += Scale) {
7962 unsigned Pos = MatchLeft ? i + Shift : i;
7963 unsigned Low = MatchLeft ? i : i + Shift;
7964 unsigned Len = Scale - Shift;
7965 MatchV1 &= isSequentialOrUndefInRange(Mask, Pos, Len, Low);
7966 MatchV2 &= isSequentialOrUndefInRange(Mask, Pos, Len, Low + Size);
7968 if (!(MatchV1 || MatchV2))
7969 return SDValue();
7971 // Cast the inputs to ShiftVT to match VSRLI/VSHLI and back again.
7972 unsigned OpCode = MatchLeft ? X86ISD::VSHLI : X86ISD::VSRLI;
7973 int ShiftAmt = Shift * VT.getScalarSizeInBits();
7974 SDValue V = MatchV1 ? V1 : V2;
7975 V = DAG.getNode(ISD::BITCAST, DL, ShiftVT, V);
7976 V = DAG.getNode(OpCode, DL, ShiftVT, V, DAG.getConstant(ShiftAmt, MVT::i8));
7977 return DAG.getNode(ISD::BITCAST, DL, VT, V);
7978 };
7980 // SSE/AVX supports logical shifts up to 64-bit integers - so we can just
7981 // keep doubling the size of the integer elements up to that. We can
7982 // then shift the elements of the integer vector by whole multiples of
7983 // their width within the elements of the larger integer vector. Test each
7984 // multiple to see if we can find a match with the moved element indices
7985 // and that the shifted in elements are all zeroable.
7986 for (int Scale = 2; Scale * VT.getScalarSizeInBits() <= 64; Scale *= 2)
7987 for (int Shift = 1; Shift != Scale; ++Shift)
7988 if (SDValue BitShift = MatchBitShift(Shift, Scale))
7989 return BitShift;
7991 // no match
7992 return SDValue();
7993 }
7995 /// \brief Lower a vector shuffle as a zero or any extension.
7997 /// Given a specific number of elements, element bit width, and extension
7998 /// stride, produce either a zero or any extension based on the available
7999 /// features of the subtarget.
8000 static SDValue lowerVectorShuffleAsSpecificZeroOrAnyExtend(
8001 SDLoc DL, MVT VT, int Scale, bool AnyExt, SDValue InputV,
8002 const X86Subtarget *Subtarget, SelectionDAG &DAG) {
8003 assert(Scale > 1 && "Need a scale to extend.");
8004 int NumElements = VT.getVectorNumElements();
8005 int EltBits = VT.getScalarSizeInBits();
8006 assert((EltBits == 8 || EltBits == 16 || EltBits == 32) &&
8007 "Only 8, 16, and 32 bit elements can be extended.");
8008 assert(Scale * EltBits <= 64 && "Cannot zero extend past 64 bits.");
8010 // Found a valid zext mask! Try various lowering strategies based on the
8011 // input type and available ISA extensions.
8012 if (Subtarget->hasSSE41()) {
8013 MVT ExtVT = MVT::getVectorVT(MVT::getIntegerVT(EltBits * Scale),
8014 NumElements / Scale);
8015 return DAG.getNode(ISD::BITCAST, DL, VT,
8016 DAG.getNode(X86ISD::VZEXT, DL, ExtVT, InputV));
8017 }
8019 // For any extends we can cheat for larger element sizes and use shuffle
8020 // instructions that can fold with a load and/or copy.
8021 if (AnyExt && EltBits == 32) {
8022 int PSHUFDMask[4] = {0, -1, 1, -1};
8023 return DAG.getNode(
8024 ISD::BITCAST, DL, VT,
8025 DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32,
8026 DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, InputV),
8027 getV4X86ShuffleImm8ForMask(PSHUFDMask, DAG)));
8028 }
8029 if (AnyExt && EltBits == 16 && Scale > 2) {
8030 int PSHUFDMask[4] = {0, -1, 0, -1};
8031 InputV = DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32,
8032 DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, InputV),
8033 getV4X86ShuffleImm8ForMask(PSHUFDMask, DAG));
8034 int PSHUFHWMask[4] = {1, -1, -1, -1};
8035 return DAG.getNode(
8036 ISD::BITCAST, DL, VT,
8037 DAG.getNode(X86ISD::PSHUFHW, DL, MVT::v8i16,
8038 DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, InputV),
8039 getV4X86ShuffleImm8ForMask(PSHUFHWMask, DAG)));
8040 }
8042 // If this would require more than 2 unpack instructions to expand, use
8043 // pshufb when available. We can only use more than 2 unpack instructions
8044 // when zero extending i8 elements which also makes it easier to use pshufb.
8045 if (Scale > 4 && EltBits == 8 && Subtarget->hasSSSE3()) {
8046 assert(NumElements == 16 && "Unexpected byte vector width!");
8047 SDValue PSHUFBMask[16];
8048 for (int i = 0; i < 16; ++i)
8049 PSHUFBMask[i] =
8050 DAG.getConstant((i % Scale == 0) ? i / Scale : 0x80, MVT::i8);
8051 InputV = DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, InputV);
8052 return DAG.getNode(ISD::BITCAST, DL, VT,
8053 DAG.getNode(X86ISD::PSHUFB, DL, MVT::v16i8, InputV,
8054 DAG.getNode(ISD::BUILD_VECTOR, DL,
8055 MVT::v16i8, PSHUFBMask)));
8056 }
8058 // Otherwise emit a sequence of unpacks.
8059 do {
8060 MVT InputVT = MVT::getVectorVT(MVT::getIntegerVT(EltBits), NumElements);
8061 SDValue Ext = AnyExt ? DAG.getUNDEF(InputVT)
8062 : getZeroVector(InputVT, Subtarget, DAG, DL);
8063 InputV = DAG.getNode(ISD::BITCAST, DL, InputVT, InputV);
8064 InputV = DAG.getNode(X86ISD::UNPCKL, DL, InputVT, InputV, Ext);
8065 Scale /= 2;
8066 EltBits *= 2;
8067 NumElements /= 2;
8068 } while (Scale > 1);
8069 return DAG.getNode(ISD::BITCAST, DL, VT, InputV);
8070 }
8072 /// \brief Try to lower a vector shuffle as a zero extension on any microarch.
8074 /// This routine will try to do everything in its power to cleverly lower
8075 /// a shuffle which happens to match the pattern of a zero extend. It doesn't
8076 /// check for the profitability of this lowering, it tries to aggressively
8077 /// match this pattern. It will use all of the micro-architectural details it
8078 /// can to emit an efficient lowering. It handles both blends with all-zero
8079 /// inputs to explicitly zero-extend and undef-lanes (sometimes undef due to
8080 /// masking out later).
8082 /// The reason we have dedicated lowering for zext-style shuffles is that they
8083 /// are both incredibly common and often quite performance sensitive.
8084 static SDValue lowerVectorShuffleAsZeroOrAnyExtend(
8085 SDLoc DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
8086 const X86Subtarget *Subtarget, SelectionDAG &DAG) {
8087 SmallBitVector Zeroable = computeZeroableShuffleElements(Mask, V1, V2);
8089 int Bits = VT.getSizeInBits();
8090 int NumElements = VT.getVectorNumElements();
8091 assert(VT.getScalarSizeInBits() <= 32 &&
8092 "Exceeds 32-bit integer zero extension limit");
8093 assert((int)Mask.size() == NumElements && "Unexpected shuffle mask size");
8095 // Define a helper function to check a particular ext-scale and lower to it if
8096 // valid.
8097 auto Lower = [&](int Scale) -> SDValue {
8098 SDValue InputV;
8099 bool AnyExt = true;
8100 for (int i = 0; i < NumElements; ++i) {
8101 if (Mask[i] == -1)
8102 continue; // Valid anywhere but doesn't tell us anything.
8103 if (i % Scale != 0) {
8104 // Each of the extended elements need to be zeroable.
8105 if (!Zeroable[i])
8106 return SDValue();
8108 // We no longer are in the anyext case.
8109 AnyExt = false;
8110 continue;
8111 }
8113 // Each of the base elements needs to be consecutive indices into the
8114 // same input vector.
8115 SDValue V = Mask[i] < NumElements ? V1 : V2;
8116 if (!InputV)
8117 InputV = V;
8118 else if (InputV != V)
8119 return SDValue(); // Flip-flopping inputs.
8121 if (Mask[i] % NumElements != i / Scale)
8122 return SDValue(); // Non-consecutive strided elements.
8123 }
8125 // If we fail to find an input, we have a zero-shuffle which should always
8126 // have already been handled.
8127 // FIXME: Maybe handle this here in case during blending we end up with one?
8128 if (!InputV)
8129 return SDValue();
8131 return lowerVectorShuffleAsSpecificZeroOrAnyExtend(
8132 DL, VT, Scale, AnyExt, InputV, Subtarget, DAG);
8133 };
8135 // The widest scale possible for extending is to a 64-bit integer.
8136 assert(Bits % 64 == 0 &&
8137 "The number of bits in a vector must be divisible by 64 on x86!");
8138 int NumExtElements = Bits / 64;
8140 // Each iteration, try extending the elements half as much, but into twice as
8141 // many elements.
8142 for (; NumExtElements < NumElements; NumExtElements *= 2) {
8143 assert(NumElements % NumExtElements == 0 &&
8144 "The input vector size must be divisible by the extended size.");
8145 if (SDValue V = Lower(NumElements / NumExtElements))
8146 return V;
8147 }
8149 // General extends failed, but 128-bit vectors may be able to use MOVQ.
8150 if (Bits != 128)
8151 return SDValue();
8153 // Returns one of the source operands if the shuffle can be reduced to a
8154 // MOVQ, copying the lower 64-bits and zero-extending to the upper 64-bits.
8155 auto CanZExtLowHalf = [&]() {
8156 for (int i = NumElements / 2; i != NumElements; ++i)
8157 if (!Zeroable[i])
8158 return SDValue();
8159 if (isSequentialOrUndefInRange(Mask, 0, NumElements / 2, 0))
8160 return V1;
8161 if (isSequentialOrUndefInRange(Mask, 0, NumElements / 2, NumElements))
8162 return V2;
8163 return SDValue();
8164 };
8166 if (SDValue V = CanZExtLowHalf()) {
8167 V = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, V);
8168 V = DAG.getNode(X86ISD::VZEXT_MOVL, DL, MVT::v2i64, V);
8169 return DAG.getNode(ISD::BITCAST, DL, VT, V);
8170 }
8172 // No viable ext lowering found.
8173 return SDValue();
8174 }
8176 /// \brief Try to get a scalar value for a specific element of a vector.
8178 /// Looks through BUILD_VECTOR and SCALAR_TO_VECTOR nodes to find a scalar.
8179 static SDValue getScalarValueForVectorElement(SDValue V, int Idx,
8180 SelectionDAG &DAG) {
8181 MVT VT = V.getSimpleValueType();
8182 MVT EltVT = VT.getVectorElementType();
8183 while (V.getOpcode() == ISD::BITCAST)
8184 V = V.getOperand(0);
8185 // If the bitcasts shift the element size, we can't extract an equivalent
8186 // element from it.
8187 MVT NewVT = V.getSimpleValueType();
8188 if (!NewVT.isVector() || NewVT.getScalarSizeInBits() != VT.getScalarSizeInBits())
8189 return SDValue();
8191 if (V.getOpcode() == ISD::BUILD_VECTOR ||
8192 (Idx == 0 && V.getOpcode() == ISD::SCALAR_TO_VECTOR))
8193 return DAG.getNode(ISD::BITCAST, SDLoc(V), EltVT, V.getOperand(Idx));
8195 return SDValue();
8196 }
8198 /// \brief Helper to test for a load that can be folded with x86 shuffles.
8200 /// This is particularly important because the set of instructions varies
8201 /// significantly based on whether the operand is a load or not.
8202 static bool isShuffleFoldableLoad(SDValue V) {
8203 while (V.getOpcode() == ISD::BITCAST)
8204 V = V.getOperand(0);
8206 return ISD::isNON_EXTLoad(V.getNode());
8207 }
8209 /// \brief Try to lower insertion of a single element into a zero vector.
8211 /// This is a common pattern that we have especially efficient patterns to lower
8212 /// across all subtarget feature sets.
8213 static SDValue lowerVectorShuffleAsElementInsertion(
8214 MVT VT, SDLoc DL, SDValue V1, SDValue V2, ArrayRef<int> Mask,
8215 const X86Subtarget *Subtarget, SelectionDAG &DAG) {
8216 SmallBitVector Zeroable = computeZeroableShuffleElements(Mask, V1, V2);
8217 MVT ExtVT = VT;
8218 MVT EltVT = VT.getVectorElementType();
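8219 // Find the slot taken from V2: mask values >= Mask.size() refer to V2 elements, so V2Index is the destination index of the (single) V2 input.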
8220 int V2Index = std::find_if(Mask.begin(), Mask.end(),
8221 [&Mask](int M) { return M >= (int)Mask.size(); }) -
8222 Mask.begin();
8223 bool IsV1Zeroable = true;
8224 for (int i = 0, Size = Mask.size(); i < Size; ++i)
8225 if (i != V2Index && !Zeroable[i]) {
8226 IsV1Zeroable = false;
8227 break;
8228 }
8230 // Check for a single input from a SCALAR_TO_VECTOR node.
8231 // FIXME: All of this should be canonicalized into INSERT_VECTOR_ELT and
8232 // all the smarts here sunk into that routine. However, the current
8233 // lowering of BUILD_VECTOR makes that nearly impossible until the old
8234 // vector shuffle lowering is dead.
8235 if (SDValue V2S = getScalarValueForVectorElement(
8236 V2, Mask[V2Index] - Mask.size(), DAG)) {
8237 // We need to zext the scalar if it is smaller than an i32.
8238 V2S = DAG.getNode(ISD::BITCAST, DL, EltVT, V2S);
8239 if (EltVT == MVT::i8 || EltVT == MVT::i16) {
8240 // Using zext to expand a narrow element won't work for non-zero
8241 // insertions.
8242 if (!IsV1Zeroable)
8243 return SDValue();
8245 // Zero-extend directly to i32.
8246 ExtVT = MVT::v4i32;
8247 V2S = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i32, V2S);
8248 }
8249 V2 = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, ExtVT, V2S);
8250 } else if (Mask[V2Index] != (int)Mask.size() || EltVT == MVT::i8 ||
8251 EltVT == MVT::i16) {
8252 // Either not inserting from the low element of the input or the input
8253 // element size is too small to use VZEXT_MOVL to clear the high bits.
8254 return SDValue();
8255 }
8257 if (!IsV1Zeroable) {
8258 // If V1 can't be treated as a zero vector we have fewer options to lower
8259 // this. We can't support integer vectors or non-zero targets cheaply, and
8260 // the V1 elements can't be permuted in any way.
8261 assert(VT == ExtVT && "Cannot change extended type when non-zeroable!");
8262 if (!VT.isFloatingPoint() || V2Index != 0)
8263 return SDValue();
8264 SmallVector<int, 8> V1Mask(Mask.begin(), Mask.end());
8265 V1Mask[V2Index] = -1;
8266 if (!isNoopShuffleMask(V1Mask))
8267 return SDValue();
8268 // This is essentially a special case blend operation, but if we have
8269 // general purpose blend operations, they are always faster. Bail and let
8270 // the rest of the lowering handle these as blends.
8271 if (Subtarget->hasSSE41())
8272 return SDValue();
8274 // Otherwise, use MOVSD or MOVSS.
8275 assert((EltVT == MVT::f32 || EltVT == MVT::f64) &&
8276 "Only two types of floating point element types to handle!");
8277 return DAG.getNode(EltVT == MVT::f32 ? X86ISD::MOVSS : X86ISD::MOVSD, DL,
8278 ExtVT, V1, V2);
8279 }
8281 // This lowering only works for the low element with floating point vectors.
8282 if (VT.isFloatingPoint() && V2Index != 0)
8283 return SDValue();
8285 V2 = DAG.getNode(X86ISD::VZEXT_MOVL, DL, ExtVT, V2);
8286 if (ExtVT != VT)
8287 V2 = DAG.getNode(ISD::BITCAST, DL, VT, V2);
8289 if (V2Index != 0) {
8290 // If we have 4 or fewer lanes we can cheaply shuffle the element into
8291 // the desired position. Otherwise it is more efficient to do a vector
8292 // shift left. We know that we can do a vector shift left because all
8293 // the inputs are zero.
8294 if (VT.isFloatingPoint() || VT.getVectorNumElements() <= 4) {
8295 SmallVector<int, 4> V2Shuffle(Mask.size(), 1);
8296 V2Shuffle[V2Index] = 0;
8297 V2 = DAG.getVectorShuffle(VT, DL, V2, DAG.getUNDEF(VT), V2Shuffle);
8298 } else {
8299 V2 = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, V2);
8300 V2 = DAG.getNode(
8301 X86ISD::VSHLDQ, DL, MVT::v2i64, V2,
8302 DAG.getConstant(
8303 V2Index * EltVT.getSizeInBits() / 8,
8304 DAG.getTargetLoweringInfo().getScalarShiftAmountTy(MVT::v2i64)));
8305 V2 = DAG.getNode(ISD::BITCAST, DL, VT, V2);
8306 }
8307 }
8308 return V2;
8309 }
8311 /// \brief Try to lower broadcast of a single element.
8313 /// For convenience, this code also bundles all of the subtarget feature set
8314 /// filtering. While a little annoying to re-dispatch on type here, there isn't
8315 /// a convenient way to factor it out.
8316 static SDValue lowerVectorShuffleAsBroadcast(MVT VT, SDLoc DL, SDValue V,
8317 ArrayRef<int> Mask,
8318 const X86Subtarget *Subtarget,
8319 SelectionDAG &DAG) {
8320 if (!Subtarget->hasAVX())
8321 return SDValue();
8322 if (VT.isInteger() && !Subtarget->hasAVX2())
8323 return SDValue();
8325 // Check that the mask is a broadcast.
8326 int BroadcastIdx = -1;
8327 for (int M : Mask)
8328 if (M >= 0 && BroadcastIdx == -1)
8329 BroadcastIdx = M;
8330 else if (M >= 0 && M != BroadcastIdx)
8331 return SDValue();
8333 assert(BroadcastIdx < (int)Mask.size() && "We only expect to be called with "
8334 "a sorted mask where the broadcast "
8335 "comes from V1.");
8337 // Go up the chain of (vector) values to try and find a scalar load that
8338 // we can combine with the broadcast.
8339 for (;;) {
8340 switch (V.getOpcode()) {
8341 case ISD::CONCAT_VECTORS: {
8342 int OperandSize = Mask.size() / V.getNumOperands();
8343 V = V.getOperand(BroadcastIdx / OperandSize);
8344 BroadcastIdx %= OperandSize;
8345 continue;
8346 }
8348 case ISD::INSERT_SUBVECTOR: {
8349 SDValue VOuter = V.getOperand(0), VInner = V.getOperand(1);
8350 auto ConstantIdx = dyn_cast<ConstantSDNode>(V.getOperand(2));
8351 if (!ConstantIdx)
8352 break;
8354 int BeginIdx = (int)ConstantIdx->getZExtValue();
8355 int EndIdx =
8356 BeginIdx + (int)VInner.getValueType().getVectorNumElements();
8357 if (BroadcastIdx >= BeginIdx && BroadcastIdx < EndIdx) {
8358 BroadcastIdx -= BeginIdx;
8359 V = VInner;
8360 } else {
8361 V = VOuter;
8362 }
8363 continue;
8364 }
8365 }
8366 break;
8367 }
8369 // Check if this is a broadcast of a scalar. We special case lowering
8370 // for scalars so that we can more effectively fold with loads.
8371 if (V.getOpcode() == ISD::BUILD_VECTOR ||
8372 (V.getOpcode() == ISD::SCALAR_TO_VECTOR && BroadcastIdx == 0)) {
8373 V = V.getOperand(BroadcastIdx);
8375 // If the scalar isn't a load we can't broadcast from it in AVX1, only with
8377 if (!Subtarget->hasAVX2() && !isShuffleFoldableLoad(V))
8378 return SDValue();
8379 } else if (BroadcastIdx != 0 || !Subtarget->hasAVX2()) {
8380 // We can't broadcast from a vector register w/o AVX2, and we can only
8381 // broadcast from the zero-element of a vector register.
8382 return SDValue();
8383 }
8385 return DAG.getNode(X86ISD::VBROADCAST, DL, VT, V);
8386 }
8388 // Check for whether we can use INSERTPS to perform the shuffle. We only use
8389 // INSERTPS when the V1 elements are already in the correct locations
8390 // because otherwise we can just always use two SHUFPS instructions which
8391 // are much smaller to encode than a SHUFPS and an INSERTPS. We can also
8392 // perform INSERTPS if a single V1 element is out of place and all V2
8393 // elements are zeroable.
8394 static SDValue lowerVectorShuffleAsInsertPS(SDValue Op, SDValue V1, SDValue V2,
8395 ArrayRef<int> Mask,
8396 SelectionDAG &DAG) {
8397 assert(Op.getSimpleValueType() == MVT::v4f32 && "Bad shuffle type!");
8398 assert(V1.getSimpleValueType() == MVT::v4f32 && "Bad operand type!");
8399 assert(V2.getSimpleValueType() == MVT::v4f32 && "Bad operand type!");
8400 assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");
8402 SmallBitVector Zeroable = computeZeroableShuffleElements(Mask, V1, V2);
8404 unsigned ZMask = 0;
8405 int V1DstIndex = -1;
8406 int V2DstIndex = -1;
8407 bool V1UsedInPlace = false;
8409 for (int i = 0; i < 4; ++i) {
8410 // Synthesize a zero mask from the zeroable elements (includes undefs).
8411 if (Zeroable[i]) {
8412 ZMask |= 1 << i;
8413 continue;
8414 }
8416 // Flag if we use any V1 inputs in place.
8417 if (i == Mask[i]) {
8418 V1UsedInPlace = true;
8419 continue;
8420 }
8422 // We can only insert a single non-zeroable element.
8423 if (V1DstIndex != -1 || V2DstIndex != -1)
8424 return SDValue();
8426 if (Mask[i] < 4) {
8427 // V1 input out of place for insertion.
8428 V1DstIndex = i;
8429 } else {
8430 // V2 input for insertion.
8431 V2DstIndex = i;
8432 }
8433 }
8435 // Don't bother if we have no (non-zeroable) element for insertion.
8436 if (V1DstIndex == -1 && V2DstIndex == -1)
8437 return SDValue();
8439 // Determine element insertion src/dst indices. The src index is from the
8440 // start of the inserted vector, not the start of the concatenated vector.
8441 unsigned V2SrcIndex = 0;
8442 if (V1DstIndex != -1) {
8443 // If we have a V1 input out of place, we use V1 as the V2 element insertion
8444 // and don't use the original V2 at all.
8445 V2SrcIndex = Mask[V1DstIndex];
8446 V2DstIndex = V1DstIndex;
8447 V2 = V1;
8448 } else {
8449 V2SrcIndex = Mask[V2DstIndex] - 4;
8450 }
8452 // If no V1 inputs are used in place, then the result is created only from
8453 // the zero mask and the V2 insertion - so remove V1 dependency.
8454 if (!V1UsedInPlace)
8455 V1 = DAG.getUNDEF(MVT::v4f32);
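8456 // INSERTPS immediate: bits [7:6] select the V2 source element, bits [5:4] select the destination slot, and bits [3:0] are the zero mask.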
8457 unsigned InsertPSMask = V2SrcIndex << 6 | V2DstIndex << 4 | ZMask;
8458 assert((InsertPSMask & ~0xFFu) == 0 && "Invalid mask!");
8460 // Insert the V2 element into the desired position.
8461 SDLoc DL(Op);
8462 return DAG.getNode(X86ISD::INSERTPS, DL, MVT::v4f32, V1, V2,
8463 DAG.getConstant(InsertPSMask, MVT::i8));
8464 }
8466 /// \brief Handle lowering of 2-lane 64-bit floating point shuffles.
8468 /// This is the basis function for the 2-lane 64-bit shuffles as we have full
8469 /// support for floating point shuffles but not integer shuffles. These
8470 /// instructions will incur a domain crossing penalty on some chips though so
8471 /// it is better to avoid lowering through this for integer vectors where
8473 static SDValue lowerV2F64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
8474 const X86Subtarget *Subtarget,
8475 SelectionDAG &DAG) {
8476 SDLoc DL(Op);
8477 assert(Op.getSimpleValueType() == MVT::v2f64 && "Bad shuffle type!");
8478 assert(V1.getSimpleValueType() == MVT::v2f64 && "Bad operand type!");
8479 assert(V2.getSimpleValueType() == MVT::v2f64 && "Bad operand type!");
8480 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
8481 ArrayRef<int> Mask = SVOp->getMask();
8482 assert(Mask.size() == 2 && "Unexpected mask size for v2 shuffle!");
8484 if (isSingleInputShuffleMask(Mask)) {
8485 // Use low duplicate instructions for masks that match their pattern.
8486 if (Subtarget->hasSSE3())
8487 if (isShuffleEquivalent(V1, V2, Mask, 0, 0))
8488 return DAG.getNode(X86ISD::MOVDDUP, DL, MVT::v2f64, V1);
8490 // Straight shuffle of a single input vector. Simulate this by using the
8491 // single input as both of the "inputs" to this instruction.
8492 unsigned SHUFPDMask = (Mask[0] == 1) | ((Mask[1] == 1) << 1);
8494 if (Subtarget->hasAVX()) {
8495 // If we have AVX, we can use VPERMILPS which will allow folding a load
8496 // into the shuffle.
8497 return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v2f64, V1,
8498 DAG.getConstant(SHUFPDMask, MVT::i8));
8499 }
8501 return DAG.getNode(X86ISD::SHUFP, SDLoc(Op), MVT::v2f64, V1, V1,
8502 DAG.getConstant(SHUFPDMask, MVT::i8));
8503 }
8504 assert(Mask[0] >= 0 && Mask[0] < 2 && "Non-canonicalized blend!");
8505 assert(Mask[1] >= 2 && "Non-canonicalized blend!");
8507 // If we have a single input, insert that into V1 if we can do so cheaply.
8508 if ((Mask[0] >= 2) + (Mask[1] >= 2) == 1) {
8509 if (SDValue Insertion = lowerVectorShuffleAsElementInsertion(
8510 MVT::v2f64, DL, V1, V2, Mask, Subtarget, DAG))
8511 return Insertion;
8512 // Try inverting the insertion since for v2 masks it is easy to do and we
8513 // can't reliably sort the mask one way or the other.
8514 int InverseMask[2] = {Mask[0] < 0 ? -1 : (Mask[0] ^ 2),
8515 Mask[1] < 0 ? -1 : (Mask[1] ^ 2)};
8516 if (SDValue Insertion = lowerVectorShuffleAsElementInsertion(
8517 MVT::v2f64, DL, V2, V1, InverseMask, Subtarget, DAG))
8518 return Insertion;
8519 }
8521 // Try to use one of the special instruction patterns to handle two common
8522 // blend patterns if a zero-blend above didn't work.
8523 if (isShuffleEquivalent(V1, V2, Mask, 0, 3) || isShuffleEquivalent(V1, V2, Mask, 1, 3))
8524 if (SDValue V1S = getScalarValueForVectorElement(V1, Mask[0], DAG))
8525 // We can either use a special instruction to load over the low double or
8526 // to move just the low double.
8527 return DAG.getNode(
8528 isShuffleFoldableLoad(V1S) ? X86ISD::MOVLPD : X86ISD::MOVSD,
8529 DL, MVT::v2f64, V2,
8530 DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v2f64, V1S));
8532 if (Subtarget->hasSSE41())
8533 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v2f64, V1, V2, Mask,
8534 Subtarget, DAG))
8535 return Blend;
8537 // Use dedicated unpack instructions for masks that match their pattern.
8538 if (isShuffleEquivalent(V1, V2, Mask, 0, 2))
8539 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v2f64, V1, V2);
8540 if (isShuffleEquivalent(V1, V2, Mask, 1, 3))
8541 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v2f64, V1, V2);
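8542 // SHUFPD immediate: bit 0 selects which element of V1 goes to lane 0, bit 1 selects which element of V2 goes to lane 1.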
8543 unsigned SHUFPDMask = (Mask[0] == 1) | (((Mask[1] - 2) == 1) << 1);
8544 return DAG.getNode(X86ISD::SHUFP, SDLoc(Op), MVT::v2f64, V1, V2,
8545 DAG.getConstant(SHUFPDMask, MVT::i8));
8546 }
8548 /// \brief Handle lowering of 2-lane 64-bit integer shuffles.
8550 /// Tries to lower a 2-lane 64-bit shuffle using shuffle operations provided by
8551 /// the integer unit to minimize domain crossing penalties. However, for blends
8552 /// it falls back to the floating point shuffle operation with appropriate bit
8553 /// casting.
8554 static SDValue lowerV2I64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
8555 const X86Subtarget *Subtarget,
8556 SelectionDAG &DAG) {
8557 SDLoc DL(Op);
8558 assert(Op.getSimpleValueType() == MVT::v2i64 && "Bad shuffle type!");
8559 assert(V1.getSimpleValueType() == MVT::v2i64 && "Bad operand type!");
8560 assert(V2.getSimpleValueType() == MVT::v2i64 && "Bad operand type!");
8561 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
8562 ArrayRef<int> Mask = SVOp->getMask();
8563 assert(Mask.size() == 2 && "Unexpected mask size for v2 shuffle!");
8565 if (isSingleInputShuffleMask(Mask)) {
8566 // Check for being able to broadcast a single element.
8567 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v2i64, DL, V1,
8568 Mask, Subtarget, DAG))
8569 return Broadcast;
8571 // Straight shuffle of a single input vector. For everything from SSE2
8572 // onward this has a single fast instruction with no scary immediates.
8573 // We have to map the mask as it is actually a v4i32 shuffle instruction.
8574 V1 = DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, V1);
8575 int WidenedMask[4] = {
8576 std::max(Mask[0], 0) * 2, std::max(Mask[0], 0) * 2 + 1,
8577 std::max(Mask[1], 0) * 2, std::max(Mask[1], 0) * 2 + 1};
8578 return DAG.getNode(
8579 ISD::BITCAST, DL, MVT::v2i64,
8580 DAG.getNode(X86ISD::PSHUFD, SDLoc(Op), MVT::v4i32, V1,
8581 getV4X86ShuffleImm8ForMask(WidenedMask, DAG)));
8582 }
8584 // Try to use byte shift instructions.
8585 if (SDValue Shift = lowerVectorShuffleAsByteShift(
8586 DL, MVT::v2i64, V1, V2, Mask, DAG))
8587 return Shift;
8589 // If we have a single input from V2 insert that into V1 if we can do so
8590 // cheaply.
8591 if ((Mask[0] >= 2) + (Mask[1] >= 2) == 1) {
8592 if (SDValue Insertion = lowerVectorShuffleAsElementInsertion(
8593 MVT::v2i64, DL, V1, V2, Mask, Subtarget, DAG))
8594 return Insertion;
8595 // Try inverting the insertion since for v2 masks it is easy to do and we
8596 // can't reliably sort the mask one way or the other.
8597 int InverseMask[2] = {Mask[0] < 0 ? -1 : (Mask[0] ^ 2),
8598 Mask[1] < 0 ? -1 : (Mask[1] ^ 2)};
8599 if (SDValue Insertion = lowerVectorShuffleAsElementInsertion(
8600 MVT::v2i64, DL, V2, V1, InverseMask, Subtarget, DAG))
8601 return Insertion;
8602 }
8604 // We have different paths for blend lowering, but they all must use the
8605 // *exact* same predicate.
8606 bool IsBlendSupported = Subtarget->hasSSE41();
8607 if (IsBlendSupported)
8608 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v2i64, V1, V2, Mask,
8609 Subtarget, DAG))
8610 return Blend;
8612 // Use dedicated unpack instructions for masks that match their pattern.
8613 if (isShuffleEquivalent(V1, V2, Mask, 0, 2))
8614 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v2i64, V1, V2);
8615 if (isShuffleEquivalent(V1, V2, Mask, 1, 3))
8616 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v2i64, V1, V2);
8618 // Try to use byte rotation instructions.
8619 // It's more profitable for pre-SSSE3 to use shuffles/unpacks.
8620 if (Subtarget->hasSSSE3())
8621 if (SDValue Rotate = lowerVectorShuffleAsByteRotate(
8622 DL, MVT::v2i64, V1, V2, Mask, Subtarget, DAG))
8623 return Rotate;
8625 // If we have direct support for blends, we should lower by decomposing into
8626 // a permute. That will be faster than the domain cross.
8627 if (IsBlendSupported)
8628 return lowerVectorShuffleAsDecomposedShuffleBlend(DL, MVT::v2i64, V1, V2,
8629 Mask, DAG);
8631 // We implement this with SHUFPD which is pretty lame because it will likely
8632 // incur 2 cycles of stall for integer vectors on Nehalem and older chips.
8633 // However, all the alternatives are still more cycles and newer chips don't
8634 // have this problem. It would be really nice if x86 had better shuffles here.
8635 V1 = DAG.getNode(ISD::BITCAST, DL, MVT::v2f64, V1);
8636 V2 = DAG.getNode(ISD::BITCAST, DL, MVT::v2f64, V2);
8637 return DAG.getNode(ISD::BITCAST, DL, MVT::v2i64,
8638 DAG.getVectorShuffle(MVT::v2f64, DL, V1, V2, Mask));
8639 }
8641 /// \brief Test whether this can be lowered with a single SHUFPS instruction.
8643 /// This is used to disable more specialized lowerings when the shufps lowering
8644 /// will happen to be efficient.
8645 static bool isSingleSHUFPSMask(ArrayRef<int> Mask) {
8646 // This routine only handles 128-bit shufps.
8647 assert(Mask.size() == 4 && "Unsupported mask size!");
8649 // To lower with a single SHUFPS we need to have the low half and high half
8650 // each requiring a single input.
8651 if (Mask[0] != -1 && Mask[1] != -1 && (Mask[0] < 4) != (Mask[1] < 4))
8652 return false;
8653 if (Mask[2] != -1 && Mask[3] != -1 && (Mask[2] < 4) != (Mask[3] < 4))
8654 return false;
8656 return true;
8657 }
8659 /// \brief Lower a vector shuffle using the SHUFPS instruction.
8661 /// This is a helper routine dedicated to lowering vector shuffles using SHUFPS.
8662 /// It makes no assumptions about whether this is the *best* lowering, it simply
8663 /// uses it.
8664 static SDValue lowerVectorShuffleWithSHUFPS(SDLoc DL, MVT VT,
8665 ArrayRef<int> Mask, SDValue V1,
8666 SDValue V2, SelectionDAG &DAG) {
8667 SDValue LowV = V1, HighV = V2;
8668 int NewMask[4] = {Mask[0], Mask[1], Mask[2], Mask[3]};
8670 int NumV2Elements =
8671 std::count_if(Mask.begin(), Mask.end(), [](int M) { return M >= 4; });
8673 if (NumV2Elements == 1) {
8675 std::find_if(Mask.begin(), Mask.end(), [](int M) { return M >= 4; }) -
8676 Mask.begin();
8678 // Compute the index adjacent to V2Index and in the same half by toggling
8680 int V2AdjIndex = V2Index ^ 1;
8682 if (Mask[V2AdjIndex] == -1) {
8683 // Handles all the cases where we have a single V2 element and an undef.
8684 // This will only ever happen in the high lanes because we commute the
8685 // vector otherwise.
8686 if (V2Index < 2)
8687 std::swap(LowV, HighV);
8688 NewMask[V2Index] -= 4;
8689 } else {
8690 // Handle the case where the V2 element ends up adjacent to a V1 element.
8691 // To make this work, blend them together as the first step.
8692 int V1Index = V2AdjIndex;
8693 int BlendMask[4] = {Mask[V2Index] - 4, 0, Mask[V1Index], 0};
8694 V2 = DAG.getNode(X86ISD::SHUFP, DL, VT, V2, V1,
8695 getV4X86ShuffleImm8ForMask(BlendMask, DAG));
8697 // Now proceed to reconstruct the final blend as we have the necessary
8698 // high or low half formed.
8699 if (V2Index < 2) {
8700 LowV = V2;
8701 HighV = V1;
8702 } else {
8703 HighV = V2;
8704 }
8705 NewMask[V1Index] = 2; // We put the V1 element in V2[2].
8706 NewMask[V2Index] = 0; // We shifted the V2 element into V2[0].
8708 } else if (NumV2Elements == 2) {
8709 if (Mask[0] < 4 && Mask[1] < 4) {
8710 // Handle the easy case where we have V1 in the low lanes and V2 in the
8711 // high lanes.
8712 NewMask[2] -= 4;
8713 NewMask[3] -= 4;
8714 } else if (Mask[2] < 4 && Mask[3] < 4) {
8715 // We also handle the reversed case because this utility may get called
8716 // when we detect a SHUFPS pattern but can't easily commute the shuffle to
8717 // arrange things in the right direction.
8718 NewMask[0] -= 4;
8719 NewMask[1] -= 4;
8720 HighV = V1;
8721 LowV = V2;
8722 } else {
8723 // We have a mixture of V1 and V2 in both low and high lanes. Rather than
8724 // trying to place elements directly, just blend them and set up the final
8725 // shuffle to place them.
8727 // The first two blend mask elements are for V1, the second two are for
8729 int BlendMask[4] = {Mask[0] < 4 ? Mask[0] : Mask[1],
8730 Mask[2] < 4 ? Mask[2] : Mask[3],
8731 (Mask[0] >= 4 ? Mask[0] : Mask[1]) - 4,
8732 (Mask[2] >= 4 ? Mask[2] : Mask[3]) - 4};
8733 V1 = DAG.getNode(X86ISD::SHUFP, DL, VT, V1, V2,
8734 getV4X86ShuffleImm8ForMask(BlendMask, DAG));
8736 // Now we do a normal shuffle of V1 by giving V1 as both operands to
8739 NewMask[0] = Mask[0] < 4 ? 0 : 2;
8740 NewMask[1] = Mask[0] < 4 ? 2 : 0;
8741 NewMask[2] = Mask[2] < 4 ? 1 : 3;
8742 NewMask[3] = Mask[2] < 4 ? 3 : 1;
8743 }
8745 return DAG.getNode(X86ISD::SHUFP, DL, VT, LowV, HighV,
8746 getV4X86ShuffleImm8ForMask(NewMask, DAG));
8749 /// \brief Lower 4-lane 32-bit floating point shuffles.
8751 /// Uses instructions exclusively from the floating point unit to minimize
8752 /// domain crossing penalties, as these are sufficient to implement all v4f32
8753 /// shuffles.
8754 static SDValue lowerV4F32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
8755 const X86Subtarget *Subtarget,
8756 SelectionDAG &DAG) {
8757 SDLoc DL(Op);
8758 assert(Op.getSimpleValueType() == MVT::v4f32 && "Bad shuffle type!");
8759 assert(V1.getSimpleValueType() == MVT::v4f32 && "Bad operand type!");
8760 assert(V2.getSimpleValueType() == MVT::v4f32 && "Bad operand type!");
8761 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
8762 ArrayRef<int> Mask = SVOp->getMask();
8763 assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");
8765 int NumV2Elements =
8766 std::count_if(Mask.begin(), Mask.end(), [](int M) { return M >= 4; });
8768 if (NumV2Elements == 0) {
8769 // Check for being able to broadcast a single element.
8770 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v4f32, DL, V1,
8771 Mask, Subtarget, DAG))
8772 return Broadcast;
8774 // Use even/odd duplicate instructions for masks that match their pattern.
8775 if (Subtarget->hasSSE3()) {
8776 if (isShuffleEquivalent(V1, V2, Mask, 0, 0, 2, 2))
8777 return DAG.getNode(X86ISD::MOVSLDUP, DL, MVT::v4f32, V1);
8778 if (isShuffleEquivalent(V1, V2, Mask, 1, 1, 3, 3))
8779 return DAG.getNode(X86ISD::MOVSHDUP, DL, MVT::v4f32, V1);
8780 }
8782 if (Subtarget->hasAVX()) {
8783 // If we have AVX, we can use VPERMILPS which will allow folding a load
8784 // into the shuffle.
8785 return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v4f32, V1,
8786 getV4X86ShuffleImm8ForMask(Mask, DAG));
8787 }
8789 // Otherwise, use a straight shuffle of a single input vector. We pass the
8790 // input vector to both operands to simulate this with a SHUFPS.
8791 return DAG.getNode(X86ISD::SHUFP, DL, MVT::v4f32, V1, V1,
8792 getV4X86ShuffleImm8ForMask(Mask, DAG));
8793 }
8795 // There are special ways we can lower some single-element blends. However, we
8796 // have custom ways we can lower more complex single-element blends below that
8797 // we defer to if both this and BLENDPS fail to match, so restrict this to
8798 // when the V2 input is targeting element 0 of the mask -- that is the fast
8800 if (NumV2Elements == 1 && Mask[0] >= 4)
8801 if (SDValue V = lowerVectorShuffleAsElementInsertion(MVT::v4f32, DL, V1, V2,
8802 Mask, Subtarget, DAG))
8805 if (Subtarget->hasSSE41()) {
8806 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v4f32, V1, V2, Mask,
8810 // Use INSERTPS if we can complete the shuffle efficiently.
8811 if (SDValue V = lowerVectorShuffleAsInsertPS(Op, V1, V2, Mask, DAG))
8814 if (!isSingleSHUFPSMask(Mask))
8815 if (SDValue BlendPerm = lowerVectorShuffleAsBlendAndPermute(
8816 DL, MVT::v4f32, V1, V2, Mask, DAG))
8820 // Use dedicated unpack instructions for masks that match their pattern.
8821 if (isShuffleEquivalent(V1, V2, Mask, 0, 4, 1, 5))
8822 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v4f32, V1, V2);
8823 if (isShuffleEquivalent(V1, V2, Mask, 2, 6, 3, 7))
8824 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v4f32, V1, V2);
8826 // Otherwise fall back to a SHUFPS lowering strategy.
8827 return lowerVectorShuffleWithSHUFPS(DL, MVT::v4f32, Mask, V1, V2, DAG);
8830 /// \brief Lower 4-lane i32 vector shuffles.
8832 /// We try to handle these with integer-domain shuffles where we can, but for
8833 /// blends we use the floating point domain blend instructions.
8834 static SDValue lowerV4I32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
8835 const X86Subtarget *Subtarget,
8836 SelectionDAG &DAG) {
8837 SDLoc DL(Op);
8838 assert(Op.getSimpleValueType() == MVT::v4i32 && "Bad shuffle type!");
8839 assert(V1.getSimpleValueType() == MVT::v4i32 && "Bad operand type!");
8840 assert(V2.getSimpleValueType() == MVT::v4i32 && "Bad operand type!");
8841 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
8842 ArrayRef<int> Mask = SVOp->getMask();
8843 assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");
8845 // Whenever we can lower this as a zext, that instruction is strictly faster
8846 // than any alternative. It also allows us to fold memory operands into the
8847 // shuffle in many cases.
8848 if (SDValue ZExt = lowerVectorShuffleAsZeroOrAnyExtend(DL, MVT::v4i32, V1, V2,
8849 Mask, Subtarget, DAG))
8850 return ZExt;
8852 int NumV2Elements =
8853 std::count_if(Mask.begin(), Mask.end(), [](int M) { return M >= 4; });
8855 if (NumV2Elements == 0) {
8856 // Check for being able to broadcast a single element.
8857 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v4i32, DL, V1,
8858 Mask, Subtarget, DAG))
8859 return Broadcast;
8861 // Straight shuffle of a single input vector. For everything from SSE2
8862 // onward this has a single fast instruction with no scary immediates.
8863 // We coerce the shuffle pattern to be compatible with UNPCK instructions
8864 // but we aren't actually going to use the UNPCK instruction because doing
8865 // so prevents folding a load into this instruction or making a copy.
8866 const int UnpackLoMask[] = {0, 0, 1, 1};
8867 const int UnpackHiMask[] = {2, 2, 3, 3};
8868 if (isShuffleEquivalent(V1, V2, Mask, 0, 0, 1, 1))
8869 Mask = UnpackLoMask;
8870 else if (isShuffleEquivalent(V1, V2, Mask, 2, 2, 3, 3))
8871 Mask = UnpackHiMask;
8873 return DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32, V1,
8874 getV4X86ShuffleImm8ForMask(Mask, DAG));
8877 // Try to use bit shift instructions.
8878 if (SDValue Shift = lowerVectorShuffleAsBitShift(
8879 DL, MVT::v4i32, V1, V2, Mask, DAG))
8882 // Try to use byte shift instructions.
8883 if (SDValue Shift = lowerVectorShuffleAsByteShift(
8884 DL, MVT::v4i32, V1, V2, Mask, DAG))
8887 // There are special ways we can lower some single-element blends.
8888 if (NumV2Elements == 1)
8889 if (SDValue V = lowerVectorShuffleAsElementInsertion(MVT::v4i32, DL, V1, V2,
8890 Mask, Subtarget, DAG))
8893 // We have different paths for blend lowering, but they all must use the
8894 // *exact* same predicate.
8895 bool IsBlendSupported = Subtarget->hasSSE41();
8896 if (IsBlendSupported)
8897 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v4i32, V1, V2, Mask,
8901 if (SDValue Masked =
8902 lowerVectorShuffleAsBitMask(DL, MVT::v4i32, V1, V2, Mask, DAG))
8905 // Use dedicated unpack instructions for masks that match their pattern.
8906 if (isShuffleEquivalent(V1, V2, Mask, 0, 4, 1, 5))
8907 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v4i32, V1, V2);
8908 if (isShuffleEquivalent(V1, V2, Mask, 2, 6, 3, 7))
8909 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v4i32, V1, V2);
8911 // Try to use byte rotation instructions.
8912 // It's more profitable for pre-SSSE3 to use shuffles/unpacks.
8913 if (Subtarget->hasSSSE3())
8914 if (SDValue Rotate = lowerVectorShuffleAsByteRotate(
8915 DL, MVT::v4i32, V1, V2, Mask, Subtarget, DAG))
8918 // If we have direct support for blends, we should lower by decomposing into
8919 // a permute. That will be faster than the domain cross.
8920 if (IsBlendSupported)
8921 return lowerVectorShuffleAsDecomposedShuffleBlend(DL, MVT::v4i32, V1, V2,
8924 // We implement this with SHUFPS because it can blend from two vectors.
8925 // Because we're going to eventually use SHUFPS, we use SHUFPS even to build
8926 // up the inputs, bypassing domain shift penalties that we would incur if we
8927 // directly used PSHUFD on Nehalem and older. For newer chips, this isn't
8928 // relevant.
8929 return DAG.getNode(ISD::BITCAST, DL, MVT::v4i32,
8930 DAG.getVectorShuffle(
8931 MVT::v4f32, DL,
8932 DAG.getNode(ISD::BITCAST, DL, MVT::v4f32, V1),
8933 DAG.getNode(ISD::BITCAST, DL, MVT::v4f32, V2), Mask));
8934 }
8936 /// \brief Lowering of single-input v8i16 shuffles is the cornerstone of SSE2
8937 /// shuffle lowering, and the most complex part.
8939 /// The lowering strategy is to try to form pairs of input lanes which are
8940 /// targeted at the same half of the final vector, and then use a dword shuffle
8941 /// to place them onto the right half, and finally unpack the paired lanes into
8942 /// their final position.
8944 /// The exact breakdown of how to form these dword pairs and align them on the
8945 /// correct sides is really tricky. See the comments within the function for
8946 /// more of the details.
8947 static SDValue lowerV8I16SingleInputVectorShuffle(
8948 SDLoc DL, SDValue V, MutableArrayRef<int> Mask,
8949 const X86Subtarget *Subtarget, SelectionDAG &DAG) {
8950 assert(V.getSimpleValueType() == MVT::v8i16 && "Bad input type!");
8951 MutableArrayRef<int> LoMask = Mask.slice(0, 4);
8952 MutableArrayRef<int> HiMask = Mask.slice(4, 4);
8954 SmallVector<int, 4> LoInputs;
8955 std::copy_if(LoMask.begin(), LoMask.end(), std::back_inserter(LoInputs),
8956 [](int M) { return M >= 0; });
8957 std::sort(LoInputs.begin(), LoInputs.end());
8958 LoInputs.erase(std::unique(LoInputs.begin(), LoInputs.end()), LoInputs.end());
8959 SmallVector<int, 4> HiInputs;
8960 std::copy_if(HiMask.begin(), HiMask.end(), std::back_inserter(HiInputs),
8961 [](int M) { return M >= 0; });
8962 std::sort(HiInputs.begin(), HiInputs.end());
8963 HiInputs.erase(std::unique(HiInputs.begin(), HiInputs.end()), HiInputs.end());
8964 int NumLToL =
8965 std::lower_bound(LoInputs.begin(), LoInputs.end(), 4) - LoInputs.begin();
8966 int NumHToL = LoInputs.size() - NumLToL;
8967 int NumLToH =
8968 std::lower_bound(HiInputs.begin(), HiInputs.end(), 4) - HiInputs.begin();
8969 int NumHToH = HiInputs.size() - NumLToH;
8970 MutableArrayRef<int> LToLInputs(LoInputs.data(), NumLToL);
8971 MutableArrayRef<int> LToHInputs(HiInputs.data(), NumLToH);
8972 MutableArrayRef<int> HToLInputs(LoInputs.data() + NumLToL, NumHToL);
8973 MutableArrayRef<int> HToHInputs(HiInputs.data() + NumLToH, NumHToH);
8975 // Check for being able to broadcast a single element.
8976 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v8i16, DL, V,
8977 Mask, Subtarget, DAG))
8980 // Try to use bit shift instructions.
8981 if (SDValue Shift = lowerVectorShuffleAsBitShift(
8982 DL, MVT::v8i16, V, V, Mask, DAG))
8985 // Try to use byte shift instructions.
8986 if (SDValue Shift = lowerVectorShuffleAsByteShift(
8987 DL, MVT::v8i16, V, V, Mask, DAG))
8990 // Use dedicated unpack instructions for masks that match their pattern.
8991 if (isShuffleEquivalent(V, V, Mask, 0, 0, 1, 1, 2, 2, 3, 3))
8992 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v8i16, V, V);
8993 if (isShuffleEquivalent(V, V, Mask, 4, 4, 5, 5, 6, 6, 7, 7))
8994 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v8i16, V, V);
8996 // Try to use byte rotation instructions.
8997 if (SDValue Rotate = lowerVectorShuffleAsByteRotate(
8998 DL, MVT::v8i16, V, V, Mask, Subtarget, DAG))
9001 // Simplify the 1-into-3 and 3-into-1 cases with a single pshufd. For all
9002 // such inputs we can swap two of the dwords across the half mark and end up
9003 // with <=2 inputs to each half in each half. Once there, we can fall through
9004 // to the generic code below. For example:
9006 // Input: [a, b, c, d, e, f, g, h] -PSHUFD[0,2,1,3]-> [a, b, e, f, c, d, g, h]
9007 // Mask: [0, 1, 2, 7, 4, 5, 6, 3] -----------------> [0, 1, 4, 7, 2, 3, 6, 5]
9009 // However in some very rare cases we have a 1-into-3 or 3-into-1 on one half
9010 // and an existing 2-into-2 on the other half. In this case we may have to
9011 // pre-shuffle the 2-into-2 half to avoid turning it into a 3-into-1 or
9012 // 1-into-3 which could cause us to cycle endlessly fixing each side in turn.
9013 // Fortunately, we don't have to handle anything but a 2-into-2 pattern
9014 // because any other situation (including a 3-into-1 or 1-into-3 in the other
9015 // half than the one we target for fixing) will be fixed when we re-enter this
9016 // path. We will also combine away any sequence of PSHUFD instructions that
9017 // result into a single instruction. Here is an example of the tricky case:
9019 // Input: [a, b, c, d, e, f, g, h] -PSHUFD[0,2,1,3]-> [a, b, e, f, c, d, g, h]
9020 // Mask: [3, 7, 1, 0, 2, 7, 3, 5] -THIS-IS-BAD!!!!-> [5, 7, 1, 0, 4, 7, 5, 3]
9022 // This now has a 1-into-3 in the high half! Instead, we do two shuffles:
9024 // Input: [a, b, c, d, e, f, g, h] PSHUFHW[0,2,1,3]-> [a, b, c, d, e, g, f, h]
9025 // Mask: [3, 7, 1, 0, 2, 7, 3, 5] -----------------> [3, 7, 1, 0, 2, 7, 3, 6]
9027 // Input: [a, b, c, d, e, g, f, h] -PSHUFD[0,2,1,3]-> [a, b, e, g, c, d, f, h]
9028 // Mask: [3, 7, 1, 0, 2, 7, 3, 6] -----------------> [5, 7, 1, 0, 4, 7, 5, 6]
9030 // The result is fine to be handled by the generic logic.
9031 auto balanceSides = [&](ArrayRef<int> AToAInputs, ArrayRef<int> BToAInputs,
9032 ArrayRef<int> BToBInputs, ArrayRef<int> AToBInputs,
9033 int AOffset, int BOffset) {
9034 assert((AToAInputs.size() == 3 || AToAInputs.size() == 1) &&
9035 "Must call this with A having 3 or 1 inputs from the A half.");
9036 assert((BToAInputs.size() == 1 || BToAInputs.size() == 3) &&
9037 "Must call this with B having 1 or 3 inputs from the B half.");
9038 assert(AToAInputs.size() + BToAInputs.size() == 4 &&
9039 "Must call this with either 3:1 or 1:3 inputs (summing to 4).");
9041 // Compute the index of dword with only one word among the three inputs in
9042 // a half by taking the sum of the half with three inputs and subtracting
9043 // the sum of the actual three inputs. The difference is the remaining
9044 // slot.
9045 int ADWord, BDWord;
9046 int &TripleDWord = AToAInputs.size() == 3 ? ADWord : BDWord;
9047 int &OneInputDWord = AToAInputs.size() == 3 ? BDWord : ADWord;
9048 int TripleInputOffset = AToAInputs.size() == 3 ? AOffset : BOffset;
9049 ArrayRef<int> TripleInputs = AToAInputs.size() == 3 ? AToAInputs : BToAInputs;
9050 int OneInput = AToAInputs.size() == 3 ? BToAInputs[0] : AToAInputs[0];
9051 int TripleInputSum = 0 + 1 + 2 + 3 + (4 * TripleInputOffset);
9052 int TripleNonInputIdx =
9053 TripleInputSum - std::accumulate(TripleInputs.begin(), TripleInputs.end(), 0);
9054 TripleDWord = TripleNonInputIdx / 2;
9056 // We use xor with one to compute the adjacent DWord to whichever one the
9058 OneInputDWord = (OneInput / 2) ^ 1;
9060 // Check for one tricky case: We're fixing a 3<-1 or a 1<-3 shuffle for AToA
9061 // and BToA inputs. If there is also such a problem with the BToB and AToB
9062 // inputs, we don't try to fix it necessarily -- we'll recurse and see it in
9063 // the next pass. However, if we have a 2<-2 in the BToB and AToB inputs, it
9064 // is essential that we don't *create* a 3<-1 as then we might oscillate.
9065 if (BToBInputs.size() == 2 && AToBInputs.size() == 2) {
9066 // Compute how many inputs will be flipped by swapping these DWords. We need
9068 // to balance this to ensure we don't form a 3-1 shuffle in the other half.
9070 int NumFlippedAToBInputs =
9071 std::count(AToBInputs.begin(), AToBInputs.end(), 2 * ADWord) +
9072 std::count(AToBInputs.begin(), AToBInputs.end(), 2 * ADWord + 1);
9073 int NumFlippedBToBInputs =
9074 std::count(BToBInputs.begin(), BToBInputs.end(), 2 * BDWord) +
9075 std::count(BToBInputs.begin(), BToBInputs.end(), 2 * BDWord + 1);
9076 if ((NumFlippedAToBInputs == 1 &&
9077 (NumFlippedBToBInputs == 0 || NumFlippedBToBInputs == 2)) ||
9078 (NumFlippedBToBInputs == 1 &&
9079 (NumFlippedAToBInputs == 0 || NumFlippedAToBInputs == 2))) {
9080 // We choose whether to fix the A half or B half based on whether that
9081 // half has zero flipped inputs. At zero, we may not be able to fix it
9082 // with that half. We also bias towards fixing the B half because that
9083 // will more commonly be the high half, and we have to bias one way.
9084 auto FixFlippedInputs = [&V, &DL, &Mask, &DAG](int PinnedIdx, int DWord,
9085 ArrayRef<int> Inputs) {
9086 int FixIdx = PinnedIdx ^ 1; // The adjacent slot to the pinned slot.
9087 bool IsFixIdxInput = std::find(Inputs.begin(), Inputs.end(),
9088 PinnedIdx ^ 1) != Inputs.end();
9089 // Determine whether the free index is in the flipped dword or the
9090 // unflipped dword based on where the pinned index is. We use this bit
9091 // in an xor to conditionally select the adjacent dword.
9092 int FixFreeIdx = 2 * (DWord ^ (PinnedIdx / 2 == DWord));
9093 bool IsFixFreeIdxInput = std::find(Inputs.begin(), Inputs.end(),
9094 FixFreeIdx) != Inputs.end();
9095 if (IsFixIdxInput == IsFixFreeIdxInput)
9097 IsFixFreeIdxInput = std::find(Inputs.begin(), Inputs.end(),
9098 FixFreeIdx) != Inputs.end();
9099 assert(IsFixIdxInput != IsFixFreeIdxInput &&
9100 "We need to be changing the number of flipped inputs!");
9101 int PSHUFHalfMask[] = {0, 1, 2, 3};
9102 std::swap(PSHUFHalfMask[FixFreeIdx % 4], PSHUFHalfMask[FixIdx % 4]);
9103 V = DAG.getNode(FixIdx < 4 ? X86ISD::PSHUFLW : X86ISD::PSHUFHW, DL,
9105 getV4X86ShuffleImm8ForMask(PSHUFHalfMask, DAG));
9108 if (M != -1 && M == FixIdx)
9110 else if (M != -1 && M == FixFreeIdx)
9113 if (NumFlippedBToBInputs != 0) {
9115 BToAInputs.size() == 3 ? TripleNonInputIdx : OneInput;
9116 FixFlippedInputs(BPinnedIdx, BDWord, BToBInputs);
9118 assert(NumFlippedAToBInputs != 0 && "Impossible given predicates!");
9120 AToAInputs.size() == 3 ? TripleNonInputIdx : OneInput;
9121 FixFlippedInputs(APinnedIdx, ADWord, AToBInputs);
9126 int PSHUFDMask[] = {0, 1, 2, 3};
9127 PSHUFDMask[ADWord] = BDWord;
9128 PSHUFDMask[BDWord] = ADWord;
9129 V = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16,
9130 DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32,
9131 DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, V),
9132 getV4X86ShuffleImm8ForMask(PSHUFDMask, DAG)));
9134 // Adjust the mask to match the new locations of A and B.
9135 for (int &M : Mask)
9136 if (M != -1 && M/2 == ADWord)
9137 M = 2 * BDWord + M % 2;
9138 else if (M != -1 && M/2 == BDWord)
9139 M = 2 * ADWord + M % 2;
9141 // Recurse back into this routine to re-compute state now that this isn't
9142 // a 3 and 1 problem.
9143 return DAG.getVectorShuffle(MVT::v8i16, DL, V, DAG.getUNDEF(MVT::v8i16),
9144 Mask);
9145 };
9146 if ((NumLToL == 3 && NumHToL == 1) || (NumLToL == 1 && NumHToL == 3))
9147 return balanceSides(LToLInputs, HToLInputs, HToHInputs, LToHInputs, 0, 4);
9148 else if ((NumHToH == 3 && NumLToH == 1) || (NumHToH == 1 && NumLToH == 3))
9149 return balanceSides(HToHInputs, LToHInputs, LToLInputs, HToLInputs, 4, 0);
9151 // At this point there are at most two inputs to the low and high halves from
9152 // each half. That means the inputs can always be grouped into dwords and
9153 // those dwords can then be moved to the correct half with a dword shuffle.
9154 // We use at most one low and one high word shuffle to collect these paired
9155 // inputs into dwords, and finally a dword shuffle to place them.
9156 int PSHUFLMask[4] = {-1, -1, -1, -1};
9157 int PSHUFHMask[4] = {-1, -1, -1, -1};
9158 int PSHUFDMask[4] = {-1, -1, -1, -1};
9160 // First fix the masks for all the inputs that are staying in their
9161 // original halves. This will then dictate the targets of the cross-half
9163 auto fixInPlaceInputs =
9164 [&PSHUFDMask](ArrayRef<int> InPlaceInputs, ArrayRef<int> IncomingInputs,
9165 MutableArrayRef<int> SourceHalfMask,
9166 MutableArrayRef<int> HalfMask, int HalfOffset) {
9167 if (InPlaceInputs.empty())
9169 if (InPlaceInputs.size() == 1) {
9170 SourceHalfMask[InPlaceInputs[0] - HalfOffset] =
9171 InPlaceInputs[0] - HalfOffset;
9172 PSHUFDMask[InPlaceInputs[0] / 2] = InPlaceInputs[0] / 2;
9175 if (IncomingInputs.empty()) {
9176 // Just fix all of the in place inputs.
9177 for (int Input : InPlaceInputs) {
9178 SourceHalfMask[Input - HalfOffset] = Input - HalfOffset;
9179 PSHUFDMask[Input / 2] = Input / 2;
9184 assert(InPlaceInputs.size() == 2 && "Cannot handle 3 or 4 inputs!");
9185 SourceHalfMask[InPlaceInputs[0] - HalfOffset] =
9186 InPlaceInputs[0] - HalfOffset;
9187 // Put the second input next to the first so that they are packed into
9188 // a dword. We find the adjacent index by toggling the low bit.
9189 int AdjIndex = InPlaceInputs[0] ^ 1;
9190 SourceHalfMask[AdjIndex - HalfOffset] = InPlaceInputs[1] - HalfOffset;
9191 std::replace(HalfMask.begin(), HalfMask.end(), InPlaceInputs[1], AdjIndex);
9192 PSHUFDMask[AdjIndex / 2] = AdjIndex / 2;
9194 fixInPlaceInputs(LToLInputs, HToLInputs, PSHUFLMask, LoMask, 0);
9195 fixInPlaceInputs(HToHInputs, LToHInputs, PSHUFHMask, HiMask, 4);
9197 // Now gather the cross-half inputs and place them into a free dword of
9198 // their target half.
9199 // FIXME: This operation could almost certainly be simplified dramatically to
9200 // look more like the 3-1 fixing operation.
9201 auto moveInputsToRightHalf = [&PSHUFDMask](
9202 MutableArrayRef<int> IncomingInputs, ArrayRef<int> ExistingInputs,
9203 MutableArrayRef<int> SourceHalfMask, MutableArrayRef<int> HalfMask,
9204 MutableArrayRef<int> FinalSourceHalfMask, int SourceOffset,
9205 int DestOffset) {
9206 auto isWordClobbered = [](ArrayRef<int> SourceHalfMask, int Word) {
9207 return SourceHalfMask[Word] != -1 && SourceHalfMask[Word] != Word;
9209 auto isDWordClobbered = [&isWordClobbered](ArrayRef<int> SourceHalfMask,
9211 int LowWord = Word & ~1;
9212 int HighWord = Word | 1;
9213 return isWordClobbered(SourceHalfMask, LowWord) ||
9214 isWordClobbered(SourceHalfMask, HighWord);
9217 if (IncomingInputs.empty())
9220 if (ExistingInputs.empty()) {
9221 // Map any dwords with inputs from them into the right half.
9222 for (int Input : IncomingInputs) {
9223 // If the source half mask maps over the inputs, turn those into
9224 // swaps and use the swapped lane.
9225 if (isWordClobbered(SourceHalfMask, Input - SourceOffset)) {
9226 if (SourceHalfMask[SourceHalfMask[Input - SourceOffset]] == -1) {
9227 SourceHalfMask[SourceHalfMask[Input - SourceOffset]] =
9228 Input - SourceOffset;
9229 // We have to swap the uses in our half mask in one sweep.
9230 for (int &M : HalfMask)
9231 if (M == SourceHalfMask[Input - SourceOffset] + SourceOffset)
9233 else if (M == Input)
9234 M = SourceHalfMask[Input - SourceOffset] + SourceOffset;
9236 assert(SourceHalfMask[SourceHalfMask[Input - SourceOffset]] ==
9237 Input - SourceOffset &&
9238 "Previous placement doesn't match!");
9240 // Note that this correctly re-maps both when we do a swap and when
9241 // we observe the other side of the swap above. We rely on that to
9242 // avoid swapping the members of the input list directly.
9243 Input = SourceHalfMask[Input - SourceOffset] + SourceOffset;
9246 // Map the input's dword into the correct half.
9247 if (PSHUFDMask[(Input - SourceOffset + DestOffset) / 2] == -1)
9248 PSHUFDMask[(Input - SourceOffset + DestOffset) / 2] = Input / 2;
9250 assert(PSHUFDMask[(Input - SourceOffset + DestOffset) / 2] ==
9252 "Previous placement doesn't match!");
9255 // And just directly shift any other-half mask elements to be same-half
9256 // as we will have mirrored the dword containing the element into the
9257 // same position within that half.
9258 for (int &M : HalfMask)
9259 if (M >= SourceOffset && M < SourceOffset + 4) {
9260 M = M - SourceOffset + DestOffset;
9261 assert(M >= 0 && "This should never wrap below zero!");
9266 // Ensure we have the input in a viable dword of its current half. This
9267 // is particularly tricky because the original position may be clobbered
9268 // by inputs being moved and *staying* in that half.
9269 if (IncomingInputs.size() == 1) {
9270 if (isWordClobbered(SourceHalfMask, IncomingInputs[0] - SourceOffset)) {
9271 int InputFixed = std::find(std::begin(SourceHalfMask),
9272 std::end(SourceHalfMask), -1) -
9273 std::begin(SourceHalfMask) + SourceOffset;
9274 SourceHalfMask[InputFixed - SourceOffset] =
9275 IncomingInputs[0] - SourceOffset;
9276 std::replace(HalfMask.begin(), HalfMask.end(), IncomingInputs[0],
9278 IncomingInputs[0] = InputFixed;
9280 } else if (IncomingInputs.size() == 2) {
9281 if (IncomingInputs[0] / 2 != IncomingInputs[1] / 2 ||
9282 isDWordClobbered(SourceHalfMask, IncomingInputs[0] - SourceOffset)) {
9283 // We have two non-adjacent or clobbered inputs we need to extract from
9284 // the source half. To do this, we need to map them into some adjacent
9285 // dword slot in the source mask.
9286 int InputsFixed[2] = {IncomingInputs[0] - SourceOffset,
9287 IncomingInputs[1] - SourceOffset};
9289 // If there is a free slot in the source half mask adjacent to one of
9290 // the inputs, place the other input in it. We use (Index XOR 1) to
9291 // compute an adjacent index.
9292 if (!isWordClobbered(SourceHalfMask, InputsFixed[0]) &&
9293 SourceHalfMask[InputsFixed[0] ^ 1] == -1) {
9294 SourceHalfMask[InputsFixed[0]] = InputsFixed[0];
9295 SourceHalfMask[InputsFixed[0] ^ 1] = InputsFixed[1];
9296 InputsFixed[1] = InputsFixed[0] ^ 1;
9297 } else if (!isWordClobbered(SourceHalfMask, InputsFixed[1]) &&
9298 SourceHalfMask[InputsFixed[1] ^ 1] == -1) {
9299 SourceHalfMask[InputsFixed[1]] = InputsFixed[1];
9300 SourceHalfMask[InputsFixed[1] ^ 1] = InputsFixed[0];
9301 InputsFixed[0] = InputsFixed[1] ^ 1;
9302 } else if (SourceHalfMask[2 * ((InputsFixed[0] / 2) ^ 1)] == -1 &&
9303 SourceHalfMask[2 * ((InputsFixed[0] / 2) ^ 1) + 1] == -1) {
9304 // The two inputs are in the same DWord but it is clobbered and the
9305 // adjacent DWord isn't used at all. Move both inputs to the free
9307 SourceHalfMask[2 * ((InputsFixed[0] / 2) ^ 1)] = InputsFixed[0];
9308 SourceHalfMask[2 * ((InputsFixed[0] / 2) ^ 1) + 1] = InputsFixed[1];
9309 InputsFixed[0] = 2 * ((InputsFixed[0] / 2) ^ 1);
9310 InputsFixed[1] = 2 * ((InputsFixed[0] / 2) ^ 1) + 1;
9312 // The only way we hit this point is if there is no clobbering
9313 // (because there are no off-half inputs to this half) and there is no
9314 // free slot adjacent to one of the inputs. In this case, we have to
9315 // swap an input with a non-input.
9316 for (int i = 0; i < 4; ++i)
9317 assert((SourceHalfMask[i] == -1 || SourceHalfMask[i] == i) &&
9318 "We can't handle any clobbers here!");
9319 assert(InputsFixed[1] != (InputsFixed[0] ^ 1) &&
9320 "Cannot have adjacent inputs here!");
9322 SourceHalfMask[InputsFixed[0] ^ 1] = InputsFixed[1];
9323 SourceHalfMask[InputsFixed[1]] = InputsFixed[0] ^ 1;
9325 // We also have to update the final source mask in this case because
9326 // it may need to undo the above swap.
9327 for (int &M : FinalSourceHalfMask)
9328 if (M == (InputsFixed[0] ^ 1) + SourceOffset)
9329 M = InputsFixed[1] + SourceOffset;
9330 else if (M == InputsFixed[1] + SourceOffset)
9331 M = (InputsFixed[0] ^ 1) + SourceOffset;
9333 InputsFixed[1] = InputsFixed[0] ^ 1;
9336 // Point everything at the fixed inputs.
9337 for (int &M : HalfMask)
9338 if (M == IncomingInputs[0])
9339 M = InputsFixed[0] + SourceOffset;
9340 else if (M == IncomingInputs[1])
9341 M = InputsFixed[1] + SourceOffset;
9343 IncomingInputs[0] = InputsFixed[0] + SourceOffset;
9344 IncomingInputs[1] = InputsFixed[1] + SourceOffset;
9347 llvm_unreachable("Unhandled input size!");
9350 // Now hoist the DWord down to the right half.
9351 int FreeDWord = (PSHUFDMask[DestOffset / 2] == -1 ? 0 : 1) + DestOffset / 2;
9352 assert(PSHUFDMask[FreeDWord] == -1 && "DWord not free");
9353 PSHUFDMask[FreeDWord] = IncomingInputs[0] / 2;
9354 for (int &M : HalfMask)
9355 for (int Input : IncomingInputs)
9357 M = FreeDWord * 2 + Input % 2;
9359 moveInputsToRightHalf(HToLInputs, LToLInputs, PSHUFHMask, LoMask, HiMask,
9360 /*SourceOffset*/ 4, /*DestOffset*/ 0);
9361 moveInputsToRightHalf(LToHInputs, HToHInputs, PSHUFLMask, HiMask, LoMask,
9362 /*SourceOffset*/ 0, /*DestOffset*/ 4);
9364 // Now enact all the shuffles we've computed to move the inputs into their
9366 if (!isNoopShuffleMask(PSHUFLMask))
9367 V = DAG.getNode(X86ISD::PSHUFLW, DL, MVT::v8i16, V,
9368 getV4X86ShuffleImm8ForMask(PSHUFLMask, DAG));
9369 if (!isNoopShuffleMask(PSHUFHMask))
9370 V = DAG.getNode(X86ISD::PSHUFHW, DL, MVT::v8i16, V,
9371 getV4X86ShuffleImm8ForMask(PSHUFHMask, DAG));
9372 if (!isNoopShuffleMask(PSHUFDMask))
9373 V = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16,
9374 DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32,
9375 DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, V),
9376 getV4X86ShuffleImm8ForMask(PSHUFDMask, DAG)));
9378 // At this point, each half should contain all its inputs, and we can then
9379 // just shuffle them into their final position.
9380 assert(std::count_if(LoMask.begin(), LoMask.end(),
9381 [](int M) { return M >= 4; }) == 0 &&
9382 "Failed to lift all the high half inputs to the low mask!");
9383 assert(std::count_if(HiMask.begin(), HiMask.end(),
9384 [](int M) { return M >= 0 && M < 4; }) == 0 &&
9385 "Failed to lift all the low half inputs to the high mask!");
9387 // Do a half shuffle for the low mask.
9388 if (!isNoopShuffleMask(LoMask))
9389 V = DAG.getNode(X86ISD::PSHUFLW, DL, MVT::v8i16, V,
9390 getV4X86ShuffleImm8ForMask(LoMask, DAG));
9392 // Do a half shuffle with the high mask after shifting its values down.
9393 for (int &M : HiMask)
if (M >= 0)
M -= 4;
9396 if (!isNoopShuffleMask(HiMask))
9397 V = DAG.getNode(X86ISD::PSHUFHW, DL, MVT::v8i16, V,
9398 getV4X86ShuffleImm8ForMask(HiMask, DAG));
9403 /// \brief Detect whether the mask pattern should be lowered through interleaving.
9406 /// This essentially tests whether viewing the mask as an interleaving of two
9407 /// sub-sequences reduces the cross-input traffic of a blend operation. If so,
9408 /// lowering it through interleaving is a significantly better strategy.
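///
/// Illustrative example: for the v8i16 mask <0, 8, 1, 9, 6, 14, 7, 15>, every
/// even result element comes from V1 and every odd one from V2, so the
/// interleaved view has no cross-input traffic while the lo/hi split has four
/// crossings, and this routine returns true.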
9409 static bool shouldLowerAsInterleaving(ArrayRef<int> Mask) {
9410 int NumEvenInputs[2] = {0, 0};
9411 int NumOddInputs[2] = {0, 0};
9412 int NumLoInputs[2] = {0, 0};
9413 int NumHiInputs[2] = {0, 0};
9414 for (int i = 0, Size = Mask.size(); i < Size; ++i) {
if (Mask[i] < 0)
continue;
9418 int InputIdx = Mask[i] >= Size;
if (i < Size / 2)
9421 ++NumLoInputs[InputIdx];
else
9423 ++NumHiInputs[InputIdx];
if (i % 2 == 0)
9426 ++NumEvenInputs[InputIdx];
else
9428 ++NumOddInputs[InputIdx];
}
9431 // The minimum number of cross-input results for both the interleaved and
9432 // split cases. If interleaving results in fewer cross-input results, return true.
9434 int InterleavedCrosses = std::min(NumEvenInputs[1] + NumOddInputs[0],
9435 NumEvenInputs[0] + NumOddInputs[1]);
9436 int SplitCrosses = std::min(NumLoInputs[1] + NumHiInputs[0],
9437 NumLoInputs[0] + NumHiInputs[1]);
9438 return InterleavedCrosses < SplitCrosses;
9441 /// \brief Blend two v8i16 vectors using a naive unpack strategy.
9443 /// This strategy only works when the inputs from each vector fit into a single
9444 /// half of that vector, and generally there are not so many inputs as to leave
9445 /// the in-place shuffles required highly constrained (and thus expensive). It
9446 /// shifts all the inputs into a single side of both input vectors and then
9447 /// uses an unpack to interleave these inputs in a single vector. At that
9448 /// point, we will fall back on the generic single input shuffle lowering.
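///
/// Illustrative example: the mask <0, 1, 8, 9, -1, -1, -1, -1> draws two
/// elements from the low half of V1 and two from the low half of V2, so both
/// inputs already fit in their low halves and a single UNPCKLWD interleaves
/// them before the final single-input fixup shuffle.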
9449 static SDValue lowerV8I16BasicBlendVectorShuffle(SDLoc DL, SDValue V1,
9451 MutableArrayRef<int> Mask,
9452 const X86Subtarget *Subtarget,
9453 SelectionDAG &DAG) {
9454 assert(V1.getSimpleValueType() == MVT::v8i16 && "Bad input type!");
9455 assert(V2.getSimpleValueType() == MVT::v8i16 && "Bad input type!");
9456 SmallVector<int, 3> LoV1Inputs, HiV1Inputs, LoV2Inputs, HiV2Inputs;
9457 for (int i = 0; i < 8; ++i)
9458 if (Mask[i] >= 0 && Mask[i] < 4)
9459 LoV1Inputs.push_back(i);
9460 else if (Mask[i] >= 4 && Mask[i] < 8)
9461 HiV1Inputs.push_back(i);
9462 else if (Mask[i] >= 8 && Mask[i] < 12)
9463 LoV2Inputs.push_back(i);
9464 else if (Mask[i] >= 12)
9465 HiV2Inputs.push_back(i);
9467 int NumV1Inputs = LoV1Inputs.size() + HiV1Inputs.size();
9468 int NumV2Inputs = LoV2Inputs.size() + HiV2Inputs.size();
9471 assert(NumV1Inputs > 0 && NumV1Inputs <= 3 && "At most 3 inputs supported");
9472 assert(NumV2Inputs > 0 && NumV2Inputs <= 3 && "At most 3 inputs supported");
9473 assert(NumV1Inputs + NumV2Inputs <= 4 && "At most 4 combined inputs");
9475 bool MergeFromLo = LoV1Inputs.size() + LoV2Inputs.size() >=
9476 HiV1Inputs.size() + HiV2Inputs.size();
9478 auto moveInputsToHalf = [&](SDValue V, ArrayRef<int> LoInputs,
9479 ArrayRef<int> HiInputs, bool MoveToLo,
int MaskOffset) {
9481 ArrayRef<int> GoodInputs = MoveToLo ? LoInputs : HiInputs;
9482 ArrayRef<int> BadInputs = MoveToLo ? HiInputs : LoInputs;
9483 if (BadInputs.empty())
return V;
9486 int MoveMask[] = {-1, -1, -1, -1, -1, -1, -1, -1};
9487 int MoveOffset = MoveToLo ? 0 : 4;
9489 if (GoodInputs.empty()) {
9490 for (int BadInput : BadInputs) {
9491 MoveMask[Mask[BadInput] % 4 + MoveOffset] = Mask[BadInput] - MaskOffset;
9492 Mask[BadInput] = Mask[BadInput] % 4 + MoveOffset + MaskOffset;
9495 if (GoodInputs.size() == 2) {
9496 // If the low inputs are spread across two dwords, pack them into a single dword.
9498 MoveMask[MoveOffset] = Mask[GoodInputs[0]] - MaskOffset;
9499 MoveMask[MoveOffset + 1] = Mask[GoodInputs[1]] - MaskOffset;
9500 Mask[GoodInputs[0]] = MoveOffset + MaskOffset;
9501 Mask[GoodInputs[1]] = MoveOffset + 1 + MaskOffset;
9503 // Otherwise pin the good inputs.
9504 for (int GoodInput : GoodInputs)
9505 MoveMask[Mask[GoodInput] - MaskOffset] = Mask[GoodInput] - MaskOffset;
9508 if (BadInputs.size() == 2) {
9509 // If we have two bad inputs then there may be either one or two good
9510 // inputs fixed in place. Find a fixed input, and then find the *other*
9511 // two adjacent indices by using modular arithmetic.
int GoodMaskIdx =
9513 std::find_if(std::begin(MoveMask) + MoveOffset, std::end(MoveMask),
9514 [](int M) { return M >= 0; }) -
9515 std::begin(MoveMask);
int MoveMaskIdx =
9517 ((((GoodMaskIdx - MoveOffset) & ~1) + 2) % 4) + MoveOffset;
9518 assert(MoveMask[MoveMaskIdx] == -1 && "Expected empty slot");
9519 assert(MoveMask[MoveMaskIdx + 1] == -1 && "Expected empty slot");
9520 MoveMask[MoveMaskIdx] = Mask[BadInputs[0]] - MaskOffset;
9521 MoveMask[MoveMaskIdx + 1] = Mask[BadInputs[1]] - MaskOffset;
9522 Mask[BadInputs[0]] = MoveMaskIdx + MaskOffset;
9523 Mask[BadInputs[1]] = MoveMaskIdx + 1 + MaskOffset;
9525 assert(BadInputs.size() == 1 && "All sizes handled");
9526 int MoveMaskIdx = std::find(std::begin(MoveMask) + MoveOffset,
9527 std::end(MoveMask), -1) -
9528 std::begin(MoveMask);
9529 MoveMask[MoveMaskIdx] = Mask[BadInputs[0]] - MaskOffset;
9530 Mask[BadInputs[0]] = MoveMaskIdx + MaskOffset;
9534 return DAG.getVectorShuffle(MVT::v8i16, DL, V, DAG.getUNDEF(MVT::v8i16),
MoveMask);
9537 V1 = moveInputsToHalf(V1, LoV1Inputs, HiV1Inputs, MergeFromLo,
/*MaskOffset*/ 0);
9539 V2 = moveInputsToHalf(V2, LoV2Inputs, HiV2Inputs, MergeFromLo,
/*MaskOffset*/ 8);
9542 // FIXME: Select an interleaving of the merge of V1 and V2 that minimizes
9543 // cross-half traffic in the final shuffle.
9545 // Munge the mask to be a single-input mask after the unpack merges the results.
for (int &M : Mask)
if (M != -1)
9549 M = 2 * (M % 4) + (M / 8);
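// Illustrative example: after an UNPCKL merge, element 2 of V1 lands in
// result lane 4 and element 2 of V2 (mask value 10) in result lane 5, so the
// remapping above turns 2 into 2 * (2 % 4) + (2 / 8) == 4 and 10 into
// 2 * (10 % 4) + (10 / 8) == 5.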
9551 return DAG.getVectorShuffle(
9552 MVT::v8i16, DL, DAG.getNode(MergeFromLo ? X86ISD::UNPCKL : X86ISD::UNPCKH,
9553 DL, MVT::v8i16, V1, V2),
9554 DAG.getUNDEF(MVT::v8i16), Mask);
9557 /// \brief Generic lowering of 8-lane i16 shuffles.
9559 /// This handles both single-input shuffles and combined shuffle/blends with
9560 /// two inputs. The single input shuffles are immediately delegated to
9561 /// a dedicated lowering routine.
9563 /// The blends are lowered in one of three fundamental ways. If there are few
9564 /// enough inputs, it delegates to a basic UNPCK-based strategy. If the shuffle
9565 /// of the input is significantly cheaper when lowered as an interleaving of
9566 /// the two inputs, try to interleave them. Otherwise, blend the low and high
9567 /// halves of the inputs separately (making them have relatively few inputs)
9568 /// and then concatenate them.
9569 static SDValue lowerV8I16VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
9570 const X86Subtarget *Subtarget,
9571 SelectionDAG &DAG) {
9573 assert(Op.getSimpleValueType() == MVT::v8i16 && "Bad shuffle type!");
9574 assert(V1.getSimpleValueType() == MVT::v8i16 && "Bad operand type!");
9575 assert(V2.getSimpleValueType() == MVT::v8i16 && "Bad operand type!");
9576 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
9577 ArrayRef<int> OrigMask = SVOp->getMask();
9578 int MaskStorage[8] = {OrigMask[0], OrigMask[1], OrigMask[2], OrigMask[3],
9579 OrigMask[4], OrigMask[5], OrigMask[6], OrigMask[7]};
9580 MutableArrayRef<int> Mask(MaskStorage);
9582 assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
9584 // Whenever we can lower this as a zext, that instruction is strictly faster
9585 // than any alternative.
9586 if (SDValue ZExt = lowerVectorShuffleAsZeroOrAnyExtend(
9587 DL, MVT::v8i16, V1, V2, OrigMask, Subtarget, DAG))
9590 auto isV1 = [](int M) { return M >= 0 && M < 8; };
9591 auto isV2 = [](int M) { return M >= 8; };
9593 int NumV1Inputs = std::count_if(Mask.begin(), Mask.end(), isV1);
9594 int NumV2Inputs = std::count_if(Mask.begin(), Mask.end(), isV2);
9596 if (NumV2Inputs == 0)
9597 return lowerV8I16SingleInputVectorShuffle(DL, V1, Mask, Subtarget, DAG);
9599 assert(NumV1Inputs > 0 && "All single-input shuffles should be canonicalized "
9600 "to be V1-input shuffles.");
9602 // Try to use bit shift instructions.
9603 if (SDValue Shift = lowerVectorShuffleAsBitShift(
9604 DL, MVT::v8i16, V1, V2, Mask, DAG))
9607 // Try to use byte shift instructions.
9608 if (SDValue Shift = lowerVectorShuffleAsByteShift(
9609 DL, MVT::v8i16, V1, V2, Mask, DAG))
9612 // There are special ways we can lower some single-element blends.
9613 if (NumV2Inputs == 1)
9614 if (SDValue V = lowerVectorShuffleAsElementInsertion(MVT::v8i16, DL, V1, V2,
9615 Mask, Subtarget, DAG))
9618 // We have different paths for blend lowering, but they all must use the
9619 // *exact* same predicate.
9620 bool IsBlendSupported = Subtarget->hasSSE41();
9621 if (IsBlendSupported)
9622 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v8i16, V1, V2, Mask,
9626 if (SDValue Masked =
9627 lowerVectorShuffleAsBitMask(DL, MVT::v8i16, V1, V2, Mask, DAG))
9630 // Use dedicated unpack instructions for masks that match their pattern.
9631 if (isShuffleEquivalent(V1, V2, Mask, 0, 8, 1, 9, 2, 10, 3, 11))
9632 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v8i16, V1, V2);
9633 if (isShuffleEquivalent(V1, V2, Mask, 4, 12, 5, 13, 6, 14, 7, 15))
9634 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v8i16, V1, V2);
9636 // Try to use byte rotation instructions.
9637 if (SDValue Rotate = lowerVectorShuffleAsByteRotate(
9638 DL, MVT::v8i16, V1, V2, Mask, Subtarget, DAG))
9641 if (NumV1Inputs + NumV2Inputs <= 4)
9642 return lowerV8I16BasicBlendVectorShuffle(DL, V1, V2, Mask, Subtarget, DAG);
9644 // Check whether an interleaving lowering is likely to be more efficient.
9645 // This isn't perfect but it is a strong heuristic that tends to work well on
9646 // the kinds of shuffles that show up in practice.
9648 // FIXME: Handle 1x, 2x, and 4x interleaving.
9649 if (shouldLowerAsInterleaving(Mask)) {
9650 // FIXME: Figure out whether we should pack these into the low or high halves.
9653 int EMask[8], OMask[8];
9654 for (int i = 0; i < 4; ++i) {
9655 EMask[i] = Mask[2*i];
9656 OMask[i] = Mask[2*i + 1];
EMask[i + 4] = -1;
OMask[i + 4] = -1;
9661 SDValue Evens = DAG.getVectorShuffle(MVT::v8i16, DL, V1, V2, EMask);
9662 SDValue Odds = DAG.getVectorShuffle(MVT::v8i16, DL, V1, V2, OMask);
9664 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v8i16, Evens, Odds);
9667 // If we have direct support for blends, we should lower by decomposing into
// a permute and a blend.
9669 if (IsBlendSupported)
9670 return lowerVectorShuffleAsDecomposedShuffleBlend(DL, MVT::v8i16, V1, V2,
9673 int LoBlendMask[8] = {-1, -1, -1, -1, -1, -1, -1, -1};
9674 int HiBlendMask[8] = {-1, -1, -1, -1, -1, -1, -1, -1};
9676 for (int i = 0; i < 4; ++i) {
9677 LoBlendMask[i] = Mask[i];
9678 HiBlendMask[i] = Mask[i + 4];
9681 SDValue LoV = DAG.getVectorShuffle(MVT::v8i16, DL, V1, V2, LoBlendMask);
9682 SDValue HiV = DAG.getVectorShuffle(MVT::v8i16, DL, V1, V2, HiBlendMask);
9683 LoV = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, LoV);
9684 HiV = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, HiV);
9686 return DAG.getNode(ISD::BITCAST, DL, MVT::v8i16,
9687 DAG.getNode(X86ISD::UNPCKL, DL, MVT::v2i64, LoV, HiV));
9690 /// \brief Check whether a compaction lowering can be done by dropping even
9691 /// elements and compute how many times even elements must be dropped.
9693 /// This handles shuffles which take every Nth element where N is a power of
9694 /// two. Example shuffle masks:
9696 /// N = 1: 0, 2, 4, 6, 8, 10, 12, 14, 0, 2, 4, 6, 8, 10, 12, 14
9697 /// N = 1: 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30
9698 /// N = 2: 0, 4, 8, 12, 0, 4, 8, 12, 0, 4, 8, 12, 0, 4, 8, 12
9699 /// N = 2: 0, 4, 8, 12, 16, 20, 24, 28, 0, 4, 8, 12, 16, 20, 24, 28
9700 /// N = 3: 0, 8, 0, 8, 0, 8, 0, 8, 0, 8, 0, 8, 0, 8, 0, 8
9701 /// N = 3: 0, 8, 16, 24, 0, 8, 16, 24, 0, 8, 16, 24, 0, 8, 16, 24
9703 /// Any of these lanes can of course be undef.
9705 /// This routine only supports N <= 3.
9706 /// FIXME: Evaluate whether either AVX or AVX-512 have any opportunities here
9709 /// \returns N above, or the number of times even elements must be dropped if
9710 /// there is such a number. Otherwise returns zero.
9711 static int canLowerByDroppingEvenElements(ArrayRef<int> Mask) {
9712 // Figure out whether we're looping over two inputs or just one.
9713 bool IsSingleInput = isSingleInputShuffleMask(Mask);
9715 // The modulus for the shuffle vector entries is based on whether this is
9716 // a single input or not.
9717 int ShuffleModulus = Mask.size() * (IsSingleInput ? 1 : 2);
9718 assert(isPowerOf2_32((uint32_t)ShuffleModulus) &&
9719 "We should only be called with masks with a power-of-2 size!");
9721 uint64_t ModMask = (uint64_t)ShuffleModulus - 1;
9723 // We track whether the input is viable for all power-of-2 strides 2^1, 2^2,
9724 // and 2^3 simultaneously. This is because we may have ambiguity with
9725 // partially undef inputs.
9726 bool ViableForN[3] = {true, true, true};
9728 for (int i = 0, e = Mask.size(); i < e; ++i) {
9729 // Ignore undef lanes, we'll optimistically collapse them to the pattern we find.
if (Mask[i] == -1)
continue;
9734 bool IsAnyViable = false;
9735 for (unsigned j = 0; j != array_lengthof(ViableForN); ++j)
9736 if (ViableForN[j]) {
uint64_t N = j + 1;
9739 // The shuffle mask must be equal to (i * 2^N) % M.
9740 if ((uint64_t)Mask[i] == (((uint64_t)i << N) & ModMask))
IsAnyViable = true;
else
9743 ViableForN[j] = false;
}
9745 // Early exit if we exhaust the possible powers of two.
if (!IsAnyViable)
break;
}
9750 for (unsigned j = 0; j != array_lengthof(ViableForN); ++j)
if (ViableForN[j])
return j + 1;
9754 // Return 0 as there is no viable power of two.
return 0;
9758 /// \brief Generic lowering of v16i8 shuffles.
9760 /// This is a hybrid strategy to lower v16i8 vectors. It first attempts to
9761 /// detect any complexity reducing interleaving. If that doesn't help, it uses
9762 /// UNPCK to spread the i8 elements across two i16-element vectors, and uses
9763 /// the existing lowering for v8i16 blends on each half, finally PACK-ing them back together.
9765 static SDValue lowerV16I8VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
9766 const X86Subtarget *Subtarget,
9767 SelectionDAG &DAG) {
9769 assert(Op.getSimpleValueType() == MVT::v16i8 && "Bad shuffle type!");
9770 assert(V1.getSimpleValueType() == MVT::v16i8 && "Bad operand type!");
9771 assert(V2.getSimpleValueType() == MVT::v16i8 && "Bad operand type!");
9772 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
9773 ArrayRef<int> OrigMask = SVOp->getMask();
9774 assert(OrigMask.size() == 16 && "Unexpected mask size for v16 shuffle!");
9776 // Try to use bit shift instructions.
9777 if (SDValue Shift = lowerVectorShuffleAsBitShift(
9778 DL, MVT::v16i8, V1, V2, OrigMask, DAG))
9781 // Try to use byte shift instructions.
9782 if (SDValue Shift = lowerVectorShuffleAsByteShift(
9783 DL, MVT::v16i8, V1, V2, OrigMask, DAG))
9786 // Try to use byte rotation instructions.
9787 if (SDValue Rotate = lowerVectorShuffleAsByteRotate(
9788 DL, MVT::v16i8, V1, V2, OrigMask, Subtarget, DAG))
9791 // Try to use a zext lowering.
9792 if (SDValue ZExt = lowerVectorShuffleAsZeroOrAnyExtend(
9793 DL, MVT::v16i8, V1, V2, OrigMask, Subtarget, DAG))
9796 int MaskStorage[16] = {
9797 OrigMask[0], OrigMask[1], OrigMask[2], OrigMask[3],
9798 OrigMask[4], OrigMask[5], OrigMask[6], OrigMask[7],
9799 OrigMask[8], OrigMask[9], OrigMask[10], OrigMask[11],
9800 OrigMask[12], OrigMask[13], OrigMask[14], OrigMask[15]};
9801 MutableArrayRef<int> Mask(MaskStorage);
9802 MutableArrayRef<int> LoMask = Mask.slice(0, 8);
9803 MutableArrayRef<int> HiMask = Mask.slice(8, 8);
int NumV2Elements =
9806 std::count_if(Mask.begin(), Mask.end(), [](int M) { return M >= 16; });
9808 // For single-input shuffles, there are some nicer lowering tricks we can use.
9809 if (NumV2Elements == 0) {
9810 // Check for being able to broadcast a single element.
9811 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v16i8, DL, V1,
9812 Mask, Subtarget, DAG))
9815 // Check whether we can widen this to an i16 shuffle by duplicating bytes.
9816 // Notably, this handles splat and partial-splat shuffles more efficiently.
9817 // However, it only makes sense if the pre-duplication shuffle simplifies
9818 // things significantly. Currently, this means we need to be able to
9819 // express the pre-duplication shuffle as an i16 shuffle.
9821 // FIXME: We should check for other patterns which can be widened into an
9822 // i16 shuffle as well.
9823 auto canWidenViaDuplication = [](ArrayRef<int> Mask) {
9824 for (int i = 0; i < 16; i += 2)
9825 if (Mask[i] != -1 && Mask[i + 1] != -1 && Mask[i] != Mask[i + 1])
9830 auto tryToWidenViaDuplication = [&]() -> SDValue {
9831 if (!canWidenViaDuplication(Mask))
9833 SmallVector<int, 4> LoInputs;
9834 std::copy_if(Mask.begin(), Mask.end(), std::back_inserter(LoInputs),
9835 [](int M) { return M >= 0 && M < 8; });
9836 std::sort(LoInputs.begin(), LoInputs.end());
9837 LoInputs.erase(std::unique(LoInputs.begin(), LoInputs.end()),
9839 SmallVector<int, 4> HiInputs;
9840 std::copy_if(Mask.begin(), Mask.end(), std::back_inserter(HiInputs),
9841 [](int M) { return M >= 8; });
9842 std::sort(HiInputs.begin(), HiInputs.end());
9843 HiInputs.erase(std::unique(HiInputs.begin(), HiInputs.end()),
9846 bool TargetLo = LoInputs.size() >= HiInputs.size();
9847 ArrayRef<int> InPlaceInputs = TargetLo ? LoInputs : HiInputs;
9848 ArrayRef<int> MovingInputs = TargetLo ? HiInputs : LoInputs;
9850 int PreDupI16Shuffle[] = {-1, -1, -1, -1, -1, -1, -1, -1};
9851 SmallDenseMap<int, int, 8> LaneMap;
9852 for (int I : InPlaceInputs) {
9853 PreDupI16Shuffle[I/2] = I/2;
LaneMap[I] = I;
9856 int j = TargetLo ? 0 : 4, je = j + 4;
9857 for (int i = 0, ie = MovingInputs.size(); i < ie; ++i) {
9858 // Check if j is already a shuffle of this input. This happens when
9859 // there are two adjacent bytes after we move the low one.
9860 if (PreDupI16Shuffle[j] != MovingInputs[i] / 2) {
9861 // If we haven't yet mapped the input, search for a slot into which we can map it.
9863 while (j < je && PreDupI16Shuffle[j] != -1)
9867 // We can't place the inputs into a single half with a simple i16 shuffle, so bail.
9870 // Map this input with the i16 shuffle.
9871 PreDupI16Shuffle[j] = MovingInputs[i] / 2;
9874 // Update the lane map based on the mapping we ended up with.
9875 LaneMap[MovingInputs[i]] = 2 * j + MovingInputs[i] % 2;
V1 = DAG.getNode(
9878 ISD::BITCAST, DL, MVT::v16i8,
9879 DAG.getVectorShuffle(MVT::v8i16, DL,
9880 DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, V1),
9881 DAG.getUNDEF(MVT::v8i16), PreDupI16Shuffle));
9883 // Unpack the bytes to form the i16s that will be shuffled into place.
9884 V1 = DAG.getNode(TargetLo ? X86ISD::UNPCKL : X86ISD::UNPCKH, DL,
9885 MVT::v16i8, V1, V1);
9887 int PostDupI16Shuffle[8] = {-1, -1, -1, -1, -1, -1, -1, -1};
9888 for (int i = 0; i < 16; ++i)
9889 if (Mask[i] != -1) {
9890 int MappedMask = LaneMap[Mask[i]] - (TargetLo ? 0 : 8);
9891 assert(MappedMask < 8 && "Invalid v8 shuffle mask!");
9892 if (PostDupI16Shuffle[i / 2] == -1)
9893 PostDupI16Shuffle[i / 2] = MappedMask;
9895 assert(PostDupI16Shuffle[i / 2] == MappedMask &&
9896 "Conflicting entrties in the original shuffle!");
return DAG.getNode(
9899 ISD::BITCAST, DL, MVT::v16i8,
9900 DAG.getVectorShuffle(MVT::v8i16, DL,
9901 DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, V1),
9902 DAG.getUNDEF(MVT::v8i16), PostDupI16Shuffle));
9904 if (SDValue V = tryToWidenViaDuplication())
9908 // Check whether an interleaving lowering is likely to be more efficient.
9909 // This isn't perfect but it is a strong heuristic that tends to work well on
9910 // the kinds of shuffles that show up in practice.
9912 // FIXME: We need to handle other interleaving widths (i16, i32, ...).
9913 if (shouldLowerAsInterleaving(Mask)) {
9914 int NumLoHalf = std::count_if(Mask.begin(), Mask.end(), [](int M) {
9915 return (M >= 0 && M < 8) || (M >= 16 && M < 24);
9917 int NumHiHalf = std::count_if(Mask.begin(), Mask.end(), [](int M) {
9918 return (M >= 8 && M < 16) || M >= 24;
9920 int EMask[16] = {-1, -1, -1, -1, -1, -1, -1, -1,
9921 -1, -1, -1, -1, -1, -1, -1, -1};
9922 int OMask[16] = {-1, -1, -1, -1, -1, -1, -1, -1,
9923 -1, -1, -1, -1, -1, -1, -1, -1};
9924 bool UnpackLo = NumLoHalf >= NumHiHalf;
9925 MutableArrayRef<int> TargetEMask(UnpackLo ? EMask : EMask + 8, 8);
9926 MutableArrayRef<int> TargetOMask(UnpackLo ? OMask : OMask + 8, 8);
9927 for (int i = 0; i < 8; ++i) {
9928 TargetEMask[i] = Mask[2 * i];
9929 TargetOMask[i] = Mask[2 * i + 1];
9932 SDValue Evens = DAG.getVectorShuffle(MVT::v16i8, DL, V1, V2, EMask);
9933 SDValue Odds = DAG.getVectorShuffle(MVT::v16i8, DL, V1, V2, OMask);
9935 return DAG.getNode(UnpackLo ? X86ISD::UNPCKL : X86ISD::UNPCKH, DL,
9936 MVT::v16i8, Evens, Odds);
9939 // Check for SSSE3 which lets us lower all v16i8 shuffles much more directly
9940 // with PSHUFB. It is important to do this before we attempt to generate any
9941 // blends but after all of the single-input lowerings. If the single input
9942 // lowerings can find an instruction sequence that is faster than a PSHUFB, we
9943 // want to preserve that and we can DAG combine any longer sequences into
9944 // a PSHUFB in the end. But once we start blending from multiple inputs,
9945 // the complexity of DAG combining bad patterns back into PSHUFB is too high,
9946 // and there are *very* few patterns that would actually be faster than the
9947 // PSHUFB approach because of its ability to zero lanes.
9949 // FIXME: The only exceptions to the above are blends which are exact
9950 // interleavings with direct instructions supporting them. We currently don't
9951 // handle those well here.
9952 if (Subtarget->hasSSSE3()) {
SDValue V1Mask[16];
SDValue V2Mask[16];
9955 bool V1InUse = false;
9956 bool V2InUse = false;
9957 SmallBitVector Zeroable = computeZeroableShuffleElements(Mask, V1, V2);
9959 for (int i = 0; i < 16; ++i) {
9960 if (Mask[i] == -1) {
9961 V1Mask[i] = V2Mask[i] = DAG.getUNDEF(MVT::i8);
9963 const int ZeroMask = 0x80;
9964 int V1Idx = (Mask[i] < 16 ? Mask[i] : ZeroMask);
9965 int V2Idx = (Mask[i] < 16 ? ZeroMask : Mask[i] - 16);
if (Zeroable[i])
9967 V1Idx = V2Idx = ZeroMask;
9968 V1Mask[i] = DAG.getConstant(V1Idx, MVT::i8);
9969 V2Mask[i] = DAG.getConstant(V2Idx, MVT::i8);
9970 V1InUse |= (ZeroMask != V1Idx);
9971 V2InUse |= (ZeroMask != V2Idx);
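// Illustrative example of the masks built above: for Mask[i] == 20 (element 4
// of V2) in a non-zeroable lane, V1Idx becomes 0x80 (forcing a zero byte from
// V1) and V2Idx becomes 4, so OR-ing the two PSHUFB results reassembles the
// requested byte.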
9975 // If both V1 and V2 are in use and we can use a direct blend, do so. This
9976 // avoids using blends to handle blends-with-zero which is important as
9977 // a single pshufb is significantly faster for that.
9978 if (V1InUse && V2InUse && Subtarget->hasSSE41())
9979 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v16i8, V1, V2, Mask,
if (V1InUse)
9985 V1 = DAG.getNode(X86ISD::PSHUFB, DL, MVT::v16i8, V1,
9986 DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v16i8, V1Mask));
if (V2InUse)
9988 V2 = DAG.getNode(X86ISD::PSHUFB, DL, MVT::v16i8, V2,
9989 DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v16i8, V2Mask));
9991 // If we need shuffled inputs from both, blend the two.
9992 if (V1InUse && V2InUse)
9993 return DAG.getNode(ISD::OR, DL, MVT::v16i8, V1, V2);
if (V1InUse)
9995 return V1; // Single inputs are easy.
if (V2InUse)
9997 return V2; // Single inputs are easy.
9998 // Shuffling to a zeroable vector.
9999 return getZeroVector(MVT::v16i8, Subtarget, DAG, DL);
10002 // There are special ways we can lower some single-element blends.
10003 if (NumV2Elements == 1)
10004 if (SDValue V = lowerVectorShuffleAsElementInsertion(MVT::v16i8, DL, V1, V2,
10005 Mask, Subtarget, DAG))
10008 // Check whether a compaction lowering can be done. This handles shuffles
10009 // which take every Nth element for some even N. See the helper function for
10012 // We special case these as they can be particularly efficiently handled with
10013 // the PACKUSWB instruction on x86 and they show up in common patterns of
10014 // rearranging bytes to truncate wide elements.
10015 if (int NumEvenDrops = canLowerByDroppingEvenElements(Mask)) {
10016 // NumEvenDrops is the power of two stride of the elements. Another way of
10017 // thinking about it is that we need to drop the even elements this many
10018 // times to get the original input.
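// Illustrative example: a mask taking every 4th byte yields NumEvenDrops == 2,
// so the clear mask below is a v4i32 splat of 0xFF (keeping one byte per
// dword) and the PACKUS step runs twice.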
10019 bool IsSingleInput = isSingleInputShuffleMask(Mask);
10021 // First we need to zero all the dropped bytes.
10022 assert(NumEvenDrops <= 3 &&
10023 "No support for dropping even elements more than 3 times.");
10024 // We use the mask type to pick which bytes are preserved based on how many
10025 // elements are dropped.
10026 MVT MaskVTs[] = { MVT::v8i16, MVT::v4i32, MVT::v2i64 };
10027 SDValue ByteClearMask =
10028 DAG.getNode(ISD::BITCAST, DL, MVT::v16i8,
10029 DAG.getConstant(0xFF, MaskVTs[NumEvenDrops - 1]));
10030 V1 = DAG.getNode(ISD::AND, DL, MVT::v16i8, V1, ByteClearMask);
10031 if (!IsSingleInput)
10032 V2 = DAG.getNode(ISD::AND, DL, MVT::v16i8, V2, ByteClearMask);
10034 // Now pack things back together.
10035 V1 = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, V1);
10036 V2 = IsSingleInput ? V1 : DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, V2);
10037 SDValue Result = DAG.getNode(X86ISD::PACKUS, DL, MVT::v16i8, V1, V2);
10038 for (int i = 1; i < NumEvenDrops; ++i) {
10039 Result = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, Result);
10040 Result = DAG.getNode(X86ISD::PACKUS, DL, MVT::v16i8, Result, Result);
10046 int V1LoBlendMask[8] = {-1, -1, -1, -1, -1, -1, -1, -1};
10047 int V1HiBlendMask[8] = {-1, -1, -1, -1, -1, -1, -1, -1};
10048 int V2LoBlendMask[8] = {-1, -1, -1, -1, -1, -1, -1, -1};
10049 int V2HiBlendMask[8] = {-1, -1, -1, -1, -1, -1, -1, -1};
10051 auto buildBlendMasks = [](MutableArrayRef<int> HalfMask,
10052 MutableArrayRef<int> V1HalfBlendMask,
10053 MutableArrayRef<int> V2HalfBlendMask) {
10054 for (int i = 0; i < 8; ++i)
10055 if (HalfMask[i] >= 0 && HalfMask[i] < 16) {
10056 V1HalfBlendMask[i] = HalfMask[i];
10058 } else if (HalfMask[i] >= 16) {
10059 V2HalfBlendMask[i] = HalfMask[i] - 16;
10060 HalfMask[i] = i + 8;
10063 buildBlendMasks(LoMask, V1LoBlendMask, V2LoBlendMask);
10064 buildBlendMasks(HiMask, V1HiBlendMask, V2HiBlendMask);
10066 SDValue Zero = getZeroVector(MVT::v8i16, Subtarget, DAG, DL);
10068 auto buildLoAndHiV8s = [&](SDValue V, MutableArrayRef<int> LoBlendMask,
10069 MutableArrayRef<int> HiBlendMask) {
10071 // Check if any of the odd lanes in the v16i8 are used. If not, we can mask
10072 // them out and avoid using UNPCK{L,H} to extract the elements of V as i16s.
10074 if (std::none_of(LoBlendMask.begin(), LoBlendMask.end(),
10075 [](int M) { return M >= 0 && M % 2 == 1; }) &&
10076 std::none_of(HiBlendMask.begin(), HiBlendMask.end(),
10077 [](int M) { return M >= 0 && M % 2 == 1; })) {
10078 // Use a mask to drop the high bytes.
10079 V1 = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, V);
10080 V1 = DAG.getNode(ISD::AND, DL, MVT::v8i16, V1,
10081 DAG.getConstant(0x00FF, MVT::v8i16));
10083 // This will be a single vector shuffle instead of a blend so nuke V2.
10084 V2 = DAG.getUNDEF(MVT::v8i16);
10086 // Squash the masks to point directly into V1.
10087 for (int &M : LoBlendMask)
10090 for (int &M : HiBlendMask)
10094 // Otherwise just unpack the low half of V into V1 and the high half into
10095 // V2 so that we can blend them as i16s.
10096 V1 = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16,
10097 DAG.getNode(X86ISD::UNPCKL, DL, MVT::v16i8, V, Zero));
10098 V2 = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16,
10099 DAG.getNode(X86ISD::UNPCKH, DL, MVT::v16i8, V, Zero));
10102 SDValue BlendedLo = DAG.getVectorShuffle(MVT::v8i16, DL, V1, V2, LoBlendMask);
10103 SDValue BlendedHi = DAG.getVectorShuffle(MVT::v8i16, DL, V1, V2, HiBlendMask);
10104 return std::make_pair(BlendedLo, BlendedHi);
10106 SDValue V1Lo, V1Hi, V2Lo, V2Hi;
10107 std::tie(V1Lo, V1Hi) = buildLoAndHiV8s(V1, V1LoBlendMask, V1HiBlendMask);
10108 std::tie(V2Lo, V2Hi) = buildLoAndHiV8s(V2, V2LoBlendMask, V2HiBlendMask);
10110 SDValue LoV = DAG.getVectorShuffle(MVT::v8i16, DL, V1Lo, V2Lo, LoMask);
10111 SDValue HiV = DAG.getVectorShuffle(MVT::v8i16, DL, V1Hi, V2Hi, HiMask);
10113 return DAG.getNode(X86ISD::PACKUS, DL, MVT::v16i8, LoV, HiV);
10116 /// \brief Dispatching routine to lower various 128-bit x86 vector shuffles.
10118 /// This routine breaks down the specific type of 128-bit shuffle and
10119 /// dispatches to the lowering routines accordingly.
10120 static SDValue lower128BitVectorShuffle(SDValue Op, SDValue V1, SDValue V2,
10121 MVT VT, const X86Subtarget *Subtarget,
10122 SelectionDAG &DAG) {
10123 switch (VT.SimpleTy) {
10125 return lowerV2I64VectorShuffle(Op, V1, V2, Subtarget, DAG);
10127 return lowerV2F64VectorShuffle(Op, V1, V2, Subtarget, DAG);
10129 return lowerV4I32VectorShuffle(Op, V1, V2, Subtarget, DAG);
10131 return lowerV4F32VectorShuffle(Op, V1, V2, Subtarget, DAG);
10133 return lowerV8I16VectorShuffle(Op, V1, V2, Subtarget, DAG);
10135 return lowerV16I8VectorShuffle(Op, V1, V2, Subtarget, DAG);
10138 llvm_unreachable("Unimplemented!");
10142 /// \brief Helper function to test whether a shuffle mask could be
10143 /// simplified by widening the elements being shuffled.
10145 /// Appends the mask for wider elements in WidenedMask if valid. Otherwise
10146 /// leaves it in an unspecified state.
10148 /// NOTE: This must handle normal vector shuffle masks and *target* vector
10149 /// shuffle masks. The latter have the special property of a '-2' representing
10150 /// a zero-ed lane of a vector.
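///
/// Illustrative example: the v4i32 mask <2, 3, 6, 7> widens to the v2i64 mask
/// <1, 3>, while <1, 2, 5, 6> cannot be widened because neither pair starts on
/// an even element.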
10151 static bool canWidenShuffleElements(ArrayRef<int> Mask,
10152 SmallVectorImpl<int> &WidenedMask) {
10153 for (int i = 0, Size = Mask.size(); i < Size; i += 2) {
10154 // If both elements are undef, it's trivial.
10155 if (Mask[i] == SM_SentinelUndef && Mask[i + 1] == SM_SentinelUndef) {
10156 WidenedMask.push_back(SM_SentinelUndef);
10160 // Check for an undef mask and a mask value properly aligned to fit with
10161 // a pair of values. If we find such a case, use the non-undef mask's value.
10162 if (Mask[i] == SM_SentinelUndef && Mask[i + 1] >= 0 && Mask[i + 1] % 2 == 1) {
10163 WidenedMask.push_back(Mask[i + 1] / 2);
10166 if (Mask[i + 1] == SM_SentinelUndef && Mask[i] >= 0 && Mask[i] % 2 == 0) {
10167 WidenedMask.push_back(Mask[i] / 2);
10171 // When zeroing, we need to spread the zeroing across both lanes to widen.
10172 if (Mask[i] == SM_SentinelZero || Mask[i + 1] == SM_SentinelZero) {
10173 if ((Mask[i] == SM_SentinelZero || Mask[i] == SM_SentinelUndef) &&
10174 (Mask[i + 1] == SM_SentinelZero || Mask[i + 1] == SM_SentinelUndef)) {
10175 WidenedMask.push_back(SM_SentinelZero);
10181 // Finally check if the two mask values are adjacent and aligned with a pair.
10183 if (Mask[i] != SM_SentinelUndef && Mask[i] % 2 == 0 && Mask[i] + 1 == Mask[i + 1]) {
10184 WidenedMask.push_back(Mask[i] / 2);
10188 // Otherwise we can't safely widen the elements used in this shuffle.
10191 assert(WidenedMask.size() == Mask.size() / 2 &&
10192 "Incorrect size of mask after widening the elements!");
10197 /// \brief Generic routine to split vector shuffle into half-sized shuffles.
10199 /// This routine just extracts two subvectors, shuffles them independently, and
10200 /// then concatenates them back together. This should work effectively with all
10201 /// AVX vector shuffle types.
10202 static SDValue splitAndLowerVectorShuffle(SDLoc DL, MVT VT, SDValue V1,
10203 SDValue V2, ArrayRef<int> Mask,
10204 SelectionDAG &DAG) {
10205 assert(VT.getSizeInBits() >= 256 &&
10206 "Only for 256-bit or wider vector shuffles!");
10207 assert(V1.getSimpleValueType() == VT && "Bad operand type!");
10208 assert(V2.getSimpleValueType() == VT && "Bad operand type!");
10210 ArrayRef<int> LoMask = Mask.slice(0, Mask.size() / 2);
10211 ArrayRef<int> HiMask = Mask.slice(Mask.size() / 2);
10213 int NumElements = VT.getVectorNumElements();
10214 int SplitNumElements = NumElements / 2;
10215 MVT ScalarVT = VT.getScalarType();
10216 MVT SplitVT = MVT::getVectorVT(ScalarVT, NumElements / 2);
10218 // Rather than splitting build-vectors, just build two narrower build
10219 // vectors. This helps shuffling with splats and zeros.
10220 auto SplitVector = [&](SDValue V) {
10221 while (V.getOpcode() == ISD::BITCAST)
10222 V = V->getOperand(0);
10224 MVT OrigVT = V.getSimpleValueType();
10225 int OrigNumElements = OrigVT.getVectorNumElements();
10226 int OrigSplitNumElements = OrigNumElements / 2;
10227 MVT OrigScalarVT = OrigVT.getScalarType();
10228 MVT OrigSplitVT = MVT::getVectorVT(OrigScalarVT, OrigNumElements / 2);
10232 auto *BV = dyn_cast<BuildVectorSDNode>(V);
10234 LoV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, OrigSplitVT, V,
10235 DAG.getIntPtrConstant(0));
10236 HiV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, OrigSplitVT, V,
10237 DAG.getIntPtrConstant(OrigSplitNumElements));
10240 SmallVector<SDValue, 16> LoOps, HiOps;
10241 for (int i = 0; i < OrigSplitNumElements; ++i) {
10242 LoOps.push_back(BV->getOperand(i));
10243 HiOps.push_back(BV->getOperand(i + OrigSplitNumElements));
10245 LoV = DAG.getNode(ISD::BUILD_VECTOR, DL, OrigSplitVT, LoOps);
10246 HiV = DAG.getNode(ISD::BUILD_VECTOR, DL, OrigSplitVT, HiOps);
10248 return std::make_pair(DAG.getNode(ISD::BITCAST, DL, SplitVT, LoV),
10249 DAG.getNode(ISD::BITCAST, DL, SplitVT, HiV));
10252 SDValue LoV1, HiV1, LoV2, HiV2;
10253 std::tie(LoV1, HiV1) = SplitVector(V1);
10254 std::tie(LoV2, HiV2) = SplitVector(V2);
10256 // Now create two 4-way blends of these half-width vectors.
10257 auto HalfBlend = [&](ArrayRef<int> HalfMask) {
10258 bool UseLoV1 = false, UseHiV1 = false, UseLoV2 = false, UseHiV2 = false;
10259 SmallVector<int, 32> V1BlendMask, V2BlendMask, BlendMask;
10260 for (int i = 0; i < SplitNumElements; ++i) {
10261 int M = HalfMask[i];
10262 if (M >= NumElements) {
10263 if (M >= NumElements + SplitNumElements)
10267 V2BlendMask.push_back(M - NumElements);
10268 V1BlendMask.push_back(-1);
10269 BlendMask.push_back(SplitNumElements + i);
10270 } else if (M >= 0) {
10271 if (M >= SplitNumElements)
10275 V2BlendMask.push_back(-1);
10276 V1BlendMask.push_back(M);
10277 BlendMask.push_back(i);
10279 V2BlendMask.push_back(-1);
10280 V1BlendMask.push_back(-1);
10281 BlendMask.push_back(-1);
10285 // Because the lowering happens after all combining takes place, we need to
10286 // manually combine these blend masks as much as possible so that we create
10287 // a minimal number of high-level vector shuffle nodes.
10289 // First try just blending the halves of V1 or V2.
10290 if (!UseLoV1 && !UseHiV1 && !UseLoV2 && !UseHiV2)
10291 return DAG.getUNDEF(SplitVT);
10292 if (!UseLoV2 && !UseHiV2)
10293 return DAG.getVectorShuffle(SplitVT, DL, LoV1, HiV1, V1BlendMask);
10294 if (!UseLoV1 && !UseHiV1)
10295 return DAG.getVectorShuffle(SplitVT, DL, LoV2, HiV2, V2BlendMask);
10297 SDValue V1Blend, V2Blend;
10298 if (UseLoV1 && UseHiV1) {
10300 DAG.getVectorShuffle(SplitVT, DL, LoV1, HiV1, V1BlendMask);
10302 // We only use half of V1 so map the usage down into the final blend mask.
10303 V1Blend = UseLoV1 ? LoV1 : HiV1;
10304 for (int i = 0; i < SplitNumElements; ++i)
10305 if (BlendMask[i] >= 0 && BlendMask[i] < SplitNumElements)
10306 BlendMask[i] = V1BlendMask[i] - (UseLoV1 ? 0 : SplitNumElements);
10308 if (UseLoV2 && UseHiV2) {
10310 DAG.getVectorShuffle(SplitVT, DL, LoV2, HiV2, V2BlendMask);
10312 // We only use half of V2 so map the usage down into the final blend mask.
10313 V2Blend = UseLoV2 ? LoV2 : HiV2;
10314 for (int i = 0; i < SplitNumElements; ++i)
10315 if (BlendMask[i] >= SplitNumElements)
10316 BlendMask[i] = V2BlendMask[i] + (UseLoV2 ? SplitNumElements : 0);
10318 return DAG.getVectorShuffle(SplitVT, DL, V1Blend, V2Blend, BlendMask);
10320 SDValue Lo = HalfBlend(LoMask);
10321 SDValue Hi = HalfBlend(HiMask);
10322 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Lo, Hi);
10325 /// \brief Either split a vector in halves or decompose the shuffles and the blend.
10328 /// This is provided as a good fallback for many lowerings of non-single-input
10329 /// shuffles with more than one 128-bit lane. In those cases, we want to select
10330 /// between splitting the shuffle into 128-bit components and stitching those
10331 /// back together vs. extracting the single-input shuffles and blending those results.
10333 static SDValue lowerVectorShuffleAsSplitOrBlend(SDLoc DL, MVT VT, SDValue V1,
10334 SDValue V2, ArrayRef<int> Mask,
10335 SelectionDAG &DAG) {
10336 assert(!isSingleInputShuffleMask(Mask) && "This routine must not be used to "
10337 "lower single-input shuffles as it "
10338 "could then recurse on itself.");
10339 int Size = Mask.size();
10341 // If this can be modeled as a broadcast of two elements followed by a blend,
10342 // prefer that lowering. This is especially important because broadcasts can
10343 // often fold with memory operands.
10344 auto DoBothBroadcast = [&] {
10345 int V1BroadcastIdx = -1, V2BroadcastIdx = -1;
10348 if (V2BroadcastIdx == -1)
10349 V2BroadcastIdx = M - Size;
10350 else if (M - Size != V2BroadcastIdx)
10352 } else if (M >= 0) {
10353 if (V1BroadcastIdx == -1)
10354 V1BroadcastIdx = M;
10355 else if (M != V1BroadcastIdx)
10360 if (DoBothBroadcast())
10361 return lowerVectorShuffleAsDecomposedShuffleBlend(DL, VT, V1, V2, Mask,
10364 // If the inputs all stem from a single 128-bit lane of each input, then we
10365 // split them rather than blending because the split will decompose to
10366 // unusually few instructions.
10367 int LaneCount = VT.getSizeInBits() / 128;
10368 int LaneSize = Size / LaneCount;
10369 SmallBitVector LaneInputs[2];
10370 LaneInputs[0].resize(LaneCount, false);
10371 LaneInputs[1].resize(LaneCount, false);
10372 for (int i = 0; i < Size; ++i)
10374 LaneInputs[Mask[i] / Size][(Mask[i] % Size) / LaneSize] = true;
10375 if (LaneInputs[0].count() <= 1 && LaneInputs[1].count() <= 1)
10376 return splitAndLowerVectorShuffle(DL, VT, V1, V2, Mask, DAG);
10378 // Otherwise, just fall back to decomposed shuffles and a blend. This requires
10379 // that the decomposed single-input shuffles don't end up here.
10380 return lowerVectorShuffleAsDecomposedShuffleBlend(DL, VT, V1, V2, Mask, DAG);
10383 /// \brief Lower a vector shuffle crossing multiple 128-bit lanes as
10384 /// a permutation and blend of those lanes.
10386 /// This essentially blends the out-of-lane inputs to each lane into the lane
10387 /// from a permuted copy of the vector. This lowering strategy results in four
10388 /// instructions in the worst case for a single-input cross lane shuffle which
10389 /// is lower than any other fully general cross-lane shuffle strategy I'm aware
10390 /// of. Special cases for each particular shuffle pattern should be handled
10391 /// prior to trying this lowering.
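///
/// Sketch of the single-input case: for a v4f64 shuffle of V1 with mask
/// <2, 1, 0, 3>, result elements 0 and 2 need values from the other lane. We
/// form a VPERM2X128 copy of V1 with its 128-bit halves swapped and then do an
/// in-lane blend, picking <4, 1, 6, 3> from the pair (V1, Flipped).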
10392 static SDValue lowerVectorShuffleAsLanePermuteAndBlend(SDLoc DL, MVT VT,
10393 SDValue V1, SDValue V2,
10394 ArrayRef<int> Mask,
10395 SelectionDAG &DAG) {
10396 // FIXME: This should probably be generalized for 512-bit vectors as well.
10397 assert(VT.getSizeInBits() == 256 && "Only for 256-bit vector shuffles!");
10398 int LaneSize = Mask.size() / 2;
10400 // If there are only inputs from one 128-bit lane, splitting will in fact be
10401 // less expensive. The flags track whether the given lane contains an element
10402 // that crosses to another lane.
10403 bool LaneCrossing[2] = {false, false};
10404 for (int i = 0, Size = Mask.size(); i < Size; ++i)
10405 if (Mask[i] >= 0 && (Mask[i] % Size) / LaneSize != i / LaneSize)
10406 LaneCrossing[(Mask[i] % Size) / LaneSize] = true;
10407 if (!LaneCrossing[0] || !LaneCrossing[1])
10408 return splitAndLowerVectorShuffle(DL, VT, V1, V2, Mask, DAG);
10410 if (isSingleInputShuffleMask(Mask)) {
10411 SmallVector<int, 32> FlippedBlendMask;
10412 for (int i = 0, Size = Mask.size(); i < Size; ++i)
10413 FlippedBlendMask.push_back(
10414 Mask[i] < 0 ? -1 : (((Mask[i] % Size) / LaneSize == i / LaneSize)
10416 : Mask[i] % LaneSize +
10417 (i / LaneSize) * LaneSize + Size));
10419 // Flip the vector, and blend the results which should now be in-lane. The
10420 // VPERM2X128 mask uses the low 2 bits for the low source and bits 4 and
10421 // 5 for the high source. The value 3 selects the high half of source 2 and
10422 // the value 2 selects the low half of source 2. We only use source 2 to
10423 // allow folding it into a memory operand.
10424 unsigned PERMMask = 3 | 2 << 4;
10425 SDValue Flipped = DAG.getNode(X86ISD::VPERM2X128, DL, VT, DAG.getUNDEF(VT),
10426 V1, DAG.getConstant(PERMMask, MVT::i8));
10427 return DAG.getVectorShuffle(VT, DL, V1, Flipped, FlippedBlendMask);
10430 // This now reduces to two single-input shuffles of V1 and V2 which at worst
10431 // will be handled by the above logic and a blend of the results, much like
10432 // other patterns in AVX.
10433 return lowerVectorShuffleAsDecomposedShuffleBlend(DL, VT, V1, V2, Mask, DAG);
10436 /// \brief Handle lowering 2-lane 128-bit shuffles.
10437 static SDValue lowerV2X128VectorShuffle(SDLoc DL, MVT VT, SDValue V1,
10438 SDValue V2, ArrayRef<int> Mask,
10439 const X86Subtarget *Subtarget,
10440 SelectionDAG &DAG) {
10441 // Blends are faster and handle all the non-lane-crossing cases.
10442 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, VT, V1, V2, Mask,
10446 MVT SubVT = MVT::getVectorVT(VT.getVectorElementType(),
10447 VT.getVectorNumElements() / 2);
10448 // Check for patterns which can be matched with a single insert of a 128-bit subvector.
10450 if (isShuffleEquivalent(V1, V2, Mask, 0, 1, 0, 1) ||
10451 isShuffleEquivalent(V1, V2, Mask, 0, 1, 4, 5)) {
10452 SDValue LoV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT, V1,
10453 DAG.getIntPtrConstant(0));
10454 SDValue HiV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT,
10455 Mask[2] < 4 ? V1 : V2, DAG.getIntPtrConstant(0));
10456 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, LoV, HiV);
10458 if (isShuffleEquivalent(V1, V2, Mask, 0, 1, 6, 7)) {
10459 SDValue LoV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT, V1,
10460 DAG.getIntPtrConstant(0));
10461 SDValue HiV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT, V2,
10462 DAG.getIntPtrConstant(2));
10463 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, LoV, HiV);
10466 // Otherwise form a 128-bit permutation.
10467 // FIXME: Detect zero-vector inputs and use the VPERM2X128 to zero that half.
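// Illustrative example: for a v4i64 shuffle with mask <2, 3, 6, 7>, Mask[0]
// and Mask[2] select the high halves of V1 and V2 respectively, giving
// PermMask == (1 | 3 << 4) == 0x31.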
10468 unsigned PermMask = Mask[0] / 2 | (Mask[2] / 2) << 4;
10469 return DAG.getNode(X86ISD::VPERM2X128, DL, VT, V1, V2,
10470 DAG.getConstant(PermMask, MVT::i8));
10473 /// \brief Lower a vector shuffle by first fixing the 128-bit lanes and then
10474 /// shuffling each lane.
10476 /// This will only succeed when the result of fixing the 128-bit lanes results
10477 /// in a single-input non-lane-crossing shuffle with a repeating shuffle mask in
10478 /// each 128-bit lane. This handles many cases where we can quickly blend away
10479 /// the lane crosses early and then use simpler shuffles within each lane.
10481 /// FIXME: It might be worthwhile at some point to support this without
10482 /// requiring the 128-bit lane-relative shuffles to be repeating, but currently
10483 /// in x86 only floating point has interesting non-repeating shuffles, and even
10484 /// those are still *marginally* more expensive.
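///
/// Illustrative example: the v8f32 mask <1, 0, 3, 2, 9, 8, 11, 10> first does
/// a v4f64 lane shuffle selecting elements <0, 1, 4, 5> (V1's low lane and
/// V2's low lane) and then a repeated in-lane <1, 0, 3, 2> shuffle of that
/// result.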
10485 static SDValue lowerVectorShuffleByMerging128BitLanes(
10486 SDLoc DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
10487 const X86Subtarget *Subtarget, SelectionDAG &DAG) {
10488 assert(!isSingleInputShuffleMask(Mask) &&
10489 "This is only useful with multiple inputs.");
10491 int Size = Mask.size();
10492 int LaneSize = 128 / VT.getScalarSizeInBits();
10493 int NumLanes = Size / LaneSize;
10494 assert(NumLanes > 1 && "Only handles 256-bit and wider shuffles.");
10496 // See if we can build a hypothetical 128-bit lane-fixing shuffle mask. Also
10497 // check whether the in-128-bit lane shuffles share a repeating pattern.
10498 SmallVector<int, 4> Lanes;
10499 Lanes.resize(NumLanes, -1);
10500 SmallVector<int, 4> InLaneMask;
10501 InLaneMask.resize(LaneSize, -1);
10502 for (int i = 0; i < Size; ++i) {
10506 int j = i / LaneSize;
10508 if (Lanes[j] < 0) {
10509 // First entry we've seen for this lane.
10510 Lanes[j] = Mask[i] / LaneSize;
10511 } else if (Lanes[j] != Mask[i] / LaneSize) {
10512 // This doesn't match the lane selected previously!
10516 // Check that within each lane we have a consistent shuffle mask.
10517 int k = i % LaneSize;
10518 if (InLaneMask[k] < 0) {
10519 InLaneMask[k] = Mask[i] % LaneSize;
10520 } else if (InLaneMask[k] != Mask[i] % LaneSize) {
10521 // This doesn't fit a repeating in-lane mask.
10526 // First shuffle the lanes into place.
10527 MVT LaneVT = MVT::getVectorVT(VT.isFloatingPoint() ? MVT::f64 : MVT::i64,
10528 VT.getSizeInBits() / 64);
10529 SmallVector<int, 8> LaneMask;
10530 LaneMask.resize(NumLanes * 2, -1);
10531 for (int i = 0; i < NumLanes; ++i)
10532 if (Lanes[i] >= 0) {
10533 LaneMask[2 * i + 0] = 2*Lanes[i] + 0;
10534 LaneMask[2 * i + 1] = 2*Lanes[i] + 1;
10537 V1 = DAG.getNode(ISD::BITCAST, DL, LaneVT, V1);
10538 V2 = DAG.getNode(ISD::BITCAST, DL, LaneVT, V2);
10539 SDValue LaneShuffle = DAG.getVectorShuffle(LaneVT, DL, V1, V2, LaneMask);
10541 // Cast it back to the type we actually want.
10542 LaneShuffle = DAG.getNode(ISD::BITCAST, DL, VT, LaneShuffle);
10544 // Now do a simple shuffle that isn't lane crossing.
10545 SmallVector<int, 8> NewMask;
10546 NewMask.resize(Size, -1);
10547 for (int i = 0; i < Size; ++i)
if (Mask[i] >= 0)
10549 NewMask[i] = (i / LaneSize) * LaneSize + Mask[i] % LaneSize;
10550 assert(!is128BitLaneCrossingShuffleMask(VT, NewMask) &&
10551 "Must not introduce lane crosses at this point!");
10553 return DAG.getVectorShuffle(VT, DL, LaneShuffle, DAG.getUNDEF(VT), NewMask);
10556 /// \brief Test whether the specified input (0 or 1) is in-place blended by the given mask.
10559 /// This returns true if the elements from a particular input are already in the
10560 /// slot required by the given mask and require no permutation.
10561 static bool isShuffleMaskInputInPlace(int Input, ArrayRef<int> Mask) {
10562 assert((Input == 0 || Input == 1) && "Only two inputs to shuffles.");
10563 int Size = Mask.size();
10564 for (int i = 0; i < Size; ++i)
10565 if (Mask[i] >= 0 && Mask[i] / Size == Input && Mask[i] % Size != i)
10571 /// \brief Handle lowering of 4-lane 64-bit floating point shuffles.
10573 /// Also ends up handling lowering of 4-lane 64-bit integer shuffles when AVX2
10574 /// isn't available.
10575 static SDValue lowerV4F64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
10576 const X86Subtarget *Subtarget,
10577 SelectionDAG &DAG) {
10579 assert(V1.getSimpleValueType() == MVT::v4f64 && "Bad operand type!");
10580 assert(V2.getSimpleValueType() == MVT::v4f64 && "Bad operand type!");
10581 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
10582 ArrayRef<int> Mask = SVOp->getMask();
10583 assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");
10585 SmallVector<int, 4> WidenedMask;
10586 if (canWidenShuffleElements(Mask, WidenedMask))
10587 return lowerV2X128VectorShuffle(DL, MVT::v4f64, V1, V2, Mask, Subtarget,
10590 if (isSingleInputShuffleMask(Mask)) {
10591 // Check for being able to broadcast a single element.
10592 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v4f64, DL, V1,
10593 Mask, Subtarget, DAG))
10596 // Use low duplicate instructions for masks that match their pattern.
10597 if (isShuffleEquivalent(V1, V2, Mask, 0, 0, 2, 2))
10598 return DAG.getNode(X86ISD::MOVDDUP, DL, MVT::v4f64, V1);
10600 if (!is128BitLaneCrossingShuffleMask(MVT::v4f64, Mask)) {
10601 // Non-half-crossing single input shuffles can be lowered with an
10602 // interleaved permutation.
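// Illustrative example: the mask <1, 0, 3, 2> swaps the two doubles within
// each 128-bit lane and encodes as VPERMILPMask == 0b0101.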
10603 unsigned VPERMILPMask = (Mask[0] == 1) | ((Mask[1] == 1) << 1) |
10604 ((Mask[2] == 3) << 2) | ((Mask[3] == 3) << 3);
10605 return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v4f64, V1,
10606 DAG.getConstant(VPERMILPMask, MVT::i8));
10609 // With AVX2 we have direct support for this permutation.
10610 if (Subtarget->hasAVX2())
10611 return DAG.getNode(X86ISD::VPERMI, DL, MVT::v4f64, V1,
10612 getV4X86ShuffleImm8ForMask(Mask, DAG));
10614 // Otherwise, fall back.
10615 return lowerVectorShuffleAsLanePermuteAndBlend(DL, MVT::v4f64, V1, V2, Mask,
10619 // X86 has dedicated unpack instructions that can handle specific blend
10620 // operations: UNPCKH and UNPCKL.
10621 if (isShuffleEquivalent(V1, V2, Mask, 0, 4, 2, 6))
10622 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v4f64, V1, V2);
10623 if (isShuffleEquivalent(V1, V2, Mask, 1, 5, 3, 7))
10624 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v4f64, V1, V2);
10626 // If we have a single input to the zero element, insert that into V1 if we
10627 // can do so cheaply.
10628 int NumV2Elements =
10629 std::count_if(Mask.begin(), Mask.end(), [](int M) { return M >= 4; });
10630 if (NumV2Elements == 1 && Mask[0] >= 4)
10631 if (SDValue Insertion = lowerVectorShuffleAsElementInsertion(
10632 MVT::v4f64, DL, V1, V2, Mask, Subtarget, DAG))
10635 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v4f64, V1, V2, Mask,
10639 // Check if the blend happens to exactly fit that of SHUFPD.
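// Illustrative example: the mask <1, 4, 3, 6> is not a simple blend but does
// fit SHUFPD, taking the odd element of V1 and the even element of V2 in each
// lane with SHUFPDMask == 0b0101.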
10640 if ((Mask[0] == -1 || Mask[0] < 2) &&
10641 (Mask[1] == -1 || (Mask[1] >= 4 && Mask[1] < 6)) &&
10642 (Mask[2] == -1 || (Mask[2] >= 2 && Mask[2] < 4)) &&
10643 (Mask[3] == -1 || Mask[3] >= 6)) {
10644 unsigned SHUFPDMask = (Mask[0] == 1) | ((Mask[1] == 5) << 1) |
10645 ((Mask[2] == 3) << 2) | ((Mask[3] == 7) << 3);
10646 return DAG.getNode(X86ISD::SHUFP, DL, MVT::v4f64, V1, V2,
10647 DAG.getConstant(SHUFPDMask, MVT::i8));
10649 if ((Mask[0] == -1 || (Mask[0] >= 4 && Mask[0] < 6)) &&
10650 (Mask[1] == -1 || Mask[1] < 2) &&
10651 (Mask[2] == -1 || Mask[2] >= 6) &&
10652 (Mask[3] == -1 || (Mask[3] >= 2 && Mask[3] < 4))) {
10653 unsigned SHUFPDMask = (Mask[0] == 5) | ((Mask[1] == 1) << 1) |
10654 ((Mask[2] == 7) << 2) | ((Mask[3] == 3) << 3);
10655 return DAG.getNode(X86ISD::SHUFP, DL, MVT::v4f64, V2, V1,
10656 DAG.getConstant(SHUFPDMask, MVT::i8));
10659 // Try to simplify this by merging 128-bit lanes to enable a lane-based
10660 // shuffle. However, if we have AVX2 and either input is already in place, we
10661 // will be able to shuffle the other input across lanes in a single
10662 // instruction, so skip this pattern.
10663 if (!(Subtarget->hasAVX2() && (isShuffleMaskInputInPlace(0, Mask) ||
10664 isShuffleMaskInputInPlace(1, Mask))))
10665 if (SDValue Result = lowerVectorShuffleByMerging128BitLanes(
10666 DL, MVT::v4f64, V1, V2, Mask, Subtarget, DAG))
10669 // If we have AVX2 then we always want to lower with a blend because at v4 we
10670 // can fully permute the elements.
10671 if (Subtarget->hasAVX2())
10672 return lowerVectorShuffleAsDecomposedShuffleBlend(DL, MVT::v4f64, V1, V2,
10675 // Otherwise fall back on generic lowering.
10676 return lowerVectorShuffleAsSplitOrBlend(DL, MVT::v4f64, V1, V2, Mask, DAG);
10679 /// \brief Handle lowering of 4-lane 64-bit integer shuffles.
10681 /// This routine is only called when we have AVX2 and thus a reasonable
10682 /// instruction set for v4i64 shuffling.
10683 static SDValue lowerV4I64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
10684 const X86Subtarget *Subtarget,
10685 SelectionDAG &DAG) {
10687 assert(V1.getSimpleValueType() == MVT::v4i64 && "Bad operand type!");
10688 assert(V2.getSimpleValueType() == MVT::v4i64 && "Bad operand type!");
10689 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
10690 ArrayRef<int> Mask = SVOp->getMask();
10691 assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");
10692 assert(Subtarget->hasAVX2() && "We can only lower v4i64 with AVX2!");
10694 SmallVector<int, 4> WidenedMask;
10695 if (canWidenShuffleElements(Mask, WidenedMask))
10696 return lowerV2X128VectorShuffle(DL, MVT::v4i64, V1, V2, Mask, Subtarget,
10699 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v4i64, V1, V2, Mask,
10703 // Check for being able to broadcast a single element.
10704 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v4i64, DL, V1,
10705 Mask, Subtarget, DAG))
10708 // When the shuffle is mirrored between the 128-bit lanes of the unit, we can
10709 // use lower latency instructions that will operate on both 128-bit lanes.
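// Illustrative example: the single-input mask <1, 0, 3, 2> repeats as <1, 0>
// per 128-bit lane and becomes a v8i32 PSHUFD applying <2, 3, 0, 1> in each
// lane.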
10710 SmallVector<int, 2> RepeatedMask;
10711 if (is128BitLaneRepeatedShuffleMask(MVT::v4i64, Mask, RepeatedMask)) {
10712 if (isSingleInputShuffleMask(Mask)) {
10713 int PSHUFDMask[] = {-1, -1, -1, -1};
10714 for (int i = 0; i < 2; ++i)
10715 if (RepeatedMask[i] >= 0) {
10716 PSHUFDMask[2 * i] = 2 * RepeatedMask[i];
10717 PSHUFDMask[2 * i + 1] = 2 * RepeatedMask[i] + 1;
10719 return DAG.getNode(
10720 ISD::BITCAST, DL, MVT::v4i64,
10721 DAG.getNode(X86ISD::PSHUFD, DL, MVT::v8i32,
10722 DAG.getNode(ISD::BITCAST, DL, MVT::v8i32, V1),
10723 getV4X86ShuffleImm8ForMask(PSHUFDMask, DAG)));
10727 // AVX2 provides a direct instruction for permuting a single input across lanes.
10729 if (isSingleInputShuffleMask(Mask))
10730 return DAG.getNode(X86ISD::VPERMI, DL, MVT::v4i64, V1,
10731 getV4X86ShuffleImm8ForMask(Mask, DAG));
10733 // Try to use byte shift instructions.
10734 if (SDValue Shift = lowerVectorShuffleAsByteShift(
10735 DL, MVT::v4i64, V1, V2, Mask, DAG))
10738 // Use dedicated unpack instructions for masks that match their pattern.
10739 if (isShuffleEquivalent(V1, V2, Mask, 0, 4, 2, 6))
10740 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v4i64, V1, V2);
10741 if (isShuffleEquivalent(V1, V2, Mask, 1, 5, 3, 7))
10742 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v4i64, V1, V2);
10744 // Try to simplify this by merging 128-bit lanes to enable a lane-based
10745 // shuffle. However, if we have AVX2 and either input is already in place, we
10746 // will be able to shuffle the other input across lanes in a single
10747 // instruction, so skip this pattern.
10748 if (!(Subtarget->hasAVX2() && (isShuffleMaskInputInPlace(0, Mask) ||
10749 isShuffleMaskInputInPlace(1, Mask))))
10750 if (SDValue Result = lowerVectorShuffleByMerging128BitLanes(
10751 DL, MVT::v4i64, V1, V2, Mask, Subtarget, DAG))
10754 // Otherwise fall back on generic blend lowering.
10755 return lowerVectorShuffleAsDecomposedShuffleBlend(DL, MVT::v4i64, V1, V2,
10759 /// \brief Handle lowering of 8-lane 32-bit floating point shuffles.
10761 /// Also ends up handling lowering of 8-lane 32-bit integer shuffles when AVX2
10762 /// isn't available.
10763 static SDValue lowerV8F32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
10764 const X86Subtarget *Subtarget,
10765 SelectionDAG &DAG) {
10767 assert(V1.getSimpleValueType() == MVT::v8f32 && "Bad operand type!");
10768 assert(V2.getSimpleValueType() == MVT::v8f32 && "Bad operand type!");
10769 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
10770 ArrayRef<int> Mask = SVOp->getMask();
10771 assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
10773 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v8f32, V1, V2, Mask,
10777 // Check for being able to broadcast a single element.
10778 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v8f32, DL, V1,
10779 Mask, Subtarget, DAG))
10782 // If the shuffle mask is repeated in each 128-bit lane, we have many more
10783 // options to efficiently lower the shuffle.
10784 SmallVector<int, 4> RepeatedMask;
10785 if (is128BitLaneRepeatedShuffleMask(MVT::v8f32, Mask, RepeatedMask)) {
10786 assert(RepeatedMask.size() == 4 &&
10787 "Repeated masks must be half the mask width!");
10789 // Use even/odd duplicate instructions for masks that match their pattern.
10790 if (isShuffleEquivalent(V1, V2, Mask, 0, 0, 2, 2, 4, 4, 6, 6))
10791 return DAG.getNode(X86ISD::MOVSLDUP, DL, MVT::v8f32, V1);
10792 if (isShuffleEquivalent(V1, V2, Mask, 1, 1, 3, 3, 5, 5, 7, 7))
10793 return DAG.getNode(X86ISD::MOVSHDUP, DL, MVT::v8f32, V1);
10795 if (isSingleInputShuffleMask(Mask))
10796 return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v8f32, V1,
10797 getV4X86ShuffleImm8ForMask(RepeatedMask, DAG));
10799 // Use dedicated unpack instructions for masks that match their pattern.
10800 if (isShuffleEquivalent(V1, V2, Mask, 0, 8, 1, 9, 4, 12, 5, 13))
10801 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v8f32, V1, V2);
10802 if (isShuffleEquivalent(V1, V2, Mask, 2, 10, 3, 11, 6, 14, 7, 15))
10803 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v8f32, V1, V2);
10805 // Otherwise, fall back to a SHUFPS sequence. Here it is important that we
10806 // have already handled any direct blends. We also need to squash the
10807 // repeated mask into a simulated v4f32 mask.
10808 for (int i = 0; i < 4; ++i)
10809 if (RepeatedMask[i] >= 8)
10810 RepeatedMask[i] -= 4;
10811 return lowerVectorShuffleWithSHUFPS(DL, MVT::v8f32, RepeatedMask, V1, V2, DAG);
10814 // If we have a single input shuffle with different shuffle patterns in the
10815 // two 128-bit lanes use the variable mask to VPERMILPS.
10816 if (isSingleInputShuffleMask(Mask)) {
10817 SDValue VPermMask[8];
10818 for (int i = 0; i < 8; ++i)
10819 VPermMask[i] = Mask[i] < 0 ? DAG.getUNDEF(MVT::i32)
10820 : DAG.getConstant(Mask[i], MVT::i32);
10821 if (!is128BitLaneCrossingShuffleMask(MVT::v8f32, Mask))
10822 return DAG.getNode(
10823 X86ISD::VPERMILPV, DL, MVT::v8f32, V1,
10824 DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v8i32, VPermMask));
10826 if (Subtarget->hasAVX2())
10827 return DAG.getNode(X86ISD::VPERMV, DL, MVT::v8f32,
10828 DAG.getNode(ISD::BITCAST, DL, MVT::v8f32,
10829 DAG.getNode(ISD::BUILD_VECTOR, DL,
10830 MVT::v8i32, VPermMask)),
10833 // Otherwise, fall back.
10834 return lowerVectorShuffleAsLanePermuteAndBlend(DL, MVT::v8f32, V1, V2, Mask,
10838 // Try to simplify this by merging 128-bit lanes to enable a lane-based
10840 if (SDValue Result = lowerVectorShuffleByMerging128BitLanes(
10841 DL, MVT::v8f32, V1, V2, Mask, Subtarget, DAG))
10844 // If we have AVX2 then we always want to lower with a blend because at v8 we
10845 // can fully permute the elements.
10846 if (Subtarget->hasAVX2())
10847 return lowerVectorShuffleAsDecomposedShuffleBlend(DL, MVT::v8f32, V1, V2,
10850 // Otherwise fall back on generic lowering.
10851 return lowerVectorShuffleAsSplitOrBlend(DL, MVT::v8f32, V1, V2, Mask, DAG);
10854 /// \brief Handle lowering of 8-lane 32-bit integer shuffles.
10856 /// This routine is only called when we have AVX2 and thus a reasonable
10857 /// instruction set for v8i32 shuffling.
10858 static SDValue lowerV8I32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
10859 const X86Subtarget *Subtarget,
10860 SelectionDAG &DAG) {
10862 assert(V1.getSimpleValueType() == MVT::v8i32 && "Bad operand type!");
10863 assert(V2.getSimpleValueType() == MVT::v8i32 && "Bad operand type!");
10864 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
10865 ArrayRef<int> Mask = SVOp->getMask();
10866 assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
10867 assert(Subtarget->hasAVX2() && "We can only lower v8i32 with AVX2!");
10869 // Whenever we can lower this as a zext, that instruction is strictly faster
10870 // than any alternative. It also allows us to fold memory operands into the
10871 // shuffle in many cases.
10872 if (SDValue ZExt = lowerVectorShuffleAsZeroOrAnyExtend(DL, MVT::v8i32, V1, V2,
10873 Mask, Subtarget, DAG))
10876 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v8i32, V1, V2, Mask,
10880 // Check for being able to broadcast a single element.
10881 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v8i32, DL, V1,
10882 Mask, Subtarget, DAG))
10885 // If the shuffle mask is repeated in each 128-bit lane we can use more
10886 // efficient instructions that mirror the shuffles across the two 128-bit
10888 SmallVector<int, 4> RepeatedMask;
10889 if (is128BitLaneRepeatedShuffleMask(MVT::v8i32, Mask, RepeatedMask)) {
10890 assert(RepeatedMask.size() == 4 && "Unexpected repeated mask size!");
10891 if (isSingleInputShuffleMask(Mask))
10892 return DAG.getNode(X86ISD::PSHUFD, DL, MVT::v8i32, V1,
10893 getV4X86ShuffleImm8ForMask(RepeatedMask, DAG));
10895 // Use dedicated unpack instructions for masks that match their pattern.
10896 if (isShuffleEquivalent(V1, V2, Mask, 0, 8, 1, 9, 4, 12, 5, 13))
10897 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v8i32, V1, V2);
10898 if (isShuffleEquivalent(V1, V2, Mask, 2, 10, 3, 11, 6, 14, 7, 15))
10899 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v8i32, V1, V2);
10902 // Try to use bit shift instructions.
10903 if (SDValue Shift = lowerVectorShuffleAsBitShift(
10904 DL, MVT::v8i32, V1, V2, Mask, DAG))
10907 // Try to use byte shift instructions.
10908 if (SDValue Shift = lowerVectorShuffleAsByteShift(
10909 DL, MVT::v8i32, V1, V2, Mask, DAG))
10912 if (SDValue Rotate = lowerVectorShuffleAsByteRotate(
10913 DL, MVT::v8i32, V1, V2, Mask, Subtarget, DAG))
10916 // If the shuffle patterns aren't repeated but it is a single input, directly
10917 // generate a cross-lane VPERMD instruction.
10918 if (isSingleInputShuffleMask(Mask)) {
10919 SDValue VPermMask[8];
10920 for (int i = 0; i < 8; ++i)
10921 VPermMask[i] = Mask[i] < 0 ? DAG.getUNDEF(MVT::i32)
10922 : DAG.getConstant(Mask[i], MVT::i32);
10923 return DAG.getNode(
10924 X86ISD::VPERMV, DL, MVT::v8i32,
10925 DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v8i32, VPermMask), V1);
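// Note that the index vector is the first operand of the VPERMV node here;
// for example, a single-input mask of <7, 6, 5, 4, 3, 2, 1, 0> produces a
// VPERMD that reverses all eight 32-bit elements of V1, crossing the two
// 128-bit lanes.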
10928 // Try to simplify this by merging 128-bit lanes to enable a lane-based
10930 if (SDValue Result = lowerVectorShuffleByMerging128BitLanes(
10931 DL, MVT::v8i32, V1, V2, Mask, Subtarget, DAG))
10934 // Otherwise fall back on generic blend lowering.
10935 return lowerVectorShuffleAsDecomposedShuffleBlend(DL, MVT::v8i32, V1, V2,
10939 /// \brief Handle lowering of 16-lane 16-bit integer shuffles.
10941 /// This routine is only called when we have AVX2 and thus a reasonable
10942 /// instruction set for v16i16 shuffling.
10943 static SDValue lowerV16I16VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
10944 const X86Subtarget *Subtarget,
10945 SelectionDAG &DAG) {
10947 assert(V1.getSimpleValueType() == MVT::v16i16 && "Bad operand type!");
10948 assert(V2.getSimpleValueType() == MVT::v16i16 && "Bad operand type!");
10949 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
10950 ArrayRef<int> Mask = SVOp->getMask();
10951 assert(Mask.size() == 16 && "Unexpected mask size for v16 shuffle!");
10952 assert(Subtarget->hasAVX2() && "We can only lower v16i16 with AVX2!");
10954 // Whenever we can lower this as a zext, that instruction is strictly faster
10955 // than any alternative. It also allows us to fold memory operands into the
10956 // shuffle in many cases.
10957 if (SDValue ZExt = lowerVectorShuffleAsZeroOrAnyExtend(DL, MVT::v16i16, V1, V2,
10958 Mask, Subtarget, DAG))
10961 // Check for being able to broadcast a single element.
10962 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v16i16, DL, V1,
10963 Mask, Subtarget, DAG))
10966 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v16i16, V1, V2, Mask,
10970 // Use dedicated unpack instructions for masks that match their pattern.
10971 if (isShuffleEquivalent(V1, V2, Mask,
10972 // First 128-bit lane:
10973 0, 16, 1, 17, 2, 18, 3, 19,
10974 // Second 128-bit lane:
10975 8, 24, 9, 25, 10, 26, 11, 27))
10976 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v16i16, V1, V2);
10977 if (isShuffleEquivalent(V1, V2, Mask,
10978 // First 128-bit lane:
10979 4, 20, 5, 21, 6, 22, 7, 23,
10980 // Second 128-bit lane:
10981 12, 28, 13, 29, 14, 30, 15, 31))
10982 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v16i16, V1, V2);
10984 // Try to use bit shift instructions.
10985 if (SDValue Shift = lowerVectorShuffleAsBitShift(
10986 DL, MVT::v16i16, V1, V2, Mask, DAG))
10989 // Try to use byte shift instructions.
10990 if (SDValue Shift = lowerVectorShuffleAsByteShift(
10991 DL, MVT::v16i16, V1, V2, Mask, DAG))
10994 // Try to use byte rotation instructions.
10995 if (SDValue Rotate = lowerVectorShuffleAsByteRotate(
10996 DL, MVT::v16i16, V1, V2, Mask, Subtarget, DAG))
10999 if (isSingleInputShuffleMask(Mask)) {
11000 // There are no generalized cross-lane shuffle operations available on i16
11002 if (is128BitLaneCrossingShuffleMask(MVT::v16i16, Mask))
11003 return lowerVectorShuffleAsLanePermuteAndBlend(DL, MVT::v16i16, V1, V2,
11006 SDValue PSHUFBMask[32];
11007 for (int i = 0; i < 16; ++i) {
11008 if (Mask[i] == -1) {
11009 PSHUFBMask[2 * i] = PSHUFBMask[2 * i + 1] = DAG.getUNDEF(MVT::i8);
11010 continue;
11011 }
11013 int M = i < 8 ? Mask[i] : Mask[i] - 8;
11014 assert(M >= 0 && M < 8 && "Invalid single-input mask!");
11015 PSHUFBMask[2 * i] = DAG.getConstant(2 * M, MVT::i8);
11016 PSHUFBMask[2 * i + 1] = DAG.getConstant(2 * M + 1, MVT::i8);
11018 return DAG.getNode(
11019 ISD::BITCAST, DL, MVT::v16i16,
11021 X86ISD::PSHUFB, DL, MVT::v32i8,
11022 DAG.getNode(ISD::BITCAST, DL, MVT::v32i8, V1),
11023 DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v32i8, PSHUFBMask)));
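// As a concrete example of the byte-mask construction above: if Mask[3] == 5
// (an in-lane index in the low half), then M == 5 and we emit
// PSHUFBMask[6] = 10 and PSHUFBMask[7] = 11, so word 3 of the result is
// assembled from bytes 10 and 11 (i.e. word 5) of the corresponding lane.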
11026 // Try to simplify this by merging 128-bit lanes to enable a lane-based
11028 if (SDValue Result = lowerVectorShuffleByMerging128BitLanes(
11029 DL, MVT::v16i16, V1, V2, Mask, Subtarget, DAG))
11032 // Otherwise fall back on generic lowering.
11033 return lowerVectorShuffleAsSplitOrBlend(DL, MVT::v16i16, V1, V2, Mask, DAG);
11036 /// \brief Handle lowering of 32-lane 8-bit integer shuffles.
11038 /// This routine is only called when we have AVX2 and thus a reasonable
11039 /// instruction set for v32i8 shuffling.
11040 static SDValue lowerV32I8VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
11041 const X86Subtarget *Subtarget,
11042 SelectionDAG &DAG) {
11044 assert(V1.getSimpleValueType() == MVT::v32i8 && "Bad operand type!");
11045 assert(V2.getSimpleValueType() == MVT::v32i8 && "Bad operand type!");
11046 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
11047 ArrayRef<int> Mask = SVOp->getMask();
11048 assert(Mask.size() == 32 && "Unexpected mask size for v32 shuffle!");
11049 assert(Subtarget->hasAVX2() && "We can only lower v32i8 with AVX2!");
11051 // Whenever we can lower this as a zext, that instruction is strictly faster
11052 // than any alternative. It also allows us to fold memory operands into the
11053 // shuffle in many cases.
11054 if (SDValue ZExt = lowerVectorShuffleAsZeroOrAnyExtend(DL, MVT::v32i8, V1, V2,
11055 Mask, Subtarget, DAG))
11058 // Check for being able to broadcast a single element.
11059 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v32i8, DL, V1,
11060 Mask, Subtarget, DAG))
11063 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v32i8, V1, V2, Mask,
11067 // Use dedicated unpack instructions for masks that match their pattern.
11068 // Note that these are repeated 128-bit lane unpacks, not unpacks across all
11070 if (isShuffleEquivalent(
11072 // First 128-bit lane:
11073 0, 32, 1, 33, 2, 34, 3, 35, 4, 36, 5, 37, 6, 38, 7, 39,
11074 // Second 128-bit lane:
11075 16, 48, 17, 49, 18, 50, 19, 51, 20, 52, 21, 53, 22, 54, 23, 55))
11076 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v32i8, V1, V2);
11077 if (isShuffleEquivalent(
11079 // First 128-bit lane:
11080 8, 40, 9, 41, 10, 42, 11, 43, 12, 44, 13, 45, 14, 46, 15, 47,
11081 // Second 128-bit lane:
11082 24, 56, 25, 57, 26, 58, 27, 59, 28, 60, 29, 61, 30, 62, 31, 63))
11083 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v32i8, V1, V2);
11085 // Try to use bit shift instructions.
11086 if (SDValue Shift = lowerVectorShuffleAsBitShift(
11087 DL, MVT::v32i8, V1, V2, Mask, DAG))
11090 // Try to use byte shift instructions.
11091 if (SDValue Shift = lowerVectorShuffleAsByteShift(
11092 DL, MVT::v32i8, V1, V2, Mask, DAG))
11095 // Try to use byte rotation instructions.
11096 if (SDValue Rotate = lowerVectorShuffleAsByteRotate(
11097 DL, MVT::v32i8, V1, V2, Mask, Subtarget, DAG))
11100 if (isSingleInputShuffleMask(Mask)) {
11101 // There are no generalized cross-lane shuffle operations available on i8
11103 if (is128BitLaneCrossingShuffleMask(MVT::v32i8, Mask))
11104 return lowerVectorShuffleAsLanePermuteAndBlend(DL, MVT::v32i8, V1, V2,
11107 SDValue PSHUFBMask[32];
11108 for (int i = 0; i < 32; ++i)
11109 PSHUFBMask[i] =
11110 Mask[i] < 0
11111 ? DAG.getUNDEF(MVT::i8)
11112 : DAG.getConstant(Mask[i] < 16 ? Mask[i] : Mask[i] - 16, MVT::i8);
11114 return DAG.getNode(
11115 X86ISD::PSHUFB, DL, MVT::v32i8, V1,
11116 DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v32i8, PSHUFBMask));
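// For example, Mask[20] == 19 (a byte in the high 128-bit lane) yields the
// in-lane PSHUFB index 19 - 16 == 3, because PSHUFB indexes bytes relative
// to the 128-bit lane that each result byte lives in.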
11119 // Try to simplify this by merging 128-bit lanes to enable a lane-based
11121 if (SDValue Result = lowerVectorShuffleByMerging128BitLanes(
11122 DL, MVT::v32i8, V1, V2, Mask, Subtarget, DAG))
11125 // Otherwise fall back on generic lowering.
11126 return lowerVectorShuffleAsSplitOrBlend(DL, MVT::v32i8, V1, V2, Mask, DAG);
11129 /// \brief High-level routine to lower various 256-bit x86 vector shuffles.
11131 /// This routine either breaks down the specific type of a 256-bit x86 vector
11132 /// shuffle or splits it into two 128-bit shuffles and fuses the results back
11133 /// together based on the available instructions.
11134 static SDValue lower256BitVectorShuffle(SDValue Op, SDValue V1, SDValue V2,
11135 MVT VT, const X86Subtarget *Subtarget,
11136 SelectionDAG &DAG) {
11138 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
11139 ArrayRef<int> Mask = SVOp->getMask();
11141 // There is a really nice hard cut-over between AVX1 and AVX2 that means we can
11142 // check for those subtargets here and avoid much of the subtarget querying in
11143 // the per-vector-type lowering routines. With AVX1 we have essentially *zero*
11144 // ability to manipulate a 256-bit vector with integer types. Since we'll use
11145 // floating point types there eventually, just immediately cast everything to
11146 // a float and operate entirely in that domain.
11147 if (VT.isInteger() && !Subtarget->hasAVX2()) {
11148 int ElementBits = VT.getScalarSizeInBits();
11149 if (ElementBits < 32)
11150 // No floating point type available, decompose into 128-bit vectors.
11151 return splitAndLowerVectorShuffle(DL, VT, V1, V2, Mask, DAG);
11153 MVT FpVT = MVT::getVectorVT(MVT::getFloatingPointVT(ElementBits),
11154 VT.getVectorNumElements());
11155 V1 = DAG.getNode(ISD::BITCAST, DL, FpVT, V1);
11156 V2 = DAG.getNode(ISD::BITCAST, DL, FpVT, V2);
11157 return DAG.getNode(ISD::BITCAST, DL, VT,
11158 DAG.getVectorShuffle(FpVT, DL, V1, V2, Mask));
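// For example, on AVX1 a v8i32 shuffle is bitcast to v8f32 and lowered in
// the floating point domain, where 256-bit shuffles such as VSHUFPS and
// VPERMILPS are available, before being bitcast back to the integer type.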
11161 switch (VT.SimpleTy) {
11162 case MVT::v4f64:
11163 return lowerV4F64VectorShuffle(Op, V1, V2, Subtarget, DAG);
11164 case MVT::v4i64:
11165 return lowerV4I64VectorShuffle(Op, V1, V2, Subtarget, DAG);
11166 case MVT::v8f32:
11167 return lowerV8F32VectorShuffle(Op, V1, V2, Subtarget, DAG);
11168 case MVT::v8i32:
11169 return lowerV8I32VectorShuffle(Op, V1, V2, Subtarget, DAG);
11170 case MVT::v16i16:
11171 return lowerV16I16VectorShuffle(Op, V1, V2, Subtarget, DAG);
11172 case MVT::v32i8:
11173 return lowerV32I8VectorShuffle(Op, V1, V2, Subtarget, DAG);
11175 default:
11176 llvm_unreachable("Not a valid 256-bit x86 vector type!");
11180 /// \brief Handle lowering of 8-lane 64-bit floating point shuffles.
11181 static SDValue lowerV8F64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
11182 const X86Subtarget *Subtarget,
11183 SelectionDAG &DAG) {
11185 assert(V1.getSimpleValueType() == MVT::v8f64 && "Bad operand type!");
11186 assert(V2.getSimpleValueType() == MVT::v8f64 && "Bad operand type!");
11187 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
11188 ArrayRef<int> Mask = SVOp->getMask();
11189 assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
11191 // X86 has dedicated unpack instructions that can handle specific blend
11192 // operations: UNPCKH and UNPCKL.
11193 if (isShuffleEquivalent(V1, V2, Mask, 0, 8, 2, 10, 4, 12, 6, 14))
11194 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v8f64, V1, V2);
11195 if (isShuffleEquivalent(V1, V2, Mask, 1, 9, 3, 11, 5, 13, 7, 15))
11196 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v8f64, V1, V2);
11198 // FIXME: Implement direct support for this type!
11199 return splitAndLowerVectorShuffle(DL, MVT::v8f64, V1, V2, Mask, DAG);
11202 /// \brief Handle lowering of 16-lane 32-bit floating point shuffles.
11203 static SDValue lowerV16F32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
11204 const X86Subtarget *Subtarget,
11205 SelectionDAG &DAG) {
11207 assert(V1.getSimpleValueType() == MVT::v16f32 && "Bad operand type!");
11208 assert(V2.getSimpleValueType() == MVT::v16f32 && "Bad operand type!");
11209 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
11210 ArrayRef<int> Mask = SVOp->getMask();
11211 assert(Mask.size() == 16 && "Unexpected mask size for v16 shuffle!");
11213 // Use dedicated unpack instructions for masks that match their pattern.
11214 if (isShuffleEquivalent(V1, V2, Mask,
11215 0, 16, 1, 17, 4, 20, 5, 21,
11216 8, 24, 9, 25, 12, 28, 13, 29))
11217 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v16f32, V1, V2);
11218 if (isShuffleEquivalent(V1, V2, Mask,
11219 2, 18, 3, 19, 6, 22, 7, 23,
11220 10, 26, 11, 27, 14, 30, 15, 31))
11221 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v16f32, V1, V2);
11223 // FIXME: Implement direct support for this type!
11224 return splitAndLowerVectorShuffle(DL, MVT::v16f32, V1, V2, Mask, DAG);
11227 /// \brief Handle lowering of 8-lane 64-bit integer shuffles.
11228 static SDValue lowerV8I64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
11229 const X86Subtarget *Subtarget,
11230 SelectionDAG &DAG) {
11232 assert(V1.getSimpleValueType() == MVT::v8i64 && "Bad operand type!");
11233 assert(V2.getSimpleValueType() == MVT::v8i64 && "Bad operand type!");
11234 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
11235 ArrayRef<int> Mask = SVOp->getMask();
11236 assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
11238 // X86 has dedicated unpack instructions that can handle specific blend
11239 // operations: UNPCKH and UNPCKL.
11240 if (isShuffleEquivalent(V1, V2, Mask, 0, 8, 2, 10, 4, 12, 6, 14))
11241 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v8i64, V1, V2);
11242 if (isShuffleEquivalent(V1, V2, Mask, 1, 9, 3, 11, 5, 13, 7, 15))
11243 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v8i64, V1, V2);
11245 // FIXME: Implement direct support for this type!
11246 return splitAndLowerVectorShuffle(DL, MVT::v8i64, V1, V2, Mask, DAG);
11249 /// \brief Handle lowering of 16-lane 32-bit integer shuffles.
11250 static SDValue lowerV16I32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
11251 const X86Subtarget *Subtarget,
11252 SelectionDAG &DAG) {
11254 assert(V1.getSimpleValueType() == MVT::v16i32 && "Bad operand type!");
11255 assert(V2.getSimpleValueType() == MVT::v16i32 && "Bad operand type!");
11256 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
11257 ArrayRef<int> Mask = SVOp->getMask();
11258 assert(Mask.size() == 16 && "Unexpected mask size for v16 shuffle!");
11260 // Use dedicated unpack instructions for masks that match their pattern.
11261 if (isShuffleEquivalent(V1, V2, Mask,
11262 0, 16, 1, 17, 4, 20, 5, 21,
11263 8, 24, 9, 25, 12, 28, 13, 29))
11264 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v16i32, V1, V2);
11265 if (isShuffleEquivalent(V1, V2, Mask,
11266 2, 18, 3, 19, 6, 22, 7, 23,
11267 10, 26, 11, 27, 14, 30, 15, 31))
11268 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v16i32, V1, V2);
11270 // FIXME: Implement direct support for this type!
11271 return splitAndLowerVectorShuffle(DL, MVT::v16i32, V1, V2, Mask, DAG);
11274 /// \brief Handle lowering of 32-lane 16-bit integer shuffles.
11275 static SDValue lowerV32I16VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
11276 const X86Subtarget *Subtarget,
11277 SelectionDAG &DAG) {
11279 assert(V1.getSimpleValueType() == MVT::v32i16 && "Bad operand type!");
11280 assert(V2.getSimpleValueType() == MVT::v32i16 && "Bad operand type!");
11281 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
11282 ArrayRef<int> Mask = SVOp->getMask();
11283 assert(Mask.size() == 32 && "Unexpected mask size for v32 shuffle!");
11284 assert(Subtarget->hasBWI() && "We can only lower v32i16 with AVX-512-BWI!");
11286 // FIXME: Implement direct support for this type!
11287 return splitAndLowerVectorShuffle(DL, MVT::v32i16, V1, V2, Mask, DAG);
11290 /// \brief Handle lowering of 64-lane 8-bit integer shuffles.
11291 static SDValue lowerV64I8VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
11292 const X86Subtarget *Subtarget,
11293 SelectionDAG &DAG) {
11295 assert(V1.getSimpleValueType() == MVT::v64i8 && "Bad operand type!");
11296 assert(V2.getSimpleValueType() == MVT::v64i8 && "Bad operand type!");
11297 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
11298 ArrayRef<int> Mask = SVOp->getMask();
11299 assert(Mask.size() == 64 && "Unexpected mask size for v64 shuffle!");
11300 assert(Subtarget->hasBWI() && "We can only lower v64i8 with AVX-512-BWI!");
11302 // FIXME: Implement direct support for this type!
11303 return splitAndLowerVectorShuffle(DL, MVT::v64i8, V1, V2, Mask, DAG);
11306 /// \brief High-level routine to lower various 512-bit x86 vector shuffles.
11308 /// This routine either breaks down the specific type of a 512-bit x86 vector
11309 /// shuffle or splits it into two 256-bit shuffles and fuses the results back
11310 /// together based on the available instructions.
11311 static SDValue lower512BitVectorShuffle(SDValue Op, SDValue V1, SDValue V2,
11312 MVT VT, const X86Subtarget *Subtarget,
11313 SelectionDAG &DAG) {
11315 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
11316 ArrayRef<int> Mask = SVOp->getMask();
11317 assert(Subtarget->hasAVX512() &&
11318 "Cannot lower 512-bit vectors w/ basic ISA!");
11320 // Check for being able to broadcast a single element.
11321 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(VT.SimpleTy, DL, V1,
11322 Mask, Subtarget, DAG))
11325 // Dispatch to each element type for lowering. If we don't have support for
11326 // specific element type shuffles at 512 bits, immediately split them and
11327 // lower them. Each lowering routine of a given type is allowed to assume that
11328 // the requisite ISA extensions for that element type are available.
11329 switch (VT.SimpleTy) {
11330 case MVT::v8f64:
11331 return lowerV8F64VectorShuffle(Op, V1, V2, Subtarget, DAG);
11332 case MVT::v16f32:
11333 return lowerV16F32VectorShuffle(Op, V1, V2, Subtarget, DAG);
11334 case MVT::v8i64:
11335 return lowerV8I64VectorShuffle(Op, V1, V2, Subtarget, DAG);
11336 case MVT::v16i32:
11337 return lowerV16I32VectorShuffle(Op, V1, V2, Subtarget, DAG);
11338 case MVT::v32i16:
11339 if (Subtarget->hasBWI())
11340 return lowerV32I16VectorShuffle(Op, V1, V2, Subtarget, DAG);
11341 break;
11342 case MVT::v64i8:
11343 if (Subtarget->hasBWI())
11344 return lowerV64I8VectorShuffle(Op, V1, V2, Subtarget, DAG);
11345 break;
11347 default:
11348 llvm_unreachable("Not a valid 512-bit x86 vector type!");
11349 }
11351 // Otherwise fall back on splitting.
11352 return splitAndLowerVectorShuffle(DL, VT, V1, V2, Mask, DAG);
11355 /// \brief Top-level lowering for x86 vector shuffles.
11357 /// This handles decomposition, canonicalization, and lowering of all x86
11358 /// vector shuffles. Most of the specific lowering strategies are encapsulated
11359 /// above in helper routines. The canonicalization attempts to widen shuffles
11360 /// to involve fewer lanes of wider elements, consolidate symmetric patterns
11361 /// s.t. only one of the two inputs needs to be tested, etc.
11362 static SDValue lowerVectorShuffle(SDValue Op, const X86Subtarget *Subtarget,
11363 SelectionDAG &DAG) {
11364 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
11365 ArrayRef<int> Mask = SVOp->getMask();
11366 SDValue V1 = Op.getOperand(0);
11367 SDValue V2 = Op.getOperand(1);
11368 MVT VT = Op.getSimpleValueType();
11369 int NumElements = VT.getVectorNumElements();
11372 assert(VT.getSizeInBits() != 64 && "Can't lower MMX shuffles");
11374 bool V1IsUndef = V1.getOpcode() == ISD::UNDEF;
11375 bool V2IsUndef = V2.getOpcode() == ISD::UNDEF;
11376 if (V1IsUndef && V2IsUndef)
11377 return DAG.getUNDEF(VT);
11379 // When we create a shuffle node we put the UNDEF node to second operand,
11380 // but in some cases the first operand may be transformed to UNDEF.
11381 // In this case we should just commute the node.
11383 return DAG.getCommutedVectorShuffle(*SVOp);
11385 // Check for non-undef masks pointing at an undef vector and make the masks
11386 // undef as well. This makes it easier to match the shuffle based solely on
11390 if (M >= NumElements) {
11391 SmallVector<int, 8> NewMask(Mask.begin(), Mask.end());
11392 for (int &M : NewMask)
11393 if (M >= NumElements)
11395 return DAG.getVectorShuffle(VT, dl, V1, V2, NewMask);
11398 // We actually see shuffles that are entirely re-arrangements of a set of
11399 // zero inputs. This mostly happens while decomposing complex shuffles into
11400 // simple ones. Directly lower these as a buildvector of zeros.
11401 SmallBitVector Zeroable = computeZeroableShuffleElements(Mask, V1, V2);
11402 if (Zeroable.all())
11403 return getZeroVector(VT, Subtarget, DAG, dl);
11405 // Try to collapse shuffles into using a vector type with fewer elements but
11406 // wider element types. We cap this to not form integers or floating point
11407 // elements wider than 64 bits, but it might be interesting to form i128
11408 // integers to handle flipping the low and high halves of AVX 256-bit vectors.
11409 SmallVector<int, 16> WidenedMask;
11410 if (VT.getScalarSizeInBits() < 64 &&
11411 canWidenShuffleElements(Mask, WidenedMask)) {
11412 MVT NewEltVT = VT.isFloatingPoint()
11413 ? MVT::getFloatingPointVT(VT.getScalarSizeInBits() * 2)
11414 : MVT::getIntegerVT(VT.getScalarSizeInBits() * 2);
11415 MVT NewVT = MVT::getVectorVT(NewEltVT, VT.getVectorNumElements() / 2);
11416 // Make sure that the new vector type is legal. For example, v2f64 isn't
11418 if (DAG.getTargetLoweringInfo().isTypeLegal(NewVT)) {
11419 V1 = DAG.getNode(ISD::BITCAST, dl, NewVT, V1);
11420 V2 = DAG.getNode(ISD::BITCAST, dl, NewVT, V2);
11421 return DAG.getNode(ISD::BITCAST, dl, VT,
11422 DAG.getVectorShuffle(NewVT, dl, V1, V2, WidenedMask));
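// For example, the v4i32 mask <0, 1, 6, 7> consists of two consecutive
// even-aligned pairs, so it widens to the v2i64 mask <0, 3> applied to the
// bitcast inputs.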
11426 int NumV1Elements = 0, NumUndefElements = 0, NumV2Elements = 0;
11427 for (int M : SVOp->getMask())
11429 ++NumUndefElements;
11430 else if (M < NumElements)
11435 // Commute the shuffle as needed such that more elements come from V1 than
11436 // V2. This allows us to match the shuffle pattern strictly on how many
11437 // elements come from V1 without handling the symmetric cases.
11438 if (NumV2Elements > NumV1Elements)
11439 return DAG.getCommutedVectorShuffle(*SVOp);
11441 // When the number of V1 and V2 elements are the same, try to minimize the
11442 // number of uses of V2 in the low half of the vector. When that is tied,
11443 // ensure that the sum of indices for V1 is equal to or lower than the sum
11444 // indices for V2. When those are equal, try to ensure that the number of odd
11445 // indices for V1 is lower than the number of odd indices for V2.
11446 if (NumV1Elements == NumV2Elements) {
11447 int LowV1Elements = 0, LowV2Elements = 0;
11448 for (int M : SVOp->getMask().slice(0, NumElements / 2))
11449 if (M >= NumElements)
11453 if (LowV2Elements > LowV1Elements) {
11454 return DAG.getCommutedVectorShuffle(*SVOp);
11455 } else if (LowV2Elements == LowV1Elements) {
11456 int SumV1Indices = 0, SumV2Indices = 0;
11457 for (int i = 0, Size = SVOp->getMask().size(); i < Size; ++i)
11458 if (SVOp->getMask()[i] >= NumElements)
11460 else if (SVOp->getMask()[i] >= 0)
11462 if (SumV2Indices < SumV1Indices) {
11463 return DAG.getCommutedVectorShuffle(*SVOp);
11464 } else if (SumV2Indices == SumV1Indices) {
11465 int NumV1OddIndices = 0, NumV2OddIndices = 0;
11466 for (int i = 0, Size = SVOp->getMask().size(); i < Size; ++i)
11467 if (SVOp->getMask()[i] >= NumElements)
11468 NumV2OddIndices += i % 2;
11469 else if (SVOp->getMask()[i] >= 0)
11470 NumV1OddIndices += i % 2;
11471 if (NumV2OddIndices < NumV1OddIndices)
11472 return DAG.getCommutedVectorShuffle(*SVOp);
11477 // For each vector width, delegate to a specialized lowering routine.
11478 if (VT.getSizeInBits() == 128)
11479 return lower128BitVectorShuffle(Op, V1, V2, VT, Subtarget, DAG);
11481 if (VT.getSizeInBits() == 256)
11482 return lower256BitVectorShuffle(Op, V1, V2, VT, Subtarget, DAG);
11484 // Dispatch 512-bit vectors to the AVX-512 lowering routines; element types
11485 // without direct support are split there. FIXME: Implement AVX-512 support!
11486 if (VT.getSizeInBits() == 512)
11487 return lower512BitVectorShuffle(Op, V1, V2, VT, Subtarget, DAG);
11489 llvm_unreachable("Unimplemented!");
11493 //===----------------------------------------------------------------------===//
11494 // Legacy vector shuffle lowering
11496 // This code is the legacy code handling vector shuffles until the above
11497 // replaces its functionality and performance.
11498 //===----------------------------------------------------------------------===//
11500 static bool isBlendMask(ArrayRef<int> MaskVals, MVT VT, bool hasSSE41,
11501 bool hasInt256, unsigned *MaskOut = nullptr) {
11502 MVT EltVT = VT.getVectorElementType();
11504 // There is no blend with immediate in AVX-512.
11505 if (VT.is512BitVector())
11508 if (!hasSSE41 || EltVT == MVT::i8)
11510 if (!hasInt256 && VT == MVT::v16i16)
11513 unsigned MaskValue = 0;
11514 unsigned NumElems = VT.getVectorNumElements();
11515 // There are 2 lanes if (NumElems > 8), and 1 lane otherwise.
11516 unsigned NumLanes = (NumElems - 1) / 8 + 1;
11517 unsigned NumElemsInLane = NumElems / NumLanes;
11519 // Blend for v16i16 should be symmetric for both lanes.
11520 for (unsigned i = 0; i < NumElemsInLane; ++i) {
11522 int SndLaneEltIdx = (NumLanes == 2) ? MaskVals[i + NumElemsInLane] : -1;
11523 int EltIdx = MaskVals[i];
11525 if ((EltIdx < 0 || EltIdx == (int)i) &&
11526 (SndLaneEltIdx < 0 || SndLaneEltIdx == (int)(i + NumElemsInLane)))
11529 if (((unsigned)EltIdx == (i + NumElems)) &&
11530 (SndLaneEltIdx < 0 ||
11531 (unsigned)SndLaneEltIdx == i + NumElems + NumElemsInLane))
11532 MaskValue |= (1 << i);
11538 *MaskOut = MaskValue;
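// For example, an 8-element mask <0, 9, 2, 11, 4, 13, 6, 15> takes every odd
// element from V2, so bits 1, 3, 5 and 7 are set and MaskValue is 0xAA.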
11542 // Try to lower a shuffle node into a simple blend instruction.
11543 // This function assumes isBlendMask returns true for this
11544 // ShuffleVectorSDNode.
11545 static SDValue LowerVECTOR_SHUFFLEtoBlend(ShuffleVectorSDNode *SVOp,
11546 unsigned MaskValue,
11547 const X86Subtarget *Subtarget,
11548 SelectionDAG &DAG) {
11549 MVT VT = SVOp->getSimpleValueType(0);
11550 MVT EltVT = VT.getVectorElementType();
11551 assert(isBlendMask(SVOp->getMask(), VT, Subtarget->hasSSE41(),
11552 Subtarget->hasInt256() && "Trying to lower a "
11553 "VECTOR_SHUFFLE to a Blend but "
11554 "with the wrong mask"));
11555 SDValue V1 = SVOp->getOperand(0);
11556 SDValue V2 = SVOp->getOperand(1);
11558 unsigned NumElems = VT.getVectorNumElements();
11560 // Convert i32 vectors to floating point if it is not AVX2.
11561 // AVX2 introduced VPBLENDD instruction for 128 and 256-bit vectors.
11563 if (EltVT == MVT::i64 || (EltVT == MVT::i32 && !Subtarget->hasInt256())) {
11564 BlendVT = MVT::getVectorVT(MVT::getFloatingPointVT(EltVT.getSizeInBits()),
11566 V1 = DAG.getNode(ISD::BITCAST, dl, VT, V1);
11567 V2 = DAG.getNode(ISD::BITCAST, dl, VT, V2);
11570 SDValue Ret = DAG.getNode(X86ISD::BLENDI, dl, BlendVT, V1, V2,
11571 DAG.getConstant(MaskValue, MVT::i32));
11572 return DAG.getNode(ISD::BITCAST, dl, VT, Ret);
11575 /// In vector type \p VT, return true if the element at index \p InputIdx
11576 /// falls on a different 128-bit lane than \p OutputIdx.
11577 static bool ShuffleCrosses128bitLane(MVT VT, unsigned InputIdx,
11578 unsigned OutputIdx) {
11579 unsigned EltSize = VT.getVectorElementType().getSizeInBits();
11580 return InputIdx * EltSize / 128 != OutputIdx * EltSize / 128;
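// For example, in a v8i32 vector (32-bit elements) indices 3 and 4 fall in
// different 128-bit lanes: 3 * 32 / 128 == 0 while 4 * 32 / 128 == 1.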
11583 /// Generate a PSHUFB if possible. Selects elements from \p V1 according to
11584 /// \p MaskVals. MaskVals[OutputIdx] = InputIdx specifies that we want to
11585 /// shuffle the element at InputIdx in V1 to OutputIdx in the result. If \p
11586 /// MaskVals refers to elements outside of \p V1 or is undef (-1), insert a
11588 static SDValue getPSHUFB(ArrayRef<int> MaskVals, SDValue V1, SDLoc &dl,
11589 SelectionDAG &DAG) {
11590 MVT VT = V1.getSimpleValueType();
11591 assert(VT.is128BitVector() || VT.is256BitVector());
11593 MVT EltVT = VT.getVectorElementType();
11594 unsigned EltSizeInBytes = EltVT.getSizeInBits() / 8;
11595 unsigned NumElts = VT.getVectorNumElements();
11597 SmallVector<SDValue, 32> PshufbMask;
11598 for (unsigned OutputIdx = 0; OutputIdx < NumElts; ++OutputIdx) {
11599 int InputIdx = MaskVals[OutputIdx];
11600 unsigned InputByteIdx;
11602 if (InputIdx < 0 || NumElts <= (unsigned)InputIdx)
11603 InputByteIdx = 0x80;
11605 // Cross lane is not allowed.
11606 if (ShuffleCrosses128bitLane(VT, InputIdx, OutputIdx))
11608 InputByteIdx = InputIdx * EltSizeInBytes;
11609 // Index is a byte offset within the 128-bit lane.
11610 InputByteIdx &= 0xf;
11613 for (unsigned j = 0; j < EltSizeInBytes; ++j) {
11614 PshufbMask.push_back(DAG.getConstant(InputByteIdx, MVT::i8));
11615 if (InputByteIdx != 0x80)
11616 ++InputByteIdx;
11620 MVT ShufVT = MVT::getVectorVT(MVT::i8, PshufbMask.size());
11622 V1 = DAG.getNode(ISD::BITCAST, dl, ShufVT, V1);
11623 return DAG.getNode(X86ISD::PSHUFB, dl, ShufVT, V1,
11624 DAG.getNode(ISD::BUILD_VECTOR, dl, ShufVT, PshufbMask));
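// For example, for a v4i32 V1 with MaskVals <1, -1, 2, 0>, output element 1
// is undef and expands to four 0x80 (zeroing) bytes, while elements 0, 2 and
// 3 expand to the byte runs {4,5,6,7}, {8,9,10,11} and {0,1,2,3}.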
11627 // v8i16 shuffles - Prefer shuffles in the following order:
11628 // 1. [all] pshuflw, pshufhw, optional move
11629 // 2. [ssse3] 1 x pshufb
11630 // 3. [ssse3] 2 x pshufb + 1 x por
11631 // 4. [all] mov + pshuflw + pshufhw + N x (pextrw + pinsrw)
11633 LowerVECTOR_SHUFFLEv8i16(SDValue Op, const X86Subtarget *Subtarget,
11634 SelectionDAG &DAG) {
11635 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
11636 SDValue V1 = SVOp->getOperand(0);
11637 SDValue V2 = SVOp->getOperand(1);
11639 SmallVector<int, 8> MaskVals;
11641 // Determine if more than 1 of the words in each of the low and high quadwords
11642 // of the result come from the same quadword of one of the two inputs. Undef
11643 // mask values count as coming from any quadword, for better codegen.
11645 // Lo/HiQuad[i] = j indicates how many words from the ith quad of the input
11646 // feed this quad. For i, 0 and 1 refer to V1, 2 and 3 refer to V2.
11647 unsigned LoQuad[] = { 0, 0, 0, 0 };
11648 unsigned HiQuad[] = { 0, 0, 0, 0 };
11649 // Indices of quads used.
11650 std::bitset<4> InputQuads;
11651 for (unsigned i = 0; i < 8; ++i) {
11652 unsigned *Quad = i < 4 ? LoQuad : HiQuad;
11653 int EltIdx = SVOp->getMaskElt(i);
11654 MaskVals.push_back(EltIdx);
11662 ++Quad[EltIdx / 4];
11663 InputQuads.set(EltIdx / 4);
11666 int BestLoQuad = -1;
11667 unsigned MaxQuad = 1;
11668 for (unsigned i = 0; i < 4; ++i) {
11669 if (LoQuad[i] > MaxQuad) {
11671 MaxQuad = LoQuad[i];
11675 int BestHiQuad = -1;
11677 for (unsigned i = 0; i < 4; ++i) {
11678 if (HiQuad[i] > MaxQuad) {
11680 MaxQuad = HiQuad[i];
11684 // For SSSE3, if all 8 words of the result come from only 1 quadword of each
11685 // of the two input vectors, shuffle them into one input vector so only a
11686 // single pshufb instruction is necessary. If there are more than 2 input
11687 // quads, disable the next transformation since it does not help SSSE3.
11688 bool V1Used = InputQuads[0] || InputQuads[1];
11689 bool V2Used = InputQuads[2] || InputQuads[3];
11690 if (Subtarget->hasSSSE3()) {
11691 if (InputQuads.count() == 2 && V1Used && V2Used) {
11692 BestLoQuad = InputQuads[0] ? 0 : 1;
11693 BestHiQuad = InputQuads[2] ? 2 : 3;
11695 if (InputQuads.count() > 2) {
11701 // If BestLoQuad or BestHiQuad are set, shuffle the quads together and update
11702 // the shuffle mask. If a quad is scored as -1, that means that it contains
11703 // words from all 4 input quadwords.
11705 if (BestLoQuad >= 0 || BestHiQuad >= 0) {
11707 BestLoQuad < 0 ? 0 : BestLoQuad,
11708 BestHiQuad < 0 ? 1 : BestHiQuad
11710 NewV = DAG.getVectorShuffle(MVT::v2i64, dl,
11711 DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, V1),
11712 DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, V2), &MaskV[0]);
11713 NewV = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, NewV);
11715 // Rewrite the MaskVals and assign NewV to V1 if NewV now contains all the
11716 // source words for the shuffle, to aid later transformations.
11717 bool AllWordsInNewV = true;
11718 bool InOrder[2] = { true, true };
11719 for (unsigned i = 0; i != 8; ++i) {
11720 int idx = MaskVals[i];
11722 InOrder[i/4] = false;
11723 if (idx < 0 || (idx/4) == BestLoQuad || (idx/4) == BestHiQuad)
11725 AllWordsInNewV = false;
11729 bool pshuflw = AllWordsInNewV, pshufhw = AllWordsInNewV;
11730 if (AllWordsInNewV) {
11731 for (int i = 0; i != 8; ++i) {
11732 int idx = MaskVals[i];
11735 idx = MaskVals[i] = (idx / 4) == BestLoQuad ? (idx & 3) : (idx & 3) + 4;
11736 if ((idx != i) && idx < 4)
11738 if ((idx != i) && idx > 3)
11747 // If we've eliminated the use of V2, and the new mask is a pshuflw or
11748 // pshufhw, that's as cheap as it gets. Return the new shuffle.
11749 if ((pshufhw && InOrder[0]) || (pshuflw && InOrder[1])) {
11750 unsigned Opc = pshufhw ? X86ISD::PSHUFHW : X86ISD::PSHUFLW;
11751 unsigned TargetMask = 0;
11752 NewV = DAG.getVectorShuffle(MVT::v8i16, dl, NewV,
11753 DAG.getUNDEF(MVT::v8i16), &MaskVals[0]);
11754 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(NewV.getNode());
11755 TargetMask = pshufhw ? getShufflePSHUFHWImmediate(SVOp):
11756 getShufflePSHUFLWImmediate(SVOp);
11757 V1 = NewV.getOperand(0);
11758 return getTargetShuffleNode(Opc, dl, MVT::v8i16, V1, TargetMask, DAG);
11762 // Promote splats to a larger type which usually leads to more efficient code.
11763 // FIXME: Is this true if pshufb is available?
11764 if (SVOp->isSplat())
11765 return PromoteSplat(SVOp, DAG);
11767 // If we have SSSE3, and all words of the result are from 1 input vector,
11768 // case 2 is generated, otherwise case 3 is generated. If no SSSE3
11769 // is present, fall back to case 4.
11770 if (Subtarget->hasSSSE3()) {
11771 SmallVector<SDValue,16> pshufbMask;
11773 // If we have elements from both input vectors, set the high bit of the
11774 // shuffle mask element to zero out elements that come from V2 in the V1
11775 // mask, and elements that come from V1 in the V2 mask, so that the two
11776 // results can be OR'd together.
11777 bool TwoInputs = V1Used && V2Used;
11778 V1 = getPSHUFB(MaskVals, V1, dl, DAG);
11780 return DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V1);
11782 // Calculate the shuffle mask for the second input, shuffle it, and
11783 // OR it with the first shuffled input.
11784 CommuteVectorShuffleMask(MaskVals, 8);
11785 V2 = getPSHUFB(MaskVals, V2, dl, DAG);
11786 V1 = DAG.getNode(ISD::OR, dl, MVT::v16i8, V1, V2);
11787 return DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V1);
11790 // If BestLoQuad >= 0, generate a pshuflw to put the low elements in order,
11791 // and update MaskVals with new element order.
11792 std::bitset<8> InOrder;
11793 if (BestLoQuad >= 0) {
11794 int MaskV[] = { -1, -1, -1, -1, 4, 5, 6, 7 };
11795 for (int i = 0; i != 4; ++i) {
11796 int idx = MaskVals[i];
11799 } else if ((idx / 4) == BestLoQuad) {
11800 MaskV[i] = idx & 3;
11804 NewV = DAG.getVectorShuffle(MVT::v8i16, dl, NewV, DAG.getUNDEF(MVT::v8i16),
11807 if (NewV.getOpcode() == ISD::VECTOR_SHUFFLE && Subtarget->hasSSE2()) {
11808 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(NewV.getNode());
11809 NewV = getTargetShuffleNode(X86ISD::PSHUFLW, dl, MVT::v8i16,
11810 NewV.getOperand(0),
11811 getShufflePSHUFLWImmediate(SVOp), DAG);
11815 // If BestHi >= 0, generate a pshufhw to put the high elements in order,
11816 // and update MaskVals with the new element order.
11817 if (BestHiQuad >= 0) {
11818 int MaskV[] = { 0, 1, 2, 3, -1, -1, -1, -1 };
11819 for (unsigned i = 4; i != 8; ++i) {
11820 int idx = MaskVals[i];
11823 } else if ((idx / 4) == BestHiQuad) {
11824 MaskV[i] = (idx & 3) + 4;
11828 NewV = DAG.getVectorShuffle(MVT::v8i16, dl, NewV, DAG.getUNDEF(MVT::v8i16),
11831 if (NewV.getOpcode() == ISD::VECTOR_SHUFFLE && Subtarget->hasSSE2()) {
11832 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(NewV.getNode());
11833 NewV = getTargetShuffleNode(X86ISD::PSHUFHW, dl, MVT::v8i16,
11834 NewV.getOperand(0),
11835 getShufflePSHUFHWImmediate(SVOp), DAG);
11839 // In case BestHi & BestLo were both -1, which means each quadword has a word
11840 // from each of the four input quadwords, calculate the InOrder bitvector now
11841 // before falling through to the insert/extract cleanup.
11842 if (BestLoQuad == -1 && BestHiQuad == -1) {
11844 for (int i = 0; i != 8; ++i)
11845 if (MaskVals[i] < 0 || MaskVals[i] == i)
11849 // The other elements are put in the right place using pextrw and pinsrw.
11850 for (unsigned i = 0; i != 8; ++i) {
11853 int EltIdx = MaskVals[i];
11856 SDValue ExtOp = (EltIdx < 8) ?
11857 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16, V1,
11858 DAG.getIntPtrConstant(EltIdx)) :
11859 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16, V2,
11860 DAG.getIntPtrConstant(EltIdx - 8));
11861 NewV = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v8i16, NewV, ExtOp,
11862 DAG.getIntPtrConstant(i));
11867 /// \brief v16i16 shuffles
11869 /// FIXME: We only support generation of a single pshufb currently. We can
11870 /// generalize the other applicable cases from LowerVECTOR_SHUFFLEv8i16 as
11871 /// well (e.g. 2 x pshufb + 1 x por).
11873 LowerVECTOR_SHUFFLEv16i16(SDValue Op, SelectionDAG &DAG) {
11874 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
11875 SDValue V1 = SVOp->getOperand(0);
11876 SDValue V2 = SVOp->getOperand(1);
11879 if (V2.getOpcode() != ISD::UNDEF)
11882 SmallVector<int, 16> MaskVals(SVOp->getMask().begin(), SVOp->getMask().end());
11883 return getPSHUFB(MaskVals, V1, dl, DAG);
11886 // v16i8 shuffles - Prefer shuffles in the following order:
11887 // 1. [ssse3] 1 x pshufb
11888 // 2. [ssse3] 2 x pshufb + 1 x por
11889 // 3. [all] v8i16 shuffle + N x pextrw + rotate + pinsrw
11890 static SDValue LowerVECTOR_SHUFFLEv16i8(ShuffleVectorSDNode *SVOp,
11891 const X86Subtarget* Subtarget,
11892 SelectionDAG &DAG) {
11893 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
11894 SDValue V1 = SVOp->getOperand(0);
11895 SDValue V2 = SVOp->getOperand(1);
11897 ArrayRef<int> MaskVals = SVOp->getMask();
11899 // Promote splats to a larger type which usually leads to more efficient code.
11900 // FIXME: Is this true if pshufb is available?
11901 if (SVOp->isSplat())
11902 return PromoteSplat(SVOp, DAG);
11904 // If we have SSSE3, case 1 is generated when all result bytes come from
11905 // one of the inputs. Otherwise, case 2 is generated. If no SSSE3 is
11906 // present, fall back to case 3.
11908 // If SSSE3, use 1 pshufb instruction per vector with elements in the result.
11909 if (Subtarget->hasSSSE3()) {
11910 SmallVector<SDValue,16> pshufbMask;
11912 // If all result elements are from one input vector, then only translate
11913 // undef mask values to 0x80 (zero out result) in the pshufb mask.
11915 // Otherwise, we have elements from both input vectors, and must zero out
11916 // elements that come from V2 in the first mask, and V1 in the second mask
11917 // so that we can OR them together.
11918 for (unsigned i = 0; i != 16; ++i) {
11919 int EltIdx = MaskVals[i];
11920 if (EltIdx < 0 || EltIdx >= 16)
11922 pshufbMask.push_back(DAG.getConstant(EltIdx, MVT::i8));
11924 V1 = DAG.getNode(X86ISD::PSHUFB, dl, MVT::v16i8, V1,
11925 DAG.getNode(ISD::BUILD_VECTOR, dl,
11926 MVT::v16i8, pshufbMask));
11928 // As PSHUFB will zero elements with negative indices, it's safe to ignore
11929 // the 2nd operand if it's undefined or zero.
11930 if (V2.getOpcode() == ISD::UNDEF ||
11931 ISD::isBuildVectorAllZeros(V2.getNode()))
11934 // Calculate the shuffle mask for the second input, shuffle it, and
11935 // OR it with the first shuffled input.
11936 pshufbMask.clear();
11937 for (unsigned i = 0; i != 16; ++i) {
11938 int EltIdx = MaskVals[i];
11939 EltIdx = (EltIdx < 16) ? 0x80 : EltIdx - 16;
11940 pshufbMask.push_back(DAG.getConstant(EltIdx, MVT::i8));
11942 V2 = DAG.getNode(X86ISD::PSHUFB, dl, MVT::v16i8, V2,
11943 DAG.getNode(ISD::BUILD_VECTOR, dl,
11944 MVT::v16i8, pshufbMask));
11945 return DAG.getNode(ISD::OR, dl, MVT::v16i8, V1, V2);
11948 // No SSSE3 - Calculate in place words and then fix all out of place words
11949 // with 0-16 extracts & inserts. Worst case is 16 bytes out of order from
11950 // the 16 different words that comprise the two doublequadword input vectors.
11951 V1 = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V1);
11952 V2 = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V2);
11954 for (int i = 0; i != 8; ++i) {
11955 int Elt0 = MaskVals[i*2];
11956 int Elt1 = MaskVals[i*2+1];
11958 // This word of the result is all undef, skip it.
11959 if (Elt0 < 0 && Elt1 < 0)
11962 // This word of the result is already in the correct place, skip it.
11963 if ((Elt0 == i*2) && (Elt1 == i*2+1))
11966 SDValue Elt0Src = Elt0 < 16 ? V1 : V2;
11967 SDValue Elt1Src = Elt1 < 16 ? V1 : V2;
11970 // If Elt0 and Elt1 are defined, are consecutive, and can be loaded
11971 // together using a single extract, load it and store it.
11972 if ((Elt0 >= 0) && ((Elt0 + 1) == Elt1) && ((Elt0 & 1) == 0)) {
11973 InsElt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16, Elt1Src,
11974 DAG.getIntPtrConstant(Elt1 / 2));
11975 NewV = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v8i16, NewV, InsElt,
11976 DAG.getIntPtrConstant(i));
11980 // If Elt1 is defined, extract it from the appropriate source. If the
11981 // source byte is not also odd, shift the extracted word left 8 bits;
11982 // otherwise clear the bottom 8 bits if we need to do an OR.
11984 InsElt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16, Elt1Src,
11985 DAG.getIntPtrConstant(Elt1 / 2));
11986 if ((Elt1 & 1) == 0)
11987 InsElt = DAG.getNode(ISD::SHL, dl, MVT::i16, InsElt,
11989 TLI.getShiftAmountTy(InsElt.getValueType())));
11990 else if (Elt0 >= 0)
11991 InsElt = DAG.getNode(ISD::AND, dl, MVT::i16, InsElt,
11992 DAG.getConstant(0xFF00, MVT::i16));
11994 // If Elt0 is defined, extract it from the appropriate source. If the
11995 // source byte is not also even, shift the extracted word right 8 bits. If
11996 // Elt1 was also defined, OR the extracted values together before
11997 // inserting them in the result.
11999 SDValue InsElt0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16,
12000 Elt0Src, DAG.getIntPtrConstant(Elt0 / 2));
12001 if ((Elt0 & 1) != 0)
12002 InsElt0 = DAG.getNode(ISD::SRL, dl, MVT::i16, InsElt0,
12004 TLI.getShiftAmountTy(InsElt0.getValueType())));
12005 else if (Elt1 >= 0)
12006 InsElt0 = DAG.getNode(ISD::AND, dl, MVT::i16, InsElt0,
12007 DAG.getConstant(0x00FF, MVT::i16));
12008 InsElt = Elt1 >= 0 ? DAG.getNode(ISD::OR, dl, MVT::i16, InsElt, InsElt0)
12011 NewV = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v8i16, NewV, InsElt,
12012 DAG.getIntPtrConstant(i));
12014 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, NewV);
12017 // v32i8 shuffles - Translate to VPSHUFB if possible.
12019 SDValue LowerVECTOR_SHUFFLEv32i8(ShuffleVectorSDNode *SVOp,
12020 const X86Subtarget *Subtarget,
12021 SelectionDAG &DAG) {
12022 MVT VT = SVOp->getSimpleValueType(0);
12023 SDValue V1 = SVOp->getOperand(0);
12024 SDValue V2 = SVOp->getOperand(1);
12026 SmallVector<int, 32> MaskVals(SVOp->getMask().begin(), SVOp->getMask().end());
12028 bool V2IsUndef = V2.getOpcode() == ISD::UNDEF;
12029 bool V1IsAllZero = ISD::isBuildVectorAllZeros(V1.getNode());
12030 bool V2IsAllZero = ISD::isBuildVectorAllZeros(V2.getNode());
12032 // VPSHUFB may be generated if
12033 // (1) one of the input vectors is undefined or zeroinitializer.
12034 // The mask value 0x80 puts 0 in the corresponding slot of the vector.
12035 // And (2) the mask indexes don't cross the 128-bit lane.
12036 if (VT != MVT::v32i8 || !Subtarget->hasInt256() ||
12037 (!V2IsUndef && !V2IsAllZero && !V1IsAllZero))
12040 if (V1IsAllZero && !V2IsAllZero) {
12041 CommuteVectorShuffleMask(MaskVals, 32);
12044 return getPSHUFB(MaskVals, V1, dl, DAG);
12047 /// RewriteAsNarrowerShuffle - Try rewriting v8i16 and v16i8 shuffles as 4 wide
12048 /// ones, or rewriting v4i32 / v4f32 as 2 wide ones if possible. This can be
12049 /// done when every pair / quad of shuffle mask elements point to elements in
12050 /// the right sequence. e.g.
12051 /// vector_shuffle X, Y, <2, 3, | 10, 11, | 0, 1, | 14, 15>
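/// which, for example, becomes the narrower
/// vector_shuffle X', Y', <1, 5, 0, 7>
/// where X' and Y' are the operands bitcast to v4i32 and each mask element
/// now addresses one whole 32-bit element.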
12053 SDValue RewriteAsNarrowerShuffle(ShuffleVectorSDNode *SVOp,
12054 SelectionDAG &DAG) {
12055 MVT VT = SVOp->getSimpleValueType(0);
12057 unsigned NumElems = VT.getVectorNumElements();
12060 switch (VT.SimpleTy) {
12061 default: llvm_unreachable("Unexpected!");
12062 case MVT::v2i64:
12063 case MVT::v2f64:
12064 return SDValue(SVOp, 0);
12065 case MVT::v4f32: NewVT = MVT::v2f64; Scale = 2; break;
12066 case MVT::v4i32: NewVT = MVT::v2i64; Scale = 2; break;
12067 case MVT::v8i16: NewVT = MVT::v4i32; Scale = 2; break;
12068 case MVT::v16i8: NewVT = MVT::v4i32; Scale = 4; break;
12069 case MVT::v16i16: NewVT = MVT::v8i32; Scale = 2; break;
12070 case MVT::v32i8: NewVT = MVT::v8i32; Scale = 4; break;
12073 SmallVector<int, 8> MaskVec;
12074 for (unsigned i = 0; i != NumElems; i += Scale) {
12076 for (unsigned j = 0; j != Scale; ++j) {
12077 int EltIdx = SVOp->getMaskElt(i+j);
12081 StartIdx = (EltIdx / Scale);
12082 if (EltIdx != (int)(StartIdx*Scale + j))
12085 MaskVec.push_back(StartIdx);
12088 SDValue V1 = DAG.getNode(ISD::BITCAST, dl, NewVT, SVOp->getOperand(0));
12089 SDValue V2 = DAG.getNode(ISD::BITCAST, dl, NewVT, SVOp->getOperand(1));
12090 return DAG.getVectorShuffle(NewVT, dl, V1, V2, &MaskVec[0]);
12093 /// getVZextMovL - Return a zero-extending vector move low node.
12095 static SDValue getVZextMovL(MVT VT, MVT OpVT,
12096 SDValue SrcOp, SelectionDAG &DAG,
12097 const X86Subtarget *Subtarget, SDLoc dl) {
12098 if (VT == MVT::v2f64 || VT == MVT::v4f32) {
12099 LoadSDNode *LD = nullptr;
12100 if (!isScalarLoadToVector(SrcOp.getNode(), &LD))
12101 LD = dyn_cast<LoadSDNode>(SrcOp);
12103 // movssrr and movsdrr do not clear top bits. Try to use movd, movq
12104 // instead.
12105 MVT ExtVT = (OpVT == MVT::v2f64) ? MVT::i64 : MVT::i32;
12106 if ((ExtVT != MVT::i64 || Subtarget->is64Bit()) &&
12107 SrcOp.getOpcode() == ISD::SCALAR_TO_VECTOR &&
12108 SrcOp.getOperand(0).getOpcode() == ISD::BITCAST &&
12109 SrcOp.getOperand(0).getOperand(0).getValueType() == ExtVT) {
12111 OpVT = (OpVT == MVT::v2f64) ? MVT::v2i64 : MVT::v4i32;
12112 return DAG.getNode(ISD::BITCAST, dl, VT,
12113 DAG.getNode(X86ISD::VZEXT_MOVL, dl, OpVT,
12114 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl,
12116 SrcOp.getOperand(0)
12122 return DAG.getNode(ISD::BITCAST, dl, VT,
12123 DAG.getNode(X86ISD::VZEXT_MOVL, dl, OpVT,
12124 DAG.getNode(ISD::BITCAST, dl,
12128 /// LowerVECTOR_SHUFFLE_256 - Handle all 256-bit wide vector shuffles
12129 /// which could not be matched by any known target-specific shuffle
12131 LowerVECTOR_SHUFFLE_256(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG) {
12133 SDValue NewOp = Compact8x32ShuffleNode(SVOp, DAG);
12134 if (NewOp.getNode())
12137 MVT VT = SVOp->getSimpleValueType(0);
12139 unsigned NumElems = VT.getVectorNumElements();
12140 unsigned NumLaneElems = NumElems / 2;
12143 MVT EltVT = VT.getVectorElementType();
12144 MVT NVT = MVT::getVectorVT(EltVT, NumLaneElems);
12147 SmallVector<int, 16> Mask;
12148 for (unsigned l = 0; l < 2; ++l) {
12149 // Build a shuffle mask for the output, discovering on the fly which
12150 // input vectors to use as shuffle operands (recorded in InputUsed).
12151 // If building a suitable shuffle vector proves too hard, then bail
12152 // out with UseBuildVector set.
12153 bool UseBuildVector = false;
12154 int InputUsed[2] = { -1, -1 }; // Not yet discovered.
12155 unsigned LaneStart = l * NumLaneElems;
12156 for (unsigned i = 0; i != NumLaneElems; ++i) {
12157 // The mask element. This indexes into the input.
12158 int Idx = SVOp->getMaskElt(i+LaneStart);
12160 // the mask element does not index into any input vector.
12161 Mask.push_back(-1);
12165 // The input vector this mask element indexes into.
12166 int Input = Idx / NumLaneElems;
12168 // Turn the index into an offset from the start of the input vector.
12169 Idx -= Input * NumLaneElems;
12171 // Find or create a shuffle vector operand to hold this input.
12173 for (OpNo = 0; OpNo < array_lengthof(InputUsed); ++OpNo) {
12174 if (InputUsed[OpNo] == Input)
12175 // This input vector is already an operand.
12177 if (InputUsed[OpNo] < 0) {
12178 // Create a new operand for this input vector.
12179 InputUsed[OpNo] = Input;
12184 if (OpNo >= array_lengthof(InputUsed)) {
12185 // More than two input vectors used! Give up on trying to create a
12186 // shuffle vector. Insert all elements into a BUILD_VECTOR instead.
12187 UseBuildVector = true;
12191 // Add the mask index for the new shuffle vector.
12192 Mask.push_back(Idx + OpNo * NumLaneElems);
12195 if (UseBuildVector) {
12196 SmallVector<SDValue, 16> SVOps;
12197 for (unsigned i = 0; i != NumLaneElems; ++i) {
12198 // The mask element. This indexes into the input.
12199 int Idx = SVOp->getMaskElt(i+LaneStart);
12201 SVOps.push_back(DAG.getUNDEF(EltVT));
12205 // The input vector this mask element indexes into.
12206 int Input = Idx / NumElems;
12208 // Turn the index into an offset from the start of the input vector.
12209 Idx -= Input * NumElems;
12211 // Extract the vector element by hand.
12212 SVOps.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT,
12213 SVOp->getOperand(Input),
12214 DAG.getIntPtrConstant(Idx)));
12217 // Construct the output using a BUILD_VECTOR.
12218 Output[l] = DAG.getNode(ISD::BUILD_VECTOR, dl, NVT, SVOps);
12219 } else if (InputUsed[0] < 0) {
12220 // No input vectors were used! The result is undefined.
12221 Output[l] = DAG.getUNDEF(NVT);
12223 SDValue Op0 = Extract128BitVector(SVOp->getOperand(InputUsed[0] / 2),
12224 (InputUsed[0] % 2) * NumLaneElems,
12226 // If only one input was used, use an undefined vector for the other.
12227 SDValue Op1 = (InputUsed[1] < 0) ? DAG.getUNDEF(NVT) :
12228 Extract128BitVector(SVOp->getOperand(InputUsed[1] / 2),
12229 (InputUsed[1] % 2) * NumLaneElems, DAG, dl);
12230 // At least one input vector was used. Create a new shuffle vector.
12231 Output[l] = DAG.getVectorShuffle(NVT, dl, Op0, Op1, &Mask[0]);
12237 // Concatenate the result back
12238 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Output[0], Output[1]);
12241 /// LowerVECTOR_SHUFFLE_128v4 - Handle all 128-bit wide vectors with
12242 /// 4 elements, and match them with several different shuffle types.
12244 LowerVECTOR_SHUFFLE_128v4(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG) {
12245 SDValue V1 = SVOp->getOperand(0);
12246 SDValue V2 = SVOp->getOperand(1);
12248 MVT VT = SVOp->getSimpleValueType(0);
12250 assert(VT.is128BitVector() && "Unsupported vector size");
12252 std::pair<int, int> Locs[4];
12253 int Mask1[] = { -1, -1, -1, -1 };
12254 SmallVector<int, 8> PermMask(SVOp->getMask().begin(), SVOp->getMask().end());
12256 unsigned NumHi = 0;
12257 unsigned NumLo = 0;
12258 for (unsigned i = 0; i != 4; ++i) {
12259 int Idx = PermMask[i];
12261 Locs[i] = std::make_pair(-1, -1);
12263 assert(Idx < 8 && "Invalid VECTOR_SHUFFLE index!");
12265 Locs[i] = std::make_pair(0, NumLo);
12266 Mask1[NumLo] = Idx;
12269 Locs[i] = std::make_pair(1, NumHi);
12271 Mask1[2+NumHi] = Idx;
12277 if (NumLo <= 2 && NumHi <= 2) {
12278 // If no more than two elements come from either vector, this can be
12279 // implemented with two shuffles. The first shuffle gathers the elements;
12280 // the second shuffle, which takes the first shuffle as both of its
12281 // vector operands, puts the elements into the right order.
12282 V1 = DAG.getVectorShuffle(VT, dl, V1, V2, &Mask1[0]);
12284 int Mask2[] = { -1, -1, -1, -1 };
12286 for (unsigned i = 0; i != 4; ++i)
12287 if (Locs[i].first != -1) {
12288 unsigned Idx = (i < 2) ? 0 : 4;
12289 Idx += Locs[i].first * 2 + Locs[i].second;
12293 return DAG.getVectorShuffle(VT, dl, V1, V1, &Mask2[0]);
12296 if (NumLo == 3 || NumHi == 3) {
12297 // Otherwise, we must have three elements from one vector, call it X, and
12298 // one element from the other, call it Y. First, use a shufps to build an
12299 // intermediate vector with the one element from Y and the element from X
12300 // that will be in the same half in the final destination (the indexes don't
12301 // matter). Then, use a shufps to build the final vector, taking the half
12302 // containing the element from Y from the intermediate, and the other half
12305 // Normalize it so the 3 elements come from V1.
12306 CommuteVectorShuffleMask(PermMask, 4);
12310 // Find the element from V2.
12312 for (HiIndex = 0; HiIndex < 3; ++HiIndex) {
12313 int Val = PermMask[HiIndex];
12320 Mask1[0] = PermMask[HiIndex];
12322 Mask1[2] = PermMask[HiIndex^1];
12324 V2 = DAG.getVectorShuffle(VT, dl, V1, V2, &Mask1[0]);
12326 if (HiIndex >= 2) {
12327 Mask1[0] = PermMask[0];
12328 Mask1[1] = PermMask[1];
12329 Mask1[2] = HiIndex & 1 ? 6 : 4;
12330 Mask1[3] = HiIndex & 1 ? 4 : 6;
12331 return DAG.getVectorShuffle(VT, dl, V1, V2, &Mask1[0]);
12334 Mask1[0] = HiIndex & 1 ? 2 : 0;
12335 Mask1[1] = HiIndex & 1 ? 0 : 2;
12336 Mask1[2] = PermMask[2];
12337 Mask1[3] = PermMask[3];
12342 return DAG.getVectorShuffle(VT, dl, V2, V1, &Mask1[0]);
12345 // Break it into (shuffle shuffle_hi, shuffle_lo).
12346 int LoMask[] = { -1, -1, -1, -1 };
12347 int HiMask[] = { -1, -1, -1, -1 };
12349 int *MaskPtr = LoMask;
12350 unsigned MaskIdx = 0;
12351 unsigned LoIdx = 0;
12352 unsigned HiIdx = 2;
12353 for (unsigned i = 0; i != 4; ++i) {
12360 int Idx = PermMask[i];
12362 Locs[i] = std::make_pair(-1, -1);
12363 } else if (Idx < 4) {
12364 Locs[i] = std::make_pair(MaskIdx, LoIdx);
12365 MaskPtr[LoIdx] = Idx;
12368 Locs[i] = std::make_pair(MaskIdx, HiIdx);
12369 MaskPtr[HiIdx] = Idx;
12374 SDValue LoShuffle = DAG.getVectorShuffle(VT, dl, V1, V2, &LoMask[0]);
12375 SDValue HiShuffle = DAG.getVectorShuffle(VT, dl, V1, V2, &HiMask[0]);
12376 int MaskOps[] = { -1, -1, -1, -1 };
12377 for (unsigned i = 0; i != 4; ++i)
12378 if (Locs[i].first != -1)
12379 MaskOps[i] = Locs[i].first * 4 + Locs[i].second;
12380 return DAG.getVectorShuffle(VT, dl, LoShuffle, HiShuffle, &MaskOps[0]);
12383 static bool MayFoldVectorLoad(SDValue V) {
12384 while (V.hasOneUse() && V.getOpcode() == ISD::BITCAST)
12385 V = V.getOperand(0);
12387 if (V.hasOneUse() && V.getOpcode() == ISD::SCALAR_TO_VECTOR)
12388 V = V.getOperand(0);
12389 if (V.hasOneUse() && V.getOpcode() == ISD::BUILD_VECTOR &&
12390 V.getNumOperands() == 2 && V.getOperand(1).getOpcode() == ISD::UNDEF)
12391 // BUILD_VECTOR (load), undef
12392 V = V.getOperand(0);
12394 return MayFoldLoad(V);
12398 SDValue getMOVDDup(SDValue &Op, SDLoc &dl, SDValue V1, SelectionDAG &DAG) {
12399 MVT VT = Op.getSimpleValueType();
12401 // Canonicalize to v2f64.
12402 V1 = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, V1);
12403 return DAG.getNode(ISD::BITCAST, dl, VT,
12404 getTargetShuffleNode(X86ISD::MOVDDUP, dl, MVT::v2f64,
12409 SDValue getMOVLowToHigh(SDValue &Op, SDLoc &dl, SelectionDAG &DAG,
12411 SDValue V1 = Op.getOperand(0);
12412 SDValue V2 = Op.getOperand(1);
12413 MVT VT = Op.getSimpleValueType();
12415 assert(VT != MVT::v2i64 && "unsupported shuffle type");
12417 if (HasSSE2 && VT == MVT::v2f64)
12418 return getTargetShuffleNode(X86ISD::MOVLHPD, dl, VT, V1, V2, DAG);
12420 // v4f32 or v4i32: canonicalize to v4f32 (which is legal for SSE1)
12421 return DAG.getNode(ISD::BITCAST, dl, VT,
12422 getTargetShuffleNode(X86ISD::MOVLHPS, dl, MVT::v4f32,
12423 DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, V1),
12424 DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, V2), DAG));
12428 SDValue getMOVHighToLow(SDValue &Op, SDLoc &dl, SelectionDAG &DAG) {
12429 SDValue V1 = Op.getOperand(0);
12430 SDValue V2 = Op.getOperand(1);
12431 MVT VT = Op.getSimpleValueType();
12433 assert((VT == MVT::v4i32 || VT == MVT::v4f32) &&
12434 "unsupported shuffle type");
12436 if (V2.getOpcode() == ISD::UNDEF)
12440 return getTargetShuffleNode(X86ISD::MOVHLPS, dl, VT, V1, V2, DAG);
12444 SDValue getMOVLP(SDValue &Op, SDLoc &dl, SelectionDAG &DAG, bool HasSSE2) {
12445 SDValue V1 = Op.getOperand(0);
12446 SDValue V2 = Op.getOperand(1);
12447 MVT VT = Op.getSimpleValueType();
12448 unsigned NumElems = VT.getVectorNumElements();
12450 // Use MOVLPS and MOVLPD in case V1 or V2 are loads. During isel, the second
12451 // operand of these instructions is only memory, so check if there's a
12452 // potential load folding here, otherwise use SHUFPS or MOVSD to match the mask.
12454 bool CanFoldLoad = false;
12456 // Trivial case, when V2 comes from a load.
12457 if (MayFoldVectorLoad(V2))
12458 CanFoldLoad = true;
12460 // When V1 is a load, it can be folded later into a store in isel, example:
12461 // (store (v4f32 (X86Movlps (load addr:$src1), VR128:$src2)), addr:$src1)
12463 // turns into (MOVLPSmr addr:$src1, VR128:$src2)
12464 // So, recognize this potential and also use MOVLPS or MOVLPD
12465 else if (MayFoldVectorLoad(V1) && MayFoldIntoStore(Op))
12466 CanFoldLoad = true;
12468 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
12470 if (HasSSE2 && NumElems == 2)
12471 return getTargetShuffleNode(X86ISD::MOVLPD, dl, VT, V1, V2, DAG);
12474 // If we don't care about the second element, proceed to use movss.
12475 if (SVOp->getMaskElt(1) != -1)
12476 return getTargetShuffleNode(X86ISD::MOVLPS, dl, VT, V1, V2, DAG);
12479 // movl and movlp will both match v2i64, but v2i64 is never matched by
12480 // movl earlier because we make it strict to avoid messing with the movlp load
12481 // folding logic (see the code above getMOVLP call). Match it here then,
12482 // this is horrible, but will stay like this until we move all shuffle
12483 // matching to x86 specific nodes. Note that for the 1st condition all
12484 // types are matched with movsd.
12486 // FIXME: isMOVLMask should be checked and matched before getMOVLP,
12487 // as to remove this logic from here, as much as possible
12488 if (NumElems == 2 || !isMOVLMask(SVOp->getMask(), VT))
12489 return getTargetShuffleNode(X86ISD::MOVSD, dl, VT, V1, V2, DAG);
12490 return getTargetShuffleNode(X86ISD::MOVSS, dl, VT, V1, V2, DAG);
12493 assert(VT != MVT::v4i32 && "unsupported shuffle type");
12495 // Invert the operand order and use SHUFPS to match it.
12496 return getTargetShuffleNode(X86ISD::SHUFP, dl, VT, V2, V1,
12497 getShuffleSHUFImmediate(SVOp), DAG);
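// Produce a scalar load of element 'Index' of the given vector load: the new
// address is the original address plus Index * the element's store size.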
12500 static SDValue NarrowVectorLoadToElement(LoadSDNode *Load, unsigned Index,
12501 SelectionDAG &DAG) {
12503 MVT VT = Load->getSimpleValueType(0);
12504 MVT EVT = VT.getVectorElementType();
12505 SDValue Addr = Load->getOperand(1);
12506 SDValue NewAddr = DAG.getNode(
12507 ISD::ADD, dl, Addr.getSimpleValueType(), Addr,
12508 DAG.getConstant(Index * EVT.getStoreSize(), Addr.getSimpleValueType()));
12511 DAG.getLoad(EVT, dl, Load->getChain(), NewAddr,
12512 DAG.getMachineFunction().getMachineMemOperand(
12513 Load->getMemOperand(), 0, EVT.getStoreSize()));
12517 // It is only safe to call this function if isINSERTPSMask is true for
12518 // this shufflevector mask.
12519 static SDValue getINSERTPS(ShuffleVectorSDNode *SVOp, SDLoc &dl,
12520 SelectionDAG &DAG) {
12521 // Generate an insertps instruction when inserting an f32 from memory onto a
12522 // v4f32 or when copying a member from one v4f32 to another.
12523 // We also use it for transferring i32 from one register to another,
12524 // since it simply copies the same bits.
12525 // If we're transferring an i32 from memory to a specific element in a
12526 // register, we output a generic DAG that will match the PINSRD instruction.
12528 MVT VT = SVOp->getSimpleValueType(0);
12529 MVT EVT = VT.getVectorElementType();
12530 SDValue V1 = SVOp->getOperand(0);
12531 SDValue V2 = SVOp->getOperand(1);
12532 auto Mask = SVOp->getMask();
12533 assert((VT == MVT::v4f32 || VT == MVT::v4i32) &&
12534 "unsupported vector type for insertps/pinsrd");
12536 auto FromV1Predicate = [](const int &i) { return i < 4 && i > -1; };
12537 auto FromV2Predicate = [](const int &i) { return i >= 4; };
12538 int FromV1 = std::count_if(Mask.begin(), Mask.end(), FromV1Predicate);
12542 unsigned DestIndex;
12546 DestIndex = std::find_if(Mask.begin(), Mask.end(), FromV1Predicate) -
12549 // If we have 1 element from each vector, we have to check if we're
12550 // changing V1's element's place. If so, we're done. Otherwise, we
12551 // should assume we're changing V2's element's place and behave accordingly.
12553 int FromV2 = std::count_if(Mask.begin(), Mask.end(), FromV2Predicate);
12554 assert(DestIndex <= INT32_MAX && "truncated destination index");
12555 if (FromV1 == FromV2 &&
12556 static_cast<int>(DestIndex) == Mask[DestIndex] % 4) {
12560 std::find_if(Mask.begin(), Mask.end(), FromV2Predicate) - Mask.begin();
12563 assert(std::count_if(Mask.begin(), Mask.end(), FromV2Predicate) == 1 &&
12564 "More than one element from V1 and from V2, or no elements from one "
12565 "of the vectors. This case should not have returned true from "
12570 std::find_if(Mask.begin(), Mask.end(), FromV2Predicate) - Mask.begin();
12573 // Get an index into the source vector in the range [0,4) (the mask is
12574 // in the range [0,8) because it can address V1 and V2)
12575 unsigned SrcIndex = Mask[DestIndex] % 4;
12576 if (MayFoldLoad(From)) {
12577 // Trivial case, when From comes from a load and is only used by the
12578 // shuffle. Make it use insertps from the vector that we need from that load.
12581 NarrowVectorLoadToElement(cast<LoadSDNode>(From), SrcIndex, DAG);
12582 if (!NewLoad.getNode())
12585 if (EVT == MVT::f32) {
12586 // Create this as a scalar to vector to match the instruction pattern.
12587 SDValue LoadScalarToVector =
12588 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, NewLoad);
12589 SDValue InsertpsMask = DAG.getIntPtrConstant(DestIndex << 4);
12590 return DAG.getNode(X86ISD::INSERTPS, dl, VT, To, LoadScalarToVector,
12592 } else { // EVT == MVT::i32
12593 // If we're getting an i32 from memory, use an INSERT_VECTOR_ELT
12594 // instruction, to match the PINSRD instruction, which loads an i32 to a
12595 // certain vector element.
12596 return DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, To, NewLoad,
12597 DAG.getConstant(DestIndex, MVT::i32));
12601 // Vector-element-to-vector
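// The INSERTPS immediate encodes the source element number in bits [7:6] and
// the destination element number in bits [5:4] (bits [3:0] are the zero
// mask), hence the DestIndex << 4 | SrcIndex << 6 below.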
12602 SDValue InsertpsMask = DAG.getIntPtrConstant(DestIndex << 4 | SrcIndex << 6);
12603 return DAG.getNode(X86ISD::INSERTPS, dl, VT, To, From, InsertpsMask);
12606 // Reduce a vector shuffle to zext.
12607 static SDValue LowerVectorIntExtend(SDValue Op, const X86Subtarget *Subtarget,
12608 SelectionDAG &DAG) {
12609 // PMOVZX is only available from SSE41.
12610 if (!Subtarget->hasSSE41())
12613 MVT VT = Op.getSimpleValueType();
12615 // Only AVX2 supports 256-bit vector integer extending.
12616 if (!Subtarget->hasInt256() && VT.is256BitVector())
12619 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
12621 SDValue V1 = Op.getOperand(0);
12622 SDValue V2 = Op.getOperand(1);
12623 unsigned NumElems = VT.getVectorNumElements();
12625 // Extending is a unary operation, and the element type of the source vector
12626 // must be smaller than i64.
12627 if (V2.getOpcode() != ISD::UNDEF || !VT.isInteger() ||
12628 VT.getVectorElementType() == MVT::i64)
12631 // Find the expansion ratio, e.g. expanding from i8 to i32 has a ratio of 4.
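// For example, a v8i16 shuffle mask of <0,-1,1,-1,2,-1,3,-1> has a ratio of 2
// and can be lowered to (v8i16 (bitcast (v4i32 (X86ISD::VZEXT V1)))).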
12632 unsigned Shift = 1; // Start from 2, i.e. 1 << 1.
12633 while ((1U << Shift) < NumElems) {
12634 if (SVOp->getMaskElt(1U << Shift) == 1)
12637 // The maximal ratio is 8, i.e. from i8 to i64.
12642 // Check the shuffle mask.
12643 unsigned Mask = (1U << Shift) - 1;
12644 for (unsigned i = 0; i != NumElems; ++i) {
12645 int EltIdx = SVOp->getMaskElt(i);
12646 if ((i & Mask) != 0 && EltIdx != -1)
12648 if ((i & Mask) == 0 && (unsigned)EltIdx != (i >> Shift))
12652 unsigned NBits = VT.getVectorElementType().getSizeInBits() << Shift;
12653 MVT NeVT = MVT::getIntegerVT(NBits);
12654 MVT NVT = MVT::getVectorVT(NeVT, NumElems >> Shift);
12656 if (!DAG.getTargetLoweringInfo().isTypeLegal(NVT))
12659 return DAG.getNode(ISD::BITCAST, DL, VT,
12660 DAG.getNode(X86ISD::VZEXT, DL, NVT, V1));
12663 static SDValue NormalizeVectorShuffle(SDValue Op, const X86Subtarget *Subtarget,
12664 SelectionDAG &DAG) {
12665 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
12666 MVT VT = Op.getSimpleValueType();
12668 SDValue V1 = Op.getOperand(0);
12669 SDValue V2 = Op.getOperand(1);
12671 if (isZeroShuffle(SVOp))
12672 return getZeroVector(VT, Subtarget, DAG, dl);
12674 // Handle splat operations
12675 if (SVOp->isSplat()) {
12676 // Use vbroadcast whenever the splat comes from a foldable load
12677 SDValue Broadcast = LowerVectorBroadcast(Op, Subtarget, DAG);
12678 if (Broadcast.getNode())
12682 // Check integer expanding shuffles.
12683 SDValue NewOp = LowerVectorIntExtend(Op, Subtarget, DAG);
12684 if (NewOp.getNode())
12687 // If the shuffle can be profitably rewritten as a narrower shuffle, then
12689 if (VT == MVT::v8i16 || VT == MVT::v16i8 || VT == MVT::v16i16 ||
12690 VT == MVT::v32i8) {
12691 SDValue NewOp = RewriteAsNarrowerShuffle(SVOp, DAG);
12692 if (NewOp.getNode())
12693 return DAG.getNode(ISD::BITCAST, dl, VT, NewOp);
12694 } else if (VT.is128BitVector() && Subtarget->hasSSE2()) {
12695 // FIXME: Figure out a cleaner way to do this.
12696 if (ISD::isBuildVectorAllZeros(V2.getNode())) {
12697 SDValue NewOp = RewriteAsNarrowerShuffle(SVOp, DAG);
12698 if (NewOp.getNode()) {
12699 MVT NewVT = NewOp.getSimpleValueType();
12700 if (isCommutedMOVLMask(cast<ShuffleVectorSDNode>(NewOp)->getMask(),
12701 NewVT, true, false))
12702 return getVZextMovL(VT, NewVT, NewOp.getOperand(0), DAG, Subtarget,
12705 } else if (ISD::isBuildVectorAllZeros(V1.getNode())) {
12706 SDValue NewOp = RewriteAsNarrowerShuffle(SVOp, DAG);
12707 if (NewOp.getNode()) {
12708 MVT NewVT = NewOp.getSimpleValueType();
12709 if (isMOVLMask(cast<ShuffleVectorSDNode>(NewOp)->getMask(), NewVT))
12710 return getVZextMovL(VT, NewVT, NewOp.getOperand(1), DAG, Subtarget,
12719 X86TargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const {
12720 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
12721 SDValue V1 = Op.getOperand(0);
12722 SDValue V2 = Op.getOperand(1);
12723 MVT VT = Op.getSimpleValueType();
12725 unsigned NumElems = VT.getVectorNumElements();
12726 bool V1IsUndef = V1.getOpcode() == ISD::UNDEF;
12727 bool V2IsUndef = V2.getOpcode() == ISD::UNDEF;
12728 bool V1IsSplat = false;
12729 bool V2IsSplat = false;
12730 bool HasSSE2 = Subtarget->hasSSE2();
12731 bool HasFp256 = Subtarget->hasFp256();
12732 bool HasInt256 = Subtarget->hasInt256();
12733 MachineFunction &MF = DAG.getMachineFunction();
12735 MF.getFunction()->hasFnAttribute(Attribute::OptimizeForSize);
12737 // Check if we should use the experimental vector shuffle lowering. If so,
12738 // delegate completely to that code path.
12739 if (ExperimentalVectorShuffleLowering)
12740 return lowerVectorShuffle(Op, Subtarget, DAG);
12742 assert(VT.getSizeInBits() != 64 && "Can't lower MMX shuffles");
12744 if (V1IsUndef && V2IsUndef)
12745 return DAG.getUNDEF(VT);
12747 // When we create a shuffle node we put the UNDEF node as the second operand,
12748 // but in some cases the first operand may be transformed to UNDEF.
12749 // In this case we should just commute the node.
12751 return DAG.getCommutedVectorShuffle(*SVOp);
12753 // Vector shuffle lowering takes 3 steps:
12755 // 1) Normalize the input vectors. Here splats, zeroed vectors, profitable
12756 // narrowing and commutation of operands should be handled.
12757 // 2) Matching of shuffles with known shuffle masks to x86 target specific
12759 // 3) Rewriting of unmatched masks into new generic shuffle operations,
12760 // so the shuffle can be broken into other shuffles and the legalizer can
12761 // try the lowering again.
12763 // The general idea is that no vector_shuffle operation should be left to
12764 // be matched during isel; all of them must be converted to a target specific node.
12767 // Normalize the input vectors. Here splats, zeroed vectors, profitable
12768 // narrowing and commutation of operands should be handled. The actual code
12769 // doesn't include all of those, work in progress...
12770 SDValue NewOp = NormalizeVectorShuffle(Op, Subtarget, DAG);
12771 if (NewOp.getNode())
12774 SmallVector<int, 8> M(SVOp->getMask().begin(), SVOp->getMask().end());
12776 // NOTE: isPSHUFDMask can also match both masks below (unpckl_undef and
12777 // unpckh_undef). Only use pshufd if speed is more important than size.
12778 if (OptForSize && isUNPCKL_v_undef_Mask(M, VT, HasInt256))
12779 return getTargetShuffleNode(X86ISD::UNPCKL, dl, VT, V1, V1, DAG);
12780 if (OptForSize && isUNPCKH_v_undef_Mask(M, VT, HasInt256))
12781 return getTargetShuffleNode(X86ISD::UNPCKH, dl, VT, V1, V1, DAG);
12783 if (isMOVDDUPMask(M, VT) && Subtarget->hasSSE3() &&
12784 V2IsUndef && MayFoldVectorLoad(V1))
12785 return getMOVDDup(Op, dl, V1, DAG);
12787 if (isMOVHLPS_v_undef_Mask(M, VT))
12788 return getMOVHighToLow(Op, dl, DAG);
12790 // Used to match splats.
12791 if (HasSSE2 && isUNPCKHMask(M, VT, HasInt256) && V2IsUndef &&
12792 (VT == MVT::v2f64 || VT == MVT::v2i64))
12793 return getTargetShuffleNode(X86ISD::UNPCKH, dl, VT, V1, V1, DAG);
12795 if (isPSHUFDMask(M, VT)) {
12796 // The actual implementation will match the mask in the if above and then
12797 // during isel it can match several different instructions, not only pshufd
12798 // as its name says, sad but true, emulate the behavior for now...
12799 if (isMOVDDUPMask(M, VT) && ((VT == MVT::v4f32 || VT == MVT::v2i64)))
12800 return getTargetShuffleNode(X86ISD::MOVLHPS, dl, VT, V1, V1, DAG);
12802 unsigned TargetMask = getShuffleSHUFImmediate(SVOp);
12804 if (HasSSE2 && (VT == MVT::v4f32 || VT == MVT::v4i32))
12805 return getTargetShuffleNode(X86ISD::PSHUFD, dl, VT, V1, TargetMask, DAG);
12807 if (HasFp256 && (VT == MVT::v4f32 || VT == MVT::v2f64))
12808 return getTargetShuffleNode(X86ISD::VPERMILPI, dl, VT, V1, TargetMask,
12811 return getTargetShuffleNode(X86ISD::SHUFP, dl, VT, V1, V1,
12815 if (isPALIGNRMask(M, VT, Subtarget))
12816 return getTargetShuffleNode(X86ISD::PALIGNR, dl, VT, V1, V2,
12817 getShufflePALIGNRImmediate(SVOp),
12820 if (isVALIGNMask(M, VT, Subtarget))
12821 return getTargetShuffleNode(X86ISD::VALIGN, dl, VT, V1, V2,
12822 getShuffleVALIGNImmediate(SVOp),
12825 // Check if this can be converted into a logical shift.
12826 bool isLeft = false;
12827 unsigned ShAmt = 0;
12829 bool isShift = HasSSE2 && isVectorShift(SVOp, DAG, isLeft, ShVal, ShAmt);
12830 if (isShift && ShVal.hasOneUse()) {
12831 // If the shifted value has multiple uses, it may be cheaper to use
12832 // v_set0 + movlhps or movhlps, etc.
12833 MVT EltVT = VT.getVectorElementType();
12834 ShAmt *= EltVT.getSizeInBits();
12835 return getVShift(isLeft, VT, ShVal, ShAmt, DAG, *this, dl);
12838 if (isMOVLMask(M, VT)) {
12839 if (ISD::isBuildVectorAllZeros(V1.getNode()))
12840 return getVZextMovL(VT, VT, V2, DAG, Subtarget, dl);
12841 if (!isMOVLPMask(M, VT)) {
12842 if (HasSSE2 && (VT == MVT::v2i64 || VT == MVT::v2f64))
12843 return getTargetShuffleNode(X86ISD::MOVSD, dl, VT, V1, V2, DAG);
12845 if (VT == MVT::v4i32 || VT == MVT::v4f32)
12846 return getTargetShuffleNode(X86ISD::MOVSS, dl, VT, V1, V2, DAG);
12850 // FIXME: fold these into legal mask.
12851 if (isMOVLHPSMask(M, VT) && !isUNPCKLMask(M, VT, HasInt256))
12852 return getMOVLowToHigh(Op, dl, DAG, HasSSE2);
12854 if (isMOVHLPSMask(M, VT))
12855 return getMOVHighToLow(Op, dl, DAG);
12857 if (V2IsUndef && isMOVSHDUPMask(M, VT, Subtarget))
12858 return getTargetShuffleNode(X86ISD::MOVSHDUP, dl, VT, V1, DAG);
12860 if (V2IsUndef && isMOVSLDUPMask(M, VT, Subtarget))
12861 return getTargetShuffleNode(X86ISD::MOVSLDUP, dl, VT, V1, DAG);
12863 if (isMOVLPMask(M, VT))
12864 return getMOVLP(Op, dl, DAG, HasSSE2);
12866 if (ShouldXformToMOVHLPS(M, VT) ||
12867 ShouldXformToMOVLP(V1.getNode(), V2.getNode(), M, VT))
12868 return DAG.getCommutedVectorShuffle(*SVOp);
12871 // No better options. Use a vshldq / vsrldq.
12872 MVT EltVT = VT.getVectorElementType();
12873 ShAmt *= EltVT.getSizeInBits();
12874 return getVShift(isLeft, VT, ShVal, ShAmt, DAG, *this, dl);
12877 bool Commuted = false;
12878 // FIXME: This should also accept a bitcast of a splat? Be careful, not
12879 // 1,1,1,1 -> v8i16 though.
12880 BitVector UndefElements;
12881 if (auto *BVOp = dyn_cast<BuildVectorSDNode>(V1.getNode()))
12882 if (BVOp->getConstantSplatNode(&UndefElements) && UndefElements.none())
12884 if (auto *BVOp = dyn_cast<BuildVectorSDNode>(V2.getNode()))
12885 if (BVOp->getConstantSplatNode(&UndefElements) && UndefElements.none())
12888 // Canonicalize the splat or undef, if present, to be on the RHS.
12889 if (!V2IsUndef && V1IsSplat && !V2IsSplat) {
12890 CommuteVectorShuffleMask(M, NumElems);
12892 std::swap(V1IsSplat, V2IsSplat);
12896 if (isCommutedMOVLMask(M, VT, V2IsSplat, V2IsUndef)) {
12897 // Shuffling low element of v1 into undef, just return v1.
12900 // If V2 is a splat, the mask may be malformed such as <4,3,3,3>, which
12901 // the instruction selector will not match, so get a canonical MOVL with
12902 // swapped operands to undo the commute.
12903 return getMOVL(DAG, dl, VT, V2, V1);
12906 if (isUNPCKLMask(M, VT, HasInt256))
12907 return getTargetShuffleNode(X86ISD::UNPCKL, dl, VT, V1, V2, DAG);
12909 if (isUNPCKHMask(M, VT, HasInt256))
12910 return getTargetShuffleNode(X86ISD::UNPCKH, dl, VT, V1, V2, DAG);
12913 // Normalize mask so all entries that point to V2 point to its first
12914 // element, then try to match unpck{h|l} again. If it matches, return a
12915 // new vector_shuffle with the corrected mask.
12916 SmallVector<int, 8> NewMask(M.begin(), M.end());
12917 NormalizeMask(NewMask, NumElems);
12918 if (isUNPCKLMask(NewMask, VT, HasInt256, true))
12919 return getTargetShuffleNode(X86ISD::UNPCKL, dl, VT, V1, V2, DAG);
12920 if (isUNPCKHMask(NewMask, VT, HasInt256, true))
12921 return getTargetShuffleNode(X86ISD::UNPCKH, dl, VT, V1, V2, DAG);
12925 // Commute it back and try unpck* again.
12926 // FIXME: this seems wrong.
12927 CommuteVectorShuffleMask(M, NumElems);
12929 std::swap(V1IsSplat, V2IsSplat);
12931 if (isUNPCKLMask(M, VT, HasInt256))
12932 return getTargetShuffleNode(X86ISD::UNPCKL, dl, VT, V1, V2, DAG);
12934 if (isUNPCKHMask(M, VT, HasInt256))
12935 return getTargetShuffleNode(X86ISD::UNPCKH, dl, VT, V1, V2, DAG);
12938 // Normalize the node to match x86 shuffle ops if needed
12939 if (!V2IsUndef && (isSHUFPMask(M, VT, /* Commuted */ true)))
12940 return DAG.getCommutedVectorShuffle(*SVOp);
12942 // The checks below are all present in isShuffleMaskLegal, but they are
12943 // inlined here right now to enable us to directly emit target specific
12944 // nodes, and remove one by one until they don't return Op anymore.
12946 if (ShuffleVectorSDNode::isSplatMask(&M[0], VT) &&
12947 SVOp->getSplatIndex() == 0 && V2IsUndef) {
12948 if (VT == MVT::v2f64 || VT == MVT::v2i64)
12949 return getTargetShuffleNode(X86ISD::UNPCKL, dl, VT, V1, V1, DAG);
12952 if (isPSHUFHWMask(M, VT, HasInt256))
12953 return getTargetShuffleNode(X86ISD::PSHUFHW, dl, VT, V1,
12954 getShufflePSHUFHWImmediate(SVOp),
12957 if (isPSHUFLWMask(M, VT, HasInt256))
12958 return getTargetShuffleNode(X86ISD::PSHUFLW, dl, VT, V1,
12959 getShufflePSHUFLWImmediate(SVOp),
12962 unsigned MaskValue;
12963 if (isBlendMask(M, VT, Subtarget->hasSSE41(), HasInt256, &MaskValue))
12964 return LowerVECTOR_SHUFFLEtoBlend(SVOp, MaskValue, Subtarget, DAG);
12966 if (isSHUFPMask(M, VT))
12967 return getTargetShuffleNode(X86ISD::SHUFP, dl, VT, V1, V2,
12968 getShuffleSHUFImmediate(SVOp), DAG);
12970 if (isUNPCKL_v_undef_Mask(M, VT, HasInt256))
12971 return getTargetShuffleNode(X86ISD::UNPCKL, dl, VT, V1, V1, DAG);
12972 if (isUNPCKH_v_undef_Mask(M, VT, HasInt256))
12973 return getTargetShuffleNode(X86ISD::UNPCKH, dl, VT, V1, V1, DAG);
12975 //===--------------------------------------------------------------------===//
12976 // Generate target specific nodes for 128 or 256-bit shuffles only
12977 // supported in the AVX instruction set.
12980 // Handle VMOVDDUPY permutations
12981 if (V2IsUndef && isMOVDDUPYMask(M, VT, HasFp256))
12982 return getTargetShuffleNode(X86ISD::MOVDDUP, dl, VT, V1, DAG);
12984 // Handle VPERMILPS/D* permutations
12985 if (isVPERMILPMask(M, VT)) {
12986 if ((HasInt256 && VT == MVT::v8i32) || VT == MVT::v16i32)
12987 return getTargetShuffleNode(X86ISD::PSHUFD, dl, VT, V1,
12988 getShuffleSHUFImmediate(SVOp), DAG);
12989 return getTargetShuffleNode(X86ISD::VPERMILPI, dl, VT, V1,
12990 getShuffleSHUFImmediate(SVOp), DAG);
12994 if (VT.is512BitVector() && isINSERT64x4Mask(M, VT, &Idx))
12995 return Insert256BitVector(V1, Extract256BitVector(V2, 0, DAG, dl),
12996 Idx*(NumElems/2), DAG, dl);
12998 // Handle VPERM2F128/VPERM2I128 permutations
12999 if (isVPERM2X128Mask(M, VT, HasFp256))
13000 return getTargetShuffleNode(X86ISD::VPERM2X128, dl, VT, V1,
13001 V2, getShuffleVPERM2X128Immediate(SVOp), DAG);
13003 if (Subtarget->hasSSE41() && isINSERTPSMask(M, VT))
13004 return getINSERTPS(SVOp, dl, DAG);
13007 if (V2IsUndef && HasInt256 && isPermImmMask(M, VT, Imm8))
13008 return getTargetShuffleNode(X86ISD::VPERMI, dl, VT, V1, Imm8, DAG);
13010 if ((V2IsUndef && HasInt256 && VT.is256BitVector() && NumElems == 8) ||
13011 VT.is512BitVector()) {
13012 MVT MaskEltVT = MVT::getIntegerVT(VT.getVectorElementType().getSizeInBits());
13013 MVT MaskVectorVT = MVT::getVectorVT(MaskEltVT, NumElems);
13014 SmallVector<SDValue, 16> permclMask;
13015 for (unsigned i = 0; i != NumElems; ++i) {
13016 permclMask.push_back(DAG.getConstant((M[i]>=0) ? M[i] : 0, MaskEltVT));
13019 SDValue Mask = DAG.getNode(ISD::BUILD_VECTOR, dl, MaskVectorVT, permclMask);
13021 // Bitcast is for VPERMPS since mask is v8i32 but node takes v8f32
13022 return DAG.getNode(X86ISD::VPERMV, dl, VT,
13023 DAG.getNode(ISD::BITCAST, dl, VT, Mask), V1);
13024 return DAG.getNode(X86ISD::VPERMV3, dl, VT, V1,
13025 DAG.getNode(ISD::BITCAST, dl, VT, Mask), V2);
13028 //===--------------------------------------------------------------------===//
13029 // Since no target specific shuffle was selected for this generic one,
13030 // lower it into other known shuffles. FIXME: this isn't true yet, but
13031 // this is the plan.
13034 // Handle v8i16 specifically since SSE can do byte extraction and insertion.
13035 if (VT == MVT::v8i16) {
13036 SDValue NewOp = LowerVECTOR_SHUFFLEv8i16(Op, Subtarget, DAG);
13037 if (NewOp.getNode())
13041 if (VT == MVT::v16i16 && HasInt256) {
13042 SDValue NewOp = LowerVECTOR_SHUFFLEv16i16(Op, DAG);
13043 if (NewOp.getNode())
13047 if (VT == MVT::v16i8) {
13048 SDValue NewOp = LowerVECTOR_SHUFFLEv16i8(SVOp, Subtarget, DAG);
13049 if (NewOp.getNode())
13053 if (VT == MVT::v32i8) {
13054 SDValue NewOp = LowerVECTOR_SHUFFLEv32i8(SVOp, Subtarget, DAG);
13055 if (NewOp.getNode())
13059 // Handle all 128-bit wide vectors with 4 elements, and match them with
13060 // several different shuffle types.
13061 if (NumElems == 4 && VT.is128BitVector())
13062 return LowerVECTOR_SHUFFLE_128v4(SVOp, DAG);
13064 // Handle general 256-bit shuffles
13065 if (VT.is256BitVector())
13066 return LowerVECTOR_SHUFFLE_256(SVOp, DAG);
13071 // This function assumes its argument is a BUILD_VECTOR of constants or
13072 // undef SDNodes. i.e: ISD::isBuildVectorOfConstantSDNodes(BuildVector) is
13074 static bool BUILD_VECTORtoBlendMask(BuildVectorSDNode *BuildVector,
13075 unsigned &MaskValue) {
13077 unsigned NumElems = BuildVector->getNumOperands();
13078 // There are 2 lanes if (NumElems > 8), and 1 lane otherwise.
13079 unsigned NumLanes = (NumElems - 1) / 8 + 1;
13080 unsigned NumElemsInLane = NumElems / NumLanes;
13082 // Blend for v16i16 should be symmetric for both lanes.
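// For example, a v8i32 condition of <-1,-1,0,0,-1,-1,0,0> (a single lane)
// yields MaskValue = 0xCC; each set bit selects the second blend operand.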
13083 for (unsigned i = 0; i < NumElemsInLane; ++i) {
13084 SDValue EltCond = BuildVector->getOperand(i);
13085 SDValue SndLaneEltCond =
13086 (NumLanes == 2) ? BuildVector->getOperand(i + NumElemsInLane) : EltCond;
13088 int Lane1Cond = -1, Lane2Cond = -1;
13089 if (isa<ConstantSDNode>(EltCond))
13090 Lane1Cond = !isZero(EltCond);
13091 if (isa<ConstantSDNode>(SndLaneEltCond))
13092 Lane2Cond = !isZero(SndLaneEltCond);
13094 if (Lane1Cond == Lane2Cond || Lane2Cond < 0)
13095 // Lane1Cond != 0, means we want the first argument.
13096 // Lane1Cond == 0, means we want the second argument.
13097 // The encoding of this argument is 0 for the first argument, 1
13098 // for the second. Therefore, invert the condition.
13099 MaskValue |= !Lane1Cond << i;
13100 else if (Lane1Cond < 0)
13101 MaskValue |= !Lane2Cond << i;
13108 /// \brief Try to lower a VSELECT instruction to an immediate-controlled blend
13110 static SDValue lowerVSELECTtoBLENDI(SDValue Op, const X86Subtarget *Subtarget,
13111 SelectionDAG &DAG) {
13112 SDValue Cond = Op.getOperand(0);
13113 SDValue LHS = Op.getOperand(1);
13114 SDValue RHS = Op.getOperand(2);
13116 MVT VT = Op.getSimpleValueType();
13117 MVT EltVT = VT.getVectorElementType();
13118 unsigned NumElems = VT.getVectorNumElements();
13120 // There is no blend with immediate in AVX-512.
13121 if (VT.is512BitVector())
13124 if (!Subtarget->hasSSE41() || EltVT == MVT::i8)
13126 if (!Subtarget->hasInt256() && VT == MVT::v16i16)
13129 if (!ISD::isBuildVectorOfConstantSDNodes(Cond.getNode()))
13132 // Check the mask for BLEND and build the value.
13133 unsigned MaskValue = 0;
13134 if (!BUILD_VECTORtoBlendMask(cast<BuildVectorSDNode>(Cond), MaskValue))
13137 // Convert i32 vectors to floating point if it is not AVX2.
13138 // AVX2 introduced VPBLENDD instruction for 128 and 256-bit vectors.
13140 if (EltVT == MVT::i64 || (EltVT == MVT::i32 && !Subtarget->hasInt256())) {
13141 BlendVT = MVT::getVectorVT(MVT::getFloatingPointVT(EltVT.getSizeInBits()),
13143 LHS = DAG.getNode(ISD::BITCAST, dl, VT, LHS);
13144 RHS = DAG.getNode(ISD::BITCAST, dl, VT, RHS);
13147 SDValue Ret = DAG.getNode(X86ISD::BLENDI, dl, BlendVT, LHS, RHS,
13148 DAG.getConstant(MaskValue, MVT::i32));
13149 return DAG.getNode(ISD::BITCAST, dl, VT, Ret);
13152 SDValue X86TargetLowering::LowerVSELECT(SDValue Op, SelectionDAG &DAG) const {
13153 // A vselect where all conditions and data are constants can be optimized into
13154 // a single vector load by SelectionDAGLegalize::ExpandBUILD_VECTOR().
13155 if (ISD::isBuildVectorOfConstantSDNodes(Op.getOperand(0).getNode()) &&
13156 ISD::isBuildVectorOfConstantSDNodes(Op.getOperand(1).getNode()) &&
13157 ISD::isBuildVectorOfConstantSDNodes(Op.getOperand(2).getNode()))
13160 SDValue BlendOp = lowerVSELECTtoBLENDI(Op, Subtarget, DAG);
13161 if (BlendOp.getNode())
13164 // Some types for vselect were previously set to Expand, not Legal or
13165 // Custom. Return an empty SDValue so we fall-through to Expand, after
13166 // the Custom lowering phase.
13167 MVT VT = Op.getSimpleValueType();
13168 switch (VT.SimpleTy) {
13173 if (Subtarget->hasBWI() && Subtarget->hasVLX())
13178 // We couldn't create a "Blend with immediate" node.
13179 // This node should still be legal, but we'll have to emit a blendv* instruction.
13184 static SDValue LowerEXTRACT_VECTOR_ELT_SSE4(SDValue Op, SelectionDAG &DAG) {
13185 MVT VT = Op.getSimpleValueType();
13188 if (!Op.getOperand(0).getSimpleValueType().is128BitVector())
13191 if (VT.getSizeInBits() == 8) {
13192 SDValue Extract = DAG.getNode(X86ISD::PEXTRB, dl, MVT::i32,
13193 Op.getOperand(0), Op.getOperand(1));
13194 SDValue Assert = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Extract,
13195 DAG.getValueType(VT));
13196 return DAG.getNode(ISD::TRUNCATE, dl, VT, Assert);
13199 if (VT.getSizeInBits() == 16) {
13200 unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
13201 // If Idx is 0, it's cheaper to do a move instead of a pextrw.
13203 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i16,
13204 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
13205 DAG.getNode(ISD::BITCAST, dl,
13208 Op.getOperand(1)));
13209 SDValue Extract = DAG.getNode(X86ISD::PEXTRW, dl, MVT::i32,
13210 Op.getOperand(0), Op.getOperand(1));
13211 SDValue Assert = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Extract,
13212 DAG.getValueType(VT));
13213 return DAG.getNode(ISD::TRUNCATE, dl, VT, Assert);
13216 if (VT == MVT::f32) {
13217 // EXTRACTPS outputs to a GPR32 register which will require a movd to copy
13218 // the result back to FR32 register. It's only worth matching if the
13219 // result has a single use which is a store or a bitcast to i32. And in
13220 // the case of a store, it's not worth it if the index is a constant 0,
13221 // because a MOVSSmr can be used instead, which is smaller and faster.
13222 if (!Op.hasOneUse())
13224 SDNode *User = *Op.getNode()->use_begin();
13225 if ((User->getOpcode() != ISD::STORE ||
13226 (isa<ConstantSDNode>(Op.getOperand(1)) &&
13227 cast<ConstantSDNode>(Op.getOperand(1))->isNullValue())) &&
13228 (User->getOpcode() != ISD::BITCAST ||
13229 User->getValueType(0) != MVT::i32))
13231 SDValue Extract = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
13232 DAG.getNode(ISD::BITCAST, dl, MVT::v4i32,
13235 return DAG.getNode(ISD::BITCAST, dl, MVT::f32, Extract);
13238 if (VT == MVT::i32 || VT == MVT::i64) {
13239 // ExtractPS/pextrq works with constant index.
13240 if (isa<ConstantSDNode>(Op.getOperand(1)))
13246 /// Extract one bit from mask vector, like v16i1 or v8i1.
13247 /// AVX-512 feature.
13249 X86TargetLowering::ExtractBitFromMaskVector(SDValue Op, SelectionDAG &DAG) const {
13250 SDValue Vec = Op.getOperand(0);
13252 MVT VecVT = Vec.getSimpleValueType();
13253 SDValue Idx = Op.getOperand(1);
13254 MVT EltVT = Op.getSimpleValueType();
13256 assert((EltVT == MVT::i1) && "Unexpected operands in ExtractBitFromMaskVector");
13257 assert((VecVT.getVectorNumElements() <= 16 || Subtarget->hasBWI()) &&
13258 "Unexpected vector type in ExtractBitFromMaskVector");
13260 // variable index can't be handled in mask registers,
13261 // extend vector to VR512
13262 if (!isa<ConstantSDNode>(Idx)) {
13263 MVT ExtVT = (VecVT == MVT::v8i1 ? MVT::v8i64 : MVT::v16i32);
13264 SDValue Ext = DAG.getNode(ISD::ZERO_EXTEND, dl, ExtVT, Vec);
13265 SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl,
13266 ExtVT.getVectorElementType(), Ext, Idx);
13267 return DAG.getNode(ISD::TRUNCATE, dl, EltVT, Elt);
13270 unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
13271 const TargetRegisterClass* rc = getRegClassFor(VecVT);
13272 if (!Subtarget->hasDQI() && (VecVT.getVectorNumElements() <= 8))
13273 rc = getRegClassFor(MVT::v16i1);
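// Isolate the requested bit by shifting it up to the top of the mask register
// and then logically shifting it all the way back down to bit 0; e.g. with a
// 16-bit mask register, extracting bit 3 becomes (k << 12) >> 15.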
13274 unsigned MaxSift = rc->getSize()*8 - 1;
13275 Vec = DAG.getNode(X86ISD::VSHLI, dl, VecVT, Vec,
13276 DAG.getConstant(MaxSift - IdxVal, MVT::i8));
13277 Vec = DAG.getNode(X86ISD::VSRLI, dl, VecVT, Vec,
13278 DAG.getConstant(MaxSift, MVT::i8));
13279 return DAG.getNode(X86ISD::VEXTRACT, dl, MVT::i1, Vec,
13280 DAG.getIntPtrConstant(0));
13284 X86TargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op,
13285 SelectionDAG &DAG) const {
13287 SDValue Vec = Op.getOperand(0);
13288 MVT VecVT = Vec.getSimpleValueType();
13289 SDValue Idx = Op.getOperand(1);
13291 if (Op.getSimpleValueType() == MVT::i1)
13292 return ExtractBitFromMaskVector(Op, DAG);
13294 if (!isa<ConstantSDNode>(Idx)) {
13295 if (VecVT.is512BitVector() ||
13296 (VecVT.is256BitVector() && Subtarget->hasInt256() &&
13297 VecVT.getVectorElementType().getSizeInBits() == 32)) {
13300 MVT::getIntegerVT(VecVT.getVectorElementType().getSizeInBits());
13301 MVT MaskVT = MVT::getVectorVT(MaskEltVT, VecVT.getSizeInBits() /
13302 MaskEltVT.getSizeInBits());
13304 Idx = DAG.getZExtOrTrunc(Idx, dl, MaskEltVT);
13305 SDValue Mask = DAG.getNode(X86ISD::VINSERT, dl, MaskVT,
13306 getZeroVector(MaskVT, Subtarget, DAG, dl),
13307 Idx, DAG.getConstant(0, getPointerTy()));
13308 SDValue Perm = DAG.getNode(X86ISD::VPERMV, dl, VecVT, Mask, Vec);
13309 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, Op.getValueType(),
13310 Perm, DAG.getConstant(0, getPointerTy()));
13315 // If this is a 256-bit vector result, first extract the 128-bit vector and
13316 // then extract the element from the 128-bit vector.
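// For example, extracting element 5 of a v8i32 becomes extracting element 1
// of its upper 128-bit half.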
13317 if (VecVT.is256BitVector() || VecVT.is512BitVector()) {
13319 unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
13320 // Get the 128-bit vector.
13321 Vec = Extract128BitVector(Vec, IdxVal, DAG, dl);
13322 MVT EltVT = VecVT.getVectorElementType();
13324 unsigned ElemsPerChunk = 128 / EltVT.getSizeInBits();
13326 //if (IdxVal >= NumElems/2)
13327 // IdxVal -= NumElems/2;
13328 IdxVal -= (IdxVal/ElemsPerChunk)*ElemsPerChunk;
13329 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, Op.getValueType(), Vec,
13330 DAG.getConstant(IdxVal, MVT::i32));
13333 assert(VecVT.is128BitVector() && "Unexpected vector length");
13335 if (Subtarget->hasSSE41()) {
13336 SDValue Res = LowerEXTRACT_VECTOR_ELT_SSE4(Op, DAG);
13341 MVT VT = Op.getSimpleValueType();
13342 // TODO: handle v16i8.
13343 if (VT.getSizeInBits() == 16) {
13344 SDValue Vec = Op.getOperand(0);
13345 unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
13347 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i16,
13348 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
13349 DAG.getNode(ISD::BITCAST, dl,
13351 Op.getOperand(1)));
13352 // Transform it so it matches pextrw which produces a 32-bit result.
13353 MVT EltVT = MVT::i32;
13354 SDValue Extract = DAG.getNode(X86ISD::PEXTRW, dl, EltVT,
13355 Op.getOperand(0), Op.getOperand(1));
13356 SDValue Assert = DAG.getNode(ISD::AssertZext, dl, EltVT, Extract,
13357 DAG.getValueType(VT));
13358 return DAG.getNode(ISD::TRUNCATE, dl, VT, Assert);
13361 if (VT.getSizeInBits() == 32) {
13362 unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
13366 // SHUFPS the element to the lowest double word, then movss.
13367 int Mask[4] = { static_cast<int>(Idx), -1, -1, -1 };
13368 MVT VVT = Op.getOperand(0).getSimpleValueType();
13369 SDValue Vec = DAG.getVectorShuffle(VVT, dl, Op.getOperand(0),
13370 DAG.getUNDEF(VVT), Mask);
13371 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Vec,
13372 DAG.getIntPtrConstant(0));
13375 if (VT.getSizeInBits() == 64) {
13376 // FIXME: .td only matches this for <2 x f64>, not <2 x i64> on 32b
13377 // FIXME: seems like this should be unnecessary if mov{h,l}pd were taught
13378 // to match extract_elt for f64.
13379 unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
13383 // UNPCKHPD the element to the lowest double word, then movsd.
13384 // Note if the lower 64 bits of the result of the UNPCKHPD is then stored
13385 // to a f64mem, the whole operation is folded into a single MOVHPDmr.
13386 int Mask[2] = { 1, -1 };
13387 MVT VVT = Op.getOperand(0).getSimpleValueType();
13388 SDValue Vec = DAG.getVectorShuffle(VVT, dl, Op.getOperand(0),
13389 DAG.getUNDEF(VVT), Mask);
13390 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Vec,
13391 DAG.getIntPtrConstant(0));
13397 /// Insert one bit to mask vector, like v16i1 or v8i1.
13398 /// AVX-512 feature.
13400 X86TargetLowering::InsertBitToMaskVector(SDValue Op, SelectionDAG &DAG) const {
13402 SDValue Vec = Op.getOperand(0);
13403 SDValue Elt = Op.getOperand(1);
13404 SDValue Idx = Op.getOperand(2);
13405 MVT VecVT = Vec.getSimpleValueType();
13407 if (!isa<ConstantSDNode>(Idx)) {
13408 // Non constant index. Extend source and destination,
13409 // insert element and then truncate the result.
13410 MVT ExtVecVT = (VecVT == MVT::v8i1 ? MVT::v8i64 : MVT::v16i32);
13411 MVT ExtEltVT = (VecVT == MVT::v8i1 ? MVT::i64 : MVT::i32);
13412 SDValue ExtOp = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, ExtVecVT,
13413 DAG.getNode(ISD::ZERO_EXTEND, dl, ExtVecVT, Vec),
13414 DAG.getNode(ISD::ZERO_EXTEND, dl, ExtEltVT, Elt), Idx);
13415 return DAG.getNode(ISD::TRUNCATE, dl, VecVT, ExtOp);
13418 unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
13419 SDValue EltInVec = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VecVT, Elt);
13420 if (Vec.getOpcode() == ISD::UNDEF)
13421 return DAG.getNode(X86ISD::VSHLI, dl, VecVT, EltInVec,
13422 DAG.getConstant(IdxVal, MVT::i8));
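// Otherwise, isolate the incoming bit (bit 0 of EltInVec) by shifting it to
// the top of the mask register, shift it back down to position IdxVal, and
// OR it into the existing mask value.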
13423 const TargetRegisterClass* rc = getRegClassFor(VecVT);
13424 unsigned MaxSift = rc->getSize()*8 - 1;
13425 EltInVec = DAG.getNode(X86ISD::VSHLI, dl, VecVT, EltInVec,
13426 DAG.getConstant(MaxSift, MVT::i8));
13427 EltInVec = DAG.getNode(X86ISD::VSRLI, dl, VecVT, EltInVec,
13428 DAG.getConstant(MaxSift - IdxVal, MVT::i8));
13429 return DAG.getNode(ISD::OR, dl, VecVT, Vec, EltInVec);
13432 SDValue X86TargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op,
13433 SelectionDAG &DAG) const {
13434 MVT VT = Op.getSimpleValueType();
13435 MVT EltVT = VT.getVectorElementType();
13437 if (EltVT == MVT::i1)
13438 return InsertBitToMaskVector(Op, DAG);
13441 SDValue N0 = Op.getOperand(0);
13442 SDValue N1 = Op.getOperand(1);
13443 SDValue N2 = Op.getOperand(2);
13444 if (!isa<ConstantSDNode>(N2))
13446 auto *N2C = cast<ConstantSDNode>(N2);
13447 unsigned IdxVal = N2C->getZExtValue();
13449 // If the vector is wider than 128 bits, extract the 128-bit subvector, insert
13450 // into that, and then insert the subvector back into the result.
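// For example, inserting into element 5 of a v8i32 inserts into element 1 of
// the upper 128-bit half and then writes that half back.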
13451 if (VT.is256BitVector() || VT.is512BitVector()) {
13452 // Get the desired 128-bit vector half.
13453 SDValue V = Extract128BitVector(N0, IdxVal, DAG, dl);
13455 // Insert the element into the desired half.
13456 unsigned NumEltsIn128 = 128 / EltVT.getSizeInBits();
13457 unsigned IdxIn128 = IdxVal - (IdxVal / NumEltsIn128) * NumEltsIn128;
13459 V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, V.getValueType(), V, N1,
13460 DAG.getConstant(IdxIn128, MVT::i32));
13462 // Insert the changed part back to the 256-bit vector
13463 return Insert128BitVector(N0, V, IdxVal, DAG, dl);
13465 assert(VT.is128BitVector() && "Only 128-bit vector types should be left!");
13467 if (Subtarget->hasSSE41()) {
13468 if (EltVT.getSizeInBits() == 8 || EltVT.getSizeInBits() == 16) {
13470 if (VT == MVT::v8i16) {
13471 Opc = X86ISD::PINSRW;
13473 assert(VT == MVT::v16i8);
13474 Opc = X86ISD::PINSRB;
13477 // Transform it so it matches pinsr{b,w} which expects a GR32 as its second
13479 if (N1.getValueType() != MVT::i32)
13480 N1 = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, N1);
13481 if (N2.getValueType() != MVT::i32)
13482 N2 = DAG.getIntPtrConstant(IdxVal);
13483 return DAG.getNode(Opc, dl, VT, N0, N1, N2);
13486 if (EltVT == MVT::f32) {
13487 // Bits [7:6] of the constant are the source select. This will always be
13488 // zero here. The DAG Combiner may combine an extract_elt index into these
13490 // bits. For example (insert (extract, 3), 2) could be matched by putting
13492 // the '3' into bits [7:6] of X86ISD::INSERTPS.
13493 // Bits [5:4] of the constant are the destination select. This is the
13494 // value of the incoming immediate.
13495 // Bits [3:0] of the constant are the zero mask. The DAG Combiner may
13496 // combine either bitwise AND or insert of float 0.0 to set these bits.
13497 N2 = DAG.getIntPtrConstant(IdxVal << 4);
13498 // Create this as a scalar to vector.
13499 N1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4f32, N1);
13500 return DAG.getNode(X86ISD::INSERTPS, dl, VT, N0, N1, N2);
13503 if (EltVT == MVT::i32 || EltVT == MVT::i64) {
13504 // PINSR* works with constant index.
13509 if (EltVT == MVT::i8)
13512 if (EltVT.getSizeInBits() == 16) {
13513 // Transform it so it matches pinsrw which expects a 16-bit value in a GR32
13514 // as its second argument.
13515 if (N1.getValueType() != MVT::i32)
13516 N1 = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, N1);
13517 if (N2.getValueType() != MVT::i32)
13518 N2 = DAG.getIntPtrConstant(IdxVal);
13519 return DAG.getNode(X86ISD::PINSRW, dl, VT, N0, N1, N2);
13524 static SDValue LowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG) {
13526 MVT OpVT = Op.getSimpleValueType();
13528 // If this is a 256-bit vector result, first insert into a 128-bit
13529 // vector and then insert into the 256-bit vector.
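// For example, (v8i32 scalar_to_vector x) is built as a v4i32
// scalar_to_vector inserted at index 0 of an undef v8i32.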
13530 if (!OpVT.is128BitVector()) {
13531 // Insert into a 128-bit vector.
13532 unsigned SizeFactor = OpVT.getSizeInBits()/128;
13533 MVT VT128 = MVT::getVectorVT(OpVT.getVectorElementType(),
13534 OpVT.getVectorNumElements() / SizeFactor);
13536 Op = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT128, Op.getOperand(0));
13538 // Insert the 128-bit vector.
13539 return Insert128BitVector(DAG.getUNDEF(OpVT), Op, 0, DAG, dl);
13542 if (OpVT == MVT::v1i64 &&
13543 Op.getOperand(0).getValueType() == MVT::i64)
13544 return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v1i64, Op.getOperand(0));
13546 SDValue AnyExt = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, Op.getOperand(0));
13547 assert(OpVT.is128BitVector() && "Expected an SSE type!");
13548 return DAG.getNode(ISD::BITCAST, dl, OpVT,
13549 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32,AnyExt));
13552 // Lower a node with an EXTRACT_SUBVECTOR opcode. This may result in
13553 // a simple subregister reference or explicit instructions to grab
13554 // upper bits of a vector.
13555 static SDValue LowerEXTRACT_SUBVECTOR(SDValue Op, const X86Subtarget *Subtarget,
13556 SelectionDAG &DAG) {
13558 SDValue In = Op.getOperand(0);
13559 SDValue Idx = Op.getOperand(1);
13560 unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
13561 MVT ResVT = Op.getSimpleValueType();
13562 MVT InVT = In.getSimpleValueType();
13564 if (Subtarget->hasFp256()) {
13565 if (ResVT.is128BitVector() &&
13566 (InVT.is256BitVector() || InVT.is512BitVector()) &&
13567 isa<ConstantSDNode>(Idx)) {
13568 return Extract128BitVector(In, IdxVal, DAG, dl);
13570 if (ResVT.is256BitVector() && InVT.is512BitVector() &&
13571 isa<ConstantSDNode>(Idx)) {
13572 return Extract256BitVector(In, IdxVal, DAG, dl);
13578 // Lower a node with an INSERT_SUBVECTOR opcode. This may result in a
13579 // simple superregister reference or explicit instructions to insert
13580 // the upper bits of a vector.
13581 static SDValue LowerINSERT_SUBVECTOR(SDValue Op, const X86Subtarget *Subtarget,
13582 SelectionDAG &DAG) {
13583 if (!Subtarget->hasAVX())
13587 SDValue Vec = Op.getOperand(0);
13588 SDValue SubVec = Op.getOperand(1);
13589 SDValue Idx = Op.getOperand(2);
13591 if (!isa<ConstantSDNode>(Idx))
13594 unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
13595 MVT OpVT = Op.getSimpleValueType();
13596 MVT SubVecVT = SubVec.getSimpleValueType();
13598 // Fold two 16-byte subvector loads into one 32-byte load:
13599 // (insert_subvector (insert_subvector undef, (load addr), 0),
13600 // (load addr + 16), Elts/2)
13602 if ((IdxVal == OpVT.getVectorNumElements() / 2) &&
13603 Vec.getOpcode() == ISD::INSERT_SUBVECTOR &&
13604 OpVT.is256BitVector() && SubVecVT.is128BitVector() &&
13605 !Subtarget->isUnalignedMem32Slow()) {
13606 SDValue SubVec2 = Vec.getOperand(1);
13607 if (auto *Idx2 = dyn_cast<ConstantSDNode>(Vec.getOperand(2))) {
13608 if (Idx2->getZExtValue() == 0) {
13609 SDValue Ops[] = { SubVec2, SubVec };
13610 SDValue LD = EltsFromConsecutiveLoads(OpVT, Ops, dl, DAG, false);
13617 if ((OpVT.is256BitVector() || OpVT.is512BitVector()) &&
13618 SubVecVT.is128BitVector())
13619 return Insert128BitVector(Vec, SubVec, IdxVal, DAG, dl);
13621 if (OpVT.is512BitVector() && SubVecVT.is256BitVector())
13622 return Insert256BitVector(Vec, SubVec, IdxVal, DAG, dl);
13627 // ConstantPool, JumpTable, GlobalAddress, and ExternalSymbol are lowered as
13628 // their target counterpart wrapped in the X86ISD::Wrapper node. Suppose N is
13629 // one of the above mentioned nodes. It has to be wrapped because otherwise
13630 // Select(N) returns N. So the raw TargetGlobalAddress nodes, etc. can only
13631 // be used to form addressing modes. These wrapped nodes will be selected into MOV32ri.
13634 X86TargetLowering::LowerConstantPool(SDValue Op, SelectionDAG &DAG) const {
13635 ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
13637 // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
13638 // global base reg.
13639 unsigned char OpFlag = 0;
13640 unsigned WrapperKind = X86ISD::Wrapper;
13641 CodeModel::Model M = DAG.getTarget().getCodeModel();
13643 if (Subtarget->isPICStyleRIPRel() &&
13644 (M == CodeModel::Small || M == CodeModel::Kernel))
13645 WrapperKind = X86ISD::WrapperRIP;
13646 else if (Subtarget->isPICStyleGOT())
13647 OpFlag = X86II::MO_GOTOFF;
13648 else if (Subtarget->isPICStyleStubPIC())
13649 OpFlag = X86II::MO_PIC_BASE_OFFSET;
13651 SDValue Result = DAG.getTargetConstantPool(CP->getConstVal(), getPointerTy(),
13652 CP->getAlignment(),
13653 CP->getOffset(), OpFlag);
13655 Result = DAG.getNode(WrapperKind, DL, getPointerTy(), Result);
13656 // With PIC, the address is actually $g + Offset.
13658 Result = DAG.getNode(ISD::ADD, DL, getPointerTy(),
13659 DAG.getNode(X86ISD::GlobalBaseReg,
13660 SDLoc(), getPointerTy()),
13667 SDValue X86TargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const {
13668 JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
13670 // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
13671 // global base reg.
13672 unsigned char OpFlag = 0;
13673 unsigned WrapperKind = X86ISD::Wrapper;
13674 CodeModel::Model M = DAG.getTarget().getCodeModel();
13676 if (Subtarget->isPICStyleRIPRel() &&
13677 (M == CodeModel::Small || M == CodeModel::Kernel))
13678 WrapperKind = X86ISD::WrapperRIP;
13679 else if (Subtarget->isPICStyleGOT())
13680 OpFlag = X86II::MO_GOTOFF;
13681 else if (Subtarget->isPICStyleStubPIC())
13682 OpFlag = X86II::MO_PIC_BASE_OFFSET;
13684 SDValue Result = DAG.getTargetJumpTable(JT->getIndex(), getPointerTy(),
13687 Result = DAG.getNode(WrapperKind, DL, getPointerTy(), Result);
13689 // With PIC, the address is actually $g + Offset.
13691 Result = DAG.getNode(ISD::ADD, DL, getPointerTy(),
13692 DAG.getNode(X86ISD::GlobalBaseReg,
13693 SDLoc(), getPointerTy()),
13700 X86TargetLowering::LowerExternalSymbol(SDValue Op, SelectionDAG &DAG) const {
13701 const char *Sym = cast<ExternalSymbolSDNode>(Op)->getSymbol();
13703 // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
13704 // global base reg.
13705 unsigned char OpFlag = 0;
13706 unsigned WrapperKind = X86ISD::Wrapper;
13707 CodeModel::Model M = DAG.getTarget().getCodeModel();
13709 if (Subtarget->isPICStyleRIPRel() &&
13710 (M == CodeModel::Small || M == CodeModel::Kernel)) {
13711 if (Subtarget->isTargetDarwin() || Subtarget->isTargetELF())
13712 OpFlag = X86II::MO_GOTPCREL;
13713 WrapperKind = X86ISD::WrapperRIP;
13714 } else if (Subtarget->isPICStyleGOT()) {
13715 OpFlag = X86II::MO_GOT;
13716 } else if (Subtarget->isPICStyleStubPIC()) {
13717 OpFlag = X86II::MO_DARWIN_NONLAZY_PIC_BASE;
13718 } else if (Subtarget->isPICStyleStubNoDynamic()) {
13719 OpFlag = X86II::MO_DARWIN_NONLAZY;
13722 SDValue Result = DAG.getTargetExternalSymbol(Sym, getPointerTy(), OpFlag);
13725 Result = DAG.getNode(WrapperKind, DL, getPointerTy(), Result);
13727 // With PIC, the address is actually $g + Offset.
13728 if (DAG.getTarget().getRelocationModel() == Reloc::PIC_ &&
13729 !Subtarget->is64Bit()) {
13730 Result = DAG.getNode(ISD::ADD, DL, getPointerTy(),
13731 DAG.getNode(X86ISD::GlobalBaseReg,
13732 SDLoc(), getPointerTy()),
13736 // For symbols that require a load from a stub to get the address, emit the
13738 if (isGlobalStubReference(OpFlag))
13739 Result = DAG.getLoad(getPointerTy(), DL, DAG.getEntryNode(), Result,
13740 MachinePointerInfo::getGOT(), false, false, false, 0);
13746 X86TargetLowering::LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const {
13747 // Create the TargetBlockAddressAddress node.
13748 unsigned char OpFlags =
13749 Subtarget->ClassifyBlockAddressReference();
13750 CodeModel::Model M = DAG.getTarget().getCodeModel();
13751 const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
13752 int64_t Offset = cast<BlockAddressSDNode>(Op)->getOffset();
13754 SDValue Result = DAG.getTargetBlockAddress(BA, getPointerTy(), Offset,
13757 if (Subtarget->isPICStyleRIPRel() &&
13758 (M == CodeModel::Small || M == CodeModel::Kernel))
13759 Result = DAG.getNode(X86ISD::WrapperRIP, dl, getPointerTy(), Result);
13761 Result = DAG.getNode(X86ISD::Wrapper, dl, getPointerTy(), Result);
13763 // With PIC, the address is actually $g + Offset.
13764 if (isGlobalRelativeToPICBase(OpFlags)) {
13765 Result = DAG.getNode(ISD::ADD, dl, getPointerTy(),
13766 DAG.getNode(X86ISD::GlobalBaseReg, dl, getPointerTy()),
13774 X86TargetLowering::LowerGlobalAddress(const GlobalValue *GV, SDLoc dl,
13775 int64_t Offset, SelectionDAG &DAG) const {
13776 // Create the TargetGlobalAddress node, folding in the constant
13777 // offset if it is legal.
13778 unsigned char OpFlags =
13779 Subtarget->ClassifyGlobalReference(GV, DAG.getTarget());
13780 CodeModel::Model M = DAG.getTarget().getCodeModel();
13782 if (OpFlags == X86II::MO_NO_FLAG &&
13783 X86::isOffsetSuitableForCodeModel(Offset, M)) {
13784 // A direct static reference to a global.
13785 Result = DAG.getTargetGlobalAddress(GV, dl, getPointerTy(), Offset);
13788 Result = DAG.getTargetGlobalAddress(GV, dl, getPointerTy(), 0, OpFlags);
13791 if (Subtarget->isPICStyleRIPRel() &&
13792 (M == CodeModel::Small || M == CodeModel::Kernel))
13793 Result = DAG.getNode(X86ISD::WrapperRIP, dl, getPointerTy(), Result);
13795 Result = DAG.getNode(X86ISD::Wrapper, dl, getPointerTy(), Result);
13797 // With PIC, the address is actually $g + Offset.
13798 if (isGlobalRelativeToPICBase(OpFlags)) {
13799 Result = DAG.getNode(ISD::ADD, dl, getPointerTy(),
13800 DAG.getNode(X86ISD::GlobalBaseReg, dl, getPointerTy()),
13804 // For globals that require a load from a stub to get the address, emit the
13806 if (isGlobalStubReference(OpFlags))
13807 Result = DAG.getLoad(getPointerTy(), dl, DAG.getEntryNode(), Result,
13808 MachinePointerInfo::getGOT(), false, false, false, 0);
13810 // If there was a non-zero offset that we didn't fold, create an explicit
13811 // addition for it.
13813 Result = DAG.getNode(ISD::ADD, dl, getPointerTy(), Result,
13814 DAG.getConstant(Offset, getPointerTy()));
13820 X86TargetLowering::LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const {
13821 const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
13822 int64_t Offset = cast<GlobalAddressSDNode>(Op)->getOffset();
13823 return LowerGlobalAddress(GV, SDLoc(Op), Offset, DAG);
13827 GetTLSADDR(SelectionDAG &DAG, SDValue Chain, GlobalAddressSDNode *GA,
13828 SDValue *InFlag, const EVT PtrVT, unsigned ReturnReg,
13829 unsigned char OperandFlags, bool LocalDynamic = false) {
13830 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
13831 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
13833 SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl,
13834 GA->getValueType(0),
13838 X86ISD::NodeType CallType = LocalDynamic ? X86ISD::TLSBASEADDR
13842 SDValue Ops[] = { Chain, TGA, *InFlag };
13843 Chain = DAG.getNode(CallType, dl, NodeTys, Ops);
13845 SDValue Ops[] = { Chain, TGA };
13846 Chain = DAG.getNode(CallType, dl, NodeTys, Ops);
13849 // TLSADDR will be codegen'ed as call. Inform MFI that function has calls.
13850 MFI->setAdjustsStack(true);
13851 MFI->setHasCalls(true);
13853 SDValue Flag = Chain.getValue(1);
13854 return DAG.getCopyFromReg(Chain, dl, ReturnReg, PtrVT, Flag);
13857 // Lower ISD::GlobalTLSAddress using the "general dynamic" model, 32 bit
13859 LowerToTLSGeneralDynamicModel32(GlobalAddressSDNode *GA, SelectionDAG &DAG,
13862 SDLoc dl(GA); // ? function entry point might be better
13863 SDValue Chain = DAG.getCopyToReg(DAG.getEntryNode(), dl, X86::EBX,
13864 DAG.getNode(X86ISD::GlobalBaseReg,
13865 SDLoc(), PtrVT), InFlag);
13866 InFlag = Chain.getValue(1);
13868 return GetTLSADDR(DAG, Chain, GA, &InFlag, PtrVT, X86::EAX, X86II::MO_TLSGD);
13871 // Lower ISD::GlobalTLSAddress using the "general dynamic" model, 64 bit
13873 LowerToTLSGeneralDynamicModel64(GlobalAddressSDNode *GA, SelectionDAG &DAG,
13875 return GetTLSADDR(DAG, DAG.getEntryNode(), GA, nullptr, PtrVT,
13876 X86::RAX, X86II::MO_TLSGD);
13879 static SDValue LowerToTLSLocalDynamicModel(GlobalAddressSDNode *GA,
13885 // Get the start address of the TLS block for this module.
13886 X86MachineFunctionInfo* MFI = DAG.getMachineFunction()
13887 .getInfo<X86MachineFunctionInfo>();
13888 MFI->incNumLocalDynamicTLSAccesses();
13892 Base = GetTLSADDR(DAG, DAG.getEntryNode(), GA, nullptr, PtrVT, X86::RAX,
13893 X86II::MO_TLSLD, /*LocalDynamic=*/true);
13896 SDValue Chain = DAG.getCopyToReg(DAG.getEntryNode(), dl, X86::EBX,
13897 DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), PtrVT), InFlag);
13898 InFlag = Chain.getValue(1);
13899 Base = GetTLSADDR(DAG, Chain, GA, &InFlag, PtrVT, X86::EAX,
13900 X86II::MO_TLSLDM, /*LocalDynamic=*/true);
13903 // Note: the CleanupLocalDynamicTLSPass will remove redundant computations
13907 unsigned char OperandFlags = X86II::MO_DTPOFF;
13908 unsigned WrapperKind = X86ISD::Wrapper;
13909 SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl,
13910 GA->getValueType(0),
13911 GA->getOffset(), OperandFlags);
13912 SDValue Offset = DAG.getNode(WrapperKind, dl, PtrVT, TGA);
13914 // Add x@dtpoff with the base.
13915 return DAG.getNode(ISD::ADD, dl, PtrVT, Offset, Base);
13918 // Lower ISD::GlobalTLSAddress using the "initial exec" or "local exec" model.
13919 static SDValue LowerToTLSExecModel(GlobalAddressSDNode *GA, SelectionDAG &DAG,
13920 const EVT PtrVT, TLSModel::Model model,
13921 bool is64Bit, bool isPIC) {
13924 // Get the Thread Pointer, which is %gs:0 (32-bit) or %fs:0 (64-bit).
13925 Value *Ptr = Constant::getNullValue(Type::getInt8PtrTy(*DAG.getContext(),
13926 is64Bit ? 257 : 256));
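// In the X86 backend, address space 256 refers to the GS segment and 257 to
// the FS segment, so this constant-null pointer describes %gs:0 / %fs:0.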
13928 SDValue ThreadPointer =
13929 DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), DAG.getIntPtrConstant(0),
13930 MachinePointerInfo(Ptr), false, false, false, 0);
13932 unsigned char OperandFlags = 0;
13933 // Most TLS accesses are not RIP relative, even on x86-64. One exception is
13935 unsigned WrapperKind = X86ISD::Wrapper;
13936 if (model == TLSModel::LocalExec) {
13937 OperandFlags = is64Bit ? X86II::MO_TPOFF : X86II::MO_NTPOFF;
13938 } else if (model == TLSModel::InitialExec) {
13940 OperandFlags = X86II::MO_GOTTPOFF;
13941 WrapperKind = X86ISD::WrapperRIP;
13943 OperandFlags = isPIC ? X86II::MO_GOTNTPOFF : X86II::MO_INDNTPOFF;
13946 llvm_unreachable("Unexpected model");
13949 // emit "addl x@ntpoff,%eax" (local exec)
13950 // or "addl x@indntpoff,%eax" (initial exec)
13951 // or "addl x@gotntpoff(%ebx) ,%eax" (initial exec, 32-bit pic)
13953 DAG.getTargetGlobalAddress(GA->getGlobal(), dl, GA->getValueType(0),
13954 GA->getOffset(), OperandFlags);
13955 SDValue Offset = DAG.getNode(WrapperKind, dl, PtrVT, TGA);
13957 if (model == TLSModel::InitialExec) {
13958 if (isPIC && !is64Bit) {
13959 Offset = DAG.getNode(ISD::ADD, dl, PtrVT,
13960 DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), PtrVT),
13964 Offset = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Offset,
13965 MachinePointerInfo::getGOT(), false, false, false, 0);
13968 // The address of the thread local variable is the add of the thread
13969 // pointer with the offset of the variable.
13970 return DAG.getNode(ISD::ADD, dl, PtrVT, ThreadPointer, Offset);
13974 X86TargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const {
13976 GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
13977 const GlobalValue *GV = GA->getGlobal();
13979 if (Subtarget->isTargetELF()) {
13980 TLSModel::Model model = DAG.getTarget().getTLSModel(GV);
13983 case TLSModel::GeneralDynamic:
13984 if (Subtarget->is64Bit())
13985 return LowerToTLSGeneralDynamicModel64(GA, DAG, getPointerTy());
13986 return LowerToTLSGeneralDynamicModel32(GA, DAG, getPointerTy());
13987 case TLSModel::LocalDynamic:
13988 return LowerToTLSLocalDynamicModel(GA, DAG, getPointerTy(),
13989 Subtarget->is64Bit());
13990 case TLSModel::InitialExec:
13991 case TLSModel::LocalExec:
13992 return LowerToTLSExecModel(
13993 GA, DAG, getPointerTy(), model, Subtarget->is64Bit(),
13994 DAG.getTarget().getRelocationModel() == Reloc::PIC_);
13996 llvm_unreachable("Unknown TLS model.");
13999 if (Subtarget->isTargetDarwin()) {
14000 // Darwin only has one model of TLS. Lower to that.
14001 unsigned char OpFlag = 0;
14002 unsigned WrapperKind = Subtarget->isPICStyleRIPRel() ?
14003 X86ISD::WrapperRIP : X86ISD::Wrapper;
14005 // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
14006 // global base reg.
14007 bool PIC32 = (DAG.getTarget().getRelocationModel() == Reloc::PIC_) &&
14008 !Subtarget->is64Bit();
14010 OpFlag = X86II::MO_TLVP_PIC_BASE;
14012 OpFlag = X86II::MO_TLVP;
14014 SDValue Result = DAG.getTargetGlobalAddress(GA->getGlobal(), DL,
14015 GA->getValueType(0),
14016 GA->getOffset(), OpFlag);
14017 SDValue Offset = DAG.getNode(WrapperKind, DL, getPointerTy(), Result);
14019 // With PIC32, the address is actually $g + Offset.
14021 Offset = DAG.getNode(ISD::ADD, DL, getPointerTy(),
14022 DAG.getNode(X86ISD::GlobalBaseReg,
14023 SDLoc(), getPointerTy()),
14026 // Lowering the machine ISD node will make sure everything is in the right location.
14028 SDValue Chain = DAG.getEntryNode();
14029 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
14030 SDValue Args[] = { Chain, Offset };
14031 Chain = DAG.getNode(X86ISD::TLSCALL, DL, NodeTys, Args);
14033 // TLSCALL will be codegen'ed as a call. Inform MFI that the function has calls.
14034 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
14035 MFI->setAdjustsStack(true);
14037 // And our return value (the tls address) is in the standard call return value location.
14039 unsigned Reg = Subtarget->is64Bit() ? X86::RAX : X86::EAX;
14040 return DAG.getCopyFromReg(Chain, DL, Reg, getPointerTy(),
14041 Chain.getValue(1));
14044 if (Subtarget->isTargetKnownWindowsMSVC() ||
14045 Subtarget->isTargetWindowsGNU()) {
14046 // Just use the implicit TLS architecture
14047 // Need to generate something similar to:
14048 // mov rdx, qword [gs:abs 58H]; Load pointer to ThreadLocalStorage
14050 // mov ecx, dword [rel _tls_index]; Load index (from C runtime)
14051 // mov rcx, qword [rdx+rcx*8]
14052 // mov eax, .tls$:tlsvar
14053 // [rax+rcx] contains the address
14054 // Windows 64bit: gs:0x58
14055 // Windows 32bit: fs:__tls_array
14058 SDValue Chain = DAG.getEntryNode();
14060 // Get the Thread Pointer, which is %fs:__tls_array (32-bit) or
14061 // %gs:0x58 (64-bit). On MinGW, __tls_array is not available, so directly
14062 // use its literal value of 0x2C.
14063 Value *Ptr = Constant::getNullValue(Subtarget->is64Bit()
14064 ? Type::getInt8PtrTy(*DAG.getContext(),
14066 : Type::getInt32PtrTy(*DAG.getContext(),
14070 Subtarget->is64Bit()
14071 ? DAG.getIntPtrConstant(0x58)
14072 : (Subtarget->isTargetWindowsGNU()
14073 ? DAG.getIntPtrConstant(0x2C)
14074 : DAG.getExternalSymbol("_tls_array", getPointerTy()));
14076 SDValue ThreadPointer =
14077 DAG.getLoad(getPointerTy(), dl, Chain, TlsArray,
14078 MachinePointerInfo(Ptr), false, false, false, 0);
14080 // Load the _tls_index variable
14081 SDValue IDX = DAG.getExternalSymbol("_tls_index", getPointerTy());
14082 if (Subtarget->is64Bit())
14083 IDX = DAG.getExtLoad(ISD::ZEXTLOAD, dl, getPointerTy(), Chain,
14084 IDX, MachinePointerInfo(), MVT::i32,
14085 false, false, false, 0);
14087 IDX = DAG.getLoad(getPointerTy(), dl, Chain, IDX, MachinePointerInfo(),
14088 false, false, false, 0);
14090 SDValue Scale = DAG.getConstant(Log2_64_Ceil(TD->getPointerSize()),
14092 IDX = DAG.getNode(ISD::SHL, dl, getPointerTy(), IDX, Scale);
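// IDX is now the byte offset of this module's entry in the per-module TLS
// pointer array that ThreadPointer addresses; the add and load below fetch
// the base of this module's TLS block, to which the variable's @SECREL
// offset is then added.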
14094 SDValue res = DAG.getNode(ISD::ADD, dl, getPointerTy(), ThreadPointer, IDX);
14095 res = DAG.getLoad(getPointerTy(), dl, Chain, res, MachinePointerInfo(),
14096 false, false, false, 0);
14098 // Get the offset of the start of the .tls section.
14099 SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl,
14100 GA->getValueType(0),
14101 GA->getOffset(), X86II::MO_SECREL);
14102 SDValue Offset = DAG.getNode(X86ISD::Wrapper, dl, getPointerTy(), TGA);
14104 // The address of the thread local variable is the add of the thread
14105 // pointer with the offset of the variable.
14106 return DAG.getNode(ISD::ADD, dl, getPointerTy(), res, Offset);
14109 llvm_unreachable("TLS not implemented for this target.");
14112 /// LowerShiftParts - Lower SRA_PARTS and friends, which return two i32 values
14113 /// and take a 2 x i32 value to shift plus a shift amount.
14114 static SDValue LowerShiftParts(SDValue Op, SelectionDAG &DAG) {
14115 assert(Op.getNumOperands() == 3 && "Not a double-shift!");
14116 MVT VT = Op.getSimpleValueType();
14117 unsigned VTBits = VT.getSizeInBits();
14119 bool isSRA = Op.getOpcode() == ISD::SRA_PARTS;
14120 SDValue ShOpLo = Op.getOperand(0);
14121 SDValue ShOpHi = Op.getOperand(1);
14122 SDValue ShAmt = Op.getOperand(2);
14123 // X86ISD::SHLD and X86ISD::SHRD have defined overflow behavior but the
14124 // generic ISD nodes don't. Insert an AND to be safe; it's optimized away afterwards.
14126 SDValue SafeShAmt = DAG.getNode(ISD::AND, dl, MVT::i8, ShAmt,
14127 DAG.getConstant(VTBits - 1, MVT::i8));
14128 SDValue Tmp1 = isSRA ? DAG.getNode(ISD::SRA, dl, VT, ShOpHi,
14129 DAG.getConstant(VTBits - 1, MVT::i8))
14130 : DAG.getConstant(0, VT);
14132 SDValue Tmp2, Tmp3;
14133 if (Op.getOpcode() == ISD::SHL_PARTS) {
14134 Tmp2 = DAG.getNode(X86ISD::SHLD, dl, VT, ShOpHi, ShOpLo, ShAmt);
14135 Tmp3 = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, SafeShAmt);
14137 Tmp2 = DAG.getNode(X86ISD::SHRD, dl, VT, ShOpLo, ShOpHi, ShAmt);
14138 Tmp3 = DAG.getNode(isSRA ? ISD::SRA : ISD::SRL, dl, VT, ShOpHi, SafeShAmt);
14141 // If the shift amount is larger than or equal to the width of a part, we can't
14142 // rely on the results of shld/shrd. Insert a test and select the appropriate
14143 // values for large shift amounts.
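// For example, a 64-bit SRL_PARTS with an amount >= 32 on a 32-bit target
// must produce Lo = Hi >> (Amt - 32) and Hi = 0; the CMOVs below select
// those values whenever the (Amt & VTBits) test is non-zero.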
14144 SDValue AndNode = DAG.getNode(ISD::AND, dl, MVT::i8, ShAmt,
14145 DAG.getConstant(VTBits, MVT::i8));
14146 SDValue Cond = DAG.getNode(X86ISD::CMP, dl, MVT::i32,
14147 AndNode, DAG.getConstant(0, MVT::i8));
14150 SDValue CC = DAG.getConstant(X86::COND_NE, MVT::i8);
14151 SDValue Ops0[4] = { Tmp2, Tmp3, CC, Cond };
14152 SDValue Ops1[4] = { Tmp3, Tmp1, CC, Cond };
14154 if (Op.getOpcode() == ISD::SHL_PARTS) {
14155 Hi = DAG.getNode(X86ISD::CMOV, dl, VT, Ops0);
14156 Lo = DAG.getNode(X86ISD::CMOV, dl, VT, Ops1);
14158 Lo = DAG.getNode(X86ISD::CMOV, dl, VT, Ops0);
14159 Hi = DAG.getNode(X86ISD::CMOV, dl, VT, Ops1);
14162 SDValue Ops[2] = { Lo, Hi };
14163 return DAG.getMergeValues(Ops, dl);
14166 SDValue X86TargetLowering::LowerSINT_TO_FP(SDValue Op,
14167 SelectionDAG &DAG) const {
14168 MVT SrcVT = Op.getOperand(0).getSimpleValueType();
14171 if (SrcVT.isVector()) {
14172 if (SrcVT.getVectorElementType() == MVT::i1) {
14173 MVT IntegerVT = MVT::getVectorVT(MVT::i32, SrcVT.getVectorNumElements());
14174 return DAG.getNode(ISD::SINT_TO_FP, dl, Op.getValueType(),
14175 DAG.getNode(ISD::SIGN_EXTEND, dl, IntegerVT,
14176 Op.getOperand(0)));
14181 assert(SrcVT <= MVT::i64 && SrcVT >= MVT::i16 &&
14182 "Unknown SINT_TO_FP to lower!");
14184 // These are really Legal; return the operand so the caller accepts it as Legal.
14186 if (SrcVT == MVT::i32 && isScalarFPTypeInSSEReg(Op.getValueType()))
14188 if (SrcVT == MVT::i64 && isScalarFPTypeInSSEReg(Op.getValueType()) &&
14189 Subtarget->is64Bit()) {
14193 unsigned Size = SrcVT.getSizeInBits()/8;
14194 MachineFunction &MF = DAG.getMachineFunction();
14195 int SSFI = MF.getFrameInfo()->CreateStackObject(Size, Size, false);
14196 SDValue StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
14197 SDValue Chain = DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0),
14199 MachinePointerInfo::getFixedStack(SSFI),
14201 return BuildFILD(Op, SrcVT, Chain, StackSlot, DAG);
14204 SDValue X86TargetLowering::BuildFILD(SDValue Op, EVT SrcVT, SDValue Chain,
14206 SelectionDAG &DAG) const {
14210 bool useSSE = isScalarFPTypeInSSEReg(Op.getValueType());
14212 Tys = DAG.getVTList(MVT::f64, MVT::Other, MVT::Glue);
14214 Tys = DAG.getVTList(Op.getValueType(), MVT::Other);
14216 unsigned ByteSize = SrcVT.getSizeInBits()/8;
14218 FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(StackSlot);
14219 MachineMemOperand *MMO;
14221 int SSFI = FI->getIndex();
14223 DAG.getMachineFunction()
14224 .getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI),
14225 MachineMemOperand::MOLoad, ByteSize, ByteSize);
14227 MMO = cast<LoadSDNode>(StackSlot)->getMemOperand();
14228 StackSlot = StackSlot.getOperand(1);
14230 SDValue Ops[] = { Chain, StackSlot, DAG.getValueType(SrcVT) };
14231 SDValue Result = DAG.getMemIntrinsicNode(useSSE ? X86ISD::FILD_FLAG :
14233 Tys, Ops, SrcVT, MMO);
14236 Chain = Result.getValue(1);
14237 SDValue InFlag = Result.getValue(2);
14239 // FIXME: Currently the FST is flagged to the FILD_FLAG. This
14240 // shouldn't be necessary except that RFP cannot be live across
14241 // multiple blocks. When stackifier is fixed, they can be uncoupled.
14242 MachineFunction &MF = DAG.getMachineFunction();
14243 unsigned SSFISize = Op.getValueType().getSizeInBits()/8;
14244 int SSFI = MF.getFrameInfo()->CreateStackObject(SSFISize, SSFISize, false);
14245 SDValue StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
14246 Tys = DAG.getVTList(MVT::Other);
14248 Chain, Result, StackSlot, DAG.getValueType(Op.getValueType()), InFlag
14250 MachineMemOperand *MMO =
14251 DAG.getMachineFunction()
14252 .getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI),
14253 MachineMemOperand::MOStore, SSFISize, SSFISize);
14255 Chain = DAG.getMemIntrinsicNode(X86ISD::FST, DL, Tys,
14256 Ops, Op.getValueType(), MMO);
14257 Result = DAG.getLoad(Op.getValueType(), DL, Chain, StackSlot,
14258 MachinePointerInfo::getFixedStack(SSFI),
14259 false, false, false, 0);
14265 // LowerUINT_TO_FP_i64 - 64-bit unsigned integer to double expansion.
14266 SDValue X86TargetLowering::LowerUINT_TO_FP_i64(SDValue Op,
14267 SelectionDAG &DAG) const {
14268 // This algorithm is not obvious. Here is what we're trying to output:
14271 punpckldq (c0), %xmm0 // c0: (uint4){ 0x43300000U, 0x45300000U, 0U, 0U }
14272 subpd (c1), %xmm0 // c1: (double2){ 0x1.0p52, 0x1.0p52 * 0x1.0p32 }
14274 haddpd %xmm0, %xmm0
14276 pshufd $0x4e, %xmm0, %xmm1
14282 LLVMContext *Context = DAG.getContext();
14284 // Build some magic constants.
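// The high words 0x43300000 and 0x45300000 are the exponent patterns of the
// doubles 2^52 and 2^84. Interleaving the input's low and high 32-bit halves
// into their mantissas yields the doubles (2^52 + lo) and (2^84 + hi * 2^32);
// subtracting c1 = { 2^52, 2^84 } and adding the two lanes then produces the
// correctly rounded double value of the original 64-bit unsigned integer.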
14285 static const uint32_t CV0[] = { 0x43300000, 0x45300000, 0, 0 };
14286 Constant *C0 = ConstantDataVector::get(*Context, CV0);
14287 SDValue CPIdx0 = DAG.getConstantPool(C0, getPointerTy(), 16);
14289 SmallVector<Constant*,2> CV1;
14291 ConstantFP::get(*Context, APFloat(APFloat::IEEEdouble,
14292 APInt(64, 0x4330000000000000ULL))));
14294 ConstantFP::get(*Context, APFloat(APFloat::IEEEdouble,
14295 APInt(64, 0x4530000000000000ULL))));
14296 Constant *C1 = ConstantVector::get(CV1);
14297 SDValue CPIdx1 = DAG.getConstantPool(C1, getPointerTy(), 16);
14299 // Load the 64-bit value into an XMM register.
14300 SDValue XR1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64,
14302 SDValue CLod0 = DAG.getLoad(MVT::v4i32, dl, DAG.getEntryNode(), CPIdx0,
14303 MachinePointerInfo::getConstantPool(),
14304 false, false, false, 16);
14305 SDValue Unpck1 = getUnpackl(DAG, dl, MVT::v4i32,
14306 DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, XR1),
14309 SDValue CLod1 = DAG.getLoad(MVT::v2f64, dl, CLod0.getValue(1), CPIdx1,
14310 MachinePointerInfo::getConstantPool(),
14311 false, false, false, 16);
14312 SDValue XR2F = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Unpck1);
14313 SDValue Sub = DAG.getNode(ISD::FSUB, dl, MVT::v2f64, XR2F, CLod1);
14316 if (Subtarget->hasSSE3()) {
14317 // FIXME: The 'haddpd' instruction may be slower than 'movhlps + addsd'.
14318 Result = DAG.getNode(X86ISD::FHADD, dl, MVT::v2f64, Sub, Sub);
14320 SDValue S2F = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Sub);
14321 SDValue Shuffle = getTargetShuffleNode(X86ISD::PSHUFD, dl, MVT::v4i32,
14323 Result = DAG.getNode(ISD::FADD, dl, MVT::v2f64,
14324 DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Shuffle),
14328 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Result,
14329 DAG.getIntPtrConstant(0));
14332 // LowerUINT_TO_FP_i32 - 32-bit unsigned integer to float expansion.
14333 SDValue X86TargetLowering::LowerUINT_TO_FP_i32(SDValue Op,
14334 SelectionDAG &DAG) const {
14336 // FP constant to bias correct the final result.
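// 0x4330000000000000 is the bit pattern of the double 2^52. ORing the 32-bit
// input into the low mantissa bits below yields the double (2^52 + x), and
// subtracting the bias again leaves x converted exactly.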
14337 SDValue Bias = DAG.getConstantFP(BitsToDouble(0x4330000000000000ULL),
14340 // Load the 32-bit value into an XMM register.
14341 SDValue Load = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32,
14344 // Zero out the upper parts of the register.
14345 Load = getShuffleVectorZeroOrUndef(Load, 0, true, Subtarget, DAG);
14347 Load = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64,
14348 DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Load),
14349 DAG.getIntPtrConstant(0));
14351 // Or the load with the bias.
14352 SDValue Or = DAG.getNode(ISD::OR, dl, MVT::v2i64,
14353 DAG.getNode(ISD::BITCAST, dl, MVT::v2i64,
14354 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl,
14355 MVT::v2f64, Load)),
14356 DAG.getNode(ISD::BITCAST, dl, MVT::v2i64,
14357 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl,
14358 MVT::v2f64, Bias)));
14359 Or = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64,
14360 DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Or),
14361 DAG.getIntPtrConstant(0));
14363 // Subtract the bias.
14364 SDValue Sub = DAG.getNode(ISD::FSUB, dl, MVT::f64, Or, Bias);
14366 // Handle final rounding.
14367 EVT DestVT = Op.getValueType();
14369 if (DestVT.bitsLT(MVT::f64))
14370 return DAG.getNode(ISD::FP_ROUND, dl, DestVT, Sub,
14371 DAG.getIntPtrConstant(0));
14372 if (DestVT.bitsGT(MVT::f64))
14373 return DAG.getNode(ISD::FP_EXTEND, dl, DestVT, Sub);
14375 // The result is already f64; no final rounding is needed.
14379 static SDValue lowerUINT_TO_FP_vXi32(SDValue Op, SelectionDAG &DAG,
14380 const X86Subtarget &Subtarget) {
14381 // The algorithm is the following:
14382 // #ifdef __SSE4_1__
14383 // uint4 lo = _mm_blend_epi16( v, (uint4) 0x4b000000, 0xaa);
14384 // uint4 hi = _mm_blend_epi16( _mm_srli_epi32(v,16),
14385 // (uint4) 0x53000000, 0xaa);
14387 // uint4 lo = (v & (uint4) 0xffff) | (uint4) 0x4b000000;
14388 // uint4 hi = (v >> 16) | (uint4) 0x53000000;
14390 // float4 fhi = (float4) hi - (0x1.0p39f + 0x1.0p23f);
14391 // return (float4) lo + fhi;
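// This works because 0x4b000000 and 0x53000000 are the float exponent
// patterns of 2^23 and 2^39: lo becomes the float (2^23 + (v & 0xffff)) and
// hi becomes (2^39 + (v >> 16) * 2^16). Subtracting (2^39 + 2^23) from hi and
// adding lo cancels both biases, leaving (v >> 16) * 2^16 + (v & 0xffff) == v.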
14394 SDValue V = Op->getOperand(0);
14395 EVT VecIntVT = V.getValueType();
14396 bool Is128 = VecIntVT == MVT::v4i32;
14397 EVT VecFloatVT = Is128 ? MVT::v4f32 : MVT::v8f32;
14399 // If we convert to something other than the supported type, e.g., to v4f64, bail out early.
14400 if (VecFloatVT != Op->getValueType(0))
14403 unsigned NumElts = VecIntVT.getVectorNumElements();
14404 assert((VecIntVT == MVT::v4i32 || VecIntVT == MVT::v8i32) &&
14405 "Unsupported custom type");
14406 assert(NumElts <= 8 && "The size of the constant array must be fixed");
14408 // In the #ifdef/#else code, we have in common:
14409 // - The vector of constants:
14415 // Create the splat vector for 0x4b000000.
14416 SDValue CstLow = DAG.getConstant(0x4b000000, MVT::i32);
14417 SDValue CstLowArray[] = {CstLow, CstLow, CstLow, CstLow,
14418 CstLow, CstLow, CstLow, CstLow};
14419 SDValue VecCstLow = DAG.getNode(ISD::BUILD_VECTOR, DL, VecIntVT,
14420 makeArrayRef(&CstLowArray[0], NumElts));
14421 // Create the splat vector for 0x53000000.
14422 SDValue CstHigh = DAG.getConstant(0x53000000, MVT::i32);
14423 SDValue CstHighArray[] = {CstHigh, CstHigh, CstHigh, CstHigh,
14424 CstHigh, CstHigh, CstHigh, CstHigh};
14425 SDValue VecCstHigh = DAG.getNode(ISD::BUILD_VECTOR, DL, VecIntVT,
14426 makeArrayRef(&CstHighArray[0], NumElts));
14428 // Create the right shift.
14429 SDValue CstShift = DAG.getConstant(16, MVT::i32);
14430 SDValue CstShiftArray[] = {CstShift, CstShift, CstShift, CstShift,
14431 CstShift, CstShift, CstShift, CstShift};
14432 SDValue VecCstShift = DAG.getNode(ISD::BUILD_VECTOR, DL, VecIntVT,
14433 makeArrayRef(&CstShiftArray[0], NumElts));
14434 SDValue HighShift = DAG.getNode(ISD::SRL, DL, VecIntVT, V, VecCstShift);
14437 if (Subtarget.hasSSE41()) {
14438 EVT VecI16VT = Is128 ? MVT::v8i16 : MVT::v16i16;
14439 // uint4 lo = _mm_blend_epi16( v, (uint4) 0x4b000000, 0xaa);
14440 SDValue VecCstLowBitcast =
14441 DAG.getNode(ISD::BITCAST, DL, VecI16VT, VecCstLow);
14442 SDValue VecBitcast = DAG.getNode(ISD::BITCAST, DL, VecI16VT, V);
14443 // Low will be bitcasted right away, so do not bother bitcasting back to its original type.
14445 Low = DAG.getNode(X86ISD::BLENDI, DL, VecI16VT, VecBitcast,
14446 VecCstLowBitcast, DAG.getConstant(0xaa, MVT::i32));
14447 // uint4 hi = _mm_blend_epi16( _mm_srli_epi32(v,16),
14448 // (uint4) 0x53000000, 0xaa);
14449 SDValue VecCstHighBitcast =
14450 DAG.getNode(ISD::BITCAST, DL, VecI16VT, VecCstHigh);
14451 SDValue VecShiftBitcast =
14452 DAG.getNode(ISD::BITCAST, DL, VecI16VT, HighShift);
14453 // High will be bitcasted right away, so do not bother bitcasting back to
14454 // its original type.
14455 High = DAG.getNode(X86ISD::BLENDI, DL, VecI16VT, VecShiftBitcast,
14456 VecCstHighBitcast, DAG.getConstant(0xaa, MVT::i32));
14458 SDValue CstMask = DAG.getConstant(0xffff, MVT::i32);
14459 SDValue VecCstMask = DAG.getNode(ISD::BUILD_VECTOR, DL, VecIntVT, CstMask,
14460 CstMask, CstMask, CstMask);
14461 // uint4 lo = (v & (uint4) 0xffff) | (uint4) 0x4b000000;
14462 SDValue LowAnd = DAG.getNode(ISD::AND, DL, VecIntVT, V, VecCstMask);
14463 Low = DAG.getNode(ISD::OR, DL, VecIntVT, LowAnd, VecCstLow);
14465 // uint4 hi = (v >> 16) | (uint4) 0x53000000;
14466 High = DAG.getNode(ISD::OR, DL, VecIntVT, HighShift, VecCstHigh);
14469 // Create the vector constant for -(0x1.0p39f + 0x1.0p23f).
14470 SDValue CstFAdd = DAG.getConstantFP(
14471 APFloat(APFloat::IEEEsingle, APInt(32, 0xD3000080)), MVT::f32);
14472 SDValue CstFAddArray[] = {CstFAdd, CstFAdd, CstFAdd, CstFAdd,
14473 CstFAdd, CstFAdd, CstFAdd, CstFAdd};
14474 SDValue VecCstFAdd = DAG.getNode(ISD::BUILD_VECTOR, DL, VecFloatVT,
14475 makeArrayRef(&CstFAddArray[0], NumElts));
14477 // float4 fhi = (float4) hi - (0x1.0p39f + 0x1.0p23f);
14478 SDValue HighBitcast = DAG.getNode(ISD::BITCAST, DL, VecFloatVT, High);
14480 DAG.getNode(ISD::FADD, DL, VecFloatVT, HighBitcast, VecCstFAdd);
14481 // return (float4) lo + fhi;
14482 SDValue LowBitcast = DAG.getNode(ISD::BITCAST, DL, VecFloatVT, Low);
14483 return DAG.getNode(ISD::FADD, DL, VecFloatVT, LowBitcast, FHigh);
14486 SDValue X86TargetLowering::lowerUINT_TO_FP_vec(SDValue Op,
14487 SelectionDAG &DAG) const {
14488 SDValue N0 = Op.getOperand(0);
14489 MVT SVT = N0.getSimpleValueType();
14492 switch (SVT.SimpleTy) {
14494 llvm_unreachable("Custom UINT_TO_FP is not supported!");
14499 MVT NVT = MVT::getVectorVT(MVT::i32, SVT.getVectorNumElements());
14500 return DAG.getNode(ISD::SINT_TO_FP, dl, Op.getValueType(),
14501 DAG.getNode(ISD::ZERO_EXTEND, dl, NVT, N0));
14505 return lowerUINT_TO_FP_vXi32(Op, DAG, *Subtarget);
14507 llvm_unreachable(nullptr);
14510 SDValue X86TargetLowering::LowerUINT_TO_FP(SDValue Op,
14511 SelectionDAG &DAG) const {
14512 SDValue N0 = Op.getOperand(0);
14515 if (Op.getValueType().isVector())
14516 return lowerUINT_TO_FP_vec(Op, DAG);
14518 // Since UINT_TO_FP is legal (it's marked custom), dag combiner won't
14519 // optimize it to a SINT_TO_FP when the sign bit is known zero. Perform
14520 // the optimization here.
14521 if (DAG.SignBitIsZero(N0))
14522 return DAG.getNode(ISD::SINT_TO_FP, dl, Op.getValueType(), N0);
14524 MVT SrcVT = N0.getSimpleValueType();
14525 MVT DstVT = Op.getSimpleValueType();
14526 if (SrcVT == MVT::i64 && DstVT == MVT::f64 && X86ScalarSSEf64)
14527 return LowerUINT_TO_FP_i64(Op, DAG);
14528 if (SrcVT == MVT::i32 && X86ScalarSSEf64)
14529 return LowerUINT_TO_FP_i32(Op, DAG);
14530 if (Subtarget->is64Bit() && SrcVT == MVT::i64 && DstVT == MVT::f32)
14533 // Make a 64-bit buffer, and use it to build an FILD.
14534 SDValue StackSlot = DAG.CreateStackTemporary(MVT::i64);
14535 if (SrcVT == MVT::i32) {
14536 SDValue WordOff = DAG.getConstant(4, getPointerTy());
14537 SDValue OffsetSlot = DAG.getNode(ISD::ADD, dl,
14538 getPointerTy(), StackSlot, WordOff);
14539 SDValue Store1 = DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0),
14540 StackSlot, MachinePointerInfo(),
14542 SDValue Store2 = DAG.getStore(Store1, dl, DAG.getConstant(0, MVT::i32),
14543 OffsetSlot, MachinePointerInfo(),
14545 SDValue Fild = BuildFILD(Op, MVT::i64, Store2, StackSlot, DAG);
14549 assert(SrcVT == MVT::i64 && "Unexpected type in UINT_TO_FP");
14550 SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0),
14551 StackSlot, MachinePointerInfo(),
14553 // For i64 source, we need to add the appropriate power of 2 if the input
14554 // was negative. This is the same as the optimization in
14555 // DAGTypeLegalizer::ExpandIntOp_UINT_TO_FP, and for it to be safe here,
14556 // we must be careful to do the computation in x87 extended precision, not
14557 // in SSE. (The generic code can't know it's OK to do this, or how to.)
14558 int SSFI = cast<FrameIndexSDNode>(StackSlot)->getIndex();
14559 MachineMemOperand *MMO =
14560 DAG.getMachineFunction()
14561 .getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI),
14562 MachineMemOperand::MOLoad, 8, 8);
14564 SDVTList Tys = DAG.getVTList(MVT::f80, MVT::Other);
14565 SDValue Ops[] = { Store, StackSlot, DAG.getValueType(MVT::i64) };
14566 SDValue Fild = DAG.getMemIntrinsicNode(X86ISD::FILD, dl, Tys, Ops,
14569 APInt FF(32, 0x5F800000ULL);
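// 0x5F800000 is the bit pattern of the float 2^64. FILD interprets the i64
// as signed, so when the sign bit was set the loaded value is off by exactly
// 2^64; the fudge factor selected below adds it back.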
14571 // Check whether the sign bit is set.
14572 SDValue SignSet = DAG.getSetCC(dl,
14573 getSetCCResultType(*DAG.getContext(), MVT::i64),
14574 Op.getOperand(0), DAG.getConstant(0, MVT::i64),
14577 // Build a 64 bit pair (0, FF) in the constant pool, with FF in the lo bits.
14578 SDValue FudgePtr = DAG.getConstantPool(
14579 ConstantInt::get(*DAG.getContext(), FF.zext(64)),
14582 // Get a pointer to FF if the sign bit was set, or to 0 otherwise.
14583 SDValue Zero = DAG.getIntPtrConstant(0);
14584 SDValue Four = DAG.getIntPtrConstant(4);
14585 SDValue Offset = DAG.getNode(ISD::SELECT, dl, Zero.getValueType(), SignSet,
14587 FudgePtr = DAG.getNode(ISD::ADD, dl, getPointerTy(), FudgePtr, Offset);
14589 // Load the value out, extending it from f32 to f80.
14590 // FIXME: Avoid the extend by constructing the right constant pool?
14591 SDValue Fudge = DAG.getExtLoad(ISD::EXTLOAD, dl, MVT::f80, DAG.getEntryNode(),
14592 FudgePtr, MachinePointerInfo::getConstantPool(),
14593 MVT::f32, false, false, false, 4);
14594 // Extend everything to 80 bits to force it to be done on x87.
14595 SDValue Add = DAG.getNode(ISD::FADD, dl, MVT::f80, Fild, Fudge);
14596 return DAG.getNode(ISD::FP_ROUND, dl, DstVT, Add, DAG.getIntPtrConstant(0));
14599 std::pair<SDValue,SDValue>
14600 X86TargetLowering:: FP_TO_INTHelper(SDValue Op, SelectionDAG &DAG,
14601 bool IsSigned, bool IsReplace) const {
14604 EVT DstTy = Op.getValueType();
14606 if (!IsSigned && !isIntegerTypeFTOL(DstTy)) {
14607 assert(DstTy == MVT::i32 && "Unexpected FP_TO_UINT");
14611 assert(DstTy.getSimpleVT() <= MVT::i64 &&
14612 DstTy.getSimpleVT() >= MVT::i16 &&
14613 "Unknown FP_TO_INT to lower!");
14615 // These are really Legal.
14616 if (DstTy == MVT::i32 &&
14617 isScalarFPTypeInSSEReg(Op.getOperand(0).getValueType()))
14618 return std::make_pair(SDValue(), SDValue());
14619 if (Subtarget->is64Bit() &&
14620 DstTy == MVT::i64 &&
14621 isScalarFPTypeInSSEReg(Op.getOperand(0).getValueType()))
14622 return std::make_pair(SDValue(), SDValue());
14624 // We lower FP->int64 either into FISTP64 followed by a load from a temporary
14625 // stack slot, or into the FTOL runtime function.
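// On Win32, X86ISD::WIN_FTOL is lowered to a call to the MSVC runtime's
// _ftol2 helper, which returns the converted 64-bit result in EDX:EAX; that
// is why the WIN_FTOL case below copies both registers and rebuilds the i64
// pair.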
14626 MachineFunction &MF = DAG.getMachineFunction();
14627 unsigned MemSize = DstTy.getSizeInBits()/8;
14628 int SSFI = MF.getFrameInfo()->CreateStackObject(MemSize, MemSize, false);
14629 SDValue StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
14632 if (!IsSigned && isIntegerTypeFTOL(DstTy))
14633 Opc = X86ISD::WIN_FTOL;
14635 switch (DstTy.getSimpleVT().SimpleTy) {
14636 default: llvm_unreachable("Invalid FP_TO_SINT to lower!");
14637 case MVT::i16: Opc = X86ISD::FP_TO_INT16_IN_MEM; break;
14638 case MVT::i32: Opc = X86ISD::FP_TO_INT32_IN_MEM; break;
14639 case MVT::i64: Opc = X86ISD::FP_TO_INT64_IN_MEM; break;
14642 SDValue Chain = DAG.getEntryNode();
14643 SDValue Value = Op.getOperand(0);
14644 EVT TheVT = Op.getOperand(0).getValueType();
14645 // FIXME This causes a redundant load/store if the SSE-class value is already
14646 // in memory, such as if it is on the callstack.
14647 if (isScalarFPTypeInSSEReg(TheVT)) {
14648 assert(DstTy == MVT::i64 && "Invalid FP_TO_SINT to lower!");
14649 Chain = DAG.getStore(Chain, DL, Value, StackSlot,
14650 MachinePointerInfo::getFixedStack(SSFI),
14652 SDVTList Tys = DAG.getVTList(Op.getOperand(0).getValueType(), MVT::Other);
14654 Chain, StackSlot, DAG.getValueType(TheVT)
14657 MachineMemOperand *MMO =
14658 MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI),
14659 MachineMemOperand::MOLoad, MemSize, MemSize);
14660 Value = DAG.getMemIntrinsicNode(X86ISD::FLD, DL, Tys, Ops, DstTy, MMO);
14661 Chain = Value.getValue(1);
14662 SSFI = MF.getFrameInfo()->CreateStackObject(MemSize, MemSize, false);
14663 StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
14666 MachineMemOperand *MMO =
14667 MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI),
14668 MachineMemOperand::MOStore, MemSize, MemSize);
14670 if (Opc != X86ISD::WIN_FTOL) {
14671 // Build the FP_TO_INT*_IN_MEM
14672 SDValue Ops[] = { Chain, Value, StackSlot };
14673 SDValue FIST = DAG.getMemIntrinsicNode(Opc, DL, DAG.getVTList(MVT::Other),
14675 return std::make_pair(FIST, StackSlot);
14677 SDValue ftol = DAG.getNode(X86ISD::WIN_FTOL, DL,
14678 DAG.getVTList(MVT::Other, MVT::Glue),
14680 SDValue eax = DAG.getCopyFromReg(ftol, DL, X86::EAX,
14681 MVT::i32, ftol.getValue(1));
14682 SDValue edx = DAG.getCopyFromReg(eax.getValue(1), DL, X86::EDX,
14683 MVT::i32, eax.getValue(2));
14684 SDValue Ops[] = { eax, edx };
14685 SDValue pair = IsReplace
14686 ? DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, Ops)
14687 : DAG.getMergeValues(Ops, DL);
14688 return std::make_pair(pair, SDValue());
14692 static SDValue LowerAVXExtend(SDValue Op, SelectionDAG &DAG,
14693 const X86Subtarget *Subtarget) {
14694 MVT VT = Op->getSimpleValueType(0);
14695 SDValue In = Op->getOperand(0);
14696 MVT InVT = In.getSimpleValueType();
14699 // Optimize vectors in AVX mode:
14702 // Use vpunpcklwd for 4 lower elements v8i16 -> v4i32.
14703 // Use vpunpckhwd for 4 upper elements v8i16 -> v4i32.
14704 // Concat upper and lower parts.
14707 // Use vpunpckldq for 4 lower elements v4i32 -> v2i64.
14708 // Use vpunpckhdq for 4 upper elements v4i32 -> v2i64.
14709 // Concat upper and lower parts.
14712 if (((VT != MVT::v16i16) || (InVT != MVT::v16i8)) &&
14713 ((VT != MVT::v8i32) || (InVT != MVT::v8i16)) &&
14714 ((VT != MVT::v4i64) || (InVT != MVT::v4i32)))
14717 if (Subtarget->hasInt256())
14718 return DAG.getNode(X86ISD::VZEXT, dl, VT, In);
14720 SDValue ZeroVec = getZeroVector(InVT, Subtarget, DAG, dl);
14721 SDValue Undef = DAG.getUNDEF(InVT);
14722 bool NeedZero = Op.getOpcode() == ISD::ZERO_EXTEND;
14723 SDValue OpLo = getUnpackl(DAG, dl, InVT, In, NeedZero ? ZeroVec : Undef);
14724 SDValue OpHi = getUnpackh(DAG, dl, InVT, In, NeedZero ? ZeroVec : Undef);
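// Interleaving the source elements with zero (or undef for ANY_EXTEND)
// produces, once bitcast to the wider element type, the zero-extended low
// and high halves of the input vector.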
14726 MVT HVT = MVT::getVectorVT(VT.getVectorElementType(),
14727 VT.getVectorNumElements()/2);
14729 OpLo = DAG.getNode(ISD::BITCAST, dl, HVT, OpLo);
14730 OpHi = DAG.getNode(ISD::BITCAST, dl, HVT, OpHi);
14732 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, OpLo, OpHi);
14735 static SDValue LowerZERO_EXTEND_AVX512(SDValue Op,
14736 SelectionDAG &DAG) {
14737 MVT VT = Op->getSimpleValueType(0);
14738 SDValue In = Op->getOperand(0);
14739 MVT InVT = In.getSimpleValueType();
14741 unsigned int NumElts = VT.getVectorNumElements();
14742 if (NumElts != 8 && NumElts != 16)
14745 if (VT.is512BitVector() && InVT.getVectorElementType() != MVT::i1)
14746 return DAG.getNode(X86ISD::VZEXT, DL, VT, In);
14748 EVT ExtVT = (NumElts == 8)? MVT::v8i64 : MVT::v16i32;
14749 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
14750 // At this point only the mask (i1) extension case remains.
14751 assert(InVT.getVectorElementType() == MVT::i1);
14752 SDValue Cst = DAG.getTargetConstant(1, ExtVT.getScalarType());
14753 const Constant *C = (dyn_cast<ConstantSDNode>(Cst))->getConstantIntValue();
14754 SDValue CP = DAG.getConstantPool(C, TLI.getPointerTy());
14755 unsigned Alignment = cast<ConstantPoolSDNode>(CP)->getAlignment();
14756 SDValue Ld = DAG.getLoad(Cst.getValueType(), DL, DAG.getEntryNode(), CP,
14757 MachinePointerInfo::getConstantPool(),
14758 false, false, false, Alignment);
14760 SDValue Brcst = DAG.getNode(X86ISD::VBROADCASTM, DL, ExtVT, In, Ld);
14761 if (VT.is512BitVector())
14763 return DAG.getNode(X86ISD::VTRUNC, DL, VT, Brcst);
14766 static SDValue LowerANY_EXTEND(SDValue Op, const X86Subtarget *Subtarget,
14767 SelectionDAG &DAG) {
14768 if (Subtarget->hasFp256()) {
14769 SDValue Res = LowerAVXExtend(Op, DAG, Subtarget);
14777 static SDValue LowerZERO_EXTEND(SDValue Op, const X86Subtarget *Subtarget,
14778 SelectionDAG &DAG) {
14780 MVT VT = Op.getSimpleValueType();
14781 SDValue In = Op.getOperand(0);
14782 MVT SVT = In.getSimpleValueType();
14784 if (VT.is512BitVector() || SVT.getVectorElementType() == MVT::i1)
14785 return LowerZERO_EXTEND_AVX512(Op, DAG);
14787 if (Subtarget->hasFp256()) {
14788 SDValue Res = LowerAVXExtend(Op, DAG, Subtarget);
14793 assert(!VT.is256BitVector() || !SVT.is128BitVector() ||
14794 VT.getVectorNumElements() != SVT.getVectorNumElements());
14798 SDValue X86TargetLowering::LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const {
14800 MVT VT = Op.getSimpleValueType();
14801 SDValue In = Op.getOperand(0);
14802 MVT InVT = In.getSimpleValueType();
14804 if (VT == MVT::i1) {
14805 assert((InVT.isInteger() && (InVT.getSizeInBits() <= 64)) &&
14806 "Invalid scalar TRUNCATE operation");
14807 if (InVT.getSizeInBits() >= 32)
14809 In = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, In);
14810 return DAG.getNode(ISD::TRUNCATE, DL, VT, In);
14812 assert(VT.getVectorNumElements() == InVT.getVectorNumElements() &&
14813 "Invalid TRUNCATE operation");
14815 if (InVT.is512BitVector() || VT.getVectorElementType() == MVT::i1) {
14816 if (VT.getVectorElementType().getSizeInBits() >=8)
14817 return DAG.getNode(X86ISD::VTRUNC, DL, VT, In);
14819 assert(VT.getVectorElementType() == MVT::i1 && "Unexpected vector type");
14820 unsigned NumElts = InVT.getVectorNumElements();
14821 assert ((NumElts == 8 || NumElts == 16) && "Unexpected vector type");
14822 if (InVT.getSizeInBits() < 512) {
14823 MVT ExtVT = (NumElts == 16)? MVT::v16i32 : MVT::v8i64;
14824 In = DAG.getNode(ISD::SIGN_EXTEND, DL, ExtVT, In);
14828 SDValue Cst = DAG.getTargetConstant(1, InVT.getVectorElementType());
14829 const Constant *C = (dyn_cast<ConstantSDNode>(Cst))->getConstantIntValue();
14830 SDValue CP = DAG.getConstantPool(C, getPointerTy());
14831 unsigned Alignment = cast<ConstantPoolSDNode>(CP)->getAlignment();
14832 SDValue Ld = DAG.getLoad(Cst.getValueType(), DL, DAG.getEntryNode(), CP,
14833 MachinePointerInfo::getConstantPool(),
14834 false, false, false, Alignment);
14835 SDValue OneV = DAG.getNode(X86ISD::VBROADCAST, DL, InVT, Ld);
14836 SDValue And = DAG.getNode(ISD::AND, DL, InVT, OneV, In);
14837 return DAG.getNode(X86ISD::TESTM, DL, VT, And, And);
14840 if ((VT == MVT::v4i32) && (InVT == MVT::v4i64)) {
14841 // On AVX2, v4i64 -> v4i32 becomes VPERMD.
14842 if (Subtarget->hasInt256()) {
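// Truncating each i64 keeps its low 32 bits, which on little-endian are the
// even-numbered elements of the v8i32 view, so a single cross-lane VPERMD
// with mask {0, 2, 4, 6} gathers the result into the low 128 bits.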
14843 static const int ShufMask[] = {0, 2, 4, 6, -1, -1, -1, -1};
14844 In = DAG.getNode(ISD::BITCAST, DL, MVT::v8i32, In);
14845 In = DAG.getVectorShuffle(MVT::v8i32, DL, In, DAG.getUNDEF(MVT::v8i32),
14847 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, In,
14848 DAG.getIntPtrConstant(0));
14851 SDValue OpLo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v2i64, In,
14852 DAG.getIntPtrConstant(0));
14853 SDValue OpHi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v2i64, In,
14854 DAG.getIntPtrConstant(2));
14855 OpLo = DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, OpLo);
14856 OpHi = DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, OpHi);
14857 static const int ShufMask[] = {0, 2, 4, 6};
14858 return DAG.getVectorShuffle(VT, DL, OpLo, OpHi, ShufMask);
14861 if ((VT == MVT::v8i16) && (InVT == MVT::v8i32)) {
14862 // On AVX2, v8i32 -> v8i16 becomes a PSHUFB.
14863 if (Subtarget->hasInt256()) {
14864 In = DAG.getNode(ISD::BITCAST, DL, MVT::v32i8, In);
14866 SmallVector<SDValue,32> pshufbMask;
14867 for (unsigned i = 0; i < 2; ++i) {
14868 pshufbMask.push_back(DAG.getConstant(0x0, MVT::i8));
14869 pshufbMask.push_back(DAG.getConstant(0x1, MVT::i8));
14870 pshufbMask.push_back(DAG.getConstant(0x4, MVT::i8));
14871 pshufbMask.push_back(DAG.getConstant(0x5, MVT::i8));
14872 pshufbMask.push_back(DAG.getConstant(0x8, MVT::i8));
14873 pshufbMask.push_back(DAG.getConstant(0x9, MVT::i8));
14874 pshufbMask.push_back(DAG.getConstant(0xc, MVT::i8));
14875 pshufbMask.push_back(DAG.getConstant(0xd, MVT::i8));
14876 for (unsigned j = 0; j < 8; ++j)
14877 pshufbMask.push_back(DAG.getConstant(0x80, MVT::i8));
14879 SDValue BV = DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v32i8, pshufbMask);
14880 In = DAG.getNode(X86ISD::PSHUFB, DL, MVT::v32i8, In, BV);
14881 In = DAG.getNode(ISD::BITCAST, DL, MVT::v4i64, In);
14883 static const int ShufMask[] = {0, 2, -1, -1};
14884 In = DAG.getVectorShuffle(MVT::v4i64, DL, In, DAG.getUNDEF(MVT::v4i64),
14886 In = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v2i64, In,
14887 DAG.getIntPtrConstant(0));
14888 return DAG.getNode(ISD::BITCAST, DL, VT, In);
14891 SDValue OpLo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v4i32, In,
14892 DAG.getIntPtrConstant(0));
14894 SDValue OpHi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v4i32, In,
14895 DAG.getIntPtrConstant(4));
14897 OpLo = DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, OpLo);
14898 OpHi = DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, OpHi);
14900 // The PSHUFB mask:
14901 static const int ShufMask1[] = {0, 1, 4, 5, 8, 9, 12, 13,
14902 -1, -1, -1, -1, -1, -1, -1, -1};
14904 SDValue Undef = DAG.getUNDEF(MVT::v16i8);
14905 OpLo = DAG.getVectorShuffle(MVT::v16i8, DL, OpLo, Undef, ShufMask1);
14906 OpHi = DAG.getVectorShuffle(MVT::v16i8, DL, OpHi, Undef, ShufMask1);
14908 OpLo = DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, OpLo);
14909 OpHi = DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, OpHi);
14911 // The MOVLHPS Mask:
14912 static const int ShufMask2[] = {0, 1, 4, 5};
14913 SDValue res = DAG.getVectorShuffle(MVT::v4i32, DL, OpLo, OpHi, ShufMask2);
14914 return DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, res);
14917 // Handle truncation of V256 to V128 using shuffles.
14918 if (!VT.is128BitVector() || !InVT.is256BitVector())
14921 assert(Subtarget->hasFp256() && "256-bit vector without AVX!");
14923 unsigned NumElems = VT.getVectorNumElements();
14924 MVT NVT = MVT::getVectorVT(VT.getVectorElementType(), NumElems * 2);
14926 SmallVector<int, 16> MaskVec(NumElems * 2, -1);
14927 // Prepare truncation shuffle mask
14928 for (unsigned i = 0; i != NumElems; ++i)
14929 MaskVec[i] = i * 2;
14930 SDValue V = DAG.getVectorShuffle(NVT, DL,
14931 DAG.getNode(ISD::BITCAST, DL, NVT, In),
14932 DAG.getUNDEF(NVT), &MaskVec[0]);
14933 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, V,
14934 DAG.getIntPtrConstant(0));
14937 SDValue X86TargetLowering::LowerFP_TO_SINT(SDValue Op,
14938 SelectionDAG &DAG) const {
14939 assert(!Op.getSimpleValueType().isVector());
14941 std::pair<SDValue,SDValue> Vals = FP_TO_INTHelper(Op, DAG,
14942 /*IsSigned=*/ true, /*IsReplace=*/ false);
14943 SDValue FIST = Vals.first, StackSlot = Vals.second;
14944 // If FP_TO_INTHelper failed, the node is actually supposed to be Legal.
14945 if (!FIST.getNode()) return Op;
14947 if (StackSlot.getNode())
14948 // Load the result.
14949 return DAG.getLoad(Op.getValueType(), SDLoc(Op),
14950 FIST, StackSlot, MachinePointerInfo(),
14951 false, false, false, 0);
14953 // The node is the result.
14957 SDValue X86TargetLowering::LowerFP_TO_UINT(SDValue Op,
14958 SelectionDAG &DAG) const {
14959 std::pair<SDValue,SDValue> Vals = FP_TO_INTHelper(Op, DAG,
14960 /*IsSigned=*/ false, /*IsReplace=*/ false);
14961 SDValue FIST = Vals.first, StackSlot = Vals.second;
14962 assert(FIST.getNode() && "Unexpected failure");
14964 if (StackSlot.getNode())
14965 // Load the result.
14966 return DAG.getLoad(Op.getValueType(), SDLoc(Op),
14967 FIST, StackSlot, MachinePointerInfo(),
14968 false, false, false, 0);
14970 // The node is the result.
14974 static SDValue LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) {
14976 MVT VT = Op.getSimpleValueType();
14977 SDValue In = Op.getOperand(0);
14978 MVT SVT = In.getSimpleValueType();
14980 assert(SVT == MVT::v2f32 && "Only customize MVT::v2f32 type legalization!");
14982 return DAG.getNode(X86ISD::VFPEXT, DL, VT,
14983 DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v4f32,
14984 In, DAG.getUNDEF(SVT)));
14987 /// The only differences between FABS and FNEG are the mask and the logic op.
14988 /// FNEG also has a folding opportunity for FNEG(FABS(x)).
14989 static SDValue LowerFABSorFNEG(SDValue Op, SelectionDAG &DAG) {
14990 assert((Op.getOpcode() == ISD::FABS || Op.getOpcode() == ISD::FNEG) &&
14991 "Wrong opcode for lowering FABS or FNEG.");
14993 bool IsFABS = (Op.getOpcode() == ISD::FABS);
14995 // If this is a FABS and it has an FNEG user, bail out to fold the combination
14996 // into an FNABS. We'll lower the FABS after that if it is still in use.
14998 for (SDNode *User : Op->uses())
14999 if (User->getOpcode() == ISD::FNEG)
15002 SDValue Op0 = Op.getOperand(0);
15003 bool IsFNABS = !IsFABS && (Op0.getOpcode() == ISD::FABS);
15006 MVT VT = Op.getSimpleValueType();
15007 // Assume scalar op for initialization; update for vector if needed.
15008 // Note that there are no scalar bitwise logical SSE/AVX instructions, so we
15009 // generate a 16-byte vector constant and logic op even for the scalar case.
15010 // Using a 16-byte mask allows folding the load of the mask with
15011 // the logic op, so it can save (~4 bytes) on code size.
15013 unsigned NumElts = VT == MVT::f64 ? 2 : 4;
15014 // FIXME: Use function attribute "OptimizeForSize" and/or CodeGenOpt::Level to
15015 // decide if we should generate a 16-byte constant mask when we only need 4 or
15016 // 8 bytes for the scalar case.
15017 if (VT.isVector()) {
15018 EltVT = VT.getVectorElementType();
15019 NumElts = VT.getVectorNumElements();
15022 unsigned EltBits = EltVT.getSizeInBits();
15023 LLVMContext *Context = DAG.getContext();
15024 // For FABS, mask is 0x7f...; for FNEG, mask is 0x80...
15026 IsFABS ? APInt::getSignedMaxValue(EltBits) : APInt::getSignBit(EltBits);
15027 Constant *C = ConstantInt::get(*Context, MaskElt);
15028 C = ConstantVector::getSplat(NumElts, C);
15029 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
15030 SDValue CPIdx = DAG.getConstantPool(C, TLI.getPointerTy());
15031 unsigned Alignment = cast<ConstantPoolSDNode>(CPIdx)->getAlignment();
15032 SDValue Mask = DAG.getLoad(VT, dl, DAG.getEntryNode(), CPIdx,
15033 MachinePointerInfo::getConstantPool(),
15034 false, false, false, Alignment);
15036 if (VT.isVector()) {
15037 // For a vector, cast operands to a vector type, perform the logic op,
15038 // and cast the result back to the original value type.
15039 MVT VecVT = MVT::getVectorVT(MVT::i64, VT.getSizeInBits() / 64);
15040 SDValue MaskCasted = DAG.getNode(ISD::BITCAST, dl, VecVT, Mask);
15041 SDValue Operand = IsFNABS ?
15042 DAG.getNode(ISD::BITCAST, dl, VecVT, Op0.getOperand(0)) :
15043 DAG.getNode(ISD::BITCAST, dl, VecVT, Op0);
15044 unsigned BitOp = IsFABS ? ISD::AND : IsFNABS ? ISD::OR : ISD::XOR;
15045 return DAG.getNode(ISD::BITCAST, dl, VT,
15046 DAG.getNode(BitOp, dl, VecVT, Operand, MaskCasted));
15049 // If not vector, then scalar.
15050 unsigned BitOp = IsFABS ? X86ISD::FAND : IsFNABS ? X86ISD::FOR : X86ISD::FXOR;
15051 SDValue Operand = IsFNABS ? Op0.getOperand(0) : Op0;
15052 return DAG.getNode(BitOp, dl, VT, Operand, Mask);
15055 static SDValue LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) {
15056 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
15057 LLVMContext *Context = DAG.getContext();
15058 SDValue Op0 = Op.getOperand(0);
15059 SDValue Op1 = Op.getOperand(1);
15061 MVT VT = Op.getSimpleValueType();
15062 MVT SrcVT = Op1.getSimpleValueType();
15064 // If second operand is smaller, extend it first.
15065 if (SrcVT.bitsLT(VT)) {
15066 Op1 = DAG.getNode(ISD::FP_EXTEND, dl, VT, Op1);
15069 // And if it is bigger, shrink it first.
15070 if (SrcVT.bitsGT(VT)) {
15071 Op1 = DAG.getNode(ISD::FP_ROUND, dl, VT, Op1, DAG.getIntPtrConstant(1));
15075 // At this point the operands and the result should have the same
15076 // type, and that won't be f80 since that is not custom lowered.
15078 const fltSemantics &Sem =
15079 VT == MVT::f64 ? APFloat::IEEEdouble : APFloat::IEEEsingle;
15080 const unsigned SizeInBits = VT.getSizeInBits();
15082 SmallVector<Constant *, 4> CV(
15083 VT == MVT::f64 ? 2 : 4,
15084 ConstantFP::get(*Context, APFloat(Sem, APInt(SizeInBits, 0))));
15086 // First, clear all bits but the sign bit from the second operand (sign).
15087 CV[0] = ConstantFP::get(*Context,
15088 APFloat(Sem, APInt::getHighBitsSet(SizeInBits, 1)));
15089 Constant *C = ConstantVector::get(CV);
15090 SDValue CPIdx = DAG.getConstantPool(C, TLI.getPointerTy(), 16);
15091 SDValue Mask1 = DAG.getLoad(SrcVT, dl, DAG.getEntryNode(), CPIdx,
15092 MachinePointerInfo::getConstantPool(),
15093 false, false, false, 16);
15094 SDValue SignBit = DAG.getNode(X86ISD::FAND, dl, SrcVT, Op1, Mask1);
15096 // Next, clear the sign bit from the first operand (magnitude).
15097 // If it's a constant, we can clear it here.
15098 if (ConstantFPSDNode *Op0CN = dyn_cast<ConstantFPSDNode>(Op0)) {
15099 APFloat APF = Op0CN->getValueAPF();
15100 // If the magnitude is a positive zero, the sign bit alone is enough.
15101 if (APF.isPosZero())
15104 CV[0] = ConstantFP::get(*Context, APF);
15106 CV[0] = ConstantFP::get(
15108 APFloat(Sem, APInt::getLowBitsSet(SizeInBits, SizeInBits - 1)));
15110 C = ConstantVector::get(CV);
15111 CPIdx = DAG.getConstantPool(C, TLI.getPointerTy(), 16);
15112 SDValue Val = DAG.getLoad(VT, dl, DAG.getEntryNode(), CPIdx,
15113 MachinePointerInfo::getConstantPool(),
15114 false, false, false, 16);
15115 // If the magnitude operand wasn't a constant, we need to AND out the sign.
15116 if (!isa<ConstantFPSDNode>(Op0))
15117 Val = DAG.getNode(X86ISD::FAND, dl, VT, Op0, Val);
15119 // OR the magnitude value with the sign bit.
15120 return DAG.getNode(X86ISD::FOR, dl, VT, Val, SignBit);
15123 static SDValue LowerFGETSIGN(SDValue Op, SelectionDAG &DAG) {
15124 SDValue N0 = Op.getOperand(0);
15126 MVT VT = Op.getSimpleValueType();
15128 // Lower ISD::FGETSIGN to (AND (X86ISD::FGETSIGNx86 ...) 1).
15129 SDValue xFGETSIGN = DAG.getNode(X86ISD::FGETSIGNx86, dl, VT, N0,
15130 DAG.getConstant(1, VT));
15131 return DAG.getNode(ISD::AND, dl, VT, xFGETSIGN, DAG.getConstant(1, VT));
15134 // Check whether an OR'd tree is PTEST-able.
15135 static SDValue LowerVectorAllZeroTest(SDValue Op, const X86Subtarget *Subtarget,
15136 SelectionDAG &DAG) {
15137 assert(Op.getOpcode() == ISD::OR && "Only check OR'd tree.");
15139 if (!Subtarget->hasSSE41())
15142 if (!Op->hasOneUse())
15145 SDNode *N = Op.getNode();
15148 SmallVector<SDValue, 8> Opnds;
15149 DenseMap<SDValue, unsigned> VecInMap;
15150 SmallVector<SDValue, 8> VecIns;
15151 EVT VT = MVT::Other;
15153 // Recognize a special case where a vector is cast into a wide integer to test whether it is all zeros.
15155 Opnds.push_back(N->getOperand(0));
15156 Opnds.push_back(N->getOperand(1));
15158 for (unsigned Slot = 0, e = Opnds.size(); Slot < e; ++Slot) {
15159 SmallVectorImpl<SDValue>::const_iterator I = Opnds.begin() + Slot;
15160 // BFS traverse all OR'd operands.
15161 if (I->getOpcode() == ISD::OR) {
15162 Opnds.push_back(I->getOperand(0));
15163 Opnds.push_back(I->getOperand(1));
15164 // Re-evaluate the number of nodes to be traversed.
15165 e += 2; // 2 more nodes (LHS and RHS) are pushed.
15169 // Quit if this is not an EXTRACT_VECTOR_ELT.
15170 if (I->getOpcode() != ISD::EXTRACT_VECTOR_ELT)
15173 // Quit if the index is not a constant.
15174 SDValue Idx = I->getOperand(1);
15175 if (!isa<ConstantSDNode>(Idx))
15178 SDValue ExtractedFromVec = I->getOperand(0);
15179 DenseMap<SDValue, unsigned>::iterator M = VecInMap.find(ExtractedFromVec);
15180 if (M == VecInMap.end()) {
15181 VT = ExtractedFromVec.getValueType();
15182 // Quit if not 128/256-bit vector.
15183 if (!VT.is128BitVector() && !VT.is256BitVector())
15185 // Quit if not the same type.
15186 if (VecInMap.begin() != VecInMap.end() &&
15187 VT != VecInMap.begin()->first.getValueType())
15189 M = VecInMap.insert(std::make_pair(ExtractedFromVec, 0)).first;
15190 VecIns.push_back(ExtractedFromVec);
15192 M->second |= 1U << cast<ConstantSDNode>(Idx)->getZExtValue();
15195 assert((VT.is128BitVector() || VT.is256BitVector()) &&
15196 "Not extracted from 128-/256-bit vector.");
15198 unsigned FullMask = (1U << VT.getVectorNumElements()) - 1U;
15200 for (DenseMap<SDValue, unsigned>::const_iterator
15201 I = VecInMap.begin(), E = VecInMap.end(); I != E; ++I) {
15202 // Quit if not all elements are used.
15203 if (I->second != FullMask)
15207 EVT TestVT = VT.is128BitVector() ? MVT::v2i64 : MVT::v4i64;
15209 // Cast all vectors into TestVT for PTEST.
15210 for (unsigned i = 0, e = VecIns.size(); i < e; ++i)
15211 VecIns[i] = DAG.getNode(ISD::BITCAST, DL, TestVT, VecIns[i]);
15213 // If more than one full vector is evaluated, OR them together before the PTEST.
15214 for (unsigned Slot = 0, e = VecIns.size(); e - Slot > 1; Slot += 2, e += 1) {
15215 // Each iteration will OR 2 nodes and append the result until there is only
15216 // 1 node left, i.e. the final OR'd value of all vectors.
15217 SDValue LHS = VecIns[Slot];
15218 SDValue RHS = VecIns[Slot + 1];
15219 VecIns.push_back(DAG.getNode(ISD::OR, DL, TestVT, LHS, RHS));
15222 return DAG.getNode(X86ISD::PTEST, DL, MVT::i32,
15223 VecIns.back(), VecIns.back());
15226 /// \brief Return true if \c Op has a use that doesn't just read flags.
15227 static bool hasNonFlagsUse(SDValue Op) {
15228 for (SDNode::use_iterator UI = Op->use_begin(), UE = Op->use_end(); UI != UE;
15230 SDNode *User = *UI;
15231 unsigned UOpNo = UI.getOperandNo();
15232 if (User->getOpcode() == ISD::TRUNCATE && User->hasOneUse()) {
15233 // Look past the truncate.
15234 UOpNo = User->use_begin().getOperandNo();
15235 User = *User->use_begin();
15238 if (User->getOpcode() != ISD::BRCOND && User->getOpcode() != ISD::SETCC &&
15239 !(User->getOpcode() == ISD::SELECT && UOpNo == 0))
15245 /// Emit nodes that will be selected as "test Op0,Op0", or something equivalent.
15247 SDValue X86TargetLowering::EmitTest(SDValue Op, unsigned X86CC, SDLoc dl,
15248 SelectionDAG &DAG) const {
15249 if (Op.getValueType() == MVT::i1) {
15250 SDValue ExtOp = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i8, Op);
15251 return DAG.getNode(X86ISD::CMP, dl, MVT::i32, ExtOp,
15252 DAG.getConstant(0, MVT::i8));
15254 // CF and OF aren't always set the way we want. Determine which
15255 // of these we need.
15256 bool NeedCF = false;
15257 bool NeedOF = false;
15260 case X86::COND_A: case X86::COND_AE:
15261 case X86::COND_B: case X86::COND_BE:
15264 case X86::COND_G: case X86::COND_GE:
15265 case X86::COND_L: case X86::COND_LE:
15266 case X86::COND_O: case X86::COND_NO: {
15267 // Check if we really need to set the
15268 // Overflow flag. If NoSignedWrap is present
15269 // that is not actually needed.
15270 switch (Op->getOpcode()) {
15275 const BinaryWithFlagsSDNode *BinNode =
15276 cast<BinaryWithFlagsSDNode>(Op.getNode());
15277 if (BinNode->hasNoSignedWrap())
15287 // See if we can use the EFLAGS value from the operand instead of
15288 // doing a separate TEST. TEST always sets OF and CF to 0, so unless
15289 // we prove that the arithmetic won't overflow, we can't use OF or CF.
15290 if (Op.getResNo() != 0 || NeedOF || NeedCF) {
15291 // Emit a CMP with 0, which is the TEST pattern.
15292 //if (Op.getValueType() == MVT::i1)
15293 // return DAG.getNode(X86ISD::CMP, dl, MVT::i1, Op,
15294 // DAG.getConstant(0, MVT::i1));
15295 return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op,
15296 DAG.getConstant(0, Op.getValueType()));
15298 unsigned Opcode = 0;
15299 unsigned NumOperands = 0;
15301 // Truncate operations may prevent the merge of the SETCC instruction
15302 // and the arithmetic instruction before it. Attempt to truncate the operands
15303 // of the arithmetic instruction and use a reduced bit-width instruction.
15304 bool NeedTruncation = false;
15305 SDValue ArithOp = Op;
15306 if (Op->getOpcode() == ISD::TRUNCATE && Op->hasOneUse()) {
15307 SDValue Arith = Op->getOperand(0);
15308 // Both the trunc and the arithmetic op need to have one user each.
15309 if (Arith->hasOneUse())
15310 switch (Arith.getOpcode()) {
15317 NeedTruncation = true;
15323 // NOTICE: In the code below we use ArithOp to hold the arithmetic operation
15324 // which may be the result of a CAST. We use the variable 'Op', which is the
15325 // non-casted variable when we check for possible users.
15326 switch (ArithOp.getOpcode()) {
15328 // Due to an isel shortcoming, be conservative if this add is likely to be
15329 // selected as part of a load-modify-store instruction. When the root node
15330 // in a match is a store, isel doesn't know how to remap non-chain non-flag
15331 // uses of other nodes in the match, such as the ADD in this case. This
15332 // leads to the ADD being left around and reselected, with the result being
15333 // two adds in the output. Alas, even if none of our users are stores, that
15334 // doesn't prove we're O.K. Ergo, if we have any parents that aren't
15335 // CopyToReg or SETCC, eschew INC/DEC. A better fix seems to require
15336 // climbing the DAG back to the root, and it doesn't seem to be worth the
15338 for (SDNode::use_iterator UI = Op.getNode()->use_begin(),
15339 UE = Op.getNode()->use_end(); UI != UE; ++UI)
15340 if (UI->getOpcode() != ISD::CopyToReg &&
15341 UI->getOpcode() != ISD::SETCC &&
15342 UI->getOpcode() != ISD::STORE)
15345 if (ConstantSDNode *C =
15346 dyn_cast<ConstantSDNode>(ArithOp.getNode()->getOperand(1))) {
15347 // An add of one will be selected as an INC.
15348 if (C->getAPIntValue() == 1 && !Subtarget->slowIncDec()) {
15349 Opcode = X86ISD::INC;
15354 // An add of negative one (subtract of one) will be selected as a DEC.
15355 if (C->getAPIntValue().isAllOnesValue() && !Subtarget->slowIncDec()) {
15356 Opcode = X86ISD::DEC;
15362 // Otherwise use a regular EFLAGS-setting add.
15363 Opcode = X86ISD::ADD;
15368 // If we have a constant logical shift that's only used in a comparison
15369 // against zero turn it into an equivalent AND. This allows turning it into
15370 // a TEST instruction later.
15371 if ((X86CC == X86::COND_E || X86CC == X86::COND_NE) && Op->hasOneUse() &&
15372 isa<ConstantSDNode>(Op->getOperand(1)) && !hasNonFlagsUse(Op)) {
15373 EVT VT = Op.getValueType();
15374 unsigned BitWidth = VT.getSizeInBits();
15375 unsigned ShAmt = Op->getConstantOperandVal(1);
15376 if (ShAmt >= BitWidth) // Avoid undefined shifts.
15378 APInt Mask = ArithOp.getOpcode() == ISD::SRL
15379 ? APInt::getHighBitsSet(BitWidth, BitWidth - ShAmt)
15380 : APInt::getLowBitsSet(BitWidth, BitWidth - ShAmt);
15381 if (!Mask.isSignedIntN(32)) // Avoid large immediates.
15383 SDValue New = DAG.getNode(ISD::AND, dl, VT, Op->getOperand(0),
15384 DAG.getConstant(Mask, VT));
15385 DAG.ReplaceAllUsesWith(Op, New);
15391 // If the primary result of the 'and' isn't used, don't bother using X86ISD::AND,
15392 // because a TEST instruction will be better.
15393 if (!hasNonFlagsUse(Op))
15399 // Due to the ISEL shortcoming noted above, be conservative if this op is
15400 // likely to be selected as part of a load-modify-store instruction.
15401 for (SDNode::use_iterator UI = Op.getNode()->use_begin(),
15402 UE = Op.getNode()->use_end(); UI != UE; ++UI)
15403 if (UI->getOpcode() == ISD::STORE)
15406 // Otherwise use a regular EFLAGS-setting instruction.
15407 switch (ArithOp.getOpcode()) {
15408 default: llvm_unreachable("unexpected operator!");
15409 case ISD::SUB: Opcode = X86ISD::SUB; break;
15410 case ISD::XOR: Opcode = X86ISD::XOR; break;
15411 case ISD::AND: Opcode = X86ISD::AND; break;
15413 if (!NeedTruncation && (X86CC == X86::COND_E || X86CC == X86::COND_NE)) {
15414 SDValue EFLAGS = LowerVectorAllZeroTest(Op, Subtarget, DAG);
15415 if (EFLAGS.getNode())
15418 Opcode = X86ISD::OR;
15432 return SDValue(Op.getNode(), 1);
15438 // If we found that truncation is beneficial, perform the truncation and update the comparison.
15440 if (NeedTruncation) {
15441 EVT VT = Op.getValueType();
15442 SDValue WideVal = Op->getOperand(0);
15443 EVT WideVT = WideVal.getValueType();
15444 unsigned ConvertedOp = 0;
15445 // Use a target machine opcode to prevent further DAGCombine
15446 // optimizations that may separate the arithmetic operations
15447 // from the setcc node.
15448 switch (WideVal.getOpcode()) {
15450 case ISD::ADD: ConvertedOp = X86ISD::ADD; break;
15451 case ISD::SUB: ConvertedOp = X86ISD::SUB; break;
15452 case ISD::AND: ConvertedOp = X86ISD::AND; break;
15453 case ISD::OR: ConvertedOp = X86ISD::OR; break;
15454 case ISD::XOR: ConvertedOp = X86ISD::XOR; break;
15458 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
15459 if (TLI.isOperationLegal(WideVal.getOpcode(), WideVT)) {
15460 SDValue V0 = DAG.getNode(ISD::TRUNCATE, dl, VT, WideVal.getOperand(0));
15461 SDValue V1 = DAG.getNode(ISD::TRUNCATE, dl, VT, WideVal.getOperand(1));
15462 Op = DAG.getNode(ConvertedOp, dl, VT, V0, V1);
15468 // Emit a CMP with 0, which is the TEST pattern.
15469 return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op,
15470 DAG.getConstant(0, Op.getValueType()));
15472 SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
15473 SmallVector<SDValue, 4> Ops;
15474 for (unsigned i = 0; i != NumOperands; ++i)
15475 Ops.push_back(Op.getOperand(i));
15477 SDValue New = DAG.getNode(Opcode, dl, VTs, Ops);
15478 DAG.ReplaceAllUsesWith(Op, New);
15479 return SDValue(New.getNode(), 1);
15482 /// Emit nodes that will be selected as "cmp Op0,Op1", or something equivalent.
15484 SDValue X86TargetLowering::EmitCmp(SDValue Op0, SDValue Op1, unsigned X86CC,
15485 SDLoc dl, SelectionDAG &DAG) const {
15486 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op1)) {
15487 if (C->getAPIntValue() == 0)
15488 return EmitTest(Op0, X86CC, dl, DAG);
15490 if (Op0.getValueType() == MVT::i1)
15491 llvm_unreachable("Unexpected comparison operation for MVT::i1 operands");
15494 if ((Op0.getValueType() == MVT::i8 || Op0.getValueType() == MVT::i16 ||
15495 Op0.getValueType() == MVT::i32 || Op0.getValueType() == MVT::i64)) {
15496 // Do the comparison at i32 if it's smaller, besides the Atom case.
15497 // This avoids subregister aliasing issues. Keep the smaller reference
15498 // if we're optimizing for size, however, as that'll allow better folding
15499 // of memory operations.
15500 if (Op0.getValueType() != MVT::i32 && Op0.getValueType() != MVT::i64 &&
15501 !DAG.getMachineFunction().getFunction()->hasFnAttribute(
15502 Attribute::MinSize) &&
15503 !Subtarget->isAtom()) {
15504 unsigned ExtendOp =
15505 isX86CCUnsigned(X86CC) ? ISD::ZERO_EXTEND : ISD::SIGN_EXTEND;
15506 Op0 = DAG.getNode(ExtendOp, dl, MVT::i32, Op0);
15507 Op1 = DAG.getNode(ExtendOp, dl, MVT::i32, Op1);
15509 // Use SUB instead of CMP to enable CSE between SUB and CMP.
15510 SDVTList VTs = DAG.getVTList(Op0.getValueType(), MVT::i32);
15511 SDValue Sub = DAG.getNode(X86ISD::SUB, dl, VTs,
15513 return SDValue(Sub.getNode(), 1);
15515 return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op0, Op1);
15518 /// Convert a comparison if required by the subtarget.
15519 SDValue X86TargetLowering::ConvertCmpIfNecessary(SDValue Cmp,
15520 SelectionDAG &DAG) const {
15521 // If the subtarget does not support the FUCOMI instruction, floating-point
15522 // comparisons have to be converted.
15523 if (Subtarget->hasCMov() ||
15524 Cmp.getOpcode() != X86ISD::CMP ||
15525 !Cmp.getOperand(0).getValueType().isFloatingPoint() ||
15526 !Cmp.getOperand(1).getValueType().isFloatingPoint())
15529 // The instruction selector will select an FUCOM instruction instead of
15530 // FUCOMI, which writes the comparison result to FPSW instead of EFLAGS. Hence
15531 // build an SDNode sequence that transfers the result from FPSW into EFLAGS:
15532 // (X86sahf (trunc (srl (X86fp_stsw (trunc (X86cmp ...)), 8))))
15534 SDValue TruncFPSW = DAG.getNode(ISD::TRUNCATE, dl, MVT::i16, Cmp);
15535 SDValue FNStSW = DAG.getNode(X86ISD::FNSTSW16r, dl, MVT::i16, TruncFPSW);
15536 SDValue Srl = DAG.getNode(ISD::SRL, dl, MVT::i16, FNStSW,
15537 DAG.getConstant(8, MVT::i8));
15538 SDValue TruncSrl = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Srl);
15539 return DAG.getNode(X86ISD::SAHF, dl, MVT::i32, TruncSrl);
15542 /// The minimum architected relative accuracy is 2^-12. We need one
15543 /// Newton-Raphson step to have a good float result (24 bits of precision).
15544 SDValue X86TargetLowering::getRsqrtEstimate(SDValue Op,
15545 DAGCombinerInfo &DCI,
15546 unsigned &RefinementSteps,
15547 bool &UseOneConstNR) const {
15548 // FIXME: We should use instruction latency models to calculate the cost of
15549 // each potential sequence, but this is very hard to do reliably because
15550 // at least Intel's Core* chips have variable timing based on the number of
15551 // significant digits in the divisor and/or sqrt operand.
15552 if (!Subtarget->useSqrtEst())
15555 EVT VT = Op.getValueType();
15557 // SSE1 has rsqrtss and rsqrtps.
15558 // TODO: Add support for AVX512 (v16f32).
15559 // It is likely not profitable to do this for f64 because a double-precision
15560 // rsqrt estimate with refinement on x86 prior to FMA requires at least 16
15561 // instructions: convert to single, rsqrtss, convert back to double, refine
15562 // (3 steps = at least 13 insts). If an 'rsqrtsd' variant was added to the ISA
15563 // along with FMA, this could be a throughput win.
15564 if ((Subtarget->hasSSE1() && (VT == MVT::f32 || VT == MVT::v4f32)) ||
15565 (Subtarget->hasAVX() && VT == MVT::v8f32)) {
15566 RefinementSteps = 1;
15567 UseOneConstNR = false;
15568 return DCI.DAG.getNode(X86ISD::FRSQRT, SDLoc(Op), VT, Op);
15573 /// The minimum architected relative accuracy is 2^-12. We need one
15574 /// Newton-Raphson step to have a good float result (24 bits of precision).
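/// For reference (illustrative, not part of the original comment): one
/// Newton-Raphson refinement step for the reciprocal is
///   X1 = X0 * (2 - A * X0),
/// which converges toward 1/A.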
15575 SDValue X86TargetLowering::getRecipEstimate(SDValue Op,
15576 DAGCombinerInfo &DCI,
15577 unsigned &RefinementSteps) const {
15578 // FIXME: We should use instruction latency models to calculate the cost of
15579 // each potential sequence, but this is very hard to do reliably because
15580 // at least Intel's Core* chips have variable timing based on the number of
15581 // significant digits in the divisor.
15582 if (!Subtarget->useReciprocalEst())
15585 EVT VT = Op.getValueType();
15587 // SSE1 has rcpss and rcpps. AVX adds a 256-bit variant for rcpps.
15588 // TODO: Add support for AVX512 (v16f32).
15589 // It is likely not profitable to do this for f64 because a double-precision
15590 // reciprocal estimate with refinement on x86 prior to FMA requires
15591 // 15 instructions: convert to single, rcpss, convert back to double, refine
15592 // (3 steps = 12 insts). If an 'rcpsd' variant was added to the ISA
15593 // along with FMA, this could be a throughput win.
15594 if ((Subtarget->hasSSE1() && (VT == MVT::f32 || VT == MVT::v4f32)) ||
15595 (Subtarget->hasAVX() && VT == MVT::v8f32)) {
15596 RefinementSteps = ReciprocalEstimateRefinementSteps;
15597 return DCI.DAG.getNode(X86ISD::FRCP, SDLoc(Op), VT, Op);
15602 static bool isAllOnes(SDValue V) {
15603 ConstantSDNode *C = dyn_cast<ConstantSDNode>(V);
15604 return C && C->isAllOnesValue();
15607 /// LowerToBT - Result of 'and' is compared against zero. Turn it into a BT node
15608 /// if possible.
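/// Illustrative examples of the rewrite performed below:
///   (seteq (and X, (shl 1, N)), 0)  -->  (X86bt X, N) + SETAE   (CF == 0)
///   (setne (and X, (shl 1, N)), 0)  -->  (X86bt X, N) + SETB    (CF == 1)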
15609 SDValue X86TargetLowering::LowerToBT(SDValue And, ISD::CondCode CC,
15610 SDLoc dl, SelectionDAG &DAG) const {
15611 SDValue Op0 = And.getOperand(0);
15612 SDValue Op1 = And.getOperand(1);
15613 if (Op0.getOpcode() == ISD::TRUNCATE)
15614 Op0 = Op0.getOperand(0);
15615 if (Op1.getOpcode() == ISD::TRUNCATE)
15616 Op1 = Op1.getOperand(0);
SDValue LHS, RHS;
15619 if (Op1.getOpcode() == ISD::SHL)
15620 std::swap(Op0, Op1);
15621 if (Op0.getOpcode() == ISD::SHL) {
15622 if (ConstantSDNode *And00C = dyn_cast<ConstantSDNode>(Op0.getOperand(0)))
15623 if (And00C->getZExtValue() == 1) {
15624 // If we looked past a truncate, check that it's only truncating away
// sign bits.
15626 unsigned BitWidth = Op0.getValueSizeInBits();
15627 unsigned AndBitWidth = And.getValueSizeInBits();
15628 if (BitWidth > AndBitWidth) {
APInt Zeros, Ones;
15630 DAG.computeKnownBits(Op0, Zeros, Ones);
15631 if (Zeros.countLeadingOnes() < BitWidth - AndBitWidth)
15635 RHS = Op0.getOperand(1);
15637 } else if (Op1.getOpcode() == ISD::Constant) {
15638 ConstantSDNode *AndRHS = cast<ConstantSDNode>(Op1);
15639 uint64_t AndRHSVal = AndRHS->getZExtValue();
15640 SDValue AndLHS = Op0;
15642 if (AndRHSVal == 1 && AndLHS.getOpcode() == ISD::SRL) {
15643 LHS = AndLHS.getOperand(0);
15644 RHS = AndLHS.getOperand(1);
15647 // Use BT if the immediate can't be encoded in a TEST instruction.
15648 if (!isUInt<32>(AndRHSVal) && isPowerOf2_64(AndRHSVal)) {
15650 RHS = DAG.getConstant(Log2_64_Ceil(AndRHSVal), LHS.getValueType());
15654 if (LHS.getNode()) {
15655 // If LHS is i8, promote it to i32 with any_extend. There is no i8 BT
15656 // instruction. Since the shift amount is in-range-or-undefined, we know
15657 // that doing a bittest on the i32 value is ok. We extend to i32 because
15658 // the encoding for the i16 version is larger than the i32 version.
15659 // Also promote i16 to i32 for performance / code size reason.
15660 if (LHS.getValueType() == MVT::i8 ||
15661 LHS.getValueType() == MVT::i16)
15662 LHS = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, LHS);
15664 // If the operand types disagree, extend the shift amount to match. Since
15665 // BT ignores high bits (like shifts) we can use anyextend.
15666 if (LHS.getValueType() != RHS.getValueType())
15667 RHS = DAG.getNode(ISD::ANY_EXTEND, dl, LHS.getValueType(), RHS);
15669 SDValue BT = DAG.getNode(X86ISD::BT, dl, MVT::i32, LHS, RHS);
15670 X86::CondCode Cond = CC == ISD::SETEQ ? X86::COND_AE : X86::COND_B;
15671 return DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
15672 DAG.getConstant(Cond, MVT::i8), BT);
15678 /// \brief - Turns an ISD::CondCode into a value suitable for SSE floating point
/// mask CMPs.
15680 static int translateX86FSETCC(ISD::CondCode SetCCOpcode, SDValue &Op0,
15685 // SSE Condition code mapping:
//  0 - EQ
//  1 - LT
//  2 - LE
//  3 - UNORD
//  4 - NEQ
//  5 - NLT
//  6 - NLE
//  7 - ORD
15694 switch (SetCCOpcode) {
15695 default: llvm_unreachable("Unexpected SETCC condition");
15697 case ISD::SETEQ: SSECC = 0; break;
15699 case ISD::SETGT: Swap = true; // Fallthrough
15701 case ISD::SETOLT: SSECC = 1; break;
15703 case ISD::SETGE: Swap = true; // Fallthrough
15705 case ISD::SETOLE: SSECC = 2; break;
15706 case ISD::SETUO: SSECC = 3; break;
15708 case ISD::SETNE: SSECC = 4; break;
15709 case ISD::SETULE: Swap = true; // Fallthrough
15710 case ISD::SETUGE: SSECC = 5; break;
15711 case ISD::SETULT: Swap = true; // Fallthrough
15712 case ISD::SETUGT: SSECC = 6; break;
15713 case ISD::SETO: SSECC = 7; break;
15715 case ISD::SETONE: SSECC = 8; break;
15718 std::swap(Op0, Op1);
15723 // Lower256IntVSETCC - Break a 256-bit integer VSETCC into two new 128-bit
15724 // ones, and then concatenate the result back.
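// Illustrative example: a v8i32 setcc is lowered as two v4i32 setcc nodes on
// the extracted low/high halves, and CONCAT_VECTORS rebuilds the v8i32 result.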
15725 static SDValue Lower256IntVSETCC(SDValue Op, SelectionDAG &DAG) {
15726 MVT VT = Op.getSimpleValueType();
15728 assert(VT.is256BitVector() && Op.getOpcode() == ISD::SETCC &&
15729 "Unsupported value type for operation");
15731 unsigned NumElems = VT.getVectorNumElements();
15733 SDValue CC = Op.getOperand(2);
15735 // Extract the LHS vectors
15736 SDValue LHS = Op.getOperand(0);
15737 SDValue LHS1 = Extract128BitVector(LHS, 0, DAG, dl);
15738 SDValue LHS2 = Extract128BitVector(LHS, NumElems/2, DAG, dl);
15740 // Extract the RHS vectors
15741 SDValue RHS = Op.getOperand(1);
15742 SDValue RHS1 = Extract128BitVector(RHS, 0, DAG, dl);
15743 SDValue RHS2 = Extract128BitVector(RHS, NumElems/2, DAG, dl);
15745 // Issue the operation on the smaller types and concatenate the result back
15746 MVT EltVT = VT.getVectorElementType();
15747 MVT NewVT = MVT::getVectorVT(EltVT, NumElems/2);
15748 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT,
15749 DAG.getNode(Op.getOpcode(), dl, NewVT, LHS1, RHS1, CC),
15750 DAG.getNode(Op.getOpcode(), dl, NewVT, LHS2, RHS2, CC));
15753 static SDValue LowerIntVSETCC_AVX512(SDValue Op, SelectionDAG &DAG,
15754 const X86Subtarget *Subtarget) {
15755 SDValue Op0 = Op.getOperand(0);
15756 SDValue Op1 = Op.getOperand(1);
15757 SDValue CC = Op.getOperand(2);
15758 MVT VT = Op.getSimpleValueType();
15761 assert(Op0.getValueType().getVectorElementType().getSizeInBits() >= 8 &&
15762 Op.getValueType().getScalarType() == MVT::i1 &&
15763 "Cannot set masked compare for this operation");
15765 ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get();
15767 bool Unsigned = false;
15770 switch (SetCCOpcode) {
15771 default: llvm_unreachable("Unexpected SETCC condition");
15772 case ISD::SETNE: SSECC = 4; break;
15773 case ISD::SETEQ: Opc = X86ISD::PCMPEQM; break;
15774 case ISD::SETUGT: SSECC = 6; Unsigned = true; break;
15775 case ISD::SETLT: Swap = true; //fall-through
15776 case ISD::SETGT: Opc = X86ISD::PCMPGTM; break;
15777 case ISD::SETULT: SSECC = 1; Unsigned = true; break;
15778 case ISD::SETUGE: SSECC = 5; Unsigned = true; break; //NLT
15779 case ISD::SETGE: Swap = true; SSECC = 2; break; // LE + swap
15780 case ISD::SETULE: Unsigned = true; //fall-through
15781 case ISD::SETLE: SSECC = 2; break;
15785 std::swap(Op0, Op1);
15787 return DAG.getNode(Opc, dl, VT, Op0, Op1);
15788 Opc = Unsigned ? X86ISD::CMPMU: X86ISD::CMPM;
15789 return DAG.getNode(Opc, dl, VT, Op0, Op1,
15790 DAG.getConstant(SSECC, MVT::i8));
15793 /// \brief Try to turn a VSETULT into a VSETULE by modifying its second
15794 /// operand \p Op1. If non-trivial (for example because it's not constant)
15795 /// return an empty value.
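/// Illustrative: x u< C is rewritten as x u<= (C - 1), which is only valid
/// when no element of C is zero -- hence the underflow check below.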
15796 static SDValue ChangeVSETULTtoVSETULE(SDLoc dl, SDValue Op1, SelectionDAG &DAG)
15798 BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(Op1.getNode());
15802 MVT VT = Op1.getSimpleValueType();
15803 MVT EVT = VT.getVectorElementType();
15804 unsigned n = VT.getVectorNumElements();
15805 SmallVector<SDValue, 8> ULTOp1;
15807 for (unsigned i = 0; i < n; ++i) {
15808 ConstantSDNode *Elt = dyn_cast<ConstantSDNode>(BV->getOperand(i));
15809 if (!Elt || Elt->isOpaque() || Elt->getValueType(0) != EVT)
15812 // Avoid underflow.
15813 APInt Val = Elt->getAPIntValue();
15817 ULTOp1.push_back(DAG.getConstant(Val - 1, EVT));
15820 return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, ULTOp1);
15823 static SDValue LowerVSETCC(SDValue Op, const X86Subtarget *Subtarget,
15824 SelectionDAG &DAG) {
15825 SDValue Op0 = Op.getOperand(0);
15826 SDValue Op1 = Op.getOperand(1);
15827 SDValue CC = Op.getOperand(2);
15828 MVT VT = Op.getSimpleValueType();
15829 ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get();
15830 bool isFP = Op.getOperand(1).getSimpleValueType().isFloatingPoint();
15835 MVT EltVT = Op0.getSimpleValueType().getVectorElementType();
15836 assert(EltVT == MVT::f32 || EltVT == MVT::f64);
15839 unsigned SSECC = translateX86FSETCC(SetCCOpcode, Op0, Op1);
15840 unsigned Opc = X86ISD::CMPP;
15841 if (Subtarget->hasAVX512() && VT.getVectorElementType() == MVT::i1) {
15842 assert(VT.getVectorNumElements() <= 16);
15843 Opc = X86ISD::CMPM;
15845 // In the two special cases we can't handle, emit two comparisons.
15848 unsigned CombineOpc;
15849 if (SetCCOpcode == ISD::SETUEQ) {
15850 CC0 = 3; CC1 = 0; CombineOpc = ISD::OR;
15852 assert(SetCCOpcode == ISD::SETONE);
15853 CC0 = 7; CC1 = 4; CombineOpc = ISD::AND;
15856 SDValue Cmp0 = DAG.getNode(Opc, dl, VT, Op0, Op1,
15857 DAG.getConstant(CC0, MVT::i8));
15858 SDValue Cmp1 = DAG.getNode(Opc, dl, VT, Op0, Op1,
15859 DAG.getConstant(CC1, MVT::i8));
15860 return DAG.getNode(CombineOpc, dl, VT, Cmp0, Cmp1);
15862 // Handle all other FP comparisons here.
15863 return DAG.getNode(Opc, dl, VT, Op0, Op1,
15864 DAG.getConstant(SSECC, MVT::i8));
15867 // Break 256-bit integer vector compare into smaller ones.
15868 if (VT.is256BitVector() && !Subtarget->hasInt256())
15869 return Lower256IntVSETCC(Op, DAG);
15871 bool MaskResult = (VT.getVectorElementType() == MVT::i1);
15872 EVT OpVT = Op1.getValueType();
15873 if (Subtarget->hasAVX512()) {
15874 if (Op1.getValueType().is512BitVector() ||
15875 (Subtarget->hasBWI() && Subtarget->hasVLX()) ||
15876 (MaskResult && OpVT.getVectorElementType().getSizeInBits() >= 32))
15877 return LowerIntVSETCC_AVX512(Op, DAG, Subtarget);
15879 // In the AVX-512 architecture, setcc returns a mask with i1 elements,
15880 // but there is no compare instruction for i8 and i16 elements in KNL.
15881 // We are not dealing with 512-bit operands in this case; these
15882 // types are illegal.
15884 (OpVT.getVectorElementType().getSizeInBits() < 32 &&
15885 OpVT.getVectorElementType().getSizeInBits() >= 8))
15886 return DAG.getNode(ISD::TRUNCATE, dl, VT,
15887 DAG.getNode(ISD::SETCC, dl, OpVT, Op0, Op1, CC));
15890 // We are handling one of the integer comparisons here. Since SSE only has
15891 // GT and EQ comparisons for integer, swapping operands and multiple
15892 // operations may be required for some comparisons.
15894 bool Swap = false, Invert = false, FlipSigns = false, MinMax = false;
15895 bool Subus = false;
15897 switch (SetCCOpcode) {
15898 default: llvm_unreachable("Unexpected SETCC condition");
15899 case ISD::SETNE: Invert = true;
15900 case ISD::SETEQ: Opc = X86ISD::PCMPEQ; break;
15901 case ISD::SETLT: Swap = true;
15902 case ISD::SETGT: Opc = X86ISD::PCMPGT; break;
15903 case ISD::SETGE: Swap = true;
15904 case ISD::SETLE: Opc = X86ISD::PCMPGT;
15905 Invert = true; break;
15906 case ISD::SETULT: Swap = true;
15907 case ISD::SETUGT: Opc = X86ISD::PCMPGT;
15908 FlipSigns = true; break;
15909 case ISD::SETUGE: Swap = true;
15910 case ISD::SETULE: Opc = X86ISD::PCMPGT;
15911 FlipSigns = true; Invert = true; break;
15914 // Special case: Use min/max operations for SETULE/SETUGE
15915 MVT VET = VT.getVectorElementType();
15917 (Subtarget->hasSSE41() && (VET >= MVT::i8 && VET <= MVT::i32))
15918 || (Subtarget->hasSSE2() && (VET == MVT::i8));
15921 switch (SetCCOpcode) {
15923 case ISD::SETULE: Opc = X86ISD::UMIN; MinMax = true; break;
15924 case ISD::SETUGE: Opc = X86ISD::UMAX; MinMax = true; break;
15927 if (MinMax) { Swap = false; Invert = false; FlipSigns = false; }
15930 bool hasSubus = Subtarget->hasSSE2() && (VET == MVT::i8 || VET == MVT::i16);
15931 if (!MinMax && hasSubus) {
15932 // As another special case, use PSUBUS[BW] when it's profitable. E.g. for
//    Op0 u<= Op1:
15934 // t = psubus Op0, Op1
15935 // pcmpeq t, <0..0>
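// Note for readers: PSUBUS saturates at zero, so a lane of (psubus Op0, Op1)
// is zero exactly when Op0 u<= Op1 in that lane; the compare against zero
// therefore yields the desired u<= mask.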
15936 switch (SetCCOpcode) {
15938 case ISD::SETULT: {
15939 // If the comparison is against a constant we can turn this into a
15940 // setule. With psubus, setule does not require a swap. This is
15941 // beneficial because the constant in the register is no longer
15942 // clobbered as the destination, so it can be hoisted out of a loop.
15943 // Only do this pre-AVX since vpcmp* is no longer destructive.
15944 if (Subtarget->hasAVX())
15946 SDValue ULEOp1 = ChangeVSETULTtoVSETULE(dl, Op1, DAG);
15947 if (ULEOp1.getNode()) {
15949 Subus = true; Invert = false; Swap = false;
15953 // Psubus is better than flip-sign because it requires no inversion.
15954 case ISD::SETUGE: Subus = true; Invert = false; Swap = true; break;
15955 case ISD::SETULE: Subus = true; Invert = false; Swap = false; break;
15959 Opc = X86ISD::SUBUS;
15965 std::swap(Op0, Op1);
15967 // Check that the operation in question is available (most are plain SSE2,
15968 // but PCMPGTQ and PCMPEQQ have different requirements).
15969 if (VT == MVT::v2i64) {
15970 if (Opc == X86ISD::PCMPGT && !Subtarget->hasSSE42()) {
15971 assert(Subtarget->hasSSE2() && "Don't know how to lower!");
15973 // First cast everything to the right type.
15974 Op0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op0);
15975 Op1 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op1);
15977 // Since SSE has no unsigned integer comparisons, we need to flip the sign
15978 // bits of the inputs before performing those operations. The lower
15979 // compare is always unsigned.
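// Illustrative identity behind the sign flip: for 32-bit lanes,
//   x u< y  <=>  (x ^ 0x80000000) s< (y ^ 0x80000000).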
15982 SB = DAG.getConstant(0x80000000U, MVT::v4i32);
15984 SDValue Sign = DAG.getConstant(0x80000000U, MVT::i32);
15985 SDValue Zero = DAG.getConstant(0x00000000U, MVT::i32);
15986 SB = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
15987 Sign, Zero, Sign, Zero);
15989 Op0 = DAG.getNode(ISD::XOR, dl, MVT::v4i32, Op0, SB);
15990 Op1 = DAG.getNode(ISD::XOR, dl, MVT::v4i32, Op1, SB);
15992 // Emulate PCMPGTQ with (hi1 > hi2) | ((hi1 == hi2) & (lo1 > lo2))
15993 SDValue GT = DAG.getNode(X86ISD::PCMPGT, dl, MVT::v4i32, Op0, Op1);
15994 SDValue EQ = DAG.getNode(X86ISD::PCMPEQ, dl, MVT::v4i32, Op0, Op1);
15996 // Create masks for only the low parts/high parts of the 64 bit integers.
15997 static const int MaskHi[] = { 1, 1, 3, 3 };
15998 static const int MaskLo[] = { 0, 0, 2, 2 };
15999 SDValue EQHi = DAG.getVectorShuffle(MVT::v4i32, dl, EQ, EQ, MaskHi);
16000 SDValue GTLo = DAG.getVectorShuffle(MVT::v4i32, dl, GT, GT, MaskLo);
16001 SDValue GTHi = DAG.getVectorShuffle(MVT::v4i32, dl, GT, GT, MaskHi);
16003 SDValue Result = DAG.getNode(ISD::AND, dl, MVT::v4i32, EQHi, GTLo);
16004 Result = DAG.getNode(ISD::OR, dl, MVT::v4i32, Result, GTHi);
16007 Result = DAG.getNOT(dl, Result, MVT::v4i32);
16009 return DAG.getNode(ISD::BITCAST, dl, VT, Result);
16012 if (Opc == X86ISD::PCMPEQ && !Subtarget->hasSSE41()) {
16013 // If pcmpeqq is missing but pcmpeqd is available, synthesize pcmpeqq with
16014 // pcmpeqd + pshufd + pand.
16015 assert(Subtarget->hasSSE2() && !FlipSigns && "Don't know how to lower!");
16017 // First cast everything to the right type.
16018 Op0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op0);
16019 Op1 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op1);
16022 SDValue Result = DAG.getNode(Opc, dl, MVT::v4i32, Op0, Op1);
16024 // Make sure the lower and upper halves are both all-ones.
16025 static const int Mask[] = { 1, 0, 3, 2 };
16026 SDValue Shuf = DAG.getVectorShuffle(MVT::v4i32, dl, Result, Result, Mask);
16027 Result = DAG.getNode(ISD::AND, dl, MVT::v4i32, Result, Shuf);
16030 Result = DAG.getNOT(dl, Result, MVT::v4i32);
16032 return DAG.getNode(ISD::BITCAST, dl, VT, Result);
16036 // Since SSE has no unsigned integer comparisons, we need to flip the sign
16037 // bits of the inputs before performing those operations.
16039 EVT EltVT = VT.getVectorElementType();
16040 SDValue SB = DAG.getConstant(APInt::getSignBit(EltVT.getSizeInBits()), VT);
16041 Op0 = DAG.getNode(ISD::XOR, dl, VT, Op0, SB);
16042 Op1 = DAG.getNode(ISD::XOR, dl, VT, Op1, SB);
16045 SDValue Result = DAG.getNode(Opc, dl, VT, Op0, Op1);
16047 // If the logical-not of the result is required, perform that now.
16049 Result = DAG.getNOT(dl, Result, VT);
16052 Result = DAG.getNode(X86ISD::PCMPEQ, dl, VT, Op0, Result);
16055 Result = DAG.getNode(X86ISD::PCMPEQ, dl, VT, Result,
16056 getZeroVector(VT, Subtarget, DAG, dl));
16061 SDValue X86TargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
16063 MVT VT = Op.getSimpleValueType();
16065 if (VT.isVector()) return LowerVSETCC(Op, Subtarget, DAG);
16067 assert(((!Subtarget->hasAVX512() && VT == MVT::i8) || (VT == MVT::i1))
16068 && "SetCC type must be 8-bit or 1-bit integer");
16069 SDValue Op0 = Op.getOperand(0);
16070 SDValue Op1 = Op.getOperand(1);
16072 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
16074 // Optimize to BT if possible.
16075 // Lower (X & (1 << N)) == 0 to BT(X, N).
16076 // Lower ((X >>u N) & 1) != 0 to BT(X, N).
16077 // Lower ((X >>s N) & 1) != 0 to BT(X, N).
16078 if (Op0.getOpcode() == ISD::AND && Op0.hasOneUse() &&
16079 Op1.getOpcode() == ISD::Constant &&
16080 cast<ConstantSDNode>(Op1)->isNullValue() &&
16081 (CC == ISD::SETEQ || CC == ISD::SETNE)) {
16082 SDValue NewSetCC = LowerToBT(Op0, CC, dl, DAG);
16083 if (NewSetCC.getNode()) {
16085 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, NewSetCC);
16090 // Look for X == 0, X == 1, X != 0, or X != 1. We can simplify some forms of
16092 if (Op1.getOpcode() == ISD::Constant &&
16093 (cast<ConstantSDNode>(Op1)->getZExtValue() == 1 ||
16094 cast<ConstantSDNode>(Op1)->isNullValue()) &&
16095 (CC == ISD::SETEQ || CC == ISD::SETNE)) {
16097 // If the input is a setcc, then reuse the input setcc or use a new one with
16098 // the inverted condition.
16099 if (Op0.getOpcode() == X86ISD::SETCC) {
16100 X86::CondCode CCode = (X86::CondCode)Op0.getConstantOperandVal(0);
16101 bool Invert = (CC == ISD::SETNE) ^
16102 cast<ConstantSDNode>(Op1)->isNullValue();
16106 CCode = X86::GetOppositeBranchCondition(CCode);
16107 SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
16108 DAG.getConstant(CCode, MVT::i8),
16109 Op0.getOperand(1));
16111 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, SetCC);
16115 if ((Op0.getValueType() == MVT::i1) && (Op1.getOpcode() == ISD::Constant) &&
16116 (cast<ConstantSDNode>(Op1)->getZExtValue() == 1) &&
16117 (CC == ISD::SETEQ || CC == ISD::SETNE)) {
16119 ISD::CondCode NewCC = ISD::getSetCCInverse(CC, true);
16120 return DAG.getSetCC(dl, VT, Op0, DAG.getConstant(0, MVT::i1), NewCC);
16123 bool isFP = Op1.getSimpleValueType().isFloatingPoint();
16124 unsigned X86CC = TranslateX86CC(CC, isFP, Op0, Op1, DAG);
16125 if (X86CC == X86::COND_INVALID)
16128 SDValue EFLAGS = EmitCmp(Op0, Op1, X86CC, dl, DAG);
16129 EFLAGS = ConvertCmpIfNecessary(EFLAGS, DAG);
16130 SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
16131 DAG.getConstant(X86CC, MVT::i8), EFLAGS);
16133 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, SetCC);
16137 // isX86LogicalCmp - Return true if opcode is a X86 logical comparison.
16138 static bool isX86LogicalCmp(SDValue Op) {
16139 unsigned Opc = Op.getNode()->getOpcode();
16140 if (Opc == X86ISD::CMP || Opc == X86ISD::COMI || Opc == X86ISD::UCOMI ||
16141 Opc == X86ISD::SAHF)
16143 if (Op.getResNo() == 1 &&
16144 (Opc == X86ISD::ADD ||
16145 Opc == X86ISD::SUB ||
16146 Opc == X86ISD::ADC ||
16147 Opc == X86ISD::SBB ||
16148 Opc == X86ISD::SMUL ||
16149 Opc == X86ISD::UMUL ||
16150 Opc == X86ISD::INC ||
16151 Opc == X86ISD::DEC ||
16152 Opc == X86ISD::OR ||
16153 Opc == X86ISD::XOR ||
16154 Opc == X86ISD::AND))
16157 if (Op.getResNo() == 2 && Opc == X86ISD::UMUL)
16163 static bool isTruncWithZeroHighBitsInput(SDValue V, SelectionDAG &DAG) {
16164 if (V.getOpcode() != ISD::TRUNCATE)
16167 SDValue VOp0 = V.getOperand(0);
16168 unsigned InBits = VOp0.getValueSizeInBits();
16169 unsigned Bits = V.getValueSizeInBits();
16170 return DAG.MaskedValueIsZero(VOp0, APInt::getHighBitsSet(InBits,InBits-Bits));
16173 SDValue X86TargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
16174 bool addTest = true;
16175 SDValue Cond = Op.getOperand(0);
16176 SDValue Op1 = Op.getOperand(1);
16177 SDValue Op2 = Op.getOperand(2);
16179 EVT VT = Op1.getValueType();
16182 // Lower fp selects into a CMP/AND/ANDN/OR sequence when the necessary SSE ops
16183 // are available. Otherwise fp cmovs get lowered into a less efficient branch
16184 // sequence later on.
16185 if (Cond.getOpcode() == ISD::SETCC &&
16186 ((Subtarget->hasSSE2() && (VT == MVT::f32 || VT == MVT::f64)) ||
16187 (Subtarget->hasSSE1() && VT == MVT::f32)) &&
16188 VT == Cond.getOperand(0).getValueType() && Cond->hasOneUse()) {
16189 SDValue CondOp0 = Cond.getOperand(0), CondOp1 = Cond.getOperand(1);
16190 int SSECC = translateX86FSETCC(
16191 cast<CondCodeSDNode>(Cond.getOperand(2))->get(), CondOp0, CondOp1);
16194 if (Subtarget->hasAVX512()) {
16195 SDValue Cmp = DAG.getNode(X86ISD::FSETCC, DL, MVT::i1, CondOp0, CondOp1,
16196 DAG.getConstant(SSECC, MVT::i8));
16197 return DAG.getNode(X86ISD::SELECT, DL, VT, Cmp, Op1, Op2);
16199 SDValue Cmp = DAG.getNode(X86ISD::FSETCC, DL, VT, CondOp0, CondOp1,
16200 DAG.getConstant(SSECC, MVT::i8));
16201 SDValue AndN = DAG.getNode(X86ISD::FANDN, DL, VT, Cmp, Op2);
16202 SDValue And = DAG.getNode(X86ISD::FAND, DL, VT, Cmp, Op1);
16203 return DAG.getNode(X86ISD::FOR, DL, VT, AndN, And);
16207 if (Cond.getOpcode() == ISD::SETCC) {
16208 SDValue NewCond = LowerSETCC(Cond, DAG);
16209 if (NewCond.getNode())
16213 // (select (x == 0), -1, y) -> (sign_bit (x - 1)) | y
16214 // (select (x == 0), y, -1) -> ~(sign_bit (x - 1)) | y
16215 // (select (x != 0), y, -1) -> (sign_bit (x - 1)) | y
16216 // (select (x != 0), -1, y) -> ~(sign_bit (x - 1)) | y
16217 if (Cond.getOpcode() == X86ISD::SETCC &&
16218 Cond.getOperand(1).getOpcode() == X86ISD::CMP &&
16219 isZero(Cond.getOperand(1).getOperand(1))) {
16220 SDValue Cmp = Cond.getOperand(1);
16222 unsigned CondCode =cast<ConstantSDNode>(Cond.getOperand(0))->getZExtValue();
16224 if ((isAllOnes(Op1) || isAllOnes(Op2)) &&
16225 (CondCode == X86::COND_E || CondCode == X86::COND_NE)) {
16226 SDValue Y = isAllOnes(Op2) ? Op1 : Op2;
16228 SDValue CmpOp0 = Cmp.getOperand(0);
16229 // Apply further optimizations for special cases
16230 // (select (x != 0), -1, 0) -> neg & sbb
16231 // (select (x == 0), 0, -1) -> neg & sbb
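// Note for readers: "sub 0, x" sets CF exactly when x != 0, and SETCC_CARRY
// (selected as "sbb reg, reg") broadcasts CF into every bit, yielding 0 or -1.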
16232 if (ConstantSDNode *YC = dyn_cast<ConstantSDNode>(Y))
16233 if (YC->isNullValue() &&
16234 (isAllOnes(Op1) == (CondCode == X86::COND_NE))) {
16235 SDVTList VTs = DAG.getVTList(CmpOp0.getValueType(), MVT::i32);
16236 SDValue Neg = DAG.getNode(X86ISD::SUB, DL, VTs,
16237 DAG.getConstant(0, CmpOp0.getValueType()),
16239 SDValue Res = DAG.getNode(X86ISD::SETCC_CARRY, DL, Op.getValueType(),
16240 DAG.getConstant(X86::COND_B, MVT::i8),
16241 SDValue(Neg.getNode(), 1));
16245 Cmp = DAG.getNode(X86ISD::CMP, DL, MVT::i32,
16246 CmpOp0, DAG.getConstant(1, CmpOp0.getValueType()));
16247 Cmp = ConvertCmpIfNecessary(Cmp, DAG);
16249 SDValue Res = // Res = 0 or -1.
16250 DAG.getNode(X86ISD::SETCC_CARRY, DL, Op.getValueType(),
16251 DAG.getConstant(X86::COND_B, MVT::i8), Cmp);
16253 if (isAllOnes(Op1) != (CondCode == X86::COND_E))
16254 Res = DAG.getNOT(DL, Res, Res.getValueType());
16256 ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(Op2);
16257 if (!N2C || !N2C->isNullValue())
16258 Res = DAG.getNode(ISD::OR, DL, Res.getValueType(), Res, Y);
16263 // Look past (and (setcc_carry (cmp ...)), 1).
16264 if (Cond.getOpcode() == ISD::AND &&
16265 Cond.getOperand(0).getOpcode() == X86ISD::SETCC_CARRY) {
16266 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Cond.getOperand(1));
16267 if (C && C->getAPIntValue() == 1)
16268 Cond = Cond.getOperand(0);
16271 // If condition flag is set by a X86ISD::CMP, then use it as the condition
16272 // setting operand in place of the X86ISD::SETCC.
16273 unsigned CondOpcode = Cond.getOpcode();
16274 if (CondOpcode == X86ISD::SETCC ||
16275 CondOpcode == X86ISD::SETCC_CARRY) {
16276 CC = Cond.getOperand(0);
16278 SDValue Cmp = Cond.getOperand(1);
16279 unsigned Opc = Cmp.getOpcode();
16280 MVT VT = Op.getSimpleValueType();
16282 bool IllegalFPCMov = false;
16283 if (VT.isFloatingPoint() && !VT.isVector() &&
16284 !isScalarFPTypeInSSEReg(VT)) // FPStack?
16285 IllegalFPCMov = !hasFPCMov(cast<ConstantSDNode>(CC)->getSExtValue());
16287 if ((isX86LogicalCmp(Cmp) && !IllegalFPCMov) ||
16288 Opc == X86ISD::BT) { // FIXME
16292 } else if (CondOpcode == ISD::USUBO || CondOpcode == ISD::SSUBO ||
16293 CondOpcode == ISD::UADDO || CondOpcode == ISD::SADDO ||
16294 ((CondOpcode == ISD::UMULO || CondOpcode == ISD::SMULO) &&
16295 Cond.getOperand(0).getValueType() != MVT::i8)) {
16296 SDValue LHS = Cond.getOperand(0);
16297 SDValue RHS = Cond.getOperand(1);
16298 unsigned X86Opcode;
16301 switch (CondOpcode) {
16302 case ISD::UADDO: X86Opcode = X86ISD::ADD; X86Cond = X86::COND_B; break;
16303 case ISD::SADDO: X86Opcode = X86ISD::ADD; X86Cond = X86::COND_O; break;
16304 case ISD::USUBO: X86Opcode = X86ISD::SUB; X86Cond = X86::COND_B; break;
16305 case ISD::SSUBO: X86Opcode = X86ISD::SUB; X86Cond = X86::COND_O; break;
16306 case ISD::UMULO: X86Opcode = X86ISD::UMUL; X86Cond = X86::COND_O; break;
16307 case ISD::SMULO: X86Opcode = X86ISD::SMUL; X86Cond = X86::COND_O; break;
16308 default: llvm_unreachable("unexpected overflowing operator");
16310 if (CondOpcode == ISD::UMULO)
16311 VTs = DAG.getVTList(LHS.getValueType(), LHS.getValueType(),
16314 VTs = DAG.getVTList(LHS.getValueType(), MVT::i32);
16316 SDValue X86Op = DAG.getNode(X86Opcode, DL, VTs, LHS, RHS);
16318 if (CondOpcode == ISD::UMULO)
16319 Cond = X86Op.getValue(2);
16321 Cond = X86Op.getValue(1);
16323 CC = DAG.getConstant(X86Cond, MVT::i8);
16328 // Look past the truncate if the high bits are known zero.
16329 if (isTruncWithZeroHighBitsInput(Cond, DAG))
16330 Cond = Cond.getOperand(0);
16332 // We know the result of AND is compared against zero. Try to match
// it to BT.
16334 if (Cond.getOpcode() == ISD::AND && Cond.hasOneUse()) {
16335 SDValue NewSetCC = LowerToBT(Cond, ISD::SETNE, DL, DAG);
16336 if (NewSetCC.getNode()) {
16337 CC = NewSetCC.getOperand(0);
16338 Cond = NewSetCC.getOperand(1);
16345 CC = DAG.getConstant(X86::COND_NE, MVT::i8);
16346 Cond = EmitTest(Cond, X86::COND_NE, DL, DAG);
16349 // a < b ? -1 : 0 -> RES = ~setcc_carry
16350 // a < b ? 0 : -1 -> RES = setcc_carry
16351 // a >= b ? -1 : 0 -> RES = setcc_carry
16352 // a >= b ? 0 : -1 -> RES = ~setcc_carry
16353 if (Cond.getOpcode() == X86ISD::SUB) {
16354 Cond = ConvertCmpIfNecessary(Cond, DAG);
16355 unsigned CondCode = cast<ConstantSDNode>(CC)->getZExtValue();
16357 if ((CondCode == X86::COND_AE || CondCode == X86::COND_B) &&
16358 (isAllOnes(Op1) || isAllOnes(Op2)) && (isZero(Op1) || isZero(Op2))) {
16359 SDValue Res = DAG.getNode(X86ISD::SETCC_CARRY, DL, Op.getValueType(),
16360 DAG.getConstant(X86::COND_B, MVT::i8), Cond);
16361 if (isAllOnes(Op1) != (CondCode == X86::COND_B))
16362 return DAG.getNOT(DL, Res, Res.getValueType());
16367 // X86 doesn't have an i8 cmov. If both operands are the result of a truncate,
16368 // widen the cmov and push the truncate through. This avoids introducing a new
16369 // branch during isel and doesn't add any extensions.
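// I.e. emit (trunc (X86cmov T2, T1)) at the wider type rather than an i8 cmov,
// which the ISA does not provide.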
16370 if (Op.getValueType() == MVT::i8 &&
16371 Op1.getOpcode() == ISD::TRUNCATE && Op2.getOpcode() == ISD::TRUNCATE) {
16372 SDValue T1 = Op1.getOperand(0), T2 = Op2.getOperand(0);
16373 if (T1.getValueType() == T2.getValueType() &&
16374 // Blacklist CopyFromReg to avoid partial register stalls.
16375 T1.getOpcode() != ISD::CopyFromReg && T2.getOpcode()!=ISD::CopyFromReg){
16376 SDVTList VTs = DAG.getVTList(T1.getValueType(), MVT::Glue);
16377 SDValue Cmov = DAG.getNode(X86ISD::CMOV, DL, VTs, T2, T1, CC, Cond);
16378 return DAG.getNode(ISD::TRUNCATE, DL, Op.getValueType(), Cmov);
16382 // X86ISD::CMOV means set the result (which is operand 1) to the RHS if
16383 // condition is true.
16384 SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::Glue);
16385 SDValue Ops[] = { Op2, Op1, CC, Cond };
16386 return DAG.getNode(X86ISD::CMOV, DL, VTs, Ops);
16389 static SDValue LowerSIGN_EXTEND_AVX512(SDValue Op, const X86Subtarget *Subtarget,
16390 SelectionDAG &DAG) {
16391 MVT VT = Op->getSimpleValueType(0);
16392 SDValue In = Op->getOperand(0);
16393 MVT InVT = In.getSimpleValueType();
16394 MVT VTElt = VT.getVectorElementType();
16395 MVT InVTElt = InVT.getVectorElementType();
16399 if ((InVTElt == MVT::i1) &&
16400 (((Subtarget->hasBWI() && Subtarget->hasVLX() &&
16401 VT.getSizeInBits() <= 256 && VTElt.getSizeInBits() <= 16)) ||
16403 ((Subtarget->hasBWI() && VT.is512BitVector() &&
16404 VTElt.getSizeInBits() <= 16)) ||
16406 ((Subtarget->hasDQI() && Subtarget->hasVLX() &&
16407 VT.getSizeInBits() <= 256 && VTElt.getSizeInBits() >= 32)) ||
16409 ((Subtarget->hasDQI() && VT.is512BitVector() &&
16410 VTElt.getSizeInBits() >= 32))))
16411 return DAG.getNode(X86ISD::VSEXT, dl, VT, In);
16413 unsigned int NumElts = VT.getVectorNumElements();
16415 if (NumElts != 8 && NumElts != 16)
16418 if (VT.is512BitVector() && InVT.getVectorElementType() != MVT::i1) {
16419 if (In.getOpcode() == X86ISD::VSEXT || In.getOpcode() == X86ISD::VZEXT)
16420 return DAG.getNode(In.getOpcode(), dl, VT, In.getOperand(0));
16421 return DAG.getNode(X86ISD::VSEXT, dl, VT, In);
16424 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
16425 assert (InVT.getVectorElementType() == MVT::i1 && "Unexpected vector type");
16427 MVT ExtVT = (NumElts == 8) ? MVT::v8i64 : MVT::v16i32;
16428 Constant *C = ConstantInt::get(*DAG.getContext(),
16429 APInt::getAllOnesValue(ExtVT.getScalarType().getSizeInBits()));
16431 SDValue CP = DAG.getConstantPool(C, TLI.getPointerTy());
16432 unsigned Alignment = cast<ConstantPoolSDNode>(CP)->getAlignment();
16433 SDValue Ld = DAG.getLoad(ExtVT.getScalarType(), dl, DAG.getEntryNode(), CP,
16434 MachinePointerInfo::getConstantPool(),
16435 false, false, false, Alignment);
16436 SDValue Brcst = DAG.getNode(X86ISD::VBROADCASTM, dl, ExtVT, In, Ld);
16437 if (VT.is512BitVector())
16439 return DAG.getNode(X86ISD::VTRUNC, dl, VT, Brcst);
16442 static SDValue LowerSIGN_EXTEND(SDValue Op, const X86Subtarget *Subtarget,
16443 SelectionDAG &DAG) {
16444 MVT VT = Op->getSimpleValueType(0);
16445 SDValue In = Op->getOperand(0);
16446 MVT InVT = In.getSimpleValueType();
16449 if (VT.is512BitVector() || InVT.getVectorElementType() == MVT::i1)
16450 return LowerSIGN_EXTEND_AVX512(Op, Subtarget, DAG);
16452 if ((VT != MVT::v4i64 || InVT != MVT::v4i32) &&
16453 (VT != MVT::v8i32 || InVT != MVT::v8i16) &&
16454 (VT != MVT::v16i16 || InVT != MVT::v16i8))
16457 if (Subtarget->hasInt256())
16458 return DAG.getNode(X86ISD::VSEXT, dl, VT, In);
16460 // Optimize vectors in AVX mode
16461 // Sign extend v8i16 to v8i32 and
//             v4i32 to v4i64
16464 // Divide input vector into two parts
16465 // for v4i32 the shuffle mask will be { 0, 1, -1, -1} {2, 3, -1, -1}
16466 // use vpmovsx instruction to extend v4i32 -> v2i64; v8i16 -> v4i32
16467 // concat the vectors to original VT
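// Illustrative: for v8i16 -> v8i32, OpLo keeps elements 0..3 and OpHi keeps
// elements 4..7; each half is sign-extended with VSEXT to v4i32 and the two
// halves are concatenated back into the v8i32 result.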
16469 unsigned NumElems = InVT.getVectorNumElements();
16470 SDValue Undef = DAG.getUNDEF(InVT);
16472 SmallVector<int,8> ShufMask1(NumElems, -1);
16473 for (unsigned i = 0; i != NumElems/2; ++i)
16476 SDValue OpLo = DAG.getVectorShuffle(InVT, dl, In, Undef, &ShufMask1[0]);
16478 SmallVector<int,8> ShufMask2(NumElems, -1);
16479 for (unsigned i = 0; i != NumElems/2; ++i)
16480 ShufMask2[i] = i + NumElems/2;
16482 SDValue OpHi = DAG.getVectorShuffle(InVT, dl, In, Undef, &ShufMask2[0]);
16484 MVT HalfVT = MVT::getVectorVT(VT.getScalarType(),
16485 VT.getVectorNumElements()/2);
16487 OpLo = DAG.getNode(X86ISD::VSEXT, dl, HalfVT, OpLo);
16488 OpHi = DAG.getNode(X86ISD::VSEXT, dl, HalfVT, OpHi);
16490 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, OpLo, OpHi);
16493 // Lower vector extended loads using a shuffle. If SSSE3 is not available we
16494 // may emit an illegal shuffle but the expansion is still better than scalar
16495 // code. We generate X86ISD::VSEXT for SEXTLOADs if it's available, otherwise
16496 // we'll emit a shuffle and an arithmetic shift.
16497 // FIXME: Is the expansion actually better than scalar code? It doesn't seem so.
16498 // TODO: It is possible to support ZExt by zeroing the undef values during
16499 // the shuffle phase or after the shuffle.
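// Illustrative example of the shuffle+shift path (when VSEXT is unavailable):
// a sextload of v4i8 to v4i32 loads the four bytes with one scalar load,
// shuffles each byte into the high byte of its i32 lane, and then
// arithmetic-shifts every lane right by 24 to replicate the sign bits.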
16500 static SDValue LowerExtendedLoad(SDValue Op, const X86Subtarget *Subtarget,
16501 SelectionDAG &DAG) {
16502 MVT RegVT = Op.getSimpleValueType();
16503 assert(RegVT.isVector() && "We only custom lower vector sext loads.");
16504 assert(RegVT.isInteger() &&
16505 "We only custom lower integer vector sext loads.");
16507 // Nothing useful we can do without SSE2 shuffles.
16508 assert(Subtarget->hasSSE2() && "We only custom lower sext loads with SSE2.");
16510 LoadSDNode *Ld = cast<LoadSDNode>(Op.getNode());
16512 EVT MemVT = Ld->getMemoryVT();
16513 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
16514 unsigned RegSz = RegVT.getSizeInBits();
16516 ISD::LoadExtType Ext = Ld->getExtensionType();
16518 assert((Ext == ISD::EXTLOAD || Ext == ISD::SEXTLOAD)
16519 && "Only anyext and sext are currently implemented.");
16520 assert(MemVT != RegVT && "Cannot extend to the same type");
16521 assert(MemVT.isVector() && "Must load a vector from memory");
16523 unsigned NumElems = RegVT.getVectorNumElements();
16524 unsigned MemSz = MemVT.getSizeInBits();
16525 assert(RegSz > MemSz && "Register size must be greater than the mem size");
16527 if (Ext == ISD::SEXTLOAD && RegSz == 256 && !Subtarget->hasInt256()) {
16528 // The only way in which we have a legal 256-bit vector result but not the
16529 // integer 256-bit operations needed to directly lower a sextload is if we
16530 // have AVX1 but not AVX2. In that case, we can always emit a sextload to
16531 // a 128-bit vector and a normal sign_extend to 256-bits that should get
16532 // correctly legalized. We do this late to allow the canonical form of
16533 // sextload to persist throughout the rest of the DAG combiner -- it wants
16534 // to fold together any extensions it can, and so will fuse a sign_extend
16535 // of an sextload into a sextload targeting a wider value.
16537 if (MemSz == 128) {
16538 // Just switch this to a normal load.
16539 assert(TLI.isTypeLegal(MemVT) && "If the memory type is a 128-bit type, "
16540 "it must be a legal 128-bit vector "
16542 Load = DAG.getLoad(MemVT, dl, Ld->getChain(), Ld->getBasePtr(),
16543 Ld->getPointerInfo(), Ld->isVolatile(), Ld->isNonTemporal(),
16544 Ld->isInvariant(), Ld->getAlignment());
16546 assert(MemSz < 128 &&
16547 "Can't extend a type wider than 128 bits to a 256 bit vector!");
16548 // Do an sext load to a 128-bit vector type. We want to use the same
16549 // number of elements, but elements half as wide. This will end up being
16550 // recursively lowered by this routine, but will succeed as we definitely
16551 // have all the necessary features if we're using AVX1.
16553 EVT::getIntegerVT(*DAG.getContext(), RegVT.getScalarSizeInBits() / 2);
16554 EVT HalfVecVT = EVT::getVectorVT(*DAG.getContext(), HalfEltVT, NumElems);
16556 DAG.getExtLoad(Ext, dl, HalfVecVT, Ld->getChain(), Ld->getBasePtr(),
16557 Ld->getPointerInfo(), MemVT, Ld->isVolatile(),
16558 Ld->isNonTemporal(), Ld->isInvariant(),
16559 Ld->getAlignment());
16562 // Replace chain users with the new chain.
16563 assert(Load->getNumValues() == 2 && "Loads must carry a chain!");
16564 DAG.ReplaceAllUsesOfValueWith(SDValue(Ld, 1), Load.getValue(1));
16566 // Finally, do a normal sign-extend to the desired register.
16567 return DAG.getSExtOrTrunc(Load, dl, RegVT);
16570 // All sizes must be a power of two.
16571 assert(isPowerOf2_32(RegSz * MemSz * NumElems) &&
16572 "Non-power-of-two elements are not custom lowered!");
16574 // Attempt to load the original value using scalar loads.
16575 // Find the largest scalar type that divides the total loaded size.
16576 MVT SclrLoadTy = MVT::i8;
16577 for (MVT Tp : MVT::integer_valuetypes()) {
16578 if (TLI.isTypeLegal(Tp) && ((MemSz % Tp.getSizeInBits()) == 0)) {
16583 // On 32bit systems, we can't save 64bit integers. Try bitcasting to F64.
16584 if (TLI.isTypeLegal(MVT::f64) && SclrLoadTy.getSizeInBits() < 64 &&
16586 SclrLoadTy = MVT::f64;
16588 // Calculate the number of scalar loads that we need to perform
16589 // in order to load our vector from memory.
16590 unsigned NumLoads = MemSz / SclrLoadTy.getSizeInBits();
16592 assert((Ext != ISD::SEXTLOAD || NumLoads == 1) &&
16593 "Can only lower sext loads with a single scalar load!");
16595 unsigned loadRegZize = RegSz;
16596 if (Ext == ISD::SEXTLOAD && RegSz == 256)
16599 // Represent our vector as a sequence of elements which are the
16600 // largest scalar that we can load.
16601 EVT LoadUnitVecVT = EVT::getVectorVT(
16602 *DAG.getContext(), SclrLoadTy, loadRegZize / SclrLoadTy.getSizeInBits());
16604 // Represent the data using the same element type that is stored in
16605 // memory. In practice, we "widen" MemVT.
16607 EVT::getVectorVT(*DAG.getContext(), MemVT.getScalarType(),
16608 loadRegZize / MemVT.getScalarType().getSizeInBits());
16610 assert(WideVecVT.getSizeInBits() == LoadUnitVecVT.getSizeInBits() &&
16611 "Invalid vector type");
16613 // We can't shuffle using an illegal type.
16614 assert(TLI.isTypeLegal(WideVecVT) &&
16615 "We only lower types that form legal widened vector types");
16617 SmallVector<SDValue, 8> Chains;
16618 SDValue Ptr = Ld->getBasePtr();
16619 SDValue Increment =
16620 DAG.getConstant(SclrLoadTy.getSizeInBits() / 8, TLI.getPointerTy());
16621 SDValue Res = DAG.getUNDEF(LoadUnitVecVT);
16623 for (unsigned i = 0; i < NumLoads; ++i) {
16624 // Perform a single load.
16625 SDValue ScalarLoad =
16626 DAG.getLoad(SclrLoadTy, dl, Ld->getChain(), Ptr, Ld->getPointerInfo(),
16627 Ld->isVolatile(), Ld->isNonTemporal(), Ld->isInvariant(),
16628 Ld->getAlignment());
16629 Chains.push_back(ScalarLoad.getValue(1));
16630 // Create the first element type using SCALAR_TO_VECTOR in order to avoid
16631 // another round of DAGCombining.
16633 Res = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, LoadUnitVecVT, ScalarLoad);
16635 Res = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, LoadUnitVecVT, Res,
16636 ScalarLoad, DAG.getIntPtrConstant(i));
16638 Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, Increment);
16641 SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Chains);
16643 // Bitcast the loaded value to a vector of the original element type, in
16644 // the size of the target vector type.
16645 SDValue SlicedVec = DAG.getNode(ISD::BITCAST, dl, WideVecVT, Res);
16646 unsigned SizeRatio = RegSz / MemSz;
16648 if (Ext == ISD::SEXTLOAD) {
16649 // If we have SSE4.1, we can directly emit a VSEXT node.
16650 if (Subtarget->hasSSE41()) {
16651 SDValue Sext = DAG.getNode(X86ISD::VSEXT, dl, RegVT, SlicedVec);
16652 DAG.ReplaceAllUsesOfValueWith(SDValue(Ld, 1), TF);
16656 // Otherwise we'll shuffle the small elements in the high bits of the
16657 // larger type and perform an arithmetic shift. If the shift is not legal
16658 // it's better to scalarize.
16659 assert(TLI.isOperationLegalOrCustom(ISD::SRA, RegVT) &&
16660 "We can't implement a sext load without an arithmetic right shift!");
16662 // Redistribute the loaded elements into the different locations.
16663 SmallVector<int, 16> ShuffleVec(NumElems * SizeRatio, -1);
16664 for (unsigned i = 0; i != NumElems; ++i)
16665 ShuffleVec[i * SizeRatio + SizeRatio - 1] = i;
16667 SDValue Shuff = DAG.getVectorShuffle(
16668 WideVecVT, dl, SlicedVec, DAG.getUNDEF(WideVecVT), &ShuffleVec[0]);
16670 Shuff = DAG.getNode(ISD::BITCAST, dl, RegVT, Shuff);
16672 // Build the arithmetic shift.
16673 unsigned Amt = RegVT.getVectorElementType().getSizeInBits() -
16674 MemVT.getVectorElementType().getSizeInBits();
16676 DAG.getNode(ISD::SRA, dl, RegVT, Shuff, DAG.getConstant(Amt, RegVT));
16678 DAG.ReplaceAllUsesOfValueWith(SDValue(Ld, 1), TF);
16682 // Redistribute the loaded elements into the different locations.
16683 SmallVector<int, 16> ShuffleVec(NumElems * SizeRatio, -1);
16684 for (unsigned i = 0; i != NumElems; ++i)
16685 ShuffleVec[i * SizeRatio] = i;
16687 SDValue Shuff = DAG.getVectorShuffle(WideVecVT, dl, SlicedVec,
16688 DAG.getUNDEF(WideVecVT), &ShuffleVec[0]);
16690 // Bitcast to the requested type.
16691 Shuff = DAG.getNode(ISD::BITCAST, dl, RegVT, Shuff);
16692 DAG.ReplaceAllUsesOfValueWith(SDValue(Ld, 1), TF);
16696 // isAndOrOfSetCCs - Return true if node is an ISD::AND or
16697 // ISD::OR of two X86ISD::SETCC nodes each of which has no other use apart
16698 // from the AND / OR.
16699 static bool isAndOrOfSetCCs(SDValue Op, unsigned &Opc) {
16700 Opc = Op.getOpcode();
16701 if (Opc != ISD::OR && Opc != ISD::AND)
16703 return (Op.getOperand(0).getOpcode() == X86ISD::SETCC &&
16704 Op.getOperand(0).hasOneUse() &&
16705 Op.getOperand(1).getOpcode() == X86ISD::SETCC &&
16706 Op.getOperand(1).hasOneUse());
16709 // isXor1OfSetCC - Return true if node is an ISD::XOR of an X86ISD::SETCC
16710 // and 1, and that the SETCC node has a single use.
16711 static bool isXor1OfSetCC(SDValue Op) {
16712 if (Op.getOpcode() != ISD::XOR)
16714 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(Op.getOperand(1));
16715 if (N1C && N1C->getAPIntValue() == 1) {
16716 return Op.getOperand(0).getOpcode() == X86ISD::SETCC &&
16717 Op.getOperand(0).hasOneUse();
16722 SDValue X86TargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
16723 bool addTest = true;
16724 SDValue Chain = Op.getOperand(0);
16725 SDValue Cond = Op.getOperand(1);
16726 SDValue Dest = Op.getOperand(2);
16729 bool Inverted = false;
16731 if (Cond.getOpcode() == ISD::SETCC) {
16732 // Check for setcc([su]{add,sub,mul}o == 0).
16733 if (cast<CondCodeSDNode>(Cond.getOperand(2))->get() == ISD::SETEQ &&
16734 isa<ConstantSDNode>(Cond.getOperand(1)) &&
16735 cast<ConstantSDNode>(Cond.getOperand(1))->isNullValue() &&
16736 Cond.getOperand(0).getResNo() == 1 &&
16737 (Cond.getOperand(0).getOpcode() == ISD::SADDO ||
16738 Cond.getOperand(0).getOpcode() == ISD::UADDO ||
16739 Cond.getOperand(0).getOpcode() == ISD::SSUBO ||
16740 Cond.getOperand(0).getOpcode() == ISD::USUBO ||
16741 Cond.getOperand(0).getOpcode() == ISD::SMULO ||
16742 Cond.getOperand(0).getOpcode() == ISD::UMULO)) {
16744 Cond = Cond.getOperand(0);
16746 SDValue NewCond = LowerSETCC(Cond, DAG);
16747 if (NewCond.getNode())
16752 // FIXME: LowerXALUO doesn't handle these!!
16753 else if (Cond.getOpcode() == X86ISD::ADD ||
16754 Cond.getOpcode() == X86ISD::SUB ||
16755 Cond.getOpcode() == X86ISD::SMUL ||
16756 Cond.getOpcode() == X86ISD::UMUL)
16757 Cond = LowerXALUO(Cond, DAG);
16760 // Look past (and (setcc_carry (cmp ...)), 1).
16761 if (Cond.getOpcode() == ISD::AND &&
16762 Cond.getOperand(0).getOpcode() == X86ISD::SETCC_CARRY) {
16763 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Cond.getOperand(1));
16764 if (C && C->getAPIntValue() == 1)
16765 Cond = Cond.getOperand(0);
16768 // If condition flag is set by a X86ISD::CMP, then use it as the condition
16769 // setting operand in place of the X86ISD::SETCC.
16770 unsigned CondOpcode = Cond.getOpcode();
16771 if (CondOpcode == X86ISD::SETCC ||
16772 CondOpcode == X86ISD::SETCC_CARRY) {
16773 CC = Cond.getOperand(0);
16775 SDValue Cmp = Cond.getOperand(1);
16776 unsigned Opc = Cmp.getOpcode();
16777 // FIXME: WHY THE SPECIAL CASING OF LogicalCmp??
16778 if (isX86LogicalCmp(Cmp) || Opc == X86ISD::BT) {
16782 switch (cast<ConstantSDNode>(CC)->getZExtValue()) {
16786 // These can only come from an arithmetic instruction with overflow,
16787 // e.g. SADDO, UADDO.
16788 Cond = Cond.getNode()->getOperand(1);
16794 CondOpcode = Cond.getOpcode();
16795 if (CondOpcode == ISD::UADDO || CondOpcode == ISD::SADDO ||
16796 CondOpcode == ISD::USUBO || CondOpcode == ISD::SSUBO ||
16797 ((CondOpcode == ISD::UMULO || CondOpcode == ISD::SMULO) &&
16798 Cond.getOperand(0).getValueType() != MVT::i8)) {
16799 SDValue LHS = Cond.getOperand(0);
16800 SDValue RHS = Cond.getOperand(1);
16801 unsigned X86Opcode;
16804 // Keep this in sync with LowerXALUO, otherwise we might create redundant
16805 // instructions that can't be removed afterwards (i.e. X86ISD::ADD and
16807 switch (CondOpcode) {
16808 case ISD::UADDO: X86Opcode = X86ISD::ADD; X86Cond = X86::COND_B; break;
16810 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS))
16812 X86Opcode = X86ISD::INC; X86Cond = X86::COND_O;
16815 X86Opcode = X86ISD::ADD; X86Cond = X86::COND_O; break;
16816 case ISD::USUBO: X86Opcode = X86ISD::SUB; X86Cond = X86::COND_B; break;
16818 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS))
16820 X86Opcode = X86ISD::DEC; X86Cond = X86::COND_O;
16823 X86Opcode = X86ISD::SUB; X86Cond = X86::COND_O; break;
16824 case ISD::UMULO: X86Opcode = X86ISD::UMUL; X86Cond = X86::COND_O; break;
16825 case ISD::SMULO: X86Opcode = X86ISD::SMUL; X86Cond = X86::COND_O; break;
16826 default: llvm_unreachable("unexpected overflowing operator");
16829 X86Cond = X86::GetOppositeBranchCondition((X86::CondCode)X86Cond);
16830 if (CondOpcode == ISD::UMULO)
16831 VTs = DAG.getVTList(LHS.getValueType(), LHS.getValueType(),
16834 VTs = DAG.getVTList(LHS.getValueType(), MVT::i32);
16836 SDValue X86Op = DAG.getNode(X86Opcode, dl, VTs, LHS, RHS);
16838 if (CondOpcode == ISD::UMULO)
16839 Cond = X86Op.getValue(2);
16841 Cond = X86Op.getValue(1);
16843 CC = DAG.getConstant(X86Cond, MVT::i8);
16847 if (Cond.hasOneUse() && isAndOrOfSetCCs(Cond, CondOpc)) {
16848 SDValue Cmp = Cond.getOperand(0).getOperand(1);
16849 if (CondOpc == ISD::OR) {
16850 // Also, recognize the pattern generated by an FCMP_UNE. We can emit
16851 // two branches instead of an explicit OR instruction with a
// separate test.
16853 if (Cmp == Cond.getOperand(1).getOperand(1) &&
16854 isX86LogicalCmp(Cmp)) {
16855 CC = Cond.getOperand(0).getOperand(0);
16856 Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
16857 Chain, Dest, CC, Cmp);
16858 CC = Cond.getOperand(1).getOperand(0);
16862 } else { // ISD::AND
16863 // Also, recognize the pattern generated by an FCMP_OEQ. We can emit
16864 // two branches instead of an explicit AND instruction with a
16865 // separate test. However, we only do this if this block doesn't
16866 // have a fall-through edge, because this requires an explicit
16867 // jmp when the condition is false.
16868 if (Cmp == Cond.getOperand(1).getOperand(1) &&
16869 isX86LogicalCmp(Cmp) &&
16870 Op.getNode()->hasOneUse()) {
16871 X86::CondCode CCode =
16872 (X86::CondCode)Cond.getOperand(0).getConstantOperandVal(0);
16873 CCode = X86::GetOppositeBranchCondition(CCode);
16874 CC = DAG.getConstant(CCode, MVT::i8);
16875 SDNode *User = *Op.getNode()->use_begin();
16876 // Look for an unconditional branch following this conditional branch.
16877 // We need this because we need to reverse the successors in order
16878 // to implement FCMP_OEQ.
16879 if (User->getOpcode() == ISD::BR) {
16880 SDValue FalseBB = User->getOperand(1);
16882 DAG.UpdateNodeOperands(User, User->getOperand(0), Dest);
16883 assert(NewBR == User);
16887 Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
16888 Chain, Dest, CC, Cmp);
16889 X86::CondCode CCode =
16890 (X86::CondCode)Cond.getOperand(1).getConstantOperandVal(0);
16891 CCode = X86::GetOppositeBranchCondition(CCode);
16892 CC = DAG.getConstant(CCode, MVT::i8);
16898 } else if (Cond.hasOneUse() && isXor1OfSetCC(Cond)) {
16899 // Recognize xorb (setcc), 1 patterns. The xor inverts the condition.
16900 // It should be transformed during dag combiner except when the condition
16901 // is set by an arithmetic-with-overflow node.
16902 X86::CondCode CCode =
16903 (X86::CondCode)Cond.getOperand(0).getConstantOperandVal(0);
16904 CCode = X86::GetOppositeBranchCondition(CCode);
16905 CC = DAG.getConstant(CCode, MVT::i8);
16906 Cond = Cond.getOperand(0).getOperand(1);
16908 } else if (Cond.getOpcode() == ISD::SETCC &&
16909 cast<CondCodeSDNode>(Cond.getOperand(2))->get() == ISD::SETOEQ) {
16910 // For FCMP_OEQ, we can emit
16911 // two branches instead of an explicit AND instruction with a
16912 // separate test. However, we only do this if this block doesn't
16913 // have a fall-through edge, because this requires an explicit
16914 // jmp when the condition is false.
16915 if (Op.getNode()->hasOneUse()) {
16916 SDNode *User = *Op.getNode()->use_begin();
16917 // Look for an unconditional branch following this conditional branch.
16918 // We need this because we need to reverse the successors in order
16919 // to implement FCMP_OEQ.
16920 if (User->getOpcode() == ISD::BR) {
16921 SDValue FalseBB = User->getOperand(1);
16923 DAG.UpdateNodeOperands(User, User->getOperand(0), Dest);
16924 assert(NewBR == User);
16928 SDValue Cmp = DAG.getNode(X86ISD::CMP, dl, MVT::i32,
16929 Cond.getOperand(0), Cond.getOperand(1));
16930 Cmp = ConvertCmpIfNecessary(Cmp, DAG);
16931 CC = DAG.getConstant(X86::COND_NE, MVT::i8);
16932 Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
16933 Chain, Dest, CC, Cmp);
16934 CC = DAG.getConstant(X86::COND_P, MVT::i8);
16939 } else if (Cond.getOpcode() == ISD::SETCC &&
16940 cast<CondCodeSDNode>(Cond.getOperand(2))->get() == ISD::SETUNE) {
16941 // For FCMP_UNE, we can emit
16942 // two branches instead of an explicit AND instruction with a
16943 // separate test. However, we only do this if this block doesn't
16944 // have a fall-through edge, because this requires an explicit
16945 // jmp when the condition is false.
16946 if (Op.getNode()->hasOneUse()) {
16947 SDNode *User = *Op.getNode()->use_begin();
16948 // Look for an unconditional branch following this conditional branch.
16949 // We need this because we need to reverse the successors in order
16950 // to implement FCMP_UNE.
16951 if (User->getOpcode() == ISD::BR) {
16952 SDValue FalseBB = User->getOperand(1);
16954 DAG.UpdateNodeOperands(User, User->getOperand(0), Dest);
16955 assert(NewBR == User);
16958 SDValue Cmp = DAG.getNode(X86ISD::CMP, dl, MVT::i32,
16959 Cond.getOperand(0), Cond.getOperand(1));
16960 Cmp = ConvertCmpIfNecessary(Cmp, DAG);
16961 CC = DAG.getConstant(X86::COND_NE, MVT::i8);
16962 Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
16963 Chain, Dest, CC, Cmp);
16964 CC = DAG.getConstant(X86::COND_NP, MVT::i8);
16974 // Look past the truncate if the high bits are known zero.
16975 if (isTruncWithZeroHighBitsInput(Cond, DAG))
16976 Cond = Cond.getOperand(0);
16978 // We know the result of AND is compared against zero. Try to match
// it to BT.
16980 if (Cond.getOpcode() == ISD::AND && Cond.hasOneUse()) {
16981 SDValue NewSetCC = LowerToBT(Cond, ISD::SETNE, dl, DAG);
16982 if (NewSetCC.getNode()) {
16983 CC = NewSetCC.getOperand(0);
16984 Cond = NewSetCC.getOperand(1);
16991 X86::CondCode X86Cond = Inverted ? X86::COND_E : X86::COND_NE;
16992 CC = DAG.getConstant(X86Cond, MVT::i8);
16993 Cond = EmitTest(Cond, X86Cond, dl, DAG);
16995 Cond = ConvertCmpIfNecessary(Cond, DAG);
16996 return DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
16997 Chain, Dest, CC, Cond);
17000 // Lower dynamic stack allocation to _alloca call for Cygwin/Mingw targets.
17001 // Calls to _alloca are needed to probe the stack when allocating more than 4k
17002 // bytes in one go. Touching the stack at 4K increments is necessary to ensure
17003 // that the guard pages used by the OS virtual memory manager are allocated in
17004 // correct sequence.
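// Roughly (illustrative numbers): a single 16K allocation must touch a byte in
// each of the four new 4K pages in order; an access that skips past the guard
// page hits uncommitted memory and faults instead of growing the stack.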
17006 X86TargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
17007 SelectionDAG &DAG) const {
17008 MachineFunction &MF = DAG.getMachineFunction();
17009 bool SplitStack = MF.shouldSplitStack();
17010 bool Lower = (Subtarget->isOSWindows() && !Subtarget->isTargetMachO()) ||
17015 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
17016 SDNode* Node = Op.getNode();
17018 unsigned SPReg = TLI.getStackPointerRegisterToSaveRestore();
17019 assert(SPReg && "Target cannot require DYNAMIC_STACKALLOC expansion and"
17020 " not tell us which reg is the stack pointer!");
17021 EVT VT = Node->getValueType(0);
17022 SDValue Tmp1 = SDValue(Node, 0);
17023 SDValue Tmp2 = SDValue(Node, 1);
17024 SDValue Tmp3 = Node->getOperand(2);
17025 SDValue Chain = Tmp1.getOperand(0);
17027 // Chain the dynamic stack allocation so that it doesn't modify the stack
17028 // pointer when other instructions are using the stack.
17029 Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(0, true),
17032 SDValue Size = Tmp2.getOperand(1);
17033 SDValue SP = DAG.getCopyFromReg(Chain, dl, SPReg, VT);
17034 Chain = SP.getValue(1);
17035 unsigned Align = cast<ConstantSDNode>(Tmp3)->getZExtValue();
17036 const TargetFrameLowering &TFI = *Subtarget->getFrameLowering();
17037 unsigned StackAlign = TFI.getStackAlignment();
17038 Tmp1 = DAG.getNode(ISD::SUB, dl, VT, SP, Size); // Value
17039 if (Align > StackAlign)
17040 Tmp1 = DAG.getNode(ISD::AND, dl, VT, Tmp1,
17041 DAG.getConstant(-(uint64_t)Align, VT));
17042 Chain = DAG.getCopyToReg(Chain, dl, SPReg, Tmp1); // Output chain
17044 Tmp2 = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(0, true),
17045 DAG.getIntPtrConstant(0, true), SDValue(),
17048 SDValue Ops[2] = { Tmp1, Tmp2 };
17049 return DAG.getMergeValues(Ops, dl);
17053 SDValue Chain = Op.getOperand(0);
17054 SDValue Size = Op.getOperand(1);
17055 unsigned Align = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue();
17056 EVT VT = Op.getNode()->getValueType(0);
17058 bool Is64Bit = Subtarget->is64Bit();
17059 EVT SPTy = getPointerTy();
17062 MachineRegisterInfo &MRI = MF.getRegInfo();
17065 // The 64 bit implementation of segmented stacks needs to clobber both r10
17066 // and r11. This makes it impossible to use it along with nested parameters.
17067 const Function *F = MF.getFunction();
17069 for (Function::const_arg_iterator I = F->arg_begin(), E = F->arg_end();
17071 if (I->hasNestAttr())
17072 report_fatal_error("Cannot use segmented stacks with functions that "
17073 "have nested arguments.");
17076 const TargetRegisterClass *AddrRegClass =
17077 getRegClassFor(getPointerTy());
17078 unsigned Vreg = MRI.createVirtualRegister(AddrRegClass);
17079 Chain = DAG.getCopyToReg(Chain, dl, Vreg, Size);
17080 SDValue Value = DAG.getNode(X86ISD::SEG_ALLOCA, dl, SPTy, Chain,
17081 DAG.getRegister(Vreg, SPTy));
17082 SDValue Ops1[2] = { Value, Chain };
17083 return DAG.getMergeValues(Ops1, dl);
17086 const unsigned Reg = (Subtarget->isTarget64BitLP64() ? X86::RAX : X86::EAX);
17088 Chain = DAG.getCopyToReg(Chain, dl, Reg, Size, Flag);
17089 Flag = Chain.getValue(1);
17090 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
17092 Chain = DAG.getNode(X86ISD::WIN_ALLOCA, dl, NodeTys, Chain, Flag);
17094 const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
17095 unsigned SPReg = RegInfo->getStackRegister();
17096 SDValue SP = DAG.getCopyFromReg(Chain, dl, SPReg, SPTy);
17097 Chain = SP.getValue(1);
17100 SP = DAG.getNode(ISD::AND, dl, VT, SP.getValue(0),
17101 DAG.getConstant(-(uint64_t)Align, VT));
17102 Chain = DAG.getCopyToReg(Chain, dl, SPReg, SP);
17105 SDValue Ops1[2] = { SP, Chain };
17106 return DAG.getMergeValues(Ops1, dl);
17110 SDValue X86TargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const {
17111 MachineFunction &MF = DAG.getMachineFunction();
17112 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
17114 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
17117 if (!Subtarget->is64Bit() || Subtarget->isTargetWin64()) {
17118 // vastart just stores the address of the VarArgsFrameIndex slot into the
17119 // memory location argument.
17120 SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(),
17122 return DAG.getStore(Op.getOperand(0), DL, FR, Op.getOperand(1),
17123 MachinePointerInfo(SV), false, false, 0);
  // __va_list_tag:
  //   gp_offset         (0 - 6 * 8)
  //   fp_offset         (48 - 48 + 8 * 16)
  //   overflow_arg_area (points to parameters coming in memory)
  //   reg_save_area
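  // For reference, the stores below populate the fields of the SysV AMD64
  // __va_list_tag at these byte offsets (an illustrative sketch, not code
  // compiled here):
  //
  //   struct __va_list_tag {
  //     unsigned int gp_offset;          // byte offset 0
  //     unsigned int fp_offset;          // byte offset 4
  //     void        *overflow_arg_area;  // byte offset 8
  //     void        *reg_save_area;      // byte offset 16
  //   };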
17131 SmallVector<SDValue, 8> MemOps;
17132 SDValue FIN = Op.getOperand(1);
17134 SDValue Store = DAG.getStore(Op.getOperand(0), DL,
17135 DAG.getConstant(FuncInfo->getVarArgsGPOffset(),
17137 FIN, MachinePointerInfo(SV), false, false, 0);
17138 MemOps.push_back(Store);
17141 FIN = DAG.getNode(ISD::ADD, DL, getPointerTy(),
17142 FIN, DAG.getIntPtrConstant(4));
17143 Store = DAG.getStore(Op.getOperand(0), DL,
17144 DAG.getConstant(FuncInfo->getVarArgsFPOffset(),
17146 FIN, MachinePointerInfo(SV, 4), false, false, 0);
17147 MemOps.push_back(Store);
17149 // Store ptr to overflow_arg_area
17150 FIN = DAG.getNode(ISD::ADD, DL, getPointerTy(),
17151 FIN, DAG.getIntPtrConstant(4));
17152 SDValue OVFIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(),
17154 Store = DAG.getStore(Op.getOperand(0), DL, OVFIN, FIN,
17155 MachinePointerInfo(SV, 8),
17157 MemOps.push_back(Store);
17159 // Store ptr to reg_save_area.
17160 FIN = DAG.getNode(ISD::ADD, DL, getPointerTy(),
17161 FIN, DAG.getIntPtrConstant(8));
17162 SDValue RSFIN = DAG.getFrameIndex(FuncInfo->getRegSaveFrameIndex(),
17164 Store = DAG.getStore(Op.getOperand(0), DL, RSFIN, FIN,
17165 MachinePointerInfo(SV, 16), false, false, 0);
17166 MemOps.push_back(Store);
17167 return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOps);
17170 SDValue X86TargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG) const {
17171 assert(Subtarget->is64Bit() &&
17172 "LowerVAARG only handles 64-bit va_arg!");
17173 assert((Subtarget->isTargetLinux() ||
17174 Subtarget->isTargetDarwin()) &&
17175 "Unhandled target in LowerVAARG");
17176 assert(Op.getNode()->getNumOperands() == 4);
17177 SDValue Chain = Op.getOperand(0);
17178 SDValue SrcPtr = Op.getOperand(1);
17179 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
17180 unsigned Align = Op.getConstantOperandVal(3);
17183 EVT ArgVT = Op.getNode()->getValueType(0);
17184 Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
17185 uint32_t ArgSize = getDataLayout()->getTypeAllocSize(ArgTy);
17188 // Decide which area this value should be read from.
17189 // TODO: Implement the AMD64 ABI in its entirety. This simple
17190 // selection mechanism works only for the basic types.
17191 if (ArgVT == MVT::f80) {
17192 llvm_unreachable("va_arg for f80 not yet implemented");
17193 } else if (ArgVT.isFloatingPoint() && ArgSize <= 16 /*bytes*/) {
17194 ArgMode = 2; // Argument passed in XMM register. Use fp_offset.
17195 } else if (ArgVT.isInteger() && ArgSize <= 32 /*bytes*/) {
17196 ArgMode = 1; // Argument passed in GPR64 register(s). Use gp_offset.
17198 llvm_unreachable("Unhandled argument type in LowerVAARG");
17201 if (ArgMode == 2) {
17202 // Sanity Check: Make sure using fp_offset makes sense.
17203 assert(!DAG.getTarget().Options.UseSoftFloat &&
17204 !(DAG.getMachineFunction().getFunction()->hasFnAttribute(
17205 Attribute::NoImplicitFloat)) &&
17206 Subtarget->hasSSE1());
17209 // Insert VAARG_64 node into the DAG
17210 // VAARG_64 returns two values: Variable Argument Address, Chain
17211 SmallVector<SDValue, 11> InstOps;
17212 InstOps.push_back(Chain);
17213 InstOps.push_back(SrcPtr);
17214 InstOps.push_back(DAG.getConstant(ArgSize, MVT::i32));
17215 InstOps.push_back(DAG.getConstant(ArgMode, MVT::i8));
17216 InstOps.push_back(DAG.getConstant(Align, MVT::i32));
17217 SDVTList VTs = DAG.getVTList(getPointerTy(), MVT::Other);
17218 SDValue VAARG = DAG.getMemIntrinsicNode(X86ISD::VAARG_64, dl,
17219 VTs, InstOps, MVT::i64,
17220 MachinePointerInfo(SV),
17222 /*Volatile=*/false,
17224 /*WriteMem=*/true);
17225 Chain = VAARG.getValue(1);
17227 // Load the next argument and return it
17228 return DAG.getLoad(ArgVT, dl,
17231 MachinePointerInfo(),
17232 false, false, false, 0);
17235 static SDValue LowerVACOPY(SDValue Op, const X86Subtarget *Subtarget,
17236 SelectionDAG &DAG) {
17237 // X86-64 va_list is a struct { i32, i32, i8*, i8* }.
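  // That layout is why a fixed 24-byte, 8-byte-aligned copy suffices below:
  // 4 + 4 + 8 + 8 == 24 bytes (matches the getMemcpy size/alignment arguments).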
17238 assert(Subtarget->is64Bit() && "This code only handles 64-bit va_copy!");
17239 SDValue Chain = Op.getOperand(0);
17240 SDValue DstPtr = Op.getOperand(1);
17241 SDValue SrcPtr = Op.getOperand(2);
17242 const Value *DstSV = cast<SrcValueSDNode>(Op.getOperand(3))->getValue();
17243 const Value *SrcSV = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();
17246 return DAG.getMemcpy(Chain, DL, DstPtr, SrcPtr,
17247 DAG.getIntPtrConstant(24), 8, /*isVolatile*/false,
17249 MachinePointerInfo(DstSV), MachinePointerInfo(SrcSV));
17252 // getTargetVShiftByConstNode - Handle vector element shifts where the shift
17253 // amount is a constant. Takes immediate version of shift as input.
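// Illustrative use (mirroring the 64-bit vector multiply lowering further down
// in this file):
//   SDValue Ahi = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, VT, A, 32, DAG);
// yields a single (VSRLI A, 32) target node, or folds the shift directly into
// A's elements when A is a BUILD_VECTOR of constants/undefs.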
17254 static SDValue getTargetVShiftByConstNode(unsigned Opc, SDLoc dl, MVT VT,
17255 SDValue SrcOp, uint64_t ShiftAmt,
17256 SelectionDAG &DAG) {
17257 MVT ElementType = VT.getVectorElementType();
  // Fold this packed shift into its first operand if ShiftAmt is 0.
  if (ShiftAmt == 0)
    return SrcOp;
17263 // Check for ShiftAmt >= element width
17264 if (ShiftAmt >= ElementType.getSizeInBits()) {
17265 if (Opc == X86ISD::VSRAI)
17266 ShiftAmt = ElementType.getSizeInBits() - 1;
17268 return DAG.getConstant(0, VT);
17271 assert((Opc == X86ISD::VSHLI || Opc == X86ISD::VSRLI || Opc == X86ISD::VSRAI)
17272 && "Unknown target vector shift-by-constant node");
  // Fold this packed vector shift into a build vector if SrcOp is a
  // vector of Constants or UNDEFs and SrcOp's value type is the same as VT.
17276 if (VT == SrcOp.getSimpleValueType() &&
17277 ISD::isBuildVectorOfConstantSDNodes(SrcOp.getNode())) {
17278 SmallVector<SDValue, 8> Elts;
17279 unsigned NumElts = SrcOp->getNumOperands();
17280 ConstantSDNode *ND;
17283 default: llvm_unreachable(nullptr);
17284 case X86ISD::VSHLI:
17285 for (unsigned i=0; i!=NumElts; ++i) {
17286 SDValue CurrentOp = SrcOp->getOperand(i);
17287 if (CurrentOp->getOpcode() == ISD::UNDEF) {
17288 Elts.push_back(CurrentOp);
17291 ND = cast<ConstantSDNode>(CurrentOp);
17292 const APInt &C = ND->getAPIntValue();
17293 Elts.push_back(DAG.getConstant(C.shl(ShiftAmt), ElementType));
17296 case X86ISD::VSRLI:
17297 for (unsigned i=0; i!=NumElts; ++i) {
17298 SDValue CurrentOp = SrcOp->getOperand(i);
17299 if (CurrentOp->getOpcode() == ISD::UNDEF) {
17300 Elts.push_back(CurrentOp);
17303 ND = cast<ConstantSDNode>(CurrentOp);
17304 const APInt &C = ND->getAPIntValue();
17305 Elts.push_back(DAG.getConstant(C.lshr(ShiftAmt), ElementType));
17308 case X86ISD::VSRAI:
17309 for (unsigned i=0; i!=NumElts; ++i) {
17310 SDValue CurrentOp = SrcOp->getOperand(i);
17311 if (CurrentOp->getOpcode() == ISD::UNDEF) {
17312 Elts.push_back(CurrentOp);
17315 ND = cast<ConstantSDNode>(CurrentOp);
17316 const APInt &C = ND->getAPIntValue();
17317 Elts.push_back(DAG.getConstant(C.ashr(ShiftAmt), ElementType));
17322 return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Elts);
17325 return DAG.getNode(Opc, dl, VT, SrcOp, DAG.getConstant(ShiftAmt, MVT::i8));
17328 // getTargetVShiftNode - Handle vector element shifts where the shift amount
17329 // may or may not be a constant. Takes immediate version of shift as input.
17330 static SDValue getTargetVShiftNode(unsigned Opc, SDLoc dl, MVT VT,
17331 SDValue SrcOp, SDValue ShAmt,
17332 SelectionDAG &DAG) {
17333 MVT SVT = ShAmt.getSimpleValueType();
17334 assert((SVT == MVT::i32 || SVT == MVT::i64) && "Unexpected value type!");
17336 // Catch shift-by-constant.
17337 if (ConstantSDNode *CShAmt = dyn_cast<ConstantSDNode>(ShAmt))
17338 return getTargetVShiftByConstNode(Opc, dl, VT, SrcOp,
17339 CShAmt->getZExtValue(), DAG);
  // Change opcode to non-immediate version.
  switch (Opc) {
  default: llvm_unreachable("Unknown target vector shift node");
  case X86ISD::VSHLI: Opc = X86ISD::VSHL; break;
  case X86ISD::VSRLI: Opc = X86ISD::VSRL; break;
  case X86ISD::VSRAI: Opc = X86ISD::VSRA; break;
  }
17349 const X86Subtarget &Subtarget =
17350 static_cast<const X86Subtarget &>(DAG.getSubtarget());
17351 if (Subtarget.hasSSE41() && ShAmt.getOpcode() == ISD::ZERO_EXTEND &&
17352 ShAmt.getOperand(0).getSimpleValueType() == MVT::i16) {
17353 // Let the shuffle legalizer expand this shift amount node.
17354 SDValue Op0 = ShAmt.getOperand(0);
17355 Op0 = DAG.getNode(ISD::SCALAR_TO_VECTOR, SDLoc(Op0), MVT::v8i16, Op0);
17356 ShAmt = getShuffleVectorZeroOrUndef(Op0, 0, true, &Subtarget, DAG);
    // Need to build a vector containing the shift amount.
    // SSE/AVX packed shifts only use the lower 64 bits of the shift count.
17360 SmallVector<SDValue, 4> ShOps;
17361 ShOps.push_back(ShAmt);
17362 if (SVT == MVT::i32) {
17363 ShOps.push_back(DAG.getConstant(0, SVT));
17364 ShOps.push_back(DAG.getUNDEF(SVT));
17366 ShOps.push_back(DAG.getUNDEF(SVT));
17368 MVT BVT = SVT == MVT::i32 ? MVT::v4i32 : MVT::v2i64;
17369 ShAmt = DAG.getNode(ISD::BUILD_VECTOR, dl, BVT, ShOps);
17372 // The return type has to be a 128-bit type with the same element
17373 // type as the input type.
17374 MVT EltVT = VT.getVectorElementType();
17375 EVT ShVT = MVT::getVectorVT(EltVT, 128/EltVT.getSizeInBits());
17377 ShAmt = DAG.getNode(ISD::BITCAST, dl, ShVT, ShAmt);
17378 return DAG.getNode(Opc, dl, VT, SrcOp, ShAmt);
17381 /// \brief Return (and \p Op, \p Mask) for compare instructions or
17382 /// (vselect \p Mask, \p Op, \p PreservedSrc) for others along with the
17383 /// necessary casting for \p Mask when lowering masking intrinsics.
17384 static SDValue getVectorMaskingNode(SDValue Op, SDValue Mask,
17385 SDValue PreservedSrc,
17386 const X86Subtarget *Subtarget,
17387 SelectionDAG &DAG) {
17388 EVT VT = Op.getValueType();
17389 EVT MaskVT = EVT::getVectorVT(*DAG.getContext(),
17390 MVT::i1, VT.getVectorNumElements());
17391 EVT BitcastVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
17392 Mask.getValueType().getSizeInBits());
17395 assert(MaskVT.isSimple() && "invalid mask type");
17397 if (isAllOnes(Mask))
  // When MaskVT equals v2i1 or v4i1, the low 2 or 4 elements
  // are extracted by EXTRACT_SUBVECTOR.
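  // For example, an i8 mask guarding a 4-element operation is bitcast to v8i1
  // and its low four lanes become the v4i1 VMask built below.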
17402 SDValue VMask = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MaskVT,
17403 DAG.getNode(ISD::BITCAST, dl, BitcastVT, Mask),
17404 DAG.getIntPtrConstant(0));
17406 switch (Op.getOpcode()) {
17408 case X86ISD::PCMPEQM:
17409 case X86ISD::PCMPGTM:
17411 case X86ISD::CMPMU:
17412 return DAG.getNode(ISD::AND, dl, VT, Op, VMask);
17414 if (PreservedSrc.getOpcode() == ISD::UNDEF)
17415 PreservedSrc = getZeroVector(VT, Subtarget, DAG, dl);
17416 return DAG.getNode(ISD::VSELECT, dl, VT, VMask, Op, PreservedSrc);
17419 /// \brief Creates an SDNode for a predicated scalar operation.
17420 /// \returns (X86vselect \p Mask, \p Op, \p PreservedSrc).
/// The mask comes in as MVT::i8 and should be truncated
/// to MVT::i1 while lowering masking intrinsics.
/// The main difference between ScalarMaskingNode and VectorMaskingNode is that
/// the former uses "X86select" instead of "vselect": we simply cannot create a
/// "vselect" node for a scalar instruction.
17426 static SDValue getScalarMaskingNode(SDValue Op, SDValue Mask,
17427 SDValue PreservedSrc,
17428 const X86Subtarget *Subtarget,
17429 SelectionDAG &DAG) {
17430 if (isAllOnes(Mask))
17433 EVT VT = Op.getValueType();
17435 // The mask should be of type MVT::i1
17436 SDValue IMask = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, Mask);
17438 if (PreservedSrc.getOpcode() == ISD::UNDEF)
17439 PreservedSrc = getZeroVector(VT, Subtarget, DAG, dl);
17440 return DAG.getNode(X86ISD::SELECT, dl, VT, IMask, Op, PreservedSrc);
17443 static SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, const X86Subtarget *Subtarget,
17444 SelectionDAG &DAG) {
17446 unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
17447 EVT VT = Op.getValueType();
17448 const IntrinsicData* IntrData = getIntrinsicWithoutChain(IntNo);
17450 switch(IntrData->Type) {
17451 case INTR_TYPE_1OP:
17452 return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(), Op.getOperand(1));
17453 case INTR_TYPE_2OP:
17454 return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(), Op.getOperand(1),
17456 case INTR_TYPE_3OP:
17457 return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(), Op.getOperand(1),
17458 Op.getOperand(2), Op.getOperand(3));
17459 case INTR_TYPE_1OP_MASK_RM: {
17460 SDValue Src = Op.getOperand(1);
17461 SDValue Src0 = Op.getOperand(2);
17462 SDValue Mask = Op.getOperand(3);
17463 SDValue RoundingMode = Op.getOperand(4);
17464 return getVectorMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT, Src,
17466 Mask, Src0, Subtarget, DAG);
17468 case INTR_TYPE_SCALAR_MASK_RM: {
17469 SDValue Src1 = Op.getOperand(1);
17470 SDValue Src2 = Op.getOperand(2);
17471 SDValue Src0 = Op.getOperand(3);
17472 SDValue Mask = Op.getOperand(4);
17473 SDValue RoundingMode = Op.getOperand(5);
17474 return getScalarMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT, Src1, Src2,
17476 Mask, Src0, Subtarget, DAG);
17478 case INTR_TYPE_2OP_MASK: {
17479 SDValue Mask = Op.getOperand(4);
17480 SDValue PassThru = Op.getOperand(3);
17481 unsigned IntrWithRoundingModeOpcode = IntrData->Opc1;
17482 if (IntrWithRoundingModeOpcode != 0) {
17483 unsigned Round = cast<ConstantSDNode>(Op.getOperand(5))->getZExtValue();
17484 if (Round != X86::STATIC_ROUNDING::CUR_DIRECTION) {
17485 return getVectorMaskingNode(DAG.getNode(IntrWithRoundingModeOpcode,
17486 dl, Op.getValueType(),
17487 Op.getOperand(1), Op.getOperand(2),
17488 Op.getOperand(3), Op.getOperand(5)),
17489 Mask, PassThru, Subtarget, DAG);
17492 return getVectorMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT,
17495 Mask, PassThru, Subtarget, DAG);
17497 case FMA_OP_MASK: {
17498 SDValue Src1 = Op.getOperand(1);
17499 SDValue Src2 = Op.getOperand(2);
17500 SDValue Src3 = Op.getOperand(3);
17501 SDValue Mask = Op.getOperand(4);
17502 unsigned IntrWithRoundingModeOpcode = IntrData->Opc1;
17503 if (IntrWithRoundingModeOpcode != 0) {
17504 SDValue Rnd = Op.getOperand(5);
17505 if (cast<ConstantSDNode>(Rnd)->getZExtValue() !=
17506 X86::STATIC_ROUNDING::CUR_DIRECTION)
17507 return getVectorMaskingNode(DAG.getNode(IntrWithRoundingModeOpcode,
17508 dl, Op.getValueType(),
17509 Src1, Src2, Src3, Rnd),
17510 Mask, Src1, Subtarget, DAG);
17512 return getVectorMaskingNode(DAG.getNode(IntrData->Opc0,
17513 dl, Op.getValueType(),
17515 Mask, Src1, Subtarget, DAG);
17518 case CMP_MASK_CC: {
17519 // Comparison intrinsics with masks.
17520 // Example of transformation:
17521 // (i8 (int_x86_avx512_mask_pcmpeq_q_128
17522 // (v2i64 %a), (v2i64 %b), (i8 %mask))) ->
17524 // (v8i1 (insert_subvector undef,
17525 // (v2i1 (and (PCMPEQM %a, %b),
17526 // (extract_subvector
17527 // (v8i1 (bitcast %mask)), 0))), 0))))
17528 EVT VT = Op.getOperand(1).getValueType();
17529 EVT MaskVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
17530 VT.getVectorNumElements());
17531 SDValue Mask = Op.getOperand((IntrData->Type == CMP_MASK_CC) ? 4 : 3);
17532 EVT BitcastVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
17533 Mask.getValueType().getSizeInBits());
17535 if (IntrData->Type == CMP_MASK_CC) {
17536 Cmp = DAG.getNode(IntrData->Opc0, dl, MaskVT, Op.getOperand(1),
17537 Op.getOperand(2), Op.getOperand(3));
17539 assert(IntrData->Type == CMP_MASK && "Unexpected intrinsic type!");
17540 Cmp = DAG.getNode(IntrData->Opc0, dl, MaskVT, Op.getOperand(1),
17543 SDValue CmpMask = getVectorMaskingNode(Cmp, Mask,
17544 DAG.getTargetConstant(0, MaskVT),
17546 SDValue Res = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, BitcastVT,
17547 DAG.getUNDEF(BitcastVT), CmpMask,
17548 DAG.getIntPtrConstant(0));
17549 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
17551 case COMI: { // Comparison intrinsics
17552 ISD::CondCode CC = (ISD::CondCode)IntrData->Opc1;
17553 SDValue LHS = Op.getOperand(1);
17554 SDValue RHS = Op.getOperand(2);
17555 unsigned X86CC = TranslateX86CC(CC, true, LHS, RHS, DAG);
17556 assert(X86CC != X86::COND_INVALID && "Unexpected illegal condition!");
17557 SDValue Cond = DAG.getNode(IntrData->Opc0, dl, MVT::i32, LHS, RHS);
17558 SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
17559 DAG.getConstant(X86CC, MVT::i8), Cond);
17560 return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC);
17563 return getTargetVShiftNode(IntrData->Opc0, dl, Op.getSimpleValueType(),
17564 Op.getOperand(1), Op.getOperand(2), DAG);
17566 return getVectorMaskingNode(getTargetVShiftNode(IntrData->Opc0, dl,
17567 Op.getSimpleValueType(),
17569 Op.getOperand(2), DAG),
17570 Op.getOperand(4), Op.getOperand(3), Subtarget,
17572 case COMPRESS_EXPAND_IN_REG: {
17573 SDValue Mask = Op.getOperand(3);
17574 SDValue DataToCompress = Op.getOperand(1);
17575 SDValue PassThru = Op.getOperand(2);
17576 if (isAllOnes(Mask)) // return data as is
17577 return Op.getOperand(1);
17578 EVT VT = Op.getValueType();
17579 EVT MaskVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
17580 VT.getVectorNumElements());
17581 EVT BitcastVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
17582 Mask.getValueType().getSizeInBits());
17584 SDValue VMask = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MaskVT,
17585 DAG.getNode(ISD::BITCAST, dl, BitcastVT, Mask),
17586 DAG.getIntPtrConstant(0));
17588 return DAG.getNode(IntrData->Opc0, dl, VT, VMask, DataToCompress,
17592 SDValue Mask = Op.getOperand(3);
17593 EVT VT = Op.getValueType();
17594 EVT MaskVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
17595 VT.getVectorNumElements());
17596 EVT BitcastVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
17597 Mask.getValueType().getSizeInBits());
17599 SDValue VMask = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MaskVT,
17600 DAG.getNode(ISD::BITCAST, dl, BitcastVT, Mask),
17601 DAG.getIntPtrConstant(0));
17602 return DAG.getNode(IntrData->Opc0, dl, VT, VMask, Op.getOperand(1),
17611 default: return SDValue(); // Don't custom lower most intrinsics.
17613 case Intrinsic::x86_avx512_mask_valign_q_512:
17614 case Intrinsic::x86_avx512_mask_valign_d_512:
17615 // Vector source operands are swapped.
17616 return getVectorMaskingNode(DAG.getNode(X86ISD::VALIGN, dl,
17617 Op.getValueType(), Op.getOperand(2),
17620 Op.getOperand(5), Op.getOperand(4),
  // ptest and testp intrinsics. The intrinsics these come from are designed to
  // return an integer value, not just an instruction, so lower them to the
  // ptest or testp pattern and a setcc for the result.
17626 case Intrinsic::x86_sse41_ptestz:
17627 case Intrinsic::x86_sse41_ptestc:
17628 case Intrinsic::x86_sse41_ptestnzc:
17629 case Intrinsic::x86_avx_ptestz_256:
17630 case Intrinsic::x86_avx_ptestc_256:
17631 case Intrinsic::x86_avx_ptestnzc_256:
17632 case Intrinsic::x86_avx_vtestz_ps:
17633 case Intrinsic::x86_avx_vtestc_ps:
17634 case Intrinsic::x86_avx_vtestnzc_ps:
17635 case Intrinsic::x86_avx_vtestz_pd:
17636 case Intrinsic::x86_avx_vtestc_pd:
17637 case Intrinsic::x86_avx_vtestnzc_pd:
17638 case Intrinsic::x86_avx_vtestz_ps_256:
17639 case Intrinsic::x86_avx_vtestc_ps_256:
17640 case Intrinsic::x86_avx_vtestnzc_ps_256:
17641 case Intrinsic::x86_avx_vtestz_pd_256:
17642 case Intrinsic::x86_avx_vtestc_pd_256:
17643 case Intrinsic::x86_avx_vtestnzc_pd_256: {
17644 bool IsTestPacked = false;
17647 default: llvm_unreachable("Bad fallthrough in Intrinsic lowering.");
17648 case Intrinsic::x86_avx_vtestz_ps:
17649 case Intrinsic::x86_avx_vtestz_pd:
17650 case Intrinsic::x86_avx_vtestz_ps_256:
17651 case Intrinsic::x86_avx_vtestz_pd_256:
17652 IsTestPacked = true; // Fallthrough
17653 case Intrinsic::x86_sse41_ptestz:
17654 case Intrinsic::x86_avx_ptestz_256:
17656 X86CC = X86::COND_E;
17658 case Intrinsic::x86_avx_vtestc_ps:
17659 case Intrinsic::x86_avx_vtestc_pd:
17660 case Intrinsic::x86_avx_vtestc_ps_256:
17661 case Intrinsic::x86_avx_vtestc_pd_256:
17662 IsTestPacked = true; // Fallthrough
17663 case Intrinsic::x86_sse41_ptestc:
17664 case Intrinsic::x86_avx_ptestc_256:
17666 X86CC = X86::COND_B;
17668 case Intrinsic::x86_avx_vtestnzc_ps:
17669 case Intrinsic::x86_avx_vtestnzc_pd:
17670 case Intrinsic::x86_avx_vtestnzc_ps_256:
17671 case Intrinsic::x86_avx_vtestnzc_pd_256:
17672 IsTestPacked = true; // Fallthrough
17673 case Intrinsic::x86_sse41_ptestnzc:
17674 case Intrinsic::x86_avx_ptestnzc_256:
17676 X86CC = X86::COND_A;
17680 SDValue LHS = Op.getOperand(1);
17681 SDValue RHS = Op.getOperand(2);
17682 unsigned TestOpc = IsTestPacked ? X86ISD::TESTP : X86ISD::PTEST;
17683 SDValue Test = DAG.getNode(TestOpc, dl, MVT::i32, LHS, RHS);
17684 SDValue CC = DAG.getConstant(X86CC, MVT::i8);
17685 SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8, CC, Test);
17686 return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC);
17688 case Intrinsic::x86_avx512_kortestz_w:
17689 case Intrinsic::x86_avx512_kortestc_w: {
17690 unsigned X86CC = (IntNo == Intrinsic::x86_avx512_kortestz_w)? X86::COND_E: X86::COND_B;
17691 SDValue LHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i1, Op.getOperand(1));
17692 SDValue RHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i1, Op.getOperand(2));
17693 SDValue CC = DAG.getConstant(X86CC, MVT::i8);
17694 SDValue Test = DAG.getNode(X86ISD::KORTEST, dl, MVT::i32, LHS, RHS);
17695 SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i1, CC, Test);
17696 return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC);
17699 case Intrinsic::x86_sse42_pcmpistria128:
17700 case Intrinsic::x86_sse42_pcmpestria128:
17701 case Intrinsic::x86_sse42_pcmpistric128:
17702 case Intrinsic::x86_sse42_pcmpestric128:
17703 case Intrinsic::x86_sse42_pcmpistrio128:
17704 case Intrinsic::x86_sse42_pcmpestrio128:
17705 case Intrinsic::x86_sse42_pcmpistris128:
17706 case Intrinsic::x86_sse42_pcmpestris128:
17707 case Intrinsic::x86_sse42_pcmpistriz128:
17708 case Intrinsic::x86_sse42_pcmpestriz128: {
17712 default: llvm_unreachable("Impossible intrinsic"); // Can't reach here.
17713 case Intrinsic::x86_sse42_pcmpistria128:
17714 Opcode = X86ISD::PCMPISTRI;
17715 X86CC = X86::COND_A;
17717 case Intrinsic::x86_sse42_pcmpestria128:
17718 Opcode = X86ISD::PCMPESTRI;
17719 X86CC = X86::COND_A;
17721 case Intrinsic::x86_sse42_pcmpistric128:
17722 Opcode = X86ISD::PCMPISTRI;
17723 X86CC = X86::COND_B;
17725 case Intrinsic::x86_sse42_pcmpestric128:
17726 Opcode = X86ISD::PCMPESTRI;
17727 X86CC = X86::COND_B;
17729 case Intrinsic::x86_sse42_pcmpistrio128:
17730 Opcode = X86ISD::PCMPISTRI;
17731 X86CC = X86::COND_O;
17733 case Intrinsic::x86_sse42_pcmpestrio128:
17734 Opcode = X86ISD::PCMPESTRI;
17735 X86CC = X86::COND_O;
17737 case Intrinsic::x86_sse42_pcmpistris128:
17738 Opcode = X86ISD::PCMPISTRI;
17739 X86CC = X86::COND_S;
17741 case Intrinsic::x86_sse42_pcmpestris128:
17742 Opcode = X86ISD::PCMPESTRI;
17743 X86CC = X86::COND_S;
17745 case Intrinsic::x86_sse42_pcmpistriz128:
17746 Opcode = X86ISD::PCMPISTRI;
17747 X86CC = X86::COND_E;
17749 case Intrinsic::x86_sse42_pcmpestriz128:
17750 Opcode = X86ISD::PCMPESTRI;
17751 X86CC = X86::COND_E;
17754 SmallVector<SDValue, 5> NewOps(Op->op_begin()+1, Op->op_end());
17755 SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
17756 SDValue PCMP = DAG.getNode(Opcode, dl, VTs, NewOps);
17757 SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
17758 DAG.getConstant(X86CC, MVT::i8),
17759 SDValue(PCMP.getNode(), 1));
17760 return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC);
17763 case Intrinsic::x86_sse42_pcmpistri128:
17764 case Intrinsic::x86_sse42_pcmpestri128: {
17766 if (IntNo == Intrinsic::x86_sse42_pcmpistri128)
17767 Opcode = X86ISD::PCMPISTRI;
17769 Opcode = X86ISD::PCMPESTRI;
17771 SmallVector<SDValue, 5> NewOps(Op->op_begin()+1, Op->op_end());
17772 SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
17773 return DAG.getNode(Opcode, dl, VTs, NewOps);
17778 static SDValue getGatherNode(unsigned Opc, SDValue Op, SelectionDAG &DAG,
17779 SDValue Src, SDValue Mask, SDValue Base,
17780 SDValue Index, SDValue ScaleOp, SDValue Chain,
17781 const X86Subtarget * Subtarget) {
17783 ConstantSDNode *C = dyn_cast<ConstantSDNode>(ScaleOp);
17784 assert(C && "Invalid scale type");
17785 SDValue Scale = DAG.getTargetConstant(C->getZExtValue(), MVT::i8);
17786 EVT MaskVT = MVT::getVectorVT(MVT::i1,
17787 Index.getSimpleValueType().getVectorNumElements());
17789 ConstantSDNode *MaskC = dyn_cast<ConstantSDNode>(Mask);
17791 MaskInReg = DAG.getTargetConstant(MaskC->getSExtValue(), MaskVT);
17793 MaskInReg = DAG.getNode(ISD::BITCAST, dl, MaskVT, Mask);
17794 SDVTList VTs = DAG.getVTList(Op.getValueType(), MaskVT, MVT::Other);
17795 SDValue Disp = DAG.getTargetConstant(0, MVT::i32);
17796 SDValue Segment = DAG.getRegister(0, MVT::i32);
17797 if (Src.getOpcode() == ISD::UNDEF)
17798 Src = getZeroVector(Op.getValueType(), Subtarget, DAG, dl);
17799 SDValue Ops[] = {Src, MaskInReg, Base, Scale, Index, Disp, Segment, Chain};
17800 SDNode *Res = DAG.getMachineNode(Opc, dl, VTs, Ops);
17801 SDValue RetOps[] = { SDValue(Res, 0), SDValue(Res, 2) };
17802 return DAG.getMergeValues(RetOps, dl);
17805 static SDValue getScatterNode(unsigned Opc, SDValue Op, SelectionDAG &DAG,
17806 SDValue Src, SDValue Mask, SDValue Base,
17807 SDValue Index, SDValue ScaleOp, SDValue Chain) {
17809 ConstantSDNode *C = dyn_cast<ConstantSDNode>(ScaleOp);
17810 assert(C && "Invalid scale type");
17811 SDValue Scale = DAG.getTargetConstant(C->getZExtValue(), MVT::i8);
17812 SDValue Disp = DAG.getTargetConstant(0, MVT::i32);
17813 SDValue Segment = DAG.getRegister(0, MVT::i32);
17814 EVT MaskVT = MVT::getVectorVT(MVT::i1,
17815 Index.getSimpleValueType().getVectorNumElements());
17817 ConstantSDNode *MaskC = dyn_cast<ConstantSDNode>(Mask);
17819 MaskInReg = DAG.getTargetConstant(MaskC->getSExtValue(), MaskVT);
17821 MaskInReg = DAG.getNode(ISD::BITCAST, dl, MaskVT, Mask);
17822 SDVTList VTs = DAG.getVTList(MaskVT, MVT::Other);
17823 SDValue Ops[] = {Base, Scale, Index, Disp, Segment, MaskInReg, Src, Chain};
17824 SDNode *Res = DAG.getMachineNode(Opc, dl, VTs, Ops);
17825 return SDValue(Res, 1);
17828 static SDValue getPrefetchNode(unsigned Opc, SDValue Op, SelectionDAG &DAG,
17829 SDValue Mask, SDValue Base, SDValue Index,
17830 SDValue ScaleOp, SDValue Chain) {
17832 ConstantSDNode *C = dyn_cast<ConstantSDNode>(ScaleOp);
17833 assert(C && "Invalid scale type");
17834 SDValue Scale = DAG.getTargetConstant(C->getZExtValue(), MVT::i8);
17835 SDValue Disp = DAG.getTargetConstant(0, MVT::i32);
17836 SDValue Segment = DAG.getRegister(0, MVT::i32);
17838 MVT::getVectorVT(MVT::i1, Index.getSimpleValueType().getVectorNumElements());
17840 ConstantSDNode *MaskC = dyn_cast<ConstantSDNode>(Mask);
17842 MaskInReg = DAG.getTargetConstant(MaskC->getSExtValue(), MaskVT);
17844 MaskInReg = DAG.getNode(ISD::BITCAST, dl, MaskVT, Mask);
17845 //SDVTList VTs = DAG.getVTList(MVT::Other);
17846 SDValue Ops[] = {MaskInReg, Base, Scale, Index, Disp, Segment, Chain};
17847 SDNode *Res = DAG.getMachineNode(Opc, dl, MVT::Other, Ops);
17848 return SDValue(Res, 0);
17851 // getReadPerformanceCounter - Handles the lowering of builtin intrinsics that
17852 // read performance monitor counters (x86_rdpmc).
17853 static void getReadPerformanceCounter(SDNode *N, SDLoc DL,
17854 SelectionDAG &DAG, const X86Subtarget *Subtarget,
17855 SmallVectorImpl<SDValue> &Results) {
17856 assert(N->getNumOperands() == 3 && "Unexpected number of operands!");
17857 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue);
  // The ECX register is used to select the index of the performance counter
  // to read.
17862 SDValue Chain = DAG.getCopyToReg(N->getOperand(0), DL, X86::ECX,
17864 SDValue rd = DAG.getNode(X86ISD::RDPMC_DAG, DL, Tys, Chain);
17866 // Reads the content of a 64-bit performance counter and returns it in the
17867 // registers EDX:EAX.
17868 if (Subtarget->is64Bit()) {
17869 LO = DAG.getCopyFromReg(rd, DL, X86::RAX, MVT::i64, rd.getValue(1));
17870 HI = DAG.getCopyFromReg(LO.getValue(1), DL, X86::RDX, MVT::i64,
17873 LO = DAG.getCopyFromReg(rd, DL, X86::EAX, MVT::i32, rd.getValue(1));
17874 HI = DAG.getCopyFromReg(LO.getValue(1), DL, X86::EDX, MVT::i32,
17877 Chain = HI.getValue(1);
17879 if (Subtarget->is64Bit()) {
17880 // The EAX register is loaded with the low-order 32 bits. The EDX register
17881 // is loaded with the supported high-order bits of the counter.
17882 SDValue Tmp = DAG.getNode(ISD::SHL, DL, MVT::i64, HI,
17883 DAG.getConstant(32, MVT::i8));
17884 Results.push_back(DAG.getNode(ISD::OR, DL, MVT::i64, LO, Tmp));
17885 Results.push_back(Chain);
17889 // Use a buildpair to merge the two 32-bit values into a 64-bit one.
17890 SDValue Ops[] = { LO, HI };
17891 SDValue Pair = DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, Ops);
17892 Results.push_back(Pair);
17893 Results.push_back(Chain);
17896 // getReadTimeStampCounter - Handles the lowering of builtin intrinsics that
17897 // read the time stamp counter (x86_rdtsc and x86_rdtscp). This function is
17898 // also used to custom lower READCYCLECOUNTER nodes.
17899 static void getReadTimeStampCounter(SDNode *N, SDLoc DL, unsigned Opcode,
17900 SelectionDAG &DAG, const X86Subtarget *Subtarget,
17901 SmallVectorImpl<SDValue> &Results) {
17902 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue);
17903 SDValue rd = DAG.getNode(Opcode, DL, Tys, N->getOperand(0));
17906 // The processor's time-stamp counter (a 64-bit MSR) is stored into the
17907 // EDX:EAX registers. EDX is loaded with the high-order 32 bits of the MSR
17908 // and the EAX register is loaded with the low-order 32 bits.
17909 if (Subtarget->is64Bit()) {
17910 LO = DAG.getCopyFromReg(rd, DL, X86::RAX, MVT::i64, rd.getValue(1));
17911 HI = DAG.getCopyFromReg(LO.getValue(1), DL, X86::RDX, MVT::i64,
17914 LO = DAG.getCopyFromReg(rd, DL, X86::EAX, MVT::i32, rd.getValue(1));
17915 HI = DAG.getCopyFromReg(LO.getValue(1), DL, X86::EDX, MVT::i32,
17918 SDValue Chain = HI.getValue(1);
17920 if (Opcode == X86ISD::RDTSCP_DAG) {
17921 assert(N->getNumOperands() == 3 && "Unexpected number of operands!");
    // The RDTSCP instruction also loads the IA32_TSC_AUX MSR (address
    // C000_0103H) into the ECX register. Add 'ecx' explicitly to the chain.
17925 SDValue ecx = DAG.getCopyFromReg(Chain, DL, X86::ECX, MVT::i32,
    // Explicitly store the content of ECX at the location passed as input
    // to the 'rdtscp' intrinsic.
17929 Chain = DAG.getStore(ecx.getValue(1), DL, ecx, N->getOperand(2),
17930 MachinePointerInfo(), false, false, 0);
17933 if (Subtarget->is64Bit()) {
17934 // The EDX register is loaded with the high-order 32 bits of the MSR, and
17935 // the EAX register is loaded with the low-order 32 bits.
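    // The two halves are then recombined below as (HI << 32) | LO to produce
    // the final i64 counter value.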
17936 SDValue Tmp = DAG.getNode(ISD::SHL, DL, MVT::i64, HI,
17937 DAG.getConstant(32, MVT::i8));
17938 Results.push_back(DAG.getNode(ISD::OR, DL, MVT::i64, LO, Tmp));
17939 Results.push_back(Chain);
17943 // Use a buildpair to merge the two 32-bit values into a 64-bit one.
17944 SDValue Ops[] = { LO, HI };
17945 SDValue Pair = DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, Ops);
17946 Results.push_back(Pair);
17947 Results.push_back(Chain);
17950 static SDValue LowerREADCYCLECOUNTER(SDValue Op, const X86Subtarget *Subtarget,
17951 SelectionDAG &DAG) {
17952 SmallVector<SDValue, 2> Results;
17954 getReadTimeStampCounter(Op.getNode(), DL, X86ISD::RDTSC_DAG, DAG, Subtarget,
17956 return DAG.getMergeValues(Results, DL);
17960 static SDValue LowerINTRINSIC_W_CHAIN(SDValue Op, const X86Subtarget *Subtarget,
17961 SelectionDAG &DAG) {
17962 unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
17964 const IntrinsicData* IntrData = getIntrinsicWithChain(IntNo);
17969 switch(IntrData->Type) {
17971 llvm_unreachable("Unknown Intrinsic Type");
17975 // Emit the node with the right value type.
17976 SDVTList VTs = DAG.getVTList(Op->getValueType(0), MVT::Glue, MVT::Other);
17977 SDValue Result = DAG.getNode(IntrData->Opc0, dl, VTs, Op.getOperand(0));
    // If the value returned by RDRAND/RDSEED was valid (CF=1), return 1.
    // Otherwise return the value from Rand, which is always 0, cast to i32.
17981 SDValue Ops[] = { DAG.getZExtOrTrunc(Result, dl, Op->getValueType(1)),
17982 DAG.getConstant(1, Op->getValueType(1)),
17983 DAG.getConstant(X86::COND_B, MVT::i32),
17984 SDValue(Result.getNode(), 1) };
17985 SDValue isValid = DAG.getNode(X86ISD::CMOV, dl,
17986 DAG.getVTList(Op->getValueType(1), MVT::Glue),
17989 // Return { result, isValid, chain }.
17990 return DAG.getNode(ISD::MERGE_VALUES, dl, Op->getVTList(), Result, isValid,
17991 SDValue(Result.getNode(), 2));
17994 //gather(v1, mask, index, base, scale);
17995 SDValue Chain = Op.getOperand(0);
17996 SDValue Src = Op.getOperand(2);
17997 SDValue Base = Op.getOperand(3);
17998 SDValue Index = Op.getOperand(4);
17999 SDValue Mask = Op.getOperand(5);
18000 SDValue Scale = Op.getOperand(6);
18001 return getGatherNode(IntrData->Opc0, Op, DAG, Src, Mask, Base, Index, Scale, Chain,
18005 //scatter(base, mask, index, v1, scale);
18006 SDValue Chain = Op.getOperand(0);
18007 SDValue Base = Op.getOperand(2);
18008 SDValue Mask = Op.getOperand(3);
18009 SDValue Index = Op.getOperand(4);
18010 SDValue Src = Op.getOperand(5);
18011 SDValue Scale = Op.getOperand(6);
18012 return getScatterNode(IntrData->Opc0, Op, DAG, Src, Mask, Base, Index, Scale, Chain);
18015 SDValue Hint = Op.getOperand(6);
18017 if (dyn_cast<ConstantSDNode> (Hint) == nullptr ||
18018 (HintVal = dyn_cast<ConstantSDNode> (Hint)->getZExtValue()) > 1)
18019 llvm_unreachable("Wrong prefetch hint in intrinsic: should be 0 or 1");
18020 unsigned Opcode = (HintVal ? IntrData->Opc1 : IntrData->Opc0);
18021 SDValue Chain = Op.getOperand(0);
18022 SDValue Mask = Op.getOperand(2);
18023 SDValue Index = Op.getOperand(3);
18024 SDValue Base = Op.getOperand(4);
18025 SDValue Scale = Op.getOperand(5);
18026 return getPrefetchNode(Opcode, Op, DAG, Mask, Base, Index, Scale, Chain);
18028 // Read Time Stamp Counter (RDTSC) and Processor ID (RDTSCP).
18030 SmallVector<SDValue, 2> Results;
18031 getReadTimeStampCounter(Op.getNode(), dl, IntrData->Opc0, DAG, Subtarget, Results);
18032 return DAG.getMergeValues(Results, dl);
18034 // Read Performance Monitoring Counters.
18036 SmallVector<SDValue, 2> Results;
18037 getReadPerformanceCounter(Op.getNode(), dl, DAG, Subtarget, Results);
18038 return DAG.getMergeValues(Results, dl);
18040 // XTEST intrinsics.
18042 SDVTList VTs = DAG.getVTList(Op->getValueType(0), MVT::Other);
18043 SDValue InTrans = DAG.getNode(IntrData->Opc0, dl, VTs, Op.getOperand(0));
18044 SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
18045 DAG.getConstant(X86::COND_NE, MVT::i8),
18047 SDValue Ret = DAG.getNode(ISD::ZERO_EXTEND, dl, Op->getValueType(0), SetCC);
18048 return DAG.getNode(ISD::MERGE_VALUES, dl, Op->getVTList(),
18049 Ret, SDValue(InTrans.getNode(), 1));
18053 SmallVector<SDValue, 2> Results;
18054 SDVTList CFVTs = DAG.getVTList(Op->getValueType(0), MVT::Other);
18055 SDVTList VTs = DAG.getVTList(Op.getOperand(3)->getValueType(0), MVT::Other);
18056 SDValue GenCF = DAG.getNode(X86ISD::ADD, dl, CFVTs, Op.getOperand(2),
18057 DAG.getConstant(-1, MVT::i8));
18058 SDValue Res = DAG.getNode(IntrData->Opc0, dl, VTs, Op.getOperand(3),
18059 Op.getOperand(4), GenCF.getValue(1));
18060 SDValue Store = DAG.getStore(Op.getOperand(0), dl, Res.getValue(0),
18061 Op.getOperand(5), MachinePointerInfo(),
18063 SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
18064 DAG.getConstant(X86::COND_B, MVT::i8),
18066 Results.push_back(SetCC);
18067 Results.push_back(Store);
18068 return DAG.getMergeValues(Results, dl);
18070 case COMPRESS_TO_MEM: {
18072 SDValue Mask = Op.getOperand(4);
18073 SDValue DataToCompress = Op.getOperand(3);
18074 SDValue Addr = Op.getOperand(2);
18075 SDValue Chain = Op.getOperand(0);
18077 if (isAllOnes(Mask)) // return just a store
18078 return DAG.getStore(Chain, dl, DataToCompress, Addr,
18079 MachinePointerInfo(), false, false, 0);
18081 EVT VT = DataToCompress.getValueType();
18082 EVT MaskVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
18083 VT.getVectorNumElements());
18084 EVT BitcastVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
18085 Mask.getValueType().getSizeInBits());
18086 SDValue VMask = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MaskVT,
18087 DAG.getNode(ISD::BITCAST, dl, BitcastVT, Mask),
18088 DAG.getIntPtrConstant(0));
18090 SDValue Compressed = DAG.getNode(IntrData->Opc0, dl, VT, VMask,
18091 DataToCompress, DAG.getUNDEF(VT));
18092 return DAG.getStore(Chain, dl, Compressed, Addr,
18093 MachinePointerInfo(), false, false, 0);
18095 case EXPAND_FROM_MEM: {
18097 SDValue Mask = Op.getOperand(4);
18098 SDValue PathThru = Op.getOperand(3);
18099 SDValue Addr = Op.getOperand(2);
18100 SDValue Chain = Op.getOperand(0);
18101 EVT VT = Op.getValueType();
18103 if (isAllOnes(Mask)) // return just a load
18104 return DAG.getLoad(VT, dl, Chain, Addr, MachinePointerInfo(), false, false,
18106 EVT MaskVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
18107 VT.getVectorNumElements());
18108 EVT BitcastVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
18109 Mask.getValueType().getSizeInBits());
18110 SDValue VMask = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MaskVT,
18111 DAG.getNode(ISD::BITCAST, dl, BitcastVT, Mask),
18112 DAG.getIntPtrConstant(0));
18114 SDValue DataToExpand = DAG.getLoad(VT, dl, Chain, Addr, MachinePointerInfo(),
18115 false, false, false, 0);
18117 SmallVector<SDValue, 2> Results;
18118 Results.push_back(DAG.getNode(IntrData->Opc0, dl, VT, VMask, DataToExpand,
18120 Results.push_back(Chain);
18121 return DAG.getMergeValues(Results, dl);
18126 SDValue X86TargetLowering::LowerRETURNADDR(SDValue Op,
18127 SelectionDAG &DAG) const {
18128 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
18129 MFI->setReturnAddressIsTaken(true);
18131 if (verifyReturnAddressArgumentIsConstant(Op, DAG))
18134 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
18136 EVT PtrVT = getPointerTy();
18139 SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
18140 const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
18141 SDValue Offset = DAG.getConstant(RegInfo->getSlotSize(), PtrVT);
18142 return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(),
18143 DAG.getNode(ISD::ADD, dl, PtrVT,
18144 FrameAddr, Offset),
18145 MachinePointerInfo(), false, false, false, 0);
18148 // Just load the return address.
18149 SDValue RetAddrFI = getReturnAddressFrameIndex(DAG);
18150 return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(),
18151 RetAddrFI, MachinePointerInfo(), false, false, false, 0);
18154 SDValue X86TargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const {
18155 MachineFunction &MF = DAG.getMachineFunction();
18156 MachineFrameInfo *MFI = MF.getFrameInfo();
18157 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
18158 const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
18159 EVT VT = Op.getValueType();
18161 MFI->setFrameAddressIsTaken(true);
18163 if (MF.getTarget().getMCAsmInfo()->usesWindowsCFI()) {
    // Depth > 0 makes no sense on targets which use Windows unwind codes. It
    // is not possible to crawl up the stack without looking at the unwind
    // codes at the same time.
18167 int FrameAddrIndex = FuncInfo->getFAIndex();
18168 if (!FrameAddrIndex) {
18169 // Set up a frame object for the return address.
18170 unsigned SlotSize = RegInfo->getSlotSize();
18171 FrameAddrIndex = MF.getFrameInfo()->CreateFixedObject(
18172 SlotSize, /*Offset=*/INT64_MIN, /*IsImmutable=*/false);
18173 FuncInfo->setFAIndex(FrameAddrIndex);
18175 return DAG.getFrameIndex(FrameAddrIndex, VT);
18178 unsigned FrameReg =
18179 RegInfo->getPtrSizedFrameRegister(DAG.getMachineFunction());
18180 SDLoc dl(Op); // FIXME probably not meaningful
18181 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
18182 assert(((FrameReg == X86::RBP && VT == MVT::i64) ||
18183 (FrameReg == X86::EBP && VT == MVT::i32)) &&
18184 "Invalid Frame Register!");
18185 SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, VT);
18187 FrameAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), FrameAddr,
18188 MachinePointerInfo(),
18189 false, false, false, 0);
18193 // FIXME? Maybe this could be a TableGen attribute on some registers and
18194 // this table could be generated automatically from RegInfo.
18195 unsigned X86TargetLowering::getRegisterByName(const char* RegName,
18197 unsigned Reg = StringSwitch<unsigned>(RegName)
18198 .Case("esp", X86::ESP)
18199 .Case("rsp", X86::RSP)
18203 report_fatal_error("Invalid register name global variable");
18206 SDValue X86TargetLowering::LowerFRAME_TO_ARGS_OFFSET(SDValue Op,
18207 SelectionDAG &DAG) const {
18208 const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
18209 return DAG.getIntPtrConstant(2 * RegInfo->getSlotSize());
18212 SDValue X86TargetLowering::LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const {
18213 SDValue Chain = Op.getOperand(0);
18214 SDValue Offset = Op.getOperand(1);
18215 SDValue Handler = Op.getOperand(2);
18218 EVT PtrVT = getPointerTy();
18219 const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
18220 unsigned FrameReg = RegInfo->getFrameRegister(DAG.getMachineFunction());
18221 assert(((FrameReg == X86::RBP && PtrVT == MVT::i64) ||
18222 (FrameReg == X86::EBP && PtrVT == MVT::i32)) &&
18223 "Invalid Frame Register!");
18224 SDValue Frame = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, PtrVT);
18225 unsigned StoreAddrReg = (PtrVT == MVT::i64) ? X86::RCX : X86::ECX;
18227 SDValue StoreAddr = DAG.getNode(ISD::ADD, dl, PtrVT, Frame,
18228 DAG.getIntPtrConstant(RegInfo->getSlotSize()));
18229 StoreAddr = DAG.getNode(ISD::ADD, dl, PtrVT, StoreAddr, Offset);
18230 Chain = DAG.getStore(Chain, dl, Handler, StoreAddr, MachinePointerInfo(),
18232 Chain = DAG.getCopyToReg(Chain, dl, StoreAddrReg, StoreAddr);
18234 return DAG.getNode(X86ISD::EH_RETURN, dl, MVT::Other, Chain,
18235 DAG.getRegister(StoreAddrReg, PtrVT));
18238 SDValue X86TargetLowering::lowerEH_SJLJ_SETJMP(SDValue Op,
18239 SelectionDAG &DAG) const {
18241 return DAG.getNode(X86ISD::EH_SJLJ_SETJMP, DL,
18242 DAG.getVTList(MVT::i32, MVT::Other),
18243 Op.getOperand(0), Op.getOperand(1));
18246 SDValue X86TargetLowering::lowerEH_SJLJ_LONGJMP(SDValue Op,
18247 SelectionDAG &DAG) const {
18249 return DAG.getNode(X86ISD::EH_SJLJ_LONGJMP, DL, MVT::Other,
18250 Op.getOperand(0), Op.getOperand(1));
18253 static SDValue LowerADJUST_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) {
18254 return Op.getOperand(0);
18257 SDValue X86TargetLowering::LowerINIT_TRAMPOLINE(SDValue Op,
18258 SelectionDAG &DAG) const {
18259 SDValue Root = Op.getOperand(0);
18260 SDValue Trmp = Op.getOperand(1); // trampoline
18261 SDValue FPtr = Op.getOperand(2); // nested function
18262 SDValue Nest = Op.getOperand(3); // 'nest' parameter value
18265 const Value *TrmpAddr = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();
18266 const TargetRegisterInfo *TRI = Subtarget->getRegisterInfo();
18268 if (Subtarget->is64Bit()) {
18269 SDValue OutChains[6];
18271 // Large code-model.
18272 const unsigned char JMP64r = 0xFF; // 64-bit jmp through register opcode.
18273 const unsigned char MOV64ri = 0xB8; // X86::MOV64ri opcode.
18275 const unsigned char N86R10 = TRI->getEncodingValue(X86::R10) & 0x7;
18276 const unsigned char N86R11 = TRI->getEncodingValue(X86::R11) & 0x7;
18278 const unsigned char REX_WB = 0x40 | 0x08 | 0x01; // REX prefix
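    // Taken together, the stores below assemble the following trampoline
    // (byte offsets match the constants used in the ISD::ADD nodes; shown only
    // as an orientation aid, not emitted as text):
    //
    //   0:  49 BB <FPtr:imm64>   movabsq $FPtr, %r11
    //   10: 49 BA <Nest:imm64>   movabsq $Nest, %r10
    //   20: 49 FF E3             jmpq   *%r11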
18280 // Load the pointer to the nested function into R11.
18281 unsigned OpCode = ((MOV64ri | N86R11) << 8) | REX_WB; // movabsq r11
18282 SDValue Addr = Trmp;
18283 OutChains[0] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, MVT::i16),
18284 Addr, MachinePointerInfo(TrmpAddr),
18287 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
18288 DAG.getConstant(2, MVT::i64));
18289 OutChains[1] = DAG.getStore(Root, dl, FPtr, Addr,
18290 MachinePointerInfo(TrmpAddr, 2),
18293 // Load the 'nest' parameter value into R10.
18294 // R10 is specified in X86CallingConv.td
18295 OpCode = ((MOV64ri | N86R10) << 8) | REX_WB; // movabsq r10
18296 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
18297 DAG.getConstant(10, MVT::i64));
18298 OutChains[2] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, MVT::i16),
18299 Addr, MachinePointerInfo(TrmpAddr, 10),
18302 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
18303 DAG.getConstant(12, MVT::i64));
18304 OutChains[3] = DAG.getStore(Root, dl, Nest, Addr,
18305 MachinePointerInfo(TrmpAddr, 12),
18308 // Jump to the nested function.
18309 OpCode = (JMP64r << 8) | REX_WB; // jmpq *...
18310 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
18311 DAG.getConstant(20, MVT::i64));
18312 OutChains[4] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, MVT::i16),
18313 Addr, MachinePointerInfo(TrmpAddr, 20),
18316 unsigned char ModRM = N86R11 | (4 << 3) | (3 << 6); // ...r11
18317 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
18318 DAG.getConstant(22, MVT::i64));
18319 OutChains[5] = DAG.getStore(Root, dl, DAG.getConstant(ModRM, MVT::i8), Addr,
18320 MachinePointerInfo(TrmpAddr, 22),
18323 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
18325 const Function *Func =
18326 cast<Function>(cast<SrcValueSDNode>(Op.getOperand(5))->getValue());
18327 CallingConv::ID CC = Func->getCallingConv();
18332 llvm_unreachable("Unsupported calling convention");
18333 case CallingConv::C:
18334 case CallingConv::X86_StdCall: {
18335 // Pass 'nest' parameter in ECX.
18336 // Must be kept in sync with X86CallingConv.td
18337 NestReg = X86::ECX;
18339 // Check that ECX wasn't needed by an 'inreg' parameter.
18340 FunctionType *FTy = Func->getFunctionType();
18341 const AttributeSet &Attrs = Func->getAttributes();
18343 if (!Attrs.isEmpty() && !Func->isVarArg()) {
18344 unsigned InRegCount = 0;
18347 for (FunctionType::param_iterator I = FTy->param_begin(),
18348 E = FTy->param_end(); I != E; ++I, ++Idx)
18349 if (Attrs.hasAttribute(Idx, Attribute::InReg))
18350 // FIXME: should only count parameters that are lowered to integers.
18351 InRegCount += (TD->getTypeSizeInBits(*I) + 31) / 32;
18353 if (InRegCount > 2) {
18354 report_fatal_error("Nest register in use - reduce number of inreg"
18360 case CallingConv::X86_FastCall:
18361 case CallingConv::X86_ThisCall:
18362 case CallingConv::Fast:
18363 // Pass 'nest' parameter in EAX.
18364 // Must be kept in sync with X86CallingConv.td
18365 NestReg = X86::EAX;
18369 SDValue OutChains[4];
18370 SDValue Addr, Disp;
18372 Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
18373 DAG.getConstant(10, MVT::i32));
18374 Disp = DAG.getNode(ISD::SUB, dl, MVT::i32, FPtr, Addr);
18376 // This is storing the opcode for MOV32ri.
18377 const unsigned char MOV32ri = 0xB8; // X86::MOV32ri's opcode byte.
18378 const unsigned char N86Reg = TRI->getEncodingValue(NestReg) & 0x7;
18379 OutChains[0] = DAG.getStore(Root, dl,
18380 DAG.getConstant(MOV32ri|N86Reg, MVT::i8),
18381 Trmp, MachinePointerInfo(TrmpAddr),
18384 Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
18385 DAG.getConstant(1, MVT::i32));
18386 OutChains[1] = DAG.getStore(Root, dl, Nest, Addr,
18387 MachinePointerInfo(TrmpAddr, 1),
18390 const unsigned char JMP = 0xE9; // jmp <32bit dst> opcode.
18391 Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
18392 DAG.getConstant(5, MVT::i32));
18393 OutChains[2] = DAG.getStore(Root, dl, DAG.getConstant(JMP, MVT::i8), Addr,
18394 MachinePointerInfo(TrmpAddr, 5),
18397 Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
18398 DAG.getConstant(6, MVT::i32));
18399 OutChains[3] = DAG.getStore(Root, dl, Disp, Addr,
18400 MachinePointerInfo(TrmpAddr, 6),
18403 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
18407 SDValue X86TargetLowering::LowerFLT_ROUNDS_(SDValue Op,
18408 SelectionDAG &DAG) const {
  /*
   The rounding mode is in bits 11:10 of FPSR, and has the following
   settings:
     00 Round to nearest
     01 Round to -inf
     10 Round to +inf
     11 Round to 0

  FLT_ROUNDS, on the other hand, expects the following:
    -1 Undefined
     0 Round to 0
     1 Round to nearest
     2 Round to +inf
     3 Round to -inf

  To perform the conversion, we do:
    (((((FPSR & 0x800) >> 11) | ((FPSR & 0x400) >> 9)) + 1) & 3)
  */
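  // Worked example (not part of the original comment): FPSR = 0x0C00, i.e.
  // bits 11:10 == 11 (x87 round toward zero), gives ((1 | 2) + 1) & 3 == 0,
  // which is FLT_ROUNDS' encoding for "Round to 0".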
18428 MachineFunction &MF = DAG.getMachineFunction();
18429 const TargetFrameLowering &TFI = *Subtarget->getFrameLowering();
18430 unsigned StackAlignment = TFI.getStackAlignment();
18431 MVT VT = Op.getSimpleValueType();
18434 // Save FP Control Word to stack slot
18435 int SSFI = MF.getFrameInfo()->CreateStackObject(2, StackAlignment, false);
18436 SDValue StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
18438 MachineMemOperand *MMO =
18439 MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI),
18440 MachineMemOperand::MOStore, 2, 2);
18442 SDValue Ops[] = { DAG.getEntryNode(), StackSlot };
18443 SDValue Chain = DAG.getMemIntrinsicNode(X86ISD::FNSTCW16m, DL,
18444 DAG.getVTList(MVT::Other),
18445 Ops, MVT::i16, MMO);
18447 // Load FP Control Word from stack slot
18448 SDValue CWD = DAG.getLoad(MVT::i16, DL, Chain, StackSlot,
18449 MachinePointerInfo(), false, false, false, 0);
18451 // Transform as necessary
18453 DAG.getNode(ISD::SRL, DL, MVT::i16,
18454 DAG.getNode(ISD::AND, DL, MVT::i16,
18455 CWD, DAG.getConstant(0x800, MVT::i16)),
18456 DAG.getConstant(11, MVT::i8));
18458 DAG.getNode(ISD::SRL, DL, MVT::i16,
18459 DAG.getNode(ISD::AND, DL, MVT::i16,
18460 CWD, DAG.getConstant(0x400, MVT::i16)),
18461 DAG.getConstant(9, MVT::i8));
18464 DAG.getNode(ISD::AND, DL, MVT::i16,
18465 DAG.getNode(ISD::ADD, DL, MVT::i16,
18466 DAG.getNode(ISD::OR, DL, MVT::i16, CWD1, CWD2),
18467 DAG.getConstant(1, MVT::i16)),
18468 DAG.getConstant(3, MVT::i16));
18470 return DAG.getNode((VT.getSizeInBits() < 16 ?
18471 ISD::TRUNCATE : ISD::ZERO_EXTEND), DL, VT, RetVal);
18474 static SDValue LowerCTLZ(SDValue Op, SelectionDAG &DAG) {
18475 MVT VT = Op.getSimpleValueType();
18477 unsigned NumBits = VT.getSizeInBits();
18480 Op = Op.getOperand(0);
18481 if (VT == MVT::i8) {
18482 // Zero extend to i32 since there is not an i8 bsr.
18484 Op = DAG.getNode(ISD::ZERO_EXTEND, dl, OpVT, Op);
18487 // Issue a bsr (scan bits in reverse) which also sets EFLAGS.
18488 SDVTList VTs = DAG.getVTList(OpVT, MVT::i32);
18489 Op = DAG.getNode(X86ISD::BSR, dl, VTs, Op);
18491 // If src is zero (i.e. bsr sets ZF), returns NumBits.
18494 DAG.getConstant(NumBits+NumBits-1, OpVT),
18495 DAG.getConstant(X86::COND_E, MVT::i8),
18498 Op = DAG.getNode(X86ISD::CMOV, dl, OpVT, Ops);
18500 // Finally xor with NumBits-1.
18501 Op = DAG.getNode(ISD::XOR, dl, OpVT, Op, DAG.getConstant(NumBits-1, OpVT));
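  // Worked example: for an i32 input of 0x00010000, BSR returns bit index 16
  // and 16 ^ 31 == 15, the expected number of leading zeros.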
18504 Op = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Op);
18508 static SDValue LowerCTLZ_ZERO_UNDEF(SDValue Op, SelectionDAG &DAG) {
18509 MVT VT = Op.getSimpleValueType();
18511 unsigned NumBits = VT.getSizeInBits();
18514 Op = Op.getOperand(0);
18515 if (VT == MVT::i8) {
18516 // Zero extend to i32 since there is not an i8 bsr.
18518 Op = DAG.getNode(ISD::ZERO_EXTEND, dl, OpVT, Op);
18521 // Issue a bsr (scan bits in reverse).
18522 SDVTList VTs = DAG.getVTList(OpVT, MVT::i32);
18523 Op = DAG.getNode(X86ISD::BSR, dl, VTs, Op);
18525 // And xor with NumBits-1.
18526 Op = DAG.getNode(ISD::XOR, dl, OpVT, Op, DAG.getConstant(NumBits-1, OpVT));
18529 Op = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Op);
18533 static SDValue LowerCTTZ(SDValue Op, SelectionDAG &DAG) {
18534 MVT VT = Op.getSimpleValueType();
18535 unsigned NumBits = VT.getSizeInBits();
18537 Op = Op.getOperand(0);
18539 // Issue a bsf (scan bits forward) which also sets EFLAGS.
18540 SDVTList VTs = DAG.getVTList(VT, MVT::i32);
18541 Op = DAG.getNode(X86ISD::BSF, dl, VTs, Op);
18543 // If src is zero (i.e. bsf sets ZF), returns NumBits.
18546 DAG.getConstant(NumBits, VT),
18547 DAG.getConstant(X86::COND_E, MVT::i8),
18550 return DAG.getNode(X86ISD::CMOV, dl, VT, Ops);
18553 // Lower256IntArith - Break a 256-bit integer operation into two new 128-bit
18554 // ones, and then concatenate the result back.
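// For example, with only AVX1 available an ISD::ADD on v8i32 is lowered as two
// v4i32 adds on the extracted 128-bit halves, concatenated back into a v8i32.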
18555 static SDValue Lower256IntArith(SDValue Op, SelectionDAG &DAG) {
18556 MVT VT = Op.getSimpleValueType();
18558 assert(VT.is256BitVector() && VT.isInteger() &&
18559 "Unsupported value type for operation");
18561 unsigned NumElems = VT.getVectorNumElements();
18564 // Extract the LHS vectors
18565 SDValue LHS = Op.getOperand(0);
18566 SDValue LHS1 = Extract128BitVector(LHS, 0, DAG, dl);
18567 SDValue LHS2 = Extract128BitVector(LHS, NumElems/2, DAG, dl);
18569 // Extract the RHS vectors
18570 SDValue RHS = Op.getOperand(1);
18571 SDValue RHS1 = Extract128BitVector(RHS, 0, DAG, dl);
18572 SDValue RHS2 = Extract128BitVector(RHS, NumElems/2, DAG, dl);
18574 MVT EltVT = VT.getVectorElementType();
18575 MVT NewVT = MVT::getVectorVT(EltVT, NumElems/2);
18577 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT,
18578 DAG.getNode(Op.getOpcode(), dl, NewVT, LHS1, RHS1),
18579 DAG.getNode(Op.getOpcode(), dl, NewVT, LHS2, RHS2));
18582 static SDValue LowerADD(SDValue Op, SelectionDAG &DAG) {
18583 assert(Op.getSimpleValueType().is256BitVector() &&
18584 Op.getSimpleValueType().isInteger() &&
18585 "Only handle AVX 256-bit vector integer operation");
18586 return Lower256IntArith(Op, DAG);
18589 static SDValue LowerSUB(SDValue Op, SelectionDAG &DAG) {
18590 assert(Op.getSimpleValueType().is256BitVector() &&
18591 Op.getSimpleValueType().isInteger() &&
18592 "Only handle AVX 256-bit vector integer operation");
18593 return Lower256IntArith(Op, DAG);
18596 static SDValue LowerMUL(SDValue Op, const X86Subtarget *Subtarget,
18597 SelectionDAG &DAG) {
18599 MVT VT = Op.getSimpleValueType();
18601 // Decompose 256-bit ops into smaller 128-bit ops.
18602 if (VT.is256BitVector() && !Subtarget->hasInt256())
18603 return Lower256IntArith(Op, DAG);
18605 SDValue A = Op.getOperand(0);
18606 SDValue B = Op.getOperand(1);
18608 // Lower v4i32 mul as 2x shuffle, 2x pmuludq, 2x shuffle.
18609 if (VT == MVT::v4i32) {
18610 assert(Subtarget->hasSSE2() && !Subtarget->hasSSE41() &&
18611 "Should not custom lower when pmuldq is available!");
18613 // Extract the odd parts.
18614 static const int UnpackMask[] = { 1, -1, 3, -1 };
18615 SDValue Aodds = DAG.getVectorShuffle(VT, dl, A, A, UnpackMask);
18616 SDValue Bodds = DAG.getVectorShuffle(VT, dl, B, B, UnpackMask);
18618 // Multiply the even parts.
18619 SDValue Evens = DAG.getNode(X86ISD::PMULUDQ, dl, MVT::v2i64, A, B);
18620 // Now multiply odd parts.
18621 SDValue Odds = DAG.getNode(X86ISD::PMULUDQ, dl, MVT::v2i64, Aodds, Bodds);
18623 Evens = DAG.getNode(ISD::BITCAST, dl, VT, Evens);
18624 Odds = DAG.getNode(ISD::BITCAST, dl, VT, Odds);
    // Merge the two vectors back together with a shuffle. This expands into 2
    // shuffles.
18628 static const int ShufMask[] = { 0, 4, 2, 6 };
18629 return DAG.getVectorShuffle(VT, dl, Evens, Odds, ShufMask);
18632 assert((VT == MVT::v2i64 || VT == MVT::v4i64 || VT == MVT::v8i64) &&
18633 "Only know how to lower V2I64/V4I64/V8I64 multiply");
18635 // Ahi = psrlqi(a, 32);
18636 // Bhi = psrlqi(b, 32);
18638 // AloBlo = pmuludq(a, b);
18639 // AloBhi = pmuludq(a, Bhi);
18640 // AhiBlo = pmuludq(Ahi, b);
18642 // AloBhi = psllqi(AloBhi, 32);
18643 // AhiBlo = psllqi(AhiBlo, 32);
18644 // return AloBlo + AloBhi + AhiBlo;
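  // Why this is correct (a short derivation, not in the original comment):
  // write a = 2^32*Ahi + Alo and b = 2^32*Bhi + Blo. Modulo 2^64,
  //   a*b = Alo*Blo + 2^32*(Alo*Bhi + Ahi*Blo),
  // which is exactly AloBlo + (AloBhi << 32) + (AhiBlo << 32); the
  // 2^64*Ahi*Bhi term vanishes in 64-bit arithmetic.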
18646 SDValue Ahi = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, VT, A, 32, DAG);
18647 SDValue Bhi = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, VT, B, 32, DAG);
18649 // Bit cast to 32-bit vectors for MULUDQ
18650 EVT MulVT = (VT == MVT::v2i64) ? MVT::v4i32 :
18651 (VT == MVT::v4i64) ? MVT::v8i32 : MVT::v16i32;
18652 A = DAG.getNode(ISD::BITCAST, dl, MulVT, A);
18653 B = DAG.getNode(ISD::BITCAST, dl, MulVT, B);
18654 Ahi = DAG.getNode(ISD::BITCAST, dl, MulVT, Ahi);
18655 Bhi = DAG.getNode(ISD::BITCAST, dl, MulVT, Bhi);
18657 SDValue AloBlo = DAG.getNode(X86ISD::PMULUDQ, dl, VT, A, B);
18658 SDValue AloBhi = DAG.getNode(X86ISD::PMULUDQ, dl, VT, A, Bhi);
18659 SDValue AhiBlo = DAG.getNode(X86ISD::PMULUDQ, dl, VT, Ahi, B);
18661 AloBhi = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, VT, AloBhi, 32, DAG);
18662 AhiBlo = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, VT, AhiBlo, 32, DAG);
18664 SDValue Res = DAG.getNode(ISD::ADD, dl, VT, AloBlo, AloBhi);
18665 return DAG.getNode(ISD::ADD, dl, VT, Res, AhiBlo);
18668 SDValue X86TargetLowering::LowerWin64_i128OP(SDValue Op, SelectionDAG &DAG) const {
18669 assert(Subtarget->isTargetWin64() && "Unexpected target");
18670 EVT VT = Op.getValueType();
18671 assert(VT.isInteger() && VT.getSizeInBits() == 128 &&
18672 "Unexpected return type for lowering");
18676 switch (Op->getOpcode()) {
18677 default: llvm_unreachable("Unexpected request for libcall!");
18678 case ISD::SDIV: isSigned = true; LC = RTLIB::SDIV_I128; break;
18679 case ISD::UDIV: isSigned = false; LC = RTLIB::UDIV_I128; break;
18680 case ISD::SREM: isSigned = true; LC = RTLIB::SREM_I128; break;
18681 case ISD::UREM: isSigned = false; LC = RTLIB::UREM_I128; break;
18682 case ISD::SDIVREM: isSigned = true; LC = RTLIB::SDIVREM_I128; break;
18683 case ISD::UDIVREM: isSigned = false; LC = RTLIB::UDIVREM_I128; break;
18687 SDValue InChain = DAG.getEntryNode();
18689 TargetLowering::ArgListTy Args;
18690 TargetLowering::ArgListEntry Entry;
18691 for (unsigned i = 0, e = Op->getNumOperands(); i != e; ++i) {
18692 EVT ArgVT = Op->getOperand(i).getValueType();
18693 assert(ArgVT.isInteger() && ArgVT.getSizeInBits() == 128 &&
18694 "Unexpected argument type for lowering");
18695 SDValue StackPtr = DAG.CreateStackTemporary(ArgVT, 16);
18696 Entry.Node = StackPtr;
18697 InChain = DAG.getStore(InChain, dl, Op->getOperand(i), StackPtr, MachinePointerInfo(),
18699 Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
18700 Entry.Ty = PointerType::get(ArgTy,0);
18701 Entry.isSExt = false;
18702 Entry.isZExt = false;
18703 Args.push_back(Entry);
18706 SDValue Callee = DAG.getExternalSymbol(getLibcallName(LC),
18709 TargetLowering::CallLoweringInfo CLI(DAG);
18710 CLI.setDebugLoc(dl).setChain(InChain)
18711 .setCallee(getLibcallCallingConv(LC),
18712 static_cast<EVT>(MVT::v2i64).getTypeForEVT(*DAG.getContext()),
18713 Callee, std::move(Args), 0)
18714 .setInRegister().setSExtResult(isSigned).setZExtResult(!isSigned);
18716 std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI);
18717 return DAG.getNode(ISD::BITCAST, dl, VT, CallInfo.first);
18720 static SDValue LowerMUL_LOHI(SDValue Op, const X86Subtarget *Subtarget,
18721 SelectionDAG &DAG) {
18722 SDValue Op0 = Op.getOperand(0), Op1 = Op.getOperand(1);
18723 EVT VT = Op0.getValueType();
18726 assert((VT == MVT::v4i32 && Subtarget->hasSSE2()) ||
18727 (VT == MVT::v8i32 && Subtarget->hasInt256()));
18729 // PMULxD operations multiply each even value (starting at 0) of LHS with
18730 // the related value of RHS and produce a widened result.
18731 // E.g., PMULUDQ <4 x i32> <a|b|c|d>, <4 x i32> <e|f|g|h>
18732 // => <2 x i64> <ae|cg>
18734 // In other words, to have all the results, we need to perform two PMULxD:
18735 // 1. one with the even values.
18736 // 2. one with the odd values.
18737 // To achieve #2, we need to place the odd values at an even position.
18739 // Place the odd value at an even position (basically, shift all values 1
18740 // step to the left):
18741 const int Mask[] = {1, -1, 3, -1, 5, -1, 7, -1};
18742 // <a|b|c|d> => <b|undef|d|undef>
18743 SDValue Odd0 = DAG.getVectorShuffle(VT, dl, Op0, Op0, Mask);
18744 // <e|f|g|h> => <f|undef|h|undef>
18745 SDValue Odd1 = DAG.getVectorShuffle(VT, dl, Op1, Op1, Mask);
18747 // Emit two multiplies, one for the lower 2 ints and one for the higher 2 ints.
18749 MVT MulVT = VT == MVT::v4i32 ? MVT::v2i64 : MVT::v4i64;
18750 bool IsSigned = Op->getOpcode() == ISD::SMUL_LOHI;
18752 (!IsSigned || !Subtarget->hasSSE41()) ? X86ISD::PMULUDQ : X86ISD::PMULDQ;
18753 // PMULUDQ <4 x i32> <a|b|c|d>, <4 x i32> <e|f|g|h>
18754 // => <2 x i64> <ae|cg>
18755 SDValue Mul1 = DAG.getNode(ISD::BITCAST, dl, VT,
18756 DAG.getNode(Opcode, dl, MulVT, Op0, Op1));
18757 // PMULUDQ <4 x i32> <b|undef|d|undef>, <4 x i32> <f|undef|h|undef>
18758 // => <2 x i64> <bf|dh>
18759 SDValue Mul2 = DAG.getNode(ISD::BITCAST, dl, VT,
18760 DAG.getNode(Opcode, dl, MulVT, Odd0, Odd1));
18762 // Shuffle it back into the right order.
18763 SDValue Highs, Lows;
18764 if (VT == MVT::v8i32) {
18765 const int HighMask[] = {1, 9, 3, 11, 5, 13, 7, 15};
18766 Highs = DAG.getVectorShuffle(VT, dl, Mul1, Mul2, HighMask);
18767 const int LowMask[] = {0, 8, 2, 10, 4, 12, 6, 14};
18768 Lows = DAG.getVectorShuffle(VT, dl, Mul1, Mul2, LowMask);
18770 const int HighMask[] = {1, 5, 3, 7};
18771 Highs = DAG.getVectorShuffle(VT, dl, Mul1, Mul2, HighMask);
18772 const int LowMask[] = {0, 4, 2, 6};
18773 Lows = DAG.getVectorShuffle(VT, dl, Mul1, Mul2, LowMask);
18776 // If we have a signed multiply but no PMULDQ, fix up the high parts of an
18777 // unsigned multiply.
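// (This uses the identity mulhs(a, b) = mulhu(a, b) - (a < 0 ? b : 0)
// - (b < 0 ? a : 0): the arithmetic shift by 31 yields an all-ones mask
// exactly when an operand is negative, so T1 and T2 below are the two
// correction terms.)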
18778 if (IsSigned && !Subtarget->hasSSE41()) {
18780 DAG.getConstant(31, DAG.getTargetLoweringInfo().getShiftAmountTy(VT));
18781 SDValue T1 = DAG.getNode(ISD::AND, dl, VT,
18782 DAG.getNode(ISD::SRA, dl, VT, Op0, ShAmt), Op1);
18783 SDValue T2 = DAG.getNode(ISD::AND, dl, VT,
18784 DAG.getNode(ISD::SRA, dl, VT, Op1, ShAmt), Op0);
18786 SDValue Fixup = DAG.getNode(ISD::ADD, dl, VT, T1, T2);
18787 Highs = DAG.getNode(ISD::SUB, dl, VT, Highs, Fixup);
18790 // The first result of MUL_LOHI is actually the low value, followed by the high value.
18792 SDValue Ops[] = {Lows, Highs};
18793 return DAG.getMergeValues(Ops, dl);
18796 static SDValue LowerScalarImmediateShift(SDValue Op, SelectionDAG &DAG,
18797 const X86Subtarget *Subtarget) {
18798 MVT VT = Op.getSimpleValueType();
18800 SDValue R = Op.getOperand(0);
18801 SDValue Amt = Op.getOperand(1);
18803 // Optimize shl/srl/sra with constant shift amount.
18804 if (auto *BVAmt = dyn_cast<BuildVectorSDNode>(Amt)) {
18805 if (auto *ShiftConst = BVAmt->getConstantSplatNode()) {
18806 uint64_t ShiftAmt = ShiftConst->getZExtValue();
18808 if (VT == MVT::v2i64 || VT == MVT::v4i32 || VT == MVT::v8i16 ||
18809 (Subtarget->hasInt256() &&
18810 (VT == MVT::v4i64 || VT == MVT::v8i32 || VT == MVT::v16i16)) ||
18811 (Subtarget->hasAVX512() &&
18812 (VT == MVT::v8i64 || VT == MVT::v16i32))) {
18813 if (Op.getOpcode() == ISD::SHL)
18814 return getTargetVShiftByConstNode(X86ISD::VSHLI, dl, VT, R, ShiftAmt,
18816 if (Op.getOpcode() == ISD::SRL)
18817 return getTargetVShiftByConstNode(X86ISD::VSRLI, dl, VT, R, ShiftAmt,
18819 if (Op.getOpcode() == ISD::SRA && VT != MVT::v2i64 && VT != MVT::v4i64)
18820 return getTargetVShiftByConstNode(X86ISD::VSRAI, dl, VT, R, ShiftAmt,
18824 if (VT == MVT::v16i8) {
18825 if (Op.getOpcode() == ISD::SHL) {
18826 // Make a large shift.
18827 SDValue SHL = getTargetVShiftByConstNode(X86ISD::VSHLI, dl,
18828 MVT::v8i16, R, ShiftAmt,
18830 SHL = DAG.getNode(ISD::BITCAST, dl, VT, SHL);
18831 // Zero out the rightmost bits.
18832 SmallVector<SDValue, 16> V(16,
18833 DAG.getConstant(uint8_t(-1U << ShiftAmt),
18835 return DAG.getNode(ISD::AND, dl, VT, SHL,
18836 DAG.getNode(ISD::BUILD_VECTOR, dl, VT, V));
18838 if (Op.getOpcode() == ISD::SRL) {
18839 // Make a large shift.
18840 SDValue SRL = getTargetVShiftByConstNode(X86ISD::VSRLI, dl,
18841 MVT::v8i16, R, ShiftAmt,
18843 SRL = DAG.getNode(ISD::BITCAST, dl, VT, SRL);
18844 // Zero out the leftmost bits.
18845 SmallVector<SDValue, 16> V(16,
18846 DAG.getConstant(uint8_t(-1U) >> ShiftAmt,
18848 return DAG.getNode(ISD::AND, dl, VT, SRL,
18849 DAG.getNode(ISD::BUILD_VECTOR, dl, VT, V));
18851 if (Op.getOpcode() == ISD::SRA) {
18852 if (ShiftAmt == 7) {
18853 // R s>> 7 === R s< 0
18854 SDValue Zeros = getZeroVector(VT, Subtarget, DAG, dl);
18855 return DAG.getNode(X86ISD::PCMPGT, dl, VT, Zeros, R);
18858 // R s>> a === ((R u>> a) ^ m) - m
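// (Here m = 0x80 >> a is where the sign bit lands after the logical shift.
// E.g. for R = 0xF0 (-16) and a = 4: R u>> 4 = 0x0F, m = 0x08, and
// (0x0F ^ 0x08) - 0x08 = -1, matching -16 s>> 4.)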
18859 SDValue Res = DAG.getNode(ISD::SRL, dl, VT, R, Amt);
18860 SmallVector<SDValue, 16> V(16, DAG.getConstant(128 >> ShiftAmt,
18862 SDValue Mask = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, V);
18863 Res = DAG.getNode(ISD::XOR, dl, VT, Res, Mask);
18864 Res = DAG.getNode(ISD::SUB, dl, VT, Res, Mask);
18867 llvm_unreachable("Unknown shift opcode.");
18870 if (Subtarget->hasInt256() && VT == MVT::v32i8) {
18871 if (Op.getOpcode() == ISD::SHL) {
18872 // Make a large shift.
18873 SDValue SHL = getTargetVShiftByConstNode(X86ISD::VSHLI, dl,
18874 MVT::v16i16, R, ShiftAmt,
18876 SHL = DAG.getNode(ISD::BITCAST, dl, VT, SHL);
18877 // Zero out the rightmost bits.
18878 SmallVector<SDValue, 32> V(32,
18879 DAG.getConstant(uint8_t(-1U << ShiftAmt),
18881 return DAG.getNode(ISD::AND, dl, VT, SHL,
18882 DAG.getNode(ISD::BUILD_VECTOR, dl, VT, V));
18884 if (Op.getOpcode() == ISD::SRL) {
18885 // Make a large shift.
18886 SDValue SRL = getTargetVShiftByConstNode(X86ISD::VSRLI, dl,
18887 MVT::v16i16, R, ShiftAmt,
18889 SRL = DAG.getNode(ISD::BITCAST, dl, VT, SRL);
18890 // Zero out the leftmost bits.
18891 SmallVector<SDValue, 32> V(32,
18892 DAG.getConstant(uint8_t(-1U) >> ShiftAmt,
18894 return DAG.getNode(ISD::AND, dl, VT, SRL,
18895 DAG.getNode(ISD::BUILD_VECTOR, dl, VT, V));
18897 if (Op.getOpcode() == ISD::SRA) {
18898 if (ShiftAmt == 7) {
18899 // R s>> 7 === R s< 0
18900 SDValue Zeros = getZeroVector(VT, Subtarget, DAG, dl);
18901 return DAG.getNode(X86ISD::PCMPGT, dl, VT, Zeros, R);
18904 // R s>> a === ((R u>> a) ^ m) - m
18905 SDValue Res = DAG.getNode(ISD::SRL, dl, VT, R, Amt);
18906 SmallVector<SDValue, 32> V(32, DAG.getConstant(128 >> ShiftAmt,
18908 SDValue Mask = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, V);
18909 Res = DAG.getNode(ISD::XOR, dl, VT, Res, Mask);
18910 Res = DAG.getNode(ISD::SUB, dl, VT, Res, Mask);
18913 llvm_unreachable("Unknown shift opcode.");
18918 // Special case in 32-bit mode, where i64 is expanded into high and low parts.
18919 if (!Subtarget->is64Bit() &&
18920 (VT == MVT::v2i64 || (Subtarget->hasInt256() && VT == MVT::v4i64)) &&
18921 Amt.getOpcode() == ISD::BITCAST &&
18922 Amt.getOperand(0).getOpcode() == ISD::BUILD_VECTOR) {
18923 Amt = Amt.getOperand(0);
18924 unsigned Ratio = Amt.getSimpleValueType().getVectorNumElements() /
18925 VT.getVectorNumElements();
18926 unsigned RatioInLog2 = Log2_32_Ceil(Ratio);
18927 uint64_t ShiftAmt = 0;
18928 for (unsigned i = 0; i != Ratio; ++i) {
18929 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Amt.getOperand(i));
18933 ShiftAmt |= C->getZExtValue() << (i * (1 << (6 - RatioInLog2)));
18935 // Check remaining shift amounts.
18936 for (unsigned i = Ratio; i != Amt.getNumOperands(); i += Ratio) {
18937 uint64_t ShAmt = 0;
18938 for (unsigned j = 0; j != Ratio; ++j) {
18939 ConstantSDNode *C =
18940 dyn_cast<ConstantSDNode>(Amt.getOperand(i + j));
18944 ShAmt |= C->getZExtValue() << (j * (1 << (6 - RatioInLog2)));
18946 if (ShAmt != ShiftAmt)
18949 switch (Op.getOpcode()) {
18951 llvm_unreachable("Unknown shift opcode!");
18953 return getTargetVShiftByConstNode(X86ISD::VSHLI, dl, VT, R, ShiftAmt,
18956 return getTargetVShiftByConstNode(X86ISD::VSRLI, dl, VT, R, ShiftAmt,
18959 return getTargetVShiftByConstNode(X86ISD::VSRAI, dl, VT, R, ShiftAmt,
18967 static SDValue LowerScalarVariableShift(SDValue Op, SelectionDAG &DAG,
18968 const X86Subtarget* Subtarget) {
18969 MVT VT = Op.getSimpleValueType();
18971 SDValue R = Op.getOperand(0);
18972 SDValue Amt = Op.getOperand(1);
18974 if ((VT == MVT::v2i64 && Op.getOpcode() != ISD::SRA) ||
18975 VT == MVT::v4i32 || VT == MVT::v8i16 ||
18976 (Subtarget->hasInt256() &&
18977 ((VT == MVT::v4i64 && Op.getOpcode() != ISD::SRA) ||
18978 VT == MVT::v8i32 || VT == MVT::v16i16)) ||
18979 (Subtarget->hasAVX512() && (VT == MVT::v8i64 || VT == MVT::v16i32))) {
18981 EVT EltVT = VT.getVectorElementType();
18983 if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(Amt)) {
18984 // Check if this build_vector node is doing a splat.
18985 // If so, then set BaseShAmt equal to the splat value.
18986 BaseShAmt = BV->getSplatValue();
18987 if (BaseShAmt && BaseShAmt.getOpcode() == ISD::UNDEF)
18988 BaseShAmt = SDValue();
18990 if (Amt.getOpcode() == ISD::EXTRACT_SUBVECTOR)
18991 Amt = Amt.getOperand(0);
18993 ShuffleVectorSDNode *SVN = dyn_cast<ShuffleVectorSDNode>(Amt);
18994 if (SVN && SVN->isSplat()) {
18995 unsigned SplatIdx = (unsigned)SVN->getSplatIndex();
18996 SDValue InVec = Amt.getOperand(0);
18997 if (InVec.getOpcode() == ISD::BUILD_VECTOR) {
18998 assert((SplatIdx < InVec.getValueType().getVectorNumElements()) &&
18999 "Unexpected shuffle index found!");
19000 BaseShAmt = InVec.getOperand(SplatIdx);
19001 } else if (InVec.getOpcode() == ISD::INSERT_VECTOR_ELT) {
19002 if (ConstantSDNode *C =
19003 dyn_cast<ConstantSDNode>(InVec.getOperand(2))) {
19004 if (C->getZExtValue() == SplatIdx)
19005 BaseShAmt = InVec.getOperand(1);
19010 // Avoid introducing an extract element from a shuffle.
19011 BaseShAmt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, InVec,
19012 DAG.getIntPtrConstant(SplatIdx));
19016 if (BaseShAmt.getNode()) {
19017 assert(EltVT.bitsLE(MVT::i64) && "Unexpected element type!");
19018 if (EltVT != MVT::i64 && EltVT.bitsGT(MVT::i32))
19019 BaseShAmt = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64, BaseShAmt);
19020 else if (EltVT.bitsLT(MVT::i32))
19021 BaseShAmt = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, BaseShAmt);
19023 switch (Op.getOpcode()) {
19025 llvm_unreachable("Unknown shift opcode!");
19027 switch (VT.SimpleTy) {
19028 default: return SDValue();
19037 return getTargetVShiftNode(X86ISD::VSHLI, dl, VT, R, BaseShAmt, DAG);
19040 switch (VT.SimpleTy) {
19041 default: return SDValue();
19048 return getTargetVShiftNode(X86ISD::VSRAI, dl, VT, R, BaseShAmt, DAG);
19051 switch (VT.SimpleTy) {
19052 default: return SDValue();
19061 return getTargetVShiftNode(X86ISD::VSRLI, dl, VT, R, BaseShAmt, DAG);
19067 // Special case in 32-bit mode, where i64 is expanded into high and low parts.
19068 if (!Subtarget->is64Bit() &&
19069 (VT == MVT::v2i64 || (Subtarget->hasInt256() && VT == MVT::v4i64) ||
19070 (Subtarget->hasAVX512() && VT == MVT::v8i64)) &&
19071 Amt.getOpcode() == ISD::BITCAST &&
19072 Amt.getOperand(0).getOpcode() == ISD::BUILD_VECTOR) {
19073 Amt = Amt.getOperand(0);
19074 unsigned Ratio = Amt.getSimpleValueType().getVectorNumElements() /
19075 VT.getVectorNumElements();
19076 std::vector<SDValue> Vals(Ratio);
19077 for (unsigned i = 0; i != Ratio; ++i)
19078 Vals[i] = Amt.getOperand(i);
19079 for (unsigned i = Ratio; i != Amt.getNumOperands(); i += Ratio) {
19080 for (unsigned j = 0; j != Ratio; ++j)
19081 if (Vals[j] != Amt.getOperand(i + j))
19084 switch (Op.getOpcode()) {
19086 llvm_unreachable("Unknown shift opcode!");
19088 return DAG.getNode(X86ISD::VSHL, dl, VT, R, Op.getOperand(1));
19090 return DAG.getNode(X86ISD::VSRL, dl, VT, R, Op.getOperand(1));
19092 return DAG.getNode(X86ISD::VSRA, dl, VT, R, Op.getOperand(1));
19099 static SDValue LowerShift(SDValue Op, const X86Subtarget* Subtarget,
19100 SelectionDAG &DAG) {
19101 MVT VT = Op.getSimpleValueType();
19103 SDValue R = Op.getOperand(0);
19104 SDValue Amt = Op.getOperand(1);
19107 assert(VT.isVector() && "Custom lowering only for vector shifts!");
19108 assert(Subtarget->hasSSE2() && "Only custom lower when we have SSE2!");
19110 V = LowerScalarImmediateShift(Op, DAG, Subtarget);
19114 V = LowerScalarVariableShift(Op, DAG, Subtarget);
19118 if (Subtarget->hasAVX512() && (VT == MVT::v16i32 || VT == MVT::v8i64))
19120 // AVX2 has VPSLLV/VPSRAV/VPSRLV.
19121 if (Subtarget->hasInt256()) {
19122 if (Op.getOpcode() == ISD::SRL &&
19123 (VT == MVT::v2i64 || VT == MVT::v4i32 ||
19124 VT == MVT::v4i64 || VT == MVT::v8i32))
19126 if (Op.getOpcode() == ISD::SHL &&
19127 (VT == MVT::v2i64 || VT == MVT::v4i32 ||
19128 VT == MVT::v4i64 || VT == MVT::v8i32))
19130 if (Op.getOpcode() == ISD::SRA && (VT == MVT::v4i32 || VT == MVT::v8i32))
19134 // If possible, lower this packed shift into a vector multiply instead of
19135 // expanding it into a sequence of scalar shifts.
19136 // Do this only if the vector shift count is a constant build_vector.
19137 if (Op.getOpcode() == ISD::SHL &&
19138 (VT == MVT::v8i16 || VT == MVT::v4i32 ||
19139 (Subtarget->hasInt256() && VT == MVT::v16i16)) &&
19140 ISD::isBuildVectorOfConstantSDNodes(Amt.getNode())) {
19141 SmallVector<SDValue, 8> Elts;
19142 EVT SVT = VT.getScalarType();
19143 unsigned SVTBits = SVT.getSizeInBits();
19144 const APInt &One = APInt(SVTBits, 1);
19145 unsigned NumElems = VT.getVectorNumElements();
19147 for (unsigned i=0; i !=NumElems; ++i) {
19148 SDValue Op = Amt->getOperand(i);
19149 if (Op->getOpcode() == ISD::UNDEF) {
19150 Elts.push_back(Op);
19154 ConstantSDNode *ND = cast<ConstantSDNode>(Op);
19155 const APInt &C = APInt(SVTBits, ND->getAPIntValue().getZExtValue());
19156 uint64_t ShAmt = C.getZExtValue();
19157 if (ShAmt >= SVTBits) {
19158 Elts.push_back(DAG.getUNDEF(SVT));
19161 Elts.push_back(DAG.getConstant(One.shl(ShAmt), SVT));
19163 SDValue BV = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Elts);
19164 return DAG.getNode(ISD::MUL, dl, VT, R, BV);
19167 // Lower SHL with variable shift amount.
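// (How the trick below works: 0x3f800000 is the bit pattern of 1.0f, and
// adding amt << 23 bumps the IEEE-754 exponent by amt, producing the float
// 2^amt with an all-zero mantissa. Converting back to integer recovers the
// power of two, turning the variable shift into a vector multiply. This
// assumes every amt is in [0, 31], which is required for a defined i32 SHL
// anyway.)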
19168 if (VT == MVT::v4i32 && Op->getOpcode() == ISD::SHL) {
19169 Op = DAG.getNode(ISD::SHL, dl, VT, Amt, DAG.getConstant(23, VT));
19171 Op = DAG.getNode(ISD::ADD, dl, VT, Op, DAG.getConstant(0x3f800000U, VT));
19172 Op = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, Op);
19173 Op = DAG.getNode(ISD::FP_TO_SINT, dl, VT, Op);
19174 return DAG.getNode(ISD::MUL, dl, VT, Op, R);
19177 // If possible, lower this shift as a sequence of two shifts by
19178 // constant plus a MOVSS/MOVSD instead of scalarizing it.
19180 // (v4i32 (srl A, (build_vector < X, Y, Y, Y>)))
19182 // Could be rewritten as:
19183 // (v4i32 (MOVSS (srl A, <Y,Y,Y,Y>), (srl A, <X,X,X,X>)))
19185 // The advantage is that the two shifts from the example would be
19186 // lowered as X86ISD::VSRLI nodes. This would be cheaper than scalarizing
19187 // the vector shift into four scalar shifts plus four pairs of vector insert/extract.
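// (For example, (v4i32 (srl A, <2, 3, 3, 3>)) becomes a VSRLI by 3, a VSRLI
// by 2, and a MOVSS-style blend that takes lane 0 from the shift-by-2 result
// and lanes 1-3 from the shift-by-3 result.)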
19189 if ((VT == MVT::v8i16 || VT == MVT::v4i32) &&
19190 ISD::isBuildVectorOfConstantSDNodes(Amt.getNode())) {
19191 unsigned TargetOpcode = X86ISD::MOVSS;
19192 bool CanBeSimplified;
19193 // The splat value for the first packed shift (the 'X' from the example).
19194 SDValue Amt1 = Amt->getOperand(0);
19195 // The splat value for the second packed shift (the 'Y' from the example).
19196 SDValue Amt2 = (VT == MVT::v4i32) ? Amt->getOperand(1) :
19197 Amt->getOperand(2);
19199 // See if it is possible to replace this node with a sequence of
19200 // two shifts followed by a MOVSS/MOVSD
19201 if (VT == MVT::v4i32) {
19202 // Check if it is legal to use a MOVSS.
19203 CanBeSimplified = Amt2 == Amt->getOperand(2) &&
19204 Amt2 == Amt->getOperand(3);
19205 if (!CanBeSimplified) {
19206 // Otherwise, check if we can still simplify this node using a MOVSD.
19207 CanBeSimplified = Amt1 == Amt->getOperand(1) &&
19208 Amt->getOperand(2) == Amt->getOperand(3);
19209 TargetOpcode = X86ISD::MOVSD;
19210 Amt2 = Amt->getOperand(2);
19213 // Do similar checks for the case where the machine value type is MVT::v8i16.
19215 CanBeSimplified = Amt1 == Amt->getOperand(1);
19216 for (unsigned i=3; i != 8 && CanBeSimplified; ++i)
19217 CanBeSimplified = Amt2 == Amt->getOperand(i);
19219 if (!CanBeSimplified) {
19220 TargetOpcode = X86ISD::MOVSD;
19221 CanBeSimplified = true;
19222 Amt2 = Amt->getOperand(4);
19223 for (unsigned i=0; i != 4 && CanBeSimplified; ++i)
19224 CanBeSimplified = Amt1 == Amt->getOperand(i);
19225 for (unsigned j=4; j != 8 && CanBeSimplified; ++j)
19226 CanBeSimplified = Amt2 == Amt->getOperand(j);
19230 if (CanBeSimplified && isa<ConstantSDNode>(Amt1) &&
19231 isa<ConstantSDNode>(Amt2)) {
19232 // Replace this node with two shifts followed by a MOVSS/MOVSD.
19233 EVT CastVT = MVT::v4i32;
19235 DAG.getConstant(cast<ConstantSDNode>(Amt1)->getAPIntValue(), VT);
19236 SDValue Shift1 = DAG.getNode(Op->getOpcode(), dl, VT, R, Splat1);
19238 DAG.getConstant(cast<ConstantSDNode>(Amt2)->getAPIntValue(), VT);
19239 SDValue Shift2 = DAG.getNode(Op->getOpcode(), dl, VT, R, Splat2);
19240 if (TargetOpcode == X86ISD::MOVSD)
19241 CastVT = MVT::v2i64;
19242 SDValue BitCast1 = DAG.getNode(ISD::BITCAST, dl, CastVT, Shift1);
19243 SDValue BitCast2 = DAG.getNode(ISD::BITCAST, dl, CastVT, Shift2);
19244 SDValue Result = getTargetShuffleNode(TargetOpcode, dl, CastVT, BitCast2,
19246 return DAG.getNode(ISD::BITCAST, dl, VT, Result);
19250 if (VT == MVT::v16i8 && Op->getOpcode() == ISD::SHL) {
19251 assert(Subtarget->hasSSE2() && "Need SSE2 for pslli/pcmpeq.");
19254 Op = DAG.getNode(ISD::SHL, dl, VT, Amt, DAG.getConstant(5, VT));
19255 Op = DAG.getNode(ISD::BITCAST, dl, VT, Op);
19257 // Turn 'a' into a mask suitable for VSELECT
19258 SDValue VSelM = DAG.getConstant(0x80, VT);
19259 SDValue OpVSel = DAG.getNode(ISD::AND, dl, VT, VSelM, Op);
19260 OpVSel = DAG.getNode(X86ISD::PCMPEQ, dl, VT, OpVSel, VSelM);
19262 SDValue CM1 = DAG.getConstant(0x0f, VT);
19263 SDValue CM2 = DAG.getConstant(0x3f, VT);
19265 // r = VSELECT(r, psllw(r & (char16)15, 4), a);
19266 SDValue M = DAG.getNode(ISD::AND, dl, VT, R, CM1);
19267 M = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, MVT::v8i16, M, 4, DAG);
19268 M = DAG.getNode(ISD::BITCAST, dl, VT, M);
19269 R = DAG.getNode(ISD::VSELECT, dl, VT, OpVSel, M, R);
19272 Op = DAG.getNode(ISD::ADD, dl, VT, Op, Op);
19273 OpVSel = DAG.getNode(ISD::AND, dl, VT, VSelM, Op);
19274 OpVSel = DAG.getNode(X86ISD::PCMPEQ, dl, VT, OpVSel, VSelM);
19276 // r = VSELECT(r, psllw(r & (char16)63, 2), a);
19277 M = DAG.getNode(ISD::AND, dl, VT, R, CM2);
19278 M = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, MVT::v8i16, M, 2, DAG);
19279 M = DAG.getNode(ISD::BITCAST, dl, VT, M);
19280 R = DAG.getNode(ISD::VSELECT, dl, VT, OpVSel, M, R);
19283 Op = DAG.getNode(ISD::ADD, dl, VT, Op, Op);
19284 OpVSel = DAG.getNode(ISD::AND, dl, VT, VSelM, Op);
19285 OpVSel = DAG.getNode(X86ISD::PCMPEQ, dl, VT, OpVSel, VSelM);
19287 // return VSELECT(r, r+r, a);
19288 R = DAG.getNode(ISD::VSELECT, dl, VT, OpVSel,
19289 DAG.getNode(ISD::ADD, dl, VT, R, R), R);
19293 // It's worth extending once and using the v8i32 shifts for 16-bit types, but
19294 // the extra overheads to get from v16i8 to v8i32 make the existing SSE
19295 // solution better.
19296 if (Subtarget->hasInt256() && VT == MVT::v8i16) {
19297 MVT NewVT = VT == MVT::v8i16 ? MVT::v8i32 : MVT::v16i16;
19299 Op.getOpcode() == ISD::SRA ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
19300 R = DAG.getNode(ExtOpc, dl, NewVT, R);
19301 Amt = DAG.getNode(ISD::ANY_EXTEND, dl, NewVT, Amt);
19302 return DAG.getNode(ISD::TRUNCATE, dl, VT,
19303 DAG.getNode(Op.getOpcode(), dl, NewVT, R, Amt));
19306 // Decompose 256-bit shifts into smaller 128-bit shifts.
19307 if (VT.is256BitVector()) {
19308 unsigned NumElems = VT.getVectorNumElements();
19309 MVT EltVT = VT.getVectorElementType();
19310 EVT NewVT = MVT::getVectorVT(EltVT, NumElems/2);
19312 // Extract the two vectors
19313 SDValue V1 = Extract128BitVector(R, 0, DAG, dl);
19314 SDValue V2 = Extract128BitVector(R, NumElems/2, DAG, dl);
19316 // Recreate the shift amount vectors
19317 SDValue Amt1, Amt2;
19318 if (Amt.getOpcode() == ISD::BUILD_VECTOR) {
19319 // Constant shift amount
19320 SmallVector<SDValue, 4> Amt1Csts;
19321 SmallVector<SDValue, 4> Amt2Csts;
19322 for (unsigned i = 0; i != NumElems/2; ++i)
19323 Amt1Csts.push_back(Amt->getOperand(i));
19324 for (unsigned i = NumElems/2; i != NumElems; ++i)
19325 Amt2Csts.push_back(Amt->getOperand(i));
19327 Amt1 = DAG.getNode(ISD::BUILD_VECTOR, dl, NewVT, Amt1Csts);
19328 Amt2 = DAG.getNode(ISD::BUILD_VECTOR, dl, NewVT, Amt2Csts);
19330 // Variable shift amount
19331 Amt1 = Extract128BitVector(Amt, 0, DAG, dl);
19332 Amt2 = Extract128BitVector(Amt, NumElems/2, DAG, dl);
19335 // Issue new vector shifts for the smaller types
19336 V1 = DAG.getNode(Op.getOpcode(), dl, NewVT, V1, Amt1);
19337 V2 = DAG.getNode(Op.getOpcode(), dl, NewVT, V2, Amt2);
19339 // Concatenate the result back
19340 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, V1, V2);
19346 static SDValue LowerXALUO(SDValue Op, SelectionDAG &DAG) {
19347 // Lower the "add/sub/mul with overflow" instruction into a regular instruction plus
19348 // a "setcc" instruction that checks the overflow flag. The "brcond" lowering
19349 // looks for this combo and may remove the "setcc" instruction if the "setcc"
19350 // has only one use.
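// For example, {i32, i1} = uaddo a, b is selected as an ADD that defines
// EFLAGS followed by SETB (carry) for the overflow bit, while the signed
// variants test OF via SETO.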
19351 SDNode *N = Op.getNode();
19352 SDValue LHS = N->getOperand(0);
19353 SDValue RHS = N->getOperand(1);
19354 unsigned BaseOp = 0;
19357 switch (Op.getOpcode()) {
19358 default: llvm_unreachable("Unknown ovf instruction!");
19360 // An add of one will be selected as an INC. Note that INC doesn't
19361 // set CF, so we can't do this for UADDO.
19362 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS))
19364 BaseOp = X86ISD::INC;
19365 Cond = X86::COND_O;
19368 BaseOp = X86ISD::ADD;
19369 Cond = X86::COND_O;
19372 BaseOp = X86ISD::ADD;
19373 Cond = X86::COND_B;
19376 // A subtract of one will be selected as a DEC. Note that DEC doesn't
19377 // set CF, so we can't do this for USUBO.
19378 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS))
19380 BaseOp = X86ISD::DEC;
19381 Cond = X86::COND_O;
19384 BaseOp = X86ISD::SUB;
19385 Cond = X86::COND_O;
19388 BaseOp = X86ISD::SUB;
19389 Cond = X86::COND_B;
19392 BaseOp = N->getValueType(0) == MVT::i8 ? X86ISD::SMUL8 : X86ISD::SMUL;
19393 Cond = X86::COND_O;
19395 case ISD::UMULO: { // i64, i8 = umulo lhs, rhs --> i64, i64, i32 umul lhs,rhs
19396 if (N->getValueType(0) == MVT::i8) {
19397 BaseOp = X86ISD::UMUL8;
19398 Cond = X86::COND_O;
19401 SDVTList VTs = DAG.getVTList(N->getValueType(0), N->getValueType(0),
19403 SDValue Sum = DAG.getNode(X86ISD::UMUL, DL, VTs, LHS, RHS);
19406 DAG.getNode(X86ISD::SETCC, DL, MVT::i8,
19407 DAG.getConstant(X86::COND_O, MVT::i32),
19408 SDValue(Sum.getNode(), 2));
19410 return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(), Sum, SetCC);
19414 // Also sets EFLAGS.
19415 SDVTList VTs = DAG.getVTList(N->getValueType(0), MVT::i32);
19416 SDValue Sum = DAG.getNode(BaseOp, DL, VTs, LHS, RHS);
19419 DAG.getNode(X86ISD::SETCC, DL, N->getValueType(1),
19420 DAG.getConstant(Cond, MVT::i32),
19421 SDValue(Sum.getNode(), 1));
19423 return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(), Sum, SetCC);
19426 // Sign extension of the low part of vector elements. This may be used either
19427 // when sign extend instructions are not available or if the vector element
19428 // sizes already match the sign-extended size. If the vector elements are in
19429 // their pre-extended size and sign extend instructions are available, that will
19430 // be handled by LowerSIGN_EXTEND.
19431 SDValue X86TargetLowering::LowerSIGN_EXTEND_INREG(SDValue Op,
19432 SelectionDAG &DAG) const {
19434 EVT ExtraVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
19435 MVT VT = Op.getSimpleValueType();
19437 if (!Subtarget->hasSSE2() || !VT.isVector())
19440 unsigned BitsDiff = VT.getScalarType().getSizeInBits() -
19441 ExtraVT.getScalarType().getSizeInBits();
19443 switch (VT.SimpleTy) {
19444 default: return SDValue();
19447 if (!Subtarget->hasFp256())
19449 if (!Subtarget->hasInt256()) {
19450 // needs to be split
19451 unsigned NumElems = VT.getVectorNumElements();
19453 // Extract the LHS vectors
19454 SDValue LHS = Op.getOperand(0);
19455 SDValue LHS1 = Extract128BitVector(LHS, 0, DAG, dl);
19456 SDValue LHS2 = Extract128BitVector(LHS, NumElems/2, DAG, dl);
19458 MVT EltVT = VT.getVectorElementType();
19459 EVT NewVT = MVT::getVectorVT(EltVT, NumElems/2);
19461 EVT ExtraEltVT = ExtraVT.getVectorElementType();
19462 unsigned ExtraNumElems = ExtraVT.getVectorNumElements();
19463 ExtraVT = EVT::getVectorVT(*DAG.getContext(), ExtraEltVT,
19465 SDValue Extra = DAG.getValueType(ExtraVT);
19467 LHS1 = DAG.getNode(Op.getOpcode(), dl, NewVT, LHS1, Extra);
19468 LHS2 = DAG.getNode(Op.getOpcode(), dl, NewVT, LHS2, Extra);
19470 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, LHS1, LHS2);
19475 SDValue Op0 = Op.getOperand(0);
19477 // This is a sign extension of some low part of vector elements without
19478 // changing the size of the vector elements themselves:
19479 // Shift-Left + Shift-Right-Algebraic.
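// For example, a sign_extend_inreg from i8 within an i32 lane becomes
// (x << 24) >>s 24: the left shift moves bit 7 into the lane's sign bit and
// the arithmetic right shift smears it back down.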
19480 SDValue Shl = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, VT, Op0,
19482 return getTargetVShiftByConstNode(X86ISD::VSRAI, dl, VT, Shl, BitsDiff,
19488 /// Returns true if the operand type is exactly twice the native width, and
19489 /// the corresponding cmpxchg8b or cmpxchg16b instruction is available.
19490 /// Used to know whether to use cmpxchg8/16b when expanding atomic operations
19491 /// (otherwise we leave them alone to become __sync_fetch_and_... calls).
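/// For example, on 32-bit x86 an atomic i64 access is only lock-free via
/// cmpxchg8b, and on x86-64 an atomic i128 access requires cmpxchg16b.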
19492 bool X86TargetLowering::needsCmpXchgNb(const Type *MemType) const {
19493 unsigned OpWidth = MemType->getPrimitiveSizeInBits();
19496 return !Subtarget->is64Bit(); // FIXME this should be Subtarget.hasCmpxchg8b
19497 else if (OpWidth == 128)
19498 return Subtarget->hasCmpxchg16b();
19503 bool X86TargetLowering::shouldExpandAtomicStoreInIR(StoreInst *SI) const {
19504 return needsCmpXchgNb(SI->getValueOperand()->getType());
19507 // Note: this turns large loads into lock cmpxchg8b/16b.
19508 // FIXME: On 32-bit x86, fild/movq might be faster than lock cmpxchg8b.
19509 bool X86TargetLowering::shouldExpandAtomicLoadInIR(LoadInst *LI) const {
19510 auto PTy = cast<PointerType>(LI->getPointerOperand()->getType());
19511 return needsCmpXchgNb(PTy->getElementType());
19514 bool X86TargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
19515 unsigned NativeWidth = Subtarget->is64Bit() ? 64 : 32;
19516 const Type *MemType = AI->getType();
19518 // If the operand is too big, we must see if cmpxchg8/16b is available
19519 // and default to library calls otherwise.
19520 if (MemType->getPrimitiveSizeInBits() > NativeWidth)
19521 return needsCmpXchgNb(MemType);
19523 AtomicRMWInst::BinOp Op = AI->getOperation();
19526 llvm_unreachable("Unknown atomic operation");
19527 case AtomicRMWInst::Xchg:
19528 case AtomicRMWInst::Add:
19529 case AtomicRMWInst::Sub:
19530 // It's better to use xadd, xsub or xchg for these in all cases.
19532 case AtomicRMWInst::Or:
19533 case AtomicRMWInst::And:
19534 case AtomicRMWInst::Xor:
19535 // If the atomicrmw's result isn't actually used, we can just add a "lock"
19536 // prefix to a normal instruction for these operations.
19537 return !AI->use_empty();
19538 case AtomicRMWInst::Nand:
19539 case AtomicRMWInst::Max:
19540 case AtomicRMWInst::Min:
19541 case AtomicRMWInst::UMax:
19542 case AtomicRMWInst::UMin:
19543 // These always require a non-trivial set of data operations on x86. We must
19544 // use a cmpxchg loop.
19549 static bool hasMFENCE(const X86Subtarget& Subtarget) {
19550 // Use mfence if we have SSE2 or we're on x86-64 (even if we asked for
19551 // no-sse2). There isn't any reason to disable it if the target processor supports it.
19553 return Subtarget.hasSSE2() || Subtarget.is64Bit();
19557 X86TargetLowering::lowerIdempotentRMWIntoFencedLoad(AtomicRMWInst *AI) const {
19558 unsigned NativeWidth = Subtarget->is64Bit() ? 64 : 32;
19559 const Type *MemType = AI->getType();
19560 // Accesses larger than the native width are turned into cmpxchg/libcalls, so
19561 // there is no benefit in turning such RMWs into loads, and it is actually
19562 // harmful as it introduces an mfence.
19563 if (MemType->getPrimitiveSizeInBits() > NativeWidth)
19566 auto Builder = IRBuilder<>(AI);
19567 Module *M = Builder.GetInsertBlock()->getParent()->getParent();
19568 auto SynchScope = AI->getSynchScope();
19569 // We must restrict the ordering to avoid generating loads with Release or
19570 // ReleaseAcquire orderings.
19571 auto Order = AtomicCmpXchgInst::getStrongestFailureOrdering(AI->getOrdering());
19572 auto Ptr = AI->getPointerOperand();
19574 // Before the load we need a fence. Here is an example lifted from
19575 // http://www.hpl.hp.com/techreports/2012/HPL-2012-68.pdf showing why a fence is required:
19578 // x.store(1, relaxed);
19579 // r1 = y.fetch_add(0, release);
19581 // y.fetch_add(42, acquire);
19582 // r2 = x.load(relaxed);
19583 // r1 = r2 = 0 is impossible, but becomes possible if the idempotent rmw is
19584 // lowered to just a load without a fence. An mfence flushes the store buffer,
19585 // making the optimization clearly correct.
19586 // FIXME: it is required if isAtLeastRelease(Order) but it is not clear
19587 // otherwise, we might be able to be more aggressive on relaxed idempotent
19588 // rmw. In practice, they do not look useful, so we don't try to be
19589 // especially clever.
19590 if (SynchScope == SingleThread) {
19591 // FIXME: we could just insert an X86ISD::MEMBARRIER here, except we are at
19592 // the IR level, so we must wrap it in an intrinsic.
19594 } else if (hasMFENCE(*Subtarget)) {
19595 Function *MFence = llvm::Intrinsic::getDeclaration(M,
19596 Intrinsic::x86_sse2_mfence);
19597 Builder.CreateCall(MFence);
19599 // FIXME: it might make sense to use a locked operation here but on a
19600 // different cache-line to prevent cache-line bouncing. In practice it
19601 // is probably a small win, and x86 processors without mfence are rare
19602 // enough that we do not bother.
19606 // Finally we can emit the atomic load.
19607 LoadInst *Loaded = Builder.CreateAlignedLoad(Ptr,
19608 AI->getType()->getPrimitiveSizeInBits());
19609 Loaded->setAtomic(Order, SynchScope);
19610 AI->replaceAllUsesWith(Loaded);
19611 AI->eraseFromParent();
19615 static SDValue LowerATOMIC_FENCE(SDValue Op, const X86Subtarget *Subtarget,
19616 SelectionDAG &DAG) {
19618 AtomicOrdering FenceOrdering = static_cast<AtomicOrdering>(
19619 cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue());
19620 SynchronizationScope FenceScope = static_cast<SynchronizationScope>(
19621 cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue());
19623 // The only fence that needs an instruction is a sequentially-consistent
19624 // cross-thread fence.
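// (Under x86's TSO memory model only store->load reordering can happen, and
// only a seq_cst fence must prevent it; acquire and release fences need no
// instruction at all. When MFENCE is unavailable we fall back below to a
// locked OR of zero into [ESP], an RMW that leaves memory unchanged but still
// acts as a full barrier.)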
19625 if (FenceOrdering == SequentiallyConsistent && FenceScope == CrossThread) {
19626 if (hasMFENCE(*Subtarget))
19627 return DAG.getNode(X86ISD::MFENCE, dl, MVT::Other, Op.getOperand(0));
19629 SDValue Chain = Op.getOperand(0);
19630 SDValue Zero = DAG.getConstant(0, MVT::i32);
19632 DAG.getRegister(X86::ESP, MVT::i32), // Base
19633 DAG.getTargetConstant(1, MVT::i8), // Scale
19634 DAG.getRegister(0, MVT::i32), // Index
19635 DAG.getTargetConstant(0, MVT::i32), // Disp
19636 DAG.getRegister(0, MVT::i32), // Segment.
19640 SDNode *Res = DAG.getMachineNode(X86::OR32mrLocked, dl, MVT::Other, Ops);
19641 return SDValue(Res, 0);
19644 // MEMBARRIER is a compiler barrier; it codegens to a no-op.
19645 return DAG.getNode(X86ISD::MEMBARRIER, dl, MVT::Other, Op.getOperand(0));
19648 static SDValue LowerCMP_SWAP(SDValue Op, const X86Subtarget *Subtarget,
19649 SelectionDAG &DAG) {
19650 MVT T = Op.getSimpleValueType();
19654 switch(T.SimpleTy) {
19655 default: llvm_unreachable("Invalid value type!");
19656 case MVT::i8: Reg = X86::AL; size = 1; break;
19657 case MVT::i16: Reg = X86::AX; size = 2; break;
19658 case MVT::i32: Reg = X86::EAX; size = 4; break;
19660 assert(Subtarget->is64Bit() && "Node not type legal!");
19661 Reg = X86::RAX; size = 8;
19664 SDValue cpIn = DAG.getCopyToReg(Op.getOperand(0), DL, Reg,
19665 Op.getOperand(2), SDValue());
19666 SDValue Ops[] = { cpIn.getValue(0),
19669 DAG.getTargetConstant(size, MVT::i8),
19670 cpIn.getValue(1) };
19671 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue);
19672 MachineMemOperand *MMO = cast<AtomicSDNode>(Op)->getMemOperand();
19673 SDValue Result = DAG.getMemIntrinsicNode(X86ISD::LCMPXCHG_DAG, DL, Tys,
19677 DAG.getCopyFromReg(Result.getValue(0), DL, Reg, T, Result.getValue(1));
19678 SDValue EFLAGS = DAG.getCopyFromReg(cpOut.getValue(1), DL, X86::EFLAGS,
19679 MVT::i32, cpOut.getValue(2));
19680 SDValue Success = DAG.getNode(X86ISD::SETCC, DL, Op->getValueType(1),
19681 DAG.getConstant(X86::COND_E, MVT::i8), EFLAGS);
19683 DAG.ReplaceAllUsesOfValueWith(Op.getValue(0), cpOut);
19684 DAG.ReplaceAllUsesOfValueWith(Op.getValue(1), Success);
19685 DAG.ReplaceAllUsesOfValueWith(Op.getValue(2), EFLAGS.getValue(1));
19689 static SDValue LowerBITCAST(SDValue Op, const X86Subtarget *Subtarget,
19690 SelectionDAG &DAG) {
19691 MVT SrcVT = Op.getOperand(0).getSimpleValueType();
19692 MVT DstVT = Op.getSimpleValueType();
19694 if (SrcVT == MVT::v2i32 || SrcVT == MVT::v4i16 || SrcVT == MVT::v8i8) {
19695 assert(Subtarget->hasSSE2() && "Requires at least SSE2!");
19696 if (DstVT != MVT::f64)
19697 // This conversion needs to be expanded.
19700 SDValue InVec = Op->getOperand(0);
19702 unsigned NumElts = SrcVT.getVectorNumElements();
19703 EVT SVT = SrcVT.getVectorElementType();
19705 // Widen the input vector in the case of MVT::v2i32.
19706 // Example: from MVT::v2i32 to MVT::v4i32.
19707 SmallVector<SDValue, 16> Elts;
19708 for (unsigned i = 0, e = NumElts; i != e; ++i)
19709 Elts.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, SVT, InVec,
19710 DAG.getIntPtrConstant(i)));
19712 // Explicitly mark the extra elements as Undef.
19713 SDValue Undef = DAG.getUNDEF(SVT);
19714 for (unsigned i = NumElts, e = NumElts * 2; i != e; ++i)
19715 Elts.push_back(Undef);
19717 EVT NewVT = EVT::getVectorVT(*DAG.getContext(), SVT, NumElts * 2);
19718 SDValue BV = DAG.getNode(ISD::BUILD_VECTOR, dl, NewVT, Elts);
19719 SDValue ToV2F64 = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, BV);
19720 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, ToV2F64,
19721 DAG.getIntPtrConstant(0));
19724 assert(Subtarget->is64Bit() && !Subtarget->hasSSE2() &&
19725 Subtarget->hasMMX() && "Unexpected custom BITCAST");
19726 assert((DstVT == MVT::i64 ||
19727 (DstVT.isVector() && DstVT.getSizeInBits()==64)) &&
19728 "Unexpected custom BITCAST");
19729 // i64 <=> MMX conversions are Legal.
19730 if (SrcVT==MVT::i64 && DstVT.isVector())
19732 if (DstVT==MVT::i64 && SrcVT.isVector())
19734 // MMX <=> MMX conversions are Legal.
19735 if (SrcVT.isVector() && DstVT.isVector())
19737 // All other conversions need to be expanded.
19741 static SDValue LowerCTPOP(SDValue Op, const X86Subtarget *Subtarget,
19742 SelectionDAG &DAG) {
19743 SDNode *Node = Op.getNode();
19746 Op = Op.getOperand(0);
19747 EVT VT = Op.getValueType();
19748 assert((VT.is128BitVector() || VT.is256BitVector()) &&
19749 "CTPOP lowering only implemented for 128/256-bit wide vector types");
19751 unsigned NumElts = VT.getVectorNumElements();
19752 EVT EltVT = VT.getVectorElementType();
19753 unsigned Len = EltVT.getSizeInBits();
19755 // This is the vectorized version of the "best" algorithm from
19756 // http://graphics.stanford.edu/~seander/bithacks.html#CountBitsSetParallel
19757 // with a minor tweak to use a series of adds + shifts instead of vector
19758 // multiplications. Implemented for the v2i64, v4i64, v4i32, v8i32 types:
19760 // v2i64, v4i64, v4i32 => Only profitable w/ popcnt disabled
19761 // v8i32 => Always profitable
19763 // FIXME: There are a couple of possible improvements:
19765 // 1) Support for i8 and i16 vectors (needs measurements if popcnt enabled).
19766 // 2) Use strategies from http://wm.ite.pl/articles/sse-popcount.html
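// (As a reminder of the bithack on a single byte, take v = 0xFF:
// v - ((v >> 1) & 0x55) = 0xAA; (0xAA & 0x33) + ((0xAA >> 2) & 0x33) = 0x44;
// (0x44 + (0x44 >> 4)) & 0x0F = 0x08, i.e. eight set bits. The code below
// runs the same steps with the masks splatted across the i32/i64 elements.)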
19768 assert(EltVT.isInteger() && (Len == 32 || Len == 64) && Len % 8 == 0 &&
19769 "CTPOP not implemented for this vector element type.");
19771 // X86 canonicalizes ANDs to vXi64; generate the appropriate bitcasts to avoid
19772 // extra legalization.
19773 bool NeedsBitcast = EltVT == MVT::i32;
19774 MVT BitcastVT = VT.is256BitVector() ? MVT::v4i64 : MVT::v2i64;
19776 SDValue Cst55 = DAG.getConstant(APInt::getSplat(Len, APInt(8, 0x55)), EltVT);
19777 SDValue Cst33 = DAG.getConstant(APInt::getSplat(Len, APInt(8, 0x33)), EltVT);
19778 SDValue Cst0F = DAG.getConstant(APInt::getSplat(Len, APInt(8, 0x0F)), EltVT);
19780 // v = v - ((v >> 1) & 0x55555555...)
19781 SmallVector<SDValue, 8> Ones(NumElts, DAG.getConstant(1, EltVT));
19782 SDValue OnesV = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Ones);
19783 SDValue Srl = DAG.getNode(ISD::SRL, dl, VT, Op, OnesV);
19785 Srl = DAG.getNode(ISD::BITCAST, dl, BitcastVT, Srl);
19787 SmallVector<SDValue, 8> Mask55(NumElts, Cst55);
19788 SDValue M55 = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Mask55);
19790 M55 = DAG.getNode(ISD::BITCAST, dl, BitcastVT, M55);
19792 SDValue And = DAG.getNode(ISD::AND, dl, Srl.getValueType(), Srl, M55);
19793 if (VT != And.getValueType())
19794 And = DAG.getNode(ISD::BITCAST, dl, VT, And);
19795 SDValue Sub = DAG.getNode(ISD::SUB, dl, VT, Op, And);
19797 // v = (v & 0x33333333...) + ((v >> 2) & 0x33333333...)
19798 SmallVector<SDValue, 8> Mask33(NumElts, Cst33);
19799 SDValue M33 = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Mask33);
19800 SmallVector<SDValue, 8> Twos(NumElts, DAG.getConstant(2, EltVT));
19801 SDValue TwosV = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Twos);
19803 Srl = DAG.getNode(ISD::SRL, dl, VT, Sub, TwosV);
19804 if (NeedsBitcast) {
19805 Srl = DAG.getNode(ISD::BITCAST, dl, BitcastVT, Srl);
19806 M33 = DAG.getNode(ISD::BITCAST, dl, BitcastVT, M33);
19807 Sub = DAG.getNode(ISD::BITCAST, dl, BitcastVT, Sub);
19810 SDValue AndRHS = DAG.getNode(ISD::AND, dl, M33.getValueType(), Srl, M33);
19811 SDValue AndLHS = DAG.getNode(ISD::AND, dl, M33.getValueType(), Sub, M33);
19812 if (VT != AndRHS.getValueType()) {
19813 AndRHS = DAG.getNode(ISD::BITCAST, dl, VT, AndRHS);
19814 AndLHS = DAG.getNode(ISD::BITCAST, dl, VT, AndLHS);
19816 SDValue Add = DAG.getNode(ISD::ADD, dl, VT, AndLHS, AndRHS);
19818 // v = (v + (v >> 4)) & 0x0F0F0F0F...
19819 SmallVector<SDValue, 8> Fours(NumElts, DAG.getConstant(4, EltVT));
19820 SDValue FoursV = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Fours);
19821 Srl = DAG.getNode(ISD::SRL, dl, VT, Add, FoursV);
19822 Add = DAG.getNode(ISD::ADD, dl, VT, Add, Srl);
19824 SmallVector<SDValue, 8> Mask0F(NumElts, Cst0F);
19825 SDValue M0F = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Mask0F);
19826 if (NeedsBitcast) {
19827 Add = DAG.getNode(ISD::BITCAST, dl, BitcastVT, Add);
19828 M0F = DAG.getNode(ISD::BITCAST, dl, BitcastVT, M0F);
19830 And = DAG.getNode(ISD::AND, dl, M0F.getValueType(), Add, M0F);
19831 if (VT != And.getValueType())
19832 And = DAG.getNode(ISD::BITCAST, dl, VT, And);
19834 // The algorithm mentioned above uses:
19835 // v = (v * 0x01010101...) >> (Len - 8)
19837 // Change it to use vector adds + vector shifts which yield faster results on
19838 // Haswell than using vector integer multiplication.
19840 // For i32 elements:
19841 // v = v + (v >> 8)
19842 // v = v + (v >> 16)
19844 // For i64 elements:
19845 // v = v + (v >> 8)
19846 // v = v + (v >> 16)
19847 // v = v + (v >> 32)
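// (After the 0x0F step each byte holds a count of at most 8, so this
// shift-and-add ladder accumulates every byte's count into the low byte of
// the element, which is exactly what the multiply-and-shift would produce;
// the final 0x3F/0x7F mask then discards the partial sums left in the upper
// bytes.)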
19850 SmallVector<SDValue, 8> Csts;
19851 for (unsigned i = 8; i <= Len/2; i *= 2) {
19852 Csts.assign(NumElts, DAG.getConstant(i, EltVT));
19853 SDValue CstsV = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Csts);
19854 Srl = DAG.getNode(ISD::SRL, dl, VT, Add, CstsV);
19855 Add = DAG.getNode(ISD::ADD, dl, VT, Add, Srl);
19859 // The result is in the least significant 6 bits for i32 and 7 bits for i64.
19860 SDValue Cst3F = DAG.getConstant(APInt(Len, Len == 32 ? 0x3F : 0x7F), EltVT);
19861 SmallVector<SDValue, 8> Cst3FV(NumElts, Cst3F);
19862 SDValue M3F = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Cst3FV);
19863 if (NeedsBitcast) {
19864 Add = DAG.getNode(ISD::BITCAST, dl, BitcastVT, Add);
19865 M3F = DAG.getNode(ISD::BITCAST, dl, BitcastVT, M3F);
19867 And = DAG.getNode(ISD::AND, dl, M3F.getValueType(), Add, M3F);
19868 if (VT != And.getValueType())
19869 And = DAG.getNode(ISD::BITCAST, dl, VT, And);
19874 static SDValue LowerLOAD_SUB(SDValue Op, SelectionDAG &DAG) {
19875 SDNode *Node = Op.getNode();
19877 EVT T = Node->getValueType(0);
19878 SDValue negOp = DAG.getNode(ISD::SUB, dl, T,
19879 DAG.getConstant(0, T), Node->getOperand(2));
19880 return DAG.getAtomic(ISD::ATOMIC_LOAD_ADD, dl,
19881 cast<AtomicSDNode>(Node)->getMemoryVT(),
19882 Node->getOperand(0),
19883 Node->getOperand(1), negOp,
19884 cast<AtomicSDNode>(Node)->getMemOperand(),
19885 cast<AtomicSDNode>(Node)->getOrdering(),
19886 cast<AtomicSDNode>(Node)->getSynchScope());
19889 static SDValue LowerATOMIC_STORE(SDValue Op, SelectionDAG &DAG) {
19890 SDNode *Node = Op.getNode();
19892 EVT VT = cast<AtomicSDNode>(Node)->getMemoryVT();
19894 // Convert seq_cst store -> xchg
19895 // Convert wide store -> swap (-> cmpxchg8b/cmpxchg16b)
19896 // FIXME: On 32-bit, store -> fist or movq would be more efficient
19897 // (The only way to get a 16-byte store is cmpxchg16b)
19898 // FIXME: 16-byte ATOMIC_SWAP isn't actually hooked up at the moment.
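// (A plain MOV store is not enough for seq_cst: x86 allows a store to be
// reordered past a later load, whereas XCHG's implicit LOCK prefix makes the
// exchange a full barrier, so no separate fence is needed.)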
19899 if (cast<AtomicSDNode>(Node)->getOrdering() == SequentiallyConsistent ||
19900 !DAG.getTargetLoweringInfo().isTypeLegal(VT)) {
19901 SDValue Swap = DAG.getAtomic(ISD::ATOMIC_SWAP, dl,
19902 cast<AtomicSDNode>(Node)->getMemoryVT(),
19903 Node->getOperand(0),
19904 Node->getOperand(1), Node->getOperand(2),
19905 cast<AtomicSDNode>(Node)->getMemOperand(),
19906 cast<AtomicSDNode>(Node)->getOrdering(),
19907 cast<AtomicSDNode>(Node)->getSynchScope());
19908 return Swap.getValue(1);
19910 // Other atomic stores have a simple pattern.
19914 static SDValue LowerADDC_ADDE_SUBC_SUBE(SDValue Op, SelectionDAG &DAG) {
19915 EVT VT = Op.getNode()->getSimpleValueType(0);
19917 // Let legalize expand this if it isn't a legal type yet.
19918 if (!DAG.getTargetLoweringInfo().isTypeLegal(VT))
19921 SDVTList VTs = DAG.getVTList(VT, MVT::i32);
19924 bool ExtraOp = false;
19925 switch (Op.getOpcode()) {
19926 default: llvm_unreachable("Invalid code");
19927 case ISD::ADDC: Opc = X86ISD::ADD; break;
19928 case ISD::ADDE: Opc = X86ISD::ADC; ExtraOp = true; break;
19929 case ISD::SUBC: Opc = X86ISD::SUB; break;
19930 case ISD::SUBE: Opc = X86ISD::SBB; ExtraOp = true; break;
19934 return DAG.getNode(Opc, SDLoc(Op), VTs, Op.getOperand(0),
19936 return DAG.getNode(Opc, SDLoc(Op), VTs, Op.getOperand(0),
19937 Op.getOperand(1), Op.getOperand(2));
19940 static SDValue LowerFSINCOS(SDValue Op, const X86Subtarget *Subtarget,
19941 SelectionDAG &DAG) {
19942 assert(Subtarget->isTargetDarwin() && Subtarget->is64Bit());
19944 // For MacOSX, we want to call an alternative entry point: __sincos_stret,
19945 // which returns the values as { float, float } (in XMM0) or
19946 // { double, double } (which is returned in XMM0, XMM1).
19948 SDValue Arg = Op.getOperand(0);
19949 EVT ArgVT = Arg.getValueType();
19950 Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
19952 TargetLowering::ArgListTy Args;
19953 TargetLowering::ArgListEntry Entry;
19957 Entry.isSExt = false;
19958 Entry.isZExt = false;
19959 Args.push_back(Entry);
19961 bool isF64 = ArgVT == MVT::f64;
19962 // Only optimize x86_64 for now. i386 is a bit messy. For f32,
19963 // the small struct {f32, f32} is returned in (eax, edx). For f64,
19964 // the results are returned via SRet in memory.
19965 const char *LibcallName = isF64 ? "__sincos_stret" : "__sincosf_stret";
19966 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
19967 SDValue Callee = DAG.getExternalSymbol(LibcallName, TLI.getPointerTy());
19969 Type *RetTy = isF64
19970 ? (Type*)StructType::get(ArgTy, ArgTy, nullptr)
19971 : (Type*)VectorType::get(ArgTy, 4);
19973 TargetLowering::CallLoweringInfo CLI(DAG);
19974 CLI.setDebugLoc(dl).setChain(DAG.getEntryNode())
19975 .setCallee(CallingConv::C, RetTy, Callee, std::move(Args), 0);
19977 std::pair<SDValue, SDValue> CallResult = TLI.LowerCallTo(CLI);
19980 // Returned in xmm0 and xmm1.
19981 return CallResult.first;
19983 // Returned in bits 0:31 and 32:63 of xmm0.
19984 SDValue SinVal = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, ArgVT,
19985 CallResult.first, DAG.getIntPtrConstant(0));
19986 SDValue CosVal = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, ArgVT,
19987 CallResult.first, DAG.getIntPtrConstant(1));
19988 SDVTList Tys = DAG.getVTList(ArgVT, ArgVT);
19989 return DAG.getNode(ISD::MERGE_VALUES, dl, Tys, SinVal, CosVal);
19992 /// LowerOperation - Provide custom lowering hooks for some operations.
19994 SDValue X86TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
19995 switch (Op.getOpcode()) {
19996 default: llvm_unreachable("Should not custom lower this!");
19997 case ISD::SIGN_EXTEND_INREG: return LowerSIGN_EXTEND_INREG(Op,DAG);
19998 case ISD::ATOMIC_FENCE: return LowerATOMIC_FENCE(Op, Subtarget, DAG);
19999 case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS:
20000 return LowerCMP_SWAP(Op, Subtarget, DAG);
20001 case ISD::CTPOP: return LowerCTPOP(Op, Subtarget, DAG);
20002 case ISD::ATOMIC_LOAD_SUB: return LowerLOAD_SUB(Op,DAG);
20003 case ISD::ATOMIC_STORE: return LowerATOMIC_STORE(Op,DAG);
20004 case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG);
20005 case ISD::CONCAT_VECTORS: return LowerCONCAT_VECTORS(Op, DAG);
20006 case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG);
20007 case ISD::VSELECT: return LowerVSELECT(Op, DAG);
20008 case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG);
20009 case ISD::INSERT_VECTOR_ELT: return LowerINSERT_VECTOR_ELT(Op, DAG);
20010 case ISD::EXTRACT_SUBVECTOR: return LowerEXTRACT_SUBVECTOR(Op,Subtarget,DAG);
20011 case ISD::INSERT_SUBVECTOR: return LowerINSERT_SUBVECTOR(Op, Subtarget,DAG);
20012 case ISD::SCALAR_TO_VECTOR: return LowerSCALAR_TO_VECTOR(Op, DAG);
20013 case ISD::ConstantPool: return LowerConstantPool(Op, DAG);
20014 case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG);
20015 case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG);
20016 case ISD::ExternalSymbol: return LowerExternalSymbol(Op, DAG);
20017 case ISD::BlockAddress: return LowerBlockAddress(Op, DAG);
20018 case ISD::SHL_PARTS:
20019 case ISD::SRA_PARTS:
20020 case ISD::SRL_PARTS: return LowerShiftParts(Op, DAG);
20021 case ISD::SINT_TO_FP: return LowerSINT_TO_FP(Op, DAG);
20022 case ISD::UINT_TO_FP: return LowerUINT_TO_FP(Op, DAG);
20023 case ISD::TRUNCATE: return LowerTRUNCATE(Op, DAG);
20024 case ISD::ZERO_EXTEND: return LowerZERO_EXTEND(Op, Subtarget, DAG);
20025 case ISD::SIGN_EXTEND: return LowerSIGN_EXTEND(Op, Subtarget, DAG);
20026 case ISD::ANY_EXTEND: return LowerANY_EXTEND(Op, Subtarget, DAG);
20027 case ISD::FP_TO_SINT: return LowerFP_TO_SINT(Op, DAG);
20028 case ISD::FP_TO_UINT: return LowerFP_TO_UINT(Op, DAG);
20029 case ISD::FP_EXTEND: return LowerFP_EXTEND(Op, DAG);
20030 case ISD::LOAD: return LowerExtendedLoad(Op, Subtarget, DAG);
20032 case ISD::FNEG: return LowerFABSorFNEG(Op, DAG);
20033 case ISD::FCOPYSIGN: return LowerFCOPYSIGN(Op, DAG);
20034 case ISD::FGETSIGN: return LowerFGETSIGN(Op, DAG);
20035 case ISD::SETCC: return LowerSETCC(Op, DAG);
20036 case ISD::SELECT: return LowerSELECT(Op, DAG);
20037 case ISD::BRCOND: return LowerBRCOND(Op, DAG);
20038 case ISD::JumpTable: return LowerJumpTable(Op, DAG);
20039 case ISD::VASTART: return LowerVASTART(Op, DAG);
20040 case ISD::VAARG: return LowerVAARG(Op, DAG);
20041 case ISD::VACOPY: return LowerVACOPY(Op, Subtarget, DAG);
20042 case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, Subtarget, DAG);
20043 case ISD::INTRINSIC_VOID:
20044 case ISD::INTRINSIC_W_CHAIN: return LowerINTRINSIC_W_CHAIN(Op, Subtarget, DAG);
20045 case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG);
20046 case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG);
20047 case ISD::FRAME_TO_ARGS_OFFSET:
20048 return LowerFRAME_TO_ARGS_OFFSET(Op, DAG);
20049 case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG);
20050 case ISD::EH_RETURN: return LowerEH_RETURN(Op, DAG);
20051 case ISD::EH_SJLJ_SETJMP: return lowerEH_SJLJ_SETJMP(Op, DAG);
20052 case ISD::EH_SJLJ_LONGJMP: return lowerEH_SJLJ_LONGJMP(Op, DAG);
20053 case ISD::INIT_TRAMPOLINE: return LowerINIT_TRAMPOLINE(Op, DAG);
20054 case ISD::ADJUST_TRAMPOLINE: return LowerADJUST_TRAMPOLINE(Op, DAG);
20055 case ISD::FLT_ROUNDS_: return LowerFLT_ROUNDS_(Op, DAG);
20056 case ISD::CTLZ: return LowerCTLZ(Op, DAG);
20057 case ISD::CTLZ_ZERO_UNDEF: return LowerCTLZ_ZERO_UNDEF(Op, DAG);
20058 case ISD::CTTZ: return LowerCTTZ(Op, DAG);
20059 case ISD::MUL: return LowerMUL(Op, Subtarget, DAG);
20060 case ISD::UMUL_LOHI:
20061 case ISD::SMUL_LOHI: return LowerMUL_LOHI(Op, Subtarget, DAG);
20064 case ISD::SHL: return LowerShift(Op, Subtarget, DAG);
20070 case ISD::UMULO: return LowerXALUO(Op, DAG);
20071 case ISD::READCYCLECOUNTER: return LowerREADCYCLECOUNTER(Op, Subtarget,DAG);
20072 case ISD::BITCAST: return LowerBITCAST(Op, Subtarget, DAG);
20076 case ISD::SUBE: return LowerADDC_ADDE_SUBC_SUBE(Op, DAG);
20077 case ISD::ADD: return LowerADD(Op, DAG);
20078 case ISD::SUB: return LowerSUB(Op, DAG);
20079 case ISD::FSINCOS: return LowerFSINCOS(Op, Subtarget, DAG);
20083 /// ReplaceNodeResults - Replace a node with an illegal result type
20084 /// with a new node built out of custom code.
20085 void X86TargetLowering::ReplaceNodeResults(SDNode *N,
20086 SmallVectorImpl<SDValue>&Results,
20087 SelectionDAG &DAG) const {
20089 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
20090 switch (N->getOpcode()) {
20092 llvm_unreachable("Do not know how to custom type legalize this operation!");
20093 // We might have generated v2f32 FMIN/FMAX operations. Widen them to v4f32.
20094 case X86ISD::FMINC:
20096 case X86ISD::FMAXC:
20097 case X86ISD::FMAX: {
20098 EVT VT = N->getValueType(0);
20099 if (VT != MVT::v2f32)
20100 llvm_unreachable("Unexpected type (!= v2f32) on FMIN/FMAX.");
20101 SDValue UNDEF = DAG.getUNDEF(VT);
20102 SDValue LHS = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4f32,
20103 N->getOperand(0), UNDEF);
20104 SDValue RHS = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4f32,
20105 N->getOperand(1), UNDEF);
20106 Results.push_back(DAG.getNode(N->getOpcode(), dl, MVT::v4f32, LHS, RHS));
20109 case ISD::SIGN_EXTEND_INREG:
20114 // We don't want to expand or promote these.
20121 case ISD::UDIVREM: {
20122 SDValue V = LowerWin64_i128OP(SDValue(N,0), DAG);
20123 Results.push_back(V);
20126 case ISD::FP_TO_SINT:
20127 case ISD::FP_TO_UINT: {
20128 bool IsSigned = N->getOpcode() == ISD::FP_TO_SINT;
20130 if (!IsSigned && !isIntegerTypeFTOL(SDValue(N, 0).getValueType()))
20133 std::pair<SDValue,SDValue> Vals =
20134 FP_TO_INTHelper(SDValue(N, 0), DAG, IsSigned, /*IsReplace=*/ true);
20135 SDValue FIST = Vals.first, StackSlot = Vals.second;
20136 if (FIST.getNode()) {
20137 EVT VT = N->getValueType(0);
20138 // Return a load from the stack slot.
20139 if (StackSlot.getNode())
20140 Results.push_back(DAG.getLoad(VT, dl, FIST, StackSlot,
20141 MachinePointerInfo(),
20142 false, false, false, 0));
20144 Results.push_back(FIST);
20148 case ISD::UINT_TO_FP: {
20149 assert(Subtarget->hasSSE2() && "Requires at least SSE2!");
20150 if (N->getOperand(0).getValueType() != MVT::v2i32 ||
20151 N->getValueType(0) != MVT::v2f32)
20153 SDValue ZExtIn = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v2i64,
20155 SDValue Bias = DAG.getConstantFP(BitsToDouble(0x4330000000000000ULL),
20157 SDValue VBias = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v2f64, Bias, Bias);
20158 SDValue Or = DAG.getNode(ISD::OR, dl, MVT::v2i64, ZExtIn,
20159 DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, VBias));
20160 Or = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Or);
20161 SDValue Sub = DAG.getNode(ISD::FSUB, dl, MVT::v2f64, Or, VBias);
20162 Results.push_back(DAG.getNode(X86ISD::VFPROUND, dl, MVT::v4f32, Sub));
20165 case ISD::FP_ROUND: {
20166 if (!TLI.isTypeLegal(N->getOperand(0).getValueType()))
20168 SDValue V = DAG.getNode(X86ISD::VFPROUND, dl, MVT::v4f32, N->getOperand(0));
20169 Results.push_back(V);
20172 case ISD::INTRINSIC_W_CHAIN: {
20173 unsigned IntNo = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
20175 default : llvm_unreachable("Do not know how to custom type "
20176 "legalize this intrinsic operation!");
20177 case Intrinsic::x86_rdtsc:
20178 return getReadTimeStampCounter(N, dl, X86ISD::RDTSC_DAG, DAG, Subtarget,
20180 case Intrinsic::x86_rdtscp:
20181 return getReadTimeStampCounter(N, dl, X86ISD::RDTSCP_DAG, DAG, Subtarget,
20183 case Intrinsic::x86_rdpmc:
20184 return getReadPerformanceCounter(N, dl, DAG, Subtarget, Results);
20187 case ISD::READCYCLECOUNTER: {
20188 return getReadTimeStampCounter(N, dl, X86ISD::RDTSC_DAG, DAG, Subtarget,
20191 case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS: {
20192 EVT T = N->getValueType(0);
20193 assert((T == MVT::i64 || T == MVT::i128) && "can only expand cmpxchg pair");
20194 bool Regs64bit = T == MVT::i128;
20195 EVT HalfT = Regs64bit ? MVT::i64 : MVT::i32;
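// CMPXCHG8B/CMPXCHG16B compare EDX:EAX (resp. RDX:RAX) against the memory
// operand and, on a match, store ECX:EBX (resp. RCX:RBX), so the expected and
// new values are split into halves and moved into those register pairs below.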
20196 SDValue cpInL, cpInH;
20197 cpInL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(2),
20198 DAG.getConstant(0, HalfT));
20199 cpInH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(2),
20200 DAG.getConstant(1, HalfT));
20201 cpInL = DAG.getCopyToReg(N->getOperand(0), dl,
20202 Regs64bit ? X86::RAX : X86::EAX,
20204 cpInH = DAG.getCopyToReg(cpInL.getValue(0), dl,
20205 Regs64bit ? X86::RDX : X86::EDX,
20206 cpInH, cpInL.getValue(1));
20207 SDValue swapInL, swapInH;
20208 swapInL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(3),
20209 DAG.getConstant(0, HalfT));
20210 swapInH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(3),
20211 DAG.getConstant(1, HalfT));
20212 swapInL = DAG.getCopyToReg(cpInH.getValue(0), dl,
20213 Regs64bit ? X86::RBX : X86::EBX,
20214 swapInL, cpInH.getValue(1));
20215 swapInH = DAG.getCopyToReg(swapInL.getValue(0), dl,
20216 Regs64bit ? X86::RCX : X86::ECX,
20217 swapInH, swapInL.getValue(1));
20218 SDValue Ops[] = { swapInH.getValue(0),
20220 swapInH.getValue(1) };
20221 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue);
20222 MachineMemOperand *MMO = cast<AtomicSDNode>(N)->getMemOperand();
20223 unsigned Opcode = Regs64bit ? X86ISD::LCMPXCHG16_DAG :
20224 X86ISD::LCMPXCHG8_DAG;
20225 SDValue Result = DAG.getMemIntrinsicNode(Opcode, dl, Tys, Ops, T, MMO);
20226 SDValue cpOutL = DAG.getCopyFromReg(Result.getValue(0), dl,
20227 Regs64bit ? X86::RAX : X86::EAX,
20228 HalfT, Result.getValue(1));
20229 SDValue cpOutH = DAG.getCopyFromReg(cpOutL.getValue(1), dl,
20230 Regs64bit ? X86::RDX : X86::EDX,
20231 HalfT, cpOutL.getValue(2));
20232 SDValue OpsF[] = { cpOutL.getValue(0), cpOutH.getValue(0)};
20234 SDValue EFLAGS = DAG.getCopyFromReg(cpOutH.getValue(1), dl, X86::EFLAGS,
20235 MVT::i32, cpOutH.getValue(2));
20237 SDValue Success = DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
20238 DAG.getConstant(X86::COND_E, MVT::i8), EFLAGS);
20239 Success = DAG.getZExtOrTrunc(Success, dl, N->getValueType(1));
20241 Results.push_back(DAG.getNode(ISD::BUILD_PAIR, dl, T, OpsF));
20242 Results.push_back(Success);
20243 Results.push_back(EFLAGS.getValue(1));
20246 case ISD::ATOMIC_SWAP:
20247 case ISD::ATOMIC_LOAD_ADD:
20248 case ISD::ATOMIC_LOAD_SUB:
20249 case ISD::ATOMIC_LOAD_AND:
20250 case ISD::ATOMIC_LOAD_OR:
20251 case ISD::ATOMIC_LOAD_XOR:
20252 case ISD::ATOMIC_LOAD_NAND:
20253 case ISD::ATOMIC_LOAD_MIN:
20254 case ISD::ATOMIC_LOAD_MAX:
20255 case ISD::ATOMIC_LOAD_UMIN:
20256 case ISD::ATOMIC_LOAD_UMAX:
20257 case ISD::ATOMIC_LOAD: {
20258 // Delegate to generic TypeLegalization. Situations we can really handle
20259 // should have already been dealt with by AtomicExpandPass.cpp.
20262 case ISD::BITCAST: {
20263 assert(Subtarget->hasSSE2() && "Requires at least SSE2!");
20264 EVT DstVT = N->getValueType(0);
20265 EVT SrcVT = N->getOperand(0)->getValueType(0);
20267 if (SrcVT != MVT::f64 ||
20268 (DstVT != MVT::v2i32 && DstVT != MVT::v4i16 && DstVT != MVT::v8i8))
20271 unsigned NumElts = DstVT.getVectorNumElements();
20272 EVT SVT = DstVT.getVectorElementType();
20273 EVT WiderVT = EVT::getVectorVT(*DAG.getContext(), SVT, NumElts * 2);
20274 SDValue Expanded = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl,
20275 MVT::v2f64, N->getOperand(0));
20276 SDValue ToVecInt = DAG.getNode(ISD::BITCAST, dl, WiderVT, Expanded);
20278 if (ExperimentalVectorWideningLegalization) {
20279 // If we are legalizing vectors by widening, we already have the desired
20280 // legal vector type, just return it.
20281 Results.push_back(ToVecInt);
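// Without widening legalization, pull the requested number of narrow elements
// back out of the wide integer vector and reassemble them into the
// originally requested vector type.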
20285 SmallVector<SDValue, 8> Elts;
20286 for (unsigned i = 0, e = NumElts; i != e; ++i)
20287 Elts.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, SVT,
20288 ToVecInt, DAG.getIntPtrConstant(i)));
20290 Results.push_back(DAG.getNode(ISD::BUILD_VECTOR, dl, DstVT, Elts));
20295 const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const {
20297 default: return nullptr;
20298 case X86ISD::BSF: return "X86ISD::BSF";
20299 case X86ISD::BSR: return "X86ISD::BSR";
20300 case X86ISD::SHLD: return "X86ISD::SHLD";
20301 case X86ISD::SHRD: return "X86ISD::SHRD";
20302 case X86ISD::FAND: return "X86ISD::FAND";
20303 case X86ISD::FANDN: return "X86ISD::FANDN";
20304 case X86ISD::FOR: return "X86ISD::FOR";
20305 case X86ISD::FXOR: return "X86ISD::FXOR";
20306 case X86ISD::FSRL: return "X86ISD::FSRL";
20307 case X86ISD::FILD: return "X86ISD::FILD";
20308 case X86ISD::FILD_FLAG: return "X86ISD::FILD_FLAG";
20309 case X86ISD::FP_TO_INT16_IN_MEM: return "X86ISD::FP_TO_INT16_IN_MEM";
20310 case X86ISD::FP_TO_INT32_IN_MEM: return "X86ISD::FP_TO_INT32_IN_MEM";
20311 case X86ISD::FP_TO_INT64_IN_MEM: return "X86ISD::FP_TO_INT64_IN_MEM";
20312 case X86ISD::FLD: return "X86ISD::FLD";
20313 case X86ISD::FST: return "X86ISD::FST";
20314 case X86ISD::CALL: return "X86ISD::CALL";
20315 case X86ISD::RDTSC_DAG: return "X86ISD::RDTSC_DAG";
20316 case X86ISD::RDTSCP_DAG: return "X86ISD::RDTSCP_DAG";
20317 case X86ISD::RDPMC_DAG: return "X86ISD::RDPMC_DAG";
20318 case X86ISD::BT: return "X86ISD::BT";
20319 case X86ISD::CMP: return "X86ISD::CMP";
20320 case X86ISD::COMI: return "X86ISD::COMI";
20321 case X86ISD::UCOMI: return "X86ISD::UCOMI";
20322 case X86ISD::CMPM: return "X86ISD::CMPM";
20323 case X86ISD::CMPMU: return "X86ISD::CMPMU";
20324 case X86ISD::SETCC: return "X86ISD::SETCC";
20325 case X86ISD::SETCC_CARRY: return "X86ISD::SETCC_CARRY";
20326 case X86ISD::FSETCC: return "X86ISD::FSETCC";
20327 case X86ISD::CMOV: return "X86ISD::CMOV";
20328 case X86ISD::BRCOND: return "X86ISD::BRCOND";
20329 case X86ISD::RET_FLAG: return "X86ISD::RET_FLAG";
20330 case X86ISD::REP_STOS: return "X86ISD::REP_STOS";
20331 case X86ISD::REP_MOVS: return "X86ISD::REP_MOVS";
20332 case X86ISD::GlobalBaseReg: return "X86ISD::GlobalBaseReg";
20333 case X86ISD::Wrapper: return "X86ISD::Wrapper";
20334 case X86ISD::WrapperRIP: return "X86ISD::WrapperRIP";
20335 case X86ISD::PEXTRB: return "X86ISD::PEXTRB";
20336 case X86ISD::PEXTRW: return "X86ISD::PEXTRW";
20337 case X86ISD::INSERTPS: return "X86ISD::INSERTPS";
20338 case X86ISD::PINSRB: return "X86ISD::PINSRB";
20339 case X86ISD::PINSRW: return "X86ISD::PINSRW";
20340 case X86ISD::PSHUFB: return "X86ISD::PSHUFB";
20341 case X86ISD::ANDNP: return "X86ISD::ANDNP";
20342 case X86ISD::PSIGN: return "X86ISD::PSIGN";
20343 case X86ISD::BLENDI: return "X86ISD::BLENDI";
20344 case X86ISD::SHRUNKBLEND: return "X86ISD::SHRUNKBLEND";
20345 case X86ISD::SUBUS: return "X86ISD::SUBUS";
20346 case X86ISD::HADD: return "X86ISD::HADD";
20347 case X86ISD::HSUB: return "X86ISD::HSUB";
20348 case X86ISD::FHADD: return "X86ISD::FHADD";
20349 case X86ISD::FHSUB: return "X86ISD::FHSUB";
20350 case X86ISD::UMAX: return "X86ISD::UMAX";
20351 case X86ISD::UMIN: return "X86ISD::UMIN";
20352 case X86ISD::SMAX: return "X86ISD::SMAX";
20353 case X86ISD::SMIN: return "X86ISD::SMIN";
20354 case X86ISD::FMAX: return "X86ISD::FMAX";
20355 case X86ISD::FMIN: return "X86ISD::FMIN";
20356 case X86ISD::FMAXC: return "X86ISD::FMAXC";
20357 case X86ISD::FMINC: return "X86ISD::FMINC";
20358 case X86ISD::FRSQRT: return "X86ISD::FRSQRT";
20359 case X86ISD::FRCP: return "X86ISD::FRCP";
20360 case X86ISD::TLSADDR: return "X86ISD::TLSADDR";
20361 case X86ISD::TLSBASEADDR: return "X86ISD::TLSBASEADDR";
20362 case X86ISD::TLSCALL: return "X86ISD::TLSCALL";
20363 case X86ISD::EH_SJLJ_SETJMP: return "X86ISD::EH_SJLJ_SETJMP";
20364 case X86ISD::EH_SJLJ_LONGJMP: return "X86ISD::EH_SJLJ_LONGJMP";
20365 case X86ISD::EH_RETURN: return "X86ISD::EH_RETURN";
20366 case X86ISD::TC_RETURN: return "X86ISD::TC_RETURN";
20367 case X86ISD::FNSTCW16m: return "X86ISD::FNSTCW16m";
20368 case X86ISD::FNSTSW16r: return "X86ISD::FNSTSW16r";
20369 case X86ISD::LCMPXCHG_DAG: return "X86ISD::LCMPXCHG_DAG";
20370 case X86ISD::LCMPXCHG8_DAG: return "X86ISD::LCMPXCHG8_DAG";
20371 case X86ISD::LCMPXCHG16_DAG: return "X86ISD::LCMPXCHG16_DAG";
20372 case X86ISD::VZEXT_MOVL: return "X86ISD::VZEXT_MOVL";
20373 case X86ISD::VZEXT_LOAD: return "X86ISD::VZEXT_LOAD";
20374 case X86ISD::VZEXT: return "X86ISD::VZEXT";
20375 case X86ISD::VSEXT: return "X86ISD::VSEXT";
20376 case X86ISD::VTRUNC: return "X86ISD::VTRUNC";
20377 case X86ISD::VTRUNCM: return "X86ISD::VTRUNCM";
20378 case X86ISD::VINSERT: return "X86ISD::VINSERT";
20379 case X86ISD::VFPEXT: return "X86ISD::VFPEXT";
20380 case X86ISD::VFPROUND: return "X86ISD::VFPROUND";
20381 case X86ISD::VSHLDQ: return "X86ISD::VSHLDQ";
20382 case X86ISD::VSRLDQ: return "X86ISD::VSRLDQ";
20383 case X86ISD::VSHL: return "X86ISD::VSHL";
20384 case X86ISD::VSRL: return "X86ISD::VSRL";
20385 case X86ISD::VSRA: return "X86ISD::VSRA";
20386 case X86ISD::VSHLI: return "X86ISD::VSHLI";
20387 case X86ISD::VSRLI: return "X86ISD::VSRLI";
20388 case X86ISD::VSRAI: return "X86ISD::VSRAI";
20389 case X86ISD::CMPP: return "X86ISD::CMPP";
20390 case X86ISD::PCMPEQ: return "X86ISD::PCMPEQ";
20391 case X86ISD::PCMPGT: return "X86ISD::PCMPGT";
20392 case X86ISD::PCMPEQM: return "X86ISD::PCMPEQM";
20393 case X86ISD::PCMPGTM: return "X86ISD::PCMPGTM";
20394 case X86ISD::ADD: return "X86ISD::ADD";
20395 case X86ISD::SUB: return "X86ISD::SUB";
20396 case X86ISD::ADC: return "X86ISD::ADC";
20397 case X86ISD::SBB: return "X86ISD::SBB";
20398 case X86ISD::SMUL: return "X86ISD::SMUL";
20399 case X86ISD::UMUL: return "X86ISD::UMUL";
20400 case X86ISD::SMUL8: return "X86ISD::SMUL8";
20401 case X86ISD::UMUL8: return "X86ISD::UMUL8";
20402 case X86ISD::SDIVREM8_SEXT_HREG: return "X86ISD::SDIVREM8_SEXT_HREG";
20403 case X86ISD::UDIVREM8_ZEXT_HREG: return "X86ISD::UDIVREM8_ZEXT_HREG";
20404 case X86ISD::INC: return "X86ISD::INC";
20405 case X86ISD::DEC: return "X86ISD::DEC";
20406 case X86ISD::OR: return "X86ISD::OR";
20407 case X86ISD::XOR: return "X86ISD::XOR";
20408 case X86ISD::AND: return "X86ISD::AND";
20409 case X86ISD::BEXTR: return "X86ISD::BEXTR";
20410 case X86ISD::MUL_IMM: return "X86ISD::MUL_IMM";
20411 case X86ISD::PTEST: return "X86ISD::PTEST";
20412 case X86ISD::TESTP: return "X86ISD::TESTP";
20413 case X86ISD::TESTM: return "X86ISD::TESTM";
20414 case X86ISD::TESTNM: return "X86ISD::TESTNM";
20415 case X86ISD::KORTEST: return "X86ISD::KORTEST";
20416 case X86ISD::PACKSS: return "X86ISD::PACKSS";
20417 case X86ISD::PACKUS: return "X86ISD::PACKUS";
20418 case X86ISD::PALIGNR: return "X86ISD::PALIGNR";
20419 case X86ISD::VALIGN: return "X86ISD::VALIGN";
20420 case X86ISD::PSHUFD: return "X86ISD::PSHUFD";
20421 case X86ISD::PSHUFHW: return "X86ISD::PSHUFHW";
20422 case X86ISD::PSHUFLW: return "X86ISD::PSHUFLW";
20423 case X86ISD::SHUFP: return "X86ISD::SHUFP";
20424 case X86ISD::MOVLHPS: return "X86ISD::MOVLHPS";
20425 case X86ISD::MOVLHPD: return "X86ISD::MOVLHPD";
20426 case X86ISD::MOVHLPS: return "X86ISD::MOVHLPS";
20427 case X86ISD::MOVLPS: return "X86ISD::MOVLPS";
20428 case X86ISD::MOVLPD: return "X86ISD::MOVLPD";
20429 case X86ISD::MOVDDUP: return "X86ISD::MOVDDUP";
20430 case X86ISD::MOVSHDUP: return "X86ISD::MOVSHDUP";
20431 case X86ISD::MOVSLDUP: return "X86ISD::MOVSLDUP";
20432 case X86ISD::MOVSD: return "X86ISD::MOVSD";
20433 case X86ISD::MOVSS: return "X86ISD::MOVSS";
20434 case X86ISD::UNPCKL: return "X86ISD::UNPCKL";
20435 case X86ISD::UNPCKH: return "X86ISD::UNPCKH";
20436 case X86ISD::VBROADCAST: return "X86ISD::VBROADCAST";
20437 case X86ISD::VBROADCASTM: return "X86ISD::VBROADCASTM";
20438 case X86ISD::VEXTRACT: return "X86ISD::VEXTRACT";
20439 case X86ISD::VPERMILPI: return "X86ISD::VPERMILPI";
20440 case X86ISD::VPERM2X128: return "X86ISD::VPERM2X128";
20441 case X86ISD::VPERMV: return "X86ISD::VPERMV";
20442 case X86ISD::VPERMV3: return "X86ISD::VPERMV3";
20443 case X86ISD::VPERMIV3: return "X86ISD::VPERMIV3";
20444 case X86ISD::VPERMI: return "X86ISD::VPERMI";
20445 case X86ISD::PMULUDQ: return "X86ISD::PMULUDQ";
20446 case X86ISD::PMULDQ: return "X86ISD::PMULDQ";
20447 case X86ISD::VASTART_SAVE_XMM_REGS: return "X86ISD::VASTART_SAVE_XMM_REGS";
20448 case X86ISD::VAARG_64: return "X86ISD::VAARG_64";
20449 case X86ISD::WIN_ALLOCA: return "X86ISD::WIN_ALLOCA";
20450 case X86ISD::MEMBARRIER: return "X86ISD::MEMBARRIER";
20451 case X86ISD::SEG_ALLOCA: return "X86ISD::SEG_ALLOCA";
20452 case X86ISD::WIN_FTOL: return "X86ISD::WIN_FTOL";
20453 case X86ISD::SAHF: return "X86ISD::SAHF";
20454 case X86ISD::RDRAND: return "X86ISD::RDRAND";
20455 case X86ISD::RDSEED: return "X86ISD::RDSEED";
20456 case X86ISD::FMADD: return "X86ISD::FMADD";
20457 case X86ISD::FMSUB: return "X86ISD::FMSUB";
20458 case X86ISD::FNMADD: return "X86ISD::FNMADD";
20459 case X86ISD::FNMSUB: return "X86ISD::FNMSUB";
20460 case X86ISD::FMADDSUB: return "X86ISD::FMADDSUB";
20461 case X86ISD::FMSUBADD: return "X86ISD::FMSUBADD";
20462 case X86ISD::PCMPESTRI: return "X86ISD::PCMPESTRI";
20463 case X86ISD::PCMPISTRI: return "X86ISD::PCMPISTRI";
20464 case X86ISD::XTEST: return "X86ISD::XTEST";
20465 case X86ISD::COMPRESS: return "X86ISD::COMPRESS";
20466 case X86ISD::EXPAND: return "X86ISD::EXPAND";
20467 case X86ISD::SELECT: return "X86ISD::SELECT";
20468 case X86ISD::ADDSUB: return "X86ISD::ADDSUB";
20469 case X86ISD::RCP28: return "X86ISD::RCP28";
20470 case X86ISD::RSQRT28: return "X86ISD::RSQRT28";
20474 // isLegalAddressingMode - Return true if the addressing mode represented
20475 // by AM is legal for this target, for a load/store of the specified type.
20476 bool X86TargetLowering::isLegalAddressingMode(const AddrMode &AM,
20478 // X86 supports extremely general addressing modes.
20479 CodeModel::Model M = getTargetMachine().getCodeModel();
20480 Reloc::Model R = getTargetMachine().getRelocationModel();
20482 // X86 allows a sign-extended 32-bit immediate field as a displacement.
20483 if (!X86::isOffsetSuitableForCodeModel(AM.BaseOffs, M, AM.BaseGV != nullptr))
20488 Subtarget->ClassifyGlobalReference(AM.BaseGV, getTargetMachine());
20490 // If a reference to this global requires an extra load, we can't fold it.
20491 if (isGlobalStubReference(GVFlags))
20494 // If BaseGV requires a register for the PIC base, we cannot also have a
20495 // BaseReg specified.
20496 if (AM.HasBaseReg && isGlobalRelativeToPICBase(GVFlags))
20499 // If lower 4G is not available, then we must use rip-relative addressing.
20500 if ((M != CodeModel::Small || R != Reloc::Static) &&
20501 Subtarget->is64Bit() && (AM.BaseOffs || AM.Scale > 1))
20505 switch (AM.Scale) {
20511 // These scales always work.
20516 // These scales are formed with basereg+scalereg. Only accept if there is
20521 default: // Other stuff never works.
20528 bool X86TargetLowering::isVectorShiftByScalarCheap(Type *Ty) const {
20529 unsigned Bits = Ty->getScalarSizeInBits();
20531 // 8-bit shifts are always expensive, but versions with a scalar amount aren't
20532 // particularly cheaper than those without.
20536 // On AVX2 there are new vpsllv[dq] instructions (and other shifts), that make
20537 // variable shifts just as cheap as scalar ones.
20538 if (Subtarget->hasInt256() && (Bits == 32 || Bits == 64))
20541 // Otherwise, it's significantly cheaper to shift by a scalar amount than by a
20542 // fully general vector.
20546 bool X86TargetLowering::isTruncateFree(Type *Ty1, Type *Ty2) const {
20547 if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
20549 unsigned NumBits1 = Ty1->getPrimitiveSizeInBits();
20550 unsigned NumBits2 = Ty2->getPrimitiveSizeInBits();
20551 return NumBits1 > NumBits2;
20554 bool X86TargetLowering::allowTruncateForTailCall(Type *Ty1, Type *Ty2) const {
20555 if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
20558 if (!isTypeLegal(EVT::getEVT(Ty1)))
20561 assert(Ty1->getPrimitiveSizeInBits() <= 64 && "i128 is probably not a noop");
20563 // Assuming the caller doesn't have a zeroext or signext return parameter,
20564 // truncation all the way down to i1 is valid.
20568 bool X86TargetLowering::isLegalICmpImmediate(int64_t Imm) const {
20569 return isInt<32>(Imm);
20572 bool X86TargetLowering::isLegalAddImmediate(int64_t Imm) const {
20573 // Can also use sub to handle negated immediates.
20574 return isInt<32>(Imm);
20577 bool X86TargetLowering::isTruncateFree(EVT VT1, EVT VT2) const {
20578 if (!VT1.isInteger() || !VT2.isInteger())
20580 unsigned NumBits1 = VT1.getSizeInBits();
20581 unsigned NumBits2 = VT2.getSizeInBits();
20582 return NumBits1 > NumBits2;
20585 bool X86TargetLowering::isZExtFree(Type *Ty1, Type *Ty2) const {
20586 // x86-64 implicitly zero-extends 32-bit results in 64-bit registers.
20587 return Ty1->isIntegerTy(32) && Ty2->isIntegerTy(64) && Subtarget->is64Bit();
20590 bool X86TargetLowering::isZExtFree(EVT VT1, EVT VT2) const {
20591 // x86-64 implicitly zero-extends 32-bit results in 64-bit registers.
20592 return VT1 == MVT::i32 && VT2 == MVT::i64 && Subtarget->is64Bit();
20595 bool X86TargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
20596 EVT VT1 = Val.getValueType();
20597 if (isZExtFree(VT1, VT2))
20600 if (Val.getOpcode() != ISD::LOAD)
20603 if (!VT1.isSimple() || !VT1.isInteger() ||
20604 !VT2.isSimple() || !VT2.isInteger())
20607 switch (VT1.getSimpleVT().SimpleTy) {
20612 // X86 has 8, 16, and 32-bit zero-extending loads.
20619 bool X86TargetLowering::isVectorLoadExtDesirable(SDValue) const { return true; }
20622 X86TargetLowering::isFMAFasterThanFMulAndFAdd(EVT VT) const {
20623 if (!(Subtarget->hasFMA() || Subtarget->hasFMA4()))
20626 VT = VT.getScalarType();
20628 if (!VT.isSimple())
20631 switch (VT.getSimpleVT().SimpleTy) {
20642 bool X86TargetLowering::isNarrowingProfitable(EVT VT1, EVT VT2) const {
20643 // i16 instructions are longer (0x66 prefix) and potentially slower.
20644 return !(VT1 == MVT::i32 && VT2 == MVT::i16);
20647 /// isShuffleMaskLegal - Targets can use this to indicate that they only
20648 /// support *some* VECTOR_SHUFFLE operations, those with specific masks.
20649 /// By default, if a target supports the VECTOR_SHUFFLE node, all mask values
20650 /// are assumed to be legal.
20652 X86TargetLowering::isShuffleMaskLegal(const SmallVectorImpl<int> &M,
20654 if (!VT.isSimple())
20657 MVT SVT = VT.getSimpleVT();
20659 // Very little shuffling can be done for 64-bit vectors right now.
20660 if (VT.getSizeInBits() == 64)
20663 // This is an experimental legality test that is tailored to match the
20664 // legality test of the experimental lowering more closely. They are gated
20665 // separately to ease testing of performance differences.
20666 if (ExperimentalVectorShuffleLegality)
20667 // We only care that the types being shuffled are legal. The lowering can
20668 // handle any possible shuffle mask that results.
20669 return isTypeLegal(SVT);
20671 // If this is a single-input shuffle with no 128-bit lane crossings we can
20672 // lower it into pshufb.
20673 if ((SVT.is128BitVector() && Subtarget->hasSSSE3()) ||
20674 (SVT.is256BitVector() && Subtarget->hasInt256())) {
20675 bool isLegal = true;
20676 for (unsigned I = 0, E = M.size(); I != E; ++I) {
20677 if (M[I] >= (int)SVT.getVectorNumElements() ||
20678 ShuffleCrosses128bitLane(SVT, I, M[I])) {
20687 // FIXME: blends, shifts.
20688 return (SVT.getVectorNumElements() == 2 ||
20689 ShuffleVectorSDNode::isSplatMask(&M[0], VT) ||
20690 isMOVLMask(M, SVT) ||
20691 isCommutedMOVLMask(M, SVT) ||
20692 isMOVHLPSMask(M, SVT) ||
20693 isSHUFPMask(M, SVT) ||
20694 isSHUFPMask(M, SVT, /* Commuted */ true) ||
20695 isPSHUFDMask(M, SVT) ||
20696 isPSHUFDMask(M, SVT, /* SecondOperand */ true) ||
20697 isPSHUFHWMask(M, SVT, Subtarget->hasInt256()) ||
20698 isPSHUFLWMask(M, SVT, Subtarget->hasInt256()) ||
20699 isPALIGNRMask(M, SVT, Subtarget) ||
20700 isUNPCKLMask(M, SVT, Subtarget->hasInt256()) ||
20701 isUNPCKHMask(M, SVT, Subtarget->hasInt256()) ||
20702 isUNPCKL_v_undef_Mask(M, SVT, Subtarget->hasInt256()) ||
20703 isUNPCKH_v_undef_Mask(M, SVT, Subtarget->hasInt256()) ||
20704 isBlendMask(M, SVT, Subtarget->hasSSE41(), Subtarget->hasInt256()) ||
20705 (Subtarget->hasSSE41() && isINSERTPSMask(M, SVT)));
20709 X86TargetLowering::isVectorClearMaskLegal(const SmallVectorImpl<int> &Mask,
20711 if (!VT.isSimple())
20714 MVT SVT = VT.getSimpleVT();
20716 // This is an experimental legality test that is tailored to match the
20717 // legality test of the experimental lowering more closely. They are gated
20718 // separately to ease testing of performance differences.
20719 if (ExperimentalVectorShuffleLegality)
20720 // The new vector shuffle lowering is very good at managing zero-inputs.
20721 return isShuffleMaskLegal(Mask, VT);
20723 unsigned NumElts = SVT.getVectorNumElements();
20724 // FIXME: This collection of masks seems suspect.
20727 if (NumElts == 4 && SVT.is128BitVector()) {
20728 return (isMOVLMask(Mask, SVT) ||
20729 isCommutedMOVLMask(Mask, SVT, true) ||
20730 isSHUFPMask(Mask, SVT) ||
20731 isSHUFPMask(Mask, SVT, /* Commuted */ true) ||
20732 isBlendMask(Mask, SVT, Subtarget->hasSSE41(),
20733 Subtarget->hasInt256()));
20738 //===----------------------------------------------------------------------===//
20739 // X86 Scheduler Hooks
20740 //===----------------------------------------------------------------------===//
20742 /// Utility function to emit xbegin specifying the start of an RTM region.
20743 static MachineBasicBlock *EmitXBegin(MachineInstr *MI, MachineBasicBlock *MBB,
20744 const TargetInstrInfo *TII) {
20745 DebugLoc DL = MI->getDebugLoc();
20747 const BasicBlock *BB = MBB->getBasicBlock();
20748 MachineFunction::iterator I = MBB;
20751 // For the v = xbegin(), we generate
//
// thisMBB:
//  xbegin sinkMBB
//
// mainMBB:
//  eax = -1
//
// sinkMBB:
//  v = eax
20762 MachineBasicBlock *thisMBB = MBB;
20763 MachineFunction *MF = MBB->getParent();
20764 MachineBasicBlock *mainMBB = MF->CreateMachineBasicBlock(BB);
20765 MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB);
20766 MF->insert(I, mainMBB);
20767 MF->insert(I, sinkMBB);
20769 // Transfer the remainder of BB and its successor edges to sinkMBB.
20770 sinkMBB->splice(sinkMBB->begin(), MBB,
20771 std::next(MachineBasicBlock::iterator(MI)), MBB->end());
20772 sinkMBB->transferSuccessorsAndUpdatePHIs(MBB);
20776 // # fallthrough to mainMBB
20777 // # on abort, jump to sinkMBB
20778 BuildMI(thisMBB, DL, TII->get(X86::XBEGIN_4)).addMBB(sinkMBB);
20779 thisMBB->addSuccessor(mainMBB);
20780 thisMBB->addSuccessor(sinkMBB);
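// mainMBB is the successful-start path: materialize -1 (the XBEGIN_STARTED
// convention) in EAX. On an abort the hardware resumes at the fallback label
// (sinkMBB) with the abort status in EAX instead.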
20784 BuildMI(mainMBB, DL, TII->get(X86::MOV32ri), X86::EAX).addImm(-1);
20785 mainMBB->addSuccessor(sinkMBB);
20788 // EAX is live into the sinkMBB
20789 sinkMBB->addLiveIn(X86::EAX);
20790 BuildMI(*sinkMBB, sinkMBB->begin(), DL,
20791 TII->get(TargetOpcode::COPY), MI->getOperand(0).getReg())
20794 MI->eraseFromParent();
20798 // FIXME: When we get size-specific XMM0 registers, i.e. XMM0_V16I8
20799 // or XMM0_V32I8 in AVX, all of this code can be replaced with patterns
20800 // in the .td file.
20801 static MachineBasicBlock *EmitPCMPSTRM(MachineInstr *MI, MachineBasicBlock *BB,
20802 const TargetInstrInfo *TII) {
20804 switch (MI->getOpcode()) {
20805 default: llvm_unreachable("illegal opcode!");
20806 case X86::PCMPISTRM128REG: Opc = X86::PCMPISTRM128rr; break;
20807 case X86::VPCMPISTRM128REG: Opc = X86::VPCMPISTRM128rr; break;
20808 case X86::PCMPISTRM128MEM: Opc = X86::PCMPISTRM128rm; break;
20809 case X86::VPCMPISTRM128MEM: Opc = X86::VPCMPISTRM128rm; break;
20810 case X86::PCMPESTRM128REG: Opc = X86::PCMPESTRM128rr; break;
20811 case X86::VPCMPESTRM128REG: Opc = X86::VPCMPESTRM128rr; break;
20812 case X86::PCMPESTRM128MEM: Opc = X86::PCMPESTRM128rm; break;
20813 case X86::VPCMPESTRM128MEM: Opc = X86::VPCMPESTRM128rm; break;
20816 DebugLoc dl = MI->getDebugLoc();
20817 MachineInstrBuilder MIB = BuildMI(*BB, MI, dl, TII->get(Opc));
20819 unsigned NumArgs = MI->getNumOperands();
20820 for (unsigned i = 1; i < NumArgs; ++i) {
20821 MachineOperand &Op = MI->getOperand(i);
20822 if (!(Op.isReg() && Op.isImplicit()))
20823 MIB.addOperand(Op);
20825 if (MI->hasOneMemOperand())
20826 MIB->setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
20828 BuildMI(*BB, MI, dl,
20829 TII->get(TargetOpcode::COPY), MI->getOperand(0).getReg())
20830 .addReg(X86::XMM0);
20832 MI->eraseFromParent();
20836 // FIXME: Custom handling because TableGen doesn't support multiple implicit
20837 // defs in an instruction pattern
20838 static MachineBasicBlock *EmitPCMPSTRI(MachineInstr *MI, MachineBasicBlock *BB,
20839 const TargetInstrInfo *TII) {
20841 switch (MI->getOpcode()) {
20842 default: llvm_unreachable("illegal opcode!");
20843 case X86::PCMPISTRIREG: Opc = X86::PCMPISTRIrr; break;
20844 case X86::VPCMPISTRIREG: Opc = X86::VPCMPISTRIrr; break;
20845 case X86::PCMPISTRIMEM: Opc = X86::PCMPISTRIrm; break;
20846 case X86::VPCMPISTRIMEM: Opc = X86::VPCMPISTRIrm; break;
20847 case X86::PCMPESTRIREG: Opc = X86::PCMPESTRIrr; break;
20848 case X86::VPCMPESTRIREG: Opc = X86::VPCMPESTRIrr; break;
20849 case X86::PCMPESTRIMEM: Opc = X86::PCMPESTRIrm; break;
20850 case X86::VPCMPESTRIMEM: Opc = X86::VPCMPESTRIrm; break;
20853 DebugLoc dl = MI->getDebugLoc();
20854 MachineInstrBuilder MIB = BuildMI(*BB, MI, dl, TII->get(Opc));
20856 unsigned NumArgs = MI->getNumOperands(); // remove the results
20857 for (unsigned i = 1; i < NumArgs; ++i) {
20858 MachineOperand &Op = MI->getOperand(i);
20859 if (!(Op.isReg() && Op.isImplicit()))
20860 MIB.addOperand(Op);
20862 if (MI->hasOneMemOperand())
20863 MIB->setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
20865 BuildMI(*BB, MI, dl,
20866 TII->get(TargetOpcode::COPY), MI->getOperand(0).getReg())
20869 MI->eraseFromParent();
20873 static MachineBasicBlock *EmitMonitor(MachineInstr *MI, MachineBasicBlock *BB,
20874 const X86Subtarget *Subtarget) {
20875 DebugLoc dl = MI->getDebugLoc();
20876 const TargetInstrInfo *TII = Subtarget->getInstrInfo();
20877 // Address into RAX/EAX, other two args into ECX, EDX.
20878 unsigned MemOpc = Subtarget->is64Bit() ? X86::LEA64r : X86::LEA32r;
20879 unsigned MemReg = Subtarget->is64Bit() ? X86::RAX : X86::EAX;
20880 MachineInstrBuilder MIB = BuildMI(*BB, MI, dl, TII->get(MemOpc), MemReg);
20881 for (int i = 0; i < X86::AddrNumOperands; ++i)
20882 MIB.addOperand(MI->getOperand(i));
20884 unsigned ValOps = X86::AddrNumOperands;
20885 BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY), X86::ECX)
20886 .addReg(MI->getOperand(ValOps).getReg());
20887 BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY), X86::EDX)
20888 .addReg(MI->getOperand(ValOps+1).getReg());
20890 // MONITOR itself takes no explicit operands; it reads RAX/EAX, ECX, and EDX implicitly.
20891 BuildMI(*BB, MI, dl, TII->get(X86::MONITORrrr));
20893 MI->eraseFromParent(); // The pseudo is gone now.
20897 MachineBasicBlock *
20898 X86TargetLowering::EmitVAARG64WithCustomInserter(MachineInstr *MI,
20899 MachineBasicBlock *MBB) const {
20900 // Emit va_arg instruction on X86-64.
20902 // Operands to this pseudo-instruction:
20903 // 0 ) Output : destination address (reg)
20904 // 1-5) Input : va_list address (addr, i64mem)
20905 // 6 ) ArgSize : Size (in bytes) of vararg type
20906 // 7 ) ArgMode : 0=overflow only, 1=use gp_offset, 2=use fp_offset
20907 // 8 ) Align : Alignment of type
20908 // 9 ) EFLAGS (implicit-def)
20910 assert(MI->getNumOperands() == 10 && "VAARG_64 should have 10 operands!");
20911 assert(X86::AddrNumOperands == 5 && "VAARG_64 assumes 5 address operands");
20913 unsigned DestReg = MI->getOperand(0).getReg();
20914 MachineOperand &Base = MI->getOperand(1);
20915 MachineOperand &Scale = MI->getOperand(2);
20916 MachineOperand &Index = MI->getOperand(3);
20917 MachineOperand &Disp = MI->getOperand(4);
20918 MachineOperand &Segment = MI->getOperand(5);
20919 unsigned ArgSize = MI->getOperand(6).getImm();
20920 unsigned ArgMode = MI->getOperand(7).getImm();
20921 unsigned Align = MI->getOperand(8).getImm();
20923 // Memory Reference
20924 assert(MI->hasOneMemOperand() && "Expected VAARG_64 to have one memoperand");
20925 MachineInstr::mmo_iterator MMOBegin = MI->memoperands_begin();
20926 MachineInstr::mmo_iterator MMOEnd = MI->memoperands_end();
20928 // Machine Information
20929 const TargetInstrInfo *TII = Subtarget->getInstrInfo();
20930 MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
20931 const TargetRegisterClass *AddrRegClass = getRegClassFor(MVT::i64);
20932 const TargetRegisterClass *OffsetRegClass = getRegClassFor(MVT::i32);
20933 DebugLoc DL = MI->getDebugLoc();
20935 // struct va_list {
//   i32   gp_offset
//   i32   fp_offset
20938 //   i64 overflow_area (address)
20939 //   i64 reg_save_area (address)
// }
20941 // sizeof(va_list) = 24
20942 // alignment(va_list) = 8
20944 unsigned TotalNumIntRegs = 6;
20945 unsigned TotalNumXMMRegs = 8;
20946 bool UseGPOffset = (ArgMode == 1);
20947 bool UseFPOffset = (ArgMode == 2);
20948 unsigned MaxOffset = TotalNumIntRegs * 8 +
20949 (UseFPOffset ? TotalNumXMMRegs * 16 : 0);
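// Six integer registers at 8 bytes each give gp_offset a ceiling of 48;
// with the eight 16-byte XMM slots behind them, fp_offset can reach
// 48 + 128 = 176 before the register save area is exhausted.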
20951 // Align ArgSize to a multiple of 8.
20952 unsigned ArgSizeA8 = (ArgSize + 7) & ~7;
20953 bool NeedsAlign = (Align > 8);
20955 MachineBasicBlock *thisMBB = MBB;
20956 MachineBasicBlock *overflowMBB;
20957 MachineBasicBlock *offsetMBB;
20958 MachineBasicBlock *endMBB;
20960 unsigned OffsetDestReg = 0; // Argument address computed by offsetMBB
20961 unsigned OverflowDestReg = 0; // Argument address computed by overflowMBB
20962 unsigned OffsetReg = 0;
20964 if (!UseGPOffset && !UseFPOffset) {
20965 // If we only pull from the overflow region, we don't create a branch.
20966 // We don't need to alter control flow.
20967 OffsetDestReg = 0; // unused
20968 OverflowDestReg = DestReg;
20970 offsetMBB = nullptr;
20971 overflowMBB = thisMBB;
20974 // First emit code to check if gp_offset (or fp_offset) is below the bound.
20975 // If so, pull the argument from reg_save_area. (branch to offsetMBB)
20976 // If not, pull from overflow_area. (branch to overflowMBB)
20981 // offsetMBB overflowMBB
20986 // Registers for the PHI in endMBB
20987 OffsetDestReg = MRI.createVirtualRegister(AddrRegClass);
20988 OverflowDestReg = MRI.createVirtualRegister(AddrRegClass);
20990 const BasicBlock *LLVM_BB = MBB->getBasicBlock();
20991 MachineFunction *MF = MBB->getParent();
20992 overflowMBB = MF->CreateMachineBasicBlock(LLVM_BB);
20993 offsetMBB = MF->CreateMachineBasicBlock(LLVM_BB);
20994 endMBB = MF->CreateMachineBasicBlock(LLVM_BB);
20996 MachineFunction::iterator MBBIter = MBB;
20999 // Insert the new basic blocks
21000 MF->insert(MBBIter, offsetMBB);
21001 MF->insert(MBBIter, overflowMBB);
21002 MF->insert(MBBIter, endMBB);
21004 // Transfer the remainder of MBB and its successor edges to endMBB.
21005 endMBB->splice(endMBB->begin(), thisMBB,
21006 std::next(MachineBasicBlock::iterator(MI)), thisMBB->end());
21007 endMBB->transferSuccessorsAndUpdatePHIs(thisMBB);
21009 // Make offsetMBB and overflowMBB successors of thisMBB
21010 thisMBB->addSuccessor(offsetMBB);
21011 thisMBB->addSuccessor(overflowMBB);
21013 // endMBB is a successor of both offsetMBB and overflowMBB
21014 offsetMBB->addSuccessor(endMBB);
21015 overflowMBB->addSuccessor(endMBB);
21017 // Load the offset value into a register
21018 OffsetReg = MRI.createVirtualRegister(OffsetRegClass);
21019 BuildMI(thisMBB, DL, TII->get(X86::MOV32rm), OffsetReg)
21023 .addDisp(Disp, UseFPOffset ? 4 : 0)
21024 .addOperand(Segment)
21025 .setMemRefs(MMOBegin, MMOEnd);
21027 // Check if there is enough room left to pull this argument.
21028 BuildMI(thisMBB, DL, TII->get(X86::CMP32ri))
21030 .addImm(MaxOffset + 8 - ArgSizeA8);
21032 // Branch to "overflowMBB" if offset >= max
21033 // Fall through to "offsetMBB" otherwise
21034 BuildMI(thisMBB, DL, TII->get(X86::GetCondBranchFromCond(X86::COND_AE)))
21035 .addMBB(overflowMBB);
21038 // In offsetMBB, emit code to use the reg_save_area.
21040 assert(OffsetReg != 0);
21042 // Read the reg_save_area address.
21043 unsigned RegSaveReg = MRI.createVirtualRegister(AddrRegClass);
21044 BuildMI(offsetMBB, DL, TII->get(X86::MOV64rm), RegSaveReg)
21049 .addOperand(Segment)
21050 .setMemRefs(MMOBegin, MMOEnd);
21052 // Zero-extend the offset
21053 unsigned OffsetReg64 = MRI.createVirtualRegister(AddrRegClass);
21054 BuildMI(offsetMBB, DL, TII->get(X86::SUBREG_TO_REG), OffsetReg64)
21057 .addImm(X86::sub_32bit);
21059 // Add the offset to the reg_save_area to get the final address.
21060 BuildMI(offsetMBB, DL, TII->get(X86::ADD64rr), OffsetDestReg)
21061 .addReg(OffsetReg64)
21062 .addReg(RegSaveReg);
21064 // Compute the offset for the next argument
21065 unsigned NextOffsetReg = MRI.createVirtualRegister(OffsetRegClass);
21066 BuildMI(offsetMBB, DL, TII->get(X86::ADD32ri), NextOffsetReg)
21068 .addImm(UseFPOffset ? 16 : 8);
21070 // Store it back into the va_list.
21071 BuildMI(offsetMBB, DL, TII->get(X86::MOV32mr))
21075 .addDisp(Disp, UseFPOffset ? 4 : 0)
21076 .addOperand(Segment)
21077 .addReg(NextOffsetReg)
21078 .setMemRefs(MMOBegin, MMOEnd);
21081 BuildMI(offsetMBB, DL, TII->get(X86::JMP_1))
21086 // Emit code to use overflow area
21089 // Load the overflow_area address into a register.
21090 unsigned OverflowAddrReg = MRI.createVirtualRegister(AddrRegClass);
21091 BuildMI(overflowMBB, DL, TII->get(X86::MOV64rm), OverflowAddrReg)
21096 .addOperand(Segment)
21097 .setMemRefs(MMOBegin, MMOEnd);
21099 // If we need to align it, do so. Otherwise, just copy the address
21100 // to OverflowDestReg.
21102 // Align the overflow address
21103 assert((Align & (Align-1)) == 0 && "Alignment must be a power of 2");
21104 unsigned TmpReg = MRI.createVirtualRegister(AddrRegClass);
21106 // aligned_addr = (addr + (align-1)) & ~(align-1)
21107 BuildMI(overflowMBB, DL, TII->get(X86::ADD64ri32), TmpReg)
21108 .addReg(OverflowAddrReg)
21111 BuildMI(overflowMBB, DL, TII->get(X86::AND64ri32), OverflowDestReg)
21113 .addImm(~(uint64_t)(Align-1));
21115 BuildMI(overflowMBB, DL, TII->get(TargetOpcode::COPY), OverflowDestReg)
21116 .addReg(OverflowAddrReg);
21119 // Compute the next overflow address after this argument.
21120 // (the overflow address should be kept 8-byte aligned)
21121 unsigned NextAddrReg = MRI.createVirtualRegister(AddrRegClass);
21122 BuildMI(overflowMBB, DL, TII->get(X86::ADD64ri32), NextAddrReg)
21123 .addReg(OverflowDestReg)
21124 .addImm(ArgSizeA8);
21126 // Store the new overflow address.
21127 BuildMI(overflowMBB, DL, TII->get(X86::MOV64mr))
21132 .addOperand(Segment)
21133 .addReg(NextAddrReg)
21134 .setMemRefs(MMOBegin, MMOEnd);
21136 // If we branched, emit the PHI to the front of endMBB.
21138 BuildMI(*endMBB, endMBB->begin(), DL,
21139 TII->get(X86::PHI), DestReg)
21140 .addReg(OffsetDestReg).addMBB(offsetMBB)
21141 .addReg(OverflowDestReg).addMBB(overflowMBB);
21144 // Erase the pseudo instruction
21145 MI->eraseFromParent();
21150 MachineBasicBlock *
21151 X86TargetLowering::EmitVAStartSaveXMMRegsWithCustomInserter(
21153 MachineBasicBlock *MBB) const {
21154 // Emit code to save XMM registers to the stack. The ABI says that the
21155 // number of registers to save is given in %al, so it's theoretically
21156 // possible to do an indirect jump trick to avoid saving all of them;
21157 // however, this code takes a simpler approach and just executes all
21158 // of the stores if %al is non-zero. It's less code, and it's probably
21159 // easier on the hardware branch predictor, and stores aren't all that
21160 // expensive anyway.
21162 // Create the new basic blocks. One block contains all the XMM stores,
21163 // and one block is the final destination regardless of whether any
21164 // stores were performed.
21165 const BasicBlock *LLVM_BB = MBB->getBasicBlock();
21166 MachineFunction *F = MBB->getParent();
21167 MachineFunction::iterator MBBIter = MBB;
21169 MachineBasicBlock *XMMSaveMBB = F->CreateMachineBasicBlock(LLVM_BB);
21170 MachineBasicBlock *EndMBB = F->CreateMachineBasicBlock(LLVM_BB);
21171 F->insert(MBBIter, XMMSaveMBB);
21172 F->insert(MBBIter, EndMBB);
21174 // Transfer the remainder of MBB and its successor edges to EndMBB.
21175 EndMBB->splice(EndMBB->begin(), MBB,
21176 std::next(MachineBasicBlock::iterator(MI)), MBB->end());
21177 EndMBB->transferSuccessorsAndUpdatePHIs(MBB);
21179 // The original block will now fall through to the XMM save block.
21180 MBB->addSuccessor(XMMSaveMBB);
21181 // The XMMSaveMBB will fall through to the end block.
21182 XMMSaveMBB->addSuccessor(EndMBB);
21184 // Now add the instructions.
21185 const TargetInstrInfo *TII = Subtarget->getInstrInfo();
21186 DebugLoc DL = MI->getDebugLoc();
21188 unsigned CountReg = MI->getOperand(0).getReg();
21189 int64_t RegSaveFrameIndex = MI->getOperand(1).getImm();
21190 int64_t VarArgsFPOffset = MI->getOperand(2).getImm();
21192 if (!Subtarget->isTargetWin64()) {
21193 // If %al is 0, branch around the XMM save block.
21194 BuildMI(MBB, DL, TII->get(X86::TEST8rr)).addReg(CountReg).addReg(CountReg);
21195 BuildMI(MBB, DL, TII->get(X86::JE_1)).addMBB(EndMBB);
21196 MBB->addSuccessor(EndMBB);
21199 // Make sure the last operand is EFLAGS, which gets clobbered by the branch
21200 // that was just emitted, but clearly shouldn't be "saved".
21201 assert((MI->getNumOperands() <= 3 ||
21202 !MI->getOperand(MI->getNumOperands() - 1).isReg() ||
21203 MI->getOperand(MI->getNumOperands() - 1).getReg() == X86::EFLAGS)
21204 && "Expected last argument to be EFLAGS");
21205 unsigned MOVOpc = Subtarget->hasFp256() ? X86::VMOVAPSmr : X86::MOVAPSmr;
21206 // In the XMM save block, save all the XMM argument registers.
21207 for (int i = 3, e = MI->getNumOperands() - 1; i != e; ++i) {
21208 int64_t Offset = (i - 3) * 16 + VarArgsFPOffset;
21209 MachineMemOperand *MMO =
21210 F->getMachineMemOperand(
21211 MachinePointerInfo::getFixedStack(RegSaveFrameIndex, Offset),
21212 MachineMemOperand::MOStore,
21213 /*Size=*/16, /*Align=*/16);
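// Each XMM argument register is stored 16-byte aligned at a 16-byte stride
// above VarArgsFPOffset within the register save area frame object.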
21214 BuildMI(XMMSaveMBB, DL, TII->get(MOVOpc))
21215 .addFrameIndex(RegSaveFrameIndex)
21216 .addImm(/*Scale=*/1)
21217 .addReg(/*IndexReg=*/0)
21218 .addImm(/*Disp=*/Offset)
21219 .addReg(/*Segment=*/0)
21220 .addReg(MI->getOperand(i).getReg())
21221 .addMemOperand(MMO);
21224 MI->eraseFromParent(); // The pseudo instruction is gone now.
21229 // The EFLAGS operand of SelectItr might be missing a kill marker
21230 // because there were multiple uses of EFLAGS, and ISel didn't know
21231 // which to mark. Figure out whether SelectItr should have had a
21232 // kill marker, and set it if it should. Returns the correct kill state.
21234 static bool checkAndUpdateEFLAGSKill(MachineBasicBlock::iterator SelectItr,
21235 MachineBasicBlock* BB,
21236 const TargetRegisterInfo* TRI) {
21237 // Scan forward through BB for a use/def of EFLAGS.
21238 MachineBasicBlock::iterator miI(std::next(SelectItr));
21239 for (MachineBasicBlock::iterator miE = BB->end(); miI != miE; ++miI) {
21240 const MachineInstr& mi = *miI;
21241 if (mi.readsRegister(X86::EFLAGS))
21243 if (mi.definesRegister(X86::EFLAGS))
21244 break; // Should have kill-flag - update below.
21247 // If we hit the end of the block, check whether EFLAGS is live into a
21249 if (miI == BB->end()) {
21250 for (MachineBasicBlock::succ_iterator sItr = BB->succ_begin(),
21251 sEnd = BB->succ_end();
21252 sItr != sEnd; ++sItr) {
21253 MachineBasicBlock* succ = *sItr;
21254 if (succ->isLiveIn(X86::EFLAGS))
21259 // We found a def, or hit the end of the basic block and EFLAGS wasn't live
21260 // out. SelectMI should have a kill flag on EFLAGS.
21261 SelectItr->addRegisterKilled(X86::EFLAGS, TRI);
21265 MachineBasicBlock *
21266 X86TargetLowering::EmitLoweredSelect(MachineInstr *MI,
21267 MachineBasicBlock *BB) const {
21268 const TargetInstrInfo *TII = Subtarget->getInstrInfo();
21269 DebugLoc DL = MI->getDebugLoc();
21271 // To "insert" a SELECT_CC instruction, we actually have to insert the
21272 // diamond control-flow pattern. The incoming instruction knows the
21273 // destination vreg to set, the condition code register to branch on, the
21274 // true/false values to select between, and a branch opcode to use.
21275 const BasicBlock *LLVM_BB = BB->getBasicBlock();
21276 MachineFunction::iterator It = BB;
21282 // cmpTY ccX, r1, r2
21284 // fallthrough --> copy0MBB
21285 MachineBasicBlock *thisMBB = BB;
21286 MachineFunction *F = BB->getParent();
21287 MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB);
21288 MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
21289 F->insert(It, copy0MBB);
21290 F->insert(It, sinkMBB);
21292 // If the EFLAGS register isn't dead in the terminator, then claim that it's
21293 // live into the sink and copy blocks.
21294 const TargetRegisterInfo *TRI = Subtarget->getRegisterInfo();
21295 if (!MI->killsRegister(X86::EFLAGS) &&
21296 !checkAndUpdateEFLAGSKill(MI, BB, TRI)) {
21297 copy0MBB->addLiveIn(X86::EFLAGS);
21298 sinkMBB->addLiveIn(X86::EFLAGS);
21301 // Transfer the remainder of BB and its successor edges to sinkMBB.
21302 sinkMBB->splice(sinkMBB->begin(), BB,
21303 std::next(MachineBasicBlock::iterator(MI)), BB->end());
21304 sinkMBB->transferSuccessorsAndUpdatePHIs(BB);
21306 // Add the true and fallthrough blocks as its successors.
21307 BB->addSuccessor(copy0MBB);
21308 BB->addSuccessor(sinkMBB);
21310 // Create the conditional branch instruction.
21312 unsigned Opc = X86::GetCondBranchFromCond((X86::CondCode)MI->getOperand(3).getImm());
21313 BuildMI(BB, DL, TII->get(Opc)).addMBB(sinkMBB);
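// thisMBB branches to sinkMBB when the condition holds, so the PHI in sinkMBB
// takes the true value (operand 2) from thisMBB and the false value
// (operand 1) from the copy0MBB fallthrough path.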
21316 // %FalseValue = ...
21317 // # fallthrough to sinkMBB
21318 copy0MBB->addSuccessor(sinkMBB);
21321 // %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
21323 BuildMI(*sinkMBB, sinkMBB->begin(), DL,
21324 TII->get(X86::PHI), MI->getOperand(0).getReg())
21325 .addReg(MI->getOperand(1).getReg()).addMBB(copy0MBB)
21326 .addReg(MI->getOperand(2).getReg()).addMBB(thisMBB);
21328 MI->eraseFromParent(); // The pseudo instruction is gone now.
21332 MachineBasicBlock *
21333 X86TargetLowering::EmitLoweredSegAlloca(MachineInstr *MI,
21334 MachineBasicBlock *BB) const {
21335 MachineFunction *MF = BB->getParent();
21336 const TargetInstrInfo *TII = Subtarget->getInstrInfo();
21337 DebugLoc DL = MI->getDebugLoc();
21338 const BasicBlock *LLVM_BB = BB->getBasicBlock();
21340 assert(MF->shouldSplitStack());
21342 const bool Is64Bit = Subtarget->is64Bit();
21343 const bool IsLP64 = Subtarget->isTarget64BitLP64();
21345 const unsigned TlsReg = Is64Bit ? X86::FS : X86::GS;
21346 const unsigned TlsOffset = IsLP64 ? 0x70 : Is64Bit ? 0x40 : 0x30;
21349 // ... [Till the alloca]
21350 // If stacklet is not large enough, jump to mallocMBB
21353 // Allocate by subtracting from RSP
21354 // Jump to continueMBB
21357 // Allocate by call to runtime
21361 // [rest of original BB]
21364 MachineBasicBlock *mallocMBB = MF->CreateMachineBasicBlock(LLVM_BB);
21365 MachineBasicBlock *bumpMBB = MF->CreateMachineBasicBlock(LLVM_BB);
21366 MachineBasicBlock *continueMBB = MF->CreateMachineBasicBlock(LLVM_BB);
21368 MachineRegisterInfo &MRI = MF->getRegInfo();
21369 const TargetRegisterClass *AddrRegClass =
21370 getRegClassFor(getPointerTy());
21372 unsigned mallocPtrVReg = MRI.createVirtualRegister(AddrRegClass),
21373 bumpSPPtrVReg = MRI.createVirtualRegister(AddrRegClass),
21374 tmpSPVReg = MRI.createVirtualRegister(AddrRegClass),
21375 SPLimitVReg = MRI.createVirtualRegister(AddrRegClass),
21376 sizeVReg = MI->getOperand(1).getReg(),
21377 physSPReg = IsLP64 || Subtarget->isTargetNaCl64() ? X86::RSP : X86::ESP;
21379 MachineFunction::iterator MBBIter = BB;
21382 MF->insert(MBBIter, bumpMBB);
21383 MF->insert(MBBIter, mallocMBB);
21384 MF->insert(MBBIter, continueMBB);
21386 continueMBB->splice(continueMBB->begin(), BB,
21387 std::next(MachineBasicBlock::iterator(MI)), BB->end());
21388 continueMBB->transferSuccessorsAndUpdatePHIs(BB);
21390 // Add code to the main basic block to check if the stack limit has been hit,
21391 // and if so, jump to mallocMBB otherwise to bumpMBB.
21392 BuildMI(BB, DL, TII->get(TargetOpcode::COPY), tmpSPVReg).addReg(physSPReg);
21393 BuildMI(BB, DL, TII->get(IsLP64 ? X86::SUB64rr:X86::SUB32rr), SPLimitVReg)
21394 .addReg(tmpSPVReg).addReg(sizeVReg);
21395 BuildMI(BB, DL, TII->get(IsLP64 ? X86::CMP64mr:X86::CMP32mr))
21396 .addReg(0).addImm(1).addReg(0).addImm(TlsOffset).addReg(TlsReg)
21397 .addReg(SPLimitVReg);
21398 BuildMI(BB, DL, TII->get(X86::JG_1)).addMBB(mallocMBB);
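// SPLimitVReg holds the prospective new stack pointer (old SP minus the
// allocation size). Comparing it against the stacklet limit stored at the
// thread-local slot decides whether the in-place bump in bumpMBB is safe or
// whether mallocMBB must call into the runtime to grow the stack.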
21400 // bumpMBB simply decreases the stack pointer, since we know the current
21401 // stacklet has enough space.
21402 BuildMI(bumpMBB, DL, TII->get(TargetOpcode::COPY), physSPReg)
21403 .addReg(SPLimitVReg);
21404 BuildMI(bumpMBB, DL, TII->get(TargetOpcode::COPY), bumpSPPtrVReg)
21405 .addReg(SPLimitVReg);
21406 BuildMI(bumpMBB, DL, TII->get(X86::JMP_1)).addMBB(continueMBB);
21408 // Calls into a routine in libgcc to allocate more space from the heap.
21409 const uint32_t *RegMask =
21410 Subtarget->getRegisterInfo()->getCallPreservedMask(CallingConv::C);
21412 BuildMI(mallocMBB, DL, TII->get(X86::MOV64rr), X86::RDI)
21414 BuildMI(mallocMBB, DL, TII->get(X86::CALL64pcrel32))
21415 .addExternalSymbol("__morestack_allocate_stack_space")
21416 .addRegMask(RegMask)
21417 .addReg(X86::RDI, RegState::Implicit)
21418 .addReg(X86::RAX, RegState::ImplicitDefine);
21419 } else if (Is64Bit) {
21420 BuildMI(mallocMBB, DL, TII->get(X86::MOV32rr), X86::EDI)
21422 BuildMI(mallocMBB, DL, TII->get(X86::CALL64pcrel32))
21423 .addExternalSymbol("__morestack_allocate_stack_space")
21424 .addRegMask(RegMask)
21425 .addReg(X86::EDI, RegState::Implicit)
21426 .addReg(X86::EAX, RegState::ImplicitDefine);
21428 BuildMI(mallocMBB, DL, TII->get(X86::SUB32ri), physSPReg).addReg(physSPReg)
21430 BuildMI(mallocMBB, DL, TII->get(X86::PUSH32r)).addReg(sizeVReg);
21431 BuildMI(mallocMBB, DL, TII->get(X86::CALLpcrel32))
21432 .addExternalSymbol("__morestack_allocate_stack_space")
21433 .addRegMask(RegMask)
21434 .addReg(X86::EAX, RegState::ImplicitDefine);
21438 BuildMI(mallocMBB, DL, TII->get(X86::ADD32ri), physSPReg).addReg(physSPReg)
21441 BuildMI(mallocMBB, DL, TII->get(TargetOpcode::COPY), mallocPtrVReg)
21442 .addReg(IsLP64 ? X86::RAX : X86::EAX);
21443 BuildMI(mallocMBB, DL, TII->get(X86::JMP_1)).addMBB(continueMBB);
21445 // Set up the CFG correctly.
21446 BB->addSuccessor(bumpMBB);
21447 BB->addSuccessor(mallocMBB);
21448 mallocMBB->addSuccessor(continueMBB);
21449 bumpMBB->addSuccessor(continueMBB);
21451 // Take care of the PHI nodes.
21452 BuildMI(*continueMBB, continueMBB->begin(), DL, TII->get(X86::PHI),
21453 MI->getOperand(0).getReg())
21454 .addReg(mallocPtrVReg).addMBB(mallocMBB)
21455 .addReg(bumpSPPtrVReg).addMBB(bumpMBB);
21457 // Delete the original pseudo instruction.
21458 MI->eraseFromParent();
21461 return continueMBB;
21464 MachineBasicBlock *
21465 X86TargetLowering::EmitLoweredWinAlloca(MachineInstr *MI,
21466 MachineBasicBlock *BB) const {
21467 DebugLoc DL = MI->getDebugLoc();
21469 assert(!Subtarget->isTargetMachO());
21471 X86FrameLowering::emitStackProbeCall(*BB->getParent(), *BB, MI, DL);
21473 MI->eraseFromParent(); // The pseudo instruction is gone now.
21477 MachineBasicBlock *
21478 X86TargetLowering::EmitLoweredTLSCall(MachineInstr *MI,
21479 MachineBasicBlock *BB) const {
21480 // This is pretty easy. We're taking the value that we received from
21481 // our load from the relocation, sticking it in either RDI (x86-64)
21482 // or EAX and doing an indirect call. The return value will then
21483 // be in the normal return register.
21484 MachineFunction *F = BB->getParent();
21485 const X86InstrInfo *TII = Subtarget->getInstrInfo();
21486 DebugLoc DL = MI->getDebugLoc();
21488 assert(Subtarget->isTargetDarwin() && "Darwin only instr emitted?");
21489 assert(MI->getOperand(3).isGlobal() && "This should be a global");
21491 // Get a register mask for the lowered call.
21492 // FIXME: The 32-bit calls have non-standard calling conventions. Use a
21493 // proper register mask.
21494 const uint32_t *RegMask =
21495 Subtarget->getRegisterInfo()->getCallPreservedMask(CallingConv::C);
21496 if (Subtarget->is64Bit()) {
21497 MachineInstrBuilder MIB = BuildMI(*BB, MI, DL,
21498 TII->get(X86::MOV64rm), X86::RDI)
21500 .addImm(0).addReg(0)
21501 .addGlobalAddress(MI->getOperand(3).getGlobal(), 0,
21502 MI->getOperand(3).getTargetFlags())
21504 MIB = BuildMI(*BB, MI, DL, TII->get(X86::CALL64m));
21505 addDirectMem(MIB, X86::RDI);
21506 MIB.addReg(X86::RAX, RegState::ImplicitDefine).addRegMask(RegMask);
21507 } else if (F->getTarget().getRelocationModel() != Reloc::PIC_) {
21508 MachineInstrBuilder MIB = BuildMI(*BB, MI, DL,
21509 TII->get(X86::MOV32rm), X86::EAX)
21511 .addImm(0).addReg(0)
21512 .addGlobalAddress(MI->getOperand(3).getGlobal(), 0,
21513 MI->getOperand(3).getTargetFlags())
21515 MIB = BuildMI(*BB, MI, DL, TII->get(X86::CALL32m));
21516 addDirectMem(MIB, X86::EAX);
21517 MIB.addReg(X86::EAX, RegState::ImplicitDefine).addRegMask(RegMask);
21519 MachineInstrBuilder MIB = BuildMI(*BB, MI, DL,
21520 TII->get(X86::MOV32rm), X86::EAX)
21521 .addReg(TII->getGlobalBaseReg(F))
21522 .addImm(0).addReg(0)
21523 .addGlobalAddress(MI->getOperand(3).getGlobal(), 0,
21524 MI->getOperand(3).getTargetFlags())
21526 MIB = BuildMI(*BB, MI, DL, TII->get(X86::CALL32m));
21527 addDirectMem(MIB, X86::EAX);
21528 MIB.addReg(X86::EAX, RegState::ImplicitDefine).addRegMask(RegMask);
21531 MI->eraseFromParent(); // The pseudo instruction is gone now.
21535 MachineBasicBlock *
21536 X86TargetLowering::emitEHSjLjSetJmp(MachineInstr *MI,
21537 MachineBasicBlock *MBB) const {
21538 DebugLoc DL = MI->getDebugLoc();
21539 MachineFunction *MF = MBB->getParent();
21540 const TargetInstrInfo *TII = Subtarget->getInstrInfo();
21541 MachineRegisterInfo &MRI = MF->getRegInfo();
21543 const BasicBlock *BB = MBB->getBasicBlock();
21544 MachineFunction::iterator I = MBB;
21547 // Memory Reference
21548 MachineInstr::mmo_iterator MMOBegin = MI->memoperands_begin();
21549 MachineInstr::mmo_iterator MMOEnd = MI->memoperands_end();
21552 unsigned MemOpndSlot = 0;
21554 unsigned CurOp = 0;
21556 DstReg = MI->getOperand(CurOp++).getReg();
21557 const TargetRegisterClass *RC = MRI.getRegClass(DstReg);
21558 assert(RC->hasType(MVT::i32) && "Invalid destination!");
21559 unsigned mainDstReg = MRI.createVirtualRegister(RC);
21560 unsigned restoreDstReg = MRI.createVirtualRegister(RC);
21562 MemOpndSlot = CurOp;
21564 MVT PVT = getPointerTy();
21565 assert((PVT == MVT::i64 || PVT == MVT::i32) &&
21566 "Invalid Pointer Size!");
21568 // For v = setjmp(buf), we generate
21571 // buf[LabelOffset] = restoreMBB
21572 // SjLjSetup restoreMBB
21578 // v = phi(main, restore)
21581 // If the base pointer is being used, reload it from the frame.
21584 MachineBasicBlock *thisMBB = MBB;
21585 MachineBasicBlock *mainMBB = MF->CreateMachineBasicBlock(BB);
21586 MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB);
21587 MachineBasicBlock *restoreMBB = MF->CreateMachineBasicBlock(BB);
21588 MF->insert(I, mainMBB);
21589 MF->insert(I, sinkMBB);
21590 MF->push_back(restoreMBB);
21592 MachineInstrBuilder MIB;
21594 // Transfer the remainder of BB and its successor edges to sinkMBB.
21595 sinkMBB->splice(sinkMBB->begin(), MBB,
21596 std::next(MachineBasicBlock::iterator(MI)), MBB->end());
21597 sinkMBB->transferSuccessorsAndUpdatePHIs(MBB);
21600 unsigned PtrStoreOpc = 0;
21601 unsigned LabelReg = 0;
21602 const int64_t LabelOffset = 1 * PVT.getStoreSize();
21603 Reloc::Model RM = MF->getTarget().getRelocationModel();
21604 bool UseImmLabel = (MF->getTarget().getCodeModel() == CodeModel::Small) &&
21605 (RM == Reloc::Static || RM == Reloc::DynamicNoPIC);
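// The restore label can be stored as a plain 32-bit immediate only for the
// small code model with a non-PIC relocation model; otherwise its address is
// materialized into a register with LEA first (using the PIC base register
// on 32-bit targets).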
21607 // Prepare IP either in reg or imm.
21608 if (!UseImmLabel) {
21609 PtrStoreOpc = (PVT == MVT::i64) ? X86::MOV64mr : X86::MOV32mr;
21610 const TargetRegisterClass *PtrRC = getRegClassFor(PVT);
21611 LabelReg = MRI.createVirtualRegister(PtrRC);
21612 if (Subtarget->is64Bit()) {
21613 MIB = BuildMI(*thisMBB, MI, DL, TII->get(X86::LEA64r), LabelReg)
21617 .addMBB(restoreMBB)
21620 const X86InstrInfo *XII = static_cast<const X86InstrInfo*>(TII);
21621 MIB = BuildMI(*thisMBB, MI, DL, TII->get(X86::LEA32r), LabelReg)
21622 .addReg(XII->getGlobalBaseReg(MF))
21625 .addMBB(restoreMBB, Subtarget->ClassifyBlockAddressReference())
21629 PtrStoreOpc = (PVT == MVT::i64) ? X86::MOV64mi32 : X86::MOV32mi;
21631 MIB = BuildMI(*thisMBB, MI, DL, TII->get(PtrStoreOpc));
21632 for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
21633 if (i == X86::AddrDisp)
21634 MIB.addDisp(MI->getOperand(MemOpndSlot + i), LabelOffset);
21636 MIB.addOperand(MI->getOperand(MemOpndSlot + i));
21639 MIB.addReg(LabelReg);
21641 MIB.addMBB(restoreMBB);
21642 MIB.setMemRefs(MMOBegin, MMOEnd);
21644 MIB = BuildMI(*thisMBB, MI, DL, TII->get(X86::EH_SjLj_Setup))
21645 .addMBB(restoreMBB);
21647 const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
21648 MIB.addRegMask(RegInfo->getNoPreservedMask());
21649 thisMBB->addSuccessor(mainMBB);
21650 thisMBB->addSuccessor(restoreMBB);
21654 BuildMI(mainMBB, DL, TII->get(X86::MOV32r0), mainDstReg);
21655 mainMBB->addSuccessor(sinkMBB);
21658 BuildMI(*sinkMBB, sinkMBB->begin(), DL,
21659 TII->get(X86::PHI), DstReg)
21660 .addReg(mainDstReg).addMBB(mainMBB)
21661 .addReg(restoreDstReg).addMBB(restoreMBB);
21664 if (RegInfo->hasBasePointer(*MF)) {
21665 const bool Uses64BitFramePtr =
21666 Subtarget->isTarget64BitLP64() || Subtarget->isTargetNaCl64();
21667 X86MachineFunctionInfo *X86FI = MF->getInfo<X86MachineFunctionInfo>();
21668 X86FI->setRestoreBasePointer(MF);
21669 unsigned FramePtr = RegInfo->getFrameRegister(*MF);
21670 unsigned BasePtr = RegInfo->getBaseRegister();
21671 unsigned Opm = Uses64BitFramePtr ? X86::MOV64rm : X86::MOV32rm;
21672 addRegOffset(BuildMI(restoreMBB, DL, TII->get(Opm), BasePtr),
21673 FramePtr, true, X86FI->getRestoreBasePointerOffset())
21674 .setMIFlag(MachineInstr::FrameSetup);
21676 BuildMI(restoreMBB, DL, TII->get(X86::MOV32ri), restoreDstReg).addImm(1);
21677 BuildMI(restoreMBB, DL, TII->get(X86::JMP_1)).addMBB(sinkMBB);
21678 restoreMBB->addSuccessor(sinkMBB);
21680 MI->eraseFromParent();
21684 MachineBasicBlock *
21685 X86TargetLowering::emitEHSjLjLongJmp(MachineInstr *MI,
21686 MachineBasicBlock *MBB) const {
21687 DebugLoc DL = MI->getDebugLoc();
21688 MachineFunction *MF = MBB->getParent();
21689 const TargetInstrInfo *TII = Subtarget->getInstrInfo();
21690 MachineRegisterInfo &MRI = MF->getRegInfo();
21692 // Memory Reference
21693 MachineInstr::mmo_iterator MMOBegin = MI->memoperands_begin();
21694 MachineInstr::mmo_iterator MMOEnd = MI->memoperands_end();
21696 MVT PVT = getPointerTy();
21697 assert((PVT == MVT::i64 || PVT == MVT::i32) &&
21698 "Invalid Pointer Size!");
21700 const TargetRegisterClass *RC =
21701 (PVT == MVT::i64) ? &X86::GR64RegClass : &X86::GR32RegClass;
21702 unsigned Tmp = MRI.createVirtualRegister(RC);
21703 // Since FP is only updated here but NOT referenced, it's treated as GPR.
21704 const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
21705 unsigned FP = (PVT == MVT::i64) ? X86::RBP : X86::EBP;
21706 unsigned SP = RegInfo->getStackRegister();
21708 MachineInstrBuilder MIB;
21710 const int64_t LabelOffset = 1 * PVT.getStoreSize();
21711 const int64_t SPOffset = 2 * PVT.getStoreSize();
21713 unsigned PtrLoadOpc = (PVT == MVT::i64) ? X86::MOV64rm : X86::MOV32rm;
21714 unsigned IJmpOpc = (PVT == MVT::i64) ? X86::JMP64r : X86::JMP32r;
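// The longjmp reloads everything from the setjmp buffer: slot 0 holds the
// saved frame pointer, slot 1 (LabelOffset) the resume address, and slot 2
// (SPOffset) the saved stack pointer. After restoring FP and SP, control
// transfers with an indirect jump through the reloaded resume address.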
21717 MIB = BuildMI(*MBB, MI, DL, TII->get(PtrLoadOpc), FP);
21718 for (unsigned i = 0; i < X86::AddrNumOperands; ++i)
21719 MIB.addOperand(MI->getOperand(i));
21720 MIB.setMemRefs(MMOBegin, MMOEnd);
21722 MIB = BuildMI(*MBB, MI, DL, TII->get(PtrLoadOpc), Tmp);
21723 for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
21724 if (i == X86::AddrDisp)
21725 MIB.addDisp(MI->getOperand(i), LabelOffset);
21727 MIB.addOperand(MI->getOperand(i));
21729 MIB.setMemRefs(MMOBegin, MMOEnd);
21731 MIB = BuildMI(*MBB, MI, DL, TII->get(PtrLoadOpc), SP);
21732 for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
21733 if (i == X86::AddrDisp)
21734 MIB.addDisp(MI->getOperand(i), SPOffset);
21736 MIB.addOperand(MI->getOperand(i));
21738 MIB.setMemRefs(MMOBegin, MMOEnd);
21740 BuildMI(*MBB, MI, DL, TII->get(IJmpOpc)).addReg(Tmp);
21742 MI->eraseFromParent();
21746 // Replace 213-type (isel default) FMA3 instructions with 231-type for
21747 // accumulator loops. Writing back to the accumulator allows the coalescer
21748 // to remove extra copies in the loop.
21749 MachineBasicBlock *
21750 X86TargetLowering::emitFMA3Instr(MachineInstr *MI,
21751 MachineBasicBlock *MBB) const {
21752 MachineOperand &AddendOp = MI->getOperand(3);
21754 // Bail out early if the addend isn't a register - we can't switch these.
21755 if (!AddendOp.isReg())
21758 MachineFunction &MF = *MBB->getParent();
21759 MachineRegisterInfo &MRI = MF.getRegInfo();
21761 // Check whether the addend is defined by a PHI:
21762 assert(MRI.hasOneDef(AddendOp.getReg()) && "Multiple defs in SSA?");
21763 MachineInstr &AddendDef = *MRI.def_instr_begin(AddendOp.getReg());
21764 if (!AddendDef.isPHI())
21767 // Look for the following pattern:
21769 // %addend = phi [%entry, 0], [%loop, %result]
21771 // %result<tied1> = FMA213 %m2<tied0>, %m1, %addend
21775 // %addend = phi [%entry, 0], [%loop, %result]
21777 // %result<tied1> = FMA231 %addend<tied0>, %m1, %m2
21779 for (unsigned i = 1, e = AddendDef.getNumOperands(); i < e; i += 2) {
21780 assert(AddendDef.getOperand(i).isReg());
21781 MachineOperand PHISrcOp = AddendDef.getOperand(i);
21782 MachineInstr &PHISrcInst = *MRI.def_instr_begin(PHISrcOp.getReg());
21783 if (&PHISrcInst == MI) {
21784 // Found a matching instruction.
21785 unsigned NewFMAOpc = 0;
21786 switch (MI->getOpcode()) {
21787 case X86::VFMADDPDr213r: NewFMAOpc = X86::VFMADDPDr231r; break;
21788 case X86::VFMADDPSr213r: NewFMAOpc = X86::VFMADDPSr231r; break;
21789 case X86::VFMADDSDr213r: NewFMAOpc = X86::VFMADDSDr231r; break;
21790 case X86::VFMADDSSr213r: NewFMAOpc = X86::VFMADDSSr231r; break;
21791 case X86::VFMSUBPDr213r: NewFMAOpc = X86::VFMSUBPDr231r; break;
21792 case X86::VFMSUBPSr213r: NewFMAOpc = X86::VFMSUBPSr231r; break;
21793 case X86::VFMSUBSDr213r: NewFMAOpc = X86::VFMSUBSDr231r; break;
21794 case X86::VFMSUBSSr213r: NewFMAOpc = X86::VFMSUBSSr231r; break;
21795 case X86::VFNMADDPDr213r: NewFMAOpc = X86::VFNMADDPDr231r; break;
21796 case X86::VFNMADDPSr213r: NewFMAOpc = X86::VFNMADDPSr231r; break;
21797 case X86::VFNMADDSDr213r: NewFMAOpc = X86::VFNMADDSDr231r; break;
21798 case X86::VFNMADDSSr213r: NewFMAOpc = X86::VFNMADDSSr231r; break;
21799 case X86::VFNMSUBPDr213r: NewFMAOpc = X86::VFNMSUBPDr231r; break;
21800 case X86::VFNMSUBPSr213r: NewFMAOpc = X86::VFNMSUBPSr231r; break;
21801 case X86::VFNMSUBSDr213r: NewFMAOpc = X86::VFNMSUBSDr231r; break;
21802 case X86::VFNMSUBSSr213r: NewFMAOpc = X86::VFNMSUBSSr231r; break;
21803 case X86::VFMADDSUBPDr213r: NewFMAOpc = X86::VFMADDSUBPDr231r; break;
21804 case X86::VFMADDSUBPSr213r: NewFMAOpc = X86::VFMADDSUBPSr231r; break;
21805 case X86::VFMSUBADDPDr213r: NewFMAOpc = X86::VFMSUBADDPDr231r; break;
21806 case X86::VFMSUBADDPSr213r: NewFMAOpc = X86::VFMSUBADDPSr231r; break;
21808 case X86::VFMADDPDr213rY: NewFMAOpc = X86::VFMADDPDr231rY; break;
21809 case X86::VFMADDPSr213rY: NewFMAOpc = X86::VFMADDPSr231rY; break;
21810 case X86::VFMSUBPDr213rY: NewFMAOpc = X86::VFMSUBPDr231rY; break;
21811 case X86::VFMSUBPSr213rY: NewFMAOpc = X86::VFMSUBPSr231rY; break;
21812 case X86::VFNMADDPDr213rY: NewFMAOpc = X86::VFNMADDPDr231rY; break;
21813 case X86::VFNMADDPSr213rY: NewFMAOpc = X86::VFNMADDPSr231rY; break;
21814 case X86::VFNMSUBPDr213rY: NewFMAOpc = X86::VFNMSUBPDr231rY; break;
21815 case X86::VFNMSUBPSr213rY: NewFMAOpc = X86::VFNMSUBPSr231rY; break;
21816 case X86::VFMADDSUBPDr213rY: NewFMAOpc = X86::VFMADDSUBPDr231rY; break;
21817 case X86::VFMADDSUBPSr213rY: NewFMAOpc = X86::VFMADDSUBPSr231rY; break;
21818 case X86::VFMSUBADDPDr213rY: NewFMAOpc = X86::VFMSUBADDPDr231rY; break;
21819 case X86::VFMSUBADDPSr213rY: NewFMAOpc = X86::VFMSUBADDPSr231rY; break;
21820 default: llvm_unreachable("Unrecognized FMA variant.");
21823 const TargetInstrInfo &TII = *Subtarget->getInstrInfo();
21824 MachineInstrBuilder MIB =
21825 BuildMI(MF, MI->getDebugLoc(), TII.get(NewFMAOpc))
21826 .addOperand(MI->getOperand(0))
21827 .addOperand(MI->getOperand(3))
21828 .addOperand(MI->getOperand(2))
21829 .addOperand(MI->getOperand(1));
21830 MBB->insert(MachineBasicBlock::iterator(MI), MIB);
21831 MI->eraseFromParent();
21832 }
21833 }
21835 return MBB;
21836 }
21838 MachineBasicBlock *
21839 X86TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
21840 MachineBasicBlock *BB) const {
21841 switch (MI->getOpcode()) {
21842 default: llvm_unreachable("Unexpected instr type to insert");
21843 case X86::TAILJMPd64:
21844 case X86::TAILJMPr64:
21845 case X86::TAILJMPm64:
21846 case X86::TAILJMPd64_REX:
21847 case X86::TAILJMPr64_REX:
21848 case X86::TAILJMPm64_REX:
21849 llvm_unreachable("TAILJMP64 would not be touched here.");
21850 case X86::TCRETURNdi64:
21851 case X86::TCRETURNri64:
21852 case X86::TCRETURNmi64:
21853 return BB;
21854 case X86::WIN_ALLOCA:
21855 return EmitLoweredWinAlloca(MI, BB);
21856 case X86::SEG_ALLOCA_32:
21857 case X86::SEG_ALLOCA_64:
21858 return EmitLoweredSegAlloca(MI, BB);
21859 case X86::TLSCall_32:
21860 case X86::TLSCall_64:
21861 return EmitLoweredTLSCall(MI, BB);
21862 case X86::CMOV_GR8:
21863 case X86::CMOV_FR32:
21864 case X86::CMOV_FR64:
21865 case X86::CMOV_V4F32:
21866 case X86::CMOV_V2F64:
21867 case X86::CMOV_V2I64:
21868 case X86::CMOV_V8F32:
21869 case X86::CMOV_V4F64:
21870 case X86::CMOV_V4I64:
21871 case X86::CMOV_V16F32:
21872 case X86::CMOV_V8F64:
21873 case X86::CMOV_V8I64:
21874 case X86::CMOV_GR16:
21875 case X86::CMOV_GR32:
21876 case X86::CMOV_RFP32:
21877 case X86::CMOV_RFP64:
21878 case X86::CMOV_RFP80:
21879 return EmitLoweredSelect(MI, BB);
21881 case X86::FP32_TO_INT16_IN_MEM:
21882 case X86::FP32_TO_INT32_IN_MEM:
21883 case X86::FP32_TO_INT64_IN_MEM:
21884 case X86::FP64_TO_INT16_IN_MEM:
21885 case X86::FP64_TO_INT32_IN_MEM:
21886 case X86::FP64_TO_INT64_IN_MEM:
21887 case X86::FP80_TO_INT16_IN_MEM:
21888 case X86::FP80_TO_INT32_IN_MEM:
21889 case X86::FP80_TO_INT64_IN_MEM: {
21890 MachineFunction *F = BB->getParent();
21891 const TargetInstrInfo *TII = Subtarget->getInstrInfo();
21892 DebugLoc DL = MI->getDebugLoc();
21894 // Change the floating point control register to use "round towards zero"
21895 // mode when truncating to an integer value.
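// As a rough illustration of what the stores below accomplish: the x87
// control word's rounding-control field is bits 11:10, and writing 11b there
// selects round-toward-zero (truncation), which is what the FIST-style store
// needs for a C-style float-to-int cast. The original control word is saved
// first and reloaded afterwards so surrounding x87 code keeps its rounding
// mode.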
21896 int CWFrameIdx = F->getFrameInfo()->CreateStackObject(2, 2, false);
21897 addFrameReference(BuildMI(*BB, MI, DL,
21898 TII->get(X86::FNSTCW16m)), CWFrameIdx);
21900 // Load the old value of the high byte of the control word...
21901 unsigned OldCW =
21902 F->getRegInfo().createVirtualRegister(&X86::GR16RegClass);
21903 addFrameReference(BuildMI(*BB, MI, DL, TII->get(X86::MOV16rm), OldCW),
21904 CWFrameIdx);
21906 // Set the high part to be round to zero...
21907 addFrameReference(BuildMI(*BB, MI, DL, TII->get(X86::MOV16mi)), CWFrameIdx)
21908 .addImm(0xC7F);
21910 // Reload the modified control word now...
21911 addFrameReference(BuildMI(*BB, MI, DL,
21912 TII->get(X86::FLDCW16m)), CWFrameIdx);
21914 // Restore the memory image of control word to original value
21915 addFrameReference(BuildMI(*BB, MI, DL, TII->get(X86::MOV16mr)), CWFrameIdx)
21916 .addReg(OldCW);
21918 // Get the X86 opcode to use.
21919 unsigned Opc;
21920 switch (MI->getOpcode()) {
21921 default: llvm_unreachable("illegal opcode!");
21922 case X86::FP32_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m32; break;
21923 case X86::FP32_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m32; break;
21924 case X86::FP32_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m32; break;
21925 case X86::FP64_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m64; break;
21926 case X86::FP64_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m64; break;
21927 case X86::FP64_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m64; break;
21928 case X86::FP80_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m80; break;
21929 case X86::FP80_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m80; break;
21930 case X86::FP80_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m80; break;
21931 }
21933 X86AddressMode AM;
21934 MachineOperand &Op = MI->getOperand(0);
21935 if (Op.isReg()) {
21936 AM.BaseType = X86AddressMode::RegBase;
21937 AM.Base.Reg = Op.getReg();
21938 } else {
21939 AM.BaseType = X86AddressMode::FrameIndexBase;
21940 AM.Base.FrameIndex = Op.getIndex();
21941 }
21942 Op = MI->getOperand(1);
21943 if (Op.isImm())
21944 AM.Scale = Op.getImm();
21945 Op = MI->getOperand(2);
21946 if (Op.isImm())
21947 AM.IndexReg = Op.getImm();
21948 Op = MI->getOperand(3);
21949 if (Op.isGlobal()) {
21950 AM.GV = Op.getGlobal();
21951 } else {
21952 AM.Disp = Op.getImm();
21953 }
21954 addFullAddress(BuildMI(*BB, MI, DL, TII->get(Opc)), AM)
21955 .addReg(MI->getOperand(X86::AddrNumOperands).getReg());
21957 // Reload the original control word now.
21958 addFrameReference(BuildMI(*BB, MI, DL,
21959 TII->get(X86::FLDCW16m)), CWFrameIdx);
21961 MI->eraseFromParent(); // The pseudo instruction is gone now.
21962 return BB;
21963 }
21964 // String/text processing lowering.
21965 case X86::PCMPISTRM128REG:
21966 case X86::VPCMPISTRM128REG:
21967 case X86::PCMPISTRM128MEM:
21968 case X86::VPCMPISTRM128MEM:
21969 case X86::PCMPESTRM128REG:
21970 case X86::VPCMPESTRM128REG:
21971 case X86::PCMPESTRM128MEM:
21972 case X86::VPCMPESTRM128MEM:
21973 assert(Subtarget->hasSSE42() &&
21974 "Target must have SSE4.2 or AVX features enabled");
21975 return EmitPCMPSTRM(MI, BB, Subtarget->getInstrInfo());
21977 // String/text processing lowering.
21978 case X86::PCMPISTRIREG:
21979 case X86::VPCMPISTRIREG:
21980 case X86::PCMPISTRIMEM:
21981 case X86::VPCMPISTRIMEM:
21982 case X86::PCMPESTRIREG:
21983 case X86::VPCMPESTRIREG:
21984 case X86::PCMPESTRIMEM:
21985 case X86::VPCMPESTRIMEM:
21986 assert(Subtarget->hasSSE42() &&
21987 "Target must have SSE4.2 or AVX features enabled");
21988 return EmitPCMPSTRI(MI, BB, Subtarget->getInstrInfo());
21990 // Thread synchronization.
21991 case X86::MONITOR:
21992 return EmitMonitor(MI, BB, Subtarget);
21994 // xbegin
21995 case X86::XBEGIN:
21996 return EmitXBegin(MI, BB, Subtarget->getInstrInfo());
21998 case X86::VASTART_SAVE_XMM_REGS:
21999 return EmitVAStartSaveXMMRegsWithCustomInserter(MI, BB);
22001 case X86::VAARG_64:
22002 return EmitVAARG64WithCustomInserter(MI, BB);
22004 case X86::EH_SjLj_SetJmp32:
22005 case X86::EH_SjLj_SetJmp64:
22006 return emitEHSjLjSetJmp(MI, BB);
22008 case X86::EH_SjLj_LongJmp32:
22009 case X86::EH_SjLj_LongJmp64:
22010 return emitEHSjLjLongJmp(MI, BB);
22012 case TargetOpcode::STATEPOINT:
22013 // As an implementation detail, STATEPOINT shares the STACKMAP format at
22014 // this point in the process. We diverge later.
22015 return emitPatchPoint(MI, BB);
22017 case TargetOpcode::STACKMAP:
22018 case TargetOpcode::PATCHPOINT:
22019 return emitPatchPoint(MI, BB);
22021 case X86::VFMADDPDr213r:
22022 case X86::VFMADDPSr213r:
22023 case X86::VFMADDSDr213r:
22024 case X86::VFMADDSSr213r:
22025 case X86::VFMSUBPDr213r:
22026 case X86::VFMSUBPSr213r:
22027 case X86::VFMSUBSDr213r:
22028 case X86::VFMSUBSSr213r:
22029 case X86::VFNMADDPDr213r:
22030 case X86::VFNMADDPSr213r:
22031 case X86::VFNMADDSDr213r:
22032 case X86::VFNMADDSSr213r:
22033 case X86::VFNMSUBPDr213r:
22034 case X86::VFNMSUBPSr213r:
22035 case X86::VFNMSUBSDr213r:
22036 case X86::VFNMSUBSSr213r:
22037 case X86::VFMADDSUBPDr213r:
22038 case X86::VFMADDSUBPSr213r:
22039 case X86::VFMSUBADDPDr213r:
22040 case X86::VFMSUBADDPSr213r:
22041 case X86::VFMADDPDr213rY:
22042 case X86::VFMADDPSr213rY:
22043 case X86::VFMSUBPDr213rY:
22044 case X86::VFMSUBPSr213rY:
22045 case X86::VFNMADDPDr213rY:
22046 case X86::VFNMADDPSr213rY:
22047 case X86::VFNMSUBPDr213rY:
22048 case X86::VFNMSUBPSr213rY:
22049 case X86::VFMADDSUBPDr213rY:
22050 case X86::VFMADDSUBPSr213rY:
22051 case X86::VFMSUBADDPDr213rY:
22052 case X86::VFMSUBADDPSr213rY:
22053 return emitFMA3Instr(MI, BB);
22057 //===----------------------------------------------------------------------===//
22058 // X86 Optimization Hooks
22059 //===----------------------------------------------------------------------===//
22061 void X86TargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
22062 APInt &KnownZero,
22063 APInt &KnownOne,
22064 const SelectionDAG &DAG,
22065 unsigned Depth) const {
22066 unsigned BitWidth = KnownZero.getBitWidth();
22067 unsigned Opc = Op.getOpcode();
22068 assert((Opc >= ISD::BUILTIN_OP_END ||
22069 Opc == ISD::INTRINSIC_WO_CHAIN ||
22070 Opc == ISD::INTRINSIC_W_CHAIN ||
22071 Opc == ISD::INTRINSIC_VOID) &&
22072 "Should use MaskedValueIsZero if you don't know whether Op"
22073 " is a target node!");
22075 KnownZero = KnownOne = APInt(BitWidth, 0); // Don't know anything.
22089 // These nodes' second result is a boolean.
22090 if (Op.getResNo() == 0)
22091 break;
22092 // Fallthrough
22093 case X86ISD::SETCC:
22094 KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - 1);
22096 case ISD::INTRINSIC_WO_CHAIN: {
22097 unsigned IntId = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
22098 unsigned NumLoBits = 0;
22099 switch (IntId) {
22100 default: break;
22101 case Intrinsic::x86_sse_movmsk_ps:
22102 case Intrinsic::x86_avx_movmsk_ps_256:
22103 case Intrinsic::x86_sse2_movmsk_pd:
22104 case Intrinsic::x86_avx_movmsk_pd_256:
22105 case Intrinsic::x86_mmx_pmovmskb:
22106 case Intrinsic::x86_sse2_pmovmskb_128:
22107 case Intrinsic::x86_avx2_pmovmskb: {
22108 // High bits of movmskp{s|d}, pmovmskb are known zero.
22109 switch (IntId) {
22110 default: llvm_unreachable("Impossible intrinsic"); // Can't reach here.
22111 case Intrinsic::x86_sse_movmsk_ps: NumLoBits = 4; break;
22112 case Intrinsic::x86_avx_movmsk_ps_256: NumLoBits = 8; break;
22113 case Intrinsic::x86_sse2_movmsk_pd: NumLoBits = 2; break;
22114 case Intrinsic::x86_avx_movmsk_pd_256: NumLoBits = 4; break;
22115 case Intrinsic::x86_mmx_pmovmskb: NumLoBits = 8; break;
22116 case Intrinsic::x86_sse2_pmovmskb_128: NumLoBits = 16; break;
22117 case Intrinsic::x86_avx2_pmovmskb: NumLoBits = 32; break;
22119 KnownZero = APInt::getHighBitsSet(BitWidth, BitWidth - NumLoBits);
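// For example (illustrative): movmskps on a v4f32 can only produce a 4-bit
// mask, so in a 32-bit result bits [31:4] are known zero, i.e.
// KnownZero = getHighBitsSet(32, 28).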
22128 unsigned X86TargetLowering::ComputeNumSignBitsForTargetNode(
22129 SDValue Op,
22130 const SelectionDAG &,
22131 unsigned Depth) const {
22132 // SETCC_CARRY sets the dest to ~0 for true or 0 for false.
22133 if (Op.getOpcode() == X86ISD::SETCC_CARRY)
22134 return Op.getValueType().getScalarType().getSizeInBits();
22140 /// isGAPlusOffset - Returns true (and the GlobalValue and the offset) if the
22141 /// node is a GlobalAddress + offset.
22142 bool X86TargetLowering::isGAPlusOffset(SDNode *N,
22143 const GlobalValue* &GA,
22144 int64_t &Offset) const {
22145 if (N->getOpcode() == X86ISD::Wrapper) {
22146 if (isa<GlobalAddressSDNode>(N->getOperand(0))) {
22147 GA = cast<GlobalAddressSDNode>(N->getOperand(0))->getGlobal();
22148 Offset = cast<GlobalAddressSDNode>(N->getOperand(0))->getOffset();
22149 return true;
22150 }
22151 }
22152 return TargetLowering::isGAPlusOffset(N, GA, Offset);
22155 /// isShuffleHigh128VectorInsertLow - Checks whether the shuffle node is the
22156 /// same as extracting the high 128-bit part of 256-bit vector and then
22157 /// inserting the result into the low part of a new 256-bit vector
22158 static bool isShuffleHigh128VectorInsertLow(ShuffleVectorSDNode *SVOp) {
22159 EVT VT = SVOp->getValueType(0);
22160 unsigned NumElems = VT.getVectorNumElements();
22162 // vector_shuffle <4, 5, 6, 7, u, u, u, u> or <2, 3, u, u>
22163 for (unsigned i = 0, j = NumElems/2; i != NumElems/2; ++i, ++j)
22164 if (!isUndefOrEqual(SVOp->getMaskElt(i), j) ||
22165 SVOp->getMaskElt(j) >= 0)
22171 /// isShuffleLow128VectorInsertHigh - Checks whether the shuffle node is the
22172 /// same as extracting the low 128-bit part of 256-bit vector and then
22173 /// inserting the result into the high part of a new 256-bit vector
22174 static bool isShuffleLow128VectorInsertHigh(ShuffleVectorSDNode *SVOp) {
22175 EVT VT = SVOp->getValueType(0);
22176 unsigned NumElems = VT.getVectorNumElements();
22178 // vector_shuffle <u, u, u, u, 0, 1, 2, 3> or <u, u, 0, 1>
22179 for (unsigned i = NumElems/2, j = 0; i != NumElems; ++i, ++j)
22180 if (!isUndefOrEqual(SVOp->getMaskElt(i), j) ||
22181 SVOp->getMaskElt(j) >= 0)
22187 /// PerformShuffleCombine256 - Performs shuffle combines for 256-bit vectors.
22188 static SDValue PerformShuffleCombine256(SDNode *N, SelectionDAG &DAG,
22189 TargetLowering::DAGCombinerInfo &DCI,
22190 const X86Subtarget* Subtarget) {
22191 SDLoc dl(N);
22192 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
22193 SDValue V1 = SVOp->getOperand(0);
22194 SDValue V2 = SVOp->getOperand(1);
22195 EVT VT = SVOp->getValueType(0);
22196 unsigned NumElems = VT.getVectorNumElements();
22198 if (V1.getOpcode() == ISD::CONCAT_VECTORS &&
22199 V2.getOpcode() == ISD::CONCAT_VECTORS) {
22203 // V UNDEF BUILD_VECTOR UNDEF
22205 // CONCAT_VECTOR CONCAT_VECTOR
22208 // RESULT: V + zero extended
22210 if (V2.getOperand(0).getOpcode() != ISD::BUILD_VECTOR ||
22211 V2.getOperand(1).getOpcode() != ISD::UNDEF ||
22212 V1.getOperand(1).getOpcode() != ISD::UNDEF)
22215 if (!ISD::isBuildVectorAllZeros(V2.getOperand(0).getNode()))
22218 // To match the shuffle mask, the first half of the mask should
22219 // be exactly the first vector, and all the rest a splat with the
22220 // first element of the second one.
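// For example (illustrative, v8i32 case): the only mask accepted here is
// <0, 1, 2, 3, 8, 8, 8, 8> (with undefs allowed in place of any element),
// i.e. the low half of V1 followed by a splat of element 0 of the all-zeros
// second concat operand.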
22221 for (unsigned i = 0; i != NumElems/2; ++i)
22222 if (!isUndefOrEqual(SVOp->getMaskElt(i), i) ||
22223 !isUndefOrEqual(SVOp->getMaskElt(i+NumElems/2), NumElems))
22226 // If V1 is coming from a vector load then just fold to a VZEXT_LOAD.
22227 if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(V1.getOperand(0))) {
22228 if (Ld->hasNUsesOfValue(1, 0)) {
22229 SDVTList Tys = DAG.getVTList(MVT::v4i64, MVT::Other);
22230 SDValue Ops[] = { Ld->getChain(), Ld->getBasePtr() };
22231 SDValue ResNode =
22232 DAG.getMemIntrinsicNode(X86ISD::VZEXT_LOAD, dl, Tys, Ops,
22233 Ld->getMemoryVT(),
22234 Ld->getPointerInfo(),
22235 Ld->getAlignment(),
22236 false/*isVolatile*/, true/*ReadMem*/,
22237 false/*WriteMem*/);
22239 // Make sure the newly-created LOAD is in the same position as Ld in
22240 // terms of dependency. We create a TokenFactor for Ld and ResNode,
22241 // and update uses of Ld's output chain to use the TokenFactor.
22242 if (Ld->hasAnyUseOfValue(1)) {
22243 SDValue NewChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
22244 SDValue(Ld, 1), SDValue(ResNode.getNode(), 1));
22245 DAG.ReplaceAllUsesOfValueWith(SDValue(Ld, 1), NewChain);
22246 DAG.UpdateNodeOperands(NewChain.getNode(), SDValue(Ld, 1),
22247 SDValue(ResNode.getNode(), 1));
22248 }
22250 return DAG.getNode(ISD::BITCAST, dl, VT, ResNode);
22251 }
22252 }
22254 // Emit a zeroed vector and insert the desired subvector on its
22255 // first half.
22256 SDValue Zeros = getZeroVector(VT, Subtarget, DAG, dl);
22257 SDValue InsV = Insert128BitVector(Zeros, V1.getOperand(0), 0, DAG, dl);
22258 return DCI.CombineTo(N, InsV);
22261 //===--------------------------------------------------------------------===//
22262 // Combine some shuffles into subvector extracts and inserts:
22265 // vector_shuffle <4, 5, 6, 7, u, u, u, u> or <2, 3, u, u>
22266 if (isShuffleHigh128VectorInsertLow(SVOp)) {
22267 SDValue V = Extract128BitVector(V1, NumElems/2, DAG, dl);
22268 SDValue InsV = Insert128BitVector(DAG.getUNDEF(VT), V, 0, DAG, dl);
22269 return DCI.CombineTo(N, InsV);
22272 // vector_shuffle <u, u, u, u, 0, 1, 2, 3> or <u, u, 0, 1>
22273 if (isShuffleLow128VectorInsertHigh(SVOp)) {
22274 SDValue V = Extract128BitVector(V1, 0, DAG, dl);
22275 SDValue InsV = Insert128BitVector(DAG.getUNDEF(VT), V, NumElems/2, DAG, dl);
22276 return DCI.CombineTo(N, InsV);
22277 }
22279 return SDValue();
22280 }
22282 /// \brief Combine an arbitrary chain of shuffles into a single instruction if
22283 /// possible.
22284 ///
22285 /// This is the leaf of the recursive combining below. When we have found some
22286 /// chain of single-use x86 shuffle instructions and accumulated the combined
22287 /// shuffle mask represented by them, this will try to pattern match that mask
22288 /// into either a single instruction if there is a special purpose instruction
22289 /// for this operation, or into a PSHUFB instruction which is a fully general
22290 /// instruction but should only be used to replace chains over a certain depth.
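/// As a rough example of the single-instruction case (illustrative): an
/// accumulated v2f64 mask of <0, 0> is just a broadcast of the low element,
/// so it can become a single MOVDDUP (with SSE3) or MOVLHPS instead of a
/// chain of generic shuffles.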
22291 static bool combineX86ShuffleChain(SDValue Op, SDValue Root, ArrayRef<int> Mask,
22292 int Depth, bool HasPSHUFB, SelectionDAG &DAG,
22293 TargetLowering::DAGCombinerInfo &DCI,
22294 const X86Subtarget *Subtarget) {
22295 assert(!Mask.empty() && "Cannot combine an empty shuffle mask!");
22297 // Find the operand that enters the chain. Note that multiple uses are OK
22298 // here, we're not going to remove the operand we find.
22299 SDValue Input = Op.getOperand(0);
22300 while (Input.getOpcode() == ISD::BITCAST)
22301 Input = Input.getOperand(0);
22303 MVT VT = Input.getSimpleValueType();
22304 MVT RootVT = Root.getSimpleValueType();
22305 SDLoc DL(Root);
22307 // Just remove no-op shuffle masks.
22308 if (Mask.size() == 1) {
22309 DCI.CombineTo(Root.getNode(), DAG.getNode(ISD::BITCAST, DL, RootVT, Input),
22314 // Use the float domain if the operand type is a floating point type.
22315 bool FloatDomain = VT.isFloatingPoint();
22317 // For floating point shuffles, we don't have free copies in the shuffle
22318 // instructions or the ability to load as part of the instruction, so
22319 // canonicalize their shuffles to UNPCK or MOV variants.
22321 // Note that even with AVX we prefer the PSHUFD form of shuffle for integer
22322 // vectors because it can have a load folded into it that UNPCK cannot. This
22323 // doesn't preclude something switching to the shorter encoding post-RA.
22324 if (FloatDomain) {
22325 if (Mask.equals(0, 0) || Mask.equals(1, 1)) {
22326 bool Lo = Mask.equals(0, 0);
22327 unsigned Shuffle;
22328 MVT ShuffleVT;
22329 // Check if we have SSE3 which will let us use MOVDDUP. That instruction
22330 // is no slower than UNPCKLPD but has the option to fold the input operand
22331 // into even an unaligned memory load.
22332 if (Lo && Subtarget->hasSSE3()) {
22333 Shuffle = X86ISD::MOVDDUP;
22334 ShuffleVT = MVT::v2f64;
22335 } else {
22336 // We have MOVLHPS and MOVHLPS throughout SSE and they encode smaller
22337 // than the UNPCK variants.
22338 Shuffle = Lo ? X86ISD::MOVLHPS : X86ISD::MOVHLPS;
22339 ShuffleVT = MVT::v4f32;
22341 if (Depth == 1 && Root->getOpcode() == Shuffle)
22342 return false; // Nothing to do!
22343 Op = DAG.getNode(ISD::BITCAST, DL, ShuffleVT, Input);
22344 DCI.AddToWorklist(Op.getNode());
22345 if (Shuffle == X86ISD::MOVDDUP)
22346 Op = DAG.getNode(Shuffle, DL, ShuffleVT, Op);
22348 Op = DAG.getNode(Shuffle, DL, ShuffleVT, Op, Op);
22349 DCI.AddToWorklist(Op.getNode());
22350 DCI.CombineTo(Root.getNode(), DAG.getNode(ISD::BITCAST, DL, RootVT, Op),
22354 if (Subtarget->hasSSE3() &&
22355 (Mask.equals(0, 0, 2, 2) || Mask.equals(1, 1, 3, 3))) {
22356 bool Lo = Mask.equals(0, 0, 2, 2);
22357 unsigned Shuffle = Lo ? X86ISD::MOVSLDUP : X86ISD::MOVSHDUP;
22358 MVT ShuffleVT = MVT::v4f32;
22359 if (Depth == 1 && Root->getOpcode() == Shuffle)
22360 return false; // Nothing to do!
22361 Op = DAG.getNode(ISD::BITCAST, DL, ShuffleVT, Input);
22362 DCI.AddToWorklist(Op.getNode());
22363 Op = DAG.getNode(Shuffle, DL, ShuffleVT, Op);
22364 DCI.AddToWorklist(Op.getNode());
22365 DCI.CombineTo(Root.getNode(), DAG.getNode(ISD::BITCAST, DL, RootVT, Op),
22369 if (Mask.equals(0, 0, 1, 1) || Mask.equals(2, 2, 3, 3)) {
22370 bool Lo = Mask.equals(0, 0, 1, 1);
22371 unsigned Shuffle = Lo ? X86ISD::UNPCKL : X86ISD::UNPCKH;
22372 MVT ShuffleVT = MVT::v4f32;
22373 if (Depth == 1 && Root->getOpcode() == Shuffle)
22374 return false; // Nothing to do!
22375 Op = DAG.getNode(ISD::BITCAST, DL, ShuffleVT, Input);
22376 DCI.AddToWorklist(Op.getNode());
22377 Op = DAG.getNode(Shuffle, DL, ShuffleVT, Op, Op);
22378 DCI.AddToWorklist(Op.getNode());
22379 DCI.CombineTo(Root.getNode(), DAG.getNode(ISD::BITCAST, DL, RootVT, Op),
22385 // We always canonicalize the 8 x i16 and 16 x i8 shuffles into their UNPCK
22386 // variants as none of these have single-instruction variants that are
22387 // superior to the UNPCK formulation.
22388 if (!FloatDomain &&
22389 (Mask.equals(0, 0, 1, 1, 2, 2, 3, 3) ||
22390 Mask.equals(4, 4, 5, 5, 6, 6, 7, 7) ||
22391 Mask.equals(0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7) ||
22392 Mask.equals(8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13, 14, 14, 15,
22393 15))) {
22394 bool Lo = Mask[0] == 0;
22395 unsigned Shuffle = Lo ? X86ISD::UNPCKL : X86ISD::UNPCKH;
22396 if (Depth == 1 && Root->getOpcode() == Shuffle)
22397 return false; // Nothing to do!
22398 MVT ShuffleVT;
22399 switch (Mask.size()) {
22400 case 8:
22401 ShuffleVT = MVT::v8i16;
22402 break;
22403 case 16:
22404 ShuffleVT = MVT::v16i8;
22405 break;
22406 default:
22407 llvm_unreachable("Impossible mask size!");
22408 }
22409 Op = DAG.getNode(ISD::BITCAST, DL, ShuffleVT, Input);
22410 DCI.AddToWorklist(Op.getNode());
22411 Op = DAG.getNode(Shuffle, DL, ShuffleVT, Op, Op);
22412 DCI.AddToWorklist(Op.getNode());
22413 DCI.CombineTo(Root.getNode(), DAG.getNode(ISD::BITCAST, DL, RootVT, Op),
22418 // Don't try to re-form single instruction chains under any circumstances now
22419 // that we've done encoding canonicalization for them.
22420 if (Depth < 2)
22421 return false;
22423 // If we have 3 or more shuffle instructions or a chain involving PSHUFB, we
22424 // can replace them with a single PSHUFB instruction profitably. Intel's
22425 // manuals suggest only using PSHUFB if doing so replaces 5 instructions, but
22426 // in practice PSHUFB tends to be *very* fast so we're more aggressive.
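// A quick sketch of why a single PSHUFB suffices (informal): each byte of the
// control vector selects one source byte for that result position, and any
// control byte with its high bit set forces the corresponding result byte to
// zero. The loop below therefore expands the element-level mask to a 16-byte
// control mask, using a byte with the high bit set for lanes that must become
// zero.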
22427 if ((Depth >= 3 || HasPSHUFB) && Subtarget->hasSSSE3()) {
22428 SmallVector<SDValue, 16> PSHUFBMask;
22429 assert(Mask.size() <= 16 && "Can't shuffle elements smaller than bytes!");
22430 int Ratio = 16 / Mask.size();
22431 for (unsigned i = 0; i < 16; ++i) {
22432 if (Mask[i / Ratio] == SM_SentinelUndef) {
22433 PSHUFBMask.push_back(DAG.getUNDEF(MVT::i8));
22434 continue;
22435 }
22436 int M = Mask[i / Ratio] != SM_SentinelZero
22437 ? Ratio * Mask[i / Ratio] + i % Ratio
22438 : 255;
22439 PSHUFBMask.push_back(DAG.getConstant(M, MVT::i8));
22441 Op = DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, Input);
22442 DCI.AddToWorklist(Op.getNode());
22443 SDValue PSHUFBMaskOp =
22444 DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v16i8, PSHUFBMask);
22445 DCI.AddToWorklist(PSHUFBMaskOp.getNode());
22446 Op = DAG.getNode(X86ISD::PSHUFB, DL, MVT::v16i8, Op, PSHUFBMaskOp);
22447 DCI.AddToWorklist(Op.getNode());
22448 DCI.CombineTo(Root.getNode(), DAG.getNode(ISD::BITCAST, DL, RootVT, Op),
22449 /*AddTo*/ true);
22450 return true;
22451 }
22453 // Failed to find any combines.
22454 return false;
22455 }
22457 /// \brief Fully generic combining of x86 shuffle instructions.
22459 /// This should be the last combine run over the x86 shuffle instructions. Once
22460 /// they have been fully optimized, this will recursively consider all chains
22461 /// of single-use shuffle instructions, build a generic model of the cumulative
22462 /// shuffle operation, and check for simpler instructions which implement this
22463 /// operation. We use this primarily for two purposes:
22465 /// 1) Collapse generic shuffles to specialized single instructions when
22466 /// equivalent. In most cases, this is just an encoding size win, but
22467 /// sometimes we will collapse multiple generic shuffles into a single
22468 /// special-purpose shuffle.
22469 /// 2) Look for sequences of shuffle instructions with 3 or more total
22470 /// instructions, and replace them with the slightly more expensive SSSE3
22471 /// PSHUFB instruction if available. We do this as the last combining step
22472 /// to ensure we avoid using PSHUFB if we can implement the shuffle with
22473 /// a suitable short sequence of other instructions. The PSHUFB will either
22474 /// use a register or have to read from memory and so is slightly (but only
22475 /// slightly) more expensive than the other shuffle instructions.
22477 /// Because this is inherently a quadratic operation (for each shuffle in
22478 /// a chain, we recurse up the chain), the depth is limited to 8 instructions.
22479 /// This should never be an issue in practice as the shuffle lowering doesn't
22480 /// produce sequences of more than 8 instructions.
22482 /// FIXME: We will currently miss some cases where the redundant shuffling
22483 /// would simplify under the threshold for PSHUFB formation because of
22484 /// combine-ordering. To fix this, we should do the redundant instruction
22485 /// combining in this recursive walk.
22486 static bool combineX86ShufflesRecursively(SDValue Op, SDValue Root,
22487 ArrayRef<int> RootMask,
22488 int Depth, bool HasPSHUFB,
22489 SelectionDAG &DAG,
22490 TargetLowering::DAGCombinerInfo &DCI,
22491 const X86Subtarget *Subtarget) {
22492 // Bound the depth of our recursive combine because this is ultimately
22493 // quadratic in nature.
22494 if (Depth > 8)
22495 return false;
22497 // Directly rip through bitcasts to find the underlying operand.
22498 while (Op.getOpcode() == ISD::BITCAST && Op.getOperand(0).hasOneUse())
22499 Op = Op.getOperand(0);
22501 MVT VT = Op.getSimpleValueType();
22502 if (!VT.isVector())
22503 return false; // Bail if we hit a non-vector.
22504 // FIXME: This routine should be taught about 256-bit shuffles, or a 256-bit
22505 // version should be added.
22506 if (VT.getSizeInBits() != 128)
22509 assert(Root.getSimpleValueType().isVector() &&
22510 "Shuffles operate on vector types!");
22511 assert(VT.getSizeInBits() == Root.getSimpleValueType().getSizeInBits() &&
22512 "Can only combine shuffles of the same vector register size.");
22514 if (!isTargetShuffle(Op.getOpcode()))
22515 return false;
22516 SmallVector<int, 16> OpMask;
22517 bool IsUnary;
22518 bool HaveMask = getTargetShuffleMask(Op.getNode(), VT, OpMask, IsUnary);
22519 // We only can combine unary shuffles which we can decode the mask for.
22520 if (!HaveMask || !IsUnary)
22523 assert(VT.getVectorNumElements() == OpMask.size() &&
22524 "Different mask size from vector size!");
22525 assert(((RootMask.size() > OpMask.size() &&
22526 RootMask.size() % OpMask.size() == 0) ||
22527 (OpMask.size() > RootMask.size() &&
22528 OpMask.size() % RootMask.size() == 0) ||
22529 OpMask.size() == RootMask.size()) &&
22530 "The smaller number of elements must divide the larger.");
22531 int RootRatio = std::max<int>(1, OpMask.size() / RootMask.size());
22532 int OpRatio = std::max<int>(1, RootMask.size() / OpMask.size());
22533 assert(((RootRatio == 1 && OpRatio == 1) ||
22534 (RootRatio == 1) != (OpRatio == 1)) &&
22535 "Must not have a ratio for both incoming and op masks!");
22537 SmallVector<int, 16> Mask;
22538 Mask.reserve(std::max(OpMask.size(), RootMask.size()));
22540 // Merge this shuffle operation's mask into our accumulated mask. Note that
22541 // this shuffle's mask will be the first applied to the input, followed by the
22542 // root mask to get us all the way to the root value arrangement. The reason
22543 // for this order is that we are recursing up the operation chain.
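// A small worked example (illustrative): if this operation's mask is
// OpMask = <2, 3, 0, 1> and the accumulated RootMask is <1, 0, 3, 2>, then for
// equal sizes the merged mask is Mask[i] = OpMask[RootMask[i]], giving
// <3, 2, 1, 0> -- the op's shuffle is applied to the input first, and the
// root's shuffle is applied on top of it.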
22544 for (int i = 0, e = std::max(OpMask.size(), RootMask.size()); i < e; ++i) {
22545 int RootIdx = i / RootRatio;
22546 if (RootMask[RootIdx] < 0) {
22547 // This is a zero or undef lane, we're done.
22548 Mask.push_back(RootMask[RootIdx]);
22549 continue;
22550 }
22552 int RootMaskedIdx = RootMask[RootIdx] * RootRatio + i % RootRatio;
22553 int OpIdx = RootMaskedIdx / OpRatio;
22554 if (OpMask[OpIdx] < 0) {
22555 // The incoming lanes are zero or undef, it doesn't matter which ones we
22557 Mask.push_back(OpMask[OpIdx]);
22558 continue;
22559 }
22561 // Ok, we have non-zero lanes, map them through.
22562 Mask.push_back(OpMask[OpIdx] * OpRatio +
22563 RootMaskedIdx % OpRatio);
22566 // See if we can recurse into the operand to combine more things.
22567 switch (Op.getOpcode()) {
22568 case X86ISD::PSHUFB:
22569 HasPSHUFB = true;
22570 case X86ISD::PSHUFD:
22571 case X86ISD::PSHUFHW:
22572 case X86ISD::PSHUFLW:
22573 if (Op.getOperand(0).hasOneUse() &&
22574 combineX86ShufflesRecursively(Op.getOperand(0), Root, Mask, Depth + 1,
22575 HasPSHUFB, DAG, DCI, Subtarget))
22576 return true;
22578 break;
22579 case X86ISD::UNPCKL:
22580 case X86ISD::UNPCKH:
22581 assert(Op.getOperand(0) == Op.getOperand(1) && "We only combine unary shuffles!");
22582 // We can't check for single use, we have to check that this shuffle is the only user.
22583 if (Op->isOnlyUserOf(Op.getOperand(0).getNode()) &&
22584 combineX86ShufflesRecursively(Op.getOperand(0), Root, Mask, Depth + 1,
22585 HasPSHUFB, DAG, DCI, Subtarget))
22586 return true;
22588 break;
22589 }
22590 // Minor canonicalization of the accumulated shuffle mask to make it easier
22591 // to match below. All this does is detect masks with sequential pairs of
22592 // elements, and shrink them to the half-width mask. It does this in a loop
22593 // so it will reduce the size of the mask to the minimal width mask which
22594 // performs an equivalent shuffle.
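// For instance (illustrative): a v4i32 mask of <2, 3, 0, 1> moves whole
// 64-bit halves, so it widens to the v2i64 mask <1, 0>; the loop repeats the
// widening until no adjacent pair of elements can be merged any further.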
22595 SmallVector<int, 16> WidenedMask;
22596 while (Mask.size() > 1 && canWidenShuffleElements(Mask, WidenedMask)) {
22597 Mask = std::move(WidenedMask);
22598 WidenedMask.clear();
22601 return combineX86ShuffleChain(Op, Root, Mask, Depth, HasPSHUFB, DAG, DCI,
22602 Subtarget);
22603 }
22605 /// \brief Get the PSHUF-style mask from PSHUF node.
22607 /// This is a very minor wrapper around getTargetShuffleMask to ease forming v4
22608 /// PSHUF-style masks that can be reused with such instructions.
22609 static SmallVector<int, 4> getPSHUFShuffleMask(SDValue N) {
22610 SmallVector<int, 4> Mask;
22611 bool IsUnary;
22612 bool HaveMask = getTargetShuffleMask(N.getNode(), N.getSimpleValueType(), Mask, IsUnary);
22613 (void)HaveMask;
22614 assert(HaveMask);
22616 switch (N.getOpcode()) {
22617 case X86ISD::PSHUFD:
22618 return Mask;
22619 case X86ISD::PSHUFLW:
22620 Mask.resize(4);
22621 return Mask;
22622 case X86ISD::PSHUFHW:
22623 Mask.erase(Mask.begin(), Mask.begin() + 4);
22624 for (int &M : Mask)
22625 M -= 4;
22626 return Mask;
22627 }
22628 llvm_unreachable("No valid shuffle instruction found!");
22629 }
22632 /// \brief Search for a combinable shuffle across a chain ending in pshufd.
22634 /// We walk up the chain and look for a combinable shuffle, skipping over
22635 /// shuffles that we could hoist this shuffle's transformation past without
22636 /// altering anything.
22637 static SDValue
22638 combineRedundantDWordShuffle(SDValue N, MutableArrayRef<int> Mask,
22639 SelectionDAG &DAG,
22640 TargetLowering::DAGCombinerInfo &DCI) {
22641 assert(N.getOpcode() == X86ISD::PSHUFD &&
22642 "Called with something other than an x86 128-bit half shuffle!");
22645 // Walk up a single-use chain looking for a combinable shuffle. Keep a stack
22646 // of the shuffles in the chain so that we can form a fresh chain to replace
22648 SmallVector<SDValue, 8> Chain;
22649 SDValue V = N.getOperand(0);
22650 for (; V.hasOneUse(); V = V.getOperand(0)) {
22651 switch (V.getOpcode()) {
22652 default:
22653 return SDValue(); // Nothing combined!
22655 case ISD::BITCAST:
22656 // Skip bitcasts as we always know the type for the target specific
22657 // shuffles.
22658 continue;
22660 case X86ISD::PSHUFD:
22661 // Found another dword shuffle.
22662 break;
22664 case X86ISD::PSHUFLW:
22665 // Check that the low words (being shuffled) are the identity in the
22666 // dword shuffle, and the high words are self-contained.
22667 if (Mask[0] != 0 || Mask[1] != 1 ||
22668 !(Mask[2] >= 2 && Mask[2] < 4 && Mask[3] >= 2 && Mask[3] < 4))
22669 return SDValue();
22671 Chain.push_back(V);
22672 continue;
22674 case X86ISD::PSHUFHW:
22675 // Check that the high words (being shuffled) are the identity in the
22676 // dword shuffle, and the low words are self-contained.
22677 if (Mask[2] != 2 || Mask[3] != 3 ||
22678 !(Mask[0] >= 0 && Mask[0] < 2 && Mask[1] >= 0 && Mask[1] < 2))
22679 return SDValue();
22681 Chain.push_back(V);
22682 continue;
22684 case X86ISD::UNPCKL:
22685 case X86ISD::UNPCKH:
22686 // For either i8 -> i16 or i16 -> i32 unpacks, we can combine a dword
22687 // shuffle into a preceding word shuffle.
22688 if (V.getValueType() != MVT::v16i8 && V.getValueType() != MVT::v8i16)
22691 // Search for a half-shuffle which we can combine with.
22692 unsigned CombineOp =
22693 V.getOpcode() == X86ISD::UNPCKL ? X86ISD::PSHUFLW : X86ISD::PSHUFHW;
22694 if (V.getOperand(0) != V.getOperand(1) ||
22695 !V->isOnlyUserOf(V.getOperand(0).getNode()))
22696 return SDValue();
22697 Chain.push_back(V);
22698 V = V.getOperand(0);
22699 do {
22700 switch (V.getOpcode()) {
22701 default:
22702 return SDValue(); // Nothing to combine.
22704 case X86ISD::PSHUFLW:
22705 case X86ISD::PSHUFHW:
22706 if (V.getOpcode() == CombineOp)
22707 break;
22709 Chain.push_back(V);
22711 // Fallthrough!
22712 case ISD::BITCAST:
22713 V = V.getOperand(0);
22714 continue;
22715 }
22716 break;
22717 } while (V.hasOneUse());
22720 // Break out of the loop if we break out of the switch.
22724 if (!V.hasOneUse())
22725 // We fell out of the loop without finding a viable combining instruction.
22728 // Merge this node's mask and our incoming mask.
22729 SmallVector<int, 4> VMask = getPSHUFShuffleMask(V);
22730 for (int &M : Mask)
22731 M = VMask[M];
22732 V = DAG.getNode(V.getOpcode(), DL, V.getValueType(), V.getOperand(0),
22733 getV4X86ShuffleImm8ForMask(Mask, DAG));
22735 // Rebuild the chain around this new shuffle.
22736 while (!Chain.empty()) {
22737 SDValue W = Chain.pop_back_val();
22739 if (V.getValueType() != W.getOperand(0).getValueType())
22740 V = DAG.getNode(ISD::BITCAST, DL, W.getOperand(0).getValueType(), V);
22742 switch (W.getOpcode()) {
22744 llvm_unreachable("Only PSHUF and UNPCK instructions get here!");
22746 case X86ISD::UNPCKL:
22747 case X86ISD::UNPCKH:
22748 V = DAG.getNode(W.getOpcode(), DL, W.getValueType(), V, V);
22751 case X86ISD::PSHUFD:
22752 case X86ISD::PSHUFLW:
22753 case X86ISD::PSHUFHW:
22754 V = DAG.getNode(W.getOpcode(), DL, W.getValueType(), V, W.getOperand(1));
22758 if (V.getValueType() != N.getValueType())
22759 V = DAG.getNode(ISD::BITCAST, DL, N.getValueType(), V);
22761 // Return the new chain to replace N.
22762 return V;
22763 }
22765 /// \brief Search for a combinable shuffle across a chain ending in pshuflw or pshufhw.
22767 /// We walk up the chain, skipping shuffles of the other half and looking
22768 /// through shuffles which switch halves trying to find a shuffle of the same
22769 /// pair of dwords.
22770 static bool combineRedundantHalfShuffle(SDValue N, MutableArrayRef<int> Mask,
22771 SelectionDAG &DAG,
22772 TargetLowering::DAGCombinerInfo &DCI) {
22773 assert(
22774 (N.getOpcode() == X86ISD::PSHUFLW || N.getOpcode() == X86ISD::PSHUFHW) &&
22775 "Called with something other than an x86 128-bit half shuffle!");
22776 SDLoc DL(N);
22777 unsigned CombineOpcode = N.getOpcode();
22779 // Walk up a single-use chain looking for a combinable shuffle.
22780 SDValue V = N.getOperand(0);
22781 for (; V.hasOneUse(); V = V.getOperand(0)) {
22782 switch (V.getOpcode()) {
22783 default:
22784 return false; // Nothing combined!
22786 case ISD::BITCAST:
22787 // Skip bitcasts as we always know the type for the target specific
22788 // shuffles.
22789 continue;
22791 case X86ISD::PSHUFLW:
22792 case X86ISD::PSHUFHW:
22793 if (V.getOpcode() == CombineOpcode)
22796 // Other-half shuffles are no-ops.
22799 // Break out of the loop if we break out of the switch.
22803 if (!V.hasOneUse())
22804 // We fell out of the loop without finding a viable combining instruction.
22805 return false;
22807 // Combine away the bottom node as its shuffle will be accumulated into
22808 // a preceding shuffle.
22809 DCI.CombineTo(N.getNode(), N.getOperand(0), /*AddTo*/ true);
22811 // Record the old value.
22812 SDValue Old = V;
22814 // Merge this node's mask and our incoming mask (adjusted to account for all
22815 // the pshufd instructions encountered).
22816 SmallVector<int, 4> VMask = getPSHUFShuffleMask(V);
22817 for (int &M : Mask)
22818 M = VMask[M];
22819 V = DAG.getNode(V.getOpcode(), DL, MVT::v8i16, V.getOperand(0),
22820 getV4X86ShuffleImm8ForMask(Mask, DAG));
22822 // Check that the shuffles didn't cancel each other out. If not, we need to
22823 // combine to the new one.
22824 if (Old != V)
22825 // Replace the combinable shuffle with the combined one, updating all users
22826 // so that we re-evaluate the chain here.
22827 DCI.CombineTo(Old.getNode(), V, /*AddTo*/ true);
22829 return true;
22830 }
22832 /// \brief Try to combine x86 target specific shuffles.
22833 static SDValue PerformTargetShuffleCombine(SDValue N, SelectionDAG &DAG,
22834 TargetLowering::DAGCombinerInfo &DCI,
22835 const X86Subtarget *Subtarget) {
22836 SDLoc DL(N);
22837 MVT VT = N.getSimpleValueType();
22838 SmallVector<int, 4> Mask;
22840 switch (N.getOpcode()) {
22841 case X86ISD::PSHUFD:
22842 case X86ISD::PSHUFLW:
22843 case X86ISD::PSHUFHW:
22844 Mask = getPSHUFShuffleMask(N);
22845 assert(Mask.size() == 4);
22846 break;
22847 default:
22848 return SDValue();
22849 }
22851 // Nuke no-op shuffles that show up after combining.
22852 if (isNoopShuffleMask(Mask))
22853 return DCI.CombineTo(N.getNode(), N.getOperand(0), /*AddTo*/ true);
22855 // Look for simplifications involving one or two shuffle instructions.
22856 SDValue V = N.getOperand(0);
22857 switch (N.getOpcode()) {
22860 case X86ISD::PSHUFLW:
22861 case X86ISD::PSHUFHW:
22862 assert(VT == MVT::v8i16);
22865 if (combineRedundantHalfShuffle(N, Mask, DAG, DCI))
22866 return SDValue(); // We combined away this shuffle, so we're done.
22868 // See if this reduces to a PSHUFD which is no more expensive and can
22869 // combine with more operations. Note that it has to at least flip the
22870 // dwords as otherwise it would have been removed as a no-op.
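// Concretely (illustrative): a PSHUFLW mask of <2, 3, 0, 1> swaps the two
// dwords that make up the low half, so the same result can be produced by a
// PSHUFD that swaps dwords 0 and 1 (or dwords 2 and 3 for PSHUFHW), which
// combines more readily with neighbouring dword shuffles.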
22871 if (Mask[0] == 2 && Mask[1] == 3 && Mask[2] == 0 && Mask[3] == 1) {
22872 int DMask[] = {0, 1, 2, 3};
22873 int DOffset = N.getOpcode() == X86ISD::PSHUFLW ? 0 : 2;
22874 DMask[DOffset + 0] = DOffset + 1;
22875 DMask[DOffset + 1] = DOffset + 0;
22876 V = DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, V);
22877 DCI.AddToWorklist(V.getNode());
22878 V = DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32, V,
22879 getV4X86ShuffleImm8ForMask(DMask, DAG));
22880 DCI.AddToWorklist(V.getNode());
22881 return DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, V);
22884 // Look for shuffle patterns which can be implemented as a single unpack.
22885 // FIXME: This doesn't handle the location of the PSHUFD generically, and
22886 // only works when we have a PSHUFD followed by two half-shuffles.
22887 if (Mask[0] == Mask[1] && Mask[2] == Mask[3] &&
22888 (V.getOpcode() == X86ISD::PSHUFLW ||
22889 V.getOpcode() == X86ISD::PSHUFHW) &&
22890 V.getOpcode() != N.getOpcode() &&
22891 V.hasOneUse()) {
22892 SDValue D = V.getOperand(0);
22893 while (D.getOpcode() == ISD::BITCAST && D.hasOneUse())
22894 D = D.getOperand(0);
22895 if (D.getOpcode() == X86ISD::PSHUFD && D.hasOneUse()) {
22896 SmallVector<int, 4> VMask = getPSHUFShuffleMask(V);
22897 SmallVector<int, 4> DMask = getPSHUFShuffleMask(D);
22898 int NOffset = N.getOpcode() == X86ISD::PSHUFLW ? 0 : 4;
22899 int VOffset = V.getOpcode() == X86ISD::PSHUFLW ? 0 : 4;
22900 int WordMask[8];
22901 for (int i = 0; i < 4; ++i) {
22902 WordMask[i + NOffset] = Mask[i] + NOffset;
22903 WordMask[i + VOffset] = VMask[i] + VOffset;
22904 }
22905 // Map the word mask through the DWord mask.
22906 int MappedMask[8];
22907 for (int i = 0; i < 8; ++i)
22908 MappedMask[i] = 2 * DMask[WordMask[i] / 2] + WordMask[i] % 2;
22909 const int UnpackLoMask[] = {0, 0, 1, 1, 2, 2, 3, 3};
22910 const int UnpackHiMask[] = {4, 4, 5, 5, 6, 6, 7, 7};
22911 if (std::equal(std::begin(MappedMask), std::end(MappedMask),
22912 std::begin(UnpackLoMask)) ||
22913 std::equal(std::begin(MappedMask), std::end(MappedMask),
22914 std::begin(UnpackHiMask))) {
22915 // We can replace all three shuffles with an unpack.
22916 V = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, D.getOperand(0));
22917 DCI.AddToWorklist(V.getNode());
22918 return DAG.getNode(MappedMask[0] == 0 ? X86ISD::UNPCKL
22920 DL, MVT::v8i16, V, V);
22927 case X86ISD::PSHUFD:
22928 if (SDValue NewN = combineRedundantDWordShuffle(N, Mask, DAG, DCI))
22929 return NewN;
22931 break;
22932 }
22934 return SDValue();
22935 }
22937 /// \brief Try to combine a shuffle into a target-specific add-sub node.
22939 /// We combine this directly on the abstract vector shuffle nodes so it is
22940 /// easier to generically match. We also insert dummy vector shuffle nodes for
22941 /// the operands which explicitly discard the lanes which are unused by this
22942 /// operation to try to flow through the rest of the combiner the fact that
22943 /// they're unused.
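/// As an illustration: for a v4f32 shuffle of the form
///   shuffle (fsub A, B), (fadd A, B), <0, 5, 2, 7>
/// lanes 0 and 2 take the subtraction and lanes 1 and 3 take the addition,
/// which is exactly the lane pattern of the ADDSUBPS instruction.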
22944 static SDValue combineShuffleToAddSub(SDNode *N, SelectionDAG &DAG) {
22945 SDLoc DL(N);
22946 EVT VT = N->getValueType(0);
22948 // We only handle target-independent shuffles.
22949 // FIXME: It would be easy and harmless to use the target shuffle mask
22950 // extraction tool to support more.
22951 if (N->getOpcode() != ISD::VECTOR_SHUFFLE)
22954 auto *SVN = cast<ShuffleVectorSDNode>(N);
22955 ArrayRef<int> Mask = SVN->getMask();
22956 SDValue V1 = N->getOperand(0);
22957 SDValue V2 = N->getOperand(1);
22959 // We require the first shuffle operand to be the SUB node, and the second to
22960 // be the ADD node.
22961 // FIXME: We should support the commuted patterns.
22962 if (V1->getOpcode() != ISD::FSUB || V2->getOpcode() != ISD::FADD)
22965 // If there are other uses of these operations we can't fold them.
22966 if (!V1->hasOneUse() || !V2->hasOneUse())
22969 // Ensure that both operations have the same operands. Note that we can
22970 // commute the FADD operands.
22971 SDValue LHS = V1->getOperand(0), RHS = V1->getOperand(1);
22972 if ((V2->getOperand(0) != LHS || V2->getOperand(1) != RHS) &&
22973 (V2->getOperand(0) != RHS || V2->getOperand(1) != LHS))
22976 // We're looking for blends between FADD and FSUB nodes. We insist on these
22977 // nodes being lined up in a specific expected pattern.
22978 if (!(isShuffleEquivalent(V1, V2, Mask, 0, 3) ||
22979 isShuffleEquivalent(V1, V2, Mask, 0, 5, 2, 7) ||
22980 isShuffleEquivalent(V1, V2, Mask, 0, 9, 2, 11, 4, 13, 6, 15)))
22983 // Only specific types are legal at this point, assert so we notice if and
22984 // when these change.
22985 assert((VT == MVT::v4f32 || VT == MVT::v2f64 || VT == MVT::v8f32 ||
22986 VT == MVT::v4f64) &&
22987 "Unknown vector type encountered!");
22989 return DAG.getNode(X86ISD::ADDSUB, DL, VT, LHS, RHS);
22992 /// PerformShuffleCombine - Performs several different shuffle combines.
22993 static SDValue PerformShuffleCombine(SDNode *N, SelectionDAG &DAG,
22994 TargetLowering::DAGCombinerInfo &DCI,
22995 const X86Subtarget *Subtarget) {
22996 SDLoc dl(N);
22997 SDValue N0 = N->getOperand(0);
22998 SDValue N1 = N->getOperand(1);
22999 EVT VT = N->getValueType(0);
23001 // Don't create instructions with illegal types after legalize types has run.
23002 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
23003 if (!DCI.isBeforeLegalize() && !TLI.isTypeLegal(VT.getVectorElementType()))
23006 // If we have legalized the vector types, look for blends of FADD and FSUB
23007 // nodes that we can fuse into an ADDSUB node.
23008 if (TLI.isTypeLegal(VT) && Subtarget->hasSSE3())
23009 if (SDValue AddSub = combineShuffleToAddSub(N, DAG))
23012 // Combine 256-bit vector shuffles. This is only profitable when in AVX mode
23013 if (Subtarget->hasFp256() && VT.is256BitVector() &&
23014 N->getOpcode() == ISD::VECTOR_SHUFFLE)
23015 return PerformShuffleCombine256(N, DAG, DCI, Subtarget);
23017 // During Type Legalization, when promoting illegal vector types,
23018 // the backend might introduce new shuffle dag nodes and bitcasts.
23020 // This code performs the following transformation:
23021 // fold: (shuffle (bitcast (BINOP A, B)), Undef, <Mask>) ->
23022 // (shuffle (BINOP (bitcast A), (bitcast B)), Undef, <Mask>)
23024 // We do this only if both the bitcast and the BINOP dag nodes have
23025 // one use. Also, perform this transformation only if the new binary
23026 // operation is legal. This is to avoid introducing dag nodes that
23027 // potentially need to be further expanded (or custom lowered) into a
23028 // less optimal sequence of dag nodes.
23029 if (!DCI.isBeforeLegalize() && DCI.isBeforeLegalizeOps() &&
23030 N1.getOpcode() == ISD::UNDEF && N0.hasOneUse() &&
23031 N0.getOpcode() == ISD::BITCAST) {
23032 SDValue BC0 = N0.getOperand(0);
23033 EVT SVT = BC0.getValueType();
23034 unsigned Opcode = BC0.getOpcode();
23035 unsigned NumElts = VT.getVectorNumElements();
23037 if (BC0.hasOneUse() && SVT.isVector() &&
23038 SVT.getVectorNumElements() * 2 == NumElts &&
23039 TLI.isOperationLegal(Opcode, VT)) {
23040 bool CanFold = false;
23052 unsigned SVTNumElts = SVT.getVectorNumElements();
23053 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
23054 for (unsigned i = 0, e = SVTNumElts; i != e && CanFold; ++i)
23055 CanFold = SVOp->getMaskElt(i) == (int)(i * 2);
23056 for (unsigned i = SVTNumElts, e = NumElts; i != e && CanFold; ++i)
23057 CanFold = SVOp->getMaskElt(i) < 0;
23059 if (CanFold) {
23060 SDValue BC00 = DAG.getNode(ISD::BITCAST, dl, VT, BC0.getOperand(0));
23061 SDValue BC01 = DAG.getNode(ISD::BITCAST, dl, VT, BC0.getOperand(1));
23062 SDValue NewBinOp = DAG.getNode(BC0.getOpcode(), dl, VT, BC00, BC01);
23063 return DAG.getVectorShuffle(VT, dl, NewBinOp, N1, &SVOp->getMask()[0]);
23068 // Only handle 128 wide vector from here on.
23069 if (!VT.is128BitVector())
23072 // Combine a vector_shuffle that is equal to build_vector load1, load2, load3,
23073 // load4, <0, 1, 2, 3> into a 128-bit load if the load addresses are
23074 // consecutive, non-overlapping, and in the right order.
23075 SmallVector<SDValue, 16> Elts;
23076 for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i)
23077 Elts.push_back(getShuffleScalarElt(N, i, DAG, 0));
23079 SDValue LD = EltsFromConsecutiveLoads(VT, Elts, dl, DAG, true);
23080 if (LD.getNode())
23081 return LD;
23083 if (isTargetShuffle(N->getOpcode())) {
23084 SDValue Shuffle =
23085 PerformTargetShuffleCombine(SDValue(N, 0), DAG, DCI, Subtarget);
23086 if (Shuffle.getNode())
23087 return Shuffle;
23089 // Try recursively combining arbitrary sequences of x86 shuffle
23090 // instructions into higher-order shuffles. We do this after combining
23091 // specific PSHUF instruction sequences into their minimal form so that we
23092 // can evaluate how many specialized shuffle instructions are involved in
23093 // a particular chain.
23094 SmallVector<int, 1> NonceMask; // Just a placeholder.
23095 NonceMask.push_back(0);
23096 if (combineX86ShufflesRecursively(SDValue(N, 0), SDValue(N, 0), NonceMask,
23097 /*Depth*/ 1, /*HasPSHUFB*/ false, DAG,
23098 DCI, Subtarget))
23099 return SDValue(); // This routine will use CombineTo to replace N.
23100 }
23102 return SDValue();
23103 }
23105 /// PerformTruncateCombine - Converts truncate operation to
23106 /// a sequence of vector shuffle operations.
23107 /// It is possible when we truncate 256-bit vector to 128-bit vector
23108 static SDValue PerformTruncateCombine(SDNode *N, SelectionDAG &DAG,
23109 TargetLowering::DAGCombinerInfo &DCI,
23110 const X86Subtarget *Subtarget) {
23111 return SDValue();
23112 }
23114 /// XFormVExtractWithShuffleIntoLoad - Check if a vector extract from a target
23115 /// specific shuffle of a load can be folded into a single element load.
23116 /// Similar handling for VECTOR_SHUFFLE is performed by DAGCombiner, but
23117 /// shuffles have been custom lowered so we need to handle those here.
23118 static SDValue XFormVExtractWithShuffleIntoLoad(SDNode *N, SelectionDAG &DAG,
23119 TargetLowering::DAGCombinerInfo &DCI) {
23120 if (DCI.isBeforeLegalizeOps())
23123 SDValue InVec = N->getOperand(0);
23124 SDValue EltNo = N->getOperand(1);
23126 if (!isa<ConstantSDNode>(EltNo))
23129 EVT OriginalVT = InVec.getValueType();
23131 if (InVec.getOpcode() == ISD::BITCAST) {
23132 // Don't duplicate a load with other uses.
23133 if (!InVec.hasOneUse())
23135 EVT BCVT = InVec.getOperand(0).getValueType();
23136 if (BCVT.getVectorNumElements() != OriginalVT.getVectorNumElements())
23138 InVec = InVec.getOperand(0);
23141 EVT CurrentVT = InVec.getValueType();
23143 if (!isTargetShuffle(InVec.getOpcode()))
23146 // Don't duplicate a load with other uses.
23147 if (!InVec.hasOneUse())
23150 SmallVector<int, 16> ShuffleMask;
23151 bool UnaryShuffle;
23152 if (!getTargetShuffleMask(InVec.getNode(), CurrentVT.getSimpleVT(),
23153 ShuffleMask, UnaryShuffle))
23154 return SDValue();
23156 // Select the input vector, guarding against out of range extract vector.
23157 unsigned NumElems = CurrentVT.getVectorNumElements();
23158 int Elt = cast<ConstantSDNode>(EltNo)->getZExtValue();
23159 int Idx = (Elt > (int)NumElems) ? -1 : ShuffleMask[Elt];
23160 SDValue LdNode = (Idx < (int)NumElems) ? InVec.getOperand(0)
23161 : InVec.getOperand(1);
23163 // If inputs to shuffle are the same for both ops, then allow 2 uses
23164 unsigned AllowedUses = InVec.getNumOperands() > 1 &&
23165 InVec.getOperand(0) == InVec.getOperand(1) ? 2 : 1;
23167 if (LdNode.getOpcode() == ISD::BITCAST) {
23168 // Don't duplicate a load with other uses.
23169 if (!LdNode.getNode()->hasNUsesOfValue(AllowedUses, 0))
23172 AllowedUses = 1; // only allow 1 load use if we have a bitcast
23173 LdNode = LdNode.getOperand(0);
23176 if (!ISD::isNormalLoad(LdNode.getNode()))
23179 LoadSDNode *LN0 = cast<LoadSDNode>(LdNode);
23181 if (!LN0 || !LN0->hasNUsesOfValue(AllowedUses, 0) || LN0->isVolatile())
23182 return SDValue();
23184 EVT EltVT = N->getValueType(0);
23185 // If there's a bitcast before the shuffle, check if the load type and
23186 // alignment is valid.
23187 unsigned Align = LN0->getAlignment();
23188 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
23189 unsigned NewAlign = TLI.getDataLayout()->getABITypeAlignment(
23190 EltVT.getTypeForEVT(*DAG.getContext()));
23192 if (NewAlign > Align || !TLI.isOperationLegalOrCustom(ISD::LOAD, EltVT))
23195 // All checks match so transform back to vector_shuffle so that DAG combiner
23196 // can finish the job
23197 SDLoc dl(N);
23199 // Create shuffle node taking into account the case that it's a unary shuffle
23200 SDValue Shuffle = (UnaryShuffle) ? DAG.getUNDEF(CurrentVT)
23201 : InVec.getOperand(1);
23202 Shuffle = DAG.getVectorShuffle(CurrentVT, dl,
23203 InVec.getOperand(0), Shuffle,
23204 &ShuffleMask[0]);
23205 Shuffle = DAG.getNode(ISD::BITCAST, dl, OriginalVT, Shuffle);
23206 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, N->getValueType(0), Shuffle,
23207 EltNo);
23208 }
23210 /// \brief Detect bitcasts between i32 to x86mmx low word. Since MMX types are
23211 /// special and don't usually play with other vector types, it's better to
23212 /// handle them early to be sure we emit efficient code by avoiding
23213 /// store-load conversions.
23214 static SDValue PerformBITCASTCombine(SDNode *N, SelectionDAG &DAG) {
23215 if (N->getValueType(0) != MVT::x86mmx ||
23216 N->getOperand(0)->getOpcode() != ISD::BUILD_VECTOR ||
23217 N->getOperand(0)->getValueType(0) != MVT::v2i32)
23220 SDValue V = N->getOperand(0);
23221 ConstantSDNode *C = dyn_cast<ConstantSDNode>(V.getOperand(1));
23222 if (C && C->getZExtValue() == 0 && V.getOperand(0).getValueType() == MVT::i32)
23223 return DAG.getNode(X86ISD::MMX_MOVW2D, SDLoc(V.getOperand(0)),
23224 N->getValueType(0), V.getOperand(0));
23229 /// PerformEXTRACT_VECTOR_ELTCombine - Detect vector gather/scatter index
23230 /// generation and convert it from being a bunch of shuffles and extracts
23231 /// into a somewhat faster sequence. For i686, the best sequence is apparently
23232 /// storing the value and loading scalars back, while for x64 we should
23233 /// use 64-bit extracts and shifts.
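/// Sketch of the 64-bit path (illustrative): bitcast the v4i32 to v2i64,
/// extract the two 64-bit halves, and then each i32 value is either a
/// truncate of a half or a truncate of that half shifted right by 32, so four
/// element extracts become two extracts plus two shifts.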
23234 static SDValue PerformEXTRACT_VECTOR_ELTCombine(SDNode *N, SelectionDAG &DAG,
23235 TargetLowering::DAGCombinerInfo &DCI) {
23236 SDValue NewOp = XFormVExtractWithShuffleIntoLoad(N, DAG, DCI);
23237 if (NewOp.getNode())
23240 SDValue InputVector = N->getOperand(0);
23242 // Detect mmx to i32 conversion through a v2i32 elt extract.
23243 if (InputVector.getOpcode() == ISD::BITCAST && InputVector.hasOneUse() &&
23244 N->getValueType(0) == MVT::i32 &&
23245 InputVector.getValueType() == MVT::v2i32) {
23247 // The bitcast source is a direct mmx result.
23248 SDValue MMXSrc = InputVector.getNode()->getOperand(0);
23249 if (MMXSrc.getValueType() == MVT::x86mmx)
23250 return DAG.getNode(X86ISD::MMX_MOVD2W, SDLoc(InputVector),
23251 N->getValueType(0),
23252 InputVector.getNode()->getOperand(0));
23254 // The mmx is indirect: (i64 extract_elt (v1i64 bitcast (x86mmx ...))).
23255 SDValue MMXSrcOp = MMXSrc.getOperand(0);
23256 if (MMXSrc.getOpcode() == ISD::EXTRACT_VECTOR_ELT && MMXSrc.hasOneUse() &&
23257 MMXSrc.getValueType() == MVT::i64 && MMXSrcOp.hasOneUse() &&
23258 MMXSrcOp.getOpcode() == ISD::BITCAST &&
23259 MMXSrcOp.getValueType() == MVT::v1i64 &&
23260 MMXSrcOp.getOperand(0).getValueType() == MVT::x86mmx)
23261 return DAG.getNode(X86ISD::MMX_MOVD2W, SDLoc(InputVector),
23262 N->getValueType(0),
23263 MMXSrcOp.getOperand(0));
23266 // Only operate on vectors of 4 elements, where the alternative shuffling
23267 // gets to be more expensive.
23268 if (InputVector.getValueType() != MVT::v4i32)
23271 // Check whether every use of InputVector is an EXTRACT_VECTOR_ELT with a
23272 // single use which is a sign-extend or zero-extend, and all elements are
23274 SmallVector<SDNode *, 4> Uses;
23275 unsigned ExtractedElements = 0;
23276 for (SDNode::use_iterator UI = InputVector.getNode()->use_begin(),
23277 UE = InputVector.getNode()->use_end(); UI != UE; ++UI) {
23278 if (UI.getUse().getResNo() != InputVector.getResNo())
23279 continue;
23281 SDNode *Extract = *UI;
23282 if (Extract->getOpcode() != ISD::EXTRACT_VECTOR_ELT)
23285 if (Extract->getValueType(0) != MVT::i32)
23287 if (!Extract->hasOneUse())
23289 if (Extract->use_begin()->getOpcode() != ISD::SIGN_EXTEND &&
23290 Extract->use_begin()->getOpcode() != ISD::ZERO_EXTEND)
23292 if (!isa<ConstantSDNode>(Extract->getOperand(1)))
23295 // Record which element was extracted.
23296 ExtractedElements |=
23297 1 << cast<ConstantSDNode>(Extract->getOperand(1))->getZExtValue();
23299 Uses.push_back(Extract);
23302 // If not all the elements were used, this may not be worthwhile.
23303 if (ExtractedElements != 15)
23306 // Ok, we've now decided to do the transformation.
23307 // If 64-bit shifts are legal, use the extract-shift sequence,
23308 // otherwise bounce the vector off the cache.
23309 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
23311 SDLoc dl(InputVector);
23312 SDValue Vals[4];
23313 if (TLI.isOperationLegal(ISD::SRA, MVT::i64)) {
23314 SDValue Cst = DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, InputVector);
23315 EVT VecIdxTy = DAG.getTargetLoweringInfo().getVectorIdxTy();
23316 SDValue BottomHalf = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i64, Cst,
23317 DAG.getConstant(0, VecIdxTy));
23318 SDValue TopHalf = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i64, Cst,
23319 DAG.getConstant(1, VecIdxTy));
23321 SDValue ShAmt = DAG.getConstant(32,
23322 DAG.getTargetLoweringInfo().getShiftAmountTy(MVT::i64));
23323 Vals[0] = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, BottomHalf);
23324 Vals[1] = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32,
23325 DAG.getNode(ISD::SRA, dl, MVT::i64, BottomHalf, ShAmt));
23326 Vals[2] = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, TopHalf);
23327 Vals[3] = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32,
23328 DAG.getNode(ISD::SRA, dl, MVT::i64, TopHalf, ShAmt));
23329 } else {
23330 // Store the value to a temporary stack slot.
23331 SDValue StackPtr = DAG.CreateStackTemporary(InputVector.getValueType());
23332 SDValue Ch = DAG.getStore(DAG.getEntryNode(), dl, InputVector, StackPtr,
23333 MachinePointerInfo(), false, false, 0);
23335 EVT ElementType = InputVector.getValueType().getVectorElementType();
23336 unsigned EltSize = ElementType.getSizeInBits() / 8;
23338 // Replace each use (extract) with a load of the appropriate element.
23339 for (unsigned i = 0; i < 4; ++i) {
23340 uint64_t Offset = EltSize * i;
23341 SDValue OffsetVal = DAG.getConstant(Offset, TLI.getPointerTy());
23343 SDValue ScalarAddr = DAG.getNode(ISD::ADD, dl, TLI.getPointerTy(),
23344 StackPtr, OffsetVal);
23346 // Load the scalar.
23347 Vals[i] = DAG.getLoad(ElementType, dl, Ch,
23348 ScalarAddr, MachinePointerInfo(),
23349 false, false, false, 0);
23354 // Replace the extracts
23355 for (SmallVectorImpl<SDNode *>::iterator UI = Uses.begin(),
23356 UE = Uses.end(); UI != UE; ++UI) {
23357 SDNode *Extract = *UI;
23359 SDValue Idx = Extract->getOperand(1);
23360 uint64_t IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
23361 DAG.ReplaceAllUsesOfValueWith(SDValue(Extract, 0), Vals[IdxVal]);
23364 // The replacement was made in place; don't return anything.
23365 return SDValue();
23366 }
23368 /// \brief Matches a VSELECT onto min/max or return 0 if the node doesn't match.
23369 static std::pair<unsigned, bool>
23370 matchIntegerMINMAX(SDValue Cond, EVT VT, SDValue LHS, SDValue RHS,
23371 SelectionDAG &DAG, const X86Subtarget *Subtarget) {
23372 if (!VT.isVector())
23373 return std::make_pair(0, false);
23375 bool NeedSplit = false;
23376 switch (VT.getSimpleVT().SimpleTy) {
23377 default: return std::make_pair(0, false);
23380 if (!Subtarget->hasVLX())
23381 return std::make_pair(0, false);
23385 if (!Subtarget->hasBWI())
23386 return std::make_pair(0, false);
23390 if (!Subtarget->hasAVX512())
23391 return std::make_pair(0, false);
23396 if (!Subtarget->hasAVX2())
23397 NeedSplit = true;
23398 if (!Subtarget->hasAVX())
23399 return std::make_pair(0, false);
23404 if (!Subtarget->hasSSE2())
23405 return std::make_pair(0, false);
23408 // SSE2 has only a small subset of the operations.
23409 bool hasUnsigned = Subtarget->hasSSE41() ||
23410 (Subtarget->hasSSE2() && VT == MVT::v16i8);
23411 bool hasSigned = Subtarget->hasSSE41() ||
23412 (Subtarget->hasSSE2() && VT == MVT::v8i16);
23414 ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
23416 unsigned Opc = 0;
23417 // Check for x CC y ? x : y.
23418 if (DAG.isEqualTo(LHS, Cond.getOperand(0)) &&
23419 DAG.isEqualTo(RHS, Cond.getOperand(1))) {
23420 switch (CC) {
23421 default: break;
23422 case ISD::SETULT:
23423 case ISD::SETULE:
23424 Opc = hasUnsigned ? X86ISD::UMIN : 0; break;
23425 case ISD::SETUGT:
23426 case ISD::SETUGE:
23427 Opc = hasUnsigned ? X86ISD::UMAX : 0; break;
23428 case ISD::SETLT:
23429 case ISD::SETLE:
23430 Opc = hasSigned ? X86ISD::SMIN : 0; break;
23431 case ISD::SETGT:
23432 case ISD::SETGE:
23433 Opc = hasSigned ? X86ISD::SMAX : 0; break;
23434 }
23435 // Check for x CC y ? y : x -- a min/max with reversed arms.
23436 } else if (DAG.isEqualTo(LHS, Cond.getOperand(1)) &&
23437 DAG.isEqualTo(RHS, Cond.getOperand(0))) {
23442 Opc = hasUnsigned ? X86ISD::UMAX : 0; break;
23445 Opc = hasUnsigned ? X86ISD::UMIN : 0; break;
23448 Opc = hasSigned ? X86ISD::SMAX : 0; break;
23451 Opc = hasSigned ? X86ISD::SMIN : 0; break;
23455 return std::make_pair(Opc, NeedSplit);
23458 static SDValue
23459 transformVSELECTtoBlendVECTOR_SHUFFLE(SDNode *N, SelectionDAG &DAG,
23460 const X86Subtarget *Subtarget) {
23462 SDValue Cond = N->getOperand(0);
23463 SDValue LHS = N->getOperand(1);
23464 SDValue RHS = N->getOperand(2);
23466 if (Cond.getOpcode() == ISD::SIGN_EXTEND) {
23467 SDValue CondSrc = Cond->getOperand(0);
23468 if (CondSrc->getOpcode() == ISD::SIGN_EXTEND_INREG)
23469 Cond = CondSrc->getOperand(0);
23472 if (!ISD::isBuildVectorOfConstantSDNodes(Cond.getNode()))
23475 // A vselect where all conditions and data are constants can be optimized into
23476 // a single vector load by SelectionDAGLegalize::ExpandBUILD_VECTOR().
23477 if (ISD::isBuildVectorOfConstantSDNodes(LHS.getNode()) &&
23478 ISD::isBuildVectorOfConstantSDNodes(RHS.getNode()))
23481 unsigned MaskValue = 0;
23482 if (!BUILD_VECTORtoBlendMask(cast<BuildVectorSDNode>(Cond), MaskValue))
23485 MVT VT = N->getSimpleValueType(0);
23486 unsigned NumElems = VT.getVectorNumElements();
23487 SmallVector<int, 8> ShuffleMask(NumElems, -1);
23488 for (unsigned i = 0; i < NumElems; ++i) {
23489 // Be sure we emit undef where we can.
23490 if (Cond.getOperand(i)->getOpcode() == ISD::UNDEF)
23491 ShuffleMask[i] = -1;
23493 ShuffleMask[i] = i + NumElems * ((MaskValue >> i) & 1);
23496 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
23497 if (!TLI.isShuffleMaskLegal(ShuffleMask, VT))
23499 return DAG.getVectorShuffle(VT, dl, LHS, RHS, &ShuffleMask[0]);
23502 /// PerformSELECTCombine - Do target-specific dag combines on SELECT and VSELECT
23503 /// nodes.
23504 static SDValue PerformSELECTCombine(SDNode *N, SelectionDAG &DAG,
23505 TargetLowering::DAGCombinerInfo &DCI,
23506 const X86Subtarget *Subtarget) {
23508 SDValue Cond = N->getOperand(0);
23509 // Get the LHS/RHS of the select.
23510 SDValue LHS = N->getOperand(1);
23511 SDValue RHS = N->getOperand(2);
23512 EVT VT = LHS.getValueType();
23513 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
23515 // If we have SSE[12] support, try to form min/max nodes. SSE min/max
23516 // instructions match the semantics of the common C idiom x<y?x:y but not
23517 // x<=y?x:y, because of how they handle negative zero (which can be
23518 // ignored in unsafe-math mode).
23519 // We also try to create v2f32 min/max nodes, which we later widen to v4f32.
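// For example, (select (setcc X, Y, setolt), X, Y) can become (FMIN X, Y)
// provided the NaN and signed-zero caveats handled below do not apply.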
23520 if (Cond.getOpcode() == ISD::SETCC && VT.isFloatingPoint() &&
23521 VT != MVT::f80 && (TLI.isTypeLegal(VT) || VT == MVT::v2f32) &&
23522 (Subtarget->hasSSE2() ||
23523 (Subtarget->hasSSE1() && VT.getScalarType() == MVT::f32))) {
23524 ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
23526 unsigned Opcode = 0;
23527 // Check for x CC y ? x : y.
23528 if (DAG.isEqualTo(LHS, Cond.getOperand(0)) &&
23529 DAG.isEqualTo(RHS, Cond.getOperand(1))) {
23533 // Converting this to a min would handle NaNs incorrectly, and swapping
23534 // the operands would cause it to handle comparisons between positive
23535 // and negative zero incorrectly.
23536 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)) {
23537 if (!DAG.getTarget().Options.UnsafeFPMath &&
23538 !(DAG.isKnownNeverZero(LHS) || DAG.isKnownNeverZero(RHS)))
23540 std::swap(LHS, RHS);
23542 Opcode = X86ISD::FMIN;
23545 // Converting this to a min would handle comparisons between positive
23546 // and negative zero incorrectly.
23547 if (!DAG.getTarget().Options.UnsafeFPMath &&
23548 !DAG.isKnownNeverZero(LHS) && !DAG.isKnownNeverZero(RHS))
23550 Opcode = X86ISD::FMIN;
23553 // Converting this to a min would handle both negative zeros and NaNs
23554 // incorrectly, but we can swap the operands to fix both.
23555 std::swap(LHS, RHS);
23559 Opcode = X86ISD::FMIN;
23563 // Converting this to a max would handle comparisons between positive
23564 // and negative zero incorrectly.
23565 if (!DAG.getTarget().Options.UnsafeFPMath &&
23566 !DAG.isKnownNeverZero(LHS) && !DAG.isKnownNeverZero(RHS))
23568 Opcode = X86ISD::FMAX;
23571 // Converting this to a max would handle NaNs incorrectly, and swapping
23572 // the operands would cause it to handle comparisons between positive
23573 // and negative zero incorrectly.
23574 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)) {
23575 if (!DAG.getTarget().Options.UnsafeFPMath &&
23576 !(DAG.isKnownNeverZero(LHS) || DAG.isKnownNeverZero(RHS)))
23578 std::swap(LHS, RHS);
23580 Opcode = X86ISD::FMAX;
23583 // Converting this to a max would handle both negative zeros and NaNs
23584 // incorrectly, but we can swap the operands to fix both.
23585 std::swap(LHS, RHS);
23589 Opcode = X86ISD::FMAX;
23592 // Check for x CC y ? y : x -- a min/max with reversed arms.
23593 } else if (DAG.isEqualTo(LHS, Cond.getOperand(1)) &&
23594 DAG.isEqualTo(RHS, Cond.getOperand(0))) {
23598 // Converting this to a min would handle comparisons between positive
23599 // and negative zero incorrectly, and swapping the operands would
23600 // cause it to handle NaNs incorrectly.
23601 if (!DAG.getTarget().Options.UnsafeFPMath &&
23602 !(DAG.isKnownNeverZero(LHS) || DAG.isKnownNeverZero(RHS))) {
23603 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS))
23605 std::swap(LHS, RHS);
23607 Opcode = X86ISD::FMIN;
23610 // Converting this to a min would handle NaNs incorrectly.
23611 if (!DAG.getTarget().Options.UnsafeFPMath &&
23612 (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)))
23614 Opcode = X86ISD::FMIN;
23617 // Converting this to a min would handle both negative zeros and NaNs
23618 // incorrectly, but we can swap the operands to fix both.
23619 std::swap(LHS, RHS);
23623 Opcode = X86ISD::FMIN;
23627 // Converting this to a max would handle NaNs incorrectly.
23628 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS))
23630 Opcode = X86ISD::FMAX;
23633 // Converting this to a max would handle comparisons between positive
23634 // and negative zero incorrectly, and swapping the operands would
23635 // cause it to handle NaNs incorrectly.
23636 if (!DAG.getTarget().Options.UnsafeFPMath &&
23637 !DAG.isKnownNeverZero(LHS) && !DAG.isKnownNeverZero(RHS)) {
23638 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS))
23640 std::swap(LHS, RHS);
23642 Opcode = X86ISD::FMAX;
23645 // Converting this to a max would handle both negative zeros and NaNs
23646 // incorrectly, but we can swap the operands to fix both.
23647 std::swap(LHS, RHS);
23651 Opcode = X86ISD::FMAX;
23657 return DAG.getNode(Opcode, DL, N->getValueType(0), LHS, RHS);
23660 EVT CondVT = Cond.getValueType();
23661 if (Subtarget->hasAVX512() && VT.isVector() && CondVT.isVector() &&
23662 CondVT.getVectorElementType() == MVT::i1) {
23663 // v16i8 (select v16i1, v16i8, v16i8) does not have a proper
23664 // lowering on KNL. In this case we convert it to
23665 // v16i8 (select v16i8, v16i8, v16i8) and use AVX instruction.
23666 // The same situation for all 128 and 256-bit vectors of i8 and i16.
23667 // Since SKX these selects have a proper lowering.
23668 EVT OpVT = LHS.getValueType();
23669 if ((OpVT.is128BitVector() || OpVT.is256BitVector()) &&
23670 (OpVT.getVectorElementType() == MVT::i8 ||
23671 OpVT.getVectorElementType() == MVT::i16) &&
23672 !(Subtarget->hasBWI() && Subtarget->hasVLX())) {
23673 Cond = DAG.getNode(ISD::SIGN_EXTEND, DL, OpVT, Cond);
23674 DCI.AddToWorklist(Cond.getNode());
23675 return DAG.getNode(N->getOpcode(), DL, OpVT, Cond, LHS, RHS);
23678 // If this is a select between two integer constants, try to do some
23679 // optimizations.
23680 if (ConstantSDNode *TrueC = dyn_cast<ConstantSDNode>(LHS)) {
23681 if (ConstantSDNode *FalseC = dyn_cast<ConstantSDNode>(RHS))
23682 // Don't do this for crazy integer types.
23683 if (DAG.getTargetLoweringInfo().isTypeLegal(LHS.getValueType())) {
23684 // If this is efficiently invertible, canonicalize the LHSC/RHSC values
23685 // so that TrueC (the true value) is larger than FalseC.
23686 bool NeedsCondInvert = false;
23688 if (TrueC->getAPIntValue().ult(FalseC->getAPIntValue()) &&
23689 // Efficiently invertible.
23690 (Cond.getOpcode() == ISD::SETCC || // setcc -> invertible.
23691 (Cond.getOpcode() == ISD::XOR && // xor(X, C) -> invertible.
23692 isa<ConstantSDNode>(Cond.getOperand(1))))) {
23693 NeedsCondInvert = true;
23694 std::swap(TrueC, FalseC);
23697 // Optimize C ? 8 : 0 -> zext(C) << 3. Likewise for any pow2/0.
23698 if (FalseC->getAPIntValue() == 0 &&
23699 TrueC->getAPIntValue().isPowerOf2()) {
23700 if (NeedsCondInvert) // Invert the condition if needed.
23701 Cond = DAG.getNode(ISD::XOR, DL, Cond.getValueType(), Cond,
23702 DAG.getConstant(1, Cond.getValueType()));
23704 // Zero extend the condition if needed.
23705 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, LHS.getValueType(), Cond);
23707 unsigned ShAmt = TrueC->getAPIntValue().logBase2();
23708 return DAG.getNode(ISD::SHL, DL, LHS.getValueType(), Cond,
23709 DAG.getConstant(ShAmt, MVT::i8));
23712 // Optimize Cond ? cst+1 : cst -> zext(setcc(C)) + cst.
23713 if (FalseC->getAPIntValue()+1 == TrueC->getAPIntValue()) {
23714 if (NeedsCondInvert) // Invert the condition if needed.
23715 Cond = DAG.getNode(ISD::XOR, DL, Cond.getValueType(), Cond,
23716 DAG.getConstant(1, Cond.getValueType()));
23718 // Zero extend the condition if needed.
23719 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL,
23720 FalseC->getValueType(0), Cond);
23721 return DAG.getNode(ISD::ADD, DL, Cond.getValueType(), Cond,
23722 SDValue(FalseC, 0));
23725 // Optimize cases that will turn into an LEA instruction. This requires
23726 // an i32 or i64 and an efficient multiplier (1, 2, 3, 4, 5, 8, 9).
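// e.g. (select Cond, 7, 4) has Diff == 3, so it becomes 4 + (zext Cond) * 3,
// which the addressing mode can fold into a single LEA.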
23727 if (N->getValueType(0) == MVT::i32 || N->getValueType(0) == MVT::i64) {
23728 uint64_t Diff = TrueC->getZExtValue()-FalseC->getZExtValue();
23729 if (N->getValueType(0) == MVT::i32) Diff = (unsigned)Diff;
23731 bool isFastMultiplier = false;
23733 switch ((unsigned char)Diff) {
23735 case 1: // result = add base, cond
23736 case 2: // result = lea base( , cond*2)
23737 case 3: // result = lea base(cond, cond*2)
23738 case 4: // result = lea base( , cond*4)
23739 case 5: // result = lea base(cond, cond*4)
23740 case 8: // result = lea base( , cond*8)
23741 case 9: // result = lea base(cond, cond*8)
23742 isFastMultiplier = true;
23747 if (isFastMultiplier) {
23748 APInt Diff = TrueC->getAPIntValue()-FalseC->getAPIntValue();
23749 if (NeedsCondInvert) // Invert the condition if needed.
23750 Cond = DAG.getNode(ISD::XOR, DL, Cond.getValueType(), Cond,
23751 DAG.getConstant(1, Cond.getValueType()));
23753 // Zero extend the condition if needed.
23754 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, FalseC->getValueType(0),
23756 // Scale the condition by the difference.
23758 Cond = DAG.getNode(ISD::MUL, DL, Cond.getValueType(), Cond,
23759 DAG.getConstant(Diff, Cond.getValueType()));
23761 // Add the base if non-zero.
23762 if (FalseC->getAPIntValue() != 0)
23763 Cond = DAG.getNode(ISD::ADD, DL, Cond.getValueType(), Cond,
23764 SDValue(FalseC, 0));
23771 // Canonicalize max and min:
23772 // (x > y) ? x : y -> (x >= y) ? x : y
23773 // (x < y) ? x : y -> (x <= y) ? x : y
23774 // This allows use of COND_S / COND_NS (see TranslateX86CC) which eliminates
23775 // the need for an extra compare against zero. e.g.
23777 // ((x - y) > 0) ? (x - y) : 0 -> ((x - y) >= 0) ? (x - y) : 0
23779 // testl %edi, %edi
23781 // cmovgl %edi, %eax
23785 // cmovsl %eax, %edi
23786 if (N->getOpcode() == ISD::SELECT && Cond.getOpcode() == ISD::SETCC &&
23787 DAG.isEqualTo(LHS, Cond.getOperand(0)) &&
23788 DAG.isEqualTo(RHS, Cond.getOperand(1))) {
23789 ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
23794 ISD::CondCode NewCC = (CC == ISD::SETLT) ? ISD::SETLE : ISD::SETGE;
23795 Cond = DAG.getSetCC(SDLoc(Cond), Cond.getValueType(),
23796 Cond.getOperand(0), Cond.getOperand(1), NewCC);
23797 return DAG.getNode(ISD::SELECT, DL, VT, Cond, LHS, RHS);
23802 // Early exit check
23803 if (!TLI.isTypeLegal(VT))
23804 return SDValue();
23806 // Match VSELECTs into subs with unsigned saturation.
23807 if (N->getOpcode() == ISD::VSELECT && Cond.getOpcode() == ISD::SETCC &&
23808 // psubus is available in SSE2 and AVX2 for i8 and i16 vectors.
23809 ((Subtarget->hasSSE2() && (VT == MVT::v16i8 || VT == MVT::v8i16)) ||
23810 (Subtarget->hasAVX2() && (VT == MVT::v32i8 || VT == MVT::v16i16)))) {
23811 ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
23813 // Check if one of the arms of the VSELECT is a zero vector. If it's on the
23814 // left side invert the predicate to simplify logic below.
23816 if (ISD::isBuildVectorAllZeros(LHS.getNode())) {
23818 CC = ISD::getSetCCInverse(CC, true);
23819 } else if (ISD::isBuildVectorAllZeros(RHS.getNode())) {
23823 if (Other.getNode() && Other->getNumOperands() == 2 &&
23824 DAG.isEqualTo(Other->getOperand(0), Cond.getOperand(0))) {
23825 SDValue OpLHS = Other->getOperand(0), OpRHS = Other->getOperand(1);
23826 SDValue CondRHS = Cond->getOperand(1);
23828 // Look for a general sub with unsigned saturation first.
23829 // x >= y ? x-y : 0 --> subus x, y
23830 // x > y ? x-y : 0 --> subus x, y
23831 if ((CC == ISD::SETUGE || CC == ISD::SETUGT) &&
23832 Other->getOpcode() == ISD::SUB && DAG.isEqualTo(OpRHS, CondRHS))
23833 return DAG.getNode(X86ISD::SUBUS, DL, VT, OpLHS, OpRHS);
23835 if (auto *OpRHSBV = dyn_cast<BuildVectorSDNode>(OpRHS))
23836 if (auto *OpRHSConst = OpRHSBV->getConstantSplatNode()) {
23837 if (auto *CondRHSBV = dyn_cast<BuildVectorSDNode>(CondRHS))
23838 if (auto *CondRHSConst = CondRHSBV->getConstantSplatNode())
23839 // If the RHS is a constant we have to reverse the const
23840 // canonicalization.
23841 // x > C-1 ? x+-C : 0 --> subus x, C
23842 if (CC == ISD::SETUGT && Other->getOpcode() == ISD::ADD &&
23843 CondRHSConst->getAPIntValue() ==
23844 (-OpRHSConst->getAPIntValue() - 1))
23845 return DAG.getNode(
23846 X86ISD::SUBUS, DL, VT, OpLHS,
23847 DAG.getConstant(-OpRHSConst->getAPIntValue(), VT));
23849 // Another special case: If C was a sign bit, the sub has been
23850 // canonicalized into a xor.
23851 // FIXME: Would it be better to use computeKnownBits to determine
23852 // whether it's safe to decanonicalize the xor?
23853 // x s< 0 ? x^C : 0 --> subus x, C
23854 if (CC == ISD::SETLT && Other->getOpcode() == ISD::XOR &&
23855 ISD::isBuildVectorAllZeros(CondRHS.getNode()) &&
23856 OpRHSConst->getAPIntValue().isSignBit())
23857 // Note that we have to rebuild the RHS constant here to ensure we
23858 // don't rely on particular values of undef lanes.
23859 return DAG.getNode(
23860 X86ISD::SUBUS, DL, VT, OpLHS,
23861 DAG.getConstant(OpRHSConst->getAPIntValue(), VT));
23866 // Try to match a min/max vector operation.
23867 if (N->getOpcode() == ISD::VSELECT && Cond.getOpcode() == ISD::SETCC) {
23868 std::pair<unsigned, bool> ret = matchIntegerMINMAX(Cond, VT, LHS, RHS, DAG, Subtarget);
23869 unsigned Opc = ret.first;
23870 bool NeedSplit = ret.second;
23872 if (Opc && NeedSplit) {
23873 unsigned NumElems = VT.getVectorNumElements();
23874 // Extract the LHS vectors
23875 SDValue LHS1 = Extract128BitVector(LHS, 0, DAG, DL);
23876 SDValue LHS2 = Extract128BitVector(LHS, NumElems/2, DAG, DL);
23878 // Extract the RHS vectors
23879 SDValue RHS1 = Extract128BitVector(RHS, 0, DAG, DL);
23880 SDValue RHS2 = Extract128BitVector(RHS, NumElems/2, DAG, DL);
23882 // Create min/max for each subvector
23883 LHS = DAG.getNode(Opc, DL, LHS1.getValueType(), LHS1, RHS1);
23884 RHS = DAG.getNode(Opc, DL, LHS2.getValueType(), LHS2, RHS2);
23886 // Merge the result
23887 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, LHS, RHS);
23889 return DAG.getNode(Opc, DL, VT, LHS, RHS);
23892 // Simplify vector selection if the condition value type matches the vselect
23893 // operand value type.
23894 if (N->getOpcode() == ISD::VSELECT && CondVT == VT) {
23895 assert(Cond.getValueType().isVector() &&
23896 "vector select expects a vector selector!");
23898 bool TValIsAllOnes = ISD::isBuildVectorAllOnes(LHS.getNode());
23899 bool FValIsAllZeros = ISD::isBuildVectorAllZeros(RHS.getNode());
23901 // Try to invert the condition if the true value is not all 1s and the false
23902 // value is not all 0s.
23903 if (!TValIsAllOnes && !FValIsAllZeros &&
23904 // Check if the selector will be produced by CMPP*/PCMP*
23905 Cond.getOpcode() == ISD::SETCC &&
23906 // Check if SETCC has already been promoted
23907 TLI.getSetCCResultType(*DAG.getContext(), VT) == CondVT) {
23908 bool TValIsAllZeros = ISD::isBuildVectorAllZeros(LHS.getNode());
23909 bool FValIsAllOnes = ISD::isBuildVectorAllOnes(RHS.getNode());
23911 if (TValIsAllZeros || FValIsAllOnes) {
23912 SDValue CC = Cond.getOperand(2);
23913 ISD::CondCode NewCC =
23914 ISD::getSetCCInverse(cast<CondCodeSDNode>(CC)->get(),
23915 Cond.getOperand(0).getValueType().isInteger());
23916 Cond = DAG.getSetCC(DL, CondVT, Cond.getOperand(0), Cond.getOperand(1), NewCC);
23917 std::swap(LHS, RHS);
23918 TValIsAllOnes = FValIsAllOnes;
23919 FValIsAllZeros = TValIsAllZeros;
23923 if (TValIsAllOnes || FValIsAllZeros) {
23924 SDValue Ret;
23926 if (TValIsAllOnes && FValIsAllZeros)
23927 Ret = Cond;
23928 else if (TValIsAllOnes)
23929 Ret = DAG.getNode(ISD::OR, DL, CondVT, Cond,
23930 DAG.getNode(ISD::BITCAST, DL, CondVT, RHS));
23931 else if (FValIsAllZeros)
23932 Ret = DAG.getNode(ISD::AND, DL, CondVT, Cond,
23933 DAG.getNode(ISD::BITCAST, DL, CondVT, LHS));
23935 return DAG.getNode(ISD::BITCAST, DL, VT, Ret);
23939 // If we know that this node is legal then we know that it is going to be
23940 // matched by one of the SSE/AVX BLEND instructions. These instructions only
23941 // depend on the highest bit in each word. Try to use SimplifyDemandedBits
23942 // to simplify previous instructions.
23943 if (N->getOpcode() == ISD::VSELECT && DCI.isBeforeLegalizeOps() &&
23944 !DCI.isBeforeLegalize() &&
23945 // We explicitly check against v8i16 and v16i16 because, although
23946 // they're marked as Custom, they might only be legal when Cond is a
23947 // build_vector of constants. This will be taken care of in a later
23948 // phase.
23949 (TLI.isOperationLegalOrCustom(ISD::VSELECT, VT) && VT != MVT::v16i16 &&
23950 VT != MVT::v8i16) &&
23951 // Don't optimize vector of constants. Those are handled by
23952 // the generic code and all the bits must be properly set for
23953 // the generic optimizer.
23954 !ISD::isBuildVectorOfConstantSDNodes(Cond.getNode())) {
23955 unsigned BitWidth = Cond.getValueType().getScalarType().getSizeInBits();
23957 // Don't optimize vector selects that map to mask-registers.
23961 assert(BitWidth >= 8 && BitWidth <= 64 && "Invalid mask size");
23962 APInt DemandedMask = APInt::getHighBitsSet(BitWidth, 1);
23964 APInt KnownZero, KnownOne;
23965 TargetLowering::TargetLoweringOpt TLO(DAG, DCI.isBeforeLegalize(),
23966 DCI.isBeforeLegalizeOps());
23967 if (TLO.ShrinkDemandedConstant(Cond, DemandedMask) ||
23968 TLI.SimplifyDemandedBits(Cond, DemandedMask, KnownZero, KnownOne,
23970 // If we changed the computation somewhere in the DAG, this change
23971 // will affect all users of Cond.
23972 // Make sure it is fine and update all the nodes so that we do not
23973 // use the generic VSELECT anymore. Otherwise, we may perform
23974 // wrong optimizations as we messed up with the actual expectation
23975 // for the vector boolean values.
23976 if (Cond != TLO.Old) {
23977 // Check all uses of that condition operand to check whether it will be
23978 // consumed by non-BLEND instructions, which may depend on all bits being
23979 // properly set.
23980 for (SDNode::use_iterator I = Cond->use_begin(), E = Cond->use_end();
23982 if (I->getOpcode() != ISD::VSELECT)
23983 // TODO: Add other opcodes eventually lowered into BLEND.
23986 // Update all the users of the condition, before committing the change,
23987 // so that the VSELECT optimizations that expect the correct vector
23988 // boolean value will not be triggered.
23989 for (SDNode::use_iterator I = Cond->use_begin(), E = Cond->use_end();
23991 DAG.ReplaceAllUsesOfValueWith(
23993 DAG.getNode(X86ISD::SHRUNKBLEND, SDLoc(*I), I->getValueType(0),
23994 Cond, I->getOperand(1), I->getOperand(2)));
23995 DCI.CommitTargetLoweringOpt(TLO);
23998 // At this point, only Cond is changed. Change the condition
23999 // just for N to keep the opportunity to optimize all other
24000 // users their own way.
24001 DAG.ReplaceAllUsesOfValueWith(
24003 DAG.getNode(X86ISD::SHRUNKBLEND, SDLoc(N), N->getValueType(0),
24004 TLO.New, N->getOperand(1), N->getOperand(2)));
24009 // We should generate an X86ISD::BLENDI from a vselect if its argument
24010 // is a sign_extend_inreg of an any_extend of a BUILD_VECTOR of
24011 // constants. This specific pattern gets generated when we split a
24012 // selector for a 512 bit vector in a machine without AVX512 (but with
24013 // 256-bit vectors), during legalization:
24015 // (vselect (sign_extend (any_extend (BUILD_VECTOR)) i1) LHS RHS)
24017 // Iff we find this pattern and the build_vectors are built from
24018 // constants, we translate the vselect into a shuffle_vector that we
24019 // know will be matched by LowerVECTOR_SHUFFLEtoBlend.
24020 if ((N->getOpcode() == ISD::VSELECT ||
24021 N->getOpcode() == X86ISD::SHRUNKBLEND) &&
24022 !DCI.isBeforeLegalize()) {
24023 SDValue Shuffle = transformVSELECTtoBlendVECTOR_SHUFFLE(N, DAG, Subtarget);
24024 if (Shuffle.getNode())
24031 // Check whether a boolean test is testing a boolean value generated by
24032 // X86ISD::SETCC. If so, return the operand of that SETCC and the proper condition code.
24035 // Simplify the following patterns:
24036 // (Op (CMP (SETCC Cond EFLAGS) 1) EQ) or
24037 // (Op (CMP (SETCC Cond EFLAGS) 0) NEQ)
24038 // to (Op EFLAGS Cond)
24040 // (Op (CMP (SETCC Cond EFLAGS) 0) EQ) or
24041 // (Op (CMP (SETCC Cond EFLAGS) 1) NEQ)
24042 // to (Op EFLAGS !Cond)
24044 // where Op could be BRCOND or CMOV.
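// In effect, the BRCOND or CMOV can then test the EFLAGS produced by the
// original SETCC directly instead of materializing the boolean and comparing
// it against 0 or 1 again.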
24046 static SDValue checkBoolTestSetCCCombine(SDValue Cmp, X86::CondCode &CC) {
24047 // Quit if this is neither a CMP nor a SUB whose value result is unused.
24048 if (Cmp.getOpcode() != X86ISD::CMP &&
24049 (Cmp.getOpcode() != X86ISD::SUB || Cmp.getNode()->hasAnyUseOfValue(0)))
24052 // Quit if not used as a boolean value.
24053 if (CC != X86::COND_E && CC != X86::COND_NE)
24056 // Check CMP operands. One of them should be 0 or 1 and the other should be
24057 // an SetCC or extended from it.
24058 SDValue Op1 = Cmp.getOperand(0);
24059 SDValue Op2 = Cmp.getOperand(1);
24062 const ConstantSDNode* C = nullptr;
24063 bool needOppositeCond = (CC == X86::COND_E);
24064 bool checkAgainstTrue = false; // Is it a comparison against 1?
24066 if ((C = dyn_cast<ConstantSDNode>(Op1)))
24068 else if ((C = dyn_cast<ConstantSDNode>(Op2)))
24070 else // Quit if neither operand is a constant.
24073 if (C->getZExtValue() == 1) {
24074 needOppositeCond = !needOppositeCond;
24075 checkAgainstTrue = true;
24076 } else if (C->getZExtValue() != 0)
24077 // Quit if the constant is neither 0 nor 1.
24080 bool truncatedToBoolWithAnd = false;
24081 // Skip (zext $x), (trunc $x), or (and $x, 1) node.
24082 while (SetCC.getOpcode() == ISD::ZERO_EXTEND ||
24083 SetCC.getOpcode() == ISD::TRUNCATE ||
24084 SetCC.getOpcode() == ISD::AND) {
24085 if (SetCC.getOpcode() == ISD::AND) {
24087 ConstantSDNode *CS;
24088 if ((CS = dyn_cast<ConstantSDNode>(SetCC.getOperand(0))) &&
24089 CS->getZExtValue() == 1)
24091 if ((CS = dyn_cast<ConstantSDNode>(SetCC.getOperand(1))) &&
24092 CS->getZExtValue() == 1)
24096 SetCC = SetCC.getOperand(OpIdx);
24097 truncatedToBoolWithAnd = true;
24099 SetCC = SetCC.getOperand(0);
24102 switch (SetCC.getOpcode()) {
24103 case X86ISD::SETCC_CARRY:
24104 // Since SETCC_CARRY gives output based on R = CF ? ~0 : 0, it's unsafe to
24105 // simplify it if the result of SETCC_CARRY is not canonicalized to 0 or 1,
24106 // i.e. it's a comparison against true but the result of SETCC_CARRY is not
24107 // truncated to i1 using 'and'.
24108 if (checkAgainstTrue && !truncatedToBoolWithAnd)
24110 assert(X86::CondCode(SetCC.getConstantOperandVal(0)) == X86::COND_B &&
24111 "Invalid use of SETCC_CARRY!");
24113 case X86ISD::SETCC:
24114 // Set the condition code or opposite one if necessary.
24115 CC = X86::CondCode(SetCC.getConstantOperandVal(0));
24116 if (needOppositeCond)
24117 CC = X86::GetOppositeBranchCondition(CC);
24118 return SetCC.getOperand(1);
24119 case X86ISD::CMOV: {
24120 // Check whether false/true value has canonical one, i.e. 0 or 1.
24121 ConstantSDNode *FVal = dyn_cast<ConstantSDNode>(SetCC.getOperand(0));
24122 ConstantSDNode *TVal = dyn_cast<ConstantSDNode>(SetCC.getOperand(1));
24123 // Quit if true value is not a constant.
24126 // Quit if false value is not a constant.
24128 SDValue Op = SetCC.getOperand(0);
24129 // Skip 'zext' or 'trunc' node.
24130 if (Op.getOpcode() == ISD::ZERO_EXTEND ||
24131 Op.getOpcode() == ISD::TRUNCATE)
24132 Op = Op.getOperand(0);
24134 // A special case for rdrand/rdseed, where 0 is set if false cond is found.
24135 if ((Op.getOpcode() != X86ISD::RDRAND &&
24136 Op.getOpcode() != X86ISD::RDSEED) || Op.getResNo() != 0)
24139 // Quit if false value is not the constant 0 or 1.
24140 bool FValIsFalse = true;
24141 if (FVal && FVal->getZExtValue() != 0) {
24142 if (FVal->getZExtValue() != 1)
24144 // If FVal is 1, opposite cond is needed.
24145 needOppositeCond = !needOppositeCond;
24146 FValIsFalse = false;
24148 // Quit if TVal is not the constant opposite of FVal.
24149 if (FValIsFalse && TVal->getZExtValue() != 1)
24151 if (!FValIsFalse && TVal->getZExtValue() != 0)
24153 CC = X86::CondCode(SetCC.getConstantOperandVal(2));
24154 if (needOppositeCond)
24155 CC = X86::GetOppositeBranchCondition(CC);
24156 return SetCC.getOperand(3);
24163 /// Optimize X86ISD::CMOV [LHS, RHS, CONDCODE (e.g. X86::COND_NE), CONDVAL]
24164 static SDValue PerformCMOVCombine(SDNode *N, SelectionDAG &DAG,
24165 TargetLowering::DAGCombinerInfo &DCI,
24166 const X86Subtarget *Subtarget) {
24169 // If the flag operand isn't dead, don't touch this CMOV.
24170 if (N->getNumValues() == 2 && !SDValue(N, 1).use_empty())
24173 SDValue FalseOp = N->getOperand(0);
24174 SDValue TrueOp = N->getOperand(1);
24175 X86::CondCode CC = (X86::CondCode)N->getConstantOperandVal(2);
24176 SDValue Cond = N->getOperand(3);
24178 if (CC == X86::COND_E || CC == X86::COND_NE) {
24179 switch (Cond.getOpcode()) {
24183 // If operand of BSR / BSF are proven never zero, then ZF cannot be set.
24184 if (DAG.isKnownNeverZero(Cond.getOperand(0)))
24185 return (CC == X86::COND_E) ? FalseOp : TrueOp;
24191 Flags = checkBoolTestSetCCCombine(Cond, CC);
24192 if (Flags.getNode() &&
24193 // Extra check as FCMOV only supports a subset of X86 cond.
24194 (FalseOp.getValueType() != MVT::f80 || hasFPCMov(CC))) {
24195 SDValue Ops[] = { FalseOp, TrueOp,
24196 DAG.getConstant(CC, MVT::i8), Flags };
24197 return DAG.getNode(X86ISD::CMOV, DL, N->getVTList(), Ops);
24200 // If this is a select between two integer constants, try to do some
24201 // optimizations. Note that the operands are ordered the opposite of SELECT
24203 if (ConstantSDNode *TrueC = dyn_cast<ConstantSDNode>(TrueOp)) {
24204 if (ConstantSDNode *FalseC = dyn_cast<ConstantSDNode>(FalseOp)) {
24205 // Canonicalize the TrueC/FalseC values so that TrueC (the true value) is
24206 // larger than FalseC (the false value).
24207 if (TrueC->getAPIntValue().ult(FalseC->getAPIntValue())) {
24208 CC = X86::GetOppositeBranchCondition(CC);
24209 std::swap(TrueC, FalseC);
24210 std::swap(TrueOp, FalseOp);
24213 // Optimize C ? 8 : 0 -> zext(setcc(C)) << 3. Likewise for any pow2/0.
24214 // This is efficient for any integer data type (including i8/i16) and
24215 // shift amount.
24216 if (FalseC->getAPIntValue() == 0 && TrueC->getAPIntValue().isPowerOf2()) {
24217 Cond = DAG.getNode(X86ISD::SETCC, DL, MVT::i8,
24218 DAG.getConstant(CC, MVT::i8), Cond);
24220 // Zero extend the condition if needed.
24221 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, TrueC->getValueType(0), Cond);
24223 unsigned ShAmt = TrueC->getAPIntValue().logBase2();
24224 Cond = DAG.getNode(ISD::SHL, DL, Cond.getValueType(), Cond,
24225 DAG.getConstant(ShAmt, MVT::i8));
24226 if (N->getNumValues() == 2) // Dead flag value?
24227 return DCI.CombineTo(N, Cond, SDValue());
24231 // Optimize Cond ? cst+1 : cst -> zext(setcc(C)) + cst. This is efficient
24232 // for any integer data type, including i8/i16.
24233 if (FalseC->getAPIntValue()+1 == TrueC->getAPIntValue()) {
24234 Cond = DAG.getNode(X86ISD::SETCC, DL, MVT::i8,
24235 DAG.getConstant(CC, MVT::i8), Cond);
24237 // Zero extend the condition if needed.
24238 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL,
24239 FalseC->getValueType(0), Cond);
24240 Cond = DAG.getNode(ISD::ADD, DL, Cond.getValueType(), Cond,
24241 SDValue(FalseC, 0));
24243 if (N->getNumValues() == 2) // Dead flag value?
24244 return DCI.CombineTo(N, Cond, SDValue());
24248 // Optimize cases that will turn into an LEA instruction. This requires
24249 // an i32 or i64 and an efficient multiplier (1, 2, 3, 4, 5, 8, 9).
24250 if (N->getValueType(0) == MVT::i32 || N->getValueType(0) == MVT::i64) {
24251 uint64_t Diff = TrueC->getZExtValue()-FalseC->getZExtValue();
24252 if (N->getValueType(0) == MVT::i32) Diff = (unsigned)Diff;
24254 bool isFastMultiplier = false;
24256 switch ((unsigned char)Diff) {
24258 case 1: // result = add base, cond
24259 case 2: // result = lea base( , cond*2)
24260 case 3: // result = lea base(cond, cond*2)
24261 case 4: // result = lea base( , cond*4)
24262 case 5: // result = lea base(cond, cond*4)
24263 case 8: // result = lea base( , cond*8)
24264 case 9: // result = lea base(cond, cond*8)
24265 isFastMultiplier = true;
24270 if (isFastMultiplier) {
24271 APInt Diff = TrueC->getAPIntValue()-FalseC->getAPIntValue();
24272 Cond = DAG.getNode(X86ISD::SETCC, DL, MVT::i8,
24273 DAG.getConstant(CC, MVT::i8), Cond);
24274 // Zero extend the condition if needed.
24275 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, FalseC->getValueType(0),
24277 // Scale the condition by the difference.
24279 Cond = DAG.getNode(ISD::MUL, DL, Cond.getValueType(), Cond,
24280 DAG.getConstant(Diff, Cond.getValueType()));
24282 // Add the base if non-zero.
24283 if (FalseC->getAPIntValue() != 0)
24284 Cond = DAG.getNode(ISD::ADD, DL, Cond.getValueType(), Cond,
24285 SDValue(FalseC, 0));
24286 if (N->getNumValues() == 2) // Dead flag value?
24287 return DCI.CombineTo(N, Cond, SDValue());
24294 // Handle these cases:
24295 // (select (x != c), e, c) -> (select (x != c), e, x),
24296 // (select (x == c), c, e) -> (select (x == c), x, e)
24297 // where the c is an integer constant, and the "select" is the combination
24298 // of CMOV and CMP.
24300 // The rationale for this change is that a conditional move from a constant
24301 // needs two instructions, whereas a conditional move from a register needs
24302 // only one instruction.
24304 // CAVEAT: By replacing a constant with a symbolic value, it may obscure
24305 // some instruction-combining opportunities. This opt needs to be
24306 // postponed as late as possible.
24308 if (!DCI.isBeforeLegalize() && !DCI.isBeforeLegalizeOps()) {
24309 // the DCI.xxxx conditions are provided to postpone the optimization as
24310 // late as possible.
24312 ConstantSDNode *CmpAgainst = nullptr;
24313 if ((Cond.getOpcode() == X86ISD::CMP || Cond.getOpcode() == X86ISD::SUB) &&
24314 (CmpAgainst = dyn_cast<ConstantSDNode>(Cond.getOperand(1))) &&
24315 !isa<ConstantSDNode>(Cond.getOperand(0))) {
24317 if (CC == X86::COND_NE &&
24318 CmpAgainst == dyn_cast<ConstantSDNode>(FalseOp)) {
24319 CC = X86::GetOppositeBranchCondition(CC);
24320 std::swap(TrueOp, FalseOp);
24323 if (CC == X86::COND_E &&
24324 CmpAgainst == dyn_cast<ConstantSDNode>(TrueOp)) {
24325 SDValue Ops[] = { FalseOp, Cond.getOperand(0),
24326 DAG.getConstant(CC, MVT::i8), Cond };
24327 return DAG.getNode(X86ISD::CMOV, DL, N->getVTList (), Ops);
24335 static SDValue PerformINTRINSIC_WO_CHAINCombine(SDNode *N, SelectionDAG &DAG,
24336 const X86Subtarget *Subtarget) {
24337 unsigned IntNo = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
24339 default: return SDValue();
24340 // SSE/AVX/AVX2 blend intrinsics.
24341 case Intrinsic::x86_avx2_pblendvb:
24342 case Intrinsic::x86_avx2_pblendw:
24343 case Intrinsic::x86_avx2_pblendd_128:
24344 case Intrinsic::x86_avx2_pblendd_256:
24345 // Don't try to simplify this intrinsic if we don't have AVX2.
24346 if (!Subtarget->hasAVX2())
24349 case Intrinsic::x86_avx_blend_pd_256:
24350 case Intrinsic::x86_avx_blend_ps_256:
24351 case Intrinsic::x86_avx_blendv_pd_256:
24352 case Intrinsic::x86_avx_blendv_ps_256:
24353 // Don't try to simplify this intrinsic if we don't have AVX.
24354 if (!Subtarget->hasAVX())
24357 case Intrinsic::x86_sse41_pblendw:
24358 case Intrinsic::x86_sse41_blendpd:
24359 case Intrinsic::x86_sse41_blendps:
24360 case Intrinsic::x86_sse41_blendvps:
24361 case Intrinsic::x86_sse41_blendvpd:
24362 case Intrinsic::x86_sse41_pblendvb: {
24363 SDValue Op0 = N->getOperand(1);
24364 SDValue Op1 = N->getOperand(2);
24365 SDValue Mask = N->getOperand(3);
24367 // Don't try to simplify this intrinsic if we don't have SSE4.1.
24368 if (!Subtarget->hasSSE41())
24371 // fold (blend A, A, Mask) -> A
24374 // fold (blend A, B, allZeros) -> A
24375 if (ISD::isBuildVectorAllZeros(Mask.getNode()))
24377 // fold (blend A, B, allOnes) -> B
24378 if (ISD::isBuildVectorAllOnes(Mask.getNode()))
24381 // Simplify the case where the mask is a constant i32 value.
24382 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Mask)) {
24383 if (C->isNullValue())
24385 if (C->isAllOnesValue())
24392 // Packed SSE2/AVX2 arithmetic shift immediate intrinsics.
24393 case Intrinsic::x86_sse2_psrai_w:
24394 case Intrinsic::x86_sse2_psrai_d:
24395 case Intrinsic::x86_avx2_psrai_w:
24396 case Intrinsic::x86_avx2_psrai_d:
24397 case Intrinsic::x86_sse2_psra_w:
24398 case Intrinsic::x86_sse2_psra_d:
24399 case Intrinsic::x86_avx2_psra_w:
24400 case Intrinsic::x86_avx2_psra_d: {
24401 SDValue Op0 = N->getOperand(1);
24402 SDValue Op1 = N->getOperand(2);
24403 EVT VT = Op0.getValueType();
24404 assert(VT.isVector() && "Expected a vector type!");
24406 if (isa<BuildVectorSDNode>(Op1))
24407 Op1 = Op1.getOperand(0);
24409 if (!isa<ConstantSDNode>(Op1))
24412 EVT SVT = VT.getVectorElementType();
24413 unsigned SVTBits = SVT.getSizeInBits();
24415 ConstantSDNode *CND = cast<ConstantSDNode>(Op1);
24416 const APInt &C = APInt(SVTBits, CND->getAPIntValue().getZExtValue());
24417 uint64_t ShAmt = C.getZExtValue();
24419 // Don't try to convert this shift into an ISD::SRA if the shift
24420 // count is bigger than or equal to the element size.
24421 if (ShAmt >= SVTBits)
24424 // Trivial case: if the shift count is zero, then fold this
24425 // into the first operand.
24429 // Replace this packed shift intrinsic with a target-independent shift node.
24431 SDValue Splat = DAG.getConstant(C, VT);
24432 return DAG.getNode(ISD::SRA, SDLoc(N), VT, Op0, Splat);
24437 /// PerformMulCombine - Optimize a single multiply by a constant into two
24438 /// multiplies in order to implement it with two cheaper instructions, e.g.
24439 /// LEA + SHL, LEA + LEA.
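/// e.g. mul x, 45 --> (x * 9) * 5, i.e. two LEAs, while mul x, 40 --> (x * 5) << 3,
/// i.e. an LEA followed by a shift.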
24440 static SDValue PerformMulCombine(SDNode *N, SelectionDAG &DAG,
24441 TargetLowering::DAGCombinerInfo &DCI) {
24442 if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer())
24445 EVT VT = N->getValueType(0);
24446 if (VT != MVT::i64 && VT != MVT::i32)
24449 ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1));
24452 uint64_t MulAmt = C->getZExtValue();
24453 if (isPowerOf2_64(MulAmt) || MulAmt == 3 || MulAmt == 5 || MulAmt == 9)
24456 uint64_t MulAmt1 = 0;
24457 uint64_t MulAmt2 = 0;
24458 if ((MulAmt % 9) == 0) {
24460 MulAmt2 = MulAmt / 9;
24461 } else if ((MulAmt % 5) == 0) {
24463 MulAmt2 = MulAmt / 5;
24464 } else if ((MulAmt % 3) == 0) {
24466 MulAmt2 = MulAmt / 3;
24469 (isPowerOf2_64(MulAmt2) || MulAmt2 == 3 || MulAmt2 == 5 || MulAmt2 == 9)){
24472 if (isPowerOf2_64(MulAmt2) &&
24473 !(N->hasOneUse() && N->use_begin()->getOpcode() == ISD::ADD))
24474 // If the second multiplier is a power of 2, issue it first. We want the
24475 // multiply by 3, 5, or 9 to be folded into the addressing mode unless the
24476 // lone use is an add.
24477 std::swap(MulAmt1, MulAmt2);
24480 if (isPowerOf2_64(MulAmt1))
24481 NewMul = DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
24482 DAG.getConstant(Log2_64(MulAmt1), MVT::i8));
24484 NewMul = DAG.getNode(X86ISD::MUL_IMM, DL, VT, N->getOperand(0),
24485 DAG.getConstant(MulAmt1, VT));
24487 if (isPowerOf2_64(MulAmt2))
24488 NewMul = DAG.getNode(ISD::SHL, DL, VT, NewMul,
24489 DAG.getConstant(Log2_64(MulAmt2), MVT::i8));
24491 NewMul = DAG.getNode(X86ISD::MUL_IMM, DL, VT, NewMul,
24492 DAG.getConstant(MulAmt2, VT));
24494 // Do not add new nodes to DAG combiner worklist.
24495 DCI.CombineTo(N, NewMul, false);
24500 static SDValue PerformSHLCombine(SDNode *N, SelectionDAG &DAG) {
24501 SDValue N0 = N->getOperand(0);
24502 SDValue N1 = N->getOperand(1);
24503 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
24504 EVT VT = N0.getValueType();
24506 // fold (shl (and (setcc_c), c1), c2) -> (and setcc_c, (c1 << c2))
24507 // since the result of setcc_c is all zero's or all ones.
24508 if (VT.isInteger() && !VT.isVector() &&
24509 N1C && N0.getOpcode() == ISD::AND &&
24510 N0.getOperand(1).getOpcode() == ISD::Constant) {
24511 SDValue N00 = N0.getOperand(0);
24512 if (N00.getOpcode() == X86ISD::SETCC_CARRY ||
24513 ((N00.getOpcode() == ISD::ANY_EXTEND ||
24514 N00.getOpcode() == ISD::ZERO_EXTEND) &&
24515 N00.getOperand(0).getOpcode() == X86ISD::SETCC_CARRY)) {
24516 APInt Mask = cast<ConstantSDNode>(N0.getOperand(1))->getAPIntValue();
24517 APInt ShAmt = N1C->getAPIntValue();
24518 Mask = Mask.shl(ShAmt);
24520 return DAG.getNode(ISD::AND, SDLoc(N), VT,
24521 N00, DAG.getConstant(Mask, VT));
24525 // Hardware support for vector shifts is sparse which makes us scalarize the
24526 // vector operations in many cases. Also, on Sandy Bridge ADD is faster than
24527 // SHL:
24528 // (shl V, 1) -> add V,V
24529 if (auto *N1BV = dyn_cast<BuildVectorSDNode>(N1))
24530 if (auto *N1SplatC = N1BV->getConstantSplatNode()) {
24531 assert(N0.getValueType().isVector() && "Invalid vector shift type");
24532 // We shift all of the values by one. In many cases we do not have
24533 // hardware support for this operation. This is better expressed as an ADD
24534 // of two values.
24535 if (N1SplatC->getZExtValue() == 1)
24536 return DAG.getNode(ISD::ADD, SDLoc(N), VT, N0, N0);
24542 /// \brief Returns a vector of 0s if the node in input is a vector logical
24543 /// shift by a constant amount which is known to be bigger than or equal
24544 /// to the vector element size in bits.
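/// e.g. a v4i32 logical shift by a splat amount of 32 or more always produces
/// zeroes, so the node can be replaced by a zero vector.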
24545 static SDValue performShiftToAllZeros(SDNode *N, SelectionDAG &DAG,
24546 const X86Subtarget *Subtarget) {
24547 EVT VT = N->getValueType(0);
24549 if (VT != MVT::v2i64 && VT != MVT::v4i32 && VT != MVT::v8i16 &&
24550 (!Subtarget->hasInt256() ||
24551 (VT != MVT::v4i64 && VT != MVT::v8i32 && VT != MVT::v16i16)))
24554 SDValue Amt = N->getOperand(1);
24556 if (auto *AmtBV = dyn_cast<BuildVectorSDNode>(Amt))
24557 if (auto *AmtSplat = AmtBV->getConstantSplatNode()) {
24558 APInt ShiftAmt = AmtSplat->getAPIntValue();
24559 unsigned MaxAmount = VT.getVectorElementType().getSizeInBits();
24561 // SSE2/AVX2 logical shifts always return a vector of 0s
24562 // if the shift amount is bigger than or equal to
24563 // the element size. The constant shift amount will be
24564 // encoded as an 8-bit immediate.
24565 if (ShiftAmt.trunc(8).uge(MaxAmount))
24566 return getZeroVector(VT, Subtarget, DAG, DL);
24572 /// PerformShiftCombine - Combine shifts.
24573 static SDValue PerformShiftCombine(SDNode* N, SelectionDAG &DAG,
24574 TargetLowering::DAGCombinerInfo &DCI,
24575 const X86Subtarget *Subtarget) {
24576 if (N->getOpcode() == ISD::SHL) {
24577 SDValue V = PerformSHLCombine(N, DAG);
24578 if (V.getNode()) return V;
24581 if (N->getOpcode() != ISD::SRA) {
24582 // Try to fold this logical shift into a zero vector.
24583 SDValue V = performShiftToAllZeros(N, DAG, Subtarget);
24584 if (V.getNode()) return V;
24590 // CMPEQCombine - Recognize the distinctive (AND (setcc ...) (setcc ..))
24591 // where both setccs reference the same FP CMP, and rewrite for CMPEQSS
24592 // and friends. Likewise for OR -> CMPNEQSS.
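// Roughly: (and (setcc_e (cmp a, b)), (setcc_np (cmp a, b))) is rewritten as a
// single CMPEQSS/CMPEQSD of a and b whose low bit is then extracted, avoiding
// two flag-consuming setccs.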
24593 static SDValue CMPEQCombine(SDNode *N, SelectionDAG &DAG,
24594 TargetLowering::DAGCombinerInfo &DCI,
24595 const X86Subtarget *Subtarget) {
24598 // SSE1 supports CMP{eq|ne}SS, and SSE2 added CMP{eq|ne}SD, but
24599 // we're requiring SSE2 for both.
24600 if (Subtarget->hasSSE2() && isAndOrOfSetCCs(SDValue(N, 0U), opcode)) {
24601 SDValue N0 = N->getOperand(0);
24602 SDValue N1 = N->getOperand(1);
24603 SDValue CMP0 = N0->getOperand(1);
24604 SDValue CMP1 = N1->getOperand(1);
24607 // The SETCCs should both refer to the same CMP.
24608 if (CMP0.getOpcode() != X86ISD::CMP || CMP0 != CMP1)
24611 SDValue CMP00 = CMP0->getOperand(0);
24612 SDValue CMP01 = CMP0->getOperand(1);
24613 EVT VT = CMP00.getValueType();
24615 if (VT == MVT::f32 || VT == MVT::f64) {
24616 bool ExpectingFlags = false;
24617 // Check for any users that want flags:
24618 for (SDNode::use_iterator UI = N->use_begin(), UE = N->use_end();
24619 !ExpectingFlags && UI != UE; ++UI)
24620 switch (UI->getOpcode()) {
24625 ExpectingFlags = true;
24627 case ISD::CopyToReg:
24628 case ISD::SIGN_EXTEND:
24629 case ISD::ZERO_EXTEND:
24630 case ISD::ANY_EXTEND:
24634 if (!ExpectingFlags) {
24635 enum X86::CondCode cc0 = (enum X86::CondCode)N0.getConstantOperandVal(0);
24636 enum X86::CondCode cc1 = (enum X86::CondCode)N1.getConstantOperandVal(0);
24638 if (cc1 == X86::COND_E || cc1 == X86::COND_NE) {
24639 X86::CondCode tmp = cc0;
24644 if ((cc0 == X86::COND_E && cc1 == X86::COND_NP) ||
24645 (cc0 == X86::COND_NE && cc1 == X86::COND_P)) {
24646 // FIXME: need symbolic constants for these magic numbers.
24647 // See X86ATTInstPrinter.cpp:printSSECC().
24648 unsigned x86cc = (cc0 == X86::COND_E) ? 0 : 4;
24649 if (Subtarget->hasAVX512()) {
24650 SDValue FSetCC = DAG.getNode(X86ISD::FSETCC, DL, MVT::i1, CMP00,
24651 CMP01, DAG.getConstant(x86cc, MVT::i8));
24652 if (N->getValueType(0) != MVT::i1)
24653 return DAG.getNode(ISD::ZERO_EXTEND, DL, N->getValueType(0),
24657 SDValue OnesOrZeroesF = DAG.getNode(X86ISD::FSETCC, DL,
24658 CMP00.getValueType(), CMP00, CMP01,
24659 DAG.getConstant(x86cc, MVT::i8));
24661 bool is64BitFP = (CMP00.getValueType() == MVT::f64);
24662 MVT IntVT = is64BitFP ? MVT::i64 : MVT::i32;
24664 if (is64BitFP && !Subtarget->is64Bit()) {
24665 // On a 32-bit target, we cannot bitcast the 64-bit float to a
24666 // 64-bit integer, since that's not a legal type. Since
24667 // OnesOrZeroesF is all ones or all zeroes, we don't need all the
24668 // bits, but can do this little dance to extract the lowest 32 bits
24669 // and work with those going forward.
24670 SDValue Vector64 = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v2f64,
24672 SDValue Vector32 = DAG.getNode(ISD::BITCAST, DL, MVT::v4f32,
24674 OnesOrZeroesF = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32,
24675 Vector32, DAG.getIntPtrConstant(0));
24679 SDValue OnesOrZeroesI = DAG.getNode(ISD::BITCAST, DL, IntVT, OnesOrZeroesF);
24680 SDValue ANDed = DAG.getNode(ISD::AND, DL, IntVT, OnesOrZeroesI,
24681 DAG.getConstant(1, IntVT));
24682 SDValue OneBitOfTruth = DAG.getNode(ISD::TRUNCATE, DL, MVT::i8, ANDed);
24683 return OneBitOfTruth;
24691 /// CanFoldXORWithAllOnes - Test whether the XOR operand is an all-ones vector
24692 /// so it can be folded inside ANDNP.
24693 static bool CanFoldXORWithAllOnes(const SDNode *N) {
24694 EVT VT = N->getValueType(0);
24696 // Match direct AllOnes for 128 and 256-bit vectors
24697 if (ISD::isBuildVectorAllOnes(N))
24700 // Look through a bit convert.
24701 if (N->getOpcode() == ISD::BITCAST)
24702 N = N->getOperand(0).getNode();
24704 // Sometimes the operand may come from an insert_subvector building a 256-bit
24705 // all-ones vector.
24706 if (VT.is256BitVector() &&
24707 N->getOpcode() == ISD::INSERT_SUBVECTOR) {
24708 SDValue V1 = N->getOperand(0);
24709 SDValue V2 = N->getOperand(1);
24711 if (V1.getOpcode() == ISD::INSERT_SUBVECTOR &&
24712 V1.getOperand(0).getOpcode() == ISD::UNDEF &&
24713 ISD::isBuildVectorAllOnes(V1.getOperand(1).getNode()) &&
24714 ISD::isBuildVectorAllOnes(V2.getNode()))
24721 // On AVX/AVX2 the type v8i1 is legalized to v8i16, which is an XMM sized
24722 // register. In most cases we actually compare or select YMM-sized registers
24723 // and mixing the two types creates horrible code. This method optimizes
24724 // some of the transition sequences.
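// Roughly: (ext (op (trunc A), B)) is rewritten to perform the logic op
// directly on the wide type, followed by the appropriate mask or
// sign_extend_inreg, so the values stay in YMM-sized registers.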
24725 static SDValue WidenMaskArithmetic(SDNode *N, SelectionDAG &DAG,
24726 TargetLowering::DAGCombinerInfo &DCI,
24727 const X86Subtarget *Subtarget) {
24728 EVT VT = N->getValueType(0);
24729 if (!VT.is256BitVector())
24732 assert((N->getOpcode() == ISD::ANY_EXTEND ||
24733 N->getOpcode() == ISD::ZERO_EXTEND ||
24734 N->getOpcode() == ISD::SIGN_EXTEND) && "Invalid Node");
24736 SDValue Narrow = N->getOperand(0);
24737 EVT NarrowVT = Narrow->getValueType(0);
24738 if (!NarrowVT.is128BitVector())
24741 if (Narrow->getOpcode() != ISD::XOR &&
24742 Narrow->getOpcode() != ISD::AND &&
24743 Narrow->getOpcode() != ISD::OR)
24746 SDValue N0 = Narrow->getOperand(0);
24747 SDValue N1 = Narrow->getOperand(1);
24750 // The Left side has to be a trunc.
24751 if (N0.getOpcode() != ISD::TRUNCATE)
24754 // The type of the truncated inputs.
24755 EVT WideVT = N0->getOperand(0)->getValueType(0);
24759 // The right side has to be a 'trunc' or a constant vector.
24760 bool RHSTrunc = N1.getOpcode() == ISD::TRUNCATE;
24761 ConstantSDNode *RHSConstSplat = nullptr;
24762 if (auto *RHSBV = dyn_cast<BuildVectorSDNode>(N1))
24763 RHSConstSplat = RHSBV->getConstantSplatNode();
24764 if (!RHSTrunc && !RHSConstSplat)
24767 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
24769 if (!TLI.isOperationLegalOrPromote(Narrow->getOpcode(), WideVT))
24772 // Set N0 and N1 to hold the inputs to the new wide operation.
24773 N0 = N0->getOperand(0);
24774 if (RHSConstSplat) {
24775 N1 = DAG.getNode(ISD::ZERO_EXTEND, DL, WideVT.getScalarType(),
24776 SDValue(RHSConstSplat, 0));
24777 SmallVector<SDValue, 8> C(WideVT.getVectorNumElements(), N1);
24778 N1 = DAG.getNode(ISD::BUILD_VECTOR, DL, WideVT, C);
24779 } else if (RHSTrunc) {
24780 N1 = N1->getOperand(0);
24783 // Generate the wide operation.
24784 SDValue Op = DAG.getNode(Narrow->getOpcode(), DL, WideVT, N0, N1);
24785 unsigned Opcode = N->getOpcode();
24787 case ISD::ANY_EXTEND:
24789 case ISD::ZERO_EXTEND: {
24790 unsigned InBits = NarrowVT.getScalarType().getSizeInBits();
24791 APInt Mask = APInt::getAllOnesValue(InBits);
24792 Mask = Mask.zext(VT.getScalarType().getSizeInBits());
24793 return DAG.getNode(ISD::AND, DL, VT,
24794 Op, DAG.getConstant(Mask, VT));
24796 case ISD::SIGN_EXTEND:
24797 return DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, VT,
24798 Op, DAG.getValueType(NarrowVT));
24800 llvm_unreachable("Unexpected opcode");
24804 static SDValue PerformAndCombine(SDNode *N, SelectionDAG &DAG,
24805 TargetLowering::DAGCombinerInfo &DCI,
24806 const X86Subtarget *Subtarget) {
24807 EVT VT = N->getValueType(0);
24808 if (DCI.isBeforeLegalizeOps())
24811 SDValue R = CMPEQCombine(N, DAG, DCI, Subtarget);
24815 // Create BEXTR instructions
24816 // BEXTR is ((X >> imm) & (2**size-1))
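// e.g. (and (srl x, 4), 0xff) --> BEXTR x, 0x0804 (length 8 in control bits
// 15:8, start bit 4 in control bits 7:0).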
24817 if (VT == MVT::i32 || VT == MVT::i64) {
24818 SDValue N0 = N->getOperand(0);
24819 SDValue N1 = N->getOperand(1);
24822 // Check for BEXTR.
24823 if ((Subtarget->hasBMI() || Subtarget->hasTBM()) &&
24824 (N0.getOpcode() == ISD::SRA || N0.getOpcode() == ISD::SRL)) {
24825 ConstantSDNode *MaskNode = dyn_cast<ConstantSDNode>(N1);
24826 ConstantSDNode *ShiftNode = dyn_cast<ConstantSDNode>(N0.getOperand(1));
24827 if (MaskNode && ShiftNode) {
24828 uint64_t Mask = MaskNode->getZExtValue();
24829 uint64_t Shift = ShiftNode->getZExtValue();
24830 if (isMask_64(Mask)) {
24831 uint64_t MaskSize = countPopulation(Mask);
24832 if (Shift + MaskSize <= VT.getSizeInBits())
24833 return DAG.getNode(X86ISD::BEXTR, DL, VT, N0.getOperand(0),
24834 DAG.getConstant(Shift | (MaskSize << 8), VT));
24842 // Want to form ANDNP nodes:
24843 // 1) In the hopes of then easily combining them with OR and AND nodes
24844 // to form PBLEND/PSIGN.
24845 // 2) To match ANDN packed intrinsics
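// e.g. (and (xor A, all-ones), B) --> (ANDNP A, B).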
24846 if (VT != MVT::v2i64 && VT != MVT::v4i64)
24849 SDValue N0 = N->getOperand(0);
24850 SDValue N1 = N->getOperand(1);
24853 // Check LHS for vnot
24854 if (N0.getOpcode() == ISD::XOR &&
24855 //ISD::isBuildVectorAllOnes(N0.getOperand(1).getNode()))
24856 CanFoldXORWithAllOnes(N0.getOperand(1).getNode()))
24857 return DAG.getNode(X86ISD::ANDNP, DL, VT, N0.getOperand(0), N1);
24859 // Check RHS for vnot
24860 if (N1.getOpcode() == ISD::XOR &&
24861 //ISD::isBuildVectorAllOnes(N1.getOperand(1).getNode()))
24862 CanFoldXORWithAllOnes(N1.getOperand(1).getNode()))
24863 return DAG.getNode(X86ISD::ANDNP, DL, VT, N1.getOperand(0), N0);
24868 static SDValue PerformOrCombine(SDNode *N, SelectionDAG &DAG,
24869 TargetLowering::DAGCombinerInfo &DCI,
24870 const X86Subtarget *Subtarget) {
24871 if (DCI.isBeforeLegalizeOps())
24874 SDValue R = CMPEQCombine(N, DAG, DCI, Subtarget);
24878 SDValue N0 = N->getOperand(0);
24879 SDValue N1 = N->getOperand(1);
24880 EVT VT = N->getValueType(0);
24882 // look for psign/blend
24883 if (VT == MVT::v2i64 || VT == MVT::v4i64) {
24884 if (!Subtarget->hasSSSE3() ||
24885 (VT == MVT::v4i64 && !Subtarget->hasInt256()))
24888 // Canonicalize pandn to RHS
24889 if (N0.getOpcode() == X86ISD::ANDNP)
24890 std::swap(N0, N1);
24891 // or (and (m, y), (pandn m, x))
24892 if (N0.getOpcode() == ISD::AND && N1.getOpcode() == X86ISD::ANDNP) {
24893 SDValue Mask = N1.getOperand(0);
24894 SDValue X = N1.getOperand(1);
24896 if (N0.getOperand(0) == Mask)
24897 Y = N0.getOperand(1);
24898 if (N0.getOperand(1) == Mask)
24899 Y = N0.getOperand(0);
24901 // Check to see if the mask appeared in both the AND and the ANDNP.
24905 // Validate that X, Y, and Mask are BIT_CONVERTS, and see through them.
24906 // Look through mask bitcast.
24907 if (Mask.getOpcode() == ISD::BITCAST)
24908 Mask = Mask.getOperand(0);
24909 if (X.getOpcode() == ISD::BITCAST)
24910 X = X.getOperand(0);
24911 if (Y.getOpcode() == ISD::BITCAST)
24912 Y = Y.getOperand(0);
24914 EVT MaskVT = Mask.getValueType();
24916 // Validate that the Mask operand is a vector sra node.
24917 // FIXME: what to do for bytes, since there is a psignb/pblendvb, but
24918 // there is no psrai.b
24919 unsigned EltBits = MaskVT.getVectorElementType().getSizeInBits();
24920 unsigned SraAmt = ~0;
24921 if (Mask.getOpcode() == ISD::SRA) {
24922 if (auto *AmtBV = dyn_cast<BuildVectorSDNode>(Mask.getOperand(1)))
24923 if (auto *AmtConst = AmtBV->getConstantSplatNode())
24924 SraAmt = AmtConst->getZExtValue();
24925 } else if (Mask.getOpcode() == X86ISD::VSRAI) {
24926 SDValue SraC = Mask.getOperand(1);
24927 SraAmt = cast<ConstantSDNode>(SraC)->getZExtValue();
24929 if ((SraAmt + 1) != EltBits)
24934 // Now we know we at least have a plendvb with the mask val. See if
24935 // we can form a psignb/w/d.
24936 // psign = x.type == y.type == mask.type && y = sub(0, x);
24937 if (Y.getOpcode() == ISD::SUB && Y.getOperand(1) == X &&
24938 ISD::isBuildVectorAllZeros(Y.getOperand(0).getNode()) &&
24939 X.getValueType() == MaskVT && Y.getValueType() == MaskVT) {
24940 assert((EltBits == 8 || EltBits == 16 || EltBits == 32) &&
24941 "Unsupported VT for PSIGN");
24942 Mask = DAG.getNode(X86ISD::PSIGN, DL, MaskVT, X, Mask.getOperand(0));
24943 return DAG.getNode(ISD::BITCAST, DL, VT, Mask);
24945 // PBLENDVB only available on SSE 4.1
24946 if (!Subtarget->hasSSE41())
24949 EVT BlendVT = (VT == MVT::v4i64) ? MVT::v32i8 : MVT::v16i8;
24951 X = DAG.getNode(ISD::BITCAST, DL, BlendVT, X);
24952 Y = DAG.getNode(ISD::BITCAST, DL, BlendVT, Y);
24953 Mask = DAG.getNode(ISD::BITCAST, DL, BlendVT, Mask);
24954 Mask = DAG.getNode(ISD::VSELECT, DL, BlendVT, Mask, Y, X);
24955 return DAG.getNode(ISD::BITCAST, DL, VT, Mask);
24959 if (VT != MVT::i16 && VT != MVT::i32 && VT != MVT::i64)
24962 // fold (or (x << c) | (y >> (64 - c))) ==> (shld64 x, y, c)
24963 MachineFunction &MF = DAG.getMachineFunction();
24964 bool OptForSize =
24965 MF.getFunction()->hasFnAttribute(Attribute::OptimizeForSize);
24967 // SHLD/SHRD instructions have lower register pressure, but on some
24968 // platforms they have higher latency than the equivalent
24969 // series of shifts/or that would otherwise be generated.
24970 // Don't fold (or (x << c) | (y >> (64 - c))) if SHLD/SHRD instructions
24971 // have higher latencies and we are not optimizing for size.
24972 if (!OptForSize && Subtarget->isSHLDSlow())
24975 if (N0.getOpcode() == ISD::SRL && N1.getOpcode() == ISD::SHL)
24977 if (N0.getOpcode() != ISD::SHL || N1.getOpcode() != ISD::SRL)
24979 if (!N0.hasOneUse() || !N1.hasOneUse())
24982 SDValue ShAmt0 = N0.getOperand(1);
24983 if (ShAmt0.getValueType() != MVT::i8)
24985 SDValue ShAmt1 = N1.getOperand(1);
24986 if (ShAmt1.getValueType() != MVT::i8)
24988 if (ShAmt0.getOpcode() == ISD::TRUNCATE)
24989 ShAmt0 = ShAmt0.getOperand(0);
24990 if (ShAmt1.getOpcode() == ISD::TRUNCATE)
24991 ShAmt1 = ShAmt1.getOperand(0);
24994 unsigned Opc = X86ISD::SHLD;
24995 SDValue Op0 = N0.getOperand(0);
24996 SDValue Op1 = N1.getOperand(0);
24997 if (ShAmt0.getOpcode() == ISD::SUB) {
24998 Opc = X86ISD::SHRD;
24999 std::swap(Op0, Op1);
25000 std::swap(ShAmt0, ShAmt1);
25003 unsigned Bits = VT.getSizeInBits();
25004 if (ShAmt1.getOpcode() == ISD::SUB) {
25005 SDValue Sum = ShAmt1.getOperand(0);
25006 if (ConstantSDNode *SumC = dyn_cast<ConstantSDNode>(Sum)) {
25007 SDValue ShAmt1Op1 = ShAmt1.getOperand(1);
25008 if (ShAmt1Op1.getNode()->getOpcode() == ISD::TRUNCATE)
25009 ShAmt1Op1 = ShAmt1Op1.getOperand(0);
25010 if (SumC->getSExtValue() == Bits && ShAmt1Op1 == ShAmt0)
25011 return DAG.getNode(Opc, DL, VT,
25013 DAG.getNode(ISD::TRUNCATE, DL,
25016 } else if (ConstantSDNode *ShAmt1C = dyn_cast<ConstantSDNode>(ShAmt1)) {
25017 ConstantSDNode *ShAmt0C = dyn_cast<ConstantSDNode>(ShAmt0);
25019 ShAmt0C->getSExtValue() + ShAmt1C->getSExtValue() == Bits)
25020 return DAG.getNode(Opc, DL, VT,
25021 N0.getOperand(0), N1.getOperand(0),
25022 DAG.getNode(ISD::TRUNCATE, DL,
25029 // Generate NEG and CMOV for integer abs.
25030 static SDValue performIntegerAbsCombine(SDNode *N, SelectionDAG &DAG) {
25031 EVT VT = N->getValueType(0);
25033 // Since X86 does not have CMOV for 8-bit integer, we don't convert
25034 // 8-bit integer abs to NEG and CMOV.
25035 if (VT.isInteger() && VT.getSizeInBits() == 8)
25038 SDValue N0 = N->getOperand(0);
25039 SDValue N1 = N->getOperand(1);
25042 // Check pattern of XOR(ADD(X,Y), Y) where Y is SRA(X, size(X)-1)
25043 // and change it to SUB and CMOV.
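// i.e. the shift-based abs idiom t = x >> (bits - 1); (x + t) ^ t is replaced
// by neg = 0 - x (which sets EFLAGS), followed by a CMOV that picks neg when
// the subtraction result is >= 0 and x otherwise.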
25044 if (VT.isInteger() && N->getOpcode() == ISD::XOR &&
25045 N0.getOpcode() == ISD::ADD &&
25046 N0.getOperand(1) == N1 &&
25047 N1.getOpcode() == ISD::SRA &&
25048 N1.getOperand(0) == N0.getOperand(0))
25049 if (ConstantSDNode *Y1C = dyn_cast<ConstantSDNode>(N1.getOperand(1)))
25050 if (Y1C->getAPIntValue() == VT.getSizeInBits()-1) {
25051 // Generate SUB & CMOV.
25052 SDValue Neg = DAG.getNode(X86ISD::SUB, DL, DAG.getVTList(VT, MVT::i32),
25053 DAG.getConstant(0, VT), N0.getOperand(0));
25055 SDValue Ops[] = { N0.getOperand(0), Neg,
25056 DAG.getConstant(X86::COND_GE, MVT::i8),
25057 SDValue(Neg.getNode(), 1) };
25058 return DAG.getNode(X86ISD::CMOV, DL, DAG.getVTList(VT, MVT::Glue), Ops);
25063 // PerformXorCombine - Attempts to turn XOR nodes into BLSMSK nodes
25064 static SDValue PerformXorCombine(SDNode *N, SelectionDAG &DAG,
25065 TargetLowering::DAGCombinerInfo &DCI,
25066 const X86Subtarget *Subtarget) {
25067 if (DCI.isBeforeLegalizeOps())
25070 if (Subtarget->hasCMov()) {
25071 SDValue RV = performIntegerAbsCombine(N, DAG);
25079 /// PerformLOADCombine - Do target-specific dag combines on LOAD nodes.
25080 static SDValue PerformLOADCombine(SDNode *N, SelectionDAG &DAG,
25081 TargetLowering::DAGCombinerInfo &DCI,
25082 const X86Subtarget *Subtarget) {
25083 LoadSDNode *Ld = cast<LoadSDNode>(N);
25084 EVT RegVT = Ld->getValueType(0);
25085 EVT MemVT = Ld->getMemoryVT();
25087 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
25089 // For chips with slow 32-byte unaligned loads, break the 32-byte operation
25090 // into two 16-byte operations.
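// e.g. an unaligned 256-bit load such as v8f32 is rewritten as two 16-byte
// loads whose results are inserted into the low and high 128-bit halves of
// the original vector type.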
25091 ISD::LoadExtType Ext = Ld->getExtensionType();
25092 unsigned Alignment = Ld->getAlignment();
25093 bool IsAligned = Alignment == 0 || Alignment >= MemVT.getSizeInBits()/8;
25094 if (RegVT.is256BitVector() && Subtarget->isUnalignedMem32Slow() &&
25095 !DCI.isBeforeLegalizeOps() && !IsAligned && Ext == ISD::NON_EXTLOAD) {
25096 unsigned NumElems = RegVT.getVectorNumElements();
25100 SDValue Ptr = Ld->getBasePtr();
25101 SDValue Increment = DAG.getConstant(16, TLI.getPointerTy());
25103 EVT HalfVT = EVT::getVectorVT(*DAG.getContext(), MemVT.getScalarType(),
25105 SDValue Load1 = DAG.getLoad(HalfVT, dl, Ld->getChain(), Ptr,
25106 Ld->getPointerInfo(), Ld->isVolatile(),
25107 Ld->isNonTemporal(), Ld->isInvariant(),
25109 Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, Increment);
25110 SDValue Load2 = DAG.getLoad(HalfVT, dl, Ld->getChain(), Ptr,
25111 Ld->getPointerInfo(), Ld->isVolatile(),
25112 Ld->isNonTemporal(), Ld->isInvariant(),
25113 std::min(16U, Alignment));
25114 SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
25116 Load2.getValue(1));
25118 SDValue NewVec = DAG.getUNDEF(RegVT);
25119 NewVec = Insert128BitVector(NewVec, Load1, 0, DAG, dl);
25120 NewVec = Insert128BitVector(NewVec, Load2, NumElems/2, DAG, dl);
25121 return DCI.CombineTo(N, NewVec, TF, true);
25127 /// PerformMLOADCombine - Resolve extending loads
25128 static SDValue PerformMLOADCombine(SDNode *N, SelectionDAG &DAG,
25129 TargetLowering::DAGCombinerInfo &DCI,
25130 const X86Subtarget *Subtarget) {
25131 MaskedLoadSDNode *Mld = cast<MaskedLoadSDNode>(N);
25132 if (Mld->getExtensionType() != ISD::SEXTLOAD)
25135 EVT VT = Mld->getValueType(0);
25136 unsigned NumElems = VT.getVectorNumElements();
25137 EVT LdVT = Mld->getMemoryVT();
25140 assert(LdVT != VT && "Cannot extend to the same type");
25141 unsigned ToSz = VT.getVectorElementType().getSizeInBits();
25142 unsigned FromSz = LdVT.getVectorElementType().getSizeInBits();
25143 // The From and To sizes and the element count must be powers of two.
25144 assert (isPowerOf2_32(NumElems * FromSz * ToSz) &&
25145 "Unexpected size for extending masked load");
25147 unsigned SizeRatio = ToSz / FromSz;
25148 assert(SizeRatio * NumElems * FromSz == VT.getSizeInBits());
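// For example, a sign-extending masked load from v8i16 in memory to v8i32 in
// a register has FromSz = 16, ToSz = 32 and SizeRatio = 2.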
25150 // Create a type on which we perform the shuffle
25151 EVT WideVecVT = EVT::getVectorVT(*DAG.getContext(),
25152 LdVT.getScalarType(), NumElems*SizeRatio);
25153 assert(WideVecVT.getSizeInBits() == VT.getSizeInBits());
25155 // Convert Src0 value
25156 SDValue WideSrc0 = DAG.getNode(ISD::BITCAST, dl, WideVecVT, Mld->getSrc0());
25157 if (Mld->getSrc0().getOpcode() != ISD::UNDEF) {
25158 SmallVector<int, 16> ShuffleVec(NumElems * SizeRatio, -1);
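// Gather the truncated (low) part of each Src0 element into the first NumElems
// lanes of the wide vector; the remaining lanes stay undef (-1).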
25159 for (unsigned i = 0; i != NumElems; ++i)
25160 ShuffleVec[i] = i * SizeRatio;
25162 // Can't shuffle using an illegal type.
25163 assert (DAG.getTargetLoweringInfo().isTypeLegal(WideVecVT)
25164 && "WideVecVT should be legal");
25165 WideSrc0 = DAG.getVectorShuffle(WideVecVT, dl, WideSrc0,
25166 DAG.getUNDEF(WideVecVT), &ShuffleVec[0]);
25168 // Prepare the new mask
25170 SDValue Mask = Mld->getMask();
25171 if (Mask.getValueType() == VT) {
25172 // Mask and original value have the same type
25173 NewMask = DAG.getNode(ISD::BITCAST, dl, WideVecVT, Mask);
25174 SmallVector<int, 16> ShuffleVec(NumElems * SizeRatio, -1);
25175 for (unsigned i = 0; i != NumElems; ++i)
25176 ShuffleVec[i] = i * SizeRatio;
25177 for (unsigned i = NumElems; i != NumElems*SizeRatio; ++i)
25178 ShuffleVec[i] = NumElems*SizeRatio;
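// Shuffle indices of NumElems*SizeRatio and above select from the all-zero
// second operand below, so the extra lanes of the widened mask are cleared.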
25179 NewMask = DAG.getVectorShuffle(WideVecVT, dl, NewMask,
25180 DAG.getConstant(0, WideVecVT),
25184 assert(Mask.getValueType().getVectorElementType() == MVT::i1);
25185 unsigned WidenNumElts = NumElems*SizeRatio;
25186 unsigned MaskNumElts = VT.getVectorNumElements();
25187 EVT NewMaskVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
25190 unsigned NumConcat = WidenNumElts / MaskNumElts;
25191 SmallVector<SDValue, 16> Ops(NumConcat);
25192 SDValue ZeroVal = DAG.getConstant(0, Mask.getValueType());
25194 for (unsigned i = 1; i != NumConcat; ++i)
25197 NewMask = DAG.getNode(ISD::CONCAT_VECTORS, dl, NewMaskVT, Ops);
25200 SDValue WideLd = DAG.getMaskedLoad(WideVecVT, dl, Mld->getChain(),
25201 Mld->getBasePtr(), NewMask, WideSrc0,
25202 Mld->getMemoryVT(), Mld->getMemOperand(),
25204 SDValue NewVec = DAG.getNode(X86ISD::VSEXT, dl, VT, WideLd);
25205 return DCI.CombineTo(N, NewVec, WideLd.getValue(1), true);
25208 /// PerformMSTORECombine - Resolve truncating stores
25209 static SDValue PerformMSTORECombine(SDNode *N, SelectionDAG &DAG,
25210 const X86Subtarget *Subtarget) {
25211 MaskedStoreSDNode *Mst = cast<MaskedStoreSDNode>(N);
25212 if (!Mst->isTruncatingStore())
25215 EVT VT = Mst->getValue().getValueType();
25216 unsigned NumElems = VT.getVectorNumElements();
25217 EVT StVT = Mst->getMemoryVT();
25220 assert(StVT != VT && "Cannot truncate to the same type");
25221 unsigned FromSz = VT.getVectorElementType().getSizeInBits();
25222 unsigned ToSz = StVT.getVectorElementType().getSizeInBits();
25224 // The From and To sizes and the element count must be powers of two.
25225 assert (isPowerOf2_32(NumElems * FromSz * ToSz) &&
25226 "Unexpected size for truncating masked store");
25227 // We are going to use the original vector elt for storing.
25228 // Accumulated smaller vector elements must be a multiple of the store size.
25229 assert (((NumElems * FromSz) % ToSz) == 0 &&
25230 "Unexpected ratio for truncating masked store");
25232 unsigned SizeRatio = FromSz / ToSz;
25233 assert(SizeRatio * NumElems * ToSz == VT.getSizeInBits());
25235 // Create a type on which we perform the shuffle
25236 EVT WideVecVT = EVT::getVectorVT(*DAG.getContext(),
25237 StVT.getScalarType(), NumElems*SizeRatio);
25239 assert(WideVecVT.getSizeInBits() == VT.getSizeInBits());
25241 SDValue WideVec = DAG.getNode(ISD::BITCAST, dl, WideVecVT, Mst->getValue());
25242 SmallVector<int, 16> ShuffleVec(NumElems * SizeRatio, -1);
25243 for (unsigned i = 0; i != NumElems; ++i)
25244 ShuffleVec[i] = i * SizeRatio;
25246 // Can't shuffle using an illegal type.
25247 assert (DAG.getTargetLoweringInfo().isTypeLegal(WideVecVT)
25248 && "WideVecVT should be legal");
25250 SDValue TruncatedVal = DAG.getVectorShuffle(WideVecVT, dl, WideVec,
25251 DAG.getUNDEF(WideVecVT),
25255 SDValue Mask = Mst->getMask();
25256 if (Mask.getValueType() == VT) {
25257 // Mask and original value have the same type
25258 NewMask = DAG.getNode(ISD::BITCAST, dl, WideVecVT, Mask);
25259 for (unsigned i = 0; i != NumElems; ++i)
25260 ShuffleVec[i] = i * SizeRatio;
25261 for (unsigned i = NumElems; i != NumElems*SizeRatio; ++i)
25262 ShuffleVec[i] = NumElems*SizeRatio;
25263 NewMask = DAG.getVectorShuffle(WideVecVT, dl, NewMask,
25264 DAG.getConstant(0, WideVecVT),
25268 assert(Mask.getValueType().getVectorElementType() == MVT::i1);
25269 unsigned WidenNumElts = NumElems*SizeRatio;
25270 unsigned MaskNumElts = VT.getVectorNumElements();
25271 EVT NewMaskVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
25274 unsigned NumConcat = WidenNumElts / MaskNumElts;
25275 SmallVector<SDValue, 16> Ops(NumConcat);
25276 SDValue ZeroVal = DAG.getConstant(0, Mask.getValueType());
25278 for (unsigned i = 1; i != NumConcat; ++i)
25281 NewMask = DAG.getNode(ISD::CONCAT_VECTORS, dl, NewMaskVT, Ops);
25284 return DAG.getMaskedStore(Mst->getChain(), dl, TruncatedVal, Mst->getBasePtr(),
25285 NewMask, StVT, Mst->getMemOperand(), false);
25287 /// PerformSTORECombine - Do target-specific dag combines on STORE nodes.
25288 static SDValue PerformSTORECombine(SDNode *N, SelectionDAG &DAG,
25289 const X86Subtarget *Subtarget) {
25290 StoreSDNode *St = cast<StoreSDNode>(N);
25291 EVT VT = St->getValue().getValueType();
25292 EVT StVT = St->getMemoryVT();
25294 SDValue StoredVal = St->getOperand(1);
25295 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
25297 // If we are saving a concatenation of two XMM registers and 32-byte stores
25298 // are slow, such as on Sandy Bridge, perform two 16-byte stores.
25299 unsigned Alignment = St->getAlignment();
25300 bool IsAligned = Alignment == 0 || Alignment >= VT.getSizeInBits()/8;
25301 if (VT.is256BitVector() && Subtarget->isUnalignedMem32Slow() &&
25302 StVT == VT && !IsAligned) {
25303 unsigned NumElems = VT.getVectorNumElements();
25307 SDValue Value0 = Extract128BitVector(StoredVal, 0, DAG, dl);
25308 SDValue Value1 = Extract128BitVector(StoredVal, NumElems/2, DAG, dl);
25310 SDValue Stride = DAG.getConstant(16, TLI.getPointerTy());
25311 SDValue Ptr0 = St->getBasePtr();
25312 SDValue Ptr1 = DAG.getNode(ISD::ADD, dl, Ptr0.getValueType(), Ptr0, Stride);
25314 SDValue Ch0 = DAG.getStore(St->getChain(), dl, Value0, Ptr0,
25315 St->getPointerInfo(), St->isVolatile(),
25316 St->isNonTemporal(), Alignment);
25317 SDValue Ch1 = DAG.getStore(St->getChain(), dl, Value1, Ptr1,
25318 St->getPointerInfo(), St->isVolatile(),
25319 St->isNonTemporal(),
25320 std::min(16U, Alignment));
25321 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Ch0, Ch1);
25324 // Optimize trunc store (of multiple scalars) to shuffle and store.
25325 // First, pack all of the elements in one place. Next, store to memory
25326 // in fewer chunks.
25327 if (St->isTruncatingStore() && VT.isVector()) {
25328 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
25329 unsigned NumElems = VT.getVectorNumElements();
25330 assert(StVT != VT && "Cannot truncate to the same type");
25331 unsigned FromSz = VT.getVectorElementType().getSizeInBits();
25332 unsigned ToSz = StVT.getVectorElementType().getSizeInBits();
25334 // The From and To sizes and the element count must be powers of two.
25335 if (!isPowerOf2_32(NumElems * FromSz * ToSz)) return SDValue();
25336 // We are going to use the original vector elt for storing.
25337 // Accumulated smaller vector elements must be a multiple of the store size.
25338 if (0 != (NumElems * FromSz) % ToSz) return SDValue();
25340 unsigned SizeRatio = FromSz / ToSz;
25342 assert(SizeRatio * NumElems * ToSz == VT.getSizeInBits());
25344 // Create a type on which we perform the shuffle
25345 EVT WideVecVT = EVT::getVectorVT(*DAG.getContext(),
25346 StVT.getScalarType(), NumElems*SizeRatio);
25348 assert(WideVecVT.getSizeInBits() == VT.getSizeInBits());
25350 SDValue WideVec = DAG.getNode(ISD::BITCAST, dl, WideVecVT, St->getValue());
25351 SmallVector<int, 8> ShuffleVec(NumElems * SizeRatio, -1);
25352 for (unsigned i = 0; i != NumElems; ++i)
25353 ShuffleVec[i] = i * SizeRatio;
25355 // Can't shuffle using an illegal type.
25356 if (!TLI.isTypeLegal(WideVecVT))
25359 SDValue Shuff = DAG.getVectorShuffle(WideVecVT, dl, WideVec,
25360 DAG.getUNDEF(WideVecVT),
25362 // At this point all of the data is stored at the bottom of the
25363 // register. We now need to save it to mem.
25365 // Find the largest store unit
25366 MVT StoreType = MVT::i8;
25367 for (MVT Tp : MVT::integer_valuetypes()) {
25368 if (TLI.isTypeLegal(Tp) && Tp.getSizeInBits() <= NumElems * ToSz)
25372 // On 32-bit systems, we can't save 64-bit integers. Try bitcasting to f64.
25373 if (TLI.isTypeLegal(MVT::f64) && StoreType.getSizeInBits() < 64 &&
25374 (64 <= NumElems * ToSz))
25375 StoreType = MVT::f64;
25377 // Bitcast the original vector into a vector of store-size units
25378 EVT StoreVecVT = EVT::getVectorVT(*DAG.getContext(),
25379 StoreType, VT.getSizeInBits()/StoreType.getSizeInBits());
25380 assert(StoreVecVT.getSizeInBits() == VT.getSizeInBits());
25381 SDValue ShuffWide = DAG.getNode(ISD::BITCAST, dl, StoreVecVT, Shuff);
25382 SmallVector<SDValue, 8> Chains;
25383 SDValue Increment = DAG.getConstant(StoreType.getSizeInBits()/8,
25384 TLI.getPointerTy());
25385 SDValue Ptr = St->getBasePtr();
25387 // Perform one or more big stores into memory.
25388 for (unsigned i=0, e=(ToSz*NumElems)/StoreType.getSizeInBits(); i!=e; ++i) {
25389 SDValue SubVec = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl,
25390 StoreType, ShuffWide,
25391 DAG.getIntPtrConstant(i));
25392 SDValue Ch = DAG.getStore(St->getChain(), dl, SubVec, Ptr,
25393 St->getPointerInfo(), St->isVolatile(),
25394 St->isNonTemporal(), St->getAlignment());
25395 Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, Increment);
25396 Chains.push_back(Ch);
25399 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Chains);
25402 // Turn load->store of MMX types into GPR load/stores. This avoids clobbering
25403 // the FP state in cases where an emms may be missing.
25404 // A preferable solution to the general problem is to figure out the right
25405 // places to insert EMMS. This qualifies as a quick hack.
25407 // Similarly, turn load->store of i64 into double load/stores in 32-bit mode.
25408 if (VT.getSizeInBits() != 64)
25411 const Function *F = DAG.getMachineFunction().getFunction();
25412 bool NoImplicitFloatOps = F->hasFnAttribute(Attribute::NoImplicitFloat);
25413 bool F64IsLegal = !DAG.getTarget().Options.UseSoftFloat && !NoImplicitFloatOps
25414 && Subtarget->hasSSE2();
25415 if ((VT.isVector() ||
25416 (VT == MVT::i64 && F64IsLegal && !Subtarget->is64Bit())) &&
25417 isa<LoadSDNode>(St->getValue()) &&
25418 !cast<LoadSDNode>(St->getValue())->isVolatile() &&
25419 St->getChain().hasOneUse() && !St->isVolatile()) {
25420 SDNode* LdVal = St->getValue().getNode();
25421 LoadSDNode *Ld = nullptr;
25422 int TokenFactorIndex = -1;
25423 SmallVector<SDValue, 8> Ops;
25424 SDNode* ChainVal = St->getChain().getNode();
25425 // Must be a store of a load. We currently handle two cases: the load
25426 // is a direct child, and it's under an intervening TokenFactor. It is
25427 // possible to dig deeper under nested TokenFactors.
25428 if (ChainVal == LdVal)
25429 Ld = cast<LoadSDNode>(St->getChain());
25430 else if (St->getValue().hasOneUse() &&
25431 ChainVal->getOpcode() == ISD::TokenFactor) {
25432 for (unsigned i = 0, e = ChainVal->getNumOperands(); i != e; ++i) {
25433 if (ChainVal->getOperand(i).getNode() == LdVal) {
25434 TokenFactorIndex = i;
25435 Ld = cast<LoadSDNode>(St->getValue());
25437 Ops.push_back(ChainVal->getOperand(i));
25441 if (!Ld || !ISD::isNormalLoad(Ld))
25444 // If this is not the MMX case, i.e. we are just turning i64 load/store
25445 // into f64 load/store, avoid the transformation if there are multiple
25446 // uses of the loaded value.
25447 if (!VT.isVector() && !Ld->hasNUsesOfValue(1, 0))
25452 // If we are a 64-bit capable x86, lower to a single movq load/store pair.
25453 // Otherwise, if it's legal to use f64 SSE instructions, use an f64 load/store pair.
25455 if (Subtarget->is64Bit() || F64IsLegal) {
25456 EVT LdVT = Subtarget->is64Bit() ? MVT::i64 : MVT::f64;
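// On 64-bit targets this becomes a single 64-bit integer load/store (movq);
// on 32-bit targets with SSE2 the value travels through an XMM register as f64.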
25457 SDValue NewLd = DAG.getLoad(LdVT, LdDL, Ld->getChain(), Ld->getBasePtr(),
25458 Ld->getPointerInfo(), Ld->isVolatile(),
25459 Ld->isNonTemporal(), Ld->isInvariant(),
25460 Ld->getAlignment());
25461 SDValue NewChain = NewLd.getValue(1);
25462 if (TokenFactorIndex != -1) {
25463 Ops.push_back(NewChain);
25464 NewChain = DAG.getNode(ISD::TokenFactor, LdDL, MVT::Other, Ops);
25466 return DAG.getStore(NewChain, StDL, NewLd, St->getBasePtr(),
25467 St->getPointerInfo(),
25468 St->isVolatile(), St->isNonTemporal(),
25469 St->getAlignment());
25472 // Otherwise, lower to two pairs of 32-bit loads / stores.
25473 SDValue LoAddr = Ld->getBasePtr();
25474 SDValue HiAddr = DAG.getNode(ISD::ADD, LdDL, MVT::i32, LoAddr,
25475 DAG.getConstant(4, MVT::i32));
25477 SDValue LoLd = DAG.getLoad(MVT::i32, LdDL, Ld->getChain(), LoAddr,
25478 Ld->getPointerInfo(),
25479 Ld->isVolatile(), Ld->isNonTemporal(),
25480 Ld->isInvariant(), Ld->getAlignment());
25481 SDValue HiLd = DAG.getLoad(MVT::i32, LdDL, Ld->getChain(), HiAddr,
25482 Ld->getPointerInfo().getWithOffset(4),
25483 Ld->isVolatile(), Ld->isNonTemporal(),
25485 MinAlign(Ld->getAlignment(), 4));
25487 SDValue NewChain = LoLd.getValue(1);
25488 if (TokenFactorIndex != -1) {
25489 Ops.push_back(LoLd);
25490 Ops.push_back(HiLd);
25491 NewChain = DAG.getNode(ISD::TokenFactor, LdDL, MVT::Other, Ops);
25494 LoAddr = St->getBasePtr();
25495 HiAddr = DAG.getNode(ISD::ADD, StDL, MVT::i32, LoAddr,
25496 DAG.getConstant(4, MVT::i32));
25498 SDValue LoSt = DAG.getStore(NewChain, StDL, LoLd, LoAddr,
25499 St->getPointerInfo(),
25500 St->isVolatile(), St->isNonTemporal(),
25501 St->getAlignment());
25502 SDValue HiSt = DAG.getStore(NewChain, StDL, HiLd, HiAddr,
25503 St->getPointerInfo().getWithOffset(4),
25505 St->isNonTemporal(),
25506 MinAlign(St->getAlignment(), 4));
25507 return DAG.getNode(ISD::TokenFactor, StDL, MVT::Other, LoSt, HiSt);
25512 /// Return 'true' if this vector operation is "horizontal"
25513 /// and return the operands for the horizontal operation in LHS and RHS. A
25514 /// horizontal operation performs the binary operation on successive elements
25515 /// of its first operand, then on successive elements of its second operand,
25516 /// returning the resulting values in a vector. For example, if
25517 /// A = < float a0, float a1, float a2, float a3 >
25519 /// B = < float b0, float b1, float b2, float b3 >
25520 /// then the result of doing a horizontal operation on A and B is
25521 /// A horizontal-op B = < a0 op a1, a2 op a3, b0 op b1, b2 op b3 >.
25522 /// In short, LHS and RHS are inspected to see if LHS op RHS is of the form
25523 /// A horizontal-op B, for some already available A and B, and if so then LHS is
25524 /// set to A, RHS to B, and the routine returns 'true'.
25525 /// Note that the binary operation should have the property that if one of the
25526 /// operands is UNDEF then the result is UNDEF.
25527 static bool isHorizontalBinOp(SDValue &LHS, SDValue &RHS, bool IsCommutative) {
25528 // Look for the following pattern: if
25529 // A = < float a0, float a1, float a2, float a3 >
25530 // B = < float b0, float b1, float b2, float b3 >
25532 // LHS = VECTOR_SHUFFLE A, B, <0, 2, 4, 6>
25533 // RHS = VECTOR_SHUFFLE A, B, <1, 3, 5, 7>
25534 // then LHS op RHS = < a0 op a1, a2 op a3, b0 op b1, b2 op b3 >
25535 // which is A horizontal-op B.
25537 // At least one of the operands should be a vector shuffle.
25538 if (LHS.getOpcode() != ISD::VECTOR_SHUFFLE &&
25539 RHS.getOpcode() != ISD::VECTOR_SHUFFLE)
25542 MVT VT = LHS.getSimpleValueType();
25544 assert((VT.is128BitVector() || VT.is256BitVector()) &&
25545 "Unsupported vector type for horizontal add/sub");
25547 // Handle 128 and 256-bit vector lengths. AVX defines horizontal add/sub to
25548 // operate independently on 128-bit lanes.
25549 unsigned NumElts = VT.getVectorNumElements();
25550 unsigned NumLanes = VT.getSizeInBits()/128;
25551 unsigned NumLaneElts = NumElts / NumLanes;
25552 assert((NumLaneElts % 2 == 0) &&
25553 "Vector type should have an even number of elements in each lane");
25554 unsigned HalfLaneElts = NumLaneElts/2;
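// For example, v8f32 on AVX gives NumElts = 8, NumLanes = 2, NumLaneElts = 4
// and HalfLaneElts = 2.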
25556 // View LHS in the form
25557 // LHS = VECTOR_SHUFFLE A, B, LMask
25558 // If LHS is not a shuffle then pretend it is the shuffle
25559 // LHS = VECTOR_SHUFFLE LHS, undef, <0, 1, ..., N-1>
25560 // NOTE: in what follows a default-initialized SDValue represents an UNDEF of type VT.
25563 SmallVector<int, 16> LMask(NumElts);
25564 if (LHS.getOpcode() == ISD::VECTOR_SHUFFLE) {
25565 if (LHS.getOperand(0).getOpcode() != ISD::UNDEF)
25566 A = LHS.getOperand(0);
25567 if (LHS.getOperand(1).getOpcode() != ISD::UNDEF)
25568 B = LHS.getOperand(1);
25569 ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(LHS.getNode())->getMask();
25570 std::copy(Mask.begin(), Mask.end(), LMask.begin());
25572 if (LHS.getOpcode() != ISD::UNDEF)
25574 for (unsigned i = 0; i != NumElts; ++i)
25578 // Likewise, view RHS in the form
25579 // RHS = VECTOR_SHUFFLE C, D, RMask
25581 SmallVector<int, 16> RMask(NumElts);
25582 if (RHS.getOpcode() == ISD::VECTOR_SHUFFLE) {
25583 if (RHS.getOperand(0).getOpcode() != ISD::UNDEF)
25584 C = RHS.getOperand(0);
25585 if (RHS.getOperand(1).getOpcode() != ISD::UNDEF)
25586 D = RHS.getOperand(1);
25587 ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(RHS.getNode())->getMask();
25588 std::copy(Mask.begin(), Mask.end(), RMask.begin());
25590 if (RHS.getOpcode() != ISD::UNDEF)
25592 for (unsigned i = 0; i != NumElts; ++i)
25596 // Check that the shuffles are both shuffling the same vectors.
25597 if (!(A == C && B == D) && !(A == D && B == C))
25600 // If everything is UNDEF then bail out: it would be better to fold to UNDEF.
25601 if (!A.getNode() && !B.getNode())
25604 // If A and B occur in reverse order in RHS, then "swap" them (which means
25605 // rewriting the mask).
25607 CommuteVectorShuffleMask(RMask, NumElts);
25609 // At this point LHS and RHS are equivalent to
25610 // LHS = VECTOR_SHUFFLE A, B, LMask
25611 // RHS = VECTOR_SHUFFLE A, B, RMask
25612 // Check that the masks correspond to performing a horizontal operation.
25613 for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
25614 for (unsigned i = 0; i != NumLaneElts; ++i) {
25615 int LIdx = LMask[i+l], RIdx = RMask[i+l];
25617 // Ignore any UNDEF components.
25618 if (LIdx < 0 || RIdx < 0 ||
25619 (!A.getNode() && (LIdx < (int)NumElts || RIdx < (int)NumElts)) ||
25620 (!B.getNode() && (LIdx >= (int)NumElts || RIdx >= (int)NumElts)))
25623 // Check that successive elements are being operated on. If not, this is
25624 // not a horizontal operation.
25625 unsigned Src = (i/HalfLaneElts); // each lane is split between srcs
25626 int Index = 2*(i%HalfLaneElts) + NumElts*Src + l;
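// For v4f32 in lane 0 this expects (LIdx, RIdx) pairs (0,1), (2,3) from A and
// (4,5), (6,7) from B, i.e. adjacent elements of the same source vector.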
25627 if (!(LIdx == Index && RIdx == Index + 1) &&
25628 !(IsCommutative && LIdx == Index + 1 && RIdx == Index))
25633 LHS = A.getNode() ? A : B; // If A is 'UNDEF', use B for it.
25634 RHS = B.getNode() ? B : A; // If B is 'UNDEF', use A for it.
25638 /// Do target-specific dag combines on floating point adds.
25639 static SDValue PerformFADDCombine(SDNode *N, SelectionDAG &DAG,
25640 const X86Subtarget *Subtarget) {
25641 EVT VT = N->getValueType(0);
25642 SDValue LHS = N->getOperand(0);
25643 SDValue RHS = N->getOperand(1);
25645 // Try to synthesize horizontal adds from adds of shuffles.
25646 if (((Subtarget->hasSSE3() && (VT == MVT::v4f32 || VT == MVT::v2f64)) ||
25647 (Subtarget->hasFp256() && (VT == MVT::v8f32 || VT == MVT::v4f64))) &&
25648 isHorizontalBinOp(LHS, RHS, true))
25649 return DAG.getNode(X86ISD::FHADD, SDLoc(N), VT, LHS, RHS);
25653 /// Do target-specific dag combines on floating point subs.
25654 static SDValue PerformFSUBCombine(SDNode *N, SelectionDAG &DAG,
25655 const X86Subtarget *Subtarget) {
25656 EVT VT = N->getValueType(0);
25657 SDValue LHS = N->getOperand(0);
25658 SDValue RHS = N->getOperand(1);
25660 // Try to synthesize horizontal subs from subs of shuffles.
25661 if (((Subtarget->hasSSE3() && (VT == MVT::v4f32 || VT == MVT::v2f64)) ||
25662 (Subtarget->hasFp256() && (VT == MVT::v8f32 || VT == MVT::v4f64))) &&
25663 isHorizontalBinOp(LHS, RHS, false))
25664 return DAG.getNode(X86ISD::FHSUB, SDLoc(N), VT, LHS, RHS);
25668 /// Do target-specific dag combines on X86ISD::FOR and X86ISD::FXOR nodes.
25669 static SDValue PerformFORCombine(SDNode *N, SelectionDAG &DAG) {
25670 assert(N->getOpcode() == X86ISD::FOR || N->getOpcode() == X86ISD::FXOR);
25672 // F[X]OR(0.0, x) -> x
25673 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(0)))
25674 if (C->getValueAPF().isPosZero())
25675 return N->getOperand(1);
25677 // F[X]OR(x, 0.0) -> x
25678 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(1)))
25679 if (C->getValueAPF().isPosZero())
25680 return N->getOperand(0);
25684 /// Do target-specific dag combines on X86ISD::FMIN and X86ISD::FMAX nodes.
25685 static SDValue PerformFMinFMaxCombine(SDNode *N, SelectionDAG &DAG) {
25686 assert(N->getOpcode() == X86ISD::FMIN || N->getOpcode() == X86ISD::FMAX);
25688 // Only perform optimizations if UnsafeMath is used.
25689 if (!DAG.getTarget().Options.UnsafeFPMath)
25692 // Since we are in unsafe-math mode, convert the FMIN and FMAX nodes
25693 // into FMINC and FMAXC, which are commutative operations.
25694 unsigned NewOp = 0;
25695 switch (N->getOpcode()) {
25696 default: llvm_unreachable("unknown opcode");
25697 case X86ISD::FMIN: NewOp = X86ISD::FMINC; break;
25698 case X86ISD::FMAX: NewOp = X86ISD::FMAXC; break;
25701 return DAG.getNode(NewOp, SDLoc(N), N->getValueType(0),
25702 N->getOperand(0), N->getOperand(1));
25705 /// Do target-specific dag combines on X86ISD::FAND nodes.
25706 static SDValue PerformFANDCombine(SDNode *N, SelectionDAG &DAG) {
25707 // FAND(0.0, x) -> 0.0
25708 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(0)))
25709 if (C->getValueAPF().isPosZero())
25710 return N->getOperand(0);
25712 // FAND(x, 0.0) -> 0.0
25713 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(1)))
25714 if (C->getValueAPF().isPosZero())
25715 return N->getOperand(1);
25720 /// Do target-specific dag combines on X86ISD::FANDN nodes
25721 static SDValue PerformFANDNCombine(SDNode *N, SelectionDAG &DAG) {
25722 // FANDN(0.0, x) -> x
25723 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(0)))
25724 if (C->getValueAPF().isPosZero())
25725 return N->getOperand(1);
25727 // FANDN(x, 0.0) -> 0.0
25728 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(1)))
25729 if (C->getValueAPF().isPosZero())
25730 return N->getOperand(1);
25735 static SDValue PerformBTCombine(SDNode *N,
25737 TargetLowering::DAGCombinerInfo &DCI) {
25738 // BT ignores high bits in the bit index operand.
25739 SDValue Op1 = N->getOperand(1);
25740 if (Op1.hasOneUse()) {
25741 unsigned BitWidth = Op1.getValueSizeInBits();
25742 APInt DemandedMask = APInt::getLowBitsSet(BitWidth, Log2_32(BitWidth));
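// For example, with a 32-bit index operand only the low 5 bits are demanded,
// matching the hardware behavior of BT on a register operand.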
25743 APInt KnownZero, KnownOne;
25744 TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
25745 !DCI.isBeforeLegalizeOps());
25746 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
25747 if (TLO.ShrinkDemandedConstant(Op1, DemandedMask) ||
25748 TLI.SimplifyDemandedBits(Op1, DemandedMask, KnownZero, KnownOne, TLO))
25749 DCI.CommitTargetLoweringOpt(TLO);
25754 static SDValue PerformVZEXT_MOVLCombine(SDNode *N, SelectionDAG &DAG) {
25755 SDValue Op = N->getOperand(0);
25756 if (Op.getOpcode() == ISD::BITCAST)
25757 Op = Op.getOperand(0);
25758 EVT VT = N->getValueType(0), OpVT = Op.getValueType();
25759 if (Op.getOpcode() == X86ISD::VZEXT_LOAD &&
25760 VT.getVectorElementType().getSizeInBits() ==
25761 OpVT.getVectorElementType().getSizeInBits()) {
25762 return DAG.getNode(ISD::BITCAST, SDLoc(N), VT, Op);
25767 static SDValue PerformSIGN_EXTEND_INREGCombine(SDNode *N, SelectionDAG &DAG,
25768 const X86Subtarget *Subtarget) {
25769 EVT VT = N->getValueType(0);
25770 if (!VT.isVector())
25773 SDValue N0 = N->getOperand(0);
25774 SDValue N1 = N->getOperand(1);
25775 EVT ExtraVT = cast<VTSDNode>(N1)->getVT();
25778 // The SIGN_EXTEND_INREG to v4i64 is an expensive operation on both
25779 // SSE and AVX2 since there is no sign-extended shift-right
25780 // operation on vectors with 64-bit elements.
25781 //(sext_in_reg (v4i64 anyext (v4i32 x )), ExtraVT) ->
25782 // (v4i64 sext (v4i32 sext_in_reg (v4i32 x , ExtraVT)))
25783 if (VT == MVT::v4i64 && (N0.getOpcode() == ISD::ANY_EXTEND ||
25784 N0.getOpcode() == ISD::SIGN_EXTEND)) {
25785 SDValue N00 = N0.getOperand(0);
25787 // An EXTLOAD has a better lowering on AVX2:
25788 // it may be replaced with an X86ISD::VSEXT node.
25789 if (N00.getOpcode() == ISD::LOAD && Subtarget->hasInt256())
25790 if (!ISD::isNormalLoad(N00.getNode()))
25793 if (N00.getValueType() == MVT::v4i32 && ExtraVT.getSizeInBits() < 128) {
25794 SDValue Tmp = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, MVT::v4i32,
25796 return DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i64, Tmp);
25802 static SDValue PerformSExtCombine(SDNode *N, SelectionDAG &DAG,
25803 TargetLowering::DAGCombinerInfo &DCI,
25804 const X86Subtarget *Subtarget) {
25805 SDValue N0 = N->getOperand(0);
25806 EVT VT = N->getValueType(0);
25808 // (i8,i32 sext (sdivrem (i8 x, i8 y)) ->
25809 // (i8,i32 (sdivrem_sext_hreg (i8 x, i8 y)
25810 // This exposes the sext to the sdivrem lowering, so that it directly extends
25811 // from AH (which we otherwise need to do contortions to access).
25812 if (N0.getOpcode() == ISD::SDIVREM && N0.getResNo() == 1 &&
25813 N0.getValueType() == MVT::i8 && VT == MVT::i32) {
25815 SDVTList NodeTys = DAG.getVTList(MVT::i8, VT);
25816 SDValue R = DAG.getNode(X86ISD::SDIVREM8_SEXT_HREG, dl, NodeTys,
25817 N0.getOperand(0), N0.getOperand(1));
25818 DAG.ReplaceAllUsesOfValueWith(N0.getValue(0), R.getValue(0));
25819 return R.getValue(1);
25822 if (!DCI.isBeforeLegalizeOps())
25825 if (!Subtarget->hasFp256())
25828 if (VT.isVector() && VT.getSizeInBits() == 256) {
25829 SDValue R = WidenMaskArithmetic(N, DAG, DCI, Subtarget);
25837 static SDValue PerformFMACombine(SDNode *N, SelectionDAG &DAG,
25838 const X86Subtarget* Subtarget) {
25840 EVT VT = N->getValueType(0);
25842 // Let legalize expand this if it isn't a legal type yet.
25843 if (!DAG.getTargetLoweringInfo().isTypeLegal(VT))
25846 EVT ScalarVT = VT.getScalarType();
25847 if ((ScalarVT != MVT::f32 && ScalarVT != MVT::f64) ||
25848 (!Subtarget->hasFMA() && !Subtarget->hasFMA4()))
25851 SDValue A = N->getOperand(0);
25852 SDValue B = N->getOperand(1);
25853 SDValue C = N->getOperand(2);
25855 bool NegA = (A.getOpcode() == ISD::FNEG);
25856 bool NegB = (B.getOpcode() == ISD::FNEG);
25857 bool NegC = (C.getOpcode() == ISD::FNEG);
25859 // Negative multiplication when NegA xor NegB
25860 bool NegMul = (NegA != NegB);
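// A negated product selects the FNMADD/FNMSUB family below, while the sign of
// C picks the ADD vs. SUB form within each family.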
25862 A = A.getOperand(0);
25864 B = B.getOperand(0);
25866 C = C.getOperand(0);
25870 Opcode = (!NegC) ? X86ISD::FMADD : X86ISD::FMSUB;
25872 Opcode = (!NegC) ? X86ISD::FNMADD : X86ISD::FNMSUB;
25874 return DAG.getNode(Opcode, dl, VT, A, B, C);
25877 static SDValue PerformZExtCombine(SDNode *N, SelectionDAG &DAG,
25878 TargetLowering::DAGCombinerInfo &DCI,
25879 const X86Subtarget *Subtarget) {
25880 // (i32 zext (and (i8 x86isd::setcc_carry), 1)) ->
25881 // (and (i32 x86isd::setcc_carry), 1)
25882 // This eliminates the zext. This transformation is necessary because
25883 // ISD::SETCC is always legalized to i8.
25885 SDValue N0 = N->getOperand(0);
25886 EVT VT = N->getValueType(0);
25888 if (N0.getOpcode() == ISD::AND &&
25890 N0.getOperand(0).hasOneUse()) {
25891 SDValue N00 = N0.getOperand(0);
25892 if (N00.getOpcode() == X86ISD::SETCC_CARRY) {
25893 ConstantSDNode *C = dyn_cast<ConstantSDNode>(N0.getOperand(1));
25894 if (!C || C->getZExtValue() != 1)
25896 return DAG.getNode(ISD::AND, dl, VT,
25897 DAG.getNode(X86ISD::SETCC_CARRY, dl, VT,
25898 N00.getOperand(0), N00.getOperand(1)),
25899 DAG.getConstant(1, VT));
25903 if (N0.getOpcode() == ISD::TRUNCATE &&
25905 N0.getOperand(0).hasOneUse()) {
25906 SDValue N00 = N0.getOperand(0);
25907 if (N00.getOpcode() == X86ISD::SETCC_CARRY) {
25908 return DAG.getNode(ISD::AND, dl, VT,
25909 DAG.getNode(X86ISD::SETCC_CARRY, dl, VT,
25910 N00.getOperand(0), N00.getOperand(1)),
25911 DAG.getConstant(1, VT));
25914 if (VT.is256BitVector()) {
25915 SDValue R = WidenMaskArithmetic(N, DAG, DCI, Subtarget);
25920 // (i8,i32 zext (udivrem (i8 x, i8 y)) ->
25921 // (i8,i32 (udivrem_zext_hreg (i8 x, i8 y)
25922 // This exposes the zext to the udivrem lowering, so that it directly extends
25923 // from AH (which we otherwise need to do contortions to access).
25924 if (N0.getOpcode() == ISD::UDIVREM &&
25925 N0.getResNo() == 1 && N0.getValueType() == MVT::i8 &&
25926 (VT == MVT::i32 || VT == MVT::i64)) {
25927 SDVTList NodeTys = DAG.getVTList(MVT::i8, VT);
25928 SDValue R = DAG.getNode(X86ISD::UDIVREM8_ZEXT_HREG, dl, NodeTys,
25929 N0.getOperand(0), N0.getOperand(1));
25930 DAG.ReplaceAllUsesOfValueWith(N0.getValue(0), R.getValue(0));
25931 return R.getValue(1);
25937 // Optimize x == -y --> x+y == 0
25938 // x != -y --> x+y != 0
25939 static SDValue PerformISDSETCCCombine(SDNode *N, SelectionDAG &DAG,
25940 const X86Subtarget* Subtarget) {
25941 ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get();
25942 SDValue LHS = N->getOperand(0);
25943 SDValue RHS = N->getOperand(1);
25944 EVT VT = N->getValueType(0);
25947 if ((CC == ISD::SETNE || CC == ISD::SETEQ) && LHS.getOpcode() == ISD::SUB)
25948 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(LHS.getOperand(0)))
25949 if (C->getAPIntValue() == 0 && LHS.hasOneUse()) {
25950 SDValue addV = DAG.getNode(ISD::ADD, SDLoc(N),
25951 LHS.getValueType(), RHS, LHS.getOperand(1));
25952 return DAG.getSetCC(SDLoc(N), N->getValueType(0),
25953 addV, DAG.getConstant(0, addV.getValueType()), CC);
25955 if ((CC == ISD::SETNE || CC == ISD::SETEQ) && RHS.getOpcode() == ISD::SUB)
25956 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS.getOperand(0)))
25957 if (C->getAPIntValue() == 0 && RHS.hasOneUse()) {
25958 SDValue addV = DAG.getNode(ISD::ADD, SDLoc(N),
25959 RHS.getValueType(), LHS, RHS.getOperand(1));
25960 return DAG.getSetCC(SDLoc(N), N->getValueType(0),
25961 addV, DAG.getConstant(0, addV.getValueType()), CC);
25964 if (VT.getScalarType() == MVT::i1) {
25965 bool IsSEXT0 = (LHS.getOpcode() == ISD::SIGN_EXTEND) &&
25966 (LHS.getOperand(0).getValueType().getScalarType() == MVT::i1);
25967 bool IsVZero0 = ISD::isBuildVectorAllZeros(LHS.getNode());
25968 if (!IsSEXT0 && !IsVZero0)
25970 bool IsSEXT1 = (RHS.getOpcode() == ISD::SIGN_EXTEND) &&
25971 (RHS.getOperand(0).getValueType().getScalarType() == MVT::i1);
25972 bool IsVZero1 = ISD::isBuildVectorAllZeros(RHS.getNode());
25974 if (!IsSEXT1 && !IsVZero1)
25977 if (IsSEXT0 && IsVZero1) {
25978 assert(VT == LHS.getOperand(0).getValueType() && "Unexpected operand type");
25979 if (CC == ISD::SETEQ)
25980 return DAG.getNOT(DL, LHS.getOperand(0), VT);
25981 return LHS.getOperand(0);
25983 if (IsSEXT1 && IsVZero0) {
25984 assert(VT == RHS.getOperand(0).getValueType() && "Unexpected operand type");
25985 if (CC == ISD::SETEQ)
25986 return DAG.getNOT(DL, RHS.getOperand(0), VT);
25987 return RHS.getOperand(0);
25994 static SDValue PerformINSERTPSCombine(SDNode *N, SelectionDAG &DAG,
25995 const X86Subtarget *Subtarget) {
25997 MVT VT = N->getOperand(1)->getSimpleValueType(0);
25998 assert((VT == MVT::v4f32 || VT == MVT::v4i32) &&
25999 "X86insertps is only defined for v4x32");
26001 SDValue Ld = N->getOperand(1);
26002 if (MayFoldLoad(Ld)) {
26003 // Extract the countS bits from the immediate so we can get the proper
26004 // address when narrowing the vector load to a specific element.
26005 // When the second source op is a memory address, insertps doesn't use
26006 // countS and just gets an f32 from that address.
26007 unsigned DestIndex =
26008 cast<ConstantSDNode>(N->getOperand(2))->getZExtValue() >> 6;
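// The count_s field lives in bits [7:6] of the insertps immediate and selects
// which element of the source vector is inserted.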
26009 Ld = NarrowVectorLoadToElement(cast<LoadSDNode>(Ld), DestIndex, DAG);
26013 // Create this as a scalar to vector to match the instruction pattern.
26014 SDValue LoadScalarToVector = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Ld);
26015 // countS bits are ignored when loading from memory on insertps, which
26016 // means we don't need to explicitly set them to 0.
26017 return DAG.getNode(X86ISD::INSERTPS, dl, VT, N->getOperand(0),
26018 LoadScalarToVector, N->getOperand(2));
26021 // Helper function for PerformSETCCCombine. It materializes "setb reg"
26022 // as "sbb reg,reg", since the latter can be extended without a zext and
26023 // produces an all-ones bit which is more useful than 0/1 in some cases.
26024 static SDValue MaterializeSETB(SDLoc DL, SDValue EFLAGS, SelectionDAG &DAG,
26027 return DAG.getNode(ISD::AND, DL, VT,
26028 DAG.getNode(X86ISD::SETCC_CARRY, DL, MVT::i8,
26029 DAG.getConstant(X86::COND_B, MVT::i8), EFLAGS),
26030 DAG.getConstant(1, VT));
26031 assert(VT == MVT::i1 && "Unexpected type for SETCC node");
26032 return DAG.getNode(ISD::TRUNCATE, DL, MVT::i1,
26033 DAG.getNode(X86ISD::SETCC_CARRY, DL, MVT::i8,
26034 DAG.getConstant(X86::COND_B, MVT::i8), EFLAGS));
26037 // Optimize RES = X86ISD::SETCC CONDCODE, EFLAG_INPUT
26038 static SDValue PerformSETCCCombine(SDNode *N, SelectionDAG &DAG,
26039 TargetLowering::DAGCombinerInfo &DCI,
26040 const X86Subtarget *Subtarget) {
26042 X86::CondCode CC = X86::CondCode(N->getConstantOperandVal(0));
26043 SDValue EFLAGS = N->getOperand(1);
26045 if (CC == X86::COND_A) {
26046 // Try to convert COND_A into COND_B in an attempt to facilitate
26047 // materializing "setb reg".
26049 // Do not flip "e > c", where "c" is a constant, because the CMP instruction
26050 // cannot take an immediate as its first operand.
26052 if (EFLAGS.getOpcode() == X86ISD::SUB && EFLAGS.hasOneUse() &&
26053 EFLAGS.getValueType().isInteger() &&
26054 !isa<ConstantSDNode>(EFLAGS.getOperand(1))) {
26055 SDValue NewSub = DAG.getNode(X86ISD::SUB, SDLoc(EFLAGS),
26056 EFLAGS.getNode()->getVTList(),
26057 EFLAGS.getOperand(1), EFLAGS.getOperand(0));
26058 SDValue NewEFLAGS = SDValue(NewSub.getNode(), EFLAGS.getResNo());
26059 return MaterializeSETB(DL, NewEFLAGS, DAG, N->getSimpleValueType(0));
26063 // Materialize "setb reg" as "sbb reg,reg", since it can be extended without
26064 // a zext and produces an all-ones bit which is more useful than 0/1 in some cases.
26066 if (CC == X86::COND_B)
26067 return MaterializeSETB(DL, EFLAGS, DAG, N->getSimpleValueType(0));
26071 Flags = checkBoolTestSetCCCombine(EFLAGS, CC);
26072 if (Flags.getNode()) {
26073 SDValue Cond = DAG.getConstant(CC, MVT::i8);
26074 return DAG.getNode(X86ISD::SETCC, DL, N->getVTList(), Cond, Flags);
26080 // Optimize branch condition evaluation.
26082 static SDValue PerformBrCondCombine(SDNode *N, SelectionDAG &DAG,
26083 TargetLowering::DAGCombinerInfo &DCI,
26084 const X86Subtarget *Subtarget) {
26086 SDValue Chain = N->getOperand(0);
26087 SDValue Dest = N->getOperand(1);
26088 SDValue EFLAGS = N->getOperand(3);
26089 X86::CondCode CC = X86::CondCode(N->getConstantOperandVal(2));
26093 Flags = checkBoolTestSetCCCombine(EFLAGS, CC);
26094 if (Flags.getNode()) {
26095 SDValue Cond = DAG.getConstant(CC, MVT::i8);
26096 return DAG.getNode(X86ISD::BRCOND, DL, N->getVTList(), Chain, Dest, Cond,
26103 static SDValue performVectorCompareAndMaskUnaryOpCombine(SDNode *N,
26104 SelectionDAG &DAG) {
26105 // Take advantage of vector comparisons producing 0 or -1 in each lane to
26106 // optimize away operation when it's from a constant.
26108 // The general transformation is:
26109 // UNARYOP(AND(VECTOR_CMP(x,y), constant)) -->
26110 // AND(VECTOR_CMP(x,y), constant2)
26111 // constant2 = UNARYOP(constant)
26113 // Early exit if this isn't a vector operation, the operand of the
26114 // unary operation isn't a bitwise AND, or if the sizes of the operations
26115 // aren't the same.
26116 EVT VT = N->getValueType(0);
26117 if (!VT.isVector() || N->getOperand(0)->getOpcode() != ISD::AND ||
26118 N->getOperand(0)->getOperand(0)->getOpcode() != ISD::SETCC ||
26119 VT.getSizeInBits() != N->getOperand(0)->getValueType(0).getSizeInBits())
26122 // Now check that the other operand of the AND is a constant. We could
26123 // make the transformation for non-constant splats as well, but it's unclear
26124 // that would be a benefit as it would not eliminate any operations, just
26125 // perform one more step in scalar code before moving to the vector unit.
26126 if (BuildVectorSDNode *BV =
26127 dyn_cast<BuildVectorSDNode>(N->getOperand(0)->getOperand(1))) {
26128 // Bail out if the vector isn't a constant.
26129 if (!BV->isConstant())
26132 // Everything checks out. Build up the new and improved node.
26134 EVT IntVT = BV->getValueType(0);
26135 // Create a new constant of the appropriate type for the transformed node.
26137 SDValue SourceConst = DAG.getNode(N->getOpcode(), DL, VT, SDValue(BV, 0));
26138 // The AND node needs bitcasts to/from an integer vector type around it.
26139 SDValue MaskConst = DAG.getNode(ISD::BITCAST, DL, IntVT, SourceConst);
26140 SDValue NewAnd = DAG.getNode(ISD::AND, DL, IntVT,
26141 N->getOperand(0)->getOperand(0), MaskConst);
26142 SDValue Res = DAG.getNode(ISD::BITCAST, DL, VT, NewAnd);
26149 static SDValue PerformSINT_TO_FPCombine(SDNode *N, SelectionDAG &DAG,
26150 const X86Subtarget *Subtarget) {
26151 // First try to optimize away the conversion entirely when it's
26152 // conditionally from a constant. Vectors only.
26153 SDValue Res = performVectorCompareAndMaskUnaryOpCombine(N, DAG);
26154 if (Res != SDValue())
26157 // Now move on to more general possibilities.
26158 SDValue Op0 = N->getOperand(0);
26159 EVT InVT = Op0->getValueType(0);
26161 // SINT_TO_FP(v4i8) -> SINT_TO_FP(SEXT(v4i8 to v4i32))
26162 if (InVT == MVT::v8i8 || InVT == MVT::v4i8) {
26164 MVT DstVT = InVT == MVT::v4i8 ? MVT::v4i32 : MVT::v8i32;
26165 SDValue P = DAG.getNode(ISD::SIGN_EXTEND, dl, DstVT, Op0);
26166 return DAG.getNode(ISD::SINT_TO_FP, dl, N->getValueType(0), P);
26169 // Transform (SINT_TO_FP (i64 ...)) into an x87 operation if we have
26170 // a 32-bit target where SSE doesn't support i64->FP operations.
26171 if (Op0.getOpcode() == ISD::LOAD) {
26172 LoadSDNode *Ld = cast<LoadSDNode>(Op0.getNode());
26173 EVT VT = Ld->getValueType(0);
26174 if (!Ld->isVolatile() && !N->getValueType(0).isVector() &&
26175 ISD::isNON_EXTLoad(Op0.getNode()) && Op0.hasOneUse() &&
26176 !Subtarget->is64Bit() && VT == MVT::i64) {
26177 SDValue FILDChain = Subtarget->getTargetLowering()->BuildFILD(
26178 SDValue(N, 0), Ld->getValueType(0), Ld->getChain(), Op0, DAG);
26179 DAG.ReplaceAllUsesOfValueWith(Op0.getValue(1), FILDChain.getValue(1));
26186 // Optimize RES, EFLAGS = X86ISD::ADC LHS, RHS, EFLAGS
26187 static SDValue PerformADCCombine(SDNode *N, SelectionDAG &DAG,
26188 X86TargetLowering::DAGCombinerInfo &DCI) {
26189 // If the LHS and RHS of the ADC node are zero, then it can't overflow and
26190 // the result is either zero or one (depending on the input carry bit).
26191 // Strength reduce this down to a "set on carry" aka SETCC_CARRY&1.
26192 if (X86::isZeroNode(N->getOperand(0)) &&
26193 X86::isZeroNode(N->getOperand(1)) &&
26194 // We don't have a good way to replace an EFLAGS use, so only do this when the EFLAGS result is unused.
26196 SDValue(N, 1).use_empty()) {
26198 EVT VT = N->getValueType(0);
26199 SDValue CarryOut = DAG.getConstant(0, N->getValueType(1));
26200 SDValue Res1 = DAG.getNode(ISD::AND, DL, VT,
26201 DAG.getNode(X86ISD::SETCC_CARRY, DL, VT,
26202 DAG.getConstant(X86::COND_B,MVT::i8),
26204 DAG.getConstant(1, VT));
26205 return DCI.CombineTo(N, Res1, CarryOut);
26211 // fold (add Y, (sete X, 0)) -> adc 0, Y
26212 // (add Y, (setne X, 0)) -> sbb -1, Y
26213 // (sub (sete X, 0), Y) -> sbb 0, Y
26214 // (sub (setne X, 0), Y) -> adc -1, Y
26215 static SDValue OptimizeConditionalInDecrement(SDNode *N, SelectionDAG &DAG) {
26218 // Look through ZExts.
26219 SDValue Ext = N->getOperand(N->getOpcode() == ISD::SUB ? 1 : 0);
26220 if (Ext.getOpcode() != ISD::ZERO_EXTEND || !Ext.hasOneUse())
26223 SDValue SetCC = Ext.getOperand(0);
26224 if (SetCC.getOpcode() != X86ISD::SETCC || !SetCC.hasOneUse())
26227 X86::CondCode CC = (X86::CondCode)SetCC.getConstantOperandVal(0);
26228 if (CC != X86::COND_E && CC != X86::COND_NE)
26231 SDValue Cmp = SetCC.getOperand(1);
26232 if (Cmp.getOpcode() != X86ISD::CMP || !Cmp.hasOneUse() ||
26233 !X86::isZeroNode(Cmp.getOperand(1)) ||
26234 !Cmp.getOperand(0).getValueType().isInteger())
26237 SDValue CmpOp0 = Cmp.getOperand(0);
26238 SDValue NewCmp = DAG.getNode(X86ISD::CMP, DL, MVT::i32, CmpOp0,
26239 DAG.getConstant(1, CmpOp0.getValueType()));
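// Comparing X against 1 sets the carry flag exactly when X == 0 (unsigned
// X < 1), which the ADC/SBB built below consumes directly.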
26241 SDValue OtherVal = N->getOperand(N->getOpcode() == ISD::SUB ? 0 : 1);
26242 if (CC == X86::COND_NE)
26243 return DAG.getNode(N->getOpcode() == ISD::SUB ? X86ISD::ADC : X86ISD::SBB,
26244 DL, OtherVal.getValueType(), OtherVal,
26245 DAG.getConstant(-1ULL, OtherVal.getValueType()), NewCmp);
26246 return DAG.getNode(N->getOpcode() == ISD::SUB ? X86ISD::SBB : X86ISD::ADC,
26247 DL, OtherVal.getValueType(), OtherVal,
26248 DAG.getConstant(0, OtherVal.getValueType()), NewCmp);
26251 /// PerformAddCombine - Do target-specific dag combines on integer adds.
26252 static SDValue PerformAddCombine(SDNode *N, SelectionDAG &DAG,
26253 const X86Subtarget *Subtarget) {
26254 EVT VT = N->getValueType(0);
26255 SDValue Op0 = N->getOperand(0);
26256 SDValue Op1 = N->getOperand(1);
26258 // Try to synthesize horizontal adds from adds of shuffles.
26259 if (((Subtarget->hasSSSE3() && (VT == MVT::v8i16 || VT == MVT::v4i32)) ||
26260 (Subtarget->hasInt256() && (VT == MVT::v16i16 || VT == MVT::v8i32))) &&
26261 isHorizontalBinOp(Op0, Op1, true))
26262 return DAG.getNode(X86ISD::HADD, SDLoc(N), VT, Op0, Op1);
26264 return OptimizeConditionalInDecrement(N, DAG);
26267 static SDValue PerformSubCombine(SDNode *N, SelectionDAG &DAG,
26268 const X86Subtarget *Subtarget) {
26269 SDValue Op0 = N->getOperand(0);
26270 SDValue Op1 = N->getOperand(1);
26272 // X86 can't encode an immediate LHS of a sub. See if we can push the
26273 // negation into a preceding instruction.
26274 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op0)) {
26275 // If the RHS of the sub is a XOR with one use and a constant, invert the
26276 // immediate. Then add one to the LHS of the sub so we can turn
26277 // X-Y -> X+~Y+1, saving one register.
26278 if (Op1->hasOneUse() && Op1.getOpcode() == ISD::XOR &&
26279 isa<ConstantSDNode>(Op1.getOperand(1))) {
26280 APInt XorC = cast<ConstantSDNode>(Op1.getOperand(1))->getAPIntValue();
26281 EVT VT = Op0.getValueType();
26282 SDValue NewXor = DAG.getNode(ISD::XOR, SDLoc(Op1), VT,
26284 DAG.getConstant(~XorC, VT));
26285 return DAG.getNode(ISD::ADD, SDLoc(N), VT, NewXor,
26286 DAG.getConstant(C->getAPIntValue()+1, VT));
26290 // Try to synthesize horizontal subs from subs of shuffles.
26291 EVT VT = N->getValueType(0);
26292 if (((Subtarget->hasSSSE3() && (VT == MVT::v8i16 || VT == MVT::v4i32)) ||
26293 (Subtarget->hasInt256() && (VT == MVT::v16i16 || VT == MVT::v8i32))) &&
26294 isHorizontalBinOp(Op0, Op1, true))
26295 return DAG.getNode(X86ISD::HSUB, SDLoc(N), VT, Op0, Op1);
26297 return OptimizeConditionalInDecrement(N, DAG);
26300 /// performVZEXTCombine - Performs combines on X86ISD::VZEXT nodes.
26301 static SDValue performVZEXTCombine(SDNode *N, SelectionDAG &DAG,
26302 TargetLowering::DAGCombinerInfo &DCI,
26303 const X86Subtarget *Subtarget) {
26305 MVT VT = N->getSimpleValueType(0);
26306 SDValue Op = N->getOperand(0);
26307 MVT OpVT = Op.getSimpleValueType();
26308 MVT OpEltVT = OpVT.getVectorElementType();
26309 unsigned InputBits = OpEltVT.getSizeInBits() * VT.getVectorNumElements();
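// Number of source bits the vzext actually consumes: one OpEltVT-sized element
// per element of the result type.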
26311 // (vzext (bitcast (vzext (x)) -> (vzext x)
26313 while (V.getOpcode() == ISD::BITCAST)
26314 V = V.getOperand(0);
26316 if (V != Op && V.getOpcode() == X86ISD::VZEXT) {
26317 MVT InnerVT = V.getSimpleValueType();
26318 MVT InnerEltVT = InnerVT.getVectorElementType();
26320 // If the element sizes match exactly, we can just do one larger vzext. This
26321 // is always an exact type match as vzext operates on integer types.
26322 if (OpEltVT == InnerEltVT) {
26323 assert(OpVT == InnerVT && "Types must match for vzext!");
26324 return DAG.getNode(X86ISD::VZEXT, DL, VT, V.getOperand(0));
26327 // The only other way we can combine them is if only a single element of the
26328 // inner vzext is used in the input to the outer vzext.
26329 if (InnerEltVT.getSizeInBits() < InputBits)
26332 // In this case, the inner vzext is completely dead because we're going to
26333 // only look at bits inside of the low element. Just do the outer vzext on
26334 // a bitcast of the input to the inner.
26335 return DAG.getNode(X86ISD::VZEXT, DL, VT,
26336 DAG.getNode(ISD::BITCAST, DL, OpVT, V));
26339 // Check if we can bypass extracting and re-inserting an element of an input
26340 // vector. Essentially:
26341 // (bitcast (sclr2vec (ext_vec_elt x))) -> (bitcast x)
26342 if (V.getOpcode() == ISD::SCALAR_TO_VECTOR &&
26343 V.getOperand(0).getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
26344 V.getOperand(0).getSimpleValueType().getSizeInBits() == InputBits) {
26345 SDValue ExtractedV = V.getOperand(0);
26346 SDValue OrigV = ExtractedV.getOperand(0);
26347 if (auto *ExtractIdx = dyn_cast<ConstantSDNode>(ExtractedV.getOperand(1)))
26348 if (ExtractIdx->getZExtValue() == 0) {
26349 MVT OrigVT = OrigV.getSimpleValueType();
26350 // Extract a subvector if necessary...
26351 if (OrigVT.getSizeInBits() > OpVT.getSizeInBits()) {
26352 int Ratio = OrigVT.getSizeInBits() / OpVT.getSizeInBits();
26353 OrigVT = MVT::getVectorVT(OrigVT.getVectorElementType(),
26354 OrigVT.getVectorNumElements() / Ratio);
26355 OrigV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, OrigVT, OrigV,
26356 DAG.getIntPtrConstant(0));
26358 Op = DAG.getNode(ISD::BITCAST, DL, OpVT, OrigV);
26359 return DAG.getNode(X86ISD::VZEXT, DL, VT, Op);
26366 SDValue X86TargetLowering::PerformDAGCombine(SDNode *N,
26367 DAGCombinerInfo &DCI) const {
26368 SelectionDAG &DAG = DCI.DAG;
26369 switch (N->getOpcode()) {
26371 case ISD::EXTRACT_VECTOR_ELT:
26372 return PerformEXTRACT_VECTOR_ELTCombine(N, DAG, DCI);
26375 case X86ISD::SHRUNKBLEND:
26376 return PerformSELECTCombine(N, DAG, DCI, Subtarget);
26377 case ISD::BITCAST: return PerformBITCASTCombine(N, DAG);
26378 case X86ISD::CMOV: return PerformCMOVCombine(N, DAG, DCI, Subtarget);
26379 case ISD::ADD: return PerformAddCombine(N, DAG, Subtarget);
26380 case ISD::SUB: return PerformSubCombine(N, DAG, Subtarget);
26381 case X86ISD::ADC: return PerformADCCombine(N, DAG, DCI);
26382 case ISD::MUL: return PerformMulCombine(N, DAG, DCI);
26385 case ISD::SRL: return PerformShiftCombine(N, DAG, DCI, Subtarget);
26386 case ISD::AND: return PerformAndCombine(N, DAG, DCI, Subtarget);
26387 case ISD::OR: return PerformOrCombine(N, DAG, DCI, Subtarget);
26388 case ISD::XOR: return PerformXorCombine(N, DAG, DCI, Subtarget);
26389 case ISD::LOAD: return PerformLOADCombine(N, DAG, DCI, Subtarget);
26390 case ISD::MLOAD: return PerformMLOADCombine(N, DAG, DCI, Subtarget);
26391 case ISD::STORE: return PerformSTORECombine(N, DAG, Subtarget);
26392 case ISD::MSTORE: return PerformMSTORECombine(N, DAG, Subtarget);
26393 case ISD::SINT_TO_FP: return PerformSINT_TO_FPCombine(N, DAG, Subtarget);
26394 case ISD::FADD: return PerformFADDCombine(N, DAG, Subtarget);
26395 case ISD::FSUB: return PerformFSUBCombine(N, DAG, Subtarget);
26397 case X86ISD::FOR: return PerformFORCombine(N, DAG);
26399 case X86ISD::FMAX: return PerformFMinFMaxCombine(N, DAG);
26400 case X86ISD::FAND: return PerformFANDCombine(N, DAG);
26401 case X86ISD::FANDN: return PerformFANDNCombine(N, DAG);
26402 case X86ISD::BT: return PerformBTCombine(N, DAG, DCI);
26403 case X86ISD::VZEXT_MOVL: return PerformVZEXT_MOVLCombine(N, DAG);
26404 case ISD::ANY_EXTEND:
26405 case ISD::ZERO_EXTEND: return PerformZExtCombine(N, DAG, DCI, Subtarget);
26406 case ISD::SIGN_EXTEND: return PerformSExtCombine(N, DAG, DCI, Subtarget);
26407 case ISD::SIGN_EXTEND_INREG:
26408 return PerformSIGN_EXTEND_INREGCombine(N, DAG, Subtarget);
26409 case ISD::TRUNCATE: return PerformTruncateCombine(N, DAG,DCI,Subtarget);
26410 case ISD::SETCC: return PerformISDSETCCCombine(N, DAG, Subtarget);
26411 case X86ISD::SETCC: return PerformSETCCCombine(N, DAG, DCI, Subtarget);
26412 case X86ISD::BRCOND: return PerformBrCondCombine(N, DAG, DCI, Subtarget);
26413 case X86ISD::VZEXT: return performVZEXTCombine(N, DAG, DCI, Subtarget);
26414 case X86ISD::SHUFP: // Handle all target specific shuffles
26415 case X86ISD::PALIGNR:
26416 case X86ISD::UNPCKH:
26417 case X86ISD::UNPCKL:
26418 case X86ISD::MOVHLPS:
26419 case X86ISD::MOVLHPS:
26420 case X86ISD::PSHUFB:
26421 case X86ISD::PSHUFD:
26422 case X86ISD::PSHUFHW:
26423 case X86ISD::PSHUFLW:
26424 case X86ISD::MOVSS:
26425 case X86ISD::MOVSD:
26426 case X86ISD::VPERMILPI:
26427 case X86ISD::VPERM2X128:
26428 case ISD::VECTOR_SHUFFLE: return PerformShuffleCombine(N, DAG, DCI,Subtarget);
26429 case ISD::FMA: return PerformFMACombine(N, DAG, Subtarget);
26430 case ISD::INTRINSIC_WO_CHAIN:
26431 return PerformINTRINSIC_WO_CHAINCombine(N, DAG, Subtarget);
26432 case X86ISD::INSERTPS: {
26433 if (getTargetMachine().getOptLevel() > CodeGenOpt::None)
26434 return PerformINSERTPSCombine(N, DAG, Subtarget);
26437 case ISD::BUILD_VECTOR: return PerformBUILD_VECTORCombine(N, DAG, Subtarget);
26443 /// isTypeDesirableForOp - Return true if the target has native support for
26444 /// the specified value type and it is 'desirable' to use the type for the
26445 /// given node type. e.g. On x86 i16 is legal, but undesirable since i16
26446 /// instruction encodings are longer and some i16 instructions are slow.
26447 bool X86TargetLowering::isTypeDesirableForOp(unsigned Opc, EVT VT) const {
26448 if (!isTypeLegal(VT))
26450 if (VT != MVT::i16)
26457 case ISD::SIGN_EXTEND:
26458 case ISD::ZERO_EXTEND:
26459 case ISD::ANY_EXTEND:
26472 /// IsDesirableToPromoteOp - This method queries the target whether it is
26473 /// beneficial for the dag combiner to promote the specified node. If true, it
26474 /// should return the desired promotion type by reference.
26475 bool X86TargetLowering::IsDesirableToPromoteOp(SDValue Op, EVT &PVT) const {
26476 EVT VT = Op.getValueType();
26477 if (VT != MVT::i16)
26480 bool Promote = false;
26481 bool Commute = false;
26482 switch (Op.getOpcode()) {
26485 LoadSDNode *LD = cast<LoadSDNode>(Op);
26486 // If the non-extending load has a single use and it's not live out, then it
26487 // might be folded.
26488 if (LD->getExtensionType() == ISD::NON_EXTLOAD /*&&
26489 Op.hasOneUse()*/) {
26490 for (SDNode::use_iterator UI = Op.getNode()->use_begin(),
26491 UE = Op.getNode()->use_end(); UI != UE; ++UI) {
26492 // The only case where we'd want to promote LOAD (rather than it being
26493 // promoted as an operand) is when its only use is a liveout.
26494 if (UI->getOpcode() != ISD::CopyToReg)
26501 case ISD::SIGN_EXTEND:
26502 case ISD::ZERO_EXTEND:
26503 case ISD::ANY_EXTEND:
26508 SDValue N0 = Op.getOperand(0);
26509 // Look out for (store (shl (load), x)).
26510 if (MayFoldLoad(N0) && MayFoldIntoStore(Op))
26523 SDValue N0 = Op.getOperand(0);
26524 SDValue N1 = Op.getOperand(1);
26525 if (!Commute && MayFoldLoad(N1))
26527 // Avoid disabling potential load folding opportunities.
26528 if (MayFoldLoad(N0) && (!isa<ConstantSDNode>(N1) || MayFoldIntoStore(Op)))
26530 if (MayFoldLoad(N1) && (!isa<ConstantSDNode>(N0) || MayFoldIntoStore(Op)))
26540 //===----------------------------------------------------------------------===//
26541 // X86 Inline Assembly Support
26542 //===----------------------------------------------------------------------===//
26545 // Helper to match a string against a sequence of pieces separated by whitespace.
26546 bool matchAsmImpl(StringRef s, ArrayRef<const StringRef *> args) {
26547 s = s.substr(s.find_first_not_of(" \t")); // Skip leading whitespace.
26549 for (unsigned i = 0, e = args.size(); i != e; ++i) {
26550 StringRef piece(*args[i]);
26551 if (!s.startswith(piece)) // Check if the piece matches.
26554 s = s.substr(piece.size());
26555 StringRef::size_type pos = s.find_first_not_of(" \t");
26556 if (pos == 0) // We matched a prefix.
26564 const VariadicFunction1<bool, StringRef, StringRef, matchAsmImpl> matchAsm={};
26567 static bool clobbersFlagRegisters(const SmallVector<StringRef, 4> &AsmPieces) {
26569 if (AsmPieces.size() == 3 || AsmPieces.size() == 4) {
26570 if (std::count(AsmPieces.begin(), AsmPieces.end(), "~{cc}") &&
26571 std::count(AsmPieces.begin(), AsmPieces.end(), "~{flags}") &&
26572 std::count(AsmPieces.begin(), AsmPieces.end(), "~{fpsr}")) {
26574 if (AsmPieces.size() == 3)
26576 else if (std::count(AsmPieces.begin(), AsmPieces.end(), "~{dirflag}"))
26583 bool X86TargetLowering::ExpandInlineAsm(CallInst *CI) const {
26584 InlineAsm *IA = cast<InlineAsm>(CI->getCalledValue());
26586 std::string AsmStr = IA->getAsmString();
26588 IntegerType *Ty = dyn_cast<IntegerType>(CI->getType());
26589 if (!Ty || Ty->getBitWidth() % 16 != 0)
26592 // TODO: should remove alternatives from the asmstring: "foo {a|b}" -> "foo a"
26593 SmallVector<StringRef, 4> AsmPieces;
26594 SplitString(AsmStr, AsmPieces, ";\n");
26596 switch (AsmPieces.size()) {
26597 default: return false;
26599 // FIXME: this should verify that we are targeting a 486 or better. If not,
26600 // we will turn this bswap into something that will be lowered to logical
26601 // ops instead of emitting the bswap asm. For now, we don't support 486 or
26602 // lower so don't worry about this.
26604 if (matchAsm(AsmPieces[0], "bswap", "$0") ||
26605 matchAsm(AsmPieces[0], "bswapl", "$0") ||
26606 matchAsm(AsmPieces[0], "bswapq", "$0") ||
26607 matchAsm(AsmPieces[0], "bswap", "${0:q}") ||
26608 matchAsm(AsmPieces[0], "bswapl", "${0:q}") ||
26609 matchAsm(AsmPieces[0], "bswapq", "${0:q}")) {
26610 // No need to check constraints, nothing other than the equivalent of
26611 // "=r,0" would be valid here.
26612 return IntrinsicLowering::LowerToByteSwap(CI);
26615 // rorw $$8, ${0:w} --> llvm.bswap.i16
26616 if (CI->getType()->isIntegerTy(16) &&
26617 IA->getConstraintString().compare(0, 5, "=r,0,") == 0 &&
26618 (matchAsm(AsmPieces[0], "rorw", "$$8,", "${0:w}") ||
26619 matchAsm(AsmPieces[0], "rolw", "$$8,", "${0:w}"))) {
26621 const std::string &ConstraintsStr = IA->getConstraintString();
26622 SplitString(StringRef(ConstraintsStr).substr(5), AsmPieces, ",");
26623 array_pod_sort(AsmPieces.begin(), AsmPieces.end());
26624 if (clobbersFlagRegisters(AsmPieces))
26625 return IntrinsicLowering::LowerToByteSwap(CI);
  case 3:
    if (CI->getType()->isIntegerTy(32) &&
        IA->getConstraintString().compare(0, 5, "=r,0,") == 0 &&
        matchAsm(AsmPieces[0], "rorw", "$$8,", "${0:w}") &&
        matchAsm(AsmPieces[1], "rorl", "$$16,", "$0") &&
        matchAsm(AsmPieces[2], "rorw", "$$8,", "${0:w}")) {
      AsmPieces.clear();
      const std::string &ConstraintsStr = IA->getConstraintString();
      SplitString(StringRef(ConstraintsStr).substr(5), AsmPieces, ",");
      array_pod_sort(AsmPieces.begin(), AsmPieces.end());
      if (clobbersFlagRegisters(AsmPieces))
        return IntrinsicLowering::LowerToByteSwap(CI);
    }

    if (CI->getType()->isIntegerTy(64)) {
      InlineAsm::ConstraintInfoVector Constraints = IA->ParseConstraints();
      if (Constraints.size() >= 2 &&
          Constraints[0].Codes.size() == 1 && Constraints[0].Codes[0] == "A" &&
          Constraints[1].Codes.size() == 1 && Constraints[1].Codes[0] == "0") {
        // bswap %eax / bswap %edx / xchgl %eax, %edx  ->  llvm.bswap.i64
        if (matchAsm(AsmPieces[0], "bswap", "%eax") &&
            matchAsm(AsmPieces[1], "bswap", "%edx") &&
            matchAsm(AsmPieces[2], "xchgl", "%eax,", "%edx"))
          return IntrinsicLowering::LowerToByteSwap(CI);
      }
    }
    break;
  }
  return false;
}
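// Illustrative input for ExpandInlineAsm (an assumed example, not taken from
// this file): a call such as
//   %r = call i32 asm "rorw $$8, ${0:w};rorl $$16, $0;rorw $$8, ${0:w}",
//                 "=r,0,~{cc},~{dirflag},~{fpsr},~{flags}"(i32 %x)
// splits into three pieces that match the i32 pattern above, its clobber list
// covers the flag registers, and the whole call is replaced with
//   %r = call i32 @llvm.bswap.i32(i32 %x)
// by IntrinsicLowering::LowerToByteSwap.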
/// getConstraintType - Given a constraint letter, return the type of
/// constraint it is for this target.
X86TargetLowering::ConstraintType
X86TargetLowering::getConstraintType(const std::string &Constraint) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    case 'R':
    case 'q':
    case 'Q':
    case 'f':
    case 't':
    case 'u':
    case 'y':
    case 'x':
    case 'Y':
    case 'l':
      return C_RegisterClass;
    case 'a':
    case 'b':
    case 'c':
    case 'd':
    case 'S':
    case 'D':
    case 'A':
      return C_Register;
    case 'I':
    case 'J':
    case 'K':
    case 'L':
    case 'M':
    case 'N':
    case 'G':
    case 'C':
    case 'e':
    case 'Z':
      return C_Other;
    default:
      break;
    }
  }
  return TargetLowering::getConstraintType(Constraint);
}
/// Examine constraint type and operand type and determine a weight value.
/// This object must already have been set up with the operand type
/// and the current alternative constraint selected.
TargetLowering::ConstraintWeight
X86TargetLowering::getSingleConstraintMatchWeight(
    AsmOperandInfo &info, const char *constraint) const {
  ConstraintWeight weight = CW_Invalid;
  Value *CallOperandVal = info.CallOperandVal;
  // If we don't have a value, we can't do a match,
  // but allow it at the lowest weight.
  if (!CallOperandVal)
    return CW_Default;
  Type *type = CallOperandVal->getType();
  // Look at the constraint type.
  switch (*constraint) {
  default:
    weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
    break;
  case 'R':
  case 'q':
  case 'Q':
  case 'a':
  case 'b':
  case 'c':
  case 'd':
  case 'S':
  case 'D':
  case 'A':
    if (CallOperandVal->getType()->isIntegerTy())
      weight = CW_SpecificReg;
    break;
  case 'f':
  case 't':
  case 'u':
    if (type->isFloatingPointTy())
      weight = CW_SpecificReg;
    break;
  case 'y':
    if (type->isX86_MMXTy() && Subtarget->hasMMX())
      weight = CW_SpecificReg;
    break;
  case 'x':
  case 'Y':
    if (((type->getPrimitiveSizeInBits() == 128) && Subtarget->hasSSE1()) ||
        ((type->getPrimitiveSizeInBits() == 256) && Subtarget->hasFp256()))
      weight = CW_Register;
    break;
  case 'I':
    if (ConstantInt *C = dyn_cast<ConstantInt>(info.CallOperandVal)) {
      if (C->getZExtValue() <= 31)
        weight = CW_Constant;
    }
    break;
  case 'J':
    if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
      if (C->getZExtValue() <= 63)
        weight = CW_Constant;
    }
    break;
  case 'K':
    if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
      if ((C->getSExtValue() >= -0x80) && (C->getSExtValue() <= 0x7f))
        weight = CW_Constant;
    }
    break;
  case 'L':
    if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
      if ((C->getZExtValue() == 0xff) || (C->getZExtValue() == 0xffff))
        weight = CW_Constant;
    }
    break;
  case 'M':
    if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
      if (C->getZExtValue() <= 3)
        weight = CW_Constant;
    }
    break;
  case 'N':
    if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
      if (C->getZExtValue() <= 0xff)
        weight = CW_Constant;
    }
    break;
  case 'G':
  case 'C':
    if (dyn_cast<ConstantFP>(CallOperandVal)) {
      weight = CW_Constant;
    }
    break;
  case 'e':
    if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
      if ((C->getSExtValue() >= -0x80000000LL) &&
          (C->getSExtValue() <= 0x7fffffffLL))
        weight = CW_Constant;
    }
    break;
  case 'Z':
    if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
      if (C->getZExtValue() <= 0xffffffff)
        weight = CW_Constant;
    }
    break;
  }
  return weight;
}
/// LowerXConstraint - try to replace an X constraint, which matches anything,
/// with another that has more specific requirements based on the type of the
/// corresponding operand.
const char *X86TargetLowering::
LowerXConstraint(EVT ConstraintVT) const {
  // FP X constraints get lowered to SSE1/2 registers if available, otherwise
  // 'f' like normal targets.
  if (ConstraintVT.isFloatingPoint()) {
    if (Subtarget->hasSSE2())
      return "Y";
    if (Subtarget->hasSSE1())
      return "x";
  }

  return TargetLowering::LowerXConstraint(ConstraintVT);
}
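// Illustrative mapping (assumed examples, not from this file): an "X"
// constraint on a 'float' operand is rewritten to "Y" when SSE2 is available
// and to "x" when only SSE1 is, steering the operand into an XMM register;
// non-floating-point operands fall back to TargetLowering::LowerXConstraint.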
/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
/// vector. If it is invalid, don't add anything to Ops.
void X86TargetLowering::LowerAsmOperandForConstraint(SDValue Op,
                                                     std::string &Constraint,
                                                     std::vector<SDValue>&Ops,
                                                     SelectionDAG &DAG) const {
  SDValue Result;

  // Only support length 1 constraints for now.
  if (Constraint.length() > 1) return;

  char ConstraintLetter = Constraint[0];
  switch (ConstraintLetter) {
  default: break;
  case 'I':
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      if (C->getZExtValue() <= 31) {
        Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
        break;
      }
    }
    return;
  case 'J':
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      if (C->getZExtValue() <= 63) {
        Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
        break;
      }
    }
    return;
  case 'K':
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      if (isInt<8>(C->getSExtValue())) {
        Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
        break;
      }
    }
    return;
  case 'L':
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      if (C->getZExtValue() == 0xff || C->getZExtValue() == 0xffff ||
          (Subtarget->is64Bit() && C->getZExtValue() == 0xffffffff)) {
        Result = DAG.getTargetConstant(C->getSExtValue(), Op.getValueType());
        break;
      }
    }
    return;
  case 'M':
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      if (C->getZExtValue() <= 3) {
        Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
        break;
      }
    }
    return;
  case 'N':
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      if (C->getZExtValue() <= 255) {
        Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
        break;
      }
    }
    return;
  case 'O':
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      if (C->getZExtValue() <= 127) {
        Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
        break;
      }
    }
    return;
  case 'e': {
    // 32-bit signed value
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      if (ConstantInt::isValueValidForType(Type::getInt32Ty(*DAG.getContext()),
                                           C->getSExtValue())) {
        // Widen to 64 bits here to get it sign extended.
        Result = DAG.getTargetConstant(C->getSExtValue(), MVT::i64);
        break;
      }
    // FIXME gcc accepts some relocatable values here too, but only in certain
    // memory models; it's complicated.
    }
    return;
  }
  case 'Z': {
    // 32-bit unsigned value
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      if (ConstantInt::isValueValidForType(Type::getInt32Ty(*DAG.getContext()),
                                           C->getZExtValue())) {
        Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
        break;
      }
    }
    // FIXME gcc accepts some relocatable values here too, but only in certain
    // memory models; it's complicated.
    return;
  }
  case 'i': {
    // Literal immediates are always ok.
    if (ConstantSDNode *CST = dyn_cast<ConstantSDNode>(Op)) {
      // Widen to 64 bits here to get it sign extended.
      Result = DAG.getTargetConstant(CST->getSExtValue(), MVT::i64);
      break;
    }

    // In any sort of PIC mode addresses need to be computed at runtime by
    // adding in a register or some sort of table lookup. These can't
    // be used as immediates.
    if (Subtarget->isPICStyleGOT() || Subtarget->isPICStyleStubPIC())
      return;

    // If we are in non-pic codegen mode, we allow the address of a global (with
    // an optional displacement) to be used with 'i'.
    GlobalAddressSDNode *GA = nullptr;
    int64_t Offset = 0;

    // Match either (GA), (GA+C), (GA+C1+C2), etc.
    while (1) {
      if ((GA = dyn_cast<GlobalAddressSDNode>(Op))) {
        Offset += GA->getOffset();
        break;
      } else if (Op.getOpcode() == ISD::ADD) {
        if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
          Offset += C->getZExtValue();
          Op = Op.getOperand(0);
          continue;
        }
      } else if (Op.getOpcode() == ISD::SUB) {
        if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
          Offset += -C->getZExtValue();
          Op = Op.getOperand(0);
          continue;
        }
      }

      // Otherwise, this isn't something we can handle, reject it.
      return;
    }

    const GlobalValue *GV = GA->getGlobal();
    // If we require an extra load to get this address, as in PIC mode, we
    // can't accept it.
    if (isGlobalStubReference(
            Subtarget->ClassifyGlobalReference(GV, DAG.getTarget())))
      return;

    Result = DAG.getTargetGlobalAddress(GV, SDLoc(Op),
                                        GA->getValueType(0), Offset);
    break;
  }
  }

  if (Result.getNode()) {
    Ops.push_back(Result);
    return;
  }
  return TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
}
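// A sketch of the 'i' path above (assumed example, not from this file): for an
// operand like @buf+16, the DAG node is (add (GlobalAddress @buf), 16); the
// matching loop folds the constant into Offset and pushes a
// TargetGlobalAddress of @buf with offset 16 onto Ops, but only for non-PIC
// code; under PIC the address would need an extra load, so it is rejected.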
std::pair<unsigned, const TargetRegisterClass*>
X86TargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
                                                MVT VT) const {
  // First, see if this is a constraint that directly corresponds to an LLVM
  // register class.
  if (Constraint.size() == 1) {
    // GCC Constraint Letters
    switch (Constraint[0]) {
    default: break;
    // TODO: Slight differences here in allocation order and leaving
    // RIP in the class. Do they matter any more here than they do
    // in the normal allocation?
    case 'q':   // GENERAL_REGS in 64-bit mode, Q_REGS in 32-bit mode.
      if (Subtarget->is64Bit()) {
        if (VT == MVT::i32 || VT == MVT::f32)
          return std::make_pair(0U, &X86::GR32RegClass);
        if (VT == MVT::i16)
          return std::make_pair(0U, &X86::GR16RegClass);
        if (VT == MVT::i8 || VT == MVT::i1)
          return std::make_pair(0U, &X86::GR8RegClass);
        if (VT == MVT::i64 || VT == MVT::f64)
          return std::make_pair(0U, &X86::GR64RegClass);
        break;
      }
      // 32-bit fallthrough
    case 'Q':   // Q_REGS
      if (VT == MVT::i32 || VT == MVT::f32)
        return std::make_pair(0U, &X86::GR32_ABCDRegClass);
      if (VT == MVT::i16)
        return std::make_pair(0U, &X86::GR16_ABCDRegClass);
      if (VT == MVT::i8 || VT == MVT::i1)
        return std::make_pair(0U, &X86::GR8_ABCD_LRegClass);
      if (VT == MVT::i64)
        return std::make_pair(0U, &X86::GR64_ABCDRegClass);
      break;
    case 'r':   // GENERAL_REGS
    case 'l':   // INDEX_REGS
      if (VT == MVT::i8 || VT == MVT::i1)
        return std::make_pair(0U, &X86::GR8RegClass);
      if (VT == MVT::i16)
        return std::make_pair(0U, &X86::GR16RegClass);
      if (VT == MVT::i32 || VT == MVT::f32 || !Subtarget->is64Bit())
        return std::make_pair(0U, &X86::GR32RegClass);
      return std::make_pair(0U, &X86::GR64RegClass);
    case 'R':   // LEGACY_REGS
      if (VT == MVT::i8 || VT == MVT::i1)
        return std::make_pair(0U, &X86::GR8_NOREXRegClass);
      if (VT == MVT::i16)
        return std::make_pair(0U, &X86::GR16_NOREXRegClass);
      if (VT == MVT::i32 || !Subtarget->is64Bit())
        return std::make_pair(0U, &X86::GR32_NOREXRegClass);
      return std::make_pair(0U, &X86::GR64_NOREXRegClass);
    case 'f':   // FP Stack registers.
      // If SSE is enabled for this VT, use f80 to ensure the isel moves the
      // value to the correct fpstack register class.
      if (VT == MVT::f32 && !isScalarFPTypeInSSEReg(VT))
        return std::make_pair(0U, &X86::RFP32RegClass);
      if (VT == MVT::f64 && !isScalarFPTypeInSSEReg(VT))
        return std::make_pair(0U, &X86::RFP64RegClass);
      return std::make_pair(0U, &X86::RFP80RegClass);
    case 'y':   // MMX_REGS if MMX allowed.
      if (!Subtarget->hasMMX()) break;
      return std::make_pair(0U, &X86::VR64RegClass);
    case 'Y':   // SSE_REGS if SSE2 allowed
      if (!Subtarget->hasSSE2()) break;
      // FALL THROUGH.
    case 'x':   // SSE_REGS if SSE1 allowed or AVX_REGS if AVX allowed
      if (!Subtarget->hasSSE1()) break;

      switch (VT.SimpleTy) {
      default: break;
      // Scalar SSE types.
      case MVT::f32:
      case MVT::i32:
        return std::make_pair(0U, &X86::FR32RegClass);
      case MVT::f64:
      case MVT::i64:
        return std::make_pair(0U, &X86::FR64RegClass);
      // Vector types.
      case MVT::v16i8:
      case MVT::v8i16:
      case MVT::v4i32:
      case MVT::v2i64:
      case MVT::v4f32:
      case MVT::v2f64:
        return std::make_pair(0U, &X86::VR128RegClass);
      // AVX types.
      case MVT::v32i8:
      case MVT::v16i16:
      case MVT::v8i32:
      case MVT::v4i64:
      case MVT::v8f32:
      case MVT::v4f64:
        return std::make_pair(0U, &X86::VR256RegClass);
      case MVT::v8f64:
      case MVT::v16f32:
      case MVT::v16i32:
      case MVT::v8i64:
        return std::make_pair(0U, &X86::VR512RegClass);
      }
      break;
    }
  }
  // Use the default implementation in TargetLowering to convert the register
  // constraint into a member of a register class.
  std::pair<unsigned, const TargetRegisterClass*> Res;
  Res = TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);

  // Not found as a standard register?
  if (!Res.second) {
    // Map st(0) -> st(7) -> ST0
    if (Constraint.size() == 7 && Constraint[0] == '{' &&
        tolower(Constraint[1]) == 's' &&
        tolower(Constraint[2]) == 't' &&
        Constraint[3] == '(' &&
        (Constraint[4] >= '0' && Constraint[4] <= '7') &&
        Constraint[5] == ')' &&
        Constraint[6] == '}') {

      Res.first = X86::FP0+Constraint[4]-'0';
      Res.second = &X86::RFP80RegClass;
      return Res;
    }
    // GCC allows "st(0)" to be called just plain "st".
    if (StringRef("{st}").equals_lower(Constraint)) {
      Res.first = X86::FP0;
      Res.second = &X86::RFP80RegClass;
      return Res;
    }

    // flags -> EFLAGS
    if (StringRef("{flags}").equals_lower(Constraint)) {
      Res.first = X86::EFLAGS;
      Res.second = &X86::CCRRegClass;
      return Res;
    }

    // 'A' means EAX + EDX.
    if (Constraint == "A") {
      Res.first = X86::EAX;
      Res.second = &X86::GR32_ADRegClass;
      return Res;
    }
    return Res;
  }
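  // Illustrative constraints handled here (assumed inputs, not from this
  // file): "{st(3)}" resolves to X86::FP3 in RFP80RegClass, "{st}" and
  // "{flags}" map to FP0 and EFLAGS respectively, and the GCC constraint "A"
  // requests the EAX:EDX register pair through GR32_ADRegClass.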
  // Otherwise, check to see if this is a register class of the wrong value
  // type. For example, we want to map "{ax},i32" -> {eax}, we don't want it to
  // turn into {ax},{dx}.
  if (Res.second->hasType(VT))
    return Res;   // Correct type already, nothing to do.

  // All of the single-register GCC register classes map their values onto
  // 16-bit register pieces "ax","dx","cx","bx","si","di","bp","sp". If we
  // really want an 8-bit or 32-bit register, map to the appropriate register
  // class and return the appropriate register.
  if (Res.second == &X86::GR16RegClass) {
    if (VT == MVT::i8 || VT == MVT::i1) {
      unsigned DestReg = 0;
      switch (Res.first) {
      default: break;
      case X86::AX: DestReg = X86::AL; break;
      case X86::DX: DestReg = X86::DL; break;
      case X86::CX: DestReg = X86::CL; break;
      case X86::BX: DestReg = X86::BL; break;
      }
      if (DestReg) {
        Res.first = DestReg;
        Res.second = &X86::GR8RegClass;
      }
    } else if (VT == MVT::i32 || VT == MVT::f32) {
      unsigned DestReg = 0;
      switch (Res.first) {
      default: break;
      case X86::AX: DestReg = X86::EAX; break;
      case X86::DX: DestReg = X86::EDX; break;
      case X86::CX: DestReg = X86::ECX; break;
      case X86::BX: DestReg = X86::EBX; break;
      case X86::SI: DestReg = X86::ESI; break;
      case X86::DI: DestReg = X86::EDI; break;
      case X86::BP: DestReg = X86::EBP; break;
      case X86::SP: DestReg = X86::ESP; break;
      }
      if (DestReg) {
        Res.first = DestReg;
        Res.second = &X86::GR32RegClass;
      }
    } else if (VT == MVT::i64 || VT == MVT::f64) {
      unsigned DestReg = 0;
      switch (Res.first) {
      default: break;
      case X86::AX: DestReg = X86::RAX; break;
      case X86::DX: DestReg = X86::RDX; break;
      case X86::CX: DestReg = X86::RCX; break;
      case X86::BX: DestReg = X86::RBX; break;
      case X86::SI: DestReg = X86::RSI; break;
      case X86::DI: DestReg = X86::RDI; break;
      case X86::BP: DestReg = X86::RBP; break;
      case X86::SP: DestReg = X86::RSP; break;
      }
      if (DestReg) {
        Res.first = DestReg;
        Res.second = &X86::GR64RegClass;
      }
    }
  } else if (Res.second == &X86::FR32RegClass ||
             Res.second == &X86::FR64RegClass ||
             Res.second == &X86::VR128RegClass ||
             Res.second == &X86::VR256RegClass ||
             Res.second == &X86::FR32XRegClass ||
             Res.second == &X86::FR64XRegClass ||
             Res.second == &X86::VR128XRegClass ||
             Res.second == &X86::VR256XRegClass ||
             Res.second == &X86::VR512RegClass) {
    // Handle references to XMM physical registers that got mapped into the
    // wrong class. This can happen with constraints like {xmm0} where the
    // target independent register mapper will just pick the first match it can
    // find, ignoring the required type.

    if (VT == MVT::f32 || VT == MVT::i32)
      Res.second = &X86::FR32RegClass;
    else if (VT == MVT::f64 || VT == MVT::i64)
      Res.second = &X86::FR64RegClass;
    else if (X86::VR128RegClass.hasType(VT))
      Res.second = &X86::VR128RegClass;
    else if (X86::VR256RegClass.hasType(VT))
      Res.second = &X86::VR256RegClass;
    else if (X86::VR512RegClass.hasType(VT))
      Res.second = &X86::VR512RegClass;
  }

  return Res;
}
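// For example (assumed input, not from this file): "{xmm0}" with a v8f32
// operand is first resolved to XMM0 in VR128RegClass by the generic mapper;
// since v8f32 needs a 256-bit register, the code above switches the register
// class to VR256RegClass (the register number itself is left unchanged).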
int X86TargetLowering::getScalingFactorCost(const AddrMode &AM,
                                            Type *Ty) const {
  // Scaling factors are not free at all.
  // An indexed folded instruction, i.e., inst (reg1, reg2, scale),
  // will take 2 allocations in the out of order engine instead of 1
  // for plain addressing mode, i.e. inst (reg1).
  // E.g.,
  // vaddps (%rsi,%rdx), %ymm0, %ymm1
  // requires two allocations (one for the load, one for the computation),
  // whereas:
  // vaddps (%rsi), %ymm0, %ymm1
  // requires just 1 allocation, i.e., freeing allocations for other operations
  // and having fewer micro operations to execute.
  //
  // For some X86 architectures, this is even worse because, for instance for
  // stores, the complex addressing mode forces the instruction to use the
  // "load" ports instead of the dedicated "store" port.
  // E.g., on Haswell:
  // vmovaps %ymm1, (%r8, %rdi) can use port 2 or 3.
  // vmovaps %ymm1, (%r8) can use port 2, 3, or 7.
  if (isLegalAddressingMode(AM, Ty))
    // Scale represents reg2 * scale, thus account for 1
    // as soon as we use a second register.
    return AM.Scale != 0;
  return -1;
}
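// Worked example for getScalingFactorCost (assumed values, not from this
// file): a legal mode like [BaseReg + 4*IndexReg] has AM.Scale == 4, so the
// hook returns 1 to model the extra allocation the scaled index costs; plain
// [BaseReg] has Scale 0 and costs 0, and an illegal addressing mode returns -1.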
bool X86TargetLowering::isTargetFTOL() const {
  return Subtarget->isTargetKnownWindowsMSVC() && !Subtarget->is64Bit();
}