1 //===-- SelectionDAGBuilder.cpp - Selection-DAG building ------------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This implements routines for translating from LLVM IR into SelectionDAG IR.
12 //===----------------------------------------------------------------------===//
14 #include "SelectionDAGBuilder.h"
15 #include "SDNodeDbgValue.h"
16 #include "llvm/ADT/BitVector.h"
17 #include "llvm/ADT/Optional.h"
18 #include "llvm/ADT/SmallSet.h"
19 #include "llvm/ADT/Statistic.h"
20 #include "llvm/Analysis/AliasAnalysis.h"
21 #include "llvm/Analysis/BranchProbabilityInfo.h"
22 #include "llvm/Analysis/ConstantFolding.h"
23 #include "llvm/Analysis/TargetLibraryInfo.h"
24 #include "llvm/Analysis/ValueTracking.h"
25 #include "llvm/CodeGen/FastISel.h"
26 #include "llvm/CodeGen/FunctionLoweringInfo.h"
27 #include "llvm/CodeGen/GCMetadata.h"
28 #include "llvm/CodeGen/GCStrategy.h"
29 #include "llvm/CodeGen/MachineFrameInfo.h"
30 #include "llvm/CodeGen/MachineFunction.h"
31 #include "llvm/CodeGen/MachineInstrBuilder.h"
32 #include "llvm/CodeGen/MachineJumpTableInfo.h"
33 #include "llvm/CodeGen/MachineModuleInfo.h"
34 #include "llvm/CodeGen/MachineRegisterInfo.h"
35 #include "llvm/CodeGen/SelectionDAG.h"
36 #include "llvm/CodeGen/StackMaps.h"
37 #include "llvm/CodeGen/WinEHFuncInfo.h"
38 #include "llvm/IR/CallingConv.h"
39 #include "llvm/IR/Constants.h"
40 #include "llvm/IR/DataLayout.h"
41 #include "llvm/IR/DebugInfo.h"
42 #include "llvm/IR/DerivedTypes.h"
43 #include "llvm/IR/Function.h"
44 #include "llvm/IR/GlobalVariable.h"
45 #include "llvm/IR/InlineAsm.h"
46 #include "llvm/IR/Instructions.h"
47 #include "llvm/IR/IntrinsicInst.h"
48 #include "llvm/IR/Intrinsics.h"
49 #include "llvm/IR/LLVMContext.h"
50 #include "llvm/IR/Module.h"
51 #include "llvm/IR/Statepoint.h"
52 #include "llvm/MC/MCSymbol.h"
53 #include "llvm/Support/CommandLine.h"
54 #include "llvm/Support/Debug.h"
55 #include "llvm/Support/ErrorHandling.h"
56 #include "llvm/Support/MathExtras.h"
57 #include "llvm/Support/raw_ostream.h"
58 #include "llvm/Target/TargetFrameLowering.h"
59 #include "llvm/Target/TargetInstrInfo.h"
60 #include "llvm/Target/TargetIntrinsicInfo.h"
61 #include "llvm/Target/TargetLowering.h"
62 #include "llvm/Target/TargetOptions.h"
63 #include "llvm/Target/TargetSelectionDAGInfo.h"
64 #include "llvm/Target/TargetSubtargetInfo.h"
68 #define DEBUG_TYPE "isel"
70 /// LimitFloatPrecision - Generate low-precision inline sequences for
71 /// some float libcalls (6, 8 or 12 bits).
72 static unsigned LimitFloatPrecision;
74 static cl::opt<unsigned, true>
75 LimitFPPrecision("limit-float-precision",
76 cl::desc("Generate low-precision inline sequences "
77 "for some float libcalls"),
78 cl::location(LimitFloatPrecision),
82 EnableFMFInDAG("enable-fmf-dag", cl::init(false), cl::Hidden,
83 cl::desc("Enable fast-math-flags for DAG nodes"));
85 // Limit the width of DAG chains. This is important in general to prevent
86 // DAG-based analysis from blowing up. For example, alias analysis and
87 // load clustering may not complete in reasonable time. It is difficult to
88 // recognize and avoid this situation within each individual analysis, and
89 // future analyses are likely to have the same behavior. Limiting DAG width is
90 // the safe approach and will be especially important with global DAGs.
92 // MaxParallelChains default is arbitrarily high to avoid affecting
93 // optimization, but could be lowered to improve compile time. Any ld-ld-st-st
94 // sequence over this should have been converted to llvm.memcpy by the
95 // frontend. It easy to induce this behavior with .ll code such as:
96 // %buffer = alloca [4096 x i8]
97 // %data = load [4096 x i8]* %argPtr
98 // store [4096 x i8] %data, [4096 x i8]* %buffer
99 static const unsigned MaxParallelChains = 64;
101 static SDValue getCopyFromPartsVector(SelectionDAG &DAG, SDLoc DL,
102 const SDValue *Parts, unsigned NumParts,
103 MVT PartVT, EVT ValueVT, const Value *V);
105 /// getCopyFromParts - Create a value that contains the specified legal parts
106 /// combined into the value they represent. If the parts combine to a type
107 /// larger then ValueVT then AssertOp can be used to specify whether the extra
108 /// bits are known to be zero (ISD::AssertZext) or sign extended from ValueVT
109 /// (ISD::AssertSext).
110 static SDValue getCopyFromParts(SelectionDAG &DAG, SDLoc DL,
111 const SDValue *Parts,
112 unsigned NumParts, MVT PartVT, EVT ValueVT,
114 ISD::NodeType AssertOp = ISD::DELETED_NODE) {
115 if (ValueVT.isVector())
116 return getCopyFromPartsVector(DAG, DL, Parts, NumParts,
119 assert(NumParts > 0 && "No parts to assemble!");
120 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
121 SDValue Val = Parts[0];
124 // Assemble the value from multiple parts.
125 if (ValueVT.isInteger()) {
126 unsigned PartBits = PartVT.getSizeInBits();
127 unsigned ValueBits = ValueVT.getSizeInBits();
129 // Assemble the power of 2 part.
130 unsigned RoundParts = NumParts & (NumParts - 1) ?
131 1 << Log2_32(NumParts) : NumParts;
132 unsigned RoundBits = PartBits * RoundParts;
133 EVT RoundVT = RoundBits == ValueBits ?
134 ValueVT : EVT::getIntegerVT(*DAG.getContext(), RoundBits);
137 EVT HalfVT = EVT::getIntegerVT(*DAG.getContext(), RoundBits/2);
139 if (RoundParts > 2) {
140 Lo = getCopyFromParts(DAG, DL, Parts, RoundParts / 2,
142 Hi = getCopyFromParts(DAG, DL, Parts + RoundParts / 2,
143 RoundParts / 2, PartVT, HalfVT, V);
145 Lo = DAG.getNode(ISD::BITCAST, DL, HalfVT, Parts[0]);
146 Hi = DAG.getNode(ISD::BITCAST, DL, HalfVT, Parts[1]);
149 if (DAG.getDataLayout().isBigEndian())
152 Val = DAG.getNode(ISD::BUILD_PAIR, DL, RoundVT, Lo, Hi);
154 if (RoundParts < NumParts) {
155 // Assemble the trailing non-power-of-2 part.
156 unsigned OddParts = NumParts - RoundParts;
157 EVT OddVT = EVT::getIntegerVT(*DAG.getContext(), OddParts * PartBits);
158 Hi = getCopyFromParts(DAG, DL,
159 Parts + RoundParts, OddParts, PartVT, OddVT, V);
161 // Combine the round and odd parts.
163 if (DAG.getDataLayout().isBigEndian())
165 EVT TotalVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
166 Hi = DAG.getNode(ISD::ANY_EXTEND, DL, TotalVT, Hi);
167 Hi = DAG.getNode(ISD::SHL, DL, TotalVT, Hi,
168 DAG.getConstant(Lo.getValueType().getSizeInBits(), DL,
169 TLI.getPointerTy()));
170 Lo = DAG.getNode(ISD::ZERO_EXTEND, DL, TotalVT, Lo);
171 Val = DAG.getNode(ISD::OR, DL, TotalVT, Lo, Hi);
173 } else if (PartVT.isFloatingPoint()) {
174 // FP split into multiple FP parts (for ppcf128)
175 assert(ValueVT == EVT(MVT::ppcf128) && PartVT == MVT::f64 &&
178 Lo = DAG.getNode(ISD::BITCAST, DL, EVT(MVT::f64), Parts[0]);
179 Hi = DAG.getNode(ISD::BITCAST, DL, EVT(MVT::f64), Parts[1]);
180 if (TLI.hasBigEndianPartOrdering(ValueVT, DAG.getDataLayout()))
182 Val = DAG.getNode(ISD::BUILD_PAIR, DL, ValueVT, Lo, Hi);
184 // FP split into integer parts (soft fp)
185 assert(ValueVT.isFloatingPoint() && PartVT.isInteger() &&
186 !PartVT.isVector() && "Unexpected split");
187 EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), ValueVT.getSizeInBits());
188 Val = getCopyFromParts(DAG, DL, Parts, NumParts, PartVT, IntVT, V);
192 // There is now one part, held in Val. Correct it to match ValueVT.
193 EVT PartEVT = Val.getValueType();
195 if (PartEVT == ValueVT)
198 if (PartEVT.isInteger() && ValueVT.isInteger()) {
199 if (ValueVT.bitsLT(PartEVT)) {
200 // For a truncate, see if we have any information to
201 // indicate whether the truncated bits will always be
202 // zero or sign-extension.
203 if (AssertOp != ISD::DELETED_NODE)
204 Val = DAG.getNode(AssertOp, DL, PartEVT, Val,
205 DAG.getValueType(ValueVT));
206 return DAG.getNode(ISD::TRUNCATE, DL, ValueVT, Val);
208 return DAG.getNode(ISD::ANY_EXTEND, DL, ValueVT, Val);
211 if (PartEVT.isFloatingPoint() && ValueVT.isFloatingPoint()) {
212 // FP_ROUND's are always exact here.
213 if (ValueVT.bitsLT(Val.getValueType()))
214 return DAG.getNode(ISD::FP_ROUND, DL, ValueVT, Val,
215 DAG.getTargetConstant(1, DL, TLI.getPointerTy()));
217 return DAG.getNode(ISD::FP_EXTEND, DL, ValueVT, Val);
220 if (PartEVT.getSizeInBits() == ValueVT.getSizeInBits())
221 return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);
223 llvm_unreachable("Unknown mismatch!");
226 static void diagnosePossiblyInvalidConstraint(LLVMContext &Ctx, const Value *V,
227 const Twine &ErrMsg) {
228 const Instruction *I = dyn_cast_or_null<Instruction>(V);
230 return Ctx.emitError(ErrMsg);
232 const char *AsmError = ", possible invalid constraint for vector type";
233 if (const CallInst *CI = dyn_cast<CallInst>(I))
234 if (isa<InlineAsm>(CI->getCalledValue()))
235 return Ctx.emitError(I, ErrMsg + AsmError);
237 return Ctx.emitError(I, ErrMsg);
240 /// getCopyFromPartsVector - Create a value that contains the specified legal
241 /// parts combined into the value they represent. If the parts combine to a
242 /// type larger then ValueVT then AssertOp can be used to specify whether the
243 /// extra bits are known to be zero (ISD::AssertZext) or sign extended from
244 /// ValueVT (ISD::AssertSext).
245 static SDValue getCopyFromPartsVector(SelectionDAG &DAG, SDLoc DL,
246 const SDValue *Parts, unsigned NumParts,
247 MVT PartVT, EVT ValueVT, const Value *V) {
248 assert(ValueVT.isVector() && "Not a vector value");
249 assert(NumParts > 0 && "No parts to assemble!");
250 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
251 SDValue Val = Parts[0];
253 // Handle a multi-element vector.
257 unsigned NumIntermediates;
259 TLI.getVectorTypeBreakdown(*DAG.getContext(), ValueVT, IntermediateVT,
260 NumIntermediates, RegisterVT);
261 assert(NumRegs == NumParts && "Part count doesn't match vector breakdown!");
262 NumParts = NumRegs; // Silence a compiler warning.
263 assert(RegisterVT == PartVT && "Part type doesn't match vector breakdown!");
264 assert(RegisterVT.getSizeInBits() ==
265 Parts[0].getSimpleValueType().getSizeInBits() &&
266 "Part type sizes don't match!");
268 // Assemble the parts into intermediate operands.
269 SmallVector<SDValue, 8> Ops(NumIntermediates);
270 if (NumIntermediates == NumParts) {
271 // If the register was not expanded, truncate or copy the value,
273 for (unsigned i = 0; i != NumParts; ++i)
274 Ops[i] = getCopyFromParts(DAG, DL, &Parts[i], 1,
275 PartVT, IntermediateVT, V);
276 } else if (NumParts > 0) {
277 // If the intermediate type was expanded, build the intermediate
278 // operands from the parts.
279 assert(NumParts % NumIntermediates == 0 &&
280 "Must expand into a divisible number of parts!");
281 unsigned Factor = NumParts / NumIntermediates;
282 for (unsigned i = 0; i != NumIntermediates; ++i)
283 Ops[i] = getCopyFromParts(DAG, DL, &Parts[i * Factor], Factor,
284 PartVT, IntermediateVT, V);
287 // Build a vector with BUILD_VECTOR or CONCAT_VECTORS from the
288 // intermediate operands.
289 Val = DAG.getNode(IntermediateVT.isVector() ? ISD::CONCAT_VECTORS
294 // There is now one part, held in Val. Correct it to match ValueVT.
295 EVT PartEVT = Val.getValueType();
297 if (PartEVT == ValueVT)
300 if (PartEVT.isVector()) {
301 // If the element type of the source/dest vectors are the same, but the
302 // parts vector has more elements than the value vector, then we have a
303 // vector widening case (e.g. <2 x float> -> <4 x float>). Extract the
305 if (PartEVT.getVectorElementType() == ValueVT.getVectorElementType()) {
306 assert(PartEVT.getVectorNumElements() > ValueVT.getVectorNumElements() &&
307 "Cannot narrow, it would be a lossy transformation");
308 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, ValueVT, Val,
309 DAG.getConstant(0, DL, TLI.getVectorIdxTy()));
312 // Vector/Vector bitcast.
313 if (ValueVT.getSizeInBits() == PartEVT.getSizeInBits())
314 return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);
316 assert(PartEVT.getVectorNumElements() == ValueVT.getVectorNumElements() &&
317 "Cannot handle this kind of promotion");
318 // Promoted vector extract
319 bool Smaller = ValueVT.bitsLE(PartEVT);
320 return DAG.getNode((Smaller ? ISD::TRUNCATE : ISD::ANY_EXTEND),
325 // Trivial bitcast if the types are the same size and the destination
326 // vector type is legal.
327 if (PartEVT.getSizeInBits() == ValueVT.getSizeInBits() &&
328 TLI.isTypeLegal(ValueVT))
329 return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);
331 // Handle cases such as i8 -> <1 x i1>
332 if (ValueVT.getVectorNumElements() != 1) {
333 diagnosePossiblyInvalidConstraint(*DAG.getContext(), V,
334 "non-trivial scalar-to-vector conversion");
335 return DAG.getUNDEF(ValueVT);
338 if (ValueVT.getVectorNumElements() == 1 &&
339 ValueVT.getVectorElementType() != PartEVT) {
340 bool Smaller = ValueVT.bitsLE(PartEVT);
341 Val = DAG.getNode((Smaller ? ISD::TRUNCATE : ISD::ANY_EXTEND),
342 DL, ValueVT.getScalarType(), Val);
345 return DAG.getNode(ISD::BUILD_VECTOR, DL, ValueVT, Val);
348 static void getCopyToPartsVector(SelectionDAG &DAG, SDLoc dl,
349 SDValue Val, SDValue *Parts, unsigned NumParts,
350 MVT PartVT, const Value *V);
352 /// getCopyToParts - Create a series of nodes that contain the specified value
353 /// split into legal parts. If the parts contain more bits than Val, then, for
354 /// integers, ExtendKind can be used to specify how to generate the extra bits.
355 static void getCopyToParts(SelectionDAG &DAG, SDLoc DL,
356 SDValue Val, SDValue *Parts, unsigned NumParts,
357 MVT PartVT, const Value *V,
358 ISD::NodeType ExtendKind = ISD::ANY_EXTEND) {
359 EVT ValueVT = Val.getValueType();
361 // Handle the vector case separately.
362 if (ValueVT.isVector())
363 return getCopyToPartsVector(DAG, DL, Val, Parts, NumParts, PartVT, V);
365 unsigned PartBits = PartVT.getSizeInBits();
366 unsigned OrigNumParts = NumParts;
367 assert(DAG.getTargetLoweringInfo().isTypeLegal(PartVT) &&
368 "Copying to an illegal type!");
373 assert(!ValueVT.isVector() && "Vector case handled elsewhere");
374 EVT PartEVT = PartVT;
375 if (PartEVT == ValueVT) {
376 assert(NumParts == 1 && "No-op copy with multiple parts!");
381 if (NumParts * PartBits > ValueVT.getSizeInBits()) {
382 // If the parts cover more bits than the value has, promote the value.
383 if (PartVT.isFloatingPoint() && ValueVT.isFloatingPoint()) {
384 assert(NumParts == 1 && "Do not know what to promote to!");
385 Val = DAG.getNode(ISD::FP_EXTEND, DL, PartVT, Val);
387 assert((PartVT.isInteger() || PartVT == MVT::x86mmx) &&
388 ValueVT.isInteger() &&
389 "Unknown mismatch!");
390 ValueVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
391 Val = DAG.getNode(ExtendKind, DL, ValueVT, Val);
392 if (PartVT == MVT::x86mmx)
393 Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
395 } else if (PartBits == ValueVT.getSizeInBits()) {
396 // Different types of the same size.
397 assert(NumParts == 1 && PartEVT != ValueVT);
398 Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
399 } else if (NumParts * PartBits < ValueVT.getSizeInBits()) {
400 // If the parts cover less bits than value has, truncate the value.
401 assert((PartVT.isInteger() || PartVT == MVT::x86mmx) &&
402 ValueVT.isInteger() &&
403 "Unknown mismatch!");
404 ValueVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
405 Val = DAG.getNode(ISD::TRUNCATE, DL, ValueVT, Val);
406 if (PartVT == MVT::x86mmx)
407 Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
410 // The value may have changed - recompute ValueVT.
411 ValueVT = Val.getValueType();
412 assert(NumParts * PartBits == ValueVT.getSizeInBits() &&
413 "Failed to tile the value with PartVT!");
416 if (PartEVT != ValueVT)
417 diagnosePossiblyInvalidConstraint(*DAG.getContext(), V,
418 "scalar-to-vector conversion failed");
424 // Expand the value into multiple parts.
425 if (NumParts & (NumParts - 1)) {
426 // The number of parts is not a power of 2. Split off and copy the tail.
427 assert(PartVT.isInteger() && ValueVT.isInteger() &&
428 "Do not know what to expand to!");
429 unsigned RoundParts = 1 << Log2_32(NumParts);
430 unsigned RoundBits = RoundParts * PartBits;
431 unsigned OddParts = NumParts - RoundParts;
432 SDValue OddVal = DAG.getNode(ISD::SRL, DL, ValueVT, Val,
433 DAG.getIntPtrConstant(RoundBits, DL));
434 getCopyToParts(DAG, DL, OddVal, Parts + RoundParts, OddParts, PartVT, V);
436 if (DAG.getDataLayout().isBigEndian())
437 // The odd parts were reversed by getCopyToParts - unreverse them.
438 std::reverse(Parts + RoundParts, Parts + NumParts);
440 NumParts = RoundParts;
441 ValueVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
442 Val = DAG.getNode(ISD::TRUNCATE, DL, ValueVT, Val);
445 // The number of parts is a power of 2. Repeatedly bisect the value using
447 Parts[0] = DAG.getNode(ISD::BITCAST, DL,
448 EVT::getIntegerVT(*DAG.getContext(),
449 ValueVT.getSizeInBits()),
452 for (unsigned StepSize = NumParts; StepSize > 1; StepSize /= 2) {
453 for (unsigned i = 0; i < NumParts; i += StepSize) {
454 unsigned ThisBits = StepSize * PartBits / 2;
455 EVT ThisVT = EVT::getIntegerVT(*DAG.getContext(), ThisBits);
456 SDValue &Part0 = Parts[i];
457 SDValue &Part1 = Parts[i+StepSize/2];
459 Part1 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL,
460 ThisVT, Part0, DAG.getIntPtrConstant(1, DL));
461 Part0 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL,
462 ThisVT, Part0, DAG.getIntPtrConstant(0, DL));
464 if (ThisBits == PartBits && ThisVT != PartVT) {
465 Part0 = DAG.getNode(ISD::BITCAST, DL, PartVT, Part0);
466 Part1 = DAG.getNode(ISD::BITCAST, DL, PartVT, Part1);
471 if (DAG.getDataLayout().isBigEndian())
472 std::reverse(Parts, Parts + OrigNumParts);
476 /// getCopyToPartsVector - Create a series of nodes that contain the specified
477 /// value split into legal parts.
478 static void getCopyToPartsVector(SelectionDAG &DAG, SDLoc DL,
479 SDValue Val, SDValue *Parts, unsigned NumParts,
480 MVT PartVT, const Value *V) {
481 EVT ValueVT = Val.getValueType();
482 assert(ValueVT.isVector() && "Not a vector");
483 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
486 EVT PartEVT = PartVT;
487 if (PartEVT == ValueVT) {
489 } else if (PartVT.getSizeInBits() == ValueVT.getSizeInBits()) {
490 // Bitconvert vector->vector case.
491 Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
492 } else if (PartVT.isVector() &&
493 PartEVT.getVectorElementType() == ValueVT.getVectorElementType() &&
494 PartEVT.getVectorNumElements() > ValueVT.getVectorNumElements()) {
495 EVT ElementVT = PartVT.getVectorElementType();
496 // Vector widening case, e.g. <2 x float> -> <4 x float>. Shuffle in
498 SmallVector<SDValue, 16> Ops;
499 for (unsigned i = 0, e = ValueVT.getVectorNumElements(); i != e; ++i)
500 Ops.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL,
501 ElementVT, Val, DAG.getConstant(i, DL,
502 TLI.getVectorIdxTy())));
504 for (unsigned i = ValueVT.getVectorNumElements(),
505 e = PartVT.getVectorNumElements(); i != e; ++i)
506 Ops.push_back(DAG.getUNDEF(ElementVT));
508 Val = DAG.getNode(ISD::BUILD_VECTOR, DL, PartVT, Ops);
510 // FIXME: Use CONCAT for 2x -> 4x.
512 //SDValue UndefElts = DAG.getUNDEF(VectorTy);
513 //Val = DAG.getNode(ISD::CONCAT_VECTORS, DL, PartVT, Val, UndefElts);
514 } else if (PartVT.isVector() &&
515 PartEVT.getVectorElementType().bitsGE(
516 ValueVT.getVectorElementType()) &&
517 PartEVT.getVectorNumElements() == ValueVT.getVectorNumElements()) {
519 // Promoted vector extract
520 bool Smaller = PartEVT.bitsLE(ValueVT);
521 Val = DAG.getNode((Smaller ? ISD::TRUNCATE : ISD::ANY_EXTEND),
524 // Vector -> scalar conversion.
525 assert(ValueVT.getVectorNumElements() == 1 &&
526 "Only trivial vector-to-scalar conversions should get here!");
527 Val = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL,
529 DAG.getConstant(0, DL, TLI.getVectorIdxTy()));
531 bool Smaller = ValueVT.bitsLE(PartVT);
532 Val = DAG.getNode((Smaller ? ISD::TRUNCATE : ISD::ANY_EXTEND),
540 // Handle a multi-element vector.
543 unsigned NumIntermediates;
544 unsigned NumRegs = TLI.getVectorTypeBreakdown(*DAG.getContext(), ValueVT,
546 NumIntermediates, RegisterVT);
547 unsigned NumElements = ValueVT.getVectorNumElements();
549 assert(NumRegs == NumParts && "Part count doesn't match vector breakdown!");
550 NumParts = NumRegs; // Silence a compiler warning.
551 assert(RegisterVT == PartVT && "Part type doesn't match vector breakdown!");
553 // Split the vector into intermediate operands.
554 SmallVector<SDValue, 8> Ops(NumIntermediates);
555 for (unsigned i = 0; i != NumIntermediates; ++i) {
556 if (IntermediateVT.isVector())
557 Ops[i] = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL,
559 DAG.getConstant(i * (NumElements / NumIntermediates), DL,
560 TLI.getVectorIdxTy()));
562 Ops[i] = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL,
564 DAG.getConstant(i, DL, TLI.getVectorIdxTy()));
567 // Split the intermediate operands into legal parts.
568 if (NumParts == NumIntermediates) {
569 // If the register was not expanded, promote or copy the value,
571 for (unsigned i = 0; i != NumParts; ++i)
572 getCopyToParts(DAG, DL, Ops[i], &Parts[i], 1, PartVT, V);
573 } else if (NumParts > 0) {
574 // If the intermediate type was expanded, split each the value into
576 assert(NumIntermediates != 0 && "division by zero");
577 assert(NumParts % NumIntermediates == 0 &&
578 "Must expand into a divisible number of parts!");
579 unsigned Factor = NumParts / NumIntermediates;
580 for (unsigned i = 0; i != NumIntermediates; ++i)
581 getCopyToParts(DAG, DL, Ops[i], &Parts[i*Factor], Factor, PartVT, V);
585 RegsForValue::RegsForValue() {}
587 RegsForValue::RegsForValue(const SmallVector<unsigned, 4> ®s, MVT regvt,
589 : ValueVTs(1, valuevt), RegVTs(1, regvt), Regs(regs) {}
591 RegsForValue::RegsForValue(LLVMContext &Context, const TargetLowering &TLI,
592 const DataLayout &DL, unsigned Reg, Type *Ty) {
593 ComputeValueVTs(TLI, DL, Ty, ValueVTs);
595 for (unsigned Value = 0, e = ValueVTs.size(); Value != e; ++Value) {
596 EVT ValueVT = ValueVTs[Value];
597 unsigned NumRegs = TLI.getNumRegisters(Context, ValueVT);
598 MVT RegisterVT = TLI.getRegisterType(Context, ValueVT);
599 for (unsigned i = 0; i != NumRegs; ++i)
600 Regs.push_back(Reg + i);
601 RegVTs.push_back(RegisterVT);
606 /// getCopyFromRegs - Emit a series of CopyFromReg nodes that copies from
607 /// this value and returns the result as a ValueVT value. This uses
608 /// Chain/Flag as the input and updates them for the output Chain/Flag.
609 /// If the Flag pointer is NULL, no flag is used.
610 SDValue RegsForValue::getCopyFromRegs(SelectionDAG &DAG,
611 FunctionLoweringInfo &FuncInfo,
613 SDValue &Chain, SDValue *Flag,
614 const Value *V) const {
615 // A Value with type {} or [0 x %t] needs no registers.
616 if (ValueVTs.empty())
619 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
621 // Assemble the legal parts into the final values.
622 SmallVector<SDValue, 4> Values(ValueVTs.size());
623 SmallVector<SDValue, 8> Parts;
624 for (unsigned Value = 0, Part = 0, e = ValueVTs.size(); Value != e; ++Value) {
625 // Copy the legal parts from the registers.
626 EVT ValueVT = ValueVTs[Value];
627 unsigned NumRegs = TLI.getNumRegisters(*DAG.getContext(), ValueVT);
628 MVT RegisterVT = RegVTs[Value];
630 Parts.resize(NumRegs);
631 for (unsigned i = 0; i != NumRegs; ++i) {
634 P = DAG.getCopyFromReg(Chain, dl, Regs[Part+i], RegisterVT);
636 P = DAG.getCopyFromReg(Chain, dl, Regs[Part+i], RegisterVT, *Flag);
637 *Flag = P.getValue(2);
640 Chain = P.getValue(1);
643 // If the source register was virtual and if we know something about it,
644 // add an assert node.
645 if (!TargetRegisterInfo::isVirtualRegister(Regs[Part+i]) ||
646 !RegisterVT.isInteger() || RegisterVT.isVector())
649 const FunctionLoweringInfo::LiveOutInfo *LOI =
650 FuncInfo.GetLiveOutRegInfo(Regs[Part+i]);
654 unsigned RegSize = RegisterVT.getSizeInBits();
655 unsigned NumSignBits = LOI->NumSignBits;
656 unsigned NumZeroBits = LOI->KnownZero.countLeadingOnes();
658 if (NumZeroBits == RegSize) {
659 // The current value is a zero.
660 // Explicitly express that as it would be easier for
661 // optimizations to kick in.
662 Parts[i] = DAG.getConstant(0, dl, RegisterVT);
666 // FIXME: We capture more information than the dag can represent. For
667 // now, just use the tightest assertzext/assertsext possible.
669 EVT FromVT(MVT::Other);
670 if (NumSignBits == RegSize)
671 isSExt = true, FromVT = MVT::i1; // ASSERT SEXT 1
672 else if (NumZeroBits >= RegSize-1)
673 isSExt = false, FromVT = MVT::i1; // ASSERT ZEXT 1
674 else if (NumSignBits > RegSize-8)
675 isSExt = true, FromVT = MVT::i8; // ASSERT SEXT 8
676 else if (NumZeroBits >= RegSize-8)
677 isSExt = false, FromVT = MVT::i8; // ASSERT ZEXT 8
678 else if (NumSignBits > RegSize-16)
679 isSExt = true, FromVT = MVT::i16; // ASSERT SEXT 16
680 else if (NumZeroBits >= RegSize-16)
681 isSExt = false, FromVT = MVT::i16; // ASSERT ZEXT 16
682 else if (NumSignBits > RegSize-32)
683 isSExt = true, FromVT = MVT::i32; // ASSERT SEXT 32
684 else if (NumZeroBits >= RegSize-32)
685 isSExt = false, FromVT = MVT::i32; // ASSERT ZEXT 32
689 // Add an assertion node.
690 assert(FromVT != MVT::Other);
691 Parts[i] = DAG.getNode(isSExt ? ISD::AssertSext : ISD::AssertZext, dl,
692 RegisterVT, P, DAG.getValueType(FromVT));
695 Values[Value] = getCopyFromParts(DAG, dl, Parts.begin(),
696 NumRegs, RegisterVT, ValueVT, V);
701 return DAG.getNode(ISD::MERGE_VALUES, dl, DAG.getVTList(ValueVTs), Values);
704 /// getCopyToRegs - Emit a series of CopyToReg nodes that copies the
705 /// specified value into the registers specified by this object. This uses
706 /// Chain/Flag as the input and updates them for the output Chain/Flag.
707 /// If the Flag pointer is NULL, no flag is used.
708 void RegsForValue::getCopyToRegs(SDValue Val, SelectionDAG &DAG, SDLoc dl,
709 SDValue &Chain, SDValue *Flag, const Value *V,
710 ISD::NodeType PreferredExtendType) const {
711 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
712 ISD::NodeType ExtendKind = PreferredExtendType;
714 // Get the list of the values's legal parts.
715 unsigned NumRegs = Regs.size();
716 SmallVector<SDValue, 8> Parts(NumRegs);
717 for (unsigned Value = 0, Part = 0, e = ValueVTs.size(); Value != e; ++Value) {
718 EVT ValueVT = ValueVTs[Value];
719 unsigned NumParts = TLI.getNumRegisters(*DAG.getContext(), ValueVT);
720 MVT RegisterVT = RegVTs[Value];
722 if (ExtendKind == ISD::ANY_EXTEND && TLI.isZExtFree(Val, RegisterVT))
723 ExtendKind = ISD::ZERO_EXTEND;
725 getCopyToParts(DAG, dl, Val.getValue(Val.getResNo() + Value),
726 &Parts[Part], NumParts, RegisterVT, V, ExtendKind);
730 // Copy the parts into the registers.
731 SmallVector<SDValue, 8> Chains(NumRegs);
732 for (unsigned i = 0; i != NumRegs; ++i) {
735 Part = DAG.getCopyToReg(Chain, dl, Regs[i], Parts[i]);
737 Part = DAG.getCopyToReg(Chain, dl, Regs[i], Parts[i], *Flag);
738 *Flag = Part.getValue(1);
741 Chains[i] = Part.getValue(0);
744 if (NumRegs == 1 || Flag)
745 // If NumRegs > 1 && Flag is used then the use of the last CopyToReg is
746 // flagged to it. That is the CopyToReg nodes and the user are considered
747 // a single scheduling unit. If we create a TokenFactor and return it as
748 // chain, then the TokenFactor is both a predecessor (operand) of the
749 // user as well as a successor (the TF operands are flagged to the user).
750 // c1, f1 = CopyToReg
751 // c2, f2 = CopyToReg
752 // c3 = TokenFactor c1, c2
755 Chain = Chains[NumRegs-1];
757 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Chains);
760 /// AddInlineAsmOperands - Add this value to the specified inlineasm node
761 /// operand list. This adds the code marker and includes the number of
762 /// values added into it.
763 void RegsForValue::AddInlineAsmOperands(unsigned Code, bool HasMatching,
764 unsigned MatchingIdx, SDLoc dl,
766 std::vector<SDValue> &Ops) const {
767 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
769 unsigned Flag = InlineAsm::getFlagWord(Code, Regs.size());
771 Flag = InlineAsm::getFlagWordForMatchingOp(Flag, MatchingIdx);
772 else if (!Regs.empty() &&
773 TargetRegisterInfo::isVirtualRegister(Regs.front())) {
774 // Put the register class of the virtual registers in the flag word. That
775 // way, later passes can recompute register class constraints for inline
776 // assembly as well as normal instructions.
777 // Don't do this for tied operands that can use the regclass information
779 const MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo();
780 const TargetRegisterClass *RC = MRI.getRegClass(Regs.front());
781 Flag = InlineAsm::getFlagWordForRegClass(Flag, RC->getID());
784 SDValue Res = DAG.getTargetConstant(Flag, dl, MVT::i32);
787 unsigned SP = TLI.getStackPointerRegisterToSaveRestore();
788 for (unsigned Value = 0, Reg = 0, e = ValueVTs.size(); Value != e; ++Value) {
789 unsigned NumRegs = TLI.getNumRegisters(*DAG.getContext(), ValueVTs[Value]);
790 MVT RegisterVT = RegVTs[Value];
791 for (unsigned i = 0; i != NumRegs; ++i) {
792 assert(Reg < Regs.size() && "Mismatch in # registers expected");
793 unsigned TheReg = Regs[Reg++];
794 Ops.push_back(DAG.getRegister(TheReg, RegisterVT));
796 if (TheReg == SP && Code == InlineAsm::Kind_Clobber) {
797 // If we clobbered the stack pointer, MFI should know about it.
798 assert(DAG.getMachineFunction().getFrameInfo()->
799 hasOpaqueSPAdjustment());
805 void SelectionDAGBuilder::init(GCFunctionInfo *gfi, AliasAnalysis &aa,
806 const TargetLibraryInfo *li) {
810 DL = &DAG.getDataLayout();
811 Context = DAG.getContext();
812 LPadToCallSiteMap.clear();
815 /// clear - Clear out the current SelectionDAG and the associated
816 /// state and prepare this SelectionDAGBuilder object to be used
817 /// for a new block. This doesn't clear out information about
818 /// additional blocks that are needed to complete switch lowering
819 /// or PHI node updating; that information is cleared out as it is
821 void SelectionDAGBuilder::clear() {
823 UnusedArgNodeMap.clear();
824 PendingLoads.clear();
825 PendingExports.clear();
828 SDNodeOrder = LowestSDNodeOrder;
829 StatepointLowering.clear();
832 /// clearDanglingDebugInfo - Clear the dangling debug information
833 /// map. This function is separated from the clear so that debug
834 /// information that is dangling in a basic block can be properly
835 /// resolved in a different basic block. This allows the
836 /// SelectionDAG to resolve dangling debug information attached
838 void SelectionDAGBuilder::clearDanglingDebugInfo() {
839 DanglingDebugInfoMap.clear();
842 /// getRoot - Return the current virtual root of the Selection DAG,
843 /// flushing any PendingLoad items. This must be done before emitting
844 /// a store or any other node that may need to be ordered after any
845 /// prior load instructions.
847 SDValue SelectionDAGBuilder::getRoot() {
848 if (PendingLoads.empty())
849 return DAG.getRoot();
851 if (PendingLoads.size() == 1) {
852 SDValue Root = PendingLoads[0];
854 PendingLoads.clear();
858 // Otherwise, we have to make a token factor node.
859 SDValue Root = DAG.getNode(ISD::TokenFactor, getCurSDLoc(), MVT::Other,
861 PendingLoads.clear();
866 /// getControlRoot - Similar to getRoot, but instead of flushing all the
867 /// PendingLoad items, flush all the PendingExports items. It is necessary
868 /// to do this before emitting a terminator instruction.
870 SDValue SelectionDAGBuilder::getControlRoot() {
871 SDValue Root = DAG.getRoot();
873 if (PendingExports.empty())
876 // Turn all of the CopyToReg chains into one factored node.
877 if (Root.getOpcode() != ISD::EntryToken) {
878 unsigned i = 0, e = PendingExports.size();
879 for (; i != e; ++i) {
880 assert(PendingExports[i].getNode()->getNumOperands() > 1);
881 if (PendingExports[i].getNode()->getOperand(0) == Root)
882 break; // Don't add the root if we already indirectly depend on it.
886 PendingExports.push_back(Root);
889 Root = DAG.getNode(ISD::TokenFactor, getCurSDLoc(), MVT::Other,
891 PendingExports.clear();
896 void SelectionDAGBuilder::visit(const Instruction &I) {
897 // Set up outgoing PHI node register values before emitting the terminator.
898 if (isa<TerminatorInst>(&I))
899 HandlePHINodesInSuccessorBlocks(I.getParent());
905 visit(I.getOpcode(), I);
907 if (!isa<TerminatorInst>(&I) && !HasTailCall)
908 CopyToExportRegsIfNeeded(&I);
913 void SelectionDAGBuilder::visitPHI(const PHINode &) {
914 llvm_unreachable("SelectionDAGBuilder shouldn't visit PHI nodes!");
917 void SelectionDAGBuilder::visit(unsigned Opcode, const User &I) {
918 // Note: this doesn't use InstVisitor, because it has to work with
919 // ConstantExpr's in addition to instructions.
921 default: llvm_unreachable("Unknown instruction type encountered!");
922 // Build the switch statement using the Instruction.def file.
923 #define HANDLE_INST(NUM, OPCODE, CLASS) \
924 case Instruction::OPCODE: visit##OPCODE((const CLASS&)I); break;
925 #include "llvm/IR/Instruction.def"
929 // resolveDanglingDebugInfo - if we saw an earlier dbg_value referring to V,
930 // generate the debug data structures now that we've seen its definition.
931 void SelectionDAGBuilder::resolveDanglingDebugInfo(const Value *V,
933 DanglingDebugInfo &DDI = DanglingDebugInfoMap[V];
935 const DbgValueInst *DI = DDI.getDI();
936 DebugLoc dl = DDI.getdl();
937 unsigned DbgSDNodeOrder = DDI.getSDNodeOrder();
938 DILocalVariable *Variable = DI->getVariable();
939 DIExpression *Expr = DI->getExpression();
940 assert(Variable->isValidLocationForIntrinsic(dl) &&
941 "Expected inlined-at fields to agree");
942 uint64_t Offset = DI->getOffset();
943 // A dbg.value for an alloca is always indirect.
944 bool IsIndirect = isa<AllocaInst>(V) || Offset != 0;
947 if (!EmitFuncArgumentDbgValue(V, Variable, Expr, dl, Offset, IsIndirect,
949 SDV = DAG.getDbgValue(Variable, Expr, Val.getNode(), Val.getResNo(),
950 IsIndirect, Offset, dl, DbgSDNodeOrder);
951 DAG.AddDbgValue(SDV, Val.getNode(), false);
954 DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n");
955 DanglingDebugInfoMap[V] = DanglingDebugInfo();
959 /// getCopyFromRegs - If there was virtual register allocated for the value V
960 /// emit CopyFromReg of the specified type Ty. Return empty SDValue() otherwise.
961 SDValue SelectionDAGBuilder::getCopyFromRegs(const Value *V, Type *Ty) {
962 DenseMap<const Value *, unsigned>::iterator It = FuncInfo.ValueMap.find(V);
965 if (It != FuncInfo.ValueMap.end()) {
966 unsigned InReg = It->second;
967 RegsForValue RFV(*DAG.getContext(), DAG.getTargetLoweringInfo(),
968 DAG.getDataLayout(), InReg, Ty);
969 SDValue Chain = DAG.getEntryNode();
970 Result = RFV.getCopyFromRegs(DAG, FuncInfo, getCurSDLoc(), Chain, nullptr, V);
971 resolveDanglingDebugInfo(V, Result);
977 /// getValue - Return an SDValue for the given Value.
978 SDValue SelectionDAGBuilder::getValue(const Value *V) {
979 // If we already have an SDValue for this value, use it. It's important
980 // to do this first, so that we don't create a CopyFromReg if we already
981 // have a regular SDValue.
982 SDValue &N = NodeMap[V];
983 if (N.getNode()) return N;
985 // If there's a virtual register allocated and initialized for this
987 SDValue copyFromReg = getCopyFromRegs(V, V->getType());
988 if (copyFromReg.getNode()) {
992 // Otherwise create a new SDValue and remember it.
993 SDValue Val = getValueImpl(V);
995 resolveDanglingDebugInfo(V, Val);
999 // Return true if SDValue exists for the given Value
1000 bool SelectionDAGBuilder::findValue(const Value *V) const {
1001 return (NodeMap.find(V) != NodeMap.end()) ||
1002 (FuncInfo.ValueMap.find(V) != FuncInfo.ValueMap.end());
1005 /// getNonRegisterValue - Return an SDValue for the given Value, but
1006 /// don't look in FuncInfo.ValueMap for a virtual register.
1007 SDValue SelectionDAGBuilder::getNonRegisterValue(const Value *V) {
1008 // If we already have an SDValue for this value, use it.
1009 SDValue &N = NodeMap[V];
1011 if (isa<ConstantSDNode>(N) || isa<ConstantFPSDNode>(N)) {
1012 // Remove the debug location from the node as the node is about to be used
1013 // in a location which may differ from the original debug location. This
1014 // is relevant to Constant and ConstantFP nodes because they can appear
1015 // as constant expressions inside PHI nodes.
1016 N->setDebugLoc(DebugLoc());
1021 // Otherwise create a new SDValue and remember it.
1022 SDValue Val = getValueImpl(V);
1024 resolveDanglingDebugInfo(V, Val);
1028 /// getValueImpl - Helper function for getValue and getNonRegisterValue.
1029 /// Create an SDValue for the given value.
1030 SDValue SelectionDAGBuilder::getValueImpl(const Value *V) {
1031 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
1033 if (const Constant *C = dyn_cast<Constant>(V)) {
1034 EVT VT = TLI.getValueType(V->getType(), true);
1036 if (const ConstantInt *CI = dyn_cast<ConstantInt>(C))
1037 return DAG.getConstant(*CI, getCurSDLoc(), VT);
1039 if (const GlobalValue *GV = dyn_cast<GlobalValue>(C))
1040 return DAG.getGlobalAddress(GV, getCurSDLoc(), VT);
1042 if (isa<ConstantPointerNull>(C)) {
1043 unsigned AS = V->getType()->getPointerAddressSpace();
1044 return DAG.getConstant(0, getCurSDLoc(), TLI.getPointerTy(AS));
1047 if (const ConstantFP *CFP = dyn_cast<ConstantFP>(C))
1048 return DAG.getConstantFP(*CFP, getCurSDLoc(), VT);
1050 if (isa<UndefValue>(C) && !V->getType()->isAggregateType())
1051 return DAG.getUNDEF(VT);
1053 if (const ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) {
1054 visit(CE->getOpcode(), *CE);
1055 SDValue N1 = NodeMap[V];
1056 assert(N1.getNode() && "visit didn't populate the NodeMap!");
1060 if (isa<ConstantStruct>(C) || isa<ConstantArray>(C)) {
1061 SmallVector<SDValue, 4> Constants;
1062 for (User::const_op_iterator OI = C->op_begin(), OE = C->op_end();
1064 SDNode *Val = getValue(*OI).getNode();
1065 // If the operand is an empty aggregate, there are no values.
1067 // Add each leaf value from the operand to the Constants list
1068 // to form a flattened list of all the values.
1069 for (unsigned i = 0, e = Val->getNumValues(); i != e; ++i)
1070 Constants.push_back(SDValue(Val, i));
1073 return DAG.getMergeValues(Constants, getCurSDLoc());
1076 if (const ConstantDataSequential *CDS =
1077 dyn_cast<ConstantDataSequential>(C)) {
1078 SmallVector<SDValue, 4> Ops;
1079 for (unsigned i = 0, e = CDS->getNumElements(); i != e; ++i) {
1080 SDNode *Val = getValue(CDS->getElementAsConstant(i)).getNode();
1081 // Add each leaf value from the operand to the Constants list
1082 // to form a flattened list of all the values.
1083 for (unsigned i = 0, e = Val->getNumValues(); i != e; ++i)
1084 Ops.push_back(SDValue(Val, i));
1087 if (isa<ArrayType>(CDS->getType()))
1088 return DAG.getMergeValues(Ops, getCurSDLoc());
1089 return NodeMap[V] = DAG.getNode(ISD::BUILD_VECTOR, getCurSDLoc(),
1093 if (C->getType()->isStructTy() || C->getType()->isArrayTy()) {
1094 assert((isa<ConstantAggregateZero>(C) || isa<UndefValue>(C)) &&
1095 "Unknown struct or array constant!");
1097 SmallVector<EVT, 4> ValueVTs;
1098 ComputeValueVTs(TLI, DAG.getDataLayout(), C->getType(), ValueVTs);
1099 unsigned NumElts = ValueVTs.size();
1101 return SDValue(); // empty struct
1102 SmallVector<SDValue, 4> Constants(NumElts);
1103 for (unsigned i = 0; i != NumElts; ++i) {
1104 EVT EltVT = ValueVTs[i];
1105 if (isa<UndefValue>(C))
1106 Constants[i] = DAG.getUNDEF(EltVT);
1107 else if (EltVT.isFloatingPoint())
1108 Constants[i] = DAG.getConstantFP(0, getCurSDLoc(), EltVT);
1110 Constants[i] = DAG.getConstant(0, getCurSDLoc(), EltVT);
1113 return DAG.getMergeValues(Constants, getCurSDLoc());
1116 if (const BlockAddress *BA = dyn_cast<BlockAddress>(C))
1117 return DAG.getBlockAddress(BA, VT);
1119 VectorType *VecTy = cast<VectorType>(V->getType());
1120 unsigned NumElements = VecTy->getNumElements();
1122 // Now that we know the number and type of the elements, get that number of
1123 // elements into the Ops array based on what kind of constant it is.
1124 SmallVector<SDValue, 16> Ops;
1125 if (const ConstantVector *CV = dyn_cast<ConstantVector>(C)) {
1126 for (unsigned i = 0; i != NumElements; ++i)
1127 Ops.push_back(getValue(CV->getOperand(i)));
1129 assert(isa<ConstantAggregateZero>(C) && "Unknown vector constant!");
1130 EVT EltVT = TLI.getValueType(VecTy->getElementType());
1133 if (EltVT.isFloatingPoint())
1134 Op = DAG.getConstantFP(0, getCurSDLoc(), EltVT);
1136 Op = DAG.getConstant(0, getCurSDLoc(), EltVT);
1137 Ops.assign(NumElements, Op);
1140 // Create a BUILD_VECTOR node.
1141 return NodeMap[V] = DAG.getNode(ISD::BUILD_VECTOR, getCurSDLoc(), VT, Ops);
1144 // If this is a static alloca, generate it as the frameindex instead of
1146 if (const AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
1147 DenseMap<const AllocaInst*, int>::iterator SI =
1148 FuncInfo.StaticAllocaMap.find(AI);
1149 if (SI != FuncInfo.StaticAllocaMap.end())
1150 return DAG.getFrameIndex(SI->second, TLI.getPointerTy());
1153 // If this is an instruction which fast-isel has deferred, select it now.
1154 if (const Instruction *Inst = dyn_cast<Instruction>(V)) {
1155 unsigned InReg = FuncInfo.InitializeRegForValue(Inst);
1156 RegsForValue RFV(*DAG.getContext(), TLI, DAG.getDataLayout(), InReg,
1158 SDValue Chain = DAG.getEntryNode();
1159 return RFV.getCopyFromRegs(DAG, FuncInfo, getCurSDLoc(), Chain, nullptr, V);
1162 llvm_unreachable("Can't get register for value!");
1165 void SelectionDAGBuilder::visitRet(const ReturnInst &I) {
1166 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
1167 auto &DL = DAG.getDataLayout();
1168 SDValue Chain = getControlRoot();
1169 SmallVector<ISD::OutputArg, 8> Outs;
1170 SmallVector<SDValue, 8> OutVals;
1172 if (!FuncInfo.CanLowerReturn) {
1173 unsigned DemoteReg = FuncInfo.DemoteRegister;
1174 const Function *F = I.getParent()->getParent();
1176 // Emit a store of the return value through the virtual register.
1177 // Leave Outs empty so that LowerReturn won't try to load return
1178 // registers the usual way.
1179 SmallVector<EVT, 1> PtrValueVTs;
1180 ComputeValueVTs(TLI, DL, PointerType::getUnqual(F->getReturnType()),
1183 SDValue RetPtr = DAG.getRegister(DemoteReg, PtrValueVTs[0]);
1184 SDValue RetOp = getValue(I.getOperand(0));
1186 SmallVector<EVT, 4> ValueVTs;
1187 SmallVector<uint64_t, 4> Offsets;
1188 ComputeValueVTs(TLI, DL, I.getOperand(0)->getType(), ValueVTs, &Offsets);
1189 unsigned NumValues = ValueVTs.size();
1191 SmallVector<SDValue, 4> Chains(NumValues);
1192 for (unsigned i = 0; i != NumValues; ++i) {
1193 SDValue Add = DAG.getNode(ISD::ADD, getCurSDLoc(),
1194 RetPtr.getValueType(), RetPtr,
1195 DAG.getIntPtrConstant(Offsets[i],
1198 DAG.getStore(Chain, getCurSDLoc(),
1199 SDValue(RetOp.getNode(), RetOp.getResNo() + i),
1200 // FIXME: better loc info would be nice.
1201 Add, MachinePointerInfo(), false, false, 0);
1204 Chain = DAG.getNode(ISD::TokenFactor, getCurSDLoc(),
1205 MVT::Other, Chains);
1206 } else if (I.getNumOperands() != 0) {
1207 SmallVector<EVT, 4> ValueVTs;
1208 ComputeValueVTs(TLI, DL, I.getOperand(0)->getType(), ValueVTs);
1209 unsigned NumValues = ValueVTs.size();
1211 SDValue RetOp = getValue(I.getOperand(0));
1213 const Function *F = I.getParent()->getParent();
1215 ISD::NodeType ExtendKind = ISD::ANY_EXTEND;
1216 if (F->getAttributes().hasAttribute(AttributeSet::ReturnIndex,
1218 ExtendKind = ISD::SIGN_EXTEND;
1219 else if (F->getAttributes().hasAttribute(AttributeSet::ReturnIndex,
1221 ExtendKind = ISD::ZERO_EXTEND;
1223 LLVMContext &Context = F->getContext();
1224 bool RetInReg = F->getAttributes().hasAttribute(AttributeSet::ReturnIndex,
1227 for (unsigned j = 0; j != NumValues; ++j) {
1228 EVT VT = ValueVTs[j];
1230 if (ExtendKind != ISD::ANY_EXTEND && VT.isInteger())
1231 VT = TLI.getTypeForExtArgOrReturn(Context, VT, ExtendKind);
1233 unsigned NumParts = TLI.getNumRegisters(Context, VT);
1234 MVT PartVT = TLI.getRegisterType(Context, VT);
1235 SmallVector<SDValue, 4> Parts(NumParts);
1236 getCopyToParts(DAG, getCurSDLoc(),
1237 SDValue(RetOp.getNode(), RetOp.getResNo() + j),
1238 &Parts[0], NumParts, PartVT, &I, ExtendKind);
1240 // 'inreg' on function refers to return value
1241 ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy();
1245 // Propagate extension type if any
1246 if (ExtendKind == ISD::SIGN_EXTEND)
1248 else if (ExtendKind == ISD::ZERO_EXTEND)
1251 for (unsigned i = 0; i < NumParts; ++i) {
1252 Outs.push_back(ISD::OutputArg(Flags, Parts[i].getValueType(),
1253 VT, /*isfixed=*/true, 0, 0));
1254 OutVals.push_back(Parts[i]);
1260 bool isVarArg = DAG.getMachineFunction().getFunction()->isVarArg();
1261 CallingConv::ID CallConv =
1262 DAG.getMachineFunction().getFunction()->getCallingConv();
1263 Chain = DAG.getTargetLoweringInfo().LowerReturn(
1264 Chain, CallConv, isVarArg, Outs, OutVals, getCurSDLoc(), DAG);
1266 // Verify that the target's LowerReturn behaved as expected.
1267 assert(Chain.getNode() && Chain.getValueType() == MVT::Other &&
1268 "LowerReturn didn't return a valid chain!");
1270 // Update the DAG with the new chain value resulting from return lowering.
1274 /// CopyToExportRegsIfNeeded - If the given value has virtual registers
1275 /// created for it, emit nodes to copy the value into the virtual
1277 void SelectionDAGBuilder::CopyToExportRegsIfNeeded(const Value *V) {
1279 if (V->getType()->isEmptyTy())
1282 DenseMap<const Value *, unsigned>::iterator VMI = FuncInfo.ValueMap.find(V);
1283 if (VMI != FuncInfo.ValueMap.end()) {
1284 assert(!V->use_empty() && "Unused value assigned virtual registers!");
1285 CopyValueToVirtualRegister(V, VMI->second);
1289 /// ExportFromCurrentBlock - If this condition isn't known to be exported from
1290 /// the current basic block, add it to ValueMap now so that we'll get a
1292 void SelectionDAGBuilder::ExportFromCurrentBlock(const Value *V) {
1293 // No need to export constants.
1294 if (!isa<Instruction>(V) && !isa<Argument>(V)) return;
1296 // Already exported?
1297 if (FuncInfo.isExportedInst(V)) return;
1299 unsigned Reg = FuncInfo.InitializeRegForValue(V);
1300 CopyValueToVirtualRegister(V, Reg);
1303 bool SelectionDAGBuilder::isExportableFromCurrentBlock(const Value *V,
1304 const BasicBlock *FromBB) {
1305 // The operands of the setcc have to be in this block. We don't know
1306 // how to export them from some other block.
1307 if (const Instruction *VI = dyn_cast<Instruction>(V)) {
1308 // Can export from current BB.
1309 if (VI->getParent() == FromBB)
1312 // Is already exported, noop.
1313 return FuncInfo.isExportedInst(V);
1316 // If this is an argument, we can export it if the BB is the entry block or
1317 // if it is already exported.
1318 if (isa<Argument>(V)) {
1319 if (FromBB == &FromBB->getParent()->getEntryBlock())
1322 // Otherwise, can only export this if it is already exported.
1323 return FuncInfo.isExportedInst(V);
1326 // Otherwise, constants can always be exported.
1330 /// Return branch probability calculated by BranchProbabilityInfo for IR blocks.
1331 uint32_t SelectionDAGBuilder::getEdgeWeight(const MachineBasicBlock *Src,
1332 const MachineBasicBlock *Dst) const {
1333 BranchProbabilityInfo *BPI = FuncInfo.BPI;
1336 const BasicBlock *SrcBB = Src->getBasicBlock();
1337 const BasicBlock *DstBB = Dst->getBasicBlock();
1338 return BPI->getEdgeWeight(SrcBB, DstBB);
1341 void SelectionDAGBuilder::
1342 addSuccessorWithWeight(MachineBasicBlock *Src, MachineBasicBlock *Dst,
1343 uint32_t Weight /* = 0 */) {
1345 Weight = getEdgeWeight(Src, Dst);
1346 Src->addSuccessor(Dst, Weight);
1350 static bool InBlock(const Value *V, const BasicBlock *BB) {
1351 if (const Instruction *I = dyn_cast<Instruction>(V))
1352 return I->getParent() == BB;
1356 /// EmitBranchForMergedCondition - Helper method for FindMergedConditions.
1357 /// This function emits a branch and is used at the leaves of an OR or an
1358 /// AND operator tree.
1361 SelectionDAGBuilder::EmitBranchForMergedCondition(const Value *Cond,
1362 MachineBasicBlock *TBB,
1363 MachineBasicBlock *FBB,
1364 MachineBasicBlock *CurBB,
1365 MachineBasicBlock *SwitchBB,
1368 const BasicBlock *BB = CurBB->getBasicBlock();
1370 // If the leaf of the tree is a comparison, merge the condition into
1372 if (const CmpInst *BOp = dyn_cast<CmpInst>(Cond)) {
1373 // The operands of the cmp have to be in this block. We don't know
1374 // how to export them from some other block. If this is the first block
1375 // of the sequence, no exporting is needed.
1376 if (CurBB == SwitchBB ||
1377 (isExportableFromCurrentBlock(BOp->getOperand(0), BB) &&
1378 isExportableFromCurrentBlock(BOp->getOperand(1), BB))) {
1379 ISD::CondCode Condition;
1380 if (const ICmpInst *IC = dyn_cast<ICmpInst>(Cond)) {
1381 Condition = getICmpCondCode(IC->getPredicate());
1382 } else if (const FCmpInst *FC = dyn_cast<FCmpInst>(Cond)) {
1383 Condition = getFCmpCondCode(FC->getPredicate());
1384 if (TM.Options.NoNaNsFPMath)
1385 Condition = getFCmpCodeWithoutNaN(Condition);
1387 (void)Condition; // silence warning.
1388 llvm_unreachable("Unknown compare instruction");
1391 CaseBlock CB(Condition, BOp->getOperand(0), BOp->getOperand(1), nullptr,
1392 TBB, FBB, CurBB, TWeight, FWeight);
1393 SwitchCases.push_back(CB);
1398 // Create a CaseBlock record representing this branch.
1399 CaseBlock CB(ISD::SETEQ, Cond, ConstantInt::getTrue(*DAG.getContext()),
1400 nullptr, TBB, FBB, CurBB, TWeight, FWeight);
1401 SwitchCases.push_back(CB);
1404 /// Scale down both weights to fit into uint32_t.
1405 static void ScaleWeights(uint64_t &NewTrue, uint64_t &NewFalse) {
1406 uint64_t NewMax = (NewTrue > NewFalse) ? NewTrue : NewFalse;
1407 uint32_t Scale = (NewMax / UINT32_MAX) + 1;
1408 NewTrue = NewTrue / Scale;
1409 NewFalse = NewFalse / Scale;
1412 /// FindMergedConditions - If Cond is an expression like
1413 void SelectionDAGBuilder::FindMergedConditions(const Value *Cond,
1414 MachineBasicBlock *TBB,
1415 MachineBasicBlock *FBB,
1416 MachineBasicBlock *CurBB,
1417 MachineBasicBlock *SwitchBB,
1418 unsigned Opc, uint32_t TWeight,
1420 // If this node is not part of the or/and tree, emit it as a branch.
1421 const Instruction *BOp = dyn_cast<Instruction>(Cond);
1422 if (!BOp || !(isa<BinaryOperator>(BOp) || isa<CmpInst>(BOp)) ||
1423 (unsigned)BOp->getOpcode() != Opc || !BOp->hasOneUse() ||
1424 BOp->getParent() != CurBB->getBasicBlock() ||
1425 !InBlock(BOp->getOperand(0), CurBB->getBasicBlock()) ||
1426 !InBlock(BOp->getOperand(1), CurBB->getBasicBlock())) {
1427 EmitBranchForMergedCondition(Cond, TBB, FBB, CurBB, SwitchBB,
1432 // Create TmpBB after CurBB.
1433 MachineFunction::iterator BBI = CurBB;
1434 MachineFunction &MF = DAG.getMachineFunction();
1435 MachineBasicBlock *TmpBB = MF.CreateMachineBasicBlock(CurBB->getBasicBlock());
1436 CurBB->getParent()->insert(++BBI, TmpBB);
1438 if (Opc == Instruction::Or) {
1439 // Codegen X | Y as:
1448 // We have flexibility in setting Prob for BB1 and Prob for TmpBB.
1449 // The requirement is that
1450 // TrueProb for BB1 + (FalseProb for BB1 * TrueProb for TmpBB)
1451 // = TrueProb for original BB.
1452 // Assuming the original weights are A and B, one choice is to set BB1's
1453 // weights to A and A+2B, and set TmpBB's weights to A and 2B. This choice
1455 // TrueProb for BB1 == FalseProb for BB1 * TrueProb for TmpBB.
1456 // Another choice is to assume TrueProb for BB1 equals to TrueProb for
1457 // TmpBB, but the math is more complicated.
1459 uint64_t NewTrueWeight = TWeight;
1460 uint64_t NewFalseWeight = (uint64_t)TWeight + 2 * (uint64_t)FWeight;
1461 ScaleWeights(NewTrueWeight, NewFalseWeight);
1462 // Emit the LHS condition.
1463 FindMergedConditions(BOp->getOperand(0), TBB, TmpBB, CurBB, SwitchBB, Opc,
1464 NewTrueWeight, NewFalseWeight);
1466 NewTrueWeight = TWeight;
1467 NewFalseWeight = 2 * (uint64_t)FWeight;
1468 ScaleWeights(NewTrueWeight, NewFalseWeight);
1469 // Emit the RHS condition into TmpBB.
1470 FindMergedConditions(BOp->getOperand(1), TBB, FBB, TmpBB, SwitchBB, Opc,
1471 NewTrueWeight, NewFalseWeight);
1473 assert(Opc == Instruction::And && "Unknown merge op!");
1474 // Codegen X & Y as:
1482 // This requires creation of TmpBB after CurBB.
1484 // We have flexibility in setting Prob for BB1 and Prob for TmpBB.
1485 // The requirement is that
1486 // FalseProb for BB1 + (TrueProb for BB1 * FalseProb for TmpBB)
1487 // = FalseProb for original BB.
1488 // Assuming the original weights are A and B, one choice is to set BB1's
1489 // weights to 2A+B and B, and set TmpBB's weights to 2A and B. This choice
1491 // FalseProb for BB1 == TrueProb for BB1 * FalseProb for TmpBB.
1493 uint64_t NewTrueWeight = 2 * (uint64_t)TWeight + (uint64_t)FWeight;
1494 uint64_t NewFalseWeight = FWeight;
1495 ScaleWeights(NewTrueWeight, NewFalseWeight);
1496 // Emit the LHS condition.
1497 FindMergedConditions(BOp->getOperand(0), TmpBB, FBB, CurBB, SwitchBB, Opc,
1498 NewTrueWeight, NewFalseWeight);
1500 NewTrueWeight = 2 * (uint64_t)TWeight;
1501 NewFalseWeight = FWeight;
1502 ScaleWeights(NewTrueWeight, NewFalseWeight);
1503 // Emit the RHS condition into TmpBB.
1504 FindMergedConditions(BOp->getOperand(1), TBB, FBB, TmpBB, SwitchBB, Opc,
1505 NewTrueWeight, NewFalseWeight);
1509 /// If the set of cases should be emitted as a series of branches, return true.
1510 /// If we should emit this as a bunch of and/or'd together conditions, return
1513 SelectionDAGBuilder::ShouldEmitAsBranches(const std::vector<CaseBlock> &Cases) {
1514 if (Cases.size() != 2) return true;
1516 // If this is two comparisons of the same values or'd or and'd together, they
1517 // will get folded into a single comparison, so don't emit two blocks.
1518 if ((Cases[0].CmpLHS == Cases[1].CmpLHS &&
1519 Cases[0].CmpRHS == Cases[1].CmpRHS) ||
1520 (Cases[0].CmpRHS == Cases[1].CmpLHS &&
1521 Cases[0].CmpLHS == Cases[1].CmpRHS)) {
1525 // Handle: (X != null) | (Y != null) --> (X|Y) != 0
1526 // Handle: (X == null) & (Y == null) --> (X|Y) == 0
1527 if (Cases[0].CmpRHS == Cases[1].CmpRHS &&
1528 Cases[0].CC == Cases[1].CC &&
1529 isa<Constant>(Cases[0].CmpRHS) &&
1530 cast<Constant>(Cases[0].CmpRHS)->isNullValue()) {
1531 if (Cases[0].CC == ISD::SETEQ && Cases[0].TrueBB == Cases[1].ThisBB)
1533 if (Cases[0].CC == ISD::SETNE && Cases[0].FalseBB == Cases[1].ThisBB)
1540 void SelectionDAGBuilder::visitBr(const BranchInst &I) {
1541 MachineBasicBlock *BrMBB = FuncInfo.MBB;
1543 // Update machine-CFG edges.
1544 MachineBasicBlock *Succ0MBB = FuncInfo.MBBMap[I.getSuccessor(0)];
1546 if (I.isUnconditional()) {
1547 // Update machine-CFG edges.
1548 BrMBB->addSuccessor(Succ0MBB);
1550 // If this is not a fall-through branch or optimizations are switched off,
1552 if (Succ0MBB != NextBlock(BrMBB) || TM.getOptLevel() == CodeGenOpt::None)
1553 DAG.setRoot(DAG.getNode(ISD::BR, getCurSDLoc(),
1554 MVT::Other, getControlRoot(),
1555 DAG.getBasicBlock(Succ0MBB)));
1560 // If this condition is one of the special cases we handle, do special stuff
1562 const Value *CondVal = I.getCondition();
1563 MachineBasicBlock *Succ1MBB = FuncInfo.MBBMap[I.getSuccessor(1)];
1565 // If this is a series of conditions that are or'd or and'd together, emit
1566 // this as a sequence of branches instead of setcc's with and/or operations.
1567 // As long as jumps are not expensive, this should improve performance.
1568 // For example, instead of something like:
1581 if (const BinaryOperator *BOp = dyn_cast<BinaryOperator>(CondVal)) {
1582 if (!DAG.getTargetLoweringInfo().isJumpExpensive() &&
1583 BOp->hasOneUse() && (BOp->getOpcode() == Instruction::And ||
1584 BOp->getOpcode() == Instruction::Or)) {
1585 FindMergedConditions(BOp, Succ0MBB, Succ1MBB, BrMBB, BrMBB,
1586 BOp->getOpcode(), getEdgeWeight(BrMBB, Succ0MBB),
1587 getEdgeWeight(BrMBB, Succ1MBB));
1588 // If the compares in later blocks need to use values not currently
1589 // exported from this block, export them now. This block should always
1590 // be the first entry.
1591 assert(SwitchCases[0].ThisBB == BrMBB && "Unexpected lowering!");
1593 // Allow some cases to be rejected.
1594 if (ShouldEmitAsBranches(SwitchCases)) {
1595 for (unsigned i = 1, e = SwitchCases.size(); i != e; ++i) {
1596 ExportFromCurrentBlock(SwitchCases[i].CmpLHS);
1597 ExportFromCurrentBlock(SwitchCases[i].CmpRHS);
1600 // Emit the branch for this block.
1601 visitSwitchCase(SwitchCases[0], BrMBB);
1602 SwitchCases.erase(SwitchCases.begin());
1606 // Okay, we decided not to do this, remove any inserted MBB's and clear
1608 for (unsigned i = 1, e = SwitchCases.size(); i != e; ++i)
1609 FuncInfo.MF->erase(SwitchCases[i].ThisBB);
1611 SwitchCases.clear();
1615 // Create a CaseBlock record representing this branch.
1616 CaseBlock CB(ISD::SETEQ, CondVal, ConstantInt::getTrue(*DAG.getContext()),
1617 nullptr, Succ0MBB, Succ1MBB, BrMBB);
1619 // Use visitSwitchCase to actually insert the fast branch sequence for this
1621 visitSwitchCase(CB, BrMBB);
1624 /// visitSwitchCase - Emits the necessary code to represent a single node in
1625 /// the binary search tree resulting from lowering a switch instruction.
1626 void SelectionDAGBuilder::visitSwitchCase(CaseBlock &CB,
1627 MachineBasicBlock *SwitchBB) {
1628 SDValue Cond;
1629 SDValue CondLHS = getValue(CB.CmpLHS);
1630 SDLoc dl = getCurSDLoc();
1632 // Build the setcc now.
1633 if (!CB.CmpMHS) {
1634 // Fold "(X == true)" to X and "(X == false)" to !X to
1635 // handle common cases produced by branch lowering.
1636 if (CB.CmpRHS == ConstantInt::getTrue(*DAG.getContext()) &&
1637 CB.CC == ISD::SETEQ)
1638 Cond = CondLHS;
1639 else if (CB.CmpRHS == ConstantInt::getFalse(*DAG.getContext()) &&
1640 CB.CC == ISD::SETEQ) {
1641 SDValue True = DAG.getConstant(1, dl, CondLHS.getValueType());
1642 Cond = DAG.getNode(ISD::XOR, dl, CondLHS.getValueType(), CondLHS, True);
1643 } else
1644 Cond = DAG.getSetCC(dl, MVT::i1, CondLHS, getValue(CB.CmpRHS), CB.CC);
1645 } else {
1646 assert(CB.CC == ISD::SETLE && "Can handle only LE ranges now");
1648 const APInt& Low = cast<ConstantInt>(CB.CmpLHS)->getValue();
1649 const APInt& High = cast<ConstantInt>(CB.CmpRHS)->getValue();
1651 SDValue CmpOp = getValue(CB.CmpMHS);
1652 EVT VT = CmpOp.getValueType();
1654 if (cast<ConstantInt>(CB.CmpLHS)->isMinValue(true)) {
1655 Cond = DAG.getSetCC(dl, MVT::i1, CmpOp, DAG.getConstant(High, dl, VT),
1656 ISD::SETLE);
1657 } else {
1658 SDValue SUB = DAG.getNode(ISD::SUB, dl,
1659 VT, CmpOp, DAG.getConstant(Low, dl, VT));
1660 Cond = DAG.getSetCC(dl, MVT::i1, SUB,
1661 DAG.getConstant(High-Low, dl, VT), ISD::SETULE);
1662 }
1663 }
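// In effect, a case range [Low, High] is lowered to a single unsigned
// comparison: (X - Low) <=u (High - Low). For example, checking X in [10, 13]
// becomes a SETULE of (X - 10) against 3, so only one branch is needed.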
1665 // Update successor info
1666 addSuccessorWithWeight(SwitchBB, CB.TrueBB, CB.TrueWeight);
1667 // TrueBB and FalseBB are always different unless the incoming IR is
1668 // degenerate. This only happens when running llc on weird IR.
1669 if (CB.TrueBB != CB.FalseBB)
1670 addSuccessorWithWeight(SwitchBB, CB.FalseBB, CB.FalseWeight);
1672 // If the lhs block is the next block, invert the condition so that we can
1673 // fall through to the lhs instead of the rhs block.
1674 if (CB.TrueBB == NextBlock(SwitchBB)) {
1675 std::swap(CB.TrueBB, CB.FalseBB);
1676 SDValue True = DAG.getConstant(1, dl, Cond.getValueType());
1677 Cond = DAG.getNode(ISD::XOR, dl, Cond.getValueType(), Cond, True);
1678 }
1680 SDValue BrCond = DAG.getNode(ISD::BRCOND, dl,
1681 MVT::Other, getControlRoot(), Cond,
1682 DAG.getBasicBlock(CB.TrueBB));
1684 // Insert the false branch. Do this even if it's a fall through branch,
1685 // this makes it easier to do DAG optimizations which require inverting
1686 // the branch condition.
1687 BrCond = DAG.getNode(ISD::BR, dl, MVT::Other, BrCond,
1688 DAG.getBasicBlock(CB.FalseBB));
1690 DAG.setRoot(BrCond);
1693 /// visitJumpTable - Emit JumpTable node in the current MBB
1694 void SelectionDAGBuilder::visitJumpTable(JumpTable &JT) {
1695 // Emit the code for the jump table
1696 assert(JT.Reg != -1U && "Should lower JT Header first!");
1697 EVT PTy = DAG.getTargetLoweringInfo().getPointerTy();
1698 SDValue Index = DAG.getCopyFromReg(getControlRoot(), getCurSDLoc(),
1699 JT.Reg, PTy);
1700 SDValue Table = DAG.getJumpTable(JT.JTI, PTy);
1701 SDValue BrJumpTable = DAG.getNode(ISD::BR_JT, getCurSDLoc(),
1702 MVT::Other, Index.getValue(1),
1703 Table, Index);
1704 DAG.setRoot(BrJumpTable);
1705 }
1707 /// visitJumpTableHeader - This function emits the necessary code to produce
1708 /// an index into the JumpTable from the switch case.
1709 void SelectionDAGBuilder::visitJumpTableHeader(JumpTable &JT,
1710 JumpTableHeader &JTH,
1711 MachineBasicBlock *SwitchBB) {
1712 SDLoc dl = getCurSDLoc();
1714 // Subtract the lowest switch case value from the value being switched on and
1715 // conditional branch to default mbb if the result is greater than the
1716 // difference between smallest and largest cases.
1717 SDValue SwitchOp = getValue(JTH.SValue);
1718 EVT VT = SwitchOp.getValueType();
1719 SDValue Sub = DAG.getNode(ISD::SUB, dl, VT, SwitchOp,
1720 DAG.getConstant(JTH.First, dl, VT));
1722 // The SDNode we just created, which holds the value being switched on minus
1723 // the smallest case value, needs to be copied to a virtual register so it
1724 // can be used as an index into the jump table in a subsequent basic block.
1725 // This value may be smaller or larger than the target's pointer type, and
1726 // therefore require extension or truncation.
1727 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
1728 SwitchOp = DAG.getZExtOrTrunc(Sub, dl, TLI.getPointerTy());
1730 unsigned JumpTableReg = FuncInfo.CreateReg(TLI.getPointerTy());
1731 SDValue CopyTo = DAG.getCopyToReg(getControlRoot(), dl,
1732 JumpTableReg, SwitchOp);
1733 JT.Reg = JumpTableReg;
1735 // Emit the range check for the jump table, and branch to the default block
1736 // for the switch statement if the value being switched on exceeds the largest
1737 // case in the switch.
1738 SDValue CMP =
1739 DAG.getSetCC(dl, TLI.getSetCCResultType(*DAG.getContext(),
1740 Sub.getValueType()),
1741 Sub, DAG.getConstant(JTH.Last - JTH.First, dl, VT),
1742 ISD::SETUGT);
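// The check above is the usual unsigned range trick: (SValue - First) >u
// (Last - First) holds exactly when the switched-on value lies outside
// [First, Last], in which case the branch below goes to the default block
// instead of indexing the table.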
1744 SDValue BrCond = DAG.getNode(ISD::BRCOND, dl,
1745 MVT::Other, CopyTo, CMP,
1746 DAG.getBasicBlock(JT.Default));
1748 // Avoid emitting unnecessary branches to the next block.
1749 if (JT.MBB != NextBlock(SwitchBB))
1750 BrCond = DAG.getNode(ISD::BR, dl, MVT::Other, BrCond,
1751 DAG.getBasicBlock(JT.MBB));
1753 DAG.setRoot(BrCond);
1756 /// Codegen a new tail for a stack protector check ParentMBB which has had its
1757 /// tail spliced into a stack protector check success bb.
1759 /// For a high level explanation of how this fits into the stack protector
1760 /// generation see the comment on the declaration of class
1761 /// StackProtectorDescriptor.
1762 void SelectionDAGBuilder::visitSPDescriptorParent(StackProtectorDescriptor &SPD,
1763 MachineBasicBlock *ParentBB) {
1765 // First create the loads to the guard/stack slot for the comparison.
1766 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
1767 EVT PtrTy = TLI.getPointerTy();
1769 MachineFrameInfo *MFI = ParentBB->getParent()->getFrameInfo();
1770 int FI = MFI->getStackProtectorIndex();
1772 const Value *IRGuard = SPD.getGuard();
1773 SDValue GuardPtr = getValue(IRGuard);
1774 SDValue StackSlotPtr = DAG.getFrameIndex(FI, PtrTy);
1776 unsigned Align = DL->getPrefTypeAlignment(IRGuard->getType());
1778 SDValue Guard;
1779 SDLoc dl = getCurSDLoc();
1781 // If GuardReg is set and useLoadStackGuardNode returns true, retrieve the
1782 // guard value from the virtual register holding the value. Otherwise, emit a
1783 // volatile load to retrieve the stack guard value.
1784 unsigned GuardReg = SPD.getGuardReg();
1786 if (GuardReg && TLI.useLoadStackGuardNode())
1787 Guard = DAG.getCopyFromReg(DAG.getEntryNode(), dl, GuardReg,
1788 PtrTy);
1789 else
1790 Guard = DAG.getLoad(PtrTy, dl, DAG.getEntryNode(),
1791 GuardPtr, MachinePointerInfo(IRGuard, 0),
1792 true, false, false, Align);
1794 SDValue StackSlot = DAG.getLoad(PtrTy, dl, DAG.getEntryNode(),
1795 StackSlotPtr,
1796 MachinePointerInfo::getFixedStack(FI),
1797 true, false, false, Align);
1799 // Perform the comparison via a subtract/getsetcc.
1800 EVT VT = Guard.getValueType();
1801 SDValue Sub = DAG.getNode(ISD::SUB, dl, VT, Guard, StackSlot);
1803 SDValue Cmp =
1804 DAG.getSetCC(dl, TLI.getSetCCResultType(*DAG.getContext(),
1805 Sub.getValueType()),
1806 Sub, DAG.getConstant(0, dl, VT), ISD::SETNE);
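// Roughly: Guard == StackSlot is tested as (Guard - StackSlot) != 0, so a
// nonzero difference means the on-stack copy of the guard was overwritten and
// control transfers to the failure block emitted below.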
1808 // If the sub is not 0, then we know the guard/stackslot do not equal, so
1809 // branch to failure MBB.
1810 SDValue BrCond = DAG.getNode(ISD::BRCOND, dl,
1811 MVT::Other, StackSlot.getOperand(0),
1812 Cmp, DAG.getBasicBlock(SPD.getFailureMBB()));
1813 // Otherwise branch to success MBB.
1814 SDValue Br = DAG.getNode(ISD::BR, dl,
1815 MVT::Other, BrCond,
1816 DAG.getBasicBlock(SPD.getSuccessMBB()));
1818 DAG.setRoot(Br);
1819 }
1821 /// Codegen the failure basic block for a stack protector check.
1823 /// A failure stack protector machine basic block consists simply of a call to
1824 /// __stack_chk_fail().
1826 /// For a high level explanation of how this fits into the stack protector
1827 /// generation see the comment on the declaration of class
1828 /// StackProtectorDescriptor.
1829 void
1830 SelectionDAGBuilder::visitSPDescriptorFailure(StackProtectorDescriptor &SPD) {
1831 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
1832 SDValue Chain =
1833 TLI.makeLibCall(DAG, RTLIB::STACKPROTECTOR_CHECK_FAIL, MVT::isVoid,
1834 nullptr, 0, false, getCurSDLoc(), false, false).second;
1835 DAG.setRoot(Chain);
1836 }
1838 /// visitBitTestHeader - This function emits the necessary code to produce a
1839 /// value suitable for "bit tests".
1840 void SelectionDAGBuilder::visitBitTestHeader(BitTestBlock &B,
1841 MachineBasicBlock *SwitchBB) {
1842 SDLoc dl = getCurSDLoc();
1844 // Subtract the minimum value
1845 SDValue SwitchOp = getValue(B.SValue);
1846 EVT VT = SwitchOp.getValueType();
1847 SDValue Sub = DAG.getNode(ISD::SUB, dl, VT, SwitchOp,
1848 DAG.getConstant(B.First, dl, VT));
1850 // Check range
1851 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
1852 SDValue RangeCmp =
1853 DAG.getSetCC(dl, TLI.getSetCCResultType(*DAG.getContext(),
1854 Sub.getValueType()),
1855 Sub, DAG.getConstant(B.Range, dl, VT), ISD::SETUGT);
1857 // Determine the type of the test operands.
1858 bool UsePtrType = false;
1859 if (!TLI.isTypeLegal(VT))
1860 UsePtrType = true;
1861 else {
1862 for (unsigned i = 0, e = B.Cases.size(); i != e; ++i)
1863 if (!isUIntN(VT.getSizeInBits(), B.Cases[i].Mask)) {
1864 // Switch table case ranges are encoded into a series of masks.
1865 // Just use pointer type, it's guaranteed to fit.
1866 UsePtrType = true;
1867 break;
1868 }
1869 }
1870 if (UsePtrType) {
1871 VT = TLI.getPointerTy();
1872 Sub = DAG.getZExtOrTrunc(Sub, dl, VT);
1873 }
1875 B.RegVT = VT.getSimpleVT();
1876 B.Reg = FuncInfo.CreateReg(B.RegVT);
1877 SDValue CopyTo = DAG.getCopyToReg(getControlRoot(), dl, B.Reg, Sub);
1879 MachineBasicBlock* MBB = B.Cases[0].ThisBB;
1881 addSuccessorWithWeight(SwitchBB, B.Default);
1882 addSuccessorWithWeight(SwitchBB, MBB);
1884 SDValue BrRange = DAG.getNode(ISD::BRCOND, dl,
1885 MVT::Other, CopyTo, RangeCmp,
1886 DAG.getBasicBlock(B.Default));
1888 // Avoid emitting unnecessary branches to the next block.
1889 if (MBB != NextBlock(SwitchBB))
1890 BrRange = DAG.getNode(ISD::BR, dl, MVT::Other, BrRange,
1891 DAG.getBasicBlock(MBB));
1893 DAG.setRoot(BrRange);
1896 /// visitBitTestCase - this function produces one "bit test"
1897 void SelectionDAGBuilder::visitBitTestCase(BitTestBlock &BB,
1898 MachineBasicBlock* NextMBB,
1899 uint32_t BranchWeightToNext,
1900 unsigned Reg,
1901 BitTestCase &B,
1902 MachineBasicBlock *SwitchBB) {
1903 SDLoc dl = getCurSDLoc();
1904 MVT VT = BB.RegVT;
1905 SDValue ShiftOp = DAG.getCopyFromReg(getControlRoot(), dl, Reg, VT);
1906 SDValue Cmp;
1907 unsigned PopCount = countPopulation(B.Mask);
1908 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
1909 if (PopCount == 1) {
1910 // Testing for a single bit; just compare the shift count with what it
1911 // would need to be to shift a 1 bit in that position.
1912 Cmp = DAG.getSetCC(
1913 dl, TLI.getSetCCResultType(*DAG.getContext(), VT), ShiftOp,
1914 DAG.getConstant(countTrailingZeros(B.Mask), dl, VT), ISD::SETEQ);
1915 } else if (PopCount == BB.Range) {
1916 // There is only one zero bit in the range, test for it directly.
1917 Cmp = DAG.getSetCC(
1918 dl, TLI.getSetCCResultType(*DAG.getContext(), VT), ShiftOp,
1919 DAG.getConstant(countTrailingOnes(B.Mask), dl, VT), ISD::SETNE);
1920 } else {
1921 // Make desired shift
1922 SDValue SwitchVal = DAG.getNode(ISD::SHL, dl, VT,
1923 DAG.getConstant(1, dl, VT), ShiftOp);
1925 // Emit bit tests and jumps
1926 SDValue AndOp = DAG.getNode(ISD::AND, dl,
1927 VT, SwitchVal, DAG.getConstant(B.Mask, dl, VT));
1928 Cmp = DAG.getSetCC(dl, TLI.getSetCCResultType(*DAG.getContext(), VT), AndOp,
1929 DAG.getConstant(0, dl, VT), ISD::SETNE);
1930 }
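// For example, a bit-test cluster covering case values {0, 3, 5} relative to
// the subtracted minimum uses B.Mask == 0b101001; the test then reduces to
// ((1 << X) & Mask) != 0, i.e. one shift, one AND and one branch.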
1932 // The branch weight from SwitchBB to B.TargetBB is B.ExtraWeight.
1933 addSuccessorWithWeight(SwitchBB, B.TargetBB, B.ExtraWeight);
1934 // The branch weight from SwitchBB to NextMBB is BranchWeightToNext.
1935 addSuccessorWithWeight(SwitchBB, NextMBB, BranchWeightToNext);
1937 SDValue BrAnd = DAG.getNode(ISD::BRCOND, dl,
1938 MVT::Other, getControlRoot(),
1939 Cmp, DAG.getBasicBlock(B.TargetBB));
1941 // Avoid emitting unnecessary branches to the next block.
1942 if (NextMBB != NextBlock(SwitchBB))
1943 BrAnd = DAG.getNode(ISD::BR, dl, MVT::Other, BrAnd,
1944 DAG.getBasicBlock(NextMBB));
1946 DAG.setRoot(BrAnd);
1947 }
1949 void SelectionDAGBuilder::visitInvoke(const InvokeInst &I) {
1950 MachineBasicBlock *InvokeMBB = FuncInfo.MBB;
1952 // Retrieve successors.
1953 MachineBasicBlock *Return = FuncInfo.MBBMap[I.getSuccessor(0)];
1954 MachineBasicBlock *LandingPad = FuncInfo.MBBMap[I.getSuccessor(1)];
1956 const Value *Callee(I.getCalledValue());
1957 const Function *Fn = dyn_cast<Function>(Callee);
1958 if (isa<InlineAsm>(Callee))
1959 visitInlineAsm(&I);
1960 else if (Fn && Fn->isIntrinsic()) {
1961 switch (Fn->getIntrinsicID()) {
1963 llvm_unreachable("Cannot invoke this intrinsic");
1964 case Intrinsic::donothing:
1965 // Ignore invokes to @llvm.donothing: jump directly to the next BB.
1966 break;
1967 case Intrinsic::experimental_patchpoint_void:
1968 case Intrinsic::experimental_patchpoint_i64:
1969 visitPatchpoint(&I, LandingPad);
1970 break;
1971 case Intrinsic::experimental_gc_statepoint:
1972 LowerStatepoint(ImmutableStatepoint(&I), LandingPad);
1973 break;
1974 }
1975 } else
1976 LowerCallTo(&I, getValue(Callee), false, LandingPad);
1978 // If the value of the invoke is used outside of its defining block, make it
1979 // available as a virtual register.
1980 // We already took care of the exported value for the statepoint instruction
1981 // during call to the LowerStatepoint.
1982 if (!isStatepoint(I)) {
1983 CopyToExportRegsIfNeeded(&I);
1984 }
1986 // Update successor info
1987 addSuccessorWithWeight(InvokeMBB, Return);
1988 addSuccessorWithWeight(InvokeMBB, LandingPad);
1990 // Drop into normal successor.
1991 DAG.setRoot(DAG.getNode(ISD::BR, getCurSDLoc(),
1992 MVT::Other, getControlRoot(),
1993 DAG.getBasicBlock(Return)));
1996 void SelectionDAGBuilder::visitResume(const ResumeInst &RI) {
1997 llvm_unreachable("SelectionDAGBuilder shouldn't visit resume instructions!");
2000 void SelectionDAGBuilder::visitLandingPad(const LandingPadInst &LP) {
2001 assert(FuncInfo.MBB->isLandingPad() &&
2002 "Call to landingpad not in landing pad!");
2004 MachineBasicBlock *MBB = FuncInfo.MBB;
2005 MachineModuleInfo &MMI = DAG.getMachineFunction().getMMI();
2006 AddLandingPadInfo(LP, MMI, MBB);
2008 // If there aren't registers to copy the values into (e.g., during SjLj
2009 // exceptions), then don't bother to create these DAG nodes.
2010 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2011 if (TLI.getExceptionPointerRegister() == 0 &&
2012 TLI.getExceptionSelectorRegister() == 0)
2013 return;
2015 SmallVector<EVT, 2> ValueVTs;
2016 SDLoc dl = getCurSDLoc();
2017 ComputeValueVTs(TLI, DAG.getDataLayout(), LP.getType(), ValueVTs);
2018 assert(ValueVTs.size() == 2 && "Only two-valued landingpads are supported");
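// The two values of a landingpad are the exception pointer and the selector
// (type-id) value; they were copied from the physical exception registers
// into virtual registers earlier in instruction selection and are
// re-materialized as SDValues below.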
2020 // Get the two live-in registers as SDValues. The physregs have already been
2021 // copied into virtual registers.
2022 SDValue Ops[2];
2023 if (FuncInfo.ExceptionPointerVirtReg) {
2024 Ops[0] = DAG.getZExtOrTrunc(
2025 DAG.getCopyFromReg(DAG.getEntryNode(), dl,
2026 FuncInfo.ExceptionPointerVirtReg, TLI.getPointerTy()),
2027 dl, ValueVTs[0]);
2028 } else {
2029 Ops[0] = DAG.getConstant(0, dl, TLI.getPointerTy());
2030 }
2031 Ops[1] = DAG.getZExtOrTrunc(
2032 DAG.getCopyFromReg(DAG.getEntryNode(), dl,
2033 FuncInfo.ExceptionSelectorVirtReg, TLI.getPointerTy()),
2034 dl, ValueVTs[1]);
2036 // Merge into one.
2037 SDValue Res = DAG.getNode(ISD::MERGE_VALUES, dl,
2038 DAG.getVTList(ValueVTs), Ops);
2039 setValue(&LP, Res);
2040 }
2042 unsigned
2043 SelectionDAGBuilder::visitLandingPadClauseBB(GlobalValue *ClauseGV,
2044 MachineBasicBlock *LPadBB) {
2045 SDValue Chain = getControlRoot();
2046 SDLoc dl = getCurSDLoc();
2048 // Get the typeid that we will dispatch on later.
2049 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2050 const TargetRegisterClass *RC = TLI.getRegClassFor(TLI.getPointerTy());
2051 unsigned VReg = FuncInfo.MF->getRegInfo().createVirtualRegister(RC);
2052 unsigned TypeID = DAG.getMachineFunction().getMMI().getTypeIDFor(ClauseGV);
2053 SDValue Sel = DAG.getConstant(TypeID, dl, TLI.getPointerTy());
2054 Chain = DAG.getCopyToReg(Chain, dl, VReg, Sel);
2056 // Branch to the main landing pad block.
2057 MachineBasicBlock *ClauseMBB = FuncInfo.MBB;
2058 ClauseMBB->addSuccessor(LPadBB);
2059 DAG.setRoot(DAG.getNode(ISD::BR, dl, MVT::Other, Chain,
2060 DAG.getBasicBlock(LPadBB)));
2061 return VReg;
2062 }
2064 void SelectionDAGBuilder::sortAndRangeify(CaseClusterVector &Clusters) {
2065 #ifndef NDEBUG
2066 for (const CaseCluster &CC : Clusters)
2067 assert(CC.Low == CC.High && "Input clusters must be single-case");
2068 #endif
2070 std::sort(Clusters.begin(), Clusters.end(),
2071 [](const CaseCluster &a, const CaseCluster &b) {
2072 return a.Low->getValue().slt(b.Low->getValue());
2073 });
2075 // Merge adjacent clusters with the same destination.
2076 const unsigned N = Clusters.size();
2077 unsigned DstIndex = 0;
2078 for (unsigned SrcIndex = 0; SrcIndex < N; ++SrcIndex) {
2079 CaseCluster &CC = Clusters[SrcIndex];
2080 const ConstantInt *CaseVal = CC.Low;
2081 MachineBasicBlock *Succ = CC.MBB;
2083 if (DstIndex != 0 && Clusters[DstIndex - 1].MBB == Succ &&
2084 (CaseVal->getValue() - Clusters[DstIndex - 1].High->getValue()) == 1) {
2085 // If this case has the same successor and is a neighbour, merge it into
2086 // the previous cluster.
2087 Clusters[DstIndex - 1].High = CaseVal;
2088 Clusters[DstIndex - 1].Weight += CC.Weight;
2089 assert(Clusters[DstIndex - 1].Weight >= CC.Weight && "Weight overflow!");
2090 } else {
2091 std::memmove(&Clusters[DstIndex++], &Clusters[SrcIndex],
2092 sizeof(Clusters[SrcIndex]));
2093 }
2094 }
2095 Clusters.resize(DstIndex);
2096 }
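// For example, single-case clusters {1 -> BB1, 2 -> BB1, 3 -> BB2} are merged
// into the ranges {[1,2] -> BB1, [3,3] -> BB2}, with the weights of merged
// neighbours accumulated.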
2098 void SelectionDAGBuilder::UpdateSplitBlock(MachineBasicBlock *First,
2099 MachineBasicBlock *Last) {
2100 // Update JTCases.
2101 for (unsigned i = 0, e = JTCases.size(); i != e; ++i)
2102 if (JTCases[i].first.HeaderBB == First)
2103 JTCases[i].first.HeaderBB = Last;
2105 // Update BitTestCases.
2106 for (unsigned i = 0, e = BitTestCases.size(); i != e; ++i)
2107 if (BitTestCases[i].Parent == First)
2108 BitTestCases[i].Parent = Last;
2111 void SelectionDAGBuilder::visitIndirectBr(const IndirectBrInst &I) {
2112 MachineBasicBlock *IndirectBrMBB = FuncInfo.MBB;
2114 // Update machine-CFG edges with unique successors.
2115 SmallSet<BasicBlock*, 32> Done;
2116 for (unsigned i = 0, e = I.getNumSuccessors(); i != e; ++i) {
2117 BasicBlock *BB = I.getSuccessor(i);
2118 bool Inserted = Done.insert(BB).second;
2119 if (!Inserted)
2120 continue;
2122 MachineBasicBlock *Succ = FuncInfo.MBBMap[BB];
2123 addSuccessorWithWeight(IndirectBrMBB, Succ);
2124 }
2126 DAG.setRoot(DAG.getNode(ISD::BRIND, getCurSDLoc(),
2127 MVT::Other, getControlRoot(),
2128 getValue(I.getAddress())));
2131 void SelectionDAGBuilder::visitUnreachable(const UnreachableInst &I) {
2132 if (DAG.getTarget().Options.TrapUnreachable)
2133 DAG.setRoot(DAG.getNode(ISD::TRAP, getCurSDLoc(), MVT::Other, DAG.getRoot()));
2136 void SelectionDAGBuilder::visitFSub(const User &I) {
2137 // -0.0 - X --> fneg
2138 Type *Ty = I.getType();
2139 if (isa<Constant>(I.getOperand(0)) &&
2140 I.getOperand(0) == ConstantFP::getZeroValueForNegation(Ty)) {
2141 SDValue Op2 = getValue(I.getOperand(1));
2142 setValue(&I, DAG.getNode(ISD::FNEG, getCurSDLoc(),
2143 Op2.getValueType(), Op2));
2144 return;
2145 }
2147 visitBinary(I, ISD::FSUB);
2148 }
2150 void SelectionDAGBuilder::visitBinary(const User &I, unsigned OpCode) {
2151 SDValue Op1 = getValue(I.getOperand(0));
2152 SDValue Op2 = getValue(I.getOperand(1));
2154 bool nuw = false;
2155 bool nsw = false;
2156 bool exact = false;
2157 FastMathFlags FMF;
2159 if (const OverflowingBinaryOperator *OFBinOp =
2160 dyn_cast<const OverflowingBinaryOperator>(&I)) {
2161 nuw = OFBinOp->hasNoUnsignedWrap();
2162 nsw = OFBinOp->hasNoSignedWrap();
2163 }
2164 if (const PossiblyExactOperator *ExactOp =
2165 dyn_cast<const PossiblyExactOperator>(&I))
2166 exact = ExactOp->isExact();
2167 if (const FPMathOperator *FPOp = dyn_cast<const FPMathOperator>(&I))
2168 FMF = FPOp->getFastMathFlags();
2170 SDNodeFlags Flags;
2171 Flags.setExact(exact);
2172 Flags.setNoSignedWrap(nsw);
2173 Flags.setNoUnsignedWrap(nuw);
2174 if (EnableFMFInDAG) {
2175 Flags.setAllowReciprocal(FMF.allowReciprocal());
2176 Flags.setNoInfs(FMF.noInfs());
2177 Flags.setNoNaNs(FMF.noNaNs());
2178 Flags.setNoSignedZeros(FMF.noSignedZeros());
2179 Flags.setUnsafeAlgebra(FMF.unsafeAlgebra());
2180 }
2181 SDValue BinNodeValue = DAG.getNode(OpCode, getCurSDLoc(), Op1.getValueType(),
2182 Op1, Op2, &Flags);
2183 setValue(&I, BinNodeValue);
2184 }
2186 void SelectionDAGBuilder::visitShift(const User &I, unsigned Opcode) {
2187 SDValue Op1 = getValue(I.getOperand(0));
2188 SDValue Op2 = getValue(I.getOperand(1));
2190 EVT ShiftTy =
2191 DAG.getTargetLoweringInfo().getShiftAmountTy(Op2.getValueType());
2193 // Coerce the shift amount to the right type if we can.
2194 if (!I.getType()->isVectorTy() && Op2.getValueType() != ShiftTy) {
2195 unsigned ShiftSize = ShiftTy.getSizeInBits();
2196 unsigned Op2Size = Op2.getValueType().getSizeInBits();
2197 SDLoc DL = getCurSDLoc();
2199 // If the operand is smaller than the shift count type, promote it.
2200 if (ShiftSize > Op2Size)
2201 Op2 = DAG.getNode(ISD::ZERO_EXTEND, DL, ShiftTy, Op2);
2203 // If the operand is larger than the shift count type but the shift
2204 // count type has enough bits to represent any shift value, truncate
2205 // it now. This is a common case and it exposes the truncate to
2206 // optimization early.
2207 else if (ShiftSize >= Log2_32_Ceil(Op2.getValueType().getSizeInBits()))
2208 Op2 = DAG.getNode(ISD::TRUNCATE, DL, ShiftTy, Op2);
2209 // Otherwise we'll need to temporarily settle for some other convenient
2210 // type. Type legalization will make adjustments once the shiftee is split.
2211 else
2212 Op2 = DAG.getZExtOrTrunc(Op2, DL, MVT::i32);
2213 }
2215 bool nuw = false;
2216 bool nsw = false;
2217 bool exact = false;
2219 if (Opcode == ISD::SRL || Opcode == ISD::SRA || Opcode == ISD::SHL) {
2221 if (const OverflowingBinaryOperator *OFBinOp =
2222 dyn_cast<const OverflowingBinaryOperator>(&I)) {
2223 nuw = OFBinOp->hasNoUnsignedWrap();
2224 nsw = OFBinOp->hasNoSignedWrap();
2225 }
2226 if (const PossiblyExactOperator *ExactOp =
2227 dyn_cast<const PossiblyExactOperator>(&I))
2228 exact = ExactOp->isExact();
2229 }
2230 SDNodeFlags Flags;
2231 Flags.setExact(exact);
2232 Flags.setNoSignedWrap(nsw);
2233 Flags.setNoUnsignedWrap(nuw);
2234 SDValue Res = DAG.getNode(Opcode, getCurSDLoc(), Op1.getValueType(), Op1, Op2,
2235 &Flags);
2236 setValue(&I, Res);
2237 }
2239 void SelectionDAGBuilder::visitSDiv(const User &I) {
2240 SDValue Op1 = getValue(I.getOperand(0));
2241 SDValue Op2 = getValue(I.getOperand(1));
2243 SDNodeFlags Flags;
2244 Flags.setExact(isa<PossiblyExactOperator>(&I) &&
2245 cast<PossiblyExactOperator>(&I)->isExact());
2246 setValue(&I, DAG.getNode(ISD::SDIV, getCurSDLoc(), Op1.getValueType(), Op1,
2247 Op2, &Flags));
2248 }
2250 void SelectionDAGBuilder::visitICmp(const User &I) {
2251 ICmpInst::Predicate predicate = ICmpInst::BAD_ICMP_PREDICATE;
2252 if (const ICmpInst *IC = dyn_cast<ICmpInst>(&I))
2253 predicate = IC->getPredicate();
2254 else if (const ConstantExpr *IC = dyn_cast<ConstantExpr>(&I))
2255 predicate = ICmpInst::Predicate(IC->getPredicate());
2256 SDValue Op1 = getValue(I.getOperand(0));
2257 SDValue Op2 = getValue(I.getOperand(1));
2258 ISD::CondCode Opcode = getICmpCondCode(predicate);
2260 EVT DestVT = DAG.getTargetLoweringInfo().getValueType(I.getType());
2261 setValue(&I, DAG.getSetCC(getCurSDLoc(), DestVT, Op1, Op2, Opcode));
2264 void SelectionDAGBuilder::visitFCmp(const User &I) {
2265 FCmpInst::Predicate predicate = FCmpInst::BAD_FCMP_PREDICATE;
2266 if (const FCmpInst *FC = dyn_cast<FCmpInst>(&I))
2267 predicate = FC->getPredicate();
2268 else if (const ConstantExpr *FC = dyn_cast<ConstantExpr>(&I))
2269 predicate = FCmpInst::Predicate(FC->getPredicate());
2270 SDValue Op1 = getValue(I.getOperand(0));
2271 SDValue Op2 = getValue(I.getOperand(1));
2272 ISD::CondCode Condition = getFCmpCondCode(predicate);
2273 if (TM.Options.NoNaNsFPMath)
2274 Condition = getFCmpCodeWithoutNaN(Condition);
2275 EVT DestVT = DAG.getTargetLoweringInfo().getValueType(I.getType());
2276 setValue(&I, DAG.getSetCC(getCurSDLoc(), DestVT, Op1, Op2, Condition));
2279 void SelectionDAGBuilder::visitSelect(const User &I) {
2280 SmallVector<EVT, 4> ValueVTs;
2281 ComputeValueVTs(DAG.getTargetLoweringInfo(), DAG.getDataLayout(), I.getType(),
2282 ValueVTs);
2283 unsigned NumValues = ValueVTs.size();
2284 if (NumValues == 0) return;
2286 SmallVector<SDValue, 4> Values(NumValues);
2287 SDValue Cond = getValue(I.getOperand(0));
2288 SDValue LHSVal = getValue(I.getOperand(1));
2289 SDValue RHSVal = getValue(I.getOperand(2));
2290 auto BaseOps = {Cond};
2291 ISD::NodeType OpCode = Cond.getValueType().isVector() ?
2292 ISD::VSELECT : ISD::SELECT;
2294 // Min/max matching is only viable if all output VTs are the same.
2295 if (std::equal(ValueVTs.begin(), ValueVTs.end(), ValueVTs.begin())) {
2296 Value *LHS, *RHS;
2297 SelectPatternFlavor SPF = matchSelectPattern(const_cast<User*>(&I), LHS, RHS);
2298 ISD::NodeType Opc = ISD::DELETED_NODE;
2299 switch (SPF) {
2300 case SPF_UMAX: Opc = ISD::UMAX; break;
2301 case SPF_UMIN: Opc = ISD::UMIN; break;
2302 case SPF_SMAX: Opc = ISD::SMAX; break;
2303 case SPF_SMIN: Opc = ISD::SMIN; break;
2304 default: break;
2305 }
2307 EVT VT = ValueVTs[0];
2308 LLVMContext &Ctx = *DAG.getContext();
2309 auto &TLI = DAG.getTargetLoweringInfo();
2310 while (TLI.getTypeAction(Ctx, VT) == TargetLoweringBase::TypeSplitVector)
2311 VT = TLI.getTypeToTransformTo(Ctx, VT);
2313 if (Opc != ISD::DELETED_NODE && TLI.isOperationLegalOrCustom(Opc, VT) &&
2314 // If the underlying comparison instruction is used by any other instruction,
2315 // the consumed instructions won't be destroyed, so it is not profitable
2316 // to convert to a min/max.
2317 cast<SelectInst>(&I)->getCondition()->hasOneUse()) {
2318 OpCode = Opc;
2319 LHSVal = getValue(LHS);
2320 RHSVal = getValue(RHS);
2321 BaseOps = {};
2322 }
2323 }
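// For example, a one-use "select (icmp ult %a, %b), %a, %b" is emitted
// directly as ISD::UMIN here when the target marks UMIN as legal or custom
// for the (possibly split) value type.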
2325 for (unsigned i = 0; i != NumValues; ++i) {
2326 SmallVector<SDValue, 3> Ops(BaseOps.begin(), BaseOps.end());
2327 Ops.push_back(SDValue(LHSVal.getNode(), LHSVal.getResNo() + i));
2328 Ops.push_back(SDValue(RHSVal.getNode(), RHSVal.getResNo() + i));
2329 Values[i] = DAG.getNode(OpCode, getCurSDLoc(),
2330 LHSVal.getNode()->getValueType(LHSVal.getResNo()+i),
2331 Ops);
2332 }
2334 setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(),
2335 DAG.getVTList(ValueVTs), Values));
2338 void SelectionDAGBuilder::visitTrunc(const User &I) {
2339 // TruncInst cannot be a no-op cast because sizeof(src) > sizeof(dest).
2340 SDValue N = getValue(I.getOperand(0));
2341 EVT DestVT = DAG.getTargetLoweringInfo().getValueType(I.getType());
2342 setValue(&I, DAG.getNode(ISD::TRUNCATE, getCurSDLoc(), DestVT, N));
2345 void SelectionDAGBuilder::visitZExt(const User &I) {
2346 // ZExt cannot be a no-op cast because sizeof(src) < sizeof(dest).
2347 // ZExt also can't be a cast to bool for same reason. So, nothing much to do
2348 SDValue N = getValue(I.getOperand(0));
2349 EVT DestVT = DAG.getTargetLoweringInfo().getValueType(I.getType());
2350 setValue(&I, DAG.getNode(ISD::ZERO_EXTEND, getCurSDLoc(), DestVT, N));
2353 void SelectionDAGBuilder::visitSExt(const User &I) {
2354 // SExt cannot be a no-op cast because sizeof(src) < sizeof(dest).
2355 // SExt also can't be a cast to bool for same reason. So, nothing much to do
2356 SDValue N = getValue(I.getOperand(0));
2357 EVT DestVT = DAG.getTargetLoweringInfo().getValueType(I.getType());
2358 setValue(&I, DAG.getNode(ISD::SIGN_EXTEND, getCurSDLoc(), DestVT, N));
2361 void SelectionDAGBuilder::visitFPTrunc(const User &I) {
2362 // FPTrunc is never a no-op cast, no need to check
2363 SDValue N = getValue(I.getOperand(0));
2364 SDLoc dl = getCurSDLoc();
2365 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2366 EVT DestVT = TLI.getValueType(I.getType());
2367 setValue(&I, DAG.getNode(ISD::FP_ROUND, dl, DestVT, N,
2368 DAG.getTargetConstant(0, dl, TLI.getPointerTy())));
2371 void SelectionDAGBuilder::visitFPExt(const User &I) {
2372 // FPExt is never a no-op cast, no need to check
2373 SDValue N = getValue(I.getOperand(0));
2374 EVT DestVT = DAG.getTargetLoweringInfo().getValueType(I.getType());
2375 setValue(&I, DAG.getNode(ISD::FP_EXTEND, getCurSDLoc(), DestVT, N));
2378 void SelectionDAGBuilder::visitFPToUI(const User &I) {
2379 // FPToUI is never a no-op cast, no need to check
2380 SDValue N = getValue(I.getOperand(0));
2381 EVT DestVT = DAG.getTargetLoweringInfo().getValueType(I.getType());
2382 setValue(&I, DAG.getNode(ISD::FP_TO_UINT, getCurSDLoc(), DestVT, N));
2385 void SelectionDAGBuilder::visitFPToSI(const User &I) {
2386 // FPToSI is never a no-op cast, no need to check
2387 SDValue N = getValue(I.getOperand(0));
2388 EVT DestVT = DAG.getTargetLoweringInfo().getValueType(I.getType());
2389 setValue(&I, DAG.getNode(ISD::FP_TO_SINT, getCurSDLoc(), DestVT, N));
2392 void SelectionDAGBuilder::visitUIToFP(const User &I) {
2393 // UIToFP is never a no-op cast, no need to check
2394 SDValue N = getValue(I.getOperand(0));
2395 EVT DestVT = DAG.getTargetLoweringInfo().getValueType(I.getType());
2396 setValue(&I, DAG.getNode(ISD::UINT_TO_FP, getCurSDLoc(), DestVT, N));
2399 void SelectionDAGBuilder::visitSIToFP(const User &I) {
2400 // SIToFP is never a no-op cast, no need to check
2401 SDValue N = getValue(I.getOperand(0));
2402 EVT DestVT = DAG.getTargetLoweringInfo().getValueType(I.getType());
2403 setValue(&I, DAG.getNode(ISD::SINT_TO_FP, getCurSDLoc(), DestVT, N));
2406 void SelectionDAGBuilder::visitPtrToInt(const User &I) {
2407 // What to do depends on the size of the integer and the size of the pointer.
2408 // We can either truncate, zero extend, or no-op, accordingly.
2409 SDValue N = getValue(I.getOperand(0));
2410 EVT DestVT = DAG.getTargetLoweringInfo().getValueType(I.getType());
2411 setValue(&I, DAG.getZExtOrTrunc(N, getCurSDLoc(), DestVT));
2414 void SelectionDAGBuilder::visitIntToPtr(const User &I) {
2415 // What to do depends on the size of the integer and the size of the pointer.
2416 // We can either truncate, zero extend, or no-op, accordingly.
2417 SDValue N = getValue(I.getOperand(0));
2418 EVT DestVT = DAG.getTargetLoweringInfo().getValueType(I.getType());
2419 setValue(&I, DAG.getZExtOrTrunc(N, getCurSDLoc(), DestVT));
2422 void SelectionDAGBuilder::visitBitCast(const User &I) {
2423 SDValue N = getValue(I.getOperand(0));
2424 SDLoc dl = getCurSDLoc();
2425 EVT DestVT = DAG.getTargetLoweringInfo().getValueType(I.getType());
2427 // BitCast assures us that source and destination are the same size so this is
2428 // either a BITCAST or a no-op.
2429 if (DestVT != N.getValueType())
2430 setValue(&I, DAG.getNode(ISD::BITCAST, dl,
2431 DestVT, N)); // convert types.
2432 // Check if the original LLVM IR Operand was a ConstantInt, because getValue()
2433 // might fold any kind of constant expression to an integer constant and that
2434 // is not what we are looking for. Only recognize a bitcast of a genuine
2435 // constant integer as an opaque constant.
2436 else if (ConstantInt *C = dyn_cast<ConstantInt>(I.getOperand(0)))
2437 setValue(&I, DAG.getConstant(C->getValue(), dl, DestVT, /*isTarget=*/false,
2438 /*isOpaque*/true));
2439 else
2440 setValue(&I, N); // noop cast.
2441 }
2443 void SelectionDAGBuilder::visitAddrSpaceCast(const User &I) {
2444 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2445 const Value *SV = I.getOperand(0);
2446 SDValue N = getValue(SV);
2447 EVT DestVT = TLI.getValueType(I.getType());
2449 unsigned SrcAS = SV->getType()->getPointerAddressSpace();
2450 unsigned DestAS = I.getType()->getPointerAddressSpace();
2452 if (!TLI.isNoopAddrSpaceCast(SrcAS, DestAS))
2453 N = DAG.getAddrSpaceCast(getCurSDLoc(), DestVT, N, SrcAS, DestAS);
2455 setValue(&I, N);
2456 }
2458 void SelectionDAGBuilder::visitInsertElement(const User &I) {
2459 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2460 SDValue InVec = getValue(I.getOperand(0));
2461 SDValue InVal = getValue(I.getOperand(1));
2462 SDValue InIdx = DAG.getSExtOrTrunc(getValue(I.getOperand(2)),
2463 getCurSDLoc(), TLI.getVectorIdxTy());
2464 setValue(&I, DAG.getNode(ISD::INSERT_VECTOR_ELT, getCurSDLoc(),
2465 TLI.getValueType(I.getType()), InVec, InVal, InIdx));
2468 void SelectionDAGBuilder::visitExtractElement(const User &I) {
2469 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2470 SDValue InVec = getValue(I.getOperand(0));
2471 SDValue InIdx = DAG.getSExtOrTrunc(getValue(I.getOperand(1)),
2472 getCurSDLoc(), TLI.getVectorIdxTy());
2473 setValue(&I, DAG.getNode(ISD::EXTRACT_VECTOR_ELT, getCurSDLoc(),
2474 TLI.getValueType(I.getType()), InVec, InIdx));
2477 // Utility for visitShuffleVector - Return true if every element in Mask,
2478 // beginning from position Pos and ending in Pos+Size, falls within the
2479 // specified sequential range [L, L+Size), or is undef.
2480 static bool isSequentialInRange(const SmallVectorImpl<int> &Mask,
2481 unsigned Pos, unsigned Size, int Low) {
2482 for (unsigned i = Pos, e = Pos+Size; i != e; ++i, ++Low)
2483 if (Mask[i] >= 0 && Mask[i] != Low)
2484 return false;
2485 return true;
2486 }
2488 void SelectionDAGBuilder::visitShuffleVector(const User &I) {
2489 SDValue Src1 = getValue(I.getOperand(0));
2490 SDValue Src2 = getValue(I.getOperand(1));
2492 SmallVector<int, 8> Mask;
2493 ShuffleVectorInst::getShuffleMask(cast<Constant>(I.getOperand(2)), Mask);
2494 unsigned MaskNumElts = Mask.size();
2496 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2497 EVT VT = TLI.getValueType(I.getType());
2498 EVT SrcVT = Src1.getValueType();
2499 unsigned SrcNumElts = SrcVT.getVectorNumElements();
2501 if (SrcNumElts == MaskNumElts) {
2502 setValue(&I, DAG.getVectorShuffle(VT, getCurSDLoc(), Src1, Src2,
2503 &Mask[0]));
2504 return;
2505 }
2507 // Normalize the shuffle vector since mask and vector length don't match.
2508 if (SrcNumElts < MaskNumElts && MaskNumElts % SrcNumElts == 0) {
2509 // Mask is longer than the source vectors and is a multiple of the source
2510 // vectors. We can use concatenate vector to make the mask and vectors
2511 // lengths match.
2512 if (SrcNumElts*2 == MaskNumElts) {
2513 // First check for Src1 in low and Src2 in high
2514 if (isSequentialInRange(Mask, 0, SrcNumElts, 0) &&
2515 isSequentialInRange(Mask, SrcNumElts, SrcNumElts, SrcNumElts)) {
2516 // The shuffle is concatenating two vectors together.
2517 setValue(&I, DAG.getNode(ISD::CONCAT_VECTORS, getCurSDLoc(),
2518 VT, Src1, Src2));
2519 return;
2520 }
2521 // Then check for Src2 in low and Src1 in high
2522 if (isSequentialInRange(Mask, 0, SrcNumElts, SrcNumElts) &&
2523 isSequentialInRange(Mask, SrcNumElts, SrcNumElts, 0)) {
2524 // The shuffle is concatenating two vectors together.
2525 setValue(&I, DAG.getNode(ISD::CONCAT_VECTORS, getCurSDLoc(),
2526 VT, Src2, Src1));
2527 return;
2528 }
2529 }
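// For example, shuffling two <4 x i32> sources with mask <0,1,2,3,4,5,6,7>
// is exactly CONCAT_VECTORS(Src1, Src2) (the first case above), while the
// mask <4,5,6,7,0,1,2,3> concatenates them in the other order (this case).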
2531 // Pad both vectors with undefs to make them the same length as the mask.
2532 unsigned NumConcat = MaskNumElts / SrcNumElts;
2533 bool Src1U = Src1.getOpcode() == ISD::UNDEF;
2534 bool Src2U = Src2.getOpcode() == ISD::UNDEF;
2535 SDValue UndefVal = DAG.getUNDEF(SrcVT);
2537 SmallVector<SDValue, 8> MOps1(NumConcat, UndefVal);
2538 SmallVector<SDValue, 8> MOps2(NumConcat, UndefVal);
2539 MOps1[0] = Src1;
2540 MOps2[0] = Src2;
2542 Src1 = Src1U ? DAG.getUNDEF(VT) : DAG.getNode(ISD::CONCAT_VECTORS,
2543 getCurSDLoc(), VT, MOps1);
2544 Src2 = Src2U ? DAG.getUNDEF(VT) : DAG.getNode(ISD::CONCAT_VECTORS,
2545 getCurSDLoc(), VT, MOps2);
2547 // Readjust mask for new input vector length.
2548 SmallVector<int, 8> MappedOps;
2549 for (unsigned i = 0; i != MaskNumElts; ++i) {
2550 int Idx = Mask[i];
2551 if (Idx >= (int)SrcNumElts)
2552 Idx -= SrcNumElts - MaskNumElts;
2553 MappedOps.push_back(Idx);
2554 }
2556 setValue(&I, DAG.getVectorShuffle(VT, getCurSDLoc(), Src1, Src2,
2557 &MappedOps[0]));
2558 return;
2559 }
2561 if (SrcNumElts > MaskNumElts) {
2562 // Analyze the access pattern of the vector to see if we can extract
2563 // two subvectors and do the shuffle. The analysis is done by calculating
2564 // the range of elements the mask access on both vectors.
2565 int MinRange[2] = { static_cast<int>(SrcNumElts),
2566 static_cast<int>(SrcNumElts)};
2567 int MaxRange[2] = {-1, -1};
2569 for (unsigned i = 0; i != MaskNumElts; ++i) {
2570 int Idx = Mask[i];
2571 unsigned Input = 0;
2572 if (Idx < 0)
2573 continue;
2575 if (Idx >= (int)SrcNumElts) {
2576 Input = 1;
2577 Idx -= SrcNumElts;
2578 }
2579 if (Idx > MaxRange[Input])
2580 MaxRange[Input] = Idx;
2581 if (Idx < MinRange[Input])
2582 MinRange[Input] = Idx;
2583 }
2585 // Check if the access is smaller than the vector size and whether we can
2586 // find a reasonable extract index.
2587 int RangeUse[2] = { -1, -1 }; // 0 = Unused, 1 = Extract, -1 = Can not
2588 // be used.
2589 int StartIdx[2]; // StartIdx to extract from
2590 for (unsigned Input = 0; Input < 2; ++Input) {
2591 if (MinRange[Input] >= (int)SrcNumElts && MaxRange[Input] < 0) {
2592 RangeUse[Input] = 0; // Unused
2593 StartIdx[Input] = 0;
2594 continue;
2595 }
2597 // Find a good start index that is a multiple of the mask length. Then
2598 // see if the rest of the elements are in range.
2599 StartIdx[Input] = (MinRange[Input]/MaskNumElts)*MaskNumElts;
2600 if (MaxRange[Input] - StartIdx[Input] < (int)MaskNumElts &&
2601 StartIdx[Input] + MaskNumElts <= SrcNumElts)
2602 RangeUse[Input] = 1; // Extract from a multiple of the mask length.
2603 }
2605 if (RangeUse[0] == 0 && RangeUse[1] == 0) {
2606 setValue(&I, DAG.getUNDEF(VT)); // Vectors are not used.
2607 return;
2608 }
2609 if (RangeUse[0] >= 0 && RangeUse[1] >= 0) {
2610 // Extract appropriate subvector and generate a vector shuffle
2611 for (unsigned Input = 0; Input < 2; ++Input) {
2612 SDValue &Src = Input == 0 ? Src1 : Src2;
2613 if (RangeUse[Input] == 0)
2614 Src = DAG.getUNDEF(VT);
2615 else {
2616 SDLoc dl = getCurSDLoc();
2617 Src = DAG.getNode(
2618 ISD::EXTRACT_SUBVECTOR, dl, VT, Src,
2619 DAG.getConstant(StartIdx[Input], dl, TLI.getVectorIdxTy()));
2620 }
2621 }
2623 // Calculate new mask.
2624 SmallVector<int, 8> MappedOps;
2625 for (unsigned i = 0; i != MaskNumElts; ++i) {
2626 int Idx = Mask[i];
2627 if (Idx >= 0) {
2628 if (Idx < (int)SrcNumElts)
2629 Idx -= StartIdx[0];
2630 else
2631 Idx -= SrcNumElts + StartIdx[1] - MaskNumElts;
2632 }
2633 MappedOps.push_back(Idx);
2634 }
2636 setValue(&I, DAG.getVectorShuffle(VT, getCurSDLoc(), Src1, Src2,
2637 &MappedOps[0]));
2638 return;
2639 }
2640 }
2642 // We can't use either concat vectors or extract subvectors, so fall back to
2643 // replacing the shuffle with extracts and a build vector.
2645 EVT EltVT = VT.getVectorElementType();
2646 EVT IdxVT = TLI.getVectorIdxTy();
2647 SDLoc dl = getCurSDLoc();
2648 SmallVector<SDValue,8> Ops;
2649 for (unsigned i = 0; i != MaskNumElts; ++i) {
2650 int Idx = Mask[i];
2651 SDValue Res;
2653 if (Idx < 0) {
2654 Res = DAG.getUNDEF(EltVT);
2655 } else {
2656 SDValue &Src = Idx < (int)SrcNumElts ? Src1 : Src2;
2657 if (Idx >= (int)SrcNumElts) Idx -= SrcNumElts;
2659 Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl,
2660 EltVT, Src, DAG.getConstant(Idx, dl, IdxVT));
2661 }
2663 Ops.push_back(Res);
2664 }
2666 setValue(&I, DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Ops));
2667 }
2669 void SelectionDAGBuilder::visitInsertValue(const InsertValueInst &I) {
2670 const Value *Op0 = I.getOperand(0);
2671 const Value *Op1 = I.getOperand(1);
2672 Type *AggTy = I.getType();
2673 Type *ValTy = Op1->getType();
2674 bool IntoUndef = isa<UndefValue>(Op0);
2675 bool FromUndef = isa<UndefValue>(Op1);
2677 unsigned LinearIndex = ComputeLinearIndex(AggTy, I.getIndices());
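// ComputeLinearIndex flattens the aggregate into its leaf values; e.g. for
// { {i32, float}, i64 } the index path {0,1} (the float member) has linear
// index 1, and that is where the inserted value's pieces are spliced in below.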
2679 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2680 SmallVector<EVT, 4> AggValueVTs;
2681 ComputeValueVTs(TLI, DAG.getDataLayout(), AggTy, AggValueVTs);
2682 SmallVector<EVT, 4> ValValueVTs;
2683 ComputeValueVTs(TLI, DAG.getDataLayout(), ValTy, ValValueVTs);
2685 unsigned NumAggValues = AggValueVTs.size();
2686 unsigned NumValValues = ValValueVTs.size();
2687 SmallVector<SDValue, 4> Values(NumAggValues);
2689 // Ignore an insertvalue that produces an empty object
2690 if (!NumAggValues) {
2691 setValue(&I, DAG.getUNDEF(MVT(MVT::Other)));
2692 return;
2693 }
2695 SDValue Agg = getValue(Op0);
2696 unsigned i = 0;
2697 // Copy the beginning value(s) from the original aggregate.
2698 for (; i != LinearIndex; ++i)
2699 Values[i] = IntoUndef ? DAG.getUNDEF(AggValueVTs[i]) :
2700 SDValue(Agg.getNode(), Agg.getResNo() + i);
2701 // Copy values from the inserted value(s).
2703 SDValue Val = getValue(Op1);
2704 for (; i != LinearIndex + NumValValues; ++i)
2705 Values[i] = FromUndef ? DAG.getUNDEF(AggValueVTs[i]) :
2706 SDValue(Val.getNode(), Val.getResNo() + i - LinearIndex);
2708 // Copy remaining value(s) from the original aggregate.
2709 for (; i != NumAggValues; ++i)
2710 Values[i] = IntoUndef ? DAG.getUNDEF(AggValueVTs[i]) :
2711 SDValue(Agg.getNode(), Agg.getResNo() + i);
2713 setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(),
2714 DAG.getVTList(AggValueVTs), Values));
2717 void SelectionDAGBuilder::visitExtractValue(const ExtractValueInst &I) {
2718 const Value *Op0 = I.getOperand(0);
2719 Type *AggTy = Op0->getType();
2720 Type *ValTy = I.getType();
2721 bool OutOfUndef = isa<UndefValue>(Op0);
2723 unsigned LinearIndex = ComputeLinearIndex(AggTy, I.getIndices());
2725 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2726 SmallVector<EVT, 4> ValValueVTs;
2727 ComputeValueVTs(TLI, DAG.getDataLayout(), ValTy, ValValueVTs);
2729 unsigned NumValValues = ValValueVTs.size();
2731 // Ignore an extractvalue that produces an empty object
2732 if (!NumValValues) {
2733 setValue(&I, DAG.getUNDEF(MVT(MVT::Other)));
2734 return;
2735 }
2737 SmallVector<SDValue, 4> Values(NumValValues);
2739 SDValue Agg = getValue(Op0);
2740 // Copy out the selected value(s).
2741 for (unsigned i = LinearIndex; i != LinearIndex + NumValValues; ++i)
2742 Values[i - LinearIndex] =
2743 OutOfUndef ?
2744 DAG.getUNDEF(Agg.getNode()->getValueType(Agg.getResNo() + i)) :
2745 SDValue(Agg.getNode(), Agg.getResNo() + i);
2747 setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(),
2748 DAG.getVTList(ValValueVTs), Values));
2751 void SelectionDAGBuilder::visitGetElementPtr(const User &I) {
2752 Value *Op0 = I.getOperand(0);
2753 // Note that the pointer operand may be a vector of pointers. Take the scalar
2754 // element which holds a pointer.
2755 Type *Ty = Op0->getType()->getScalarType();
2756 unsigned AS = Ty->getPointerAddressSpace();
2757 SDValue N = getValue(Op0);
2758 SDLoc dl = getCurSDLoc();
2760 for (GetElementPtrInst::const_op_iterator OI = I.op_begin()+1, E = I.op_end();
2761 OI != E; ++OI) {
2762 const Value *Idx = *OI;
2763 if (StructType *StTy = dyn_cast<StructType>(Ty)) {
2764 unsigned Field = cast<Constant>(Idx)->getUniqueInteger().getZExtValue();
2765 if (Field) {
2766 // N = N + Offset
2767 uint64_t Offset = DL->getStructLayout(StTy)->getElementOffset(Field);
2768 N = DAG.getNode(ISD::ADD, dl, N.getValueType(), N,
2769 DAG.getConstant(Offset, dl, N.getValueType()));
2770 }
2772 Ty = StTy->getElementType(Field);
2773 } else {
2774 Ty = cast<SequentialType>(Ty)->getElementType();
2775 MVT PtrTy = DAG.getTargetLoweringInfo().getPointerTy(AS);
2776 unsigned PtrSize = PtrTy.getSizeInBits();
2777 APInt ElementSize(PtrSize, DL->getTypeAllocSize(Ty));
2779 // If this is a constant subscript, handle it quickly.
2780 if (const auto *CI = dyn_cast<ConstantInt>(Idx)) {
2781 if (CI->isZero())
2782 continue;
2783 APInt Offs = ElementSize * CI->getValue().sextOrTrunc(PtrSize);
2784 SDValue OffsVal = DAG.getConstant(Offs, dl, PtrTy);
2785 N = DAG.getNode(ISD::ADD, dl, N.getValueType(), N, OffsVal);
2786 continue;
2787 }
2789 // N = N + Idx * ElementSize;
2790 SDValue IdxN = getValue(Idx);
2792 // If the index is smaller or larger than intptr_t, truncate or extend
2793 // it.
2794 IdxN = DAG.getSExtOrTrunc(IdxN, dl, N.getValueType());
2796 // If this is a multiply by a power of two, turn it into a shl
2797 // immediately. This is a very common case.
2798 if (ElementSize != 1) {
2799 if (ElementSize.isPowerOf2()) {
2800 unsigned Amt = ElementSize.logBase2();
2801 IdxN = DAG.getNode(ISD::SHL, dl,
2802 N.getValueType(), IdxN,
2803 DAG.getConstant(Amt, dl, IdxN.getValueType()));
2804 } else {
2805 SDValue Scale = DAG.getConstant(ElementSize, dl, IdxN.getValueType());
2806 IdxN = DAG.getNode(ISD::MUL, dl,
2807 N.getValueType(), IdxN, Scale);
2808 }
2809 }
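// For example, indexing an array of i32 with a dynamic index on a 64-bit
// target scales the index by 4, which the power-of-two path above emits as
// "shl IdxN, 2" rather than a multiply.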
2811 N = DAG.getNode(ISD::ADD, dl,
2812 N.getValueType(), N, IdxN);
2813 }
2814 }
2816 setValue(&I, N);
2817 }
2819 void SelectionDAGBuilder::visitAlloca(const AllocaInst &I) {
2820 // If this is a fixed sized alloca in the entry block of the function,
2821 // allocate it statically on the stack.
2822 if (FuncInfo.StaticAllocaMap.count(&I))
2823 return; // getValue will auto-populate this.
2825 SDLoc dl = getCurSDLoc();
2826 Type *Ty = I.getAllocatedType();
2827 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2828 auto &DL = DAG.getDataLayout();
2829 uint64_t TySize = DL.getTypeAllocSize(Ty);
2830 unsigned Align =
2831 std::max((unsigned)DL.getPrefTypeAlignment(Ty), I.getAlignment());
2833 SDValue AllocSize = getValue(I.getArraySize());
2835 EVT IntPtr = TLI.getPointerTy();
2836 if (AllocSize.getValueType() != IntPtr)
2837 AllocSize = DAG.getZExtOrTrunc(AllocSize, dl, IntPtr);
2839 AllocSize = DAG.getNode(ISD::MUL, dl, IntPtr,
2840 AllocSize,
2841 DAG.getConstant(TySize, dl, IntPtr));
2843 // Handle alignment. If the requested alignment is less than or equal to
2844 // the stack alignment, ignore it. If the size is greater than or equal to
2845 // the stack alignment, we note this in the DYNAMIC_STACKALLOC node.
2846 unsigned StackAlign =
2847 DAG.getSubtarget().getFrameLowering()->getStackAlignment();
2848 if (Align <= StackAlign)
2849 Align = 0;
2851 // Round the size of the allocation up to the stack alignment size
2852 // by adding SA-1 to the size.
2853 AllocSize = DAG.getNode(ISD::ADD, dl,
2854 AllocSize.getValueType(), AllocSize,
2855 DAG.getIntPtrConstant(StackAlign - 1, dl));
2857 // Mask out the low bits for alignment purposes.
2858 AllocSize = DAG.getNode(ISD::AND, dl,
2859 AllocSize.getValueType(), AllocSize,
2860 DAG.getIntPtrConstant(~(uint64_t)(StackAlign - 1),
2861 dl));
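// Together the ADD and AND implement the usual round-up-to-multiple idiom:
// AllocSize = (AllocSize + StackAlign - 1) & ~(StackAlign - 1). E.g. with a
// 16-byte stack alignment, a 20-byte request is rounded up to 32 bytes.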
2863 SDValue Ops[] = { getRoot(), AllocSize, DAG.getIntPtrConstant(Align, dl) };
2864 SDVTList VTs = DAG.getVTList(AllocSize.getValueType(), MVT::Other);
2865 SDValue DSA = DAG.getNode(ISD::DYNAMIC_STACKALLOC, dl, VTs, Ops);
2866 setValue(&I, DSA);
2867 DAG.setRoot(DSA.getValue(1));
2869 assert(FuncInfo.MF->getFrameInfo()->hasVarSizedObjects());
2872 void SelectionDAGBuilder::visitLoad(const LoadInst &I) {
2873 if (I.isAtomic())
2874 return visitAtomicLoad(I);
2876 const Value *SV = I.getOperand(0);
2877 SDValue Ptr = getValue(SV);
2879 Type *Ty = I.getType();
2881 bool isVolatile = I.isVolatile();
2882 bool isNonTemporal = I.getMetadata(LLVMContext::MD_nontemporal) != nullptr;
2884 // The IR notion of invariant_load only guarantees that all *non-faulting*
2885 // invariant loads result in the same value. The MI notion of invariant load
2886 // guarantees that the load can be legally moved to any location within its
2887 // containing function. The MI notion of invariant_load is stronger than the
2888 // IR notion of invariant_load -- an MI invariant_load is an IR invariant_load
2889 // with a guarantee that the location being loaded from is dereferenceable
2890 // throughout the function's lifetime.
2892 bool isInvariant = I.getMetadata(LLVMContext::MD_invariant_load) != nullptr &&
2893 isDereferenceablePointer(SV, *DAG.getTarget().getDataLayout());
2894 unsigned Alignment = I.getAlignment();
2896 AAMDNodes AAInfo;
2897 I.getAAMetadata(AAInfo);
2898 const MDNode *Ranges = I.getMetadata(LLVMContext::MD_range);
2900 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2901 SmallVector<EVT, 4> ValueVTs;
2902 SmallVector<uint64_t, 4> Offsets;
2903 ComputeValueVTs(TLI, DAG.getDataLayout(), Ty, ValueVTs, &Offsets);
2904 unsigned NumValues = ValueVTs.size();
2905 if (NumValues == 0)
2906 return;
2908 SDValue Root;
2909 bool ConstantMemory = false;
2910 if (isVolatile || NumValues > MaxParallelChains)
2911 // Serialize volatile loads with other side effects.
2912 Root = getRoot();
2913 else if (AA->pointsToConstantMemory(
2914 MemoryLocation(SV, AA->getTypeStoreSize(Ty), AAInfo))) {
2915 // Do not serialize (non-volatile) loads of constant memory with anything.
2916 Root = DAG.getEntryNode();
2917 ConstantMemory = true;
2918 } else {
2919 // Do not serialize non-volatile loads against each other.
2920 Root = DAG.getRoot();
2921 }
2923 SDLoc dl = getCurSDLoc();
2925 if (isVolatile)
2926 Root = TLI.prepareVolatileOrAtomicLoad(Root, dl, DAG);
2928 SmallVector<SDValue, 4> Values(NumValues);
2929 SmallVector<SDValue, 4> Chains(std::min(MaxParallelChains, NumValues));
2930 EVT PtrVT = Ptr.getValueType();
2931 unsigned ChainI = 0;
2932 for (unsigned i = 0; i != NumValues; ++i, ++ChainI) {
2933 // Serializing loads here may result in excessive register pressure, and
2934 // TokenFactor places arbitrary choke points on the scheduler. SD scheduling
2935 // could recover a bit by hoisting nodes upward in the chain by recognizing
2936 // they are side-effect free or do not alias. The optimizer should really
2937 // avoid this case by converting large object/array copies to llvm.memcpy
2938 // (MaxParallelChains should always remain as failsafe).
2939 if (ChainI == MaxParallelChains) {
2940 assert(PendingLoads.empty() && "PendingLoads must be serialized first");
2941 SDValue Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
2942 makeArrayRef(Chains.data(), ChainI));
2943 Root = Chain;
2944 ChainI = 0;
2945 }
2946 SDValue A = DAG.getNode(ISD::ADD, dl,
2947 PtrVT, Ptr,
2948 DAG.getConstant(Offsets[i], dl, PtrVT));
2949 SDValue L = DAG.getLoad(ValueVTs[i], dl, Root,
2950 A, MachinePointerInfo(SV, Offsets[i]), isVolatile,
2951 isNonTemporal, isInvariant, Alignment, AAInfo,
2952 Ranges);
2954 Values[i] = L;
2955 Chains[ChainI] = L.getValue(1);
2956 }
2958 if (!ConstantMemory) {
2959 SDValue Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
2960 makeArrayRef(Chains.data(), ChainI));
2961 if (isVolatile)
2962 DAG.setRoot(Chain);
2963 else
2964 PendingLoads.push_back(Chain);
2965 }
2967 setValue(&I, DAG.getNode(ISD::MERGE_VALUES, dl,
2968 DAG.getVTList(ValueVTs), Values));
2971 void SelectionDAGBuilder::visitStore(const StoreInst &I) {
2972 if (I.isAtomic())
2973 return visitAtomicStore(I);
2975 const Value *SrcV = I.getOperand(0);
2976 const Value *PtrV = I.getOperand(1);
2978 SmallVector<EVT, 4> ValueVTs;
2979 SmallVector<uint64_t, 4> Offsets;
2980 ComputeValueVTs(DAG.getTargetLoweringInfo(), DAG.getDataLayout(),
2981 SrcV->getType(), ValueVTs, &Offsets);
2982 unsigned NumValues = ValueVTs.size();
2983 if (NumValues == 0)
2984 return;
2986 // Get the lowered operands. Note that we do this after
2987 // checking if NumResults is zero, because with zero results
2988 // the operands won't have values in the map.
2989 SDValue Src = getValue(SrcV);
2990 SDValue Ptr = getValue(PtrV);
2992 SDValue Root = getRoot();
2993 SmallVector<SDValue, 4> Chains(std::min(MaxParallelChains, NumValues));
2994 EVT PtrVT = Ptr.getValueType();
2995 bool isVolatile = I.isVolatile();
2996 bool isNonTemporal = I.getMetadata(LLVMContext::MD_nontemporal) != nullptr;
2997 unsigned Alignment = I.getAlignment();
2998 SDLoc dl = getCurSDLoc();
3000 AAMDNodes AAInfo;
3001 I.getAAMetadata(AAInfo);
3003 unsigned ChainI = 0;
3004 for (unsigned i = 0; i != NumValues; ++i, ++ChainI) {
3005 // See visitLoad comments.
3006 if (ChainI == MaxParallelChains) {
3007 SDValue Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
3008 makeArrayRef(Chains.data(), ChainI));
3009 Root = Chain;
3010 ChainI = 0;
3011 }
3012 SDValue Add = DAG.getNode(ISD::ADD, dl, PtrVT, Ptr,
3013 DAG.getConstant(Offsets[i], dl, PtrVT));
3014 SDValue St = DAG.getStore(Root, dl,
3015 SDValue(Src.getNode(), Src.getResNo() + i),
3016 Add, MachinePointerInfo(PtrV, Offsets[i]),
3017 isVolatile, isNonTemporal, Alignment, AAInfo);
3018 Chains[ChainI] = St;
3019 }
3021 SDValue StoreNode = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
3022 makeArrayRef(Chains.data(), ChainI));
3023 DAG.setRoot(StoreNode);
3026 void SelectionDAGBuilder::visitMaskedStore(const CallInst &I) {
3027 SDLoc sdl = getCurSDLoc();
3029 // llvm.masked.store.*(Src0, Ptr, alignment, Mask)
3030 Value *PtrOperand = I.getArgOperand(1);
3031 SDValue Ptr = getValue(PtrOperand);
3032 SDValue Src0 = getValue(I.getArgOperand(0));
3033 SDValue Mask = getValue(I.getArgOperand(3));
3034 EVT VT = Src0.getValueType();
3035 unsigned Alignment = (cast<ConstantInt>(I.getArgOperand(2)))->getZExtValue();
3036 if (!Alignment)
3037 Alignment = DAG.getEVTAlignment(VT);
3039 AAMDNodes AAInfo;
3040 I.getAAMetadata(AAInfo);
3042 MachineMemOperand *MMO =
3043 DAG.getMachineFunction().
3044 getMachineMemOperand(MachinePointerInfo(PtrOperand),
3045 MachineMemOperand::MOStore, VT.getStoreSize(),
3046 Alignment, AAInfo);
3047 SDValue StoreNode = DAG.getMaskedStore(getRoot(), sdl, Src0, Ptr, Mask, VT,
3048 MMO, false);
3049 DAG.setRoot(StoreNode);
3050 setValue(&I, StoreNode);
3053 // Gather/scatter receive a vector of pointers.
3054 // This vector of pointers may be represented as a base pointer + a vector of
3055 // indices; it depends on the GEP and on the instruction preceding the GEP
3056 // that calculates the indices.
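// For example, a gather whose pointer operand looks roughly like
//   %splat = shufflevector (insertelement undef, %base, 0), undef, zeroinitializer
//   %ptrs  = getelementptr %splat, <N x i64> %ind
// can be split into the scalar base %base plus the index vector %ind, which
// is the pattern the code below tries to recover.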
3057 static bool getUniformBase(Value *& Ptr, SDValue& Base, SDValue& Index,
3058 SelectionDAGBuilder* SDB) {
3060 assert(Ptr->getType()->isVectorTy() && "Unexpected pointer type");
3061 GetElementPtrInst *Gep = dyn_cast<GetElementPtrInst>(Ptr);
3062 if (!Gep || Gep->getNumOperands() > 2)
3063 return false;
3064 ShuffleVectorInst *ShuffleInst =
3065 dyn_cast<ShuffleVectorInst>(Gep->getPointerOperand());
3066 if (!ShuffleInst || !ShuffleInst->getMask()->isNullValue() ||
3067 cast<Instruction>(ShuffleInst->getOperand(0))->getOpcode() !=
3068 Instruction::InsertElement)
3069 return false;
3071 Ptr = cast<InsertElementInst>(ShuffleInst->getOperand(0))->getOperand(1);
3073 SelectionDAG& DAG = SDB->DAG;
3074 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3075 // Check if the Ptr is inside the current basic block.
3076 // If not, look for the shuffle instruction.
3077 if (SDB->findValue(Ptr))
3078 Base = SDB->getValue(Ptr);
3079 else if (SDB->findValue(ShuffleInst)) {
3080 SDValue ShuffleNode = SDB->getValue(ShuffleInst);
3081 SDLoc sdl = ShuffleNode;
3082 Base = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, sdl,
3083 ShuffleNode.getValueType().getScalarType(), ShuffleNode,
3084 DAG.getConstant(0, sdl, TLI.getVectorIdxTy()));
3085 SDB->setValue(Ptr, Base);
3086 }
3087 else
3088 return false;
3090 Value *IndexVal = Gep->getOperand(1);
3091 if (SDB->findValue(IndexVal)) {
3092 Index = SDB->getValue(IndexVal);
3094 if (SExtInst* Sext = dyn_cast<SExtInst>(IndexVal)) {
3095 IndexVal = Sext->getOperand(0);
3096 if (SDB->findValue(IndexVal))
3097 Index = SDB->getValue(IndexVal);
3098 }
3099 return true;
3100 }
3101 return false;
3102 }
3104 void SelectionDAGBuilder::visitMaskedScatter(const CallInst &I) {
3105 SDLoc sdl = getCurSDLoc();
3107 // llvm.masked.scatter.*(Src0, Ptrs, alignment, Mask)
3108 Value *Ptr = I.getArgOperand(1);
3109 SDValue Src0 = getValue(I.getArgOperand(0));
3110 SDValue Mask = getValue(I.getArgOperand(3));
3111 EVT VT = Src0.getValueType();
3112 unsigned Alignment = (cast<ConstantInt>(I.getArgOperand(2)))->getZExtValue();
3113 if (!Alignment)
3114 Alignment = DAG.getEVTAlignment(VT);
3115 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3117 AAMDNodes AAInfo;
3118 I.getAAMetadata(AAInfo);
3120 SDValue Base;
3121 SDValue Index;
3122 Value *BasePtr = Ptr;
3123 bool UniformBase = getUniformBase(BasePtr, Base, Index, this);
3125 Value *MemOpBasePtr = UniformBase ? BasePtr : nullptr;
3126 MachineMemOperand *MMO = DAG.getMachineFunction().
3127 getMachineMemOperand(MachinePointerInfo(MemOpBasePtr),
3128 MachineMemOperand::MOStore, VT.getStoreSize(),
3129 Alignment, AAInfo);
3130 if (!UniformBase) {
3131 Base = DAG.getTargetConstant(0, sdl, TLI.getPointerTy());
3132 Index = getValue(Ptr);
3133 }
3134 SDValue Ops[] = { getRoot(), Src0, Mask, Base, Index };
3135 SDValue Scatter = DAG.getMaskedScatter(DAG.getVTList(MVT::Other), VT, sdl,
3136 Ops, MMO);
3137 DAG.setRoot(Scatter);
3138 setValue(&I, Scatter);
3141 void SelectionDAGBuilder::visitMaskedLoad(const CallInst &I) {
3142 SDLoc sdl = getCurSDLoc();
3144 // @llvm.masked.load.*(Ptr, alignment, Mask, Src0)
3145 Value *PtrOperand = I.getArgOperand(0);
3146 SDValue Ptr = getValue(PtrOperand);
3147 SDValue Src0 = getValue(I.getArgOperand(3));
3148 SDValue Mask = getValue(I.getArgOperand(2));
3150 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3151 EVT VT = TLI.getValueType(I.getType());
3152 unsigned Alignment = (cast<ConstantInt>(I.getArgOperand(1)))->getZExtValue();
3153 if (!Alignment)
3154 Alignment = DAG.getEVTAlignment(VT);
3156 AAMDNodes AAInfo;
3157 I.getAAMetadata(AAInfo);
3158 const MDNode *Ranges = I.getMetadata(LLVMContext::MD_range);
3160 SDValue InChain = DAG.getRoot();
3161 if (AA->pointsToConstantMemory(MemoryLocation(
3162 PtrOperand, AA->getTypeStoreSize(I.getType()), AAInfo))) {
3163 // Do not serialize (non-volatile) loads of constant memory with anything.
3164 InChain = DAG.getEntryNode();
3165 }
3167 MachineMemOperand *MMO =
3168 DAG.getMachineFunction().
3169 getMachineMemOperand(MachinePointerInfo(PtrOperand),
3170 MachineMemOperand::MOLoad, VT.getStoreSize(),
3171 Alignment, AAInfo, Ranges);
3173 SDValue Load = DAG.getMaskedLoad(VT, sdl, InChain, Ptr, Mask, Src0, VT, MMO,
3174 ISD::NON_EXTLOAD);
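// Lanes whose mask bit is clear take their value from Src0, the pass-through
// operand of @llvm.masked.load, which is why Src0 is threaded into the node.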
3175 SDValue OutChain = Load.getValue(1);
3176 DAG.setRoot(OutChain);
3177 setValue(&I, Load);
3178 }
3180 void SelectionDAGBuilder::visitMaskedGather(const CallInst &I) {
3181 SDLoc sdl = getCurSDLoc();
3183 // @llvm.masked.gather.*(Ptrs, alignment, Mask, Src0)
3184 Value *Ptr = I.getArgOperand(0);
3185 SDValue Src0 = getValue(I.getArgOperand(3));
3186 SDValue Mask = getValue(I.getArgOperand(2));
3188 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3189 EVT VT = TLI.getValueType(I.getType());
3190 unsigned Alignment = (cast<ConstantInt>(I.getArgOperand(1)))->getZExtValue();
3191 if (!Alignment)
3192 Alignment = DAG.getEVTAlignment(VT);
3194 AAMDNodes AAInfo;
3195 I.getAAMetadata(AAInfo);
3196 const MDNode *Ranges = I.getMetadata(LLVMContext::MD_range);
3198 SDValue Root = DAG.getRoot();
3199 SDValue Base;
3200 SDValue Index;
3201 Value *BasePtr = Ptr;
3202 bool UniformBase = getUniformBase(BasePtr, Base, Index, this);
3203 bool ConstantMemory = false;
3204 if (UniformBase &&
3205 AA->pointsToConstantMemory(
3206 MemoryLocation(BasePtr, AA->getTypeStoreSize(I.getType()), AAInfo))) {
3207 // Do not serialize (non-volatile) loads of constant memory with anything.
3208 Root = DAG.getEntryNode();
3209 ConstantMemory = true;
3210 }
3212 MachineMemOperand *MMO =
3213 DAG.getMachineFunction().
3214 getMachineMemOperand(MachinePointerInfo(UniformBase ? BasePtr : nullptr),
3215 MachineMemOperand::MOLoad, VT.getStoreSize(),
3216 Alignment, AAInfo, Ranges);
3218 if (!UniformBase) {
3219 Base = DAG.getTargetConstant(0, sdl, TLI.getPointerTy());
3220 Index = getValue(Ptr);
3221 }
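// When no uniform base can be recovered, the whole vector of pointers is
// passed as the "index" with a zero base, so each lane's index is effectively
// an absolute address rather than an offset from a common base.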
3222 SDValue Ops[] = { Root, Src0, Mask, Base, Index };
3223 SDValue Gather = DAG.getMaskedGather(DAG.getVTList(VT, MVT::Other), VT, sdl,
3224 Ops, MMO);
3226 SDValue OutChain = Gather.getValue(1);
3227 if (!ConstantMemory)
3228 PendingLoads.push_back(OutChain);
3229 setValue(&I, Gather);