//===-- SelectionDAGBuilder.cpp - Selection-DAG building ------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements routines for translating from LLVM IR into SelectionDAG IR.
//
//===----------------------------------------------------------------------===//

#include "SelectionDAGBuilder.h"
#include "SDNodeDbgValue.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/BranchProbabilityInfo.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/FastISel.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/GCMetadata.h"
#include "llvm/CodeGen/GCStrategy.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/StackMaps.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetFrameLowering.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetIntrinsicInfo.h"
#include "llvm/Target/TargetLibraryInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Target/TargetSelectionDAGInfo.h"
#include "llvm/Target/TargetSubtargetInfo.h"
#include <algorithm>
using namespace llvm;

#define DEBUG_TYPE "isel"

/// LimitFloatPrecision - Generate low-precision inline sequences for
/// some float libcalls (6, 8 or 12 bits).
static unsigned LimitFloatPrecision;

static cl::opt<unsigned, true>
LimitFPPrecision("limit-float-precision",
                 cl::desc("Generate low-precision inline sequences "
                          "for some float libcalls"),
                 cl::location(LimitFloatPrecision),
                 cl::init(0));

// Limit the width of DAG chains. This is important in general to prevent
// DAG-based analysis from blowing up. For example, alias analysis and
// load clustering may not complete in reasonable time. It is difficult to
// recognize and avoid this situation within each individual analysis, and
// future analyses are likely to have the same behavior. Limiting DAG width is
// the safe approach, and will be especially important with global DAGs.
//
// MaxParallelChains default is arbitrarily high to avoid affecting
// optimization, but could be lowered to improve compile time. Any ld-ld-st-st
// sequence over this should have been converted to llvm.memcpy by the
// frontend. It is easy to induce this behavior with .ll code such as:
//   %buffer = alloca [4096 x i8]
//   %data = load [4096 x i8]* %argPtr
//   store [4096 x i8] %data, [4096 x i8]* %buffer
static const unsigned MaxParallelChains = 64;

static SDValue getCopyFromPartsVector(SelectionDAG &DAG, SDLoc DL,
                                      const SDValue *Parts, unsigned NumParts,
                                      MVT PartVT, EVT ValueVT, const Value *V);

/// getCopyFromParts - Create a value that contains the specified legal parts
/// combined into the value they represent. If the parts combine to a type
/// larger than ValueVT then AssertOp can be used to specify whether the extra
/// bits are known to be zero (ISD::AssertZext) or sign extended from ValueVT
/// (ISD::AssertSext).
static SDValue getCopyFromParts(SelectionDAG &DAG, SDLoc DL,
                                const SDValue *Parts,
                                unsigned NumParts, MVT PartVT, EVT ValueVT,
                                const Value *V,
                                ISD::NodeType AssertOp = ISD::DELETED_NODE) {
  if (ValueVT.isVector())
    return getCopyFromPartsVector(DAG, DL, Parts, NumParts,
                                  ValueVT, V);

  assert(NumParts > 0 && "No parts to assemble!");
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  SDValue Val = Parts[0];

  if (NumParts > 1) {
    // Assemble the value from multiple parts.
    if (ValueVT.isInteger()) {
      unsigned PartBits = PartVT.getSizeInBits();
      unsigned ValueBits = ValueVT.getSizeInBits();

      // Assemble the power of 2 part.
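      // NumParts & (NumParts - 1) is nonzero exactly when NumParts is not a
      // power of two, so RoundParts is the largest power of two that does
      // not exceed NumParts (e.g. 7 parts round down to 4, leaving a 3-part
      // odd tail to be handled below).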
      unsigned RoundParts = NumParts & (NumParts - 1) ?
        1 << Log2_32(NumParts) : NumParts;
      unsigned RoundBits = PartBits * RoundParts;
      EVT RoundVT = RoundBits == ValueBits ?
        ValueVT : EVT::getIntegerVT(*DAG.getContext(), RoundBits);
      SDValue Lo, Hi;

      EVT HalfVT = EVT::getIntegerVT(*DAG.getContext(), RoundBits/2);

      if (RoundParts > 2) {
        Lo = getCopyFromParts(DAG, DL, Parts, RoundParts / 2,
                              PartVT, HalfVT, V);
        Hi = getCopyFromParts(DAG, DL, Parts + RoundParts / 2,
                              RoundParts / 2, PartVT, HalfVT, V);
      } else {
        Lo = DAG.getNode(ISD::BITCAST, DL, HalfVT, Parts[0]);
        Hi = DAG.getNode(ISD::BITCAST, DL, HalfVT, Parts[1]);
      }

      if (TLI.isBigEndian())
        std::swap(Lo, Hi);

      Val = DAG.getNode(ISD::BUILD_PAIR, DL, RoundVT, Lo, Hi);

      if (RoundParts < NumParts) {
        // Assemble the trailing non-power-of-2 part.
        unsigned OddParts = NumParts - RoundParts;
        EVT OddVT = EVT::getIntegerVT(*DAG.getContext(), OddParts * PartBits);
        Hi = getCopyFromParts(DAG, DL,
                              Parts + RoundParts, OddParts, PartVT, OddVT, V);

        // Combine the round and odd parts.
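        // The odd (high) part is any-extended to the full width, shifted
        // left past the round part, and OR'd with the zero-extended round
        // part; e.g. an i96 built from three i32 parts combines an i64 Lo
        // with the remaining i32 shifted up by 64 bits.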
        Lo = Val;
        if (TLI.isBigEndian())
          std::swap(Lo, Hi);
        EVT TotalVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
        Hi = DAG.getNode(ISD::ANY_EXTEND, DL, TotalVT, Hi);
        Hi = DAG.getNode(ISD::SHL, DL, TotalVT, Hi,
                         DAG.getConstant(Lo.getValueType().getSizeInBits(),
                                         TLI.getPointerTy()));
        Lo = DAG.getNode(ISD::ZERO_EXTEND, DL, TotalVT, Lo);
        Val = DAG.getNode(ISD::OR, DL, TotalVT, Lo, Hi);
      }
    } else if (PartVT.isFloatingPoint()) {
      // FP split into multiple FP parts (for ppcf128)
      assert(ValueVT == EVT(MVT::ppcf128) && PartVT == MVT::f64 &&
             "Unexpected split");
      SDValue Lo, Hi;
      Lo = DAG.getNode(ISD::BITCAST, DL, EVT(MVT::f64), Parts[0]);
      Hi = DAG.getNode(ISD::BITCAST, DL, EVT(MVT::f64), Parts[1]);
      if (TLI.hasBigEndianPartOrdering(ValueVT))
        std::swap(Lo, Hi);
      Val = DAG.getNode(ISD::BUILD_PAIR, DL, ValueVT, Lo, Hi);
    } else {
      // FP split into integer parts (soft fp)
      assert(ValueVT.isFloatingPoint() && PartVT.isInteger() &&
             !PartVT.isVector() && "Unexpected split");
      EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), ValueVT.getSizeInBits());
      Val = getCopyFromParts(DAG, DL, Parts, NumParts, PartVT, IntVT, V);
    }
  }

  // There is now one part, held in Val. Correct it to match ValueVT.
  EVT PartEVT = Val.getValueType();

  if (PartEVT == ValueVT)
    return Val;

  if (PartEVT.isInteger() && ValueVT.isInteger()) {
    if (ValueVT.bitsLT(PartEVT)) {
      // For a truncate, see if we have any information to
      // indicate whether the truncated bits will always be
      // zero or sign-extension.
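      // AssertZext/AssertSext record that the bits beyond ValueVT are known
      // zero / known sign bits, which lets later combines remove redundant
      // masking or re-extension after the truncate.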
      if (AssertOp != ISD::DELETED_NODE)
        Val = DAG.getNode(AssertOp, DL, PartEVT, Val,
                          DAG.getValueType(ValueVT));
      return DAG.getNode(ISD::TRUNCATE, DL, ValueVT, Val);
    }
    return DAG.getNode(ISD::ANY_EXTEND, DL, ValueVT, Val);
  }

  if (PartEVT.isFloatingPoint() && ValueVT.isFloatingPoint()) {
    // FP_ROUND's are always exact here.
    if (ValueVT.bitsLT(Val.getValueType()))
      return DAG.getNode(ISD::FP_ROUND, DL, ValueVT, Val,
                         DAG.getTargetConstant(1, TLI.getPointerTy()));

    return DAG.getNode(ISD::FP_EXTEND, DL, ValueVT, Val);
  }

  if (PartEVT.getSizeInBits() == ValueVT.getSizeInBits())
    return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);

  llvm_unreachable("Unknown mismatch!");
}

static void diagnosePossiblyInvalidConstraint(LLVMContext &Ctx, const Value *V,
                                              const Twine &ErrMsg) {
  const Instruction *I = dyn_cast_or_null<Instruction>(V);
  if (!V)
    return Ctx.emitError(ErrMsg);

  const char *AsmError = ", possibly invalid constraint for vector type";
  if (const CallInst *CI = dyn_cast<CallInst>(I))
    if (isa<InlineAsm>(CI->getCalledValue()))
      return Ctx.emitError(I, ErrMsg + AsmError);

  return Ctx.emitError(I, ErrMsg);
}

/// getCopyFromPartsVector - Create a value that contains the specified legal
/// parts combined into the value they represent. If the parts combine to a
/// type larger than ValueVT then AssertOp can be used to specify whether the
/// extra bits are known to be zero (ISD::AssertZext) or sign extended from
/// ValueVT (ISD::AssertSext).
static SDValue getCopyFromPartsVector(SelectionDAG &DAG, SDLoc DL,
                                      const SDValue *Parts, unsigned NumParts,
                                      MVT PartVT, EVT ValueVT, const Value *V) {
  assert(ValueVT.isVector() && "Not a vector value");
  assert(NumParts > 0 && "No parts to assemble!");
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  SDValue Val = Parts[0];

  // Handle a multi-element vector.
  if (NumParts > 1) {
    EVT IntermediateVT;
    MVT RegisterVT;
    unsigned NumIntermediates;
    unsigned NumRegs =
    TLI.getVectorTypeBreakdown(*DAG.getContext(), ValueVT, IntermediateVT,
                               NumIntermediates, RegisterVT);
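    // getVectorTypeBreakdown describes how the value type is legalized:
    // ValueVT is split into NumIntermediates pieces of IntermediateVT, and
    // each intermediate is carried in NumRegs / NumIntermediates registers
    // of RegisterVT.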
    assert(NumRegs == NumParts && "Part count doesn't match vector breakdown!");
    NumParts = NumRegs; // Silence a compiler warning.
    assert(RegisterVT == PartVT && "Part type doesn't match vector breakdown!");
    assert(RegisterVT == Parts[0].getSimpleValueType() &&
           "Part type doesn't match part!");

    // Assemble the parts into intermediate operands.
    SmallVector<SDValue, 8> Ops(NumIntermediates);
    if (NumIntermediates == NumParts) {
      // If the register was not expanded, truncate or copy the value,
      // as appropriate.
      for (unsigned i = 0; i != NumParts; ++i)
        Ops[i] = getCopyFromParts(DAG, DL, &Parts[i], 1,
                                  PartVT, IntermediateVT, V);
    } else if (NumParts > 0) {
      // If the intermediate type was expanded, build the intermediate
      // operands from the parts.
      assert(NumParts % NumIntermediates == 0 &&
             "Must expand into a divisible number of parts!");
      unsigned Factor = NumParts / NumIntermediates;
      for (unsigned i = 0; i != NumIntermediates; ++i)
        Ops[i] = getCopyFromParts(DAG, DL, &Parts[i * Factor], Factor,
                                  PartVT, IntermediateVT, V);
    }

    // Build a vector with BUILD_VECTOR or CONCAT_VECTORS from the
    // intermediate operands.
    Val = DAG.getNode(IntermediateVT.isVector() ? ISD::CONCAT_VECTORS
                                                : ISD::BUILD_VECTOR,
                      DL, ValueVT, Ops);
  }

  // There is now one part, held in Val. Correct it to match ValueVT.
  EVT PartEVT = Val.getValueType();

  if (PartEVT == ValueVT)
    return Val;

  if (PartEVT.isVector()) {
    // If the element type of the source/dest vectors are the same, but the
    // parts vector has more elements than the value vector, then we have a
    // vector widening case (e.g. <2 x float> -> <4 x float>). Extract the
    // elements we want.
    if (PartEVT.getVectorElementType() == ValueVT.getVectorElementType()) {
      assert(PartEVT.getVectorNumElements() > ValueVT.getVectorNumElements() &&
             "Cannot narrow, it would be a lossy transformation");
      return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, ValueVT, Val,
                         DAG.getConstant(0, TLI.getVectorIdxTy()));
    }

    // Vector/Vector bitcast.
    if (ValueVT.getSizeInBits() == PartEVT.getSizeInBits())
      return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);

    assert(PartEVT.getVectorNumElements() == ValueVT.getVectorNumElements() &&
           "Cannot handle this kind of promotion");
    // Promoted vector extract
    bool Smaller = ValueVT.bitsLE(PartEVT);
    return DAG.getNode((Smaller ? ISD::TRUNCATE : ISD::ANY_EXTEND),
                       DL, ValueVT, Val);
  }

  // Trivial bitcast if the types are the same size and the destination
  // vector type is legal.
  if (PartEVT.getSizeInBits() == ValueVT.getSizeInBits() &&
      TLI.isTypeLegal(ValueVT))
    return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);

  // Handle cases such as i8 -> <1 x i1>
  if (ValueVT.getVectorNumElements() != 1) {
    diagnosePossiblyInvalidConstraint(*DAG.getContext(), V,
                                      "non-trivial scalar-to-vector conversion");
    return DAG.getUNDEF(ValueVT);
  }

  if (ValueVT.getVectorNumElements() == 1 &&
      ValueVT.getVectorElementType() != PartEVT) {
    bool Smaller = ValueVT.bitsLE(PartEVT);
    Val = DAG.getNode((Smaller ? ISD::TRUNCATE : ISD::ANY_EXTEND),
                      DL, ValueVT.getScalarType(), Val);
  }

  return DAG.getNode(ISD::BUILD_VECTOR, DL, ValueVT, Val);
}

static void getCopyToPartsVector(SelectionDAG &DAG, SDLoc dl,
                                 SDValue Val, SDValue *Parts, unsigned NumParts,
                                 MVT PartVT, const Value *V);

/// getCopyToParts - Create a series of nodes that contain the specified value
/// split into legal parts. If the parts contain more bits than Val, then, for
/// integers, ExtendKind can be used to specify how to generate the extra bits.
static void getCopyToParts(SelectionDAG &DAG, SDLoc DL,
                           SDValue Val, SDValue *Parts, unsigned NumParts,
                           MVT PartVT, const Value *V,
                           ISD::NodeType ExtendKind = ISD::ANY_EXTEND) {
  EVT ValueVT = Val.getValueType();

  // Handle the vector case separately.
  if (ValueVT.isVector())
    return getCopyToPartsVector(DAG, DL, Val, Parts, NumParts, PartVT, V);

  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  unsigned PartBits = PartVT.getSizeInBits();
  unsigned OrigNumParts = NumParts;
  assert(TLI.isTypeLegal(PartVT) && "Copying to an illegal type!");

  if (NumParts == 0)
    return;

  assert(!ValueVT.isVector() && "Vector case handled elsewhere");
  EVT PartEVT = PartVT;
  if (PartEVT == ValueVT) {
    assert(NumParts == 1 && "No-op copy with multiple parts!");
    Parts[0] = Val;
    return;
  }

  if (NumParts * PartBits > ValueVT.getSizeInBits()) {
    // If the parts cover more bits than the value has, promote the value.
    if (PartVT.isFloatingPoint() && ValueVT.isFloatingPoint()) {
      assert(NumParts == 1 && "Do not know what to promote to!");
      Val = DAG.getNode(ISD::FP_EXTEND, DL, PartVT, Val);
    } else {
      assert((PartVT.isInteger() || PartVT == MVT::x86mmx) &&
             ValueVT.isInteger() &&
             "Unknown mismatch!");
      ValueVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
      Val = DAG.getNode(ExtendKind, DL, ValueVT, Val);
      if (PartVT == MVT::x86mmx)
        Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
    }
  } else if (PartBits == ValueVT.getSizeInBits()) {
    // Different types of the same size.
    assert(NumParts == 1 && PartEVT != ValueVT);
    Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
  } else if (NumParts * PartBits < ValueVT.getSizeInBits()) {
    // If the parts cover fewer bits than the value has, truncate the value.
    assert((PartVT.isInteger() || PartVT == MVT::x86mmx) &&
           ValueVT.isInteger() &&
           "Unknown mismatch!");
    ValueVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
    Val = DAG.getNode(ISD::TRUNCATE, DL, ValueVT, Val);
    if (PartVT == MVT::x86mmx)
      Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
  }

  // The value may have changed - recompute ValueVT.
  ValueVT = Val.getValueType();
  assert(NumParts * PartBits == ValueVT.getSizeInBits() &&
         "Failed to tile the value with PartVT!");

  if (NumParts == 1) {
    if (PartEVT != ValueVT)
      diagnosePossiblyInvalidConstraint(*DAG.getContext(), V,
                                        "scalar-to-vector conversion failed");

    Parts[0] = Val;
    return;
  }

  // Expand the value into multiple parts.
  if (NumParts & (NumParts - 1)) {
    // The number of parts is not a power of 2. Split off and copy the tail.
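    // This mirrors the assembly logic in getCopyFromParts: the odd-sized
    // high tail is shifted down with SRL and copied out recursively, then
    // the remaining power-of-two chunk is truncated and bisected below.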
    assert(PartVT.isInteger() && ValueVT.isInteger() &&
           "Do not know what to expand to!");
    unsigned RoundParts = 1 << Log2_32(NumParts);
    unsigned RoundBits = RoundParts * PartBits;
    unsigned OddParts = NumParts - RoundParts;
    SDValue OddVal = DAG.getNode(ISD::SRL, DL, ValueVT, Val,
                                 DAG.getIntPtrConstant(RoundBits));
    getCopyToParts(DAG, DL, OddVal, Parts + RoundParts, OddParts, PartVT, V);

    if (TLI.isBigEndian())
      // The odd parts were reversed by getCopyToParts - unreverse them.
      std::reverse(Parts + RoundParts, Parts + NumParts);

    NumParts = RoundParts;
    ValueVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
    Val = DAG.getNode(ISD::TRUNCATE, DL, ValueVT, Val);
  }

  // The number of parts is a power of 2. Repeatedly bisect the value using
  // EXTRACT_ELEMENT.
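  // Each outer iteration halves the chunk size: EXTRACT_ELEMENT with index
  // 1/0 peels off the high/low half of an integer chunk, so after
  // log2(NumParts) rounds every Parts[i] holds exactly one PartVT-sized
  // piece of the value.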
  Parts[0] = DAG.getNode(ISD::BITCAST, DL,
                         EVT::getIntegerVT(*DAG.getContext(),
                                           ValueVT.getSizeInBits()),
                         Val);

  for (unsigned StepSize = NumParts; StepSize > 1; StepSize /= 2) {
    for (unsigned i = 0; i < NumParts; i += StepSize) {
      unsigned ThisBits = StepSize * PartBits / 2;
      EVT ThisVT = EVT::getIntegerVT(*DAG.getContext(), ThisBits);
      SDValue &Part0 = Parts[i];
      SDValue &Part1 = Parts[i+StepSize/2];

      Part1 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL,
                          ThisVT, Part0, DAG.getIntPtrConstant(1));
      Part0 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL,
                          ThisVT, Part0, DAG.getIntPtrConstant(0));

      if (ThisBits == PartBits && ThisVT != PartVT) {
        Part0 = DAG.getNode(ISD::BITCAST, DL, PartVT, Part0);
        Part1 = DAG.getNode(ISD::BITCAST, DL, PartVT, Part1);
      }
    }
  }

  if (TLI.isBigEndian())
    std::reverse(Parts, Parts + OrigNumParts);
}

/// getCopyToPartsVector - Create a series of nodes that contain the specified
/// value split into legal parts.
static void getCopyToPartsVector(SelectionDAG &DAG, SDLoc DL,
                                 SDValue Val, SDValue *Parts, unsigned NumParts,
                                 MVT PartVT, const Value *V) {
  EVT ValueVT = Val.getValueType();
  assert(ValueVT.isVector() && "Not a vector");
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();

  if (NumParts == 1) {
    EVT PartEVT = PartVT;
    if (PartEVT == ValueVT) {
      // Nothing to do.
    } else if (PartVT.getSizeInBits() == ValueVT.getSizeInBits()) {
      // Bitconvert vector->vector case.
      Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
    } else if (PartVT.isVector() &&
               PartEVT.getVectorElementType() == ValueVT.getVectorElementType() &&
               PartEVT.getVectorNumElements() > ValueVT.getVectorNumElements()) {
      EVT ElementVT = PartVT.getVectorElementType();
      // Vector widening case, e.g. <2 x float> -> <4 x float>. Shuffle in
      // undef elements.
      SmallVector<SDValue, 16> Ops;
      for (unsigned i = 0, e = ValueVT.getVectorNumElements(); i != e; ++i)
        Ops.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL,
                                  ElementVT, Val, DAG.getConstant(i,
                                                  TLI.getVectorIdxTy())));

      for (unsigned i = ValueVT.getVectorNumElements(),
           e = PartVT.getVectorNumElements(); i != e; ++i)
        Ops.push_back(DAG.getUNDEF(ElementVT));

      Val = DAG.getNode(ISD::BUILD_VECTOR, DL, PartVT, Ops);

      // FIXME: Use CONCAT for 2x -> 4x.

      //SDValue UndefElts = DAG.getUNDEF(VectorTy);
      //Val = DAG.getNode(ISD::CONCAT_VECTORS, DL, PartVT, Val, UndefElts);
    } else if (PartVT.isVector() &&
               PartEVT.getVectorElementType().bitsGE(
                 ValueVT.getVectorElementType()) &&
               PartEVT.getVectorNumElements() == ValueVT.getVectorNumElements()) {

      // Promoted vector extract
      bool Smaller = PartEVT.bitsLE(ValueVT);
      Val = DAG.getNode((Smaller ? ISD::TRUNCATE : ISD::ANY_EXTEND),
                        DL, PartVT, Val);
    } else {
      // Vector -> scalar conversion.
      assert(ValueVT.getVectorNumElements() == 1 &&
             "Only trivial vector-to-scalar conversions should get here!");
      Val = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL,
                        PartVT, Val, DAG.getConstant(0, TLI.getVectorIdxTy()));

      bool Smaller = ValueVT.bitsLE(PartVT);
      Val = DAG.getNode((Smaller ? ISD::TRUNCATE : ISD::ANY_EXTEND),
                        DL, PartVT, Val);
    }

    Parts[0] = Val;
    return;
  }

  // Handle a multi-element vector.
  EVT IntermediateVT;
  MVT RegisterVT;
  unsigned NumIntermediates;
  unsigned NumRegs = TLI.getVectorTypeBreakdown(*DAG.getContext(), ValueVT,
                                                IntermediateVT,
                                                NumIntermediates, RegisterVT);
  unsigned NumElements = ValueVT.getVectorNumElements();

  assert(NumRegs == NumParts && "Part count doesn't match vector breakdown!");
  NumParts = NumRegs; // Silence a compiler warning.
  assert(RegisterVT == PartVT && "Part type doesn't match vector breakdown!");

  // Split the vector into intermediate operands.
  SmallVector<SDValue, 8> Ops(NumIntermediates);
  for (unsigned i = 0; i != NumIntermediates; ++i) {
    if (IntermediateVT.isVector())
      Ops[i] = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL,
                           IntermediateVT, Val,
                   DAG.getConstant(i * (NumElements / NumIntermediates),
                                   TLI.getVectorIdxTy()));
    else
      Ops[i] = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL,
                           IntermediateVT, Val,
                           DAG.getConstant(i, TLI.getVectorIdxTy()));
  }

  // Split the intermediate operands into legal parts.
  if (NumParts == NumIntermediates) {
    // If the register was not expanded, promote or copy the value,
    // as appropriate.
    for (unsigned i = 0; i != NumParts; ++i)
      getCopyToParts(DAG, DL, Ops[i], &Parts[i], 1, PartVT, V);
  } else if (NumParts > 0) {
    // If the intermediate type was expanded, split the value into
    // legal parts.
    assert(NumParts % NumIntermediates == 0 &&
           "Must expand into a divisible number of parts!");
    unsigned Factor = NumParts / NumIntermediates;
    for (unsigned i = 0; i != NumIntermediates; ++i)
      getCopyToParts(DAG, DL, Ops[i], &Parts[i*Factor], Factor, PartVT, V);
  }
}

namespace {

/// RegsForValue - This struct represents the registers (physical or virtual)
/// that a particular set of values is assigned, and the type information
/// about the values. The most common situation is to represent one value at a
/// time, but struct or array values are handled element-wise as multiple
/// values. The splitting of aggregates is performed recursively, so that we
/// never have aggregate-typed registers. The values at this point do not
/// necessarily have legal types, so each value may require one or more
/// registers of some legal type.
///
struct RegsForValue {
  /// ValueVTs - The value types of the values, which may not be legal, and
  /// may need to be promoted or synthesized from one or more registers.
  ///
  SmallVector<EVT, 4> ValueVTs;

  /// RegVTs - The value types of the registers. This is the same size as
  /// ValueVTs and it records, for each value, what the type of the assigned
  /// register or registers are. (Individual values are never synthesized
  /// from more than one type of register.)
  ///
  /// With virtual registers, the contents of RegVTs is redundant with TLI's
  /// getRegisterType member function, however with physical registers
  /// it is necessary to have a separate record of the types.
  ///
  SmallVector<MVT, 4> RegVTs;

  /// Regs - This list holds the registers assigned to the values.
  /// Each legal or promoted value requires one register, and each
  /// expanded value requires multiple registers.
  ///
  SmallVector<unsigned, 4> Regs;

  RegsForValue() {}

  RegsForValue(const SmallVector<unsigned, 4> &regs,
               MVT regvt, EVT valuevt)
    : ValueVTs(1, valuevt), RegVTs(1, regvt), Regs(regs) {}

  RegsForValue(LLVMContext &Context, const TargetLowering &tli,
               unsigned Reg, Type *Ty) {
    ComputeValueVTs(tli, Ty, ValueVTs);

    for (unsigned Value = 0, e = ValueVTs.size(); Value != e; ++Value) {
      EVT ValueVT = ValueVTs[Value];
      unsigned NumRegs = tli.getNumRegisters(Context, ValueVT);
      MVT RegisterVT = tli.getRegisterType(Context, ValueVT);
      for (unsigned i = 0; i != NumRegs; ++i)
        Regs.push_back(Reg + i);
      RegVTs.push_back(RegisterVT);
      Reg += NumRegs;
    }
  }

  /// append - Add the specified values to this one.
  void append(const RegsForValue &RHS) {
    ValueVTs.append(RHS.ValueVTs.begin(), RHS.ValueVTs.end());
    RegVTs.append(RHS.RegVTs.begin(), RHS.RegVTs.end());
    Regs.append(RHS.Regs.begin(), RHS.Regs.end());
  }

  /// getCopyFromRegs - Emit a series of CopyFromReg nodes that copies from
  /// this value and returns the result as a ValueVTs value. This uses
  /// Chain/Flag as the input and updates them for the output Chain/Flag.
  /// If the Flag pointer is NULL, no flag is used.
  SDValue getCopyFromRegs(SelectionDAG &DAG, FunctionLoweringInfo &FuncInfo,
                          SDLoc dl,
                          SDValue &Chain, SDValue *Flag,
                          const Value *V = nullptr) const;

  /// getCopyToRegs - Emit a series of CopyToReg nodes that copies the
  /// specified value into the registers specified by this object. This uses
  /// Chain/Flag as the input and updates them for the output Chain/Flag.
  /// If the Flag pointer is NULL, no flag is used.
  void getCopyToRegs(SDValue Val, SelectionDAG &DAG, SDLoc dl,
                     SDValue &Chain, SDValue *Flag, const Value *V) const;

  /// AddInlineAsmOperands - Add this value to the specified inlineasm node
  /// operand list. This adds the code marker, matching input operand index
  /// (if applicable), and includes the number of values added into it.
  void AddInlineAsmOperands(unsigned Kind,
                            bool HasMatching, unsigned MatchingIdx,
                            SelectionDAG &DAG,
                            std::vector<SDValue> &Ops) const;
};

} // end anonymous namespace

/// getCopyFromRegs - Emit a series of CopyFromReg nodes that copies from
/// this value and returns the result as a ValueVT value. This uses
/// Chain/Flag as the input and updates them for the output Chain/Flag.
/// If the Flag pointer is NULL, no flag is used.
SDValue RegsForValue::getCopyFromRegs(SelectionDAG &DAG,
                                      FunctionLoweringInfo &FuncInfo,
                                      SDLoc dl,
                                      SDValue &Chain, SDValue *Flag,
                                      const Value *V) const {
  // A Value with type {} or [0 x %t] needs no registers.
  if (ValueVTs.empty())
    return SDValue();

  const TargetLowering &TLI = DAG.getTargetLoweringInfo();

  // Assemble the legal parts into the final values.
  SmallVector<SDValue, 4> Values(ValueVTs.size());
  SmallVector<SDValue, 8> Parts;
  for (unsigned Value = 0, Part = 0, e = ValueVTs.size(); Value != e; ++Value) {
    // Copy the legal parts from the registers.
    EVT ValueVT = ValueVTs[Value];
    unsigned NumRegs = TLI.getNumRegisters(*DAG.getContext(), ValueVT);
    MVT RegisterVT = RegVTs[Value];

    Parts.resize(NumRegs);
    for (unsigned i = 0; i != NumRegs; ++i) {
      SDValue P;
      if (!Flag) {
        P = DAG.getCopyFromReg(Chain, dl, Regs[Part+i], RegisterVT);
      } else {
        P = DAG.getCopyFromReg(Chain, dl, Regs[Part+i], RegisterVT, *Flag);
        *Flag = P.getValue(2);
      }

      Chain = P.getValue(1);
      Parts[i] = P;

      // If the source register was virtual and if we know something about it,
      // add an assert node.
      if (!TargetRegisterInfo::isVirtualRegister(Regs[Part+i]) ||
          !RegisterVT.isInteger() || RegisterVT.isVector())
        continue;

      const FunctionLoweringInfo::LiveOutInfo *LOI =
        FuncInfo.GetLiveOutRegInfo(Regs[Part+i]);
      if (!LOI)
        continue;

      unsigned RegSize = RegisterVT.getSizeInBits();
      unsigned NumSignBits = LOI->NumSignBits;
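      // KnownZero is a mask of the bits known to be zero, so the number of
      // leading ones in it is the number of high bits known to be zero.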
      unsigned NumZeroBits = LOI->KnownZero.countLeadingOnes();

      if (NumZeroBits == RegSize) {
        // The current value is a zero.
        // Explicitly express that as it would be easier for
        // optimizations to kick in.
        Parts[i] = DAG.getConstant(0, RegisterVT);
        continue;
      }

      // FIXME: We capture more information than the dag can represent. For
      // now, just use the tightest assertzext/assertsext possible.
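      // The ladder below runs from the tightest assertion (i1) to the
      // loosest (i32), alternating sign and zero information, so the
      // smallest provable FromVT wins.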
      bool isSExt = true;
      EVT FromVT(MVT::Other);
      if (NumSignBits == RegSize)
        isSExt = true, FromVT = MVT::i1;   // ASSERT SEXT 1
      else if (NumZeroBits >= RegSize-1)
        isSExt = false, FromVT = MVT::i1;  // ASSERT ZEXT 1
      else if (NumSignBits > RegSize-8)
        isSExt = true, FromVT = MVT::i8;   // ASSERT SEXT 8
      else if (NumZeroBits >= RegSize-8)
        isSExt = false, FromVT = MVT::i8;  // ASSERT ZEXT 8
      else if (NumSignBits > RegSize-16)
        isSExt = true, FromVT = MVT::i16;  // ASSERT SEXT 16
      else if (NumZeroBits >= RegSize-16)
        isSExt = false, FromVT = MVT::i16; // ASSERT ZEXT 16
      else if (NumSignBits > RegSize-32)
        isSExt = true, FromVT = MVT::i32;  // ASSERT SEXT 32
      else if (NumZeroBits >= RegSize-32)
        isSExt = false, FromVT = MVT::i32; // ASSERT ZEXT 32
      else
        continue;

      // Add an assertion node.
      assert(FromVT != MVT::Other);
      Parts[i] = DAG.getNode(isSExt ? ISD::AssertSext : ISD::AssertZext, dl,
                             RegisterVT, P, DAG.getValueType(FromVT));
    }

    Values[Value] = getCopyFromParts(DAG, dl, Parts.begin(),
                                     NumRegs, RegisterVT, ValueVT, V);
    Part += NumRegs;
  }
  Parts.clear();

  return DAG.getNode(ISD::MERGE_VALUES, dl, DAG.getVTList(ValueVTs), Values);
}

static ISD::NodeType getPreferredExtendForValue(const Value *V) {
  // If the source value is used by compare instructions and signed
  // predicates outnumber unsigned ones, prefer to use SIGN_EXTEND.
  //
  // With this optimization we can eliminate some redundant sign and zero
  // extension instructions, and eventually expose more machine CSE
  // opportunities.
  ISD::NodeType ExtendKind = ISD::ANY_EXTEND;
  unsigned int NumOfSigned = 0, NumOfUnsigned = 0;
  for (const User *U : V->users()) {
    if (const CmpInst *CI = dyn_cast<CmpInst>(U)) {
      NumOfSigned += CI->isSigned();
      NumOfUnsigned += CI->isUnsigned();
    }
  }
  if (NumOfSigned > NumOfUnsigned)
    ExtendKind = ISD::SIGN_EXTEND;

  return ExtendKind;
}

/// getCopyToRegs - Emit a series of CopyToReg nodes that copies the
/// specified value into the registers specified by this object. This uses
/// Chain/Flag as the input and updates them for the output Chain/Flag.
/// If the Flag pointer is NULL, no flag is used.
void RegsForValue::getCopyToRegs(SDValue Val, SelectionDAG &DAG, SDLoc dl,
                                 SDValue &Chain, SDValue *Flag,
                                 const Value *V) const {
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  ISD::NodeType ExtendKind = getPreferredExtendForValue(V);

  // Get the list of the value's legal parts.
  unsigned NumRegs = Regs.size();
  SmallVector<SDValue, 8> Parts(NumRegs);
  for (unsigned Value = 0, Part = 0, e = ValueVTs.size(); Value != e; ++Value) {
    EVT ValueVT = ValueVTs[Value];
    unsigned NumParts = TLI.getNumRegisters(*DAG.getContext(), ValueVT);
    MVT RegisterVT = RegVTs[Value];

    if (ExtendKind == ISD::ANY_EXTEND && TLI.isZExtFree(Val, RegisterVT))
      ExtendKind = ISD::ZERO_EXTEND;

    getCopyToParts(DAG, dl, Val.getValue(Val.getResNo() + Value),
                   &Parts[Part], NumParts, RegisterVT, V, ExtendKind);
    Part += NumParts;
  }

  // Copy the parts into the registers.
  SmallVector<SDValue, 8> Chains(NumRegs);
  for (unsigned i = 0; i != NumRegs; ++i) {
    SDValue Part;
    if (!Flag) {
      Part = DAG.getCopyToReg(Chain, dl, Regs[i], Parts[i]);
    } else {
      Part = DAG.getCopyToReg(Chain, dl, Regs[i], Parts[i], *Flag);
      *Flag = Part.getValue(1);
    }

    Chains[i] = Part.getValue(0);
  }

  if (NumRegs == 1 || Flag)
    // If NumRegs > 1 && Flag is used then the use of the last CopyToReg is
    // flagged to it. That is, the CopyToReg nodes and the user are considered
    // a single scheduling unit. If we create a TokenFactor and return it as
    // chain, then the TokenFactor is both a predecessor (operand) of the
    // user as well as a successor (the TF operands are flagged to the user).
    //   c1, f1 = CopyToReg
    //   c2, f2 = CopyToReg
    //   c3     = TokenFactor c1, c2
    //   ...
    //          = op c3, ..., f2
    Chain = Chains[NumRegs-1];
  else
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Chains);
}

/// AddInlineAsmOperands - Add this value to the specified inlineasm node
/// operand list. This adds the code marker and includes the number of
/// values added into it.
void RegsForValue::AddInlineAsmOperands(unsigned Code, bool HasMatching,
                                        unsigned MatchingIdx,
                                        SelectionDAG &DAG,
                                        std::vector<SDValue> &Ops) const {
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();

  unsigned Flag = InlineAsm::getFlagWord(Code, Regs.size());
  if (HasMatching)
    Flag = InlineAsm::getFlagWordForMatchingOp(Flag, MatchingIdx);
  else if (!Regs.empty() &&
           TargetRegisterInfo::isVirtualRegister(Regs.front())) {
    // Put the register class of the virtual registers in the flag word. That
    // way, later passes can recompute register class constraints for inline
    // assembly as well as normal instructions.
    // Don't do this for tied operands that can use the regclass information
    // from the def.
    const MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo();
    const TargetRegisterClass *RC = MRI.getRegClass(Regs.front());
    Flag = InlineAsm::getFlagWordForRegClass(Flag, RC->getID());
  }

  SDValue Res = DAG.getTargetConstant(Flag, MVT::i32);
  Ops.push_back(Res);

  unsigned SP = TLI.getStackPointerRegisterToSaveRestore();
  for (unsigned Value = 0, Reg = 0, e = ValueVTs.size(); Value != e; ++Value) {
    unsigned NumRegs = TLI.getNumRegisters(*DAG.getContext(), ValueVTs[Value]);
    MVT RegisterVT = RegVTs[Value];
    for (unsigned i = 0; i != NumRegs; ++i) {
      assert(Reg < Regs.size() && "Mismatch in # registers expected");
      unsigned TheReg = Regs[Reg++];
      Ops.push_back(DAG.getRegister(TheReg, RegisterVT));

      if (TheReg == SP && Code == InlineAsm::Kind_Clobber) {
        // If we clobbered the stack pointer, MFI should know about it.
        assert(DAG.getMachineFunction().getFrameInfo()->
               hasInlineAsmWithSPAdjust());
      }
    }
  }
}

void SelectionDAGBuilder::init(GCFunctionInfo *gfi, AliasAnalysis &aa,
                               const TargetLibraryInfo *li) {
  AA = &aa;
  GFI = gfi;
  LibInfo = li;
  DL = DAG.getSubtarget().getDataLayout();
  Context = DAG.getContext();
  LPadToCallSiteMap.clear();
}

/// clear - Clear out the current SelectionDAG and the associated
/// state and prepare this SelectionDAGBuilder object to be used
/// for a new block. This doesn't clear out information about
/// additional blocks that are needed to complete switch lowering
/// or PHI node updating; that information is cleared out as it is
/// consumed.
void SelectionDAGBuilder::clear() {
  NodeMap.clear();
  UnusedArgNodeMap.clear();
  PendingLoads.clear();
  PendingExports.clear();
  CurInst = nullptr;
  HasTailCall = false;
  SDNodeOrder = LowestSDNodeOrder;
}

/// clearDanglingDebugInfo - Clear the dangling debug information
/// map. This function is separated from the clear so that debug
/// information that is dangling in a basic block can be properly
/// resolved in a different basic block. This allows the
/// SelectionDAG to resolve dangling debug information attached
/// to PHI nodes.
void SelectionDAGBuilder::clearDanglingDebugInfo() {
  DanglingDebugInfoMap.clear();
}

/// getRoot - Return the current virtual root of the Selection DAG,
/// flushing any PendingLoad items. This must be done before emitting
/// a store or any other node that may need to be ordered after any
/// prior load instructions.
///
SDValue SelectionDAGBuilder::getRoot() {
  if (PendingLoads.empty())
    return DAG.getRoot();

  if (PendingLoads.size() == 1) {
    SDValue Root = PendingLoads[0];
    DAG.setRoot(Root);
    PendingLoads.clear();
    return Root;
  }

  // Otherwise, we have to make a token factor node.
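  // A TokenFactor takes multiple chains and produces one that is ordered
  // after all of them, so every pending load completes before any node
  // rooted on the result.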
  SDValue Root = DAG.getNode(ISD::TokenFactor, getCurSDLoc(), MVT::Other,
                             PendingLoads);
  PendingLoads.clear();
  DAG.setRoot(Root);
  return Root;
}

/// getControlRoot - Similar to getRoot, but instead of flushing all the
/// PendingLoad items, flush all the PendingExports items. It is necessary
/// to do this before emitting a terminator instruction.
///
SDValue SelectionDAGBuilder::getControlRoot() {
  SDValue Root = DAG.getRoot();

  if (PendingExports.empty())
    return Root;

  // Turn all of the CopyToReg chains into one factored node.
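  // Only add the current root explicitly if no pending export already uses
  // it as its chain operand; otherwise the TokenFactor below would reach it
  // transitively anyway.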
  if (Root.getOpcode() != ISD::EntryToken) {
    unsigned i = 0, e = PendingExports.size();
    for (; i != e; ++i) {
      assert(PendingExports[i].getNode()->getNumOperands() > 1);
      if (PendingExports[i].getNode()->getOperand(0) == Root)
        break;  // Don't add the root if we already indirectly depend on it.
    }

    if (i == e)
      PendingExports.push_back(Root);
  }

  Root = DAG.getNode(ISD::TokenFactor, getCurSDLoc(), MVT::Other,
                     PendingExports);
  PendingExports.clear();
  DAG.setRoot(Root);
  return Root;
}

void SelectionDAGBuilder::visit(const Instruction &I) {
  // Set up outgoing PHI node register values before emitting the terminator.
  if (isa<TerminatorInst>(&I))
    HandlePHINodesInSuccessorBlocks(I.getParent());

  ++SDNodeOrder;

  CurInst = &I;

  visit(I.getOpcode(), I);

  if (!isa<TerminatorInst>(&I) && !HasTailCall)
    CopyToExportRegsIfNeeded(&I);

  CurInst = nullptr;
}

void SelectionDAGBuilder::visitPHI(const PHINode &) {
  llvm_unreachable("SelectionDAGBuilder shouldn't visit PHI nodes!");
}

void SelectionDAGBuilder::visit(unsigned Opcode, const User &I) {
  // Note: this doesn't use InstVisitor, because it has to work with
  // ConstantExpr's in addition to instructions.
  switch (Opcode) {
  default: llvm_unreachable("Unknown instruction type encountered!");
    // Build the switch statement using the Instruction.def file.
#define HANDLE_INST(NUM, OPCODE, CLASS) \
    case Instruction::OPCODE: visit##OPCODE((const CLASS&)I); break;
#include "llvm/IR/Instruction.def"
  }
}

// resolveDanglingDebugInfo - if we saw an earlier dbg_value referring to V,
// generate the debug data structures now that we've seen its definition.
void SelectionDAGBuilder::resolveDanglingDebugInfo(const Value *V,
                                                   SDValue Val) {
  DanglingDebugInfo &DDI = DanglingDebugInfoMap[V];
  if (DDI.getDI()) {
    const DbgValueInst *DI = DDI.getDI();
    DebugLoc dl = DDI.getdl();
    unsigned DbgSDNodeOrder = DDI.getSDNodeOrder();
    MDNode *Variable = DI->getVariable();
    uint64_t Offset = DI->getOffset();
    // A dbg.value for an alloca is always indirect.
    bool IsIndirect = isa<AllocaInst>(V) || Offset != 0;
    SDDbgValue *SDV;
    if (Val.getNode()) {
      if (!EmitFuncArgumentDbgValue(V, Variable, Offset, IsIndirect, Val)) {
        SDV = DAG.getDbgValue(Variable, Val.getNode(),
                              Val.getResNo(), IsIndirect,
                              Offset, dl, DbgSDNodeOrder);
        DAG.AddDbgValue(SDV, Val.getNode(), false);
      }
    } else
      DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n");
    DanglingDebugInfoMap[V] = DanglingDebugInfo();
  }
}

/// getValue - Return an SDValue for the given Value.
SDValue SelectionDAGBuilder::getValue(const Value *V) {
  // If we already have an SDValue for this value, use it. It's important
  // to do this first, so that we don't create a CopyFromReg if we already
  // have a regular SDValue.
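  // (NodeMap caches SDValues within the current block, while
  // FuncInfo.ValueMap holds the virtual registers used to carry values
  // across basic-block boundaries.)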
  SDValue &N = NodeMap[V];
  if (N.getNode()) return N;

  // If there's a virtual register allocated and initialized for this
  // value, use it.
  DenseMap<const Value *, unsigned>::iterator It = FuncInfo.ValueMap.find(V);
  if (It != FuncInfo.ValueMap.end()) {
    unsigned InReg = It->second;
    RegsForValue RFV(*DAG.getContext(),
                     *TM.getSubtargetImpl()->getTargetLowering(), InReg,
                     V->getType());
    SDValue Chain = DAG.getEntryNode();
    N = RFV.getCopyFromRegs(DAG, FuncInfo, getCurSDLoc(), Chain, nullptr, V);
    resolveDanglingDebugInfo(V, N);
    return N;
  }

  // Otherwise create a new SDValue and remember it.
  SDValue Val = getValueImpl(V);
  NodeMap[V] = Val;
  resolveDanglingDebugInfo(V, Val);
  return Val;
}

/// getNonRegisterValue - Return an SDValue for the given Value, but
/// don't look in FuncInfo.ValueMap for a virtual register.
SDValue SelectionDAGBuilder::getNonRegisterValue(const Value *V) {
  // If we already have an SDValue for this value, use it.
  SDValue &N = NodeMap[V];
  if (N.getNode()) return N;

  // Otherwise create a new SDValue and remember it.
  SDValue Val = getValueImpl(V);
  NodeMap[V] = Val;
  resolveDanglingDebugInfo(V, Val);
  return Val;
}

/// getValueImpl - Helper function for getValue and getNonRegisterValue.
/// Create an SDValue for the given value.
SDValue SelectionDAGBuilder::getValueImpl(const Value *V) {
  const TargetLowering *TLI = TM.getSubtargetImpl()->getTargetLowering();

  if (const Constant *C = dyn_cast<Constant>(V)) {
    EVT VT = TLI->getValueType(V->getType(), true);

    if (const ConstantInt *CI = dyn_cast<ConstantInt>(C))
      return DAG.getConstant(*CI, VT);

    if (const GlobalValue *GV = dyn_cast<GlobalValue>(C))
      return DAG.getGlobalAddress(GV, getCurSDLoc(), VT);

    if (isa<ConstantPointerNull>(C)) {
      unsigned AS = V->getType()->getPointerAddressSpace();
      return DAG.getConstant(0, TLI->getPointerTy(AS));
    }

    if (const ConstantFP *CFP = dyn_cast<ConstantFP>(C))
      return DAG.getConstantFP(*CFP, VT);

    if (isa<UndefValue>(C) && !V->getType()->isAggregateType())
      return DAG.getUNDEF(VT);

    if (const ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) {
      visit(CE->getOpcode(), *CE);
      SDValue N1 = NodeMap[V];
      assert(N1.getNode() && "visit didn't populate the NodeMap!");
      return N1;
    }

    if (isa<ConstantStruct>(C) || isa<ConstantArray>(C)) {
      SmallVector<SDValue, 4> Constants;
      for (User::const_op_iterator OI = C->op_begin(), OE = C->op_end();
           OI != OE; ++OI) {
        SDNode *Val = getValue(*OI).getNode();
        // If the operand is an empty aggregate, there are no values.
        if (!Val) continue;
        // Add each leaf value from the operand to the Constants list
        // to form a flattened list of all the values.
        for (unsigned i = 0, e = Val->getNumValues(); i != e; ++i)
          Constants.push_back(SDValue(Val, i));
      }

      return DAG.getMergeValues(Constants, getCurSDLoc());
    }

    if (const ConstantDataSequential *CDS =
          dyn_cast<ConstantDataSequential>(C)) {
      SmallVector<SDValue, 4> Ops;
      for (unsigned i = 0, e = CDS->getNumElements(); i != e; ++i) {
        SDNode *Val = getValue(CDS->getElementAsConstant(i)).getNode();
        // Add each leaf value from the operand to the Constants list
        // to form a flattened list of all the values.
        for (unsigned i = 0, e = Val->getNumValues(); i != e; ++i)
          Ops.push_back(SDValue(Val, i));
      }

      if (isa<ArrayType>(CDS->getType()))
        return DAG.getMergeValues(Ops, getCurSDLoc());
      return NodeMap[V] = DAG.getNode(ISD::BUILD_VECTOR, getCurSDLoc(),
                                      VT, Ops);
    }

    if (C->getType()->isStructTy() || C->getType()->isArrayTy()) {
      assert((isa<ConstantAggregateZero>(C) || isa<UndefValue>(C)) &&
             "Unknown struct or array constant!");

      SmallVector<EVT, 4> ValueVTs;
      ComputeValueVTs(*TLI, C->getType(), ValueVTs);
      unsigned NumElts = ValueVTs.size();
      if (NumElts == 0)
        return SDValue(); // empty struct
      SmallVector<SDValue, 4> Constants(NumElts);
      for (unsigned i = 0; i != NumElts; ++i) {
        EVT EltVT = ValueVTs[i];
        if (isa<UndefValue>(C))
          Constants[i] = DAG.getUNDEF(EltVT);
        else if (EltVT.isFloatingPoint())
          Constants[i] = DAG.getConstantFP(0, EltVT);
        else
          Constants[i] = DAG.getConstant(0, EltVT);
      }

      return DAG.getMergeValues(Constants, getCurSDLoc());
    }

    if (const BlockAddress *BA = dyn_cast<BlockAddress>(C))
      return DAG.getBlockAddress(BA, VT);

    VectorType *VecTy = cast<VectorType>(V->getType());
    unsigned NumElements = VecTy->getNumElements();

    // Now that we know the number and type of the elements, get that number of
    // elements into the Ops array based on what kind of constant it is.
    SmallVector<SDValue, 16> Ops;
    if (const ConstantVector *CV = dyn_cast<ConstantVector>(C)) {
      for (unsigned i = 0; i != NumElements; ++i)
        Ops.push_back(getValue(CV->getOperand(i)));
    } else {
      assert(isa<ConstantAggregateZero>(C) && "Unknown vector constant!");
      EVT EltVT = TLI->getValueType(VecTy->getElementType());

      SDValue Op;
      if (EltVT.isFloatingPoint())
        Op = DAG.getConstantFP(0, EltVT);
      else
        Op = DAG.getConstant(0, EltVT);
      Ops.assign(NumElements, Op);
    }

    // Create a BUILD_VECTOR node.
    return NodeMap[V] = DAG.getNode(ISD::BUILD_VECTOR, getCurSDLoc(), VT, Ops);
  }

  // If this is a static alloca, generate it as the frameindex instead of
  // computation.
  if (const AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
    DenseMap<const AllocaInst*, int>::iterator SI =
      FuncInfo.StaticAllocaMap.find(AI);
    if (SI != FuncInfo.StaticAllocaMap.end())
      return DAG.getFrameIndex(SI->second, TLI->getPointerTy());
  }

  // If this is an instruction which fast-isel has deferred, select it now.
  if (const Instruction *Inst = dyn_cast<Instruction>(V)) {
    unsigned InReg = FuncInfo.InitializeRegForValue(Inst);
    RegsForValue RFV(*DAG.getContext(), *TLI, InReg, Inst->getType());
    SDValue Chain = DAG.getEntryNode();
    return RFV.getCopyFromRegs(DAG, FuncInfo, getCurSDLoc(), Chain, nullptr, V);
  }

  llvm_unreachable("Can't get register for value!");
}

void SelectionDAGBuilder::visitRet(const ReturnInst &I) {
  const TargetLowering *TLI = TM.getSubtargetImpl()->getTargetLowering();
  SDValue Chain = getControlRoot();
  SmallVector<ISD::OutputArg, 8> Outs;
  SmallVector<SDValue, 8> OutVals;

  if (!FuncInfo.CanLowerReturn) {
    unsigned DemoteReg = FuncInfo.DemoteRegister;
    const Function *F = I.getParent()->getParent();

    // Emit a store of the return value through the virtual register.
    // Leave Outs empty so that LowerReturn won't try to load return
    // registers the usual way.
    SmallVector<EVT, 1> PtrValueVTs;
    ComputeValueVTs(*TLI, PointerType::getUnqual(F->getReturnType()),
                    PtrValueVTs);

    SDValue RetPtr = DAG.getRegister(DemoteReg, PtrValueVTs[0]);
    SDValue RetOp = getValue(I.getOperand(0));

    SmallVector<EVT, 4> ValueVTs;
    SmallVector<uint64_t, 4> Offsets;
    ComputeValueVTs(*TLI, I.getOperand(0)->getType(), ValueVTs, &Offsets);
    unsigned NumValues = ValueVTs.size();

    SmallVector<SDValue, 4> Chains(NumValues);
    for (unsigned i = 0; i != NumValues; ++i) {
      SDValue Add = DAG.getNode(ISD::ADD, getCurSDLoc(),
                                RetPtr.getValueType(), RetPtr,
                                DAG.getIntPtrConstant(Offsets[i]));
      Chains[i] =
        DAG.getStore(Chain, getCurSDLoc(),
                     SDValue(RetOp.getNode(), RetOp.getResNo() + i),
                     // FIXME: better loc info would be nice.
                     Add, MachinePointerInfo(), false, false, 0);
    }

    Chain = DAG.getNode(ISD::TokenFactor, getCurSDLoc(),
                        MVT::Other, Chains);
  } else if (I.getNumOperands() != 0) {
    SmallVector<EVT, 4> ValueVTs;
    ComputeValueVTs(*TLI, I.getOperand(0)->getType(), ValueVTs);
    unsigned NumValues = ValueVTs.size();
    if (NumValues) {
      SDValue RetOp = getValue(I.getOperand(0));
      for (unsigned j = 0, f = NumValues; j != f; ++j) {
        EVT VT = ValueVTs[j];

        ISD::NodeType ExtendKind = ISD::ANY_EXTEND;

        const Function *F = I.getParent()->getParent();
        if (F->getAttributes().hasAttribute(AttributeSet::ReturnIndex,
                                            Attribute::SExt))
          ExtendKind = ISD::SIGN_EXTEND;
        else if (F->getAttributes().hasAttribute(AttributeSet::ReturnIndex,
                                                 Attribute::ZExt))
          ExtendKind = ISD::ZERO_EXTEND;

        if (ExtendKind != ISD::ANY_EXTEND && VT.isInteger())
          VT = TLI->getTypeForExtArgOrReturn(*DAG.getContext(), VT, ExtendKind);

        unsigned NumParts = TLI->getNumRegisters(*DAG.getContext(), VT);
        MVT PartVT = TLI->getRegisterType(*DAG.getContext(), VT);
        SmallVector<SDValue, 4> Parts(NumParts);
        getCopyToParts(DAG, getCurSDLoc(),
                       SDValue(RetOp.getNode(), RetOp.getResNo() + j),
                       &Parts[0], NumParts, PartVT, &I, ExtendKind);

        // 'inreg' on function refers to return value
        ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy();
        if (F->getAttributes().hasAttribute(AttributeSet::ReturnIndex,
                                            Attribute::InReg))
          Flags.setInReg();

        // Propagate extension type if any
        if (ExtendKind == ISD::SIGN_EXTEND)
          Flags.setSExt();
        else if (ExtendKind == ISD::ZERO_EXTEND)
          Flags.setZExt();

        for (unsigned i = 0; i < NumParts; ++i) {
          Outs.push_back(ISD::OutputArg(Flags, Parts[i].getValueType(),
                                        VT, /*isfixed=*/true, 0, 0));
          OutVals.push_back(Parts[i]);
        }
      }
    }
  }

  bool isVarArg = DAG.getMachineFunction().getFunction()->isVarArg();
  CallingConv::ID CallConv =
    DAG.getMachineFunction().getFunction()->getCallingConv();
  Chain = TM.getSubtargetImpl()->getTargetLowering()->LowerReturn(
      Chain, CallConv, isVarArg, Outs, OutVals, getCurSDLoc(), DAG);

  // Verify that the target's LowerReturn behaved as expected.
  assert(Chain.getNode() && Chain.getValueType() == MVT::Other &&
         "LowerReturn didn't return a valid chain!");

  // Update the DAG with the new chain value resulting from return lowering.
  DAG.setRoot(Chain);
}

/// CopyToExportRegsIfNeeded - If the given value has virtual registers
/// created for it, emit nodes to copy the value into the virtual
/// registers.
void SelectionDAGBuilder::CopyToExportRegsIfNeeded(const Value *V) {
  // Skip empty types
  if (V->getType()->isEmptyTy())
    return;

  DenseMap<const Value *, unsigned>::iterator VMI = FuncInfo.ValueMap.find(V);
  if (VMI != FuncInfo.ValueMap.end()) {
    assert(!V->use_empty() && "Unused value assigned virtual registers!");
    CopyValueToVirtualRegister(V, VMI->second);
  }
}

/// ExportFromCurrentBlock - If this condition isn't known to be exported from
/// the current basic block, add it to ValueMap now so that we'll get a
/// CopyTo/FromReg.
void SelectionDAGBuilder::ExportFromCurrentBlock(const Value *V) {
  // No need to export constants.
  if (!isa<Instruction>(V) && !isa<Argument>(V)) return;

  // Already exported?
  if (FuncInfo.isExportedInst(V)) return;

  unsigned Reg = FuncInfo.InitializeRegForValue(V);
  CopyValueToVirtualRegister(V, Reg);
}

bool SelectionDAGBuilder::isExportableFromCurrentBlock(const Value *V,
                                                       const BasicBlock *FromBB) {
  // The operands of the setcc have to be in this block. We don't know
  // how to export them from some other block.
  if (const Instruction *VI = dyn_cast<Instruction>(V)) {
    // Can export from current BB.
    if (VI->getParent() == FromBB)
      return true;

    // Is already exported, noop.
    return FuncInfo.isExportedInst(V);
  }

  // If this is an argument, we can export it if the BB is the entry block or
  // if it is already exported.
  if (isa<Argument>(V)) {
    if (FromBB == &FromBB->getParent()->getEntryBlock())
      return true;

    // Otherwise, can only export this if it is already exported.
    return FuncInfo.isExportedInst(V);
  }

  // Otherwise, constants can always be exported.
  return true;
}

/// Return branch probability calculated by BranchProbabilityInfo for IR blocks.
uint32_t SelectionDAGBuilder::getEdgeWeight(const MachineBasicBlock *Src,
                                            const MachineBasicBlock *Dst) const {
  BranchProbabilityInfo *BPI = FuncInfo.BPI;
  if (!BPI)
    return 0;
  const BasicBlock *SrcBB = Src->getBasicBlock();
  const BasicBlock *DstBB = Dst->getBasicBlock();
  return BPI->getEdgeWeight(SrcBB, DstBB);
}

void SelectionDAGBuilder::
addSuccessorWithWeight(MachineBasicBlock *Src, MachineBasicBlock *Dst,
                       uint32_t Weight /* = 0 */) {
  if (!Weight)
    Weight = getEdgeWeight(Src, Dst);
  Src->addSuccessor(Dst, Weight);
}

static bool InBlock(const Value *V, const BasicBlock *BB) {
  if (const Instruction *I = dyn_cast<Instruction>(V))
    return I->getParent() == BB;
  return true;
}

/// EmitBranchForMergedCondition - Helper method for FindMergedConditions.
/// This function emits a branch and is used at the leaves of an OR or an
/// AND operator tree.
///
void
SelectionDAGBuilder::EmitBranchForMergedCondition(const Value *Cond,
                                                  MachineBasicBlock *TBB,
                                                  MachineBasicBlock *FBB,
                                                  MachineBasicBlock *CurBB,
                                                  MachineBasicBlock *SwitchBB,
                                                  uint32_t TWeight,
                                                  uint32_t FWeight) {
  const BasicBlock *BB = CurBB->getBasicBlock();

  // If the leaf of the tree is a comparison, merge the condition into
  // the caseblock.
  if (const CmpInst *BOp = dyn_cast<CmpInst>(Cond)) {
    // The operands of the cmp have to be in this block. We don't know
    // how to export them from some other block. If this is the first block
    // of the sequence, no exporting is needed.
    if (CurBB == SwitchBB ||
        (isExportableFromCurrentBlock(BOp->getOperand(0), BB) &&
         isExportableFromCurrentBlock(BOp->getOperand(1), BB))) {
      ISD::CondCode Condition;
      if (const ICmpInst *IC = dyn_cast<ICmpInst>(Cond)) {
        Condition = getICmpCondCode(IC->getPredicate());
      } else if (const FCmpInst *FC = dyn_cast<FCmpInst>(Cond)) {
        Condition = getFCmpCondCode(FC->getPredicate());
        if (TM.Options.NoNaNsFPMath)
          Condition = getFCmpCodeWithoutNaN(Condition);
      } else {
        Condition = ISD::SETEQ; // silence warning.
        llvm_unreachable("Unknown compare instruction");
      }

      CaseBlock CB(Condition, BOp->getOperand(0), BOp->getOperand(1), nullptr,
                   TBB, FBB, CurBB, TWeight, FWeight);
      SwitchCases.push_back(CB);
      return;
    }
  }

  // Create a CaseBlock record representing this branch.
  CaseBlock CB(ISD::SETEQ, Cond, ConstantInt::getTrue(*DAG.getContext()),
               nullptr, TBB, FBB, CurBB, TWeight, FWeight);
  SwitchCases.push_back(CB);
}

/// Scale down both weights to fit into uint32_t.
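/// The scale factor is chosen so that the larger weight divides down to at
/// most UINT32_MAX; both weights are divided by the same factor to preserve
/// their ratio.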
static void ScaleWeights(uint64_t &NewTrue, uint64_t &NewFalse) {
  uint64_t NewMax = (NewTrue > NewFalse) ? NewTrue : NewFalse;
  uint32_t Scale = (NewMax / UINT32_MAX) + 1;
  NewTrue = NewTrue / Scale;
  NewFalse = NewFalse / Scale;
}

/// FindMergedConditions - If Cond is an expression like (X && Y) or (X || Y),
/// lower it into a sequence of conditional branches, one per leaf of the
/// and/or tree, instead of materializing the boolean with setcc and and/or
/// nodes.
void SelectionDAGBuilder::FindMergedConditions(const Value *Cond,
                                               MachineBasicBlock *TBB,
                                               MachineBasicBlock *FBB,
                                               MachineBasicBlock *CurBB,
                                               MachineBasicBlock *SwitchBB,
                                               unsigned Opc, uint32_t TWeight,
                                               uint32_t FWeight) {
  // If this node is not part of the or/and tree, emit it as a branch.
  const Instruction *BOp = dyn_cast<Instruction>(Cond);
  if (!BOp || !(isa<BinaryOperator>(BOp) || isa<CmpInst>(BOp)) ||
      (unsigned)BOp->getOpcode() != Opc || !BOp->hasOneUse() ||
      BOp->getParent() != CurBB->getBasicBlock() ||
      !InBlock(BOp->getOperand(0), CurBB->getBasicBlock()) ||
      !InBlock(BOp->getOperand(1), CurBB->getBasicBlock())) {
    EmitBranchForMergedCondition(Cond, TBB, FBB, CurBB, SwitchBB,
                                 TWeight, FWeight);
    return;
  }

  // Create TmpBB after CurBB.
  MachineFunction::iterator BBI = CurBB;
  MachineFunction &MF = DAG.getMachineFunction();
  MachineBasicBlock *TmpBB = MF.CreateMachineBasicBlock(CurBB->getBasicBlock());
  CurBB->getParent()->insert(++BBI, TmpBB);

  if (Opc == Instruction::Or) {
    // Codegen X | Y as:
    // BB1:
    //   jmp_if_X TBB
    //   jmp TmpBB
    // TmpBB:
    //   jmp_if_Y TBB
    //   jmp FBB
    //

    // We have flexibility in setting Prob for BB1 and Prob for TmpBB.
    // The requirement is that
    //   TrueProb for BB1 + (FalseProb for BB1 * TrueProb for TmpBB)
    //     = TrueProb for original BB.
    // Assuming the original weights are A and B, one choice is to set BB1's
    // weights to A and A+2B, and set TmpBB's weights to A and 2B. This choice
    // assumes that
    //   TrueProb for BB1 == FalseProb for BB1 * TrueProb for TmpBB.
    // Another choice is to assume TrueProb for BB1 equals to TrueProb for
    // TmpBB, but the math is more complicated.
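    // With A = TWeight and B = FWeight, BB1 therefore branches with weights
    // A:(A+2B) and TmpBB with A:2B, making the combined probability of
    // reaching TBB equal to A/(A+B), as in the original block.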

    uint64_t NewTrueWeight = TWeight;
    uint64_t NewFalseWeight = (uint64_t)TWeight + 2 * (uint64_t)FWeight;
    ScaleWeights(NewTrueWeight, NewFalseWeight);
    // Emit the LHS condition.
    FindMergedConditions(BOp->getOperand(0), TBB, TmpBB, CurBB, SwitchBB, Opc,
                         NewTrueWeight, NewFalseWeight);

    NewTrueWeight = TWeight;
    NewFalseWeight = 2 * (uint64_t)FWeight;
    ScaleWeights(NewTrueWeight, NewFalseWeight);
    // Emit the RHS condition into TmpBB.
    FindMergedConditions(BOp->getOperand(1), TBB, FBB, TmpBB, SwitchBB, Opc,
                         NewTrueWeight, NewFalseWeight);
  } else {
    assert(Opc == Instruction::And && "Unknown merge op!");
    // Codegen X & Y as:
    // BB1:
    //   jmp_if_X TmpBB
    //   jmp FBB
    // TmpBB:
    //   jmp_if_Y TBB
    //   jmp FBB
    //
    // This requires creation of TmpBB after CurBB.

    // We have flexibility in setting Prob for BB1 and Prob for TmpBB.
    // The requirement is that
    //   FalseProb for BB1 + (TrueProb for BB1 * FalseProb for TmpBB)
    //     = FalseProb for original BB.
    // Assuming the original weights are A and B, one choice is to set BB1's
    // weights to 2A+B and B, and set TmpBB's weights to 2A and B. This choice
    // assumes that
    //   FalseProb for BB1 == TrueProb for BB1 * FalseProb for TmpBB.
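    // With A = TWeight and B = FWeight, BB1 branches with weights (2A+B):B
    // and TmpBB with 2A:B, making the combined probability of reaching FBB
    // equal to B/(A+B), as in the original block.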

    uint64_t NewTrueWeight = 2 * (uint64_t)TWeight + (uint64_t)FWeight;
    uint64_t NewFalseWeight = FWeight;
    ScaleWeights(NewTrueWeight, NewFalseWeight);
    // Emit the LHS condition.
    FindMergedConditions(BOp->getOperand(0), TmpBB, FBB, CurBB, SwitchBB, Opc,
                         NewTrueWeight, NewFalseWeight);

    NewTrueWeight = 2 * (uint64_t)TWeight;
    NewFalseWeight = FWeight;
    ScaleWeights(NewTrueWeight, NewFalseWeight);
    // Emit the RHS condition into TmpBB.
    FindMergedConditions(BOp->getOperand(1), TBB, FBB, TmpBB, SwitchBB, Opc,
                         NewTrueWeight, NewFalseWeight);
  }
}

/// If the set of cases should be emitted as a series of branches, return true.
/// If we should emit this as a bunch of and/or'd together conditions, return
/// false.
bool
SelectionDAGBuilder::ShouldEmitAsBranches(const std::vector<CaseBlock> &Cases) {
  if (Cases.size() != 2) return true;

  // If this is two comparisons of the same values or'd or and'd together, they
  // will get folded into a single comparison, so don't emit two blocks.
  if ((Cases[0].CmpLHS == Cases[1].CmpLHS &&
       Cases[0].CmpRHS == Cases[1].CmpRHS) ||
      (Cases[0].CmpRHS == Cases[1].CmpLHS &&
       Cases[0].CmpLHS == Cases[1].CmpRHS)) {
    return false;
  }

  // Handle: (X != null) | (Y != null) --> (X|Y) != 0
  // Handle: (X == null) & (Y == null) --> (X|Y) == 0
  if (Cases[0].CmpRHS == Cases[1].CmpRHS &&
      Cases[0].CC == Cases[1].CC &&
      isa<Constant>(Cases[0].CmpRHS) &&
      cast<Constant>(Cases[0].CmpRHS)->isNullValue()) {
    if (Cases[0].CC == ISD::SETEQ && Cases[0].TrueBB == Cases[1].ThisBB)
      return false;
    if (Cases[0].CC == ISD::SETNE && Cases[0].FalseBB == Cases[1].ThisBB)
      return false;
  }

  return true;
}
1581 void SelectionDAGBuilder::visitBr(const BranchInst &I) {
1582 MachineBasicBlock *BrMBB = FuncInfo.MBB;
1584 // Update machine-CFG edges.
1585 MachineBasicBlock *Succ0MBB = FuncInfo.MBBMap[I.getSuccessor(0)];
1587 // Figure out which block is immediately after the current one.
1588 MachineBasicBlock *NextBlock = nullptr;
1589 MachineFunction::iterator BBI = BrMBB;
1590 if (++BBI != FuncInfo.MF->end())
1593 if (I.isUnconditional()) {
1594 // Update machine-CFG edges.
1595 BrMBB->addSuccessor(Succ0MBB);
1597 // If this is not a fall-through branch or optimizations are switched off,
1599 if (Succ0MBB != NextBlock || TM.getOptLevel() == CodeGenOpt::None)
1600 DAG.setRoot(DAG.getNode(ISD::BR, getCurSDLoc(),
1601 MVT::Other, getControlRoot(),
1602 DAG.getBasicBlock(Succ0MBB)));
1607   // If this condition is one of the special cases we handle, do special stuff
1608   // now.
1609 const Value *CondVal = I.getCondition();
1610 MachineBasicBlock *Succ1MBB = FuncInfo.MBBMap[I.getSuccessor(1)];
1612 // If this is a series of conditions that are or'd or and'd together, emit
1613 // this as a sequence of branches instead of setcc's with and/or operations.
1614 // As long as jumps are not expensive, this should improve performance.
1615 // For example, instead of something like:
1628 if (const BinaryOperator *BOp = dyn_cast<BinaryOperator>(CondVal)) {
1629 if (!TM.getSubtargetImpl()->getTargetLowering()->isJumpExpensive() &&
1630 BOp->hasOneUse() && (BOp->getOpcode() == Instruction::And ||
1631 BOp->getOpcode() == Instruction::Or)) {
1632 FindMergedConditions(BOp, Succ0MBB, Succ1MBB, BrMBB, BrMBB,
1633 BOp->getOpcode(), getEdgeWeight(BrMBB, Succ0MBB),
1634 getEdgeWeight(BrMBB, Succ1MBB));
1635 // If the compares in later blocks need to use values not currently
1636 // exported from this block, export them now. This block should always
1637 // be the first entry.
1638 assert(SwitchCases[0].ThisBB == BrMBB && "Unexpected lowering!");
1640 // Allow some cases to be rejected.
1641 if (ShouldEmitAsBranches(SwitchCases)) {
1642 for (unsigned i = 1, e = SwitchCases.size(); i != e; ++i) {
1643 ExportFromCurrentBlock(SwitchCases[i].CmpLHS);
1644 ExportFromCurrentBlock(SwitchCases[i].CmpRHS);
1647 // Emit the branch for this block.
1648 visitSwitchCase(SwitchCases[0], BrMBB);
1649 SwitchCases.erase(SwitchCases.begin());
1653       // Okay, we decided not to do this, remove any inserted MBB's and clear
1654       // SwitchCases.
1655 for (unsigned i = 1, e = SwitchCases.size(); i != e; ++i)
1656 FuncInfo.MF->erase(SwitchCases[i].ThisBB);
1658 SwitchCases.clear();
1662 // Create a CaseBlock record representing this branch.
1663 CaseBlock CB(ISD::SETEQ, CondVal, ConstantInt::getTrue(*DAG.getContext()),
1664 nullptr, Succ0MBB, Succ1MBB, BrMBB);
1666   // Use visitSwitchCase to actually insert the fast branch sequence for this
1667   // cond branch.
1668 visitSwitchCase(CB, BrMBB);
1671 /// visitSwitchCase - Emits the necessary code to represent a single node in
1672 /// the binary search tree resulting from lowering a switch instruction.
1673 void SelectionDAGBuilder::visitSwitchCase(CaseBlock &CB,
1674 MachineBasicBlock *SwitchBB) {
1675   SDValue Cond;
1676   SDValue CondLHS = getValue(CB.CmpLHS);
1677 SDLoc dl = getCurSDLoc();
1679   // Build the setcc now.
1680   if (!CB.CmpMHS) {
1681 // Fold "(X == true)" to X and "(X == false)" to !X to
1682 // handle common cases produced by branch lowering.
1683     if (CB.CmpRHS == ConstantInt::getTrue(*DAG.getContext()) &&
1684         CB.CC == ISD::SETEQ)
1685       Cond = CondLHS;
1686 else if (CB.CmpRHS == ConstantInt::getFalse(*DAG.getContext()) &&
1687 CB.CC == ISD::SETEQ) {
1688 SDValue True = DAG.getConstant(1, CondLHS.getValueType());
1689 Cond = DAG.getNode(ISD::XOR, dl, CondLHS.getValueType(), CondLHS, True);
1691 Cond = DAG.getSetCC(dl, MVT::i1, CondLHS, getValue(CB.CmpRHS), CB.CC);
1693 assert(CB.CC == ISD::SETLE && "Can handle only LE ranges now");
1695 const APInt& Low = cast<ConstantInt>(CB.CmpLHS)->getValue();
1696 const APInt& High = cast<ConstantInt>(CB.CmpRHS)->getValue();
1698 SDValue CmpOp = getValue(CB.CmpMHS);
1699 EVT VT = CmpOp.getValueType();
1701 if (cast<ConstantInt>(CB.CmpLHS)->isMinValue(true)) {
1702       Cond = DAG.getSetCC(dl, MVT::i1, CmpOp, DAG.getConstant(High, VT),
1703                           ISD::SETLE);
1704     } else {
1705 SDValue SUB = DAG.getNode(ISD::SUB, dl,
1706 VT, CmpOp, DAG.getConstant(Low, VT));
1707 Cond = DAG.getSetCC(dl, MVT::i1, SUB,
1708 DAG.getConstant(High-Low, VT), ISD::SETULE);
1712 // Update successor info
1713 addSuccessorWithWeight(SwitchBB, CB.TrueBB, CB.TrueWeight);
1714 // TrueBB and FalseBB are always different unless the incoming IR is
1715 // degenerate. This only happens when running llc on weird IR.
1716 if (CB.TrueBB != CB.FalseBB)
1717 addSuccessorWithWeight(SwitchBB, CB.FalseBB, CB.FalseWeight);
1719 // Set NextBlock to be the MBB immediately after the current one, if any.
1720 // This is used to avoid emitting unnecessary branches to the next block.
1721 MachineBasicBlock *NextBlock = nullptr;
1722 MachineFunction::iterator BBI = SwitchBB;
1723 if (++BBI != FuncInfo.MF->end())
1726 // If the lhs block is the next block, invert the condition so that we can
1727 // fall through to the lhs instead of the rhs block.
1728 if (CB.TrueBB == NextBlock) {
1729 std::swap(CB.TrueBB, CB.FalseBB);
1730 SDValue True = DAG.getConstant(1, Cond.getValueType());
1731 Cond = DAG.getNode(ISD::XOR, dl, Cond.getValueType(), Cond, True);
1734 SDValue BrCond = DAG.getNode(ISD::BRCOND, dl,
1735 MVT::Other, getControlRoot(), Cond,
1736 DAG.getBasicBlock(CB.TrueBB));
1738 // Insert the false branch. Do this even if it's a fall through branch,
1739 // this makes it easier to do DAG optimizations which require inverting
1740 // the branch condition.
1741 BrCond = DAG.getNode(ISD::BR, dl, MVT::Other, BrCond,
1742 DAG.getBasicBlock(CB.FalseBB));
1744 DAG.setRoot(BrCond);
1747 /// visitJumpTable - Emit JumpTable node in the current MBB
1748 void SelectionDAGBuilder::visitJumpTable(JumpTable &JT) {
1749 // Emit the code for the jump table
1750 assert(JT.Reg != -1U && "Should lower JT Header first!");
1751 EVT PTy = TM.getSubtargetImpl()->getTargetLowering()->getPointerTy();
1752 SDValue Index = DAG.getCopyFromReg(getControlRoot(), getCurSDLoc(),
1754 SDValue Table = DAG.getJumpTable(JT.JTI, PTy);
1755 SDValue BrJumpTable = DAG.getNode(ISD::BR_JT, getCurSDLoc(),
1756 MVT::Other, Index.getValue(1),
1758 DAG.setRoot(BrJumpTable);
1761 /// visitJumpTableHeader - This function emits the necessary code to produce an
1762 /// index into the JumpTable from the switch case.
1763 void SelectionDAGBuilder::visitJumpTableHeader(JumpTable &JT,
1764 JumpTableHeader &JTH,
1765 MachineBasicBlock *SwitchBB) {
1766 // Subtract the lowest switch case value from the value being switched on and
1767 // conditional branch to default mbb if the result is greater than the
1768 // difference between smallest and largest cases.
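  // (Illustrative example, editor's addition: for cases 10..17 and switch
  // value X, this computes Index = X - 10 and branches to the default block
  // if Index is unsigned-greater-than 17 - 10 = 7. The single unsigned
  // compare also rules out X < 10, since X - 10 then wraps to a huge value.)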
1769 SDValue SwitchOp = getValue(JTH.SValue);
1770 EVT VT = SwitchOp.getValueType();
1771 SDValue Sub = DAG.getNode(ISD::SUB, getCurSDLoc(), VT, SwitchOp,
1772 DAG.getConstant(JTH.First, VT));
1774 // The SDNode we just created, which holds the value being switched on minus
1775 // the smallest case value, needs to be copied to a virtual register so it
1776 // can be used as an index into the jump table in a subsequent basic block.
1777 // This value may be smaller or larger than the target's pointer type, and
1778   // therefore may require extension or truncation.
1779 const TargetLowering *TLI = TM.getSubtargetImpl()->getTargetLowering();
1780 SwitchOp = DAG.getZExtOrTrunc(Sub, getCurSDLoc(), TLI->getPointerTy());
1782 unsigned JumpTableReg = FuncInfo.CreateReg(TLI->getPointerTy());
1783 SDValue CopyTo = DAG.getCopyToReg(getControlRoot(), getCurSDLoc(),
1784 JumpTableReg, SwitchOp);
1785 JT.Reg = JumpTableReg;
1787 // Emit the range check for the jump table, and branch to the default block
1788 // for the switch statement if the value being switched on exceeds the largest
1789 // case in the switch.
1790 SDValue CMP = DAG.getSetCC(getCurSDLoc(),
1791 TLI->getSetCCResultType(*DAG.getContext(),
1792 Sub.getValueType()),
1794 DAG.getConstant(JTH.Last - JTH.First,VT),
1797 // Set NextBlock to be the MBB immediately after the current one, if any.
1798 // This is used to avoid emitting unnecessary branches to the next block.
1799 MachineBasicBlock *NextBlock = nullptr;
1800 MachineFunction::iterator BBI = SwitchBB;
1802   if (++BBI != FuncInfo.MF->end())
1803     NextBlock = BBI;
1805 SDValue BrCond = DAG.getNode(ISD::BRCOND, getCurSDLoc(),
1806 MVT::Other, CopyTo, CMP,
1807 DAG.getBasicBlock(JT.Default));
1809 if (JT.MBB != NextBlock)
1810 BrCond = DAG.getNode(ISD::BR, getCurSDLoc(), MVT::Other, BrCond,
1811 DAG.getBasicBlock(JT.MBB));
1813 DAG.setRoot(BrCond);
1816 /// Codegen a new tail for a stack protector check ParentMBB which has had its
1817 /// tail spliced into a stack protector check success bb.
1819 /// For a high level explanation of how this fits into the stack protector
1820 /// generation see the comment on the declaration of class
1821 /// StackProtectorDescriptor.
1822 void SelectionDAGBuilder::visitSPDescriptorParent(StackProtectorDescriptor &SPD,
1823 MachineBasicBlock *ParentBB) {
1825 // First create the loads to the guard/stack slot for the comparison.
1826 const TargetLowering *TLI = TM.getSubtargetImpl()->getTargetLowering();
1827 EVT PtrTy = TLI->getPointerTy();
1829 MachineFrameInfo *MFI = ParentBB->getParent()->getFrameInfo();
1830 int FI = MFI->getStackProtectorIndex();
1832 const Value *IRGuard = SPD.getGuard();
1833 SDValue GuardPtr = getValue(IRGuard);
1834 SDValue StackSlotPtr = DAG.getFrameIndex(FI, PtrTy);
1836   unsigned Align =
1837       TLI->getDataLayout()->getPrefTypeAlignment(IRGuard->getType());
1841 // If GuardReg is set and useLoadStackGuardNode returns true, retrieve the
1842 // guard value from the virtual register holding the value. Otherwise, emit a
1843 // volatile load to retrieve the stack guard value.
1844 unsigned GuardReg = SPD.getGuardReg();
1845   SDValue Guard;
1846   if (GuardReg && TLI->useLoadStackGuardNode())
1847     Guard = DAG.getCopyFromReg(DAG.getEntryNode(), getCurSDLoc(), GuardReg,
1848                                PtrTy);
1849   else
1850 Guard = DAG.getLoad(PtrTy, getCurSDLoc(), DAG.getEntryNode(),
1851 GuardPtr, MachinePointerInfo(IRGuard, 0),
1852 true, false, false, Align);
1854 SDValue StackSlot = DAG.getLoad(PtrTy, getCurSDLoc(), DAG.getEntryNode(),
1856 MachinePointerInfo::getFixedStack(FI),
1857 true, false, false, Align);
1859 // Perform the comparison via a subtract/getsetcc.
1860 EVT VT = Guard.getValueType();
1861 SDValue Sub = DAG.getNode(ISD::SUB, getCurSDLoc(), VT, Guard, StackSlot);
1863 SDValue Cmp = DAG.getSetCC(getCurSDLoc(),
1864 TLI->getSetCCResultType(*DAG.getContext(),
1865 Sub.getValueType()),
1866 Sub, DAG.getConstant(0, VT),
1869   // If the sub is not 0, then we know the guard and stack slot are not equal,
1870   // so branch to the failure MBB.
1871 SDValue BrCond = DAG.getNode(ISD::BRCOND, getCurSDLoc(),
1872 MVT::Other, StackSlot.getOperand(0),
1873 Cmp, DAG.getBasicBlock(SPD.getFailureMBB()));
1874 // Otherwise branch to success MBB.
1875   SDValue Br = DAG.getNode(ISD::BR, getCurSDLoc(),
1876                            MVT::Other, BrCond,
1877                            DAG.getBasicBlock(SPD.getSuccessMBB()));
1879   DAG.setRoot(Br);
1880 }
1882 /// Codegen the failure basic block for a stack protector check.
1884 /// A failure stack protector machine basic block consists simply of a call to
1885 /// __stack_chk_fail().
1887 /// For a high level explanation of how this fits into the stack protector
1888 /// generation see the comment on the declaration of class
1889 /// StackProtectorDescriptor.
1890 void
1891 SelectionDAGBuilder::visitSPDescriptorFailure(StackProtectorDescriptor &SPD) {
1892 const TargetLowering *TLI = TM.getSubtargetImpl()->getTargetLowering();
1893 SDValue Chain = TLI->makeLibCall(DAG, RTLIB::STACKPROTECTOR_CHECK_FAIL,
1894 MVT::isVoid, nullptr, 0, false,
1895                                    getCurSDLoc(), false, false).second;
1896   DAG.setRoot(Chain);
1897 }
1899 /// visitBitTestHeader - This function emits the necessary code to produce a
1900 /// value suitable for "bit tests".
1901 void SelectionDAGBuilder::visitBitTestHeader(BitTestBlock &B,
1902 MachineBasicBlock *SwitchBB) {
1903 // Subtract the minimum value
1904 SDValue SwitchOp = getValue(B.SValue);
1905 EVT VT = SwitchOp.getValueType();
1906 SDValue Sub = DAG.getNode(ISD::SUB, getCurSDLoc(), VT, SwitchOp,
1907 DAG.getConstant(B.First, VT));
1910 const TargetLowering *TLI = TM.getSubtargetImpl()->getTargetLowering();
1911 SDValue RangeCmp = DAG.getSetCC(getCurSDLoc(),
1912 TLI->getSetCCResultType(*DAG.getContext(),
1913 Sub.getValueType()),
1914 Sub, DAG.getConstant(B.Range, VT),
1917 // Determine the type of the test operands.
1918 bool UsePtrType = false;
1919   if (!TLI->isTypeLegal(VT))
1920     UsePtrType = true;
1921   else {
1922 for (unsigned i = 0, e = B.Cases.size(); i != e; ++i)
1923 if (!isUIntN(VT.getSizeInBits(), B.Cases[i].Mask)) {
1924         // Switch table case ranges are encoded into a series of masks.
1925         // Just use the pointer type; it's guaranteed to fit.
1931 VT = TLI->getPointerTy();
1932 Sub = DAG.getZExtOrTrunc(Sub, getCurSDLoc(), VT);
1935 B.RegVT = VT.getSimpleVT();
1936 B.Reg = FuncInfo.CreateReg(B.RegVT);
1937 SDValue CopyTo = DAG.getCopyToReg(getControlRoot(), getCurSDLoc(),
1940 // Set NextBlock to be the MBB immediately after the current one, if any.
1941 // This is used to avoid emitting unnecessary branches to the next block.
1942 MachineBasicBlock *NextBlock = nullptr;
1943 MachineFunction::iterator BBI = SwitchBB;
1944   if (++BBI != FuncInfo.MF->end())
1945     NextBlock = BBI;
1947 MachineBasicBlock* MBB = B.Cases[0].ThisBB;
1949 addSuccessorWithWeight(SwitchBB, B.Default);
1950 addSuccessorWithWeight(SwitchBB, MBB);
1952 SDValue BrRange = DAG.getNode(ISD::BRCOND, getCurSDLoc(),
1953 MVT::Other, CopyTo, RangeCmp,
1954 DAG.getBasicBlock(B.Default));
1956 if (MBB != NextBlock)
1957     BrRange = DAG.getNode(ISD::BR, getCurSDLoc(), MVT::Other, BrRange,
1958 DAG.getBasicBlock(MBB));
1960 DAG.setRoot(BrRange);
1963 /// visitBitTestCase - This function produces one "bit test".
1964 void SelectionDAGBuilder::visitBitTestCase(BitTestBlock &BB,
1965 MachineBasicBlock* NextMBB,
1966 uint32_t BranchWeightToNext,
1969 MachineBasicBlock *SwitchBB) {
1971 SDValue ShiftOp = DAG.getCopyFromReg(getControlRoot(), getCurSDLoc(),
1974 unsigned PopCount = CountPopulation_64(B.Mask);
1975 const TargetLowering *TLI = TM.getSubtargetImpl()->getTargetLowering();
1976 if (PopCount == 1) {
1977 // Testing for a single bit; just compare the shift count with what it
1978 // would need to be to shift a 1 bit in that position.
1979 Cmp = DAG.getSetCC(getCurSDLoc(),
1980 TLI->getSetCCResultType(*DAG.getContext(), VT),
1982 DAG.getConstant(countTrailingZeros(B.Mask), VT),
1984 } else if (PopCount == BB.Range) {
1985 // There is only one zero bit in the range, test for it directly.
1986 Cmp = DAG.getSetCC(getCurSDLoc(),
1987 TLI->getSetCCResultType(*DAG.getContext(), VT),
1989 DAG.getConstant(CountTrailingOnes_64(B.Mask), VT),
1992 // Make desired shift
1993 SDValue SwitchVal = DAG.getNode(ISD::SHL, getCurSDLoc(), VT,
1994 DAG.getConstant(1, VT), ShiftOp);
1996 // Emit bit tests and jumps
1997 SDValue AndOp = DAG.getNode(ISD::AND, getCurSDLoc(),
1998 VT, SwitchVal, DAG.getConstant(B.Mask, VT));
1999 Cmp = DAG.getSetCC(getCurSDLoc(),
2000 TLI->getSetCCResultType(*DAG.getContext(), VT),
2001 AndOp, DAG.getConstant(0, VT),
2005 // The branch weight from SwitchBB to B.TargetBB is B.ExtraWeight.
2006 addSuccessorWithWeight(SwitchBB, B.TargetBB, B.ExtraWeight);
2007 // The branch weight from SwitchBB to NextMBB is BranchWeightToNext.
2008 addSuccessorWithWeight(SwitchBB, NextMBB, BranchWeightToNext);
2010 SDValue BrAnd = DAG.getNode(ISD::BRCOND, getCurSDLoc(),
2011 MVT::Other, getControlRoot(),
2012 Cmp, DAG.getBasicBlock(B.TargetBB));
2014 // Set NextBlock to be the MBB immediately after the current one, if any.
2015 // This is used to avoid emitting unnecessary branches to the next block.
2016 MachineBasicBlock *NextBlock = nullptr;
2017 MachineFunction::iterator BBI = SwitchBB;
2018   if (++BBI != FuncInfo.MF->end())
2019     NextBlock = BBI;
2021 if (NextMBB != NextBlock)
2022 BrAnd = DAG.getNode(ISD::BR, getCurSDLoc(), MVT::Other, BrAnd,
2023 DAG.getBasicBlock(NextMBB));
2028 void SelectionDAGBuilder::visitInvoke(const InvokeInst &I) {
2029 MachineBasicBlock *InvokeMBB = FuncInfo.MBB;
2031 // Retrieve successors.
2032 MachineBasicBlock *Return = FuncInfo.MBBMap[I.getSuccessor(0)];
2033 MachineBasicBlock *LandingPad = FuncInfo.MBBMap[I.getSuccessor(1)];
2035 const Value *Callee(I.getCalledValue());
2036 const Function *Fn = dyn_cast<Function>(Callee);
2037   if (isa<InlineAsm>(Callee))
2038     visitInlineAsm(&I);
2039 else if (Fn && Fn->isIntrinsic()) {
2040 assert(Fn->getIntrinsicID() == Intrinsic::donothing);
2041 // Ignore invokes to @llvm.donothing: jump directly to the next BB.
2042   } else
2043     LowerCallTo(&I, getValue(Callee), false, LandingPad);
2045 // If the value of the invoke is used outside of its defining block, make it
2046 // available as a virtual register.
2047 CopyToExportRegsIfNeeded(&I);
2049 // Update successor info
2050 addSuccessorWithWeight(InvokeMBB, Return);
2051 addSuccessorWithWeight(InvokeMBB, LandingPad);
2053 // Drop into normal successor.
2054 DAG.setRoot(DAG.getNode(ISD::BR, getCurSDLoc(),
2055 MVT::Other, getControlRoot(),
2056 DAG.getBasicBlock(Return)));
2059 void SelectionDAGBuilder::visitResume(const ResumeInst &RI) {
2060 llvm_unreachable("SelectionDAGBuilder shouldn't visit resume instructions!");
2063 void SelectionDAGBuilder::visitLandingPad(const LandingPadInst &LP) {
2064 assert(FuncInfo.MBB->isLandingPad() &&
2065 "Call to landingpad not in landing pad!");
2067 MachineBasicBlock *MBB = FuncInfo.MBB;
2068 MachineModuleInfo &MMI = DAG.getMachineFunction().getMMI();
2069 AddLandingPadInfo(LP, MMI, MBB);
2071 // If there aren't registers to copy the values into (e.g., during SjLj
2072 // exceptions), then don't bother to create these DAG nodes.
2073 const TargetLowering *TLI = TM.getSubtargetImpl()->getTargetLowering();
2074 if (TLI->getExceptionPointerRegister() == 0 &&
2075       TLI->getExceptionSelectorRegister() == 0)
2076     return;
2078 SmallVector<EVT, 2> ValueVTs;
2079 ComputeValueVTs(*TLI, LP.getType(), ValueVTs);
2080 assert(ValueVTs.size() == 2 && "Only two-valued landingpads are supported");
2082 // Get the two live-in registers as SDValues. The physregs have already been
2083 // copied into virtual registers.
2084   SDValue Ops[2];
2085   Ops[0] = DAG.getZExtOrTrunc(
2086 DAG.getCopyFromReg(DAG.getEntryNode(), getCurSDLoc(),
2087 FuncInfo.ExceptionPointerVirtReg, TLI->getPointerTy()),
2088 getCurSDLoc(), ValueVTs[0]);
2089 Ops[1] = DAG.getZExtOrTrunc(
2090 DAG.getCopyFromReg(DAG.getEntryNode(), getCurSDLoc(),
2091 FuncInfo.ExceptionSelectorVirtReg, TLI->getPointerTy()),
2092 getCurSDLoc(), ValueVTs[1]);
2095 SDValue Res = DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(),
2096 DAG.getVTList(ValueVTs), Ops);
2100 /// handleSmallSwitchRange - Emit a series of specific tests (suitable for
2101 /// small case ranges).
2102 bool SelectionDAGBuilder::handleSmallSwitchRange(CaseRec& CR,
2103 CaseRecVector& WorkList,
2105 MachineBasicBlock *Default,
2106 MachineBasicBlock *SwitchBB) {
2107 // Size is the number of Cases represented by this range.
2108   size_t Size = CR.Range.second - CR.Range.first;
2109   if (Size > 3)
2110     return false;
2112 // Get the MachineFunction which holds the current MBB. This is used when
2113 // inserting any additional MBBs necessary to represent the switch.
2114 MachineFunction *CurMF = FuncInfo.MF;
2116 // Figure out which block is immediately after the current one.
2117 MachineBasicBlock *NextBlock = nullptr;
2118 MachineFunction::iterator BBI = CR.CaseBB;
2120   if (++BBI != FuncInfo.MF->end())
2121     NextBlock = BBI;
2123 BranchProbabilityInfo *BPI = FuncInfo.BPI;
2124   // If any two of the cases have the same destination, and if one value
2125 // is the same as the other, but has one bit unset that the other has set,
2126 // use bit manipulation to do two compares at once. For example:
2127 // "if (X == 6 || X == 4)" -> "if ((X|2) == 6)"
2128 // TODO: This could be extended to merge any 2 cases in switches with 3 cases.
2129 // TODO: Handle cases where CR.CaseBB != SwitchBB.
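  // (Editor's walk-through of the example above: 4 = 0b100 and 6 = 0b110
  // differ in exactly one bit, so CommonBit = 6 & ~4 = 2 below, and
  // (X | 2) == 6 holds precisely when X == 4 or X == 6.)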
2130 if (Size == 2 && CR.CaseBB == SwitchBB) {
2131 Case &Small = *CR.Range.first;
2132 Case &Big = *(CR.Range.second-1);
2134 if (Small.Low == Small.High && Big.Low == Big.High && Small.BB == Big.BB) {
2135 const APInt& SmallValue = cast<ConstantInt>(Small.Low)->getValue();
2136 const APInt& BigValue = cast<ConstantInt>(Big.Low)->getValue();
2138 // Check that there is only one bit different.
2139 if (BigValue.countPopulation() == SmallValue.countPopulation() + 1 &&
2140 (SmallValue | BigValue) == BigValue) {
2141 // Isolate the common bit.
2142 APInt CommonBit = BigValue & ~SmallValue;
2143 assert((SmallValue | CommonBit) == BigValue &&
2144 CommonBit.countPopulation() == 1 && "Not a common bit?");
2146 SDValue CondLHS = getValue(SV);
2147 EVT VT = CondLHS.getValueType();
2148 SDLoc DL = getCurSDLoc();
2150 SDValue Or = DAG.getNode(ISD::OR, DL, VT, CondLHS,
2151 DAG.getConstant(CommonBit, VT));
2152 SDValue Cond = DAG.getSetCC(DL, MVT::i1,
2153 Or, DAG.getConstant(BigValue, VT),
2156 // Update successor info.
2157 // Both Small and Big will jump to Small.BB, so we sum up the weights.
2158 addSuccessorWithWeight(SwitchBB, Small.BB,
2159 Small.ExtraWeight + Big.ExtraWeight);
2160 addSuccessorWithWeight(SwitchBB, Default,
2161 // The default destination is the first successor in IR.
2162 BPI ? BPI->getEdgeWeight(SwitchBB->getBasicBlock(), (unsigned)0) : 0);
2164 // Insert the true branch.
2165 SDValue BrCond = DAG.getNode(ISD::BRCOND, DL, MVT::Other,
2166 getControlRoot(), Cond,
2167 DAG.getBasicBlock(Small.BB));
2169 // Insert the false branch.
2170 BrCond = DAG.getNode(ISD::BR, DL, MVT::Other, BrCond,
2171 DAG.getBasicBlock(Default));
2173 DAG.setRoot(BrCond);
2179 // Order cases by weight so the most likely case will be checked first.
2180 uint32_t UnhandledWeights = 0;
2182 for (CaseItr I = CR.Range.first, IE = CR.Range.second; I != IE; ++I) {
2183 uint32_t IWeight = I->ExtraWeight;
2184 UnhandledWeights += IWeight;
2185 for (CaseItr J = CR.Range.first; J < I; ++J) {
2186 uint32_t JWeight = J->ExtraWeight;
2187       if (IWeight > JWeight)
2188         std::swap(*I, *J);
2192 // Rearrange the case blocks so that the last one falls through if possible.
2193 Case &BackCase = *(CR.Range.second-1);
2194   if (Size > 1 &&
2195       NextBlock && Default != NextBlock && BackCase.BB != NextBlock) {
2196 // The last case block won't fall through into 'NextBlock' if we emit the
2197 // branches in this order. See if rearranging a case value would help.
2198 // We start at the bottom as it's the case with the least weight.
2199 for (Case *I = &*(CR.Range.second-2), *E = &*CR.Range.first-1; I != E; --I)
2200 if (I->BB == NextBlock) {
2201 std::swap(*I, BackCase);
2206 // Create a CaseBlock record representing a conditional branch to
2207   // the Case's target mbb if the value being switched on SV is equal
2208   // to the Case's value.
2209 MachineBasicBlock *CurBlock = CR.CaseBB;
2210 for (CaseItr I = CR.Range.first, E = CR.Range.second; I != E; ++I) {
2211 MachineBasicBlock *FallThrough;
2213 FallThrough = CurMF->CreateMachineBasicBlock(CurBlock->getBasicBlock());
2214 CurMF->insert(BBI, FallThrough);
2216 // Put SV in a virtual register to make it available from the new blocks.
2217 ExportFromCurrentBlock(SV);
2219 // If the last case doesn't match, go to the default block.
2220 FallThrough = Default;
2223     const Value *RHS, *LHS, *MHS;
2224     ISD::CondCode CC;
2225     if (I->High == I->Low) {
2226       // This is just a small case range containing exactly one case.
2227       CC = ISD::SETEQ;
2228       LHS = SV; RHS = I->High; MHS = nullptr;
2229     } else {
2230       CC = ISD::SETLE;
2231       LHS = I->Low; MHS = SV; RHS = I->High;
2232     }
2234     // The false weight is the sum of the weights of all unhandled cases.
2235 UnhandledWeights -= I->ExtraWeight;
2236 CaseBlock CB(CC, LHS, RHS, MHS, /* truebb */ I->BB, /* falsebb */ FallThrough,
2238 /* trueweight */ I->ExtraWeight,
2239 /* falseweight */ UnhandledWeights);
2241 // If emitting the first comparison, just call visitSwitchCase to emit the
2242 // code into the current block. Otherwise, push the CaseBlock onto the
2243 // vector to be later processed by SDISel, and insert the node's MBB
2244 // before the next MBB.
2245 if (CurBlock == SwitchBB)
2246 visitSwitchCase(CB, SwitchBB);
2248 SwitchCases.push_back(CB);
2250 CurBlock = FallThrough;
2256 static inline bool areJTsAllowed(const TargetLowering &TLI) {
2257 return TLI.supportJumpTables() &&
2258 (TLI.isOperationLegalOrCustom(ISD::BR_JT, MVT::Other) ||
2259 TLI.isOperationLegalOrCustom(ISD::BRIND, MVT::Other));
2262 static APInt ComputeRange(const APInt &First, const APInt &Last) {
2263 uint32_t BitWidth = std::max(Last.getBitWidth(), First.getBitWidth()) + 1;
2264 APInt LastExt = Last.sext(BitWidth), FirstExt = First.sext(BitWidth);
2265 return (LastExt - FirstExt + 1ULL);
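// (Editor's example: ComputeRange treats the bounds as signed, so for
// First = -2 and Last = 2 it sign-extends one bit wider and returns
// 2 - (-2) + 1 = 5, with no risk of overflow at the original bit width.)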
2268 /// handleJTSwitchCase - Emit a jump table for the current switch case range.
2269 bool SelectionDAGBuilder::handleJTSwitchCase(CaseRec &CR,
2270 CaseRecVector &WorkList,
2272 MachineBasicBlock *Default,
2273 MachineBasicBlock *SwitchBB) {
2274 Case& FrontCase = *CR.Range.first;
2275 Case& BackCase = *(CR.Range.second-1);
2277 const APInt &First = cast<ConstantInt>(FrontCase.Low)->getValue();
2278 const APInt &Last = cast<ConstantInt>(BackCase.High)->getValue();
2280 APInt TSize(First.getBitWidth(), 0);
2281   for (CaseItr I = CR.Range.first, E = CR.Range.second; I != E; ++I)
2282     TSize += I->size();
2284 const TargetLowering *TLI = TM.getSubtargetImpl()->getTargetLowering();
2285 if (!areJTsAllowed(*TLI) || TSize.ult(TLI->getMinimumJumpTableEntries()))
2288 APInt Range = ComputeRange(First, Last);
2289 // The density is TSize / Range. Require at least 40%.
2290 // It should not be possible for IntTSize to saturate for sane code, but make
2291 // sure we handle Range saturation correctly.
2292 uint64_t IntRange = Range.getLimitedValue(UINT64_MAX/10);
2293 uint64_t IntTSize = TSize.getLimitedValue(UINT64_MAX/10);
2294   if (IntTSize * 10 < IntRange * 4)
2295     return false;
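  // (Editor's example: four cases {0, 1, 2, 100} give TSize = 4 and
  // Range = 101, so 4 * 10 < 101 * 4 and the jump table is rejected; ten
  // contiguous cases 0..9 give 10 * 10 >= 10 * 4 and pass the 40% test.)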
2297 DEBUG(dbgs() << "Lowering jump table\n"
2298 << "First entry: " << First << ". Last entry: " << Last << '\n'
2299 << "Range: " << Range << ". Size: " << TSize << ".\n\n");
2301 // Get the MachineFunction which holds the current MBB. This is used when
2302 // inserting any additional MBBs necessary to represent the switch.
2303 MachineFunction *CurMF = FuncInfo.MF;
2305 // Figure out which block is immediately after the current one.
2306 MachineFunction::iterator BBI = CR.CaseBB;
2309 const BasicBlock *LLVMBB = CR.CaseBB->getBasicBlock();
2311 // Create a new basic block to hold the code for loading the address
2312 // of the jump table, and jumping to it. Update successor information;
2313   // we will either branch to the default case for the switch, or the jump
2314   // table.
2315 MachineBasicBlock *JumpTableBB = CurMF->CreateMachineBasicBlock(LLVMBB);
2316 CurMF->insert(BBI, JumpTableBB);
2318 addSuccessorWithWeight(CR.CaseBB, Default);
2319 addSuccessorWithWeight(CR.CaseBB, JumpTableBB);
2321 // Build a vector of destination BBs, corresponding to each target
2322 // of the jump table. If the value of the jump table slot corresponds to
2323   // a case statement, push the case's BB onto the vector; otherwise, push
2324   // the default BB.
2325   std::vector<MachineBasicBlock*> DestBBs;
2326   APInt TEI = First;
2327   for (CaseItr I = CR.Range.first, E = CR.Range.second; I != E; ++TEI) {
2328 const APInt &Low = cast<ConstantInt>(I->Low)->getValue();
2329 const APInt &High = cast<ConstantInt>(I->High)->getValue();
2331 if (Low.sle(TEI) && TEI.sle(High)) {
2332 DestBBs.push_back(I->BB);
2336 DestBBs.push_back(Default);
2340 // Calculate weight for each unique destination in CR.
2341 DenseMap<MachineBasicBlock*, uint32_t> DestWeights;
2343 for (CaseItr I = CR.Range.first, E = CR.Range.second; I != E; ++I) {
2344 DenseMap<MachineBasicBlock*, uint32_t>::iterator Itr =
2345 DestWeights.find(I->BB);
2346 if (Itr != DestWeights.end())
2347 Itr->second += I->ExtraWeight;
2349 DestWeights[I->BB] = I->ExtraWeight;
2352 // Update successor info. Add one edge to each unique successor.
2353 BitVector SuccsHandled(CR.CaseBB->getParent()->getNumBlockIDs());
2354 for (std::vector<MachineBasicBlock*>::iterator I = DestBBs.begin(),
2355 E = DestBBs.end(); I != E; ++I) {
2356 if (!SuccsHandled[(*I)->getNumber()]) {
2357 SuccsHandled[(*I)->getNumber()] = true;
2358 DenseMap<MachineBasicBlock*, uint32_t>::iterator Itr =
2359 DestWeights.find(*I);
2360 addSuccessorWithWeight(JumpTableBB, *I,
2361 Itr != DestWeights.end() ? Itr->second : 0);
2365 // Create a jump table index for this jump table.
2366 unsigned JTEncoding = TLI->getJumpTableEncoding();
2367 unsigned JTI = CurMF->getOrCreateJumpTableInfo(JTEncoding)
2368 ->createJumpTableIndex(DestBBs);
2370 // Set the jump table information so that we can codegen it as a second
2371 // MachineBasicBlock
2372 JumpTable JT(-1U, JTI, JumpTableBB, Default);
2373 JumpTableHeader JTH(First, Last, SV, CR.CaseBB, (CR.CaseBB == SwitchBB));
2374 if (CR.CaseBB == SwitchBB)
2375 visitJumpTableHeader(JT, JTH, SwitchBB);
2377 JTCases.push_back(JumpTableBlock(JTH, JT));
2381 /// handleBTSplitSwitchCase - Emit comparison and split binary search tree into
2382 /// two subtrees.
2383 bool SelectionDAGBuilder::handleBTSplitSwitchCase(CaseRec& CR,
2384 CaseRecVector& WorkList,
2386 MachineBasicBlock* Default,
2387 MachineBasicBlock* SwitchBB) {
2388 // Get the MachineFunction which holds the current MBB. This is used when
2389 // inserting any additional MBBs necessary to represent the switch.
2390 MachineFunction *CurMF = FuncInfo.MF;
2392 // Figure out which block is immediately after the current one.
2393 MachineFunction::iterator BBI = CR.CaseBB;
2396 Case& FrontCase = *CR.Range.first;
2397 Case& BackCase = *(CR.Range.second-1);
2398 const BasicBlock *LLVMBB = CR.CaseBB->getBasicBlock();
2400 // Size is the number of Cases represented by this range.
2401 unsigned Size = CR.Range.second - CR.Range.first;
2403 const APInt &First = cast<ConstantInt>(FrontCase.Low)->getValue();
2404 const APInt &Last = cast<ConstantInt>(BackCase.High)->getValue();
2406 CaseItr Pivot = CR.Range.first + Size/2;
2408 // Select optimal pivot, maximizing sum density of LHS and RHS. This will
2409   // (heuristically) allow us to emit JumpTables later.
2410 APInt TSize(First.getBitWidth(), 0);
2411   for (CaseItr I = CR.Range.first, E = CR.Range.second;
2412        I!=E; ++I)
2413     TSize += I->size();
2415 APInt LSize = FrontCase.size();
2416 APInt RSize = TSize-LSize;
2417 DEBUG(dbgs() << "Selecting best pivot: \n"
2418 << "First: " << First << ", Last: " << Last <<'\n'
2419 << "LSize: " << LSize << ", RSize: " << RSize << '\n');
2420   for (CaseItr I = CR.Range.first, J=I+1, E = CR.Range.second;
2421        J!=E; ++I, ++J) {
2422 const APInt &LEnd = cast<ConstantInt>(I->High)->getValue();
2423 const APInt &RBegin = cast<ConstantInt>(J->Low)->getValue();
2424 APInt Range = ComputeRange(LEnd, RBegin);
2425 assert((Range - 2ULL).isNonNegative() &&
2426 "Invalid case distance");
2427 // Use volatile double here to avoid excess precision issues on some hosts,
2428 // e.g. that use 80-bit X87 registers.
2429 volatile double LDensity =
2430 (double)LSize.roundToDouble() /
2431 (LEnd - First + 1ULL).roundToDouble();
2432 volatile double RDensity =
2433 (double)RSize.roundToDouble() /
2434 (Last - RBegin + 1ULL).roundToDouble();
2435 volatile double Metric = Range.logBase2()*(LDensity+RDensity);
2436 // Should always split in some non-trivial place
2437 DEBUG(dbgs() <<"=>Step\n"
2438 << "LEnd: " << LEnd << ", RBegin: " << RBegin << '\n'
2439 << "LDensity: " << LDensity
2440 << ", RDensity: " << RDensity << '\n'
2441 << "Metric: " << Metric << '\n');
2442     if (FMetric < Metric) {
2443       Pivot = J;
2444       FMetric = Metric;
2445 DEBUG(dbgs() << "Current metric set to: " << FMetric << '\n');
2452 const TargetLowering *TLI = TM.getSubtargetImpl()->getTargetLowering();
2453 if (areJTsAllowed(*TLI)) {
2454 // If our case is dense we *really* should handle it earlier!
2455 assert((FMetric > 0) && "Should handle dense range earlier!");
2456   } else {
2457     Pivot = CR.Range.first + Size/2;
2458   }
2460 CaseRange LHSR(CR.Range.first, Pivot);
2461 CaseRange RHSR(Pivot, CR.Range.second);
2462 const Constant *C = Pivot->Low;
2463 MachineBasicBlock *FalseBB = nullptr, *TrueBB = nullptr;
2465 // We know that we branch to the LHS if the Value being switched on is
2466 // less than the Pivot value, C. We use this to optimize our binary
2467 // tree a bit, by recognizing that if SV is greater than or equal to the
2468 // LHS's Case Value, and that Case Value is exactly one less than the
2469 // Pivot's Value, then we can branch directly to the LHS's Target,
2470 // rather than creating a leaf node for it.
2471 if ((LHSR.second - LHSR.first) == 1 &&
2472 LHSR.first->High == CR.GE &&
2473 cast<ConstantInt>(C)->getValue() ==
2474 (cast<ConstantInt>(CR.GE)->getValue() + 1LL)) {
2475 TrueBB = LHSR.first->BB;
2476   } else {
2477     TrueBB = CurMF->CreateMachineBasicBlock(LLVMBB);
2478 CurMF->insert(BBI, TrueBB);
2479 WorkList.push_back(CaseRec(TrueBB, C, CR.GE, LHSR));
2481 // Put SV in a virtual register to make it available from the new blocks.
2482 ExportFromCurrentBlock(SV);
2485 // Similar to the optimization above, if the Value being switched on is
2486 // known to be less than the Constant CR.LT, and the current Case Value
2487 // is CR.LT - 1, then we can branch directly to the target block for
2488 // the current Case Value, rather than emitting a RHS leaf node for it.
2489 if ((RHSR.second - RHSR.first) == 1 && CR.LT &&
2490 cast<ConstantInt>(RHSR.first->Low)->getValue() ==
2491 (cast<ConstantInt>(CR.LT)->getValue() - 1LL)) {
2492 FalseBB = RHSR.first->BB;
2493   } else {
2494     FalseBB = CurMF->CreateMachineBasicBlock(LLVMBB);
2495 CurMF->insert(BBI, FalseBB);
2496 WorkList.push_back(CaseRec(FalseBB,CR.LT,C,RHSR));
2498 // Put SV in a virtual register to make it available from the new blocks.
2499 ExportFromCurrentBlock(SV);
2502 // Create a CaseBlock record representing a conditional branch to
2503 // the LHS node if the value being switched on SV is less than C.
2504   // Otherwise, branch to RHS.
2505 CaseBlock CB(ISD::SETLT, SV, C, nullptr, TrueBB, FalseBB, CR.CaseBB);
2507 if (CR.CaseBB == SwitchBB)
2508 visitSwitchCase(CB, SwitchBB);
2510 SwitchCases.push_back(CB);
2515 /// handleBitTestsSwitchCase - if the current case range has few destinations
2516 /// and the range spans less than the machine word bitwidth, encode the case
2517 /// range into a series of masks and emit bit tests with these masks.
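/// (Editor's example: cases 1, 3 and 5 all branching to the same block, with
/// minimum value 1, become the single mask 0b10101; the emitted test is then
/// roughly "if ((1 << (X - 1)) & 0b10101) goto dest".)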
2518 bool SelectionDAGBuilder::handleBitTestsSwitchCase(CaseRec& CR,
2519 CaseRecVector& WorkList,
2521 MachineBasicBlock* Default,
2522 MachineBasicBlock* SwitchBB) {
2523 const TargetLowering *TLI = TM.getSubtargetImpl()->getTargetLowering();
2524 EVT PTy = TLI->getPointerTy();
2525 unsigned IntPtrBits = PTy.getSizeInBits();
2527 Case& FrontCase = *CR.Range.first;
2528 Case& BackCase = *(CR.Range.second-1);
2530 // Get the MachineFunction which holds the current MBB. This is used when
2531 // inserting any additional MBBs necessary to represent the switch.
2532 MachineFunction *CurMF = FuncInfo.MF;
2534   // If the target does not have legal shift left, do not emit bit tests at all.
2535   if (!TLI->isOperationLegal(ISD::SHL, PTy))
2536     return false;
2538   size_t numCmps = 0;
2539   for (CaseItr I = CR.Range.first, E = CR.Range.second;
2540        I!=E; ++I) {
2541     // A single case counts as one comparison, a case range as two.
2542     numCmps += (I->Low == I->High ? 1 : 2);
2543   }
2545 // Count unique destinations
2546 SmallSet<MachineBasicBlock*, 4> Dests;
2547 for (CaseItr I = CR.Range.first, E = CR.Range.second; I!=E; ++I) {
2548 Dests.insert(I->BB);
2549 if (Dests.size() > 3)
2550       // Don't bother with the code below if there are too many unique destinations.
2551       return false;
2552   }
2553 DEBUG(dbgs() << "Total number of unique destinations: "
2554 << Dests.size() << '\n'
2555 << "Total number of comparisons: " << numCmps << '\n');
2557 // Compute span of values.
2558 const APInt& minValue = cast<ConstantInt>(FrontCase.Low)->getValue();
2559 const APInt& maxValue = cast<ConstantInt>(BackCase.High)->getValue();
2560 APInt cmpRange = maxValue - minValue;
2562 DEBUG(dbgs() << "Compare range: " << cmpRange << '\n'
2563 << "Low bound: " << minValue << '\n'
2564 << "High bound: " << maxValue << '\n');
2566 if (cmpRange.uge(IntPtrBits) ||
2567 (!(Dests.size() == 1 && numCmps >= 3) &&
2568 !(Dests.size() == 2 && numCmps >= 5) &&
2569 !(Dests.size() >= 3 && numCmps >= 6)))
2572 DEBUG(dbgs() << "Emitting bit tests\n");
2573 APInt lowBound = APInt::getNullValue(cmpRange.getBitWidth());
2575 // Optimize the case where all the case values fit in a
2576 // word without having to subtract minValue. In this case,
2577   // we can avoid the subtraction entirely.
2578 if (minValue.isNonNegative() && maxValue.slt(IntPtrBits)) {
2579 cmpRange = maxValue;
2580   } else {
2581     lowBound = minValue;
2582   }
2584 CaseBitsVector CasesBits;
2585 unsigned i, count = 0;
2587 for (CaseItr I = CR.Range.first, E = CR.Range.second; I!=E; ++I) {
2588 MachineBasicBlock* Dest = I->BB;
2589 for (i = 0; i < count; ++i)
2590       if (Dest == CasesBits[i].BB)
2591         break;
2593     if (i == count) {
2594       assert((count < 3) && "Too many destinations to test!");
2595       CasesBits.push_back(CaseBits(0, Dest, 0, 0/*Weight*/));
2596       count++;
2597     }
2599 const APInt& lowValue = cast<ConstantInt>(I->Low)->getValue();
2600 const APInt& highValue = cast<ConstantInt>(I->High)->getValue();
2602 uint64_t lo = (lowValue - lowBound).getZExtValue();
2603 uint64_t hi = (highValue - lowBound).getZExtValue();
2604 CasesBits[i].ExtraWeight += I->ExtraWeight;
2606 for (uint64_t j = lo; j <= hi; j++) {
2607 CasesBits[i].Mask |= 1ULL << j;
2608 CasesBits[i].Bits++;
2612 std::sort(CasesBits.begin(), CasesBits.end(), CaseBitsCmp());
2616 // Figure out which block is immediately after the current one.
2617 MachineFunction::iterator BBI = CR.CaseBB;
2620 const BasicBlock *LLVMBB = CR.CaseBB->getBasicBlock();
2622 DEBUG(dbgs() << "Cases:\n");
2623 for (unsigned i = 0, e = CasesBits.size(); i!=e; ++i) {
2624 DEBUG(dbgs() << "Mask: " << CasesBits[i].Mask
2625 << ", Bits: " << CasesBits[i].Bits
2626 << ", BB: " << CasesBits[i].BB << '\n');
2628 MachineBasicBlock *CaseBB = CurMF->CreateMachineBasicBlock(LLVMBB);
2629 CurMF->insert(BBI, CaseBB);
2630 BTC.push_back(BitTestCase(CasesBits[i].Mask,
2632 CasesBits[i].BB, CasesBits[i].ExtraWeight));
2634 // Put SV in a virtual register to make it available from the new blocks.
2635 ExportFromCurrentBlock(SV);
2638 BitTestBlock BTB(lowBound, cmpRange, SV,
2639 -1U, MVT::Other, (CR.CaseBB == SwitchBB),
2640 CR.CaseBB, Default, BTC);
2642 if (CR.CaseBB == SwitchBB)
2643 visitBitTestHeader(BTB, SwitchBB);
2645 BitTestCases.push_back(BTB);
2650 /// Clusterify - Transform a simple list of Cases into a list of CaseRanges.
2651 size_t SelectionDAGBuilder::Clusterify(CaseVector& Cases,
2652                                        const SwitchInst& SI) {
2653   size_t numCmps = 0;
2655   BranchProbabilityInfo *BPI = FuncInfo.BPI;
2656 // Start with "simple" cases
2657   for (SwitchInst::ConstCaseIt i = SI.case_begin(), e = SI.case_end();
2658        i != e; ++i) {
2659 const BasicBlock *SuccBB = i.getCaseSuccessor();
2660 MachineBasicBlock *SMBB = FuncInfo.MBBMap[SuccBB];
2662 uint32_t ExtraWeight =
2663 BPI ? BPI->getEdgeWeight(SI.getParent(), i.getSuccessorIndex()) : 0;
2665 Cases.push_back(Case(i.getCaseValue(), i.getCaseValue(),
2666 SMBB, ExtraWeight));
2668 std::sort(Cases.begin(), Cases.end(), CaseCmp());
2670   // Merge cases into clusters.
2671 if (Cases.size() >= 2)
2672 // Must recompute end() each iteration because it may be
2673 // invalidated by erase if we hold on to it
2674 for (CaseItr I = Cases.begin(), J = std::next(Cases.begin());
2675 J != Cases.end(); ) {
2676 const APInt& nextValue = cast<ConstantInt>(J->Low)->getValue();
2677 const APInt& currentValue = cast<ConstantInt>(I->High)->getValue();
2678 MachineBasicBlock* nextBB = J->BB;
2679 MachineBasicBlock* currentBB = I->BB;
2681 // If the two neighboring cases go to the same destination, merge them
2682 // into a single case.
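      // (Editor's example: "case 1: goto BB2; case 2: goto BB2" arrives here
      // as two single-value cases and is merged into one range case
      // [1, 2] -> BB2 whose weight is the sum of the two edge weights.)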
2683     if ((nextValue - currentValue == 1) && (currentBB == nextBB)) {
2684       I->High = J->High;
2685       I->ExtraWeight += J->ExtraWeight;
2686       J = Cases.erase(J);
2687     } else {
2688       I = J++;
2689     }
2690   }
2692 for (CaseItr I=Cases.begin(), E=Cases.end(); I!=E; ++I, ++numCmps) {
2693 if (I->Low != I->High)
2694       // A range counts double, since it requires two compares.
2695       ++numCmps;
2696   }
2698   return numCmps;
2699 }
2701 void SelectionDAGBuilder::UpdateSplitBlock(MachineBasicBlock *First,
2702 MachineBasicBlock *Last) {
2703   // Update JTCases.
2704   for (unsigned i = 0, e = JTCases.size(); i != e; ++i)
2705 if (JTCases[i].first.HeaderBB == First)
2706 JTCases[i].first.HeaderBB = Last;
2708 // Update BitTestCases.
2709 for (unsigned i = 0, e = BitTestCases.size(); i != e; ++i)
2710 if (BitTestCases[i].Parent == First)
2711 BitTestCases[i].Parent = Last;
2714 void SelectionDAGBuilder::visitSwitch(const SwitchInst &SI) {
2715 MachineBasicBlock *SwitchMBB = FuncInfo.MBB;
2717 // Figure out which block is immediately after the current one.
2718 MachineBasicBlock *NextBlock = nullptr;
2719 MachineBasicBlock *Default = FuncInfo.MBBMap[SI.getDefaultDest()];
2721 // If there is only the default destination, branch to it if it is not the
2722 // next basic block. Otherwise, just fall through.
2723 if (!SI.getNumCases()) {
2724 // Update machine-CFG edges.
2726 // If this is not a fall-through branch, emit the branch.
2727 SwitchMBB->addSuccessor(Default);
2728 if (Default != NextBlock)
2729 DAG.setRoot(DAG.getNode(ISD::BR, getCurSDLoc(),
2730 MVT::Other, getControlRoot(),
2731 DAG.getBasicBlock(Default)));
2736 // If there are any non-default case statements, create a vector of Cases
2737 // representing each one, and sort the vector so that we can efficiently
2738 // create a binary search tree from them.
2739   CaseVector Cases;
2740   size_t numCmps = Clusterify(Cases, SI);
2741 DEBUG(dbgs() << "Clusterify finished. Total clusters: " << Cases.size()
2742 << ". Total compares: " << numCmps << '\n');
2745 // Get the Value to be switched on and default basic blocks, which will be
2746   // inserted into CaseBlock records, representing basic blocks in the binary
2747   // search tree.
2748 const Value *SV = SI.getCondition();
2750 // Push the initial CaseRec onto the worklist
2751 CaseRecVector WorkList;
2752 WorkList.push_back(CaseRec(SwitchMBB,nullptr,nullptr,
2753 CaseRange(Cases.begin(),Cases.end())));
2755 while (!WorkList.empty()) {
2756 // Grab a record representing a case range to process off the worklist
2757 CaseRec CR = WorkList.back();
2758 WorkList.pop_back();
2760 if (handleBitTestsSwitchCase(CR, WorkList, SV, Default, SwitchMBB))
2763     // If the range has few cases (two or fewer) emit a series of specific
2764     // tests.
2765 if (handleSmallSwitchRange(CR, WorkList, SV, Default, SwitchMBB))
2768 // If the switch has more than N blocks, and is at least 40% dense, and the
2769 // target supports indirect branches, then emit a jump table rather than
2770 // lowering the switch to a binary tree of conditional branches.
2771     // N defaults to 4 and is controlled via TLI.getMinimumJumpTableEntries().
2772 if (handleJTSwitchCase(CR, WorkList, SV, Default, SwitchMBB))
2775 // Emit binary tree. We need to pick a pivot, and push left and right ranges
2776     // onto the worklist. Leaves are handled via the handleSmallSwitchRange() call.
2777 handleBTSplitSwitchCase(CR, WorkList, SV, Default, SwitchMBB);
2781 void SelectionDAGBuilder::visitIndirectBr(const IndirectBrInst &I) {
2782 MachineBasicBlock *IndirectBrMBB = FuncInfo.MBB;
2784 // Update machine-CFG edges with unique successors.
2785 SmallSet<BasicBlock*, 32> Done;
2786 for (unsigned i = 0, e = I.getNumSuccessors(); i != e; ++i) {
2787 BasicBlock *BB = I.getSuccessor(i);
2788     bool Inserted = Done.insert(BB);
2789     if (!Inserted)
2790       continue;
2792 MachineBasicBlock *Succ = FuncInfo.MBBMap[BB];
2793 addSuccessorWithWeight(IndirectBrMBB, Succ);
2796 DAG.setRoot(DAG.getNode(ISD::BRIND, getCurSDLoc(),
2797 MVT::Other, getControlRoot(),
2798 getValue(I.getAddress())));
2801 void SelectionDAGBuilder::visitUnreachable(const UnreachableInst &I) {
2802 if (DAG.getTarget().Options.TrapUnreachable)
2803 DAG.setRoot(DAG.getNode(ISD::TRAP, getCurSDLoc(), MVT::Other, DAG.getRoot()));
2806 void SelectionDAGBuilder::visitFSub(const User &I) {
2807 // -0.0 - X --> fneg
2808 Type *Ty = I.getType();
2809 if (isa<Constant>(I.getOperand(0)) &&
2810 I.getOperand(0) == ConstantFP::getZeroValueForNegation(Ty)) {
2811 SDValue Op2 = getValue(I.getOperand(1));
2812 setValue(&I, DAG.getNode(ISD::FNEG, getCurSDLoc(),
2813                              Op2.getValueType(), Op2));
2814     return;
2815   }
2817 visitBinary(I, ISD::FSUB);
2820 void SelectionDAGBuilder::visitBinary(const User &I, unsigned OpCode) {
2821 SDValue Op1 = getValue(I.getOperand(0));
2822   SDValue Op2 = getValue(I.getOperand(1));
2824   bool nuw = false;
2825   bool nsw = false;
2826   bool exact = false;
2827 if (const OverflowingBinaryOperator *OFBinOp =
2828 dyn_cast<const OverflowingBinaryOperator>(&I)) {
2829 nuw = OFBinOp->hasNoUnsignedWrap();
2830 nsw = OFBinOp->hasNoSignedWrap();
2832 if (const PossiblyExactOperator *ExactOp =
2833 dyn_cast<const PossiblyExactOperator>(&I))
2834 exact = ExactOp->isExact();
2836 SDValue BinNodeValue = DAG.getNode(OpCode, getCurSDLoc(), Op1.getValueType(),
2837 Op1, Op2, nuw, nsw, exact);
2838 setValue(&I, BinNodeValue);
2841 void SelectionDAGBuilder::visitShift(const User &I, unsigned Opcode) {
2842 SDValue Op1 = getValue(I.getOperand(0));
2843 SDValue Op2 = getValue(I.getOperand(1));
2845 EVT ShiftTy = TM.getSubtargetImpl()->getTargetLowering()->getShiftAmountTy(
2846 Op2.getValueType());
2848 // Coerce the shift amount to the right type if we can.
2849 if (!I.getType()->isVectorTy() && Op2.getValueType() != ShiftTy) {
2850 unsigned ShiftSize = ShiftTy.getSizeInBits();
2851 unsigned Op2Size = Op2.getValueType().getSizeInBits();
2852 SDLoc DL = getCurSDLoc();
2854 // If the operand is smaller than the shift count type, promote it.
2855 if (ShiftSize > Op2Size)
2856 Op2 = DAG.getNode(ISD::ZERO_EXTEND, DL, ShiftTy, Op2);
2858 // If the operand is larger than the shift count type but the shift
2859 // count type has enough bits to represent any shift value, truncate
2860 // it now. This is a common case and it exposes the truncate to
2861 // optimization early.
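    // (Editor's illustration: a 64-bit value only ever needs shift amounts
    // 0..63, which fit in Log2_32_Ceil(64) = 6 bits, so truncating a wider
    // shift count to any type of at least six bits loses nothing.)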
2862 else if (ShiftSize >= Log2_32_Ceil(Op2.getValueType().getSizeInBits()))
2863 Op2 = DAG.getNode(ISD::TRUNCATE, DL, ShiftTy, Op2);
2864 // Otherwise we'll need to temporarily settle for some other convenient
2865 // type. Type legalization will make adjustments once the shiftee is split.
2867       Op2 = DAG.getZExtOrTrunc(Op2, DL, MVT::i32);
2868   }
2870   bool nuw = false;
2871   bool nsw = false;
2872   bool exact = false;
2874   if (Opcode == ISD::SRL || Opcode == ISD::SRA || Opcode == ISD::SHL) {
2876 if (const OverflowingBinaryOperator *OFBinOp =
2877 dyn_cast<const OverflowingBinaryOperator>(&I)) {
2878 nuw = OFBinOp->hasNoUnsignedWrap();
2879 nsw = OFBinOp->hasNoSignedWrap();
2881 if (const PossiblyExactOperator *ExactOp =
2882 dyn_cast<const PossiblyExactOperator>(&I))
2883 exact = ExactOp->isExact();
2886   SDValue Res = DAG.getNode(Opcode, getCurSDLoc(), Op1.getValueType(), Op1, Op2,
2887                             nuw, nsw, exact);
2889   setValue(&I, Res);
2890 }
2891 void SelectionDAGBuilder::visitSDiv(const User &I) {
2892 SDValue Op1 = getValue(I.getOperand(0));
2893 SDValue Op2 = getValue(I.getOperand(1));
2895 // Turn exact SDivs into multiplications.
2896   // FIXME: This should be in DAGCombiner, but it doesn't have access to the
2897   // exact bit.
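  // (Editor's example of the transform, not from the original source: for an
  // i8 "sdiv exact %x, 3", the multiplicative inverse of 3 mod 2^8 is 171,
  // since 3 * 171 = 513 = 2*256 + 1, so the division can be emitted as
  // "mul %x, 171"; exactness guarantees the quotient is recovered without a
  // shift-and-round sequence.)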
2898 if (isa<BinaryOperator>(&I) && cast<BinaryOperator>(&I)->isExact() &&
2899 !isa<ConstantSDNode>(Op1) &&
2900 isa<ConstantSDNode>(Op2) && !cast<ConstantSDNode>(Op2)->isNullValue())
2901 setValue(&I, TM.getSubtargetImpl()->getTargetLowering()->BuildExactSDIV(
2902 Op1, Op2, getCurSDLoc(), DAG));
2903   else
2904     setValue(&I, DAG.getNode(ISD::SDIV, getCurSDLoc(), Op1.getValueType(),
2908 void SelectionDAGBuilder::visitICmp(const User &I) {
2909 ICmpInst::Predicate predicate = ICmpInst::BAD_ICMP_PREDICATE;
2910 if (const ICmpInst *IC = dyn_cast<ICmpInst>(&I))
2911 predicate = IC->getPredicate();
2912 else if (const ConstantExpr *IC = dyn_cast<ConstantExpr>(&I))
2913 predicate = ICmpInst::Predicate(IC->getPredicate());
2914 SDValue Op1 = getValue(I.getOperand(0));
2915 SDValue Op2 = getValue(I.getOperand(1));
2916 ISD::CondCode Opcode = getICmpCondCode(predicate);
2919 TM.getSubtargetImpl()->getTargetLowering()->getValueType(I.getType());
2920 setValue(&I, DAG.getSetCC(getCurSDLoc(), DestVT, Op1, Op2, Opcode));
2923 void SelectionDAGBuilder::visitFCmp(const User &I) {
2924 FCmpInst::Predicate predicate = FCmpInst::BAD_FCMP_PREDICATE;
2925 if (const FCmpInst *FC = dyn_cast<FCmpInst>(&I))
2926 predicate = FC->getPredicate();
2927 else if (const ConstantExpr *FC = dyn_cast<ConstantExpr>(&I))
2928 predicate = FCmpInst::Predicate(FC->getPredicate());
2929 SDValue Op1 = getValue(I.getOperand(0));
2930 SDValue Op2 = getValue(I.getOperand(1));
2931 ISD::CondCode Condition = getFCmpCondCode(predicate);
2932 if (TM.Options.NoNaNsFPMath)
2933 Condition = getFCmpCodeWithoutNaN(Condition);
2935 TM.getSubtargetImpl()->getTargetLowering()->getValueType(I.getType());
2936 setValue(&I, DAG.getSetCC(getCurSDLoc(), DestVT, Op1, Op2, Condition));
2939 void SelectionDAGBuilder::visitSelect(const User &I) {
2940 SmallVector<EVT, 4> ValueVTs;
2941 ComputeValueVTs(*TM.getSubtargetImpl()->getTargetLowering(), I.getType(),
2943 unsigned NumValues = ValueVTs.size();
2944 if (NumValues == 0) return;
2946 SmallVector<SDValue, 4> Values(NumValues);
2947 SDValue Cond = getValue(I.getOperand(0));
2948 SDValue TrueVal = getValue(I.getOperand(1));
2949 SDValue FalseVal = getValue(I.getOperand(2));
2950 ISD::NodeType OpCode = Cond.getValueType().isVector() ?
2951 ISD::VSELECT : ISD::SELECT;
2953 for (unsigned i = 0; i != NumValues; ++i)
2954 Values[i] = DAG.getNode(OpCode, getCurSDLoc(),
2955 TrueVal.getNode()->getValueType(TrueVal.getResNo()+i),
2957 SDValue(TrueVal.getNode(),
2958 TrueVal.getResNo() + i),
2959 SDValue(FalseVal.getNode(),
2960 FalseVal.getResNo() + i));
2962 setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(),
2963 DAG.getVTList(ValueVTs), Values));
2966 void SelectionDAGBuilder::visitTrunc(const User &I) {
2967 // TruncInst cannot be a no-op cast because sizeof(src) > sizeof(dest).
2968 SDValue N = getValue(I.getOperand(0));
2970 TM.getSubtargetImpl()->getTargetLowering()->getValueType(I.getType());
2971 setValue(&I, DAG.getNode(ISD::TRUNCATE, getCurSDLoc(), DestVT, N));
2974 void SelectionDAGBuilder::visitZExt(const User &I) {
2975 // ZExt cannot be a no-op cast because sizeof(src) < sizeof(dest).
2976   // ZExt also can't be a cast to bool for the same reason. So, nothing much to do.
2977 SDValue N = getValue(I.getOperand(0));
2979 TM.getSubtargetImpl()->getTargetLowering()->getValueType(I.getType());
2980 setValue(&I, DAG.getNode(ISD::ZERO_EXTEND, getCurSDLoc(), DestVT, N));
2983 void SelectionDAGBuilder::visitSExt(const User &I) {
2984 // SExt cannot be a no-op cast because sizeof(src) < sizeof(dest).
2985   // SExt also can't be a cast to bool for the same reason. So, nothing much to do.
2986 SDValue N = getValue(I.getOperand(0));
2988 TM.getSubtargetImpl()->getTargetLowering()->getValueType(I.getType());
2989 setValue(&I, DAG.getNode(ISD::SIGN_EXTEND, getCurSDLoc(), DestVT, N));
2992 void SelectionDAGBuilder::visitFPTrunc(const User &I) {
2993 // FPTrunc is never a no-op cast, no need to check
2994 SDValue N = getValue(I.getOperand(0));
2995 const TargetLowering *TLI = TM.getSubtargetImpl()->getTargetLowering();
2996 EVT DestVT = TLI->getValueType(I.getType());
2997   setValue(&I, DAG.getNode(ISD::FP_ROUND, getCurSDLoc(),
2998                            DestVT, N,
2999                            DAG.getTargetConstant(0, TLI->getPointerTy())));
3002 void SelectionDAGBuilder::visitFPExt(const User &I) {
3003 // FPExt is never a no-op cast, no need to check
3004 SDValue N = getValue(I.getOperand(0));
3006 TM.getSubtargetImpl()->getTargetLowering()->getValueType(I.getType());
3007 setValue(&I, DAG.getNode(ISD::FP_EXTEND, getCurSDLoc(), DestVT, N));
3010 void SelectionDAGBuilder::visitFPToUI(const User &I) {
3011 // FPToUI is never a no-op cast, no need to check
3012 SDValue N = getValue(I.getOperand(0));
3014 TM.getSubtargetImpl()->getTargetLowering()->getValueType(I.getType());
3015 setValue(&I, DAG.getNode(ISD::FP_TO_UINT, getCurSDLoc(), DestVT, N));
3018 void SelectionDAGBuilder::visitFPToSI(const User &I) {
3019 // FPToSI is never a no-op cast, no need to check
3020 SDValue N = getValue(I.getOperand(0));
3022 TM.getSubtargetImpl()->getTargetLowering()->getValueType(I.getType());
3023 setValue(&I, DAG.getNode(ISD::FP_TO_SINT, getCurSDLoc(), DestVT, N));
3026 void SelectionDAGBuilder::visitUIToFP(const User &I) {
3027 // UIToFP is never a no-op cast, no need to check
3028 SDValue N = getValue(I.getOperand(0));
3030 TM.getSubtargetImpl()->getTargetLowering()->getValueType(I.getType());
3031 setValue(&I, DAG.getNode(ISD::UINT_TO_FP, getCurSDLoc(), DestVT, N));
3034 void SelectionDAGBuilder::visitSIToFP(const User &I) {
3035 // SIToFP is never a no-op cast, no need to check
3036 SDValue N = getValue(I.getOperand(0));
3038 TM.getSubtargetImpl()->getTargetLowering()->getValueType(I.getType());
3039 setValue(&I, DAG.getNode(ISD::SINT_TO_FP, getCurSDLoc(), DestVT, N));
3042 void SelectionDAGBuilder::visitPtrToInt(const User &I) {
3043 // What to do depends on the size of the integer and the size of the pointer.
3044 // We can either truncate, zero extend, or no-op, accordingly.
3045 SDValue N = getValue(I.getOperand(0));
3047 TM.getSubtargetImpl()->getTargetLowering()->getValueType(I.getType());
3048 setValue(&I, DAG.getZExtOrTrunc(N, getCurSDLoc(), DestVT));
3051 void SelectionDAGBuilder::visitIntToPtr(const User &I) {
3052 // What to do depends on the size of the integer and the size of the pointer.
3053 // We can either truncate, zero extend, or no-op, accordingly.
3054 SDValue N = getValue(I.getOperand(0));
3056 TM.getSubtargetImpl()->getTargetLowering()->getValueType(I.getType());
3057 setValue(&I, DAG.getZExtOrTrunc(N, getCurSDLoc(), DestVT));
3060 void SelectionDAGBuilder::visitBitCast(const User &I) {
3061 SDValue N = getValue(I.getOperand(0));
3063 TM.getSubtargetImpl()->getTargetLowering()->getValueType(I.getType());
3065   // BitCast assures us that source and destination are the same size, so this is
3066 // either a BITCAST or a no-op.
3067 if (DestVT != N.getValueType())
3068 setValue(&I, DAG.getNode(ISD::BITCAST, getCurSDLoc(),
3069 DestVT, N)); // convert types.
3070   // Check if the original LLVM IR operand was a ConstantInt, because getValue()
3071   // might fold any kind of constant expression to an integer constant and that
3072   // is not what we are looking for. Only recognize a bitcast of a genuine
3073   // constant integer as an opaque constant.
3074 else if(ConstantInt *C = dyn_cast<ConstantInt>(I.getOperand(0)))
3075     setValue(&I, DAG.getConstant(C->getValue(), DestVT, /*isTarget=*/false,
3076                                  /*isOpaque*/true));
3077   else
3078     setValue(&I, N);            // noop cast.
3081 void SelectionDAGBuilder::visitAddrSpaceCast(const User &I) {
3082 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3083 const Value *SV = I.getOperand(0);
3084 SDValue N = getValue(SV);
3086 TM.getSubtargetImpl()->getTargetLowering()->getValueType(I.getType());
3088 unsigned SrcAS = SV->getType()->getPointerAddressSpace();
3089 unsigned DestAS = I.getType()->getPointerAddressSpace();
3091 if (!TLI.isNoopAddrSpaceCast(SrcAS, DestAS))
3092 N = DAG.getAddrSpaceCast(getCurSDLoc(), DestVT, N, SrcAS, DestAS);
3097 void SelectionDAGBuilder::visitInsertElement(const User &I) {
3098 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3099 SDValue InVec = getValue(I.getOperand(0));
3100 SDValue InVal = getValue(I.getOperand(1));
3101 SDValue InIdx = DAG.getSExtOrTrunc(getValue(I.getOperand(2)),
3102 getCurSDLoc(), TLI.getVectorIdxTy());
3104 DAG.getNode(ISD::INSERT_VECTOR_ELT, getCurSDLoc(),
3105 TM.getSubtargetImpl()->getTargetLowering()->getValueType(
3107 InVec, InVal, InIdx));
3110 void SelectionDAGBuilder::visitExtractElement(const User &I) {
3111 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3112 SDValue InVec = getValue(I.getOperand(0));
3113 SDValue InIdx = DAG.getSExtOrTrunc(getValue(I.getOperand(1)),
3114 getCurSDLoc(), TLI.getVectorIdxTy());
3116 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, getCurSDLoc(),
3117 TM.getSubtargetImpl()->getTargetLowering()->getValueType(
3122 // Utility for visitShuffleVector - Return true if every element in Mask,
3123 // beginning from position Pos and ending in Pos+Size, falls within the
3124 // specified sequential range [L, L+Size), or is undef.
3125 static bool isSequentialInRange(const SmallVectorImpl<int> &Mask,
3126 unsigned Pos, unsigned Size, int Low) {
3127 for (unsigned i = Pos, e = Pos+Size; i != e; ++i, ++Low)
3128     if (Mask[i] >= 0 && Mask[i] != Low)
3129       return false;
3130   return true;
3131 }
3133 void SelectionDAGBuilder::visitShuffleVector(const User &I) {
3134 SDValue Src1 = getValue(I.getOperand(0));
3135 SDValue Src2 = getValue(I.getOperand(1));
3137 SmallVector<int, 8> Mask;
3138 ShuffleVectorInst::getShuffleMask(cast<Constant>(I.getOperand(2)), Mask);
3139 unsigned MaskNumElts = Mask.size();
3141 const TargetLowering *TLI = TM.getSubtargetImpl()->getTargetLowering();
3142 EVT VT = TLI->getValueType(I.getType());
3143 EVT SrcVT = Src1.getValueType();
3144 unsigned SrcNumElts = SrcVT.getVectorNumElements();
3146 if (SrcNumElts == MaskNumElts) {
3147 setValue(&I, DAG.getVectorShuffle(VT, getCurSDLoc(), Src1, Src2,
3152 // Normalize the shuffle vector since mask and vector length don't match.
3153 if (SrcNumElts < MaskNumElts && MaskNumElts % SrcNumElts == 0) {
3154     // Mask is longer than the source vectors and is a multiple of the source
3155     // vectors.  We can use concatenation to make the vector lengths match the
3156     // mask length.
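    // (Editor's example: for two <2 x i32> sources, mask <0,1,2,3> selects
    // Src1 then Src2 and becomes CONCAT_VECTORS(Src1, Src2); mask <2,3,0,1>
    // becomes CONCAT_VECTORS(Src2, Src1).)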
3157 if (SrcNumElts*2 == MaskNumElts) {
3158 // First check for Src1 in low and Src2 in high
3159 if (isSequentialInRange(Mask, 0, SrcNumElts, 0) &&
3160 isSequentialInRange(Mask, SrcNumElts, SrcNumElts, SrcNumElts)) {
3161 // The shuffle is concatenating two vectors together.
3162 setValue(&I, DAG.getNode(ISD::CONCAT_VECTORS, getCurSDLoc(),
3166 // Then check for Src2 in low and Src1 in high
3167 if (isSequentialInRange(Mask, 0, SrcNumElts, SrcNumElts) &&
3168 isSequentialInRange(Mask, SrcNumElts, SrcNumElts, 0)) {
3169 // The shuffle is concatenating two vectors together.
3170 setValue(&I, DAG.getNode(ISD::CONCAT_VECTORS, getCurSDLoc(),
3176 // Pad both vectors with undefs to make them the same length as the mask.
3177 unsigned NumConcat = MaskNumElts / SrcNumElts;
3178 bool Src1U = Src1.getOpcode() == ISD::UNDEF;
3179 bool Src2U = Src2.getOpcode() == ISD::UNDEF;
3180 SDValue UndefVal = DAG.getUNDEF(SrcVT);
3182 SmallVector<SDValue, 8> MOps1(NumConcat, UndefVal);
3183 SmallVector<SDValue, 8> MOps2(NumConcat, UndefVal);
3184 MOps1[0] = Src1;
3185 MOps2[0] = Src2;
3187 Src1 = Src1U ? DAG.getUNDEF(VT) : DAG.getNode(ISD::CONCAT_VECTORS,
3188 getCurSDLoc(), VT, MOps1);
3189 Src2 = Src2U ? DAG.getUNDEF(VT) : DAG.getNode(ISD::CONCAT_VECTORS,
3190 getCurSDLoc(), VT, MOps2);
3192 // Readjust mask for new input vector length.
3193 SmallVector<int, 8> MappedOps;
3194 for (unsigned i = 0; i != MaskNumElts; ++i) {
3195 int Idx = Mask[i];
3196 if (Idx >= (int)SrcNumElts)
3197 Idx -= SrcNumElts - MaskNumElts;
3198 MappedOps.push_back(Idx);
3199 }
3201 setValue(&I, DAG.getVectorShuffle(VT, getCurSDLoc(), Src1, Src2,
3202 &MappedOps[0]));
3203 return;
3204 }
3206 if (SrcNumElts > MaskNumElts) {
3207 // Analyze the access pattern of the vector to see if we can extract
3208 // two subvectors and do the shuffle. The analysis is done by calculating
3209 // the range of elements the mask accesses in both vectors.
3210 int MinRange[2] = { static_cast<int>(SrcNumElts),
3211 static_cast<int>(SrcNumElts)};
3212 int MaxRange[2] = {-1, -1};
3214 for (unsigned i = 0; i != MaskNumElts; ++i) {
3215 int Idx = Mask[i];
3216 unsigned Input = 0;
3217 if (Idx < 0)
3218 continue;
3220 if (Idx >= (int)SrcNumElts) {
3221 Input = 1;
3222 Idx -= SrcNumElts;
3223 }
3224 if (Idx > MaxRange[Input])
3225 MaxRange[Input] = Idx;
3226 if (Idx < MinRange[Input])
3227 MinRange[Input] = Idx;
3228 }
3230 // Check if the access is smaller than the vector size and whether we can
3231 // find a reasonable extract index.
3232 int RangeUse[2] = { -1, -1 }; // 0 = Unused, 1 = Extract, -1 = Cannot
3233 // extract.
3234 int StartIdx[2]; // StartIdx to extract from
3235 for (unsigned Input = 0; Input < 2; ++Input) {
3236 if (MinRange[Input] >= (int)SrcNumElts && MaxRange[Input] < 0) {
3237 RangeUse[Input] = 0; // Unused
3238 StartIdx[Input] = 0;
3239 continue;
3240 }
3242 // Find a good start index that is a multiple of the mask length. Then
3243 // see if the rest of the elements are in range.
3244 StartIdx[Input] = (MinRange[Input]/MaskNumElts)*MaskNumElts;
3245 if (MaxRange[Input] - StartIdx[Input] < (int)MaskNumElts &&
3246 StartIdx[Input] + MaskNumElts <= SrcNumElts)
3247 RangeUse[Input] = 1; // Extract from a multiple of the mask length.
3248 }
3250 if (RangeUse[0] == 0 && RangeUse[1] == 0) {
3251 setValue(&I, DAG.getUNDEF(VT)); // Vectors are not used.
3252 return;
3253 }
3254 if (RangeUse[0] >= 0 && RangeUse[1] >= 0) {
3255 // Extract appropriate subvector and generate a vector shuffle
3256 for (unsigned Input = 0; Input < 2; ++Input) {
3257 SDValue &Src = Input == 0 ? Src1 : Src2;
3258 if (RangeUse[Input] == 0)
3259 Src = DAG.getUNDEF(VT);
3260 else
3261 Src = DAG.getNode(ISD::EXTRACT_SUBVECTOR, getCurSDLoc(), VT,
3262 Src, DAG.getConstant(StartIdx[Input],
3263 TLI->getVectorIdxTy()));
3264 }
3266 // Calculate new mask.
3267 SmallVector<int, 8> MappedOps;
3268 for (unsigned i = 0; i != MaskNumElts; ++i) {
3269 int Idx = Mask[i];
3270 if (Idx >= 0) {
3271 if (Idx < (int)SrcNumElts)
3272 Idx -= StartIdx[0];
3273 else
3274 Idx -= SrcNumElts + StartIdx[1] - MaskNumElts;
3275 }
3276 MappedOps.push_back(Idx);
3277 }
3279 setValue(&I, DAG.getVectorShuffle(VT, getCurSDLoc(), Src1, Src2,
3280 &MappedOps[0]));
3281 return;
3282 }
3283 }
3285 // We can't use either concat vectors or extract subvectors, so fall back
3286 // to replacing the shuffle with an extract-element and build-vector
3287 // sequence.
3288 EVT EltVT = VT.getVectorElementType();
3289 EVT IdxVT = TLI->getVectorIdxTy();
3290 SmallVector<SDValue,8> Ops;
3291 for (unsigned i = 0; i != MaskNumElts; ++i) {
3292 int Idx = Mask[i];
3293 SDValue Res;
3295 if (Idx < 0) {
3296 Res = DAG.getUNDEF(EltVT);
3297 } else {
3298 SDValue &Src = Idx < (int)SrcNumElts ? Src1 : Src2;
3299 if (Idx >= (int)SrcNumElts) Idx -= SrcNumElts;
3301 Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, getCurSDLoc(),
3302 EltVT, Src, DAG.getConstant(Idx, IdxVT));
3303 }
3305 Ops.push_back(Res);
3306 }
3308 setValue(&I, DAG.getNode(ISD::BUILD_VECTOR, getCurSDLoc(), VT, Ops));
3309 }
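// For example, a <2 x i32> shuffle of <4 x i32> sources with mask <0, 3>
// spans a range wider than the mask length, so no aligned subvector
// extraction applies; it becomes two EXTRACT_VECTOR_ELTs feeding a
// BUILD_VECTOR.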
3311 void SelectionDAGBuilder::visitInsertValue(const InsertValueInst &I) {
3312 const Value *Op0 = I.getOperand(0);
3313 const Value *Op1 = I.getOperand(1);
3314 Type *AggTy = I.getType();
3315 Type *ValTy = Op1->getType();
3316 bool IntoUndef = isa<UndefValue>(Op0);
3317 bool FromUndef = isa<UndefValue>(Op1);
3319 unsigned LinearIndex = ComputeLinearIndex(AggTy, I.getIndices());
3321 const TargetLowering *TLI = TM.getSubtargetImpl()->getTargetLowering();
3322 SmallVector<EVT, 4> AggValueVTs;
3323 ComputeValueVTs(*TLI, AggTy, AggValueVTs);
3324 SmallVector<EVT, 4> ValValueVTs;
3325 ComputeValueVTs(*TLI, ValTy, ValValueVTs);
3327 unsigned NumAggValues = AggValueVTs.size();
3328 unsigned NumValValues = ValValueVTs.size();
3329 SmallVector<SDValue, 4> Values(NumAggValues);
3331 SDValue Agg = getValue(Op0);
3332 unsigned i = 0;
3333 // Copy the beginning value(s) from the original aggregate.
3334 for (; i != LinearIndex; ++i)
3335 Values[i] = IntoUndef ? DAG.getUNDEF(AggValueVTs[i]) :
3336 SDValue(Agg.getNode(), Agg.getResNo() + i);
3337 // Copy values from the inserted value(s).
3339 SDValue Val = getValue(Op1);
3340 for (; i != LinearIndex + NumValValues; ++i)
3341 Values[i] = FromUndef ? DAG.getUNDEF(AggValueVTs[i]) :
3342 SDValue(Val.getNode(), Val.getResNo() + i - LinearIndex);
3344 // Copy remaining value(s) from the original aggregate.
3345 for (; i != NumAggValues; ++i)
3346 Values[i] = IntoUndef ? DAG.getUNDEF(AggValueVTs[i]) :
3347 SDValue(Agg.getNode(), Agg.getResNo() + i);
3349 setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(),
3350 DAG.getVTList(AggValueVTs), Values));
3351 }
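// For example:
//   %r = insertvalue { i32, float } %agg, float %f, 1
// copies the i32 member through unchanged and replaces the float member,
// merging both parts with a single MERGE_VALUES node.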
3353 void SelectionDAGBuilder::visitExtractValue(const ExtractValueInst &I) {
3354 const Value *Op0 = I.getOperand(0);
3355 Type *AggTy = Op0->getType();
3356 Type *ValTy = I.getType();
3357 bool OutOfUndef = isa<UndefValue>(Op0);
3359 unsigned LinearIndex = ComputeLinearIndex(AggTy, I.getIndices());
3361 const TargetLowering *TLI = TM.getSubtargetImpl()->getTargetLowering();
3362 SmallVector<EVT, 4> ValValueVTs;
3363 ComputeValueVTs(*TLI, ValTy, ValValueVTs);
3365 unsigned NumValValues = ValValueVTs.size();
3367 // Ignore an extractvalue that produces an empty object.
3368 if (!NumValValues) {
3369 setValue(&I, DAG.getUNDEF(MVT(MVT::Other)));
3370 return;
3371 }
3373 SmallVector<SDValue, 4> Values(NumValValues);
3375 SDValue Agg = getValue(Op0);
3376 // Copy out the selected value(s).
3377 for (unsigned i = LinearIndex; i != LinearIndex + NumValValues; ++i)
3378 Values[i - LinearIndex] =
3379 OutOfUndef ?
3380 DAG.getUNDEF(Agg.getNode()->getValueType(Agg.getResNo() + i)) :
3381 SDValue(Agg.getNode(), Agg.getResNo() + i);
3383 setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(),
3384 DAG.getVTList(ValValueVTs), Values));
3385 }
3387 void SelectionDAGBuilder::visitGetElementPtr(const User &I) {
3388 Value *Op0 = I.getOperand(0);
3389 // Note that the pointer operand may be a vector of pointers. Take the scalar
3390 // element which holds a pointer.
3391 Type *Ty = Op0->getType()->getScalarType();
3392 unsigned AS = Ty->getPointerAddressSpace();
3393 SDValue N = getValue(Op0);
3395 for (GetElementPtrInst::const_op_iterator OI = I.op_begin()+1, E = I.op_end();
3396 OI != E; ++OI) {
3397 const Value *Idx = *OI;
3398 if (StructType *StTy = dyn_cast<StructType>(Ty)) {
3399 unsigned Field = cast<Constant>(Idx)->getUniqueInteger().getZExtValue();
3400 if (Field) {
3401 // N = N + Offset
3402 uint64_t Offset = DL->getStructLayout(StTy)->getElementOffset(Field);
3403 N = DAG.getNode(ISD::ADD, getCurSDLoc(), N.getValueType(), N,
3404 DAG.getConstant(Offset, N.getValueType()));
3405 }
3407 Ty = StTy->getElementType(Field);
3408 } else {
3409 Ty = cast<SequentialType>(Ty)->getElementType();
3411 // If this is a constant subscript, handle it quickly.
3412 const TargetLowering *TLI = TM.getSubtargetImpl()->getTargetLowering();
3413 if (const ConstantInt *CI = dyn_cast<ConstantInt>(Idx)) {
3414 if (CI->isZero()) continue;
3415 uint64_t Offs =
3416 DL->getTypeAllocSize(Ty)*cast<ConstantInt>(CI)->getSExtValue();
3417 SDValue OffsVal;
3418 EVT PTy = TLI->getPointerTy(AS);
3419 unsigned PtrBits = PTy.getSizeInBits();
3420 if (PtrBits < 64)
3421 OffsVal = DAG.getNode(ISD::TRUNCATE, getCurSDLoc(), PTy,
3422 DAG.getConstant(Offs, MVT::i64));
3423 else
3424 OffsVal = DAG.getConstant(Offs, PTy);
3426 N = DAG.getNode(ISD::ADD, getCurSDLoc(), N.getValueType(), N,
3427 OffsVal);
3428 continue;
3429 }
3431 // N = N + Idx * ElementSize;
3432 APInt ElementSize = APInt(TLI->getPointerSizeInBits(AS),
3433 DL->getTypeAllocSize(Ty));
3434 SDValue IdxN = getValue(Idx);
3436 // If the index is smaller or larger than intptr_t, truncate or extend
3437 // it.
3438 IdxN = DAG.getSExtOrTrunc(IdxN, getCurSDLoc(), N.getValueType());
3440 // If this is a multiply by a power of two, turn it into a shl
3441 // immediately. This is a very common case.
3442 if (ElementSize != 1) {
3443 if (ElementSize.isPowerOf2()) {
3444 unsigned Amt = ElementSize.logBase2();
3445 IdxN = DAG.getNode(ISD::SHL, getCurSDLoc(),
3446 N.getValueType(), IdxN,
3447 DAG.getConstant(Amt, IdxN.getValueType()));
3448 } else {
3449 SDValue Scale = DAG.getConstant(ElementSize, IdxN.getValueType());
3450 IdxN = DAG.getNode(ISD::MUL, getCurSDLoc(),
3451 N.getValueType(), IdxN, Scale);
3452 }
3453 }
3455 N = DAG.getNode(ISD::ADD, getCurSDLoc(),
3456 N.getValueType(), N, IdxN);
3457 }
3458 }
3460 setValue(&I, N);
3461 }
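// For example (pre-LLVM-3.7 GEP syntax), with i64 indices on a 64-bit
// target:
//   %p = getelementptr inbounds [8 x i32]* %a, i64 0, i64 %i
// lowers roughly to N = %a + (%i << 2): the leading zero index adds
// nothing, and the i32 element size of 4 bytes is a power of two, so the
// multiply becomes a shift.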
3463 void SelectionDAGBuilder::visitAlloca(const AllocaInst &I) {
3464 // If this is a fixed sized alloca in the entry block of the function,
3465 // allocate it statically on the stack.
3466 if (FuncInfo.StaticAllocaMap.count(&I))
3467 return; // getValue will auto-populate this.
3469 Type *Ty = I.getAllocatedType();
3470 const TargetLowering *TLI = TM.getSubtargetImpl()->getTargetLowering();
3471 uint64_t TySize = TLI->getDataLayout()->getTypeAllocSize(Ty);
3472 unsigned Align =
3473 std::max((unsigned)TLI->getDataLayout()->getPrefTypeAlignment(Ty),
3474 I.getAlignment());
3476 SDValue AllocSize = getValue(I.getArraySize());
3478 EVT IntPtr = TLI->getPointerTy();
3479 if (AllocSize.getValueType() != IntPtr)
3480 AllocSize = DAG.getZExtOrTrunc(AllocSize, getCurSDLoc(), IntPtr);
3482 AllocSize = DAG.getNode(ISD::MUL, getCurSDLoc(), IntPtr,
3483 AllocSize,
3484 DAG.getConstant(TySize, IntPtr));
3486 // Handle alignment. If the requested alignment is less than or equal to
3487 // the stack alignment, ignore it. If the requested alignment is greater
3488 // than the stack alignment, we note this in the DYNAMIC_STACKALLOC node.
3489 unsigned StackAlign =
3490 TM.getSubtargetImpl()->getFrameLowering()->getStackAlignment();
3491 if (Align <= StackAlign)
3492 Align = 0;
3494 // Round the size of the allocation up to the stack alignment size
3495 // by adding SA-1 to the size.
3496 AllocSize = DAG.getNode(ISD::ADD, getCurSDLoc(),
3497 AllocSize.getValueType(), AllocSize,
3498 DAG.getIntPtrConstant(StackAlign-1));
3500 // Mask out the low bits for alignment purposes.
3501 AllocSize = DAG.getNode(ISD::AND, getCurSDLoc(),
3502 AllocSize.getValueType(), AllocSize,
3503 DAG.getIntPtrConstant(~(uint64_t)(StackAlign-1)));
3505 SDValue Ops[] = { getRoot(), AllocSize, DAG.getIntPtrConstant(Align) };
3506 SDVTList VTs = DAG.getVTList(AllocSize.getValueType(), MVT::Other);
3507 SDValue DSA = DAG.getNode(ISD::DYNAMIC_STACKALLOC, getCurSDLoc(), VTs, Ops);
3508 setValue(&I, DSA);
3509 DAG.setRoot(DSA.getValue(1));
3511 assert(FuncInfo.MF->getFrameInfo()->hasVarSizedObjects());
3512 }
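// For example, with StackAlign = 16 and a runtime size of 20 bytes, the
// rounding above computes (20 + 15) & ~15 == 32, i.e. two aligned slots.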
3514 void SelectionDAGBuilder::visitLoad(const LoadInst &I) {
3515 if (I.isAtomic())
3516 return visitAtomicLoad(I);
3518 const Value *SV = I.getOperand(0);
3519 SDValue Ptr = getValue(SV);
3521 Type *Ty = I.getType();
3523 bool isVolatile = I.isVolatile();
3524 bool isNonTemporal = I.getMetadata("nontemporal") != nullptr;
3525 bool isInvariant = I.getMetadata("invariant.load") != nullptr;
3526 unsigned Alignment = I.getAlignment();
3528 AAMDNodes AAInfo;
3529 I.getAAMetadata(AAInfo);
3530 const MDNode *Ranges = I.getMetadata(LLVMContext::MD_range);
3532 SmallVector<EVT, 4> ValueVTs;
3533 SmallVector<uint64_t, 4> Offsets;
3534 ComputeValueVTs(*TM.getSubtargetImpl()->getTargetLowering(), Ty, ValueVTs,
3535 &Offsets);
3536 unsigned NumValues = ValueVTs.size();
3537 if (NumValues == 0)
3538 return;
3540 SDValue Root;
3541 bool ConstantMemory = false;
3542 if (isVolatile || NumValues > MaxParallelChains)
3543 // Serialize volatile loads with other side effects.
3544 Root = DAG.getRoot();
3545 else if (AA->pointsToConstantMemory(
3546 AliasAnalysis::Location(SV, AA->getTypeStoreSize(Ty), AAInfo))) {
3547 // Do not serialize (non-volatile) loads of constant memory with anything.
3548 Root = DAG.getEntryNode();
3549 ConstantMemory = true;
3550 } else {
3551 // Do not serialize non-volatile loads against each other.
3552 Root = DAG.getRoot();
3553 }
3555 const TargetLowering *TLI = TM.getSubtargetImpl()->getTargetLowering();
3556 if (isVolatile)
3557 Root = TLI->prepareVolatileOrAtomicLoad(Root, getCurSDLoc(), DAG);
3559 SmallVector<SDValue, 4> Values(NumValues);
3560 SmallVector<SDValue, 4> Chains(std::min(unsigned(MaxParallelChains),
3561 NumValues));
3562 EVT PtrVT = Ptr.getValueType();
3563 unsigned ChainI = 0;
3564 for (unsigned i = 0; i != NumValues; ++i, ++ChainI) {
3565 // Serializing loads here may result in excessive register pressure, and
3566 // TokenFactor places arbitrary choke points on the scheduler. SD scheduling
3567 // could recover a bit by hoisting nodes upward in the chain by recognizing
3568 // they are side-effect free or do not alias. The optimizer should really
3569 // avoid this case by converting large object/array copies to llvm.memcpy
3570 // (MaxParallelChains should always remain as a failsafe).
3571 if (ChainI == MaxParallelChains) {
3572 assert(PendingLoads.empty() && "PendingLoads must be serialized first");
3573 SDValue Chain = DAG.getNode(ISD::TokenFactor, getCurSDLoc(), MVT::Other,
3574 makeArrayRef(Chains.data(), ChainI));
3575 Root = Chain;
3576 ChainI = 0;
3577 }
3578 SDValue A = DAG.getNode(ISD::ADD, getCurSDLoc(),
3579 PtrVT, Ptr,
3580 DAG.getConstant(Offsets[i], PtrVT));
3581 SDValue L = DAG.getLoad(ValueVTs[i], getCurSDLoc(), Root,
3582 A, MachinePointerInfo(SV, Offsets[i]), isVolatile,
3583 isNonTemporal, isInvariant, Alignment, AAInfo,
3584 Ranges);
3586 Values[i] = L;
3587 Chains[ChainI] = L.getValue(1);
3588 }
3590 if (!ConstantMemory) {
3591 SDValue Chain = DAG.getNode(ISD::TokenFactor, getCurSDLoc(), MVT::Other,
3592 makeArrayRef(Chains.data(), ChainI));
3593 if (isVolatile)
3594 DAG.setRoot(Chain);
3595 else
3596 PendingLoads.push_back(Chain);
3597 }
3599 setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(),
3600 DAG.getVTList(ValueVTs), Values));
3601 }
3603 void SelectionDAGBuilder::visitStore(const StoreInst &I) {
3604 if (I.isAtomic())
3605 return visitAtomicStore(I);
3607 const Value *SrcV = I.getOperand(0);
3608 const Value *PtrV = I.getOperand(1);
3610 SmallVector<EVT, 4> ValueVTs;
3611 SmallVector<uint64_t, 4> Offsets;
3612 ComputeValueVTs(*TM.getSubtargetImpl()->getTargetLowering(), SrcV->getType(),
3613 ValueVTs, &Offsets);
3614 unsigned NumValues = ValueVTs.size();
3615 if (NumValues == 0)
3616 return;
3618 // Get the lowered operands. Note that we do this after
3619 // checking if NumValues is zero, because with zero values
3620 // the operands won't have values in the map.
3621 SDValue Src = getValue(SrcV);
3622 SDValue Ptr = getValue(PtrV);
3624 SDValue Root = getRoot();
3625 SmallVector<SDValue, 4> Chains(std::min(unsigned(MaxParallelChains),
3626 NumValues));
3627 EVT PtrVT = Ptr.getValueType();
3628 bool isVolatile = I.isVolatile();
3629 bool isNonTemporal = I.getMetadata("nontemporal") != nullptr;
3630 unsigned Alignment = I.getAlignment();
3632 AAMDNodes AAInfo;
3633 I.getAAMetadata(AAInfo);
3635 unsigned ChainI = 0;
3636 for (unsigned i = 0; i != NumValues; ++i, ++ChainI) {
3637 // See visitLoad comments.
3638 if (ChainI == MaxParallelChains) {
3639 SDValue Chain = DAG.getNode(ISD::TokenFactor, getCurSDLoc(), MVT::Other,
3640 makeArrayRef(Chains.data(), ChainI));
3641 Root = Chain;
3642 ChainI = 0;
3643 }
3644 SDValue Add = DAG.getNode(ISD::ADD, getCurSDLoc(), PtrVT, Ptr,
3645 DAG.getConstant(Offsets[i], PtrVT));
3646 SDValue St = DAG.getStore(Root, getCurSDLoc(),
3647 SDValue(Src.getNode(), Src.getResNo() + i),
3648 Add, MachinePointerInfo(PtrV, Offsets[i]),
3649 isVolatile, isNonTemporal, Alignment, AAInfo);
3650 Chains[ChainI] = St;
3651 }
3653 SDValue StoreNode = DAG.getNode(ISD::TokenFactor, getCurSDLoc(), MVT::Other,
3654 makeArrayRef(Chains.data(), ChainI));
3655 DAG.setRoot(StoreNode);
3656 }
3658 static SDValue InsertFenceForAtomic(SDValue Chain, AtomicOrdering Order,
3659 SynchronizationScope Scope,
3660 bool Before, SDLoc dl,
3661 SelectionDAG &DAG,
3662 const TargetLowering &TLI) {
3663 // Fence, if necessary
3664 if (Before) {
3665 if (Order == AcquireRelease || Order == SequentiallyConsistent)
3666 Order = Release;
3667 else if (Order == Acquire || Order == Monotonic || Order == Unordered)
3668 return Chain;
3669 } else {
3670 if (Order == AcquireRelease)
3671 Order = Acquire;
3672 else if (Order == Release || Order == Monotonic || Order == Unordered)
3673 return Chain;
3674 }
3675 SDValue Ops[3];
3676 Ops[0] = Chain;
3677 Ops[1] = DAG.getConstant(Order, TLI.getPointerTy());
3678 Ops[2] = DAG.getConstant(Scope, TLI.getPointerTy());
3679 return DAG.getNode(ISD::ATOMIC_FENCE, dl, MVT::Other, Ops);
3680 }
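// For example, a SequentiallyConsistent atomic operation on a target that
// sets insert-fences-for-atomic gets a Release fence before and a
// SequentiallyConsistent fence after the (now Monotonic) memory operation.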
3682 void SelectionDAGBuilder::visitAtomicCmpXchg(const AtomicCmpXchgInst &I) {
3683 SDLoc dl = getCurSDLoc();
3684 AtomicOrdering SuccessOrder = I.getSuccessOrdering();
3685 AtomicOrdering FailureOrder = I.getFailureOrdering();
3686 SynchronizationScope Scope = I.getSynchScope();
3688 SDValue InChain = getRoot();
3690 const TargetLowering *TLI = TM.getSubtargetImpl()->getTargetLowering();
3691 if (TLI->getInsertFencesForAtomic())
3692 InChain = InsertFenceForAtomic(InChain, SuccessOrder, Scope, true, dl,
3693 DAG, *TLI);
3695 MVT MemVT = getValue(I.getCompareOperand()).getSimpleValueType();
3696 SDVTList VTs = DAG.getVTList(MemVT, MVT::i1, MVT::Other);
3697 SDValue L = DAG.getAtomicCmpSwap(
3698 ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, dl, MemVT, VTs, InChain,
3699 getValue(I.getPointerOperand()), getValue(I.getCompareOperand()),
3700 getValue(I.getNewValOperand()), MachinePointerInfo(I.getPointerOperand()),
3701 0 /* Alignment */,
3702 TLI->getInsertFencesForAtomic() ? Monotonic : SuccessOrder,
3703 TLI->getInsertFencesForAtomic() ? Monotonic : FailureOrder, Scope);
3705 SDValue OutChain = L.getValue(2);
3707 if (TLI->getInsertFencesForAtomic())
3708 OutChain = InsertFenceForAtomic(OutChain, SuccessOrder, Scope, false, dl,
3709 DAG, *TLI);
3711 setValue(&I, L);
3712 DAG.setRoot(OutChain);
3713 }
3715 void SelectionDAGBuilder::visitAtomicRMW(const AtomicRMWInst &I) {
3716 SDLoc dl = getCurSDLoc();
3717 ISD::NodeType NT;
3718 switch (I.getOperation()) {
3719 default: llvm_unreachable("Unknown atomicrmw operation");
3720 case AtomicRMWInst::Xchg: NT = ISD::ATOMIC_SWAP; break;
3721 case AtomicRMWInst::Add: NT = ISD::ATOMIC_LOAD_ADD; break;
3722 case AtomicRMWInst::Sub: NT = ISD::ATOMIC_LOAD_SUB; break;
3723 case AtomicRMWInst::And: NT = ISD::ATOMIC_LOAD_AND; break;
3724 case AtomicRMWInst::Nand: NT = ISD::ATOMIC_LOAD_NAND; break;
3725 case AtomicRMWInst::Or: NT = ISD::ATOMIC_LOAD_OR; break;
3726 case AtomicRMWInst::Xor: NT = ISD::ATOMIC_LOAD_XOR; break;
3727 case AtomicRMWInst::Max: NT = ISD::ATOMIC_LOAD_MAX; break;
3728 case AtomicRMWInst::Min: NT = ISD::ATOMIC_LOAD_MIN; break;
3729 case AtomicRMWInst::UMax: NT = ISD::ATOMIC_LOAD_UMAX; break;
3730 case AtomicRMWInst::UMin: NT = ISD::ATOMIC_LOAD_UMIN; break;
3731 }
3732 AtomicOrdering Order = I.getOrdering();
3733 SynchronizationScope Scope = I.getSynchScope();
3735 SDValue InChain = getRoot();
3737 const TargetLowering *TLI = TM.getSubtargetImpl()->getTargetLowering();
3738 if (TLI->getInsertFencesForAtomic())
3739 InChain = InsertFenceForAtomic(InChain, Order, Scope, true, dl,
3740 DAG, *TLI);
3742 SDValue L =
3743 DAG.getAtomic(NT, dl,
3744 getValue(I.getValOperand()).getSimpleValueType(),
3745 InChain,
3746 getValue(I.getPointerOperand()),
3747 getValue(I.getValOperand()),
3748 I.getPointerOperand(), 0 /* Alignment */,
3749 TLI->getInsertFencesForAtomic() ? Monotonic : Order,
3750 Scope);
3752 SDValue OutChain = L.getValue(1);
3754 if (TLI->getInsertFencesForAtomic())
3755 OutChain = InsertFenceForAtomic(OutChain, Order, Scope, false, dl,
3756 DAG, *TLI);
3758 setValue(&I, L);
3759 DAG.setRoot(OutChain);
3760 }
3762 void SelectionDAGBuilder::visitFence(const FenceInst &I) {
3763 SDLoc dl = getCurSDLoc();
3764 const TargetLowering *TLI = TM.getSubtargetImpl()->getTargetLowering();
3765 SDValue Ops[3];
3766 Ops[0] = getRoot();
3767 Ops[1] = DAG.getConstant(I.getOrdering(), TLI->getPointerTy());
3768 Ops[2] = DAG.getConstant(I.getSynchScope(), TLI->getPointerTy());
3769 DAG.setRoot(DAG.getNode(ISD::ATOMIC_FENCE, dl, MVT::Other, Ops));
3770 }
3772 void SelectionDAGBuilder::visitAtomicLoad(const LoadInst &I) {
3773 SDLoc dl = getCurSDLoc();
3774 AtomicOrdering Order = I.getOrdering();
3775 SynchronizationScope Scope = I.getSynchScope();
3777 SDValue InChain = getRoot();
3779 const TargetLowering *TLI = TM.getSubtargetImpl()->getTargetLowering();
3780 EVT VT = TLI->getValueType(I.getType());
3782 if (I.getAlignment() < VT.getSizeInBits() / 8)
3783 report_fatal_error("Cannot generate unaligned atomic load");
3785 MachineMemOperand *MMO =
3786 DAG.getMachineFunction().
3787 getMachineMemOperand(MachinePointerInfo(I.getPointerOperand()),
3788 MachineMemOperand::MOVolatile |
3789 MachineMemOperand::MOLoad,
3790 VT.getStoreSize(),
3791 I.getAlignment() ? I.getAlignment() :
3792 DAG.getEVTAlignment(VT));
3794 InChain = TLI->prepareVolatileOrAtomicLoad(InChain, dl, DAG);
3795 SDValue L =
3796 DAG.getAtomic(ISD::ATOMIC_LOAD, dl, VT, VT, InChain,
3797 getValue(I.getPointerOperand()), MMO,
3798 TLI->getInsertFencesForAtomic() ? Monotonic : Order,
3799 Scope);
3801 SDValue OutChain = L.getValue(1);
3803 if (TLI->getInsertFencesForAtomic())
3804 OutChain = InsertFenceForAtomic(OutChain, Order, Scope, false, dl,
3805 DAG, *TLI);
3807 setValue(&I, L);
3808 DAG.setRoot(OutChain);
3809 }
3811 void SelectionDAGBuilder::visitAtomicStore(const StoreInst &I) {
3812 SDLoc dl = getCurSDLoc();
3814 AtomicOrdering Order = I.getOrdering();
3815 SynchronizationScope Scope = I.getSynchScope();
3817 SDValue InChain = getRoot();
3819 const TargetLowering *TLI = TM.getSubtargetImpl()->getTargetLowering();
3820 EVT VT = TLI->getValueType(I.getValueOperand()->getType());
3822 if (I.getAlignment() < VT.getSizeInBits() / 8)
3823 report_fatal_error("Cannot generate unaligned atomic store");
3825 if (TLI->getInsertFencesForAtomic())
3826 InChain = InsertFenceForAtomic(InChain, Order, Scope, true, dl,
3827 DAG, *TLI);
3829 SDValue OutChain =
3830 DAG.getAtomic(ISD::ATOMIC_STORE, dl, VT,
3831 InChain,
3832 getValue(I.getPointerOperand()),
3833 getValue(I.getValueOperand()),
3834 I.getPointerOperand(), I.getAlignment(),
3835 TLI->getInsertFencesForAtomic() ? Monotonic : Order,
3836 Scope);
3838 if (TLI->getInsertFencesForAtomic())
3839 OutChain = InsertFenceForAtomic(OutChain, Order, Scope, false, dl,
3840 DAG, *TLI);
3842 DAG.setRoot(OutChain);
3843 }
3845 /// visitTargetIntrinsic - Lower a call of a target intrinsic to an INTRINSIC
3846 /// node.
3847 void SelectionDAGBuilder::visitTargetIntrinsic(const CallInst &I,
3848 unsigned Intrinsic) {
3849 bool HasChain = !I.doesNotAccessMemory();
3850 bool OnlyLoad = HasChain && I.onlyReadsMemory();
3852 // Build the operand list.
3853 SmallVector<SDValue, 8> Ops;
3854 if (HasChain) {  // If this intrinsic has side-effects, chainify it.
3855 if (OnlyLoad) {
3856 // We don't need to serialize loads against other loads.
3857 Ops.push_back(DAG.getRoot());
3858 } else {
3859 Ops.push_back(getRoot());
3860 }
3861 }
3863 // Info is set by getTgtMemIntrinsic.
3864 TargetLowering::IntrinsicInfo Info;
3865 const TargetLowering *TLI = TM.getSubtargetImpl()->getTargetLowering();
3866 bool IsTgtIntrinsic = TLI->getTgtMemIntrinsic(Info, I, Intrinsic);
3868 // Add the intrinsic ID as an integer operand if it's not a target intrinsic.
3869 if (!IsTgtIntrinsic || Info.opc == ISD::INTRINSIC_VOID ||
3870 Info.opc == ISD::INTRINSIC_W_CHAIN)
3871 Ops.push_back(DAG.getTargetConstant(Intrinsic, TLI->getPointerTy()));
3873 // Add all operands of the call to the operand list.
3874 for (unsigned i = 0, e = I.getNumArgOperands(); i != e; ++i) {
3875 SDValue Op = getValue(I.getArgOperand(i));
3876 Ops.push_back(Op);
3877 }
3879 SmallVector<EVT, 4> ValueVTs;
3880 ComputeValueVTs(*TLI, I.getType(), ValueVTs);
3882 if (HasChain)
3883 ValueVTs.push_back(MVT::Other);
3885 SDVTList VTs = DAG.getVTList(ValueVTs);
3887 // Create the node.
3888 SDValue Result;
3889 if (IsTgtIntrinsic) {
3890 // This is a target intrinsic that touches memory.
3891 Result = DAG.getMemIntrinsicNode(Info.opc, getCurSDLoc(),
3892 VTs, Ops, Info.memVT,
3893 MachinePointerInfo(Info.ptrVal, Info.offset),
3894 Info.align, Info.vol,
3895 Info.readMem, Info.writeMem, Info.size);
3896 } else if (!HasChain) {
3897 Result = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, getCurSDLoc(), VTs, Ops);
3898 } else if (!I.getType()->isVoidTy()) {
3899 Result = DAG.getNode(ISD::INTRINSIC_W_CHAIN, getCurSDLoc(), VTs, Ops);
3900 } else {
3901 Result = DAG.getNode(ISD::INTRINSIC_VOID, getCurSDLoc(), VTs, Ops);
3902 }
3904 if (HasChain) {
3905 SDValue Chain = Result.getValue(Result.getNode()->getNumValues()-1);
3906 if (OnlyLoad)
3907 PendingLoads.push_back(Chain);
3908 else
3909 DAG.setRoot(Chain);
3910 }
3912 if (!I.getType()->isVoidTy()) {
3913 if (VectorType *PTy = dyn_cast<VectorType>(I.getType())) {
3914 EVT VT = TLI->getValueType(PTy);
3915 Result = DAG.getNode(ISD::BITCAST, getCurSDLoc(), VT, Result);
3916 }
3918 setValue(&I, Result);
3919 }
3920 }
3922 /// GetSignificand - Get the significand and build it into a floating-point
3923 /// number with exponent of 1:
3925 /// Op = (Op & 0x007fffff) | 0x3f800000;
3927 /// where Op is the i32 bit pattern of the floating-point value.
3928 static SDValue
3929 GetSignificand(SelectionDAG &DAG, SDValue Op, SDLoc dl) {
3930 SDValue t1 = DAG.getNode(ISD::AND, dl, MVT::i32, Op,
3931 DAG.getConstant(0x007fffff, MVT::i32));
3932 SDValue t2 = DAG.getNode(ISD::OR, dl, MVT::i32, t1,
3933 DAG.getConstant(0x3f800000, MVT::i32));
3934 return DAG.getNode(ISD::BITCAST, dl, MVT::f32, t2);
3935 }
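// For example, Op = 0x40c00000 (6.0f): (0x40c00000 & 0x007fffff) |
// 0x3f800000 == 0x3fc00000, i.e. 1.5f, since 6.0 = 1.5 * 2^2.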
3937 /// GetExponent - Get the exponent:
3939 /// (float)(int)(((Op & 0x7f800000) >> 23) - 127);
3941 /// where Op is the i32 bit pattern of the floating-point value.
3942 static SDValue
3943 GetExponent(SelectionDAG &DAG, SDValue Op, const TargetLowering &TLI,
3944 SDLoc dl) {
3945 SDValue t0 = DAG.getNode(ISD::AND, dl, MVT::i32, Op,
3946 DAG.getConstant(0x7f800000, MVT::i32));
3947 SDValue t1 = DAG.getNode(ISD::SRL, dl, MVT::i32, t0,
3948 DAG.getConstant(23, TLI.getPointerTy()));
3949 SDValue t2 = DAG.getNode(ISD::SUB, dl, MVT::i32, t1,
3950 DAG.getConstant(127, MVT::i32));
3951 return DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, t2);
3952 }
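// For example, Op = 0x40c00000 (6.0f): ((0x40c00000 & 0x7f800000) >> 23)
// - 127 == 129 - 127 == 2, converted to 2.0f, since 6.0 = 1.5 * 2^2.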
3954 /// getF32Constant - Get 32-bit floating point constant.
3955 static SDValue
3956 getF32Constant(SelectionDAG &DAG, unsigned Flt) {
3957 return DAG.getConstantFP(APFloat(APFloat::IEEEsingle, APInt(32, Flt)),
3958 MVT::f32);
3959 }
3961 /// expandExp - Lower an exp intrinsic. Handles the special sequences for
3962 /// limited-precision mode.
3963 static SDValue expandExp(SDLoc dl, SDValue Op, SelectionDAG &DAG,
3964 const TargetLowering &TLI) {
3965 if (Op.getValueType() == MVT::f32 &&
3966 LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
3968 // Put the exponent in the right bit position for later addition to the
3969 // final result:
3971 // #define LOG2OFe 1.4426950f
3972 // IntegerPartOfX = ((int32_t)(X * LOG2OFe));
3973 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, Op,
3974 getF32Constant(DAG, 0x3fb8aa3b));
3975 SDValue IntegerPartOfX = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, t0);
3977 // FractionalPartOfX = (X * LOG2OFe) - (float)IntegerPartOfX;
3978 SDValue t1 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, IntegerPartOfX);
3979 SDValue X = DAG.getNode(ISD::FSUB, dl, MVT::f32, t0, t1);
3981 // IntegerPartOfX <<= 23;
3982 IntegerPartOfX = DAG.getNode(ISD::SHL, dl, MVT::i32, IntegerPartOfX,
3983 DAG.getConstant(23, TLI.getPointerTy()));
3985 SDValue TwoToFracPartOfX;
3986 if (LimitFloatPrecision <= 6) {
3987 // For floating-point precision of 6:
3989 //   TwoToFractionalPartOfX =
3990 //     0.997535578f +
3991 //       (0.735607626f + 0.252464424f * x) * x;
3993 // error 0.0144103317, which is 6 bits
3994 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3995 getF32Constant(DAG, 0x3e814304));
3996 SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
3997 getF32Constant(DAG, 0x3f3c50c8));
3998 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
3999 TwoToFracPartOfX = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
4000 getF32Constant(DAG, 0x3f7f5e7e));
4001 } else if (LimitFloatPrecision <= 12) {
4002 // For floating-point precision of 12:
4004 //   TwoToFractionalPartOfX =
4005 //     0.999892986f +
4006 //       (0.696457318f +
4007 //         (0.224338339f + 0.792043434e-1f * x) * x) * x;
4009 // 0.000107046256 error, which is 13 to 14 bits
4010 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
4011 getF32Constant(DAG, 0x3da235e3));
4012 SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
4013 getF32Constant(DAG, 0x3e65b8f3));
4014 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
4015 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
4016 getF32Constant(DAG, 0x3f324b07));
4017 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
4018 TwoToFracPartOfX = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
4019 getF32Constant(DAG, 0x3f7ff8fd));
4020 } else { // LimitFloatPrecision <= 18
4021 // For floating-point precision of 18:
4023 //   TwoToFractionalPartOfX =
4024 //     0.999999982f +
4025 //       (0.693148872f +
4026 //         (0.240227044f +
4027 //           (0.554906021e-1f +
4028 //             (0.961591928e-2f +
4029 //               (0.136028312e-2f + 0.157059148e-3f *x)*x)*x)*x)*x)*x;
4031 // error 2.47208000*10^(-7), which is better than 18 bits
4032 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
4033 getF32Constant(DAG, 0x3924b03e));
4034 SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
4035 getF32Constant(DAG, 0x3ab24b87));
4036 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
4037 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
4038 getF32Constant(DAG, 0x3c1d8c17));
4039 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
4040 SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
4041 getF32Constant(DAG, 0x3d634a1d));
4042 SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
4043 SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8,
4044 getF32Constant(DAG, 0x3e75fe14));
4045 SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X);
4046 SDValue t11 = DAG.getNode(ISD::FADD, dl, MVT::f32, t10,
4047 getF32Constant(DAG, 0x3f317234));
4048 SDValue t12 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t11, X);
4049 TwoToFracPartOfX = DAG.getNode(ISD::FADD, dl, MVT::f32, t12,
4050 getF32Constant(DAG, 0x3f800000));
4053 // Add the exponent into the result in integer domain.
4054 SDValue t13 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, TwoToFracPartOfX);
4055 return DAG.getNode(ISD::BITCAST, dl, MVT::f32,
4056 DAG.getNode(ISD::ADD, dl, MVT::i32,
4057 t13, IntegerPartOfX));
4058 }
4060 // No special expansion.
4061 return DAG.getNode(ISD::FEXP, dl, Op.getValueType(), Op);
4062 }
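// The limited-precision path above computes exp(x) as 2^(x * log2(e)):
// the integer part of x * log2(e) is shifted straight into the exponent
// bits, and 2^fraction is approximated by a minimax polynomial. For
// example, exp(1.0f) uses IntegerPartOfX = 1 and approximates
// 2^0.4426950 ~= 1.3591409 (that is, e / 2).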
4064 /// expandLog - Lower a log intrinsic. Handles the special sequences for
4065 /// limited-precision mode.
4066 static SDValue expandLog(SDLoc dl, SDValue Op, SelectionDAG &DAG,
4067 const TargetLowering &TLI) {
4068 if (Op.getValueType() == MVT::f32 &&
4069 LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
4070 SDValue Op1 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Op);
4072 // Scale the exponent by log(2) [0.69314718f].
4073 SDValue Exp = GetExponent(DAG, Op1, TLI, dl);
4074 SDValue LogOfExponent = DAG.getNode(ISD::FMUL, dl, MVT::f32, Exp,
4075 getF32Constant(DAG, 0x3f317218));
4077 // Get the significand and build it into a floating-point number with
4078 // exponent of 1.
4079 SDValue X = GetSignificand(DAG, Op1, dl);
4081 SDValue LogOfMantissa;
4082 if (LimitFloatPrecision <= 6) {
4083 // For floating-point precision of 6:
4084 //
4085 //   LogofMantissa =
4086 //     -1.1609546f +
4087 //       (1.4034025f - 0.23903021f * x) * x;
4089 // error 0.0034276066, which is better than 8 bits
4090 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
4091 getF32Constant(DAG, 0xbe74c456));
4092 SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
4093 getF32Constant(DAG, 0x3fb3a2b1));
4094 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
4095 LogOfMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
4096 getF32Constant(DAG, 0x3f949a29));
4097 } else if (LimitFloatPrecision <= 12) {
4098 // For floating-point precision of 12:
4099 //
4100 //   LogOfMantissa =
4101 //     -1.7417939f +
4102 //       (2.8212026f +
4103 //         (-1.4699568f +
4104 //           (0.44717955f - 0.56570851e-1f * x) * x) * x) * x;
4106 // error 0.000061011436, which is 14 bits
4107 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
4108 getF32Constant(DAG, 0xbd67b6d6));
4109 SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
4110 getF32Constant(DAG, 0x3ee4f4b8));
4111 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
4112 SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
4113 getF32Constant(DAG, 0x3fbc278b));
4114 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
4115 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
4116 getF32Constant(DAG, 0x40348e95));
4117 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
4118 LogOfMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
4119 getF32Constant(DAG, 0x3fdef31a));
4120 } else { // LimitFloatPrecision <= 18
4121 // For floating-point precision of 18:
4122 //
4123 //   LogOfMantissa =
4124 //     -2.1072184f +
4125 //       (4.2372794f +
4126 //         (-3.7029485f +
4127 //           (2.2781945f +
4128 //             (-0.87823314f +
4129 //               (0.19073739f - 0.17809712e-1f * x) * x) * x) * x) * x)*x;
4131 // error 0.0000023660568, which is better than 18 bits
4132 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
4133 getF32Constant(DAG, 0xbc91e5ac));
4134 SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
4135 getF32Constant(DAG, 0x3e4350aa));
4136 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
4137 SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
4138 getF32Constant(DAG, 0x3f60d3e3));
4139 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
4140 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
4141 getF32Constant(DAG, 0x4011cdf0));
4142 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
4143 SDValue t7 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
4144 getF32Constant(DAG, 0x406cfd1c));
4145 SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
4146 SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8,
4147 getF32Constant(DAG, 0x408797cb));
4148 SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X);
4149 LogOfMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t10,
4150 getF32Constant(DAG, 0x4006dcab));
4153 return DAG.getNode(ISD::FADD, dl, MVT::f32, LogOfExponent, LogOfMantissa);
4154 }
4156 // No special expansion.
4157 return DAG.getNode(ISD::FLOG, dl, Op.getValueType(), Op);
4158 }
4160 /// expandLog2 - Lower a log2 intrinsic. Handles the special sequences for
4161 /// limited-precision mode.
4162 static SDValue expandLog2(SDLoc dl, SDValue Op, SelectionDAG &DAG,
4163 const TargetLowering &TLI) {
4164 if (Op.getValueType() == MVT::f32 &&
4165 LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
4166 SDValue Op1 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Op);
4168 // Get the exponent.
4169 SDValue LogOfExponent = GetExponent(DAG, Op1, TLI, dl);
4171 // Get the significand and build it into a floating-point number with
4172 // exponent of 1.
4173 SDValue X = GetSignificand(DAG, Op1, dl);
4175 // Different possible minimax approximations of significand in
4176 // floating-point for various degrees of accuracy over [1,2].
4177 SDValue Log2ofMantissa;
4178 if (LimitFloatPrecision <= 6) {
4179 // For floating-point precision of 6:
4181 // Log2ofMantissa = -1.6749035f + (2.0246817f - .34484768f * x) * x;
4183 // error 0.0049451742, which is more than 7 bits
4184 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
4185 getF32Constant(DAG, 0xbeb08fe0));
4186 SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
4187 getF32Constant(DAG, 0x40019463));
4188 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
4189 Log2ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
4190 getF32Constant(DAG, 0x3fd6633d));
4191 } else if (LimitFloatPrecision <= 12) {
4192 // For floating-point precision of 12:
4193 //
4194 //   Log2ofMantissa =
4195 //     -2.51285454f +
4196 //       (4.07009056f +
4197 //         (-2.12067489f +
4198 //           (.645142248f - 0.816157886e-1f * x) * x) * x) * x;
4200 // error 0.0000876136000, which is better than 13 bits
4201 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
4202 getF32Constant(DAG, 0xbda7262e));
4203 SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
4204 getF32Constant(DAG, 0x3f25280b));
4205 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
4206 SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
4207 getF32Constant(DAG, 0x4007b923));
4208 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
4209 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
4210 getF32Constant(DAG, 0x40823e2f));
4211 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
4212 Log2ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
4213 getF32Constant(DAG, 0x4020d29c));
4214 } else { // LimitFloatPrecision <= 18
4215 // For floating-point precision of 18:
4216 //
4217 //   Log2ofMantissa =
4218 //     -3.0400495f +
4219 //       (6.1129976f +
4220 //         (-5.3420409f +
4221 //           (3.2865683f +
4222 //             (-1.2669343f +
4223 //               (0.27515199f -
4224 //                 0.25691327e-1f * x) * x) * x) * x) * x) * x;
4226 // error 0.0000018516, which is better than 18 bits
4227 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
4228 getF32Constant(DAG, 0xbcd2769e));
4229 SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
4230 getF32Constant(DAG, 0x3e8ce0b9));
4231 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
4232 SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
4233 getF32Constant(DAG, 0x3fa22ae7));
4234 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
4235 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
4236 getF32Constant(DAG, 0x40525723));
4237 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
4238 SDValue t7 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
4239 getF32Constant(DAG, 0x40aaf200));
4240 SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
4241 SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8,
4242 getF32Constant(DAG, 0x40c39dad));
4243 SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X);
4244 Log2ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t10,
4245 getF32Constant(DAG, 0x4042902c));
4248 return DAG.getNode(ISD::FADD, dl, MVT::f32, LogOfExponent, Log2ofMantissa);
4249 }
4251 // No special expansion.
4252 return DAG.getNode(ISD::FLOG2, dl, Op.getValueType(), Op);
4253 }
4255 /// expandLog10 - Lower a log10 intrinsic. Handles the special sequences for
4256 /// limited-precision mode.
4257 static SDValue expandLog10(SDLoc dl, SDValue Op, SelectionDAG &DAG,
4258 const TargetLowering &TLI) {
4259 if (Op.getValueType() == MVT::f32 &&
4260 LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
4261 SDValue Op1 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Op);
4263 // Scale the exponent by log10(2) [0.30102999f].
4264 SDValue Exp = GetExponent(DAG, Op1, TLI, dl);
4265 SDValue LogOfExponent = DAG.getNode(ISD::FMUL, dl, MVT::f32, Exp,
4266 getF32Constant(DAG, 0x3e9a209a));
4268 // Get the significand and build it into a floating-point number with
4269 // exponent of 1.
4270 SDValue X = GetSignificand(DAG, Op1, dl);
4272 SDValue Log10ofMantissa;
4273 if (LimitFloatPrecision <= 6) {
4274 // For floating-point precision of 6:
4276 //   Log10ofMantissa =
4277 //     -0.50419619f +
4278 //       (0.60948995f - 0.10380950f * x) * x;
4280 // error 0.0014886165, which is 6 bits
4281 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
4282 getF32Constant(DAG, 0xbdd49a13));
4283 SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
4284 getF32Constant(DAG, 0x3f1c0789));
4285 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
4286 Log10ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
4287 getF32Constant(DAG, 0x3f011300));
4288 } else if (LimitFloatPrecision <= 12) {
4289 // For floating-point precision of 12:
4291 //   Log10ofMantissa =
4292 //     -0.64831180f +
4293 //       (0.91751397f +
4294 //         (-0.31664806f + 0.47637168e-1f * x) * x) * x;
4296 // error 0.00019228036, which is better than 12 bits
4297 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
4298 getF32Constant(DAG, 0x3d431f31));
4299 SDValue t1 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t0,
4300 getF32Constant(DAG, 0x3ea21fb2));
4301 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
4302 SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
4303 getF32Constant(DAG, 0x3f6ae232));
4304 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
4305 Log10ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t4,
4306 getF32Constant(DAG, 0x3f25f7c3));
4307 } else { // LimitFloatPrecision <= 18
4308 // For floating-point precision of 18:
4310 //   Log10ofMantissa =
4311 //     -0.84299375f +
4312 //       (1.5327582f +
4313 //         (-1.0688956f +
4314 //           (0.49102474f +
4315 //             (-0.12539807f + 0.13508273e-1f * x) * x) * x) * x) * x;
4317 // error 0.0000037995730, which is better than 18 bits
4318 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
4319 getF32Constant(DAG, 0x3c5d51ce));
4320 SDValue t1 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t0,
4321 getF32Constant(DAG, 0x3e00685a));
4322 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
4323 SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
4324 getF32Constant(DAG, 0x3efb6798));
4325 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
4326 SDValue t5 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t4,
4327 getF32Constant(DAG, 0x3f88d192));
4328 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
4329 SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
4330 getF32Constant(DAG, 0x3fc4316c));
4331 SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
4332 Log10ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t8,
4333 getF32Constant(DAG, 0x3f57ce70));
4336 return DAG.getNode(ISD::FADD, dl, MVT::f32, LogOfExponent, Log10ofMantissa);
4337 }
4339 // No special expansion.
4340 return DAG.getNode(ISD::FLOG10, dl, Op.getValueType(), Op);
4341 }
4343 /// expandExp2 - Lower an exp2 intrinsic. Handles the special sequences for
4344 /// limited-precision mode.
4345 static SDValue expandExp2(SDLoc dl, SDValue Op, SelectionDAG &DAG,
4346 const TargetLowering &TLI) {
4347 if (Op.getValueType() == MVT::f32 &&
4348 LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
4349 SDValue IntegerPartOfX = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, Op);
4351 // FractionalPartOfX = x - (float)IntegerPartOfX;
4352 SDValue t1 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, IntegerPartOfX);
4353 SDValue X = DAG.getNode(ISD::FSUB, dl, MVT::f32, Op, t1);
4355 // IntegerPartOfX <<= 23;
4356 IntegerPartOfX = DAG.getNode(ISD::SHL, dl, MVT::i32, IntegerPartOfX,
4357 DAG.getConstant(23, TLI.getPointerTy()));
4359 SDValue TwoToFractionalPartOfX;
4360 if (LimitFloatPrecision <= 6) {
4361 // For floating-point precision of 6:
4363 //   TwoToFractionalPartOfX =
4364 //     0.997535578f +
4365 //       (0.735607626f + 0.252464424f * x) * x;
4367 // error 0.0144103317, which is 6 bits
4368 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
4369 getF32Constant(DAG, 0x3e814304));
4370 SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
4371 getF32Constant(DAG, 0x3f3c50c8));
4372 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
4373 TwoToFractionalPartOfX = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
4374 getF32Constant(DAG, 0x3f7f5e7e));
4375 } else if (LimitFloatPrecision <= 12) {
4376 // For floating-point precision of 12:
4378 //   TwoToFractionalPartOfX =
4379 //     0.999892986f +
4380 //       (0.696457318f +
4381 //         (0.224338339f + 0.792043434e-1f * x) * x) * x;
4383 // error 0.000107046256, which is 13 to 14 bits
4384 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
4385 getF32Constant(DAG, 0x3da235e3));
4386 SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
4387 getF32Constant(DAG, 0x3e65b8f3));
4388 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
4389 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
4390 getF32Constant(DAG, 0x3f324b07));
4391 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
4392 TwoToFractionalPartOfX = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
4393 getF32Constant(DAG, 0x3f7ff8fd));
4394 } else { // LimitFloatPrecision <= 18
4395 // For floating-point precision of 18:
4397 //   TwoToFractionalPartOfX =
4398 //     0.999999982f +
4399 //       (0.693148872f +
4400 //         (0.240227044f +
4401 //           (0.554906021e-1f +
4402 //             (0.961591928e-2f +
4403 //               (0.136028312e-2f + 0.157059148e-3f *x)*x)*x)*x)*x)*x;
4404 // error 2.47208000*10^(-7), which is better than 18 bits
4405 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
4406 getF32Constant(DAG, 0x3924b03e));
4407 SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
4408 getF32Constant(DAG, 0x3ab24b87));
4409 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
4410 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
4411 getF32Constant(DAG, 0x3c1d8c17));
4412 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
4413 SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
4414 getF32Constant(DAG, 0x3d634a1d));
4415 SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
4416 SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8,
4417 getF32Constant(DAG, 0x3e75fe14));
4418 SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X);
4419 SDValue t11 = DAG.getNode(ISD::FADD, dl, MVT::f32, t10,
4420 getF32Constant(DAG, 0x3f317234));
4421 SDValue t12 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t11, X);
4422 TwoToFractionalPartOfX = DAG.getNode(ISD::FADD, dl, MVT::f32, t12,
4423 getF32Constant(DAG, 0x3f800000));
4426 // Add the exponent into the result in integer domain.
4427 SDValue t13 = DAG.getNode(ISD::BITCAST, dl, MVT::i32,
4428 TwoToFractionalPartOfX);
4429 return DAG.getNode(ISD::BITCAST, dl, MVT::f32,
4430 DAG.getNode(ISD::ADD, dl, MVT::i32,
4431 t13, IntegerPartOfX));
4432 }
4434 // No special expansion.
4435 return DAG.getNode(ISD::FEXP2, dl, Op.getValueType(), Op);
4436 }
4438 /// expandPow - Lower a pow intrinsic. Handles the special sequences for
4439 /// limited-precision mode when the base is 10.0f.
4440 static SDValue expandPow(SDLoc dl, SDValue LHS, SDValue RHS,
4441 SelectionDAG &DAG, const TargetLowering &TLI) {
4442 bool IsExp10 = false;
4443 if (LHS.getValueType() == MVT::f32 && RHS.getValueType() == MVT::f32 &&
4444 LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
4445 if (ConstantFPSDNode *LHSC = dyn_cast<ConstantFPSDNode>(LHS)) {
4446 APFloat Ten(10.0f);
4447 IsExp10 = LHSC->isExactlyValue(Ten);
4448 }
4449 }
4451 if (IsExp10) {
4452 // Put the exponent in the right bit position for later addition to the
4453 // final result:
4455 // #define LOG2OF10 3.3219281f
4456 // IntegerPartOfX = (int32_t)(x * LOG2OF10);
4457 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, RHS,
4458 getF32Constant(DAG, 0x40549a78));
4459 SDValue IntegerPartOfX = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, t0);
4461 // FractionalPartOfX = x - (float)IntegerPartOfX;
4462 SDValue t1 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, IntegerPartOfX);
4463 SDValue X = DAG.getNode(ISD::FSUB, dl, MVT::f32, t0, t1);
4465 // IntegerPartOfX <<= 23;
4466 IntegerPartOfX = DAG.getNode(ISD::SHL, dl, MVT::i32, IntegerPartOfX,
4467 DAG.getConstant(23, TLI.getPointerTy()));
4469 SDValue TwoToFractionalPartOfX;
4470 if (LimitFloatPrecision <= 6) {
4471 // For floating-point precision of 6:
4473 //   twoToFractionalPartOfX =
4474 //     0.997535578f +
4475 //       (0.735607626f + 0.252464424f * x) * x;
4477 // error 0.0144103317, which is 6 bits
4478 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
4479 getF32Constant(DAG, 0x3e814304));
4480 SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
4481 getF32Constant(DAG, 0x3f3c50c8));
4482 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
4483 TwoToFractionalPartOfX = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
4484 getF32Constant(DAG, 0x3f7f5e7e));
4485 } else if (LimitFloatPrecision <= 12) {
4486 // For floating-point precision of 12:
4488 //   TwoToFractionalPartOfX =
4489 //     0.999892986f +
4490 //       (0.696457318f +
4491 //         (0.224338339f + 0.792043434e-1f * x) * x) * x;
4493 // error 0.000107046256, which is 13 to 14 bits
4494 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
4495 getF32Constant(DAG, 0x3da235e3));
4496 SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
4497 getF32Constant(DAG, 0x3e65b8f3));
4498 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
4499 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
4500 getF32Constant(DAG, 0x3f324b07));
4501 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
4502 TwoToFractionalPartOfX = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
4503 getF32Constant(DAG, 0x3f7ff8fd));
4504 } else { // LimitFloatPrecision <= 18
4505 // For floating-point precision of 18:
4507 //   TwoToFractionalPartOfX =
4508 //     0.999999982f +
4509 //       (0.693148872f +
4510 //         (0.240227044f +
4511 //           (0.554906021e-1f +
4512 //             (0.961591928e-2f +
4513 //               (0.136028312e-2f + 0.157059148e-3f *x)*x)*x)*x)*x)*x;
4514 // error 2.47208000*10^(-7), which is better than 18 bits
4515 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
4516 getF32Constant(DAG, 0x3924b03e));
4517 SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
4518 getF32Constant(DAG, 0x3ab24b87));
4519 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
4520 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
4521 getF32Constant(DAG, 0x3c1d8c17));
4522 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
4523 SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
4524 getF32Constant(DAG, 0x3d634a1d));
4525 SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
4526 SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8,
4527 getF32Constant(DAG, 0x3e75fe14));
4528 SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X);
4529 SDValue t11 = DAG.getNode(ISD::FADD, dl, MVT::f32, t10,
4530 getF32Constant(DAG, 0x3f317234));
4531 SDValue t12 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t11, X);
4532 TwoToFractionalPartOfX = DAG.getNode(ISD::FADD, dl, MVT::f32, t12,
4533 getF32Constant(DAG, 0x3f800000));
4536 SDValue t13 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, TwoToFractionalPartOfX);
4537 return DAG.getNode(ISD::BITCAST, dl, MVT::f32,
4538 DAG.getNode(ISD::ADD, dl, MVT::i32,
4539 t13, IntegerPartOfX));
4540 }
4542 // No special expansion.
4543 return DAG.getNode(ISD::FPOW, dl, LHS.getValueType(), LHS, RHS);
4544 }
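// The pow(10, x) expansion mirrors expandExp: 10^x == 2^(x * log2(10)),
// so scaling the exponent by log2(10) ~= 3.3219281 reduces the problem to
// the same integer-exponent-plus-2^fraction scheme. For example,
// pow(10.0f, 2.0f) computes 2^6.6438562 ~= 100.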
4547 /// ExpandPowI - Expand an llvm.powi intrinsic.
4548 static SDValue ExpandPowI(SDLoc DL, SDValue LHS, SDValue RHS,
4549 SelectionDAG &DAG) {
4550 // If RHS is a constant, we can expand this out to a multiplication tree,
4551 // otherwise we end up lowering to a call to __powidf2 (for example). When
4552 // optimizing for size, we only want to do this if the expansion would produce
4553 // a small number of multiplies, otherwise we do the full expansion.
4554 if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS)) {
4555 // Get the exponent as a positive value.
4556 unsigned Val = RHSC->getSExtValue();
4557 if ((int)Val < 0) Val = -Val;
4559 // powi(x, 0) -> 1.0
4560 if (Val == 0)
4561 return DAG.getConstantFP(1.0, LHS.getValueType());
4563 const Function *F = DAG.getMachineFunction().getFunction();
4564 if (!F->getAttributes().hasAttribute(AttributeSet::FunctionIndex,
4565 Attribute::OptimizeForSize) ||
4566 // If optimizing for size, don't insert too many multiplies. This
4567 // inserts up to 5 multiplies.
4568 CountPopulation_32(Val)+Log2_32(Val) < 7) {
4569 // We use the simple binary decomposition method to generate the multiply
4570 // sequence. There are more optimal ways to do this (for example,
4571 // powi(x,15) generates one more multiply than it should), but this has
4572 // the benefit of being both really simple and much better than a libcall.
4573 SDValue Res; // Logically starts equal to 1.0
4574 SDValue CurSquare = LHS;
4575 while (Val) {
4576 if (Val & 1) {
4577 if (Res.getNode())
4578 Res = DAG.getNode(ISD::FMUL, DL, Res.getValueType(), Res, CurSquare);
4579 else
4580 Res = CurSquare;  // 1.0*CurSquare.
4581 }
4583 CurSquare = DAG.getNode(ISD::FMUL, DL, CurSquare.getValueType(),
4584 CurSquare, CurSquare);
4585 Val >>= 1;
4586 }
4588 // If the original was negative, invert the result, producing 1/(x*x*x).
4589 if (RHSC->getSExtValue() < 0)
4590 Res = DAG.getNode(ISD::FDIV, DL, LHS.getValueType(),
4591 DAG.getConstantFP(1.0, LHS.getValueType()), Res);
4592 return Res;
4593 }
4594 }
4596 // Otherwise, expand to a libcall.
4597 return DAG.getNode(ISD::FPOWI, DL, LHS.getValueType(), LHS, RHS);
4598 }
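// A plain-C sketch of the same binary-decomposition idea (illustrative
// only, not part of this file's logic):
//
//   double powi(double x, unsigned n) {
//     double res = 1.0, sq = x;
//     while (n) {
//       if (n & 1) res *= sq;  // multiply in the current power of two
//       sq *= sq;              // x, x^2, x^4, ...
//       n >>= 1;
//     }
//     return res;
//   }
//
// For Val = 13 (0b1101), CountPopulation_32(13) + Log2_32(13) == 3 + 3 == 6,
// which passes the < 7 budget above, so the naive chain of 12 multiplies
// collapses to a handful of FMULs instead of a libcall.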
4600 // getTruncatedArgReg - Find underlying register used for a truncated
4601 // argument.
4602 static unsigned getTruncatedArgReg(const SDValue &N) {
4603 if (N.getOpcode() != ISD::TRUNCATE)
4604 return 0;
4606 const SDValue &Ext = N.getOperand(0);
4607 if (Ext.getOpcode() == ISD::AssertZext ||
4608 Ext.getOpcode() == ISD::AssertSext) {
4609 const SDValue &CFR = Ext.getOperand(0);
4610 if (CFR.getOpcode() == ISD::CopyFromReg)
4611 return cast<RegisterSDNode>(CFR.getOperand(1))->getReg();
4612 if (CFR.getOpcode() == ISD::TRUNCATE)
4613 return getTruncatedArgReg(CFR);
4614 }
4615 return 0;
4616 }
4618 /// EmitFuncArgumentDbgValue - If the DbgValueInst is a dbg_value of a function
4619 /// argument, create the corresponding DBG_VALUE machine instruction for it now.
4620 /// At the end of instruction selection, they will be inserted to the entry BB.
4621 bool
4622 SelectionDAGBuilder::EmitFuncArgumentDbgValue(const Value *V, MDNode *Variable,
4623 int64_t Offset, bool IsIndirect,
4624 const SDValue &N) {
4625 const Argument *Arg = dyn_cast<Argument>(V);
4626 if (!Arg)
4627 return false;
4629 MachineFunction &MF = DAG.getMachineFunction();
4630 const TargetInstrInfo *TII = DAG.getSubtarget().getInstrInfo();
4632 // Ignore inlined function arguments here.
4633 DIVariable DV(Variable);
4634 if (DV.isInlinedFnArgument(MF.getFunction()))
4635 return false;
4637 Optional<MachineOperand> Op;
4638 // Some arguments' frame index is recorded during argument lowering.
4639 if (int FI = FuncInfo.getArgumentFrameIndex(Arg))
4640 Op = MachineOperand::CreateFI(FI);
4642 if (!Op && N.getNode()) {
4643 unsigned Reg;
4644 if (N.getOpcode() == ISD::CopyFromReg)
4645 Reg = cast<RegisterSDNode>(N.getOperand(1))->getReg();
4646 else
4647 Reg = getTruncatedArgReg(N);
4648 if (Reg && TargetRegisterInfo::isVirtualRegister(Reg)) {
4649 MachineRegisterInfo &RegInfo = MF.getRegInfo();
4650 unsigned PR = RegInfo.getLiveInPhysReg(Reg);
4651 if (PR)
4652 Reg = PR;
4653 }
4654 if (Reg)
4655 Op = MachineOperand::CreateReg(Reg, false);
4656 }
4658 if (!Op) {
4659 // Check if ValueMap has reg number.
4660 DenseMap<const Value *, unsigned>::iterator VMI = FuncInfo.ValueMap.find(V);
4661 if (VMI != FuncInfo.ValueMap.end())
4662 Op = MachineOperand::CreateReg(VMI->second, false);
4663 }
4665 if (!Op && N.getNode())
4666 // Check if frame index is available.
4667 if (LoadSDNode *LNode = dyn_cast<LoadSDNode>(N.getNode()))
4668 if (FrameIndexSDNode *FINode =
4669 dyn_cast<FrameIndexSDNode>(LNode->getBasePtr().getNode()))
4670 Op = MachineOperand::CreateFI(FINode->getIndex());
4672 if (!Op)
4673 return false;
4675 if (Op->isReg())
4676 FuncInfo.ArgDbgValues.push_back(BuildMI(MF, getCurDebugLoc(),
4677 TII->get(TargetOpcode::DBG_VALUE),
4678 IsIndirect,
4679 Op->getReg(), Offset, Variable));
4680 else
4681 FuncInfo.ArgDbgValues.push_back(
4682 BuildMI(MF, getCurDebugLoc(), TII->get(TargetOpcode::DBG_VALUE))
4683 .addOperand(*Op).addImm(Offset).addMetadata(Variable));
4685 return true;
4686 }
4688 // VisualStudio defines setjmp as _setjmp
4689 #if defined(_MSC_VER) && defined(setjmp) && \
4690 !defined(setjmp_undefined_for_msvc)
4691 # pragma push_macro("setjmp")
4692 # undef setjmp
4693 # define setjmp_undefined_for_msvc
4694 #endif
4696 /// visitIntrinsicCall - Lower the call to the specified intrinsic function. If
4697 /// we want to emit this as a call to a named external function, return the name
4698 /// otherwise lower it and return null.
4700 SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
4701 const TargetLowering *TLI = TM.getSubtargetImpl()->getTargetLowering();
4702 SDLoc sdl = getCurSDLoc();
4703 DebugLoc dl = getCurDebugLoc();
4706 switch (Intrinsic) {
4707 default:
4708 // By default, turn this into a target intrinsic node.
4709 visitTargetIntrinsic(I, Intrinsic);
4710 return nullptr;
4711 case Intrinsic::vastart: visitVAStart(I); return nullptr;
4712 case Intrinsic::vaend: visitVAEnd(I); return nullptr;
4713 case Intrinsic::vacopy: visitVACopy(I); return nullptr;
4714 case Intrinsic::returnaddress:
4715 setValue(&I, DAG.getNode(ISD::RETURNADDR, sdl, TLI->getPointerTy(),
4716 getValue(I.getArgOperand(0))));
4717 return nullptr;
4718 case Intrinsic::frameaddress:
4719 setValue(&I, DAG.getNode(ISD::FRAMEADDR, sdl, TLI->getPointerTy(),
4720 getValue(I.getArgOperand(0))));
4721 return nullptr;
4722 case Intrinsic::read_register: {
4723 Value *Reg = I.getArgOperand(0);
4724 SDValue RegName = DAG.getMDNode(cast<MDNode>(Reg));
4725 EVT VT =
4726 TM.getSubtargetImpl()->getTargetLowering()->getValueType(I.getType());
4727 setValue(&I, DAG.getNode(ISD::READ_REGISTER, sdl, VT, RegName));
4728 return nullptr;
4729 }
4730 case Intrinsic::write_register: {
4731 Value *Reg = I.getArgOperand(0);
4732 Value *RegValue = I.getArgOperand(1);
4733 SDValue Chain = getValue(RegValue).getOperand(0);
4734 SDValue RegName = DAG.getMDNode(cast<MDNode>(Reg));
4735 DAG.setRoot(DAG.getNode(ISD::WRITE_REGISTER, sdl, MVT::Other, Chain,
4736 RegName, getValue(RegValue)));
4737 return nullptr;
4738 }
4739 case Intrinsic::setjmp:
4740 return &"_setjmp"[!TLI->usesUnderscoreSetJmp()];
4741 case Intrinsic::longjmp:
4742 return &"_longjmp"[!TLI->usesUnderscoreLongJmp()];
4743 case Intrinsic::memcpy: {
4744 // Assert for address < 256 since we support only user defined address
4745 // spaces.
4746 assert(cast<PointerType>(I.getArgOperand(0)->getType())->getAddressSpace()
4747 < 256 &&
4748 cast<PointerType>(I.getArgOperand(1)->getType())->getAddressSpace()
4749 < 256 &&
4750 "Unknown address space");
4751 SDValue Op1 = getValue(I.getArgOperand(0));
4752 SDValue Op2 = getValue(I.getArgOperand(1));
4753 SDValue Op3 = getValue(I.getArgOperand(2));
4754 unsigned Align = cast<ConstantInt>(I.getArgOperand(3))->getZExtValue();
4755 if (!Align)
4756 Align = 1; // @llvm.memcpy defines 0 and 1 to both mean no alignment.
4757 bool isVol = cast<ConstantInt>(I.getArgOperand(4))->getZExtValue();
4758 DAG.setRoot(DAG.getMemcpy(getRoot(), sdl, Op1, Op2, Op3, Align, isVol, false,
4759 MachinePointerInfo(I.getArgOperand(0)),
4760 MachinePointerInfo(I.getArgOperand(1))));
4761 return nullptr;
4762 }
4763 case Intrinsic::memset: {
4764 // Assert for address < 256 since we support only user defined address
4765 // spaces.
4766 assert(cast<PointerType>(I.getArgOperand(0)->getType())->getAddressSpace()
4767 < 256 &&
4768 "Unknown address space");
4769 SDValue Op1 = getValue(I.getArgOperand(0));
4770 SDValue Op2 = getValue(I.getArgOperand(1));
4771 SDValue Op3 = getValue(I.getArgOperand(2));
4772 unsigned Align = cast<ConstantInt>(I.getArgOperand(3))->getZExtValue();
4773 if (!Align)
4774 Align = 1; // @llvm.memset defines 0 and 1 to both mean no alignment.
4775 bool isVol = cast<ConstantInt>(I.getArgOperand(4))->getZExtValue();
4776 DAG.setRoot(DAG.getMemset(getRoot(), sdl, Op1, Op2, Op3, Align, isVol,
4777 MachinePointerInfo(I.getArgOperand(0))));
4778 return nullptr;
4779 }
4780 case Intrinsic::memmove: {
4781 // Assert for address < 256 since we support only user defined address
4782 // spaces.
4783 assert(cast<PointerType>(I.getArgOperand(0)->getType())->getAddressSpace()
4784 < 256 &&
4785 cast<PointerType>(I.getArgOperand(1)->getType())->getAddressSpace()
4786 < 256 &&
4787 "Unknown address space");
4788 SDValue Op1 = getValue(I.getArgOperand(0));
4789 SDValue Op2 = getValue(I.getArgOperand(1));
4790 SDValue Op3 = getValue(I.getArgOperand(2));
4791 unsigned Align = cast<ConstantInt>(I.getArgOperand(3))->getZExtValue();
4792 if (!Align)
4793 Align = 1; // @llvm.memmove defines 0 and 1 to both mean no alignment.
4794 bool isVol = cast<ConstantInt>(I.getArgOperand(4))->getZExtValue();
4795 DAG.setRoot(DAG.getMemmove(getRoot(), sdl, Op1, Op2, Op3, Align, isVol,
4796 MachinePointerInfo(I.getArgOperand(0)),
4797 MachinePointerInfo(I.getArgOperand(1))));
4798 return nullptr;
4799 }
4800 case Intrinsic::dbg_declare: {
4801 const DbgDeclareInst &DI = cast<DbgDeclareInst>(I);
4802 MDNode *Variable = DI.getVariable();
4803 const Value *Address = DI.getAddress();
4804 DIVariable DIVar(Variable);
4805 assert((!DIVar || DIVar.isVariable()) &&
4806 "Variable in DbgDeclareInst should be either null or a DIVariable.");
4807 if (!Address || !DIVar) {
4808 DEBUG(dbgs() << "Dropping debug info for " << DI << "\n");
4809 return nullptr;
4810 }
4812 // Check if address has undef value.
4813 if (isa<UndefValue>(Address) ||
4814 (Address->use_empty() && !isa<Argument>(Address))) {
4815 DEBUG(dbgs() << "Dropping debug info for " << DI << "\n");
4819 SDValue &N = NodeMap[Address];
4820 if (!N.getNode() && isa<Argument>(Address))
4821 // Check unused arguments map.
4822 N = UnusedArgNodeMap[Address];
4825 if (const BitCastInst *BCI = dyn_cast<BitCastInst>(Address))
4826 Address = BCI->getOperand(0);
4827 // Parameters are handled specially.
bool isParameter =
    (DIVariable(Variable).getTag() == dwarf::DW_TAG_arg_variable ||
     isa<Argument>(Address));

const AllocaInst *AI = dyn_cast<AllocaInst>(Address);

if (isParameter && !AI) {
  FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(N.getNode());
  if (FINode)
    // Byval parameter. We have a frame index at this point.
    SDV = DAG.getFrameIndexDbgValue(Variable, FINode->getIndex(),
                                    0, dl, SDNodeOrder);
  else {
    // Address is an argument, so try to emit its dbg value using
    // virtual register info from the FuncInfo.ValueMap.
    EmitFuncArgumentDbgValue(Address, Variable, 0, false, N);
    return nullptr;
  }
} else if (AI)
  SDV = DAG.getDbgValue(Variable, N.getNode(), N.getResNo(),
                        true, 0, dl, SDNodeOrder);
else {
  // Can't do anything with other non-AI cases yet.
  DEBUG(dbgs() << "Dropping debug info for " << DI << "\n");
  DEBUG(dbgs() << "non-AllocaInst issue for Address: \n\t");
  DEBUG(Address->dump());
  return nullptr;
}
DAG.AddDbgValue(SDV, N.getNode(), isParameter);
} else {
  // If Address is an argument then try to emit its dbg value using
  // virtual register info from the FuncInfo.ValueMap.
4860 if (!EmitFuncArgumentDbgValue(Address, Variable, 0, false, N)) {
// If the variable is pinned by an alloca in a dominating bb then
4862 // use StaticAllocaMap.
4863 if (const AllocaInst *AI = dyn_cast<AllocaInst>(Address)) {
4864 if (AI->getParent() != DI.getParent()) {
4865 DenseMap<const AllocaInst*, int>::iterator SI =
4866 FuncInfo.StaticAllocaMap.find(AI);
4867 if (SI != FuncInfo.StaticAllocaMap.end()) {
4868 SDV = DAG.getFrameIndexDbgValue(Variable, SI->second,
4869 0, dl, SDNodeOrder);
        DAG.AddDbgValue(SDV, nullptr, false);
        return nullptr;
      }
    }
  }
  DEBUG(dbgs() << "Dropping debug info for " << DI << "\n");
}
}
return nullptr;
}
4880 case Intrinsic::dbg_value: {
4881 const DbgValueInst &DI = cast<DbgValueInst>(I);
4882 DIVariable DIVar(DI.getVariable());
4883 assert((!DIVar || DIVar.isVariable()) &&
4884 "Variable in DbgValueInst should be either null or a DIVariable.");
if (!DIVar)
  return nullptr;

MDNode *Variable = DI.getVariable();
uint64_t Offset = DI.getOffset();
const Value *V = DI.getValue();
if (!V)
  return nullptr;

SDDbgValue *SDV;
4895 if (isa<ConstantInt>(V) || isa<ConstantFP>(V) || isa<UndefValue>(V)) {
4896 SDV = DAG.getConstantDbgValue(Variable, V, Offset, dl, SDNodeOrder);
4897 DAG.AddDbgValue(SDV, nullptr, false);
} else {
  // Do not use getValue() in here; we don't want to generate code at
4900 // this point if it hasn't been done yet.
4901 SDValue N = NodeMap[V];
4902 if (!N.getNode() && isa<Argument>(V))
4903 // Check unused arguments map.
4904 N = UnusedArgNodeMap[V];
if (N.getNode()) {
  // A dbg.value for an alloca is always indirect.
4907 bool IsIndirect = isa<AllocaInst>(V) || Offset != 0;
4908 if (!EmitFuncArgumentDbgValue(V, Variable, Offset, IsIndirect, N)) {
4909 SDV = DAG.getDbgValue(Variable, N.getNode(),
4910 N.getResNo(), IsIndirect,
4911 Offset, dl, SDNodeOrder);
4912 DAG.AddDbgValue(SDV, N.getNode(), false);
  }
} else if (!V->use_empty()) {
4915 // Do not call getValue(V) yet, as we don't want to generate code.
4916 // Remember it for later.
4917 DanglingDebugInfo DDI(&DI, dl, SDNodeOrder);
4918 DanglingDebugInfoMap[V] = DDI;
} else {
  // We may expand this to cover more cases. One case where we have no
4921 // data available is an unreferenced parameter.
  DEBUG(dbgs() << "Dropping debug info for " << DI << "\n");
}
}
4926 // Build a debug info table entry.
4927 if (const BitCastInst *BCI = dyn_cast<BitCastInst>(V))
4928 V = BCI->getOperand(0);
4929 const AllocaInst *AI = dyn_cast<AllocaInst>(V);
4930 // Don't handle byval struct arguments or VLAs, for example.
if (!AI) {
  DEBUG(dbgs() << "Dropping debug location info for:\n  " << DI << "\n");
  DEBUG(dbgs() << "  Last seen at:\n    " << *V << "\n");
  return nullptr;
}
4936 DenseMap<const AllocaInst*, int>::iterator SI =
4937 FuncInfo.StaticAllocaMap.find(AI);
4938 if (SI == FuncInfo.StaticAllocaMap.end())
  return nullptr; // VLAs.
return nullptr;
}
4943 case Intrinsic::eh_typeid_for: {
4944 // Find the type id for the given typeinfo.
4945 GlobalVariable *GV = ExtractTypeInfo(I.getArgOperand(0));
4946 unsigned TypeID = DAG.getMachineFunction().getMMI().getTypeIDFor(GV);
Res = DAG.getConstant(TypeID, MVT::i32);
setValue(&I, Res);
return nullptr;
}
4952 case Intrinsic::eh_return_i32:
4953 case Intrinsic::eh_return_i64:
4954 DAG.getMachineFunction().getMMI().setCallsEHReturn(true);
DAG.setRoot(DAG.getNode(ISD::EH_RETURN, sdl,
                        MVT::Other,
                        getControlRoot(),
                        getValue(I.getArgOperand(0)),
                        getValue(I.getArgOperand(1))));
return nullptr;
4961 case Intrinsic::eh_unwind_init:
DAG.getMachineFunction().getMMI().setCallsUnwindInit(true);
return nullptr;
4964 case Intrinsic::eh_dwarf_cfa: {
4965 SDValue CfaArg = DAG.getSExtOrTrunc(getValue(I.getArgOperand(0)), sdl,
4966 TLI->getPointerTy());
SDValue Offset = DAG.getNode(ISD::ADD, sdl,
                             CfaArg.getValueType(),
                             DAG.getNode(ISD::FRAME_TO_ARGS_OFFSET, sdl,
                                         CfaArg.getValueType()),
                             CfaArg);
4972 SDValue FA = DAG.getNode(ISD::FRAMEADDR, sdl,
4973 TLI->getPointerTy(),
4974 DAG.getConstant(0, TLI->getPointerTy()));
setValue(&I, DAG.getNode(ISD::ADD, sdl, FA.getValueType(),
                         FA, Offset));
return nullptr;
}
4979 case Intrinsic::eh_sjlj_callsite: {
4980 MachineModuleInfo &MMI = DAG.getMachineFunction().getMMI();
4981 ConstantInt *CI = dyn_cast<ConstantInt>(I.getArgOperand(0));
4982 assert(CI && "Non-constant call site value in eh.sjlj.callsite!");
4983 assert(MMI.getCurrentCallSite() == 0 && "Overlapping call sites!");
MMI.setCurrentCallSite(CI->getZExtValue());
return nullptr;
}
4988 case Intrinsic::eh_sjlj_functioncontext: {
4989 // Get and store the index of the function context.
4990 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
AllocaInst *FnCtx =
    cast<AllocaInst>(I.getArgOperand(0)->stripPointerCasts());
4993 int FI = FuncInfo.StaticAllocaMap[FnCtx];
MFI->setFunctionContextIndex(FI);
return nullptr;
}
4997 case Intrinsic::eh_sjlj_setjmp: {
SDValue Ops[2];
Ops[0] = getRoot();
Ops[1] = getValue(I.getArgOperand(0));
5001 SDValue Op = DAG.getNode(ISD::EH_SJLJ_SETJMP, sdl,
5002 DAG.getVTList(MVT::i32, MVT::Other), Ops);
5003 setValue(&I, Op.getValue(0));
DAG.setRoot(Op.getValue(1));
return nullptr;
}
5007 case Intrinsic::eh_sjlj_longjmp: {
5008 DAG.setRoot(DAG.getNode(ISD::EH_SJLJ_LONGJMP, sdl, MVT::Other,
getRoot(), getValue(I.getArgOperand(0))));
return nullptr;
}
5013 case Intrinsic::x86_mmx_pslli_w:
5014 case Intrinsic::x86_mmx_pslli_d:
5015 case Intrinsic::x86_mmx_pslli_q:
5016 case Intrinsic::x86_mmx_psrli_w:
5017 case Intrinsic::x86_mmx_psrli_d:
5018 case Intrinsic::x86_mmx_psrli_q:
5019 case Intrinsic::x86_mmx_psrai_w:
5020 case Intrinsic::x86_mmx_psrai_d: {
5021 SDValue ShAmt = getValue(I.getArgOperand(1));
if (isa<ConstantSDNode>(ShAmt)) {
  visitTargetIntrinsic(I, Intrinsic);
  return nullptr;
}
5026 unsigned NewIntrinsic = 0;
5027 EVT ShAmtVT = MVT::v2i32;
switch (Intrinsic) {
case Intrinsic::x86_mmx_pslli_w:
  NewIntrinsic = Intrinsic::x86_mmx_psll_w;
  break;
case Intrinsic::x86_mmx_pslli_d:
  NewIntrinsic = Intrinsic::x86_mmx_psll_d;
  break;
case Intrinsic::x86_mmx_pslli_q:
  NewIntrinsic = Intrinsic::x86_mmx_psll_q;
  break;
case Intrinsic::x86_mmx_psrli_w:
  NewIntrinsic = Intrinsic::x86_mmx_psrl_w;
  break;
case Intrinsic::x86_mmx_psrli_d:
  NewIntrinsic = Intrinsic::x86_mmx_psrl_d;
  break;
case Intrinsic::x86_mmx_psrli_q:
  NewIntrinsic = Intrinsic::x86_mmx_psrl_q;
  break;
case Intrinsic::x86_mmx_psrai_w:
  NewIntrinsic = Intrinsic::x86_mmx_psra_w;
  break;
case Intrinsic::x86_mmx_psrai_d:
  NewIntrinsic = Intrinsic::x86_mmx_psra_d;
  break;
default: llvm_unreachable("Impossible intrinsic");  // Can't reach here.
}
// The vector shift intrinsics with scalars use 32b shift amounts but
// the sse2/mmx shift instructions read 64 bits. Set the upper 32 bits
// to be zero.
// We must do this early because v2i32 is not a legal type.
SDValue ShOps[2];
ShOps[0] = ShAmt;
ShOps[1] = DAG.getConstant(0, MVT::i32);
5063 ShAmt = DAG.getNode(ISD::BUILD_VECTOR, sdl, ShAmtVT, ShOps);
5064 EVT DestVT = TLI->getValueType(I.getType());
5065 ShAmt = DAG.getNode(ISD::BITCAST, sdl, DestVT, ShAmt);
5066 Res = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, sdl, DestVT,
5067 DAG.getConstant(NewIntrinsic, MVT::i32),
getValue(I.getArgOperand(0)), ShAmt);
setValue(&I, Res);
return nullptr;
}
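// Sketch of the rewrite performed above (shown as IR, for illustration):
//   %r = call x86_mmx @llvm.x86.mmx.pslli.w(x86_mmx %v, i32 %amt)
// with a non-constant %amt conceptually becomes
//   %z = <2 x i32> <%amt, 0>          ; upper 32 bits zeroed
//   %r = call x86_mmx @llvm.x86.mmx.psll.w(x86_mmx %v, x86_mmx bitcast(%z))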
5072 case Intrinsic::x86_avx_vinsertf128_pd_256:
5073 case Intrinsic::x86_avx_vinsertf128_ps_256:
5074 case Intrinsic::x86_avx_vinsertf128_si_256:
5075 case Intrinsic::x86_avx2_vinserti128: {
5076 EVT DestVT = TLI->getValueType(I.getType());
5077 EVT ElVT = TLI->getValueType(I.getArgOperand(1)->getType());
5078 uint64_t Idx = (cast<ConstantInt>(I.getArgOperand(2))->getZExtValue() & 1) *
5079 ElVT.getVectorNumElements();
5080 Res = DAG.getNode(ISD::INSERT_SUBVECTOR, sdl, DestVT,
5081 getValue(I.getArgOperand(0)),
5082 getValue(I.getArgOperand(1)),
DAG.getConstant(Idx, TLI->getVectorIdxTy()));
setValue(&I, Res);
return nullptr;
}
5087 case Intrinsic::x86_avx_vextractf128_pd_256:
5088 case Intrinsic::x86_avx_vextractf128_ps_256:
5089 case Intrinsic::x86_avx_vextractf128_si_256:
5090 case Intrinsic::x86_avx2_vextracti128: {
5091 EVT DestVT = TLI->getValueType(I.getType());
5092 uint64_t Idx = (cast<ConstantInt>(I.getArgOperand(1))->getZExtValue() & 1) *
5093 DestVT.getVectorNumElements();
5094 Res = DAG.getNode(ISD::EXTRACT_SUBVECTOR, sdl, DestVT,
5095 getValue(I.getArgOperand(0)),
DAG.getConstant(Idx, TLI->getVectorIdxTy()));
setValue(&I, Res);
return nullptr;
}
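// For example (illustrative): @llvm.x86.avx.vextractf128.ps.256 of a
// <8 x float> with immediate 1 becomes EXTRACT_SUBVECTOR(%v, 4); only bit 0
// of the immediate matters, so Idx is either 0 or the result element count.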
5100 case Intrinsic::convertff:
5101 case Intrinsic::convertfsi:
5102 case Intrinsic::convertfui:
5103 case Intrinsic::convertsif:
5104 case Intrinsic::convertuif:
5105 case Intrinsic::convertss:
5106 case Intrinsic::convertsu:
5107 case Intrinsic::convertus:
5108 case Intrinsic::convertuu: {
5109 ISD::CvtCode Code = ISD::CVT_INVALID;
5110 switch (Intrinsic) {
5111 default: llvm_unreachable("Impossible intrinsic"); // Can't reach here.
5112 case Intrinsic::convertff: Code = ISD::CVT_FF; break;
5113 case Intrinsic::convertfsi: Code = ISD::CVT_FS; break;
5114 case Intrinsic::convertfui: Code = ISD::CVT_FU; break;
5115 case Intrinsic::convertsif: Code = ISD::CVT_SF; break;
5116 case Intrinsic::convertuif: Code = ISD::CVT_UF; break;
5117 case Intrinsic::convertss: Code = ISD::CVT_SS; break;
5118 case Intrinsic::convertsu: Code = ISD::CVT_SU; break;
5119 case Intrinsic::convertus: Code = ISD::CVT_US; break;
5120 case Intrinsic::convertuu: Code = ISD::CVT_UU; break;
5122 EVT DestVT = TLI->getValueType(I.getType());
5123 const Value *Op1 = I.getArgOperand(0);
5124 Res = DAG.getConvertRndSat(DestVT, sdl, getValue(Op1),
5125 DAG.getValueType(DestVT),
5126 DAG.getValueType(getValue(Op1).getValueType()),
5127 getValue(I.getArgOperand(1)),
getValue(I.getArgOperand(2)),
Code);
setValue(&I, Res);
return nullptr;
}
case Intrinsic::powi:
  setValue(&I, ExpandPowI(sdl, getValue(I.getArgOperand(0)),
                          getValue(I.getArgOperand(1)), DAG));
  return nullptr;
case Intrinsic::log:
  setValue(&I, expandLog(sdl, getValue(I.getArgOperand(0)), DAG, *TLI));
  return nullptr;
case Intrinsic::log2:
  setValue(&I, expandLog2(sdl, getValue(I.getArgOperand(0)), DAG, *TLI));
  return nullptr;
case Intrinsic::log10:
  setValue(&I, expandLog10(sdl, getValue(I.getArgOperand(0)), DAG, *TLI));
  return nullptr;
case Intrinsic::exp:
  setValue(&I, expandExp(sdl, getValue(I.getArgOperand(0)), DAG, *TLI));
  return nullptr;
case Intrinsic::exp2:
  setValue(&I, expandExp2(sdl, getValue(I.getArgOperand(0)), DAG, *TLI));
  return nullptr;
case Intrinsic::pow:
  setValue(&I, expandPow(sdl, getValue(I.getArgOperand(0)),
                         getValue(I.getArgOperand(1)), DAG, *TLI));
  return nullptr;
5156 case Intrinsic::sqrt:
5157 case Intrinsic::fabs:
5158 case Intrinsic::sin:
5159 case Intrinsic::cos:
5160 case Intrinsic::floor:
5161 case Intrinsic::ceil:
5162 case Intrinsic::trunc:
5163 case Intrinsic::rint:
5164 case Intrinsic::nearbyint:
5165 case Intrinsic::round: {
unsigned Opcode;
switch (Intrinsic) {
5168 default: llvm_unreachable("Impossible intrinsic"); // Can't reach here.
5169 case Intrinsic::sqrt: Opcode = ISD::FSQRT; break;
5170 case Intrinsic::fabs: Opcode = ISD::FABS; break;
5171 case Intrinsic::sin: Opcode = ISD::FSIN; break;
5172 case Intrinsic::cos: Opcode = ISD::FCOS; break;
5173 case Intrinsic::floor: Opcode = ISD::FFLOOR; break;
5174 case Intrinsic::ceil: Opcode = ISD::FCEIL; break;
5175 case Intrinsic::trunc: Opcode = ISD::FTRUNC; break;
5176 case Intrinsic::rint: Opcode = ISD::FRINT; break;
5177 case Intrinsic::nearbyint: Opcode = ISD::FNEARBYINT; break;
5178 case Intrinsic::round: Opcode = ISD::FROUND; break;
5181 setValue(&I, DAG.getNode(Opcode, sdl,
5182 getValue(I.getArgOperand(0)).getValueType(),
getValue(I.getArgOperand(0))));
return nullptr;
}
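// For example, @llvm.sqrt.f64(double %x) simply becomes (FSQRT %x) here;
// whether that is later matched to an instruction or expanded to a libcall
// is decided by the target during legalization.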
5186 case Intrinsic::copysign:
5187 setValue(&I, DAG.getNode(ISD::FCOPYSIGN, sdl,
5188 getValue(I.getArgOperand(0)).getValueType(),
5189 getValue(I.getArgOperand(0)),
getValue(I.getArgOperand(1))));
return nullptr;
5192 case Intrinsic::fma:
5193 setValue(&I, DAG.getNode(ISD::FMA, sdl,
5194 getValue(I.getArgOperand(0)).getValueType(),
5195 getValue(I.getArgOperand(0)),
5196 getValue(I.getArgOperand(1)),
getValue(I.getArgOperand(2))));
return nullptr;
5199 case Intrinsic::fmuladd: {
5200 EVT VT = TLI->getValueType(I.getType());
5201 if (TM.Options.AllowFPOpFusion != FPOpFusion::Strict &&
5202 TLI->isFMAFasterThanFMulAndFAdd(VT)) {
5203 setValue(&I, DAG.getNode(ISD::FMA, sdl,
5204 getValue(I.getArgOperand(0)).getValueType(),
5205 getValue(I.getArgOperand(0)),
5206 getValue(I.getArgOperand(1)),
5207 getValue(I.getArgOperand(2))));
} else {
  SDValue Mul = DAG.getNode(ISD::FMUL, sdl,
5210 getValue(I.getArgOperand(0)).getValueType(),
5211 getValue(I.getArgOperand(0)),
5212 getValue(I.getArgOperand(1)));
5213 SDValue Add = DAG.getNode(ISD::FADD, sdl,
                            getValue(I.getArgOperand(0)).getValueType(),
                            Mul,
                            getValue(I.getArgOperand(2)));
  setValue(&I, Add);
}
return nullptr;
}
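// Net effect of the fmuladd case above: the intrinsic becomes a fused FMA
// node when fusion is permitted and profitable for the type, and an unfused
// FADD(FMUL(a, b), c) otherwise.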
5221 case Intrinsic::convert_to_fp16:
5222 setValue(&I, DAG.getNode(ISD::BITCAST, sdl, MVT::i16,
5223 DAG.getNode(ISD::FP_ROUND, sdl, MVT::f16,
5224 getValue(I.getArgOperand(0)),
DAG.getTargetConstant(0, MVT::i32))));
return nullptr;
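// e.g. @llvm.convert.to.fp16(float %x) becomes BITCAST(FP_ROUND(%x)) to
// i16 here: the half value is rounded first and then reinterpreted as its
// i16 storage form, since f16 is not a first-class type at this level.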
5227 case Intrinsic::convert_from_fp16:
setValue(&I,
         DAG.getNode(ISD::FP_EXTEND, sdl, TLI->getValueType(I.getType()),
5230 DAG.getNode(ISD::BITCAST, sdl, MVT::f16,
getValue(I.getArgOperand(0)))));
return nullptr;
5233 case Intrinsic::pcmarker: {
5234 SDValue Tmp = getValue(I.getArgOperand(0));
DAG.setRoot(DAG.getNode(ISD::PCMARKER, sdl, MVT::Other, getRoot(), Tmp));
return nullptr;
}
5238 case Intrinsic::readcyclecounter: {
5239 SDValue Op = getRoot();
5240 Res = DAG.getNode(ISD::READCYCLECOUNTER, sdl,
DAG.getVTList(MVT::i64, MVT::Other), Op);
setValue(&I, Res);
DAG.setRoot(Res.getValue(1));
return nullptr;
}
5246 case Intrinsic::bswap:
5247 setValue(&I, DAG.getNode(ISD::BSWAP, sdl,
5248 getValue(I.getArgOperand(0)).getValueType(),
getValue(I.getArgOperand(0))));
return nullptr;
5251 case Intrinsic::cttz: {
5252 SDValue Arg = getValue(I.getArgOperand(0));
5253 ConstantInt *CI = cast<ConstantInt>(I.getArgOperand(1));
5254 EVT Ty = Arg.getValueType();
setValue(&I, DAG.getNode(CI->isZero() ? ISD::CTTZ : ISD::CTTZ_ZERO_UNDEF,
                         sdl, Ty, Arg));
return nullptr;
}
5259 case Intrinsic::ctlz: {
5260 SDValue Arg = getValue(I.getArgOperand(0));
5261 ConstantInt *CI = cast<ConstantInt>(I.getArgOperand(1));
5262 EVT Ty = Arg.getValueType();
setValue(&I, DAG.getNode(CI->isZero() ? ISD::CTLZ : ISD::CTLZ_ZERO_UNDEF,
                         sdl, Ty, Arg));
return nullptr;
}
5267 case Intrinsic::ctpop: {
5268 SDValue Arg = getValue(I.getArgOperand(0));
5269 EVT Ty = Arg.getValueType();
setValue(&I, DAG.getNode(ISD::CTPOP, sdl, Ty, Arg));
return nullptr;
}
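// For the cttz/ctlz cases above: e.g. @llvm.cttz.i32(i32 %x, i1 true)
// selects CTTZ_ZERO_UNDEF, telling targets they may emit a bare count
// instruction (BSF/TZCNT-style) without a zero-input guard.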
5273 case Intrinsic::stacksave: {
5274 SDValue Op = getRoot();
5275 Res = DAG.getNode(ISD::STACKSAVE, sdl,
DAG.getVTList(TLI->getPointerTy(), MVT::Other), Op);
setValue(&I, Res);
DAG.setRoot(Res.getValue(1));
return nullptr;
}
5281 case Intrinsic::stackrestore: {
5282 Res = getValue(I.getArgOperand(0));
DAG.setRoot(DAG.getNode(ISD::STACKRESTORE, sdl, MVT::Other, getRoot(), Res));
return nullptr;
}
5286 case Intrinsic::stackprotector: {
5287 // Emit code into the DAG to store the stack guard onto the stack.
5288 MachineFunction &MF = DAG.getMachineFunction();
5289 MachineFrameInfo *MFI = MF.getFrameInfo();
5290 EVT PtrTy = TLI->getPointerTy();
5291 SDValue Src, Chain = getRoot();
5292 const Value *Ptr = cast<LoadInst>(I.getArgOperand(0))->getPointerOperand();
5293 const GlobalVariable *GV = dyn_cast<GlobalVariable>(Ptr);
5295 // See if Ptr is a bitcast. If it is, look through it and see if we can get
5296 // global variable __stack_chk_guard.
5298 if (const Operator *BC = dyn_cast<Operator>(Ptr))
5299 if (BC->getOpcode() == Instruction::BitCast)
5300 GV = dyn_cast<GlobalVariable>(BC->getOperand(0));
5302 if (GV && TLI->useLoadStackGuardNode()) {
5303 // Emit a LOAD_STACK_GUARD node.
MachineSDNode *Node = DAG.getMachineNode(TargetOpcode::LOAD_STACK_GUARD,
                                         sdl, PtrTy, Chain);
5306 MachinePointerInfo MPInfo(GV);
5307 MachineInstr::mmo_iterator MemRefs = MF.allocateMemRefsArray(1);
5308 unsigned Flags = MachineMemOperand::MOLoad |
5309 MachineMemOperand::MOInvariant;
5310 *MemRefs = MF.getMachineMemOperand(MPInfo, Flags,
5311 PtrTy.getSizeInBits() / 8,
5312 DAG.getEVTAlignment(PtrTy));
5313 Node->setMemRefs(MemRefs, MemRefs + 1);
5315 // Copy the guard value to a virtual register so that it can be
5316 // retrieved in the epilogue.
5317 Src = SDValue(Node, 0);
5318 const TargetRegisterClass *RC =
5319 TLI->getRegClassFor(Src.getSimpleValueType());
5320 unsigned Reg = MF.getRegInfo().createVirtualRegister(RC);
5322 SPDescriptor.setGuardReg(Reg);
5323 Chain = DAG.getCopyToReg(Chain, sdl, Reg, Src);
} else {
  Src = getValue(I.getArgOperand(0)); // The guard's value.
}
5328 AllocaInst *Slot = cast<AllocaInst>(I.getArgOperand(1));
5330 int FI = FuncInfo.StaticAllocaMap[Slot];
5331 MFI->setStackProtectorIndex(FI);
5333 SDValue FIN = DAG.getFrameIndex(FI, PtrTy);
5335 // Store the stack protector onto the stack.
Res = DAG.getStore(Chain, sdl, Src, FIN,
                   MachinePointerInfo::getFixedStack(FI),
                   true, false, 0);
setValue(&I, Res);
DAG.setRoot(Res);
return nullptr;
}
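// In outline: the guard value (from the LOAD_STACK_GUARD node or a plain
// load of the guard variable) is stored to the dedicated protector slot in
// the prologue; the matching compare is emitted later (see the
// stackprotectorcheck handling below).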
5343 case Intrinsic::objectsize: {
5344 // If we don't know by now, we're never going to know.
5345 ConstantInt *CI = dyn_cast<ConstantInt>(I.getArgOperand(1));
5347 assert(CI && "Non-constant type in __builtin_object_size?");
5349 SDValue Arg = getValue(I.getCalledValue());
5350 EVT Ty = Arg.getValueType();
if (CI->isZero())
  Res = DAG.getConstant(-1ULL, Ty);
else
  Res = DAG.getConstant(0, Ty);
setValue(&I, Res);
return nullptr;
}
5360 case Intrinsic::annotation:
5361 case Intrinsic::ptr_annotation:
5362 // Drop the intrinsic, but forward the value
setValue(&I, getValue(I.getOperand(0)));
return nullptr;
5365 case Intrinsic::assume:
5366 case Intrinsic::var_annotation:
// Discard annotate attributes and assumptions.
return nullptr;
5370 case Intrinsic::init_trampoline: {
5371 const Function *F = cast<Function>(I.getArgOperand(1)->stripPointerCasts());
SDValue Ops[6];
Ops[0] = getRoot();
Ops[1] = getValue(I.getArgOperand(0));
5376 Ops[2] = getValue(I.getArgOperand(1));
5377 Ops[3] = getValue(I.getArgOperand(2));
5378 Ops[4] = DAG.getSrcValue(I.getArgOperand(0));
5379 Ops[5] = DAG.getSrcValue(F);
Res = DAG.getNode(ISD::INIT_TRAMPOLINE, sdl, MVT::Other, Ops);
DAG.setRoot(Res);
return nullptr;
}
5386 case Intrinsic::adjust_trampoline: {
5387 setValue(&I, DAG.getNode(ISD::ADJUST_TRAMPOLINE, sdl,
5388 TLI->getPointerTy(),
getValue(I.getArgOperand(0))));
return nullptr;
}
5392 case Intrinsic::gcroot:
if (GFI) {
  const Value *Alloca = I.getArgOperand(0)->stripPointerCasts();
5395 const Constant *TypeMap = cast<Constant>(I.getArgOperand(1));
5397 FrameIndexSDNode *FI = cast<FrameIndexSDNode>(getValue(Alloca).getNode());
  GFI->addStackRoot(FI->getIndex(), TypeMap);
}
return nullptr;
5401 case Intrinsic::gcread:
5402 case Intrinsic::gcwrite:
5403 llvm_unreachable("GC failed to lower gcread/gcwrite intrinsics!");
5404 case Intrinsic::flt_rounds:
setValue(&I, DAG.getNode(ISD::FLT_ROUNDS_, sdl, MVT::i32));
return nullptr;
5408 case Intrinsic::expect: {
5409 // Just replace __builtin_expect(exp, c) with EXP.
setValue(&I, getValue(I.getArgOperand(0)));
return nullptr;
}
5414 case Intrinsic::debugtrap:
5415 case Intrinsic::trap: {
5416 StringRef TrapFuncName = TM.Options.getTrapFunctionName();
5417 if (TrapFuncName.empty()) {
5418 ISD::NodeType Op = (Intrinsic == Intrinsic::trap) ?
5419 ISD::TRAP : ISD::DEBUGTRAP;
  DAG.setRoot(DAG.getNode(Op, sdl, MVT::Other, getRoot()));
  return nullptr;
}
5423 TargetLowering::ArgListTy Args;
5425 TargetLowering::CallLoweringInfo CLI(DAG);
5426 CLI.setDebugLoc(sdl).setChain(getRoot())
5427 .setCallee(CallingConv::C, I.getType(),
5428 DAG.getExternalSymbol(TrapFuncName.data(), TLI->getPointerTy()),
5429 std::move(Args), 0);
5431 std::pair<SDValue, SDValue> Result = TLI->LowerCallTo(CLI);
DAG.setRoot(Result.second);
return nullptr;
}
5436 case Intrinsic::uadd_with_overflow:
5437 case Intrinsic::sadd_with_overflow:
5438 case Intrinsic::usub_with_overflow:
5439 case Intrinsic::ssub_with_overflow:
5440 case Intrinsic::umul_with_overflow:
5441 case Intrinsic::smul_with_overflow: {
ISD::NodeType Op;
switch (Intrinsic) {
5444 default: llvm_unreachable("Impossible intrinsic"); // Can't reach here.
5445 case Intrinsic::uadd_with_overflow: Op = ISD::UADDO; break;
5446 case Intrinsic::sadd_with_overflow: Op = ISD::SADDO; break;
5447 case Intrinsic::usub_with_overflow: Op = ISD::USUBO; break;
5448 case Intrinsic::ssub_with_overflow: Op = ISD::SSUBO; break;
5449 case Intrinsic::umul_with_overflow: Op = ISD::UMULO; break;
5450 case Intrinsic::smul_with_overflow: Op = ISD::SMULO; break;
5452 SDValue Op1 = getValue(I.getArgOperand(0));
5453 SDValue Op2 = getValue(I.getArgOperand(1));
5455 SDVTList VTs = DAG.getVTList(Op1.getValueType(), MVT::i1);
setValue(&I, DAG.getNode(Op, sdl, VTs, Op1, Op2));
return nullptr;
}
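// For example, @llvm.sadd.with.overflow.i32(%a, %b) becomes a single SADDO
// node: result 0 is the i32 sum and result 1 is the i1 overflow bit of the
// returned {i32, i1} pair.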
5459 case Intrinsic::prefetch: {
SDValue Ops[5];
unsigned rw = cast<ConstantInt>(I.getArgOperand(1))->getZExtValue();
Ops[0] = getRoot();
Ops[1] = getValue(I.getArgOperand(0));
5464 Ops[2] = getValue(I.getArgOperand(1));
5465 Ops[3] = getValue(I.getArgOperand(2));
5466 Ops[4] = getValue(I.getArgOperand(3));
5467 DAG.setRoot(DAG.getMemIntrinsicNode(ISD::PREFETCH, sdl,
5468 DAG.getVTList(MVT::Other), Ops,
5469 EVT::getIntegerVT(*Context, 8),
5470 MachinePointerInfo(I.getArgOperand(0)),
0, /* align */
false, /* volatile */
rw==0, /* read */
rw==1)); /* write */
return nullptr;
}
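// e.g. @llvm.prefetch(i8* %p, i32 0 /*rw*/, i32 3 /*locality*/, i32 1)
// maps rw==0 to a read prefetch here; targets lacking a prefetch
// instruction typically legalize the node away.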
5477 case Intrinsic::lifetime_start:
5478 case Intrinsic::lifetime_end: {
5479 bool IsStart = (Intrinsic == Intrinsic::lifetime_start);
5480 // Stack coloring is not enabled in O0, discard region information.
if (TM.getOptLevel() == CodeGenOpt::None)
  return nullptr;
5484 SmallVector<Value *, 4> Allocas;
5485 GetUnderlyingObjects(I.getArgOperand(1), Allocas, DL);
5487 for (SmallVectorImpl<Value*>::iterator Object = Allocas.begin(),
5488 E = Allocas.end(); Object != E; ++Object) {
5489 AllocaInst *LifetimeObject = dyn_cast_or_null<AllocaInst>(*Object);
5491 // Could not find an Alloca.
if (!LifetimeObject)
  continue;
5495 int FI = FuncInfo.StaticAllocaMap[LifetimeObject];
SDValue Ops[2];
Ops[0] = getRoot();
Ops[1] = DAG.getFrameIndex(FI, TLI->getPointerTy(), true);
5500 unsigned Opcode = (IsStart ? ISD::LIFETIME_START : ISD::LIFETIME_END);
  Res = DAG.getNode(Opcode, sdl, MVT::Other, Ops);
  DAG.setRoot(Res);
}
return nullptr;
}
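// For example, @llvm.lifetime.start(i64 16, i8* %buf) becomes a
// LIFETIME_START node over %buf's frame index, which the stack coloring
// pass later uses to overlap non-conflicting stack slots.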
5507 case Intrinsic::invariant_start:
5508 // Discard region information.
setValue(&I, DAG.getUNDEF(TLI->getPointerTy()));
return nullptr;
5511 case Intrinsic::invariant_end:
// Discard region information.
return nullptr;
5514 case Intrinsic::stackprotectorcheck: {
5515 // Do not actually emit anything for this basic block. Instead we initialize
5516 // the stack protector descriptor and export the guard variable so we can
5517 // access it in FinishBasicBlock.
5518 const BasicBlock *BB = I.getParent();
5519 SPDescriptor.initialize(BB, FuncInfo.MBBMap[BB], I);
5520 ExportFromCurrentBlock(SPDescriptor.getGuard());
5522 // Flush our exports since we are going to process a terminator.
(void)getControlRoot();
return nullptr;
}
5526 case Intrinsic::clear_cache:
5527 return TLI->getClearCacheBuiltinName();
case Intrinsic::donothing:
  // ignore
  return nullptr;
case Intrinsic::experimental_stackmap: {
  visitStackmap(I);
  return nullptr;
}
5535 case Intrinsic::experimental_patchpoint_void:
case Intrinsic::experimental_patchpoint_i64: {
  visitPatchpoint(I);
  return nullptr;
}
}
}
void SelectionDAGBuilder::LowerCallTo(ImmutableCallSite CS, SDValue Callee,
                                      bool isTailCall,
                                      MachineBasicBlock *LandingPad) {
5546 const TargetLowering *TLI = TM.getSubtargetImpl()->getTargetLowering();
5547 PointerType *PT = cast<PointerType>(CS.getCalledValue()->getType());
5548 FunctionType *FTy = cast<FunctionType>(PT->getElementType());
5549 Type *RetTy = FTy->getReturnType();
5550 MachineModuleInfo &MMI = DAG.getMachineFunction().getMMI();
5551 MCSymbol *BeginLabel = nullptr;
5553 TargetLowering::ArgListTy Args;
5554 TargetLowering::ArgListEntry Entry;
5555 Args.reserve(CS.arg_size());
for (ImmutableCallSite::arg_iterator i = CS.arg_begin(), e = CS.arg_end();
     i != e; ++i) {
  const Value *V = *i;
// Skip empty types.
if (V->getType()->isEmptyTy())
  continue;
5565 SDValue ArgNode = getValue(V);
5566 Entry.Node = ArgNode; Entry.Ty = V->getType();
5568 // Skip the first return-type Attribute to get to params.
5569 Entry.setAttributes(&CS, i - CS.arg_begin() + 1);
  Args.push_back(Entry);
}

if (LandingPad) {
  // Insert a label before the invoke call to mark the try range. This can be
5575 // used to detect deletion of the invoke via the MachineModuleInfo.
5576 BeginLabel = MMI.getContext().CreateTempSymbol();
5578 // For SjLj, keep track of which landing pads go with which invokes
5579 // so as to maintain the ordering of pads in the LSDA.
5580 unsigned CallSiteIndex = MMI.getCurrentCallSite();
5581 if (CallSiteIndex) {
5582 MMI.setCallSiteBeginLabel(BeginLabel, CallSiteIndex);
5583 LPadToCallSiteMap[LandingPad].push_back(CallSiteIndex);
5585 // Now that the call site is handled, stop tracking it.
  MMI.setCurrentCallSite(0);
}
5589 // Both PendingLoads and PendingExports must be flushed here;
5590 // this call might not return.
  DAG.setRoot(DAG.getEHLabel(getCurSDLoc(), getControlRoot(), BeginLabel));
}
5595 // Check if target-independent constraints permit a tail call here.
5596 // Target-dependent constraints are checked within TLI->LowerCallTo.
if (isTailCall && !isInTailCallPosition(CS, DAG.getTarget()))
  isTailCall = false;
5600 TargetLowering::CallLoweringInfo CLI(DAG);
5601 CLI.setDebugLoc(getCurSDLoc()).setChain(getRoot())
5602 .setCallee(RetTy, FTy, Callee, std::move(Args), CS).setTailCall(isTailCall);
5604 std::pair<SDValue,SDValue> Result = TLI->LowerCallTo(CLI);
5605 assert((isTailCall || Result.second.getNode()) &&
5606 "Non-null chain expected with non-tail call!");
5607 assert((Result.second.getNode() || !Result.first.getNode()) &&
5608 "Null value expected with tail call!");
5609 if (Result.first.getNode())
5610 setValue(CS.getInstruction(), Result.first);
5612 if (!Result.second.getNode()) {
5613 // As a special case, a null chain means that a tail call has been emitted
// and the DAG root is already updated.
HasTailCall = true;
5617 // Since there's no actual continuation from this block, nothing can be
5618 // relying on us setting vregs for them.
  PendingExports.clear();
} else {
  DAG.setRoot(Result.second);
}
if (LandingPad) {
  // Insert a label at the end of the invoke call to mark the try range. This
5626 // can be used to detect deletion of the invoke via the MachineModuleInfo.
5627 MCSymbol *EndLabel = MMI.getContext().CreateTempSymbol();
5628 DAG.setRoot(DAG.getEHLabel(getCurSDLoc(), getRoot(), EndLabel));
5630 // Inform MachineModuleInfo of range.
  MMI.addInvoke(LandingPad, BeginLabel, EndLabel);
}
}
5635 /// IsOnlyUsedInZeroEqualityComparison - Return true if it only matters that the
5636 /// value is equal or not-equal to zero.
5637 static bool IsOnlyUsedInZeroEqualityComparison(const Value *V) {
5638 for (const User *U : V->users()) {
5639 if (const ICmpInst *IC = dyn_cast<ICmpInst>(U))
5640 if (IC->isEquality())
5641 if (const Constant *C = dyn_cast<Constant>(IC->getOperand(1)))
        if (C->isNullValue())
          continue;
    // Unknown instruction.
    return false;
  }
  return true;
}
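// For example, if every use of
//   %c = call i32 @memcmp(i8* %a, i8* %b, i64 4)
// looks like
//   %t = icmp eq i32 %c, 0
// then only equality with zero matters, and the lowering below may replace
// the call with a direct comparison of loaded words.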
static SDValue getMemCmpLoad(const Value *PtrVal, MVT LoadVT,
                             Type *LoadTy,
                             SelectionDAGBuilder &Builder) {
5654 // Check to see if this load can be trivially constant folded, e.g. if the
5655 // input is from a string literal.
5656 if (const Constant *LoadInput = dyn_cast<Constant>(PtrVal)) {
5657 // Cast pointer to the type we really want to load.
5658 LoadInput = ConstantExpr::getBitCast(const_cast<Constant *>(LoadInput),
5659 PointerType::getUnqual(LoadTy));
5661 if (const Constant *LoadCst =
5662 ConstantFoldLoadFromConstPtr(const_cast<Constant *>(LoadInput),
                                   Builder.DL))
    return Builder.getValue(LoadCst);
}
5667 // Otherwise, we have to emit the load. If the pointer is to unfoldable but
5668 // still constant memory, the input chain can be the entry node.
SDValue Root;
bool ConstantMemory = false;
5672 // Do not serialize (non-volatile) loads of constant memory with anything.
5673 if (Builder.AA->pointsToConstantMemory(PtrVal)) {
5674 Root = Builder.DAG.getEntryNode();
5675 ConstantMemory = true;
} else {
  // Do not serialize non-volatile loads against each other.
  Root = Builder.DAG.getRoot();
}
5681 SDValue Ptr = Builder.getValue(PtrVal);
5682 SDValue LoadVal = Builder.DAG.getLoad(LoadVT, Builder.getCurSDLoc(), Root,
Ptr, MachinePointerInfo(PtrVal),
false /*volatile*/,
false /*nontemporal*/,
5686 false /*isinvariant*/, 1 /* align=1 */);
5688 if (!ConstantMemory)
if (!ConstantMemory)
  Builder.PendingLoads.push_back(LoadVal.getValue(1));

return LoadVal;
}
5693 /// processIntegerCallValue - Record the value for an instruction that
5694 /// produces an integer result, converting the type where necessary.
void SelectionDAGBuilder::processIntegerCallValue(const Instruction &I,
                                                  SDValue Value,
                                                  bool IsSigned) {
  EVT VT = TM.getSubtargetImpl()->getTargetLowering()->getValueType(I.getType(),
                                                                    true);
  if (IsSigned)
    Value = DAG.getSExtOrTrunc(Value, getCurSDLoc(), VT);
  else
    Value = DAG.getZExtOrTrunc(Value, getCurSDLoc(), VT);
  setValue(&I, Value);
}
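// For instance (an assumed scenario): if a target hook produces memcmp's
// result as an i64 node while the IR call returns i32, passing IsSigned=true
// truncates/extends signedly, so the negative/zero/positive contract of the
// result survives the retyping.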
5707 /// visitMemCmpCall - See if we can lower a call to memcmp in an optimized form.
5708 /// If so, return true and lower it, otherwise return false and it will be
5709 /// lowered like a normal call.
5710 bool SelectionDAGBuilder::visitMemCmpCall(const CallInst &I) {
5711 // Verify that the prototype makes sense. int memcmp(void*,void*,size_t)
if (I.getNumArgOperands() != 3)
  return false;
5715 const Value *LHS = I.getArgOperand(0), *RHS = I.getArgOperand(1);
5716 if (!LHS->getType()->isPointerTy() || !RHS->getType()->isPointerTy() ||
5717 !I.getArgOperand(2)->getType()->isIntegerTy() ||
!I.getType()->isIntegerTy())
  return false;
5721 const Value *Size = I.getArgOperand(2);
5722 const ConstantInt *CSize = dyn_cast<ConstantInt>(Size);
5723 if (CSize && CSize->getZExtValue() == 0) {
EVT CallVT = TM.getSubtargetImpl()->getTargetLowering()->getValueType(
    I.getType(), true);
setValue(&I, DAG.getConstant(0, CallVT));
return true;
}
5730 const TargetSelectionDAGInfo &TSI = DAG.getSelectionDAGInfo();
5731 std::pair<SDValue, SDValue> Res =
5732 TSI.EmitTargetCodeForMemcmp(DAG, getCurSDLoc(), DAG.getRoot(),
5733 getValue(LHS), getValue(RHS), getValue(Size),
5734 MachinePointerInfo(LHS),
5735 MachinePointerInfo(RHS));
5736 if (Res.first.getNode()) {
5737 processIntegerCallValue(I, Res.first, true);
PendingLoads.push_back(Res.second);
return true;
}
5742 // memcmp(S1,S2,2) != 0 -> (*(short*)LHS != *(short*)RHS) != 0
5743 // memcmp(S1,S2,4) != 0 -> (*(int*)LHS != *(int*)RHS) != 0
5744 if (CSize && IsOnlyUsedInZeroEqualityComparison(&I)) {
bool ActuallyDoIt = true;
MVT LoadVT;
Type *LoadTy;
switch (CSize->getZExtValue()) {
default:
  LoadVT = MVT::Other; LoadTy = nullptr; ActuallyDoIt = false; break;
case 2:
  LoadVT = MVT::i16; LoadTy = Type::getInt16Ty(CSize->getContext()); break;
case 4:
  LoadVT = MVT::i32; LoadTy = Type::getInt32Ty(CSize->getContext()); break;
case 8:
  LoadVT = MVT::i64; LoadTy = Type::getInt64Ty(CSize->getContext()); break;
case 16:
  LoadVT = MVT::v4i32;
  LoadTy = VectorType::get(Type::getInt32Ty(CSize->getContext()), 4);
  break;
}
5775 // This turns into unaligned loads. We only do this if the target natively
5776 // supports the MVT we'll be loading or if it is small enough (<= 4) that
5777 // we'll only produce a small number of byte loads.
5779 // Require that we can find a legal MVT, and only do this if the target
// supports unaligned loads of that type. Expanding into byte loads would
// bloat the code.
5782 const TargetLowering *TLI = TM.getSubtargetImpl()->getTargetLowering();
5783 if (ActuallyDoIt && CSize->getZExtValue() > 4) {
5784 unsigned DstAS = LHS->getType()->getPointerAddressSpace();
5785 unsigned SrcAS = RHS->getType()->getPointerAddressSpace();
5786 // TODO: Handle 5 byte compare as 4-byte + 1 byte.
5787 // TODO: Handle 8 byte compare on x86-32 as two 32-bit loads.
5788 // TODO: Check alignment of src and dest ptrs.
5789 if (!TLI->isTypeLegal(LoadVT) ||
5790 !TLI->allowsMisalignedMemoryAccesses(LoadVT, SrcAS) ||
5791 !TLI->allowsMisalignedMemoryAccesses(LoadVT, DstAS))
5792 ActuallyDoIt = false;
}

if (ActuallyDoIt) {
  SDValue LHSVal = getMemCmpLoad(LHS, LoadVT, LoadTy, *this);
5797 SDValue RHSVal = getMemCmpLoad(RHS, LoadVT, LoadTy, *this);
  SDValue Res = DAG.getSetCC(getCurSDLoc(), MVT::i1, LHSVal, RHSVal,
                             ISD::SETNE);
  processIntegerCallValue(I, Res, false);
  return true;
}
}

return false;
}
5810 /// visitMemChrCall -- See if we can lower a memchr call into an optimized
5811 /// form. If so, return true and lower it, otherwise return false and it
5812 /// will be lowered like a normal call.
5813 bool SelectionDAGBuilder::visitMemChrCall(const CallInst &I) {
5814 // Verify that the prototype makes sense. void *memchr(void *, int, size_t)
if (I.getNumArgOperands() != 3)
  return false;
5818 const Value *Src = I.getArgOperand(0);
5819 const Value *Char = I.getArgOperand(1);
5820 const Value *Length = I.getArgOperand(2);
5821 if (!Src->getType()->isPointerTy() ||
5822 !Char->getType()->isIntegerTy() ||
5823 !Length->getType()->isIntegerTy() ||
!I.getType()->isPointerTy())
  return false;
5827 const TargetSelectionDAGInfo &TSI = DAG.getSelectionDAGInfo();
5828 std::pair<SDValue, SDValue> Res =
5829 TSI.EmitTargetCodeForMemchr(DAG, getCurSDLoc(), DAG.getRoot(),
5830 getValue(Src), getValue(Char), getValue(Length),
5831 MachinePointerInfo(Src));
5832 if (Res.first.getNode()) {
5833 setValue(&I, Res.first);
PendingLoads.push_back(Res.second);
return true;
}

return false;
}
5841 /// visitStrCpyCall -- See if we can lower a strcpy or stpcpy call into an
5842 /// optimized form. If so, return true and lower it, otherwise return false
5843 /// and it will be lowered like a normal call.
5844 bool SelectionDAGBuilder::visitStrCpyCall(const CallInst &I, bool isStpcpy) {
5845 // Verify that the prototype makes sense. char *strcpy(char *, char *)
if (I.getNumArgOperands() != 2)
  return false;
5849 const Value *Arg0 = I.getArgOperand(0), *Arg1 = I.getArgOperand(1);
5850 if (!Arg0->getType()->isPointerTy() ||
5851 !Arg1->getType()->isPointerTy() ||
!I.getType()->isPointerTy())
  return false;
5855 const TargetSelectionDAGInfo &TSI = DAG.getSelectionDAGInfo();
5856 std::pair<SDValue, SDValue> Res =
5857 TSI.EmitTargetCodeForStrcpy(DAG, getCurSDLoc(), getRoot(),
5858 getValue(Arg0), getValue(Arg1),
5859 MachinePointerInfo(Arg0),
5860 MachinePointerInfo(Arg1), isStpcpy);
5861 if (Res.first.getNode()) {
5862 setValue(&I, Res.first);
DAG.setRoot(Res.second);
return true;
}

return false;
}
5870 /// visitStrCmpCall - See if we can lower a call to strcmp in an optimized form.
5871 /// If so, return true and lower it, otherwise return false and it will be
5872 /// lowered like a normal call.
5873 bool SelectionDAGBuilder::visitStrCmpCall(const CallInst &I) {
5874 // Verify that the prototype makes sense. int strcmp(void*,void*)
if (I.getNumArgOperands() != 2)
  return false;
5878 const Value *Arg0 = I.getArgOperand(0), *Arg1 = I.getArgOperand(1);
5879 if (!Arg0->getType()->isPointerTy() ||
5880 !Arg1->getType()->isPointerTy() ||
!I.getType()->isIntegerTy())
  return false;
5884 const TargetSelectionDAGInfo &TSI = DAG.getSelectionDAGInfo();
5885 std::pair<SDValue, SDValue> Res =
5886 TSI.EmitTargetCodeForStrcmp(DAG, getCurSDLoc(), DAG.getRoot(),
5887 getValue(Arg0), getValue(Arg1),
5888 MachinePointerInfo(Arg0),
5889 MachinePointerInfo(Arg1));
5890 if (Res.first.getNode()) {
5891 processIntegerCallValue(I, Res.first, true);
PendingLoads.push_back(Res.second);
return true;
}

return false;
}
5899 /// visitStrLenCall -- See if we can lower a strlen call into an optimized
5900 /// form. If so, return true and lower it, otherwise return false and it
5901 /// will be lowered like a normal call.
5902 bool SelectionDAGBuilder::visitStrLenCall(const CallInst &I) {
5903 // Verify that the prototype makes sense. size_t strlen(char *)
if (I.getNumArgOperands() != 1)
  return false;
5907 const Value *Arg0 = I.getArgOperand(0);
if (!Arg0->getType()->isPointerTy() || !I.getType()->isIntegerTy())
  return false;
5911 const TargetSelectionDAGInfo &TSI = DAG.getSelectionDAGInfo();
5912 std::pair<SDValue, SDValue> Res =
5913 TSI.EmitTargetCodeForStrlen(DAG, getCurSDLoc(), DAG.getRoot(),
5914 getValue(Arg0), MachinePointerInfo(Arg0));
5915 if (Res.first.getNode()) {
5916 processIntegerCallValue(I, Res.first, false);
PendingLoads.push_back(Res.second);
return true;
}

return false;
}
5924 /// visitStrNLenCall -- See if we can lower a strnlen call into an optimized
5925 /// form. If so, return true and lower it, otherwise return false and it
5926 /// will be lowered like a normal call.
5927 bool SelectionDAGBuilder::visitStrNLenCall(const CallInst &I) {
5928 // Verify that the prototype makes sense. size_t strnlen(char *, size_t)
if (I.getNumArgOperands() != 2)
  return false;
5932 const Value *Arg0 = I.getArgOperand(0), *Arg1 = I.getArgOperand(1);
5933 if (!Arg0->getType()->isPointerTy() ||
5934 !Arg1->getType()->isIntegerTy() ||
!I.getType()->isIntegerTy())
  return false;
5938 const TargetSelectionDAGInfo &TSI = DAG.getSelectionDAGInfo();
5939 std::pair<SDValue, SDValue> Res =
5940 TSI.EmitTargetCodeForStrnlen(DAG, getCurSDLoc(), DAG.getRoot(),
5941 getValue(Arg0), getValue(Arg1),
5942 MachinePointerInfo(Arg0));
5943 if (Res.first.getNode()) {
5944 processIntegerCallValue(I, Res.first, false);
PendingLoads.push_back(Res.second);
return true;
}

return false;
}
5952 /// visitUnaryFloatCall - If a call instruction is a unary floating-point
5953 /// operation (as expected), translate it to an SDNode with the specified opcode
5954 /// and return true.
bool SelectionDAGBuilder::visitUnaryFloatCall(const CallInst &I,
                                              unsigned Opcode) {
5957 // Sanity check that it really is a unary floating-point call.
5958 if (I.getNumArgOperands() != 1 ||
5959 !I.getArgOperand(0)->getType()->isFloatingPointTy() ||
5960 I.getType() != I.getArgOperand(0)->getType() ||
!I.onlyReadsMemory())
  return false;
5964 SDValue Tmp = getValue(I.getArgOperand(0));
setValue(&I, DAG.getNode(Opcode, getCurSDLoc(), Tmp.getValueType(), Tmp));
return true;
}
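// For example, a readonly call to sinf(float %x) against a mere declaration
// is turned into (FSIN %x) through the LibFunc dispatch in visitCall below.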
5969 void SelectionDAGBuilder::visitCall(const CallInst &I) {
5970 // Handle inline assembly differently.
if (isa<InlineAsm>(I.getCalledValue())) {
  visitInlineAsm(&I);
  return;
}
5976 MachineModuleInfo &MMI = DAG.getMachineFunction().getMMI();
5977 ComputeUsesVAFloatArgument(I, &MMI);
5979 const char *RenameFn = nullptr;
5980 if (Function *F = I.getCalledFunction()) {
5981 if (F->isDeclaration()) {
5982 if (const TargetIntrinsicInfo *II = TM.getIntrinsicInfo()) {
5983 if (unsigned IID = II->getIntrinsicID(F)) {
    RenameFn = visitIntrinsicCall(I, IID);
    if (!RenameFn)
      return;
  }
}
5989 if (unsigned IID = F->getIntrinsicID()) {
  RenameFn = visitIntrinsicCall(I, IID);
  if (!RenameFn)
    return;
}
}
5996 // Check for well-known libc/libm calls. If the function is internal, it
5997 // can't be a library call.
LibFunc::Func Func;
if (!F->hasLocalLinkage() && F->hasName() &&
6000 LibInfo->getLibFunc(F->getName(), Func) &&
LibInfo->hasOptimizedCodeGen(Func)) {
switch (Func) {
default: break;
6004 case LibFunc::copysign:
6005 case LibFunc::copysignf:
6006 case LibFunc::copysignl:
6007 if (I.getNumArgOperands() == 2 && // Basic sanity checks.
6008 I.getArgOperand(0)->getType()->isFloatingPointTy() &&
6009 I.getType() == I.getArgOperand(0)->getType() &&
6010 I.getType() == I.getArgOperand(1)->getType() &&
6011 I.onlyReadsMemory()) {
6012 SDValue LHS = getValue(I.getArgOperand(0));
6013 SDValue RHS = getValue(I.getArgOperand(1));
6014 setValue(&I, DAG.getNode(ISD::FCOPYSIGN, getCurSDLoc(),
                             LHS.getValueType(), LHS, RHS));
    return;
  }
  break;
case LibFunc::fabs:
case LibFunc::fabsf:
6021 case LibFunc::fabsl:
if (visitUnaryFloatCall(I, ISD::FABS))
  return;
break;
case LibFunc::sin:
case LibFunc::sinf:
case LibFunc::sinl:
  if (visitUnaryFloatCall(I, ISD::FSIN))
    return;
  break;
case LibFunc::cos:
case LibFunc::cosf:
case LibFunc::cosl:
  if (visitUnaryFloatCall(I, ISD::FCOS))
    return;
  break;
case LibFunc::sqrt:
case LibFunc::sqrtf:
6039 case LibFunc::sqrtl:
6040 case LibFunc::sqrt_finite:
6041 case LibFunc::sqrtf_finite:
6042 case LibFunc::sqrtl_finite:
if (visitUnaryFloatCall(I, ISD::FSQRT))
  return;
break;
6046 case LibFunc::floor:
6047 case LibFunc::floorf:
6048 case LibFunc::floorl:
if (visitUnaryFloatCall(I, ISD::FFLOOR))
  return;
break;
6052 case LibFunc::nearbyint:
6053 case LibFunc::nearbyintf:
6054 case LibFunc::nearbyintl:
if (visitUnaryFloatCall(I, ISD::FNEARBYINT))
  return;
break;
case LibFunc::ceil:
case LibFunc::ceilf:
6060 case LibFunc::ceill:
if (visitUnaryFloatCall(I, ISD::FCEIL))
  return;
break;
case LibFunc::rint:
case LibFunc::rintf:
6066 case LibFunc::rintl:
if (visitUnaryFloatCall(I, ISD::FRINT))
  return;
break;
6070 case LibFunc::round:
6071 case LibFunc::roundf:
6072 case LibFunc::roundl:
if (visitUnaryFloatCall(I, ISD::FROUND))
  return;
break;
6076 case LibFunc::trunc:
6077 case LibFunc::truncf:
6078 case LibFunc::truncl:
if (visitUnaryFloatCall(I, ISD::FTRUNC))
  return;
break;
case LibFunc::log2:
case LibFunc::log2f:
6084 case LibFunc::log2l:
if (visitUnaryFloatCall(I, ISD::FLOG2))
  return;
break;
case LibFunc::exp2:
case LibFunc::exp2f:
6090 case LibFunc::exp2l:
if (visitUnaryFloatCall(I, ISD::FEXP2))
  return;
break;
6094 case LibFunc::memcmp:
if (visitMemCmpCall(I))
  return;
break;
6098 case LibFunc::memchr:
if (visitMemChrCall(I))
  return;
break;
6102 case LibFunc::strcpy:
if (visitStrCpyCall(I, false))
  return;
break;
6106 case LibFunc::stpcpy:
if (visitStrCpyCall(I, true))
  return;
break;
6110 case LibFunc::strcmp:
if (visitStrCmpCall(I))
  return;
break;
6114 case LibFunc::strlen:
if (visitStrLenCall(I))
  return;
break;
6118 case LibFunc::strnlen:
    if (visitStrNLenCall(I))
      return;
    break;
    }
  }
}

SDValue Callee;
if (!RenameFn)
6128 Callee = getValue(I.getCalledValue());
else
  Callee = DAG.getExternalSymbol(
6131 RenameFn, TM.getSubtargetImpl()->getTargetLowering()->getPointerTy());
// Check if we can potentially perform a tail call. More detailed checking
// will be done within LowerCallTo, after more information about the call is
// known.
LowerCallTo(&I, Callee, I.isTailCall());
}
namespace {

/// AsmOperandInfo - This contains information for each constraint that we are
/// lowering.
class SDISelAsmOperandInfo : public TargetLowering::AsmOperandInfo {
public:
6144 /// CallOperand - If this is the result output operand or a clobber
6145 /// this is null, otherwise it is the incoming operand to the CallInst.
6146 /// This gets modified as the asm is processed.
6147 SDValue CallOperand;
6149 /// AssignedRegs - If this is a register or register class operand, this
/// contains the set of registers corresponding to the operand.
6151 RegsForValue AssignedRegs;
6153 explicit SDISelAsmOperandInfo(const TargetLowering::AsmOperandInfo &info)
  : TargetLowering::AsmOperandInfo(info), CallOperand(nullptr, 0) {
}
6157 /// getCallOperandValEVT - Return the EVT of the Value* that this operand
/// corresponds to. If there is no Value* for this operand, it returns
/// MVT::Other.
6160 EVT getCallOperandValEVT(LLVMContext &Context,
6161 const TargetLowering &TLI,
6162 const DataLayout *DL) const {
6163 if (!CallOperandVal) return MVT::Other;
6165 if (isa<BasicBlock>(CallOperandVal))
6166 return TLI.getPointerTy();
6168 llvm::Type *OpTy = CallOperandVal->getType();
6170 // FIXME: code duplicated from TargetLowering::ParseConstraints().
// If this is an indirect operand, the operand is a pointer to the
// accessed type.
if (isIndirect) {
  llvm::PointerType *PtrTy = dyn_cast<PointerType>(OpTy);
  if (!PtrTy)
    report_fatal_error("Indirect operand for inline asm not a pointer!");
  OpTy = PtrTy->getElementType();
}
// Look for a vector wrapped in a struct, e.g. { <16 x i8> }.
6181 if (StructType *STy = dyn_cast<StructType>(OpTy))
6182 if (STy->getNumElements() == 1)
6183 OpTy = STy->getElementType(0);
6185 // If OpTy is not a single value, it may be a struct/union that we
6186 // can tile with integers.
6187 if (!OpTy->isSingleValueType() && OpTy->isSized()) {
6188 unsigned BitSize = DL->getTypeSizeInBits(OpTy);
switch (BitSize) {
default: break;
case 1:
case 8:
case 16:
case 32:
case 64:
case 128:
  OpTy = IntegerType::get(Context, BitSize);
  break;
}
  return TLI.getValueType(OpTy, true);
}
};
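// Example for getCallOperandValEVT above (illustrative): for an indirect
// "=*m" operand of type %struct.S*, the reported EVT is that of %struct.S
// itself, tiled to an iN integer when S is not a single value type.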
6206 typedef SmallVector<SDISelAsmOperandInfo,16> SDISelAsmOperandInfoVector;
6208 } // end anonymous namespace
6210 /// GetRegistersForValue - Assign registers (virtual or physical) for the
6211 /// specified operand. We prefer to assign virtual registers, to allow the
6212 /// register allocator to handle the assignment process. However, if the asm
6213 /// uses features that we can't model on machineinstrs, we have SDISel do the
6214 /// allocation. This produces generally horrible, but correct, code.
6216 /// OpInfo describes the operand.
6218 static void GetRegistersForValue(SelectionDAG &DAG,
6219 const TargetLowering &TLI,
SDLoc DL,
SDISelAsmOperandInfo &OpInfo) {
6222 LLVMContext &Context = *DAG.getContext();
6224 MachineFunction &MF = DAG.getMachineFunction();
6225 SmallVector<unsigned, 4> Regs;
6227 // If this is a constraint for a single physreg, or a constraint for a
6228 // register class, find it.
6229 std::pair<unsigned, const TargetRegisterClass*> PhysReg =
6230 TLI.getRegForInlineAsmConstraint(OpInfo.ConstraintCode,
6231 OpInfo.ConstraintVT);
6233 unsigned NumRegs = 1;
6234 if (OpInfo.ConstraintVT != MVT::Other) {
// If this is an FP input in an integer register (or vice versa) insert a bit
6236 // cast of the input value. More generally, handle any case where the input
6237 // value disagrees with the register class we plan to stick this in.
6238 if (OpInfo.Type == InlineAsm::isInput &&
6239 PhysReg.second && !PhysReg.second->hasType(OpInfo.ConstraintVT)) {
6240 // Try to convert to the first EVT that the reg class contains. If the
// types are identical size, use a bitcast to convert (e.g. two differing
// pointer types to the same register class).
6243 MVT RegVT = *PhysReg.second->vt_begin();
6244 if (RegVT.getSizeInBits() == OpInfo.CallOperand.getValueSizeInBits()) {
6245 OpInfo.CallOperand = DAG.getNode(ISD::BITCAST, DL,
6246 RegVT, OpInfo.CallOperand);
6247 OpInfo.ConstraintVT = RegVT;
6248 } else if (RegVT.isInteger() && OpInfo.ConstraintVT.isFloatingPoint()) {
6249 // If the input is a FP value and we want it in FP registers, do a
6250 // bitcast to the corresponding integer type. This turns an f64 value
6251 // into i64, which can be passed with two i32 values on a 32-bit
// machine.
RegVT = MVT::getIntegerVT(OpInfo.ConstraintVT.getSizeInBits());
6254 OpInfo.CallOperand = DAG.getNode(ISD::BITCAST, DL,
6255 RegVT, OpInfo.CallOperand);
6256 OpInfo.ConstraintVT = RegVT;
    }
  }
  NumRegs = TLI.getNumRegisters(Context, OpInfo.ConstraintVT);
}

MVT RegVT;
6264 EVT ValueVT = OpInfo.ConstraintVT;
// If this is a constraint for a specific physical register, like {r17},
// assign it now.
6268 if (unsigned AssignedReg = PhysReg.first) {
6269 const TargetRegisterClass *RC = PhysReg.second;
6270 if (OpInfo.ConstraintVT == MVT::Other)
6271 ValueVT = *RC->vt_begin();
6273 // Get the actual register value type. This is important, because the user
6274 // may have asked for (e.g.) the AX register in i32 type. We need to
6275 // remember that AX is actually i16 to get the right extension.
6276 RegVT = *RC->vt_begin();
// This is an explicit reference to a physical register.
6279 Regs.push_back(AssignedReg);
// If this is an expanded reference, add the rest of the regs to Regs.
if (NumRegs != 1) {
  TargetRegisterClass::iterator I = RC->begin();
  for (; *I != AssignedReg; ++I)
    assert(I != RC->end() && "Didn't find reg!");

  // Already added the first reg.
  --NumRegs; ++I;
  for (; NumRegs; --NumRegs, ++I) {
    assert(I != RC->end() && "Ran out of registers to allocate!");
    Regs.push_back(*I);
  }
}
  OpInfo.AssignedRegs = RegsForValue(Regs, RegVT, ValueVT);
  return;
}
6299 // Otherwise, if this was a reference to an LLVM register class, create vregs
6300 // for this reference.
6301 if (const TargetRegisterClass *RC = PhysReg.second) {
6302 RegVT = *RC->vt_begin();
if (OpInfo.ConstraintVT == MVT::Other)
  ValueVT = RegVT;
6306 // Create the appropriate number of virtual registers.
6307 MachineRegisterInfo &RegInfo = MF.getRegInfo();
6308 for (; NumRegs; --NumRegs)
6309 Regs.push_back(RegInfo.createVirtualRegister(RC));
  OpInfo.AssignedRegs = RegsForValue(Regs, RegVT, ValueVT);
  return;
}
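// For example (illustrative), an "r" constraint on an i32 operand on x86
// takes this path: the matched register class is GR32 and one fresh virtual
// register is created per register needed.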
// Otherwise, we couldn't allocate enough registers for this.
}
6318 /// visitInlineAsm - Handle a call to an InlineAsm object.
6320 void SelectionDAGBuilder::visitInlineAsm(ImmutableCallSite CS) {
6321 const InlineAsm *IA = cast<InlineAsm>(CS.getCalledValue());
6323 /// ConstraintOperands - Information about all of the constraints.
6324 SDISelAsmOperandInfoVector ConstraintOperands;
6326 const TargetLowering *TLI = TM.getSubtargetImpl()->getTargetLowering();
6327 TargetLowering::AsmOperandInfoVector
6328 TargetConstraints = TLI->ParseConstraints(CS);
6330 bool hasMemory = false;
6332 unsigned ArgNo = 0; // ArgNo - The argument of the CallInst.
6333 unsigned ResNo = 0; // ResNo - The result number of the next output.
6334 for (unsigned i = 0, e = TargetConstraints.size(); i != e; ++i) {
6335 ConstraintOperands.push_back(SDISelAsmOperandInfo(TargetConstraints[i]));
6336 SDISelAsmOperandInfo &OpInfo = ConstraintOperands.back();
6338 MVT OpVT = MVT::Other;
6340 // Compute the value type for each operand.
6341 switch (OpInfo.Type) {
6342 case InlineAsm::isOutput:
6343 // Indirect outputs just consume an argument.
6344 if (OpInfo.isIndirect) {
  OpInfo.CallOperandVal = const_cast<Value *>(CS.getArgument(ArgNo++));
  break;
}
6349 // The return value of the call is this value. As such, there is no
6350 // corresponding argument.
6351 assert(!CS.getType()->isVoidTy() && "Bad inline asm!");
6352 if (StructType *STy = dyn_cast<StructType>(CS.getType())) {
6353 OpVT = TLI->getSimpleValueType(STy->getElementType(ResNo));
} else {
  assert(ResNo == 0 && "Asm only has one result!");
  OpVT = TLI->getSimpleValueType(CS.getType());
}
++ResNo;
break;
6360 case InlineAsm::isInput:
OpInfo.CallOperandVal = const_cast<Value *>(CS.getArgument(ArgNo++));
break;
case InlineAsm::isClobber:
  break;
}
6368 // If this is an input or an indirect output, process the call argument.
6369 // BasicBlocks are labels, currently appearing only in asm's.
6370 if (OpInfo.CallOperandVal) {
6371 if (const BasicBlock *BB = dyn_cast<BasicBlock>(OpInfo.CallOperandVal)) {
6372 OpInfo.CallOperand = DAG.getBasicBlock(FuncInfo.MBBMap[BB]);
} else {
  OpInfo.CallOperand = getValue(OpInfo.CallOperandVal);
}
  OpVT = OpInfo.getCallOperandValEVT(*DAG.getContext(), *TLI, DL).
           getSimpleVT();
}
6381 OpInfo.ConstraintVT = OpVT;
6383 // Indirect operand accesses access memory.
if (OpInfo.isIndirect)
  hasMemory = true;
else {
  for (unsigned j = 0, ee = OpInfo.Codes.size(); j != ee; ++j) {
6388 TargetLowering::ConstraintType
6389 CType = TLI->getConstraintType(OpInfo.Codes[j]);
      if (CType == TargetLowering::C_Memory) {
        hasMemory = true;
        break;
      }
    }
  }
}
6398 SDValue Chain, Flag;
6400 // We won't need to flush pending loads if this asm doesn't touch
6401 // memory and is nonvolatile.
if (hasMemory || IA->hasSideEffects())
  Chain = getRoot();
else
  Chain = DAG.getRoot();
6407 // Second pass over the constraints: compute which constraint option to use
6408 // and assign registers to constraints that want a specific physreg.
6409 for (unsigned i = 0, e = ConstraintOperands.size(); i != e; ++i) {
6410 SDISelAsmOperandInfo &OpInfo = ConstraintOperands[i];
6412 // If this is an output operand with a matching input operand, look up the
6413 // matching input. If their types mismatch, e.g. one is an integer, the
// other is floating point, or their sizes are different, flag it as an
// error.
6416 if (OpInfo.hasMatchingInput()) {
6417 SDISelAsmOperandInfo &Input = ConstraintOperands[OpInfo.MatchingInput];
6419 if (OpInfo.ConstraintVT != Input.ConstraintVT) {
6420 std::pair<unsigned, const TargetRegisterClass*> MatchRC =
6421 TLI->getRegForInlineAsmConstraint(OpInfo.ConstraintCode,
6422 OpInfo.ConstraintVT);
6423 std::pair<unsigned, const TargetRegisterClass*> InputRC =
6424 TLI->getRegForInlineAsmConstraint(Input.ConstraintCode,
6425 Input.ConstraintVT);
6426 if ((OpInfo.ConstraintVT.isInteger() !=
6427 Input.ConstraintVT.isInteger()) ||
6428 (MatchRC.second != InputRC.second)) {
6429 report_fatal_error("Unsupported asm: input constraint"
6430 " with a matching output constraint of"
6431 " incompatible type!");
    }
    Input.ConstraintVT = OpInfo.ConstraintVT;
  }
}
6437 // Compute the constraint code and ConstraintType to use.
6438 TLI->ComputeConstraintToUse(OpInfo, OpInfo.CallOperand, &DAG);
6440 if (OpInfo.ConstraintType == TargetLowering::C_Memory &&
OpInfo.Type == InlineAsm::isClobber)
  continue;
// If this is a memory input, and if the operand is not indirect, do what
// we need to provide an address for the memory input.
6446 if (OpInfo.ConstraintType == TargetLowering::C_Memory &&
6447 !OpInfo.isIndirect) {
6448 assert((OpInfo.isMultipleAlternative ||
6449 (OpInfo.Type == InlineAsm::isInput)) &&
6450 "Can only indirectify direct input operands!");
6452 // Memory operands really want the address of the value. If we don't have
6453 // an indirect input, put it in the constpool if we can, otherwise spill
6454 // it to a stack slot.
6455 // TODO: This isn't quite right. We need to handle these according to
6456 // the addressing mode that the constraint wants. Also, this may take
// an additional register for the computation and we don't want that
// either.
6460 // If the operand is a float, integer, or vector constant, spill to a
6461 // constant pool entry to get its address.
6462 const Value *OpVal = OpInfo.CallOperandVal;
6463 if (isa<ConstantFP>(OpVal) || isa<ConstantInt>(OpVal) ||
6464 isa<ConstantVector>(OpVal) || isa<ConstantDataVector>(OpVal)) {
6465 OpInfo.CallOperand = DAG.getConstantPool(cast<Constant>(OpVal),
6466 TLI->getPointerTy());
} else {
  // Otherwise, create a stack slot and emit a store to it before the
  // asm.
6470 Type *Ty = OpVal->getType();
6471 uint64_t TySize = TLI->getDataLayout()->getTypeAllocSize(Ty);
6472 unsigned Align = TLI->getDataLayout()->getPrefTypeAlignment(Ty);
6473 MachineFunction &MF = DAG.getMachineFunction();
6474 int SSFI = MF.getFrameInfo()->CreateStackObject(TySize, Align, false);
6475 SDValue StackSlot = DAG.getFrameIndex(SSFI, TLI->getPointerTy());
6476 Chain = DAG.getStore(Chain, getCurSDLoc(),
6477 OpInfo.CallOperand, StackSlot,
                       MachinePointerInfo::getFixedStack(SSFI),
                       false, false, 0);
  OpInfo.CallOperand = StackSlot;
}
6483 // There is no longer a Value* corresponding to this operand.
6484 OpInfo.CallOperandVal = nullptr;
6486 // It is now an indirect operand.
  OpInfo.isIndirect = true;
}
6490 // If this constraint is for a specific register, allocate it before
  // anything else.
  if (OpInfo.ConstraintType == TargetLowering::C_Register)
    GetRegistersForValue(DAG, *TLI, getCurSDLoc(), OpInfo);
}
6496 // Second pass - Loop over all of the operands, assigning virtual or physregs
6497 // to register class operands.
6498 for (unsigned i = 0, e = ConstraintOperands.size(); i != e; ++i) {
6499 SDISelAsmOperandInfo &OpInfo = ConstraintOperands[i];
6501 // C_Register operands have already been allocated, Other/Memory don't need
  // to be.
  if (OpInfo.ConstraintType == TargetLowering::C_RegisterClass)
    GetRegistersForValue(DAG, *TLI, getCurSDLoc(), OpInfo);
}
6507 // AsmNodeOperands - The operands for the ISD::INLINEASM node.
6508 std::vector<SDValue> AsmNodeOperands;
6509 AsmNodeOperands.push_back(SDValue()); // reserve space for input chain
6510 AsmNodeOperands.push_back(
6511 DAG.getTargetExternalSymbol(IA->getAsmString().c_str(),
6512 TLI->getPointerTy()));
6514 // If we have a !srcloc metadata node associated with it, we want to attach
6515 // this to the ultimately generated inline asm machineinstr. To do this, we
6516 // pass in the third operand as this (potentially null) inline asm MDNode.
6517 const MDNode *SrcLoc = CS.getInstruction()->getMetadata("srcloc");
6518 AsmNodeOperands.push_back(DAG.getMDNode(SrcLoc));
6520 // Remember the HasSideEffect, AlignStack, AsmDialect, MayLoad and MayStore
6521 // bits as operand 3.
6522 unsigned ExtraInfo = 0;
6523 if (IA->hasSideEffects())
6524 ExtraInfo |= InlineAsm::Extra_HasSideEffects;
6525 if (IA->isAlignStack())
6526 ExtraInfo |= InlineAsm::Extra_IsAlignStack;
6527 // Set the asm dialect.
6528 ExtraInfo |= IA->getDialect() * InlineAsm::Extra_AsmDialect;
6530 // Determine if this InlineAsm MayLoad or MayStore based on the constraints.
6531 for (unsigned i = 0, e = TargetConstraints.size(); i != e; ++i) {
6532 TargetLowering::AsmOperandInfo &OpInfo = TargetConstraints[i];
6534 // Compute the constraint code and ConstraintType to use.
6535 TLI->ComputeConstraintToUse(OpInfo, SDValue());
// Ideally, we would only check against memory constraints. However, the
// meaning of an 'other' constraint can be target-specific and we can't
// easily reason about it. Therefore, be conservative and set
// MayLoad/MayStore for other constraints as well.
6541 if (OpInfo.ConstraintType == TargetLowering::C_Memory ||
6542 OpInfo.ConstraintType == TargetLowering::C_Other) {
6543 if (OpInfo.Type == InlineAsm::isInput)
6544 ExtraInfo |= InlineAsm::Extra_MayLoad;
6545 else if (OpInfo.Type == InlineAsm::isOutput)
6546 ExtraInfo |= InlineAsm::Extra_MayStore;
6547 else if (OpInfo.Type == InlineAsm::isClobber)
      ExtraInfo |= (InlineAsm::Extra_MayLoad | InlineAsm::Extra_MayStore);
  }
}
6552 AsmNodeOperands.push_back(DAG.getTargetConstant(ExtraInfo,
6553 TLI->getPointerTy()));
6555 // Loop over all of the inputs, copying the operand values into the
6556 // appropriate registers and processing the output regs.
6557 RegsForValue RetValRegs;
6559 // IndirectStoresToEmit - The set of stores to emit after the inline asm node.
6560 std::vector<std::pair<RegsForValue, Value*> > IndirectStoresToEmit;
6562 for (unsigned i = 0, e = ConstraintOperands.size(); i != e; ++i) {
6563 SDISelAsmOperandInfo &OpInfo = ConstraintOperands[i];
6565 switch (OpInfo.Type) {
6566 case InlineAsm::isOutput: {
6567 if (OpInfo.ConstraintType != TargetLowering::C_RegisterClass &&
6568 OpInfo.ConstraintType != TargetLowering::C_Register) {
6569 // Memory output, or 'other' output (e.g. 'X' constraint).
6570 assert(OpInfo.isIndirect && "Memory output must be indirect operand");
6572 // Add information to the INLINEASM node to know about this output.
6573 unsigned OpFlags = InlineAsm::getFlagWord(InlineAsm::Kind_Mem, 1);
6574 AsmNodeOperands.push_back(DAG.getTargetConstant(OpFlags,
6575 TLI->getPointerTy()));
6576 AsmNodeOperands.push_back(OpInfo.CallOperand);
6577 break;
6578 }
6580 // Otherwise, this is a register or register class output.
6582 // Copy the output from the appropriate register. Find a register that
6583 // we can use.
6584 if (OpInfo.AssignedRegs.Regs.empty()) {
6585 LLVMContext &Ctx = *DAG.getContext();
6586 Ctx.emitError(CS.getInstruction(),
6587 "couldn't allocate output register for constraint '" +
6588 Twine(OpInfo.ConstraintCode) + "'");
6589 return;
6590 }
6592 // If this is an indirect operand, store through the pointer after the
6593 // asm.
6594 if (OpInfo.isIndirect) {
6595 IndirectStoresToEmit.push_back(std::make_pair(OpInfo.AssignedRegs,
6596 OpInfo.CallOperandVal));
6597 } else {
6598 // This is the result value of the call.
6599 assert(!CS.getType()->isVoidTy() && "Bad inline asm!");
6600 // Concatenate this output onto the outputs list.
6601 RetValRegs.append(OpInfo.AssignedRegs);
6602 }
6604 // Add information to the INLINEASM node to know that this register is
6605 // set.
6606 OpInfo.AssignedRegs
6607 .AddInlineAsmOperands(OpInfo.isEarlyClobber
6608 ? InlineAsm::Kind_RegDefEarlyClobber
6609 : InlineAsm::Kind_RegDef,
6610 false, 0, DAG, AsmNodeOperands);
6611 break;
6612 }
6613 case InlineAsm::isInput: {
6614 SDValue InOperandVal = OpInfo.CallOperand;
6616 if (OpInfo.isMatchingInputConstraint()) { // Matching constraint?
6617 // If this is required to match an output register we have already set,
6618 // just use its register.
6619 unsigned OperandNo = OpInfo.getMatchedOperand();
6621 // Scan until we find the definition we already emitted of this operand.
6622 // When we find it, create a RegsForValue operand.
6623 unsigned CurOp = InlineAsm::Op_FirstOperand;
6624 for (; OperandNo; --OperandNo) {
6625 // Advance to the next operand.
6626 unsigned OpFlag =
6627 cast<ConstantSDNode>(AsmNodeOperands[CurOp])->getZExtValue();
6628 assert((InlineAsm::isRegDefKind(OpFlag) ||
6629 InlineAsm::isRegDefEarlyClobberKind(OpFlag) ||
6630 InlineAsm::isMemKind(OpFlag)) && "Skipped past definitions?");
6631 CurOp += InlineAsm::getNumOperandRegisters(OpFlag)+1;
6632 }
6634 unsigned OpFlag =
6635 cast<ConstantSDNode>(AsmNodeOperands[CurOp])->getZExtValue();
6636 if (InlineAsm::isRegDefKind(OpFlag) ||
6637 InlineAsm::isRegDefEarlyClobberKind(OpFlag)) {
6638 // Add (OpFlag&0xffff)>>3 registers to MatchedRegs.
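// (Encoding note, for the arithmetic in the comment above: per
// InlineAsm::getFlagWord, the low 3 bits of a flag word hold the operand
// kind and bits 3..15 hold the register count, so (OpFlag & 0xffff) >> 3
// is exactly what getNumOperandRegisters(OpFlag) extracts.)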
6639 if (OpInfo.isIndirect) {
6640 // This happens on gcc/testsuite/gcc.dg/pr8788-1.c
6641 LLVMContext &Ctx = *DAG.getContext();
6642 Ctx.emitError(CS.getInstruction(), "inline asm not supported yet:"
6643 " don't know how to handle tied "
6644 "indirect register inputs");
6648 RegsForValue MatchedRegs;
6649 MatchedRegs.ValueVTs.push_back(InOperandVal.getValueType());
6650 MVT RegVT = AsmNodeOperands[CurOp+1].getSimpleValueType();
6651 MatchedRegs.RegVTs.push_back(RegVT);
6652 MachineRegisterInfo &RegInfo = DAG.getMachineFunction().getRegInfo();
6653 for (unsigned i = 0, e = InlineAsm::getNumOperandRegisters(OpFlag);
6654 i != e; ++i) {
6655 if (const TargetRegisterClass *RC = TLI->getRegClassFor(RegVT))
6656 MatchedRegs.Regs.push_back(RegInfo.createVirtualRegister(RC));
6657 else {
6658 LLVMContext &Ctx = *DAG.getContext();
6659 Ctx.emitError(CS.getInstruction(),
6660 "inline asm error: This value"
6661 " type register class is not natively supported!");
6665 // Use the produced MatchedRegs object to
6666 MatchedRegs.getCopyToRegs(InOperandVal, DAG, getCurSDLoc(),
6667 Chain, &Flag, CS.getInstruction());
6668 MatchedRegs.AddInlineAsmOperands(InlineAsm::Kind_RegUse,
6669 true, OpInfo.getMatchedOperand(),
6670 DAG, AsmNodeOperands);
6671 break;
6672 }
6674 assert(InlineAsm::isMemKind(OpFlag) && "Unknown matching constraint!");
6675 assert(InlineAsm::getNumOperandRegisters(OpFlag) == 1 &&
6676 "Unexpected number of operands");
6677 // Add information to the INLINEASM node to know about this input.
6678 // See InlineAsm.h isUseOperandTiedToDef.
6679 OpFlag = InlineAsm::getFlagWordForMatchingOp(OpFlag,
6680 OpInfo.getMatchedOperand());
6681 AsmNodeOperands.push_back(DAG.getTargetConstant(OpFlag,
6682 TLI->getPointerTy()));
6683 AsmNodeOperands.push_back(AsmNodeOperands[CurOp+1]);
6684 break;
6685 }
6687 // Treat indirect 'X' constraint as memory.
6688 if (OpInfo.ConstraintType == TargetLowering::C_Other &&
6689 OpInfo.isIndirect)
6690 OpInfo.ConstraintType = TargetLowering::C_Memory;
6692 if (OpInfo.ConstraintType == TargetLowering::C_Other) {
6693 std::vector<SDValue> Ops;
6694 TLI->LowerAsmOperandForConstraint(InOperandVal, OpInfo.ConstraintCode,
6695 Ops, DAG);
6696 if (Ops.empty()) {
6697 LLVMContext &Ctx = *DAG.getContext();
6698 Ctx.emitError(CS.getInstruction(),
6699 "invalid operand for inline asm constraint '" +
6700 Twine(OpInfo.ConstraintCode) + "'");
6704 // Add information to the INLINEASM node to know about this input.
6705 unsigned ResOpType =
6706 InlineAsm::getFlagWord(InlineAsm::Kind_Imm, Ops.size());
6707 AsmNodeOperands.push_back(DAG.getTargetConstant(ResOpType,
6708 TLI->getPointerTy()));
6709 AsmNodeOperands.insert(AsmNodeOperands.end(), Ops.begin(), Ops.end());
6710 break;
6711 }
6713 if (OpInfo.ConstraintType == TargetLowering::C_Memory) {
6714 assert(OpInfo.isIndirect && "Operand must be indirect to be a mem!");
6715 assert(InOperandVal.getValueType() == TLI->getPointerTy() &&
6716 "Memory operands expect pointer values");
6718 // Add information to the INLINEASM node to know about this input.
6719 unsigned ResOpType = InlineAsm::getFlagWord(InlineAsm::Kind_Mem, 1);
6720 AsmNodeOperands.push_back(DAG.getTargetConstant(ResOpType,
6721 TLI->getPointerTy()));
6722 AsmNodeOperands.push_back(InOperandVal);
6723 break;
6724 }
6726 assert((OpInfo.ConstraintType == TargetLowering::C_RegisterClass ||
6727 OpInfo.ConstraintType == TargetLowering::C_Register) &&
6728 "Unknown constraint type!");
6730 // TODO: Support this.
6731 if (OpInfo.isIndirect) {
6732 LLVMContext &Ctx = *DAG.getContext();
6733 Ctx.emitError(CS.getInstruction(),
6734 "Don't know how to handle indirect register inputs yet "
6735 "for constraint '" +
6736 Twine(OpInfo.ConstraintCode) + "'");
6740 // Copy the input into the appropriate registers.
6741 if (OpInfo.AssignedRegs.Regs.empty()) {
6742 LLVMContext &Ctx = *DAG.getContext();
6743 Ctx.emitError(CS.getInstruction(),
6744 "couldn't allocate input reg for constraint '" +
6745 Twine(OpInfo.ConstraintCode) + "'");
6749 OpInfo.AssignedRegs.getCopyToRegs(InOperandVal, DAG, getCurSDLoc(),
6750 Chain, &Flag, CS.getInstruction());
6752 OpInfo.AssignedRegs.AddInlineAsmOperands(InlineAsm::Kind_RegUse, false, 0,
6753 DAG, AsmNodeOperands);
6754 break;
6755 }
6756 case InlineAsm::isClobber: {
6757 // Add the clobbered value to the operand list, so that the register
6758 // allocator is aware that the physreg got clobbered.
6759 if (!OpInfo.AssignedRegs.Regs.empty())
6760 OpInfo.AssignedRegs.AddInlineAsmOperands(InlineAsm::Kind_Clobber,
6761 false, 0, DAG, AsmNodeOperands);
6762 break;
6763 }
6764 }
6765 }
6768 // Finish up input operands. Set the input chain and add the flag last.
6769 AsmNodeOperands[InlineAsm::Op_InputChain] = Chain;
6770 if (Flag.getNode()) AsmNodeOperands.push_back(Flag);
6772 Chain = DAG.getNode(ISD::INLINEASM, getCurSDLoc(),
6773 DAG.getVTList(MVT::Other, MVT::Glue), AsmNodeOperands);
6774 Flag = Chain.getValue(1);
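// At this point AsmNodeOperands has the fixed layout an INLINEASM node
// expects, matching the InlineAsm::Op_* indices used above: 0 = input
// chain, 1 = asm string, 2 = !srcloc MDNode, 3 = ExtraInfo word, then the
// per-operand flag/value groups, and optionally a trailing glue value.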
6776 // If this asm returns a register value, copy the result from that register
6777 // and set it as the value of the call.
6778 if (!RetValRegs.Regs.empty()) {
6779 SDValue Val = RetValRegs.getCopyFromRegs(DAG, FuncInfo, getCurSDLoc(),
6780 Chain, &Flag, CS.getInstruction());
6782 // FIXME: Why don't we do this for inline asms with MRVs?
6783 if (CS.getType()->isSingleValueType() && CS.getType()->isSized()) {
6784 EVT ResultType = TLI->getValueType(CS.getType());
6786 // If any of the results of the inline asm is a vector, it may have the
6787 // wrong width/num elts. This can happen for register classes that can
6788 // contain multiple different value types. The preg or vreg allocated may
6789 // not have the same VT as was expected. Convert it to the right type
6790 // with bit_convert.
6791 if (ResultType != Val.getValueType() && Val.getValueType().isVector()) {
6792 Val = DAG.getNode(ISD::BITCAST, getCurSDLoc(),
6793 ResultType, Val);
6795 } else if (ResultType != Val.getValueType() &&
6796 ResultType.isInteger() && Val.getValueType().isInteger()) {
6797 // If a result value was tied to an input value, the computed result may
6798 // have a wider width than the expected result. Extract the relevant
6799 // portion.
6800 Val = DAG.getNode(ISD::TRUNCATE, getCurSDLoc(), ResultType, Val);
6803 assert(ResultType == Val.getValueType() && "Asm result value mismatch!");
6804 }
6806 setValue(CS.getInstruction(), Val);
6807 // Don't need to use this as a chain in this case.
6808 if (!IA->hasSideEffects() && !hasMemory && IndirectStoresToEmit.empty())
6809 return;
6810 }
6812 std::vector<std::pair<SDValue, const Value *> > StoresToEmit;
6814 // Process indirect outputs, first output all of the flagged copies out of
6815 // the physregs.
6816 for (unsigned i = 0, e = IndirectStoresToEmit.size(); i != e; ++i) {
6817 RegsForValue &OutRegs = IndirectStoresToEmit[i].first;
6818 const Value *Ptr = IndirectStoresToEmit[i].second;
6819 SDValue OutVal = OutRegs.getCopyFromRegs(DAG, FuncInfo, getCurSDLoc(),
6820 Chain, &Flag, IA);
6821 StoresToEmit.push_back(std::make_pair(OutVal, Ptr));
6822 }
6824 // Emit the non-flagged stores from the physregs.
6825 SmallVector<SDValue, 8> OutChains;
6826 for (unsigned i = 0, e = StoresToEmit.size(); i != e; ++i) {
6827 SDValue Val = DAG.getStore(Chain, getCurSDLoc(),
6828 StoresToEmit[i].first,
6829 getValue(StoresToEmit[i].second),
6830 MachinePointerInfo(StoresToEmit[i].second),
6831 false, false, 0);
6832 OutChains.push_back(Val);
6833 }
6835 if (!OutChains.empty())
6836 Chain = DAG.getNode(ISD::TokenFactor, getCurSDLoc(), MVT::Other, OutChains);
6838 DAG.setRoot(Chain);
6839 }
6841 void SelectionDAGBuilder::visitVAStart(const CallInst &I) {
6842 DAG.setRoot(DAG.getNode(ISD::VASTART, getCurSDLoc(),
6843 MVT::Other, getRoot(),
6844 getValue(I.getArgOperand(0)),
6845 DAG.getSrcValue(I.getArgOperand(0))));
6846 }
6848 void SelectionDAGBuilder::visitVAArg(const VAArgInst &I) {
6849 const TargetLowering *TLI = TM.getSubtargetImpl()->getTargetLowering();
6850 const DataLayout &DL = *TLI->getDataLayout();
6851 SDValue V = DAG.getVAArg(TLI->getValueType(I.getType()), getCurSDLoc(),
6852 getRoot(), getValue(I.getOperand(0)),
6853 DAG.getSrcValue(I.getOperand(0)),
6854 DL.getABITypeAlignment(I.getType()));
6855 setValue(&I, V);
6856 DAG.setRoot(V.getValue(1));
6857 }
6859 void SelectionDAGBuilder::visitVAEnd(const CallInst &I) {
6860 DAG.setRoot(DAG.getNode(ISD::VAEND, getCurSDLoc(),
6861 MVT::Other, getRoot(),
6862 getValue(I.getArgOperand(0)),
6863 DAG.getSrcValue(I.getArgOperand(0))));
6864 }
6866 void SelectionDAGBuilder::visitVACopy(const CallInst &I) {
6867 DAG.setRoot(DAG.getNode(ISD::VACOPY, getCurSDLoc(),
6868 MVT::Other, getRoot(),
6869 getValue(I.getArgOperand(0)),
6870 getValue(I.getArgOperand(1)),
6871 DAG.getSrcValue(I.getArgOperand(0)),
6872 DAG.getSrcValue(I.getArgOperand(1))));
6873 }
6875 /// \brief Lower an argument list according to the target calling convention.
6877 /// \return A tuple of <return-value, token-chain>
6879 /// This is a helper for lowering intrinsics that follow a target calling
6880 /// convention or require stack pointer adjustment. Only a subset of the
6881 /// intrinsic's operands need to participate in the calling convention.
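/// For example (illustrative): visitPatchpoint below uses this helper to
/// lower only the <numArgs> call arguments of a patchpoint, skipping the
/// meta-operands such as <id> and <numBytes>.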
6882 std::pair<SDValue, SDValue>
6883 SelectionDAGBuilder::LowerCallOperands(const CallInst &CI, unsigned ArgIdx,
6884 unsigned NumArgs, SDValue Callee,
6885 bool useVoidTy) {
6886 TargetLowering::ArgListTy Args;
6887 Args.reserve(NumArgs);
6889 // Populate the argument list.
6890 // Attributes for args start at offset 1, after the return attribute.
6891 ImmutableCallSite CS(&CI);
6892 for (unsigned ArgI = ArgIdx, ArgE = ArgIdx + NumArgs, AttrI = ArgIdx + 1;
6893 ArgI != ArgE; ++ArgI, ++AttrI) {
6894 const Value *V = CI.getOperand(ArgI);
6896 assert(!V->getType()->isEmptyTy() && "Empty type passed to intrinsic.");
6898 TargetLowering::ArgListEntry Entry;
6899 Entry.Node = getValue(V);
6900 Entry.Ty = V->getType();
6901 Entry.setAttributes(&CS, AttrI);
6902 Args.push_back(Entry);
6903 }
6905 Type *retTy = useVoidTy ? Type::getVoidTy(*DAG.getContext()) : CI.getType();
6906 TargetLowering::CallLoweringInfo CLI(DAG);
6907 CLI.setDebugLoc(getCurSDLoc()).setChain(getRoot())
6908 .setCallee(CI.getCallingConv(), retTy, Callee, std::move(Args), NumArgs)
6909 .setDiscardResult(CI.use_empty());
6911 const TargetLowering *TLI = TM.getSubtargetImpl()->getTargetLowering();
6912 return TLI->LowerCallTo(CLI);
6913 }
6915 /// \brief Add a stack map intrinsic call's live variable operands to a stackmap
6916 /// or patchpoint target node's operand list.
6918 /// Constants are converted to TargetConstants purely as an optimization to
6919 /// avoid constant materialization and register allocation.
6921 /// FrameIndex operands are converted to TargetFrameIndex so that ISEL does not
6922 /// generate address computation nodes, and so ExpandISelPseudo can convert the
6923 /// TargetFrameIndex into a DirectMemRefOp StackMap location. This avoids
6924 /// address materialization and register allocation, but may also be required
6925 /// for correctness. If a StackMap (or PatchPoint) intrinsic directly uses an
6926 /// alloca in the entry block, then the runtime may assume that the alloca's
6927 /// StackMap location can be read immediately after compilation and that the
6928 /// location is valid at any point during execution (this is similar to the
6929 /// assumption made by the llvm.gcroot intrinsic). If the alloca's location were
6930 /// only available in a register, then the runtime would need to trap when
6931 /// execution reaches the StackMap in order to read the alloca's location.
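/// A sketch of the conversion performed below (illustrative): a live i64
/// constant 42 is emitted as the operand pair
///   TargetConstant(StackMaps::ConstantOp, i64), TargetConstant(42, i64)
/// and a frame-index operand becomes the matching TargetFrameIndex node.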
6932 static void addStackMapLiveVars(const CallInst &CI, unsigned StartIdx,
6933 SmallVectorImpl<SDValue> &Ops,
6934 SelectionDAGBuilder &Builder) {
6935 for (unsigned i = StartIdx, e = CI.getNumArgOperands(); i != e; ++i) {
6936 SDValue OpVal = Builder.getValue(CI.getArgOperand(i));
6937 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(OpVal)) {
6938 Ops.push_back(
6939 Builder.DAG.getTargetConstant(StackMaps::ConstantOp, MVT::i64));
6940 Ops.push_back(
6941 Builder.DAG.getTargetConstant(C->getSExtValue(), MVT::i64));
6942 } else if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(OpVal)) {
6943 const TargetLowering &TLI = Builder.DAG.getTargetLoweringInfo();
6944 Ops.push_back(
6945 Builder.DAG.getTargetFrameIndex(FI->getIndex(), TLI.getPointerTy()));
6946 } else
6947 Ops.push_back(OpVal);
6948 }
6949 }
6951 /// \brief Lower llvm.experimental.stackmap directly to its target opcode.
6952 void SelectionDAGBuilder::visitStackmap(const CallInst &CI) {
6953 // void @llvm.experimental.stackmap(i64 <id>, i32 <numShadowBytes>,
6954 // [live variables...])
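// An illustrative call (not from this file):
//   call void @llvm.experimental.stackmap(i64 42, i32 8, i32 %x, i64* %p)
// records the locations of %x and %p under ID 42 and reserves 8 bytes of
// nop shadow at the call site.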
6956 assert(CI.getType()->isVoidTy() && "Stackmap cannot return a value.");
6958 SDValue Chain, InFlag, Callee, NullPtr;
6959 SmallVector<SDValue, 32> Ops;
6961 SDLoc DL = getCurSDLoc();
6962 Callee = getValue(CI.getCalledValue());
6963 NullPtr = DAG.getIntPtrConstant(0, true);
6965 // The stackmap intrinsic only records the live variables (the arguments
6966 // passed to it) and emits NOPs (if requested). Unlike the patchpoint
6967 // intrinsic, this won't be lowered to a function call. This means we don't
6968 // have to worry about calling conventions and target specific lowering code.
6969 // Instead we perform the call lowering right here.
6971 // chain, flag = CALLSEQ_START(chain, 0)
6972 // chain, flag = STACKMAP(id, nbytes, ..., chain, flag)
6973 // chain, flag = CALLSEQ_END(chain, 0, 0, flag)
6975 Chain = DAG.getCALLSEQ_START(getRoot(), NullPtr, DL);
6976 InFlag = Chain.getValue(1);
6978 // Add the <id> and <numBytes> constants.
6979 SDValue IDVal = getValue(CI.getOperand(PatchPointOpers::IDPos));
6980 Ops.push_back(DAG.getTargetConstant(
6981 cast<ConstantSDNode>(IDVal)->getZExtValue(), MVT::i64));
6982 SDValue NBytesVal = getValue(CI.getOperand(PatchPointOpers::NBytesPos));
6983 Ops.push_back(DAG.getTargetConstant(
6984 cast<ConstantSDNode>(NBytesVal)->getZExtValue(), MVT::i32));
6986 // Push live variables for the stack map.
6987 addStackMapLiveVars(CI, 2, Ops, *this);
6989 // We are not pushing any register mask info here on the operands list,
6990 // because the stackmap doesn't clobber anything.
6992 // Push the chain and the glue flag.
6993 Ops.push_back(Chain);
6994 Ops.push_back(InFlag);
6996 // Create the STACKMAP node.
6997 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
6998 SDNode *SM = DAG.getMachineNode(TargetOpcode::STACKMAP, DL, NodeTys, Ops);
6999 Chain = SDValue(SM, 0);
7000 InFlag = Chain.getValue(1);
7002 Chain = DAG.getCALLSEQ_END(Chain, NullPtr, NullPtr, InFlag, DL);
7004 // Stackmaps don't generate values, so nothing goes into the NodeMap.
7006 // Set the root to the target-lowered call chain.
7007 DAG.setRoot(Chain);
7009 // Inform the Frame Information that we have a stackmap in this function.
7010 FuncInfo.MF->getFrameInfo()->setHasStackMap();
7011 }
7013 /// \brief Lower llvm.experimental.patchpoint directly to its target opcode.
7014 void SelectionDAGBuilder::visitPatchpoint(const CallInst &CI) {
7015 // void|i64 @llvm.experimental.patchpoint.void|i64(i64 <id>,
7016 // i32 <numBytes>,
7017 // i8* <target>,
7018 // i32 <numArgs>,
7019 // [Args...],
7020 // [live variables...])
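// An illustrative call (not from this file):
//   %r = call i64 @llvm.experimental.patchpoint.i64(i64 7, i32 15,
//                                                   i8* %f, i32 2,
//                                                   i64 %a, i64 %b)
// reserves 15 bytes at the call site, calls %f with two arguments, and
// yields the patched call's i64 result.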
7022 CallingConv::ID CC = CI.getCallingConv();
7023 bool isAnyRegCC = CC == CallingConv::AnyReg;
7024 bool hasDef = !CI.getType()->isVoidTy();
7025 SDValue Callee = getValue(CI.getOperand(2)); // <target>
7027 // Get the real number of arguments participating in the call <numArgs>
7028 SDValue NArgVal = getValue(CI.getArgOperand(PatchPointOpers::NArgPos));
7029 unsigned NumArgs = cast<ConstantSDNode>(NArgVal)->getZExtValue();
7031 // Skip the four meta args: <id>, <numNopBytes>, <target>, <numArgs>
7032 // Intrinsics include all meta-operands up to but not including CC.
7033 unsigned NumMetaOpers = PatchPointOpers::CCPos;
7034 assert(CI.getNumArgOperands() >= NumMetaOpers + NumArgs &&
7035 "Not enough arguments provided to the patchpoint intrinsic");
7037 // For AnyRegCC the arguments are lowered later on manually.
7038 unsigned NumCallArgs = isAnyRegCC ? 0 : NumArgs;
7039 std::pair<SDValue, SDValue> Result =
7040 LowerCallOperands(CI, NumMetaOpers, NumCallArgs, Callee, isAnyRegCC);
7042 // Set the root to the target-lowered call chain.
7043 SDValue Chain = Result.second;
7044 DAG.setRoot(Chain);
7046 SDNode *CallEnd = Chain.getNode();
7047 if (hasDef && (CallEnd->getOpcode() == ISD::CopyFromReg))
7048 CallEnd = CallEnd->getOperand(0).getNode();
7050 // Get a call instruction from the call sequence chain.
7051 // Tail calls are not allowed.
7052 assert(CallEnd->getOpcode() == ISD::CALLSEQ_END &&
7053 "Expected a callseq node.");
7054 SDNode *Call = CallEnd->getOperand(0).getNode();
7055 bool hasGlue = Call->getGluedNode();
7057 // Replace the target specific call node with the patchable intrinsic.
7058 SmallVector<SDValue, 8> Ops;
7060 // Add the <id> and <numBytes> constants.
7061 SDValue IDVal = getValue(CI.getOperand(PatchPointOpers::IDPos));
7062 Ops.push_back(DAG.getTargetConstant(
7063 cast<ConstantSDNode>(IDVal)->getZExtValue(), MVT::i64));
7064 SDValue NBytesVal = getValue(CI.getOperand(PatchPointOpers::NBytesPos));
7065 Ops.push_back(DAG.getTargetConstant(
7066 cast<ConstantSDNode>(NBytesVal)->getZExtValue(), MVT::i32));
7068 // Assume that the Callee is a constant address.
7069 // FIXME: handle function symbols in the future.
7070 Ops.push_back(
7071 DAG.getIntPtrConstant(cast<ConstantSDNode>(Callee)->getZExtValue(),
7072 /*isTarget=*/true));
7074 // Adjust <numArgs> to account for any arguments that have been passed on the
7075 // stack instead.
7076 // Call Node: Chain, Target, {Args}, RegMask, [Glue]
7077 unsigned NumCallRegArgs = Call->getNumOperands() - (hasGlue ? 4 : 3);
7078 NumCallRegArgs = isAnyRegCC ? NumArgs : NumCallRegArgs;
7079 Ops.push_back(DAG.getTargetConstant(NumCallRegArgs, MVT::i32));
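// For example (a sketch): a glued call node (Chain, Target, R1, R2,
// RegMask, Glue) has six operands, so NumCallRegArgs = 6 - 4 = 2 register
// arguments.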
7081 // Add the calling convention
7082 Ops.push_back(DAG.getTargetConstant((unsigned)CC, MVT::i32));
7084 // Add the arguments we omitted previously. The register allocator should
7085 // place these in any free register.
7086 if (isAnyRegCC)
7087 for (unsigned i = NumMetaOpers, e = NumMetaOpers + NumArgs; i != e; ++i)
7088 Ops.push_back(getValue(CI.getArgOperand(i)));
7090 // Push the arguments from the call instruction up to the register mask.
7091 SDNode::op_iterator e = hasGlue ? Call->op_end()-2 : Call->op_end()-1;
7092 for (SDNode::op_iterator i = Call->op_begin()+2; i != e; ++i)
7093 Ops.push_back(*i);
7095 // Push live variables for the stack map.
7096 addStackMapLiveVars(CI, NumMetaOpers + NumArgs, Ops, *this);
7098 // Push the register mask info.
7099 if (hasGlue)
7100 Ops.push_back(*(Call->op_end()-2));
7101 else
7102 Ops.push_back(*(Call->op_end()-1));
7104 // Push the chain (this is originally the first operand of the call, but
7105 // becomes now the last or second to last operand).
7106 Ops.push_back(*(Call->op_begin()));
7108 // Push the glue flag (last operand).
7109 if (hasGlue)
7110 Ops.push_back(*(Call->op_end()-1));
7112 SDVTList NodeTys;
7113 if (isAnyRegCC && hasDef) {
7114 // Create the return types based on the intrinsic definition
7115 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
7116 SmallVector<EVT, 3> ValueVTs;
7117 ComputeValueVTs(TLI, CI.getType(), ValueVTs);
7118 assert(ValueVTs.size() == 1 && "Expected only one return value type.");
7120 // There is always a chain and a glue type at the end
7121 ValueVTs.push_back(MVT::Other);
7122 ValueVTs.push_back(MVT::Glue);
7123 NodeTys = DAG.getVTList(ValueVTs);
7124 } else
7125 NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
7127 // Replace the target specific call node with a PATCHPOINT node.
7128 MachineSDNode *MN = DAG.getMachineNode(TargetOpcode::PATCHPOINT,
7129 getCurSDLoc(), NodeTys, Ops);
7131 // Update the NodeMap.
7132 if (hasDef) {
7133 if (isAnyRegCC)
7134 setValue(&CI, SDValue(MN, 0));
7135 else
7136 setValue(&CI, Result.first);
7137 }
7139 // Fixup the consumers of the intrinsic. The chain and glue may be used in the
7140 // call sequence. Furthermore the location of the chain and glue can change
7141 // when the AnyReg calling convention is used and the intrinsic returns a
7142 // value.
7143 if (isAnyRegCC && hasDef) {
7144 SDValue From[] = {SDValue(Call, 0), SDValue(Call, 1)};
7145 SDValue To[] = {SDValue(MN, 1), SDValue(MN, 2)};
7146 DAG.ReplaceAllUsesOfValuesWith(From, To, 2);
7147 } else
7148 DAG.ReplaceAllUsesWith(Call, MN);
7149 DAG.DeleteNode(Call);
7151 // Inform the Frame Information that we have a patchpoint in this function.
7152 FuncInfo.MF->getFrameInfo()->setHasPatchPoint();
7153 }
7155 /// Returns an AttributeSet representing the attributes applied to the return
7156 /// value of the given call.
7157 static AttributeSet getReturnAttrs(TargetLowering::CallLoweringInfo &CLI) {
7158 SmallVector<Attribute::AttrKind, 2> Attrs;
7159 if (CLI.RetSExt)
7160 Attrs.push_back(Attribute::SExt);
7161 if (CLI.RetZExt)
7162 Attrs.push_back(Attribute::ZExt);
7163 if (CLI.IsInReg)
7164 Attrs.push_back(Attribute::InReg);
7166 return AttributeSet::get(CLI.RetTy->getContext(), AttributeSet::ReturnIndex,
7167 Attrs);
7168 }
7170 /// TargetLowering::LowerCallTo - This is the default LowerCallTo
7171 /// implementation, which just calls LowerCall.
7172 /// FIXME: When all targets are
7173 /// migrated to using LowerCall, this hook should be integrated into SDISel.
7174 std::pair<SDValue, SDValue>
7175 TargetLowering::LowerCallTo(TargetLowering::CallLoweringInfo &CLI) const {
7176 // Handle the incoming return values from the call.
7178 Type *OrigRetTy = CLI.RetTy;
7179 SmallVector<EVT, 4> RetTys;
7180 SmallVector<uint64_t, 4> Offsets;
7181 ComputeValueVTs(*this, CLI.RetTy, RetTys, &Offsets);
7183 SmallVector<ISD::OutputArg, 4> Outs;
7184 GetReturnInfo(CLI.RetTy, getReturnAttrs(CLI), Outs, *this);
7186 bool CanLowerReturn =
7187 this->CanLowerReturn(CLI.CallConv, CLI.DAG.getMachineFunction(),
7188 CLI.IsVarArg, Outs, CLI.RetTy->getContext());
7190 SDValue DemoteStackSlot;
7191 int DemoteStackIdx = -100;
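// A sketch of the demotion below (illustrative): if the target cannot
// return, say, a large struct in registers, the call is rewritten to take
// a hidden pointer to a fresh stack slot, and the "return value" is later
// reloaded from that slot.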
7192 if (!CanLowerReturn) {
7193 // FIXME: equivalent assert?
7194 // assert(!CS.hasInAllocaArgument() &&
7195 // "sret demotion is incompatible with inalloca");
7196 uint64_t TySize = getDataLayout()->getTypeAllocSize(CLI.RetTy);
7197 unsigned Align = getDataLayout()->getPrefTypeAlignment(CLI.RetTy);
7198 MachineFunction &MF = CLI.DAG.getMachineFunction();
7199 DemoteStackIdx = MF.getFrameInfo()->CreateStackObject(TySize, Align, false);
7200 Type *StackSlotPtrType = PointerType::getUnqual(CLI.RetTy);
7202 DemoteStackSlot = CLI.DAG.getFrameIndex(DemoteStackIdx, getPointerTy());
7203 ArgListEntry Entry;
7204 Entry.Node = DemoteStackSlot;
7205 Entry.Ty = StackSlotPtrType;
7206 Entry.isSExt = false;
7207 Entry.isZExt = false;
7208 Entry.isInReg = false;
7209 Entry.isSRet = true;
7210 Entry.isNest = false;
7211 Entry.isByVal = false;
7212 Entry.isReturned = false;
7213 Entry.Alignment = Align;
7214 CLI.getArgs().insert(CLI.getArgs().begin(), Entry);
7215 CLI.RetTy = Type::getVoidTy(CLI.RetTy->getContext());
7216 } else {
7217 for (unsigned I = 0, E = RetTys.size(); I != E; ++I) {
7218 EVT VT = RetTys[I];
7219 MVT RegisterVT = getRegisterType(CLI.RetTy->getContext(), VT);
7220 unsigned NumRegs = getNumRegisters(CLI.RetTy->getContext(), VT);
7221 for (unsigned i = 0; i != NumRegs; ++i) {
7222 ISD::InputArg MyFlags;
7223 MyFlags.VT = RegisterVT;
7224 MyFlags.ArgVT = VT;
7225 MyFlags.Used = CLI.IsReturnValueUsed;
7226 if (CLI.RetSExt)
7227 MyFlags.Flags.setSExt();
7228 if (CLI.RetZExt)
7229 MyFlags.Flags.setZExt();
7230 if (CLI.IsInReg)
7231 MyFlags.Flags.setInReg();
7232 CLI.Ins.push_back(MyFlags);
7233 }
7234 }
7235 }
7237 // Handle all of the outgoing arguments.
7238 CLI.Outs.clear();
7239 CLI.OutVals.clear();
7240 ArgListTy &Args = CLI.getArgs();
7241 for (unsigned i = 0, e = Args.size(); i != e; ++i) {
7242 SmallVector<EVT, 4> ValueVTs;
7243 ComputeValueVTs(*this, Args[i].Ty, ValueVTs);
7244 Type *FinalType = Args[i].Ty;
7245 if (Args[i].isByVal)
7246 FinalType = cast<PointerType>(Args[i].Ty)->getElementType();
7247 bool NeedsRegBlock = functionArgumentNeedsConsecutiveRegisters(
7248 FinalType, CLI.CallConv, CLI.IsVarArg);
7249 for (unsigned Value = 0, NumValues = ValueVTs.size(); Value != NumValues;
7250 ++Value) {
7251 EVT VT = ValueVTs[Value];
7252 Type *ArgTy = VT.getTypeForEVT(CLI.RetTy->getContext());
7253 SDValue Op = SDValue(Args[i].Node.getNode(),
7254 Args[i].Node.getResNo() + Value);
7255 ISD::ArgFlagsTy Flags;
7256 unsigned OriginalAlignment = getDataLayout()->getABITypeAlignment(ArgTy);
7258 if (Args[i].isZExt)
7259 Flags.setZExt();
7260 if (Args[i].isSExt)
7261 Flags.setSExt();
7262 if (Args[i].isInReg)
7263 Flags.setInReg();
7264 if (Args[i].isSRet)
7265 Flags.setSRet();
7266 if (Args[i].isByVal)
7267 Flags.setByVal();
7268 if (Args[i].isInAlloca) {
7269 Flags.setInAlloca();
7270 // Set the byval flag for CCAssignFn callbacks that don't know about
7271 // inalloca. This way we can know how many bytes we should've allocated
7272 // and how many bytes a callee cleanup function will pop. If we port
7273 // inalloca to more targets, we'll have to add custom inalloca handling
7274 // in the various CC lowering callbacks.
7275 Flags.setByVal();
7276 }
7277 if (Args[i].isByVal || Args[i].isInAlloca) {
7278 PointerType *Ty = cast<PointerType>(Args[i].Ty);
7279 Type *ElementTy = Ty->getElementType();
7280 Flags.setByValSize(getDataLayout()->getTypeAllocSize(ElementTy));
7281 // For ByVal, alignment should come from FE. BE will guess if this
7282 // info is not there but there are cases it cannot get right.
7283 unsigned FrameAlign;
7284 if (Args[i].Alignment)
7285 FrameAlign = Args[i].Alignment;
7286 else
7287 FrameAlign = getByValTypeAlignment(ElementTy);
7288 Flags.setByValAlign(FrameAlign);
7289 }
7290 if (Args[i].isNest)
7291 Flags.setNest();
7292 if (NeedsRegBlock)
7293 Flags.setInConsecutiveRegs();
7294 Flags.setOrigAlign(OriginalAlignment);
7296 MVT PartVT = getRegisterType(CLI.RetTy->getContext(), VT);
7297 unsigned NumParts = getNumRegisters(CLI.RetTy->getContext(), VT);
7298 SmallVector<SDValue, 4> Parts(NumParts);
7299 ISD::NodeType ExtendKind = ISD::ANY_EXTEND;
7301 if (Args[i].isSExt)
7302 ExtendKind = ISD::SIGN_EXTEND;
7303 else if (Args[i].isZExt)
7304 ExtendKind = ISD::ZERO_EXTEND;
7306 // Conservatively only handle 'returned' on non-vectors for now
7307 if (Args[i].isReturned && !Op.getValueType().isVector()) {
7308 assert(CLI.RetTy == Args[i].Ty && RetTys.size() == NumValues &&
7309 "unexpected use of 'returned'");
7310 // Before passing 'returned' to the target lowering code, ensure that
7311 // either the register MVT and the actual EVT are the same size or that
7312 // the return value and argument are extended in the same way; in these
7313 // cases it's safe to pass the argument register value unchanged as the
7314 // return register value (although it's at the target's option whether
7315 // to do so).
7316 // TODO: allow code generation to take advantage of partially preserved
7317 // registers rather than clobbering the entire register when the
7318 // parameter extension method is not compatible with the return
7319 // extension method.
7320 if ((NumParts * PartVT.getSizeInBits() == VT.getSizeInBits()) ||
7321 (ExtendKind != ISD::ANY_EXTEND &&
7322 CLI.RetSExt == Args[i].isSExt && CLI.RetZExt == Args[i].isZExt))
7323 Flags.setReturned();
7324 }
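// For example (illustrative): an i32 argument marked 'returned' that is
// both passed and returned zero-extended in a single register satisfies
// the check above, so the flag survives and the call's result may reuse
// the argument register.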
7326 getCopyToParts(CLI.DAG, CLI.DL, Op, &Parts[0], NumParts, PartVT,
7327 CLI.CS ? CLI.CS->getInstruction() : nullptr, ExtendKind);
7329 for (unsigned j = 0; j != NumParts; ++j) {
7330 // if it isn't first piece, alignment must be 1
7331 ISD::OutputArg MyFlags(Flags, Parts[j].getValueType(), VT,
7332 i < CLI.NumFixedArgs,
7333 i, j*Parts[j].getValueType().getStoreSize());
7334 if (NumParts > 1 && j == 0)
7335 MyFlags.Flags.setSplit();
7336 else if (j != 0)
7337 MyFlags.Flags.setOrigAlign(1);
7339 // Only mark the end at the last register of the last value.
7340 if (NeedsRegBlock && Value == NumValues - 1 && j == NumParts - 1)
7341 MyFlags.Flags.setInConsecutiveRegsLast();
7343 CLI.Outs.push_back(MyFlags);
7344 CLI.OutVals.push_back(Parts[j]);
7345 }
7346 }
7347 }
7349 SmallVector<SDValue, 4> InVals;
7350 CLI.Chain = LowerCall(CLI, InVals);
7352 // Verify that the target's LowerCall behaved as expected.
7353 assert(CLI.Chain.getNode() && CLI.Chain.getValueType() == MVT::Other &&
7354 "LowerCall didn't return a valid chain!");
7355 assert((!CLI.IsTailCall || InVals.empty()) &&
7356 "LowerCall emitted a return value for a tail call!");
7357 assert((CLI.IsTailCall || InVals.size() == CLI.Ins.size()) &&
7358 "LowerCall didn't emit the correct number of values!");
7360 // For a tail call, the return value is merely live-out and there aren't
7361 // any nodes in the DAG representing it. Return a special value to
7362 // indicate that a tail call has been emitted and no more Instructions
7363 // should be processed in the current block.
7364 if (CLI.IsTailCall) {
7365 CLI.DAG.setRoot(CLI.Chain);
7366 return std::make_pair(SDValue(), SDValue());
7367 }
7369 DEBUG(for (unsigned i = 0, e = CLI.Ins.size(); i != e; ++i) {
7370 assert(InVals[i].getNode() &&
7371 "LowerCall emitted a null value!");
7372 assert(EVT(CLI.Ins[i].VT) == InVals[i].getValueType() &&
7373 "LowerCall emitted a value with the wrong type!");
7376 SmallVector<SDValue, 4> ReturnValues;
7377 if (!CanLowerReturn) {
7378 // The instruction result is the result of loading from the
7379 // hidden sret parameter.
7380 SmallVector<EVT, 1> PVTs;
7381 Type *PtrRetTy = PointerType::getUnqual(OrigRetTy);
7383 ComputeValueVTs(*this, PtrRetTy, PVTs);
7384 assert(PVTs.size() == 1 && "Pointers should fit in one register");
7385 EVT PtrVT = PVTs[0];
7387 unsigned NumValues = RetTys.size();
7388 ReturnValues.resize(NumValues);
7389 SmallVector<SDValue, 4> Chains(NumValues);
7391 for (unsigned i = 0; i < NumValues; ++i) {
7392 SDValue Add = CLI.DAG.getNode(ISD::ADD, CLI.DL, PtrVT, DemoteStackSlot,
7393 CLI.DAG.getConstant(Offsets[i], PtrVT));
7394 SDValue L = CLI.DAG.getLoad(
7395 RetTys[i], CLI.DL, CLI.Chain, Add,
7396 MachinePointerInfo::getFixedStack(DemoteStackIdx, Offsets[i]), false,
7397 false, false, 1);
7398 ReturnValues[i] = L;
7399 Chains[i] = L.getValue(1);
7400 }
7402 CLI.Chain = CLI.DAG.getNode(ISD::TokenFactor, CLI.DL, MVT::Other, Chains);
7403 } else {
7404 // Collect the legal value parts into potentially illegal values
7405 // that correspond to the original function's return values.
7406 ISD::NodeType AssertOp = ISD::DELETED_NODE;
7407 if (CLI.RetSExt)
7408 AssertOp = ISD::AssertSext;
7409 else if (CLI.RetZExt)
7410 AssertOp = ISD::AssertZext;
7411 unsigned CurReg = 0;
7412 for (unsigned I = 0, E = RetTys.size(); I != E; ++I) {
7413 EVT VT = RetTys[I];
7414 MVT RegisterVT = getRegisterType(CLI.RetTy->getContext(), VT);
7415 unsigned NumRegs = getNumRegisters(CLI.RetTy->getContext(), VT);
7417 ReturnValues.push_back(getCopyFromParts(CLI.DAG, CLI.DL, &InVals[CurReg],
7418 NumRegs, RegisterVT, VT, nullptr,
7419 AssertOp));
7420 CurReg += NumRegs;
7421 }
7422 }
7423 // For a function returning void, there is no return value. We can't create
7424 // such a node, so we just return a null return value in that case. In
7425 // that case, nothing will actually look at the value.
7426 if (ReturnValues.empty())
7427 return std::make_pair(SDValue(), CLI.Chain);
7430 SDValue Res = CLI.DAG.getNode(ISD::MERGE_VALUES, CLI.DL,
7431 CLI.DAG.getVTList(RetTys), ReturnValues);
7432 return std::make_pair(Res, CLI.Chain);
7433 }
7435 void TargetLowering::LowerOperationWrapper(SDNode *N,
7436 SmallVectorImpl<SDValue> &Results,
7437 SelectionDAG &DAG) const {
7438 SDValue Res = LowerOperation(SDValue(N, 0), DAG);
7439 if (Res.getNode())
7440 Results.push_back(Res);
7441 }
7443 SDValue TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
7444 llvm_unreachable("LowerOperation not implemented for this target!");
7445 }
7447 void
7448 SelectionDAGBuilder::CopyValueToVirtualRegister(const Value *V, unsigned Reg) {
7449 SDValue Op = getNonRegisterValue(V);
7450 assert((Op.getOpcode() != ISD::CopyFromReg ||
7451 cast<RegisterSDNode>(Op.getOperand(1))->getReg() != Reg) &&
7452 "Copy from a reg to the same reg!");
7453 assert(!TargetRegisterInfo::isPhysicalRegister(Reg) && "Is a physreg");
7455 const TargetLowering *TLI = TM.getSubtargetImpl()->getTargetLowering();
7456 RegsForValue RFV(V->getContext(), *TLI, Reg, V->getType());
7457 SDValue Chain = DAG.getEntryNode();
7458 RFV.getCopyToRegs(Op, DAG, getCurSDLoc(), Chain, nullptr, V);
7459 PendingExports.push_back(Chain);
7460 }
7462 #include "llvm/CodeGen/SelectionDAGISel.h"
7464 /// isOnlyUsedInEntryBlock - If the specified argument is only used in the
7465 /// entry block, return true. An argument used by a switch is treated as used
7466 /// outside the entry block, since the switch may expand into multiple blocks.
7467 static bool isOnlyUsedInEntryBlock(const Argument *A, bool FastISel) {
7468 // With FastISel active, we may be splitting blocks, so force creation
7469 // of virtual registers for all non-dead arguments.
7470 if (FastISel)
7471 return A->use_empty();
7473 const BasicBlock *Entry = A->getParent()->begin();
7474 for (const User *U : A->users())
7475 if (cast<Instruction>(U)->getParent() != Entry || isa<SwitchInst>(U))
7476 return false; // Use not in entry block.
7478 return true;
7479 }
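// For example (illustrative): an argument that only feeds a switch in the
// entry block is still not considered entry-block-only, because switch
// lowering may split the entry block into several machine basic blocks.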
7481 void SelectionDAGISel::LowerArguments(const Function &F) {
7482 SelectionDAG &DAG = SDB->DAG;
7483 SDLoc dl = SDB->getCurSDLoc();
7484 const TargetLowering *TLI = getTargetLowering();
7485 const DataLayout *DL = TLI->getDataLayout();
7486 SmallVector<ISD::InputArg, 16> Ins;
7488 if (!FuncInfo->CanLowerReturn) {
7489 // Put in an sret pointer parameter before all the other parameters.
7490 SmallVector<EVT, 1> ValueVTs;
7491 ComputeValueVTs(*getTargetLowering(),
7492 PointerType::getUnqual(F.getReturnType()), ValueVTs);
7494 // NOTE: Assuming that a pointer will never break down to more than one VT
7495 // or one register.
7496 ISD::ArgFlagsTy Flags;
7497 Flags.setSRet();
7498 MVT RegisterVT = TLI->getRegisterType(*DAG.getContext(), ValueVTs[0]);
7499 ISD::InputArg RetArg(Flags, RegisterVT, ValueVTs[0], true, 0, 0);
7500 Ins.push_back(RetArg);
7501 }
7503 // Set up the incoming argument description vector.
7504 unsigned Idx = 1;
7505 for (Function::const_arg_iterator I = F.arg_begin(), E = F.arg_end();
7506 I != E; ++I, ++Idx) {
7507 SmallVector<EVT, 4> ValueVTs;
7508 ComputeValueVTs(*TLI, I->getType(), ValueVTs);
7509 bool isArgValueUsed = !I->use_empty();
7510 unsigned PartBase = 0;
7511 Type *FinalType = I->getType();
7512 if (F.getAttributes().hasAttribute(Idx, Attribute::ByVal))
7513 FinalType = cast<PointerType>(FinalType)->getElementType();
7514 bool NeedsRegBlock = TLI->functionArgumentNeedsConsecutiveRegisters(
7515 FinalType, F.getCallingConv(), F.isVarArg());
7516 for (unsigned Value = 0, NumValues = ValueVTs.size();
7517 Value != NumValues; ++Value) {
7518 EVT VT = ValueVTs[Value];
7519 Type *ArgTy = VT.getTypeForEVT(*DAG.getContext());
7520 ISD::ArgFlagsTy Flags;
7521 unsigned OriginalAlignment = DL->getABITypeAlignment(ArgTy);
7523 if (F.getAttributes().hasAttribute(Idx, Attribute::ZExt))
7524 Flags.setZExt();
7525 if (F.getAttributes().hasAttribute(Idx, Attribute::SExt))
7526 Flags.setSExt();
7527 if (F.getAttributes().hasAttribute(Idx, Attribute::InReg))
7528 Flags.setInReg();
7529 if (F.getAttributes().hasAttribute(Idx, Attribute::StructRet))
7530 Flags.setSRet();
7531 if (F.getAttributes().hasAttribute(Idx, Attribute::ByVal))
7532 Flags.setByVal();
7533 if (F.getAttributes().hasAttribute(Idx, Attribute::InAlloca)) {
7534 Flags.setInAlloca();
7535 // Set the byval flag for CCAssignFn callbacks that don't know about
7536 // inalloca. This way we can know how many bytes we should've allocated
7537 // and how many bytes a callee cleanup function will pop. If we port
7538 // inalloca to more targets, we'll have to add custom inalloca handling
7539 // in the various CC lowering callbacks.
7540 Flags.setByVal();
7541 }
7542 if (Flags.isByVal() || Flags.isInAlloca()) {
7543 PointerType *Ty = cast<PointerType>(I->getType());
7544 Type *ElementTy = Ty->getElementType();
7545 Flags.setByValSize(DL->getTypeAllocSize(ElementTy));
7546 // For ByVal, alignment should be passed from FE. BE will guess if
7547 // this info is not there but there are cases it cannot get right.
7548 unsigned FrameAlign;
7549 if (F.getParamAlignment(Idx))
7550 FrameAlign = F.getParamAlignment(Idx);
7551 else
7552 FrameAlign = TLI->getByValTypeAlignment(ElementTy);
7553 Flags.setByValAlign(FrameAlign);
7554 }
7555 if (F.getAttributes().hasAttribute(Idx, Attribute::Nest))
7556 Flags.setNest();
7557 if (NeedsRegBlock)
7558 Flags.setInConsecutiveRegs();
7559 Flags.setOrigAlign(OriginalAlignment);
7561 MVT RegisterVT = TLI->getRegisterType(*CurDAG->getContext(), VT);
7562 unsigned NumRegs = TLI->getNumRegisters(*CurDAG->getContext(), VT);
7563 for (unsigned i = 0; i != NumRegs; ++i) {
7564 ISD::InputArg MyFlags(Flags, RegisterVT, VT, isArgValueUsed,
7565 Idx-1, PartBase+i*RegisterVT.getStoreSize());
7566 if (NumRegs > 1 && i == 0)
7567 MyFlags.Flags.setSplit();
7568 // if it isn't first piece, alignment must be 1
7569 else if (i > 0)
7570 MyFlags.Flags.setOrigAlign(1);
7572 // Only mark the end at the last register of the last value.
7573 if (NeedsRegBlock && Value == NumValues - 1 && i == NumRegs - 1)
7574 MyFlags.Flags.setInConsecutiveRegsLast();
7576 Ins.push_back(MyFlags);
7577 }
7578 PartBase += VT.getStoreSize();
7579 }
7580 }
7582 // Call the target to set up the argument values.
7583 SmallVector<SDValue, 8> InVals;
7584 SDValue NewRoot = TLI->LowerFormalArguments(DAG.getRoot(), F.getCallingConv(),
7585 F.isVarArg(), Ins,
7586 dl, DAG, InVals);
7588 // Verify that the target's LowerFormalArguments behaved as expected.
7589 assert(NewRoot.getNode() && NewRoot.getValueType() == MVT::Other &&
7590 "LowerFormalArguments didn't return a valid chain!");
7591 assert(InVals.size() == Ins.size() &&
7592 "LowerFormalArguments didn't emit the correct number of values!");
7593 DEBUG({
7594 for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
7595 assert(InVals[i].getNode() &&
7596 "LowerFormalArguments emitted a null value!");
7597 assert(EVT(Ins[i].VT) == InVals[i].getValueType() &&
7598 "LowerFormalArguments emitted a value with the wrong type!");
7602 // Update the DAG with the new chain value resulting from argument lowering.
7603 DAG.setRoot(NewRoot);
7605 // Set up the argument values.
7606 unsigned i = 0;
7607 Idx = 1;
7608 if (!FuncInfo->CanLowerReturn) {
7609 // Create a virtual register for the sret pointer, and put in a copy
7610 // from the sret argument into it.
7611 SmallVector<EVT, 1> ValueVTs;
7612 ComputeValueVTs(*TLI, PointerType::getUnqual(F.getReturnType()), ValueVTs);
7613 MVT VT = ValueVTs[0].getSimpleVT();
7614 MVT RegVT = TLI->getRegisterType(*CurDAG->getContext(), VT);
7615 ISD::NodeType AssertOp = ISD::DELETED_NODE;
7616 SDValue ArgValue = getCopyFromParts(DAG, dl, &InVals[0], 1,
7617 RegVT, VT, nullptr, AssertOp);
7619 MachineFunction& MF = SDB->DAG.getMachineFunction();
7620 MachineRegisterInfo& RegInfo = MF.getRegInfo();
7621 unsigned SRetReg = RegInfo.createVirtualRegister(TLI->getRegClassFor(RegVT));
7622 FuncInfo->DemoteRegister = SRetReg;
7623 NewRoot = SDB->DAG.getCopyToReg(NewRoot, SDB->getCurSDLoc(),
7624 SRetReg, ArgValue);
7625 DAG.setRoot(NewRoot);
7627 // i indexes lowered arguments. Bump it past the hidden sret argument.
7628 // Idx indexes LLVM arguments. Don't touch it.
7629 ++i;
7630 }
7632 for (Function::const_arg_iterator I = F.arg_begin(), E = F.arg_end(); I != E;
7633 ++I, ++Idx) {
7634 SmallVector<SDValue, 4> ArgValues;
7635 SmallVector<EVT, 4> ValueVTs;
7636 ComputeValueVTs(*TLI, I->getType(), ValueVTs);
7637 unsigned NumValues = ValueVTs.size();
7639 // If this argument is unused then remember its value. It is used to generate
7640 // debugging information.
7641 if (I->use_empty() && NumValues) {
7642 SDB->setUnusedArgValue(I, InVals[i]);
7644 // Also remember any frame index for use in FastISel.
7645 if (FrameIndexSDNode *FI =
7646 dyn_cast<FrameIndexSDNode>(InVals[i].getNode()))
7647 FuncInfo->setArgumentFrameIndex(I, FI->getIndex());
7648 }
7650 for (unsigned Val = 0; Val != NumValues; ++Val) {
7651 EVT VT = ValueVTs[Val];
7652 MVT PartVT = TLI->getRegisterType(*CurDAG->getContext(), VT);
7653 unsigned NumParts = TLI->getNumRegisters(*CurDAG->getContext(), VT);
7655 if (!I->use_empty()) {
7656 ISD::NodeType AssertOp = ISD::DELETED_NODE;
7657 if (F.getAttributes().hasAttribute(Idx, Attribute::SExt))
7658 AssertOp = ISD::AssertSext;
7659 else if (F.getAttributes().hasAttribute(Idx, Attribute::ZExt))
7660 AssertOp = ISD::AssertZext;
7662 ArgValues.push_back(getCopyFromParts(DAG, dl, &InVals[i],
7663 NumParts, PartVT, VT,
7664 nullptr, AssertOp));
7665 }
7667 i += NumParts;
7668 }
7670 // We don't need to do anything else for unused arguments.
7671 if (ArgValues.empty())
7672 continue;
7674 // Note down frame index.
7675 if (FrameIndexSDNode *FI =
7676 dyn_cast<FrameIndexSDNode>(ArgValues[0].getNode()))
7677 FuncInfo->setArgumentFrameIndex(I, FI->getIndex());
7679 SDValue Res = DAG.getMergeValues(makeArrayRef(ArgValues.data(), NumValues),
7680 SDB->getCurSDLoc());
7682 SDB->setValue(I, Res);
7683 if (!TM.Options.EnableFastISel && Res.getOpcode() == ISD::BUILD_PAIR) {
7684 if (LoadSDNode *LNode =
7685 dyn_cast<LoadSDNode>(Res.getOperand(0).getNode()))
7686 if (FrameIndexSDNode *FI =
7687 dyn_cast<FrameIndexSDNode>(LNode->getBasePtr().getNode()))
7688 FuncInfo->setArgumentFrameIndex(I, FI->getIndex());
7689 }
7691 // If this argument is live outside of the entry block, insert a copy from
7692 // wherever we got it to the vreg that other BB's will reference it as.
7693 if (!TM.Options.EnableFastISel && Res.getOpcode() == ISD::CopyFromReg) {
7694 // If we can, though, try to skip creating an unnecessary vreg.
7695 // FIXME: This isn't very clean... it would be nice to make this more
7696 // general. It's also subtly incompatible with the hacks FastISel
7697 // uses with vregs.
7698 unsigned Reg = cast<RegisterSDNode>(Res.getOperand(1))->getReg();
7699 if (TargetRegisterInfo::isVirtualRegister(Reg)) {
7700 FuncInfo->ValueMap[I] = Reg;
7701 continue;
7702 }
7703 }
7704 if (!isOnlyUsedInEntryBlock(I, TM.Options.EnableFastISel)) {
7705 FuncInfo->InitializeRegForValue(I);
7706 SDB->CopyToExportRegsIfNeeded(I);
7707 }
7708 }
7710 assert(i == InVals.size() && "Argument register count mismatch!");
7712 // Finally, if the target has anything special to do, allow it to do so.
7713 // FIXME: this should insert code into the DAG!
7714 EmitFunctionEntryCode();
7715 }
7717 /// Handle PHI nodes in successor blocks. Emit code into the SelectionDAG to
7718 /// ensure constants are generated when needed. Remember the virtual registers
7719 /// that need to be added to the Machine PHI nodes as input. We cannot just
7720 /// directly add them, because expansion might result in multiple MBB's for one
7721 /// BB. As such, the start of the BB might correspond to a different MBB than
7722 /// the end.
7723 ///
7724 void
7725 SelectionDAGBuilder::HandlePHINodesInSuccessorBlocks(const BasicBlock *LLVMBB) {
7726 const TerminatorInst *TI = LLVMBB->getTerminator();
7728 SmallPtrSet<MachineBasicBlock *, 4> SuccsHandled;
7730 // Check successor nodes' PHI nodes that expect a constant to be available
7731 // from this block.
7732 for (unsigned succ = 0, e = TI->getNumSuccessors(); succ != e; ++succ) {
7733 const BasicBlock *SuccBB = TI->getSuccessor(succ);
7734 if (!isa<PHINode>(SuccBB->begin())) continue;
7735 MachineBasicBlock *SuccMBB = FuncInfo.MBBMap[SuccBB];
7737 // If this terminator has multiple identical successors (common for
7738 // switches), only handle each succ once.
7739 if (!SuccsHandled.insert(SuccMBB)) continue;
7741 MachineBasicBlock::iterator MBBI = SuccMBB->begin();
7743 // At this point we know that there is a 1-1 correspondence between LLVM PHI
7744 // nodes and Machine PHI nodes, but the incoming operands have not been
7745 // emitted yet.
7746 for (BasicBlock::const_iterator I = SuccBB->begin();
7747 const PHINode *PN = dyn_cast<PHINode>(I); ++I) {
7748 // Ignore dead phi's.
7749 if (PN->use_empty()) continue;
7751 // Skip empty types.
7752 if (PN->getType()->isEmptyTy())
7753 continue;
7755 unsigned Reg;
7756 const Value *PHIOp = PN->getIncomingValueForBlock(LLVMBB);
7758 if (const Constant *C = dyn_cast<Constant>(PHIOp)) {
7759 unsigned &RegOut = ConstantsOut[C];
7760 if (RegOut == 0) {
7761 RegOut = FuncInfo.CreateRegs(C->getType());
7762 CopyValueToVirtualRegister(C, RegOut);
7763 }
7764 Reg = RegOut;
7765 } else {
7766 DenseMap<const Value *, unsigned>::iterator I =
7767 FuncInfo.ValueMap.find(PHIOp);
7768 if (I != FuncInfo.ValueMap.end())
7769 Reg = I->second;
7770 else {
7771 assert(isa<AllocaInst>(PHIOp) &&
7772 FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(PHIOp)) &&
7773 "Didn't codegen value into a register!??");
7774 Reg = FuncInfo.CreateRegs(PHIOp->getType());
7775 CopyValueToVirtualRegister(PHIOp, Reg);
7776 }
7777 }
7779 // Remember that this register needs to added to the machine PHI node as
7780 // the input for this MBB.
7781 SmallVector<EVT, 4> ValueVTs;
7782 const TargetLowering *TLI = TM.getSubtargetImpl()->getTargetLowering();
7783 ComputeValueVTs(*TLI, PN->getType(), ValueVTs);
7784 for (unsigned vti = 0, vte = ValueVTs.size(); vti != vte; ++vti) {
7785 EVT VT = ValueVTs[vti];
7786 unsigned NumRegisters = TLI->getNumRegisters(*DAG.getContext(), VT);
7787 for (unsigned i = 0, e = NumRegisters; i != e; ++i)
7788 FuncInfo.PHINodesToUpdate.push_back(std::make_pair(MBBI++, Reg+i));
7789 Reg += NumRegisters;
7790 }
7791 }
7792 }
7794 ConstantsOut.clear();
7795 }
7797 /// Add a successor MBB to ParentMBB, creating a new MachineBB for BB if SuccMBB
7798 /// is null.
7799 MachineBasicBlock *
7800 SelectionDAGBuilder::StackProtectorDescriptor::
7801 AddSuccessorMBB(const BasicBlock *BB,
7802 MachineBasicBlock *ParentMBB,
7803 MachineBasicBlock *SuccMBB) {
7804 // If SuccBB has not been created yet, create it.
7805 if (!SuccMBB) {
7806 MachineFunction *MF = ParentMBB->getParent();
7807 MachineFunction::iterator BBI = ParentMBB;
7808 SuccMBB = MF->CreateMachineBasicBlock(BB);
7809 MF->insert(++BBI, SuccMBB);
7810 }
7811 // Add it as a successor of ParentMBB.
7812 ParentMBB->addSuccessor(SuccMBB);
7813 return SuccMBB;
7814 }