//===-- SelectionDAGBuilder.cpp - Selection-DAG building ------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements routines for translating from LLVM IR into SelectionDAG IR.
//
//===----------------------------------------------------------------------===//
#include "SelectionDAGBuilder.h"
#include "SDNodeDbgValue.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/BranchProbabilityInfo.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/FastISel.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/GCMetadata.h"
#include "llvm/CodeGen/GCStrategy.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/StackMaps.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetFrameLowering.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetIntrinsicInfo.h"
#include "llvm/Target/TargetLibraryInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Target/TargetSelectionDAGInfo.h"
#include <algorithm>

using namespace llvm;

#define DEBUG_TYPE "isel"
/// LimitFloatPrecision - Generate low-precision inline sequences for
/// some float libcalls (6, 8 or 12 bits).
static unsigned LimitFloatPrecision;

static cl::opt<unsigned, true>
LimitFPPrecision("limit-float-precision",
                 cl::desc("Generate low-precision inline sequences "
                          "for some float libcalls"),
                 cl::location(LimitFloatPrecision),
                 cl::init(0));
// Limit the width of DAG chains. This is important in general to prevent
// DAG-based analysis from blowing up. For example, alias analysis and
// load clustering may not complete in reasonable time. It is difficult to
// recognize and avoid this situation within each individual analysis, and
// future analyses are likely to have the same behavior. Limiting DAG width is
// the safe approach, and will be especially important with global DAGs.
//
// MaxParallelChains default is arbitrarily high to avoid affecting
// optimization, but could be lowered to improve compile time. Any ld-ld-st-st
// sequence over this should have been converted to llvm.memcpy by the
// frontend. It is easy to induce this behavior with .ll code such as:
//   %buffer = alloca [4096 x i8]
//   %data = load [4096 x i8]* %argPtr
//   store [4096 x i8] %data, [4096 x i8]* %buffer
static const unsigned MaxParallelChains = 64;
static SDValue getCopyFromPartsVector(SelectionDAG &DAG, SDLoc DL,
                                      const SDValue *Parts, unsigned NumParts,
                                      MVT PartVT, EVT ValueVT, const Value *V);
/// getCopyFromParts - Create a value that contains the specified legal parts
/// combined into the value they represent.  If the parts combine to a type
/// larger than ValueVT then AssertOp can be used to specify whether the extra
/// bits are known to be zero (ISD::AssertZext) or sign extended from ValueVT
/// (ISD::AssertSext).
static SDValue getCopyFromParts(SelectionDAG &DAG, SDLoc DL,
                                const SDValue *Parts,
                                unsigned NumParts, MVT PartVT, EVT ValueVT,
                                const Value *V,
                                ISD::NodeType AssertOp = ISD::DELETED_NODE) {
  if (ValueVT.isVector())
    return getCopyFromPartsVector(DAG, DL, Parts, NumParts,
                                  PartVT, ValueVT, V);

  assert(NumParts > 0 && "No parts to assemble!");
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  SDValue Val = Parts[0];

  if (NumParts > 1) {
    // Assemble the value from multiple parts.
    if (ValueVT.isInteger()) {
      unsigned PartBits = PartVT.getSizeInBits();
      unsigned ValueBits = ValueVT.getSizeInBits();

      // Assemble the power of 2 part.
      unsigned RoundParts = NumParts & (NumParts - 1) ?
        1 << Log2_32(NumParts) : NumParts;
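      // For example, NumParts == 3 rounds down to RoundParts == 2; the
      // leftover odd part is assembled and merged in further below.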
      unsigned RoundBits = PartBits * RoundParts;
      EVT RoundVT = RoundBits == ValueBits ?
        ValueVT : EVT::getIntegerVT(*DAG.getContext(), RoundBits);
      SDValue Lo, Hi;

      EVT HalfVT = EVT::getIntegerVT(*DAG.getContext(), RoundBits/2);

      if (RoundParts > 2) {
        Lo = getCopyFromParts(DAG, DL, Parts, RoundParts / 2,
                              PartVT, HalfVT, V);
        Hi = getCopyFromParts(DAG, DL, Parts + RoundParts / 2,
                              RoundParts / 2, PartVT, HalfVT, V);
      } else {
        Lo = DAG.getNode(ISD::BITCAST, DL, HalfVT, Parts[0]);
        Hi = DAG.getNode(ISD::BITCAST, DL, HalfVT, Parts[1]);
      }

      if (TLI.isBigEndian())
        std::swap(Lo, Hi);

      Val = DAG.getNode(ISD::BUILD_PAIR, DL, RoundVT, Lo, Hi);

      if (RoundParts < NumParts) {
        // Assemble the trailing non-power-of-2 part.
        unsigned OddParts = NumParts - RoundParts;
        EVT OddVT = EVT::getIntegerVT(*DAG.getContext(), OddParts * PartBits);
        Hi = getCopyFromParts(DAG, DL,
                              Parts + RoundParts, OddParts, PartVT, OddVT, V);

        // Combine the round and odd parts.
        Lo = Val;
        if (TLI.isBigEndian())
          std::swap(Lo, Hi);
        EVT TotalVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
        Hi = DAG.getNode(ISD::ANY_EXTEND, DL, TotalVT, Hi);
        Hi = DAG.getNode(ISD::SHL, DL, TotalVT, Hi,
                         DAG.getConstant(Lo.getValueType().getSizeInBits(),
                                         TLI.getPointerTy()));
        Lo = DAG.getNode(ISD::ZERO_EXTEND, DL, TotalVT, Lo);
        Val = DAG.getNode(ISD::OR, DL, TotalVT, Lo, Hi);
      }
    } else if (PartVT.isFloatingPoint()) {
      // FP split into multiple FP parts (for ppcf128)
      assert(ValueVT == EVT(MVT::ppcf128) && PartVT == MVT::f64 &&
             "Unexpected split");
      SDValue Lo, Hi;
      Lo = DAG.getNode(ISD::BITCAST, DL, EVT(MVT::f64), Parts[0]);
      Hi = DAG.getNode(ISD::BITCAST, DL, EVT(MVT::f64), Parts[1]);
      if (TLI.isBigEndian())
        std::swap(Lo, Hi);
      Val = DAG.getNode(ISD::BUILD_PAIR, DL, ValueVT, Lo, Hi);
    } else {
      // FP split into integer parts (soft fp)
      assert(ValueVT.isFloatingPoint() && PartVT.isInteger() &&
             !PartVT.isVector() && "Unexpected split");
      EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), ValueVT.getSizeInBits());
      Val = getCopyFromParts(DAG, DL, Parts, NumParts, PartVT, IntVT, V);
    }
  }

  // There is now one part, held in Val.  Correct it to match ValueVT.
  EVT PartEVT = Val.getValueType();

  if (PartEVT == ValueVT)
    return Val;

  if (PartEVT.isInteger() && ValueVT.isInteger()) {
    if (ValueVT.bitsLT(PartEVT)) {
      // For a truncate, see if we have any information to
      // indicate whether the truncated bits will always be
      // zero or sign-extension.
      if (AssertOp != ISD::DELETED_NODE)
        Val = DAG.getNode(AssertOp, DL, PartEVT, Val,
                          DAG.getValueType(ValueVT));
      return DAG.getNode(ISD::TRUNCATE, DL, ValueVT, Val);
    }
    return DAG.getNode(ISD::ANY_EXTEND, DL, ValueVT, Val);
  }

  if (PartEVT.isFloatingPoint() && ValueVT.isFloatingPoint()) {
    // FP_ROUND's are always exact here.
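    // The trailing constant-1 operand asserts that the FP_ROUND loses no
    // information, i.e. the value is known to fit in the narrower type.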
    if (ValueVT.bitsLT(Val.getValueType()))
      return DAG.getNode(ISD::FP_ROUND, DL, ValueVT, Val,
                         DAG.getTargetConstant(1, TLI.getPointerTy()));

    return DAG.getNode(ISD::FP_EXTEND, DL, ValueVT, Val);
  }

  if (PartEVT.getSizeInBits() == ValueVT.getSizeInBits())
    return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);

  llvm_unreachable("Unknown mismatch!");
}
static void diagnosePossiblyInvalidConstraint(LLVMContext &Ctx, const Value *V,
                                              const Twine &ErrMsg) {
  const Instruction *I = dyn_cast_or_null<Instruction>(V);
  if (!I)
    return Ctx.emitError(ErrMsg);

  const char *AsmError = ", possible invalid constraint for vector type";
  if (const CallInst *CI = dyn_cast<CallInst>(I))
    if (isa<InlineAsm>(CI->getCalledValue()))
      return Ctx.emitError(I, ErrMsg + AsmError);

  return Ctx.emitError(I, ErrMsg);
}
/// getCopyFromPartsVector - Create a value that contains the specified legal
/// parts combined into the value they represent.  If the parts combine to a
/// type larger than ValueVT then AssertOp can be used to specify whether the
/// extra bits are known to be zero (ISD::AssertZext) or sign extended from
/// ValueVT (ISD::AssertSext).
static SDValue getCopyFromPartsVector(SelectionDAG &DAG, SDLoc DL,
                                      const SDValue *Parts, unsigned NumParts,
                                      MVT PartVT, EVT ValueVT, const Value *V) {
  assert(ValueVT.isVector() && "Not a vector value");
  assert(NumParts > 0 && "No parts to assemble!");
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  SDValue Val = Parts[0];
  // Handle a multi-element vector.
  if (NumParts > 1) {
    EVT IntermediateVT;
    MVT RegisterVT;
    unsigned NumIntermediates;
    unsigned NumRegs =
        TLI.getVectorTypeBreakdown(*DAG.getContext(), ValueVT, IntermediateVT,
                                   NumIntermediates, RegisterVT);
    assert(NumRegs == NumParts && "Part count doesn't match vector breakdown!");
    NumParts = NumRegs; // Silence a compiler warning.
    assert(RegisterVT == PartVT && "Part type doesn't match vector breakdown!");
    assert(RegisterVT == Parts[0].getSimpleValueType() &&
           "Part type doesn't match part!");

    // Assemble the parts into intermediate operands.
    SmallVector<SDValue, 8> Ops(NumIntermediates);
    if (NumIntermediates == NumParts) {
      // If the register was not expanded, truncate or copy the value,
      // as appropriate.
      for (unsigned i = 0; i != NumParts; ++i)
        Ops[i] = getCopyFromParts(DAG, DL, &Parts[i], 1,
                                  PartVT, IntermediateVT, V);
    } else if (NumParts > 0) {
      // If the intermediate type was expanded, build the intermediate
      // operands from the parts.
      assert(NumParts % NumIntermediates == 0 &&
             "Must expand into a divisible number of parts!");
      unsigned Factor = NumParts / NumIntermediates;
      for (unsigned i = 0; i != NumIntermediates; ++i)
        Ops[i] = getCopyFromParts(DAG, DL, &Parts[i * Factor], Factor,
                                  PartVT, IntermediateVT, V);
    }

    // Build a vector with BUILD_VECTOR or CONCAT_VECTORS from the
    // intermediate operands.
    Val = DAG.getNode(IntermediateVT.isVector() ? ISD::CONCAT_VECTORS
                                                : ISD::BUILD_VECTOR,
                      DL, ValueVT, Ops);
  }
  // There is now one part, held in Val.  Correct it to match ValueVT.
  EVT PartEVT = Val.getValueType();

  if (PartEVT == ValueVT)
    return Val;

  if (PartEVT.isVector()) {
    // If the element type of the source/dest vectors are the same, but the
    // parts vector has more elements than the value vector, then we have a
    // vector widening case (e.g. <2 x float> -> <4 x float>).  Extract the
    // elements we want.
    if (PartEVT.getVectorElementType() == ValueVT.getVectorElementType()) {
      assert(PartEVT.getVectorNumElements() > ValueVT.getVectorNumElements() &&
             "Cannot narrow, it would be a lossy transformation");
      return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, ValueVT, Val,
                         DAG.getConstant(0, TLI.getVectorIdxTy()));
    }

    // Vector/Vector bitcast.
    if (ValueVT.getSizeInBits() == PartEVT.getSizeInBits())
      return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);

    assert(PartEVT.getVectorNumElements() == ValueVT.getVectorNumElements() &&
           "Cannot handle this kind of promotion");
    // Promoted vector extract
    bool Smaller = ValueVT.bitsLE(PartEVT);
    return DAG.getNode((Smaller ? ISD::TRUNCATE : ISD::ANY_EXTEND),
                       DL, ValueVT, Val);
  }

  // Trivial bitcast if the types are the same size and the destination
  // vector type is legal.
  if (PartEVT.getSizeInBits() == ValueVT.getSizeInBits() &&
      TLI.isTypeLegal(ValueVT))
    return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);

  // Handle cases such as i8 -> <1 x i1>
  if (ValueVT.getVectorNumElements() != 1) {
    diagnosePossiblyInvalidConstraint(*DAG.getContext(), V,
                                      "non-trivial scalar-to-vector conversion");
    return DAG.getUNDEF(ValueVT);
  }

  if (ValueVT.getVectorNumElements() == 1 &&
      ValueVT.getVectorElementType() != PartEVT) {
    bool Smaller = ValueVT.bitsLE(PartEVT);
    Val = DAG.getNode((Smaller ? ISD::TRUNCATE : ISD::ANY_EXTEND),
                      DL, ValueVT.getScalarType(), Val);
  }

  return DAG.getNode(ISD::BUILD_VECTOR, DL, ValueVT, Val);
}
static void getCopyToPartsVector(SelectionDAG &DAG, SDLoc dl,
                                 SDValue Val, SDValue *Parts, unsigned NumParts,
                                 MVT PartVT, const Value *V);

/// getCopyToParts - Create a series of nodes that contain the specified value
/// split into legal parts.  If the parts contain more bits than Val, then, for
/// integers, ExtendKind can be used to specify how to generate the extra bits.
static void getCopyToParts(SelectionDAG &DAG, SDLoc DL,
                           SDValue Val, SDValue *Parts, unsigned NumParts,
                           MVT PartVT, const Value *V,
                           ISD::NodeType ExtendKind = ISD::ANY_EXTEND) {
  EVT ValueVT = Val.getValueType();

  // Handle the vector case separately.
  if (ValueVT.isVector())
    return getCopyToPartsVector(DAG, DL, Val, Parts, NumParts, PartVT, V);

  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  unsigned PartBits = PartVT.getSizeInBits();
  unsigned OrigNumParts = NumParts;
  assert(TLI.isTypeLegal(PartVT) && "Copying to an illegal type!");

  if (NumParts == 0)
    return;
  assert(!ValueVT.isVector() && "Vector case handled elsewhere");
  EVT PartEVT = PartVT;
  if (PartEVT == ValueVT) {
    assert(NumParts == 1 && "No-op copy with multiple parts!");
    Parts[0] = Val;
    return;
  }

  if (NumParts * PartBits > ValueVT.getSizeInBits()) {
    // If the parts cover more bits than the value has, promote the value.
    if (PartVT.isFloatingPoint() && ValueVT.isFloatingPoint()) {
      assert(NumParts == 1 && "Do not know what to promote to!");
      Val = DAG.getNode(ISD::FP_EXTEND, DL, PartVT, Val);
    } else {
      assert((PartVT.isInteger() || PartVT == MVT::x86mmx) &&
             ValueVT.isInteger() &&
             "Unknown mismatch!");
      ValueVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
      Val = DAG.getNode(ExtendKind, DL, ValueVT, Val);
      if (PartVT == MVT::x86mmx)
        Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
    }
  } else if (PartBits == ValueVT.getSizeInBits()) {
    // Different types of the same size.
    assert(NumParts == 1 && PartEVT != ValueVT);
    Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
  } else if (NumParts * PartBits < ValueVT.getSizeInBits()) {
    // If the parts cover fewer bits than the value has, truncate the value.
    assert((PartVT.isInteger() || PartVT == MVT::x86mmx) &&
           ValueVT.isInteger() &&
           "Unknown mismatch!");
    ValueVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
    Val = DAG.getNode(ISD::TRUNCATE, DL, ValueVT, Val);
    if (PartVT == MVT::x86mmx)
      Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
  }

  // The value may have changed - recompute ValueVT.
  ValueVT = Val.getValueType();
  assert(NumParts * PartBits == ValueVT.getSizeInBits() &&
         "Failed to tile the value with PartVT!");

  if (NumParts == 1) {
    if (PartEVT != ValueVT)
      diagnosePossiblyInvalidConstraint(*DAG.getContext(), V,
                                        "scalar-to-vector conversion failed");

    Parts[0] = Val;
    return;
  }
  // Expand the value into multiple parts.
  if (NumParts & (NumParts - 1)) {
    // The number of parts is not a power of 2.  Split off and copy the tail.
    assert(PartVT.isInteger() && ValueVT.isInteger() &&
           "Do not know what to expand to!");
    unsigned RoundParts = 1 << Log2_32(NumParts);
    unsigned RoundBits = RoundParts * PartBits;
    unsigned OddParts = NumParts - RoundParts;
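    // For example, NumParts == 3 gives RoundParts == 2 and OddParts == 1; the
    // high OddParts * PartBits bits are shifted down and copied out here, and
    // the remaining power-of-2 portion is handled below.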
    SDValue OddVal = DAG.getNode(ISD::SRL, DL, ValueVT, Val,
                                 DAG.getIntPtrConstant(RoundBits));
    getCopyToParts(DAG, DL, OddVal, Parts + RoundParts, OddParts, PartVT, V);

    if (TLI.isBigEndian())
      // The odd parts were reversed by getCopyToParts - unreverse them.
      std::reverse(Parts + RoundParts, Parts + NumParts);

    NumParts = RoundParts;
    ValueVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
    Val = DAG.getNode(ISD::TRUNCATE, DL, ValueVT, Val);
  }

  // The number of parts is a power of 2.  Repeatedly bisect the value using
  // EXTRACT_ELEMENT.
  Parts[0] = DAG.getNode(ISD::BITCAST, DL,
                         EVT::getIntegerVT(*DAG.getContext(),
                                           ValueVT.getSizeInBits()),
                         Val);
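  // Each pass of the outer loop halves the piece size: with four parts the
  // value is first split into two halves, then each half into two quarters.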
  for (unsigned StepSize = NumParts; StepSize > 1; StepSize /= 2) {
    for (unsigned i = 0; i < NumParts; i += StepSize) {
      unsigned ThisBits = StepSize * PartBits / 2;
      EVT ThisVT = EVT::getIntegerVT(*DAG.getContext(), ThisBits);
      SDValue &Part0 = Parts[i];
      SDValue &Part1 = Parts[i+StepSize/2];

      Part1 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL,
                          ThisVT, Part0, DAG.getIntPtrConstant(1));
      Part0 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL,
                          ThisVT, Part0, DAG.getIntPtrConstant(0));

      if (ThisBits == PartBits && ThisVT != PartVT) {
        Part0 = DAG.getNode(ISD::BITCAST, DL, PartVT, Part0);
        Part1 = DAG.getNode(ISD::BITCAST, DL, PartVT, Part1);
      }
    }
  }

  if (TLI.isBigEndian())
    std::reverse(Parts, Parts + OrigNumParts);
}

/// getCopyToPartsVector - Create a series of nodes that contain the specified
/// value split into legal parts.
static void getCopyToPartsVector(SelectionDAG &DAG, SDLoc DL,
                                 SDValue Val, SDValue *Parts, unsigned NumParts,
                                 MVT PartVT, const Value *V) {
  EVT ValueVT = Val.getValueType();
  assert(ValueVT.isVector() && "Not a vector");
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();

  if (NumParts == 1) {
    EVT PartEVT = PartVT;
    if (PartEVT == ValueVT) {
      // Nothing to do.
    } else if (PartVT.getSizeInBits() == ValueVT.getSizeInBits()) {
      // Bitconvert vector->vector case.
      Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
    } else if (PartVT.isVector() &&
               PartEVT.getVectorElementType() == ValueVT.getVectorElementType() &&
               PartEVT.getVectorNumElements() > ValueVT.getVectorNumElements()) {
      EVT ElementVT = PartVT.getVectorElementType();
      // Vector widening case, e.g. <2 x float> -> <4 x float>.  Shuffle in
      // undef elements.
      SmallVector<SDValue, 16> Ops;
      for (unsigned i = 0, e = ValueVT.getVectorNumElements(); i != e; ++i)
        Ops.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL,
                                  ElementVT, Val, DAG.getConstant(i,
                                                  TLI.getVectorIdxTy())));

      for (unsigned i = ValueVT.getVectorNumElements(),
           e = PartVT.getVectorNumElements(); i != e; ++i)
        Ops.push_back(DAG.getUNDEF(ElementVT));

      Val = DAG.getNode(ISD::BUILD_VECTOR, DL, PartVT, Ops);
      // FIXME: Use CONCAT for 2x -> 4x.

      //SDValue UndefElts = DAG.getUNDEF(VectorTy);
      //Val = DAG.getNode(ISD::CONCAT_VECTORS, DL, PartVT, Val, UndefElts);
    } else if (PartVT.isVector() &&
               PartEVT.getVectorElementType().bitsGE(
                 ValueVT.getVectorElementType()) &&
               PartEVT.getVectorNumElements() == ValueVT.getVectorNumElements()) {

      // Promoted vector extract
      bool Smaller = PartEVT.bitsLE(ValueVT);
      Val = DAG.getNode((Smaller ? ISD::TRUNCATE : ISD::ANY_EXTEND),
                        DL, PartVT, Val);
    } else {
      // Vector -> scalar conversion.
      assert(ValueVT.getVectorNumElements() == 1 &&
             "Only trivial vector-to-scalar conversions should get here!");
      Val = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL,
                        PartVT, Val, DAG.getConstant(0, TLI.getVectorIdxTy()));

      bool Smaller = ValueVT.bitsLE(PartVT);
      Val = DAG.getNode((Smaller ? ISD::TRUNCATE : ISD::ANY_EXTEND),
                        DL, PartVT, Val);
    }

    Parts[0] = Val;
    return;
  }
  // Handle a multi-element vector.
  EVT IntermediateVT;
  MVT RegisterVT;
  unsigned NumIntermediates;
  unsigned NumRegs = TLI.getVectorTypeBreakdown(*DAG.getContext(), ValueVT,
                                                IntermediateVT,
                                                NumIntermediates, RegisterVT);
  unsigned NumElements = ValueVT.getVectorNumElements();

  assert(NumRegs == NumParts && "Part count doesn't match vector breakdown!");
  NumParts = NumRegs; // Silence a compiler warning.
  assert(RegisterVT == PartVT && "Part type doesn't match vector breakdown!");

  // Split the vector into intermediate operands.
  SmallVector<SDValue, 8> Ops(NumIntermediates);
  for (unsigned i = 0; i != NumIntermediates; ++i) {
    if (IntermediateVT.isVector())
      Ops[i] = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL,
                           IntermediateVT, Val,
                           DAG.getConstant(i * (NumElements / NumIntermediates),
                                           TLI.getVectorIdxTy()));
    else
      Ops[i] = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL,
                           IntermediateVT, Val,
                           DAG.getConstant(i, TLI.getVectorIdxTy()));
  }
  // Split the intermediate operands into legal parts.
  if (NumParts == NumIntermediates) {
    // If the register was not expanded, promote or copy the value,
    // as appropriate.
    for (unsigned i = 0; i != NumParts; ++i)
      getCopyToParts(DAG, DL, Ops[i], &Parts[i], 1, PartVT, V);
  } else if (NumParts > 0) {
    // If the intermediate type was expanded, split each intermediate value
    // into legal parts.
    assert(NumParts % NumIntermediates == 0 &&
           "Must expand into a divisible number of parts!");
    unsigned Factor = NumParts / NumIntermediates;
    for (unsigned i = 0; i != NumIntermediates; ++i)
      getCopyToParts(DAG, DL, Ops[i], &Parts[i*Factor], Factor, PartVT, V);
  }
}

namespace {
/// RegsForValue - This struct represents the registers (physical or virtual)
/// that a particular set of values is assigned, and the type information
/// about the value. The most common situation is to represent one value at a
/// time, but struct or array values are handled element-wise as multiple
/// values.  The splitting of aggregates is performed recursively, so that we
/// never have aggregate-typed registers. The values at this point do not
/// necessarily have legal types, so each value may require one or more
/// registers of some legal type.
///
struct RegsForValue {
  /// ValueVTs - The value types of the values, which may not be legal, and
  /// may need to be promoted or synthesized from one or more registers.
  ///
  SmallVector<EVT, 4> ValueVTs;

  /// RegVTs - The value types of the registers. This is the same size as
  /// ValueVTs and it records, for each value, what the type of the assigned
  /// register or registers are. (Individual values are never synthesized
  /// from more than one type of register.)
  ///
  /// With virtual registers, the contents of RegVTs is redundant with TLI's
  /// getRegisterType member function, however with physical registers it is
  /// necessary to have a separate record of the types.
  ///
  SmallVector<MVT, 4> RegVTs;

  /// Regs - This list holds the registers assigned to the values.
  /// Each legal or promoted value requires one register, and each
  /// expanded value requires multiple registers.
  ///
  SmallVector<unsigned, 4> Regs;
  RegsForValue() {}

  RegsForValue(const SmallVector<unsigned, 4> &regs,
               MVT regvt, EVT valuevt)
    : ValueVTs(1, valuevt), RegVTs(1, regvt), Regs(regs) {}

  RegsForValue(LLVMContext &Context, const TargetLowering &tli,
               unsigned Reg, Type *Ty) {
    ComputeValueVTs(tli, Ty, ValueVTs);

    for (unsigned Value = 0, e = ValueVTs.size(); Value != e; ++Value) {
      EVT ValueVT = ValueVTs[Value];
      unsigned NumRegs = tli.getNumRegisters(Context, ValueVT);
      MVT RegisterVT = tli.getRegisterType(Context, ValueVT);
      for (unsigned i = 0; i != NumRegs; ++i)
        Regs.push_back(Reg + i);
      RegVTs.push_back(RegisterVT);
      Reg += NumRegs;
    }
  }

  /// append - Add the specified values to this one.
  void append(const RegsForValue &RHS) {
    ValueVTs.append(RHS.ValueVTs.begin(), RHS.ValueVTs.end());
    RegVTs.append(RHS.RegVTs.begin(), RHS.RegVTs.end());
    Regs.append(RHS.Regs.begin(), RHS.Regs.end());
  }
  /// getCopyFromRegs - Emit a series of CopyFromReg nodes that copies from
  /// this value and returns the result as a ValueVTs value.  This uses
  /// Chain/Flag as the input and updates them for the output Chain/Flag.
  /// If the Flag pointer is NULL, no flag is used.
  SDValue getCopyFromRegs(SelectionDAG &DAG, FunctionLoweringInfo &FuncInfo,
                          SDLoc dl,
                          SDValue &Chain, SDValue *Flag,
                          const Value *V = nullptr) const;

  /// getCopyToRegs - Emit a series of CopyToReg nodes that copies the
  /// specified value into the registers specified by this object.  This uses
  /// Chain/Flag as the input and updates them for the output Chain/Flag.
  /// If the Flag pointer is NULL, no flag is used.
  void getCopyToRegs(SDValue Val, SelectionDAG &DAG, SDLoc dl,
                     SDValue &Chain, SDValue *Flag, const Value *V) const;

  /// AddInlineAsmOperands - Add this value to the specified inlineasm node
  /// operand list.  This adds the code marker, matching input operand index
  /// (if applicable), and includes the number of values added into it.
  void AddInlineAsmOperands(unsigned Kind,
                            bool HasMatching, unsigned MatchingIdx,
                            SelectionDAG &DAG,
                            std::vector<SDValue> &Ops) const;
};
} // end anonymous namespace

/// getCopyFromRegs - Emit a series of CopyFromReg nodes that copies from
/// this value and returns the result as a ValueVT value.  This uses
/// Chain/Flag as the input and updates them for the output Chain/Flag.
/// If the Flag pointer is NULL, no flag is used.
SDValue RegsForValue::getCopyFromRegs(SelectionDAG &DAG,
                                      FunctionLoweringInfo &FuncInfo,
                                      SDLoc dl,
                                      SDValue &Chain, SDValue *Flag,
                                      const Value *V) const {
  // A Value with type {} or [0 x %t] needs no registers.
  if (ValueVTs.empty())
    return SDValue();

  const TargetLowering &TLI = DAG.getTargetLoweringInfo();

  // Assemble the legal parts into the final values.
  SmallVector<SDValue, 4> Values(ValueVTs.size());
  SmallVector<SDValue, 8> Parts;
  for (unsigned Value = 0, Part = 0, e = ValueVTs.size(); Value != e; ++Value) {
    // Copy the legal parts from the registers.
    EVT ValueVT = ValueVTs[Value];
    unsigned NumRegs = TLI.getNumRegisters(*DAG.getContext(), ValueVT);
    MVT RegisterVT = RegVTs[Value];
686 for (unsigned i = 0; i != NumRegs; ++i) {
689 P = DAG.getCopyFromReg(Chain, dl, Regs[Part+i], RegisterVT);
691 P = DAG.getCopyFromReg(Chain, dl, Regs[Part+i], RegisterVT, *Flag);
692 *Flag = P.getValue(2);
695 Chain = P.getValue(1);
698 // If the source register was virtual and if we know something about it,
699 // add an assert node.
700 if (!TargetRegisterInfo::isVirtualRegister(Regs[Part+i]) ||
701 !RegisterVT.isInteger() || RegisterVT.isVector())
704 const FunctionLoweringInfo::LiveOutInfo *LOI =
705 FuncInfo.GetLiveOutRegInfo(Regs[Part+i]);
709 unsigned RegSize = RegisterVT.getSizeInBits();
710 unsigned NumSignBits = LOI->NumSignBits;
711 unsigned NumZeroBits = LOI->KnownZero.countLeadingOnes();
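      // KnownZero is a mask of bits known to be zero, so the number of leading
      // ones in it is the number of high bits of the register known to be zero.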
      if (NumZeroBits == RegSize) {
        // The current value is a zero.
        // Explicitly express that as it would be easier for
        // optimizations to kick in.
        Parts[i] = DAG.getConstant(0, RegisterVT);
        continue;
      }

      // FIXME: We capture more information than the dag can represent.  For
      // now, just use the tightest assertzext/assertsext possible.
      bool isSExt = true;
      EVT FromVT(MVT::Other);
      if (NumSignBits == RegSize)
        isSExt = true, FromVT = MVT::i1;   // ASSERT SEXT 1
      else if (NumZeroBits >= RegSize-1)
        isSExt = false, FromVT = MVT::i1;  // ASSERT ZEXT 1
      else if (NumSignBits > RegSize-8)
        isSExt = true, FromVT = MVT::i8;   // ASSERT SEXT 8
      else if (NumZeroBits >= RegSize-8)
        isSExt = false, FromVT = MVT::i8;  // ASSERT ZEXT 8
      else if (NumSignBits > RegSize-16)
        isSExt = true, FromVT = MVT::i16;  // ASSERT SEXT 16
      else if (NumZeroBits >= RegSize-16)
        isSExt = false, FromVT = MVT::i16; // ASSERT ZEXT 16
      else if (NumSignBits > RegSize-32)
        isSExt = true, FromVT = MVT::i32;  // ASSERT SEXT 32
      else if (NumZeroBits >= RegSize-32)
        isSExt = false, FromVT = MVT::i32; // ASSERT ZEXT 32
      else
        continue;

      // Add an assertion node.
      assert(FromVT != MVT::Other);
      Parts[i] = DAG.getNode(isSExt ? ISD::AssertSext : ISD::AssertZext, dl,
                             RegisterVT, P, DAG.getValueType(FromVT));
    }

    Values[Value] = getCopyFromParts(DAG, dl, Parts.begin(),
                                     NumRegs, RegisterVT, ValueVT, V);
    Part += NumRegs;
  }

  return DAG.getNode(ISD::MERGE_VALUES, dl, DAG.getVTList(ValueVTs), Values);
}

/// getCopyToRegs - Emit a series of CopyToReg nodes that copies the
/// specified value into the registers specified by this object.  This uses
/// Chain/Flag as the input and updates them for the output Chain/Flag.
/// If the Flag pointer is NULL, no flag is used.
void RegsForValue::getCopyToRegs(SDValue Val, SelectionDAG &DAG, SDLoc dl,
                                 SDValue &Chain, SDValue *Flag,
                                 const Value *V) const {
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();

  // Get the list of the value's legal parts.
  unsigned NumRegs = Regs.size();
  SmallVector<SDValue, 8> Parts(NumRegs);
  for (unsigned Value = 0, Part = 0, e = ValueVTs.size(); Value != e; ++Value) {
    EVT ValueVT = ValueVTs[Value];
    unsigned NumParts = TLI.getNumRegisters(*DAG.getContext(), ValueVT);
    MVT RegisterVT = RegVTs[Value];
    ISD::NodeType ExtendKind =
      TLI.isZExtFree(Val, RegisterVT)? ISD::ZERO_EXTEND: ISD::ANY_EXTEND;
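    // Prefer a zero extension when the target can perform it for free on this
    // value; otherwise leave the choice of extension to the target.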
    getCopyToParts(DAG, dl, Val.getValue(Val.getResNo() + Value),
                   &Parts[Part], NumParts, RegisterVT, V, ExtendKind);
    Part += NumParts;
  }

  // Copy the parts into the registers.
  SmallVector<SDValue, 8> Chains(NumRegs);
  for (unsigned i = 0; i != NumRegs; ++i) {
    SDValue Part;
    if (!Flag) {
      Part = DAG.getCopyToReg(Chain, dl, Regs[i], Parts[i]);
    } else {
      Part = DAG.getCopyToReg(Chain, dl, Regs[i], Parts[i], *Flag);
      *Flag = Part.getValue(1);
    }

    Chains[i] = Part.getValue(0);
  }

  if (NumRegs == 1 || Flag)
    // If NumRegs > 1 && Flag is used then the use of the last CopyToReg is
    // flagged to it. That is, the CopyToReg nodes and the user are considered
    // a single scheduling unit. If we create a TokenFactor and return it as
    // chain, then the TokenFactor is both a predecessor (operand) of the
    // user as well as a successor (the TF operands are flagged to the user).
    // c1, f1 = CopyToReg
    // c2, f2 = CopyToReg
    // c3     = TokenFactor c1, c2
    // ...
    //        = op c3, ..., f2
    Chain = Chains[NumRegs-1];
  else
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Chains);
}

/// AddInlineAsmOperands - Add this value to the specified inlineasm node
/// operand list.  This adds the code marker and includes the number of
/// values added into it.
void RegsForValue::AddInlineAsmOperands(unsigned Code, bool HasMatching,
                                        unsigned MatchingIdx,
                                        SelectionDAG &DAG,
                                        std::vector<SDValue> &Ops) const {
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();

  unsigned Flag = InlineAsm::getFlagWord(Code, Regs.size());
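  // The flag word encodes the operand kind and the number of registers that
  // follow it; matching-operand or register-class information is folded in
  // below when available.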
  if (HasMatching)
    Flag = InlineAsm::getFlagWordForMatchingOp(Flag, MatchingIdx);
  else if (!Regs.empty() &&
           TargetRegisterInfo::isVirtualRegister(Regs.front())) {
    // Put the register class of the virtual registers in the flag word.  That
    // way, later passes can recompute register class constraints for inline
    // assembly as well as normal instructions.
    // Don't do this for tied operands that can use the regclass information
    // from the def.
    const MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo();
    const TargetRegisterClass *RC = MRI.getRegClass(Regs.front());
    Flag = InlineAsm::getFlagWordForRegClass(Flag, RC->getID());
  }

  SDValue Res = DAG.getTargetConstant(Flag, MVT::i32);
  Ops.push_back(Res);

  unsigned SP = TLI.getStackPointerRegisterToSaveRestore();
  for (unsigned Value = 0, Reg = 0, e = ValueVTs.size(); Value != e; ++Value) {
    unsigned NumRegs = TLI.getNumRegisters(*DAG.getContext(), ValueVTs[Value]);
    MVT RegisterVT = RegVTs[Value];
    for (unsigned i = 0; i != NumRegs; ++i) {
      assert(Reg < Regs.size() && "Mismatch in # registers expected");
      unsigned TheReg = Regs[Reg++];
      Ops.push_back(DAG.getRegister(TheReg, RegisterVT));

      if (TheReg == SP && Code == InlineAsm::Kind_Clobber) {
        // If we clobbered the stack pointer, MFI should know about it.
        assert(DAG.getMachineFunction().getFrameInfo()->
               hasInlineAsmWithSPAdjust());
      }
    }
  }
}

void SelectionDAGBuilder::init(GCFunctionInfo *gfi, AliasAnalysis &aa,
                               const TargetLibraryInfo *li) {
  AA = &aa;
  GFI = gfi;
  LibInfo = li;
  DL = DAG.getTarget().getDataLayout();
  Context = DAG.getContext();
  LPadToCallSiteMap.clear();
}

/// clear - Clear out the current SelectionDAG and the associated
/// state and prepare this SelectionDAGBuilder object to be used
/// for a new block. This doesn't clear out information about
/// additional blocks that are needed to complete switch lowering
/// or PHI node updating; that information is cleared out as it is
/// consumed.
void SelectionDAGBuilder::clear() {
  NodeMap.clear();
  UnusedArgNodeMap.clear();
  PendingLoads.clear();
  PendingExports.clear();
  CurInst = nullptr;
  HasTailCall = false;
  SDNodeOrder = LowestSDNodeOrder;
}

/// clearDanglingDebugInfo - Clear the dangling debug information
/// map. This function is separated from the clear so that debug
/// information that is dangling in a basic block can be properly
/// resolved in a different basic block. This allows the
/// SelectionDAG to resolve dangling debug information attached
/// to PHI nodes.
void SelectionDAGBuilder::clearDanglingDebugInfo() {
  DanglingDebugInfoMap.clear();
}

/// getRoot - Return the current virtual root of the Selection DAG,
/// flushing any PendingLoad items. This must be done before emitting
/// a store or any other node that may need to be ordered after any
/// prior load instructions.
///
SDValue SelectionDAGBuilder::getRoot() {
  if (PendingLoads.empty())
    return DAG.getRoot();

  if (PendingLoads.size() == 1) {
    SDValue Root = PendingLoads[0];
    DAG.setRoot(Root);
    PendingLoads.clear();
    return Root;
  }

  // Otherwise, we have to make a token factor node.
  SDValue Root = DAG.getNode(ISD::TokenFactor, getCurSDLoc(), MVT::Other,
                             PendingLoads);
  PendingLoads.clear();
  DAG.setRoot(Root);
  return Root;
}

/// getControlRoot - Similar to getRoot, but instead of flushing all the
/// PendingLoad items, flush all the PendingExports items. It is necessary
/// to do this before emitting a terminator instruction.
///
SDValue SelectionDAGBuilder::getControlRoot() {
  SDValue Root = DAG.getRoot();

  if (PendingExports.empty())
    return Root;

  // Turn all of the CopyToReg chains into one factored node.
  if (Root.getOpcode() != ISD::EntryToken) {
    unsigned i = 0, e = PendingExports.size();
    for (; i != e; ++i) {
      assert(PendingExports[i].getNode()->getNumOperands() > 1);
      if (PendingExports[i].getNode()->getOperand(0) == Root)
        break;  // Don't add the root if we already indirectly depend on it.
    }

    if (i == e)
      PendingExports.push_back(Root);
  }

  Root = DAG.getNode(ISD::TokenFactor, getCurSDLoc(), MVT::Other,
                     PendingExports);
  PendingExports.clear();
  DAG.setRoot(Root);
  return Root;
}

void SelectionDAGBuilder::visit(const Instruction &I) {
  // Set up outgoing PHI node register values before emitting the terminator.
  if (isa<TerminatorInst>(&I))
    HandlePHINodesInSuccessorBlocks(I.getParent());

  ++SDNodeOrder;

  CurInst = &I;

  visit(I.getOpcode(), I);

  if (!isa<TerminatorInst>(&I) && !HasTailCall)
    CopyToExportRegsIfNeeded(&I);

  CurInst = nullptr;
}

void SelectionDAGBuilder::visitPHI(const PHINode &) {
  llvm_unreachable("SelectionDAGBuilder shouldn't visit PHI nodes!");
}

void SelectionDAGBuilder::visit(unsigned Opcode, const User &I) {
  // Note: this doesn't use InstVisitor, because it has to work with
  // ConstantExpr's in addition to instructions.
  switch (Opcode) {
  default: llvm_unreachable("Unknown instruction type encountered!");
    // Build the switch statement using the Instruction.def file.
#define HANDLE_INST(NUM, OPCODE, CLASS) \
    case Instruction::OPCODE: visit##OPCODE((const CLASS&)I); break;
#include "llvm/IR/Instruction.def"
  }
}

// resolveDanglingDebugInfo - if we saw an earlier dbg_value referring to V,
// generate the debug data structures now that we've seen its definition.
void SelectionDAGBuilder::resolveDanglingDebugInfo(const Value *V,
                                                   SDValue Val) {
  DanglingDebugInfo &DDI = DanglingDebugInfoMap[V];
  if (DDI.getDI()) {
    const DbgValueInst *DI = DDI.getDI();
    DebugLoc dl = DDI.getdl();
    unsigned DbgSDNodeOrder = DDI.getSDNodeOrder();
    MDNode *Variable = DI->getVariable();
    uint64_t Offset = DI->getOffset();
    // A dbg.value for an alloca is always indirect.
    bool IsIndirect = isa<AllocaInst>(V) || Offset != 0;
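    // An indirect dbg.value describes the address of the variable rather than
    // its value, which is why allocas and non-zero offsets force indirection.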
    SDDbgValue *SDV;
    if (Val.getNode()) {
      if (!EmitFuncArgumentDbgValue(V, Variable, Offset, IsIndirect, Val)) {
        SDV = DAG.getDbgValue(Variable, Val.getNode(),
                              Val.getResNo(), IsIndirect,
                              Offset, dl, DbgSDNodeOrder);
        DAG.AddDbgValue(SDV, Val.getNode(), false);
      }
    } else
      DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n");
    DanglingDebugInfoMap[V] = DanglingDebugInfo();
  }
}

/// getValue - Return an SDValue for the given Value.
SDValue SelectionDAGBuilder::getValue(const Value *V) {
  // If we already have an SDValue for this value, use it. It's important
  // to do this first, so that we don't create a CopyFromReg if we already
  // have a regular SDValue.
  SDValue &N = NodeMap[V];
  if (N.getNode()) return N;

  // If there's a virtual register allocated and initialized for this
  // value, use it.
  DenseMap<const Value *, unsigned>::iterator It = FuncInfo.ValueMap.find(V);
  if (It != FuncInfo.ValueMap.end()) {
    unsigned InReg = It->second;
    RegsForValue RFV(*DAG.getContext(), *TM.getTargetLowering(),
                     InReg, V->getType());
    SDValue Chain = DAG.getEntryNode();
    N = RFV.getCopyFromRegs(DAG, FuncInfo, getCurSDLoc(), Chain, nullptr, V);
    resolveDanglingDebugInfo(V, N);
    return N;
  }

  // Otherwise create a new SDValue and remember it.
  SDValue Val = getValueImpl(V);
  NodeMap[V] = Val;
  resolveDanglingDebugInfo(V, Val);
  return Val;
}

/// getNonRegisterValue - Return an SDValue for the given Value, but
/// don't look in FuncInfo.ValueMap for a virtual register.
SDValue SelectionDAGBuilder::getNonRegisterValue(const Value *V) {
  // If we already have an SDValue for this value, use it.
  SDValue &N = NodeMap[V];
  if (N.getNode()) return N;

  // Otherwise create a new SDValue and remember it.
  SDValue Val = getValueImpl(V);
  NodeMap[V] = Val;
  resolveDanglingDebugInfo(V, Val);
  return Val;
}

/// getValueImpl - Helper function for getValue and getNonRegisterValue.
/// Create an SDValue for the given value.
SDValue SelectionDAGBuilder::getValueImpl(const Value *V) {
  const TargetLowering *TLI = TM.getTargetLowering();

  if (const Constant *C = dyn_cast<Constant>(V)) {
    EVT VT = TLI->getValueType(V->getType(), true);

    if (const ConstantInt *CI = dyn_cast<ConstantInt>(C))
      return DAG.getConstant(*CI, VT);

    if (const GlobalValue *GV = dyn_cast<GlobalValue>(C))
      return DAG.getGlobalAddress(GV, getCurSDLoc(), VT);

    if (isa<ConstantPointerNull>(C)) {
      unsigned AS = V->getType()->getPointerAddressSpace();
      return DAG.getConstant(0, TLI->getPointerTy(AS));
    }

    if (const ConstantFP *CFP = dyn_cast<ConstantFP>(C))
      return DAG.getConstantFP(*CFP, VT);

    if (isa<UndefValue>(C) && !V->getType()->isAggregateType())
      return DAG.getUNDEF(VT);

    if (const ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) {
      visit(CE->getOpcode(), *CE);
      SDValue N1 = NodeMap[V];
      assert(N1.getNode() && "visit didn't populate the NodeMap!");
      return N1;
    }

    if (isa<ConstantStruct>(C) || isa<ConstantArray>(C)) {
      SmallVector<SDValue, 4> Constants;
      for (User::const_op_iterator OI = C->op_begin(), OE = C->op_end();
           OI != OE; ++OI) {
        SDNode *Val = getValue(*OI).getNode();
        // If the operand is an empty aggregate, there are no values.
        if (!Val) continue;
        // Add each leaf value from the operand to the Constants list
        // to form a flattened list of all the values.
        for (unsigned i = 0, e = Val->getNumValues(); i != e; ++i)
          Constants.push_back(SDValue(Val, i));
      }

      return DAG.getMergeValues(Constants, getCurSDLoc());
    }
    if (const ConstantDataSequential *CDS =
          dyn_cast<ConstantDataSequential>(C)) {
      SmallVector<SDValue, 4> Ops;
      for (unsigned i = 0, e = CDS->getNumElements(); i != e; ++i) {
        SDNode *Val = getValue(CDS->getElementAsConstant(i)).getNode();
        // Add each leaf value from the operand to the Constants list
        // to form a flattened list of all the values.
        for (unsigned i = 0, e = Val->getNumValues(); i != e; ++i)
          Ops.push_back(SDValue(Val, i));
      }

      if (isa<ArrayType>(CDS->getType()))
        return DAG.getMergeValues(Ops, getCurSDLoc());
      return NodeMap[V] = DAG.getNode(ISD::BUILD_VECTOR, getCurSDLoc(),
                                      VT, Ops);
    }

    if (C->getType()->isStructTy() || C->getType()->isArrayTy()) {
      assert((isa<ConstantAggregateZero>(C) || isa<UndefValue>(C)) &&
             "Unknown struct or array constant!");

      SmallVector<EVT, 4> ValueVTs;
      ComputeValueVTs(*TLI, C->getType(), ValueVTs);
      unsigned NumElts = ValueVTs.size();
      if (NumElts == 0)
        return SDValue(); // empty struct
      SmallVector<SDValue, 4> Constants(NumElts);
      for (unsigned i = 0; i != NumElts; ++i) {
        EVT EltVT = ValueVTs[i];
        if (isa<UndefValue>(C))
          Constants[i] = DAG.getUNDEF(EltVT);
        else if (EltVT.isFloatingPoint())
          Constants[i] = DAG.getConstantFP(0, EltVT);
        else
          Constants[i] = DAG.getConstant(0, EltVT);
      }

      return DAG.getMergeValues(Constants, getCurSDLoc());
    }
    if (const BlockAddress *BA = dyn_cast<BlockAddress>(C))
      return DAG.getBlockAddress(BA, VT);

    VectorType *VecTy = cast<VectorType>(V->getType());
    unsigned NumElements = VecTy->getNumElements();

    // Now that we know the number and type of the elements, get that number of
    // elements into the Ops array based on what kind of constant it is.
    SmallVector<SDValue, 16> Ops;
    if (const ConstantVector *CV = dyn_cast<ConstantVector>(C)) {
      for (unsigned i = 0; i != NumElements; ++i)
        Ops.push_back(getValue(CV->getOperand(i)));
    } else {
      assert(isa<ConstantAggregateZero>(C) && "Unknown vector constant!");
      EVT EltVT = TLI->getValueType(VecTy->getElementType());

      SDValue Op;
      if (EltVT.isFloatingPoint())
        Op = DAG.getConstantFP(0, EltVT);
      else
        Op = DAG.getConstant(0, EltVT);
      Ops.assign(NumElements, Op);
    }

    // Create a BUILD_VECTOR node.
    return NodeMap[V] = DAG.getNode(ISD::BUILD_VECTOR, getCurSDLoc(), VT, Ops);
  }
  // If this is a static alloca, generate it as the frameindex instead of
  // creation.
  if (const AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
    DenseMap<const AllocaInst*, int>::iterator SI =
      FuncInfo.StaticAllocaMap.find(AI);
    if (SI != FuncInfo.StaticAllocaMap.end())
      return DAG.getFrameIndex(SI->second, TLI->getPointerTy());
  }

  // If this is an instruction which fast-isel has deferred, select it now.
  if (const Instruction *Inst = dyn_cast<Instruction>(V)) {
    unsigned InReg = FuncInfo.InitializeRegForValue(Inst);
    RegsForValue RFV(*DAG.getContext(), *TLI, InReg, Inst->getType());
    SDValue Chain = DAG.getEntryNode();
    return RFV.getCopyFromRegs(DAG, FuncInfo, getCurSDLoc(), Chain, nullptr, V);
  }

  llvm_unreachable("Can't get register for value!");
}

void SelectionDAGBuilder::visitRet(const ReturnInst &I) {
  const TargetLowering *TLI = TM.getTargetLowering();
  SDValue Chain = getControlRoot();
  SmallVector<ISD::OutputArg, 8> Outs;
  SmallVector<SDValue, 8> OutVals;

  if (!FuncInfo.CanLowerReturn) {
    unsigned DemoteReg = FuncInfo.DemoteRegister;
    const Function *F = I.getParent()->getParent();

    // Emit a store of the return value through the virtual register.
    // Leave Outs empty so that LowerReturn won't try to load return
    // registers the usual way.
    SmallVector<EVT, 1> PtrValueVTs;
    ComputeValueVTs(*TLI, PointerType::getUnqual(F->getReturnType()),
                    PtrValueVTs);

    SDValue RetPtr = DAG.getRegister(DemoteReg, PtrValueVTs[0]);
    SDValue RetOp = getValue(I.getOperand(0));

    SmallVector<EVT, 4> ValueVTs;
    SmallVector<uint64_t, 4> Offsets;
    ComputeValueVTs(*TLI, I.getOperand(0)->getType(), ValueVTs, &Offsets);
    unsigned NumValues = ValueVTs.size();

    SmallVector<SDValue, 4> Chains(NumValues);
    for (unsigned i = 0; i != NumValues; ++i) {
      SDValue Add = DAG.getNode(ISD::ADD, getCurSDLoc(),
                                RetPtr.getValueType(), RetPtr,
                                DAG.getIntPtrConstant(Offsets[i]));
      Chains[i] =
        DAG.getStore(Chain, getCurSDLoc(),
                     SDValue(RetOp.getNode(), RetOp.getResNo() + i),
                     // FIXME: better loc info would be nice.
                     Add, MachinePointerInfo(), false, false, 0);
    }

    Chain = DAG.getNode(ISD::TokenFactor, getCurSDLoc(),
                        MVT::Other, Chains);
  } else if (I.getNumOperands() != 0) {
    SmallVector<EVT, 4> ValueVTs;
    ComputeValueVTs(*TLI, I.getOperand(0)->getType(), ValueVTs);
    unsigned NumValues = ValueVTs.size();
    if (NumValues) {
      SDValue RetOp = getValue(I.getOperand(0));
      for (unsigned j = 0, f = NumValues; j != f; ++j) {
        EVT VT = ValueVTs[j];

        ISD::NodeType ExtendKind = ISD::ANY_EXTEND;

        const Function *F = I.getParent()->getParent();
        if (F->getAttributes().hasAttribute(AttributeSet::ReturnIndex,
                                            Attribute::SExt))
          ExtendKind = ISD::SIGN_EXTEND;
        else if (F->getAttributes().hasAttribute(AttributeSet::ReturnIndex,
                                                 Attribute::ZExt))
          ExtendKind = ISD::ZERO_EXTEND;

        if (ExtendKind != ISD::ANY_EXTEND && VT.isInteger())
          VT = TLI->getTypeForExtArgOrReturn(VT.getSimpleVT(), ExtendKind);

        unsigned NumParts = TLI->getNumRegisters(*DAG.getContext(), VT);
        MVT PartVT = TLI->getRegisterType(*DAG.getContext(), VT);
        SmallVector<SDValue, 4> Parts(NumParts);
        getCopyToParts(DAG, getCurSDLoc(),
                       SDValue(RetOp.getNode(), RetOp.getResNo() + j),
                       &Parts[0], NumParts, PartVT, &I, ExtendKind);

        // 'inreg' on function refers to return value
        ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy();
        if (F->getAttributes().hasAttribute(AttributeSet::ReturnIndex,
                                            Attribute::InReg))
          Flags.setInReg();

        // Propagate extension type if any
        if (ExtendKind == ISD::SIGN_EXTEND)
          Flags.setSExt();
        else if (ExtendKind == ISD::ZERO_EXTEND)
          Flags.setZExt();

        for (unsigned i = 0; i < NumParts; ++i) {
          Outs.push_back(ISD::OutputArg(Flags, Parts[i].getValueType(),
                                        VT, /*isfixed=*/true, 0, 0));
          OutVals.push_back(Parts[i]);
        }
      }
    }
  }

  bool isVarArg = DAG.getMachineFunction().getFunction()->isVarArg();
  CallingConv::ID CallConv =
    DAG.getMachineFunction().getFunction()->getCallingConv();
  Chain = TM.getTargetLowering()->LowerReturn(Chain, CallConv, isVarArg,
                                              Outs, OutVals, getCurSDLoc(),
                                              DAG);

  // Verify that the target's LowerReturn behaved as expected.
  assert(Chain.getNode() && Chain.getValueType() == MVT::Other &&
         "LowerReturn didn't return a valid chain!");

  // Update the DAG with the new chain value resulting from return lowering.
  DAG.setRoot(Chain);
}

/// CopyToExportRegsIfNeeded - If the given value has virtual registers
/// created for it, emit nodes to copy the value into the virtual
/// registers.
void SelectionDAGBuilder::CopyToExportRegsIfNeeded(const Value *V) {
  // Skip empty types.
  if (V->getType()->isEmptyTy())
    return;

  DenseMap<const Value *, unsigned>::iterator VMI = FuncInfo.ValueMap.find(V);
  if (VMI != FuncInfo.ValueMap.end()) {
    assert(!V->use_empty() && "Unused value assigned virtual registers!");
    CopyValueToVirtualRegister(V, VMI->second);
  }
}

/// ExportFromCurrentBlock - If this condition isn't known to be exported from
/// the current basic block, add it to ValueMap now so that we'll get a
/// CopyTo/FromReg.
void SelectionDAGBuilder::ExportFromCurrentBlock(const Value *V) {
  // No need to export constants.
  if (!isa<Instruction>(V) && !isa<Argument>(V)) return;

  // Already exported?
  if (FuncInfo.isExportedInst(V)) return;

  unsigned Reg = FuncInfo.InitializeRegForValue(V);
  CopyValueToVirtualRegister(V, Reg);
}

bool SelectionDAGBuilder::isExportableFromCurrentBlock(const Value *V,
                                                       const BasicBlock *FromBB) {
  // The operands of the setcc have to be in this block.  We don't know
  // how to export them from some other block.
  if (const Instruction *VI = dyn_cast<Instruction>(V)) {
    // Can export from current BB.
    if (VI->getParent() == FromBB)
      return true;

    // Is already exported, noop.
    return FuncInfo.isExportedInst(V);
  }

  // If this is an argument, we can export it if the BB is the entry block or
  // if it is already exported.
  if (isa<Argument>(V)) {
    if (FromBB == &FromBB->getParent()->getEntryBlock())
      return true;

    // Otherwise, can only export this if it is already exported.
    return FuncInfo.isExportedInst(V);
  }

  // Otherwise, constants can always be exported.
  return true;
}

/// Return the branch probability calculated by BranchProbabilityInfo for IR blocks.
uint32_t SelectionDAGBuilder::getEdgeWeight(const MachineBasicBlock *Src,
                                            const MachineBasicBlock *Dst) const {
  BranchProbabilityInfo *BPI = FuncInfo.BPI;
  if (!BPI)
    return 0;
  const BasicBlock *SrcBB = Src->getBasicBlock();
  const BasicBlock *DstBB = Dst->getBasicBlock();
  return BPI->getEdgeWeight(SrcBB, DstBB);
}

void SelectionDAGBuilder::
addSuccessorWithWeight(MachineBasicBlock *Src, MachineBasicBlock *Dst,
                       uint32_t Weight /* = 0 */) {
  if (!Weight)
    Weight = getEdgeWeight(Src, Dst);
  Src->addSuccessor(Dst, Weight);
}

static bool InBlock(const Value *V, const BasicBlock *BB) {
  if (const Instruction *I = dyn_cast<Instruction>(V))
    return I->getParent() == BB;
  return true;
}

/// EmitBranchForMergedCondition - Helper method for FindMergedConditions.
/// This function emits a branch and is used at the leaves of an OR or an
/// AND operator tree.
///
void
SelectionDAGBuilder::EmitBranchForMergedCondition(const Value *Cond,
                                                  MachineBasicBlock *TBB,
                                                  MachineBasicBlock *FBB,
                                                  MachineBasicBlock *CurBB,
                                                  MachineBasicBlock *SwitchBB,
                                                  uint32_t TWeight,
                                                  uint32_t FWeight) {
  const BasicBlock *BB = CurBB->getBasicBlock();

  // If the leaf of the tree is a comparison, merge the condition into
  // the caseblock.
  if (const CmpInst *BOp = dyn_cast<CmpInst>(Cond)) {
    // The operands of the cmp have to be in this block.  We don't know
    // how to export them from some other block.  If this is the first block
    // of the sequence, no exporting is needed.
    if (CurBB == SwitchBB ||
        (isExportableFromCurrentBlock(BOp->getOperand(0), BB) &&
         isExportableFromCurrentBlock(BOp->getOperand(1), BB))) {
      ISD::CondCode Condition;
      if (const ICmpInst *IC = dyn_cast<ICmpInst>(Cond)) {
        Condition = getICmpCondCode(IC->getPredicate());
      } else if (const FCmpInst *FC = dyn_cast<FCmpInst>(Cond)) {
        Condition = getFCmpCondCode(FC->getPredicate());
        if (TM.Options.NoNaNsFPMath)
          Condition = getFCmpCodeWithoutNaN(Condition);
      } else {
        Condition = ISD::SETEQ; // silence warning.
        llvm_unreachable("Unknown compare instruction");
      }

      CaseBlock CB(Condition, BOp->getOperand(0), BOp->getOperand(1), nullptr,
                   TBB, FBB, CurBB, TWeight, FWeight);
      SwitchCases.push_back(CB);
      return;
    }
  }

  // Create a CaseBlock record representing this branch.
  CaseBlock CB(ISD::SETEQ, Cond, ConstantInt::getTrue(*DAG.getContext()),
               nullptr, TBB, FBB, CurBB, TWeight, FWeight);
  SwitchCases.push_back(CB);
}

/// Scale down both weights to fit into uint32_t.
static void ScaleWeights(uint64_t &NewTrue, uint64_t &NewFalse) {
  uint64_t NewMax = (NewTrue > NewFalse) ? NewTrue : NewFalse;
  uint32_t Scale = (NewMax / UINT32_MAX) + 1;
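  // Dividing by Scale guarantees that the larger of the two values no longer
  // exceeds UINT32_MAX, so both results fit in uint32_t.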
  NewTrue = NewTrue / Scale;
  NewFalse = NewFalse / Scale;
}

/// FindMergedConditions - If Cond is an expression like
void SelectionDAGBuilder::FindMergedConditions(const Value *Cond,
                                               MachineBasicBlock *TBB,
                                               MachineBasicBlock *FBB,
                                               MachineBasicBlock *CurBB,
                                               MachineBasicBlock *SwitchBB,
                                               unsigned Opc, uint32_t TWeight,
                                               uint32_t FWeight) {
  // If this node is not part of the or/and tree, emit it as a branch.
  const Instruction *BOp = dyn_cast<Instruction>(Cond);
  if (!BOp || !(isa<BinaryOperator>(BOp) || isa<CmpInst>(BOp)) ||
      (unsigned)BOp->getOpcode() != Opc || !BOp->hasOneUse() ||
      BOp->getParent() != CurBB->getBasicBlock() ||
      !InBlock(BOp->getOperand(0), CurBB->getBasicBlock()) ||
      !InBlock(BOp->getOperand(1), CurBB->getBasicBlock())) {
    EmitBranchForMergedCondition(Cond, TBB, FBB, CurBB, SwitchBB,
                                 TWeight, FWeight);
    return;
  }

  // Create TmpBB after CurBB.
  MachineFunction::iterator BBI = CurBB;
  MachineFunction &MF = DAG.getMachineFunction();
  MachineBasicBlock *TmpBB = MF.CreateMachineBasicBlock(CurBB->getBasicBlock());
  CurBB->getParent()->insert(++BBI, TmpBB);

  if (Opc == Instruction::Or) {
    // Codegen X | Y as:
    // BB1:
    //   jmp_if_X TBB
    //   jmp TmpBB
    // TmpBB:
    //   jmp_if_Y TBB
    //   jmp FBB
    //

    // We have flexibility in setting Prob for BB1 and Prob for TmpBB.
    // The requirement is that
    //   TrueProb for BB1 + (FalseProb for BB1 * TrueProb for TmpBB)
    //     = TrueProb for original BB.
    // Assuming the original weights are A and B, one choice is to set BB1's
    // weights to A and A+2B, and set TmpBB's weights to A and 2B. This choice
    // assumes that
    //   TrueProb for BB1 == FalseProb for BB1 * TrueProb for TmpBB.
    // Another choice is to assume TrueProb for BB1 is equal to TrueProb for
    // TmpBB, but the math is more complicated.

    uint64_t NewTrueWeight = TWeight;
    uint64_t NewFalseWeight = (uint64_t)TWeight + 2 * (uint64_t)FWeight;
    ScaleWeights(NewTrueWeight, NewFalseWeight);
    // Emit the LHS condition.
    FindMergedConditions(BOp->getOperand(0), TBB, TmpBB, CurBB, SwitchBB, Opc,
                         NewTrueWeight, NewFalseWeight);

    NewTrueWeight = TWeight;
    NewFalseWeight = 2 * (uint64_t)FWeight;
    ScaleWeights(NewTrueWeight, NewFalseWeight);
    // Emit the RHS condition into TmpBB.
    FindMergedConditions(BOp->getOperand(1), TBB, FBB, TmpBB, SwitchBB, Opc,
                         NewTrueWeight, NewFalseWeight);
  } else {
    assert(Opc == Instruction::And && "Unknown merge op!");
    // Codegen X & Y as:
    // BB1:
    //   jmp_if_X TmpBB
    //   jmp FBB
    // TmpBB:
    //   jmp_if_Y TBB
    //   jmp FBB
    //
    // This requires creation of TmpBB after CurBB.

    // We have flexibility in setting Prob for BB1 and Prob for TmpBB.
    // The requirement is that
    //   FalseProb for BB1 + (TrueProb for BB1 * FalseProb for TmpBB)
    //     = FalseProb for original BB.
    // Assuming the original weights are A and B, one choice is to set BB1's
    // weights to 2A+B and B, and set TmpBB's weights to 2A and B. This choice
    // assumes that
    //   FalseProb for BB1 == TrueProb for BB1 * FalseProb for TmpBB.

    uint64_t NewTrueWeight = 2 * (uint64_t)TWeight + (uint64_t)FWeight;
    uint64_t NewFalseWeight = FWeight;
    ScaleWeights(NewTrueWeight, NewFalseWeight);
    // Emit the LHS condition.
    FindMergedConditions(BOp->getOperand(0), TmpBB, FBB, CurBB, SwitchBB, Opc,
                         NewTrueWeight, NewFalseWeight);

    NewTrueWeight = 2 * (uint64_t)TWeight;
    NewFalseWeight = FWeight;
    ScaleWeights(NewTrueWeight, NewFalseWeight);
    // Emit the RHS condition into TmpBB.
    FindMergedConditions(BOp->getOperand(1), TBB, FBB, TmpBB, SwitchBB, Opc,
                         NewTrueWeight, NewFalseWeight);
  }
}

/// If the set of cases should be emitted as a series of branches, return true.
/// If we should emit this as a bunch of and/or'd together conditions, return
/// false.
bool
SelectionDAGBuilder::ShouldEmitAsBranches(const std::vector<CaseBlock> &Cases) {
  if (Cases.size() != 2) return true;

  // If this is two comparisons of the same values or'd or and'd together, they
  // will get folded into a single comparison, so don't emit two blocks.
  if ((Cases[0].CmpLHS == Cases[1].CmpLHS &&
       Cases[0].CmpRHS == Cases[1].CmpRHS) ||
      (Cases[0].CmpRHS == Cases[1].CmpLHS &&
       Cases[0].CmpLHS == Cases[1].CmpRHS)) {
    return false;
  }

  // Handle: (X != null) | (Y != null) --> (X|Y) != 0
  // Handle: (X == null) & (Y == null) --> (X|Y) == 0
  if (Cases[0].CmpRHS == Cases[1].CmpRHS &&
      Cases[0].CC == Cases[1].CC &&
      isa<Constant>(Cases[0].CmpRHS) &&
      cast<Constant>(Cases[0].CmpRHS)->isNullValue()) {
    if (Cases[0].CC == ISD::SETEQ && Cases[0].TrueBB == Cases[1].ThisBB)
      return false;
    if (Cases[0].CC == ISD::SETNE && Cases[0].FalseBB == Cases[1].ThisBB)
      return false;
  }

  return true;
}

void SelectionDAGBuilder::visitBr(const BranchInst &I) {
  MachineBasicBlock *BrMBB = FuncInfo.MBB;

  // Update machine-CFG edges.
  MachineBasicBlock *Succ0MBB = FuncInfo.MBBMap[I.getSuccessor(0)];

  // Figure out which block is immediately after the current one.
  MachineBasicBlock *NextBlock = nullptr;
  MachineFunction::iterator BBI = BrMBB;
  if (++BBI != FuncInfo.MF->end())
    NextBlock = BBI;

  if (I.isUnconditional()) {
    // Update machine-CFG edges.
    BrMBB->addSuccessor(Succ0MBB);

    // If this is not a fall-through branch or optimizations are switched off,
    // emit the branch.
    if (Succ0MBB != NextBlock || TM.getOptLevel() == CodeGenOpt::None)
      DAG.setRoot(DAG.getNode(ISD::BR, getCurSDLoc(),
                              MVT::Other, getControlRoot(),
                              DAG.getBasicBlock(Succ0MBB)));

    return;
  }

  // If this condition is one of the special cases we handle, do special stuff
  // now.
  const Value *CondVal = I.getCondition();
  MachineBasicBlock *Succ1MBB = FuncInfo.MBBMap[I.getSuccessor(1)];

  // If this is a series of conditions that are or'd or and'd together, emit
  // this as a sequence of branches instead of setcc's with and/or operations.
  // As long as jumps are not expensive, this should improve performance.
  // For example, instead of something like:
  //     cmp A, B
  //     C = seteq
  //     cmp D, E
  //     F = setle
  //     or C, F
  //     jnz foo
  // Emit:
  //     cmp A, B
  //     je foo
  //     cmp D, E
  //     jle foo
  //
1603 if (const BinaryOperator *BOp = dyn_cast<BinaryOperator>(CondVal)) {
1604 if (!TM.getTargetLowering()->isJumpExpensive() &&
1606 (BOp->getOpcode() == Instruction::And ||
1607 BOp->getOpcode() == Instruction::Or)) {
1608 FindMergedConditions(BOp, Succ0MBB, Succ1MBB, BrMBB, BrMBB,
1609 BOp->getOpcode(), getEdgeWeight(BrMBB, Succ0MBB),
1610 getEdgeWeight(BrMBB, Succ1MBB));
1611 // If the compares in later blocks need to use values not currently
1612 // exported from this block, export them now. This block should always
1613 // be the first entry.
1614 assert(SwitchCases[0].ThisBB == BrMBB && "Unexpected lowering!");
1616 // Allow some cases to be rejected.
1617 if (ShouldEmitAsBranches(SwitchCases)) {
1618 for (unsigned i = 1, e = SwitchCases.size(); i != e; ++i) {
1619 ExportFromCurrentBlock(SwitchCases[i].CmpLHS);
1620 ExportFromCurrentBlock(SwitchCases[i].CmpRHS);
1623 // Emit the branch for this block.
1624 visitSwitchCase(SwitchCases[0], BrMBB);
1625 SwitchCases.erase(SwitchCases.begin());
1629 // Okay, we decided not to do this, remove any inserted MBB's and clear SwitchCases.
1631 for (unsigned i = 1, e = SwitchCases.size(); i != e; ++i)
1632 FuncInfo.MF->erase(SwitchCases[i].ThisBB);
1634 SwitchCases.clear();
1638 // Create a CaseBlock record representing this branch.
1639 CaseBlock CB(ISD::SETEQ, CondVal, ConstantInt::getTrue(*DAG.getContext()),
1640 nullptr, Succ0MBB, Succ1MBB, BrMBB);
1642 // Use visitSwitchCase to actually insert the fast branch sequence for this cond branch.
1644 visitSwitchCase(CB, BrMBB);
1647 /// visitSwitchCase - Emits the necessary code to represent a single node in
1648 /// the binary search tree resulting from lowering a switch instruction.
1649 void SelectionDAGBuilder::visitSwitchCase(CaseBlock &CB,
1650 MachineBasicBlock *SwitchBB) {
1652 SDValue CondLHS = getValue(CB.CmpLHS);
1653 SDLoc dl = getCurSDLoc();
1655 // Build the setcc now.
1657 // Fold "(X == true)" to X and "(X == false)" to !X to
1658 // handle common cases produced by branch lowering.
1659 if (CB.CmpRHS == ConstantInt::getTrue(*DAG.getContext()) &&
1660 CB.CC == ISD::SETEQ)
1662 else if (CB.CmpRHS == ConstantInt::getFalse(*DAG.getContext()) &&
1663 CB.CC == ISD::SETEQ) {
1664 SDValue True = DAG.getConstant(1, CondLHS.getValueType());
1665 Cond = DAG.getNode(ISD::XOR, dl, CondLHS.getValueType(), CondLHS, True);
1667 Cond = DAG.getSetCC(dl, MVT::i1, CondLHS, getValue(CB.CmpRHS), CB.CC);
1669 assert(CB.CC == ISD::SETLE && "Can handle only LE ranges now");
1671 const APInt& Low = cast<ConstantInt>(CB.CmpLHS)->getValue();
1672 const APInt& High = cast<ConstantInt>(CB.CmpRHS)->getValue();
1674 SDValue CmpOp = getValue(CB.CmpMHS);
1675 EVT VT = CmpOp.getValueType();
1677 if (cast<ConstantInt>(CB.CmpLHS)->isMinValue(true)) {
1678 Cond = DAG.getSetCC(dl, MVT::i1, CmpOp, DAG.getConstant(High, VT),
1681 SDValue SUB = DAG.getNode(ISD::SUB, dl,
1682 VT, CmpOp, DAG.getConstant(Low, VT));
1683 Cond = DAG.getSetCC(dl, MVT::i1, SUB,
1684 DAG.getConstant(High-Low, VT), ISD::SETULE);
1688 // Update successor info
1689 addSuccessorWithWeight(SwitchBB, CB.TrueBB, CB.TrueWeight);
1690 // TrueBB and FalseBB are always different unless the incoming IR is
1691 // degenerate. This only happens when running llc on weird IR.
1692 if (CB.TrueBB != CB.FalseBB)
1693 addSuccessorWithWeight(SwitchBB, CB.FalseBB, CB.FalseWeight);
1695 // Set NextBlock to be the MBB immediately after the current one, if any.
1696 // This is used to avoid emitting unnecessary branches to the next block.
1697 MachineBasicBlock *NextBlock = nullptr;
1698 MachineFunction::iterator BBI = SwitchBB;
1699 if (++BBI != FuncInfo.MF->end())
1702 // If the lhs block is the next block, invert the condition so that we can
1703 // fall through to the lhs instead of the rhs block.
1704 if (CB.TrueBB == NextBlock) {
1705 std::swap(CB.TrueBB, CB.FalseBB);
1706 SDValue True = DAG.getConstant(1, Cond.getValueType());
1707 Cond = DAG.getNode(ISD::XOR, dl, Cond.getValueType(), Cond, True);
1710 SDValue BrCond = DAG.getNode(ISD::BRCOND, dl,
1711 MVT::Other, getControlRoot(), Cond,
1712 DAG.getBasicBlock(CB.TrueBB));
1714 // Insert the false branch. Do this even if it's a fall through branch,
1715 // this makes it easier to do DAG optimizations which require inverting
1716 // the branch condition.
1717 BrCond = DAG.getNode(ISD::BR, dl, MVT::Other, BrCond,
1718 DAG.getBasicBlock(CB.FalseBB));
1720 DAG.setRoot(BrCond);
1723 /// visitJumpTable - Emit JumpTable node in the current MBB
1724 void SelectionDAGBuilder::visitJumpTable(JumpTable &JT) {
1725 // Emit the code for the jump table
1726 assert(JT.Reg != -1U && "Should lower JT Header first!");
1727 EVT PTy = TM.getTargetLowering()->getPointerTy();
1728 SDValue Index = DAG.getCopyFromReg(getControlRoot(), getCurSDLoc(),
1730 SDValue Table = DAG.getJumpTable(JT.JTI, PTy);
1731 SDValue BrJumpTable = DAG.getNode(ISD::BR_JT, getCurSDLoc(),
1732 MVT::Other, Index.getValue(1),
1734 DAG.setRoot(BrJumpTable);
1737 /// visitJumpTableHeader - This function emits the necessary code to produce the
1738 /// index into the JumpTable from the switch case value.
1739 void SelectionDAGBuilder::visitJumpTableHeader(JumpTable &JT,
1740 JumpTableHeader &JTH,
1741 MachineBasicBlock *SwitchBB) {
1742 // Subtract the lowest switch case value from the value being switched on and
1743 // conditionally branch to the default mbb if the result is greater than the
1744 // difference between the smallest and largest cases.
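// For example (an illustration, not from the source): if the cases cover the
// values 10 through 14, the code below computes Sub = SwitchOp - 10 and sends
// any result above 4 (checked as an unsigned comparison) to the default block.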
1745 SDValue SwitchOp = getValue(JTH.SValue);
1746 EVT VT = SwitchOp.getValueType();
1747 SDValue Sub = DAG.getNode(ISD::SUB, getCurSDLoc(), VT, SwitchOp,
1748 DAG.getConstant(JTH.First, VT));
1750 // The SDNode we just created, which holds the value being switched on minus
1751 // the smallest case value, needs to be copied to a virtual register so it
1752 // can be used as an index into the jump table in a subsequent basic block.
1753 // This value may be smaller or larger than the target's pointer type, and
1754 // may therefore require extension or truncation.
1755 const TargetLowering *TLI = TM.getTargetLowering();
1756 SwitchOp = DAG.getZExtOrTrunc(Sub, getCurSDLoc(), TLI->getPointerTy());
1758 unsigned JumpTableReg = FuncInfo.CreateReg(TLI->getPointerTy());
1759 SDValue CopyTo = DAG.getCopyToReg(getControlRoot(), getCurSDLoc(),
1760 JumpTableReg, SwitchOp);
1761 JT.Reg = JumpTableReg;
1763 // Emit the range check for the jump table, and branch to the default block
1764 // for the switch statement if the value being switched on exceeds the largest
1765 // case in the switch.
1766 SDValue CMP = DAG.getSetCC(getCurSDLoc(),
1767 TLI->getSetCCResultType(*DAG.getContext(),
1768 Sub.getValueType()),
1770 DAG.getConstant(JTH.Last - JTH.First,VT),
1773 // Set NextBlock to be the MBB immediately after the current one, if any.
1774 // This is used to avoid emitting unnecessary branches to the next block.
1775 MachineBasicBlock *NextBlock = nullptr;
1776 MachineFunction::iterator BBI = SwitchBB;
1778 if (++BBI != FuncInfo.MF->end())
1781 SDValue BrCond = DAG.getNode(ISD::BRCOND, getCurSDLoc(),
1782 MVT::Other, CopyTo, CMP,
1783 DAG.getBasicBlock(JT.Default));
1785 if (JT.MBB != NextBlock)
1786 BrCond = DAG.getNode(ISD::BR, getCurSDLoc(), MVT::Other, BrCond,
1787 DAG.getBasicBlock(JT.MBB));
1789 DAG.setRoot(BrCond);
1792 /// Codegen a new tail for a stack protector check ParentMBB which has had its
1793 /// tail spliced into a stack protector check success bb.
1795 /// For a high level explanation of how this fits into the stack protector
1796 /// generation see the comment on the declaration of class
1797 /// StackProtectorDescriptor.
1798 void SelectionDAGBuilder::visitSPDescriptorParent(StackProtectorDescriptor &SPD,
1799 MachineBasicBlock *ParentBB) {
1801 // First create the loads to the guard/stack slot for the comparison.
1802 const TargetLowering *TLI = TM.getTargetLowering();
1803 EVT PtrTy = TLI->getPointerTy();
1805 MachineFrameInfo *MFI = ParentBB->getParent()->getFrameInfo();
1806 int FI = MFI->getStackProtectorIndex();
1808 const Value *IRGuard = SPD.getGuard();
1809 SDValue GuardPtr = getValue(IRGuard);
1810 SDValue StackSlotPtr = DAG.getFrameIndex(FI, PtrTy);
1813 TLI->getDataLayout()->getPrefTypeAlignment(IRGuard->getType());
1814 SDValue Guard = DAG.getLoad(PtrTy, getCurSDLoc(), DAG.getEntryNode(),
1815 GuardPtr, MachinePointerInfo(IRGuard, 0),
1816 true, false, false, Align);
1818 SDValue StackSlot = DAG.getLoad(PtrTy, getCurSDLoc(), DAG.getEntryNode(),
1820 MachinePointerInfo::getFixedStack(FI),
1821 true, false, false, Align);
1823 // Perform the comparison via a subtract/getsetcc.
1824 EVT VT = Guard.getValueType();
1825 SDValue Sub = DAG.getNode(ISD::SUB, getCurSDLoc(), VT, Guard, StackSlot);
1827 SDValue Cmp = DAG.getSetCC(getCurSDLoc(),
1828 TLI->getSetCCResultType(*DAG.getContext(),
1829 Sub.getValueType()),
1830 Sub, DAG.getConstant(0, VT),
1833 // If the sub is not 0, then we know the guard and stack slot are not equal,
1834 // so branch to the failure MBB.
1835 SDValue BrCond = DAG.getNode(ISD::BRCOND, getCurSDLoc(),
1836 MVT::Other, StackSlot.getOperand(0),
1837 Cmp, DAG.getBasicBlock(SPD.getFailureMBB()));
1838 // Otherwise branch to success MBB.
1839 SDValue Br = DAG.getNode(ISD::BR, getCurSDLoc(),
1841 DAG.getBasicBlock(SPD.getSuccessMBB()));
1846 /// Codegen the failure basic block for a stack protector check.
1848 /// A failure stack protector machine basic block consists simply of a call to
1849 /// __stack_chk_fail().
1851 /// For a high level explanation of how this fits into the stack protector
1852 /// generation see the comment on the declaration of class
1853 /// StackProtectorDescriptor.
1855 SelectionDAGBuilder::visitSPDescriptorFailure(StackProtectorDescriptor &SPD) {
1856 const TargetLowering *TLI = TM.getTargetLowering();
1857 SDValue Chain = TLI->makeLibCall(DAG, RTLIB::STACKPROTECTOR_CHECK_FAIL,
1858 MVT::isVoid, nullptr, 0, false,
1859 getCurSDLoc(), false, false).second;
1863 /// visitBitTestHeader - This function emits the necessary code to produce a
1864 /// value suitable for "bit tests".
1865 void SelectionDAGBuilder::visitBitTestHeader(BitTestBlock &B,
1866 MachineBasicBlock *SwitchBB) {
1867 // Subtract the minimum value
1868 SDValue SwitchOp = getValue(B.SValue);
1869 EVT VT = SwitchOp.getValueType();
1870 SDValue Sub = DAG.getNode(ISD::SUB, getCurSDLoc(), VT, SwitchOp,
1871 DAG.getConstant(B.First, VT));
1874 const TargetLowering *TLI = TM.getTargetLowering();
1875 SDValue RangeCmp = DAG.getSetCC(getCurSDLoc(),
1876 TLI->getSetCCResultType(*DAG.getContext(),
1877 Sub.getValueType()),
1878 Sub, DAG.getConstant(B.Range, VT),
1881 // Determine the type of the test operands.
1882 bool UsePtrType = false;
1883 if (!TLI->isTypeLegal(VT))
1886 for (unsigned i = 0, e = B.Cases.size(); i != e; ++i)
1887 if (!isUIntN(VT.getSizeInBits(), B.Cases[i].Mask)) {
1888 // Switch table case ranges are encoded into a series of masks.
1889 // Just use the pointer type; it's guaranteed to fit.
1895 VT = TLI->getPointerTy();
1896 Sub = DAG.getZExtOrTrunc(Sub, getCurSDLoc(), VT);
1899 B.RegVT = VT.getSimpleVT();
1900 B.Reg = FuncInfo.CreateReg(B.RegVT);
1901 SDValue CopyTo = DAG.getCopyToReg(getControlRoot(), getCurSDLoc(),
1904 // Set NextBlock to be the MBB immediately after the current one, if any.
1905 // This is used to avoid emitting unnecessary branches to the next block.
1906 MachineBasicBlock *NextBlock = nullptr;
1907 MachineFunction::iterator BBI = SwitchBB;
1908 if (++BBI != FuncInfo.MF->end())
1911 MachineBasicBlock* MBB = B.Cases[0].ThisBB;
1913 addSuccessorWithWeight(SwitchBB, B.Default);
1914 addSuccessorWithWeight(SwitchBB, MBB);
1916 SDValue BrRange = DAG.getNode(ISD::BRCOND, getCurSDLoc(),
1917 MVT::Other, CopyTo, RangeCmp,
1918 DAG.getBasicBlock(B.Default));
1920 if (MBB != NextBlock)
1921 BrRange = DAG.getNode(ISD::BR, getCurSDLoc(), MVT::Other, CopyTo,
1922 DAG.getBasicBlock(MBB));
1924 DAG.setRoot(BrRange);
1927 /// visitBitTestCase - This function produces one "bit test".
1928 void SelectionDAGBuilder::visitBitTestCase(BitTestBlock &BB,
1929 MachineBasicBlock* NextMBB,
1930 uint32_t BranchWeightToNext,
1933 MachineBasicBlock *SwitchBB) {
1935 SDValue ShiftOp = DAG.getCopyFromReg(getControlRoot(), getCurSDLoc(),
1938 unsigned PopCount = CountPopulation_64(B.Mask);
1939 const TargetLowering *TLI = TM.getTargetLowering();
1940 if (PopCount == 1) {
1941 // Testing for a single bit; just compare the shift count with what it
1942 // would need to be to shift a 1 bit in that position.
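// Illustrative example (not in the original comments): if B.Mask == 0x8, only
// bit 3 is set, so instead of materializing (1 << ShiftOp) & 0x8 we simply
// compare ShiftOp against countTrailingZeros(0x8) == 3.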
1943 Cmp = DAG.getSetCC(getCurSDLoc(),
1944 TLI->getSetCCResultType(*DAG.getContext(), VT),
1946 DAG.getConstant(countTrailingZeros(B.Mask), VT),
1948 } else if (PopCount == BB.Range) {
1949 // There is only one zero bit in the range, test for it directly.
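// Illustrative example (not in the original comments): with BB.Range == 4 and
// B.Mask == 0b10111, the single zero bit sits at position 3, which equals
// CountTrailingOnes_64(B.Mask), so one compare of ShiftOp against 3 suffices.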
1950 Cmp = DAG.getSetCC(getCurSDLoc(),
1951 TLI->getSetCCResultType(*DAG.getContext(), VT),
1953 DAG.getConstant(CountTrailingOnes_64(B.Mask), VT),
1956 // Make desired shift
1957 SDValue SwitchVal = DAG.getNode(ISD::SHL, getCurSDLoc(), VT,
1958 DAG.getConstant(1, VT), ShiftOp);
1960 // Emit bit tests and jumps
1961 SDValue AndOp = DAG.getNode(ISD::AND, getCurSDLoc(),
1962 VT, SwitchVal, DAG.getConstant(B.Mask, VT));
1963 Cmp = DAG.getSetCC(getCurSDLoc(),
1964 TLI->getSetCCResultType(*DAG.getContext(), VT),
1965 AndOp, DAG.getConstant(0, VT),
1969 // The branch weight from SwitchBB to B.TargetBB is B.ExtraWeight.
1970 addSuccessorWithWeight(SwitchBB, B.TargetBB, B.ExtraWeight);
1971 // The branch weight from SwitchBB to NextMBB is BranchWeightToNext.
1972 addSuccessorWithWeight(SwitchBB, NextMBB, BranchWeightToNext);
1974 SDValue BrAnd = DAG.getNode(ISD::BRCOND, getCurSDLoc(),
1975 MVT::Other, getControlRoot(),
1976 Cmp, DAG.getBasicBlock(B.TargetBB));
1978 // Set NextBlock to be the MBB immediately after the current one, if any.
1979 // This is used to avoid emitting unnecessary branches to the next block.
1980 MachineBasicBlock *NextBlock = nullptr;
1981 MachineFunction::iterator BBI = SwitchBB;
1982 if (++BBI != FuncInfo.MF->end())
1985 if (NextMBB != NextBlock)
1986 BrAnd = DAG.getNode(ISD::BR, getCurSDLoc(), MVT::Other, BrAnd,
1987 DAG.getBasicBlock(NextMBB));
1992 void SelectionDAGBuilder::visitInvoke(const InvokeInst &I) {
1993 MachineBasicBlock *InvokeMBB = FuncInfo.MBB;
1995 // Retrieve successors.
1996 MachineBasicBlock *Return = FuncInfo.MBBMap[I.getSuccessor(0)];
1997 MachineBasicBlock *LandingPad = FuncInfo.MBBMap[I.getSuccessor(1)];
1999 const Value *Callee(I.getCalledValue());
2000 const Function *Fn = dyn_cast<Function>(Callee);
2001 if (isa<InlineAsm>(Callee))
2003 else if (Fn && Fn->isIntrinsic()) {
2004 assert(Fn->getIntrinsicID() == Intrinsic::donothing);
2005 // Ignore invokes to @llvm.donothing: jump directly to the next BB.
2007 LowerCallTo(&I, getValue(Callee), false, LandingPad);
2009 // If the value of the invoke is used outside of its defining block, make it
2010 // available as a virtual register.
2011 CopyToExportRegsIfNeeded(&I);
2013 // Update successor info
2014 addSuccessorWithWeight(InvokeMBB, Return);
2015 addSuccessorWithWeight(InvokeMBB, LandingPad);
2017 // Drop into normal successor.
2018 DAG.setRoot(DAG.getNode(ISD::BR, getCurSDLoc(),
2019 MVT::Other, getControlRoot(),
2020 DAG.getBasicBlock(Return)));
2023 void SelectionDAGBuilder::visitResume(const ResumeInst &RI) {
2024 llvm_unreachable("SelectionDAGBuilder shouldn't visit resume instructions!");
2027 void SelectionDAGBuilder::visitLandingPad(const LandingPadInst &LP) {
2028 assert(FuncInfo.MBB->isLandingPad() &&
2029 "Call to landingpad not in landing pad!");
2031 MachineBasicBlock *MBB = FuncInfo.MBB;
2032 MachineModuleInfo &MMI = DAG.getMachineFunction().getMMI();
2033 AddLandingPadInfo(LP, MMI, MBB);
2035 // If there aren't registers to copy the values into (e.g., during SjLj
2036 // exceptions), then don't bother to create these DAG nodes.
2037 const TargetLowering *TLI = TM.getTargetLowering();
2038 if (TLI->getExceptionPointerRegister() == 0 &&
2039 TLI->getExceptionSelectorRegister() == 0)
2042 SmallVector<EVT, 2> ValueVTs;
2043 ComputeValueVTs(*TLI, LP.getType(), ValueVTs);
2044 assert(ValueVTs.size() == 2 && "Only two-valued landingpads are supported");
2046 // Get the two live-in registers as SDValues. The physregs have already been
2047 // copied into virtual registers.
2049 Ops[0] = DAG.getZExtOrTrunc(
2050 DAG.getCopyFromReg(DAG.getEntryNode(), getCurSDLoc(),
2051 FuncInfo.ExceptionPointerVirtReg, TLI->getPointerTy()),
2052 getCurSDLoc(), ValueVTs[0]);
2053 Ops[1] = DAG.getZExtOrTrunc(
2054 DAG.getCopyFromReg(DAG.getEntryNode(), getCurSDLoc(),
2055 FuncInfo.ExceptionSelectorVirtReg, TLI->getPointerTy()),
2056 getCurSDLoc(), ValueVTs[1]);
2059 SDValue Res = DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(),
2060 DAG.getVTList(ValueVTs), Ops);
2064 /// handleSmallSwitchRange - Emit a series of specific tests (suitable for
2065 /// small case ranges).
2066 bool SelectionDAGBuilder::handleSmallSwitchRange(CaseRec& CR,
2067 CaseRecVector& WorkList,
2069 MachineBasicBlock *Default,
2070 MachineBasicBlock *SwitchBB) {
2071 // Size is the number of Cases represented by this range.
2072 size_t Size = CR.Range.second - CR.Range.first;
2076 // Get the MachineFunction which holds the current MBB. This is used when
2077 // inserting any additional MBBs necessary to represent the switch.
2078 MachineFunction *CurMF = FuncInfo.MF;
2080 // Figure out which block is immediately after the current one.
2081 MachineBasicBlock *NextBlock = nullptr;
2082 MachineFunction::iterator BBI = CR.CaseBB;
2084 if (++BBI != FuncInfo.MF->end())
2087 BranchProbabilityInfo *BPI = FuncInfo.BPI;
2088 // If any two of the cases have the same destination, and if one value
2089 // is the same as the other, but has one bit unset that the other has set,
2090 // use bit manipulation to do two compares at once. For example:
2091 // "if (X == 6 || X == 4)" -> "if ((X|2) == 6)"
2092 // TODO: This could be extended to merge any 2 cases in switches with 3 cases.
2093 // TODO: Handle cases where CR.CaseBB != SwitchBB.
2094 if (Size == 2 && CR.CaseBB == SwitchBB) {
2095 Case &Small = *CR.Range.first;
2096 Case &Big = *(CR.Range.second-1);
2098 if (Small.Low == Small.High && Big.Low == Big.High && Small.BB == Big.BB) {
2099 const APInt& SmallValue = cast<ConstantInt>(Small.Low)->getValue();
2100 const APInt& BigValue = cast<ConstantInt>(Big.Low)->getValue();
2102 // Check that there is only one bit different.
2103 if (BigValue.countPopulation() == SmallValue.countPopulation() + 1 &&
2104 (SmallValue | BigValue) == BigValue) {
2105 // Isolate the common bit.
2106 APInt CommonBit = BigValue & ~SmallValue;
2107 assert((SmallValue | CommonBit) == BigValue &&
2108 CommonBit.countPopulation() == 1 && "Not a common bit?");
2110 SDValue CondLHS = getValue(SV);
2111 EVT VT = CondLHS.getValueType();
2112 SDLoc DL = getCurSDLoc();
2114 SDValue Or = DAG.getNode(ISD::OR, DL, VT, CondLHS,
2115 DAG.getConstant(CommonBit, VT));
2116 SDValue Cond = DAG.getSetCC(DL, MVT::i1,
2117 Or, DAG.getConstant(BigValue, VT),
2120 // Update successor info.
2121 // Both Small and Big will jump to Small.BB, so we sum up the weights.
2122 addSuccessorWithWeight(SwitchBB, Small.BB,
2123 Small.ExtraWeight + Big.ExtraWeight);
2124 addSuccessorWithWeight(SwitchBB, Default,
2125 // The default destination is the first successor in IR.
2126 BPI ? BPI->getEdgeWeight(SwitchBB->getBasicBlock(), (unsigned)0) : 0);
2128 // Insert the true branch.
2129 SDValue BrCond = DAG.getNode(ISD::BRCOND, DL, MVT::Other,
2130 getControlRoot(), Cond,
2131 DAG.getBasicBlock(Small.BB));
2133 // Insert the false branch.
2134 BrCond = DAG.getNode(ISD::BR, DL, MVT::Other, BrCond,
2135 DAG.getBasicBlock(Default));
2137 DAG.setRoot(BrCond);
2143 // Order cases by weight so the most likely case will be checked first.
2144 uint32_t UnhandledWeights = 0;
2146 for (CaseItr I = CR.Range.first, IE = CR.Range.second; I != IE; ++I) {
2147 uint32_t IWeight = I->ExtraWeight;
2148 UnhandledWeights += IWeight;
2149 for (CaseItr J = CR.Range.first; J < I; ++J) {
2150 uint32_t JWeight = J->ExtraWeight;
2151 if (IWeight > JWeight)
2156 // Rearrange the case blocks so that the last one falls through if possible.
2157 Case &BackCase = *(CR.Range.second-1);
2159 NextBlock && Default != NextBlock && BackCase.BB != NextBlock) {
2160 // The last case block won't fall through into 'NextBlock' if we emit the
2161 // branches in this order. See if rearranging a case value would help.
2162 // We start at the bottom as it's the case with the least weight.
2163 for (Case *I = &*(CR.Range.second-2), *E = &*CR.Range.first-1; I != E; --I)
2164 if (I->BB == NextBlock) {
2165 std::swap(*I, BackCase);
2170 // Create a CaseBlock record representing a conditional branch to
2171 // the Case's target mbb if the value being switched on SV is equal to the Case's value.
2173 MachineBasicBlock *CurBlock = CR.CaseBB;
2174 for (CaseItr I = CR.Range.first, E = CR.Range.second; I != E; ++I) {
2175 MachineBasicBlock *FallThrough;
2177 FallThrough = CurMF->CreateMachineBasicBlock(CurBlock->getBasicBlock());
2178 CurMF->insert(BBI, FallThrough);
2180 // Put SV in a virtual register to make it available from the new blocks.
2181 ExportFromCurrentBlock(SV);
2183 // If the last case doesn't match, go to the default block.
2184 FallThrough = Default;
2187 const Value *RHS, *LHS, *MHS;
2189 if (I->High == I->Low) {
2190 // This is just a small case range containing exactly 1 case.
2192 LHS = SV; RHS = I->High; MHS = nullptr;
2195 LHS = I->Low; MHS = SV; RHS = I->High;
2198 // The false weight should be the sum of the weights of all unhandled cases.
2199 UnhandledWeights -= I->ExtraWeight;
2200 CaseBlock CB(CC, LHS, RHS, MHS, /* truebb */ I->BB, /* falsebb */ FallThrough,
2202 /* trueweight */ I->ExtraWeight,
2203 /* falseweight */ UnhandledWeights);
2205 // If emitting the first comparison, just call visitSwitchCase to emit the
2206 // code into the current block. Otherwise, push the CaseBlock onto the
2207 // vector to be later processed by SDISel, and insert the node's MBB
2208 // before the next MBB.
2209 if (CurBlock == SwitchBB)
2210 visitSwitchCase(CB, SwitchBB);
2212 SwitchCases.push_back(CB);
2214 CurBlock = FallThrough;
2220 static inline bool areJTsAllowed(const TargetLowering &TLI) {
2221 return TLI.supportJumpTables() &&
2222 (TLI.isOperationLegalOrCustom(ISD::BR_JT, MVT::Other) ||
2223 TLI.isOperationLegalOrCustom(ISD::BRIND, MVT::Other));
2226 static APInt ComputeRange(const APInt &First, const APInt &Last) {
2227 uint32_t BitWidth = std::max(Last.getBitWidth(), First.getBitWidth()) + 1;
2228 APInt LastExt = Last.sext(BitWidth), FirstExt = First.sext(BitWidth);
2229 return (LastExt - FirstExt + 1ULL);
2232 /// handleJTSwitchCase - Emit a jump table for the current switch case range.
2233 bool SelectionDAGBuilder::handleJTSwitchCase(CaseRec &CR,
2234 CaseRecVector &WorkList,
2236 MachineBasicBlock *Default,
2237 MachineBasicBlock *SwitchBB) {
2238 Case& FrontCase = *CR.Range.first;
2239 Case& BackCase = *(CR.Range.second-1);
2241 const APInt &First = cast<ConstantInt>(FrontCase.Low)->getValue();
2242 const APInt &Last = cast<ConstantInt>(BackCase.High)->getValue();
2244 APInt TSize(First.getBitWidth(), 0);
2245 for (CaseItr I = CR.Range.first, E = CR.Range.second; I != E; ++I)
2248 const TargetLowering *TLI = TM.getTargetLowering();
2249 if (!areJTsAllowed(*TLI) || TSize.ult(TLI->getMinimumJumpTableEntries()))
2252 APInt Range = ComputeRange(First, Last);
2253 // The density is TSize / Range. Require at least 40%.
2254 // It should not be possible for IntTSize to saturate for sane code, but make
2255 // sure we handle Range saturation correctly.
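// For instance (illustrative only): 5 case values spread over the value range
// [0, 11] give a density of 5/12, roughly 42%, which satisfies the check below.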
2256 uint64_t IntRange = Range.getLimitedValue(UINT64_MAX/10);
2257 uint64_t IntTSize = TSize.getLimitedValue(UINT64_MAX/10);
2258 if (IntTSize * 10 < IntRange * 4)
2261 DEBUG(dbgs() << "Lowering jump table\n"
2262 << "First entry: " << First << ". Last entry: " << Last << '\n'
2263 << "Range: " << Range << ". Size: " << TSize << ".\n\n");
2265 // Get the MachineFunction which holds the current MBB. This is used when
2266 // inserting any additional MBBs necessary to represent the switch.
2267 MachineFunction *CurMF = FuncInfo.MF;
2269 // Figure out which block is immediately after the current one.
2270 MachineFunction::iterator BBI = CR.CaseBB;
2273 const BasicBlock *LLVMBB = CR.CaseBB->getBasicBlock();
2275 // Create a new basic block to hold the code for loading the address
2276 // of the jump table, and jumping to it. Update successor information;
2277 // we will either branch to the default case for the switch, or the jump
2279 MachineBasicBlock *JumpTableBB = CurMF->CreateMachineBasicBlock(LLVMBB);
2280 CurMF->insert(BBI, JumpTableBB);
2282 addSuccessorWithWeight(CR.CaseBB, Default);
2283 addSuccessorWithWeight(CR.CaseBB, JumpTableBB);
2285 // Build a vector of destination BBs, corresponding to each target
2286 // of the jump table. If the value of the jump table slot corresponds to
2287 // a case statement, push the case's BB onto the vector; otherwise, push the default BB.
2289 std::vector<MachineBasicBlock*> DestBBs;
2291 for (CaseItr I = CR.Range.first, E = CR.Range.second; I != E; ++TEI) {
2292 const APInt &Low = cast<ConstantInt>(I->Low)->getValue();
2293 const APInt &High = cast<ConstantInt>(I->High)->getValue();
2295 if (Low.sle(TEI) && TEI.sle(High)) {
2296 DestBBs.push_back(I->BB);
2300 DestBBs.push_back(Default);
2304 // Calculate weight for each unique destination in CR.
2305 DenseMap<MachineBasicBlock*, uint32_t> DestWeights;
2307 for (CaseItr I = CR.Range.first, E = CR.Range.second; I != E; ++I) {
2308 DenseMap<MachineBasicBlock*, uint32_t>::iterator Itr =
2309 DestWeights.find(I->BB);
2310 if (Itr != DestWeights.end())
2311 Itr->second += I->ExtraWeight;
2313 DestWeights[I->BB] = I->ExtraWeight;
2316 // Update successor info. Add one edge to each unique successor.
2317 BitVector SuccsHandled(CR.CaseBB->getParent()->getNumBlockIDs());
2318 for (std::vector<MachineBasicBlock*>::iterator I = DestBBs.begin(),
2319 E = DestBBs.end(); I != E; ++I) {
2320 if (!SuccsHandled[(*I)->getNumber()]) {
2321 SuccsHandled[(*I)->getNumber()] = true;
2322 DenseMap<MachineBasicBlock*, uint32_t>::iterator Itr =
2323 DestWeights.find(*I);
2324 addSuccessorWithWeight(JumpTableBB, *I,
2325 Itr != DestWeights.end() ? Itr->second : 0);
2329 // Create a jump table index for this jump table.
2330 unsigned JTEncoding = TLI->getJumpTableEncoding();
2331 unsigned JTI = CurMF->getOrCreateJumpTableInfo(JTEncoding)
2332 ->createJumpTableIndex(DestBBs);
2334 // Set the jump table information so that we can codegen it as a second
2335 // MachineBasicBlock
2336 JumpTable JT(-1U, JTI, JumpTableBB, Default);
2337 JumpTableHeader JTH(First, Last, SV, CR.CaseBB, (CR.CaseBB == SwitchBB));
2338 if (CR.CaseBB == SwitchBB)
2339 visitJumpTableHeader(JT, JTH, SwitchBB);
2341 JTCases.push_back(JumpTableBlock(JTH, JT));
2345 /// handleBTSplitSwitchCase - Emit a comparison and split the binary search tree into two subtrees.
2347 bool SelectionDAGBuilder::handleBTSplitSwitchCase(CaseRec& CR,
2348 CaseRecVector& WorkList,
2350 MachineBasicBlock* Default,
2351 MachineBasicBlock* SwitchBB) {
2352 // Get the MachineFunction which holds the current MBB. This is used when
2353 // inserting any additional MBBs necessary to represent the switch.
2354 MachineFunction *CurMF = FuncInfo.MF;
2356 // Figure out which block is immediately after the current one.
2357 MachineFunction::iterator BBI = CR.CaseBB;
2360 Case& FrontCase = *CR.Range.first;
2361 Case& BackCase = *(CR.Range.second-1);
2362 const BasicBlock *LLVMBB = CR.CaseBB->getBasicBlock();
2364 // Size is the number of Cases represented by this range.
2365 unsigned Size = CR.Range.second - CR.Range.first;
2367 const APInt &First = cast<ConstantInt>(FrontCase.Low)->getValue();
2368 const APInt &Last = cast<ConstantInt>(BackCase.High)->getValue();
2370 CaseItr Pivot = CR.Range.first + Size/2;
2372 // Select optimal pivot, maximizing sum density of LHS and RHS. This will
2373 // (heuristically) allow us to emit JumpTable's later.
2374 APInt TSize(First.getBitWidth(), 0);
2375 for (CaseItr I = CR.Range.first, E = CR.Range.second;
2379 APInt LSize = FrontCase.size();
2380 APInt RSize = TSize-LSize;
2381 DEBUG(dbgs() << "Selecting best pivot: \n"
2382 << "First: " << First << ", Last: " << Last <<'\n'
2383 << "LSize: " << LSize << ", RSize: " << RSize << '\n');
2384 for (CaseItr I = CR.Range.first, J=I+1, E = CR.Range.second;
2386 const APInt &LEnd = cast<ConstantInt>(I->High)->getValue();
2387 const APInt &RBegin = cast<ConstantInt>(J->Low)->getValue();
2388 APInt Range = ComputeRange(LEnd, RBegin);
2389 assert((Range - 2ULL).isNonNegative() &&
2390 "Invalid case distance");
2391 // Use volatile double here to avoid excess precision issues on some hosts,
2392 // e.g. that use 80-bit X87 registers.
2393 volatile double LDensity =
2394 (double)LSize.roundToDouble() /
2395 (LEnd - First + 1ULL).roundToDouble();
2396 volatile double RDensity =
2397 (double)RSize.roundToDouble() /
2398 (Last - RBegin + 1ULL).roundToDouble();
2399 volatile double Metric = Range.logBase2()*(LDensity+RDensity);
2400 // Should always split in some non-trivial place
2401 DEBUG(dbgs() <<"=>Step\n"
2402 << "LEnd: " << LEnd << ", RBegin: " << RBegin << '\n'
2403 << "LDensity: " << LDensity
2404 << ", RDensity: " << RDensity << '\n'
2405 << "Metric: " << Metric << '\n');
2406 if (FMetric < Metric) {
2409 DEBUG(dbgs() << "Current metric set to: " << FMetric << '\n');
2416 const TargetLowering *TLI = TM.getTargetLowering();
2417 if (areJTsAllowed(*TLI)) {
2418 // If our case is dense we *really* should handle it earlier!
2419 assert((FMetric > 0) && "Should handle dense range earlier!");
2421 Pivot = CR.Range.first + Size/2;
2424 CaseRange LHSR(CR.Range.first, Pivot);
2425 CaseRange RHSR(Pivot, CR.Range.second);
2426 const Constant *C = Pivot->Low;
2427 MachineBasicBlock *FalseBB = nullptr, *TrueBB = nullptr;
2429 // We know that we branch to the LHS if the Value being switched on is
2430 // less than the Pivot value, C. We use this to optimize our binary
2431 // tree a bit, by recognizing that if SV is greater than or equal to the
2432 // LHS's Case Value, and that Case Value is exactly one less than the
2433 // Pivot's Value, then we can branch directly to the LHS's Target,
2434 // rather than creating a leaf node for it.
2435 if ((LHSR.second - LHSR.first) == 1 &&
2436 LHSR.first->High == CR.GE &&
2437 cast<ConstantInt>(C)->getValue() ==
2438 (cast<ConstantInt>(CR.GE)->getValue() + 1LL)) {
2439 TrueBB = LHSR.first->BB;
2441 TrueBB = CurMF->CreateMachineBasicBlock(LLVMBB);
2442 CurMF->insert(BBI, TrueBB);
2443 WorkList.push_back(CaseRec(TrueBB, C, CR.GE, LHSR));
2445 // Put SV in a virtual register to make it available from the new blocks.
2446 ExportFromCurrentBlock(SV);
2449 // Similar to the optimization above, if the Value being switched on is
2450 // known to be less than the Constant CR.LT, and the current Case Value
2451 // is CR.LT - 1, then we can branch directly to the target block for
2452 // the current Case Value, rather than emitting a RHS leaf node for it.
2453 if ((RHSR.second - RHSR.first) == 1 && CR.LT &&
2454 cast<ConstantInt>(RHSR.first->Low)->getValue() ==
2455 (cast<ConstantInt>(CR.LT)->getValue() - 1LL)) {
2456 FalseBB = RHSR.first->BB;
2458 FalseBB = CurMF->CreateMachineBasicBlock(LLVMBB);
2459 CurMF->insert(BBI, FalseBB);
2460 WorkList.push_back(CaseRec(FalseBB,CR.LT,C,RHSR));
2462 // Put SV in a virtual register to make it available from the new blocks.
2463 ExportFromCurrentBlock(SV);
2466 // Create a CaseBlock record representing a conditional branch to
2467 // the LHS node if the value being switched on SV is less than C.
2468 // Otherwise, branch to the RHS node.
2469 CaseBlock CB(ISD::SETLT, SV, C, nullptr, TrueBB, FalseBB, CR.CaseBB);
2471 if (CR.CaseBB == SwitchBB)
2472 visitSwitchCase(CB, SwitchBB);
2474 SwitchCases.push_back(CB);
2479 /// handleBitTestsSwitchCase - If the current case range has few destinations
2480 /// and spans less than the machine word bitwidth, encode the case range into a
2481 /// series of masks and emit bit tests with these masks.
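/// For example (an illustration, not from the source): on a 64-bit target the
/// cases {0, 3, 5} -> BB1 and {1, 4} -> BB2 are encoded as the masks 0b101001
/// and 0b010010, and each destination is reached through a single
/// "(1 << (X - low)) & mask" style test.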
2482 bool SelectionDAGBuilder::handleBitTestsSwitchCase(CaseRec& CR,
2483 CaseRecVector& WorkList,
2485 MachineBasicBlock* Default,
2486 MachineBasicBlock* SwitchBB) {
2487 const TargetLowering *TLI = TM.getTargetLowering();
2488 EVT PTy = TLI->getPointerTy();
2489 unsigned IntPtrBits = PTy.getSizeInBits();
2491 Case& FrontCase = *CR.Range.first;
2492 Case& BackCase = *(CR.Range.second-1);
2494 // Get the MachineFunction which holds the current MBB. This is used when
2495 // inserting any additional MBBs necessary to represent the switch.
2496 MachineFunction *CurMF = FuncInfo.MF;
2498 // If the target does not have a legal shift left, do not emit bit tests at all.
2499 if (!TLI->isOperationLegal(ISD::SHL, PTy))
2503 for (CaseItr I = CR.Range.first, E = CR.Range.second;
2505 // A single case counts as one comparison, a case range as two.
2506 numCmps += (I->Low == I->High ? 1 : 2);
2509 // Count unique destinations
2510 SmallSet<MachineBasicBlock*, 4> Dests;
2511 for (CaseItr I = CR.Range.first, E = CR.Range.second; I!=E; ++I) {
2512 Dests.insert(I->BB);
2513 if (Dests.size() > 3)
2514 // Don't bother with the code below if there are too many unique destinations.
2517 DEBUG(dbgs() << "Total number of unique destinations: "
2518 << Dests.size() << '\n'
2519 << "Total number of comparisons: " << numCmps << '\n');
2521 // Compute span of values.
2522 const APInt& minValue = cast<ConstantInt>(FrontCase.Low)->getValue();
2523 const APInt& maxValue = cast<ConstantInt>(BackCase.High)->getValue();
2524 APInt cmpRange = maxValue - minValue;
2526 DEBUG(dbgs() << "Compare range: " << cmpRange << '\n'
2527 << "Low bound: " << minValue << '\n'
2528 << "High bound: " << maxValue << '\n');
2530 if (cmpRange.uge(IntPtrBits) ||
2531 (!(Dests.size() == 1 && numCmps >= 3) &&
2532 !(Dests.size() == 2 && numCmps >= 5) &&
2533 !(Dests.size() >= 3 && numCmps >= 6)))
2536 DEBUG(dbgs() << "Emitting bit tests\n");
2537 APInt lowBound = APInt::getNullValue(cmpRange.getBitWidth());
2539 // Optimize the case where all the case values fit in a
2540 // word without having to subtract minValue. In this case,
2541 // we can omit the subtraction entirely.
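// Illustrative example (not from the source): with minValue == 2 and
// maxValue == 50 on a 64-bit target, lowBound stays 0 and cmpRange becomes 50,
// so the masks below are built directly from the raw case values.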
2542 if (minValue.isNonNegative() && maxValue.slt(IntPtrBits)) {
2543 cmpRange = maxValue;
2545 lowBound = minValue;
2548 CaseBitsVector CasesBits;
2549 unsigned i, count = 0;
2551 for (CaseItr I = CR.Range.first, E = CR.Range.second; I!=E; ++I) {
2552 MachineBasicBlock* Dest = I->BB;
2553 for (i = 0; i < count; ++i)
2554 if (Dest == CasesBits[i].BB)
2558 assert((count < 3) && "Too many destinations to test!");
2559 CasesBits.push_back(CaseBits(0, Dest, 0, 0/*Weight*/));
2563 const APInt& lowValue = cast<ConstantInt>(I->Low)->getValue();
2564 const APInt& highValue = cast<ConstantInt>(I->High)->getValue();
2566 uint64_t lo = (lowValue - lowBound).getZExtValue();
2567 uint64_t hi = (highValue - lowBound).getZExtValue();
2568 CasesBits[i].ExtraWeight += I->ExtraWeight;
2570 for (uint64_t j = lo; j <= hi; j++) {
2571 CasesBits[i].Mask |= 1ULL << j;
2572 CasesBits[i].Bits++;
2576 std::sort(CasesBits.begin(), CasesBits.end(), CaseBitsCmp());
2580 // Figure out which block is immediately after the current one.
2581 MachineFunction::iterator BBI = CR.CaseBB;
2584 const BasicBlock *LLVMBB = CR.CaseBB->getBasicBlock();
2586 DEBUG(dbgs() << "Cases:\n");
2587 for (unsigned i = 0, e = CasesBits.size(); i!=e; ++i) {
2588 DEBUG(dbgs() << "Mask: " << CasesBits[i].Mask
2589 << ", Bits: " << CasesBits[i].Bits
2590 << ", BB: " << CasesBits[i].BB << '\n');
2592 MachineBasicBlock *CaseBB = CurMF->CreateMachineBasicBlock(LLVMBB);
2593 CurMF->insert(BBI, CaseBB);
2594 BTC.push_back(BitTestCase(CasesBits[i].Mask,
2596 CasesBits[i].BB, CasesBits[i].ExtraWeight));
2598 // Put SV in a virtual register to make it available from the new blocks.
2599 ExportFromCurrentBlock(SV);
2602 BitTestBlock BTB(lowBound, cmpRange, SV,
2603 -1U, MVT::Other, (CR.CaseBB == SwitchBB),
2604 CR.CaseBB, Default, BTC);
2606 if (CR.CaseBB == SwitchBB)
2607 visitBitTestHeader(BTB, SwitchBB);
2609 BitTestCases.push_back(BTB);
2614 /// Clusterify - Transform a simple list of Cases into a list of CaseRanges.
2615 size_t SelectionDAGBuilder::Clusterify(CaseVector& Cases,
2616 const SwitchInst& SI) {
2619 BranchProbabilityInfo *BPI = FuncInfo.BPI;
2620 // Start with "simple" cases
2621 for (SwitchInst::ConstCaseIt i = SI.case_begin(), e = SI.case_end();
2623 const BasicBlock *SuccBB = i.getCaseSuccessor();
2624 MachineBasicBlock *SMBB = FuncInfo.MBBMap[SuccBB];
2626 uint32_t ExtraWeight =
2627 BPI ? BPI->getEdgeWeight(SI.getParent(), i.getSuccessorIndex()) : 0;
2629 Cases.push_back(Case(i.getCaseValue(), i.getCaseValue(),
2630 SMBB, ExtraWeight));
2632 std::sort(Cases.begin(), Cases.end(), CaseCmp());
2634 // Merge cases into clusters.
2635 if (Cases.size() >= 2)
2636 // Must recompute end() each iteration because it may be
2637 // invalidated by erase if we hold on to it
2638 for (CaseItr I = Cases.begin(), J = std::next(Cases.begin());
2639 J != Cases.end(); ) {
2640 const APInt& nextValue = cast<ConstantInt>(J->Low)->getValue();
2641 const APInt& currentValue = cast<ConstantInt>(I->High)->getValue();
2642 MachineBasicBlock* nextBB = J->BB;
2643 MachineBasicBlock* currentBB = I->BB;
2645 // If the two neighboring cases go to the same destination, merge them
2646 // into a single case.
2647 if ((nextValue - currentValue == 1) && (currentBB == nextBB)) {
2649 I->ExtraWeight += J->ExtraWeight;
2656 for (CaseItr I=Cases.begin(), E=Cases.end(); I!=E; ++I, ++numCmps) {
2657 if (I->Low != I->High)
2658 // A range counts double, since it requires two compares.
2665 void SelectionDAGBuilder::UpdateSplitBlock(MachineBasicBlock *First,
2666 MachineBasicBlock *Last) {
2668 for (unsigned i = 0, e = JTCases.size(); i != e; ++i)
2669 if (JTCases[i].first.HeaderBB == First)
2670 JTCases[i].first.HeaderBB = Last;
2672 // Update BitTestCases.
2673 for (unsigned i = 0, e = BitTestCases.size(); i != e; ++i)
2674 if (BitTestCases[i].Parent == First)
2675 BitTestCases[i].Parent = Last;
2678 void SelectionDAGBuilder::visitSwitch(const SwitchInst &SI) {
2679 MachineBasicBlock *SwitchMBB = FuncInfo.MBB;
2681 // Figure out which block is immediately after the current one.
2682 MachineBasicBlock *NextBlock = nullptr;
2683 MachineBasicBlock *Default = FuncInfo.MBBMap[SI.getDefaultDest()];
2685 // If there is only the default destination, branch to it if it is not the
2686 // next basic block. Otherwise, just fall through.
2687 if (!SI.getNumCases()) {
2688 // Update machine-CFG edges.
2690 // If this is not a fall-through branch, emit the branch.
2691 SwitchMBB->addSuccessor(Default);
2692 if (Default != NextBlock)
2693 DAG.setRoot(DAG.getNode(ISD::BR, getCurSDLoc(),
2694 MVT::Other, getControlRoot(),
2695 DAG.getBasicBlock(Default)));
2700 // If there are any non-default case statements, create a vector of Cases
2701 // representing each one, and sort the vector so that we can efficiently
2702 // create a binary search tree from them.
2704 size_t numCmps = Clusterify(Cases, SI);
2705 DEBUG(dbgs() << "Clusterify finished. Total clusters: " << Cases.size()
2706 << ". Total compares: " << numCmps << '\n');
2709 // Get the Value to be switched on and default basic blocks, which will be
2710 // inserted into CaseBlock records, representing basic blocks in the binary search tree.
2712 const Value *SV = SI.getCondition();
2714 // Push the initial CaseRec onto the worklist
2715 CaseRecVector WorkList;
2716 WorkList.push_back(CaseRec(SwitchMBB,nullptr,nullptr,
2717 CaseRange(Cases.begin(),Cases.end())));
2719 while (!WorkList.empty()) {
2720 // Grab a record representing a case range to process off the worklist
2721 CaseRec CR = WorkList.back();
2722 WorkList.pop_back();
2724 if (handleBitTestsSwitchCase(CR, WorkList, SV, Default, SwitchMBB))
2727 // If the range has few cases (two or fewer), emit a series of specific tests.
2729 if (handleSmallSwitchRange(CR, WorkList, SV, Default, SwitchMBB))
2732 // If the switch has more than N blocks, and is at least 40% dense, and the
2733 // target supports indirect branches, then emit a jump table rather than
2734 // lowering the switch to a binary tree of conditional branches.
2735 // N defaults to 4 and is controlled via TLI.getMinimumJumpTableEntries().
2736 if (handleJTSwitchCase(CR, WorkList, SV, Default, SwitchMBB))
2739 // Emit binary tree. We need to pick a pivot, and push left and right ranges
2740 // onto the worklist. Leaves are handled via the handleSmallSwitchRange() call.
2741 handleBTSplitSwitchCase(CR, WorkList, SV, Default, SwitchMBB);
2745 void SelectionDAGBuilder::visitIndirectBr(const IndirectBrInst &I) {
2746 MachineBasicBlock *IndirectBrMBB = FuncInfo.MBB;
2748 // Update machine-CFG edges with unique successors.
2749 SmallSet<BasicBlock*, 32> Done;
2750 for (unsigned i = 0, e = I.getNumSuccessors(); i != e; ++i) {
2751 BasicBlock *BB = I.getSuccessor(i);
2752 bool Inserted = Done.insert(BB);
2756 MachineBasicBlock *Succ = FuncInfo.MBBMap[BB];
2757 addSuccessorWithWeight(IndirectBrMBB, Succ);
2760 DAG.setRoot(DAG.getNode(ISD::BRIND, getCurSDLoc(),
2761 MVT::Other, getControlRoot(),
2762 getValue(I.getAddress())));
2765 void SelectionDAGBuilder::visitUnreachable(const UnreachableInst &I) {
2766 if (DAG.getTarget().Options.TrapUnreachable)
2767 DAG.setRoot(DAG.getNode(ISD::TRAP, getCurSDLoc(), MVT::Other, DAG.getRoot()));
2770 void SelectionDAGBuilder::visitFSub(const User &I) {
2771 // -0.0 - X --> fneg
2772 Type *Ty = I.getType();
2773 if (isa<Constant>(I.getOperand(0)) &&
2774 I.getOperand(0) == ConstantFP::getZeroValueForNegation(Ty)) {
2775 SDValue Op2 = getValue(I.getOperand(1));
2776 setValue(&I, DAG.getNode(ISD::FNEG, getCurSDLoc(),
2777 Op2.getValueType(), Op2));
2781 visitBinary(I, ISD::FSUB);
2784 void SelectionDAGBuilder::visitBinary(const User &I, unsigned OpCode) {
2785 SDValue Op1 = getValue(I.getOperand(0));
2786 SDValue Op2 = getValue(I.getOperand(1));
2791 if (const OverflowingBinaryOperator *OFBinOp =
2792 dyn_cast<const OverflowingBinaryOperator>(&I)) {
2793 nuw = OFBinOp->hasNoUnsignedWrap();
2794 nsw = OFBinOp->hasNoSignedWrap();
2796 if (const PossiblyExactOperator *ExactOp =
2797 dyn_cast<const PossiblyExactOperator>(&I))
2798 exact = ExactOp->isExact();
2800 SDValue BinNodeValue = DAG.getNode(OpCode, getCurSDLoc(), Op1.getValueType(),
2801 Op1, Op2, nuw, nsw, exact);
2802 setValue(&I, BinNodeValue);
2805 void SelectionDAGBuilder::visitShift(const User &I, unsigned Opcode) {
2806 SDValue Op1 = getValue(I.getOperand(0));
2807 SDValue Op2 = getValue(I.getOperand(1));
2809 EVT ShiftTy = TM.getTargetLowering()->getShiftAmountTy(Op2.getValueType());
2811 // Coerce the shift amount to the right type if we can.
2812 if (!I.getType()->isVectorTy() && Op2.getValueType() != ShiftTy) {
2813 unsigned ShiftSize = ShiftTy.getSizeInBits();
2814 unsigned Op2Size = Op2.getValueType().getSizeInBits();
2815 SDLoc DL = getCurSDLoc();
2817 // If the operand is smaller than the shift count type, promote it.
2818 if (ShiftSize > Op2Size)
2819 Op2 = DAG.getNode(ISD::ZERO_EXTEND, DL, ShiftTy, Op2);
2821 // If the operand is larger than the shift count type but the shift
2822 // count type has enough bits to represent any shift value, truncate
2823 // it now. This is a common case and it exposes the truncate to
2824 // optimization early.
2825 else if (ShiftSize >= Log2_32_Ceil(Op2.getValueType().getSizeInBits()))
2826 Op2 = DAG.getNode(ISD::TRUNCATE, DL, ShiftTy, Op2);
2827 // Otherwise we'll need to temporarily settle for some other convenient
2828 // type. Type legalization will make adjustments once the shiftee is split.
2830 Op2 = DAG.getZExtOrTrunc(Op2, DL, MVT::i32);
2837 if (Opcode == ISD::SRL || Opcode == ISD::SRA || Opcode == ISD::SHL) {
2839 if (const OverflowingBinaryOperator *OFBinOp =
2840 dyn_cast<const OverflowingBinaryOperator>(&I)) {
2841 nuw = OFBinOp->hasNoUnsignedWrap();
2842 nsw = OFBinOp->hasNoSignedWrap();
2844 if (const PossiblyExactOperator *ExactOp =
2845 dyn_cast<const PossiblyExactOperator>(&I))
2846 exact = ExactOp->isExact();
2849 SDValue Res = DAG.getNode(Opcode, getCurSDLoc(), Op1.getValueType(), Op1, Op2,
2854 void SelectionDAGBuilder::visitSDiv(const User &I) {
2855 SDValue Op1 = getValue(I.getOperand(0));
2856 SDValue Op2 = getValue(I.getOperand(1));
2858 // Turn exact SDivs into multiplications.
2859 // FIXME: This should be in DAGCombiner, but it doesn't have access to the exact bit.
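// Illustrative example (not in the original comments): an exact sdiv of an
// i32 value by 3 can be lowered to a multiply by 0xAAAAAAAB, the multiplicative
// inverse of 3 modulo 2^32, since the exact flag guarantees there is no
// remainder.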
2861 if (isa<BinaryOperator>(&I) && cast<BinaryOperator>(&I)->isExact() &&
2862 !isa<ConstantSDNode>(Op1) &&
2863 isa<ConstantSDNode>(Op2) && !cast<ConstantSDNode>(Op2)->isNullValue())
2864 setValue(&I, TM.getTargetLowering()->BuildExactSDIV(Op1, Op2,
2865 getCurSDLoc(), DAG));
2867 setValue(&I, DAG.getNode(ISD::SDIV, getCurSDLoc(), Op1.getValueType(),
2871 void SelectionDAGBuilder::visitICmp(const User &I) {
2872 ICmpInst::Predicate predicate = ICmpInst::BAD_ICMP_PREDICATE;
2873 if (const ICmpInst *IC = dyn_cast<ICmpInst>(&I))
2874 predicate = IC->getPredicate();
2875 else if (const ConstantExpr *IC = dyn_cast<ConstantExpr>(&I))
2876 predicate = ICmpInst::Predicate(IC->getPredicate());
2877 SDValue Op1 = getValue(I.getOperand(0));
2878 SDValue Op2 = getValue(I.getOperand(1));
2879 ISD::CondCode Opcode = getICmpCondCode(predicate);
2881 EVT DestVT = TM.getTargetLowering()->getValueType(I.getType());
2882 setValue(&I, DAG.getSetCC(getCurSDLoc(), DestVT, Op1, Op2, Opcode));
2885 void SelectionDAGBuilder::visitFCmp(const User &I) {
2886 FCmpInst::Predicate predicate = FCmpInst::BAD_FCMP_PREDICATE;
2887 if (const FCmpInst *FC = dyn_cast<FCmpInst>(&I))
2888 predicate = FC->getPredicate();
2889 else if (const ConstantExpr *FC = dyn_cast<ConstantExpr>(&I))
2890 predicate = FCmpInst::Predicate(FC->getPredicate());
2891 SDValue Op1 = getValue(I.getOperand(0));
2892 SDValue Op2 = getValue(I.getOperand(1));
2893 ISD::CondCode Condition = getFCmpCondCode(predicate);
2894 if (TM.Options.NoNaNsFPMath)
2895 Condition = getFCmpCodeWithoutNaN(Condition);
2896 EVT DestVT = TM.getTargetLowering()->getValueType(I.getType());
2897 setValue(&I, DAG.getSetCC(getCurSDLoc(), DestVT, Op1, Op2, Condition));
2900 void SelectionDAGBuilder::visitSelect(const User &I) {
2901 SmallVector<EVT, 4> ValueVTs;
2902 ComputeValueVTs(*TM.getTargetLowering(), I.getType(), ValueVTs);
2903 unsigned NumValues = ValueVTs.size();
2904 if (NumValues == 0) return;
2906 SmallVector<SDValue, 4> Values(NumValues);
2907 SDValue Cond = getValue(I.getOperand(0));
2908 SDValue TrueVal = getValue(I.getOperand(1));
2909 SDValue FalseVal = getValue(I.getOperand(2));
2910 ISD::NodeType OpCode = Cond.getValueType().isVector() ?
2911 ISD::VSELECT : ISD::SELECT;
2913 for (unsigned i = 0; i != NumValues; ++i)
2914 Values[i] = DAG.getNode(OpCode, getCurSDLoc(),
2915 TrueVal.getNode()->getValueType(TrueVal.getResNo()+i),
2917 SDValue(TrueVal.getNode(),
2918 TrueVal.getResNo() + i),
2919 SDValue(FalseVal.getNode(),
2920 FalseVal.getResNo() + i));
2922 setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(),
2923 DAG.getVTList(ValueVTs), Values));
2926 void SelectionDAGBuilder::visitTrunc(const User &I) {
2927 // TruncInst cannot be a no-op cast because sizeof(src) > sizeof(dest).
2928 SDValue N = getValue(I.getOperand(0));
2929 EVT DestVT = TM.getTargetLowering()->getValueType(I.getType());
2930 setValue(&I, DAG.getNode(ISD::TRUNCATE, getCurSDLoc(), DestVT, N));
2933 void SelectionDAGBuilder::visitZExt(const User &I) {
2934 // ZExt cannot be a no-op cast because sizeof(src) < sizeof(dest).
2935 // ZExt also can't be a cast to bool for the same reason, so there's nothing much to do here.
2936 SDValue N = getValue(I.getOperand(0));
2937 EVT DestVT = TM.getTargetLowering()->getValueType(I.getType());
2938 setValue(&I, DAG.getNode(ISD::ZERO_EXTEND, getCurSDLoc(), DestVT, N));
2941 void SelectionDAGBuilder::visitSExt(const User &I) {
2942 // SExt cannot be a no-op cast because sizeof(src) < sizeof(dest).
2943 // SExt also can't be a cast to bool for the same reason, so there's nothing much to do here.
2944 SDValue N = getValue(I.getOperand(0));
2945 EVT DestVT = TM.getTargetLowering()->getValueType(I.getType());
2946 setValue(&I, DAG.getNode(ISD::SIGN_EXTEND, getCurSDLoc(), DestVT, N));
2949 void SelectionDAGBuilder::visitFPTrunc(const User &I) {
2950 // FPTrunc is never a no-op cast, no need to check
2951 SDValue N = getValue(I.getOperand(0));
2952 const TargetLowering *TLI = TM.getTargetLowering();
2953 EVT DestVT = TLI->getValueType(I.getType());
2954 setValue(&I, DAG.getNode(ISD::FP_ROUND, getCurSDLoc(),
2956 DAG.getTargetConstant(0, TLI->getPointerTy())));
2959 void SelectionDAGBuilder::visitFPExt(const User &I) {
2960 // FPExt is never a no-op cast, no need to check
2961 SDValue N = getValue(I.getOperand(0));
2962 EVT DestVT = TM.getTargetLowering()->getValueType(I.getType());
2963 setValue(&I, DAG.getNode(ISD::FP_EXTEND, getCurSDLoc(), DestVT, N));
2966 void SelectionDAGBuilder::visitFPToUI(const User &I) {
2967 // FPToUI is never a no-op cast, no need to check
2968 SDValue N = getValue(I.getOperand(0));
2969 EVT DestVT = TM.getTargetLowering()->getValueType(I.getType());
2970 setValue(&I, DAG.getNode(ISD::FP_TO_UINT, getCurSDLoc(), DestVT, N));
2973 void SelectionDAGBuilder::visitFPToSI(const User &I) {
2974 // FPToSI is never a no-op cast, no need to check
2975 SDValue N = getValue(I.getOperand(0));
2976 EVT DestVT = TM.getTargetLowering()->getValueType(I.getType());
2977 setValue(&I, DAG.getNode(ISD::FP_TO_SINT, getCurSDLoc(), DestVT, N));
2980 void SelectionDAGBuilder::visitUIToFP(const User &I) {
2981 // UIToFP is never a no-op cast, no need to check
2982 SDValue N = getValue(I.getOperand(0));
2983 EVT DestVT = TM.getTargetLowering()->getValueType(I.getType());
2984 setValue(&I, DAG.getNode(ISD::UINT_TO_FP, getCurSDLoc(), DestVT, N));
2987 void SelectionDAGBuilder::visitSIToFP(const User &I) {
2988 // SIToFP is never a no-op cast, no need to check
2989 SDValue N = getValue(I.getOperand(0));
2990 EVT DestVT = TM.getTargetLowering()->getValueType(I.getType());
2991 setValue(&I, DAG.getNode(ISD::SINT_TO_FP, getCurSDLoc(), DestVT, N));
2994 void SelectionDAGBuilder::visitPtrToInt(const User &I) {
2995 // What to do depends on the size of the integer and the size of the pointer.
2996 // We can either truncate, zero extend, or no-op, accordingly.
2997 SDValue N = getValue(I.getOperand(0));
2998 EVT DestVT = TM.getTargetLowering()->getValueType(I.getType());
2999 setValue(&I, DAG.getZExtOrTrunc(N, getCurSDLoc(), DestVT));
3002 void SelectionDAGBuilder::visitIntToPtr(const User &I) {
3003 // What to do depends on the size of the integer and the size of the pointer.
3004 // We can either truncate, zero extend, or no-op, accordingly.
3005 SDValue N = getValue(I.getOperand(0));
3006 EVT DestVT = TM.getTargetLowering()->getValueType(I.getType());
3007 setValue(&I, DAG.getZExtOrTrunc(N, getCurSDLoc(), DestVT));
3010 void SelectionDAGBuilder::visitBitCast(const User &I) {
3011 SDValue N = getValue(I.getOperand(0));
3012 EVT DestVT = TM.getTargetLowering()->getValueType(I.getType());
3014 // BitCast assures us that source and destination are the same size so this is
3015 // either a BITCAST or a no-op.
3016 if (DestVT != N.getValueType())
3017 setValue(&I, DAG.getNode(ISD::BITCAST, getCurSDLoc(),
3018 DestVT, N)); // convert types.
3019 // Check if the original LLVM IR Operand was a ConstantInt, because getValue()
3020 // might fold any kind of constant expression to an integer constant and that
3021 // is not what we are looking for. Only recognize a bitcast of a genuine
3022 // constant integer as an opaque constant.
3023 else if(ConstantInt *C = dyn_cast<ConstantInt>(I.getOperand(0)))
3024 setValue(&I, DAG.getConstant(C->getValue(), DestVT, /*isTarget=*/false,
3027 setValue(&I, N); // noop cast.
3030 void SelectionDAGBuilder::visitAddrSpaceCast(const User &I) {
3031 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3032 const Value *SV = I.getOperand(0);
3033 SDValue N = getValue(SV);
3034 EVT DestVT = TM.getTargetLowering()->getValueType(I.getType());
3036 unsigned SrcAS = SV->getType()->getPointerAddressSpace();
3037 unsigned DestAS = I.getType()->getPointerAddressSpace();
3039 if (!TLI.isNoopAddrSpaceCast(SrcAS, DestAS))
3040 N = DAG.getAddrSpaceCast(getCurSDLoc(), DestVT, N, SrcAS, DestAS);
3045 void SelectionDAGBuilder::visitInsertElement(const User &I) {
3046 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3047 SDValue InVec = getValue(I.getOperand(0));
3048 SDValue InVal = getValue(I.getOperand(1));
3049 SDValue InIdx = DAG.getSExtOrTrunc(getValue(I.getOperand(2)),
3050 getCurSDLoc(), TLI.getVectorIdxTy());
3051 setValue(&I, DAG.getNode(ISD::INSERT_VECTOR_ELT, getCurSDLoc(),
3052 TM.getTargetLowering()->getValueType(I.getType()),
3053 InVec, InVal, InIdx));
3056 void SelectionDAGBuilder::visitExtractElement(const User &I) {
3057 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3058 SDValue InVec = getValue(I.getOperand(0));
3059 SDValue InIdx = DAG.getSExtOrTrunc(getValue(I.getOperand(1)),
3060 getCurSDLoc(), TLI.getVectorIdxTy());
3061 setValue(&I, DAG.getNode(ISD::EXTRACT_VECTOR_ELT, getCurSDLoc(),
3062 TM.getTargetLowering()->getValueType(I.getType()),
3066 // Utility for visitShuffleVector - Return true if every element in Mask,
3067 // beginning from position Pos and ending in Pos+Size, falls within the
3068 // specified sequential range [Low, Low+Size), or is undef.
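// For instance (illustrative): Mask = <4, -1, 6, 7> with Pos = 0, Size = 4 and
// Low = 4 qualifies, since the undef (-1) slot is ignored and the remaining
// elements count up from 4.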
3069 static bool isSequentialInRange(const SmallVectorImpl<int> &Mask,
3070 unsigned Pos, unsigned Size, int Low) {
3071 for (unsigned i = Pos, e = Pos+Size; i != e; ++i, ++Low)
3072 if (Mask[i] >= 0 && Mask[i] != Low)
3077 void SelectionDAGBuilder::visitShuffleVector(const User &I) {
3078 SDValue Src1 = getValue(I.getOperand(0));
3079 SDValue Src2 = getValue(I.getOperand(1));
3081 SmallVector<int, 8> Mask;
3082 ShuffleVectorInst::getShuffleMask(cast<Constant>(I.getOperand(2)), Mask);
3083 unsigned MaskNumElts = Mask.size();
3085 const TargetLowering *TLI = TM.getTargetLowering();
3086 EVT VT = TLI->getValueType(I.getType());
3087 EVT SrcVT = Src1.getValueType();
3088 unsigned SrcNumElts = SrcVT.getVectorNumElements();
3090 if (SrcNumElts == MaskNumElts) {
3091 setValue(&I, DAG.getVectorShuffle(VT, getCurSDLoc(), Src1, Src2,
3096 // Normalize the shuffle vector since mask and vector length don't match.
3097 if (SrcNumElts < MaskNumElts && MaskNumElts % SrcNumElts == 0) {
3098 // Mask is longer than the source vectors and is a multiple of the source
3099 // vectors. We can use concatenation to make the mask and vector lengths match.
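// Illustrative example (not from the source): shuffling two <2 x i32> sources
// with the mask <0, 1, 2, 3> is simply a CONCAT_VECTORS of Src1 and Src2,
// which the checks below recognize.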
3101 if (SrcNumElts*2 == MaskNumElts) {
3102 // First check for Src1 in low and Src2 in high
3103 if (isSequentialInRange(Mask, 0, SrcNumElts, 0) &&
3104 isSequentialInRange(Mask, SrcNumElts, SrcNumElts, SrcNumElts)) {
3105 // The shuffle is concatenating two vectors together.
3106 setValue(&I, DAG.getNode(ISD::CONCAT_VECTORS, getCurSDLoc(),
3110 // Then check for Src2 in low and Src1 in high
3111 if (isSequentialInRange(Mask, 0, SrcNumElts, SrcNumElts) &&
3112 isSequentialInRange(Mask, SrcNumElts, SrcNumElts, 0)) {
3113 // The shuffle is concatenating two vectors together.
3114 setValue(&I, DAG.getNode(ISD::CONCAT_VECTORS, getCurSDLoc(),
3120 // Pad both vectors with undefs to make them the same length as the mask.
3121 unsigned NumConcat = MaskNumElts / SrcNumElts;
3122 bool Src1U = Src1.getOpcode() == ISD::UNDEF;
3123 bool Src2U = Src2.getOpcode() == ISD::UNDEF;
3124 SDValue UndefVal = DAG.getUNDEF(SrcVT);
3126 SmallVector<SDValue, 8> MOps1(NumConcat, UndefVal);
3127 SmallVector<SDValue, 8> MOps2(NumConcat, UndefVal);
3131 Src1 = Src1U ? DAG.getUNDEF(VT) : DAG.getNode(ISD::CONCAT_VECTORS,
3132 getCurSDLoc(), VT, MOps1);
3133 Src2 = Src2U ? DAG.getUNDEF(VT) : DAG.getNode(ISD::CONCAT_VECTORS,
3134 getCurSDLoc(), VT, MOps2);
3136 // Readjust mask for new input vector length.
3137 SmallVector<int, 8> MappedOps;
3138 for (unsigned i = 0; i != MaskNumElts; ++i) {
3140 if (Idx >= (int)SrcNumElts)
3141 Idx -= SrcNumElts - MaskNumElts;
3142 MappedOps.push_back(Idx);
3145 setValue(&I, DAG.getVectorShuffle(VT, getCurSDLoc(), Src1, Src2,
3150 if (SrcNumElts > MaskNumElts) {
3151 // Analyze the access pattern of the vector to see if we can extract
3152 // two subvectors and do the shuffle. The analysis is done by calculating
3153 // the range of elements the mask accesses on both vectors.
3154 int MinRange[2] = { static_cast<int>(SrcNumElts),
3155 static_cast<int>(SrcNumElts)};
3156 int MaxRange[2] = {-1, -1};
3158 for (unsigned i = 0; i != MaskNumElts; ++i) {
3164 if (Idx >= (int)SrcNumElts) {
3168 if (Idx > MaxRange[Input])
3169 MaxRange[Input] = Idx;
3170 if (Idx < MinRange[Input])
3171 MinRange[Input] = Idx;
3174 // Check if the access is smaller than the vector size and whether we can
3175 // find a reasonable extract index.
3176 int RangeUse[2] = { -1, -1 }; // 0 = Unused, 1 = Extract, -1 = Cannot extract
3178 int StartIdx[2]; // StartIdx to extract from
3179 for (unsigned Input = 0; Input < 2; ++Input) {
3180 if (MinRange[Input] >= (int)SrcNumElts && MaxRange[Input] < 0) {
3181 RangeUse[Input] = 0; // Unused
3182 StartIdx[Input] = 0;
3186 // Find a good start index that is a multiple of the mask length. Then
3187 // see if the rest of the elements are in range.
3188 StartIdx[Input] = (MinRange[Input]/MaskNumElts)*MaskNumElts;
3189 if (MaxRange[Input] - StartIdx[Input] < (int)MaskNumElts &&
3190 StartIdx[Input] + MaskNumElts <= SrcNumElts)
3191 RangeUse[Input] = 1; // Extract from a multiple of the mask length.
3194 if (RangeUse[0] == 0 && RangeUse[1] == 0) {
3195 setValue(&I, DAG.getUNDEF(VT)); // Vectors are not used.
3198 if (RangeUse[0] >= 0 && RangeUse[1] >= 0) {
3199 // Extract appropriate subvector and generate a vector shuffle
3200 for (unsigned Input = 0; Input < 2; ++Input) {
3201 SDValue &Src = Input == 0 ? Src1 : Src2;
3202 if (RangeUse[Input] == 0)
3203 Src = DAG.getUNDEF(VT);
3205 Src = DAG.getNode(ISD::EXTRACT_SUBVECTOR, getCurSDLoc(), VT,
3206 Src, DAG.getConstant(StartIdx[Input],
3207 TLI->getVectorIdxTy()));
3210 // Calculate new mask.
3211 SmallVector<int, 8> MappedOps;
3212 for (unsigned i = 0; i != MaskNumElts; ++i) {
3215 if (Idx < (int)SrcNumElts)
3218 Idx -= SrcNumElts + StartIdx[1] - MaskNumElts;
3220 MappedOps.push_back(Idx);
3223 setValue(&I, DAG.getVectorShuffle(VT, getCurSDLoc(), Src1, Src2,
3229   // We can't use either concat vectors or extract subvectors, so fall back
3230   // to replacing the shuffle with per-element extracts and a build vector.
3232 EVT EltVT = VT.getVectorElementType();
3233 EVT IdxVT = TLI->getVectorIdxTy();
3234 SmallVector<SDValue,8> Ops;
3235 for (unsigned i = 0; i != MaskNumElts; ++i) {
3240 Res = DAG.getUNDEF(EltVT);
3242 SDValue &Src = Idx < (int)SrcNumElts ? Src1 : Src2;
3243 if (Idx >= (int)SrcNumElts) Idx -= SrcNumElts;
3245 Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, getCurSDLoc(),
3246 EltVT, Src, DAG.getConstant(Idx, IdxVT));
3252 setValue(&I, DAG.getNode(ISD::BUILD_VECTOR, getCurSDLoc(), VT, Ops));
3255 void SelectionDAGBuilder::visitInsertValue(const InsertValueInst &I) {
3256 const Value *Op0 = I.getOperand(0);
3257 const Value *Op1 = I.getOperand(1);
3258 Type *AggTy = I.getType();
3259 Type *ValTy = Op1->getType();
3260 bool IntoUndef = isa<UndefValue>(Op0);
3261 bool FromUndef = isa<UndefValue>(Op1);
3263 unsigned LinearIndex = ComputeLinearIndex(AggTy, I.getIndices());
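  // ComputeLinearIndex flattens the aggregate into its scalar leaves; e.g. for
  // an insertvalue into {i32, {float, double}} at indices 1,1 the linear index
  // is 2 (i32, float, double in leaf order).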
3265 const TargetLowering *TLI = TM.getTargetLowering();
3266 SmallVector<EVT, 4> AggValueVTs;
3267 ComputeValueVTs(*TLI, AggTy, AggValueVTs);
3268 SmallVector<EVT, 4> ValValueVTs;
3269 ComputeValueVTs(*TLI, ValTy, ValValueVTs);
3271 unsigned NumAggValues = AggValueVTs.size();
3272 unsigned NumValValues = ValValueVTs.size();
3273 SmallVector<SDValue, 4> Values(NumAggValues);
3275 SDValue Agg = getValue(Op0);
3277 // Copy the beginning value(s) from the original aggregate.
3278 for (; i != LinearIndex; ++i)
3279 Values[i] = IntoUndef ? DAG.getUNDEF(AggValueVTs[i]) :
3280 SDValue(Agg.getNode(), Agg.getResNo() + i);
3281 // Copy values from the inserted value(s).
3283 SDValue Val = getValue(Op1);
3284 for (; i != LinearIndex + NumValValues; ++i)
3285 Values[i] = FromUndef ? DAG.getUNDEF(AggValueVTs[i]) :
3286 SDValue(Val.getNode(), Val.getResNo() + i - LinearIndex);
3288 // Copy remaining value(s) from the original aggregate.
3289 for (; i != NumAggValues; ++i)
3290 Values[i] = IntoUndef ? DAG.getUNDEF(AggValueVTs[i]) :
3291 SDValue(Agg.getNode(), Agg.getResNo() + i);
3293 setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(),
3294 DAG.getVTList(AggValueVTs), Values));
3297 void SelectionDAGBuilder::visitExtractValue(const ExtractValueInst &I) {
3298 const Value *Op0 = I.getOperand(0);
3299 Type *AggTy = Op0->getType();
3300 Type *ValTy = I.getType();
3301 bool OutOfUndef = isa<UndefValue>(Op0);
3303 unsigned LinearIndex = ComputeLinearIndex(AggTy, I.getIndices());
3305 const TargetLowering *TLI = TM.getTargetLowering();
3306 SmallVector<EVT, 4> ValValueVTs;
3307 ComputeValueVTs(*TLI, ValTy, ValValueVTs);
3309 unsigned NumValValues = ValValueVTs.size();
3311   // Ignore an extractvalue that produces an empty object.
3312 if (!NumValValues) {
3313 setValue(&I, DAG.getUNDEF(MVT(MVT::Other)));
3317 SmallVector<SDValue, 4> Values(NumValValues);
3319 SDValue Agg = getValue(Op0);
3320 // Copy out the selected value(s).
3321 for (unsigned i = LinearIndex; i != LinearIndex + NumValValues; ++i)
3322 Values[i - LinearIndex] =
3324 DAG.getUNDEF(Agg.getNode()->getValueType(Agg.getResNo() + i)) :
3325 SDValue(Agg.getNode(), Agg.getResNo() + i);
3327 setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(),
3328 DAG.getVTList(ValValueVTs), Values));
3331 void SelectionDAGBuilder::visitGetElementPtr(const User &I) {
3332 Value *Op0 = I.getOperand(0);
3333 // Note that the pointer operand may be a vector of pointers. Take the scalar
3334 // element which holds a pointer.
3335 Type *Ty = Op0->getType()->getScalarType();
3336 unsigned AS = Ty->getPointerAddressSpace();
3337 SDValue N = getValue(Op0);
3339 for (GetElementPtrInst::const_op_iterator OI = I.op_begin()+1, E = I.op_end();
3341 const Value *Idx = *OI;
3342 if (StructType *StTy = dyn_cast<StructType>(Ty)) {
3343 unsigned Field = cast<Constant>(Idx)->getUniqueInteger().getZExtValue();
3346 uint64_t Offset = DL->getStructLayout(StTy)->getElementOffset(Field);
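        // For example, indexing field 1 of {i32, i32} adds that field's struct
        // layout offset (4 bytes, assuming a 4-byte i32 with no padding) to
        // the pointer.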
3347 N = DAG.getNode(ISD::ADD, getCurSDLoc(), N.getValueType(), N,
3348 DAG.getConstant(Offset, N.getValueType()));
3351 Ty = StTy->getElementType(Field);
3353 Ty = cast<SequentialType>(Ty)->getElementType();
3355 // If this is a constant subscript, handle it quickly.
3356 const TargetLowering *TLI = TM.getTargetLowering();
3357 if (const ConstantInt *CI = dyn_cast<ConstantInt>(Idx)) {
3358 if (CI->isZero()) continue;
3360 DL->getTypeAllocSize(Ty)*cast<ConstantInt>(CI)->getSExtValue();
3362 EVT PTy = TLI->getPointerTy(AS);
3363 unsigned PtrBits = PTy.getSizeInBits();
3365 OffsVal = DAG.getNode(ISD::TRUNCATE, getCurSDLoc(), PTy,
3366 DAG.getConstant(Offs, MVT::i64));
3368 OffsVal = DAG.getConstant(Offs, PTy);
3370 N = DAG.getNode(ISD::ADD, getCurSDLoc(), N.getValueType(), N,
3375 // N = N + Idx * ElementSize;
3376 APInt ElementSize = APInt(TLI->getPointerSizeInBits(AS),
3377 DL->getTypeAllocSize(Ty));
3378 SDValue IdxN = getValue(Idx);
3380       // If the index is smaller or larger than intptr_t, truncate or extend it.
3382 IdxN = DAG.getSExtOrTrunc(IdxN, getCurSDLoc(), N.getValueType());
3384 // If this is a multiply by a power of two, turn it into a shl
3385 // immediately. This is a very common case.
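      // For example, indexing into an array of i32 has ElementSize == 4, so
      // Idx * 4 is emitted as Idx << 2.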
3386 if (ElementSize != 1) {
3387 if (ElementSize.isPowerOf2()) {
3388 unsigned Amt = ElementSize.logBase2();
3389 IdxN = DAG.getNode(ISD::SHL, getCurSDLoc(),
3390 N.getValueType(), IdxN,
3391 DAG.getConstant(Amt, IdxN.getValueType()));
3393 SDValue Scale = DAG.getConstant(ElementSize, IdxN.getValueType());
3394 IdxN = DAG.getNode(ISD::MUL, getCurSDLoc(),
3395 N.getValueType(), IdxN, Scale);
3399 N = DAG.getNode(ISD::ADD, getCurSDLoc(),
3400 N.getValueType(), N, IdxN);
3407 void SelectionDAGBuilder::visitAlloca(const AllocaInst &I) {
3408 // If this is a fixed sized alloca in the entry block of the function,
3409 // allocate it statically on the stack.
3410 if (FuncInfo.StaticAllocaMap.count(&I))
3411 return; // getValue will auto-populate this.
3413 Type *Ty = I.getAllocatedType();
3414 const TargetLowering *TLI = TM.getTargetLowering();
3415 uint64_t TySize = TLI->getDataLayout()->getTypeAllocSize(Ty);
3417 std::max((unsigned)TLI->getDataLayout()->getPrefTypeAlignment(Ty),
3420 SDValue AllocSize = getValue(I.getArraySize());
3422 EVT IntPtr = TLI->getPointerTy();
3423 if (AllocSize.getValueType() != IntPtr)
3424 AllocSize = DAG.getZExtOrTrunc(AllocSize, getCurSDLoc(), IntPtr);
3426 AllocSize = DAG.getNode(ISD::MUL, getCurSDLoc(), IntPtr,
3428 DAG.getConstant(TySize, IntPtr));
3430 // Handle alignment. If the requested alignment is less than or equal to
3431 // the stack alignment, ignore it. If the size is greater than or equal to
3432 // the stack alignment, we note this in the DYNAMIC_STACKALLOC node.
3433 unsigned StackAlign = TM.getFrameLowering()->getStackAlignment();
3434 if (Align <= StackAlign)
3437   // Round the size of the allocation up to the stack alignment size
3438   // by adding SA-1 to the size.
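  // For example, with a 16-byte stack alignment a request for 20 bytes becomes
  // (20 + 15) & ~15 == 32 bytes.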
3439 AllocSize = DAG.getNode(ISD::ADD, getCurSDLoc(),
3440 AllocSize.getValueType(), AllocSize,
3441 DAG.getIntPtrConstant(StackAlign-1));
3443 // Mask out the low bits for alignment purposes.
3444 AllocSize = DAG.getNode(ISD::AND, getCurSDLoc(),
3445 AllocSize.getValueType(), AllocSize,
3446 DAG.getIntPtrConstant(~(uint64_t)(StackAlign-1)));
3448 SDValue Ops[] = { getRoot(), AllocSize, DAG.getIntPtrConstant(Align) };
3449 SDVTList VTs = DAG.getVTList(AllocSize.getValueType(), MVT::Other);
3450 SDValue DSA = DAG.getNode(ISD::DYNAMIC_STACKALLOC, getCurSDLoc(), VTs, Ops);
3452 DAG.setRoot(DSA.getValue(1));
3454 assert(FuncInfo.MF->getFrameInfo()->hasVarSizedObjects());
3457 void SelectionDAGBuilder::visitLoad(const LoadInst &I) {
3459 return visitAtomicLoad(I);
3461 const Value *SV = I.getOperand(0);
3462 SDValue Ptr = getValue(SV);
3464 Type *Ty = I.getType();
3466 bool isVolatile = I.isVolatile();
3467 bool isNonTemporal = I.getMetadata("nontemporal") != nullptr;
3468 bool isInvariant = I.getMetadata("invariant.load") != nullptr;
3469 unsigned Alignment = I.getAlignment();
3470 const MDNode *TBAAInfo = I.getMetadata(LLVMContext::MD_tbaa);
3471 const MDNode *Ranges = I.getMetadata(LLVMContext::MD_range);
3473 SmallVector<EVT, 4> ValueVTs;
3474 SmallVector<uint64_t, 4> Offsets;
3475 ComputeValueVTs(*TM.getTargetLowering(), Ty, ValueVTs, &Offsets);
3476 unsigned NumValues = ValueVTs.size();
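  // A first-class aggregate load is split into one load per scalar leaf; e.g.
  // loading a {i32, i32} produces two i32 loads at offsets 0 and 4 (assuming a
  // 4-byte i32), merged below with MERGE_VALUES.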
3481 bool ConstantMemory = false;
3482 if (isVolatile || NumValues > MaxParallelChains)
3483 // Serialize volatile loads with other side effects.
3485 else if (AA->pointsToConstantMemory(
3486 AliasAnalysis::Location(SV, AA->getTypeStoreSize(Ty), TBAAInfo))) {
3487 // Do not serialize (non-volatile) loads of constant memory with anything.
3488 Root = DAG.getEntryNode();
3489 ConstantMemory = true;
3491 // Do not serialize non-volatile loads against each other.
3492 Root = DAG.getRoot();
3495 const TargetLowering *TLI = TM.getTargetLowering();
3497 Root = TLI->prepareVolatileOrAtomicLoad(Root, getCurSDLoc(), DAG);
3499 SmallVector<SDValue, 4> Values(NumValues);
3500 SmallVector<SDValue, 4> Chains(std::min(unsigned(MaxParallelChains),
3502 EVT PtrVT = Ptr.getValueType();
3503 unsigned ChainI = 0;
3504 for (unsigned i = 0; i != NumValues; ++i, ++ChainI) {
3505 // Serializing loads here may result in excessive register pressure, and
3506 // TokenFactor places arbitrary choke points on the scheduler. SD scheduling
3507 // could recover a bit by hoisting nodes upward in the chain by recognizing
3508 // they are side-effect free or do not alias. The optimizer should really
3509 // avoid this case by converting large object/array copies to llvm.memcpy
3510 // (MaxParallelChains should always remain as failsafe).
3511 if (ChainI == MaxParallelChains) {
3512 assert(PendingLoads.empty() && "PendingLoads must be serialized first");
3513 SDValue Chain = DAG.getNode(ISD::TokenFactor, getCurSDLoc(), MVT::Other,
3514 makeArrayRef(Chains.data(), ChainI));
3518 SDValue A = DAG.getNode(ISD::ADD, getCurSDLoc(),
3520 DAG.getConstant(Offsets[i], PtrVT));
3521 SDValue L = DAG.getLoad(ValueVTs[i], getCurSDLoc(), Root,
3522 A, MachinePointerInfo(SV, Offsets[i]), isVolatile,
3523 isNonTemporal, isInvariant, Alignment, TBAAInfo,
3527 Chains[ChainI] = L.getValue(1);
3530 if (!ConstantMemory) {
3531 SDValue Chain = DAG.getNode(ISD::TokenFactor, getCurSDLoc(), MVT::Other,
3532 makeArrayRef(Chains.data(), ChainI));
3536 PendingLoads.push_back(Chain);
3539 setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(),
3540 DAG.getVTList(ValueVTs), Values));
3543 void SelectionDAGBuilder::visitStore(const StoreInst &I) {
3545 return visitAtomicStore(I);
3547 const Value *SrcV = I.getOperand(0);
3548 const Value *PtrV = I.getOperand(1);
3550 SmallVector<EVT, 4> ValueVTs;
3551 SmallVector<uint64_t, 4> Offsets;
3552 ComputeValueVTs(*TM.getTargetLowering(), SrcV->getType(), ValueVTs, &Offsets);
3553 unsigned NumValues = ValueVTs.size();
3557   // Get the lowered operands. Note that we do this after
3558   // checking if NumValues is zero, because with zero values
3559   // the operands won't have values in the map.
3560 SDValue Src = getValue(SrcV);
3561 SDValue Ptr = getValue(PtrV);
3563 SDValue Root = getRoot();
3564 SmallVector<SDValue, 4> Chains(std::min(unsigned(MaxParallelChains),
3566 EVT PtrVT = Ptr.getValueType();
3567 bool isVolatile = I.isVolatile();
3568 bool isNonTemporal = I.getMetadata("nontemporal") != nullptr;
3569 unsigned Alignment = I.getAlignment();
3570 const MDNode *TBAAInfo = I.getMetadata(LLVMContext::MD_tbaa);
3572 unsigned ChainI = 0;
3573 for (unsigned i = 0; i != NumValues; ++i, ++ChainI) {
3574 // See visitLoad comments.
3575 if (ChainI == MaxParallelChains) {
3576 SDValue Chain = DAG.getNode(ISD::TokenFactor, getCurSDLoc(), MVT::Other,
3577 makeArrayRef(Chains.data(), ChainI));
3581 SDValue Add = DAG.getNode(ISD::ADD, getCurSDLoc(), PtrVT, Ptr,
3582 DAG.getConstant(Offsets[i], PtrVT));
3583 SDValue St = DAG.getStore(Root, getCurSDLoc(),
3584 SDValue(Src.getNode(), Src.getResNo() + i),
3585 Add, MachinePointerInfo(PtrV, Offsets[i]),
3586 isVolatile, isNonTemporal, Alignment, TBAAInfo);
3587 Chains[ChainI] = St;
3590 SDValue StoreNode = DAG.getNode(ISD::TokenFactor, getCurSDLoc(), MVT::Other,
3591 makeArrayRef(Chains.data(), ChainI));
3592 DAG.setRoot(StoreNode);
3595 static SDValue InsertFenceForAtomic(SDValue Chain, AtomicOrdering Order,
3596 SynchronizationScope Scope,
3597 bool Before, SDLoc dl,
3599 const TargetLowering &TLI) {
3600 // Fence, if necessary
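  // When the target requests explicit fences (getInsertFencesForAtomic), the
  // atomic operation itself is emitted Monotonic and this helper brackets it:
  // roughly, a release-style fence before and an acquire-style fence after for
  // the stronger orderings, and no fence otherwise.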
3602 if (Order == AcquireRelease || Order == SequentiallyConsistent)
3604 else if (Order == Acquire || Order == Monotonic || Order == Unordered)
3607 if (Order == AcquireRelease)
3609 else if (Order == Release || Order == Monotonic || Order == Unordered)
3614 Ops[1] = DAG.getConstant(Order, TLI.getPointerTy());
3615 Ops[2] = DAG.getConstant(Scope, TLI.getPointerTy());
3616 return DAG.getNode(ISD::ATOMIC_FENCE, dl, MVT::Other, Ops);
3619 void SelectionDAGBuilder::visitAtomicCmpXchg(const AtomicCmpXchgInst &I) {
3620 SDLoc dl = getCurSDLoc();
3621 AtomicOrdering SuccessOrder = I.getSuccessOrdering();
3622 AtomicOrdering FailureOrder = I.getFailureOrdering();
3623 SynchronizationScope Scope = I.getSynchScope();
3625 SDValue InChain = getRoot();
3627 const TargetLowering *TLI = TM.getTargetLowering();
3628 if (TLI->getInsertFencesForAtomic())
3629 InChain = InsertFenceForAtomic(InChain, SuccessOrder, Scope, true, dl,
3633 DAG.getAtomic(ISD::ATOMIC_CMP_SWAP, dl,
3634 getValue(I.getCompareOperand()).getSimpleValueType(),
3636 getValue(I.getPointerOperand()),
3637 getValue(I.getCompareOperand()),
3638 getValue(I.getNewValOperand()),
3639 MachinePointerInfo(I.getPointerOperand()), 0 /* Alignment */,
3640 TLI->getInsertFencesForAtomic() ? Monotonic : SuccessOrder,
3641 TLI->getInsertFencesForAtomic() ? Monotonic : FailureOrder,
3644 SDValue OutChain = L.getValue(1);
3646 if (TLI->getInsertFencesForAtomic())
3647 OutChain = InsertFenceForAtomic(OutChain, SuccessOrder, Scope, false, dl,
3651 DAG.setRoot(OutChain);
3654 void SelectionDAGBuilder::visitAtomicRMW(const AtomicRMWInst &I) {
3655 SDLoc dl = getCurSDLoc();
3657 switch (I.getOperation()) {
3658 default: llvm_unreachable("Unknown atomicrmw operation");
3659 case AtomicRMWInst::Xchg: NT = ISD::ATOMIC_SWAP; break;
3660 case AtomicRMWInst::Add: NT = ISD::ATOMIC_LOAD_ADD; break;
3661 case AtomicRMWInst::Sub: NT = ISD::ATOMIC_LOAD_SUB; break;
3662 case AtomicRMWInst::And: NT = ISD::ATOMIC_LOAD_AND; break;
3663 case AtomicRMWInst::Nand: NT = ISD::ATOMIC_LOAD_NAND; break;
3664 case AtomicRMWInst::Or: NT = ISD::ATOMIC_LOAD_OR; break;
3665 case AtomicRMWInst::Xor: NT = ISD::ATOMIC_LOAD_XOR; break;
3666 case AtomicRMWInst::Max: NT = ISD::ATOMIC_LOAD_MAX; break;
3667 case AtomicRMWInst::Min: NT = ISD::ATOMIC_LOAD_MIN; break;
3668 case AtomicRMWInst::UMax: NT = ISD::ATOMIC_LOAD_UMAX; break;
3669 case AtomicRMWInst::UMin: NT = ISD::ATOMIC_LOAD_UMIN; break;
3671 AtomicOrdering Order = I.getOrdering();
3672 SynchronizationScope Scope = I.getSynchScope();
3674 SDValue InChain = getRoot();
3676 const TargetLowering *TLI = TM.getTargetLowering();
3677 if (TLI->getInsertFencesForAtomic())
3678 InChain = InsertFenceForAtomic(InChain, Order, Scope, true, dl,
3682 DAG.getAtomic(NT, dl,
3683 getValue(I.getValOperand()).getSimpleValueType(),
3685 getValue(I.getPointerOperand()),
3686 getValue(I.getValOperand()),
3687 I.getPointerOperand(), 0 /* Alignment */,
3688 TLI->getInsertFencesForAtomic() ? Monotonic : Order,
3691 SDValue OutChain = L.getValue(1);
3693 if (TLI->getInsertFencesForAtomic())
3694 OutChain = InsertFenceForAtomic(OutChain, Order, Scope, false, dl,
3698 DAG.setRoot(OutChain);
3701 void SelectionDAGBuilder::visitFence(const FenceInst &I) {
3702 SDLoc dl = getCurSDLoc();
3703 const TargetLowering *TLI = TM.getTargetLowering();
3706 Ops[1] = DAG.getConstant(I.getOrdering(), TLI->getPointerTy());
3707 Ops[2] = DAG.getConstant(I.getSynchScope(), TLI->getPointerTy());
3708 DAG.setRoot(DAG.getNode(ISD::ATOMIC_FENCE, dl, MVT::Other, Ops));
3711 void SelectionDAGBuilder::visitAtomicLoad(const LoadInst &I) {
3712 SDLoc dl = getCurSDLoc();
3713 AtomicOrdering Order = I.getOrdering();
3714 SynchronizationScope Scope = I.getSynchScope();
3716 SDValue InChain = getRoot();
3718 const TargetLowering *TLI = TM.getTargetLowering();
3719 EVT VT = TLI->getValueType(I.getType());
3721 if (I.getAlignment() < VT.getSizeInBits() / 8)
3722 report_fatal_error("Cannot generate unaligned atomic load");
3724 MachineMemOperand *MMO =
3725 DAG.getMachineFunction().
3726 getMachineMemOperand(MachinePointerInfo(I.getPointerOperand()),
3727 MachineMemOperand::MOVolatile |
3728 MachineMemOperand::MOLoad,
3730 I.getAlignment() ? I.getAlignment() :
3731 DAG.getEVTAlignment(VT));
3733 InChain = TLI->prepareVolatileOrAtomicLoad(InChain, dl, DAG);
3735 DAG.getAtomic(ISD::ATOMIC_LOAD, dl, VT, VT, InChain,
3736 getValue(I.getPointerOperand()), MMO,
3737 TLI->getInsertFencesForAtomic() ? Monotonic : Order,
3740 SDValue OutChain = L.getValue(1);
3742 if (TLI->getInsertFencesForAtomic())
3743 OutChain = InsertFenceForAtomic(OutChain, Order, Scope, false, dl,
3747 DAG.setRoot(OutChain);
3750 void SelectionDAGBuilder::visitAtomicStore(const StoreInst &I) {
3751 SDLoc dl = getCurSDLoc();
3753 AtomicOrdering Order = I.getOrdering();
3754 SynchronizationScope Scope = I.getSynchScope();
3756 SDValue InChain = getRoot();
3758 const TargetLowering *TLI = TM.getTargetLowering();
3759 EVT VT = TLI->getValueType(I.getValueOperand()->getType());
3761 if (I.getAlignment() < VT.getSizeInBits() / 8)
3762 report_fatal_error("Cannot generate unaligned atomic store");
3764 if (TLI->getInsertFencesForAtomic())
3765 InChain = InsertFenceForAtomic(InChain, Order, Scope, true, dl,
3769 DAG.getAtomic(ISD::ATOMIC_STORE, dl, VT,
3771 getValue(I.getPointerOperand()),
3772 getValue(I.getValueOperand()),
3773 I.getPointerOperand(), I.getAlignment(),
3774 TLI->getInsertFencesForAtomic() ? Monotonic : Order,
3777 if (TLI->getInsertFencesForAtomic())
3778 OutChain = InsertFenceForAtomic(OutChain, Order, Scope, false, dl,
3781 DAG.setRoot(OutChain);
3784 /// visitTargetIntrinsic - Lower a call of a target intrinsic to an INTRINSIC node.
3786 void SelectionDAGBuilder::visitTargetIntrinsic(const CallInst &I,
3787 unsigned Intrinsic) {
3788 bool HasChain = !I.doesNotAccessMemory();
3789 bool OnlyLoad = HasChain && I.onlyReadsMemory();
3791 // Build the operand list.
3792 SmallVector<SDValue, 8> Ops;
3793 if (HasChain) { // If this intrinsic has side-effects, chainify it.
3795 // We don't need to serialize loads against other loads.
3796 Ops.push_back(DAG.getRoot());
3798 Ops.push_back(getRoot());
3802   // Info is set by getTgtMemIntrinsic.
3803 TargetLowering::IntrinsicInfo Info;
3804 const TargetLowering *TLI = TM.getTargetLowering();
3805 bool IsTgtIntrinsic = TLI->getTgtMemIntrinsic(Info, I, Intrinsic);
3807 // Add the intrinsic ID as an integer operand if it's not a target intrinsic.
3808 if (!IsTgtIntrinsic || Info.opc == ISD::INTRINSIC_VOID ||
3809 Info.opc == ISD::INTRINSIC_W_CHAIN)
3810 Ops.push_back(DAG.getTargetConstant(Intrinsic, TLI->getPointerTy()));
3812 // Add all operands of the call to the operand list.
3813 for (unsigned i = 0, e = I.getNumArgOperands(); i != e; ++i) {
3814 SDValue Op = getValue(I.getArgOperand(i));
3818 SmallVector<EVT, 4> ValueVTs;
3819 ComputeValueVTs(*TLI, I.getType(), ValueVTs);
3822 ValueVTs.push_back(MVT::Other);
3824 SDVTList VTs = DAG.getVTList(ValueVTs);
3828 if (IsTgtIntrinsic) {
3829     // This is a target intrinsic that touches memory.
3830 Result = DAG.getMemIntrinsicNode(Info.opc, getCurSDLoc(),
3831 VTs, Ops, Info.memVT,
3832 MachinePointerInfo(Info.ptrVal, Info.offset),
3833 Info.align, Info.vol,
3834 Info.readMem, Info.writeMem);
3835 } else if (!HasChain) {
3836 Result = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, getCurSDLoc(), VTs, Ops);
3837 } else if (!I.getType()->isVoidTy()) {
3838 Result = DAG.getNode(ISD::INTRINSIC_W_CHAIN, getCurSDLoc(), VTs, Ops);
3840 Result = DAG.getNode(ISD::INTRINSIC_VOID, getCurSDLoc(), VTs, Ops);
3844 SDValue Chain = Result.getValue(Result.getNode()->getNumValues()-1);
3846 PendingLoads.push_back(Chain);
3851 if (!I.getType()->isVoidTy()) {
3852 if (VectorType *PTy = dyn_cast<VectorType>(I.getType())) {
3853 EVT VT = TLI->getValueType(PTy);
3854 Result = DAG.getNode(ISD::BITCAST, getCurSDLoc(), VT, Result);
3857 setValue(&I, Result);
3861 /// GetSignificand - Get the significand and build it into a floating-point
3862 /// number with exponent of 1:
3864 /// Op = (Op & 0x007fffff) | 0x3f800000;
3866 /// where Op is the i32 bit pattern of the floating point value.
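/// For example, for Op = 3.0f (0x40400000) this produces
/// (0x00400000 | 0x3f800000) = 0x3fc00000 = 1.5f.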
3868 GetSignificand(SelectionDAG &DAG, SDValue Op, SDLoc dl) {
3869 SDValue t1 = DAG.getNode(ISD::AND, dl, MVT::i32, Op,
3870 DAG.getConstant(0x007fffff, MVT::i32));
3871 SDValue t2 = DAG.getNode(ISD::OR, dl, MVT::i32, t1,
3872 DAG.getConstant(0x3f800000, MVT::i32));
3873 return DAG.getNode(ISD::BITCAST, dl, MVT::f32, t2);
3876 /// GetExponent - Get the exponent:
3878 /// (float)(int)(((Op & 0x7f800000) >> 23) - 127);
3880 /// where Op is the i32 bit pattern of the floating point value.
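/// For example, for Op = 3.0f (0x40400000) the biased exponent field is 128,
/// so this produces (float)(128 - 127) = 1.0f.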
3882 GetExponent(SelectionDAG &DAG, SDValue Op, const TargetLowering &TLI,
3884 SDValue t0 = DAG.getNode(ISD::AND, dl, MVT::i32, Op,
3885 DAG.getConstant(0x7f800000, MVT::i32));
3886 SDValue t1 = DAG.getNode(ISD::SRL, dl, MVT::i32, t0,
3887 DAG.getConstant(23, TLI.getPointerTy()));
3888 SDValue t2 = DAG.getNode(ISD::SUB, dl, MVT::i32, t1,
3889 DAG.getConstant(127, MVT::i32));
3890 return DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, t2);
3893 /// getF32Constant - Get 32-bit floating point constant.
3895 getF32Constant(SelectionDAG &DAG, unsigned Flt) {
3896 return DAG.getConstantFP(APFloat(APFloat::IEEEsingle, APInt(32, Flt)),
3900 /// expandExp - Lower an exp intrinsic. Handles the special sequences for
3901 /// limited-precision mode.
3902 static SDValue expandExp(SDLoc dl, SDValue Op, SelectionDAG &DAG,
3903 const TargetLowering &TLI) {
3904 if (Op.getValueType() == MVT::f32 &&
3905 LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
3907     // Put the exponent in the right bit position for later addition to the final result:
3910 // #define LOG2OFe 1.4426950f
3911 // IntegerPartOfX = ((int32_t)(X * LOG2OFe));
3912 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, Op,
3913 getF32Constant(DAG, 0x3fb8aa3b));
3914 SDValue IntegerPartOfX = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, t0);
3916 // FractionalPartOfX = (X * LOG2OFe) - (float)IntegerPartOfX;
3917 SDValue t1 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, IntegerPartOfX);
3918 SDValue X = DAG.getNode(ISD::FSUB, dl, MVT::f32, t0, t1);
3920 // IntegerPartOfX <<= 23;
3921 IntegerPartOfX = DAG.getNode(ISD::SHL, dl, MVT::i32, IntegerPartOfX,
3922 DAG.getConstant(23, TLI.getPointerTy()));
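    // Shifting IntegerPartOfX into the exponent field builds the bit pattern
    // of the float 2^IntegerPartOfX; adding it to the bits of
    // 2^FractionalPartOfX below (in the integer domain) scales the result by
    // 2^IntegerPartOfX.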
3924 SDValue TwoToFracPartOfX;
3925 if (LimitFloatPrecision <= 6) {
3926 // For floating-point precision of 6:
3928 // TwoToFractionalPartOfX =
3930 // (0.735607626f + 0.252464424f * x) * x;
3932 // error 0.0144103317, which is 6 bits
3933 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3934 getF32Constant(DAG, 0x3e814304));
3935 SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
3936 getF32Constant(DAG, 0x3f3c50c8));
3937 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
3938 TwoToFracPartOfX = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
3939 getF32Constant(DAG, 0x3f7f5e7e));
3940 } else if (LimitFloatPrecision <= 12) {
3941 // For floating-point precision of 12:
3943 // TwoToFractionalPartOfX =
3946 // (0.224338339f + 0.792043434e-1f * x) * x) * x;
3948 // 0.000107046256 error, which is 13 to 14 bits
3949 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3950 getF32Constant(DAG, 0x3da235e3));
3951 SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
3952 getF32Constant(DAG, 0x3e65b8f3));
3953 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
3954 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
3955 getF32Constant(DAG, 0x3f324b07));
3956 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
3957 TwoToFracPartOfX = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
3958 getF32Constant(DAG, 0x3f7ff8fd));
3959 } else { // LimitFloatPrecision <= 18
3960 // For floating-point precision of 18:
3962 // TwoToFractionalPartOfX =
3966 // (0.554906021e-1f +
3967 // (0.961591928e-2f +
3968 // (0.136028312e-2f + 0.157059148e-3f *x)*x)*x)*x)*x)*x;
3970 // error 2.47208000*10^(-7), which is better than 18 bits
3971 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3972 getF32Constant(DAG, 0x3924b03e));
3973 SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
3974 getF32Constant(DAG, 0x3ab24b87));
3975 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
3976 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
3977 getF32Constant(DAG, 0x3c1d8c17));
3978 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
3979 SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
3980 getF32Constant(DAG, 0x3d634a1d));
3981 SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
3982 SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8,
3983 getF32Constant(DAG, 0x3e75fe14));
3984 SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X);
3985 SDValue t11 = DAG.getNode(ISD::FADD, dl, MVT::f32, t10,
3986 getF32Constant(DAG, 0x3f317234));
3987 SDValue t12 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t11, X);
3988 TwoToFracPartOfX = DAG.getNode(ISD::FADD, dl, MVT::f32, t12,
3989 getF32Constant(DAG, 0x3f800000));
3992 // Add the exponent into the result in integer domain.
3993 SDValue t13 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, TwoToFracPartOfX);
3994 return DAG.getNode(ISD::BITCAST, dl, MVT::f32,
3995 DAG.getNode(ISD::ADD, dl, MVT::i32,
3996 t13, IntegerPartOfX));
3999 // No special expansion.
4000 return DAG.getNode(ISD::FEXP, dl, Op.getValueType(), Op);
4003 /// expandLog - Lower a log intrinsic. Handles the special sequences for
4004 /// limited-precision mode.
4005 static SDValue expandLog(SDLoc dl, SDValue Op, SelectionDAG &DAG,
4006 const TargetLowering &TLI) {
4007 if (Op.getValueType() == MVT::f32 &&
4008 LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
4009 SDValue Op1 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Op);
4011 // Scale the exponent by log(2) [0.69314718f].
4012 SDValue Exp = GetExponent(DAG, Op1, TLI, dl);
4013 SDValue LogOfExponent = DAG.getNode(ISD::FMUL, dl, MVT::f32, Exp,
4014 getF32Constant(DAG, 0x3f317218));
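    // This uses ln(x) = Exponent * ln(2) + ln(Significand), with the
    // significand in [1,2) approximated by a minimax polynomial below.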
4016 // Get the significand and build it into a floating-point number with
4018 SDValue X = GetSignificand(DAG, Op1, dl);
4020 SDValue LogOfMantissa;
4021 if (LimitFloatPrecision <= 6) {
4022 // For floating-point precision of 6:
4026 // (1.4034025f - 0.23903021f * x) * x;
4028 // error 0.0034276066, which is better than 8 bits
4029 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
4030 getF32Constant(DAG, 0xbe74c456));
4031 SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
4032 getF32Constant(DAG, 0x3fb3a2b1));
4033 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
4034 LogOfMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
4035 getF32Constant(DAG, 0x3f949a29));
4036 } else if (LimitFloatPrecision <= 12) {
4037 // For floating-point precision of 12:
4043 // (0.44717955f - 0.56570851e-1f * x) * x) * x) * x;
4045 // error 0.000061011436, which is 14 bits
4046 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
4047 getF32Constant(DAG, 0xbd67b6d6));
4048 SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
4049 getF32Constant(DAG, 0x3ee4f4b8));
4050 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
4051 SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
4052 getF32Constant(DAG, 0x3fbc278b));
4053 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
4054 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
4055 getF32Constant(DAG, 0x40348e95));
4056 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
4057 LogOfMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
4058 getF32Constant(DAG, 0x3fdef31a));
4059 } else { // LimitFloatPrecision <= 18
4060 // For floating-point precision of 18:
4068 // (0.19073739f - 0.17809712e-1f * x) * x) * x) * x) * x)*x;
4070 // error 0.0000023660568, which is better than 18 bits
4071 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
4072 getF32Constant(DAG, 0xbc91e5ac));
4073 SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
4074 getF32Constant(DAG, 0x3e4350aa));
4075 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
4076 SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
4077 getF32Constant(DAG, 0x3f60d3e3));
4078 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
4079 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
4080 getF32Constant(DAG, 0x4011cdf0));
4081 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
4082 SDValue t7 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
4083 getF32Constant(DAG, 0x406cfd1c));
4084 SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
4085 SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8,
4086 getF32Constant(DAG, 0x408797cb));
4087 SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X);
4088 LogOfMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t10,
4089 getF32Constant(DAG, 0x4006dcab));
4092 return DAG.getNode(ISD::FADD, dl, MVT::f32, LogOfExponent, LogOfMantissa);
4095 // No special expansion.
4096 return DAG.getNode(ISD::FLOG, dl, Op.getValueType(), Op);
4099 /// expandLog2 - Lower a log2 intrinsic. Handles the special sequences for
4100 /// limited-precision mode.
4101 static SDValue expandLog2(SDLoc dl, SDValue Op, SelectionDAG &DAG,
4102 const TargetLowering &TLI) {
4103 if (Op.getValueType() == MVT::f32 &&
4104 LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
4105 SDValue Op1 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Op);
4107 // Get the exponent.
4108 SDValue LogOfExponent = GetExponent(DAG, Op1, TLI, dl);
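    // Here log2(x) = Exponent + log2(Significand), so no scaling of the
    // exponent is needed.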
4110 // Get the significand and build it into a floating-point number with
4112 SDValue X = GetSignificand(DAG, Op1, dl);
4114 // Different possible minimax approximations of significand in
4115 // floating-point for various degrees of accuracy over [1,2].
4116 SDValue Log2ofMantissa;
4117 if (LimitFloatPrecision <= 6) {
4118 // For floating-point precision of 6:
4120 // Log2ofMantissa = -1.6749035f + (2.0246817f - .34484768f * x) * x;
4122 // error 0.0049451742, which is more than 7 bits
4123 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
4124 getF32Constant(DAG, 0xbeb08fe0));
4125 SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
4126 getF32Constant(DAG, 0x40019463));
4127 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
4128 Log2ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
4129 getF32Constant(DAG, 0x3fd6633d));
4130 } else if (LimitFloatPrecision <= 12) {
4131 // For floating-point precision of 12:
4137 // (.645142248f - 0.816157886e-1f * x) * x) * x) * x;
4139 // error 0.0000876136000, which is better than 13 bits
4140 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
4141 getF32Constant(DAG, 0xbda7262e));
4142 SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
4143 getF32Constant(DAG, 0x3f25280b));
4144 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
4145 SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
4146 getF32Constant(DAG, 0x4007b923));
4147 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
4148 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
4149 getF32Constant(DAG, 0x40823e2f));
4150 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
4151 Log2ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
4152 getF32Constant(DAG, 0x4020d29c));
4153 } else { // LimitFloatPrecision <= 18
4154 // For floating-point precision of 18:
4163 // 0.25691327e-1f * x) * x) * x) * x) * x) * x;
4165 // error 0.0000018516, which is better than 18 bits
4166 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
4167 getF32Constant(DAG, 0xbcd2769e));
4168 SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
4169 getF32Constant(DAG, 0x3e8ce0b9));
4170 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
4171 SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
4172 getF32Constant(DAG, 0x3fa22ae7));
4173 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
4174 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
4175 getF32Constant(DAG, 0x40525723));
4176 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
4177 SDValue t7 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
4178 getF32Constant(DAG, 0x40aaf200));
4179 SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
4180 SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8,
4181 getF32Constant(DAG, 0x40c39dad));
4182 SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X);
4183 Log2ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t10,
4184 getF32Constant(DAG, 0x4042902c));
4187 return DAG.getNode(ISD::FADD, dl, MVT::f32, LogOfExponent, Log2ofMantissa);
4190 // No special expansion.
4191 return DAG.getNode(ISD::FLOG2, dl, Op.getValueType(), Op);
4194 /// expandLog10 - Lower a log10 intrinsic. Handles the special sequences for
4195 /// limited-precision mode.
4196 static SDValue expandLog10(SDLoc dl, SDValue Op, SelectionDAG &DAG,
4197 const TargetLowering &TLI) {
4198 if (Op.getValueType() == MVT::f32 &&
4199 LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
4200 SDValue Op1 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Op);
4202 // Scale the exponent by log10(2) [0.30102999f].
4203 SDValue Exp = GetExponent(DAG, Op1, TLI, dl);
4204 SDValue LogOfExponent = DAG.getNode(ISD::FMUL, dl, MVT::f32, Exp,
4205 getF32Constant(DAG, 0x3e9a209a));
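    // This uses log10(x) = Exponent * log10(2) + log10(Significand), where
    // 0x3e9a209a is log10(2) ~= 0.30103f.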
4207 // Get the significand and build it into a floating-point number with
4209 SDValue X = GetSignificand(DAG, Op1, dl);
4211 SDValue Log10ofMantissa;
4212 if (LimitFloatPrecision <= 6) {
4213 // For floating-point precision of 6:
4215 // Log10ofMantissa =
4217 // (0.60948995f - 0.10380950f * x) * x;
4219 // error 0.0014886165, which is 6 bits
4220 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
4221 getF32Constant(DAG, 0xbdd49a13));
4222 SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
4223 getF32Constant(DAG, 0x3f1c0789));
4224 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
4225 Log10ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
4226 getF32Constant(DAG, 0x3f011300));
4227 } else if (LimitFloatPrecision <= 12) {
4228 // For floating-point precision of 12:
4230 // Log10ofMantissa =
4233 // (-0.31664806f + 0.47637168e-1f * x) * x) * x;
4235 // error 0.00019228036, which is better than 12 bits
4236 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
4237 getF32Constant(DAG, 0x3d431f31));
4238 SDValue t1 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t0,
4239 getF32Constant(DAG, 0x3ea21fb2));
4240 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
4241 SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
4242 getF32Constant(DAG, 0x3f6ae232));
4243 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
4244 Log10ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t4,
4245 getF32Constant(DAG, 0x3f25f7c3));
4246 } else { // LimitFloatPrecision <= 18
4247 // For floating-point precision of 18:
4249 // Log10ofMantissa =
4254 // (-0.12539807f + 0.13508273e-1f * x) * x) * x) * x) * x;
4256 // error 0.0000037995730, which is better than 18 bits
4257 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
4258 getF32Constant(DAG, 0x3c5d51ce));
4259 SDValue t1 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t0,
4260 getF32Constant(DAG, 0x3e00685a));
4261 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
4262 SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
4263 getF32Constant(DAG, 0x3efb6798));
4264 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
4265 SDValue t5 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t4,
4266 getF32Constant(DAG, 0x3f88d192));
4267 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
4268 SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
4269 getF32Constant(DAG, 0x3fc4316c));
4270 SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
4271 Log10ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t8,
4272 getF32Constant(DAG, 0x3f57ce70));
4275 return DAG.getNode(ISD::FADD, dl, MVT::f32, LogOfExponent, Log10ofMantissa);
4278 // No special expansion.
4279 return DAG.getNode(ISD::FLOG10, dl, Op.getValueType(), Op);
4282 /// expandExp2 - Lower an exp2 intrinsic. Handles the special sequences for
4283 /// limited-precision mode.
4284 static SDValue expandExp2(SDLoc dl, SDValue Op, SelectionDAG &DAG,
4285 const TargetLowering &TLI) {
4286 if (Op.getValueType() == MVT::f32 &&
4287 LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
4288 SDValue IntegerPartOfX = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, Op);
4290 // FractionalPartOfX = x - (float)IntegerPartOfX;
4291 SDValue t1 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, IntegerPartOfX);
4292 SDValue X = DAG.getNode(ISD::FSUB, dl, MVT::f32, Op, t1);
4294 // IntegerPartOfX <<= 23;
4295 IntegerPartOfX = DAG.getNode(ISD::SHL, dl, MVT::i32, IntegerPartOfX,
4296 DAG.getConstant(23, TLI.getPointerTy()));
4298 SDValue TwoToFractionalPartOfX;
4299 if (LimitFloatPrecision <= 6) {
4300 // For floating-point precision of 6:
4302 // TwoToFractionalPartOfX =
4304 // (0.735607626f + 0.252464424f * x) * x;
4306 // error 0.0144103317, which is 6 bits
4307 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
4308 getF32Constant(DAG, 0x3e814304));
4309 SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
4310 getF32Constant(DAG, 0x3f3c50c8));
4311 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
4312 TwoToFractionalPartOfX = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
4313 getF32Constant(DAG, 0x3f7f5e7e));
4314 } else if (LimitFloatPrecision <= 12) {
4315 // For floating-point precision of 12:
4317 // TwoToFractionalPartOfX =
4320 // (0.224338339f + 0.792043434e-1f * x) * x) * x;
4322 // error 0.000107046256, which is 13 to 14 bits
4323 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
4324 getF32Constant(DAG, 0x3da235e3));
4325 SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
4326 getF32Constant(DAG, 0x3e65b8f3));
4327 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
4328 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
4329 getF32Constant(DAG, 0x3f324b07));
4330 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
4331 TwoToFractionalPartOfX = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
4332 getF32Constant(DAG, 0x3f7ff8fd));
4333 } else { // LimitFloatPrecision <= 18
4334 // For floating-point precision of 18:
4336 // TwoToFractionalPartOfX =
4340 // (0.554906021e-1f +
4341 // (0.961591928e-2f +
4342 // (0.136028312e-2f + 0.157059148e-3f *x)*x)*x)*x)*x)*x;
4343 // error 2.47208000*10^(-7), which is better than 18 bits
4344 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
4345 getF32Constant(DAG, 0x3924b03e));
4346 SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
4347 getF32Constant(DAG, 0x3ab24b87));
4348 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
4349 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
4350 getF32Constant(DAG, 0x3c1d8c17));
4351 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
4352 SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
4353 getF32Constant(DAG, 0x3d634a1d));
4354 SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
4355 SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8,
4356 getF32Constant(DAG, 0x3e75fe14));
4357 SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X);
4358 SDValue t11 = DAG.getNode(ISD::FADD, dl, MVT::f32, t10,
4359 getF32Constant(DAG, 0x3f317234));
4360 SDValue t12 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t11, X);
4361 TwoToFractionalPartOfX = DAG.getNode(ISD::FADD, dl, MVT::f32, t12,
4362 getF32Constant(DAG, 0x3f800000));
4365 // Add the exponent into the result in integer domain.
4366 SDValue t13 = DAG.getNode(ISD::BITCAST, dl, MVT::i32,
4367 TwoToFractionalPartOfX);
4368 return DAG.getNode(ISD::BITCAST, dl, MVT::f32,
4369 DAG.getNode(ISD::ADD, dl, MVT::i32,
4370 t13, IntegerPartOfX));
4373 // No special expansion.
4374 return DAG.getNode(ISD::FEXP2, dl, Op.getValueType(), Op);
4377 /// expandPow - Lower a pow intrinsic. Handles the special sequences for
4378 /// limited-precision mode when the base is exactly 10.0f.
4379 static SDValue expandPow(SDLoc dl, SDValue LHS, SDValue RHS,
4380 SelectionDAG &DAG, const TargetLowering &TLI) {
4381 bool IsExp10 = false;
4382 if (LHS.getValueType() == MVT::f32 && RHS.getValueType() == MVT::f32 &&
4383 LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
4384 if (ConstantFPSDNode *LHSC = dyn_cast<ConstantFPSDNode>(LHS)) {
4386 IsExp10 = LHSC->isExactlyValue(Ten);
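  // Only pow with a constant base of exactly 10.0f gets the special expansion;
  // it is lowered as 2^(x * log2(10)) below.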
4391     // Put the exponent in the right bit position for later addition to the final result:
4394 // #define LOG2OF10 3.3219281f
4395 // IntegerPartOfX = (int32_t)(x * LOG2OF10);
4396 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, RHS,
4397 getF32Constant(DAG, 0x40549a78));
4398 SDValue IntegerPartOfX = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, t0);
4400 // FractionalPartOfX = x - (float)IntegerPartOfX;
4401 SDValue t1 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, IntegerPartOfX);
4402 SDValue X = DAG.getNode(ISD::FSUB, dl, MVT::f32, t0, t1);
4404 // IntegerPartOfX <<= 23;
4405 IntegerPartOfX = DAG.getNode(ISD::SHL, dl, MVT::i32, IntegerPartOfX,
4406 DAG.getConstant(23, TLI.getPointerTy()));
4408 SDValue TwoToFractionalPartOfX;
4409 if (LimitFloatPrecision <= 6) {
4410 // For floating-point precision of 6:
4412 // twoToFractionalPartOfX =
4414 // (0.735607626f + 0.252464424f * x) * x;
4416 // error 0.0144103317, which is 6 bits
4417 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
4418 getF32Constant(DAG, 0x3e814304));
4419 SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
4420 getF32Constant(DAG, 0x3f3c50c8));
4421 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
4422 TwoToFractionalPartOfX = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
4423 getF32Constant(DAG, 0x3f7f5e7e));
4424 } else if (LimitFloatPrecision <= 12) {
4425 // For floating-point precision of 12:
4427 // TwoToFractionalPartOfX =
4430 // (0.224338339f + 0.792043434e-1f * x) * x) * x;
4432 // error 0.000107046256, which is 13 to 14 bits
4433 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
4434 getF32Constant(DAG, 0x3da235e3));
4435 SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
4436 getF32Constant(DAG, 0x3e65b8f3));
4437 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
4438 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
4439 getF32Constant(DAG, 0x3f324b07));
4440 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
4441 TwoToFractionalPartOfX = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
4442 getF32Constant(DAG, 0x3f7ff8fd));
4443 } else { // LimitFloatPrecision <= 18
4444 // For floating-point precision of 18:
4446 // TwoToFractionalPartOfX =
4450 // (0.554906021e-1f +
4451 // (0.961591928e-2f +
4452 // (0.136028312e-2f + 0.157059148e-3f *x)*x)*x)*x)*x)*x;
4453 // error 2.47208000*10^(-7), which is better than 18 bits
4454 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
4455 getF32Constant(DAG, 0x3924b03e));
4456 SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
4457 getF32Constant(DAG, 0x3ab24b87));
4458 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
4459 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
4460 getF32Constant(DAG, 0x3c1d8c17));
4461 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
4462 SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
4463 getF32Constant(DAG, 0x3d634a1d));
4464 SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
4465 SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8,
4466 getF32Constant(DAG, 0x3e75fe14));
4467 SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X);
4468 SDValue t11 = DAG.getNode(ISD::FADD, dl, MVT::f32, t10,
4469 getF32Constant(DAG, 0x3f317234));
4470 SDValue t12 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t11, X);
4471 TwoToFractionalPartOfX = DAG.getNode(ISD::FADD, dl, MVT::f32, t12,
4472 getF32Constant(DAG, 0x3f800000));
4475 SDValue t13 = DAG.getNode(ISD::BITCAST, dl,MVT::i32,TwoToFractionalPartOfX);
4476 return DAG.getNode(ISD::BITCAST, dl, MVT::f32,
4477 DAG.getNode(ISD::ADD, dl, MVT::i32,
4478 t13, IntegerPartOfX));
4481 // No special expansion.
4482 return DAG.getNode(ISD::FPOW, dl, LHS.getValueType(), LHS, RHS);
4486 /// ExpandPowI - Expand a llvm.powi intrinsic.
4487 static SDValue ExpandPowI(SDLoc DL, SDValue LHS, SDValue RHS,
4488 SelectionDAG &DAG) {
4489 // If RHS is a constant, we can expand this out to a multiplication tree,
4490 // otherwise we end up lowering to a call to __powidf2 (for example). When
4491 // optimizing for size, we only want to do this if the expansion would produce
4492 // a small number of multiplies, otherwise we do the full expansion.
4493 if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS)) {
4494 // Get the exponent as a positive value.
4495 unsigned Val = RHSC->getSExtValue();
4496 if ((int)Val < 0) Val = -Val;
4498 // powi(x, 0) -> 1.0
4500 return DAG.getConstantFP(1.0, LHS.getValueType());
4502 const Function *F = DAG.getMachineFunction().getFunction();
4503 if (!F->getAttributes().hasAttribute(AttributeSet::FunctionIndex,
4504 Attribute::OptimizeForSize) ||
4505 // If optimizing for size, don't insert too many multiplies. This
4506 // inserts up to 5 multiplies.
4507 CountPopulation_32(Val)+Log2_32(Val) < 7) {
4508 // We use the simple binary decomposition method to generate the multiply
4509 // sequence. There are more optimal ways to do this (for example,
4510 // powi(x,15) generates one more multiply than it should), but this has
4511 // the benefit of being both really simple and much better than a libcall.
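      // For example, powi(x, 13) with 13 == 0b1101 computes x * x^4 * x^8 by
      // repeatedly squaring CurSquare (x, x^2, x^4, x^8); a negative exponent
      // takes the reciprocal of the result at the end.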
4512 SDValue Res; // Logically starts equal to 1.0
4513 SDValue CurSquare = LHS;
4517 Res = DAG.getNode(ISD::FMUL, DL,Res.getValueType(), Res, CurSquare);
4519 Res = CurSquare; // 1.0*CurSquare.
4522 CurSquare = DAG.getNode(ISD::FMUL, DL, CurSquare.getValueType(),
4523 CurSquare, CurSquare);
4527 // If the original was negative, invert the result, producing 1/(x*x*x).
4528 if (RHSC->getSExtValue() < 0)
4529 Res = DAG.getNode(ISD::FDIV, DL, LHS.getValueType(),
4530 DAG.getConstantFP(1.0, LHS.getValueType()), Res);
4535 // Otherwise, expand to a libcall.
4536 return DAG.getNode(ISD::FPOWI, DL, LHS.getValueType(), LHS, RHS);
4539 // getTruncatedArgReg - Find the underlying register used for a truncated argument.
4541 static unsigned getTruncatedArgReg(const SDValue &N) {
4542 if (N.getOpcode() != ISD::TRUNCATE)
4545 const SDValue &Ext = N.getOperand(0);
4546 if (Ext.getOpcode() == ISD::AssertZext ||
4547 Ext.getOpcode() == ISD::AssertSext) {
4548 const SDValue &CFR = Ext.getOperand(0);
4549 if (CFR.getOpcode() == ISD::CopyFromReg)
4550 return cast<RegisterSDNode>(CFR.getOperand(1))->getReg();
4551 if (CFR.getOpcode() == ISD::TRUNCATE)
4552 return getTruncatedArgReg(CFR);
4557 /// EmitFuncArgumentDbgValue - If the DbgValueInst is a dbg_value of a function
4558 /// argument, create the corresponding DBG_VALUE machine instruction for it now.
4559 /// At the end of instruction selection, they will be inserted into the entry BB.
4561 SelectionDAGBuilder::EmitFuncArgumentDbgValue(const Value *V, MDNode *Variable,
4562 int64_t Offset, bool IsIndirect,
4564 const Argument *Arg = dyn_cast<Argument>(V);
4568 MachineFunction &MF = DAG.getMachineFunction();
4569 const TargetInstrInfo *TII = DAG.getTarget().getInstrInfo();
4571 // Ignore inlined function arguments here.
4572 DIVariable DV(Variable);
4573 if (DV.isInlinedFnArgument(MF.getFunction()))
4576 Optional<MachineOperand> Op;
4577 // Some arguments' frame index is recorded during argument lowering.
4578 if (int FI = FuncInfo.getArgumentFrameIndex(Arg))
4579 Op = MachineOperand::CreateFI(FI);
4581 if (!Op && N.getNode()) {
4583 if (N.getOpcode() == ISD::CopyFromReg)
4584 Reg = cast<RegisterSDNode>(N.getOperand(1))->getReg();
4586 Reg = getTruncatedArgReg(N);
4587 if (Reg && TargetRegisterInfo::isVirtualRegister(Reg)) {
4588 MachineRegisterInfo &RegInfo = MF.getRegInfo();
4589 unsigned PR = RegInfo.getLiveInPhysReg(Reg);
4594 Op = MachineOperand::CreateReg(Reg, false);
4598 // Check if ValueMap has reg number.
4599 DenseMap<const Value *, unsigned>::iterator VMI = FuncInfo.ValueMap.find(V);
4600 if (VMI != FuncInfo.ValueMap.end())
4601 Op = MachineOperand::CreateReg(VMI->second, false);
4604 if (!Op && N.getNode())
4605 // Check if frame index is available.
4606 if (LoadSDNode *LNode = dyn_cast<LoadSDNode>(N.getNode()))
4607 if (FrameIndexSDNode *FINode =
4608 dyn_cast<FrameIndexSDNode>(LNode->getBasePtr().getNode()))
4609 Op = MachineOperand::CreateFI(FINode->getIndex());
4615 FuncInfo.ArgDbgValues.push_back(BuildMI(MF, getCurDebugLoc(),
4616 TII->get(TargetOpcode::DBG_VALUE),
4618 Op->getReg(), Offset, Variable));
4620 FuncInfo.ArgDbgValues.push_back(
4621 BuildMI(MF, getCurDebugLoc(), TII->get(TargetOpcode::DBG_VALUE))
4622 .addOperand(*Op).addImm(Offset).addMetadata(Variable));
4627 // VisualStudio defines setjmp as _setjmp
4628 #if defined(_MSC_VER) && defined(setjmp) && \
4629 !defined(setjmp_undefined_for_msvc)
4630 # pragma push_macro("setjmp")
4632 # define setjmp_undefined_for_msvc
4635 /// visitIntrinsicCall - Lower the call to the specified intrinsic function. If
4636 /// we want to emit this as a call to a named external function, return the
4637 /// name; otherwise lower it and return null.
4639 SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
4640 const TargetLowering *TLI = TM.getTargetLowering();
4641 SDLoc sdl = getCurSDLoc();
4642 DebugLoc dl = getCurDebugLoc();
4645 switch (Intrinsic) {
4647 // By default, turn this into a target intrinsic node.
4648 visitTargetIntrinsic(I, Intrinsic);
4650 case Intrinsic::vastart: visitVAStart(I); return nullptr;
4651 case Intrinsic::vaend: visitVAEnd(I); return nullptr;
4652 case Intrinsic::vacopy: visitVACopy(I); return nullptr;
4653 case Intrinsic::returnaddress:
4654 setValue(&I, DAG.getNode(ISD::RETURNADDR, sdl, TLI->getPointerTy(),
4655 getValue(I.getArgOperand(0))));
4657 case Intrinsic::frameaddress:
4658 setValue(&I, DAG.getNode(ISD::FRAMEADDR, sdl, TLI->getPointerTy(),
4659 getValue(I.getArgOperand(0))));
4661 case Intrinsic::read_register: {
4662 Value *Reg = I.getArgOperand(0);
4663 SDValue RegName = DAG.getMDNode(cast<MDNode>(Reg));
4664 EVT VT = TM.getTargetLowering()->getValueType(I.getType());
4665 setValue(&I, DAG.getNode(ISD::READ_REGISTER, sdl, VT, RegName));
4668 case Intrinsic::write_register: {
4669 Value *Reg = I.getArgOperand(0);
4670 Value *RegValue = I.getArgOperand(1);
4671 SDValue Chain = getValue(RegValue).getOperand(0);
4672 SDValue RegName = DAG.getMDNode(cast<MDNode>(Reg));
4673 DAG.setRoot(DAG.getNode(ISD::WRITE_REGISTER, sdl, MVT::Other, Chain,
4674 RegName, getValue(RegValue)));
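  // The next two cases return a libcall name instead of lowering the
  // intrinsic: indexing into the string literal skips the leading '_' when
  // the target does not use the underscore-prefixed form.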
4677 case Intrinsic::setjmp:
4678 return &"_setjmp"[!TLI->usesUnderscoreSetJmp()];
4679 case Intrinsic::longjmp:
4680 return &"_longjmp"[!TLI->usesUnderscoreLongJmp()];
4681 case Intrinsic::memcpy: {
4682     // Assert for address < 256 since we support only user defined address spaces.
4684 assert(cast<PointerType>(I.getArgOperand(0)->getType())->getAddressSpace()
4686 cast<PointerType>(I.getArgOperand(1)->getType())->getAddressSpace()
4688 "Unknown address space");
4689 SDValue Op1 = getValue(I.getArgOperand(0));
4690 SDValue Op2 = getValue(I.getArgOperand(1));
4691 SDValue Op3 = getValue(I.getArgOperand(2));
4692 unsigned Align = cast<ConstantInt>(I.getArgOperand(3))->getZExtValue();
4694 Align = 1; // @llvm.memcpy defines 0 and 1 to both mean no alignment.
4695 bool isVol = cast<ConstantInt>(I.getArgOperand(4))->getZExtValue();
4696 DAG.setRoot(DAG.getMemcpy(getRoot(), sdl, Op1, Op2, Op3, Align, isVol, false,
4697 MachinePointerInfo(I.getArgOperand(0)),
4698 MachinePointerInfo(I.getArgOperand(1))));
4701 case Intrinsic::memset: {
4702     // Assert for address < 256 since we support only user defined address spaces.
4704 assert(cast<PointerType>(I.getArgOperand(0)->getType())->getAddressSpace()
4706 "Unknown address space");
4707 SDValue Op1 = getValue(I.getArgOperand(0));
4708 SDValue Op2 = getValue(I.getArgOperand(1));
4709 SDValue Op3 = getValue(I.getArgOperand(2));
4710 unsigned Align = cast<ConstantInt>(I.getArgOperand(3))->getZExtValue();
4712 Align = 1; // @llvm.memset defines 0 and 1 to both mean no alignment.
4713 bool isVol = cast<ConstantInt>(I.getArgOperand(4))->getZExtValue();
4714 DAG.setRoot(DAG.getMemset(getRoot(), sdl, Op1, Op2, Op3, Align, isVol,
4715 MachinePointerInfo(I.getArgOperand(0))));
4718 case Intrinsic::memmove: {
4719     // Assert for address < 256 since we support only user defined address spaces.
4721 assert(cast<PointerType>(I.getArgOperand(0)->getType())->getAddressSpace()
4723 cast<PointerType>(I.getArgOperand(1)->getType())->getAddressSpace()
4725 "Unknown address space");
4726 SDValue Op1 = getValue(I.getArgOperand(0));
4727 SDValue Op2 = getValue(I.getArgOperand(1));
4728 SDValue Op3 = getValue(I.getArgOperand(2));
4729 unsigned Align = cast<ConstantInt>(I.getArgOperand(3))->getZExtValue();
4731 Align = 1; // @llvm.memmove defines 0 and 1 to both mean no alignment.
4732 bool isVol = cast<ConstantInt>(I.getArgOperand(4))->getZExtValue();
4733 DAG.setRoot(DAG.getMemmove(getRoot(), sdl, Op1, Op2, Op3, Align, isVol,
4734 MachinePointerInfo(I.getArgOperand(0)),
4735 MachinePointerInfo(I.getArgOperand(1))));
4738 case Intrinsic::dbg_declare: {
4739 const DbgDeclareInst &DI = cast<DbgDeclareInst>(I);
4740 MDNode *Variable = DI.getVariable();
4741 const Value *Address = DI.getAddress();
4742 DIVariable DIVar(Variable);
4743 assert((!DIVar || DIVar.isVariable()) &&
4744 "Variable in DbgDeclareInst should be either null or a DIVariable.");
4745 if (!Address || !DIVar) {
4746 DEBUG(dbgs() << "Dropping debug info for " << DI << "\n");
4750 // Check if address has undef value.
4751 if (isa<UndefValue>(Address) ||
4752 (Address->use_empty() && !isa<Argument>(Address))) {
4753 DEBUG(dbgs() << "Dropping debug info for " << DI << "\n");
4757 SDValue &N = NodeMap[Address];
4758 if (!N.getNode() && isa<Argument>(Address))
4759 // Check unused arguments map.
4760 N = UnusedArgNodeMap[Address];
4763 if (const BitCastInst *BCI = dyn_cast<BitCastInst>(Address))
4764 Address = BCI->getOperand(0);
4765 // Parameters are handled specially.
4767 (DIVariable(Variable).getTag() == dwarf::DW_TAG_arg_variable ||
4768 isa<Argument>(Address));
4770 const AllocaInst *AI = dyn_cast<AllocaInst>(Address);
4772 if (isParameter && !AI) {
4773 FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(N.getNode());
4775 // Byval parameter. We have a frame index at this point.
4776 SDV = DAG.getFrameIndexDbgValue(Variable, FINode->getIndex(),
4777 0, dl, SDNodeOrder);
4779 // Address is an argument, so try to emit its dbg value using
4780 // virtual register info from the FuncInfo.ValueMap.
4781 EmitFuncArgumentDbgValue(Address, Variable, 0, false, N);
4785 SDV = DAG.getDbgValue(Variable, N.getNode(), N.getResNo(),
4786 true, 0, dl, SDNodeOrder);
4788 // Can't do anything with other non-AI cases yet.
4789 DEBUG(dbgs() << "Dropping debug info for " << DI << "\n");
4790 DEBUG(dbgs() << "non-AllocaInst issue for Address: \n\t");
4791 DEBUG(Address->dump());
4794 DAG.AddDbgValue(SDV, N.getNode(), isParameter);
4796 // If Address is an argument then try to emit its dbg value using
4797 // virtual register info from the FuncInfo.ValueMap.
4798 if (!EmitFuncArgumentDbgValue(Address, Variable, 0, false, N)) {
4799       // If the variable is pinned by an alloca in a dominating bb then
4800       // use StaticAllocaMap.
4801 if (const AllocaInst *AI = dyn_cast<AllocaInst>(Address)) {
4802 if (AI->getParent() != DI.getParent()) {
4803 DenseMap<const AllocaInst*, int>::iterator SI =
4804 FuncInfo.StaticAllocaMap.find(AI);
4805 if (SI != FuncInfo.StaticAllocaMap.end()) {
4806 SDV = DAG.getFrameIndexDbgValue(Variable, SI->second,
4807 0, dl, SDNodeOrder);
4808 DAG.AddDbgValue(SDV, nullptr, false);
4813 DEBUG(dbgs() << "Dropping debug info for " << DI << "\n");
4818 case Intrinsic::dbg_value: {
4819 const DbgValueInst &DI = cast<DbgValueInst>(I);
4820 DIVariable DIVar(DI.getVariable());
4821 assert((!DIVar || DIVar.isVariable()) &&
4822 "Variable in DbgValueInst should be either null or a DIVariable.");
4826 MDNode *Variable = DI.getVariable();
4827 uint64_t Offset = DI.getOffset();
4828 const Value *V = DI.getValue();
4833 if (isa<ConstantInt>(V) || isa<ConstantFP>(V) || isa<UndefValue>(V)) {
4834 SDV = DAG.getConstantDbgValue(Variable, V, Offset, dl, SDNodeOrder);
4835 DAG.AddDbgValue(SDV, nullptr, false);
4837 // Do not use getValue() in here; we don't want to generate code at
4838 // this point if it hasn't been done yet.
4839 SDValue N = NodeMap[V];
4840 if (!N.getNode() && isa<Argument>(V))
4841 // Check unused arguments map.
4842 N = UnusedArgNodeMap[V];
4844 // A dbg.value for an alloca is always indirect.
4845 bool IsIndirect = isa<AllocaInst>(V) || Offset != 0;
4846 if (!EmitFuncArgumentDbgValue(V, Variable, Offset, IsIndirect, N)) {
4847 SDV = DAG.getDbgValue(Variable, N.getNode(),
4848 N.getResNo(), IsIndirect,
4849 Offset, dl, SDNodeOrder);
4850 DAG.AddDbgValue(SDV, N.getNode(), false);
4852 } else if (!V->use_empty() ) {
4853 // Do not call getValue(V) yet, as we don't want to generate code.
4854 // Remember it for later.
4855 DanglingDebugInfo DDI(&DI, dl, SDNodeOrder);
4856 DanglingDebugInfoMap[V] = DDI;
4858 // We may expand this to cover more cases. One case where we have no
4859 // data available is an unreferenced parameter.
4860 DEBUG(dbgs() << "Dropping debug info for " << DI << "\n");
4864 // Build a debug info table entry.
4865 if (const BitCastInst *BCI = dyn_cast<BitCastInst>(V))
4866 V = BCI->getOperand(0);
4867 const AllocaInst *AI = dyn_cast<AllocaInst>(V);
4868 // Don't handle byval struct arguments or VLAs, for example.
4870 DEBUG(dbgs() << "Dropping debug location info for:\n " << DI << "\n");
4871 DEBUG(dbgs() << " Last seen at:\n " << *V << "\n");
4874 DenseMap<const AllocaInst*, int>::iterator SI =
4875 FuncInfo.StaticAllocaMap.find(AI);
4876 if (SI == FuncInfo.StaticAllocaMap.end())
4877 return nullptr; // VLAs.
4881 case Intrinsic::eh_typeid_for: {
4882 // Find the type id for the given typeinfo.
4883 GlobalVariable *GV = ExtractTypeInfo(I.getArgOperand(0));
4884 unsigned TypeID = DAG.getMachineFunction().getMMI().getTypeIDFor(GV);
4885 Res = DAG.getConstant(TypeID, MVT::i32);
4890 case Intrinsic::eh_return_i32:
4891 case Intrinsic::eh_return_i64:
4892 DAG.getMachineFunction().getMMI().setCallsEHReturn(true);
4893 DAG.setRoot(DAG.getNode(ISD::EH_RETURN, sdl,
4896 getValue(I.getArgOperand(0)),
4897 getValue(I.getArgOperand(1))));
4899 case Intrinsic::eh_unwind_init:
4900 DAG.getMachineFunction().getMMI().setCallsUnwindInit(true);
4902 case Intrinsic::eh_dwarf_cfa: {
4903 SDValue CfaArg = DAG.getSExtOrTrunc(getValue(I.getArgOperand(0)), sdl,
4904 TLI->getPointerTy());
4905 SDValue Offset = DAG.getNode(ISD::ADD, sdl,
4906 CfaArg.getValueType(),
4907 DAG.getNode(ISD::FRAME_TO_ARGS_OFFSET, sdl,
4908 CfaArg.getValueType()),
4910 SDValue FA = DAG.getNode(ISD::FRAMEADDR, sdl,
4911 TLI->getPointerTy(),
4912 DAG.getConstant(0, TLI->getPointerTy()));
4913 setValue(&I, DAG.getNode(ISD::ADD, sdl, FA.getValueType(),
4917 case Intrinsic::eh_sjlj_callsite: {
4918 MachineModuleInfo &MMI = DAG.getMachineFunction().getMMI();
4919 ConstantInt *CI = dyn_cast<ConstantInt>(I.getArgOperand(0));
4920 assert(CI && "Non-constant call site value in eh.sjlj.callsite!");
4921 assert(MMI.getCurrentCallSite() == 0 && "Overlapping call sites!");
4923 MMI.setCurrentCallSite(CI->getZExtValue());
4926 case Intrinsic::eh_sjlj_functioncontext: {
4927 // Get and store the index of the function context.
4928 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
4930 cast<AllocaInst>(I.getArgOperand(0)->stripPointerCasts());
4931 int FI = FuncInfo.StaticAllocaMap[FnCtx];
4932 MFI->setFunctionContextIndex(FI);
4935 case Intrinsic::eh_sjlj_setjmp: {
4938 Ops[1] = getValue(I.getArgOperand(0));
4939 SDValue Op = DAG.getNode(ISD::EH_SJLJ_SETJMP, sdl,
4940 DAG.getVTList(MVT::i32, MVT::Other), Ops);
4941 setValue(&I, Op.getValue(0));
4942 DAG.setRoot(Op.getValue(1));
4945 case Intrinsic::eh_sjlj_longjmp: {
4946 DAG.setRoot(DAG.getNode(ISD::EH_SJLJ_LONGJMP, sdl, MVT::Other,
4947 getRoot(), getValue(I.getArgOperand(0))));
4951 case Intrinsic::x86_mmx_pslli_w:
4952 case Intrinsic::x86_mmx_pslli_d:
4953 case Intrinsic::x86_mmx_pslli_q:
4954 case Intrinsic::x86_mmx_psrli_w:
4955 case Intrinsic::x86_mmx_psrli_d:
4956 case Intrinsic::x86_mmx_psrli_q:
4957 case Intrinsic::x86_mmx_psrai_w:
4958 case Intrinsic::x86_mmx_psrai_d: {
4959 SDValue ShAmt = getValue(I.getArgOperand(1));
4960 if (isa<ConstantSDNode>(ShAmt)) {
4961 visitTargetIntrinsic(I, Intrinsic);
4964 unsigned NewIntrinsic = 0;
4965 EVT ShAmtVT = MVT::v2i32;
4966 switch (Intrinsic) {
4967 case Intrinsic::x86_mmx_pslli_w:
4968 NewIntrinsic = Intrinsic::x86_mmx_psll_w;
4970 case Intrinsic::x86_mmx_pslli_d:
4971 NewIntrinsic = Intrinsic::x86_mmx_psll_d;
4973 case Intrinsic::x86_mmx_pslli_q:
4974 NewIntrinsic = Intrinsic::x86_mmx_psll_q;
4976 case Intrinsic::x86_mmx_psrli_w:
4977 NewIntrinsic = Intrinsic::x86_mmx_psrl_w;
4979 case Intrinsic::x86_mmx_psrli_d:
4980 NewIntrinsic = Intrinsic::x86_mmx_psrl_d;
4982 case Intrinsic::x86_mmx_psrli_q:
4983 NewIntrinsic = Intrinsic::x86_mmx_psrl_q;
4985 case Intrinsic::x86_mmx_psrai_w:
4986 NewIntrinsic = Intrinsic::x86_mmx_psra_w;
4988 case Intrinsic::x86_mmx_psrai_d:
4989 NewIntrinsic = Intrinsic::x86_mmx_psra_d;
4991 default: llvm_unreachable("Impossible intrinsic"); // Can't reach here.
4994 // The vector shift intrinsics with scalar shift amounts use 32-bit values, but
4995 // the SSE2/MMX shift instructions read 64 bits. Set the upper 32 bits to zero.
4997 // We must do this early because v2i32 is not a legal type.
5000 ShOps[1] = DAG.getConstant(0, MVT::i32);
5001 ShAmt = DAG.getNode(ISD::BUILD_VECTOR, sdl, ShAmtVT, ShOps);
5002 EVT DestVT = TLI->getValueType(I.getType());
5003 ShAmt = DAG.getNode(ISD::BITCAST, sdl, DestVT, ShAmt);
5004 Res = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, sdl, DestVT,
5005 DAG.getConstant(NewIntrinsic, MVT::i32),
5006 getValue(I.getArgOperand(0)), ShAmt);
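// Illustrative only: when the shift amount is not a constant, a call such as
//   call x86_mmx @llvm.x86.mmx.pslli.w(x86_mmx %v, i32 %amt)
// is re-targeted to the register form @llvm.x86.mmx.psll.w, with %amt widened
// to 64 bits via the v2i32 <%amt, 0> build_vector and bitcast created above
// (pseudo-IR; the rewrite is done directly on DAG nodes, not on the IR).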
5010 case Intrinsic::x86_avx_vinsertf128_pd_256:
5011 case Intrinsic::x86_avx_vinsertf128_ps_256:
5012 case Intrinsic::x86_avx_vinsertf128_si_256:
5013 case Intrinsic::x86_avx2_vinserti128: {
5014 EVT DestVT = TLI->getValueType(I.getType());
5015 EVT ElVT = TLI->getValueType(I.getArgOperand(1)->getType());
5016 uint64_t Idx = (cast<ConstantInt>(I.getArgOperand(2))->getZExtValue() & 1) *
5017 ElVT.getVectorNumElements();
5018 Res = DAG.getNode(ISD::INSERT_SUBVECTOR, sdl, DestVT,
5019 getValue(I.getArgOperand(0)),
5020 getValue(I.getArgOperand(1)),
5021 DAG.getConstant(Idx, TLI->getVectorIdxTy()));
5025 case Intrinsic::x86_avx_vextractf128_pd_256:
5026 case Intrinsic::x86_avx_vextractf128_ps_256:
5027 case Intrinsic::x86_avx_vextractf128_si_256:
5028 case Intrinsic::x86_avx2_vextracti128: {
5029 EVT DestVT = TLI->getValueType(I.getType());
5030 uint64_t Idx = (cast<ConstantInt>(I.getArgOperand(1))->getZExtValue() & 1) *
5031 DestVT.getVectorNumElements();
5032 Res = DAG.getNode(ISD::EXTRACT_SUBVECTOR, sdl, DestVT,
5033 getValue(I.getArgOperand(0)),
5034 DAG.getConstant(Idx, TLI->getVectorIdxTy()));
5038 case Intrinsic::convertff:
5039 case Intrinsic::convertfsi:
5040 case Intrinsic::convertfui:
5041 case Intrinsic::convertsif:
5042 case Intrinsic::convertuif:
5043 case Intrinsic::convertss:
5044 case Intrinsic::convertsu:
5045 case Intrinsic::convertus:
5046 case Intrinsic::convertuu: {
5047 ISD::CvtCode Code = ISD::CVT_INVALID;
5048 switch (Intrinsic) {
5049 default: llvm_unreachable("Impossible intrinsic"); // Can't reach here.
5050 case Intrinsic::convertff: Code = ISD::CVT_FF; break;
5051 case Intrinsic::convertfsi: Code = ISD::CVT_FS; break;
5052 case Intrinsic::convertfui: Code = ISD::CVT_FU; break;
5053 case Intrinsic::convertsif: Code = ISD::CVT_SF; break;
5054 case Intrinsic::convertuif: Code = ISD::CVT_UF; break;
5055 case Intrinsic::convertss: Code = ISD::CVT_SS; break;
5056 case Intrinsic::convertsu: Code = ISD::CVT_SU; break;
5057 case Intrinsic::convertus: Code = ISD::CVT_US; break;
5058 case Intrinsic::convertuu: Code = ISD::CVT_UU; break;
5060 EVT DestVT = TLI->getValueType(I.getType());
5061 const Value *Op1 = I.getArgOperand(0);
5062 Res = DAG.getConvertRndSat(DestVT, sdl, getValue(Op1),
5063 DAG.getValueType(DestVT),
5064 DAG.getValueType(getValue(Op1).getValueType()),
5065 getValue(I.getArgOperand(1)),
5066 getValue(I.getArgOperand(2)),
5071 case Intrinsic::powi:
5072 setValue(&I, ExpandPowI(sdl, getValue(I.getArgOperand(0)),
5073 getValue(I.getArgOperand(1)), DAG));
5075 case Intrinsic::log:
5076 setValue(&I, expandLog(sdl, getValue(I.getArgOperand(0)), DAG, *TLI));
5078 case Intrinsic::log2:
5079 setValue(&I, expandLog2(sdl, getValue(I.getArgOperand(0)), DAG, *TLI));
5081 case Intrinsic::log10:
5082 setValue(&I, expandLog10(sdl, getValue(I.getArgOperand(0)), DAG, *TLI));
5084 case Intrinsic::exp:
5085 setValue(&I, expandExp(sdl, getValue(I.getArgOperand(0)), DAG, *TLI));
5087 case Intrinsic::exp2:
5088 setValue(&I, expandExp2(sdl, getValue(I.getArgOperand(0)), DAG, *TLI));
5090 case Intrinsic::pow:
5091 setValue(&I, expandPow(sdl, getValue(I.getArgOperand(0)),
5092 getValue(I.getArgOperand(1)), DAG, *TLI));
5094 case Intrinsic::sqrt:
5095 case Intrinsic::fabs:
5096 case Intrinsic::sin:
5097 case Intrinsic::cos:
5098 case Intrinsic::floor:
5099 case Intrinsic::ceil:
5100 case Intrinsic::trunc:
5101 case Intrinsic::rint:
5102 case Intrinsic::nearbyint:
5103 case Intrinsic::round: {
5105 switch (Intrinsic) {
5106 default: llvm_unreachable("Impossible intrinsic"); // Can't reach here.
5107 case Intrinsic::sqrt: Opcode = ISD::FSQRT; break;
5108 case Intrinsic::fabs: Opcode = ISD::FABS; break;
5109 case Intrinsic::sin: Opcode = ISD::FSIN; break;
5110 case Intrinsic::cos: Opcode = ISD::FCOS; break;
5111 case Intrinsic::floor: Opcode = ISD::FFLOOR; break;
5112 case Intrinsic::ceil: Opcode = ISD::FCEIL; break;
5113 case Intrinsic::trunc: Opcode = ISD::FTRUNC; break;
5114 case Intrinsic::rint: Opcode = ISD::FRINT; break;
5115 case Intrinsic::nearbyint: Opcode = ISD::FNEARBYINT; break;
5116 case Intrinsic::round: Opcode = ISD::FROUND; break;
5119 setValue(&I, DAG.getNode(Opcode, sdl,
5120 getValue(I.getArgOperand(0)).getValueType(),
5121 getValue(I.getArgOperand(0))));
5124 case Intrinsic::copysign:
5125 setValue(&I, DAG.getNode(ISD::FCOPYSIGN, sdl,
5126 getValue(I.getArgOperand(0)).getValueType(),
5127 getValue(I.getArgOperand(0)),
5128 getValue(I.getArgOperand(1))));
5130 case Intrinsic::fma:
5131 setValue(&I, DAG.getNode(ISD::FMA, sdl,
5132 getValue(I.getArgOperand(0)).getValueType(),
5133 getValue(I.getArgOperand(0)),
5134 getValue(I.getArgOperand(1)),
5135 getValue(I.getArgOperand(2))));
5137 case Intrinsic::fmuladd: {
5138 EVT VT = TLI->getValueType(I.getType());
5139 if (TM.Options.AllowFPOpFusion != FPOpFusion::Strict &&
5140 TLI->isFMAFasterThanFMulAndFAdd(VT)) {
5141 setValue(&I, DAG.getNode(ISD::FMA, sdl,
5142 getValue(I.getArgOperand(0)).getValueType(),
5143 getValue(I.getArgOperand(0)),
5144 getValue(I.getArgOperand(1)),
5145 getValue(I.getArgOperand(2))));
5147 SDValue Mul = DAG.getNode(ISD::FMUL, sdl,
5148 getValue(I.getArgOperand(0)).getValueType(),
5149 getValue(I.getArgOperand(0)),
5150 getValue(I.getArgOperand(1)));
5151 SDValue Add = DAG.getNode(ISD::FADD, sdl,
5152 getValue(I.getArgOperand(0)).getValueType(),
5154 getValue(I.getArgOperand(2)));
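// Illustrative only: @llvm.fmuladd.f32(%a, %b, %c) becomes a single FMA node
// when fusion is permitted and the target reports fused multiply-add as faster
// than separate multiply and add; otherwise it is emitted as the FMUL and FADD
// nodes built above.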
5159 case Intrinsic::convert_to_fp16:
5160 setValue(&I, DAG.getNode(ISD::FP32_TO_FP16, sdl,
5161 MVT::i16, getValue(I.getArgOperand(0))));
5163 case Intrinsic::convert_from_fp16:
5164 setValue(&I, DAG.getNode(ISD::FP16_TO_FP32, sdl,
5165 MVT::f32, getValue(I.getArgOperand(0))));
5167 case Intrinsic::pcmarker: {
5168 SDValue Tmp = getValue(I.getArgOperand(0));
5169 DAG.setRoot(DAG.getNode(ISD::PCMARKER, sdl, MVT::Other, getRoot(), Tmp));
5172 case Intrinsic::readcyclecounter: {
5173 SDValue Op = getRoot();
5174 Res = DAG.getNode(ISD::READCYCLECOUNTER, sdl,
5175 DAG.getVTList(MVT::i64, MVT::Other), Op);
5177 DAG.setRoot(Res.getValue(1));
5180 case Intrinsic::bswap:
5181 setValue(&I, DAG.getNode(ISD::BSWAP, sdl,
5182 getValue(I.getArgOperand(0)).getValueType(),
5183 getValue(I.getArgOperand(0))));
5185 case Intrinsic::cttz: {
5186 SDValue Arg = getValue(I.getArgOperand(0));
5187 ConstantInt *CI = cast<ConstantInt>(I.getArgOperand(1));
5188 EVT Ty = Arg.getValueType();
5189 setValue(&I, DAG.getNode(CI->isZero() ? ISD::CTTZ : ISD::CTTZ_ZERO_UNDEF,
5193 case Intrinsic::ctlz: {
5194 SDValue Arg = getValue(I.getArgOperand(0));
5195 ConstantInt *CI = cast<ConstantInt>(I.getArgOperand(1));
5196 EVT Ty = Arg.getValueType();
5197 setValue(&I, DAG.getNode(CI->isZero() ? ISD::CTLZ : ISD::CTLZ_ZERO_UNDEF,
5201 case Intrinsic::ctpop: {
5202 SDValue Arg = getValue(I.getArgOperand(0));
5203 EVT Ty = Arg.getValueType();
5204 setValue(&I, DAG.getNode(ISD::CTPOP, sdl, Ty, Arg));
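// Illustrative only: the second operand of @llvm.cttz/@llvm.ctlz is the
// is_zero_undef flag, e.g.
//   call i32 @llvm.cttz.i32(i32 %x, i1 true)   ; result undefined for %x == 0
// selects the *_ZERO_UNDEF node, while an i1 false operand selects the plain
// CTTZ/CTLZ node; @llvm.ctpop has no such flag.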
5207 case Intrinsic::stacksave: {
5208 SDValue Op = getRoot();
5209 Res = DAG.getNode(ISD::STACKSAVE, sdl,
5210 DAG.getVTList(TLI->getPointerTy(), MVT::Other), Op);
5212 DAG.setRoot(Res.getValue(1));
5215 case Intrinsic::stackrestore: {
5216 Res = getValue(I.getArgOperand(0));
5217 DAG.setRoot(DAG.getNode(ISD::STACKRESTORE, sdl, MVT::Other, getRoot(), Res));
5220 case Intrinsic::stackprotector: {
5221 // Emit code into the DAG to store the stack guard onto the stack.
5222 MachineFunction &MF = DAG.getMachineFunction();
5223 MachineFrameInfo *MFI = MF.getFrameInfo();
5224 EVT PtrTy = TLI->getPointerTy();
5226 SDValue Src = getValue(I.getArgOperand(0)); // The guard's value.
5227 AllocaInst *Slot = cast<AllocaInst>(I.getArgOperand(1));
5229 int FI = FuncInfo.StaticAllocaMap[Slot];
5230 MFI->setStackProtectorIndex(FI);
5232 SDValue FIN = DAG.getFrameIndex(FI, PtrTy);
5234 // Store the stack protector onto the stack.
5235 Res = DAG.getStore(getRoot(), sdl, Src, FIN,
5236 MachinePointerInfo::getFixedStack(FI),
5242 case Intrinsic::objectsize: {
5243 // If we don't know by now, we're never going to know.
5244 ConstantInt *CI = dyn_cast<ConstantInt>(I.getArgOperand(1));
5246 assert(CI && "Non-constant type in __builtin_object_size?");
5248 SDValue Arg = getValue(I.getCalledValue());
5249 EVT Ty = Arg.getValueType();
5252 Res = DAG.getConstant(-1ULL, Ty);
5254 Res = DAG.getConstant(0, Ty);
5259 case Intrinsic::annotation:
5260 case Intrinsic::ptr_annotation:
5261 // Drop the intrinsic, but forward the value
5262 setValue(&I, getValue(I.getOperand(0)));
5264 case Intrinsic::var_annotation:
5265 // Discard annotate attributes
5268 case Intrinsic::init_trampoline: {
5269 const Function *F = cast<Function>(I.getArgOperand(1)->stripPointerCasts());
5273 Ops[1] = getValue(I.getArgOperand(0));
5274 Ops[2] = getValue(I.getArgOperand(1));
5275 Ops[3] = getValue(I.getArgOperand(2));
5276 Ops[4] = DAG.getSrcValue(I.getArgOperand(0));
5277 Ops[5] = DAG.getSrcValue(F);
5279 Res = DAG.getNode(ISD::INIT_TRAMPOLINE, sdl, MVT::Other, Ops);
5284 case Intrinsic::adjust_trampoline: {
5285 setValue(&I, DAG.getNode(ISD::ADJUST_TRAMPOLINE, sdl,
5286 TLI->getPointerTy(),
5287 getValue(I.getArgOperand(0))));
5290 case Intrinsic::gcroot:
5292 const Value *Alloca = I.getArgOperand(0)->stripPointerCasts();
5293 const Constant *TypeMap = cast<Constant>(I.getArgOperand(1));
5295 FrameIndexSDNode *FI = cast<FrameIndexSDNode>(getValue(Alloca).getNode());
5296 GFI->addStackRoot(FI->getIndex(), TypeMap);
5299 case Intrinsic::gcread:
5300 case Intrinsic::gcwrite:
5301 llvm_unreachable("GC failed to lower gcread/gcwrite intrinsics!");
5302 case Intrinsic::flt_rounds:
5303 setValue(&I, DAG.getNode(ISD::FLT_ROUNDS_, sdl, MVT::i32));
5306 case Intrinsic::expect: {
5307 // Just replace __builtin_expect(exp, c) with EXP.
5308 setValue(&I, getValue(I.getArgOperand(0)));
5312 case Intrinsic::debugtrap:
5313 case Intrinsic::trap: {
5314 StringRef TrapFuncName = TM.Options.getTrapFunctionName();
5315 if (TrapFuncName.empty()) {
5316 ISD::NodeType Op = (Intrinsic == Intrinsic::trap) ?
5317 ISD::TRAP : ISD::DEBUGTRAP;
5318 DAG.setRoot(DAG.getNode(Op, sdl,MVT::Other, getRoot()));
5321 TargetLowering::ArgListTy Args;
5323 TargetLowering::CallLoweringInfo CLI(DAG);
5324 CLI.setDebugLoc(sdl).setChain(getRoot())
5325 .setCallee(CallingConv::C, I.getType(),
5326 DAG.getExternalSymbol(TrapFuncName.data(), TLI->getPointerTy()),
5329 std::pair<SDValue, SDValue> Result = TLI->LowerCallTo(CLI);
5330 DAG.setRoot(Result.second);
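// Illustrative only: with -trap-func=mytrap (a hypothetical name), @llvm.trap
// is lowered as an ordinary call to mytrap(); without the option it becomes a
// plain ISD::TRAP (or ISD::DEBUGTRAP) node as in the branch above.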
5334 case Intrinsic::uadd_with_overflow:
5335 case Intrinsic::sadd_with_overflow:
5336 case Intrinsic::usub_with_overflow:
5337 case Intrinsic::ssub_with_overflow:
5338 case Intrinsic::umul_with_overflow:
5339 case Intrinsic::smul_with_overflow: {
5341 switch (Intrinsic) {
5342 default: llvm_unreachable("Impossible intrinsic"); // Can't reach here.
5343 case Intrinsic::uadd_with_overflow: Op = ISD::UADDO; break;
5344 case Intrinsic::sadd_with_overflow: Op = ISD::SADDO; break;
5345 case Intrinsic::usub_with_overflow: Op = ISD::USUBO; break;
5346 case Intrinsic::ssub_with_overflow: Op = ISD::SSUBO; break;
5347 case Intrinsic::umul_with_overflow: Op = ISD::UMULO; break;
5348 case Intrinsic::smul_with_overflow: Op = ISD::SMULO; break;
5350 SDValue Op1 = getValue(I.getArgOperand(0));
5351 SDValue Op2 = getValue(I.getArgOperand(1));
5353 SDVTList VTs = DAG.getVTList(Op1.getValueType(), MVT::i1);
5354 setValue(&I, DAG.getNode(Op, sdl, VTs, Op1, Op2));
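// Illustrative only: e.g.
//   %res = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %a, i32 %b)
// becomes a UADDO node whose first result is the i32 sum and whose second
// (i1) result is the overflow bit.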
5357 case Intrinsic::prefetch: {
5359 unsigned rw = cast<ConstantInt>(I.getArgOperand(1))->getZExtValue();
5361 Ops[1] = getValue(I.getArgOperand(0));
5362 Ops[2] = getValue(I.getArgOperand(1));
5363 Ops[3] = getValue(I.getArgOperand(2));
5364 Ops[4] = getValue(I.getArgOperand(3));
5365 DAG.setRoot(DAG.getMemIntrinsicNode(ISD::PREFETCH, sdl,
5366 DAG.getVTList(MVT::Other), Ops,
5367 EVT::getIntegerVT(*Context, 8),
5368 MachinePointerInfo(I.getArgOperand(0)),
5370 false, /* volatile */
5372 rw==1)); /* write */
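// Illustrative only: e.g.
//   call void @llvm.prefetch(i8* %p, i32 1, i32 3, i32 1)
// requests a write prefetch (rw == 1) with maximal locality; only the rw
// operand is inspected here, the rest are passed through on the node.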
5375 case Intrinsic::lifetime_start:
5376 case Intrinsic::lifetime_end: {
5377 bool IsStart = (Intrinsic == Intrinsic::lifetime_start);
5378 // Stack coloring is not enabled at -O0, so discard the region information.
5379 if (TM.getOptLevel() == CodeGenOpt::None)
5382 SmallVector<Value *, 4> Allocas;
5383 GetUnderlyingObjects(I.getArgOperand(1), Allocas, DL);
5385 for (SmallVectorImpl<Value*>::iterator Object = Allocas.begin(),
5386 E = Allocas.end(); Object != E; ++Object) {
5387 AllocaInst *LifetimeObject = dyn_cast_or_null<AllocaInst>(*Object);
5389 // Could not find an Alloca.
5390 if (!LifetimeObject)
5393 int FI = FuncInfo.StaticAllocaMap[LifetimeObject];
5397 Ops[1] = DAG.getFrameIndex(FI, TLI->getPointerTy(), true);
5398 unsigned Opcode = (IsStart ? ISD::LIFETIME_START : ISD::LIFETIME_END);
5400 Res = DAG.getNode(Opcode, sdl, MVT::Other, Ops);
5405 case Intrinsic::invariant_start:
5406 // Discard region information.
5407 setValue(&I, DAG.getUNDEF(TLI->getPointerTy()));
5409 case Intrinsic::invariant_end:
5410 // Discard region information.
5412 case Intrinsic::stackprotectorcheck: {
5413 // Do not actually emit anything for this basic block. Instead we initialize
5414 // the stack protector descriptor and export the guard variable so we can
5415 // access it in FinishBasicBlock.
5416 const BasicBlock *BB = I.getParent();
5417 SPDescriptor.initialize(BB, FuncInfo.MBBMap[BB], I);
5418 ExportFromCurrentBlock(SPDescriptor.getGuard());
5420 // Flush our exports since we are going to process a terminator.
5421 (void)getControlRoot();
5424 case Intrinsic::clear_cache:
5425 return TLI->getClearCacheBuiltinName();
5426 case Intrinsic::donothing:
5429 case Intrinsic::experimental_stackmap: {
5433 case Intrinsic::experimental_patchpoint_void:
5434 case Intrinsic::experimental_patchpoint_i64: {
5441 void SelectionDAGBuilder::LowerCallTo(ImmutableCallSite CS, SDValue Callee,
5443 MachineBasicBlock *LandingPad) {
5444 PointerType *PT = cast<PointerType>(CS.getCalledValue()->getType());
5445 FunctionType *FTy = cast<FunctionType>(PT->getElementType());
5446 Type *RetTy = FTy->getReturnType();
5447 MachineModuleInfo &MMI = DAG.getMachineFunction().getMMI();
5448 MCSymbol *BeginLabel = nullptr;
5450 TargetLowering::ArgListTy Args;
5451 TargetLowering::ArgListEntry Entry;
5452 Args.reserve(CS.arg_size());
5454 // Check whether the function can return without sret-demotion.
5455 SmallVector<ISD::OutputArg, 4> Outs;
5456 const TargetLowering *TLI = TM.getTargetLowering();
5457 GetReturnInfo(RetTy, CS.getAttributes(), Outs, *TLI);
5459 bool CanLowerReturn = TLI->CanLowerReturn(CS.getCallingConv(),
5460 DAG.getMachineFunction(),
5461 FTy->isVarArg(), Outs,
5464 SDValue DemoteStackSlot;
5465 int DemoteStackIdx = -100;
5467 if (!CanLowerReturn) {
5468 assert(!CS.hasInAllocaArgument() &&
5469 "sret demotion is incompatible with inalloca");
5470 uint64_t TySize = TLI->getDataLayout()->getTypeAllocSize(
5471 FTy->getReturnType());
5472 unsigned Align = TLI->getDataLayout()->getPrefTypeAlignment(
5473 FTy->getReturnType());
5474 MachineFunction &MF = DAG.getMachineFunction();
5475 DemoteStackIdx = MF.getFrameInfo()->CreateStackObject(TySize, Align, false);
5476 Type *StackSlotPtrType = PointerType::getUnqual(FTy->getReturnType());
5478 DemoteStackSlot = DAG.getFrameIndex(DemoteStackIdx, TLI->getPointerTy());
5479 Entry.Node = DemoteStackSlot;
5480 Entry.Ty = StackSlotPtrType;
5481 Entry.isSExt = false;
5482 Entry.isZExt = false;
5483 Entry.isInReg = false;
5484 Entry.isSRet = true;
5485 Entry.isNest = false;
5486 Entry.isByVal = false;
5487 Entry.isReturned = false;
5488 Entry.Alignment = Align;
5489 Args.push_back(Entry);
5490 RetTy = Type::getVoidTy(FTy->getContext());
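// Illustrative only: a call such as
//   %s = call %big.struct @callee()
// whose result cannot be returned in registers is rewritten roughly as
//   call void @callee(%big.struct* sret %demoted.slot)
// and the result is loaded back out of the demoted stack slot further below
// (type and slot names made up).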
5493 for (ImmutableCallSite::arg_iterator i = CS.arg_begin(), e = CS.arg_end();
5495 const Value *V = *i;
5498 if (V->getType()->isEmptyTy())
5501 SDValue ArgNode = getValue(V);
5502 Entry.Node = ArgNode; Entry.Ty = V->getType();
5504 // Skip the first return-type Attribute to get to params.
5505 Entry.setAttributes(&CS, i - CS.arg_begin() + 1);
5506 Args.push_back(Entry);
5510 // Insert a label before the invoke call to mark the try range. This can be
5511 // used to detect deletion of the invoke via the MachineModuleInfo.
5512 BeginLabel = MMI.getContext().CreateTempSymbol();
5514 // For SjLj, keep track of which landing pads go with which invokes
5515 // so as to maintain the ordering of pads in the LSDA.
5516 unsigned CallSiteIndex = MMI.getCurrentCallSite();
5517 if (CallSiteIndex) {
5518 MMI.setCallSiteBeginLabel(BeginLabel, CallSiteIndex);
5519 LPadToCallSiteMap[LandingPad].push_back(CallSiteIndex);
5521 // Now that the call site is handled, stop tracking it.
5522 MMI.setCurrentCallSite(0);
5525 // Both PendingLoads and PendingExports must be flushed here;
5526 // this call might not return.
5528 DAG.setRoot(DAG.getEHLabel(getCurSDLoc(), getControlRoot(), BeginLabel));
5531 // Check if target-independent constraints permit a tail call here.
5532 // Target-dependent constraints are checked within TLI->LowerCallTo.
5533 if (isTailCall && !isInTailCallPosition(CS, *TLI))
5536 TargetLowering::CallLoweringInfo CLI(DAG);
5537 CLI.setDebugLoc(getCurSDLoc()).setChain(getRoot())
5538 .setCallee(RetTy, FTy, Callee, &Args, CS).setTailCall(isTailCall);
5540 std::pair<SDValue,SDValue> Result = TLI->LowerCallTo(CLI);
5541 assert((isTailCall || Result.second.getNode()) &&
5542 "Non-null chain expected with non-tail call!");
5543 assert((Result.second.getNode() || !Result.first.getNode()) &&
5544 "Null value expected with tail call!");
5545 if (Result.first.getNode()) {
5546 setValue(CS.getInstruction(), Result.first);
5547 } else if (!CanLowerReturn && Result.second.getNode()) {
5548 // The instruction result is the result of loading from the
5549 // hidden sret parameter.
5550 SmallVector<EVT, 1> PVTs;
5551 Type *PtrRetTy = PointerType::getUnqual(FTy->getReturnType());
5553 ComputeValueVTs(*TLI, PtrRetTy, PVTs);
5554 assert(PVTs.size() == 1 && "Pointers should fit in one register");
5555 EVT PtrVT = PVTs[0];
5557 SmallVector<EVT, 4> RetTys;
5558 SmallVector<uint64_t, 4> Offsets;
5559 RetTy = FTy->getReturnType();
5560 ComputeValueVTs(*TLI, RetTy, RetTys, &Offsets);
5562 unsigned NumValues = RetTys.size();
5563 SmallVector<SDValue, 4> Values(NumValues);
5564 SmallVector<SDValue, 4> Chains(NumValues);
5566 for (unsigned i = 0; i < NumValues; ++i) {
5567 SDValue Add = DAG.getNode(ISD::ADD, getCurSDLoc(), PtrVT,
5569 DAG.getConstant(Offsets[i], PtrVT));
5570 SDValue L = DAG.getLoad(RetTys[i], getCurSDLoc(), Result.second, Add,
5571 MachinePointerInfo::getFixedStack(DemoteStackIdx, Offsets[i]),
5572 false, false, false, 1);
5574 Chains[i] = L.getValue(1);
5577 SDValue Chain = DAG.getNode(ISD::TokenFactor, getCurSDLoc(),
5578 MVT::Other, Chains);
5579 PendingLoads.push_back(Chain);
5581 setValue(CS.getInstruction(),
5582 DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(),
5583 DAG.getVTList(RetTys), Values));
5586 if (!Result.second.getNode()) {
5587 // As a special case, a null chain means that a tail call has been emitted
5588 // and the DAG root is already updated.
5591 // Since there's no actual continuation from this block, nothing can be
5592 // relying on us setting vregs for them.
5593 PendingExports.clear();
5595 DAG.setRoot(Result.second);
5599 // Insert a label at the end of the invoke call to mark the try range. This
5600 // can be used to detect deletion of the invoke via the MachineModuleInfo.
5601 MCSymbol *EndLabel = MMI.getContext().CreateTempSymbol();
5602 DAG.setRoot(DAG.getEHLabel(getCurSDLoc(), getRoot(), EndLabel));
5604 // Inform MachineModuleInfo of range.
5605 MMI.addInvoke(LandingPad, BeginLabel, EndLabel);
5609 /// IsOnlyUsedInZeroEqualityComparison - Return true if it only matters that the
5610 /// value is equal or not-equal to zero.
5611 static bool IsOnlyUsedInZeroEqualityComparison(const Value *V) {
5612 for (const User *U : V->users()) {
5613 if (const ICmpInst *IC = dyn_cast<ICmpInst>(U))
5614 if (IC->isEquality())
5615 if (const Constant *C = dyn_cast<Constant>(IC->getOperand(1)))
5616 if (C->isNullValue())
5618 // Unknown instruction.
5624 static SDValue getMemCmpLoad(const Value *PtrVal, MVT LoadVT,
5626 SelectionDAGBuilder &Builder) {
5628 // Check to see if this load can be trivially constant folded, e.g. if the
5629 // input is from a string literal.
5630 if (const Constant *LoadInput = dyn_cast<Constant>(PtrVal)) {
5631 // Cast pointer to the type we really want to load.
5632 LoadInput = ConstantExpr::getBitCast(const_cast<Constant *>(LoadInput),
5633 PointerType::getUnqual(LoadTy));
5635 if (const Constant *LoadCst =
5636 ConstantFoldLoadFromConstPtr(const_cast<Constant *>(LoadInput),
5638 return Builder.getValue(LoadCst);
5641 // Otherwise, we have to emit the load. If the pointer is to unfoldable but
5642 // still constant memory, the input chain can be the entry node.
5644 bool ConstantMemory = false;
5646 // Do not serialize (non-volatile) loads of constant memory with anything.
5647 if (Builder.AA->pointsToConstantMemory(PtrVal)) {
5648 Root = Builder.DAG.getEntryNode();
5649 ConstantMemory = true;
5651 // Do not serialize non-volatile loads against each other.
5652 Root = Builder.DAG.getRoot();
5655 SDValue Ptr = Builder.getValue(PtrVal);
5656 SDValue LoadVal = Builder.DAG.getLoad(LoadVT, Builder.getCurSDLoc(), Root,
5657 Ptr, MachinePointerInfo(PtrVal),
5659 false /*nontemporal*/,
5660 false /*isinvariant*/, 1 /* align=1 */);
5662 if (!ConstantMemory)
5663 Builder.PendingLoads.push_back(LoadVal.getValue(1));
5667 /// processIntegerCallValue - Record the value for an instruction that
5668 /// produces an integer result, converting the type where necessary.
5669 void SelectionDAGBuilder::processIntegerCallValue(const Instruction &I,
5672 EVT VT = TM.getTargetLowering()->getValueType(I.getType(), true);
5674 Value = DAG.getSExtOrTrunc(Value, getCurSDLoc(), VT);
5676 Value = DAG.getZExtOrTrunc(Value, getCurSDLoc(), VT);
5677 setValue(&I, Value);
5680 /// visitMemCmpCall - See if we can lower a call to memcmp in an optimized form.
5681 /// If so, return true and lower it, otherwise return false and it will be
5682 /// lowered like a normal call.
5683 bool SelectionDAGBuilder::visitMemCmpCall(const CallInst &I) {
5684 // Verify that the prototype makes sense. int memcmp(void*,void*,size_t)
5685 if (I.getNumArgOperands() != 3)
5688 const Value *LHS = I.getArgOperand(0), *RHS = I.getArgOperand(1);
5689 if (!LHS->getType()->isPointerTy() || !RHS->getType()->isPointerTy() ||
5690 !I.getArgOperand(2)->getType()->isIntegerTy() ||
5691 !I.getType()->isIntegerTy())
5694 const Value *Size = I.getArgOperand(2);
5695 const ConstantInt *CSize = dyn_cast<ConstantInt>(Size);
5696 if (CSize && CSize->getZExtValue() == 0) {
5697 EVT CallVT = TM.getTargetLowering()->getValueType(I.getType(), true);
5698 setValue(&I, DAG.getConstant(0, CallVT));
5702 const TargetSelectionDAGInfo &TSI = DAG.getSelectionDAGInfo();
5703 std::pair<SDValue, SDValue> Res =
5704 TSI.EmitTargetCodeForMemcmp(DAG, getCurSDLoc(), DAG.getRoot(),
5705 getValue(LHS), getValue(RHS), getValue(Size),
5706 MachinePointerInfo(LHS),
5707 MachinePointerInfo(RHS));
5708 if (Res.first.getNode()) {
5709 processIntegerCallValue(I, Res.first, true);
5710 PendingLoads.push_back(Res.second);
5714 // memcmp(S1,S2,2) != 0 -> (*(short*)LHS != *(short*)RHS) != 0
5715 // memcmp(S1,S2,4) != 0 -> (*(int*)LHS != *(int*)RHS) != 0
5716 if (CSize && IsOnlyUsedInZeroEqualityComparison(&I)) {
5717 bool ActuallyDoIt = true;
5720 switch (CSize->getZExtValue()) {
5722 LoadVT = MVT::Other;
5724 ActuallyDoIt = false;
5728 LoadTy = Type::getInt16Ty(CSize->getContext());
5732 LoadTy = Type::getInt32Ty(CSize->getContext());
5736 LoadTy = Type::getInt64Ty(CSize->getContext());
5740 LoadVT = MVT::v4i32;
5741 LoadTy = Type::getInt32Ty(CSize->getContext());
5742 LoadTy = VectorType::get(LoadTy, 4);
5747 // This turns into unaligned loads. We only do this if the target natively
5748 // supports the MVT we'll be loading or if it is small enough (<= 4) that
5749 // we'll only produce a small number of byte loads.
5751 // Require that we can find a legal MVT, and only do this if the target
5752 // supports unaligned loads of that type. Expanding into byte loads would bloat the code.
5754 const TargetLowering *TLI = TM.getTargetLowering();
5755 if (ActuallyDoIt && CSize->getZExtValue() > 4) {
5756 unsigned DstAS = LHS->getType()->getPointerAddressSpace();
5757 unsigned SrcAS = RHS->getType()->getPointerAddressSpace();
5758 // TODO: Handle 5 byte compare as 4-byte + 1 byte.
5759 // TODO: Handle 8 byte compare on x86-32 as two 32-bit loads.
5760 if (!TLI->isTypeLegal(LoadVT) ||
5761 !TLI->allowsUnalignedMemoryAccesses(LoadVT, SrcAS) ||
5762 !TLI->allowsUnalignedMemoryAccesses(LoadVT, DstAS))
5763 ActuallyDoIt = false;
5767 SDValue LHSVal = getMemCmpLoad(LHS, LoadVT, LoadTy, *this);
5768 SDValue RHSVal = getMemCmpLoad(RHS, LoadVT, LoadTy, *this);
5770 SDValue Res = DAG.getSetCC(getCurSDLoc(), MVT::i1, LHSVal, RHSVal,
5772 processIntegerCallValue(I, Res, false);
5781 /// visitMemChrCall -- See if we can lower a memchr call into an optimized
5782 /// form. If so, return true and lower it, otherwise return false and it
5783 /// will be lowered like a normal call.
5784 bool SelectionDAGBuilder::visitMemChrCall(const CallInst &I) {
5785 // Verify that the prototype makes sense. void *memchr(void *, int, size_t)
5786 if (I.getNumArgOperands() != 3)
5789 const Value *Src = I.getArgOperand(0);
5790 const Value *Char = I.getArgOperand(1);
5791 const Value *Length = I.getArgOperand(2);
5792 if (!Src->getType()->isPointerTy() ||
5793 !Char->getType()->isIntegerTy() ||
5794 !Length->getType()->isIntegerTy() ||
5795 !I.getType()->isPointerTy())
5798 const TargetSelectionDAGInfo &TSI = DAG.getSelectionDAGInfo();
5799 std::pair<SDValue, SDValue> Res =
5800 TSI.EmitTargetCodeForMemchr(DAG, getCurSDLoc(), DAG.getRoot(),
5801 getValue(Src), getValue(Char), getValue(Length),
5802 MachinePointerInfo(Src));
5803 if (Res.first.getNode()) {
5804 setValue(&I, Res.first);
5805 PendingLoads.push_back(Res.second);
5812 /// visitStrCpyCall -- See if we can lower a strcpy or stpcpy call into an
5813 /// optimized form. If so, return true and lower it, otherwise return false
5814 /// and it will be lowered like a normal call.
5815 bool SelectionDAGBuilder::visitStrCpyCall(const CallInst &I, bool isStpcpy) {
5816 // Verify that the prototype makes sense. char *strcpy(char *, char *)
5817 if (I.getNumArgOperands() != 2)
5820 const Value *Arg0 = I.getArgOperand(0), *Arg1 = I.getArgOperand(1);
5821 if (!Arg0->getType()->isPointerTy() ||
5822 !Arg1->getType()->isPointerTy() ||
5823 !I.getType()->isPointerTy())
5826 const TargetSelectionDAGInfo &TSI = DAG.getSelectionDAGInfo();
5827 std::pair<SDValue, SDValue> Res =
5828 TSI.EmitTargetCodeForStrcpy(DAG, getCurSDLoc(), getRoot(),
5829 getValue(Arg0), getValue(Arg1),
5830 MachinePointerInfo(Arg0),
5831 MachinePointerInfo(Arg1), isStpcpy);
5832 if (Res.first.getNode()) {
5833 setValue(&I, Res.first);
5834 DAG.setRoot(Res.second);
5841 /// visitStrCmpCall - See if we can lower a call to strcmp in an optimized form.
5842 /// If so, return true and lower it, otherwise return false and it will be
5843 /// lowered like a normal call.
5844 bool SelectionDAGBuilder::visitStrCmpCall(const CallInst &I) {
5845 // Verify that the prototype makes sense. int strcmp(void*,void*)
5846 if (I.getNumArgOperands() != 2)
5849 const Value *Arg0 = I.getArgOperand(0), *Arg1 = I.getArgOperand(1);
5850 if (!Arg0->getType()->isPointerTy() ||
5851 !Arg1->getType()->isPointerTy() ||
5852 !I.getType()->isIntegerTy())
5855 const TargetSelectionDAGInfo &TSI = DAG.getSelectionDAGInfo();
5856 std::pair<SDValue, SDValue> Res =
5857 TSI.EmitTargetCodeForStrcmp(DAG, getCurSDLoc(), DAG.getRoot(),
5858 getValue(Arg0), getValue(Arg1),
5859 MachinePointerInfo(Arg0),
5860 MachinePointerInfo(Arg1));
5861 if (Res.first.getNode()) {
5862 processIntegerCallValue(I, Res.first, true);
5863 PendingLoads.push_back(Res.second);
5870 /// visitStrLenCall -- See if we can lower a strlen call into an optimized
5871 /// form. If so, return true and lower it, otherwise return false and it
5872 /// will be lowered like a normal call.
5873 bool SelectionDAGBuilder::visitStrLenCall(const CallInst &I) {
5874 // Verify that the prototype makes sense. size_t strlen(char *)
5875 if (I.getNumArgOperands() != 1)
5878 const Value *Arg0 = I.getArgOperand(0);
5879 if (!Arg0->getType()->isPointerTy() || !I.getType()->isIntegerTy())
5882 const TargetSelectionDAGInfo &TSI = DAG.getSelectionDAGInfo();
5883 std::pair<SDValue, SDValue> Res =
5884 TSI.EmitTargetCodeForStrlen(DAG, getCurSDLoc(), DAG.getRoot(),
5885 getValue(Arg0), MachinePointerInfo(Arg0));
5886 if (Res.first.getNode()) {
5887 processIntegerCallValue(I, Res.first, false);
5888 PendingLoads.push_back(Res.second);
5895 /// visitStrNLenCall -- See if we can lower a strnlen call into an optimized
5896 /// form. If so, return true and lower it, otherwise return false and it
5897 /// will be lowered like a normal call.
5898 bool SelectionDAGBuilder::visitStrNLenCall(const CallInst &I) {
5899 // Verify that the prototype makes sense. size_t strnlen(char *, size_t)
5900 if (I.getNumArgOperands() != 2)
5903 const Value *Arg0 = I.getArgOperand(0), *Arg1 = I.getArgOperand(1);
5904 if (!Arg0->getType()->isPointerTy() ||
5905 !Arg1->getType()->isIntegerTy() ||
5906 !I.getType()->isIntegerTy())
5909 const TargetSelectionDAGInfo &TSI = DAG.getSelectionDAGInfo();
5910 std::pair<SDValue, SDValue> Res =
5911 TSI.EmitTargetCodeForStrnlen(DAG, getCurSDLoc(), DAG.getRoot(),
5912 getValue(Arg0), getValue(Arg1),
5913 MachinePointerInfo(Arg0));
5914 if (Res.first.getNode()) {
5915 processIntegerCallValue(I, Res.first, false);
5916 PendingLoads.push_back(Res.second);
5923 /// visitUnaryFloatCall - If a call instruction is a unary floating-point
5924 /// operation (as expected), translate it to an SDNode with the specified opcode
5925 /// and return true.
5926 bool SelectionDAGBuilder::visitUnaryFloatCall(const CallInst &I,
5928 // Sanity check that it really is a unary floating-point call.
5929 if (I.getNumArgOperands() != 1 ||
5930 !I.getArgOperand(0)->getType()->isFloatingPointTy() ||
5931 I.getType() != I.getArgOperand(0)->getType() ||
5932 !I.onlyReadsMemory())
5935 SDValue Tmp = getValue(I.getArgOperand(0));
5936 setValue(&I, DAG.getNode(Opcode, getCurSDLoc(), Tmp.getValueType(), Tmp));
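// Illustrative only: a readonly declaration such as
//   %r = call float @sinf(float %x)
// passes the checks above and is emitted directly as the requested node
// (ISD::FSIN for the sinf caller in visitCall), bypassing the libcall.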
5940 void SelectionDAGBuilder::visitCall(const CallInst &I) {
5941 // Handle inline assembly differently.
5942 if (isa<InlineAsm>(I.getCalledValue())) {
5947 MachineModuleInfo &MMI = DAG.getMachineFunction().getMMI();
5948 ComputeUsesVAFloatArgument(I, &MMI);
5950 const char *RenameFn = nullptr;
5951 if (Function *F = I.getCalledFunction()) {
5952 if (F->isDeclaration()) {
5953 if (const TargetIntrinsicInfo *II = TM.getIntrinsicInfo()) {
5954 if (unsigned IID = II->getIntrinsicID(F)) {
5955 RenameFn = visitIntrinsicCall(I, IID);
5960 if (unsigned IID = F->getIntrinsicID()) {
5961 RenameFn = visitIntrinsicCall(I, IID);
5967 // Check for well-known libc/libm calls. If the function is internal, it
5968 // can't be a library call.
5970 if (!F->hasLocalLinkage() && F->hasName() &&
5971 LibInfo->getLibFunc(F->getName(), Func) &&
5972 LibInfo->hasOptimizedCodeGen(Func)) {
5975 case LibFunc::copysign:
5976 case LibFunc::copysignf:
5977 case LibFunc::copysignl:
5978 if (I.getNumArgOperands() == 2 && // Basic sanity checks.
5979 I.getArgOperand(0)->getType()->isFloatingPointTy() &&
5980 I.getType() == I.getArgOperand(0)->getType() &&
5981 I.getType() == I.getArgOperand(1)->getType() &&
5982 I.onlyReadsMemory()) {
5983 SDValue LHS = getValue(I.getArgOperand(0));
5984 SDValue RHS = getValue(I.getArgOperand(1));
5985 setValue(&I, DAG.getNode(ISD::FCOPYSIGN, getCurSDLoc(),
5986 LHS.getValueType(), LHS, RHS));
5991 case LibFunc::fabsf:
5992 case LibFunc::fabsl:
5993 if (visitUnaryFloatCall(I, ISD::FABS))
5999 if (visitUnaryFloatCall(I, ISD::FSIN))
6005 if (visitUnaryFloatCall(I, ISD::FCOS))
6009 case LibFunc::sqrtf:
6010 case LibFunc::sqrtl:
6011 case LibFunc::sqrt_finite:
6012 case LibFunc::sqrtf_finite:
6013 case LibFunc::sqrtl_finite:
6014 if (visitUnaryFloatCall(I, ISD::FSQRT))
6017 case LibFunc::floor:
6018 case LibFunc::floorf:
6019 case LibFunc::floorl:
6020 if (visitUnaryFloatCall(I, ISD::FFLOOR))
6023 case LibFunc::nearbyint:
6024 case LibFunc::nearbyintf:
6025 case LibFunc::nearbyintl:
6026 if (visitUnaryFloatCall(I, ISD::FNEARBYINT))
6030 case LibFunc::ceilf:
6031 case LibFunc::ceill:
6032 if (visitUnaryFloatCall(I, ISD::FCEIL))
6036 case LibFunc::rintf:
6037 case LibFunc::rintl:
6038 if (visitUnaryFloatCall(I, ISD::FRINT))
6041 case LibFunc::round:
6042 case LibFunc::roundf:
6043 case LibFunc::roundl:
6044 if (visitUnaryFloatCall(I, ISD::FROUND))
6047 case LibFunc::trunc:
6048 case LibFunc::truncf:
6049 case LibFunc::truncl:
6050 if (visitUnaryFloatCall(I, ISD::FTRUNC))
6054 case LibFunc::log2f:
6055 case LibFunc::log2l:
6056 if (visitUnaryFloatCall(I, ISD::FLOG2))
6060 case LibFunc::exp2f:
6061 case LibFunc::exp2l:
6062 if (visitUnaryFloatCall(I, ISD::FEXP2))
6065 case LibFunc::memcmp:
6066 if (visitMemCmpCall(I))
6069 case LibFunc::memchr:
6070 if (visitMemChrCall(I))
6073 case LibFunc::strcpy:
6074 if (visitStrCpyCall(I, false))
6077 case LibFunc::stpcpy:
6078 if (visitStrCpyCall(I, true))
6081 case LibFunc::strcmp:
6082 if (visitStrCmpCall(I))
6085 case LibFunc::strlen:
6086 if (visitStrLenCall(I))
6089 case LibFunc::strnlen:
6090 if (visitStrNLenCall(I))
6099 Callee = getValue(I.getCalledValue());
6101 Callee = DAG.getExternalSymbol(RenameFn,
6102 TM.getTargetLowering()->getPointerTy());
6104 // Check if we can potentially perform a tail call. More detailed checking is
6105 // done within LowerCallTo, after more information about the call is known.
6106 LowerCallTo(&I, Callee, I.isTailCall());
6111 /// AsmOperandInfo - This contains information for each constraint that we are lowering.
6113 class SDISelAsmOperandInfo : public TargetLowering::AsmOperandInfo {
6115 /// CallOperand - If this is the result output operand or a clobber
6116 /// this is null, otherwise it is the incoming operand to the CallInst.
6117 /// This gets modified as the asm is processed.
6118 SDValue CallOperand;
6120 /// AssignedRegs - If this is a register or register class operand, this
6121 /// contains the set of registers corresponding to the operand.
6122 RegsForValue AssignedRegs;
6124 explicit SDISelAsmOperandInfo(const TargetLowering::AsmOperandInfo &info)
6125 : TargetLowering::AsmOperandInfo(info), CallOperand(nullptr,0) {
6128 /// getCallOperandValEVT - Return the EVT of the Value* that this operand
6129 /// corresponds to. If there is no Value* for this operand, it returns MVT::Other.
6131 EVT getCallOperandValEVT(LLVMContext &Context,
6132 const TargetLowering &TLI,
6133 const DataLayout *DL) const {
6134 if (!CallOperandVal) return MVT::Other;
6136 if (isa<BasicBlock>(CallOperandVal))
6137 return TLI.getPointerTy();
6139 llvm::Type *OpTy = CallOperandVal->getType();
6141 // FIXME: code duplicated from TargetLowering::ParseConstraints().
6142 // If this is an indirect operand, the operand is a pointer to the accessed type.
6145 llvm::PointerType *PtrTy = dyn_cast<PointerType>(OpTy);
6147 report_fatal_error("Indirect operand for inline asm not a pointer!");
6148 OpTy = PtrTy->getElementType();
6151 // Look for vector wrapped in a struct. e.g. { <16 x i8> }.
6152 if (StructType *STy = dyn_cast<StructType>(OpTy))
6153 if (STy->getNumElements() == 1)
6154 OpTy = STy->getElementType(0);
6156 // If OpTy is not a single value, it may be a struct/union that we
6157 // can tile with integers.
6158 if (!OpTy->isSingleValueType() && OpTy->isSized()) {
6159 unsigned BitSize = DL->getTypeSizeInBits(OpTy);
6168 OpTy = IntegerType::get(Context, BitSize);
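// Illustrative only: an aggregate operand such as { i16, i16 } (32 bits) is
// treated as an i32 here, so it can be carried in a single integer register
// for the purposes of constraint matching.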
6173 return TLI.getValueType(OpTy, true);
6177 typedef SmallVector<SDISelAsmOperandInfo,16> SDISelAsmOperandInfoVector;
6179 } // end anonymous namespace
6181 /// GetRegistersForValue - Assign registers (virtual or physical) for the
6182 /// specified operand. We prefer to assign virtual registers, to allow the
6183 /// register allocator to handle the assignment process. However, if the asm
6184 /// uses features that we can't model on machineinstrs, we have SDISel do the
6185 /// allocation. This produces generally horrible, but correct, code.
6187 /// OpInfo describes the operand.
6189 static void GetRegistersForValue(SelectionDAG &DAG,
6190 const TargetLowering &TLI,
6192 SDISelAsmOperandInfo &OpInfo) {
6193 LLVMContext &Context = *DAG.getContext();
6195 MachineFunction &MF = DAG.getMachineFunction();
6196 SmallVector<unsigned, 4> Regs;
6198 // If this is a constraint for a single physreg, or a constraint for a
6199 // register class, find it.
6200 std::pair<unsigned, const TargetRegisterClass*> PhysReg =
6201 TLI.getRegForInlineAsmConstraint(OpInfo.ConstraintCode,
6202 OpInfo.ConstraintVT);
6204 unsigned NumRegs = 1;
6205 if (OpInfo.ConstraintVT != MVT::Other) {
6206 // If this is an FP input in an integer register (or vice versa), insert a bit
6207 // cast of the input value. More generally, handle any case where the input
6208 // value disagrees with the register class we plan to stick this in.
6209 if (OpInfo.Type == InlineAsm::isInput &&
6210 PhysReg.second && !PhysReg.second->hasType(OpInfo.ConstraintVT)) {
6211 // Try to convert to the first EVT that the reg class contains. If the
6212 // types are of identical size, use a bitcast to convert (e.g. two differing pointer types to the same register class).
6214 MVT RegVT = *PhysReg.second->vt_begin();
6215 if (RegVT.getSizeInBits() == OpInfo.CallOperand.getValueSizeInBits()) {
6216 OpInfo.CallOperand = DAG.getNode(ISD::BITCAST, DL,
6217 RegVT, OpInfo.CallOperand);
6218 OpInfo.ConstraintVT = RegVT;
6219 } else if (RegVT.isInteger() && OpInfo.ConstraintVT.isFloatingPoint()) {
6220 // If the input is a FP value and we want it in FP registers, do a
6221 // bitcast to the corresponding integer type. This turns an f64 value
6222 // into i64, which can be passed with two i32 values on a 32-bit machine.
6224 RegVT = MVT::getIntegerVT(OpInfo.ConstraintVT.getSizeInBits());
6225 OpInfo.CallOperand = DAG.getNode(ISD::BITCAST, DL,
6226 RegVT, OpInfo.CallOperand);
6227 OpInfo.ConstraintVT = RegVT;
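// Illustrative only: a double operand constrained to "r" on a 32-bit target
// is bitcast to i64 here; register assignment can then split it into two i32
// pieces.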
6231 NumRegs = TLI.getNumRegisters(Context, OpInfo.ConstraintVT);
6235 EVT ValueVT = OpInfo.ConstraintVT;
6237 // If this is a constraint for a specific physical register, like {r17},
6239 if (unsigned AssignedReg = PhysReg.first) {
6240 const TargetRegisterClass *RC = PhysReg.second;
6241 if (OpInfo.ConstraintVT == MVT::Other)
6242 ValueVT = *RC->vt_begin();
6244 // Get the actual register value type. This is important, because the user
6245 // may have asked for (e.g.) the AX register in i32 type. We need to
6246 // remember that AX is actually i16 to get the right extension.
6247 RegVT = *RC->vt_begin();
6249 // This is an explicit reference to a physical register.
6250 Regs.push_back(AssignedReg);
6252 // If this is an expanded reference, add the rest of the regs to Regs.
6254 TargetRegisterClass::iterator I = RC->begin();
6255 for (; *I != AssignedReg; ++I)
6256 assert(I != RC->end() && "Didn't find reg!");
6258 // Already added the first reg.
6260 for (; NumRegs; --NumRegs, ++I) {
6261 assert(I != RC->end() && "Ran out of registers to allocate!");
6266 OpInfo.AssignedRegs = RegsForValue(Regs, RegVT, ValueVT);
6270 // Otherwise, if this was a reference to an LLVM register class, create vregs
6271 // for this reference.
6272 if (const TargetRegisterClass *RC = PhysReg.second) {
6273 RegVT = *RC->vt_begin();
6274 if (OpInfo.ConstraintVT == MVT::Other)
6277 // Create the appropriate number of virtual registers.
6278 MachineRegisterInfo &RegInfo = MF.getRegInfo();
6279 for (; NumRegs; --NumRegs)
6280 Regs.push_back(RegInfo.createVirtualRegister(RC));
6282 OpInfo.AssignedRegs = RegsForValue(Regs, RegVT, ValueVT);
6286 // Otherwise, we couldn't allocate enough registers for this.
6289 /// visitInlineAsm - Handle a call to an InlineAsm object.
6291 void SelectionDAGBuilder::visitInlineAsm(ImmutableCallSite CS) {
6292 const InlineAsm *IA = cast<InlineAsm>(CS.getCalledValue());
6294 /// ConstraintOperands - Information about all of the constraints.
6295 SDISelAsmOperandInfoVector ConstraintOperands;
6297 const TargetLowering *TLI = TM.getTargetLowering();
6298 TargetLowering::AsmOperandInfoVector
6299 TargetConstraints = TLI->ParseConstraints(CS);
6301 bool hasMemory = false;
6303 unsigned ArgNo = 0; // ArgNo - The argument of the CallInst.
6304 unsigned ResNo = 0; // ResNo - The result number of the next output.
6305 for (unsigned i = 0, e = TargetConstraints.size(); i != e; ++i) {
6306 ConstraintOperands.push_back(SDISelAsmOperandInfo(TargetConstraints[i]));
6307 SDISelAsmOperandInfo &OpInfo = ConstraintOperands.back();
6309 MVT OpVT = MVT::Other;
6311 // Compute the value type for each operand.
6312 switch (OpInfo.Type) {
6313 case InlineAsm::isOutput:
6314 // Indirect outputs just consume an argument.
6315 if (OpInfo.isIndirect) {
6316 OpInfo.CallOperandVal = const_cast<Value *>(CS.getArgument(ArgNo++));
6320 // The return value of the call is this value. As such, there is no
6321 // corresponding argument.
6322 assert(!CS.getType()->isVoidTy() && "Bad inline asm!");
6323 if (StructType *STy = dyn_cast<StructType>(CS.getType())) {
6324 OpVT = TLI->getSimpleValueType(STy->getElementType(ResNo));
6326 assert(ResNo == 0 && "Asm only has one result!");
6327 OpVT = TLI->getSimpleValueType(CS.getType());
6331 case InlineAsm::isInput:
6332 OpInfo.CallOperandVal = const_cast<Value *>(CS.getArgument(ArgNo++));
6334 case InlineAsm::isClobber:
6339 // If this is an input or an indirect output, process the call argument.
6340 // BasicBlocks are labels, currently appearing only in asm's.
6341 if (OpInfo.CallOperandVal) {
6342 if (const BasicBlock *BB = dyn_cast<BasicBlock>(OpInfo.CallOperandVal)) {
6343 OpInfo.CallOperand = DAG.getBasicBlock(FuncInfo.MBBMap[BB]);
6345 OpInfo.CallOperand = getValue(OpInfo.CallOperandVal);
6348 OpVT = OpInfo.getCallOperandValEVT(*DAG.getContext(), *TLI, DL).
6352 OpInfo.ConstraintVT = OpVT;
6354 // Indirect operands access memory.
6355 if (OpInfo.isIndirect)
6358 for (unsigned j = 0, ee = OpInfo.Codes.size(); j != ee; ++j) {
6359 TargetLowering::ConstraintType
6360 CType = TLI->getConstraintType(OpInfo.Codes[j]);
6361 if (CType == TargetLowering::C_Memory) {
6369 SDValue Chain, Flag;
6371 // We won't need to flush pending loads if this asm doesn't touch
6372 // memory and is nonvolatile.
6373 if (hasMemory || IA->hasSideEffects())
6376 Chain = DAG.getRoot();
6378 // Second pass over the constraints: compute which constraint option to use
6379 // and assign registers to constraints that want a specific physreg.
6380 for (unsigned i = 0, e = ConstraintOperands.size(); i != e; ++i) {
6381 SDISelAsmOperandInfo &OpInfo = ConstraintOperands[i];
6383 // If this is an output operand with a matching input operand, look up the
6384 // matching input. If their types mismatch, e.g. one is an integer, the
6385 // other is floating point, or their sizes are different, flag it as an error.
6387 if (OpInfo.hasMatchingInput()) {
6388 SDISelAsmOperandInfo &Input = ConstraintOperands[OpInfo.MatchingInput];
6390 if (OpInfo.ConstraintVT != Input.ConstraintVT) {
6391 std::pair<unsigned, const TargetRegisterClass*> MatchRC =
6392 TLI->getRegForInlineAsmConstraint(OpInfo.ConstraintCode,
6393 OpInfo.ConstraintVT);
6394 std::pair<unsigned, const TargetRegisterClass*> InputRC =
6395 TLI->getRegForInlineAsmConstraint(Input.ConstraintCode,
6396 Input.ConstraintVT);
6397 if ((OpInfo.ConstraintVT.isInteger() !=
6398 Input.ConstraintVT.isInteger()) ||
6399 (MatchRC.second != InputRC.second)) {
6400 report_fatal_error("Unsupported asm: input constraint"
6401 " with a matching output constraint of"
6402 " incompatible type!");
6404 Input.ConstraintVT = OpInfo.ConstraintVT;
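// Illustrative only: this handles tied operands such as
//   asm("..." : "=r"(out) : "0"(in))
// where the "0" input must end up in the same register (and with a compatible
// type) as output operand 0.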
6408 // Compute the constraint code and ConstraintType to use.
6409 TLI->ComputeConstraintToUse(OpInfo, OpInfo.CallOperand, &DAG);
6411 if (OpInfo.ConstraintType == TargetLowering::C_Memory &&
6412 OpInfo.Type == InlineAsm::isClobber)
6415 // If this is a memory input, and if the operand is not indirect, do what we
6416 // need to in order to provide an address for the memory input.
6417 if (OpInfo.ConstraintType == TargetLowering::C_Memory &&
6418 !OpInfo.isIndirect) {
6419 assert((OpInfo.isMultipleAlternative ||
6420 (OpInfo.Type == InlineAsm::isInput)) &&
6421 "Can only indirectify direct input operands!");
6423 // Memory operands really want the address of the value. If we don't have
6424 // an indirect input, put it in the constpool if we can, otherwise spill
6425 // it to a stack slot.
6426 // TODO: This isn't quite right. We need to handle these according to
6427 // the addressing mode that the constraint wants. Also, this may take
6428 // an additional register for the computation, and we don't want that either.
6431 // If the operand is a float, integer, or vector constant, spill to a
6432 // constant pool entry to get its address.
6433 const Value *OpVal = OpInfo.CallOperandVal;
6434 if (isa<ConstantFP>(OpVal) || isa<ConstantInt>(OpVal) ||
6435 isa<ConstantVector>(OpVal) || isa<ConstantDataVector>(OpVal)) {
6436 OpInfo.CallOperand = DAG.getConstantPool(cast<Constant>(OpVal),
6437 TLI->getPointerTy());
6439 // Otherwise, create a stack slot and emit a store to it before the asm.
6441 Type *Ty = OpVal->getType();
6442 uint64_t TySize = TLI->getDataLayout()->getTypeAllocSize(Ty);
6443 unsigned Align = TLI->getDataLayout()->getPrefTypeAlignment(Ty);
6444 MachineFunction &MF = DAG.getMachineFunction();
6445 int SSFI = MF.getFrameInfo()->CreateStackObject(TySize, Align, false);
6446 SDValue StackSlot = DAG.getFrameIndex(SSFI, TLI->getPointerTy());
6447 Chain = DAG.getStore(Chain, getCurSDLoc(),
6448 OpInfo.CallOperand, StackSlot,
6449 MachinePointerInfo::getFixedStack(SSFI),
6451 OpInfo.CallOperand = StackSlot;
6454 // There is no longer a Value* corresponding to this operand.
6455 OpInfo.CallOperandVal = nullptr;
6457 // It is now an indirect operand.
6458 OpInfo.isIndirect = true;
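// Illustrative only: for a direct "m" input whose value is, say, an i32
// constant, the constant is placed in the constant pool (or the value is
// spilled to a fresh stack slot) and the operand is rewritten to be that
// address, i.e. it becomes indirect.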
6461 // If this constraint is for a specific register, allocate it before anything else.
6463 if (OpInfo.ConstraintType == TargetLowering::C_Register)
6464 GetRegistersForValue(DAG, *TLI, getCurSDLoc(), OpInfo);
6467 // Second pass - Loop over all of the operands, assigning virtual or physregs
6468 // to register class operands.
6469 for (unsigned i = 0, e = ConstraintOperands.size(); i != e; ++i) {
6470 SDISelAsmOperandInfo &OpInfo = ConstraintOperands[i];
6472 // C_Register operands have already been allocated; Other/Memory operands don't need to be.
6474 if (OpInfo.ConstraintType == TargetLowering::C_RegisterClass)
6475 GetRegistersForValue(DAG, *TLI, getCurSDLoc(), OpInfo);
6478 // AsmNodeOperands - The operands for the ISD::INLINEASM node.
6479 std::vector<SDValue> AsmNodeOperands;
6480 AsmNodeOperands.push_back(SDValue()); // reserve space for input chain
6481 AsmNodeOperands.push_back(
6482 DAG.getTargetExternalSymbol(IA->getAsmString().c_str(),
6483 TLI->getPointerTy()));
6485 // If we have a !srcloc metadata node associated with it, we want to attach
6486 // this to the ultimately generated inline asm machineinstr. To do this, we
6487 // pass in the third operand as this (potentially null) inline asm MDNode.
6488 const MDNode *SrcLoc = CS.getInstruction()->getMetadata("srcloc");
6489 AsmNodeOperands.push_back(DAG.getMDNode(SrcLoc));
6491 // Remember the HasSideEffect, AlignStack, AsmDialect, MayLoad and MayStore
6492 // bits as operand 3.
6493 unsigned ExtraInfo = 0;
6494 if (IA->hasSideEffects())
6495 ExtraInfo |= InlineAsm::Extra_HasSideEffects;
6496 if (IA->isAlignStack())
6497 ExtraInfo |= InlineAsm::Extra_IsAlignStack;
6498 // Set the asm dialect.
6499 ExtraInfo |= IA->getDialect() * InlineAsm::Extra_AsmDialect;
6501 // Determine if this InlineAsm MayLoad or MayStore based on the constraints.
6502 for (unsigned i = 0, e = TargetConstraints.size(); i != e; ++i) {
6503 TargetLowering::AsmOperandInfo &OpInfo = TargetConstraints[i];
6505 // Compute the constraint code and ConstraintType to use.
6506 TLI->ComputeConstraintToUse(OpInfo, SDValue());
6508 // Ideally, we would only check against memory constraints. However, the
6509 // meaning of an 'other' constraint can be target-specific and we can't easily
6510 // reason about it. Therefore, be conservative and set MayLoad/MayStore
6511 // for 'other' constraints as well.
6512 if (OpInfo.ConstraintType == TargetLowering::C_Memory ||
6513 OpInfo.ConstraintType == TargetLowering::C_Other) {
6514 if (OpInfo.Type == InlineAsm::isInput)
6515 ExtraInfo |= InlineAsm::Extra_MayLoad;
6516 else if (OpInfo.Type == InlineAsm::isOutput)
6517 ExtraInfo |= InlineAsm::Extra_MayStore;
6518 else if (OpInfo.Type == InlineAsm::isClobber)
6519 ExtraInfo |= (InlineAsm::Extra_MayLoad | InlineAsm::Extra_MayStore);
6523 AsmNodeOperands.push_back(DAG.getTargetConstant(ExtraInfo,
6524 TLI->getPointerTy()));
6526 // Loop over all of the inputs, copying the operand values into the
6527 // appropriate registers and processing the output regs.
6528 RegsForValue RetValRegs;
6530 // IndirectStoresToEmit - The set of stores to emit after the inline asm node.
6531 std::vector<std::pair<RegsForValue, Value*> > IndirectStoresToEmit;
6533 for (unsigned i = 0, e = ConstraintOperands.size(); i != e; ++i) {
6534 SDISelAsmOperandInfo &OpInfo = ConstraintOperands[i];
6536 switch (OpInfo.Type) {
6537 case InlineAsm::isOutput: {
6538 if (OpInfo.ConstraintType != TargetLowering::C_RegisterClass &&
6539 OpInfo.ConstraintType != TargetLowering::C_Register) {
6540 // Memory output, or 'other' output (e.g. 'X' constraint).
6541 assert(OpInfo.isIndirect && "Memory output must be indirect operand");
6543 // Add information to the INLINEASM node to know about this output.
6544 unsigned OpFlags = InlineAsm::getFlagWord(InlineAsm::Kind_Mem, 1);
6545 AsmNodeOperands.push_back(DAG.getTargetConstant(OpFlags,
6546 TLI->getPointerTy()));
6547 AsmNodeOperands.push_back(OpInfo.CallOperand);
6551 // Otherwise, this is a register or register class output.
6553 // Copy the output from the appropriate register. Find a register that we can use.
6555 if (OpInfo.AssignedRegs.Regs.empty()) {
6556 LLVMContext &Ctx = *DAG.getContext();
6557 Ctx.emitError(CS.getInstruction(),
6558 "couldn't allocate output register for constraint '" +
6559 Twine(OpInfo.ConstraintCode) + "'");
6563 // If this is an indirect operand, store through the pointer after the asm.
6565 if (OpInfo.isIndirect) {
6566 IndirectStoresToEmit.push_back(std::make_pair(OpInfo.AssignedRegs,
6567 OpInfo.CallOperandVal));
6569 // This is the result value of the call.
6570 assert(!CS.getType()->isVoidTy() && "Bad inline asm!");
6571 // Concatenate this output onto the outputs list.
6572 RetValRegs.append(OpInfo.AssignedRegs);
6575 // Add information to the INLINEASM node to know that this register is
6576 // set.
6578 .AddInlineAsmOperands(OpInfo.isEarlyClobber
6579 ? InlineAsm::Kind_RegDefEarlyClobber
6580 : InlineAsm::Kind_RegDef,
6581 false, 0, DAG, AsmNodeOperands);
6584 case InlineAsm::isInput: {
6585 SDValue InOperandVal = OpInfo.CallOperand;
6587 if (OpInfo.isMatchingInputConstraint()) { // Matching constraint?
6588 // If this is required to match an output register we have already set,
6589 // just use its register.
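// (Illustrative example: for constraints "=r,0" the input operand is tied to
// output operand 0, so getMatchedOperand() returns 0 and the scan below stops
// at that output's register definition.)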
6590 unsigned OperandNo = OpInfo.getMatchedOperand();
6592 // Scan until we find the definition we already emitted of this operand.
6593 // When we find it, create a RegsForValue operand.
6594 unsigned CurOp = InlineAsm::Op_FirstOperand;
6595 for (; OperandNo; --OperandNo) {
6596 // Advance to the next operand.
6597 unsigned OpFlag =
6598 cast<ConstantSDNode>(AsmNodeOperands[CurOp])->getZExtValue();
6599 assert((InlineAsm::isRegDefKind(OpFlag) ||
6600 InlineAsm::isRegDefEarlyClobberKind(OpFlag) ||
6601 InlineAsm::isMemKind(OpFlag)) && "Skipped past definitions?");
6602 CurOp += InlineAsm::getNumOperandRegisters(OpFlag)+1;
6605 unsigned OpFlag =
6606 cast<ConstantSDNode>(AsmNodeOperands[CurOp])->getZExtValue();
6607 if (InlineAsm::isRegDefKind(OpFlag) ||
6608 InlineAsm::isRegDefEarlyClobberKind(OpFlag)) {
6609 // Add (OpFlag&0xffff)>>3 registers to MatchedRegs.
6610 if (OpInfo.isIndirect) {
6611 // This happens on gcc/testsuite/gcc.dg/pr8788-1.c
6612 LLVMContext &Ctx = *DAG.getContext();
6613 Ctx.emitError(CS.getInstruction(), "inline asm not supported yet:"
6614 " don't know how to handle tied "
6615 "indirect register inputs");
6619 RegsForValue MatchedRegs;
6620 MatchedRegs.ValueVTs.push_back(InOperandVal.getValueType());
6621 MVT RegVT = AsmNodeOperands[CurOp+1].getSimpleValueType();
6622 MatchedRegs.RegVTs.push_back(RegVT);
6623 MachineRegisterInfo &RegInfo = DAG.getMachineFunction().getRegInfo();
6624 for (unsigned i = 0, e = InlineAsm::getNumOperandRegisters(OpFlag);
6625 i != e; ++i) {
6626 if (const TargetRegisterClass *RC = TLI->getRegClassFor(RegVT))
6627 MatchedRegs.Regs.push_back(RegInfo.createVirtualRegister(RC));
6629 LLVMContext &Ctx = *DAG.getContext();
6630 Ctx.emitError(CS.getInstruction(),
6631 "inline asm error: This value"
6632 " type register class is not natively supported!");
6636 // Use the produced MatchedRegs object to copy the input value into the
6637 // registers tied to the matched output.
6637 MatchedRegs.getCopyToRegs(InOperandVal, DAG, getCurSDLoc(),
6638 Chain, &Flag, CS.getInstruction());
6639 MatchedRegs.AddInlineAsmOperands(InlineAsm::Kind_RegUse,
6640 true, OpInfo.getMatchedOperand(),
6641 DAG, AsmNodeOperands);
6645 assert(InlineAsm::isMemKind(OpFlag) && "Unknown matching constraint!");
6646 assert(InlineAsm::getNumOperandRegisters(OpFlag) == 1 &&
6647 "Unexpected number of operands");
6648 // Add information to the INLINEASM node to know about this input.
6649 // See InlineAsm.h isUseOperandTiedToDef.
6650 OpFlag = InlineAsm::getFlagWordForMatchingOp(OpFlag,
6651 OpInfo.getMatchedOperand());
6652 AsmNodeOperands.push_back(DAG.getTargetConstant(OpFlag,
6653 TLI->getPointerTy()));
6654 AsmNodeOperands.push_back(AsmNodeOperands[CurOp+1]);
6658 // Treat indirect 'X' constraint as memory.
6659 if (OpInfo.ConstraintType == TargetLowering::C_Other &&
6660 OpInfo.isIndirect)
6661 OpInfo.ConstraintType = TargetLowering::C_Memory;
6663 if (OpInfo.ConstraintType == TargetLowering::C_Other) {
6664 std::vector<SDValue> Ops;
6665 TLI->LowerAsmOperandForConstraint(InOperandVal, OpInfo.ConstraintCode,
6666 Ops, DAG);
6667 if (Ops.empty()) {
6668 LLVMContext &Ctx = *DAG.getContext();
6669 Ctx.emitError(CS.getInstruction(),
6670 "invalid operand for inline asm constraint '" +
6671 Twine(OpInfo.ConstraintCode) + "'");
6675 // Add information to the INLINEASM node to know about this input.
6676 unsigned ResOpType =
6677 InlineAsm::getFlagWord(InlineAsm::Kind_Imm, Ops.size());
6678 AsmNodeOperands.push_back(DAG.getTargetConstant(ResOpType,
6679 TLI->getPointerTy()));
6680 AsmNodeOperands.insert(AsmNodeOperands.end(), Ops.begin(), Ops.end());
6684 if (OpInfo.ConstraintType == TargetLowering::C_Memory) {
6685 assert(OpInfo.isIndirect && "Operand must be indirect to be a mem!");
6686 assert(InOperandVal.getValueType() == TLI->getPointerTy() &&
6687 "Memory operands expect pointer values");
6689 // Add information to the INLINEASM node to know about this input.
6690 unsigned ResOpType = InlineAsm::getFlagWord(InlineAsm::Kind_Mem, 1);
6691 AsmNodeOperands.push_back(DAG.getTargetConstant(ResOpType,
6692 TLI->getPointerTy()));
6693 AsmNodeOperands.push_back(InOperandVal);
6697 assert((OpInfo.ConstraintType == TargetLowering::C_RegisterClass ||
6698 OpInfo.ConstraintType == TargetLowering::C_Register) &&
6699 "Unknown constraint type!");
6701 // TODO: Support this.
6702 if (OpInfo.isIndirect) {
6703 LLVMContext &Ctx = *DAG.getContext();
6704 Ctx.emitError(CS.getInstruction(),
6705 "Don't know how to handle indirect register inputs yet "
6706 "for constraint '" +
6707 Twine(OpInfo.ConstraintCode) + "'");
6711 // Copy the input into the appropriate registers.
6712 if (OpInfo.AssignedRegs.Regs.empty()) {
6713 LLVMContext &Ctx = *DAG.getContext();
6714 Ctx.emitError(CS.getInstruction(),
6715 "couldn't allocate input reg for constraint '" +
6716 Twine(OpInfo.ConstraintCode) + "'");
6720 OpInfo.AssignedRegs.getCopyToRegs(InOperandVal, DAG, getCurSDLoc(),
6721 Chain, &Flag, CS.getInstruction());
6723 OpInfo.AssignedRegs.AddInlineAsmOperands(InlineAsm::Kind_RegUse, false, 0,
6724 DAG, AsmNodeOperands);
6727 case InlineAsm::isClobber: {
6728 // Add the clobbered value to the operand list, so that the register
6729 // allocator is aware that the physreg got clobbered.
6730 if (!OpInfo.AssignedRegs.Regs.empty())
6731 OpInfo.AssignedRegs.AddInlineAsmOperands(InlineAsm::Kind_Clobber,
6739 // Finish up input operands. Set the input chain and add the flag last.
6740 AsmNodeOperands[InlineAsm::Op_InputChain] = Chain;
6741 if (Flag.getNode()) AsmNodeOperands.push_back(Flag);
6743 Chain = DAG.getNode(ISD::INLINEASM, getCurSDLoc(),
6744 DAG.getVTList(MVT::Other, MVT::Glue), AsmNodeOperands);
6745 Flag = Chain.getValue(1);
6747 // If this asm returns a register value, copy the result from that register
6748 // and set it as the value of the call.
6749 if (!RetValRegs.Regs.empty()) {
6750 SDValue Val = RetValRegs.getCopyFromRegs(DAG, FuncInfo, getCurSDLoc(),
6751 Chain, &Flag, CS.getInstruction());
6753 // FIXME: Why don't we do this for inline asms with MRVs?
6754 if (CS.getType()->isSingleValueType() && CS.getType()->isSized()) {
6755 EVT ResultType = TLI->getValueType(CS.getType());
6757 // If any of the results of the inline asm is a vector, it may have the
6758 // wrong width/num elts. This can happen for register classes that can
6759 // contain multiple different value types. The preg or vreg allocated may
6760 // not have the same VT as was expected. Convert it to the right type
6761 // with bit_convert.
6762 if (ResultType != Val.getValueType() && Val.getValueType().isVector()) {
6763 Val = DAG.getNode(ISD::BITCAST, getCurSDLoc(),
6766 } else if (ResultType != Val.getValueType() &&
6767 ResultType.isInteger() && Val.getValueType().isInteger()) {
6768 // If a result value was tied to an input value, the computed result may
6769 // have a wider width than the expected result. Extract the relevant
6770 // portion.
6771 Val = DAG.getNode(ISD::TRUNCATE, getCurSDLoc(), ResultType, Val);
6774 assert(ResultType == Val.getValueType() && "Asm result value mismatch!");
6777 setValue(CS.getInstruction(), Val);
6778 // Don't need to use this as a chain in this case.
6779 if (!IA->hasSideEffects() && !hasMemory && IndirectStoresToEmit.empty())
6783 std::vector<std::pair<SDValue, const Value *> > StoresToEmit;
6785 // Process indirect outputs, first output all of the flagged copies out of
6786 // the physregs.
6787 for (unsigned i = 0, e = IndirectStoresToEmit.size(); i != e; ++i) {
6788 RegsForValue &OutRegs = IndirectStoresToEmit[i].first;
6789 const Value *Ptr = IndirectStoresToEmit[i].second;
6790 SDValue OutVal = OutRegs.getCopyFromRegs(DAG, FuncInfo, getCurSDLoc(),
6792 StoresToEmit.push_back(std::make_pair(OutVal, Ptr));
6795 // Emit the non-flagged stores from the physregs.
6796 SmallVector<SDValue, 8> OutChains;
6797 for (unsigned i = 0, e = StoresToEmit.size(); i != e; ++i) {
6798 SDValue Val = DAG.getStore(Chain, getCurSDLoc(),
6799 StoresToEmit[i].first,
6800 getValue(StoresToEmit[i].second),
6801 MachinePointerInfo(StoresToEmit[i].second),
6803 OutChains.push_back(Val);
6806 if (!OutChains.empty())
6807 Chain = DAG.getNode(ISD::TokenFactor, getCurSDLoc(), MVT::Other, OutChains);
6812 void SelectionDAGBuilder::visitVAStart(const CallInst &I) {
6813 DAG.setRoot(DAG.getNode(ISD::VASTART, getCurSDLoc(),
6814 MVT::Other, getRoot(),
6815 getValue(I.getArgOperand(0)),
6816 DAG.getSrcValue(I.getArgOperand(0))));
6819 void SelectionDAGBuilder::visitVAArg(const VAArgInst &I) {
6820 const TargetLowering *TLI = TM.getTargetLowering();
6821 const DataLayout &DL = *TLI->getDataLayout();
6822 SDValue V = DAG.getVAArg(TLI->getValueType(I.getType()), getCurSDLoc(),
6823 getRoot(), getValue(I.getOperand(0)),
6824 DAG.getSrcValue(I.getOperand(0)),
6825 DL.getABITypeAlignment(I.getType()));
6827 DAG.setRoot(V.getValue(1));
6830 void SelectionDAGBuilder::visitVAEnd(const CallInst &I) {
6831 DAG.setRoot(DAG.getNode(ISD::VAEND, getCurSDLoc(),
6832 MVT::Other, getRoot(),
6833 getValue(I.getArgOperand(0)),
6834 DAG.getSrcValue(I.getArgOperand(0))));
6837 void SelectionDAGBuilder::visitVACopy(const CallInst &I) {
6838 DAG.setRoot(DAG.getNode(ISD::VACOPY, getCurSDLoc(),
6839 MVT::Other, getRoot(),
6840 getValue(I.getArgOperand(0)),
6841 getValue(I.getArgOperand(1)),
6842 DAG.getSrcValue(I.getArgOperand(0)),
6843 DAG.getSrcValue(I.getArgOperand(1))));
6846 /// \brief Lower an argument list according to the target calling convention.
6848 /// \return A tuple of <return-value, token-chain>
6850 /// This is a helper for lowering intrinsics that follow a target calling
6851 /// convention or require stack pointer adjustment. Only a subset of the
6852 /// intrinsic's operands need to participate in the calling convention.
6853 std::pair<SDValue, SDValue>
6854 SelectionDAGBuilder::LowerCallOperands(const CallInst &CI, unsigned ArgIdx,
6855 unsigned NumArgs, SDValue Callee,
6856 bool useVoidTy) {
6857 TargetLowering::ArgListTy Args;
6858 Args.reserve(NumArgs);
6860 // Populate the argument list.
6861 // Attributes for args start at offset 1, after the return attribute.
6862 ImmutableCallSite CS(&CI);
6863 for (unsigned ArgI = ArgIdx, ArgE = ArgIdx + NumArgs, AttrI = ArgIdx + 1;
6864 ArgI != ArgE; ++ArgI) {
6865 const Value *V = CI.getOperand(ArgI);
6867 assert(!V->getType()->isEmptyTy() && "Empty type passed to intrinsic.");
6869 TargetLowering::ArgListEntry Entry;
6870 Entry.Node = getValue(V);
6871 Entry.Ty = V->getType();
6872 Entry.setAttributes(&CS, AttrI);
6873 Args.push_back(Entry);
6876 Type *retTy = useVoidTy ? Type::getVoidTy(*DAG.getContext()) : CI.getType();
6877 TargetLowering::CallLoweringInfo CLI(DAG);
6878 CLI.setDebugLoc(getCurSDLoc()).setChain(getRoot())
6879 .setCallee(CI.getCallingConv(), retTy, Callee, &Args, NumArgs)
6880 .setDiscardResult(CI.use_empty());
6882 const TargetLowering *TLI = TM.getTargetLowering();
6883 return TLI->LowerCallTo(CLI);
6886 /// \brief Add a stack map intrinsic call's live variable operands to a stackmap
6887 /// or patchpoint target node's operand list.
6889 /// Constants are converted to TargetConstants purely as an optimization to
6890 /// avoid constant materialization and register allocation.
6892 /// FrameIndex operands are converted to TargetFrameIndex so that ISEL does not
6893 /// generate address computation nodes, and so ExpandISelPseudo can convert the
6894 /// TargetFrameIndex into a DirectMemRefOp StackMap location. This avoids
6895 /// address materialization and register allocation, but may also be required
6896 /// for correctness. If a StackMap (or PatchPoint) intrinsic directly uses an
6897 /// alloca in the entry block, then the runtime may assume that the alloca's
6898 /// StackMap location can be read immediately after compilation and that the
6899 /// location is valid at any point during execution (this is similar to the
6900 /// assumption made by the llvm.gcroot intrinsic). If the alloca's location were
6901 /// only available in a register, then the runtime would need to trap when
6902 /// execution reaches the StackMap in order to read the alloca's location.
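/// For illustration (the exact operand encoding is defined by StackMaps.h): a
/// constant live value 42 is pushed as (ConstantOp, TargetConstant(42)), an
/// entry-block alloca is pushed as a TargetFrameIndex, and anything else is
/// pushed as its plain SDValue.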
6903 static void addStackMapLiveVars(const CallInst &CI, unsigned StartIdx,
6904 SmallVectorImpl<SDValue> &Ops,
6905 SelectionDAGBuilder &Builder) {
6906 for (unsigned i = StartIdx, e = CI.getNumArgOperands(); i != e; ++i) {
6907 SDValue OpVal = Builder.getValue(CI.getArgOperand(i));
6908 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(OpVal)) {
6910 Builder.DAG.getTargetConstant(StackMaps::ConstantOp, MVT::i64));
6912 Builder.DAG.getTargetConstant(C->getSExtValue(), MVT::i64));
6913 } else if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(OpVal)) {
6914 const TargetLowering &TLI = Builder.DAG.getTargetLoweringInfo();
6916 Builder.DAG.getTargetFrameIndex(FI->getIndex(), TLI.getPointerTy()));
6918 Ops.push_back(OpVal);
6922 /// \brief Lower llvm.experimental.stackmap directly to its target opcode.
6923 void SelectionDAGBuilder::visitStackmap(const CallInst &CI) {
6924 // void @llvm.experimental.stackmap(i64 <id>, i32 <numShadowBytes>,
6925 // [live variables...])
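// An illustrative call (not from this file) looks like:
//   call void (i64, i32, ...)* @llvm.experimental.stackmap(i64 42, i32 4,
//                                                          i64 %a, i64 %b)
// which records id 42, reserves 4 bytes of shadow nops, and captures the
// locations of %a and %b.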
6927 assert(CI.getType()->isVoidTy() && "Stackmap cannot return a value.");
6929 SDValue Chain, InFlag, Callee, NullPtr;
6930 SmallVector<SDValue, 32> Ops;
6932 SDLoc DL = getCurSDLoc();
6933 Callee = getValue(CI.getCalledValue());
6934 NullPtr = DAG.getIntPtrConstant(0, true);
6936 // The stackmap intrinsic only records the live variables (the arguments
6937 // passed to it) and emits NOPS (if requested). Unlike the patchpoint
6938 // intrinsic, this won't be lowered to a function call. This means we don't
6939 // have to worry about calling conventions and target specific lowering code.
6940 // Instead we perform the call lowering right here.
6942 // chain, flag = CALLSEQ_START(chain, 0)
6943 // chain, flag = STACKMAP(id, nbytes, ..., chain, flag)
6944 // chain, flag = CALLSEQ_END(chain, 0, 0, flag)
6946 Chain = DAG.getCALLSEQ_START(getRoot(), NullPtr, DL);
6947 InFlag = Chain.getValue(1);
6949 // Add the <id> and <numBytes> constants.
6950 SDValue IDVal = getValue(CI.getOperand(PatchPointOpers::IDPos));
6951 Ops.push_back(DAG.getTargetConstant(
6952 cast<ConstantSDNode>(IDVal)->getZExtValue(), MVT::i64));
6953 SDValue NBytesVal = getValue(CI.getOperand(PatchPointOpers::NBytesPos));
6954 Ops.push_back(DAG.getTargetConstant(
6955 cast<ConstantSDNode>(NBytesVal)->getZExtValue(), MVT::i32));
6957 // Push live variables for the stack map.
6958 addStackMapLiveVars(CI, 2, Ops, *this);
6960 // We are not pushing any register mask info here on the operands list,
6961 // because the stackmap doesn't clobber anything.
6963 // Push the chain and the glue flag.
6964 Ops.push_back(Chain);
6965 Ops.push_back(InFlag);
6967 // Create the STACKMAP node.
6968 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
6969 SDNode *SM = DAG.getMachineNode(TargetOpcode::STACKMAP, DL, NodeTys, Ops);
6970 Chain = SDValue(SM, 0);
6971 InFlag = Chain.getValue(1);
6973 Chain = DAG.getCALLSEQ_END(Chain, NullPtr, NullPtr, InFlag, DL);
6975 // Stackmaps don't generate values, so nothing goes into the NodeMap.
6977 // Set the root to the target-lowered call chain.
6978 DAG.setRoot(Chain);
6980 // Inform the Frame Information that we have a stackmap in this function.
6981 FuncInfo.MF->getFrameInfo()->setHasStackMap();
6984 /// \brief Lower llvm.experimental.patchpoint directly to its target opcode.
6985 void SelectionDAGBuilder::visitPatchpoint(const CallInst &CI) {
6986 // void|i64 @llvm.experimental.patchpoint.void|i64(i64 <id>,
6987 //                                                 i32 <numBytes>,
6988 //                                                 i8* <target>,
6989 //                                                 i32 <numArgs>,
6990 //                                                 [Args...],
6991 //                                                 [live variables...])
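// An illustrative call (not from this file) looks like:
//   %r = call i64 (i64, i32, i8*, i32, ...)*
//            @llvm.experimental.patchpoint.i64(i64 2, i32 15, i8* %target,
//                                              i32 2, i64 %a, i64 %b)
// i.e. id 2, 15 patchable bytes, a call to %target with the two real
// arguments %a and %b, and no extra live variables.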
6993 CallingConv::ID CC = CI.getCallingConv();
6994 bool isAnyRegCC = CC == CallingConv::AnyReg;
6995 bool hasDef = !CI.getType()->isVoidTy();
6996 SDValue Callee = getValue(CI.getOperand(2)); // <target>
6998 // Get the real number of arguments participating in the call <numArgs>
6999 SDValue NArgVal = getValue(CI.getArgOperand(PatchPointOpers::NArgPos));
7000 unsigned NumArgs = cast<ConstantSDNode>(NArgVal)->getZExtValue();
7002 // Skip the four meta args: <id>, <numNopBytes>, <target>, <numArgs>
7003 // Intrinsics include all meta-operands up to but not including CC.
7004 unsigned NumMetaOpers = PatchPointOpers::CCPos;
7005 assert(CI.getNumArgOperands() >= NumMetaOpers + NumArgs &&
7006 "Not enough arguments provided to the patchpoint intrinsic");
7008 // For AnyRegCC the arguments are lowered later on manually.
7009 unsigned NumCallArgs = isAnyRegCC ? 0 : NumArgs;
7010 std::pair<SDValue, SDValue> Result =
7011 LowerCallOperands(CI, NumMetaOpers, NumCallArgs, Callee, isAnyRegCC);
7013 // Set the root to the target-lowered call chain.
7014 SDValue Chain = Result.second;
7015 DAG.setRoot(Chain);
7017 SDNode *CallEnd = Chain.getNode();
7018 if (hasDef && (CallEnd->getOpcode() == ISD::CopyFromReg))
7019 CallEnd = CallEnd->getOperand(0).getNode();
7021 /// Get a call instruction from the call sequence chain.
7022 /// Tail calls are not allowed.
7023 assert(CallEnd->getOpcode() == ISD::CALLSEQ_END &&
7024 "Expected a callseq node.");
7025 SDNode *Call = CallEnd->getOperand(0).getNode();
7026 bool hasGlue = Call->getGluedNode();
7028 // Replace the target specific call node with the patchable intrinsic.
7029 SmallVector<SDValue, 8> Ops;
7031 // Add the <id> and <numBytes> constants.
7032 SDValue IDVal = getValue(CI.getOperand(PatchPointOpers::IDPos));
7033 Ops.push_back(DAG.getTargetConstant(
7034 cast<ConstantSDNode>(IDVal)->getZExtValue(), MVT::i64));
7035 SDValue NBytesVal = getValue(CI.getOperand(PatchPointOpers::NBytesPos));
7036 Ops.push_back(DAG.getTargetConstant(
7037 cast<ConstantSDNode>(NBytesVal)->getZExtValue(), MVT::i32));
7039 // Assume that the Callee is a constant address.
7040 // FIXME: handle function symbols in the future.
7041 Ops.push_back(
7042 DAG.getIntPtrConstant(cast<ConstantSDNode>(Callee)->getZExtValue(),
7043 /*isTarget=*/true));
7045 // Adjust <numArgs> to account for any arguments that have been passed on the
7046 // stack instead.
7047 // Call Node: Chain, Target, {Args}, RegMask, [Glue]
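// (Worked example: a call node with operands (Chain, Target, %physreg0,
// %physreg1, RegMask, Glue) has six operands and glue, so NumCallRegArgs
// below is 6 - 4 = 2.)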
7048 unsigned NumCallRegArgs = Call->getNumOperands() - (hasGlue ? 4 : 3);
7049 NumCallRegArgs = isAnyRegCC ? NumArgs : NumCallRegArgs;
7050 Ops.push_back(DAG.getTargetConstant(NumCallRegArgs, MVT::i32));
7052 // Add the calling convention
7053 Ops.push_back(DAG.getTargetConstant((unsigned)CC, MVT::i32));
7055 // Add the arguments we omitted previously. The register allocator should
7056 // place these in any free register.
7057 if (isAnyRegCC)
7058 for (unsigned i = NumMetaOpers, e = NumMetaOpers + NumArgs; i != e; ++i)
7059 Ops.push_back(getValue(CI.getArgOperand(i)));
7061 // Push the arguments from the call instruction up to the register mask.
7062 SDNode::op_iterator e = hasGlue ? Call->op_end()-2 : Call->op_end()-1;
7063 for (SDNode::op_iterator i = Call->op_begin()+2; i != e; ++i)
7064 Ops.push_back(*i);
7066 // Push live variables for the stack map.
7067 addStackMapLiveVars(CI, NumMetaOpers + NumArgs, Ops, *this);
7069 // Push the register mask info.
7070 if (hasGlue)
7071 Ops.push_back(*(Call->op_end()-2));
7072 else
7073 Ops.push_back(*(Call->op_end()-1));
7075 // Push the chain (this is originally the first operand of the call, but
7076 // becomes now the last or second to last operand).
7077 Ops.push_back(*(Call->op_begin()));
7079 // Push the glue flag (last operand).
7080 if (hasGlue)
7081 Ops.push_back(*(Call->op_end()-1));
7083 SDVTList NodeTys;
7084 if (isAnyRegCC && hasDef) {
7085 // Create the return types based on the intrinsic definition
7086 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
7087 SmallVector<EVT, 3> ValueVTs;
7088 ComputeValueVTs(TLI, CI.getType(), ValueVTs);
7089 assert(ValueVTs.size() == 1 && "Expected only one return value type.");
7091 // There is always a chain and a glue type at the end
7092 ValueVTs.push_back(MVT::Other);
7093 ValueVTs.push_back(MVT::Glue);
7094 NodeTys = DAG.getVTList(ValueVTs);
7095 } else
7096 NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
7098 // Replace the target specific call node with a PATCHPOINT node.
7099 MachineSDNode *MN = DAG.getMachineNode(TargetOpcode::PATCHPOINT,
7100 getCurSDLoc(), NodeTys, Ops);
7102 // Update the NodeMap.
7103 if (hasDef) {
7104 if (isAnyRegCC)
7105 setValue(&CI, SDValue(MN, 0));
7106 else
7107 setValue(&CI, Result.first);
7108 }
7110 // Fixup the consumers of the intrinsic. The chain and glue may be used in the
7111 // call sequence. Furthermore the location of the chain and glue can change
7112 // when the AnyReg calling convention is used and the intrinsic returns a
7113 // value.
7114 if (isAnyRegCC && hasDef) {
7115 SDValue From[] = {SDValue(Call, 0), SDValue(Call, 1)};
7116 SDValue To[] = {SDValue(MN, 1), SDValue(MN, 2)};
7117 DAG.ReplaceAllUsesOfValuesWith(From, To, 2);
7118 } else
7119 DAG.ReplaceAllUsesWith(Call, MN);
7120 DAG.DeleteNode(Call);
7122 // Inform the Frame Information that we have a patchpoint in this function.
7123 FuncInfo.MF->getFrameInfo()->setHasPatchPoint();
7126 /// TargetLowering::LowerCallTo - This is the default LowerCallTo
7127 /// implementation, which just calls LowerCall.
7128 /// FIXME: When all targets are
7129 /// migrated to using LowerCall, this hook should be integrated into SDISel.
7130 std::pair<SDValue, SDValue>
7131 TargetLowering::LowerCallTo(TargetLowering::CallLoweringInfo &CLI) const {
7132 // Handle the incoming return values from the call.
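// (Illustrative example: on a target where i64 is not a legal type, an i64
// return value is described here as two i32 register parts; they are
// reassembled into the original i64 further below with getCopyFromParts.)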
7134 SmallVector<EVT, 4> RetTys;
7135 ComputeValueVTs(*this, CLI.RetTy, RetTys);
7136 for (unsigned I = 0, E = RetTys.size(); I != E; ++I) {
7137 EVT VT = RetTys[I];
7138 MVT RegisterVT = getRegisterType(CLI.RetTy->getContext(), VT);
7139 unsigned NumRegs = getNumRegisters(CLI.RetTy->getContext(), VT);
7140 for (unsigned i = 0; i != NumRegs; ++i) {
7141 ISD::InputArg MyFlags;
7142 MyFlags.VT = RegisterVT;
7143 MyFlags.ArgVT = VT;
7144 MyFlags.Used = CLI.IsReturnValueUsed;
7145 if (CLI.RetSExt)
7146 MyFlags.Flags.setSExt();
7147 if (CLI.RetZExt)
7148 MyFlags.Flags.setZExt();
7149 if (CLI.IsInReg)
7150 MyFlags.Flags.setInReg();
7151 CLI.Ins.push_back(MyFlags);
7155 // Handle all of the outgoing arguments.
7156 CLI.Outs.clear();
7157 CLI.OutVals.clear();
7158 ArgListTy &Args = CLI.getArgs();
7159 for (unsigned i = 0, e = Args.size(); i != e; ++i) {
7160 SmallVector<EVT, 4> ValueVTs;
7161 ComputeValueVTs(*this, Args[i].Ty, ValueVTs);
7162 Type *FinalType = Args[i].Ty;
7163 if (Args[i].isByVal)
7164 FinalType = cast<PointerType>(Args[i].Ty)->getElementType();
7165 bool NeedsRegBlock = functionArgumentNeedsConsecutiveRegisters(
7166 FinalType, CLI.CallConv, CLI.IsVarArg);
7167 for (unsigned Value = 0, NumValues = ValueVTs.size(); Value != NumValues;
7168 ++Value) {
7169 EVT VT = ValueVTs[Value];
7170 Type *ArgTy = VT.getTypeForEVT(CLI.RetTy->getContext());
7171 SDValue Op = SDValue(Args[i].Node.getNode(),
7172 Args[i].Node.getResNo() + Value);
7173 ISD::ArgFlagsTy Flags;
7174 unsigned OriginalAlignment = getDataLayout()->getABITypeAlignment(ArgTy);
7180 if (Args[i].isInReg)
7181 Flags.setInReg();
7182 if (Args[i].isSRet)
7183 Flags.setSRet();
7184 if (Args[i].isByVal)
7185 Flags.setByVal();
7186 if (Args[i].isInAlloca) {
7187 Flags.setInAlloca();
7188 // Set the byval flag for CCAssignFn callbacks that don't know about
7189 // inalloca. This way we can know how many bytes we should've allocated
7190 // and how many bytes a callee cleanup function will pop. If we port
7191 // inalloca to more targets, we'll have to add custom inalloca handling
7192 // in the various CC lowering callbacks.
7195 if (Args[i].isByVal || Args[i].isInAlloca) {
7196 PointerType *Ty = cast<PointerType>(Args[i].Ty);
7197 Type *ElementTy = Ty->getElementType();
7198 Flags.setByValSize(getDataLayout()->getTypeAllocSize(ElementTy));
7199 // For ByVal, alignment should come from FE. BE will guess if this
7200 // info is not there but there are cases it cannot get right.
7201 unsigned FrameAlign;
7202 if (Args[i].Alignment)
7203 FrameAlign = Args[i].Alignment;
7204 else
7205 FrameAlign = getByValTypeAlignment(ElementTy);
7206 Flags.setByValAlign(FrameAlign);
7210 if (NeedsRegBlock)
7211 Flags.setInConsecutiveRegs();
7212 Flags.setOrigAlign(OriginalAlignment);
7214 MVT PartVT = getRegisterType(CLI.RetTy->getContext(), VT);
7215 unsigned NumParts = getNumRegisters(CLI.RetTy->getContext(), VT);
7216 SmallVector<SDValue, 4> Parts(NumParts);
7217 ISD::NodeType ExtendKind = ISD::ANY_EXTEND;
7219 if (Args[i].isSExt)
7220 ExtendKind = ISD::SIGN_EXTEND;
7221 else if (Args[i].isZExt)
7222 ExtendKind = ISD::ZERO_EXTEND;
7224 // Conservatively only handle 'returned' on non-vectors for now
7225 if (Args[i].isReturned && !Op.getValueType().isVector()) {
7226 assert(CLI.RetTy == Args[i].Ty && RetTys.size() == NumValues &&
7227 "unexpected use of 'returned'");
7228 // Before passing 'returned' to the target lowering code, ensure that
7229 // either the register MVT and the actual EVT are the same size or that
7230 // the return value and argument are extended in the same way; in these
7231 // cases it's safe to pass the argument register value unchanged as the
7232 // return register value (although it's at the target's option whether
7233 // to do so).
7234 // TODO: allow code generation to take advantage of partially preserved
7235 // registers rather than clobbering the entire register when the
7236 // parameter extension method is not compatible with the return
7237 // extension method.
7238 if ((NumParts * PartVT.getSizeInBits() == VT.getSizeInBits()) ||
7239 (ExtendKind != ISD::ANY_EXTEND &&
7240 CLI.RetSExt == Args[i].isSExt && CLI.RetZExt == Args[i].isZExt))
7241 Flags.setReturned();
7244 getCopyToParts(CLI.DAG, CLI.DL, Op, &Parts[0], NumParts, PartVT,
7245 CLI.CS ? CLI.CS->getInstruction() : nullptr, ExtendKind);
7247 for (unsigned j = 0; j != NumParts; ++j) {
7248 // if it isn't first piece, alignment must be 1
7249 ISD::OutputArg MyFlags(Flags, Parts[j].getValueType(), VT,
7250 i < CLI.NumFixedArgs,
7251 i, j*Parts[j].getValueType().getStoreSize());
7252 if (NumParts > 1 && j == 0)
7253 MyFlags.Flags.setSplit();
7254 else if (j != 0)
7255 MyFlags.Flags.setOrigAlign(1);
7257 // Only mark the end at the last register of the last value.
7258 if (NeedsRegBlock && Value == NumValues - 1 && j == NumParts - 1)
7259 MyFlags.Flags.setInConsecutiveRegsLast();
7261 CLI.Outs.push_back(MyFlags);
7262 CLI.OutVals.push_back(Parts[j]);
7267 SmallVector<SDValue, 4> InVals;
7268 CLI.Chain = LowerCall(CLI, InVals);
7270 // Verify that the target's LowerCall behaved as expected.
7271 assert(CLI.Chain.getNode() && CLI.Chain.getValueType() == MVT::Other &&
7272 "LowerCall didn't return a valid chain!");
7273 assert((!CLI.IsTailCall || InVals.empty()) &&
7274 "LowerCall emitted a return value for a tail call!");
7275 assert((CLI.IsTailCall || InVals.size() == CLI.Ins.size()) &&
7276 "LowerCall didn't emit the correct number of values!");
7278 // For a tail call, the return value is merely live-out and there aren't
7279 // any nodes in the DAG representing it. Return a special value to
7280 // indicate that a tail call has been emitted and no more Instructions
7281 // should be processed in the current block.
7282 if (CLI.IsTailCall) {
7283 CLI.DAG.setRoot(CLI.Chain);
7284 return std::make_pair(SDValue(), SDValue());
7287 DEBUG(for (unsigned i = 0, e = CLI.Ins.size(); i != e; ++i) {
7288 assert(InVals[i].getNode() &&
7289 "LowerCall emitted a null value!");
7290 assert(EVT(CLI.Ins[i].VT) == InVals[i].getValueType() &&
7291 "LowerCall emitted a value with the wrong type!");
7294 // Collect the legal value parts into potentially illegal values
7295 // that correspond to the original function's return values.
7296 ISD::NodeType AssertOp = ISD::DELETED_NODE;
7297 if (CLI.RetSExt)
7298 AssertOp = ISD::AssertSext;
7299 else if (CLI.RetZExt)
7300 AssertOp = ISD::AssertZext;
7301 SmallVector<SDValue, 4> ReturnValues;
7302 unsigned CurReg = 0;
7303 for (unsigned I = 0, E = RetTys.size(); I != E; ++I) {
7304 EVT VT = RetTys[I];
7305 MVT RegisterVT = getRegisterType(CLI.RetTy->getContext(), VT);
7306 unsigned NumRegs = getNumRegisters(CLI.RetTy->getContext(), VT);
7308 ReturnValues.push_back(getCopyFromParts(CLI.DAG, CLI.DL, &InVals[CurReg],
7309 NumRegs, RegisterVT, VT, nullptr,
7310 AssertOp));
7311 CurReg += NumRegs;
7314 // For a function returning void, there is no return value. We can't create
7315 // such a node, so we just return a null return value in that case. In
7316 // that case, nothing will actually look at the value.
7317 if (ReturnValues.empty())
7318 return std::make_pair(SDValue(), CLI.Chain);
7320 SDValue Res = CLI.DAG.getNode(ISD::MERGE_VALUES, CLI.DL,
7321 CLI.DAG.getVTList(RetTys), ReturnValues);
7322 return std::make_pair(Res, CLI.Chain);
7325 void TargetLowering::LowerOperationWrapper(SDNode *N,
7326 SmallVectorImpl<SDValue> &Results,
7327 SelectionDAG &DAG) const {
7328 SDValue Res = LowerOperation(SDValue(N, 0), DAG);
7329 if (Res.getNode())
7330 Results.push_back(Res);
7333 SDValue TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
7334 llvm_unreachable("LowerOperation not implemented for this target!");
7338 SelectionDAGBuilder::CopyValueToVirtualRegister(const Value *V, unsigned Reg) {
7339 SDValue Op = getNonRegisterValue(V);
7340 assert((Op.getOpcode() != ISD::CopyFromReg ||
7341 cast<RegisterSDNode>(Op.getOperand(1))->getReg() != Reg) &&
7342 "Copy from a reg to the same reg!");
7343 assert(!TargetRegisterInfo::isPhysicalRegister(Reg) && "Is a physreg");
7345 const TargetLowering *TLI = TM.getTargetLowering();
7346 RegsForValue RFV(V->getContext(), *TLI, Reg, V->getType());
7347 SDValue Chain = DAG.getEntryNode();
7348 RFV.getCopyToRegs(Op, DAG, getCurSDLoc(), Chain, nullptr, V);
7349 PendingExports.push_back(Chain);
7352 #include "llvm/CodeGen/SelectionDAGISel.h"
7354 /// isOnlyUsedInEntryBlock - If the specified argument is only used in the
7355 /// entry block, return true. This includes arguments used by switches, since
7356 /// the switch may expand into multiple basic blocks.
7357 static bool isOnlyUsedInEntryBlock(const Argument *A, bool FastISel) {
7358 // With FastISel active, we may be splitting blocks, so force creation
7359 // of virtual registers for all non-dead arguments.
7360 if (FastISel)
7361 return A->use_empty();
7363 const BasicBlock *Entry = A->getParent()->begin();
7364 for (const User *U : A->users())
7365 if (cast<Instruction>(U)->getParent() != Entry || isa<SwitchInst>(U))
7366 return false; // Use not in entry block.
7371 void SelectionDAGISel::LowerArguments(const Function &F) {
7372 SelectionDAG &DAG = SDB->DAG;
7373 SDLoc dl = SDB->getCurSDLoc();
7374 const TargetLowering *TLI = getTargetLowering();
7375 const DataLayout *DL = TLI->getDataLayout();
7376 SmallVector<ISD::InputArg, 16> Ins;
7378 if (!FuncInfo->CanLowerReturn) {
7379 // Put in an sret pointer parameter before all the other parameters.
7380 SmallVector<EVT, 1> ValueVTs;
7381 ComputeValueVTs(*getTargetLowering(),
7382 PointerType::getUnqual(F.getReturnType()), ValueVTs);
7384 // NOTE: Assuming that a pointer will never break down to more than one VT
7385 // or one register.
7386 ISD::ArgFlagsTy Flags;
7387 Flags.setSRet();
7388 MVT RegisterVT = TLI->getRegisterType(*DAG.getContext(), ValueVTs[0]);
7389 ISD::InputArg RetArg(Flags, RegisterVT, ValueVTs[0], true, 0, 0);
7390 Ins.push_back(RetArg);
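// (Illustrative note: this is the sret-demotion path. A function whose
// aggregate return cannot be lowered in registers is given a hidden pointer
// argument here; the copy emitted further below feeds FuncInfo->DemoteRegister
// so the return value can later be stored through that pointer.)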
7393 // Set up the incoming argument description vector.
7394 unsigned Idx = 1;
7395 for (Function::const_arg_iterator I = F.arg_begin(), E = F.arg_end();
7396 I != E; ++I, ++Idx) {
7397 SmallVector<EVT, 4> ValueVTs;
7398 ComputeValueVTs(*TLI, I->getType(), ValueVTs);
7399 bool isArgValueUsed = !I->use_empty();
7400 unsigned PartBase = 0;
7401 Type *FinalType = I->getType();
7402 if (F.getAttributes().hasAttribute(Idx, Attribute::ByVal))
7403 FinalType = cast<PointerType>(FinalType)->getElementType();
7404 bool NeedsRegBlock = TLI->functionArgumentNeedsConsecutiveRegisters(
7405 FinalType, F.getCallingConv(), F.isVarArg());
7406 for (unsigned Value = 0, NumValues = ValueVTs.size();
7407 Value != NumValues; ++Value) {
7408 EVT VT = ValueVTs[Value];
7409 Type *ArgTy = VT.getTypeForEVT(*DAG.getContext());
7410 ISD::ArgFlagsTy Flags;
7411 unsigned OriginalAlignment = DL->getABITypeAlignment(ArgTy);
7413 if (F.getAttributes().hasAttribute(Idx, Attribute::ZExt))
7414 Flags.setZExt();
7415 if (F.getAttributes().hasAttribute(Idx, Attribute::SExt))
7416 Flags.setSExt();
7417 if (F.getAttributes().hasAttribute(Idx, Attribute::InReg))
7418 Flags.setInReg();
7419 if (F.getAttributes().hasAttribute(Idx, Attribute::StructRet))
7420 Flags.setSRet();
7421 if (F.getAttributes().hasAttribute(Idx, Attribute::ByVal))
7422 Flags.setByVal();
7423 if (F.getAttributes().hasAttribute(Idx, Attribute::InAlloca)) {
7424 Flags.setInAlloca();
7425 // Set the byval flag for CCAssignFn callbacks that don't know about
7426 // inalloca. This way we can know how many bytes we should've allocated
7427 // and how many bytes a callee cleanup function will pop. If we port
7428 // inalloca to more targets, we'll have to add custom inalloca handling
7429 // in the various CC lowering callbacks.
7432 if (Flags.isByVal() || Flags.isInAlloca()) {
7433 PointerType *Ty = cast<PointerType>(I->getType());
7434 Type *ElementTy = Ty->getElementType();
7435 Flags.setByValSize(DL->getTypeAllocSize(ElementTy));
7436 // For ByVal, alignment should be passed from FE. BE will guess if
7437 // this info is not there but there are cases it cannot get right.
7438 unsigned FrameAlign;
7439 if (F.getParamAlignment(Idx))
7440 FrameAlign = F.getParamAlignment(Idx);
7442 FrameAlign = TLI->getByValTypeAlignment(ElementTy);
7443 Flags.setByValAlign(FrameAlign);
7445 if (F.getAttributes().hasAttribute(Idx, Attribute::Nest))
7446 Flags.setNest();
7447 if (NeedsRegBlock)
7448 Flags.setInConsecutiveRegs();
7449 Flags.setOrigAlign(OriginalAlignment);
7451 MVT RegisterVT = TLI->getRegisterType(*CurDAG->getContext(), VT);
7452 unsigned NumRegs = TLI->getNumRegisters(*CurDAG->getContext(), VT);
7453 for (unsigned i = 0; i != NumRegs; ++i) {
7454 ISD::InputArg MyFlags(Flags, RegisterVT, VT, isArgValueUsed,
7455 Idx-1, PartBase+i*RegisterVT.getStoreSize());
7456 if (NumRegs > 1 && i == 0)
7457 MyFlags.Flags.setSplit();
7458 // if it isn't first piece, alignment must be 1
7459 else if (i > 0)
7460 MyFlags.Flags.setOrigAlign(1);
7462 // Only mark the end at the last register of the last value.
7463 if (NeedsRegBlock && Value == NumValues - 1 && i == NumRegs - 1)
7464 MyFlags.Flags.setInConsecutiveRegsLast();
7466 Ins.push_back(MyFlags);
7468 PartBase += VT.getStoreSize();
7472 // Call the target to set up the argument values.
7473 SmallVector<SDValue, 8> InVals;
7474 SDValue NewRoot = TLI->LowerFormalArguments(DAG.getRoot(), F.getCallingConv(),
7478 // Verify that the target's LowerFormalArguments behaved as expected.
7479 assert(NewRoot.getNode() && NewRoot.getValueType() == MVT::Other &&
7480 "LowerFormalArguments didn't return a valid chain!");
7481 assert(InVals.size() == Ins.size() &&
7482 "LowerFormalArguments didn't emit the correct number of values!");
7484 for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
7485 assert(InVals[i].getNode() &&
7486 "LowerFormalArguments emitted a null value!");
7487 assert(EVT(Ins[i].VT) == InVals[i].getValueType() &&
7488 "LowerFormalArguments emitted a value with the wrong type!");
7492 // Update the DAG with the new chain value resulting from argument lowering.
7493 DAG.setRoot(NewRoot);
7495 // Set up the argument values.
7496 unsigned i = 0;
7497 unsigned Idx = 1;
7498 if (!FuncInfo->CanLowerReturn) {
7499 // Create a virtual register for the sret pointer, and put in a copy
7500 // from the sret argument into it.
7501 SmallVector<EVT, 1> ValueVTs;
7502 ComputeValueVTs(*TLI, PointerType::getUnqual(F.getReturnType()), ValueVTs);
7503 MVT VT = ValueVTs[0].getSimpleVT();
7504 MVT RegVT = TLI->getRegisterType(*CurDAG->getContext(), VT);
7505 ISD::NodeType AssertOp = ISD::DELETED_NODE;
7506 SDValue ArgValue = getCopyFromParts(DAG, dl, &InVals[0], 1,
7507 RegVT, VT, nullptr, AssertOp);
7509 MachineFunction& MF = SDB->DAG.getMachineFunction();
7510 MachineRegisterInfo& RegInfo = MF.getRegInfo();
7511 unsigned SRetReg = RegInfo.createVirtualRegister(TLI->getRegClassFor(RegVT));
7512 FuncInfo->DemoteRegister = SRetReg;
7513 NewRoot = SDB->DAG.getCopyToReg(NewRoot, SDB->getCurSDLoc(),
7514 SRetReg, ArgValue);
7515 DAG.setRoot(NewRoot);
7517 // i indexes lowered arguments. Bump it past the hidden sret argument.
7518 // Idx indexes LLVM arguments. Don't touch it.
7519 ++i;
7522 for (Function::const_arg_iterator I = F.arg_begin(), E = F.arg_end(); I != E;
7523 ++I, ++Idx) {
7524 SmallVector<SDValue, 4> ArgValues;
7525 SmallVector<EVT, 4> ValueVTs;
7526 ComputeValueVTs(*TLI, I->getType(), ValueVTs);
7527 unsigned NumValues = ValueVTs.size();
7529 // If this argument is unused then remember its value. It is used to generate
7530 // debugging information.
7531 if (I->use_empty() && NumValues) {
7532 SDB->setUnusedArgValue(I, InVals[i]);
7534 // Also remember any frame index for use in FastISel.
7535 if (FrameIndexSDNode *FI =
7536 dyn_cast<FrameIndexSDNode>(InVals[i].getNode()))
7537 FuncInfo->setArgumentFrameIndex(I, FI->getIndex());
7540 for (unsigned Val = 0; Val != NumValues; ++Val) {
7541 EVT VT = ValueVTs[Val];
7542 MVT PartVT = TLI->getRegisterType(*CurDAG->getContext(), VT);
7543 unsigned NumParts = TLI->getNumRegisters(*CurDAG->getContext(), VT);
7545 if (!I->use_empty()) {
7546 ISD::NodeType AssertOp = ISD::DELETED_NODE;
7547 if (F.getAttributes().hasAttribute(Idx, Attribute::SExt))
7548 AssertOp = ISD::AssertSext;
7549 else if (F.getAttributes().hasAttribute(Idx, Attribute::ZExt))
7550 AssertOp = ISD::AssertZext;
7552 ArgValues.push_back(getCopyFromParts(DAG, dl, &InVals[i],
7553 NumParts, PartVT, VT,
7554 nullptr, AssertOp));
7560 // We don't need to do anything else for unused arguments.
7561 if (ArgValues.empty())
7562 continue;
7564 // Note down frame index.
7565 if (FrameIndexSDNode *FI =
7566 dyn_cast<FrameIndexSDNode>(ArgValues[0].getNode()))
7567 FuncInfo->setArgumentFrameIndex(I, FI->getIndex());
7569 SDValue Res = DAG.getMergeValues(makeArrayRef(ArgValues.data(), NumValues),
7570 SDB->getCurSDLoc());
7572 SDB->setValue(I, Res);
7573 if (!TM.Options.EnableFastISel && Res.getOpcode() == ISD::BUILD_PAIR) {
7574 if (LoadSDNode *LNode =
7575 dyn_cast<LoadSDNode>(Res.getOperand(0).getNode()))
7576 if (FrameIndexSDNode *FI =
7577 dyn_cast<FrameIndexSDNode>(LNode->getBasePtr().getNode()))
7578 FuncInfo->setArgumentFrameIndex(I, FI->getIndex());
7581 // If this argument is live outside of the entry block, insert a copy from
7582 // wherever we got it to the vreg that other BB's will reference it as.
7583 if (!TM.Options.EnableFastISel && Res.getOpcode() == ISD::CopyFromReg) {
7584 // If we can, though, try to skip creating an unnecessary vreg.
7585 // FIXME: This isn't very clean... it would be nice to make this more
7586 // general. It's also subtly incompatible with the hacks FastISel
7587 // uses with vregs.
7588 unsigned Reg = cast<RegisterSDNode>(Res.getOperand(1))->getReg();
7589 if (TargetRegisterInfo::isVirtualRegister(Reg)) {
7590 FuncInfo->ValueMap[I] = Reg;
7594 if (!isOnlyUsedInEntryBlock(I, TM.Options.EnableFastISel)) {
7595 FuncInfo->InitializeRegForValue(I);
7596 SDB->CopyToExportRegsIfNeeded(I);
7600 assert(i == InVals.size() && "Argument register count mismatch!");
7602 // Finally, if the target has anything special to do, allow it to do so.
7603 // FIXME: this should insert code into the DAG!
7604 EmitFunctionEntryCode();
7607 /// Handle PHI nodes in successor blocks. Emit code into the SelectionDAG to
7608 /// ensure constants are generated when needed. Remember the virtual registers
7609 /// that need to be added to the Machine PHI nodes as input. We cannot just
7610 /// directly add them, because expansion might result in multiple MBB's for one
7611 // BB. As such, the start of the BB might correspond to a different MBB than
7612 // the end.
7613 ///
7614 void
7615 SelectionDAGBuilder::HandlePHINodesInSuccessorBlocks(const BasicBlock *LLVMBB) {
7616 const TerminatorInst *TI = LLVMBB->getTerminator();
7618 SmallPtrSet<MachineBasicBlock *, 4> SuccsHandled;
7620 // Check successor nodes' PHI nodes that expect a constant to be available
7621 // from the current block.
7622 for (unsigned succ = 0, e = TI->getNumSuccessors(); succ != e; ++succ) {
7623 const BasicBlock *SuccBB = TI->getSuccessor(succ);
7624 if (!isa<PHINode>(SuccBB->begin())) continue;
7625 MachineBasicBlock *SuccMBB = FuncInfo.MBBMap[SuccBB];
7627 // If this terminator has multiple identical successors (common for
7628 // switches), only handle each succ once.
7629 if (!SuccsHandled.insert(SuccMBB)) continue;
7631 MachineBasicBlock::iterator MBBI = SuccMBB->begin();
7633 // At this point we know that there is a 1-1 correspondence between LLVM PHI
7634 // nodes and Machine PHI nodes, but the incoming operands have not been
7635 // emitted yet.
7636 for (BasicBlock::const_iterator I = SuccBB->begin();
7637 const PHINode *PN = dyn_cast<PHINode>(I); ++I) {
7638 // Ignore dead phi's.
7639 if (PN->use_empty()) continue;
7642 if (PN->getType()->isEmptyTy())
7643 continue;
7645 unsigned Reg;
7646 const Value *PHIOp = PN->getIncomingValueForBlock(LLVMBB);
7648 if (const Constant *C = dyn_cast<Constant>(PHIOp)) {
7649 unsigned &RegOut = ConstantsOut[C];
7650 if (RegOut == 0) {
7651 RegOut = FuncInfo.CreateRegs(C->getType());
7652 CopyValueToVirtualRegister(C, RegOut);
7653 }
7654 Reg = RegOut;
7655 } else {
7656 DenseMap<const Value *, unsigned>::iterator I =
7657 FuncInfo.ValueMap.find(PHIOp);
7658 if (I != FuncInfo.ValueMap.end())
7659 Reg = I->second;
7660 else {
7661 assert(isa<AllocaInst>(PHIOp) &&
7662 FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(PHIOp)) &&
7663 "Didn't codegen value into a register!??");
7664 Reg = FuncInfo.CreateRegs(PHIOp->getType());
7665 CopyValueToVirtualRegister(PHIOp, Reg);
7669 // Remember that this register needs to be added to the machine PHI node as
7670 // the input for this MBB.
7671 SmallVector<EVT, 4> ValueVTs;
7672 const TargetLowering *TLI = TM.getTargetLowering();
7673 ComputeValueVTs(*TLI, PN->getType(), ValueVTs);
7674 for (unsigned vti = 0, vte = ValueVTs.size(); vti != vte; ++vti) {
7675 EVT VT = ValueVTs[vti];
7676 unsigned NumRegisters = TLI->getNumRegisters(*DAG.getContext(), VT);
7677 for (unsigned i = 0, e = NumRegisters; i != e; ++i)
7678 FuncInfo.PHINodesToUpdate.push_back(std::make_pair(MBBI++, Reg+i));
7679 Reg += NumRegisters;
7684 ConstantsOut.clear();
7687 /// Add a successor MBB to ParentMBB, creating a new MachineBB for BB if SuccMBB
7688 /// is null.
7689 MachineBasicBlock *
7690 SelectionDAGBuilder::StackProtectorDescriptor::
7691 AddSuccessorMBB(const BasicBlock *BB,
7692 MachineBasicBlock *ParentMBB,
7693 MachineBasicBlock *SuccMBB) {
7694 // If SuccBB has not been created yet, create it.
7695 if (!SuccMBB) {
7696 MachineFunction *MF = ParentMBB->getParent();
7697 MachineFunction::iterator BBI = ParentMBB;
7698 SuccMBB = MF->CreateMachineBasicBlock(BB);
7699 MF->insert(++BBI, SuccMBB);
7700 }
7701 // Add it as a successor of ParentMBB.
7702 ParentMBB->addSuccessor(SuccMBB);