1 //===-- SelectionDAGBuild.cpp - Selection-DAG building --------------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This implements routines for translating from LLVM IR into SelectionDAG IR.
12 //===----------------------------------------------------------------------===//
14 #define DEBUG_TYPE "isel"
15 #include "SelectionDAGBuild.h"
16 #include "llvm/ADT/BitVector.h"
17 #include "llvm/ADT/SmallSet.h"
18 #include "llvm/Analysis/AliasAnalysis.h"
19 #include "llvm/Constants.h"
20 #include "llvm/Constants.h"
21 #include "llvm/CallingConv.h"
22 #include "llvm/DerivedTypes.h"
23 #include "llvm/Function.h"
24 #include "llvm/GlobalVariable.h"
25 #include "llvm/InlineAsm.h"
26 #include "llvm/Instructions.h"
27 #include "llvm/Intrinsics.h"
28 #include "llvm/IntrinsicInst.h"
29 #include "llvm/Module.h"
30 #include "llvm/CodeGen/FastISel.h"
31 #include "llvm/CodeGen/GCStrategy.h"
32 #include "llvm/CodeGen/GCMetadata.h"
33 #include "llvm/CodeGen/MachineFunction.h"
34 #include "llvm/CodeGen/MachineFrameInfo.h"
35 #include "llvm/CodeGen/MachineInstrBuilder.h"
36 #include "llvm/CodeGen/MachineJumpTableInfo.h"
37 #include "llvm/CodeGen/MachineModuleInfo.h"
38 #include "llvm/CodeGen/MachineRegisterInfo.h"
39 #include "llvm/CodeGen/PseudoSourceValue.h"
40 #include "llvm/CodeGen/SelectionDAG.h"
41 #include "llvm/CodeGen/DwarfWriter.h"
42 #include "llvm/Analysis/DebugInfo.h"
43 #include "llvm/Target/TargetRegisterInfo.h"
44 #include "llvm/Target/TargetData.h"
45 #include "llvm/Target/TargetFrameInfo.h"
46 #include "llvm/Target/TargetInstrInfo.h"
47 #include "llvm/Target/TargetIntrinsicInfo.h"
48 #include "llvm/Target/TargetLowering.h"
49 #include "llvm/Target/TargetOptions.h"
50 #include "llvm/Support/Compiler.h"
51 #include "llvm/Support/CommandLine.h"
52 #include "llvm/Support/Debug.h"
53 #include "llvm/Support/ErrorHandling.h"
54 #include "llvm/Support/MathExtras.h"
55 #include "llvm/Support/raw_ostream.h"
59 /// LimitFloatPrecision - Generate low-precision inline sequences for
60 /// some float libcalls (6, 8 or 12 bits).
61 static unsigned LimitFloatPrecision;
63 static cl::opt<unsigned, true>
64 LimitFPPrecision("limit-float-precision",
65 cl::desc("Generate low-precision inline sequences "
66 "for some float libcalls"),
cl::location(LimitFloatPrecision),
cl::init(0));
70 /// ComputeLinearIndex - Given an LLVM IR aggregate type and a sequence
71 /// of insertvalue or extractvalue indices that identify a member, return
72 /// the linearized index of the start of the member.
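/// For example, given the aggregate type {i32, [2 x i64], float} and the
/// index list {2}, the member is the float and its linear index is 3 (the
/// i32 and the two i64 elements precede it).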
74 static unsigned ComputeLinearIndex(const TargetLowering &TLI, const Type *Ty,
75 const unsigned *Indices,
76 const unsigned *IndicesEnd,
77 unsigned CurIndex = 0) {
78 // Base case: We're done.
79 if (Indices && Indices == IndicesEnd)
82 // Given a struct type, recursively traverse the elements.
83 if (const StructType *STy = dyn_cast<StructType>(Ty)) {
84 for (StructType::element_iterator EB = STy->element_begin(),
86 EE = STy->element_end();
88 if (Indices && *Indices == unsigned(EI - EB))
89 return ComputeLinearIndex(TLI, *EI, Indices+1, IndicesEnd, CurIndex);
90 CurIndex = ComputeLinearIndex(TLI, *EI, 0, 0, CurIndex);
94 // Given an array type, recursively traverse the elements.
95 else if (const ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
96 const Type *EltTy = ATy->getElementType();
97 for (unsigned i = 0, e = ATy->getNumElements(); i != e; ++i) {
98 if (Indices && *Indices == i)
99 return ComputeLinearIndex(TLI, EltTy, Indices+1, IndicesEnd, CurIndex);
100 CurIndex = ComputeLinearIndex(TLI, EltTy, 0, 0, CurIndex);
104 // We haven't found the type we're looking for, so keep searching.
108 /// ComputeValueVTs - Given an LLVM IR type, compute a sequence of
109 /// EVTs that represent all the individual underlying
110 /// non-aggregate types that comprise it.
112 /// If Offsets is non-null, it points to a vector to be filled in
113 /// with the in-memory offsets of each of the individual values.
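/// For example, the struct type {i32, [2 x float]} produces
/// ValueVTs = {i32, f32, f32}; with a typical 4-byte-aligned layout the
/// corresponding Offsets are {0, 4, 8}.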
115 static void ComputeValueVTs(const TargetLowering &TLI, const Type *Ty,
116 SmallVectorImpl<EVT> &ValueVTs,
117 SmallVectorImpl<uint64_t> *Offsets = 0,
118 uint64_t StartingOffset = 0) {
119 // Given a struct type, recursively traverse the elements.
120 if (const StructType *STy = dyn_cast<StructType>(Ty)) {
121 const StructLayout *SL = TLI.getTargetData()->getStructLayout(STy);
122 for (StructType::element_iterator EB = STy->element_begin(),
124 EE = STy->element_end();
126 ComputeValueVTs(TLI, *EI, ValueVTs, Offsets,
127 StartingOffset + SL->getElementOffset(EI - EB));
130 // Given an array type, recursively traverse the elements.
131 if (const ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
132 const Type *EltTy = ATy->getElementType();
133 uint64_t EltSize = TLI.getTargetData()->getTypeAllocSize(EltTy);
134 for (unsigned i = 0, e = ATy->getNumElements(); i != e; ++i)
135 ComputeValueVTs(TLI, EltTy, ValueVTs, Offsets,
136 StartingOffset + i * EltSize);
139 // Interpret void as zero return values.
140 if (Ty == Type::getVoidTy(Ty->getContext()))
142 // Base case: we can get an EVT for this LLVM IR type.
143 ValueVTs.push_back(TLI.getValueType(Ty));
145 Offsets->push_back(StartingOffset);
149 /// RegsForValue - This struct represents the registers (physical or virtual)
150 /// that a particular set of values is assigned, and the type information about
151 /// the value. The most common situation is to represent one value at a time,
152 /// but struct or array values are handled element-wise as multiple values.
153 /// The splitting of aggregates is performed recursively, so that we never
154 /// have aggregate-typed registers. The values at this point do not necessarily
/// have legal types, so each value may require one or more registers of some
/// legal register type.
158 struct VISIBILITY_HIDDEN RegsForValue {
159 /// TLI - The TargetLowering object.
161 const TargetLowering *TLI;
163 /// ValueVTs - The value types of the values, which may not be legal, and
/// may need to be promoted or synthesized from one or more registers.
166 SmallVector<EVT, 4> ValueVTs;
168 /// RegVTs - The value types of the registers. This is the same size as
169 /// ValueVTs and it records, for each value, what the type of the assigned
170 /// register or registers are. (Individual values are never synthesized
171 /// from more than one type of register.)
/// With virtual registers, the contents of RegVTs is redundant with TLI's
/// getRegisterType member function; with physical registers, however, it is
/// necessary to have a separate record of the types.
177 SmallVector<EVT, 4> RegVTs;
179 /// Regs - This list holds the registers assigned to the values.
180 /// Each legal or promoted value requires one register, and each
181 /// expanded value requires multiple registers.
183 SmallVector<unsigned, 4> Regs;
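/// For example, an i128 value on a typical 32-bit target has
/// ValueVTs = {i128}, RegVTs = {i32}, and four entries in Regs.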
185 RegsForValue() : TLI(0) {}
187 RegsForValue(const TargetLowering &tli,
188 const SmallVector<unsigned, 4> ®s,
189 EVT regvt, EVT valuevt)
190 : TLI(&tli), ValueVTs(1, valuevt), RegVTs(1, regvt), Regs(regs) {}
191 RegsForValue(const TargetLowering &tli,
192 const SmallVector<unsigned, 4> ®s,
193 const SmallVector<EVT, 4> ®vts,
194 const SmallVector<EVT, 4> &valuevts)
195 : TLI(&tli), ValueVTs(valuevts), RegVTs(regvts), Regs(regs) {}
196 RegsForValue(LLVMContext &Context, const TargetLowering &tli,
197 unsigned Reg, const Type *Ty) : TLI(&tli) {
198 ComputeValueVTs(tli, Ty, ValueVTs);
200 for (unsigned Value = 0, e = ValueVTs.size(); Value != e; ++Value) {
201 EVT ValueVT = ValueVTs[Value];
202 unsigned NumRegs = TLI->getNumRegisters(Context, ValueVT);
203 EVT RegisterVT = TLI->getRegisterType(Context, ValueVT);
204 for (unsigned i = 0; i != NumRegs; ++i)
205 Regs.push_back(Reg + i);
206 RegVTs.push_back(RegisterVT);
211 /// append - Add the specified values to this one.
212 void append(const RegsForValue &RHS) {
214 ValueVTs.append(RHS.ValueVTs.begin(), RHS.ValueVTs.end());
215 RegVTs.append(RHS.RegVTs.begin(), RHS.RegVTs.end());
216 Regs.append(RHS.Regs.begin(), RHS.Regs.end());
220 /// getCopyFromRegs - Emit a series of CopyFromReg nodes that copies from
221 /// this value and returns the result as a ValueVTs value. This uses
222 /// Chain/Flag as the input and updates them for the output Chain/Flag.
223 /// If the Flag pointer is NULL, no flag is used.
224 SDValue getCopyFromRegs(SelectionDAG &DAG, DebugLoc dl,
225 SDValue &Chain, SDValue *Flag) const;
227 /// getCopyToRegs - Emit a series of CopyToReg nodes that copies the
228 /// specified value into the registers specified by this object. This uses
229 /// Chain/Flag as the input and updates them for the output Chain/Flag.
230 /// If the Flag pointer is NULL, no flag is used.
231 void getCopyToRegs(SDValue Val, SelectionDAG &DAG, DebugLoc dl,
232 SDValue &Chain, SDValue *Flag) const;
234 /// AddInlineAsmOperands - Add this value to the specified inlineasm node
235 /// operand list. This adds the code marker, matching input operand index
236 /// (if applicable), and includes the number of values added into it.
237 void AddInlineAsmOperands(unsigned Code,
238 bool HasMatching, unsigned MatchingIdx,
239 SelectionDAG &DAG, std::vector<SDValue> &Ops) const;
243 /// isUsedOutsideOfDefiningBlock - Return true if this instruction is used by
244 /// PHI nodes or outside of the basic block that defines it, or used by a
245 /// switch or atomic instruction, which may expand to multiple basic blocks.
246 static bool isUsedOutsideOfDefiningBlock(Instruction *I) {
247 if (isa<PHINode>(I)) return true;
248 BasicBlock *BB = I->getParent();
249 for (Value::use_iterator UI = I->use_begin(), E = I->use_end(); UI != E; ++UI)
250 if (cast<Instruction>(*UI)->getParent() != BB || isa<PHINode>(*UI))
255 /// isOnlyUsedInEntryBlock - If the specified argument is only used in the
/// entry block, return true. Arguments used by a switch are excluded, since
/// the switch may expand into multiple basic blocks.
258 static bool isOnlyUsedInEntryBlock(Argument *A, bool EnableFastISel) {
259 // With FastISel active, we may be splitting blocks, so force creation
260 // of virtual registers for all non-dead arguments.
261 // Don't force virtual registers for byval arguments though, because
262 // fast-isel can't handle those in all cases.
263 if (EnableFastISel && !A->hasByValAttr())
264 return A->use_empty();
266 BasicBlock *Entry = A->getParent()->begin();
267 for (Value::use_iterator UI = A->use_begin(), E = A->use_end(); UI != E; ++UI)
268 if (cast<Instruction>(*UI)->getParent() != Entry || isa<SwitchInst>(*UI))
269 return false; // Use not in entry block.
273 FunctionLoweringInfo::FunctionLoweringInfo(TargetLowering &tli)
277 void FunctionLoweringInfo::set(Function &fn, MachineFunction &mf,
279 bool EnableFastISel) {
282 RegInfo = &MF->getRegInfo();
284 // Create a vreg for each argument register that is not dead and is used
285 // outside of the entry block for the function.
286 for (Function::arg_iterator AI = Fn->arg_begin(), E = Fn->arg_end();
288 if (!isOnlyUsedInEntryBlock(AI, EnableFastISel))
289 InitializeRegForValue(AI);
291 // Initialize the mapping of values to registers. This is only set up for
// instruction values that are used outside of the block that defines them.
294 Function::iterator BB = Fn->begin(), EB = Fn->end();
295 for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I)
296 if (AllocaInst *AI = dyn_cast<AllocaInst>(I))
297 if (ConstantInt *CUI = dyn_cast<ConstantInt>(AI->getArraySize())) {
298 const Type *Ty = AI->getAllocatedType();
299 uint64_t TySize = TLI.getTargetData()->getTypeAllocSize(Ty);
301 std::max((unsigned)TLI.getTargetData()->getPrefTypeAlignment(Ty),
304 TySize *= CUI->getZExtValue(); // Get total allocated size.
305 if (TySize == 0) TySize = 1; // Don't create zero-sized stack objects.
306 StaticAllocaMap[AI] =
307 MF->getFrameInfo()->CreateStackObject(TySize, Align);
310 for (; BB != EB; ++BB)
311 for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I)
312 if (!I->use_empty() && isUsedOutsideOfDefiningBlock(I))
313 if (!isa<AllocaInst>(I) ||
314 !StaticAllocaMap.count(cast<AllocaInst>(I)))
315 InitializeRegForValue(I);
317 // Create an initial MachineBasicBlock for each LLVM BasicBlock in F. This
318 // also creates the initial PHI MachineInstrs, though none of the input
319 // operands are populated.
320 for (BB = Fn->begin(), EB = Fn->end(); BB != EB; ++BB) {
321 MachineBasicBlock *MBB = mf.CreateMachineBasicBlock(BB);
// Create Machine PHI nodes for LLVM PHI nodes, lowering them as
// appropriate.
PHINode *PN;
DebugLoc DL;
329 for (BasicBlock::iterator
330 I = BB->begin(), E = BB->end(); I != E; ++I) {
331 if (CallInst *CI = dyn_cast<CallInst>(I)) {
332 if (Function *F = CI->getCalledFunction()) {
333 switch (F->getIntrinsicID()) {
335 case Intrinsic::dbg_stoppoint: {
336 DbgStopPointInst *SPI = cast<DbgStopPointInst>(I);
337 if (isValidDebugInfoIntrinsic(*SPI, CodeGenOpt::Default))
338 DL = ExtractDebugLocation(*SPI, MF->getDebugLocInfo());
341 case Intrinsic::dbg_func_start: {
342 DbgFuncStartInst *FSI = cast<DbgFuncStartInst>(I);
343 if (isValidDebugInfoIntrinsic(*FSI, CodeGenOpt::Default))
344 DL = ExtractDebugLocation(*FSI, MF->getDebugLocInfo());
351 PN = dyn_cast<PHINode>(I);
352 if (!PN || PN->use_empty()) continue;
354 unsigned PHIReg = ValueMap[PN];
355 assert(PHIReg && "PHI node does not have an assigned virtual register!");
357 SmallVector<EVT, 4> ValueVTs;
358 ComputeValueVTs(TLI, PN->getType(), ValueVTs);
359 for (unsigned vti = 0, vte = ValueVTs.size(); vti != vte; ++vti) {
360 EVT VT = ValueVTs[vti];
361 unsigned NumRegisters = TLI.getNumRegisters(*DAG.getContext(), VT);
362 const TargetInstrInfo *TII = MF->getTarget().getInstrInfo();
363 for (unsigned i = 0; i != NumRegisters; ++i)
364 BuildMI(MBB, DL, TII->get(TargetInstrInfo::PHI), PHIReg + i);
365 PHIReg += NumRegisters;
371 unsigned FunctionLoweringInfo::MakeReg(EVT VT) {
372 return RegInfo->createVirtualRegister(TLI.getRegClassFor(VT));
375 /// CreateRegForValue - Allocate the appropriate number of virtual registers of
376 /// the correctly promoted or expanded types. Assign these registers
377 /// consecutive vreg numbers and return the first assigned number.
379 /// In the case that the given value has struct or array type, this function
380 /// will assign registers for each member or element.
382 unsigned FunctionLoweringInfo::CreateRegForValue(const Value *V) {
383 SmallVector<EVT, 4> ValueVTs;
384 ComputeValueVTs(TLI, V->getType(), ValueVTs);
386 unsigned FirstReg = 0;
387 for (unsigned Value = 0, e = ValueVTs.size(); Value != e; ++Value) {
388 EVT ValueVT = ValueVTs[Value];
389 EVT RegisterVT = TLI.getRegisterType(V->getContext(), ValueVT);
391 unsigned NumRegs = TLI.getNumRegisters(V->getContext(), ValueVT);
392 for (unsigned i = 0; i != NumRegs; ++i) {
393 unsigned R = MakeReg(RegisterVT);
394 if (!FirstReg) FirstReg = R;
400 /// getCopyFromParts - Create a value that contains the specified legal parts
401 /// combined into the value they represent. If the parts combine to a type
/// larger than ValueVT, then AssertOp can be used to specify whether the extra
403 /// bits are known to be zero (ISD::AssertZext) or sign extended from ValueVT
404 /// (ISD::AssertSext).
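/// For example, a single i32 part that carries an i17 value is first tagged
/// with an AssertZext/AssertSext node (when AssertOp is given) and then
/// truncated to i17.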
405 static SDValue getCopyFromParts(SelectionDAG &DAG, DebugLoc dl,
406 const SDValue *Parts,
407 unsigned NumParts, EVT PartVT, EVT ValueVT,
408 ISD::NodeType AssertOp = ISD::DELETED_NODE) {
409 assert(NumParts > 0 && "No parts to assemble!");
410 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
411 SDValue Val = Parts[0];
414 // Assemble the value from multiple parts.
415 if (!ValueVT.isVector() && ValueVT.isInteger()) {
416 unsigned PartBits = PartVT.getSizeInBits();
417 unsigned ValueBits = ValueVT.getSizeInBits();
419 // Assemble the power of 2 part.
420 unsigned RoundParts = NumParts & (NumParts - 1) ?
421 1 << Log2_32(NumParts) : NumParts;
422 unsigned RoundBits = PartBits * RoundParts;
423 EVT RoundVT = RoundBits == ValueBits ?
424 ValueVT : EVT::getIntegerVT(*DAG.getContext(), RoundBits);
427 EVT HalfVT = EVT::getIntegerVT(*DAG.getContext(), RoundBits/2);
429 if (RoundParts > 2) {
430 Lo = getCopyFromParts(DAG, dl, Parts, RoundParts/2, PartVT, HalfVT);
431 Hi = getCopyFromParts(DAG, dl, Parts+RoundParts/2, RoundParts/2,
434 Lo = DAG.getNode(ISD::BIT_CONVERT, dl, HalfVT, Parts[0]);
435 Hi = DAG.getNode(ISD::BIT_CONVERT, dl, HalfVT, Parts[1]);
437 if (TLI.isBigEndian())
439 Val = DAG.getNode(ISD::BUILD_PAIR, dl, RoundVT, Lo, Hi);
441 if (RoundParts < NumParts) {
442 // Assemble the trailing non-power-of-2 part.
443 unsigned OddParts = NumParts - RoundParts;
444 EVT OddVT = EVT::getIntegerVT(*DAG.getContext(), OddParts * PartBits);
445 Hi = getCopyFromParts(DAG, dl,
446 Parts+RoundParts, OddParts, PartVT, OddVT);
448 // Combine the round and odd parts.
450 if (TLI.isBigEndian())
452 EVT TotalVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
453 Hi = DAG.getNode(ISD::ANY_EXTEND, dl, TotalVT, Hi);
454 Hi = DAG.getNode(ISD::SHL, dl, TotalVT, Hi,
455 DAG.getConstant(Lo.getValueType().getSizeInBits(),
456 TLI.getPointerTy()));
457 Lo = DAG.getNode(ISD::ZERO_EXTEND, dl, TotalVT, Lo);
458 Val = DAG.getNode(ISD::OR, dl, TotalVT, Lo, Hi);
460 } else if (ValueVT.isVector()) {
461 // Handle a multi-element vector.
462 EVT IntermediateVT, RegisterVT;
463 unsigned NumIntermediates;
465 TLI.getVectorTypeBreakdown(*DAG.getContext(), ValueVT, IntermediateVT,
466 NumIntermediates, RegisterVT);
467 assert(NumRegs == NumParts && "Part count doesn't match vector breakdown!");
468 NumParts = NumRegs; // Silence a compiler warning.
469 assert(RegisterVT == PartVT && "Part type doesn't match vector breakdown!");
470 assert(RegisterVT == Parts[0].getValueType() &&
471 "Part type doesn't match part!");
473 // Assemble the parts into intermediate operands.
474 SmallVector<SDValue, 8> Ops(NumIntermediates);
475 if (NumIntermediates == NumParts) {
// If the register was not expanded, truncate or copy the value, as appropriate.
478 for (unsigned i = 0; i != NumParts; ++i)
479 Ops[i] = getCopyFromParts(DAG, dl, &Parts[i], 1,
480 PartVT, IntermediateVT);
481 } else if (NumParts > 0) {
// If the intermediate type was expanded, build the intermediate operands
// from smaller parts.
484 assert(NumParts % NumIntermediates == 0 &&
485 "Must expand into a divisible number of parts!");
486 unsigned Factor = NumParts / NumIntermediates;
487 for (unsigned i = 0; i != NumIntermediates; ++i)
488 Ops[i] = getCopyFromParts(DAG, dl, &Parts[i * Factor], Factor,
489 PartVT, IntermediateVT);
// Build a vector with BUILD_VECTOR or CONCAT_VECTORS from the intermediate
// operands.
494 Val = DAG.getNode(IntermediateVT.isVector() ?
495 ISD::CONCAT_VECTORS : ISD::BUILD_VECTOR, dl,
496 ValueVT, &Ops[0], NumIntermediates);
497 } else if (PartVT.isFloatingPoint()) {
498 // FP split into multiple FP parts (for ppcf128)
499 assert(ValueVT == EVT(MVT::ppcf128) && PartVT == EVT(MVT::f64) &&
502 Lo = DAG.getNode(ISD::BIT_CONVERT, dl, EVT(MVT::f64), Parts[0]);
503 Hi = DAG.getNode(ISD::BIT_CONVERT, dl, EVT(MVT::f64), Parts[1]);
504 if (TLI.isBigEndian())
506 Val = DAG.getNode(ISD::BUILD_PAIR, dl, ValueVT, Lo, Hi);
508 // FP split into integer parts (soft fp)
509 assert(ValueVT.isFloatingPoint() && PartVT.isInteger() &&
510 !PartVT.isVector() && "Unexpected split");
511 EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), ValueVT.getSizeInBits());
512 Val = getCopyFromParts(DAG, dl, Parts, NumParts, PartVT, IntVT);
516 // There is now one part, held in Val. Correct it to match ValueVT.
517 PartVT = Val.getValueType();
519 if (PartVT == ValueVT)
522 if (PartVT.isVector()) {
523 assert(ValueVT.isVector() && "Unknown vector conversion!");
524 return DAG.getNode(ISD::BIT_CONVERT, dl, ValueVT, Val);
527 if (ValueVT.isVector()) {
528 assert(ValueVT.getVectorElementType() == PartVT &&
529 ValueVT.getVectorNumElements() == 1 &&
530 "Only trivial scalar-to-vector conversions should get here!");
531 return DAG.getNode(ISD::BUILD_VECTOR, dl, ValueVT, Val);
534 if (PartVT.isInteger() &&
535 ValueVT.isInteger()) {
536 if (ValueVT.bitsLT(PartVT)) {
537 // For a truncate, see if we have any information to
538 // indicate whether the truncated bits will always be
539 // zero or sign-extension.
540 if (AssertOp != ISD::DELETED_NODE)
541 Val = DAG.getNode(AssertOp, dl, PartVT, Val,
542 DAG.getValueType(ValueVT));
543 return DAG.getNode(ISD::TRUNCATE, dl, ValueVT, Val);
545 return DAG.getNode(ISD::ANY_EXTEND, dl, ValueVT, Val);
549 if (PartVT.isFloatingPoint() && ValueVT.isFloatingPoint()) {
550 if (ValueVT.bitsLT(Val.getValueType()))
551 // FP_ROUND's are always exact here.
552 return DAG.getNode(ISD::FP_ROUND, dl, ValueVT, Val,
553 DAG.getIntPtrConstant(1));
554 return DAG.getNode(ISD::FP_EXTEND, dl, ValueVT, Val);
557 if (PartVT.getSizeInBits() == ValueVT.getSizeInBits())
558 return DAG.getNode(ISD::BIT_CONVERT, dl, ValueVT, Val);
560 llvm_unreachable("Unknown mismatch!");
564 /// getCopyToParts - Create a series of nodes that contain the specified value
565 /// split into legal parts. If the parts contain more bits than Val, then, for
566 /// integers, ExtendKind can be used to specify how to generate the extra bits.
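/// For example, splitting an i64 value into two i32 parts on a little-endian
/// 32-bit target leaves the low half in Parts[0] and the high half in
/// Parts[1]; on a big-endian target the parts are reversed afterwards.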
567 static void getCopyToParts(SelectionDAG &DAG, DebugLoc dl, SDValue Val,
568 SDValue *Parts, unsigned NumParts, EVT PartVT,
569 ISD::NodeType ExtendKind = ISD::ANY_EXTEND) {
570 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
571 EVT PtrVT = TLI.getPointerTy();
572 EVT ValueVT = Val.getValueType();
573 unsigned PartBits = PartVT.getSizeInBits();
574 unsigned OrigNumParts = NumParts;
575 assert(TLI.isTypeLegal(PartVT) && "Copying to an illegal type!");
580 if (!ValueVT.isVector()) {
581 if (PartVT == ValueVT) {
582 assert(NumParts == 1 && "No-op copy with multiple parts!");
587 if (NumParts * PartBits > ValueVT.getSizeInBits()) {
588 // If the parts cover more bits than the value has, promote the value.
589 if (PartVT.isFloatingPoint() && ValueVT.isFloatingPoint()) {
590 assert(NumParts == 1 && "Do not know what to promote to!");
591 Val = DAG.getNode(ISD::FP_EXTEND, dl, PartVT, Val);
592 } else if (PartVT.isInteger() && ValueVT.isInteger()) {
593 ValueVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
594 Val = DAG.getNode(ExtendKind, dl, ValueVT, Val);
596 llvm_unreachable("Unknown mismatch!");
598 } else if (PartBits == ValueVT.getSizeInBits()) {
599 // Different types of the same size.
600 assert(NumParts == 1 && PartVT != ValueVT);
601 Val = DAG.getNode(ISD::BIT_CONVERT, dl, PartVT, Val);
602 } else if (NumParts * PartBits < ValueVT.getSizeInBits()) {
// If the parts cover fewer bits than the value has, truncate the value.
604 if (PartVT.isInteger() && ValueVT.isInteger()) {
605 ValueVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
606 Val = DAG.getNode(ISD::TRUNCATE, dl, ValueVT, Val);
608 llvm_unreachable("Unknown mismatch!");
612 // The value may have changed - recompute ValueVT.
613 ValueVT = Val.getValueType();
614 assert(NumParts * PartBits == ValueVT.getSizeInBits() &&
615 "Failed to tile the value with PartVT!");
618 assert(PartVT == ValueVT && "Type conversion failed!");
623 // Expand the value into multiple parts.
624 if (NumParts & (NumParts - 1)) {
625 // The number of parts is not a power of 2. Split off and copy the tail.
626 assert(PartVT.isInteger() && ValueVT.isInteger() &&
627 "Do not know what to expand to!");
628 unsigned RoundParts = 1 << Log2_32(NumParts);
629 unsigned RoundBits = RoundParts * PartBits;
630 unsigned OddParts = NumParts - RoundParts;
631 SDValue OddVal = DAG.getNode(ISD::SRL, dl, ValueVT, Val,
632 DAG.getConstant(RoundBits,
633 TLI.getPointerTy()));
634 getCopyToParts(DAG, dl, OddVal, Parts + RoundParts, OddParts, PartVT);
635 if (TLI.isBigEndian())
636 // The odd parts were reversed by getCopyToParts - unreverse them.
637 std::reverse(Parts + RoundParts, Parts + NumParts);
638 NumParts = RoundParts;
639 ValueVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
640 Val = DAG.getNode(ISD::TRUNCATE, dl, ValueVT, Val);
// The number of parts is a power of 2. Repeatedly bisect the value using
// EXTRACT_ELEMENT.
645 Parts[0] = DAG.getNode(ISD::BIT_CONVERT, dl,
646 EVT::getIntegerVT(*DAG.getContext(), ValueVT.getSizeInBits()),
648 for (unsigned StepSize = NumParts; StepSize > 1; StepSize /= 2) {
649 for (unsigned i = 0; i < NumParts; i += StepSize) {
650 unsigned ThisBits = StepSize * PartBits / 2;
651 EVT ThisVT = EVT::getIntegerVT(*DAG.getContext(), ThisBits);
652 SDValue &Part0 = Parts[i];
653 SDValue &Part1 = Parts[i+StepSize/2];
655 Part1 = DAG.getNode(ISD::EXTRACT_ELEMENT, dl,
657 DAG.getConstant(1, PtrVT));
658 Part0 = DAG.getNode(ISD::EXTRACT_ELEMENT, dl,
660 DAG.getConstant(0, PtrVT));
662 if (ThisBits == PartBits && ThisVT != PartVT) {
663 Part0 = DAG.getNode(ISD::BIT_CONVERT, dl,
665 Part1 = DAG.getNode(ISD::BIT_CONVERT, dl,
671 if (TLI.isBigEndian())
672 std::reverse(Parts, Parts + OrigNumParts);
679 if (PartVT != ValueVT) {
680 if (PartVT.isVector()) {
681 Val = DAG.getNode(ISD::BIT_CONVERT, dl, PartVT, Val);
683 assert(ValueVT.getVectorElementType() == PartVT &&
684 ValueVT.getVectorNumElements() == 1 &&
685 "Only trivial vector-to-scalar conversions should get here!");
686 Val = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl,
688 DAG.getConstant(0, PtrVT));
696 // Handle a multi-element vector.
697 EVT IntermediateVT, RegisterVT;
698 unsigned NumIntermediates;
699 unsigned NumRegs = TLI.getVectorTypeBreakdown(*DAG.getContext(), ValueVT,
700 IntermediateVT, NumIntermediates, RegisterVT);
701 unsigned NumElements = ValueVT.getVectorNumElements();
703 assert(NumRegs == NumParts && "Part count doesn't match vector breakdown!");
704 NumParts = NumRegs; // Silence a compiler warning.
705 assert(RegisterVT == PartVT && "Part type doesn't match vector breakdown!");
707 // Split the vector into intermediate operands.
708 SmallVector<SDValue, 8> Ops(NumIntermediates);
709 for (unsigned i = 0; i != NumIntermediates; ++i)
710 if (IntermediateVT.isVector())
711 Ops[i] = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl,
713 DAG.getConstant(i * (NumElements / NumIntermediates),
716 Ops[i] = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl,
718 DAG.getConstant(i, PtrVT));
720 // Split the intermediate operands into legal parts.
721 if (NumParts == NumIntermediates) {
// If the register was not expanded, promote or copy the value, as appropriate.
724 for (unsigned i = 0; i != NumParts; ++i)
725 getCopyToParts(DAG, dl, Ops[i], &Parts[i], 1, PartVT);
726 } else if (NumParts > 0) {
// If the intermediate type was expanded, split the value into
// legal parts.
729 assert(NumParts % NumIntermediates == 0 &&
730 "Must expand into a divisible number of parts!");
731 unsigned Factor = NumParts / NumIntermediates;
732 for (unsigned i = 0; i != NumIntermediates; ++i)
733 getCopyToParts(DAG, dl, Ops[i], &Parts[i * Factor], Factor, PartVT);
738 void SelectionDAGLowering::init(GCFunctionInfo *gfi, AliasAnalysis &aa) {
741 TD = DAG.getTarget().getTargetData();
/// clear - Clear out the current SelectionDAG and the associated
745 /// state and prepare this SelectionDAGLowering object to be used
746 /// for a new block. This doesn't clear out information about
747 /// additional blocks that are needed to complete switch lowering
/// or PHI node updating; that information is cleared out as it is consumed.
750 void SelectionDAGLowering::clear() {
752 PendingLoads.clear();
753 PendingExports.clear();
755 CurDebugLoc = DebugLoc::getUnknownLoc();
759 /// getRoot - Return the current virtual root of the Selection DAG,
760 /// flushing any PendingLoad items. This must be done before emitting
761 /// a store or any other node that may need to be ordered after any
762 /// prior load instructions.
764 SDValue SelectionDAGLowering::getRoot() {
765 if (PendingLoads.empty())
766 return DAG.getRoot();
768 if (PendingLoads.size() == 1) {
769 SDValue Root = PendingLoads[0];
771 PendingLoads.clear();
775 // Otherwise, we have to make a token factor node.
776 SDValue Root = DAG.getNode(ISD::TokenFactor, getCurDebugLoc(), MVT::Other,
777 &PendingLoads[0], PendingLoads.size());
778 PendingLoads.clear();
783 /// getControlRoot - Similar to getRoot, but instead of flushing all the
784 /// PendingLoad items, flush all the PendingExports items. It is necessary
785 /// to do this before emitting a terminator instruction.
787 SDValue SelectionDAGLowering::getControlRoot() {
788 SDValue Root = DAG.getRoot();
790 if (PendingExports.empty())
793 // Turn all of the CopyToReg chains into one factored node.
794 if (Root.getOpcode() != ISD::EntryToken) {
795 unsigned i = 0, e = PendingExports.size();
796 for (; i != e; ++i) {
797 assert(PendingExports[i].getNode()->getNumOperands() > 1);
798 if (PendingExports[i].getNode()->getOperand(0) == Root)
799 break; // Don't add the root if we already indirectly depend on it.
803 PendingExports.push_back(Root);
806 Root = DAG.getNode(ISD::TokenFactor, getCurDebugLoc(), MVT::Other,
808 PendingExports.size());
809 PendingExports.clear();
814 void SelectionDAGLowering::visit(Instruction &I) {
815 visit(I.getOpcode(), I);
818 void SelectionDAGLowering::visit(unsigned Opcode, User &I) {
819 // Note: this doesn't use InstVisitor, because it has to work with
820 // ConstantExpr's in addition to instructions.
822 default: llvm_unreachable("Unknown instruction type encountered!");
823 // Build the switch statement using the Instruction.def file.
824 #define HANDLE_INST(NUM, OPCODE, CLASS) \
825 case Instruction::OPCODE:return visit##OPCODE((CLASS&)I);
826 #include "llvm/Instruction.def"
830 SDValue SelectionDAGLowering::getValue(const Value *V) {
831 SDValue &N = NodeMap[V];
832 if (N.getNode()) return N;
834 if (Constant *C = const_cast<Constant*>(dyn_cast<Constant>(V))) {
835 EVT VT = TLI.getValueType(V->getType(), true);
837 if (ConstantInt *CI = dyn_cast<ConstantInt>(C))
838 return N = DAG.getConstant(*CI, VT);
840 if (GlobalValue *GV = dyn_cast<GlobalValue>(C))
841 return N = DAG.getGlobalAddress(GV, VT);
843 if (isa<ConstantPointerNull>(C))
844 return N = DAG.getConstant(0, TLI.getPointerTy());
846 if (ConstantFP *CFP = dyn_cast<ConstantFP>(C))
847 return N = DAG.getConstantFP(*CFP, VT);
849 if (isa<UndefValue>(C) && !V->getType()->isAggregateType())
850 return N = DAG.getUNDEF(VT);
852 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) {
853 visit(CE->getOpcode(), *CE);
854 SDValue N1 = NodeMap[V];
855 assert(N1.getNode() && "visit didn't populate the ValueMap!");
859 if (isa<ConstantStruct>(C) || isa<ConstantArray>(C)) {
860 SmallVector<SDValue, 4> Constants;
861 for (User::const_op_iterator OI = C->op_begin(), OE = C->op_end();
863 SDNode *Val = getValue(*OI).getNode();
864 // If the operand is an empty aggregate, there are no values.
866 // Add each leaf value from the operand to the Constants list
867 // to form a flattened list of all the values.
868 for (unsigned i = 0, e = Val->getNumValues(); i != e; ++i)
869 Constants.push_back(SDValue(Val, i));
871 return DAG.getMergeValues(&Constants[0], Constants.size(),
875 if (isa<StructType>(C->getType()) || isa<ArrayType>(C->getType())) {
876 assert((isa<ConstantAggregateZero>(C) || isa<UndefValue>(C)) &&
877 "Unknown struct or array constant!");
879 SmallVector<EVT, 4> ValueVTs;
880 ComputeValueVTs(TLI, C->getType(), ValueVTs);
881 unsigned NumElts = ValueVTs.size();
883 return SDValue(); // empty struct
884 SmallVector<SDValue, 4> Constants(NumElts);
885 for (unsigned i = 0; i != NumElts; ++i) {
886 EVT EltVT = ValueVTs[i];
887 if (isa<UndefValue>(C))
888 Constants[i] = DAG.getUNDEF(EltVT);
889 else if (EltVT.isFloatingPoint())
890 Constants[i] = DAG.getConstantFP(0, EltVT);
892 Constants[i] = DAG.getConstant(0, EltVT);
894 return DAG.getMergeValues(&Constants[0], NumElts, getCurDebugLoc());
897 const VectorType *VecTy = cast<VectorType>(V->getType());
898 unsigned NumElements = VecTy->getNumElements();
900 // Now that we know the number and type of the elements, get that number of
901 // elements into the Ops array based on what kind of constant it is.
902 SmallVector<SDValue, 16> Ops;
903 if (ConstantVector *CP = dyn_cast<ConstantVector>(C)) {
904 for (unsigned i = 0; i != NumElements; ++i)
905 Ops.push_back(getValue(CP->getOperand(i)));
907 assert(isa<ConstantAggregateZero>(C) && "Unknown vector constant!");
908 EVT EltVT = TLI.getValueType(VecTy->getElementType());
911 if (EltVT.isFloatingPoint())
912 Op = DAG.getConstantFP(0, EltVT);
914 Op = DAG.getConstant(0, EltVT);
915 Ops.assign(NumElements, Op);
918 // Create a BUILD_VECTOR node.
919 return NodeMap[V] = DAG.getNode(ISD::BUILD_VECTOR, getCurDebugLoc(),
920 VT, &Ops[0], Ops.size());
// If this is a static alloca, generate it as the frameindex instead of
// creating a new value.
925 if (const AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
926 DenseMap<const AllocaInst*, int>::iterator SI =
927 FuncInfo.StaticAllocaMap.find(AI);
928 if (SI != FuncInfo.StaticAllocaMap.end())
929 return DAG.getFrameIndex(SI->second, TLI.getPointerTy());
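// Otherwise the value must already live in a virtual register created for it
// by FunctionLoweringInfo; emit CopyFromReg nodes to read it back out.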
932 unsigned InReg = FuncInfo.ValueMap[V];
933 assert(InReg && "Value not in map!");
935 RegsForValue RFV(*DAG.getContext(), TLI, InReg, V->getType());
936 SDValue Chain = DAG.getEntryNode();
937 return RFV.getCopyFromRegs(DAG, getCurDebugLoc(), Chain, NULL);
941 void SelectionDAGLowering::visitRet(ReturnInst &I) {
942 SDValue Chain = getControlRoot();
943 SmallVector<ISD::OutputArg, 8> Outs;
944 for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i) {
945 SmallVector<EVT, 4> ValueVTs;
946 ComputeValueVTs(TLI, I.getOperand(i)->getType(), ValueVTs);
947 unsigned NumValues = ValueVTs.size();
948 if (NumValues == 0) continue;
950 SDValue RetOp = getValue(I.getOperand(i));
951 for (unsigned j = 0, f = NumValues; j != f; ++j) {
952 EVT VT = ValueVTs[j];
954 ISD::NodeType ExtendKind = ISD::ANY_EXTEND;
956 const Function *F = I.getParent()->getParent();
957 if (F->paramHasAttr(0, Attribute::SExt))
958 ExtendKind = ISD::SIGN_EXTEND;
959 else if (F->paramHasAttr(0, Attribute::ZExt))
960 ExtendKind = ISD::ZERO_EXTEND;
962 // FIXME: C calling convention requires the return type to be promoted to
963 // at least 32-bit. But this is not necessary for non-C calling
964 // conventions. The frontend should mark functions whose return values
965 // require promoting with signext or zeroext attributes.
966 if (ExtendKind != ISD::ANY_EXTEND && VT.isInteger()) {
967 EVT MinVT = TLI.getRegisterType(*DAG.getContext(), MVT::i32);
968 if (VT.bitsLT(MinVT))
972 unsigned NumParts = TLI.getNumRegisters(*DAG.getContext(), VT);
973 EVT PartVT = TLI.getRegisterType(*DAG.getContext(), VT);
974 SmallVector<SDValue, 4> Parts(NumParts);
975 getCopyToParts(DAG, getCurDebugLoc(),
976 SDValue(RetOp.getNode(), RetOp.getResNo() + j),
977 &Parts[0], NumParts, PartVT, ExtendKind);
979 // 'inreg' on function refers to return value
980 ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy();
981 if (F->paramHasAttr(0, Attribute::InReg))
984 // Propagate extension type if any
985 if (F->paramHasAttr(0, Attribute::SExt))
987 else if (F->paramHasAttr(0, Attribute::ZExt))
990 for (unsigned i = 0; i < NumParts; ++i)
991 Outs.push_back(ISD::OutputArg(Flags, Parts[i], /*isfixed=*/true));
995 bool isVarArg = DAG.getMachineFunction().getFunction()->isVarArg();
996 CallingConv::ID CallConv =
997 DAG.getMachineFunction().getFunction()->getCallingConv();
998 Chain = TLI.LowerReturn(Chain, CallConv, isVarArg,
999 Outs, getCurDebugLoc(), DAG);
1001 // Verify that the target's LowerReturn behaved as expected.
1002 assert(Chain.getNode() && Chain.getValueType() == MVT::Other &&
1003 "LowerReturn didn't return a valid chain!");
1005 // Update the DAG with the new chain value resulting from return lowering.
1009 /// CopyToExportRegsIfNeeded - If the given value has virtual registers
/// created for it, emit nodes to copy the value into the virtual registers.
1012 void SelectionDAGLowering::CopyToExportRegsIfNeeded(Value *V) {
1013 if (!V->use_empty()) {
1014 DenseMap<const Value *, unsigned>::iterator VMI = FuncInfo.ValueMap.find(V);
1015 if (VMI != FuncInfo.ValueMap.end())
1016 CopyValueToVirtualRegister(V, VMI->second);
1020 /// ExportFromCurrentBlock - If this condition isn't known to be exported from
/// the current basic block, add it to ValueMap now so that we'll get a
/// CopyTo/FromReg.
1023 void SelectionDAGLowering::ExportFromCurrentBlock(Value *V) {
1024 // No need to export constants.
1025 if (!isa<Instruction>(V) && !isa<Argument>(V)) return;
1027 // Already exported?
1028 if (FuncInfo.isExportedInst(V)) return;
1030 unsigned Reg = FuncInfo.InitializeRegForValue(V);
1031 CopyValueToVirtualRegister(V, Reg);
1034 bool SelectionDAGLowering::isExportableFromCurrentBlock(Value *V,
1035 const BasicBlock *FromBB) {
1036 // The operands of the setcc have to be in this block. We don't know
1037 // how to export them from some other block.
1038 if (Instruction *VI = dyn_cast<Instruction>(V)) {
1039 // Can export from current BB.
1040 if (VI->getParent() == FromBB)
1043 // Is already exported, noop.
1044 return FuncInfo.isExportedInst(V);
1047 // If this is an argument, we can export it if the BB is the entry block or
1048 // if it is already exported.
1049 if (isa<Argument>(V)) {
1050 if (FromBB == &FromBB->getParent()->getEntryBlock())
1053 // Otherwise, can only export this if it is already exported.
1054 return FuncInfo.isExportedInst(V);
1057 // Otherwise, constants can always be exported.
1061 static bool InBlock(const Value *V, const BasicBlock *BB) {
1062 if (const Instruction *I = dyn_cast<Instruction>(V))
1063 return I->getParent() == BB;
1067 /// getFCmpCondCode - Return the ISD condition code corresponding to
1068 /// the given LLVM IR floating-point condition code. This includes
1069 /// consideration of global floating-point math flags.
1071 static ISD::CondCode getFCmpCondCode(FCmpInst::Predicate Pred) {
1072 ISD::CondCode FPC, FOC;
1074 case FCmpInst::FCMP_FALSE: FOC = FPC = ISD::SETFALSE; break;
1075 case FCmpInst::FCMP_OEQ: FOC = ISD::SETEQ; FPC = ISD::SETOEQ; break;
1076 case FCmpInst::FCMP_OGT: FOC = ISD::SETGT; FPC = ISD::SETOGT; break;
1077 case FCmpInst::FCMP_OGE: FOC = ISD::SETGE; FPC = ISD::SETOGE; break;
1078 case FCmpInst::FCMP_OLT: FOC = ISD::SETLT; FPC = ISD::SETOLT; break;
1079 case FCmpInst::FCMP_OLE: FOC = ISD::SETLE; FPC = ISD::SETOLE; break;
1080 case FCmpInst::FCMP_ONE: FOC = ISD::SETNE; FPC = ISD::SETONE; break;
1081 case FCmpInst::FCMP_ORD: FOC = FPC = ISD::SETO; break;
1082 case FCmpInst::FCMP_UNO: FOC = FPC = ISD::SETUO; break;
1083 case FCmpInst::FCMP_UEQ: FOC = ISD::SETEQ; FPC = ISD::SETUEQ; break;
1084 case FCmpInst::FCMP_UGT: FOC = ISD::SETGT; FPC = ISD::SETUGT; break;
1085 case FCmpInst::FCMP_UGE: FOC = ISD::SETGE; FPC = ISD::SETUGE; break;
1086 case FCmpInst::FCMP_ULT: FOC = ISD::SETLT; FPC = ISD::SETULT; break;
1087 case FCmpInst::FCMP_ULE: FOC = ISD::SETLE; FPC = ISD::SETULE; break;
1088 case FCmpInst::FCMP_UNE: FOC = ISD::SETNE; FPC = ISD::SETUNE; break;
1089 case FCmpInst::FCMP_TRUE: FOC = FPC = ISD::SETTRUE; break;
1091 llvm_unreachable("Invalid FCmp predicate opcode!");
1092 FOC = FPC = ISD::SETFALSE;
1095 if (FiniteOnlyFPMath())
1101 /// getICmpCondCode - Return the ISD condition code corresponding to
1102 /// the given LLVM IR integer condition code.
1104 static ISD::CondCode getICmpCondCode(ICmpInst::Predicate Pred) {
1106 case ICmpInst::ICMP_EQ: return ISD::SETEQ;
1107 case ICmpInst::ICMP_NE: return ISD::SETNE;
1108 case ICmpInst::ICMP_SLE: return ISD::SETLE;
1109 case ICmpInst::ICMP_ULE: return ISD::SETULE;
1110 case ICmpInst::ICMP_SGE: return ISD::SETGE;
1111 case ICmpInst::ICMP_UGE: return ISD::SETUGE;
1112 case ICmpInst::ICMP_SLT: return ISD::SETLT;
1113 case ICmpInst::ICMP_ULT: return ISD::SETULT;
1114 case ICmpInst::ICMP_SGT: return ISD::SETGT;
1115 case ICmpInst::ICMP_UGT: return ISD::SETUGT;
1117 llvm_unreachable("Invalid ICmp predicate opcode!");
1122 /// EmitBranchForMergedCondition - Helper method for FindMergedConditions.
1123 /// This function emits a branch and is used at the leaves of an OR or an
1124 /// AND operator tree.
1127 SelectionDAGLowering::EmitBranchForMergedCondition(Value *Cond,
1128 MachineBasicBlock *TBB,
1129 MachineBasicBlock *FBB,
1130 MachineBasicBlock *CurBB) {
1131 const BasicBlock *BB = CurBB->getBasicBlock();
1133 // If the leaf of the tree is a comparison, merge the condition into
1135 if (CmpInst *BOp = dyn_cast<CmpInst>(Cond)) {
1136 // The operands of the cmp have to be in this block. We don't know
1137 // how to export them from some other block. If this is the first block
1138 // of the sequence, no exporting is needed.
1139 if (CurBB == CurMBB ||
1140 (isExportableFromCurrentBlock(BOp->getOperand(0), BB) &&
1141 isExportableFromCurrentBlock(BOp->getOperand(1), BB))) {
1142 ISD::CondCode Condition;
1143 if (ICmpInst *IC = dyn_cast<ICmpInst>(Cond)) {
1144 Condition = getICmpCondCode(IC->getPredicate());
1145 } else if (FCmpInst *FC = dyn_cast<FCmpInst>(Cond)) {
1146 Condition = getFCmpCondCode(FC->getPredicate());
1148 Condition = ISD::SETEQ; // silence warning.
1149 llvm_unreachable("Unknown compare instruction");
1152 CaseBlock CB(Condition, BOp->getOperand(0),
1153 BOp->getOperand(1), NULL, TBB, FBB, CurBB);
1154 SwitchCases.push_back(CB);
1159 // Create a CaseBlock record representing this branch.
1160 CaseBlock CB(ISD::SETEQ, Cond, ConstantInt::getTrue(*DAG.getContext()),
1161 NULL, TBB, FBB, CurBB);
1162 SwitchCases.push_back(CB);
1165 /// FindMergedConditions - If Cond is an expression like
1166 void SelectionDAGLowering::FindMergedConditions(Value *Cond,
1167 MachineBasicBlock *TBB,
1168 MachineBasicBlock *FBB,
1169 MachineBasicBlock *CurBB,
1171 // If this node is not part of the or/and tree, emit it as a branch.
1172 Instruction *BOp = dyn_cast<Instruction>(Cond);
1173 if (!BOp || !(isa<BinaryOperator>(BOp) || isa<CmpInst>(BOp)) ||
1174 (unsigned)BOp->getOpcode() != Opc || !BOp->hasOneUse() ||
1175 BOp->getParent() != CurBB->getBasicBlock() ||
1176 !InBlock(BOp->getOperand(0), CurBB->getBasicBlock()) ||
1177 !InBlock(BOp->getOperand(1), CurBB->getBasicBlock())) {
1178 EmitBranchForMergedCondition(Cond, TBB, FBB, CurBB);
1182 // Create TmpBB after CurBB.
1183 MachineFunction::iterator BBI = CurBB;
1184 MachineFunction &MF = DAG.getMachineFunction();
1185 MachineBasicBlock *TmpBB = MF.CreateMachineBasicBlock(CurBB->getBasicBlock());
1186 CurBB->getParent()->insert(++BBI, TmpBB);
1188 if (Opc == Instruction::Or) {
1189 // Codegen X | Y as:
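//   BB1:
//     jmp_if_X TBB
//     jmp TmpBB
//   TmpBB:
//     jmp_if_Y TBB
//     jmp FBB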
1197 // Emit the LHS condition.
1198 FindMergedConditions(BOp->getOperand(0), TBB, TmpBB, CurBB, Opc);
1200 // Emit the RHS condition into TmpBB.
1201 FindMergedConditions(BOp->getOperand(1), TBB, FBB, TmpBB, Opc);
1203 assert(Opc == Instruction::And && "Unknown merge op!");
1204 // Codegen X & Y as:
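//   BB1:
//     jmp_if_X TmpBB
//     jmp FBB
//   TmpBB:
//     jmp_if_Y TBB
//     jmp FBB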
1211 // This requires creation of TmpBB after CurBB.
1213 // Emit the LHS condition.
1214 FindMergedConditions(BOp->getOperand(0), TmpBB, FBB, CurBB, Opc);
1216 // Emit the RHS condition into TmpBB.
1217 FindMergedConditions(BOp->getOperand(1), TBB, FBB, TmpBB, Opc);
1221 /// If the set of cases should be emitted as a series of branches, return true.
/// If we should emit this as a bunch of and/or'd together conditions, return
/// false.
1225 SelectionDAGLowering::ShouldEmitAsBranches(const std::vector<CaseBlock> &Cases){
1226 if (Cases.size() != 2) return true;
1228 // If this is two comparisons of the same values or'd or and'd together, they
1229 // will get folded into a single comparison, so don't emit two blocks.
1230 if ((Cases[0].CmpLHS == Cases[1].CmpLHS &&
1231 Cases[0].CmpRHS == Cases[1].CmpRHS) ||
1232 (Cases[0].CmpRHS == Cases[1].CmpLHS &&
1233 Cases[0].CmpLHS == Cases[1].CmpRHS)) {
1240 void SelectionDAGLowering::visitBr(BranchInst &I) {
1241 // Update machine-CFG edges.
1242 MachineBasicBlock *Succ0MBB = FuncInfo.MBBMap[I.getSuccessor(0)];
1244 // Figure out which block is immediately after the current one.
1245 MachineBasicBlock *NextBlock = 0;
1246 MachineFunction::iterator BBI = CurMBB;
1247 if (++BBI != FuncInfo.MF->end())
1250 if (I.isUnconditional()) {
1251 // Update machine-CFG edges.
1252 CurMBB->addSuccessor(Succ0MBB);
1254 // If this is not a fall-through branch, emit the branch.
1255 if (Succ0MBB != NextBlock)
1256 DAG.setRoot(DAG.getNode(ISD::BR, getCurDebugLoc(),
1257 MVT::Other, getControlRoot(),
1258 DAG.getBasicBlock(Succ0MBB)));
1262 // If this condition is one of the special cases we handle, do special stuff
1264 Value *CondVal = I.getCondition();
1265 MachineBasicBlock *Succ1MBB = FuncInfo.MBBMap[I.getSuccessor(1)];
1267 // If this is a series of conditions that are or'd or and'd together, emit
1268 // this as a sequence of branches instead of setcc's with and/or operations.
1269 // For example, instead of something like:
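//     %c = icmp eq i32 %a, %b       (illustrative IR, not from this file)
//     %d = icmp sle i32 %x, %y
//     %cond = or i1 %c, %d
//     br i1 %cond, label %then, label %else
// emit this as two conditional branches, one per comparison, so each compare
// feeds its own branch instead of materializing the i1 values and or'ing them.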
1282 if (BinaryOperator *BOp = dyn_cast<BinaryOperator>(CondVal)) {
1283 if (BOp->hasOneUse() &&
1284 (BOp->getOpcode() == Instruction::And ||
1285 BOp->getOpcode() == Instruction::Or)) {
1286 FindMergedConditions(BOp, Succ0MBB, Succ1MBB, CurMBB, BOp->getOpcode());
1287 // If the compares in later blocks need to use values not currently
1288 // exported from this block, export them now. This block should always
1289 // be the first entry.
1290 assert(SwitchCases[0].ThisBB == CurMBB && "Unexpected lowering!");
1292 // Allow some cases to be rejected.
1293 if (ShouldEmitAsBranches(SwitchCases)) {
1294 for (unsigned i = 1, e = SwitchCases.size(); i != e; ++i) {
1295 ExportFromCurrentBlock(SwitchCases[i].CmpLHS);
1296 ExportFromCurrentBlock(SwitchCases[i].CmpRHS);
1299 // Emit the branch for this block.
1300 visitSwitchCase(SwitchCases[0]);
1301 SwitchCases.erase(SwitchCases.begin());
1305 // Okay, we decided not to do this, remove any inserted MBB's and clear
1307 for (unsigned i = 1, e = SwitchCases.size(); i != e; ++i)
1308 FuncInfo.MF->erase(SwitchCases[i].ThisBB);
1310 SwitchCases.clear();
1314 // Create a CaseBlock record representing this branch.
1315 CaseBlock CB(ISD::SETEQ, CondVal, ConstantInt::getTrue(*DAG.getContext()),
1316 NULL, Succ0MBB, Succ1MBB, CurMBB);
1317 // Use visitSwitchCase to actually insert the fast branch sequence for this
1319 visitSwitchCase(CB);
1322 /// visitSwitchCase - Emits the necessary code to represent a single node in
1323 /// the binary search tree resulting from lowering a switch instruction.
1324 void SelectionDAGLowering::visitSwitchCase(CaseBlock &CB) {
1326 SDValue CondLHS = getValue(CB.CmpLHS);
1327 DebugLoc dl = getCurDebugLoc();
1329 // Build the setcc now.
1330 if (CB.CmpMHS == NULL) {
1331 // Fold "(X == true)" to X and "(X == false)" to !X to
1332 // handle common cases produced by branch lowering.
1333 if (CB.CmpRHS == ConstantInt::getTrue(*DAG.getContext()) &&
1334 CB.CC == ISD::SETEQ)
1336 else if (CB.CmpRHS == ConstantInt::getFalse(*DAG.getContext()) &&
1337 CB.CC == ISD::SETEQ) {
1338 SDValue True = DAG.getConstant(1, CondLHS.getValueType());
1339 Cond = DAG.getNode(ISD::XOR, dl, CondLHS.getValueType(), CondLHS, True);
1341 Cond = DAG.getSetCC(dl, MVT::i1, CondLHS, getValue(CB.CmpRHS), CB.CC);
1343 assert(CB.CC == ISD::SETLE && "Can handle only LE ranges now");
1345 const APInt& Low = cast<ConstantInt>(CB.CmpLHS)->getValue();
1346 const APInt& High = cast<ConstantInt>(CB.CmpRHS)->getValue();
1348 SDValue CmpOp = getValue(CB.CmpMHS);
1349 EVT VT = CmpOp.getValueType();
1351 if (cast<ConstantInt>(CB.CmpLHS)->isMinValue(true)) {
1352 Cond = DAG.getSetCC(dl, MVT::i1, CmpOp, DAG.getConstant(High, VT),
1355 SDValue SUB = DAG.getNode(ISD::SUB, dl,
1356 VT, CmpOp, DAG.getConstant(Low, VT));
1357 Cond = DAG.getSetCC(dl, MVT::i1, SUB,
1358 DAG.getConstant(High-Low, VT), ISD::SETULE);
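// Comparing (CmpOp - Low) unsigned-less-or-equal to (High - Low) checks both
// range bounds with a single compare: values below Low wrap around to large
// unsigned numbers and fail the SETULE test as well.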
1362 // Update successor info
1363 CurMBB->addSuccessor(CB.TrueBB);
1364 CurMBB->addSuccessor(CB.FalseBB);
1366 // Set NextBlock to be the MBB immediately after the current one, if any.
1367 // This is used to avoid emitting unnecessary branches to the next block.
1368 MachineBasicBlock *NextBlock = 0;
1369 MachineFunction::iterator BBI = CurMBB;
1370 if (++BBI != FuncInfo.MF->end())
1373 // If the lhs block is the next block, invert the condition so that we can
1374 // fall through to the lhs instead of the rhs block.
1375 if (CB.TrueBB == NextBlock) {
1376 std::swap(CB.TrueBB, CB.FalseBB);
1377 SDValue True = DAG.getConstant(1, Cond.getValueType());
1378 Cond = DAG.getNode(ISD::XOR, dl, Cond.getValueType(), Cond, True);
1380 SDValue BrCond = DAG.getNode(ISD::BRCOND, dl,
1381 MVT::Other, getControlRoot(), Cond,
1382 DAG.getBasicBlock(CB.TrueBB));
1384 // If the branch was constant folded, fix up the CFG.
1385 if (BrCond.getOpcode() == ISD::BR) {
1386 CurMBB->removeSuccessor(CB.FalseBB);
1387 DAG.setRoot(BrCond);
1389 // Otherwise, go ahead and insert the false branch.
1390 if (BrCond == getControlRoot())
1391 CurMBB->removeSuccessor(CB.TrueBB);
1393 if (CB.FalseBB == NextBlock)
1394 DAG.setRoot(BrCond);
1396 DAG.setRoot(DAG.getNode(ISD::BR, dl, MVT::Other, BrCond,
1397 DAG.getBasicBlock(CB.FalseBB)));
1401 /// visitJumpTable - Emit JumpTable node in the current MBB
1402 void SelectionDAGLowering::visitJumpTable(JumpTable &JT) {
1403 // Emit the code for the jump table
1404 assert(JT.Reg != -1U && "Should lower JT Header first!");
1405 EVT PTy = TLI.getPointerTy();
1406 SDValue Index = DAG.getCopyFromReg(getControlRoot(), getCurDebugLoc(),
1408 SDValue Table = DAG.getJumpTable(JT.JTI, PTy);
1409 DAG.setRoot(DAG.getNode(ISD::BR_JT, getCurDebugLoc(),
1410 MVT::Other, Index.getValue(1),
/// visitJumpTableHeader - This function emits the code necessary to produce
/// an index into the JumpTable from the value being switched on.
1416 void SelectionDAGLowering::visitJumpTableHeader(JumpTable &JT,
1417 JumpTableHeader &JTH) {
1418 // Subtract the lowest switch case value from the value being switched on and
1419 // conditional branch to default mbb if the result is greater than the
1420 // difference between smallest and largest cases.
1421 SDValue SwitchOp = getValue(JTH.SValue);
1422 EVT VT = SwitchOp.getValueType();
1423 SDValue SUB = DAG.getNode(ISD::SUB, getCurDebugLoc(), VT, SwitchOp,
1424 DAG.getConstant(JTH.First, VT));
1426 // The SDNode we just created, which holds the value being switched on minus
// the smallest case value, needs to be copied to a virtual register so it
// can be used as an index into the jump table in a subsequent basic block.
// This value may be smaller or larger than the target's pointer type, and
// therefore may require extension or truncation.
1431 if (VT.bitsGT(TLI.getPointerTy()))
1432 SwitchOp = DAG.getNode(ISD::TRUNCATE, getCurDebugLoc(),
1433 TLI.getPointerTy(), SUB);
1435 SwitchOp = DAG.getNode(ISD::ZERO_EXTEND, getCurDebugLoc(),
1436 TLI.getPointerTy(), SUB);
1438 unsigned JumpTableReg = FuncInfo.MakeReg(TLI.getPointerTy());
1439 SDValue CopyTo = DAG.getCopyToReg(getControlRoot(), getCurDebugLoc(),
1440 JumpTableReg, SwitchOp);
1441 JT.Reg = JumpTableReg;
1443 // Emit the range check for the jump table, and branch to the default block
1444 // for the switch statement if the value being switched on exceeds the largest
1445 // case in the switch.
1446 SDValue CMP = DAG.getSetCC(getCurDebugLoc(),
1447 TLI.getSetCCResultType(SUB.getValueType()), SUB,
1448 DAG.getConstant(JTH.Last-JTH.First,VT),
1451 // Set NextBlock to be the MBB immediately after the current one, if any.
1452 // This is used to avoid emitting unnecessary branches to the next block.
1453 MachineBasicBlock *NextBlock = 0;
1454 MachineFunction::iterator BBI = CurMBB;
1455 if (++BBI != FuncInfo.MF->end())
1458 SDValue BrCond = DAG.getNode(ISD::BRCOND, getCurDebugLoc(),
1459 MVT::Other, CopyTo, CMP,
1460 DAG.getBasicBlock(JT.Default));
1462 if (JT.MBB == NextBlock)
1463 DAG.setRoot(BrCond);
1465 DAG.setRoot(DAG.getNode(ISD::BR, getCurDebugLoc(), MVT::Other, BrCond,
1466 DAG.getBasicBlock(JT.MBB)));
/// visitBitTestHeader - This function emits the code necessary to produce a
/// value suitable for "bit tests".
1471 void SelectionDAGLowering::visitBitTestHeader(BitTestBlock &B) {
1472 // Subtract the minimum value
1473 SDValue SwitchOp = getValue(B.SValue);
1474 EVT VT = SwitchOp.getValueType();
1475 SDValue SUB = DAG.getNode(ISD::SUB, getCurDebugLoc(), VT, SwitchOp,
1476 DAG.getConstant(B.First, VT));
1479 SDValue RangeCmp = DAG.getSetCC(getCurDebugLoc(),
1480 TLI.getSetCCResultType(SUB.getValueType()),
1481 SUB, DAG.getConstant(B.Range, VT),
1485 if (VT.bitsGT(TLI.getPointerTy()))
1486 ShiftOp = DAG.getNode(ISD::TRUNCATE, getCurDebugLoc(),
1487 TLI.getPointerTy(), SUB);
1489 ShiftOp = DAG.getNode(ISD::ZERO_EXTEND, getCurDebugLoc(),
1490 TLI.getPointerTy(), SUB);
1492 B.Reg = FuncInfo.MakeReg(TLI.getPointerTy());
1493 SDValue CopyTo = DAG.getCopyToReg(getControlRoot(), getCurDebugLoc(),
1496 // Set NextBlock to be the MBB immediately after the current one, if any.
1497 // This is used to avoid emitting unnecessary branches to the next block.
1498 MachineBasicBlock *NextBlock = 0;
1499 MachineFunction::iterator BBI = CurMBB;
1500 if (++BBI != FuncInfo.MF->end())
1503 MachineBasicBlock* MBB = B.Cases[0].ThisBB;
1505 CurMBB->addSuccessor(B.Default);
1506 CurMBB->addSuccessor(MBB);
1508 SDValue BrRange = DAG.getNode(ISD::BRCOND, getCurDebugLoc(),
1509 MVT::Other, CopyTo, RangeCmp,
1510 DAG.getBasicBlock(B.Default));
1512 if (MBB == NextBlock)
1513 DAG.setRoot(BrRange);
1515 DAG.setRoot(DAG.getNode(ISD::BR, getCurDebugLoc(), MVT::Other, CopyTo,
1516 DAG.getBasicBlock(MBB)));
/// visitBitTestCase - This function produces one "bit test".
1520 void SelectionDAGLowering::visitBitTestCase(MachineBasicBlock* NextMBB,
1523 // Make desired shift
1524 SDValue ShiftOp = DAG.getCopyFromReg(getControlRoot(), getCurDebugLoc(), Reg,
1525 TLI.getPointerTy());
1526 SDValue SwitchVal = DAG.getNode(ISD::SHL, getCurDebugLoc(),
1528 DAG.getConstant(1, TLI.getPointerTy()),
1531 // Emit bit tests and jumps
1532 SDValue AndOp = DAG.getNode(ISD::AND, getCurDebugLoc(),
1533 TLI.getPointerTy(), SwitchVal,
1534 DAG.getConstant(B.Mask, TLI.getPointerTy()));
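// AndOp is ((1 << (SValue - First)) & Mask); it is non-zero exactly when the
// value being switched on is one of the cases covered by this bit-test node.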
1535 SDValue AndCmp = DAG.getSetCC(getCurDebugLoc(),
1536 TLI.getSetCCResultType(AndOp.getValueType()),
1537 AndOp, DAG.getConstant(0, TLI.getPointerTy()),
1540 CurMBB->addSuccessor(B.TargetBB);
1541 CurMBB->addSuccessor(NextMBB);
1543 SDValue BrAnd = DAG.getNode(ISD::BRCOND, getCurDebugLoc(),
1544 MVT::Other, getControlRoot(),
1545 AndCmp, DAG.getBasicBlock(B.TargetBB));
1547 // Set NextBlock to be the MBB immediately after the current one, if any.
1548 // This is used to avoid emitting unnecessary branches to the next block.
1549 MachineBasicBlock *NextBlock = 0;
1550 MachineFunction::iterator BBI = CurMBB;
1551 if (++BBI != FuncInfo.MF->end())
1554 if (NextMBB == NextBlock)
1557 DAG.setRoot(DAG.getNode(ISD::BR, getCurDebugLoc(), MVT::Other, BrAnd,
1558 DAG.getBasicBlock(NextMBB)));
1561 void SelectionDAGLowering::visitInvoke(InvokeInst &I) {
1562 // Retrieve successors.
1563 MachineBasicBlock *Return = FuncInfo.MBBMap[I.getSuccessor(0)];
1564 MachineBasicBlock *LandingPad = FuncInfo.MBBMap[I.getSuccessor(1)];
1566 const Value *Callee(I.getCalledValue());
1567 if (isa<InlineAsm>(Callee))
1570 LowerCallTo(&I, getValue(Callee), false, LandingPad);
1572 // If the value of the invoke is used outside of its defining block, make it
1573 // available as a virtual register.
1574 CopyToExportRegsIfNeeded(&I);
1576 // Update successor info
1577 CurMBB->addSuccessor(Return);
1578 CurMBB->addSuccessor(LandingPad);
1580 // Drop into normal successor.
1581 DAG.setRoot(DAG.getNode(ISD::BR, getCurDebugLoc(),
1582 MVT::Other, getControlRoot(),
1583 DAG.getBasicBlock(Return)));
1586 void SelectionDAGLowering::visitUnwind(UnwindInst &I) {
/// handleSmallSwitchRange - Emit a series of specific tests (suitable for
/// small case ranges).
1591 bool SelectionDAGLowering::handleSmallSwitchRange(CaseRec& CR,
1592 CaseRecVector& WorkList,
1594 MachineBasicBlock* Default) {
1595 Case& BackCase = *(CR.Range.second-1);
1597 // Size is the number of Cases represented by this range.
1598 size_t Size = CR.Range.second - CR.Range.first;
1602 // Get the MachineFunction which holds the current MBB. This is used when
1603 // inserting any additional MBBs necessary to represent the switch.
1604 MachineFunction *CurMF = FuncInfo.MF;
1606 // Figure out which block is immediately after the current one.
1607 MachineBasicBlock *NextBlock = 0;
1608 MachineFunction::iterator BBI = CR.CaseBB;
1610 if (++BBI != FuncInfo.MF->end())
1613 // TODO: If any two of the cases has the same destination, and if one value
1614 // is the same as the other, but has one bit unset that the other has set,
1615 // use bit manipulation to do two compares at once. For example:
1616 // "if (X == 6 || X == 4)" -> "if ((X|2) == 6)"
1618 // Rearrange the case blocks so that the last one falls through if possible.
1619 if (NextBlock && Default != NextBlock && BackCase.BB != NextBlock) {
1620 // The last case block won't fall through into 'NextBlock' if we emit the
1621 // branches in this order. See if rearranging a case value would help.
1622 for (CaseItr I = CR.Range.first, E = CR.Range.second-1; I != E; ++I) {
1623 if (I->BB == NextBlock) {
1624 std::swap(*I, BackCase);
1630 // Create a CaseBlock record representing a conditional branch to
1631 // the Case's target mbb if the value being switched on SV is equal to the Case's value.
1633 MachineBasicBlock *CurBlock = CR.CaseBB;
1634 for (CaseItr I = CR.Range.first, E = CR.Range.second; I != E; ++I) {
1635 MachineBasicBlock *FallThrough;
1637 FallThrough = CurMF->CreateMachineBasicBlock(CurBlock->getBasicBlock());
1638 CurMF->insert(BBI, FallThrough);
1640 // Put SV in a virtual register to make it available from the new blocks.
1641 ExportFromCurrentBlock(SV);
1643 // If the last case doesn't match, go to the default block.
1644 FallThrough = Default;
1647 Value *RHS, *LHS, *MHS;
1649 if (I->High == I->Low) {
1650 // This is just a small case range :) containing exactly 1 case
1652 LHS = SV; RHS = I->High; MHS = NULL;
1655 LHS = I->Low; MHS = SV; RHS = I->High;
1657 CaseBlock CB(CC, LHS, RHS, MHS, I->BB, FallThrough, CurBlock);
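// The CaseBlock compares SV against the case value (or checks Low <= SV <= High
// for a range) using condition CC; on a match control transfers to I->BB,
// otherwise it falls through to the next comparison block.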
1659 // If emitting the first comparison, just call visitSwitchCase to emit the
1660 // code into the current block. Otherwise, push the CaseBlock onto the
1661 // vector to be later processed by SDISel, and insert the node's MBB
1662 // before the next MBB.
1663 if (CurBlock == CurMBB)
1664 visitSwitchCase(CB);
1666 SwitchCases.push_back(CB);
1668 CurBlock = FallThrough;
1674 static inline bool areJTsAllowed(const TargetLowering &TLI) {
1675 return !DisableJumpTables &&
1676 (TLI.isOperationLegalOrCustom(ISD::BR_JT, MVT::Other) ||
1677 TLI.isOperationLegalOrCustom(ISD::BRIND, MVT::Other));
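/// ComputeRange - Return the number of integer values in the inclusive range
/// [First, Last], computed one bit wider than the inputs so the count cannot
/// overflow.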
1680 static APInt ComputeRange(const APInt &First, const APInt &Last) {
1681 APInt LastExt(Last), FirstExt(First);
1682 uint32_t BitWidth = std::max(Last.getBitWidth(), First.getBitWidth()) + 1;
1683 LastExt.sext(BitWidth); FirstExt.sext(BitWidth);
1684 return (LastExt - FirstExt + 1ULL);
1687 /// handleJTSwitchCase - Emit a jump table for the current switch case range
1688 bool SelectionDAGLowering::handleJTSwitchCase(CaseRec& CR,
1689 CaseRecVector& WorkList,
1691 MachineBasicBlock* Default) {
1692 Case& FrontCase = *CR.Range.first;
1693 Case& BackCase = *(CR.Range.second-1);
1695 const APInt& First = cast<ConstantInt>(FrontCase.Low)->getValue();
1696 const APInt& Last = cast<ConstantInt>(BackCase.High)->getValue();
1699 for (CaseItr I = CR.Range.first, E = CR.Range.second;
1703 if (!areJTsAllowed(TLI) || TSize <= 3)
1706 APInt Range = ComputeRange(First, Last);
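// Density is the fraction of the value range [First, Last] actually covered by
// case values; sparse switches are not worth a jump table.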
1707 double Density = (double)TSize / Range.roundToDouble();
1711 DEBUG(errs() << "Lowering jump table\n"
1712 << "First entry: " << First << ". Last entry: " << Last << '\n'
1713 << "Range: " << Range
1714 << "Size: " << TSize << ". Density: " << Density << "\n\n");
1716 // Get the MachineFunction which holds the current MBB. This is used when
1717 // inserting any additional MBBs necessary to represent the switch.
1718 MachineFunction *CurMF = FuncInfo.MF;
1720 // Figure out which block is immediately after the current one.
1721 MachineFunction::iterator BBI = CR.CaseBB;
1724 const BasicBlock *LLVMBB = CR.CaseBB->getBasicBlock();
1726 // Create a new basic block to hold the code for loading the address
1727 // of the jump table, and jumping to it. Update successor information;
1728 // we will either branch to the default case for the switch, or the jump table.
1730 MachineBasicBlock *JumpTableBB = CurMF->CreateMachineBasicBlock(LLVMBB);
1731 CurMF->insert(BBI, JumpTableBB);
1732 CR.CaseBB->addSuccessor(Default);
1733 CR.CaseBB->addSuccessor(JumpTableBB);
1735 // Build a vector of destination BBs, corresponding to each target
1736 // of the jump table. If the value of the jump table slot corresponds to
1737 // a case statement, push the case's BB onto the vector; otherwise, push the default BB.
1739 std::vector<MachineBasicBlock*> DestBBs;
1741 for (CaseItr I = CR.Range.first, E = CR.Range.second; I != E; ++TEI) {
1742 const APInt& Low = cast<ConstantInt>(I->Low)->getValue();
1743 const APInt& High = cast<ConstantInt>(I->High)->getValue();
1745 if (Low.sle(TEI) && TEI.sle(High)) {
1746 DestBBs.push_back(I->BB);
1750 DestBBs.push_back(Default);
1754 // Update successor info. Add one edge to each unique successor.
1755 BitVector SuccsHandled(CR.CaseBB->getParent()->getNumBlockIDs());
1756 for (std::vector<MachineBasicBlock*>::iterator I = DestBBs.begin(),
1757 E = DestBBs.end(); I != E; ++I) {
1758 if (!SuccsHandled[(*I)->getNumber()]) {
1759 SuccsHandled[(*I)->getNumber()] = true;
1760 JumpTableBB->addSuccessor(*I);
1764 // Create a jump table index for this jump table, or return an existing one.
1766 unsigned JTI = CurMF->getJumpTableInfo()->getJumpTableIndex(DestBBs);
1768 // Set the jump table information so that we can codegen it as a second
1769 // MachineBasicBlock
1770 JumpTable JT(-1U, JTI, JumpTableBB, Default);
1771 JumpTableHeader JTH(First, Last, SV, CR.CaseBB, (CR.CaseBB == CurMBB));
1772 if (CR.CaseBB == CurMBB)
1773 visitJumpTableHeader(JT, JTH);
1775 JTCases.push_back(JumpTableBlock(JTH, JT));
1780 /// handleBTSplitSwitchCase - emit comparison and split binary search tree into two subtrees.
1782 bool SelectionDAGLowering::handleBTSplitSwitchCase(CaseRec& CR,
1783 CaseRecVector& WorkList,
1785 MachineBasicBlock* Default) {
1786 // Get the MachineFunction which holds the current MBB. This is used when
1787 // inserting any additional MBBs necessary to represent the switch.
1788 MachineFunction *CurMF = FuncInfo.MF;
1790 // Figure out which block is immediately after the current one.
1791 MachineFunction::iterator BBI = CR.CaseBB;
1794 Case& FrontCase = *CR.Range.first;
1795 Case& BackCase = *(CR.Range.second-1);
1796 const BasicBlock *LLVMBB = CR.CaseBB->getBasicBlock();
1798 // Size is the number of Cases represented by this range.
1799 unsigned Size = CR.Range.second - CR.Range.first;
1801 const APInt& First = cast<ConstantInt>(FrontCase.Low)->getValue();
1802 const APInt& Last = cast<ConstantInt>(BackCase.High)->getValue();
1804 CaseItr Pivot = CR.Range.first + Size/2;
1806 // Select optimal pivot, maximizing sum density of LHS and RHS. This will
1807 // (heuristically) allow us to emit jump tables later.
1809 for (CaseItr I = CR.Range.first, E = CR.Range.second;
1813 size_t LSize = FrontCase.size();
1814 size_t RSize = TSize-LSize;
1815 DEBUG(errs() << "Selecting best pivot: \n"
1816 << "First: " << First << ", Last: " << Last <<'\n'
1817 << "LSize: " << LSize << ", RSize: " << RSize << '\n');
1818 for (CaseItr I = CR.Range.first, J=I+1, E = CR.Range.second;
1820 const APInt& LEnd = cast<ConstantInt>(I->High)->getValue();
1821 const APInt& RBegin = cast<ConstantInt>(J->Low)->getValue();
1822 APInt Range = ComputeRange(LEnd, RBegin);
1823 assert((Range - 2ULL).isNonNegative() &&
1824 "Invalid case distance");
1825 double LDensity = (double)LSize / (LEnd - First + 1ULL).roundToDouble();
1826 double RDensity = (double)RSize / (Last - RBegin + 1ULL).roundToDouble();
1827 double Metric = Range.logBase2()*(LDensity+RDensity);
1828 // Should always split in some non-trivial place
1829 DEBUG(errs() <<"=>Step\n"
1830 << "LEnd: " << LEnd << ", RBegin: " << RBegin << '\n'
1831 << "LDensity: " << LDensity
1832 << ", RDensity: " << RDensity << '\n'
1833 << "Metric: " << Metric << '\n');
1834 if (FMetric < Metric) {
1837 DEBUG(errs() << "Current metric set to: " << FMetric << '\n');
1843 if (areJTsAllowed(TLI)) {
1844 // If our case is dense we *really* should handle it earlier!
1845 assert((FMetric > 0) && "Should handle dense range earlier!");
1847 Pivot = CR.Range.first + Size/2;
1850 CaseRange LHSR(CR.Range.first, Pivot);
1851 CaseRange RHSR(Pivot, CR.Range.second);
1852 Constant *C = Pivot->Low;
1853 MachineBasicBlock *FalseBB = 0, *TrueBB = 0;
1855 // We know that we branch to the LHS if the Value being switched on is
1856 // less than the Pivot value, C. We use this to optimize our binary
1857 // tree a bit, by recognizing that if SV is greater than or equal to the
1858 // LHS's Case Value, and that Case Value is exactly one less than the
1859 // Pivot's Value, then we can branch directly to the LHS's Target,
1860 // rather than creating a leaf node for it.
1861 if ((LHSR.second - LHSR.first) == 1 &&
1862 LHSR.first->High == CR.GE &&
1863 cast<ConstantInt>(C)->getValue() ==
1864 (cast<ConstantInt>(CR.GE)->getValue() + 1LL)) {
1865 TrueBB = LHSR.first->BB;
1867 TrueBB = CurMF->CreateMachineBasicBlock(LLVMBB);
1868 CurMF->insert(BBI, TrueBB);
1869 WorkList.push_back(CaseRec(TrueBB, C, CR.GE, LHSR));
1871 // Put SV in a virtual register to make it available from the new blocks.
1872 ExportFromCurrentBlock(SV);
1875 // Similar to the optimization above, if the Value being switched on is
1876 // known to be less than the Constant CR.LT, and the current Case Value
1877 // is CR.LT - 1, then we can branch directly to the target block for
1878 // the current Case Value, rather than emitting a RHS leaf node for it.
1879 if ((RHSR.second - RHSR.first) == 1 && CR.LT &&
1880 cast<ConstantInt>(RHSR.first->Low)->getValue() ==
1881 (cast<ConstantInt>(CR.LT)->getValue() - 1LL)) {
1882 FalseBB = RHSR.first->BB;
1884 FalseBB = CurMF->CreateMachineBasicBlock(LLVMBB);
1885 CurMF->insert(BBI, FalseBB);
1886 WorkList.push_back(CaseRec(FalseBB,CR.LT,C,RHSR));
1888 // Put SV in a virtual register to make it available from the new blocks.
1889 ExportFromCurrentBlock(SV);
1892 // Create a CaseBlock record representing a conditional branch to
1893 // the LHS node if the value being switched on SV is less than C.
1894 // Otherwise, branch to RHS.
1895 CaseBlock CB(ISD::SETLT, SV, C, NULL, TrueBB, FalseBB, CR.CaseBB);
1897 if (CR.CaseBB == CurMBB)
1898 visitSwitchCase(CB);
1900 SwitchCases.push_back(CB);
1905 /// handleBitTestsSwitchCase - if the current case range has few destinations and
1906 /// the range spans fewer values than the machine word bitwidth, encode the case
1907 /// range into a series of masks and emit bit tests with these masks.
1908 bool SelectionDAGLowering::handleBitTestsSwitchCase(CaseRec& CR,
1909 CaseRecVector& WorkList,
1911 MachineBasicBlock* Default){
1912 EVT PTy = TLI.getPointerTy();
1913 unsigned IntPtrBits = PTy.getSizeInBits();
1915 Case& FrontCase = *CR.Range.first;
1916 Case& BackCase = *(CR.Range.second-1);
1918 // Get the MachineFunction which holds the current MBB. This is used when
1919 // inserting any additional MBBs necessary to represent the switch.
1920 MachineFunction *CurMF = FuncInfo.MF;
1922 // If the target does not have a legal shift-left, do not emit bit tests at all.
1923 if (!TLI.isOperationLegal(ISD::SHL, TLI.getPointerTy()))
1927 for (CaseItr I = CR.Range.first, E = CR.Range.second;
1929 // A single case counts as one comparison, a case range as two.
1930 numCmps += (I->Low == I->High ? 1 : 2);
1933 // Count unique destinations
1934 SmallSet<MachineBasicBlock*, 4> Dests;
1935 for (CaseItr I = CR.Range.first, E = CR.Range.second; I!=E; ++I) {
1936 Dests.insert(I->BB);
1937 if (Dests.size() > 3)
1938 // Don't bother with the code below if there are too many unique destinations.
1941 DEBUG(errs() << "Total number of unique destinations: " << Dests.size() << '\n'
1942 << "Total number of comparisons: " << numCmps << '\n');
1944 // Compute span of values.
1945 const APInt& minValue = cast<ConstantInt>(FrontCase.Low)->getValue();
1946 const APInt& maxValue = cast<ConstantInt>(BackCase.High)->getValue();
1947 APInt cmpRange = maxValue - minValue;
1949 DEBUG(errs() << "Compare range: " << cmpRange << '\n'
1950 << "Low bound: " << minValue << '\n'
1951 << "High bound: " << maxValue << '\n');
1953 if (cmpRange.uge(APInt(cmpRange.getBitWidth(), IntPtrBits)) ||
1954 (!(Dests.size() == 1 && numCmps >= 3) &&
1955 !(Dests.size() == 2 && numCmps >= 5) &&
1956 !(Dests.size() >= 3 && numCmps >= 6)))
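// For instance, single cases 1, 3, 5 and 7 that all share one destination pass
// the profitability test above (one destination, four comparisons) and collapse
// into a single AND with mask 0xAA followed by one branch.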
1959 DEBUG(errs() << "Emitting bit tests\n");
1960 APInt lowBound = APInt::getNullValue(cmpRange.getBitWidth());
1962 // Optimize the case where all the case values fit in a
1963 // word without having to subtract minValue. In this case,
1964 // we can optimize away the subtraction.
1965 if (minValue.isNonNegative() &&
1966 maxValue.slt(APInt(maxValue.getBitWidth(), IntPtrBits))) {
1967 cmpRange = maxValue;
1969 lowBound = minValue;
1972 CaseBitsVector CasesBits;
1973 unsigned i, count = 0;
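// Group the cases by destination block; for each destination build a bit mask
// with one bit set for every case value (biased by lowBound) that branches there.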
1975 for (CaseItr I = CR.Range.first, E = CR.Range.second; I!=E; ++I) {
1976 MachineBasicBlock* Dest = I->BB;
1977 for (i = 0; i < count; ++i)
1978 if (Dest == CasesBits[i].BB)
1982 assert((count < 3) && "Too many destinations to test!");
1983 CasesBits.push_back(CaseBits(0, Dest, 0));
1987 const APInt& lowValue = cast<ConstantInt>(I->Low)->getValue();
1988 const APInt& highValue = cast<ConstantInt>(I->High)->getValue();
1990 uint64_t lo = (lowValue - lowBound).getZExtValue();
1991 uint64_t hi = (highValue - lowBound).getZExtValue();
1993 for (uint64_t j = lo; j <= hi; j++) {
1994 CasesBits[i].Mask |= 1ULL << j;
1995 CasesBits[i].Bits++;
1999 std::sort(CasesBits.begin(), CasesBits.end(), CaseBitsCmp());
2003 // Figure out which block is immediately after the current one.
2004 MachineFunction::iterator BBI = CR.CaseBB;
2007 const BasicBlock *LLVMBB = CR.CaseBB->getBasicBlock();
2009 DEBUG(errs() << "Cases:\n");
2010 for (unsigned i = 0, e = CasesBits.size(); i!=e; ++i) {
2011 DEBUG(errs() << "Mask: " << CasesBits[i].Mask
2012 << ", Bits: " << CasesBits[i].Bits
2013 << ", BB: " << CasesBits[i].BB << '\n');
2015 MachineBasicBlock *CaseBB = CurMF->CreateMachineBasicBlock(LLVMBB);
2016 CurMF->insert(BBI, CaseBB);
2017 BTC.push_back(BitTestCase(CasesBits[i].Mask,
2021 // Put SV in a virtual register to make it available from the new blocks.
2022 ExportFromCurrentBlock(SV);
2025 BitTestBlock BTB(lowBound, cmpRange, SV,
2026 -1U, (CR.CaseBB == CurMBB),
2027 CR.CaseBB, Default, BTC);
2029 if (CR.CaseBB == CurMBB)
2030 visitBitTestHeader(BTB);
2032 BitTestCases.push_back(BTB);
2038 /// Clusterify - Transform a simple list of Cases into a list of CaseRanges.
2039 size_t SelectionDAGLowering::Clusterify(CaseVector& Cases,
2040 const SwitchInst& SI) {
2043 // Start with "simple" cases
2044 for (size_t i = 1; i < SI.getNumSuccessors(); ++i) {
2045 MachineBasicBlock *SMBB = FuncInfo.MBBMap[SI.getSuccessor(i)];
2046 Cases.push_back(Case(SI.getSuccessorValue(i),
2047 SI.getSuccessorValue(i),
2050 std::sort(Cases.begin(), Cases.end(), CaseCmp());
2052 // Merge cases into clusters.
2053 if (Cases.size() >= 2)
2054 // Must recompute end() each iteration because it may be
2055 // invalidated by erase if we hold on to it
2056 for (CaseItr I = Cases.begin(), J = ++(Cases.begin()); J != Cases.end(); ) {
2057 const APInt& nextValue = cast<ConstantInt>(J->Low)->getValue();
2058 const APInt& currentValue = cast<ConstantInt>(I->High)->getValue();
2059 MachineBasicBlock* nextBB = J->BB;
2060 MachineBasicBlock* currentBB = I->BB;
2062 // If the two neighboring cases go to the same destination, merge them
2063 // into a single case.
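// For example, consecutive values 1, 2 and 3 sharing a destination become the
// single range [1, 3].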
2064 if ((nextValue - currentValue == 1) && (currentBB == nextBB)) {
2072 for (CaseItr I=Cases.begin(), E=Cases.end(); I!=E; ++I, ++numCmps) {
2073 if (I->Low != I->High)
2074 // A range counts double, since it requires two compares.
2081 void SelectionDAGLowering::visitSwitch(SwitchInst &SI) {
2082 // Figure out which block is immediately after the current one.
2083 MachineBasicBlock *NextBlock = 0;
2085 MachineBasicBlock *Default = FuncInfo.MBBMap[SI.getDefaultDest()];
2087 // If there is only the default destination, branch to it if it is not the
2088 // next basic block. Otherwise, just fall through.
2089 if (SI.getNumOperands() == 2) {
2090 // Update machine-CFG edges.
2092 // If this is not a fall-through branch, emit the branch.
2093 CurMBB->addSuccessor(Default);
2094 if (Default != NextBlock)
2095 DAG.setRoot(DAG.getNode(ISD::BR, getCurDebugLoc(),
2096 MVT::Other, getControlRoot(),
2097 DAG.getBasicBlock(Default)));
2101 // If there are any non-default case statements, create a vector of Cases
2102 // representing each one, and sort the vector so that we can efficiently
2103 // create a binary search tree from them.
2105 size_t numCmps = Clusterify(Cases, SI);
2106 DEBUG(errs() << "Clusterify finished. Total clusters: " << Cases.size()
2107 << ". Total compares: " << numCmps << '\n');
2110 // Get the Value to be switched on and default basic blocks, which will be
2111 // inserted into CaseBlock records, representing basic blocks in the binary search tree.
2113 Value *SV = SI.getOperand(0);
2115 // Push the initial CaseRec onto the worklist
2116 CaseRecVector WorkList;
2117 WorkList.push_back(CaseRec(CurMBB,0,0,CaseRange(Cases.begin(),Cases.end())));
2119 while (!WorkList.empty()) {
2120 // Grab a record representing a case range to process off the worklist
2121 CaseRec CR = WorkList.back();
2122 WorkList.pop_back();
2124 if (handleBitTestsSwitchCase(CR, WorkList, SV, Default))
2127 // If the range has few cases (two or fewer), emit a series of specific tests.
2129 if (handleSmallSwitchRange(CR, WorkList, SV, Default))
2132 // If the switch has more than 5 blocks, and at least 40% dense, and the
2133 // target supports indirect branches, then emit a jump table rather than
2134 // lowering the switch to a binary tree of conditional branches.
2135 if (handleJTSwitchCase(CR, WorkList, SV, Default))
2138 // Emit binary tree. We need to pick a pivot, and push left and right ranges
2139 // onto the worklist. Leaves are handled via a handleSmallSwitchRange() call.
2140 handleBTSplitSwitchCase(CR, WorkList, SV, Default);
2145 void SelectionDAGLowering::visitFSub(User &I) {
2146 // -0.0 - X --> fneg
2147 const Type *Ty = I.getType();
2148 if (isa<VectorType>(Ty)) {
2149 if (ConstantVector *CV = dyn_cast<ConstantVector>(I.getOperand(0))) {
2150 const VectorType *DestTy = cast<VectorType>(I.getType());
2151 const Type *ElTy = DestTy->getElementType();
2152 unsigned VL = DestTy->getNumElements();
2153 std::vector<Constant*> NZ(VL, ConstantFP::getNegativeZero(ElTy));
2154 Constant *CNZ = ConstantVector::get(&NZ[0], NZ.size());
2156 SDValue Op2 = getValue(I.getOperand(1));
2157 setValue(&I, DAG.getNode(ISD::FNEG, getCurDebugLoc(),
2158 Op2.getValueType(), Op2));
2163 if (ConstantFP *CFP = dyn_cast<ConstantFP>(I.getOperand(0)))
2164 if (CFP->isExactlyValue(ConstantFP::getNegativeZero(Ty)->getValueAPF())) {
2165 SDValue Op2 = getValue(I.getOperand(1));
2166 setValue(&I, DAG.getNode(ISD::FNEG, getCurDebugLoc(),
2167 Op2.getValueType(), Op2));
2171 visitBinary(I, ISD::FSUB);
2174 void SelectionDAGLowering::visitBinary(User &I, unsigned OpCode) {
2175 SDValue Op1 = getValue(I.getOperand(0));
2176 SDValue Op2 = getValue(I.getOperand(1));
2178 setValue(&I, DAG.getNode(OpCode, getCurDebugLoc(),
2179 Op1.getValueType(), Op1, Op2));
2182 void SelectionDAGLowering::visitShift(User &I, unsigned Opcode) {
2183 SDValue Op1 = getValue(I.getOperand(0));
2184 SDValue Op2 = getValue(I.getOperand(1));
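// LLVM IR lets the shift amount have any integer type, but the ISD shift nodes
// want the target's shift-amount type; extend or truncate Op2 to a suitable
// type first.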
2185 if (!isa<VectorType>(I.getType()) &&
2186 Op2.getValueType() != TLI.getShiftAmountTy()) {
2187 // If the operand is smaller than the shift count type, promote it.
2188 EVT PTy = TLI.getPointerTy();
2189 EVT STy = TLI.getShiftAmountTy();
2190 if (STy.bitsGT(Op2.getValueType()))
2191 Op2 = DAG.getNode(ISD::ANY_EXTEND, getCurDebugLoc(),
2192 TLI.getShiftAmountTy(), Op2);
2193 // If the operand is larger than the shift count type but the shift
2194 // count type has enough bits to represent any shift value, truncate
2195 // it now. This is a common case and it exposes the truncate to
2196 // optimization early.
2197 else if (STy.getSizeInBits() >=
2198 Log2_32_Ceil(Op2.getValueType().getSizeInBits()))
2199 Op2 = DAG.getNode(ISD::TRUNCATE, getCurDebugLoc(),
2200 TLI.getShiftAmountTy(), Op2);
2201 // Otherwise we'll need to temporarily settle for some other
2202 // convenient type; type legalization will make adjustments as needed.
2204 else if (PTy.bitsLT(Op2.getValueType()))
2205 Op2 = DAG.getNode(ISD::TRUNCATE, getCurDebugLoc(),
2206 TLI.getPointerTy(), Op2);
2207 else if (PTy.bitsGT(Op2.getValueType()))
2208 Op2 = DAG.getNode(ISD::ANY_EXTEND, getCurDebugLoc(),
2209 TLI.getPointerTy(), Op2);
2212 setValue(&I, DAG.getNode(Opcode, getCurDebugLoc(),
2213 Op1.getValueType(), Op1, Op2));
2216 void SelectionDAGLowering::visitICmp(User &I) {
2217 ICmpInst::Predicate predicate = ICmpInst::BAD_ICMP_PREDICATE;
2218 if (ICmpInst *IC = dyn_cast<ICmpInst>(&I))
2219 predicate = IC->getPredicate();
2220 else if (ConstantExpr *IC = dyn_cast<ConstantExpr>(&I))
2221 predicate = ICmpInst::Predicate(IC->getPredicate());
2222 SDValue Op1 = getValue(I.getOperand(0));
2223 SDValue Op2 = getValue(I.getOperand(1));
2224 ISD::CondCode Opcode = getICmpCondCode(predicate);
2226 EVT DestVT = TLI.getValueType(I.getType());
2227 setValue(&I, DAG.getSetCC(getCurDebugLoc(), DestVT, Op1, Op2, Opcode));
2230 void SelectionDAGLowering::visitFCmp(User &I) {
2231 FCmpInst::Predicate predicate = FCmpInst::BAD_FCMP_PREDICATE;
2232 if (FCmpInst *FC = dyn_cast<FCmpInst>(&I))
2233 predicate = FC->getPredicate();
2234 else if (ConstantExpr *FC = dyn_cast<ConstantExpr>(&I))
2235 predicate = FCmpInst::Predicate(FC->getPredicate());
2236 SDValue Op1 = getValue(I.getOperand(0));
2237 SDValue Op2 = getValue(I.getOperand(1));
2238 ISD::CondCode Condition = getFCmpCondCode(predicate);
2239 EVT DestVT = TLI.getValueType(I.getType());
2240 setValue(&I, DAG.getSetCC(getCurDebugLoc(), DestVT, Op1, Op2, Condition));
2243 void SelectionDAGLowering::visitSelect(User &I) {
2244 SmallVector<EVT, 4> ValueVTs;
2245 ComputeValueVTs(TLI, I.getType(), ValueVTs);
2246 unsigned NumValues = ValueVTs.size();
2247 if (NumValues != 0) {
2248 SmallVector<SDValue, 4> Values(NumValues);
2249 SDValue Cond = getValue(I.getOperand(0));
2250 SDValue TrueVal = getValue(I.getOperand(1));
2251 SDValue FalseVal = getValue(I.getOperand(2));
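// Aggregate selects are lowered as one ISD::SELECT per member value, all driven
// by the same condition, and re-merged below.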
2253 for (unsigned i = 0; i != NumValues; ++i)
2254 Values[i] = DAG.getNode(ISD::SELECT, getCurDebugLoc(),
2255 TrueVal.getValueType(), Cond,
2256 SDValue(TrueVal.getNode(), TrueVal.getResNo() + i),
2257 SDValue(FalseVal.getNode(), FalseVal.getResNo() + i));
2259 setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurDebugLoc(),
2260 DAG.getVTList(&ValueVTs[0], NumValues),
2261 &Values[0], NumValues));
2266 void SelectionDAGLowering::visitTrunc(User &I) {
2267 // TruncInst cannot be a no-op cast because sizeof(src) > sizeof(dest).
2268 SDValue N = getValue(I.getOperand(0));
2269 EVT DestVT = TLI.getValueType(I.getType());
2270 setValue(&I, DAG.getNode(ISD::TRUNCATE, getCurDebugLoc(), DestVT, N));
2273 void SelectionDAGLowering::visitZExt(User &I) {
2274 // ZExt cannot be a no-op cast because sizeof(src) < sizeof(dest).
2275 // ZExt also can't be a cast to bool for the same reason, so there's nothing much to do.
2276 SDValue N = getValue(I.getOperand(0));
2277 EVT DestVT = TLI.getValueType(I.getType());
2278 setValue(&I, DAG.getNode(ISD::ZERO_EXTEND, getCurDebugLoc(), DestVT, N));
2281 void SelectionDAGLowering::visitSExt(User &I) {
2282 // SExt cannot be a no-op cast because sizeof(src) < sizeof(dest).
2283 // SExt also can't be a cast to bool for the same reason, so there's nothing much to do.
2284 SDValue N = getValue(I.getOperand(0));
2285 EVT DestVT = TLI.getValueType(I.getType());
2286 setValue(&I, DAG.getNode(ISD::SIGN_EXTEND, getCurDebugLoc(), DestVT, N));
2289 void SelectionDAGLowering::visitFPTrunc(User &I) {
2290 // FPTrunc is never a no-op cast, no need to check
2291 SDValue N = getValue(I.getOperand(0));
2292 EVT DestVT = TLI.getValueType(I.getType());
2293 setValue(&I, DAG.getNode(ISD::FP_ROUND, getCurDebugLoc(),
2294 DestVT, N, DAG.getIntPtrConstant(0)));
2297 void SelectionDAGLowering::visitFPExt(User &I){
2298 // FPExt is never a no-op cast, no need to check
2299 SDValue N = getValue(I.getOperand(0));
2300 EVT DestVT = TLI.getValueType(I.getType());
2301 setValue(&I, DAG.getNode(ISD::FP_EXTEND, getCurDebugLoc(), DestVT, N));
2304 void SelectionDAGLowering::visitFPToUI(User &I) {
2305 // FPToUI is never a no-op cast, no need to check
2306 SDValue N = getValue(I.getOperand(0));
2307 EVT DestVT = TLI.getValueType(I.getType());
2308 setValue(&I, DAG.getNode(ISD::FP_TO_UINT, getCurDebugLoc(), DestVT, N));
2311 void SelectionDAGLowering::visitFPToSI(User &I) {
2312 // FPToSI is never a no-op cast, no need to check
2313 SDValue N = getValue(I.getOperand(0));
2314 EVT DestVT = TLI.getValueType(I.getType());
2315 setValue(&I, DAG.getNode(ISD::FP_TO_SINT, getCurDebugLoc(), DestVT, N));
2318 void SelectionDAGLowering::visitUIToFP(User &I) {
2319 // UIToFP is never a no-op cast, no need to check
2320 SDValue N = getValue(I.getOperand(0));
2321 EVT DestVT = TLI.getValueType(I.getType());
2322 setValue(&I, DAG.getNode(ISD::UINT_TO_FP, getCurDebugLoc(), DestVT, N));
2325 void SelectionDAGLowering::visitSIToFP(User &I){
2326 // SIToFP is never a no-op cast, no need to check
2327 SDValue N = getValue(I.getOperand(0));
2328 EVT DestVT = TLI.getValueType(I.getType());
2329 setValue(&I, DAG.getNode(ISD::SINT_TO_FP, getCurDebugLoc(), DestVT, N));
2332 void SelectionDAGLowering::visitPtrToInt(User &I) {
2333 // What to do depends on the size of the integer and the size of the pointer.
2334 // We can either truncate, zero extend, or no-op, accordingly.
2335 SDValue N = getValue(I.getOperand(0));
2336 EVT SrcVT = N.getValueType();
2337 EVT DestVT = TLI.getValueType(I.getType());
2339 if (DestVT.bitsLT(SrcVT))
2340 Result = DAG.getNode(ISD::TRUNCATE, getCurDebugLoc(), DestVT, N);
2342 // Note: ZERO_EXTEND can handle cases where the sizes are equal too
2343 Result = DAG.getNode(ISD::ZERO_EXTEND, getCurDebugLoc(), DestVT, N);
2344 setValue(&I, Result);
2347 void SelectionDAGLowering::visitIntToPtr(User &I) {
2348 // What to do depends on the size of the integer and the size of the pointer.
2349 // We can either truncate, zero extend, or no-op, accordingly.
2350 SDValue N = getValue(I.getOperand(0));
2351 EVT SrcVT = N.getValueType();
2352 EVT DestVT = TLI.getValueType(I.getType());
2353 if (DestVT.bitsLT(SrcVT))
2354 setValue(&I, DAG.getNode(ISD::TRUNCATE, getCurDebugLoc(), DestVT, N));
2356 // Note: ZERO_EXTEND can handle cases where the sizes are equal too
2357 setValue(&I, DAG.getNode(ISD::ZERO_EXTEND, getCurDebugLoc(),
2361 void SelectionDAGLowering::visitBitCast(User &I) {
2362 SDValue N = getValue(I.getOperand(0));
2363 EVT DestVT = TLI.getValueType(I.getType());
2365 // BitCast assures us that source and destination are the same size so this
2366 // is either a BIT_CONVERT or a no-op.
2367 if (DestVT != N.getValueType())
2368 setValue(&I, DAG.getNode(ISD::BIT_CONVERT, getCurDebugLoc(),
2369 DestVT, N)); // convert types
2371 setValue(&I, N); // noop cast.
2374 void SelectionDAGLowering::visitInsertElement(User &I) {
2375 SDValue InVec = getValue(I.getOperand(0));
2376 SDValue InVal = getValue(I.getOperand(1));
2377 SDValue InIdx = DAG.getNode(ISD::ZERO_EXTEND, getCurDebugLoc(),
2379 getValue(I.getOperand(2)));
2381 setValue(&I, DAG.getNode(ISD::INSERT_VECTOR_ELT, getCurDebugLoc(),
2382 TLI.getValueType(I.getType()),
2383 InVec, InVal, InIdx));
2386 void SelectionDAGLowering::visitExtractElement(User &I) {
2387 SDValue InVec = getValue(I.getOperand(0));
2388 SDValue InIdx = DAG.getNode(ISD::ZERO_EXTEND, getCurDebugLoc(),
2390 getValue(I.getOperand(1)));
2391 setValue(&I, DAG.getNode(ISD::EXTRACT_VECTOR_ELT, getCurDebugLoc(),
2392 TLI.getValueType(I.getType()), InVec, InIdx));
2396 // Utility for visitShuffleVector - Returns true if the mask is a sequential mask
2397 // starting at SIndx and increasing to the element length (undefs are allowed).
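// For example, the mask <4, 5, undef, 7> is sequential starting from SIndx == 4.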
2398 static bool SequentialMask(SmallVectorImpl<int> &Mask, unsigned SIndx) {
2399 unsigned MaskNumElts = Mask.size();
2400 for (unsigned i = 0; i != MaskNumElts; ++i)
2401 if ((Mask[i] >= 0) && (Mask[i] != (int)(i + SIndx)))
2406 void SelectionDAGLowering::visitShuffleVector(User &I) {
2407 SmallVector<int, 8> Mask;
2408 SDValue Src1 = getValue(I.getOperand(0));
2409 SDValue Src2 = getValue(I.getOperand(1));
2411 // Convert the ConstantVector mask operand into an array of ints, with -1
2412 // representing undef values.
2413 SmallVector<Constant*, 8> MaskElts;
2414 cast<Constant>(I.getOperand(2))->getVectorElements(*DAG.getContext(),
2416 unsigned MaskNumElts = MaskElts.size();
2417 for (unsigned i = 0; i != MaskNumElts; ++i) {
2418 if (isa<UndefValue>(MaskElts[i]))
2421 Mask.push_back(cast<ConstantInt>(MaskElts[i])->getSExtValue());
2424 EVT VT = TLI.getValueType(I.getType());
2425 EVT SrcVT = Src1.getValueType();
2426 unsigned SrcNumElts = SrcVT.getVectorNumElements();
2428 if (SrcNumElts == MaskNumElts) {
2429 setValue(&I, DAG.getVectorShuffle(VT, getCurDebugLoc(), Src1, Src2,
2434 // Normalize the shuffle vector since mask and vector lengths don't match.
2435 if (SrcNumElts < MaskNumElts && MaskNumElts % SrcNumElts == 0) {
2436 // Mask is longer than the source vectors and is a multiple of the source
2437 // vectors. We can use concatenate vectors to make the mask and vector lengths match.
2439 if (SrcNumElts*2 == MaskNumElts && SequentialMask(Mask, 0)) {
2440 // The shuffle is concatenating two vectors together.
2441 setValue(&I, DAG.getNode(ISD::CONCAT_VECTORS, getCurDebugLoc(),
2446 // Pad both vectors with undefs to make them the same length as the mask.
2447 unsigned NumConcat = MaskNumElts / SrcNumElts;
2448 bool Src1U = Src1.getOpcode() == ISD::UNDEF;
2449 bool Src2U = Src2.getOpcode() == ISD::UNDEF;
2450 SDValue UndefVal = DAG.getUNDEF(SrcVT);
2452 SmallVector<SDValue, 8> MOps1(NumConcat, UndefVal);
2453 SmallVector<SDValue, 8> MOps2(NumConcat, UndefVal);
2457 Src1 = Src1U ? DAG.getUNDEF(VT) : DAG.getNode(ISD::CONCAT_VECTORS,
2458 getCurDebugLoc(), VT,
2459 &MOps1[0], NumConcat);
2460 Src2 = Src2U ? DAG.getUNDEF(VT) : DAG.getNode(ISD::CONCAT_VECTORS,
2461 getCurDebugLoc(), VT,
2462 &MOps2[0], NumConcat);
2464 // Readjust mask for new input vector length.
2465 SmallVector<int, 8> MappedOps;
2466 for (unsigned i = 0; i != MaskNumElts; ++i) {
2468 if (Idx < (int)SrcNumElts)
2469 MappedOps.push_back(Idx);
2471 MappedOps.push_back(Idx + MaskNumElts - SrcNumElts);
2473 setValue(&I, DAG.getVectorShuffle(VT, getCurDebugLoc(), Src1, Src2,
2478 if (SrcNumElts > MaskNumElts) {
2479 // Analyze the access pattern of the vector to see if we can extract
2480 // two subvectors and do the shuffle. The analysis is done by calculating
2481 // the range of elements the mask accesses on both vectors.
2482 int MinRange[2] = { SrcNumElts+1, SrcNumElts+1};
2483 int MaxRange[2] = {-1, -1};
2485 for (unsigned i = 0; i != MaskNumElts; ++i) {
2491 if (Idx >= (int)SrcNumElts) {
2495 if (Idx > MaxRange[Input])
2496 MaxRange[Input] = Idx;
2497 if (Idx < MinRange[Input])
2498 MinRange[Input] = Idx;
2501 // Check if the access is smaller than the vector size and whether we can find
2502 // a reasonable extract index.
2503 int RangeUse[2] = { 2, 2 }; // 0 = Unused, 1 = Extract, 2 = Cannot Extract.
2504 int StartIdx[2]; // StartIdx to extract from
2505 for (int Input=0; Input < 2; ++Input) {
2506 if (MinRange[Input] == (int)(SrcNumElts+1) && MaxRange[Input] == -1) {
2507 RangeUse[Input] = 0; // Unused
2508 StartIdx[Input] = 0;
2509 } else if (MaxRange[Input] - MinRange[Input] < (int)MaskNumElts) {
2510 // Fits within range but we should see if we can find a good
2511 // start index that is a multiple of the mask length.
2512 if (MaxRange[Input] < (int)MaskNumElts) {
2513 RangeUse[Input] = 1; // Extract from beginning of the vector
2514 StartIdx[Input] = 0;
2516 StartIdx[Input] = (MinRange[Input]/MaskNumElts)*MaskNumElts;
2517 if (MaxRange[Input] - StartIdx[Input] < (int)MaskNumElts &&
2518 StartIdx[Input] + MaskNumElts < SrcNumElts)
2519 RangeUse[Input] = 1; // Extract from a multiple of the mask length.
2524 if (RangeUse[0] == 0 && RangeUse[1] == 0) {
2525 setValue(&I, DAG.getUNDEF(VT)); // Vectors are not used.
2528 else if (RangeUse[0] < 2 && RangeUse[1] < 2) {
2529 // Extract appropriate subvector and generate a vector shuffle
2530 for (int Input=0; Input < 2; ++Input) {
2531 SDValue& Src = Input == 0 ? Src1 : Src2;
2532 if (RangeUse[Input] == 0) {
2533 Src = DAG.getUNDEF(VT);
2535 Src = DAG.getNode(ISD::EXTRACT_SUBVECTOR, getCurDebugLoc(), VT,
2536 Src, DAG.getIntPtrConstant(StartIdx[Input]));
2539 // Calculate new mask.
2540 SmallVector<int, 8> MappedOps;
2541 for (unsigned i = 0; i != MaskNumElts; ++i) {
2544 MappedOps.push_back(Idx);
2545 else if (Idx < (int)SrcNumElts)
2546 MappedOps.push_back(Idx - StartIdx[0]);
2548 MappedOps.push_back(Idx - SrcNumElts - StartIdx[1] + MaskNumElts);
2550 setValue(&I, DAG.getVectorShuffle(VT, getCurDebugLoc(), Src1, Src2,
2556 // We can't use either concat vectors or extract subvectors, so fall back to
2557 // replacing the shuffle with an extract and build vector sequence.
2559 EVT EltVT = VT.getVectorElementType();
2560 EVT PtrVT = TLI.getPointerTy();
2561 SmallVector<SDValue,8> Ops;
2562 for (unsigned i = 0; i != MaskNumElts; ++i) {
2564 Ops.push_back(DAG.getUNDEF(EltVT));
2567 if (Idx < (int)SrcNumElts)
2568 Ops.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, getCurDebugLoc(),
2569 EltVT, Src1, DAG.getConstant(Idx, PtrVT)));
2571 Ops.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, getCurDebugLoc(),
2573 DAG.getConstant(Idx - SrcNumElts, PtrVT)));
2576 setValue(&I, DAG.getNode(ISD::BUILD_VECTOR, getCurDebugLoc(),
2577 VT, &Ops[0], Ops.size()));
2580 void SelectionDAGLowering::visitInsertValue(InsertValueInst &I) {
2581 const Value *Op0 = I.getOperand(0);
2582 const Value *Op1 = I.getOperand(1);
2583 const Type *AggTy = I.getType();
2584 const Type *ValTy = Op1->getType();
2585 bool IntoUndef = isa<UndefValue>(Op0);
2586 bool FromUndef = isa<UndefValue>(Op1);
2588 unsigned LinearIndex = ComputeLinearIndex(TLI, AggTy,
2589 I.idx_begin(), I.idx_end());
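// LinearIndex is the position of the inserted member within the flattened list
// of scalar values that ComputeValueVTs produces for the aggregate type.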
2591 SmallVector<EVT, 4> AggValueVTs;
2592 ComputeValueVTs(TLI, AggTy, AggValueVTs);
2593 SmallVector<EVT, 4> ValValueVTs;
2594 ComputeValueVTs(TLI, ValTy, ValValueVTs);
2596 unsigned NumAggValues = AggValueVTs.size();
2597 unsigned NumValValues = ValValueVTs.size();
2598 SmallVector<SDValue, 4> Values(NumAggValues);
2600 SDValue Agg = getValue(Op0);
2601 SDValue Val = getValue(Op1);
2603 // Copy the beginning value(s) from the original aggregate.
2604 for (; i != LinearIndex; ++i)
2605 Values[i] = IntoUndef ? DAG.getUNDEF(AggValueVTs[i]) :
2606 SDValue(Agg.getNode(), Agg.getResNo() + i);
2607 // Copy values from the inserted value(s).
2608 for (; i != LinearIndex + NumValValues; ++i)
2609 Values[i] = FromUndef ? DAG.getUNDEF(AggValueVTs[i]) :
2610 SDValue(Val.getNode(), Val.getResNo() + i - LinearIndex);
2611 // Copy remaining value(s) from the original aggregate.
2612 for (; i != NumAggValues; ++i)
2613 Values[i] = IntoUndef ? DAG.getUNDEF(AggValueVTs[i]) :
2614 SDValue(Agg.getNode(), Agg.getResNo() + i);
2616 setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurDebugLoc(),
2617 DAG.getVTList(&AggValueVTs[0], NumAggValues),
2618 &Values[0], NumAggValues));
2621 void SelectionDAGLowering::visitExtractValue(ExtractValueInst &I) {
2622 const Value *Op0 = I.getOperand(0);
2623 const Type *AggTy = Op0->getType();
2624 const Type *ValTy = I.getType();
2625 bool OutOfUndef = isa<UndefValue>(Op0);
2627 unsigned LinearIndex = ComputeLinearIndex(TLI, AggTy,
2628 I.idx_begin(), I.idx_end());
2630 SmallVector<EVT, 4> ValValueVTs;
2631 ComputeValueVTs(TLI, ValTy, ValValueVTs);
2633 unsigned NumValValues = ValValueVTs.size();
2634 SmallVector<SDValue, 4> Values(NumValValues);
2636 SDValue Agg = getValue(Op0);
2637 // Copy out the selected value(s).
2638 for (unsigned i = LinearIndex; i != LinearIndex + NumValValues; ++i)
2639 Values[i - LinearIndex] =
2641 DAG.getUNDEF(Agg.getNode()->getValueType(Agg.getResNo() + i)) :
2642 SDValue(Agg.getNode(), Agg.getResNo() + i);
2644 setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurDebugLoc(),
2645 DAG.getVTList(&ValValueVTs[0], NumValValues),
2646 &Values[0], NumValValues));
2650 void SelectionDAGLowering::visitGetElementPtr(User &I) {
2651 SDValue N = getValue(I.getOperand(0));
2652 const Type *Ty = I.getOperand(0)->getType();
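// Walk the indices, folding struct field offsets into constant adds and scaling
// array/vector indices by the element size before adding them to the pointer.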
2654 for (GetElementPtrInst::op_iterator OI = I.op_begin()+1, E = I.op_end();
2657 if (const StructType *StTy = dyn_cast<StructType>(Ty)) {
2658 unsigned Field = cast<ConstantInt>(Idx)->getZExtValue();
2661 uint64_t Offset = TD->getStructLayout(StTy)->getElementOffset(Field);
2662 N = DAG.getNode(ISD::ADD, getCurDebugLoc(), N.getValueType(), N,
2663 DAG.getIntPtrConstant(Offset));
2665 Ty = StTy->getElementType(Field);
2667 Ty = cast<SequentialType>(Ty)->getElementType();
2669 // If this is a constant subscript, handle it quickly.
2670 if (ConstantInt *CI = dyn_cast<ConstantInt>(Idx)) {
2671 if (CI->getZExtValue() == 0) continue;
2673 TD->getTypeAllocSize(Ty)*cast<ConstantInt>(CI)->getSExtValue();
2675 EVT PTy = TLI.getPointerTy();
2676 unsigned PtrBits = PTy.getSizeInBits();
2678 OffsVal = DAG.getNode(ISD::TRUNCATE, getCurDebugLoc(),
2680 DAG.getConstant(Offs, MVT::i64));
2682 OffsVal = DAG.getIntPtrConstant(Offs);
2683 N = DAG.getNode(ISD::ADD, getCurDebugLoc(), N.getValueType(), N,
2688 // N = N + Idx * ElementSize;
2689 uint64_t ElementSize = TD->getTypeAllocSize(Ty);
2690 SDValue IdxN = getValue(Idx);
2692 // If the index is smaller or larger than intptr_t, truncate or extend it.
2694 if (IdxN.getValueType().bitsLT(N.getValueType()))
2695 IdxN = DAG.getNode(ISD::SIGN_EXTEND, getCurDebugLoc(),
2696 N.getValueType(), IdxN);
2697 else if (IdxN.getValueType().bitsGT(N.getValueType()))
2698 IdxN = DAG.getNode(ISD::TRUNCATE, getCurDebugLoc(),
2699 N.getValueType(), IdxN);
2701 // If this is a multiply by a power of two, turn it into a shl
2702 // immediately. This is a very common case.
2703 if (ElementSize != 1) {
2704 if (isPowerOf2_64(ElementSize)) {
2705 unsigned Amt = Log2_64(ElementSize);
2706 IdxN = DAG.getNode(ISD::SHL, getCurDebugLoc(),
2707 N.getValueType(), IdxN,
2708 DAG.getConstant(Amt, TLI.getPointerTy()));
2710 SDValue Scale = DAG.getIntPtrConstant(ElementSize);
2711 IdxN = DAG.getNode(ISD::MUL, getCurDebugLoc(),
2712 N.getValueType(), IdxN, Scale);
2716 N = DAG.getNode(ISD::ADD, getCurDebugLoc(),
2717 N.getValueType(), N, IdxN);
2723 void SelectionDAGLowering::visitAlloca(AllocaInst &I) {
2724 // If this is a fixed sized alloca in the entry block of the function,
2725 // allocate it statically on the stack.
2726 if (FuncInfo.StaticAllocaMap.count(&I))
2727 return; // getValue will auto-populate this.
2729 const Type *Ty = I.getAllocatedType();
2730 uint64_t TySize = TLI.getTargetData()->getTypeAllocSize(Ty);
2732 std::max((unsigned)TLI.getTargetData()->getPrefTypeAlignment(Ty),
2735 SDValue AllocSize = getValue(I.getArraySize());
2737 AllocSize = DAG.getNode(ISD::MUL, getCurDebugLoc(), AllocSize.getValueType(),
2739 DAG.getConstant(TySize, AllocSize.getValueType()));
2743 EVT IntPtr = TLI.getPointerTy();
2744 if (IntPtr.bitsLT(AllocSize.getValueType()))
2745 AllocSize = DAG.getNode(ISD::TRUNCATE, getCurDebugLoc(),
2747 else if (IntPtr.bitsGT(AllocSize.getValueType()))
2748 AllocSize = DAG.getNode(ISD::ZERO_EXTEND, getCurDebugLoc(),
2751 // Handle alignment. If the requested alignment is less than or equal to
2752 // the stack alignment, ignore it. If the requested alignment is greater than
2753 // the stack alignment, we note this in the DYNAMIC_STACKALLOC node.
2754 unsigned StackAlign =
2755 TLI.getTargetMachine().getFrameInfo()->getStackAlignment();
2756 if (Align <= StackAlign)
2759 // Round the size of the allocation up to the stack alignment size
2760 // by adding SA-1 to the size.
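// For example, with a 16-byte stack alignment this computes (AllocSize + 15) & ~15.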
2761 AllocSize = DAG.getNode(ISD::ADD, getCurDebugLoc(),
2762 AllocSize.getValueType(), AllocSize,
2763 DAG.getIntPtrConstant(StackAlign-1));
2764 // Mask out the low bits for alignment purposes.
2765 AllocSize = DAG.getNode(ISD::AND, getCurDebugLoc(),
2766 AllocSize.getValueType(), AllocSize,
2767 DAG.getIntPtrConstant(~(uint64_t)(StackAlign-1)));
2769 SDValue Ops[] = { getRoot(), AllocSize, DAG.getIntPtrConstant(Align) };
2770 SDVTList VTs = DAG.getVTList(AllocSize.getValueType(), MVT::Other);
2771 SDValue DSA = DAG.getNode(ISD::DYNAMIC_STACKALLOC, getCurDebugLoc(),
2774 DAG.setRoot(DSA.getValue(1));
2776 // Inform the Frame Information that we have just allocated a variable-sized object.
2778 FuncInfo.MF->getFrameInfo()->CreateVariableSizedObject();
2781 void SelectionDAGLowering::visitLoad(LoadInst &I) {
2782 const Value *SV = I.getOperand(0);
2783 SDValue Ptr = getValue(SV);
2785 const Type *Ty = I.getType();
2786 bool isVolatile = I.isVolatile();
2787 unsigned Alignment = I.getAlignment();
2789 SmallVector<EVT, 4> ValueVTs;
2790 SmallVector<uint64_t, 4> Offsets;
2791 ComputeValueVTs(TLI, Ty, ValueVTs, &Offsets);
2792 unsigned NumValues = ValueVTs.size();
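// Aggregate-typed loads are split into one load per member value; Offsets gives
// each member's byte offset from the base pointer, and the pieces are merged
// back together below.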
2797 bool ConstantMemory = false;
2799 // Serialize volatile loads with other side effects.
2801 else if (AA->pointsToConstantMemory(SV)) {
2802 // Do not serialize (non-volatile) loads of constant memory with anything.
2803 Root = DAG.getEntryNode();
2804 ConstantMemory = true;
2806 // Do not serialize non-volatile loads against each other.
2807 Root = DAG.getRoot();
2810 SmallVector<SDValue, 4> Values(NumValues);
2811 SmallVector<SDValue, 4> Chains(NumValues);
2812 EVT PtrVT = Ptr.getValueType();
2813 for (unsigned i = 0; i != NumValues; ++i) {
2814 SDValue L = DAG.getLoad(ValueVTs[i], getCurDebugLoc(), Root,
2815 DAG.getNode(ISD::ADD, getCurDebugLoc(),
2817 DAG.getConstant(Offsets[i], PtrVT)),
2818 SV, Offsets[i], isVolatile, Alignment);
2820 Chains[i] = L.getValue(1);
2823 if (!ConstantMemory) {
2824 SDValue Chain = DAG.getNode(ISD::TokenFactor, getCurDebugLoc(),
2826 &Chains[0], NumValues);
2830 PendingLoads.push_back(Chain);
2833 setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurDebugLoc(),
2834 DAG.getVTList(&ValueVTs[0], NumValues),
2835 &Values[0], NumValues));
2839 void SelectionDAGLowering::visitStore(StoreInst &I) {
2840 Value *SrcV = I.getOperand(0);
2841 Value *PtrV = I.getOperand(1);
2843 SmallVector<EVT, 4> ValueVTs;
2844 SmallVector<uint64_t, 4> Offsets;
2845 ComputeValueVTs(TLI, SrcV->getType(), ValueVTs, &Offsets);
2846 unsigned NumValues = ValueVTs.size();
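// Mirror of visitLoad: emit one store per member value at its byte offset, then
// tie all the store chains together with a TokenFactor.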
2850 // Get the lowered operands. Note that we do this after
2851 // checking if NumValues is zero, because with zero results
2852 // the operands won't have values in the map.
2853 SDValue Src = getValue(SrcV);
2854 SDValue Ptr = getValue(PtrV);
2856 SDValue Root = getRoot();
2857 SmallVector<SDValue, 4> Chains(NumValues);
2858 EVT PtrVT = Ptr.getValueType();
2859 bool isVolatile = I.isVolatile();
2860 unsigned Alignment = I.getAlignment();
2861 for (unsigned i = 0; i != NumValues; ++i)
2862 Chains[i] = DAG.getStore(Root, getCurDebugLoc(),
2863 SDValue(Src.getNode(), Src.getResNo() + i),
2864 DAG.getNode(ISD::ADD, getCurDebugLoc(),
2866 DAG.getConstant(Offsets[i], PtrVT)),
2867 PtrV, Offsets[i], isVolatile, Alignment);
2869 DAG.setRoot(DAG.getNode(ISD::TokenFactor, getCurDebugLoc(),
2870 MVT::Other, &Chains[0], NumValues));
2873 /// visitTargetIntrinsic - Lower a call of a target intrinsic to an INTRINSIC node.
2875 void SelectionDAGLowering::visitTargetIntrinsic(CallInst &I,
2876 unsigned Intrinsic) {
2877 bool HasChain = !I.doesNotAccessMemory();
2878 bool OnlyLoad = HasChain && I.onlyReadsMemory();
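// Intrinsics that may write memory must be chained into the DAG ordering; ones
// that only read memory hang off the current root without being serialized
// against other loads.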
2880 // Build the operand list.
2881 SmallVector<SDValue, 8> Ops;
2882 if (HasChain) { // If this intrinsic has side-effects, chainify it.
2884 // We don't need to serialize loads against other loads.
2885 Ops.push_back(DAG.getRoot());
2887 Ops.push_back(getRoot());
2891 // Info is set by getTgtMemIntrinsic
2892 TargetLowering::IntrinsicInfo Info;
2893 bool IsTgtIntrinsic = TLI.getTgtMemIntrinsic(Info, I, Intrinsic);
2895 // Add the intrinsic ID as an integer operand if it's not a target intrinsic.
2896 if (!IsTgtIntrinsic)
2897 Ops.push_back(DAG.getConstant(Intrinsic, TLI.getPointerTy()));
2899 // Add all operands of the call to the operand list.
2900 for (unsigned i = 1, e = I.getNumOperands(); i != e; ++i) {
2901 SDValue Op = getValue(I.getOperand(i));
2902 assert(TLI.isTypeLegal(Op.getValueType()) &&
2903 "Intrinsic uses a non-legal type?");
2907 SmallVector<EVT, 4> ValueVTs;
2908 ComputeValueVTs(TLI, I.getType(), ValueVTs);
2910 for (unsigned Val = 0, E = ValueVTs.size(); Val != E; ++Val) {
2911 assert(TLI.isTypeLegal(ValueVTs[Val]) &&
2912 "Intrinsic uses a non-legal type?");
2916 ValueVTs.push_back(MVT::Other);
2918 SDVTList VTs = DAG.getVTList(ValueVTs.data(), ValueVTs.size());
2922 if (IsTgtIntrinsic) {
2923 // This is a target intrinsic that touches memory
2924 Result = DAG.getMemIntrinsicNode(Info.opc, getCurDebugLoc(),
2925 VTs, &Ops[0], Ops.size(),
2926 Info.memVT, Info.ptrVal, Info.offset,
2927 Info.align, Info.vol,
2928 Info.readMem, Info.writeMem);
2931 Result = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, getCurDebugLoc(),
2932 VTs, &Ops[0], Ops.size());
2933 else if (I.getType() != Type::getVoidTy(*DAG.getContext()))
2934 Result = DAG.getNode(ISD::INTRINSIC_W_CHAIN, getCurDebugLoc(),
2935 VTs, &Ops[0], Ops.size());
2937 Result = DAG.getNode(ISD::INTRINSIC_VOID, getCurDebugLoc(),
2938 VTs, &Ops[0], Ops.size());
2941 SDValue Chain = Result.getValue(Result.getNode()->getNumValues()-1);
2943 PendingLoads.push_back(Chain);
2947 if (I.getType() != Type::getVoidTy(*DAG.getContext())) {
2948 if (const VectorType *PTy = dyn_cast<VectorType>(I.getType())) {
2949 EVT VT = TLI.getValueType(PTy);
2950 Result = DAG.getNode(ISD::BIT_CONVERT, getCurDebugLoc(), VT, Result);
2952 setValue(&I, Result);
2956 /// ExtractTypeInfo - Returns the type info, possibly bitcast, encoded in V.
2957 static GlobalVariable *ExtractTypeInfo(Value *V) {
2958 V = V->stripPointerCasts();
2959 GlobalVariable *GV = dyn_cast<GlobalVariable>(V);
2960 assert ((GV || isa<ConstantPointerNull>(V)) &&
2961 "TypeInfo must be a global variable or NULL");
2967 /// AddCatchInfo - Extract the personality and type infos from an eh.selector
2968 /// call, and add them to the specified machine basic block.
2969 void AddCatchInfo(CallInst &I, MachineModuleInfo *MMI,
2970 MachineBasicBlock *MBB) {
2971 // Inform the MachineModuleInfo of the personality for this landing pad.
2972 ConstantExpr *CE = cast<ConstantExpr>(I.getOperand(2));
2973 assert(CE->getOpcode() == Instruction::BitCast &&
2974 isa<Function>(CE->getOperand(0)) &&
2975 "Personality should be a function");
2976 MMI->addPersonality(MBB, cast<Function>(CE->getOperand(0)));
2978 // Gather all the type infos for this landing pad and pass them along to
2979 // MachineModuleInfo.
2980 std::vector<GlobalVariable *> TyInfo;
2981 unsigned N = I.getNumOperands();
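// Clause operands follow the personality: a ConstantInt introduces a filter
// whose type infos follow it, while other operands are catch type infos; scan
// from the last operand backwards.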
2983 for (unsigned i = N - 1; i > 2; --i) {
2984 if (ConstantInt *CI = dyn_cast<ConstantInt>(I.getOperand(i))) {
2985 unsigned FilterLength = CI->getZExtValue();
2986 unsigned FirstCatch = i + FilterLength + !FilterLength;
2987 assert (FirstCatch <= N && "Invalid filter length");
2989 if (FirstCatch < N) {
2990 TyInfo.reserve(N - FirstCatch);
2991 for (unsigned j = FirstCatch; j < N; ++j)
2992 TyInfo.push_back(ExtractTypeInfo(I.getOperand(j)));
2993 MMI->addCatchTypeInfo(MBB, TyInfo);
2997 if (!FilterLength) {
2999 MMI->addCleanup(MBB);
3002 TyInfo.reserve(FilterLength - 1);
3003 for (unsigned j = i + 1; j < FirstCatch; ++j)
3004 TyInfo.push_back(ExtractTypeInfo(I.getOperand(j)));
3005 MMI->addFilterTypeInfo(MBB, TyInfo);
3014 TyInfo.reserve(N - 3);
3015 for (unsigned j = 3; j < N; ++j)
3016 TyInfo.push_back(ExtractTypeInfo(I.getOperand(j)));
3017 MMI->addCatchTypeInfo(MBB, TyInfo);
3023 /// GetSignificand - Get the significand and build it into a floating-point
3024 /// number with exponent of 1:
3026 /// Op = (Op & 0x007fffff) | 0x3f800000;
3028 /// where Op is the hexadecimal representation of the floating-point value.
3030 GetSignificand(SelectionDAG &DAG, SDValue Op, DebugLoc dl) {
3031 SDValue t1 = DAG.getNode(ISD::AND, dl, MVT::i32, Op,
3032 DAG.getConstant(0x007fffff, MVT::i32));
3033 SDValue t2 = DAG.getNode(ISD::OR, dl, MVT::i32, t1,
3034 DAG.getConstant(0x3f800000, MVT::i32));
3035 return DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f32, t2);
3038 /// GetExponent - Get the exponent:
3040 /// (float)(int)(((Op & 0x7f800000) >> 23) - 127);
3042 /// where Op is the hexadecimal representation of the floating-point value.
3044 GetExponent(SelectionDAG &DAG, SDValue Op, const TargetLowering &TLI,
3046 SDValue t0 = DAG.getNode(ISD::AND, dl, MVT::i32, Op,
3047 DAG.getConstant(0x7f800000, MVT::i32));
3048 SDValue t1 = DAG.getNode(ISD::SRL, dl, MVT::i32, t0,
3049 DAG.getConstant(23, TLI.getPointerTy()));
3050 SDValue t2 = DAG.getNode(ISD::SUB, dl, MVT::i32, t1,
3051 DAG.getConstant(127, MVT::i32));
3052 return DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, t2);
3055 /// getF32Constant - Get a 32-bit floating-point constant.
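/// The Flt argument is the raw IEEE-754 bit pattern of the desired value
/// (for example, 0x3f800000 is 1.0f); the expansions below use this for their
/// polynomial coefficients.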
3057 getF32Constant(SelectionDAG &DAG, unsigned Flt) {
3058 return DAG.getConstantFP(APFloat(APInt(32, Flt)), MVT::f32);
3061 /// Inlined utility function to implement binary input atomic intrinsics for
3062 /// visitIntrinsicCall: I is the call instruction and
3063 /// Op is the associated NodeType for I.
3065 SelectionDAGLowering::implVisitBinaryAtomic(CallInst& I, ISD::NodeType Op) {
3066 SDValue Root = getRoot();
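// Build the atomic node with the current root as its chain; its first result is
// the loaded value and its second is the outgoing chain, which becomes the new
// root so later memory operations stay ordered after the atomic.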
3068 DAG.getAtomic(Op, getCurDebugLoc(),
3069 getValue(I.getOperand(2)).getValueType().getSimpleVT(),
3071 getValue(I.getOperand(1)),
3072 getValue(I.getOperand(2)),
3075 DAG.setRoot(L.getValue(1));
3079 // implVisitAluOverflow - Lower arithmetic overflow intrinsics.
3081 SelectionDAGLowering::implVisitAluOverflow(CallInst &I, ISD::NodeType Op) {
3082 SDValue Op1 = getValue(I.getOperand(1));
3083 SDValue Op2 = getValue(I.getOperand(2));
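// These nodes return two results: the arithmetic value and an i1 overflow flag,
// matching the {result, overflow} pair of the llvm.*.with.overflow intrinsics.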
3085 SDVTList VTs = DAG.getVTList(Op1.getValueType(), MVT::i1);
3086 SDValue Result = DAG.getNode(Op, getCurDebugLoc(), VTs, Op1, Op2);
3088 setValue(&I, Result);
3092 /// visitExp - Lower an exp intrinsic. Handles the special sequences for
3093 /// limited-precision mode.
3095 SelectionDAGLowering::visitExp(CallInst &I) {
3097 DebugLoc dl = getCurDebugLoc();
3099 if (getValue(I.getOperand(1)).getValueType() == MVT::f32 &&
3100 LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
3101 SDValue Op = getValue(I.getOperand(1));
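// exp(X) is computed as 2^(X * log2(e)): the integer part of the product is
// placed directly into the exponent field, and a minimax polynomial evaluates
// 2^fraction.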
3103 // Put the exponent in the right bit position for later addition to the final result:
3106 // #define LOG2OFe 1.4426950f
3107 // IntegerPartOfX = ((int32_t)(X * LOG2OFe));
3108 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, Op,
3109 getF32Constant(DAG, 0x3fb8aa3b));
3110 SDValue IntegerPartOfX = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, t0);
3112 // FractionalPartOfX = (X * LOG2OFe) - (float)IntegerPartOfX;
3113 SDValue t1 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, IntegerPartOfX);
3114 SDValue X = DAG.getNode(ISD::FSUB, dl, MVT::f32, t0, t1);
3116 // IntegerPartOfX <<= 23;
3117 IntegerPartOfX = DAG.getNode(ISD::SHL, dl, MVT::i32, IntegerPartOfX,
3118 DAG.getConstant(23, TLI.getPointerTy()));
3120 if (LimitFloatPrecision <= 6) {
3121 // For floating-point precision of 6:
3123 // TwoToFractionalPartOfX =
3124 //   0.997535578f +
3125 //     (0.735607626f + 0.252464424f * x) * x;
3127 // error 0.0144103317, which is 6 bits
3128 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3129 getF32Constant(DAG, 0x3e814304));
3130 SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
3131 getF32Constant(DAG, 0x3f3c50c8));
3132 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
3133 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
3134 getF32Constant(DAG, 0x3f7f5e7e));
3135 SDValue TwoToFracPartOfX = DAG.getNode(ISD::BIT_CONVERT, dl,MVT::i32, t5);
3137 // Add the exponent into the result in integer domain.
3138 SDValue t6 = DAG.getNode(ISD::ADD, dl, MVT::i32,
3139 TwoToFracPartOfX, IntegerPartOfX);
3141 result = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f32, t6);
3142 } else if (LimitFloatPrecision > 6 && LimitFloatPrecision <= 12) {
3143 // For floating-point precision of 12:
3145 // TwoToFractionalPartOfX =
3148 // (0.224338339f + 0.792043434e-1f * x) * x) * x;
3150 // 0.000107046256 error, which is 13 to 14 bits
3151 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3152 getF32Constant(DAG, 0x3da235e3));
3153 SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
3154 getF32Constant(DAG, 0x3e65b8f3));
3155 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
3156 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
3157 getF32Constant(DAG, 0x3f324b07));
3158 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
3159 SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
3160 getF32Constant(DAG, 0x3f7ff8fd));
3161 SDValue TwoToFracPartOfX = DAG.getNode(ISD::BIT_CONVERT, dl,MVT::i32, t7);
3163 // Add the exponent into the result in integer domain.
3164 SDValue t8 = DAG.getNode(ISD::ADD, dl, MVT::i32,
3165 TwoToFracPartOfX, IntegerPartOfX);
3167 result = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f32, t8);
3168 } else { // LimitFloatPrecision > 12 && LimitFloatPrecision <= 18
3169 // For floating-point precision of 18:
3171 // TwoToFractionalPartOfX =
3175 // (0.554906021e-1f +
3176 // (0.961591928e-2f +
3177 // (0.136028312e-2f + 0.157059148e-3f *x)*x)*x)*x)*x)*x;
3179 // error 2.47208000*10^(-7), which is better than 18 bits
3180 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3181 getF32Constant(DAG, 0x3924b03e));
3182 SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
3183 getF32Constant(DAG, 0x3ab24b87));
3184 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
3185 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
3186 getF32Constant(DAG, 0x3c1d8c17));
3187 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
3188 SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
3189 getF32Constant(DAG, 0x3d634a1d));
3190 SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
3191 SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8,
3192 getF32Constant(DAG, 0x3e75fe14));
3193 SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X);
3194 SDValue t11 = DAG.getNode(ISD::FADD, dl, MVT::f32, t10,
3195 getF32Constant(DAG, 0x3f317234));
3196 SDValue t12 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t11, X);
3197 SDValue t13 = DAG.getNode(ISD::FADD, dl, MVT::f32, t12,
3198 getF32Constant(DAG, 0x3f800000));
3199 SDValue TwoToFracPartOfX = DAG.getNode(ISD::BIT_CONVERT, dl,
3202 // Add the exponent into the result in integer domain.
3203 SDValue t14 = DAG.getNode(ISD::ADD, dl, MVT::i32,
3204 TwoToFracPartOfX, IntegerPartOfX);
3206 result = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f32, t14);
3209 // No special expansion.
3210 result = DAG.getNode(ISD::FEXP, dl,
3211 getValue(I.getOperand(1)).getValueType(),
3212 getValue(I.getOperand(1)));
3215 setValue(&I, result);
3218 /// visitLog - Lower a log intrinsic. Handles the special sequences for
3219 /// limited-precision mode.
3221 SelectionDAGLowering::visitLog(CallInst &I) {
3223 DebugLoc dl = getCurDebugLoc();
3225 if (getValue(I.getOperand(1)).getValueType() == MVT::f32 &&
3226 LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
3227 SDValue Op = getValue(I.getOperand(1));
3228 SDValue Op1 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, Op);
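// With the float bits exposed as an integer, log(X) = exponent*log(2) +
// log(significand), where the significand lies in [1,2) and its log is
// approximated by a polynomial.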
3230 // Scale the exponent by log(2) [0.69314718f].
3231 SDValue Exp = GetExponent(DAG, Op1, TLI, dl);
3232 SDValue LogOfExponent = DAG.getNode(ISD::FMUL, dl, MVT::f32, Exp,
3233 getF32Constant(DAG, 0x3f317218));
3235 // Get the significand and build it into a floating-point number with exponent of 1.
3237 SDValue X = GetSignificand(DAG, Op1, dl);
3239 if (LimitFloatPrecision <= 6) {
3240 // For floating-point precision of 6:
3244 // (1.4034025f - 0.23903021f * x) * x;
3246 // error 0.0034276066, which is better than 8 bits
3247 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3248 getF32Constant(DAG, 0xbe74c456));
3249 SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
3250 getF32Constant(DAG, 0x3fb3a2b1));
3251 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
3252 SDValue LogOfMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
3253 getF32Constant(DAG, 0x3f949a29));
3255 result = DAG.getNode(ISD::FADD, dl,
3256 MVT::f32, LogOfExponent, LogOfMantissa);
3257 } else if (LimitFloatPrecision > 6 && LimitFloatPrecision <= 12) {
3258 // For floating-point precision of 12:
3264 // (0.44717955f - 0.56570851e-1f * x) * x) * x) * x;
3266 // error 0.000061011436, which is 14 bits
3267 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3268 getF32Constant(DAG, 0xbd67b6d6));
3269 SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
3270 getF32Constant(DAG, 0x3ee4f4b8));
3271 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
3272 SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
3273 getF32Constant(DAG, 0x3fbc278b));
3274 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
3275 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
3276 getF32Constant(DAG, 0x40348e95));
3277 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
3278 SDValue LogOfMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
3279 getF32Constant(DAG, 0x3fdef31a));
3281 result = DAG.getNode(ISD::FADD, dl,
3282 MVT::f32, LogOfExponent, LogOfMantissa);
3283 } else { // LimitFloatPrecision > 12 && LimitFloatPrecision <= 18
3284 // For floating-point precision of 18:
3292 // (0.19073739f - 0.17809712e-1f * x) * x) * x) * x) * x)*x;
3294 // error 0.0000023660568, which is better than 18 bits
3295 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3296 getF32Constant(DAG, 0xbc91e5ac));
3297 SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
3298 getF32Constant(DAG, 0x3e4350aa));
3299 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
3300 SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
3301 getF32Constant(DAG, 0x3f60d3e3));
3302 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
3303 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
3304 getF32Constant(DAG, 0x4011cdf0));
3305 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
3306 SDValue t7 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
3307 getF32Constant(DAG, 0x406cfd1c));
3308 SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
3309 SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8,
3310 getF32Constant(DAG, 0x408797cb));
3311 SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X);
3312 SDValue LogOfMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t10,
3313 getF32Constant(DAG, 0x4006dcab));
3315 result = DAG.getNode(ISD::FADD, dl,
3316 MVT::f32, LogOfExponent, LogOfMantissa);
3319 // No special expansion.
3320 result = DAG.getNode(ISD::FLOG, dl,
3321 getValue(I.getOperand(1)).getValueType(),
3322 getValue(I.getOperand(1)));
3325 setValue(&I, result);
3328 /// visitLog2 - Lower a log2 intrinsic. Handles the special sequences for
3329 /// limited-precision mode.
3330 void
3331 SelectionDAGLowering::visitLog2(CallInst &I) {
3332 SDValue result;
3333 DebugLoc dl = getCurDebugLoc();
3335 if (getValue(I.getOperand(1)).getValueType() == MVT::f32 &&
3336 LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
3337 SDValue Op = getValue(I.getOperand(1));
3338 SDValue Op1 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, Op);
3340 // Get the exponent.
3341 SDValue LogOfExponent = GetExponent(DAG, Op1, TLI, dl);
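// For x = 2^e * m with m in [1,2), log2(x) = e + log2(m), so the unbiased
// exponent is used directly and only log2(m) needs to be approximated.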
3343 // Get the significand and build it into a floating-point number with
3344 // exponent of 1.
3345 SDValue X = GetSignificand(DAG, Op1, dl);
3347 // Different possible minimax approximations of significand in
3348 // floating-point for various degrees of accuracy over [1,2].
3349 if (LimitFloatPrecision <= 6) {
3350 // For floating-point precision of 6:
3352 // Log2ofMantissa = -1.6749035f + (2.0246817f - .34484768f * x) * x;
3354 // error 0.0049451742, which is more than 7 bits
3355 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3356 getF32Constant(DAG, 0xbeb08fe0));
3357 SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
3358 getF32Constant(DAG, 0x40019463));
3359 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
3360 SDValue Log2ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
3361 getF32Constant(DAG, 0x3fd6633d));
3363 result = DAG.getNode(ISD::FADD, dl,
3364 MVT::f32, LogOfExponent, Log2ofMantissa);
3365 } else if (LimitFloatPrecision > 6 && LimitFloatPrecision <= 12) {
3366 // For floating-point precision of 12:
3372 // (.645142248f - 0.816157886e-1f * x) * x) * x) * x;
3374 // error 0.0000876136000, which is better than 13 bits
3375 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3376 getF32Constant(DAG, 0xbda7262e));
3377 SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
3378 getF32Constant(DAG, 0x3f25280b));
3379 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
3380 SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
3381 getF32Constant(DAG, 0x4007b923));
3382 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
3383 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
3384 getF32Constant(DAG, 0x40823e2f));
3385 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
3386 SDValue Log2ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
3387 getF32Constant(DAG, 0x4020d29c));
3389 result = DAG.getNode(ISD::FADD, dl,
3390 MVT::f32, LogOfExponent, Log2ofMantissa);
3391 } else { // LimitFloatPrecision > 12 && LimitFloatPrecision <= 18
3392 // For floating-point precision of 18:
3401 // 0.25691327e-1f * x) * x) * x) * x) * x) * x;
3403 // error 0.0000018516, which is better than 18 bits
3404 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3405 getF32Constant(DAG, 0xbcd2769e));
3406 SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
3407 getF32Constant(DAG, 0x3e8ce0b9));
3408 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
3409 SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
3410 getF32Constant(DAG, 0x3fa22ae7));
3411 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
3412 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
3413 getF32Constant(DAG, 0x40525723));
3414 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
3415 SDValue t7 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
3416 getF32Constant(DAG, 0x40aaf200));
3417 SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
3418 SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8,
3419 getF32Constant(DAG, 0x40c39dad));
3420 SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X);
3421 SDValue Log2ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t10,
3422 getF32Constant(DAG, 0x4042902c));
3424 result = DAG.getNode(ISD::FADD, dl,
3425 MVT::f32, LogOfExponent, Log2ofMantissa);
3428 // No special expansion.
3429 result = DAG.getNode(ISD::FLOG2, dl,
3430 getValue(I.getOperand(1)).getValueType(),
3431 getValue(I.getOperand(1)));
3434 setValue(&I, result);
3437 /// visitLog10 - Lower a log10 intrinsic. Handles the special sequences for
3438 /// limited-precision mode.
3439 void
3440 SelectionDAGLowering::visitLog10(CallInst &I) {
3441 SDValue result;
3442 DebugLoc dl = getCurDebugLoc();
3444 if (getValue(I.getOperand(1)).getValueType() == MVT::f32 &&
3445 LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
3446 SDValue Op = getValue(I.getOperand(1));
3447 SDValue Op1 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, Op);
3449 // Scale the exponent by log10(2) [0.30102999f].
3450 SDValue Exp = GetExponent(DAG, Op1, TLI, dl);
3451 SDValue LogOfExponent = DAG.getNode(ISD::FMUL, dl, MVT::f32, Exp,
3452 getF32Constant(DAG, 0x3e9a209a));
3454 // Get the significand and build it into a floating-point number with
3455 // exponent of 1.
3456 SDValue X = GetSignificand(DAG, Op1, dl);
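// As above, log10(x) = e*log10(2) + log10(m) for x = 2^e * m with m in [1,2);
// 0x3e9a209a is log10(2) ~= 0.30103 encoded as an IEEE-754 single.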
3458 if (LimitFloatPrecision <= 6) {
3459 // For floating-point precision of 6:
3461 // Log10ofMantissa =
3463 // (0.60948995f - 0.10380950f * x) * x;
3465 // error 0.0014886165, which is 6 bits
3466 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3467 getF32Constant(DAG, 0xbdd49a13));
3468 SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
3469 getF32Constant(DAG, 0x3f1c0789));
3470 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
3471 SDValue Log10ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
3472 getF32Constant(DAG, 0x3f011300));
3474 result = DAG.getNode(ISD::FADD, dl,
3475 MVT::f32, LogOfExponent, Log10ofMantissa);
3476 } else if (LimitFloatPrecision > 6 && LimitFloatPrecision <= 12) {
3477 // For floating-point precision of 12:
3479 // Log10ofMantissa =
3482 // (-0.31664806f + 0.47637168e-1f * x) * x) * x;
3484 // error 0.00019228036, which is better than 12 bits
3485 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3486 getF32Constant(DAG, 0x3d431f31));
3487 SDValue t1 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t0,
3488 getF32Constant(DAG, 0x3ea21fb2));
3489 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
3490 SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
3491 getF32Constant(DAG, 0x3f6ae232));
3492 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
3493 SDValue Log10ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t4,
3494 getF32Constant(DAG, 0x3f25f7c3));
3496 result = DAG.getNode(ISD::FADD, dl,
3497 MVT::f32, LogOfExponent, Log10ofMantissa);
3498 } else { // LimitFloatPrecision > 12 && LimitFloatPrecision <= 18
3499 // For floating-point precision of 18:
3501 // Log10ofMantissa =
3506 // (-0.12539807f + 0.13508273e-1f * x) * x) * x) * x) * x;
3508 // error 0.0000037995730, which is better than 18 bits
3509 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3510 getF32Constant(DAG, 0x3c5d51ce));
3511 SDValue t1 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t0,
3512 getF32Constant(DAG, 0x3e00685a));
3513 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
3514 SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
3515 getF32Constant(DAG, 0x3efb6798));
3516 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
3517 SDValue t5 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t4,
3518 getF32Constant(DAG, 0x3f88d192));
3519 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
3520 SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
3521 getF32Constant(DAG, 0x3fc4316c));
3522 SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
3523 SDValue Log10ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t8,
3524 getF32Constant(DAG, 0x3f57ce70));
3526 result = DAG.getNode(ISD::FADD, dl,
3527 MVT::f32, LogOfExponent, Log10ofMantissa);
3530 // No special expansion.
3531 result = DAG.getNode(ISD::FLOG10, dl,
3532 getValue(I.getOperand(1)).getValueType(),
3533 getValue(I.getOperand(1)));
3536 setValue(&I, result);
3539 /// visitExp2 - Lower an exp2 intrinsic. Handles the special sequences for
3540 /// limited-precision mode.
3541 void
3542 SelectionDAGLowering::visitExp2(CallInst &I) {
3543 SDValue result;
3544 DebugLoc dl = getCurDebugLoc();
3546 if (getValue(I.getOperand(1)).getValueType() == MVT::f32 &&
3547 LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
3548 SDValue Op = getValue(I.getOperand(1));
3550 SDValue IntegerPartOfX = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, Op);
3552 // FractionalPartOfX = x - (float)IntegerPartOfX;
3553 SDValue t1 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, IntegerPartOfX);
3554 SDValue X = DAG.getNode(ISD::FSUB, dl, MVT::f32, Op, t1);
3556 // IntegerPartOfX <<= 23;
3557 IntegerPartOfX = DAG.getNode(ISD::SHL, dl, MVT::i32, IntegerPartOfX,
3558 DAG.getConstant(23, TLI.getPointerTy()));
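// Bit 23 is the least-significant bit of the IEEE-754 single-precision
// exponent field, so adding (IntegerPartOfX << 23) to the bit pattern of
// 2^FractionalPartOfX below scales that value by 2^IntegerPartOfX.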
3560 if (LimitFloatPrecision <= 6) {
3561 // For floating-point precision of 6:
3563 // TwoToFractionalPartOfX =
3565 // (0.735607626f + 0.252464424f * x) * x;
3567 // error 0.0144103317, which is 6 bits
3568 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3569 getF32Constant(DAG, 0x3e814304));
3570 SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
3571 getF32Constant(DAG, 0x3f3c50c8));
3572 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
3573 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
3574 getF32Constant(DAG, 0x3f7f5e7e));
3575 SDValue t6 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, t5);
3576 SDValue TwoToFractionalPartOfX =
3577 DAG.getNode(ISD::ADD, dl, MVT::i32, t6, IntegerPartOfX);
3579 result = DAG.getNode(ISD::BIT_CONVERT, dl,
3580 MVT::f32, TwoToFractionalPartOfX);
3581 } else if (LimitFloatPrecision > 6 && LimitFloatPrecision <= 12) {
3582 // For floating-point precision of 12:
3584 // TwoToFractionalPartOfX =
3587 // (0.224338339f + 0.792043434e-1f * x) * x) * x;
3589 // error 0.000107046256, which is 13 to 14 bits
3590 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3591 getF32Constant(DAG, 0x3da235e3));
3592 SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
3593 getF32Constant(DAG, 0x3e65b8f3));
3594 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
3595 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
3596 getF32Constant(DAG, 0x3f324b07));
3597 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
3598 SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
3599 getF32Constant(DAG, 0x3f7ff8fd));
3600 SDValue t8 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, t7);
3601 SDValue TwoToFractionalPartOfX =
3602 DAG.getNode(ISD::ADD, dl, MVT::i32, t8, IntegerPartOfX);
3604 result = DAG.getNode(ISD::BIT_CONVERT, dl,
3605 MVT::f32, TwoToFractionalPartOfX);
3606 } else { // LimitFloatPrecision > 12 && LimitFloatPrecision <= 18
3607 // For floating-point precision of 18:
3609 // TwoToFractionalPartOfX =
3613 // (0.554906021e-1f +
3614 // (0.961591928e-2f +
3615 // (0.136028312e-2f + 0.157059148e-3f *x)*x)*x)*x)*x)*x;
3616 // error 2.47208000*10^(-7), which is better than 18 bits
3617 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3618 getF32Constant(DAG, 0x3924b03e));
3619 SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
3620 getF32Constant(DAG, 0x3ab24b87));
3621 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
3622 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
3623 getF32Constant(DAG, 0x3c1d8c17));
3624 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
3625 SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
3626 getF32Constant(DAG, 0x3d634a1d));
3627 SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
3628 SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8,
3629 getF32Constant(DAG, 0x3e75fe14));
3630 SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X);
3631 SDValue t11 = DAG.getNode(ISD::FADD, dl, MVT::f32, t10,
3632 getF32Constant(DAG, 0x3f317234));
3633 SDValue t12 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t11, X);
3634 SDValue t13 = DAG.getNode(ISD::FADD, dl, MVT::f32, t12,
3635 getF32Constant(DAG, 0x3f800000));
3636 SDValue t14 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, t13);
3637 SDValue TwoToFractionalPartOfX =
3638 DAG.getNode(ISD::ADD, dl, MVT::i32, t14, IntegerPartOfX);
3640 result = DAG.getNode(ISD::BIT_CONVERT, dl,
3641 MVT::f32, TwoToFractionalPartOfX);
3644 // No special expansion.
3645 result = DAG.getNode(ISD::FEXP2, dl,
3646 getValue(I.getOperand(1)).getValueType(),
3647 getValue(I.getOperand(1)));
3650 setValue(&I, result);
3653 /// visitPow - Lower a pow intrinsic. Handles the special sequences for
3654 /// limited-precision mode with x == 10.0f.
3655 void
3656 SelectionDAGLowering::visitPow(CallInst &I) {
3657 SDValue result;
3658 Value *Val = I.getOperand(1);
3659 DebugLoc dl = getCurDebugLoc();
3660 bool IsExp10 = false;
3662 if (getValue(Val).getValueType() == MVT::f32 &&
3663 getValue(I.getOperand(2)).getValueType() == MVT::f32 &&
3664 LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
3665 if (Constant *C = const_cast<Constant*>(dyn_cast<Constant>(Val))) {
3666 if (ConstantFP *CFP = dyn_cast<ConstantFP>(C)) {
3667 APFloat Ten(10.0f);
3668 IsExp10 = CFP->getValueAPF().bitwiseIsEqual(Ten);
3673 if (IsExp10 && LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
3674 SDValue Op = getValue(I.getOperand(2));
3676 // Put the exponent in the right bit position for later addition to the
3677 // final result:
3679 // #define LOG2OF10 3.3219281f
3680 // IntegerPartOfX = (int32_t)(x * LOG2OF10);
3681 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, Op,
3682 getF32Constant(DAG, 0x40549a78));
3683 SDValue IntegerPartOfX = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, t0);
3685 // FractionalPartOfX = x - (float)IntegerPartOfX;
3686 SDValue t1 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, IntegerPartOfX);
3687 SDValue X = DAG.getNode(ISD::FSUB, dl, MVT::f32, t0, t1);
3689 // IntegerPartOfX <<= 23;
3690 IntegerPartOfX = DAG.getNode(ISD::SHL, dl, MVT::i32, IntegerPartOfX,
3691 DAG.getConstant(23, TLI.getPointerTy()));
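// As in visitExp2: 10^x = 2^(x * log2(10)), so the integer part of
// x * log2(10) is shifted into the exponent field (bit 23 and up) and the
// fractional part is handled by the polynomial below.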
3693 if (LimitFloatPrecision <= 6) {
3694 // For floating-point precision of 6:
3696 // twoToFractionalPartOfX =
3698 // (0.735607626f + 0.252464424f * x) * x;
3700 // error 0.0144103317, which is 6 bits
3701 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3702 getF32Constant(DAG, 0x3e814304));
3703 SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
3704 getF32Constant(DAG, 0x3f3c50c8));
3705 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
3706 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
3707 getF32Constant(DAG, 0x3f7f5e7e));
3708 SDValue t6 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, t5);
3709 SDValue TwoToFractionalPartOfX =
3710 DAG.getNode(ISD::ADD, dl, MVT::i32, t6, IntegerPartOfX);
3712 result = DAG.getNode(ISD::BIT_CONVERT, dl,
3713 MVT::f32, TwoToFractionalPartOfX);
3714 } else if (LimitFloatPrecision > 6 && LimitFloatPrecision <= 12) {
3715 // For floating-point precision of 12:
3717 // TwoToFractionalPartOfX =
3720 // (0.224338339f + 0.792043434e-1f * x) * x) * x;
3722 // error 0.000107046256, which is 13 to 14 bits
3723 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3724 getF32Constant(DAG, 0x3da235e3));
3725 SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
3726 getF32Constant(DAG, 0x3e65b8f3));
3727 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
3728 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
3729 getF32Constant(DAG, 0x3f324b07));
3730 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
3731 SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
3732 getF32Constant(DAG, 0x3f7ff8fd));
3733 SDValue t8 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, t7);
3734 SDValue TwoToFractionalPartOfX =
3735 DAG.getNode(ISD::ADD, dl, MVT::i32, t8, IntegerPartOfX);
3737 result = DAG.getNode(ISD::BIT_CONVERT, dl,
3738 MVT::f32, TwoToFractionalPartOfX);
3739 } else { // LimitFloatPrecision > 12 && LimitFloatPrecision <= 18
3740 // For floating-point precision of 18:
3742 // TwoToFractionalPartOfX =
3746 // (0.554906021e-1f +
3747 // (0.961591928e-2f +
3748 // (0.136028312e-2f + 0.157059148e-3f *x)*x)*x)*x)*x)*x;
3749 // error 2.47208000*10^(-7), which is better than 18 bits
3750 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3751 getF32Constant(DAG, 0x3924b03e));
3752 SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
3753 getF32Constant(DAG, 0x3ab24b87));
3754 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
3755 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
3756 getF32Constant(DAG, 0x3c1d8c17));
3757 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
3758 SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
3759 getF32Constant(DAG, 0x3d634a1d));
3760 SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
3761 SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8,
3762 getF32Constant(DAG, 0x3e75fe14));
3763 SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X);
3764 SDValue t11 = DAG.getNode(ISD::FADD, dl, MVT::f32, t10,
3765 getF32Constant(DAG, 0x3f317234));
3766 SDValue t12 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t11, X);
3767 SDValue t13 = DAG.getNode(ISD::FADD, dl, MVT::f32, t12,
3768 getF32Constant(DAG, 0x3f800000));
3769 SDValue t14 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, t13);
3770 SDValue TwoToFractionalPartOfX =
3771 DAG.getNode(ISD::ADD, dl, MVT::i32, t14, IntegerPartOfX);
3773 result = DAG.getNode(ISD::BIT_CONVERT, dl,
3774 MVT::f32, TwoToFractionalPartOfX);
3777 // No special expansion.
3778 result = DAG.getNode(ISD::FPOW, dl,
3779 getValue(I.getOperand(1)).getValueType(),
3780 getValue(I.getOperand(1)),
3781 getValue(I.getOperand(2)));
3784 setValue(&I, result);
3787 /// visitIntrinsicCall - Lower the call to the specified intrinsic function. If
3788 /// we want to emit this as a call to a named external function, return the name;
3789 /// otherwise lower it and return null.
3790 const char *
3791 SelectionDAGLowering::visitIntrinsicCall(CallInst &I, unsigned Intrinsic) {
3792 DebugLoc dl = getCurDebugLoc();
3793 switch (Intrinsic) {
3795 // By default, turn this into a target intrinsic node.
3796 visitTargetIntrinsic(I, Intrinsic);
3798 case Intrinsic::vastart: visitVAStart(I); return 0;
3799 case Intrinsic::vaend: visitVAEnd(I); return 0;
3800 case Intrinsic::vacopy: visitVACopy(I); return 0;
3801 case Intrinsic::returnaddress:
3802 setValue(&I, DAG.getNode(ISD::RETURNADDR, dl, TLI.getPointerTy(),
3803 getValue(I.getOperand(1))));
3805 case Intrinsic::frameaddress:
3806 setValue(&I, DAG.getNode(ISD::FRAMEADDR, dl, TLI.getPointerTy(),
3807 getValue(I.getOperand(1))));
3809 case Intrinsic::setjmp:
3810 return "_setjmp"+!TLI.usesUnderscoreSetJmp();
3812 case Intrinsic::longjmp:
3813 return "_longjmp"+!TLI.usesUnderscoreLongJmp();
3815 case Intrinsic::memcpy: {
3816 SDValue Op1 = getValue(I.getOperand(1));
3817 SDValue Op2 = getValue(I.getOperand(2));
3818 SDValue Op3 = getValue(I.getOperand(3));
3819 unsigned Align = cast<ConstantInt>(I.getOperand(4))->getZExtValue();
3820 DAG.setRoot(DAG.getMemcpy(getRoot(), dl, Op1, Op2, Op3, Align, false,
3821 I.getOperand(1), 0, I.getOperand(2), 0));
3824 case Intrinsic::memset: {
3825 SDValue Op1 = getValue(I.getOperand(1));
3826 SDValue Op2 = getValue(I.getOperand(2));
3827 SDValue Op3 = getValue(I.getOperand(3));
3828 unsigned Align = cast<ConstantInt>(I.getOperand(4))->getZExtValue();
3829 DAG.setRoot(DAG.getMemset(getRoot(), dl, Op1, Op2, Op3, Align,
3830 I.getOperand(1), 0));
3833 case Intrinsic::memmove: {
3834 SDValue Op1 = getValue(I.getOperand(1));
3835 SDValue Op2 = getValue(I.getOperand(2));
3836 SDValue Op3 = getValue(I.getOperand(3));
3837 unsigned Align = cast<ConstantInt>(I.getOperand(4))->getZExtValue();
3839 // If the source and destination are known to not be aliases, we can
3840 // lower memmove as memcpy.
3841 uint64_t Size = -1ULL;
3842 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op3))
3843 Size = C->getZExtValue();
3844 if (AA->alias(I.getOperand(1), Size, I.getOperand(2), Size) ==
3845 AliasAnalysis::NoAlias) {
3846 DAG.setRoot(DAG.getMemcpy(getRoot(), dl, Op1, Op2, Op3, Align, false,
3847 I.getOperand(1), 0, I.getOperand(2), 0));
3851 DAG.setRoot(DAG.getMemmove(getRoot(), dl, Op1, Op2, Op3, Align,
3852 I.getOperand(1), 0, I.getOperand(2), 0));
3855 case Intrinsic::dbg_stoppoint: {
3856 DbgStopPointInst &SPI = cast<DbgStopPointInst>(I);
3857 if (isValidDebugInfoIntrinsic(SPI, CodeGenOpt::Default)) {
3858 MachineFunction &MF = DAG.getMachineFunction();
3859 DebugLoc Loc = ExtractDebugLocation(SPI, MF.getDebugLocInfo());
3860 setCurDebugLoc(Loc);
3862 if (OptLevel == CodeGenOpt::None)
3863 DAG.setRoot(DAG.getDbgStopPoint(Loc, getRoot(),
3870 case Intrinsic::dbg_region_start: {
3871 DwarfWriter *DW = DAG.getDwarfWriter();
3872 DbgRegionStartInst &RSI = cast<DbgRegionStartInst>(I);
3873 if (isValidDebugInfoIntrinsic(RSI, OptLevel) && DW
3874 && DW->ShouldEmitDwarfDebug()) {
3875 unsigned LabelID =
3876 DW->RecordRegionStart(RSI.getContext());
3877 DAG.setRoot(DAG.getLabel(ISD::DBG_LABEL, getCurDebugLoc(),
3878 getRoot(), LabelID));
3882 case Intrinsic::dbg_region_end: {
3883 DwarfWriter *DW = DAG.getDwarfWriter();
3884 DbgRegionEndInst &REI = cast<DbgRegionEndInst>(I);
3886 if (!isValidDebugInfoIntrinsic(REI, OptLevel) || !DW
3887 || !DW->ShouldEmitDwarfDebug())
3890 MachineFunction &MF = DAG.getMachineFunction();
3891 DISubprogram Subprogram(REI.getContext());
3893 if (isInlinedFnEnd(REI, MF.getFunction())) {
3894 // This is end of inlined function. Debugging information for inlined
3895 // function is not handled yet (only supported by FastISel).
3896 if (OptLevel == CodeGenOpt::None) {
3897 unsigned ID = DW->RecordInlinedFnEnd(Subprogram);
3899 // Returned ID is 0 if this is unbalanced "end of inlined
3900 // scope". This could happen if optimizer eats dbg intrinsics or
3901 // "beginning of inlined scope" is not recoginized due to missing
3902 // location info. In such cases, do ignore this region.end.
3903 DAG.setRoot(DAG.getLabel(ISD::DBG_LABEL, getCurDebugLoc(),
3904 getRoot(), ID));
3909 unsigned LabelID =
3910 DW->RecordRegionEnd(REI.getContext());
3911 DAG.setRoot(DAG.getLabel(ISD::DBG_LABEL, getCurDebugLoc(),
3912 getRoot(), LabelID));
3915 case Intrinsic::dbg_func_start: {
3916 DwarfWriter *DW = DAG.getDwarfWriter();
3917 DbgFuncStartInst &FSI = cast<DbgFuncStartInst>(I);
3918 if (!isValidDebugInfoIntrinsic(FSI, CodeGenOpt::None))
3921 MachineFunction &MF = DAG.getMachineFunction();
3922 // This is the beginning of an inlined function.
3923 if (isInlinedFnStart(FSI, MF.getFunction())) {
3924 if (OptLevel != CodeGenOpt::None)
3925 // FIXME: Debugging information for inlined functions is only
3926 // supported at CodeGenOpt::None.
3929 DebugLoc PrevLoc = CurDebugLoc;
3930 // If llvm.dbg.func.start is seen in a new block before any
3931 // llvm.dbg.stoppoint intrinsic then the location info is unknown.
3932 // FIXME: Why is DebugLoc reset at the beginning of each block?
3933 if (PrevLoc.isUnknown())
3936 // Record the source line.
3937 setCurDebugLoc(ExtractDebugLocation(FSI, MF.getDebugLocInfo()));
3939 if (!DW || !DW->ShouldEmitDwarfDebug())
3941 DebugLocTuple PrevLocTpl = MF.getDebugLocTuple(PrevLoc);
3942 DISubprogram SP(FSI.getSubprogram());
3943 DICompileUnit CU(PrevLocTpl.CompileUnit);
3944 unsigned LabelID = DW->RecordInlinedFnStart(SP, CU,
3947 DAG.setRoot(DAG.getLabel(ISD::DBG_LABEL, getCurDebugLoc(),
3948 getRoot(), LabelID));
3952 // This is the beginning of a new function.
3953 MF.setDefaultDebugLoc(ExtractDebugLocation(FSI, MF.getDebugLocInfo()));
3955 if (!DW || !DW->ShouldEmitDwarfDebug())
3957 // llvm.dbg.func_start also defines beginning of function scope.
3958 DW->RecordRegionStart(FSI.getSubprogram());
3961 case Intrinsic::dbg_declare: {
3962 if (OptLevel != CodeGenOpt::None)
3963 // FIXME: Variable debug info is not supported here.
3965 DwarfWriter *DW = DAG.getDwarfWriter();
3968 DbgDeclareInst &DI = cast<DbgDeclareInst>(I);
3969 if (!isValidDebugInfoIntrinsic(DI, CodeGenOpt::None))
3972 Value *Variable = DI.getVariable();
3973 Value *Address = DI.getAddress();
3974 if (BitCastInst *BCI = dyn_cast<BitCastInst>(Address))
3975 Address = BCI->getOperand(0);
3976 AllocaInst *AI = dyn_cast<AllocaInst>(Address);
3977 // Don't handle byval struct arguments or VLAs, for example.
3980 DenseMap<const AllocaInst*, int>::iterator SI =
3981 FuncInfo.StaticAllocaMap.find(AI);
3982 if (SI == FuncInfo.StaticAllocaMap.end())
3984 int FI = SI->second;
3985 DW->RecordVariable(cast<MDNode>(Variable), FI);
3988 case Intrinsic::eh_exception: {
3989 // Insert the EXCEPTIONADDR instruction.
3990 assert(CurMBB->isLandingPad() &&"Call to eh.exception not in landing pad!");
3991 SDVTList VTs = DAG.getVTList(TLI.getPointerTy(), MVT::Other);
3992 SDValue Ops[1];
3993 Ops[0] = DAG.getRoot();
3994 SDValue Op = DAG.getNode(ISD::EXCEPTIONADDR, dl, VTs, Ops, 1);
3995 setValue(&I, Op);
3996 DAG.setRoot(Op.getValue(1));
4000 case Intrinsic::eh_selector_i32:
4001 case Intrinsic::eh_selector_i64: {
4002 MachineModuleInfo *MMI = DAG.getMachineModuleInfo();
4004 if (CurMBB->isLandingPad())
4005 AddCatchInfo(I, MMI, CurMBB);
4008 FuncInfo.CatchInfoLost.insert(&I);
4010 // FIXME: Mark exception selector register as live in. Hack for PR1508.
4011 unsigned Reg = TLI.getExceptionSelectorRegister();
4012 if (Reg) CurMBB->addLiveIn(Reg);
4015 // Insert the EHSELECTION instruction.
4016 SDVTList VTs = DAG.getVTList(TLI.getPointerTy(), MVT::Other);
4017 SDValue Ops[2];
4018 Ops[0] = getValue(I.getOperand(1));
4019 Ops[1] = getRoot();
4020 SDValue Op = DAG.getNode(ISD::EHSELECTION, dl, VTs, Ops, 2);
4022 DAG.setRoot(Op.getValue(1));
4024 MVT::SimpleValueType VT =
4025 (Intrinsic == Intrinsic::eh_selector_i32 ? MVT::i32 : MVT::i64);
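// The selector value is produced in the pointer-sized type; widen or narrow
// it to the i32/i64 result type that this intrinsic variant expects.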
4026 if (Op.getValueType().getSimpleVT() < VT)
4027 Op = DAG.getNode(ISD::ZERO_EXTEND, dl, VT, Op);
4028 else if (Op.getValueType().getSimpleVT() > VT)
4029 Op = DAG.getNode(ISD::TRUNCATE, dl, VT, Op);
4035 case Intrinsic::eh_typeid_for_i32:
4036 case Intrinsic::eh_typeid_for_i64: {
4037 MachineModuleInfo *MMI = DAG.getMachineModuleInfo();
4038 EVT VT = (Intrinsic == Intrinsic::eh_typeid_for_i32 ?
4039 MVT::i32 : MVT::i64);
4042 // Find the type id for the given typeinfo.
4043 GlobalVariable *GV = ExtractTypeInfo(I.getOperand(1));
4045 unsigned TypeID = MMI->getTypeIDFor(GV);
4046 setValue(&I, DAG.getConstant(TypeID, VT));
4048 // Return something different to eh_selector.
4049 setValue(&I, DAG.getConstant(1, VT));
4055 case Intrinsic::eh_return_i32:
4056 case Intrinsic::eh_return_i64:
4057 if (MachineModuleInfo *MMI = DAG.getMachineModuleInfo()) {
4058 MMI->setCallsEHReturn(true);
4059 DAG.setRoot(DAG.getNode(ISD::EH_RETURN, dl,
4062 getValue(I.getOperand(1)),
4063 getValue(I.getOperand(2))));
4065 setValue(&I, DAG.getConstant(0, TLI.getPointerTy()));
4069 case Intrinsic::eh_unwind_init:
4070 if (MachineModuleInfo *MMI = DAG.getMachineModuleInfo()) {
4071 MMI->setCallsUnwindInit(true);
4076 case Intrinsic::eh_dwarf_cfa: {
4077 EVT VT = getValue(I.getOperand(1)).getValueType();
4078 SDValue CfaArg;
4079 if (VT.bitsGT(TLI.getPointerTy()))
4080 CfaArg = DAG.getNode(ISD::TRUNCATE, dl,
4081 TLI.getPointerTy(), getValue(I.getOperand(1)));
4083 CfaArg = DAG.getNode(ISD::SIGN_EXTEND, dl,
4084 TLI.getPointerTy(), getValue(I.getOperand(1)));
4086 SDValue Offset = DAG.getNode(ISD::ADD, dl,
4088 DAG.getNode(ISD::FRAME_TO_ARGS_OFFSET, dl,
4089 TLI.getPointerTy()),
4091 setValue(&I, DAG.getNode(ISD::ADD, dl,
4093 DAG.getNode(ISD::FRAMEADDR, dl,
4096 TLI.getPointerTy())),
4100 case Intrinsic::convertff:
4101 case Intrinsic::convertfsi:
4102 case Intrinsic::convertfui:
4103 case Intrinsic::convertsif:
4104 case Intrinsic::convertuif:
4105 case Intrinsic::convertss:
4106 case Intrinsic::convertsu:
4107 case Intrinsic::convertus:
4108 case Intrinsic::convertuu: {
4109 ISD::CvtCode Code = ISD::CVT_INVALID;
4110 switch (Intrinsic) {
4111 case Intrinsic::convertff: Code = ISD::CVT_FF; break;
4112 case Intrinsic::convertfsi: Code = ISD::CVT_FS; break;
4113 case Intrinsic::convertfui: Code = ISD::CVT_FU; break;
4114 case Intrinsic::convertsif: Code = ISD::CVT_SF; break;
4115 case Intrinsic::convertuif: Code = ISD::CVT_UF; break;
4116 case Intrinsic::convertss: Code = ISD::CVT_SS; break;
4117 case Intrinsic::convertsu: Code = ISD::CVT_SU; break;
4118 case Intrinsic::convertus: Code = ISD::CVT_US; break;
4119 case Intrinsic::convertuu: Code = ISD::CVT_UU; break;
4121 EVT DestVT = TLI.getValueType(I.getType());
4122 Value* Op1 = I.getOperand(1);
4123 setValue(&I, DAG.getConvertRndSat(DestVT, getCurDebugLoc(), getValue(Op1),
4124 DAG.getValueType(DestVT),
4125 DAG.getValueType(getValue(Op1).getValueType()),
4126 getValue(I.getOperand(2)),
4127 getValue(I.getOperand(3)),
4132 case Intrinsic::sqrt:
4133 setValue(&I, DAG.getNode(ISD::FSQRT, dl,
4134 getValue(I.getOperand(1)).getValueType(),
4135 getValue(I.getOperand(1))));
4137 case Intrinsic::powi:
4138 setValue(&I, DAG.getNode(ISD::FPOWI, dl,
4139 getValue(I.getOperand(1)).getValueType(),
4140 getValue(I.getOperand(1)),
4141 getValue(I.getOperand(2))));
4143 case Intrinsic::sin:
4144 setValue(&I, DAG.getNode(ISD::FSIN, dl,
4145 getValue(I.getOperand(1)).getValueType(),
4146 getValue(I.getOperand(1))));
4148 case Intrinsic::cos:
4149 setValue(&I, DAG.getNode(ISD::FCOS, dl,
4150 getValue(I.getOperand(1)).getValueType(),
4151 getValue(I.getOperand(1))));
4153 case Intrinsic::log:
4156 case Intrinsic::log2:
4159 case Intrinsic::log10:
4162 case Intrinsic::exp:
4165 case Intrinsic::exp2:
4168 case Intrinsic::pow:
4171 case Intrinsic::pcmarker: {
4172 SDValue Tmp = getValue(I.getOperand(1));
4173 DAG.setRoot(DAG.getNode(ISD::PCMARKER, dl, MVT::Other, getRoot(), Tmp));
4176 case Intrinsic::readcyclecounter: {
4177 SDValue Op = getRoot();
4178 SDValue Tmp = DAG.getNode(ISD::READCYCLECOUNTER, dl,
4179 DAG.getVTList(MVT::i64, MVT::Other),
4182 DAG.setRoot(Tmp.getValue(1));
4185 case Intrinsic::bswap:
4186 setValue(&I, DAG.getNode(ISD::BSWAP, dl,
4187 getValue(I.getOperand(1)).getValueType(),
4188 getValue(I.getOperand(1))));
4190 case Intrinsic::cttz: {
4191 SDValue Arg = getValue(I.getOperand(1));
4192 EVT Ty = Arg.getValueType();
4193 SDValue result = DAG.getNode(ISD::CTTZ, dl, Ty, Arg);
4194 setValue(&I, result);
4197 case Intrinsic::ctlz: {
4198 SDValue Arg = getValue(I.getOperand(1));
4199 EVT Ty = Arg.getValueType();
4200 SDValue result = DAG.getNode(ISD::CTLZ, dl, Ty, Arg);
4201 setValue(&I, result);
4204 case Intrinsic::ctpop: {
4205 SDValue Arg = getValue(I.getOperand(1));
4206 EVT Ty = Arg.getValueType();
4207 SDValue result = DAG.getNode(ISD::CTPOP, dl, Ty, Arg);
4208 setValue(&I, result);
4211 case Intrinsic::stacksave: {
4212 SDValue Op = getRoot();
4213 SDValue Tmp = DAG.getNode(ISD::STACKSAVE, dl,
4214 DAG.getVTList(TLI.getPointerTy(), MVT::Other), &Op, 1);
4216 DAG.setRoot(Tmp.getValue(1));
4219 case Intrinsic::stackrestore: {
4220 SDValue Tmp = getValue(I.getOperand(1));
4221 DAG.setRoot(DAG.getNode(ISD::STACKRESTORE, dl, MVT::Other, getRoot(), Tmp));
4224 case Intrinsic::stackprotector: {
4225 // Emit code into the DAG to store the stack guard onto the stack.
4226 MachineFunction &MF = DAG.getMachineFunction();
4227 MachineFrameInfo *MFI = MF.getFrameInfo();
4228 EVT PtrTy = TLI.getPointerTy();
4230 SDValue Src = getValue(I.getOperand(1)); // The guard's value.
4231 AllocaInst *Slot = cast<AllocaInst>(I.getOperand(2));
4233 int FI = FuncInfo.StaticAllocaMap[Slot];
4234 MFI->setStackProtectorIndex(FI);
4236 SDValue FIN = DAG.getFrameIndex(FI, PtrTy);
4238 // Store the stack protector onto the stack.
4239 SDValue Result = DAG.getStore(getRoot(), getCurDebugLoc(), Src, FIN,
4240 PseudoSourceValue::getFixedStack(FI),
4242 setValue(&I, Result);
4243 DAG.setRoot(Result);
4246 case Intrinsic::var_annotation:
4247 // Discard annotate attributes
4250 case Intrinsic::init_trampoline: {
4251 const Function *F = cast<Function>(I.getOperand(2)->stripPointerCasts());
4253 SDValue Ops[6];
4254 Ops[0] = getRoot();
4255 Ops[1] = getValue(I.getOperand(1));
4256 Ops[2] = getValue(I.getOperand(2));
4257 Ops[3] = getValue(I.getOperand(3));
4258 Ops[4] = DAG.getSrcValue(I.getOperand(1));
4259 Ops[5] = DAG.getSrcValue(F);
4261 SDValue Tmp = DAG.getNode(ISD::TRAMPOLINE, dl,
4262 DAG.getVTList(TLI.getPointerTy(), MVT::Other),
4266 DAG.setRoot(Tmp.getValue(1));
4270 case Intrinsic::gcroot:
4272 Value *Alloca = I.getOperand(1);
4273 Constant *TypeMap = cast<Constant>(I.getOperand(2));
4275 FrameIndexSDNode *FI = cast<FrameIndexSDNode>(getValue(Alloca).getNode());
4276 GFI->addStackRoot(FI->getIndex(), TypeMap);
4280 case Intrinsic::gcread:
4281 case Intrinsic::gcwrite:
4282 llvm_unreachable("GC failed to lower gcread/gcwrite intrinsics!");
4285 case Intrinsic::flt_rounds: {
4286 setValue(&I, DAG.getNode(ISD::FLT_ROUNDS_, dl, MVT::i32));
4290 case Intrinsic::trap: {
4291 DAG.setRoot(DAG.getNode(ISD::TRAP, dl,MVT::Other, getRoot()));
4295 case Intrinsic::uadd_with_overflow:
4296 return implVisitAluOverflow(I, ISD::UADDO);
4297 case Intrinsic::sadd_with_overflow:
4298 return implVisitAluOverflow(I, ISD::SADDO);
4299 case Intrinsic::usub_with_overflow:
4300 return implVisitAluOverflow(I, ISD::USUBO);
4301 case Intrinsic::ssub_with_overflow:
4302 return implVisitAluOverflow(I, ISD::SSUBO);
4303 case Intrinsic::umul_with_overflow:
4304 return implVisitAluOverflow(I, ISD::UMULO);
4305 case Intrinsic::smul_with_overflow:
4306 return implVisitAluOverflow(I, ISD::SMULO);
4308 case Intrinsic::prefetch: {
4309 SDValue Ops[4];
4310 Ops[0] = getRoot();
4311 Ops[1] = getValue(I.getOperand(1));
4312 Ops[2] = getValue(I.getOperand(2));
4313 Ops[3] = getValue(I.getOperand(3));
4314 DAG.setRoot(DAG.getNode(ISD::PREFETCH, dl, MVT::Other, &Ops[0], 4));
4318 case Intrinsic::memory_barrier: {
4319 SDValue Ops[6];
4320 Ops[0] = getRoot();
4321 for (int x = 1; x < 6; ++x)
4322 Ops[x] = getValue(I.getOperand(x));
4324 DAG.setRoot(DAG.getNode(ISD::MEMBARRIER, dl, MVT::Other, &Ops[0], 6));
4327 case Intrinsic::atomic_cmp_swap: {
4328 SDValue Root = getRoot();
4329 SDValue L =
4330 DAG.getAtomic(ISD::ATOMIC_CMP_SWAP, getCurDebugLoc(),
4331 getValue(I.getOperand(2)).getValueType().getSimpleVT(),
4333 getValue(I.getOperand(1)),
4334 getValue(I.getOperand(2)),
4335 getValue(I.getOperand(3)),
4338 DAG.setRoot(L.getValue(1));
4341 case Intrinsic::atomic_load_add:
4342 return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_ADD);
4343 case Intrinsic::atomic_load_sub:
4344 return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_SUB);
4345 case Intrinsic::atomic_load_or:
4346 return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_OR);
4347 case Intrinsic::atomic_load_xor:
4348 return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_XOR);
4349 case Intrinsic::atomic_load_and:
4350 return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_AND);
4351 case Intrinsic::atomic_load_nand:
4352 return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_NAND);
4353 case Intrinsic::atomic_load_max:
4354 return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_MAX);
4355 case Intrinsic::atomic_load_min:
4356 return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_MIN);
4357 case Intrinsic::atomic_load_umin:
4358 return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_UMIN);
4359 case Intrinsic::atomic_load_umax:
4360 return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_UMAX);
4361 case Intrinsic::atomic_swap:
4362 return implVisitBinaryAtomic(I, ISD::ATOMIC_SWAP);
4366 /// Test if the given instruction is in a position to be optimized
4367 /// with a tail-call. This roughly means that it's in a block with
4368 /// a return and there's nothing that needs to be scheduled
4369 /// between it and the return.
4371 /// This function only tests target-independent requirements.
4372 /// For target-dependent requirements, a target should override
4373 /// TargetLowering::IsEligibleForTailCallOptimization.
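///
/// For example, in a block ending with
///   %t = call i32 @f(i32 %x)
///   ret i32 %t
/// the call is in tail call position, whereas an instruction with side
/// effects scheduled between the call and the return would disqualify it.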
4375 static bool
4376 isInTailCallPosition(const Instruction *I, Attributes RetAttr,
4377 const TargetLowering &TLI) {
4378 const BasicBlock *ExitBB = I->getParent();
4379 const TerminatorInst *Term = ExitBB->getTerminator();
4380 const ReturnInst *Ret = dyn_cast<ReturnInst>(Term);
4381 const Function *F = ExitBB->getParent();
4383 // The block must end in a return statement or an unreachable.
4384 if (!Ret && !isa<UnreachableInst>(Term)) return false;
4386 // If I will have a chain, make sure no other instruction that will have a
4387 // chain interposes between I and the return.
4388 if (I->mayHaveSideEffects() || I->mayReadFromMemory() ||
4389 !I->isSafeToSpeculativelyExecute())
4390 for (BasicBlock::const_iterator BBI = prior(prior(ExitBB->end())); ;
4391 --BBI) {
4392 if (&*BBI == I)
4393 break;
4394 if (BBI->mayHaveSideEffects() || BBI->mayReadFromMemory() ||
4395 !BBI->isSafeToSpeculativelyExecute())
4396 return false;
4397 }
4399 // If the block ends with a void return or unreachable, it doesn't matter
4400 // what the call's return type is.
4401 if (!Ret || Ret->getNumOperands() == 0) return true;
4403 // Conservatively require the attributes of the call to match those of
4404 // the function's return.
4405 if (F->getAttributes().getRetAttributes() != RetAttr)
4406 return false;
4408 // Otherwise, make sure the unmodified return value of I is the return value.
4409 for (const Instruction *U = dyn_cast<Instruction>(Ret->getOperand(0)); ;
4410 U = dyn_cast<Instruction>(U->getOperand(0))) {
4413 if (!U->hasOneUse())
4417 // Check for a truly no-op truncate.
4418 if (isa<TruncInst>(U) &&
4419 TLI.isTruncateFree(U->getOperand(0)->getType(), U->getType()))
4421 // Check for a truly no-op bitcast.
4422 if (isa<BitCastInst>(U) &&
4423 (U->getOperand(0)->getType() == U->getType() ||
4424 (isa<PointerType>(U->getOperand(0)->getType()) &&
4425 isa<PointerType>(U->getType()))))
4427 // Otherwise it's not a true no-op.
4434 void SelectionDAGLowering::LowerCallTo(CallSite CS, SDValue Callee,
4435 bool isTailCall,
4436 MachineBasicBlock *LandingPad) {
4437 const PointerType *PT = cast<PointerType>(CS.getCalledValue()->getType());
4438 const FunctionType *FTy = cast<FunctionType>(PT->getElementType());
4439 MachineModuleInfo *MMI = DAG.getMachineModuleInfo();
4440 unsigned BeginLabel = 0, EndLabel = 0;
4442 TargetLowering::ArgListTy Args;
4443 TargetLowering::ArgListEntry Entry;
4444 Args.reserve(CS.arg_size());
4446 for (CallSite::arg_iterator i = CS.arg_begin(), e = CS.arg_end();
4447 i != e; ++i) {
4448 SDValue ArgNode = getValue(*i);
4449 Entry.Node = ArgNode; Entry.Ty = (*i)->getType();
4451 unsigned attrInd = i - CS.arg_begin() + 1;
4452 Entry.isSExt = CS.paramHasAttr(attrInd, Attribute::SExt);
4453 Entry.isZExt = CS.paramHasAttr(attrInd, Attribute::ZExt);
4454 Entry.isInReg = CS.paramHasAttr(attrInd, Attribute::InReg);
4455 Entry.isSRet = CS.paramHasAttr(attrInd, Attribute::StructRet);
4456 Entry.isNest = CS.paramHasAttr(attrInd, Attribute::Nest);
4457 Entry.isByVal = CS.paramHasAttr(attrInd, Attribute::ByVal);
4458 Entry.Alignment = CS.getParamAlignment(attrInd);
4459 Args.push_back(Entry);
4462 if (LandingPad && MMI) {
4463 // Insert a label before the invoke call to mark the try range. This can be
4464 // used to detect deletion of the invoke via the MachineModuleInfo.
4465 BeginLabel = MMI->NextLabelID();
4467 // Both PendingLoads and PendingExports must be flushed here;
4468 // this call might not return.
4470 DAG.setRoot(DAG.getLabel(ISD::EH_LABEL, getCurDebugLoc(),
4471 getControlRoot(), BeginLabel));
4474 // Check if target-independent constraints permit a tail call here.
4475 // Target-dependent constraints are checked within TLI.LowerCallTo.
4476 if (isTailCall &&
4477 !isInTailCallPosition(CS.getInstruction(),
4478 CS.getAttributes().getRetAttributes(),
4479 TLI))
4480 isTailCall = false;
4482 std::pair<SDValue,SDValue> Result =
4483 TLI.LowerCallTo(getRoot(), CS.getType(),
4484 CS.paramHasAttr(0, Attribute::SExt),
4485 CS.paramHasAttr(0, Attribute::ZExt), FTy->isVarArg(),
4486 CS.paramHasAttr(0, Attribute::InReg), FTy->getNumParams(),
4487 CS.getCallingConv(),
4488 isTailCall,
4489 !CS.getInstruction()->use_empty(),
4490 Callee, Args, DAG, getCurDebugLoc());
4491 assert((isTailCall || Result.second.getNode()) &&
4492 "Non-null chain expected with non-tail call!");
4493 assert((Result.second.getNode() || !Result.first.getNode()) &&
4494 "Null value expected with tail call!");
4495 if (Result.first.getNode())
4496 setValue(CS.getInstruction(), Result.first);
4497 // As a special case, a null chain means that a tail call has
4498 // been emitted and the DAG root is already updated.
4499 if (Result.second.getNode())
4500 DAG.setRoot(Result.second);
4504 if (LandingPad && MMI) {
4505 // Insert a label at the end of the invoke call to mark the try range. This
4506 // can be used to detect deletion of the invoke via the MachineModuleInfo.
4507 EndLabel = MMI->NextLabelID();
4508 DAG.setRoot(DAG.getLabel(ISD::EH_LABEL, getCurDebugLoc(),
4509 getRoot(), EndLabel));
4511 // Inform MachineModuleInfo of range.
4512 MMI->addInvoke(LandingPad, BeginLabel, EndLabel);
4517 void SelectionDAGLowering::visitCall(CallInst &I) {
4518 const char *RenameFn = 0;
4519 if (Function *F = I.getCalledFunction()) {
4520 if (F->isDeclaration()) {
4521 const TargetIntrinsicInfo *II = TLI.getTargetMachine().getIntrinsicInfo();
4523 if (unsigned IID = II->getIntrinsicID(F)) {
4524 RenameFn = visitIntrinsicCall(I, IID);
4529 if (unsigned IID = F->getIntrinsicID()) {
4530 RenameFn = visitIntrinsicCall(I, IID);
4536 // Check for well-known libc/libm calls. If the function is internal, it
4537 // can't be a library call.
4538 if (!F->hasLocalLinkage() && F->hasName()) {
4539 StringRef Name = F->getName();
4540 if (Name == "copysign" || Name == "copysignf") {
4541 if (I.getNumOperands() == 3 && // Basic sanity checks.
4542 I.getOperand(1)->getType()->isFloatingPoint() &&
4543 I.getType() == I.getOperand(1)->getType() &&
4544 I.getType() == I.getOperand(2)->getType()) {
4545 SDValue LHS = getValue(I.getOperand(1));
4546 SDValue RHS = getValue(I.getOperand(2));
4547 setValue(&I, DAG.getNode(ISD::FCOPYSIGN, getCurDebugLoc(),
4548 LHS.getValueType(), LHS, RHS));
4551 } else if (Name == "fabs" || Name == "fabsf" || Name == "fabsl") {
4552 if (I.getNumOperands() == 2 && // Basic sanity checks.
4553 I.getOperand(1)->getType()->isFloatingPoint() &&
4554 I.getType() == I.getOperand(1)->getType()) {
4555 SDValue Tmp = getValue(I.getOperand(1));
4556 setValue(&I, DAG.getNode(ISD::FABS, getCurDebugLoc(),
4557 Tmp.getValueType(), Tmp));
4560 } else if (Name == "sin" || Name == "sinf" || Name == "sinl") {
4561 if (I.getNumOperands() == 2 && // Basic sanity checks.
4562 I.getOperand(1)->getType()->isFloatingPoint() &&
4563 I.getType() == I.getOperand(1)->getType()) {
4564 SDValue Tmp = getValue(I.getOperand(1));
4565 setValue(&I, DAG.getNode(ISD::FSIN, getCurDebugLoc(),
4566 Tmp.getValueType(), Tmp));
4569 } else if (Name == "cos" || Name == "cosf" || Name == "cosl") {
4570 if (I.getNumOperands() == 2 && // Basic sanity checks.
4571 I.getOperand(1)->getType()->isFloatingPoint() &&
4572 I.getType() == I.getOperand(1)->getType()) {
4573 SDValue Tmp = getValue(I.getOperand(1));
4574 setValue(&I, DAG.getNode(ISD::FCOS, getCurDebugLoc(),
4575 Tmp.getValueType(), Tmp));
4580 } else if (isa<InlineAsm>(I.getOperand(0))) {
4587 Callee = getValue(I.getOperand(0));
4589 Callee = DAG.getExternalSymbol(RenameFn, TLI.getPointerTy());
4591 // Check if we can potentially perform a tail call. More detailed
4592 // checking is done within LowerCallTo, after more information
4593 // about the call is known.
4594 bool isTailCall = PerformTailCallOpt && I.isTailCall();
4596 LowerCallTo(&I, Callee, isTailCall);
4600 /// getCopyFromRegs - Emit a series of CopyFromReg nodes that copies from
4601 /// this value and returns the result as a ValueVT value. This uses
4602 /// Chain/Flag as the input and updates them for the output Chain/Flag.
4603 /// If the Flag pointer is NULL, no flag is used.
4604 SDValue RegsForValue::getCopyFromRegs(SelectionDAG &DAG, DebugLoc dl,
4605 SDValue &Chain,
4606 SDValue *Flag) const {
4607 // Assemble the legal parts into the final values.
4608 SmallVector<SDValue, 4> Values(ValueVTs.size());
4609 SmallVector<SDValue, 8> Parts;
4610 for (unsigned Value = 0, Part = 0, e = ValueVTs.size(); Value != e; ++Value) {
4611 // Copy the legal parts from the registers.
4612 EVT ValueVT = ValueVTs[Value];
4613 unsigned NumRegs = TLI->getNumRegisters(*DAG.getContext(), ValueVT);
4614 EVT RegisterVT = RegVTs[Value];
4616 Parts.resize(NumRegs);
4617 for (unsigned i = 0; i != NumRegs; ++i) {
4618 SDValue P;
4619 if (Flag == 0) {
4620 P = DAG.getCopyFromReg(Chain, dl, Regs[Part+i], RegisterVT);
4621 } else {
4622 P = DAG.getCopyFromReg(Chain, dl, Regs[Part+i], RegisterVT, *Flag);
4623 *Flag = P.getValue(2);
4624 }
4625 Chain = P.getValue(1);
4627 // If the source register was virtual and if we know something about it,
4628 // add an assert node.
4629 if (TargetRegisterInfo::isVirtualRegister(Regs[Part+i]) &&
4630 RegisterVT.isInteger() && !RegisterVT.isVector()) {
4631 unsigned SlotNo = Regs[Part+i]-TargetRegisterInfo::FirstVirtualRegister;
4632 FunctionLoweringInfo &FLI = DAG.getFunctionLoweringInfo();
4633 if (FLI.LiveOutRegInfo.size() > SlotNo) {
4634 FunctionLoweringInfo::LiveOutInfo &LOI = FLI.LiveOutRegInfo[SlotNo];
4636 unsigned RegSize = RegisterVT.getSizeInBits();
4637 unsigned NumSignBits = LOI.NumSignBits;
4638 unsigned NumZeroBits = LOI.KnownZero.countLeadingOnes();
4640 // FIXME: We capture more information than the dag can represent. For
4641 // now, just use the tightest assertzext/assertsext possible.
4642 bool isSExt = true;
4643 EVT FromVT(MVT::Other);
4644 if (NumSignBits == RegSize)
4645 isSExt = true, FromVT = MVT::i1; // ASSERT SEXT 1
4646 else if (NumZeroBits >= RegSize-1)
4647 isSExt = false, FromVT = MVT::i1; // ASSERT ZEXT 1
4648 else if (NumSignBits > RegSize-8)
4649 isSExt = true, FromVT = MVT::i8; // ASSERT SEXT 8
4650 else if (NumZeroBits >= RegSize-8)
4651 isSExt = false, FromVT = MVT::i8; // ASSERT ZEXT 8
4652 else if (NumSignBits > RegSize-16)
4653 isSExt = true, FromVT = MVT::i16; // ASSERT SEXT 16
4654 else if (NumZeroBits >= RegSize-16)
4655 isSExt = false, FromVT = MVT::i16; // ASSERT ZEXT 16
4656 else if (NumSignBits > RegSize-32)
4657 isSExt = true, FromVT = MVT::i32; // ASSERT SEXT 32
4658 else if (NumZeroBits >= RegSize-32)
4659 isSExt = false, FromVT = MVT::i32; // ASSERT ZEXT 32
4661 if (FromVT != MVT::Other) {
4662 P = DAG.getNode(isSExt ? ISD::AssertSext : ISD::AssertZext, dl,
4663 RegisterVT, P, DAG.getValueType(FromVT));
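// The AssertSext/AssertZext node records that P is already a sign- or
// zero-extension from FromVT, so later extensions from FromVT can be
// folded away.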
4672 Values[Value] = getCopyFromParts(DAG, dl, Parts.begin(),
4673 NumRegs, RegisterVT, ValueVT);
4678 return DAG.getNode(ISD::MERGE_VALUES, dl,
4679 DAG.getVTList(&ValueVTs[0], ValueVTs.size()),
4680 &Values[0], ValueVTs.size());
4683 /// getCopyToRegs - Emit a series of CopyToReg nodes that copies the
4684 /// specified value into the registers specified by this object. This uses
4685 /// Chain/Flag as the input and updates them for the output Chain/Flag.
4686 /// If the Flag pointer is NULL, no flag is used.
4687 void RegsForValue::getCopyToRegs(SDValue Val, SelectionDAG &DAG, DebugLoc dl,
4688 SDValue &Chain, SDValue *Flag) const {
4689 // Get the list of the value's legal parts.
4690 unsigned NumRegs = Regs.size();
4691 SmallVector<SDValue, 8> Parts(NumRegs);
4692 for (unsigned Value = 0, Part = 0, e = ValueVTs.size(); Value != e; ++Value) {
4693 EVT ValueVT = ValueVTs[Value];
4694 unsigned NumParts = TLI->getNumRegisters(*DAG.getContext(), ValueVT);
4695 EVT RegisterVT = RegVTs[Value];
4697 getCopyToParts(DAG, dl, Val.getValue(Val.getResNo() + Value),
4698 &Parts[Part], NumParts, RegisterVT);
4702 // Copy the parts into the registers.
4703 SmallVector<SDValue, 8> Chains(NumRegs);
4704 for (unsigned i = 0; i != NumRegs; ++i) {
4705 SDValue Part;
4706 if (Flag == 0) {
4707 Part = DAG.getCopyToReg(Chain, dl, Regs[i], Parts[i]);
4708 } else {
4709 Part = DAG.getCopyToReg(Chain, dl, Regs[i], Parts[i], *Flag);
4710 *Flag = Part.getValue(1);
4711 }
4712 Chains[i] = Part.getValue(0);
4715 if (NumRegs == 1 || Flag)
4716 // If NumRegs > 1 && Flag is used then the use of the last CopyToReg is
4717 // flagged to it. That is the CopyToReg nodes and the user are considered
4718 // a single scheduling unit. If we create a TokenFactor and return it as
4719 // chain, then the TokenFactor is both a predecessor (operand) of the
4720 // user as well as a successor (the TF operands are flagged to the user).
4721 // c1, f1 = CopyToReg
4722 // c2, f2 = CopyToReg
4723 // c3 = TokenFactor c1, c2
4726 Chain = Chains[NumRegs-1];
4727 else
4728 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, &Chains[0], NumRegs);
4731 /// AddInlineAsmOperands - Add this value to the specified inlineasm node
4732 /// operand list. This adds the code marker and includes the number of
4733 /// values added into it.
4734 void RegsForValue::AddInlineAsmOperands(unsigned Code,
4735 bool HasMatching,unsigned MatchingIdx,
4736 SelectionDAG &DAG,
4737 std::vector<SDValue> &Ops) const {
4738 EVT IntPtrTy = DAG.getTargetLoweringInfo().getPointerTy();
4739 assert(Regs.size() < (1 << 13) && "Too many inline asm outputs!");
4740 unsigned Flag = Code | (Regs.size() << 3);
4741 if (HasMatching)
4742 Flag |= 0x80000000 | (MatchingIdx << 16);
4743 Ops.push_back(DAG.getTargetConstant(Flag, IntPtrTy));
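// The flag word layout: bits 0-2 hold the operand code, bits 3-15 hold the
// register count (hence the (1 << 13) assert above), and for matching
// constraints bit 31 is set with the matched operand index in bits 16-30.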
4744 for (unsigned Value = 0, Reg = 0, e = ValueVTs.size(); Value != e; ++Value) {
4745 unsigned NumRegs = TLI->getNumRegisters(*DAG.getContext(), ValueVTs[Value]);
4746 EVT RegisterVT = RegVTs[Value];
4747 for (unsigned i = 0; i != NumRegs; ++i) {
4748 assert(Reg < Regs.size() && "Mismatch in # registers expected");
4749 Ops.push_back(DAG.getRegister(Regs[Reg++], RegisterVT));
4754 /// isAllocatableRegister - If the specified register is safe to allocate,
4755 /// i.e. it isn't a stack pointer or some other special register, return the
4756 /// register class for the register. Otherwise, return null.
4757 static const TargetRegisterClass *
4758 isAllocatableRegister(unsigned Reg, MachineFunction &MF,
4759 const TargetLowering &TLI,
4760 const TargetRegisterInfo *TRI) {
4761 EVT FoundVT = MVT::Other;
4762 const TargetRegisterClass *FoundRC = 0;
4763 for (TargetRegisterInfo::regclass_iterator RCI = TRI->regclass_begin(),
4764 E = TRI->regclass_end(); RCI != E; ++RCI) {
4765 EVT ThisVT = MVT::Other;
4767 const TargetRegisterClass *RC = *RCI;
4768 // If none of the value types for this register class are valid, we
4769 // can't use it. For example, 64-bit reg classes on 32-bit targets.
4770 for (TargetRegisterClass::vt_iterator I = RC->vt_begin(), E = RC->vt_end();
4771 I != E; ++I) {
4772 if (TLI.isTypeLegal(*I)) {
4773 // If we have already found this register in a different register class,
4774 // choose the one with the largest VT specified. For example, on
4775 // PowerPC, we favor f64 register classes over f32.
4776 if (FoundVT == MVT::Other || FoundVT.bitsLT(*I)) {
4783 if (ThisVT == MVT::Other) continue;
4785 // NOTE: This isn't ideal. In particular, this might allocate the
4786 // frame pointer in functions that need it (due to them not being taken
4787 // out of allocation, because a variable sized allocation hasn't been seen
4788 // yet). This is a slight code pessimization, but should still work.
4789 for (TargetRegisterClass::iterator I = RC->allocation_order_begin(MF),
4790 E = RC->allocation_order_end(MF); I != E; ++I)
4792 // We found a matching register class. Keep looking at others in case
4793 // we find one with larger registers that this physreg is also in.
4804 /// AsmOperandInfo - This contains information for each constraint that we are
4805 /// lowering.
4806 class VISIBILITY_HIDDEN SDISelAsmOperandInfo :
4807 public TargetLowering::AsmOperandInfo {
4808 public:
4809 /// CallOperand - If this is the result output operand or a clobber,
4810 /// this is null, otherwise it is the incoming operand to the CallInst.
4811 /// This gets modified as the asm is processed.
4812 SDValue CallOperand;
4814 /// AssignedRegs - If this is a register or register class operand, this
4815 /// contains the set of registers corresponding to the operand.
4816 RegsForValue AssignedRegs;
4818 explicit SDISelAsmOperandInfo(const InlineAsm::ConstraintInfo &info)
4819 : TargetLowering::AsmOperandInfo(info), CallOperand(0,0) {
4822 /// MarkAllocatedRegs - Once AssignedRegs is set, mark the assigned registers
4823 /// busy in OutputRegs/InputRegs.
4824 void MarkAllocatedRegs(bool isOutReg, bool isInReg,
4825 std::set<unsigned> &OutputRegs,
4826 std::set<unsigned> &InputRegs,
4827 const TargetRegisterInfo &TRI) const {
4829 for (unsigned i = 0, e = AssignedRegs.Regs.size(); i != e; ++i)
4830 MarkRegAndAliases(AssignedRegs.Regs[i], OutputRegs, TRI);
4833 for (unsigned i = 0, e = AssignedRegs.Regs.size(); i != e; ++i)
4834 MarkRegAndAliases(AssignedRegs.Regs[i], InputRegs, TRI);
4838 /// getCallOperandValEVT - Return the EVT of the Value* that this operand
4839 /// corresponds to. If there is no Value* for this operand, it returns
4840 /// MVT::Other.
4841 EVT getCallOperandValEVT(LLVMContext &Context,
4842 const TargetLowering &TLI,
4843 const TargetData *TD) const {
4844 if (CallOperandVal == 0) return MVT::Other;
4846 if (isa<BasicBlock>(CallOperandVal))
4847 return TLI.getPointerTy();
4849 const llvm::Type *OpTy = CallOperandVal->getType();
4851 // If this is an indirect operand, the operand is a pointer to the
4852 // accessed type.
4853 if (isIndirect)
4854 OpTy = cast<PointerType>(OpTy)->getElementType();
4856 // If OpTy is not a single value, it may be a struct/union that we
4857 // can tile with integers.
4858 if (!OpTy->isSingleValueType() && OpTy->isSized()) {
4859 unsigned BitSize = TD->getTypeSizeInBits(OpTy);
4868 OpTy = IntegerType::get(Context, BitSize);
4873 return TLI.getValueType(OpTy, true);
4877 /// MarkRegAndAliases - Mark the specified register and all aliases in the
4878 /// specified set.
4879 static void MarkRegAndAliases(unsigned Reg, std::set<unsigned> &Regs,
4880 const TargetRegisterInfo &TRI) {
4881 assert(TargetRegisterInfo::isPhysicalRegister(Reg) && "Isn't a physreg");
4882 Regs.insert(Reg);
4883 if (const unsigned *Aliases = TRI.getAliasSet(Reg))
4884 for (; *Aliases; ++Aliases)
4885 Regs.insert(*Aliases);
4888 } // end llvm namespace.
4891 /// GetRegistersForValue - Assign registers (virtual or physical) for the
4892 /// specified operand. We prefer to assign virtual registers, to allow the
4893 /// register allocator to handle the assignment process. However, if the asm uses
4894 /// features that we can't model on machineinstrs, we have SDISel do the
4895 /// allocation. This produces generally horrible, but correct, code.
4897 /// OpInfo describes the operand.
4898 /// Input and OutputRegs are the set of already allocated physical registers.
4900 void SelectionDAGLowering::
4901 GetRegistersForValue(SDISelAsmOperandInfo &OpInfo,
4902 std::set<unsigned> &OutputRegs,
4903 std::set<unsigned> &InputRegs) {
4904 LLVMContext &Context = FuncInfo.Fn->getContext();
4906 // Compute whether this value requires an input register, an output register,
4907 // or both.
4908 bool isOutReg = false;
4909 bool isInReg = false;
4910 switch (OpInfo.Type) {
4911 case InlineAsm::isOutput:
4914 // If there is an input constraint that matches this, we need to reserve
4915 // the input register so no other inputs allocate to it.
4916 isInReg = OpInfo.hasMatchingInput();
4918 case InlineAsm::isInput:
4922 case InlineAsm::isClobber:
4929 MachineFunction &MF = DAG.getMachineFunction();
4930 SmallVector<unsigned, 4> Regs;
4932 // If this is a constraint for a single physreg, or a constraint for a
4933 // register class, find it.
4934 std::pair<unsigned, const TargetRegisterClass*> PhysReg =
4935 TLI.getRegForInlineAsmConstraint(OpInfo.ConstraintCode,
4936 OpInfo.ConstraintVT);
4938 unsigned NumRegs = 1;
4939 if (OpInfo.ConstraintVT != MVT::Other) {
4940 // If this is an FP input in an integer register (or vice versa), insert a bit
4941 // cast of the input value. More generally, handle any case where the input
4942 // value disagrees with the register class we plan to stick this in.
4943 if (OpInfo.Type == InlineAsm::isInput &&
4944 PhysReg.second && !PhysReg.second->hasType(OpInfo.ConstraintVT)) {
4945 // Try to convert to the first EVT that the reg class contains. If the
4946 // types are identical size, use a bitcast to convert (e.g. two differing
4948 EVT RegVT = *PhysReg.second->vt_begin();
4949 if (RegVT.getSizeInBits() == OpInfo.ConstraintVT.getSizeInBits()) {
4950 OpInfo.CallOperand = DAG.getNode(ISD::BIT_CONVERT, getCurDebugLoc(),
4951 RegVT, OpInfo.CallOperand);
4952 OpInfo.ConstraintVT = RegVT;
4953 } else if (RegVT.isInteger() && OpInfo.ConstraintVT.isFloatingPoint()) {
4954 // If the input is a FP value and we want it in FP registers, do a
4955 // bitcast to the corresponding integer type. This turns an f64 value
4956 // into i64, which can be passed with two i32 values on a 32-bit
4958 RegVT = EVT::getIntegerVT(Context,
4959 OpInfo.ConstraintVT.getSizeInBits());
4960 OpInfo.CallOperand = DAG.getNode(ISD::BIT_CONVERT, getCurDebugLoc(),
4961 RegVT, OpInfo.CallOperand);
4962 OpInfo.ConstraintVT = RegVT;
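// Illustration (hypothetical 32-bit target): an f64 value handed to a 32-bit
// integer register class takes this branch, is bit_converted to i64 (the
// integer type of matching width), and getNumRegisters below then reports
// that two i32 registers are needed to hold it.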
4966 NumRegs = TLI.getNumRegisters(Context, OpInfo.ConstraintVT);
4970 EVT ValueVT = OpInfo.ConstraintVT;
4972 // If this is a constraint for a specific physical register, like {r17},
// assign it now.
4974 if (unsigned AssignedReg = PhysReg.first) {
4975 const TargetRegisterClass *RC = PhysReg.second;
4976 if (OpInfo.ConstraintVT == MVT::Other)
4977 ValueVT = *RC->vt_begin();
4979 // Get the actual register value type. This is important, because the user
4980 // may have asked for (e.g.) the AX register in i32 type. We need to
4981 // remember that AX is actually i16 to get the right extension.
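// (Illustration only, assuming an x86-like target as in the comment above:
// "={ax}" with an i32 IR value resolves to the 16-bit AX register class, so
// RegVT becomes i16 while ValueVT stays i32, and the copies in and out of AX
// are extended or truncated accordingly.)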
4982 RegVT = *RC->vt_begin();
4984 // This is an explicit reference to a physical register.
4985 Regs.push_back(AssignedReg);
4987 // If this is an expanded reference, add the rest of the regs to Regs.
4989 TargetRegisterClass::iterator I = RC->begin();
4990 for (; *I != AssignedReg; ++I)
4991 assert(I != RC->end() && "Didn't find reg!");
4993 // Already added the first reg.
4995 for (; NumRegs; --NumRegs, ++I) {
4996 assert(I != RC->end() && "Ran out of registers to allocate!");
5000 OpInfo.AssignedRegs = RegsForValue(TLI, Regs, RegVT, ValueVT);
5001 const TargetRegisterInfo *TRI = DAG.getTarget().getRegisterInfo();
5002 OpInfo.MarkAllocatedRegs(isOutReg, isInReg, OutputRegs, InputRegs, *TRI);
5006 // Otherwise, if this was a reference to an LLVM register class, create vregs
5007 // for this reference.
5008 if (const TargetRegisterClass *RC = PhysReg.second) {
5009 RegVT = *RC->vt_begin();
5010 if (OpInfo.ConstraintVT == MVT::Other)
5013 // Create the appropriate number of virtual registers.
5014 MachineRegisterInfo &RegInfo = MF.getRegInfo();
5015 for (; NumRegs; --NumRegs)
5016 Regs.push_back(RegInfo.createVirtualRegister(RC));
5018 OpInfo.AssignedRegs = RegsForValue(TLI, Regs, RegVT, ValueVT);
5022 // This is a reference to a register class that doesn't directly correspond
5023 // to an LLVM register class. Allocate NumRegs consecutive, available,
5024 // registers from the class.
5025 std::vector<unsigned> RegClassRegs
5026 = TLI.getRegClassForInlineAsmConstraint(OpInfo.ConstraintCode,
5027 OpInfo.ConstraintVT);
5029 const TargetRegisterInfo *TRI = DAG.getTarget().getRegisterInfo();
5030 unsigned NumAllocated = 0;
5031 for (unsigned i = 0, e = RegClassRegs.size(); i != e; ++i) {
5032 unsigned Reg = RegClassRegs[i];
5033 // See if this register is available.
5034 if ((isOutReg && OutputRegs.count(Reg)) || // Already used.
5035 (isInReg && InputRegs.count(Reg))) { // Already used.
5036 // Make sure we find consecutive registers.
5041 // Check to see if this register is allocatable (i.e. don't give out the
// stack pointer).
5043 const TargetRegisterClass *RC = isAllocatableRegister(Reg, MF, TLI, TRI);
5044 if (!RC) { // Couldn't allocate this register.
5045 // Reset NumAllocated to make sure we return consecutive registers.
5050 // Okay, this register is good, we can use it.
5053 // If we allocated enough consecutive registers, succeed.
5054 if (NumAllocated == NumRegs) {
5055 unsigned RegStart = (i-NumAllocated)+1;
5056 unsigned RegEnd = i+1;
5057 // Mark all of the allocated registers used.
5058 for (unsigned i = RegStart; i != RegEnd; ++i)
5059 Regs.push_back(RegClassRegs[i]);
5061 OpInfo.AssignedRegs = RegsForValue(TLI, Regs, *RC->vt_begin(),
5062 OpInfo.ConstraintVT);
5063 OpInfo.MarkAllocatedRegs(isOutReg, isInReg, OutputRegs, InputRegs, *TRI);
5068 // Otherwise, we couldn't allocate enough registers for this.
5071 /// hasInlineAsmMemConstraint - Return true if the inline asm instruction being
5072 /// processed uses a memory 'm' constraint.
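///
/// Illustrative example (not from this file): an asm call whose constraint
/// string contains a memory code such as "=*m" (e.g. an output stored
/// through a pointer operand) makes this return true, and visitInlineAsm
/// then flushes pending loads before building the INLINEASM node.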
static bool
5074 hasInlineAsmMemConstraint(std::vector<InlineAsm::ConstraintInfo> &CInfos,
5075 const TargetLowering &TLI) {
5076 for (unsigned i = 0, e = CInfos.size(); i != e; ++i) {
5077 InlineAsm::ConstraintInfo &CI = CInfos[i];
5078 for (unsigned j = 0, ee = CI.Codes.size(); j != ee; ++j) {
5079 TargetLowering::ConstraintType CType = TLI.getConstraintType(CI.Codes[j]);
5080 if (CType == TargetLowering::C_Memory)
5084 // Indirect operands access memory.
5092 /// visitInlineAsm - Handle a call to an InlineAsm object.
5094 void SelectionDAGLowering::visitInlineAsm(CallSite CS) {
5095 InlineAsm *IA = cast<InlineAsm>(CS.getCalledValue());
5097 /// ConstraintOperands - Information about all of the constraints.
5098 std::vector<SDISelAsmOperandInfo> ConstraintOperands;
5100 std::set<unsigned> OutputRegs, InputRegs;
5102 // Do a prepass over the constraints, canonicalizing them, and building up the
5103 // ConstraintOperands list.
5104 std::vector<InlineAsm::ConstraintInfo>
5105 ConstraintInfos = IA->ParseConstraints();
5107 bool hasMemory = hasInlineAsmMemConstraint(ConstraintInfos, TLI);
5109 SDValue Chain, Flag;
5111 // We won't need to flush pending loads if this asm doesn't touch
5112 // memory and is nonvolatile.
5113 if (hasMemory || IA->hasSideEffects())
Chain = getRoot();
else
5116 Chain = DAG.getRoot();
5118 unsigned ArgNo = 0; // ArgNo - The argument of the CallInst.
5119 unsigned ResNo = 0; // ResNo - The result number of the next output.
5120 for (unsigned i = 0, e = ConstraintInfos.size(); i != e; ++i) {
5121 ConstraintOperands.push_back(SDISelAsmOperandInfo(ConstraintInfos[i]));
5122 SDISelAsmOperandInfo &OpInfo = ConstraintOperands.back();
5124 EVT OpVT = MVT::Other;
5126 // Compute the value type for each operand.
5127 switch (OpInfo.Type) {
5128 case InlineAsm::isOutput:
5129 // Indirect outputs just consume an argument.
5130 if (OpInfo.isIndirect) {
5131 OpInfo.CallOperandVal = CS.getArgument(ArgNo++);
5135 // The return value of the call is this value. As such, there is no
5136 // corresponding argument.
5137 assert(CS.getType() != Type::getVoidTy(*DAG.getContext()) &&
5139 if (const StructType *STy = dyn_cast<StructType>(CS.getType())) {
5140 OpVT = TLI.getValueType(STy->getElementType(ResNo));
5142 assert(ResNo == 0 && "Asm only has one result!");
5143 OpVT = TLI.getValueType(CS.getType());
5147 case InlineAsm::isInput:
5148 OpInfo.CallOperandVal = CS.getArgument(ArgNo++);
5150 case InlineAsm::isClobber:
5155 // If this is an input or an indirect output, process the call argument.
5156 // BasicBlocks are labels, currently appearing only in asm statements.
5157 if (OpInfo.CallOperandVal) {
5158 // Strip bitcasts, if any. This mostly comes up for functions.
5159 OpInfo.CallOperandVal = OpInfo.CallOperandVal->stripPointerCasts();
5161 if (BasicBlock *BB = dyn_cast<BasicBlock>(OpInfo.CallOperandVal)) {
5162 OpInfo.CallOperand = DAG.getBasicBlock(FuncInfo.MBBMap[BB]);
5164 OpInfo.CallOperand = getValue(OpInfo.CallOperandVal);
5167 OpVT = OpInfo.getCallOperandValEVT(*DAG.getContext(), TLI, TD);
5170 OpInfo.ConstraintVT = OpVT;
5173 // Second pass over the constraints: compute which constraint option to use
5174 // and assign registers to constraints that want a specific physreg.
5175 for (unsigned i = 0, e = ConstraintInfos.size(); i != e; ++i) {
5176 SDISelAsmOperandInfo &OpInfo = ConstraintOperands[i];
5178 // If this is an output operand with a matching input operand, look up the
5179 // matching input. If their types mismatch, e.g. one is an integer, the
5180 // other is floating point, or their sizes are different, flag it as an
// error.
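// Illustrative case: a constraint string like "=r,0" ties the first input to
// output 0; an i32 output matched with an i64 input, or an integer matched
// with a floating-point value, is rejected here, while a mismatch in nominal
// type alone simply makes the input adopt the output's ConstraintVT below.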
5182 if (OpInfo.hasMatchingInput()) {
5183 SDISelAsmOperandInfo &Input = ConstraintOperands[OpInfo.MatchingInput];
5184 if (OpInfo.ConstraintVT != Input.ConstraintVT) {
5185 if ((OpInfo.ConstraintVT.isInteger() !=
5186 Input.ConstraintVT.isInteger()) ||
5187 (OpInfo.ConstraintVT.getSizeInBits() !=
5188 Input.ConstraintVT.getSizeInBits())) {
5189 llvm_report_error("Unsupported asm: input constraint"
5190 " with a matching output constraint of incompatible"
5193 Input.ConstraintVT = OpInfo.ConstraintVT;
5197 // Compute the constraint code and ConstraintType to use.
5198 TLI.ComputeConstraintToUse(OpInfo, OpInfo.CallOperand, hasMemory, &DAG);
5200 // If this is a memory input, and if the operand is not indirect, do what we
5201 // need to provide an address for the memory input.
5202 if (OpInfo.ConstraintType == TargetLowering::C_Memory &&
5203 !OpInfo.isIndirect) {
5204 assert(OpInfo.Type == InlineAsm::isInput &&
5205 "Can only indirectify direct input operands!");
5207 // Memory operands really want the address of the value. If we don't have
5208 // an indirect input, put it in the constpool if we can, otherwise spill
5209 // it to a stack slot.
5211 // If the operand is a float, integer, or vector constant, spill to a
5212 // constant pool entry to get its address.
5213 Value *OpVal = OpInfo.CallOperandVal;
5214 if (isa<ConstantFP>(OpVal) || isa<ConstantInt>(OpVal) ||
5215 isa<ConstantVector>(OpVal)) {
5216 OpInfo.CallOperand = DAG.getConstantPool(cast<Constant>(OpVal),
5217 TLI.getPointerTy());
5219 // Otherwise, create a stack slot and emit a store to it before the
// asm.
5221 const Type *Ty = OpVal->getType();
5222 uint64_t TySize = TLI.getTargetData()->getTypeAllocSize(Ty);
5223 unsigned Align = TLI.getTargetData()->getPrefTypeAlignment(Ty);
5224 MachineFunction &MF = DAG.getMachineFunction();
5225 int SSFI = MF.getFrameInfo()->CreateStackObject(TySize, Align);
5226 SDValue StackSlot = DAG.getFrameIndex(SSFI, TLI.getPointerTy());
5227 Chain = DAG.getStore(Chain, getCurDebugLoc(),
5228 OpInfo.CallOperand, StackSlot, NULL, 0);
5229 OpInfo.CallOperand = StackSlot;
5232 // There is no longer a Value* corresponding to this operand.
5233 OpInfo.CallOperandVal = 0;
5234 // It is now an indirect operand.
5235 OpInfo.isIndirect = true;
5238 // If this constraint is for a specific register, allocate it before
5240 if (OpInfo.ConstraintType == TargetLowering::C_Register)
5241 GetRegistersForValue(OpInfo, OutputRegs, InputRegs);
5243 ConstraintInfos.clear();
5246 // Third pass - Loop over all of the operands, assigning virtual or physregs
5247 // to register class operands.
5248 for (unsigned i = 0, e = ConstraintOperands.size(); i != e; ++i) {
5249 SDISelAsmOperandInfo &OpInfo = ConstraintOperands[i];
5251 // C_Register operands have already been allocated, Other/Memory don't need
// to be assigned physregs.
5253 if (OpInfo.ConstraintType == TargetLowering::C_RegisterClass)
5254 GetRegistersForValue(OpInfo, OutputRegs, InputRegs);
5257 // AsmNodeOperands - The operands for the ISD::INLINEASM node.
5258 std::vector<SDValue> AsmNodeOperands;
5259 AsmNodeOperands.push_back(SDValue()); // reserve space for input chain
5260 AsmNodeOperands.push_back(
5261 DAG.getTargetExternalSymbol(IA->getAsmString().c_str(), MVT::Other));
5264 // Loop over all of the inputs, copying the operand values into the
5265 // appropriate registers and processing the output regs.
5266 RegsForValue RetValRegs;
5268 // IndirectStoresToEmit - The set of stores to emit after the inline asm node.
5269 std::vector<std::pair<RegsForValue, Value*> > IndirectStoresToEmit;
5271 for (unsigned i = 0, e = ConstraintOperands.size(); i != e; ++i) {
5272 SDISelAsmOperandInfo &OpInfo = ConstraintOperands[i];
5274 switch (OpInfo.Type) {
5275 case InlineAsm::isOutput: {
5276 if (OpInfo.ConstraintType != TargetLowering::C_RegisterClass &&
5277 OpInfo.ConstraintType != TargetLowering::C_Register) {
5278 // Memory output, or 'other' output (e.g. 'X' constraint).
5279 assert(OpInfo.isIndirect && "Memory output must be indirect operand");
5281 // Add information to the INLINEASM node to know about this output.
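// (Sketch of the operand-flag encoding used throughout this function, as
// inferred from the constants below: bits 0-2 hold the operand kind --
// 1 REGUSE, 2 REGDEF, 3 IMM, 4 MEM, 6 EARLYCLOBBER REGDEF -- and bits 3-15
// hold the number of following operand values, so 4 | (1<<3) describes a
// single memory operand.)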
5282 unsigned ResOpType = 4/*MEM*/ | (1<<3);
5283 AsmNodeOperands.push_back(DAG.getTargetConstant(ResOpType,
5284 TLI.getPointerTy()));
5285 AsmNodeOperands.push_back(OpInfo.CallOperand);
5289 // Otherwise, this is a register or register class output.
5291 // Copy the output from the appropriate register. Find a register that
// we can use.
5293 if (OpInfo.AssignedRegs.Regs.empty()) {
5294 llvm_report_error("Couldn't allocate output reg for"
5295 " constraint '" + OpInfo.ConstraintCode + "'!");
5298 // If this is an indirect operand, store through the pointer after the
// asm.
5300 if (OpInfo.isIndirect) {
5301 IndirectStoresToEmit.push_back(std::make_pair(OpInfo.AssignedRegs,
5302 OpInfo.CallOperandVal));
5304 // This is the result value of the call.
5305 assert(CS.getType() != Type::getVoidTy(*DAG.getContext()) &&
5307 // Concatenate this output onto the outputs list.
5308 RetValRegs.append(OpInfo.AssignedRegs);
5311 // Add information to the INLINEASM node to know that this register is
// set.
5313 OpInfo.AssignedRegs.AddInlineAsmOperands(OpInfo.isEarlyClobber ?
5314 6 /* EARLYCLOBBER REGDEF */ :
5318 DAG, AsmNodeOperands);
5321 case InlineAsm::isInput: {
5322 SDValue InOperandVal = OpInfo.CallOperand;
5324 if (OpInfo.isMatchingInputConstraint()) { // Matching constraint?
5325 // If this is required to match an output register we have already set,
5326 // just use its register.
5327 unsigned OperandNo = OpInfo.getMatchedOperand();
5329 // Scan until we find the definition we already emitted of this operand.
5330 // When we find it, create a RegsForValue operand.
5331 unsigned CurOp = 2; // The first operand.
5332 for (; OperandNo; --OperandNo) {
5333 // Advance to the next operand.
unsigned OpFlag =
5335 cast<ConstantSDNode>(AsmNodeOperands[CurOp])->getZExtValue();
5336 assert(((OpFlag & 7) == 2 /*REGDEF*/ ||
5337 (OpFlag & 7) == 6 /*EARLYCLOBBER REGDEF*/ ||
5338 (OpFlag & 7) == 4 /*MEM*/) &&
5339 "Skipped past definitions?");
5340 CurOp += InlineAsm::getNumOperandRegisters(OpFlag)+1;
}
unsigned OpFlag =
5344 cast<ConstantSDNode>(AsmNodeOperands[CurOp])->getZExtValue();
5345 if ((OpFlag & 7) == 2 /*REGDEF*/
5346 || (OpFlag & 7) == 6 /* EARLYCLOBBER REGDEF */) {
5347 // Add (OpFlag&0xffff)>>3 registers to MatchedRegs.
5348 if (OpInfo.isIndirect) {
5349 llvm_report_error("Don't know how to handle tied indirect "
5350 "register inputs yet!");
5352 RegsForValue MatchedRegs;
5353 MatchedRegs.TLI = &TLI;
5354 MatchedRegs.ValueVTs.push_back(InOperandVal.getValueType());
5355 EVT RegVT = AsmNodeOperands[CurOp+1].getValueType();
5356 MatchedRegs.RegVTs.push_back(RegVT);
5357 MachineRegisterInfo &RegInfo = DAG.getMachineFunction().getRegInfo();
5358 for (unsigned i = 0, e = InlineAsm::getNumOperandRegisters(OpFlag);
5361 push_back(RegInfo.createVirtualRegister(TLI.getRegClassFor(RegVT)));
5363 // Use the produced MatchedRegs object to copy the input value into the
// new virtual registers.
5364 MatchedRegs.getCopyToRegs(InOperandVal, DAG, getCurDebugLoc(),
5366 MatchedRegs.AddInlineAsmOperands(1 /*REGUSE*/,
5367 true, OpInfo.getMatchedOperand(),
5368 DAG, AsmNodeOperands);
5371 assert(((OpFlag & 7) == 4) && "Unknown matching constraint!");
5372 assert((InlineAsm::getNumOperandRegisters(OpFlag)) == 1 &&
5373 "Unexpected number of operands");
5374 // Add information to the INLINEASM node to know about this input.
5375 // See InlineAsm.h isUseOperandTiedToDef.
5376 OpFlag |= 0x80000000 | (OpInfo.getMatchedOperand() << 16);
5377 AsmNodeOperands.push_back(DAG.getTargetConstant(OpFlag,
5378 TLI.getPointerTy()));
5379 AsmNodeOperands.push_back(AsmNodeOperands[CurOp+1]);
5384 if (OpInfo.ConstraintType == TargetLowering::C_Other) {
5385 assert(!OpInfo.isIndirect &&
5386 "Don't know how to handle indirect other inputs yet!");
5388 std::vector<SDValue> Ops;
5389 TLI.LowerAsmOperandForConstraint(InOperandVal, OpInfo.ConstraintCode[0],
5390 hasMemory, Ops, DAG);
5392 llvm_report_error("Invalid operand for inline asm"
5393 " constraint '" + OpInfo.ConstraintCode + "'!");
5396 // Add information to the INLINEASM node to know about this input.
5397 unsigned ResOpType = 3 /*IMM*/ | (Ops.size() << 3);
5398 AsmNodeOperands.push_back(DAG.getTargetConstant(ResOpType,
5399 TLI.getPointerTy()));
5400 AsmNodeOperands.insert(AsmNodeOperands.end(), Ops.begin(), Ops.end());
5402 } else if (OpInfo.ConstraintType == TargetLowering::C_Memory) {
5403 assert(OpInfo.isIndirect && "Operand must be indirect to be a mem!");
5404 assert(InOperandVal.getValueType() == TLI.getPointerTy() &&
5405 "Memory operands expect pointer values");
5407 // Add information to the INLINEASM node to know about this input.
5408 unsigned ResOpType = 4/*MEM*/ | (1<<3);
5409 AsmNodeOperands.push_back(DAG.getTargetConstant(ResOpType,
5410 TLI.getPointerTy()));
5411 AsmNodeOperands.push_back(InOperandVal);
5415 assert((OpInfo.ConstraintType == TargetLowering::C_RegisterClass ||
5416 OpInfo.ConstraintType == TargetLowering::C_Register) &&
5417 "Unknown constraint type!");
5418 assert(!OpInfo.isIndirect &&
5419 "Don't know how to handle indirect register inputs yet!");
5421 // Copy the input into the appropriate registers.
5422 if (OpInfo.AssignedRegs.Regs.empty()) {
5423 llvm_report_error("Couldn't allocate input reg for"
5424 " constraint '"+ OpInfo.ConstraintCode +"'!");
5427 OpInfo.AssignedRegs.getCopyToRegs(InOperandVal, DAG, getCurDebugLoc(),
5430 OpInfo.AssignedRegs.AddInlineAsmOperands(1/*REGUSE*/, false, 0,
5431 DAG, AsmNodeOperands);
5434 case InlineAsm::isClobber: {
5435 // Add the clobbered value to the operand list, so that the register
5436 // allocator is aware that the physreg got clobbered.
5437 if (!OpInfo.AssignedRegs.Regs.empty())
5438 OpInfo.AssignedRegs.AddInlineAsmOperands(6 /* EARLYCLOBBER REGDEF */,
5439 false, 0, DAG,AsmNodeOperands);
5445 // Finish up input operands.
5446 AsmNodeOperands[0] = Chain;
5447 if (Flag.getNode()) AsmNodeOperands.push_back(Flag);
5449 Chain = DAG.getNode(ISD::INLINEASM, getCurDebugLoc(),
5450 DAG.getVTList(MVT::Other, MVT::Flag),
5451 &AsmNodeOperands[0], AsmNodeOperands.size());
5452 Flag = Chain.getValue(1);
5454 // If this asm returns a register value, copy the result from that register
5455 // and set it as the value of the call.
5456 if (!RetValRegs.Regs.empty()) {
5457 SDValue Val = RetValRegs.getCopyFromRegs(DAG, getCurDebugLoc(),
5460 // FIXME: Why don't we do this for inline asms with MRVs?
5461 if (CS.getType()->isSingleValueType() && CS.getType()->isSized()) {
5462 EVT ResultType = TLI.getValueType(CS.getType());
5464 // If any of the results of the inline asm is a vector, it may have the
5465 // wrong width/num elts. This can happen for register classes that can
5466 // contain multiple different value types. The preg or vreg allocated may
5467 // not have the same VT as was expected. Convert it to the right type
5468 // with bit_convert.
5469 if (ResultType != Val.getValueType() && Val.getValueType().isVector()) {
5470 Val = DAG.getNode(ISD::BIT_CONVERT, getCurDebugLoc(),
5473 } else if (ResultType != Val.getValueType() &&
5474 ResultType.isInteger() && Val.getValueType().isInteger()) {
5475 // If a result value was tied to an input value, the computed result may
5476 // have a wider width than the expected result. Extract the relevant
// portion.
5478 Val = DAG.getNode(ISD::TRUNCATE, getCurDebugLoc(), ResultType, Val);
5481 assert(ResultType == Val.getValueType() && "Asm result value mismatch!");
5484 setValue(CS.getInstruction(), Val);
5485 // Don't need to use this as a chain in this case.
5486 if (!IA->hasSideEffects() && !hasMemory && IndirectStoresToEmit.empty())
5490 std::vector<std::pair<SDValue, Value*> > StoresToEmit;
5492 // Process indirect outputs, first output all of the flagged copies out of
// the physregs.
5494 for (unsigned i = 0, e = IndirectStoresToEmit.size(); i != e; ++i) {
5495 RegsForValue &OutRegs = IndirectStoresToEmit[i].first;
5496 Value *Ptr = IndirectStoresToEmit[i].second;
5497 SDValue OutVal = OutRegs.getCopyFromRegs(DAG, getCurDebugLoc(),
5499 StoresToEmit.push_back(std::make_pair(OutVal, Ptr));
5503 // Emit the non-flagged stores from the physregs.
5504 SmallVector<SDValue, 8> OutChains;
5505 for (unsigned i = 0, e = StoresToEmit.size(); i != e; ++i)
5506 OutChains.push_back(DAG.getStore(Chain, getCurDebugLoc(),
5507 StoresToEmit[i].first,
5508 getValue(StoresToEmit[i].second),
5509 StoresToEmit[i].second, 0));
5510 if (!OutChains.empty())
5511 Chain = DAG.getNode(ISD::TokenFactor, getCurDebugLoc(), MVT::Other,
5512 &OutChains[0], OutChains.size());
5517 void SelectionDAGLowering::visitMalloc(MallocInst &I) {
5518 SDValue Src = getValue(I.getOperand(0));
5520 // Scale up by the type size in the original i32 type width. Various
5521 // mid-level optimizers may make assumptions about demanded bits etc from the
5522 // i32-ness of the malloc size operand: we do not want to promote to i64 and then
5523 // multiply on 64-bit targets.
5524 // FIXME: Malloc inst should go away: PR715.
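// Worked example (illustrative): for "%p = malloc [10 x i32], i32 %n" the
// element size is 40 bytes, so the i32 count %n is multiplied by 40 here and
// only then truncated or extended to the pointer width for the libcall below.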
5525 uint64_t ElementSize = TD->getTypeAllocSize(I.getType()->getElementType());
5526 if (ElementSize != 1) {
5527 // Src is always 32-bits, make sure the constant fits.
5528 assert(Src.getValueType() == MVT::i32);
5529 ElementSize = (uint32_t)ElementSize;
5530 Src = DAG.getNode(ISD::MUL, getCurDebugLoc(), Src.getValueType(),
5531 Src, DAG.getConstant(ElementSize, Src.getValueType()));
5534 EVT IntPtr = TLI.getPointerTy();
5536 if (IntPtr.bitsLT(Src.getValueType()))
5537 Src = DAG.getNode(ISD::TRUNCATE, getCurDebugLoc(), IntPtr, Src);
5538 else if (IntPtr.bitsGT(Src.getValueType()))
5539 Src = DAG.getNode(ISD::ZERO_EXTEND, getCurDebugLoc(), IntPtr, Src);
5541 TargetLowering::ArgListTy Args;
5542 TargetLowering::ArgListEntry Entry;
5544 Entry.Ty = TLI.getTargetData()->getIntPtrType(*DAG.getContext());
5545 Args.push_back(Entry);
5547 bool isTailCall = PerformTailCallOpt &&
5548 isInTailCallPosition(&I, Attribute::None, TLI);
5549 std::pair<SDValue,SDValue> Result =
5550 TLI.LowerCallTo(getRoot(), I.getType(), false, false, false, false,
5551 0, CallingConv::C, isTailCall,
5552 /*isReturnValueUsed=*/true,
5553 DAG.getExternalSymbol("malloc", IntPtr),
5554 Args, DAG, getCurDebugLoc());
5555 if (Result.first.getNode())
5556 setValue(&I, Result.first); // Pointers always fit in registers
5557 if (Result.second.getNode())
5558 DAG.setRoot(Result.second);
5561 void SelectionDAGLowering::visitFree(FreeInst &I) {
5562 TargetLowering::ArgListTy Args;
5563 TargetLowering::ArgListEntry Entry;
5564 Entry.Node = getValue(I.getOperand(0));
5565 Entry.Ty = TLI.getTargetData()->getIntPtrType(*DAG.getContext());
5566 Args.push_back(Entry);
5567 EVT IntPtr = TLI.getPointerTy();
5568 bool isTailCall = PerformTailCallOpt &&
5569 isInTailCallPosition(&I, Attribute::None, TLI);
5570 std::pair<SDValue,SDValue> Result =
5571 TLI.LowerCallTo(getRoot(), Type::getVoidTy(*DAG.getContext()),
5572 false, false, false, false,
5573 0, CallingConv::C, isTailCall,
5574 /*isReturnValueUsed=*/true,
5575 DAG.getExternalSymbol("free", IntPtr), Args, DAG,
5577 if (Result.second.getNode())
5578 DAG.setRoot(Result.second);
5581 void SelectionDAGLowering::visitVAStart(CallInst &I) {
5582 DAG.setRoot(DAG.getNode(ISD::VASTART, getCurDebugLoc(),
5583 MVT::Other, getRoot(),
5584 getValue(I.getOperand(1)),
5585 DAG.getSrcValue(I.getOperand(1))));
5588 void SelectionDAGLowering::visitVAArg(VAArgInst &I) {
5589 SDValue V = DAG.getVAArg(TLI.getValueType(I.getType()), getCurDebugLoc(),
5590 getRoot(), getValue(I.getOperand(0)),
5591 DAG.getSrcValue(I.getOperand(0)));
5593 DAG.setRoot(V.getValue(1));
5596 void SelectionDAGLowering::visitVAEnd(CallInst &I) {
5597 DAG.setRoot(DAG.getNode(ISD::VAEND, getCurDebugLoc(),
5598 MVT::Other, getRoot(),
5599 getValue(I.getOperand(1)),
5600 DAG.getSrcValue(I.getOperand(1))));
5603 void SelectionDAGLowering::visitVACopy(CallInst &I) {
5604 DAG.setRoot(DAG.getNode(ISD::VACOPY, getCurDebugLoc(),
5605 MVT::Other, getRoot(),
5606 getValue(I.getOperand(1)),
5607 getValue(I.getOperand(2)),
5608 DAG.getSrcValue(I.getOperand(1)),
5609 DAG.getSrcValue(I.getOperand(2))));
5612 /// TargetLowering::LowerCallTo - This is the default LowerCallTo
5613 /// implementation, which just calls LowerCall.
5614 /// FIXME: When all targets are
5615 /// migrated to using LowerCall, this hook should be integrated into SDISel.
5616 std::pair<SDValue, SDValue>
5617 TargetLowering::LowerCallTo(SDValue Chain, const Type *RetTy,
5618 bool RetSExt, bool RetZExt, bool isVarArg,
5619 bool isInreg, unsigned NumFixedArgs,
5620 CallingConv::ID CallConv, bool isTailCall,
5621 bool isReturnValueUsed,
SDValue Callee,
5623 ArgListTy &Args, SelectionDAG &DAG, DebugLoc dl) {
5625 assert((!isTailCall || PerformTailCallOpt) &&
5626 "isTailCall set when tail-call optimizations are disabled!");
5628 // Handle all of the outgoing arguments.
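// For instance (illustrative, assuming a 32-bit target): an i64 argument is
// split by getCopyToParts into two i32 parts; the first part carries the
// split flag and the original alignment, and later parts get alignment 1,
// matching the loop below.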
5629 SmallVector<ISD::OutputArg, 32> Outs;
5630 for (unsigned i = 0, e = Args.size(); i != e; ++i) {
5631 SmallVector<EVT, 4> ValueVTs;
5632 ComputeValueVTs(*this, Args[i].Ty, ValueVTs);
5633 for (unsigned Value = 0, NumValues = ValueVTs.size();
5634 Value != NumValues; ++Value) {
5635 EVT VT = ValueVTs[Value];
5636 const Type *ArgTy = VT.getTypeForEVT(RetTy->getContext());
5637 SDValue Op = SDValue(Args[i].Node.getNode(),
5638 Args[i].Node.getResNo() + Value);
5639 ISD::ArgFlagsTy Flags;
5640 unsigned OriginalAlignment =
5641 getTargetData()->getABITypeAlignment(ArgTy);
5647 if (Args[i].isInReg)
Flags.setInReg();
5651 if (Args[i].isByVal) {
5653 const PointerType *Ty = cast<PointerType>(Args[i].Ty);
5654 const Type *ElementTy = Ty->getElementType();
5655 unsigned FrameAlign = getByValTypeAlignment(ElementTy);
5656 unsigned FrameSize = getTargetData()->getTypeAllocSize(ElementTy);
5657 // For ByVal, alignment should come from FE. BE will guess if this
5658 // info is not there but there are cases it cannot get right.
5659 if (Args[i].Alignment)
5660 FrameAlign = Args[i].Alignment;
5661 Flags.setByValAlign(FrameAlign);
5662 Flags.setByValSize(FrameSize);
5666 Flags.setOrigAlign(OriginalAlignment);
5668 EVT PartVT = getRegisterType(RetTy->getContext(), VT);
5669 unsigned NumParts = getNumRegisters(RetTy->getContext(), VT);
5670 SmallVector<SDValue, 4> Parts(NumParts);
5671 ISD::NodeType ExtendKind = ISD::ANY_EXTEND;
if (Args[i].isSExt)
5674 ExtendKind = ISD::SIGN_EXTEND;
5675 else if (Args[i].isZExt)
5676 ExtendKind = ISD::ZERO_EXTEND;
5678 getCopyToParts(DAG, dl, Op, &Parts[0], NumParts, PartVT, ExtendKind);
5680 for (unsigned j = 0; j != NumParts; ++j) {
5681 // If it isn't the first piece, the alignment must be 1.
5682 ISD::OutputArg MyFlags(Flags, Parts[j], i < NumFixedArgs);
5683 if (NumParts > 1 && j == 0)
5684 MyFlags.Flags.setSplit();
else if (j != 0)
5686 MyFlags.Flags.setOrigAlign(1);
5688 Outs.push_back(MyFlags);
5693 // Handle the incoming return values from the call.
5694 SmallVector<ISD::InputArg, 32> Ins;
5695 SmallVector<EVT, 4> RetTys;
5696 ComputeValueVTs(*this, RetTy, RetTys);
5697 for (unsigned I = 0, E = RetTys.size(); I != E; ++I) {
5699 EVT RegisterVT = getRegisterType(RetTy->getContext(), VT);
5700 unsigned NumRegs = getNumRegisters(RetTy->getContext(), VT);
5701 for (unsigned i = 0; i != NumRegs; ++i) {
5702 ISD::InputArg MyFlags;
5703 MyFlags.VT = RegisterVT;
5704 MyFlags.Used = isReturnValueUsed;
if (RetSExt)
5706 MyFlags.Flags.setSExt();
if (RetZExt)
5708 MyFlags.Flags.setZExt();
if (isInreg)
5710 MyFlags.Flags.setInReg();
5711 Ins.push_back(MyFlags);
5715 // Check if target-dependent constraints permit a tail call here.
5716 // Target-independent constraints should be checked by the caller.
5718 !IsEligibleForTailCallOptimization(Callee, CallConv, isVarArg, Ins, DAG))
5721 SmallVector<SDValue, 4> InVals;
5722 Chain = LowerCall(Chain, Callee, CallConv, isVarArg, isTailCall,
5723 Outs, Ins, dl, DAG, InVals);
5725 // Verify that the target's LowerCall behaved as expected.
5726 assert(Chain.getNode() && Chain.getValueType() == MVT::Other &&
5727 "LowerCall didn't return a valid chain!");
5728 assert((!isTailCall || InVals.empty()) &&
5729 "LowerCall emitted a return value for a tail call!");
5730 assert((isTailCall || InVals.size() == Ins.size()) &&
5731 "LowerCall didn't emit the correct number of values!");
5732 DEBUG(for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
5733 assert(InVals[i].getNode() &&
5734 "LowerCall emitted a null value!");
5735 assert(Ins[i].VT == InVals[i].getValueType() &&
5736 "LowerCall emitted a value with the wrong type!");
5739 // For a tail call, the return value is merely live-out and there aren't
5740 // any nodes in the DAG representing it. Return a special value to
5741 // indicate that a tail call has been emitted and no more Instructions
5742 // should be processed in the current block.
5745 return std::make_pair(SDValue(), SDValue());
5748 // Collect the legal value parts into potentially illegal values
5749 // that correspond to the original function's return values.
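// Continuing the illustration above: if the callee returns i64 on a 32-bit
// target, the two i32 values produced by LowerCall are recombined here by
// getCopyFromParts into a single i64, wrapped in AssertSext/AssertZext when
// the return value carries a signext/zeroext attribute.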
5750 ISD::NodeType AssertOp = ISD::DELETED_NODE;
if (RetSExt)
5752 AssertOp = ISD::AssertSext;
else if (RetZExt)
5754 AssertOp = ISD::AssertZext;
5755 SmallVector<SDValue, 4> ReturnValues;
5756 unsigned CurReg = 0;
5757 for (unsigned I = 0, E = RetTys.size(); I != E; ++I) {
5759 EVT RegisterVT = getRegisterType(RetTy->getContext(), VT);
5760 unsigned NumRegs = getNumRegisters(RetTy->getContext(), VT);
5762 SDValue ReturnValue =
5763 getCopyFromParts(DAG, dl, &InVals[CurReg], NumRegs, RegisterVT, VT,
5765 ReturnValues.push_back(ReturnValue);
5769 // For a function returning void, there is no return value. We can't create
5770 // such a node, so we just return a null return value in that case. In
5771 // that case, nothing will actually look at the value.
5772 if (ReturnValues.empty())
5773 return std::make_pair(SDValue(), Chain);
5775 SDValue Res = DAG.getNode(ISD::MERGE_VALUES, dl,
5776 DAG.getVTList(&RetTys[0], RetTys.size()),
5777 &ReturnValues[0], ReturnValues.size());
5779 return std::make_pair(Res, Chain);
5782 void TargetLowering::LowerOperationWrapper(SDNode *N,
5783 SmallVectorImpl<SDValue> &Results,
5784 SelectionDAG &DAG) {
5785 SDValue Res = LowerOperation(SDValue(N, 0), DAG);
if (Res.getNode())
5787 Results.push_back(Res);
5790 SDValue TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) {
5791 llvm_unreachable("LowerOperation not implemented for this target!");
5796 void SelectionDAGLowering::CopyValueToVirtualRegister(Value *V, unsigned Reg) {
5797 SDValue Op = getValue(V);
5798 assert((Op.getOpcode() != ISD::CopyFromReg ||
5799 cast<RegisterSDNode>(Op.getOperand(1))->getReg() != Reg) &&
5800 "Copy from a reg to the same reg!");
5801 assert(!TargetRegisterInfo::isPhysicalRegister(Reg) && "Is a physreg");
5803 RegsForValue RFV(V->getContext(), TLI, Reg, V->getType());
5804 SDValue Chain = DAG.getEntryNode();
5805 RFV.getCopyToRegs(Op, DAG, getCurDebugLoc(), Chain, 0);
5806 PendingExports.push_back(Chain);
5809 #include "llvm/CodeGen/SelectionDAGISel.h"
5811 void SelectionDAGISel::
5812 LowerArguments(BasicBlock *LLVMBB) {
5813 // If this is the entry block, emit arguments.
5814 Function &F = *LLVMBB->getParent();
5815 SelectionDAG &DAG = SDL->DAG;
5816 SDValue OldRoot = DAG.getRoot();
5817 DebugLoc dl = SDL->getCurDebugLoc();
5818 const TargetData *TD = TLI.getTargetData();
5820 // Set up the incoming argument description vector.
5821 SmallVector<ISD::InputArg, 16> Ins;
5823 for (Function::arg_iterator I = F.arg_begin(), E = F.arg_end();
5824 I != E; ++I, ++Idx) {
5825 SmallVector<EVT, 4> ValueVTs;
5826 ComputeValueVTs(TLI, I->getType(), ValueVTs);
5827 bool isArgValueUsed = !I->use_empty();
5828 for (unsigned Value = 0, NumValues = ValueVTs.size();
5829 Value != NumValues; ++Value) {
5830 EVT VT = ValueVTs[Value];
5831 const Type *ArgTy = VT.getTypeForEVT(*DAG.getContext());
5832 ISD::ArgFlagsTy Flags;
5833 unsigned OriginalAlignment =
5834 TD->getABITypeAlignment(ArgTy);
5836 if (F.paramHasAttr(Idx, Attribute::ZExt))
Flags.setZExt();
5838 if (F.paramHasAttr(Idx, Attribute::SExt))
Flags.setSExt();
5840 if (F.paramHasAttr(Idx, Attribute::InReg))
Flags.setInReg();
5842 if (F.paramHasAttr(Idx, Attribute::StructRet))
Flags.setSRet();
5844 if (F.paramHasAttr(Idx, Attribute::ByVal)) {
5846 const PointerType *Ty = cast<PointerType>(I->getType());
5847 const Type *ElementTy = Ty->getElementType();
5848 unsigned FrameAlign = TLI.getByValTypeAlignment(ElementTy);
5849 unsigned FrameSize = TD->getTypeAllocSize(ElementTy);
5850 // For ByVal, alignment should be passed from FE. BE will guess if
5851 // this info is not there but there are cases it cannot get right.
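// (Illustration: a byval argument pointing to a 24-byte struct gets
// ByValSize = 24 from getTypeAllocSize, and ByValAlign is taken from the
// parameter's align attribute when the front end supplied one.)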
5852 if (F.getParamAlignment(Idx))
5853 FrameAlign = F.getParamAlignment(Idx);
5854 Flags.setByValAlign(FrameAlign);
5855 Flags.setByValSize(FrameSize);
5857 if (F.paramHasAttr(Idx, Attribute::Nest))
Flags.setNest();
5859 Flags.setOrigAlign(OriginalAlignment);
5861 EVT RegisterVT = TLI.getRegisterType(*CurDAG->getContext(), VT);
5862 unsigned NumRegs = TLI.getNumRegisters(*CurDAG->getContext(), VT);
5863 for (unsigned i = 0; i != NumRegs; ++i) {
5864 ISD::InputArg MyFlags(Flags, RegisterVT, isArgValueUsed);
5865 if (NumRegs > 1 && i == 0)
5866 MyFlags.Flags.setSplit();
5867 // If it isn't the first piece, the alignment must be 1.
else if (i > 0)
5869 MyFlags.Flags.setOrigAlign(1);
5870 Ins.push_back(MyFlags);
5875 // Call the target to set up the argument values.
5876 SmallVector<SDValue, 8> InVals;
5877 SDValue NewRoot = TLI.LowerFormalArguments(DAG.getRoot(), F.getCallingConv(),
5881 // Verify that the target's LowerFormalArguments behaved as expected.
5882 assert(NewRoot.getNode() && NewRoot.getValueType() == MVT::Other &&
5883 "LowerFormalArguments didn't return a valid chain!");
5884 assert(InVals.size() == Ins.size() &&
5885 "LowerFormalArguments didn't emit the correct number of values!");
5886 DEBUG(for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
5887 assert(InVals[i].getNode() &&
5888 "LowerFormalArguments emitted a null value!");
5889 assert(Ins[i].VT == InVals[i].getValueType() &&
5890 "LowerFormalArguments emitted a value with the wrong type!");
5893 // Update the DAG with the new chain value resulting from argument lowering.
5894 DAG.setRoot(NewRoot);
5896 // Set up the argument values.
5899 for (Function::arg_iterator I = F.arg_begin(), E = F.arg_end(); I != E;
5901 SmallVector<SDValue, 4> ArgValues;
5902 SmallVector<EVT, 4> ValueVTs;
5903 ComputeValueVTs(TLI, I->getType(), ValueVTs);
5904 unsigned NumValues = ValueVTs.size();
5905 for (unsigned Value = 0; Value != NumValues; ++Value) {
5906 EVT VT = ValueVTs[Value];
5907 EVT PartVT = TLI.getRegisterType(*CurDAG->getContext(), VT);
5908 unsigned NumParts = TLI.getNumRegisters(*CurDAG->getContext(), VT);
5910 if (!I->use_empty()) {
5911 ISD::NodeType AssertOp = ISD::DELETED_NODE;
5912 if (F.paramHasAttr(Idx, Attribute::SExt))
5913 AssertOp = ISD::AssertSext;
5914 else if (F.paramHasAttr(Idx, Attribute::ZExt))
5915 AssertOp = ISD::AssertZext;
5917 ArgValues.push_back(getCopyFromParts(DAG, dl, &InVals[i], NumParts,
5918 PartVT, VT, AssertOp));
5922 if (!I->use_empty()) {
5923 SDL->setValue(I, DAG.getMergeValues(&ArgValues[0], NumValues,
5924 SDL->getCurDebugLoc()));
5925 // If this argument is live outside of the entry block, insert a copy from
5926 // wherever we got it to the vreg that other BBs will reference it as.
5927 SDL->CopyToExportRegsIfNeeded(I);
5930 assert(i == InVals.size() && "Argument register count mismatch!");
5932 // Finally, if the target has anything special to do, allow it to do so.
5933 // FIXME: this should insert code into the DAG!
5934 EmitFunctionEntryCode(F, SDL->DAG.getMachineFunction());
5937 /// Handle PHI nodes in successor blocks. Emit code into the SelectionDAG to
5938 /// ensure constants are generated when needed. Remember the virtual registers
5939 /// that need to be added to the Machine PHI nodes as input. We cannot just
5940 /// directly add them, because expansion might result in multiple MBBs for one
5941 /// BB. As such, the start of the BB might correspond to a different MBB than
/// the end.
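///
/// Small illustration (IR not from this file): if %bb ends in
///   br i1 %c, label %succ, label %other
/// and %succ begins with "%p = phi i32 [ 7, %bb ], ...", the constant 7 is
/// copied into a fresh virtual register at the end of %bb's DAG, and that
/// register is recorded in PHINodesToUpdate for the machine PHI in %succ's
/// MBB.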
5945 SelectionDAGISel::HandlePHINodesInSuccessorBlocks(BasicBlock *LLVMBB) {
5946 TerminatorInst *TI = LLVMBB->getTerminator();
5948 SmallPtrSet<MachineBasicBlock *, 4> SuccsHandled;
5950 // Check successor nodes' PHI nodes that expect a constant to be available
// from this block.
5952 for (unsigned succ = 0, e = TI->getNumSuccessors(); succ != e; ++succ) {
5953 BasicBlock *SuccBB = TI->getSuccessor(succ);
5954 if (!isa<PHINode>(SuccBB->begin())) continue;
5955 MachineBasicBlock *SuccMBB = FuncInfo->MBBMap[SuccBB];
5957 // If this terminator has multiple identical successors (common for
5958 // switches), only handle each succ once.
5959 if (!SuccsHandled.insert(SuccMBB)) continue;
5961 MachineBasicBlock::iterator MBBI = SuccMBB->begin();
5964 // At this point we know that there is a 1-1 correspondence between LLVM PHI
5965 // nodes and Machine PHI nodes, but the incoming operands have not been
// emitted yet.
5967 for (BasicBlock::iterator I = SuccBB->begin();
5968 (PN = dyn_cast<PHINode>(I)); ++I) {
5969 // Ignore dead PHIs.
5970 if (PN->use_empty()) continue;
5973 Value *PHIOp = PN->getIncomingValueForBlock(LLVMBB);
5975 if (Constant *C = dyn_cast<Constant>(PHIOp)) {
5976 unsigned &RegOut = SDL->ConstantsOut[C];
5978 RegOut = FuncInfo->CreateRegForValue(C);
5979 SDL->CopyValueToVirtualRegister(C, RegOut);
5983 Reg = FuncInfo->ValueMap[PHIOp];
5985 assert(isa<AllocaInst>(PHIOp) &&
5986 FuncInfo->StaticAllocaMap.count(cast<AllocaInst>(PHIOp)) &&
5987 "Didn't codegen value into a register!??");
5988 Reg = FuncInfo->CreateRegForValue(PHIOp);
5989 SDL->CopyValueToVirtualRegister(PHIOp, Reg);
5993 // Remember that this register needs to be added to the machine PHI node as
5994 // the input for this MBB.
5995 SmallVector<EVT, 4> ValueVTs;
5996 ComputeValueVTs(TLI, PN->getType(), ValueVTs);
5997 for (unsigned vti = 0, vte = ValueVTs.size(); vti != vte; ++vti) {
5998 EVT VT = ValueVTs[vti];
5999 unsigned NumRegisters = TLI.getNumRegisters(*CurDAG->getContext(), VT);
6000 for (unsigned i = 0, e = NumRegisters; i != e; ++i)
6001 SDL->PHINodesToUpdate.push_back(std::make_pair(MBBI++, Reg+i));
6002 Reg += NumRegisters;
6006 SDL->ConstantsOut.clear();
6009 /// This is the Fast-ISel version of HandlePHINodesInSuccessorBlocks. It only
6010 /// supports legal types, and it emits MachineInstrs directly instead of
6011 /// creating SelectionDAG nodes.
6014 SelectionDAGISel::HandlePHINodesInSuccessorBlocksFast(BasicBlock *LLVMBB,
6016 TerminatorInst *TI = LLVMBB->getTerminator();
6018 SmallPtrSet<MachineBasicBlock *, 4> SuccsHandled;
6019 unsigned OrigNumPHINodesToUpdate = SDL->PHINodesToUpdate.size();
6021 // Check successor nodes' PHI nodes that expect a constant to be available
// from this block.
6023 for (unsigned succ = 0, e = TI->getNumSuccessors(); succ != e; ++succ) {
6024 BasicBlock *SuccBB = TI->getSuccessor(succ);
6025 if (!isa<PHINode>(SuccBB->begin())) continue;
6026 MachineBasicBlock *SuccMBB = FuncInfo->MBBMap[SuccBB];
6028 // If this terminator has multiple identical successors (common for
6029 // switches), only handle each succ once.
6030 if (!SuccsHandled.insert(SuccMBB)) continue;
6032 MachineBasicBlock::iterator MBBI = SuccMBB->begin();
6035 // At this point we know that there is a 1-1 correspondence between LLVM PHI
6036 // nodes and Machine PHI nodes, but the incoming operands have not been
// emitted yet.
6038 for (BasicBlock::iterator I = SuccBB->begin();
6039 (PN = dyn_cast<PHINode>(I)); ++I) {
6041 // Ignore dead PHIs.
6041 if (PN->use_empty()) continue;
6043 // Only handle legal types. Two interesting things to note here. First,
6044 // by bailing out early, we may leave behind some dead instructions,
6045 // since SelectionDAG's HandlePHINodesInSuccessorBlocks will insert its
6046 // own moves. Second, this check is necessary because FastISel doesn't
6047 // use CreateRegForValue to create registers, so it always creates
6048 // exactly one register for each non-void instruction.
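// Example of the bailout (illustrative): a PHI of type i64 on a target whose
// widest legal integer type is i32 fails the isTypeLegal check below, so the
// PHINodesToUpdate list is restored and the whole block falls back to the
// SelectionDAG path; i1 is the one case that is promoted and kept.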
6049 EVT VT = TLI.getValueType(PN->getType(), /*AllowUnknown=*/true);
6050 if (VT == MVT::Other || !TLI.isTypeLegal(VT)) {
6053 VT = TLI.getTypeToTransformTo(*CurDAG->getContext(), VT);
6055 SDL->PHINodesToUpdate.resize(OrigNumPHINodesToUpdate);
6060 Value *PHIOp = PN->getIncomingValueForBlock(LLVMBB);
6062 unsigned Reg = F->getRegForValue(PHIOp);
6064 SDL->PHINodesToUpdate.resize(OrigNumPHINodesToUpdate);
6067 SDL->PHINodesToUpdate.push_back(std::make_pair(MBBI++, Reg));