1 //===-- SelectionDAGBuild.cpp - Selection-DAG building --------------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This implements routines for translating from LLVM IR into SelectionDAG IR.
12 //===----------------------------------------------------------------------===//
14 #define DEBUG_TYPE "isel"
15 #include "SelectionDAGBuild.h"
16 #include "llvm/ADT/BitVector.h"
17 #include "llvm/ADT/SmallSet.h"
18 #include "llvm/Analysis/AliasAnalysis.h"
19 #include "llvm/Constants.h"
20 #include "llvm/CallingConv.h"
21 #include "llvm/DerivedTypes.h"
22 #include "llvm/Function.h"
23 #include "llvm/GlobalVariable.h"
24 #include "llvm/InlineAsm.h"
25 #include "llvm/Instructions.h"
26 #include "llvm/Intrinsics.h"
27 #include "llvm/IntrinsicInst.h"
28 #include "llvm/Module.h"
29 #include "llvm/CodeGen/FastISel.h"
30 #include "llvm/CodeGen/GCStrategy.h"
31 #include "llvm/CodeGen/GCMetadata.h"
32 #include "llvm/CodeGen/MachineFunction.h"
33 #include "llvm/CodeGen/MachineFrameInfo.h"
34 #include "llvm/CodeGen/MachineInstrBuilder.h"
35 #include "llvm/CodeGen/MachineJumpTableInfo.h"
36 #include "llvm/CodeGen/MachineModuleInfo.h"
37 #include "llvm/CodeGen/MachineRegisterInfo.h"
38 #include "llvm/CodeGen/PseudoSourceValue.h"
39 #include "llvm/CodeGen/SelectionDAG.h"
40 #include "llvm/CodeGen/DwarfWriter.h"
41 #include "llvm/Analysis/DebugInfo.h"
42 #include "llvm/Target/TargetRegisterInfo.h"
43 #include "llvm/Target/TargetData.h"
44 #include "llvm/Target/TargetFrameInfo.h"
45 #include "llvm/Target/TargetInstrInfo.h"
46 #include "llvm/Target/TargetIntrinsicInfo.h"
47 #include "llvm/Target/TargetLowering.h"
48 #include "llvm/Target/TargetOptions.h"
49 #include "llvm/Support/Compiler.h"
50 #include "llvm/Support/CommandLine.h"
51 #include "llvm/Support/Debug.h"
52 #include "llvm/Support/MathExtras.h"
53 #include "llvm/Support/raw_ostream.h"
57 /// LimitFloatPrecision - Generate low-precision inline sequences for
58 /// some float libcalls (6, 8 or 12 bits).
59 static unsigned LimitFloatPrecision;
61 static cl::opt<unsigned, true>
62 LimitFPPrecision("limit-float-precision",
63 cl::desc("Generate low-precision inline sequences "
64 "for some float libcalls"),
                 cl::location(LimitFloatPrecision),
                 cl::init(0));
68 /// ComputeLinearIndex - Given an LLVM IR aggregate type and a sequence
69 /// of insertvalue or extractvalue indices that identify a member, return
70 /// the linearized index of the start of the member.
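///
/// For example, for the aggregate type { i32, { float, double }, i8 } the
/// scalar members are numbered 0..3 in declaration order, so the index
/// sequence {1, 1} (the double field) linearizes to 2.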
72 static unsigned ComputeLinearIndex(const TargetLowering &TLI, const Type *Ty,
73 const unsigned *Indices,
74 const unsigned *IndicesEnd,
75 unsigned CurIndex = 0) {
  // Base case: We're done.
  if (Indices && Indices == IndicesEnd)
    return CurIndex;
80 // Given a struct type, recursively traverse the elements.
81 if (const StructType *STy = dyn_cast<StructType>(Ty)) {
82 for (StructType::element_iterator EB = STy->element_begin(),
84 EE = STy->element_end();
86 if (Indices && *Indices == unsigned(EI - EB))
87 return ComputeLinearIndex(TLI, *EI, Indices+1, IndicesEnd, CurIndex);
88 CurIndex = ComputeLinearIndex(TLI, *EI, 0, 0, CurIndex);
92 // Given an array type, recursively traverse the elements.
93 else if (const ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
94 const Type *EltTy = ATy->getElementType();
95 for (unsigned i = 0, e = ATy->getNumElements(); i != e; ++i) {
96 if (Indices && *Indices == i)
97 return ComputeLinearIndex(TLI, EltTy, Indices+1, IndicesEnd, CurIndex);
98 CurIndex = ComputeLinearIndex(TLI, EltTy, 0, 0, CurIndex);
102 // We haven't found the type we're looking for, so keep searching.
106 /// ComputeValueVTs - Given an LLVM IR type, compute a sequence of
107 /// MVTs that represent all the individual underlying
108 /// non-aggregate types that comprise it.
110 /// If Offsets is non-null, it points to a vector to be filled in
111 /// with the in-memory offsets of each of the individual values.
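///
/// For example, assuming a target where i32 and float each occupy 4 bytes,
/// the type { i32, [2 x float] } produces ValueVTs = { i32, f32, f32 } and,
/// if requested, Offsets = { 0, 4, 8 }.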
113 static void ComputeValueVTs(const TargetLowering &TLI, const Type *Ty,
114 SmallVectorImpl<MVT> &ValueVTs,
115 SmallVectorImpl<uint64_t> *Offsets = 0,
116 uint64_t StartingOffset = 0) {
117 // Given a struct type, recursively traverse the elements.
118 if (const StructType *STy = dyn_cast<StructType>(Ty)) {
119 const StructLayout *SL = TLI.getTargetData()->getStructLayout(STy);
120 for (StructType::element_iterator EB = STy->element_begin(),
122 EE = STy->element_end();
124 ComputeValueVTs(TLI, *EI, ValueVTs, Offsets,
125 StartingOffset + SL->getElementOffset(EI - EB));
128 // Given an array type, recursively traverse the elements.
129 if (const ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
130 const Type *EltTy = ATy->getElementType();
131 uint64_t EltSize = TLI.getTargetData()->getTypeAllocSize(EltTy);
132 for (unsigned i = 0, e = ATy->getNumElements(); i != e; ++i)
133 ComputeValueVTs(TLI, EltTy, ValueVTs, Offsets,
134 StartingOffset + i * EltSize);
  // Interpret void as zero return values.
  if (Ty == Type::VoidTy)
    return;
140 // Base case: we can get an MVT for this LLVM IR type.
141 ValueVTs.push_back(TLI.getValueType(Ty));
143 Offsets->push_back(StartingOffset);
147 /// RegsForValue - This struct represents the registers (physical or virtual)
148 /// that a particular set of values is assigned, and the type information about
149 /// the value. The most common situation is to represent one value at a time,
150 /// but struct or array values are handled element-wise as multiple values.
151 /// The splitting of aggregates is performed recursively, so that we never
152 /// have aggregate-typed registers. The values at this point do not necessarily
/// have legal types, so each value may require one or more registers of some
/// register type.
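///
/// For example, on a target whose widest legal integer register is i32, a
/// single i64 value is recorded here as ValueVTs = { i64 }, RegVTs = { i32 },
/// and two entries in Regs.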
156 struct VISIBILITY_HIDDEN RegsForValue {
157 /// TLI - The TargetLowering object.
159 const TargetLowering *TLI;
161 /// ValueVTs - The value types of the values, which may not be legal, and
/// may need to be promoted or synthesized from one or more registers.
164 SmallVector<MVT, 4> ValueVTs;
166 /// RegVTs - The value types of the registers. This is the same size as
167 /// ValueVTs and it records, for each value, what the type of the assigned
168 /// register or registers are. (Individual values are never synthesized
169 /// from more than one type of register.)
171 /// With virtual registers, the contents of RegVTs is redundant with TLI's
/// getRegisterType member function, however with physical registers
173 /// it is necessary to have a separate record of the types.
175 SmallVector<MVT, 4> RegVTs;
177 /// Regs - This list holds the registers assigned to the values.
178 /// Each legal or promoted value requires one register, and each
179 /// expanded value requires multiple registers.
181 SmallVector<unsigned, 4> Regs;
183 RegsForValue() : TLI(0) {}
185 RegsForValue(const TargetLowering &tli,
               const SmallVector<unsigned, 4> &regs,
187 MVT regvt, MVT valuevt)
188 : TLI(&tli), ValueVTs(1, valuevt), RegVTs(1, regvt), Regs(regs) {}
189 RegsForValue(const TargetLowering &tli,
               const SmallVector<unsigned, 4> &regs,
               const SmallVector<MVT, 4> &regvts,
192 const SmallVector<MVT, 4> &valuevts)
193 : TLI(&tli), ValueVTs(valuevts), RegVTs(regvts), Regs(regs) {}
194 RegsForValue(const TargetLowering &tli,
195 unsigned Reg, const Type *Ty) : TLI(&tli) {
196 ComputeValueVTs(tli, Ty, ValueVTs);
198 for (unsigned Value = 0, e = ValueVTs.size(); Value != e; ++Value) {
199 MVT ValueVT = ValueVTs[Value];
200 unsigned NumRegs = TLI->getNumRegisters(ValueVT);
201 MVT RegisterVT = TLI->getRegisterType(ValueVT);
202 for (unsigned i = 0; i != NumRegs; ++i)
203 Regs.push_back(Reg + i);
204 RegVTs.push_back(RegisterVT);
209 /// append - Add the specified values to this one.
210 void append(const RegsForValue &RHS) {
212 ValueVTs.append(RHS.ValueVTs.begin(), RHS.ValueVTs.end());
213 RegVTs.append(RHS.RegVTs.begin(), RHS.RegVTs.end());
214 Regs.append(RHS.Regs.begin(), RHS.Regs.end());
218 /// getCopyFromRegs - Emit a series of CopyFromReg nodes that copies from
219 /// this value and returns the result as a ValueVTs value. This uses
220 /// Chain/Flag as the input and updates them for the output Chain/Flag.
221 /// If the Flag pointer is NULL, no flag is used.
222 SDValue getCopyFromRegs(SelectionDAG &DAG, DebugLoc dl,
223 SDValue &Chain, SDValue *Flag) const;
225 /// getCopyToRegs - Emit a series of CopyToReg nodes that copies the
226 /// specified value into the registers specified by this object. This uses
227 /// Chain/Flag as the input and updates them for the output Chain/Flag.
228 /// If the Flag pointer is NULL, no flag is used.
229 void getCopyToRegs(SDValue Val, SelectionDAG &DAG, DebugLoc dl,
230 SDValue &Chain, SDValue *Flag) const;
232 /// AddInlineAsmOperands - Add this value to the specified inlineasm node
233 /// operand list. This adds the code marker, matching input operand index
234 /// (if applicable), and includes the number of values added into it.
235 void AddInlineAsmOperands(unsigned Code,
236 bool HasMatching, unsigned MatchingIdx,
237 SelectionDAG &DAG, std::vector<SDValue> &Ops) const;
241 /// isUsedOutsideOfDefiningBlock - Return true if this instruction is used by
242 /// PHI nodes or outside of the basic block that defines it, or used by a
243 /// switch or atomic instruction, which may expand to multiple basic blocks.
244 static bool isUsedOutsideOfDefiningBlock(Instruction *I) {
245 if (isa<PHINode>(I)) return true;
246 BasicBlock *BB = I->getParent();
247 for (Value::use_iterator UI = I->use_begin(), E = I->use_end(); UI != E; ++UI)
248 if (cast<Instruction>(*UI)->getParent() != BB || isa<PHINode>(*UI))
253 /// isOnlyUsedInEntryBlock - If the specified argument is only used in the
254 /// entry block, return true. This includes arguments used by switches, since
255 /// the switch may expand into multiple basic blocks.
256 static bool isOnlyUsedInEntryBlock(Argument *A, bool EnableFastISel) {
257 // With FastISel active, we may be splitting blocks, so force creation
258 // of virtual registers for all non-dead arguments.
259 // Don't force virtual registers for byval arguments though, because
260 // fast-isel can't handle those in all cases.
261 if (EnableFastISel && !A->hasByValAttr())
262 return A->use_empty();
264 BasicBlock *Entry = A->getParent()->begin();
265 for (Value::use_iterator UI = A->use_begin(), E = A->use_end(); UI != E; ++UI)
266 if (cast<Instruction>(*UI)->getParent() != Entry || isa<SwitchInst>(*UI))
267 return false; // Use not in entry block.
271 FunctionLoweringInfo::FunctionLoweringInfo(TargetLowering &tli)
275 void FunctionLoweringInfo::set(Function &fn, MachineFunction &mf,
277 bool EnableFastISel) {
280 RegInfo = &MF->getRegInfo();
282 // Create a vreg for each argument register that is not dead and is used
283 // outside of the entry block for the function.
284 for (Function::arg_iterator AI = Fn->arg_begin(), E = Fn->arg_end();
286 if (!isOnlyUsedInEntryBlock(AI, EnableFastISel))
287 InitializeRegForValue(AI);
289 // Initialize the mapping of values to registers. This is only set up for
290 // instruction values that are used outside of the block that defines
292 Function::iterator BB = Fn->begin(), EB = Fn->end();
293 for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I)
294 if (AllocaInst *AI = dyn_cast<AllocaInst>(I))
295 if (ConstantInt *CUI = dyn_cast<ConstantInt>(AI->getArraySize())) {
296 const Type *Ty = AI->getAllocatedType();
297 uint64_t TySize = TLI.getTargetData()->getTypeAllocSize(Ty);
299 std::max((unsigned)TLI.getTargetData()->getPrefTypeAlignment(Ty),
302 TySize *= CUI->getZExtValue(); // Get total allocated size.
303 if (TySize == 0) TySize = 1; // Don't create zero-sized stack objects.
304 StaticAllocaMap[AI] =
305 MF->getFrameInfo()->CreateStackObject(TySize, Align);
308 for (; BB != EB; ++BB)
309 for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I)
310 if (!I->use_empty() && isUsedOutsideOfDefiningBlock(I))
311 if (!isa<AllocaInst>(I) ||
312 !StaticAllocaMap.count(cast<AllocaInst>(I)))
313 InitializeRegForValue(I);
315 // Create an initial MachineBasicBlock for each LLVM BasicBlock in F. This
316 // also creates the initial PHI MachineInstrs, though none of the input
317 // operands are populated.
318 for (BB = Fn->begin(), EB = Fn->end(); BB != EB; ++BB) {
319 MachineBasicBlock *MBB = mf.CreateMachineBasicBlock(BB);
    // Create Machine PHI nodes for LLVM PHI nodes, lowering them as
    // appropriate.
    PHINode *PN;
    DebugLoc DL;
327 for (BasicBlock::iterator
328 I = BB->begin(), E = BB->end(); I != E; ++I) {
329 if (CallInst *CI = dyn_cast<CallInst>(I)) {
330 if (Function *F = CI->getCalledFunction()) {
331 switch (F->getIntrinsicID()) {
333 case Intrinsic::dbg_stoppoint: {
334 DbgStopPointInst *SPI = cast<DbgStopPointInst>(I);
335 if (isValidDebugInfoIntrinsic(*SPI, CodeGenOpt::Default))
336 DL = ExtractDebugLocation(*SPI, MF->getDebugLocInfo());
339 case Intrinsic::dbg_func_start: {
340 DbgFuncStartInst *FSI = cast<DbgFuncStartInst>(I);
341 if (isValidDebugInfoIntrinsic(*FSI, CodeGenOpt::Default))
342 DL = ExtractDebugLocation(*FSI, MF->getDebugLocInfo());
349 PN = dyn_cast<PHINode>(I);
350 if (!PN || PN->use_empty()) continue;
352 unsigned PHIReg = ValueMap[PN];
353 assert(PHIReg && "PHI node does not have an assigned virtual register!");
355 SmallVector<MVT, 4> ValueVTs;
356 ComputeValueVTs(TLI, PN->getType(), ValueVTs);
357 for (unsigned vti = 0, vte = ValueVTs.size(); vti != vte; ++vti) {
358 MVT VT = ValueVTs[vti];
359 unsigned NumRegisters = TLI.getNumRegisters(VT);
360 const TargetInstrInfo *TII = MF->getTarget().getInstrInfo();
361 for (unsigned i = 0; i != NumRegisters; ++i)
362 BuildMI(MBB, DL, TII->get(TargetInstrInfo::PHI), PHIReg + i);
363 PHIReg += NumRegisters;
369 unsigned FunctionLoweringInfo::MakeReg(MVT VT) {
370 return RegInfo->createVirtualRegister(TLI.getRegClassFor(VT));
373 /// CreateRegForValue - Allocate the appropriate number of virtual registers of
374 /// the correctly promoted or expanded types. Assign these registers
375 /// consecutive vreg numbers and return the first assigned number.
377 /// In the case that the given value has struct or array type, this function
378 /// will assign registers for each member or element.
380 unsigned FunctionLoweringInfo::CreateRegForValue(const Value *V) {
381 SmallVector<MVT, 4> ValueVTs;
382 ComputeValueVTs(TLI, V->getType(), ValueVTs);
384 unsigned FirstReg = 0;
385 for (unsigned Value = 0, e = ValueVTs.size(); Value != e; ++Value) {
386 MVT ValueVT = ValueVTs[Value];
387 MVT RegisterVT = TLI.getRegisterType(ValueVT);
389 unsigned NumRegs = TLI.getNumRegisters(ValueVT);
390 for (unsigned i = 0; i != NumRegs; ++i) {
391 unsigned R = MakeReg(RegisterVT);
392 if (!FirstReg) FirstReg = R;
398 /// getCopyFromParts - Create a value that contains the specified legal parts
399 /// combined into the value they represent. If the parts combine to a type
/// larger than ValueVT, then AssertOp can be used to specify whether the extra
401 /// bits are known to be zero (ISD::AssertZext) or sign extended from ValueVT
402 /// (ISD::AssertSext).
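///
/// For example, on a target where i64 is not legal, an i64 value arrives as
/// two i32 parts and is reassembled here with a BUILD_PAIR node.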
403 static SDValue getCopyFromParts(SelectionDAG &DAG, DebugLoc dl,
404 const SDValue *Parts,
405 unsigned NumParts, MVT PartVT, MVT ValueVT,
406 ISD::NodeType AssertOp = ISD::DELETED_NODE) {
407 assert(NumParts > 0 && "No parts to assemble!");
408 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
409 SDValue Val = Parts[0];
412 // Assemble the value from multiple parts.
413 if (!ValueVT.isVector() && ValueVT.isInteger()) {
414 unsigned PartBits = PartVT.getSizeInBits();
415 unsigned ValueBits = ValueVT.getSizeInBits();
417 // Assemble the power of 2 part.
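    // NumParts & (NumParts - 1) is nonzero exactly when NumParts is not a
    // power of 2; in that case RoundParts becomes the largest power of 2
    // that does not exceed NumParts.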
418 unsigned RoundParts = NumParts & (NumParts - 1) ?
419 1 << Log2_32(NumParts) : NumParts;
420 unsigned RoundBits = PartBits * RoundParts;
421 MVT RoundVT = RoundBits == ValueBits ?
422 ValueVT : MVT::getIntegerVT(RoundBits);
425 MVT HalfVT = MVT::getIntegerVT(RoundBits/2);
427 if (RoundParts > 2) {
428 Lo = getCopyFromParts(DAG, dl, Parts, RoundParts/2, PartVT, HalfVT);
429 Hi = getCopyFromParts(DAG, dl, Parts+RoundParts/2, RoundParts/2,
432 Lo = DAG.getNode(ISD::BIT_CONVERT, dl, HalfVT, Parts[0]);
433 Hi = DAG.getNode(ISD::BIT_CONVERT, dl, HalfVT, Parts[1]);
      if (TLI.isBigEndian())
        std::swap(Lo, Hi);
437 Val = DAG.getNode(ISD::BUILD_PAIR, dl, RoundVT, Lo, Hi);
439 if (RoundParts < NumParts) {
440 // Assemble the trailing non-power-of-2 part.
441 unsigned OddParts = NumParts - RoundParts;
442 MVT OddVT = MVT::getIntegerVT(OddParts * PartBits);
443 Hi = getCopyFromParts(DAG, dl,
444 Parts+RoundParts, OddParts, PartVT, OddVT);
446 // Combine the round and odd parts.
      if (TLI.isBigEndian())
        std::swap(Lo, Hi);
450 MVT TotalVT = MVT::getIntegerVT(NumParts * PartBits);
451 Hi = DAG.getNode(ISD::ANY_EXTEND, dl, TotalVT, Hi);
452 Hi = DAG.getNode(ISD::SHL, dl, TotalVT, Hi,
453 DAG.getConstant(Lo.getValueType().getSizeInBits(),
454 TLI.getPointerTy()));
455 Lo = DAG.getNode(ISD::ZERO_EXTEND, dl, TotalVT, Lo);
456 Val = DAG.getNode(ISD::OR, dl, TotalVT, Lo, Hi);
458 } else if (ValueVT.isVector()) {
459 // Handle a multi-element vector.
460 MVT IntermediateVT, RegisterVT;
461 unsigned NumIntermediates;
463 TLI.getVectorTypeBreakdown(ValueVT, IntermediateVT, NumIntermediates,
465 assert(NumRegs == NumParts && "Part count doesn't match vector breakdown!");
466 NumParts = NumRegs; // Silence a compiler warning.
467 assert(RegisterVT == PartVT && "Part type doesn't match vector breakdown!");
468 assert(RegisterVT == Parts[0].getValueType() &&
469 "Part type doesn't match part!");
471 // Assemble the parts into intermediate operands.
472 SmallVector<SDValue, 8> Ops(NumIntermediates);
473 if (NumIntermediates == NumParts) {
      // If the register was not expanded, truncate or copy the value,
      // as appropriate.
476 for (unsigned i = 0; i != NumParts; ++i)
477 Ops[i] = getCopyFromParts(DAG, dl, &Parts[i], 1,
478 PartVT, IntermediateVT);
479 } else if (NumParts > 0) {
      // If the intermediate type was expanded, build the intermediate operands
      // from the parts.
482 assert(NumParts % NumIntermediates == 0 &&
483 "Must expand into a divisible number of parts!");
484 unsigned Factor = NumParts / NumIntermediates;
485 for (unsigned i = 0; i != NumIntermediates; ++i)
486 Ops[i] = getCopyFromParts(DAG, dl, &Parts[i * Factor], Factor,
487 PartVT, IntermediateVT);
490 // Build a vector with BUILD_VECTOR or CONCAT_VECTORS from the intermediate
492 Val = DAG.getNode(IntermediateVT.isVector() ?
493 ISD::CONCAT_VECTORS : ISD::BUILD_VECTOR, dl,
494 ValueVT, &Ops[0], NumIntermediates);
495 } else if (PartVT.isFloatingPoint()) {
496 // FP split into multiple FP parts (for ppcf128)
497 assert(ValueVT == MVT(MVT::ppcf128) && PartVT == MVT(MVT::f64) &&
500 Lo = DAG.getNode(ISD::BIT_CONVERT, dl, MVT(MVT::f64), Parts[0]);
501 Hi = DAG.getNode(ISD::BIT_CONVERT, dl, MVT(MVT::f64), Parts[1]);
    if (TLI.isBigEndian())
      std::swap(Lo, Hi);
504 Val = DAG.getNode(ISD::BUILD_PAIR, dl, ValueVT, Lo, Hi);
506 // FP split into integer parts (soft fp)
507 assert(ValueVT.isFloatingPoint() && PartVT.isInteger() &&
508 !PartVT.isVector() && "Unexpected split");
509 MVT IntVT = MVT::getIntegerVT(ValueVT.getSizeInBits());
510 Val = getCopyFromParts(DAG, dl, Parts, NumParts, PartVT, IntVT);
514 // There is now one part, held in Val. Correct it to match ValueVT.
515 PartVT = Val.getValueType();
  if (PartVT == ValueVT)
    return Val;
520 if (PartVT.isVector()) {
521 assert(ValueVT.isVector() && "Unknown vector conversion!");
522 return DAG.getNode(ISD::BIT_CONVERT, dl, ValueVT, Val);
525 if (ValueVT.isVector()) {
526 assert(ValueVT.getVectorElementType() == PartVT &&
527 ValueVT.getVectorNumElements() == 1 &&
528 "Only trivial scalar-to-vector conversions should get here!");
529 return DAG.getNode(ISD::BUILD_VECTOR, dl, ValueVT, Val);
532 if (PartVT.isInteger() &&
533 ValueVT.isInteger()) {
534 if (ValueVT.bitsLT(PartVT)) {
535 // For a truncate, see if we have any information to
536 // indicate whether the truncated bits will always be
537 // zero or sign-extension.
538 if (AssertOp != ISD::DELETED_NODE)
539 Val = DAG.getNode(AssertOp, dl, PartVT, Val,
540 DAG.getValueType(ValueVT));
541 return DAG.getNode(ISD::TRUNCATE, dl, ValueVT, Val);
543 return DAG.getNode(ISD::ANY_EXTEND, dl, ValueVT, Val);
547 if (PartVT.isFloatingPoint() && ValueVT.isFloatingPoint()) {
548 if (ValueVT.bitsLT(Val.getValueType()))
549 // FP_ROUND's are always exact here.
550 return DAG.getNode(ISD::FP_ROUND, dl, ValueVT, Val,
551 DAG.getIntPtrConstant(1));
552 return DAG.getNode(ISD::FP_EXTEND, dl, ValueVT, Val);
555 if (PartVT.getSizeInBits() == ValueVT.getSizeInBits())
556 return DAG.getNode(ISD::BIT_CONVERT, dl, ValueVT, Val);
558 assert(0 && "Unknown mismatch!");
562 /// getCopyToParts - Create a series of nodes that contain the specified value
563 /// split into legal parts. If the parts contain more bits than Val, then, for
564 /// integers, ExtendKind can be used to specify how to generate the extra bits.
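///
/// For example, splitting an i64 value into two i32 parts on a 32-bit target
/// extracts the low and high halves with EXTRACT_ELEMENT; ExtendKind only
/// matters when the parts together are wider than the value being copied.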
565 static void getCopyToParts(SelectionDAG &DAG, DebugLoc dl, SDValue Val,
566 SDValue *Parts, unsigned NumParts, MVT PartVT,
567 ISD::NodeType ExtendKind = ISD::ANY_EXTEND) {
568 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
569 MVT PtrVT = TLI.getPointerTy();
570 MVT ValueVT = Val.getValueType();
571 unsigned PartBits = PartVT.getSizeInBits();
572 unsigned OrigNumParts = NumParts;
573 assert(TLI.isTypeLegal(PartVT) && "Copying to an illegal type!");
578 if (!ValueVT.isVector()) {
579 if (PartVT == ValueVT) {
580 assert(NumParts == 1 && "No-op copy with multiple parts!");
585 if (NumParts * PartBits > ValueVT.getSizeInBits()) {
586 // If the parts cover more bits than the value has, promote the value.
587 if (PartVT.isFloatingPoint() && ValueVT.isFloatingPoint()) {
588 assert(NumParts == 1 && "Do not know what to promote to!");
589 Val = DAG.getNode(ISD::FP_EXTEND, dl, PartVT, Val);
590 } else if (PartVT.isInteger() && ValueVT.isInteger()) {
591 ValueVT = MVT::getIntegerVT(NumParts * PartBits);
592 Val = DAG.getNode(ExtendKind, dl, ValueVT, Val);
594 assert(0 && "Unknown mismatch!");
596 } else if (PartBits == ValueVT.getSizeInBits()) {
597 // Different types of the same size.
598 assert(NumParts == 1 && PartVT != ValueVT);
599 Val = DAG.getNode(ISD::BIT_CONVERT, dl, PartVT, Val);
600 } else if (NumParts * PartBits < ValueVT.getSizeInBits()) {
    // If the parts cover fewer bits than the value has, truncate the value.
602 if (PartVT.isInteger() && ValueVT.isInteger()) {
603 ValueVT = MVT::getIntegerVT(NumParts * PartBits);
604 Val = DAG.getNode(ISD::TRUNCATE, dl, ValueVT, Val);
606 assert(0 && "Unknown mismatch!");
610 // The value may have changed - recompute ValueVT.
611 ValueVT = Val.getValueType();
612 assert(NumParts * PartBits == ValueVT.getSizeInBits() &&
613 "Failed to tile the value with PartVT!");
616 assert(PartVT == ValueVT && "Type conversion failed!");
621 // Expand the value into multiple parts.
622 if (NumParts & (NumParts - 1)) {
623 // The number of parts is not a power of 2. Split off and copy the tail.
624 assert(PartVT.isInteger() && ValueVT.isInteger() &&
625 "Do not know what to expand to!");
626 unsigned RoundParts = 1 << Log2_32(NumParts);
627 unsigned RoundBits = RoundParts * PartBits;
628 unsigned OddParts = NumParts - RoundParts;
629 SDValue OddVal = DAG.getNode(ISD::SRL, dl, ValueVT, Val,
630 DAG.getConstant(RoundBits,
631 TLI.getPointerTy()));
632 getCopyToParts(DAG, dl, OddVal, Parts + RoundParts, OddParts, PartVT);
633 if (TLI.isBigEndian())
634 // The odd parts were reversed by getCopyToParts - unreverse them.
635 std::reverse(Parts + RoundParts, Parts + NumParts);
636 NumParts = RoundParts;
637 ValueVT = MVT::getIntegerVT(NumParts * PartBits);
638 Val = DAG.getNode(ISD::TRUNCATE, dl, ValueVT, Val);
  // The number of parts is a power of 2.  Repeatedly bisect the value using
  // EXTRACT_ELEMENT.
643 Parts[0] = DAG.getNode(ISD::BIT_CONVERT, dl,
644 MVT::getIntegerVT(ValueVT.getSizeInBits()),
646 for (unsigned StepSize = NumParts; StepSize > 1; StepSize /= 2) {
647 for (unsigned i = 0; i < NumParts; i += StepSize) {
648 unsigned ThisBits = StepSize * PartBits / 2;
649 MVT ThisVT = MVT::getIntegerVT (ThisBits);
650 SDValue &Part0 = Parts[i];
651 SDValue &Part1 = Parts[i+StepSize/2];
653 Part1 = DAG.getNode(ISD::EXTRACT_ELEMENT, dl,
655 DAG.getConstant(1, PtrVT));
656 Part0 = DAG.getNode(ISD::EXTRACT_ELEMENT, dl,
658 DAG.getConstant(0, PtrVT));
660 if (ThisBits == PartBits && ThisVT != PartVT) {
661 Part0 = DAG.getNode(ISD::BIT_CONVERT, dl,
663 Part1 = DAG.getNode(ISD::BIT_CONVERT, dl,
669 if (TLI.isBigEndian())
670 std::reverse(Parts, Parts + OrigNumParts);
677 if (PartVT != ValueVT) {
678 if (PartVT.isVector()) {
679 Val = DAG.getNode(ISD::BIT_CONVERT, dl, PartVT, Val);
681 assert(ValueVT.getVectorElementType() == PartVT &&
682 ValueVT.getVectorNumElements() == 1 &&
683 "Only trivial vector-to-scalar conversions should get here!");
684 Val = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl,
686 DAG.getConstant(0, PtrVT));
694 // Handle a multi-element vector.
695 MVT IntermediateVT, RegisterVT;
696 unsigned NumIntermediates;
697 unsigned NumRegs = TLI
698 .getVectorTypeBreakdown(ValueVT, IntermediateVT, NumIntermediates,
700 unsigned NumElements = ValueVT.getVectorNumElements();
702 assert(NumRegs == NumParts && "Part count doesn't match vector breakdown!");
703 NumParts = NumRegs; // Silence a compiler warning.
704 assert(RegisterVT == PartVT && "Part type doesn't match vector breakdown!");
706 // Split the vector into intermediate operands.
707 SmallVector<SDValue, 8> Ops(NumIntermediates);
708 for (unsigned i = 0; i != NumIntermediates; ++i)
709 if (IntermediateVT.isVector())
710 Ops[i] = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl,
712 DAG.getConstant(i * (NumElements / NumIntermediates),
715 Ops[i] = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl,
717 DAG.getConstant(i, PtrVT));
719 // Split the intermediate operands into legal parts.
720 if (NumParts == NumIntermediates) {
    // If the register was not expanded, promote or copy the value,
    // as appropriate.
723 for (unsigned i = 0; i != NumParts; ++i)
724 getCopyToParts(DAG, dl, Ops[i], &Parts[i], 1, PartVT);
725 } else if (NumParts > 0) {
    // If the intermediate type was expanded, split the value into
    // legal parts.
728 assert(NumParts % NumIntermediates == 0 &&
729 "Must expand into a divisible number of parts!");
730 unsigned Factor = NumParts / NumIntermediates;
731 for (unsigned i = 0; i != NumIntermediates; ++i)
732 getCopyToParts(DAG, dl, Ops[i], &Parts[i * Factor], Factor, PartVT);
737 void SelectionDAGLowering::init(GCFunctionInfo *gfi, AliasAnalysis &aa) {
740 TD = DAG.getTarget().getTargetData();
/// clear - Clear out the current SelectionDAG and the associated
744 /// state and prepare this SelectionDAGLowering object to be used
745 /// for a new block. This doesn't clear out information about
746 /// additional blocks that are needed to complete switch lowering
/// or PHI node updating; that information is cleared out as it is
/// consumed.
749 void SelectionDAGLowering::clear() {
751 PendingLoads.clear();
752 PendingExports.clear();
754 CurDebugLoc = DebugLoc::getUnknownLoc();
757 /// getRoot - Return the current virtual root of the Selection DAG,
758 /// flushing any PendingLoad items. This must be done before emitting
759 /// a store or any other node that may need to be ordered after any
760 /// prior load instructions.
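///
/// When more than one load is pending, their chains are merged with a
/// TokenFactor node, which orders the new root after all of them without
/// imposing any ordering among the loads themselves.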
762 SDValue SelectionDAGLowering::getRoot() {
763 if (PendingLoads.empty())
764 return DAG.getRoot();
766 if (PendingLoads.size() == 1) {
767 SDValue Root = PendingLoads[0];
769 PendingLoads.clear();
773 // Otherwise, we have to make a token factor node.
774 SDValue Root = DAG.getNode(ISD::TokenFactor, getCurDebugLoc(), MVT::Other,
775 &PendingLoads[0], PendingLoads.size());
776 PendingLoads.clear();
781 /// getControlRoot - Similar to getRoot, but instead of flushing all the
782 /// PendingLoad items, flush all the PendingExports items. It is necessary
783 /// to do this before emitting a terminator instruction.
785 SDValue SelectionDAGLowering::getControlRoot() {
786 SDValue Root = DAG.getRoot();
  if (PendingExports.empty())
    return Root;
791 // Turn all of the CopyToReg chains into one factored node.
792 if (Root.getOpcode() != ISD::EntryToken) {
793 unsigned i = 0, e = PendingExports.size();
794 for (; i != e; ++i) {
795 assert(PendingExports[i].getNode()->getNumOperands() > 1);
796 if (PendingExports[i].getNode()->getOperand(0) == Root)
797 break; // Don't add the root if we already indirectly depend on it.
801 PendingExports.push_back(Root);
804 Root = DAG.getNode(ISD::TokenFactor, getCurDebugLoc(), MVT::Other,
806 PendingExports.size());
807 PendingExports.clear();
812 void SelectionDAGLowering::visit(Instruction &I) {
813 visit(I.getOpcode(), I);
816 void SelectionDAGLowering::visit(unsigned Opcode, User &I) {
817 // Note: this doesn't use InstVisitor, because it has to work with
818 // ConstantExpr's in addition to instructions.
820 default: assert(0 && "Unknown instruction type encountered!");
822 // Build the switch statement using the Instruction.def file.
823 #define HANDLE_INST(NUM, OPCODE, CLASS) \
824 case Instruction::OPCODE:return visit##OPCODE((CLASS&)I);
825 #include "llvm/Instruction.def"
829 SDValue SelectionDAGLowering::getValue(const Value *V) {
830 SDValue &N = NodeMap[V];
831 if (N.getNode()) return N;
833 if (Constant *C = const_cast<Constant*>(dyn_cast<Constant>(V))) {
834 MVT VT = TLI.getValueType(V->getType(), true);
836 if (ConstantInt *CI = dyn_cast<ConstantInt>(C))
837 return N = DAG.getConstant(*CI, VT);
839 if (GlobalValue *GV = dyn_cast<GlobalValue>(C))
840 return N = DAG.getGlobalAddress(GV, VT);
842 if (isa<ConstantPointerNull>(C))
843 return N = DAG.getConstant(0, TLI.getPointerTy());
845 if (ConstantFP *CFP = dyn_cast<ConstantFP>(C))
846 return N = DAG.getConstantFP(*CFP, VT);
848 if (isa<UndefValue>(C) && !V->getType()->isAggregateType())
849 return N = DAG.getUNDEF(VT);
851 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) {
852 visit(CE->getOpcode(), *CE);
853 SDValue N1 = NodeMap[V];
854 assert(N1.getNode() && "visit didn't populate the ValueMap!");
858 if (isa<ConstantStruct>(C) || isa<ConstantArray>(C)) {
859 SmallVector<SDValue, 4> Constants;
860 for (User::const_op_iterator OI = C->op_begin(), OE = C->op_end();
862 SDNode *Val = getValue(*OI).getNode();
863 for (unsigned i = 0, e = Val->getNumValues(); i != e; ++i)
864 Constants.push_back(SDValue(Val, i));
866 return DAG.getMergeValues(&Constants[0], Constants.size(),
870 if (isa<StructType>(C->getType()) || isa<ArrayType>(C->getType())) {
871 assert((isa<ConstantAggregateZero>(C) || isa<UndefValue>(C)) &&
872 "Unknown struct or array constant!");
874 SmallVector<MVT, 4> ValueVTs;
875 ComputeValueVTs(TLI, C->getType(), ValueVTs);
876 unsigned NumElts = ValueVTs.size();
      if (NumElts == 0)
        return SDValue();  // empty struct
879 SmallVector<SDValue, 4> Constants(NumElts);
880 for (unsigned i = 0; i != NumElts; ++i) {
881 MVT EltVT = ValueVTs[i];
882 if (isa<UndefValue>(C))
883 Constants[i] = DAG.getUNDEF(EltVT);
884 else if (EltVT.isFloatingPoint())
885 Constants[i] = DAG.getConstantFP(0, EltVT);
        else
          Constants[i] = DAG.getConstant(0, EltVT);
889 return DAG.getMergeValues(&Constants[0], NumElts, getCurDebugLoc());
892 const VectorType *VecTy = cast<VectorType>(V->getType());
893 unsigned NumElements = VecTy->getNumElements();
895 // Now that we know the number and type of the elements, get that number of
896 // elements into the Ops array based on what kind of constant it is.
897 SmallVector<SDValue, 16> Ops;
898 if (ConstantVector *CP = dyn_cast<ConstantVector>(C)) {
899 for (unsigned i = 0; i != NumElements; ++i)
900 Ops.push_back(getValue(CP->getOperand(i)));
902 assert(isa<ConstantAggregateZero>(C) && "Unknown vector constant!");
903 MVT EltVT = TLI.getValueType(VecTy->getElementType());
      SDValue Op;
      if (EltVT.isFloatingPoint())
        Op = DAG.getConstantFP(0, EltVT);
      else
        Op = DAG.getConstant(0, EltVT);
910 Ops.assign(NumElements, Op);
913 // Create a BUILD_VECTOR node.
914 return NodeMap[V] = DAG.getNode(ISD::BUILD_VECTOR, getCurDebugLoc(),
915 VT, &Ops[0], Ops.size());
918 // If this is a static alloca, generate it as the frameindex instead of
920 if (const AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
921 DenseMap<const AllocaInst*, int>::iterator SI =
922 FuncInfo.StaticAllocaMap.find(AI);
923 if (SI != FuncInfo.StaticAllocaMap.end())
924 return DAG.getFrameIndex(SI->second, TLI.getPointerTy());
927 unsigned InReg = FuncInfo.ValueMap[V];
928 assert(InReg && "Value not in map!");
930 RegsForValue RFV(TLI, InReg, V->getType());
931 SDValue Chain = DAG.getEntryNode();
932 return RFV.getCopyFromRegs(DAG, getCurDebugLoc(), Chain, NULL);
936 void SelectionDAGLowering::visitRet(ReturnInst &I) {
937 if (I.getNumOperands() == 0) {
938 DAG.setRoot(DAG.getNode(ISD::RET, getCurDebugLoc(),
939 MVT::Other, getControlRoot()));
943 SmallVector<SDValue, 8> NewValues;
944 NewValues.push_back(getControlRoot());
945 for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i) {
946 SmallVector<MVT, 4> ValueVTs;
947 ComputeValueVTs(TLI, I.getOperand(i)->getType(), ValueVTs);
948 unsigned NumValues = ValueVTs.size();
949 if (NumValues == 0) continue;
951 SDValue RetOp = getValue(I.getOperand(i));
952 for (unsigned j = 0, f = NumValues; j != f; ++j) {
953 MVT VT = ValueVTs[j];
955 ISD::NodeType ExtendKind = ISD::ANY_EXTEND;
957 const Function *F = I.getParent()->getParent();
958 if (F->paramHasAttr(0, Attribute::SExt))
959 ExtendKind = ISD::SIGN_EXTEND;
960 else if (F->paramHasAttr(0, Attribute::ZExt))
961 ExtendKind = ISD::ZERO_EXTEND;
963 // FIXME: C calling convention requires the return type to be promoted to
964 // at least 32-bit. But this is not necessary for non-C calling
965 // conventions. The frontend should mark functions whose return values
966 // require promoting with signext or zeroext attributes.
967 if (ExtendKind != ISD::ANY_EXTEND && VT.isInteger()) {
968 MVT MinVT = TLI.getRegisterType(MVT::i32);
        if (VT.bitsLT(MinVT))
          VT = MinVT;
973 unsigned NumParts = TLI.getNumRegisters(VT);
974 MVT PartVT = TLI.getRegisterType(VT);
975 SmallVector<SDValue, 4> Parts(NumParts);
976 getCopyToParts(DAG, getCurDebugLoc(),
977 SDValue(RetOp.getNode(), RetOp.getResNo() + j),
978 &Parts[0], NumParts, PartVT, ExtendKind);
980 // 'inreg' on function refers to return value
981 ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy();
      if (F->paramHasAttr(0, Attribute::InReg))
        Flags.setInReg();
984 for (unsigned i = 0; i < NumParts; ++i) {
985 NewValues.push_back(Parts[i]);
986 NewValues.push_back(DAG.getArgFlags(Flags));
990 DAG.setRoot(DAG.getNode(ISD::RET, getCurDebugLoc(), MVT::Other,
991 &NewValues[0], NewValues.size()));
994 /// CopyToExportRegsIfNeeded - If the given value has virtual registers
/// created for it, emit nodes to copy the value into the virtual
/// registers.
997 void SelectionDAGLowering::CopyToExportRegsIfNeeded(Value *V) {
998 if (!V->use_empty()) {
999 DenseMap<const Value *, unsigned>::iterator VMI = FuncInfo.ValueMap.find(V);
1000 if (VMI != FuncInfo.ValueMap.end())
1001 CopyValueToVirtualRegister(V, VMI->second);
1005 /// ExportFromCurrentBlock - If this condition isn't known to be exported from
1006 /// the current basic block, add it to ValueMap now so that we'll get a
1008 void SelectionDAGLowering::ExportFromCurrentBlock(Value *V) {
1009 // No need to export constants.
1010 if (!isa<Instruction>(V) && !isa<Argument>(V)) return;
1012 // Already exported?
1013 if (FuncInfo.isExportedInst(V)) return;
1015 unsigned Reg = FuncInfo.InitializeRegForValue(V);
1016 CopyValueToVirtualRegister(V, Reg);
1019 bool SelectionDAGLowering::isExportableFromCurrentBlock(Value *V,
1020 const BasicBlock *FromBB) {
1021 // The operands of the setcc have to be in this block. We don't know
1022 // how to export them from some other block.
1023 if (Instruction *VI = dyn_cast<Instruction>(V)) {
1024 // Can export from current BB.
1025 if (VI->getParent() == FromBB)
1028 // Is already exported, noop.
1029 return FuncInfo.isExportedInst(V);
1032 // If this is an argument, we can export it if the BB is the entry block or
1033 // if it is already exported.
1034 if (isa<Argument>(V)) {
1035 if (FromBB == &FromBB->getParent()->getEntryBlock())
1038 // Otherwise, can only export this if it is already exported.
1039 return FuncInfo.isExportedInst(V);
1042 // Otherwise, constants can always be exported.
1046 static bool InBlock(const Value *V, const BasicBlock *BB) {
1047 if (const Instruction *I = dyn_cast<Instruction>(V))
1048 return I->getParent() == BB;
1052 /// getFCmpCondCode - Return the ISD condition code corresponding to
1053 /// the given LLVM IR floating-point condition code. This includes
1054 /// consideration of global floating-point math flags.
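///
/// FPC is the full ordered/unordered condition code; FOC drops the
/// ordered/unordered distinction and is returned instead when
/// FiniteOnlyFPMath() says NaNs can be ignored.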
1056 static ISD::CondCode getFCmpCondCode(FCmpInst::Predicate Pred) {
1057 ISD::CondCode FPC, FOC;
1059 case FCmpInst::FCMP_FALSE: FOC = FPC = ISD::SETFALSE; break;
1060 case FCmpInst::FCMP_OEQ: FOC = ISD::SETEQ; FPC = ISD::SETOEQ; break;
1061 case FCmpInst::FCMP_OGT: FOC = ISD::SETGT; FPC = ISD::SETOGT; break;
1062 case FCmpInst::FCMP_OGE: FOC = ISD::SETGE; FPC = ISD::SETOGE; break;
1063 case FCmpInst::FCMP_OLT: FOC = ISD::SETLT; FPC = ISD::SETOLT; break;
1064 case FCmpInst::FCMP_OLE: FOC = ISD::SETLE; FPC = ISD::SETOLE; break;
1065 case FCmpInst::FCMP_ONE: FOC = ISD::SETNE; FPC = ISD::SETONE; break;
1066 case FCmpInst::FCMP_ORD: FOC = FPC = ISD::SETO; break;
1067 case FCmpInst::FCMP_UNO: FOC = FPC = ISD::SETUO; break;
1068 case FCmpInst::FCMP_UEQ: FOC = ISD::SETEQ; FPC = ISD::SETUEQ; break;
1069 case FCmpInst::FCMP_UGT: FOC = ISD::SETGT; FPC = ISD::SETUGT; break;
1070 case FCmpInst::FCMP_UGE: FOC = ISD::SETGE; FPC = ISD::SETUGE; break;
1071 case FCmpInst::FCMP_ULT: FOC = ISD::SETLT; FPC = ISD::SETULT; break;
1072 case FCmpInst::FCMP_ULE: FOC = ISD::SETLE; FPC = ISD::SETULE; break;
1073 case FCmpInst::FCMP_UNE: FOC = ISD::SETNE; FPC = ISD::SETUNE; break;
1074 case FCmpInst::FCMP_TRUE: FOC = FPC = ISD::SETTRUE; break;
1076 assert(0 && "Invalid FCmp predicate opcode!");
1077 FOC = FPC = ISD::SETFALSE;
  if (FiniteOnlyFPMath())
    return FOC;
  else
    return FPC;
}
1086 /// getICmpCondCode - Return the ISD condition code corresponding to
1087 /// the given LLVM IR integer condition code.
1089 static ISD::CondCode getICmpCondCode(ICmpInst::Predicate Pred) {
1091 case ICmpInst::ICMP_EQ: return ISD::SETEQ;
1092 case ICmpInst::ICMP_NE: return ISD::SETNE;
1093 case ICmpInst::ICMP_SLE: return ISD::SETLE;
1094 case ICmpInst::ICMP_ULE: return ISD::SETULE;
1095 case ICmpInst::ICMP_SGE: return ISD::SETGE;
1096 case ICmpInst::ICMP_UGE: return ISD::SETUGE;
1097 case ICmpInst::ICMP_SLT: return ISD::SETLT;
1098 case ICmpInst::ICMP_ULT: return ISD::SETULT;
1099 case ICmpInst::ICMP_SGT: return ISD::SETGT;
1100 case ICmpInst::ICMP_UGT: return ISD::SETUGT;
1102 assert(0 && "Invalid ICmp predicate opcode!");
1107 /// EmitBranchForMergedCondition - Helper method for FindMergedConditions.
1108 /// This function emits a branch and is used at the leaves of an OR or an
1109 /// AND operator tree.
1112 SelectionDAGLowering::EmitBranchForMergedCondition(Value *Cond,
1113 MachineBasicBlock *TBB,
1114 MachineBasicBlock *FBB,
1115 MachineBasicBlock *CurBB) {
1116 const BasicBlock *BB = CurBB->getBasicBlock();
1118 // If the leaf of the tree is a comparison, merge the condition into
1120 if (CmpInst *BOp = dyn_cast<CmpInst>(Cond)) {
1121 // The operands of the cmp have to be in this block. We don't know
1122 // how to export them from some other block. If this is the first block
1123 // of the sequence, no exporting is needed.
1124 if (CurBB == CurMBB ||
1125 (isExportableFromCurrentBlock(BOp->getOperand(0), BB) &&
1126 isExportableFromCurrentBlock(BOp->getOperand(1), BB))) {
1127 ISD::CondCode Condition;
1128 if (ICmpInst *IC = dyn_cast<ICmpInst>(Cond)) {
1129 Condition = getICmpCondCode(IC->getPredicate());
1130 } else if (FCmpInst *FC = dyn_cast<FCmpInst>(Cond)) {
1131 Condition = getFCmpCondCode(FC->getPredicate());
1133 Condition = ISD::SETEQ; // silence warning.
1134 assert(0 && "Unknown compare instruction");
1137 CaseBlock CB(Condition, BOp->getOperand(0),
1138 BOp->getOperand(1), NULL, TBB, FBB, CurBB);
1139 SwitchCases.push_back(CB);
1144 // Create a CaseBlock record representing this branch.
1145 CaseBlock CB(ISD::SETEQ, Cond, ConstantInt::getTrue(),
1146 NULL, TBB, FBB, CurBB);
1147 SwitchCases.push_back(CB);
/// FindMergedConditions - If Cond is an expression like (X && Y) or (X || Y),
/// recursively emit it as a sequence of conditional branches, handling the
/// leaves with EmitBranchForMergedCondition.
1151 void SelectionDAGLowering::FindMergedConditions(Value *Cond,
1152 MachineBasicBlock *TBB,
1153 MachineBasicBlock *FBB,
1154 MachineBasicBlock *CurBB,
1156 // If this node is not part of the or/and tree, emit it as a branch.
1157 Instruction *BOp = dyn_cast<Instruction>(Cond);
1158 if (!BOp || !(isa<BinaryOperator>(BOp) || isa<CmpInst>(BOp)) ||
1159 (unsigned)BOp->getOpcode() != Opc || !BOp->hasOneUse() ||
1160 BOp->getParent() != CurBB->getBasicBlock() ||
1161 !InBlock(BOp->getOperand(0), CurBB->getBasicBlock()) ||
1162 !InBlock(BOp->getOperand(1), CurBB->getBasicBlock())) {
1163 EmitBranchForMergedCondition(Cond, TBB, FBB, CurBB);
1167 // Create TmpBB after CurBB.
1168 MachineFunction::iterator BBI = CurBB;
1169 MachineFunction &MF = DAG.getMachineFunction();
1170 MachineBasicBlock *TmpBB = MF.CreateMachineBasicBlock(CurBB->getBasicBlock());
1171 CurBB->getParent()->insert(++BBI, TmpBB);
1173 if (Opc == Instruction::Or) {
1174 // Codegen X | Y as:
1182 // Emit the LHS condition.
1183 FindMergedConditions(BOp->getOperand(0), TBB, TmpBB, CurBB, Opc);
1185 // Emit the RHS condition into TmpBB.
1186 FindMergedConditions(BOp->getOperand(1), TBB, FBB, TmpBB, Opc);
1188 assert(Opc == Instruction::And && "Unknown merge op!");
1189 // Codegen X & Y as:
1196 // This requires creation of TmpBB after CurBB.
1198 // Emit the LHS condition.
1199 FindMergedConditions(BOp->getOperand(0), TmpBB, FBB, CurBB, Opc);
1201 // Emit the RHS condition into TmpBB.
1202 FindMergedConditions(BOp->getOperand(1), TBB, FBB, TmpBB, Opc);
1206 /// If the set of cases should be emitted as a series of branches, return true.
/// If we should emit this as a bunch of and/or'd together conditions, return
/// false.
bool
SelectionDAGLowering::ShouldEmitAsBranches(const std::vector<CaseBlock> &Cases){
1211 if (Cases.size() != 2) return true;
1213 // If this is two comparisons of the same values or'd or and'd together, they
1214 // will get folded into a single comparison, so don't emit two blocks.
1215 if ((Cases[0].CmpLHS == Cases[1].CmpLHS &&
1216 Cases[0].CmpRHS == Cases[1].CmpRHS) ||
1217 (Cases[0].CmpRHS == Cases[1].CmpLHS &&
1218 Cases[0].CmpLHS == Cases[1].CmpRHS)) {
1225 void SelectionDAGLowering::visitBr(BranchInst &I) {
1226 // Update machine-CFG edges.
1227 MachineBasicBlock *Succ0MBB = FuncInfo.MBBMap[I.getSuccessor(0)];
1229 // Figure out which block is immediately after the current one.
1230 MachineBasicBlock *NextBlock = 0;
1231 MachineFunction::iterator BBI = CurMBB;
  if (++BBI != CurMBB->getParent()->end())
    NextBlock = BBI;
1235 if (I.isUnconditional()) {
1236 // Update machine-CFG edges.
1237 CurMBB->addSuccessor(Succ0MBB);
1239 // If this is not a fall-through branch, emit the branch.
1240 if (Succ0MBB != NextBlock)
1241 DAG.setRoot(DAG.getNode(ISD::BR, getCurDebugLoc(),
1242 MVT::Other, getControlRoot(),
1243 DAG.getBasicBlock(Succ0MBB)));
1247 // If this condition is one of the special cases we handle, do special stuff
1249 Value *CondVal = I.getCondition();
1250 MachineBasicBlock *Succ1MBB = FuncInfo.MBBMap[I.getSuccessor(1)];
1252 // If this is a series of conditions that are or'd or and'd together, emit
1253 // this as a sequence of branches instead of setcc's with and/or operations.
1254 // For example, instead of something like:
1267 if (BinaryOperator *BOp = dyn_cast<BinaryOperator>(CondVal)) {
1268 if (BOp->hasOneUse() &&
1269 (BOp->getOpcode() == Instruction::And ||
1270 BOp->getOpcode() == Instruction::Or)) {
1271 FindMergedConditions(BOp, Succ0MBB, Succ1MBB, CurMBB, BOp->getOpcode());
1272 // If the compares in later blocks need to use values not currently
1273 // exported from this block, export them now. This block should always
1274 // be the first entry.
1275 assert(SwitchCases[0].ThisBB == CurMBB && "Unexpected lowering!");
1277 // Allow some cases to be rejected.
1278 if (ShouldEmitAsBranches(SwitchCases)) {
1279 for (unsigned i = 1, e = SwitchCases.size(); i != e; ++i) {
1280 ExportFromCurrentBlock(SwitchCases[i].CmpLHS);
1281 ExportFromCurrentBlock(SwitchCases[i].CmpRHS);
1284 // Emit the branch for this block.
1285 visitSwitchCase(SwitchCases[0]);
1286 SwitchCases.erase(SwitchCases.begin());
1290 // Okay, we decided not to do this, remove any inserted MBB's and clear
1292 for (unsigned i = 1, e = SwitchCases.size(); i != e; ++i)
1293 CurMBB->getParent()->erase(SwitchCases[i].ThisBB);
1295 SwitchCases.clear();
1299 // Create a CaseBlock record representing this branch.
1300 CaseBlock CB(ISD::SETEQ, CondVal, ConstantInt::getTrue(),
1301 NULL, Succ0MBB, Succ1MBB, CurMBB);
  // Use visitSwitchCase to actually insert the fast branch sequence for this
  // comparison.
1304 visitSwitchCase(CB);
1307 /// visitSwitchCase - Emits the necessary code to represent a single node in
1308 /// the binary search tree resulting from lowering a switch instruction.
1309 void SelectionDAGLowering::visitSwitchCase(CaseBlock &CB) {
1311 SDValue CondLHS = getValue(CB.CmpLHS);
1312 DebugLoc dl = getCurDebugLoc();
1314 // Build the setcc now.
1315 if (CB.CmpMHS == NULL) {
1316 // Fold "(X == true)" to X and "(X == false)" to !X to
1317 // handle common cases produced by branch lowering.
    if (CB.CmpRHS == ConstantInt::getTrue() && CB.CC == ISD::SETEQ)
      Cond = CondLHS;
1320 else if (CB.CmpRHS == ConstantInt::getFalse() && CB.CC == ISD::SETEQ) {
1321 SDValue True = DAG.getConstant(1, CondLHS.getValueType());
1322 Cond = DAG.getNode(ISD::XOR, dl, CondLHS.getValueType(), CondLHS, True);
1324 Cond = DAG.getSetCC(dl, MVT::i1, CondLHS, getValue(CB.CmpRHS), CB.CC);
1326 assert(CB.CC == ISD::SETLE && "Can handle only LE ranges now");
1328 const APInt& Low = cast<ConstantInt>(CB.CmpLHS)->getValue();
1329 const APInt& High = cast<ConstantInt>(CB.CmpRHS)->getValue();
1331 SDValue CmpOp = getValue(CB.CmpMHS);
1332 MVT VT = CmpOp.getValueType();
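    // The range test Low <= X <= High is emitted as (X - Low) u<= (High - Low);
    // when Low is the smallest signed value the subtraction is unnecessary and
    // a single signed compare against High suffices.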
1334 if (cast<ConstantInt>(CB.CmpLHS)->isMinValue(true)) {
1335 Cond = DAG.getSetCC(dl, MVT::i1, CmpOp, DAG.getConstant(High, VT),
1338 SDValue SUB = DAG.getNode(ISD::SUB, dl,
1339 VT, CmpOp, DAG.getConstant(Low, VT));
1340 Cond = DAG.getSetCC(dl, MVT::i1, SUB,
1341 DAG.getConstant(High-Low, VT), ISD::SETULE);
1345 // Update successor info
1346 CurMBB->addSuccessor(CB.TrueBB);
1347 CurMBB->addSuccessor(CB.FalseBB);
1349 // Set NextBlock to be the MBB immediately after the current one, if any.
1350 // This is used to avoid emitting unnecessary branches to the next block.
1351 MachineBasicBlock *NextBlock = 0;
1352 MachineFunction::iterator BBI = CurMBB;
  if (++BBI != CurMBB->getParent()->end())
    NextBlock = BBI;
1356 // If the lhs block is the next block, invert the condition so that we can
1357 // fall through to the lhs instead of the rhs block.
1358 if (CB.TrueBB == NextBlock) {
1359 std::swap(CB.TrueBB, CB.FalseBB);
1360 SDValue True = DAG.getConstant(1, Cond.getValueType());
1361 Cond = DAG.getNode(ISD::XOR, dl, Cond.getValueType(), Cond, True);
1363 SDValue BrCond = DAG.getNode(ISD::BRCOND, dl,
1364 MVT::Other, getControlRoot(), Cond,
1365 DAG.getBasicBlock(CB.TrueBB));
1367 // If the branch was constant folded, fix up the CFG.
1368 if (BrCond.getOpcode() == ISD::BR) {
1369 CurMBB->removeSuccessor(CB.FalseBB);
1370 DAG.setRoot(BrCond);
1372 // Otherwise, go ahead and insert the false branch.
1373 if (BrCond == getControlRoot())
1374 CurMBB->removeSuccessor(CB.TrueBB);
1376 if (CB.FalseBB == NextBlock)
1377 DAG.setRoot(BrCond);
    else
      DAG.setRoot(DAG.getNode(ISD::BR, dl, MVT::Other, BrCond,
                              DAG.getBasicBlock(CB.FalseBB)));
1384 /// visitJumpTable - Emit JumpTable node in the current MBB
1385 void SelectionDAGLowering::visitJumpTable(JumpTable &JT) {
1386 // Emit the code for the jump table
1387 assert(JT.Reg != -1U && "Should lower JT Header first!");
1388 MVT PTy = TLI.getPointerTy();
1389 SDValue Index = DAG.getCopyFromReg(getControlRoot(), getCurDebugLoc(),
1391 SDValue Table = DAG.getJumpTable(JT.JTI, PTy);
1392 DAG.setRoot(DAG.getNode(ISD::BR_JT, getCurDebugLoc(),
1393 MVT::Other, Index.getValue(1),
/// visitJumpTableHeader - This function emits necessary code to produce an
/// index into the JumpTable from the switch case.
1399 void SelectionDAGLowering::visitJumpTableHeader(JumpTable &JT,
1400 JumpTableHeader &JTH) {
1401 // Subtract the lowest switch case value from the value being switched on and
  // conditionally branch to the default mbb if the result is greater than the
1403 // difference between smallest and largest cases.
1404 SDValue SwitchOp = getValue(JTH.SValue);
1405 MVT VT = SwitchOp.getValueType();
1406 SDValue SUB = DAG.getNode(ISD::SUB, getCurDebugLoc(), VT, SwitchOp,
1407 DAG.getConstant(JTH.First, VT));
1409 // The SDNode we just created, which holds the value being switched on minus
  // the smallest case value, needs to be copied to a virtual register so it
1411 // can be used as an index into the jump table in a subsequent basic block.
1412 // This value may be smaller or larger than the target's pointer type, and
  // therefore require extension or truncation.
1414 if (VT.bitsGT(TLI.getPointerTy()))
1415 SwitchOp = DAG.getNode(ISD::TRUNCATE, getCurDebugLoc(),
1416 TLI.getPointerTy(), SUB);
  else
    SwitchOp = DAG.getNode(ISD::ZERO_EXTEND, getCurDebugLoc(),
                           TLI.getPointerTy(), SUB);
1421 unsigned JumpTableReg = FuncInfo.MakeReg(TLI.getPointerTy());
1422 SDValue CopyTo = DAG.getCopyToReg(getControlRoot(), getCurDebugLoc(),
1423 JumpTableReg, SwitchOp);
1424 JT.Reg = JumpTableReg;
1426 // Emit the range check for the jump table, and branch to the default block
1427 // for the switch statement if the value being switched on exceeds the largest
1428 // case in the switch.
1429 SDValue CMP = DAG.getSetCC(getCurDebugLoc(),
1430 TLI.getSetCCResultType(SUB.getValueType()), SUB,
1431 DAG.getConstant(JTH.Last-JTH.First,VT),
1434 // Set NextBlock to be the MBB immediately after the current one, if any.
1435 // This is used to avoid emitting unnecessary branches to the next block.
1436 MachineBasicBlock *NextBlock = 0;
1437 MachineFunction::iterator BBI = CurMBB;
  if (++BBI != CurMBB->getParent()->end())
    NextBlock = BBI;
1441 SDValue BrCond = DAG.getNode(ISD::BRCOND, getCurDebugLoc(),
1442 MVT::Other, CopyTo, CMP,
1443 DAG.getBasicBlock(JT.Default));
1445 if (JT.MBB == NextBlock)
1446 DAG.setRoot(BrCond);
  else
    DAG.setRoot(DAG.getNode(ISD::BR, getCurDebugLoc(), MVT::Other, BrCond,
                            DAG.getBasicBlock(JT.MBB)));
/// visitBitTestHeader - This function emits necessary code to produce a value
1453 /// suitable for "bit tests"
1454 void SelectionDAGLowering::visitBitTestHeader(BitTestBlock &B) {
1455 // Subtract the minimum value
1456 SDValue SwitchOp = getValue(B.SValue);
1457 MVT VT = SwitchOp.getValueType();
1458 SDValue SUB = DAG.getNode(ISD::SUB, getCurDebugLoc(), VT, SwitchOp,
1459 DAG.getConstant(B.First, VT));
1462 SDValue RangeCmp = DAG.getSetCC(getCurDebugLoc(),
1463 TLI.getSetCCResultType(SUB.getValueType()),
1464 SUB, DAG.getConstant(B.Range, VT),
1468 if (VT.bitsGT(TLI.getPointerTy()))
1469 ShiftOp = DAG.getNode(ISD::TRUNCATE, getCurDebugLoc(),
1470 TLI.getPointerTy(), SUB);
  else
    ShiftOp = DAG.getNode(ISD::ZERO_EXTEND, getCurDebugLoc(),
                          TLI.getPointerTy(), SUB);
1475 B.Reg = FuncInfo.MakeReg(TLI.getPointerTy());
1476 SDValue CopyTo = DAG.getCopyToReg(getControlRoot(), getCurDebugLoc(),
1479 // Set NextBlock to be the MBB immediately after the current one, if any.
1480 // This is used to avoid emitting unnecessary branches to the next block.
1481 MachineBasicBlock *NextBlock = 0;
1482 MachineFunction::iterator BBI = CurMBB;
  if (++BBI != CurMBB->getParent()->end())
    NextBlock = BBI;
1486 MachineBasicBlock* MBB = B.Cases[0].ThisBB;
1488 CurMBB->addSuccessor(B.Default);
1489 CurMBB->addSuccessor(MBB);
1491 SDValue BrRange = DAG.getNode(ISD::BRCOND, getCurDebugLoc(),
1492 MVT::Other, CopyTo, RangeCmp,
1493 DAG.getBasicBlock(B.Default));
1495 if (MBB == NextBlock)
1496 DAG.setRoot(BrRange);
  else
    DAG.setRoot(DAG.getNode(ISD::BR, getCurDebugLoc(), MVT::Other, CopyTo,
                            DAG.getBasicBlock(MBB)));
1502 /// visitBitTestCase - this function produces one "bit test"
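/// by shifting a single set bit left by the (already range-adjusted) switch
/// value and AND'ing it with the case's bit mask; a non-zero result branches
/// to the case's target block.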
1503 void SelectionDAGLowering::visitBitTestCase(MachineBasicBlock* NextMBB,
1506 // Make desired shift
1507 SDValue ShiftOp = DAG.getCopyFromReg(getControlRoot(), getCurDebugLoc(), Reg,
1508 TLI.getPointerTy());
1509 SDValue SwitchVal = DAG.getNode(ISD::SHL, getCurDebugLoc(),
1511 DAG.getConstant(1, TLI.getPointerTy()),
1514 // Emit bit tests and jumps
1515 SDValue AndOp = DAG.getNode(ISD::AND, getCurDebugLoc(),
1516 TLI.getPointerTy(), SwitchVal,
1517 DAG.getConstant(B.Mask, TLI.getPointerTy()));
1518 SDValue AndCmp = DAG.getSetCC(getCurDebugLoc(),
1519 TLI.getSetCCResultType(AndOp.getValueType()),
1520 AndOp, DAG.getConstant(0, TLI.getPointerTy()),
1523 CurMBB->addSuccessor(B.TargetBB);
1524 CurMBB->addSuccessor(NextMBB);
1526 SDValue BrAnd = DAG.getNode(ISD::BRCOND, getCurDebugLoc(),
1527 MVT::Other, getControlRoot(),
1528 AndCmp, DAG.getBasicBlock(B.TargetBB));
1530 // Set NextBlock to be the MBB immediately after the current one, if any.
1531 // This is used to avoid emitting unnecessary branches to the next block.
1532 MachineBasicBlock *NextBlock = 0;
1533 MachineFunction::iterator BBI = CurMBB;
  if (++BBI != CurMBB->getParent()->end())
    NextBlock = BBI;

  if (NextMBB == NextBlock)
    DAG.setRoot(BrAnd);
  else
1540 DAG.setRoot(DAG.getNode(ISD::BR, getCurDebugLoc(), MVT::Other, BrAnd,
1541 DAG.getBasicBlock(NextMBB)));
1544 void SelectionDAGLowering::visitInvoke(InvokeInst &I) {
1545 // Retrieve successors.
1546 MachineBasicBlock *Return = FuncInfo.MBBMap[I.getSuccessor(0)];
1547 MachineBasicBlock *LandingPad = FuncInfo.MBBMap[I.getSuccessor(1)];
1549 const Value *Callee(I.getCalledValue());
1550 if (isa<InlineAsm>(Callee))
1553 LowerCallTo(&I, getValue(Callee), false, LandingPad);
1555 // If the value of the invoke is used outside of its defining block, make it
1556 // available as a virtual register.
1557 CopyToExportRegsIfNeeded(&I);
1559 // Update successor info
1560 CurMBB->addSuccessor(Return);
1561 CurMBB->addSuccessor(LandingPad);
1563 // Drop into normal successor.
1564 DAG.setRoot(DAG.getNode(ISD::BR, getCurDebugLoc(),
1565 MVT::Other, getControlRoot(),
1566 DAG.getBasicBlock(Return)));
1569 void SelectionDAGLowering::visitUnwind(UnwindInst &I) {
/// handleSmallSwitchRange - Emit a series of specific tests (suitable for
1573 /// small case ranges).
1574 bool SelectionDAGLowering::handleSmallSwitchRange(CaseRec& CR,
1575 CaseRecVector& WorkList,
1577 MachineBasicBlock* Default) {
1578 Case& BackCase = *(CR.Range.second-1);
1580 // Size is the number of Cases represented by this range.
1581 size_t Size = CR.Range.second - CR.Range.first;
1585 // Get the MachineFunction which holds the current MBB. This is used when
1586 // inserting any additional MBBs necessary to represent the switch.
1587 MachineFunction *CurMF = CurMBB->getParent();
1589 // Figure out which block is immediately after the current one.
1590 MachineBasicBlock *NextBlock = 0;
1591 MachineFunction::iterator BBI = CR.CaseBB;
  if (++BBI != CurMBB->getParent()->end())
    NextBlock = BBI;

  // TODO: If any two of the cases have the same destination, and if one value
1597 // is the same as the other, but has one bit unset that the other has set,
1598 // use bit manipulation to do two compares at once. For example:
1599 // "if (X == 6 || X == 4)" -> "if ((X|2) == 6)"
1601 // Rearrange the case blocks so that the last one falls through if possible.
1602 if (NextBlock && Default != NextBlock && BackCase.BB != NextBlock) {
1603 // The last case block won't fall through into 'NextBlock' if we emit the
1604 // branches in this order. See if rearranging a case value would help.
1605 for (CaseItr I = CR.Range.first, E = CR.Range.second-1; I != E; ++I) {
1606 if (I->BB == NextBlock) {
1607 std::swap(*I, BackCase);
1613 // Create a CaseBlock record representing a conditional branch to
1614 // the Case's target mbb if the value being switched on SV is equal
1616 MachineBasicBlock *CurBlock = CR.CaseBB;
1617 for (CaseItr I = CR.Range.first, E = CR.Range.second; I != E; ++I) {
1618 MachineBasicBlock *FallThrough;
1620 FallThrough = CurMF->CreateMachineBasicBlock(CurBlock->getBasicBlock());
1621 CurMF->insert(BBI, FallThrough);
1623 // Put SV in a virtual register to make it available from the new blocks.
1624 ExportFromCurrentBlock(SV);
1626 // If the last case doesn't match, go to the default block.
1627 FallThrough = Default;
1630 Value *RHS, *LHS, *MHS;
1632 if (I->High == I->Low) {
      // This is just a small case range :) containing exactly 1 case
1635 LHS = SV; RHS = I->High; MHS = NULL;
1638 LHS = I->Low; MHS = SV; RHS = I->High;
1640 CaseBlock CB(CC, LHS, RHS, MHS, I->BB, FallThrough, CurBlock);
1642 // If emitting the first comparison, just call visitSwitchCase to emit the
1643 // code into the current block. Otherwise, push the CaseBlock onto the
1644 // vector to be later processed by SDISel, and insert the node's MBB
1645 // before the next MBB.
1646 if (CurBlock == CurMBB)
1647 visitSwitchCase(CB);
1649 SwitchCases.push_back(CB);
1651 CurBlock = FallThrough;
1657 static inline bool areJTsAllowed(const TargetLowering &TLI) {
1658 return !DisableJumpTables &&
1659 (TLI.isOperationLegalOrCustom(ISD::BR_JT, MVT::Other) ||
1660 TLI.isOperationLegalOrCustom(ISD::BRIND, MVT::Other));
1663 static APInt ComputeRange(const APInt &First, const APInt &Last) {
1664 APInt LastExt(Last), FirstExt(First);
1665 uint32_t BitWidth = std::max(Last.getBitWidth(), First.getBitWidth()) + 1;
1666 LastExt.sext(BitWidth); FirstExt.sext(BitWidth);
1667 return (LastExt - FirstExt + 1ULL);
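// For example (illustrative values): with First == 10 and Last == 40 this returns 31.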
1670 /// handleJTSwitchCase - Emit a jump table for the current switch case range.
1671 bool SelectionDAGLowering::handleJTSwitchCase(CaseRec& CR,
1672 CaseRecVector& WorkList,
1674 MachineBasicBlock* Default) {
1675 Case& FrontCase = *CR.Range.first;
1676 Case& BackCase = *(CR.Range.second-1);
1678 const APInt& First = cast<ConstantInt>(FrontCase.Low)->getValue();
1679 const APInt& Last = cast<ConstantInt>(BackCase.High)->getValue();
1682 for (CaseItr I = CR.Range.first, E = CR.Range.second;
1686 if (!areJTsAllowed(TLI) || TSize <= 3)
1689 APInt Range = ComputeRange(First, Last);
1690 double Density = (double)TSize / Range.roundToDouble();
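// For example (illustrative values): 10 cases covering the value range [0, 19] give Density == 0.5.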
1694 DEBUG(errs() << "Lowering jump table\n"
1695 << "First entry: " << First << ". Last entry: " << Last << '\n'
1696 << "Range: " << Range
1697 << "Size: " << TSize << ". Density: " << Density << "\n\n");
1699 // Get the MachineFunction which holds the current MBB. This is used when
1700 // inserting any additional MBBs necessary to represent the switch.
1701 MachineFunction *CurMF = CurMBB->getParent();
1703 // Figure out which block is immediately after the current one.
1704 MachineBasicBlock *NextBlock = 0;
1705 MachineFunction::iterator BBI = CR.CaseBB;
1707 if (++BBI != CurMBB->getParent()->end())
1710 const BasicBlock *LLVMBB = CR.CaseBB->getBasicBlock();
1712 // Create a new basic block to hold the code for loading the address
1713 // of the jump table, and jumping to it. Update successor information;
1714 // we will either branch to the default case for the switch, or the jump table.
1716 MachineBasicBlock *JumpTableBB = CurMF->CreateMachineBasicBlock(LLVMBB);
1717 CurMF->insert(BBI, JumpTableBB);
1718 CR.CaseBB->addSuccessor(Default);
1719 CR.CaseBB->addSuccessor(JumpTableBB);
1721 // Build a vector of destination BBs, corresponding to each target
1722 // of the jump table. If the value of the jump table slot corresponds to
1723 // a case statement, push the case's BB onto the vector; otherwise, push the default BB.
1725 std::vector<MachineBasicBlock*> DestBBs;
1727 for (CaseItr I = CR.Range.first, E = CR.Range.second; I != E; ++TEI) {
1728 const APInt& Low = cast<ConstantInt>(I->Low)->getValue();
1729 const APInt& High = cast<ConstantInt>(I->High)->getValue();
1731 if (Low.sle(TEI) && TEI.sle(High)) {
1732 DestBBs.push_back(I->BB);
1736 DestBBs.push_back(Default);
1740 // Update successor info. Add one edge to each unique successor.
1741 BitVector SuccsHandled(CR.CaseBB->getParent()->getNumBlockIDs());
1742 for (std::vector<MachineBasicBlock*>::iterator I = DestBBs.begin(),
1743 E = DestBBs.end(); I != E; ++I) {
1744 if (!SuccsHandled[(*I)->getNumber()]) {
1745 SuccsHandled[(*I)->getNumber()] = true;
1746 JumpTableBB->addSuccessor(*I);
1750 // Create a jump table index for this jump table, or return an existing one.
1752 unsigned JTI = CurMF->getJumpTableInfo()->getJumpTableIndex(DestBBs);
1754 // Set the jump table information so that we can codegen it as a second
1755 // MachineBasicBlock
1756 JumpTable JT(-1U, JTI, JumpTableBB, Default);
1757 JumpTableHeader JTH(First, Last, SV, CR.CaseBB, (CR.CaseBB == CurMBB));
1758 if (CR.CaseBB == CurMBB)
1759 visitJumpTableHeader(JT, JTH);
1761 JTCases.push_back(JumpTableBlock(JTH, JT));
1766 /// handleBTSplitSwitchCase - emit comparison and split binary search tree into two subtrees.
1768 bool SelectionDAGLowering::handleBTSplitSwitchCase(CaseRec& CR,
1769 CaseRecVector& WorkList,
1771 MachineBasicBlock* Default) {
1772 // Get the MachineFunction which holds the current MBB. This is used when
1773 // inserting any additional MBBs necessary to represent the switch.
1774 MachineFunction *CurMF = CurMBB->getParent();
1776 // Figure out which block is immediately after the current one.
1777 MachineBasicBlock *NextBlock = 0;
1778 MachineFunction::iterator BBI = CR.CaseBB;
1780 if (++BBI != CurMBB->getParent()->end())
1783 Case& FrontCase = *CR.Range.first;
1784 Case& BackCase = *(CR.Range.second-1);
1785 const BasicBlock *LLVMBB = CR.CaseBB->getBasicBlock();
1787 // Size is the number of Cases represented by this range.
1788 unsigned Size = CR.Range.second - CR.Range.first;
1790 const APInt& First = cast<ConstantInt>(FrontCase.Low)->getValue();
1791 const APInt& Last = cast<ConstantInt>(BackCase.High)->getValue();
1793 CaseItr Pivot = CR.Range.first + Size/2;
1795 // Select the optimal pivot, maximizing the sum of the densities of LHS and RHS. This will
1796 // (heuristically) allow us to emit JumpTables later.
1798 for (CaseItr I = CR.Range.first, E = CR.Range.second;
1802 size_t LSize = FrontCase.size();
1803 size_t RSize = TSize-LSize;
1804 DEBUG(errs() << "Selecting best pivot: \n"
1805 << "First: " << First << ", Last: " << Last <<'\n'
1806 << "LSize: " << LSize << ", RSize: " << RSize << '\n');
1807 for (CaseItr I = CR.Range.first, J=I+1, E = CR.Range.second;
1809 const APInt& LEnd = cast<ConstantInt>(I->High)->getValue();
1810 const APInt& RBegin = cast<ConstantInt>(J->Low)->getValue();
1811 APInt Range = ComputeRange(LEnd, RBegin);
1812 assert((Range - 2ULL).isNonNegative() &&
1813 "Invalid case distance");
1814 double LDensity = (double)LSize / (LEnd - First + 1ULL).roundToDouble();
1815 double RDensity = (double)RSize / (Last - RBegin + 1ULL).roundToDouble();
1816 double Metric = Range.logBase2()*(LDensity+RDensity);
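// For example (illustrative values): a gap of Range == 8 between the two halves, with LDensity == RDensity == 0.5, gives Metric == 3.0.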
1817 // Should always split in some non-trivial place
1818 DEBUG(errs() <<"=>Step\n"
1819 << "LEnd: " << LEnd << ", RBegin: " << RBegin << '\n'
1820 << "LDensity: " << LDensity
1821 << ", RDensity: " << RDensity << '\n'
1822 << "Metric: " << Metric << '\n');
1823 if (FMetric < Metric) {
1826 DEBUG(errs() << "Current metric set to: " << FMetric << '\n');
1832 if (areJTsAllowed(TLI)) {
1833 // If our case is dense we *really* should handle it earlier!
1834 assert((FMetric > 0) && "Should handle dense range earlier!");
1836 Pivot = CR.Range.first + Size/2;
1839 CaseRange LHSR(CR.Range.first, Pivot);
1840 CaseRange RHSR(Pivot, CR.Range.second);
1841 Constant *C = Pivot->Low;
1842 MachineBasicBlock *FalseBB = 0, *TrueBB = 0;
1844 // We know that we branch to the LHS if the Value being switched on is
1845 // less than the Pivot value, C. We use this to optimize our binary
1846 // tree a bit, by recognizing that if SV is greater than or equal to the
1847 // LHS's Case Value, and that Case Value is exactly one less than the
1848 // Pivot's Value, then we can branch directly to the LHS's Target,
1849 // rather than creating a leaf node for it.
1850 if ((LHSR.second - LHSR.first) == 1 &&
1851 LHSR.first->High == CR.GE &&
1852 cast<ConstantInt>(C)->getValue() ==
1853 (cast<ConstantInt>(CR.GE)->getValue() + 1LL)) {
1854 TrueBB = LHSR.first->BB;
1856 TrueBB = CurMF->CreateMachineBasicBlock(LLVMBB);
1857 CurMF->insert(BBI, TrueBB);
1858 WorkList.push_back(CaseRec(TrueBB, C, CR.GE, LHSR));
1860 // Put SV in a virtual register to make it available from the new blocks.
1861 ExportFromCurrentBlock(SV);
1864 // Similar to the optimization above, if the Value being switched on is
1865 // known to be less than the Constant CR.LT, and the current Case Value
1866 // is CR.LT - 1, then we can branch directly to the target block for
1867 // the current Case Value, rather than emitting a RHS leaf node for it.
1868 if ((RHSR.second - RHSR.first) == 1 && CR.LT &&
1869 cast<ConstantInt>(RHSR.first->Low)->getValue() ==
1870 (cast<ConstantInt>(CR.LT)->getValue() - 1LL)) {
1871 FalseBB = RHSR.first->BB;
1873 FalseBB = CurMF->CreateMachineBasicBlock(LLVMBB);
1874 CurMF->insert(BBI, FalseBB);
1875 WorkList.push_back(CaseRec(FalseBB,CR.LT,C,RHSR));
1877 // Put SV in a virtual register to make it available from the new blocks.
1878 ExportFromCurrentBlock(SV);
1881 // Create a CaseBlock record representing a conditional branch to
1882 // the LHS node if the value being switched on SV is less than C.
1883 // Otherwise, branch to the RHS node.
1884 CaseBlock CB(ISD::SETLT, SV, C, NULL, TrueBB, FalseBB, CR.CaseBB);
1886 if (CR.CaseBB == CurMBB)
1887 visitSwitchCase(CB);
1889 SwitchCases.push_back(CB);
1894 /// handleBitTestsSwitchCase - if the current case range has few destinations and
1895 /// its range spans less than the machine word bitwidth, encode the case range into a
1896 /// series of masks and emit bit tests with these masks.
1897 bool SelectionDAGLowering::handleBitTestsSwitchCase(CaseRec& CR,
1898 CaseRecVector& WorkList,
1900 MachineBasicBlock* Default){
1901 unsigned IntPtrBits = TLI.getPointerTy().getSizeInBits();
1903 Case& FrontCase = *CR.Range.first;
1904 Case& BackCase = *(CR.Range.second-1);
1906 // Get the MachineFunction which holds the current MBB. This is used when
1907 // inserting any additional MBBs necessary to represent the switch.
1908 MachineFunction *CurMF = CurMBB->getParent();
1910 // If the target does not have a legal shift left, do not emit bit tests at all.
1911 if (!TLI.isOperationLegal(ISD::SHL, TLI.getPointerTy()))
1915 for (CaseItr I = CR.Range.first, E = CR.Range.second;
1917 // A single case counts as one comparison; a case range counts as two.
1918 numCmps += (I->Low == I->High ? 1 : 2);
1921 // Count unique destinations
1922 SmallSet<MachineBasicBlock*, 4> Dests;
1923 for (CaseItr I = CR.Range.first, E = CR.Range.second; I!=E; ++I) {
1924 Dests.insert(I->BB);
1925 if (Dests.size() > 3)
1926 // Don't bother with the code below if there are too many unique destinations.
1929 DEBUG(errs() << "Total number of unique destinations: " << Dests.size() << '\n'
1930 << "Total number of comparisons: " << numCmps << '\n');
1932 // Compute span of values.
1933 const APInt& minValue = cast<ConstantInt>(FrontCase.Low)->getValue();
1934 const APInt& maxValue = cast<ConstantInt>(BackCase.High)->getValue();
1935 APInt cmpRange = maxValue - minValue;
1937 DEBUG(errs() << "Compare range: " << cmpRange << '\n'
1938 << "Low bound: " << minValue << '\n'
1939 << "High bound: " << maxValue << '\n');
1941 if (cmpRange.uge(APInt(cmpRange.getBitWidth(), IntPtrBits)) ||
1942 (!(Dests.size() == 1 && numCmps >= 3) &&
1943 !(Dests.size() == 2 && numCmps >= 5) &&
1944 !(Dests.size() >= 3 && numCmps >= 6)))
1947 DEBUG(errs() << "Emitting bit tests\n");
1948 APInt lowBound = APInt::getNullValue(cmpRange.getBitWidth());
1950 // Optimize the case where all the case values fit in a
1951 // word without having to subtract minValue. In this case,
1952 // we can optimize away the subtraction.
1953 if (minValue.isNonNegative() &&
1954 maxValue.slt(APInt(maxValue.getBitWidth(), IntPtrBits))) {
1955 cmpRange = maxValue;
1957 lowBound = minValue;
1960 CaseBitsVector CasesBits;
1961 unsigned i, count = 0;
1963 for (CaseItr I = CR.Range.first, E = CR.Range.second; I!=E; ++I) {
1964 MachineBasicBlock* Dest = I->BB;
1965 for (i = 0; i < count; ++i)
1966 if (Dest == CasesBits[i].BB)
1970 assert((count < 3) && "Too many destinations to test!");
1971 CasesBits.push_back(CaseBits(0, Dest, 0));
1975 const APInt& lowValue = cast<ConstantInt>(I->Low)->getValue();
1976 const APInt& highValue = cast<ConstantInt>(I->High)->getValue();
1978 uint64_t lo = (lowValue - lowBound).getZExtValue();
1979 uint64_t hi = (highValue - lowBound).getZExtValue();
1981 for (uint64_t j = lo; j <= hi; j++) {
1982 CasesBits[i].Mask |= 1ULL << j;
1983 CasesBits[i].Bits++;
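// For example (illustrative values, lowBound == 0): cases 1, 3 and 5..7 that all branch to the same block produce the single mask 0b11101010.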
1987 std::sort(CasesBits.begin(), CasesBits.end(), CaseBitsCmp());
1991 // Figure out which block is immediately after the current one.
1992 MachineFunction::iterator BBI = CR.CaseBB;
1995 const BasicBlock *LLVMBB = CR.CaseBB->getBasicBlock();
1997 DEBUG(errs() << "Cases:\n");
1998 for (unsigned i = 0, e = CasesBits.size(); i!=e; ++i) {
1999 DEBUG(errs() << "Mask: " << CasesBits[i].Mask
2000 << ", Bits: " << CasesBits[i].Bits
2001 << ", BB: " << CasesBits[i].BB << '\n');
2003 MachineBasicBlock *CaseBB = CurMF->CreateMachineBasicBlock(LLVMBB);
2004 CurMF->insert(BBI, CaseBB);
2005 BTC.push_back(BitTestCase(CasesBits[i].Mask,
2009 // Put SV in a virtual register to make it available from the new blocks.
2010 ExportFromCurrentBlock(SV);
2013 BitTestBlock BTB(lowBound, cmpRange, SV,
2014 -1U, (CR.CaseBB == CurMBB),
2015 CR.CaseBB, Default, BTC);
2017 if (CR.CaseBB == CurMBB)
2018 visitBitTestHeader(BTB);
2020 BitTestCases.push_back(BTB);
2026 /// Clusterify - Transform a simple list of Cases into a list of CaseRanges.
2027 size_t SelectionDAGLowering::Clusterify(CaseVector& Cases,
2028 const SwitchInst& SI) {
2031 // Start with "simple" cases
2032 for (size_t i = 1; i < SI.getNumSuccessors(); ++i) {
2033 MachineBasicBlock *SMBB = FuncInfo.MBBMap[SI.getSuccessor(i)];
2034 Cases.push_back(Case(SI.getSuccessorValue(i),
2035 SI.getSuccessorValue(i),
2038 std::sort(Cases.begin(), Cases.end(), CaseCmp());
2040 // Merge case into clusters
2041 if (Cases.size() >= 2)
2042 // Must recompute end() each iteration because it may be
2043 // invalidated by erase if we hold on to it
2044 for (CaseItr I = Cases.begin(), J = ++(Cases.begin()); J != Cases.end(); ) {
2045 const APInt& nextValue = cast<ConstantInt>(J->Low)->getValue();
2046 const APInt& currentValue = cast<ConstantInt>(I->High)->getValue();
2047 MachineBasicBlock* nextBB = J->BB;
2048 MachineBasicBlock* currentBB = I->BB;
2050 // If the two neighboring cases go to the same destination, merge them
2051 // into a single case.
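// For example, 'case 4' and 'case 5' that branch to the same block become the single cluster [4, 5].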
2052 if ((nextValue - currentValue == 1) && (currentBB == nextBB)) {
2060 for (CaseItr I=Cases.begin(), E=Cases.end(); I!=E; ++I, ++numCmps) {
2061 if (I->Low != I->High)
2062 // A range counts double, since it requires two compares.
2069 void SelectionDAGLowering::visitSwitch(SwitchInst &SI) {
2070 // Figure out which block is immediately after the current one.
2071 MachineBasicBlock *NextBlock = 0;
2072 MachineFunction::iterator BBI = CurMBB;
2074 MachineBasicBlock *Default = FuncInfo.MBBMap[SI.getDefaultDest()];
2076 // If there is only the default destination, branch to it if it is not the
2077 // next basic block. Otherwise, just fall through.
2078 if (SI.getNumOperands() == 2) {
2079 // Update machine-CFG edges.
2081 // If this is not a fall-through branch, emit the branch.
2082 CurMBB->addSuccessor(Default);
2083 if (Default != NextBlock)
2084 DAG.setRoot(DAG.getNode(ISD::BR, getCurDebugLoc(),
2085 MVT::Other, getControlRoot(),
2086 DAG.getBasicBlock(Default)));
2090 // If there are any non-default case statements, create a vector of Cases
2091 // representing each one, and sort the vector so that we can efficiently
2092 // create a binary search tree from them.
2094 size_t numCmps = Clusterify(Cases, SI);
2095 DEBUG(errs() << "Clusterify finished. Total clusters: " << Cases.size()
2096 << ". Total compares: " << numCmps << '\n');
2099 // Get the Value to be switched on and default basic blocks, which will be
2100 // inserted into CaseBlock records, representing basic blocks in the binary search tree.
2102 Value *SV = SI.getOperand(0);
2104 // Push the initial CaseRec onto the worklist
2105 CaseRecVector WorkList;
2106 WorkList.push_back(CaseRec(CurMBB,0,0,CaseRange(Cases.begin(),Cases.end())));
2108 while (!WorkList.empty()) {
2109 // Grab a record representing a case range to process off the worklist
2110 CaseRec CR = WorkList.back();
2111 WorkList.pop_back();
2113 if (handleBitTestsSwitchCase(CR, WorkList, SV, Default))
2116 // If the range has few cases (two or fewer), emit a series of specific tests.
2118 if (handleSmallSwitchRange(CR, WorkList, SV, Default))
2121 // If the switch has more than 5 blocks, is at least 40% dense, and the
2122 // target supports indirect branches, then emit a jump table rather than
2123 // lowering the switch to a binary tree of conditional branches.
2124 if (handleJTSwitchCase(CR, WorkList, SV, Default))
2127 // Emit binary tree. We need to pick a pivot, and push left and right ranges
2128 // onto the worklist. Leaves are handled via the handleSmallSwitchRange() call.
2129 handleBTSplitSwitchCase(CR, WorkList, SV, Default);
2134 void SelectionDAGLowering::visitFSub(User &I) {
2135 // -0.0 - X --> fneg
2136 const Type *Ty = I.getType();
2137 if (isa<VectorType>(Ty)) {
2138 if (ConstantVector *CV = dyn_cast<ConstantVector>(I.getOperand(0))) {
2139 const VectorType *DestTy = cast<VectorType>(I.getType());
2140 const Type *ElTy = DestTy->getElementType();
2141 unsigned VL = DestTy->getNumElements();
2142 std::vector<Constant*> NZ(VL, ConstantFP::getNegativeZero(ElTy));
2143 Constant *CNZ = ConstantVector::get(&NZ[0], NZ.size());
2145 SDValue Op2 = getValue(I.getOperand(1));
2146 setValue(&I, DAG.getNode(ISD::FNEG, getCurDebugLoc(),
2147 Op2.getValueType(), Op2));
2152 if (ConstantFP *CFP = dyn_cast<ConstantFP>(I.getOperand(0)))
2153 if (CFP->isExactlyValue(ConstantFP::getNegativeZero(Ty)->getValueAPF())) {
2154 SDValue Op2 = getValue(I.getOperand(1));
2155 setValue(&I, DAG.getNode(ISD::FNEG, getCurDebugLoc(),
2156 Op2.getValueType(), Op2));
2160 visitBinary(I, ISD::FSUB);
2163 void SelectionDAGLowering::visitBinary(User &I, unsigned OpCode) {
2164 SDValue Op1 = getValue(I.getOperand(0));
2165 SDValue Op2 = getValue(I.getOperand(1));
2167 setValue(&I, DAG.getNode(OpCode, getCurDebugLoc(),
2168 Op1.getValueType(), Op1, Op2));
2171 void SelectionDAGLowering::visitShift(User &I, unsigned Opcode) {
2172 SDValue Op1 = getValue(I.getOperand(0));
2173 SDValue Op2 = getValue(I.getOperand(1));
2174 if (!isa<VectorType>(I.getType()) &&
2175 Op2.getValueType() != TLI.getShiftAmountTy()) {
2176 // If the operand is smaller than the shift count type, promote it.
2177 if (TLI.getShiftAmountTy().bitsGT(Op2.getValueType()))
2178 Op2 = DAG.getNode(ISD::ANY_EXTEND, getCurDebugLoc(),
2179 TLI.getShiftAmountTy(), Op2);
2180 // If the operand is larger than the shift count type but the shift
2181 // count type has enough bits to represent any shift value, truncate
2182 // it now. This is a common case and it exposes the truncate to
2183 // optimization early.
2184 else if (TLI.getShiftAmountTy().getSizeInBits() >=
2185 Log2_32_Ceil(Op2.getValueType().getSizeInBits()))
2186 Op2 = DAG.getNode(ISD::TRUNCATE, getCurDebugLoc(),
2187 TLI.getShiftAmountTy(), Op2);
2188 // Otherwise we'll need to temporarily settle for some other
2189 // convenient type; type legalization will make adjustments as needed.
2191 else if (TLI.getPointerTy().bitsLT(Op2.getValueType()))
2192 Op2 = DAG.getNode(ISD::TRUNCATE, getCurDebugLoc(),
2193 TLI.getPointerTy(), Op2);
2194 else if (TLI.getPointerTy().bitsGT(Op2.getValueType()))
2195 Op2 = DAG.getNode(ISD::ANY_EXTEND, getCurDebugLoc(),
2196 TLI.getPointerTy(), Op2);
2199 setValue(&I, DAG.getNode(Opcode, getCurDebugLoc(),
2200 Op1.getValueType(), Op1, Op2));
2203 void SelectionDAGLowering::visitICmp(User &I) {
2204 ICmpInst::Predicate predicate = ICmpInst::BAD_ICMP_PREDICATE;
2205 if (ICmpInst *IC = dyn_cast<ICmpInst>(&I))
2206 predicate = IC->getPredicate();
2207 else if (ConstantExpr *IC = dyn_cast<ConstantExpr>(&I))
2208 predicate = ICmpInst::Predicate(IC->getPredicate());
2209 SDValue Op1 = getValue(I.getOperand(0));
2210 SDValue Op2 = getValue(I.getOperand(1));
2211 ISD::CondCode Opcode = getICmpCondCode(predicate);
2213 MVT DestVT = TLI.getValueType(I.getType());
2214 setValue(&I, DAG.getSetCC(getCurDebugLoc(), DestVT, Op1, Op2, Opcode));
2217 void SelectionDAGLowering::visitFCmp(User &I) {
2218 FCmpInst::Predicate predicate = FCmpInst::BAD_FCMP_PREDICATE;
2219 if (FCmpInst *FC = dyn_cast<FCmpInst>(&I))
2220 predicate = FC->getPredicate();
2221 else if (ConstantExpr *FC = dyn_cast<ConstantExpr>(&I))
2222 predicate = FCmpInst::Predicate(FC->getPredicate());
2223 SDValue Op1 = getValue(I.getOperand(0));
2224 SDValue Op2 = getValue(I.getOperand(1));
2225 ISD::CondCode Condition = getFCmpCondCode(predicate);
2226 MVT DestVT = TLI.getValueType(I.getType());
2227 setValue(&I, DAG.getSetCC(getCurDebugLoc(), DestVT, Op1, Op2, Condition));
2230 void SelectionDAGLowering::visitSelect(User &I) {
2231 SmallVector<MVT, 4> ValueVTs;
2232 ComputeValueVTs(TLI, I.getType(), ValueVTs);
2233 unsigned NumValues = ValueVTs.size();
2234 if (NumValues != 0) {
2235 SmallVector<SDValue, 4> Values(NumValues);
2236 SDValue Cond = getValue(I.getOperand(0));
2237 SDValue TrueVal = getValue(I.getOperand(1));
2238 SDValue FalseVal = getValue(I.getOperand(2));
2240 for (unsigned i = 0; i != NumValues; ++i)
2241 Values[i] = DAG.getNode(ISD::SELECT, getCurDebugLoc(),
2242 TrueVal.getValueType(), Cond,
2243 SDValue(TrueVal.getNode(), TrueVal.getResNo() + i),
2244 SDValue(FalseVal.getNode(), FalseVal.getResNo() + i));
2246 setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurDebugLoc(),
2247 DAG.getVTList(&ValueVTs[0], NumValues),
2248 &Values[0], NumValues));
2253 void SelectionDAGLowering::visitTrunc(User &I) {
2254 // TruncInst cannot be a no-op cast because sizeof(src) > sizeof(dest).
2255 SDValue N = getValue(I.getOperand(0));
2256 MVT DestVT = TLI.getValueType(I.getType());
2257 setValue(&I, DAG.getNode(ISD::TRUNCATE, getCurDebugLoc(), DestVT, N));
2260 void SelectionDAGLowering::visitZExt(User &I) {
2261 // ZExt cannot be a no-op cast because sizeof(src) < sizeof(dest).
2262 // ZExt also can't be a cast to bool for the same reason, so there is nothing much to do.
2263 SDValue N = getValue(I.getOperand(0));
2264 MVT DestVT = TLI.getValueType(I.getType());
2265 setValue(&I, DAG.getNode(ISD::ZERO_EXTEND, getCurDebugLoc(), DestVT, N));
2268 void SelectionDAGLowering::visitSExt(User &I) {
2269 // SExt cannot be a no-op cast because sizeof(src) < sizeof(dest).
2270 // SExt also can't be a cast to bool for the same reason, so there is nothing much to do.
2271 SDValue N = getValue(I.getOperand(0));
2272 MVT DestVT = TLI.getValueType(I.getType());
2273 setValue(&I, DAG.getNode(ISD::SIGN_EXTEND, getCurDebugLoc(), DestVT, N));
2276 void SelectionDAGLowering::visitFPTrunc(User &I) {
2277 // FPTrunc is never a no-op cast, no need to check
2278 SDValue N = getValue(I.getOperand(0));
2279 MVT DestVT = TLI.getValueType(I.getType());
2280 setValue(&I, DAG.getNode(ISD::FP_ROUND, getCurDebugLoc(),
2281 DestVT, N, DAG.getIntPtrConstant(0)));
2284 void SelectionDAGLowering::visitFPExt(User &I){
2285 // FPExt is never a no-op cast, no need to check
2286 SDValue N = getValue(I.getOperand(0));
2287 MVT DestVT = TLI.getValueType(I.getType());
2288 setValue(&I, DAG.getNode(ISD::FP_EXTEND, getCurDebugLoc(), DestVT, N));
2291 void SelectionDAGLowering::visitFPToUI(User &I) {
2292 // FPToUI is never a no-op cast, no need to check
2293 SDValue N = getValue(I.getOperand(0));
2294 MVT DestVT = TLI.getValueType(I.getType());
2295 setValue(&I, DAG.getNode(ISD::FP_TO_UINT, getCurDebugLoc(), DestVT, N));
2298 void SelectionDAGLowering::visitFPToSI(User &I) {
2299 // FPToSI is never a no-op cast, no need to check
2300 SDValue N = getValue(I.getOperand(0));
2301 MVT DestVT = TLI.getValueType(I.getType());
2302 setValue(&I, DAG.getNode(ISD::FP_TO_SINT, getCurDebugLoc(), DestVT, N));
2305 void SelectionDAGLowering::visitUIToFP(User &I) {
2306 // UIToFP is never a no-op cast, no need to check
2307 SDValue N = getValue(I.getOperand(0));
2308 MVT DestVT = TLI.getValueType(I.getType());
2309 setValue(&I, DAG.getNode(ISD::UINT_TO_FP, getCurDebugLoc(), DestVT, N));
2312 void SelectionDAGLowering::visitSIToFP(User &I){
2313 // SIToFP is never a no-op cast, no need to check
2314 SDValue N = getValue(I.getOperand(0));
2315 MVT DestVT = TLI.getValueType(I.getType());
2316 setValue(&I, DAG.getNode(ISD::SINT_TO_FP, getCurDebugLoc(), DestVT, N));
2319 void SelectionDAGLowering::visitPtrToInt(User &I) {
2320 // What to do depends on the size of the integer and the size of the pointer.
2321 // We can either truncate, zero extend, or no-op, accordingly.
2322 SDValue N = getValue(I.getOperand(0));
2323 MVT SrcVT = N.getValueType();
2324 MVT DestVT = TLI.getValueType(I.getType());
2326 if (DestVT.bitsLT(SrcVT))
2327 Result = DAG.getNode(ISD::TRUNCATE, getCurDebugLoc(), DestVT, N);
2329 // Note: ZERO_EXTEND can handle cases where the sizes are equal too
2330 Result = DAG.getNode(ISD::ZERO_EXTEND, getCurDebugLoc(), DestVT, N);
2331 setValue(&I, Result);
2334 void SelectionDAGLowering::visitIntToPtr(User &I) {
2335 // What to do depends on the size of the integer and the size of the pointer.
2336 // We can either truncate, zero extend, or no-op, accordingly.
2337 SDValue N = getValue(I.getOperand(0));
2338 MVT SrcVT = N.getValueType();
2339 MVT DestVT = TLI.getValueType(I.getType());
2340 if (DestVT.bitsLT(SrcVT))
2341 setValue(&I, DAG.getNode(ISD::TRUNCATE, getCurDebugLoc(), DestVT, N));
2343 // Note: ZERO_EXTEND can handle cases where the sizes are equal too
2344 setValue(&I, DAG.getNode(ISD::ZERO_EXTEND, getCurDebugLoc(),
2348 void SelectionDAGLowering::visitBitCast(User &I) {
2349 SDValue N = getValue(I.getOperand(0));
2350 MVT DestVT = TLI.getValueType(I.getType());
2352 // BitCast assures us that source and destination are the same size so this
2353 // is either a BIT_CONVERT or a no-op.
2354 if (DestVT != N.getValueType())
2355 setValue(&I, DAG.getNode(ISD::BIT_CONVERT, getCurDebugLoc(),
2356 DestVT, N)); // convert types
2358 setValue(&I, N); // noop cast.
2361 void SelectionDAGLowering::visitInsertElement(User &I) {
2362 SDValue InVec = getValue(I.getOperand(0));
2363 SDValue InVal = getValue(I.getOperand(1));
2364 SDValue InIdx = DAG.getNode(ISD::ZERO_EXTEND, getCurDebugLoc(),
2366 getValue(I.getOperand(2)));
2368 setValue(&I, DAG.getNode(ISD::INSERT_VECTOR_ELT, getCurDebugLoc(),
2369 TLI.getValueType(I.getType()),
2370 InVec, InVal, InIdx));
2373 void SelectionDAGLowering::visitExtractElement(User &I) {
2374 SDValue InVec = getValue(I.getOperand(0));
2375 SDValue InIdx = DAG.getNode(ISD::ZERO_EXTEND, getCurDebugLoc(),
2377 getValue(I.getOperand(1)));
2378 setValue(&I, DAG.getNode(ISD::EXTRACT_VECTOR_ELT, getCurDebugLoc(),
2379 TLI.getValueType(I.getType()), InVec, InIdx));
2383 // Utility for visitShuffleVector - Returns true if the mask is a sequential mask starting
2384 // from SIndx and increasing to the element length (undefs are allowed).
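// For example, the mask <0, 1, undef, 3> is sequential from SIndx == 0, and <1, 2, 3, 4> is sequential from SIndx == 1.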
2385 static bool SequentialMask(SmallVectorImpl<int> &Mask, unsigned SIndx) {
2386 unsigned MaskNumElts = Mask.size();
2387 for (unsigned i = 0; i != MaskNumElts; ++i)
2388 if ((Mask[i] >= 0) && (Mask[i] != (int)(i + SIndx)))
2393 void SelectionDAGLowering::visitShuffleVector(User &I) {
2394 SmallVector<int, 8> Mask;
2395 SDValue Src1 = getValue(I.getOperand(0));
2396 SDValue Src2 = getValue(I.getOperand(1));
2398 // Convert the ConstantVector mask operand into an array of ints, with -1
2399 // representing undef values.
2400 SmallVector<Constant*, 8> MaskElts;
2401 cast<Constant>(I.getOperand(2))->getVectorElements(MaskElts);
2402 unsigned MaskNumElts = MaskElts.size();
2403 for (unsigned i = 0; i != MaskNumElts; ++i) {
2404 if (isa<UndefValue>(MaskElts[i]))
2407 Mask.push_back(cast<ConstantInt>(MaskElts[i])->getSExtValue());
2410 MVT VT = TLI.getValueType(I.getType());
2411 MVT SrcVT = Src1.getValueType();
2412 unsigned SrcNumElts = SrcVT.getVectorNumElements();
2414 if (SrcNumElts == MaskNumElts) {
2415 setValue(&I, DAG.getVectorShuffle(VT, getCurDebugLoc(), Src1, Src2,
2420 // Normalize the shuffle vector since the mask and vector lengths don't match.
2421 if (SrcNumElts < MaskNumElts && MaskNumElts % SrcNumElts == 0) {
2422 // The mask is longer than the source vectors and is a multiple of the source
2423 // vector length. We can use CONCAT_VECTORS to make the mask and vector lengths match up.
2425 if (SrcNumElts*2 == MaskNumElts && SequentialMask(Mask, 0)) {
2426 // The shuffle is concatenating two vectors together.
2427 setValue(&I, DAG.getNode(ISD::CONCAT_VECTORS, getCurDebugLoc(),
2432 // Pad both vectors with undefs to make them the same length as the mask.
2433 unsigned NumConcat = MaskNumElts / SrcNumElts;
2434 bool Src1U = Src1.getOpcode() == ISD::UNDEF;
2435 bool Src2U = Src2.getOpcode() == ISD::UNDEF;
2436 SDValue UndefVal = DAG.getUNDEF(SrcVT);
2438 SmallVector<SDValue, 8> MOps1(NumConcat, UndefVal);
2439 SmallVector<SDValue, 8> MOps2(NumConcat, UndefVal);
2443 Src1 = Src1U ? DAG.getUNDEF(VT) : DAG.getNode(ISD::CONCAT_VECTORS,
2444 getCurDebugLoc(), VT,
2445 &MOps1[0], NumConcat);
2446 Src2 = Src2U ? DAG.getUNDEF(VT) : DAG.getNode(ISD::CONCAT_VECTORS,
2447 getCurDebugLoc(), VT,
2448 &MOps2[0], NumConcat);
2450 // Readjust mask for new input vector length.
2451 SmallVector<int, 8> MappedOps;
2452 for (unsigned i = 0; i != MaskNumElts; ++i) {
2454 if (Idx < (int)SrcNumElts)
2455 MappedOps.push_back(Idx);
2457 MappedOps.push_back(Idx + MaskNumElts - SrcNumElts);
2459 setValue(&I, DAG.getVectorShuffle(VT, getCurDebugLoc(), Src1, Src2,
2464 if (SrcNumElts > MaskNumElts) {
2465 // Analyze the access pattern of the vector to see if we can extract
2466 // two subvectors and do the shuffle. The analysis is done by calculating
2467 // the range of elements the mask accesses on both vectors.
2468 int MinRange[2] = { SrcNumElts+1, SrcNumElts+1};
2469 int MaxRange[2] = {-1, -1};
2471 for (unsigned i = 0; i != MaskNumElts; ++i) {
2477 if (Idx >= (int)SrcNumElts) {
2481 if (Idx > MaxRange[Input])
2482 MaxRange[Input] = Idx;
2483 if (Idx < MinRange[Input])
2484 MinRange[Input] = Idx;
2487 // Check if the access is smaller than the vector size, and whether we can find
2488 // a reasonable extract index.
2489 int RangeUse[2] = { 2, 2 }; // 0 = Unused, 1 = Extract, 2 = Cannot extract.
2490 int StartIdx[2]; // StartIdx to extract from
2491 for (int Input=0; Input < 2; ++Input) {
2492 if (MinRange[Input] == (int)(SrcNumElts+1) && MaxRange[Input] == -1) {
2493 RangeUse[Input] = 0; // Unused
2494 StartIdx[Input] = 0;
2495 } else if (MaxRange[Input] - MinRange[Input] < (int)MaskNumElts) {
2496 // Fits within range but we should see if we can find a good
2497 // start index that is a multiple of the mask length.
2498 if (MaxRange[Input] < (int)MaskNumElts) {
2499 RangeUse[Input] = 1; // Extract from beginning of the vector
2500 StartIdx[Input] = 0;
2502 StartIdx[Input] = (MinRange[Input]/MaskNumElts)*MaskNumElts;
2503 if (MaxRange[Input] - StartIdx[Input] < (int)MaskNumElts &&
2504 StartIdx[Input] + MaskNumElts < SrcNumElts)
2505 RangeUse[Input] = 1; // Extract from a multiple of the mask length.
2510 if (RangeUse[0] == 0 && RangeUse[1] == 0) {
2511 setValue(&I, DAG.getUNDEF(VT)); // Vectors are not used.
2514 else if (RangeUse[0] < 2 && RangeUse[1] < 2) {
2515 // Extract appropriate subvector and generate a vector shuffle
2516 for (int Input=0; Input < 2; ++Input) {
2517 SDValue& Src = Input == 0 ? Src1 : Src2;
2518 if (RangeUse[Input] == 0) {
2519 Src = DAG.getUNDEF(VT);
2521 Src = DAG.getNode(ISD::EXTRACT_SUBVECTOR, getCurDebugLoc(), VT,
2522 Src, DAG.getIntPtrConstant(StartIdx[Input]));
2525 // Calculate new mask.
2526 SmallVector<int, 8> MappedOps;
2527 for (unsigned i = 0; i != MaskNumElts; ++i) {
2530 MappedOps.push_back(Idx);
2531 else if (Idx < (int)SrcNumElts)
2532 MappedOps.push_back(Idx - StartIdx[0]);
2534 MappedOps.push_back(Idx - SrcNumElts - StartIdx[1] + MaskNumElts);
2536 setValue(&I, DAG.getVectorShuffle(VT, getCurDebugLoc(), Src1, Src2,
2542 // We can't use either concat vectors or extract subvectors, so fall back to
2543 // replacing the shuffle with extract and build vector.
2545 MVT EltVT = VT.getVectorElementType();
2546 MVT PtrVT = TLI.getPointerTy();
2547 SmallVector<SDValue,8> Ops;
2548 for (unsigned i = 0; i != MaskNumElts; ++i) {
2550 Ops.push_back(DAG.getUNDEF(EltVT));
2553 if (Idx < (int)SrcNumElts)
2554 Ops.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, getCurDebugLoc(),
2555 EltVT, Src1, DAG.getConstant(Idx, PtrVT)));
2557 Ops.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, getCurDebugLoc(),
2559 DAG.getConstant(Idx - SrcNumElts, PtrVT)));
2562 setValue(&I, DAG.getNode(ISD::BUILD_VECTOR, getCurDebugLoc(),
2563 VT, &Ops[0], Ops.size()));
2566 void SelectionDAGLowering::visitInsertValue(InsertValueInst &I) {
2567 const Value *Op0 = I.getOperand(0);
2568 const Value *Op1 = I.getOperand(1);
2569 const Type *AggTy = I.getType();
2570 const Type *ValTy = Op1->getType();
2571 bool IntoUndef = isa<UndefValue>(Op0);
2572 bool FromUndef = isa<UndefValue>(Op1);
2574 unsigned LinearIndex = ComputeLinearIndex(TLI, AggTy,
2575 I.idx_begin(), I.idx_end());
2577 SmallVector<MVT, 4> AggValueVTs;
2578 ComputeValueVTs(TLI, AggTy, AggValueVTs);
2579 SmallVector<MVT, 4> ValValueVTs;
2580 ComputeValueVTs(TLI, ValTy, ValValueVTs);
2582 unsigned NumAggValues = AggValueVTs.size();
2583 unsigned NumValValues = ValValueVTs.size();
2584 SmallVector<SDValue, 4> Values(NumAggValues);
2586 SDValue Agg = getValue(Op0);
2587 SDValue Val = getValue(Op1);
2589 // Copy the beginning value(s) from the original aggregate.
2590 for (; i != LinearIndex; ++i)
2591 Values[i] = IntoUndef ? DAG.getUNDEF(AggValueVTs[i]) :
2592 SDValue(Agg.getNode(), Agg.getResNo() + i);
2593 // Copy values from the inserted value(s).
2594 for (; i != LinearIndex + NumValValues; ++i)
2595 Values[i] = FromUndef ? DAG.getUNDEF(AggValueVTs[i]) :
2596 SDValue(Val.getNode(), Val.getResNo() + i - LinearIndex);
2597 // Copy remaining value(s) from the original aggregate.
2598 for (; i != NumAggValues; ++i)
2599 Values[i] = IntoUndef ? DAG.getUNDEF(AggValueVTs[i]) :
2600 SDValue(Agg.getNode(), Agg.getResNo() + i);
2602 setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurDebugLoc(),
2603 DAG.getVTList(&AggValueVTs[0], NumAggValues),
2604 &Values[0], NumAggValues));
2607 void SelectionDAGLowering::visitExtractValue(ExtractValueInst &I) {
2608 const Value *Op0 = I.getOperand(0);
2609 const Type *AggTy = Op0->getType();
2610 const Type *ValTy = I.getType();
2611 bool OutOfUndef = isa<UndefValue>(Op0);
2613 unsigned LinearIndex = ComputeLinearIndex(TLI, AggTy,
2614 I.idx_begin(), I.idx_end());
2616 SmallVector<MVT, 4> ValValueVTs;
2617 ComputeValueVTs(TLI, ValTy, ValValueVTs);
2619 unsigned NumValValues = ValValueVTs.size();
2620 SmallVector<SDValue, 4> Values(NumValValues);
2622 SDValue Agg = getValue(Op0);
2623 // Copy out the selected value(s).
2624 for (unsigned i = LinearIndex; i != LinearIndex + NumValValues; ++i)
2625 Values[i - LinearIndex] =
2627 DAG.getUNDEF(Agg.getNode()->getValueType(Agg.getResNo() + i)) :
2628 SDValue(Agg.getNode(), Agg.getResNo() + i);
2630 setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurDebugLoc(),
2631 DAG.getVTList(&ValValueVTs[0], NumValValues),
2632 &Values[0], NumValValues));
2636 void SelectionDAGLowering::visitGetElementPtr(User &I) {
2637 SDValue N = getValue(I.getOperand(0));
2638 const Type *Ty = I.getOperand(0)->getType();
2640 for (GetElementPtrInst::op_iterator OI = I.op_begin()+1, E = I.op_end();
2643 if (const StructType *StTy = dyn_cast<StructType>(Ty)) {
2644 unsigned Field = cast<ConstantInt>(Idx)->getZExtValue();
2647 uint64_t Offset = TD->getStructLayout(StTy)->getElementOffset(Field);
2648 N = DAG.getNode(ISD::ADD, getCurDebugLoc(), N.getValueType(), N,
2649 DAG.getIntPtrConstant(Offset));
2651 Ty = StTy->getElementType(Field);
2653 Ty = cast<SequentialType>(Ty)->getElementType();
2655 // If this is a constant subscript, handle it quickly.
2656 if (ConstantInt *CI = dyn_cast<ConstantInt>(Idx)) {
2657 if (CI->getZExtValue() == 0) continue;
2659 TD->getTypeAllocSize(Ty)*cast<ConstantInt>(CI)->getSExtValue();
2661 unsigned PtrBits = TLI.getPointerTy().getSizeInBits();
2663 OffsVal = DAG.getNode(ISD::TRUNCATE, getCurDebugLoc(),
2665 DAG.getConstant(Offs, MVT::i64));
2667 OffsVal = DAG.getIntPtrConstant(Offs);
2668 N = DAG.getNode(ISD::ADD, getCurDebugLoc(), N.getValueType(), N,
2673 // N = N + Idx * ElementSize;
2674 uint64_t ElementSize = TD->getTypeAllocSize(Ty);
2675 SDValue IdxN = getValue(Idx);
2677 // If the index is smaller or larger than intptr_t, truncate or extend it.
2679 if (IdxN.getValueType().bitsLT(N.getValueType()))
2680 IdxN = DAG.getNode(ISD::SIGN_EXTEND, getCurDebugLoc(),
2681 N.getValueType(), IdxN);
2682 else if (IdxN.getValueType().bitsGT(N.getValueType()))
2683 IdxN = DAG.getNode(ISD::TRUNCATE, getCurDebugLoc(),
2684 N.getValueType(), IdxN);
2686 // If this is a multiply by a power of two, turn it into a shl
2687 // immediately. This is a very common case.
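// For example, indexing into an array of i64 elements (ElementSize == 8) becomes IdxN << 3.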
2688 if (ElementSize != 1) {
2689 if (isPowerOf2_64(ElementSize)) {
2690 unsigned Amt = Log2_64(ElementSize);
2691 IdxN = DAG.getNode(ISD::SHL, getCurDebugLoc(),
2692 N.getValueType(), IdxN,
2693 DAG.getConstant(Amt, TLI.getPointerTy()));
2695 SDValue Scale = DAG.getIntPtrConstant(ElementSize);
2696 IdxN = DAG.getNode(ISD::MUL, getCurDebugLoc(),
2697 N.getValueType(), IdxN, Scale);
2701 N = DAG.getNode(ISD::ADD, getCurDebugLoc(),
2702 N.getValueType(), N, IdxN);
2708 void SelectionDAGLowering::visitAlloca(AllocaInst &I) {
2709 // If this is a fixed sized alloca in the entry block of the function,
2710 // allocate it statically on the stack.
2711 if (FuncInfo.StaticAllocaMap.count(&I))
2712 return; // getValue will auto-populate this.
2714 const Type *Ty = I.getAllocatedType();
2715 uint64_t TySize = TLI.getTargetData()->getTypeAllocSize(Ty);
2717 std::max((unsigned)TLI.getTargetData()->getPrefTypeAlignment(Ty),
2720 SDValue AllocSize = getValue(I.getArraySize());
2722 AllocSize = DAG.getNode(ISD::MUL, getCurDebugLoc(), AllocSize.getValueType(),
2724 DAG.getConstant(TySize, AllocSize.getValueType()));
2728 MVT IntPtr = TLI.getPointerTy();
2729 if (IntPtr.bitsLT(AllocSize.getValueType()))
2730 AllocSize = DAG.getNode(ISD::TRUNCATE, getCurDebugLoc(),
2732 else if (IntPtr.bitsGT(AllocSize.getValueType()))
2733 AllocSize = DAG.getNode(ISD::ZERO_EXTEND, getCurDebugLoc(),
2736 // Handle alignment. If the requested alignment is less than or equal to
2737 // the stack alignment, ignore it. If the requested alignment is greater than
2738 // the stack alignment, we note this in the DYNAMIC_STACKALLOC node.
2739 unsigned StackAlign =
2740 TLI.getTargetMachine().getFrameInfo()->getStackAlignment();
2741 if (Align <= StackAlign)
2744 // Round the size of the allocation up to the stack alignment size
2745 // by adding StackAlign-1 to the size.
2746 AllocSize = DAG.getNode(ISD::ADD, getCurDebugLoc(),
2747 AllocSize.getValueType(), AllocSize,
2748 DAG.getIntPtrConstant(StackAlign-1));
2749 // Mask out the low bits for alignment purposes.
2750 AllocSize = DAG.getNode(ISD::AND, getCurDebugLoc(),
2751 AllocSize.getValueType(), AllocSize,
2752 DAG.getIntPtrConstant(~(uint64_t)(StackAlign-1)));
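// For example (illustrative values): with StackAlign == 16, an AllocSize of 20 becomes (20 + 15) & ~15 == 32.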
2754 SDValue Ops[] = { getRoot(), AllocSize, DAG.getIntPtrConstant(Align) };
2755 SDVTList VTs = DAG.getVTList(AllocSize.getValueType(), MVT::Other);
2756 SDValue DSA = DAG.getNode(ISD::DYNAMIC_STACKALLOC, getCurDebugLoc(),
2759 DAG.setRoot(DSA.getValue(1));
2761 // Inform the Frame Information that we have just allocated a variable-sized object.
2763 CurMBB->getParent()->getFrameInfo()->CreateVariableSizedObject();
2766 void SelectionDAGLowering::visitLoad(LoadInst &I) {
2767 const Value *SV = I.getOperand(0);
2768 SDValue Ptr = getValue(SV);
2770 const Type *Ty = I.getType();
2771 bool isVolatile = I.isVolatile();
2772 unsigned Alignment = I.getAlignment();
2774 SmallVector<MVT, 4> ValueVTs;
2775 SmallVector<uint64_t, 4> Offsets;
2776 ComputeValueVTs(TLI, Ty, ValueVTs, &Offsets);
2777 unsigned NumValues = ValueVTs.size();
2782 bool ConstantMemory = false;
2784 // Serialize volatile loads with other side effects.
2786 else if (AA->pointsToConstantMemory(SV)) {
2787 // Do not serialize (non-volatile) loads of constant memory with anything.
2788 Root = DAG.getEntryNode();
2789 ConstantMemory = true;
2791 // Do not serialize non-volatile loads against each other.
2792 Root = DAG.getRoot();
2795 SmallVector<SDValue, 4> Values(NumValues);
2796 SmallVector<SDValue, 4> Chains(NumValues);
2797 MVT PtrVT = Ptr.getValueType();
2798 for (unsigned i = 0; i != NumValues; ++i) {
2799 SDValue L = DAG.getLoad(ValueVTs[i], getCurDebugLoc(), Root,
2800 DAG.getNode(ISD::ADD, getCurDebugLoc(),
2802 DAG.getConstant(Offsets[i], PtrVT)),
2804 isVolatile, Alignment);
2806 Chains[i] = L.getValue(1);
2809 if (!ConstantMemory) {
2810 SDValue Chain = DAG.getNode(ISD::TokenFactor, getCurDebugLoc(),
2812 &Chains[0], NumValues);
2816 PendingLoads.push_back(Chain);
2819 setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurDebugLoc(),
2820 DAG.getVTList(&ValueVTs[0], NumValues),
2821 &Values[0], NumValues));
2825 void SelectionDAGLowering::visitStore(StoreInst &I) {
2826 Value *SrcV = I.getOperand(0);
2827 Value *PtrV = I.getOperand(1);
2829 SmallVector<MVT, 4> ValueVTs;
2830 SmallVector<uint64_t, 4> Offsets;
2831 ComputeValueVTs(TLI, SrcV->getType(), ValueVTs, &Offsets);
2832 unsigned NumValues = ValueVTs.size();
2836 // Get the lowered operands. Note that we do this after
2837 // checking if NumResults is zero, because with zero results
2838 // the operands won't have values in the map.
2839 SDValue Src = getValue(SrcV);
2840 SDValue Ptr = getValue(PtrV);
2842 SDValue Root = getRoot();
2843 SmallVector<SDValue, 4> Chains(NumValues);
2844 MVT PtrVT = Ptr.getValueType();
2845 bool isVolatile = I.isVolatile();
2846 unsigned Alignment = I.getAlignment();
2847 for (unsigned i = 0; i != NumValues; ++i)
2848 Chains[i] = DAG.getStore(Root, getCurDebugLoc(),
2849 SDValue(Src.getNode(), Src.getResNo() + i),
2850 DAG.getNode(ISD::ADD, getCurDebugLoc(),
2852 DAG.getConstant(Offsets[i], PtrVT)),
2854 isVolatile, Alignment);
2856 DAG.setRoot(DAG.getNode(ISD::TokenFactor, getCurDebugLoc(),
2857 MVT::Other, &Chains[0], NumValues));
2860 /// visitTargetIntrinsic - Lower a call of a target intrinsic to an INTRINSIC node.
2862 void SelectionDAGLowering::visitTargetIntrinsic(CallInst &I,
2863 unsigned Intrinsic) {
2864 bool HasChain = !I.doesNotAccessMemory();
2865 bool OnlyLoad = HasChain && I.onlyReadsMemory();
2867 // Build the operand list.
2868 SmallVector<SDValue, 8> Ops;
2869 if (HasChain) { // If this intrinsic has side-effects, chainify it.
2871 // We don't need to serialize loads against other loads.
2872 Ops.push_back(DAG.getRoot());
2874 Ops.push_back(getRoot());
2878 // Info is set by getTgtMemIntrinsic
2879 TargetLowering::IntrinsicInfo Info;
2880 bool IsTgtIntrinsic = TLI.getTgtMemIntrinsic(Info, I, Intrinsic);
2882 // Add the intrinsic ID as an integer operand if it's not a target intrinsic.
2883 if (!IsTgtIntrinsic)
2884 Ops.push_back(DAG.getConstant(Intrinsic, TLI.getPointerTy()));
2886 // Add all operands of the call to the operand list.
2887 for (unsigned i = 1, e = I.getNumOperands(); i != e; ++i) {
2888 SDValue Op = getValue(I.getOperand(i));
2889 assert(TLI.isTypeLegal(Op.getValueType()) &&
2890 "Intrinsic uses a non-legal type?");
2894 std::vector<MVT> VTArray;
2895 if (I.getType() != Type::VoidTy) {
2896 MVT VT = TLI.getValueType(I.getType());
2897 if (VT.isVector()) {
2898 const VectorType *DestTy = cast<VectorType>(I.getType());
2899 MVT EltVT = TLI.getValueType(DestTy->getElementType());
2901 VT = MVT::getVectorVT(EltVT, DestTy->getNumElements());
2902 assert(VT != MVT::Other && "Intrinsic uses a non-legal type?");
2905 assert(TLI.isTypeLegal(VT) && "Intrinsic uses a non-legal type?");
2906 VTArray.push_back(VT);
2909 VTArray.push_back(MVT::Other);
2911 SDVTList VTs = DAG.getVTList(&VTArray[0], VTArray.size());
2915 if (IsTgtIntrinsic) {
2916 // This is a target intrinsic that touches memory
2917 Result = DAG.getMemIntrinsicNode(Info.opc, getCurDebugLoc(),
2918 VTs, &Ops[0], Ops.size(),
2919 Info.memVT, Info.ptrVal, Info.offset,
2920 Info.align, Info.vol,
2921 Info.readMem, Info.writeMem);
2924 Result = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, getCurDebugLoc(),
2925 VTs, &Ops[0], Ops.size());
2926 else if (I.getType() != Type::VoidTy)
2927 Result = DAG.getNode(ISD::INTRINSIC_W_CHAIN, getCurDebugLoc(),
2928 VTs, &Ops[0], Ops.size());
2930 Result = DAG.getNode(ISD::INTRINSIC_VOID, getCurDebugLoc(),
2931 VTs, &Ops[0], Ops.size());
2934 SDValue Chain = Result.getValue(Result.getNode()->getNumValues()-1);
2936 PendingLoads.push_back(Chain);
2940 if (I.getType() != Type::VoidTy) {
2941 if (const VectorType *PTy = dyn_cast<VectorType>(I.getType())) {
2942 MVT VT = TLI.getValueType(PTy);
2943 Result = DAG.getNode(ISD::BIT_CONVERT, getCurDebugLoc(), VT, Result);
2945 setValue(&I, Result);
2949 /// ExtractTypeInfo - Returns the type info, possibly bitcast, encoded in V.
2950 static GlobalVariable *ExtractTypeInfo(Value *V) {
2951 V = V->stripPointerCasts();
2952 GlobalVariable *GV = dyn_cast<GlobalVariable>(V);
2953 assert ((GV || isa<ConstantPointerNull>(V)) &&
2954 "TypeInfo must be a global variable or NULL");
2960 /// AddCatchInfo - Extract the personality and type infos from an eh.selector
2961 /// call, and add them to the specified machine basic block.
2962 void AddCatchInfo(CallInst &I, MachineModuleInfo *MMI,
2963 MachineBasicBlock *MBB) {
2964 // Inform the MachineModuleInfo of the personality for this landing pad.
2965 ConstantExpr *CE = cast<ConstantExpr>(I.getOperand(2));
2966 assert(CE->getOpcode() == Instruction::BitCast &&
2967 isa<Function>(CE->getOperand(0)) &&
2968 "Personality should be a function");
2969 MMI->addPersonality(MBB, cast<Function>(CE->getOperand(0)));
2971 // Gather all the type infos for this landing pad and pass them along to
2972 // MachineModuleInfo.
2973 std::vector<GlobalVariable *> TyInfo;
2974 unsigned N = I.getNumOperands();
2976 for (unsigned i = N - 1; i > 2; --i) {
2977 if (ConstantInt *CI = dyn_cast<ConstantInt>(I.getOperand(i))) {
2978 unsigned FilterLength = CI->getZExtValue();
2979 unsigned FirstCatch = i + FilterLength + !FilterLength;
2980 assert (FirstCatch <= N && "Invalid filter length");
2982 if (FirstCatch < N) {
2983 TyInfo.reserve(N - FirstCatch);
2984 for (unsigned j = FirstCatch; j < N; ++j)
2985 TyInfo.push_back(ExtractTypeInfo(I.getOperand(j)));
2986 MMI->addCatchTypeInfo(MBB, TyInfo);
2990 if (!FilterLength) {
2992 MMI->addCleanup(MBB);
2995 TyInfo.reserve(FilterLength - 1);
2996 for (unsigned j = i + 1; j < FirstCatch; ++j)
2997 TyInfo.push_back(ExtractTypeInfo(I.getOperand(j)));
2998 MMI->addFilterTypeInfo(MBB, TyInfo);
3007 TyInfo.reserve(N - 3);
3008 for (unsigned j = 3; j < N; ++j)
3009 TyInfo.push_back(ExtractTypeInfo(I.getOperand(j)));
3010 MMI->addCatchTypeInfo(MBB, TyInfo);
3016 /// GetSignificand - Get the significand and build it into a floating-point
3017 /// number with an exponent of 1:
3019 /// Op = (Op & 0x007fffff) | 0x3f800000;
3021 /// where Op is the hexadecimal representation of the floating-point value.
3023 GetSignificand(SelectionDAG &DAG, SDValue Op, DebugLoc dl) {
3024 SDValue t1 = DAG.getNode(ISD::AND, dl, MVT::i32, Op,
3025 DAG.getConstant(0x007fffff, MVT::i32));
3026 SDValue t2 = DAG.getNode(ISD::OR, dl, MVT::i32, t1,
3027 DAG.getConstant(0x3f800000, MVT::i32));
3028 return DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f32, t2);
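// For example (illustrative value): Op == 0x40490fdb (approximately 3.1416f) yields 0x3fc90fdb, which is approximately 1.5708f.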
3031 /// GetExponent - Get the exponent:
3033 /// (float)(int)(((Op & 0x7f800000) >> 23) - 127);
3035 /// where Op is the hexadecimal representation of the floating-point value.
3037 GetExponent(SelectionDAG &DAG, SDValue Op, const TargetLowering &TLI,
3039 SDValue t0 = DAG.getNode(ISD::AND, dl, MVT::i32, Op,
3040 DAG.getConstant(0x7f800000, MVT::i32));
3041 SDValue t1 = DAG.getNode(ISD::SRL, dl, MVT::i32, t0,
3042 DAG.getConstant(23, TLI.getPointerTy()));
3043 SDValue t2 = DAG.getNode(ISD::SUB, dl, MVT::i32, t1,
3044 DAG.getConstant(127, MVT::i32));
3045 return DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, t2);
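// For example (illustrative value): Op == 0x40490fdb (approximately 3.1416f) has an exponent field of 128, so the result is 1.0f.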
3048 /// getF32Constant - Get a 32-bit floating-point constant.
3050 getF32Constant(SelectionDAG &DAG, unsigned Flt) {
3051 return DAG.getConstantFP(APFloat(APInt(32, Flt)), MVT::f32);
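// For example, getF32Constant(DAG, 0x3f800000) is 1.0f and getF32Constant(DAG, 0x3fb8aa3b) is approximately 1.4426950f (log2(e)).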
3054 /// Inlined utility function to implement binary input atomic intrinsics for
3055 /// visitIntrinsicCall: I is the call instruction and
3056 /// Op is the associated NodeType for I.
3058 SelectionDAGLowering::implVisitBinaryAtomic(CallInst& I, ISD::NodeType Op) {
3059 SDValue Root = getRoot();
3061 DAG.getAtomic(Op, getCurDebugLoc(),
3062 getValue(I.getOperand(2)).getValueType().getSimpleVT(),
3064 getValue(I.getOperand(1)),
3065 getValue(I.getOperand(2)),
3068 DAG.setRoot(L.getValue(1));
3072 // implVisitAluOverflow - Lower arithmetic overflow intrinsics.
3074 SelectionDAGLowering::implVisitAluOverflow(CallInst &I, ISD::NodeType Op) {
3075 SDValue Op1 = getValue(I.getOperand(1));
3076 SDValue Op2 = getValue(I.getOperand(2));
3078 SDVTList VTs = DAG.getVTList(Op1.getValueType(), MVT::i1);
3079 SDValue Result = DAG.getNode(Op, getCurDebugLoc(), VTs, Op1, Op2);
3081 setValue(&I, Result);
3085 /// visitExp - Lower an exp intrinsic. Handles the special sequences for
3086 /// limited-precision mode.
3088 SelectionDAGLowering::visitExp(CallInst &I) {
3090 DebugLoc dl = getCurDebugLoc();
3092 if (getValue(I.getOperand(1)).getValueType() == MVT::f32 &&
3093 LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
3094 SDValue Op = getValue(I.getOperand(1));
3096 // Put the exponent in the right bit position for later addition to the final result:
3099 // #define LOG2OFe 1.4426950f
3100 // IntegerPartOfX = ((int32_t)(X * LOG2OFe));
3101 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, Op,
3102 getF32Constant(DAG, 0x3fb8aa3b));
3103 SDValue IntegerPartOfX = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, t0);
3105 // FractionalPartOfX = (X * LOG2OFe) - (float)IntegerPartOfX;
3106 SDValue t1 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, IntegerPartOfX);
3107 SDValue X = DAG.getNode(ISD::FSUB, dl, MVT::f32, t0, t1);
3109 // IntegerPartOfX <<= 23;
3110 IntegerPartOfX = DAG.getNode(ISD::SHL, dl, MVT::i32, IntegerPartOfX,
3111 DAG.getConstant(23, TLI.getPointerTy()));
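// Once in the exponent position, adding IntegerPartOfX to the bit pattern of 2^FractionalPartOfX (in the integer domain below) multiplies the result by 2^IntegerPartOfX.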
3113 if (LimitFloatPrecision <= 6) {
3114 // For floating-point precision of 6:
3116 // TwoToFractionalPartOfX =
3118 // (0.735607626f + 0.252464424f * x) * x;
3120 // error 0.0144103317, which is 6 bits
3121 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3122 getF32Constant(DAG, 0x3e814304));
3123 SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
3124 getF32Constant(DAG, 0x3f3c50c8));
3125 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
3126 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
3127 getF32Constant(DAG, 0x3f7f5e7e));
3128 SDValue TwoToFracPartOfX = DAG.getNode(ISD::BIT_CONVERT, dl,MVT::i32, t5);
3130 // Add the exponent into the result in integer domain.
3131 SDValue t6 = DAG.getNode(ISD::ADD, dl, MVT::i32,
3132 TwoToFracPartOfX, IntegerPartOfX);
3134 result = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f32, t6);
3135 } else if (LimitFloatPrecision > 6 && LimitFloatPrecision <= 12) {
3136 // For floating-point precision of 12:
3138 // TwoToFractionalPartOfX =
3141 // (0.224338339f + 0.792043434e-1f * x) * x) * x;
3143 // 0.000107046256 error, which is 13 to 14 bits
3144 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3145 getF32Constant(DAG, 0x3da235e3));
3146 SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
3147 getF32Constant(DAG, 0x3e65b8f3));
3148 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
3149 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
3150 getF32Constant(DAG, 0x3f324b07));
3151 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
3152 SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
3153 getF32Constant(DAG, 0x3f7ff8fd));
3154 SDValue TwoToFracPartOfX = DAG.getNode(ISD::BIT_CONVERT, dl,MVT::i32, t7);
3156 // Add the exponent into the result in integer domain.
3157 SDValue t8 = DAG.getNode(ISD::ADD, dl, MVT::i32,
3158 TwoToFracPartOfX, IntegerPartOfX);
3160 result = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f32, t8);
3161 } else { // LimitFloatPrecision > 12 && LimitFloatPrecision <= 18
3162 // For floating-point precision of 18:
3164 // TwoToFractionalPartOfX =
3168 // (0.554906021e-1f +
3169 // (0.961591928e-2f +
3170 // (0.136028312e-2f + 0.157059148e-3f *x)*x)*x)*x)*x)*x;
3172 // error 2.47208000*10^(-7), which is better than 18 bits
3173 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3174 getF32Constant(DAG, 0x3924b03e));
3175 SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
3176 getF32Constant(DAG, 0x3ab24b87));
3177 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
3178 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
3179 getF32Constant(DAG, 0x3c1d8c17));
3180 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
3181 SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
3182 getF32Constant(DAG, 0x3d634a1d));
3183 SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
3184 SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8,
3185 getF32Constant(DAG, 0x3e75fe14));
3186 SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X);
3187 SDValue t11 = DAG.getNode(ISD::FADD, dl, MVT::f32, t10,
3188 getF32Constant(DAG, 0x3f317234));
3189 SDValue t12 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t11, X);
3190 SDValue t13 = DAG.getNode(ISD::FADD, dl, MVT::f32, t12,
3191 getF32Constant(DAG, 0x3f800000));
3192 SDValue TwoToFracPartOfX = DAG.getNode(ISD::BIT_CONVERT, dl,
3195 // Add the exponent into the result in integer domain.
3196 SDValue t14 = DAG.getNode(ISD::ADD, dl, MVT::i32,
3197 TwoToFracPartOfX, IntegerPartOfX);
3199 result = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f32, t14);
3202 // No special expansion.
3203 result = DAG.getNode(ISD::FEXP, dl,
3204 getValue(I.getOperand(1)).getValueType(),
3205 getValue(I.getOperand(1)));
3208 setValue(&I, result);
3211 /// visitLog - Lower a log intrinsic. Handles the special sequences for
3212 /// limited-precision mode.
3214 SelectionDAGLowering::visitLog(CallInst &I) {
3216 DebugLoc dl = getCurDebugLoc();
3218 if (getValue(I.getOperand(1)).getValueType() == MVT::f32 &&
3219 LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
3220 SDValue Op = getValue(I.getOperand(1));
3221 SDValue Op1 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, Op);
3223 // Scale the exponent by log(2) [0.69314718f].
3224 SDValue Exp = GetExponent(DAG, Op1, TLI, dl);
3225 SDValue LogOfExponent = DAG.getNode(ISD::FMUL, dl, MVT::f32, Exp,
3226 getF32Constant(DAG, 0x3f317218));
3228 // Get the significand and build it into a floating-point number with an exponent of 1.
3230 SDValue X = GetSignificand(DAG, Op1, dl);
3232 if (LimitFloatPrecision <= 6) {
3233 // For floating-point precision of 6:
3237 // (1.4034025f - 0.23903021f * x) * x;
3239 // error 0.0034276066, which is better than 8 bits
3240 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3241 getF32Constant(DAG, 0xbe74c456));
3242 SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
3243 getF32Constant(DAG, 0x3fb3a2b1));
3244 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
3245 SDValue LogOfMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
3246 getF32Constant(DAG, 0x3f949a29));
3248 result = DAG.getNode(ISD::FADD, dl,
3249 MVT::f32, LogOfExponent, LogOfMantissa);
3250 } else if (LimitFloatPrecision > 6 && LimitFloatPrecision <= 12) {
3251 // For floating-point precision of 12:
3257 // (0.44717955f - 0.56570851e-1f * x) * x) * x) * x;
3259 // error 0.000061011436, which is 14 bits
3260 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3261 getF32Constant(DAG, 0xbd67b6d6));
3262 SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
3263 getF32Constant(DAG, 0x3ee4f4b8));
3264 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
3265 SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
3266 getF32Constant(DAG, 0x3fbc278b));
3267 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
3268 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
3269 getF32Constant(DAG, 0x40348e95));
3270 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
3271 SDValue LogOfMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
3272 getF32Constant(DAG, 0x3fdef31a));
3274 result = DAG.getNode(ISD::FADD, dl,
3275 MVT::f32, LogOfExponent, LogOfMantissa);
3276 } else { // LimitFloatPrecision > 12 && LimitFloatPrecision <= 18
3277 // For floating-point precision of 18:
3285 // (0.19073739f - 0.17809712e-1f * x) * x) * x) * x) * x)*x;
3287 // error 0.0000023660568, which is better than 18 bits
3288 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3289 getF32Constant(DAG, 0xbc91e5ac));
3290 SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
3291 getF32Constant(DAG, 0x3e4350aa));
3292 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
3293 SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
3294 getF32Constant(DAG, 0x3f60d3e3));
3295 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
3296 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
3297 getF32Constant(DAG, 0x4011cdf0));
3298 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
3299 SDValue t7 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
3300 getF32Constant(DAG, 0x406cfd1c));
3301 SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
3302 SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8,
3303 getF32Constant(DAG, 0x408797cb));
3304 SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X);
3305 SDValue LogOfMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t10,
3306 getF32Constant(DAG, 0x4006dcab));
3308 result = DAG.getNode(ISD::FADD, dl,
3309 MVT::f32, LogOfExponent, LogOfMantissa);
3312 // No special expansion.
3313 result = DAG.getNode(ISD::FLOG, dl,
3314 getValue(I.getOperand(1)).getValueType(),
3315 getValue(I.getOperand(1)));
3318 setValue(&I, result);
3321 /// visitLog2 - Lower a log2 intrinsic. Handles the special sequences for
3322 /// limited-precision mode.
3324 SelectionDAGLowering::visitLog2(CallInst &I) {
3326 DebugLoc dl = getCurDebugLoc();
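// As in visitLog, x = 2^Exp * Significand with Significand in [1,2); for log2
// the exponent needs no scaling, since
//
//   log2(x) = Exp + log2(Significand)
//
// so only log2(Significand) is approximated by the polynomials below.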
3328 if (getValue(I.getOperand(1)).getValueType() == MVT::f32 &&
3329 LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
3330 SDValue Op = getValue(I.getOperand(1));
3331 SDValue Op1 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, Op);
3333 // Get the exponent.
3334 SDValue LogOfExponent = GetExponent(DAG, Op1, TLI, dl);
3336 // Get the significand and build it into a floating-point number with
3337 // exponent of 1.
3338 SDValue X = GetSignificand(DAG, Op1, dl);
3340 // Different possible minimax approximations of significand in
3341 // floating-point for various degrees of accuracy over [1,2].
3342 if (LimitFloatPrecision <= 6) {
3343 // For floating-point precision of 6:
3345 // Log2ofMantissa = -1.6749035f + (2.0246817f - .34484768f * x) * x;
3347 // error 0.0049451742, which is more than 7 bits
3348 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3349 getF32Constant(DAG, 0xbeb08fe0));
3350 SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
3351 getF32Constant(DAG, 0x40019463));
3352 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
3353 SDValue Log2ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
3354 getF32Constant(DAG, 0x3fd6633d));
3356 result = DAG.getNode(ISD::FADD, dl,
3357 MVT::f32, LogOfExponent, Log2ofMantissa);
3358 } else if (LimitFloatPrecision > 6 && LimitFloatPrecision <= 12) {
3359 // For floating-point precision of 12:
3365 // (.645142248f - 0.816157886e-1f * x) * x) * x) * x;
3367 // error 0.0000876136000, which is better than 13 bits
3368 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3369 getF32Constant(DAG, 0xbda7262e));
3370 SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
3371 getF32Constant(DAG, 0x3f25280b));
3372 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
3373 SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
3374 getF32Constant(DAG, 0x4007b923));
3375 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
3376 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
3377 getF32Constant(DAG, 0x40823e2f));
3378 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
3379 SDValue Log2ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
3380 getF32Constant(DAG, 0x4020d29c));
3382 result = DAG.getNode(ISD::FADD, dl,
3383 MVT::f32, LogOfExponent, Log2ofMantissa);
3384 } else { // LimitFloatPrecision > 12 && LimitFloatPrecision <= 18
3385 // For floating-point precision of 18:
3394 // 0.25691327e-1f * x) * x) * x) * x) * x) * x;
3396 // error 0.0000018516, which is better than 18 bits
3397 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3398 getF32Constant(DAG, 0xbcd2769e));
3399 SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
3400 getF32Constant(DAG, 0x3e8ce0b9));
3401 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
3402 SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
3403 getF32Constant(DAG, 0x3fa22ae7));
3404 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
3405 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
3406 getF32Constant(DAG, 0x40525723));
3407 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
3408 SDValue t7 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
3409 getF32Constant(DAG, 0x40aaf200));
3410 SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
3411 SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8,
3412 getF32Constant(DAG, 0x40c39dad));
3413 SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X);
3414 SDValue Log2ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t10,
3415 getF32Constant(DAG, 0x4042902c));
3417 result = DAG.getNode(ISD::FADD, dl,
3418 MVT::f32, LogOfExponent, Log2ofMantissa);
3421 // No special expansion.
3422 result = DAG.getNode(ISD::FLOG2, dl,
3423 getValue(I.getOperand(1)).getValueType(),
3424 getValue(I.getOperand(1)));
3427 setValue(&I, result);
3430 /// visitLog10 - Lower a log10 intrinsic. Handles the special sequences for
3431 /// limited-precision mode.
3433 SelectionDAGLowering::visitLog10(CallInst &I) {
3435 DebugLoc dl = getCurDebugLoc();
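// As in visitLog/visitLog2, x = 2^Exp * Significand with Significand in [1,2):
//
//   log10(x) = Exp*log10(2) + log10(Significand)
//
// hence the exponent is scaled by log10(2) and only log10(Significand) is
// approximated by the polynomials below.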
3437 if (getValue(I.getOperand(1)).getValueType() == MVT::f32 &&
3438 LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
3439 SDValue Op = getValue(I.getOperand(1));
3440 SDValue Op1 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, Op);
3442 // Scale the exponent by log10(2) [0.30102999f].
3443 SDValue Exp = GetExponent(DAG, Op1, TLI, dl);
3444 SDValue LogOfExponent = DAG.getNode(ISD::FMUL, dl, MVT::f32, Exp,
3445 getF32Constant(DAG, 0x3e9a209a));
3447 // Get the significand and build it into a floating-point number with
3448 // exponent of 1.
3449 SDValue X = GetSignificand(DAG, Op1, dl);
3451 if (LimitFloatPrecision <= 6) {
3452 // For floating-point precision of 6:
3454 // Log10ofMantissa =
3456 // (0.60948995f - 0.10380950f * x) * x;
3458 // error 0.0014886165, which is 6 bits
3459 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3460 getF32Constant(DAG, 0xbdd49a13));
3461 SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
3462 getF32Constant(DAG, 0x3f1c0789));
3463 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
3464 SDValue Log10ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
3465 getF32Constant(DAG, 0x3f011300));
3467 result = DAG.getNode(ISD::FADD, dl,
3468 MVT::f32, LogOfExponent, Log10ofMantissa);
3469 } else if (LimitFloatPrecision > 6 && LimitFloatPrecision <= 12) {
3470 // For floating-point precision of 12:
3472 // Log10ofMantissa =
3475 // (-0.31664806f + 0.47637168e-1f * x) * x) * x;
3477 // error 0.00019228036, which is better than 12 bits
3478 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3479 getF32Constant(DAG, 0x3d431f31));
3480 SDValue t1 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t0,
3481 getF32Constant(DAG, 0x3ea21fb2));
3482 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
3483 SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
3484 getF32Constant(DAG, 0x3f6ae232));
3485 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
3486 SDValue Log10ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t4,
3487 getF32Constant(DAG, 0x3f25f7c3));
3489 result = DAG.getNode(ISD::FADD, dl,
3490 MVT::f32, LogOfExponent, Log10ofMantissa);
3491 } else { // LimitFloatPrecision > 12 && LimitFloatPrecision <= 18
3492 // For floating-point precision of 18:
3494 // Log10ofMantissa =
3499 // (-0.12539807f + 0.13508273e-1f * x) * x) * x) * x) * x;
3501 // error 0.0000037995730, which is better than 18 bits
3502 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3503 getF32Constant(DAG, 0x3c5d51ce));
3504 SDValue t1 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t0,
3505 getF32Constant(DAG, 0x3e00685a));
3506 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
3507 SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
3508 getF32Constant(DAG, 0x3efb6798));
3509 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
3510 SDValue t5 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t4,
3511 getF32Constant(DAG, 0x3f88d192));
3512 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
3513 SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
3514 getF32Constant(DAG, 0x3fc4316c));
3515 SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
3516 SDValue Log10ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t8,
3517 getF32Constant(DAG, 0x3f57ce70));
3519 result = DAG.getNode(ISD::FADD, dl,
3520 MVT::f32, LogOfExponent, Log10ofMantissa);
3523 // No special expansion.
3524 result = DAG.getNode(ISD::FLOG10, dl,
3525 getValue(I.getOperand(1)).getValueType(),
3526 getValue(I.getOperand(1)));
3529 setValue(&I, result);
3532 /// visitExp2 - Lower an exp2 intrinsic. Handles the special sequences for
3533 /// limited-precision mode.
3535 SelectionDAGLowering::visitExp2(CallInst &I) {
3537 DebugLoc dl = getCurDebugLoc();
3539 if (getValue(I.getOperand(1)).getValueType() == MVT::f32 &&
3540 LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
3541 SDValue Op = getValue(I.getOperand(1));
3543 SDValue IntegerPartOfX = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, Op);
3545 // FractionalPartOfX = x - (float)IntegerPartOfX;
3546 SDValue t1 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, IntegerPartOfX);
3547 SDValue X = DAG.getNode(ISD::FSUB, dl, MVT::f32, Op, t1);
3549 // IntegerPartOfX <<= 23;
3550 IntegerPartOfX = DAG.getNode(ISD::SHL, dl, MVT::i32, IntegerPartOfX,
3551 DAG.getConstant(23, TLI.getPointerTy()));
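// The result is assembled directly in the IEEE-754 bit pattern: each branch
// below approximates 2^FractionalPartOfX as an f32 in [1,2) and then adds
// IntegerPartOfX << 23 to its bits, which in effect bumps the biased exponent
// field by IntegerPartOfX, i.e.
//
//   bits(2^x) ~= bits(2^FractionalPartOfX) + (IntegerPartOfX << 23)
//
// (assuming the sum stays within the exponent field's range).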
3553 if (LimitFloatPrecision <= 6) {
3554 // For floating-point precision of 6:
3556 // TwoToFractionalPartOfX =
3558 // (0.735607626f + 0.252464424f * x) * x;
3560 // error 0.0144103317, which is 6 bits
3561 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3562 getF32Constant(DAG, 0x3e814304));
3563 SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
3564 getF32Constant(DAG, 0x3f3c50c8));
3565 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
3566 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
3567 getF32Constant(DAG, 0x3f7f5e7e));
3568 SDValue t6 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, t5);
3569 SDValue TwoToFractionalPartOfX =
3570 DAG.getNode(ISD::ADD, dl, MVT::i32, t6, IntegerPartOfX);
3572 result = DAG.getNode(ISD::BIT_CONVERT, dl,
3573 MVT::f32, TwoToFractionalPartOfX);
3574 } else if (LimitFloatPrecision > 6 && LimitFloatPrecision <= 12) {
3575 // For floating-point precision of 12:
3577 // TwoToFractionalPartOfX =
3580 // (0.224338339f + 0.792043434e-1f * x) * x) * x;
3582 // error 0.000107046256, which is 13 to 14 bits
3583 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3584 getF32Constant(DAG, 0x3da235e3));
3585 SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
3586 getF32Constant(DAG, 0x3e65b8f3));
3587 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
3588 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
3589 getF32Constant(DAG, 0x3f324b07));
3590 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
3591 SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
3592 getF32Constant(DAG, 0x3f7ff8fd));
3593 SDValue t8 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, t7);
3594 SDValue TwoToFractionalPartOfX =
3595 DAG.getNode(ISD::ADD, dl, MVT::i32, t8, IntegerPartOfX);
3597 result = DAG.getNode(ISD::BIT_CONVERT, dl,
3598 MVT::f32, TwoToFractionalPartOfX);
3599 } else { // LimitFloatPrecision > 12 && LimitFloatPrecision <= 18
3600 // For floating-point precision of 18:
3602 // TwoToFractionalPartOfX =
3606 // (0.554906021e-1f +
3607 // (0.961591928e-2f +
3608 // (0.136028312e-2f + 0.157059148e-3f *x)*x)*x)*x)*x)*x;
3609 // error 2.47208000*10^(-7), which is better than 18 bits
3610 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3611 getF32Constant(DAG, 0x3924b03e));
3612 SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
3613 getF32Constant(DAG, 0x3ab24b87));
3614 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
3615 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
3616 getF32Constant(DAG, 0x3c1d8c17));
3617 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
3618 SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
3619 getF32Constant(DAG, 0x3d634a1d));
3620 SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
3621 SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8,
3622 getF32Constant(DAG, 0x3e75fe14));
3623 SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X);
3624 SDValue t11 = DAG.getNode(ISD::FADD, dl, MVT::f32, t10,
3625 getF32Constant(DAG, 0x3f317234));
3626 SDValue t12 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t11, X);
3627 SDValue t13 = DAG.getNode(ISD::FADD, dl, MVT::f32, t12,
3628 getF32Constant(DAG, 0x3f800000));
3629 SDValue t14 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, t13);
3630 SDValue TwoToFractionalPartOfX =
3631 DAG.getNode(ISD::ADD, dl, MVT::i32, t14, IntegerPartOfX);
3633 result = DAG.getNode(ISD::BIT_CONVERT, dl,
3634 MVT::f32, TwoToFractionalPartOfX);
3637 // No special expansion.
3638 result = DAG.getNode(ISD::FEXP2, dl,
3639 getValue(I.getOperand(1)).getValueType(),
3640 getValue(I.getOperand(1)));
3643 setValue(&I, result);
3646 /// visitPow - Lower a pow intrinsic. Handles the special sequences for
3647 /// limited-precision mode with x == 10.0f.
3649 SelectionDAGLowering::visitPow(CallInst &I) {
3651 Value *Val = I.getOperand(1);
3652 DebugLoc dl = getCurDebugLoc();
3653 bool IsExp10 = false;
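// The special case below only applies when the base operand is the f32
// constant 10.0 and limited precision is requested; pow is then rewritten as
//
//   pow(10.0f, x) = 2^(x * log2(10))
//
// and expanded with the same 2^FractionalPartOfX polynomials as visitExp2.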
3655 if (getValue(Val).getValueType() == MVT::f32 &&
3656 getValue(I.getOperand(2)).getValueType() == MVT::f32 &&
3657 LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
3658 if (Constant *C = const_cast<Constant*>(dyn_cast<Constant>(Val))) {
3659 if (ConstantFP *CFP = dyn_cast<ConstantFP>(C)) {
3661 IsExp10 = CFP->getValueAPF().bitwiseIsEqual(Ten);
3666 if (IsExp10 && LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
3667 SDValue Op = getValue(I.getOperand(2));
3669 // Put the exponent in the right bit position for later addition to the
3670 // final result:
3672 // #define LOG2OF10 3.3219281f
3673 // IntegerPartOfX = (int32_t)(x * LOG2OF10);
3674 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, Op,
3675 getF32Constant(DAG, 0x40549a78));
3676 SDValue IntegerPartOfX = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, t0);
3678 // FractionalPartOfX = x - (float)IntegerPartOfX;
3679 SDValue t1 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, IntegerPartOfX);
3680 SDValue X = DAG.getNode(ISD::FSUB, dl, MVT::f32, t0, t1);
3682 // IntegerPartOfX <<= 23;
3683 IntegerPartOfX = DAG.getNode(ISD::SHL, dl, MVT::i32, IntegerPartOfX,
3684 DAG.getConstant(23, TLI.getPointerTy()));
3686 if (LimitFloatPrecision <= 6) {
3687 // For floating-point precision of 6:
3689 // twoToFractionalPartOfX =
3691 // (0.735607626f + 0.252464424f * x) * x;
3693 // error 0.0144103317, which is 6 bits
3694 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3695 getF32Constant(DAG, 0x3e814304));
3696 SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
3697 getF32Constant(DAG, 0x3f3c50c8));
3698 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
3699 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
3700 getF32Constant(DAG, 0x3f7f5e7e));
3701 SDValue t6 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, t5);
3702 SDValue TwoToFractionalPartOfX =
3703 DAG.getNode(ISD::ADD, dl, MVT::i32, t6, IntegerPartOfX);
3705 result = DAG.getNode(ISD::BIT_CONVERT, dl,
3706 MVT::f32, TwoToFractionalPartOfX);
3707 } else if (LimitFloatPrecision > 6 && LimitFloatPrecision <= 12) {
3708 // For floating-point precision of 12:
3710 // TwoToFractionalPartOfX =
3713 // (0.224338339f + 0.792043434e-1f * x) * x) * x;
3715 // error 0.000107046256, which is 13 to 14 bits
3716 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3717 getF32Constant(DAG, 0x3da235e3));
3718 SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
3719 getF32Constant(DAG, 0x3e65b8f3));
3720 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
3721 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
3722 getF32Constant(DAG, 0x3f324b07));
3723 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
3724 SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
3725 getF32Constant(DAG, 0x3f7ff8fd));
3726 SDValue t8 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, t7);
3727 SDValue TwoToFractionalPartOfX =
3728 DAG.getNode(ISD::ADD, dl, MVT::i32, t8, IntegerPartOfX);
3730 result = DAG.getNode(ISD::BIT_CONVERT, dl,
3731 MVT::f32, TwoToFractionalPartOfX);
3732 } else { // LimitFloatPrecision > 12 && LimitFloatPrecision <= 18
3733 // For floating-point precision of 18:
3735 // TwoToFractionalPartOfX =
3739 // (0.554906021e-1f +
3740 // (0.961591928e-2f +
3741 // (0.136028312e-2f + 0.157059148e-3f *x)*x)*x)*x)*x)*x;
3742 // error 2.47208000*10^(-7), which is better than 18 bits
3743 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3744 getF32Constant(DAG, 0x3924b03e));
3745 SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
3746 getF32Constant(DAG, 0x3ab24b87));
3747 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
3748 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
3749 getF32Constant(DAG, 0x3c1d8c17));
3750 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
3751 SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
3752 getF32Constant(DAG, 0x3d634a1d));
3753 SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
3754 SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8,
3755 getF32Constant(DAG, 0x3e75fe14));
3756 SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X);
3757 SDValue t11 = DAG.getNode(ISD::FADD, dl, MVT::f32, t10,
3758 getF32Constant(DAG, 0x3f317234));
3759 SDValue t12 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t11, X);
3760 SDValue t13 = DAG.getNode(ISD::FADD, dl, MVT::f32, t12,
3761 getF32Constant(DAG, 0x3f800000));
3762 SDValue t14 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, t13);
3763 SDValue TwoToFractionalPartOfX =
3764 DAG.getNode(ISD::ADD, dl, MVT::i32, t14, IntegerPartOfX);
3766 result = DAG.getNode(ISD::BIT_CONVERT, dl,
3767 MVT::f32, TwoToFractionalPartOfX);
3770 // No special expansion.
3771 result = DAG.getNode(ISD::FPOW, dl,
3772 getValue(I.getOperand(1)).getValueType(),
3773 getValue(I.getOperand(1)),
3774 getValue(I.getOperand(2)));
3777 setValue(&I, result);
3780 /// visitIntrinsicCall - Lower the call to the specified intrinsic function. If
3781 /// we want to emit this as a call to a named external function, return the
3782 /// name; otherwise lower it and return null.
3784 SelectionDAGLowering::visitIntrinsicCall(CallInst &I, unsigned Intrinsic) {
3785 DebugLoc dl = getCurDebugLoc();
3786 switch (Intrinsic) {
3788 // By default, turn this into a target intrinsic node.
3789 visitTargetIntrinsic(I, Intrinsic);
3791 case Intrinsic::vastart: visitVAStart(I); return 0;
3792 case Intrinsic::vaend: visitVAEnd(I); return 0;
3793 case Intrinsic::vacopy: visitVACopy(I); return 0;
3794 case Intrinsic::returnaddress:
3795 setValue(&I, DAG.getNode(ISD::RETURNADDR, dl, TLI.getPointerTy(),
3796 getValue(I.getOperand(1))));
3798 case Intrinsic::frameaddress:
3799 setValue(&I, DAG.getNode(ISD::FRAMEADDR, dl, TLI.getPointerTy(),
3800 getValue(I.getOperand(1))));
3802 case Intrinsic::setjmp:
3803 return "_setjmp"+!TLI.usesUnderscoreSetJmp();
3805 case Intrinsic::longjmp:
3806 return "_longjmp"+!TLI.usesUnderscoreLongJmp();
3808 case Intrinsic::memcpy: {
3809 SDValue Op1 = getValue(I.getOperand(1));
3810 SDValue Op2 = getValue(I.getOperand(2));
3811 SDValue Op3 = getValue(I.getOperand(3));
3812 unsigned Align = cast<ConstantInt>(I.getOperand(4))->getZExtValue();
3813 DAG.setRoot(DAG.getMemcpy(getRoot(), dl, Op1, Op2, Op3, Align, false,
3814 I.getOperand(1), 0, I.getOperand(2), 0));
3817 case Intrinsic::memset: {
3818 SDValue Op1 = getValue(I.getOperand(1));
3819 SDValue Op2 = getValue(I.getOperand(2));
3820 SDValue Op3 = getValue(I.getOperand(3));
3821 unsigned Align = cast<ConstantInt>(I.getOperand(4))->getZExtValue();
3822 DAG.setRoot(DAG.getMemset(getRoot(), dl, Op1, Op2, Op3, Align,
3823 I.getOperand(1), 0));
3826 case Intrinsic::memmove: {
3827 SDValue Op1 = getValue(I.getOperand(1));
3828 SDValue Op2 = getValue(I.getOperand(2));
3829 SDValue Op3 = getValue(I.getOperand(3));
3830 unsigned Align = cast<ConstantInt>(I.getOperand(4))->getZExtValue();
3832 // If the source and destination are known to not be aliases, we can
3833 // lower memmove as memcpy.
3834 uint64_t Size = -1ULL;
3835 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op3))
3836 Size = C->getZExtValue();
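// When the length operand is not a constant, Size stays -1ULL and is treated
// as an unknown length, so the alias query below is answered conservatively.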
3837 if (AA->alias(I.getOperand(1), Size, I.getOperand(2), Size) ==
3838 AliasAnalysis::NoAlias) {
3839 DAG.setRoot(DAG.getMemcpy(getRoot(), dl, Op1, Op2, Op3, Align, false,
3840 I.getOperand(1), 0, I.getOperand(2), 0));
3844 DAG.setRoot(DAG.getMemmove(getRoot(), dl, Op1, Op2, Op3, Align,
3845 I.getOperand(1), 0, I.getOperand(2), 0));
3848 case Intrinsic::dbg_stoppoint: {
3849 DbgStopPointInst &SPI = cast<DbgStopPointInst>(I);
3850 if (isValidDebugInfoIntrinsic(SPI, CodeGenOpt::Default)) {
3851 MachineFunction &MF = DAG.getMachineFunction();
3852 DebugLoc Loc = ExtractDebugLocation(SPI, MF.getDebugLocInfo());
3853 setCurDebugLoc(Loc);
3855 if (OptLevel == CodeGenOpt::None)
3856 DAG.setRoot(DAG.getDbgStopPoint(Loc, getRoot(),
3863 case Intrinsic::dbg_region_start: {
3864 DwarfWriter *DW = DAG.getDwarfWriter();
3865 DbgRegionStartInst &RSI = cast<DbgRegionStartInst>(I);
3866 if (isValidDebugInfoIntrinsic(RSI, OptLevel) && DW
3867 && DW->ShouldEmitDwarfDebug()) {
3868 unsigned LabelID =
3869 DW->RecordRegionStart(cast<GlobalVariable>(RSI.getContext()));
3870 DAG.setRoot(DAG.getLabel(ISD::DBG_LABEL, getCurDebugLoc(),
3871 getRoot(), LabelID));
3875 case Intrinsic::dbg_region_end: {
3876 DwarfWriter *DW = DAG.getDwarfWriter();
3877 DbgRegionEndInst &REI = cast<DbgRegionEndInst>(I);
3879 if (!isValidDebugInfoIntrinsic(REI, OptLevel) || !DW
3880 || !DW->ShouldEmitDwarfDebug())
3883 MachineFunction &MF = DAG.getMachineFunction();
3884 DISubprogram Subprogram(cast<GlobalVariable>(REI.getContext()));
3886 if (isInlinedFnEnd(REI, MF.getFunction())) {
3887 // This is the end of an inlined function. Debugging information for
3888 // inlined functions is not handled yet (only FastISel supports it).
3889 if (OptLevel == CodeGenOpt::None) {
3890 unsigned ID = DW->RecordInlinedFnEnd(Subprogram);
3892 // The returned ID is 0 if this is an unbalanced "end of inlined
3893 // scope". This could happen if the optimizer eats dbg intrinsics or if the
3894 // "beginning of inlined scope" was not recognized due to missing
3895 // location info. In such cases, ignore this region.end.
3896 DAG.setRoot(DAG.getLabel(ISD::DBG_LABEL, getCurDebugLoc(),
3903 DW->RecordRegionEnd(cast<GlobalVariable>(REI.getContext()));
3904 DAG.setRoot(DAG.getLabel(ISD::DBG_LABEL, getCurDebugLoc(),
3905 getRoot(), LabelID));
3908 case Intrinsic::dbg_func_start: {
3909 DwarfWriter *DW = DAG.getDwarfWriter();
3910 DbgFuncStartInst &FSI = cast<DbgFuncStartInst>(I);
3911 if (!isValidDebugInfoIntrinsic(FSI, CodeGenOpt::None) || !DW
3912 || !DW->ShouldEmitDwarfDebug())
3915 MachineFunction &MF = DAG.getMachineFunction();
3916 // This is the beginning of an inlined function.
3917 if (isInlinedFnStart(FSI, MF.getFunction())) {
3918 if (OptLevel != CodeGenOpt::None)
3919 // FIXME: Debugging information for inlined functions is only
3920 // supported at CodeGenOpt::None.
3923 DebugLoc PrevLoc = CurDebugLoc;
3924 // If llvm.dbg.func.start is seen in a new block before any
3925 // llvm.dbg.stoppoint intrinsic then the location info is unknown.
3926 // FIXME: Why is DebugLoc reset at the beginning of each block?
3927 if (PrevLoc.isUnknown())
3930 // Record the source line.
3931 setCurDebugLoc(ExtractDebugLocation(FSI, MF.getDebugLocInfo()));
3933 DebugLocTuple PrevLocTpl = MF.getDebugLocTuple(PrevLoc);
3934 DISubprogram SP(cast<GlobalVariable>(FSI.getSubprogram()));
3935 DICompileUnit CU(PrevLocTpl.CompileUnit);
3936 unsigned LabelID = DW->RecordInlinedFnStart(SP, CU,
3939 DAG.setRoot(DAG.getLabel(ISD::DBG_LABEL, getCurDebugLoc(),
3940 getRoot(), LabelID));
3944 // This is the beginning of a new function.
3945 MF.setDefaultDebugLoc(ExtractDebugLocation(FSI, MF.getDebugLocInfo()));
3947 // llvm.dbg.func_start also defines beginning of function scope.
3948 DW->RecordRegionStart(cast<GlobalVariable>(FSI.getSubprogram()));
3951 case Intrinsic::dbg_declare: {
3952 if (OptLevel != CodeGenOpt::None)
3953 // FIXME: Variable debug info is not supported here.
3956 DbgDeclareInst &DI = cast<DbgDeclareInst>(I);
3957 if (!isValidDebugInfoIntrinsic(DI, CodeGenOpt::None))
3960 Value *Variable = DI.getVariable();
3961 DAG.setRoot(DAG.getNode(ISD::DECLARE, dl, MVT::Other, getRoot(),
3962 getValue(DI.getAddress()), getValue(Variable)));
3965 case Intrinsic::eh_exception: {
3966 // Insert the EXCEPTIONADDR instruction.
3967 assert(CurMBB->isLandingPad() &&"Call to eh.exception not in landing pad!");
3968 SDVTList VTs = DAG.getVTList(TLI.getPointerTy(), MVT::Other);
3970 Ops[0] = DAG.getRoot();
3971 SDValue Op = DAG.getNode(ISD::EXCEPTIONADDR, dl, VTs, Ops, 1);
3973 DAG.setRoot(Op.getValue(1));
3977 case Intrinsic::eh_selector_i32:
3978 case Intrinsic::eh_selector_i64: {
3979 MachineModuleInfo *MMI = DAG.getMachineModuleInfo();
3980 MVT VT = (Intrinsic == Intrinsic::eh_selector_i32 ?
3981 MVT::i32 : MVT::i64);
3984 if (CurMBB->isLandingPad())
3985 AddCatchInfo(I, MMI, CurMBB);
3988 FuncInfo.CatchInfoLost.insert(&I);
3990 // FIXME: Mark exception selector register as live in. Hack for PR1508.
3991 unsigned Reg = TLI.getExceptionSelectorRegister();
3992 if (Reg) CurMBB->addLiveIn(Reg);
3995 // Insert the EHSELECTION instruction.
3996 SDVTList VTs = DAG.getVTList(VT, MVT::Other);
3998 Ops[0] = getValue(I.getOperand(1));
4000 SDValue Op = DAG.getNode(ISD::EHSELECTION, dl, VTs, Ops, 2);
4002 DAG.setRoot(Op.getValue(1));
4004 setValue(&I, DAG.getConstant(0, VT));
4010 case Intrinsic::eh_typeid_for_i32:
4011 case Intrinsic::eh_typeid_for_i64: {
4012 MachineModuleInfo *MMI = DAG.getMachineModuleInfo();
4013 MVT VT = (Intrinsic == Intrinsic::eh_typeid_for_i32 ?
4014 MVT::i32 : MVT::i64);
4017 // Find the type id for the given typeinfo.
4018 GlobalVariable *GV = ExtractTypeInfo(I.getOperand(1));
4020 unsigned TypeID = MMI->getTypeIDFor(GV);
4021 setValue(&I, DAG.getConstant(TypeID, VT));
4023 // Return something different to eh_selector.
4024 setValue(&I, DAG.getConstant(1, VT));
4030 case Intrinsic::eh_return_i32:
4031 case Intrinsic::eh_return_i64:
4032 if (MachineModuleInfo *MMI = DAG.getMachineModuleInfo()) {
4033 MMI->setCallsEHReturn(true);
4034 DAG.setRoot(DAG.getNode(ISD::EH_RETURN, dl,
4037 getValue(I.getOperand(1)),
4038 getValue(I.getOperand(2))));
4040 setValue(&I, DAG.getConstant(0, TLI.getPointerTy()));
4044 case Intrinsic::eh_unwind_init:
4045 if (MachineModuleInfo *MMI = DAG.getMachineModuleInfo()) {
4046 MMI->setCallsUnwindInit(true);
4051 case Intrinsic::eh_dwarf_cfa: {
4052 MVT VT = getValue(I.getOperand(1)).getValueType();
4053 SDValue CfaArg;
4054 if (VT.bitsGT(TLI.getPointerTy()))
4055 CfaArg = DAG.getNode(ISD::TRUNCATE, dl,
4056 TLI.getPointerTy(), getValue(I.getOperand(1)));
4058 CfaArg = DAG.getNode(ISD::SIGN_EXTEND, dl,
4059 TLI.getPointerTy(), getValue(I.getOperand(1)));
4061 SDValue Offset = DAG.getNode(ISD::ADD, dl,
4063 DAG.getNode(ISD::FRAME_TO_ARGS_OFFSET, dl,
4064 TLI.getPointerTy()),
4066 setValue(&I, DAG.getNode(ISD::ADD, dl,
4068 DAG.getNode(ISD::FRAMEADDR, dl,
4071 TLI.getPointerTy())),
4076 case Intrinsic::convertff:
4077 case Intrinsic::convertfsi:
4078 case Intrinsic::convertfui:
4079 case Intrinsic::convertsif:
4080 case Intrinsic::convertuif:
4081 case Intrinsic::convertss:
4082 case Intrinsic::convertsu:
4083 case Intrinsic::convertus:
4084 case Intrinsic::convertuu: {
4085 ISD::CvtCode Code = ISD::CVT_INVALID;
4086 switch (Intrinsic) {
4087 case Intrinsic::convertff: Code = ISD::CVT_FF; break;
4088 case Intrinsic::convertfsi: Code = ISD::CVT_FS; break;
4089 case Intrinsic::convertfui: Code = ISD::CVT_FU; break;
4090 case Intrinsic::convertsif: Code = ISD::CVT_SF; break;
4091 case Intrinsic::convertuif: Code = ISD::CVT_UF; break;
4092 case Intrinsic::convertss: Code = ISD::CVT_SS; break;
4093 case Intrinsic::convertsu: Code = ISD::CVT_SU; break;
4094 case Intrinsic::convertus: Code = ISD::CVT_US; break;
4095 case Intrinsic::convertuu: Code = ISD::CVT_UU; break;
4097 MVT DestVT = TLI.getValueType(I.getType());
4098 Value* Op1 = I.getOperand(1);
4099 setValue(&I, DAG.getConvertRndSat(DestVT, getCurDebugLoc(), getValue(Op1),
4100 DAG.getValueType(DestVT),
4101 DAG.getValueType(getValue(Op1).getValueType()),
4102 getValue(I.getOperand(2)),
4103 getValue(I.getOperand(3)),
4108 case Intrinsic::sqrt:
4109 setValue(&I, DAG.getNode(ISD::FSQRT, dl,
4110 getValue(I.getOperand(1)).getValueType(),
4111 getValue(I.getOperand(1))));
4113 case Intrinsic::powi:
4114 setValue(&I, DAG.getNode(ISD::FPOWI, dl,
4115 getValue(I.getOperand(1)).getValueType(),
4116 getValue(I.getOperand(1)),
4117 getValue(I.getOperand(2))));
4119 case Intrinsic::sin:
4120 setValue(&I, DAG.getNode(ISD::FSIN, dl,
4121 getValue(I.getOperand(1)).getValueType(),
4122 getValue(I.getOperand(1))));
4124 case Intrinsic::cos:
4125 setValue(&I, DAG.getNode(ISD::FCOS, dl,
4126 getValue(I.getOperand(1)).getValueType(),
4127 getValue(I.getOperand(1))));
4129 case Intrinsic::log:
4132 case Intrinsic::log2:
4135 case Intrinsic::log10:
4138 case Intrinsic::exp:
4141 case Intrinsic::exp2:
4144 case Intrinsic::pow:
4147 case Intrinsic::pcmarker: {
4148 SDValue Tmp = getValue(I.getOperand(1));
4149 DAG.setRoot(DAG.getNode(ISD::PCMARKER, dl, MVT::Other, getRoot(), Tmp));
4152 case Intrinsic::readcyclecounter: {
4153 SDValue Op = getRoot();
4154 SDValue Tmp = DAG.getNode(ISD::READCYCLECOUNTER, dl,
4155 DAG.getVTList(MVT::i64, MVT::Other),
4158 DAG.setRoot(Tmp.getValue(1));
4161 case Intrinsic::part_select: {
4162 // Currently not implemented: just abort
4163 assert(0 && "part_select intrinsic not implemented");
4166 case Intrinsic::part_set: {
4167 // Currently not implemented: just abort
4168 assert(0 && "part_set intrinsic not implemented");
4171 case Intrinsic::bswap:
4172 setValue(&I, DAG.getNode(ISD::BSWAP, dl,
4173 getValue(I.getOperand(1)).getValueType(),
4174 getValue(I.getOperand(1))));
4176 case Intrinsic::cttz: {
4177 SDValue Arg = getValue(I.getOperand(1));
4178 MVT Ty = Arg.getValueType();
4179 SDValue result = DAG.getNode(ISD::CTTZ, dl, Ty, Arg);
4180 setValue(&I, result);
4183 case Intrinsic::ctlz: {
4184 SDValue Arg = getValue(I.getOperand(1));
4185 MVT Ty = Arg.getValueType();
4186 SDValue result = DAG.getNode(ISD::CTLZ, dl, Ty, Arg);
4187 setValue(&I, result);
4190 case Intrinsic::ctpop: {
4191 SDValue Arg = getValue(I.getOperand(1));
4192 MVT Ty = Arg.getValueType();
4193 SDValue result = DAG.getNode(ISD::CTPOP, dl, Ty, Arg);
4194 setValue(&I, result);
4197 case Intrinsic::stacksave: {
4198 SDValue Op = getRoot();
4199 SDValue Tmp = DAG.getNode(ISD::STACKSAVE, dl,
4200 DAG.getVTList(TLI.getPointerTy(), MVT::Other), &Op, 1);
4202 DAG.setRoot(Tmp.getValue(1));
4205 case Intrinsic::stackrestore: {
4206 SDValue Tmp = getValue(I.getOperand(1));
4207 DAG.setRoot(DAG.getNode(ISD::STACKRESTORE, dl, MVT::Other, getRoot(), Tmp));
4210 case Intrinsic::stackprotector: {
4211 // Emit code into the DAG to store the stack guard onto the stack.
4212 MachineFunction &MF = DAG.getMachineFunction();
4213 MachineFrameInfo *MFI = MF.getFrameInfo();
4214 MVT PtrTy = TLI.getPointerTy();
4216 SDValue Src = getValue(I.getOperand(1)); // The guard's value.
4217 AllocaInst *Slot = cast<AllocaInst>(I.getOperand(2));
4219 int FI = FuncInfo.StaticAllocaMap[Slot];
4220 MFI->setStackProtectorIndex(FI);
4222 SDValue FIN = DAG.getFrameIndex(FI, PtrTy);
4224 // Store the stack protector onto the stack.
4225 SDValue Result = DAG.getStore(getRoot(), getCurDebugLoc(), Src, FIN,
4226 PseudoSourceValue::getFixedStack(FI),
4228 setValue(&I, Result);
4229 DAG.setRoot(Result);
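// Recording the slot index in MachineFrameInfo lets later codegen stages find
// the stack guard slot again; the store above initializes it with the guard
// value.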
4232 case Intrinsic::var_annotation:
4233 // Discard annotate attributes
4236 case Intrinsic::init_trampoline: {
4237 const Function *F = cast<Function>(I.getOperand(2)->stripPointerCasts());
4241 Ops[1] = getValue(I.getOperand(1));
4242 Ops[2] = getValue(I.getOperand(2));
4243 Ops[3] = getValue(I.getOperand(3));
4244 Ops[4] = DAG.getSrcValue(I.getOperand(1));
4245 Ops[5] = DAG.getSrcValue(F);
4247 SDValue Tmp = DAG.getNode(ISD::TRAMPOLINE, dl,
4248 DAG.getVTList(TLI.getPointerTy(), MVT::Other),
4252 DAG.setRoot(Tmp.getValue(1));
4256 case Intrinsic::gcroot:
4258 Value *Alloca = I.getOperand(1);
4259 Constant *TypeMap = cast<Constant>(I.getOperand(2));
4261 FrameIndexSDNode *FI = cast<FrameIndexSDNode>(getValue(Alloca).getNode());
4262 GFI->addStackRoot(FI->getIndex(), TypeMap);
4266 case Intrinsic::gcread:
4267 case Intrinsic::gcwrite:
4268 assert(0 && "GC failed to lower gcread/gcwrite intrinsics!");
4271 case Intrinsic::flt_rounds: {
4272 setValue(&I, DAG.getNode(ISD::FLT_ROUNDS_, dl, MVT::i32));
4276 case Intrinsic::trap: {
4277 DAG.setRoot(DAG.getNode(ISD::TRAP, dl,MVT::Other, getRoot()));
4281 case Intrinsic::uadd_with_overflow:
4282 return implVisitAluOverflow(I, ISD::UADDO);
4283 case Intrinsic::sadd_with_overflow:
4284 return implVisitAluOverflow(I, ISD::SADDO);
4285 case Intrinsic::usub_with_overflow:
4286 return implVisitAluOverflow(I, ISD::USUBO);
4287 case Intrinsic::ssub_with_overflow:
4288 return implVisitAluOverflow(I, ISD::SSUBO);
4289 case Intrinsic::umul_with_overflow:
4290 return implVisitAluOverflow(I, ISD::UMULO);
4291 case Intrinsic::smul_with_overflow:
4292 return implVisitAluOverflow(I, ISD::SMULO);
4294 case Intrinsic::prefetch: {
4297 Ops[1] = getValue(I.getOperand(1));
4298 Ops[2] = getValue(I.getOperand(2));
4299 Ops[3] = getValue(I.getOperand(3));
4300 DAG.setRoot(DAG.getNode(ISD::PREFETCH, dl, MVT::Other, &Ops[0], 4));
4304 case Intrinsic::memory_barrier: {
4307 for (int x = 1; x < 6; ++x)
4308 Ops[x] = getValue(I.getOperand(x));
4310 DAG.setRoot(DAG.getNode(ISD::MEMBARRIER, dl, MVT::Other, &Ops[0], 6));
4313 case Intrinsic::atomic_cmp_swap: {
4314 SDValue Root = getRoot();
4316 DAG.getAtomic(ISD::ATOMIC_CMP_SWAP, getCurDebugLoc(),
4317 getValue(I.getOperand(2)).getValueType().getSimpleVT(),
4319 getValue(I.getOperand(1)),
4320 getValue(I.getOperand(2)),
4321 getValue(I.getOperand(3)),
4324 DAG.setRoot(L.getValue(1));
4327 case Intrinsic::atomic_load_add:
4328 return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_ADD);
4329 case Intrinsic::atomic_load_sub:
4330 return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_SUB);
4331 case Intrinsic::atomic_load_or:
4332 return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_OR);
4333 case Intrinsic::atomic_load_xor:
4334 return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_XOR);
4335 case Intrinsic::atomic_load_and:
4336 return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_AND);
4337 case Intrinsic::atomic_load_nand:
4338 return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_NAND);
4339 case Intrinsic::atomic_load_max:
4340 return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_MAX);
4341 case Intrinsic::atomic_load_min:
4342 return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_MIN);
4343 case Intrinsic::atomic_load_umin:
4344 return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_UMIN);
4345 case Intrinsic::atomic_load_umax:
4346 return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_UMAX);
4347 case Intrinsic::atomic_swap:
4348 return implVisitBinaryAtomic(I, ISD::ATOMIC_SWAP);
4353 void SelectionDAGLowering::LowerCallTo(CallSite CS, SDValue Callee,
4355 MachineBasicBlock *LandingPad) {
4356 const PointerType *PT = cast<PointerType>(CS.getCalledValue()->getType());
4357 const FunctionType *FTy = cast<FunctionType>(PT->getElementType());
4358 MachineModuleInfo *MMI = DAG.getMachineModuleInfo();
4359 unsigned BeginLabel = 0, EndLabel = 0;
4361 TargetLowering::ArgListTy Args;
4362 TargetLowering::ArgListEntry Entry;
4363 Args.reserve(CS.arg_size());
4364 for (CallSite::arg_iterator i = CS.arg_begin(), e = CS.arg_end();
4366 SDValue ArgNode = getValue(*i);
4367 Entry.Node = ArgNode; Entry.Ty = (*i)->getType();
4369 unsigned attrInd = i - CS.arg_begin() + 1;
4370 Entry.isSExt = CS.paramHasAttr(attrInd, Attribute::SExt);
4371 Entry.isZExt = CS.paramHasAttr(attrInd, Attribute::ZExt);
4372 Entry.isInReg = CS.paramHasAttr(attrInd, Attribute::InReg);
4373 Entry.isSRet = CS.paramHasAttr(attrInd, Attribute::StructRet);
4374 Entry.isNest = CS.paramHasAttr(attrInd, Attribute::Nest);
4375 Entry.isByVal = CS.paramHasAttr(attrInd, Attribute::ByVal);
4376 Entry.Alignment = CS.getParamAlignment(attrInd);
4377 Args.push_back(Entry);
4380 if (LandingPad && MMI) {
4381 // Insert a label before the invoke call to mark the try range. This can be
4382 // used to detect deletion of the invoke via the MachineModuleInfo.
4383 BeginLabel = MMI->NextLabelID();
4384 // Both PendingLoads and PendingExports must be flushed here;
4385 // this call might not return.
4387 DAG.setRoot(DAG.getLabel(ISD::EH_LABEL, getCurDebugLoc(),
4388 getControlRoot(), BeginLabel));
4391 std::pair<SDValue,SDValue> Result =
4392 TLI.LowerCallTo(getRoot(), CS.getType(),
4393 CS.paramHasAttr(0, Attribute::SExt),
4394 CS.paramHasAttr(0, Attribute::ZExt), FTy->isVarArg(),
4395 CS.paramHasAttr(0, Attribute::InReg), FTy->getNumParams(),
4396 CS.getCallingConv(),
4397 IsTailCall && PerformTailCallOpt,
4398 Callee, Args, DAG, getCurDebugLoc());
4399 if (CS.getType() != Type::VoidTy)
4400 setValue(CS.getInstruction(), Result.first);
4401 DAG.setRoot(Result.second);
4403 if (LandingPad && MMI) {
4404 // Insert a label at the end of the invoke call to mark the try range. This
4405 // can be used to detect deletion of the invoke via the MachineModuleInfo.
4406 EndLabel = MMI->NextLabelID();
4407 DAG.setRoot(DAG.getLabel(ISD::EH_LABEL, getCurDebugLoc(),
4408 getRoot(), EndLabel));
4410 // Inform MachineModuleInfo of range.
4411 MMI->addInvoke(LandingPad, BeginLabel, EndLabel);
4416 void SelectionDAGLowering::visitCall(CallInst &I) {
4417 const char *RenameFn = 0;
4418 if (Function *F = I.getCalledFunction()) {
4419 if (F->isDeclaration()) {
4420 const TargetIntrinsicInfo *II = TLI.getTargetMachine().getIntrinsicInfo();
4422 if (unsigned IID = II->getIntrinsicID(F)) {
4423 RenameFn = visitIntrinsicCall(I, IID);
4428 if (unsigned IID = F->getIntrinsicID()) {
4429 RenameFn = visitIntrinsicCall(I, IID);
4435 // Check for well-known libc/libm calls. If the function is internal, it
4436 // can't be a library call.
4437 unsigned NameLen = F->getNameLen();
4438 if (!F->hasLocalLinkage() && NameLen) {
4439 const char *NameStr = F->getNameStart();
4440 if (NameStr[0] == 'c' &&
4441 ((NameLen == 8 && !strcmp(NameStr, "copysign")) ||
4442 (NameLen == 9 && !strcmp(NameStr, "copysignf")))) {
4443 if (I.getNumOperands() == 3 && // Basic sanity checks.
4444 I.getOperand(1)->getType()->isFloatingPoint() &&
4445 I.getType() == I.getOperand(1)->getType() &&
4446 I.getType() == I.getOperand(2)->getType()) {
4447 SDValue LHS = getValue(I.getOperand(1));
4448 SDValue RHS = getValue(I.getOperand(2));
4449 setValue(&I, DAG.getNode(ISD::FCOPYSIGN, getCurDebugLoc(),
4450 LHS.getValueType(), LHS, RHS));
4453 } else if (NameStr[0] == 'f' &&
4454 ((NameLen == 4 && !strcmp(NameStr, "fabs")) ||
4455 (NameLen == 5 && !strcmp(NameStr, "fabsf")) ||
4456 (NameLen == 5 && !strcmp(NameStr, "fabsl")))) {
4457 if (I.getNumOperands() == 2 && // Basic sanity checks.
4458 I.getOperand(1)->getType()->isFloatingPoint() &&
4459 I.getType() == I.getOperand(1)->getType()) {
4460 SDValue Tmp = getValue(I.getOperand(1));
4461 setValue(&I, DAG.getNode(ISD::FABS, getCurDebugLoc(),
4462 Tmp.getValueType(), Tmp));
4465 } else if (NameStr[0] == 's' &&
4466 ((NameLen == 3 && !strcmp(NameStr, "sin")) ||
4467 (NameLen == 4 && !strcmp(NameStr, "sinf")) ||
4468 (NameLen == 4 && !strcmp(NameStr, "sinl")))) {
4469 if (I.getNumOperands() == 2 && // Basic sanity checks.
4470 I.getOperand(1)->getType()->isFloatingPoint() &&
4471 I.getType() == I.getOperand(1)->getType()) {
4472 SDValue Tmp = getValue(I.getOperand(1));
4473 setValue(&I, DAG.getNode(ISD::FSIN, getCurDebugLoc(),
4474 Tmp.getValueType(), Tmp));
4477 } else if (NameStr[0] == 'c' &&
4478 ((NameLen == 3 && !strcmp(NameStr, "cos")) ||
4479 (NameLen == 4 && !strcmp(NameStr, "cosf")) ||
4480 (NameLen == 4 && !strcmp(NameStr, "cosl")))) {
4481 if (I.getNumOperands() == 2 && // Basic sanity checks.
4482 I.getOperand(1)->getType()->isFloatingPoint() &&
4483 I.getType() == I.getOperand(1)->getType()) {
4484 SDValue Tmp = getValue(I.getOperand(1));
4485 setValue(&I, DAG.getNode(ISD::FCOS, getCurDebugLoc(),
4486 Tmp.getValueType(), Tmp));
4491 } else if (isa<InlineAsm>(I.getOperand(0))) {
4498 Callee = getValue(I.getOperand(0));
4500 Callee = DAG.getExternalSymbol(RenameFn, TLI.getPointerTy());
4502 LowerCallTo(&I, Callee, I.isTailCall());
4506 /// getCopyFromRegs - Emit a series of CopyFromReg nodes that copies from
4507 /// this value and returns the result as a ValueVT value. This uses
4508 /// Chain/Flag as the input and updates them for the output Chain/Flag.
4509 /// If the Flag pointer is NULL, no flag is used.
4510 SDValue RegsForValue::getCopyFromRegs(SelectionDAG &DAG, DebugLoc dl,
4512 SDValue *Flag) const {
4513 // Assemble the legal parts into the final values.
4514 SmallVector<SDValue, 4> Values(ValueVTs.size());
4515 SmallVector<SDValue, 8> Parts;
4516 for (unsigned Value = 0, Part = 0, e = ValueVTs.size(); Value != e; ++Value) {
4517 // Copy the legal parts from the registers.
4518 MVT ValueVT = ValueVTs[Value];
4519 unsigned NumRegs = TLI->getNumRegisters(ValueVT);
4520 MVT RegisterVT = RegVTs[Value];
4522 Parts.resize(NumRegs);
4523 for (unsigned i = 0; i != NumRegs; ++i) {
4526 P = DAG.getCopyFromReg(Chain, dl, Regs[Part+i], RegisterVT);
4528 P = DAG.getCopyFromReg(Chain, dl, Regs[Part+i], RegisterVT, *Flag);
4529 *Flag = P.getValue(2);
4531 Chain = P.getValue(1);
4533 // If the source register was virtual and if we know something about it,
4534 // add an assert node.
4535 if (TargetRegisterInfo::isVirtualRegister(Regs[Part+i]) &&
4536 RegisterVT.isInteger() && !RegisterVT.isVector()) {
4537 unsigned SlotNo = Regs[Part+i]-TargetRegisterInfo::FirstVirtualRegister;
4538 FunctionLoweringInfo &FLI = DAG.getFunctionLoweringInfo();
4539 if (FLI.LiveOutRegInfo.size() > SlotNo) {
4540 FunctionLoweringInfo::LiveOutInfo &LOI = FLI.LiveOutRegInfo[SlotNo];
4542 unsigned RegSize = RegisterVT.getSizeInBits();
4543 unsigned NumSignBits = LOI.NumSignBits;
4544 unsigned NumZeroBits = LOI.KnownZero.countLeadingOnes();
4546 // FIXME: We capture more information than the dag can represent. For
4547 // now, just use the tightest assertzext/assertsext possible.
4549 MVT FromVT(MVT::Other);
4550 if (NumSignBits == RegSize)
4551 isSExt = true, FromVT = MVT::i1; // ASSERT SEXT 1
4552 else if (NumZeroBits >= RegSize-1)
4553 isSExt = false, FromVT = MVT::i1; // ASSERT ZEXT 1
4554 else if (NumSignBits > RegSize-8)
4555 isSExt = true, FromVT = MVT::i8; // ASSERT SEXT 8
4556 else if (NumZeroBits >= RegSize-8)
4557 isSExt = false, FromVT = MVT::i8; // ASSERT ZEXT 8
4558 else if (NumSignBits > RegSize-16)
4559 isSExt = true, FromVT = MVT::i16; // ASSERT SEXT 16
4560 else if (NumZeroBits >= RegSize-16)
4561 isSExt = false, FromVT = MVT::i16; // ASSERT ZEXT 16
4562 else if (NumSignBits > RegSize-32)
4563 isSExt = true, FromVT = MVT::i32; // ASSERT SEXT 32
4564 else if (NumZeroBits >= RegSize-32)
4565 isSExt = false, FromVT = MVT::i32; // ASSERT ZEXT 32
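// Illustrative example: for a 32-bit register whose value is known to have,
// say, 26 sign bits, the chain above picks FromVT = MVT::i8 with isSExt set,
// so the CopyFromReg result is wrapped in AssertSext i8 and later extensions
// from the low bits can typically be folded away.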
4567 if (FromVT != MVT::Other) {
4568 P = DAG.getNode(isSExt ? ISD::AssertSext : ISD::AssertZext, dl,
4569 RegisterVT, P, DAG.getValueType(FromVT));
4578 Values[Value] = getCopyFromParts(DAG, dl, Parts.begin(),
4579 NumRegs, RegisterVT, ValueVT);
4584 return DAG.getNode(ISD::MERGE_VALUES, dl,
4585 DAG.getVTList(&ValueVTs[0], ValueVTs.size()),
4586 &Values[0], ValueVTs.size());
4589 /// getCopyToRegs - Emit a series of CopyToReg nodes that copies the
4590 /// specified value into the registers specified by this object. This uses
4591 /// Chain/Flag as the input and updates them for the output Chain/Flag.
4592 /// If the Flag pointer is NULL, no flag is used.
4593 void RegsForValue::getCopyToRegs(SDValue Val, SelectionDAG &DAG, DebugLoc dl,
4594 SDValue &Chain, SDValue *Flag) const {
4595 // Get the list of the value's legal parts.
4596 unsigned NumRegs = Regs.size();
4597 SmallVector<SDValue, 8> Parts(NumRegs);
4598 for (unsigned Value = 0, Part = 0, e = ValueVTs.size(); Value != e; ++Value) {
4599 MVT ValueVT = ValueVTs[Value];
4600 unsigned NumParts = TLI->getNumRegisters(ValueVT);
4601 MVT RegisterVT = RegVTs[Value];
4603 getCopyToParts(DAG, dl, Val.getValue(Val.getResNo() + Value),
4604 &Parts[Part], NumParts, RegisterVT);
4608 // Copy the parts into the registers.
4609 SmallVector<SDValue, 8> Chains(NumRegs);
4610 for (unsigned i = 0; i != NumRegs; ++i) {
4613 Part = DAG.getCopyToReg(Chain, dl, Regs[i], Parts[i]);
4615 Part = DAG.getCopyToReg(Chain, dl, Regs[i], Parts[i], *Flag);
4616 *Flag = Part.getValue(1);
4618 Chains[i] = Part.getValue(0);
4621 if (NumRegs == 1 || Flag)
4622 // If NumRegs > 1 && Flag is used then the use of the last CopyToReg is
4623 // flagged to it. That is, the CopyToReg nodes and the user are considered
4624 // a single scheduling unit. If we create a TokenFactor and return it as
4625 // chain, then the TokenFactor is both a predecessor (operand) of the
4626 // user as well as a successor (the TF operands are flagged to the user).
4627 // c1, f1 = CopyToReg
4628 // c2, f2 = CopyToReg
4629 // c3 = TokenFactor c1, c2
4632 Chain = Chains[NumRegs-1];
4634 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, &Chains[0], NumRegs);
4637 /// AddInlineAsmOperands - Add this value to the specified inlineasm node
4638 /// operand list. This adds the code marker and includes the number of
4639 /// values added into it.
4640 void RegsForValue::AddInlineAsmOperands(unsigned Code,
4641 bool HasMatching,unsigned MatchingIdx,
4643 std::vector<SDValue> &Ops) const {
4644 MVT IntPtrTy = DAG.getTargetLoweringInfo().getPointerTy();
4645 assert(Regs.size() < (1 << 13) && "Too many inline asm outputs!");
4646 unsigned Flag = Code | (Regs.size() << 3);
4648 Flag |= 0x80000000 | (MatchingIdx << 16);
4649 Ops.push_back(DAG.getTargetConstant(Flag, IntPtrTy));
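// The flag word built above packs, roughly:
//
//   bits 0-2:   the operand code (Code)
//   bits 3-15:  the number of registers that follow (Regs.size())
//   bit 31 and bits 16 up: set when HasMatching, recording MatchingIdx
//
// so the target constant pushed onto Ops tells the inline asm node how many
// register operands belong to this constraint.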
4650 for (unsigned Value = 0, Reg = 0, e = ValueVTs.size(); Value != e; ++Value) {
4651 unsigned NumRegs = TLI->getNumRegisters(ValueVTs[Value]);
4652 MVT RegisterVT = RegVTs[Value];
4653 for (unsigned i = 0; i != NumRegs; ++i) {
4654 assert(Reg < Regs.size() && "Mismatch in # registers expected");
4655 Ops.push_back(DAG.getRegister(Regs[Reg++], RegisterVT));
4660 /// isAllocatableRegister - If the specified register is safe to allocate,
4661 /// i.e. it isn't a stack pointer or some other special register, return the
4662 /// register class for the register. Otherwise, return null.
4663 static const TargetRegisterClass *
4664 isAllocatableRegister(unsigned Reg, MachineFunction &MF,
4665 const TargetLowering &TLI,
4666 const TargetRegisterInfo *TRI) {
4667 MVT FoundVT = MVT::Other;
4668 const TargetRegisterClass *FoundRC = 0;
4669 for (TargetRegisterInfo::regclass_iterator RCI = TRI->regclass_begin(),
4670 E = TRI->regclass_end(); RCI != E; ++RCI) {
4671 MVT ThisVT = MVT::Other;
4673 const TargetRegisterClass *RC = *RCI;
4674 // If none of the value types for this register class are valid, we
4675 // can't use it. For example, 64-bit reg classes on 32-bit targets.
4676 for (TargetRegisterClass::vt_iterator I = RC->vt_begin(), E = RC->vt_end();
4678 if (TLI.isTypeLegal(*I)) {
4679 // If we have already found this register in a different register class,
4680 // choose the one with the largest VT specified. For example, on
4681 // PowerPC, we favor f64 register classes over f32.
4682 if (FoundVT == MVT::Other || FoundVT.bitsLT(*I)) {
4689 if (ThisVT == MVT::Other) continue;
4691 // NOTE: This isn't ideal. In particular, this might allocate the
4692 // frame pointer in functions that need it (because they are not taken
4693 // out of the allocation order, since a variable-sized allocation hasn't
4694 // been seen yet). This is a slight code pessimization, but should still work.
4695 for (TargetRegisterClass::iterator I = RC->allocation_order_begin(MF),
4696 E = RC->allocation_order_end(MF); I != E; ++I)
4698 // We found a matching register class. Keep looking at others in case
4699 // we find one with larger registers that this physreg is also in.
4710 /// AsmOperandInfo - This contains information for each constraint that we are
4711 /// lowering.
4712 class VISIBILITY_HIDDEN SDISelAsmOperandInfo :
4713 public TargetLowering::AsmOperandInfo {
4715 /// CallOperand - If this is the result output operand or a clobber,
4716 /// this is null, otherwise it is the incoming operand to the CallInst.
4717 /// This gets modified as the asm is processed.
4718 SDValue CallOperand;
4720 /// AssignedRegs - If this is a register or register class operand, this
4721 /// contains the set of registers corresponding to the operand.
4722 RegsForValue AssignedRegs;
4724 explicit SDISelAsmOperandInfo(const InlineAsm::ConstraintInfo &info)
4725 : TargetLowering::AsmOperandInfo(info), CallOperand(0,0) {
4728 /// MarkAllocatedRegs - Once AssignedRegs is set, mark the assigned registers
4729 /// busy in OutputRegs/InputRegs.
4730 void MarkAllocatedRegs(bool isOutReg, bool isInReg,
4731 std::set<unsigned> &OutputRegs,
4732 std::set<unsigned> &InputRegs,
4733 const TargetRegisterInfo &TRI) const {
4735 for (unsigned i = 0, e = AssignedRegs.Regs.size(); i != e; ++i)
4736 MarkRegAndAliases(AssignedRegs.Regs[i], OutputRegs, TRI);
4739 for (unsigned i = 0, e = AssignedRegs.Regs.size(); i != e; ++i)
4740 MarkRegAndAliases(AssignedRegs.Regs[i], InputRegs, TRI);
4744 /// getCallOperandValMVT - Return the MVT of the Value* that this operand
4745 /// corresponds to. If there is no Value* for this operand, it returns
4746 /// MVT::Other.
4747 MVT getCallOperandValMVT(const TargetLowering &TLI,
4748 const TargetData *TD) const {
4749 if (CallOperandVal == 0) return MVT::Other;
4751 if (isa<BasicBlock>(CallOperandVal))
4752 return TLI.getPointerTy();
4754 const llvm::Type *OpTy = CallOperandVal->getType();
4756 // If this is an indirect operand, the operand is a pointer to the
4759 OpTy = cast<PointerType>(OpTy)->getElementType();
4761 // If OpTy is not a single value, it may be a struct/union that we
4762 // can tile with integers.
4763 if (!OpTy->isSingleValueType() && OpTy->isSized()) {
4764 unsigned BitSize = TD->getTypeSizeInBits(OpTy);
4773 OpTy = IntegerType::get(BitSize);
4778 return TLI.getValueType(OpTy, true);
4782 /// MarkRegAndAliases - Mark the specified register and all aliases in the
4783 /// specified set.
4784 static void MarkRegAndAliases(unsigned Reg, std::set<unsigned> &Regs,
4785 const TargetRegisterInfo &TRI) {
4786 assert(TargetRegisterInfo::isPhysicalRegister(Reg) && "Isn't a physreg");
4788 if (const unsigned *Aliases = TRI.getAliasSet(Reg))
4789 for (; *Aliases; ++Aliases)
4790 Regs.insert(*Aliases);
4793 } // end llvm namespace.
4796 /// GetRegistersForValue - Assign registers (virtual or physical) for the
4797 /// specified operand. We prefer to assign virtual registers, to allow the
4798 /// register allocator to handle the assignment process. However, if the asm uses
4799 /// features that we can't model on machineinstrs, we have SDISel do the
4800 /// allocation. This produces generally horrible, but correct, code.
4802 /// OpInfo describes the operand.
4803 /// Input and OutputRegs are the set of already allocated physical registers.
4805 void SelectionDAGLowering::
4806 GetRegistersForValue(SDISelAsmOperandInfo &OpInfo,
4807 std::set<unsigned> &OutputRegs,
4808 std::set<unsigned> &InputRegs) {
4809 // Compute whether this value requires an input register, an output register,
4810 // or both.
4811 bool isOutReg = false;
4812 bool isInReg = false;
4813 switch (OpInfo.Type) {
4814 case InlineAsm::isOutput:
4817 // If there is an input constraint that matches this, we need to reserve
4818 // the input register so no other inputs allocate to it.
4819 isInReg = OpInfo.hasMatchingInput();
4821 case InlineAsm::isInput:
4825 case InlineAsm::isClobber:
4832 MachineFunction &MF = DAG.getMachineFunction();
4833 SmallVector<unsigned, 4> Regs;
4835 // If this is a constraint for a single physreg, or a constraint for a
4836 // register class, find it.
4837 std::pair<unsigned, const TargetRegisterClass*> PhysReg =
4838 TLI.getRegForInlineAsmConstraint(OpInfo.ConstraintCode,
4839 OpInfo.ConstraintVT);
4841 unsigned NumRegs = 1;
4842 if (OpInfo.ConstraintVT != MVT::Other) {
4843 // If this is an FP input in an integer register (or vice versa), insert a bit
4844 // cast of the input value. More generally, handle any case where the input
4845 // value disagrees with the register class we plan to stick this in.
4846 if (OpInfo.Type == InlineAsm::isInput &&
4847 PhysReg.second && !PhysReg.second->hasType(OpInfo.ConstraintVT)) {
4848 // Try to convert to the first MVT that the reg class contains. If the
4849 // types are identical size, use a bitcast to convert (e.g. two differing
4851 MVT RegVT = *PhysReg.second->vt_begin();
4852 if (RegVT.getSizeInBits() == OpInfo.ConstraintVT.getSizeInBits()) {
4853 OpInfo.CallOperand = DAG.getNode(ISD::BIT_CONVERT, getCurDebugLoc(),
4854 RegVT, OpInfo.CallOperand);
4855 OpInfo.ConstraintVT = RegVT;
4856 } else if (RegVT.isInteger() && OpInfo.ConstraintVT.isFloatingPoint()) {
4857 // If the input is a FP value and we want it in FP registers, do a
4858 // bitcast to the corresponding integer type. This turns an f64 value
4859 // into i64, which can be passed with two i32 values on a 32-bit
4861 RegVT = MVT::getIntegerVT(OpInfo.ConstraintVT.getSizeInBits());
4862 OpInfo.CallOperand = DAG.getNode(ISD::BIT_CONVERT, getCurDebugLoc(),
4863 RegVT, OpInfo.CallOperand);
4864 OpInfo.ConstraintVT = RegVT;
4868 NumRegs = TLI.getNumRegisters(OpInfo.ConstraintVT);
4872 MVT ValueVT = OpInfo.ConstraintVT;
4874 // If this is a constraint for a specific physical register, like {r17},
4876 if (unsigned AssignedReg = PhysReg.first) {
4877 const TargetRegisterClass *RC = PhysReg.second;
4878 if (OpInfo.ConstraintVT == MVT::Other)
4879 ValueVT = *RC->vt_begin();
4881 // Get the actual register value type. This is important, because the user
4882 // may have asked for (e.g.) the AX register in i32 type. We need to
4883 // remember that AX is actually i16 to get the right extension.
4884 RegVT = *RC->vt_begin();
4886 // This is an explicit reference to a physical register.
4887 Regs.push_back(AssignedReg);
4889 // If this is an expanded reference, add the rest of the regs to Regs.
4891 TargetRegisterClass::iterator I = RC->begin();
4892 for (; *I != AssignedReg; ++I)
4893 assert(I != RC->end() && "Didn't find reg!");
4895 // Already added the first reg.
4897 for (; NumRegs; --NumRegs, ++I) {
4898 assert(I != RC->end() && "Ran out of registers to allocate!");
4902 OpInfo.AssignedRegs = RegsForValue(TLI, Regs, RegVT, ValueVT);
4903 const TargetRegisterInfo *TRI = DAG.getTarget().getRegisterInfo();
4904 OpInfo.MarkAllocatedRegs(isOutReg, isInReg, OutputRegs, InputRegs, *TRI);
4908 // Otherwise, if this was a reference to an LLVM register class, create vregs
4909 // for this reference.
4910 if (const TargetRegisterClass *RC = PhysReg.second) {
4911 RegVT = *RC->vt_begin();
4912 if (OpInfo.ConstraintVT == MVT::Other)
4915 // Create the appropriate number of virtual registers.
4916 MachineRegisterInfo &RegInfo = MF.getRegInfo();
4917 for (; NumRegs; --NumRegs)
4918 Regs.push_back(RegInfo.createVirtualRegister(RC));
4920 OpInfo.AssignedRegs = RegsForValue(TLI, Regs, RegVT, ValueVT);
4924 // This is a reference to a register class that doesn't directly correspond
4925 // to an LLVM register class. Allocate NumRegs consecutive, available,
4926 // registers from the class.
4927 std::vector<unsigned> RegClassRegs
4928 = TLI.getRegClassForInlineAsmConstraint(OpInfo.ConstraintCode,
4929 OpInfo.ConstraintVT);
4931 const TargetRegisterInfo *TRI = DAG.getTarget().getRegisterInfo();
4932 unsigned NumAllocated = 0;
4933 for (unsigned i = 0, e = RegClassRegs.size(); i != e; ++i) {
4934 unsigned Reg = RegClassRegs[i];
4935 // See if this register is available.
4936 if ((isOutReg && OutputRegs.count(Reg)) || // Already used.
4937 (isInReg && InputRegs.count(Reg))) { // Already used.
4938 // Make sure we find consecutive registers.
4943 // Check to see if this register is allocatable (i.e. don't give out the
4945 const TargetRegisterClass *RC = isAllocatableRegister(Reg, MF, TLI, TRI);
4946 if (!RC) { // Couldn't allocate this register.
4947 // Reset NumAllocated to make sure we return consecutive registers.
4952 // Okay, this register is good, we can use it.
4955 // If we allocated enough consecutive registers, succeed.
4956 if (NumAllocated == NumRegs) {
4957 unsigned RegStart = (i-NumAllocated)+1;
4958 unsigned RegEnd = i+1;
4959 // Mark all of the allocated registers used.
4960 for (unsigned i = RegStart; i != RegEnd; ++i)
4961 Regs.push_back(RegClassRegs[i]);
4963 OpInfo.AssignedRegs = RegsForValue(TLI, Regs, *RC->vt_begin(),
4964 OpInfo.ConstraintVT);
4965 OpInfo.MarkAllocatedRegs(isOutReg, isInReg, OutputRegs, InputRegs, *TRI);
4970 // Otherwise, we couldn't allocate enough registers for this.
4973 /// hasInlineAsmMemConstraint - Return true if the inline asm instruction being
4974 /// processed uses a memory 'm' constraint.
4976 hasInlineAsmMemConstraint(std::vector<InlineAsm::ConstraintInfo> &CInfos,
4977 const TargetLowering &TLI) {
4978 for (unsigned i = 0, e = CInfos.size(); i != e; ++i) {
4979 InlineAsm::ConstraintInfo &CI = CInfos[i];
4980 for (unsigned j = 0, ee = CI.Codes.size(); j != ee; ++j) {
4981 TargetLowering::ConstraintType CType = TLI.getConstraintType(CI.Codes[j]);
4982 if (CType == TargetLowering::C_Memory) return true;
4986 // Indirect operand accesses access memory.
4994 /// visitInlineAsm - Handle a call to an InlineAsm object.
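/// As a rough illustration (hypothetical IR, not from the original source),
/// a call such as
///   %sum = call i32 asm "addl $2, $0", "=r,0,r"(i32 %a, i32 %b)
/// reaches this function with one output constraint and two input
/// constraints, which the passes below canonicalize and assign to registers.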
4996 void SelectionDAGLowering::visitInlineAsm(CallSite CS) {
4997 InlineAsm *IA = cast<InlineAsm>(CS.getCalledValue());
4999 /// ConstraintOperands - Information about all of the constraints.
5000 std::vector<SDISelAsmOperandInfo> ConstraintOperands;
5002 std::set<unsigned> OutputRegs, InputRegs;
5004 // Do a prepass over the constraints, canonicalizing them, and building up the
5005 // ConstraintOperands list.
5006 std::vector<InlineAsm::ConstraintInfo>
5007 ConstraintInfos = IA->ParseConstraints();
5009 bool hasMemory = hasInlineAsmMemConstraint(ConstraintInfos, TLI);
5011 SDValue Chain, Flag;
5013 // We won't need to flush pending loads if this asm doesn't touch
5014 // memory and is nonvolatile.
5015 if (hasMemory || IA->hasSideEffects())
5016 Chain = getRoot();
5017 else
5018 Chain = DAG.getRoot();
5020 unsigned ArgNo = 0; // ArgNo - The argument of the CallInst.
5021 unsigned ResNo = 0; // ResNo - The result number of the next output.
5022 for (unsigned i = 0, e = ConstraintInfos.size(); i != e; ++i) {
5023 ConstraintOperands.push_back(SDISelAsmOperandInfo(ConstraintInfos[i]));
5024 SDISelAsmOperandInfo &OpInfo = ConstraintOperands.back();
5026 MVT OpVT = MVT::Other;
5028 // Compute the value type for each operand.
5029 switch (OpInfo.Type) {
5030 case InlineAsm::isOutput:
5031 // Indirect outputs just consume an argument.
5032 if (OpInfo.isIndirect) {
5033 OpInfo.CallOperandVal = CS.getArgument(ArgNo++);
5037 // The return value of the call is this value. As such, there is no
5038 // corresponding argument.
5039 assert(CS.getType() != Type::VoidTy && "Bad inline asm!");
5040 if (const StructType *STy = dyn_cast<StructType>(CS.getType())) {
5041 OpVT = TLI.getValueType(STy->getElementType(ResNo));
5043 assert(ResNo == 0 && "Asm only has one result!");
5044 OpVT = TLI.getValueType(CS.getType());
5048 case InlineAsm::isInput:
5049 OpInfo.CallOperandVal = CS.getArgument(ArgNo++);
5051 case InlineAsm::isClobber:
5056 // If this is an input or an indirect output, process the call argument.
5057 // BasicBlocks are labels, currently appearing only in asm's.
5058 if (OpInfo.CallOperandVal) {
5059 if (BasicBlock *BB = dyn_cast<BasicBlock>(OpInfo.CallOperandVal)) {
5060 OpInfo.CallOperand = DAG.getBasicBlock(FuncInfo.MBBMap[BB]);
5062 OpInfo.CallOperand = getValue(OpInfo.CallOperandVal);
5065 OpVT = OpInfo.getCallOperandValMVT(TLI, TD);
5068 OpInfo.ConstraintVT = OpVT;
5071 // Second pass over the constraints: compute which constraint option to use
5072 // and assign registers to constraints that want a specific physreg.
5073 for (unsigned i = 0, e = ConstraintInfos.size(); i != e; ++i) {
5074 SDISelAsmOperandInfo &OpInfo = ConstraintOperands[i];
5076 // If this is an output operand with a matching input operand, look up the
5077 // matching input. If their types mismatch, e.g. one is an integer, the
5078 // other is floating point, or their sizes are different, flag it as an error.
5080 if (OpInfo.hasMatchingInput()) {
5081 SDISelAsmOperandInfo &Input = ConstraintOperands[OpInfo.MatchingInput];
5082 if (OpInfo.ConstraintVT != Input.ConstraintVT) {
5083 if ((OpInfo.ConstraintVT.isInteger() !=
5084 Input.ConstraintVT.isInteger()) ||
5085 (OpInfo.ConstraintVT.getSizeInBits() !=
5086 Input.ConstraintVT.getSizeInBits())) {
5087 cerr << "llvm: error: Unsupported asm: input constraint with a "
5088 << "matching output constraint of incompatible type!\n";
5091 Input.ConstraintVT = OpInfo.ConstraintVT;
5095 // Compute the constraint code and ConstraintType to use.
5096 TLI.ComputeConstraintToUse(OpInfo, OpInfo.CallOperand, hasMemory, &DAG);
5098 // If this is a memory input, and if the operand is not indirect, do what we
5099 // need to do in order to provide an address for the memory input.
5100 if (OpInfo.ConstraintType == TargetLowering::C_Memory &&
5101 !OpInfo.isIndirect) {
5102 assert(OpInfo.Type == InlineAsm::isInput &&
5103 "Can only indirectify direct input operands!");
5105 // Memory operands really want the address of the value. If we don't have
5106 // an indirect input, put it in the constpool if we can, otherwise spill
5107 // it to a stack slot.
5109 // If the operand is a float, integer, or vector constant, spill to a
5110 // constant pool entry to get its address.
5111 Value *OpVal = OpInfo.CallOperandVal;
5112 if (isa<ConstantFP>(OpVal) || isa<ConstantInt>(OpVal) ||
5113 isa<ConstantVector>(OpVal)) {
5114 OpInfo.CallOperand = DAG.getConstantPool(cast<Constant>(OpVal),
5115 TLI.getPointerTy());
5117 // Otherwise, create a stack slot and emit a store to it before the asm.
5119 const Type *Ty = OpVal->getType();
5120 uint64_t TySize = TLI.getTargetData()->getTypeAllocSize(Ty);
5121 unsigned Align = TLI.getTargetData()->getPrefTypeAlignment(Ty);
5122 MachineFunction &MF = DAG.getMachineFunction();
5123 int SSFI = MF.getFrameInfo()->CreateStackObject(TySize, Align);
5124 SDValue StackSlot = DAG.getFrameIndex(SSFI, TLI.getPointerTy());
5125 Chain = DAG.getStore(Chain, getCurDebugLoc(),
5126 OpInfo.CallOperand, StackSlot, NULL, 0);
5127 OpInfo.CallOperand = StackSlot;
5130 // There is no longer a Value* corresponding to this operand.
5131 OpInfo.CallOperandVal = 0;
5132 // It is now an indirect operand.
5133 OpInfo.isIndirect = true;
5136 // If this constraint is for a specific register, allocate it before anything else.
5138 if (OpInfo.ConstraintType == TargetLowering::C_Register)
5139 GetRegistersForValue(OpInfo, OutputRegs, InputRegs);
5141 ConstraintInfos.clear();
5144 // Third pass - Loop over all of the operands, assigning virtual or physregs
5145 // to register class operands.
5146 for (unsigned i = 0, e = ConstraintOperands.size(); i != e; ++i) {
5147 SDISelAsmOperandInfo &OpInfo = ConstraintOperands[i];
5149 // C_Register operands have already been allocated, Other/Memory don't need to be.
5151 if (OpInfo.ConstraintType == TargetLowering::C_RegisterClass)
5152 GetRegistersForValue(OpInfo, OutputRegs, InputRegs);
5155 // AsmNodeOperands - The operands for the ISD::INLINEASM node.
5156 std::vector<SDValue> AsmNodeOperands;
5157 AsmNodeOperands.push_back(SDValue()); // reserve space for input chain
5158 AsmNodeOperands.push_back(
5159 DAG.getTargetExternalSymbol(IA->getAsmString().c_str(), MVT::Other));
5162 // Loop over all of the inputs, copying the operand values into the
5163 // appropriate registers and processing the output regs.
5164 RegsForValue RetValRegs;
5166 // IndirectStoresToEmit - The set of stores to emit after the inline asm node.
5167 std::vector<std::pair<RegsForValue, Value*> > IndirectStoresToEmit;
5169 for (unsigned i = 0, e = ConstraintOperands.size(); i != e; ++i) {
5170 SDISelAsmOperandInfo &OpInfo = ConstraintOperands[i];
5172 switch (OpInfo.Type) {
5173 case InlineAsm::isOutput: {
5174 if (OpInfo.ConstraintType != TargetLowering::C_RegisterClass &&
5175 OpInfo.ConstraintType != TargetLowering::C_Register) {
5176 // Memory output, or 'other' output (e.g. 'X' constraint).
5177 assert(OpInfo.isIndirect && "Memory output must be indirect operand");
5179 // Add information to the INLINEASM node to know about this output.
5180 unsigned ResOpType = 4/*MEM*/ | (1<<3);
5181 AsmNodeOperands.push_back(DAG.getTargetConstant(ResOpType,
5182 TLI.getPointerTy()));
5183 AsmNodeOperands.push_back(OpInfo.CallOperand);
5187 // Otherwise, this is a register or register class output.
5189 // Copy the output from the appropriate register. Find a register that we can use.
5191 if (OpInfo.AssignedRegs.Regs.empty()) {
5192 cerr << "llvm: error: Couldn't allocate output reg for constraint '"
5193 << OpInfo.ConstraintCode << "'!\n";
5197 // If this is an indirect operand, store through the pointer after the asm.
5199 if (OpInfo.isIndirect) {
5200 IndirectStoresToEmit.push_back(std::make_pair(OpInfo.AssignedRegs,
5201 OpInfo.CallOperandVal));
5203 // This is the result value of the call.
5204 assert(CS.getType() != Type::VoidTy && "Bad inline asm!");
5205 // Concatenate this output onto the outputs list.
5206 RetValRegs.append(OpInfo.AssignedRegs);
5209 // Add information to the INLINEASM node to know that this register is set.
5211 OpInfo.AssignedRegs.AddInlineAsmOperands(OpInfo.isEarlyClobber ?
5212 6 /* EARLYCLOBBER REGDEF */ :
5213 2 /* REGDEF */, false, 0,
5216 DAG, AsmNodeOperands);
5219 case InlineAsm::isInput: {
5220 SDValue InOperandVal = OpInfo.CallOperand;
5222 if (OpInfo.isMatchingInputConstraint()) { // Matching constraint?
5223 // If this is required to match an output register we have already set,
5224 // just use its register.
5225 unsigned OperandNo = OpInfo.getMatchedOperand();
5227 // Scan until we find the definition we already emitted of this operand.
5228 // When we find it, create a RegsForValue operand.
5229 unsigned CurOp = 2; // The first operand.
5230 for (; OperandNo; --OperandNo) {
5231 // Advance to the next operand.
5233 unsigned OpFlag = cast<ConstantSDNode>(AsmNodeOperands[CurOp])->getZExtValue();
5234 assert(((OpFlag & 7) == 2 /*REGDEF*/ ||
5235 (OpFlag & 7) == 6 /*EARLYCLOBBER REGDEF*/ ||
5236 (OpFlag & 7) == 4 /*MEM*/) &&
5237 "Skipped past definitions?");
5238 CurOp += InlineAsm::getNumOperandRegisters(OpFlag)+1;
5242 unsigned OpFlag = cast<ConstantSDNode>(AsmNodeOperands[CurOp])->getZExtValue();
5243 if ((OpFlag & 7) == 2 /*REGDEF*/
5244 || (OpFlag & 7) == 6 /* EARLYCLOBBER REGDEF */) {
5245 // Add (OpFlag&0xffff)>>3 registers to MatchedRegs.
5246 if (OpInfo.isIndirect) {
5247 cerr << "llvm: error: "
5248 "Don't know how to handle tied indirect "
5249 "register inputs yet!\n";
5252 RegsForValue MatchedRegs;
5253 MatchedRegs.TLI = &TLI;
5254 MatchedRegs.ValueVTs.push_back(InOperandVal.getValueType());
5255 MVT RegVT = AsmNodeOperands[CurOp+1].getValueType();
5256 MatchedRegs.RegVTs.push_back(RegVT);
5257 MachineRegisterInfo &RegInfo = DAG.getMachineFunction().getRegInfo();
5258 for (unsigned i = 0, e = InlineAsm::getNumOperandRegisters(OpFlag); i != e; ++i)
5261 MatchedRegs.Regs.push_back(RegInfo.createVirtualRegister(TLI.getRegClassFor(RegVT)));
5263 // Use the produced MatchedRegs object to copy the input value into these registers.
5264 MatchedRegs.getCopyToRegs(InOperandVal, DAG, getCurDebugLoc(), Chain, &Flag);
5266 MatchedRegs.AddInlineAsmOperands(1 /*REGUSE*/,
5267 true, OpInfo.getMatchedOperand(),
5268 DAG, AsmNodeOperands);
5271 assert(((OpFlag & 7) == 4) && "Unknown matching constraint!");
5272 assert((InlineAsm::getNumOperandRegisters(OpFlag)) == 1 &&
5273 "Unexpected number of operands");
5274 // Add information to the INLINEASM node to know about this input.
5275 // See InlineAsm.h isUseOperandTiedToDef.
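// Descriptive note on the flag word used throughout this function: the low
// three bits hold the operand kind (1 = REGUSE, 2 = REGDEF, 3 = IMM, 4 = MEM,
// 6 = EARLYCLOBBER REGDEF), the register/operand count sits above them
// (count << 3), and a tied use sets bit 31 and stores the matched operand
// number in the upper sixteen bits, as the line below does.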
5276 OpFlag |= 0x80000000 | (OpInfo.getMatchedOperand() << 16);
5277 AsmNodeOperands.push_back(DAG.getTargetConstant(OpFlag,
5278 TLI.getPointerTy()));
5279 AsmNodeOperands.push_back(AsmNodeOperands[CurOp+1]);
5284 if (OpInfo.ConstraintType == TargetLowering::C_Other) {
5285 assert(!OpInfo.isIndirect &&
5286 "Don't know how to handle indirect other inputs yet!");
5288 std::vector<SDValue> Ops;
5289 TLI.LowerAsmOperandForConstraint(InOperandVal, OpInfo.ConstraintCode[0],
5290 hasMemory, Ops, DAG);
5292 cerr << "llvm: error: Invalid operand for inline asm constraint '"
5293 << OpInfo.ConstraintCode << "'!\n";
5297 // Add information to the INLINEASM node to know about this input.
5298 unsigned ResOpType = 3 /*IMM*/ | (Ops.size() << 3);
5299 AsmNodeOperands.push_back(DAG.getTargetConstant(ResOpType,
5300 TLI.getPointerTy()));
5301 AsmNodeOperands.insert(AsmNodeOperands.end(), Ops.begin(), Ops.end());
5303 } else if (OpInfo.ConstraintType == TargetLowering::C_Memory) {
5304 assert(OpInfo.isIndirect && "Operand must be indirect to be a mem!");
5305 assert(InOperandVal.getValueType() == TLI.getPointerTy() &&
5306 "Memory operands expect pointer values");
5308 // Add information to the INLINEASM node to know about this input.
5309 unsigned ResOpType = 4/*MEM*/ | (1<<3);
5310 AsmNodeOperands.push_back(DAG.getTargetConstant(ResOpType,
5311 TLI.getPointerTy()));
5312 AsmNodeOperands.push_back(InOperandVal);
5316 assert((OpInfo.ConstraintType == TargetLowering::C_RegisterClass ||
5317 OpInfo.ConstraintType == TargetLowering::C_Register) &&
5318 "Unknown constraint type!");
5319 assert(!OpInfo.isIndirect &&
5320 "Don't know how to handle indirect register inputs yet!");
5322 // Copy the input into the appropriate registers.
5323 if (OpInfo.AssignedRegs.Regs.empty()) {
5324 cerr << "llvm: error: Couldn't allocate input reg for constraint '"
5325 << OpInfo.ConstraintCode << "'!\n";
5329 OpInfo.AssignedRegs.getCopyToRegs(InOperandVal, DAG, getCurDebugLoc(), Chain, &Flag);
5332 OpInfo.AssignedRegs.AddInlineAsmOperands(1/*REGUSE*/, false, 0,
5333 DAG, AsmNodeOperands);
5336 case InlineAsm::isClobber: {
5337 // Add the clobbered value to the operand list, so that the register
5338 // allocator is aware that the physreg got clobbered.
5339 if (!OpInfo.AssignedRegs.Regs.empty())
5340 OpInfo.AssignedRegs.AddInlineAsmOperands(6 /* EARLYCLOBBER REGDEF */,
5341 false, 0, DAG,AsmNodeOperands);
5347 // Finish up input operands.
5348 AsmNodeOperands[0] = Chain;
5349 if (Flag.getNode()) AsmNodeOperands.push_back(Flag);
5351 Chain = DAG.getNode(ISD::INLINEASM, getCurDebugLoc(),
5352 DAG.getVTList(MVT::Other, MVT::Flag),
5353 &AsmNodeOperands[0], AsmNodeOperands.size());
5354 Flag = Chain.getValue(1);
5356 // If this asm returns a register value, copy the result from that register
5357 // and set it as the value of the call.
5358 if (!RetValRegs.Regs.empty()) {
5359 SDValue Val = RetValRegs.getCopyFromRegs(DAG, getCurDebugLoc(), Chain, &Flag);
5362 // FIXME: Why don't we do this for inline asms with MRVs?
5363 if (CS.getType()->isSingleValueType() && CS.getType()->isSized()) {
5364 MVT ResultType = TLI.getValueType(CS.getType());
5366 // If any of the results of the inline asm is a vector, it may have the
5367 // wrong width/num elts. This can happen for register classes that can
5368 // contain multiple different value types. The preg or vreg allocated may
5369 // not have the same VT as was expected. Convert it to the right type
5370 // with bit_convert.
5371 if (ResultType != Val.getValueType() && Val.getValueType().isVector()) {
5372 Val = DAG.getNode(ISD::BIT_CONVERT, getCurDebugLoc(),
5375 } else if (ResultType != Val.getValueType() &&
5376 ResultType.isInteger() && Val.getValueType().isInteger()) {
5377 // If a result value was tied to an input value, the computed result may
5378 // have a wider width than the expected result. Extract the relevant portion.
5380 Val = DAG.getNode(ISD::TRUNCATE, getCurDebugLoc(), ResultType, Val);
5383 assert(ResultType == Val.getValueType() && "Asm result value mismatch!");
5386 setValue(CS.getInstruction(), Val);
5387 // Don't need to use this as a chain in this case.
5388 if (!IA->hasSideEffects() && !hasMemory && IndirectStoresToEmit.empty()) return;
5392 std::vector<std::pair<SDValue, Value*> > StoresToEmit;
5394 // Process indirect outputs, first output all of the flagged copies out of the physregs.
5396 for (unsigned i = 0, e = IndirectStoresToEmit.size(); i != e; ++i) {
5397 RegsForValue &OutRegs = IndirectStoresToEmit[i].first;
5398 Value *Ptr = IndirectStoresToEmit[i].second;
5399 SDValue OutVal = OutRegs.getCopyFromRegs(DAG, getCurDebugLoc(), Chain, &Flag);
5401 StoresToEmit.push_back(std::make_pair(OutVal, Ptr));
5405 // Emit the non-flagged stores from the physregs.
5406 SmallVector<SDValue, 8> OutChains;
5407 for (unsigned i = 0, e = StoresToEmit.size(); i != e; ++i)
5408 OutChains.push_back(DAG.getStore(Chain, getCurDebugLoc(),
5409 StoresToEmit[i].first,
5410 getValue(StoresToEmit[i].second),
5411 StoresToEmit[i].second, 0));
5412 if (!OutChains.empty())
5413 Chain = DAG.getNode(ISD::TokenFactor, getCurDebugLoc(), MVT::Other,
5414 &OutChains[0], OutChains.size());
5419 void SelectionDAGLowering::visitMalloc(MallocInst &I) {
5420 SDValue Src = getValue(I.getOperand(0));
5422 // Scale up by the type size in the original i32 type width. Various
5423 // mid-level optimizers may make assumptions about demanded bits etc from the
5424 // i32-ness of the malloc operand: we do not want to promote to i64 and then
5425 // multiply on 64-bit targets.
5426 // FIXME: Malloc inst should go away: PR715.
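// Illustrative example, not from the original source: for the (pre-PR715) IR
//   %buf = malloc i32, i32 %n
// ElementSize is 4, so Src becomes (%n * 4) in the original i32 width before
// being truncated or zero-extended to the pointer width and handed to the
// libc malloc call built below.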
5427 uint64_t ElementSize = TD->getTypeAllocSize(I.getType()->getElementType());
5428 if (ElementSize != 1)
5429 Src = DAG.getNode(ISD::MUL, getCurDebugLoc(), Src.getValueType(),
5430 Src, DAG.getConstant(ElementSize, Src.getValueType()));
5432 MVT IntPtr = TLI.getPointerTy();
5434 if (IntPtr.bitsLT(Src.getValueType()))
5435 Src = DAG.getNode(ISD::TRUNCATE, getCurDebugLoc(), IntPtr, Src);
5436 else if (IntPtr.bitsGT(Src.getValueType()))
5437 Src = DAG.getNode(ISD::ZERO_EXTEND, getCurDebugLoc(), IntPtr, Src);
5439 TargetLowering::ArgListTy Args;
5440 TargetLowering::ArgListEntry Entry;
5442 Entry.Ty = TLI.getTargetData()->getIntPtrType();
5443 Args.push_back(Entry);
5445 std::pair<SDValue,SDValue> Result =
5446 TLI.LowerCallTo(getRoot(), I.getType(), false, false, false, false,
5447 0, CallingConv::C, PerformTailCallOpt,
5448 DAG.getExternalSymbol("malloc", IntPtr),
5449 Args, DAG, getCurDebugLoc());
5450 setValue(&I, Result.first); // Pointers always fit in registers
5451 DAG.setRoot(Result.second);
5454 void SelectionDAGLowering::visitFree(FreeInst &I) {
5455 TargetLowering::ArgListTy Args;
5456 TargetLowering::ArgListEntry Entry;
5457 Entry.Node = getValue(I.getOperand(0));
5458 Entry.Ty = TLI.getTargetData()->getIntPtrType();
5459 Args.push_back(Entry);
5460 MVT IntPtr = TLI.getPointerTy();
5461 std::pair<SDValue,SDValue> Result =
5462 TLI.LowerCallTo(getRoot(), Type::VoidTy, false, false, false, false,
5463 0, CallingConv::C, PerformTailCallOpt,
5464 DAG.getExternalSymbol("free", IntPtr), Args, DAG, getCurDebugLoc());
5466 DAG.setRoot(Result.second);
5469 void SelectionDAGLowering::visitVAStart(CallInst &I) {
5470 DAG.setRoot(DAG.getNode(ISD::VASTART, getCurDebugLoc(),
5471 MVT::Other, getRoot(),
5472 getValue(I.getOperand(1)),
5473 DAG.getSrcValue(I.getOperand(1))));
5476 void SelectionDAGLowering::visitVAArg(VAArgInst &I) {
5477 SDValue V = DAG.getVAArg(TLI.getValueType(I.getType()), getCurDebugLoc(),
5478 getRoot(), getValue(I.getOperand(0)),
5479 DAG.getSrcValue(I.getOperand(0)));
5481 DAG.setRoot(V.getValue(1));
5484 void SelectionDAGLowering::visitVAEnd(CallInst &I) {
5485 DAG.setRoot(DAG.getNode(ISD::VAEND, getCurDebugLoc(),
5486 MVT::Other, getRoot(),
5487 getValue(I.getOperand(1)),
5488 DAG.getSrcValue(I.getOperand(1))));
5491 void SelectionDAGLowering::visitVACopy(CallInst &I) {
5492 DAG.setRoot(DAG.getNode(ISD::VACOPY, getCurDebugLoc(),
5493 MVT::Other, getRoot(),
5494 getValue(I.getOperand(1)),
5495 getValue(I.getOperand(2)),
5496 DAG.getSrcValue(I.getOperand(1)),
5497 DAG.getSrcValue(I.getOperand(2))));
5500 /// TargetLowering::LowerArguments - This is the default LowerArguments
5501 /// implementation, which just inserts a FORMAL_ARGUMENTS node. FIXME: When all
5502 /// targets are migrated to using FORMAL_ARGUMENTS, this hook should be
5503 /// integrated into SDISel.
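/// Rough sketch of the result, under assumptions not stated in the original:
/// for 'define i32 @f(i32 %a, i64 %b)' on a 32-bit target, the node gets the
/// chain, the calling convention, and the vararg flag as operands (plus one
/// flags operand per register), and produces one value per legal register
/// (one i32 for %a, two i32 parts for %b) followed by a chain result.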
5504 void TargetLowering::LowerArguments(Function &F, SelectionDAG &DAG,
5505 SmallVectorImpl<SDValue> &ArgValues,
5507 // Add CC# and isVararg as operands to the FORMAL_ARGUMENTS node.
5508 SmallVector<SDValue, 3+16> Ops;
5509 Ops.push_back(DAG.getRoot());
5510 Ops.push_back(DAG.getConstant(F.getCallingConv(), getPointerTy()));
5511 Ops.push_back(DAG.getConstant(F.isVarArg(), getPointerTy()));
5513 // Add one result value for each formal argument.
5514 SmallVector<MVT, 16> RetVals;
5516 for (Function::arg_iterator I = F.arg_begin(), E = F.arg_end();
5518 SmallVector<MVT, 4> ValueVTs;
5519 ComputeValueVTs(*this, I->getType(), ValueVTs);
5520 for (unsigned Value = 0, NumValues = ValueVTs.size();
5521 Value != NumValues; ++Value) {
5522 MVT VT = ValueVTs[Value];
5523 const Type *ArgTy = VT.getTypeForMVT(*DAG.getContext());
5524 ISD::ArgFlagsTy Flags;
5525 unsigned OriginalAlignment =
5526 getTargetData()->getABITypeAlignment(ArgTy);
5528 if (F.paramHasAttr(j, Attribute::ZExt)) Flags.setZExt();
5530 if (F.paramHasAttr(j, Attribute::SExt)) Flags.setSExt();
5532 if (F.paramHasAttr(j, Attribute::InReg)) Flags.setInReg();
5534 if (F.paramHasAttr(j, Attribute::StructRet)) Flags.setSRet();
5536 if (F.paramHasAttr(j, Attribute::ByVal)) {
5538 const PointerType *Ty = cast<PointerType>(I->getType());
5539 const Type *ElementTy = Ty->getElementType();
5540 unsigned FrameAlign = getByValTypeAlignment(ElementTy);
5541 unsigned FrameSize = getTargetData()->getTypeAllocSize(ElementTy);
5542 // For ByVal, alignment should be passed from the front end. The back end
5543 // will guess if this info is not there, but there are cases it cannot get right.
5544 if (F.getParamAlignment(j))
5545 FrameAlign = F.getParamAlignment(j);
5546 Flags.setByValAlign(FrameAlign);
5547 Flags.setByValSize(FrameSize);
5549 if (F.paramHasAttr(j, Attribute::Nest)) Flags.setNest();
5551 Flags.setOrigAlign(OriginalAlignment);
5553 MVT RegisterVT = getRegisterType(VT);
5554 unsigned NumRegs = getNumRegisters(VT);
5555 for (unsigned i = 0; i != NumRegs; ++i) {
5556 RetVals.push_back(RegisterVT);
5557 ISD::ArgFlagsTy MyFlags = Flags;
5558 if (NumRegs > 1 && i == 0) MyFlags.setSplit();
5560 // if it isn't the first piece, alignment must be 1
5562 else if (i > 0) MyFlags.setOrigAlign(1);
5563 Ops.push_back(DAG.getArgFlags(MyFlags));
5568 RetVals.push_back(MVT::Other);
5571 SDNode *Result = DAG.getNode(ISD::FORMAL_ARGUMENTS, dl,
5572 DAG.getVTList(&RetVals[0], RetVals.size()),
5573 &Ops[0], Ops.size()).getNode();
5575 // Prelower FORMAL_ARGUMENTS. This isn't required for functionality, but
5576 // allows exposing the loads that may be part of the argument access to the
5577 // first DAGCombiner pass.
5578 SDValue TmpRes = LowerOperation(SDValue(Result, 0), DAG);
5580 // The number of results should match up, except that the lowered one may have
5581 // an extra flag result.
5582 assert((Result->getNumValues() == TmpRes.getNode()->getNumValues() ||
5583 (Result->getNumValues()+1 == TmpRes.getNode()->getNumValues() &&
5584 TmpRes.getValue(Result->getNumValues()).getValueType() == MVT::Flag))
5585 && "Lowering produced unexpected number of results!");
5587 // The FORMAL_ARGUMENTS node itself is likely no longer needed.
5588 if (Result != TmpRes.getNode() && Result->use_empty()) {
5589 HandleSDNode Dummy(DAG.getRoot());
5590 DAG.RemoveDeadNode(Result);
5593 Result = TmpRes.getNode();
5595 unsigned NumArgRegs = Result->getNumValues() - 1;
5596 DAG.setRoot(SDValue(Result, NumArgRegs));
5598 // Set up the return result vector.
5601 for (Function::arg_iterator I = F.arg_begin(), E = F.arg_end(); I != E;
5603 SmallVector<MVT, 4> ValueVTs;
5604 ComputeValueVTs(*this, I->getType(), ValueVTs);
5605 for (unsigned Value = 0, NumValues = ValueVTs.size();
5606 Value != NumValues; ++Value) {
5607 MVT VT = ValueVTs[Value];
5608 MVT PartVT = getRegisterType(VT);
5610 unsigned NumParts = getNumRegisters(VT);
5611 SmallVector<SDValue, 4> Parts(NumParts);
5612 for (unsigned j = 0; j != NumParts; ++j)
5613 Parts[j] = SDValue(Result, i++);
5615 ISD::NodeType AssertOp = ISD::DELETED_NODE;
5616 if (F.paramHasAttr(Idx, Attribute::SExt))
5617 AssertOp = ISD::AssertSext;
5618 else if (F.paramHasAttr(Idx, Attribute::ZExt))
5619 AssertOp = ISD::AssertZext;
5621 ArgValues.push_back(getCopyFromParts(DAG, dl, &Parts[0], NumParts,
5622 PartVT, VT, AssertOp));
5625 assert(i == NumArgRegs && "Argument register count mismatch!");
5629 /// TargetLowering::LowerCallTo - This is the default LowerCallTo
5630 /// implementation, which just inserts an ISD::CALL node, which is later custom
5631 /// lowered by the target to something concrete. FIXME: When all targets are
5632 /// migrated to using ISD::CALL, this hook should be integrated into SDISel.
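/// Sketch of what follows (descriptive, not original text): every argument is
/// broken into legal register-sized parts with getCopyToParts, each part is
/// pushed along with an ArgFlags operand, a CALL node is built over the legal
/// return types plus a chain, and the returned parts are reassembled with
/// getCopyFromParts and merged into a single value.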
5633 std::pair<SDValue, SDValue>
5634 TargetLowering::LowerCallTo(SDValue Chain, const Type *RetTy,
5635 bool RetSExt, bool RetZExt, bool isVarArg,
5636 bool isInreg, unsigned NumFixedArgs,
5637 unsigned CallingConv, bool isTailCall,
5639 ArgListTy &Args, SelectionDAG &DAG, DebugLoc dl) {
5640 assert((!isTailCall || PerformTailCallOpt) &&
5641 "isTailCall set when tail-call optimizations are disabled!");
5643 SmallVector<SDValue, 32> Ops;
5644 Ops.push_back(Chain); // Op#0 - Chain
5645 Ops.push_back(Callee);
5647 // Handle all of the outgoing arguments.
5648 for (unsigned i = 0, e = Args.size(); i != e; ++i) {
5649 SmallVector<MVT, 4> ValueVTs;
5650 ComputeValueVTs(*this, Args[i].Ty, ValueVTs);
5651 for (unsigned Value = 0, NumValues = ValueVTs.size();
5652 Value != NumValues; ++Value) {
5653 MVT VT = ValueVTs[Value];
5654 const Type *ArgTy = VT.getTypeForMVT(*DAG.getContext());
5655 SDValue Op = SDValue(Args[i].Node.getNode(),
5656 Args[i].Node.getResNo() + Value);
5657 ISD::ArgFlagsTy Flags;
5658 unsigned OriginalAlignment =
5659 getTargetData()->getABITypeAlignment(ArgTy);
5665 if (Args[i].isInReg) Flags.setInReg();
5669 if (Args[i].isByVal) {
5671 const PointerType *Ty = cast<PointerType>(Args[i].Ty);
5672 const Type *ElementTy = Ty->getElementType();
5673 unsigned FrameAlign = getByValTypeAlignment(ElementTy);
5674 unsigned FrameSize = getTargetData()->getTypeAllocSize(ElementTy);
5675 // For ByVal, alignment should come from the front end. The back end will
5676 // guess if this info is not there, but there are cases it cannot get right.
5677 if (Args[i].Alignment)
5678 FrameAlign = Args[i].Alignment;
5679 Flags.setByValAlign(FrameAlign);
5680 Flags.setByValSize(FrameSize);
5684 Flags.setOrigAlign(OriginalAlignment);
5686 MVT PartVT = getRegisterType(VT);
5687 unsigned NumParts = getNumRegisters(VT);
5688 SmallVector<SDValue, 4> Parts(NumParts);
5689 ISD::NodeType ExtendKind = ISD::ANY_EXTEND;
5692 if (Args[i].isSExt) ExtendKind = ISD::SIGN_EXTEND;
5693 else if (Args[i].isZExt)
5694 ExtendKind = ISD::ZERO_EXTEND;
5696 getCopyToParts(DAG, dl, Op, &Parts[0], NumParts, PartVT, ExtendKind);
5698 for (unsigned i = 0; i != NumParts; ++i) {
5699 // if it isn't first piece, alignment must be 1
5700 ISD::ArgFlagsTy MyFlags = Flags;
5701 if (NumParts > 1 && i == 0) MyFlags.setSplit();
5704 else if (i > 0) MyFlags.setOrigAlign(1);
5706 Ops.push_back(Parts[i]);
5707 Ops.push_back(DAG.getArgFlags(MyFlags));
5712 // Figure out the result value types. We start by making a list of
5713 // the potentially illegal return value types.
5714 SmallVector<MVT, 4> LoweredRetTys;
5715 SmallVector<MVT, 4> RetTys;
5716 ComputeValueVTs(*this, RetTy, RetTys);
5718 // Then we translate that to a list of legal types.
5719 for (unsigned I = 0, E = RetTys.size(); I != E; ++I) {
5721 MVT VT = RetTys[I]; MVT RegisterVT = getRegisterType(VT);
5722 unsigned NumRegs = getNumRegisters(VT);
5723 for (unsigned i = 0; i != NumRegs; ++i)
5724 LoweredRetTys.push_back(RegisterVT);
5727 LoweredRetTys.push_back(MVT::Other); // Always has a chain.
5729 // Create the CALL node.
5730 SDValue Res = DAG.getCall(CallingConv, dl,
5731 isVarArg, isTailCall, isInreg,
5732 DAG.getVTList(&LoweredRetTys[0],
5733 LoweredRetTys.size()),
5734 &Ops[0], Ops.size(), NumFixedArgs);
5736 Chain = Res.getValue(LoweredRetTys.size() - 1);
5738 // Gather up the call result into a single value.
5739 if (RetTy != Type::VoidTy && !RetTys.empty()) {
5740 ISD::NodeType AssertOp = ISD::DELETED_NODE;
5743 if (RetSExt) AssertOp = ISD::AssertSext;
5745 else if (RetZExt) AssertOp = ISD::AssertZext;
5747 SmallVector<SDValue, 4> ReturnValues;
5749 for (unsigned I = 0, E = RetTys.size(); I != E; ++I) {
5751 MVT VT = RetTys[I]; MVT RegisterVT = getRegisterType(VT);
5752 unsigned NumRegs = getNumRegisters(VT);
5753 unsigned RegNoEnd = NumRegs + RegNo;
5754 SmallVector<SDValue, 4> Results;
5755 for (; RegNo != RegNoEnd; ++RegNo)
5756 Results.push_back(Res.getValue(RegNo));
5757 SDValue ReturnValue =
5758 getCopyFromParts(DAG, dl, &Results[0], NumRegs, RegisterVT, VT, AssertOp);
5760 ReturnValues.push_back(ReturnValue);
5762 Res = DAG.getNode(ISD::MERGE_VALUES, dl,
5763 DAG.getVTList(&RetTys[0], RetTys.size()),
5764 &ReturnValues[0], ReturnValues.size());
5767 return std::make_pair(Res, Chain);
5770 void TargetLowering::LowerOperationWrapper(SDNode *N,
5771 SmallVectorImpl<SDValue> &Results,
5772 SelectionDAG &DAG) {
5773 SDValue Res = LowerOperation(SDValue(N, 0), DAG);
5775 if (Res.getNode()) Results.push_back(Res);
5778 SDValue TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) {
5779 assert(0 && "LowerOperation not implemented for this target!");
5785 void SelectionDAGLowering::CopyValueToVirtualRegister(Value *V, unsigned Reg) {
5786 SDValue Op = getValue(V);
5787 assert((Op.getOpcode() != ISD::CopyFromReg ||
5788 cast<RegisterSDNode>(Op.getOperand(1))->getReg() != Reg) &&
5789 "Copy from a reg to the same reg!");
5790 assert(!TargetRegisterInfo::isPhysicalRegister(Reg) && "Is a physreg");
5792 RegsForValue RFV(TLI, Reg, V->getType());
5793 SDValue Chain = DAG.getEntryNode();
5794 RFV.getCopyToRegs(Op, DAG, getCurDebugLoc(), Chain, 0);
5795 PendingExports.push_back(Chain);
5798 #include "llvm/CodeGen/SelectionDAGISel.h"
5800 void SelectionDAGISel::
5801 LowerArguments(BasicBlock *LLVMBB) {
5802 // If this is the entry block, emit arguments.
5803 Function &F = *LLVMBB->getParent();
5804 SDValue OldRoot = SDL->DAG.getRoot();
5805 SmallVector<SDValue, 16> Args;
5806 TLI.LowerArguments(F, SDL->DAG, Args, SDL->getCurDebugLoc());
5809 for (Function::arg_iterator AI = F.arg_begin(), E = F.arg_end();
5811 SmallVector<MVT, 4> ValueVTs;
5812 ComputeValueVTs(TLI, AI->getType(), ValueVTs);
5813 unsigned NumValues = ValueVTs.size();
5814 if (!AI->use_empty()) {
5815 SDL->setValue(AI, SDL->DAG.getMergeValues(&Args[a], NumValues,
5816 SDL->getCurDebugLoc()));
5817 // If this argument is live outside of the entry block, insert a copy from
5818 // wherever we got it to the vreg that other BB's will reference it as.
5819 SDL->CopyToExportRegsIfNeeded(AI);
5824 // Finally, if the target has anything special to do, allow it to do so.
5825 // FIXME: this should insert code into the DAG!
5826 EmitFunctionEntryCode(F, SDL->DAG.getMachineFunction());
5829 /// Handle PHI nodes in successor blocks. Emit code into the SelectionDAG to
5830 /// ensure constants are generated when needed. Remember the virtual registers
5831 /// that need to be added to the Machine PHI nodes as input. We cannot just
5832 /// directly add them, because expansion might result in multiple MBB's for one
5833 /// BB. As such, the start of the BB might correspond to a different MBB than the end.
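/// For example (illustrative only), if a successor block starts with
///   %x = phi i32 [ 7, %thisbb ], [ %v, %otherbb ]
/// then the constant 7 is copied into a fresh virtual register in this block
/// and that register is recorded in PHINodesToUpdate for the machine PHI.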
5837 SelectionDAGISel::HandlePHINodesInSuccessorBlocks(BasicBlock *LLVMBB) {
5838 TerminatorInst *TI = LLVMBB->getTerminator();
5840 SmallPtrSet<MachineBasicBlock *, 4> SuccsHandled;
5842 // Check successor nodes' PHI nodes that expect a constant to be available from this block.
5844 for (unsigned succ = 0, e = TI->getNumSuccessors(); succ != e; ++succ) {
5845 BasicBlock *SuccBB = TI->getSuccessor(succ);
5846 if (!isa<PHINode>(SuccBB->begin())) continue;
5847 MachineBasicBlock *SuccMBB = FuncInfo->MBBMap[SuccBB];
5849 // If this terminator has multiple identical successors (common for
5850 // switches), only handle each succ once.
5851 if (!SuccsHandled.insert(SuccMBB)) continue;
5853 MachineBasicBlock::iterator MBBI = SuccMBB->begin();
5856 // At this point we know that there is a 1-1 correspondence between LLVM PHI
5857 // nodes and Machine PHI nodes, but the incoming operands have not been emitted yet.
5859 for (BasicBlock::iterator I = SuccBB->begin();
5860 (PN = dyn_cast<PHINode>(I)); ++I) {
5861 // Ignore dead phi's.
5862 if (PN->use_empty()) continue;
5865 Value *PHIOp = PN->getIncomingValueForBlock(LLVMBB);
5867 if (Constant *C = dyn_cast<Constant>(PHIOp)) {
5868 unsigned &RegOut = SDL->ConstantsOut[C];
5870 RegOut = FuncInfo->CreateRegForValue(C);
5871 SDL->CopyValueToVirtualRegister(C, RegOut);
5875 Reg = FuncInfo->ValueMap[PHIOp];
5877 assert(isa<AllocaInst>(PHIOp) &&
5878 FuncInfo->StaticAllocaMap.count(cast<AllocaInst>(PHIOp)) &&
5879 "Didn't codegen value into a register!??");
5880 Reg = FuncInfo->CreateRegForValue(PHIOp);
5881 SDL->CopyValueToVirtualRegister(PHIOp, Reg);
5885 // Remember that this register needs to be added to the machine PHI node as
5886 // the input for this MBB.
5887 SmallVector<MVT, 4> ValueVTs;
5888 ComputeValueVTs(TLI, PN->getType(), ValueVTs);
5889 for (unsigned vti = 0, vte = ValueVTs.size(); vti != vte; ++vti) {
5890 MVT VT = ValueVTs[vti];
5891 unsigned NumRegisters = TLI.getNumRegisters(VT);
5892 for (unsigned i = 0, e = NumRegisters; i != e; ++i)
5893 SDL->PHINodesToUpdate.push_back(std::make_pair(MBBI++, Reg+i));
5894 Reg += NumRegisters;
5898 SDL->ConstantsOut.clear();
5901 /// This is the Fast-ISel version of HandlePHINodesInSuccessorBlocks. It only
5902 /// supports legal types, and it emits MachineInstrs directly instead of
5903 /// creating SelectionDAG nodes.
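/// Summary of the bail-out path below (descriptive note, not original text):
/// when a successor PHI has a type this fast path cannot handle, the
/// PHINodesToUpdate list is restored to its original size and the routine
/// gives up so the SelectionDAG-based handler above can process the block.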
5906 SelectionDAGISel::HandlePHINodesInSuccessorBlocksFast(BasicBlock *LLVMBB,
5908 TerminatorInst *TI = LLVMBB->getTerminator();
5910 SmallPtrSet<MachineBasicBlock *, 4> SuccsHandled;
5911 unsigned OrigNumPHINodesToUpdate = SDL->PHINodesToUpdate.size();
5913 // Check successor nodes' PHI nodes that expect a constant to be available from this block.
5915 for (unsigned succ = 0, e = TI->getNumSuccessors(); succ != e; ++succ) {
5916 BasicBlock *SuccBB = TI->getSuccessor(succ);
5917 if (!isa<PHINode>(SuccBB->begin())) continue;
5918 MachineBasicBlock *SuccMBB = FuncInfo->MBBMap[SuccBB];
5920 // If this terminator has multiple identical successors (common for
5921 // switches), only handle each succ once.
5922 if (!SuccsHandled.insert(SuccMBB)) continue;
5924 MachineBasicBlock::iterator MBBI = SuccMBB->begin();
5927 // At this point we know that there is a 1-1 correspondence between LLVM PHI
5928 // nodes and Machine PHI nodes, but the incoming operands have not been emitted yet.
5930 for (BasicBlock::iterator I = SuccBB->begin();
5931 (PN = dyn_cast<PHINode>(I)); ++I) {
5932 // Ignore dead phi's.
5933 if (PN->use_empty()) continue;
5935 // Only handle legal types. Two interesting things to note here. First,
5936 // by bailing out early, we may leave behind some dead instructions,
5937 // since SelectionDAG's HandlePHINodesInSuccessorBlocks will insert its
5938 // own moves. Second, this check is necessary because FastISel doesn't
5939 // use CreateRegForValue to create registers, so it always creates
5940 // exactly one register for each non-void instruction.
5941 MVT VT = TLI.getValueType(PN->getType(), /*AllowUnknown=*/true);
5942 if (VT == MVT::Other || !TLI.isTypeLegal(VT)) {
5945 VT = TLI.getTypeToTransformTo(VT);
5947 SDL->PHINodesToUpdate.resize(OrigNumPHINodesToUpdate);
5952 Value *PHIOp = PN->getIncomingValueForBlock(LLVMBB);
5954 unsigned Reg = F->getRegForValue(PHIOp);
5956 SDL->PHINodesToUpdate.resize(OrigNumPHINodesToUpdate);
5959 SDL->PHINodesToUpdate.push_back(std::make_pair(MBBI++, Reg));