//===-- SelectionDAGBuild.cpp - Selection-DAG building --------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements routines for translating from LLVM IR into SelectionDAG IR.
//
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "isel"
#include "SelectionDAGBuild.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Constants.h"
#include "llvm/CallingConv.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Function.h"
#include "llvm/GlobalVariable.h"
#include "llvm/InlineAsm.h"
#include "llvm/Instructions.h"
#include "llvm/Intrinsics.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Module.h"
#include "llvm/CodeGen/FastISel.h"
#include "llvm/CodeGen/GCStrategy.h"
#include "llvm/CodeGen/GCMetadata.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetFrameInfo.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
#include <algorithm>
using namespace llvm;
/// LimitFloatPrecision - Generate low-precision inline sequences for
/// some float libcalls (6, 8 or 12 bits).
static unsigned LimitFloatPrecision;

static cl::opt<unsigned, true>
LimitFPPrecision("limit-float-precision",
                 cl::desc("Generate low-precision inline sequences "
                          "for some float libcalls"),
                 cl::location(LimitFloatPrecision),
                 cl::init(0));
/// ComputeLinearIndex - Given an LLVM IR aggregate type and a sequence of
/// insertvalue or extractvalue indices that identify a member, return
/// the linearized index of the start of the member.
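///
/// For example, given the aggregate type { i32, [2 x float], i8 } and the
/// extractvalue indices {1, 1}, the linearized index is 2: the i32 occupies
/// index 0 and the two floats occupy indices 1 and 2.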
static unsigned ComputeLinearIndex(const TargetLowering &TLI, const Type *Ty,
                                   const unsigned *Indices,
                                   const unsigned *IndicesEnd,
                                   unsigned CurIndex = 0) {
  // Base case: We're done.
  if (Indices && Indices == IndicesEnd)
    return CurIndex;

  // Given a struct type, recursively traverse the elements.
  if (const StructType *STy = dyn_cast<StructType>(Ty)) {
    for (StructType::element_iterator EB = STy->element_begin(),
                                      EI = EB,
                                      EE = STy->element_end();
         EI != EE; ++EI) {
      if (Indices && *Indices == unsigned(EI - EB))
        return ComputeLinearIndex(TLI, *EI, Indices+1, IndicesEnd, CurIndex);
      CurIndex = ComputeLinearIndex(TLI, *EI, 0, 0, CurIndex);
    }
  }
  // Given an array type, recursively traverse the elements.
  else if (const ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
    const Type *EltTy = ATy->getElementType();
    for (unsigned i = 0, e = ATy->getNumElements(); i != e; ++i) {
      if (Indices && *Indices == i)
        return ComputeLinearIndex(TLI, EltTy, Indices+1, IndicesEnd, CurIndex);
      CurIndex = ComputeLinearIndex(TLI, EltTy, 0, 0, CurIndex);
    }
  }
  // We haven't found the type we're looking for, so keep searching.
  return CurIndex + 1;
}
/// ComputeValueVTs - Given an LLVM IR type, compute a sequence of
/// MVTs that represent all the individual underlying
/// non-aggregate types that comprise it.
///
/// If Offsets is non-null, it points to a vector to be filled in
/// with the in-memory offsets of each of the individual values.
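///
/// For example, assuming a layout where i32 and float are both 4 bytes,
/// { i32, [2 x float] } yields ValueVTs = { i32, f32, f32 } and, if
/// requested, Offsets = { 0, 4, 8 }.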
static void ComputeValueVTs(const TargetLowering &TLI, const Type *Ty,
                            SmallVectorImpl<MVT> &ValueVTs,
                            SmallVectorImpl<uint64_t> *Offsets = 0,
                            uint64_t StartingOffset = 0) {
  // Given a struct type, recursively traverse the elements.
  if (const StructType *STy = dyn_cast<StructType>(Ty)) {
    const StructLayout *SL = TLI.getTargetData()->getStructLayout(STy);
    for (StructType::element_iterator EB = STy->element_begin(),
                                      EI = EB,
                                      EE = STy->element_end();
         EI != EE; ++EI)
      ComputeValueVTs(TLI, *EI, ValueVTs, Offsets,
                      StartingOffset + SL->getElementOffset(EI - EB));
    return;
  }
  // Given an array type, recursively traverse the elements.
  if (const ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
    const Type *EltTy = ATy->getElementType();
    uint64_t EltSize = TLI.getTargetData()->getABITypeSize(EltTy);
    for (unsigned i = 0, e = ATy->getNumElements(); i != e; ++i)
      ComputeValueVTs(TLI, EltTy, ValueVTs, Offsets,
                      StartingOffset + i * EltSize);
    return;
  }
  // Base case: we can get an MVT for this LLVM IR type.
  ValueVTs.push_back(TLI.getValueType(Ty));
  if (Offsets)
    Offsets->push_back(StartingOffset);
}
/// RegsForValue - This struct represents the registers (physical or virtual)
/// that a particular set of values is assigned, and the type information about
/// the value. The most common situation is to represent one value at a time,
/// but struct or array values are handled element-wise as multiple values.
/// The splitting of aggregates is performed recursively, so that we never
/// have aggregate-typed registers. The values at this point do not necessarily
/// have legal types, so each value may require one or more registers of some
/// legal type.
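///
/// For example, on a target where i64 is not legal but i32 is, a single i64
/// value has ValueVTs = { i64 }, RegVTs = { i32 }, and two registers in Regs
/// holding the low and high halves.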
///
struct VISIBILITY_HIDDEN RegsForValue {
  /// TLI - The TargetLowering object.
  ///
  const TargetLowering *TLI;

  /// ValueVTs - The value types of the values, which may not be legal, and
  /// may need to be promoted or synthesized from one or more registers.
  ///
  SmallVector<MVT, 4> ValueVTs;

  /// RegVTs - The value types of the registers. This is the same size as
  /// ValueVTs and it records, for each value, what the type of the assigned
  /// register or registers are. (Individual values are never synthesized
  /// from more than one type of register.)
  ///
  /// With virtual registers, the contents of RegVTs is redundant with TLI's
  /// getRegisterType member function, however with physical registers
  /// it is necessary to have a separate record of the types.
  ///
  SmallVector<MVT, 4> RegVTs;

  /// Regs - This list holds the registers assigned to the values.
  /// Each legal or promoted value requires one register, and each
  /// expanded value requires multiple registers.
  ///
  SmallVector<unsigned, 4> Regs;

  RegsForValue() : TLI(0) {}

  RegsForValue(const TargetLowering &tli,
               const SmallVector<unsigned, 4> &regs,
               MVT regvt, MVT valuevt)
    : TLI(&tli), ValueVTs(1, valuevt), RegVTs(1, regvt), Regs(regs) {}
  RegsForValue(const TargetLowering &tli,
               const SmallVector<unsigned, 4> &regs,
               const SmallVector<MVT, 4> &regvts,
               const SmallVector<MVT, 4> &valuevts)
    : TLI(&tli), ValueVTs(valuevts), RegVTs(regvts), Regs(regs) {}
  RegsForValue(const TargetLowering &tli,
               unsigned Reg, const Type *Ty) : TLI(&tli) {
    ComputeValueVTs(tli, Ty, ValueVTs);

    for (unsigned Value = 0, e = ValueVTs.size(); Value != e; ++Value) {
      MVT ValueVT = ValueVTs[Value];
      unsigned NumRegs = TLI->getNumRegisters(ValueVT);
      MVT RegisterVT = TLI->getRegisterType(ValueVT);
      for (unsigned i = 0; i != NumRegs; ++i)
        Regs.push_back(Reg + i);
      RegVTs.push_back(RegisterVT);
      Reg += NumRegs;
    }
  }

  /// append - Add the specified values to this one.
  void append(const RegsForValue &RHS) {
    TLI = RHS.TLI;
    ValueVTs.append(RHS.ValueVTs.begin(), RHS.ValueVTs.end());
    RegVTs.append(RHS.RegVTs.begin(), RHS.RegVTs.end());
    Regs.append(RHS.Regs.begin(), RHS.Regs.end());
  }

  /// getCopyFromRegs - Emit a series of CopyFromReg nodes that copies from
  /// this value and returns the result as a ValueVTs value. This uses
  /// Chain/Flag as the input and updates them for the output Chain/Flag.
  /// If the Flag pointer is NULL, no flag is used.
  SDValue getCopyFromRegs(SelectionDAG &DAG,
                          SDValue &Chain, SDValue *Flag) const;

  /// getCopyToRegs - Emit a series of CopyToReg nodes that copies the
  /// specified value into the registers specified by this object. This uses
  /// Chain/Flag as the input and updates them for the output Chain/Flag.
  /// If the Flag pointer is NULL, no flag is used.
  void getCopyToRegs(SDValue Val, SelectionDAG &DAG,
                     SDValue &Chain, SDValue *Flag) const;

  /// AddInlineAsmOperands - Add this value to the specified inlineasm node
  /// operand list. This adds the code marker and includes the number of
  /// values added into it.
  void AddInlineAsmOperands(unsigned Code, SelectionDAG &DAG,
                            std::vector<SDValue> &Ops) const;
};
/// isUsedOutsideOfDefiningBlock - Return true if this instruction is used by
/// PHI nodes or outside of the basic block that defines it, or used by a
/// switch or atomic instruction, which may expand to multiple basic blocks.
static bool isUsedOutsideOfDefiningBlock(Instruction *I) {
  if (isa<PHINode>(I)) return true;
  BasicBlock *BB = I->getParent();
  for (Value::use_iterator UI = I->use_begin(), E = I->use_end(); UI != E; ++UI)
    if (cast<Instruction>(*UI)->getParent() != BB || isa<PHINode>(*UI) ||
        // FIXME: Remove switchinst special case.
        isa<SwitchInst>(*UI))
      return true;
  return false;
}
/// isOnlyUsedInEntryBlock - If the specified argument is only used in the
/// entry block, return true. This includes arguments used by switches, since
/// the switch may expand into multiple basic blocks.
static bool isOnlyUsedInEntryBlock(Argument *A, bool EnableFastISel) {
  // With FastISel active, we may be splitting blocks, so force creation
  // of virtual registers for all non-dead arguments.
  // Don't force virtual registers for byval arguments though, because
  // fast-isel can't handle those in all cases.
  if (EnableFastISel && !A->hasByValAttr())
    return A->use_empty();

  BasicBlock *Entry = A->getParent()->begin();
  for (Value::use_iterator UI = A->use_begin(), E = A->use_end(); UI != E; ++UI)
    if (cast<Instruction>(*UI)->getParent() != Entry || isa<SwitchInst>(*UI))
      return false;  // Use not in entry block.
  return true;
}
FunctionLoweringInfo::FunctionLoweringInfo(TargetLowering &tli)
  : TLI(tli) {
}

void FunctionLoweringInfo::set(Function &fn, MachineFunction &mf,
                               bool EnableFastISel) {
  Fn = &fn;
  MF = &mf;
  RegInfo = &MF->getRegInfo();

  // Create a vreg for each argument register that is not dead and is used
  // outside of the entry block for the function.
  for (Function::arg_iterator AI = Fn->arg_begin(), E = Fn->arg_end();
       AI != E; ++AI)
    if (!isOnlyUsedInEntryBlock(AI, EnableFastISel))
      InitializeRegForValue(AI);

  // Initialize the mapping of values to registers. This is only set up for
  // instruction values that are used outside of the block that defines
  // them.
  Function::iterator BB = Fn->begin(), EB = Fn->end();
  for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I)
    if (AllocaInst *AI = dyn_cast<AllocaInst>(I))
      if (ConstantInt *CUI = dyn_cast<ConstantInt>(AI->getArraySize())) {
        const Type *Ty = AI->getAllocatedType();
        uint64_t TySize = TLI.getTargetData()->getABITypeSize(Ty);
        unsigned Align =
          std::max((unsigned)TLI.getTargetData()->getPrefTypeAlignment(Ty),
                   AI->getAlignment());

        TySize *= CUI->getZExtValue();   // Get total allocated size.
        if (TySize == 0) TySize = 1; // Don't create zero-sized stack objects.
        StaticAllocaMap[AI] =
          MF->getFrameInfo()->CreateStackObject(TySize, Align);
      }

  for (; BB != EB; ++BB)
    for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I)
      if (!I->use_empty() && isUsedOutsideOfDefiningBlock(I))
        if (!isa<AllocaInst>(I) ||
            !StaticAllocaMap.count(cast<AllocaInst>(I)))
          InitializeRegForValue(I);

  // Create an initial MachineBasicBlock for each LLVM BasicBlock in F. This
  // also creates the initial PHI MachineInstrs, though none of the input
  // operands are populated.
  for (BB = Fn->begin(), EB = Fn->end(); BB != EB; ++BB) {
    MachineBasicBlock *MBB = mf.CreateMachineBasicBlock(BB);
    MBBMap[BB] = MBB;
    MF->push_back(MBB);

    // Create Machine PHI nodes for LLVM PHI nodes, lowering them as
    // appropriate.
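    // For example, a PHI of type i64 on a target where i64 is expanded into
    // two i32 registers gets two consecutive machine PHI nodes here, one per
    // register half; their operands are filled in later by SDISel.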
    PHINode *PN;
    for (BasicBlock::iterator I = BB->begin();(PN = dyn_cast<PHINode>(I)); ++I){
      if (PN->use_empty()) continue;

      unsigned PHIReg = ValueMap[PN];
      assert(PHIReg && "PHI node does not have an assigned virtual register!");

      SmallVector<MVT, 4> ValueVTs;
      ComputeValueVTs(TLI, PN->getType(), ValueVTs);
      for (unsigned vti = 0, vte = ValueVTs.size(); vti != vte; ++vti) {
        MVT VT = ValueVTs[vti];
        unsigned NumRegisters = TLI.getNumRegisters(VT);
        const TargetInstrInfo *TII = MF->getTarget().getInstrInfo();
        for (unsigned i = 0; i != NumRegisters; ++i)
          BuildMI(MBB, TII->get(TargetInstrInfo::PHI), PHIReg+i);
        PHIReg += NumRegisters;
      }
    }
  }
}
unsigned FunctionLoweringInfo::MakeReg(MVT VT) {
  return RegInfo->createVirtualRegister(TLI.getRegClassFor(VT));
}
/// CreateRegForValue - Allocate the appropriate number of virtual registers of
/// the correctly promoted or expanded types. Assign these registers
/// consecutive vreg numbers and return the first assigned number.
///
/// In the case that the given value has struct or array type, this function
/// will assign registers for each member or element.
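///
/// For example, a value of type { i64, float } on a target with legal i32 and
/// f32 registers gets three consecutive virtual registers: two i32 registers
/// for the i64 member and one f32 register for the float member.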
unsigned FunctionLoweringInfo::CreateRegForValue(const Value *V) {
  SmallVector<MVT, 4> ValueVTs;
  ComputeValueVTs(TLI, V->getType(), ValueVTs);

  unsigned FirstReg = 0;
  for (unsigned Value = 0, e = ValueVTs.size(); Value != e; ++Value) {
    MVT ValueVT = ValueVTs[Value];
    MVT RegisterVT = TLI.getRegisterType(ValueVT);

    unsigned NumRegs = TLI.getNumRegisters(ValueVT);
    for (unsigned i = 0; i != NumRegs; ++i) {
      unsigned R = MakeReg(RegisterVT);
      if (!FirstReg) FirstReg = R;
    }
  }
  return FirstReg;
}
/// getCopyFromParts - Create a value that contains the specified legal parts
/// combined into the value they represent. If the parts combine to a type
/// larger than ValueVT then AssertOp can be used to specify whether the extra
/// bits are known to be zero (ISD::AssertZext) or sign extended from ValueVT
/// (ISD::AssertSext).
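///
/// For example, three i32 parts assemble into an i96 value as follows: the
/// two round (power-of-2) parts are combined into an i64 with BUILD_PAIR,
/// the odd third part is extended and shifted into the top bits, and the two
/// pieces are OR'd together.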
static SDValue getCopyFromParts(SelectionDAG &DAG,
                                const SDValue *Parts,
                                unsigned NumParts,
                                MVT PartVT,
                                MVT ValueVT,
                                ISD::NodeType AssertOp = ISD::DELETED_NODE) {
  assert(NumParts > 0 && "No parts to assemble!");
  TargetLowering &TLI = DAG.getTargetLoweringInfo();
  SDValue Val = Parts[0];

  if (NumParts > 1) {
    // Assemble the value from multiple parts.
    if (!ValueVT.isVector()) {
      unsigned PartBits = PartVT.getSizeInBits();
      unsigned ValueBits = ValueVT.getSizeInBits();

      // Assemble the power of 2 part.
      unsigned RoundParts = NumParts & (NumParts - 1) ?
        1 << Log2_32(NumParts) : NumParts;
      unsigned RoundBits = PartBits * RoundParts;
      MVT RoundVT = RoundBits == ValueBits ?
        ValueVT : MVT::getIntegerVT(RoundBits);
      SDValue Lo, Hi;

      MVT HalfVT = ValueVT.isInteger() ?
        MVT::getIntegerVT(RoundBits/2) :
        MVT::getFloatingPointVT(RoundBits/2);

      if (RoundParts > 2) {
        Lo = getCopyFromParts(DAG, Parts, RoundParts/2, PartVT, HalfVT);
        Hi = getCopyFromParts(DAG, Parts+RoundParts/2, RoundParts/2,
                              PartVT, HalfVT);
      } else {
        Lo = DAG.getNode(ISD::BIT_CONVERT, HalfVT, Parts[0]);
        Hi = DAG.getNode(ISD::BIT_CONVERT, HalfVT, Parts[1]);
      }
      if (TLI.isBigEndian())
        std::swap(Lo, Hi);
      Val = DAG.getNode(ISD::BUILD_PAIR, RoundVT, Lo, Hi);

      if (RoundParts < NumParts) {
        // Assemble the trailing non-power-of-2 part.
        unsigned OddParts = NumParts - RoundParts;
        MVT OddVT = MVT::getIntegerVT(OddParts * PartBits);
        Hi = getCopyFromParts(DAG, Parts+RoundParts, OddParts, PartVT, OddVT);

        // Combine the round and odd parts.
        Lo = Val;
        if (TLI.isBigEndian())
          std::swap(Lo, Hi);
        MVT TotalVT = MVT::getIntegerVT(NumParts * PartBits);
        Hi = DAG.getNode(ISD::ANY_EXTEND, TotalVT, Hi);
        Hi = DAG.getNode(ISD::SHL, TotalVT, Hi,
                         DAG.getConstant(Lo.getValueType().getSizeInBits(),
                                         TLI.getShiftAmountTy()));
        Lo = DAG.getNode(ISD::ZERO_EXTEND, TotalVT, Lo);
        Val = DAG.getNode(ISD::OR, TotalVT, Lo, Hi);
      }
    } else {
      // Handle a multi-element vector.
      MVT IntermediateVT, RegisterVT;
      unsigned NumIntermediates;
      unsigned NumRegs =
        TLI.getVectorTypeBreakdown(ValueVT, IntermediateVT, NumIntermediates,
                                   RegisterVT);
      assert(NumRegs == NumParts && "Part count doesn't match vector breakdown!");
      NumParts = NumRegs; // Silence a compiler warning.
      assert(RegisterVT == PartVT && "Part type doesn't match vector breakdown!");
      assert(RegisterVT == Parts[0].getValueType() &&
             "Part type doesn't match part!");

      // Assemble the parts into intermediate operands.
      SmallVector<SDValue, 8> Ops(NumIntermediates);
      if (NumIntermediates == NumParts) {
        // If the register was not expanded, truncate or copy the value,
        // as appropriate.
        for (unsigned i = 0; i != NumParts; ++i)
          Ops[i] = getCopyFromParts(DAG, &Parts[i], 1,
                                    PartVT, IntermediateVT);
      } else if (NumParts > 0) {
        // If the intermediate type was expanded, build the intermediate operands
        // from the parts.
        assert(NumParts % NumIntermediates == 0 &&
               "Must expand into a divisible number of parts!");
        unsigned Factor = NumParts / NumIntermediates;
        for (unsigned i = 0; i != NumIntermediates; ++i)
          Ops[i] = getCopyFromParts(DAG, &Parts[i * Factor], Factor,
                                    PartVT, IntermediateVT);
      }

      // Build a vector with BUILD_VECTOR or CONCAT_VECTORS from the intermediate
      // operands.
      Val = DAG.getNode(IntermediateVT.isVector() ?
                        ISD::CONCAT_VECTORS : ISD::BUILD_VECTOR,
                        ValueVT, &Ops[0], NumIntermediates);
    }
  }

  // There is now one part, held in Val. Correct it to match ValueVT.
  PartVT = Val.getValueType();

  if (PartVT == ValueVT)
    return Val;

  if (PartVT.isVector()) {
    assert(ValueVT.isVector() && "Unknown vector conversion!");
    return DAG.getNode(ISD::BIT_CONVERT, ValueVT, Val);
  }

  if (ValueVT.isVector()) {
    assert(ValueVT.getVectorElementType() == PartVT &&
           ValueVT.getVectorNumElements() == 1 &&
           "Only trivial scalar-to-vector conversions should get here!");
    return DAG.getNode(ISD::BUILD_VECTOR, ValueVT, Val);
  }

  if (PartVT.isInteger() &&
      ValueVT.isInteger()) {
    if (ValueVT.bitsLT(PartVT)) {
      // For a truncate, see if we have any information to
      // indicate whether the truncated bits will always be
      // zero or sign-extension.
      if (AssertOp != ISD::DELETED_NODE)
        Val = DAG.getNode(AssertOp, PartVT, Val,
                          DAG.getValueType(ValueVT));
      return DAG.getNode(ISD::TRUNCATE, ValueVT, Val);
    } else {
      return DAG.getNode(ISD::ANY_EXTEND, ValueVT, Val);
    }
  }

  if (PartVT.isFloatingPoint() && ValueVT.isFloatingPoint()) {
    if (ValueVT.bitsLT(Val.getValueType()))
      // FP_ROUND's are always exact here.
      return DAG.getNode(ISD::FP_ROUND, ValueVT, Val,
                         DAG.getIntPtrConstant(1));
    return DAG.getNode(ISD::FP_EXTEND, ValueVT, Val);
  }

  if (PartVT.getSizeInBits() == ValueVT.getSizeInBits())
    return DAG.getNode(ISD::BIT_CONVERT, ValueVT, Val);

  assert(0 && "Unknown mismatch!");
  return SDValue();
}
/// getCopyToParts - Create a series of nodes that contain the specified value
/// split into legal parts. If the parts contain more bits than Val, then, for
/// integers, ExtendKind can be used to specify how to generate the extra bits.
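///
/// For example, splitting an i64 value into two i32 parts bit-converts the
/// value and then peels off the halves with EXTRACT_ELEMENT: Parts[0]
/// receives bits 0-31 and Parts[1] bits 32-63, with the part order reversed
/// afterwards on big-endian targets.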
static void getCopyToParts(SelectionDAG &DAG, SDValue Val,
                           SDValue *Parts, unsigned NumParts, MVT PartVT,
                           ISD::NodeType ExtendKind = ISD::ANY_EXTEND) {
  TargetLowering &TLI = DAG.getTargetLoweringInfo();
  MVT PtrVT = TLI.getPointerTy();
  MVT ValueVT = Val.getValueType();
  unsigned PartBits = PartVT.getSizeInBits();
  assert(TLI.isTypeLegal(PartVT) && "Copying to an illegal type!");

  if (!NumParts)
    return;

  if (!ValueVT.isVector()) {
    if (PartVT == ValueVT) {
      assert(NumParts == 1 && "No-op copy with multiple parts!");
      Parts[0] = Val;
      return;
    }

    if (NumParts * PartBits > ValueVT.getSizeInBits()) {
      // If the parts cover more bits than the value has, promote the value.
      if (PartVT.isFloatingPoint() && ValueVT.isFloatingPoint()) {
        assert(NumParts == 1 && "Do not know what to promote to!");
        Val = DAG.getNode(ISD::FP_EXTEND, PartVT, Val);
      } else if (PartVT.isInteger() && ValueVT.isInteger()) {
        ValueVT = MVT::getIntegerVT(NumParts * PartBits);
        Val = DAG.getNode(ExtendKind, ValueVT, Val);
      } else {
        assert(0 && "Unknown mismatch!");
      }
    } else if (PartBits == ValueVT.getSizeInBits()) {
      // Different types of the same size.
      assert(NumParts == 1 && PartVT != ValueVT);
      Val = DAG.getNode(ISD::BIT_CONVERT, PartVT, Val);
    } else if (NumParts * PartBits < ValueVT.getSizeInBits()) {
      // If the parts cover fewer bits than the value has, truncate the value.
      if (PartVT.isInteger() && ValueVT.isInteger()) {
        ValueVT = MVT::getIntegerVT(NumParts * PartBits);
        Val = DAG.getNode(ISD::TRUNCATE, ValueVT, Val);
      } else {
        assert(0 && "Unknown mismatch!");
      }
    }

    // The value may have changed - recompute ValueVT.
    ValueVT = Val.getValueType();
    assert(NumParts * PartBits == ValueVT.getSizeInBits() &&
           "Failed to tile the value with PartVT!");

    if (NumParts == 1) {
      assert(PartVT == ValueVT && "Type conversion failed!");
      Parts[0] = Val;
      return;
    }

    // Expand the value into multiple parts.
    if (NumParts & (NumParts - 1)) {
      // The number of parts is not a power of 2. Split off and copy the tail.
      assert(PartVT.isInteger() && ValueVT.isInteger() &&
             "Do not know what to expand to!");
      unsigned RoundParts = 1 << Log2_32(NumParts);
      unsigned RoundBits = RoundParts * PartBits;
      unsigned OddParts = NumParts - RoundParts;
      SDValue OddVal = DAG.getNode(ISD::SRL, ValueVT, Val,
                                   DAG.getConstant(RoundBits,
                                                   TLI.getShiftAmountTy()));
      getCopyToParts(DAG, OddVal, Parts + RoundParts, OddParts, PartVT);
      if (TLI.isBigEndian())
        // The odd parts were reversed by getCopyToParts - unreverse them.
        std::reverse(Parts + RoundParts, Parts + NumParts);
      NumParts = RoundParts;
      ValueVT = MVT::getIntegerVT(NumParts * PartBits);
      Val = DAG.getNode(ISD::TRUNCATE, ValueVT, Val);
    }

    // The number of parts is a power of 2. Repeatedly bisect the value using
    // EXTRACT_ELEMENT.
    Parts[0] = DAG.getNode(ISD::BIT_CONVERT,
                           MVT::getIntegerVT(ValueVT.getSizeInBits()),
                           Val);
    for (unsigned StepSize = NumParts; StepSize > 1; StepSize /= 2) {
      for (unsigned i = 0; i < NumParts; i += StepSize) {
        unsigned ThisBits = StepSize * PartBits / 2;
        MVT ThisVT = MVT::getIntegerVT (ThisBits);
        SDValue &Part0 = Parts[i];
        SDValue &Part1 = Parts[i+StepSize/2];

        Part1 = DAG.getNode(ISD::EXTRACT_ELEMENT, ThisVT, Part0,
                            DAG.getConstant(1, PtrVT));
        Part0 = DAG.getNode(ISD::EXTRACT_ELEMENT, ThisVT, Part0,
                            DAG.getConstant(0, PtrVT));

        if (ThisBits == PartBits && ThisVT != PartVT) {
          Part0 = DAG.getNode(ISD::BIT_CONVERT, PartVT, Part0);
          Part1 = DAG.getNode(ISD::BIT_CONVERT, PartVT, Part1);
        }
      }
    }

    if (TLI.isBigEndian())
      std::reverse(Parts, Parts + NumParts);

    return;
  }

  // Vector ValueVT.
  if (NumParts == 1) {
    if (PartVT != ValueVT) {
      if (PartVT.isVector()) {
        Val = DAG.getNode(ISD::BIT_CONVERT, PartVT, Val);
      } else {
        assert(ValueVT.getVectorElementType() == PartVT &&
               ValueVT.getVectorNumElements() == 1 &&
               "Only trivial vector-to-scalar conversions should get here!");
        Val = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, PartVT, Val,
                          DAG.getConstant(0, PtrVT));
      }
    }

    Parts[0] = Val;
    return;
  }

  // Handle a multi-element vector.
  MVT IntermediateVT, RegisterVT;
  unsigned NumIntermediates;
  unsigned NumRegs =
    DAG.getTargetLoweringInfo()
       .getVectorTypeBreakdown(ValueVT, IntermediateVT, NumIntermediates,
                               RegisterVT);
  unsigned NumElements = ValueVT.getVectorNumElements();

  assert(NumRegs == NumParts && "Part count doesn't match vector breakdown!");
  NumParts = NumRegs; // Silence a compiler warning.
  assert(RegisterVT == PartVT && "Part type doesn't match vector breakdown!");

  // Split the vector into intermediate operands.
  SmallVector<SDValue, 8> Ops(NumIntermediates);
  for (unsigned i = 0; i != NumIntermediates; ++i)
    if (IntermediateVT.isVector())
      Ops[i] = DAG.getNode(ISD::EXTRACT_SUBVECTOR,
                           IntermediateVT, Val,
                           DAG.getConstant(i * (NumElements / NumIntermediates),
                                           PtrVT));
    else
      Ops[i] = DAG.getNode(ISD::EXTRACT_VECTOR_ELT,
                           IntermediateVT, Val,
                           DAG.getConstant(i, PtrVT));

  // Split the intermediate operands into legal parts.
  if (NumParts == NumIntermediates) {
    // If the register was not expanded, promote or copy the value,
    // as appropriate.
    for (unsigned i = 0; i != NumParts; ++i)
      getCopyToParts(DAG, Ops[i], &Parts[i], 1, PartVT);
  } else if (NumParts > 0) {
    // If the intermediate type was expanded, split each value into
    // legal parts.
    assert(NumParts % NumIntermediates == 0 &&
           "Must expand into a divisible number of parts!");
    unsigned Factor = NumParts / NumIntermediates;
    for (unsigned i = 0; i != NumIntermediates; ++i)
      getCopyToParts(DAG, Ops[i], &Parts[i * Factor], Factor, PartVT);
  }
}
void SelectionDAGLowering::init(GCFunctionInfo *gfi, AliasAnalysis &aa) {
  AA = &aa;
  GFI = gfi;
  TD = DAG.getTarget().getTargetData();
}

/// clear - Clear out the current SelectionDAG and the associated
/// state and prepare this SelectionDAGLowering object to be used
/// for a new block. This doesn't clear out information about
/// additional blocks that are needed to complete switch lowering
/// or PHI node updating; that information is cleared out as it is
/// consumed.
void SelectionDAGLowering::clear() {
  NodeMap.clear();
  PendingLoads.clear();
  PendingExports.clear();
  DAG.clear();
}
/// getRoot - Return the current virtual root of the Selection DAG,
/// flushing any PendingLoad items. This must be done before emitting
/// a store or any other node that may need to be ordered after any
/// prior load instructions.
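///
/// A TokenFactor takes multiple chains as operands and produces one chain;
/// making it the root orders a subsequent store after all of the pending
/// loads without imposing an order among the loads themselves.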
///
SDValue SelectionDAGLowering::getRoot() {
  if (PendingLoads.empty())
    return DAG.getRoot();

  if (PendingLoads.size() == 1) {
    SDValue Root = PendingLoads[0];
    DAG.setRoot(Root);
    PendingLoads.clear();
    return Root;
  }

  // Otherwise, we have to make a token factor node.
  SDValue Root = DAG.getNode(ISD::TokenFactor, MVT::Other,
                             &PendingLoads[0], PendingLoads.size());
  PendingLoads.clear();
  DAG.setRoot(Root);
  return Root;
}
/// getControlRoot - Similar to getRoot, but instead of flushing all the
/// PendingLoad items, flush all the PendingExports items. It is necessary
/// to do this before emitting a terminator instruction.
///
SDValue SelectionDAGLowering::getControlRoot() {
  SDValue Root = DAG.getRoot();

  if (PendingExports.empty())
    return Root;

  // Turn all of the CopyToReg chains into one factored node.
  if (Root.getOpcode() != ISD::EntryToken) {
    unsigned i = 0, e = PendingExports.size();
    for (; i != e; ++i) {
      assert(PendingExports[i].getNode()->getNumOperands() > 1);
      if (PendingExports[i].getNode()->getOperand(0) == Root)
        break;  // Don't add the root if we already indirectly depend on it.
    }

    if (i == e)
      PendingExports.push_back(Root);
  }

  Root = DAG.getNode(ISD::TokenFactor, MVT::Other,
                     &PendingExports[0],
                     PendingExports.size());
  PendingExports.clear();
  DAG.setRoot(Root);
  return Root;
}
void SelectionDAGLowering::visit(Instruction &I) {
  visit(I.getOpcode(), I);
}

void SelectionDAGLowering::visit(unsigned Opcode, User &I) {
  // Note: this doesn't use InstVisitor, because it has to work with
  // ConstantExpr's in addition to instructions.
  switch (Opcode) {
  default: assert(0 && "Unknown instruction type encountered!");
           abort();
    // Build the switch statement using the Instruction.def file.
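    // For example, the Instruction.def entry for Add expands the macro below
    // to:
    //   case Instruction::Add: return visitAdd((BinaryOperator&)I);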
#define HANDLE_INST(NUM, OPCODE, CLASS) \
  case Instruction::OPCODE:return visit##OPCODE((CLASS&)I);
#include "llvm/Instruction.def"
  }
}
void SelectionDAGLowering::visitAdd(User &I) {
  if (I.getType()->isFPOrFPVector())
    visitBinary(I, ISD::FADD);
  else
    visitBinary(I, ISD::ADD);
}

void SelectionDAGLowering::visitMul(User &I) {
  if (I.getType()->isFPOrFPVector())
    visitBinary(I, ISD::FMUL);
  else
    visitBinary(I, ISD::MUL);
}
SDValue SelectionDAGLowering::getValue(const Value *V) {
  SDValue &N = NodeMap[V];
  if (N.getNode()) return N;

  if (Constant *C = const_cast<Constant*>(dyn_cast<Constant>(V))) {
    MVT VT = TLI.getValueType(V->getType(), true);

    if (ConstantInt *CI = dyn_cast<ConstantInt>(C))
      return N = DAG.getConstant(*CI, VT);

    if (GlobalValue *GV = dyn_cast<GlobalValue>(C))
      return N = DAG.getGlobalAddress(GV, VT);

    if (isa<ConstantPointerNull>(C))
      return N = DAG.getConstant(0, TLI.getPointerTy());

    if (ConstantFP *CFP = dyn_cast<ConstantFP>(C))
      return N = DAG.getConstantFP(*CFP, VT);

    if (isa<UndefValue>(C) && !isa<VectorType>(V->getType()) &&
        !V->getType()->isAggregateType())
      return N = DAG.getNode(ISD::UNDEF, VT);

    if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) {
      visit(CE->getOpcode(), *CE);
      SDValue N1 = NodeMap[V];
      assert(N1.getNode() && "visit didn't populate the ValueMap!");
      return N1;
    }

    if (isa<ConstantStruct>(C) || isa<ConstantArray>(C)) {
      SmallVector<SDValue, 4> Constants;
      for (User::const_op_iterator OI = C->op_begin(), OE = C->op_end();
           OI != OE; ++OI) {
        SDNode *Val = getValue(*OI).getNode();
        for (unsigned i = 0, e = Val->getNumValues(); i != e; ++i)
          Constants.push_back(SDValue(Val, i));
      }
      return DAG.getMergeValues(&Constants[0], Constants.size());
    }

    if (isa<StructType>(C->getType()) || isa<ArrayType>(C->getType())) {
      assert((isa<ConstantAggregateZero>(C) || isa<UndefValue>(C)) &&
             "Unknown struct or array constant!");

      SmallVector<MVT, 4> ValueVTs;
      ComputeValueVTs(TLI, C->getType(), ValueVTs);
      unsigned NumElts = ValueVTs.size();
      if (NumElts == 0)
        return SDValue(); // empty struct
      SmallVector<SDValue, 4> Constants(NumElts);
      for (unsigned i = 0; i != NumElts; ++i) {
        MVT EltVT = ValueVTs[i];
        if (isa<UndefValue>(C))
          Constants[i] = DAG.getNode(ISD::UNDEF, EltVT);
        else if (EltVT.isFloatingPoint())
          Constants[i] = DAG.getConstantFP(0, EltVT);
        else
          Constants[i] = DAG.getConstant(0, EltVT);
      }
      return DAG.getMergeValues(&Constants[0], NumElts);
    }

    const VectorType *VecTy = cast<VectorType>(V->getType());
    unsigned NumElements = VecTy->getNumElements();

    // Now that we know the number and type of the elements, get that number of
    // elements into the Ops array based on what kind of constant it is.
    SmallVector<SDValue, 16> Ops;
    if (ConstantVector *CP = dyn_cast<ConstantVector>(C)) {
      for (unsigned i = 0; i != NumElements; ++i)
        Ops.push_back(getValue(CP->getOperand(i)));
    } else {
      assert((isa<ConstantAggregateZero>(C) || isa<UndefValue>(C)) &&
             "Unknown vector constant!");
      MVT EltVT = TLI.getValueType(VecTy->getElementType());

      SDValue Op;
      if (isa<UndefValue>(C))
        Op = DAG.getNode(ISD::UNDEF, EltVT);
      else if (EltVT.isFloatingPoint())
        Op = DAG.getConstantFP(0, EltVT);
      else
        Op = DAG.getConstant(0, EltVT);
      Ops.assign(NumElements, Op);
    }

    // Create a BUILD_VECTOR node.
    return NodeMap[V] = DAG.getNode(ISD::BUILD_VECTOR, VT, &Ops[0], Ops.size());
  }

  // If this is a static alloca, generate it as the frameindex instead of
  // regular lowering.
  if (const AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
    DenseMap<const AllocaInst*, int>::iterator SI =
      FuncInfo.StaticAllocaMap.find(AI);
    if (SI != FuncInfo.StaticAllocaMap.end())
      return DAG.getFrameIndex(SI->second, TLI.getPointerTy());
  }

  unsigned InReg = FuncInfo.ValueMap[V];
  assert(InReg && "Value not in map!");

  RegsForValue RFV(TLI, InReg, V->getType());
  SDValue Chain = DAG.getEntryNode();
  return RFV.getCopyFromRegs(DAG, Chain, NULL);
}
void SelectionDAGLowering::visitRet(ReturnInst &I) {
  if (I.getNumOperands() == 0) {
    DAG.setRoot(DAG.getNode(ISD::RET, MVT::Other, getControlRoot()));
    return;
  }

  SmallVector<SDValue, 8> NewValues;
  NewValues.push_back(getControlRoot());
  for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i) {
    SmallVector<MVT, 4> ValueVTs;
    ComputeValueVTs(TLI, I.getOperand(i)->getType(), ValueVTs);
    unsigned NumValues = ValueVTs.size();
    if (NumValues == 0) continue;

    SDValue RetOp = getValue(I.getOperand(i));
    for (unsigned j = 0, f = NumValues; j != f; ++j) {
      MVT VT = ValueVTs[j];

      // FIXME: C calling convention requires the return type to be promoted to
      // at least 32-bit. But this is not necessary for non-C calling
      // conventions.
923 MVT MinVT = TLI.getRegisterType(MVT::i32);
924 if (VT.bitsLT(MinVT))
928 unsigned NumParts = TLI.getNumRegisters(VT);
929 MVT PartVT = TLI.getRegisterType(VT);
930 SmallVector<SDValue, 4> Parts(NumParts);
931 ISD::NodeType ExtendKind = ISD::ANY_EXTEND;
933 const Function *F = I.getParent()->getParent();
934 if (F->paramHasAttr(0, Attribute::SExt))
935 ExtendKind = ISD::SIGN_EXTEND;
936 else if (F->paramHasAttr(0, Attribute::ZExt))
937 ExtendKind = ISD::ZERO_EXTEND;
939 getCopyToParts(DAG, SDValue(RetOp.getNode(), RetOp.getResNo() + j),
940 &Parts[0], NumParts, PartVT, ExtendKind);
942 // 'inreg' on function refers to return value
943 ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy();
944 if (F->paramHasAttr(0, Attribute::InReg))
946 for (unsigned i = 0; i < NumParts; ++i) {
947 NewValues.push_back(Parts[i]);
948 NewValues.push_back(DAG.getArgFlags(Flags));
952 DAG.setRoot(DAG.getNode(ISD::RET, MVT::Other,
953 &NewValues[0], NewValues.size()));
/// ExportFromCurrentBlock - If this condition isn't known to be exported from
/// the current basic block, add it to ValueMap now so that we'll get a
/// CopyTo/FromReg.
void SelectionDAGLowering::ExportFromCurrentBlock(Value *V) {
  // No need to export constants.
  if (!isa<Instruction>(V) && !isa<Argument>(V)) return;

  // Already exported?
  if (FuncInfo.isExportedInst(V)) return;

  unsigned Reg = FuncInfo.InitializeRegForValue(V);
  CopyValueToVirtualRegister(V, Reg);
}
bool SelectionDAGLowering::isExportableFromCurrentBlock(Value *V,
                                                        const BasicBlock *FromBB) {
  // The operands of the setcc have to be in this block. We don't know
  // how to export them from some other block.
  if (Instruction *VI = dyn_cast<Instruction>(V)) {
    // Can export from current BB.
    if (VI->getParent() == FromBB)
      return true;

    // Is already exported, noop.
    return FuncInfo.isExportedInst(V);
  }

  // If this is an argument, we can export it if the BB is the entry block or
  // if it is already exported.
  if (isa<Argument>(V)) {
    if (FromBB == &FromBB->getParent()->getEntryBlock())
      return true;

    // Otherwise, can only export this if it is already exported.
    return FuncInfo.isExportedInst(V);
  }

  // Otherwise, constants can always be exported.
  return true;
}

static bool InBlock(const Value *V, const BasicBlock *BB) {
  if (const Instruction *I = dyn_cast<Instruction>(V))
    return I->getParent() == BB;
  return true;
}
/// getFCmpCondCode - Return the ISD condition code corresponding to
/// the given LLVM IR floating-point condition code. This includes
/// consideration of global floating-point math flags.
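///
/// The ordered/unordered distinction matters only when NaNs are possible:
/// FCMP_OEQ must normally lower to the ordered SETOEQ, but when finite-only
/// FP math is enabled the plain SETEQ can be used instead, which more
/// targets handle directly.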
///
static ISD::CondCode getFCmpCondCode(FCmpInst::Predicate Pred) {
  ISD::CondCode FPC, FOC;
  switch (Pred) {
  case FCmpInst::FCMP_FALSE: FOC = FPC = ISD::SETFALSE; break;
  case FCmpInst::FCMP_OEQ:   FOC = ISD::SETEQ; FPC = ISD::SETOEQ; break;
  case FCmpInst::FCMP_OGT:   FOC = ISD::SETGT; FPC = ISD::SETOGT; break;
  case FCmpInst::FCMP_OGE:   FOC = ISD::SETGE; FPC = ISD::SETOGE; break;
  case FCmpInst::FCMP_OLT:   FOC = ISD::SETLT; FPC = ISD::SETOLT; break;
  case FCmpInst::FCMP_OLE:   FOC = ISD::SETLE; FPC = ISD::SETOLE; break;
  case FCmpInst::FCMP_ONE:   FOC = ISD::SETNE; FPC = ISD::SETONE; break;
  case FCmpInst::FCMP_ORD:   FOC = FPC = ISD::SETO;   break;
  case FCmpInst::FCMP_UNO:   FOC = FPC = ISD::SETUO;  break;
  case FCmpInst::FCMP_UEQ:   FOC = ISD::SETEQ; FPC = ISD::SETUEQ; break;
  case FCmpInst::FCMP_UGT:   FOC = ISD::SETGT; FPC = ISD::SETUGT; break;
  case FCmpInst::FCMP_UGE:   FOC = ISD::SETGE; FPC = ISD::SETUGE; break;
  case FCmpInst::FCMP_ULT:   FOC = ISD::SETLT; FPC = ISD::SETULT; break;
  case FCmpInst::FCMP_ULE:   FOC = ISD::SETLE; FPC = ISD::SETULE; break;
  case FCmpInst::FCMP_UNE:   FOC = ISD::SETNE; FPC = ISD::SETUNE; break;
  case FCmpInst::FCMP_TRUE:  FOC = FPC = ISD::SETTRUE; break;
  default:
    assert(0 && "Invalid FCmp predicate opcode!");
    FOC = FPC = ISD::SETFALSE;
    break;
  }
  if (FiniteOnlyFPMath())
    return FOC;
  else
    return FPC;
}
/// getICmpCondCode - Return the ISD condition code corresponding to
/// the given LLVM IR integer condition code.
///
static ISD::CondCode getICmpCondCode(ICmpInst::Predicate Pred) {
  switch (Pred) {
  case ICmpInst::ICMP_EQ:  return ISD::SETEQ;
  case ICmpInst::ICMP_NE:  return ISD::SETNE;
  case ICmpInst::ICMP_SLE: return ISD::SETLE;
  case ICmpInst::ICMP_ULE: return ISD::SETULE;
  case ICmpInst::ICMP_SGE: return ISD::SETGE;
  case ICmpInst::ICMP_UGE: return ISD::SETUGE;
  case ICmpInst::ICMP_SLT: return ISD::SETLT;
  case ICmpInst::ICMP_ULT: return ISD::SETULT;
  case ICmpInst::ICMP_SGT: return ISD::SETGT;
  case ICmpInst::ICMP_UGT: return ISD::SETUGT;
  default:
    assert(0 && "Invalid ICmp predicate opcode!");
    return ISD::SETNE;
  }
}
/// EmitBranchForMergedCondition - Helper method for FindMergedConditions.
/// This function emits a branch and is used at the leaves of an OR or an
/// AND operator tree.
///
void
SelectionDAGLowering::EmitBranchForMergedCondition(Value *Cond,
                                                   MachineBasicBlock *TBB,
                                                   MachineBasicBlock *FBB,
                                                   MachineBasicBlock *CurBB) {
  const BasicBlock *BB = CurBB->getBasicBlock();

  // If the leaf of the tree is a comparison, merge the condition into
  // the caseblock.
  if (CmpInst *BOp = dyn_cast<CmpInst>(Cond)) {
    // The operands of the cmp have to be in this block. We don't know
    // how to export them from some other block. If this is the first block
    // of the sequence, no exporting is needed.
    if (CurBB == CurMBB ||
        (isExportableFromCurrentBlock(BOp->getOperand(0), BB) &&
         isExportableFromCurrentBlock(BOp->getOperand(1), BB))) {
      ISD::CondCode Condition;
      if (ICmpInst *IC = dyn_cast<ICmpInst>(Cond)) {
        Condition = getICmpCondCode(IC->getPredicate());
      } else if (FCmpInst *FC = dyn_cast<FCmpInst>(Cond)) {
        Condition = getFCmpCondCode(FC->getPredicate());
      } else {
        Condition = ISD::SETEQ; // silence warning.
        assert(0 && "Unknown compare instruction");
      }

      CaseBlock CB(Condition, BOp->getOperand(0),
                   BOp->getOperand(1), NULL, TBB, FBB, CurBB);
      SwitchCases.push_back(CB);
      return;
    }
  }

  // Create a CaseBlock record representing this branch.
  CaseBlock CB(ISD::SETEQ, Cond, ConstantInt::getTrue(),
               NULL, TBB, FBB, CurBB);
  SwitchCases.push_back(CB);
}
/// FindMergedConditions - If Cond is an expression like (X & Y) or (X | Y),
/// recursively emit a branch tree for its operands; otherwise emit the
/// condition as a single branch via EmitBranchForMergedCondition.
void SelectionDAGLowering::FindMergedConditions(Value *Cond,
                                                MachineBasicBlock *TBB,
                                                MachineBasicBlock *FBB,
                                                MachineBasicBlock *CurBB,
                                                unsigned Opc) {
  // If this node is not part of the or/and tree, emit it as a branch.
  Instruction *BOp = dyn_cast<Instruction>(Cond);
  if (!BOp || !(isa<BinaryOperator>(BOp) || isa<CmpInst>(BOp)) ||
      (unsigned)BOp->getOpcode() != Opc || !BOp->hasOneUse() ||
      BOp->getParent() != CurBB->getBasicBlock() ||
      !InBlock(BOp->getOperand(0), CurBB->getBasicBlock()) ||
      !InBlock(BOp->getOperand(1), CurBB->getBasicBlock())) {
    EmitBranchForMergedCondition(Cond, TBB, FBB, CurBB);
    return;
  }

  // Create TmpBB after CurBB.
  MachineFunction::iterator BBI = CurBB;
  MachineFunction &MF = DAG.getMachineFunction();
  MachineBasicBlock *TmpBB = MF.CreateMachineBasicBlock(CurBB->getBasicBlock());
  CurBB->getParent()->insert(++BBI, TmpBB);

  if (Opc == Instruction::Or) {
    // Codegen X | Y as:
    //   jmp_if_X TBB
    //   jmp TmpBB
    // TmpBB:
    //   jmp_if_Y TBB
    //   jmp FBB
    //

    // Emit the LHS condition.
    FindMergedConditions(BOp->getOperand(0), TBB, TmpBB, CurBB, Opc);

    // Emit the RHS condition into TmpBB.
    FindMergedConditions(BOp->getOperand(1), TBB, FBB, TmpBB, Opc);
  } else {
    assert(Opc == Instruction::And && "Unknown merge op!");
    // Codegen X & Y as:
    //   jmp_if_X TmpBB
    //   jmp FBB
    // TmpBB:
    //   jmp_if_Y TBB
    //   jmp FBB
    //
    //  This requires creation of TmpBB after CurBB.

    // Emit the LHS condition.
    FindMergedConditions(BOp->getOperand(0), TmpBB, FBB, CurBB, Opc);

    // Emit the RHS condition into TmpBB.
    FindMergedConditions(BOp->getOperand(1), TBB, FBB, TmpBB, Opc);
  }
}
/// If the set of cases should be emitted as a series of branches, return true.
/// If we should emit this as a bunch of and/or'd together conditions, return
/// false.
bool
SelectionDAGLowering::ShouldEmitAsBranches(const std::vector<CaseBlock> &Cases){
  if (Cases.size() != 2) return true;

  // If this is two comparisons of the same values or'd or and'd together, they
  // will get folded into a single comparison, so don't emit two blocks.
  if ((Cases[0].CmpLHS == Cases[1].CmpLHS &&
       Cases[0].CmpRHS == Cases[1].CmpRHS) ||
      (Cases[0].CmpRHS == Cases[1].CmpLHS &&
       Cases[0].CmpLHS == Cases[1].CmpRHS)) {
    return false;
  }

  return true;
}
void SelectionDAGLowering::visitBr(BranchInst &I) {
  // Update machine-CFG edges.
  MachineBasicBlock *Succ0MBB = FuncInfo.MBBMap[I.getSuccessor(0)];

  // Figure out which block is immediately after the current one.
  MachineBasicBlock *NextBlock = 0;
  MachineFunction::iterator BBI = CurMBB;
  if (++BBI != CurMBB->getParent()->end())
    NextBlock = BBI;

  if (I.isUnconditional()) {
    // Update machine-CFG edges.
    CurMBB->addSuccessor(Succ0MBB);

    // If this is not a fall-through branch, emit the branch.
    if (Succ0MBB != NextBlock)
      DAG.setRoot(DAG.getNode(ISD::BR, MVT::Other, getControlRoot(),
                              DAG.getBasicBlock(Succ0MBB)));
    return;
  }

  // If this condition is one of the special cases we handle, do special stuff
  // now.
  Value *CondVal = I.getCondition();
  MachineBasicBlock *Succ1MBB = FuncInfo.MBBMap[I.getSuccessor(1)];

  // If this is a series of conditions that are or'd or and'd together, emit
  // this as a sequence of branches instead of setcc's with and/or operations.
  // For example, instead of something like:
  //     cmp A, B
  //     C = seteq
  //     cmp D, E
  //     F = setle
  //     or C, F
  //     jnz foo
  // Emit:
  //     cmp A, B
  //     je foo
  //     cmp D, E
  //     jle foo
  //
  if (BinaryOperator *BOp = dyn_cast<BinaryOperator>(CondVal)) {
    if (BOp->hasOneUse() &&
        (BOp->getOpcode() == Instruction::And ||
         BOp->getOpcode() == Instruction::Or)) {
      FindMergedConditions(BOp, Succ0MBB, Succ1MBB, CurMBB, BOp->getOpcode());
      // If the compares in later blocks need to use values not currently
      // exported from this block, export them now. This block should always
      // be the first entry.
      assert(SwitchCases[0].ThisBB == CurMBB && "Unexpected lowering!");

      // Allow some cases to be rejected.
      if (ShouldEmitAsBranches(SwitchCases)) {
        for (unsigned i = 1, e = SwitchCases.size(); i != e; ++i) {
          ExportFromCurrentBlock(SwitchCases[i].CmpLHS);
          ExportFromCurrentBlock(SwitchCases[i].CmpRHS);
        }

        // Emit the branch for this block.
        visitSwitchCase(SwitchCases[0]);
        SwitchCases.erase(SwitchCases.begin());
        return;
      }

      // Okay, we decided not to do this, remove any inserted MBB's and clear
      // SwitchCases.
      for (unsigned i = 1, e = SwitchCases.size(); i != e; ++i)
        CurMBB->getParent()->erase(SwitchCases[i].ThisBB);

      SwitchCases.clear();
    }
  }

  // Create a CaseBlock record representing this branch.
  CaseBlock CB(ISD::SETEQ, CondVal, ConstantInt::getTrue(),
               NULL, Succ0MBB, Succ1MBB, CurMBB);
  // Use visitSwitchCase to actually insert the fast branch sequence for this
  // branch.
  visitSwitchCase(CB);
}
/// visitSwitchCase - Emits the necessary code to represent a single node in
/// the binary search tree resulting from lowering a switch instruction.
void SelectionDAGLowering::visitSwitchCase(CaseBlock &CB) {
  SDValue Cond;
  SDValue CondLHS = getValue(CB.CmpLHS);

  // Build the setcc now.
  if (CB.CmpMHS == NULL) {
    // Fold "(X == true)" to X and "(X == false)" to !X to
    // handle common cases produced by branch lowering.
    if (CB.CmpRHS == ConstantInt::getTrue() && CB.CC == ISD::SETEQ)
      Cond = CondLHS;
    else if (CB.CmpRHS == ConstantInt::getFalse() && CB.CC == ISD::SETEQ) {
      SDValue True = DAG.getConstant(1, CondLHS.getValueType());
      Cond = DAG.getNode(ISD::XOR, CondLHS.getValueType(), CondLHS, True);
    } else
      Cond = DAG.getSetCC(MVT::i1, CondLHS, getValue(CB.CmpRHS), CB.CC);
  } else {
    assert(CB.CC == ISD::SETLE && "Can handle only LE ranges now");

    uint64_t Low = cast<ConstantInt>(CB.CmpLHS)->getSExtValue();
    uint64_t High = cast<ConstantInt>(CB.CmpRHS)->getSExtValue();

    SDValue CmpOp = getValue(CB.CmpMHS);
    MVT VT = CmpOp.getValueType();

    if (cast<ConstantInt>(CB.CmpLHS)->isMinValue(true)) {
      Cond = DAG.getSetCC(MVT::i1, CmpOp, DAG.getConstant(High, VT), ISD::SETLE);
    } else {
      SDValue SUB = DAG.getNode(ISD::SUB, VT, CmpOp, DAG.getConstant(Low, VT));
      Cond = DAG.getSetCC(MVT::i1, SUB,
                          DAG.getConstant(High-Low, VT), ISD::SETULE);
    }
  }
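  // The subtraction above is the standard unsigned range-check trick: for a
  // case range [Low, High], "(X - Low) <=u (High - Low)" is a single
  // comparison that holds exactly when Low <= X <= High.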
  // Update successor info
  CurMBB->addSuccessor(CB.TrueBB);
  CurMBB->addSuccessor(CB.FalseBB);

  // Set NextBlock to be the MBB immediately after the current one, if any.
  // This is used to avoid emitting unnecessary branches to the next block.
  MachineBasicBlock *NextBlock = 0;
  MachineFunction::iterator BBI = CurMBB;
  if (++BBI != CurMBB->getParent()->end())
    NextBlock = BBI;

  // If the lhs block is the next block, invert the condition so that we can
  // fall through to the lhs instead of the rhs block.
  if (CB.TrueBB == NextBlock) {
    std::swap(CB.TrueBB, CB.FalseBB);
    SDValue True = DAG.getConstant(1, Cond.getValueType());
    Cond = DAG.getNode(ISD::XOR, Cond.getValueType(), Cond, True);
  }
  SDValue BrCond = DAG.getNode(ISD::BRCOND, MVT::Other, getControlRoot(), Cond,
                               DAG.getBasicBlock(CB.TrueBB));

  // If the branch was constant folded, fix up the CFG.
  if (BrCond.getOpcode() == ISD::BR) {
    CurMBB->removeSuccessor(CB.FalseBB);
    DAG.setRoot(BrCond);
  } else {
    // Otherwise, go ahead and insert the false branch.
    if (BrCond == getControlRoot())
      CurMBB->removeSuccessor(CB.TrueBB);

    if (CB.FalseBB == NextBlock)
      DAG.setRoot(BrCond);
    else
      DAG.setRoot(DAG.getNode(ISD::BR, MVT::Other, BrCond,
                              DAG.getBasicBlock(CB.FalseBB)));
  }
}
/// visitJumpTable - Emit JumpTable node in the current MBB
void SelectionDAGLowering::visitJumpTable(JumpTable &JT) {
  // Emit the code for the jump table
  assert(JT.Reg != -1U && "Should lower JT Header first!");
  MVT PTy = TLI.getPointerTy();
  SDValue Index = DAG.getCopyFromReg(getControlRoot(), JT.Reg, PTy);
  SDValue Table = DAG.getJumpTable(JT.JTI, PTy);
  DAG.setRoot(DAG.getNode(ISD::BR_JT, MVT::Other, Index.getValue(1),
                          Table, Index));
}
/// visitJumpTableHeader - This function emits necessary code to produce index
/// in the JumpTable from switch case.
void SelectionDAGLowering::visitJumpTableHeader(JumpTable &JT,
                                                JumpTableHeader &JTH) {
  // Subtract the lowest switch case value from the value being switched on
  // and conditional branch to default mbb if the result is greater than the
  // difference between smallest and largest cases.
  SDValue SwitchOp = getValue(JTH.SValue);
  MVT VT = SwitchOp.getValueType();
  SDValue SUB = DAG.getNode(ISD::SUB, VT, SwitchOp,
                            DAG.getConstant(JTH.First, VT));

  // The SDNode we just created, which holds the value being switched on
  // minus the smallest case value, needs to be copied to a virtual
  // register so it can be used as an index into the jump table in a
  // subsequent basic block. This value may be smaller or larger than the
  // target's pointer type, and therefore require extension or truncating.
  if (VT.bitsGT(TLI.getPointerTy()))
    SwitchOp = DAG.getNode(ISD::TRUNCATE, TLI.getPointerTy(), SUB);
  else
    SwitchOp = DAG.getNode(ISD::ZERO_EXTEND, TLI.getPointerTy(), SUB);

  unsigned JumpTableReg = FuncInfo.MakeReg(TLI.getPointerTy());
  SDValue CopyTo = DAG.getCopyToReg(getControlRoot(), JumpTableReg, SwitchOp);
  JT.Reg = JumpTableReg;

  // Emit the range check for the jump table, and branch to the default
  // block for the switch statement if the value being switched on exceeds
  // the largest case in the switch.
  SDValue CMP = DAG.getSetCC(TLI.getSetCCResultType(SUB), SUB,
                             DAG.getConstant(JTH.Last-JTH.First,VT),
                             ISD::SETUGT);

  // Set NextBlock to be the MBB immediately after the current one, if any.
  // This is used to avoid emitting unnecessary branches to the next block.
  MachineBasicBlock *NextBlock = 0;
  MachineFunction::iterator BBI = CurMBB;
  if (++BBI != CurMBB->getParent()->end())
    NextBlock = BBI;

  SDValue BrCond = DAG.getNode(ISD::BRCOND, MVT::Other, CopyTo, CMP,
                               DAG.getBasicBlock(JT.Default));

  if (JT.MBB == NextBlock)
    DAG.setRoot(BrCond);
  else
    DAG.setRoot(DAG.getNode(ISD::BR, MVT::Other, BrCond,
                            DAG.getBasicBlock(JT.MBB)));
}
/// visitBitTestHeader - This function emits necessary code to produce value
/// suitable for "bit tests"
void SelectionDAGLowering::visitBitTestHeader(BitTestBlock &B) {
  // Subtract the minimum value
  SDValue SwitchOp = getValue(B.SValue);
  MVT VT = SwitchOp.getValueType();
  SDValue SUB = DAG.getNode(ISD::SUB, VT, SwitchOp,
                            DAG.getConstant(B.First, VT));

  // Check range
  SDValue RangeCmp = DAG.getSetCC(TLI.getSetCCResultType(SUB), SUB,
                                  DAG.getConstant(B.Range, VT),
                                  ISD::SETUGT);

  SDValue ShiftOp;
  if (VT.bitsGT(TLI.getShiftAmountTy()))
    ShiftOp = DAG.getNode(ISD::TRUNCATE, TLI.getShiftAmountTy(), SUB);
  else
    ShiftOp = DAG.getNode(ISD::ZERO_EXTEND, TLI.getShiftAmountTy(), SUB);

  // Make desired shift
  SDValue SwitchVal = DAG.getNode(ISD::SHL, TLI.getPointerTy(),
                                  DAG.getConstant(1, TLI.getPointerTy()),
                                  ShiftOp);

  unsigned SwitchReg = FuncInfo.MakeReg(TLI.getPointerTy());
  SDValue CopyTo = DAG.getCopyToReg(getControlRoot(), SwitchReg, SwitchVal);
  B.Reg = SwitchReg;

  // Set NextBlock to be the MBB immediately after the current one, if any.
  // This is used to avoid emitting unnecessary branches to the next block.
  MachineBasicBlock *NextBlock = 0;
  MachineFunction::iterator BBI = CurMBB;
  if (++BBI != CurMBB->getParent()->end())
    NextBlock = BBI;

  MachineBasicBlock* MBB = B.Cases[0].ThisBB;

  CurMBB->addSuccessor(B.Default);
  CurMBB->addSuccessor(MBB);

  SDValue BrRange = DAG.getNode(ISD::BRCOND, MVT::Other, CopyTo, RangeCmp,
                                DAG.getBasicBlock(B.Default));

  if (MBB == NextBlock)
    DAG.setRoot(BrRange);
  else
    DAG.setRoot(DAG.getNode(ISD::BR, MVT::Other, CopyTo,
                            DAG.getBasicBlock(MBB)));
}
/// visitBitTestCase - this function produces one "bit test"
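///
/// For example, if cases 0, 3 and 5 of a switch share a destination, the
/// header block materializes (1 << (X - First)) and this block ANDs it with
/// the mask 0b101001; a nonzero result jumps to the shared target.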
void SelectionDAGLowering::visitBitTestCase(MachineBasicBlock* NextMBB,
                                            unsigned Reg,
                                            BitTestCase &B) {
  // Emit bit tests and jumps
  SDValue SwitchVal = DAG.getCopyFromReg(getControlRoot(), Reg,
                                         TLI.getPointerTy());

  SDValue AndOp = DAG.getNode(ISD::AND, TLI.getPointerTy(), SwitchVal,
                              DAG.getConstant(B.Mask, TLI.getPointerTy()));
  SDValue AndCmp = DAG.getSetCC(TLI.getSetCCResultType(AndOp), AndOp,
                                DAG.getConstant(0, TLI.getPointerTy()),
                                ISD::SETNE);

  CurMBB->addSuccessor(B.TargetBB);
  CurMBB->addSuccessor(NextMBB);

  SDValue BrAnd = DAG.getNode(ISD::BRCOND, MVT::Other, getControlRoot(),
                              AndCmp, DAG.getBasicBlock(B.TargetBB));

  // Set NextBlock to be the MBB immediately after the current one, if any.
  // This is used to avoid emitting unnecessary branches to the next block.
  MachineBasicBlock *NextBlock = 0;
  MachineFunction::iterator BBI = CurMBB;
  if (++BBI != CurMBB->getParent()->end())
    NextBlock = BBI;

  if (NextMBB == NextBlock)
    DAG.setRoot(BrAnd);
  else
    DAG.setRoot(DAG.getNode(ISD::BR, MVT::Other, BrAnd,
                            DAG.getBasicBlock(NextMBB)));
}
void SelectionDAGLowering::visitInvoke(InvokeInst &I) {
  // Retrieve successors.
  MachineBasicBlock *Return = FuncInfo.MBBMap[I.getSuccessor(0)];
  MachineBasicBlock *LandingPad = FuncInfo.MBBMap[I.getSuccessor(1)];

  if (isa<InlineAsm>(I.getCalledValue()))
    visitInlineAsm(&I);
  else
    LowerCallTo(&I, getValue(I.getOperand(0)), false, LandingPad);

  // If the value of the invoke is used outside of its defining block, make it
  // available as a virtual register.
  if (!I.use_empty()) {
    DenseMap<const Value*, unsigned>::iterator VMI = FuncInfo.ValueMap.find(&I);
    if (VMI != FuncInfo.ValueMap.end())
      CopyValueToVirtualRegister(&I, VMI->second);
  }

  // Update successor info
  CurMBB->addSuccessor(Return);
  CurMBB->addSuccessor(LandingPad);

  // Drop into normal successor.
  DAG.setRoot(DAG.getNode(ISD::BR, MVT::Other, getControlRoot(),
                          DAG.getBasicBlock(Return)));
}
void SelectionDAGLowering::visitUnwind(UnwindInst &I) {
}
/// handleSmallSwitchRange - Emit a series of specific tests (suitable for
/// small case ranges).
bool SelectionDAGLowering::handleSmallSwitchRange(CaseRec& CR,
                                                  CaseRecVector& WorkList,
                                                  Value* SV,
                                                  MachineBasicBlock* Default) {
  Case& BackCase = *(CR.Range.second-1);

  // Size is the number of Cases represented by this range.
  unsigned Size = CR.Range.second - CR.Range.first;
  if (Size > 3)
    return false;

  // Get the MachineFunction which holds the current MBB. This is used when
  // inserting any additional MBBs necessary to represent the switch.
  MachineFunction *CurMF = CurMBB->getParent();

  // Figure out which block is immediately after the current one.
  MachineBasicBlock *NextBlock = 0;
  MachineFunction::iterator BBI = CR.CaseBB;

  if (++BBI != CurMBB->getParent()->end())
    NextBlock = BBI;

  // TODO: If any two of the cases have the same destination, and if one value
  // is the same as the other, but has one bit unset that the other has set,
  // use bit manipulation to do two compares at once. For example:
  // "if (X == 6 || X == 4)" -> "if ((X|2) == 6)"

  // Rearrange the case blocks so that the last one falls through if possible.
  if (NextBlock && Default != NextBlock && BackCase.BB != NextBlock) {
    // The last case block won't fall through into 'NextBlock' if we emit the
    // branches in this order. See if rearranging a case value would help.
    for (CaseItr I = CR.Range.first, E = CR.Range.second-1; I != E; ++I) {
      if (I->BB == NextBlock) {
        std::swap(*I, BackCase);
        break;
      }
    }
  }

  // Create a CaseBlock record representing a conditional branch to
  // the Case's target mbb if the value being switched on SV is equal
  // to the case's value.
  MachineBasicBlock *CurBlock = CR.CaseBB;
  for (CaseItr I = CR.Range.first, E = CR.Range.second; I != E; ++I) {
    MachineBasicBlock *FallThrough;
    if (I != E-1) {
      FallThrough = CurMF->CreateMachineBasicBlock(CurBlock->getBasicBlock());
      CurMF->insert(BBI, FallThrough);
    } else {
      // If the last case doesn't match, go to the default block.
      FallThrough = Default;
    }

    Value *RHS, *LHS, *MHS;
    ISD::CondCode CC;
    if (I->High == I->Low) {
      // This is just a small case range :) containing exactly 1 case
      CC = ISD::SETEQ;
      LHS = SV; RHS = I->High; MHS = NULL;
    } else {
      CC = ISD::SETLE;
      LHS = I->Low; MHS = SV; RHS = I->High;
    }
    CaseBlock CB(CC, LHS, RHS, MHS, I->BB, FallThrough, CurBlock);

    // If emitting the first comparison, just call visitSwitchCase to emit the
    // code into the current block. Otherwise, push the CaseBlock onto the
    // vector to be later processed by SDISel, and insert the node's MBB
    // before the next MBB.
    if (CurBlock == CurMBB)
      visitSwitchCase(CB);
    else
      SwitchCases.push_back(CB);

    CurBlock = FallThrough;
  }

  return true;
}
static inline bool areJTsAllowed(const TargetLowering &TLI) {
  return !DisableJumpTables &&
         (TLI.isOperationLegal(ISD::BR_JT, MVT::Other) ||
          TLI.isOperationLegal(ISD::BRIND, MVT::Other));
}
/// handleJTSwitchCase - Emit jumptable for current switch case range
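///
/// For example, a switch over the values 1, 2, 4 and 5 covers 4 cases in a
/// range of 5, a density of 0.8, so it is lowered as a five-entry table
/// whose slot for the missing value 3 points at the default block.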
1602 bool SelectionDAGLowering::handleJTSwitchCase(CaseRec& CR,
1603 CaseRecVector& WorkList,
1605 MachineBasicBlock* Default) {
1606 Case& FrontCase = *CR.Range.first;
1607 Case& BackCase = *(CR.Range.second-1);
1609 int64_t First = cast<ConstantInt>(FrontCase.Low)->getSExtValue();
1610 int64_t Last = cast<ConstantInt>(BackCase.High)->getSExtValue();
1613 for (CaseItr I = CR.Range.first, E = CR.Range.second;
1617 if (!areJTsAllowed(TLI) || TSize <= 3)
1620 double Density = (double)TSize / (double)((Last - First) + 1ULL);
1624 DOUT << "Lowering jump table\n"
1625 << "First entry: " << First << ". Last entry: " << Last << "\n"
1626 << "Size: " << TSize << ". Density: " << Density << "\n\n";
1628 // Get the MachineFunction which holds the current MBB. This is used when
1629 // inserting any additional MBBs necessary to represent the switch.
1630 MachineFunction *CurMF = CurMBB->getParent();
1632 // Figure out which block is immediately after the current one.
1633 MachineBasicBlock *NextBlock = 0;
1634 MachineFunction::iterator BBI = CR.CaseBB;
1636 if (++BBI != CurMBB->getParent()->end())
1639 const BasicBlock *LLVMBB = CR.CaseBB->getBasicBlock();
1641 // Create a new basic block to hold the code for loading the address
1642 // of the jump table, and jumping to it. Update successor information;
1643 // we will either branch to the default case for the switch, or the jump
1645 MachineBasicBlock *JumpTableBB = CurMF->CreateMachineBasicBlock(LLVMBB);
1646 CurMF->insert(BBI, JumpTableBB);
1647 CR.CaseBB->addSuccessor(Default);
1648 CR.CaseBB->addSuccessor(JumpTableBB);
1650 // Build a vector of destination BBs, corresponding to each target
1651 // of the jump table. If the value of the jump table slot corresponds to
1652 // a case statement, push the case's BB onto the vector, otherwise, push
1654 std::vector<MachineBasicBlock*> DestBBs;
1655 int64_t TEI = First;
1656 for (CaseItr I = CR.Range.first, E = CR.Range.second; I != E; ++TEI) {
1657 int64_t Low = cast<ConstantInt>(I->Low)->getSExtValue();
1658 int64_t High = cast<ConstantInt>(I->High)->getSExtValue();
1660 if ((Low <= TEI) && (TEI <= High)) {
1661 DestBBs.push_back(I->BB);
1662 if (TEI==High)
1663 ++I;
1664 } else {
1665 DestBBs.push_back(Default);
1666 }
1667 }
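// Worked example (illustrative only, not from the original source): for cases
// {1 -> A, 2 -> A, 4 -> B} with default D, First = 1 and Last = 4, so the loop
// above fills DestBBs as
//
//   slot (TEI):  1  2  3  4
//   dest:        A  A  D  B
//
// and the emitted jump indexes this table with (SV - First).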
1669 // Update successor info. Add one edge to each unique successor.
1670 BitVector SuccsHandled(CR.CaseBB->getParent()->getNumBlockIDs());
1671 for (std::vector<MachineBasicBlock*>::iterator I = DestBBs.begin(),
1672 E = DestBBs.end(); I != E; ++I) {
1673 if (!SuccsHandled[(*I)->getNumber()]) {
1674 SuccsHandled[(*I)->getNumber()] = true;
1675 JumpTableBB->addSuccessor(*I);
1676 }
1677 }
1679 // Create a jump table index for this jump table, or return an existing
1680 // one.
1681 unsigned JTI = CurMF->getJumpTableInfo()->getJumpTableIndex(DestBBs);
1683 // Set the jump table information so that we can codegen it as a second
1684 // MachineBasicBlock
1685 JumpTable JT(-1U, JTI, JumpTableBB, Default);
1686 JumpTableHeader JTH(First, Last, SV, CR.CaseBB, (CR.CaseBB == CurMBB));
1687 if (CR.CaseBB == CurMBB)
1688 visitJumpTableHeader(JT, JTH);
1690 JTCases.push_back(JumpTableBlock(JTH, JT));
1692 return true;
1693 }
1695 /// handleBTSplitSwitchCase - emit comparison and split the binary search tree
1696 /// into two subtrees.
1697 bool SelectionDAGLowering::handleBTSplitSwitchCase(CaseRec& CR,
1698 CaseRecVector& WorkList,
1699 Value* SV,
1700 MachineBasicBlock* Default) {
1701 // Get the MachineFunction which holds the current MBB. This is used when
1702 // inserting any additional MBBs necessary to represent the switch.
1703 MachineFunction *CurMF = CurMBB->getParent();
1705 // Figure out which block is immediately after the current one.
1706 MachineBasicBlock *NextBlock = 0;
1707 MachineFunction::iterator BBI = CR.CaseBB;
1709 if (++BBI != CurMBB->getParent()->end())
1710 NextBlock = BBI;
1712 Case& FrontCase = *CR.Range.first;
1713 Case& BackCase = *(CR.Range.second-1);
1714 const BasicBlock *LLVMBB = CR.CaseBB->getBasicBlock();
1716 // Size is the number of Cases represented by this range.
1717 unsigned Size = CR.Range.second - CR.Range.first;
1719 int64_t First = cast<ConstantInt>(FrontCase.Low)->getSExtValue();
1720 int64_t Last = cast<ConstantInt>(BackCase.High)->getSExtValue();
1721 double FMetric = 0;
1722 CaseItr Pivot = CR.Range.first + Size/2;
1724 // Select optimal pivot, maximizing sum density of LHS and RHS. This will
1725 // (heuristically) allow us to emit JumpTables later.
1726 uint64_t TSize = 0;
1727 for (CaseItr I = CR.Range.first, E = CR.Range.second;
1728 I!=E; ++I)
1729 TSize += I->size();
1731 uint64_t LSize = FrontCase.size();
1732 uint64_t RSize = TSize-LSize;
1733 DOUT << "Selecting best pivot: \n"
1734 << "First: " << First << ", Last: " << Last <<"\n"
1735 << "LSize: " << LSize << ", RSize: " << RSize << "\n";
1736 for (CaseItr I = CR.Range.first, J=I+1, E = CR.Range.second;
1737 J!=E; ++I, ++J) {
1738 int64_t LEnd = cast<ConstantInt>(I->High)->getSExtValue();
1739 int64_t RBegin = cast<ConstantInt>(J->Low)->getSExtValue();
1740 assert((RBegin-LEnd>=1) && "Invalid case distance");
1741 double LDensity = (double)LSize / (double)((LEnd - First) + 1ULL);
1742 double RDensity = (double)RSize / (double)((Last - RBegin) + 1ULL);
1743 double Metric = Log2_64(RBegin-LEnd)*(LDensity+RDensity);
1744 // Should always split in some non-trivial place.
1745 DOUT <<"=>Step\n"
1746 << "LEnd: " << LEnd << ", RBegin: " << RBegin << "\n"
1747 << "LDensity: " << LDensity << ", RDensity: " << RDensity << "\n"
1748 << "Metric: " << Metric << "\n";
1749 if (FMetric < Metric) {
1750 Pivot = J;
1751 FMetric = Metric;
1752 DOUT << "Current metric set to: " << FMetric << "\n";
1753 }
1755 LSize += J->size();
1756 RSize -= J->size();
1757 }
1758 if (areJTsAllowed(TLI)) {
1759 // If our case is dense we *really* should handle it earlier!
1760 assert((FMetric > 0) && "Should handle dense range earlier!");
1761 } else {
1762 Pivot = CR.Range.first + Size/2;
1763 }
1765 CaseRange LHSR(CR.Range.first, Pivot);
1766 CaseRange RHSR(Pivot, CR.Range.second);
1767 Constant *C = Pivot->Low;
1768 MachineBasicBlock *FalseBB = 0, *TrueBB = 0;
1770 // We know that we branch to the LHS if the Value being switched on is
1771 // less than the Pivot value, C. We use this to optimize our binary
1772 // tree a bit, by recognizing that if SV is greater than or equal to the
1773 // LHS's Case Value, and that Case Value is exactly one less than the
1774 // Pivot's Value, then we can branch directly to the LHS's Target,
1775 // rather than creating a leaf node for it.
1776 if ((LHSR.second - LHSR.first) == 1 &&
1777 LHSR.first->High == CR.GE &&
1778 cast<ConstantInt>(C)->getSExtValue() ==
1779 (cast<ConstantInt>(CR.GE)->getSExtValue() + 1LL)) {
1780 TrueBB = LHSR.first->BB;
1781 } else {
1782 TrueBB = CurMF->CreateMachineBasicBlock(LLVMBB);
1783 CurMF->insert(BBI, TrueBB);
1784 WorkList.push_back(CaseRec(TrueBB, C, CR.GE, LHSR));
1785 }
1787 // Similar to the optimization above, if the Value being switched on is
1788 // known to be less than the Constant CR.LT, and the current Case Value
1789 // is CR.LT - 1, then we can branch directly to the target block for
1790 // the current Case Value, rather than emitting a RHS leaf node for it.
1791 if ((RHSR.second - RHSR.first) == 1 && CR.LT &&
1792 cast<ConstantInt>(RHSR.first->Low)->getSExtValue() ==
1793 (cast<ConstantInt>(CR.LT)->getSExtValue() - 1LL)) {
1794 FalseBB = RHSR.first->BB;
1795 } else {
1796 FalseBB = CurMF->CreateMachineBasicBlock(LLVMBB);
1797 CurMF->insert(BBI, FalseBB);
1798 WorkList.push_back(CaseRec(FalseBB, CR.LT, C, RHSR));
1799 }
1801 // Create a CaseBlock record representing a conditional branch to
1802 // the LHS node if the value being switched on SV is less than C.
1803 // Otherwise, branch to the RHS node.
1804 CaseBlock CB(ISD::SETLT, SV, C, NULL, TrueBB, FalseBB, CR.CaseBB);
1806 if (CR.CaseBB == CurMBB)
1807 visitSwitchCase(CB);
1808 else
1809 SwitchCases.push_back(CB);
1811 return true;
1812 }
1814 /// handleBitTestsSwitchCase - if the current case range has few destinations
1815 /// and the range spans less than the machine word bitwidth, encode the case
1816 /// range into a series of masks and emit bit tests with these masks.
1817 bool SelectionDAGLowering::handleBitTestsSwitchCase(CaseRec& CR,
1818 CaseRecVector& WorkList,
1819 Value* SV,
1820 MachineBasicBlock* Default){
1821 unsigned IntPtrBits = TLI.getPointerTy().getSizeInBits();
1823 Case& FrontCase = *CR.Range.first;
1824 Case& BackCase = *(CR.Range.second-1);
1826 // Get the MachineFunction which holds the current MBB. This is used when
1827 // inserting any additional MBBs necessary to represent the switch.
1828 MachineFunction *CurMF = CurMBB->getParent();
1830 unsigned numCmps = 0;
1831 for (CaseItr I = CR.Range.first, E = CR.Range.second;
1832 I!=E; ++I) {
1833 // A single case counts as one comparison, a case range as two.
1834 if (I->Low == I->High)
1835 numCmps += 1;
1836 else
1837 numCmps += 2;
1838 }
1840 // Count unique destinations
1841 SmallSet<MachineBasicBlock*, 4> Dests;
1842 for (CaseItr I = CR.Range.first, E = CR.Range.second; I!=E; ++I) {
1843 Dests.insert(I->BB);
1844 if (Dests.size() > 3)
1845 // Don't bother with the code below if there are too many unique destinations.
1846 return false;
1847 }
1848 DOUT << "Total number of unique destinations: " << Dests.size() << "\n"
1849 << "Total number of comparisons: " << numCmps << "\n";
1851 // Compute span of values.
1852 Constant* minValue = FrontCase.Low;
1853 Constant* maxValue = BackCase.High;
1854 uint64_t range = cast<ConstantInt>(maxValue)->getSExtValue() -
1855 cast<ConstantInt>(minValue)->getSExtValue();
1856 DOUT << "Compare range: " << range << "\n"
1857 << "Low bound: " << cast<ConstantInt>(minValue)->getSExtValue() << "\n"
1858 << "High bound: " << cast<ConstantInt>(maxValue)->getSExtValue() << "\n";
1860 if (range>=IntPtrBits ||
1861 (!(Dests.size() == 1 && numCmps >= 3) &&
1862 !(Dests.size() == 2 && numCmps >= 5) &&
1863 !(Dests.size() >= 3 && numCmps >= 6)))
1864 return false;
1866 DOUT << "Emitting bit tests\n";
1867 int64_t lowBound = 0;
1869 // Optimize the case where all the case values fit in a
1870 // word without having to subtract minValue. In this case,
1871 // we can optimize away the subtraction.
1872 if (cast<ConstantInt>(minValue)->getSExtValue() >= 0 &&
1873 cast<ConstantInt>(maxValue)->getSExtValue() < IntPtrBits) {
1874 range = cast<ConstantInt>(maxValue)->getSExtValue();
1875 } else {
1876 lowBound = cast<ConstantInt>(minValue)->getSExtValue();
1877 }
1879 CaseBitsVector CasesBits;
1880 unsigned i, count = 0;
1882 for (CaseItr I = CR.Range.first, E = CR.Range.second; I!=E; ++I) {
1883 MachineBasicBlock* Dest = I->BB;
1884 for (i = 0; i < count; ++i)
1885 if (Dest == CasesBits[i].BB)
1886 break;
1888 if (i == count) {
1889 assert((count < 3) && "Too many destinations to test!");
1890 CasesBits.push_back(CaseBits(0, Dest, 0));
1891 count++;
1892 }
1894 uint64_t lo = cast<ConstantInt>(I->Low)->getSExtValue() - lowBound;
1895 uint64_t hi = cast<ConstantInt>(I->High)->getSExtValue() - lowBound;
1897 for (uint64_t j = lo; j <= hi; j++) {
1898 CasesBits[i].Mask |= 1ULL << j;
1899 CasesBits[i].Bits++;
1900 }
1901 }
1903 std::sort(CasesBits.begin(), CasesBits.end(), CaseBitsCmp());
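// Worked example (illustrative only, not from the original source): for
// "switch (x) { case 0: case 3: case 5: goto A; case 1: goto B; }" with
// lowBound = 0, the loop above produces
//
//   CasesBits[0]: Mask = (1<<0)|(1<<3)|(1<<5) = 0x29, Bits = 3, BB = A
//   CasesBits[1]: Mask = (1<<1)               = 0x02, Bits = 1, BB = B
//
// so each destination needs only one "test x against mask" branch instead of
// one comparison per case value.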
1907 // Figure out which block is immediately after the current one.
1908 MachineFunction::iterator BBI = CR.CaseBB;
1909 ++BBI;
1911 const BasicBlock *LLVMBB = CR.CaseBB->getBasicBlock();
1913 BitTestInfo BTC;
1914 for (unsigned i = 0, e = CasesBits.size(); i!=e; ++i) {
1915 DOUT << "Mask: " << CasesBits[i].Mask << ", Bits: " << CasesBits[i].Bits
1916 << ", BB: " << CasesBits[i].BB << "\n";
1918 MachineBasicBlock *CaseBB = CurMF->CreateMachineBasicBlock(LLVMBB);
1919 CurMF->insert(BBI, CaseBB);
1920 BTC.push_back(BitTestCase(CasesBits[i].Mask,
1921 CaseBB,
1922 CasesBits[i].BB));
1923 }
1925 BitTestBlock BTB(lowBound, range, SV,
1926 -1U, (CR.CaseBB == CurMBB),
1927 CR.CaseBB, Default, BTC);
1929 if (CR.CaseBB == CurMBB)
1930 visitBitTestHeader(BTB);
1932 BitTestCases.push_back(BTB);
1934 return true;
1935 }
1938 /// Clusterify - Transform a simple list of Cases into a list of CaseRanges.
1939 unsigned SelectionDAGLowering::Clusterify(CaseVector& Cases,
1940 const SwitchInst& SI) {
1941 unsigned numCmps = 0;
1943 // Start with "simple" cases
1944 for (unsigned i = 1; i < SI.getNumSuccessors(); ++i) {
1945 MachineBasicBlock *SMBB = FuncInfo.MBBMap[SI.getSuccessor(i)];
1946 Cases.push_back(Case(SI.getSuccessorValue(i),
1947 SI.getSuccessorValue(i),
1948 SMBB));
1949 }
1950 std::sort(Cases.begin(), Cases.end(), CaseCmp());
1952 // Merge cases into clusters.
1953 if (Cases.size()>=2)
1954 // Must recompute end() each iteration because it may be
1955 // invalidated by erase if we hold on to it
1956 for (CaseItr I=Cases.begin(), J=++(Cases.begin()); J!=Cases.end(); ) {
1957 int64_t nextValue = cast<ConstantInt>(J->Low)->getSExtValue();
1958 int64_t currentValue = cast<ConstantInt>(I->High)->getSExtValue();
1959 MachineBasicBlock* nextBB = J->BB;
1960 MachineBasicBlock* currentBB = I->BB;
1962 // If the two neighboring cases go to the same destination, merge them
1963 // into a single case.
1964 if ((nextValue-currentValue==1) && (currentBB == nextBB)) {
1965 I->High = J->High;
1966 J = Cases.erase(J);
1967 } else {
1968 I = J++;
1969 }
1970 }
1972 for (CaseItr I=Cases.begin(), E=Cases.end(); I!=E; ++I, ++numCmps) {
1973 if (I->Low != I->High)
1974 // A range counts double, since it requires two compares.
1975 ++numCmps;
1976 }
1978 return numCmps;
1979 }
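// Worked example (illustrative only, not from the original source): for
// "switch (x) { case 1: case 2: case 3: goto A; case 7: goto B; }",
// Clusterify first builds four single-value Cases, sorts them, then merges
// the adjacent same-destination values 1, 2, 3 into one cluster, returning
// the clusters [1,3] -> A and [7,7] -> B with numCmps = 2 + 1 = 3.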
1981 void SelectionDAGLowering::visitSwitch(SwitchInst &SI) {
1982 // Figure out which block is immediately after the current one.
1983 MachineBasicBlock *NextBlock = 0;
1984 MachineFunction::iterator BBI = CurMBB;
1986 MachineBasicBlock *Default = FuncInfo.MBBMap[SI.getDefaultDest()];
1988 // If there is only the default destination, branch to it if it is not the
1989 // next basic block. Otherwise, just fall through.
1990 if (SI.getNumOperands() == 2) {
1991 // Update machine-CFG edges.
1993 // If this is not a fall-through branch, emit the branch.
1994 CurMBB->addSuccessor(Default);
1995 if (Default != NextBlock)
1996 DAG.setRoot(DAG.getNode(ISD::BR, MVT::Other, getControlRoot(),
1997 DAG.getBasicBlock(Default)));
1998 return;
1999 }
2002 // If there are any non-default case statements, create a vector of Cases
2003 // representing each one, and sort the vector so that we can efficiently
2004 // create a binary search tree from them.
2005 CaseVector Cases;
2006 unsigned numCmps = Clusterify(Cases, SI);
2007 DOUT << "Clusterify finished. Total clusters: " << Cases.size()
2008 << ". Total compares: " << numCmps << "\n";
2010 // Get the Value to be switched on and default basic blocks, which will be
2011 // inserted into CaseBlock records, representing basic blocks in the binary
2012 // search tree.
2013 Value *SV = SI.getOperand(0);
2015 // Push the initial CaseRec onto the worklist
2016 CaseRecVector WorkList;
2017 WorkList.push_back(CaseRec(CurMBB,0,0,CaseRange(Cases.begin(),Cases.end())));
2019 while (!WorkList.empty()) {
2020 // Grab a record representing a case range to process off the worklist
2021 CaseRec CR = WorkList.back();
2022 WorkList.pop_back();
2024 if (handleBitTestsSwitchCase(CR, WorkList, SV, Default))
2025 continue;
2027 // If the range has few cases (two or fewer), emit a series of specific
2028 // comparisons.
2029 if (handleSmallSwitchRange(CR, WorkList, SV, Default))
2030 continue;
2032 // If the switch has more than 5 blocks, and at least 40% dense, and the
2033 // target supports indirect branches, then emit a jump table rather than
2034 // lowering the switch to a binary tree of conditional branches.
2035 if (handleJTSwitchCase(CR, WorkList, SV, Default))
2036 continue;
2038 // Emit binary tree. We need to pick a pivot, and push left and right ranges
2039 // onto the worklist. Leaves are handled via a handleSmallSwitchRange() call.
2040 handleBTSplitSwitchCase(CR, WorkList, SV, Default);
2041 }
2042 }
2045 void SelectionDAGLowering::visitSub(User &I) {
2046 // -0.0 - X --> fneg
2047 const Type *Ty = I.getType();
2048 if (isa<VectorType>(Ty)) {
2049 if (ConstantVector *CV = dyn_cast<ConstantVector>(I.getOperand(0))) {
2050 const VectorType *DestTy = cast<VectorType>(I.getType());
2051 const Type *ElTy = DestTy->getElementType();
2052 if (ElTy->isFloatingPoint()) {
2053 unsigned VL = DestTy->getNumElements();
2054 std::vector<Constant*> NZ(VL, ConstantFP::getNegativeZero(ElTy));
2055 Constant *CNZ = ConstantVector::get(&NZ[0], NZ.size());
2056 if (CV == CNZ) {
2057 SDValue Op2 = getValue(I.getOperand(1));
2058 setValue(&I, DAG.getNode(ISD::FNEG, Op2.getValueType(), Op2));
2059 return;
2060 }
2061 }
2062 }
2063 }
2064 if (Ty->isFloatingPoint()) {
2065 if (ConstantFP *CFP = dyn_cast<ConstantFP>(I.getOperand(0)))
2066 if (CFP->isExactlyValue(ConstantFP::getNegativeZero(Ty)->getValueAPF())) {
2067 SDValue Op2 = getValue(I.getOperand(1));
2068 setValue(&I, DAG.getNode(ISD::FNEG, Op2.getValueType(), Op2));
2069 return;
2070 }
2071 }
2073 visitBinary(I, Ty->isFPOrFPVector() ? ISD::FSUB : ISD::SUB);
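// Worked example (illustrative only, not from the original source): the IR
//   %y = sub float -0.000000e+00, %x
// is caught by the checks above and lowered to a single FNEG node, while
// "sub float 0.0, %x" is deliberately not folded: fneg flips the sign bit of
// -0.0, but 0.0 - (-0.0) must produce +0.0.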
2076 void SelectionDAGLowering::visitBinary(User &I, unsigned OpCode) {
2077 SDValue Op1 = getValue(I.getOperand(0));
2078 SDValue Op2 = getValue(I.getOperand(1));
2080 setValue(&I, DAG.getNode(OpCode, Op1.getValueType(), Op1, Op2));
2083 void SelectionDAGLowering::visitShift(User &I, unsigned Opcode) {
2084 SDValue Op1 = getValue(I.getOperand(0));
2085 SDValue Op2 = getValue(I.getOperand(1));
2086 if (!isa<VectorType>(I.getType())) {
2087 if (TLI.getShiftAmountTy().bitsLT(Op2.getValueType()))
2088 Op2 = DAG.getNode(ISD::TRUNCATE, TLI.getShiftAmountTy(), Op2);
2089 else if (TLI.getShiftAmountTy().bitsGT(Op2.getValueType()))
2090 Op2 = DAG.getNode(ISD::ANY_EXTEND, TLI.getShiftAmountTy(), Op2);
2093 setValue(&I, DAG.getNode(Opcode, Op1.getValueType(), Op1, Op2));
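// Worked example (illustrative only, not from the original source): on a
// target whose shift-amount type is i8 (x86, for example), "shl i32 %x, %n"
// arrives with an i32 amount, so the code above truncates %n to i8 before
// building the SHL node; on a target with an i64 shift-amount type the same
// amount would instead be any-extended.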
2096 void SelectionDAGLowering::visitICmp(User &I) {
2097 ICmpInst::Predicate predicate = ICmpInst::BAD_ICMP_PREDICATE;
2098 if (ICmpInst *IC = dyn_cast<ICmpInst>(&I))
2099 predicate = IC->getPredicate();
2100 else if (ConstantExpr *IC = dyn_cast<ConstantExpr>(&I))
2101 predicate = ICmpInst::Predicate(IC->getPredicate());
2102 SDValue Op1 = getValue(I.getOperand(0));
2103 SDValue Op2 = getValue(I.getOperand(1));
2104 ISD::CondCode Opcode = getICmpCondCode(predicate);
2105 setValue(&I, DAG.getSetCC(MVT::i1, Op1, Op2, Opcode));
2108 void SelectionDAGLowering::visitFCmp(User &I) {
2109 FCmpInst::Predicate predicate = FCmpInst::BAD_FCMP_PREDICATE;
2110 if (FCmpInst *FC = dyn_cast<FCmpInst>(&I))
2111 predicate = FC->getPredicate();
2112 else if (ConstantExpr *FC = dyn_cast<ConstantExpr>(&I))
2113 predicate = FCmpInst::Predicate(FC->getPredicate());
2114 SDValue Op1 = getValue(I.getOperand(0));
2115 SDValue Op2 = getValue(I.getOperand(1));
2116 ISD::CondCode Condition = getFCmpCondCode(predicate);
2117 setValue(&I, DAG.getSetCC(MVT::i1, Op1, Op2, Condition));
2120 void SelectionDAGLowering::visitVICmp(User &I) {
2121 ICmpInst::Predicate predicate = ICmpInst::BAD_ICMP_PREDICATE;
2122 if (VICmpInst *IC = dyn_cast<VICmpInst>(&I))
2123 predicate = IC->getPredicate();
2124 else if (ConstantExpr *IC = dyn_cast<ConstantExpr>(&I))
2125 predicate = ICmpInst::Predicate(IC->getPredicate());
2126 SDValue Op1 = getValue(I.getOperand(0));
2127 SDValue Op2 = getValue(I.getOperand(1));
2128 ISD::CondCode Opcode = getICmpCondCode(predicate);
2129 setValue(&I, DAG.getVSetCC(Op1.getValueType(), Op1, Op2, Opcode));
2132 void SelectionDAGLowering::visitVFCmp(User &I) {
2133 FCmpInst::Predicate predicate = FCmpInst::BAD_FCMP_PREDICATE;
2134 if (VFCmpInst *FC = dyn_cast<VFCmpInst>(&I))
2135 predicate = FC->getPredicate();
2136 else if (ConstantExpr *FC = dyn_cast<ConstantExpr>(&I))
2137 predicate = FCmpInst::Predicate(FC->getPredicate());
2138 SDValue Op1 = getValue(I.getOperand(0));
2139 SDValue Op2 = getValue(I.getOperand(1));
2140 ISD::CondCode Condition = getFCmpCondCode(predicate);
2141 MVT DestVT = TLI.getValueType(I.getType());
2143 setValue(&I, DAG.getVSetCC(DestVT, Op1, Op2, Condition));
2146 void SelectionDAGLowering::visitSelect(User &I) {
2147 SmallVector<MVT, 4> ValueVTs;
2148 ComputeValueVTs(TLI, I.getType(), ValueVTs);
2149 unsigned NumValues = ValueVTs.size();
2150 if (NumValues != 0) {
2151 SmallVector<SDValue, 4> Values(NumValues);
2152 SDValue Cond = getValue(I.getOperand(0));
2153 SDValue TrueVal = getValue(I.getOperand(1));
2154 SDValue FalseVal = getValue(I.getOperand(2));
2156 for (unsigned i = 0; i != NumValues; ++i)
2157 Values[i] = DAG.getNode(ISD::SELECT, TrueVal.getValueType(), Cond,
2158 SDValue(TrueVal.getNode(), TrueVal.getResNo() + i),
2159 SDValue(FalseVal.getNode(), FalseVal.getResNo() + i));
2161 setValue(&I, DAG.getNode(ISD::MERGE_VALUES,
2162 DAG.getVTList(&ValueVTs[0], NumValues),
2163 &Values[0], NumValues));
2168 void SelectionDAGLowering::visitTrunc(User &I) {
2169 // TruncInst cannot be a no-op cast because sizeof(src) > sizeof(dest).
2170 SDValue N = getValue(I.getOperand(0));
2171 MVT DestVT = TLI.getValueType(I.getType());
2172 setValue(&I, DAG.getNode(ISD::TRUNCATE, DestVT, N));
2175 void SelectionDAGLowering::visitZExt(User &I) {
2176 // ZExt cannot be a no-op cast because sizeof(src) < sizeof(dest).
2177 // ZExt also can't be a cast to bool for the same reason, so there is not much to do.
2178 SDValue N = getValue(I.getOperand(0));
2179 MVT DestVT = TLI.getValueType(I.getType());
2180 setValue(&I, DAG.getNode(ISD::ZERO_EXTEND, DestVT, N));
2183 void SelectionDAGLowering::visitSExt(User &I) {
2184 // SExt cannot be a no-op cast because sizeof(src) < sizeof(dest).
2185 // SExt also can't be a cast to bool for the same reason, so there is not much to do.
2186 SDValue N = getValue(I.getOperand(0));
2187 MVT DestVT = TLI.getValueType(I.getType());
2188 setValue(&I, DAG.getNode(ISD::SIGN_EXTEND, DestVT, N));
2191 void SelectionDAGLowering::visitFPTrunc(User &I) {
2192 // FPTrunc is never a no-op cast, no need to check
2193 SDValue N = getValue(I.getOperand(0));
2194 MVT DestVT = TLI.getValueType(I.getType());
2195 setValue(&I, DAG.getNode(ISD::FP_ROUND, DestVT, N, DAG.getIntPtrConstant(0)));
2198 void SelectionDAGLowering::visitFPExt(User &I){
2199 // FPExt is never a no-op cast, no need to check
2200 SDValue N = getValue(I.getOperand(0));
2201 MVT DestVT = TLI.getValueType(I.getType());
2202 setValue(&I, DAG.getNode(ISD::FP_EXTEND, DestVT, N));
2205 void SelectionDAGLowering::visitFPToUI(User &I) {
2206 // FPToUI is never a no-op cast, no need to check
2207 SDValue N = getValue(I.getOperand(0));
2208 MVT DestVT = TLI.getValueType(I.getType());
2209 setValue(&I, DAG.getNode(ISD::FP_TO_UINT, DestVT, N));
2212 void SelectionDAGLowering::visitFPToSI(User &I) {
2213 // FPToSI is never a no-op cast, no need to check
2214 SDValue N = getValue(I.getOperand(0));
2215 MVT DestVT = TLI.getValueType(I.getType());
2216 setValue(&I, DAG.getNode(ISD::FP_TO_SINT, DestVT, N));
2219 void SelectionDAGLowering::visitUIToFP(User &I) {
2220 // UIToFP is never a no-op cast, no need to check
2221 SDValue N = getValue(I.getOperand(0));
2222 MVT DestVT = TLI.getValueType(I.getType());
2223 setValue(&I, DAG.getNode(ISD::UINT_TO_FP, DestVT, N));
2226 void SelectionDAGLowering::visitSIToFP(User &I){
2227 // SIToFP is never a no-op cast, no need to check
2228 SDValue N = getValue(I.getOperand(0));
2229 MVT DestVT = TLI.getValueType(I.getType());
2230 setValue(&I, DAG.getNode(ISD::SINT_TO_FP, DestVT, N));
2233 void SelectionDAGLowering::visitPtrToInt(User &I) {
2234 // What to do depends on the size of the integer and the size of the pointer.
2235 // We can either truncate, zero extend, or no-op, accordingly.
2236 SDValue N = getValue(I.getOperand(0));
2237 MVT SrcVT = N.getValueType();
2238 MVT DestVT = TLI.getValueType(I.getType());
2239 SDValue Result;
2240 if (DestVT.bitsLT(SrcVT))
2241 Result = DAG.getNode(ISD::TRUNCATE, DestVT, N);
2242 else
2243 // Note: ZERO_EXTEND can handle cases where the sizes are equal too
2244 Result = DAG.getNode(ISD::ZERO_EXTEND, DestVT, N);
2245 setValue(&I, Result);
2248 void SelectionDAGLowering::visitIntToPtr(User &I) {
2249 // What to do depends on the size of the integer and the size of the pointer.
2250 // We can either truncate, zero extend, or no-op, accordingly.
2251 SDValue N = getValue(I.getOperand(0));
2252 MVT SrcVT = N.getValueType();
2253 MVT DestVT = TLI.getValueType(I.getType());
2254 if (DestVT.bitsLT(SrcVT))
2255 setValue(&I, DAG.getNode(ISD::TRUNCATE, DestVT, N));
2256 else
2257 // Note: ZERO_EXTEND can handle cases where the sizes are equal too
2258 setValue(&I, DAG.getNode(ISD::ZERO_EXTEND, DestVT, N));
2259 }
2261 void SelectionDAGLowering::visitBitCast(User &I) {
2262 SDValue N = getValue(I.getOperand(0));
2263 MVT DestVT = TLI.getValueType(I.getType());
2265 // BitCast assures us that source and destination are the same size so this
2266 // is either a BIT_CONVERT or a no-op.
2267 if (DestVT != N.getValueType())
2268 setValue(&I, DAG.getNode(ISD::BIT_CONVERT, DestVT, N)); // convert types
2269 else
2270 setValue(&I, N); // noop cast.
2271 }
2273 void SelectionDAGLowering::visitInsertElement(User &I) {
2274 SDValue InVec = getValue(I.getOperand(0));
2275 SDValue InVal = getValue(I.getOperand(1));
2276 SDValue InIdx = DAG.getNode(ISD::ZERO_EXTEND, TLI.getPointerTy(),
2277 getValue(I.getOperand(2)));
2279 setValue(&I, DAG.getNode(ISD::INSERT_VECTOR_ELT,
2280 TLI.getValueType(I.getType()),
2281 InVec, InVal, InIdx));
2282 }
2284 void SelectionDAGLowering::visitExtractElement(User &I) {
2285 SDValue InVec = getValue(I.getOperand(0));
2286 SDValue InIdx = DAG.getNode(ISD::ZERO_EXTEND, TLI.getPointerTy(),
2287 getValue(I.getOperand(1)));
2288 setValue(&I, DAG.getNode(ISD::EXTRACT_VECTOR_ELT,
2289 TLI.getValueType(I.getType()), InVec, InIdx));
2290 }
2293 // Utility for visitShuffleVector - Returns true if the mask is a sequential
2294 // mask starting from SIndx and increasing to the element length (undefs are allowed).
2295 static bool SequentialMask(SDValue Mask, unsigned SIndx) {
2296 unsigned MaskNumElts = Mask.getNumOperands();
2297 for (unsigned i = 0; i != MaskNumElts; ++i) {
2298 if (Mask.getOperand(i).getOpcode() != ISD::UNDEF) {
2299 unsigned Idx = cast<ConstantSDNode>(Mask.getOperand(i))->getZExtValue();
2300 if (Idx != i + SIndx)
2301 return false;
2302 }
2303 }
2304 return true;
2305 }
2307 void SelectionDAGLowering::visitShuffleVector(User &I) {
2308 SDValue Src1 = getValue(I.getOperand(0));
2309 SDValue Src2 = getValue(I.getOperand(1));
2310 SDValue Mask = getValue(I.getOperand(2));
2312 MVT VT = TLI.getValueType(I.getType());
2313 MVT SrcVT = Src1.getValueType();
2314 int MaskNumElts = Mask.getNumOperands();
2315 int SrcNumElts = SrcVT.getVectorNumElements();
2317 if (SrcNumElts == MaskNumElts) {
2318 setValue(&I, DAG.getNode(ISD::VECTOR_SHUFFLE, VT, Src1, Src2, Mask));
2319 return;
2320 }
2322 // Normalize the shuffle vector since mask and vector length don't match.
2323 MVT MaskEltVT = Mask.getValueType().getVectorElementType();
2325 if (SrcNumElts < MaskNumElts && MaskNumElts % SrcNumElts == 0) {
2326 // Mask is longer than the source vectors and is a multiple of the source
2327 // vectors. We can use concatenate vector to make the mask and vectors
2328 // lengths match.
2329 if (SrcNumElts*2 == MaskNumElts && SequentialMask(Mask, 0)) {
2330 // The shuffle is concatenating two vectors together.
2331 setValue(&I, DAG.getNode(ISD::CONCAT_VECTORS, VT, Src1, Src2));
2332 return;
2333 }
2335 // Pad both vectors with undefs to make them the same length as the mask.
2336 unsigned NumConcat = MaskNumElts / SrcNumElts;
2337 SDValue UndefVal = DAG.getNode(ISD::UNDEF, SrcVT);
2339 SDValue* MOps1 = new SDValue[NumConcat];
2340 SDValue* MOps2 = new SDValue[NumConcat];
2341 MOps1[0] = Src1;
2342 MOps2[0] = Src2;
2343 for (unsigned i = 1; i != NumConcat; ++i) {
2344 MOps1[i] = UndefVal;
2345 MOps2[i] = UndefVal;
2346 }
2347 Src1 = DAG.getNode(ISD::CONCAT_VECTORS, VT, MOps1, NumConcat);
2348 Src2 = DAG.getNode(ISD::CONCAT_VECTORS, VT, MOps2, NumConcat);
2350 delete [] MOps1;
2351 delete [] MOps2;
2353 // Readjust mask for new input vector length.
2354 SmallVector<SDValue, 8> MappedOps;
2355 for (int i = 0; i != MaskNumElts; ++i) {
2356 if (Mask.getOperand(i).getOpcode() == ISD::UNDEF) {
2357 MappedOps.push_back(Mask.getOperand(i));
2359 int Idx = cast<ConstantSDNode>(Mask.getOperand(i))->getZExtValue();
2360 if (Idx < SrcNumElts)
2361 MappedOps.push_back(DAG.getConstant(Idx, MaskEltVT));
2362 else
2363 MappedOps.push_back(DAG.getConstant(Idx + MaskNumElts - SrcNumElts,
2364 MaskEltVT));
2365 }
2366 }
2367 Mask = DAG.getNode(ISD::BUILD_VECTOR, Mask.getValueType(),
2368 &MappedOps[0], MappedOps.size());
2370 setValue(&I, DAG.getNode(ISD::VECTOR_SHUFFLE, VT, Src1, Src2, Mask));
2371 return;
2372 }
2374 if (SrcNumElts > MaskNumElts) {
2375 // Resulting vector is shorter than the incoming vector.
2376 if (SrcNumElts == MaskNumElts && SequentialMask(Mask,0)) {
2377 // Shuffle extracts 1st vector.
2378 setValue(&I, Src1);
2379 return;
2380 }
2382 if (SrcNumElts == MaskNumElts && SequentialMask(Mask,MaskNumElts)) {
2383 // Shuffle extracts 2nd vector.
2384 setValue(&I, Src2);
2385 return;
2386 }
2388 // Analyze the access pattern of the vector to see if we can extract
2389 // two subvectors and do the shuffle. The analysis is done by calculating
2390 // the range of elements the mask accesses on both vectors.
2391 int MinRange[2] = { SrcNumElts+1, SrcNumElts+1};
2392 int MaxRange[2] = {-1, -1};
2394 for (int i = 0; i != MaskNumElts; ++i) {
2395 SDValue Arg = Mask.getOperand(i);
2396 if (Arg.getOpcode() != ISD::UNDEF) {
2397 assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
2398 int Idx = cast<ConstantSDNode>(Arg)->getZExtValue();
2399 int Input = 0;
2400 if (Idx >= SrcNumElts) {
2401 Input = 1;
2402 Idx -= SrcNumElts;
2403 }
2404 if (Idx > MaxRange[Input])
2405 MaxRange[Input] = Idx;
2406 if (Idx < MinRange[Input])
2407 MinRange[Input] = Idx;
2408 }
2409 }
2411 // Check if the access is smaller than the vector size and whether we can
2412 // find a reasonable extract index.
2413 int RangeUse[2] = { 2, 2 }; // 0 = Unused, 1 = Extract, 2 = Cannot extract.
2414 int StartIdx[2]; // StartIdx to extract from
2415 for (int Input=0; Input < 2; ++Input) {
2416 if (MinRange[Input] == SrcNumElts+1 && MaxRange[Input] == -1) {
2417 RangeUse[Input] = 0; // Unused
2418 StartIdx[Input] = 0;
2419 } else if (MaxRange[Input] - MinRange[Input] < MaskNumElts) {
2420 // Fits within range but we should see if we can find a good
2421 // start index that is a multiple of the mask length.
2422 if (MaxRange[Input] < MaskNumElts) {
2423 RangeUse[Input] = 1; // Extract from beginning of the vector
2424 StartIdx[Input] = 0;
2425 } else {
2426 StartIdx[Input] = (MinRange[Input]/MaskNumElts)*MaskNumElts;
2427 if (MaxRange[Input] - StartIdx[Input] < MaskNumElts &&
2428 StartIdx[Input] + MaskNumElts < SrcNumElts)
2429 RangeUse[Input] = 1; // Extract from a multiple of the mask length.
2430 }
2431 }
2432 }
2434 if (RangeUse[0] == 0 && RangeUse[1] == 0) {
2435 setValue(&I, DAG.getNode(ISD::UNDEF, VT)); // Vectors are not used.
2436 return;
2437 }
2438 else if (RangeUse[0] < 2 && RangeUse[1] < 2) {
2439 // Extract appropriate subvector and generate a vector shuffle
2440 for (int Input=0; Input < 2; ++Input) {
2441 SDValue& Src = Input == 0 ? Src1 : Src2;
2442 if (RangeUse[Input] == 0) {
2443 Src = DAG.getNode(ISD::UNDEF, VT);
2444 } else {
2445 Src = DAG.getNode(ISD::EXTRACT_SUBVECTOR, VT, Src,
2446 DAG.getIntPtrConstant(StartIdx[Input]));
2447 }
2448 }
2449 // Calculate new mask.
2450 SmallVector<SDValue, 8> MappedOps;
2451 for (int i = 0; i != MaskNumElts; ++i) {
2452 SDValue Arg = Mask.getOperand(i);
2453 if (Arg.getOpcode() == ISD::UNDEF) {
2454 MappedOps.push_back(Arg);
2455 } else {
2456 int Idx = cast<ConstantSDNode>(Arg)->getZExtValue();
2457 if (Idx < SrcNumElts)
2458 MappedOps.push_back(DAG.getConstant(Idx - StartIdx[0], MaskEltVT));
2459 else {
2460 Idx = Idx - SrcNumElts - StartIdx[1] + MaskNumElts;
2461 MappedOps.push_back(DAG.getConstant(Idx, MaskEltVT));
2462 }
2463 }
2464 }
2465 Mask = DAG.getNode(ISD::BUILD_VECTOR, Mask.getValueType(),
2466 &MappedOps[0], MappedOps.size());
2467 setValue(&I, DAG.getNode(ISD::VECTOR_SHUFFLE, VT, Src1, Src2, Mask));
2468 return;
2469 }
2472 // We can't use either concat vectors or extract subvectors, so fall back
2473 // to replacing the shuffle with extract and build vector.
2475 MVT EltVT = VT.getVectorElementType();
2476 MVT PtrVT = TLI.getPointerTy();
2477 SmallVector<SDValue,8> Ops;
2478 for (int i = 0; i != MaskNumElts; ++i) {
2479 SDValue Arg = Mask.getOperand(i);
2480 if (Arg.getOpcode() == ISD::UNDEF) {
2481 Ops.push_back(DAG.getNode(ISD::UNDEF, EltVT));
2482 } else {
2483 assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
2484 int Idx = cast<ConstantSDNode>(Arg)->getZExtValue();
2485 if (Idx < SrcNumElts)
2486 Ops.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, EltVT, Src1,
2487 DAG.getConstant(Idx, PtrVT)));
2488 else
2489 Ops.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, EltVT, Src2,
2490 DAG.getConstant(Idx - SrcNumElts, PtrVT)));
2491 }
2492 }
2493 setValue(&I, DAG.getNode(ISD::BUILD_VECTOR, VT, &Ops[0], Ops.size()));
2494 }
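// Worked example (illustrative only, not from the original source): shuffling
// two <2 x float> sources with the <4 x i32> mask <0, 1, 2, 3> is recognized
// above as a plain concatenation and becomes one CONCAT_VECTORS node; the
// mask <0, 2, 1, 3> instead pads each source with undefs and remaps the
// second source's indices 2 and 3 to 4 and 5 (Idx + MaskNumElts - SrcNumElts)
// before rebuilding the VECTOR_SHUFFLE.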
2496 void SelectionDAGLowering::visitInsertValue(InsertValueInst &I) {
2497 const Value *Op0 = I.getOperand(0);
2498 const Value *Op1 = I.getOperand(1);
2499 const Type *AggTy = I.getType();
2500 const Type *ValTy = Op1->getType();
2501 bool IntoUndef = isa<UndefValue>(Op0);
2502 bool FromUndef = isa<UndefValue>(Op1);
2504 unsigned LinearIndex = ComputeLinearIndex(TLI, AggTy,
2505 I.idx_begin(), I.idx_end());
2507 SmallVector<MVT, 4> AggValueVTs;
2508 ComputeValueVTs(TLI, AggTy, AggValueVTs);
2509 SmallVector<MVT, 4> ValValueVTs;
2510 ComputeValueVTs(TLI, ValTy, ValValueVTs);
2512 unsigned NumAggValues = AggValueVTs.size();
2513 unsigned NumValValues = ValValueVTs.size();
2514 SmallVector<SDValue, 4> Values(NumAggValues);
2516 SDValue Agg = getValue(Op0);
2517 SDValue Val = getValue(Op1);
2518 unsigned i = 0;
2519 // Copy the beginning value(s) from the original aggregate.
2520 for (; i != LinearIndex; ++i)
2521 Values[i] = IntoUndef ? DAG.getNode(ISD::UNDEF, AggValueVTs[i]) :
2522 SDValue(Agg.getNode(), Agg.getResNo() + i);
2523 // Copy values from the inserted value(s).
2524 for (; i != LinearIndex + NumValValues; ++i)
2525 Values[i] = FromUndef ? DAG.getNode(ISD::UNDEF, AggValueVTs[i]) :
2526 SDValue(Val.getNode(), Val.getResNo() + i - LinearIndex);
2527 // Copy remaining value(s) from the original aggregate.
2528 for (; i != NumAggValues; ++i)
2529 Values[i] = IntoUndef ? DAG.getNode(ISD::UNDEF, AggValueVTs[i]) :
2530 SDValue(Agg.getNode(), Agg.getResNo() + i);
2532 setValue(&I, DAG.getNode(ISD::MERGE_VALUES,
2533 DAG.getVTList(&AggValueVTs[0], NumAggValues),
2534 &Values[0], NumAggValues));
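// Worked example (illustrative only, not from the original source): for
//   %r = insertvalue {i32, {i32, i32}} %agg, i32 %v, 1, 0
// the aggregate flattens to three values and ComputeLinearIndex returns 1,
// so the three loops above assemble { agg.0, %v, agg.2 } and merge them back
// into one MERGE_VALUES node.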
2537 void SelectionDAGLowering::visitExtractValue(ExtractValueInst &I) {
2538 const Value *Op0 = I.getOperand(0);
2539 const Type *AggTy = Op0->getType();
2540 const Type *ValTy = I.getType();
2541 bool OutOfUndef = isa<UndefValue>(Op0);
2543 unsigned LinearIndex = ComputeLinearIndex(TLI, AggTy,
2544 I.idx_begin(), I.idx_end());
2546 SmallVector<MVT, 4> ValValueVTs;
2547 ComputeValueVTs(TLI, ValTy, ValValueVTs);
2549 unsigned NumValValues = ValValueVTs.size();
2550 SmallVector<SDValue, 4> Values(NumValValues);
2552 SDValue Agg = getValue(Op0);
2553 // Copy out the selected value(s).
2554 for (unsigned i = LinearIndex; i != LinearIndex + NumValValues; ++i)
2555 Values[i - LinearIndex] =
2556 OutOfUndef ?
2557 DAG.getNode(ISD::UNDEF,
2558 Agg.getNode()->getValueType(Agg.getResNo() + i)) :
2559 SDValue(Agg.getNode(), Agg.getResNo() + i);
2561 setValue(&I, DAG.getNode(ISD::MERGE_VALUES,
2562 DAG.getVTList(&ValValueVTs[0], NumValValues),
2563 &Values[0], NumValValues));
2567 void SelectionDAGLowering::visitGetElementPtr(User &I) {
2568 SDValue N = getValue(I.getOperand(0));
2569 const Type *Ty = I.getOperand(0)->getType();
2571 for (GetElementPtrInst::op_iterator OI = I.op_begin()+1, E = I.op_end();
2572 OI != E; ++OI) {
2573 Value *Idx = *OI;
2574 if (const StructType *StTy = dyn_cast<StructType>(Ty)) {
2575 unsigned Field = cast<ConstantInt>(Idx)->getZExtValue();
2576 if (Field) {
2577 // N = N + Offset
2578 uint64_t Offset = TD->getStructLayout(StTy)->getElementOffset(Field);
2579 N = DAG.getNode(ISD::ADD, N.getValueType(), N,
2580 DAG.getIntPtrConstant(Offset));
2581 }
2582 Ty = StTy->getElementType(Field);
2583 } else {
2584 Ty = cast<SequentialType>(Ty)->getElementType();
2586 // If this is a constant subscript, handle it quickly.
2587 if (ConstantInt *CI = dyn_cast<ConstantInt>(Idx)) {
2588 if (CI->getZExtValue() == 0) continue;
2589 uint64_t Offs =
2590 TD->getABITypeSize(Ty)*cast<ConstantInt>(CI)->getSExtValue();
2591 N = DAG.getNode(ISD::ADD, N.getValueType(), N,
2592 DAG.getIntPtrConstant(Offs));
2593 continue;
2594 }
2596 // N = N + Idx * ElementSize;
2597 uint64_t ElementSize = TD->getABITypeSize(Ty);
2598 SDValue IdxN = getValue(Idx);
2600 // If the index is smaller or larger than intptr_t, truncate or extend
2601 // it.
2602 if (IdxN.getValueType().bitsLT(N.getValueType()))
2603 IdxN = DAG.getNode(ISD::SIGN_EXTEND, N.getValueType(), IdxN);
2604 else if (IdxN.getValueType().bitsGT(N.getValueType()))
2605 IdxN = DAG.getNode(ISD::TRUNCATE, N.getValueType(), IdxN);
2607 // If this is a multiply by a power of two, turn it into a shl
2608 // immediately. This is a very common case.
2609 if (ElementSize != 1) {
2610 if (isPowerOf2_64(ElementSize)) {
2611 unsigned Amt = Log2_64(ElementSize);
2612 IdxN = DAG.getNode(ISD::SHL, N.getValueType(), IdxN,
2613 DAG.getConstant(Amt, TLI.getShiftAmountTy()));
2614 } else {
2615 SDValue Scale = DAG.getIntPtrConstant(ElementSize);
2616 IdxN = DAG.getNode(ISD::MUL, N.getValueType(), IdxN, Scale);
2617 }
2618 }
2620 N = DAG.getNode(ISD::ADD, N.getValueType(), N, IdxN);
2621 }
2623 setValue(&I, N);
2624 }
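// Worked example (illustrative only, not from the original source): for
//   %p = getelementptr [10 x i32]* %a, i32 0, i32 %i
// the constant zero index is skipped entirely, and since
// getABITypeSize(i32) == 4 is a power of two, the scaling becomes
//   N = N + (%i << 2)
// using the SHL path above rather than a MUL.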
2626 void SelectionDAGLowering::visitAlloca(AllocaInst &I) {
2627 // If this is a fixed sized alloca in the entry block of the function,
2628 // allocate it statically on the stack.
2629 if (FuncInfo.StaticAllocaMap.count(&I))
2630 return; // getValue will auto-populate this.
2632 const Type *Ty = I.getAllocatedType();
2633 uint64_t TySize = TLI.getTargetData()->getABITypeSize(Ty);
2634 unsigned Align =
2635 std::max((unsigned)TLI.getTargetData()->getPrefTypeAlignment(Ty),
2636 I.getAlignment());
2638 SDValue AllocSize = getValue(I.getArraySize());
2639 MVT IntPtr = TLI.getPointerTy();
2640 if (IntPtr.bitsLT(AllocSize.getValueType()))
2641 AllocSize = DAG.getNode(ISD::TRUNCATE, IntPtr, AllocSize);
2642 else if (IntPtr.bitsGT(AllocSize.getValueType()))
2643 AllocSize = DAG.getNode(ISD::ZERO_EXTEND, IntPtr, AllocSize);
2645 AllocSize = DAG.getNode(ISD::MUL, IntPtr, AllocSize,
2646 DAG.getIntPtrConstant(TySize));
2648 // Handle alignment. If the requested alignment is less than or equal to
2649 // the stack alignment, ignore it. If the size is greater than or equal to
2650 // the stack alignment, we note this in the DYNAMIC_STACKALLOC node.
2651 unsigned StackAlign =
2652 TLI.getTargetMachine().getFrameInfo()->getStackAlignment();
2653 if (Align <= StackAlign)
2654 Align = 0;
2656 // Round the size of the allocation up to the stack alignment size
2657 // by adding SA-1 to the size.
2658 AllocSize = DAG.getNode(ISD::ADD, AllocSize.getValueType(), AllocSize,
2659 DAG.getIntPtrConstant(StackAlign-1));
2660 // Mask out the low bits for alignment purposes.
2661 AllocSize = DAG.getNode(ISD::AND, AllocSize.getValueType(), AllocSize,
2662 DAG.getIntPtrConstant(~(uint64_t)(StackAlign-1)));
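// Worked example (illustrative only, not from the original source): with
// StackAlign = 16 and AllocSize = 20, the two nodes above compute
//   (20 + 15) & ~15 = 35 & ~15 = 32,
// the classic (x + A-1) & ~(A-1) round-up-to-multiple idiom.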
2664 SDValue Ops[] = { getRoot(), AllocSize, DAG.getIntPtrConstant(Align) };
2665 const MVT *VTs = DAG.getNodeValueTypes(AllocSize.getValueType(),
2666 MVT::Other);
2667 SDValue DSA = DAG.getNode(ISD::DYNAMIC_STACKALLOC, VTs, 2, Ops, 3);
2668 setValue(&I, DSA);
2669 DAG.setRoot(DSA.getValue(1));
2671 // Inform the Frame Information that we have just allocated a variable-sized
2672 // object.
2673 CurMBB->getParent()->getFrameInfo()->CreateVariableSizedObject();
2676 void SelectionDAGLowering::visitLoad(LoadInst &I) {
2677 const Value *SV = I.getOperand(0);
2678 SDValue Ptr = getValue(SV);
2680 const Type *Ty = I.getType();
2681 bool isVolatile = I.isVolatile();
2682 unsigned Alignment = I.getAlignment();
2684 SmallVector<MVT, 4> ValueVTs;
2685 SmallVector<uint64_t, 4> Offsets;
2686 ComputeValueVTs(TLI, Ty, ValueVTs, &Offsets);
2687 unsigned NumValues = ValueVTs.size();
2688 if (NumValues == 0)
2689 return;
2691 SDValue Root;
2692 bool ConstantMemory = false;
2693 if (I.isVolatile())
2694 // Serialize volatile loads with other side effects.
2695 Root = getRoot();
2696 else if (AA->pointsToConstantMemory(SV)) {
2697 // Do not serialize (non-volatile) loads of constant memory with anything.
2698 Root = DAG.getEntryNode();
2699 ConstantMemory = true;
2700 } else {
2701 // Do not serialize non-volatile loads against each other.
2702 Root = DAG.getRoot();
2703 }
2705 SmallVector<SDValue, 4> Values(NumValues);
2706 SmallVector<SDValue, 4> Chains(NumValues);
2707 MVT PtrVT = Ptr.getValueType();
2708 for (unsigned i = 0; i != NumValues; ++i) {
2709 SDValue L = DAG.getLoad(ValueVTs[i], Root,
2710 DAG.getNode(ISD::ADD, PtrVT, Ptr,
2711 DAG.getConstant(Offsets[i], PtrVT)),
2712 SV, Offsets[i],
2713 isVolatile, Alignment);
2714 Values[i] = L;
2715 Chains[i] = L.getValue(1);
2716 }
2718 if (!ConstantMemory) {
2719 SDValue Chain = DAG.getNode(ISD::TokenFactor, MVT::Other,
2720 &Chains[0], NumValues);
2721 if (isVolatile)
2722 DAG.setRoot(Chain);
2723 else
2724 PendingLoads.push_back(Chain);
2725 }
2727 setValue(&I, DAG.getNode(ISD::MERGE_VALUES,
2728 DAG.getVTList(&ValueVTs[0], NumValues),
2729 &Values[0], NumValues));
2733 void SelectionDAGLowering::visitStore(StoreInst &I) {
2734 Value *SrcV = I.getOperand(0);
2735 Value *PtrV = I.getOperand(1);
2737 SmallVector<MVT, 4> ValueVTs;
2738 SmallVector<uint64_t, 4> Offsets;
2739 ComputeValueVTs(TLI, SrcV->getType(), ValueVTs, &Offsets);
2740 unsigned NumValues = ValueVTs.size();
2741 if (NumValues == 0)
2742 return;
2744 // Get the lowered operands. Note that we do this after
2745 // checking if NumValues is zero, because with zero results
2746 // the operands won't have values in the map.
2747 SDValue Src = getValue(SrcV);
2748 SDValue Ptr = getValue(PtrV);
2750 SDValue Root = getRoot();
2751 SmallVector<SDValue, 4> Chains(NumValues);
2752 MVT PtrVT = Ptr.getValueType();
2753 bool isVolatile = I.isVolatile();
2754 unsigned Alignment = I.getAlignment();
2755 for (unsigned i = 0; i != NumValues; ++i)
2756 Chains[i] = DAG.getStore(Root, SDValue(Src.getNode(), Src.getResNo() + i),
2757 DAG.getNode(ISD::ADD, PtrVT, Ptr,
2758 DAG.getConstant(Offsets[i], PtrVT)),
2759 PtrV, Offsets[i],
2760 isVolatile, Alignment);
2762 DAG.setRoot(DAG.getNode(ISD::TokenFactor, MVT::Other, &Chains[0], NumValues));
2765 /// visitTargetIntrinsic - Lower a call of a target intrinsic to an INTRINSIC
2766 /// node.
2767 void SelectionDAGLowering::visitTargetIntrinsic(CallInst &I,
2768 unsigned Intrinsic) {
2769 bool HasChain = !I.doesNotAccessMemory();
2770 bool OnlyLoad = HasChain && I.onlyReadsMemory();
2772 // Build the operand list.
2773 SmallVector<SDValue, 8> Ops;
2774 if (HasChain) { // If this intrinsic has side-effects, chainify it.
2775 if (OnlyLoad) {
2776 // We don't need to serialize loads against other loads.
2777 Ops.push_back(DAG.getRoot());
2778 } else {
2779 Ops.push_back(getRoot());
2780 }
2781 }
2783 // Info is set by getTgtMemIntrinsic.
2784 TargetLowering::IntrinsicInfo Info;
2785 bool IsTgtIntrinsic = TLI.getTgtMemIntrinsic(Info, I, Intrinsic);
2787 // Add the intrinsic ID as an integer operand if it's not a target intrinsic.
2788 if (!IsTgtIntrinsic)
2789 Ops.push_back(DAG.getConstant(Intrinsic, TLI.getPointerTy()));
2791 // Add all operands of the call to the operand list.
2792 for (unsigned i = 1, e = I.getNumOperands(); i != e; ++i) {
2793 SDValue Op = getValue(I.getOperand(i));
2794 assert(TLI.isTypeLegal(Op.getValueType()) &&
2795 "Intrinsic uses a non-legal type?");
2799 std::vector<MVT> VTs;
2800 if (I.getType() != Type::VoidTy) {
2801 MVT VT = TLI.getValueType(I.getType());
2802 if (VT.isVector()) {
2803 const VectorType *DestTy = cast<VectorType>(I.getType());
2804 MVT EltVT = TLI.getValueType(DestTy->getElementType());
2806 VT = MVT::getVectorVT(EltVT, DestTy->getNumElements());
2807 assert(VT != MVT::Other && "Intrinsic uses a non-legal type?");
2808 }
2810 assert(TLI.isTypeLegal(VT) && "Intrinsic uses a non-legal type?");
2811 VTs.push_back(VT);
2812 }
2813 if (HasChain)
2814 VTs.push_back(MVT::Other);
2816 const MVT *VTList = DAG.getNodeValueTypes(VTs);
2818 // Create the node.
2819 SDValue Result;
2820 if (IsTgtIntrinsic) {
2821 // This is a target intrinsic that touches memory.
2822 Result = DAG.getMemIntrinsicNode(Info.opc, VTList, VTs.size(),
2823 &Ops[0], Ops.size(),
2824 Info.memVT, Info.ptrVal, Info.offset,
2825 Info.align, Info.vol,
2826 Info.readMem, Info.writeMem);
2827 }
2828 else if (!HasChain)
2829 Result = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, VTList, VTs.size(),
2830 &Ops[0], Ops.size());
2831 else if (I.getType() != Type::VoidTy)
2832 Result = DAG.getNode(ISD::INTRINSIC_W_CHAIN, VTList, VTs.size(),
2833 &Ops[0], Ops.size());
2834 else
2835 Result = DAG.getNode(ISD::INTRINSIC_VOID, VTList, VTs.size(),
2836 &Ops[0], Ops.size());
2838 if (HasChain) {
2839 SDValue Chain = Result.getValue(Result.getNode()->getNumValues()-1);
2840 if (OnlyLoad)
2841 PendingLoads.push_back(Chain);
2842 else
2843 DAG.setRoot(Chain);
2844 }
2845 if (I.getType() != Type::VoidTy) {
2846 if (const VectorType *PTy = dyn_cast<VectorType>(I.getType())) {
2847 MVT VT = TLI.getValueType(PTy);
2848 Result = DAG.getNode(ISD::BIT_CONVERT, VT, Result);
2849 }
2850 setValue(&I, Result);
2851 }
2852 }
2854 /// ExtractTypeInfo - Returns the type info, possibly bitcast, encoded in V.
2855 static GlobalVariable *ExtractTypeInfo(Value *V) {
2856 V = V->stripPointerCasts();
2857 GlobalVariable *GV = dyn_cast<GlobalVariable>(V);
2858 assert ((GV || isa<ConstantPointerNull>(V)) &&
2859 "TypeInfo must be a global variable or NULL");
2865 /// AddCatchInfo - Extract the personality and type infos from an eh.selector
2866 /// call, and add them to the specified machine basic block.
2867 void AddCatchInfo(CallInst &I, MachineModuleInfo *MMI,
2868 MachineBasicBlock *MBB) {
2869 // Inform the MachineModuleInfo of the personality for this landing pad.
2870 ConstantExpr *CE = cast<ConstantExpr>(I.getOperand(2));
2871 assert(CE->getOpcode() == Instruction::BitCast &&
2872 isa<Function>(CE->getOperand(0)) &&
2873 "Personality should be a function");
2874 MMI->addPersonality(MBB, cast<Function>(CE->getOperand(0)));
2876 // Gather all the type infos for this landing pad and pass them along to
2877 // MachineModuleInfo.
2878 std::vector<GlobalVariable *> TyInfo;
2879 unsigned N = I.getNumOperands();
2881 for (unsigned i = N - 1; i > 2; --i) {
2882 if (ConstantInt *CI = dyn_cast<ConstantInt>(I.getOperand(i))) {
2883 unsigned FilterLength = CI->getZExtValue();
2884 unsigned FirstCatch = i + FilterLength + !FilterLength;
2885 assert (FirstCatch <= N && "Invalid filter length");
2887 if (FirstCatch < N) {
2888 TyInfo.reserve(N - FirstCatch);
2889 for (unsigned j = FirstCatch; j < N; ++j)
2890 TyInfo.push_back(ExtractTypeInfo(I.getOperand(j)));
2891 MMI->addCatchTypeInfo(MBB, TyInfo);
2895 if (!FilterLength) {
2896 // Cleanup.
2897 MMI->addCleanup(MBB);
2898 } else {
2899 // Filter.
2900 TyInfo.reserve(FilterLength - 1);
2901 for (unsigned j = i + 1; j < FirstCatch; ++j)
2902 TyInfo.push_back(ExtractTypeInfo(I.getOperand(j)));
2903 MMI->addFilterTypeInfo(MBB, TyInfo);
2904 TyInfo.clear();
2905 }
2907 N = i;
2908 }
2909 }
2911 if (N > 3) {
2912 TyInfo.reserve(N - 3);
2913 for (unsigned j = 3; j < N; ++j)
2914 TyInfo.push_back(ExtractTypeInfo(I.getOperand(j)));
2915 MMI->addCatchTypeInfo(MBB, TyInfo);
2916 }
2917 }
2921 /// GetSignificand - Get the significand and build it into a floating-point
2922 /// number with exponent of 1:
2924 /// Op = (Op & 0x007fffff) | 0x3f800000;
2926 /// where Op is the hexadecimal representation of the floating-point value.
2927 static SDValue
2928 GetSignificand(SelectionDAG &DAG, SDValue Op) {
2929 SDValue t1 = DAG.getNode(ISD::AND, MVT::i32, Op,
2930 DAG.getConstant(0x007fffff, MVT::i32));
2931 SDValue t2 = DAG.getNode(ISD::OR, MVT::i32, t1,
2932 DAG.getConstant(0x3f800000, MVT::i32));
2933 return DAG.getNode(ISD::BIT_CONVERT, MVT::f32, t2);
2936 /// GetExponent - Get the exponent:
2938 /// (float)((Op >> 23) - 127);
2939 ///
2940 /// where Op is the hexadecimal representation of the floating-point value.
2941 static SDValue
2942 GetExponent(SelectionDAG &DAG, SDValue Op) {
2943 SDValue t1 = DAG.getNode(ISD::SRL, MVT::i32, Op,
2944 DAG.getConstant(23, MVT::i32));
2945 SDValue t2 = DAG.getNode(ISD::SUB, MVT::i32, t1,
2946 DAG.getConstant(127, MVT::i32));
2947 return DAG.getNode(ISD::UINT_TO_FP, MVT::f32, t2);
2950 /// getF32Constant - Get a 32-bit floating-point constant.
2951 static SDValue
2952 getF32Constant(SelectionDAG &DAG, unsigned Flt) {
2953 return DAG.getConstantFP(APFloat(APInt(32, Flt)), MVT::f32);
2954 }
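// Worked example (illustrative only, not from the original source): for Op
// holding the bits of 6.0f (0x40c00000), GetSignificand computes
//   (0x40c00000 & 0x007fffff) | 0x3f800000 = 0x3fc00000 = 1.5f
// and GetExponent computes (0x40c00000 >> 23) - 127 = 129 - 127 = 2.0f,
// recovering 6.0 = 1.5 * 2^2.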
2956 /// Inlined utility function to implement binary input atomic intrinsics for
2957 /// visitIntrinsicCall: I is a call instruction
2958 /// Op is the associated NodeType for I
2959 SDValue
2960 SelectionDAGLowering::implVisitBinaryAtomic(CallInst& I, ISD::NodeType Op) {
2961 SDValue Root = getRoot();
2962 SDValue L = DAG.getAtomic(Op, Root,
2963 getValue(I.getOperand(1)),
2964 getValue(I.getOperand(2)),
2965 I.getOperand(1));
2966 setValue(&I, L);
2967 DAG.setRoot(L.getValue(1));
2968 return 0;
2969 }
2971 // implVisitAluOverflow - Lower an overflow intrinsic.
2972 SDValue
2973 SelectionDAGLowering::implVisitAluOverflow(CallInst &I, ISD::NodeType Op) {
2974 SDValue Op1 = getValue(I.getOperand(1));
2975 SDValue Op2 = getValue(I.getOperand(2));
2977 MVT ValueVTs[] = { Op1.getValueType(), MVT::i1 };
2978 SDValue Ops[] = { Op1, Op2 };
2980 SDValue Result =
2981 DAG.getNode(Op,
2982 DAG.getVTList(&ValueVTs[0], 2), &Ops[0], 2);
2984 setValue(&I, Result);
2985 return Result;
2986 }
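// Worked example (illustrative only, not from the original source): a call to
// llvm.sadd.with.overflow.i32(%a, %b) reaches here with Op = ISD::SADDO; the
// node yields two results, { i32 sum, i1 overflow }, matching the intrinsic's
// { i32, i1 } return aggregate.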
2988 /// visitExp - Lower an exp intrinsic. Handles the special sequences for
2989 /// limited-precision mode.
2990 void
2991 SelectionDAGLowering::visitExp(CallInst &I) {
2992 SDValue result;
2994 if (getValue(I.getOperand(1)).getValueType() == MVT::f32 &&
2995 LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
2996 SDValue Op = getValue(I.getOperand(1));
2998 // Put the exponent in the right bit position for later addition to the
2999 // final result:
3000 //
3001 // #define LOG2OFe 1.4426950f
3002 // IntegerPartOfX = ((int32_t)(X * LOG2OFe));
3003 SDValue t0 = DAG.getNode(ISD::FMUL, MVT::f32, Op,
3004 getF32Constant(DAG, 0x3fb8aa3b));
3005 SDValue IntegerPartOfX = DAG.getNode(ISD::FP_TO_SINT, MVT::i32, t0);
3007 // FractionalPartOfX = (X * LOG2OFe) - (float)IntegerPartOfX;
3008 SDValue t1 = DAG.getNode(ISD::SINT_TO_FP, MVT::f32, IntegerPartOfX);
3009 SDValue X = DAG.getNode(ISD::FSUB, MVT::f32, t0, t1);
3011 // IntegerPartOfX <<= 23;
3012 IntegerPartOfX = DAG.getNode(ISD::SHL, MVT::i32, IntegerPartOfX,
3013 DAG.getConstant(23, MVT::i32));
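// Worked example (illustrative only, not from the original source): adding
// n << 23 to the bit pattern of a float in [1,2) bumps its biased exponent
// by n, i.e. multiplies it by 2^n. For the bits of 1.5f and IntegerPartOfX = 3:
//   0x3fc00000 + (3 << 23) = 0x41400000 = 12.0f = 1.5 * 2^3,
// which is exactly how the integer ADD below combines the two parts.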
3015 if (LimitFloatPrecision <= 6) {
3016 // For floating-point precision of 6:
3018 // TwoToFractionalPartOfX =
3019 // 0.997535578f +
3020 // (0.735607626f + 0.252464424f * x) * x;
3022 // error 0.0144103317, which is 6 bits
3023 SDValue t2 = DAG.getNode(ISD::FMUL, MVT::f32, X,
3024 getF32Constant(DAG, 0x3e814304));
3025 SDValue t3 = DAG.getNode(ISD::FADD, MVT::f32, t2,
3026 getF32Constant(DAG, 0x3f3c50c8));
3027 SDValue t4 = DAG.getNode(ISD::FMUL, MVT::f32, t3, X);
3028 SDValue t5 = DAG.getNode(ISD::FADD, MVT::f32, t4,
3029 getF32Constant(DAG, 0x3f7f5e7e));
3030 SDValue TwoToFracPartOfX = DAG.getNode(ISD::BIT_CONVERT, MVT::i32, t5);
3032 // Add the exponent into the result in integer domain.
3033 SDValue t6 = DAG.getNode(ISD::ADD, MVT::i32,
3034 TwoToFracPartOfX, IntegerPartOfX);
3036 result = DAG.getNode(ISD::BIT_CONVERT, MVT::f32, t6);
3037 } else if (LimitFloatPrecision > 6 && LimitFloatPrecision <= 12) {
3038 // For floating-point precision of 12:
3040 // TwoToFractionalPartOfX =
3041 // 0.999892986f +
3042 // (0.696457318f +
3043 // (0.224338339f + 0.792043434e-1f * x) * x) * x;
3045 // 0.000107046256 error, which is 13 to 14 bits
3046 SDValue t2 = DAG.getNode(ISD::FMUL, MVT::f32, X,
3047 getF32Constant(DAG, 0x3da235e3));
3048 SDValue t3 = DAG.getNode(ISD::FADD, MVT::f32, t2,
3049 getF32Constant(DAG, 0x3e65b8f3));
3050 SDValue t4 = DAG.getNode(ISD::FMUL, MVT::f32, t3, X);
3051 SDValue t5 = DAG.getNode(ISD::FADD, MVT::f32, t4,
3052 getF32Constant(DAG, 0x3f324b07));
3053 SDValue t6 = DAG.getNode(ISD::FMUL, MVT::f32, t5, X);
3054 SDValue t7 = DAG.getNode(ISD::FADD, MVT::f32, t6,
3055 getF32Constant(DAG, 0x3f7ff8fd));
3056 SDValue TwoToFracPartOfX = DAG.getNode(ISD::BIT_CONVERT, MVT::i32, t7);
3058 // Add the exponent into the result in integer domain.
3059 SDValue t8 = DAG.getNode(ISD::ADD, MVT::i32,
3060 TwoToFracPartOfX, IntegerPartOfX);
3062 result = DAG.getNode(ISD::BIT_CONVERT, MVT::f32, t8);
3063 } else { // LimitFloatPrecision > 12 && LimitFloatPrecision <= 18
3064 // For floating-point precision of 18:
3066 // TwoToFractionalPartOfX =
3067 // 0.999999982f +
3068 // (0.693148872f +
3069 // (0.240227044f +
3070 // (0.554906021e-1f +
3071 // (0.961591928e-2f +
3072 // (0.136028312e-2f + 0.157059148e-3f *x)*x)*x)*x)*x)*x;
3074 // error 2.47208000*10^(-7), which is better than 18 bits
3075 SDValue t2 = DAG.getNode(ISD::FMUL, MVT::f32, X,
3076 getF32Constant(DAG, 0x3924b03e));
3077 SDValue t3 = DAG.getNode(ISD::FADD, MVT::f32, t2,
3078 getF32Constant(DAG, 0x3ab24b87));
3079 SDValue t4 = DAG.getNode(ISD::FMUL, MVT::f32, t3, X);
3080 SDValue t5 = DAG.getNode(ISD::FADD, MVT::f32, t4,
3081 getF32Constant(DAG, 0x3c1d8c17));
3082 SDValue t6 = DAG.getNode(ISD::FMUL, MVT::f32, t5, X);
3083 SDValue t7 = DAG.getNode(ISD::FADD, MVT::f32, t6,
3084 getF32Constant(DAG, 0x3d634a1d));
3085 SDValue t8 = DAG.getNode(ISD::FMUL, MVT::f32, t7, X);
3086 SDValue t9 = DAG.getNode(ISD::FADD, MVT::f32, t8,
3087 getF32Constant(DAG, 0x3e75fe14));
3088 SDValue t10 = DAG.getNode(ISD::FMUL, MVT::f32, t9, X);
3089 SDValue t11 = DAG.getNode(ISD::FADD, MVT::f32, t10,
3090 getF32Constant(DAG, 0x3f317234));
3091 SDValue t12 = DAG.getNode(ISD::FMUL, MVT::f32, t11, X);
3092 SDValue t13 = DAG.getNode(ISD::FADD, MVT::f32, t12,
3093 getF32Constant(DAG, 0x3f800000));
3094 SDValue TwoToFracPartOfX = DAG.getNode(ISD::BIT_CONVERT, MVT::i32, t13);
3096 // Add the exponent into the result in integer domain.
3097 SDValue t14 = DAG.getNode(ISD::ADD, MVT::i32,
3098 TwoToFracPartOfX, IntegerPartOfX);
3100 result = DAG.getNode(ISD::BIT_CONVERT, MVT::f32, t14);
3101 }
3102 } else {
3103 // No special expansion.
3104 result = DAG.getNode(ISD::FEXP,
3105 getValue(I.getOperand(1)).getValueType(),
3106 getValue(I.getOperand(1)));
3107 }
3109 setValue(&I, result);
3110 }
3112 /// visitLog - Lower a log intrinsic. Handles the special sequences for
3113 /// limited-precision mode.
3114 void
3115 SelectionDAGLowering::visitLog(CallInst &I) {
3116 SDValue result;
3118 if (getValue(I.getOperand(1)).getValueType() == MVT::f32 &&
3119 LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
3120 SDValue Op = getValue(I.getOperand(1));
3121 SDValue Op1 = DAG.getNode(ISD::BIT_CONVERT, MVT::i32, Op);
3123 // Scale the exponent by log(2) [0.69314718f].
3124 SDValue Exp = GetExponent(DAG, Op1);
3125 SDValue LogOfExponent = DAG.getNode(ISD::FMUL, MVT::f32, Exp,
3126 getF32Constant(DAG, 0x3f317218));
3128 // Get the significand and build it into a floating-point number with
3129 // exponent of 1.
3130 SDValue X = GetSignificand(DAG, Op1);
3132 if (LimitFloatPrecision <= 6) {
3133 // For floating-point precision of 6:
3134 //
3135 // LogOfMantissa =
3136 // -1.1609546f +
3137 // (1.4034025f - 0.23903021f * x) * x;
3139 // error 0.0034276066, which is better than 8 bits
3140 SDValue t0 = DAG.getNode(ISD::FMUL, MVT::f32, X,
3141 getF32Constant(DAG, 0xbe74c456));
3142 SDValue t1 = DAG.getNode(ISD::FADD, MVT::f32, t0,
3143 getF32Constant(DAG, 0x3fb3a2b1));
3144 SDValue t2 = DAG.getNode(ISD::FMUL, MVT::f32, t1, X);
3145 SDValue LogOfMantissa = DAG.getNode(ISD::FSUB, MVT::f32, t2,
3146 getF32Constant(DAG, 0x3f949a29));
3148 result = DAG.getNode(ISD::FADD, MVT::f32, LogOfExponent, LogOfMantissa);
3149 } else if (LimitFloatPrecision > 6 && LimitFloatPrecision <= 12) {
3150 // For floating-point precision of 12:
3151 //
3152 // LogOfMantissa =
3153 // -1.7417939f +
3154 // (2.8212026f +
3155 // (-1.4699568f +
3156 // (0.44717955f - 0.56570851e-1f * x) * x) * x) * x;
3158 // error 0.000061011436, which is 14 bits
3159 SDValue t0 = DAG.getNode(ISD::FMUL, MVT::f32, X,
3160 getF32Constant(DAG, 0xbd67b6d6));
3161 SDValue t1 = DAG.getNode(ISD::FADD, MVT::f32, t0,
3162 getF32Constant(DAG, 0x3ee4f4b8));
3163 SDValue t2 = DAG.getNode(ISD::FMUL, MVT::f32, t1, X);
3164 SDValue t3 = DAG.getNode(ISD::FSUB, MVT::f32, t2,
3165 getF32Constant(DAG, 0x3fbc278b));
3166 SDValue t4 = DAG.getNode(ISD::FMUL, MVT::f32, t3, X);
3167 SDValue t5 = DAG.getNode(ISD::FADD, MVT::f32, t4,
3168 getF32Constant(DAG, 0x40348e95));
3169 SDValue t6 = DAG.getNode(ISD::FMUL, MVT::f32, t5, X);
3170 SDValue LogOfMantissa = DAG.getNode(ISD::FSUB, MVT::f32, t6,
3171 getF32Constant(DAG, 0x3fdef31a));
3173 result = DAG.getNode(ISD::FADD, MVT::f32, LogOfExponent, LogOfMantissa);
3174 } else { // LimitFloatPrecision > 12 && LimitFloatPrecision <= 18
3175 // For floating-point precision of 18:
3176 //
3177 // LogOfMantissa =
3178 // -2.1072184f +
3179 // (4.2372794f +
3180 // (-3.7029485f +
3181 // (2.2781945f +
3182 // (-0.87823314f +
3183 // (0.19073739f - 0.17809712e-1f * x) * x) * x) * x) * x)*x;
3185 // error 0.0000023660568, which is better than 18 bits
3186 SDValue t0 = DAG.getNode(ISD::FMUL, MVT::f32, X,
3187 getF32Constant(DAG, 0xbc91e5ac));
3188 SDValue t1 = DAG.getNode(ISD::FADD, MVT::f32, t0,
3189 getF32Constant(DAG, 0x3e4350aa));
3190 SDValue t2 = DAG.getNode(ISD::FMUL, MVT::f32, t1, X);
3191 SDValue t3 = DAG.getNode(ISD::FSUB, MVT::f32, t2,
3192 getF32Constant(DAG, 0x3f60d3e3));
3193 SDValue t4 = DAG.getNode(ISD::FMUL, MVT::f32, t3, X);
3194 SDValue t5 = DAG.getNode(ISD::FADD, MVT::f32, t4,
3195 getF32Constant(DAG, 0x4011cdf0));
3196 SDValue t6 = DAG.getNode(ISD::FMUL, MVT::f32, t5, X);
3197 SDValue t7 = DAG.getNode(ISD::FSUB, MVT::f32, t6,
3198 getF32Constant(DAG, 0x406cfd1c));
3199 SDValue t8 = DAG.getNode(ISD::FMUL, MVT::f32, t7, X);
3200 SDValue t9 = DAG.getNode(ISD::FADD, MVT::f32, t8,
3201 getF32Constant(DAG, 0x408797cb));
3202 SDValue t10 = DAG.getNode(ISD::FMUL, MVT::f32, t9, X);
3203 SDValue LogOfMantissa = DAG.getNode(ISD::FSUB, MVT::f32, t10,
3204 getF32Constant(DAG, 0x4006dcab));
3206 result = DAG.getNode(ISD::FADD, MVT::f32, LogOfExponent, LogOfMantissa);
3207 }
3208 } else {
3209 // No special expansion.
3210 result = DAG.getNode(ISD::FLOG,
3211 getValue(I.getOperand(1)).getValueType(),
3212 getValue(I.getOperand(1)));
3213 }
3215 setValue(&I, result);
3216 }
3218 /// visitLog2 - Lower a log2 intrinsic. Handles the special sequences for
3219 /// limited-precision mode.
3220 void
3221 SelectionDAGLowering::visitLog2(CallInst &I) {
3222 SDValue result;
3224 if (getValue(I.getOperand(1)).getValueType() == MVT::f32 &&
3225 LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
3226 SDValue Op = getValue(I.getOperand(1));
3227 SDValue Op1 = DAG.getNode(ISD::BIT_CONVERT, MVT::i32, Op);
3229 // Get the exponent.
3230 SDValue LogOfExponent = GetExponent(DAG, Op1);
3232 // Get the significand and build it into a floating-point number with
3233 // exponent of 1.
3234 SDValue X = GetSignificand(DAG, Op1);
3236 // Different possible minimax approximations of significand in
3237 // floating-point for various degrees of accuracy over [1,2].
3238 if (LimitFloatPrecision <= 6) {
3239 // For floating-point precision of 6:
3241 // Log2ofMantissa = -1.6749035f + (2.0246817f - .34484768f * x) * x;
3243 // error 0.0049451742, which is more than 7 bits
3244 SDValue t0 = DAG.getNode(ISD::FMUL, MVT::f32, X,
3245 getF32Constant(DAG, 0xbeb08fe0));
3246 SDValue t1 = DAG.getNode(ISD::FADD, MVT::f32, t0,
3247 getF32Constant(DAG, 0x40019463));
3248 SDValue t2 = DAG.getNode(ISD::FMUL, MVT::f32, t1, X);
3249 SDValue Log2ofMantissa = DAG.getNode(ISD::FSUB, MVT::f32, t2,
3250 getF32Constant(DAG, 0x3fd6633d));
3252 result = DAG.getNode(ISD::FADD, MVT::f32, LogOfExponent, Log2ofMantissa);
3253 } else if (LimitFloatPrecision > 6 && LimitFloatPrecision <= 12) {
3254 // For floating-point precision of 12:
3255 //
3256 // Log2ofMantissa =
3257 // -2.51285454f +
3258 // (4.07009056f +
3259 // (-2.12067489f +
3260 // (.645142248f - 0.816157886e-1f * x) * x) * x) * x;
3262 // error 0.0000876136000, which is better than 13 bits
3263 SDValue t0 = DAG.getNode(ISD::FMUL, MVT::f32, X,
3264 getF32Constant(DAG, 0xbda7262e));
3265 SDValue t1 = DAG.getNode(ISD::FADD, MVT::f32, t0,
3266 getF32Constant(DAG, 0x3f25280b));
3267 SDValue t2 = DAG.getNode(ISD::FMUL, MVT::f32, t1, X);
3268 SDValue t3 = DAG.getNode(ISD::FSUB, MVT::f32, t2,
3269 getF32Constant(DAG, 0x4007b923));
3270 SDValue t4 = DAG.getNode(ISD::FMUL, MVT::f32, t3, X);
3271 SDValue t5 = DAG.getNode(ISD::FADD, MVT::f32, t4,
3272 getF32Constant(DAG, 0x40823e2f));
3273 SDValue t6 = DAG.getNode(ISD::FMUL, MVT::f32, t5, X);
3274 SDValue Log2ofMantissa = DAG.getNode(ISD::FSUB, MVT::f32, t6,
3275 getF32Constant(DAG, 0x4020d29c));
3277 result = DAG.getNode(ISD::FADD, MVT::f32, LogOfExponent, Log2ofMantissa);
3278 } else { // LimitFloatPrecision > 12 && LimitFloatPrecision <= 18
3279 // For floating-point precision of 18:
3280 //
3281 // Log2ofMantissa =
3282 // -3.0400495f +
3283 // (6.1129976f +
3284 // (-5.3420409f +
3285 // (3.2865683f +
3286 // (-1.2669343f +
3287 // (0.27515199f -
3288 // 0.25691327e-1f * x) * x) * x) * x) * x) * x;
3290 // error 0.0000018516, which is better than 18 bits
3291 SDValue t0 = DAG.getNode(ISD::FMUL, MVT::f32, X,
3292 getF32Constant(DAG, 0xbcd2769e));
3293 SDValue t1 = DAG.getNode(ISD::FADD, MVT::f32, t0,
3294 getF32Constant(DAG, 0x3e8ce0b9));
3295 SDValue t2 = DAG.getNode(ISD::FMUL, MVT::f32, t1, X);
3296 SDValue t3 = DAG.getNode(ISD::FSUB, MVT::f32, t2,
3297 getF32Constant(DAG, 0x3fa22ae7));
3298 SDValue t4 = DAG.getNode(ISD::FMUL, MVT::f32, t3, X);
3299 SDValue t5 = DAG.getNode(ISD::FADD, MVT::f32, t4,
3300 getF32Constant(DAG, 0x40525723));
3301 SDValue t6 = DAG.getNode(ISD::FMUL, MVT::f32, t5, X);
3302 SDValue t7 = DAG.getNode(ISD::FSUB, MVT::f32, t6,
3303 getF32Constant(DAG, 0x40aaf200));
3304 SDValue t8 = DAG.getNode(ISD::FMUL, MVT::f32, t7, X);
3305 SDValue t9 = DAG.getNode(ISD::FADD, MVT::f32, t8,
3306 getF32Constant(DAG, 0x40c39dad));
3307 SDValue t10 = DAG.getNode(ISD::FMUL, MVT::f32, t9, X);
3308 SDValue Log2ofMantissa = DAG.getNode(ISD::FSUB, MVT::f32, t10,
3309 getF32Constant(DAG, 0x4042902c));
3311 result = DAG.getNode(ISD::FADD, MVT::f32, LogOfExponent, Log2ofMantissa);
3312 }
3313 } else {
3314 // No special expansion.
3315 result = DAG.getNode(ISD::FLOG2,
3316 getValue(I.getOperand(1)).getValueType(),
3317 getValue(I.getOperand(1)));
3320 setValue(&I, result);
3323 /// visitLog10 - Lower a log10 intrinsic. Handles the special sequences for
3324 /// limited-precision mode.
3325 void
3326 SelectionDAGLowering::visitLog10(CallInst &I) {
3327 SDValue result;
3329 if (getValue(I.getOperand(1)).getValueType() == MVT::f32 &&
3330 LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
3331 SDValue Op = getValue(I.getOperand(1));
3332 SDValue Op1 = DAG.getNode(ISD::BIT_CONVERT, MVT::i32, Op);
3334 // Scale the exponent by log10(2) [0.30102999f].
3335 SDValue Exp = GetExponent(DAG, Op1);
3336 SDValue LogOfExponent = DAG.getNode(ISD::FMUL, MVT::f32, Exp,
3337 getF32Constant(DAG, 0x3e9a209a));
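// With x = 2^e * m this computes log10(x) = log10(2)*e + log10(m), where
// 0x3e9a209a is the bit pattern of log10(2) ~= 0.30102999f. For example,
// x = 8.0f has e = 3 and m = 1.0f, giving log10(8) ~= 3 * 0.30103 = 0.90309.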
3339 // Get the significand and build it into a floating-point number with
3340 // exponent of 1.
3341 SDValue X = GetSignificand(DAG, Op1);
3343 if (LimitFloatPrecision <= 6) {
3344 // For floating-point precision of 6:
3346 //   Log10ofMantissa =
3347 //     -0.50419619f +
3348 //       (0.60948995f - 0.10380950f * x) * x;
3350 // error 0.0014886165, which is 6 bits
3351 SDValue t0 = DAG.getNode(ISD::FMUL, MVT::f32, X,
3352 getF32Constant(DAG, 0xbdd49a13));
3353 SDValue t1 = DAG.getNode(ISD::FADD, MVT::f32, t0,
3354 getF32Constant(DAG, 0x3f1c0789));
3355 SDValue t2 = DAG.getNode(ISD::FMUL, MVT::f32, t1, X);
3356 SDValue Log10ofMantissa = DAG.getNode(ISD::FSUB, MVT::f32, t2,
3357 getF32Constant(DAG, 0x3f011300));
3359 result = DAG.getNode(ISD::FADD, MVT::f32, LogOfExponent, Log10ofMantissa);
3360 } else if (LimitFloatPrecision > 6 && LimitFloatPrecision <= 12) {
3361 // For floating-point precision of 12:
3363 //   Log10ofMantissa =
3364 //     -0.64831180f +
3365 //       (0.91751397f +
3366 //         (-0.31664806f + 0.47637168e-1f * x) * x) * x;
3368 // error 0.00019228036, which is better than 12 bits
3369 SDValue t0 = DAG.getNode(ISD::FMUL, MVT::f32, X,
3370 getF32Constant(DAG, 0x3d431f31));
3371 SDValue t1 = DAG.getNode(ISD::FSUB, MVT::f32, t0,
3372 getF32Constant(DAG, 0x3ea21fb2));
3373 SDValue t2 = DAG.getNode(ISD::FMUL, MVT::f32, t1, X);
3374 SDValue t3 = DAG.getNode(ISD::FADD, MVT::f32, t2,
3375 getF32Constant(DAG, 0x3f6ae232));
3376 SDValue t4 = DAG.getNode(ISD::FMUL, MVT::f32, t3, X);
3377 SDValue Log10ofMantissa = DAG.getNode(ISD::FSUB, MVT::f32, t4,
3378 getF32Constant(DAG, 0x3f25f7c3));
3380 result = DAG.getNode(ISD::FADD, MVT::f32, LogOfExponent, Log10ofMantissa);
3381 } else { // LimitFloatPrecision > 12 && LimitFloatPrecision <= 18
3382 // For floating-point precision of 18:
3384 //   Log10ofMantissa =
3385 //     -0.84299375f +
3386 //       (1.5327582f +
3387 //         (-1.0688956f +
3388 //           (0.49102474f +
3389 //             (-0.12539807f + 0.13508273e-1f * x) * x) * x) * x) * x;
3391 // error 0.0000037995730, which is better than 18 bits
3392 SDValue t0 = DAG.getNode(ISD::FMUL, MVT::f32, X,
3393 getF32Constant(DAG, 0x3c5d51ce));
3394 SDValue t1 = DAG.getNode(ISD::FSUB, MVT::f32, t0,
3395 getF32Constant(DAG, 0x3e00685a));
3396 SDValue t2 = DAG.getNode(ISD::FMUL, MVT::f32, t1, X);
3397 SDValue t3 = DAG.getNode(ISD::FADD, MVT::f32, t2,
3398 getF32Constant(DAG, 0x3efb6798));
3399 SDValue t4 = DAG.getNode(ISD::FMUL, MVT::f32, t3, X);
3400 SDValue t5 = DAG.getNode(ISD::FSUB, MVT::f32, t4,
3401 getF32Constant(DAG, 0x3f88d192));
3402 SDValue t6 = DAG.getNode(ISD::FMUL, MVT::f32, t5, X);
3403 SDValue t7 = DAG.getNode(ISD::FADD, MVT::f32, t6,
3404 getF32Constant(DAG, 0x3fc4316c));
3405 SDValue t8 = DAG.getNode(ISD::FMUL, MVT::f32, t7, X);
3406 SDValue Log10ofMantissa = DAG.getNode(ISD::FSUB, MVT::f32, t8,
3407 getF32Constant(DAG, 0x3f57ce70));
3409 result = DAG.getNode(ISD::FADD, MVT::f32, LogOfExponent, Log10ofMantissa);
3410 }
3411 } else {
3412 // No special expansion.
3413 result = DAG.getNode(ISD::FLOG10,
3414 getValue(I.getOperand(1)).getValueType(),
3415 getValue(I.getOperand(1)));
3418 setValue(&I, result);
3421 /// visitExp2 - Lower an exp2 intrinsic. Handles the special sequences for
3422 /// limited-precision mode.
3423 void
3424 SelectionDAGLowering::visitExp2(CallInst &I) {
3425 SDValue result;
3427 if (getValue(I.getOperand(1)).getValueType() == MVT::f32 &&
3428 LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
3429 SDValue Op = getValue(I.getOperand(1));
3431 SDValue IntegerPartOfX = DAG.getNode(ISD::FP_TO_SINT, MVT::i32, Op);
3433 // FractionalPartOfX = x - (float)IntegerPartOfX;
3434 SDValue t1 = DAG.getNode(ISD::SINT_TO_FP, MVT::f32, IntegerPartOfX);
3435 SDValue X = DAG.getNode(ISD::FSUB, MVT::f32, Op, t1);
3437 // IntegerPartOfX <<= 23;
3438 IntegerPartOfX = DAG.getNode(ISD::SHL, MVT::i32, IntegerPartOfX,
3439 DAG.getConstant(23, MVT::i32));
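// Bits 23..30 of an IEEE-754 single hold the biased exponent, so the shifted
// integer part can later be integer-added to the bit pattern of
// 2^FractionalPartOfX: adding n in the exponent field multiplies a positive,
// normal float by 2^n. This composes the final result
// 2^x = 2^IntegerPartOfX * 2^FractionalPartOfX without an FP multiply.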
3441 if (LimitFloatPrecision <= 6) {
3442 // For floating-point precision of 6:
3444 //   TwoToFractionalPartOfX =
3445 //     0.997535578f +
3446 //       (0.735607626f + 0.252464424f * x) * x;
3448 // error 0.0144103317, which is 6 bits
3449 SDValue t2 = DAG.getNode(ISD::FMUL, MVT::f32, X,
3450 getF32Constant(DAG, 0x3e814304));
3451 SDValue t3 = DAG.getNode(ISD::FADD, MVT::f32, t2,
3452 getF32Constant(DAG, 0x3f3c50c8));
3453 SDValue t4 = DAG.getNode(ISD::FMUL, MVT::f32, t3, X);
3454 SDValue t5 = DAG.getNode(ISD::FADD, MVT::f32, t4,
3455 getF32Constant(DAG, 0x3f7f5e7e));
3456 SDValue t6 = DAG.getNode(ISD::BIT_CONVERT, MVT::i32, t5);
3457 SDValue TwoToFractionalPartOfX =
3458 DAG.getNode(ISD::ADD, MVT::i32, t6, IntegerPartOfX);
3460 result = DAG.getNode(ISD::BIT_CONVERT, MVT::f32, TwoToFractionalPartOfX);
3461 } else if (LimitFloatPrecision > 6 && LimitFloatPrecision <= 12) {
3462 // For floating-point precision of 12:
3464 //   TwoToFractionalPartOfX =
3465 //     0.999892986f +
3466 //       (0.696457318f +
3467 //         (0.224338339f + 0.792043434e-1f * x) * x) * x;
3469 // error 0.000107046256, which is 13 to 14 bits
3470 SDValue t2 = DAG.getNode(ISD::FMUL, MVT::f32, X,
3471 getF32Constant(DAG, 0x3da235e3));
3472 SDValue t3 = DAG.getNode(ISD::FADD, MVT::f32, t2,
3473 getF32Constant(DAG, 0x3e65b8f3));
3474 SDValue t4 = DAG.getNode(ISD::FMUL, MVT::f32, t3, X);
3475 SDValue t5 = DAG.getNode(ISD::FADD, MVT::f32, t4,
3476 getF32Constant(DAG, 0x3f324b07));
3477 SDValue t6 = DAG.getNode(ISD::FMUL, MVT::f32, t5, X);
3478 SDValue t7 = DAG.getNode(ISD::FADD, MVT::f32, t6,
3479 getF32Constant(DAG, 0x3f7ff8fd));
3480 SDValue t8 = DAG.getNode(ISD::BIT_CONVERT, MVT::i32, t7);
3481 SDValue TwoToFractionalPartOfX =
3482 DAG.getNode(ISD::ADD, MVT::i32, t8, IntegerPartOfX);
3484 result = DAG.getNode(ISD::BIT_CONVERT, MVT::f32, TwoToFractionalPartOfX);
3485 } else { // LimitFloatPrecision > 12 && LimitFloatPrecision <= 18
3486 // For floating-point precision of 18:
3488 //   TwoToFractionalPartOfX =
3489 //     0.999999982f +
3490 //       (0.693148872f +
3491 //         (0.240227044f +
3492 //           (0.554906021e-1f +
3493 //             (0.961591928e-2f +
3494 //               (0.136028312e-2f + 0.157059148e-3f *x)*x)*x)*x)*x)*x;
3495 // error 2.47208000*10^(-7), which is better than 18 bits
3496 SDValue t2 = DAG.getNode(ISD::FMUL, MVT::f32, X,
3497 getF32Constant(DAG, 0x3924b03e));
3498 SDValue t3 = DAG.getNode(ISD::FADD, MVT::f32, t2,
3499 getF32Constant(DAG, 0x3ab24b87));
3500 SDValue t4 = DAG.getNode(ISD::FMUL, MVT::f32, t3, X);
3501 SDValue t5 = DAG.getNode(ISD::FADD, MVT::f32, t4,
3502 getF32Constant(DAG, 0x3c1d8c17));
3503 SDValue t6 = DAG.getNode(ISD::FMUL, MVT::f32, t5, X);
3504 SDValue t7 = DAG.getNode(ISD::FADD, MVT::f32, t6,
3505 getF32Constant(DAG, 0x3d634a1d));
3506 SDValue t8 = DAG.getNode(ISD::FMUL, MVT::f32, t7, X);
3507 SDValue t9 = DAG.getNode(ISD::FADD, MVT::f32, t8,
3508 getF32Constant(DAG, 0x3e75fe14));
3509 SDValue t10 = DAG.getNode(ISD::FMUL, MVT::f32, t9, X);
3510 SDValue t11 = DAG.getNode(ISD::FADD, MVT::f32, t10,
3511 getF32Constant(DAG, 0x3f317234));
3512 SDValue t12 = DAG.getNode(ISD::FMUL, MVT::f32, t11, X);
3513 SDValue t13 = DAG.getNode(ISD::FADD, MVT::f32, t12,
3514 getF32Constant(DAG, 0x3f800000));
3515 SDValue t14 = DAG.getNode(ISD::BIT_CONVERT, MVT::i32, t13);
3516 SDValue TwoToFractionalPartOfX =
3517 DAG.getNode(ISD::ADD, MVT::i32, t14, IntegerPartOfX);
3519 result = DAG.getNode(ISD::BIT_CONVERT, MVT::f32, TwoToFractionalPartOfX);
3520 }
3521 } else {
3522 // No special expansion.
3523 result = DAG.getNode(ISD::FEXP2,
3524 getValue(I.getOperand(1)).getValueType(),
3525 getValue(I.getOperand(1)));
3528 setValue(&I, result);
3531 /// visitPow - Lower a pow intrinsic. Handles the special sequences for
3532 /// limited-precision mode with x == 10.0f.
3533 void
3534 SelectionDAGLowering::visitPow(CallInst &I) {
3535 SDValue result;
3536 Value *Val = I.getOperand(1);
3537 bool IsExp10 = false;
3539 if (getValue(Val).getValueType() == MVT::f32 &&
3540 getValue(I.getOperand(2)).getValueType() == MVT::f32 &&
3541 LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
3542 if (Constant *C = const_cast<Constant*>(dyn_cast<Constant>(Val))) {
3543 if (ConstantFP *CFP = dyn_cast<ConstantFP>(C)) {
3544 APFloat Ten(10.0f);
3545 IsExp10 = CFP->getValueAPF().bitwiseIsEqual(Ten);
3550 if (IsExp10 && LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
3551 SDValue Op = getValue(I.getOperand(2));
3553 // Put the exponent in the right bit position for later addition to the
3554 // final result:
3556 // #define LOG2OF10 3.3219281f
3557 // IntegerPartOfX = (int32_t)(x * LOG2OF10);
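// This path relies on the identity pow(10.0, x) = exp2(x * log2(10));
// 0x40549a78 below is the bit pattern of LOG2OF10 = 3.3219281f, and the
// remainder of the sequence mirrors visitExp2 applied to x * LOG2OF10.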
3558 SDValue t0 = DAG.getNode(ISD::FMUL, MVT::f32, Op,
3559 getF32Constant(DAG, 0x40549a78));
3560 SDValue IntegerPartOfX = DAG.getNode(ISD::FP_TO_SINT, MVT::i32, t0);
3562 // FractionalPartOfX = x - (float)IntegerPartOfX;
3563 SDValue t1 = DAG.getNode(ISD::SINT_TO_FP, MVT::f32, IntegerPartOfX);
3564 SDValue X = DAG.getNode(ISD::FSUB, MVT::f32, t0, t1);
3566 // IntegerPartOfX <<= 23;
3567 IntegerPartOfX = DAG.getNode(ISD::SHL, MVT::i32, IntegerPartOfX,
3568 DAG.getConstant(23, MVT::i32));
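// As in visitExp2, the shifted integer part is integer-added into the
// exponent field of the final bit pattern, scaling the result by
// 2^IntegerPartOfX.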
3570 if (LimitFloatPrecision <= 6) {
3571 // For floating-point precision of 6:
3573 //   twoToFractionalPartOfX =
3574 //     0.997535578f +
3575 //       (0.735607626f + 0.252464424f * x) * x;
3577 // error 0.0144103317, which is 6 bits
3578 SDValue t2 = DAG.getNode(ISD::FMUL, MVT::f32, X,
3579 getF32Constant(DAG, 0x3e814304));
3580 SDValue t3 = DAG.getNode(ISD::FADD, MVT::f32, t2,
3581 getF32Constant(DAG, 0x3f3c50c8));
3582 SDValue t4 = DAG.getNode(ISD::FMUL, MVT::f32, t3, X);
3583 SDValue t5 = DAG.getNode(ISD::FADD, MVT::f32, t4,
3584 getF32Constant(DAG, 0x3f7f5e7e));
3585 SDValue t6 = DAG.getNode(ISD::BIT_CONVERT, MVT::i32, t5);
3586 SDValue TwoToFractionalPartOfX =
3587 DAG.getNode(ISD::ADD, MVT::i32, t6, IntegerPartOfX);
3589 result = DAG.getNode(ISD::BIT_CONVERT, MVT::f32, TwoToFractionalPartOfX);
3590 } else if (LimitFloatPrecision > 6 && LimitFloatPrecision <= 12) {
3591 // For floating-point precision of 12:
3593 //   TwoToFractionalPartOfX =
3594 //     0.999892986f +
3595 //       (0.696457318f +
3596 //         (0.224338339f + 0.792043434e-1f * x) * x) * x;
3598 // error 0.000107046256, which is 13 to 14 bits
3599 SDValue t2 = DAG.getNode(ISD::FMUL, MVT::f32, X,
3600 getF32Constant(DAG, 0x3da235e3));
3601 SDValue t3 = DAG.getNode(ISD::FADD, MVT::f32, t2,
3602 getF32Constant(DAG, 0x3e65b8f3));
3603 SDValue t4 = DAG.getNode(ISD::FMUL, MVT::f32, t3, X);
3604 SDValue t5 = DAG.getNode(ISD::FADD, MVT::f32, t4,
3605 getF32Constant(DAG, 0x3f324b07));
3606 SDValue t6 = DAG.getNode(ISD::FMUL, MVT::f32, t5, X);
3607 SDValue t7 = DAG.getNode(ISD::FADD, MVT::f32, t6,
3608 getF32Constant(DAG, 0x3f7ff8fd));
3609 SDValue t8 = DAG.getNode(ISD::BIT_CONVERT, MVT::i32, t7);
3610 SDValue TwoToFractionalPartOfX =
3611 DAG.getNode(ISD::ADD, MVT::i32, t8, IntegerPartOfX);
3613 result = DAG.getNode(ISD::BIT_CONVERT, MVT::f32, TwoToFractionalPartOfX);
3614 } else { // LimitFloatPrecision > 12 && LimitFloatPrecision <= 18
3615 // For floating-point precision of 18:
3617 //   TwoToFractionalPartOfX =
3618 //     0.999999982f +
3619 //       (0.693148872f +
3620 //         (0.240227044f +
3621 //           (0.554906021e-1f +
3622 // (0.961591928e-2f +
3623 // (0.136028312e-2f + 0.157059148e-3f *x)*x)*x)*x)*x)*x;
3624 // error 2.47208000*10^(-7), which is better than 18 bits
3625 SDValue t2 = DAG.getNode(ISD::FMUL, MVT::f32, X,
3626 getF32Constant(DAG, 0x3924b03e));
3627 SDValue t3 = DAG.getNode(ISD::FADD, MVT::f32, t2,
3628 getF32Constant(DAG, 0x3ab24b87));
3629 SDValue t4 = DAG.getNode(ISD::FMUL, MVT::f32, t3, X);
3630 SDValue t5 = DAG.getNode(ISD::FADD, MVT::f32, t4,
3631 getF32Constant(DAG, 0x3c1d8c17));
3632 SDValue t6 = DAG.getNode(ISD::FMUL, MVT::f32, t5, X);
3633 SDValue t7 = DAG.getNode(ISD::FADD, MVT::f32, t6,
3634 getF32Constant(DAG, 0x3d634a1d));
3635 SDValue t8 = DAG.getNode(ISD::FMUL, MVT::f32, t7, X);
3636 SDValue t9 = DAG.getNode(ISD::FADD, MVT::f32, t8,
3637 getF32Constant(DAG, 0x3e75fe14));
3638 SDValue t10 = DAG.getNode(ISD::FMUL, MVT::f32, t9, X);
3639 SDValue t11 = DAG.getNode(ISD::FADD, MVT::f32, t10,
3640 getF32Constant(DAG, 0x3f317234));
3641 SDValue t12 = DAG.getNode(ISD::FMUL, MVT::f32, t11, X);
3642 SDValue t13 = DAG.getNode(ISD::FADD, MVT::f32, t12,
3643 getF32Constant(DAG, 0x3f800000));
3644 SDValue t14 = DAG.getNode(ISD::BIT_CONVERT, MVT::i32, t13);
3645 SDValue TwoToFractionalPartOfX =
3646 DAG.getNode(ISD::ADD, MVT::i32, t14, IntegerPartOfX);
3648 result = DAG.getNode(ISD::BIT_CONVERT, MVT::f32, TwoToFractionalPartOfX);
3649 }
3650 } else {
3651 // No special expansion.
3652 result = DAG.getNode(ISD::FPOW,
3653 getValue(I.getOperand(1)).getValueType(),
3654 getValue(I.getOperand(1)),
3655 getValue(I.getOperand(2)));
3658 setValue(&I, result);
3661 /// visitIntrinsicCall - Lower the call to the specified intrinsic function. If
3662 /// we want to emit this as a call to a named external function, return the name;
3663 /// otherwise, lower it and return null.
3664 const char *
3665 SelectionDAGLowering::visitIntrinsicCall(CallInst &I, unsigned Intrinsic) {
3666 switch (Intrinsic) {
3667 default:
3668 // By default, turn this into a target intrinsic node.
3669 visitTargetIntrinsic(I, Intrinsic);
3671 case Intrinsic::vastart: visitVAStart(I); return 0;
3672 case Intrinsic::vaend: visitVAEnd(I); return 0;
3673 case Intrinsic::vacopy: visitVACopy(I); return 0;
3674 case Intrinsic::returnaddress:
3675 setValue(&I, DAG.getNode(ISD::RETURNADDR, TLI.getPointerTy(),
3676 getValue(I.getOperand(1))));
3678 case Intrinsic::frameaddress:
3679 setValue(&I, DAG.getNode(ISD::FRAMEADDR, TLI.getPointerTy(),
3680 getValue(I.getOperand(1))));
3682 case Intrinsic::setjmp:
3683 return "_setjmp"+!TLI.usesUnderscoreSetJmp();
3685 case Intrinsic::longjmp:
3686 return "_longjmp"+!TLI.usesUnderscoreLongJmp();
3688 case Intrinsic::memcpy: {
3689 SDValue Op1 = getValue(I.getOperand(1));
3690 SDValue Op2 = getValue(I.getOperand(2));
3691 SDValue Op3 = getValue(I.getOperand(3));
3692 unsigned Align = cast<ConstantInt>(I.getOperand(4))->getZExtValue();
3693 DAG.setRoot(DAG.getMemcpy(getRoot(), Op1, Op2, Op3, Align, false,
3694 I.getOperand(1), 0, I.getOperand(2), 0));
3697 case Intrinsic::memset: {
3698 SDValue Op1 = getValue(I.getOperand(1));
3699 SDValue Op2 = getValue(I.getOperand(2));
3700 SDValue Op3 = getValue(I.getOperand(3));
3701 unsigned Align = cast<ConstantInt>(I.getOperand(4))->getZExtValue();
3702 DAG.setRoot(DAG.getMemset(getRoot(), Op1, Op2, Op3, Align,
3703 I.getOperand(1), 0));
3706 case Intrinsic::memmove: {
3707 SDValue Op1 = getValue(I.getOperand(1));
3708 SDValue Op2 = getValue(I.getOperand(2));
3709 SDValue Op3 = getValue(I.getOperand(3));
3710 unsigned Align = cast<ConstantInt>(I.getOperand(4))->getZExtValue();
3712 // If the source and destination are known not to alias, we can
3713 // lower memmove as memcpy.
3714 uint64_t Size = -1ULL;
3715 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op3))
3716 Size = C->getZExtValue();
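// If the length is not a compile-time constant, Size stays -1ULL (unknown)
// and the alias query below must answer conservatively for the whole
// underlying objects.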
3717 if (AA->alias(I.getOperand(1), Size, I.getOperand(2), Size) ==
3718 AliasAnalysis::NoAlias) {
3719 DAG.setRoot(DAG.getMemcpy(getRoot(), Op1, Op2, Op3, Align, false,
3720 I.getOperand(1), 0, I.getOperand(2), 0));
3724 DAG.setRoot(DAG.getMemmove(getRoot(), Op1, Op2, Op3, Align,
3725 I.getOperand(1), 0, I.getOperand(2), 0));
3728 case Intrinsic::dbg_stoppoint: {
3729 MachineModuleInfo *MMI = DAG.getMachineModuleInfo();
3730 DbgStopPointInst &SPI = cast<DbgStopPointInst>(I);
3731 if (MMI && SPI.getContext() && MMI->Verify(SPI.getContext())) {
3732 DebugInfoDesc *DD = MMI->getDescFor(SPI.getContext());
3733 assert(DD && "Not a debug information descriptor");
3734 DAG.setRoot(DAG.getDbgStopPoint(getRoot(),
3735 SPI.getLine(),
3736 SPI.getColumn(),
3737 cast<CompileUnitDesc>(DD)));
3742 case Intrinsic::dbg_region_start: {
3743 MachineModuleInfo *MMI = DAG.getMachineModuleInfo();
3744 DbgRegionStartInst &RSI = cast<DbgRegionStartInst>(I);
3745 if (MMI && RSI.getContext() && MMI->Verify(RSI.getContext())) {
3746 unsigned LabelID = MMI->RecordRegionStart(RSI.getContext());
3747 DAG.setRoot(DAG.getLabel(ISD::DBG_LABEL, getRoot(), LabelID));
3752 case Intrinsic::dbg_region_end: {
3753 MachineModuleInfo *MMI = DAG.getMachineModuleInfo();
3754 DbgRegionEndInst &REI = cast<DbgRegionEndInst>(I);
3755 if (MMI && REI.getContext() && MMI->Verify(REI.getContext())) {
3756 unsigned LabelID = MMI->RecordRegionEnd(REI.getContext());
3757 DAG.setRoot(DAG.getLabel(ISD::DBG_LABEL, getRoot(), LabelID));
3762 case Intrinsic::dbg_func_start: {
3763 MachineModuleInfo *MMI = DAG.getMachineModuleInfo();
3765 DbgFuncStartInst &FSI = cast<DbgFuncStartInst>(I);
3766 Value *SP = FSI.getSubprogram();
3767 if (SP && MMI->Verify(SP)) {
3768 // llvm.dbg.func.start implicitly defines a dbg_stoppoint which is
3769 // what (most?) gdb expects.
3770 DebugInfoDesc *DD = MMI->getDescFor(SP);
3771 assert(DD && "Not a debug information descriptor");
3772 SubprogramDesc *Subprogram = cast<SubprogramDesc>(DD);
3773 const CompileUnitDesc *CompileUnit = Subprogram->getFile();
3774 unsigned SrcFile = MMI->RecordSource(CompileUnit);
3775 // Record the source line, but do not create a label for the normal
3776 // function start; it will be emitted at asm emission time. However,
3777 // create a label if this is the beginning of an inlined function.
3778 unsigned LabelID = MMI->RecordSourceLine(Subprogram->getLine(), 0, SrcFile);
3779 if (MMI->getSourceLines().size() != 1)
3780 DAG.setRoot(DAG.getLabel(ISD::DBG_LABEL, getRoot(), LabelID));
3785 case Intrinsic::dbg_declare: {
3786 MachineModuleInfo *MMI = DAG.getMachineModuleInfo();
3787 DbgDeclareInst &DI = cast<DbgDeclareInst>(I);
3788 Value *Variable = DI.getVariable();
3789 if (MMI && Variable && MMI->Verify(Variable))
3790 DAG.setRoot(DAG.getNode(ISD::DECLARE, MVT::Other, getRoot(),
3791 getValue(DI.getAddress()), getValue(Variable)));
3795 case Intrinsic::eh_exception: {
3796 if (!CurMBB->isLandingPad()) {
3797 // FIXME: Mark exception register as live in. Hack for PR1508.
3798 unsigned Reg = TLI.getExceptionAddressRegister();
3799 if (Reg) CurMBB->addLiveIn(Reg);
3801 // Insert the EXCEPTIONADDR instruction.
3802 SDVTList VTs = DAG.getVTList(TLI.getPointerTy(), MVT::Other);
3803 SDValue Ops[1];
3804 Ops[0] = DAG.getRoot();
3805 SDValue Op = DAG.getNode(ISD::EXCEPTIONADDR, VTs, Ops, 1);
3806 setValue(&I, Op);
3807 DAG.setRoot(Op.getValue(1));
3811 case Intrinsic::eh_selector_i32:
3812 case Intrinsic::eh_selector_i64: {
3813 MachineModuleInfo *MMI = DAG.getMachineModuleInfo();
3814 MVT VT = (Intrinsic == Intrinsic::eh_selector_i32 ?
3815 MVT::i32 : MVT::i64);
3817 if (MMI) {
3818 if (CurMBB->isLandingPad())
3819 AddCatchInfo(I, MMI, CurMBB);
3822 FuncInfo.CatchInfoLost.insert(&I);
3824 // FIXME: Mark exception selector register as live in. Hack for PR1508.
3825 unsigned Reg = TLI.getExceptionSelectorRegister();
3826 if (Reg) CurMBB->addLiveIn(Reg);
3829 // Insert the EHSELECTION instruction.
3830 SDVTList VTs = DAG.getVTList(VT, MVT::Other);
3831 SDValue Ops[2];
3832 Ops[0] = getValue(I.getOperand(1));
3833 Ops[1] = getRoot();
3834 SDValue Op = DAG.getNode(ISD::EHSELECTION, VTs, Ops, 2);
3835 setValue(&I, Op);
3836 DAG.setRoot(Op.getValue(1));
3837 } else {
3838 setValue(&I, DAG.getConstant(0, VT));
3844 case Intrinsic::eh_typeid_for_i32:
3845 case Intrinsic::eh_typeid_for_i64: {
3846 MachineModuleInfo *MMI = DAG.getMachineModuleInfo();
3847 MVT VT = (Intrinsic == Intrinsic::eh_typeid_for_i32 ?
3848 MVT::i32 : MVT::i64);
3850 if (MMI) {
3851 // Find the type id for the given typeinfo.
3852 GlobalVariable *GV = ExtractTypeInfo(I.getOperand(1));
3854 unsigned TypeID = MMI->getTypeIDFor(GV);
3855 setValue(&I, DAG.getConstant(TypeID, VT));
3856 } else {
3857 // Return something different from eh_selector.
3858 setValue(&I, DAG.getConstant(1, VT));
3864 case Intrinsic::eh_return_i32:
3865 case Intrinsic::eh_return_i64:
3866 if (MachineModuleInfo *MMI = DAG.getMachineModuleInfo()) {
3867 MMI->setCallsEHReturn(true);
3868 DAG.setRoot(DAG.getNode(ISD::EH_RETURN,
3871 getValue(I.getOperand(1)),
3872 getValue(I.getOperand(2))));
3873 } else {
3874 setValue(&I, DAG.getConstant(0, TLI.getPointerTy()));
3878 case Intrinsic::eh_unwind_init:
3879 if (MachineModuleInfo *MMI = DAG.getMachineModuleInfo()) {
3880 MMI->setCallsUnwindInit(true);
3885 case Intrinsic::eh_dwarf_cfa: {
3886 MVT VT = getValue(I.getOperand(1)).getValueType();
3887 SDValue CfaArg;
3888 if (VT.bitsGT(TLI.getPointerTy()))
3889 CfaArg = DAG.getNode(ISD::TRUNCATE,
3890 TLI.getPointerTy(), getValue(I.getOperand(1)));
3891 else
3892 CfaArg = DAG.getNode(ISD::SIGN_EXTEND,
3893 TLI.getPointerTy(), getValue(I.getOperand(1)));
3895 SDValue Offset = DAG.getNode(ISD::ADD,
3896 TLI.getPointerTy(),
3897 DAG.getNode(ISD::FRAME_TO_ARGS_OFFSET,
3898 TLI.getPointerTy()),
3899 CfaArg);
3900 setValue(&I, DAG.getNode(ISD::ADD,
3901 TLI.getPointerTy(),
3902 DAG.getNode(ISD::FRAMEADDR,
3903 TLI.getPointerTy(),
3904 DAG.getConstant(0,
3905 TLI.getPointerTy())),
3906 Offset));
3910 case Intrinsic::convertff:
3911 case Intrinsic::convertfsi:
3912 case Intrinsic::convertfui:
3913 case Intrinsic::convertsif:
3914 case Intrinsic::convertuif:
3915 case Intrinsic::convertss:
3916 case Intrinsic::convertsu:
3917 case Intrinsic::convertus:
3918 case Intrinsic::convertuu: {
3919 ISD::CvtCode Code = ISD::CVT_INVALID;
3920 switch (Intrinsic) {
3921 case Intrinsic::convertff: Code = ISD::CVT_FF; break;
3922 case Intrinsic::convertfsi: Code = ISD::CVT_FS; break;
3923 case Intrinsic::convertfui: Code = ISD::CVT_FU; break;
3924 case Intrinsic::convertsif: Code = ISD::CVT_SF; break;
3925 case Intrinsic::convertuif: Code = ISD::CVT_UF; break;
3926 case Intrinsic::convertss: Code = ISD::CVT_SS; break;
3927 case Intrinsic::convertsu: Code = ISD::CVT_SU; break;
3928 case Intrinsic::convertus: Code = ISD::CVT_US; break;
3929 case Intrinsic::convertuu: Code = ISD::CVT_UU; break;
3931 MVT DestVT = TLI.getValueType(I.getType());
3932 Value* Op1 = I.getOperand(1);
3933 setValue(&I, DAG.getConvertRndSat(DestVT, getValue(Op1),
3934 DAG.getValueType(DestVT),
3935 DAG.getValueType(getValue(Op1).getValueType()),
3936 getValue(I.getOperand(2)),
3937 getValue(I.getOperand(3)),
3938 Code));
3942 case Intrinsic::sqrt:
3943 setValue(&I, DAG.getNode(ISD::FSQRT,
3944 getValue(I.getOperand(1)).getValueType(),
3945 getValue(I.getOperand(1))));
3947 case Intrinsic::powi:
3948 setValue(&I, DAG.getNode(ISD::FPOWI,
3949 getValue(I.getOperand(1)).getValueType(),
3950 getValue(I.getOperand(1)),
3951 getValue(I.getOperand(2))));
3953 case Intrinsic::sin:
3954 setValue(&I, DAG.getNode(ISD::FSIN,
3955 getValue(I.getOperand(1)).getValueType(),
3956 getValue(I.getOperand(1))));
3958 case Intrinsic::cos:
3959 setValue(&I, DAG.getNode(ISD::FCOS,
3960 getValue(I.getOperand(1)).getValueType(),
3961 getValue(I.getOperand(1))));
3963 case Intrinsic::log:
3966 case Intrinsic::log2:
3969 case Intrinsic::log10:
3972 case Intrinsic::exp:
3975 case Intrinsic::exp2:
3978 case Intrinsic::pow:
3981 case Intrinsic::pcmarker: {
3982 SDValue Tmp = getValue(I.getOperand(1));
3983 DAG.setRoot(DAG.getNode(ISD::PCMARKER, MVT::Other, getRoot(), Tmp));
3986 case Intrinsic::readcyclecounter: {
3987 SDValue Op = getRoot();
3988 SDValue Tmp = DAG.getNode(ISD::READCYCLECOUNTER,
3989 DAG.getNodeValueTypes(MVT::i64, MVT::Other), 2,
3990 &Op, 1);
3991 setValue(&I, Tmp);
3992 DAG.setRoot(Tmp.getValue(1));
3995 case Intrinsic::part_select: {
3996 // Currently not implemented: just abort
3997 assert(0 && "part_select intrinsic not implemented");
4000 case Intrinsic::part_set: {
4001 // Currently not implemented: just abort
4002 assert(0 && "part_set intrinsic not implemented");
4005 case Intrinsic::bswap:
4006 setValue(&I, DAG.getNode(ISD::BSWAP,
4007 getValue(I.getOperand(1)).getValueType(),
4008 getValue(I.getOperand(1))));
4010 case Intrinsic::cttz: {
4011 SDValue Arg = getValue(I.getOperand(1));
4012 MVT Ty = Arg.getValueType();
4013 SDValue result = DAG.getNode(ISD::CTTZ, Ty, Arg);
4014 setValue(&I, result);
4017 case Intrinsic::ctlz: {
4018 SDValue Arg = getValue(I.getOperand(1));
4019 MVT Ty = Arg.getValueType();
4020 SDValue result = DAG.getNode(ISD::CTLZ, Ty, Arg);
4021 setValue(&I, result);
4024 case Intrinsic::ctpop: {
4025 SDValue Arg = getValue(I.getOperand(1));
4026 MVT Ty = Arg.getValueType();
4027 SDValue result = DAG.getNode(ISD::CTPOP, Ty, Arg);
4028 setValue(&I, result);
4031 case Intrinsic::stacksave: {
4032 SDValue Op = getRoot();
4033 SDValue Tmp = DAG.getNode(ISD::STACKSAVE,
4034 DAG.getNodeValueTypes(TLI.getPointerTy(), MVT::Other), 2, &Op, 1);
4035 setValue(&I, Tmp);
4036 DAG.setRoot(Tmp.getValue(1));
4039 case Intrinsic::stackrestore: {
4040 SDValue Tmp = getValue(I.getOperand(1));
4041 DAG.setRoot(DAG.getNode(ISD::STACKRESTORE, MVT::Other, getRoot(), Tmp));
4044 case Intrinsic::stackprotector: {
4045 // Emit code into the DAG to store the stack guard onto the stack.
4046 MachineFunction &MF = DAG.getMachineFunction();
4047 MachineFrameInfo *MFI = MF.getFrameInfo();
4048 MVT PtrTy = TLI.getPointerTy();
4050 SDValue Src = getValue(I.getOperand(1)); // The guard's value.
4051 AllocaInst *Slot = cast<AllocaInst>(I.getOperand(2));
4053 int FI = FuncInfo.StaticAllocaMap[Slot];
4054 MFI->setStackProtectorIndex(FI);
4056 SDValue FIN = DAG.getFrameIndex(FI, PtrTy);
4058 // Store the stack protector onto the stack.
4059 SDValue Result = DAG.getStore(getRoot(), Src, FIN,
4060 PseudoSourceValue::getFixedStack(FI),
4062 setValue(&I, Result);
4063 DAG.setRoot(Result);
4066 case Intrinsic::var_annotation:
4067 // Discard annotate attributes
4070 case Intrinsic::init_trampoline: {
4071 const Function *F = cast<Function>(I.getOperand(2)->stripPointerCasts());
4073 SDValue Ops[6];
4074 Ops[0] = getRoot();
4075 Ops[1] = getValue(I.getOperand(1));
4076 Ops[2] = getValue(I.getOperand(2));
4077 Ops[3] = getValue(I.getOperand(3));
4078 Ops[4] = DAG.getSrcValue(I.getOperand(1));
4079 Ops[5] = DAG.getSrcValue(F);
4081 SDValue Tmp = DAG.getNode(ISD::TRAMPOLINE,
4082 DAG.getNodeValueTypes(TLI.getPointerTy(),
4083 MVT::Other), 2,
4084 Ops, 6);
4086 setValue(&I, Tmp);
4087 DAG.setRoot(Tmp.getValue(1));
4091 case Intrinsic::gcroot:
4092 if (GFI) {
4093 Value *Alloca = I.getOperand(1);
4094 Constant *TypeMap = cast<Constant>(I.getOperand(2));
4096 FrameIndexSDNode *FI = cast<FrameIndexSDNode>(getValue(Alloca).getNode());
4097 GFI->addStackRoot(FI->getIndex(), TypeMap);
4101 case Intrinsic::gcread:
4102 case Intrinsic::gcwrite:
4103 assert(0 && "GC failed to lower gcread/gcwrite intrinsics!");
4106 case Intrinsic::flt_rounds: {
4107 setValue(&I, DAG.getNode(ISD::FLT_ROUNDS_, MVT::i32));
4111 case Intrinsic::trap: {
4112 DAG.setRoot(DAG.getNode(ISD::TRAP, MVT::Other, getRoot()));
4116 case Intrinsic::uadd_with_overflow:
4117 return implVisitAluOverflow(I, ISD::UADDO);
4118 case Intrinsic::sadd_with_overflow:
4119 return implVisitAluOverflow(I, ISD::SADDO);
4120 case Intrinsic::usub_with_overflow:
4121 return implVisitAluOverflow(I, ISD::USUBO);
4122 case Intrinsic::ssub_with_overflow:
4123 return implVisitAluOverflow(I, ISD::SSUBO);
4124 case Intrinsic::umul_with_overflow:
4125 return implVisitAluOverflow(I, ISD::UMULO);
4126 case Intrinsic::smul_with_overflow:
4127 return implVisitAluOverflow(I, ISD::SMULO);
4129 case Intrinsic::prefetch: {
4130 SDValue Ops[4];
4131 Ops[0] = getRoot();
4132 Ops[1] = getValue(I.getOperand(1));
4133 Ops[2] = getValue(I.getOperand(2));
4134 Ops[3] = getValue(I.getOperand(3));
4135 DAG.setRoot(DAG.getNode(ISD::PREFETCH, MVT::Other, &Ops[0], 4));
4139 case Intrinsic::memory_barrier: {
4140 SDValue Ops[6];
4141 Ops[0] = getRoot();
4142 for (int x = 1; x < 6; ++x)
4143 Ops[x] = getValue(I.getOperand(x));
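// Operands 1-5 of llvm.memory.barrier are i1 flags selecting which
// orderings to enforce (load-load, load-store, store-load, store-store)
// plus a device flag; they are forwarded unchanged to the MEMBARRIER node.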
4145 DAG.setRoot(DAG.getNode(ISD::MEMBARRIER, MVT::Other, &Ops[0], 6));
4148 case Intrinsic::atomic_cmp_swap: {
4149 SDValue Root = getRoot();
4151 switch (getValue(I.getOperand(2)).getValueType().getSimpleVT()) {
4153 L = DAG.getAtomic(ISD::ATOMIC_CMP_SWAP_8, Root,
4154 getValue(I.getOperand(1)),
4155 getValue(I.getOperand(2)),
4156 getValue(I.getOperand(3)),
4160 L = DAG.getAtomic(ISD::ATOMIC_CMP_SWAP_16, Root,
4161 getValue(I.getOperand(1)),
4162 getValue(I.getOperand(2)),
4163 getValue(I.getOperand(3)),
4167 L = DAG.getAtomic(ISD::ATOMIC_CMP_SWAP_32, Root,
4168 getValue(I.getOperand(1)),
4169 getValue(I.getOperand(2)),
4170 getValue(I.getOperand(3)),
4174 L = DAG.getAtomic(ISD::ATOMIC_CMP_SWAP_64, Root,
4175 getValue(I.getOperand(1)),
4176 getValue(I.getOperand(2)),
4177 getValue(I.getOperand(3)),
4181 assert(0 && "Invalid atomic type");
4185 DAG.setRoot(L.getValue(1));
4188 case Intrinsic::atomic_load_add:
4189 switch (getValue(I.getOperand(2)).getValueType().getSimpleVT()) {
4191 return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_ADD_8);
4193 return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_ADD_16);
4195 return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_ADD_32);
4197 return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_ADD_64);
4199 assert(0 && "Invalid atomic type");
4202 case Intrinsic::atomic_load_sub:
4203 switch (getValue(I.getOperand(2)).getValueType().getSimpleVT()) {
4205 return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_SUB_8);
4207 return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_SUB_16);
4209 return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_SUB_32);
4211 return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_SUB_64);
4213 assert(0 && "Invalid atomic type");
4216 case Intrinsic::atomic_load_or:
4217 switch (getValue(I.getOperand(2)).getValueType().getSimpleVT()) {
4219 return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_OR_8);
4221 return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_OR_16);
4223 return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_OR_32);
4225 return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_OR_64);
4227 assert(0 && "Invalid atomic type");
4230 case Intrinsic::atomic_load_xor:
4231 switch (getValue(I.getOperand(2)).getValueType().getSimpleVT()) {
4233 return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_XOR_8);
4235 return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_XOR_16);
4237 return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_XOR_32);
4239 return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_XOR_64);
4241 assert(0 && "Invalid atomic type");
4244 case Intrinsic::atomic_load_and:
4245 switch (getValue(I.getOperand(2)).getValueType().getSimpleVT()) {
4247 return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_AND_8);
4249 return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_AND_16);
4251 return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_AND_32);
4253 return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_AND_64);
4255 assert(0 && "Invalid atomic type");
4258 case Intrinsic::atomic_load_nand:
4259 switch (getValue(I.getOperand(2)).getValueType().getSimpleVT()) {
4261 return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_NAND_8);
4263 return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_NAND_16);
4265 return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_NAND_32);
4267 return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_NAND_64);
4269 assert(0 && "Invalid atomic type");
4272 case Intrinsic::atomic_load_max:
4273 switch (getValue(I.getOperand(2)).getValueType().getSimpleVT()) {
4275 return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_MAX_8);
4277 return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_MAX_16);
4279 return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_MAX_32);
4281 return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_MAX_64);
4283 assert(0 && "Invalid atomic type");
4286 case Intrinsic::atomic_load_min:
4287 switch (getValue(I.getOperand(2)).getValueType().getSimpleVT()) {
4289 return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_MIN_8);
4291 return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_MIN_16);
4293 return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_MIN_32);
4295 return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_MIN_64);
4297 assert(0 && "Invalid atomic type");
4300 case Intrinsic::atomic_load_umin:
4301 switch (getValue(I.getOperand(2)).getValueType().getSimpleVT()) {
4303 return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_UMIN_8);
4305 return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_UMIN_16);
4307 return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_UMIN_32);
4309 return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_UMIN_64);
4311 assert(0 && "Invalid atomic type");
4314 case Intrinsic::atomic_load_umax:
4315 switch (getValue(I.getOperand(2)).getValueType().getSimpleVT()) {
4317 return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_UMAX_8);
4319 return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_UMAX_16);
4321 return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_UMAX_32);
4323 return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_UMAX_64);
4325 assert(0 && "Invalid atomic type");
4328 case Intrinsic::atomic_swap:
4329 switch (getValue(I.getOperand(2)).getValueType().getSimpleVT()) {
4331 return implVisitBinaryAtomic(I, ISD::ATOMIC_SWAP_8);
4333 return implVisitBinaryAtomic(I, ISD::ATOMIC_SWAP_16);
4335 return implVisitBinaryAtomic(I, ISD::ATOMIC_SWAP_32);
4337 return implVisitBinaryAtomic(I, ISD::ATOMIC_SWAP_64);
4339 assert(0 && "Invalid atomic type");
4346 void SelectionDAGLowering::LowerCallTo(CallSite CS, SDValue Callee,
4347 bool IsTailCall,
4348 MachineBasicBlock *LandingPad) {
4349 const PointerType *PT = cast<PointerType>(CS.getCalledValue()->getType());
4350 const FunctionType *FTy = cast<FunctionType>(PT->getElementType());
4351 MachineModuleInfo *MMI = DAG.getMachineModuleInfo();
4352 unsigned BeginLabel = 0, EndLabel = 0;
4354 TargetLowering::ArgListTy Args;
4355 TargetLowering::ArgListEntry Entry;
4356 Args.reserve(CS.arg_size());
4357 for (CallSite::arg_iterator i = CS.arg_begin(), e = CS.arg_end();
4359 SDValue ArgNode = getValue(*i);
4360 Entry.Node = ArgNode; Entry.Ty = (*i)->getType();
4362 unsigned attrInd = i - CS.arg_begin() + 1;
4363 Entry.isSExt = CS.paramHasAttr(attrInd, Attribute::SExt);
4364 Entry.isZExt = CS.paramHasAttr(attrInd, Attribute::ZExt);
4365 Entry.isInReg = CS.paramHasAttr(attrInd, Attribute::InReg);
4366 Entry.isSRet = CS.paramHasAttr(attrInd, Attribute::StructRet);
4367 Entry.isNest = CS.paramHasAttr(attrInd, Attribute::Nest);
4368 Entry.isByVal = CS.paramHasAttr(attrInd, Attribute::ByVal);
4369 Entry.Alignment = CS.getParamAlignment(attrInd);
4370 Args.push_back(Entry);
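// Note that attrInd is 1-based: slot 0 of this attribute numbering holds
// the return-value attributes (see the paramHasAttr(0, ...) queries below),
// so parameter attributes start at index 1.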
4373 if (LandingPad && MMI) {
4374 // Insert a label before the invoke call to mark the try range. This can be
4375 // used to detect deletion of the invoke via the MachineModuleInfo.
4376 BeginLabel = MMI->NextLabelID();
4377 // Both PendingLoads and PendingExports must be flushed here;
4378 // this call might not return.
4380 DAG.setRoot(DAG.getLabel(ISD::EH_LABEL, getControlRoot(), BeginLabel));
4383 std::pair<SDValue,SDValue> Result =
4384 TLI.LowerCallTo(getRoot(), CS.getType(),
4385 CS.paramHasAttr(0, Attribute::SExt),
4386 CS.paramHasAttr(0, Attribute::ZExt), FTy->isVarArg(),
4387 CS.paramHasAttr(0, Attribute::InReg),
4388 CS.getCallingConv(),
4389 IsTailCall && PerformTailCallOpt,
4390 Callee, Args, DAG);
4391 if (CS.getType() != Type::VoidTy)
4392 setValue(CS.getInstruction(), Result.first);
4393 DAG.setRoot(Result.second);
4395 if (LandingPad && MMI) {
4396 // Insert a label at the end of the invoke call to mark the try range. This
4397 // can be used to detect deletion of the invoke via the MachineModuleInfo.
4398 EndLabel = MMI->NextLabelID();
4399 DAG.setRoot(DAG.getLabel(ISD::EH_LABEL, getRoot(), EndLabel));
4401 // Inform MachineModuleInfo of range.
4402 MMI->addInvoke(LandingPad, BeginLabel, EndLabel);
4407 void SelectionDAGLowering::visitCall(CallInst &I) {
4408 const char *RenameFn = 0;
4409 if (Function *F = I.getCalledFunction()) {
4410 if (F->isDeclaration()) {
4411 if (unsigned IID = F->getIntrinsicID()) {
4412 RenameFn = visitIntrinsicCall(I, IID);
4413 if (!RenameFn)
4414 return;
4418 // Check for well-known libc/libm calls. If the function is internal, it
4419 // can't be a library call.
4420 unsigned NameLen = F->getNameLen();
4421 if (!F->hasInternalLinkage() && NameLen) {
4422 const char *NameStr = F->getNameStart();
4423 if (NameStr[0] == 'c' &&
4424 ((NameLen == 8 && !strcmp(NameStr, "copysign")) ||
4425 (NameLen == 9 && !strcmp(NameStr, "copysignf")))) {
4426 if (I.getNumOperands() == 3 && // Basic sanity checks.
4427 I.getOperand(1)->getType()->isFloatingPoint() &&
4428 I.getType() == I.getOperand(1)->getType() &&
4429 I.getType() == I.getOperand(2)->getType()) {
4430 SDValue LHS = getValue(I.getOperand(1));
4431 SDValue RHS = getValue(I.getOperand(2));
4432 setValue(&I, DAG.getNode(ISD::FCOPYSIGN, LHS.getValueType(),
4436 } else if (NameStr[0] == 'f' &&
4437 ((NameLen == 4 && !strcmp(NameStr, "fabs")) ||
4438 (NameLen == 5 && !strcmp(NameStr, "fabsf")) ||
4439 (NameLen == 5 && !strcmp(NameStr, "fabsl")))) {
4440 if (I.getNumOperands() == 2 && // Basic sanity checks.
4441 I.getOperand(1)->getType()->isFloatingPoint() &&
4442 I.getType() == I.getOperand(1)->getType()) {
4443 SDValue Tmp = getValue(I.getOperand(1));
4444 setValue(&I, DAG.getNode(ISD::FABS, Tmp.getValueType(), Tmp));
4447 } else if (NameStr[0] == 's' &&
4448 ((NameLen == 3 && !strcmp(NameStr, "sin")) ||
4449 (NameLen == 4 && !strcmp(NameStr, "sinf")) ||
4450 (NameLen == 4 && !strcmp(NameStr, "sinl")))) {
4451 if (I.getNumOperands() == 2 && // Basic sanity checks.
4452 I.getOperand(1)->getType()->isFloatingPoint() &&
4453 I.getType() == I.getOperand(1)->getType()) {
4454 SDValue Tmp = getValue(I.getOperand(1));
4455 setValue(&I, DAG.getNode(ISD::FSIN, Tmp.getValueType(), Tmp));
4458 } else if (NameStr[0] == 'c' &&
4459 ((NameLen == 3 && !strcmp(NameStr, "cos")) ||
4460 (NameLen == 4 && !strcmp(NameStr, "cosf")) ||
4461 (NameLen == 4 && !strcmp(NameStr, "cosl")))) {
4462 if (I.getNumOperands() == 2 && // Basic sanity checks.
4463 I.getOperand(1)->getType()->isFloatingPoint() &&
4464 I.getType() == I.getOperand(1)->getType()) {
4465 SDValue Tmp = getValue(I.getOperand(1));
4466 setValue(&I, DAG.getNode(ISD::FCOS, Tmp.getValueType(), Tmp));
4471 } else if (isa<InlineAsm>(I.getOperand(0))) {
4472 visitInlineAsm(&I);
4473 return;
4474 }
4476 SDValue Callee;
4477 if (!RenameFn)
4478 Callee = getValue(I.getOperand(0));
4479 else
4480 Callee = DAG.getExternalSymbol(RenameFn, TLI.getPointerTy());
4482 LowerCallTo(&I, Callee, I.isTailCall());
4486 /// getCopyFromRegs - Emit a series of CopyFromReg nodes that copies from
4487 /// this value and returns the result as a ValueVT value. This uses
4488 /// Chain/Flag as the input and updates them for the output Chain/Flag.
4489 /// If the Flag pointer is NULL, no flag is used.
4490 SDValue RegsForValue::getCopyFromRegs(SelectionDAG &DAG,
4492 SDValue *Flag) const {
4493 // Assemble the legal parts into the final values.
4494 SmallVector<SDValue, 4> Values(ValueVTs.size());
4495 SmallVector<SDValue, 8> Parts;
4496 for (unsigned Value = 0, Part = 0, e = ValueVTs.size(); Value != e; ++Value) {
4497 // Copy the legal parts from the registers.
4498 MVT ValueVT = ValueVTs[Value];
4499 unsigned NumRegs = TLI->getNumRegisters(ValueVT);
4500 MVT RegisterVT = RegVTs[Value];
4502 Parts.resize(NumRegs);
4503 for (unsigned i = 0; i != NumRegs; ++i) {
4504 SDValue P;
4505 if (Flag == 0)
4506 P = DAG.getCopyFromReg(Chain, Regs[Part+i], RegisterVT);
4507 else {
4508 P = DAG.getCopyFromReg(Chain, Regs[Part+i], RegisterVT, *Flag);
4509 *Flag = P.getValue(2);
4511 Chain = P.getValue(1);
4513 // If the source register was virtual and if we know something about it,
4514 // add an assert node.
4515 if (TargetRegisterInfo::isVirtualRegister(Regs[Part+i]) &&
4516 RegisterVT.isInteger() && !RegisterVT.isVector()) {
4517 unsigned SlotNo = Regs[Part+i]-TargetRegisterInfo::FirstVirtualRegister;
4518 FunctionLoweringInfo &FLI = DAG.getFunctionLoweringInfo();
4519 if (FLI.LiveOutRegInfo.size() > SlotNo) {
4520 FunctionLoweringInfo::LiveOutInfo &LOI = FLI.LiveOutRegInfo[SlotNo];
4522 unsigned RegSize = RegisterVT.getSizeInBits();
4523 unsigned NumSignBits = LOI.NumSignBits;
4524 unsigned NumZeroBits = LOI.KnownZero.countLeadingOnes();
4526 // FIXME: We capture more information than the dag can represent. For
4527 // now, just use the tightest assertzext/assertsext possible.
4529 MVT FromVT(MVT::Other);
4530 if (NumSignBits == RegSize)
4531 isSExt = true, FromVT = MVT::i1; // ASSERT SEXT 1
4532 else if (NumZeroBits >= RegSize-1)
4533 isSExt = false, FromVT = MVT::i1; // ASSERT ZEXT 1
4534 else if (NumSignBits > RegSize-8)
4535 isSExt = true, FromVT = MVT::i8; // ASSERT SEXT 8
4536 else if (NumZeroBits >= RegSize-8)
4537 isSExt = false, FromVT = MVT::i8; // ASSERT ZEXT 8
4538 else if (NumSignBits > RegSize-16)
4539 isSExt = true, FromVT = MVT::i16; // ASSERT SEXT 16
4540 else if (NumZeroBits >= RegSize-16)
4541 isSExt = false, FromVT = MVT::i16; // ASSERT ZEXT 16
4542 else if (NumSignBits > RegSize-32)
4543 isSExt = true, FromVT = MVT::i32; // ASSERT SEXT 32
4544 else if (NumZeroBits >= RegSize-32)
4545 isSExt = false, FromVT = MVT::i32; // ASSERT ZEXT 32
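// The chain above attaches the narrowest assertion that is provably correct.
// For example, with RegSize == 32, NumSignBits == 25 means the top 25 bits
// are copies of the sign bit, i.e. the value is an i8 sign-extended to i32,
// so an AssertSext from i8 is used; NumZeroBits (the count of high bits known
// to be zero) plays the analogous role for AssertZext.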
4547 if (FromVT != MVT::Other) {
4548 P = DAG.getNode(isSExt ? ISD::AssertSext : ISD::AssertZext,
4549 RegisterVT, P, DAG.getValueType(FromVT));
4558 Values[Value] = getCopyFromParts(DAG, Parts.begin(), NumRegs, RegisterVT,
4564 return DAG.getNode(ISD::MERGE_VALUES,
4565 DAG.getVTList(&ValueVTs[0], ValueVTs.size()),
4566 &Values[0], ValueVTs.size());
4569 /// getCopyToRegs - Emit a series of CopyToReg nodes that copies the
4570 /// specified value into the registers specified by this object. This uses
4571 /// Chain/Flag as the input and updates them for the output Chain/Flag.
4572 /// If the Flag pointer is NULL, no flag is used.
4573 void RegsForValue::getCopyToRegs(SDValue Val, SelectionDAG &DAG,
4574 SDValue &Chain, SDValue *Flag) const {
4575 // Get the list of the value's legal parts.
4576 unsigned NumRegs = Regs.size();
4577 SmallVector<SDValue, 8> Parts(NumRegs);
4578 for (unsigned Value = 0, Part = 0, e = ValueVTs.size(); Value != e; ++Value) {
4579 MVT ValueVT = ValueVTs[Value];
4580 unsigned NumParts = TLI->getNumRegisters(ValueVT);
4581 MVT RegisterVT = RegVTs[Value];
4583 getCopyToParts(DAG, Val.getValue(Val.getResNo() + Value),
4584 &Parts[Part], NumParts, RegisterVT);
4588 // Copy the parts into the registers.
4589 SmallVector<SDValue, 8> Chains(NumRegs);
4590 for (unsigned i = 0; i != NumRegs; ++i) {
4591 SDValue Part;
4592 if (Flag == 0)
4593 Part = DAG.getCopyToReg(Chain, Regs[i], Parts[i]);
4594 else {
4595 Part = DAG.getCopyToReg(Chain, Regs[i], Parts[i], *Flag);
4596 *Flag = Part.getValue(1);
4598 Chains[i] = Part.getValue(0);
4601 if (NumRegs == 1 || Flag)
4602 // If NumRegs > 1 && Flag is used then the use of the last CopyToReg is
4603 // flagged to it. That is, the CopyToReg nodes and the user are considered
4604 // a single scheduling unit. If we create a TokenFactor and return it as
4605 // chain, then the TokenFactor is both a predecessor (operand) of the
4606 // user as well as a successor (the TF operands are flagged to the user).
4607 // c1, f1 = CopyToReg
4608 // c2, f2 = CopyToReg
4609 // c3 = TokenFactor c1, c2
4612 Chain = Chains[NumRegs-1];
4613 else
4614 Chain = DAG.getNode(ISD::TokenFactor, MVT::Other, &Chains[0], NumRegs);
4617 /// AddInlineAsmOperands - Add this value to the specified inlineasm node
4618 /// operand list. This adds the code marker and includes the number of
4619 /// values added into it.
4620 void RegsForValue::AddInlineAsmOperands(unsigned Code, SelectionDAG &DAG,
4621 std::vector<SDValue> &Ops) const {
4622 MVT IntPtrTy = DAG.getTargetLoweringInfo().getPointerTy();
4623 Ops.push_back(DAG.getTargetConstant(Code | (Regs.size() << 3), IntPtrTy));
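// The flag word pushed above packs the operand class into its low three bits
// and the register count into the bits above them, which is why Regs.size()
// is shifted left by 3 before being OR'd with Code.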
4624 for (unsigned Value = 0, Reg = 0, e = ValueVTs.size(); Value != e; ++Value) {
4625 unsigned NumRegs = TLI->getNumRegisters(ValueVTs[Value]);
4626 MVT RegisterVT = RegVTs[Value];
4627 for (unsigned i = 0; i != NumRegs; ++i) {
4628 assert(Reg < Regs.size() && "Mismatch in # registers expected");
4629 Ops.push_back(DAG.getRegister(Regs[Reg++], RegisterVT));
4634 /// isAllocatableRegister - If the specified register is safe to allocate,
4635 /// i.e. it isn't a stack pointer or some other special register, return the
4636 /// register class for the register. Otherwise, return null.
4637 static const TargetRegisterClass *
4638 isAllocatableRegister(unsigned Reg, MachineFunction &MF,
4639 const TargetLowering &TLI,
4640 const TargetRegisterInfo *TRI) {
4641 MVT FoundVT = MVT::Other;
4642 const TargetRegisterClass *FoundRC = 0;
4643 for (TargetRegisterInfo::regclass_iterator RCI = TRI->regclass_begin(),
4644 E = TRI->regclass_end(); RCI != E; ++RCI) {
4645 MVT ThisVT = MVT::Other;
4647 const TargetRegisterClass *RC = *RCI;
4648 // If none of the value types for this register class are valid, we
4649 // can't use it. For example, 64-bit reg classes on 32-bit targets.
4650 for (TargetRegisterClass::vt_iterator I = RC->vt_begin(), E = RC->vt_end();
4652 if (TLI.isTypeLegal(*I)) {
4653 // If we have already found this register in a different register class,
4654 // choose the one with the largest VT specified. For example, on
4655 // PowerPC, we favor f64 register classes over f32.
4656 if (FoundVT == MVT::Other || FoundVT.bitsLT(*I)) {
4663 if (ThisVT == MVT::Other) continue;
4665 // NOTE: This isn't ideal. In particular, this might allocate the
4666 // frame pointer in functions that need it (because it hasn't been taken
4667 // out of the allocation order yet, since a variable-sized allocation
4668 // hasn't been seen). This is a slight code pessimization, but should still work.
4669 for (TargetRegisterClass::iterator I = RC->allocation_order_begin(MF),
4670 E = RC->allocation_order_end(MF); I != E; ++I)
4672 // We found a matching register class. Keep looking at others in case
4673 // we find one with larger registers that this physreg is also in.
4684 /// AsmOperandInfo - This contains information for each constraint that we are
4685 /// lowering.
4686 struct VISIBILITY_HIDDEN SDISelAsmOperandInfo :
4687 public TargetLowering::AsmOperandInfo {
4688 /// CallOperand - If this is the result output operand or a clobber
4689 /// this is null, otherwise it is the incoming operand to the CallInst.
4690 /// This gets modified as the asm is processed.
4691 SDValue CallOperand;
4693 /// AssignedRegs - If this is a register or register class operand, this
4694 /// contains the set of registers corresponding to the operand.
4695 RegsForValue AssignedRegs;
4697 explicit SDISelAsmOperandInfo(const InlineAsm::ConstraintInfo &info)
4698 : TargetLowering::AsmOperandInfo(info), CallOperand(0,0) {
4701 /// MarkAllocatedRegs - Once AssignedRegs is set, mark the assigned registers
4702 /// busy in OutputRegs/InputRegs.
4703 void MarkAllocatedRegs(bool isOutReg, bool isInReg,
4704 std::set<unsigned> &OutputRegs,
4705 std::set<unsigned> &InputRegs,
4706 const TargetRegisterInfo &TRI) const {
4708 for (unsigned i = 0, e = AssignedRegs.Regs.size(); i != e; ++i)
4709 MarkRegAndAliases(AssignedRegs.Regs[i], OutputRegs, TRI);
4712 for (unsigned i = 0, e = AssignedRegs.Regs.size(); i != e; ++i)
4713 MarkRegAndAliases(AssignedRegs.Regs[i], InputRegs, TRI);
4717 /// getCallOperandValMVT - Return the MVT of the Value* that this operand
4718 /// corresponds to. If there is no Value* for this operand, it returns
4719 /// MVT::Other.
4720 MVT getCallOperandValMVT(const TargetLowering &TLI,
4721 const TargetData *TD) const {
4722 if (CallOperandVal == 0) return MVT::Other;
4724 if (isa<BasicBlock>(CallOperandVal))
4725 return TLI.getPointerTy();
4727 const llvm::Type *OpTy = CallOperandVal->getType();
4729 // If this is an indirect operand, the operand is a pointer to the
4732 OpTy = cast<PointerType>(OpTy)->getElementType();
4734 // If OpTy is not a single value, it may be a struct/union that we
4735 // can tile with integers.
4736 if (!OpTy->isSingleValueType() && OpTy->isSized()) {
4737 unsigned BitSize = TD->getTypeSizeInBits(OpTy);
4746 OpTy = IntegerType::get(BitSize);
4751 return TLI.getValueType(OpTy, true);
4755 /// MarkRegAndAliases - Mark the specified register and all aliases in the
4756 /// specified set.
4757 static void MarkRegAndAliases(unsigned Reg, std::set<unsigned> &Regs,
4758 const TargetRegisterInfo &TRI) {
4759 assert(TargetRegisterInfo::isPhysicalRegister(Reg) && "Isn't a physreg");
4761 if (const unsigned *Aliases = TRI.getAliasSet(Reg))
4762 for (; *Aliases; ++Aliases)
4763 Regs.insert(*Aliases);
4766 } // end llvm namespace.
4769 /// GetRegistersForValue - Assign registers (virtual or physical) for the
4770 /// specified operand. We prefer to assign virtual registers, to allow the
4771 /// register allocator to handle the assignment process. However, if the asm uses
4772 /// features that we can't model on machineinstrs, we have SDISel do the
4773 /// allocation. This produces generally horrible, but correct, code.
4775 /// OpInfo describes the operand.
4776 /// Input and OutputRegs are the set of already allocated physical registers.
4778 void SelectionDAGLowering::
4779 GetRegistersForValue(SDISelAsmOperandInfo &OpInfo,
4780 std::set<unsigned> &OutputRegs,
4781 std::set<unsigned> &InputRegs) {
4782 // Compute whether this value requires an input register, an output register,
4784 bool isOutReg = false;
4785 bool isInReg = false;
4786 switch (OpInfo.Type) {
4787 case InlineAsm::isOutput:
4790 // If there is an input constraint that matches this, we need to reserve
4791 // the input register so no other inputs allocate to it.
4792 isInReg = OpInfo.hasMatchingInput();
4794 case InlineAsm::isInput:
4798 case InlineAsm::isClobber:
4805 MachineFunction &MF = DAG.getMachineFunction();
4806 SmallVector<unsigned, 4> Regs;
4808 // If this is a constraint for a single physreg, or a constraint for a
4809 // register class, find it.
4810 std::pair<unsigned, const TargetRegisterClass*> PhysReg =
4811 TLI.getRegForInlineAsmConstraint(OpInfo.ConstraintCode,
4812 OpInfo.ConstraintVT);
4814 unsigned NumRegs = 1;
4815 if (OpInfo.ConstraintVT != MVT::Other) {
4816 // If this is an FP input in an integer register (or vice versa), insert a bit
4817 // cast of the input value. More generally, handle any case where the input
4818 // value disagrees with the register class we plan to stick this in.
4819 if (OpInfo.Type == InlineAsm::isInput &&
4820 PhysReg.second && !PhysReg.second->hasType(OpInfo.ConstraintVT)) {
4821 // Try to convert to the first MVT that the reg class contains. If the
4822 // types are identical size, use a bitcast to convert (e.g. two differing
4824 MVT RegVT = *PhysReg.second->vt_begin();
4825 if (RegVT.getSizeInBits() == OpInfo.ConstraintVT.getSizeInBits()) {
4826 OpInfo.CallOperand = DAG.getNode(ISD::BIT_CONVERT, RegVT,
4827 OpInfo.CallOperand);
4828 OpInfo.ConstraintVT = RegVT;
4829 } else if (RegVT.isInteger() && OpInfo.ConstraintVT.isFloatingPoint()) {
4830 // If the input is a FP value and we want it in FP registers, do a
4831 // bitcast to the corresponding integer type. This turns an f64 value
4832 // into i64, which can be passed with two i32 values on a 32-bit
4834 RegVT = MVT::getIntegerVT(OpInfo.ConstraintVT.getSizeInBits());
4835 OpInfo.CallOperand = DAG.getNode(ISD::BIT_CONVERT, RegVT,
4836 OpInfo.CallOperand);
4837 OpInfo.ConstraintVT = RegVT;
4841 NumRegs = TLI.getNumRegisters(OpInfo.ConstraintVT);
4845 MVT ValueVT = OpInfo.ConstraintVT;
4847 // If this is a constraint for a specific physical register, like {r17},
4849 if (PhysReg.first) {
4850 if (OpInfo.ConstraintVT == MVT::Other)
4851 ValueVT = *PhysReg.second->vt_begin();
4853 // Get the actual register value type. This is important, because the user
4854 // may have asked for (e.g.) the AX register in i32 type. We need to
4855 // remember that AX is actually i16 to get the right extension.
4856 RegVT = *PhysReg.second->vt_begin();
4858 // This is an explicit reference to a physical register.
4859 Regs.push_back(PhysReg.first);
4861 // If this is an expanded reference, add the rest of the regs to Regs.
4863 TargetRegisterClass::iterator I = PhysReg.second->begin();
4864 for (; *I != PhysReg.first; ++I)
4865 assert(I != PhysReg.second->end() && "Didn't find reg!");
4867 // Already added the first reg.
4869 for (; NumRegs; --NumRegs, ++I) {
4870 assert(I != PhysReg.second->end() && "Ran out of registers to allocate!");
4874 OpInfo.AssignedRegs = RegsForValue(TLI, Regs, RegVT, ValueVT);
4875 const TargetRegisterInfo *TRI = DAG.getTarget().getRegisterInfo();
4876 OpInfo.MarkAllocatedRegs(isOutReg, isInReg, OutputRegs, InputRegs, *TRI);
4880 // Otherwise, if this was a reference to an LLVM register class, create vregs
4881 // for this reference.
4882 std::vector<unsigned> RegClassRegs;
4883 const TargetRegisterClass *RC = PhysReg.second;
4885 // If this is a tied register, our regalloc doesn't know how to maintain
4886 // the constraint, so we have to pick a register to pin the input/output to.
4887 // If it isn't a matched constraint, go ahead and create a vreg and let the
4888 // regalloc do its thing.
4889 if (!OpInfo.hasMatchingInput()) {
4890 RegVT = *PhysReg.second->vt_begin();
4891 if (OpInfo.ConstraintVT == MVT::Other)
4894 // Create the appropriate number of virtual registers.
4895 MachineRegisterInfo &RegInfo = MF.getRegInfo();
4896 for (; NumRegs; --NumRegs)
4897 Regs.push_back(RegInfo.createVirtualRegister(PhysReg.second));
4899 OpInfo.AssignedRegs = RegsForValue(TLI, Regs, RegVT, ValueVT);
4903 // Otherwise, we can't allocate it. Let the code below figure out how to
4904 // maintain these constraints.
4905 RegClassRegs.assign(PhysReg.second->begin(), PhysReg.second->end());
4908 // This is a reference to a register class that doesn't directly correspond
4909 // to an LLVM register class. Allocate NumRegs consecutive, available,
4910 // registers from the class.
4911 RegClassRegs = TLI.getRegClassForInlineAsmConstraint(OpInfo.ConstraintCode,
4912 OpInfo.ConstraintVT);
4915 const TargetRegisterInfo *TRI = DAG.getTarget().getRegisterInfo();
4916 unsigned NumAllocated = 0;
4917 for (unsigned i = 0, e = RegClassRegs.size(); i != e; ++i) {
4918 unsigned Reg = RegClassRegs[i];
4919 // See if this register is available.
4920 if ((isOutReg && OutputRegs.count(Reg)) || // Already used.
4921 (isInReg && InputRegs.count(Reg))) { // Already used.
4922 // Make sure we find consecutive registers.
4927 // Check to see if this register is allocatable (i.e. don't give out the
4930 RC = isAllocatableRegister(Reg, MF, TLI, TRI);
4931 if (!RC) { // Couldn't allocate this register.
4932 // Reset NumAllocated to make sure we return consecutive registers.
4938 // Okay, this register is good, we can use it.
4941 // If we allocated enough consecutive registers, succeed.
4942 if (NumAllocated == NumRegs) {
4943 unsigned RegStart = (i-NumAllocated)+1;
4944 unsigned RegEnd = i+1;
4945 // Mark all of the allocated registers used.
4946 for (unsigned i = RegStart; i != RegEnd; ++i)
4947 Regs.push_back(RegClassRegs[i]);
4949 OpInfo.AssignedRegs = RegsForValue(TLI, Regs, *RC->vt_begin(),
4950 OpInfo.ConstraintVT);
4951 OpInfo.MarkAllocatedRegs(isOutReg, isInReg, OutputRegs, InputRegs, *TRI);
4956 // Otherwise, we couldn't allocate enough registers for this.
4959 /// hasInlineAsmMemConstraint - Return true if the inline asm instruction being
4960 /// processed uses a memory 'm' constraint.
4961 static bool
4962 hasInlineAsmMemConstraint(std::vector<InlineAsm::ConstraintInfo> &CInfos,
4963 TargetLowering &TLI) {
4964 for (unsigned i = 0, e = CInfos.size(); i != e; ++i) {
4965 InlineAsm::ConstraintInfo &CI = CInfos[i];
4966 for (unsigned j = 0, ee = CI.Codes.size(); j != ee; ++j) {
4967 TargetLowering::ConstraintType CType = TLI.getConstraintType(CI.Codes[j]);
4968 if (CType == TargetLowering::C_Memory)
4976 /// visitInlineAsm - Handle a call to an InlineAsm object.
4978 void SelectionDAGLowering::visitInlineAsm(CallSite CS) {
4979 InlineAsm *IA = cast<InlineAsm>(CS.getCalledValue());
4981 /// ConstraintOperands - Information about all of the constraints.
4982 std::vector<SDISelAsmOperandInfo> ConstraintOperands;
4984 SDValue Chain = getRoot();
4987 std::set<unsigned> OutputRegs, InputRegs;
4989 // Do a prepass over the constraints, canonicalizing them, and building up the
4990 // ConstraintOperands list.
4991 std::vector<InlineAsm::ConstraintInfo>
4992 ConstraintInfos = IA->ParseConstraints();
4994 bool hasMemory = hasInlineAsmMemConstraint(ConstraintInfos, TLI);
4996 unsigned ArgNo = 0; // ArgNo - The argument of the CallInst.
4997 unsigned ResNo = 0; // ResNo - The result number of the next output.
4998 for (unsigned i = 0, e = ConstraintInfos.size(); i != e; ++i) {
4999 ConstraintOperands.push_back(SDISelAsmOperandInfo(ConstraintInfos[i]));
5000 SDISelAsmOperandInfo &OpInfo = ConstraintOperands.back();
5002 MVT OpVT = MVT::Other;
5004 // Compute the value type for each operand.
5005 switch (OpInfo.Type) {
5006 case InlineAsm::isOutput:
5007 // Indirect outputs just consume an argument.
5008 if (OpInfo.isIndirect) {
5009 OpInfo.CallOperandVal = CS.getArgument(ArgNo++);
5013 // The return value of the call is this value. As such, there is no
5014 // corresponding argument.
5015 assert(CS.getType() != Type::VoidTy && "Bad inline asm!");
5016 if (const StructType *STy = dyn_cast<StructType>(CS.getType())) {
5017 OpVT = TLI.getValueType(STy->getElementType(ResNo));
5019 assert(ResNo == 0 && "Asm only has one result!");
5020 OpVT = TLI.getValueType(CS.getType());
5024 case InlineAsm::isInput:
5025 OpInfo.CallOperandVal = CS.getArgument(ArgNo++);
5027 case InlineAsm::isClobber:
5032 // If this is an input or an indirect output, process the call argument.
5033 // BasicBlocks are labels, currently appearing only in asm's.
5034 if (OpInfo.CallOperandVal) {
5035 if (BasicBlock *BB = dyn_cast<BasicBlock>(OpInfo.CallOperandVal)) {
5036 OpInfo.CallOperand = DAG.getBasicBlock(FuncInfo.MBBMap[BB]);
5038 OpInfo.CallOperand = getValue(OpInfo.CallOperandVal);
5041 OpVT = OpInfo.getCallOperandValMVT(TLI, TD);
5044 OpInfo.ConstraintVT = OpVT;
  // Second pass over the constraints: compute which constraint option to use
  // and assign registers to constraints that want a specific physreg.
  for (unsigned i = 0, e = ConstraintInfos.size(); i != e; ++i) {
    SDISelAsmOperandInfo &OpInfo = ConstraintOperands[i];

    // If this is an output operand with a matching input operand, look up the
    // matching input. It might have a different type (e.g. the output might be
    // i32 and the input i64) and we need to pick the larger width to ensure we
    // reserve the right number of registers.
    if (OpInfo.hasMatchingInput()) {
      SDISelAsmOperandInfo &Input = ConstraintOperands[OpInfo.MatchingInput];
      if (OpInfo.ConstraintVT != Input.ConstraintVT) {
        assert(OpInfo.ConstraintVT.isInteger() &&
               Input.ConstraintVT.isInteger() &&
               "Asm constraints must be the same or different sized integers");
        if (OpInfo.ConstraintVT.getSizeInBits() <
            Input.ConstraintVT.getSizeInBits())
          OpInfo.ConstraintVT = Input.ConstraintVT;
        else
          Input.ConstraintVT = OpInfo.ConstraintVT;
      }
    }
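
    // Example (types chosen for illustration): with "=r,0" tying an i32
    // output to an i64 input, both ConstraintVTs become i64 here, so enough
    // registers get reserved for the wider value.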

    // Compute the constraint code and ConstraintType to use.
    TLI.ComputeConstraintToUse(OpInfo, OpInfo.CallOperand, hasMemory, &DAG);

    // If this is a memory input, and if the operand is not indirect, do what
    // we need to do to provide an address for the memory input.
    if (OpInfo.ConstraintType == TargetLowering::C_Memory &&
        !OpInfo.isIndirect) {
      assert(OpInfo.Type == InlineAsm::isInput &&
             "Can only indirectify direct input operands!");

      // Memory operands really want the address of the value. If we don't
      // have an indirect input, put it in the constpool if we can, otherwise
      // spill it to a stack slot.

      // If the operand is a float, integer, or vector constant, spill to a
      // constant pool entry to get its address.
      Value *OpVal = OpInfo.CallOperandVal;
      if (isa<ConstantFP>(OpVal) || isa<ConstantInt>(OpVal) ||
          isa<ConstantVector>(OpVal)) {
        OpInfo.CallOperand = DAG.getConstantPool(cast<Constant>(OpVal),
                                                 TLI.getPointerTy());
      } else {
        // Otherwise, create a stack slot and emit a store to it before the
        // asm.
        const Type *Ty = OpVal->getType();
        uint64_t TySize = TLI.getTargetData()->getABITypeSize(Ty);
        unsigned Align  = TLI.getTargetData()->getPrefTypeAlignment(Ty);
        MachineFunction &MF = DAG.getMachineFunction();
        int SSFI = MF.getFrameInfo()->CreateStackObject(TySize, Align);
        SDValue StackSlot = DAG.getFrameIndex(SSFI, TLI.getPointerTy());
        Chain = DAG.getStore(Chain, OpInfo.CallOperand, StackSlot, NULL, 0);
        OpInfo.CallOperand = StackSlot;
      }

      // There is no longer a Value* corresponding to this operand.
      OpInfo.CallOperandVal = 0;
      // It is now an indirect operand.
      OpInfo.isIndirect = true;
    }
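
    // Net effect of the block above (illustrative): a direct memory input
    // that is a simple constant, e.g. an i32 42, now points at a
    // constant-pool slot, while a non-constant value points at a fresh stack
    // slot the store just initialized; either way the asm sees only an
    // address.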

    // If this constraint is for a specific register, allocate it before
    // anything else.
    if (OpInfo.ConstraintType == TargetLowering::C_Register)
      GetRegistersForValue(OpInfo, OutputRegs, InputRegs);
  }
  ConstraintInfos.clear();

  // Third pass - Loop over all of the operands, assigning virtual or physregs
  // to register class operands.
  for (unsigned i = 0, e = ConstraintOperands.size(); i != e; ++i) {
    SDISelAsmOperandInfo &OpInfo = ConstraintOperands[i];

    // C_Register operands have already been allocated, Other/Memory don't
    // need to be handled here.
    if (OpInfo.ConstraintType == TargetLowering::C_RegisterClass)
      GetRegistersForValue(OpInfo, OutputRegs, InputRegs);
  }

  // AsmNodeOperands - The operands for the ISD::INLINEASM node.
  std::vector<SDValue> AsmNodeOperands;
  AsmNodeOperands.push_back(SDValue());  // reserve space for input chain
  AsmNodeOperands.push_back(
          DAG.getTargetExternalSymbol(IA->getAsmString().c_str(), MVT::Other));

  // Loop over all of the inputs, copying the operand values into the
  // appropriate registers and processing the output regs.
  RegsForValue RetValRegs;

  // IndirectStoresToEmit - The set of stores to emit after the inline asm node.
  std::vector<std::pair<RegsForValue, Value*> > IndirectStoresToEmit;
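
  // Layout note (descriptive): by the end of the loop below, AsmNodeOperands
  // is [chain, asm-string, desc, ops..., desc, ops..., ...] where each "desc"
  // is a TargetConstant whose low 3 bits encode the operand kind (REGDEF,
  // REGUSE, IMM, MEM, ...) and whose upper bits encode how many SDValue
  // operands follow it.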

  for (unsigned i = 0, e = ConstraintOperands.size(); i != e; ++i) {
    SDISelAsmOperandInfo &OpInfo = ConstraintOperands[i];

    switch (OpInfo.Type) {
    case InlineAsm::isOutput: {
      if (OpInfo.ConstraintType != TargetLowering::C_RegisterClass &&
          OpInfo.ConstraintType != TargetLowering::C_Register) {
        // Memory output, or 'other' output (e.g. 'X' constraint).
        assert(OpInfo.isIndirect && "Memory output must be indirect operand");

        // Add information to the INLINEASM node to know about this output.
        unsigned ResOpType = 4/*MEM*/ | (1<<3);
        AsmNodeOperands.push_back(DAG.getTargetConstant(ResOpType,
                                                        TLI.getPointerTy()));
        AsmNodeOperands.push_back(OpInfo.CallOperand);
        break;
      }
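
      // Encoding note for the descriptor above: the TargetConstant packs the
      // operand kind (4 = MEM) in its low 3 bits and the operand count (here
      // 1, the address that follows) in the bits above them.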

      // Otherwise, this is a register or register class output.

      // Copy the output from the appropriate register. Find a register that
      // we can use.
      if (OpInfo.AssignedRegs.Regs.empty()) {
        cerr << "Couldn't allocate output reg for constraint '"
             << OpInfo.ConstraintCode << "'!\n";
        exit(1);
      }

      // If this is an indirect operand, store through the pointer after the
      // asm.
      if (OpInfo.isIndirect) {
        IndirectStoresToEmit.push_back(std::make_pair(OpInfo.AssignedRegs,
                                                      OpInfo.CallOperandVal));
      } else {
        // This is the result value of the call.
        assert(CS.getType() != Type::VoidTy && "Bad inline asm!");
        // Concatenate this output onto the outputs list.
        RetValRegs.append(OpInfo.AssignedRegs);
      }

      // Add information to the INLINEASM node to know that this register is
      // set.
      OpInfo.AssignedRegs.AddInlineAsmOperands(OpInfo.isEarlyClobber ?
                                               6 /* EARLYCLOBBER REGDEF */ :
                                               2 /* REGDEF */ ,
                                               DAG, AsmNodeOperands);
      break;
    }
    case InlineAsm::isInput: {
      SDValue InOperandVal = OpInfo.CallOperand;

      if (OpInfo.isMatchingInputConstraint()) {   // Matching constraint?
        // If this is required to match an output register we have already
        // set, just use its register.
        unsigned OperandNo = OpInfo.getMatchedOperand();

        // Scan until we find the definition we already emitted of this
        // operand. When we find it, create a RegsForValue operand.
        unsigned CurOp = 2;  // The first operand.
        for (; OperandNo; --OperandNo) {
          // Advance to the next operand.
          unsigned NumOps =
            cast<ConstantSDNode>(AsmNodeOperands[CurOp])->getZExtValue();
          assert(((NumOps & 7) == 2 /*REGDEF*/ ||
                  (NumOps & 7) == 6 /*EARLYCLOBBER REGDEF*/ ||
                  (NumOps & 7) == 4 /*MEM*/) &&
                 "Skipped past definitions?");
          CurOp += (NumOps>>3)+1;
        }

        unsigned NumOps =
          cast<ConstantSDNode>(AsmNodeOperands[CurOp])->getZExtValue();
        if ((NumOps & 7) == 2 /*REGDEF*/
            || (NumOps & 7) == 6 /* EARLYCLOBBER REGDEF */) {
          // Add NumOps>>3 registers to MatchedRegs.
          RegsForValue MatchedRegs;
          MatchedRegs.TLI = &TLI;
          MatchedRegs.ValueVTs.push_back(InOperandVal.getValueType());
          MatchedRegs.RegVTs.push_back(AsmNodeOperands[CurOp+1].getValueType());
          for (unsigned i = 0, e = NumOps>>3; i != e; ++i) {
            unsigned Reg =
              cast<RegisterSDNode>(AsmNodeOperands[++CurOp])->getReg();
            MatchedRegs.Regs.push_back(Reg);
          }

          // Use the produced MatchedRegs object to copy the input value into
          // the registers of the matched output.
          MatchedRegs.getCopyToRegs(InOperandVal, DAG, Chain, &Flag);
          MatchedRegs.AddInlineAsmOperands(1 /*REGUSE*/, DAG, AsmNodeOperands);
          break;
        } else {
          assert(((NumOps & 7) == 4) && "Unknown matching constraint!");
          assert((NumOps >> 3) == 1 && "Unexpected number of operands");
          // Add information to the INLINEASM node to know about this input.
          AsmNodeOperands.push_back(DAG.getTargetConstant(NumOps,
                                                          TLI.getPointerTy()));
          AsmNodeOperands.push_back(AsmNodeOperands[CurOp+1]);
          break;
        }
      }

      if (OpInfo.ConstraintType == TargetLowering::C_Other) {
        assert(!OpInfo.isIndirect &&
               "Don't know how to handle indirect other inputs yet!");

        std::vector<SDValue> Ops;
        TLI.LowerAsmOperandForConstraint(InOperandVal, OpInfo.ConstraintCode[0],
                                         hasMemory, Ops, DAG);
        if (Ops.empty()) {
          cerr << "Invalid operand for inline asm constraint '"
               << OpInfo.ConstraintCode << "'!\n";
          exit(1);
        }

        // Add information to the INLINEASM node to know about this input.
        unsigned ResOpType = 3 /*IMM*/ | (Ops.size() << 3);
        AsmNodeOperands.push_back(DAG.getTargetConstant(ResOpType,
                                                        TLI.getPointerTy()));
        AsmNodeOperands.insert(AsmNodeOperands.end(), Ops.begin(), Ops.end());
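        // For instance (illustrative constraint): an immediate-style 'i'
        // input such as "i"(i32 42) takes this path; LowerAsmOperandForConstraint
        // returns a single TargetConstant, recorded above as an IMM descriptor
        // whose count is Ops.size() == 1.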
        break;
      } else if (OpInfo.ConstraintType == TargetLowering::C_Memory) {
        assert(OpInfo.isIndirect && "Operand must be indirect to be a mem!");
        assert(InOperandVal.getValueType() == TLI.getPointerTy() &&
               "Memory operands expect pointer values");

        // Add information to the INLINEASM node to know about this input.
        unsigned ResOpType = 4/*MEM*/ | (1<<3);
        AsmNodeOperands.push_back(DAG.getTargetConstant(ResOpType,
                                                        TLI.getPointerTy()));
        AsmNodeOperands.push_back(InOperandVal);
        break;
      }

      assert((OpInfo.ConstraintType == TargetLowering::C_RegisterClass ||
              OpInfo.ConstraintType == TargetLowering::C_Register) &&
             "Unknown constraint type!");
      assert(!OpInfo.isIndirect &&
             "Don't know how to handle indirect register inputs yet!");

      // Copy the input into the appropriate registers.
      if (OpInfo.AssignedRegs.Regs.empty()) {
        cerr << "Couldn't allocate input reg for constraint '"
             << OpInfo.ConstraintCode << "'!\n";
        exit(1);
      }

      OpInfo.AssignedRegs.getCopyToRegs(InOperandVal, DAG, Chain, &Flag);

      OpInfo.AssignedRegs.AddInlineAsmOperands(1/*REGUSE*/,
                                               DAG, AsmNodeOperands);
      break;
    }
    case InlineAsm::isClobber: {
      // Add the clobbered value to the operand list, so that the register
      // allocator is aware that the physreg got clobbered.
      if (!OpInfo.AssignedRegs.Regs.empty())
        OpInfo.AssignedRegs.AddInlineAsmOperands(6 /* EARLYCLOBBER REGDEF */,
                                                 DAG, AsmNodeOperands);
      break;
    }
    }
  }

  // Finish up input operands.
  AsmNodeOperands[0] = Chain;
  if (Flag.getNode()) AsmNodeOperands.push_back(Flag);

  Chain = DAG.getNode(ISD::INLINEASM,
                      DAG.getNodeValueTypes(MVT::Other, MVT::Flag), 2,
                      &AsmNodeOperands[0], AsmNodeOperands.size());
  Flag = Chain.getValue(1);
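
  // The MVT::Flag result acts as glue: the CopyToReg nodes feeding the asm
  // and the CopyFromReg nodes reading its results are chained through it, so
  // the scheduler cannot separate the register copies from the INLINEASM.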

  // If this asm returns a register value, copy the result from that register
  // and set it as the value of the call.
  if (!RetValRegs.Regs.empty()) {
    SDValue Val = RetValRegs.getCopyFromRegs(DAG, Chain, &Flag);

    // FIXME: Why don't we do this for inline asms with MRVs?
    if (CS.getType()->isSingleValueType() && CS.getType()->isSized()) {
      MVT ResultType = TLI.getValueType(CS.getType());

      // If any of the results of the inline asm is a vector, it may have the
      // wrong width/num elts. This can happen for register classes that can
      // contain multiple different value types. The preg or vreg allocated
      // may not have the same VT as was expected. Convert it to the right
      // type with bit_convert.
      if (ResultType != Val.getValueType() && Val.getValueType().isVector()) {
        Val = DAG.getNode(ISD::BIT_CONVERT, ResultType, Val);

      } else if (ResultType != Val.getValueType() &&
                 ResultType.isInteger() && Val.getValueType().isInteger()) {
        // If a result value was tied to an input value, the computed result
        // may have a wider width than the expected result. Extract the
        // relevant portion.
        Val = DAG.getNode(ISD::TRUNCATE, ResultType, Val);
      }

      assert(ResultType == Val.getValueType() && "Asm result value mismatch!");
    }

    setValue(CS.getInstruction(), Val);
  }

  std::vector<std::pair<SDValue, Value*> > StoresToEmit;

  // Process indirect outputs, first output all of the flagged copies out of
  // physregs.
  for (unsigned i = 0, e = IndirectStoresToEmit.size(); i != e; ++i) {
    RegsForValue &OutRegs = IndirectStoresToEmit[i].first;
    Value *Ptr = IndirectStoresToEmit[i].second;
    SDValue OutVal = OutRegs.getCopyFromRegs(DAG, Chain, &Flag);
    StoresToEmit.push_back(std::make_pair(OutVal, Ptr));
  }

  // Emit the non-flagged stores from the physregs.
  SmallVector<SDValue, 8> OutChains;
  for (unsigned i = 0, e = StoresToEmit.size(); i != e; ++i)
    OutChains.push_back(DAG.getStore(Chain, StoresToEmit[i].first,
                                     getValue(StoresToEmit[i].second),
                                     StoresToEmit[i].second, 0));
  if (!OutChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, MVT::Other,
                        &OutChains[0], OutChains.size());
  DAG.setRoot(Chain);
}
void SelectionDAGLowering::visitMalloc(MallocInst &I) {
  SDValue Src = getValue(I.getOperand(0));

  MVT IntPtr = TLI.getPointerTy();

  if (IntPtr.bitsLT(Src.getValueType()))
    Src = DAG.getNode(ISD::TRUNCATE, IntPtr, Src);
  else if (IntPtr.bitsGT(Src.getValueType()))
    Src = DAG.getNode(ISD::ZERO_EXTEND, IntPtr, Src);

  // Scale the source by the type size.
  uint64_t ElementSize = TD->getABITypeSize(I.getType()->getElementType());
  Src = DAG.getNode(ISD::MUL, Src.getValueType(),
                    Src, DAG.getIntPtrConstant(ElementSize));

  TargetLowering::ArgListTy Args;
  TargetLowering::ArgListEntry Entry;
  Entry.Node = Src;
  Entry.Ty = TLI.getTargetData()->getIntPtrType();
  Args.push_back(Entry);

  std::pair<SDValue,SDValue> Result =
    TLI.LowerCallTo(getRoot(), I.getType(), false, false, false, false,
                    CallingConv::C, PerformTailCallOpt,
                    DAG.getExternalSymbol("malloc", IntPtr),
                    Args, DAG);
  setValue(&I, Result.first);  // Pointers always fit in registers
  DAG.setRoot(Result.second);
}
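
// Worked example for visitMalloc above (illustrative, assuming i32 has ABI
// size 4): the LLVM 2.x-era instruction
//   %p = malloc i32, i32 %n
// is lowered to an ordinary C call "malloc(%n * 4)" computed in the pointer
// width, and the returned pointer becomes the value of %p.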
void SelectionDAGLowering::visitFree(FreeInst &I) {
  TargetLowering::ArgListTy Args;
  TargetLowering::ArgListEntry Entry;
  Entry.Node = getValue(I.getOperand(0));
  Entry.Ty = TLI.getTargetData()->getIntPtrType();
  Args.push_back(Entry);
  MVT IntPtr = TLI.getPointerTy();
  std::pair<SDValue,SDValue> Result =
    TLI.LowerCallTo(getRoot(), Type::VoidTy, false, false, false, false,
                    CallingConv::C, PerformTailCallOpt,
                    DAG.getExternalSymbol("free", IntPtr), Args, DAG);
  DAG.setRoot(Result.second);
}
void SelectionDAGLowering::visitVAStart(CallInst &I) {
  DAG.setRoot(DAG.getNode(ISD::VASTART, MVT::Other, getRoot(),
                          getValue(I.getOperand(1)),
                          DAG.getSrcValue(I.getOperand(1))));
}

void SelectionDAGLowering::visitVAArg(VAArgInst &I) {
  SDValue V = DAG.getVAArg(TLI.getValueType(I.getType()), getRoot(),
                           getValue(I.getOperand(0)),
                           DAG.getSrcValue(I.getOperand(0)));
  setValue(&I, V);
  DAG.setRoot(V.getValue(1));
}

void SelectionDAGLowering::visitVAEnd(CallInst &I) {
  DAG.setRoot(DAG.getNode(ISD::VAEND, MVT::Other, getRoot(),
                          getValue(I.getOperand(1)),
                          DAG.getSrcValue(I.getOperand(1))));
}

void SelectionDAGLowering::visitVACopy(CallInst &I) {
  DAG.setRoot(DAG.getNode(ISD::VACOPY, MVT::Other, getRoot(),
                          getValue(I.getOperand(1)),
                          getValue(I.getOperand(2)),
                          DAG.getSrcValue(I.getOperand(1)),
                          DAG.getSrcValue(I.getOperand(2))));
}
/// TargetLowering::LowerArguments - This is the default LowerArguments
/// implementation, which just inserts a FORMAL_ARGUMENTS node. FIXME: When all
/// targets are migrated to using FORMAL_ARGUMENTS, this hook should be
/// integrated into SDISel.
void TargetLowering::LowerArguments(Function &F, SelectionDAG &DAG,
                                    SmallVectorImpl<SDValue> &ArgValues) {
  // Add CC# and isVararg as operands to the FORMAL_ARGUMENTS node.
  SmallVector<SDValue, 3+16> Ops;
  Ops.push_back(DAG.getRoot());
  Ops.push_back(DAG.getConstant(F.getCallingConv(), getPointerTy()));
  Ops.push_back(DAG.getConstant(F.isVarArg(), getPointerTy()));

  // Add one result value for each formal argument.
  SmallVector<MVT, 16> RetVals;
  unsigned j = 1;
  for (Function::arg_iterator I = F.arg_begin(), E = F.arg_end();
       I != E; ++I, ++j) {
    SmallVector<MVT, 4> ValueVTs;
    ComputeValueVTs(*this, I->getType(), ValueVTs);
    for (unsigned Value = 0, NumValues = ValueVTs.size();
         Value != NumValues; ++Value) {
      MVT VT = ValueVTs[Value];
      const Type *ArgTy = VT.getTypeForMVT();
      ISD::ArgFlagsTy Flags;
      unsigned OriginalAlignment =
        getTargetData()->getABITypeAlignment(ArgTy);

      if (F.paramHasAttr(j, Attribute::ZExt))
        Flags.setZExt();
      if (F.paramHasAttr(j, Attribute::SExt))
        Flags.setSExt();
      if (F.paramHasAttr(j, Attribute::InReg))
        Flags.setInReg();
      if (F.paramHasAttr(j, Attribute::StructRet))
        Flags.setSRet();
      if (F.paramHasAttr(j, Attribute::ByVal)) {
        Flags.setByVal();
        const PointerType *Ty = cast<PointerType>(I->getType());
        const Type *ElementTy = Ty->getElementType();
        unsigned FrameAlign = getByValTypeAlignment(ElementTy);
        unsigned FrameSize  = getTargetData()->getABITypeSize(ElementTy);
        // For ByVal, alignment should be passed from FE. BE will guess if
        // this info is not there but there are cases it cannot get right.
        if (F.getParamAlignment(j))
          FrameAlign = F.getParamAlignment(j);
        Flags.setByValAlign(FrameAlign);
        Flags.setByValSize(FrameSize);
      }
      if (F.paramHasAttr(j, Attribute::Nest))
        Flags.setNest();
      Flags.setOrigAlign(OriginalAlignment);

      MVT RegisterVT = getRegisterType(VT);
      unsigned NumRegs = getNumRegisters(VT);
      for (unsigned i = 0; i != NumRegs; ++i) {
        RetVals.push_back(RegisterVT);
        ISD::ArgFlagsTy MyFlags = Flags;
        if (NumRegs > 1 && i == 0)
          MyFlags.setSplit();
        // if it isn't the first piece, alignment must be 1
        else if (i > 0)
          MyFlags.setOrigAlign(1);
        Ops.push_back(DAG.getArgFlags(MyFlags));
      }
    }
  }

  RetVals.push_back(MVT::Other);

  // Create the node.
  SDNode *Result = DAG.getNode(ISD::FORMAL_ARGUMENTS,
                               DAG.getVTList(&RetVals[0], RetVals.size()),
                               &Ops[0], Ops.size()).getNode();
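
  // Shape note (descriptive): the node built above yields one result per
  // legal argument register plus a trailing chain result; its operands are
  // [chain, CC#, isVarArg, per-register ArgFlags...]. The NumArgRegs
  // computation below relies on this result layout.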

  // Prelower FORMAL_ARGUMENTS. This isn't required for functionality, but
  // allows exposing the loads that may be part of the argument access to the
  // first DAGCombiner pass.
  SDValue TmpRes = LowerOperation(SDValue(Result, 0), DAG);

  // The number of results should match up, except that the lowered one may
  // have an extra flag result.
  assert((Result->getNumValues() == TmpRes.getNode()->getNumValues() ||
          (Result->getNumValues()+1 == TmpRes.getNode()->getNumValues() &&
           TmpRes.getValue(Result->getNumValues()).getValueType() == MVT::Flag))
         && "Lowering produced unexpected number of results!");

  // The FORMAL_ARGUMENTS node itself is likely no longer needed.
  if (Result != TmpRes.getNode() && Result->use_empty()) {
    HandleSDNode Dummy(DAG.getRoot());
    DAG.RemoveDeadNode(Result);
  }
  Result = TmpRes.getNode();

  unsigned NumArgRegs = Result->getNumValues() - 1;
  DAG.setRoot(SDValue(Result, NumArgRegs));

  // Set up the return result vector.
  unsigned i = 0;
  unsigned Idx = 1;
  for (Function::arg_iterator I = F.arg_begin(), E = F.arg_end(); I != E;
       ++I, ++Idx) {
    SmallVector<MVT, 4> ValueVTs;
    ComputeValueVTs(*this, I->getType(), ValueVTs);
    for (unsigned Value = 0, NumValues = ValueVTs.size();
         Value != NumValues; ++Value) {
      MVT VT = ValueVTs[Value];
      MVT PartVT = getRegisterType(VT);

      unsigned NumParts = getNumRegisters(VT);
      SmallVector<SDValue, 4> Parts(NumParts);
      for (unsigned j = 0; j != NumParts; ++j)
        Parts[j] = SDValue(Result, i++);

      ISD::NodeType AssertOp = ISD::DELETED_NODE;
      if (F.paramHasAttr(Idx, Attribute::SExt))
        AssertOp = ISD::AssertSext;
      else if (F.paramHasAttr(Idx, Attribute::ZExt))
        AssertOp = ISD::AssertZext;

      ArgValues.push_back(getCopyFromParts(DAG, &Parts[0], NumParts, PartVT, VT,
                                           AssertOp));
    }
  }
  assert(i == NumArgRegs && "Argument register count mismatch!");
}
/// TargetLowering::LowerCallTo - This is the default LowerCallTo
/// implementation, which just inserts an ISD::CALL node, which is later custom
/// lowered by the target to something concrete. FIXME: When all targets are
/// migrated to using ISD::CALL, this hook should be integrated into SDISel.
std::pair<SDValue, SDValue>
TargetLowering::LowerCallTo(SDValue Chain, const Type *RetTy,
                            bool RetSExt, bool RetZExt, bool isVarArg,
                            bool isInreg,
                            unsigned CallingConv, bool isTailCall,
                            SDValue Callee,
                            ArgListTy &Args, SelectionDAG &DAG) {
  assert((!isTailCall || PerformTailCallOpt) &&
         "isTailCall set when tail-call optimizations are disabled!");

  SmallVector<SDValue, 32> Ops;
  Ops.push_back(Chain);   // Op#0 - Chain
  Ops.push_back(Callee);

  // Handle all of the outgoing arguments.
  for (unsigned i = 0, e = Args.size(); i != e; ++i) {
    SmallVector<MVT, 4> ValueVTs;
    ComputeValueVTs(*this, Args[i].Ty, ValueVTs);
    for (unsigned Value = 0, NumValues = ValueVTs.size();
         Value != NumValues; ++Value) {
      MVT VT = ValueVTs[Value];
      const Type *ArgTy = VT.getTypeForMVT();
      SDValue Op = SDValue(Args[i].Node.getNode(),
                           Args[i].Node.getResNo() + Value);
      ISD::ArgFlagsTy Flags;
      unsigned OriginalAlignment =
        getTargetData()->getABITypeAlignment(ArgTy);

      if (Args[i].isZExt)
        Flags.setZExt();
      if (Args[i].isSExt)
        Flags.setSExt();
      if (Args[i].isInReg)
        Flags.setInReg();
      if (Args[i].isSRet)
        Flags.setSRet();
      if (Args[i].isByVal) {
        Flags.setByVal();
        const PointerType *Ty = cast<PointerType>(Args[i].Ty);
        const Type *ElementTy = Ty->getElementType();
        unsigned FrameAlign = getByValTypeAlignment(ElementTy);
        unsigned FrameSize  = getTargetData()->getABITypeSize(ElementTy);
        // For ByVal, alignment should come from FE. BE will guess if this
        // info is not there but there are cases it cannot get right.
        if (Args[i].Alignment)
          FrameAlign = Args[i].Alignment;
        Flags.setByValAlign(FrameAlign);
        Flags.setByValSize(FrameSize);
      }
      if (Args[i].isNest)
        Flags.setNest();
      Flags.setOrigAlign(OriginalAlignment);

      MVT PartVT = getRegisterType(VT);
      unsigned NumParts = getNumRegisters(VT);
      SmallVector<SDValue, 4> Parts(NumParts);
      ISD::NodeType ExtendKind = ISD::ANY_EXTEND;

      if (Args[i].isSExt)
        ExtendKind = ISD::SIGN_EXTEND;
      else if (Args[i].isZExt)
        ExtendKind = ISD::ZERO_EXTEND;

      getCopyToParts(DAG, Op, &Parts[0], NumParts, PartVT, ExtendKind);

      for (unsigned i = 0; i != NumParts; ++i) {
        // if it isn't the first piece, alignment must be 1
        ISD::ArgFlagsTy MyFlags = Flags;
        if (NumParts > 1 && i == 0)
          MyFlags.setSplit();
        else if (i != 0)
          MyFlags.setOrigAlign(1);

        Ops.push_back(Parts[i]);
        Ops.push_back(DAG.getArgFlags(MyFlags));
      }
    }
  }
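
  // Layout note (descriptive): Ops now holds
  //   [Chain, Callee, part, argflags, part, argflags, ...]
  // with one (value, ArgFlags) pair per legal register-sized piece of each
  // argument; this is the operand list handed to DAG.getCall below.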

  // Figure out the result value types. We start by making a list of
  // the potentially illegal return value types.
  SmallVector<MVT, 4> LoweredRetTys;
  SmallVector<MVT, 4> RetTys;
  ComputeValueVTs(*this, RetTy, RetTys);

  // Then we translate that to a list of legal types.
  for (unsigned I = 0, E = RetTys.size(); I != E; ++I) {
    MVT VT = RetTys[I];
    MVT RegisterVT = getRegisterType(VT);
    unsigned NumRegs = getNumRegisters(VT);
    for (unsigned i = 0; i != NumRegs; ++i)
      LoweredRetTys.push_back(RegisterVT);
  }

  LoweredRetTys.push_back(MVT::Other);  // Always has a chain.

  // Create the CALL node.
  SDValue Res = DAG.getCall(CallingConv, isVarArg, isTailCall, isInreg,
                            DAG.getVTList(&LoweredRetTys[0],
                                          LoweredRetTys.size()),
                            &Ops[0], Ops.size());
  Chain = Res.getValue(LoweredRetTys.size() - 1);

  // Gather up the call result into a single value.
  if (RetTy != Type::VoidTy && !RetTys.empty()) {
    ISD::NodeType AssertOp = ISD::DELETED_NODE;

    if (RetSExt)
      AssertOp = ISD::AssertSext;
    else if (RetZExt)
      AssertOp = ISD::AssertZext;

    SmallVector<SDValue, 4> ReturnValues;
    unsigned RegNo = 0;
    for (unsigned I = 0, E = RetTys.size(); I != E; ++I) {
      MVT VT = RetTys[I];
      MVT RegisterVT = getRegisterType(VT);
      unsigned NumRegs = getNumRegisters(VT);
      unsigned RegNoEnd = NumRegs + RegNo;
      SmallVector<SDValue, 4> Results;
      for (; RegNo != RegNoEnd; ++RegNo)
        Results.push_back(Res.getValue(RegNo));
      SDValue ReturnValue =
        getCopyFromParts(DAG, &Results[0], NumRegs, RegisterVT, VT,
                         AssertOp);
      ReturnValues.push_back(ReturnValue);
    }
    Res = DAG.getNode(ISD::MERGE_VALUES,
                      DAG.getVTList(&RetTys[0], RetTys.size()),
                      &ReturnValues[0], ReturnValues.size());
  }

  return std::make_pair(Res, Chain);
}
SDValue TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) {
  assert(0 && "LowerOperation not implemented for this target!");
  abort();
  return SDValue();
}
void SelectionDAGLowering::CopyValueToVirtualRegister(Value *V, unsigned Reg) {
  SDValue Op = getValue(V);
  assert((Op.getOpcode() != ISD::CopyFromReg ||
          cast<RegisterSDNode>(Op.getOperand(1))->getReg() != Reg) &&
         "Copy from a reg to the same reg!");
  assert(!TargetRegisterInfo::isPhysicalRegister(Reg) && "Is a physreg");

  RegsForValue RFV(TLI, Reg, V->getType());
  SDValue Chain = DAG.getEntryNode();
  RFV.getCopyToRegs(Op, DAG, Chain, 0);
  PendingExports.push_back(Chain);
}
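
// Note on PendingExports (descriptive): the copy built above is chained off
// the entry node rather than the current root, so unrelated nodes can be
// scheduled around it; the queued chains are merged back into the root (via
// getControlRoot()) before points that must observe the virtual register,
// e.g. terminators.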

#include "llvm/CodeGen/SelectionDAGISel.h"

void SelectionDAGISel::
LowerArguments(BasicBlock *LLVMBB) {
  // If this is the entry block, emit arguments.
  Function &F = *LLVMBB->getParent();
  SDValue OldRoot = SDL->DAG.getRoot();
  SmallVector<SDValue, 16> Args;
  TLI.LowerArguments(F, SDL->DAG, Args);

  unsigned a = 0;
  for (Function::arg_iterator AI = F.arg_begin(), E = F.arg_end();
       AI != E; ++AI) {
    SmallVector<MVT, 4> ValueVTs;
    ComputeValueVTs(TLI, AI->getType(), ValueVTs);
    unsigned NumValues = ValueVTs.size();
    if (!AI->use_empty()) {
      SDL->setValue(AI, SDL->DAG.getMergeValues(&Args[a], NumValues));
      // If this argument is live outside of the entry block, insert a copy
      // from wherever we got it to the vreg that other BB's will reference
      // it as.
      DenseMap<const Value*, unsigned>::iterator VMI=FuncInfo->ValueMap.find(AI);
      if (VMI != FuncInfo->ValueMap.end()) {
        SDL->CopyValueToVirtualRegister(AI, VMI->second);
      }
    }
    a += NumValues;
  }

  // Finally, if the target has anything special to do, allow it to do so.
  // FIXME: this should insert code into the DAG!
  EmitFunctionEntryCode(F, SDL->DAG.getMachineFunction());
}
/// Handle PHI nodes in successor blocks. Emit code into the SelectionDAG to
/// ensure constants are generated when needed. Remember the virtual registers
/// that need to be added to the Machine PHI nodes as input. We cannot just
/// directly add them, because expansion might result in multiple MBB's for one
/// BB. As such, the start of the BB might correspond to a different MBB than
/// the end.
///
void
SelectionDAGISel::HandlePHINodesInSuccessorBlocks(BasicBlock *LLVMBB) {
  TerminatorInst *TI = LLVMBB->getTerminator();

  SmallPtrSet<MachineBasicBlock *, 4> SuccsHandled;

  // Check successor nodes' PHI nodes that expect a constant to be available
  // from this block.
  for (unsigned succ = 0, e = TI->getNumSuccessors(); succ != e; ++succ) {
    BasicBlock *SuccBB = TI->getSuccessor(succ);
    if (!isa<PHINode>(SuccBB->begin())) continue;
    MachineBasicBlock *SuccMBB = FuncInfo->MBBMap[SuccBB];

    // If this terminator has multiple identical successors (common for
    // switches), only handle each succ once.
    if (!SuccsHandled.insert(SuccMBB)) continue;

    MachineBasicBlock::iterator MBBI = SuccMBB->begin();
    PHINode *PN;

    // At this point we know that there is a 1-1 correspondence between LLVM
    // PHI nodes and Machine PHI nodes, but the incoming operands have not
    // been emitted yet.
    for (BasicBlock::iterator I = SuccBB->begin();
         (PN = dyn_cast<PHINode>(I)); ++I) {
      // Ignore dead phi's.
      if (PN->use_empty()) continue;

      unsigned Reg;
      Value *PHIOp = PN->getIncomingValueForBlock(LLVMBB);

      if (Constant *C = dyn_cast<Constant>(PHIOp)) {
        unsigned &RegOut = SDL->ConstantsOut[C];
        if (RegOut == 0) {
          RegOut = FuncInfo->CreateRegForValue(C);
          SDL->CopyValueToVirtualRegister(C, RegOut);
        }
        Reg = RegOut;
      } else {
        Reg = FuncInfo->ValueMap[PHIOp];
        if (Reg == 0) {
          assert(isa<AllocaInst>(PHIOp) &&
                 FuncInfo->StaticAllocaMap.count(cast<AllocaInst>(PHIOp)) &&
                 "Didn't codegen value into a register!??");
          Reg = FuncInfo->CreateRegForValue(PHIOp);
          SDL->CopyValueToVirtualRegister(PHIOp, Reg);
        }
      }

      // Remember that this register needs to be added to the machine PHI node
      // as the input for this MBB.
      SmallVector<MVT, 4> ValueVTs;
      ComputeValueVTs(TLI, PN->getType(), ValueVTs);
      for (unsigned vti = 0, vte = ValueVTs.size(); vti != vte; ++vti) {
        MVT VT = ValueVTs[vti];
        unsigned NumRegisters = TLI.getNumRegisters(VT);
        for (unsigned i = 0, e = NumRegisters; i != e; ++i)
          SDL->PHINodesToUpdate.push_back(std::make_pair(MBBI++, Reg+i));
        Reg += NumRegisters;
      }
    }
  }
  SDL->ConstantsOut.clear();
}
/// This is the Fast-ISel version of HandlePHINodesInSuccessorBlocks. It only
/// supports legal types, and it emits MachineInstrs directly instead of
/// creating SelectionDAG nodes.
///
bool
SelectionDAGISel::HandlePHINodesInSuccessorBlocksFast(BasicBlock *LLVMBB,
                                                      FastISel *F) {
  TerminatorInst *TI = LLVMBB->getTerminator();

  SmallPtrSet<MachineBasicBlock *, 4> SuccsHandled;
  unsigned OrigNumPHINodesToUpdate = SDL->PHINodesToUpdate.size();

  // Check successor nodes' PHI nodes that expect a constant to be available
  // from this block.
  for (unsigned succ = 0, e = TI->getNumSuccessors(); succ != e; ++succ) {
    BasicBlock *SuccBB = TI->getSuccessor(succ);
    if (!isa<PHINode>(SuccBB->begin())) continue;
    MachineBasicBlock *SuccMBB = FuncInfo->MBBMap[SuccBB];

    // If this terminator has multiple identical successors (common for
    // switches), only handle each succ once.
    if (!SuccsHandled.insert(SuccMBB)) continue;

    MachineBasicBlock::iterator MBBI = SuccMBB->begin();
    PHINode *PN;

    // At this point we know that there is a 1-1 correspondence between LLVM
    // PHI nodes and Machine PHI nodes, but the incoming operands have not
    // been emitted yet.
    for (BasicBlock::iterator I = SuccBB->begin();
         (PN = dyn_cast<PHINode>(I)); ++I) {
      // Ignore dead phi's.
      if (PN->use_empty()) continue;

      // Only handle legal types. Two interesting things to note here. First,
      // by bailing out early, we may leave behind some dead instructions,
      // since SelectionDAG's HandlePHINodesInSuccessorBlocks will insert its
      // own moves. Second, this check is necessary because FastISel doesn't
      // use CreateRegForValue to create registers, so it always creates
      // exactly one register for each non-void instruction.
      MVT VT = TLI.getValueType(PN->getType(), /*AllowUnknown=*/true);
      if (VT == MVT::Other || !TLI.isTypeLegal(VT)) {
        // Promote MVT::i1.
        if (VT == MVT::i1)
          VT = TLI.getTypeToTransformTo(VT);
        else {
          SDL->PHINodesToUpdate.resize(OrigNumPHINodesToUpdate);
          return false;
        }
      }

      Value *PHIOp = PN->getIncomingValueForBlock(LLVMBB);

      unsigned Reg = F->getRegForValue(PHIOp);
      if (Reg == 0) {
        SDL->PHINodesToUpdate.resize(OrigNumPHINodesToUpdate);
        return false;
      }
      SDL->PHINodesToUpdate.push_back(std::make_pair(MBBI++, Reg));