1 //===-- SelectionDAGISel.cpp - Implement the SelectionDAGISel class -------===//
3 // The LLVM Compiler Infrastructure
5 // This file was developed by the LLVM research group and is distributed under
6 // the University of Illinois Open Source License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This implements the SelectionDAGISel class.
12 //===----------------------------------------------------------------------===//
14 #define DEBUG_TYPE "isel"
15 #include "llvm/CodeGen/SelectionDAGISel.h"
16 #include "llvm/CodeGen/ScheduleDAG.h"
17 #include "llvm/CallingConv.h"
18 #include "llvm/Constants.h"
19 #include "llvm/DerivedTypes.h"
20 #include "llvm/Function.h"
21 #include "llvm/GlobalVariable.h"
22 #include "llvm/InlineAsm.h"
23 #include "llvm/Instructions.h"
24 #include "llvm/Intrinsics.h"
25 #include "llvm/IntrinsicInst.h"
26 #include "llvm/CodeGen/IntrinsicLowering.h"
27 #include "llvm/CodeGen/MachineDebugInfo.h"
28 #include "llvm/CodeGen/MachineFunction.h"
29 #include "llvm/CodeGen/MachineFrameInfo.h"
30 #include "llvm/CodeGen/MachineJumpTableInfo.h"
31 #include "llvm/CodeGen/MachineInstrBuilder.h"
32 #include "llvm/CodeGen/SelectionDAG.h"
33 #include "llvm/CodeGen/SSARegMap.h"
34 #include "llvm/Target/MRegisterInfo.h"
35 #include "llvm/Target/TargetData.h"
36 #include "llvm/Target/TargetFrameInfo.h"
37 #include "llvm/Target/TargetInstrInfo.h"
38 #include "llvm/Target/TargetLowering.h"
39 #include "llvm/Target/TargetMachine.h"
40 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
41 #include "llvm/Support/CommandLine.h"
42 #include "llvm/Support/MathExtras.h"
43 #include "llvm/Support/Debug.h"
#ifndef NDEBUG
static cl::opt<bool>
ViewISelDAGs("view-isel-dags", cl::Hidden,
             cl::desc("Pop up a window to show isel dags as they are selected"));
static cl::opt<bool>
ViewSchedDAGs("view-sched-dags", cl::Hidden,
              cl::desc("Pop up a window to show sched dags as they are processed"));
#else
static const bool ViewISelDAGs = 0, ViewSchedDAGs = 0;
#endif
61 // Scheduling heuristics
62 enum SchedHeuristics {
63 defaultScheduling, // Let the target specify its preference.
64 noScheduling, // No scheduling, emit breadth first sequence.
65 simpleScheduling, // Two pass, min. critical path, max. utilization.
  simpleNoItinScheduling, // Same as above, except using generic latency.
67 listSchedulingBURR, // Bottom-up reg reduction list scheduling.
68 listSchedulingTDRR, // Top-down reg reduction list scheduling.
69 listSchedulingTD // Top-down list scheduler.
73 cl::opt<SchedHeuristics>
76 cl::desc("Choose scheduling style"),
77 cl::init(defaultScheduling),
79 clEnumValN(defaultScheduling, "default",
80 "Target preferred scheduling style"),
81 clEnumValN(noScheduling, "none",
82 "No scheduling: breadth first sequencing"),
83 clEnumValN(simpleScheduling, "simple",
84 "Simple two pass scheduling: minimize critical path "
85 "and maximize processor utilization"),
86 clEnumValN(simpleNoItinScheduling, "simple-noitin",
87 "Simple two pass scheduling: Same as simple "
88 "except using generic latency"),
89 clEnumValN(listSchedulingBURR, "list-burr",
90 "Bottom-up register reduction list scheduling"),
91 clEnumValN(listSchedulingTDRR, "list-tdrr",
92 "Top-down register reduction list scheduling"),
93 clEnumValN(listSchedulingTD, "list-td",
94 "Top-down list scheduler"),
99 /// RegsForValue - This struct represents the physical registers that a
100 /// particular value is assigned and the type information about the value.
/// This is needed because a value may be promoted into a larger register, or
/// expanded into multiple registers that are each smaller than the value.
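/// For example (assuming a 32-bit target where i64 is not a legal register
/// type), an i64 value is expanded into two i32 registers (Regs.size() == 2,
/// RegVT == MVT::i32, ValueVT == MVT::i64), while an i8 value may be promoted
/// and held in a single i32 register (RegVT == MVT::i32, ValueVT == MVT::i8).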
103 struct RegsForValue {
  /// Regs - This list holds the register (for legal and promoted values)
  /// or register set (for expanded values) that the value should be
  /// assigned to.
107 std::vector<unsigned> Regs;
109 /// RegVT - The value type of each register.
111 MVT::ValueType RegVT;
113 /// ValueVT - The value type of the LLVM value, which may be promoted from
114 /// RegVT or made from merging the two expanded parts.
115 MVT::ValueType ValueVT;
117 RegsForValue() : RegVT(MVT::Other), ValueVT(MVT::Other) {}
119 RegsForValue(unsigned Reg, MVT::ValueType regvt, MVT::ValueType valuevt)
120 : RegVT(regvt), ValueVT(valuevt) {
123 RegsForValue(const std::vector<unsigned> ®s,
124 MVT::ValueType regvt, MVT::ValueType valuevt)
125 : Regs(regs), RegVT(regvt), ValueVT(valuevt) {
128 /// getCopyFromRegs - Emit a series of CopyFromReg nodes that copies from
129 /// this value and returns the result as a ValueVT value. This uses
130 /// Chain/Flag as the input and updates them for the output Chain/Flag.
131 SDOperand getCopyFromRegs(SelectionDAG &DAG,
132 SDOperand &Chain, SDOperand &Flag) const;
134 /// getCopyToRegs - Emit a series of CopyToReg nodes that copies the
135 /// specified value into the registers specified by this object. This uses
136 /// Chain/Flag as the input and updates them for the output Chain/Flag.
137 void getCopyToRegs(SDOperand Val, SelectionDAG &DAG,
138 SDOperand &Chain, SDOperand &Flag) const;
140 /// AddInlineAsmOperands - Add this value to the specified inlineasm node
141 /// operand list. This adds the code marker and includes the number of
142 /// values added into it.
143 void AddInlineAsmOperands(unsigned Code, SelectionDAG &DAG,
144 std::vector<SDOperand> &Ops) const;
149 //===--------------------------------------------------------------------===//
150 /// FunctionLoweringInfo - This contains information that is global to a
151 /// function that is used when lowering a region of the function.
152 class FunctionLoweringInfo {
159 FunctionLoweringInfo(TargetLowering &TLI, Function &Fn,MachineFunction &MF);
161 /// MBBMap - A mapping from LLVM basic blocks to their machine code entry.
162 std::map<const BasicBlock*, MachineBasicBlock *> MBBMap;
164 /// ValueMap - Since we emit code for the function a basic block at a time,
165 /// we must remember which virtual registers hold the values for
166 /// cross-basic-block values.
167 std::map<const Value*, unsigned> ValueMap;
169 /// StaticAllocaMap - Keep track of frame indices for fixed sized allocas in
170 /// the entry block. This allows the allocas to be efficiently referenced
171 /// anywhere in the function.
172 std::map<const AllocaInst*, int> StaticAllocaMap;
174 unsigned MakeReg(MVT::ValueType VT) {
175 return RegMap->createVirtualRegister(TLI.getRegClassFor(VT));
178 unsigned CreateRegForValue(const Value *V);
180 unsigned InitializeRegForValue(const Value *V) {
181 unsigned &R = ValueMap[V];
182 assert(R == 0 && "Already initialized this value register!");
183 return R = CreateRegForValue(V);
188 /// isUsedOutsideOfDefiningBlock - Return true if this instruction is used by
189 /// PHI nodes or outside of the basic block that defines it, or used by a
190 /// switch instruction, which may expand to multiple basic blocks.
191 static bool isUsedOutsideOfDefiningBlock(Instruction *I) {
192 if (isa<PHINode>(I)) return true;
193 BasicBlock *BB = I->getParent();
194 for (Value::use_iterator UI = I->use_begin(), E = I->use_end(); UI != E; ++UI)
195 if (cast<Instruction>(*UI)->getParent() != BB || isa<PHINode>(*UI) ||
196 isa<SwitchInst>(*UI))
201 /// isOnlyUsedInEntryBlock - If the specified argument is only used in the
202 /// entry block, return true. This includes arguments used by switches, since
203 /// the switch may expand into multiple basic blocks.
204 static bool isOnlyUsedInEntryBlock(Argument *A) {
205 BasicBlock *Entry = A->getParent()->begin();
206 for (Value::use_iterator UI = A->use_begin(), E = A->use_end(); UI != E; ++UI)
207 if (cast<Instruction>(*UI)->getParent() != Entry || isa<SwitchInst>(*UI))
208 return false; // Use not in entry block.
212 FunctionLoweringInfo::FunctionLoweringInfo(TargetLowering &tli,
213 Function &fn, MachineFunction &mf)
214 : TLI(tli), Fn(fn), MF(mf), RegMap(MF.getSSARegMap()) {
216 // Create a vreg for each argument register that is not dead and is used
217 // outside of the entry block for the function.
218 for (Function::arg_iterator AI = Fn.arg_begin(), E = Fn.arg_end();
220 if (!isOnlyUsedInEntryBlock(AI))
221 InitializeRegForValue(AI);
223 // Initialize the mapping of values to registers. This is only set up for
  // instruction values that are used outside of the block that defines
  // them.
226 Function::iterator BB = Fn.begin(), EB = Fn.end();
227 for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I)
228 if (AllocaInst *AI = dyn_cast<AllocaInst>(I))
229 if (ConstantUInt *CUI = dyn_cast<ConstantUInt>(AI->getArraySize())) {
230 const Type *Ty = AI->getAllocatedType();
231 uint64_t TySize = TLI.getTargetData()->getTypeSize(Ty);
233 std::max((unsigned)TLI.getTargetData()->getTypeAlignment(Ty),
236 // If the alignment of the value is smaller than the size of the value,
237 // and if the size of the value is particularly small (<= 8 bytes),
238 // round up to the size of the value for potentially better performance.
240 // FIXME: This could be made better with a preferred alignment hook in
241 // TargetData. It serves primarily to 8-byte align doubles for X86.
242 if (Align < TySize && TySize <= 8) Align = TySize;
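          // For example, a double has TySize == 8; if the target's alignment
          // for it is only 4, Align is raised to 8 here (this is the X86
          // double case the FIXME above refers to).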
243 TySize *= CUI->getValue(); // Get total allocated size.
244 if (TySize == 0) TySize = 1; // Don't create zero-sized stack objects.
245 StaticAllocaMap[AI] =
246 MF.getFrameInfo()->CreateStackObject((unsigned)TySize, Align);
249 for (; BB != EB; ++BB)
250 for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I)
251 if (!I->use_empty() && isUsedOutsideOfDefiningBlock(I))
252 if (!isa<AllocaInst>(I) ||
253 !StaticAllocaMap.count(cast<AllocaInst>(I)))
254 InitializeRegForValue(I);
256 // Create an initial MachineBasicBlock for each LLVM BasicBlock in F. This
257 // also creates the initial PHI MachineInstrs, though none of the input
258 // operands are populated.
259 for (BB = Fn.begin(), EB = Fn.end(); BB != EB; ++BB) {
260 MachineBasicBlock *MBB = new MachineBasicBlock(BB);
262 MF.getBasicBlockList().push_back(MBB);
264 // Create Machine PHI nodes for LLVM PHI nodes, lowering them as
267 for (BasicBlock::iterator I = BB->begin();
268 (PN = dyn_cast<PHINode>(I)); ++I)
269 if (!PN->use_empty()) {
270 MVT::ValueType VT = TLI.getValueType(PN->getType());
271 unsigned NumElements;
272 if (VT != MVT::Vector)
273 NumElements = TLI.getNumElements(VT);
275 MVT::ValueType VT1,VT2;
277 TLI.getPackedTypeBreakdown(cast<PackedType>(PN->getType()),
280 unsigned PHIReg = ValueMap[PN];
281 assert(PHIReg &&"PHI node does not have an assigned virtual register!");
282 for (unsigned i = 0; i != NumElements; ++i)
283 BuildMI(MBB, TargetInstrInfo::PHI, PN->getNumOperands(), PHIReg+i);
288 /// CreateRegForValue - Allocate the appropriate number of virtual registers of
289 /// the correctly promoted or expanded types. Assign these registers
290 /// consecutive vreg numbers and return the first assigned number.
291 unsigned FunctionLoweringInfo::CreateRegForValue(const Value *V) {
292 MVT::ValueType VT = TLI.getValueType(V->getType());
  // The number of register multiples we need in order to, e.g., split up
  // a <2 x int64> into 4 x i32 registers.
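  // For example (assuming a 32-bit target where i64 must be expanded), a
  // <2 x int64> value is first split into NumVectorRegs == 2 pieces of i64,
  // and each i64 piece is then expanded below into two i32 registers, for
  // four consecutive virtual registers in total.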
296 unsigned NumVectorRegs = 1;
298 // If this is a packed type, figure out what type it will decompose into
299 // and how many of the elements it will use.
300 if (VT == MVT::Vector) {
301 const PackedType *PTy = cast<PackedType>(V->getType());
302 unsigned NumElts = PTy->getNumElements();
303 MVT::ValueType EltTy = TLI.getValueType(PTy->getElementType());
305 // Divide the input until we get to a supported size. This will always
306 // end with a scalar if the target doesn't support vectors.
307 while (NumElts > 1 && !TLI.isTypeLegal(getVectorType(EltTy, NumElts))) {
314 VT = getVectorType(EltTy, NumElts);
317 // The common case is that we will only create one register for this
318 // value. If we have that case, create and return the virtual register.
319 unsigned NV = TLI.getNumElements(VT);
321 // If we are promoting this value, pick the next largest supported type.
322 MVT::ValueType PromotedType = TLI.getTypeToTransformTo(VT);
323 unsigned Reg = MakeReg(PromotedType);
324 // If this is a vector of supported or promoted types (e.g. 4 x i16),
325 // create all of the registers.
326 for (unsigned i = 1; i != NumVectorRegs; ++i)
327 MakeReg(PromotedType);
331 // If this value is represented with multiple target registers, make sure
332 // to create enough consecutive registers of the right (smaller) type.
333 unsigned NT = VT-1; // Find the type to use.
334 while (TLI.getNumElements((MVT::ValueType)NT) != 1)
337 unsigned R = MakeReg((MVT::ValueType)NT);
338 for (unsigned i = 1; i != NV*NumVectorRegs; ++i)
339 MakeReg((MVT::ValueType)NT);
343 //===----------------------------------------------------------------------===//
344 /// SelectionDAGLowering - This is the common target-independent lowering
345 /// implementation that is parameterized by a TargetLowering object.
346 /// Also, targets can overload any lowering method.
349 class SelectionDAGLowering {
350 MachineBasicBlock *CurMBB;
352 std::map<const Value*, SDOperand> NodeMap;
354 /// PendingLoads - Loads are not emitted to the program immediately. We bunch
355 /// them up and then emit token factor nodes when possible. This allows us to
  /// get simple disambiguation between loads without worrying about alias
  /// analysis.
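  /// For example, two non-volatile loads with no intervening store each use
  /// the current root as their chain and park their output chains here; the
  /// next call to getRoot() then joins them with a single TokenFactor node,
  /// so the loads remain freely reorderable with respect to each other.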
358 std::vector<SDOperand> PendingLoads;
360 /// Case - A pair of values to record the Value for a switch case, and the
361 /// case's target basic block.
362 typedef std::pair<Constant*, MachineBasicBlock*> Case;
363 typedef std::vector<Case>::iterator CaseItr;
364 typedef std::pair<CaseItr, CaseItr> CaseRange;
366 /// CaseRec - A struct with ctor used in lowering switches to a binary tree
367 /// of conditional branches.
369 CaseRec(MachineBasicBlock *bb, Constant *lt, Constant *ge, CaseRange r) :
370 CaseBB(bb), LT(lt), GE(ge), Range(r) {}
372 /// CaseBB - The MBB in which to emit the compare and branch
373 MachineBasicBlock *CaseBB;
374 /// LT, GE - If nonzero, we know the current case value must be less-than or
375 /// greater-than-or-equal-to these Constants.
378 /// Range - A pair of iterators representing the range of case values to be
379 /// processed at this point in the binary search tree.
383 /// The comparison function for sorting Case values.
385 bool operator () (const Case& C1, const Case& C2) {
386 if (const ConstantUInt* U1 = dyn_cast<const ConstantUInt>(C1.first))
387 return U1->getValue() < cast<const ConstantUInt>(C2.first)->getValue();
389 const ConstantSInt* S1 = dyn_cast<const ConstantSInt>(C1.first);
390 return S1->getValue() < cast<const ConstantSInt>(C2.first)->getValue();
395 // TLI - This is information that describes the available target features we
396 // need for lowering. This indicates when operations are unavailable,
397 // implemented with a libcall, etc.
400 const TargetData *TD;
402 /// SwitchCases - Vector of CaseBlock structures used to communicate
403 /// SwitchInst code generation information.
404 std::vector<SelectionDAGISel::CaseBlock> SwitchCases;
405 SelectionDAGISel::JumpTable JT;
407 /// FuncInfo - Information about the function as a whole.
409 FunctionLoweringInfo &FuncInfo;
411 SelectionDAGLowering(SelectionDAG &dag, TargetLowering &tli,
412 FunctionLoweringInfo &funcinfo)
413 : TLI(tli), DAG(dag), TD(DAG.getTarget().getTargetData()),
414 JT(0,0,0,0), FuncInfo(funcinfo) {
417 /// getRoot - Return the current virtual root of the Selection DAG.
419 SDOperand getRoot() {
420 if (PendingLoads.empty())
421 return DAG.getRoot();
423 if (PendingLoads.size() == 1) {
424 SDOperand Root = PendingLoads[0];
426 PendingLoads.clear();
430 // Otherwise, we have to make a token factor node.
431 SDOperand Root = DAG.getNode(ISD::TokenFactor, MVT::Other, PendingLoads);
432 PendingLoads.clear();
437 void visit(Instruction &I) { visit(I.getOpcode(), I); }
439 void visit(unsigned Opcode, User &I) {
441 default: assert(0 && "Unknown instruction type encountered!");
443 // Build the switch statement using the Instruction.def file.
444 #define HANDLE_INST(NUM, OPCODE, CLASS) \
445 case Instruction::OPCODE:return visit##OPCODE((CLASS&)I);
446 #include "llvm/Instruction.def"
450 void setCurrentBasicBlock(MachineBasicBlock *MBB) { CurMBB = MBB; }
452 SDOperand getLoadFrom(const Type *Ty, SDOperand Ptr,
453 SDOperand SrcValue, SDOperand Root,
456 SDOperand getIntPtrConstant(uint64_t Val) {
457 return DAG.getConstant(Val, TLI.getPointerTy());
460 SDOperand getValue(const Value *V);
462 const SDOperand &setValue(const Value *V, SDOperand NewN) {
463 SDOperand &N = NodeMap[V];
464 assert(N.Val == 0 && "Already set a value for this node!");
468 RegsForValue GetRegistersForValue(const std::string &ConstrCode,
470 bool OutReg, bool InReg,
471 std::set<unsigned> &OutputRegs,
472 std::set<unsigned> &InputRegs);
474 // Terminator instructions.
475 void visitRet(ReturnInst &I);
476 void visitBr(BranchInst &I);
477 void visitSwitch(SwitchInst &I);
478 void visitUnreachable(UnreachableInst &I) { /* noop */ }
480 // Helper for visitSwitch
481 void visitSwitchCase(SelectionDAGISel::CaseBlock &CB);
482 void visitJumpTable(SelectionDAGISel::JumpTable &JT);
484 // These all get lowered before this pass.
485 void visitInvoke(InvokeInst &I) { assert(0 && "TODO"); }
486 void visitUnwind(UnwindInst &I) { assert(0 && "TODO"); }
488 void visitBinary(User &I, unsigned IntOp, unsigned FPOp, unsigned VecOp);
489 void visitShift(User &I, unsigned Opcode);
490 void visitAdd(User &I) {
491 visitBinary(I, ISD::ADD, ISD::FADD, ISD::VADD);
493 void visitSub(User &I);
494 void visitMul(User &I) {
495 visitBinary(I, ISD::MUL, ISD::FMUL, ISD::VMUL);
497 void visitDiv(User &I) {
498 const Type *Ty = I.getType();
500 Ty->isSigned() ? ISD::SDIV : ISD::UDIV, ISD::FDIV,
501 Ty->isSigned() ? ISD::VSDIV : ISD::VUDIV);
503 void visitRem(User &I) {
504 const Type *Ty = I.getType();
505 visitBinary(I, Ty->isSigned() ? ISD::SREM : ISD::UREM, ISD::FREM, 0);
507 void visitAnd(User &I) { visitBinary(I, ISD::AND, 0, ISD::VAND); }
508 void visitOr (User &I) { visitBinary(I, ISD::OR, 0, ISD::VOR); }
509 void visitXor(User &I) { visitBinary(I, ISD::XOR, 0, ISD::VXOR); }
510 void visitShl(User &I) { visitShift(I, ISD::SHL); }
511 void visitShr(User &I) {
512 visitShift(I, I.getType()->isUnsigned() ? ISD::SRL : ISD::SRA);
515 void visitSetCC(User &I, ISD::CondCode SignedOpc, ISD::CondCode UnsignedOpc);
516 void visitSetEQ(User &I) { visitSetCC(I, ISD::SETEQ, ISD::SETEQ); }
517 void visitSetNE(User &I) { visitSetCC(I, ISD::SETNE, ISD::SETNE); }
518 void visitSetLE(User &I) { visitSetCC(I, ISD::SETLE, ISD::SETULE); }
519 void visitSetGE(User &I) { visitSetCC(I, ISD::SETGE, ISD::SETUGE); }
520 void visitSetLT(User &I) { visitSetCC(I, ISD::SETLT, ISD::SETULT); }
521 void visitSetGT(User &I) { visitSetCC(I, ISD::SETGT, ISD::SETUGT); }
523 void visitExtractElement(User &I);
524 void visitInsertElement(User &I);
525 void visitShuffleVector(User &I);
527 void visitGetElementPtr(User &I);
528 void visitCast(User &I);
529 void visitSelect(User &I);
531 void visitMalloc(MallocInst &I);
532 void visitFree(FreeInst &I);
533 void visitAlloca(AllocaInst &I);
534 void visitLoad(LoadInst &I);
535 void visitStore(StoreInst &I);
536 void visitPHI(PHINode &I) { } // PHI nodes are handled specially.
537 void visitCall(CallInst &I);
538 void visitInlineAsm(CallInst &I);
539 const char *visitIntrinsicCall(CallInst &I, unsigned Intrinsic);
540 void visitTargetIntrinsic(CallInst &I, unsigned Intrinsic);
542 void visitVAStart(CallInst &I);
543 void visitVAArg(VAArgInst &I);
544 void visitVAEnd(CallInst &I);
545 void visitVACopy(CallInst &I);
546 void visitFrameReturnAddress(CallInst &I, bool isFrameAddress);
548 void visitMemIntrinsic(CallInst &I, unsigned Op);
550 void visitUserOp1(Instruction &I) {
551 assert(0 && "UserOp1 should not exist at instruction selection time!");
554 void visitUserOp2(Instruction &I) {
555 assert(0 && "UserOp2 should not exist at instruction selection time!");
559 } // end namespace llvm
561 SDOperand SelectionDAGLowering::getValue(const Value *V) {
562 SDOperand &N = NodeMap[V];
565 const Type *VTy = V->getType();
566 MVT::ValueType VT = TLI.getValueType(VTy);
567 if (Constant *C = const_cast<Constant*>(dyn_cast<Constant>(V))) {
568 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) {
569 visit(CE->getOpcode(), *CE);
570 assert(N.Val && "visit didn't populate the ValueMap!");
572 } else if (GlobalValue *GV = dyn_cast<GlobalValue>(C)) {
573 return N = DAG.getGlobalAddress(GV, VT);
574 } else if (isa<ConstantPointerNull>(C)) {
575 return N = DAG.getConstant(0, TLI.getPointerTy());
576 } else if (isa<UndefValue>(C)) {
577 if (!isa<PackedType>(VTy))
578 return N = DAG.getNode(ISD::UNDEF, VT);
580 // Create a VBUILD_VECTOR of undef nodes.
581 const PackedType *PTy = cast<PackedType>(VTy);
582 unsigned NumElements = PTy->getNumElements();
583 MVT::ValueType PVT = TLI.getValueType(PTy->getElementType());
585 std::vector<SDOperand> Ops;
586 Ops.assign(NumElements, DAG.getNode(ISD::UNDEF, PVT));
588 // Create a VConstant node with generic Vector type.
589 Ops.push_back(DAG.getConstant(NumElements, MVT::i32));
590 Ops.push_back(DAG.getValueType(PVT));
591 return N = DAG.getNode(ISD::VBUILD_VECTOR, MVT::Vector, Ops);
592 } else if (ConstantFP *CFP = dyn_cast<ConstantFP>(C)) {
593 return N = DAG.getConstantFP(CFP->getValue(), VT);
594 } else if (const PackedType *PTy = dyn_cast<PackedType>(VTy)) {
595 unsigned NumElements = PTy->getNumElements();
596 MVT::ValueType PVT = TLI.getValueType(PTy->getElementType());
598 // Now that we know the number and type of the elements, push a
599 // Constant or ConstantFP node onto the ops list for each element of
600 // the packed constant.
601 std::vector<SDOperand> Ops;
602 if (ConstantPacked *CP = dyn_cast<ConstantPacked>(C)) {
603 for (unsigned i = 0; i != NumElements; ++i)
604 Ops.push_back(getValue(CP->getOperand(i)));
606 assert(isa<ConstantAggregateZero>(C) && "Unknown packed constant!");
608 if (MVT::isFloatingPoint(PVT))
609 Op = DAG.getConstantFP(0, PVT);
611 Op = DAG.getConstant(0, PVT);
612 Ops.assign(NumElements, Op);
615 // Create a VBUILD_VECTOR node with generic Vector type.
616 Ops.push_back(DAG.getConstant(NumElements, MVT::i32));
617 Ops.push_back(DAG.getValueType(PVT));
618 return N = DAG.getNode(ISD::VBUILD_VECTOR, MVT::Vector, Ops);
620 // Canonicalize all constant ints to be unsigned.
621 return N = DAG.getConstant(cast<ConstantIntegral>(C)->getRawValue(),VT);
625 if (const AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
626 std::map<const AllocaInst*, int>::iterator SI =
627 FuncInfo.StaticAllocaMap.find(AI);
628 if (SI != FuncInfo.StaticAllocaMap.end())
629 return DAG.getFrameIndex(SI->second, TLI.getPointerTy());
632 std::map<const Value*, unsigned>::const_iterator VMI =
633 FuncInfo.ValueMap.find(V);
634 assert(VMI != FuncInfo.ValueMap.end() && "Value not in map!");
636 unsigned InReg = VMI->second;
638 // If this type is not legal, make it so now.
639 if (VT != MVT::Vector) {
640 MVT::ValueType DestVT = TLI.getTypeToTransformTo(VT);
642 N = DAG.getCopyFromReg(DAG.getEntryNode(), InReg, DestVT);
644 // Source must be expanded. This input value is actually coming from the
645 // register pair VMI->second and VMI->second+1.
646 N = DAG.getNode(ISD::BUILD_PAIR, VT, N,
647 DAG.getCopyFromReg(DAG.getEntryNode(), InReg+1, DestVT));
648 } else if (DestVT > VT) { // Promotion case
649 if (MVT::isFloatingPoint(VT))
650 N = DAG.getNode(ISD::FP_ROUND, VT, N);
652 N = DAG.getNode(ISD::TRUNCATE, VT, N);
655 // Otherwise, if this is a vector, make it available as a generic vector
657 MVT::ValueType PTyElementVT, PTyLegalElementVT;
658 const PackedType *PTy = cast<PackedType>(VTy);
659 unsigned NE = TLI.getPackedTypeBreakdown(PTy, PTyElementVT,
662 // Build a VBUILD_VECTOR with the input registers.
663 std::vector<SDOperand> Ops;
664 if (PTyElementVT == PTyLegalElementVT) {
665 // If the value types are legal, just VBUILD the CopyFromReg nodes.
666 for (unsigned i = 0; i != NE; ++i)
667 Ops.push_back(DAG.getCopyFromReg(DAG.getEntryNode(), InReg++,
669 } else if (PTyElementVT < PTyLegalElementVT) {
      // If the register was promoted, use TRUNCATE or FP_ROUND as appropriate.
671 for (unsigned i = 0; i != NE; ++i) {
672 SDOperand Op = DAG.getCopyFromReg(DAG.getEntryNode(), InReg++,
674 if (MVT::isFloatingPoint(PTyElementVT))
675 Op = DAG.getNode(ISD::FP_ROUND, PTyElementVT, Op);
677 Op = DAG.getNode(ISD::TRUNCATE, PTyElementVT, Op);
681 // If the register was expanded, use BUILD_PAIR.
682 assert((NE & 1) == 0 && "Must expand into a multiple of 2 elements!");
683 for (unsigned i = 0; i != NE/2; ++i) {
684 SDOperand Op0 = DAG.getCopyFromReg(DAG.getEntryNode(), InReg++,
686 SDOperand Op1 = DAG.getCopyFromReg(DAG.getEntryNode(), InReg++,
688 Ops.push_back(DAG.getNode(ISD::BUILD_PAIR, VT, Op0, Op1));
692 Ops.push_back(DAG.getConstant(NE, MVT::i32));
693 Ops.push_back(DAG.getValueType(PTyLegalElementVT));
694 N = DAG.getNode(ISD::VBUILD_VECTOR, MVT::Vector, Ops);
696 // Finally, use a VBIT_CONVERT to make this available as the appropriate
698 N = DAG.getNode(ISD::VBIT_CONVERT, MVT::Vector, N,
699 DAG.getConstant(PTy->getNumElements(),
701 DAG.getValueType(TLI.getValueType(PTy->getElementType())));
708 void SelectionDAGLowering::visitRet(ReturnInst &I) {
709 if (I.getNumOperands() == 0) {
710 DAG.setRoot(DAG.getNode(ISD::RET, MVT::Other, getRoot()));
713 std::vector<SDOperand> NewValues;
714 NewValues.push_back(getRoot());
715 for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i) {
716 SDOperand RetOp = getValue(I.getOperand(i));
718 // If this is an integer return value, we need to promote it ourselves to
    // the full width of a register, since LegalizeOp will use ANY_EXTEND
    // rather than sign/zero extending the value.
721 if (MVT::isInteger(RetOp.getValueType()) &&
722 RetOp.getValueType() < MVT::i64) {
723 MVT::ValueType TmpVT;
724 if (TLI.getTypeAction(MVT::i32) == TargetLowering::Promote)
725 TmpVT = TLI.getTypeToTransformTo(MVT::i32);
729 if (I.getOperand(i)->getType()->isSigned())
730 RetOp = DAG.getNode(ISD::SIGN_EXTEND, TmpVT, RetOp);
732 RetOp = DAG.getNode(ISD::ZERO_EXTEND, TmpVT, RetOp);
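      // For example, a signed 8-bit return value is sign-extended here to
      // TmpVT (typically i32, or whatever the target promotes i32 to), while
      // an unsigned one is zero-extended, so the returned value occupies a
      // full register.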
734 NewValues.push_back(RetOp);
736 DAG.setRoot(DAG.getNode(ISD::RET, MVT::Other, NewValues));
739 void SelectionDAGLowering::visitBr(BranchInst &I) {
740 // Update machine-CFG edges.
741 MachineBasicBlock *Succ0MBB = FuncInfo.MBBMap[I.getSuccessor(0)];
742 CurMBB->addSuccessor(Succ0MBB);
744 // Figure out which block is immediately after the current one.
745 MachineBasicBlock *NextBlock = 0;
746 MachineFunction::iterator BBI = CurMBB;
747 if (++BBI != CurMBB->getParent()->end())
750 if (I.isUnconditional()) {
751 // If this is not a fall-through branch, emit the branch.
752 if (Succ0MBB != NextBlock)
753 DAG.setRoot(DAG.getNode(ISD::BR, MVT::Other, getRoot(),
754 DAG.getBasicBlock(Succ0MBB)));
756 MachineBasicBlock *Succ1MBB = FuncInfo.MBBMap[I.getSuccessor(1)];
757 CurMBB->addSuccessor(Succ1MBB);
759 SDOperand Cond = getValue(I.getCondition());
760 if (Succ1MBB == NextBlock) {
761 // If the condition is false, fall through. This means we should branch
762 // if the condition is true to Succ #0.
763 DAG.setRoot(DAG.getNode(ISD::BRCOND, MVT::Other, getRoot(),
764 Cond, DAG.getBasicBlock(Succ0MBB)));
765 } else if (Succ0MBB == NextBlock) {
766 // If the condition is true, fall through. This means we should branch if
767 // the condition is false to Succ #1. Invert the condition first.
768 SDOperand True = DAG.getConstant(1, Cond.getValueType());
769 Cond = DAG.getNode(ISD::XOR, Cond.getValueType(), Cond, True);
770 DAG.setRoot(DAG.getNode(ISD::BRCOND, MVT::Other, getRoot(),
771 Cond, DAG.getBasicBlock(Succ1MBB)));
773 std::vector<SDOperand> Ops;
774 Ops.push_back(getRoot());
775 // If the false case is the current basic block, then this is a self
776 // loop. We do not want to emit "Loop: ... brcond Out; br Loop", as it
777 // adds an extra instruction in the loop. Instead, invert the
      // condition and emit "Loop: ... br!cond Loop; br Out."
779 if (CurMBB == Succ1MBB) {
780 std::swap(Succ0MBB, Succ1MBB);
781 SDOperand True = DAG.getConstant(1, Cond.getValueType());
782 Cond = DAG.getNode(ISD::XOR, Cond.getValueType(), Cond, True);
784 SDOperand True = DAG.getNode(ISD::BRCOND, MVT::Other, getRoot(), Cond,
785 DAG.getBasicBlock(Succ0MBB));
786 DAG.setRoot(DAG.getNode(ISD::BR, MVT::Other, True,
787 DAG.getBasicBlock(Succ1MBB)));
792 /// visitSwitchCase - Emits the necessary code to represent a single node in
793 /// the binary search tree resulting from lowering a switch instruction.
794 void SelectionDAGLowering::visitSwitchCase(SelectionDAGISel::CaseBlock &CB) {
795 SDOperand SwitchOp = getValue(CB.SwitchV);
796 SDOperand CaseOp = getValue(CB.CaseC);
797 SDOperand Cond = DAG.getSetCC(MVT::i1, SwitchOp, CaseOp, CB.CC);
799 // Set NextBlock to be the MBB immediately after the current one, if any.
800 // This is used to avoid emitting unnecessary branches to the next block.
801 MachineBasicBlock *NextBlock = 0;
802 MachineFunction::iterator BBI = CurMBB;
803 if (++BBI != CurMBB->getParent()->end())
806 // If the lhs block is the next block, invert the condition so that we can
807 // fall through to the lhs instead of the rhs block.
808 if (CB.LHSBB == NextBlock) {
809 std::swap(CB.LHSBB, CB.RHSBB);
810 SDOperand True = DAG.getConstant(1, Cond.getValueType());
811 Cond = DAG.getNode(ISD::XOR, Cond.getValueType(), Cond, True);
813 SDOperand BrCond = DAG.getNode(ISD::BRCOND, MVT::Other, getRoot(), Cond,
814 DAG.getBasicBlock(CB.LHSBB));
815 if (CB.RHSBB == NextBlock)
818 DAG.setRoot(DAG.getNode(ISD::BR, MVT::Other, BrCond,
819 DAG.getBasicBlock(CB.RHSBB)));
820 // Update successor info
821 CurMBB->addSuccessor(CB.LHSBB);
822 CurMBB->addSuccessor(CB.RHSBB);
/// visitJumpTable - Emits the code to jump through the jump table produced
/// when lowering a switch instruction: load the destination block's address
/// using the index previously copied into JT.Reg, and branch to it.
827 void SelectionDAGLowering::visitJumpTable(SelectionDAGISel::JumpTable &JT) {
828 // FIXME: Need to emit different code for PIC vs. Non-PIC, specifically,
829 // we need to add the address of the jump table to the value loaded, since
  // the entries in the jump table will be differences rather than absolute
  // addresses.
833 // Emit the code for the jump table
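  // Conceptually, entry i of the table lives at JTbase + i * sizeof(void*),
  // so we scale the index copied into JT.Reg by the pointer size, add the
  // table's address, load the destination address, and branch to it
  // indirectly (BRIND).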
834 MVT::ValueType PTy = TLI.getPointerTy();
835 unsigned PTyBytes = MVT::getSizeInBits(PTy)/8;
836 SDOperand Copy = DAG.getCopyFromReg(getRoot(), JT.Reg, PTy);
837 SDOperand IDX = DAG.getNode(ISD::MUL, PTy, Copy,
838 DAG.getConstant(PTyBytes, PTy));
839 SDOperand ADD = DAG.getNode(ISD::ADD, PTy, IDX, DAG.getJumpTable(JT.JTI,PTy));
840 SDOperand LD = DAG.getLoad(PTy, Copy.getValue(1), ADD, DAG.getSrcValue(0));
841 DAG.setRoot(DAG.getNode(ISD::BRIND, MVT::Other, LD.getValue(1), LD));
844 void SelectionDAGLowering::visitSwitch(SwitchInst &I) {
845 // Figure out which block is immediately after the current one.
846 MachineBasicBlock *NextBlock = 0;
847 MachineFunction::iterator BBI = CurMBB;
848 if (++BBI != CurMBB->getParent()->end())
851 // If there is only the default destination, branch to it if it is not the
852 // next basic block. Otherwise, just fall through.
853 if (I.getNumOperands() == 2) {
854 // Update machine-CFG edges.
855 MachineBasicBlock *DefaultMBB = FuncInfo.MBBMap[I.getDefaultDest()];
856 // If this is not a fall-through branch, emit the branch.
857 if (DefaultMBB != NextBlock)
858 DAG.setRoot(DAG.getNode(ISD::BR, MVT::Other, getRoot(),
859 DAG.getBasicBlock(DefaultMBB)));
863 // If there are any non-default case statements, create a vector of Cases
864 // representing each one, and sort the vector so that we can efficiently
865 // create a binary search tree from them.
866 std::vector<Case> Cases;
867 for (unsigned i = 1; i < I.getNumSuccessors(); ++i) {
868 MachineBasicBlock *SMBB = FuncInfo.MBBMap[I.getSuccessor(i)];
869 Cases.push_back(Case(I.getSuccessorValue(i), SMBB));
871 std::sort(Cases.begin(), Cases.end(), CaseCmp());
873 // Get the Value to be switched on and default basic blocks, which will be
874 // inserted into CaseBlock records, representing basic blocks in the binary
876 Value *SV = I.getOperand(0);
877 MachineBasicBlock *Default = FuncInfo.MBBMap[I.getDefaultDest()];
879 // Get the MachineFunction which holds the current MBB. This is used during
880 // emission of jump tables, and when inserting any additional MBBs necessary
881 // to represent the switch.
882 MachineFunction *CurMF = CurMBB->getParent();
883 const BasicBlock *LLVMBB = CurMBB->getBasicBlock();
884 Reloc::Model Relocs = TLI.getTargetMachine().getRelocationModel();
886 // If the switch has more than 5 blocks, and at least 31.25% dense, and the
887 // target supports indirect branches, then emit a jump table rather than
888 // lowering the switch to a binary tree of conditional branches.
889 // FIXME: Make this work with PIC code
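  // Density example: 6 cases covering the values 10..25 give
  // 6 / (25 - 10 + 1) = 0.375 >= 0.3125, so a 16-entry table is emitted and
  // the 10 unused slots branch to the default block.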
890 if (TLI.isOperationLegal(ISD::BRIND, TLI.getPointerTy()) &&
891 (Relocs == Reloc::Static || Relocs == Reloc::DynamicNoPIC) &&
893 uint64_t First = cast<ConstantIntegral>(Cases.front().first)->getRawValue();
894 uint64_t Last = cast<ConstantIntegral>(Cases.back().first)->getRawValue();
895 double Density = (double)Cases.size() / (double)((Last - First) + 1ULL);
897 if (Density >= 0.3125) {
898 // Create a new basic block to hold the code for loading the address
899 // of the jump table, and jumping to it. Update successor information;
900 // we will either branch to the default case for the switch, or the jump
902 MachineBasicBlock *JumpTableBB = new MachineBasicBlock(LLVMBB);
903 CurMF->getBasicBlockList().insert(BBI, JumpTableBB);
904 CurMBB->addSuccessor(Default);
905 CurMBB->addSuccessor(JumpTableBB);
907 // Subtract the lowest switch case value from the value being switched on
908 // and conditional branch to default mbb if the result is greater than the
909 // difference between smallest and largest cases.
910 SDOperand SwitchOp = getValue(SV);
911 MVT::ValueType VT = SwitchOp.getValueType();
912 SDOperand SUB = DAG.getNode(ISD::SUB, VT, SwitchOp,
913 DAG.getConstant(First, VT));
    // The SDNode we just created, which holds the value being switched on
    // minus the smallest case value, needs to be copied to a virtual
    // register so it can be used as an index into the jump table in a
    // subsequent basic block.  This value may be smaller or larger than the
    // target's pointer type, and therefore require extension or truncation.
920 if (VT > TLI.getPointerTy())
921 SwitchOp = DAG.getNode(ISD::TRUNCATE, TLI.getPointerTy(), SUB);
923 SwitchOp = DAG.getNode(ISD::ZERO_EXTEND, TLI.getPointerTy(), SUB);
924 unsigned JumpTableReg = FuncInfo.MakeReg(TLI.getPointerTy());
925 SDOperand CopyTo = DAG.getCopyToReg(getRoot(), JumpTableReg, SwitchOp);
927 // Emit the range check for the jump table, and branch to the default
928 // block for the switch statement if the value being switched on exceeds
929 // the largest case in the switch.
930 SDOperand CMP = DAG.getSetCC(TLI.getSetCCResultTy(), SUB,
931 DAG.getConstant(Last-First,VT), ISD::SETUGT);
932 DAG.setRoot(DAG.getNode(ISD::BRCOND, MVT::Other, CopyTo, CMP,
933 DAG.getBasicBlock(Default)));
935 // Build a vector of destination BBs, corresponding to each target
936 // of the jump table. If the value of the jump table slot corresponds to
937 // a case statement, push the case's BB onto the vector, otherwise, push
939 std::set<MachineBasicBlock*> UniqueBBs;
940 std::vector<MachineBasicBlock*> DestBBs;
941 uint64_t TEI = First;
942 for (CaseItr ii = Cases.begin(), ee = Cases.end(); ii != ee; ++TEI) {
943 if (cast<ConstantIntegral>(ii->first)->getRawValue() == TEI) {
944 DestBBs.push_back(ii->second);
945 UniqueBBs.insert(ii->second);
948 DestBBs.push_back(Default);
949 UniqueBBs.insert(Default);
953 // Update successor info
954 for (std::set<MachineBasicBlock*>::iterator ii = UniqueBBs.begin(),
955 ee = UniqueBBs.end(); ii != ee; ++ii)
956 JumpTableBB->addSuccessor(*ii);
958 // Create a jump table index for this jump table, or return an existing
960 unsigned JTI = CurMF->getJumpTableInfo()->getJumpTableIndex(DestBBs);
962 // Set the jump table information so that we can codegen it as a second
964 JT.Reg = JumpTableReg;
966 JT.MBB = JumpTableBB;
967 JT.Default = Default;
972 // Push the initial CaseRec onto the worklist
973 std::vector<CaseRec> CaseVec;
974 CaseVec.push_back(CaseRec(CurMBB,0,0,CaseRange(Cases.begin(),Cases.end())));
976 while (!CaseVec.empty()) {
977 // Grab a record representing a case range to process off the worklist
978 CaseRec CR = CaseVec.back();
981 // Size is the number of Cases represented by this range. If Size is 1,
982 // then we are processing a leaf of the binary search tree. Otherwise,
    // we need to pick a pivot, and push left and right ranges onto the
    // worklist.
985 unsigned Size = CR.Range.second - CR.Range.first;
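    // For example, 7 cases are split around the 4th case into sub-ranges of
    // size 3 and 4; each sub-range is pushed back onto the worklist and split
    // again until only single-case leaves remain.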
988 // Create a CaseBlock record representing a conditional branch to
989 // the Case's target mbb if the value being switched on SV is equal
990 // to C. Otherwise, branch to default.
991 Constant *C = CR.Range.first->first;
992 MachineBasicBlock *Target = CR.Range.first->second;
993 SelectionDAGISel::CaseBlock CB(ISD::SETEQ, SV, C, Target, Default,
995 // If the MBB representing the leaf node is the current MBB, then just
996 // call visitSwitchCase to emit the code into the current block.
997 // Otherwise, push the CaseBlock onto the vector to be later processed
998 // by SDISel, and insert the node's MBB before the next MBB.
999 if (CR.CaseBB == CurMBB)
1000 visitSwitchCase(CB);
1002 SwitchCases.push_back(CB);
1003 CurMF->getBasicBlockList().insert(BBI, CR.CaseBB);
1006 // split case range at pivot
1007 CaseItr Pivot = CR.Range.first + (Size / 2);
1008 CaseRange LHSR(CR.Range.first, Pivot);
1009 CaseRange RHSR(Pivot, CR.Range.second);
1010 Constant *C = Pivot->first;
1011 MachineBasicBlock *RHSBB = 0, *LHSBB = 0;
1012 // We know that we branch to the LHS if the Value being switched on is
1013 // less than the Pivot value, C. We use this to optimize our binary
1014 // tree a bit, by recognizing that if SV is greater than or equal to the
1015 // LHS's Case Value, and that Case Value is exactly one less than the
1016 // Pivot's Value, then we can branch directly to the LHS's Target,
1017 // rather than creating a leaf node for it.
1018 if ((LHSR.second - LHSR.first) == 1 &&
1019 LHSR.first->first == CR.GE &&
1020 cast<ConstantIntegral>(C)->getRawValue() ==
1021 (cast<ConstantIntegral>(CR.GE)->getRawValue() + 1ULL)) {
1022 LHSBB = LHSR.first->second;
1024 LHSBB = new MachineBasicBlock(LLVMBB);
1025 CaseVec.push_back(CaseRec(LHSBB,C,CR.GE,LHSR));
1027 // Similar to the optimization above, if the Value being switched on is
1028 // known to be less than the Constant CR.LT, and the current Case Value
1029 // is CR.LT - 1, then we can branch directly to the target block for
1030 // the current Case Value, rather than emitting a RHS leaf node for it.
1031 if ((RHSR.second - RHSR.first) == 1 && CR.LT &&
1032 cast<ConstantIntegral>(RHSR.first->first)->getRawValue() ==
1033 (cast<ConstantIntegral>(CR.LT)->getRawValue() - 1ULL)) {
1034 RHSBB = RHSR.first->second;
1036 RHSBB = new MachineBasicBlock(LLVMBB);
1037 CaseVec.push_back(CaseRec(RHSBB,CR.LT,C,RHSR));
1039 // Create a CaseBlock record representing a conditional branch to
1040 // the LHS node if the value being switched on SV is less than C.
    // Otherwise, branch to RHS.
1042 ISD::CondCode CC = C->getType()->isSigned() ? ISD::SETLT : ISD::SETULT;
1043 SelectionDAGISel::CaseBlock CB(CC, SV, C, LHSBB, RHSBB, CR.CaseBB);
1044 if (CR.CaseBB == CurMBB)
1045 visitSwitchCase(CB);
1047 SwitchCases.push_back(CB);
1048 CurMF->getBasicBlockList().insert(BBI, CR.CaseBB);
1054 void SelectionDAGLowering::visitSub(User &I) {
1055 // -0.0 - X --> fneg
1056 if (I.getType()->isFloatingPoint()) {
1057 if (ConstantFP *CFP = dyn_cast<ConstantFP>(I.getOperand(0)))
1058 if (CFP->isExactlyValue(-0.0)) {
1059 SDOperand Op2 = getValue(I.getOperand(1));
1060 setValue(&I, DAG.getNode(ISD::FNEG, Op2.getValueType(), Op2));
1064 visitBinary(I, ISD::SUB, ISD::FSUB, ISD::VSUB);
1067 void SelectionDAGLowering::visitBinary(User &I, unsigned IntOp, unsigned FPOp,
1069 const Type *Ty = I.getType();
1070 SDOperand Op1 = getValue(I.getOperand(0));
1071 SDOperand Op2 = getValue(I.getOperand(1));
1073 if (Ty->isIntegral()) {
1074 setValue(&I, DAG.getNode(IntOp, Op1.getValueType(), Op1, Op2));
1075 } else if (Ty->isFloatingPoint()) {
1076 setValue(&I, DAG.getNode(FPOp, Op1.getValueType(), Op1, Op2));
1078 const PackedType *PTy = cast<PackedType>(Ty);
1079 SDOperand Num = DAG.getConstant(PTy->getNumElements(), MVT::i32);
1080 SDOperand Typ = DAG.getValueType(TLI.getValueType(PTy->getElementType()));
1081 setValue(&I, DAG.getNode(VecOp, MVT::Vector, Op1, Op2, Num, Typ));
1085 void SelectionDAGLowering::visitShift(User &I, unsigned Opcode) {
1086 SDOperand Op1 = getValue(I.getOperand(0));
1087 SDOperand Op2 = getValue(I.getOperand(1));
1089 Op2 = DAG.getNode(ISD::ANY_EXTEND, TLI.getShiftAmountTy(), Op2);
1091 setValue(&I, DAG.getNode(Opcode, Op1.getValueType(), Op1, Op2));
1094 void SelectionDAGLowering::visitSetCC(User &I,ISD::CondCode SignedOpcode,
1095 ISD::CondCode UnsignedOpcode) {
1096 SDOperand Op1 = getValue(I.getOperand(0));
1097 SDOperand Op2 = getValue(I.getOperand(1));
1098 ISD::CondCode Opcode = SignedOpcode;
1099 if (I.getOperand(0)->getType()->isUnsigned())
1100 Opcode = UnsignedOpcode;
1101 setValue(&I, DAG.getSetCC(MVT::i1, Op1, Op2, Opcode));
1104 void SelectionDAGLowering::visitSelect(User &I) {
1105 SDOperand Cond = getValue(I.getOperand(0));
1106 SDOperand TrueVal = getValue(I.getOperand(1));
1107 SDOperand FalseVal = getValue(I.getOperand(2));
1108 if (!isa<PackedType>(I.getType())) {
1109 setValue(&I, DAG.getNode(ISD::SELECT, TrueVal.getValueType(), Cond,
1110 TrueVal, FalseVal));
1112 setValue(&I, DAG.getNode(ISD::VSELECT, MVT::Vector, Cond, TrueVal, FalseVal,
1113 *(TrueVal.Val->op_end()-2),
1114 *(TrueVal.Val->op_end()-1)));
1118 void SelectionDAGLowering::visitCast(User &I) {
1119 SDOperand N = getValue(I.getOperand(0));
1120 MVT::ValueType SrcVT = N.getValueType();
1121 MVT::ValueType DestVT = TLI.getValueType(I.getType());
1123 if (DestVT == MVT::Vector) {
1124 // This is a cast to a vector from something else. This is always a bit
1125 // convert. Get information about the input vector.
1126 const PackedType *DestTy = cast<PackedType>(I.getType());
1127 MVT::ValueType EltVT = TLI.getValueType(DestTy->getElementType());
1128 setValue(&I, DAG.getNode(ISD::VBIT_CONVERT, DestVT, N,
1129 DAG.getConstant(DestTy->getNumElements(),MVT::i32),
1130 DAG.getValueType(EltVT)));
1131 } else if (SrcVT == DestVT) {
1132 setValue(&I, N); // noop cast.
1133 } else if (DestVT == MVT::i1) {
1134 // Cast to bool is a comparison against zero, not truncation to zero.
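    // For example, casting the integer 2 or the float 2.5 to bool must yield
    // true, so we emit a SETNE comparison against zero rather than truncating
    // (which would keep only the low bit).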
1135 SDOperand Zero = isInteger(SrcVT) ? DAG.getConstant(0, N.getValueType()) :
1136 DAG.getConstantFP(0.0, N.getValueType());
1137 setValue(&I, DAG.getSetCC(MVT::i1, N, Zero, ISD::SETNE));
1138 } else if (isInteger(SrcVT)) {
1139 if (isInteger(DestVT)) { // Int -> Int cast
1140 if (DestVT < SrcVT) // Truncating cast?
1141 setValue(&I, DAG.getNode(ISD::TRUNCATE, DestVT, N));
1142 else if (I.getOperand(0)->getType()->isSigned())
1143 setValue(&I, DAG.getNode(ISD::SIGN_EXTEND, DestVT, N));
1145 setValue(&I, DAG.getNode(ISD::ZERO_EXTEND, DestVT, N));
1146 } else if (isFloatingPoint(DestVT)) { // Int -> FP cast
1147 if (I.getOperand(0)->getType()->isSigned())
1148 setValue(&I, DAG.getNode(ISD::SINT_TO_FP, DestVT, N));
1150 setValue(&I, DAG.getNode(ISD::UINT_TO_FP, DestVT, N));
1152 assert(0 && "Unknown cast!");
1154 } else if (isFloatingPoint(SrcVT)) {
1155 if (isFloatingPoint(DestVT)) { // FP -> FP cast
1156 if (DestVT < SrcVT) // Rounding cast?
1157 setValue(&I, DAG.getNode(ISD::FP_ROUND, DestVT, N));
1159 setValue(&I, DAG.getNode(ISD::FP_EXTEND, DestVT, N));
1160 } else if (isInteger(DestVT)) { // FP -> Int cast.
1161 if (I.getType()->isSigned())
1162 setValue(&I, DAG.getNode(ISD::FP_TO_SINT, DestVT, N));
1164 setValue(&I, DAG.getNode(ISD::FP_TO_UINT, DestVT, N));
1166 assert(0 && "Unknown cast!");
1169 assert(SrcVT == MVT::Vector && "Unknown cast!");
1170 assert(DestVT != MVT::Vector && "Casts to vector already handled!");
1171 // This is a cast from a vector to something else. This is always a bit
1172 // convert. Get information about the input vector.
1173 setValue(&I, DAG.getNode(ISD::VBIT_CONVERT, DestVT, N));
1177 void SelectionDAGLowering::visitInsertElement(User &I) {
1178 SDOperand InVec = getValue(I.getOperand(0));
1179 SDOperand InVal = getValue(I.getOperand(1));
1180 SDOperand InIdx = DAG.getNode(ISD::ZERO_EXTEND, TLI.getPointerTy(),
1181 getValue(I.getOperand(2)));
1183 SDOperand Num = *(InVec.Val->op_end()-2);
1184 SDOperand Typ = *(InVec.Val->op_end()-1);
1185 setValue(&I, DAG.getNode(ISD::VINSERT_VECTOR_ELT, MVT::Vector,
1186 InVec, InVal, InIdx, Num, Typ));
1189 void SelectionDAGLowering::visitExtractElement(User &I) {
1190 SDOperand InVec = getValue(I.getOperand(0));
1191 SDOperand InIdx = DAG.getNode(ISD::ZERO_EXTEND, TLI.getPointerTy(),
1192 getValue(I.getOperand(1)));
1193 SDOperand Typ = *(InVec.Val->op_end()-1);
1194 setValue(&I, DAG.getNode(ISD::VEXTRACT_VECTOR_ELT,
1195 TLI.getValueType(I.getType()), InVec, InIdx));
1198 void SelectionDAGLowering::visitShuffleVector(User &I) {
1199 SDOperand V1 = getValue(I.getOperand(0));
1200 SDOperand V2 = getValue(I.getOperand(1));
1201 SDOperand Mask = getValue(I.getOperand(2));
1203 SDOperand Num = *(V1.Val->op_end()-2);
1204 SDOperand Typ = *(V2.Val->op_end()-1);
1205 setValue(&I, DAG.getNode(ISD::VVECTOR_SHUFFLE, MVT::Vector,
1206 V1, V2, Mask, Num, Typ));
1210 void SelectionDAGLowering::visitGetElementPtr(User &I) {
1211 SDOperand N = getValue(I.getOperand(0));
1212 const Type *Ty = I.getOperand(0)->getType();
1214 for (GetElementPtrInst::op_iterator OI = I.op_begin()+1, E = I.op_end();
1217 if (const StructType *StTy = dyn_cast<StructType>(Ty)) {
1218 unsigned Field = cast<ConstantUInt>(Idx)->getValue();
1221 uint64_t Offset = TD->getStructLayout(StTy)->MemberOffsets[Field];
1222 N = DAG.getNode(ISD::ADD, N.getValueType(), N,
1223 getIntPtrConstant(Offset));
1225 Ty = StTy->getElementType(Field);
1227 Ty = cast<SequentialType>(Ty)->getElementType();
1229 // If this is a constant subscript, handle it quickly.
1230 if (ConstantInt *CI = dyn_cast<ConstantInt>(Idx)) {
1231 if (CI->getRawValue() == 0) continue;
1234 if (ConstantSInt *CSI = dyn_cast<ConstantSInt>(CI))
1235 Offs = (int64_t)TD->getTypeSize(Ty)*CSI->getValue();
1237 Offs = TD->getTypeSize(Ty)*cast<ConstantUInt>(CI)->getValue();
1238 N = DAG.getNode(ISD::ADD, N.getValueType(), N, getIntPtrConstant(Offs));
1242 // N = N + Idx * ElementSize;
1243 uint64_t ElementSize = TD->getTypeSize(Ty);
1244 SDOperand IdxN = getValue(Idx);
      // If the index is smaller or larger than intptr_t, truncate or extend
      // it to match.
1248 if (IdxN.getValueType() < N.getValueType()) {
1249 if (Idx->getType()->isSigned())
1250 IdxN = DAG.getNode(ISD::SIGN_EXTEND, N.getValueType(), IdxN);
1252 IdxN = DAG.getNode(ISD::ZERO_EXTEND, N.getValueType(), IdxN);
1253 } else if (IdxN.getValueType() > N.getValueType())
1254 IdxN = DAG.getNode(ISD::TRUNCATE, N.getValueType(), IdxN);
1256 // If this is a multiply by a power of two, turn it into a shl
1257 // immediately. This is a very common case.
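      // For example, indexing into an array of 8-byte elements becomes
      // "IdxN << 3" instead of a multiply by 8.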
1258 if (isPowerOf2_64(ElementSize)) {
1259 unsigned Amt = Log2_64(ElementSize);
1260 IdxN = DAG.getNode(ISD::SHL, N.getValueType(), IdxN,
1261 DAG.getConstant(Amt, TLI.getShiftAmountTy()));
1262 N = DAG.getNode(ISD::ADD, N.getValueType(), N, IdxN);
1266 SDOperand Scale = getIntPtrConstant(ElementSize);
1267 IdxN = DAG.getNode(ISD::MUL, N.getValueType(), IdxN, Scale);
1268 N = DAG.getNode(ISD::ADD, N.getValueType(), N, IdxN);
1274 void SelectionDAGLowering::visitAlloca(AllocaInst &I) {
1275 // If this is a fixed sized alloca in the entry block of the function,
1276 // allocate it statically on the stack.
1277 if (FuncInfo.StaticAllocaMap.count(&I))
1278 return; // getValue will auto-populate this.
1280 const Type *Ty = I.getAllocatedType();
1281 uint64_t TySize = TLI.getTargetData()->getTypeSize(Ty);
1282 unsigned Align = std::max((unsigned)TLI.getTargetData()->getTypeAlignment(Ty),
1285 SDOperand AllocSize = getValue(I.getArraySize());
1286 MVT::ValueType IntPtr = TLI.getPointerTy();
1287 if (IntPtr < AllocSize.getValueType())
1288 AllocSize = DAG.getNode(ISD::TRUNCATE, IntPtr, AllocSize);
1289 else if (IntPtr > AllocSize.getValueType())
1290 AllocSize = DAG.getNode(ISD::ZERO_EXTEND, IntPtr, AllocSize);
1292 AllocSize = DAG.getNode(ISD::MUL, IntPtr, AllocSize,
1293 getIntPtrConstant(TySize));
1295 // Handle alignment. If the requested alignment is less than or equal to the
1296 // stack alignment, ignore it and round the size of the allocation up to the
1297 // stack alignment size. If the size is greater than the stack alignment, we
1298 // note this in the DYNAMIC_STACKALLOC node.
1299 unsigned StackAlign =
1300 TLI.getTargetMachine().getFrameInfo()->getStackAlignment();
1301 if (Align <= StackAlign) {
1303 // Add SA-1 to the size.
1304 AllocSize = DAG.getNode(ISD::ADD, AllocSize.getValueType(), AllocSize,
1305 getIntPtrConstant(StackAlign-1));
1306 // Mask out the low bits for alignment purposes.
1307 AllocSize = DAG.getNode(ISD::AND, AllocSize.getValueType(), AllocSize,
1308 getIntPtrConstant(~(uint64_t)(StackAlign-1)));
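    // For example, with a 16-byte stack alignment an 8-byte request becomes
    // (8 + 15) & ~15 == 16.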
1311 std::vector<MVT::ValueType> VTs;
1312 VTs.push_back(AllocSize.getValueType());
1313 VTs.push_back(MVT::Other);
1314 std::vector<SDOperand> Ops;
1315 Ops.push_back(getRoot());
1316 Ops.push_back(AllocSize);
1317 Ops.push_back(getIntPtrConstant(Align));
1318 SDOperand DSA = DAG.getNode(ISD::DYNAMIC_STACKALLOC, VTs, Ops);
1319 DAG.setRoot(setValue(&I, DSA).getValue(1));
  // Inform the Frame Information that we have just allocated a variable-sized
  // object.
1323 CurMBB->getParent()->getFrameInfo()->CreateVariableSizedObject();
1326 void SelectionDAGLowering::visitLoad(LoadInst &I) {
1327 SDOperand Ptr = getValue(I.getOperand(0));
1333 // Do not serialize non-volatile loads against each other.
1334 Root = DAG.getRoot();
1337 setValue(&I, getLoadFrom(I.getType(), Ptr, DAG.getSrcValue(I.getOperand(0)),
1338 Root, I.isVolatile()));
1341 SDOperand SelectionDAGLowering::getLoadFrom(const Type *Ty, SDOperand Ptr,
1342 SDOperand SrcValue, SDOperand Root,
1345 if (const PackedType *PTy = dyn_cast<PackedType>(Ty)) {
1346 MVT::ValueType PVT = TLI.getValueType(PTy->getElementType());
1347 L = DAG.getVecLoad(PTy->getNumElements(), PVT, Root, Ptr, SrcValue);
1349 L = DAG.getLoad(TLI.getValueType(Ty), Root, Ptr, SrcValue);
1353 DAG.setRoot(L.getValue(1));
1355 PendingLoads.push_back(L.getValue(1));
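  // Non-volatile loads are only recorded here in PendingLoads; a later
  // getRoot() call joins them with a TokenFactor, so independent loads are
  // not artificially serialized against one another.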
1361 void SelectionDAGLowering::visitStore(StoreInst &I) {
1362 Value *SrcV = I.getOperand(0);
1363 SDOperand Src = getValue(SrcV);
1364 SDOperand Ptr = getValue(I.getOperand(1));
1365 DAG.setRoot(DAG.getNode(ISD::STORE, MVT::Other, getRoot(), Src, Ptr,
1366 DAG.getSrcValue(I.getOperand(1))));
1369 /// IntrinsicCannotAccessMemory - Return true if the specified intrinsic cannot
1370 /// access memory and has no other side effects at all.
1371 static bool IntrinsicCannotAccessMemory(unsigned IntrinsicID) {
1372 #define GET_NO_MEMORY_INTRINSICS
1373 #include "llvm/Intrinsics.gen"
1374 #undef GET_NO_MEMORY_INTRINSICS
1378 // IntrinsicOnlyReadsMemory - Return true if the specified intrinsic doesn't
1379 // have any side-effects or if it only reads memory.
1380 static bool IntrinsicOnlyReadsMemory(unsigned IntrinsicID) {
1381 #define GET_SIDE_EFFECT_INFO
1382 #include "llvm/Intrinsics.gen"
1383 #undef GET_SIDE_EFFECT_INFO
/// visitTargetIntrinsic - Lower a call of a target intrinsic to an INTRINSIC
/// node.
1389 void SelectionDAGLowering::visitTargetIntrinsic(CallInst &I,
1390 unsigned Intrinsic) {
1391 bool HasChain = !IntrinsicCannotAccessMemory(Intrinsic);
1392 bool OnlyLoad = HasChain && IntrinsicOnlyReadsMemory(Intrinsic);
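  // For example, an intrinsic that only reads memory (HasChain && OnlyLoad)
  // is chained to DAG.getRoot() below and may still be reordered with other
  // loads, while one with arbitrary side effects is chained to getRoot(),
  // which first joins all pending loads with a TokenFactor.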
1394 // Build the operand list.
1395 std::vector<SDOperand> Ops;
1396 if (HasChain) { // If this intrinsic has side-effects, chainify it.
1398 // We don't need to serialize loads against other loads.
1399 Ops.push_back(DAG.getRoot());
1401 Ops.push_back(getRoot());
1405 // Add the intrinsic ID as an integer operand.
1406 Ops.push_back(DAG.getConstant(Intrinsic, TLI.getPointerTy()));
1408 // Add all operands of the call to the operand list.
1409 for (unsigned i = 1, e = I.getNumOperands(); i != e; ++i) {
1410 SDOperand Op = getValue(I.getOperand(i));
1412 // If this is a vector type, force it to the right packed type.
1413 if (Op.getValueType() == MVT::Vector) {
1414 const PackedType *OpTy = cast<PackedType>(I.getOperand(i)->getType());
1415 MVT::ValueType EltVT = TLI.getValueType(OpTy->getElementType());
1417 MVT::ValueType VVT = MVT::getVectorType(EltVT, OpTy->getNumElements());
1418 assert(VVT != MVT::Other && "Intrinsic uses a non-legal type?");
1419 Op = DAG.getNode(ISD::VBIT_CONVERT, VVT, Op);
1422 assert(TLI.isTypeLegal(Op.getValueType()) &&
1423 "Intrinsic uses a non-legal type?");
1427 std::vector<MVT::ValueType> VTs;
1428 if (I.getType() != Type::VoidTy) {
1429 MVT::ValueType VT = TLI.getValueType(I.getType());
1430 if (VT == MVT::Vector) {
1431 const PackedType *DestTy = cast<PackedType>(I.getType());
1432 MVT::ValueType EltVT = TLI.getValueType(DestTy->getElementType());
1434 VT = MVT::getVectorType(EltVT, DestTy->getNumElements());
1435 assert(VT != MVT::Other && "Intrinsic uses a non-legal type?");
1438 assert(TLI.isTypeLegal(VT) && "Intrinsic uses a non-legal type?");
1442 VTs.push_back(MVT::Other);
1447 Result = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, VTs, Ops);
1448 else if (I.getType() != Type::VoidTy)
1449 Result = DAG.getNode(ISD::INTRINSIC_W_CHAIN, VTs, Ops);
1451 Result = DAG.getNode(ISD::INTRINSIC_VOID, VTs, Ops);
1454 SDOperand Chain = Result.getValue(Result.Val->getNumValues()-1);
1456 PendingLoads.push_back(Chain);
1460 if (I.getType() != Type::VoidTy) {
1461 if (const PackedType *PTy = dyn_cast<PackedType>(I.getType())) {
1462 MVT::ValueType EVT = TLI.getValueType(PTy->getElementType());
1463 Result = DAG.getNode(ISD::VBIT_CONVERT, MVT::Vector, Result,
1464 DAG.getConstant(PTy->getNumElements(), MVT::i32),
1465 DAG.getValueType(EVT));
1467 setValue(&I, Result);
1471 /// visitIntrinsicCall - Lower the call to the specified intrinsic function. If
/// we want to emit this as a call to a named external function, return the
/// name; otherwise, lower it and return null.
1475 SelectionDAGLowering::visitIntrinsicCall(CallInst &I, unsigned Intrinsic) {
1476 switch (Intrinsic) {
1478 // By default, turn this into a target intrinsic node.
1479 visitTargetIntrinsic(I, Intrinsic);
1481 case Intrinsic::vastart: visitVAStart(I); return 0;
1482 case Intrinsic::vaend: visitVAEnd(I); return 0;
1483 case Intrinsic::vacopy: visitVACopy(I); return 0;
1484 case Intrinsic::returnaddress: visitFrameReturnAddress(I, false); return 0;
1485 case Intrinsic::frameaddress: visitFrameReturnAddress(I, true); return 0;
1486 case Intrinsic::setjmp:
1487 return "_setjmp"+!TLI.usesUnderscoreSetJmpLongJmp();
1489 case Intrinsic::longjmp:
1490 return "_longjmp"+!TLI.usesUnderscoreSetJmpLongJmp();
1492 case Intrinsic::memcpy_i32:
1493 case Intrinsic::memcpy_i64:
1494 visitMemIntrinsic(I, ISD::MEMCPY);
1496 case Intrinsic::memset_i32:
1497 case Intrinsic::memset_i64:
1498 visitMemIntrinsic(I, ISD::MEMSET);
1500 case Intrinsic::memmove_i32:
1501 case Intrinsic::memmove_i64:
1502 visitMemIntrinsic(I, ISD::MEMMOVE);
1505 case Intrinsic::dbg_stoppoint: {
1506 MachineDebugInfo *DebugInfo = DAG.getMachineDebugInfo();
1507 DbgStopPointInst &SPI = cast<DbgStopPointInst>(I);
1508 if (DebugInfo && SPI.getContext() && DebugInfo->Verify(SPI.getContext())) {
1509 std::vector<SDOperand> Ops;
1511 Ops.push_back(getRoot());
1512 Ops.push_back(getValue(SPI.getLineValue()));
1513 Ops.push_back(getValue(SPI.getColumnValue()));
1515 DebugInfoDesc *DD = DebugInfo->getDescFor(SPI.getContext());
1516 assert(DD && "Not a debug information descriptor");
1517 CompileUnitDesc *CompileUnit = cast<CompileUnitDesc>(DD);
1519 Ops.push_back(DAG.getString(CompileUnit->getFileName()));
1520 Ops.push_back(DAG.getString(CompileUnit->getDirectory()));
1522 DAG.setRoot(DAG.getNode(ISD::LOCATION, MVT::Other, Ops));
1527 case Intrinsic::dbg_region_start: {
1528 MachineDebugInfo *DebugInfo = DAG.getMachineDebugInfo();
1529 DbgRegionStartInst &RSI = cast<DbgRegionStartInst>(I);
1530 if (DebugInfo && RSI.getContext() && DebugInfo->Verify(RSI.getContext())) {
1531 std::vector<SDOperand> Ops;
1533 unsigned LabelID = DebugInfo->RecordRegionStart(RSI.getContext());
1535 Ops.push_back(getRoot());
1536 Ops.push_back(DAG.getConstant(LabelID, MVT::i32));
1538 DAG.setRoot(DAG.getNode(ISD::DEBUG_LABEL, MVT::Other, Ops));
1543 case Intrinsic::dbg_region_end: {
1544 MachineDebugInfo *DebugInfo = DAG.getMachineDebugInfo();
1545 DbgRegionEndInst &REI = cast<DbgRegionEndInst>(I);
1546 if (DebugInfo && REI.getContext() && DebugInfo->Verify(REI.getContext())) {
1547 std::vector<SDOperand> Ops;
1549 unsigned LabelID = DebugInfo->RecordRegionEnd(REI.getContext());
1551 Ops.push_back(getRoot());
1552 Ops.push_back(DAG.getConstant(LabelID, MVT::i32));
1554 DAG.setRoot(DAG.getNode(ISD::DEBUG_LABEL, MVT::Other, Ops));
1559 case Intrinsic::dbg_func_start: {
1560 MachineDebugInfo *DebugInfo = DAG.getMachineDebugInfo();
1561 DbgFuncStartInst &FSI = cast<DbgFuncStartInst>(I);
1562 if (DebugInfo && FSI.getSubprogram() &&
1563 DebugInfo->Verify(FSI.getSubprogram())) {
1564 std::vector<SDOperand> Ops;
1566 unsigned LabelID = DebugInfo->RecordRegionStart(FSI.getSubprogram());
1568 Ops.push_back(getRoot());
1569 Ops.push_back(DAG.getConstant(LabelID, MVT::i32));
1571 DAG.setRoot(DAG.getNode(ISD::DEBUG_LABEL, MVT::Other, Ops));
1576 case Intrinsic::dbg_declare: {
1577 MachineDebugInfo *DebugInfo = DAG.getMachineDebugInfo();
1578 DbgDeclareInst &DI = cast<DbgDeclareInst>(I);
1579 if (DebugInfo && DI.getVariable() && DebugInfo->Verify(DI.getVariable())) {
1580 std::vector<SDOperand> Ops;
1582 SDOperand AddressOp = getValue(DI.getAddress());
1583 if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(AddressOp)) {
1584 DebugInfo->RecordVariable(DI.getVariable(), FI->getIndex());
1591 case Intrinsic::isunordered_f32:
1592 case Intrinsic::isunordered_f64:
1593 setValue(&I, DAG.getSetCC(MVT::i1,getValue(I.getOperand(1)),
1594 getValue(I.getOperand(2)), ISD::SETUO));
1597 case Intrinsic::sqrt_f32:
1598 case Intrinsic::sqrt_f64:
1599 setValue(&I, DAG.getNode(ISD::FSQRT,
1600 getValue(I.getOperand(1)).getValueType(),
1601 getValue(I.getOperand(1))));
1603 case Intrinsic::pcmarker: {
1604 SDOperand Tmp = getValue(I.getOperand(1));
1605 DAG.setRoot(DAG.getNode(ISD::PCMARKER, MVT::Other, getRoot(), Tmp));
1608 case Intrinsic::readcyclecounter: {
1609 std::vector<MVT::ValueType> VTs;
1610 VTs.push_back(MVT::i64);
1611 VTs.push_back(MVT::Other);
1612 std::vector<SDOperand> Ops;
1613 Ops.push_back(getRoot());
1614 SDOperand Tmp = DAG.getNode(ISD::READCYCLECOUNTER, VTs, Ops);
1616 DAG.setRoot(Tmp.getValue(1));
1619 case Intrinsic::bswap_i16:
1620 case Intrinsic::bswap_i32:
1621 case Intrinsic::bswap_i64:
1622 setValue(&I, DAG.getNode(ISD::BSWAP,
1623 getValue(I.getOperand(1)).getValueType(),
1624 getValue(I.getOperand(1))));
1626 case Intrinsic::cttz_i8:
1627 case Intrinsic::cttz_i16:
1628 case Intrinsic::cttz_i32:
1629 case Intrinsic::cttz_i64:
1630 setValue(&I, DAG.getNode(ISD::CTTZ,
1631 getValue(I.getOperand(1)).getValueType(),
1632 getValue(I.getOperand(1))));
1634 case Intrinsic::ctlz_i8:
1635 case Intrinsic::ctlz_i16:
1636 case Intrinsic::ctlz_i32:
1637 case Intrinsic::ctlz_i64:
1638 setValue(&I, DAG.getNode(ISD::CTLZ,
1639 getValue(I.getOperand(1)).getValueType(),
1640 getValue(I.getOperand(1))));
1642 case Intrinsic::ctpop_i8:
1643 case Intrinsic::ctpop_i16:
1644 case Intrinsic::ctpop_i32:
1645 case Intrinsic::ctpop_i64:
1646 setValue(&I, DAG.getNode(ISD::CTPOP,
1647 getValue(I.getOperand(1)).getValueType(),
1648 getValue(I.getOperand(1))));
1650 case Intrinsic::stacksave: {
1651 std::vector<MVT::ValueType> VTs;
1652 VTs.push_back(TLI.getPointerTy());
1653 VTs.push_back(MVT::Other);
1654 std::vector<SDOperand> Ops;
1655 Ops.push_back(getRoot());
1656 SDOperand Tmp = DAG.getNode(ISD::STACKSAVE, VTs, Ops);
1658 DAG.setRoot(Tmp.getValue(1));
1661 case Intrinsic::stackrestore: {
1662 SDOperand Tmp = getValue(I.getOperand(1));
1663 DAG.setRoot(DAG.getNode(ISD::STACKRESTORE, MVT::Other, getRoot(), Tmp));
1666 case Intrinsic::prefetch:
1667 // FIXME: Currently discarding prefetches.
1673 void SelectionDAGLowering::visitCall(CallInst &I) {
1674 const char *RenameFn = 0;
1675 if (Function *F = I.getCalledFunction()) {
1676 if (F->isExternal())
1677 if (unsigned IID = F->getIntrinsicID()) {
1678 RenameFn = visitIntrinsicCall(I, IID);
1681 } else { // Not an LLVM intrinsic.
1682 const std::string &Name = F->getName();
1683 if (Name[0] == 'c' && (Name == "copysign" || Name == "copysignf")) {
1684 if (I.getNumOperands() == 3 && // Basic sanity checks.
1685 I.getOperand(1)->getType()->isFloatingPoint() &&
1686 I.getType() == I.getOperand(1)->getType() &&
1687 I.getType() == I.getOperand(2)->getType()) {
1688 SDOperand LHS = getValue(I.getOperand(1));
1689 SDOperand RHS = getValue(I.getOperand(2));
1690 setValue(&I, DAG.getNode(ISD::FCOPYSIGN, LHS.getValueType(),
1694 } else if (Name[0] == 'f' && (Name == "fabs" || Name == "fabsf")) {
1695 if (I.getNumOperands() == 2 && // Basic sanity checks.
1696 I.getOperand(1)->getType()->isFloatingPoint() &&
1697 I.getType() == I.getOperand(1)->getType()) {
1698 SDOperand Tmp = getValue(I.getOperand(1));
1699 setValue(&I, DAG.getNode(ISD::FABS, Tmp.getValueType(), Tmp));
1702 } else if (Name[0] == 's' && (Name == "sin" || Name == "sinf")) {
1703 if (I.getNumOperands() == 2 && // Basic sanity checks.
1704 I.getOperand(1)->getType()->isFloatingPoint() &&
1705 I.getType() == I.getOperand(1)->getType()) {
1706 SDOperand Tmp = getValue(I.getOperand(1));
1707 setValue(&I, DAG.getNode(ISD::FSIN, Tmp.getValueType(), Tmp));
1710 } else if (Name[0] == 'c' && (Name == "cos" || Name == "cosf")) {
1711 if (I.getNumOperands() == 2 && // Basic sanity checks.
1712 I.getOperand(1)->getType()->isFloatingPoint() &&
1713 I.getType() == I.getOperand(1)->getType()) {
1714 SDOperand Tmp = getValue(I.getOperand(1));
1715 setValue(&I, DAG.getNode(ISD::FCOS, Tmp.getValueType(), Tmp));
1720 } else if (isa<InlineAsm>(I.getOperand(0))) {
1727 Callee = getValue(I.getOperand(0));
1729 Callee = DAG.getExternalSymbol(RenameFn, TLI.getPointerTy());
1730 std::vector<std::pair<SDOperand, const Type*> > Args;
1731 Args.reserve(I.getNumOperands());
1732 for (unsigned i = 1, e = I.getNumOperands(); i != e; ++i) {
1733 Value *Arg = I.getOperand(i);
1734 SDOperand ArgNode = getValue(Arg);
1735 Args.push_back(std::make_pair(ArgNode, Arg->getType()));
1738 const PointerType *PT = cast<PointerType>(I.getCalledValue()->getType());
1739 const FunctionType *FTy = cast<FunctionType>(PT->getElementType());
1741 std::pair<SDOperand,SDOperand> Result =
1742 TLI.LowerCallTo(getRoot(), I.getType(), FTy->isVarArg(), I.getCallingConv(),
1743 I.isTailCall(), Callee, Args, DAG);
1744 if (I.getType() != Type::VoidTy)
1745 setValue(&I, Result.first);
1746 DAG.setRoot(Result.second);
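/// getCopyFromRegs - Emit a series of CopyFromReg nodes that copies the value
/// held in this object's registers back out as a single SDOperand, threading
/// Chain and Flag through each copy and reassembling (BUILD_PAIR) or
/// truncating/rounding the result if it was expanded or promoted.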
1749 SDOperand RegsForValue::getCopyFromRegs(SelectionDAG &DAG,
1750 SDOperand &Chain, SDOperand &Flag)const{
1751 SDOperand Val = DAG.getCopyFromReg(Chain, Regs[0], RegVT, Flag);
1752 Chain = Val.getValue(1);
1753 Flag = Val.getValue(2);
1755 // If the result was expanded, copy from the top part.
1756 if (Regs.size() > 1) {
1757 assert(Regs.size() == 2 &&
1758 "Cannot expand to more than 2 elts yet!");
1759 SDOperand Hi = DAG.getCopyFromReg(Chain, Regs[1], RegVT, Flag);
1760 Chain = Hi.getValue(1);
1761 Flag = Hi.getValue(2);
1762 if (DAG.getTargetLoweringInfo().isLittleEndian())
1763 return DAG.getNode(ISD::BUILD_PAIR, ValueVT, Val, Hi);
1765 return DAG.getNode(ISD::BUILD_PAIR, ValueVT, Hi, Val);
1768 // Otherwise, if the return value was promoted, truncate it to the
1769 // appropriate type.
1770 if (RegVT == ValueVT)
1773 if (MVT::isInteger(RegVT))
1774 return DAG.getNode(ISD::TRUNCATE, ValueVT, Val);
1776 return DAG.getNode(ISD::FP_ROUND, ValueVT, Val);
1779 /// getCopyToRegs - Emit a series of CopyToReg nodes that copies the
1780 /// specified value into the registers specified by this object. This uses
1781 /// Chain/Flag as the input and updates them for the output Chain/Flag.
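/// For example, an i64 value assigned to two i32 registers is split with
/// EXTRACT_ELEMENT into low and high halves (register order reversed on
/// big-endian targets), and each half gets its own CopyToReg.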
1782 void RegsForValue::getCopyToRegs(SDOperand Val, SelectionDAG &DAG,
1783 SDOperand &Chain, SDOperand &Flag) const {
1784 if (Regs.size() == 1) {
1785 // If there is a single register and the types differ, this must be a promotion.
1787 if (RegVT != ValueVT) {
1788 if (MVT::isInteger(RegVT))
1789 Val = DAG.getNode(ISD::ANY_EXTEND, RegVT, Val);
1791 Val = DAG.getNode(ISD::FP_EXTEND, RegVT, Val);
1793 Chain = DAG.getCopyToReg(Chain, Regs[0], Val, Flag);
1794 Flag = Chain.getValue(1);
1796 std::vector<unsigned> R(Regs);
1797 if (!DAG.getTargetLoweringInfo().isLittleEndian())
1798 std::reverse(R.begin(), R.end());
1800 for (unsigned i = 0, e = R.size(); i != e; ++i) {
1801 SDOperand Part = DAG.getNode(ISD::EXTRACT_ELEMENT, RegVT, Val,
1802 DAG.getConstant(i, MVT::i32));
1803 Chain = DAG.getCopyToReg(Chain, R[i], Part, Flag);
1804 Flag = Chain.getValue(1);
1809 /// AddInlineAsmOperands - Add this value to the specified inlineasm node
1810 /// operand list. This adds the code marker and includes the number of
1811 /// values added into it.
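/// The flag word packs the operand code into the low 3 bits and the register
/// count into the remaining bits; e.g. a REGDEF (code 2) of two registers is
/// encoded as 2 | (2 << 3) == 18, and the matching-constraint code later
/// recovers the count as NumOps >> 3.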
1812 void RegsForValue::AddInlineAsmOperands(unsigned Code, SelectionDAG &DAG,
1813 std::vector<SDOperand> &Ops) const {
1814 Ops.push_back(DAG.getConstant(Code | (Regs.size() << 3), MVT::i32));
1815 for (unsigned i = 0, e = Regs.size(); i != e; ++i)
1816 Ops.push_back(DAG.getRegister(Regs[i], RegVT));
1819 /// isAllocatableRegister - If the specified register is safe to allocate,
1820 /// i.e. it isn't a stack pointer or some other special register, return the
1821 /// register class for the register. Otherwise, return null.
1822 static const TargetRegisterClass *
1823 isAllocatableRegister(unsigned Reg, MachineFunction &MF,
1824 const TargetLowering &TLI, const MRegisterInfo *MRI) {
1825 MVT::ValueType FoundVT = MVT::Other;
1826 const TargetRegisterClass *FoundRC = 0;
1827 for (MRegisterInfo::regclass_iterator RCI = MRI->regclass_begin(),
1828 E = MRI->regclass_end(); RCI != E; ++RCI) {
1829 MVT::ValueType ThisVT = MVT::Other;
1831 const TargetRegisterClass *RC = *RCI;
1832 // If none of the value types for this register class are valid, we
1833 // can't use it. For example, 64-bit reg classes on 32-bit targets.
1834 for (TargetRegisterClass::vt_iterator I = RC->vt_begin(), E = RC->vt_end();
1836 if (TLI.isTypeLegal(*I)) {
1837 // If we have already found this register in a different register class,
1838 // choose the one with the largest VT specified. For example, on
1839 // PowerPC, we favor f64 register classes over f32.
1840 if (FoundVT == MVT::Other ||
1841 MVT::getSizeInBits(FoundVT) < MVT::getSizeInBits(*I)) {
1848 if (ThisVT == MVT::Other) continue;
1850 // NOTE: This isn't ideal. In particular, this might allocate the
1851 // frame pointer in functions that need it (due to them not being taken
1852 // out of allocation, because a variable sized allocation hasn't been seen
1853 // yet). This is a slight code pessimization, but should still work.
1854 for (TargetRegisterClass::iterator I = RC->allocation_order_begin(MF),
1855 E = RC->allocation_order_end(MF); I != E; ++I)
1857 // We found a matching register class. Keep looking at others in case
1858 // we find one with larger registers that this physreg is also in.
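/// GetRegistersForValue - Assign registers for one inline asm constraint. If
/// the constraint code names a specific physical register, the consecutive
/// registers needed for a promoted/expanded value are taken starting at it;
/// otherwise NumRegs consecutive allocatable registers are taken from the
/// constraint's register class, skipping any already reserved in
/// OutputRegs/InputRegs.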
1867 RegsForValue SelectionDAGLowering::
1868 GetRegistersForValue(const std::string &ConstrCode,
1869 MVT::ValueType VT, bool isOutReg, bool isInReg,
1870 std::set<unsigned> &OutputRegs,
1871 std::set<unsigned> &InputRegs) {
1872 std::pair<unsigned, const TargetRegisterClass*> PhysReg =
1873 TLI.getRegForInlineAsmConstraint(ConstrCode, VT);
1874 std::vector<unsigned> Regs;
1876 unsigned NumRegs = VT != MVT::Other ? TLI.getNumElements(VT) : 1;
1877 MVT::ValueType RegVT;
1878 MVT::ValueType ValueVT = VT;
1880 if (PhysReg.first) {
1881 if (VT == MVT::Other)
1882 ValueVT = *PhysReg.second->vt_begin();
1885 // This is an explicit reference to a physical register.
1886 Regs.push_back(PhysReg.first);
1888 // If this is an expanded reference, add the rest of the regs to Regs.
1890 RegVT = *PhysReg.second->vt_begin();
1891 TargetRegisterClass::iterator I = PhysReg.second->begin();
1892 TargetRegisterClass::iterator E = PhysReg.second->end();
1893 for (; *I != PhysReg.first; ++I)
1894 assert(I != E && "Didn't find reg!");
1896 // Already added the first reg.
1898 for (; NumRegs; --NumRegs, ++I) {
1899 assert(I != E && "Ran out of registers to allocate!");
1903 return RegsForValue(Regs, RegVT, ValueVT);
1906 // This is a reference to a register class. Allocate NumRegs consecutive,
1907 // available, registers from the class.
1908 std::vector<unsigned> RegClassRegs =
1909 TLI.getRegClassForInlineAsmConstraint(ConstrCode, VT);
1911 const MRegisterInfo *MRI = DAG.getTarget().getRegisterInfo();
1912 MachineFunction &MF = *CurMBB->getParent();
1913 unsigned NumAllocated = 0;
1914 for (unsigned i = 0, e = RegClassRegs.size(); i != e; ++i) {
1915 unsigned Reg = RegClassRegs[i];
1916 // See if this register is available.
1917 if ((isOutReg && OutputRegs.count(Reg)) || // Already used.
1918 (isInReg && InputRegs.count(Reg))) { // Already used.
1919 // Make sure we find consecutive registers.
1924 // Check to see if this register is allocatable (i.e. don't give out the stack pointer, etc.).
1926 const TargetRegisterClass *RC = isAllocatableRegister(Reg, MF, TLI, MRI);
1928 // Make sure we find consecutive registers.
1933 // Okay, this register is good, we can use it.
1936 // If we have allocated enough consecutive registers, we are done.
1937 if (NumAllocated == NumRegs) {
1938 unsigned RegStart = (i-NumAllocated)+1;
1939 unsigned RegEnd = i+1;
1940 // Mark all of the allocated registers used.
1941 for (unsigned i = RegStart; i != RegEnd; ++i) {
1942 unsigned Reg = RegClassRegs[i];
1943 Regs.push_back(Reg);
1944 if (isOutReg) OutputRegs.insert(Reg); // Mark reg used.
1945 if (isInReg) InputRegs.insert(Reg); // Mark reg used.
1948 return RegsForValue(Regs, *RC->vt_begin(), VT);
1952 // Otherwise, we couldn't allocate enough registers for this.
1953 return RegsForValue();
1957 /// visitInlineAsm - Handle a call to an InlineAsm object.
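/// Lowering makes two passes over the parsed constraints: a prepass that
/// computes operand types and reserves any fixed physical registers, then a
/// second pass that allocates the remaining registers, emits the copies, and
/// builds the INLINEASM operand list.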
1959 void SelectionDAGLowering::visitInlineAsm(CallInst &I) {
1960 InlineAsm *IA = cast<InlineAsm>(I.getOperand(0));
1962 SDOperand AsmStr = DAG.getTargetExternalSymbol(IA->getAsmString().c_str(),
1965 // Note, we treat inline asms both with and without side-effects as the same.
1966 // If an inline asm doesn't have side effects and doesn't access memory, we
1967 // could choose not to chain it, but we currently don't.
1968 bool hasSideEffects = IA->hasSideEffects();
1970 std::vector<InlineAsm::ConstraintInfo> Constraints = IA->ParseConstraints();
1971 std::vector<MVT::ValueType> ConstraintVTs;
1973 /// AsmNodeOperands - The operand list for the final INLINEASM node: the
1974 /// input chain and the asm string, then, for each constraint operand, a flag
1975 /// word encoding its code and operand count followed by its registers/value.
1976 std::vector<SDOperand> AsmNodeOperands;
1977 AsmNodeOperands.push_back(SDOperand()); // reserve space for input chain
1978 AsmNodeOperands.push_back(AsmStr);
1980 SDOperand Chain = getRoot();
1983 // We fully assign registers here at isel time. This is not optimal, but
1984 // should work. For register classes that correspond to LLVM classes, we
1985 // could let the LLVM RA do its thing, but we currently don't. Do a prepass
1986 // over the constraints, collecting fixed registers that we know we can't use.
1987 std::set<unsigned> OutputRegs, InputRegs;
1989 for (unsigned i = 0, e = Constraints.size(); i != e; ++i) {
1990 assert(Constraints[i].Codes.size() == 1 && "Only handles one code so far!");
1991 std::string &ConstraintCode = Constraints[i].Codes[0];
1993 MVT::ValueType OpVT;
1995 // Compute the value type for each operand and add it to ConstraintVTs.
1996 switch (Constraints[i].Type) {
1997 case InlineAsm::isOutput:
1998 if (!Constraints[i].isIndirectOutput) {
1999 assert(I.getType() != Type::VoidTy && "Bad inline asm!");
2000 OpVT = TLI.getValueType(I.getType());
2002 const Type *OpTy = I.getOperand(OpNum)->getType();
2003 OpVT = TLI.getValueType(cast<PointerType>(OpTy)->getElementType());
2004 OpNum++; // Consumes a call operand.
2007 case InlineAsm::isInput:
2008 OpVT = TLI.getValueType(I.getOperand(OpNum)->getType());
2009 OpNum++; // Consumes a call operand.
2011 case InlineAsm::isClobber:
2016 ConstraintVTs.push_back(OpVT);
2018 if (TLI.getRegForInlineAsmConstraint(ConstraintCode, OpVT).first == 0)
2019 continue; // Not assigned a fixed reg.
2021 // Build a list of regs that this operand uses. This always has a single
2022 // element for promoted/expanded operands.
2023 RegsForValue Regs = GetRegistersForValue(ConstraintCode, OpVT,
2025 OutputRegs, InputRegs);
2027 switch (Constraints[i].Type) {
2028 case InlineAsm::isOutput:
2029 // We can't assign any other output to this register.
2030 OutputRegs.insert(Regs.Regs.begin(), Regs.Regs.end());
2031 // If this is an early-clobber output, it cannot be assigned to the same
2032 // value as the input reg.
2033 if (Constraints[i].isEarlyClobber || Constraints[i].hasMatchingInput)
2034 InputRegs.insert(Regs.Regs.begin(), Regs.Regs.end());
2036 case InlineAsm::isInput:
2037 // We can't assign any other input to this register.
2038 InputRegs.insert(Regs.Regs.begin(), Regs.Regs.end());
2040 case InlineAsm::isClobber:
2041 // Clobbered regs cannot be used as inputs or outputs.
2042 InputRegs.insert(Regs.Regs.begin(), Regs.Regs.end());
2043 OutputRegs.insert(Regs.Regs.begin(), Regs.Regs.end());
2048 // Loop over all of the inputs, copying the operand values into the
2049 // appropriate registers and processing the output regs.
2050 RegsForValue RetValRegs;
2051 std::vector<std::pair<RegsForValue, Value*> > IndirectStoresToEmit;
2054 for (unsigned i = 0, e = Constraints.size(); i != e; ++i) {
2055 assert(Constraints[i].Codes.size() == 1 && "Only handles one code so far!");
2056 std::string &ConstraintCode = Constraints[i].Codes[0];
2058 switch (Constraints[i].Type) {
2059 case InlineAsm::isOutput: {
2060 TargetLowering::ConstraintType CTy = TargetLowering::C_RegisterClass;
2061 if (ConstraintCode.size() == 1) // not a physreg name.
2062 CTy = TLI.getConstraintType(ConstraintCode[0]);
2064 if (CTy == TargetLowering::C_Memory) {
2066 SDOperand InOperandVal = getValue(I.getOperand(OpNum));
2068 // Check that the operand (the address to store to) isn't a float.
2069 if (!MVT::isInteger(InOperandVal.getValueType()))
2070 assert(0 && "MATCH FAIL!");
2072 if (!Constraints[i].isIndirectOutput)
2073 assert(0 && "MATCH FAIL!");
2075 OpNum++; // Consumes a call operand.
2077 // Extend/truncate to the right pointer type if needed.
2078 MVT::ValueType PtrType = TLI.getPointerTy();
2079 if (InOperandVal.getValueType() < PtrType)
2080 InOperandVal = DAG.getNode(ISD::ZERO_EXTEND, PtrType, InOperandVal);
2081 else if (InOperandVal.getValueType() > PtrType)
2082 InOperandVal = DAG.getNode(ISD::TRUNCATE, PtrType, InOperandVal);
2084 // Add information to the INLINEASM node to know about this output.
2085 unsigned ResOpType = 4/*MEM*/ | (1 << 3);
2086 AsmNodeOperands.push_back(DAG.getConstant(ResOpType, MVT::i32));
2087 AsmNodeOperands.push_back(InOperandVal);
2091 // Otherwise, this is a register output.
2092 assert(CTy == TargetLowering::C_RegisterClass && "Unknown op type!");
2094 // If this is an early-clobber output, or if there is an input
2095 // constraint that matches this, we need to reserve the input register
2096 // so no other inputs allocate to it.
2097 bool UsesInputRegister = false;
2098 if (Constraints[i].isEarlyClobber || Constraints[i].hasMatchingInput)
2099 UsesInputRegister = true;
2101 // Copy the output from the appropriate register. Find a register that we can use.
2104 GetRegistersForValue(ConstraintCode, ConstraintVTs[i],
2105 true, UsesInputRegister,
2106 OutputRegs, InputRegs);
2107 assert(!Regs.Regs.empty() && "Couldn't allocate output reg!");
2109 if (!Constraints[i].isIndirectOutput) {
2110 assert(RetValRegs.Regs.empty() &&
2111 "Cannot have multiple output constraints yet!");
2112 assert(I.getType() != Type::VoidTy && "Bad inline asm!");
2115 IndirectStoresToEmit.push_back(std::make_pair(Regs,
2116 I.getOperand(OpNum)));
2117 OpNum++; // Consumes a call operand.
2120 // Add information to the INLINEASM node to know that this register is set.
2122 Regs.AddInlineAsmOperands(2 /*REGDEF*/, DAG, AsmNodeOperands);
2125 case InlineAsm::isInput: {
2126 SDOperand InOperandVal = getValue(I.getOperand(OpNum));
2127 OpNum++; // Consumes a call operand.
2129 if (isdigit(ConstraintCode[0])) { // Matching constraint?
2130 // If this is required to match an output register we have already set,
2131 // just use its register.
2132 unsigned OperandNo = atoi(ConstraintCode.c_str());
2134 // Scan until we find the definition we already emitted of this operand.
2135 // When we find it, create a RegsForValue operand.
2136 unsigned CurOp = 2; // The first operand.
2137 for (; OperandNo; --OperandNo) {
2138 // Advance to the next operand.
2140 cast<ConstantSDNode>(AsmNodeOperands[CurOp])->getValue();
2141 assert((NumOps & 7) == 2 /*REGDEF*/ &&
2142 "Skipped past definitions?");
2143 CurOp += (NumOps>>3)+1;
2147 cast<ConstantSDNode>(AsmNodeOperands[CurOp])->getValue();
2148 assert((NumOps & 7) == 2 /*REGDEF*/ &&
2149 "Skipped past definitions?");
2151 // Add NumOps>>3 registers to MatchedRegs.
2152 RegsForValue MatchedRegs;
2153 MatchedRegs.ValueVT = InOperandVal.getValueType();
2154 MatchedRegs.RegVT = AsmNodeOperands[CurOp+1].getValueType();
2155 for (unsigned i = 0, e = NumOps>>3; i != e; ++i) {
2156 unsigned Reg=cast<RegisterSDNode>(AsmNodeOperands[++CurOp])->getReg();
2157 MatchedRegs.Regs.push_back(Reg);
2160 // Use the produced MatchedRegs object to copy the input value into the matched registers and record them as REGUSE operands.
2161 MatchedRegs.getCopyToRegs(InOperandVal, DAG, Chain, Flag);
2162 MatchedRegs.AddInlineAsmOperands(1 /*REGUSE*/, DAG, AsmNodeOperands);
2166 TargetLowering::ConstraintType CTy = TargetLowering::C_RegisterClass;
2167 if (ConstraintCode.size() == 1) // not a physreg name.
2168 CTy = TLI.getConstraintType(ConstraintCode[0]);
2170 if (CTy == TargetLowering::C_Other) {
2171 if (!TLI.isOperandValidForConstraint(InOperandVal, ConstraintCode[0]))
2172 assert(0 && "MATCH FAIL!");
2174 // Add information to the INLINEASM node to know about this input.
2175 unsigned ResOpType = 3 /*IMM*/ | (1 << 3);
2176 AsmNodeOperands.push_back(DAG.getConstant(ResOpType, MVT::i32));
2177 AsmNodeOperands.push_back(InOperandVal);
2179 } else if (CTy == TargetLowering::C_Memory) {
2182 // Check that the operand isn't a float.
2183 if (!MVT::isInteger(InOperandVal.getValueType()))
2184 assert(0 && "MATCH FAIL!");
2186 // Extend/truncate to the right pointer type if needed.
2187 MVT::ValueType PtrType = TLI.getPointerTy();
2188 if (InOperandVal.getValueType() < PtrType)
2189 InOperandVal = DAG.getNode(ISD::ZERO_EXTEND, PtrType, InOperandVal);
2190 else if (InOperandVal.getValueType() > PtrType)
2191 InOperandVal = DAG.getNode(ISD::TRUNCATE, PtrType, InOperandVal);
2193 // Add information to the INLINEASM node to know about this input.
2194 unsigned ResOpType = 4/*MEM*/ | (1 << 3);
2195 AsmNodeOperands.push_back(DAG.getConstant(ResOpType, MVT::i32));
2196 AsmNodeOperands.push_back(InOperandVal);
2200 assert(CTy == TargetLowering::C_RegisterClass && "Unknown op type!");
2202 // Copy the input into the appropriate registers.
2203 RegsForValue InRegs =
2204 GetRegistersForValue(ConstraintCode, ConstraintVTs[i],
2205 false, true, OutputRegs, InputRegs);
2206 // FIXME: should be match fail.
2207 assert(!InRegs.Regs.empty() && "Couldn't allocate input reg!");
2209 InRegs.getCopyToRegs(InOperandVal, DAG, Chain, Flag);
2211 InRegs.AddInlineAsmOperands(1/*REGUSE*/, DAG, AsmNodeOperands);
2214 case InlineAsm::isClobber: {
2215 RegsForValue ClobberedRegs =
2216 GetRegistersForValue(ConstraintCode, MVT::Other, false, false,
2217 OutputRegs, InputRegs);
2218 // Add the clobbered value to the operand list, so that the register
2219 // allocator is aware that the physreg got clobbered.
2220 if (!ClobberedRegs.Regs.empty())
2221 ClobberedRegs.AddInlineAsmOperands(2/*REGDEF*/, DAG, AsmNodeOperands);
2227 // Finish up input operands.
2228 AsmNodeOperands[0] = Chain;
2229 if (Flag.Val) AsmNodeOperands.push_back(Flag);
2231 std::vector<MVT::ValueType> VTs;
2232 VTs.push_back(MVT::Other);
2233 VTs.push_back(MVT::Flag);
2234 Chain = DAG.getNode(ISD::INLINEASM, VTs, AsmNodeOperands);
2235 Flag = Chain.getValue(1);
2237 // If this asm returns a register value, copy the result from that register
2238 // and set it as the value of the call.
2239 if (!RetValRegs.Regs.empty())
2240 setValue(&I, RetValRegs.getCopyFromRegs(DAG, Chain, Flag));
2242 std::vector<std::pair<SDOperand, Value*> > StoresToEmit;
2244 // Process indirect outputs, first output all of the flagged copies out of the physregs.
2246 for (unsigned i = 0, e = IndirectStoresToEmit.size(); i != e; ++i) {
2247 RegsForValue &OutRegs = IndirectStoresToEmit[i].first;
2248 Value *Ptr = IndirectStoresToEmit[i].second;
2249 SDOperand OutVal = OutRegs.getCopyFromRegs(DAG, Chain, Flag);
2250 StoresToEmit.push_back(std::make_pair(OutVal, Ptr));
2253 // Emit the non-flagged stores from the physregs.
2254 std::vector<SDOperand> OutChains;
2255 for (unsigned i = 0, e = StoresToEmit.size(); i != e; ++i)
2256 OutChains.push_back(DAG.getNode(ISD::STORE, MVT::Other, Chain,
2257 StoresToEmit[i].first,
2258 getValue(StoresToEmit[i].second),
2259 DAG.getSrcValue(StoresToEmit[i].second)));
2260 if (!OutChains.empty())
2261 Chain = DAG.getNode(ISD::TokenFactor, MVT::Other, OutChains);
2266 void SelectionDAGLowering::visitMalloc(MallocInst &I) {
2267 SDOperand Src = getValue(I.getOperand(0));
2269 MVT::ValueType IntPtr = TLI.getPointerTy();
2271 if (IntPtr < Src.getValueType())
2272 Src = DAG.getNode(ISD::TRUNCATE, IntPtr, Src);
2273 else if (IntPtr > Src.getValueType())
2274 Src = DAG.getNode(ISD::ZERO_EXTEND, IntPtr, Src);
2276 // Scale the source by the type size.
2277 uint64_t ElementSize = TD->getTypeSize(I.getType()->getElementType());
2278 Src = DAG.getNode(ISD::MUL, Src.getValueType(),
2279 Src, getIntPtrConstant(ElementSize));
2281 std::vector<std::pair<SDOperand, const Type*> > Args;
2282 Args.push_back(std::make_pair(Src, TLI.getTargetData()->getIntPtrType()));
2284 std::pair<SDOperand,SDOperand> Result =
2285 TLI.LowerCallTo(getRoot(), I.getType(), false, CallingConv::C, true,
2286 DAG.getExternalSymbol("malloc", IntPtr),
2288 setValue(&I, Result.first); // Pointers always fit in registers
2289 DAG.setRoot(Result.second);
2292 void SelectionDAGLowering::visitFree(FreeInst &I) {
2293 std::vector<std::pair<SDOperand, const Type*> > Args;
2294 Args.push_back(std::make_pair(getValue(I.getOperand(0)),
2295 TLI.getTargetData()->getIntPtrType()));
2296 MVT::ValueType IntPtr = TLI.getPointerTy();
2297 std::pair<SDOperand,SDOperand> Result =
2298 TLI.LowerCallTo(getRoot(), Type::VoidTy, false, CallingConv::C, true,
2299 DAG.getExternalSymbol("free", IntPtr), Args, DAG);
2300 DAG.setRoot(Result.second);
2303 // InsertAtEndOfBasicBlock - This method should be implemented by targets that
2304 // mark instructions with the 'usesCustomDAGSchedInserter' flag. These
2305 // instructions are special in various ways, which require special support to
2306 // insert. The specified MachineInstr is created but not inserted into any
2307 // basic blocks, and the scheduler passes ownership of it to this method.
2308 MachineBasicBlock *TargetLowering::InsertAtEndOfBasicBlock(MachineInstr *MI,
2309 MachineBasicBlock *MBB) {
2310 std::cerr << "If a target marks an instruction with "
2311 "'usesCustomDAGSchedInserter', it must implement "
2312 "TargetLowering::InsertAtEndOfBasicBlock!\n";
2317 void SelectionDAGLowering::visitVAStart(CallInst &I) {
2318 DAG.setRoot(DAG.getNode(ISD::VASTART, MVT::Other, getRoot(),
2319 getValue(I.getOperand(1)),
2320 DAG.getSrcValue(I.getOperand(1))));
2323 void SelectionDAGLowering::visitVAArg(VAArgInst &I) {
2324 SDOperand V = DAG.getVAArg(TLI.getValueType(I.getType()), getRoot(),
2325 getValue(I.getOperand(0)),
2326 DAG.getSrcValue(I.getOperand(0)));
2328 DAG.setRoot(V.getValue(1));
2331 void SelectionDAGLowering::visitVAEnd(CallInst &I) {
2332 DAG.setRoot(DAG.getNode(ISD::VAEND, MVT::Other, getRoot(),
2333 getValue(I.getOperand(1)),
2334 DAG.getSrcValue(I.getOperand(1))));
2337 void SelectionDAGLowering::visitVACopy(CallInst &I) {
2338 DAG.setRoot(DAG.getNode(ISD::VACOPY, MVT::Other, getRoot(),
2339 getValue(I.getOperand(1)),
2340 getValue(I.getOperand(2)),
2341 DAG.getSrcValue(I.getOperand(1)),
2342 DAG.getSrcValue(I.getOperand(2))));
2345 /// TargetLowering::LowerArguments - This is the default LowerArguments
2346 /// implementation, which just inserts a FORMAL_ARGUMENTS node. FIXME: When all
2347 /// targets are migrated to using FORMAL_ARGUMENTS, this hook should be removed.
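/// For example, an i64 argument on a target whose widest legal integer type
/// is i32 contributes two i32 result values to the FORMAL_ARGUMENTS node,
/// which are then reassembled with BUILD_PAIR below.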
2348 std::vector<SDOperand>
2349 TargetLowering::LowerArguments(Function &F, SelectionDAG &DAG) {
2350 // Add CC# and isVararg as operands to the FORMAL_ARGUMENTS node.
2351 std::vector<SDOperand> Ops;
2352 Ops.push_back(DAG.getRoot());
2353 Ops.push_back(DAG.getConstant(F.getCallingConv(), getPointerTy()));
2354 Ops.push_back(DAG.getConstant(F.isVarArg(), getPointerTy()));
2356 // Add one result value for each formal argument.
2357 std::vector<MVT::ValueType> RetVals;
2358 for (Function::arg_iterator I = F.arg_begin(), E = F.arg_end(); I != E; ++I) {
2359 MVT::ValueType VT = getValueType(I->getType());
2361 switch (getTypeAction(VT)) {
2362 default: assert(0 && "Unknown type action!");
2364 RetVals.push_back(VT);
2367 RetVals.push_back(getTypeToTransformTo(VT));
2370 if (VT != MVT::Vector) {
2371 // If this is a large integer, it needs to be broken up into small
2372 // integers. Figure out what the destination type is and how many small
2373 // integers it turns into.
2374 MVT::ValueType NVT = getTypeToTransformTo(VT);
2375 unsigned NumVals = MVT::getSizeInBits(VT)/MVT::getSizeInBits(NVT);
2376 for (unsigned i = 0; i != NumVals; ++i)
2377 RetVals.push_back(NVT);
2379 // Otherwise, this is a vector type. We only support legal vectors right now.
2381 unsigned NumElems = cast<PackedType>(I->getType())->getNumElements();
2382 const Type *EltTy = cast<PackedType>(I->getType())->getElementType();
2384 // Figure out if there is a Packed type corresponding to this Vector
2385 // type. If so, convert to the packed type.
2386 MVT::ValueType TVT = MVT::getVectorType(getValueType(EltTy), NumElems);
2387 if (TVT != MVT::Other && isTypeLegal(TVT)) {
2388 RetVals.push_back(TVT);
2390 assert(0 && "Don't support illegal by-val vector arguments yet!");
2397 RetVals.push_back(MVT::Other);
2400 SDNode *Result = DAG.getNode(ISD::FORMAL_ARGUMENTS, RetVals, Ops).Val;
2402 DAG.setRoot(SDOperand(Result, Result->getNumValues()-1));
2404 // Set up the return result vector.
2407 for (Function::arg_iterator I = F.arg_begin(), E = F.arg_end(); I != E; ++I) {
2408 MVT::ValueType VT = getValueType(I->getType());
2410 switch (getTypeAction(VT)) {
2411 default: assert(0 && "Unknown type action!");
2413 Ops.push_back(SDOperand(Result, i++));
2416 SDOperand Op(Result, i++);
2417 if (MVT::isInteger(VT)) {
2418 unsigned AssertOp = I->getType()->isSigned() ? ISD::AssertSext
2420 Op = DAG.getNode(AssertOp, Op.getValueType(), Op, DAG.getValueType(VT));
2421 Op = DAG.getNode(ISD::TRUNCATE, VT, Op);
2423 assert(MVT::isFloatingPoint(VT) && "Not int or FP?");
2424 Op = DAG.getNode(ISD::FP_ROUND, VT, Op);
2430 if (VT != MVT::Vector) {
2431 // If this is a large integer, it needs to be reassembled from small
2432 // integers. Figure out what the source elt type is and how many small integers it is.
2434 MVT::ValueType NVT = getTypeToTransformTo(VT);
2435 unsigned NumVals = MVT::getSizeInBits(VT)/MVT::getSizeInBits(NVT);
2437 SDOperand Lo = SDOperand(Result, i++);
2438 SDOperand Hi = SDOperand(Result, i++);
2440 if (!isLittleEndian())
2443 Ops.push_back(DAG.getNode(ISD::BUILD_PAIR, VT, Lo, Hi));
2445 // Value scalarized into many values. Unimp for now.
2446 assert(0 && "Cannot expand i64 -> i16 yet!");
2449 // Otherwise, this is a vector type. We only support legal vectors right now.
2451 const PackedType *PTy = cast<PackedType>(I->getType());
2452 unsigned NumElems = PTy->getNumElements();
2453 const Type *EltTy = PTy->getElementType();
2455 // Figure out if there is a Packed type corresponding to this Vector
2456 // type. If so, convert to the packed type.
2457 MVT::ValueType TVT = MVT::getVectorType(getValueType(EltTy), NumElems);
2458 if (TVT != MVT::Other && isTypeLegal(TVT)) {
2459 SDOperand N = SDOperand(Result, i++);
2460 // Handle copies from generic vectors to registers.
2461 MVT::ValueType PTyElementVT, PTyLegalElementVT;
2462 unsigned NE = getPackedTypeBreakdown(PTy, PTyElementVT,
2464 // Insert a VBIT_CONVERT of the FORMAL_ARGUMENTS to a
2465 // "N x PTyElementVT" MVT::Vector type.
2466 N = DAG.getNode(ISD::VBIT_CONVERT, MVT::Vector, N,
2467 DAG.getConstant(NE, MVT::i32),
2468 DAG.getValueType(PTyElementVT));
2471 assert(0 && "Don't support illegal by-val vector arguments yet!");
2480 // It is always conservatively correct for llvm.returnaddress and
2481 // llvm.frameaddress to return 0.
2482 std::pair<SDOperand, SDOperand>
2483 TargetLowering::LowerFrameReturnAddress(bool isFrameAddr, SDOperand Chain,
2484 unsigned Depth, SelectionDAG &DAG) {
2485 return std::make_pair(DAG.getConstant(0, getPointerTy()), Chain);
2488 SDOperand TargetLowering::LowerOperation(SDOperand Op, SelectionDAG &DAG) {
2489 assert(0 && "LowerOperation not implemented for this target!");
2494 SDOperand TargetLowering::CustomPromoteOperation(SDOperand Op,
2495 SelectionDAG &DAG) {
2496 assert(0 && "CustomPromoteOperation not implemented for this target!");
2501 void SelectionDAGLowering::visitFrameReturnAddress(CallInst &I, bool isFrame) {
2502 unsigned Depth = (unsigned)cast<ConstantUInt>(I.getOperand(1))->getValue();
2503 std::pair<SDOperand,SDOperand> Result =
2504 TLI.LowerFrameReturnAddress(isFrame, getRoot(), Depth, DAG);
2505 setValue(&I, Result.first);
2506 DAG.setRoot(Result.second);
2509 /// getMemsetValue - Vectorized representation of the memset value operand.
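/// For example, a constant byte value of 0xAB is replicated to the i32
/// constant 0xABABABAB when VT is MVT::i32.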
2511 static SDOperand getMemsetValue(SDOperand Value, MVT::ValueType VT,
2512 SelectionDAG &DAG) {
2513 MVT::ValueType CurVT = VT;
2514 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Value)) {
2515 uint64_t Val = C->getValue() & 255;
2517 while (CurVT != MVT::i8) {
2518 Val = (Val << Shift) | Val;
2520 CurVT = (MVT::ValueType)((unsigned)CurVT - 1);
2522 return DAG.getConstant(Val, VT);
2524 Value = DAG.getNode(ISD::ZERO_EXTEND, VT, Value);
2526 while (CurVT != MVT::i8) {
2528 DAG.getNode(ISD::OR, VT,
2529 DAG.getNode(ISD::SHL, VT, Value,
2530 DAG.getConstant(Shift, MVT::i8)), Value);
2532 CurVT = (MVT::ValueType)((unsigned)CurVT - 1);
2539 /// getMemsetStringVal - Similar to getMemsetValue, except this is only
2540 /// used when a memcpy is turned into a memset because the source is a constant string.
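/// It packs getSizeInBits(VT)/8 characters of the string, starting at Offset,
/// into one integer constant, walking backwards on little-endian targets so
/// the bytes end up in memory order.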
2542 static SDOperand getMemsetStringVal(MVT::ValueType VT,
2543 SelectionDAG &DAG, TargetLowering &TLI,
2544 std::string &Str, unsigned Offset) {
2545 MVT::ValueType CurVT = VT;
2547 unsigned MSB = getSizeInBits(VT) / 8;
2548 if (TLI.isLittleEndian())
2549 Offset = Offset + MSB - 1;
2550 for (unsigned i = 0; i != MSB; ++i) {
2551 Val = (Val << 8) | Str[Offset];
2552 Offset += TLI.isLittleEndian() ? -1 : 1;
2554 return DAG.getConstant(Val, VT);
2557 /// getMemBasePlusOffset - Returns the base pointer plus the given constant offset, as an ADD node of the base's value type.
2558 static SDOperand getMemBasePlusOffset(SDOperand Base, unsigned Offset,
2559 SelectionDAG &DAG, TargetLowering &TLI) {
2560 MVT::ValueType VT = Base.getValueType();
2561 return DAG.getNode(ISD::ADD, VT, Base, DAG.getConstant(Offset, VT));
2564 /// MeetsMaxMemopRequirement - Determines if the number of memory ops required
2565 /// to replace the memset / memcpy is below the threshold. It also returns the
2566 /// types of the sequence of memory ops to perform memset / memcpy.
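/// For example, a 7-byte memset with 4-byte alignment would typically be
/// broken into an i32, an i16, and an i8 store (three ops), assuming those
/// types are legal and the limit allows it.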
2567 static bool MeetsMaxMemopRequirement(std::vector<MVT::ValueType> &MemOps,
2568 unsigned Limit, uint64_t Size,
2569 unsigned Align, TargetLowering &TLI) {
2572 if (TLI.allowsUnalignedMemoryAccesses()) {
2575 switch (Align & 7) {
2591 MVT::ValueType LVT = MVT::i64;
2592 while (!TLI.isTypeLegal(LVT))
2593 LVT = (MVT::ValueType)((unsigned)LVT - 1);
2594 assert(MVT::isInteger(LVT));
2599 unsigned NumMemOps = 0;
2601 unsigned VTSize = getSizeInBits(VT) / 8;
2602 while (VTSize > Size) {
2603 VT = (MVT::ValueType)((unsigned)VT - 1);
2606 assert(MVT::isInteger(VT));
2608 if (++NumMemOps > Limit)
2610 MemOps.push_back(VT);
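/// visitMemIntrinsic - Lower llvm.memset/memcpy/memmove. When the length is a
/// small enough constant, the operation is expanded inline into a short
/// sequence of stores (or loads and stores); otherwise a MEMSET/MEMCPY/MEMMOVE
/// node is emitted for the target to lower.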
2617 void SelectionDAGLowering::visitMemIntrinsic(CallInst &I, unsigned Op) {
2618 SDOperand Op1 = getValue(I.getOperand(1));
2619 SDOperand Op2 = getValue(I.getOperand(2));
2620 SDOperand Op3 = getValue(I.getOperand(3));
2621 SDOperand Op4 = getValue(I.getOperand(4));
2622 unsigned Align = (unsigned)cast<ConstantSDNode>(Op4)->getValue();
2623 if (Align == 0) Align = 1;
2625 if (ConstantSDNode *Size = dyn_cast<ConstantSDNode>(Op3)) {
2626 std::vector<MVT::ValueType> MemOps;
2628 // Expand memset / memcpy to a series of load / store ops
2629 // if the size operand falls below a certain threshold.
2630 std::vector<SDOperand> OutChains;
2632 default: break; // Do nothing for now.
2634 if (MeetsMaxMemopRequirement(MemOps, TLI.getMaxStoresPerMemset(),
2635 Size->getValue(), Align, TLI)) {
2636 unsigned NumMemOps = MemOps.size();
2637 unsigned Offset = 0;
2638 for (unsigned i = 0; i < NumMemOps; i++) {
2639 MVT::ValueType VT = MemOps[i];
2640 unsigned VTSize = getSizeInBits(VT) / 8;
2641 SDOperand Value = getMemsetValue(Op2, VT, DAG);
2642 SDOperand Store = DAG.getNode(ISD::STORE, MVT::Other, getRoot(),
2644 getMemBasePlusOffset(Op1, Offset, DAG, TLI),
2645 DAG.getSrcValue(I.getOperand(1), Offset));
2646 OutChains.push_back(Store);
2653 if (MeetsMaxMemopRequirement(MemOps, TLI.getMaxStoresPerMemcpy(),
2654 Size->getValue(), Align, TLI)) {
2655 unsigned NumMemOps = MemOps.size();
2656 unsigned SrcOff = 0, DstOff = 0, SrcDelta = 0;
2657 GlobalAddressSDNode *G = NULL;
2659 bool CopyFromStr = false;
2661 if (Op2.getOpcode() == ISD::GlobalAddress)
2662 G = cast<GlobalAddressSDNode>(Op2);
2663 else if (Op2.getOpcode() == ISD::ADD &&
2664 Op2.getOperand(0).getOpcode() == ISD::GlobalAddress &&
2665 Op2.getOperand(1).getOpcode() == ISD::Constant) {
2666 G = cast<GlobalAddressSDNode>(Op2.getOperand(0));
2667 SrcDelta = cast<ConstantSDNode>(Op2.getOperand(1))->getValue();
2670 GlobalVariable *GV = dyn_cast<GlobalVariable>(G->getGlobal());
2672 Str = GV->getStringValue(false);
2680 for (unsigned i = 0; i < NumMemOps; i++) {
2681 MVT::ValueType VT = MemOps[i];
2682 unsigned VTSize = getSizeInBits(VT) / 8;
2683 SDOperand Value, Chain, Store;
2686 Value = getMemsetStringVal(VT, DAG, TLI, Str, SrcOff);
2689 DAG.getNode(ISD::STORE, MVT::Other, Chain, Value,
2690 getMemBasePlusOffset(Op1, DstOff, DAG, TLI),
2691 DAG.getSrcValue(I.getOperand(1), DstOff));
2693 Value = DAG.getLoad(VT, getRoot(),
2694 getMemBasePlusOffset(Op2, SrcOff, DAG, TLI),
2695 DAG.getSrcValue(I.getOperand(2), SrcOff));
2696 Chain = Value.getValue(1);
2698 DAG.getNode(ISD::STORE, MVT::Other, Chain, Value,
2699 getMemBasePlusOffset(Op1, DstOff, DAG, TLI),
2700 DAG.getSrcValue(I.getOperand(1), DstOff));
2702 OutChains.push_back(Store);
2711 if (!OutChains.empty()) {
2712 DAG.setRoot(DAG.getNode(ISD::TokenFactor, MVT::Other, OutChains));
2717 std::vector<SDOperand> Ops;
2718 Ops.push_back(getRoot());
2723 DAG.setRoot(DAG.getNode(Op, MVT::Other, Ops));
2726 //===----------------------------------------------------------------------===//
2727 // SelectionDAGISel code
2728 //===----------------------------------------------------------------------===//
2730 unsigned SelectionDAGISel::MakeReg(MVT::ValueType VT) {
2731 return RegMap->createVirtualRegister(TLI.getRegClassFor(VT));
2734 void SelectionDAGISel::getAnalysisUsage(AnalysisUsage &AU) const {
2735 // FIXME: we only modify the CFG to split critical edges. This
2736 // updates dom and loop info.
2740 /// OptimizeNoopCopyExpression - We have determined that the specified cast
2741 /// instruction is a noop copy (e.g. it's casting from one pointer type to
2742 /// another, int->uint, or int->sbyte on PPC).
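/// For example, a cast between two 32-bit integer types that is used in
/// several other blocks is re-emitted in each of those blocks, so the copies
/// can be coalesced away locally instead of keeping one cross-block virtual
/// register live.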
2744 /// Return true if any changes are made.
2745 static bool OptimizeNoopCopyExpression(CastInst *CI) {
2746 BasicBlock *DefBB = CI->getParent();
2748 /// InsertedCasts - Only insert a cast in each block once.
2749 std::map<BasicBlock*, CastInst*> InsertedCasts;
2751 bool MadeChange = false;
2752 for (Value::use_iterator UI = CI->use_begin(), E = CI->use_end();
2754 Use &TheUse = UI.getUse();
2755 Instruction *User = cast<Instruction>(*UI);
2757 // Figure out which BB this cast is used in. For PHI's this is the
2758 // appropriate predecessor block.
2759 BasicBlock *UserBB = User->getParent();
2760 if (PHINode *PN = dyn_cast<PHINode>(User)) {
2761 unsigned OpVal = UI.getOperandNo()/2;
2762 UserBB = PN->getIncomingBlock(OpVal);
2765 // Preincrement use iterator so we don't invalidate it.
2768 // If this user is in the same block as the cast, don't change the cast.
2769 if (UserBB == DefBB) continue;
2771 // If we have already inserted a cast into this block, use it.
2772 CastInst *&InsertedCast = InsertedCasts[UserBB];
2774 if (!InsertedCast) {
2775 BasicBlock::iterator InsertPt = UserBB->begin();
2776 while (isa<PHINode>(InsertPt)) ++InsertPt;
2779 new CastInst(CI->getOperand(0), CI->getType(), "", InsertPt);
2783 // Replace a use of the cast with a use of the new cast.
2784 TheUse = InsertedCast;
2787 // If we removed all uses, nuke the cast.
2788 if (CI->use_empty())
2789 CI->eraseFromParent();
2794 /// InsertGEPComputeCode - Insert code into BB to compute Ptr+PtrOffset,
2795 /// casting to the type of GEPI.
2796 static Instruction *InsertGEPComputeCode(Instruction *&V, BasicBlock *BB,
2797 Instruction *GEPI, Value *Ptr,
2799 if (V) return V; // Already computed.
2801 BasicBlock::iterator InsertPt;
2802 if (BB == GEPI->getParent()) {
2803 // If inserting into the GEP's block, insert right after the GEP.
2807 // Otherwise, insert at the top of BB, after any PHI nodes
2808 InsertPt = BB->begin();
2809 while (isa<PHINode>(InsertPt)) ++InsertPt;
2812 // If Ptr is itself a cast, but in some other BB, emit a copy of the cast into
2813 // BB so that there is only one value live across basic blocks (the cast input).
2815 if (CastInst *CI = dyn_cast<CastInst>(Ptr))
2816 if (CI->getParent() != BB && isa<PointerType>(CI->getOperand(0)->getType()))
2817 Ptr = new CastInst(CI->getOperand(0), CI->getType(), "", InsertPt);
2819 // Add the offset, cast it to the right type.
2820 Ptr = BinaryOperator::createAdd(Ptr, PtrOffset, "", InsertPt);
2821 return V = new CastInst(Ptr, GEPI->getType(), "", InsertPt);
2824 /// ReplaceUsesOfGEPInst - Replace all uses of RepPtr with inserted code to
2825 /// compute its value. The RepPtr value can be computed with Ptr+PtrOffset. One
2826 /// trivial way of doing this would be to evaluate Ptr+PtrOffset in RepPtr's
2827 /// block, then ReplaceAllUsesWith'ing everything. However, we would prefer to
2828 /// sink PtrOffset into user blocks where doing so will likely allow us to fold
2829 /// the constant add into a load or store instruction. Additionally, if a user
2830 /// is a pointer-pointer cast, we look through it to find its users.
2831 static void ReplaceUsesOfGEPInst(Instruction *RepPtr, Value *Ptr,
2832 Constant *PtrOffset, BasicBlock *DefBB,
2833 GetElementPtrInst *GEPI,
2834 std::map<BasicBlock*,Instruction*> &InsertedExprs) {
2835 while (!RepPtr->use_empty()) {
2836 Instruction *User = cast<Instruction>(RepPtr->use_back());
2838 // If the user is a Pointer-Pointer cast, recurse.
2839 if (isa<CastInst>(User) && isa<PointerType>(User->getType())) {
2840 ReplaceUsesOfGEPInst(User, Ptr, PtrOffset, DefBB, GEPI, InsertedExprs);
2842 // Drop the use of RepPtr. The cast is dead. Don't delete it now, else we
2843 // could invalidate an iterator.
2844 User->setOperand(0, UndefValue::get(RepPtr->getType()));
2848 // If this is a load of the pointer, or a store through the pointer, emit
2849 // the increment into the load/store block.
2850 Instruction *NewVal;
2851 if (isa<LoadInst>(User) ||
2852 (isa<StoreInst>(User) && User->getOperand(0) != RepPtr)) {
2853 NewVal = InsertGEPComputeCode(InsertedExprs[User->getParent()],
2854 User->getParent(), GEPI,
2857 // If this use is not foldable into the addressing mode, use a version
2858 // emitted in the GEP block.
2859 NewVal = InsertGEPComputeCode(InsertedExprs[DefBB], DefBB, GEPI,
2863 if (GEPI->getType() != RepPtr->getType()) {
2864 BasicBlock::iterator IP = NewVal;
2866 NewVal = new CastInst(NewVal, RepPtr->getType(), "", IP);
2868 User->replaceUsesOfWith(RepPtr, NewVal);
2873 /// OptimizeGEPExpression - Since we are doing basic-block-at-a-time instruction
2874 /// selection, we want to be a bit careful about some things. In particular, if
2875 /// we have a GEP instruction that is used in a different block than it is
2876 /// defined, the addressing expression of the GEP cannot be folded into loads or
2877 /// stores that use it. In this case, decompose the GEP and move constant
2878 /// indices into blocks that use it.
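/// For example, if a GEP with a constant byte offset is defined in one block
/// but only used by a load in another, the "add ptr, offset" is re-emitted in
/// the load's block, where it can usually be folded into the load's
/// addressing mode.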
2879 static bool OptimizeGEPExpression(GetElementPtrInst *GEPI,
2880 const TargetData *TD) {
2881 // If this GEP is only used inside the block it is defined in, there is no
2882 // need to rewrite it.
2883 bool isUsedOutsideDefBB = false;
2884 BasicBlock *DefBB = GEPI->getParent();
2885 for (Value::use_iterator UI = GEPI->use_begin(), E = GEPI->use_end();
2887 if (cast<Instruction>(*UI)->getParent() != DefBB) {
2888 isUsedOutsideDefBB = true;
2892 if (!isUsedOutsideDefBB) return false;
2894 // If this GEP has no non-zero constant indices, there is nothing we can do; ignore it.
2896 bool hasConstantIndex = false;
2897 bool hasVariableIndex = false;
2898 for (GetElementPtrInst::op_iterator OI = GEPI->op_begin()+1,
2899 E = GEPI->op_end(); OI != E; ++OI) {
2900 if (ConstantInt *CI = dyn_cast<ConstantInt>(*OI)) {
2901 if (CI->getRawValue()) {
2902 hasConstantIndex = true;
2906 hasVariableIndex = true;
2910 // If this is a "GEP X, 0, 0, 0", turn this into a cast.
2911 if (!hasConstantIndex && !hasVariableIndex) {
2912 Value *NC = new CastInst(GEPI->getOperand(0), GEPI->getType(),
2913 GEPI->getName(), GEPI);
2914 GEPI->replaceAllUsesWith(NC);
2915 GEPI->eraseFromParent();
2919 // If this is a GEP &Alloca, 0, 0, forward subst the frame index into uses.
2920 if (!hasConstantIndex && !isa<AllocaInst>(GEPI->getOperand(0)))
2923 // Otherwise, decompose the GEP instruction into multiplies and adds. Sum the
2924 // constant offset (which we now know is non-zero) and deal with it later.
2925 uint64_t ConstantOffset = 0;
2926 const Type *UIntPtrTy = TD->getIntPtrType();
2927 Value *Ptr = new CastInst(GEPI->getOperand(0), UIntPtrTy, "", GEPI);
2928 const Type *Ty = GEPI->getOperand(0)->getType();
2930 for (GetElementPtrInst::op_iterator OI = GEPI->op_begin()+1,
2931 E = GEPI->op_end(); OI != E; ++OI) {
2933 if (const StructType *StTy = dyn_cast<StructType>(Ty)) {
2934 unsigned Field = cast<ConstantUInt>(Idx)->getValue();
2936 ConstantOffset += TD->getStructLayout(StTy)->MemberOffsets[Field];
2937 Ty = StTy->getElementType(Field);
2939 Ty = cast<SequentialType>(Ty)->getElementType();
2941 // Handle constant subscripts.
2942 if (ConstantInt *CI = dyn_cast<ConstantInt>(Idx)) {
2943 if (CI->getRawValue() == 0) continue;
2945 if (ConstantSInt *CSI = dyn_cast<ConstantSInt>(CI))
2946 ConstantOffset += (int64_t)TD->getTypeSize(Ty)*CSI->getValue();
2948 ConstantOffset+=TD->getTypeSize(Ty)*cast<ConstantUInt>(CI)->getValue();
2952 // Ptr = Ptr + Idx * ElementSize;
2954 // Cast Idx to UIntPtrTy if needed.
2955 Idx = new CastInst(Idx, UIntPtrTy, "", GEPI);
2957 uint64_t ElementSize = TD->getTypeSize(Ty);
2958 // Mask off bits that should not be set.
2959 ElementSize &= ~0ULL >> (64-UIntPtrTy->getPrimitiveSizeInBits());
2960 Constant *SizeCst = ConstantUInt::get(UIntPtrTy, ElementSize);
2962 // Multiply by the element size and add to the base.
2963 Idx = BinaryOperator::createMul(Idx, SizeCst, "", GEPI);
2964 Ptr = BinaryOperator::createAdd(Ptr, Idx, "", GEPI);
2968 // Make sure that the offset fits in uintptr_t.
2969 ConstantOffset &= ~0ULL >> (64-UIntPtrTy->getPrimitiveSizeInBits());
2970 Constant *PtrOffset = ConstantUInt::get(UIntPtrTy, ConstantOffset);
2972 // Okay, we have now emitted all of the variable index parts to the BB that
2973 // the GEP is defined in. Loop over all of the using instructions, inserting
2974 // an "add Ptr, ConstantOffset" into each block that uses it and update the
2975 // instruction to use the newly computed value, making GEPI dead. When the
2976 // user is a load or store instruction address, we emit the add into the user
2977 // block, otherwise we use a canonical version right next to the gep (these
2978 // won't be foldable as addresses, so we might as well share the computation).
2980 std::map<BasicBlock*,Instruction*> InsertedExprs;
2981 ReplaceUsesOfGEPInst(GEPI, Ptr, PtrOffset, DefBB, GEPI, InsertedExprs);
2983 // Finally, the GEP is dead, remove it.
2984 GEPI->eraseFromParent();
2989 bool SelectionDAGISel::runOnFunction(Function &Fn) {
2990 MachineFunction &MF = MachineFunction::construct(&Fn, TLI.getTargetMachine());
2991 RegMap = MF.getSSARegMap();
2992 DEBUG(std::cerr << "\n\n\n=== " << Fn.getName() << "\n");
2994 // First, split all critical edges for PHI nodes with incoming values that are
2995 // constants, this way the load of the constant into a vreg will not be placed
2996 // into MBBs that are used some other way.
2998 // In this pass we also look for GEP and cast instructions that are used
2999 // across basic blocks and rewrite them to improve basic-block-at-a-time selection.
3003 bool MadeChange = true;
3004 while (MadeChange) {
3006 for (Function::iterator BB = Fn.begin(), E = Fn.end(); BB != E; ++BB) {
3008 BasicBlock::iterator BBI;
3009 for (BBI = BB->begin(); (PN = dyn_cast<PHINode>(BBI)); ++BBI)
3010 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
3011 if (isa<Constant>(PN->getIncomingValue(i)))
3012 SplitCriticalEdge(PN->getIncomingBlock(i), BB);
3014 for (BasicBlock::iterator E = BB->end(); BBI != E; ) {
3015 Instruction *I = BBI++;
3016 if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(I)) {
3017 MadeChange |= OptimizeGEPExpression(GEPI, TLI.getTargetData());
3018 } else if (CastInst *CI = dyn_cast<CastInst>(I)) {
3019 // If this is a noop copy, sink it into user blocks to reduce the number
3020 // of virtual registers that must be created and coalesced.
3021 MVT::ValueType SrcVT = TLI.getValueType(CI->getOperand(0)->getType());
3022 MVT::ValueType DstVT = TLI.getValueType(CI->getType());
3024 // If this is an fp<->int conversion, it is not a noop copy; skip it.
3025 if (MVT::isInteger(SrcVT) != MVT::isInteger(DstVT))
3028 // If this is an extension, it will be a zero or sign extension, which is not a noop copy.
3030 if (SrcVT < DstVT) continue;
3032 // If these values will be promoted, find out what they will be promoted
3033 // to. This helps us consider truncates on PPC as noop copies when they are.
3035 if (TLI.getTypeAction(SrcVT) == TargetLowering::Promote)
3036 SrcVT = TLI.getTypeToTransformTo(SrcVT);
3037 if (TLI.getTypeAction(DstVT) == TargetLowering::Promote)
3038 DstVT = TLI.getTypeToTransformTo(DstVT);
3040 // If, after promotion, these are the same types, this is a noop copy.
3042 MadeChange |= OptimizeNoopCopyExpression(CI);
3048 FunctionLoweringInfo FuncInfo(TLI, Fn, MF);
3050 for (Function::iterator I = Fn.begin(), E = Fn.end(); I != E; ++I)
3051 SelectBasicBlock(I, MF, FuncInfo);
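/// CopyValueToVirtualRegister - Copy the SelectionDAG value computed for V
/// into virtual register Reg (and any following registers), promoting,
/// expanding, or scattering packed-vector elements as required by the
/// target's legal types.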
3057 SDOperand SelectionDAGISel::
3058 CopyValueToVirtualRegister(SelectionDAGLowering &SDL, Value *V, unsigned Reg) {
3059 SDOperand Op = SDL.getValue(V);
3060 assert((Op.getOpcode() != ISD::CopyFromReg ||
3061 cast<RegisterSDNode>(Op.getOperand(1))->getReg() != Reg) &&
3062 "Copy from a reg to the same reg!");
3064 // If this type is not legal, we must make sure to not create an invalid register use.
3066 MVT::ValueType SrcVT = Op.getValueType();
3067 MVT::ValueType DestVT = TLI.getTypeToTransformTo(SrcVT);
3068 SelectionDAG &DAG = SDL.DAG;
3069 if (SrcVT == DestVT) {
3070 return DAG.getCopyToReg(SDL.getRoot(), Reg, Op);
3071 } else if (SrcVT == MVT::Vector) {
3072 // Handle copies from generic vectors to registers.
3073 MVT::ValueType PTyElementVT, PTyLegalElementVT;
3074 unsigned NE = TLI.getPackedTypeBreakdown(cast<PackedType>(V->getType()),
3075 PTyElementVT, PTyLegalElementVT);
3077 // Insert a VBIT_CONVERT of the input vector to a "N x PTyElementVT"
3078 // MVT::Vector type.
3079 Op = DAG.getNode(ISD::VBIT_CONVERT, MVT::Vector, Op,
3080 DAG.getConstant(NE, MVT::i32),
3081 DAG.getValueType(PTyElementVT));
3083 // Loop over all of the elements of the resultant vector,
3084 // VEXTRACT_VECTOR_ELT'ing them, converting them to PTyLegalElementVT, then
3085 // copying them into output registers.
3086 std::vector<SDOperand> OutChains;
3087 SDOperand Root = SDL.getRoot();
3088 for (unsigned i = 0; i != NE; ++i) {
3089 SDOperand Elt = DAG.getNode(ISD::VEXTRACT_VECTOR_ELT, PTyElementVT,
3090 Op, DAG.getConstant(i, MVT::i32));
3091 if (PTyElementVT == PTyLegalElementVT) {
3092 // Elements are legal.
3093 OutChains.push_back(DAG.getCopyToReg(Root, Reg++, Elt));
3094 } else if (PTyLegalElementVT > PTyElementVT) {
3095 // Elements are promoted.
3096 if (MVT::isFloatingPoint(PTyLegalElementVT))
3097 Elt = DAG.getNode(ISD::FP_EXTEND, PTyLegalElementVT, Elt);
3099 Elt = DAG.getNode(ISD::ANY_EXTEND, PTyLegalElementVT, Elt);
3100 OutChains.push_back(DAG.getCopyToReg(Root, Reg++, Elt));
3102 // Elements are expanded.
3103 // The src value is expanded into multiple registers.
3104 SDOperand Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, PTyLegalElementVT,
3105 Elt, DAG.getConstant(0, MVT::i32));
3106 SDOperand Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, PTyLegalElementVT,
3107 Elt, DAG.getConstant(1, MVT::i32));
3108 OutChains.push_back(DAG.getCopyToReg(Root, Reg++, Lo));
3109 OutChains.push_back(DAG.getCopyToReg(Root, Reg++, Hi));
3112 return DAG.getNode(ISD::TokenFactor, MVT::Other, OutChains);
3113 } else if (SrcVT < DestVT) {
3114 // The src value is promoted to the register.
3115 if (MVT::isFloatingPoint(SrcVT))
3116 Op = DAG.getNode(ISD::FP_EXTEND, DestVT, Op);
3118 Op = DAG.getNode(ISD::ANY_EXTEND, DestVT, Op);
3119 return DAG.getCopyToReg(SDL.getRoot(), Reg, Op);
3121 // The src value is expanded into multiple registers.
3122 SDOperand Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DestVT,
3123 Op, DAG.getConstant(0, MVT::i32));
3124 SDOperand Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DestVT,
3125 Op, DAG.getConstant(1, MVT::i32));
3126 Op = DAG.getCopyToReg(SDL.getRoot(), Reg, Lo);
3127 return DAG.getCopyToReg(Op, Reg+1, Hi);
3131 void SelectionDAGISel::
3132 LowerArguments(BasicBlock *BB, SelectionDAGLowering &SDL,
3133 std::vector<SDOperand> &UnorderedChains) {
3134 // If this is the entry block, emit arguments.
3135 Function &F = *BB->getParent();
3136 FunctionLoweringInfo &FuncInfo = SDL.FuncInfo;
3137 SDOperand OldRoot = SDL.DAG.getRoot();
3138 std::vector<SDOperand> Args = TLI.LowerArguments(F, SDL.DAG);
3141 for (Function::arg_iterator AI = F.arg_begin(), E = F.arg_end();
3143 if (!AI->use_empty()) {
3144 SDL.setValue(AI, Args[a]);
3146 // If this argument is live outside of the entry block, insert a copy from
3147 // wherever we got it to the vreg that other BB's will reference it as.
3148 if (FuncInfo.ValueMap.count(AI)) {
3150 CopyValueToVirtualRegister(SDL, AI, FuncInfo.ValueMap[AI]);
3151 UnorderedChains.push_back(Copy);
3155 // Finally, if the target has anything special to do, allow it to do so.
3156 // FIXME: this should insert code into the DAG!
3157 EmitFunctionEntryCode(F, SDL.DAG.getMachineFunction());
3160 void SelectionDAGISel::BuildSelectionDAG(SelectionDAG &DAG, BasicBlock *LLVMBB,
3161 std::vector<std::pair<MachineInstr*, unsigned> > &PHINodesToUpdate,
3162 FunctionLoweringInfo &FuncInfo) {
3163 SelectionDAGLowering SDL(DAG, TLI, FuncInfo);
3165 std::vector<SDOperand> UnorderedChains;
3167 // Lower any arguments needed in this block if this is the entry block.
3168 if (LLVMBB == &LLVMBB->getParent()->front())
3169 LowerArguments(LLVMBB, SDL, UnorderedChains);
3171 BB = FuncInfo.MBBMap[LLVMBB];
3172 SDL.setCurrentBasicBlock(BB);
3174 // Lower all of the non-terminator instructions.
3175 for (BasicBlock::iterator I = LLVMBB->begin(), E = --LLVMBB->end();
3179 // Ensure that all instructions which are used outside of their defining
3180 // blocks are available as virtual registers.
3181 for (BasicBlock::iterator I = LLVMBB->begin(), E = LLVMBB->end(); I != E;++I)
3182 if (!I->use_empty() && !isa<PHINode>(I)) {
3183 std::map<const Value*, unsigned>::iterator VMI =FuncInfo.ValueMap.find(I);
3184 if (VMI != FuncInfo.ValueMap.end())
3185 UnorderedChains.push_back(
3186 CopyValueToVirtualRegister(SDL, I, VMI->second));
3189 // Handle PHI nodes in successor blocks. Emit code into the SelectionDAG to
3190 // ensure constants are generated when needed. Remember the virtual registers
3191 // that need to be added to the Machine PHI nodes as input. We cannot just
3192 // directly add them, because expansion might result in multiple MBB's for one
3193 // BB. As such, the start of the BB might correspond to a different MBB than the end.
3197 // Emit constants only once even if used by multiple PHI nodes.
3198 std::map<Constant*, unsigned> ConstantsOut;
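
  // A machine PHI node cannot reference a constant directly; any constant
  // incoming value must first be copied into a virtual register in this
  // predecessor block, and that register becomes the PHI operand.
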
  // Check successor nodes' PHI nodes that expect a constant to be available
  // from this block.
  TerminatorInst *TI = LLVMBB->getTerminator();
  for (unsigned succ = 0, e = TI->getNumSuccessors(); succ != e; ++succ) {
    BasicBlock *SuccBB = TI->getSuccessor(succ);
    MachineBasicBlock::iterator MBBI = FuncInfo.MBBMap[SuccBB]->begin();
    PHINode *PN;

    // At this point we know that there is a 1-1 correspondence between LLVM PHI
    // nodes and Machine PHI nodes, but the incoming operands have not been
    // emitted yet.
    for (BasicBlock::iterator I = SuccBB->begin();
         (PN = dyn_cast<PHINode>(I)); ++I)
      if (!PN->use_empty()) {
        unsigned Reg;
        Value *PHIOp = PN->getIncomingValueForBlock(LLVMBB);
        if (Constant *C = dyn_cast<Constant>(PHIOp)) {
          unsigned &RegOut = ConstantsOut[C];
          if (RegOut == 0) {
            RegOut = FuncInfo.CreateRegForValue(C);
            UnorderedChains.push_back(
                             CopyValueToVirtualRegister(SDL, C, RegOut));
          }
          Reg = RegOut;
        } else {
          Reg = FuncInfo.ValueMap[PHIOp];
          if (Reg == 0) {
            assert(isa<AllocaInst>(PHIOp) &&
                   FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(PHIOp)) &&
                   "Didn't codegen value into a register!??");
            Reg = FuncInfo.CreateRegForValue(PHIOp);
            UnorderedChains.push_back(
                             CopyValueToVirtualRegister(SDL, PHIOp, Reg));
          }
        }

        // Remember that this register needs to be added to the machine PHI
        // node as the input for this MBB.
        MVT::ValueType VT = TLI.getValueType(PN->getType());
        unsigned NumElements;
        if (VT != MVT::Vector)
          NumElements = TLI.getNumElements(VT);
        else {
          MVT::ValueType VT1, VT2;
          NumElements =
            TLI.getPackedTypeBreakdown(cast<PackedType>(PN->getType()),
                                       VT1, VT2);
        }
        for (unsigned i = 0, e = NumElements; i != e; ++i)
          PHINodesToUpdate.push_back(std::make_pair(MBBI++, Reg+i));
      }
  }
  ConstantsOut.clear();

  // Turn all of the unordered chains into one factored node.
  if (!UnorderedChains.empty()) {
    SDOperand Root = SDL.getRoot();
    if (Root.getOpcode() != ISD::EntryToken) {
      unsigned i = 0, e = UnorderedChains.size();
      for (; i != e; ++i) {
        assert(UnorderedChains[i].Val->getNumOperands() > 1);
        if (UnorderedChains[i].Val->getOperand(0) == Root)
          break;  // Don't add the root if we already indirectly depend on it.
      }

      if (i == e)
        UnorderedChains.push_back(Root);
    }
    DAG.setRoot(DAG.getNode(ISD::TokenFactor, MVT::Other, UnorderedChains));
  }
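
  // The TokenFactor above merges all of the copy chains into a single chain,
  // so the copies may be scheduled in any order relative to one another but
  // must all complete before anything that depends on the new root.
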
  // Lower the terminator after the copies are emitted.
  SDL.visit(*LLVMBB->getTerminator());

  // Copy over any CaseBlock records that may now exist due to SwitchInst
  // lowering, as well as any jump table information.
  SwitchCases.clear();
  SwitchCases = SDL.SwitchCases;
  JT = SDL.JT;

  // Make sure the root of the DAG is up-to-date.
  DAG.setRoot(SDL.getRoot());
}

void SelectionDAGISel::CodeGenAndEmitDAG(SelectionDAG &DAG) {
  // Run the DAG combiner in pre-legalize mode.
  DAG.Combine(false);

  DEBUG(std::cerr << "Lowered selection DAG:\n");
  DEBUG(DAG.dump());

  // Second step, hack on the DAG until it only uses operations and types that
  // the target supports.
  DAG.Legalize();

  DEBUG(std::cerr << "Legalized selection DAG:\n");
  DEBUG(DAG.dump());

  // Run the DAG combiner in post-legalize mode.
  DAG.Combine(true);

  if (ViewISelDAGs) DAG.viewGraph();

  // Third, instruction select all of the operations to machine code, adding
  // the code to the MachineBasicBlock.
  InstructionSelectBasicBlock(DAG);

  DEBUG(std::cerr << "Selected machine code:\n");
  DEBUG(BB->dump());
}
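
/// SelectBasicBlock - Lower one LLVM basic block to a DAG, emit it as machine
/// code, and then fix up the machine PHI nodes in successor blocks, taking
/// into account the extra blocks produced by jump-table and switch lowering.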
void SelectionDAGISel::SelectBasicBlock(BasicBlock *LLVMBB, MachineFunction &MF,
                                        FunctionLoweringInfo &FuncInfo) {
  std::vector<std::pair<MachineInstr*, unsigned> > PHINodesToUpdate;

  SelectionDAG DAG(TLI, MF, getAnalysisToUpdate<MachineDebugInfo>());

  // First step, lower LLVM code to some DAG.  This DAG may use operations and
  // types that are not supported by the target.
  BuildSelectionDAG(DAG, LLVMBB, PHINodesToUpdate, FuncInfo);

  // Second step, emit the lowered DAG as machine code.
  CodeGenAndEmitDAG(DAG);

  // Now that we know what the last MBB the LLVM BB expanded into is, update
  // PHI nodes in successor blocks.
  if (SwitchCases.empty() && JT.Reg == 0) {
    for (unsigned i = 0, e = PHINodesToUpdate.size(); i != e; ++i) {
      MachineInstr *PHI = PHINodesToUpdate[i].first;
      assert(PHI->getOpcode() == TargetInstrInfo::PHI &&
             "This is not a machine PHI node that we are updating!");
      PHI->addRegOperand(PHINodesToUpdate[i].second);
      PHI->addMachineBasicBlockOperand(BB);
    }
    return;
  }

  // If the JumpTable record is filled in, then we need to emit a jump table.
  // Updating the PHI nodes is tricky in this case, since we need to determine
  // whether the PHI is a successor of the range check MBB or the jump table
  // MBB.
  if (JT.Reg) {
    assert(SwitchCases.empty() && "Cannot have jump table and lowered switch");
    SelectionDAG SDAG(TLI, MF, getAnalysisToUpdate<MachineDebugInfo>());
    SelectionDAGLowering SDL(SDAG, TLI, FuncInfo);
    MachineBasicBlock *RangeBB = BB;
    // Set the current basic block to the mbb we wish to insert the code into.
    BB = JT.MBB;
    SDL.setCurrentBasicBlock(BB);
    // Emit the code for the jump table.
    SDL.visitJumpTable(JT);
    SDAG.setRoot(SDL.getRoot());
    CodeGenAndEmitDAG(SDAG);
    // Update the PHI nodes for this block and the jump-table destinations.
    for (unsigned pi = 0, pe = PHINodesToUpdate.size(); pi != pe; ++pi) {
      MachineInstr *PHI = PHINodesToUpdate[pi].first;
      MachineBasicBlock *PHIBB = PHI->getParent();
      assert(PHI->getOpcode() == TargetInstrInfo::PHI &&
             "This is not a machine PHI node that we are updating!");
      // PHIs in the default block are reached from the range check block.
      if (PHIBB == JT.Default) {
        PHI->addRegOperand(PHINodesToUpdate[pi].second);
        PHI->addMachineBasicBlockOperand(RangeBB);
      }
      // PHIs in actual successors of the jump table block are reached from it.
      if (BB->succ_end() != std::find(BB->succ_begin(), BB->succ_end(), PHIBB)) {
        PHI->addRegOperand(PHINodesToUpdate[pi].second);
        PHI->addMachineBasicBlockOperand(BB);
      }
    }
    return;
  }

  // If we generated any switch lowering information, build and codegen any
  // additional DAGs necessary.
  for (unsigned i = 0, e = SwitchCases.size(); i != e; ++i) {
    SelectionDAG SDAG(TLI, MF, getAnalysisToUpdate<MachineDebugInfo>());
    SelectionDAGLowering SDL(SDAG, TLI, FuncInfo);
    // Set the current basic block to the mbb we wish to insert the code into.
    BB = SwitchCases[i].ThisBB;
    SDL.setCurrentBasicBlock(BB);
    // Emit the comparison code for this case block.
    SDL.visitSwitchCase(SwitchCases[i]);
    SDAG.setRoot(SDL.getRoot());
    CodeGenAndEmitDAG(SDAG);
    // Iterate over the phi nodes, if there is a phi node in a successor of this
    // block (for instance, the default block), then add a pair of operands to
    // the phi node for this block, as if we were coming from the original
    // BB before switch expansion.
    for (unsigned pi = 0, pe = PHINodesToUpdate.size(); pi != pe; ++pi) {
      MachineInstr *PHI = PHINodesToUpdate[pi].first;
      MachineBasicBlock *PHIBB = PHI->getParent();
      assert(PHI->getOpcode() == TargetInstrInfo::PHI &&
             "This is not a machine PHI node that we are updating!");
      if (PHIBB == SwitchCases[i].LHSBB || PHIBB == SwitchCases[i].RHSBB) {
        PHI->addRegOperand(PHINodesToUpdate[pi].second);
        PHI->addMachineBasicBlockOperand(BB);
      }
    }
  }
}

//===----------------------------------------------------------------------===//
/// ScheduleAndEmitDAG - Pick a safe ordering and emit instructions for each
/// target node in the graph.
void SelectionDAGISel::ScheduleAndEmitDAG(SelectionDAG &DAG) {
  if (ViewSchedDAGs) DAG.viewGraph();
  ScheduleDAG *SL = NULL;

  switch (ISHeuristic) {
  default: assert(0 && "Unrecognized scheduling heuristic");
  case defaultScheduling:
    if (TLI.getSchedulingPreference() == TargetLowering::SchedulingForLatency)
      SL = createTDListDAGScheduler(DAG, BB, CreateTargetHazardRecognizer());
    else {
      assert(TLI.getSchedulingPreference() ==
             TargetLowering::SchedulingForRegPressure && "Unknown sched type!");
      SL = createBURRListDAGScheduler(DAG, BB);
    }
    break;
  case noScheduling:
    SL = createBFS_DAGScheduler(DAG, BB);
    break;
  case simpleScheduling:
    SL = createSimpleDAGScheduler(false, DAG, BB);
    break;
  case simpleNoItinScheduling:
    SL = createSimpleDAGScheduler(true, DAG, BB);
    break;
  case listSchedulingBURR:
    SL = createBURRListDAGScheduler(DAG, BB);
    break;
  case listSchedulingTDRR:
    SL = createTDRRListDAGScheduler(DAG, BB);
    break;
  case listSchedulingTD:
    SL = createTDListDAGScheduler(DAG, BB, CreateTargetHazardRecognizer());
    break;
  }

  // Run the chosen scheduler to order and emit the instructions, then free it.
  BB = SL->Run();
  delete SL;
}
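
/// CreateTargetHazardRecognizer - Return a newly allocated hazard recognizer
/// for the top-down list scheduler.  This base implementation models no
/// pipeline hazards; a target that does have scheduling hazards is expected
/// to supply its own recognizer in place of it.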
HazardRecognizer *SelectionDAGISel::CreateTargetHazardRecognizer() {
  return new HazardRecognizer();
}

/// SelectInlineAsmMemoryOperands - Calls to this are automatically generated
/// by tblgen.  Others should not call it.
void SelectionDAGISel::
SelectInlineAsmMemoryOperands(std::vector<SDOperand> &Ops, SelectionDAG &DAG) {
  std::vector<SDOperand> InOps;
  std::swap(InOps, Ops);

  Ops.push_back(InOps[0]);  // input chain.
  Ops.push_back(InOps[1]);  // input asm string.

  unsigned i = 2, e = InOps.size();
  if (InOps[e-1].getValueType() == MVT::Flag)
    --e;  // Don't process a flag operand if it is here.
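
  // Each inline asm operand is described by a constant "flag" word followed by
  // that operand's values: the low 3 bits of the flag give the operand kind
  // (4 == MEM) and the bits above them give the number of values that follow
  // for this operand.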
  while (i != e) {
    unsigned Flags = cast<ConstantSDNode>(InOps[i])->getValue();
    if ((Flags & 7) != 4 /*MEM*/) {
      // Just skip over this operand, copying the operands verbatim.
      Ops.insert(Ops.end(), InOps.begin()+i, InOps.begin()+i+(Flags >> 3) + 1);
      i += (Flags >> 3) + 1;
    } else {
      assert((Flags >> 3) == 1 && "Memory operand with multiple values?");
      // Otherwise, this is a memory operand.  Ask the target to select it.
      std::vector<SDOperand> SelOps;
      if (SelectInlineAsmMemoryOperand(InOps[i+1], 'm', SelOps, DAG)) {
        std::cerr << "Could not match memory address.  Inline asm failure!\n";
        exit(1);
      }

      // Add this to the output node.
      Ops.push_back(DAG.getConstant(4/*MEM*/ | (SelOps.size() << 3), MVT::i32));
      Ops.insert(Ops.end(), SelOps.begin(), SelOps.end());
      i += 2;
    }
  }

  // Add the flag input back if present.
  if (e != InOps.size())
    Ops.push_back(InOps.back());
}