//===-- SelectionDAGISel.cpp - Implement the SelectionDAGISel class -------===//
//
// The LLVM Compiler Infrastructure
//
// This file was developed by the LLVM research group and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements the SelectionDAGISel class.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "isel"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/CodeGen/ScheduleDAG.h"
#include "llvm/CallingConv.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Function.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Instructions.h"
#include "llvm/Intrinsics.h"
#include "llvm/CodeGen/IntrinsicLowering.h"
#include "llvm/CodeGen/MachineDebugInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SSARegMap.h"
#include "llvm/Target/MRegisterInfo.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetFrameInfo.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/Debug.h"
ViewISelDAGs("view-isel-dags", cl::Hidden,
             cl::desc("Pop up a window to show isel dags as they are selected"));
ViewSchedDAGs("view-sched-dags", cl::Hidden,
              cl::desc("Pop up a window to show sched dags as they are processed"));

static const bool ViewISelDAGs = 0;
static const bool ViewSchedDAGs = 0;

cl::opt<SchedHeuristics>
            cl::desc("Choose scheduling style"),
            cl::init(noScheduling),
            clEnumValN(noScheduling, "none",
                       "No scheduling: breadth first sequencing"),
            clEnumValN(simpleScheduling, "simple",
                       "Simple two pass scheduling: minimize critical path "
                       "and maximize processor utilization"),
            clEnumValN(simpleNoItinScheduling, "simple-noitin",
                       "Simple two pass scheduling: Same as simple "
                       "except using generic latency"),
//===--------------------------------------------------------------------===//
/// FunctionLoweringInfo - This contains information that is global to a
/// function that is used when lowering a region of the function.
class FunctionLoweringInfo {
  FunctionLoweringInfo(TargetLowering &TLI, Function &Fn, MachineFunction &MF);

  /// MBBMap - A mapping from LLVM basic blocks to their machine code entry.
  std::map<const BasicBlock*, MachineBasicBlock *> MBBMap;

  /// ValueMap - Since we emit code for the function a basic block at a time,
  /// we must remember which virtual registers hold cross-basic-block values.
  std::map<const Value*, unsigned> ValueMap;

  /// StaticAllocaMap - Keep track of frame indices for fixed sized allocas in
  /// the entry block. This allows the allocas to be efficiently referenced
  /// anywhere in the function.
  std::map<const AllocaInst*, int> StaticAllocaMap;

  unsigned MakeReg(MVT::ValueType VT) {
    return RegMap->createVirtualRegister(TLI.getRegClassFor(VT));

  unsigned CreateRegForValue(const Value *V) {
    MVT::ValueType VT = TLI.getValueType(V->getType());
    // The common case is that we will only create one register for this
    // value. If we have that case, create and return the virtual register.
    unsigned NV = TLI.getNumElements(VT);

    // If we are promoting this value, pick the next largest supported type.
    return MakeReg(TLI.getTypeToTransformTo(VT));

    // If this value is represented with multiple target registers, make sure
    // to create enough consecutive registers of the right (smaller) type.
    unsigned NT = VT-1;  // Find the type to use.
    while (TLI.getNumElements((MVT::ValueType)NT) != 1)

    unsigned R = MakeReg((MVT::ValueType)NT);
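    // Allocate the remaining NV-1 registers; callers rely on them being
    // numbered consecutively after R.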
    for (unsigned i = 1; i != NV; ++i)
      MakeReg((MVT::ValueType)NT);

  unsigned InitializeRegForValue(const Value *V) {
    unsigned &R = ValueMap[V];
    assert(R == 0 && "Already initialized this value register!");
    return R = CreateRegForValue(V);

/// isUsedOutsideOfDefiningBlock - Return true if this instruction is used by
/// PHI nodes or outside of the basic block that defines it.
static bool isUsedOutsideOfDefiningBlock(Instruction *I) {
  if (isa<PHINode>(I)) return true;
  BasicBlock *BB = I->getParent();
  for (Value::use_iterator UI = I->use_begin(), E = I->use_end(); UI != E; ++UI)
    if (cast<Instruction>(*UI)->getParent() != BB || isa<PHINode>(*UI))

/// isOnlyUsedInEntryBlock - If the specified argument is only used in the
/// entry block, return true.
static bool isOnlyUsedInEntryBlock(Argument *A) {
  BasicBlock *Entry = A->getParent()->begin();
  for (Value::use_iterator UI = A->use_begin(), E = A->use_end(); UI != E; ++UI)
    if (cast<Instruction>(*UI)->getParent() != Entry)
      return false;  // Use not in entry block.
FunctionLoweringInfo::FunctionLoweringInfo(TargetLowering &tli,
                                           Function &fn, MachineFunction &mf)
  : TLI(tli), Fn(fn), MF(mf), RegMap(MF.getSSARegMap()) {

  // Create a vreg for each argument register that is not dead and is used
  // outside of the entry block for the function.
  for (Function::arg_iterator AI = Fn.arg_begin(), E = Fn.arg_end();
    if (!isOnlyUsedInEntryBlock(AI))
      InitializeRegForValue(AI);

  // Initialize the mapping of values to registers. This is only set up for
  // instruction values that are used outside of the block that defines
  Function::iterator BB = Fn.begin(), EB = Fn.end();
  for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I)
    if (AllocaInst *AI = dyn_cast<AllocaInst>(I))
      if (ConstantUInt *CUI = dyn_cast<ConstantUInt>(AI->getArraySize())) {
        const Type *Ty = AI->getAllocatedType();
        uint64_t TySize = TLI.getTargetData().getTypeSize(Ty);
          std::max((unsigned)TLI.getTargetData().getTypeAlignment(Ty),

        // If the alignment of the value is smaller than the size of the value,
        // and if the size of the value is particularly small (<= 8 bytes),
        // round up to the size of the value for potentially better performance.
        // FIXME: This could be made better with a preferred alignment hook in
        // TargetData. It serves primarily to 8-byte align doubles for X86.
        if (Align < TySize && TySize <= 8) Align = TySize;
        TySize *= CUI->getValue();   // Get total allocated size.
        if (TySize == 0) TySize = 1; // Don't create zero-sized stack objects.
        StaticAllocaMap[AI] =
          MF.getFrameInfo()->CreateStackObject((unsigned)TySize, Align);

  for (; BB != EB; ++BB)
    for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I)
      if (!I->use_empty() && isUsedOutsideOfDefiningBlock(I))
        if (!isa<AllocaInst>(I) ||
            !StaticAllocaMap.count(cast<AllocaInst>(I)))
          InitializeRegForValue(I);

  // Create an initial MachineBasicBlock for each LLVM BasicBlock in F. This
  // also creates the initial PHI MachineInstrs, though none of the input
  // operands are populated.
  for (BB = Fn.begin(), EB = Fn.end(); BB != EB; ++BB) {
    MachineBasicBlock *MBB = new MachineBasicBlock(BB);
    MF.getBasicBlockList().push_back(MBB);

    // Create Machine PHI nodes for LLVM PHI nodes, lowering them as
    for (BasicBlock::iterator I = BB->begin();
         (PN = dyn_cast<PHINode>(I)); ++I)
      if (!PN->use_empty()) {
        unsigned NumElements =
          TLI.getNumElements(TLI.getValueType(PN->getType()));
        unsigned PHIReg = ValueMap[PN];
        assert(PHIReg && "PHI node does not have an assigned virtual register!");
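        // An expanded value (e.g. i64 on a 32-bit target) occupies NumElements
        // consecutive virtual registers, so emit one machine PHI node per piece.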
        for (unsigned i = 0; i != NumElements; ++i)
          BuildMI(MBB, TargetInstrInfo::PHI, PN->getNumOperands(), PHIReg+i);

//===----------------------------------------------------------------------===//
/// SelectionDAGLowering - This is the common target-independent lowering
/// implementation that is parameterized by a TargetLowering object.
/// Also, targets can overload any lowering method.
class SelectionDAGLowering {
  MachineBasicBlock *CurMBB;

  std::map<const Value*, SDOperand> NodeMap;

  /// PendingLoads - Loads are not emitted to the program immediately. We bunch
  /// them up and then emit token factor nodes when possible. This allows us to
  /// get simple disambiguation between loads without worrying about alias
  std::vector<SDOperand> PendingLoads;

  // TLI - This is information that describes the available target features we
  // need for lowering. This indicates when operations are unavailable,
  // implemented with a libcall, etc.
  const TargetData &TD;

  /// FuncInfo - Information about the function as a whole.
  FunctionLoweringInfo &FuncInfo;

  SelectionDAGLowering(SelectionDAG &dag, TargetLowering &tli,
                       FunctionLoweringInfo &funcinfo)
    : TLI(tli), DAG(dag), TD(DAG.getTarget().getTargetData()),

  /// getRoot - Return the current virtual root of the Selection DAG.
  SDOperand getRoot() {
    if (PendingLoads.empty())
      return DAG.getRoot();

    if (PendingLoads.size() == 1) {
      SDOperand Root = PendingLoads[0];
      PendingLoads.clear();

    // Otherwise, we have to make a token factor node.
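    // The TokenFactor takes every pending load chain as an operand, so any node
    // chained on the new root is ordered after all of those loads.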
    SDOperand Root = DAG.getNode(ISD::TokenFactor, MVT::Other, PendingLoads);
    PendingLoads.clear();

  void visit(Instruction &I) { visit(I.getOpcode(), I); }

  void visit(unsigned Opcode, User &I) {
    default: assert(0 && "Unknown instruction type encountered!");
      // Build the switch statement using the Instruction.def file.
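      // For example, the Add entry expands to roughly:
      //   case Instruction::Add: return visitAdd((BinaryOperator&)I);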
#define HANDLE_INST(NUM, OPCODE, CLASS) \
    case Instruction::OPCODE: return visit##OPCODE((CLASS&)I);
#include "llvm/Instruction.def"

  void setCurrentBasicBlock(MachineBasicBlock *MBB) { CurMBB = MBB; }

  SDOperand getIntPtrConstant(uint64_t Val) {
    return DAG.getConstant(Val, TLI.getPointerTy());

  SDOperand getValue(const Value *V) {
    SDOperand &N = NodeMap[V];

    const Type *VTy = V->getType();
    MVT::ValueType VT = TLI.getValueType(VTy);
    if (Constant *C = const_cast<Constant*>(dyn_cast<Constant>(V)))
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) {
        visit(CE->getOpcode(), *CE);
        assert(N.Val && "visit didn't populate the ValueMap!");
      } else if (GlobalValue *GV = dyn_cast<GlobalValue>(C)) {
        return N = DAG.getGlobalAddress(GV, VT);
      } else if (isa<ConstantPointerNull>(C)) {
        return N = DAG.getConstant(0, TLI.getPointerTy());
      } else if (isa<UndefValue>(C)) {
        return N = DAG.getNode(ISD::UNDEF, VT);
      } else if (ConstantFP *CFP = dyn_cast<ConstantFP>(C)) {
        return N = DAG.getConstantFP(CFP->getValue(), VT);
      } else if (const PackedType *PTy = dyn_cast<PackedType>(VTy)) {
        unsigned NumElements = PTy->getNumElements();
        MVT::ValueType PVT = TLI.getValueType(PTy->getElementType());
        MVT::ValueType TVT = MVT::getVectorType(PVT, NumElements);

        // Now that we know the number and type of the elements, push a
        // Constant or ConstantFP node onto the ops list for each element of
        // the packed constant.
        std::vector<SDOperand> Ops;
        if (ConstantPacked *CP = dyn_cast<ConstantPacked>(C)) {
          if (MVT::isFloatingPoint(PVT)) {
            for (unsigned i = 0; i != NumElements; ++i) {
              const ConstantFP *El = cast<ConstantFP>(CP->getOperand(i));
              Ops.push_back(DAG.getConstantFP(El->getValue(), PVT));
            for (unsigned i = 0; i != NumElements; ++i) {
              const ConstantIntegral *El =
                cast<ConstantIntegral>(CP->getOperand(i));
              Ops.push_back(DAG.getConstant(El->getRawValue(), PVT));
          assert(isa<ConstantAggregateZero>(C) && "Unknown packed constant!");
          if (MVT::isFloatingPoint(PVT))
            Op = DAG.getConstantFP(0, PVT);
            Op = DAG.getConstant(0, PVT);
          Ops.assign(NumElements, Op);

        // Handle the case where we have a 1-element vector, in which
        // case we want to immediately turn it into a scalar constant.
        if (Ops.size() == 1) {
        } else if (TVT != MVT::Other && TLI.isTypeLegal(TVT)) {
          return N = DAG.getNode(ISD::ConstantVec, TVT, Ops);
          // If the packed type isn't legal, then create a ConstantVec node with
          // generic Vector type instead.
          return N = DAG.getNode(ISD::ConstantVec, MVT::Vector, Ops);
        // Canonicalize all constant ints to be unsigned.
        return N = DAG.getConstant(cast<ConstantIntegral>(C)->getRawValue(), VT);

    if (const AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
      std::map<const AllocaInst*, int>::iterator SI =
        FuncInfo.StaticAllocaMap.find(AI);
      if (SI != FuncInfo.StaticAllocaMap.end())
        return DAG.getFrameIndex(SI->second, TLI.getPointerTy());

    std::map<const Value*, unsigned>::const_iterator VMI =
      FuncInfo.ValueMap.find(V);
    assert(VMI != FuncInfo.ValueMap.end() && "Value not in map!");

    unsigned InReg = VMI->second;

    // If this type is not legal, make it so now.
    MVT::ValueType DestVT = TLI.getTypeToTransformTo(VT);

    N = DAG.getCopyFromReg(DAG.getEntryNode(), InReg, DestVT);

      // Source must be expanded. This input value is actually coming from the
      // register pair VMI->second and VMI->second+1.
      N = DAG.getNode(ISD::BUILD_PAIR, VT, N,
                      DAG.getCopyFromReg(DAG.getEntryNode(), InReg+1, DestVT));

      if (DestVT > VT) { // Promotion case
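        // e.g. an i16 value that was promoted into an i32 register is truncated
        // back to i16 here; promoted floating-point values use FP_ROUND instead.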
        if (MVT::isFloatingPoint(VT))
          N = DAG.getNode(ISD::FP_ROUND, VT, N);
          N = DAG.getNode(ISD::TRUNCATE, VT, N);

  const SDOperand &setValue(const Value *V, SDOperand NewN) {
    SDOperand &N = NodeMap[V];
    assert(N.Val == 0 && "Already set a value for this node!");

  // Terminator instructions.
  void visitRet(ReturnInst &I);
  void visitBr(BranchInst &I);
  void visitUnreachable(UnreachableInst &I) { /* noop */ }

  // These all get lowered before this pass.
  void visitExtractElement(ExtractElementInst &I) { assert(0 && "TODO"); }
  void visitInsertElement(InsertElementInst &I) { assert(0 && "TODO"); }
  void visitSwitch(SwitchInst &I) { assert(0 && "TODO"); }
  void visitInvoke(InvokeInst &I) { assert(0 && "TODO"); }
  void visitUnwind(UnwindInst &I) { assert(0 && "TODO"); }

  void visitBinary(User &I, unsigned IntOp, unsigned FPOp, unsigned VecOp);
  void visitShift(User &I, unsigned Opcode);
  void visitAdd(User &I) {
    visitBinary(I, ISD::ADD, ISD::FADD, ISD::VADD);
  void visitSub(User &I);
  void visitMul(User &I) {
    visitBinary(I, ISD::MUL, ISD::FMUL, ISD::VMUL);
  void visitDiv(User &I) {
    const Type *Ty = I.getType();
    visitBinary(I, Ty->isSigned() ? ISD::SDIV : ISD::UDIV, ISD::FDIV, 0);
  void visitRem(User &I) {
    const Type *Ty = I.getType();
    visitBinary(I, Ty->isSigned() ? ISD::SREM : ISD::UREM, ISD::FREM, 0);
  void visitAnd(User &I) { visitBinary(I, ISD::AND, 0, 0); }
  void visitOr (User &I) { visitBinary(I, ISD::OR,  0, 0); }
  void visitXor(User &I) { visitBinary(I, ISD::XOR, 0, 0); }
  void visitShl(User &I) { visitShift(I, ISD::SHL); }
  void visitShr(User &I) {
    visitShift(I, I.getType()->isUnsigned() ? ISD::SRL : ISD::SRA);

  void visitSetCC(User &I, ISD::CondCode SignedOpc, ISD::CondCode UnsignedOpc);
  void visitSetEQ(User &I) { visitSetCC(I, ISD::SETEQ, ISD::SETEQ); }
  void visitSetNE(User &I) { visitSetCC(I, ISD::SETNE, ISD::SETNE); }
  void visitSetLE(User &I) { visitSetCC(I, ISD::SETLE, ISD::SETULE); }
  void visitSetGE(User &I) { visitSetCC(I, ISD::SETGE, ISD::SETUGE); }
  void visitSetLT(User &I) { visitSetCC(I, ISD::SETLT, ISD::SETULT); }
  void visitSetGT(User &I) { visitSetCC(I, ISD::SETGT, ISD::SETUGT); }

  void visitGetElementPtr(User &I);
  void visitCast(User &I);
  void visitSelect(User &I);

  void visitMalloc(MallocInst &I);
  void visitFree(FreeInst &I);
  void visitAlloca(AllocaInst &I);
  void visitLoad(LoadInst &I);
  void visitStore(StoreInst &I);
  void visitPHI(PHINode &I) { } // PHI nodes are handled specially.
  void visitCall(CallInst &I);
  const char *visitIntrinsicCall(CallInst &I, unsigned Intrinsic);

  void visitVAStart(CallInst &I);
  void visitVAArg(VAArgInst &I);
  void visitVAEnd(CallInst &I);
  void visitVACopy(CallInst &I);
  void visitFrameReturnAddress(CallInst &I, bool isFrameAddress);

  void visitMemIntrinsic(CallInst &I, unsigned Op);

  void visitUserOp1(Instruction &I) {
    assert(0 && "UserOp1 should not exist at instruction selection time!");
  void visitUserOp2(Instruction &I) {
    assert(0 && "UserOp2 should not exist at instruction selection time!");

} // end namespace llvm
void SelectionDAGLowering::visitRet(ReturnInst &I) {
  if (I.getNumOperands() == 0) {
    DAG.setRoot(DAG.getNode(ISD::RET, MVT::Other, getRoot()));

  SDOperand Op1 = getValue(I.getOperand(0));
  MVT::ValueType TmpVT;

  switch (Op1.getValueType()) {
  default: assert(0 && "Unknown value type!");
    // If this is a machine where 32 bits is legal or expanded, promote to
    // 32 bits; otherwise, promote to 64 bits.
    if (TLI.getTypeAction(MVT::i32) == TargetLowering::Promote)
      TmpVT = TLI.getTypeToTransformTo(MVT::i32);

    // Extend integer types to result type.
    if (I.getOperand(0)->getType()->isSigned())
      Op1 = DAG.getNode(ISD::SIGN_EXTEND, TmpVT, Op1);
      Op1 = DAG.getNode(ISD::ZERO_EXTEND, TmpVT, Op1);

    // If this is a machine where f32 is promoted to f64, do so now.
    if (TLI.getTypeAction(MVT::f32) == TargetLowering::Promote)
      Op1 = DAG.getNode(ISD::FP_EXTEND, TLI.getTypeToTransformTo(MVT::f32), Op1);
    break; // No extension needed!

  // Allow targets to lower this further to meet ABI requirements.
  DAG.setRoot(TLI.LowerReturnTo(getRoot(), Op1, DAG));
void SelectionDAGLowering::visitBr(BranchInst &I) {
  // Update machine-CFG edges.
  MachineBasicBlock *Succ0MBB = FuncInfo.MBBMap[I.getSuccessor(0)];

  // Figure out which block is immediately after the current one.
  MachineBasicBlock *NextBlock = 0;
  MachineFunction::iterator BBI = CurMBB;
  if (++BBI != CurMBB->getParent()->end())

  if (I.isUnconditional()) {
    // If this is not a fall-through branch, emit the branch.
    if (Succ0MBB != NextBlock)
      DAG.setRoot(DAG.getNode(ISD::BR, MVT::Other, getRoot(),
                              DAG.getBasicBlock(Succ0MBB)));
    MachineBasicBlock *Succ1MBB = FuncInfo.MBBMap[I.getSuccessor(1)];

    SDOperand Cond = getValue(I.getCondition());
    if (Succ1MBB == NextBlock) {
      // If the condition is false, fall through. This means we should branch
      // if the condition is true to Succ #0.
      DAG.setRoot(DAG.getNode(ISD::BRCOND, MVT::Other, getRoot(),
                              Cond, DAG.getBasicBlock(Succ0MBB)));
    } else if (Succ0MBB == NextBlock) {
      // If the condition is true, fall through. This means we should branch if
      // the condition is false to Succ #1. Invert the condition first.
      SDOperand True = DAG.getConstant(1, Cond.getValueType());
      Cond = DAG.getNode(ISD::XOR, Cond.getValueType(), Cond, True);
      DAG.setRoot(DAG.getNode(ISD::BRCOND, MVT::Other, getRoot(),
                              Cond, DAG.getBasicBlock(Succ1MBB)));
      std::vector<SDOperand> Ops;
      Ops.push_back(getRoot());
      Ops.push_back(DAG.getBasicBlock(Succ0MBB));
      Ops.push_back(DAG.getBasicBlock(Succ1MBB));
      DAG.setRoot(DAG.getNode(ISD::BRCONDTWOWAY, MVT::Other, Ops));

void SelectionDAGLowering::visitSub(User &I) {
  if (I.getType()->isFloatingPoint()) {
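    // -0.0 - X --> fneg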
    if (ConstantFP *CFP = dyn_cast<ConstantFP>(I.getOperand(0)))
      if (CFP->isExactlyValue(-0.0)) {
        SDOperand Op2 = getValue(I.getOperand(1));
        setValue(&I, DAG.getNode(ISD::FNEG, Op2.getValueType(), Op2));

  visitBinary(I, ISD::SUB, ISD::FSUB, ISD::VSUB);

void SelectionDAGLowering::visitBinary(User &I, unsigned IntOp, unsigned FPOp,
  const Type *Ty = I.getType();
  SDOperand Op1 = getValue(I.getOperand(0));
  SDOperand Op2 = getValue(I.getOperand(1));

  if (Ty->isIntegral()) {
    setValue(&I, DAG.getNode(IntOp, Op1.getValueType(), Op1, Op2));
  } else if (Ty->isFloatingPoint()) {
    setValue(&I, DAG.getNode(FPOp, Op1.getValueType(), Op1, Op2));
    const PackedType *PTy = cast<PackedType>(Ty);
    unsigned NumElements = PTy->getNumElements();
    MVT::ValueType PVT = TLI.getValueType(PTy->getElementType());
    MVT::ValueType TVT = MVT::getVectorType(PVT, NumElements);

    // Immediately scalarize packed types containing only one element, so that
    // the Legalize pass does not have to deal with them. Similarly, if the
    // abstract vector is going to turn into one that the target natively
    // supports, generate that type now so that Legalize doesn't have to deal
    // with that either. These steps ensure that Legalize only has to handle
    // vector types in its Expand case.
    unsigned Opc = MVT::isFloatingPoint(PVT) ? FPOp : IntOp;
    if (NumElements == 1) {
      setValue(&I, DAG.getNode(Opc, PVT, Op1, Op2));
    } else if (TVT != MVT::Other && TLI.isTypeLegal(TVT)) {
      setValue(&I, DAG.getNode(Opc, TVT, Op1, Op2));
      SDOperand Num = DAG.getConstant(NumElements, MVT::i32);
      SDOperand Typ = DAG.getValueType(PVT);
      setValue(&I, DAG.getNode(VecOp, MVT::Vector, Op1, Op2, Num, Typ));

void SelectionDAGLowering::visitShift(User &I, unsigned Opcode) {
  SDOperand Op1 = getValue(I.getOperand(0));
  SDOperand Op2 = getValue(I.getOperand(1));
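  // LLVM shift amounts are ubyte values; widen the amount to the type the
  // target prefers for shift counts.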
  Op2 = DAG.getNode(ISD::ANY_EXTEND, TLI.getShiftAmountTy(), Op2);

  setValue(&I, DAG.getNode(Opcode, Op1.getValueType(), Op1, Op2));

void SelectionDAGLowering::visitSetCC(User &I, ISD::CondCode SignedOpcode,
                                      ISD::CondCode UnsignedOpcode) {
  SDOperand Op1 = getValue(I.getOperand(0));
  SDOperand Op2 = getValue(I.getOperand(1));
  ISD::CondCode Opcode = SignedOpcode;
  if (I.getOperand(0)->getType()->isUnsigned())
    Opcode = UnsignedOpcode;
  setValue(&I, DAG.getSetCC(MVT::i1, Op1, Op2, Opcode));

void SelectionDAGLowering::visitSelect(User &I) {
  SDOperand Cond = getValue(I.getOperand(0));
  SDOperand TrueVal = getValue(I.getOperand(1));
  SDOperand FalseVal = getValue(I.getOperand(2));
  setValue(&I, DAG.getNode(ISD::SELECT, TrueVal.getValueType(), Cond,

void SelectionDAGLowering::visitCast(User &I) {
  SDOperand N = getValue(I.getOperand(0));
  MVT::ValueType SrcTy = TLI.getValueType(I.getOperand(0)->getType());
  MVT::ValueType DestTy = TLI.getValueType(I.getType());

  if (N.getValueType() == DestTy) {
    setValue(&I, N);  // noop cast.
  } else if (DestTy == MVT::i1) {
    // Cast to bool is a comparison against zero, not truncation to zero.
    SDOperand Zero = isInteger(SrcTy) ? DAG.getConstant(0, N.getValueType()) :
                                        DAG.getConstantFP(0.0, N.getValueType());
    setValue(&I, DAG.getSetCC(MVT::i1, N, Zero, ISD::SETNE));
  } else if (isInteger(SrcTy)) {
    if (isInteger(DestTy)) {   // Int -> Int cast
      if (DestTy < SrcTy)      // Truncating cast?
        setValue(&I, DAG.getNode(ISD::TRUNCATE, DestTy, N));
      else if (I.getOperand(0)->getType()->isSigned())
        setValue(&I, DAG.getNode(ISD::SIGN_EXTEND, DestTy, N));
        setValue(&I, DAG.getNode(ISD::ZERO_EXTEND, DestTy, N));
    } else {                   // Int -> FP cast
      if (I.getOperand(0)->getType()->isSigned())
        setValue(&I, DAG.getNode(ISD::SINT_TO_FP, DestTy, N));
        setValue(&I, DAG.getNode(ISD::UINT_TO_FP, DestTy, N));
    assert(isFloatingPoint(SrcTy) && "Unknown value type!");
    if (isFloatingPoint(DestTy)) {   // FP -> FP cast
      if (DestTy < SrcTy)            // Rounding cast?
        setValue(&I, DAG.getNode(ISD::FP_ROUND, DestTy, N));
        setValue(&I, DAG.getNode(ISD::FP_EXTEND, DestTy, N));
    } else {                         // FP -> Int cast.
      if (I.getType()->isSigned())
        setValue(&I, DAG.getNode(ISD::FP_TO_SINT, DestTy, N));
        setValue(&I, DAG.getNode(ISD::FP_TO_UINT, DestTy, N));

void SelectionDAGLowering::visitGetElementPtr(User &I) {
  SDOperand N = getValue(I.getOperand(0));
  const Type *Ty = I.getOperand(0)->getType();
  const Type *UIntPtrTy = TD.getIntPtrType();

  for (GetElementPtrInst::op_iterator OI = I.op_begin()+1, E = I.op_end();
    if (const StructType *StTy = dyn_cast<StructType>(Ty)) {
      unsigned Field = cast<ConstantUInt>(Idx)->getValue();

      uint64_t Offset = TD.getStructLayout(StTy)->MemberOffsets[Field];
      N = DAG.getNode(ISD::ADD, N.getValueType(), N,
                      getIntPtrConstant(Offset));

      Ty = StTy->getElementType(Field);
      Ty = cast<SequentialType>(Ty)->getElementType();

      // If this is a constant subscript, handle it quickly.
      if (ConstantInt *CI = dyn_cast<ConstantInt>(Idx)) {
        if (CI->getRawValue() == 0) continue;

        if (ConstantSInt *CSI = dyn_cast<ConstantSInt>(CI))
          Offs = (int64_t)TD.getTypeSize(Ty)*CSI->getValue();
          Offs = TD.getTypeSize(Ty)*cast<ConstantUInt>(CI)->getValue();
        N = DAG.getNode(ISD::ADD, N.getValueType(), N, getIntPtrConstant(Offs));

      // N = N + Idx * ElementSize;
      uint64_t ElementSize = TD.getTypeSize(Ty);
      SDOperand IdxN = getValue(Idx);

      // If the index is smaller or larger than intptr_t, truncate or extend
      if (IdxN.getValueType() < N.getValueType()) {
        if (Idx->getType()->isSigned())
          IdxN = DAG.getNode(ISD::SIGN_EXTEND, N.getValueType(), IdxN);
          IdxN = DAG.getNode(ISD::ZERO_EXTEND, N.getValueType(), IdxN);
      } else if (IdxN.getValueType() > N.getValueType())
        IdxN = DAG.getNode(ISD::TRUNCATE, N.getValueType(), IdxN);

      // If this is a multiply by a power of two, turn it into a shl
      // immediately. This is a very common case.
      if (isPowerOf2_64(ElementSize)) {
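        // e.g. indexing an array of 4-byte elements becomes IdxN << 2.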
        unsigned Amt = Log2_64(ElementSize);
        IdxN = DAG.getNode(ISD::SHL, N.getValueType(), IdxN,
                           DAG.getConstant(Amt, TLI.getShiftAmountTy()));
        N = DAG.getNode(ISD::ADD, N.getValueType(), N, IdxN);

      SDOperand Scale = getIntPtrConstant(ElementSize);
      IdxN = DAG.getNode(ISD::MUL, N.getValueType(), IdxN, Scale);
      N = DAG.getNode(ISD::ADD, N.getValueType(), N, IdxN);

void SelectionDAGLowering::visitAlloca(AllocaInst &I) {
  // If this is a fixed sized alloca in the entry block of the function,
  // allocate it statically on the stack.
  if (FuncInfo.StaticAllocaMap.count(&I))
    return;   // getValue will auto-populate this.

  const Type *Ty = I.getAllocatedType();
  uint64_t TySize = TLI.getTargetData().getTypeSize(Ty);
  unsigned Align = std::max((unsigned)TLI.getTargetData().getTypeAlignment(Ty),

  SDOperand AllocSize = getValue(I.getArraySize());
  MVT::ValueType IntPtr = TLI.getPointerTy();
  if (IntPtr < AllocSize.getValueType())
    AllocSize = DAG.getNode(ISD::TRUNCATE, IntPtr, AllocSize);
  else if (IntPtr > AllocSize.getValueType())
    AllocSize = DAG.getNode(ISD::ZERO_EXTEND, IntPtr, AllocSize);

  AllocSize = DAG.getNode(ISD::MUL, IntPtr, AllocSize,
                          getIntPtrConstant(TySize));

  // Handle alignment. If the requested alignment is less than or equal to the
  // stack alignment, ignore it and round the size of the allocation up to the
  // stack alignment size. If the requested alignment is greater than the stack
  // alignment, we note this in the DYNAMIC_STACKALLOC node.
  unsigned StackAlign =
    TLI.getTargetMachine().getFrameInfo()->getStackAlignment();
  if (Align <= StackAlign) {
    // Add SA-1 to the size.
    AllocSize = DAG.getNode(ISD::ADD, AllocSize.getValueType(), AllocSize,
                            getIntPtrConstant(StackAlign-1));
    // Mask out the low bits for alignment purposes.
    AllocSize = DAG.getNode(ISD::AND, AllocSize.getValueType(), AllocSize,
                            getIntPtrConstant(~(uint64_t)(StackAlign-1)));
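    // e.g. with a 16-byte stack alignment, a 20-byte request becomes
    //   (20 + 15) & ~15 == 32.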
  std::vector<MVT::ValueType> VTs;
  VTs.push_back(AllocSize.getValueType());
  VTs.push_back(MVT::Other);
  std::vector<SDOperand> Ops;
  Ops.push_back(getRoot());
  Ops.push_back(AllocSize);
  Ops.push_back(getIntPtrConstant(Align));
  SDOperand DSA = DAG.getNode(ISD::DYNAMIC_STACKALLOC, VTs, Ops);
  DAG.setRoot(setValue(&I, DSA).getValue(1));

  // Inform the Frame Information that we have just allocated a variable-sized
  CurMBB->getParent()->getFrameInfo()->CreateVariableSizedObject();

/// getStringValue - Turn an LLVM constant pointer that eventually points to a
/// global into a string value. Return an empty string if we can't do it.
static std::string getStringValue(Value *V, unsigned Offset = 0) {
  if (GlobalVariable *GV = dyn_cast<GlobalVariable>(V)) {
    if (GV->hasInitializer() && isa<ConstantArray>(GV->getInitializer())) {
      ConstantArray *Init = cast<ConstantArray>(GV->getInitializer());
      if (Init->isString()) {
        std::string Result = Init->getAsString();
        if (Offset < Result.size()) {
          // If we are pointing INTO the string, erase the beginning...
          Result.erase(Result.begin(), Result.begin()+Offset);

          // Take off the null terminator, and any string fragments after it.
          std::string::size_type NullPos = Result.find_first_of((char)0);
          if (NullPos != std::string::npos)
            Result.erase(Result.begin()+NullPos, Result.end());
  } else if (Constant *C = dyn_cast<Constant>(V)) {
    if (GlobalValue *GV = dyn_cast<GlobalValue>(C))
      return getStringValue(GV, Offset);
    else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) {
      if (CE->getOpcode() == Instruction::GetElementPtr) {
        // Turn a gep into the specified offset.
        if (CE->getNumOperands() == 3 &&
            cast<Constant>(CE->getOperand(1))->isNullValue() &&
            isa<ConstantInt>(CE->getOperand(2))) {
          return getStringValue(CE->getOperand(0),
                   Offset+cast<ConstantInt>(CE->getOperand(2))->getRawValue());
void SelectionDAGLowering::visitLoad(LoadInst &I) {
  SDOperand Ptr = getValue(I.getOperand(0));

    // Do not serialize non-volatile loads against each other.
    Root = DAG.getRoot();

  const Type *Ty = I.getType();

  if (const PackedType *PTy = dyn_cast<PackedType>(Ty)) {
    unsigned NumElements = PTy->getNumElements();
    MVT::ValueType PVT = TLI.getValueType(PTy->getElementType());
    MVT::ValueType TVT = MVT::getVectorType(PVT, NumElements);

    // Immediately scalarize packed types containing only one element, so that
    // the Legalize pass does not have to deal with them.
    if (NumElements == 1) {
      L = DAG.getLoad(PVT, Root, Ptr, DAG.getSrcValue(I.getOperand(0)));
    } else if (TVT != MVT::Other && TLI.isTypeLegal(TVT)) {
      L = DAG.getLoad(TVT, Root, Ptr, DAG.getSrcValue(I.getOperand(0)));
      L = DAG.getVecLoad(NumElements, PVT, Root, Ptr,
                         DAG.getSrcValue(I.getOperand(0)));
    L = DAG.getLoad(TLI.getValueType(Ty), Root, Ptr,
                    DAG.getSrcValue(I.getOperand(0)));

    DAG.setRoot(L.getValue(1));
    PendingLoads.push_back(L.getValue(1));

void SelectionDAGLowering::visitStore(StoreInst &I) {
  Value *SrcV = I.getOperand(0);
  SDOperand Src = getValue(SrcV);
  SDOperand Ptr = getValue(I.getOperand(1));
  DAG.setRoot(DAG.getNode(ISD::STORE, MVT::Other, getRoot(), Src, Ptr,
                          DAG.getSrcValue(I.getOperand(1))));

/// visitIntrinsicCall - Lower the call to the specified intrinsic function. If
/// we want to emit this as a call to a named external function, return the
/// name; otherwise, lower it and return null.
SelectionDAGLowering::visitIntrinsicCall(CallInst &I, unsigned Intrinsic) {
  case Intrinsic::vastart: visitVAStart(I); return 0;
  case Intrinsic::vaend:   visitVAEnd(I); return 0;
  case Intrinsic::vacopy:  visitVACopy(I); return 0;
  case Intrinsic::returnaddress: visitFrameReturnAddress(I, false); return 0;
  case Intrinsic::frameaddress:  visitFrameReturnAddress(I, true); return 0;
  case Intrinsic::setjmp:
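    // When the target does not use the underscore-prefixed names, the +1 from
    // the boolean skips the leading '_', yielding "setjmp"/"longjmp".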
906 return "_setjmp"+!TLI.usesUnderscoreSetJmpLongJmp();
908 case Intrinsic::longjmp:
909 return "_longjmp"+!TLI.usesUnderscoreSetJmpLongJmp();
911 case Intrinsic::memcpy: visitMemIntrinsic(I, ISD::MEMCPY); return 0;
912 case Intrinsic::memset: visitMemIntrinsic(I, ISD::MEMSET); return 0;
913 case Intrinsic::memmove: visitMemIntrinsic(I, ISD::MEMMOVE); return 0;
915 case Intrinsic::readport:
916 case Intrinsic::readio: {
917 std::vector<MVT::ValueType> VTs;
918 VTs.push_back(TLI.getValueType(I.getType()));
919 VTs.push_back(MVT::Other);
920 std::vector<SDOperand> Ops;
921 Ops.push_back(getRoot());
922 Ops.push_back(getValue(I.getOperand(1)));
923 SDOperand Tmp = DAG.getNode(Intrinsic == Intrinsic::readport ?
924 ISD::READPORT : ISD::READIO, VTs, Ops);
927 DAG.setRoot(Tmp.getValue(1));
930 case Intrinsic::writeport:
931 case Intrinsic::writeio:
932 DAG.setRoot(DAG.getNode(Intrinsic == Intrinsic::writeport ?
933 ISD::WRITEPORT : ISD::WRITEIO, MVT::Other,
934 getRoot(), getValue(I.getOperand(1)),
935 getValue(I.getOperand(2))));
938 case Intrinsic::dbg_stoppoint: {
939 if (TLI.getTargetMachine().getIntrinsicLowering().EmitDebugFunctions())
940 return "llvm_debugger_stop";
942 std::string fname = "<unknown>";
943 std::vector<SDOperand> Ops;
946 Ops.push_back(getRoot());
949 Ops.push_back(getValue(I.getOperand(2)));
952 Ops.push_back(getValue(I.getOperand(3)));
954 // filename/working dir
955 // Pull the filename out of the the compilation unit.
956 const GlobalVariable *cunit = dyn_cast<GlobalVariable>(I.getOperand(4));
957 if (cunit && cunit->hasInitializer()) {
958 if (ConstantStruct *CS =
959 dyn_cast<ConstantStruct>(cunit->getInitializer())) {
960 if (CS->getNumOperands() > 0) {
961 Ops.push_back(DAG.getString(getStringValue(CS->getOperand(3))));
962 Ops.push_back(DAG.getString(getStringValue(CS->getOperand(4))));
967 if (Ops.size() == 5) // Found filename/workingdir.
968 DAG.setRoot(DAG.getNode(ISD::LOCATION, MVT::Other, Ops));
969 setValue(&I, DAG.getNode(ISD::UNDEF, TLI.getValueType(I.getType())));
  case Intrinsic::dbg_region_start:
    if (TLI.getTargetMachine().getIntrinsicLowering().EmitDebugFunctions())
      return "llvm_dbg_region_start";
    if (I.getType() != Type::VoidTy)
      setValue(&I, DAG.getNode(ISD::UNDEF, TLI.getValueType(I.getType())));
  case Intrinsic::dbg_region_end:
    if (TLI.getTargetMachine().getIntrinsicLowering().EmitDebugFunctions())
      return "llvm_dbg_region_end";
    if (I.getType() != Type::VoidTy)
      setValue(&I, DAG.getNode(ISD::UNDEF, TLI.getValueType(I.getType())));
  case Intrinsic::dbg_func_start:
    if (TLI.getTargetMachine().getIntrinsicLowering().EmitDebugFunctions())
      return "llvm_dbg_subprogram";
    if (I.getType() != Type::VoidTy)
      setValue(&I, DAG.getNode(ISD::UNDEF, TLI.getValueType(I.getType())));
  case Intrinsic::dbg_declare:
    if (I.getType() != Type::VoidTy)
      setValue(&I, DAG.getNode(ISD::UNDEF, TLI.getValueType(I.getType())));

  case Intrinsic::isunordered_f32:
  case Intrinsic::isunordered_f64:
    setValue(&I, DAG.getSetCC(MVT::i1, getValue(I.getOperand(1)),
                              getValue(I.getOperand(2)), ISD::SETUO));

  case Intrinsic::sqrt_f32:
  case Intrinsic::sqrt_f64:
    setValue(&I, DAG.getNode(ISD::FSQRT,
                             getValue(I.getOperand(1)).getValueType(),
                             getValue(I.getOperand(1))));
  case Intrinsic::pcmarker: {
    SDOperand Tmp = getValue(I.getOperand(1));
    DAG.setRoot(DAG.getNode(ISD::PCMARKER, MVT::Other, getRoot(), Tmp));
  case Intrinsic::readcyclecounter: {
    std::vector<MVT::ValueType> VTs;
    VTs.push_back(MVT::i64);
    VTs.push_back(MVT::Other);
    std::vector<SDOperand> Ops;
    Ops.push_back(getRoot());
    SDOperand Tmp = DAG.getNode(ISD::READCYCLECOUNTER, VTs, Ops);

    DAG.setRoot(Tmp.getValue(1));

  case Intrinsic::bswap_i16:
  case Intrinsic::bswap_i32:
  case Intrinsic::bswap_i64:
    setValue(&I, DAG.getNode(ISD::BSWAP,
                             getValue(I.getOperand(1)).getValueType(),
                             getValue(I.getOperand(1))));
  case Intrinsic::cttz_i8:
  case Intrinsic::cttz_i16:
  case Intrinsic::cttz_i32:
  case Intrinsic::cttz_i64:
    setValue(&I, DAG.getNode(ISD::CTTZ,
                             getValue(I.getOperand(1)).getValueType(),
                             getValue(I.getOperand(1))));
  case Intrinsic::ctlz_i8:
  case Intrinsic::ctlz_i16:
  case Intrinsic::ctlz_i32:
  case Intrinsic::ctlz_i64:
    setValue(&I, DAG.getNode(ISD::CTLZ,
                             getValue(I.getOperand(1)).getValueType(),
                             getValue(I.getOperand(1))));
  case Intrinsic::ctpop_i8:
  case Intrinsic::ctpop_i16:
  case Intrinsic::ctpop_i32:
  case Intrinsic::ctpop_i64:
    setValue(&I, DAG.getNode(ISD::CTPOP,
                             getValue(I.getOperand(1)).getValueType(),
                             getValue(I.getOperand(1))));
  case Intrinsic::stacksave: {
    std::vector<MVT::ValueType> VTs;
    VTs.push_back(TLI.getPointerTy());
    VTs.push_back(MVT::Other);
    std::vector<SDOperand> Ops;
    Ops.push_back(getRoot());
    SDOperand Tmp = DAG.getNode(ISD::STACKSAVE, VTs, Ops);

    DAG.setRoot(Tmp.getValue(1));

  case Intrinsic::stackrestore: {
    SDOperand Tmp = getValue(I.getOperand(1));
    DAG.setRoot(DAG.getNode(ISD::STACKRESTORE, MVT::Other, getRoot(), Tmp));
  case Intrinsic::prefetch:
    // FIXME: Currently discarding prefetches.

    assert(0 && "This intrinsic is not implemented yet!");
void SelectionDAGLowering::visitCall(CallInst &I) {
  const char *RenameFn = 0;
  if (Function *F = I.getCalledFunction()) {
    if (F->isExternal())
      if (unsigned IID = F->getIntrinsicID()) {
        RenameFn = visitIntrinsicCall(I, IID);
      } else {  // Not an LLVM intrinsic.
        const std::string &Name = F->getName();
        if (Name[0] == 'f' && (Name == "fabs" || Name == "fabsf")) {
          if (I.getNumOperands() == 2 &&   // Basic sanity checks.
              I.getOperand(1)->getType()->isFloatingPoint() &&
              I.getType() == I.getOperand(1)->getType()) {
            SDOperand Tmp = getValue(I.getOperand(1));
            setValue(&I, DAG.getNode(ISD::FABS, Tmp.getValueType(), Tmp));
        } else if (Name[0] == 's' && (Name == "sin" || Name == "sinf")) {
          if (I.getNumOperands() == 2 &&   // Basic sanity checks.
              I.getOperand(1)->getType()->isFloatingPoint() &&
              I.getType() == I.getOperand(1)->getType() &&
              TLI.isOperationLegal(ISD::FSIN,
                                   TLI.getValueType(I.getOperand(1)->getType()))) {
            SDOperand Tmp = getValue(I.getOperand(1));
            setValue(&I, DAG.getNode(ISD::FSIN, Tmp.getValueType(), Tmp));
        } else if (Name[0] == 'c' && (Name == "cos" || Name == "cosf")) {
          if (I.getNumOperands() == 2 &&   // Basic sanity checks.
              I.getOperand(1)->getType()->isFloatingPoint() &&
              I.getType() == I.getOperand(1)->getType() &&
              TLI.isOperationLegal(ISD::FCOS,
                                   TLI.getValueType(I.getOperand(1)->getType()))) {
            SDOperand Tmp = getValue(I.getOperand(1));
            setValue(&I, DAG.getNode(ISD::FCOS, Tmp.getValueType(), Tmp));

    Callee = getValue(I.getOperand(0));
    Callee = DAG.getExternalSymbol(RenameFn, TLI.getPointerTy());

  std::vector<std::pair<SDOperand, const Type*> > Args;
  Args.reserve(I.getNumOperands());
  for (unsigned i = 1, e = I.getNumOperands(); i != e; ++i) {
    Value *Arg = I.getOperand(i);
    SDOperand ArgNode = getValue(Arg);
    Args.push_back(std::make_pair(ArgNode, Arg->getType()));

  const PointerType *PT = cast<PointerType>(I.getCalledValue()->getType());
  const FunctionType *FTy = cast<FunctionType>(PT->getElementType());

  std::pair<SDOperand,SDOperand> Result =
    TLI.LowerCallTo(getRoot(), I.getType(), FTy->isVarArg(), I.getCallingConv(),
                    I.isTailCall(), Callee, Args, DAG);
  if (I.getType() != Type::VoidTy)
    setValue(&I, Result.first);
  DAG.setRoot(Result.second);
void SelectionDAGLowering::visitMalloc(MallocInst &I) {
  SDOperand Src = getValue(I.getOperand(0));

  MVT::ValueType IntPtr = TLI.getPointerTy();

  if (IntPtr < Src.getValueType())
    Src = DAG.getNode(ISD::TRUNCATE, IntPtr, Src);
  else if (IntPtr > Src.getValueType())
    Src = DAG.getNode(ISD::ZERO_EXTEND, IntPtr, Src);

  // Scale the source by the type size.
  uint64_t ElementSize = TD.getTypeSize(I.getType()->getElementType());
  Src = DAG.getNode(ISD::MUL, Src.getValueType(),
                    Src, getIntPtrConstant(ElementSize));

  std::vector<std::pair<SDOperand, const Type*> > Args;
  Args.push_back(std::make_pair(Src, TLI.getTargetData().getIntPtrType()));

  std::pair<SDOperand,SDOperand> Result =
    TLI.LowerCallTo(getRoot(), I.getType(), false, CallingConv::C, true,
                    DAG.getExternalSymbol("malloc", IntPtr),
  setValue(&I, Result.first);  // Pointers always fit in registers
  DAG.setRoot(Result.second);

void SelectionDAGLowering::visitFree(FreeInst &I) {
  std::vector<std::pair<SDOperand, const Type*> > Args;
  Args.push_back(std::make_pair(getValue(I.getOperand(0)),
                                TLI.getTargetData().getIntPtrType()));
  MVT::ValueType IntPtr = TLI.getPointerTy();
  std::pair<SDOperand,SDOperand> Result =
    TLI.LowerCallTo(getRoot(), Type::VoidTy, false, CallingConv::C, true,
                    DAG.getExternalSymbol("free", IntPtr), Args, DAG);
  DAG.setRoot(Result.second);
// InsertAtEndOfBasicBlock - This method should be implemented by targets that
// mark instructions with the 'usesCustomDAGSchedInserter' flag. These
// instructions are special in various ways, which require special support to
// insert. The specified MachineInstr is created but not inserted into any
// basic blocks, and the scheduler passes ownership of it to this method.
MachineBasicBlock *TargetLowering::InsertAtEndOfBasicBlock(MachineInstr *MI,
                                                           MachineBasicBlock *MBB) {
  std::cerr << "If a target marks an instruction with "
               "'usesCustomDAGSchedInserter', it must implement "
               "TargetLowering::InsertAtEndOfBasicBlock!\n";

SDOperand TargetLowering::LowerReturnTo(SDOperand Chain, SDOperand Op,
                                        SelectionDAG &DAG) {
  return DAG.getNode(ISD::RET, MVT::Other, Chain, Op);

SDOperand TargetLowering::LowerVAStart(SDOperand Chain,
                                       SDOperand VAListP, Value *VAListV,
                                       SelectionDAG &DAG) {
  // We have no sane default behavior; just emit a useful error message and bail
  std::cerr << "Variable arguments handling not implemented on this target!\n";

SDOperand TargetLowering::LowerVAEnd(SDOperand Chain, SDOperand LP, Value *LV,
                                     SelectionDAG &DAG) {
  // Default to a noop.

SDOperand TargetLowering::LowerVACopy(SDOperand Chain,
                                      SDOperand SrcP, Value *SrcV,
                                      SDOperand DestP, Value *DestV,
                                      SelectionDAG &DAG) {
  // Default to copying the input list.
  SDOperand Val = DAG.getLoad(getPointerTy(), Chain,
                              SrcP, DAG.getSrcValue(SrcV));
  SDOperand Result = DAG.getNode(ISD::STORE, MVT::Other, Val.getValue(1),
                                 Val, DestP, DAG.getSrcValue(DestV));

std::pair<SDOperand,SDOperand>
TargetLowering::LowerVAArg(SDOperand Chain, SDOperand VAListP, Value *VAListV,
                           const Type *ArgTy, SelectionDAG &DAG) {
  // We have no sane default behavior; just emit a useful error message and bail
  std::cerr << "Variable arguments handling not implemented on this target!\n";
  return std::make_pair(SDOperand(), SDOperand());
void SelectionDAGLowering::visitVAStart(CallInst &I) {
  DAG.setRoot(TLI.LowerVAStart(getRoot(), getValue(I.getOperand(1)),
                               I.getOperand(1), DAG));

void SelectionDAGLowering::visitVAArg(VAArgInst &I) {
  std::pair<SDOperand,SDOperand> Result =
    TLI.LowerVAArg(getRoot(), getValue(I.getOperand(0)), I.getOperand(0),
  setValue(&I, Result.first);
  DAG.setRoot(Result.second);

void SelectionDAGLowering::visitVAEnd(CallInst &I) {
  DAG.setRoot(TLI.LowerVAEnd(getRoot(), getValue(I.getOperand(1)),
                             I.getOperand(1), DAG));

void SelectionDAGLowering::visitVACopy(CallInst &I) {
    TLI.LowerVACopy(getRoot(), getValue(I.getOperand(2)), I.getOperand(2),
                    getValue(I.getOperand(1)), I.getOperand(1), DAG);
  DAG.setRoot(Result);

// It is always conservatively correct for llvm.returnaddress and
// llvm.frameaddress to return 0.
std::pair<SDOperand, SDOperand>
TargetLowering::LowerFrameReturnAddress(bool isFrameAddr, SDOperand Chain,
                                        unsigned Depth, SelectionDAG &DAG) {
  return std::make_pair(DAG.getConstant(0, getPointerTy()), Chain);
SDOperand TargetLowering::LowerOperation(SDOperand Op, SelectionDAG &DAG) {
  assert(0 && "LowerOperation not implemented for this target!");

void SelectionDAGLowering::visitFrameReturnAddress(CallInst &I, bool isFrame) {
  unsigned Depth = (unsigned)cast<ConstantUInt>(I.getOperand(1))->getValue();
  std::pair<SDOperand,SDOperand> Result =
    TLI.LowerFrameReturnAddress(isFrame, getRoot(), Depth, DAG);
  setValue(&I, Result.first);
  DAG.setRoot(Result.second);

void SelectionDAGLowering::visitMemIntrinsic(CallInst &I, unsigned Op) {
  // If the size of the cpy/move/set is constant (known)
  if (ConstantUInt* op3 = dyn_cast<ConstantUInt>(I.getOperand(3))) {
    uint64_t size = op3->getValue();
    if (size <= TLI.getMaxStoresPerMemSet()) {
      if (ConstantUInt* op4 = dyn_cast<ConstantUInt>(I.getOperand(4))) {
        uint64_t TySize = TLI.getTargetData().getTypeSize(Ty);
        uint64_t align = op4->getValue();
        while (size > align) {
          Value *SrcV = I.getOperand(0);
          SDOperand Src = getValue(SrcV);
          SDOperand Ptr = getValue(I.getOperand(1));
          DAG.setRoot(DAG.getNode(ISD::STORE, MVT::Other, getRoot(), Src, Ptr,
                                  DAG.getSrcValue(I.getOperand(1))));
      break;  // don't do this optimization, use a normal memset
      break;  // FIXME: not implemented yet

  // Non-optimized version
  std::vector<SDOperand> Ops;
  Ops.push_back(getRoot());
  Ops.push_back(getValue(I.getOperand(1)));
  Ops.push_back(getValue(I.getOperand(2)));
  Ops.push_back(getValue(I.getOperand(3)));
  Ops.push_back(getValue(I.getOperand(4)));
  DAG.setRoot(DAG.getNode(Op, MVT::Other, Ops));
//===----------------------------------------------------------------------===//
// SelectionDAGISel code
//===----------------------------------------------------------------------===//

unsigned SelectionDAGISel::MakeReg(MVT::ValueType VT) {
  return RegMap->createVirtualRegister(TLI.getRegClassFor(VT));

void SelectionDAGISel::getAnalysisUsage(AnalysisUsage &AU) const {
  // FIXME: we only modify the CFG to split critical edges. This
  // updates dom and loop info.

/// InsertGEPComputeCode - Insert code into BB to compute Ptr+PtrOffset,
/// casting to the type of GEPI.
static Value *InsertGEPComputeCode(Value *&V, BasicBlock *BB, Instruction *GEPI,
                                   Value *Ptr, Value *PtrOffset) {
  if (V) return V;   // Already computed.

  BasicBlock::iterator InsertPt;
  if (BB == GEPI->getParent()) {
    // If inserting into the GEP's block, insert right after the GEP.
    // Otherwise, insert at the top of BB, after any PHI nodes
    InsertPt = BB->begin();
    while (isa<PHINode>(InsertPt)) ++InsertPt;

  // If Ptr is itself a cast, but in some other BB, emit a copy of the cast into
  // BB so that there is only one value live across basic blocks (the cast
  if (CastInst *CI = dyn_cast<CastInst>(Ptr))
    if (CI->getParent() != BB && isa<PointerType>(CI->getOperand(0)->getType()))
      Ptr = new CastInst(CI->getOperand(0), CI->getType(), "", InsertPt);

  // Add the offset, cast it to the right type.
  Ptr = BinaryOperator::createAdd(Ptr, PtrOffset, "", InsertPt);
  Ptr = new CastInst(Ptr, GEPI->getType(), "", InsertPt);

/// OptimizeGEPExpression - Since we are doing basic-block-at-a-time instruction
/// selection, we want to be a bit careful about some things. In particular, if
/// we have a GEP instruction that is used in a different block than it is
/// defined, the addressing expression of the GEP cannot be folded into loads or
/// stores that use it. In this case, decompose the GEP and move constant
/// indices into blocks that use it.
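///
/// Roughly: the pointer cast and the multiply/add for each variable index are
/// emitted once in the defining block; the accumulated constant offset is then
/// re-added (and cast back to the GEP's type) in each load/store user's own
/// block so it can fold into that user's addressing mode, while other users
/// share one copy next to the original GEP.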
static void OptimizeGEPExpression(GetElementPtrInst *GEPI,
                                  const TargetData &TD) {
  // If this GEP is only used inside the block it is defined in, there is no
  // need to rewrite it.
  bool isUsedOutsideDefBB = false;
  BasicBlock *DefBB = GEPI->getParent();
  for (Value::use_iterator UI = GEPI->use_begin(), E = GEPI->use_end();
    if (cast<Instruction>(*UI)->getParent() != DefBB) {
      isUsedOutsideDefBB = true;
  if (!isUsedOutsideDefBB) return;

  // If this GEP has no non-zero constant indices, there is nothing we can do,
  bool hasConstantIndex = false;
  for (GetElementPtrInst::op_iterator OI = GEPI->op_begin()+1,
       E = GEPI->op_end(); OI != E; ++OI) {
    if (ConstantInt *CI = dyn_cast<ConstantInt>(*OI))
      if (CI->getRawValue()) {
        hasConstantIndex = true;

  // If this is a GEP &Alloca, 0, 0, forward subst the frame index into uses.
  if (!hasConstantIndex && !isa<AllocaInst>(GEPI->getOperand(0))) return;

  // Otherwise, decompose the GEP instruction into multiplies and adds. Sum the
  // constant offset (which we now know is non-zero) and deal with it later.
  uint64_t ConstantOffset = 0;
  const Type *UIntPtrTy = TD.getIntPtrType();
  Value *Ptr = new CastInst(GEPI->getOperand(0), UIntPtrTy, "", GEPI);
  const Type *Ty = GEPI->getOperand(0)->getType();

  for (GetElementPtrInst::op_iterator OI = GEPI->op_begin()+1,
       E = GEPI->op_end(); OI != E; ++OI) {
    if (const StructType *StTy = dyn_cast<StructType>(Ty)) {
      unsigned Field = cast<ConstantUInt>(Idx)->getValue();
        ConstantOffset += TD.getStructLayout(StTy)->MemberOffsets[Field];
      Ty = StTy->getElementType(Field);
      Ty = cast<SequentialType>(Ty)->getElementType();

      // Handle constant subscripts.
      if (ConstantInt *CI = dyn_cast<ConstantInt>(Idx)) {
        if (CI->getRawValue() == 0) continue;

        if (ConstantSInt *CSI = dyn_cast<ConstantSInt>(CI))
          ConstantOffset += (int64_t)TD.getTypeSize(Ty)*CSI->getValue();
          ConstantOffset += TD.getTypeSize(Ty)*cast<ConstantUInt>(CI)->getValue();

      // Ptr = Ptr + Idx * ElementSize;

      // Cast Idx to UIntPtrTy if needed.
      Idx = new CastInst(Idx, UIntPtrTy, "", GEPI);

      uint64_t ElementSize = TD.getTypeSize(Ty);
      // Mask off bits that should not be set.
      ElementSize &= ~0ULL >> (64-UIntPtrTy->getPrimitiveSizeInBits());
      Constant *SizeCst = ConstantUInt::get(UIntPtrTy, ElementSize);

      // Multiply by the element size and add to the base.
      Idx = BinaryOperator::createMul(Idx, SizeCst, "", GEPI);
      Ptr = BinaryOperator::createAdd(Ptr, Idx, "", GEPI);

  // Make sure that the offset fits in uintptr_t.
  ConstantOffset &= ~0ULL >> (64-UIntPtrTy->getPrimitiveSizeInBits());
  Constant *PtrOffset = ConstantUInt::get(UIntPtrTy, ConstantOffset);

  // Okay, we have now emitted all of the variable index parts to the BB that
  // the GEP is defined in. Loop over all of the using instructions, inserting
  // an "add Ptr, ConstantOffset" into each block that uses it and update the
  // instruction to use the newly computed value, making GEPI dead. When the
  // user is a load or store instruction address, we emit the add into the user
  // block, otherwise we use a canonical version right next to the gep (these
  // won't be foldable as addresses, so we might as well share the computation).

  std::map<BasicBlock*,Value*> InsertedExprs;
  while (!GEPI->use_empty()) {
    Instruction *User = cast<Instruction>(GEPI->use_back());

    // If this use is not foldable into the addressing mode, use a version
    // emitted in the GEP block.
    if (!isa<LoadInst>(User) &&
        (!isa<StoreInst>(User) || User->getOperand(0) == GEPI)) {
      NewVal = InsertGEPComputeCode(InsertedExprs[DefBB], DefBB, GEPI,
    // Otherwise, insert the code in the User's block so it can be folded into
    // any users in that block.
    NewVal = InsertGEPComputeCode(InsertedExprs[User->getParent()],
                                  User->getParent(), GEPI,
    User->replaceUsesOfWith(GEPI, NewVal);

  // Finally, the GEP is dead, remove it.
  GEPI->eraseFromParent();
bool SelectionDAGISel::runOnFunction(Function &Fn) {
  MachineFunction &MF = MachineFunction::construct(&Fn, TLI.getTargetMachine());
  RegMap = MF.getSSARegMap();
  DEBUG(std::cerr << "\n\n\n=== " << Fn.getName() << "\n");

  // First, split all critical edges for PHI nodes with incoming values that are
  // constants; this way the load of the constant into a vreg will not be placed
  // into MBBs that are used some other way.
  //
  // In this pass we also look for GEP instructions that are used across basic
  // blocks and rewrite them to improve basic-block-at-a-time selection.
  for (Function::iterator BB = Fn.begin(), E = Fn.end(); BB != E; ++BB) {
    BasicBlock::iterator BBI;
    for (BBI = BB->begin(); (PN = dyn_cast<PHINode>(BBI)); ++BBI)
      for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
        if (isa<Constant>(PN->getIncomingValue(i)))
          SplitCriticalEdge(PN->getIncomingBlock(i), BB);

    for (BasicBlock::iterator E = BB->end(); BBI != E; )
      if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(BBI++))
        OptimizeGEPExpression(GEPI, TLI.getTargetData());

  FunctionLoweringInfo FuncInfo(TLI, Fn, MF);

  for (Function::iterator I = Fn.begin(), E = Fn.end(); I != E; ++I)
    SelectBasicBlock(I, MF, FuncInfo);

SDOperand SelectionDAGISel::
CopyValueToVirtualRegister(SelectionDAGLowering &SDL, Value *V, unsigned Reg) {
  SDOperand Op = SDL.getValue(V);
  assert((Op.getOpcode() != ISD::CopyFromReg ||
          cast<RegisterSDNode>(Op.getOperand(1))->getReg() != Reg) &&
         "Copy from a reg to the same reg!");

  // If this type is not legal, we must make sure to not create an invalid
  MVT::ValueType SrcVT = Op.getValueType();
  MVT::ValueType DestVT = TLI.getTypeToTransformTo(SrcVT);
  SelectionDAG &DAG = SDL.DAG;
  if (SrcVT == DestVT) {
    return DAG.getCopyToReg(SDL.getRoot(), Reg, Op);
  } else if (SrcVT < DestVT) {
    // The src value is promoted to the register.
    if (MVT::isFloatingPoint(SrcVT))
      Op = DAG.getNode(ISD::FP_EXTEND, DestVT, Op);
      Op = DAG.getNode(ISD::ANY_EXTEND, DestVT, Op);
    return DAG.getCopyToReg(SDL.getRoot(), Reg, Op);
    // The src value is expanded into multiple registers.
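    // e.g. an i64 value on a 32-bit target: the low half is copied into Reg
    // and the high half into Reg+1.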
    SDOperand Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DestVT,
                               Op, DAG.getConstant(0, MVT::i32));
    SDOperand Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DestVT,
                               Op, DAG.getConstant(1, MVT::i32));
    Op = DAG.getCopyToReg(SDL.getRoot(), Reg, Lo);
    return DAG.getCopyToReg(Op, Reg+1, Hi);

void SelectionDAGISel::
LowerArguments(BasicBlock *BB, SelectionDAGLowering &SDL,
               std::vector<SDOperand> &UnorderedChains) {
  // If this is the entry block, emit arguments.
  Function &F = *BB->getParent();
  FunctionLoweringInfo &FuncInfo = SDL.FuncInfo;
  SDOperand OldRoot = SDL.DAG.getRoot();
  std::vector<SDOperand> Args = TLI.LowerArguments(F, SDL.DAG);

  for (Function::arg_iterator AI = F.arg_begin(), E = F.arg_end();
    if (!AI->use_empty()) {
      SDL.setValue(AI, Args[a]);

      // If this argument is live outside of the entry block, insert a copy from
      // wherever we got it to the vreg that other BB's will reference it as.
      if (FuncInfo.ValueMap.count(AI)) {
          CopyValueToVirtualRegister(SDL, AI, FuncInfo.ValueMap[AI]);
        UnorderedChains.push_back(Copy);

  // Next, if the function has live ins that need to be copied into vregs,
  // emit the copies now, into the top of the block.
  MachineFunction &MF = SDL.DAG.getMachineFunction();
  if (MF.livein_begin() != MF.livein_end()) {
    SSARegMap *RegMap = MF.getSSARegMap();
    const MRegisterInfo &MRI = *MF.getTarget().getRegisterInfo();
    for (MachineFunction::livein_iterator LI = MF.livein_begin(),
         E = MF.livein_end(); LI != E; ++LI)
      MRI.copyRegToReg(*MF.begin(), MF.begin()->end(), LI->second,
                       LI->first, RegMap->getRegClass(LI->second));

  // Finally, if the target has anything special to do, allow it to do so.
  EmitFunctionEntryCode(F, SDL.DAG.getMachineFunction());
void SelectionDAGISel::BuildSelectionDAG(SelectionDAG &DAG, BasicBlock *LLVMBB,
       std::vector<std::pair<MachineInstr*, unsigned> > &PHINodesToUpdate,
       FunctionLoweringInfo &FuncInfo) {
  SelectionDAGLowering SDL(DAG, TLI, FuncInfo);

  std::vector<SDOperand> UnorderedChains;

  // Lower any arguments needed in this block if this is the entry block.
  if (LLVMBB == &LLVMBB->getParent()->front())
    LowerArguments(LLVMBB, SDL, UnorderedChains);

  BB = FuncInfo.MBBMap[LLVMBB];
  SDL.setCurrentBasicBlock(BB);

  // Lower all of the non-terminator instructions.
  for (BasicBlock::iterator I = LLVMBB->begin(), E = --LLVMBB->end();

  // Ensure that all instructions which are used outside of their defining
  // blocks are available as virtual registers.
  for (BasicBlock::iterator I = LLVMBB->begin(), E = LLVMBB->end(); I != E; ++I)
    if (!I->use_empty() && !isa<PHINode>(I)) {
      std::map<const Value*, unsigned>::iterator VMI = FuncInfo.ValueMap.find(I);
      if (VMI != FuncInfo.ValueMap.end())
        UnorderedChains.push_back(
          CopyValueToVirtualRegister(SDL, I, VMI->second));

  // Handle PHI nodes in successor blocks.  Emit code into the SelectionDAG to
  // ensure constants are generated when needed.  Remember the virtual registers
  // that need to be added to the Machine PHI nodes as input.  We cannot just
  // directly add them, because expansion might result in multiple MBB's for one
  // BB.  As such, the start of the BB might correspond to a different MBB than

  // Emit constants only once even if used by multiple PHI nodes.
  std::map<Constant*, unsigned> ConstantsOut;

  // Check successor nodes' PHI nodes that expect a constant to be available from
  TerminatorInst *TI = LLVMBB->getTerminator();
  for (unsigned succ = 0, e = TI->getNumSuccessors(); succ != e; ++succ) {
    BasicBlock *SuccBB = TI->getSuccessor(succ);
    MachineBasicBlock::iterator MBBI = FuncInfo.MBBMap[SuccBB]->begin();

    // At this point we know that there is a 1-1 correspondence between LLVM PHI
    // nodes and Machine PHI nodes, but the incoming operands have not been
    for (BasicBlock::iterator I = SuccBB->begin();
         (PN = dyn_cast<PHINode>(I)); ++I)
      if (!PN->use_empty()) {
        Value *PHIOp = PN->getIncomingValueForBlock(LLVMBB);
        if (Constant *C = dyn_cast<Constant>(PHIOp)) {
          unsigned &RegOut = ConstantsOut[C];
            RegOut = FuncInfo.CreateRegForValue(C);
            UnorderedChains.push_back(
              CopyValueToVirtualRegister(SDL, C, RegOut));
          Reg = FuncInfo.ValueMap[PHIOp];
            assert(isa<AllocaInst>(PHIOp) &&
                   FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(PHIOp)) &&
                   "Didn't codegen value into a register!??");
            Reg = FuncInfo.CreateRegForValue(PHIOp);
            UnorderedChains.push_back(
              CopyValueToVirtualRegister(SDL, PHIOp, Reg));

        // Remember that this register needs to be added to the machine PHI node
        // as the input for this MBB.
        unsigned NumElements =
          TLI.getNumElements(TLI.getValueType(PN->getType()));
        for (unsigned i = 0, e = NumElements; i != e; ++i)
          PHINodesToUpdate.push_back(std::make_pair(MBBI++, Reg+i));
1686 // Turn all of the unordered chains into one factored node.
1687 if (!UnorderedChains.empty()) {
1688 SDOperand Root = SDL.getRoot();
1689 if (Root.getOpcode() != ISD::EntryToken) {
1690 unsigned i = 0, e = UnorderedChains.size();
1691 for (; i != e; ++i) {
1692 assert(UnorderedChains[i].Val->getNumOperands() > 1);
1693 if (UnorderedChains[i].Val->getOperand(0) == Root)
1694 break; // Don't add the root if we already indirectly depend on it.
1698 UnorderedChains.push_back(Root);
1700 DAG.setRoot(DAG.getNode(ISD::TokenFactor, MVT::Other, UnorderedChains));
1703 // Lower the terminator after the copies are emitted.
1704 SDL.visit(*LLVMBB->getTerminator());
1706 // Make sure the root of the DAG is up-to-date.
1707 DAG.setRoot(SDL.getRoot());
1710 void SelectionDAGISel::SelectBasicBlock(BasicBlock *LLVMBB, MachineFunction &MF,
1711 FunctionLoweringInfo &FuncInfo) {
1712 SelectionDAG DAG(TLI, MF, getAnalysisToUpdate<MachineDebugInfo>());
1714 std::vector<std::pair<MachineInstr*, unsigned> > PHINodesToUpdate;
1716 // First step, lower LLVM code to some DAG. This DAG may use operations and
1717 // types that are not supported by the target.
1718 BuildSelectionDAG(DAG, LLVMBB, PHINodesToUpdate, FuncInfo);
1720 // Run the DAG combiner in pre-legalize mode.
1723 DEBUG(std::cerr << "Lowered selection DAG:\n");
1726 // Second step, hack on the DAG until it only uses operations and types that
1727 // the target supports.
1730 DEBUG(std::cerr << "Legalized selection DAG:\n");
1733 // Run the DAG combiner in post-legalize mode.
1736 if (ViewISelDAGs) DAG.viewGraph();
1738 // Third, instruction select all of the operations to machine code, adding the
1739 // code to the MachineBasicBlock.
1740 InstructionSelectBasicBlock(DAG);
1742 DEBUG(std::cerr << "Selected machine code:\n");
  // Now that we know which MBB is the last one the LLVM BB expanded to, update
  // the PHI nodes in the successors.
  for (unsigned i = 0, e = PHINodesToUpdate.size(); i != e; ++i) {
    MachineInstr *PHI = PHINodesToUpdate[i].first;
    assert(PHI->getOpcode() == TargetInstrInfo::PHI &&
           "This is not a machine PHI node that we are updating!");
    PHI->addRegOperand(PHINodesToUpdate[i].second);
    PHI->addMachineBasicBlockOperand(BB);

  // Finally, add the CFG edges from the last selected MBB to the successor
  TerminatorInst *TI = LLVMBB->getTerminator();
  for (unsigned i = 0, e = TI->getNumSuccessors(); i != e; ++i) {
    MachineBasicBlock *Succ0MBB = FuncInfo.MBBMap[TI->getSuccessor(i)];
    BB->addSuccessor(Succ0MBB);

//===----------------------------------------------------------------------===//
/// ScheduleAndEmitDAG - Pick a safe ordering and emit instructions for each
/// target node in the graph.
void SelectionDAGISel::ScheduleAndEmitDAG(SelectionDAG &DAG) {
  if (ViewSchedDAGs) DAG.viewGraph();
  ScheduleDAG *SL = NULL;

  switch (ISHeuristic) {
  default: assert(0 && "Unrecognized scheduling heuristic");
  case simpleScheduling:
  case simpleNoItinScheduling:
    SL = createSimpleDAGScheduler(ISHeuristic, DAG, BB);