1 //===-- SelectionDAGISel.cpp - Implement the SelectionDAGISel class -------===//
3 // The LLVM Compiler Infrastructure
5 // This file was developed by the LLVM research group and is distributed under
6 // the University of Illinois Open Source License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This implements the SelectionDAGISel class.
12 //===----------------------------------------------------------------------===//
14 #define DEBUG_TYPE "isel"
15 #include "llvm/CodeGen/SelectionDAGISel.h"
16 #include "llvm/CodeGen/ScheduleDAG.h"
17 #include "llvm/CallingConv.h"
18 #include "llvm/Constants.h"
19 #include "llvm/DerivedTypes.h"
20 #include "llvm/Function.h"
21 #include "llvm/GlobalVariable.h"
22 #include "llvm/InlineAsm.h"
23 #include "llvm/Instructions.h"
24 #include "llvm/Intrinsics.h"
25 #include "llvm/IntrinsicInst.h"
26 #include "llvm/CodeGen/IntrinsicLowering.h"
27 #include "llvm/CodeGen/MachineDebugInfo.h"
28 #include "llvm/CodeGen/MachineFunction.h"
29 #include "llvm/CodeGen/MachineFrameInfo.h"
30 #include "llvm/CodeGen/MachineJumpTableInfo.h"
31 #include "llvm/CodeGen/MachineInstrBuilder.h"
32 #include "llvm/CodeGen/SelectionDAG.h"
33 #include "llvm/CodeGen/SSARegMap.h"
34 #include "llvm/Target/MRegisterInfo.h"
35 #include "llvm/Target/TargetData.h"
36 #include "llvm/Target/TargetFrameInfo.h"
37 #include "llvm/Target/TargetInstrInfo.h"
38 #include "llvm/Target/TargetLowering.h"
39 #include "llvm/Target/TargetMachine.h"
40 #include "llvm/Target/TargetOptions.h"
41 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
42 #include "llvm/Support/CommandLine.h"
43 #include "llvm/Support/MathExtras.h"
44 #include "llvm/Support/Debug.h"
45 #include "llvm/Support/Visibility.h"
54 ViewISelDAGs("view-isel-dags", cl::Hidden,
55 cl::desc("Pop up a window to show isel dags as they are selected"));
57 ViewSchedDAGs("view-sched-dags", cl::Hidden,
58 cl::desc("Pop up a window to show sched dags as they are processed"));
60 static const bool ViewISelDAGs = 0, ViewSchedDAGs = 0;
63 // Scheduling heuristics
64 enum SchedHeuristics {
65 defaultScheduling, // Let the target specify its preference.
66 noScheduling, // No scheduling, emit breadth first sequence.
67 simpleScheduling, // Two pass, min. critical path, max. utilization.
68 simpleNoItinScheduling, // Same as above exact using generic latency.
69 listSchedulingBURR, // Bottom-up reg reduction list scheduling.
70 listSchedulingTDRR, // Top-down reg reduction list scheduling.
71 listSchedulingTD // Top-down list scheduler.
75 cl::opt<SchedHeuristics>
78 cl::desc("Choose scheduling style"),
79 cl::init(defaultScheduling),
81 clEnumValN(defaultScheduling, "default",
82 "Target preferred scheduling style"),
83 clEnumValN(noScheduling, "none",
84 "No scheduling: breadth first sequencing"),
85 clEnumValN(simpleScheduling, "simple",
86 "Simple two pass scheduling: minimize critical path "
87 "and maximize processor utilization"),
88 clEnumValN(simpleNoItinScheduling, "simple-noitin",
89 "Simple two pass scheduling: Same as simple "
90 "except using generic latency"),
91 clEnumValN(listSchedulingBURR, "list-burr",
92 "Bottom-up register reduction list scheduling"),
93 clEnumValN(listSchedulingTDRR, "list-tdrr",
94 "Top-down register reduction list scheduling"),
95 clEnumValN(listSchedulingTD, "list-td",
96 "Top-down list scheduler"),
101 /// RegsForValue - This struct represents the physical registers that a
102 /// particular value is assigned and the type information about the value.
103 /// This is needed because values can be promoted into larger registers and
104 /// expanded into multiple smaller registers than the value.
105 struct VISIBILITY_HIDDEN RegsForValue {
106 /// Regs - This list hold the register (for legal and promoted values)
107 /// or register set (for expanded values) that the value should be assigned
109 std::vector<unsigned> Regs;
111 /// RegVT - The value type of each register.
113 MVT::ValueType RegVT;
115 /// ValueVT - The value type of the LLVM value, which may be promoted from
116 /// RegVT or made from merging the two expanded parts.
117 MVT::ValueType ValueVT;
119 RegsForValue() : RegVT(MVT::Other), ValueVT(MVT::Other) {}
121 RegsForValue(unsigned Reg, MVT::ValueType regvt, MVT::ValueType valuevt)
122 : RegVT(regvt), ValueVT(valuevt) {
125 RegsForValue(const std::vector<unsigned> ®s,
126 MVT::ValueType regvt, MVT::ValueType valuevt)
127 : Regs(regs), RegVT(regvt), ValueVT(valuevt) {
130 /// getCopyFromRegs - Emit a series of CopyFromReg nodes that copies from
131 /// this value and returns the result as a ValueVT value. This uses
132 /// Chain/Flag as the input and updates them for the output Chain/Flag.
133 SDOperand getCopyFromRegs(SelectionDAG &DAG,
134 SDOperand &Chain, SDOperand &Flag) const;
136 /// getCopyToRegs - Emit a series of CopyToReg nodes that copies the
137 /// specified value into the registers specified by this object. This uses
138 /// Chain/Flag as the input and updates them for the output Chain/Flag.
139 void getCopyToRegs(SDOperand Val, SelectionDAG &DAG,
140 SDOperand &Chain, SDOperand &Flag,
141 MVT::ValueType PtrVT) const;
143 /// AddInlineAsmOperands - Add this value to the specified inlineasm node
144 /// operand list. This adds the code marker and includes the number of
145 /// values added into it.
146 void AddInlineAsmOperands(unsigned Code, SelectionDAG &DAG,
147 std::vector<SDOperand> &Ops) const;
152 //===--------------------------------------------------------------------===//
153 /// FunctionLoweringInfo - This contains information that is global to a
154 /// function that is used when lowering a region of the function.
155 class FunctionLoweringInfo {
162 FunctionLoweringInfo(TargetLowering &TLI, Function &Fn,MachineFunction &MF);
164 /// MBBMap - A mapping from LLVM basic blocks to their machine code entry.
165 std::map<const BasicBlock*, MachineBasicBlock *> MBBMap;
167 /// ValueMap - Since we emit code for the function a basic block at a time,
168 /// we must remember which virtual registers hold the values for
169 /// cross-basic-block values.
170 std::map<const Value*, unsigned> ValueMap;
172 /// StaticAllocaMap - Keep track of frame indices for fixed sized allocas in
173 /// the entry block. This allows the allocas to be efficiently referenced
174 /// anywhere in the function.
175 std::map<const AllocaInst*, int> StaticAllocaMap;
177 unsigned MakeReg(MVT::ValueType VT) {
178 return RegMap->createVirtualRegister(TLI.getRegClassFor(VT));
181 unsigned CreateRegForValue(const Value *V);
183 unsigned InitializeRegForValue(const Value *V) {
184 unsigned &R = ValueMap[V];
185 assert(R == 0 && "Already initialized this value register!");
186 return R = CreateRegForValue(V);
191 /// isUsedOutsideOfDefiningBlock - Return true if this instruction is used by
192 /// PHI nodes or outside of the basic block that defines it, or used by a
193 /// switch instruction, which may expand to multiple basic blocks.
194 static bool isUsedOutsideOfDefiningBlock(Instruction *I) {
195 if (isa<PHINode>(I)) return true;
196 BasicBlock *BB = I->getParent();
197 for (Value::use_iterator UI = I->use_begin(), E = I->use_end(); UI != E; ++UI)
198 if (cast<Instruction>(*UI)->getParent() != BB || isa<PHINode>(*UI) ||
199 isa<SwitchInst>(*UI))
204 /// isOnlyUsedInEntryBlock - If the specified argument is only used in the
205 /// entry block, return true. This includes arguments used by switches, since
206 /// the switch may expand into multiple basic blocks.
207 static bool isOnlyUsedInEntryBlock(Argument *A) {
208 BasicBlock *Entry = A->getParent()->begin();
209 for (Value::use_iterator UI = A->use_begin(), E = A->use_end(); UI != E; ++UI)
210 if (cast<Instruction>(*UI)->getParent() != Entry || isa<SwitchInst>(*UI))
211 return false; // Use not in entry block.
215 FunctionLoweringInfo::FunctionLoweringInfo(TargetLowering &tli,
216 Function &fn, MachineFunction &mf)
217 : TLI(tli), Fn(fn), MF(mf), RegMap(MF.getSSARegMap()) {
219 // Create a vreg for each argument register that is not dead and is used
220 // outside of the entry block for the function.
221 for (Function::arg_iterator AI = Fn.arg_begin(), E = Fn.arg_end();
223 if (!isOnlyUsedInEntryBlock(AI))
224 InitializeRegForValue(AI);
226 // Initialize the mapping of values to registers. This is only set up for
227 // instruction values that are used outside of the block that defines
229 Function::iterator BB = Fn.begin(), EB = Fn.end();
230 for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I)
231 if (AllocaInst *AI = dyn_cast<AllocaInst>(I))
232 if (ConstantUInt *CUI = dyn_cast<ConstantUInt>(AI->getArraySize())) {
233 const Type *Ty = AI->getAllocatedType();
234 uint64_t TySize = TLI.getTargetData()->getTypeSize(Ty);
236 std::max((unsigned)TLI.getTargetData()->getTypeAlignment(Ty),
239 // If the alignment of the value is smaller than the size of the value,
240 // and if the size of the value is particularly small (<= 8 bytes),
241 // round up to the size of the value for potentially better performance.
243 // FIXME: This could be made better with a preferred alignment hook in
244 // TargetData. It serves primarily to 8-byte align doubles for X86.
245 if (Align < TySize && TySize <= 8) Align = TySize;
246 TySize *= CUI->getValue(); // Get total allocated size.
247 if (TySize == 0) TySize = 1; // Don't create zero-sized stack objects.
248 StaticAllocaMap[AI] =
249 MF.getFrameInfo()->CreateStackObject((unsigned)TySize, Align);
252 for (; BB != EB; ++BB)
253 for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I)
254 if (!I->use_empty() && isUsedOutsideOfDefiningBlock(I))
255 if (!isa<AllocaInst>(I) ||
256 !StaticAllocaMap.count(cast<AllocaInst>(I)))
257 InitializeRegForValue(I);
259 // Create an initial MachineBasicBlock for each LLVM BasicBlock in F. This
260 // also creates the initial PHI MachineInstrs, though none of the input
261 // operands are populated.
262 for (BB = Fn.begin(), EB = Fn.end(); BB != EB; ++BB) {
263 MachineBasicBlock *MBB = new MachineBasicBlock(BB);
265 MF.getBasicBlockList().push_back(MBB);
267 // Create Machine PHI nodes for LLVM PHI nodes, lowering them as
270 for (BasicBlock::iterator I = BB->begin();
271 (PN = dyn_cast<PHINode>(I)); ++I)
272 if (!PN->use_empty()) {
273 MVT::ValueType VT = TLI.getValueType(PN->getType());
274 unsigned NumElements;
275 if (VT != MVT::Vector)
276 NumElements = TLI.getNumElements(VT);
278 MVT::ValueType VT1,VT2;
280 TLI.getPackedTypeBreakdown(cast<PackedType>(PN->getType()),
283 unsigned PHIReg = ValueMap[PN];
284 assert(PHIReg &&"PHI node does not have an assigned virtual register!");
285 for (unsigned i = 0; i != NumElements; ++i)
286 BuildMI(MBB, TargetInstrInfo::PHI, PN->getNumOperands(), PHIReg+i);
291 /// CreateRegForValue - Allocate the appropriate number of virtual registers of
292 /// the correctly promoted or expanded types. Assign these registers
293 /// consecutive vreg numbers and return the first assigned number.
294 unsigned FunctionLoweringInfo::CreateRegForValue(const Value *V) {
295 MVT::ValueType VT = TLI.getValueType(V->getType());
297 // The number of multiples of registers that we need, to, e.g., split up
298 // a <2 x int64> -> 4 x i32 registers.
299 unsigned NumVectorRegs = 1;
301 // If this is a packed type, figure out what type it will decompose into
302 // and how many of the elements it will use.
303 if (VT == MVT::Vector) {
304 const PackedType *PTy = cast<PackedType>(V->getType());
305 unsigned NumElts = PTy->getNumElements();
306 MVT::ValueType EltTy = TLI.getValueType(PTy->getElementType());
308 // Divide the input until we get to a supported size. This will always
309 // end with a scalar if the target doesn't support vectors.
310 while (NumElts > 1 && !TLI.isTypeLegal(getVectorType(EltTy, NumElts))) {
317 VT = getVectorType(EltTy, NumElts);
320 // The common case is that we will only create one register for this
321 // value. If we have that case, create and return the virtual register.
322 unsigned NV = TLI.getNumElements(VT);
324 // If we are promoting this value, pick the next largest supported type.
325 MVT::ValueType PromotedType = TLI.getTypeToTransformTo(VT);
326 unsigned Reg = MakeReg(PromotedType);
327 // If this is a vector of supported or promoted types (e.g. 4 x i16),
328 // create all of the registers.
329 for (unsigned i = 1; i != NumVectorRegs; ++i)
330 MakeReg(PromotedType);
334 // If this value is represented with multiple target registers, make sure
335 // to create enough consecutive registers of the right (smaller) type.
336 unsigned NT = VT-1; // Find the type to use.
337 while (TLI.getNumElements((MVT::ValueType)NT) != 1)
340 unsigned R = MakeReg((MVT::ValueType)NT);
341 for (unsigned i = 1; i != NV*NumVectorRegs; ++i)
342 MakeReg((MVT::ValueType)NT);
346 //===----------------------------------------------------------------------===//
347 /// SelectionDAGLowering - This is the common target-independent lowering
348 /// implementation that is parameterized by a TargetLowering object.
349 /// Also, targets can overload any lowering method.
352 class SelectionDAGLowering {
353 MachineBasicBlock *CurMBB;
355 std::map<const Value*, SDOperand> NodeMap;
357 /// PendingLoads - Loads are not emitted to the program immediately. We bunch
358 /// them up and then emit token factor nodes when possible. This allows us to
359 /// get simple disambiguation between loads without worrying about alias
361 std::vector<SDOperand> PendingLoads;
363 /// Case - A pair of values to record the Value for a switch case, and the
364 /// case's target basic block.
365 typedef std::pair<Constant*, MachineBasicBlock*> Case;
366 typedef std::vector<Case>::iterator CaseItr;
367 typedef std::pair<CaseItr, CaseItr> CaseRange;
369 /// CaseRec - A struct with ctor used in lowering switches to a binary tree
370 /// of conditional branches.
372 CaseRec(MachineBasicBlock *bb, Constant *lt, Constant *ge, CaseRange r) :
373 CaseBB(bb), LT(lt), GE(ge), Range(r) {}
375 /// CaseBB - The MBB in which to emit the compare and branch
376 MachineBasicBlock *CaseBB;
377 /// LT, GE - If nonzero, we know the current case value must be less-than or
378 /// greater-than-or-equal-to these Constants.
381 /// Range - A pair of iterators representing the range of case values to be
382 /// processed at this point in the binary search tree.
386 /// The comparison function for sorting Case values.
388 bool operator () (const Case& C1, const Case& C2) {
389 if (const ConstantUInt* U1 = dyn_cast<const ConstantUInt>(C1.first))
390 return U1->getValue() < cast<const ConstantUInt>(C2.first)->getValue();
392 const ConstantSInt* S1 = dyn_cast<const ConstantSInt>(C1.first);
393 return S1->getValue() < cast<const ConstantSInt>(C2.first)->getValue();
398 // TLI - This is information that describes the available target features we
399 // need for lowering. This indicates when operations are unavailable,
400 // implemented with a libcall, etc.
403 const TargetData *TD;
405 /// SwitchCases - Vector of CaseBlock structures used to communicate
406 /// SwitchInst code generation information.
407 std::vector<SelectionDAGISel::CaseBlock> SwitchCases;
408 SelectionDAGISel::JumpTable JT;
410 /// FuncInfo - Information about the function as a whole.
412 FunctionLoweringInfo &FuncInfo;
414 SelectionDAGLowering(SelectionDAG &dag, TargetLowering &tli,
415 FunctionLoweringInfo &funcinfo)
416 : TLI(tli), DAG(dag), TD(DAG.getTarget().getTargetData()),
417 JT(0,0,0,0), FuncInfo(funcinfo) {
420 /// getRoot - Return the current virtual root of the Selection DAG.
422 SDOperand getRoot() {
423 if (PendingLoads.empty())
424 return DAG.getRoot();
426 if (PendingLoads.size() == 1) {
427 SDOperand Root = PendingLoads[0];
429 PendingLoads.clear();
433 // Otherwise, we have to make a token factor node.
434 SDOperand Root = DAG.getNode(ISD::TokenFactor, MVT::Other, PendingLoads);
435 PendingLoads.clear();
440 void visit(Instruction &I) { visit(I.getOpcode(), I); }
442 void visit(unsigned Opcode, User &I) {
444 default: assert(0 && "Unknown instruction type encountered!");
446 // Build the switch statement using the Instruction.def file.
447 #define HANDLE_INST(NUM, OPCODE, CLASS) \
448 case Instruction::OPCODE:return visit##OPCODE((CLASS&)I);
449 #include "llvm/Instruction.def"
453 void setCurrentBasicBlock(MachineBasicBlock *MBB) { CurMBB = MBB; }
455 SDOperand getLoadFrom(const Type *Ty, SDOperand Ptr,
456 SDOperand SrcValue, SDOperand Root,
459 SDOperand getIntPtrConstant(uint64_t Val) {
460 return DAG.getConstant(Val, TLI.getPointerTy());
463 SDOperand getValue(const Value *V);
465 const SDOperand &setValue(const Value *V, SDOperand NewN) {
466 SDOperand &N = NodeMap[V];
467 assert(N.Val == 0 && "Already set a value for this node!");
471 RegsForValue GetRegistersForValue(const std::string &ConstrCode,
473 bool OutReg, bool InReg,
474 std::set<unsigned> &OutputRegs,
475 std::set<unsigned> &InputRegs);
477 // Terminator instructions.
478 void visitRet(ReturnInst &I);
479 void visitBr(BranchInst &I);
480 void visitSwitch(SwitchInst &I);
481 void visitUnreachable(UnreachableInst &I) { /* noop */ }
483 // Helper for visitSwitch
484 void visitSwitchCase(SelectionDAGISel::CaseBlock &CB);
485 void visitJumpTable(SelectionDAGISel::JumpTable &JT);
487 // These all get lowered before this pass.
488 void visitInvoke(InvokeInst &I) { assert(0 && "TODO"); }
489 void visitUnwind(UnwindInst &I) { assert(0 && "TODO"); }
491 void visitBinary(User &I, unsigned IntOp, unsigned FPOp, unsigned VecOp);
492 void visitShift(User &I, unsigned Opcode);
493 void visitAdd(User &I) {
494 visitBinary(I, ISD::ADD, ISD::FADD, ISD::VADD);
496 void visitSub(User &I);
497 void visitMul(User &I) {
498 visitBinary(I, ISD::MUL, ISD::FMUL, ISD::VMUL);
500 void visitDiv(User &I) {
501 const Type *Ty = I.getType();
503 Ty->isSigned() ? ISD::SDIV : ISD::UDIV, ISD::FDIV,
504 Ty->isSigned() ? ISD::VSDIV : ISD::VUDIV);
506 void visitRem(User &I) {
507 const Type *Ty = I.getType();
508 visitBinary(I, Ty->isSigned() ? ISD::SREM : ISD::UREM, ISD::FREM, 0);
510 void visitAnd(User &I) { visitBinary(I, ISD::AND, 0, ISD::VAND); }
511 void visitOr (User &I) { visitBinary(I, ISD::OR, 0, ISD::VOR); }
512 void visitXor(User &I) { visitBinary(I, ISD::XOR, 0, ISD::VXOR); }
513 void visitShl(User &I) { visitShift(I, ISD::SHL); }
514 void visitShr(User &I) {
515 visitShift(I, I.getType()->isUnsigned() ? ISD::SRL : ISD::SRA);
518 void visitSetCC(User &I, ISD::CondCode SignedOpc, ISD::CondCode UnsignedOpc,
519 ISD::CondCode FPOpc);
520 void visitSetEQ(User &I) { visitSetCC(I, ISD::SETEQ, ISD::SETEQ,
522 void visitSetNE(User &I) { visitSetCC(I, ISD::SETNE, ISD::SETNE,
524 void visitSetLE(User &I) { visitSetCC(I, ISD::SETLE, ISD::SETULE,
526 void visitSetGE(User &I) { visitSetCC(I, ISD::SETGE, ISD::SETUGE,
528 void visitSetLT(User &I) { visitSetCC(I, ISD::SETLT, ISD::SETULT,
530 void visitSetGT(User &I) { visitSetCC(I, ISD::SETGT, ISD::SETUGT,
533 void visitExtractElement(User &I);
534 void visitInsertElement(User &I);
535 void visitShuffleVector(User &I);
537 void visitGetElementPtr(User &I);
538 void visitCast(User &I);
539 void visitSelect(User &I);
541 void visitMalloc(MallocInst &I);
542 void visitFree(FreeInst &I);
543 void visitAlloca(AllocaInst &I);
544 void visitLoad(LoadInst &I);
545 void visitStore(StoreInst &I);
546 void visitPHI(PHINode &I) { } // PHI nodes are handled specially.
547 void visitCall(CallInst &I);
548 void visitInlineAsm(CallInst &I);
549 const char *visitIntrinsicCall(CallInst &I, unsigned Intrinsic);
550 void visitTargetIntrinsic(CallInst &I, unsigned Intrinsic);
552 void visitVAStart(CallInst &I);
553 void visitVAArg(VAArgInst &I);
554 void visitVAEnd(CallInst &I);
555 void visitVACopy(CallInst &I);
556 void visitFrameReturnAddress(CallInst &I, bool isFrameAddress);
558 void visitMemIntrinsic(CallInst &I, unsigned Op);
560 void visitUserOp1(Instruction &I) {
561 assert(0 && "UserOp1 should not exist at instruction selection time!");
564 void visitUserOp2(Instruction &I) {
565 assert(0 && "UserOp2 should not exist at instruction selection time!");
569 } // end namespace llvm
571 SDOperand SelectionDAGLowering::getValue(const Value *V) {
572 SDOperand &N = NodeMap[V];
575 const Type *VTy = V->getType();
576 MVT::ValueType VT = TLI.getValueType(VTy);
577 if (Constant *C = const_cast<Constant*>(dyn_cast<Constant>(V))) {
578 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) {
579 visit(CE->getOpcode(), *CE);
580 assert(N.Val && "visit didn't populate the ValueMap!");
582 } else if (GlobalValue *GV = dyn_cast<GlobalValue>(C)) {
583 return N = DAG.getGlobalAddress(GV, VT);
584 } else if (isa<ConstantPointerNull>(C)) {
585 return N = DAG.getConstant(0, TLI.getPointerTy());
586 } else if (isa<UndefValue>(C)) {
587 if (!isa<PackedType>(VTy))
588 return N = DAG.getNode(ISD::UNDEF, VT);
590 // Create a VBUILD_VECTOR of undef nodes.
591 const PackedType *PTy = cast<PackedType>(VTy);
592 unsigned NumElements = PTy->getNumElements();
593 MVT::ValueType PVT = TLI.getValueType(PTy->getElementType());
595 std::vector<SDOperand> Ops;
596 Ops.assign(NumElements, DAG.getNode(ISD::UNDEF, PVT));
598 // Create a VConstant node with generic Vector type.
599 Ops.push_back(DAG.getConstant(NumElements, MVT::i32));
600 Ops.push_back(DAG.getValueType(PVT));
601 return N = DAG.getNode(ISD::VBUILD_VECTOR, MVT::Vector, Ops);
602 } else if (ConstantFP *CFP = dyn_cast<ConstantFP>(C)) {
603 return N = DAG.getConstantFP(CFP->getValue(), VT);
604 } else if (const PackedType *PTy = dyn_cast<PackedType>(VTy)) {
605 unsigned NumElements = PTy->getNumElements();
606 MVT::ValueType PVT = TLI.getValueType(PTy->getElementType());
608 // Now that we know the number and type of the elements, push a
609 // Constant or ConstantFP node onto the ops list for each element of
610 // the packed constant.
611 std::vector<SDOperand> Ops;
612 if (ConstantPacked *CP = dyn_cast<ConstantPacked>(C)) {
613 for (unsigned i = 0; i != NumElements; ++i)
614 Ops.push_back(getValue(CP->getOperand(i)));
616 assert(isa<ConstantAggregateZero>(C) && "Unknown packed constant!");
618 if (MVT::isFloatingPoint(PVT))
619 Op = DAG.getConstantFP(0, PVT);
621 Op = DAG.getConstant(0, PVT);
622 Ops.assign(NumElements, Op);
625 // Create a VBUILD_VECTOR node with generic Vector type.
626 Ops.push_back(DAG.getConstant(NumElements, MVT::i32));
627 Ops.push_back(DAG.getValueType(PVT));
628 return N = DAG.getNode(ISD::VBUILD_VECTOR, MVT::Vector, Ops);
630 // Canonicalize all constant ints to be unsigned.
631 return N = DAG.getConstant(cast<ConstantIntegral>(C)->getRawValue(),VT);
635 if (const AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
636 std::map<const AllocaInst*, int>::iterator SI =
637 FuncInfo.StaticAllocaMap.find(AI);
638 if (SI != FuncInfo.StaticAllocaMap.end())
639 return DAG.getFrameIndex(SI->second, TLI.getPointerTy());
642 std::map<const Value*, unsigned>::const_iterator VMI =
643 FuncInfo.ValueMap.find(V);
644 assert(VMI != FuncInfo.ValueMap.end() && "Value not in map!");
646 unsigned InReg = VMI->second;
648 // If this type is not legal, make it so now.
649 if (VT != MVT::Vector) {
650 MVT::ValueType DestVT = TLI.getTypeToTransformTo(VT);
652 N = DAG.getCopyFromReg(DAG.getEntryNode(), InReg, DestVT);
654 // Source must be expanded. This input value is actually coming from the
655 // register pair VMI->second and VMI->second+1.
656 N = DAG.getNode(ISD::BUILD_PAIR, VT, N,
657 DAG.getCopyFromReg(DAG.getEntryNode(), InReg+1, DestVT));
658 } else if (DestVT > VT) { // Promotion case
659 if (MVT::isFloatingPoint(VT))
660 N = DAG.getNode(ISD::FP_ROUND, VT, N);
662 N = DAG.getNode(ISD::TRUNCATE, VT, N);
665 // Otherwise, if this is a vector, make it available as a generic vector
667 MVT::ValueType PTyElementVT, PTyLegalElementVT;
668 const PackedType *PTy = cast<PackedType>(VTy);
669 unsigned NE = TLI.getPackedTypeBreakdown(PTy, PTyElementVT,
672 // Build a VBUILD_VECTOR with the input registers.
673 std::vector<SDOperand> Ops;
674 if (PTyElementVT == PTyLegalElementVT) {
675 // If the value types are legal, just VBUILD the CopyFromReg nodes.
676 for (unsigned i = 0; i != NE; ++i)
677 Ops.push_back(DAG.getCopyFromReg(DAG.getEntryNode(), InReg++,
679 } else if (PTyElementVT < PTyLegalElementVT) {
680 // If the register was promoted, use TRUNCATE of FP_ROUND as appropriate.
681 for (unsigned i = 0; i != NE; ++i) {
682 SDOperand Op = DAG.getCopyFromReg(DAG.getEntryNode(), InReg++,
684 if (MVT::isFloatingPoint(PTyElementVT))
685 Op = DAG.getNode(ISD::FP_ROUND, PTyElementVT, Op);
687 Op = DAG.getNode(ISD::TRUNCATE, PTyElementVT, Op);
691 // If the register was expanded, use BUILD_PAIR.
692 assert((NE & 1) == 0 && "Must expand into a multiple of 2 elements!");
693 for (unsigned i = 0; i != NE/2; ++i) {
694 SDOperand Op0 = DAG.getCopyFromReg(DAG.getEntryNode(), InReg++,
696 SDOperand Op1 = DAG.getCopyFromReg(DAG.getEntryNode(), InReg++,
698 Ops.push_back(DAG.getNode(ISD::BUILD_PAIR, VT, Op0, Op1));
702 Ops.push_back(DAG.getConstant(NE, MVT::i32));
703 Ops.push_back(DAG.getValueType(PTyLegalElementVT));
704 N = DAG.getNode(ISD::VBUILD_VECTOR, MVT::Vector, Ops);
706 // Finally, use a VBIT_CONVERT to make this available as the appropriate
708 N = DAG.getNode(ISD::VBIT_CONVERT, MVT::Vector, N,
709 DAG.getConstant(PTy->getNumElements(),
711 DAG.getValueType(TLI.getValueType(PTy->getElementType())));
718 void SelectionDAGLowering::visitRet(ReturnInst &I) {
719 if (I.getNumOperands() == 0) {
720 DAG.setRoot(DAG.getNode(ISD::RET, MVT::Other, getRoot()));
723 std::vector<SDOperand> NewValues;
724 NewValues.push_back(getRoot());
725 for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i) {
726 SDOperand RetOp = getValue(I.getOperand(i));
727 bool isSigned = I.getOperand(i)->getType()->isSigned();
729 // If this is an integer return value, we need to promote it ourselves to
730 // the full width of a register, since LegalizeOp will use ANY_EXTEND rather
732 // FIXME: C calling convention requires the return type to be promoted to
733 // at least 32-bit. But this is not necessary for non-C calling conventions.
734 if (MVT::isInteger(RetOp.getValueType()) &&
735 RetOp.getValueType() < MVT::i64) {
736 MVT::ValueType TmpVT;
737 if (TLI.getTypeAction(MVT::i32) == TargetLowering::Promote)
738 TmpVT = TLI.getTypeToTransformTo(MVT::i32);
743 RetOp = DAG.getNode(ISD::SIGN_EXTEND, TmpVT, RetOp);
745 RetOp = DAG.getNode(ISD::ZERO_EXTEND, TmpVT, RetOp);
747 NewValues.push_back(RetOp);
748 NewValues.push_back(DAG.getConstant(isSigned, MVT::i32));
750 DAG.setRoot(DAG.getNode(ISD::RET, MVT::Other, NewValues));
753 void SelectionDAGLowering::visitBr(BranchInst &I) {
754 // Update machine-CFG edges.
755 MachineBasicBlock *Succ0MBB = FuncInfo.MBBMap[I.getSuccessor(0)];
756 CurMBB->addSuccessor(Succ0MBB);
758 // Figure out which block is immediately after the current one.
759 MachineBasicBlock *NextBlock = 0;
760 MachineFunction::iterator BBI = CurMBB;
761 if (++BBI != CurMBB->getParent()->end())
764 if (I.isUnconditional()) {
765 // If this is not a fall-through branch, emit the branch.
766 if (Succ0MBB != NextBlock)
767 DAG.setRoot(DAG.getNode(ISD::BR, MVT::Other, getRoot(),
768 DAG.getBasicBlock(Succ0MBB)));
770 MachineBasicBlock *Succ1MBB = FuncInfo.MBBMap[I.getSuccessor(1)];
771 CurMBB->addSuccessor(Succ1MBB);
773 SDOperand Cond = getValue(I.getCondition());
774 if (Succ1MBB == NextBlock) {
775 // If the condition is false, fall through. This means we should branch
776 // if the condition is true to Succ #0.
777 DAG.setRoot(DAG.getNode(ISD::BRCOND, MVT::Other, getRoot(),
778 Cond, DAG.getBasicBlock(Succ0MBB)));
779 } else if (Succ0MBB == NextBlock) {
780 // If the condition is true, fall through. This means we should branch if
781 // the condition is false to Succ #1. Invert the condition first.
782 SDOperand True = DAG.getConstant(1, Cond.getValueType());
783 Cond = DAG.getNode(ISD::XOR, Cond.getValueType(), Cond, True);
784 DAG.setRoot(DAG.getNode(ISD::BRCOND, MVT::Other, getRoot(),
785 Cond, DAG.getBasicBlock(Succ1MBB)));
787 std::vector<SDOperand> Ops;
788 Ops.push_back(getRoot());
789 // If the false case is the current basic block, then this is a self
790 // loop. We do not want to emit "Loop: ... brcond Out; br Loop", as it
791 // adds an extra instruction in the loop. Instead, invert the
792 // condition and emit "Loop: ... br!cond Loop; br Out.
793 if (CurMBB == Succ1MBB) {
794 std::swap(Succ0MBB, Succ1MBB);
795 SDOperand True = DAG.getConstant(1, Cond.getValueType());
796 Cond = DAG.getNode(ISD::XOR, Cond.getValueType(), Cond, True);
798 SDOperand True = DAG.getNode(ISD::BRCOND, MVT::Other, getRoot(), Cond,
799 DAG.getBasicBlock(Succ0MBB));
800 DAG.setRoot(DAG.getNode(ISD::BR, MVT::Other, True,
801 DAG.getBasicBlock(Succ1MBB)));
806 /// visitSwitchCase - Emits the necessary code to represent a single node in
807 /// the binary search tree resulting from lowering a switch instruction.
808 void SelectionDAGLowering::visitSwitchCase(SelectionDAGISel::CaseBlock &CB) {
809 SDOperand SwitchOp = getValue(CB.SwitchV);
810 SDOperand CaseOp = getValue(CB.CaseC);
811 SDOperand Cond = DAG.getSetCC(MVT::i1, SwitchOp, CaseOp, CB.CC);
813 // Set NextBlock to be the MBB immediately after the current one, if any.
814 // This is used to avoid emitting unnecessary branches to the next block.
815 MachineBasicBlock *NextBlock = 0;
816 MachineFunction::iterator BBI = CurMBB;
817 if (++BBI != CurMBB->getParent()->end())
820 // If the lhs block is the next block, invert the condition so that we can
821 // fall through to the lhs instead of the rhs block.
822 if (CB.LHSBB == NextBlock) {
823 std::swap(CB.LHSBB, CB.RHSBB);
824 SDOperand True = DAG.getConstant(1, Cond.getValueType());
825 Cond = DAG.getNode(ISD::XOR, Cond.getValueType(), Cond, True);
827 SDOperand BrCond = DAG.getNode(ISD::BRCOND, MVT::Other, getRoot(), Cond,
828 DAG.getBasicBlock(CB.LHSBB));
829 if (CB.RHSBB == NextBlock)
832 DAG.setRoot(DAG.getNode(ISD::BR, MVT::Other, BrCond,
833 DAG.getBasicBlock(CB.RHSBB)));
834 // Update successor info
835 CurMBB->addSuccessor(CB.LHSBB);
836 CurMBB->addSuccessor(CB.RHSBB);
839 /// visitSwitchCase - Emits the necessary code to represent a single node in
840 /// the binary search tree resulting from lowering a switch instruction.
841 void SelectionDAGLowering::visitJumpTable(SelectionDAGISel::JumpTable &JT) {
842 // FIXME: Need to emit different code for PIC vs. Non-PIC, specifically,
843 // we need to add the address of the jump table to the value loaded, since
844 // the entries in the jump table will be differences rather than absolute
847 // Emit the code for the jump table
848 MVT::ValueType PTy = TLI.getPointerTy();
849 unsigned PTyBytes = MVT::getSizeInBits(PTy)/8;
850 SDOperand Copy = DAG.getCopyFromReg(getRoot(), JT.Reg, PTy);
851 SDOperand IDX = DAG.getNode(ISD::MUL, PTy, Copy,
852 DAG.getConstant(PTyBytes, PTy));
853 SDOperand TAB = DAG.getJumpTable(JT.JTI,PTy);
854 SDOperand ADD = DAG.getNode(ISD::ADD, PTy, IDX, TAB);
855 SDOperand LD = DAG.getLoad(PTy, Copy.getValue(1), ADD, DAG.getSrcValue(0));
856 if (TLI.getTargetMachine().getRelocationModel() == Reloc::PIC_) {
857 ADD = DAG.getNode(ISD::ADD, PTy, LD.getValue(0), TAB);
858 DAG.setRoot(DAG.getNode(ISD::BRIND, MVT::Other, LD.getValue(1), ADD));
860 DAG.setRoot(DAG.getNode(ISD::BRIND, MVT::Other, LD.getValue(1), LD));
864 void SelectionDAGLowering::visitSwitch(SwitchInst &I) {
865 // Figure out which block is immediately after the current one.
866 MachineBasicBlock *NextBlock = 0;
867 MachineFunction::iterator BBI = CurMBB;
868 if (++BBI != CurMBB->getParent()->end())
871 // If there is only the default destination, branch to it if it is not the
872 // next basic block. Otherwise, just fall through.
873 if (I.getNumOperands() == 2) {
874 // Update machine-CFG edges.
875 MachineBasicBlock *DefaultMBB = FuncInfo.MBBMap[I.getDefaultDest()];
876 // If this is not a fall-through branch, emit the branch.
877 if (DefaultMBB != NextBlock)
878 DAG.setRoot(DAG.getNode(ISD::BR, MVT::Other, getRoot(),
879 DAG.getBasicBlock(DefaultMBB)));
880 CurMBB->addSuccessor(DefaultMBB);
884 // If there are any non-default case statements, create a vector of Cases
885 // representing each one, and sort the vector so that we can efficiently
886 // create a binary search tree from them.
887 std::vector<Case> Cases;
888 for (unsigned i = 1; i < I.getNumSuccessors(); ++i) {
889 MachineBasicBlock *SMBB = FuncInfo.MBBMap[I.getSuccessor(i)];
890 Cases.push_back(Case(I.getSuccessorValue(i), SMBB));
892 std::sort(Cases.begin(), Cases.end(), CaseCmp());
894 // Get the Value to be switched on and default basic blocks, which will be
895 // inserted into CaseBlock records, representing basic blocks in the binary
897 Value *SV = I.getOperand(0);
898 MachineBasicBlock *Default = FuncInfo.MBBMap[I.getDefaultDest()];
900 // Get the MachineFunction which holds the current MBB. This is used during
901 // emission of jump tables, and when inserting any additional MBBs necessary
902 // to represent the switch.
903 MachineFunction *CurMF = CurMBB->getParent();
904 const BasicBlock *LLVMBB = CurMBB->getBasicBlock();
906 // If the switch has more than 5 blocks, and at least 31.25% dense, and the
907 // target supports indirect branches, then emit a jump table rather than
908 // lowering the switch to a binary tree of conditional branches.
909 // FIXME: Make this work with PIC code
910 if (TLI.isOperationLegal(ISD::BRIND, TLI.getPointerTy()) &&
912 uint64_t First = cast<ConstantIntegral>(Cases.front().first)->getRawValue();
913 uint64_t Last = cast<ConstantIntegral>(Cases.back().first)->getRawValue();
914 double Density = (double)Cases.size() / (double)((Last - First) + 1ULL);
916 if (Density >= 0.3125) {
917 Reloc::Model Relocs = TLI.getTargetMachine().getRelocationModel();
919 // Create a new basic block to hold the code for loading the address
920 // of the jump table, and jumping to it. Update successor information;
921 // we will either branch to the default case for the switch, or the jump
923 MachineBasicBlock *JumpTableBB = new MachineBasicBlock(LLVMBB);
924 CurMF->getBasicBlockList().insert(BBI, JumpTableBB);
925 CurMBB->addSuccessor(Default);
926 CurMBB->addSuccessor(JumpTableBB);
928 // Subtract the lowest switch case value from the value being switched on
929 // and conditional branch to default mbb if the result is greater than the
930 // difference between smallest and largest cases.
931 SDOperand SwitchOp = getValue(SV);
932 MVT::ValueType VT = SwitchOp.getValueType();
933 SDOperand SUB = DAG.getNode(ISD::SUB, VT, SwitchOp,
934 DAG.getConstant(First, VT));
936 // The SDNode we just created, which holds the value being switched on
937 // minus the the smallest case value, needs to be copied to a virtual
938 // register so it can be used as an index into the jump table in a
939 // subsequent basic block. This value may be smaller or larger than the
940 // target's pointer type, and therefore require extension or truncating.
941 if (VT > TLI.getPointerTy())
942 SwitchOp = DAG.getNode(ISD::TRUNCATE, TLI.getPointerTy(), SUB);
944 SwitchOp = DAG.getNode(ISD::ZERO_EXTEND, TLI.getPointerTy(), SUB);
945 unsigned JumpTableReg = FuncInfo.MakeReg(TLI.getPointerTy());
946 SDOperand CopyTo = DAG.getCopyToReg(getRoot(), JumpTableReg, SwitchOp);
948 // Emit the range check for the jump table, and branch to the default
949 // block for the switch statement if the value being switched on exceeds
950 // the largest case in the switch.
951 SDOperand CMP = DAG.getSetCC(TLI.getSetCCResultTy(), SUB,
952 DAG.getConstant(Last-First,VT), ISD::SETUGT);
953 DAG.setRoot(DAG.getNode(ISD::BRCOND, MVT::Other, CopyTo, CMP,
954 DAG.getBasicBlock(Default)));
956 // Build a vector of destination BBs, corresponding to each target
957 // of the jump table. If the value of the jump table slot corresponds to
958 // a case statement, push the case's BB onto the vector, otherwise, push
960 std::set<MachineBasicBlock*> UniqueBBs;
961 std::vector<MachineBasicBlock*> DestBBs;
962 uint64_t TEI = First;
963 for (CaseItr ii = Cases.begin(), ee = Cases.end(); ii != ee; ++TEI) {
964 if (cast<ConstantIntegral>(ii->first)->getRawValue() == TEI) {
965 DestBBs.push_back(ii->second);
966 UniqueBBs.insert(ii->second);
969 DestBBs.push_back(Default);
970 UniqueBBs.insert(Default);
974 // Update successor info
975 for (std::set<MachineBasicBlock*>::iterator ii = UniqueBBs.begin(),
976 ee = UniqueBBs.end(); ii != ee; ++ii)
977 JumpTableBB->addSuccessor(*ii);
979 // Create a jump table index for this jump table, or return an existing
981 unsigned JTI = CurMF->getJumpTableInfo()->getJumpTableIndex(DestBBs);
983 // Set the jump table information so that we can codegen it as a second
985 JT.Reg = JumpTableReg;
987 JT.MBB = JumpTableBB;
988 JT.Default = Default;
993 // Push the initial CaseRec onto the worklist
994 std::vector<CaseRec> CaseVec;
995 CaseVec.push_back(CaseRec(CurMBB,0,0,CaseRange(Cases.begin(),Cases.end())));
997 while (!CaseVec.empty()) {
998 // Grab a record representing a case range to process off the worklist
999 CaseRec CR = CaseVec.back();
1002 // Size is the number of Cases represented by this range. If Size is 1,
1003 // then we are processing a leaf of the binary search tree. Otherwise,
1004 // we need to pick a pivot, and push left and right ranges onto the
1006 unsigned Size = CR.Range.second - CR.Range.first;
1009 // Create a CaseBlock record representing a conditional branch to
1010 // the Case's target mbb if the value being switched on SV is equal
1011 // to C. Otherwise, branch to default.
1012 Constant *C = CR.Range.first->first;
1013 MachineBasicBlock *Target = CR.Range.first->second;
1014 SelectionDAGISel::CaseBlock CB(ISD::SETEQ, SV, C, Target, Default,
1016 // If the MBB representing the leaf node is the current MBB, then just
1017 // call visitSwitchCase to emit the code into the current block.
1018 // Otherwise, push the CaseBlock onto the vector to be later processed
1019 // by SDISel, and insert the node's MBB before the next MBB.
1020 if (CR.CaseBB == CurMBB)
1021 visitSwitchCase(CB);
1023 SwitchCases.push_back(CB);
1024 CurMF->getBasicBlockList().insert(BBI, CR.CaseBB);
1027 // split case range at pivot
1028 CaseItr Pivot = CR.Range.first + (Size / 2);
1029 CaseRange LHSR(CR.Range.first, Pivot);
1030 CaseRange RHSR(Pivot, CR.Range.second);
1031 Constant *C = Pivot->first;
1032 MachineBasicBlock *RHSBB = 0, *LHSBB = 0;
1033 // We know that we branch to the LHS if the Value being switched on is
1034 // less than the Pivot value, C. We use this to optimize our binary
1035 // tree a bit, by recognizing that if SV is greater than or equal to the
1036 // LHS's Case Value, and that Case Value is exactly one less than the
1037 // Pivot's Value, then we can branch directly to the LHS's Target,
1038 // rather than creating a leaf node for it.
1039 if ((LHSR.second - LHSR.first) == 1 &&
1040 LHSR.first->first == CR.GE &&
1041 cast<ConstantIntegral>(C)->getRawValue() ==
1042 (cast<ConstantIntegral>(CR.GE)->getRawValue() + 1ULL)) {
1043 LHSBB = LHSR.first->second;
1045 LHSBB = new MachineBasicBlock(LLVMBB);
1046 CaseVec.push_back(CaseRec(LHSBB,C,CR.GE,LHSR));
1048 // Similar to the optimization above, if the Value being switched on is
1049 // known to be less than the Constant CR.LT, and the current Case Value
1050 // is CR.LT - 1, then we can branch directly to the target block for
1051 // the current Case Value, rather than emitting a RHS leaf node for it.
1052 if ((RHSR.second - RHSR.first) == 1 && CR.LT &&
1053 cast<ConstantIntegral>(RHSR.first->first)->getRawValue() ==
1054 (cast<ConstantIntegral>(CR.LT)->getRawValue() - 1ULL)) {
1055 RHSBB = RHSR.first->second;
1057 RHSBB = new MachineBasicBlock(LLVMBB);
1058 CaseVec.push_back(CaseRec(RHSBB,CR.LT,C,RHSR));
1060 // Create a CaseBlock record representing a conditional branch to
1061 // the LHS node if the value being switched on SV is less than C.
1062 // Otherwise, branch to LHS.
1063 ISD::CondCode CC = C->getType()->isSigned() ? ISD::SETLT : ISD::SETULT;
1064 SelectionDAGISel::CaseBlock CB(CC, SV, C, LHSBB, RHSBB, CR.CaseBB);
1065 if (CR.CaseBB == CurMBB)
1066 visitSwitchCase(CB);
1068 SwitchCases.push_back(CB);
1069 CurMF->getBasicBlockList().insert(BBI, CR.CaseBB);
1075 void SelectionDAGLowering::visitSub(User &I) {
1076 // -0.0 - X --> fneg
1077 if (I.getType()->isFloatingPoint()) {
1078 if (ConstantFP *CFP = dyn_cast<ConstantFP>(I.getOperand(0)))
1079 if (CFP->isExactlyValue(-0.0)) {
1080 SDOperand Op2 = getValue(I.getOperand(1));
1081 setValue(&I, DAG.getNode(ISD::FNEG, Op2.getValueType(), Op2));
1085 visitBinary(I, ISD::SUB, ISD::FSUB, ISD::VSUB);
1088 void SelectionDAGLowering::visitBinary(User &I, unsigned IntOp, unsigned FPOp,
1090 const Type *Ty = I.getType();
1091 SDOperand Op1 = getValue(I.getOperand(0));
1092 SDOperand Op2 = getValue(I.getOperand(1));
1094 if (Ty->isIntegral()) {
1095 setValue(&I, DAG.getNode(IntOp, Op1.getValueType(), Op1, Op2));
1096 } else if (Ty->isFloatingPoint()) {
1097 setValue(&I, DAG.getNode(FPOp, Op1.getValueType(), Op1, Op2));
1099 const PackedType *PTy = cast<PackedType>(Ty);
1100 SDOperand Num = DAG.getConstant(PTy->getNumElements(), MVT::i32);
1101 SDOperand Typ = DAG.getValueType(TLI.getValueType(PTy->getElementType()));
1102 setValue(&I, DAG.getNode(VecOp, MVT::Vector, Op1, Op2, Num, Typ));
1106 void SelectionDAGLowering::visitShift(User &I, unsigned Opcode) {
1107 SDOperand Op1 = getValue(I.getOperand(0));
1108 SDOperand Op2 = getValue(I.getOperand(1));
1110 Op2 = DAG.getNode(ISD::ANY_EXTEND, TLI.getShiftAmountTy(), Op2);
1112 setValue(&I, DAG.getNode(Opcode, Op1.getValueType(), Op1, Op2));
1115 void SelectionDAGLowering::visitSetCC(User &I,ISD::CondCode SignedOpcode,
1116 ISD::CondCode UnsignedOpcode,
1117 ISD::CondCode FPOpcode) {
1118 SDOperand Op1 = getValue(I.getOperand(0));
1119 SDOperand Op2 = getValue(I.getOperand(1));
1120 ISD::CondCode Opcode = SignedOpcode;
1121 if (!FiniteOnlyFPMath() && I.getOperand(0)->getType()->isFloatingPoint())
1123 else if (I.getOperand(0)->getType()->isUnsigned())
1124 Opcode = UnsignedOpcode;
1125 setValue(&I, DAG.getSetCC(MVT::i1, Op1, Op2, Opcode));
1128 void SelectionDAGLowering::visitSelect(User &I) {
1129 SDOperand Cond = getValue(I.getOperand(0));
1130 SDOperand TrueVal = getValue(I.getOperand(1));
1131 SDOperand FalseVal = getValue(I.getOperand(2));
1132 if (!isa<PackedType>(I.getType())) {
1133 setValue(&I, DAG.getNode(ISD::SELECT, TrueVal.getValueType(), Cond,
1134 TrueVal, FalseVal));
1136 setValue(&I, DAG.getNode(ISD::VSELECT, MVT::Vector, Cond, TrueVal, FalseVal,
1137 *(TrueVal.Val->op_end()-2),
1138 *(TrueVal.Val->op_end()-1)));
1142 void SelectionDAGLowering::visitCast(User &I) {
1143 SDOperand N = getValue(I.getOperand(0));
1144 MVT::ValueType SrcVT = N.getValueType();
1145 MVT::ValueType DestVT = TLI.getValueType(I.getType());
1147 if (DestVT == MVT::Vector) {
1148 // This is a cast to a vector from something else. This is always a bit
1149 // convert. Get information about the input vector.
1150 const PackedType *DestTy = cast<PackedType>(I.getType());
1151 MVT::ValueType EltVT = TLI.getValueType(DestTy->getElementType());
1152 setValue(&I, DAG.getNode(ISD::VBIT_CONVERT, DestVT, N,
1153 DAG.getConstant(DestTy->getNumElements(),MVT::i32),
1154 DAG.getValueType(EltVT)));
1155 } else if (SrcVT == DestVT) {
1156 setValue(&I, N); // noop cast.
1157 } else if (DestVT == MVT::i1) {
1158 // Cast to bool is a comparison against zero, not truncation to zero.
1159 SDOperand Zero = isInteger(SrcVT) ? DAG.getConstant(0, N.getValueType()) :
1160 DAG.getConstantFP(0.0, N.getValueType());
1161 setValue(&I, DAG.getSetCC(MVT::i1, N, Zero, ISD::SETNE));
1162 } else if (isInteger(SrcVT)) {
1163 if (isInteger(DestVT)) { // Int -> Int cast
1164 if (DestVT < SrcVT) // Truncating cast?
1165 setValue(&I, DAG.getNode(ISD::TRUNCATE, DestVT, N));
1166 else if (I.getOperand(0)->getType()->isSigned())
1167 setValue(&I, DAG.getNode(ISD::SIGN_EXTEND, DestVT, N));
1169 setValue(&I, DAG.getNode(ISD::ZERO_EXTEND, DestVT, N));
1170 } else if (isFloatingPoint(DestVT)) { // Int -> FP cast
1171 if (I.getOperand(0)->getType()->isSigned())
1172 setValue(&I, DAG.getNode(ISD::SINT_TO_FP, DestVT, N));
1174 setValue(&I, DAG.getNode(ISD::UINT_TO_FP, DestVT, N));
1176 assert(0 && "Unknown cast!");
1178 } else if (isFloatingPoint(SrcVT)) {
1179 if (isFloatingPoint(DestVT)) { // FP -> FP cast
1180 if (DestVT < SrcVT) // Rounding cast?
1181 setValue(&I, DAG.getNode(ISD::FP_ROUND, DestVT, N));
1183 setValue(&I, DAG.getNode(ISD::FP_EXTEND, DestVT, N));
1184 } else if (isInteger(DestVT)) { // FP -> Int cast.
1185 if (I.getType()->isSigned())
1186 setValue(&I, DAG.getNode(ISD::FP_TO_SINT, DestVT, N));
1188 setValue(&I, DAG.getNode(ISD::FP_TO_UINT, DestVT, N));
1190 assert(0 && "Unknown cast!");
1193 assert(SrcVT == MVT::Vector && "Unknown cast!");
1194 assert(DestVT != MVT::Vector && "Casts to vector already handled!");
1195 // This is a cast from a vector to something else. This is always a bit
1196 // convert. Get information about the input vector.
1197 setValue(&I, DAG.getNode(ISD::VBIT_CONVERT, DestVT, N));
1201 void SelectionDAGLowering::visitInsertElement(User &I) {
1202 SDOperand InVec = getValue(I.getOperand(0));
1203 SDOperand InVal = getValue(I.getOperand(1));
1204 SDOperand InIdx = DAG.getNode(ISD::ZERO_EXTEND, TLI.getPointerTy(),
1205 getValue(I.getOperand(2)));
1207 SDOperand Num = *(InVec.Val->op_end()-2);
1208 SDOperand Typ = *(InVec.Val->op_end()-1);
1209 setValue(&I, DAG.getNode(ISD::VINSERT_VECTOR_ELT, MVT::Vector,
1210 InVec, InVal, InIdx, Num, Typ));
1213 void SelectionDAGLowering::visitExtractElement(User &I) {
1214 SDOperand InVec = getValue(I.getOperand(0));
1215 SDOperand InIdx = DAG.getNode(ISD::ZERO_EXTEND, TLI.getPointerTy(),
1216 getValue(I.getOperand(1)));
1217 SDOperand Typ = *(InVec.Val->op_end()-1);
1218 setValue(&I, DAG.getNode(ISD::VEXTRACT_VECTOR_ELT,
1219 TLI.getValueType(I.getType()), InVec, InIdx));
1222 void SelectionDAGLowering::visitShuffleVector(User &I) {
1223 SDOperand V1 = getValue(I.getOperand(0));
1224 SDOperand V2 = getValue(I.getOperand(1));
1225 SDOperand Mask = getValue(I.getOperand(2));
1227 SDOperand Num = *(V1.Val->op_end()-2);
1228 SDOperand Typ = *(V2.Val->op_end()-1);
1229 setValue(&I, DAG.getNode(ISD::VVECTOR_SHUFFLE, MVT::Vector,
1230 V1, V2, Mask, Num, Typ));
1234 void SelectionDAGLowering::visitGetElementPtr(User &I) {
1235 SDOperand N = getValue(I.getOperand(0));
1236 const Type *Ty = I.getOperand(0)->getType();
1238 for (GetElementPtrInst::op_iterator OI = I.op_begin()+1, E = I.op_end();
1241 if (const StructType *StTy = dyn_cast<StructType>(Ty)) {
1242 unsigned Field = cast<ConstantUInt>(Idx)->getValue();
1245 uint64_t Offset = TD->getStructLayout(StTy)->MemberOffsets[Field];
1246 N = DAG.getNode(ISD::ADD, N.getValueType(), N,
1247 getIntPtrConstant(Offset));
1249 Ty = StTy->getElementType(Field);
1251 Ty = cast<SequentialType>(Ty)->getElementType();
1253 // If this is a constant subscript, handle it quickly.
1254 if (ConstantInt *CI = dyn_cast<ConstantInt>(Idx)) {
1255 if (CI->getRawValue() == 0) continue;
1258 if (ConstantSInt *CSI = dyn_cast<ConstantSInt>(CI))
1259 Offs = (int64_t)TD->getTypeSize(Ty)*CSI->getValue();
1261 Offs = TD->getTypeSize(Ty)*cast<ConstantUInt>(CI)->getValue();
1262 N = DAG.getNode(ISD::ADD, N.getValueType(), N, getIntPtrConstant(Offs));
1266 // N = N + Idx * ElementSize;
1267 uint64_t ElementSize = TD->getTypeSize(Ty);
1268 SDOperand IdxN = getValue(Idx);
1270 // If the index is smaller or larger than intptr_t, truncate or extend
1272 if (IdxN.getValueType() < N.getValueType()) {
1273 if (Idx->getType()->isSigned())
1274 IdxN = DAG.getNode(ISD::SIGN_EXTEND, N.getValueType(), IdxN);
1276 IdxN = DAG.getNode(ISD::ZERO_EXTEND, N.getValueType(), IdxN);
1277 } else if (IdxN.getValueType() > N.getValueType())
1278 IdxN = DAG.getNode(ISD::TRUNCATE, N.getValueType(), IdxN);
1280 // If this is a multiply by a power of two, turn it into a shl
1281 // immediately. This is a very common case.
1282 if (isPowerOf2_64(ElementSize)) {
1283 unsigned Amt = Log2_64(ElementSize);
1284 IdxN = DAG.getNode(ISD::SHL, N.getValueType(), IdxN,
1285 DAG.getConstant(Amt, TLI.getShiftAmountTy()));
1286 N = DAG.getNode(ISD::ADD, N.getValueType(), N, IdxN);
1290 SDOperand Scale = getIntPtrConstant(ElementSize);
1291 IdxN = DAG.getNode(ISD::MUL, N.getValueType(), IdxN, Scale);
1292 N = DAG.getNode(ISD::ADD, N.getValueType(), N, IdxN);
1298 void SelectionDAGLowering::visitAlloca(AllocaInst &I) {
1299 // If this is a fixed sized alloca in the entry block of the function,
1300 // allocate it statically on the stack.
1301 if (FuncInfo.StaticAllocaMap.count(&I))
1302 return; // getValue will auto-populate this.
1304 const Type *Ty = I.getAllocatedType();
1305 uint64_t TySize = TLI.getTargetData()->getTypeSize(Ty);
1306 unsigned Align = std::max((unsigned)TLI.getTargetData()->getTypeAlignment(Ty),
1309 SDOperand AllocSize = getValue(I.getArraySize());
1310 MVT::ValueType IntPtr = TLI.getPointerTy();
1311 if (IntPtr < AllocSize.getValueType())
1312 AllocSize = DAG.getNode(ISD::TRUNCATE, IntPtr, AllocSize);
1313 else if (IntPtr > AllocSize.getValueType())
1314 AllocSize = DAG.getNode(ISD::ZERO_EXTEND, IntPtr, AllocSize);
1316 AllocSize = DAG.getNode(ISD::MUL, IntPtr, AllocSize,
1317 getIntPtrConstant(TySize));
1319 // Handle alignment. If the requested alignment is less than or equal to the
1320 // stack alignment, ignore it and round the size of the allocation up to the
1321 // stack alignment size. If the size is greater than the stack alignment, we
1322 // note this in the DYNAMIC_STACKALLOC node.
1323 unsigned StackAlign =
1324 TLI.getTargetMachine().getFrameInfo()->getStackAlignment();
1325 if (Align <= StackAlign) {
1327 // Add SA-1 to the size.
1328 AllocSize = DAG.getNode(ISD::ADD, AllocSize.getValueType(), AllocSize,
1329 getIntPtrConstant(StackAlign-1));
1330 // Mask out the low bits for alignment purposes.
1331 AllocSize = DAG.getNode(ISD::AND, AllocSize.getValueType(), AllocSize,
1332 getIntPtrConstant(~(uint64_t)(StackAlign-1)));
1335 std::vector<MVT::ValueType> VTs;
1336 VTs.push_back(AllocSize.getValueType());
1337 VTs.push_back(MVT::Other);
1338 std::vector<SDOperand> Ops;
1339 Ops.push_back(getRoot());
1340 Ops.push_back(AllocSize);
1341 Ops.push_back(getIntPtrConstant(Align));
1342 SDOperand DSA = DAG.getNode(ISD::DYNAMIC_STACKALLOC, VTs, Ops);
1343 DAG.setRoot(setValue(&I, DSA).getValue(1));
1345 // Inform the Frame Information that we have just allocated a variable-sized
1347 CurMBB->getParent()->getFrameInfo()->CreateVariableSizedObject();
1350 void SelectionDAGLowering::visitLoad(LoadInst &I) {
1351 SDOperand Ptr = getValue(I.getOperand(0));
1357 // Do not serialize non-volatile loads against each other.
1358 Root = DAG.getRoot();
1361 setValue(&I, getLoadFrom(I.getType(), Ptr, DAG.getSrcValue(I.getOperand(0)),
1362 Root, I.isVolatile()));
1365 SDOperand SelectionDAGLowering::getLoadFrom(const Type *Ty, SDOperand Ptr,
1366 SDOperand SrcValue, SDOperand Root,
1369 if (const PackedType *PTy = dyn_cast<PackedType>(Ty)) {
1370 MVT::ValueType PVT = TLI.getValueType(PTy->getElementType());
1371 L = DAG.getVecLoad(PTy->getNumElements(), PVT, Root, Ptr, SrcValue);
1373 L = DAG.getLoad(TLI.getValueType(Ty), Root, Ptr, SrcValue);
1377 DAG.setRoot(L.getValue(1));
1379 PendingLoads.push_back(L.getValue(1));
1385 void SelectionDAGLowering::visitStore(StoreInst &I) {
1386 Value *SrcV = I.getOperand(0);
1387 SDOperand Src = getValue(SrcV);
1388 SDOperand Ptr = getValue(I.getOperand(1));
1389 DAG.setRoot(DAG.getNode(ISD::STORE, MVT::Other, getRoot(), Src, Ptr,
1390 DAG.getSrcValue(I.getOperand(1))));
1393 /// IntrinsicCannotAccessMemory - Return true if the specified intrinsic cannot
1394 /// access memory and has no other side effects at all.
1395 static bool IntrinsicCannotAccessMemory(unsigned IntrinsicID) {
1396 #define GET_NO_MEMORY_INTRINSICS
1397 #include "llvm/Intrinsics.gen"
1398 #undef GET_NO_MEMORY_INTRINSICS
1402 // IntrinsicOnlyReadsMemory - Return true if the specified intrinsic doesn't
1403 // have any side-effects or if it only reads memory.
1404 static bool IntrinsicOnlyReadsMemory(unsigned IntrinsicID) {
1405 #define GET_SIDE_EFFECT_INFO
1406 #include "llvm/Intrinsics.gen"
1407 #undef GET_SIDE_EFFECT_INFO
1411 /// visitTargetIntrinsic - Lower a call of a target intrinsic to an INTRINSIC
1413 void SelectionDAGLowering::visitTargetIntrinsic(CallInst &I,
1414 unsigned Intrinsic) {
1415 bool HasChain = !IntrinsicCannotAccessMemory(Intrinsic);
1416 bool OnlyLoad = HasChain && IntrinsicOnlyReadsMemory(Intrinsic);
1418 // Build the operand list.
1419 std::vector<SDOperand> Ops;
1420 if (HasChain) { // If this intrinsic has side-effects, chainify it.
1422 // We don't need to serialize loads against other loads.
1423 Ops.push_back(DAG.getRoot());
1425 Ops.push_back(getRoot());
1429 // Add the intrinsic ID as an integer operand.
1430 Ops.push_back(DAG.getConstant(Intrinsic, TLI.getPointerTy()));
1432 // Add all operands of the call to the operand list.
1433 for (unsigned i = 1, e = I.getNumOperands(); i != e; ++i) {
1434 SDOperand Op = getValue(I.getOperand(i));
1436 // If this is a vector type, force it to the right packed type.
1437 if (Op.getValueType() == MVT::Vector) {
1438 const PackedType *OpTy = cast<PackedType>(I.getOperand(i)->getType());
1439 MVT::ValueType EltVT = TLI.getValueType(OpTy->getElementType());
1441 MVT::ValueType VVT = MVT::getVectorType(EltVT, OpTy->getNumElements());
1442 assert(VVT != MVT::Other && "Intrinsic uses a non-legal type?");
1443 Op = DAG.getNode(ISD::VBIT_CONVERT, VVT, Op);
1446 assert(TLI.isTypeLegal(Op.getValueType()) &&
1447 "Intrinsic uses a non-legal type?");
1451 std::vector<MVT::ValueType> VTs;
1452 if (I.getType() != Type::VoidTy) {
1453 MVT::ValueType VT = TLI.getValueType(I.getType());
1454 if (VT == MVT::Vector) {
1455 const PackedType *DestTy = cast<PackedType>(I.getType());
1456 MVT::ValueType EltVT = TLI.getValueType(DestTy->getElementType());
1458 VT = MVT::getVectorType(EltVT, DestTy->getNumElements());
1459 assert(VT != MVT::Other && "Intrinsic uses a non-legal type?");
1462 assert(TLI.isTypeLegal(VT) && "Intrinsic uses a non-legal type?");
1466 VTs.push_back(MVT::Other);
1471 Result = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, VTs, Ops);
1472 else if (I.getType() != Type::VoidTy)
1473 Result = DAG.getNode(ISD::INTRINSIC_W_CHAIN, VTs, Ops);
1475 Result = DAG.getNode(ISD::INTRINSIC_VOID, VTs, Ops);
1478 SDOperand Chain = Result.getValue(Result.Val->getNumValues()-1);
1480 PendingLoads.push_back(Chain);
1484 if (I.getType() != Type::VoidTy) {
1485 if (const PackedType *PTy = dyn_cast<PackedType>(I.getType())) {
1486 MVT::ValueType EVT = TLI.getValueType(PTy->getElementType());
1487 Result = DAG.getNode(ISD::VBIT_CONVERT, MVT::Vector, Result,
1488 DAG.getConstant(PTy->getNumElements(), MVT::i32),
1489 DAG.getValueType(EVT));
1491 setValue(&I, Result);
1495 /// visitIntrinsicCall - Lower the call to the specified intrinsic function. If
1496 /// we want to emit this as a call to a named external function, return the name
1497 /// otherwise lower it and return null.
1499 SelectionDAGLowering::visitIntrinsicCall(CallInst &I, unsigned Intrinsic) {
1500 switch (Intrinsic) {
1502 // By default, turn this into a target intrinsic node.
1503 visitTargetIntrinsic(I, Intrinsic);
1505 case Intrinsic::vastart: visitVAStart(I); return 0;
1506 case Intrinsic::vaend: visitVAEnd(I); return 0;
1507 case Intrinsic::vacopy: visitVACopy(I); return 0;
1508 case Intrinsic::returnaddress: visitFrameReturnAddress(I, false); return 0;
1509 case Intrinsic::frameaddress: visitFrameReturnAddress(I, true); return 0;
1510 case Intrinsic::setjmp:
1511 return "_setjmp"+!TLI.usesUnderscoreSetJmpLongJmp();
1513 case Intrinsic::longjmp:
1514 return "_longjmp"+!TLI.usesUnderscoreSetJmpLongJmp();
1516 case Intrinsic::memcpy_i32:
1517 case Intrinsic::memcpy_i64:
1518 visitMemIntrinsic(I, ISD::MEMCPY);
1520 case Intrinsic::memset_i32:
1521 case Intrinsic::memset_i64:
1522 visitMemIntrinsic(I, ISD::MEMSET);
1524 case Intrinsic::memmove_i32:
1525 case Intrinsic::memmove_i64:
1526 visitMemIntrinsic(I, ISD::MEMMOVE);
1529 case Intrinsic::dbg_stoppoint: {
1530 MachineDebugInfo *DebugInfo = DAG.getMachineDebugInfo();
1531 DbgStopPointInst &SPI = cast<DbgStopPointInst>(I);
1532 if (DebugInfo && SPI.getContext() && DebugInfo->Verify(SPI.getContext())) {
1533 std::vector<SDOperand> Ops;
1535 Ops.push_back(getRoot());
1536 Ops.push_back(getValue(SPI.getLineValue()));
1537 Ops.push_back(getValue(SPI.getColumnValue()));
1539 DebugInfoDesc *DD = DebugInfo->getDescFor(SPI.getContext());
1540 assert(DD && "Not a debug information descriptor");
1541 CompileUnitDesc *CompileUnit = cast<CompileUnitDesc>(DD);
1543 Ops.push_back(DAG.getString(CompileUnit->getFileName()));
1544 Ops.push_back(DAG.getString(CompileUnit->getDirectory()));
1546 DAG.setRoot(DAG.getNode(ISD::LOCATION, MVT::Other, Ops));
1551 case Intrinsic::dbg_region_start: {
1552 MachineDebugInfo *DebugInfo = DAG.getMachineDebugInfo();
1553 DbgRegionStartInst &RSI = cast<DbgRegionStartInst>(I);
1554 if (DebugInfo && RSI.getContext() && DebugInfo->Verify(RSI.getContext())) {
1555 std::vector<SDOperand> Ops;
1557 unsigned LabelID = DebugInfo->RecordRegionStart(RSI.getContext());
1559 Ops.push_back(getRoot());
1560 Ops.push_back(DAG.getConstant(LabelID, MVT::i32));
1562 DAG.setRoot(DAG.getNode(ISD::DEBUG_LABEL, MVT::Other, Ops));
1567 case Intrinsic::dbg_region_end: {
1568 MachineDebugInfo *DebugInfo = DAG.getMachineDebugInfo();
1569 DbgRegionEndInst &REI = cast<DbgRegionEndInst>(I);
1570 if (DebugInfo && REI.getContext() && DebugInfo->Verify(REI.getContext())) {
1571 std::vector<SDOperand> Ops;
1573 unsigned LabelID = DebugInfo->RecordRegionEnd(REI.getContext());
1575 Ops.push_back(getRoot());
1576 Ops.push_back(DAG.getConstant(LabelID, MVT::i32));
1578 DAG.setRoot(DAG.getNode(ISD::DEBUG_LABEL, MVT::Other, Ops));
1583 case Intrinsic::dbg_func_start: {
1584 MachineDebugInfo *DebugInfo = DAG.getMachineDebugInfo();
1585 DbgFuncStartInst &FSI = cast<DbgFuncStartInst>(I);
1586 if (DebugInfo && FSI.getSubprogram() &&
1587 DebugInfo->Verify(FSI.getSubprogram())) {
1588 std::vector<SDOperand> Ops;
1590 unsigned LabelID = DebugInfo->RecordRegionStart(FSI.getSubprogram());
1592 Ops.push_back(getRoot());
1593 Ops.push_back(DAG.getConstant(LabelID, MVT::i32));
1595 DAG.setRoot(DAG.getNode(ISD::DEBUG_LABEL, MVT::Other, Ops));
1600 case Intrinsic::dbg_declare: {
1601 MachineDebugInfo *DebugInfo = DAG.getMachineDebugInfo();
1602 DbgDeclareInst &DI = cast<DbgDeclareInst>(I);
1603 if (DebugInfo && DI.getVariable() && DebugInfo->Verify(DI.getVariable())) {
1604 std::vector<SDOperand> Ops;
1606 SDOperand AddressOp = getValue(DI.getAddress());
1607 if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(AddressOp)) {
1608 DebugInfo->RecordVariable(DI.getVariable(), FI->getIndex());
1615 case Intrinsic::isunordered_f32:
1616 case Intrinsic::isunordered_f64:
1617 setValue(&I, DAG.getSetCC(MVT::i1,getValue(I.getOperand(1)),
1618 getValue(I.getOperand(2)), ISD::SETUO));
1621 case Intrinsic::sqrt_f32:
1622 case Intrinsic::sqrt_f64:
1623 setValue(&I, DAG.getNode(ISD::FSQRT,
1624 getValue(I.getOperand(1)).getValueType(),
1625 getValue(I.getOperand(1))));
1627 case Intrinsic::pcmarker: {
1628 SDOperand Tmp = getValue(I.getOperand(1));
1629 DAG.setRoot(DAG.getNode(ISD::PCMARKER, MVT::Other, getRoot(), Tmp));
1632 case Intrinsic::readcyclecounter: {
1633 std::vector<MVT::ValueType> VTs;
1634 VTs.push_back(MVT::i64);
1635 VTs.push_back(MVT::Other);
1636 std::vector<SDOperand> Ops;
1637 Ops.push_back(getRoot());
1638 SDOperand Tmp = DAG.getNode(ISD::READCYCLECOUNTER, VTs, Ops);
1640 DAG.setRoot(Tmp.getValue(1));
1643 case Intrinsic::bswap_i16:
1644 case Intrinsic::bswap_i32:
1645 case Intrinsic::bswap_i64:
1646 setValue(&I, DAG.getNode(ISD::BSWAP,
1647 getValue(I.getOperand(1)).getValueType(),
1648 getValue(I.getOperand(1))));
1650 case Intrinsic::cttz_i8:
1651 case Intrinsic::cttz_i16:
1652 case Intrinsic::cttz_i32:
1653 case Intrinsic::cttz_i64:
1654 setValue(&I, DAG.getNode(ISD::CTTZ,
1655 getValue(I.getOperand(1)).getValueType(),
1656 getValue(I.getOperand(1))));
1658 case Intrinsic::ctlz_i8:
1659 case Intrinsic::ctlz_i16:
1660 case Intrinsic::ctlz_i32:
1661 case Intrinsic::ctlz_i64:
1662 setValue(&I, DAG.getNode(ISD::CTLZ,
1663 getValue(I.getOperand(1)).getValueType(),
1664 getValue(I.getOperand(1))));
1666 case Intrinsic::ctpop_i8:
1667 case Intrinsic::ctpop_i16:
1668 case Intrinsic::ctpop_i32:
1669 case Intrinsic::ctpop_i64:
1670 setValue(&I, DAG.getNode(ISD::CTPOP,
1671 getValue(I.getOperand(1)).getValueType(),
1672 getValue(I.getOperand(1))));
1674 case Intrinsic::stacksave: {
1675 std::vector<MVT::ValueType> VTs;
1676 VTs.push_back(TLI.getPointerTy());
1677 VTs.push_back(MVT::Other);
1678 std::vector<SDOperand> Ops;
1679 Ops.push_back(getRoot());
1680 SDOperand Tmp = DAG.getNode(ISD::STACKSAVE, VTs, Ops);
1682 DAG.setRoot(Tmp.getValue(1));
1685 case Intrinsic::stackrestore: {
1686 SDOperand Tmp = getValue(I.getOperand(1));
1687 DAG.setRoot(DAG.getNode(ISD::STACKRESTORE, MVT::Other, getRoot(), Tmp));
1690 case Intrinsic::prefetch:
1691 // FIXME: Currently discarding prefetches.
1697 void SelectionDAGLowering::visitCall(CallInst &I) {
1698 const char *RenameFn = 0;
1699 if (Function *F = I.getCalledFunction()) {
1700 if (F->isExternal())
1701 if (unsigned IID = F->getIntrinsicID()) {
1702 RenameFn = visitIntrinsicCall(I, IID);
1705 } else { // Not an LLVM intrinsic.
1706 const std::string &Name = F->getName();
1707 if (Name[0] == 'c' && (Name == "copysign" || Name == "copysignf")) {
1708 if (I.getNumOperands() == 3 && // Basic sanity checks.
1709 I.getOperand(1)->getType()->isFloatingPoint() &&
1710 I.getType() == I.getOperand(1)->getType() &&
1711 I.getType() == I.getOperand(2)->getType()) {
1712 SDOperand LHS = getValue(I.getOperand(1));
1713 SDOperand RHS = getValue(I.getOperand(2));
1714 setValue(&I, DAG.getNode(ISD::FCOPYSIGN, LHS.getValueType(),
1718 } else if (Name[0] == 'f' && (Name == "fabs" || Name == "fabsf")) {
1719 if (I.getNumOperands() == 2 && // Basic sanity checks.
1720 I.getOperand(1)->getType()->isFloatingPoint() &&
1721 I.getType() == I.getOperand(1)->getType()) {
1722 SDOperand Tmp = getValue(I.getOperand(1));
1723 setValue(&I, DAG.getNode(ISD::FABS, Tmp.getValueType(), Tmp));
1726 } else if (Name[0] == 's' && (Name == "sin" || Name == "sinf")) {
1727 if (I.getNumOperands() == 2 && // Basic sanity checks.
1728 I.getOperand(1)->getType()->isFloatingPoint() &&
1729 I.getType() == I.getOperand(1)->getType()) {
1730 SDOperand Tmp = getValue(I.getOperand(1));
1731 setValue(&I, DAG.getNode(ISD::FSIN, Tmp.getValueType(), Tmp));
1734 } else if (Name[0] == 'c' && (Name == "cos" || Name == "cosf")) {
1735 if (I.getNumOperands() == 2 && // Basic sanity checks.
1736 I.getOperand(1)->getType()->isFloatingPoint() &&
1737 I.getType() == I.getOperand(1)->getType()) {
1738 SDOperand Tmp = getValue(I.getOperand(1));
1739 setValue(&I, DAG.getNode(ISD::FCOS, Tmp.getValueType(), Tmp));
1744 } else if (isa<InlineAsm>(I.getOperand(0))) {
1751 Callee = getValue(I.getOperand(0));
1753 Callee = DAG.getExternalSymbol(RenameFn, TLI.getPointerTy());
1754 std::vector<std::pair<SDOperand, const Type*> > Args;
1755 Args.reserve(I.getNumOperands());
1756 for (unsigned i = 1, e = I.getNumOperands(); i != e; ++i) {
1757 Value *Arg = I.getOperand(i);
1758 SDOperand ArgNode = getValue(Arg);
1759 Args.push_back(std::make_pair(ArgNode, Arg->getType()));
1762 const PointerType *PT = cast<PointerType>(I.getCalledValue()->getType());
1763 const FunctionType *FTy = cast<FunctionType>(PT->getElementType());
1765 std::pair<SDOperand,SDOperand> Result =
1766 TLI.LowerCallTo(getRoot(), I.getType(), FTy->isVarArg(), I.getCallingConv(),
1767 I.isTailCall(), Callee, Args, DAG);
1768 if (I.getType() != Type::VoidTy)
1769 setValue(&I, Result.first);
1770 DAG.setRoot(Result.second);
1773 SDOperand RegsForValue::getCopyFromRegs(SelectionDAG &DAG,
1774 SDOperand &Chain, SDOperand &Flag)const{
1775 SDOperand Val = DAG.getCopyFromReg(Chain, Regs[0], RegVT, Flag);
1776 Chain = Val.getValue(1);
1777 Flag = Val.getValue(2);
1779 // If the result was expanded, copy from the top part.
1780 if (Regs.size() > 1) {
1781 assert(Regs.size() == 2 &&
1782 "Cannot expand to more than 2 elts yet!");
1783 SDOperand Hi = DAG.getCopyFromReg(Chain, Regs[1], RegVT, Flag);
1784 Chain = Val.getValue(1);
1785 Flag = Val.getValue(2);
1786 if (DAG.getTargetLoweringInfo().isLittleEndian())
1787 return DAG.getNode(ISD::BUILD_PAIR, ValueVT, Val, Hi);
1789 return DAG.getNode(ISD::BUILD_PAIR, ValueVT, Hi, Val);
1792 // Otherwise, if the return value was promoted or extended, truncate it to the
1793 // appropriate type.
1794 if (RegVT == ValueVT)
1797 if (MVT::isInteger(RegVT)) {
1798 if (ValueVT < RegVT)
1799 return DAG.getNode(ISD::TRUNCATE, ValueVT, Val);
1801 return DAG.getNode(ISD::ANY_EXTEND, ValueVT, Val);
1803 return DAG.getNode(ISD::FP_ROUND, ValueVT, Val);
1807 /// getCopyToRegs - Emit a series of CopyToReg nodes that copies the
1808 /// specified value into the registers specified by this object. This uses
1809 /// Chain/Flag as the input and updates them for the output Chain/Flag.
1810 void RegsForValue::getCopyToRegs(SDOperand Val, SelectionDAG &DAG,
1811 SDOperand &Chain, SDOperand &Flag,
1812 MVT::ValueType PtrVT) const {
1813 if (Regs.size() == 1) {
1814 // If there is a single register and the types differ, this must be
1816 if (RegVT != ValueVT) {
1817 if (MVT::isInteger(RegVT)) {
1818 if (RegVT < ValueVT)
1819 Val = DAG.getNode(ISD::TRUNCATE, RegVT, Val);
1821 Val = DAG.getNode(ISD::ANY_EXTEND, RegVT, Val);
1823 Val = DAG.getNode(ISD::FP_EXTEND, RegVT, Val);
1825 Chain = DAG.getCopyToReg(Chain, Regs[0], Val, Flag);
1826 Flag = Chain.getValue(1);
1828 std::vector<unsigned> R(Regs);
1829 if (!DAG.getTargetLoweringInfo().isLittleEndian())
1830 std::reverse(R.begin(), R.end());
1832 for (unsigned i = 0, e = R.size(); i != e; ++i) {
1833 SDOperand Part = DAG.getNode(ISD::EXTRACT_ELEMENT, RegVT, Val,
1834 DAG.getConstant(i, PtrVT));
1835 Chain = DAG.getCopyToReg(Chain, R[i], Part, Flag);
1836 Flag = Chain.getValue(1);
1841 /// AddInlineAsmOperands - Add this value to the specified inlineasm node
1842 /// operand list. This adds the code marker and includes the number of
1843 /// values added into it.
1844 void RegsForValue::AddInlineAsmOperands(unsigned Code, SelectionDAG &DAG,
1845 std::vector<SDOperand> &Ops) const {
1846 Ops.push_back(DAG.getConstant(Code | (Regs.size() << 3), MVT::i32));
1847 for (unsigned i = 0, e = Regs.size(); i != e; ++i)
1848 Ops.push_back(DAG.getRegister(Regs[i], RegVT));
1851 /// isAllocatableRegister - If the specified register is safe to allocate,
1852 /// i.e. it isn't a stack pointer or some other special register, return the
1853 /// register class for the register. Otherwise, return null.
1854 static const TargetRegisterClass *
1855 isAllocatableRegister(unsigned Reg, MachineFunction &MF,
1856 const TargetLowering &TLI, const MRegisterInfo *MRI) {
1857 MVT::ValueType FoundVT = MVT::Other;
1858 const TargetRegisterClass *FoundRC = 0;
1859 for (MRegisterInfo::regclass_iterator RCI = MRI->regclass_begin(),
1860 E = MRI->regclass_end(); RCI != E; ++RCI) {
1861 MVT::ValueType ThisVT = MVT::Other;
1863 const TargetRegisterClass *RC = *RCI;
1864 // If none of the the value types for this register class are valid, we
1865 // can't use it. For example, 64-bit reg classes on 32-bit targets.
1866 for (TargetRegisterClass::vt_iterator I = RC->vt_begin(), E = RC->vt_end();
1868 if (TLI.isTypeLegal(*I)) {
1869 // If we have already found this register in a different register class,
1870 // choose the one with the largest VT specified. For example, on
1871 // PowerPC, we favor f64 register classes over f32.
1872 if (FoundVT == MVT::Other ||
1873 MVT::getSizeInBits(FoundVT) < MVT::getSizeInBits(*I)) {
1880 if (ThisVT == MVT::Other) continue;
1882 // NOTE: This isn't ideal. In particular, this might allocate the
1883 // frame pointer in functions that need it (due to them not being taken
1884 // out of allocation, because a variable sized allocation hasn't been seen
1885 // yet). This is a slight code pessimization, but should still work.
1886 for (TargetRegisterClass::iterator I = RC->allocation_order_begin(MF),
1887 E = RC->allocation_order_end(MF); I != E; ++I)
1889 // We found a matching register class. Keep looking at others in case
1890 // we find one with larger registers that this physreg is also in.
1899 RegsForValue SelectionDAGLowering::
1900 GetRegistersForValue(const std::string &ConstrCode,
1901 MVT::ValueType VT, bool isOutReg, bool isInReg,
1902 std::set<unsigned> &OutputRegs,
1903 std::set<unsigned> &InputRegs) {
1904 std::pair<unsigned, const TargetRegisterClass*> PhysReg =
1905 TLI.getRegForInlineAsmConstraint(ConstrCode, VT);
1906 std::vector<unsigned> Regs;
1908 unsigned NumRegs = VT != MVT::Other ? TLI.getNumElements(VT) : 1;
1909 MVT::ValueType RegVT;
1910 MVT::ValueType ValueVT = VT;
1912 if (PhysReg.first) {
1913 if (VT == MVT::Other)
1914 ValueVT = *PhysReg.second->vt_begin();
1916 // Get the actual register value type. This is important, because the user
1917 // may have asked for (e.g.) the AX register in i32 type. We need to
1918 // remember that AX is actually i16 to get the right extension.
1919 RegVT = *PhysReg.second->vt_begin();
1921 // This is a explicit reference to a physical register.
1922 Regs.push_back(PhysReg.first);
1924 // If this is an expanded reference, add the rest of the regs to Regs.
1926 TargetRegisterClass::iterator I = PhysReg.second->begin();
1927 TargetRegisterClass::iterator E = PhysReg.second->end();
1928 for (; *I != PhysReg.first; ++I)
1929 assert(I != E && "Didn't find reg!");
1931 // Already added the first reg.
1933 for (; NumRegs; --NumRegs, ++I) {
1934 assert(I != E && "Ran out of registers to allocate!");
1938 return RegsForValue(Regs, RegVT, ValueVT);
1941 // This is a reference to a register class. Allocate NumRegs consecutive,
1942 // available, registers from the class.
1943 std::vector<unsigned> RegClassRegs =
1944 TLI.getRegClassForInlineAsmConstraint(ConstrCode, VT);
1946 const MRegisterInfo *MRI = DAG.getTarget().getRegisterInfo();
1947 MachineFunction &MF = *CurMBB->getParent();
1948 unsigned NumAllocated = 0;
1949 for (unsigned i = 0, e = RegClassRegs.size(); i != e; ++i) {
1950 unsigned Reg = RegClassRegs[i];
1951 // See if this register is available.
1952 if ((isOutReg && OutputRegs.count(Reg)) || // Already used.
1953 (isInReg && InputRegs.count(Reg))) { // Already used.
1954 // Make sure we find consecutive registers.
1959 // Check to see if this register is allocatable (i.e. don't give out the
1961 const TargetRegisterClass *RC = isAllocatableRegister(Reg, MF, TLI, MRI);
1963 // Make sure we find consecutive registers.
1968 // Okay, this register is good, we can use it.
1971 // If we allocated enough consecutive
1972 if (NumAllocated == NumRegs) {
1973 unsigned RegStart = (i-NumAllocated)+1;
1974 unsigned RegEnd = i+1;
1975 // Mark all of the allocated registers used.
1976 for (unsigned i = RegStart; i != RegEnd; ++i) {
1977 unsigned Reg = RegClassRegs[i];
1978 Regs.push_back(Reg);
1979 if (isOutReg) OutputRegs.insert(Reg); // Mark reg used.
1980 if (isInReg) InputRegs.insert(Reg); // Mark reg used.
1983 return RegsForValue(Regs, *RC->vt_begin(), VT);
1987 // Otherwise, we couldn't allocate enough registers for this.
1988 return RegsForValue();
1992 /// visitInlineAsm - Handle a call to an InlineAsm object.
1994 void SelectionDAGLowering::visitInlineAsm(CallInst &I) {
1995 InlineAsm *IA = cast<InlineAsm>(I.getOperand(0));
1997 SDOperand AsmStr = DAG.getTargetExternalSymbol(IA->getAsmString().c_str(),
2000 // Note, we treat inline asms both with and without side-effects as the same.
2001 // If an inline asm doesn't have side effects and doesn't access memory, we
2002 // could not choose to not chain it.
2003 bool hasSideEffects = IA->hasSideEffects();
2005 std::vector<InlineAsm::ConstraintInfo> Constraints = IA->ParseConstraints();
2006 std::vector<MVT::ValueType> ConstraintVTs;
2008 /// AsmNodeOperands - A list of pairs. The first element is a register, the
2009 /// second is a bitfield where bit #0 is set if it is a use and bit #1 is set
2010 /// if it is a def of that register.
2011 std::vector<SDOperand> AsmNodeOperands;
2012 AsmNodeOperands.push_back(SDOperand()); // reserve space for input chain
2013 AsmNodeOperands.push_back(AsmStr);
2015 SDOperand Chain = getRoot();
2018 // We fully assign registers here at isel time. This is not optimal, but
2019 // should work. For register classes that correspond to LLVM classes, we
2020 // could let the LLVM RA do its thing, but we currently don't. Do a prepass
2021 // over the constraints, collecting fixed registers that we know we can't use.
2022 std::set<unsigned> OutputRegs, InputRegs;
2024 for (unsigned i = 0, e = Constraints.size(); i != e; ++i) {
2025 assert(Constraints[i].Codes.size() == 1 && "Only handles one code so far!");
2026 std::string &ConstraintCode = Constraints[i].Codes[0];
2028 MVT::ValueType OpVT;
2030 // Compute the value type for each operand and add it to ConstraintVTs.
2031 switch (Constraints[i].Type) {
2032 case InlineAsm::isOutput:
2033 if (!Constraints[i].isIndirectOutput) {
2034 assert(I.getType() != Type::VoidTy && "Bad inline asm!");
2035 OpVT = TLI.getValueType(I.getType());
2037 const Type *OpTy = I.getOperand(OpNum)->getType();
2038 OpVT = TLI.getValueType(cast<PointerType>(OpTy)->getElementType());
2039 OpNum++; // Consumes a call operand.
2042 case InlineAsm::isInput:
2043 OpVT = TLI.getValueType(I.getOperand(OpNum)->getType());
2044 OpNum++; // Consumes a call operand.
2046 case InlineAsm::isClobber:
2051 ConstraintVTs.push_back(OpVT);
2053 if (TLI.getRegForInlineAsmConstraint(ConstraintCode, OpVT).first == 0)
2054 continue; // Not assigned a fixed reg.
2056 // Build a list of regs that this operand uses. This always has a single
2057 // element for promoted/expanded operands.
2058 RegsForValue Regs = GetRegistersForValue(ConstraintCode, OpVT,
2060 OutputRegs, InputRegs);
2062 switch (Constraints[i].Type) {
2063 case InlineAsm::isOutput:
2064 // We can't assign any other output to this register.
2065 OutputRegs.insert(Regs.Regs.begin(), Regs.Regs.end());
2066 // If this is an early-clobber output, it cannot be assigned to the same
2067 // value as the input reg.
2068 if (Constraints[i].isEarlyClobber || Constraints[i].hasMatchingInput)
2069 InputRegs.insert(Regs.Regs.begin(), Regs.Regs.end());
2071 case InlineAsm::isInput:
2072 // We can't assign any other input to this register.
2073 InputRegs.insert(Regs.Regs.begin(), Regs.Regs.end());
2075 case InlineAsm::isClobber:
2076 // Clobbered regs cannot be used as inputs or outputs.
2077 InputRegs.insert(Regs.Regs.begin(), Regs.Regs.end());
2078 OutputRegs.insert(Regs.Regs.begin(), Regs.Regs.end());
2083 // Loop over all of the inputs, copying the operand values into the
2084 // appropriate registers and processing the output regs.
2085 RegsForValue RetValRegs;
2086 std::vector<std::pair<RegsForValue, Value*> > IndirectStoresToEmit;
2089 for (unsigned i = 0, e = Constraints.size(); i != e; ++i) {
2090 assert(Constraints[i].Codes.size() == 1 && "Only handles one code so far!");
2091 std::string &ConstraintCode = Constraints[i].Codes[0];
2093 switch (Constraints[i].Type) {
2094 case InlineAsm::isOutput: {
2095 TargetLowering::ConstraintType CTy = TargetLowering::C_RegisterClass;
2096 if (ConstraintCode.size() == 1) // not a physreg name.
2097 CTy = TLI.getConstraintType(ConstraintCode[0]);
2099 if (CTy == TargetLowering::C_Memory) {
2101 SDOperand InOperandVal = getValue(I.getOperand(OpNum));
2103 // Check that the operand (the address to store to) isn't a float.
2104 if (!MVT::isInteger(InOperandVal.getValueType()))
2105 assert(0 && "MATCH FAIL!");
2107 if (!Constraints[i].isIndirectOutput)
2108 assert(0 && "MATCH FAIL!");
2110 OpNum++; // Consumes a call operand.
2112 // Extend/truncate to the right pointer type if needed.
2113 MVT::ValueType PtrType = TLI.getPointerTy();
2114 if (InOperandVal.getValueType() < PtrType)
2115 InOperandVal = DAG.getNode(ISD::ZERO_EXTEND, PtrType, InOperandVal);
2116 else if (InOperandVal.getValueType() > PtrType)
2117 InOperandVal = DAG.getNode(ISD::TRUNCATE, PtrType, InOperandVal);
2119 // Add information to the INLINEASM node to know about this output.
2120 unsigned ResOpType = 4/*MEM*/ | (1 << 3);
2121 AsmNodeOperands.push_back(DAG.getConstant(ResOpType, MVT::i32));
2122 AsmNodeOperands.push_back(InOperandVal);
2126 // Otherwise, this is a register output.
2127 assert(CTy == TargetLowering::C_RegisterClass && "Unknown op type!");
2129 // If this is an early-clobber output, or if there is an input
2130 // constraint that matches this, we need to reserve the input register
2131 // so no other inputs allocate to it.
2132 bool UsesInputRegister = false;
2133 if (Constraints[i].isEarlyClobber || Constraints[i].hasMatchingInput)
2134 UsesInputRegister = true;
2136 // Copy the output from the appropriate register. Find a register that
2139 GetRegistersForValue(ConstraintCode, ConstraintVTs[i],
2140 true, UsesInputRegister,
2141 OutputRegs, InputRegs);
2142 assert(!Regs.Regs.empty() && "Couldn't allocate output reg!");
2144 if (!Constraints[i].isIndirectOutput) {
2145 assert(RetValRegs.Regs.empty() &&
2146 "Cannot have multiple output constraints yet!");
2147 assert(I.getType() != Type::VoidTy && "Bad inline asm!");
2150 IndirectStoresToEmit.push_back(std::make_pair(Regs,
2151 I.getOperand(OpNum)));
2152 OpNum++; // Consumes a call operand.
2155 // Add information to the INLINEASM node to know that this register is
2157 Regs.AddInlineAsmOperands(2 /*REGDEF*/, DAG, AsmNodeOperands);
2160 case InlineAsm::isInput: {
2161 SDOperand InOperandVal = getValue(I.getOperand(OpNum));
2162 OpNum++; // Consumes a call operand.
2164 if (isdigit(ConstraintCode[0])) { // Matching constraint?
2165 // If this is required to match an output register we have already set,
2166 // just use its register.
2167 unsigned OperandNo = atoi(ConstraintCode.c_str());
2169 // Scan until we find the definition we already emitted of this operand.
2170 // When we find it, create a RegsForValue operand.
2171 unsigned CurOp = 2; // The first operand.
2172 for (; OperandNo; --OperandNo) {
2173 // Advance to the next operand.
2175 cast<ConstantSDNode>(AsmNodeOperands[CurOp])->getValue();
2176 assert(((NumOps & 7) == 2 /*REGDEF*/ ||
2177 (NumOps & 7) == 4 /*MEM*/) &&
2178 "Skipped past definitions?");
2179 CurOp += (NumOps>>3)+1;
2183 cast<ConstantSDNode>(AsmNodeOperands[CurOp])->getValue();
2184 assert((NumOps & 7) == 2 /*REGDEF*/ &&
2185 "Skipped past definitions?");
2187 // Add NumOps>>3 registers to MatchedRegs.
2188 RegsForValue MatchedRegs;
2189 MatchedRegs.ValueVT = InOperandVal.getValueType();
2190 MatchedRegs.RegVT = AsmNodeOperands[CurOp+1].getValueType();
2191 for (unsigned i = 0, e = NumOps>>3; i != e; ++i) {
2192 unsigned Reg=cast<RegisterSDNode>(AsmNodeOperands[++CurOp])->getReg();
2193 MatchedRegs.Regs.push_back(Reg);
2196 // Use the produced MatchedRegs object to
2197 MatchedRegs.getCopyToRegs(InOperandVal, DAG, Chain, Flag,
2198 TLI.getPointerTy());
2199 MatchedRegs.AddInlineAsmOperands(1 /*REGUSE*/, DAG, AsmNodeOperands);
2203 TargetLowering::ConstraintType CTy = TargetLowering::C_RegisterClass;
2204 if (ConstraintCode.size() == 1) // not a physreg name.
2205 CTy = TLI.getConstraintType(ConstraintCode[0]);
2207 if (CTy == TargetLowering::C_Other) {
2208 if (!TLI.isOperandValidForConstraint(InOperandVal, ConstraintCode[0]))
2209 assert(0 && "MATCH FAIL!");
2211 // Add information to the INLINEASM node to know about this input.
2212 unsigned ResOpType = 3 /*IMM*/ | (1 << 3);
2213 AsmNodeOperands.push_back(DAG.getConstant(ResOpType, MVT::i32));
2214 AsmNodeOperands.push_back(InOperandVal);
2216 } else if (CTy == TargetLowering::C_Memory) {
2219 // Check that the operand isn't a float.
2220 if (!MVT::isInteger(InOperandVal.getValueType()))
2221 assert(0 && "MATCH FAIL!");
2223 // Extend/truncate to the right pointer type if needed.
2224 MVT::ValueType PtrType = TLI.getPointerTy();
2225 if (InOperandVal.getValueType() < PtrType)
2226 InOperandVal = DAG.getNode(ISD::ZERO_EXTEND, PtrType, InOperandVal);
2227 else if (InOperandVal.getValueType() > PtrType)
2228 InOperandVal = DAG.getNode(ISD::TRUNCATE, PtrType, InOperandVal);
2230 // Add information to the INLINEASM node to know about this input.
2231 unsigned ResOpType = 4/*MEM*/ | (1 << 3);
2232 AsmNodeOperands.push_back(DAG.getConstant(ResOpType, MVT::i32));
2233 AsmNodeOperands.push_back(InOperandVal);
2237 assert(CTy == TargetLowering::C_RegisterClass && "Unknown op type!");
2239 // Copy the input into the appropriate registers.
2240 RegsForValue InRegs =
2241 GetRegistersForValue(ConstraintCode, ConstraintVTs[i],
2242 false, true, OutputRegs, InputRegs);
2243 // FIXME: should be match fail.
2244 assert(!InRegs.Regs.empty() && "Couldn't allocate input reg!");
2246 InRegs.getCopyToRegs(InOperandVal, DAG, Chain, Flag, TLI.getPointerTy());
2248 InRegs.AddInlineAsmOperands(1/*REGUSE*/, DAG, AsmNodeOperands);
2251 case InlineAsm::isClobber: {
2252 RegsForValue ClobberedRegs =
2253 GetRegistersForValue(ConstraintCode, MVT::Other, false, false,
2254 OutputRegs, InputRegs);
2255 // Add the clobbered value to the operand list, so that the register
2256 // allocator is aware that the physreg got clobbered.
2257 if (!ClobberedRegs.Regs.empty())
2258 ClobberedRegs.AddInlineAsmOperands(2/*REGDEF*/, DAG, AsmNodeOperands);
2264 // Finish up input operands.
2265 AsmNodeOperands[0] = Chain;
2266 if (Flag.Val) AsmNodeOperands.push_back(Flag);
2268 std::vector<MVT::ValueType> VTs;
2269 VTs.push_back(MVT::Other);
2270 VTs.push_back(MVT::Flag);
2271 Chain = DAG.getNode(ISD::INLINEASM, VTs, AsmNodeOperands);
2272 Flag = Chain.getValue(1);
2274 // If this asm returns a register value, copy the result from that register
2275 // and set it as the value of the call.
2276 if (!RetValRegs.Regs.empty())
2277 setValue(&I, RetValRegs.getCopyFromRegs(DAG, Chain, Flag));
2279 std::vector<std::pair<SDOperand, Value*> > StoresToEmit;
2281 // Process indirect outputs, first output all of the flagged copies out of
2283 for (unsigned i = 0, e = IndirectStoresToEmit.size(); i != e; ++i) {
2284 RegsForValue &OutRegs = IndirectStoresToEmit[i].first;
2285 Value *Ptr = IndirectStoresToEmit[i].second;
2286 SDOperand OutVal = OutRegs.getCopyFromRegs(DAG, Chain, Flag);
2287 StoresToEmit.push_back(std::make_pair(OutVal, Ptr));
2290 // Emit the non-flagged stores from the physregs.
2291 std::vector<SDOperand> OutChains;
2292 for (unsigned i = 0, e = StoresToEmit.size(); i != e; ++i)
2293 OutChains.push_back(DAG.getNode(ISD::STORE, MVT::Other, Chain,
2294 StoresToEmit[i].first,
2295 getValue(StoresToEmit[i].second),
2296 DAG.getSrcValue(StoresToEmit[i].second)));
2297 if (!OutChains.empty())
2298 Chain = DAG.getNode(ISD::TokenFactor, MVT::Other, OutChains);
2303 void SelectionDAGLowering::visitMalloc(MallocInst &I) {
2304 SDOperand Src = getValue(I.getOperand(0));
2306 MVT::ValueType IntPtr = TLI.getPointerTy();
2308 if (IntPtr < Src.getValueType())
2309 Src = DAG.getNode(ISD::TRUNCATE, IntPtr, Src);
2310 else if (IntPtr > Src.getValueType())
2311 Src = DAG.getNode(ISD::ZERO_EXTEND, IntPtr, Src);
2313 // Scale the source by the type size.
2314 uint64_t ElementSize = TD->getTypeSize(I.getType()->getElementType());
2315 Src = DAG.getNode(ISD::MUL, Src.getValueType(),
2316 Src, getIntPtrConstant(ElementSize));
2318 std::vector<std::pair<SDOperand, const Type*> > Args;
2319 Args.push_back(std::make_pair(Src, TLI.getTargetData()->getIntPtrType()));
2321 std::pair<SDOperand,SDOperand> Result =
2322 TLI.LowerCallTo(getRoot(), I.getType(), false, CallingConv::C, true,
2323 DAG.getExternalSymbol("malloc", IntPtr),
2325 setValue(&I, Result.first); // Pointers always fit in registers
2326 DAG.setRoot(Result.second);
2329 void SelectionDAGLowering::visitFree(FreeInst &I) {
2330 std::vector<std::pair<SDOperand, const Type*> > Args;
2331 Args.push_back(std::make_pair(getValue(I.getOperand(0)),
2332 TLI.getTargetData()->getIntPtrType()));
2333 MVT::ValueType IntPtr = TLI.getPointerTy();
2334 std::pair<SDOperand,SDOperand> Result =
2335 TLI.LowerCallTo(getRoot(), Type::VoidTy, false, CallingConv::C, true,
2336 DAG.getExternalSymbol("free", IntPtr), Args, DAG);
2337 DAG.setRoot(Result.second);
2340 // InsertAtEndOfBasicBlock - This method should be implemented by targets that
2341 // mark instructions with the 'usesCustomDAGSchedInserter' flag. These
2342 // instructions are special in various ways, which require special support to
2343 // insert. The specified MachineInstr is created but not inserted into any
2344 // basic blocks, and the scheduler passes ownership of it to this method.
2345 MachineBasicBlock *TargetLowering::InsertAtEndOfBasicBlock(MachineInstr *MI,
2346 MachineBasicBlock *MBB) {
2347 std::cerr << "If a target marks an instruction with "
2348 "'usesCustomDAGSchedInserter', it must implement "
2349 "TargetLowering::InsertAtEndOfBasicBlock!\n";
2354 void SelectionDAGLowering::visitVAStart(CallInst &I) {
2355 DAG.setRoot(DAG.getNode(ISD::VASTART, MVT::Other, getRoot(),
2356 getValue(I.getOperand(1)),
2357 DAG.getSrcValue(I.getOperand(1))));
2360 void SelectionDAGLowering::visitVAArg(VAArgInst &I) {
2361 SDOperand V = DAG.getVAArg(TLI.getValueType(I.getType()), getRoot(),
2362 getValue(I.getOperand(0)),
2363 DAG.getSrcValue(I.getOperand(0)));
2365 DAG.setRoot(V.getValue(1));
2368 void SelectionDAGLowering::visitVAEnd(CallInst &I) {
2369 DAG.setRoot(DAG.getNode(ISD::VAEND, MVT::Other, getRoot(),
2370 getValue(I.getOperand(1)),
2371 DAG.getSrcValue(I.getOperand(1))));
2374 void SelectionDAGLowering::visitVACopy(CallInst &I) {
2375 DAG.setRoot(DAG.getNode(ISD::VACOPY, MVT::Other, getRoot(),
2376 getValue(I.getOperand(1)),
2377 getValue(I.getOperand(2)),
2378 DAG.getSrcValue(I.getOperand(1)),
2379 DAG.getSrcValue(I.getOperand(2))));
2382 /// TargetLowering::LowerArguments - This is the default LowerArguments
2383 /// implementation, which just inserts a FORMAL_ARGUMENTS node. FIXME: When all
2384 /// targets are migrated to using FORMAL_ARGUMENTS, this hook should be
2385 /// integrated into SDISel.
2386 std::vector<SDOperand>
2387 TargetLowering::LowerArguments(Function &F, SelectionDAG &DAG) {
2388 // Add CC# and isVararg as operands to the FORMAL_ARGUMENTS node.
2389 std::vector<SDOperand> Ops;
2390 Ops.push_back(DAG.getRoot());
2391 Ops.push_back(DAG.getConstant(F.getCallingConv(), getPointerTy()));
2392 Ops.push_back(DAG.getConstant(F.isVarArg(), getPointerTy()));
2394 // Add one result value for each formal argument.
2395 std::vector<MVT::ValueType> RetVals;
2396 for (Function::arg_iterator I = F.arg_begin(), E = F.arg_end(); I != E; ++I) {
2397 MVT::ValueType VT = getValueType(I->getType());
2399 switch (getTypeAction(VT)) {
2400 default: assert(0 && "Unknown type action!");
2402 RetVals.push_back(VT);
2405 RetVals.push_back(getTypeToTransformTo(VT));
2408 if (VT != MVT::Vector) {
2409 // If this is a large integer, it needs to be broken up into small
2410 // integers. Figure out what the destination type is and how many small
2411 // integers it turns into.
2412 MVT::ValueType NVT = getTypeToTransformTo(VT);
2413 unsigned NumVals = MVT::getSizeInBits(VT)/MVT::getSizeInBits(NVT);
2414 for (unsigned i = 0; i != NumVals; ++i)
2415 RetVals.push_back(NVT);
2417 // Otherwise, this is a vector type. We only support legal vectors
2419 unsigned NumElems = cast<PackedType>(I->getType())->getNumElements();
2420 const Type *EltTy = cast<PackedType>(I->getType())->getElementType();
2422 // Figure out if there is a Packed type corresponding to this Vector
2423 // type. If so, convert to the packed type.
2424 MVT::ValueType TVT = MVT::getVectorType(getValueType(EltTy), NumElems);
2425 if (TVT != MVT::Other && isTypeLegal(TVT)) {
2426 RetVals.push_back(TVT);
2428 assert(0 && "Don't support illegal by-val vector arguments yet!");
2435 RetVals.push_back(MVT::Other);
2438 SDNode *Result = DAG.getNode(ISD::FORMAL_ARGUMENTS, RetVals, Ops).Val;
2440 DAG.setRoot(SDOperand(Result, Result->getNumValues()-1));
2442 // Set up the return result vector.
2445 for (Function::arg_iterator I = F.arg_begin(), E = F.arg_end(); I != E; ++I) {
2446 MVT::ValueType VT = getValueType(I->getType());
2448 switch (getTypeAction(VT)) {
2449 default: assert(0 && "Unknown type action!");
2451 Ops.push_back(SDOperand(Result, i++));
2454 SDOperand Op(Result, i++);
2455 if (MVT::isInteger(VT)) {
2456 unsigned AssertOp = I->getType()->isSigned() ? ISD::AssertSext
2458 Op = DAG.getNode(AssertOp, Op.getValueType(), Op, DAG.getValueType(VT));
2459 Op = DAG.getNode(ISD::TRUNCATE, VT, Op);
2461 assert(MVT::isFloatingPoint(VT) && "Not int or FP?");
2462 Op = DAG.getNode(ISD::FP_ROUND, VT, Op);
2468 if (VT != MVT::Vector) {
2469 // If this is a large integer, it needs to be reassembled from small
2470 // integers. Figure out what the source elt type is and how many small
2472 MVT::ValueType NVT = getTypeToTransformTo(VT);
2473 unsigned NumVals = MVT::getSizeInBits(VT)/MVT::getSizeInBits(NVT);
2475 SDOperand Lo = SDOperand(Result, i++);
2476 SDOperand Hi = SDOperand(Result, i++);
2478 if (!isLittleEndian())
2481 Ops.push_back(DAG.getNode(ISD::BUILD_PAIR, VT, Lo, Hi));
2483 // Value scalarized into many values. Unimp for now.
2484 assert(0 && "Cannot expand i64 -> i16 yet!");
2487 // Otherwise, this is a vector type. We only support legal vectors
2489 const PackedType *PTy = cast<PackedType>(I->getType());
2490 unsigned NumElems = PTy->getNumElements();
2491 const Type *EltTy = PTy->getElementType();
2493 // Figure out if there is a Packed type corresponding to this Vector
2494 // type. If so, convert to the packed type.
2495 MVT::ValueType TVT = MVT::getVectorType(getValueType(EltTy), NumElems);
2496 if (TVT != MVT::Other && isTypeLegal(TVT)) {
2497 SDOperand N = SDOperand(Result, i++);
2498 // Handle copies from generic vectors to registers.
2499 N = DAG.getNode(ISD::VBIT_CONVERT, MVT::Vector, N,
2500 DAG.getConstant(NumElems, MVT::i32),
2501 DAG.getValueType(getValueType(EltTy)));
2504 assert(0 && "Don't support illegal by-val vector arguments yet!");
2515 /// TargetLowering::LowerCallTo - This is the default LowerCallTo
2516 /// implementation, which just inserts an ISD::CALL node, which is later custom
2517 /// lowered by the target to something concrete. FIXME: When all targets are
2518 /// migrated to using ISD::CALL, this hook should be integrated into SDISel.
2519 std::pair<SDOperand, SDOperand>
2520 TargetLowering::LowerCallTo(SDOperand Chain, const Type *RetTy, bool isVarArg,
2521 unsigned CallingConv, bool isTailCall,
2523 ArgListTy &Args, SelectionDAG &DAG) {
2524 std::vector<SDOperand> Ops;
2525 Ops.push_back(Chain); // Op#0 - Chain
2526 Ops.push_back(DAG.getConstant(CallingConv, getPointerTy())); // Op#1 - CC
2527 Ops.push_back(DAG.getConstant(isVarArg, getPointerTy())); // Op#2 - VarArg
2528 Ops.push_back(DAG.getConstant(isTailCall, getPointerTy())); // Op#3 - Tail
2529 Ops.push_back(Callee);
2531 // Handle all of the outgoing arguments.
2532 for (unsigned i = 0, e = Args.size(); i != e; ++i) {
2533 MVT::ValueType VT = getValueType(Args[i].second);
2534 SDOperand Op = Args[i].first;
2535 bool isSigned = Args[i].second->isSigned();
2536 switch (getTypeAction(VT)) {
2537 default: assert(0 && "Unknown type action!");
2540 Ops.push_back(DAG.getConstant(isSigned, MVT::i32));
2543 if (MVT::isInteger(VT)) {
2544 unsigned ExtOp = isSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
2545 Op = DAG.getNode(ExtOp, getTypeToTransformTo(VT), Op);
2547 assert(MVT::isFloatingPoint(VT) && "Not int or FP?");
2548 Op = DAG.getNode(ISD::FP_EXTEND, getTypeToTransformTo(VT), Op);
2551 Ops.push_back(DAG.getConstant(isSigned, MVT::i32));
2554 if (VT != MVT::Vector) {
2555 // If this is a large integer, it needs to be broken down into small
2556 // integers. Figure out what the source elt type is and how many small
2558 MVT::ValueType NVT = getTypeToTransformTo(VT);
2559 unsigned NumVals = MVT::getSizeInBits(VT)/MVT::getSizeInBits(NVT);
2561 SDOperand Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, NVT, Op,
2562 DAG.getConstant(0, getPointerTy()));
2563 SDOperand Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, NVT, Op,
2564 DAG.getConstant(1, getPointerTy()));
2565 if (!isLittleEndian())
2569 Ops.push_back(DAG.getConstant(isSigned, MVT::i32));
2571 Ops.push_back(DAG.getConstant(isSigned, MVT::i32));
2573 // Value scalarized into many values. Unimp for now.
2574 assert(0 && "Cannot expand i64 -> i16 yet!");
2577 // Otherwise, this is a vector type. We only support legal vectors
2579 const PackedType *PTy = cast<PackedType>(Args[i].second);
2580 unsigned NumElems = PTy->getNumElements();
2581 const Type *EltTy = PTy->getElementType();
2583 // Figure out if there is a Packed type corresponding to this Vector
2584 // type. If so, convert to the packed type.
2585 MVT::ValueType TVT = MVT::getVectorType(getValueType(EltTy), NumElems);
2586 if (TVT != MVT::Other && isTypeLegal(TVT)) {
2587 // Insert a VBIT_CONVERT of the MVT::Vector type to the packed type.
2588 Op = DAG.getNode(ISD::VBIT_CONVERT, TVT, Op);
2590 Ops.push_back(DAG.getConstant(isSigned, MVT::i32));
2592 assert(0 && "Don't support illegal by-val vector call args yet!");
2600 // Figure out the result value types.
2601 std::vector<MVT::ValueType> RetTys;
2603 if (RetTy != Type::VoidTy) {
2604 MVT::ValueType VT = getValueType(RetTy);
2605 switch (getTypeAction(VT)) {
2606 default: assert(0 && "Unknown type action!");
2608 RetTys.push_back(VT);
2611 RetTys.push_back(getTypeToTransformTo(VT));
2614 if (VT != MVT::Vector) {
2615 // If this is a large integer, it needs to be reassembled from small
2616 // integers. Figure out what the source elt type is and how many small
2618 MVT::ValueType NVT = getTypeToTransformTo(VT);
2619 unsigned NumVals = MVT::getSizeInBits(VT)/MVT::getSizeInBits(NVT);
2620 for (unsigned i = 0; i != NumVals; ++i)
2621 RetTys.push_back(NVT);
2623 // Otherwise, this is a vector type. We only support legal vectors
2625 const PackedType *PTy = cast<PackedType>(RetTy);
2626 unsigned NumElems = PTy->getNumElements();
2627 const Type *EltTy = PTy->getElementType();
2629 // Figure out if there is a Packed type corresponding to this Vector
2630 // type. If so, convert to the packed type.
2631 MVT::ValueType TVT = MVT::getVectorType(getValueType(EltTy), NumElems);
2632 if (TVT != MVT::Other && isTypeLegal(TVT)) {
2633 RetTys.push_back(TVT);
2635 assert(0 && "Don't support illegal by-val vector call results yet!");
2642 RetTys.push_back(MVT::Other); // Always has a chain.
2644 // Finally, create the CALL node.
2645 SDOperand Res = DAG.getNode(ISD::CALL, RetTys, Ops);
2647 // This returns a pair of operands. The first element is the
2648 // return value for the function (if RetTy is not VoidTy). The second
2649 // element is the outgoing token chain.
2651 if (RetTys.size() != 1) {
2652 MVT::ValueType VT = getValueType(RetTy);
2653 if (RetTys.size() == 2) {
2656 // If this value was promoted, truncate it down.
2657 if (ResVal.getValueType() != VT) {
2658 if (VT == MVT::Vector) {
2659 // Insert a VBITCONVERT to convert from the packed result type to the
2660 // MVT::Vector type.
2661 unsigned NumElems = cast<PackedType>(RetTy)->getNumElements();
2662 const Type *EltTy = cast<PackedType>(RetTy)->getElementType();
2664 // Figure out if there is a Packed type corresponding to this Vector
2665 // type. If so, convert to the packed type.
2666 MVT::ValueType TVT = MVT::getVectorType(getValueType(EltTy), NumElems);
2667 if (TVT != MVT::Other && isTypeLegal(TVT)) {
2668 // Insert a VBIT_CONVERT of the FORMAL_ARGUMENTS to a
2669 // "N x PTyElementVT" MVT::Vector type.
2670 ResVal = DAG.getNode(ISD::VBIT_CONVERT, MVT::Vector, ResVal,
2671 DAG.getConstant(NumElems, MVT::i32),
2672 DAG.getValueType(getValueType(EltTy)));
2676 } else if (MVT::isInteger(VT)) {
2677 unsigned AssertOp = RetTy->isSigned() ?
2678 ISD::AssertSext : ISD::AssertZext;
2679 ResVal = DAG.getNode(AssertOp, ResVal.getValueType(), ResVal,
2680 DAG.getValueType(VT));
2681 ResVal = DAG.getNode(ISD::TRUNCATE, VT, ResVal);
2683 assert(MVT::isFloatingPoint(VT));
2684 ResVal = DAG.getNode(ISD::FP_ROUND, VT, ResVal);
2687 } else if (RetTys.size() == 3) {
2688 ResVal = DAG.getNode(ISD::BUILD_PAIR, VT,
2689 Res.getValue(0), Res.getValue(1));
2692 assert(0 && "Case not handled yet!");
2696 return std::make_pair(ResVal, Res.getValue(Res.Val->getNumValues()-1));
2701 // It is always conservatively correct for llvm.returnaddress and
2702 // llvm.frameaddress to return 0.
2704 // FIXME: Change this to insert a FRAMEADDR/RETURNADDR node, and have that be
2705 // expanded to 0 if the target wants.
2706 std::pair<SDOperand, SDOperand>
2707 TargetLowering::LowerFrameReturnAddress(bool isFrameAddr, SDOperand Chain,
2708 unsigned Depth, SelectionDAG &DAG) {
2709 return std::make_pair(DAG.getConstant(0, getPointerTy()), Chain);
2712 SDOperand TargetLowering::LowerOperation(SDOperand Op, SelectionDAG &DAG) {
2713 assert(0 && "LowerOperation not implemented for this target!");
2718 SDOperand TargetLowering::CustomPromoteOperation(SDOperand Op,
2719 SelectionDAG &DAG) {
2720 assert(0 && "CustomPromoteOperation not implemented for this target!");
2725 void SelectionDAGLowering::visitFrameReturnAddress(CallInst &I, bool isFrame) {
2726 unsigned Depth = (unsigned)cast<ConstantUInt>(I.getOperand(1))->getValue();
2727 std::pair<SDOperand,SDOperand> Result =
2728 TLI.LowerFrameReturnAddress(isFrame, getRoot(), Depth, DAG);
2729 setValue(&I, Result.first);
2730 DAG.setRoot(Result.second);
2733 /// getMemsetValue - Vectorized representation of the memset value
2735 static SDOperand getMemsetValue(SDOperand Value, MVT::ValueType VT,
2736 SelectionDAG &DAG) {
2737 MVT::ValueType CurVT = VT;
2738 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Value)) {
2739 uint64_t Val = C->getValue() & 255;
2741 while (CurVT != MVT::i8) {
2742 Val = (Val << Shift) | Val;
2744 CurVT = (MVT::ValueType)((unsigned)CurVT - 1);
2746 return DAG.getConstant(Val, VT);
2748 Value = DAG.getNode(ISD::ZERO_EXTEND, VT, Value);
2750 while (CurVT != MVT::i8) {
2752 DAG.getNode(ISD::OR, VT,
2753 DAG.getNode(ISD::SHL, VT, Value,
2754 DAG.getConstant(Shift, MVT::i8)), Value);
2756 CurVT = (MVT::ValueType)((unsigned)CurVT - 1);
2763 /// getMemsetStringVal - Similar to getMemsetValue. Except this is only
2764 /// used when a memcpy is turned into a memset when the source is a constant
2766 static SDOperand getMemsetStringVal(MVT::ValueType VT,
2767 SelectionDAG &DAG, TargetLowering &TLI,
2768 std::string &Str, unsigned Offset) {
2769 MVT::ValueType CurVT = VT;
2771 unsigned MSB = getSizeInBits(VT) / 8;
2772 if (TLI.isLittleEndian())
2773 Offset = Offset + MSB - 1;
2774 for (unsigned i = 0; i != MSB; ++i) {
2775 Val = (Val << 8) | Str[Offset];
2776 Offset += TLI.isLittleEndian() ? -1 : 1;
2778 return DAG.getConstant(Val, VT);
2781 /// getMemBasePlusOffset - Returns base and offset node for the
2782 static SDOperand getMemBasePlusOffset(SDOperand Base, unsigned Offset,
2783 SelectionDAG &DAG, TargetLowering &TLI) {
2784 MVT::ValueType VT = Base.getValueType();
2785 return DAG.getNode(ISD::ADD, VT, Base, DAG.getConstant(Offset, VT));
2788 /// MeetsMaxMemopRequirement - Determines if the number of memory ops required
2789 /// to replace the memset / memcpy is below the threshold. It also returns the
2790 /// types of the sequence of memory ops to perform memset / memcpy.
2791 static bool MeetsMaxMemopRequirement(std::vector<MVT::ValueType> &MemOps,
2792 unsigned Limit, uint64_t Size,
2793 unsigned Align, TargetLowering &TLI) {
2796 if (TLI.allowsUnalignedMemoryAccesses()) {
2799 switch (Align & 7) {
2815 MVT::ValueType LVT = MVT::i64;
2816 while (!TLI.isTypeLegal(LVT))
2817 LVT = (MVT::ValueType)((unsigned)LVT - 1);
2818 assert(MVT::isInteger(LVT));
2823 unsigned NumMemOps = 0;
2825 unsigned VTSize = getSizeInBits(VT) / 8;
2826 while (VTSize > Size) {
2827 VT = (MVT::ValueType)((unsigned)VT - 1);
2830 assert(MVT::isInteger(VT));
2832 if (++NumMemOps > Limit)
2834 MemOps.push_back(VT);
2841 void SelectionDAGLowering::visitMemIntrinsic(CallInst &I, unsigned Op) {
2842 SDOperand Op1 = getValue(I.getOperand(1));
2843 SDOperand Op2 = getValue(I.getOperand(2));
2844 SDOperand Op3 = getValue(I.getOperand(3));
2845 SDOperand Op4 = getValue(I.getOperand(4));
2846 unsigned Align = (unsigned)cast<ConstantSDNode>(Op4)->getValue();
2847 if (Align == 0) Align = 1;
2849 if (ConstantSDNode *Size = dyn_cast<ConstantSDNode>(Op3)) {
2850 std::vector<MVT::ValueType> MemOps;
2852 // Expand memset / memcpy to a series of load / store ops
2853 // if the size operand falls below a certain threshold.
2854 std::vector<SDOperand> OutChains;
2856 default: break; // Do nothing for now.
2858 if (MeetsMaxMemopRequirement(MemOps, TLI.getMaxStoresPerMemset(),
2859 Size->getValue(), Align, TLI)) {
2860 unsigned NumMemOps = MemOps.size();
2861 unsigned Offset = 0;
2862 for (unsigned i = 0; i < NumMemOps; i++) {
2863 MVT::ValueType VT = MemOps[i];
2864 unsigned VTSize = getSizeInBits(VT) / 8;
2865 SDOperand Value = getMemsetValue(Op2, VT, DAG);
2866 SDOperand Store = DAG.getNode(ISD::STORE, MVT::Other, getRoot(),
2868 getMemBasePlusOffset(Op1, Offset, DAG, TLI),
2869 DAG.getSrcValue(I.getOperand(1), Offset));
2870 OutChains.push_back(Store);
2877 if (MeetsMaxMemopRequirement(MemOps, TLI.getMaxStoresPerMemcpy(),
2878 Size->getValue(), Align, TLI)) {
2879 unsigned NumMemOps = MemOps.size();
2880 unsigned SrcOff = 0, DstOff = 0, SrcDelta = 0;
2881 GlobalAddressSDNode *G = NULL;
2883 bool CopyFromStr = false;
2885 if (Op2.getOpcode() == ISD::GlobalAddress)
2886 G = cast<GlobalAddressSDNode>(Op2);
2887 else if (Op2.getOpcode() == ISD::ADD &&
2888 Op2.getOperand(0).getOpcode() == ISD::GlobalAddress &&
2889 Op2.getOperand(1).getOpcode() == ISD::Constant) {
2890 G = cast<GlobalAddressSDNode>(Op2.getOperand(0));
2891 SrcDelta = cast<ConstantSDNode>(Op2.getOperand(1))->getValue();
2894 GlobalVariable *GV = dyn_cast<GlobalVariable>(G->getGlobal());
2896 Str = GV->getStringValue(false);
2904 for (unsigned i = 0; i < NumMemOps; i++) {
2905 MVT::ValueType VT = MemOps[i];
2906 unsigned VTSize = getSizeInBits(VT) / 8;
2907 SDOperand Value, Chain, Store;
2910 Value = getMemsetStringVal(VT, DAG, TLI, Str, SrcOff);
2913 DAG.getNode(ISD::STORE, MVT::Other, Chain, Value,
2914 getMemBasePlusOffset(Op1, DstOff, DAG, TLI),
2915 DAG.getSrcValue(I.getOperand(1), DstOff));
2917 Value = DAG.getLoad(VT, getRoot(),
2918 getMemBasePlusOffset(Op2, SrcOff, DAG, TLI),
2919 DAG.getSrcValue(I.getOperand(2), SrcOff));
2920 Chain = Value.getValue(1);
2922 DAG.getNode(ISD::STORE, MVT::Other, Chain, Value,
2923 getMemBasePlusOffset(Op1, DstOff, DAG, TLI),
2924 DAG.getSrcValue(I.getOperand(1), DstOff));
2926 OutChains.push_back(Store);
2935 if (!OutChains.empty()) {
2936 DAG.setRoot(DAG.getNode(ISD::TokenFactor, MVT::Other, OutChains));
2941 std::vector<SDOperand> Ops;
2942 Ops.push_back(getRoot());
2947 DAG.setRoot(DAG.getNode(Op, MVT::Other, Ops));
2950 //===----------------------------------------------------------------------===//
2951 // SelectionDAGISel code
2952 //===----------------------------------------------------------------------===//
2954 unsigned SelectionDAGISel::MakeReg(MVT::ValueType VT) {
2955 return RegMap->createVirtualRegister(TLI.getRegClassFor(VT));
2958 void SelectionDAGISel::getAnalysisUsage(AnalysisUsage &AU) const {
2959 // FIXME: we only modify the CFG to split critical edges. This
2960 // updates dom and loop info.
2964 /// OptimizeNoopCopyExpression - We have determined that the specified cast
2965 /// instruction is a noop copy (e.g. it's casting from one pointer type to
2966 /// another, int->uint, or int->sbyte on PPC.
2968 /// Return true if any changes are made.
2969 static bool OptimizeNoopCopyExpression(CastInst *CI) {
2970 BasicBlock *DefBB = CI->getParent();
2972 /// InsertedCasts - Only insert a cast in each block once.
2973 std::map<BasicBlock*, CastInst*> InsertedCasts;
2975 bool MadeChange = false;
2976 for (Value::use_iterator UI = CI->use_begin(), E = CI->use_end();
2978 Use &TheUse = UI.getUse();
2979 Instruction *User = cast<Instruction>(*UI);
2981 // Figure out which BB this cast is used in. For PHI's this is the
2982 // appropriate predecessor block.
2983 BasicBlock *UserBB = User->getParent();
2984 if (PHINode *PN = dyn_cast<PHINode>(User)) {
2985 unsigned OpVal = UI.getOperandNo()/2;
2986 UserBB = PN->getIncomingBlock(OpVal);
2989 // Preincrement use iterator so we don't invalidate it.
2992 // If this user is in the same block as the cast, don't change the cast.
2993 if (UserBB == DefBB) continue;
2995 // If we have already inserted a cast into this block, use it.
2996 CastInst *&InsertedCast = InsertedCasts[UserBB];
2998 if (!InsertedCast) {
2999 BasicBlock::iterator InsertPt = UserBB->begin();
3000 while (isa<PHINode>(InsertPt)) ++InsertPt;
3003 new CastInst(CI->getOperand(0), CI->getType(), "", InsertPt);
3007 // Replace a use of the cast with a use of the new casat.
3008 TheUse = InsertedCast;
3011 // If we removed all uses, nuke the cast.
3012 if (CI->use_empty())
3013 CI->eraseFromParent();
3018 /// InsertGEPComputeCode - Insert code into BB to compute Ptr+PtrOffset,
3019 /// casting to the type of GEPI.
3020 static Instruction *InsertGEPComputeCode(Instruction *&V, BasicBlock *BB,
3021 Instruction *GEPI, Value *Ptr,
3023 if (V) return V; // Already computed.
3025 BasicBlock::iterator InsertPt;
3026 if (BB == GEPI->getParent()) {
3027 // If insert into the GEP's block, insert right after the GEP.
3031 // Otherwise, insert at the top of BB, after any PHI nodes
3032 InsertPt = BB->begin();
3033 while (isa<PHINode>(InsertPt)) ++InsertPt;
3036 // If Ptr is itself a cast, but in some other BB, emit a copy of the cast into
3037 // BB so that there is only one value live across basic blocks (the cast
3039 if (CastInst *CI = dyn_cast<CastInst>(Ptr))
3040 if (CI->getParent() != BB && isa<PointerType>(CI->getOperand(0)->getType()))
3041 Ptr = new CastInst(CI->getOperand(0), CI->getType(), "", InsertPt);
3043 // Add the offset, cast it to the right type.
3044 Ptr = BinaryOperator::createAdd(Ptr, PtrOffset, "", InsertPt);
3045 return V = new CastInst(Ptr, GEPI->getType(), "", InsertPt);
3048 /// ReplaceUsesOfGEPInst - Replace all uses of RepPtr with inserted code to
3049 /// compute its value. The RepPtr value can be computed with Ptr+PtrOffset. One
3050 /// trivial way of doing this would be to evaluate Ptr+PtrOffset in RepPtr's
3051 /// block, then ReplaceAllUsesWith'ing everything. However, we would prefer to
3052 /// sink PtrOffset into user blocks where doing so will likely allow us to fold
3053 /// the constant add into a load or store instruction. Additionally, if a user
3054 /// is a pointer-pointer cast, we look through it to find its users.
3055 static void ReplaceUsesOfGEPInst(Instruction *RepPtr, Value *Ptr,
3056 Constant *PtrOffset, BasicBlock *DefBB,
3057 GetElementPtrInst *GEPI,
3058 std::map<BasicBlock*,Instruction*> &InsertedExprs) {
3059 while (!RepPtr->use_empty()) {
3060 Instruction *User = cast<Instruction>(RepPtr->use_back());
3062 // If the user is a Pointer-Pointer cast, recurse.
3063 if (isa<CastInst>(User) && isa<PointerType>(User->getType())) {
3064 ReplaceUsesOfGEPInst(User, Ptr, PtrOffset, DefBB, GEPI, InsertedExprs);
3066 // Drop the use of RepPtr. The cast is dead. Don't delete it now, else we
3067 // could invalidate an iterator.
3068 User->setOperand(0, UndefValue::get(RepPtr->getType()));
3072 // If this is a load of the pointer, or a store through the pointer, emit
3073 // the increment into the load/store block.
3074 Instruction *NewVal;
3075 if (isa<LoadInst>(User) ||
3076 (isa<StoreInst>(User) && User->getOperand(0) != RepPtr)) {
3077 NewVal = InsertGEPComputeCode(InsertedExprs[User->getParent()],
3078 User->getParent(), GEPI,
3081 // If this use is not foldable into the addressing mode, use a version
3082 // emitted in the GEP block.
3083 NewVal = InsertGEPComputeCode(InsertedExprs[DefBB], DefBB, GEPI,
3087 if (GEPI->getType() != RepPtr->getType()) {
3088 BasicBlock::iterator IP = NewVal;
3090 NewVal = new CastInst(NewVal, RepPtr->getType(), "", IP);
3092 User->replaceUsesOfWith(RepPtr, NewVal);
3097 /// OptimizeGEPExpression - Since we are doing basic-block-at-a-time instruction
3098 /// selection, we want to be a bit careful about some things. In particular, if
3099 /// we have a GEP instruction that is used in a different block than it is
3100 /// defined, the addressing expression of the GEP cannot be folded into loads or
3101 /// stores that use it. In this case, decompose the GEP and move constant
3102 /// indices into blocks that use it.
3103 static bool OptimizeGEPExpression(GetElementPtrInst *GEPI,
3104 const TargetData *TD) {
3105 // If this GEP is only used inside the block it is defined in, there is no
3106 // need to rewrite it.
3107 bool isUsedOutsideDefBB = false;
3108 BasicBlock *DefBB = GEPI->getParent();
3109 for (Value::use_iterator UI = GEPI->use_begin(), E = GEPI->use_end();
3111 if (cast<Instruction>(*UI)->getParent() != DefBB) {
3112 isUsedOutsideDefBB = true;
3116 if (!isUsedOutsideDefBB) return false;
3118 // If this GEP has no non-zero constant indices, there is nothing we can do,
3120 bool hasConstantIndex = false;
3121 bool hasVariableIndex = false;
3122 for (GetElementPtrInst::op_iterator OI = GEPI->op_begin()+1,
3123 E = GEPI->op_end(); OI != E; ++OI) {
3124 if (ConstantInt *CI = dyn_cast<ConstantInt>(*OI)) {
3125 if (CI->getRawValue()) {
3126 hasConstantIndex = true;
3130 hasVariableIndex = true;
3134 // If this is a "GEP X, 0, 0, 0", turn this into a cast.
3135 if (!hasConstantIndex && !hasVariableIndex) {
3136 Value *NC = new CastInst(GEPI->getOperand(0), GEPI->getType(),
3137 GEPI->getName(), GEPI);
3138 GEPI->replaceAllUsesWith(NC);
3139 GEPI->eraseFromParent();
3143 // If this is a GEP &Alloca, 0, 0, forward subst the frame index into uses.
3144 if (!hasConstantIndex && !isa<AllocaInst>(GEPI->getOperand(0)))
3147 // Otherwise, decompose the GEP instruction into multiplies and adds. Sum the
3148 // constant offset (which we now know is non-zero) and deal with it later.
3149 uint64_t ConstantOffset = 0;
3150 const Type *UIntPtrTy = TD->getIntPtrType();
3151 Value *Ptr = new CastInst(GEPI->getOperand(0), UIntPtrTy, "", GEPI);
3152 const Type *Ty = GEPI->getOperand(0)->getType();
3154 for (GetElementPtrInst::op_iterator OI = GEPI->op_begin()+1,
3155 E = GEPI->op_end(); OI != E; ++OI) {
3157 if (const StructType *StTy = dyn_cast<StructType>(Ty)) {
3158 unsigned Field = cast<ConstantUInt>(Idx)->getValue();
3160 ConstantOffset += TD->getStructLayout(StTy)->MemberOffsets[Field];
3161 Ty = StTy->getElementType(Field);
3163 Ty = cast<SequentialType>(Ty)->getElementType();
3165 // Handle constant subscripts.
3166 if (ConstantInt *CI = dyn_cast<ConstantInt>(Idx)) {
3167 if (CI->getRawValue() == 0) continue;
3169 if (ConstantSInt *CSI = dyn_cast<ConstantSInt>(CI))
3170 ConstantOffset += (int64_t)TD->getTypeSize(Ty)*CSI->getValue();
3172 ConstantOffset+=TD->getTypeSize(Ty)*cast<ConstantUInt>(CI)->getValue();
3176 // Ptr = Ptr + Idx * ElementSize;
3178 // Cast Idx to UIntPtrTy if needed.
3179 Idx = new CastInst(Idx, UIntPtrTy, "", GEPI);
3181 uint64_t ElementSize = TD->getTypeSize(Ty);
3182 // Mask off bits that should not be set.
3183 ElementSize &= ~0ULL >> (64-UIntPtrTy->getPrimitiveSizeInBits());
3184 Constant *SizeCst = ConstantUInt::get(UIntPtrTy, ElementSize);
3186 // Multiply by the element size and add to the base.
3187 Idx = BinaryOperator::createMul(Idx, SizeCst, "", GEPI);
3188 Ptr = BinaryOperator::createAdd(Ptr, Idx, "", GEPI);
3192 // Make sure that the offset fits in uintptr_t.
3193 ConstantOffset &= ~0ULL >> (64-UIntPtrTy->getPrimitiveSizeInBits());
3194 Constant *PtrOffset = ConstantUInt::get(UIntPtrTy, ConstantOffset);
3196 // Okay, we have now emitted all of the variable index parts to the BB that
3197 // the GEP is defined in. Loop over all of the using instructions, inserting
3198 // an "add Ptr, ConstantOffset" into each block that uses it and update the
3199 // instruction to use the newly computed value, making GEPI dead. When the
3200 // user is a load or store instruction address, we emit the add into the user
3201 // block, otherwise we use a canonical version right next to the gep (these
3202 // won't be foldable as addresses, so we might as well share the computation).
3204 std::map<BasicBlock*,Instruction*> InsertedExprs;
3205 ReplaceUsesOfGEPInst(GEPI, Ptr, PtrOffset, DefBB, GEPI, InsertedExprs);
3207 // Finally, the GEP is dead, remove it.
3208 GEPI->eraseFromParent();
3213 bool SelectionDAGISel::runOnFunction(Function &Fn) {
3214 MachineFunction &MF = MachineFunction::construct(&Fn, TLI.getTargetMachine());
3215 RegMap = MF.getSSARegMap();
3216 DEBUG(std::cerr << "\n\n\n=== " << Fn.getName() << "\n");
3218 // First, split all critical edges for PHI nodes with incoming values that are
3219 // constants, this way the load of the constant into a vreg will not be placed
3220 // into MBBs that are used some other way.
3222 // In this pass we also look for GEP and cast instructions that are used
3223 // across basic blocks and rewrite them to improve basic-block-at-a-time
3227 bool MadeChange = true;
3228 while (MadeChange) {
3230 for (Function::iterator BB = Fn.begin(), E = Fn.end(); BB != E; ++BB) {
3232 BasicBlock::iterator BBI;
3233 for (BBI = BB->begin(); (PN = dyn_cast<PHINode>(BBI)); ++BBI)
3234 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
3235 if (isa<Constant>(PN->getIncomingValue(i)))
3236 SplitCriticalEdge(PN->getIncomingBlock(i), BB);
3238 for (BasicBlock::iterator E = BB->end(); BBI != E; ) {
3239 Instruction *I = BBI++;
3240 if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(I)) {
3241 MadeChange |= OptimizeGEPExpression(GEPI, TLI.getTargetData());
3242 } else if (CastInst *CI = dyn_cast<CastInst>(I)) {
3243 // If this is a noop copy, sink it into user blocks to reduce the number
3244 // of virtual registers that must be created and coallesced.
3245 MVT::ValueType SrcVT = TLI.getValueType(CI->getOperand(0)->getType());
3246 MVT::ValueType DstVT = TLI.getValueType(CI->getType());
3248 // This is an fp<->int conversion?
3249 if (MVT::isInteger(SrcVT) != MVT::isInteger(DstVT))
3252 // If this is an extension, it will be a zero or sign extension, which
3254 if (SrcVT < DstVT) continue;
3256 // If these values will be promoted, find out what they will be promoted
3257 // to. This helps us consider truncates on PPC as noop copies when they
3259 if (TLI.getTypeAction(SrcVT) == TargetLowering::Promote)
3260 SrcVT = TLI.getTypeToTransformTo(SrcVT);
3261 if (TLI.getTypeAction(DstVT) == TargetLowering::Promote)
3262 DstVT = TLI.getTypeToTransformTo(DstVT);
3264 // If, after promotion, these are the same types, this is a noop copy.
3266 MadeChange |= OptimizeNoopCopyExpression(CI);
3272 FunctionLoweringInfo FuncInfo(TLI, Fn, MF);
3274 for (Function::iterator I = Fn.begin(), E = Fn.end(); I != E; ++I)
3275 SelectBasicBlock(I, MF, FuncInfo);
3281 SDOperand SelectionDAGISel::
3282 CopyValueToVirtualRegister(SelectionDAGLowering &SDL, Value *V, unsigned Reg) {
3283 SDOperand Op = SDL.getValue(V);
3284 assert((Op.getOpcode() != ISD::CopyFromReg ||
3285 cast<RegisterSDNode>(Op.getOperand(1))->getReg() != Reg) &&
3286 "Copy from a reg to the same reg!");
3288 // If this type is not legal, we must make sure to not create an invalid
3290 MVT::ValueType SrcVT = Op.getValueType();
3291 MVT::ValueType DestVT = TLI.getTypeToTransformTo(SrcVT);
3292 SelectionDAG &DAG = SDL.DAG;
3293 if (SrcVT == DestVT) {
3294 return DAG.getCopyToReg(SDL.getRoot(), Reg, Op);
3295 } else if (SrcVT == MVT::Vector) {
3296 // Handle copies from generic vectors to registers.
3297 MVT::ValueType PTyElementVT, PTyLegalElementVT;
3298 unsigned NE = TLI.getPackedTypeBreakdown(cast<PackedType>(V->getType()),
3299 PTyElementVT, PTyLegalElementVT);
3301 // Insert a VBIT_CONVERT of the input vector to a "N x PTyElementVT"
3302 // MVT::Vector type.
3303 Op = DAG.getNode(ISD::VBIT_CONVERT, MVT::Vector, Op,
3304 DAG.getConstant(NE, MVT::i32),
3305 DAG.getValueType(PTyElementVT));
3307 // Loop over all of the elements of the resultant vector,
3308 // VEXTRACT_VECTOR_ELT'ing them, converting them to PTyLegalElementVT, then
3309 // copying them into output registers.
3310 std::vector<SDOperand> OutChains;
3311 SDOperand Root = SDL.getRoot();
3312 for (unsigned i = 0; i != NE; ++i) {
3313 SDOperand Elt = DAG.getNode(ISD::VEXTRACT_VECTOR_ELT, PTyElementVT,
3314 Op, DAG.getConstant(i, TLI.getPointerTy()));
3315 if (PTyElementVT == PTyLegalElementVT) {
3316 // Elements are legal.
3317 OutChains.push_back(DAG.getCopyToReg(Root, Reg++, Elt));
3318 } else if (PTyLegalElementVT > PTyElementVT) {
3319 // Elements are promoted.
3320 if (MVT::isFloatingPoint(PTyLegalElementVT))
3321 Elt = DAG.getNode(ISD::FP_EXTEND, PTyLegalElementVT, Elt);
3323 Elt = DAG.getNode(ISD::ANY_EXTEND, PTyLegalElementVT, Elt);
3324 OutChains.push_back(DAG.getCopyToReg(Root, Reg++, Elt));
3326 // Elements are expanded.
3327 // The src value is expanded into multiple registers.
3328 SDOperand Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, PTyLegalElementVT,
3329 Elt, DAG.getConstant(0, TLI.getPointerTy()));
3330 SDOperand Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, PTyLegalElementVT,
3331 Elt, DAG.getConstant(1, TLI.getPointerTy()));
3332 OutChains.push_back(DAG.getCopyToReg(Root, Reg++, Lo));
3333 OutChains.push_back(DAG.getCopyToReg(Root, Reg++, Hi));
3336 return DAG.getNode(ISD::TokenFactor, MVT::Other, OutChains);
3337 } else if (SrcVT < DestVT) {
3338 // The src value is promoted to the register.
3339 if (MVT::isFloatingPoint(SrcVT))
3340 Op = DAG.getNode(ISD::FP_EXTEND, DestVT, Op);
3342 Op = DAG.getNode(ISD::ANY_EXTEND, DestVT, Op);
3343 return DAG.getCopyToReg(SDL.getRoot(), Reg, Op);
3345 // The src value is expanded into multiple registers.
3346 SDOperand Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DestVT,
3347 Op, DAG.getConstant(0, TLI.getPointerTy()));
3348 SDOperand Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DestVT,
3349 Op, DAG.getConstant(1, TLI.getPointerTy()));
3350 Op = DAG.getCopyToReg(SDL.getRoot(), Reg, Lo);
3351 return DAG.getCopyToReg(Op, Reg+1, Hi);
3355 void SelectionDAGISel::
3356 LowerArguments(BasicBlock *BB, SelectionDAGLowering &SDL,
3357 std::vector<SDOperand> &UnorderedChains) {
3358 // If this is the entry block, emit arguments.
3359 Function &F = *BB->getParent();
3360 FunctionLoweringInfo &FuncInfo = SDL.FuncInfo;
3361 SDOperand OldRoot = SDL.DAG.getRoot();
3362 std::vector<SDOperand> Args = TLI.LowerArguments(F, SDL.DAG);
3365 for (Function::arg_iterator AI = F.arg_begin(), E = F.arg_end();
3367 if (!AI->use_empty()) {
3368 SDL.setValue(AI, Args[a]);
3370 // If this argument is live outside of the entry block, insert a copy from
3371 // whereever we got it to the vreg that other BB's will reference it as.
3372 if (FuncInfo.ValueMap.count(AI)) {
3374 CopyValueToVirtualRegister(SDL, AI, FuncInfo.ValueMap[AI]);
3375 UnorderedChains.push_back(Copy);
3379 // Finally, if the target has anything special to do, allow it to do so.
3380 // FIXME: this should insert code into the DAG!
3381 EmitFunctionEntryCode(F, SDL.DAG.getMachineFunction());
3384 void SelectionDAGISel::BuildSelectionDAG(SelectionDAG &DAG, BasicBlock *LLVMBB,
3385 std::vector<std::pair<MachineInstr*, unsigned> > &PHINodesToUpdate,
3386 FunctionLoweringInfo &FuncInfo) {
3387 SelectionDAGLowering SDL(DAG, TLI, FuncInfo);
3389 std::vector<SDOperand> UnorderedChains;
3391 // Lower any arguments needed in this block if this is the entry block.
3392 if (LLVMBB == &LLVMBB->getParent()->front())
3393 LowerArguments(LLVMBB, SDL, UnorderedChains);
3395 BB = FuncInfo.MBBMap[LLVMBB];
3396 SDL.setCurrentBasicBlock(BB);
3398 // Lower all of the non-terminator instructions.
3399 for (BasicBlock::iterator I = LLVMBB->begin(), E = --LLVMBB->end();
3403 // Ensure that all instructions which are used outside of their defining
3404 // blocks are available as virtual registers.
3405 for (BasicBlock::iterator I = LLVMBB->begin(), E = LLVMBB->end(); I != E;++I)
3406 if (!I->use_empty() && !isa<PHINode>(I)) {
3407 std::map<const Value*, unsigned>::iterator VMI =FuncInfo.ValueMap.find(I);
3408 if (VMI != FuncInfo.ValueMap.end())
3409 UnorderedChains.push_back(
3410 CopyValueToVirtualRegister(SDL, I, VMI->second));
3413 // Handle PHI nodes in successor blocks. Emit code into the SelectionDAG to
3414 // ensure constants are generated when needed. Remember the virtual registers
3415 // that need to be added to the Machine PHI nodes as input. We cannot just
3416 // directly add them, because expansion might result in multiple MBB's for one
3417 // BB. As such, the start of the BB might correspond to a different MBB than
3421 // Emit constants only once even if used by multiple PHI nodes.
3422 std::map<Constant*, unsigned> ConstantsOut;
3424 // Check successor nodes PHI nodes that expect a constant to be available from
3426 TerminatorInst *TI = LLVMBB->getTerminator();
3427 for (unsigned succ = 0, e = TI->getNumSuccessors(); succ != e; ++succ) {
3428 BasicBlock *SuccBB = TI->getSuccessor(succ);
3429 MachineBasicBlock::iterator MBBI = FuncInfo.MBBMap[SuccBB]->begin();
3432 // At this point we know that there is a 1-1 correspondence between LLVM PHI
3433 // nodes and Machine PHI nodes, but the incoming operands have not been
3435 for (BasicBlock::iterator I = SuccBB->begin();
3436 (PN = dyn_cast<PHINode>(I)); ++I)
3437 if (!PN->use_empty()) {
3439 Value *PHIOp = PN->getIncomingValueForBlock(LLVMBB);
3440 if (Constant *C = dyn_cast<Constant>(PHIOp)) {
3441 unsigned &RegOut = ConstantsOut[C];
3443 RegOut = FuncInfo.CreateRegForValue(C);
3444 UnorderedChains.push_back(
3445 CopyValueToVirtualRegister(SDL, C, RegOut));
3449 Reg = FuncInfo.ValueMap[PHIOp];
3451 assert(isa<AllocaInst>(PHIOp) &&
3452 FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(PHIOp)) &&
3453 "Didn't codegen value into a register!??");
3454 Reg = FuncInfo.CreateRegForValue(PHIOp);
3455 UnorderedChains.push_back(
3456 CopyValueToVirtualRegister(SDL, PHIOp, Reg));
3460 // Remember that this register needs to added to the machine PHI node as
3461 // the input for this MBB.
3462 MVT::ValueType VT = TLI.getValueType(PN->getType());
3463 unsigned NumElements;
3464 if (VT != MVT::Vector)
3465 NumElements = TLI.getNumElements(VT);
3467 MVT::ValueType VT1,VT2;
3469 TLI.getPackedTypeBreakdown(cast<PackedType>(PN->getType()),
3472 for (unsigned i = 0, e = NumElements; i != e; ++i)
3473 PHINodesToUpdate.push_back(std::make_pair(MBBI++, Reg+i));
3476 ConstantsOut.clear();
3478 // Turn all of the unordered chains into one factored node.
3479 if (!UnorderedChains.empty()) {
3480 SDOperand Root = SDL.getRoot();
3481 if (Root.getOpcode() != ISD::EntryToken) {
3482 unsigned i = 0, e = UnorderedChains.size();
3483 for (; i != e; ++i) {
3484 assert(UnorderedChains[i].Val->getNumOperands() > 1);
3485 if (UnorderedChains[i].Val->getOperand(0) == Root)
3486 break; // Don't add the root if we already indirectly depend on it.
3490 UnorderedChains.push_back(Root);
3492 DAG.setRoot(DAG.getNode(ISD::TokenFactor, MVT::Other, UnorderedChains));
3495 // Lower the terminator after the copies are emitted.
3496 SDL.visit(*LLVMBB->getTerminator());
3498 // Copy over any CaseBlock records that may now exist due to SwitchInst
3499 // lowering, as well as any jump table information.
3500 SwitchCases.clear();
3501 SwitchCases = SDL.SwitchCases;
3504 // Make sure the root of the DAG is up-to-date.
3505 DAG.setRoot(SDL.getRoot());
3508 void SelectionDAGISel::CodeGenAndEmitDAG(SelectionDAG &DAG) {
3509 // Run the DAG combiner in pre-legalize mode.
3512 DEBUG(std::cerr << "Lowered selection DAG:\n");
3515 // Second step, hack on the DAG until it only uses operations and types that
3516 // the target supports.
3519 DEBUG(std::cerr << "Legalized selection DAG:\n");
3522 // Run the DAG combiner in post-legalize mode.
3525 if (ViewISelDAGs) DAG.viewGraph();
3527 // Third, instruction select all of the operations to machine code, adding the
3528 // code to the MachineBasicBlock.
3529 InstructionSelectBasicBlock(DAG);
3531 DEBUG(std::cerr << "Selected machine code:\n");
3535 void SelectionDAGISel::SelectBasicBlock(BasicBlock *LLVMBB, MachineFunction &MF,
3536 FunctionLoweringInfo &FuncInfo) {
3537 std::vector<std::pair<MachineInstr*, unsigned> > PHINodesToUpdate;
3539 SelectionDAG DAG(TLI, MF, getAnalysisToUpdate<MachineDebugInfo>());
3542 // First step, lower LLVM code to some DAG. This DAG may use operations and
3543 // types that are not supported by the target.
3544 BuildSelectionDAG(DAG, LLVMBB, PHINodesToUpdate, FuncInfo);
3546 // Second step, emit the lowered DAG as machine code.
3547 CodeGenAndEmitDAG(DAG);
3550 // Next, now that we know what the last MBB the LLVM BB expanded is, update
3551 // PHI nodes in successors.
3552 if (SwitchCases.empty() && JT.Reg == 0) {
3553 for (unsigned i = 0, e = PHINodesToUpdate.size(); i != e; ++i) {
3554 MachineInstr *PHI = PHINodesToUpdate[i].first;
3555 assert(PHI->getOpcode() == TargetInstrInfo::PHI &&
3556 "This is not a machine PHI node that we are updating!");
3557 PHI->addRegOperand(PHINodesToUpdate[i].second);
3558 PHI->addMachineBasicBlockOperand(BB);
3563 // If the JumpTable record is filled in, then we need to emit a jump table.
3564 // Updating the PHI nodes is tricky in this case, since we need to determine
3565 // whether the PHI is a successor of the range check MBB or the jump table MBB
3567 assert(SwitchCases.empty() && "Cannot have jump table and lowered switch");
3568 SelectionDAG SDAG(TLI, MF, getAnalysisToUpdate<MachineDebugInfo>());
3570 SelectionDAGLowering SDL(SDAG, TLI, FuncInfo);
3571 MachineBasicBlock *RangeBB = BB;
3572 // Set the current basic block to the mbb we wish to insert the code into
3574 SDL.setCurrentBasicBlock(BB);
3576 SDL.visitJumpTable(JT);
3577 SDAG.setRoot(SDL.getRoot());
3578 CodeGenAndEmitDAG(SDAG);
3580 for (unsigned pi = 0, pe = PHINodesToUpdate.size(); pi != pe; ++pi) {
3581 MachineInstr *PHI = PHINodesToUpdate[pi].first;
3582 MachineBasicBlock *PHIBB = PHI->getParent();
3583 assert(PHI->getOpcode() == TargetInstrInfo::PHI &&
3584 "This is not a machine PHI node that we are updating!");
3585 if (PHIBB == JT.Default) {
3586 PHI->addRegOperand(PHINodesToUpdate[pi].second);
3587 PHI->addMachineBasicBlockOperand(RangeBB);
3589 if (BB->succ_end() != std::find(BB->succ_begin(),BB->succ_end(), PHIBB)) {
3590 PHI->addRegOperand(PHINodesToUpdate[pi].second);
3591 PHI->addMachineBasicBlockOperand(BB);
3597 // If we generated any switch lowering information, build and codegen any
3598 // additional DAGs necessary.
3599 for(unsigned i = 0, e = SwitchCases.size(); i != e; ++i) {
3600 SelectionDAG SDAG(TLI, MF, getAnalysisToUpdate<MachineDebugInfo>());
3602 SelectionDAGLowering SDL(SDAG, TLI, FuncInfo);
3603 // Set the current basic block to the mbb we wish to insert the code into
3604 BB = SwitchCases[i].ThisBB;
3605 SDL.setCurrentBasicBlock(BB);
3607 SDL.visitSwitchCase(SwitchCases[i]);
3608 SDAG.setRoot(SDL.getRoot());
3609 CodeGenAndEmitDAG(SDAG);
3610 // Iterate over the phi nodes, if there is a phi node in a successor of this
3611 // block (for instance, the default block), then add a pair of operands to
3612 // the phi node for this block, as if we were coming from the original
3613 // BB before switch expansion.
3614 for (unsigned pi = 0, pe = PHINodesToUpdate.size(); pi != pe; ++pi) {
3615 MachineInstr *PHI = PHINodesToUpdate[pi].first;
3616 MachineBasicBlock *PHIBB = PHI->getParent();
3617 assert(PHI->getOpcode() == TargetInstrInfo::PHI &&
3618 "This is not a machine PHI node that we are updating!");
3619 if (PHIBB == SwitchCases[i].LHSBB || PHIBB == SwitchCases[i].RHSBB) {
3620 PHI->addRegOperand(PHINodesToUpdate[pi].second);
3621 PHI->addMachineBasicBlockOperand(BB);
3627 //===----------------------------------------------------------------------===//
3628 /// ScheduleAndEmitDAG - Pick a safe ordering and emit instructions for each
3629 /// target node in the graph.
3630 void SelectionDAGISel::ScheduleAndEmitDAG(SelectionDAG &DAG) {
3631 if (ViewSchedDAGs) DAG.viewGraph();
3632 ScheduleDAG *SL = NULL;
3634 switch (ISHeuristic) {
3635 default: assert(0 && "Unrecognized scheduling heuristic");
3636 case defaultScheduling:
3637 if (TLI.getSchedulingPreference() == TargetLowering::SchedulingForLatency)
3638 SL = createTDListDAGScheduler(DAG, BB, CreateTargetHazardRecognizer());
3640 assert(TLI.getSchedulingPreference() ==
3641 TargetLowering::SchedulingForRegPressure && "Unknown sched type!");
3642 SL = createBURRListDAGScheduler(DAG, BB);
3646 SL = createBFS_DAGScheduler(DAG, BB);
3648 case simpleScheduling:
3649 SL = createSimpleDAGScheduler(false, DAG, BB);
3651 case simpleNoItinScheduling:
3652 SL = createSimpleDAGScheduler(true, DAG, BB);
3654 case listSchedulingBURR:
3655 SL = createBURRListDAGScheduler(DAG, BB);
3657 case listSchedulingTDRR:
3658 SL = createTDRRListDAGScheduler(DAG, BB);
3660 case listSchedulingTD:
3661 SL = createTDListDAGScheduler(DAG, BB, CreateTargetHazardRecognizer());
3668 HazardRecognizer *SelectionDAGISel::CreateTargetHazardRecognizer() {
3669 return new HazardRecognizer();
3672 /// SelectInlineAsmMemoryOperands - Calls to this are automatically generated
3673 /// by tblgen. Others should not call it.
3674 void SelectionDAGISel::
3675 SelectInlineAsmMemoryOperands(std::vector<SDOperand> &Ops, SelectionDAG &DAG) {
3676 std::vector<SDOperand> InOps;
3677 std::swap(InOps, Ops);
3679 Ops.push_back(InOps[0]); // input chain.
3680 Ops.push_back(InOps[1]); // input asm string.
3682 unsigned i = 2, e = InOps.size();
3683 if (InOps[e-1].getValueType() == MVT::Flag)
3684 --e; // Don't process a flag operand if it is here.
3687 unsigned Flags = cast<ConstantSDNode>(InOps[i])->getValue();
3688 if ((Flags & 7) != 4 /*MEM*/) {
3689 // Just skip over this operand, copying the operands verbatim.
3690 Ops.insert(Ops.end(), InOps.begin()+i, InOps.begin()+i+(Flags >> 3) + 1);
3691 i += (Flags >> 3) + 1;
3693 assert((Flags >> 3) == 1 && "Memory operand with multiple values?");
3694 // Otherwise, this is a memory operand. Ask the target to select it.
3695 std::vector<SDOperand> SelOps;
3696 if (SelectInlineAsmMemoryOperand(InOps[i+1], 'm', SelOps, DAG)) {
3697 std::cerr << "Could not match memory address. Inline asm failure!\n";
3701 // Add this to the output node.
3702 Ops.push_back(DAG.getConstant(4/*MEM*/ | (SelOps.size() << 3), MVT::i32));
3703 Ops.insert(Ops.end(), SelOps.begin(), SelOps.end());
3708 // Add the flag input back if present.
3709 if (e != InOps.size())
3710 Ops.push_back(InOps.back());