//===-- SelectionDAGISel.cpp - Implement the SelectionDAGISel class -------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file was developed by the LLVM research group and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements the SelectionDAGISel class.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "isel"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/CodeGen/ScheduleDAG.h"
#include "llvm/CallingConv.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Function.h"
#include "llvm/GlobalVariable.h"
#include "llvm/InlineAsm.h"
#include "llvm/Instructions.h"
#include "llvm/Intrinsics.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/CodeGen/IntrinsicLowering.h"
#include "llvm/CodeGen/MachineDebugInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/SchedulerRegistry.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SSARegMap.h"
#include "llvm/Target/MRegisterInfo.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetFrameInfo.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/Compiler.h"
#include <map>
#include <set>
#include <algorithm>
using namespace llvm;

#ifndef NDEBUG
static cl::opt<bool>
ViewISelDAGs("view-isel-dags", cl::Hidden,
          cl::desc("Pop up a window to show isel dags as they are selected"));
static cl::opt<bool>
ViewSchedDAGs("view-sched-dags", cl::Hidden,
          cl::desc("Pop up a window to show sched dags as they are processed"));
#else
static const bool ViewISelDAGs = 0, ViewSchedDAGs = 0;
#endif

//===---------------------------------------------------------------------===//
///
/// RegisterScheduler class - Track the registration of instruction schedulers.
///
//===---------------------------------------------------------------------===//
MachinePassRegistry RegisterScheduler::Registry;

//===---------------------------------------------------------------------===//
///
/// ISHeuristic command line option for instruction schedulers.
///
//===---------------------------------------------------------------------===//
namespace {
  cl::opt<RegisterScheduler::FunctionPassCtor, false,
          RegisterPassParser<RegisterScheduler> >
  ISHeuristic("sched",
              cl::init(&createDefaultScheduler),
              cl::desc("Instruction schedulers available:"));

  static RegisterScheduler
  defaultListDAGScheduler("default", "  Best scheduler for the target",
                          createDefaultScheduler);
} // namespace
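
// Illustrative sketch (not part of the original file): additional schedulers
// plug into the same MachinePassRegistry.  The "list-td" option name below is
// hypothetical, but createTDListDAGScheduler is the factory used by
// createDefaultScheduler later in this file:
//
//   static RegisterScheduler
//   tdListDAGScheduler("list-td", "  Top-down list scheduler",
//                      createTDListDAGScheduler);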

namespace llvm {

/// RegsForValue - This struct represents the physical registers that a
/// particular value is assigned and the type information about the value.
/// This is needed because values can be promoted into larger registers and
/// expanded into multiple smaller registers than the value.
struct VISIBILITY_HIDDEN RegsForValue {
  /// Regs - This list holds the register (for legal and promoted values)
  /// or register set (for expanded values) that the value should be assigned
  /// to.
  std::vector<unsigned> Regs;

  /// RegVT - The value type of each register.
  MVT::ValueType RegVT;

  /// ValueVT - The value type of the LLVM value, which may be promoted from
  /// RegVT or made from merging the two expanded parts.
  MVT::ValueType ValueVT;

  RegsForValue() : RegVT(MVT::Other), ValueVT(MVT::Other) {}

  RegsForValue(unsigned Reg, MVT::ValueType regvt, MVT::ValueType valuevt)
    : RegVT(regvt), ValueVT(valuevt) {
    Regs.push_back(Reg);
  }
  RegsForValue(const std::vector<unsigned> &regs,
               MVT::ValueType regvt, MVT::ValueType valuevt)
    : Regs(regs), RegVT(regvt), ValueVT(valuevt) {
  }

  /// getCopyFromRegs - Emit a series of CopyFromReg nodes that copies from
  /// this value and returns the result as a ValueVT value.  This uses
  /// Chain/Flag as the input and updates them for the output Chain/Flag.
  SDOperand getCopyFromRegs(SelectionDAG &DAG,
                            SDOperand &Chain, SDOperand &Flag) const;

  /// getCopyToRegs - Emit a series of CopyToReg nodes that copies the
  /// specified value into the registers specified by this object.  This uses
  /// Chain/Flag as the input and updates them for the output Chain/Flag.
  void getCopyToRegs(SDOperand Val, SelectionDAG &DAG,
                     SDOperand &Chain, SDOperand &Flag,
                     MVT::ValueType PtrVT) const;

  /// AddInlineAsmOperands - Add this value to the specified inlineasm node
  /// operand list.  This adds the code marker and includes the number of
  /// values added into it.
  void AddInlineAsmOperands(unsigned Code, SelectionDAG &DAG,
                            std::vector<SDOperand> &Ops) const;
};
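
// Worked example (illustrative): on a 32-bit target, an f64 value expanded
// into two integer registers would be described as Regs = { Rlo, Rhi },
// RegVT = MVT::i32, ValueVT = MVT::f64; getCopyFromRegs then reassembles the
// original f64 from the two i32 halves.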

//===--------------------------------------------------------------------===//
/// createDefaultScheduler - This creates an instruction scheduler appropriate
/// for the target.
ScheduleDAG* createDefaultScheduler(SelectionDAGISel *IS,
                                    SelectionDAG *DAG,
                                    MachineBasicBlock *BB) {
  TargetLowering &TLI = IS->getTargetLowering();

  if (TLI.getSchedulingPreference() == TargetLowering::SchedulingForLatency) {
    return createTDListDAGScheduler(IS, DAG, BB);
  } else {
    assert(TLI.getSchedulingPreference() ==
           TargetLowering::SchedulingForRegPressure && "Unknown sched type!");
    return createBURRListDAGScheduler(IS, DAG, BB);
  }
}

//===--------------------------------------------------------------------===//
/// FunctionLoweringInfo - This contains information that is global to a
/// function that is used when lowering a region of the function.
class FunctionLoweringInfo {
public:
  TargetLowering &TLI;
  Function &Fn;
  MachineFunction &MF;
  SSARegMap *RegMap;

  FunctionLoweringInfo(TargetLowering &TLI, Function &Fn, MachineFunction &MF);

  /// MBBMap - A mapping from LLVM basic blocks to their machine code entry.
  std::map<const BasicBlock*, MachineBasicBlock *> MBBMap;

  /// ValueMap - Since we emit code for the function a basic block at a time,
  /// we must remember which virtual registers hold the values for
  /// cross-basic-block values.
  std::map<const Value*, unsigned> ValueMap;

  /// StaticAllocaMap - Keep track of frame indices for fixed sized allocas in
  /// the entry block.  This allows the allocas to be efficiently referenced
  /// anywhere in the function.
  std::map<const AllocaInst*, int> StaticAllocaMap;

  unsigned MakeReg(MVT::ValueType VT) {
    return RegMap->createVirtualRegister(TLI.getRegClassFor(VT));
  }

  /// isExportedInst - Return true if the specified value is an instruction
  /// exported from its block.
  bool isExportedInst(const Value *V) {
    return ValueMap.count(V);
  }

  unsigned CreateRegForValue(const Value *V);

  unsigned InitializeRegForValue(const Value *V) {
    unsigned &R = ValueMap[V];
    assert(R == 0 && "Already initialized this value register!");
    return R = CreateRegForValue(V);
  }
};

/// isUsedOutsideOfDefiningBlock - Return true if this instruction is used by
/// PHI nodes or outside of the basic block that defines it, or used by a
/// switch instruction, which may expand to multiple basic blocks.
static bool isUsedOutsideOfDefiningBlock(Instruction *I) {
  if (isa<PHINode>(I)) return true;
  BasicBlock *BB = I->getParent();
  for (Value::use_iterator UI = I->use_begin(), E = I->use_end(); UI != E; ++UI)
    if (cast<Instruction>(*UI)->getParent() != BB || isa<PHINode>(*UI) ||
        // FIXME: Remove switchinst special case.
        isa<SwitchInst>(*UI))
      return true;
  return false;
}

/// isOnlyUsedInEntryBlock - If the specified argument is only used in the
/// entry block, return true.  This includes arguments used by switches, since
/// the switch may expand into multiple basic blocks.
static bool isOnlyUsedInEntryBlock(Argument *A) {
  BasicBlock *Entry = A->getParent()->begin();
  for (Value::use_iterator UI = A->use_begin(), E = A->use_end(); UI != E; ++UI)
    if (cast<Instruction>(*UI)->getParent() != Entry || isa<SwitchInst>(*UI))
      return false;  // Use not in entry block.
  return true;
}

FunctionLoweringInfo::FunctionLoweringInfo(TargetLowering &tli,
                                           Function &fn, MachineFunction &mf)
    : TLI(tli), Fn(fn), MF(mf), RegMap(MF.getSSARegMap()) {

  // Create a vreg for each argument register that is not dead and is used
  // outside of the entry block for the function.
  for (Function::arg_iterator AI = Fn.arg_begin(), E = Fn.arg_end();
       AI != E; ++AI)
    if (!isOnlyUsedInEntryBlock(AI))
      InitializeRegForValue(AI);

  // Initialize the mapping of values to registers.  This is only set up for
  // instruction values that are used outside of the block that defines
  // them.
  Function::iterator BB = Fn.begin(), EB = Fn.end();
  for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I)
    if (AllocaInst *AI = dyn_cast<AllocaInst>(I))
      if (ConstantInt *CUI = dyn_cast<ConstantInt>(AI->getArraySize())) {
        const Type *Ty = AI->getAllocatedType();
        uint64_t TySize = TLI.getTargetData()->getTypeSize(Ty);
        unsigned Align =
          std::max((unsigned)TLI.getTargetData()->getTypeAlignment(Ty),
                   AI->getAlignment());

        // If the alignment of the value is smaller than the size of the
        // value, and if the size of the value is particularly small
        // (<= 8 bytes), round up to the size of the value for potentially
        // better performance.
        //
        // FIXME: This could be made better with a preferred alignment hook in
        // TargetData.  It serves primarily to 8-byte align doubles for X86.
        if (Align < TySize && TySize <= 8) Align = TySize;
        TySize *= CUI->getZExtValue();   // Get total allocated size.
        if (TySize == 0) TySize = 1;     // Don't create zero-sized stack objects.
        StaticAllocaMap[AI] =
          MF.getFrameInfo()->CreateStackObject((unsigned)TySize, Align);
      }
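
  // Worked example of the rounding above (illustrative): a double on a target
  // whose TargetData reports 4-byte alignment for f64 has TySize == 8 and
  // Align == 4; since Align < TySize and TySize <= 8, Align is rounded up to 8,
  // giving the 8-byte aligned stack object the FIXME refers to.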

  for (; BB != EB; ++BB)
    for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I)
      if (!I->use_empty() && isUsedOutsideOfDefiningBlock(I))
        if (!isa<AllocaInst>(I) ||
            !StaticAllocaMap.count(cast<AllocaInst>(I)))
          InitializeRegForValue(I);

  // Create an initial MachineBasicBlock for each LLVM BasicBlock in F.  This
  // also creates the initial PHI MachineInstrs, though none of the input
  // operands are populated.
  for (BB = Fn.begin(), EB = Fn.end(); BB != EB; ++BB) {
    MachineBasicBlock *MBB = new MachineBasicBlock(BB);
    MBBMap[BB] = MBB;
    MF.getBasicBlockList().push_back(MBB);

    // Create Machine PHI nodes for LLVM PHI nodes, lowering them as
    // appropriate.
    PHINode *PN;
    for (BasicBlock::iterator I = BB->begin();(PN = dyn_cast<PHINode>(I)); ++I){
      if (PN->use_empty()) continue;

      MVT::ValueType VT = TLI.getValueType(PN->getType());
      unsigned NumElements;
      if (VT != MVT::Vector)
        NumElements = TLI.getNumElements(VT);
      else {
        MVT::ValueType VT1, VT2;
        NumElements =
          TLI.getPackedTypeBreakdown(cast<PackedType>(PN->getType()),
                                     VT1, VT2);
      }
      unsigned PHIReg = ValueMap[PN];
      assert(PHIReg && "PHI node does not have an assigned virtual register!");
      for (unsigned i = 0; i != NumElements; ++i)
        BuildMI(MBB, TargetInstrInfo::PHI, PN->getNumOperands(), PHIReg+i);
    }
  }
}

/// CreateRegForValue - Allocate the appropriate number of virtual registers of
/// the correctly promoted or expanded types.  Assign these registers
/// consecutive vreg numbers and return the first assigned number.
unsigned FunctionLoweringInfo::CreateRegForValue(const Value *V) {
  MVT::ValueType VT = TLI.getValueType(V->getType());

  // The number of multiples of registers that we need, to, e.g., split up
  // a <2 x int64> -> 4 x i32 registers.
  unsigned NumVectorRegs = 1;

  // If this is a packed type, figure out what type it will decompose into
  // and how many of the elements it will use.
  if (VT == MVT::Vector) {
    const PackedType *PTy = cast<PackedType>(V->getType());
    unsigned NumElts = PTy->getNumElements();
    MVT::ValueType EltTy = TLI.getValueType(PTy->getElementType());

    // Divide the input until we get to a supported size.  This will always
    // end with a scalar if the target doesn't support vectors.
    while (NumElts > 1 && !TLI.isTypeLegal(getVectorType(EltTy, NumElts))) {
      NumElts >>= 1;
      NumVectorRegs <<= 1;
    }
    if (NumElts == 1)
      VT = EltTy;
    else
      VT = getVectorType(EltTy, NumElts);
  }

  // The common case is that we will only create one register for this
  // value.  If we have that case, create and return the virtual register.
  unsigned NV = TLI.getNumElements(VT);
  if (NV == 1) {
    // If we are promoting this value, pick the next largest supported type.
    MVT::ValueType PromotedType = TLI.getTypeToTransformTo(VT);
    unsigned Reg = MakeReg(PromotedType);
    // If this is a vector of supported or promoted types (e.g. 4 x i16),
    // create all of the registers.
    for (unsigned i = 1; i != NumVectorRegs; ++i)
      MakeReg(PromotedType);
    return Reg;
  }

  // If this value is represented with multiple target registers, make sure
  // to create enough consecutive registers of the right (smaller) type.
  unsigned NT = VT-1;  // Find the type to use.
  while (TLI.getNumElements((MVT::ValueType)NT) != 1)
    --NT;

  unsigned R = MakeReg((MVT::ValueType)NT);
  for (unsigned i = 1; i != NV*NumVectorRegs; ++i)
    MakeReg((MVT::ValueType)NT);
  return R;
}
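
// Worked example (illustrative): a <2 x i64> value on a 32-bit target with no
// vector support halves NumElts until VT is the scalar i64 (NumVectorRegs ==
// 2); i64 itself takes two i32 registers (NV == 2), so NV*NumVectorRegs == 4
// consecutive i32 vregs are created, matching the "<2 x int64> -> 4 x i32
// registers" comment above.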

//===----------------------------------------------------------------------===//
/// SelectionDAGLowering - This is the common target-independent lowering
/// implementation that is parameterized by a TargetLowering object.
/// Also, targets can overload any lowering method.
///
class SelectionDAGLowering {
  MachineBasicBlock *CurMBB;

  std::map<const Value*, SDOperand> NodeMap;

  /// PendingLoads - Loads are not emitted to the program immediately.  We bunch
  /// them up and then emit token factor nodes when possible.  This allows us to
  /// get simple disambiguation between loads without worrying about alias
  /// analysis.
  std::vector<SDOperand> PendingLoads;

  /// Case - A pair of values to record the Value for a switch case, and the
  /// case's target basic block.
  typedef std::pair<Constant*, MachineBasicBlock*> Case;
  typedef std::vector<Case>::iterator CaseItr;
  typedef std::pair<CaseItr, CaseItr> CaseRange;

  /// CaseRec - A struct with ctor used in lowering switches to a binary tree
  /// of conditional branches.
  struct CaseRec {
    CaseRec(MachineBasicBlock *bb, Constant *lt, Constant *ge, CaseRange r) :
      CaseBB(bb), LT(lt), GE(ge), Range(r) {}

    /// CaseBB - The MBB in which to emit the compare and branch
    MachineBasicBlock *CaseBB;
    /// LT, GE - If nonzero, we know the current case value must be less-than or
    /// greater-than-or-equal-to these Constants.
    Constant *LT;
    Constant *GE;
    /// Range - A pair of iterators representing the range of case values to be
    /// processed at this point in the binary search tree.
    CaseRange Range;
  };

  /// The comparison function for sorting Case values.
  struct CaseCmp {
    bool operator () (const Case& C1, const Case& C2) {
      if (const ConstantInt* I1 = dyn_cast<const ConstantInt>(C1.first))
        if (I1->getType()->isUnsigned())
          return I1->getZExtValue() <
            cast<const ConstantInt>(C2.first)->getZExtValue();

      return cast<const ConstantInt>(C1.first)->getSExtValue() <
        cast<const ConstantInt>(C2.first)->getSExtValue();
    }
  };

public:
  // TLI - This is information that describes the available target features we
  // need for lowering.  This indicates when operations are unavailable,
  // implemented with a libcall, etc.
  TargetLowering &TLI;
  SelectionDAG &DAG;
  const TargetData *TD;

  /// SwitchCases - Vector of CaseBlock structures used to communicate
  /// SwitchInst code generation information.
  std::vector<SelectionDAGISel::CaseBlock> SwitchCases;
  SelectionDAGISel::JumpTable JT;

  /// FuncInfo - Information about the function as a whole.
  ///
  FunctionLoweringInfo &FuncInfo;

  SelectionDAGLowering(SelectionDAG &dag, TargetLowering &tli,
                       FunctionLoweringInfo &funcinfo)
    : TLI(tli), DAG(dag), TD(DAG.getTarget().getTargetData()),
      JT(0,0,0,0), FuncInfo(funcinfo) {
  }

  /// getRoot - Return the current virtual root of the Selection DAG.
  ///
  SDOperand getRoot() {
    if (PendingLoads.empty())
      return DAG.getRoot();

    if (PendingLoads.size() == 1) {
      SDOperand Root = PendingLoads[0];
      DAG.setRoot(Root);
      PendingLoads.clear();
      return Root;
    }

    // Otherwise, we have to make a token factor node.
    SDOperand Root = DAG.getNode(ISD::TokenFactor, MVT::Other,
                                 &PendingLoads[0], PendingLoads.size());
    PendingLoads.clear();
    DAG.setRoot(Root);
    return Root;
  }
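
  // Illustrative example: if PendingLoads holds the chains of two non-volatile
  // loads L1 and L2, getRoot() returns TokenFactor(L1, L2), so the loads stay
  // unordered with respect to each other but both complete before any node
  // chained on the new root.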

  SDOperand CopyValueToVirtualRegister(Value *V, unsigned Reg);

  void visit(Instruction &I) { visit(I.getOpcode(), I); }

  void visit(unsigned Opcode, User &I) {
    switch (Opcode) {
    default: assert(0 && "Unknown instruction type encountered!");
             abort();
      // Build the switch statement using the Instruction.def file.
#define HANDLE_INST(NUM, OPCODE, CLASS) \
    case Instruction::OPCODE: return visit##OPCODE((CLASS&)I);
#include "llvm/Instruction.def"
    }
  }
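
  // For reference (illustrative): each HANDLE_INST expansion above produces a
  // case such as
  //   case Instruction::Add: return visitAdd((BinaryOperator&)I);
  // so every IR opcode dispatches to the matching visit method below.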

  void setCurrentBasicBlock(MachineBasicBlock *MBB) { CurMBB = MBB; }

  SDOperand getLoadFrom(const Type *Ty, SDOperand Ptr,
                        const Value *SV, SDOperand Root,
                        bool isVolatile);

  SDOperand getIntPtrConstant(uint64_t Val) {
    return DAG.getConstant(Val, TLI.getPointerTy());
  }

  SDOperand getValue(const Value *V);

  const SDOperand &setValue(const Value *V, SDOperand NewN) {
    SDOperand &N = NodeMap[V];
    assert(N.Val == 0 && "Already set a value for this node!");
    return N = NewN;
  }

  RegsForValue GetRegistersForValue(const std::string &ConstrCode,
                                    MVT::ValueType VT,
                                    bool OutReg, bool InReg,
                                    std::set<unsigned> &OutputRegs,
                                    std::set<unsigned> &InputRegs);

  void FindMergedConditions(Value *Cond, MachineBasicBlock *TBB,
                            MachineBasicBlock *FBB, MachineBasicBlock *CurBB,
                            unsigned Opc);
  bool isExportableFromCurrentBlock(Value *V, const BasicBlock *FromBB);
  void ExportFromCurrentBlock(Value *V);

  // Terminator instructions.
  void visitRet(ReturnInst &I);
  void visitBr(BranchInst &I);
  void visitSwitch(SwitchInst &I);
  void visitUnreachable(UnreachableInst &I) { /* noop */ }

  // Helper for visitSwitch
  void visitSwitchCase(SelectionDAGISel::CaseBlock &CB);
  void visitJumpTable(SelectionDAGISel::JumpTable &JT);

  // These all get lowered before this pass.
  void visitInvoke(InvokeInst &I) { assert(0 && "TODO"); }
  void visitUnwind(UnwindInst &I) { assert(0 && "TODO"); }

  void visitIntBinary(User &I, unsigned IntOp, unsigned VecOp);
  void visitFPBinary(User &I, unsigned FPOp, unsigned VecOp);
  void visitShift(User &I, unsigned Opcode);
  void visitAdd(User &I) {
    if (I.getType()->isFloatingPoint())
      visitFPBinary(I, ISD::FADD, ISD::VADD);
    else
      visitIntBinary(I, ISD::ADD, ISD::VADD);
  }
  void visitSub(User &I);
  void visitMul(User &I) {
    if (I.getType()->isFloatingPoint())
      visitFPBinary(I, ISD::FMUL, ISD::VMUL);
    else
      visitIntBinary(I, ISD::MUL, ISD::VMUL);
  }
  void visitURem(User &I) { visitIntBinary(I, ISD::UREM, 0); }
  void visitSRem(User &I) { visitIntBinary(I, ISD::SREM, 0); }
  void visitFRem(User &I) { visitFPBinary (I, ISD::FREM, 0); }
  void visitUDiv(User &I) { visitIntBinary(I, ISD::UDIV, ISD::VUDIV); }
  void visitSDiv(User &I) { visitIntBinary(I, ISD::SDIV, ISD::VSDIV); }
  void visitFDiv(User &I) { visitFPBinary (I, ISD::FDIV, ISD::VSDIV); }
  void visitAnd(User &I) { visitIntBinary(I, ISD::AND, ISD::VAND); }
  void visitOr (User &I) { visitIntBinary(I, ISD::OR,  ISD::VOR); }
  void visitXor(User &I) { visitIntBinary(I, ISD::XOR, ISD::VXOR); }
  void visitShl(User &I) { visitShift(I, ISD::SHL); }
  void visitLShr(User &I) { visitShift(I, ISD::SRL); }
  void visitAShr(User &I) { visitShift(I, ISD::SRA); }
  void visitSetCC(User &I, ISD::CondCode SignedOpc, ISD::CondCode UnsignedOpc,
                  ISD::CondCode FPOpc);
  void visitSetEQ(User &I) { visitSetCC(I, ISD::SETEQ, ISD::SETEQ,
                                        ISD::SETOEQ); }
  void visitSetNE(User &I) { visitSetCC(I, ISD::SETNE, ISD::SETNE,
                                        ISD::SETUNE); }
  void visitSetLE(User &I) { visitSetCC(I, ISD::SETLE, ISD::SETULE,
                                        ISD::SETOLE); }
  void visitSetGE(User &I) { visitSetCC(I, ISD::SETGE, ISD::SETUGE,
                                        ISD::SETOGE); }
  void visitSetLT(User &I) { visitSetCC(I, ISD::SETLT, ISD::SETULT,
                                        ISD::SETOLT); }
  void visitSetGT(User &I) { visitSetCC(I, ISD::SETGT, ISD::SETUGT,
                                        ISD::SETOGT); }

  void visitExtractElement(User &I);
  void visitInsertElement(User &I);
  void visitShuffleVector(User &I);

  void visitGetElementPtr(User &I);
  void visitCast(User &I);
  void visitSelect(User &I);

  void visitMalloc(MallocInst &I);
  void visitFree(FreeInst &I);
  void visitAlloca(AllocaInst &I);
  void visitLoad(LoadInst &I);
  void visitStore(StoreInst &I);
  void visitPHI(PHINode &I) { } // PHI nodes are handled specially.
  void visitCall(CallInst &I);
  void visitInlineAsm(CallInst &I);
  const char *visitIntrinsicCall(CallInst &I, unsigned Intrinsic);
  void visitTargetIntrinsic(CallInst &I, unsigned Intrinsic);

  void visitVAStart(CallInst &I);
  void visitVAArg(VAArgInst &I);
  void visitVAEnd(CallInst &I);
  void visitVACopy(CallInst &I);
  void visitFrameReturnAddress(CallInst &I, bool isFrameAddress);

  void visitMemIntrinsic(CallInst &I, unsigned Op);

  void visitUserOp1(Instruction &I) {
    assert(0 && "UserOp1 should not exist at instruction selection time!");
    abort();
  }
  void visitUserOp2(Instruction &I) {
    assert(0 && "UserOp2 should not exist at instruction selection time!");
    abort();
  }
};
} // end namespace llvm

SDOperand SelectionDAGLowering::getValue(const Value *V) {
  SDOperand &N = NodeMap[V];
  if (N.Val) return N;

  const Type *VTy = V->getType();
  MVT::ValueType VT = TLI.getValueType(VTy);
  if (Constant *C = const_cast<Constant*>(dyn_cast<Constant>(V))) {
    if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) {
      visit(CE->getOpcode(), *CE);
      assert(N.Val && "visit didn't populate the ValueMap!");
      return N;
    } else if (GlobalValue *GV = dyn_cast<GlobalValue>(C)) {
      return N = DAG.getGlobalAddress(GV, VT);
    } else if (isa<ConstantPointerNull>(C)) {
      return N = DAG.getConstant(0, TLI.getPointerTy());
    } else if (isa<UndefValue>(C)) {
      if (!isa<PackedType>(VTy))
        return N = DAG.getNode(ISD::UNDEF, VT);

      // Create a VBUILD_VECTOR of undef nodes.
      const PackedType *PTy = cast<PackedType>(VTy);
      unsigned NumElements = PTy->getNumElements();
      MVT::ValueType PVT = TLI.getValueType(PTy->getElementType());

      SmallVector<SDOperand, 8> Ops;
      Ops.assign(NumElements, DAG.getNode(ISD::UNDEF, PVT));

      // Create a VConstant node with generic Vector type.
      Ops.push_back(DAG.getConstant(NumElements, MVT::i32));
      Ops.push_back(DAG.getValueType(PVT));
      return N = DAG.getNode(ISD::VBUILD_VECTOR, MVT::Vector,
                             &Ops[0], Ops.size());
    } else if (ConstantFP *CFP = dyn_cast<ConstantFP>(C)) {
      return N = DAG.getConstantFP(CFP->getValue(), VT);
    } else if (const PackedType *PTy = dyn_cast<PackedType>(VTy)) {
      unsigned NumElements = PTy->getNumElements();
      MVT::ValueType PVT = TLI.getValueType(PTy->getElementType());

      // Now that we know the number and type of the elements, push a
      // Constant or ConstantFP node onto the ops list for each element of
      // the packed constant.
      SmallVector<SDOperand, 8> Ops;
      if (ConstantPacked *CP = dyn_cast<ConstantPacked>(C)) {
        for (unsigned i = 0; i != NumElements; ++i)
          Ops.push_back(getValue(CP->getOperand(i)));
      } else {
        assert(isa<ConstantAggregateZero>(C) && "Unknown packed constant!");
        SDOperand Op;
        if (MVT::isFloatingPoint(PVT))
          Op = DAG.getConstantFP(0, PVT);
        else
          Op = DAG.getConstant(0, PVT);
        Ops.assign(NumElements, Op);
      }

      // Create a VBUILD_VECTOR node with generic Vector type.
      Ops.push_back(DAG.getConstant(NumElements, MVT::i32));
      Ops.push_back(DAG.getValueType(PVT));
      return N = DAG.getNode(ISD::VBUILD_VECTOR, MVT::Vector, &Ops[0], Ops.size());
    } else {
      // Canonicalize all constant ints to be unsigned.
      return N = DAG.getConstant(cast<ConstantIntegral>(C)->getZExtValue(), VT);
    }
  }

  if (const AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
    std::map<const AllocaInst*, int>::iterator SI =
      FuncInfo.StaticAllocaMap.find(AI);
    if (SI != FuncInfo.StaticAllocaMap.end())
      return DAG.getFrameIndex(SI->second, TLI.getPointerTy());
  }

  std::map<const Value*, unsigned>::const_iterator VMI =
    FuncInfo.ValueMap.find(V);
  assert(VMI != FuncInfo.ValueMap.end() && "Value not in map!");

  unsigned InReg = VMI->second;

  // If this type is not legal, make it so now.
  if (VT != MVT::Vector) {
    MVT::ValueType DestVT = TLI.getTypeToTransformTo(VT);

    N = DAG.getCopyFromReg(DAG.getEntryNode(), InReg, DestVT);
    if (DestVT < VT) {
      // Source must be expanded.  This input value is actually coming from the
      // register pair VMI->second and VMI->second+1.
      N = DAG.getNode(ISD::BUILD_PAIR, VT, N,
                      DAG.getCopyFromReg(DAG.getEntryNode(), InReg+1, DestVT));
    } else if (DestVT > VT) { // Promotion case
      if (MVT::isFloatingPoint(VT))
        N = DAG.getNode(ISD::FP_ROUND, VT, N);
      else
        N = DAG.getNode(ISD::TRUNCATE, VT, N);
    }
  } else {
    // Otherwise, if this is a vector, make it available as a generic vector
    // here.
    MVT::ValueType PTyElementVT, PTyLegalElementVT;
    const PackedType *PTy = cast<PackedType>(VTy);
    unsigned NE = TLI.getPackedTypeBreakdown(PTy, PTyElementVT,
                                             PTyLegalElementVT);

    // Build a VBUILD_VECTOR with the input registers.
    SmallVector<SDOperand, 8> Ops;
    if (PTyElementVT == PTyLegalElementVT) {
      // If the value types are legal, just VBUILD the CopyFromReg nodes.
      for (unsigned i = 0; i != NE; ++i)
        Ops.push_back(DAG.getCopyFromReg(DAG.getEntryNode(), InReg++,
                                         PTyElementVT));
    } else if (PTyElementVT < PTyLegalElementVT) {
      // If the register was promoted, use TRUNCATE or FP_ROUND as appropriate.
      for (unsigned i = 0; i != NE; ++i) {
        SDOperand Op = DAG.getCopyFromReg(DAG.getEntryNode(), InReg++,
                                          PTyLegalElementVT);
        if (MVT::isFloatingPoint(PTyElementVT))
          Op = DAG.getNode(ISD::FP_ROUND, PTyElementVT, Op);
        else
          Op = DAG.getNode(ISD::TRUNCATE, PTyElementVT, Op);
        Ops.push_back(Op);
      }
    } else {
      // If the register was expanded, use BUILD_PAIR.
      assert((NE & 1) == 0 && "Must expand into a multiple of 2 elements!");
      for (unsigned i = 0; i != NE/2; ++i) {
        SDOperand Op0 = DAG.getCopyFromReg(DAG.getEntryNode(), InReg++,
                                           PTyElementVT);
        SDOperand Op1 = DAG.getCopyFromReg(DAG.getEntryNode(), InReg++,
                                           PTyElementVT);
        Ops.push_back(DAG.getNode(ISD::BUILD_PAIR, VT, Op0, Op1));
      }
    }

    Ops.push_back(DAG.getConstant(NE, MVT::i32));
    Ops.push_back(DAG.getValueType(PTyLegalElementVT));
    N = DAG.getNode(ISD::VBUILD_VECTOR, MVT::Vector, &Ops[0], Ops.size());

    // Finally, use a VBIT_CONVERT to make this available as the appropriate
    // vector type.
    N = DAG.getNode(ISD::VBIT_CONVERT, MVT::Vector, N,
                    DAG.getConstant(PTy->getNumElements(),
                                    MVT::i32),
                    DAG.getValueType(TLI.getValueType(PTy->getElementType())));
  }

  return N;
}

void SelectionDAGLowering::visitRet(ReturnInst &I) {
  if (I.getNumOperands() == 0) {
    DAG.setRoot(DAG.getNode(ISD::RET, MVT::Other, getRoot()));
    return;
  }
  SmallVector<SDOperand, 8> NewValues;
  NewValues.push_back(getRoot());
  for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i) {
    SDOperand RetOp = getValue(I.getOperand(i));
    bool isSigned = I.getOperand(i)->getType()->isSigned();

    // If this is an integer return value, we need to promote it ourselves to
    // the full width of a register, since LegalizeOp will use ANY_EXTEND rather
    // than sign/zero extend.
    // FIXME: The C calling convention requires the return type to be promoted
    // to at least 32 bits.  But this is not necessary for non-C calling
    // conventions.
    if (MVT::isInteger(RetOp.getValueType()) &&
        RetOp.getValueType() < MVT::i64) {
      MVT::ValueType TmpVT;
      if (TLI.getTypeAction(MVT::i32) == TargetLowering::Promote)
        TmpVT = TLI.getTypeToTransformTo(MVT::i32);
      else
        TmpVT = MVT::i32;

      if (isSigned)
        RetOp = DAG.getNode(ISD::SIGN_EXTEND, TmpVT, RetOp);
      else
        RetOp = DAG.getNode(ISD::ZERO_EXTEND, TmpVT, RetOp);
    }
    NewValues.push_back(RetOp);
    NewValues.push_back(DAG.getConstant(isSigned, MVT::i32));
  }
  DAG.setRoot(DAG.getNode(ISD::RET, MVT::Other,
                          &NewValues[0], NewValues.size()));
}

/// ExportFromCurrentBlock - If this condition isn't known to be exported from
/// the current basic block, add it to ValueMap now so that we'll get a
/// CopyTo/FromReg pair for it.
void SelectionDAGLowering::ExportFromCurrentBlock(Value *V) {
  // No need to export constants.
  if (!isa<Instruction>(V) && !isa<Argument>(V)) return;

  // Already exported?
  if (FuncInfo.isExportedInst(V)) return;

  unsigned Reg = FuncInfo.InitializeRegForValue(V);
  PendingLoads.push_back(CopyValueToVirtualRegister(V, Reg));
}

bool SelectionDAGLowering::isExportableFromCurrentBlock(Value *V,
                                                    const BasicBlock *FromBB) {
  // The operands of the setcc have to be in this block.  We don't know
  // how to export them from some other block.
  if (Instruction *VI = dyn_cast<Instruction>(V)) {
    // Can export from current BB.
    if (VI->getParent() == FromBB)
      return true;

    // Is already exported, noop.
    return FuncInfo.isExportedInst(V);
  }

  // If this is an argument, we can export it if the BB is the entry block or
  // if it is already exported.
  if (isa<Argument>(V)) {
    if (FromBB == &FromBB->getParent()->getEntryBlock())
      return true;

    // Otherwise, can only export this if it is already exported.
    return FuncInfo.isExportedInst(V);
  }

  // Otherwise, constants can always be exported.
  return true;
}

static bool InBlock(const Value *V, const BasicBlock *BB) {
  if (const Instruction *I = dyn_cast<Instruction>(V))
    return I->getParent() == BB;
  return true;
}

/// FindMergedConditions - If Cond is an expression like (X && Y) or (X || Y),
/// recursively emit it as a sequence of conditional branches rather than
/// materializing the boolean value with setcc/and/or nodes.
void SelectionDAGLowering::FindMergedConditions(Value *Cond,
                                                MachineBasicBlock *TBB,
                                                MachineBasicBlock *FBB,
                                                MachineBasicBlock *CurBB,
                                                unsigned Opc) {
  // If this node is not part of the or/and tree, emit it as a branch.
  BinaryOperator *BOp = dyn_cast<BinaryOperator>(Cond);

  if (!BOp || (unsigned)BOp->getOpcode() != Opc || !BOp->hasOneUse() ||
      BOp->getParent() != CurBB->getBasicBlock() ||
      !InBlock(BOp->getOperand(0), CurBB->getBasicBlock()) ||
      !InBlock(BOp->getOperand(1), CurBB->getBasicBlock())) {
    const BasicBlock *BB = CurBB->getBasicBlock();

    if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Cond))
      if ((II->getIntrinsicID() == Intrinsic::isunordered_f32 ||
           II->getIntrinsicID() == Intrinsic::isunordered_f64) &&
          // The operands of the setcc have to be in this block.  We don't know
          // how to export them from some other block.  If this is the first
          // block of the sequence, no exporting is needed.
          (CurBB == CurMBB ||
           (isExportableFromCurrentBlock(II->getOperand(1), BB) &&
            isExportableFromCurrentBlock(II->getOperand(2), BB)))) {
        SelectionDAGISel::CaseBlock CB(ISD::SETUO, II->getOperand(1),
                                       II->getOperand(2), TBB, FBB, CurBB);
        SwitchCases.push_back(CB);
        return;
      }

    // If the leaf of the tree is a setcond inst, merge the condition into the
    // caseblock.
    if (BOp && isa<SetCondInst>(BOp) &&
        // The operands of the setcc have to be in this block.  We don't know
        // how to export them from some other block.  If this is the first block
        // of the sequence, no exporting is needed.
        (CurBB == CurMBB ||
         (isExportableFromCurrentBlock(BOp->getOperand(0), BB) &&
          isExportableFromCurrentBlock(BOp->getOperand(1), BB)))) {
      ISD::CondCode SignCond, UnsCond, FPCond, Condition;
      switch (BOp->getOpcode()) {
      default: assert(0 && "Unknown setcc opcode!");
      case Instruction::SetEQ:
        SignCond = ISD::SETEQ;
        UnsCond  = ISD::SETEQ;
        FPCond   = ISD::SETOEQ;
        break;
      case Instruction::SetNE:
        SignCond = ISD::SETNE;
        UnsCond  = ISD::SETNE;
        FPCond   = ISD::SETUNE;
        break;
      case Instruction::SetLE:
        SignCond = ISD::SETLE;
        UnsCond  = ISD::SETULE;
        FPCond   = ISD::SETOLE;
        break;
      case Instruction::SetGE:
        SignCond = ISD::SETGE;
        UnsCond  = ISD::SETUGE;
        FPCond   = ISD::SETOGE;
        break;
      case Instruction::SetLT:
        SignCond = ISD::SETLT;
        UnsCond  = ISD::SETULT;
        FPCond   = ISD::SETOLT;
        break;
      case Instruction::SetGT:
        SignCond = ISD::SETGT;
        UnsCond  = ISD::SETUGT;
        FPCond   = ISD::SETOGT;
        break;
      }

      const Type *OpType = BOp->getOperand(0)->getType();
      if (const PackedType *PTy = dyn_cast<PackedType>(OpType))
        OpType = PTy->getElementType();

      if (!FiniteOnlyFPMath() && OpType->isFloatingPoint())
        Condition = FPCond;
      else if (OpType->isUnsigned())
        Condition = UnsCond;
      else
        Condition = SignCond;

      SelectionDAGISel::CaseBlock CB(Condition, BOp->getOperand(0),
                                     BOp->getOperand(1), TBB, FBB, CurBB);
      SwitchCases.push_back(CB);
      return;
    }

    // Create a CaseBlock record representing this branch.
    SelectionDAGISel::CaseBlock CB(ISD::SETEQ, Cond, ConstantBool::getTrue(),
                                   TBB, FBB, CurBB);
    SwitchCases.push_back(CB);
    return;
  }

  // Create TmpBB after CurBB.
  MachineFunction::iterator BBI = CurBB;
  MachineBasicBlock *TmpBB = new MachineBasicBlock(CurBB->getBasicBlock());
  CurBB->getParent()->getBasicBlockList().insert(++BBI, TmpBB);

  if (Opc == Instruction::Or) {
    // Codegen X | Y as:
    //   jmp_if_X TBB
    //   jmp TmpBB
    // TmpBB:
    //   jmp_if_Y TBB
    //   jmp FBB

    // Emit the LHS condition.
    FindMergedConditions(BOp->getOperand(0), TBB, TmpBB, CurBB, Opc);

    // Emit the RHS condition into TmpBB.
    FindMergedConditions(BOp->getOperand(1), TBB, FBB, TmpBB, Opc);
  } else {
    assert(Opc == Instruction::And && "Unknown merge op!");
    // Codegen X & Y as:
    //   jmp_if_X TmpBB
    //   jmp FBB
    // TmpBB:
    //   jmp_if_Y TBB
    //   jmp FBB
    //
    // This requires creation of TmpBB after CurBB.

    // Emit the LHS condition.
    FindMergedConditions(BOp->getOperand(0), TmpBB, FBB, CurBB, Opc);

    // Emit the RHS condition into TmpBB.
    FindMergedConditions(BOp->getOperand(1), TBB, FBB, TmpBB, Opc);
  }
}
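
// Illustrative example: for "br (and (setlt X, 4), (setgt Y, 2)), T, F" the
// recursion above emits two CaseBlocks: one in CurBB testing X < 4 (false
// branches to F, true falls into TmpBB), and one in TmpBB testing Y > 2 (true
// to T, false to F), giving short-circuit evaluation without materializing a
// boolean value.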

/// If the set of cases should be emitted as a series of branches, return true.
/// If we should emit this as a bunch of and/or'd together conditions, return
/// false.
static bool
ShouldEmitAsBranches(const std::vector<SelectionDAGISel::CaseBlock> &Cases) {
  if (Cases.size() != 2) return true;

  // If this is two comparisons of the same values or'd or and'd together, they
  // will get folded into a single comparison, so don't emit two blocks.
  if ((Cases[0].CmpLHS == Cases[1].CmpLHS &&
       Cases[0].CmpRHS == Cases[1].CmpRHS) ||
      (Cases[0].CmpRHS == Cases[1].CmpLHS &&
       Cases[0].CmpLHS == Cases[1].CmpRHS)) {
    return false;
  }

  return true;
}
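
// Illustrative example: "if (X < 4 || X == 4)" produces two CaseBlocks whose
// operands are both X and 4; the two setcc's can later fold into a single
// "X <= 4", so it is cheaper to emit or'd setcc's than two branch blocks.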

void SelectionDAGLowering::visitBr(BranchInst &I) {
  // Update machine-CFG edges.
  MachineBasicBlock *Succ0MBB = FuncInfo.MBBMap[I.getSuccessor(0)];

  // Figure out which block is immediately after the current one.
  MachineBasicBlock *NextBlock = 0;
  MachineFunction::iterator BBI = CurMBB;
  if (++BBI != CurMBB->getParent()->end())
    NextBlock = BBI;

  if (I.isUnconditional()) {
    // If this is not a fall-through branch, emit the branch.
    if (Succ0MBB != NextBlock)
      DAG.setRoot(DAG.getNode(ISD::BR, MVT::Other, getRoot(),
                              DAG.getBasicBlock(Succ0MBB)));

    // Update machine-CFG edges.
    CurMBB->addSuccessor(Succ0MBB);

    return;
  }

  // If this condition is one of the special cases we handle, do special stuff
  // now.
  Value *CondVal = I.getCondition();
  MachineBasicBlock *Succ1MBB = FuncInfo.MBBMap[I.getSuccessor(1)];

  // If this is a series of conditions that are or'd or and'd together, emit
  // this as a sequence of branches instead of setcc's with and/or operations.
  // For example, instead of something like:
  //     cmp A, B
  //     C = seteq
  //     cmp D, E
  //     F = setle
  //     or C, F
  //     jnz foo
  // Emit:
  //     cmp A, B
  //     je foo
  //     cmp D, E
  //     jle foo
  //
  if (BinaryOperator *BOp = dyn_cast<BinaryOperator>(CondVal)) {
    if (BOp->hasOneUse() &&
        (BOp->getOpcode() == Instruction::And ||
         BOp->getOpcode() == Instruction::Or)) {
      FindMergedConditions(BOp, Succ0MBB, Succ1MBB, CurMBB, BOp->getOpcode());
      // If the compares in later blocks need to use values not currently
      // exported from this block, export them now.  This block should always
      // be the first entry.
      assert(SwitchCases[0].ThisBB == CurMBB && "Unexpected lowering!");

      // Allow some cases to be rejected.
      if (ShouldEmitAsBranches(SwitchCases)) {
        for (unsigned i = 1, e = SwitchCases.size(); i != e; ++i) {
          ExportFromCurrentBlock(SwitchCases[i].CmpLHS);
          ExportFromCurrentBlock(SwitchCases[i].CmpRHS);
        }

        // Emit the branch for this block.
        visitSwitchCase(SwitchCases[0]);
        SwitchCases.erase(SwitchCases.begin());
        return;
      }

      // Okay, we decided not to do this, remove any inserted MBB's and clear
      // SwitchCases.
      for (unsigned i = 1, e = SwitchCases.size(); i != e; ++i)
        CurMBB->getParent()->getBasicBlockList().erase(SwitchCases[i].ThisBB);

      SwitchCases.clear();
    }
  }

  // Create a CaseBlock record representing this branch.
  SelectionDAGISel::CaseBlock CB(ISD::SETEQ, CondVal, ConstantBool::getTrue(),
                                 Succ0MBB, Succ1MBB, CurMBB);
  // Use visitSwitchCase to actually insert the fast branch sequence for this
  // cond branch.
  visitSwitchCase(CB);
}

/// visitSwitchCase - Emits the necessary code to represent a single node in
/// the binary search tree resulting from lowering a switch instruction.
void SelectionDAGLowering::visitSwitchCase(SelectionDAGISel::CaseBlock &CB) {
  SDOperand Cond;
  SDOperand CondLHS = getValue(CB.CmpLHS);

  // Build the setcc now, fold "(X == true)" to X and "(X == false)" to !X to
  // handle common cases produced by branch lowering.
  if (CB.CmpRHS == ConstantBool::getTrue() && CB.CC == ISD::SETEQ)
    Cond = CondLHS;
  else if (CB.CmpRHS == ConstantBool::getFalse() && CB.CC == ISD::SETEQ) {
    SDOperand True = DAG.getConstant(1, CondLHS.getValueType());
    Cond = DAG.getNode(ISD::XOR, CondLHS.getValueType(), CondLHS, True);
  } else
    Cond = DAG.getSetCC(MVT::i1, CondLHS, getValue(CB.CmpRHS), CB.CC);

  // Set NextBlock to be the MBB immediately after the current one, if any.
  // This is used to avoid emitting unnecessary branches to the next block.
  MachineBasicBlock *NextBlock = 0;
  MachineFunction::iterator BBI = CurMBB;
  if (++BBI != CurMBB->getParent()->end())
    NextBlock = BBI;

  // If the lhs block is the next block, invert the condition so that we can
  // fall through to the lhs instead of the rhs block.
  if (CB.TrueBB == NextBlock) {
    std::swap(CB.TrueBB, CB.FalseBB);
    SDOperand True = DAG.getConstant(1, Cond.getValueType());
    Cond = DAG.getNode(ISD::XOR, Cond.getValueType(), Cond, True);
  }
  SDOperand BrCond = DAG.getNode(ISD::BRCOND, MVT::Other, getRoot(), Cond,
                                 DAG.getBasicBlock(CB.TrueBB));
  if (CB.FalseBB == NextBlock)
    DAG.setRoot(BrCond);
  else
    DAG.setRoot(DAG.getNode(ISD::BR, MVT::Other, BrCond,
                            DAG.getBasicBlock(CB.FalseBB)));
  // Update successor info
  CurMBB->addSuccessor(CB.TrueBB);
  CurMBB->addSuccessor(CB.FalseBB);
}

void SelectionDAGLowering::visitJumpTable(SelectionDAGISel::JumpTable &JT) {
  // Emit the code for the jump table
  MVT::ValueType PTy = TLI.getPointerTy();
  SDOperand Index = DAG.getCopyFromReg(getRoot(), JT.Reg, PTy);
  SDOperand Table = DAG.getJumpTable(JT.JTI, PTy);
  DAG.setRoot(DAG.getNode(ISD::BR_JT, MVT::Other, Index.getValue(1),
                          Table, Index));
}

void SelectionDAGLowering::visitSwitch(SwitchInst &I) {
  // Figure out which block is immediately after the current one.
  MachineBasicBlock *NextBlock = 0;
  MachineFunction::iterator BBI = CurMBB;

  if (++BBI != CurMBB->getParent()->end())
    NextBlock = BBI;

  MachineBasicBlock *Default = FuncInfo.MBBMap[I.getDefaultDest()];

  // If there is only the default destination, branch to it if it is not the
  // next basic block.  Otherwise, just fall through.
  if (I.getNumOperands() == 2) {
    // Update machine-CFG edges.

    // If this is not a fall-through branch, emit the branch.
    if (Default != NextBlock)
      DAG.setRoot(DAG.getNode(ISD::BR, MVT::Other, getRoot(),
                              DAG.getBasicBlock(Default)));

    CurMBB->addSuccessor(Default);
    return;
  }

  // If there are any non-default case statements, create a vector of Cases
  // representing each one, and sort the vector so that we can efficiently
  // create a binary search tree from them.
  std::vector<Case> Cases;

  for (unsigned i = 1; i < I.getNumSuccessors(); ++i) {
    MachineBasicBlock *SMBB = FuncInfo.MBBMap[I.getSuccessor(i)];
    Cases.push_back(Case(I.getSuccessorValue(i), SMBB));
  }

  std::sort(Cases.begin(), Cases.end(), CaseCmp());

  // Get the Value to be switched on and default basic blocks, which will be
  // inserted into CaseBlock records, representing basic blocks in the binary
  // search tree.
  Value *SV = I.getOperand(0);

  // Get the MachineFunction which holds the current MBB.  This is used during
  // emission of jump tables, and when inserting any additional MBBs necessary
  // to represent the switch.
  MachineFunction *CurMF = CurMBB->getParent();
  const BasicBlock *LLVMBB = CurMBB->getBasicBlock();

  // If the switch has few cases (two or less) emit a series of specific
  // tests.
  if (Cases.size() < 3) {
    // TODO: If any two of the cases have the same destination, and if one value
    // is the same as the other, but has one bit unset that the other has set,
    // use bit manipulation to do two compares at once.  For example:
    // "if (X == 6 || X == 4)" -> "if ((X|2) == 6)"

    // Rearrange the case blocks so that the last one falls through if possible.
    if (NextBlock && Default != NextBlock && Cases.back().second != NextBlock) {
      // The last case block won't fall through into 'NextBlock' if we emit the
      // branches in this order.  See if rearranging a case value would help.
      for (unsigned i = 0, e = Cases.size()-1; i != e; ++i) {
        if (Cases[i].second == NextBlock) {
          std::swap(Cases[i], Cases.back());
          break;
        }
      }
    }

    // Create a CaseBlock record representing a conditional branch to
    // the Case's target mbb if the value being switched on SV is equal
    // to C.
    MachineBasicBlock *CurBlock = CurMBB;
    for (unsigned i = 0, e = Cases.size(); i != e; ++i) {
      MachineBasicBlock *FallThrough;
      if (i != e-1) {
        FallThrough = new MachineBasicBlock(CurMBB->getBasicBlock());
        CurMF->getBasicBlockList().insert(BBI, FallThrough);
      } else {
        // If the last case doesn't match, go to the default block.
        FallThrough = Default;
      }

      SelectionDAGISel::CaseBlock CB(ISD::SETEQ, SV, Cases[i].first,
                                     Cases[i].second, FallThrough, CurBlock);

      // If emitting the first comparison, just call visitSwitchCase to emit the
      // code into the current block.  Otherwise, push the CaseBlock onto the
      // vector to be later processed by SDISel, and insert the node's MBB
      // before the next MBB.
      if (CurBlock == CurMBB)
        visitSwitchCase(CB);
      else
        SwitchCases.push_back(CB);

      CurBlock = FallThrough;
    }

    return;
  }

  // If the switch has more than 5 blocks, and is at least 31.25% dense, and the
  // target supports indirect branches, then emit a jump table rather than
  // lowering the switch to a binary tree of conditional branches.
  if ((TLI.isOperationLegal(ISD::BR_JT, MVT::Other) ||
       TLI.isOperationLegal(ISD::BRIND, MVT::Other)) &&
      Cases.size() > 5) {
    uint64_t First = cast<ConstantIntegral>(Cases.front().first)->getZExtValue();
    uint64_t Last  = cast<ConstantIntegral>(Cases.back().first)->getZExtValue();
    double Density = (double)Cases.size() / (double)((Last - First) + 1ULL);
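
    // Worked example (illustrative): 8 cases covering values 10..30 give
    // Density = 8 / 21 ~= 0.38 >= 0.3125, so a 21-entry jump table is emitted;
    // the same 8 cases spread over 100 values (density 0.08) fall through to
    // the binary search tree lowering below.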

    if (Density >= 0.3125) {
      // Create a new basic block to hold the code for loading the address
      // of the jump table, and jumping to it.  Update successor information;
      // we will either branch to the default case for the switch, or the jump
      // table.
      MachineBasicBlock *JumpTableBB = new MachineBasicBlock(LLVMBB);
      CurMF->getBasicBlockList().insert(BBI, JumpTableBB);
      CurMBB->addSuccessor(Default);
      CurMBB->addSuccessor(JumpTableBB);

      // Subtract the lowest switch case value from the value being switched on
      // and conditional branch to default mbb if the result is greater than the
      // difference between smallest and largest cases.
      SDOperand SwitchOp = getValue(SV);
      MVT::ValueType VT = SwitchOp.getValueType();
      SDOperand SUB = DAG.getNode(ISD::SUB, VT, SwitchOp,
                                  DAG.getConstant(First, VT));

      // The SDNode we just created, which holds the value being switched on
      // minus the smallest case value, needs to be copied to a virtual
      // register so it can be used as an index into the jump table in a
      // subsequent basic block.  This value may be smaller or larger than the
      // target's pointer type, and therefore require extension or truncating.
      if (VT > TLI.getPointerTy())
        SwitchOp = DAG.getNode(ISD::TRUNCATE, TLI.getPointerTy(), SUB);
      else
        SwitchOp = DAG.getNode(ISD::ZERO_EXTEND, TLI.getPointerTy(), SUB);

      unsigned JumpTableReg = FuncInfo.MakeReg(TLI.getPointerTy());
      SDOperand CopyTo = DAG.getCopyToReg(getRoot(), JumpTableReg, SwitchOp);

      // Emit the range check for the jump table, and branch to the default
      // block for the switch statement if the value being switched on exceeds
      // the largest case in the switch.
      SDOperand CMP = DAG.getSetCC(TLI.getSetCCResultTy(), SUB,
                                   DAG.getConstant(Last-First, VT), ISD::SETUGT);
      DAG.setRoot(DAG.getNode(ISD::BRCOND, MVT::Other, CopyTo, CMP,
                              DAG.getBasicBlock(Default)));

      // Build a vector of destination BBs, corresponding to each target
      // of the jump table.  If the value of the jump table slot corresponds to
      // a case statement, push the case's BB onto the vector, otherwise, push
      // the default BB.
      std::vector<MachineBasicBlock*> DestBBs;
      uint64_t TEI = First;
      for (CaseItr ii = Cases.begin(), ee = Cases.end(); ii != ee; ++TEI)
        if (cast<ConstantIntegral>(ii->first)->getZExtValue() == TEI) {
          DestBBs.push_back(ii->second);
          ++ii;
        } else {
          DestBBs.push_back(Default);
        }
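
      // Illustrative example: with cases {1, 2, 7} (First == 1, Last == 7) the
      // loop above builds a 7-entry table in which the slots for 1, 2, and 7
      // hold the case blocks and slots 3..6 hold the default block.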

      // Update successor info.  Add one edge to each unique successor.
      // Vector bool would be better, but vector<bool> is really slow.
      std::vector<unsigned char> SuccsHandled;
      SuccsHandled.resize(CurMBB->getParent()->getNumBlockIDs());

      for (std::vector<MachineBasicBlock*>::iterator I = DestBBs.begin(),
           E = DestBBs.end(); I != E; ++I) {
        if (!SuccsHandled[(*I)->getNumber()]) {
          SuccsHandled[(*I)->getNumber()] = true;
          JumpTableBB->addSuccessor(*I);
        }
      }

      // Create a jump table index for this jump table, or return an existing
      // one.
      unsigned JTI = CurMF->getJumpTableInfo()->getJumpTableIndex(DestBBs);

      // Set the jump table information so that we can codegen it as a second
      // MachineBasicBlock.
      JT.Reg = JumpTableReg;
      JT.JTI = JTI;
      JT.MBB = JumpTableBB;
      JT.Default = Default;
      return;
    }
  }

  // Push the initial CaseRec onto the worklist
  std::vector<CaseRec> CaseVec;
  CaseVec.push_back(CaseRec(CurMBB,0,0,CaseRange(Cases.begin(),Cases.end())));

  while (!CaseVec.empty()) {
    // Grab a record representing a case range to process off the worklist
    CaseRec CR = CaseVec.back();
    CaseVec.pop_back();

    // Size is the number of Cases represented by this range.  If Size is 1,
    // then we are processing a leaf of the binary search tree.  Otherwise,
    // we need to pick a pivot, and push left and right ranges onto the
    // worklist.
    unsigned Size = CR.Range.second - CR.Range.first;

    if (Size == 1) {
      // Create a CaseBlock record representing a conditional branch to
      // the Case's target mbb if the value being switched on SV is equal
      // to C.  Otherwise, branch to default.
      Constant *C = CR.Range.first->first;
      MachineBasicBlock *Target = CR.Range.first->second;
      SelectionDAGISel::CaseBlock CB(ISD::SETEQ, SV, C, Target, Default,
                                     CR.CaseBB);

      // If the MBB representing the leaf node is the current MBB, then just
      // call visitSwitchCase to emit the code into the current block.
      // Otherwise, push the CaseBlock onto the vector to be later processed
      // by SDISel, and insert the node's MBB before the next MBB.
      if (CR.CaseBB == CurMBB)
        visitSwitchCase(CB);
      else
        SwitchCases.push_back(CB);
    } else {
      // split case range at pivot
      CaseItr Pivot = CR.Range.first + (Size / 2);
      CaseRange LHSR(CR.Range.first, Pivot);
      CaseRange RHSR(Pivot, CR.Range.second);
      Constant *C = Pivot->first;
      MachineBasicBlock *FalseBB = 0, *TrueBB = 0;

      // We know that we branch to the LHS if the Value being switched on is
      // less than the Pivot value, C.  We use this to optimize our binary
      // tree a bit, by recognizing that if SV is greater than or equal to the
      // LHS's Case Value, and that Case Value is exactly one less than the
      // Pivot's Value, then we can branch directly to the LHS's Target,
      // rather than creating a leaf node for it.
      if ((LHSR.second - LHSR.first) == 1 &&
          LHSR.first->first == CR.GE &&
          cast<ConstantIntegral>(C)->getZExtValue() ==
          (cast<ConstantIntegral>(CR.GE)->getZExtValue() + 1ULL)) {
        TrueBB = LHSR.first->second;
      } else {
        TrueBB = new MachineBasicBlock(LLVMBB);
        CurMF->getBasicBlockList().insert(BBI, TrueBB);
        CaseVec.push_back(CaseRec(TrueBB, C, CR.GE, LHSR));
      }

      // Similar to the optimization above, if the Value being switched on is
      // known to be less than the Constant CR.LT, and the current Case Value
      // is CR.LT - 1, then we can branch directly to the target block for
      // the current Case Value, rather than emitting a RHS leaf node for it.
      if ((RHSR.second - RHSR.first) == 1 && CR.LT &&
          cast<ConstantIntegral>(RHSR.first->first)->getZExtValue() ==
          (cast<ConstantIntegral>(CR.LT)->getZExtValue() - 1ULL)) {
        FalseBB = RHSR.first->second;
      } else {
        FalseBB = new MachineBasicBlock(LLVMBB);
        CurMF->getBasicBlockList().insert(BBI, FalseBB);
        CaseVec.push_back(CaseRec(FalseBB, CR.LT, C, RHSR));
      }

      // Create a CaseBlock record representing a conditional branch to
      // the LHS node if the value being switched on SV is less than C.
      // Otherwise, branch to the RHS node.
      ISD::CondCode CC = C->getType()->isSigned() ? ISD::SETLT : ISD::SETULT;
      SelectionDAGISel::CaseBlock CB(CC, SV, C, TrueBB, FalseBB, CR.CaseBB);

      if (CR.CaseBB == CurMBB)
        visitSwitchCase(CB);
      else
        SwitchCases.push_back(CB);
    }
  }
}
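
// Worked example (illustrative): six sorted cases c1..c6 split at pivot c4:
// {c1,c2,c3} and {c4,c5,c6} become CaseRecs on the worklist, and CR.CaseBB
// branches on "SV < c4".  The recorded GE/LT bounds then let a single-case
// subrange adjacent to the pivot branch directly to its target, as the two
// optimizations above describe.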

void SelectionDAGLowering::visitSub(User &I) {
  // -0.0 - X --> fneg
  if (I.getType()->isFloatingPoint()) {
    if (ConstantFP *CFP = dyn_cast<ConstantFP>(I.getOperand(0)))
      if (CFP->isExactlyValue(-0.0)) {
        SDOperand Op2 = getValue(I.getOperand(1));
        setValue(&I, DAG.getNode(ISD::FNEG, Op2.getValueType(), Op2));
        return;
      }
    visitFPBinary(I, ISD::FSUB, ISD::VSUB);
  } else
    visitIntBinary(I, ISD::SUB, ISD::VSUB);
}

void
SelectionDAGLowering::visitIntBinary(User &I, unsigned IntOp, unsigned VecOp) {
  const Type *Ty = I.getType();
  SDOperand Op1 = getValue(I.getOperand(0));
  SDOperand Op2 = getValue(I.getOperand(1));

  if (const PackedType *PTy = dyn_cast<PackedType>(Ty)) {
    SDOperand Num = DAG.getConstant(PTy->getNumElements(), MVT::i32);
    SDOperand Typ = DAG.getValueType(TLI.getValueType(PTy->getElementType()));
    setValue(&I, DAG.getNode(VecOp, MVT::Vector, Op1, Op2, Num, Typ));
  } else {
    setValue(&I, DAG.getNode(IntOp, Op1.getValueType(), Op1, Op2));
  }
}

void
SelectionDAGLowering::visitFPBinary(User &I, unsigned FPOp, unsigned VecOp) {
  const Type *Ty = I.getType();
  SDOperand Op1 = getValue(I.getOperand(0));
  SDOperand Op2 = getValue(I.getOperand(1));

  if (const PackedType *PTy = dyn_cast<PackedType>(Ty)) {
    SDOperand Num = DAG.getConstant(PTy->getNumElements(), MVT::i32);
    SDOperand Typ = DAG.getValueType(TLI.getValueType(PTy->getElementType()));
    setValue(&I, DAG.getNode(VecOp, MVT::Vector, Op1, Op2, Num, Typ));
  } else {
    setValue(&I, DAG.getNode(FPOp, Op1.getValueType(), Op1, Op2));
  }
}

void SelectionDAGLowering::visitShift(User &I, unsigned Opcode) {
  SDOperand Op1 = getValue(I.getOperand(0));
  SDOperand Op2 = getValue(I.getOperand(1));

  Op2 = DAG.getNode(ISD::ANY_EXTEND, TLI.getShiftAmountTy(), Op2);

  setValue(&I, DAG.getNode(Opcode, Op1.getValueType(), Op1, Op2));
}

void SelectionDAGLowering::visitSetCC(User &I, ISD::CondCode SignedOpcode,
                                      ISD::CondCode UnsignedOpcode,
                                      ISD::CondCode FPOpcode) {
  SDOperand Op1 = getValue(I.getOperand(0));
  SDOperand Op2 = getValue(I.getOperand(1));
  ISD::CondCode Opcode = SignedOpcode;
  if (!FiniteOnlyFPMath() && I.getOperand(0)->getType()->isFloatingPoint())
    Opcode = FPOpcode;
  else if (I.getOperand(0)->getType()->isUnsigned())
    Opcode = UnsignedOpcode;
  setValue(&I, DAG.getSetCC(MVT::i1, Op1, Op2, Opcode));
}

void SelectionDAGLowering::visitSelect(User &I) {
  SDOperand Cond     = getValue(I.getOperand(0));
  SDOperand TrueVal  = getValue(I.getOperand(1));
  SDOperand FalseVal = getValue(I.getOperand(2));
  if (!isa<PackedType>(I.getType())) {
    setValue(&I, DAG.getNode(ISD::SELECT, TrueVal.getValueType(), Cond,
                             TrueVal, FalseVal));
  } else {
    setValue(&I, DAG.getNode(ISD::VSELECT, MVT::Vector, Cond, TrueVal, FalseVal,
                             *(TrueVal.Val->op_end()-2),
                             *(TrueVal.Val->op_end()-1)));
  }
}

void SelectionDAGLowering::visitCast(User &I) {
  SDOperand N = getValue(I.getOperand(0));
  MVT::ValueType SrcVT = N.getValueType();
  MVT::ValueType DestVT = TLI.getValueType(I.getType());

  if (DestVT == MVT::Vector) {
    // This is a cast to a vector from something else.  This is always a bit
    // convert.  Get information about the input vector.
    const PackedType *DestTy = cast<PackedType>(I.getType());
    MVT::ValueType EltVT = TLI.getValueType(DestTy->getElementType());
    setValue(&I, DAG.getNode(ISD::VBIT_CONVERT, DestVT, N,
                             DAG.getConstant(DestTy->getNumElements(), MVT::i32),
                             DAG.getValueType(EltVT)));
  } else if (SrcVT == DestVT) {
    setValue(&I, N);  // noop cast.
  } else if (DestVT == MVT::i1) {
    // Cast to bool is a comparison against zero, not truncation to zero.
    SDOperand Zero = isInteger(SrcVT) ? DAG.getConstant(0, N.getValueType()) :
                                        DAG.getConstantFP(0.0, N.getValueType());
    setValue(&I, DAG.getSetCC(MVT::i1, N, Zero, ISD::SETNE));
  } else if (isInteger(SrcVT)) {
    if (isInteger(DestVT)) {          // Int -> Int cast
      if (DestVT < SrcVT)             // Truncating cast?
        setValue(&I, DAG.getNode(ISD::TRUNCATE, DestVT, N));
      else if (I.getOperand(0)->getType()->isSigned())
        setValue(&I, DAG.getNode(ISD::SIGN_EXTEND, DestVT, N));
      else
        setValue(&I, DAG.getNode(ISD::ZERO_EXTEND, DestVT, N));
    } else if (isFloatingPoint(DestVT)) {   // Int -> FP cast
      if (I.getOperand(0)->getType()->isSigned())
        setValue(&I, DAG.getNode(ISD::SINT_TO_FP, DestVT, N));
      else
        setValue(&I, DAG.getNode(ISD::UINT_TO_FP, DestVT, N));
    } else {
      assert(0 && "Unknown cast!");
    }
  } else if (isFloatingPoint(SrcVT)) {
    if (isFloatingPoint(DestVT)) {    // FP -> FP cast
      if (DestVT < SrcVT)             // Rounding cast?
        setValue(&I, DAG.getNode(ISD::FP_ROUND, DestVT, N));
      else
        setValue(&I, DAG.getNode(ISD::FP_EXTEND, DestVT, N));
    } else if (isInteger(DestVT)) {   // FP -> Int cast.
      if (I.getType()->isSigned())
        setValue(&I, DAG.getNode(ISD::FP_TO_SINT, DestVT, N));
      else
        setValue(&I, DAG.getNode(ISD::FP_TO_UINT, DestVT, N));
    } else {
      assert(0 && "Unknown cast!");
    }
  } else {
    assert(SrcVT == MVT::Vector && "Unknown cast!");
    assert(DestVT != MVT::Vector && "Casts to vector already handled!");
    // This is a cast from a vector to something else.  This is always a bit
    // convert.  Get information about the input vector.
    setValue(&I, DAG.getNode(ISD::VBIT_CONVERT, DestVT, N));
  }
}

void SelectionDAGLowering::visitInsertElement(User &I) {
  SDOperand InVec = getValue(I.getOperand(0));
  SDOperand InVal = getValue(I.getOperand(1));
  SDOperand InIdx = DAG.getNode(ISD::ZERO_EXTEND, TLI.getPointerTy(),
                                getValue(I.getOperand(2)));

  SDOperand Num = *(InVec.Val->op_end()-2);
  SDOperand Typ = *(InVec.Val->op_end()-1);
  setValue(&I, DAG.getNode(ISD::VINSERT_VECTOR_ELT, MVT::Vector,
                           InVec, InVal, InIdx, Num, Typ));
}

void SelectionDAGLowering::visitExtractElement(User &I) {
  SDOperand InVec = getValue(I.getOperand(0));
  SDOperand InIdx = DAG.getNode(ISD::ZERO_EXTEND, TLI.getPointerTy(),
                                getValue(I.getOperand(1)));
  SDOperand Typ = *(InVec.Val->op_end()-1);
  setValue(&I, DAG.getNode(ISD::VEXTRACT_VECTOR_ELT,
                           TLI.getValueType(I.getType()), InVec, InIdx));
}

void SelectionDAGLowering::visitShuffleVector(User &I) {
  SDOperand V1 = getValue(I.getOperand(0));
  SDOperand V2 = getValue(I.getOperand(1));
  SDOperand Mask = getValue(I.getOperand(2));

  SDOperand Num = *(V1.Val->op_end()-2);
  SDOperand Typ = *(V2.Val->op_end()-1);
  setValue(&I, DAG.getNode(ISD::VVECTOR_SHUFFLE, MVT::Vector,
                           V1, V2, Mask, Num, Typ));
}
1563 void SelectionDAGLowering::visitGetElementPtr(User &I) {
1564 SDOperand N = getValue(I.getOperand(0));
1565 const Type *Ty = I.getOperand(0)->getType();
1567 for (GetElementPtrInst::op_iterator OI = I.op_begin()+1, E = I.op_end();
1570 if (const StructType *StTy = dyn_cast<StructType>(Ty)) {
1571 unsigned Field = cast<ConstantInt>(Idx)->getZExtValue();
1574 uint64_t Offset = TD->getStructLayout(StTy)->MemberOffsets[Field];
1575 N = DAG.getNode(ISD::ADD, N.getValueType(), N,
1576 getIntPtrConstant(Offset));
1578 Ty = StTy->getElementType(Field);
1580 Ty = cast<SequentialType>(Ty)->getElementType();
1582 // If this is a constant subscript, handle it quickly.
1583 if (ConstantInt *CI = dyn_cast<ConstantInt>(Idx)) {
1584 if (CI->getZExtValue() == 0) continue;
1586 if (CI->getType()->isSigned())
1588 TD->getTypeSize(Ty)*cast<ConstantInt>(CI)->getSExtValue();
1591 TD->getTypeSize(Ty)*cast<ConstantInt>(CI)->getZExtValue();
1592 N = DAG.getNode(ISD::ADD, N.getValueType(), N, getIntPtrConstant(Offs));
1596 // N = N + Idx * ElementSize;
1597 uint64_t ElementSize = TD->getTypeSize(Ty);
1598 SDOperand IdxN = getValue(Idx);
1600 // If the index is smaller or larger than intptr_t, truncate or extend
1601 // it to match.
1602 if (IdxN.getValueType() < N.getValueType()) {
1603 if (Idx->getType()->isSigned())
1604 IdxN = DAG.getNode(ISD::SIGN_EXTEND, N.getValueType(), IdxN);
1606 IdxN = DAG.getNode(ISD::ZERO_EXTEND, N.getValueType(), IdxN);
1607 } else if (IdxN.getValueType() > N.getValueType())
1608 IdxN = DAG.getNode(ISD::TRUNCATE, N.getValueType(), IdxN);
1610 // If this is a multiply by a power of two, turn it into a shl
1611 // immediately. This is a very common case.
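// E.g. indexing an array of i32 gives ElementSize = 4 == 1<<2, so Idx*4 is
// emitted as Idx << 2 instead of a multiply.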
1612 if (isPowerOf2_64(ElementSize)) {
1613 unsigned Amt = Log2_64(ElementSize);
1614 IdxN = DAG.getNode(ISD::SHL, N.getValueType(), IdxN,
1615 DAG.getConstant(Amt, TLI.getShiftAmountTy()));
1616 N = DAG.getNode(ISD::ADD, N.getValueType(), N, IdxN);
1617 continue;
1618 }
1620 SDOperand Scale = getIntPtrConstant(ElementSize);
1621 IdxN = DAG.getNode(ISD::MUL, N.getValueType(), IdxN, Scale);
1622 N = DAG.getNode(ISD::ADD, N.getValueType(), N, IdxN);
1623 }
1624 }
1625 setValue(&I, N);
1626 }
1628 void SelectionDAGLowering::visitAlloca(AllocaInst &I) {
1629 // If this is a fixed sized alloca in the entry block of the function,
1630 // allocate it statically on the stack.
1631 if (FuncInfo.StaticAllocaMap.count(&I))
1632 return; // getValue will auto-populate this.
1634 const Type *Ty = I.getAllocatedType();
1635 uint64_t TySize = TLI.getTargetData()->getTypeSize(Ty);
1636 unsigned Align = std::max((unsigned)TLI.getTargetData()->getTypeAlignment(Ty),
1637 I.getAlignment());
1639 SDOperand AllocSize = getValue(I.getArraySize());
1640 MVT::ValueType IntPtr = TLI.getPointerTy();
1641 if (IntPtr < AllocSize.getValueType())
1642 AllocSize = DAG.getNode(ISD::TRUNCATE, IntPtr, AllocSize);
1643 else if (IntPtr > AllocSize.getValueType())
1644 AllocSize = DAG.getNode(ISD::ZERO_EXTEND, IntPtr, AllocSize);
1646 AllocSize = DAG.getNode(ISD::MUL, IntPtr, AllocSize,
1647 getIntPtrConstant(TySize));
1649 // Handle alignment. If the requested alignment is less than or equal to the
1650 // stack alignment, ignore it and round the size of the allocation up to the
1651 // stack alignment size. If the size is greater than the stack alignment, we
1652 // note this in the DYNAMIC_STACKALLOC node.
1653 unsigned StackAlign =
1654 TLI.getTargetMachine().getFrameInfo()->getStackAlignment();
1655 if (Align <= StackAlign) {
1656 Align = 0;
1657 // Add SA-1 to the size.
1658 AllocSize = DAG.getNode(ISD::ADD, AllocSize.getValueType(), AllocSize,
1659 getIntPtrConstant(StackAlign-1));
1660 // Mask out the low bits for alignment purposes.
1661 AllocSize = DAG.getNode(ISD::AND, AllocSize.getValueType(), AllocSize,
1662 getIntPtrConstant(~(uint64_t)(StackAlign-1)));
1663 }
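// Together, the ADD/AND above compute
// (AllocSize + StackAlign-1) & ~(StackAlign-1), the standard
// round-up-to-a-multiple trick; e.g. with StackAlign == 16, a 20-byte
// request becomes (20 + 15) & ~15 == 32.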
1665 SDOperand Ops[] = { getRoot(), AllocSize, getIntPtrConstant(Align) };
1666 const MVT::ValueType *VTs = DAG.getNodeValueTypes(AllocSize.getValueType(),
1667 MVT::Other);
1668 SDOperand DSA = DAG.getNode(ISD::DYNAMIC_STACKALLOC, VTs, 2, Ops, 3);
1669 DAG.setRoot(setValue(&I, DSA).getValue(1));
1671 // Inform the Frame Information that we have just allocated a variable-sized
1672 // object.
1673 CurMBB->getParent()->getFrameInfo()->CreateVariableSizedObject();
1674 }
1676 void SelectionDAGLowering::visitLoad(LoadInst &I) {
1677 SDOperand Ptr = getValue(I.getOperand(0));
1679 SDOperand Root;
1680 if (I.isVolatile())
1681 Root = getRoot();
1682 else {
1683 // Do not serialize non-volatile loads against each other.
1684 Root = DAG.getRoot();
1685 }
1687 setValue(&I, getLoadFrom(I.getType(), Ptr, I.getOperand(0),
1688 Root, I.isVolatile()));
1689 }
1691 SDOperand SelectionDAGLowering::getLoadFrom(const Type *Ty, SDOperand Ptr,
1692 const Value *SV, SDOperand Root,
1693 bool isVolatile) {
1694 SDOperand L;
1695 if (const PackedType *PTy = dyn_cast<PackedType>(Ty)) {
1696 MVT::ValueType PVT = TLI.getValueType(PTy->getElementType());
1697 L = DAG.getVecLoad(PTy->getNumElements(), PVT, Root, Ptr,
1698 DAG.getSrcValue(SV));
1699 } else {
1700 L = DAG.getLoad(TLI.getValueType(Ty), Root, Ptr, SV, isVolatile);
1701 }
1703 if (isVolatile)
1704 DAG.setRoot(L.getValue(1));
1705 else
1706 PendingLoads.push_back(L.getValue(1));
1708 return L;
1709 }
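// Design note: rooting non-volatile loads at DAG.getRoot() rather than at
// getRoot() leaves them unordered with respect to one another; their output
// chains accumulate in PendingLoads and are TokenFactor'd together the next
// time getRoot() is called to chain a side-effecting node.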
1712 void SelectionDAGLowering::visitStore(StoreInst &I) {
1713 Value *SrcV = I.getOperand(0);
1714 SDOperand Src = getValue(SrcV);
1715 SDOperand Ptr = getValue(I.getOperand(1));
1716 DAG.setRoot(DAG.getStore(getRoot(), Src, Ptr, I.getOperand(1),
1717 I.isVolatile()));
1718 }
1720 /// IntrinsicCannotAccessMemory - Return true if the specified intrinsic cannot
1721 /// access memory and has no other side effects at all.
1722 static bool IntrinsicCannotAccessMemory(unsigned IntrinsicID) {
1723 #define GET_NO_MEMORY_INTRINSICS
1724 #include "llvm/Intrinsics.gen"
1725 #undef GET_NO_MEMORY_INTRINSICS
1729 // IntrinsicOnlyReadsMemory - Return true if the specified intrinsic doesn't
1730 // have any side-effects or if it only reads memory.
1731 static bool IntrinsicOnlyReadsMemory(unsigned IntrinsicID) {
1732 #define GET_SIDE_EFFECT_INFO
1733 #include "llvm/Intrinsics.gen"
1734 #undef GET_SIDE_EFFECT_INFO
1738 /// visitTargetIntrinsic - Lower a call of a target intrinsic to an INTRINSIC
1739 /// node.
1740 void SelectionDAGLowering::visitTargetIntrinsic(CallInst &I,
1741 unsigned Intrinsic) {
1742 bool HasChain = !IntrinsicCannotAccessMemory(Intrinsic);
1743 bool OnlyLoad = HasChain && IntrinsicOnlyReadsMemory(Intrinsic);
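// HasChain/OnlyLoad decide the node flavor created further down: no chain
// at all (INTRINSIC_WO_CHAIN), a chain plus a result (INTRINSIC_W_CHAIN),
// or a chain only (INTRINSIC_VOID).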
1745 // Build the operand list.
1746 SmallVector<SDOperand, 8> Ops;
1747 if (HasChain) { // If this intrinsic has side-effects, chainify it.
1748 if (OnlyLoad) {
1749 // We don't need to serialize loads against other loads.
1750 Ops.push_back(DAG.getRoot());
1751 } else {
1752 Ops.push_back(getRoot());
1753 }
1754 }
1756 // Add the intrinsic ID as an integer operand.
1757 Ops.push_back(DAG.getConstant(Intrinsic, TLI.getPointerTy()));
1759 // Add all operands of the call to the operand list.
1760 for (unsigned i = 1, e = I.getNumOperands(); i != e; ++i) {
1761 SDOperand Op = getValue(I.getOperand(i));
1763 // If this is a vector type, force it to the right packed type.
1764 if (Op.getValueType() == MVT::Vector) {
1765 const PackedType *OpTy = cast<PackedType>(I.getOperand(i)->getType());
1766 MVT::ValueType EltVT = TLI.getValueType(OpTy->getElementType());
1768 MVT::ValueType VVT = MVT::getVectorType(EltVT, OpTy->getNumElements());
1769 assert(VVT != MVT::Other && "Intrinsic uses a non-legal type?");
1770 Op = DAG.getNode(ISD::VBIT_CONVERT, VVT, Op);
1771 }
1773 assert(TLI.isTypeLegal(Op.getValueType()) &&
1774 "Intrinsic uses a non-legal type?");
1775 Ops.push_back(Op);
1776 }
1778 std::vector<MVT::ValueType> VTs;
1779 if (I.getType() != Type::VoidTy) {
1780 MVT::ValueType VT = TLI.getValueType(I.getType());
1781 if (VT == MVT::Vector) {
1782 const PackedType *DestTy = cast<PackedType>(I.getType());
1783 MVT::ValueType EltVT = TLI.getValueType(DestTy->getElementType());
1785 VT = MVT::getVectorType(EltVT, DestTy->getNumElements());
1786 assert(VT != MVT::Other && "Intrinsic uses a non-legal type?");
1787 }
1789 assert(TLI.isTypeLegal(VT) && "Intrinsic uses a non-legal type?");
1790 VTs.push_back(VT);
1791 }
1792 if (HasChain)
1793 VTs.push_back(MVT::Other);
1795 const MVT::ValueType *VTList = DAG.getNodeValueTypes(VTs);
1797 // Create the node.
1798 SDOperand Result;
1799 if (!HasChain)
1800 Result = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, VTList, VTs.size(),
1801 &Ops[0], Ops.size());
1802 else if (I.getType() != Type::VoidTy)
1803 Result = DAG.getNode(ISD::INTRINSIC_W_CHAIN, VTList, VTs.size(),
1804 &Ops[0], Ops.size());
1805 else
1806 Result = DAG.getNode(ISD::INTRINSIC_VOID, VTList, VTs.size(),
1807 &Ops[0], Ops.size());
1809 if (HasChain) {
1810 SDOperand Chain = Result.getValue(Result.Val->getNumValues()-1);
1811 if (OnlyLoad)
1812 PendingLoads.push_back(Chain);
1813 else
1814 DAG.setRoot(Chain);
1815 }
1816 if (I.getType() != Type::VoidTy) {
1817 if (const PackedType *PTy = dyn_cast<PackedType>(I.getType())) {
1818 MVT::ValueType EVT = TLI.getValueType(PTy->getElementType());
1819 Result = DAG.getNode(ISD::VBIT_CONVERT, MVT::Vector, Result,
1820 DAG.getConstant(PTy->getNumElements(), MVT::i32),
1821 DAG.getValueType(EVT));
1822 }
1823 setValue(&I, Result);
1824 }
1825 }
1827 /// visitIntrinsicCall - Lower the call to the specified intrinsic function. If
1828 /// we want to emit this as a call to a named external function, return the name
1829 /// otherwise lower it and return null.
1830 const char *
1831 SelectionDAGLowering::visitIntrinsicCall(CallInst &I, unsigned Intrinsic) {
1832 switch (Intrinsic) {
1833 default:
1834 // By default, turn this into a target intrinsic node.
1835 visitTargetIntrinsic(I, Intrinsic);
1836 return 0;
1837 case Intrinsic::vastart: visitVAStart(I); return 0;
1838 case Intrinsic::vaend: visitVAEnd(I); return 0;
1839 case Intrinsic::vacopy: visitVACopy(I); return 0;
1840 case Intrinsic::returnaddress: visitFrameReturnAddress(I, false); return 0;
1841 case Intrinsic::frameaddress: visitFrameReturnAddress(I, true); return 0;
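// Note: the string arithmetic below is intentional. "_setjmp"+0 is the
// string "_setjmp", while "_setjmp"+1 points past the underscore and yields
// "setjmp"; which one is returned depends on whether the target uses the
// underscore-prefixed variants.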
1842 case Intrinsic::setjmp:
1843 return "_setjmp"+!TLI.usesUnderscoreSetJmpLongJmp();
1845 case Intrinsic::longjmp:
1846 return "_longjmp"+!TLI.usesUnderscoreSetJmpLongJmp();
1848 case Intrinsic::memcpy_i32:
1849 case Intrinsic::memcpy_i64:
1850 visitMemIntrinsic(I, ISD::MEMCPY);
1851 return 0;
1852 case Intrinsic::memset_i32:
1853 case Intrinsic::memset_i64:
1854 visitMemIntrinsic(I, ISD::MEMSET);
1855 return 0;
1856 case Intrinsic::memmove_i32:
1857 case Intrinsic::memmove_i64:
1858 visitMemIntrinsic(I, ISD::MEMMOVE);
1859 return 0;
1861 case Intrinsic::dbg_stoppoint: {
1862 MachineDebugInfo *DebugInfo = DAG.getMachineDebugInfo();
1863 DbgStopPointInst &SPI = cast<DbgStopPointInst>(I);
1864 if (DebugInfo && SPI.getContext() && DebugInfo->Verify(SPI.getContext())) {
1865 SDOperand Ops[5];
1867 Ops[0] = getRoot();
1868 Ops[1] = getValue(SPI.getLineValue());
1869 Ops[2] = getValue(SPI.getColumnValue());
1871 DebugInfoDesc *DD = DebugInfo->getDescFor(SPI.getContext());
1872 assert(DD && "Not a debug information descriptor");
1873 CompileUnitDesc *CompileUnit = cast<CompileUnitDesc>(DD);
1875 Ops[3] = DAG.getString(CompileUnit->getFileName());
1876 Ops[4] = DAG.getString(CompileUnit->getDirectory());
1878 DAG.setRoot(DAG.getNode(ISD::LOCATION, MVT::Other, Ops, 5));
1879 }
1881 return 0;
1882 }
1883 case Intrinsic::dbg_region_start: {
1884 MachineDebugInfo *DebugInfo = DAG.getMachineDebugInfo();
1885 DbgRegionStartInst &RSI = cast<DbgRegionStartInst>(I);
1886 if (DebugInfo && RSI.getContext() && DebugInfo->Verify(RSI.getContext())) {
1887 unsigned LabelID = DebugInfo->RecordRegionStart(RSI.getContext());
1888 DAG.setRoot(DAG.getNode(ISD::DEBUG_LABEL, MVT::Other, getRoot(),
1889 DAG.getConstant(LabelID, MVT::i32)));
1890 }
1892 return 0;
1893 }
1894 case Intrinsic::dbg_region_end: {
1895 MachineDebugInfo *DebugInfo = DAG.getMachineDebugInfo();
1896 DbgRegionEndInst &REI = cast<DbgRegionEndInst>(I);
1897 if (DebugInfo && REI.getContext() && DebugInfo->Verify(REI.getContext())) {
1898 unsigned LabelID = DebugInfo->RecordRegionEnd(REI.getContext());
1899 DAG.setRoot(DAG.getNode(ISD::DEBUG_LABEL, MVT::Other,
1900 getRoot(), DAG.getConstant(LabelID, MVT::i32)));
1901 }
1903 return 0;
1904 }
1905 case Intrinsic::dbg_func_start: {
1906 MachineDebugInfo *DebugInfo = DAG.getMachineDebugInfo();
1907 DbgFuncStartInst &FSI = cast<DbgFuncStartInst>(I);
1908 if (DebugInfo && FSI.getSubprogram() &&
1909 DebugInfo->Verify(FSI.getSubprogram())) {
1910 unsigned LabelID = DebugInfo->RecordRegionStart(FSI.getSubprogram());
1911 DAG.setRoot(DAG.getNode(ISD::DEBUG_LABEL, MVT::Other,
1912 getRoot(), DAG.getConstant(LabelID, MVT::i32)));
1913 }
1915 return 0;
1916 }
1917 case Intrinsic::dbg_declare: {
1918 MachineDebugInfo *DebugInfo = DAG.getMachineDebugInfo();
1919 DbgDeclareInst &DI = cast<DbgDeclareInst>(I);
1920 if (DebugInfo && DI.getVariable() && DebugInfo->Verify(DI.getVariable())) {
1921 SDOperand AddressOp = getValue(DI.getAddress());
1922 if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(AddressOp))
1923 DebugInfo->RecordVariable(DI.getVariable(), FI->getIndex());
1924 }
1926 return 0;
1927 }
1929 case Intrinsic::isunordered_f32:
1930 case Intrinsic::isunordered_f64:
1931 setValue(&I, DAG.getSetCC(MVT::i1, getValue(I.getOperand(1)),
1932 getValue(I.getOperand(2)), ISD::SETUO));
1933 return 0;
1935 case Intrinsic::sqrt_f32:
1936 case Intrinsic::sqrt_f64:
1937 setValue(&I, DAG.getNode(ISD::FSQRT,
1938 getValue(I.getOperand(1)).getValueType(),
1939 getValue(I.getOperand(1))));
1940 return 0;
1941 case Intrinsic::powi_f32:
1942 case Intrinsic::powi_f64:
1943 setValue(&I, DAG.getNode(ISD::FPOWI,
1944 getValue(I.getOperand(1)).getValueType(),
1945 getValue(I.getOperand(1)),
1946 getValue(I.getOperand(2))));
1947 return 0;
1948 case Intrinsic::pcmarker: {
1949 SDOperand Tmp = getValue(I.getOperand(1));
1950 DAG.setRoot(DAG.getNode(ISD::PCMARKER, MVT::Other, getRoot(), Tmp));
1951 return 0;
1952 }
1953 case Intrinsic::readcyclecounter: {
1954 SDOperand Op = getRoot();
1955 SDOperand Tmp = DAG.getNode(ISD::READCYCLECOUNTER,
1956 DAG.getNodeValueTypes(MVT::i64, MVT::Other), 2,
1957 &Op, 1);
1958 setValue(&I, Tmp);
1959 DAG.setRoot(Tmp.getValue(1));
1960 return 0;
1961 }
1962 case Intrinsic::bswap_i16:
1963 case Intrinsic::bswap_i32:
1964 case Intrinsic::bswap_i64:
1965 setValue(&I, DAG.getNode(ISD::BSWAP,
1966 getValue(I.getOperand(1)).getValueType(),
1967 getValue(I.getOperand(1))));
1968 return 0;
1969 case Intrinsic::cttz_i8:
1970 case Intrinsic::cttz_i16:
1971 case Intrinsic::cttz_i32:
1972 case Intrinsic::cttz_i64:
1973 setValue(&I, DAG.getNode(ISD::CTTZ,
1974 getValue(I.getOperand(1)).getValueType(),
1975 getValue(I.getOperand(1))));
1976 return 0;
1977 case Intrinsic::ctlz_i8:
1978 case Intrinsic::ctlz_i16:
1979 case Intrinsic::ctlz_i32:
1980 case Intrinsic::ctlz_i64:
1981 setValue(&I, DAG.getNode(ISD::CTLZ,
1982 getValue(I.getOperand(1)).getValueType(),
1983 getValue(I.getOperand(1))));
1984 return 0;
1985 case Intrinsic::ctpop_i8:
1986 case Intrinsic::ctpop_i16:
1987 case Intrinsic::ctpop_i32:
1988 case Intrinsic::ctpop_i64:
1989 setValue(&I, DAG.getNode(ISD::CTPOP,
1990 getValue(I.getOperand(1)).getValueType(),
1991 getValue(I.getOperand(1))));
1992 return 0;
1993 case Intrinsic::stacksave: {
1994 SDOperand Op = getRoot();
1995 SDOperand Tmp = DAG.getNode(ISD::STACKSAVE,
1996 DAG.getNodeValueTypes(TLI.getPointerTy(), MVT::Other), 2, &Op, 1);
1997 setValue(&I, Tmp);
1998 DAG.setRoot(Tmp.getValue(1));
1999 return 0;
2000 }
2001 case Intrinsic::stackrestore: {
2002 SDOperand Tmp = getValue(I.getOperand(1));
2003 DAG.setRoot(DAG.getNode(ISD::STACKRESTORE, MVT::Other, getRoot(), Tmp));
2004 return 0;
2005 }
2006 case Intrinsic::prefetch:
2007 // FIXME: Currently discarding prefetches.
2008 return 0;
2009 }
2010 }
2013 void SelectionDAGLowering::visitCall(CallInst &I) {
2014 const char *RenameFn = 0;
2015 if (Function *F = I.getCalledFunction()) {
2016 if (F->isExternal())
2017 if (unsigned IID = F->getIntrinsicID()) {
2018 RenameFn = visitIntrinsicCall(I, IID);
2019 if (!RenameFn)
2020 return;
2021 } else { // Not an LLVM intrinsic.
2022 const std::string &Name = F->getName();
2023 if (Name[0] == 'c' && (Name == "copysign" || Name == "copysignf")) {
2024 if (I.getNumOperands() == 3 && // Basic sanity checks.
2025 I.getOperand(1)->getType()->isFloatingPoint() &&
2026 I.getType() == I.getOperand(1)->getType() &&
2027 I.getType() == I.getOperand(2)->getType()) {
2028 SDOperand LHS = getValue(I.getOperand(1));
2029 SDOperand RHS = getValue(I.getOperand(2));
2030 setValue(&I, DAG.getNode(ISD::FCOPYSIGN, LHS.getValueType(),
2031 LHS, RHS));
2032 return;
2033 }
2034 } else if (Name[0] == 'f' && (Name == "fabs" || Name == "fabsf")) {
2035 if (I.getNumOperands() == 2 && // Basic sanity checks.
2036 I.getOperand(1)->getType()->isFloatingPoint() &&
2037 I.getType() == I.getOperand(1)->getType()) {
2038 SDOperand Tmp = getValue(I.getOperand(1));
2039 setValue(&I, DAG.getNode(ISD::FABS, Tmp.getValueType(), Tmp));
2040 return;
2041 }
2042 } else if (Name[0] == 's' && (Name == "sin" || Name == "sinf")) {
2043 if (I.getNumOperands() == 2 && // Basic sanity checks.
2044 I.getOperand(1)->getType()->isFloatingPoint() &&
2045 I.getType() == I.getOperand(1)->getType()) {
2046 SDOperand Tmp = getValue(I.getOperand(1));
2047 setValue(&I, DAG.getNode(ISD::FSIN, Tmp.getValueType(), Tmp));
2048 return;
2049 }
2050 } else if (Name[0] == 'c' && (Name == "cos" || Name == "cosf")) {
2051 if (I.getNumOperands() == 2 && // Basic sanity checks.
2052 I.getOperand(1)->getType()->isFloatingPoint() &&
2053 I.getType() == I.getOperand(1)->getType()) {
2054 SDOperand Tmp = getValue(I.getOperand(1));
2055 setValue(&I, DAG.getNode(ISD::FCOS, Tmp.getValueType(), Tmp));
2056 return;
2057 }
2058 }
2059 }
2060 } else if (isa<InlineAsm>(I.getOperand(0))) {
2061 visitInlineAsm(I);
2062 return;
2063 }
2065 SDOperand Callee;
2066 if (!RenameFn)
2067 Callee = getValue(I.getOperand(0));
2068 else
2069 Callee = DAG.getExternalSymbol(RenameFn, TLI.getPointerTy());
2070 std::vector<std::pair<SDOperand, const Type*> > Args;
2071 Args.reserve(I.getNumOperands());
2072 for (unsigned i = 1, e = I.getNumOperands(); i != e; ++i) {
2073 Value *Arg = I.getOperand(i);
2074 SDOperand ArgNode = getValue(Arg);
2075 Args.push_back(std::make_pair(ArgNode, Arg->getType()));
2076 }
2078 const PointerType *PT = cast<PointerType>(I.getCalledValue()->getType());
2079 const FunctionType *FTy = cast<FunctionType>(PT->getElementType());
2081 std::pair<SDOperand,SDOperand> Result =
2082 TLI.LowerCallTo(getRoot(), I.getType(), FTy->isVarArg(), I.getCallingConv(),
2083 I.isTailCall(), Callee, Args, DAG);
2084 if (I.getType() != Type::VoidTy)
2085 setValue(&I, Result.first);
2086 DAG.setRoot(Result.second);
2087 }
2089 SDOperand RegsForValue::getCopyFromRegs(SelectionDAG &DAG,
2090 SDOperand &Chain, SDOperand &Flag) const {
2091 SDOperand Val = DAG.getCopyFromReg(Chain, Regs[0], RegVT, Flag);
2092 Chain = Val.getValue(1);
2093 Flag = Val.getValue(2);
2095 // If the result was expanded, copy from the top part.
2096 if (Regs.size() > 1) {
2097 assert(Regs.size() == 2 &&
2098 "Cannot expand to more than 2 elts yet!");
2099 SDOperand Hi = DAG.getCopyFromReg(Chain, Regs[1], RegVT, Flag);
2100 Chain = Hi.getValue(1);
2101 Flag = Hi.getValue(2);
2102 if (DAG.getTargetLoweringInfo().isLittleEndian())
2103 return DAG.getNode(ISD::BUILD_PAIR, ValueVT, Val, Hi);
2104 else
2105 return DAG.getNode(ISD::BUILD_PAIR, ValueVT, Hi, Val);
2106 }
2108 // Otherwise, if the return value was promoted or extended, truncate it to the
2109 // appropriate type.
2110 if (RegVT == ValueVT)
2111 return Val;
2113 if (MVT::isInteger(RegVT)) {
2114 if (ValueVT < RegVT)
2115 return DAG.getNode(ISD::TRUNCATE, ValueVT, Val);
2116 else
2117 return DAG.getNode(ISD::ANY_EXTEND, ValueVT, Val);
2118 } else {
2119 return DAG.getNode(ISD::FP_ROUND, ValueVT, Val);
2120 }
2121 }
2123 /// getCopyToRegs - Emit a series of CopyToReg nodes that copies the
2124 /// specified value into the registers specified by this object. This uses
2125 /// Chain/Flag as the input and updates them for the output Chain/Flag.
2126 void RegsForValue::getCopyToRegs(SDOperand Val, SelectionDAG &DAG,
2127 SDOperand &Chain, SDOperand &Flag,
2128 MVT::ValueType PtrVT) const {
2129 if (Regs.size() == 1) {
2130 // If there is a single register and the types differ, this must be
2131 // a promotion.
2132 if (RegVT != ValueVT) {
2133 if (MVT::isInteger(RegVT)) {
2134 if (RegVT < ValueVT)
2135 Val = DAG.getNode(ISD::TRUNCATE, RegVT, Val);
2136 else
2137 Val = DAG.getNode(ISD::ANY_EXTEND, RegVT, Val);
2138 } else
2139 Val = DAG.getNode(ISD::FP_EXTEND, RegVT, Val);
2140 }
2141 Chain = DAG.getCopyToReg(Chain, Regs[0], Val, Flag);
2142 Flag = Chain.getValue(1);
2143 } else {
2144 std::vector<unsigned> R(Regs);
2145 if (!DAG.getTargetLoweringInfo().isLittleEndian())
2146 std::reverse(R.begin(), R.end());
2148 for (unsigned i = 0, e = R.size(); i != e; ++i) {
2149 SDOperand Part = DAG.getNode(ISD::EXTRACT_ELEMENT, RegVT, Val,
2150 DAG.getConstant(i, PtrVT));
2151 Chain = DAG.getCopyToReg(Chain, R[i], Part, Flag);
2152 Flag = Chain.getValue(1);
2153 }
2154 }
2155 }
2157 /// AddInlineAsmOperands - Add this value to the specified inlineasm node
2158 /// operand list. This adds the code marker and includes the number of
2159 /// values added into it.
2160 void RegsForValue::AddInlineAsmOperands(unsigned Code, SelectionDAG &DAG,
2161 std::vector<SDOperand> &Ops) const {
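// Encoding note: the low 3 bits of the descriptor constant hold the operand
// kind (1 = REGUSE, 2 = REGDEF, 3 = IMM, 4 = MEM) and the remaining bits the
// number of operands that follow, so e.g. a register pair defined as output
// is announced as 2 | (2 << 3) == 18.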
2162 Ops.push_back(DAG.getConstant(Code | (Regs.size() << 3), MVT::i32));
2163 for (unsigned i = 0, e = Regs.size(); i != e; ++i)
2164 Ops.push_back(DAG.getRegister(Regs[i], RegVT));
2165 }
2167 /// isAllocatableRegister - If the specified register is safe to allocate,
2168 /// i.e. it isn't a stack pointer or some other special register, return the
2169 /// register class for the register. Otherwise, return null.
2170 static const TargetRegisterClass *
2171 isAllocatableRegister(unsigned Reg, MachineFunction &MF,
2172 const TargetLowering &TLI, const MRegisterInfo *MRI) {
2173 MVT::ValueType FoundVT = MVT::Other;
2174 const TargetRegisterClass *FoundRC = 0;
2175 for (MRegisterInfo::regclass_iterator RCI = MRI->regclass_begin(),
2176 E = MRI->regclass_end(); RCI != E; ++RCI) {
2177 MVT::ValueType ThisVT = MVT::Other;
2179 const TargetRegisterClass *RC = *RCI;
2180 // If none of the value types for this register class are valid, we
2181 // can't use it. For example, 64-bit reg classes on 32-bit targets.
2182 for (TargetRegisterClass::vt_iterator I = RC->vt_begin(), E = RC->vt_end();
2183 I != E; ++I) {
2184 if (TLI.isTypeLegal(*I)) {
2185 // If we have already found this register in a different register class,
2186 // choose the one with the largest VT specified. For example, on
2187 // PowerPC, we favor f64 register classes over f32.
2188 if (FoundVT == MVT::Other ||
2189 MVT::getSizeInBits(FoundVT) < MVT::getSizeInBits(*I)) {
2190 ThisVT = *I;
2191 break;
2192 }
2193 }
2194 }
2196 if (ThisVT == MVT::Other) continue;
2198 // NOTE: This isn't ideal. In particular, this might allocate the
2199 // frame pointer in functions that need it (due to them not being taken
2200 // out of allocation, because a variable sized allocation hasn't been seen
2201 // yet). This is a slight code pessimization, but should still work.
2202 for (TargetRegisterClass::iterator I = RC->allocation_order_begin(MF),
2203 E = RC->allocation_order_end(MF); I != E; ++I)
2204 if (*I == Reg) {
2205 // We found a matching register class. Keep looking at others in case
2206 // we find one with larger registers that this physreg is also in.
2207 FoundRC = RC;
2208 FoundVT = ThisVT;
2209 break;
2210 }
2211 }
2213 return FoundRC;
2214 }
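/// GetRegistersForValue - Assign registers for the given inline asm
/// constraint code: a specific physical register for "{reg}"-style
/// constraints, freshly created virtual registers when a register class can
/// be used directly, or a run of consecutive allocatable physical registers
/// otherwise. Fixed registers handed out are recorded in OutputRegs /
/// InputRegs so later operands do not reuse them.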
2215 RegsForValue SelectionDAGLowering::
2216 GetRegistersForValue(const std::string &ConstrCode,
2217 MVT::ValueType VT, bool isOutReg, bool isInReg,
2218 std::set<unsigned> &OutputRegs,
2219 std::set<unsigned> &InputRegs) {
2220 std::pair<unsigned, const TargetRegisterClass*> PhysReg =
2221 TLI.getRegForInlineAsmConstraint(ConstrCode, VT);
2222 std::vector<unsigned> Regs;
2224 unsigned NumRegs = VT != MVT::Other ? TLI.getNumElements(VT) : 1;
2225 MVT::ValueType RegVT;
2226 MVT::ValueType ValueVT = VT;
2228 // If this is a constraint for a specific physical register, like {r17},
2230 if (PhysReg.first) {
2231 if (VT == MVT::Other)
2232 ValueVT = *PhysReg.second->vt_begin();
2234 // Get the actual register value type. This is important, because the user
2235 // may have asked for (e.g.) the AX register in i32 type. We need to
2236 // remember that AX is actually i16 to get the right extension.
2237 RegVT = *PhysReg.second->vt_begin();
2239 // This is an explicit reference to a physical register.
2240 Regs.push_back(PhysReg.first);
2242 // If this is an expanded reference, add the rest of the regs to Regs.
2243 if (NumRegs != 1) {
2244 TargetRegisterClass::iterator I = PhysReg.second->begin();
2245 TargetRegisterClass::iterator E = PhysReg.second->end();
2246 for (; *I != PhysReg.first; ++I)
2247 assert(I != E && "Didn't find reg!");
2249 // Already added the first reg.
2250 --NumRegs; ++I;
2251 for (; NumRegs; --NumRegs, ++I) {
2252 assert(I != E && "Ran out of registers to allocate!");
2253 Regs.push_back(*I);
2254 }
2255 }
2256 return RegsForValue(Regs, RegVT, ValueVT);
2257 }
2259 // Otherwise, if this was a reference to an LLVM register class, create vregs
2260 // for this reference.
2261 std::vector<unsigned> RegClassRegs;
2262 if (PhysReg.second) {
2263 // If this is an early clobber or tied register, our regalloc doesn't know
2264 // how to maintain the constraint. If it isn't, go ahead and create vregs
2265 // and let the regalloc do the right thing.
2266 if (!isOutReg || !isInReg) {
2267 if (VT == MVT::Other)
2268 ValueVT = *PhysReg.second->vt_begin();
2269 RegVT = *PhysReg.second->vt_begin();
2271 // Create the appropriate number of virtual registers.
2272 SSARegMap *RegMap = DAG.getMachineFunction().getSSARegMap();
2273 for (; NumRegs; --NumRegs)
2274 Regs.push_back(RegMap->createVirtualRegister(PhysReg.second));
2276 return RegsForValue(Regs, RegVT, ValueVT);
2279 // Otherwise, we can't allocate it. Let the code below figure out how to
2280 // maintain these constraints.
2281 RegClassRegs.assign(PhysReg.second->begin(), PhysReg.second->end());
2283 } else {
2284 // This is a reference to a register class that doesn't directly correspond
2285 // to an LLVM register class. Allocate NumRegs consecutive, available,
2286 // registers from the class.
2287 RegClassRegs = TLI.getRegClassForInlineAsmConstraint(ConstrCode, VT);
2288 }
2290 const MRegisterInfo *MRI = DAG.getTarget().getRegisterInfo();
2291 MachineFunction &MF = *CurMBB->getParent();
2292 unsigned NumAllocated = 0;
2293 for (unsigned i = 0, e = RegClassRegs.size(); i != e; ++i) {
2294 unsigned Reg = RegClassRegs[i];
2295 // See if this register is available.
2296 if ((isOutReg && OutputRegs.count(Reg)) || // Already used.
2297 (isInReg && InputRegs.count(Reg))) { // Already used.
2298 // Make sure we find consecutive registers.
2299 NumAllocated = 0;
2300 continue;
2301 }
2303 // Check to see if this register is allocatable (i.e. don't give out the
2304 // stack pointer).
2305 const TargetRegisterClass *RC = isAllocatableRegister(Reg, MF, TLI, MRI);
2306 if (!RC) {
2307 // Make sure we find consecutive registers.
2308 NumAllocated = 0;
2309 continue;
2310 }
2312 // Okay, this register is good, we can use it.
2313 ++NumAllocated;
2315 // If we allocated enough consecutive registers, we are done.
2316 if (NumAllocated == NumRegs) {
2317 unsigned RegStart = (i-NumAllocated)+1;
2318 unsigned RegEnd = i+1;
2319 // Mark all of the allocated registers used.
2320 for (unsigned i = RegStart; i != RegEnd; ++i) {
2321 unsigned Reg = RegClassRegs[i];
2322 Regs.push_back(Reg);
2323 if (isOutReg) OutputRegs.insert(Reg); // Mark reg used.
2324 if (isInReg) InputRegs.insert(Reg); // Mark reg used.
2327 return RegsForValue(Regs, *RC->vt_begin(), VT);
2328 }
2329 }
2331 // Otherwise, we couldn't allocate enough registers for this.
2332 return RegsForValue();
2333 }
2336 /// visitInlineAsm - Handle a call to an InlineAsm object.
2338 void SelectionDAGLowering::visitInlineAsm(CallInst &I) {
2339 InlineAsm *IA = cast<InlineAsm>(I.getOperand(0));
2341 SDOperand AsmStr = DAG.getTargetExternalSymbol(IA->getAsmString().c_str(),
2342 TLI.getPointerTy());
2344 std::vector<InlineAsm::ConstraintInfo> Constraints = IA->ParseConstraints();
2345 std::vector<MVT::ValueType> ConstraintVTs;
2347 /// AsmNodeOperands - A list of pairs. The first element is a register, the
2348 /// second is a bitfield where bit #0 is set if it is a use and bit #1 is set
2349 /// if it is a def of that register.
2350 std::vector<SDOperand> AsmNodeOperands;
2351 AsmNodeOperands.push_back(SDOperand()); // reserve space for input chain
2352 AsmNodeOperands.push_back(AsmStr);
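// INLINEASM operand layout as built here: operand 0 is the input chain,
// operand 1 the asm string, then each constraint contributes a descriptor
// constant (kind | count << 3) followed by its register, memory, or
// immediate operands; an optional flag operand comes last.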
2354 SDOperand Chain = getRoot();
2355 SDOperand Flag;
2357 // We fully assign registers here at isel time. This is not optimal, but
2358 // should work. For register classes that correspond to LLVM classes, we
2359 // could let the LLVM RA do its thing, but we currently don't. Do a prepass
2360 // over the constraints, collecting fixed registers that we know we can't use.
2361 std::set<unsigned> OutputRegs, InputRegs;
2362 unsigned OpNum = 1;
2363 for (unsigned i = 0, e = Constraints.size(); i != e; ++i) {
2364 assert(Constraints[i].Codes.size() == 1 && "Only handles one code so far!");
2365 std::string &ConstraintCode = Constraints[i].Codes[0];
2367 MVT::ValueType OpVT;
2369 // Compute the value type for each operand and add it to ConstraintVTs.
2370 switch (Constraints[i].Type) {
2371 case InlineAsm::isOutput:
2372 if (!Constraints[i].isIndirectOutput) {
2373 assert(I.getType() != Type::VoidTy && "Bad inline asm!");
2374 OpVT = TLI.getValueType(I.getType());
2375 } else {
2376 const Type *OpTy = I.getOperand(OpNum)->getType();
2377 OpVT = TLI.getValueType(cast<PointerType>(OpTy)->getElementType());
2378 OpNum++; // Consumes a call operand.
2379 }
2380 break;
2381 case InlineAsm::isInput:
2382 OpVT = TLI.getValueType(I.getOperand(OpNum)->getType());
2383 OpNum++; // Consumes a call operand.
2384 break;
2385 case InlineAsm::isClobber:
2386 OpVT = MVT::Other;
2387 break;
2388 }
2390 ConstraintVTs.push_back(OpVT);
2392 if (TLI.getRegForInlineAsmConstraint(ConstraintCode, OpVT).first == 0)
2393 continue; // Not assigned a fixed reg.
2395 // Build a list of regs that this operand uses. This always has a single
2396 // element for promoted/expanded operands.
2397 RegsForValue Regs = GetRegistersForValue(ConstraintCode, OpVT,
2399 OutputRegs, InputRegs);
2401 switch (Constraints[i].Type) {
2402 case InlineAsm::isOutput:
2403 // We can't assign any other output to this register.
2404 OutputRegs.insert(Regs.Regs.begin(), Regs.Regs.end());
2405 // If this is an early-clobber output, it cannot be assigned to the same
2406 // value as the input reg.
2407 if (Constraints[i].isEarlyClobber || Constraints[i].hasMatchingInput)
2408 InputRegs.insert(Regs.Regs.begin(), Regs.Regs.end());
2409 break;
2410 case InlineAsm::isInput:
2411 // We can't assign any other input to this register.
2412 InputRegs.insert(Regs.Regs.begin(), Regs.Regs.end());
2413 break;
2414 case InlineAsm::isClobber:
2415 // Clobbered regs cannot be used as inputs or outputs.
2416 InputRegs.insert(Regs.Regs.begin(), Regs.Regs.end());
2417 OutputRegs.insert(Regs.Regs.begin(), Regs.Regs.end());
2418 break;
2419 }
2420 }
2422 // Loop over all of the inputs, copying the operand values into the
2423 // appropriate registers and processing the output regs.
2424 RegsForValue RetValRegs;
2425 std::vector<std::pair<RegsForValue, Value*> > IndirectStoresToEmit;
2427 OpNum = 1;
2428 for (unsigned i = 0, e = Constraints.size(); i != e; ++i) {
2429 assert(Constraints[i].Codes.size() == 1 && "Only handles one code so far!");
2430 std::string &ConstraintCode = Constraints[i].Codes[0];
2432 switch (Constraints[i].Type) {
2433 case InlineAsm::isOutput: {
2434 TargetLowering::ConstraintType CTy = TargetLowering::C_RegisterClass;
2435 if (ConstraintCode.size() == 1) // not a physreg name.
2436 CTy = TLI.getConstraintType(ConstraintCode[0]);
2438 if (CTy == TargetLowering::C_Memory) {
2440 SDOperand InOperandVal = getValue(I.getOperand(OpNum));
2442 // Check that the operand (the address to store to) isn't a float.
2443 if (!MVT::isInteger(InOperandVal.getValueType()))
2444 assert(0 && "MATCH FAIL!");
2446 if (!Constraints[i].isIndirectOutput)
2447 assert(0 && "MATCH FAIL!");
2449 OpNum++; // Consumes a call operand.
2451 // Extend/truncate to the right pointer type if needed.
2452 MVT::ValueType PtrType = TLI.getPointerTy();
2453 if (InOperandVal.getValueType() < PtrType)
2454 InOperandVal = DAG.getNode(ISD::ZERO_EXTEND, PtrType, InOperandVal);
2455 else if (InOperandVal.getValueType() > PtrType)
2456 InOperandVal = DAG.getNode(ISD::TRUNCATE, PtrType, InOperandVal);
2458 // Add information to the INLINEASM node to know about this output.
2459 unsigned ResOpType = 4/*MEM*/ | (1 << 3);
2460 AsmNodeOperands.push_back(DAG.getConstant(ResOpType, MVT::i32));
2461 AsmNodeOperands.push_back(InOperandVal);
2462 break;
2463 }
2465 // Otherwise, this is a register output.
2466 assert(CTy == TargetLowering::C_RegisterClass && "Unknown op type!");
2468 // If this is an early-clobber output, or if there is an input
2469 // constraint that matches this, we need to reserve the input register
2470 // so no other inputs allocate to it.
2471 bool UsesInputRegister = false;
2472 if (Constraints[i].isEarlyClobber || Constraints[i].hasMatchingInput)
2473 UsesInputRegister = true;
2475 // Copy the output from the appropriate register. Find a register that
2476 // we can use.
2477 RegsForValue Regs =
2478 GetRegistersForValue(ConstraintCode, ConstraintVTs[i],
2479 true, UsesInputRegister,
2480 OutputRegs, InputRegs);
2481 if (Regs.Regs.empty()) {
2482 std::cerr << "Couldn't allocate output reg for contraint '"
2483 << ConstraintCode << "'!\n";
2487 if (!Constraints[i].isIndirectOutput) {
2488 assert(RetValRegs.Regs.empty() &&
2489 "Cannot have multiple output constraints yet!");
2490 assert(I.getType() != Type::VoidTy && "Bad inline asm!");
2491 RetValRegs = Regs;
2492 } else {
2493 IndirectStoresToEmit.push_back(std::make_pair(Regs,
2494 I.getOperand(OpNum)));
2495 OpNum++; // Consumes a call operand.
2496 }
2498 // Add information to the INLINEASM node to know that this register is
2499 // set.
2500 Regs.AddInlineAsmOperands(2 /*REGDEF*/, DAG, AsmNodeOperands);
2501 break;
2502 }
2503 case InlineAsm::isInput: {
2504 SDOperand InOperandVal = getValue(I.getOperand(OpNum));
2505 OpNum++; // Consumes a call operand.
2507 if (isdigit(ConstraintCode[0])) { // Matching constraint?
2508 // If this is required to match an output register we have already set,
2509 // just use its register.
2510 unsigned OperandNo = atoi(ConstraintCode.c_str());
2512 // Scan until we find the definition we already emitted of this operand.
2513 // When we find it, create a RegsForValue operand.
2514 unsigned CurOp = 2; // The first operand.
2515 for (; OperandNo; --OperandNo) {
2516 // Advance to the next operand.
2517 unsigned NumOps =
2518 cast<ConstantSDNode>(AsmNodeOperands[CurOp])->getValue();
2519 assert(((NumOps & 7) == 2 /*REGDEF*/ ||
2520 (NumOps & 7) == 4 /*MEM*/) &&
2521 "Skipped past definitions?");
2522 CurOp += (NumOps>>3)+1;
2523 }
2525 unsigned NumOps =
2526 cast<ConstantSDNode>(AsmNodeOperands[CurOp])->getValue();
2527 assert((NumOps & 7) == 2 /*REGDEF*/ &&
2528 "Skipped past definitions?");
2530 // Add NumOps>>3 registers to MatchedRegs.
2531 RegsForValue MatchedRegs;
2532 MatchedRegs.ValueVT = InOperandVal.getValueType();
2533 MatchedRegs.RegVT = AsmNodeOperands[CurOp+1].getValueType();
2534 for (unsigned i = 0, e = NumOps>>3; i != e; ++i) {
2535 unsigned Reg=cast<RegisterSDNode>(AsmNodeOperands[++CurOp])->getReg();
2536 MatchedRegs.Regs.push_back(Reg);
2537 }
2539 // Use the produced MatchedRegs object to copy the input value into the
2540 // matched registers and record them on the INLINEASM node.
2540 MatchedRegs.getCopyToRegs(InOperandVal, DAG, Chain, Flag,
2541 TLI.getPointerTy());
2542 MatchedRegs.AddInlineAsmOperands(1 /*REGUSE*/, DAG, AsmNodeOperands);
2543 break;
2544 }
2546 TargetLowering::ConstraintType CTy = TargetLowering::C_RegisterClass;
2547 if (ConstraintCode.size() == 1) // not a physreg name.
2548 CTy = TLI.getConstraintType(ConstraintCode[0]);
2550 if (CTy == TargetLowering::C_Other) {
2551 InOperandVal = TLI.isOperandValidForConstraint(InOperandVal,
2552 ConstraintCode[0], DAG);
2553 if (!InOperandVal.Val) {
2554 std::cerr << "Invalid operand for inline asm constraint '"
2555 << ConstraintCode << "'!\n";
2559 // Add information to the INLINEASM node to know about this input.
2560 unsigned ResOpType = 3 /*IMM*/ | (1 << 3);
2561 AsmNodeOperands.push_back(DAG.getConstant(ResOpType, MVT::i32));
2562 AsmNodeOperands.push_back(InOperandVal);
2563 break;
2564 } else if (CTy == TargetLowering::C_Memory) {
2567 // Check that the operand isn't a float.
2568 if (!MVT::isInteger(InOperandVal.getValueType()))
2569 assert(0 && "MATCH FAIL!");
2571 // Extend/truncate to the right pointer type if needed.
2572 MVT::ValueType PtrType = TLI.getPointerTy();
2573 if (InOperandVal.getValueType() < PtrType)
2574 InOperandVal = DAG.getNode(ISD::ZERO_EXTEND, PtrType, InOperandVal);
2575 else if (InOperandVal.getValueType() > PtrType)
2576 InOperandVal = DAG.getNode(ISD::TRUNCATE, PtrType, InOperandVal);
2578 // Add information to the INLINEASM node to know about this input.
2579 unsigned ResOpType = 4/*MEM*/ | (1 << 3);
2580 AsmNodeOperands.push_back(DAG.getConstant(ResOpType, MVT::i32));
2581 AsmNodeOperands.push_back(InOperandVal);
2582 break;
2583 }
2585 assert(CTy == TargetLowering::C_RegisterClass && "Unknown op type!");
2587 // Copy the input into the appropriate registers.
2588 RegsForValue InRegs =
2589 GetRegistersForValue(ConstraintCode, ConstraintVTs[i],
2590 false, true, OutputRegs, InputRegs);
2591 // FIXME: should be match fail.
2592 assert(!InRegs.Regs.empty() && "Couldn't allocate input reg!");
2594 InRegs.getCopyToRegs(InOperandVal, DAG, Chain, Flag, TLI.getPointerTy());
2596 InRegs.AddInlineAsmOperands(1/*REGUSE*/, DAG, AsmNodeOperands);
2597 break;
2598 }
2599 case InlineAsm::isClobber: {
2600 RegsForValue ClobberedRegs =
2601 GetRegistersForValue(ConstraintCode, MVT::Other, false, false,
2602 OutputRegs, InputRegs);
2603 // Add the clobbered value to the operand list, so that the register
2604 // allocator is aware that the physreg got clobbered.
2605 if (!ClobberedRegs.Regs.empty())
2606 ClobberedRegs.AddInlineAsmOperands(2/*REGDEF*/, DAG, AsmNodeOperands);
2607 break;
2608 }
2609 }
2610 }
2612 // Finish up input operands.
2613 AsmNodeOperands[0] = Chain;
2614 if (Flag.Val) AsmNodeOperands.push_back(Flag);
2616 Chain = DAG.getNode(ISD::INLINEASM,
2617 DAG.getNodeValueTypes(MVT::Other, MVT::Flag), 2,
2618 &AsmNodeOperands[0], AsmNodeOperands.size());
2619 Flag = Chain.getValue(1);
2621 // If this asm returns a register value, copy the result from that register
2622 // and set it as the value of the call.
2623 if (!RetValRegs.Regs.empty())
2624 setValue(&I, RetValRegs.getCopyFromRegs(DAG, Chain, Flag));
2626 std::vector<std::pair<SDOperand, Value*> > StoresToEmit;
2628 // Process indirect outputs, first output all of the flagged copies out of
2629 // the physregs.
2630 for (unsigned i = 0, e = IndirectStoresToEmit.size(); i != e; ++i) {
2631 RegsForValue &OutRegs = IndirectStoresToEmit[i].first;
2632 Value *Ptr = IndirectStoresToEmit[i].second;
2633 SDOperand OutVal = OutRegs.getCopyFromRegs(DAG, Chain, Flag);
2634 StoresToEmit.push_back(std::make_pair(OutVal, Ptr));
2635 }
2637 // Emit the non-flagged stores from the physregs.
2638 SmallVector<SDOperand, 8> OutChains;
2639 for (unsigned i = 0, e = StoresToEmit.size(); i != e; ++i)
2640 OutChains.push_back(DAG.getStore(Chain, StoresToEmit[i].first,
2641 getValue(StoresToEmit[i].second),
2642 StoresToEmit[i].second, 0));
2643 if (!OutChains.empty())
2644 Chain = DAG.getNode(ISD::TokenFactor, MVT::Other,
2645 &OutChains[0], OutChains.size());
2647 DAG.setRoot(Chain);
2648 }
2650 void SelectionDAGLowering::visitMalloc(MallocInst &I) {
2651 SDOperand Src = getValue(I.getOperand(0));
2653 MVT::ValueType IntPtr = TLI.getPointerTy();
2655 if (IntPtr < Src.getValueType())
2656 Src = DAG.getNode(ISD::TRUNCATE, IntPtr, Src);
2657 else if (IntPtr > Src.getValueType())
2658 Src = DAG.getNode(ISD::ZERO_EXTEND, IntPtr, Src);
2660 // Scale the source by the type size.
2661 uint64_t ElementSize = TD->getTypeSize(I.getType()->getElementType());
2662 Src = DAG.getNode(ISD::MUL, Src.getValueType(),
2663 Src, getIntPtrConstant(ElementSize));
2665 std::vector<std::pair<SDOperand, const Type*> > Args;
2666 Args.push_back(std::make_pair(Src, TLI.getTargetData()->getIntPtrType()));
2668 std::pair<SDOperand,SDOperand> Result =
2669 TLI.LowerCallTo(getRoot(), I.getType(), false, CallingConv::C, true,
2670 DAG.getExternalSymbol("malloc", IntPtr),
2672 setValue(&I, Result.first); // Pointers always fit in registers
2673 DAG.setRoot(Result.second);
2676 void SelectionDAGLowering::visitFree(FreeInst &I) {
2677 std::vector<std::pair<SDOperand, const Type*> > Args;
2678 Args.push_back(std::make_pair(getValue(I.getOperand(0)),
2679 TLI.getTargetData()->getIntPtrType()));
2680 MVT::ValueType IntPtr = TLI.getPointerTy();
2681 std::pair<SDOperand,SDOperand> Result =
2682 TLI.LowerCallTo(getRoot(), Type::VoidTy, false, CallingConv::C, true,
2683 DAG.getExternalSymbol("free", IntPtr), Args, DAG);
2684 DAG.setRoot(Result.second);
2685 }
2687 // InsertAtEndOfBasicBlock - This method should be implemented by targets that
2688 // mark instructions with the 'usesCustomDAGSchedInserter' flag. These
2689 // instructions are special in various ways, which require special support to
2690 // insert. The specified MachineInstr is created but not inserted into any
2691 // basic blocks, and the scheduler passes ownership of it to this method.
2692 MachineBasicBlock *TargetLowering::InsertAtEndOfBasicBlock(MachineInstr *MI,
2693 MachineBasicBlock *MBB) {
2694 std::cerr << "If a target marks an instruction with "
2695 "'usesCustomDAGSchedInserter', it must implement "
2696 "TargetLowering::InsertAtEndOfBasicBlock!\n";
2701 void SelectionDAGLowering::visitVAStart(CallInst &I) {
2702 DAG.setRoot(DAG.getNode(ISD::VASTART, MVT::Other, getRoot(),
2703 getValue(I.getOperand(1)),
2704 DAG.getSrcValue(I.getOperand(1))));
2707 void SelectionDAGLowering::visitVAArg(VAArgInst &I) {
2708 SDOperand V = DAG.getVAArg(TLI.getValueType(I.getType()), getRoot(),
2709 getValue(I.getOperand(0)),
2710 DAG.getSrcValue(I.getOperand(0)));
2711 setValue(&I, V);
2712 DAG.setRoot(V.getValue(1));
2713 }
2715 void SelectionDAGLowering::visitVAEnd(CallInst &I) {
2716 DAG.setRoot(DAG.getNode(ISD::VAEND, MVT::Other, getRoot(),
2717 getValue(I.getOperand(1)),
2718 DAG.getSrcValue(I.getOperand(1))));
2721 void SelectionDAGLowering::visitVACopy(CallInst &I) {
2722 DAG.setRoot(DAG.getNode(ISD::VACOPY, MVT::Other, getRoot(),
2723 getValue(I.getOperand(1)),
2724 getValue(I.getOperand(2)),
2725 DAG.getSrcValue(I.getOperand(1)),
2726 DAG.getSrcValue(I.getOperand(2))));
2729 /// TargetLowering::LowerArguments - This is the default LowerArguments
2730 /// implementation, which just inserts a FORMAL_ARGUMENTS node. FIXME: When all
2731 /// targets are migrated to using FORMAL_ARGUMENTS, this hook should be
2732 /// integrated into SDISel.
2733 std::vector<SDOperand>
2734 TargetLowering::LowerArguments(Function &F, SelectionDAG &DAG) {
2735 // Add CC# and isVararg as operands to the FORMAL_ARGUMENTS node.
2736 std::vector<SDOperand> Ops;
2737 Ops.push_back(DAG.getRoot());
2738 Ops.push_back(DAG.getConstant(F.getCallingConv(), getPointerTy()));
2739 Ops.push_back(DAG.getConstant(F.isVarArg(), getPointerTy()));
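// The FORMAL_ARGUMENTS node created below yields one result value per
// register-sized piece of each argument, plus a final MVT::Other result
// that becomes the new root chain.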
2741 // Add one result value for each formal argument.
2742 std::vector<MVT::ValueType> RetVals;
2743 for (Function::arg_iterator I = F.arg_begin(), E = F.arg_end(); I != E; ++I) {
2744 MVT::ValueType VT = getValueType(I->getType());
2746 switch (getTypeAction(VT)) {
2747 default: assert(0 && "Unknown type action!");
2748 case Legal:
2749 RetVals.push_back(VT);
2750 break;
2751 case Promote:
2752 RetVals.push_back(getTypeToTransformTo(VT));
2753 break;
2754 case Expand:
2755 if (VT != MVT::Vector) {
2756 // If this is a large integer, it needs to be broken up into small
2757 // integers. Figure out what the destination type is and how many small
2758 // integers it turns into.
2759 MVT::ValueType NVT = getTypeToTransformTo(VT);
2760 unsigned NumVals = MVT::getSizeInBits(VT)/MVT::getSizeInBits(NVT);
2761 for (unsigned i = 0; i != NumVals; ++i)
2762 RetVals.push_back(NVT);
2763 } else {
2764 // Otherwise, this is a vector type. We only support legal vectors
2765 // right now.
2766 unsigned NumElems = cast<PackedType>(I->getType())->getNumElements();
2767 const Type *EltTy = cast<PackedType>(I->getType())->getElementType();
2769 // Figure out if there is a Packed type corresponding to this Vector
2770 // type. If so, convert to the packed type.
2771 MVT::ValueType TVT = MVT::getVectorType(getValueType(EltTy), NumElems);
2772 if (TVT != MVT::Other && isTypeLegal(TVT)) {
2773 RetVals.push_back(TVT);
2774 } else {
2775 assert(0 && "Don't support illegal by-val vector arguments yet!");
2776 abort();
2777 }
2778 }
2779 break;
2780 }
2781 }
2782 RetVals.push_back(MVT::Other);
2785 SDNode *Result = DAG.getNode(ISD::FORMAL_ARGUMENTS,
2786 DAG.getNodeValueTypes(RetVals), RetVals.size(),
2787 &Ops[0], Ops.size()).Val;
2789 DAG.setRoot(SDOperand(Result, Result->getNumValues()-1));
2791 // Set up the return result vector.
2792 Ops.clear();
2793 unsigned i = 0;
2794 for (Function::arg_iterator I = F.arg_begin(), E = F.arg_end(); I != E; ++I) {
2795 MVT::ValueType VT = getValueType(I->getType());
2797 switch (getTypeAction(VT)) {
2798 default: assert(0 && "Unknown type action!");
2799 case Legal:
2800 Ops.push_back(SDOperand(Result, i++));
2801 break;
2802 case Promote: {
2803 SDOperand Op(Result, i++);
2804 if (MVT::isInteger(VT)) {
2805 unsigned AssertOp = I->getType()->isSigned() ? ISD::AssertSext
2806 : ISD::AssertZext;
2807 Op = DAG.getNode(AssertOp, Op.getValueType(), Op, DAG.getValueType(VT));
2808 Op = DAG.getNode(ISD::TRUNCATE, VT, Op);
2809 } else {
2810 assert(MVT::isFloatingPoint(VT) && "Not int or FP?");
2811 Op = DAG.getNode(ISD::FP_ROUND, VT, Op);
2812 }
2813 Ops.push_back(Op);
2814 break;
2815 }
2816 case Expand:
2817 if (VT != MVT::Vector) {
2818 // If this is a large integer, it needs to be reassembled from small
2819 // integers. Figure out what the source elt type is and how many small
2820 // integers it is.
2821 MVT::ValueType NVT = getTypeToTransformTo(VT);
2822 unsigned NumVals = MVT::getSizeInBits(VT)/MVT::getSizeInBits(NVT);
2823 if (NumVals == 2) {
2824 SDOperand Lo = SDOperand(Result, i++);
2825 SDOperand Hi = SDOperand(Result, i++);
2827 if (!isLittleEndian())
2828 std::swap(Lo, Hi);
2830 Ops.push_back(DAG.getNode(ISD::BUILD_PAIR, VT, Lo, Hi));
2831 } else {
2832 // Value scalarized into many values. Unimp for now.
2833 assert(0 && "Cannot expand i64 -> i16 yet!");
2834 }
2835 } else {
2836 // Otherwise, this is a vector type. We only support legal vectors
2837 // right now.
2838 const PackedType *PTy = cast<PackedType>(I->getType());
2839 unsigned NumElems = PTy->getNumElements();
2840 const Type *EltTy = PTy->getElementType();
2842 // Figure out if there is a Packed type corresponding to this Vector
2843 // type. If so, convert to the packed type.
2844 MVT::ValueType TVT = MVT::getVectorType(getValueType(EltTy), NumElems);
2845 if (TVT != MVT::Other && isTypeLegal(TVT)) {
2846 SDOperand N = SDOperand(Result, i++);
2847 // Handle copies from generic vectors to registers.
2848 N = DAG.getNode(ISD::VBIT_CONVERT, MVT::Vector, N,
2849 DAG.getConstant(NumElems, MVT::i32),
2850 DAG.getValueType(getValueType(EltTy)));
2851 Ops.push_back(N);
2852 } else {
2853 assert(0 && "Don't support illegal by-val vector arguments yet!");
2854 abort();
2855 }
2856 }
2857 break;
2858 }
2859 }
2861 return Ops;
2862 }
2864 /// TargetLowering::LowerCallTo - This is the default LowerCallTo
2865 /// implementation, which just inserts an ISD::CALL node, which is later custom
2866 /// lowered by the target to something concrete. FIXME: When all targets are
2867 /// migrated to using ISD::CALL, this hook should be integrated into SDISel.
2868 std::pair<SDOperand, SDOperand>
2869 TargetLowering::LowerCallTo(SDOperand Chain, const Type *RetTy, bool isVarArg,
2870 unsigned CallingConv, bool isTailCall,
2871 SDOperand Callee,
2872 ArgListTy &Args, SelectionDAG &DAG) {
2873 SmallVector<SDOperand, 32> Ops;
2874 Ops.push_back(Chain); // Op#0 - Chain
2875 Ops.push_back(DAG.getConstant(CallingConv, getPointerTy())); // Op#1 - CC
2876 Ops.push_back(DAG.getConstant(isVarArg, getPointerTy())); // Op#2 - VarArg
2877 Ops.push_back(DAG.getConstant(isTailCall, getPointerTy())); // Op#3 - Tail
2878 Ops.push_back(Callee);
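// Operand layout of the ISD::CALL node built here: chain, CC#, isVarArg,
// isTailCall, callee, then each argument value followed by an i32 constant
// recording whether that argument is signed.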
2880 // Handle all of the outgoing arguments.
2881 for (unsigned i = 0, e = Args.size(); i != e; ++i) {
2882 MVT::ValueType VT = getValueType(Args[i].second);
2883 SDOperand Op = Args[i].first;
2884 bool isSigned = Args[i].second->isSigned();
2885 switch (getTypeAction(VT)) {
2886 default: assert(0 && "Unknown type action!");
2887 case Legal:
2888 Ops.push_back(Op);
2889 Ops.push_back(DAG.getConstant(isSigned, MVT::i32));
2890 break;
2891 case Promote:
2892 if (MVT::isInteger(VT)) {
2893 unsigned ExtOp = isSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
2894 Op = DAG.getNode(ExtOp, getTypeToTransformTo(VT), Op);
2895 } else {
2896 assert(MVT::isFloatingPoint(VT) && "Not int or FP?");
2897 Op = DAG.getNode(ISD::FP_EXTEND, getTypeToTransformTo(VT), Op);
2898 }
2899 Ops.push_back(Op);
2900 Ops.push_back(DAG.getConstant(isSigned, MVT::i32));
2901 break;
2902 case Expand:
2903 if (VT != MVT::Vector) {
2904 // If this is a large integer, it needs to be broken down into small
2905 // integers. Figure out what the source elt type is and how many small
2906 // integers it is.
2907 MVT::ValueType NVT = getTypeToTransformTo(VT);
2908 unsigned NumVals = MVT::getSizeInBits(VT)/MVT::getSizeInBits(NVT);
2909 if (NumVals == 2) {
2910 SDOperand Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, NVT, Op,
2911 DAG.getConstant(0, getPointerTy()));
2912 SDOperand Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, NVT, Op,
2913 DAG.getConstant(1, getPointerTy()));
2914 if (!isLittleEndian())
2915 std::swap(Lo, Hi);
2917 Ops.push_back(Lo);
2918 Ops.push_back(DAG.getConstant(isSigned, MVT::i32));
2919 Ops.push_back(Hi);
2920 Ops.push_back(DAG.getConstant(isSigned, MVT::i32));
2921 } else {
2922 // Value scalarized into many values. Unimp for now.
2923 assert(0 && "Cannot expand i64 -> i16 yet!");
2924 }
2925 } else {
2926 // Otherwise, this is a vector type. We only support legal vectors
2927 // right now.
2928 const PackedType *PTy = cast<PackedType>(Args[i].second);
2929 unsigned NumElems = PTy->getNumElements();
2930 const Type *EltTy = PTy->getElementType();
2932 // Figure out if there is a Packed type corresponding to this Vector
2933 // type. If so, convert to the packed type.
2934 MVT::ValueType TVT = MVT::getVectorType(getValueType(EltTy), NumElems);
2935 if (TVT != MVT::Other && isTypeLegal(TVT)) {
2936 // Insert a VBIT_CONVERT of the MVT::Vector type to the packed type.
2937 Op = DAG.getNode(ISD::VBIT_CONVERT, TVT, Op);
2938 Ops.push_back(Op);
2939 Ops.push_back(DAG.getConstant(isSigned, MVT::i32));
2940 } else {
2941 assert(0 && "Don't support illegal by-val vector call args yet!");
2942 abort();
2943 }
2944 }
2945 break;
2946 }
2947 }
2949 // Figure out the result value types.
2950 SmallVector<MVT::ValueType, 4> RetTys;
2952 if (RetTy != Type::VoidTy) {
2953 MVT::ValueType VT = getValueType(RetTy);
2954 switch (getTypeAction(VT)) {
2955 default: assert(0 && "Unknown type action!");
2956 case Legal:
2957 RetTys.push_back(VT);
2958 break;
2959 case Promote:
2960 RetTys.push_back(getTypeToTransformTo(VT));
2961 break;
2962 case Expand:
2963 if (VT != MVT::Vector) {
2964 // If this is a large integer, it needs to be reassembled from small
2965 // integers. Figure out what the source elt type is and how many small
2966 // integers it is.
2967 MVT::ValueType NVT = getTypeToTransformTo(VT);
2968 unsigned NumVals = MVT::getSizeInBits(VT)/MVT::getSizeInBits(NVT);
2969 for (unsigned i = 0; i != NumVals; ++i)
2970 RetTys.push_back(NVT);
2971 } else {
2972 // Otherwise, this is a vector type. We only support legal vectors
2973 // right now.
2974 const PackedType *PTy = cast<PackedType>(RetTy);
2975 unsigned NumElems = PTy->getNumElements();
2976 const Type *EltTy = PTy->getElementType();
2978 // Figure out if there is a Packed type corresponding to this Vector
2979 // type. If so, convert to the packed type.
2980 MVT::ValueType TVT = MVT::getVectorType(getValueType(EltTy), NumElems);
2981 if (TVT != MVT::Other && isTypeLegal(TVT)) {
2982 RetTys.push_back(TVT);
2983 } else {
2984 assert(0 && "Don't support illegal by-val vector call results yet!");
2985 abort();
2986 }
2987 }
2988 }
2989 }
2991 RetTys.push_back(MVT::Other); // Always has a chain.
2993 // Finally, create the CALL node.
2994 SDOperand Res = DAG.getNode(ISD::CALL,
2995 DAG.getVTList(&RetTys[0], RetTys.size()),
2996 &Ops[0], Ops.size());
2998 // This returns a pair of operands. The first element is the
2999 // return value for the function (if RetTy is not VoidTy). The second
3000 // element is the outgoing token chain.
3001 SDOperand ResVal;
3002 if (RetTys.size() != 1) {
3003 MVT::ValueType VT = getValueType(RetTy);
3004 if (RetTys.size() == 2) {
3005 ResVal = Res;
3007 // If this value was promoted, truncate it down.
3008 if (ResVal.getValueType() != VT) {
3009 if (VT == MVT::Vector) {
3010 // Insert a VBITCONVERT to convert from the packed result type to the
3011 // MVT::Vector type.
3012 unsigned NumElems = cast<PackedType>(RetTy)->getNumElements();
3013 const Type *EltTy = cast<PackedType>(RetTy)->getElementType();
3015 // Figure out if there is a Packed type corresponding to this Vector
3016 // type. If so, convert to the packed type.
3017 MVT::ValueType TVT = MVT::getVectorType(getValueType(EltTy), NumElems);
3018 if (TVT != MVT::Other && isTypeLegal(TVT)) {
3019 // Insert a VBIT_CONVERT of the FORMAL_ARGUMENTS to a
3020 // "N x PTyElementVT" MVT::Vector type.
3021 ResVal = DAG.getNode(ISD::VBIT_CONVERT, MVT::Vector, ResVal,
3022 DAG.getConstant(NumElems, MVT::i32),
3023 DAG.getValueType(getValueType(EltTy)));
3024 } else {
3025 abort();
3026 }
3027 } else if (MVT::isInteger(VT)) {
3028 unsigned AssertOp = RetTy->isSigned() ?
3029 ISD::AssertSext : ISD::AssertZext;
3030 ResVal = DAG.getNode(AssertOp, ResVal.getValueType(), ResVal,
3031 DAG.getValueType(VT));
3032 ResVal = DAG.getNode(ISD::TRUNCATE, VT, ResVal);
3034 assert(MVT::isFloatingPoint(VT));
3035 ResVal = DAG.getNode(ISD::FP_ROUND, VT, ResVal);
3038 } else if (RetTys.size() == 3) {
3039 ResVal = DAG.getNode(ISD::BUILD_PAIR, VT,
3040 Res.getValue(0), Res.getValue(1));
3042 } else {
3043 assert(0 && "Case not handled yet!");
3044 }
3045 }
3047 return std::make_pair(ResVal, Res.getValue(Res.Val->getNumValues()-1));
3048 }
3052 // It is always conservatively correct for llvm.returnaddress and
3053 // llvm.frameaddress to return 0.
3055 // FIXME: Change this to insert a FRAMEADDR/RETURNADDR node, and have that be
3056 // expanded to 0 if the target wants.
3057 std::pair<SDOperand, SDOperand>
3058 TargetLowering::LowerFrameReturnAddress(bool isFrameAddr, SDOperand Chain,
3059 unsigned Depth, SelectionDAG &DAG) {
3060 return std::make_pair(DAG.getConstant(0, getPointerTy()), Chain);
3063 SDOperand TargetLowering::LowerOperation(SDOperand Op, SelectionDAG &DAG) {
3064 assert(0 && "LowerOperation not implemented for this target!");
3069 SDOperand TargetLowering::CustomPromoteOperation(SDOperand Op,
3070 SelectionDAG &DAG) {
3071 assert(0 && "CustomPromoteOperation not implemented for this target!");
3076 void SelectionDAGLowering::visitFrameReturnAddress(CallInst &I, bool isFrame) {
3077 unsigned Depth = (unsigned)cast<ConstantInt>(I.getOperand(1))->getZExtValue();
3078 std::pair<SDOperand,SDOperand> Result =
3079 TLI.LowerFrameReturnAddress(isFrame, getRoot(), Depth, DAG);
3080 setValue(&I, Result.first);
3081 DAG.setRoot(Result.second);
3082 }
3084 /// getMemsetValue - Vectorized representation of the memset value
3085 /// operand.
3086 static SDOperand getMemsetValue(SDOperand Value, MVT::ValueType VT,
3087 SelectionDAG &DAG) {
3088 MVT::ValueType CurVT = VT;
3089 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Value)) {
3090 uint64_t Val = C->getValue() & 255;
3091 unsigned Shift = 8;
3092 while (CurVT != MVT::i8) {
3093 Val = (Val << Shift) | Val;
3094 Shift <<= 1;
3095 CurVT = (MVT::ValueType)((unsigned)CurVT - 1);
3096 }
3097 return DAG.getConstant(Val, VT);
3098 }
3099 Value = DAG.getNode(ISD::ZERO_EXTEND, VT, Value);
3100 unsigned Shift = 8;
3101 while (CurVT != MVT::i8) {
3102 Value =
3103 DAG.getNode(ISD::OR, VT,
3104 DAG.getNode(ISD::SHL, VT, Value,
3105 DAG.getConstant(Shift, MVT::i8)), Value);
3106 Shift <<= 1;
3107 CurVT = (MVT::ValueType)((unsigned)CurVT - 1);
3108 }
3110 return Value;
3111 }
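// Worked example (illustrative): a constant fill byte 0xAB widened to an
// i32 store value doubles up each round:
// (0xAB << 8) | 0xAB == 0xABAB, then (0xABAB << 16) | 0xABAB == 0xABABABAB.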
3114 /// getMemsetStringVal - Similar to getMemsetValue. Except this is only
3115 /// used when a memcpy is turned into a memset when the source is a constant
3116 /// string.
3117 static SDOperand getMemsetStringVal(MVT::ValueType VT,
3118 SelectionDAG &DAG, TargetLowering &TLI,
3119 std::string &Str, unsigned Offset) {
3120 uint64_t Val = 0;
3121 unsigned MSB = getSizeInBits(VT) / 8;
3122 if (TLI.isLittleEndian())
3123 Offset = Offset + MSB - 1;
3124 for (unsigned i = 0; i != MSB; ++i) {
3125 Val = (Val << 8) | Str[Offset];
3126 Offset += TLI.isLittleEndian() ? -1 : 1;
3128 return DAG.getConstant(Val, VT);
3129 }
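// Illustrative trace: with VT = i32, Str = "abcd", Offset = 0 on a
// little-endian target, the loop reads 'd','c','b','a' and returns
// 0x64636261, which stores back to memory as the byte sequence "abcd".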
3131 /// getMemBasePlusOffset - Returns Base + Offset as a single ADD node of the
3132 /// base pointer's value type.
3132 static SDOperand getMemBasePlusOffset(SDOperand Base, unsigned Offset,
3133 SelectionDAG &DAG, TargetLowering &TLI) {
3134 MVT::ValueType VT = Base.getValueType();
3135 return DAG.getNode(ISD::ADD, VT, Base, DAG.getConstant(Offset, VT));
3136 }
3138 /// MeetsMaxMemopRequirement - Determines if the number of memory ops required
3139 /// to replace the memset / memcpy is below the threshold. It also returns the
3140 /// types of the sequence of memory ops to perform memset / memcpy.
3141 static bool MeetsMaxMemopRequirement(std::vector<MVT::ValueType> &MemOps,
3142 unsigned Limit, uint64_t Size,
3143 unsigned Align, TargetLowering &TLI) {
3144 MVT::ValueType VT;
3146 if (TLI.allowsUnalignedMemoryAccesses()) {
3147 VT = MVT::i64;
3148 } else {
3149 switch (Align & 7) {
3150 case 0:
3151 VT = MVT::i64;
3152 break;
3153 case 4:
3154 VT = MVT::i32;
3155 break;
3156 case 2:
3157 VT = MVT::i16;
3158 break;
3159 default:
3160 VT = MVT::i8;
3161 break;
3162 }
3163 }
3165 MVT::ValueType LVT = MVT::i64;
3166 while (!TLI.isTypeLegal(LVT))
3167 LVT = (MVT::ValueType)((unsigned)LVT - 1);
3168 assert(MVT::isInteger(LVT));
3170 if (VT > LVT)
3171 VT = LVT;
3173 unsigned NumMemOps = 0;
3174 while (Size != 0) {
3175 unsigned VTSize = getSizeInBits(VT) / 8;
3176 while (VTSize > Size) {
3177 VT = (MVT::ValueType)((unsigned)VT - 1);
3178 VTSize >>= 1;
3179 }
3180 assert(MVT::isInteger(VT));
3182 if (++NumMemOps > Limit)
3183 return false;
3184 MemOps.push_back(VT);
3185 Size -= VTSize;
3186 }
3188 return true;
3189 }
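// Illustrative trace: for Size = 11 bytes with Align = 4 on a target where
// i32 is legal, the loop produces MemOps = { i32, i32, i16, i8 }
// (4 + 4 + 2 + 1 == 11), i.e. four memory ops, well under a typical Limit
// such as TLI.getMaxStoresPerMemset().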
3191 void SelectionDAGLowering::visitMemIntrinsic(CallInst &I, unsigned Op) {
3192 SDOperand Op1 = getValue(I.getOperand(1));
3193 SDOperand Op2 = getValue(I.getOperand(2));
3194 SDOperand Op3 = getValue(I.getOperand(3));
3195 SDOperand Op4 = getValue(I.getOperand(4));
3196 unsigned Align = (unsigned)cast<ConstantSDNode>(Op4)->getValue();
3197 if (Align == 0) Align = 1;
3199 if (ConstantSDNode *Size = dyn_cast<ConstantSDNode>(Op3)) {
3200 std::vector<MVT::ValueType> MemOps;
3202 // Expand memset / memcpy to a series of load / store ops
3203 // if the size operand falls below a certain threshold.
3204 SmallVector<SDOperand, 8> OutChains;
3205 switch (Op) {
3206 default: break; // Do nothing for now.
3207 case ISD::MEMSET: {
3208 if (MeetsMaxMemopRequirement(MemOps, TLI.getMaxStoresPerMemset(),
3209 Size->getValue(), Align, TLI)) {
3210 unsigned NumMemOps = MemOps.size();
3211 unsigned Offset = 0;
3212 for (unsigned i = 0; i < NumMemOps; i++) {
3213 MVT::ValueType VT = MemOps[i];
3214 unsigned VTSize = getSizeInBits(VT) / 8;
3215 SDOperand Value = getMemsetValue(Op2, VT, DAG);
3216 SDOperand Store = DAG.getStore(getRoot(), Value,
3217 getMemBasePlusOffset(Op1, Offset, DAG, TLI),
3218 I.getOperand(1), Offset);
3219 OutChains.push_back(Store);
3220 Offset += VTSize;
3221 }
3222 }
3223 break;
3224 }
3225 case ISD::MEMCPY: {
3226 if (MeetsMaxMemopRequirement(MemOps, TLI.getMaxStoresPerMemcpy(),
3227 Size->getValue(), Align, TLI)) {
3228 unsigned NumMemOps = MemOps.size();
3229 unsigned SrcOff = 0, DstOff = 0, SrcDelta = 0;
3230 GlobalAddressSDNode *G = NULL;
3231 std::string Str;
3232 bool CopyFromStr = false;
3234 if (Op2.getOpcode() == ISD::GlobalAddress)
3235 G = cast<GlobalAddressSDNode>(Op2);
3236 else if (Op2.getOpcode() == ISD::ADD &&
3237 Op2.getOperand(0).getOpcode() == ISD::GlobalAddress &&
3238 Op2.getOperand(1).getOpcode() == ISD::Constant) {
3239 G = cast<GlobalAddressSDNode>(Op2.getOperand(0));
3240 SrcDelta = cast<ConstantSDNode>(Op2.getOperand(1))->getValue();
3241 }
3242 if (G) {
3243 GlobalVariable *GV = dyn_cast<GlobalVariable>(G->getGlobal());
3244 if (GV) {
3245 Str = GV->getStringValue(false);
3246 if (!Str.empty()) {
3247 CopyFromStr = true;
3248 SrcOff += SrcDelta;
3249 }
3250 }
3251 }
3253 for (unsigned i = 0; i < NumMemOps; i++) {
3254 MVT::ValueType VT = MemOps[i];
3255 unsigned VTSize = getSizeInBits(VT) / 8;
3256 SDOperand Value, Chain, Store;
3258 if (CopyFromStr) {
3259 Value = getMemsetStringVal(VT, DAG, TLI, Str, SrcOff);
3260 Chain = getRoot();
3261 Store =
3262 DAG.getStore(Chain, Value,
3263 getMemBasePlusOffset(Op1, DstOff, DAG, TLI),
3264 I.getOperand(1), DstOff);
3265 } else {
3266 Value = DAG.getLoad(VT, getRoot(),
3267 getMemBasePlusOffset(Op2, SrcOff, DAG, TLI),
3268 I.getOperand(2), SrcOff);
3269 Chain = Value.getValue(1);
3270 Store =
3271 DAG.getStore(Chain, Value,
3272 getMemBasePlusOffset(Op1, DstOff, DAG, TLI),
3273 I.getOperand(1), DstOff);
3274 }
3275 OutChains.push_back(Store);
3276 SrcOff += VTSize;
3277 DstOff += VTSize;
3278 }
3279 }
3280 break;
3281 }
3282 }
3284 if (!OutChains.empty()) {
3285 DAG.setRoot(DAG.getNode(ISD::TokenFactor, MVT::Other,
3286 &OutChains[0], OutChains.size()));
3287 return;
3288 }
3289 }
3291 DAG.setRoot(DAG.getNode(Op, MVT::Other, getRoot(), Op1, Op2, Op3, Op4));
3292 }
3294 //===----------------------------------------------------------------------===//
3295 // SelectionDAGISel code
3296 //===----------------------------------------------------------------------===//
3298 unsigned SelectionDAGISel::MakeReg(MVT::ValueType VT) {
3299 return RegMap->createVirtualRegister(TLI.getRegClassFor(VT));
3302 void SelectionDAGISel::getAnalysisUsage(AnalysisUsage &AU) const {
3303 // FIXME: we only modify the CFG to split critical edges. This
3304 // updates dom and loop info.
3305 AU.addRequired<AliasAnalysis>();
3309 /// OptimizeNoopCopyExpression - We have determined that the specified cast
3310 /// instruction is a noop copy (e.g. it's casting from one pointer type to
3311 /// another, int->uint, or int->sbyte on PPC).
3313 /// Return true if any changes are made.
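/// For instance (an illustration): if "%c = cast int* %P to uint*" is defined
/// in bb1 but used in bb2 and bb3, a copy of the cast is rematerialized in
/// bb2 and bb3 so the cast result no longer lives across basic blocks.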
3314 static bool OptimizeNoopCopyExpression(CastInst *CI) {
3315 BasicBlock *DefBB = CI->getParent();
3317 /// InsertedCasts - Only insert a cast in each block once.
3318 std::map<BasicBlock*, CastInst*> InsertedCasts;
3320 bool MadeChange = false;
3321 for (Value::use_iterator UI = CI->use_begin(), E = CI->use_end();
3322 UI != E; ) {
3323 Use &TheUse = UI.getUse();
3324 Instruction *User = cast<Instruction>(*UI);
3326 // Figure out which BB this cast is used in. For PHI's this is the
3327 // appropriate predecessor block.
3328 BasicBlock *UserBB = User->getParent();
3329 if (PHINode *PN = dyn_cast<PHINode>(User)) {
3330 unsigned OpVal = UI.getOperandNo()/2;
3331 UserBB = PN->getIncomingBlock(OpVal);
3334 // Preincrement use iterator so we don't invalidate it.
3335 ++UI;
3337 // If this user is in the same block as the cast, don't change the cast.
3338 if (UserBB == DefBB) continue;
3340 // If we have already inserted a cast into this block, use it.
3341 CastInst *&InsertedCast = InsertedCasts[UserBB];
3343 if (!InsertedCast) {
3344 BasicBlock::iterator InsertPt = UserBB->begin();
3345 while (isa<PHINode>(InsertPt)) ++InsertPt;
3347 InsertedCast =
3348 new CastInst(CI->getOperand(0), CI->getType(), "", InsertPt);
3349 MadeChange = true;
3350 }
3352 // Replace a use of the cast with a use of the new cast.
3353 TheUse = InsertedCast;
3356 // If we removed all uses, nuke the cast.
3357 if (CI->use_empty())
3358 CI->eraseFromParent();
3360 return MadeChange;
3361 }
3363 /// InsertGEPComputeCode - Insert code into BB to compute Ptr+PtrOffset,
3364 /// casting to the type of GEPI.
3365 static Instruction *InsertGEPComputeCode(Instruction *&V, BasicBlock *BB,
3366 Instruction *GEPI, Value *Ptr,
3368 if (V) return V; // Already computed.
3370 BasicBlock::iterator InsertPt;
3371 if (BB == GEPI->getParent()) {
3372 // If insert into the GEP's block, insert right after the GEP.
3373 InsertPt = GEPI;
3374 ++InsertPt;
3375 } else {
3376 // Otherwise, insert at the top of BB, after any PHI nodes
3377 InsertPt = BB->begin();
3378 while (isa<PHINode>(InsertPt)) ++InsertPt;
3379 }
3381 // If Ptr is itself a cast, but in some other BB, emit a copy of the cast into
3382 // BB so that there is only one value live across basic blocks (the cast
3383 // result).
3384 if (CastInst *CI = dyn_cast<CastInst>(Ptr))
3385 if (CI->getParent() != BB && isa<PointerType>(CI->getOperand(0)->getType()))
3386 Ptr = new CastInst(CI->getOperand(0), CI->getType(), "", InsertPt);
3388 // Add the offset, cast it to the right type.
3389 Ptr = BinaryOperator::createAdd(Ptr, PtrOffset, "", InsertPt);
3390 return V = new CastInst(Ptr, GEPI->getType(), "", InsertPt);
3393 /// ReplaceUsesOfGEPInst - Replace all uses of RepPtr with inserted code to
3394 /// compute its value. The RepPtr value can be computed with Ptr+PtrOffset. One
3395 /// trivial way of doing this would be to evaluate Ptr+PtrOffset in RepPtr's
3396 /// block, then ReplaceAllUsesWith'ing everything. However, we would prefer to
3397 /// sink PtrOffset into user blocks where doing so will likely allow us to fold
3398 /// the constant add into a load or store instruction. Additionally, if a user
3399 /// is a pointer-pointer cast, we look through it to find its users.
3400 static void ReplaceUsesOfGEPInst(Instruction *RepPtr, Value *Ptr,
3401 Constant *PtrOffset, BasicBlock *DefBB,
3402 GetElementPtrInst *GEPI,
3403 std::map<BasicBlock*,Instruction*> &InsertedExprs) {
3404 while (!RepPtr->use_empty()) {
3405 Instruction *User = cast<Instruction>(RepPtr->use_back());
3407 // If the user is a Pointer-Pointer cast, recurse.
3408 if (isa<CastInst>(User) && isa<PointerType>(User->getType())) {
3409 ReplaceUsesOfGEPInst(User, Ptr, PtrOffset, DefBB, GEPI, InsertedExprs);
3411 // Drop the use of RepPtr. The cast is dead. Don't delete it now, else we
3412 // could invalidate an iterator.
3413 User->setOperand(0, UndefValue::get(RepPtr->getType()));
3414 continue;
3415 }
3417 // If this is a load of the pointer, or a store through the pointer, emit
3418 // the increment into the load/store block.
3419 Instruction *NewVal;
3420 if (isa<LoadInst>(User) ||
3421 (isa<StoreInst>(User) && User->getOperand(0) != RepPtr)) {
3422 NewVal = InsertGEPComputeCode(InsertedExprs[User->getParent()],
3423 User->getParent(), GEPI,
3424 Ptr, PtrOffset);
3425 } else {
3426 // If this use is not foldable into the addressing mode, use a version
3427 // emitted in the GEP block.
3428 NewVal = InsertGEPComputeCode(InsertedExprs[DefBB], DefBB, GEPI,
3429 Ptr, PtrOffset);
3430 }
3432 if (GEPI->getType() != RepPtr->getType()) {
3433 BasicBlock::iterator IP = NewVal;
3434 ++IP;
3435 NewVal = new CastInst(NewVal, RepPtr->getType(), "", IP);
3437 User->replaceUsesOfWith(RepPtr, NewVal);
3442 /// OptimizeGEPExpression - Since we are doing basic-block-at-a-time instruction
3443 /// selection, we want to be a bit careful about some things. In particular, if
3444 /// we have a GEP instruction that is used in a different block than it is
3445 /// defined, the addressing expression of the GEP cannot be folded into loads or
3446 /// stores that use it. In this case, decompose the GEP and move constant
3447 /// indices into blocks that use it.
3448 static bool OptimizeGEPExpression(GetElementPtrInst *GEPI,
3449 const TargetData *TD) {
3450 // If this GEP is only used inside the block it is defined in, there is no
3451 // need to rewrite it.
3452 bool isUsedOutsideDefBB = false;
3453 BasicBlock *DefBB = GEPI->getParent();
3454 for (Value::use_iterator UI = GEPI->use_begin(), E = GEPI->use_end();
3455 UI != E; ++UI) {
3456 if (cast<Instruction>(*UI)->getParent() != DefBB) {
3457 isUsedOutsideDefBB = true;
3458 break;
3459 }
3460 }
3461 if (!isUsedOutsideDefBB) return false;
3463 // If this GEP has no non-zero constant indices, there is nothing we can do,
3464 // ignore it.
3465 bool hasConstantIndex = false;
3466 bool hasVariableIndex = false;
3467 for (GetElementPtrInst::op_iterator OI = GEPI->op_begin()+1,
3468 E = GEPI->op_end(); OI != E; ++OI) {
3469 if (ConstantInt *CI = dyn_cast<ConstantInt>(*OI)) {
3470 if (CI->getZExtValue()) {
3471 hasConstantIndex = true;
3472 break;
3473 }
3474 } else
3475 hasVariableIndex = true;
3479 // If this is a "GEP X, 0, 0, 0", turn this into a cast.
3480 if (!hasConstantIndex && !hasVariableIndex) {
3481 Value *NC = new CastInst(GEPI->getOperand(0), GEPI->getType(),
3482 GEPI->getName(), GEPI);
3483 GEPI->replaceAllUsesWith(NC);
3484 GEPI->eraseFromParent();
3485 return true;
3486 }
3488 // If this is a GEP &Alloca, 0, 0, forward subst the frame index into uses.
3489 if (!hasConstantIndex && !isa<AllocaInst>(GEPI->getOperand(0)))
3490 return false;
3492 // Otherwise, decompose the GEP instruction into multiplies and adds. Sum the
3493 // constant offset (which we now know is non-zero) and deal with it later.
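// For example (a sketch, assuming 32-bit pointers): decomposing
//   %t = getelementptr { int, [10 x int] }* %P, int 0, uint 1, int %i
// emits "Ptr = cast %P to uint" and "Ptr = Ptr + %i * 4" right here, while
// ConstantOffset accumulates 4 (the offset of field 1) for the sinking below.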
3494 uint64_t ConstantOffset = 0;
3495 const Type *UIntPtrTy = TD->getIntPtrType();
3496 Value *Ptr = new CastInst(GEPI->getOperand(0), UIntPtrTy, "", GEPI);
3497 const Type *Ty = GEPI->getOperand(0)->getType();
3499 for (GetElementPtrInst::op_iterator OI = GEPI->op_begin()+1,
3500 E = GEPI->op_end(); OI != E; ++OI) {
3501 Value *Idx = *OI;
3502 if (const StructType *StTy = dyn_cast<StructType>(Ty)) {
3503 unsigned Field = cast<ConstantInt>(Idx)->getZExtValue();
3505 ConstantOffset += TD->getStructLayout(StTy)->MemberOffsets[Field];
3506 Ty = StTy->getElementType(Field);
3507 } else {
3508 Ty = cast<SequentialType>(Ty)->getElementType();
3510 // Handle constant subscripts.
3511 if (ConstantInt *CI = dyn_cast<ConstantInt>(Idx)) {
3512 if (CI->getZExtValue() == 0) continue;
3513 if (CI->getType()->isSigned())
3514 ConstantOffset += (int64_t)TD->getTypeSize(Ty)*CI->getSExtValue();
3515 else
3516 ConstantOffset += TD->getTypeSize(Ty)*CI->getZExtValue();
3517 continue;
3518 }
3520 // Ptr = Ptr + Idx * ElementSize;
3522 // Cast Idx to UIntPtrTy if needed.
3523 Idx = new CastInst(Idx, UIntPtrTy, "", GEPI);
3525 uint64_t ElementSize = TD->getTypeSize(Ty);
3526 // Mask off bits that should not be set.
3527 ElementSize &= ~0ULL >> (64-UIntPtrTy->getPrimitiveSizeInBits());
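// e.g. (illustration) with 32-bit pointers this masks ElementSize with
// 0xFFFFFFFF, so the multiply below cannot be fed a value wider than IntPtr.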
3528 Constant *SizeCst = ConstantInt::get(UIntPtrTy, ElementSize);
3530 // Multiply by the element size and add to the base.
3531 Idx = BinaryOperator::createMul(Idx, SizeCst, "", GEPI);
3532 Ptr = BinaryOperator::createAdd(Ptr, Idx, "", GEPI);
3536 // Make sure that the offset fits in uintptr_t.
3537 ConstantOffset &= ~0ULL >> (64-UIntPtrTy->getPrimitiveSizeInBits());
3538 Constant *PtrOffset = ConstantInt::get(UIntPtrTy, ConstantOffset);
3540 // Okay, we have now emitted all of the variable index parts to the BB that
3541 // the GEP is defined in. Loop over all of the using instructions, inserting
3542 // an "add Ptr, ConstantOffset" into each block that uses it and update the
3543 // instruction to use the newly computed value, making GEPI dead. When the
3544 // user is a load or store instruction address, we emit the add into the user
3545 // block, otherwise we use a canonical version right next to the gep (these
3546 // won't be foldable as addresses, so we might as well share the computation).
3548 std::map<BasicBlock*,Instruction*> InsertedExprs;
3549 ReplaceUsesOfGEPInst(GEPI, Ptr, PtrOffset, DefBB, GEPI, InsertedExprs);
3551 // Finally, the GEP is dead, remove it.
3552 GEPI->eraseFromParent();
3553 return true;
3554 }
3558 /// SplitEdgeNicely - Split the critical edge from TI to its specified
3559 /// successor if it will improve codegen. We only do this if the successor has
3560 /// phi nodes (otherwise critical edges are ok). If there is already another
3561 /// predecessor of the succ that is empty (and thus has no phi nodes), use it
3562 /// instead of introducing a new block.
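/// For instance (an illustration): if TIBB conditionally branches to Dest,
/// Dest begins with "phi [%x, %TIBB], [%x, %E]", and %E is an empty block
/// ending in an unconditional branch to Dest, the TIBB edge can simply be
/// retargeted through %E instead of creating a new block.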
3563 static void SplitEdgeNicely(TerminatorInst *TI, unsigned SuccNum, Pass *P) {
3564 BasicBlock *TIBB = TI->getParent();
3565 BasicBlock *Dest = TI->getSuccessor(SuccNum);
3566 assert(isa<PHINode>(Dest->begin()) &&
3567 "This should only be called if Dest has a PHI!");
3569 /// TIPHIValues - This array is lazily computed to determine the values of
3570 /// PHIs in Dest that TI would provide.
3571 std::vector<Value*> TIPHIValues;
3573 // Check to see if Dest has any blocks that can be used as a split edge for
3574 // this TI.
3575 for (pred_iterator PI = pred_begin(Dest), E = pred_end(Dest); PI != E; ++PI) {
3576 BasicBlock *Pred = *PI;
3577 // To be usable, the pred has to end with an uncond branch to the dest.
3578 BranchInst *PredBr = dyn_cast<BranchInst>(Pred->getTerminator());
3579 if (!PredBr || !PredBr->isUnconditional() ||
3580 // Must be empty other than the branch.
3581 &Pred->front() != PredBr)
3582 continue;
3584 // Finally, since we know that Dest has phi nodes in it, we have to make
3585 // sure that jumping to Pred will have the same effect as going to Dest in
3586 // terms of PHI values.
3587 PHINode *PN;
3588 unsigned PHINo = 0;
3589 bool FoundMatch = true;
3590 for (BasicBlock::iterator I = Dest->begin();
3591 (PN = dyn_cast<PHINode>(I)); ++I, ++PHINo) {
3592 if (PHINo == TIPHIValues.size())
3593 TIPHIValues.push_back(PN->getIncomingValueForBlock(TIBB));
3595 // If the PHI entry doesn't work, we can't use this pred.
3596 if (TIPHIValues[PHINo] != PN->getIncomingValueForBlock(Pred)) {
3597 FoundMatch = false;
3598 break;
3599 }
3600 }
3602 // If we found a workable predecessor, change TI to branch to Pred instead.
3603 if (FoundMatch) {
3604 Dest->removePredecessor(TIBB);
3605 TI->setSuccessor(SuccNum, Pred);
3606 return;
3607 }
3608 }
3610 SplitCriticalEdge(TI, SuccNum, P, true);
3614 bool SelectionDAGISel::runOnFunction(Function &Fn) {
3615 MachineFunction &MF = MachineFunction::construct(&Fn, TLI.getTargetMachine());
3616 RegMap = MF.getSSARegMap();
3617 DEBUG(std::cerr << "\n\n\n=== " << Fn.getName() << "\n");
3619 // First, split all critical edges.
3621 // In this pass we also look for GEP and cast instructions that are used
3622 // across basic blocks and rewrite them to improve basic-block-at-a-time
3623 // selection.
3625 bool MadeChange = true;
3626 while (MadeChange) {
3627 MadeChange = false;
3628 for (Function::iterator BB = Fn.begin(), E = Fn.end(); BB != E; ++BB) {
3629 // Split all critical edges where the dest block has a PHI.
3630 TerminatorInst *BBTI = BB->getTerminator();
3631 if (BBTI->getNumSuccessors() > 1) {
3632 for (unsigned i = 0, e = BBTI->getNumSuccessors(); i != e; ++i)
3633 if (isa<PHINode>(BBTI->getSuccessor(i)->begin()) &&
3634 isCriticalEdge(BBTI, i, true))
3635 SplitEdgeNicely(BBTI, i, this);
3639 for (BasicBlock::iterator BBI = BB->begin(), E = BB->end(); BBI != E; ) {
3640 Instruction *I = BBI++;
3641 if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(I)) {
3642 MadeChange |= OptimizeGEPExpression(GEPI, TLI.getTargetData());
3643 } else if (CastInst *CI = dyn_cast<CastInst>(I)) {
3644 // If the source of the cast is a constant, then this should have
3645 // already been constant folded. The only reason NOT to constant fold
3646 // it is if something (e.g. LSR) was careful to place the constant
3647 // evaluation in a block other than the one that uses it (e.g. to hoist
3648 // the address of globals out of a loop). If this is the case, we don't
3649 // want to forward-subst the cast.
3650 if (isa<Constant>(CI->getOperand(0)))
3651 continue;
3653 // If this is a noop copy, sink it into user blocks to reduce the number
3654 // of virtual registers that must be created and coalesced.
3655 MVT::ValueType SrcVT = TLI.getValueType(CI->getOperand(0)->getType());
3656 MVT::ValueType DstVT = TLI.getValueType(CI->getType());
3658 // If this is an fp<->int conversion, it is not a noop copy.
3659 if (MVT::isInteger(SrcVT) != MVT::isInteger(DstVT))
3660 continue;
3662 // If this is an extension, it will be a zero or sign extension, which
3663 // isn't a noop copy.
3664 if (SrcVT < DstVT) continue;
3666 // If these values will be promoted, find out what they will be promoted
3667 // to. This helps us consider truncates on PPC as noop copies when they
3668 // are.
3669 if (TLI.getTypeAction(SrcVT) == TargetLowering::Promote)
3670 SrcVT = TLI.getTypeToTransformTo(SrcVT);
3671 if (TLI.getTypeAction(DstVT) == TargetLowering::Promote)
3672 DstVT = TLI.getTypeToTransformTo(DstVT);
3674 // If, after promotion, these are the same types, this is a noop copy.
3675 if (SrcVT == DstVT)
3676 MadeChange |= OptimizeNoopCopyExpression(CI);
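// Promotion example (a sketch): on a target that promotes both i16 and
// i8 to i32, a short->sbyte cast sees SrcVT == DstVT == i32 after the
// adjustment above, so it is sunk into its user blocks as a noop copy.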
3682 FunctionLoweringInfo FuncInfo(TLI, Fn, MF);
3684 for (Function::iterator I = Fn.begin(), E = Fn.end(); I != E; ++I)
3685 SelectBasicBlock(I, MF, FuncInfo);
3687 return true;
3688 }
3690 SDOperand SelectionDAGLowering::CopyValueToVirtualRegister(Value *V,
3691 unsigned Reg) {
3692 SDOperand Op = getValue(V);
3693 assert((Op.getOpcode() != ISD::CopyFromReg ||
3694 cast<RegisterSDNode>(Op.getOperand(1))->getReg() != Reg) &&
3695 "Copy from a reg to the same reg!");
3697 // If this type is not legal, we must make sure to not create an invalid
3698 // register use.
3699 MVT::ValueType SrcVT = Op.getValueType();
3700 MVT::ValueType DestVT = TLI.getTypeToTransformTo(SrcVT);
3701 if (SrcVT == DestVT) {
3702 return DAG.getCopyToReg(getRoot(), Reg, Op);
3703 } else if (SrcVT == MVT::Vector) {
3704 // Handle copies from generic vectors to registers.
3705 MVT::ValueType PTyElementVT, PTyLegalElementVT;
3706 unsigned NE = TLI.getPackedTypeBreakdown(cast<PackedType>(V->getType()),
3707 PTyElementVT, PTyLegalElementVT);
3709 // Insert a VBIT_CONVERT of the input vector to a "N x PTyElementVT"
3710 // MVT::Vector type.
3711 Op = DAG.getNode(ISD::VBIT_CONVERT, MVT::Vector, Op,
3712 DAG.getConstant(NE, MVT::i32),
3713 DAG.getValueType(PTyElementVT));
3715 // Loop over all of the elements of the resultant vector,
3716 // VEXTRACT_VECTOR_ELT'ing them, converting them to PTyLegalElementVT, then
3717 // copying them into output registers.
3718 SmallVector<SDOperand, 8> OutChains;
3719 SDOperand Root = getRoot();
3720 for (unsigned i = 0; i != NE; ++i) {
3721 SDOperand Elt = DAG.getNode(ISD::VEXTRACT_VECTOR_ELT, PTyElementVT,
3722 Op, DAG.getConstant(i, TLI.getPointerTy()));
3723 if (PTyElementVT == PTyLegalElementVT) {
3724 // Elements are legal.
3725 OutChains.push_back(DAG.getCopyToReg(Root, Reg++, Elt));
3726 } else if (PTyLegalElementVT > PTyElementVT) {
3727 // Elements are promoted.
3728 if (MVT::isFloatingPoint(PTyLegalElementVT))
3729 Elt = DAG.getNode(ISD::FP_EXTEND, PTyLegalElementVT, Elt);
3730 else
3731 Elt = DAG.getNode(ISD::ANY_EXTEND, PTyLegalElementVT, Elt);
3732 OutChains.push_back(DAG.getCopyToReg(Root, Reg++, Elt));
3733 } else {
3734 // Elements are expanded.
3735 // The src value is expanded into multiple registers.
3736 SDOperand Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, PTyLegalElementVT,
3737 Elt, DAG.getConstant(0, TLI.getPointerTy()));
3738 SDOperand Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, PTyLegalElementVT,
3739 Elt, DAG.getConstant(1, TLI.getPointerTy()));
3740 OutChains.push_back(DAG.getCopyToReg(Root, Reg++, Lo));
3741 OutChains.push_back(DAG.getCopyToReg(Root, Reg++, Hi));
3744 return DAG.getNode(ISD::TokenFactor, MVT::Other,
3745 &OutChains[0], OutChains.size());
3746 } else if (SrcVT < DestVT) {
3747 // The src value is promoted to the register.
3748 if (MVT::isFloatingPoint(SrcVT))
3749 Op = DAG.getNode(ISD::FP_EXTEND, DestVT, Op);
3750 else
3751 Op = DAG.getNode(ISD::ANY_EXTEND, DestVT, Op);
3752 return DAG.getCopyToReg(getRoot(), Reg, Op);
3753 } else {
3754 // The src value is expanded into multiple registers.
3755 SDOperand Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DestVT,
3756 Op, DAG.getConstant(0, TLI.getPointerTy()));
3757 SDOperand Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DestVT,
3758 Op, DAG.getConstant(1, TLI.getPointerTy()));
3759 Op = DAG.getCopyToReg(getRoot(), Reg, Lo);
3760 return DAG.getCopyToReg(Op, Reg+1, Hi);
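// Expansion example (illustrative): an i64 value on a 32-bit target is split
// by the two EXTRACT_ELEMENTs above into Lo (element 0) and Hi (element 1),
// which land in the consecutive virtual registers Reg and Reg+1.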
3764 void SelectionDAGISel::
3765 LowerArguments(BasicBlock *BB, SelectionDAGLowering &SDL,
3766 std::vector<SDOperand> &UnorderedChains) {
3767 // If this is the entry block, emit arguments.
3768 Function &F = *BB->getParent();
3769 FunctionLoweringInfo &FuncInfo = SDL.FuncInfo;
3770 SDOperand OldRoot = SDL.DAG.getRoot();
3771 std::vector<SDOperand> Args = TLI.LowerArguments(F, SDL.DAG);
3773 unsigned a = 0;
3774 for (Function::arg_iterator AI = F.arg_begin(), E = F.arg_end();
3775 AI != E; ++AI, ++a)
3776 if (!AI->use_empty()) {
3777 SDL.setValue(AI, Args[a]);
3779 // If this argument is live outside of the entry block, insert a copy from
3780 // wherever we got it to the vreg that other BBs will reference it as.
3781 if (FuncInfo.ValueMap.count(AI)) {
3782 SDOperand Copy =
3783 SDL.CopyValueToVirtualRegister(AI, FuncInfo.ValueMap[AI]);
3784 UnorderedChains.push_back(Copy);
3788 // Finally, if the target has anything special to do, allow it to do so.
3789 // FIXME: this should insert code into the DAG!
3790 EmitFunctionEntryCode(F, SDL.DAG.getMachineFunction());
3793 void SelectionDAGISel::BuildSelectionDAG(SelectionDAG &DAG, BasicBlock *LLVMBB,
3794 std::vector<std::pair<MachineInstr*, unsigned> > &PHINodesToUpdate,
3795 FunctionLoweringInfo &FuncInfo) {
3796 SelectionDAGLowering SDL(DAG, TLI, FuncInfo);
3798 std::vector<SDOperand> UnorderedChains;
3800 // Lower any arguments needed in this block if this is the entry block.
3801 if (LLVMBB == &LLVMBB->getParent()->front())
3802 LowerArguments(LLVMBB, SDL, UnorderedChains);
3804 BB = FuncInfo.MBBMap[LLVMBB];
3805 SDL.setCurrentBasicBlock(BB);
3807 // Lower all of the non-terminator instructions.
3808 for (BasicBlock::iterator I = LLVMBB->begin(), E = --LLVMBB->end();
3809 I != E; ++I)
3810 SDL.visit(*I);
3812 // Ensure that all instructions which are used outside of their defining
3813 // blocks are available as virtual registers.
3814 for (BasicBlock::iterator I = LLVMBB->begin(), E = LLVMBB->end(); I != E;++I)
3815 if (!I->use_empty() && !isa<PHINode>(I)) {
3816 std::map<const Value*, unsigned>::iterator VMI = FuncInfo.ValueMap.find(I);
3817 if (VMI != FuncInfo.ValueMap.end())
3818 UnorderedChains.push_back(
3819 SDL.CopyValueToVirtualRegister(I, VMI->second));
3822 // Handle PHI nodes in successor blocks. Emit code into the SelectionDAG to
3823 // ensure constants are generated when needed. Remember the virtual registers
3824 // that need to be added to the Machine PHI nodes as input. We cannot just
3825 // directly add them, because expansion might result in multiple MBB's for one
3826 // BB. As such, the start of the BB might correspond to a different MBB than
3827 // the end.
3829 TerminatorInst *TI = LLVMBB->getTerminator();
3831 // Emit constants only once even if used by multiple PHI nodes.
3832 std::map<Constant*, unsigned> ConstantsOut;
3834 // Vector bool would be better, but vector<bool> is really slow.
3835 std::vector<unsigned char> SuccsHandled;
3836 if (TI->getNumSuccessors())
3837 SuccsHandled.resize(BB->getParent()->getNumBlockIDs());
3839 // Check successor nodes' PHI nodes that expect a constant to be available
3840 // from this block.
3841 for (unsigned succ = 0, e = TI->getNumSuccessors(); succ != e; ++succ) {
3842 BasicBlock *SuccBB = TI->getSuccessor(succ);
3843 if (!isa<PHINode>(SuccBB->begin())) continue;
3844 MachineBasicBlock *SuccMBB = FuncInfo.MBBMap[SuccBB];
3846 // If this terminator has multiple identical successors (common for
3847 // switches), only handle each succ once.
3848 unsigned SuccMBBNo = SuccMBB->getNumber();
3849 if (SuccsHandled[SuccMBBNo]) continue;
3850 SuccsHandled[SuccMBBNo] = true;
3852 MachineBasicBlock::iterator MBBI = SuccMBB->begin();
3853 PHINode *PN;
3855 // At this point we know that there is a 1-1 correspondence between LLVM PHI
3856 // nodes and Machine PHI nodes, but the incoming operands have not been
3857 // emitted yet.
3858 for (BasicBlock::iterator I = SuccBB->begin();
3859 (PN = dyn_cast<PHINode>(I)); ++I) {
3860 // Ignore dead PHIs.
3861 if (PN->use_empty()) continue;
3863 unsigned Reg;
3864 Value *PHIOp = PN->getIncomingValueForBlock(LLVMBB);
3865 if (Constant *C = dyn_cast<Constant>(PHIOp)) {
3866 unsigned &RegOut = ConstantsOut[C];
3867 if (RegOut == 0) {
3868 RegOut = FuncInfo.CreateRegForValue(C);
3869 UnorderedChains.push_back(
3870 SDL.CopyValueToVirtualRegister(C, RegOut));
3871 }
3872 Reg = RegOut;
3873 } else {
3874 Reg = FuncInfo.ValueMap[PHIOp];
3875 if (Reg == 0) {
3876 assert(isa<AllocaInst>(PHIOp) &&
3877 FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(PHIOp)) &&
3878 "Didn't codegen value into a register!??");
3879 Reg = FuncInfo.CreateRegForValue(PHIOp);
3880 UnorderedChains.push_back(
3881 SDL.CopyValueToVirtualRegister(PHIOp, Reg));
3885 // Remember that this register needs to be added to the machine PHI node as
3886 // the input for this MBB.
3887 MVT::ValueType VT = TLI.getValueType(PN->getType());
3888 unsigned NumElements;
3889 if (VT != MVT::Vector)
3890 NumElements = TLI.getNumElements(VT);
3891 else {
3892 MVT::ValueType VT1, VT2;
3893 NumElements =
3894 TLI.getPackedTypeBreakdown(cast<PackedType>(PN->getType()),
3895 VT1, VT2);
3896 }
3897 for (unsigned i = 0, e = NumElements; i != e; ++i)
3898 PHINodesToUpdate.push_back(std::make_pair(MBBI++, Reg+i));
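// e.g. (a sketch) an i64 PHI on a 32-bit target has NumElements == 2, so
// both halves, Reg and Reg+1, are recorded for this machine PHI node.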
3901 ConstantsOut.clear();
3903 // Turn all of the unordered chains into one factored node.
3904 if (!UnorderedChains.empty()) {
3905 SDOperand Root = SDL.getRoot();
3906 if (Root.getOpcode() != ISD::EntryToken) {
3907 unsigned i = 0, e = UnorderedChains.size();
3908 for (; i != e; ++i) {
3909 assert(UnorderedChains[i].Val->getNumOperands() > 1);
3910 if (UnorderedChains[i].Val->getOperand(0) == Root)
3911 break; // Don't add the root if we already indirectly depend on it.
3912 }
3914 if (i == e)
3915 UnorderedChains.push_back(Root);
3916 }
3917 DAG.setRoot(DAG.getNode(ISD::TokenFactor, MVT::Other,
3918 &UnorderedChains[0], UnorderedChains.size()));
3921 // Lower the terminator after the copies are emitted.
3922 SDL.visit(*LLVMBB->getTerminator());
3924 // Copy over any CaseBlock records that may now exist due to SwitchInst
3925 // lowering, as well as any jump table information.
3926 SwitchCases.clear();
3927 SwitchCases = SDL.SwitchCases;
3928 JT = SDL.JT;
3930 // Make sure the root of the DAG is up-to-date.
3931 DAG.setRoot(SDL.getRoot());
3934 void SelectionDAGISel::CodeGenAndEmitDAG(SelectionDAG &DAG) {
3935 // Get alias analysis for load/store combining.
3936 AliasAnalysis &AA = getAnalysis<AliasAnalysis>();
3938 // Run the DAG combiner in pre-legalize mode.
3939 DAG.Combine(false, AA);
3941 DEBUG(std::cerr << "Lowered selection DAG:\n");
3944 // Second step, hack on the DAG until it only uses operations and types that
3945 // the target supports.
3946 DAG.Legalize();
3948 DEBUG(std::cerr << "Legalized selection DAG:\n");
3951 // Run the DAG combiner in post-legalize mode.
3952 DAG.Combine(true, AA);
3954 if (ViewISelDAGs) DAG.viewGraph();
3956 // Third, instruction select all of the operations to machine code, adding the
3957 // code to the MachineBasicBlock.
3958 InstructionSelectBasicBlock(DAG);
3960 DEBUG(std::cerr << "Selected machine code:\n");
3964 void SelectionDAGISel::SelectBasicBlock(BasicBlock *LLVMBB, MachineFunction &MF,
3965 FunctionLoweringInfo &FuncInfo) {
3966 std::vector<std::pair<MachineInstr*, unsigned> > PHINodesToUpdate;
3968 SelectionDAG DAG(TLI, MF, getAnalysisToUpdate<MachineDebugInfo>());
3971 // First step, lower LLVM code to some DAG. This DAG may use operations and
3972 // types that are not supported by the target.
3973 BuildSelectionDAG(DAG, LLVMBB, PHINodesToUpdate, FuncInfo);
3975 // Second step, emit the lowered DAG as machine code.
3976 CodeGenAndEmitDAG(DAG);
3979 // Next, now that we know what the last MBB the LLVM BB expanded is, update
3980 // PHI nodes in successors.
3981 if (SwitchCases.empty() && JT.Reg == 0) {
3982 for (unsigned i = 0, e = PHINodesToUpdate.size(); i != e; ++i) {
3983 MachineInstr *PHI = PHINodesToUpdate[i].first;
3984 assert(PHI->getOpcode() == TargetInstrInfo::PHI &&
3985 "This is not a machine PHI node that we are updating!");
3986 PHI->addRegOperand(PHINodesToUpdate[i].second, false);
3987 PHI->addMachineBasicBlockOperand(BB);
3988 }
3989 return;
3990 }
3992 // If the JumpTable record is filled in, then we need to emit a jump table.
3993 // Updating the PHI nodes is tricky in this case, since we need to determine
3994 // whether the PHI is a successor of the range check MBB or the jump table MBB.
3995 if (JT.Reg) {
3996 assert(SwitchCases.empty() && "Cannot have jump table and lowered switch");
3997 SelectionDAG SDAG(TLI, MF, getAnalysisToUpdate<MachineDebugInfo>());
3999 SelectionDAGLowering SDL(SDAG, TLI, FuncInfo);
4000 MachineBasicBlock *RangeBB = BB;
4001 // Set the current basic block to the mbb we wish to insert the code into
4002 BB = JT.MBB;
4003 SDL.setCurrentBasicBlock(BB);
4005 SDL.visitJumpTable(JT);
4006 SDAG.setRoot(SDL.getRoot());
4007 CodeGenAndEmitDAG(SDAG);
4009 for (unsigned pi = 0, pe = PHINodesToUpdate.size(); pi != pe; ++pi) {
4010 MachineInstr *PHI = PHINodesToUpdate[pi].first;
4011 MachineBasicBlock *PHIBB = PHI->getParent();
4012 assert(PHI->getOpcode() == TargetInstrInfo::PHI &&
4013 "This is not a machine PHI node that we are updating!");
4014 if (PHIBB == JT.Default) {
4015 PHI->addRegOperand(PHINodesToUpdate[pi].second, false);
4016 PHI->addMachineBasicBlockOperand(RangeBB);
4017 }
4018 if (BB->succ_end() != std::find(BB->succ_begin(),BB->succ_end(), PHIBB)) {
4019 PHI->addRegOperand(PHINodesToUpdate[pi].second, false);
4020 PHI->addMachineBasicBlockOperand(BB);
4021 }
4022 }
4023 return;
4024 }
4026 // If the switch block involved a branch to one of the actual successors, we
4027 // need to update PHI nodes in that block.
4028 for (unsigned i = 0, e = PHINodesToUpdate.size(); i != e; ++i) {
4029 MachineInstr *PHI = PHINodesToUpdate[i].first;
4030 assert(PHI->getOpcode() == TargetInstrInfo::PHI &&
4031 "This is not a machine PHI node that we are updating!");
4032 if (BB->isSuccessor(PHI->getParent())) {
4033 PHI->addRegOperand(PHINodesToUpdate[i].second, false);
4034 PHI->addMachineBasicBlockOperand(BB);
4038 // If we generated any switch lowering information, build and codegen any
4039 // additional DAGs necessary.
4040 for (unsigned i = 0, e = SwitchCases.size(); i != e; ++i) {
4041 SelectionDAG SDAG(TLI, MF, getAnalysisToUpdate<MachineDebugInfo>());
4043 SelectionDAGLowering SDL(SDAG, TLI, FuncInfo);
4045 // Set the current basic block to the mbb we wish to insert the code into
4046 BB = SwitchCases[i].ThisBB;
4047 SDL.setCurrentBasicBlock(BB);
4050 SDL.visitSwitchCase(SwitchCases[i]);
4051 SDAG.setRoot(SDL.getRoot());
4052 CodeGenAndEmitDAG(SDAG);
4054 // Handle any PHI nodes in successors of this chunk, as if we were coming
4055 // from the original BB before switch expansion. Note that PHI nodes can
4056 // occur multiple times in PHINodesToUpdate. We have to be very careful to
4057 // handle them the right number of times.
4058 while ((BB = SwitchCases[i].TrueBB)) { // Handle LHS and RHS.
4059 for (MachineBasicBlock::iterator Phi = BB->begin();
4060 Phi != BB->end() && Phi->getOpcode() == TargetInstrInfo::PHI; ++Phi){
4061 // This value for this PHI node is recorded in PHINodesToUpdate, get it.
4062 for (unsigned pn = 0; ; ++pn) {
4063 assert(pn != PHINodesToUpdate.size() && "Didn't find PHI entry!");
4064 if (PHINodesToUpdate[pn].first == Phi) {
4065 Phi->addRegOperand(PHINodesToUpdate[pn].second, false);
4066 Phi->addMachineBasicBlockOperand(SwitchCases[i].ThisBB);
4067 break;
4068 }
4069 }
4070 }
4072 // Don't process RHS if same block as LHS.
4073 if (BB == SwitchCases[i].FalseBB)
4074 SwitchCases[i].FalseBB = 0;
4076 // If we haven't handled the RHS, do so now. Otherwise, we're done.
4077 SwitchCases[i].TrueBB = SwitchCases[i].FalseBB;
4078 SwitchCases[i].FalseBB = 0;
4080 assert(SwitchCases[i].TrueBB == 0 && SwitchCases[i].FalseBB == 0);
4085 //===----------------------------------------------------------------------===//
4086 /// ScheduleAndEmitDAG - Pick a safe ordering and emit instructions for each
4087 /// target node in the graph.
4088 void SelectionDAGISel::ScheduleAndEmitDAG(SelectionDAG &DAG) {
4089 if (ViewSchedDAGs) DAG.viewGraph();
4091 RegisterScheduler::FunctionPassCtor Ctor = RegisterScheduler::getDefault();
4093 if (!Ctor) {
4094 Ctor = ISHeuristic;
4095 RegisterScheduler::setDefault(Ctor);
4096 }
4098 ScheduleDAG *SL = Ctor(this, &DAG, BB);
4099 BB = SL->Run();
4100 delete SL;
4101 }
4104 HazardRecognizer *SelectionDAGISel::CreateTargetHazardRecognizer() {
4105 return new HazardRecognizer();
4108 //===----------------------------------------------------------------------===//
4109 // Helper functions used by the generated instruction selector.
4110 //===----------------------------------------------------------------------===//
4111 // Calls to these methods are generated by tblgen.
4113 /// CheckAndMask - The isel is trying to match something like (and X, 255). If
4114 /// the dag combiner simplified the 255, we still want to match. RHS is the
4115 /// actual value in the DAG on the RHS of an AND, and DesiredMaskS is the value
4116 /// specified in the .td file (e.g. 255).
4117 bool SelectionDAGISel::CheckAndMask(SDOperand LHS, ConstantSDNode *RHS,
4118 int64_t DesiredMaskS) {
4119 uint64_t ActualMask = RHS->getValue();
4120 uint64_t DesiredMask = DesiredMaskS & MVT::getIntVTBitMask(LHS.getValueType());
4122 // If the actual mask exactly matches, success!
4123 if (ActualMask == DesiredMask)
4124 return true;
4126 // If the actual AND mask allows bits outside the desired mask, no match.
4127 if (ActualMask & ~DesiredMask)
4128 return false;
4130 // Otherwise, the DAG Combiner may have proven that the value coming in is
4131 // either already zero or is not demanded. Check for known zero input bits.
4132 uint64_t NeededMask = DesiredMask & ~ActualMask;
4133 if (getTargetLowering().MaskedValueIsZero(LHS, NeededMask))
4134 return true;
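// Worked example (a sketch): DesiredMask == 0xFF with ActualMask == 0xF0
// gives NeededMask == 0x0F; if the low four bits of LHS are known zero, the
// simplified AND still behaves like (and X, 255), so the pattern matches.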
4136 // TODO: check to see if missing bits are just not demanded.
4138 // Otherwise, this pattern doesn't match.
4139 return false;
4140 }
4142 /// CheckOrMask - The isel is trying to match something like (or X, 255). If
4143 /// the dag combiner simplified the 255, we still want to match. RHS is the
4144 /// actual value in the DAG on the RHS of an OR, and DesiredMaskS is the value
4145 /// specified in the .td file (e.g. 255).
4146 bool SelectionDAGISel::CheckOrMask(SDOperand LHS, ConstantSDNode *RHS,
4147 int64_t DesiredMaskS) {
4148 uint64_t ActualMask = RHS->getValue();
4149 uint64_t DesiredMask = DesiredMaskS & MVT::getIntVTBitMask(LHS.getValueType());
4151 // If the actual mask exactly matches, success!
4152 if (ActualMask == DesiredMask)
4153 return true;
4155 // If the actual OR mask sets bits outside the desired mask, no match.
4156 if (ActualMask & ~DesiredMask)
4157 return false;
4159 // Otherwise, the DAG Combiner may have proven that the value coming in is
4160 // either already zero or is not demanded. Check for known zero input bits.
4161 uint64_t NeededMask = DesiredMask & ~ActualMask;
4163 uint64_t KnownZero, KnownOne;
4164 getTargetLowering().ComputeMaskedBits(LHS, NeededMask, KnownZero, KnownOne);
4166 // If all the missing bits in the or are already known to be set, match!
4167 if ((NeededMask & KnownOne) == NeededMask)
4168 return true;
4170 // TODO: check to see if missing bits are just not demanded.
4172 // Otherwise, this pattern doesn't match.
4173 return false;
4174 }
4177 /// SelectInlineAsmMemoryOperands - Calls to this are automatically generated
4178 /// by tblgen. Others should not call it.
4179 void SelectionDAGISel::
4180 SelectInlineAsmMemoryOperands(std::vector<SDOperand> &Ops, SelectionDAG &DAG) {
4181 std::vector<SDOperand> InOps;
4182 std::swap(InOps, Ops);
4184 Ops.push_back(InOps[0]); // input chain.
4185 Ops.push_back(InOps[1]); // input asm string.
4187 unsigned i = 2, e = InOps.size();
4188 if (InOps[e-1].getValueType() == MVT::Flag)
4189 --e;  // Don't process a flag operand if it is here.
4191 while (i != e) {
4192 unsigned Flags = cast<ConstantSDNode>(InOps[i])->getValue();
4193 if ((Flags & 7) != 4 /*MEM*/) {
4194 // Just skip over this operand, copying the operands verbatim.
4195 Ops.insert(Ops.end(), InOps.begin()+i, InOps.begin()+i+(Flags >> 3) + 1);
4196 i += (Flags >> 3) + 1;
4197 } else {
4198 assert((Flags >> 3) == 1 && "Memory operand with multiple values?");
4199 // Otherwise, this is a memory operand. Ask the target to select it.
4200 std::vector<SDOperand> SelOps;
4201 if (SelectInlineAsmMemoryOperand(InOps[i+1], 'm', SelOps, DAG)) {
4202 std::cerr << "Could not match memory address. Inline asm failure!\n";
4206 // Add this to the output node.
4207 Ops.push_back(DAG.getConstant(4/*MEM*/ | (SelOps.size() << 3), MVT::i32));
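// Flag-word encoding (illustration): the low three bits hold the operand
// kind (4 == MEM) and the remaining bits hold the operand count, so one
// selected memory operand is encoded as 4 | (1 << 3) == 12.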
4208 Ops.insert(Ops.end(), SelOps.begin(), SelOps.end());
4209 i += 2;
4210 }
4211 }
4213 // Add the flag input back if present.
4214 if (e != InOps.size())
4215 Ops.push_back(InOps.back());