//===-- SelectionDAGISel.cpp - Implement the SelectionDAGISel class -------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file was developed by the LLVM research group and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements the SelectionDAGISel class.
//
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "isel"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/CodeGen/ScheduleDAG.h"
#include "llvm/CallingConv.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Function.h"
#include "llvm/GlobalVariable.h"
#include "llvm/InlineAsm.h"
#include "llvm/Instructions.h"
#include "llvm/Intrinsics.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/CodeGen/IntrinsicLowering.h"
#include "llvm/CodeGen/MachineDebugInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/SchedulerRegistry.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SSARegMap.h"
#include "llvm/Target/MRegisterInfo.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetFrameInfo.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/CommandLine.h"
#include <map>
#include <set>
#include <algorithm>
using namespace llvm;

#ifndef NDEBUG
static cl::opt<bool>
ViewISelDAGs("view-isel-dags", cl::Hidden,
          cl::desc("Pop up a window to show isel dags as they are selected"));
static cl::opt<bool>
ViewSchedDAGs("view-sched-dags", cl::Hidden,
          cl::desc("Pop up a window to show sched dags as they are processed"));
#else
static const bool ViewISelDAGs = 0, ViewSchedDAGs = 0;
#endif
//===---------------------------------------------------------------------===//
///
/// RegisterScheduler class - Track the registration of instruction schedulers.
///
//===---------------------------------------------------------------------===//
MachinePassRegistry RegisterScheduler::Registry;

//===---------------------------------------------------------------------===//
///
/// ISHeuristic command line option for instruction schedulers.
///
//===---------------------------------------------------------------------===//
cl::opt<RegisterScheduler::FunctionPassCtor, false,
        RegisterPassParser<RegisterScheduler> >
ISHeuristic("sched",
            cl::init(&createDefaultScheduler),
            cl::desc("Instruction schedulers available:"));

static RegisterScheduler
defaultListDAGScheduler("default", "  Best scheduler for the target",
                        createDefaultScheduler);
/// RegsForValue - This struct represents the physical registers that a
/// particular value is assigned and the type information about the value.
/// This is needed because values can be promoted into larger registers and
/// expanded into multiple smaller registers than the value.
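/// For example, an i64 value on a typical 32-bit target is described here by
/// two i32 registers, with RegVT == i32 and ValueVT == i64.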
struct VISIBILITY_HIDDEN RegsForValue {
  /// Regs - This list holds the register (for legal and promoted values)
  /// or register set (for expanded values) that the value should be assigned
  /// to.
  std::vector<unsigned> Regs;

  /// RegVT - The value type of each register.
  MVT::ValueType RegVT;

  /// ValueVT - The value type of the LLVM value, which may be promoted from
  /// RegVT or made from merging the two expanded parts.
  MVT::ValueType ValueVT;

  RegsForValue() : RegVT(MVT::Other), ValueVT(MVT::Other) {}

  RegsForValue(unsigned Reg, MVT::ValueType regvt, MVT::ValueType valuevt)
    : RegVT(regvt), ValueVT(valuevt) {
    Regs.push_back(Reg);
  }

  RegsForValue(const std::vector<unsigned> &regs,
               MVT::ValueType regvt, MVT::ValueType valuevt)
    : Regs(regs), RegVT(regvt), ValueVT(valuevt) {
  }

  /// getCopyFromRegs - Emit a series of CopyFromReg nodes that copies from
  /// this value and returns the result as a ValueVT value.  This uses
  /// Chain/Flag as the input and updates them for the output Chain/Flag.
  SDOperand getCopyFromRegs(SelectionDAG &DAG,
                            SDOperand &Chain, SDOperand &Flag) const;

  /// getCopyToRegs - Emit a series of CopyToReg nodes that copies the
  /// specified value into the registers specified by this object.  This uses
  /// Chain/Flag as the input and updates them for the output Chain/Flag.
  void getCopyToRegs(SDOperand Val, SelectionDAG &DAG,
                     SDOperand &Chain, SDOperand &Flag,
                     MVT::ValueType PtrVT) const;

  /// AddInlineAsmOperands - Add this value to the specified inlineasm node
  /// operand list.  This adds the code marker and includes the number of
  /// values added into it.
  void AddInlineAsmOperands(unsigned Code, SelectionDAG &DAG,
                            std::vector<SDOperand> &Ops) const;
};
//===--------------------------------------------------------------------===//
/// createDefaultScheduler - This creates an instruction scheduler appropriate
/// for the target.
ScheduleDAG* createDefaultScheduler(SelectionDAGISel *IS,
                                    SelectionDAG *DAG,
                                    MachineBasicBlock *BB) {
  TargetLowering &TLI = IS->getTargetLowering();

  if (TLI.getSchedulingPreference() == TargetLowering::SchedulingForLatency) {
    return createTDListDAGScheduler(IS, DAG, BB);
  } else {
    assert(TLI.getSchedulingPreference() ==
           TargetLowering::SchedulingForRegPressure && "Unknown sched type!");
    return createBURRListDAGScheduler(IS, DAG, BB);
  }
}
//===--------------------------------------------------------------------===//
/// FunctionLoweringInfo - This contains information that is global to a
/// function that is used when lowering a region of the function.
class FunctionLoweringInfo {
public:
  TargetLowering &TLI;
  Function &Fn;
  MachineFunction &MF;
  SSARegMap *RegMap;

  FunctionLoweringInfo(TargetLowering &TLI, Function &Fn, MachineFunction &MF);

  /// MBBMap - A mapping from LLVM basic blocks to their machine code entry.
  std::map<const BasicBlock*, MachineBasicBlock *> MBBMap;

  /// ValueMap - Since we emit code for the function a basic block at a time,
  /// we must remember which virtual registers hold the values for
  /// cross-basic-block values.
  std::map<const Value*, unsigned> ValueMap;

  /// StaticAllocaMap - Keep track of frame indices for fixed sized allocas in
  /// the entry block.  This allows the allocas to be efficiently referenced
  /// anywhere in the function.
  std::map<const AllocaInst*, int> StaticAllocaMap;

  unsigned MakeReg(MVT::ValueType VT) {
    return RegMap->createVirtualRegister(TLI.getRegClassFor(VT));
  }

  unsigned CreateRegForValue(const Value *V);

  unsigned InitializeRegForValue(const Value *V) {
    unsigned &R = ValueMap[V];
    assert(R == 0 && "Already initialized this value register!");
    return R = CreateRegForValue(V);
  }
};
/// isUsedOutsideOfDefiningBlock - Return true if this instruction is used by
/// PHI nodes or outside of the basic block that defines it, or used by a
/// switch instruction, which may expand to multiple basic blocks.
static bool isUsedOutsideOfDefiningBlock(Instruction *I) {
  if (isa<PHINode>(I)) return true;
  BasicBlock *BB = I->getParent();
  for (Value::use_iterator UI = I->use_begin(), E = I->use_end(); UI != E; ++UI)
    if (cast<Instruction>(*UI)->getParent() != BB || isa<PHINode>(*UI) ||
        isa<SwitchInst>(*UI))
      return true;
  return false;
}
/// isOnlyUsedInEntryBlock - If the specified argument is only used in the
/// entry block, return true.  This includes arguments used by switches, since
/// the switch may expand into multiple basic blocks.
static bool isOnlyUsedInEntryBlock(Argument *A) {
  BasicBlock *Entry = A->getParent()->begin();
  for (Value::use_iterator UI = A->use_begin(), E = A->use_end(); UI != E; ++UI)
    if (cast<Instruction>(*UI)->getParent() != Entry || isa<SwitchInst>(*UI))
      return false;  // Use not in entry block.
  return true;
}
FunctionLoweringInfo::FunctionLoweringInfo(TargetLowering &tli,
                                           Function &fn, MachineFunction &mf)
    : TLI(tli), Fn(fn), MF(mf), RegMap(MF.getSSARegMap()) {

  // Create a vreg for each argument register that is not dead and is used
  // outside of the entry block for the function.
  for (Function::arg_iterator AI = Fn.arg_begin(), E = Fn.arg_end();
       AI != E; ++AI)
    if (!isOnlyUsedInEntryBlock(AI))
      InitializeRegForValue(AI);

  // Initialize the mapping of values to registers.  This is only set up for
  // instruction values that are used outside of the block that defines
  // them.
  Function::iterator BB = Fn.begin(), EB = Fn.end();
  for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I)
    if (AllocaInst *AI = dyn_cast<AllocaInst>(I))
      if (ConstantInt *CUI = dyn_cast<ConstantInt>(AI->getArraySize())) {
        const Type *Ty = AI->getAllocatedType();
        uint64_t TySize = TLI.getTargetData()->getTypeSize(Ty);
        unsigned Align =
          std::max((unsigned)TLI.getTargetData()->getTypeAlignment(Ty),
                   AI->getAlignment());

        // If the alignment of the value is smaller than the size of the
        // value, and if the size of the value is particularly small
        // (<= 8 bytes), round up to the size of the value for potentially
        // better performance.
        //
        // FIXME: This could be made better with a preferred alignment hook in
        // TargetData.  It serves primarily to 8-byte align doubles for X86.
        if (Align < TySize && TySize <= 8) Align = TySize;
        TySize *= CUI->getZExtValue();   // Get total allocated size.
        if (TySize == 0) TySize = 1;     // Don't create zero-sized stack objects.
        StaticAllocaMap[AI] =
          MF.getFrameInfo()->CreateStackObject((unsigned)TySize, Align);
      }

  for (; BB != EB; ++BB)
    for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I)
      if (!I->use_empty() && isUsedOutsideOfDefiningBlock(I))
        if (!isa<AllocaInst>(I) ||
            !StaticAllocaMap.count(cast<AllocaInst>(I)))
          InitializeRegForValue(I);

  // Create an initial MachineBasicBlock for each LLVM BasicBlock in F.  This
  // also creates the initial PHI MachineInstrs, though none of the input
  // operands are populated.
  for (BB = Fn.begin(), EB = Fn.end(); BB != EB; ++BB) {
    MachineBasicBlock *MBB = new MachineBasicBlock(BB);
    MBBMap[BB] = MBB;
    MF.getBasicBlockList().push_back(MBB);

    // Create Machine PHI nodes for LLVM PHI nodes, lowering them as
    // appropriate.
    PHINode *PN;
    for (BasicBlock::iterator I = BB->begin();
         (PN = dyn_cast<PHINode>(I)); ++I)
      if (!PN->use_empty()) {
        MVT::ValueType VT = TLI.getValueType(PN->getType());
        unsigned NumElements;
        if (VT != MVT::Vector)
          NumElements = TLI.getNumElements(VT);
        else {
          MVT::ValueType VT1, VT2;
          NumElements =
            TLI.getPackedTypeBreakdown(cast<PackedType>(PN->getType()),
                                       VT1, VT2);
        }
        unsigned PHIReg = ValueMap[PN];
        assert(PHIReg && "PHI node does not have an assigned virtual register!");
        for (unsigned i = 0; i != NumElements; ++i)
          BuildMI(MBB, TargetInstrInfo::PHI, PN->getNumOperands(), PHIReg+i);
      }
  }
}
/// CreateRegForValue - Allocate the appropriate number of virtual registers of
/// the correctly promoted or expanded types.  Assign these registers
/// consecutive vreg numbers and return the first assigned number.
unsigned FunctionLoweringInfo::CreateRegForValue(const Value *V) {
  MVT::ValueType VT = TLI.getValueType(V->getType());

  // The number of multiples of registers that we need, e.g. to split up
  // a <2 x i64> into 4 x i32 registers.
  unsigned NumVectorRegs = 1;

  // If this is a packed type, figure out what type it will decompose into
  // and how many of the elements it will use.
  if (VT == MVT::Vector) {
    const PackedType *PTy = cast<PackedType>(V->getType());
    unsigned NumElts = PTy->getNumElements();
    MVT::ValueType EltTy = TLI.getValueType(PTy->getElementType());

    // Divide the input until we get to a supported size.  This will always
    // end with a scalar if the target doesn't support vectors.
    while (NumElts > 1 && !TLI.isTypeLegal(getVectorType(EltTy, NumElts))) {
      NumElts >>= 1;
      NumVectorRegs <<= 1;
    }

    if (NumElts == 1)
      VT = EltTy;
    else
      VT = getVectorType(EltTy, NumElts);
  }

  // The common case is that we will only create one register for this
  // value.  If we have that case, create and return the virtual register.
  unsigned NV = TLI.getNumElements(VT);
  if (NV == 1) {
    // If we are promoting this value, pick the next largest supported type.
    MVT::ValueType PromotedType = TLI.getTypeToTransformTo(VT);
    unsigned Reg = MakeReg(PromotedType);
    // If this is a vector of supported or promoted types (e.g. 4 x i16),
    // create all of the registers.
    for (unsigned i = 1; i != NumVectorRegs; ++i)
      MakeReg(PromotedType);
    return Reg;
  }

  // If this value is represented with multiple target registers, make sure
  // to create enough consecutive registers of the right (smaller) type.
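  // For example, on a target whose widest legal integer register is i32, an
  // i64 value is expanded into two consecutive i32 virtual registers here.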
  unsigned NT = VT-1;  // Find the type to use.
  while (TLI.getNumElements((MVT::ValueType)NT) != 1)
    --NT;

  unsigned R = MakeReg((MVT::ValueType)NT);
  for (unsigned i = 1; i != NV*NumVectorRegs; ++i)
    MakeReg((MVT::ValueType)NT);
  return R;
}
//===----------------------------------------------------------------------===//
/// SelectionDAGLowering - This is the common target-independent lowering
/// implementation that is parameterized by a TargetLowering object.
/// Also, targets can overload any lowering method.
///
namespace llvm {
class SelectionDAGLowering {
  MachineBasicBlock *CurMBB;

  std::map<const Value*, SDOperand> NodeMap;

  /// PendingLoads - Loads are not emitted to the program immediately.  We bunch
  /// them up and then emit token factor nodes when possible.  This allows us to
  /// get simple disambiguation between loads without worrying about alias
  /// analysis.
  std::vector<SDOperand> PendingLoads;
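  // For example, two non-volatile loads in the same block are not chained to
  // each other; their output chains collect here and are only merged into a
  // single TokenFactor when getRoot() is called for an operation that must
  // follow them.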
  /// Case - A pair of values to record the Value for a switch case, and the
  /// case's target basic block.
  typedef std::pair<Constant*, MachineBasicBlock*> Case;
  typedef std::vector<Case>::iterator CaseItr;
  typedef std::pair<CaseItr, CaseItr> CaseRange;

  /// CaseRec - A struct with ctor used in lowering switches to a binary tree
  /// of conditional branches.
  struct CaseRec {
    CaseRec(MachineBasicBlock *bb, Constant *lt, Constant *ge, CaseRange r) :
      CaseBB(bb), LT(lt), GE(ge), Range(r) {}

    /// CaseBB - The MBB in which to emit the compare and branch
    MachineBasicBlock *CaseBB;
    /// LT, GE - If nonzero, we know the current case value must be less-than or
    /// greater-than-or-equal-to these Constants.
    Constant *LT;
    Constant *GE;
    /// Range - A pair of iterators representing the range of case values to be
    /// processed at this point in the binary search tree.
    CaseRange Range;
  };

  /// The comparison function for sorting Case values.
  struct CaseCmp {
    bool operator () (const Case& C1, const Case& C2) {
      if (const ConstantInt* I1 = dyn_cast<const ConstantInt>(C1.first))
        if (I1->getType()->isUnsigned())
          return I1->getZExtValue() <
                 cast<const ConstantInt>(C2.first)->getZExtValue();
      return cast<const ConstantInt>(C1.first)->getSExtValue() <
             cast<const ConstantInt>(C2.first)->getSExtValue();
    }
  };

public:
  // TLI - This is information that describes the available target features we
  // need for lowering.  This indicates when operations are unavailable,
  // implemented with a libcall, etc.
  TargetLowering &TLI;
  SelectionDAG &DAG;
  const TargetData *TD;

  /// SwitchCases - Vector of CaseBlock structures used to communicate
  /// SwitchInst code generation information.
  std::vector<SelectionDAGISel::CaseBlock> SwitchCases;
  SelectionDAGISel::JumpTable JT;

  /// FuncInfo - Information about the function as a whole.
  ///
  FunctionLoweringInfo &FuncInfo;

  SelectionDAGLowering(SelectionDAG &dag, TargetLowering &tli,
                       FunctionLoweringInfo &funcinfo)
    : TLI(tli), DAG(dag), TD(DAG.getTarget().getTargetData()),
      JT(0,0,0,0), FuncInfo(funcinfo) {
  }

  /// getRoot - Return the current virtual root of the Selection DAG.
  ///
  SDOperand getRoot() {
    if (PendingLoads.empty())
      return DAG.getRoot();

    if (PendingLoads.size() == 1) {
      SDOperand Root = PendingLoads[0];
      DAG.setRoot(Root);
      PendingLoads.clear();
      return Root;
    }

    // Otherwise, we have to make a token factor node.
    SDOperand Root = DAG.getNode(ISD::TokenFactor, MVT::Other,
                                 &PendingLoads[0], PendingLoads.size());
    PendingLoads.clear();
    DAG.setRoot(Root);
    return Root;
  }

  void visit(Instruction &I) { visit(I.getOpcode(), I); }

  void visit(unsigned Opcode, User &I) {
    switch (Opcode) {
    default: assert(0 && "Unknown instruction type encountered!");
      // Build the switch statement using the Instruction.def file.
#define HANDLE_INST(NUM, OPCODE, CLASS) \
    case Instruction::OPCODE: return visit##OPCODE((CLASS&)I);
#include "llvm/Instruction.def"
    }
  }

  void setCurrentBasicBlock(MachineBasicBlock *MBB) { CurMBB = MBB; }
  SDOperand getLoadFrom(const Type *Ty, SDOperand Ptr,
                        const Value *SV, SDOperand Root,
                        bool isVolatile);

  SDOperand getIntPtrConstant(uint64_t Val) {
    return DAG.getConstant(Val, TLI.getPointerTy());
  }

  SDOperand getValue(const Value *V);

  const SDOperand &setValue(const Value *V, SDOperand NewN) {
    SDOperand &N = NodeMap[V];
    assert(N.Val == 0 && "Already set a value for this node!");
    return N = NewN;
  }

  RegsForValue GetRegistersForValue(const std::string &ConstrCode,
                                    MVT::ValueType VT,
                                    bool OutReg, bool InReg,
                                    std::set<unsigned> &OutputRegs,
                                    std::set<unsigned> &InputRegs);
  // Terminator instructions.
  void visitRet(ReturnInst &I);
  void visitBr(BranchInst &I);
  void visitSwitch(SwitchInst &I);
  void visitUnreachable(UnreachableInst &I) { /* noop */ }

  // Helper for visitSwitch
  void visitSwitchCase(SelectionDAGISel::CaseBlock &CB);
  void visitJumpTable(SelectionDAGISel::JumpTable &JT);

  // These all get lowered before this pass.
  void visitInvoke(InvokeInst &I) { assert(0 && "TODO"); }
  void visitUnwind(UnwindInst &I) { assert(0 && "TODO"); }

  void visitIntBinary(User &I, unsigned IntOp, unsigned VecOp);
  void visitFPBinary(User &I, unsigned FPOp, unsigned VecOp);
  void visitShift(User &I, unsigned Opcode);
  void visitAdd(User &I) {
    if (I.getType()->isFloatingPoint())
      visitFPBinary(I, ISD::FADD, ISD::VADD);
    else
      visitIntBinary(I, ISD::ADD, ISD::VADD);
  }
  void visitSub(User &I);
  void visitMul(User &I) {
    if (I.getType()->isFloatingPoint())
      visitFPBinary(I, ISD::FMUL, ISD::VMUL);
    else
      visitIntBinary(I, ISD::MUL, ISD::VMUL);
  }
  void visitUDiv(User &I) { visitIntBinary(I, ISD::UDIV, ISD::VUDIV); }
  void visitSDiv(User &I) { visitIntBinary(I, ISD::SDIV, ISD::VSDIV); }
  void visitFDiv(User &I) { visitFPBinary(I, ISD::FDIV, ISD::VSDIV); }
  void visitRem(User &I) {
    const Type *Ty = I.getType();
    if (Ty->isFloatingPoint())
      visitFPBinary(I, ISD::FREM, 0);
    else
      visitIntBinary(I, Ty->isSigned() ? ISD::SREM : ISD::UREM, 0);
  }
  void visitAnd(User &I) { visitIntBinary(I, ISD::AND, ISD::VAND); }
  void visitOr (User &I) { visitIntBinary(I, ISD::OR,  ISD::VOR); }
  void visitXor(User &I) { visitIntBinary(I, ISD::XOR, ISD::VXOR); }
  void visitShl(User &I) { visitShift(I, ISD::SHL); }
  void visitShr(User &I) {
    visitShift(I, I.getType()->isUnsigned() ? ISD::SRL : ISD::SRA);
  }

  void visitSetCC(User &I, ISD::CondCode SignedOpc, ISD::CondCode UnsignedOpc,
                  ISD::CondCode FPOpc);
  void visitSetEQ(User &I) { visitSetCC(I, ISD::SETEQ, ISD::SETEQ,
                                        ISD::SETOEQ); }
  void visitSetNE(User &I) { visitSetCC(I, ISD::SETNE, ISD::SETNE,
                                        ISD::SETUNE); }
  void visitSetLE(User &I) { visitSetCC(I, ISD::SETLE, ISD::SETULE,
                                        ISD::SETOLE); }
  void visitSetGE(User &I) { visitSetCC(I, ISD::SETGE, ISD::SETUGE,
                                        ISD::SETOGE); }
  void visitSetLT(User &I) { visitSetCC(I, ISD::SETLT, ISD::SETULT,
                                        ISD::SETOLT); }
  void visitSetGT(User &I) { visitSetCC(I, ISD::SETGT, ISD::SETUGT,
                                        ISD::SETOGT); }

  void visitExtractElement(User &I);
  void visitInsertElement(User &I);
  void visitShuffleVector(User &I);

  void visitGetElementPtr(User &I);
  void visitCast(User &I);
  void visitSelect(User &I);

  void visitMalloc(MallocInst &I);
  void visitFree(FreeInst &I);
  void visitAlloca(AllocaInst &I);
  void visitLoad(LoadInst &I);
  void visitStore(StoreInst &I);
  void visitPHI(PHINode &I) { } // PHI nodes are handled specially.
  void visitCall(CallInst &I);
  void visitInlineAsm(CallInst &I);
  const char *visitIntrinsicCall(CallInst &I, unsigned Intrinsic);
  void visitTargetIntrinsic(CallInst &I, unsigned Intrinsic);

  void visitVAStart(CallInst &I);
  void visitVAArg(VAArgInst &I);
  void visitVAEnd(CallInst &I);
  void visitVACopy(CallInst &I);
  void visitFrameReturnAddress(CallInst &I, bool isFrameAddress);

  void visitMemIntrinsic(CallInst &I, unsigned Op);

  void visitUserOp1(Instruction &I) {
    assert(0 && "UserOp1 should not exist at instruction selection time!");
  }
  void visitUserOp2(Instruction &I) {
    assert(0 && "UserOp2 should not exist at instruction selection time!");
  }
};
} // end namespace llvm
SDOperand SelectionDAGLowering::getValue(const Value *V) {
  SDOperand &N = NodeMap[V];
  if (N.Val) return N;

  const Type *VTy = V->getType();
  MVT::ValueType VT = TLI.getValueType(VTy);
  if (Constant *C = const_cast<Constant*>(dyn_cast<Constant>(V))) {
    if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) {
      visit(CE->getOpcode(), *CE);
      assert(N.Val && "visit didn't populate the ValueMap!");
      return N;
    } else if (GlobalValue *GV = dyn_cast<GlobalValue>(C)) {
      return N = DAG.getGlobalAddress(GV, VT);
    } else if (isa<ConstantPointerNull>(C)) {
      return N = DAG.getConstant(0, TLI.getPointerTy());
    } else if (isa<UndefValue>(C)) {
      if (!isa<PackedType>(VTy))
        return N = DAG.getNode(ISD::UNDEF, VT);

      // Create a VBUILD_VECTOR of undef nodes.
      const PackedType *PTy = cast<PackedType>(VTy);
      unsigned NumElements = PTy->getNumElements();
      MVT::ValueType PVT = TLI.getValueType(PTy->getElementType());

      SmallVector<SDOperand, 8> Ops;
      Ops.assign(NumElements, DAG.getNode(ISD::UNDEF, PVT));

      // Create a VConstant node with generic Vector type.
      Ops.push_back(DAG.getConstant(NumElements, MVT::i32));
      Ops.push_back(DAG.getValueType(PVT));
      return N = DAG.getNode(ISD::VBUILD_VECTOR, MVT::Vector,
                             &Ops[0], Ops.size());
    } else if (ConstantFP *CFP = dyn_cast<ConstantFP>(C)) {
      return N = DAG.getConstantFP(CFP->getValue(), VT);
    } else if (const PackedType *PTy = dyn_cast<PackedType>(VTy)) {
      unsigned NumElements = PTy->getNumElements();
      MVT::ValueType PVT = TLI.getValueType(PTy->getElementType());

      // Now that we know the number and type of the elements, push a
      // Constant or ConstantFP node onto the ops list for each element of
      // the packed constant.
      SmallVector<SDOperand, 8> Ops;
      if (ConstantPacked *CP = dyn_cast<ConstantPacked>(C)) {
        for (unsigned i = 0; i != NumElements; ++i)
          Ops.push_back(getValue(CP->getOperand(i)));
      } else {
        assert(isa<ConstantAggregateZero>(C) && "Unknown packed constant!");
        SDOperand Op;
        if (MVT::isFloatingPoint(PVT))
          Op = DAG.getConstantFP(0, PVT);
        else
          Op = DAG.getConstant(0, PVT);
        Ops.assign(NumElements, Op);
      }

      // Create a VBUILD_VECTOR node with generic Vector type.
      Ops.push_back(DAG.getConstant(NumElements, MVT::i32));
      Ops.push_back(DAG.getValueType(PVT));
      return N = DAG.getNode(ISD::VBUILD_VECTOR, MVT::Vector, &Ops[0], Ops.size());
    } else {
      // Canonicalize all constant ints to be unsigned.
      return N = DAG.getConstant(cast<ConstantIntegral>(C)->getZExtValue(), VT);
    }
  }

  if (const AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
    std::map<const AllocaInst*, int>::iterator SI =
      FuncInfo.StaticAllocaMap.find(AI);
    if (SI != FuncInfo.StaticAllocaMap.end())
      return DAG.getFrameIndex(SI->second, TLI.getPointerTy());
  }

  std::map<const Value*, unsigned>::const_iterator VMI =
      FuncInfo.ValueMap.find(V);
  assert(VMI != FuncInfo.ValueMap.end() && "Value not in map!");

  unsigned InReg = VMI->second;

  // If this type is not legal, make it so now.
  if (VT != MVT::Vector) {
    MVT::ValueType DestVT = TLI.getTypeToTransformTo(VT);

    N = DAG.getCopyFromReg(DAG.getEntryNode(), InReg, DestVT);
    if (DestVT < VT) {
      // Source must be expanded.  This input value is actually coming from the
      // register pair VMI->second and VMI->second+1.
      N = DAG.getNode(ISD::BUILD_PAIR, VT, N,
                      DAG.getCopyFromReg(DAG.getEntryNode(), InReg+1, DestVT));
    } else if (DestVT > VT) { // Promotion case
      if (MVT::isFloatingPoint(VT))
        N = DAG.getNode(ISD::FP_ROUND, VT, N);
      else
        N = DAG.getNode(ISD::TRUNCATE, VT, N);
    }
  } else {
    // Otherwise, if this is a vector, make it available as a generic vector
    // here.
    MVT::ValueType PTyElementVT, PTyLegalElementVT;
    const PackedType *PTy = cast<PackedType>(VTy);
    unsigned NE = TLI.getPackedTypeBreakdown(PTy, PTyElementVT,
                                             PTyLegalElementVT);

    // Build a VBUILD_VECTOR with the input registers.
    SmallVector<SDOperand, 8> Ops;
    if (PTyElementVT == PTyLegalElementVT) {
      // If the value types are legal, just VBUILD the CopyFromReg nodes.
      for (unsigned i = 0; i != NE; ++i)
        Ops.push_back(DAG.getCopyFromReg(DAG.getEntryNode(), InReg++,
                                         PTyElementVT));
    } else if (PTyElementVT < PTyLegalElementVT) {
      // If the register was promoted, use TRUNCATE or FP_ROUND as appropriate.
      for (unsigned i = 0; i != NE; ++i) {
        SDOperand Op = DAG.getCopyFromReg(DAG.getEntryNode(), InReg++,
                                          PTyLegalElementVT);
        if (MVT::isFloatingPoint(PTyElementVT))
          Op = DAG.getNode(ISD::FP_ROUND, PTyElementVT, Op);
        else
          Op = DAG.getNode(ISD::TRUNCATE, PTyElementVT, Op);
        Ops.push_back(Op);
      }
    } else {
      // If the register was expanded, use BUILD_PAIR.
      assert((NE & 1) == 0 && "Must expand into a multiple of 2 elements!");
      for (unsigned i = 0; i != NE/2; ++i) {
        SDOperand Op0 = DAG.getCopyFromReg(DAG.getEntryNode(), InReg++,
                                           PTyLegalElementVT);
        SDOperand Op1 = DAG.getCopyFromReg(DAG.getEntryNode(), InReg++,
                                           PTyLegalElementVT);
        Ops.push_back(DAG.getNode(ISD::BUILD_PAIR, VT, Op0, Op1));
      }
    }

    Ops.push_back(DAG.getConstant(NE, MVT::i32));
    Ops.push_back(DAG.getValueType(PTyLegalElementVT));
    N = DAG.getNode(ISD::VBUILD_VECTOR, MVT::Vector, &Ops[0], Ops.size());

    // Finally, use a VBIT_CONVERT to make this available as the appropriate
    // vector type.
    N = DAG.getNode(ISD::VBIT_CONVERT, MVT::Vector, N,
                    DAG.getConstant(PTy->getNumElements(),
                                    MVT::i32),
                    DAG.getValueType(TLI.getValueType(PTy->getElementType())));
  }

  return N;
}
void SelectionDAGLowering::visitRet(ReturnInst &I) {
  if (I.getNumOperands() == 0) {
    DAG.setRoot(DAG.getNode(ISD::RET, MVT::Other, getRoot()));
    return;
  }
  SmallVector<SDOperand, 8> NewValues;
  NewValues.push_back(getRoot());
  for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i) {
    SDOperand RetOp = getValue(I.getOperand(i));
    bool isSigned = I.getOperand(i)->getType()->isSigned();

    // If this is an integer return value, we need to promote it ourselves to
    // the full width of a register, since LegalizeOp will use ANY_EXTEND rather
    // than sign/zero extend.
    // FIXME: The C calling convention requires the return type to be promoted
    // to at least 32 bits, but this is not necessary for non-C calling
    // conventions.
    if (MVT::isInteger(RetOp.getValueType()) &&
        RetOp.getValueType() < MVT::i64) {
      MVT::ValueType TmpVT;
      if (TLI.getTypeAction(MVT::i32) == TargetLowering::Promote)
        TmpVT = TLI.getTypeToTransformTo(MVT::i32);
      else
        TmpVT = MVT::i32;

      if (isSigned)
        RetOp = DAG.getNode(ISD::SIGN_EXTEND, TmpVT, RetOp);
      else
        RetOp = DAG.getNode(ISD::ZERO_EXTEND, TmpVT, RetOp);
    }
    NewValues.push_back(RetOp);
    NewValues.push_back(DAG.getConstant(isSigned, MVT::i32));
  }
  DAG.setRoot(DAG.getNode(ISD::RET, MVT::Other,
                          &NewValues[0], NewValues.size()));
}
void SelectionDAGLowering::visitBr(BranchInst &I) {
  // Update machine-CFG edges.
  MachineBasicBlock *Succ0MBB = FuncInfo.MBBMap[I.getSuccessor(0)];

  // Figure out which block is immediately after the current one.
  MachineBasicBlock *NextBlock = 0;
  MachineFunction::iterator BBI = CurMBB;
  if (++BBI != CurMBB->getParent()->end())
    NextBlock = BBI;

  if (I.isUnconditional()) {
    // If this is not a fall-through branch, emit the branch.
    if (Succ0MBB != NextBlock)
      DAG.setRoot(DAG.getNode(ISD::BR, MVT::Other, getRoot(),
                              DAG.getBasicBlock(Succ0MBB)));

    // Update machine-CFG edges.
    CurMBB->addSuccessor(Succ0MBB);

    return;
  }

  // If this condition is one of the special cases we handle, do special stuff
  // now.
  Value *CondVal = I.getCondition();
  MachineBasicBlock *Succ1MBB = FuncInfo.MBBMap[I.getSuccessor(1)];

  // Create a CaseBlock record representing this branch.
  SelectionDAGISel::CaseBlock CB(ISD::SETEQ, CondVal, 0,
                                 Succ0MBB, Succ1MBB, CurMBB);
  // Use visitSwitchCase to actually insert the fast branch sequence for this
  // cond branch.
  visitSwitchCase(CB);
}
/// visitSwitchCase - Emits the necessary code to represent a single node in
/// the binary search tree resulting from lowering a switch instruction.
void SelectionDAGLowering::visitSwitchCase(SelectionDAGISel::CaseBlock &CB) {
  SDOperand Cond;
  SDOperand CondLHS = getValue(CB.CmpLHS);

  // If the CaseBlock has both LHS/RHS comparisons, build the setcc now,
  // otherwise, just use the LHS value as a bool comparison value.
  if (CB.CmpRHS)
    Cond = DAG.getSetCC(MVT::i1, CondLHS, getValue(CB.CmpRHS), CB.CC);
  else
    Cond = CondLHS;

  // Set NextBlock to be the MBB immediately after the current one, if any.
  // This is used to avoid emitting unnecessary branches to the next block.
  MachineBasicBlock *NextBlock = 0;
  MachineFunction::iterator BBI = CurMBB;
  if (++BBI != CurMBB->getParent()->end())
    NextBlock = BBI;

  // If the lhs block is the next block, invert the condition so that we can
  // fall through to the lhs instead of the rhs block.
  if (CB.TrueBB == NextBlock) {
    std::swap(CB.TrueBB, CB.FalseBB);
    SDOperand True = DAG.getConstant(1, Cond.getValueType());
    Cond = DAG.getNode(ISD::XOR, Cond.getValueType(), Cond, True);
  }
  SDOperand BrCond = DAG.getNode(ISD::BRCOND, MVT::Other, getRoot(), Cond,
                                 DAG.getBasicBlock(CB.TrueBB));
  if (CB.FalseBB == NextBlock)
    DAG.setRoot(BrCond);
  else
    DAG.setRoot(DAG.getNode(ISD::BR, MVT::Other, BrCond,
                            DAG.getBasicBlock(CB.FalseBB)));
  // Update successor info
  CurMBB->addSuccessor(CB.TrueBB);
  CurMBB->addSuccessor(CB.FalseBB);
}
void SelectionDAGLowering::visitJumpTable(SelectionDAGISel::JumpTable &JT) {
  // Emit the code for the jump table
  MVT::ValueType PTy = TLI.getPointerTy();
  assert((PTy == MVT::i32 || PTy == MVT::i64) &&
         "Jump table entries are 32-bit or 64-bit values");
  bool isPIC = TLI.getTargetMachine().getRelocationModel() == Reloc::PIC_;
  // PIC jump table entries are 32-bit values.
  unsigned EntrySize = isPIC ? 4 : MVT::getSizeInBits(PTy)/8;
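  // The switched-on value (already reduced to a zero-based index) is scaled by
  // the entry size below; e.g. on a 64-bit non-PIC target EntrySize is 8, so
  // the index is multiplied by 8 before being added to the table's address.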
  SDOperand Copy = DAG.getCopyFromReg(getRoot(), JT.Reg, PTy);
  SDOperand IDX = DAG.getNode(ISD::MUL, PTy, Copy,
                              DAG.getConstant(EntrySize, PTy));
  SDOperand TAB = DAG.getJumpTable(JT.JTI,PTy);
  SDOperand ADD = DAG.getNode(ISD::ADD, PTy, IDX, TAB);
  SDOperand LD  = DAG.getLoad(isPIC ? MVT::i32 : PTy, Copy.getValue(1), ADD,
                              NULL, 0);
  if (isPIC) {
    // For Pic, the sequence is:
    // BRIND(load(Jumptable + index) + RelocBase)
    // RelocBase is the JumpTable on PPC and X86, GOT on Alpha
    SDOperand Reloc;
    if (TLI.usesGlobalOffsetTable())
      Reloc = DAG.getNode(ISD::GLOBAL_OFFSET_TABLE, PTy);
    else
      Reloc = TAB;
    ADD = (PTy != MVT::i32) ? DAG.getNode(ISD::SIGN_EXTEND, PTy, LD) : LD;
    ADD = DAG.getNode(ISD::ADD, PTy, ADD, Reloc);
    DAG.setRoot(DAG.getNode(ISD::BRIND, MVT::Other, LD.getValue(1), ADD));
  } else {
    DAG.setRoot(DAG.getNode(ISD::BRIND, MVT::Other, LD.getValue(1), LD));
  }
}
void SelectionDAGLowering::visitSwitch(SwitchInst &I) {
  // Figure out which block is immediately after the current one.
  MachineBasicBlock *NextBlock = 0;
  MachineFunction::iterator BBI = CurMBB;

  if (++BBI != CurMBB->getParent()->end())
    NextBlock = BBI;

  MachineBasicBlock *Default = FuncInfo.MBBMap[I.getDefaultDest()];

  // If there is only the default destination, branch to it if it is not the
  // next basic block.  Otherwise, just fall through.
  if (I.getNumOperands() == 2) {
    // Update machine-CFG edges.

    // If this is not a fall-through branch, emit the branch.
    if (Default != NextBlock)
      DAG.setRoot(DAG.getNode(ISD::BR, MVT::Other, getRoot(),
                              DAG.getBasicBlock(Default)));

    CurMBB->addSuccessor(Default);
    return;
  }

  // If there are any non-default case statements, create a vector of Cases
  // representing each one, and sort the vector so that we can efficiently
  // create a binary search tree from them.
  std::vector<Case> Cases;

  for (unsigned i = 1; i < I.getNumSuccessors(); ++i) {
    MachineBasicBlock *SMBB = FuncInfo.MBBMap[I.getSuccessor(i)];
    Cases.push_back(Case(I.getSuccessorValue(i), SMBB));
  }

  std::sort(Cases.begin(), Cases.end(), CaseCmp());
  // Get the Value to be switched on and default basic blocks, which will be
  // inserted into CaseBlock records, representing basic blocks in the binary
  // search tree.
  Value *SV = I.getOperand(0);

  // Get the MachineFunction which holds the current MBB.  This is used during
  // emission of jump tables, and when inserting any additional MBBs necessary
  // to represent the switch.
  MachineFunction *CurMF = CurMBB->getParent();
  const BasicBlock *LLVMBB = CurMBB->getBasicBlock();

  // If the switch has few cases (two or less) emit a series of specific
  // tests.
  if (Cases.size() < 3) {
    // TODO: If any two of the cases has the same destination, and if one value
    // is the same as the other, but has one bit unset that the other has set,
    // use bit manipulation to do two compares at once.  For example:
    // "if (X == 6 || X == 4)" -> "if ((X|2) == 6)"

    // Rearrange the case blocks so that the last one falls through if possible.
    if (NextBlock && Default != NextBlock && Cases.back().second != NextBlock) {
      // The last case block won't fall through into 'NextBlock' if we emit the
      // branches in this order.  See if rearranging a case value would help.
      for (unsigned i = 0, e = Cases.size()-1; i != e; ++i) {
        if (Cases[i].second == NextBlock) {
          std::swap(Cases[i], Cases.back());
          break;
        }
      }
    }

    // Create a CaseBlock record representing a conditional branch to
    // the Case's target mbb if the value being switched on SV is equal
    // to the current case value.
    MachineBasicBlock *CurBlock = CurMBB;
    for (unsigned i = 0, e = Cases.size(); i != e; ++i) {
      MachineBasicBlock *FallThrough;
      if (i != e-1) {
        FallThrough = new MachineBasicBlock(CurMBB->getBasicBlock());
        CurMF->getBasicBlockList().insert(BBI, FallThrough);
      } else {
        // If the last case doesn't match, go to the default block.
        FallThrough = Default;
      }

      SelectionDAGISel::CaseBlock CB(ISD::SETEQ, SV, Cases[i].first,
                                     Cases[i].second, FallThrough, CurBlock);

      // If emitting the first comparison, just call visitSwitchCase to emit the
      // code into the current block.  Otherwise, push the CaseBlock onto the
      // vector to be later processed by SDISel, and insert the node's MBB
      // before the next MBB.
      if (CurBlock == CurMBB)
        visitSwitchCase(CB);
      else
        SwitchCases.push_back(CB);

      CurBlock = FallThrough;
    }
    return;
  }
  // If the switch has more than 5 blocks, and is at least 31.25% dense, and the
  // target supports indirect branches, then emit a jump table rather than
  // lowering the switch to a binary tree of conditional branches.
  if (TLI.isOperationLegal(ISD::BRIND, TLI.getPointerTy()) &&
      Cases.size() > 5) {
    uint64_t First = cast<ConstantIntegral>(Cases.front().first)->getZExtValue();
    uint64_t Last  = cast<ConstantIntegral>(Cases.back().first)->getZExtValue();
    double Density = (double)Cases.size() / (double)((Last - First) + 1ULL);
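    // Density compares the number of cases to the span of case values; e.g.
    // 5 cases covering the values 0..15 give 5 / 16 = 0.3125, exactly the
    // threshold checked below.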
    if (Density >= 0.3125) {
      // Create a new basic block to hold the code for loading the address
      // of the jump table, and jumping to it.  Update successor information;
      // we will either branch to the default case for the switch, or the jump
      // table.
      MachineBasicBlock *JumpTableBB = new MachineBasicBlock(LLVMBB);
      CurMF->getBasicBlockList().insert(BBI, JumpTableBB);
      CurMBB->addSuccessor(Default);
      CurMBB->addSuccessor(JumpTableBB);

      // Subtract the lowest switch case value from the value being switched on
      // and conditional branch to default mbb if the result is greater than the
      // difference between smallest and largest cases.
      SDOperand SwitchOp = getValue(SV);
      MVT::ValueType VT = SwitchOp.getValueType();
      SDOperand SUB = DAG.getNode(ISD::SUB, VT, SwitchOp,
                                  DAG.getConstant(First, VT));

      // The SDNode we just created, which holds the value being switched on
      // minus the smallest case value, needs to be copied to a virtual
      // register so it can be used as an index into the jump table in a
      // subsequent basic block.  This value may be smaller or larger than the
      // target's pointer type, and therefore require extension or truncating.
      if (VT > TLI.getPointerTy())
        SwitchOp = DAG.getNode(ISD::TRUNCATE, TLI.getPointerTy(), SUB);
      else
        SwitchOp = DAG.getNode(ISD::ZERO_EXTEND, TLI.getPointerTy(), SUB);

      unsigned JumpTableReg = FuncInfo.MakeReg(TLI.getPointerTy());
      SDOperand CopyTo = DAG.getCopyToReg(getRoot(), JumpTableReg, SwitchOp);

      // Emit the range check for the jump table, and branch to the default
      // block for the switch statement if the value being switched on exceeds
      // the largest case in the switch.
      SDOperand CMP = DAG.getSetCC(TLI.getSetCCResultTy(), SUB,
                                   DAG.getConstant(Last-First,VT), ISD::SETUGT);
      DAG.setRoot(DAG.getNode(ISD::BRCOND, MVT::Other, CopyTo, CMP,
                              DAG.getBasicBlock(Default)));

      // Build a vector of destination BBs, corresponding to each target
      // of the jump table.  If the value of the jump table slot corresponds to
      // a case statement, push the case's BB onto the vector, otherwise, push
      // the default BB.
      std::vector<MachineBasicBlock*> DestBBs;
      uint64_t TEI = First;
      for (CaseItr ii = Cases.begin(), ee = Cases.end(); ii != ee; ++TEI)
        if (cast<ConstantIntegral>(ii->first)->getZExtValue() == TEI) {
          DestBBs.push_back(ii->second);
          ++ii;
        } else {
          DestBBs.push_back(Default);
        }

      // Update successor info
      for (std::vector<MachineBasicBlock*>::iterator I = DestBBs.begin(),
           E = DestBBs.end(); I != E; ++I)
        JumpTableBB->addSuccessor(*I);

      // Create a jump table index for this jump table, or return an existing
      // one.
      unsigned JTI = CurMF->getJumpTableInfo()->getJumpTableIndex(DestBBs);

      // Set the jump table information so that we can codegen it as a second
      // MachineBasicBlock
      JT.Reg = JumpTableReg;
      JT.JTI = JTI;
      JT.MBB = JumpTableBB;
      JT.Default = Default;
      return;
    }
  }
  // Push the initial CaseRec onto the worklist
  std::vector<CaseRec> CaseVec;
  CaseVec.push_back(CaseRec(CurMBB,0,0,CaseRange(Cases.begin(),Cases.end())));

  while (!CaseVec.empty()) {
    // Grab a record representing a case range to process off the worklist
    CaseRec CR = CaseVec.back();
    CaseVec.pop_back();

    // Size is the number of Cases represented by this range.  If Size is 1,
    // then we are processing a leaf of the binary search tree.  Otherwise,
    // we need to pick a pivot, and push left and right ranges onto the
    // worklist.
    unsigned Size = CR.Range.second - CR.Range.first;

    if (Size == 1) {
      // Create a CaseBlock record representing a conditional branch to
      // the Case's target mbb if the value being switched on SV is equal
      // to C.  Otherwise, branch to default.
      Constant *C = CR.Range.first->first;
      MachineBasicBlock *Target = CR.Range.first->second;
      SelectionDAGISel::CaseBlock CB(ISD::SETEQ, SV, C, Target, Default,
                                     CR.CaseBB);

      // If the MBB representing the leaf node is the current MBB, then just
      // call visitSwitchCase to emit the code into the current block.
      // Otherwise, push the CaseBlock onto the vector to be later processed
      // by SDISel, and insert the node's MBB before the next MBB.
      if (CR.CaseBB == CurMBB)
        visitSwitchCase(CB);
      else
        SwitchCases.push_back(CB);
    } else {
      // split case range at pivot
      CaseItr Pivot = CR.Range.first + (Size / 2);
      CaseRange LHSR(CR.Range.first, Pivot);
      CaseRange RHSR(Pivot, CR.Range.second);
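      // For example, a sorted range of five case values {1,2,3,4,5} is split
      // at the pivot value 3: {1,2} form the LHS range and {3,4,5} the RHS.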
      Constant *C = Pivot->first;
      MachineBasicBlock *FalseBB = 0, *TrueBB = 0;

      // We know that we branch to the LHS if the Value being switched on is
      // less than the Pivot value, C.  We use this to optimize our binary
      // tree a bit, by recognizing that if SV is greater than or equal to the
      // LHS's Case Value, and that Case Value is exactly one less than the
      // Pivot's Value, then we can branch directly to the LHS's Target,
      // rather than creating a leaf node for it.
      if ((LHSR.second - LHSR.first) == 1 &&
          LHSR.first->first == CR.GE &&
          cast<ConstantIntegral>(C)->getZExtValue() ==
          (cast<ConstantIntegral>(CR.GE)->getZExtValue() + 1ULL)) {
        TrueBB = LHSR.first->second;
      } else {
        TrueBB = new MachineBasicBlock(LLVMBB);
        CurMF->getBasicBlockList().insert(BBI, TrueBB);
        CaseVec.push_back(CaseRec(TrueBB, C, CR.GE, LHSR));
      }

      // Similar to the optimization above, if the Value being switched on is
      // known to be less than the Constant CR.LT, and the current Case Value
      // is CR.LT - 1, then we can branch directly to the target block for
      // the current Case Value, rather than emitting a RHS leaf node for it.
      if ((RHSR.second - RHSR.first) == 1 && CR.LT &&
          cast<ConstantIntegral>(RHSR.first->first)->getZExtValue() ==
          (cast<ConstantIntegral>(CR.LT)->getZExtValue() - 1ULL)) {
        FalseBB = RHSR.first->second;
      } else {
        FalseBB = new MachineBasicBlock(LLVMBB);
        CurMF->getBasicBlockList().insert(BBI, FalseBB);
        CaseVec.push_back(CaseRec(FalseBB,CR.LT,C,RHSR));
      }

      // Create a CaseBlock record representing a conditional branch to
      // the LHS node if the value being switched on SV is less than C.
      // Otherwise, branch to RHS.
      ISD::CondCode CC = C->getType()->isSigned() ? ISD::SETLT : ISD::SETULT;
      SelectionDAGISel::CaseBlock CB(CC, SV, C, TrueBB, FalseBB, CR.CaseBB);

      if (CR.CaseBB == CurMBB)
        visitSwitchCase(CB);
      else
        SwitchCases.push_back(CB);
    }
  }
}
void SelectionDAGLowering::visitSub(User &I) {
  // -0.0 - X --> fneg
  if (I.getType()->isFloatingPoint()) {
    if (ConstantFP *CFP = dyn_cast<ConstantFP>(I.getOperand(0)))
      if (CFP->isExactlyValue(-0.0)) {
        SDOperand Op2 = getValue(I.getOperand(1));
        setValue(&I, DAG.getNode(ISD::FNEG, Op2.getValueType(), Op2));
        return;
      }
    visitFPBinary(I, ISD::FSUB, ISD::VSUB);
  } else
    visitIntBinary(I, ISD::SUB, ISD::VSUB);
}
void
SelectionDAGLowering::visitIntBinary(User &I, unsigned IntOp, unsigned VecOp) {
  const Type *Ty = I.getType();
  SDOperand Op1 = getValue(I.getOperand(0));
  SDOperand Op2 = getValue(I.getOperand(1));

  if (const PackedType *PTy = dyn_cast<PackedType>(Ty)) {
    SDOperand Num = DAG.getConstant(PTy->getNumElements(), MVT::i32);
    SDOperand Typ = DAG.getValueType(TLI.getValueType(PTy->getElementType()));
    setValue(&I, DAG.getNode(VecOp, MVT::Vector, Op1, Op2, Num, Typ));
  } else {
    setValue(&I, DAG.getNode(IntOp, Op1.getValueType(), Op1, Op2));
  }
}
void
SelectionDAGLowering::visitFPBinary(User &I, unsigned FPOp, unsigned VecOp) {
  const Type *Ty = I.getType();
  SDOperand Op1 = getValue(I.getOperand(0));
  SDOperand Op2 = getValue(I.getOperand(1));

  if (const PackedType *PTy = dyn_cast<PackedType>(Ty)) {
    SDOperand Num = DAG.getConstant(PTy->getNumElements(), MVT::i32);
    SDOperand Typ = DAG.getValueType(TLI.getValueType(PTy->getElementType()));
    setValue(&I, DAG.getNode(VecOp, MVT::Vector, Op1, Op2, Num, Typ));
  } else {
    setValue(&I, DAG.getNode(FPOp, Op1.getValueType(), Op1, Op2));
  }
}
void SelectionDAGLowering::visitShift(User &I, unsigned Opcode) {
  SDOperand Op1 = getValue(I.getOperand(0));
  SDOperand Op2 = getValue(I.getOperand(1));

  Op2 = DAG.getNode(ISD::ANY_EXTEND, TLI.getShiftAmountTy(), Op2);

  setValue(&I, DAG.getNode(Opcode, Op1.getValueType(), Op1, Op2));
}
void SelectionDAGLowering::visitSetCC(User &I, ISD::CondCode SignedOpcode,
                                      ISD::CondCode UnsignedOpcode,
                                      ISD::CondCode FPOpcode) {
  SDOperand Op1 = getValue(I.getOperand(0));
  SDOperand Op2 = getValue(I.getOperand(1));
  ISD::CondCode Opcode = SignedOpcode;
  if (!FiniteOnlyFPMath() && I.getOperand(0)->getType()->isFloatingPoint())
    Opcode = FPOpcode;
  else if (I.getOperand(0)->getType()->isUnsigned())
    Opcode = UnsignedOpcode;
  setValue(&I, DAG.getSetCC(MVT::i1, Op1, Op2, Opcode));
}
void SelectionDAGLowering::visitSelect(User &I) {
  SDOperand Cond     = getValue(I.getOperand(0));
  SDOperand TrueVal  = getValue(I.getOperand(1));
  SDOperand FalseVal = getValue(I.getOperand(2));
  if (!isa<PackedType>(I.getType())) {
    setValue(&I, DAG.getNode(ISD::SELECT, TrueVal.getValueType(), Cond,
                             TrueVal, FalseVal));
  } else {
    setValue(&I, DAG.getNode(ISD::VSELECT, MVT::Vector, Cond, TrueVal, FalseVal,
                             *(TrueVal.Val->op_end()-2),
                             *(TrueVal.Val->op_end()-1)));
  }
}
void SelectionDAGLowering::visitCast(User &I) {
  SDOperand N = getValue(I.getOperand(0));
  MVT::ValueType SrcVT = N.getValueType();
  MVT::ValueType DestVT = TLI.getValueType(I.getType());

  if (DestVT == MVT::Vector) {
    // This is a cast to a vector from something else.  This is always a bit
    // convert.  Get information about the input vector.
    const PackedType *DestTy = cast<PackedType>(I.getType());
    MVT::ValueType EltVT = TLI.getValueType(DestTy->getElementType());
    setValue(&I, DAG.getNode(ISD::VBIT_CONVERT, DestVT, N,
                             DAG.getConstant(DestTy->getNumElements(),MVT::i32),
                             DAG.getValueType(EltVT)));
  } else if (SrcVT == DestVT) {
    setValue(&I, N);  // noop cast.
  } else if (DestVT == MVT::i1) {
    // Cast to bool is a comparison against zero, not truncation to zero.
    SDOperand Zero = isInteger(SrcVT) ? DAG.getConstant(0, N.getValueType()) :
                                        DAG.getConstantFP(0.0, N.getValueType());
    setValue(&I, DAG.getSetCC(MVT::i1, N, Zero, ISD::SETNE));
  } else if (isInteger(SrcVT)) {
    if (isInteger(DestVT)) {          // Int -> Int cast
      if (DestVT < SrcVT)             // Truncating cast?
        setValue(&I, DAG.getNode(ISD::TRUNCATE, DestVT, N));
      else if (I.getOperand(0)->getType()->isSigned())
        setValue(&I, DAG.getNode(ISD::SIGN_EXTEND, DestVT, N));
      else
        setValue(&I, DAG.getNode(ISD::ZERO_EXTEND, DestVT, N));
    } else if (isFloatingPoint(DestVT)) {   // Int -> FP cast
      if (I.getOperand(0)->getType()->isSigned())
        setValue(&I, DAG.getNode(ISD::SINT_TO_FP, DestVT, N));
      else
        setValue(&I, DAG.getNode(ISD::UINT_TO_FP, DestVT, N));
    } else {
      assert(0 && "Unknown cast!");
    }
  } else if (isFloatingPoint(SrcVT)) {
    if (isFloatingPoint(DestVT)) {    // FP -> FP cast
      if (DestVT < SrcVT)             // Rounding cast?
        setValue(&I, DAG.getNode(ISD::FP_ROUND, DestVT, N));
      else
        setValue(&I, DAG.getNode(ISD::FP_EXTEND, DestVT, N));
    } else if (isInteger(DestVT)) {   // FP -> Int cast.
      if (I.getType()->isSigned())
        setValue(&I, DAG.getNode(ISD::FP_TO_SINT, DestVT, N));
      else
        setValue(&I, DAG.getNode(ISD::FP_TO_UINT, DestVT, N));
    } else {
      assert(0 && "Unknown cast!");
    }
  } else {
    assert(SrcVT == MVT::Vector && "Unknown cast!");
    assert(DestVT != MVT::Vector && "Casts to vector already handled!");
    // This is a cast from a vector to something else.  This is always a bit
    // convert.  Get information about the input vector.
    setValue(&I, DAG.getNode(ISD::VBIT_CONVERT, DestVT, N));
  }
}
void SelectionDAGLowering::visitInsertElement(User &I) {
  SDOperand InVec = getValue(I.getOperand(0));
  SDOperand InVal = getValue(I.getOperand(1));
  SDOperand InIdx = DAG.getNode(ISD::ZERO_EXTEND, TLI.getPointerTy(),
                                getValue(I.getOperand(2)));

  SDOperand Num = *(InVec.Val->op_end()-2);
  SDOperand Typ = *(InVec.Val->op_end()-1);
  setValue(&I, DAG.getNode(ISD::VINSERT_VECTOR_ELT, MVT::Vector,
                           InVec, InVal, InIdx, Num, Typ));
}
void SelectionDAGLowering::visitExtractElement(User &I) {
  SDOperand InVec = getValue(I.getOperand(0));
  SDOperand InIdx = DAG.getNode(ISD::ZERO_EXTEND, TLI.getPointerTy(),
                                getValue(I.getOperand(1)));
  SDOperand Typ = *(InVec.Val->op_end()-1);
  setValue(&I, DAG.getNode(ISD::VEXTRACT_VECTOR_ELT,
                           TLI.getValueType(I.getType()), InVec, InIdx));
}
void SelectionDAGLowering::visitShuffleVector(User &I) {
  SDOperand V1   = getValue(I.getOperand(0));
  SDOperand V2   = getValue(I.getOperand(1));
  SDOperand Mask = getValue(I.getOperand(2));

  SDOperand Num = *(V1.Val->op_end()-2);
  SDOperand Typ = *(V2.Val->op_end()-1);
  setValue(&I, DAG.getNode(ISD::VVECTOR_SHUFFLE, MVT::Vector,
                           V1, V2, Mask, Num, Typ));
}
void SelectionDAGLowering::visitGetElementPtr(User &I) {
  SDOperand N = getValue(I.getOperand(0));
  const Type *Ty = I.getOperand(0)->getType();

  for (GetElementPtrInst::op_iterator OI = I.op_begin()+1, E = I.op_end();
       OI != E; ++OI) {
    Value *Idx = *OI;
    if (const StructType *StTy = dyn_cast<StructType>(Ty)) {
      unsigned Field = cast<ConstantInt>(Idx)->getZExtValue();
      if (Field) {
        // N = N + Offset
        uint64_t Offset = TD->getStructLayout(StTy)->MemberOffsets[Field];
        N = DAG.getNode(ISD::ADD, N.getValueType(), N,
                        getIntPtrConstant(Offset));
      }
      Ty = StTy->getElementType(Field);
    } else {
      Ty = cast<SequentialType>(Ty)->getElementType();

      // If this is a constant subscript, handle it quickly.
      if (ConstantInt *CI = dyn_cast<ConstantInt>(Idx)) {
        if (CI->getZExtValue() == 0) continue;
        uint64_t Offs;
        if (CI->getType()->isSigned())
          Offs = (int64_t)
            TD->getTypeSize(Ty)*cast<ConstantInt>(CI)->getSExtValue();
        else
          Offs =
            TD->getTypeSize(Ty)*cast<ConstantInt>(CI)->getZExtValue();
        N = DAG.getNode(ISD::ADD, N.getValueType(), N, getIntPtrConstant(Offs));
        continue;
      }

      // N = N + Idx * ElementSize;
      uint64_t ElementSize = TD->getTypeSize(Ty);
      SDOperand IdxN = getValue(Idx);

      // If the index is smaller or larger than intptr_t, truncate or extend
      // it.
      if (IdxN.getValueType() < N.getValueType()) {
        if (Idx->getType()->isSigned())
          IdxN = DAG.getNode(ISD::SIGN_EXTEND, N.getValueType(), IdxN);
        else
          IdxN = DAG.getNode(ISD::ZERO_EXTEND, N.getValueType(), IdxN);
      } else if (IdxN.getValueType() > N.getValueType())
        IdxN = DAG.getNode(ISD::TRUNCATE, N.getValueType(), IdxN);

      // If this is a multiply by a power of two, turn it into a shl
      // immediately.  This is a very common case.
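      // For example, indexing into an array of 8-byte elements becomes
      // IdxN << 3 rather than IdxN * 8.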
      if (isPowerOf2_64(ElementSize)) {
        unsigned Amt = Log2_64(ElementSize);
        IdxN = DAG.getNode(ISD::SHL, N.getValueType(), IdxN,
                           DAG.getConstant(Amt, TLI.getShiftAmountTy()));
        N = DAG.getNode(ISD::ADD, N.getValueType(), N, IdxN);
        continue;
      }

      SDOperand Scale = getIntPtrConstant(ElementSize);
      IdxN = DAG.getNode(ISD::MUL, N.getValueType(), IdxN, Scale);
      N = DAG.getNode(ISD::ADD, N.getValueType(), N, IdxN);
    }
  }
  setValue(&I, N);
}
void SelectionDAGLowering::visitAlloca(AllocaInst &I) {
  // If this is a fixed sized alloca in the entry block of the function,
  // allocate it statically on the stack.
  if (FuncInfo.StaticAllocaMap.count(&I))
    return;   // getValue will auto-populate this.

  const Type *Ty = I.getAllocatedType();
  uint64_t TySize = TLI.getTargetData()->getTypeSize(Ty);
  unsigned Align = std::max((unsigned)TLI.getTargetData()->getTypeAlignment(Ty),
                            I.getAlignment());

  SDOperand AllocSize = getValue(I.getArraySize());
  MVT::ValueType IntPtr = TLI.getPointerTy();
  if (IntPtr < AllocSize.getValueType())
    AllocSize = DAG.getNode(ISD::TRUNCATE, IntPtr, AllocSize);
  else if (IntPtr > AllocSize.getValueType())
    AllocSize = DAG.getNode(ISD::ZERO_EXTEND, IntPtr, AllocSize);

  AllocSize = DAG.getNode(ISD::MUL, IntPtr, AllocSize,
                          getIntPtrConstant(TySize));

  // Handle alignment.  If the requested alignment is less than or equal to the
  // stack alignment, ignore it and round the size of the allocation up to the
  // stack alignment size.  If the requested alignment is greater than the
  // stack alignment, we note this in the DYNAMIC_STACKALLOC node.
  unsigned StackAlign =
    TLI.getTargetMachine().getFrameInfo()->getStackAlignment();
  if (Align <= StackAlign) {
    Align = 0;
    // Add SA-1 to the size.
    AllocSize = DAG.getNode(ISD::ADD, AllocSize.getValueType(), AllocSize,
                            getIntPtrConstant(StackAlign-1));
    // Mask out the low bits for alignment purposes.
    AllocSize = DAG.getNode(ISD::AND, AllocSize.getValueType(), AllocSize,
                            getIntPtrConstant(~(uint64_t)(StackAlign-1)));
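    // For example, with a 16-byte stack alignment a request for 20 bytes
    // becomes (20 + 15) & ~15 == 32 bytes, so the allocation stays aligned.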
  }

  SDOperand Ops[] = { getRoot(), AllocSize, getIntPtrConstant(Align) };
  const MVT::ValueType *VTs = DAG.getNodeValueTypes(AllocSize.getValueType(),
                                                    MVT::Other);
  SDOperand DSA = DAG.getNode(ISD::DYNAMIC_STACKALLOC, VTs, 2, Ops, 3);
  DAG.setRoot(setValue(&I, DSA).getValue(1));

  // Inform the Frame Information that we have just allocated a variable-sized
  // object.
  CurMBB->getParent()->getFrameInfo()->CreateVariableSizedObject();
}
void SelectionDAGLowering::visitLoad(LoadInst &I) {
  SDOperand Ptr = getValue(I.getOperand(0));

  SDOperand Root;
  if (I.isVolatile())
    Root = getRoot();
  else {
    // Do not serialize non-volatile loads against each other.
    Root = DAG.getRoot();
  }

  setValue(&I, getLoadFrom(I.getType(), Ptr, I.getOperand(0),
                           Root, I.isVolatile()));
}

SDOperand SelectionDAGLowering::getLoadFrom(const Type *Ty, SDOperand Ptr,
                                            const Value *SV, SDOperand Root,
                                            bool isVolatile) {
  SDOperand L;
  if (const PackedType *PTy = dyn_cast<PackedType>(Ty)) {
    MVT::ValueType PVT = TLI.getValueType(PTy->getElementType());
    L = DAG.getVecLoad(PTy->getNumElements(), PVT, Root, Ptr,
                       DAG.getSrcValue(SV));
  } else {
    L = DAG.getLoad(TLI.getValueType(Ty), Root, Ptr, SV, isVolatile);
  }

  if (isVolatile)
    DAG.setRoot(L.getValue(1));
  else
    PendingLoads.push_back(L.getValue(1));

  return L;
}
void SelectionDAGLowering::visitStore(StoreInst &I) {
  Value *SrcV = I.getOperand(0);
  SDOperand Src = getValue(SrcV);
  SDOperand Ptr = getValue(I.getOperand(1));
  DAG.setRoot(DAG.getStore(getRoot(), Src, Ptr, I.getOperand(1),
                           I.isVolatile()));
}
/// IntrinsicCannotAccessMemory - Return true if the specified intrinsic cannot
/// access memory and has no other side effects at all.
static bool IntrinsicCannotAccessMemory(unsigned IntrinsicID) {
#define GET_NO_MEMORY_INTRINSICS
#include "llvm/Intrinsics.gen"
#undef GET_NO_MEMORY_INTRINSICS
  return false;
}

// IntrinsicOnlyReadsMemory - Return true if the specified intrinsic doesn't
// have any side-effects or if it only reads memory.
static bool IntrinsicOnlyReadsMemory(unsigned IntrinsicID) {
#define GET_SIDE_EFFECT_INFO
#include "llvm/Intrinsics.gen"
#undef GET_SIDE_EFFECT_INFO
  return false;
}
/// visitTargetIntrinsic - Lower a call of a target intrinsic to an INTRINSIC
/// node.
void SelectionDAGLowering::visitTargetIntrinsic(CallInst &I,
                                                unsigned Intrinsic) {
  bool HasChain = !IntrinsicCannotAccessMemory(Intrinsic);
  bool OnlyLoad = HasChain && IntrinsicOnlyReadsMemory(Intrinsic);

  // Build the operand list.
  SmallVector<SDOperand, 8> Ops;
  if (HasChain) {  // If this intrinsic has side-effects, chainify it.
    if (OnlyLoad) {
      // We don't need to serialize loads against other loads.
      Ops.push_back(DAG.getRoot());
    } else {
      Ops.push_back(getRoot());
    }
  }

  // Add the intrinsic ID as an integer operand.
  Ops.push_back(DAG.getConstant(Intrinsic, TLI.getPointerTy()));

  // Add all operands of the call to the operand list.
  for (unsigned i = 1, e = I.getNumOperands(); i != e; ++i) {
    SDOperand Op = getValue(I.getOperand(i));

    // If this is a vector type, force it to the right packed type.
    if (Op.getValueType() == MVT::Vector) {
      const PackedType *OpTy = cast<PackedType>(I.getOperand(i)->getType());
      MVT::ValueType EltVT = TLI.getValueType(OpTy->getElementType());

      MVT::ValueType VVT = MVT::getVectorType(EltVT, OpTy->getNumElements());
      assert(VVT != MVT::Other && "Intrinsic uses a non-legal type?");
      Op = DAG.getNode(ISD::VBIT_CONVERT, VVT, Op);
    }

    assert(TLI.isTypeLegal(Op.getValueType()) &&
           "Intrinsic uses a non-legal type?");
    Ops.push_back(Op);
  }

  std::vector<MVT::ValueType> VTs;
  if (I.getType() != Type::VoidTy) {
    MVT::ValueType VT = TLI.getValueType(I.getType());
    if (VT == MVT::Vector) {
      const PackedType *DestTy = cast<PackedType>(I.getType());
      MVT::ValueType EltVT = TLI.getValueType(DestTy->getElementType());

      VT = MVT::getVectorType(EltVT, DestTy->getNumElements());
      assert(VT != MVT::Other && "Intrinsic uses a non-legal type?");
    }

    assert(TLI.isTypeLegal(VT) && "Intrinsic uses a non-legal type?");
    VTs.push_back(VT);
  }
  if (HasChain)
    VTs.push_back(MVT::Other);

  const MVT::ValueType *VTList = DAG.getNodeValueTypes(VTs);

  // Create the node.
  SDOperand Result;
  if (!HasChain)
    Result = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, VTList, VTs.size(),
                         &Ops[0], Ops.size());
  else if (I.getType() != Type::VoidTy)
    Result = DAG.getNode(ISD::INTRINSIC_W_CHAIN, VTList, VTs.size(),
                         &Ops[0], Ops.size());
  else
    Result = DAG.getNode(ISD::INTRINSIC_VOID, VTList, VTs.size(),
                         &Ops[0], Ops.size());

  if (HasChain) {
    SDOperand Chain = Result.getValue(Result.Val->getNumValues()-1);
    if (OnlyLoad)
      PendingLoads.push_back(Chain);
    else
      DAG.setRoot(Chain);
  }
  if (I.getType() != Type::VoidTy) {
    if (const PackedType *PTy = dyn_cast<PackedType>(I.getType())) {
      MVT::ValueType EVT = TLI.getValueType(PTy->getElementType());
      Result = DAG.getNode(ISD::VBIT_CONVERT, MVT::Vector, Result,
                           DAG.getConstant(PTy->getNumElements(), MVT::i32),
                           DAG.getValueType(EVT));
    }
    setValue(&I, Result);
  }
}
1579 /// visitIntrinsicCall - Lower the call to the specified intrinsic function. If
1580 /// we want to emit this as a call to a named external function, return the
1581 /// name; otherwise, lower it and return null.
1583 SelectionDAGLowering::visitIntrinsicCall(CallInst &I, unsigned Intrinsic) {
1584 switch (Intrinsic) {
1586 // By default, turn this into a target intrinsic node.
1587 visitTargetIntrinsic(I, Intrinsic);
1589 case Intrinsic::vastart: visitVAStart(I); return 0;
1590 case Intrinsic::vaend: visitVAEnd(I); return 0;
1591 case Intrinsic::vacopy: visitVACopy(I); return 0;
1592 case Intrinsic::returnaddress: visitFrameReturnAddress(I, false); return 0;
1593 case Intrinsic::frameaddress: visitFrameReturnAddress(I, true); return 0;
1594 case Intrinsic::setjmp:
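// Note: the pointer arithmetic below skips the leading '_' in the string
// literal when the target does not use the underscored setjmp/longjmp names.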
1595 return "_setjmp"+!TLI.usesUnderscoreSetJmpLongJmp();
1597 case Intrinsic::longjmp:
1598 return "_longjmp"+!TLI.usesUnderscoreSetJmpLongJmp();
1600 case Intrinsic::memcpy_i32:
1601 case Intrinsic::memcpy_i64:
1602 visitMemIntrinsic(I, ISD::MEMCPY);
1604 case Intrinsic::memset_i32:
1605 case Intrinsic::memset_i64:
1606 visitMemIntrinsic(I, ISD::MEMSET);
1608 case Intrinsic::memmove_i32:
1609 case Intrinsic::memmove_i64:
1610 visitMemIntrinsic(I, ISD::MEMMOVE);
1613 case Intrinsic::dbg_stoppoint: {
1614 MachineDebugInfo *DebugInfo = DAG.getMachineDebugInfo();
1615 DbgStopPointInst &SPI = cast<DbgStopPointInst>(I);
1616 if (DebugInfo && SPI.getContext() && DebugInfo->Verify(SPI.getContext())) {
1620 Ops[1] = getValue(SPI.getLineValue());
1621 Ops[2] = getValue(SPI.getColumnValue());
1623 DebugInfoDesc *DD = DebugInfo->getDescFor(SPI.getContext());
1624 assert(DD && "Not a debug information descriptor");
1625 CompileUnitDesc *CompileUnit = cast<CompileUnitDesc>(DD);
1627 Ops[3] = DAG.getString(CompileUnit->getFileName());
1628 Ops[4] = DAG.getString(CompileUnit->getDirectory());
1630 DAG.setRoot(DAG.getNode(ISD::LOCATION, MVT::Other, Ops, 5));
1635 case Intrinsic::dbg_region_start: {
1636 MachineDebugInfo *DebugInfo = DAG.getMachineDebugInfo();
1637 DbgRegionStartInst &RSI = cast<DbgRegionStartInst>(I);
1638 if (DebugInfo && RSI.getContext() && DebugInfo->Verify(RSI.getContext())) {
1639 unsigned LabelID = DebugInfo->RecordRegionStart(RSI.getContext());
1640 DAG.setRoot(DAG.getNode(ISD::DEBUG_LABEL, MVT::Other, getRoot(),
1641 DAG.getConstant(LabelID, MVT::i32)));
1646 case Intrinsic::dbg_region_end: {
1647 MachineDebugInfo *DebugInfo = DAG.getMachineDebugInfo();
1648 DbgRegionEndInst &REI = cast<DbgRegionEndInst>(I);
1649 if (DebugInfo && REI.getContext() && DebugInfo->Verify(REI.getContext())) {
1650 unsigned LabelID = DebugInfo->RecordRegionEnd(REI.getContext());
1651 DAG.setRoot(DAG.getNode(ISD::DEBUG_LABEL, MVT::Other,
1652 getRoot(), DAG.getConstant(LabelID, MVT::i32)));
1657 case Intrinsic::dbg_func_start: {
1658 MachineDebugInfo *DebugInfo = DAG.getMachineDebugInfo();
1659 DbgFuncStartInst &FSI = cast<DbgFuncStartInst>(I);
1660 if (DebugInfo && FSI.getSubprogram() &&
1661 DebugInfo->Verify(FSI.getSubprogram())) {
1662 unsigned LabelID = DebugInfo->RecordRegionStart(FSI.getSubprogram());
1663 DAG.setRoot(DAG.getNode(ISD::DEBUG_LABEL, MVT::Other,
1664 getRoot(), DAG.getConstant(LabelID, MVT::i32)));
1669 case Intrinsic::dbg_declare: {
1670 MachineDebugInfo *DebugInfo = DAG.getMachineDebugInfo();
1671 DbgDeclareInst &DI = cast<DbgDeclareInst>(I);
1672 if (DebugInfo && DI.getVariable() && DebugInfo->Verify(DI.getVariable())) {
1673 SDOperand AddressOp = getValue(DI.getAddress());
1674 if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(AddressOp))
1675 DebugInfo->RecordVariable(DI.getVariable(), FI->getIndex());
1681 case Intrinsic::isunordered_f32:
1682 case Intrinsic::isunordered_f64:
1683 setValue(&I, DAG.getSetCC(MVT::i1,getValue(I.getOperand(1)),
1684 getValue(I.getOperand(2)), ISD::SETUO));
1687 case Intrinsic::sqrt_f32:
1688 case Intrinsic::sqrt_f64:
1689 setValue(&I, DAG.getNode(ISD::FSQRT,
1690 getValue(I.getOperand(1)).getValueType(),
1691 getValue(I.getOperand(1))));
1693 case Intrinsic::powi_f32:
1694 case Intrinsic::powi_f64:
1695 setValue(&I, DAG.getNode(ISD::FPOWI,
1696 getValue(I.getOperand(1)).getValueType(),
1697 getValue(I.getOperand(1)),
1698 getValue(I.getOperand(2))));
1700 case Intrinsic::pcmarker: {
1701 SDOperand Tmp = getValue(I.getOperand(1));
1702 DAG.setRoot(DAG.getNode(ISD::PCMARKER, MVT::Other, getRoot(), Tmp));
1705 case Intrinsic::readcyclecounter: {
1706 SDOperand Op = getRoot();
1707 SDOperand Tmp = DAG.getNode(ISD::READCYCLECOUNTER,
1708 DAG.getNodeValueTypes(MVT::i64, MVT::Other), 2,
1711 DAG.setRoot(Tmp.getValue(1));
1714 case Intrinsic::bswap_i16:
1715 case Intrinsic::bswap_i32:
1716 case Intrinsic::bswap_i64:
1717 setValue(&I, DAG.getNode(ISD::BSWAP,
1718 getValue(I.getOperand(1)).getValueType(),
1719 getValue(I.getOperand(1))));
1721 case Intrinsic::cttz_i8:
1722 case Intrinsic::cttz_i16:
1723 case Intrinsic::cttz_i32:
1724 case Intrinsic::cttz_i64:
1725 setValue(&I, DAG.getNode(ISD::CTTZ,
1726 getValue(I.getOperand(1)).getValueType(),
1727 getValue(I.getOperand(1))));
1729 case Intrinsic::ctlz_i8:
1730 case Intrinsic::ctlz_i16:
1731 case Intrinsic::ctlz_i32:
1732 case Intrinsic::ctlz_i64:
1733 setValue(&I, DAG.getNode(ISD::CTLZ,
1734 getValue(I.getOperand(1)).getValueType(),
1735 getValue(I.getOperand(1))));
1737 case Intrinsic::ctpop_i8:
1738 case Intrinsic::ctpop_i16:
1739 case Intrinsic::ctpop_i32:
1740 case Intrinsic::ctpop_i64:
1741 setValue(&I, DAG.getNode(ISD::CTPOP,
1742 getValue(I.getOperand(1)).getValueType(),
1743 getValue(I.getOperand(1))));
1745 case Intrinsic::stacksave: {
1746 SDOperand Op = getRoot();
1747 SDOperand Tmp = DAG.getNode(ISD::STACKSAVE,
1748 DAG.getNodeValueTypes(TLI.getPointerTy(), MVT::Other), 2, &Op, 1);
1750 DAG.setRoot(Tmp.getValue(1));
1753 case Intrinsic::stackrestore: {
1754 SDOperand Tmp = getValue(I.getOperand(1));
1755 DAG.setRoot(DAG.getNode(ISD::STACKRESTORE, MVT::Other, getRoot(), Tmp));
1758 case Intrinsic::prefetch:
1759 // FIXME: Currently discarding prefetches.
1765 void SelectionDAGLowering::visitCall(CallInst &I) {
1766 const char *RenameFn = 0;
1767 if (Function *F = I.getCalledFunction()) {
1768 if (F->isExternal())
1769 if (unsigned IID = F->getIntrinsicID()) {
1770 RenameFn = visitIntrinsicCall(I, IID);
1773 } else { // Not an LLVM intrinsic.
1774 const std::string &Name = F->getName();
1775 if (Name[0] == 'c' && (Name == "copysign" || Name == "copysignf")) {
1776 if (I.getNumOperands() == 3 && // Basic sanity checks.
1777 I.getOperand(1)->getType()->isFloatingPoint() &&
1778 I.getType() == I.getOperand(1)->getType() &&
1779 I.getType() == I.getOperand(2)->getType()) {
1780 SDOperand LHS = getValue(I.getOperand(1));
1781 SDOperand RHS = getValue(I.getOperand(2));
1782 setValue(&I, DAG.getNode(ISD::FCOPYSIGN, LHS.getValueType(),
1786 } else if (Name[0] == 'f' && (Name == "fabs" || Name == "fabsf")) {
1787 if (I.getNumOperands() == 2 && // Basic sanity checks.
1788 I.getOperand(1)->getType()->isFloatingPoint() &&
1789 I.getType() == I.getOperand(1)->getType()) {
1790 SDOperand Tmp = getValue(I.getOperand(1));
1791 setValue(&I, DAG.getNode(ISD::FABS, Tmp.getValueType(), Tmp));
1794 } else if (Name[0] == 's' && (Name == "sin" || Name == "sinf")) {
1795 if (I.getNumOperands() == 2 && // Basic sanity checks.
1796 I.getOperand(1)->getType()->isFloatingPoint() &&
1797 I.getType() == I.getOperand(1)->getType()) {
1798 SDOperand Tmp = getValue(I.getOperand(1));
1799 setValue(&I, DAG.getNode(ISD::FSIN, Tmp.getValueType(), Tmp));
1802 } else if (Name[0] == 'c' && (Name == "cos" || Name == "cosf")) {
1803 if (I.getNumOperands() == 2 && // Basic sanity checks.
1804 I.getOperand(1)->getType()->isFloatingPoint() &&
1805 I.getType() == I.getOperand(1)->getType()) {
1806 SDOperand Tmp = getValue(I.getOperand(1));
1807 setValue(&I, DAG.getNode(ISD::FCOS, Tmp.getValueType(), Tmp));
1812 } else if (isa<InlineAsm>(I.getOperand(0))) {
1819 Callee = getValue(I.getOperand(0));
1821 Callee = DAG.getExternalSymbol(RenameFn, TLI.getPointerTy());
1822 std::vector<std::pair<SDOperand, const Type*> > Args;
1823 Args.reserve(I.getNumOperands());
1824 for (unsigned i = 1, e = I.getNumOperands(); i != e; ++i) {
1825 Value *Arg = I.getOperand(i);
1826 SDOperand ArgNode = getValue(Arg);
1827 Args.push_back(std::make_pair(ArgNode, Arg->getType()));
1830 const PointerType *PT = cast<PointerType>(I.getCalledValue()->getType());
1831 const FunctionType *FTy = cast<FunctionType>(PT->getElementType());
1833 std::pair<SDOperand,SDOperand> Result =
1834 TLI.LowerCallTo(getRoot(), I.getType(), FTy->isVarArg(), I.getCallingConv(),
1835 I.isTailCall(), Callee, Args, DAG);
1836 if (I.getType() != Type::VoidTy)
1837 setValue(&I, Result.first);
1838 DAG.setRoot(Result.second);
1841 SDOperand RegsForValue::getCopyFromRegs(SelectionDAG &DAG,
1842 SDOperand &Chain, SDOperand &Flag)const{
1843 SDOperand Val = DAG.getCopyFromReg(Chain, Regs[0], RegVT, Flag);
1844 Chain = Val.getValue(1);
1845 Flag = Val.getValue(2);
1847 // If the result was expanded, copy from the top part.
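// (For example, an i64 value expanded into two i32 registers: the second
// CopyFromReg below reads the high half, and BUILD_PAIR reassembles the i64,
// low part first on little-endian targets.)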
1848 if (Regs.size() > 1) {
1849 assert(Regs.size() == 2 &&
1850 "Cannot expand to more than 2 elts yet!");
1851 SDOperand Hi = DAG.getCopyFromReg(Chain, Regs[1], RegVT, Flag);
1852 Chain = Hi.getValue(1);
1853 Flag = Hi.getValue(2);
1854 if (DAG.getTargetLoweringInfo().isLittleEndian())
1855 return DAG.getNode(ISD::BUILD_PAIR, ValueVT, Val, Hi);
1857 return DAG.getNode(ISD::BUILD_PAIR, ValueVT, Hi, Val);
1860 // Otherwise, if the return value was promoted or extended, truncate it to the
1861 // appropriate type.
1862 if (RegVT == ValueVT)
1865 if (MVT::isInteger(RegVT)) {
1866 if (ValueVT < RegVT)
1867 return DAG.getNode(ISD::TRUNCATE, ValueVT, Val);
1869 return DAG.getNode(ISD::ANY_EXTEND, ValueVT, Val);
1871 return DAG.getNode(ISD::FP_ROUND, ValueVT, Val);
1875 /// getCopyToRegs - Emit a series of CopyToReg nodes that copies the
1876 /// specified value into the registers specified by this object. This uses
1877 /// Chain/Flag as the input and updates them for the output Chain/Flag.
1878 void RegsForValue::getCopyToRegs(SDOperand Val, SelectionDAG &DAG,
1879 SDOperand &Chain, SDOperand &Flag,
1880 MVT::ValueType PtrVT) const {
1881 if (Regs.size() == 1) {
1882 // If there is a single register and the types differ, this must be a promotion; convert the value to the register type.
1884 if (RegVT != ValueVT) {
1885 if (MVT::isInteger(RegVT)) {
1886 if (RegVT < ValueVT)
1887 Val = DAG.getNode(ISD::TRUNCATE, RegVT, Val);
1889 Val = DAG.getNode(ISD::ANY_EXTEND, RegVT, Val);
1891 Val = DAG.getNode(ISD::FP_EXTEND, RegVT, Val);
1893 Chain = DAG.getCopyToReg(Chain, Regs[0], Val, Flag);
1894 Flag = Chain.getValue(1);
1896 std::vector<unsigned> R(Regs);
1897 if (!DAG.getTargetLoweringInfo().isLittleEndian())
1898 std::reverse(R.begin(), R.end());
1900 for (unsigned i = 0, e = R.size(); i != e; ++i) {
1901 SDOperand Part = DAG.getNode(ISD::EXTRACT_ELEMENT, RegVT, Val,
1902 DAG.getConstant(i, PtrVT));
1903 Chain = DAG.getCopyToReg(Chain, R[i], Part, Flag);
1904 Flag = Chain.getValue(1);
1909 /// AddInlineAsmOperands - Add this value to the specified inlineasm node
1910 /// operand list. This adds the code marker and includes the number of
1911 /// values added into it.
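/// For example, REGDEF (code 2) describing three registers is emitted as the
/// flag word 2 | (3 << 3) == 26, followed by the three register operands.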
1912 void RegsForValue::AddInlineAsmOperands(unsigned Code, SelectionDAG &DAG,
1913 std::vector<SDOperand> &Ops) const {
1914 Ops.push_back(DAG.getConstant(Code | (Regs.size() << 3), MVT::i32));
1915 for (unsigned i = 0, e = Regs.size(); i != e; ++i)
1916 Ops.push_back(DAG.getRegister(Regs[i], RegVT));
1919 /// isAllocatableRegister - If the specified register is safe to allocate,
1920 /// i.e. it isn't a stack pointer or some other special register, return the
1921 /// register class for the register. Otherwise, return null.
1922 static const TargetRegisterClass *
1923 isAllocatableRegister(unsigned Reg, MachineFunction &MF,
1924 const TargetLowering &TLI, const MRegisterInfo *MRI) {
1925 MVT::ValueType FoundVT = MVT::Other;
1926 const TargetRegisterClass *FoundRC = 0;
1927 for (MRegisterInfo::regclass_iterator RCI = MRI->regclass_begin(),
1928 E = MRI->regclass_end(); RCI != E; ++RCI) {
1929 MVT::ValueType ThisVT = MVT::Other;
1931 const TargetRegisterClass *RC = *RCI;
1932 // If none of the value types for this register class are valid, we
1933 // can't use it. For example, 64-bit reg classes on 32-bit targets.
1934 for (TargetRegisterClass::vt_iterator I = RC->vt_begin(), E = RC->vt_end();
1936 if (TLI.isTypeLegal(*I)) {
1937 // If we have already found this register in a different register class,
1938 // choose the one with the largest VT specified. For example, on
1939 // PowerPC, we favor f64 register classes over f32.
1940 if (FoundVT == MVT::Other ||
1941 MVT::getSizeInBits(FoundVT) < MVT::getSizeInBits(*I)) {
1948 if (ThisVT == MVT::Other) continue;
1950 // NOTE: This isn't ideal. In particular, this might allocate the frame
1951 // pointer in functions that need it (because the frame pointer has not yet
1952 // been taken out of the allocation order, since no variable sized allocation
1953 // has been seen yet). This is a slight code pessimization, but should still work.
1954 for (TargetRegisterClass::iterator I = RC->allocation_order_begin(MF),
1955 E = RC->allocation_order_end(MF); I != E; ++I)
1957 // We found a matching register class. Keep looking at others in case
1958 // we find one with larger registers that this physreg is also in.
1967 RegsForValue SelectionDAGLowering::
1968 GetRegistersForValue(const std::string &ConstrCode,
1969 MVT::ValueType VT, bool isOutReg, bool isInReg,
1970 std::set<unsigned> &OutputRegs,
1971 std::set<unsigned> &InputRegs) {
1972 std::pair<unsigned, const TargetRegisterClass*> PhysReg =
1973 TLI.getRegForInlineAsmConstraint(ConstrCode, VT);
1974 std::vector<unsigned> Regs;
1976 unsigned NumRegs = VT != MVT::Other ? TLI.getNumElements(VT) : 1;
1977 MVT::ValueType RegVT;
1978 MVT::ValueType ValueVT = VT;
1980 if (PhysReg.first) {
1981 if (VT == MVT::Other)
1982 ValueVT = *PhysReg.second->vt_begin();
1984 // Get the actual register value type. This is important, because the user
1985 // may have asked for (e.g.) the AX register in i32 type. We need to
1986 // remember that AX is actually i16 to get the right extension.
1987 RegVT = *PhysReg.second->vt_begin();
1989 // This is an explicit reference to a physical register.
1990 Regs.push_back(PhysReg.first);
1992 // If this is an expanded reference, add the rest of the regs to Regs.
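// (These are simply the registers that follow PhysReg.first in the register
// class's order.)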
1994 TargetRegisterClass::iterator I = PhysReg.second->begin();
1995 TargetRegisterClass::iterator E = PhysReg.second->end();
1996 for (; *I != PhysReg.first; ++I)
1997 assert(I != E && "Didn't find reg!");
1999 // Already added the first reg.
2001 for (; NumRegs; --NumRegs, ++I) {
2002 assert(I != E && "Ran out of registers to allocate!");
2006 return RegsForValue(Regs, RegVT, ValueVT);
2009 // This is a reference to a register class. Allocate NumRegs consecutive,
2010 // available registers from the class.
2011 std::vector<unsigned> RegClassRegs =
2012 TLI.getRegClassForInlineAsmConstraint(ConstrCode, VT);
2014 const MRegisterInfo *MRI = DAG.getTarget().getRegisterInfo();
2015 MachineFunction &MF = *CurMBB->getParent();
2016 unsigned NumAllocated = 0;
2017 for (unsigned i = 0, e = RegClassRegs.size(); i != e; ++i) {
2018 unsigned Reg = RegClassRegs[i];
2019 // See if this register is available.
2020 if ((isOutReg && OutputRegs.count(Reg)) || // Already used.
2021 (isInReg && InputRegs.count(Reg))) { // Already used.
2022 // Make sure we find consecutive registers.
2027 // Check to see if this register is allocatable (i.e. don't give out the stack pointer, etc.).
2029 const TargetRegisterClass *RC = isAllocatableRegister(Reg, MF, TLI, MRI);
2031 // Make sure we find consecutive registers.
2036 // Okay, this register is good, we can use it.
2039 // If we allocated enough consecutive registers, we are done.
2040 if (NumAllocated == NumRegs) {
2041 unsigned RegStart = (i-NumAllocated)+1;
2042 unsigned RegEnd = i+1;
2043 // Mark all of the allocated registers used.
2044 for (unsigned i = RegStart; i != RegEnd; ++i) {
2045 unsigned Reg = RegClassRegs[i];
2046 Regs.push_back(Reg);
2047 if (isOutReg) OutputRegs.insert(Reg); // Mark reg used.
2048 if (isInReg) InputRegs.insert(Reg); // Mark reg used.
2051 return RegsForValue(Regs, *RC->vt_begin(), VT);
2055 // Otherwise, we couldn't allocate enough registers for this.
2056 return RegsForValue();
2060 /// visitInlineAsm - Handle a call to an InlineAsm object.
2062 void SelectionDAGLowering::visitInlineAsm(CallInst &I) {
2063 InlineAsm *IA = cast<InlineAsm>(I.getOperand(0));
2065 SDOperand AsmStr = DAG.getTargetExternalSymbol(IA->getAsmString().c_str(),
2068 // Note, we treat inline asms both with and without side-effects as the same.
2069 // If an inline asm doesn't have side effects and doesn't access memory, we
2070 // could choose not to chain it.
2071 bool hasSideEffects = IA->hasSideEffects();
2073 std::vector<InlineAsm::ConstraintInfo> Constraints = IA->ParseConstraints();
2074 std::vector<MVT::ValueType> ConstraintVTs;
2076 /// AsmNodeOperands - A list of pairs. The first element is a register, the
2077 /// second is a bitfield where bit #0 is set if it is a use and bit #1 is set
2078 /// if it is a def of that register.
2079 std::vector<SDOperand> AsmNodeOperands;
2080 AsmNodeOperands.push_back(SDOperand()); // reserve space for input chain
2081 AsmNodeOperands.push_back(AsmStr);
2083 SDOperand Chain = getRoot();
2086 // We fully assign registers here at isel time. This is not optimal, but
2087 // should work. For register classes that correspond to LLVM classes, we
2088 // could let the LLVM RA do its thing, but we currently don't. Do a prepass
2089 // over the constraints, collecting fixed registers that we know we can't use.
2090 std::set<unsigned> OutputRegs, InputRegs;
2092 for (unsigned i = 0, e = Constraints.size(); i != e; ++i) {
2093 assert(Constraints[i].Codes.size() == 1 && "Only handles one code so far!");
2094 std::string &ConstraintCode = Constraints[i].Codes[0];
2096 MVT::ValueType OpVT;
2098 // Compute the value type for each operand and add it to ConstraintVTs.
2099 switch (Constraints[i].Type) {
2100 case InlineAsm::isOutput:
2101 if (!Constraints[i].isIndirectOutput) {
2102 assert(I.getType() != Type::VoidTy && "Bad inline asm!");
2103 OpVT = TLI.getValueType(I.getType());
2105 const Type *OpTy = I.getOperand(OpNum)->getType();
2106 OpVT = TLI.getValueType(cast<PointerType>(OpTy)->getElementType());
2107 OpNum++; // Consumes a call operand.
2110 case InlineAsm::isInput:
2111 OpVT = TLI.getValueType(I.getOperand(OpNum)->getType());
2112 OpNum++; // Consumes a call operand.
2114 case InlineAsm::isClobber:
2119 ConstraintVTs.push_back(OpVT);
2121 if (TLI.getRegForInlineAsmConstraint(ConstraintCode, OpVT).first == 0)
2122 continue; // Not assigned a fixed reg.
2124 // Build a list of regs that this operand uses. This always has a single
2125 // element for promoted/expanded operands.
2126 RegsForValue Regs = GetRegistersForValue(ConstraintCode, OpVT,
2128 OutputRegs, InputRegs);
2130 switch (Constraints[i].Type) {
2131 case InlineAsm::isOutput:
2132 // We can't assign any other output to this register.
2133 OutputRegs.insert(Regs.Regs.begin(), Regs.Regs.end());
2134 // If this is an early-clobber output, it cannot be assigned to the same
2135 // value as the input reg.
2136 if (Constraints[i].isEarlyClobber || Constraints[i].hasMatchingInput)
2137 InputRegs.insert(Regs.Regs.begin(), Regs.Regs.end());
2139 case InlineAsm::isInput:
2140 // We can't assign any other input to this register.
2141 InputRegs.insert(Regs.Regs.begin(), Regs.Regs.end());
2143 case InlineAsm::isClobber:
2144 // Clobbered regs cannot be used as inputs or outputs.
2145 InputRegs.insert(Regs.Regs.begin(), Regs.Regs.end());
2146 OutputRegs.insert(Regs.Regs.begin(), Regs.Regs.end());
2151 // Loop over all of the inputs, copying the operand values into the
2152 // appropriate registers and processing the output regs.
2153 RegsForValue RetValRegs;
2154 std::vector<std::pair<RegsForValue, Value*> > IndirectStoresToEmit;
2157 for (unsigned i = 0, e = Constraints.size(); i != e; ++i) {
2158 assert(Constraints[i].Codes.size() == 1 && "Only handles one code so far!");
2159 std::string &ConstraintCode = Constraints[i].Codes[0];
2161 switch (Constraints[i].Type) {
2162 case InlineAsm::isOutput: {
2163 TargetLowering::ConstraintType CTy = TargetLowering::C_RegisterClass;
2164 if (ConstraintCode.size() == 1) // not a physreg name.
2165 CTy = TLI.getConstraintType(ConstraintCode[0]);
2167 if (CTy == TargetLowering::C_Memory) {
2169 SDOperand InOperandVal = getValue(I.getOperand(OpNum));
2171 // Check that the operand (the address to store to) isn't a float.
2172 if (!MVT::isInteger(InOperandVal.getValueType()))
2173 assert(0 && "MATCH FAIL!");
2175 if (!Constraints[i].isIndirectOutput)
2176 assert(0 && "MATCH FAIL!");
2178 OpNum++; // Consumes a call operand.
2180 // Extend/truncate to the right pointer type if needed.
2181 MVT::ValueType PtrType = TLI.getPointerTy();
2182 if (InOperandVal.getValueType() < PtrType)
2183 InOperandVal = DAG.getNode(ISD::ZERO_EXTEND, PtrType, InOperandVal);
2184 else if (InOperandVal.getValueType() > PtrType)
2185 InOperandVal = DAG.getNode(ISD::TRUNCATE, PtrType, InOperandVal);
2187 // Add information to the INLINEASM node to know about this output.
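// (The low 3 bits of the flag word encode the operand kind -- 2 = REGDEF,
// 3 = IMM, 4 = MEM -- and the remaining bits the number of operands that follow.)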
2188 unsigned ResOpType = 4/*MEM*/ | (1 << 3);
2189 AsmNodeOperands.push_back(DAG.getConstant(ResOpType, MVT::i32));
2190 AsmNodeOperands.push_back(InOperandVal);
2194 // Otherwise, this is a register output.
2195 assert(CTy == TargetLowering::C_RegisterClass && "Unknown op type!");
2197 // If this is an early-clobber output, or if there is an input
2198 // constraint that matches this, we need to reserve the input register
2199 // so no other inputs allocate to it.
2200 bool UsesInputRegister = false;
2201 if (Constraints[i].isEarlyClobber || Constraints[i].hasMatchingInput)
2202 UsesInputRegister = true;
2204 // Copy the output from the appropriate register. Find a register that we can use.
2207 GetRegistersForValue(ConstraintCode, ConstraintVTs[i],
2208 true, UsesInputRegister,
2209 OutputRegs, InputRegs);
2210 assert(!Regs.Regs.empty() && "Couldn't allocate output reg!");
2212 if (!Constraints[i].isIndirectOutput) {
2213 assert(RetValRegs.Regs.empty() &&
2214 "Cannot have multiple output constraints yet!");
2215 assert(I.getType() != Type::VoidTy && "Bad inline asm!");
2218 IndirectStoresToEmit.push_back(std::make_pair(Regs,
2219 I.getOperand(OpNum)));
2220 OpNum++; // Consumes a call operand.
2223 // Add information to the INLINEASM node to know that this register is set.
2225 Regs.AddInlineAsmOperands(2 /*REGDEF*/, DAG, AsmNodeOperands);
2228 case InlineAsm::isInput: {
2229 SDOperand InOperandVal = getValue(I.getOperand(OpNum));
2230 OpNum++; // Consumes a call operand.
2232 if (isdigit(ConstraintCode[0])) { // Matching constraint?
2233 // If this is required to match an output register we have already set,
2234 // just use its register.
2235 unsigned OperandNo = atoi(ConstraintCode.c_str());
2237 // Scan until we find the definition we already emitted of this operand.
2238 // When we find it, create a RegsForValue operand.
2239 unsigned CurOp = 2; // The first operand.
2240 for (; OperandNo; --OperandNo) {
2241 // Advance to the next operand.
2243 cast<ConstantSDNode>(AsmNodeOperands[CurOp])->getValue();
2244 assert(((NumOps & 7) == 2 /*REGDEF*/ ||
2245 (NumOps & 7) == 4 /*MEM*/) &&
2246 "Skipped past definitions?");
2247 CurOp += (NumOps>>3)+1;
2251 cast<ConstantSDNode>(AsmNodeOperands[CurOp])->getValue();
2252 assert((NumOps & 7) == 2 /*REGDEF*/ &&
2253 "Skipped past definitions?");
2255 // Add NumOps>>3 registers to MatchedRegs.
2256 RegsForValue MatchedRegs;
2257 MatchedRegs.ValueVT = InOperandVal.getValueType();
2258 MatchedRegs.RegVT = AsmNodeOperands[CurOp+1].getValueType();
2259 for (unsigned i = 0, e = NumOps>>3; i != e; ++i) {
2260 unsigned Reg=cast<RegisterSDNode>(AsmNodeOperands[++CurOp])->getReg();
2261 MatchedRegs.Regs.push_back(Reg);
2264 // Use the produced MatchedRegs object to copy the input value into the matched registers.
2265 MatchedRegs.getCopyToRegs(InOperandVal, DAG, Chain, Flag,
2266 TLI.getPointerTy());
2267 MatchedRegs.AddInlineAsmOperands(1 /*REGUSE*/, DAG, AsmNodeOperands);
2271 TargetLowering::ConstraintType CTy = TargetLowering::C_RegisterClass;
2272 if (ConstraintCode.size() == 1) // not a physreg name.
2273 CTy = TLI.getConstraintType(ConstraintCode[0]);
2275 if (CTy == TargetLowering::C_Other) {
2276 if (!TLI.isOperandValidForConstraint(InOperandVal, ConstraintCode[0]))
2277 assert(0 && "MATCH FAIL!");
2279 // Add information to the INLINEASM node to know about this input.
2280 unsigned ResOpType = 3 /*IMM*/ | (1 << 3);
2281 AsmNodeOperands.push_back(DAG.getConstant(ResOpType, MVT::i32));
2282 AsmNodeOperands.push_back(InOperandVal);
2284 } else if (CTy == TargetLowering::C_Memory) {
2287 // Check that the operand isn't a float.
2288 if (!MVT::isInteger(InOperandVal.getValueType()))
2289 assert(0 && "MATCH FAIL!");
2291 // Extend/truncate to the right pointer type if needed.
2292 MVT::ValueType PtrType = TLI.getPointerTy();
2293 if (InOperandVal.getValueType() < PtrType)
2294 InOperandVal = DAG.getNode(ISD::ZERO_EXTEND, PtrType, InOperandVal);
2295 else if (InOperandVal.getValueType() > PtrType)
2296 InOperandVal = DAG.getNode(ISD::TRUNCATE, PtrType, InOperandVal);
2298 // Add information to the INLINEASM node to know about this input.
2299 unsigned ResOpType = 4/*MEM*/ | (1 << 3);
2300 AsmNodeOperands.push_back(DAG.getConstant(ResOpType, MVT::i32));
2301 AsmNodeOperands.push_back(InOperandVal);
2305 assert(CTy == TargetLowering::C_RegisterClass && "Unknown op type!");
2307 // Copy the input into the appropriate registers.
2308 RegsForValue InRegs =
2309 GetRegistersForValue(ConstraintCode, ConstraintVTs[i],
2310 false, true, OutputRegs, InputRegs);
2311 // FIXME: should be match fail.
2312 assert(!InRegs.Regs.empty() && "Couldn't allocate input reg!");
2314 InRegs.getCopyToRegs(InOperandVal, DAG, Chain, Flag, TLI.getPointerTy());
2316 InRegs.AddInlineAsmOperands(1/*REGUSE*/, DAG, AsmNodeOperands);
2319 case InlineAsm::isClobber: {
2320 RegsForValue ClobberedRegs =
2321 GetRegistersForValue(ConstraintCode, MVT::Other, false, false,
2322 OutputRegs, InputRegs);
2323 // Add the clobbered value to the operand list, so that the register
2324 // allocator is aware that the physreg got clobbered.
2325 if (!ClobberedRegs.Regs.empty())
2326 ClobberedRegs.AddInlineAsmOperands(2/*REGDEF*/, DAG, AsmNodeOperands);
2332 // Finish up input operands.
2333 AsmNodeOperands[0] = Chain;
2334 if (Flag.Val) AsmNodeOperands.push_back(Flag);
2336 Chain = DAG.getNode(ISD::INLINEASM,
2337 DAG.getNodeValueTypes(MVT::Other, MVT::Flag), 2,
2338 &AsmNodeOperands[0], AsmNodeOperands.size());
2339 Flag = Chain.getValue(1);
2341 // If this asm returns a register value, copy the result from that register
2342 // and set it as the value of the call.
2343 if (!RetValRegs.Regs.empty())
2344 setValue(&I, RetValRegs.getCopyFromRegs(DAG, Chain, Flag));
2346 std::vector<std::pair<SDOperand, Value*> > StoresToEmit;
2348 // Process indirect outputs, first output all of the flagged copies out of the physregs.
2350 for (unsigned i = 0, e = IndirectStoresToEmit.size(); i != e; ++i) {
2351 RegsForValue &OutRegs = IndirectStoresToEmit[i].first;
2352 Value *Ptr = IndirectStoresToEmit[i].second;
2353 SDOperand OutVal = OutRegs.getCopyFromRegs(DAG, Chain, Flag);
2354 StoresToEmit.push_back(std::make_pair(OutVal, Ptr));
2357 // Emit the non-flagged stores from the physregs.
2358 SmallVector<SDOperand, 8> OutChains;
2359 for (unsigned i = 0, e = StoresToEmit.size(); i != e; ++i)
2360 OutChains.push_back(DAG.getStore(Chain, StoresToEmit[i].first,
2361 getValue(StoresToEmit[i].second),
2362 StoresToEmit[i].second, 0));
2363 if (!OutChains.empty())
2364 Chain = DAG.getNode(ISD::TokenFactor, MVT::Other,
2365 &OutChains[0], OutChains.size());
2370 void SelectionDAGLowering::visitMalloc(MallocInst &I) {
2371 SDOperand Src = getValue(I.getOperand(0));
2373 MVT::ValueType IntPtr = TLI.getPointerTy();
2375 if (IntPtr < Src.getValueType())
2376 Src = DAG.getNode(ISD::TRUNCATE, IntPtr, Src);
2377 else if (IntPtr > Src.getValueType())
2378 Src = DAG.getNode(ISD::ZERO_EXTEND, IntPtr, Src);
2380 // Scale the source by the type size.
2381 uint64_t ElementSize = TD->getTypeSize(I.getType()->getElementType());
2382 Src = DAG.getNode(ISD::MUL, Src.getValueType(),
2383 Src, getIntPtrConstant(ElementSize));
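// (Illustrative example: allocating N elements of a 4-byte element type
// becomes a call to malloc(N * 4) via LowerCallTo below.)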
2385 std::vector<std::pair<SDOperand, const Type*> > Args;
2386 Args.push_back(std::make_pair(Src, TLI.getTargetData()->getIntPtrType()));
2388 std::pair<SDOperand,SDOperand> Result =
2389 TLI.LowerCallTo(getRoot(), I.getType(), false, CallingConv::C, true,
2390 DAG.getExternalSymbol("malloc", IntPtr),
2392 setValue(&I, Result.first); // Pointers always fit in registers
2393 DAG.setRoot(Result.second);
2396 void SelectionDAGLowering::visitFree(FreeInst &I) {
2397 std::vector<std::pair<SDOperand, const Type*> > Args;
2398 Args.push_back(std::make_pair(getValue(I.getOperand(0)),
2399 TLI.getTargetData()->getIntPtrType()));
2400 MVT::ValueType IntPtr = TLI.getPointerTy();
2401 std::pair<SDOperand,SDOperand> Result =
2402 TLI.LowerCallTo(getRoot(), Type::VoidTy, false, CallingConv::C, true,
2403 DAG.getExternalSymbol("free", IntPtr), Args, DAG);
2404 DAG.setRoot(Result.second);
2407 // InsertAtEndOfBasicBlock - This method should be implemented by targets that
2408 // mark instructions with the 'usesCustomDAGSchedInserter' flag. These
2409 // instructions are special in various ways, which require special support to
2410 // insert. The specified MachineInstr is created but not inserted into any
2411 // basic blocks, and the scheduler passes ownership of it to this method.
2412 MachineBasicBlock *TargetLowering::InsertAtEndOfBasicBlock(MachineInstr *MI,
2413 MachineBasicBlock *MBB) {
2414 std::cerr << "If a target marks an instruction with "
2415 "'usesCustomDAGSchedInserter', it must implement "
2416 "TargetLowering::InsertAtEndOfBasicBlock!\n";
2421 void SelectionDAGLowering::visitVAStart(CallInst &I) {
2422 DAG.setRoot(DAG.getNode(ISD::VASTART, MVT::Other, getRoot(),
2423 getValue(I.getOperand(1)),
2424 DAG.getSrcValue(I.getOperand(1))));
2427 void SelectionDAGLowering::visitVAArg(VAArgInst &I) {
2428 SDOperand V = DAG.getVAArg(TLI.getValueType(I.getType()), getRoot(),
2429 getValue(I.getOperand(0)),
2430 DAG.getSrcValue(I.getOperand(0)));
2432 DAG.setRoot(V.getValue(1));
2435 void SelectionDAGLowering::visitVAEnd(CallInst &I) {
2436 DAG.setRoot(DAG.getNode(ISD::VAEND, MVT::Other, getRoot(),
2437 getValue(I.getOperand(1)),
2438 DAG.getSrcValue(I.getOperand(1))));
2441 void SelectionDAGLowering::visitVACopy(CallInst &I) {
2442 DAG.setRoot(DAG.getNode(ISD::VACOPY, MVT::Other, getRoot(),
2443 getValue(I.getOperand(1)),
2444 getValue(I.getOperand(2)),
2445 DAG.getSrcValue(I.getOperand(1)),
2446 DAG.getSrcValue(I.getOperand(2))));
2449 /// TargetLowering::LowerArguments - This is the default LowerArguments
2450 /// implementation, which just inserts a FORMAL_ARGUMENTS node. FIXME: When all
2451 /// targets are migrated to using FORMAL_ARGUMENTS, this hook should be
2452 /// integrated into SDISel.
2453 std::vector<SDOperand>
2454 TargetLowering::LowerArguments(Function &F, SelectionDAG &DAG) {
2455 // Add CC# and isVararg as operands to the FORMAL_ARGUMENTS node.
2456 std::vector<SDOperand> Ops;
2457 Ops.push_back(DAG.getRoot());
2458 Ops.push_back(DAG.getConstant(F.getCallingConv(), getPointerTy()));
2459 Ops.push_back(DAG.getConstant(F.isVarArg(), getPointerTy()));
2461 // Add one result value for each formal argument.
2462 std::vector<MVT::ValueType> RetVals;
2463 for (Function::arg_iterator I = F.arg_begin(), E = F.arg_end(); I != E; ++I) {
2464 MVT::ValueType VT = getValueType(I->getType());
2466 switch (getTypeAction(VT)) {
2467 default: assert(0 && "Unknown type action!");
2469 RetVals.push_back(VT);
2472 RetVals.push_back(getTypeToTransformTo(VT));
2475 if (VT != MVT::Vector) {
2476 // If this is a large integer, it needs to be broken up into small
2477 // integers. Figure out what the destination type is and how many small
2478 // integers it turns into.
2479 MVT::ValueType NVT = getTypeToTransformTo(VT);
2480 unsigned NumVals = MVT::getSizeInBits(VT)/MVT::getSizeInBits(NVT);
2481 for (unsigned i = 0; i != NumVals; ++i)
2482 RetVals.push_back(NVT);
2484 // Otherwise, this is a vector type. We only support legal vectors right now.
2486 unsigned NumElems = cast<PackedType>(I->getType())->getNumElements();
2487 const Type *EltTy = cast<PackedType>(I->getType())->getElementType();
2489 // Figure out if there is a Packed type corresponding to this Vector
2490 // type. If so, convert to the packed type.
2491 MVT::ValueType TVT = MVT::getVectorType(getValueType(EltTy), NumElems);
2492 if (TVT != MVT::Other && isTypeLegal(TVT)) {
2493 RetVals.push_back(TVT);
2495 assert(0 && "Don't support illegal by-val vector arguments yet!");
2502 RetVals.push_back(MVT::Other);
2505 SDNode *Result = DAG.getNode(ISD::FORMAL_ARGUMENTS,
2506 DAG.getNodeValueTypes(RetVals), RetVals.size(),
2507 &Ops[0], Ops.size()).Val;
2509 DAG.setRoot(SDOperand(Result, Result->getNumValues()-1));
2511 // Set up the return result vector.
2514 for (Function::arg_iterator I = F.arg_begin(), E = F.arg_end(); I != E; ++I) {
2515 MVT::ValueType VT = getValueType(I->getType());
2517 switch (getTypeAction(VT)) {
2518 default: assert(0 && "Unknown type action!");
2520 Ops.push_back(SDOperand(Result, i++));
2523 SDOperand Op(Result, i++);
2524 if (MVT::isInteger(VT)) {
2525 unsigned AssertOp = I->getType()->isSigned() ? ISD::AssertSext
2527 Op = DAG.getNode(AssertOp, Op.getValueType(), Op, DAG.getValueType(VT));
2528 Op = DAG.getNode(ISD::TRUNCATE, VT, Op);
2530 assert(MVT::isFloatingPoint(VT) && "Not int or FP?");
2531 Op = DAG.getNode(ISD::FP_ROUND, VT, Op);
2537 if (VT != MVT::Vector) {
2538 // If this is a large integer, it needs to be reassembled from small
2539 // integers. Figure out what the source elt type is and how many small integers it was split into.
2541 MVT::ValueType NVT = getTypeToTransformTo(VT);
2542 unsigned NumVals = MVT::getSizeInBits(VT)/MVT::getSizeInBits(NVT);
2544 SDOperand Lo = SDOperand(Result, i++);
2545 SDOperand Hi = SDOperand(Result, i++);
2547 if (!isLittleEndian())
2550 Ops.push_back(DAG.getNode(ISD::BUILD_PAIR, VT, Lo, Hi));
2552 // Value scalarized into many values. Unimp for now.
2553 assert(0 && "Cannot expand i64 -> i16 yet!");
2556 // Otherwise, this is a vector type. We only support legal vectors right now.
2558 const PackedType *PTy = cast<PackedType>(I->getType());
2559 unsigned NumElems = PTy->getNumElements();
2560 const Type *EltTy = PTy->getElementType();
2562 // Figure out if there is a Packed type corresponding to this Vector
2563 // type. If so, convert to the packed type.
2564 MVT::ValueType TVT = MVT::getVectorType(getValueType(EltTy), NumElems);
2565 if (TVT != MVT::Other && isTypeLegal(TVT)) {
2566 SDOperand N = SDOperand(Result, i++);
2567 // Handle copies from generic vectors to registers.
2568 N = DAG.getNode(ISD::VBIT_CONVERT, MVT::Vector, N,
2569 DAG.getConstant(NumElems, MVT::i32),
2570 DAG.getValueType(getValueType(EltTy)));
2573 assert(0 && "Don't support illegal by-val vector arguments yet!");
2584 /// TargetLowering::LowerCallTo - This is the default LowerCallTo
2585 /// implementation, which just inserts an ISD::CALL node, which is later custom
2586 /// lowered by the target to something concrete. FIXME: When all targets are
2587 /// migrated to using ISD::CALL, this hook should be integrated into SDISel.
2588 std::pair<SDOperand, SDOperand>
2589 TargetLowering::LowerCallTo(SDOperand Chain, const Type *RetTy, bool isVarArg,
2590 unsigned CallingConv, bool isTailCall,
2592 ArgListTy &Args, SelectionDAG &DAG) {
2593 SmallVector<SDOperand, 32> Ops;
2594 Ops.push_back(Chain); // Op#0 - Chain
2595 Ops.push_back(DAG.getConstant(CallingConv, getPointerTy())); // Op#1 - CC
2596 Ops.push_back(DAG.getConstant(isVarArg, getPointerTy())); // Op#2 - VarArg
2597 Ops.push_back(DAG.getConstant(isTailCall, getPointerTy())); // Op#3 - Tail
2598 Ops.push_back(Callee);
2600 // Handle all of the outgoing arguments.
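// Each argument value is pushed followed by an i32 constant recording whether
// the argument is signed.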
2601 for (unsigned i = 0, e = Args.size(); i != e; ++i) {
2602 MVT::ValueType VT = getValueType(Args[i].second);
2603 SDOperand Op = Args[i].first;
2604 bool isSigned = Args[i].second->isSigned();
2605 switch (getTypeAction(VT)) {
2606 default: assert(0 && "Unknown type action!");
2609 Ops.push_back(DAG.getConstant(isSigned, MVT::i32));
2612 if (MVT::isInteger(VT)) {
2613 unsigned ExtOp = isSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
2614 Op = DAG.getNode(ExtOp, getTypeToTransformTo(VT), Op);
2616 assert(MVT::isFloatingPoint(VT) && "Not int or FP?");
2617 Op = DAG.getNode(ISD::FP_EXTEND, getTypeToTransformTo(VT), Op);
2620 Ops.push_back(DAG.getConstant(isSigned, MVT::i32));
2623 if (VT != MVT::Vector) {
2624 // If this is a large integer, it needs to be broken down into small
2625 // integers. Figure out what the source elt type is and how many small integers it turns into.
2627 MVT::ValueType NVT = getTypeToTransformTo(VT);
2628 unsigned NumVals = MVT::getSizeInBits(VT)/MVT::getSizeInBits(NVT);
2630 SDOperand Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, NVT, Op,
2631 DAG.getConstant(0, getPointerTy()));
2632 SDOperand Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, NVT, Op,
2633 DAG.getConstant(1, getPointerTy()));
2634 if (!isLittleEndian())
2638 Ops.push_back(DAG.getConstant(isSigned, MVT::i32));
2640 Ops.push_back(DAG.getConstant(isSigned, MVT::i32));
2642 // Value scalarized into many values. Unimp for now.
2643 assert(0 && "Cannot expand i64 -> i16 yet!");
2646 // Otherwise, this is a vector type. We only support legal vectors right now.
2648 const PackedType *PTy = cast<PackedType>(Args[i].second);
2649 unsigned NumElems = PTy->getNumElements();
2650 const Type *EltTy = PTy->getElementType();
2652 // Figure out if there is a Packed type corresponding to this Vector
2653 // type. If so, convert to the packed type.
2654 MVT::ValueType TVT = MVT::getVectorType(getValueType(EltTy), NumElems);
2655 if (TVT != MVT::Other && isTypeLegal(TVT)) {
2656 // Insert a VBIT_CONVERT of the MVT::Vector type to the packed type.
2657 Op = DAG.getNode(ISD::VBIT_CONVERT, TVT, Op);
2659 Ops.push_back(DAG.getConstant(isSigned, MVT::i32));
2661 assert(0 && "Don't support illegal by-val vector call args yet!");
2669 // Figure out the result value types.
2670 SmallVector<MVT::ValueType, 4> RetTys;
2672 if (RetTy != Type::VoidTy) {
2673 MVT::ValueType VT = getValueType(RetTy);
2674 switch (getTypeAction(VT)) {
2675 default: assert(0 && "Unknown type action!");
2677 RetTys.push_back(VT);
2680 RetTys.push_back(getTypeToTransformTo(VT));
2683 if (VT != MVT::Vector) {
2684 // If this is a large integer, it needs to be reassembled from small
2685 // integers. Figure out what the source elt type is and how many small integers it was split into.
2687 MVT::ValueType NVT = getTypeToTransformTo(VT);
2688 unsigned NumVals = MVT::getSizeInBits(VT)/MVT::getSizeInBits(NVT);
2689 for (unsigned i = 0; i != NumVals; ++i)
2690 RetTys.push_back(NVT);
2692 // Otherwise, this is a vector type. We only support legal vectors right now.
2694 const PackedType *PTy = cast<PackedType>(RetTy);
2695 unsigned NumElems = PTy->getNumElements();
2696 const Type *EltTy = PTy->getElementType();
2698 // Figure out if there is a Packed type corresponding to this Vector
2699 // type. If so, convert to the packed type.
2700 MVT::ValueType TVT = MVT::getVectorType(getValueType(EltTy), NumElems);
2701 if (TVT != MVT::Other && isTypeLegal(TVT)) {
2702 RetTys.push_back(TVT);
2704 assert(0 && "Don't support illegal by-val vector call results yet!");
2711 RetTys.push_back(MVT::Other); // Always has a chain.
2713 // Finally, create the CALL node.
2714 SDOperand Res = DAG.getNode(ISD::CALL,
2715 DAG.getVTList(&RetTys[0], RetTys.size()),
2716 &Ops[0], Ops.size());
2718 // This returns a pair of operands. The first element is the
2719 // return value for the function (if RetTy is not VoidTy). The second
2720 // element is the outgoing token chain.
2722 if (RetTys.size() != 1) {
2723 MVT::ValueType VT = getValueType(RetTy);
2724 if (RetTys.size() == 2) {
2727 // If this value was promoted, truncate it down.
2728 if (ResVal.getValueType() != VT) {
2729 if (VT == MVT::Vector) {
2730 // Insert a VBIT_CONVERT to convert from the packed result type to the
2731 // MVT::Vector type.
2732 unsigned NumElems = cast<PackedType>(RetTy)->getNumElements();
2733 const Type *EltTy = cast<PackedType>(RetTy)->getElementType();
2735 // Figure out if there is a Packed type corresponding to this Vector
2736 // type. If so, convert to the packed type.
2737 MVT::ValueType TVT = MVT::getVectorType(getValueType(EltTy), NumElems);
2738 if (TVT != MVT::Other && isTypeLegal(TVT)) {
2739 // Insert a VBIT_CONVERT of the call result to a
2740 // "N x EltVT" MVT::Vector type.
2741 ResVal = DAG.getNode(ISD::VBIT_CONVERT, MVT::Vector, ResVal,
2742 DAG.getConstant(NumElems, MVT::i32),
2743 DAG.getValueType(getValueType(EltTy)));
2747 } else if (MVT::isInteger(VT)) {
2748 unsigned AssertOp = RetTy->isSigned() ?
2749 ISD::AssertSext : ISD::AssertZext;
2750 ResVal = DAG.getNode(AssertOp, ResVal.getValueType(), ResVal,
2751 DAG.getValueType(VT));
2752 ResVal = DAG.getNode(ISD::TRUNCATE, VT, ResVal);
2754 assert(MVT::isFloatingPoint(VT));
2755 ResVal = DAG.getNode(ISD::FP_ROUND, VT, ResVal);
2758 } else if (RetTys.size() == 3) {
2759 ResVal = DAG.getNode(ISD::BUILD_PAIR, VT,
2760 Res.getValue(0), Res.getValue(1));
2763 assert(0 && "Case not handled yet!");
2767 return std::make_pair(ResVal, Res.getValue(Res.Val->getNumValues()-1));
2772 // It is always conservatively correct for llvm.returnaddress and
2773 // llvm.frameaddress to return 0.
2775 // FIXME: Change this to insert a FRAMEADDR/RETURNADDR node, and have that be
2776 // expanded to 0 if the target wants.
2777 std::pair<SDOperand, SDOperand>
2778 TargetLowering::LowerFrameReturnAddress(bool isFrameAddr, SDOperand Chain,
2779 unsigned Depth, SelectionDAG &DAG) {
2780 return std::make_pair(DAG.getConstant(0, getPointerTy()), Chain);
2783 SDOperand TargetLowering::LowerOperation(SDOperand Op, SelectionDAG &DAG) {
2784 assert(0 && "LowerOperation not implemented for this target!");
2789 SDOperand TargetLowering::CustomPromoteOperation(SDOperand Op,
2790 SelectionDAG &DAG) {
2791 assert(0 && "CustomPromoteOperation not implemented for this target!");
2796 void SelectionDAGLowering::visitFrameReturnAddress(CallInst &I, bool isFrame) {
2797 unsigned Depth = (unsigned)cast<ConstantInt>(I.getOperand(1))->getZExtValue();
2798 std::pair<SDOperand,SDOperand> Result =
2799 TLI.LowerFrameReturnAddress(isFrame, getRoot(), Depth, DAG);
2800 setValue(&I, Result.first);
2801 DAG.setRoot(Result.second);
2804 /// getMemsetValue - Vectorized representation of the memset value operand.
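/// For example, a memset of the byte value 0xAB emitted with i32 stores uses
/// the splat constant 0xABABABAB.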
2806 static SDOperand getMemsetValue(SDOperand Value, MVT::ValueType VT,
2807 SelectionDAG &DAG) {
2808 MVT::ValueType CurVT = VT;
2809 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Value)) {
2810 uint64_t Val = C->getValue() & 255;
2812 while (CurVT != MVT::i8) {
2813 Val = (Val << Shift) | Val;
2815 CurVT = (MVT::ValueType)((unsigned)CurVT - 1);
2817 return DAG.getConstant(Val, VT);
2819 Value = DAG.getNode(ISD::ZERO_EXTEND, VT, Value);
2821 while (CurVT != MVT::i8) {
2823 DAG.getNode(ISD::OR, VT,
2824 DAG.getNode(ISD::SHL, VT, Value,
2825 DAG.getConstant(Shift, MVT::i8)), Value);
2827 CurVT = (MVT::ValueType)((unsigned)CurVT - 1);
2834 /// getMemsetStringVal - Similar to getMemsetValue. Except this is only
2835 /// used when a memcpy is turned into a memset when the source is a constant string.
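/// For example, with VT == MVT::i32 on a little-endian target, the source
/// bytes "abcd" produce the constant 0x64636261.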
2837 static SDOperand getMemsetStringVal(MVT::ValueType VT,
2838 SelectionDAG &DAG, TargetLowering &TLI,
2839 std::string &Str, unsigned Offset) {
2840 MVT::ValueType CurVT = VT;
2842 unsigned MSB = getSizeInBits(VT) / 8;
2843 if (TLI.isLittleEndian())
2844 Offset = Offset + MSB - 1;
2845 for (unsigned i = 0; i != MSB; ++i) {
2846 Val = (Val << 8) | Str[Offset];
2847 Offset += TLI.isLittleEndian() ? -1 : 1;
2849 return DAG.getConstant(Val, VT);
2852 /// getMemBasePlusOffset - Returns the base pointer plus Offset as a single ADD node.
2853 static SDOperand getMemBasePlusOffset(SDOperand Base, unsigned Offset,
2854 SelectionDAG &DAG, TargetLowering &TLI) {
2855 MVT::ValueType VT = Base.getValueType();
2856 return DAG.getNode(ISD::ADD, VT, Base, DAG.getConstant(Offset, VT));
2859 /// MeetsMaxMemopRequirement - Determines if the number of memory ops required
2860 /// to replace the memset / memcpy is below the threshold. It also returns the
2861 /// types of the sequence of memory ops to perform memset / memcpy.
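/// For example, a 15-byte memset whose alignment permits i64 stores is covered
/// by the sequence { i64, i32, i16, i8 }, i.e. four operations.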
2862 static bool MeetsMaxMemopRequirement(std::vector<MVT::ValueType> &MemOps,
2863 unsigned Limit, uint64_t Size,
2864 unsigned Align, TargetLowering &TLI) {
2867 if (TLI.allowsUnalignedMemoryAccesses()) {
2870 switch (Align & 7) {
2886 MVT::ValueType LVT = MVT::i64;
2887 while (!TLI.isTypeLegal(LVT))
2888 LVT = (MVT::ValueType)((unsigned)LVT - 1);
2889 assert(MVT::isInteger(LVT));
2894 unsigned NumMemOps = 0;
2896 unsigned VTSize = getSizeInBits(VT) / 8;
2897 while (VTSize > Size) {
2898 VT = (MVT::ValueType)((unsigned)VT - 1);
2901 assert(MVT::isInteger(VT));
2903 if (++NumMemOps > Limit)
2905 MemOps.push_back(VT);
2912 void SelectionDAGLowering::visitMemIntrinsic(CallInst &I, unsigned Op) {
2913 SDOperand Op1 = getValue(I.getOperand(1));
2914 SDOperand Op2 = getValue(I.getOperand(2));
2915 SDOperand Op3 = getValue(I.getOperand(3));
2916 SDOperand Op4 = getValue(I.getOperand(4));
2917 unsigned Align = (unsigned)cast<ConstantSDNode>(Op4)->getValue();
2918 if (Align == 0) Align = 1;
2920 if (ConstantSDNode *Size = dyn_cast<ConstantSDNode>(Op3)) {
2921 std::vector<MVT::ValueType> MemOps;
2923 // Expand memset / memcpy to a series of load / store ops
2924 // if the size operand falls below a certain threshold.
2925 SmallVector<SDOperand, 8> OutChains;
2927 default: break; // Do nothing for now.
2929 if (MeetsMaxMemopRequirement(MemOps, TLI.getMaxStoresPerMemset(),
2930 Size->getValue(), Align, TLI)) {
2931 unsigned NumMemOps = MemOps.size();
2932 unsigned Offset = 0;
2933 for (unsigned i = 0; i < NumMemOps; i++) {
2934 MVT::ValueType VT = MemOps[i];
2935 unsigned VTSize = getSizeInBits(VT) / 8;
2936 SDOperand Value = getMemsetValue(Op2, VT, DAG);
2937 SDOperand Store = DAG.getStore(getRoot(), Value,
2938 getMemBasePlusOffset(Op1, Offset, DAG, TLI),
2939 I.getOperand(1), Offset);
2940 OutChains.push_back(Store);
2947 if (MeetsMaxMemopRequirement(MemOps, TLI.getMaxStoresPerMemcpy(),
2948 Size->getValue(), Align, TLI)) {
2949 unsigned NumMemOps = MemOps.size();
2950 unsigned SrcOff = 0, DstOff = 0, SrcDelta = 0;
2951 GlobalAddressSDNode *G = NULL;
2953 bool CopyFromStr = false;
2955 if (Op2.getOpcode() == ISD::GlobalAddress)
2956 G = cast<GlobalAddressSDNode>(Op2);
2957 else if (Op2.getOpcode() == ISD::ADD &&
2958 Op2.getOperand(0).getOpcode() == ISD::GlobalAddress &&
2959 Op2.getOperand(1).getOpcode() == ISD::Constant) {
2960 G = cast<GlobalAddressSDNode>(Op2.getOperand(0));
2961 SrcDelta = cast<ConstantSDNode>(Op2.getOperand(1))->getValue();
2964 GlobalVariable *GV = dyn_cast<GlobalVariable>(G->getGlobal());
2966 Str = GV->getStringValue(false);
2974 for (unsigned i = 0; i < NumMemOps; i++) {
2975 MVT::ValueType VT = MemOps[i];
2976 unsigned VTSize = getSizeInBits(VT) / 8;
2977 SDOperand Value, Chain, Store;
2980 Value = getMemsetStringVal(VT, DAG, TLI, Str, SrcOff);
2983 DAG.getStore(Chain, Value,
2984 getMemBasePlusOffset(Op1, DstOff, DAG, TLI),
2985 I.getOperand(1), DstOff);
2987 Value = DAG.getLoad(VT, getRoot(),
2988 getMemBasePlusOffset(Op2, SrcOff, DAG, TLI),
2989 I.getOperand(2), SrcOff);
2990 Chain = Value.getValue(1);
2992 DAG.getStore(Chain, Value,
2993 getMemBasePlusOffset(Op1, DstOff, DAG, TLI),
2994 I.getOperand(1), DstOff);
2996 OutChains.push_back(Store);
3005 if (!OutChains.empty()) {
3006 DAG.setRoot(DAG.getNode(ISD::TokenFactor, MVT::Other,
3007 &OutChains[0], OutChains.size()));
3012 DAG.setRoot(DAG.getNode(Op, MVT::Other, getRoot(), Op1, Op2, Op3, Op4));
3015 //===----------------------------------------------------------------------===//
3016 // SelectionDAGISel code
3017 //===----------------------------------------------------------------------===//
3019 unsigned SelectionDAGISel::MakeReg(MVT::ValueType VT) {
3020 return RegMap->createVirtualRegister(TLI.getRegClassFor(VT));
3023 void SelectionDAGISel::getAnalysisUsage(AnalysisUsage &AU) const {
3024 // FIXME: we only modify the CFG to split critical edges. This
3025 // updates dom and loop info.
3026 AU.addRequired<AliasAnalysis>();
3030 /// OptimizeNoopCopyExpression - We have determined that the specified cast
3031 /// instruction is a noop copy (e.g. it's casting from one pointer type to
3032 /// another, int->uint, or int->sbyte on PPC).
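/// For illustration: if such a cast's only out-of-block user is a load in
/// another block, a copy of the cast is inserted at the top of that block and
/// the use is rewritten to it, so only the cast's operand stays live across
/// the block boundary.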
3034 /// Return true if any changes are made.
3035 static bool OptimizeNoopCopyExpression(CastInst *CI) {
3036 BasicBlock *DefBB = CI->getParent();
3038 /// InsertedCasts - Only insert a cast in each block once.
3039 std::map<BasicBlock*, CastInst*> InsertedCasts;
3041 bool MadeChange = false;
3042 for (Value::use_iterator UI = CI->use_begin(), E = CI->use_end();
3044 Use &TheUse = UI.getUse();
3045 Instruction *User = cast<Instruction>(*UI);
3047 // Figure out which BB this cast is used in. For PHI's this is the
3048 // appropriate predecessor block.
3049 BasicBlock *UserBB = User->getParent();
3050 if (PHINode *PN = dyn_cast<PHINode>(User)) {
3051 unsigned OpVal = UI.getOperandNo()/2;
3052 UserBB = PN->getIncomingBlock(OpVal);
3055 // Preincrement use iterator so we don't invalidate it.
3058 // If this user is in the same block as the cast, don't change the cast.
3059 if (UserBB == DefBB) continue;
3061 // If we have already inserted a cast into this block, use it.
3062 CastInst *&InsertedCast = InsertedCasts[UserBB];
3064 if (!InsertedCast) {
3065 BasicBlock::iterator InsertPt = UserBB->begin();
3066 while (isa<PHINode>(InsertPt)) ++InsertPt;
3069 new CastInst(CI->getOperand(0), CI->getType(), "", InsertPt);
3073 // Replace a use of the cast with a use of the new cast.
3074 TheUse = InsertedCast;
3077 // If we removed all uses, nuke the cast.
3078 if (CI->use_empty())
3079 CI->eraseFromParent();
3084 /// InsertGEPComputeCode - Insert code into BB to compute Ptr+PtrOffset,
3085 /// casting to the type of GEPI.
3086 static Instruction *InsertGEPComputeCode(Instruction *&V, BasicBlock *BB,
3087 Instruction *GEPI, Value *Ptr,
3089 if (V) return V; // Already computed.
3091 BasicBlock::iterator InsertPt;
3092 if (BB == GEPI->getParent()) {
3093 // If inserting into the GEP's block, insert right after the GEP.
3097 // Otherwise, insert at the top of BB, after any PHI nodes
3098 InsertPt = BB->begin();
3099 while (isa<PHINode>(InsertPt)) ++InsertPt;
3102 // If Ptr is itself a cast, but in some other BB, emit a copy of the cast into
3103 // BB so that there is only one value live across basic blocks (the cast operand).
3105 if (CastInst *CI = dyn_cast<CastInst>(Ptr))
3106 if (CI->getParent() != BB && isa<PointerType>(CI->getOperand(0)->getType()))
3107 Ptr = new CastInst(CI->getOperand(0), CI->getType(), "", InsertPt);
3109 // Add the offset, cast it to the right type.
3110 Ptr = BinaryOperator::createAdd(Ptr, PtrOffset, "", InsertPt);
3111 return V = new CastInst(Ptr, GEPI->getType(), "", InsertPt);
3114 /// ReplaceUsesOfGEPInst - Replace all uses of RepPtr with inserted code to
3115 /// compute its value. The RepPtr value can be computed with Ptr+PtrOffset. One
3116 /// trivial way of doing this would be to evaluate Ptr+PtrOffset in RepPtr's
3117 /// block, then ReplaceAllUsesWith'ing everything. However, we would prefer to
3118 /// sink PtrOffset into user blocks where doing so will likely allow us to fold
3119 /// the constant add into a load or store instruction. Additionally, if a user
3120 /// is a pointer-pointer cast, we look through it to find its users.
3121 static void ReplaceUsesOfGEPInst(Instruction *RepPtr, Value *Ptr,
3122 Constant *PtrOffset, BasicBlock *DefBB,
3123 GetElementPtrInst *GEPI,
3124 std::map<BasicBlock*,Instruction*> &InsertedExprs) {
3125 while (!RepPtr->use_empty()) {
3126 Instruction *User = cast<Instruction>(RepPtr->use_back());
3128 // If the user is a Pointer-Pointer cast, recurse.
3129 if (isa<CastInst>(User) && isa<PointerType>(User->getType())) {
3130 ReplaceUsesOfGEPInst(User, Ptr, PtrOffset, DefBB, GEPI, InsertedExprs);
3132 // Drop the use of RepPtr. The cast is dead. Don't delete it now, else we
3133 // could invalidate an iterator.
3134 User->setOperand(0, UndefValue::get(RepPtr->getType()));
3138 // If this is a load of the pointer, or a store through the pointer, emit
3139 // the increment into the load/store block.
3140 Instruction *NewVal;
3141 if (isa<LoadInst>(User) ||
3142 (isa<StoreInst>(User) && User->getOperand(0) != RepPtr)) {
3143 NewVal = InsertGEPComputeCode(InsertedExprs[User->getParent()],
3144 User->getParent(), GEPI,
3147 // If this use is not foldable into the addressing mode, use a version
3148 // emitted in the GEP block.
3149 NewVal = InsertGEPComputeCode(InsertedExprs[DefBB], DefBB, GEPI,
3153 if (GEPI->getType() != RepPtr->getType()) {
3154 BasicBlock::iterator IP = NewVal;
3156 NewVal = new CastInst(NewVal, RepPtr->getType(), "", IP);
3158 User->replaceUsesOfWith(RepPtr, NewVal);
3163 /// OptimizeGEPExpression - Since we are doing basic-block-at-a-time instruction
3164 /// selection, we want to be a bit careful about some things. In particular, if
3165 /// we have a GEP instruction that is used in a different block than it is
3166 /// defined, the addressing expression of the GEP cannot be folded into loads or
3167 /// stores that use it. In this case, decompose the GEP and move constant
3168 /// indices into blocks that use it.
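/// For illustration: a GEP whose non-zero indices reduce to a constant byte
/// offset of 4, used only by a load in another block, is rewritten so that an
/// 'add Ptr, 4' is emitted in front of that load, where it can be folded into
/// the load's addressing mode.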
3169 static bool OptimizeGEPExpression(GetElementPtrInst *GEPI,
3170 const TargetData *TD) {
3171 // If this GEP is only used inside the block it is defined in, there is no
3172 // need to rewrite it.
3173 bool isUsedOutsideDefBB = false;
3174 BasicBlock *DefBB = GEPI->getParent();
3175 for (Value::use_iterator UI = GEPI->use_begin(), E = GEPI->use_end();
3177 if (cast<Instruction>(*UI)->getParent() != DefBB) {
3178 isUsedOutsideDefBB = true;
3182 if (!isUsedOutsideDefBB) return false;
3184 // If this GEP has no non-zero constant indices, there is nothing we can do; ignore it.
3186 bool hasConstantIndex = false;
3187 bool hasVariableIndex = false;
3188 for (GetElementPtrInst::op_iterator OI = GEPI->op_begin()+1,
3189 E = GEPI->op_end(); OI != E; ++OI) {
3190 if (ConstantInt *CI = dyn_cast<ConstantInt>(*OI)) {
3191 if (CI->getZExtValue()) {
3192 hasConstantIndex = true;
3196 hasVariableIndex = true;
3200 // If this is a "GEP X, 0, 0, 0", turn this into a cast.
3201 if (!hasConstantIndex && !hasVariableIndex) {
3202 Value *NC = new CastInst(GEPI->getOperand(0), GEPI->getType(),
3203 GEPI->getName(), GEPI);
3204 GEPI->replaceAllUsesWith(NC);
3205 GEPI->eraseFromParent();
3209 // If this is a GEP &Alloca, 0, 0, forward subst the frame index into uses.
3210 if (!hasConstantIndex && !isa<AllocaInst>(GEPI->getOperand(0)))
3213 // Otherwise, decompose the GEP instruction into multiplies and adds. Sum the
3214 // constant offset (which we now know is non-zero) and deal with it later.
3215 uint64_t ConstantOffset = 0;
3216 const Type *UIntPtrTy = TD->getIntPtrType();
3217 Value *Ptr = new CastInst(GEPI->getOperand(0), UIntPtrTy, "", GEPI);
3218 const Type *Ty = GEPI->getOperand(0)->getType();
3220 for (GetElementPtrInst::op_iterator OI = GEPI->op_begin()+1,
3221 E = GEPI->op_end(); OI != E; ++OI) {
3223 if (const StructType *StTy = dyn_cast<StructType>(Ty)) {
3224 unsigned Field = cast<ConstantInt>(Idx)->getZExtValue();
3226 ConstantOffset += TD->getStructLayout(StTy)->MemberOffsets[Field];
3227 Ty = StTy->getElementType(Field);
3229 Ty = cast<SequentialType>(Ty)->getElementType();
3231 // Handle constant subscripts.
3232 if (ConstantInt *CI = dyn_cast<ConstantInt>(Idx)) {
3233 if (CI->getZExtValue() == 0) continue;
3234 if (CI->getType()->isSigned())
3235 ConstantOffset += (int64_t)TD->getTypeSize(Ty)*CI->getSExtValue();
3237 ConstantOffset += TD->getTypeSize(Ty)*CI->getZExtValue();
3241 // Ptr = Ptr + Idx * ElementSize;
3243 // Cast Idx to UIntPtrTy if needed.
3244 Idx = new CastInst(Idx, UIntPtrTy, "", GEPI);
3246 uint64_t ElementSize = TD->getTypeSize(Ty);
3247 // Mask off bits that should not be set.
3248 ElementSize &= ~0ULL >> (64-UIntPtrTy->getPrimitiveSizeInBits());
3249 Constant *SizeCst = ConstantInt::get(UIntPtrTy, ElementSize);
3251 // Multiply by the element size and add to the base.
3252 Idx = BinaryOperator::createMul(Idx, SizeCst, "", GEPI);
3253 Ptr = BinaryOperator::createAdd(Ptr, Idx, "", GEPI);
3257 // Make sure that the offset fits in uintptr_t.
3258 ConstantOffset &= ~0ULL >> (64-UIntPtrTy->getPrimitiveSizeInBits());
3259 Constant *PtrOffset = ConstantInt::get(UIntPtrTy, ConstantOffset);
3261 // Okay, we have now emitted all of the variable index parts to the BB that
3262 // the GEP is defined in. Loop over all of the using instructions, inserting
3263 // an "add Ptr, ConstantOffset" into each block that uses it and update the
3264 // instruction to use the newly computed value, making GEPI dead. When the
3265 // user is a load or store instruction address, we emit the add into the user
3266 // block, otherwise we use a canonical version right next to the gep (these
3267 // won't be foldable as addresses, so we might as well share the computation).
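// For example (illustrative), a store in another block that addressed GEPI now
// gets its own "add Ptr, ConstantOffset" emitted immediately before it (cached
// per block in InsertedExprs), which the block-local selector can fold into the
// store's addressing mode.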
3269 std::map<BasicBlock*,Instruction*> InsertedExprs;
3270 ReplaceUsesOfGEPInst(GEPI, Ptr, PtrOffset, DefBB, GEPI, InsertedExprs);
3272 // Finally, the GEP is dead, remove it.
3273 GEPI->eraseFromParent();
3274 return true;
3275 }
3278 /// SplitCritEdgesForPHIConstants - If this block has any PHI nodes with
3279 /// constant operands, and if any of the edges feeding the PHI node are
3280 /// critical, split them so that the assignments of a constant to a register
3281 /// will not be executed on a path that isn't relevant.
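/// For example (illustrative): given "%p = phi int [ 7, %A ], [ %x, %B ]" where
/// %A has several successors, the copy of the constant 7 into a virtual register
/// would otherwise be emitted at the end of %A and executed even on paths that
/// never reach this block; splitting the critical %A -> BB edge gives that copy
/// a dedicated block.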
3282 void SelectionDAGISel::SplitCritEdgesForPHIConstants(BasicBlock *BB) {
3283 // The most common case is that this is a PHI node with two incoming
3284 // values; handle this case efficiently, because it is simple.
3285 PHINode *PN = cast<PHINode>(BB->begin());
3286 if (PN->getNumIncomingValues() == 2) {
3287 // If neither edge is critical, we never need to split.
3288 if (PN->getIncomingBlock(0)->getTerminator()->getNumSuccessors() == 1 &&
3289 PN->getIncomingBlock(1)->getTerminator()->getNumSuccessors() == 1)
3290 return;
3292 BasicBlock::iterator BBI = BB->begin();
3293 while ((PN = dyn_cast<PHINode>(BBI++))) {
3294 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
3295 if (isa<Constant>(PN->getIncomingValue(i)))
3296 SplitCriticalEdge(PN->getIncomingBlock(i), BB);
3297 }
3298 return;
3299 }
3301 // Otherwise, things are a bit trickier.
3305 BasicBlock::iterator BBI = BB->begin();
3306 while ((PN = dyn_cast<PHINode>(BBI++))) {
3307 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
3308 if (isa<Constant>(PN->getIncomingValue(i)))
3309 SplitCriticalEdge(PN->getIncomingBlock(i), BB);
3310 }
3311 }
3314 bool SelectionDAGISel::runOnFunction(Function &Fn) {
3315 MachineFunction &MF = MachineFunction::construct(&Fn, TLI.getTargetMachine());
3316 RegMap = MF.getSSARegMap();
3317 DEBUG(std::cerr << "\n\n\n=== " << Fn.getName() << "\n");
3319 // First, split all critical edges for PHI nodes with incoming values that are
3320 // constants, this way the load of the constant into a vreg will not be placed
3321 // into MBBs that are used some other way.
3323 // In this pass we also look for GEP and cast instructions that are used
3324 // across basic blocks and rewrite them to improve basic-block-at-a-time
3325 // selection.
3328 bool MadeChange = true;
3329 while (MadeChange) {
3330 MadeChange = false;
3331 for (Function::iterator BB = Fn.begin(), E = Fn.end(); BB != E; ++BB) {
3332 // If this block has any PHI nodes with constant operands, and if any of the
3333 // edges feeding the PHI node are critical, split them.
3334 if (isa<PHINode>(BB->begin()))
3335 SplitCritEdgesForPHIConstants(BB);
3337 for (BasicBlock::iterator BBI = BB->begin(), E = BB->end(); BBI != E; ) {
3338 Instruction *I = BBI++;
3339 if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(I)) {
3340 MadeChange |= OptimizeGEPExpression(GEPI, TLI.getTargetData());
3341 } else if (CastInst *CI = dyn_cast<CastInst>(I)) {
3342 // If the source of the cast is a constant, then this should have
3343 // already been constant folded. The only reason NOT to constant fold
3344 // it is if something (e.g. LSR) was careful to place the constant
3345 // evaluation in a block other than the one that uses it (e.g. to hoist
3346 // the address of globals out of a loop). If this is the case, we don't
3347 // want to forward-subst the cast.
3348 if (isa<Constant>(CI->getOperand(0)))
3349 continue;
3351 // If this is a noop copy, sink it into user blocks to reduce the number
3352 // of virtual registers that must be created and coalesced.
3353 MVT::ValueType SrcVT = TLI.getValueType(CI->getOperand(0)->getType());
3354 MVT::ValueType DstVT = TLI.getValueType(CI->getType());
3356 // This is an fp<->int conversion?
3357 if (MVT::isInteger(SrcVT) != MVT::isInteger(DstVT))
3358 continue;
3360 // If this is an extension, it will be a zero or sign extension, which
3361 // isn't a noop.
3362 if (SrcVT < DstVT) continue;
3364 // If these values will be promoted, find out what they will be promoted
3365 // to. This helps us consider truncates on PPC as noop copies when they
3366 // are.
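// For example (illustrative): on a target that promotes i16 to i32, a
// "cast int %x to short" truncate has SrcVT == DstVT after promotion, so it is
// really just a register copy and is worth sinking into its user blocks.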
3367 if (TLI.getTypeAction(SrcVT) == TargetLowering::Promote)
3368 SrcVT = TLI.getTypeToTransformTo(SrcVT);
3369 if (TLI.getTypeAction(DstVT) == TargetLowering::Promote)
3370 DstVT = TLI.getTypeToTransformTo(DstVT);
3372 // If, after promotion, these are the same types, this is a noop copy.
3373 if (SrcVT != DstVT) continue;
3374 MadeChange |= OptimizeNoopCopyExpression(CI);
3375 }
3376 }
3377 }
3378 }
3380 FunctionLoweringInfo FuncInfo(TLI, Fn, MF);
3382 for (Function::iterator I = Fn.begin(), E = Fn.end(); I != E; ++I)
3383 SelectBasicBlock(I, MF, FuncInfo);
3384 return true;
3385 }
3389 SDOperand SelectionDAGISel::
3390 CopyValueToVirtualRegister(SelectionDAGLowering &SDL, Value *V, unsigned Reg) {
3391 SDOperand Op = SDL.getValue(V);
3392 assert((Op.getOpcode() != ISD::CopyFromReg ||
3393 cast<RegisterSDNode>(Op.getOperand(1))->getReg() != Reg) &&
3394 "Copy from a reg to the same reg!");
3396 // If this type is not legal, we must make sure to not create an invalid
3397 // register use.
3398 MVT::ValueType SrcVT = Op.getValueType();
3399 MVT::ValueType DestVT = TLI.getTypeToTransformTo(SrcVT);
3400 SelectionDAG &DAG = SDL.DAG;
3401 if (SrcVT == DestVT) {
3402 return DAG.getCopyToReg(SDL.getRoot(), Reg, Op);
3403 } else if (SrcVT == MVT::Vector) {
3404 // Handle copies from generic vectors to registers.
3405 MVT::ValueType PTyElementVT, PTyLegalElementVT;
3406 unsigned NE = TLI.getPackedTypeBreakdown(cast<PackedType>(V->getType()),
3407 PTyElementVT, PTyLegalElementVT);
3409 // Insert a VBIT_CONVERT of the input vector to a "N x PTyElementVT"
3410 // MVT::Vector type.
3411 Op = DAG.getNode(ISD::VBIT_CONVERT, MVT::Vector, Op,
3412 DAG.getConstant(NE, MVT::i32),
3413 DAG.getValueType(PTyElementVT));
3415 // Loop over all of the elements of the resultant vector,
3416 // VEXTRACT_VECTOR_ELT'ing them, converting them to PTyLegalElementVT, then
3417 // copying them into output registers.
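// For example (illustrative): a 4-element packed vector of int on a target
// where i32 is legal gives NE == 4 and one CopyToReg per element; if the
// element type must instead be promoted or expanded, each element is widened
// or split into Lo/Hi halves below before being copied out.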
3418 SmallVector<SDOperand, 8> OutChains;
3419 SDOperand Root = SDL.getRoot();
3420 for (unsigned i = 0; i != NE; ++i) {
3421 SDOperand Elt = DAG.getNode(ISD::VEXTRACT_VECTOR_ELT, PTyElementVT,
3422 Op, DAG.getConstant(i, TLI.getPointerTy()));
3423 if (PTyElementVT == PTyLegalElementVT) {
3424 // Elements are legal.
3425 OutChains.push_back(DAG.getCopyToReg(Root, Reg++, Elt));
3426 } else if (PTyLegalElementVT > PTyElementVT) {
3427 // Elements are promoted.
3428 if (MVT::isFloatingPoint(PTyLegalElementVT))
3429 Elt = DAG.getNode(ISD::FP_EXTEND, PTyLegalElementVT, Elt);
3430 else
3431 Elt = DAG.getNode(ISD::ANY_EXTEND, PTyLegalElementVT, Elt);
3432 OutChains.push_back(DAG.getCopyToReg(Root, Reg++, Elt));
3433 } else {
3434 // Elements are expanded.
3435 // The src value is expanded into multiple registers.
3436 SDOperand Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, PTyLegalElementVT,
3437 Elt, DAG.getConstant(0, TLI.getPointerTy()));
3438 SDOperand Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, PTyLegalElementVT,
3439 Elt, DAG.getConstant(1, TLI.getPointerTy()));
3440 OutChains.push_back(DAG.getCopyToReg(Root, Reg++, Lo));
3441 OutChains.push_back(DAG.getCopyToReg(Root, Reg++, Hi));
3442 }
3443 }
3444 return DAG.getNode(ISD::TokenFactor, MVT::Other,
3445 &OutChains[0], OutChains.size());
3446 } else if (SrcVT < DestVT) {
3447 // The src value is promoted to the register.
3448 if (MVT::isFloatingPoint(SrcVT))
3449 Op = DAG.getNode(ISD::FP_EXTEND, DestVT, Op);
3450 else
3451 Op = DAG.getNode(ISD::ANY_EXTEND, DestVT, Op);
3452 return DAG.getCopyToReg(SDL.getRoot(), Reg, Op);
3453 } else {
3454 // The src value is expanded into multiple registers.
3455 SDOperand Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DestVT,
3456 Op, DAG.getConstant(0, TLI.getPointerTy()));
3457 SDOperand Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DestVT,
3458 Op, DAG.getConstant(1, TLI.getPointerTy()));
3459 Op = DAG.getCopyToReg(SDL.getRoot(), Reg, Lo);
3460 return DAG.getCopyToReg(Op, Reg+1, Hi);
3461 }
3462 }
3464 void SelectionDAGISel::
3465 LowerArguments(BasicBlock *BB, SelectionDAGLowering &SDL,
3466 std::vector<SDOperand> &UnorderedChains) {
3467 // If this is the entry block, emit arguments.
3468 Function &F = *BB->getParent();
3469 FunctionLoweringInfo &FuncInfo = SDL.FuncInfo;
3470 SDOperand OldRoot = SDL.DAG.getRoot();
3471 std::vector<SDOperand> Args = TLI.LowerArguments(F, SDL.DAG);
3473 unsigned a = 0;
3474 for (Function::arg_iterator AI = F.arg_begin(), E = F.arg_end();
3475 AI != E; ++AI, ++a)
3476 if (!AI->use_empty()) {
3477 SDL.setValue(AI, Args[a]);
3479 // If this argument is live outside of the entry block, insert a copy from
3480 // wherever we got it to the vreg that other BBs will reference it as.
3481 if (FuncInfo.ValueMap.count(AI)) {
3482 SDOperand Copy =
3483 CopyValueToVirtualRegister(SDL, AI, FuncInfo.ValueMap[AI]);
3484 UnorderedChains.push_back(Copy);
3485 }
3486 }
3488 // Finally, if the target has anything special to do, allow it to do so.
3489 // FIXME: this should insert code into the DAG!
3490 EmitFunctionEntryCode(F, SDL.DAG.getMachineFunction());
3491 }
3493 void SelectionDAGISel::BuildSelectionDAG(SelectionDAG &DAG, BasicBlock *LLVMBB,
3494 std::vector<std::pair<MachineInstr*, unsigned> > &PHINodesToUpdate,
3495 FunctionLoweringInfo &FuncInfo) {
3496 SelectionDAGLowering SDL(DAG, TLI, FuncInfo);
3498 std::vector<SDOperand> UnorderedChains;
3500 // Lower any arguments needed in this block if this is the entry block.
3501 if (LLVMBB == &LLVMBB->getParent()->front())
3502 LowerArguments(LLVMBB, SDL, UnorderedChains);
3504 BB = FuncInfo.MBBMap[LLVMBB];
3505 SDL.setCurrentBasicBlock(BB);
3507 // Lower all of the non-terminator instructions.
3508 for (BasicBlock::iterator I = LLVMBB->begin(), E = --LLVMBB->end();
3509 I != E; ++I)
3510 SDL.visit(*I);
3512 // Ensure that all instructions which are used outside of their defining
3513 // blocks are available as virtual registers.
3514 for (BasicBlock::iterator I = LLVMBB->begin(), E = LLVMBB->end(); I != E;++I)
3515 if (!I->use_empty() && !isa<PHINode>(I)) {
3516 std::map<const Value*, unsigned>::iterator VMI = FuncInfo.ValueMap.find(I);
3517 if (VMI != FuncInfo.ValueMap.end())
3518 UnorderedChains.push_back(
3519 CopyValueToVirtualRegister(SDL, I, VMI->second));
3520 }
3522 // Handle PHI nodes in successor blocks. Emit code into the SelectionDAG to
3523 // ensure constants are generated when needed. Remember the virtual registers
3524 // that need to be added to the Machine PHI nodes as input. We cannot just
3525 // directly add them, because expansion might result in multiple MBB's for one
3526 // BB. As such, the start of the BB might correspond to a different MBB than
3527 // the end.
3530 // Emit constants only once even if used by multiple PHI nodes.
3531 std::map<Constant*, unsigned> ConstantsOut;
3533 // Check successor nodes' PHI nodes that expect a constant to be available from
3534 // this block.
3535 TerminatorInst *TI = LLVMBB->getTerminator();
3536 for (unsigned succ = 0, e = TI->getNumSuccessors(); succ != e; ++succ) {
3537 BasicBlock *SuccBB = TI->getSuccessor(succ);
3538 if (!isa<PHINode>(SuccBB->begin())) continue;
3540 MachineBasicBlock::iterator MBBI = FuncInfo.MBBMap[SuccBB]->begin();
3541 PHINode *PN;
3543 // At this point we know that there is a 1-1 correspondence between LLVM PHI
3544 // nodes and Machine PHI nodes, but the incoming operands have not been
3545 // emitted yet.
3546 for (BasicBlock::iterator I = SuccBB->begin();
3547 (PN = dyn_cast<PHINode>(I)); ++I)
3548 if (!PN->use_empty()) {
3549 unsigned Reg;
3550 Value *PHIOp = PN->getIncomingValueForBlock(LLVMBB);
3551 if (Constant *C = dyn_cast<Constant>(PHIOp)) {
3552 unsigned &RegOut = ConstantsOut[C];
3553 if (RegOut == 0) {
3554 RegOut = FuncInfo.CreateRegForValue(C);
3555 UnorderedChains.push_back(
3556 CopyValueToVirtualRegister(SDL, C, RegOut));
3557 }
3558 Reg = RegOut;
3559 } else {
3560 Reg = FuncInfo.ValueMap[PHIOp];
3561 if (Reg == 0) {
3562 assert(isa<AllocaInst>(PHIOp) &&
3563 FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(PHIOp)) &&
3564 "Didn't codegen value into a register!??");
3565 Reg = FuncInfo.CreateRegForValue(PHIOp);
3566 UnorderedChains.push_back(
3567 CopyValueToVirtualRegister(SDL, PHIOp, Reg));
3568 }
3569 }
3571 // Remember that this register needs to be added to the machine PHI node as
3572 // the input for this MBB.
3573 MVT::ValueType VT = TLI.getValueType(PN->getType());
3574 unsigned NumElements;
3575 if (VT != MVT::Vector)
3576 NumElements = TLI.getNumElements(VT);
3577 else {
3578 MVT::ValueType VT1,VT2;
3579 NumElements =
3580 TLI.getPackedTypeBreakdown(cast<PackedType>(PN->getType()),
3581 VT1, VT2);
3582 }
3583 for (unsigned i = 0, e = NumElements; i != e; ++i)
3584 PHINodesToUpdate.push_back(std::make_pair(MBBI++, Reg+i));
3585 }
3586 }
3587 ConstantsOut.clear();
3589 // Turn all of the unordered chains into one factored node.
3590 if (!UnorderedChains.empty()) {
3591 SDOperand Root = SDL.getRoot();
3592 if (Root.getOpcode() != ISD::EntryToken) {
3593 unsigned i = 0, e = UnorderedChains.size();
3594 for (; i != e; ++i) {
3595 assert(UnorderedChains[i].Val->getNumOperands() > 1);
3596 if (UnorderedChains[i].Val->getOperand(0) == Root)
3597 break; // Don't add the root if we already indirectly depend on it.
3598 }
3600 if (i == e)
3601 UnorderedChains.push_back(Root);
3602 }
3603 DAG.setRoot(DAG.getNode(ISD::TokenFactor, MVT::Other,
3604 &UnorderedChains[0], UnorderedChains.size()));
3605 }
3607 // Lower the terminator after the copies are emitted.
3608 SDL.visit(*LLVMBB->getTerminator());
3610 // Copy over any CaseBlock records that may now exist due to SwitchInst
3611 // lowering, as well as any jump table information.
3612 SwitchCases.clear();
3613 SwitchCases = SDL.SwitchCases;
3614 JT = SDL.JT;
3616 // Make sure the root of the DAG is up-to-date.
3617 DAG.setRoot(SDL.getRoot());
3618 }
3620 void SelectionDAGISel::CodeGenAndEmitDAG(SelectionDAG &DAG) {
3621 // Get alias analysis for load/store combining.
3622 AliasAnalysis &AA = getAnalysis<AliasAnalysis>();
3624 // Run the DAG combiner in pre-legalize mode.
3625 DAG.Combine(false, AA);
3627 DEBUG(std::cerr << "Lowered selection DAG:\n");
3628 DEBUG(DAG.dump());
3630 // Second step, hack on the DAG until it only uses operations and types that
3631 // the target supports.
3632 DAG.Legalize();
3634 DEBUG(std::cerr << "Legalized selection DAG:\n");
3635 DEBUG(DAG.dump());
3637 // Run the DAG combiner in post-legalize mode.
3638 DAG.Combine(true, AA);
3640 if (ViewISelDAGs) DAG.viewGraph();
3642 // Third, instruction select all of the operations to machine code, adding the
3643 // code to the MachineBasicBlock.
3644 InstructionSelectBasicBlock(DAG);
3646 DEBUG(std::cerr << "Selected machine code:\n");
3647 DEBUG(BB->dump());
3648 }
3650 void SelectionDAGISel::SelectBasicBlock(BasicBlock *LLVMBB, MachineFunction &MF,
3651 FunctionLoweringInfo &FuncInfo) {
3652 std::vector<std::pair<MachineInstr*, unsigned> > PHINodesToUpdate;
3654 SelectionDAG DAG(TLI, MF, getAnalysisToUpdate<MachineDebugInfo>());
3657 // First step, lower LLVM code to some DAG. This DAG may use operations and
3658 // types that are not supported by the target.
3659 BuildSelectionDAG(DAG, LLVMBB, PHINodesToUpdate, FuncInfo);
3661 // Second step, emit the lowered DAG as machine code.
3662 CodeGenAndEmitDAG(DAG);
3665 // Next, now that we know what the last MBB the LLVM BB expanded is, update
3666 // PHI nodes in successors.
3667 if (SwitchCases.empty() && JT.Reg == 0) {
3668 for (unsigned i = 0, e = PHINodesToUpdate.size(); i != e; ++i) {
3669 MachineInstr *PHI = PHINodesToUpdate[i].first;
3670 assert(PHI->getOpcode() == TargetInstrInfo::PHI &&
3671 "This is not a machine PHI node that we are updating!");
3672 PHI->addRegOperand(PHINodesToUpdate[i].second, false);
3673 PHI->addMachineBasicBlockOperand(BB);
3674 }
3675 return;
3676 }
3678 // If the JumpTable record is filled in, then we need to emit a jump table.
3679 // Updating the PHI nodes is tricky in this case, since we need to determine
3680 // whether the PHI is a successor of the range check MBB or the jump table MBB.
3681 if (JT.Reg) {
3682 assert(SwitchCases.empty() && "Cannot have jump table and lowered switch");
3683 SelectionDAG SDAG(TLI, MF, getAnalysisToUpdate<MachineDebugInfo>());
3685 SelectionDAGLowering SDL(SDAG, TLI, FuncInfo);
3686 MachineBasicBlock *RangeBB = BB;
3687 // Set the current basic block to the mbb we wish to insert the code into
3688 BB = JT.MBB;
3689 SDL.setCurrentBasicBlock(BB);
3691 SDL.visitJumpTable(JT);
3692 SDAG.setRoot(SDL.getRoot());
3693 CodeGenAndEmitDAG(SDAG);
3695 for (unsigned pi = 0, pe = PHINodesToUpdate.size(); pi != pe; ++pi) {
3696 MachineInstr *PHI = PHINodesToUpdate[pi].first;
3697 MachineBasicBlock *PHIBB = PHI->getParent();
3698 assert(PHI->getOpcode() == TargetInstrInfo::PHI &&
3699 "This is not a machine PHI node that we are updating!");
3700 if (PHIBB == JT.Default) {
3701 PHI->addRegOperand(PHINodesToUpdate[pi].second, false);
3702 PHI->addMachineBasicBlockOperand(RangeBB);
3703 }
3704 if (BB->succ_end() != std::find(BB->succ_begin(),BB->succ_end(), PHIBB)) {
3705 PHI->addRegOperand(PHINodesToUpdate[pi].second, false);
3706 PHI->addMachineBasicBlockOperand(BB);
3707 }
3708 }
3709 return;
3710 }
3712 // If the switch block involved a branch to one of the actual successors, we
3713 // need to update PHI nodes in that block.
3714 for (unsigned i = 0, e = PHINodesToUpdate.size(); i != e; ++i) {
3715 MachineInstr *PHI = PHINodesToUpdate[i].first;
3716 assert(PHI->getOpcode() == TargetInstrInfo::PHI &&
3717 "This is not a machine PHI node that we are updating!");
3718 if (BB->isSuccessor(PHI->getParent())) {
3719 PHI->addRegOperand(PHINodesToUpdate[i].second, false);
3720 PHI->addMachineBasicBlockOperand(BB);
3721 }
3722 }
3724 // If we generated any switch lowering information, build and codegen any
3725 // additional DAGs necessary.
3726 for (unsigned i = 0, e = SwitchCases.size(); i != e; ++i) {
3727 SelectionDAG SDAG(TLI, MF, getAnalysisToUpdate<MachineDebugInfo>());
3729 SelectionDAGLowering SDL(SDAG, TLI, FuncInfo);
3731 // Set the current basic block to the mbb we wish to insert the code into
3732 BB = SwitchCases[i].ThisBB;
3733 SDL.setCurrentBasicBlock(BB);
3736 SDL.visitSwitchCase(SwitchCases[i]);
3737 SDAG.setRoot(SDL.getRoot());
3738 CodeGenAndEmitDAG(SDAG);
3740 // Handle any PHI nodes in successors of this chunk, as if we were coming
3741 // from the original BB before switch expansion. Note that PHI nodes can
3742 // occur multiple times in PHINodesToUpdate. We have to be very careful to
3743 // handle them the right number of times.
3744 while ((BB = SwitchCases[i].TrueBB)) { // Handle LHS and RHS.
3745 for (MachineBasicBlock::iterator Phi = BB->begin();
3746 Phi != BB->end() && Phi->getOpcode() == TargetInstrInfo::PHI; ++Phi){
3747 // This value for this PHI node is recorded in PHINodesToUpdate, get it.
3748 for (unsigned pn = 0; ; ++pn) {
3749 assert(pn != PHINodesToUpdate.size() && "Didn't find PHI entry!");
3750 if (PHINodesToUpdate[pn].first == Phi) {
3751 Phi->addRegOperand(PHINodesToUpdate[pn].second, false);
3752 Phi->addMachineBasicBlockOperand(SwitchCases[i].ThisBB);
3753 break;
3754 }
3755 }
3756 }
3758 // Don't process RHS if same block as LHS.
3759 if (BB == SwitchCases[i].FalseBB)
3760 SwitchCases[i].FalseBB = 0;
3762 // If we haven't handled the RHS, do so now. Otherwise, we're done.
3763 SwitchCases[i].TrueBB = SwitchCases[i].FalseBB;
3764 SwitchCases[i].FalseBB = 0;
3765 }
3766 assert(SwitchCases[i].TrueBB == 0 && SwitchCases[i].FalseBB == 0);
3767 }
3768 }
3771 //===----------------------------------------------------------------------===//
3772 /// ScheduleAndEmitDAG - Pick a safe ordering and emit instructions for each
3773 /// target node in the graph.
3774 void SelectionDAGISel::ScheduleAndEmitDAG(SelectionDAG &DAG) {
3775 if (ViewSchedDAGs) DAG.viewGraph();
3777 RegisterScheduler::FunctionPassCtor Ctor = RegisterScheduler::getDefault();
3779 if (!Ctor) {
3780 Ctor = ISHeuristic;
3781 RegisterScheduler::setDefault(Ctor);
3782 }
3784 ScheduleDAG *SL = Ctor(this, &DAG, BB);
3785 BB = SL->Run();
3786 delete SL;
3787 }
3790 HazardRecognizer *SelectionDAGISel::CreateTargetHazardRecognizer() {
3791 return new HazardRecognizer();
3792 }
3794 //===----------------------------------------------------------------------===//
3795 // Helper functions used by the generated instruction selector.
3796 //===----------------------------------------------------------------------===//
3797 // Calls to these methods are generated by tblgen.
3799 /// CheckAndMask - The isel is trying to match something like (and X, 255). If
3800 /// the dag combiner simplified the 255, we still want to match. RHS is the
3801 /// actual value in the DAG on the RHS of an AND, and DesiredMaskS is the value
3802 /// specified in the .td file (e.g. 255).
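/// For example (illustrative): if the pattern asks for (and X, 255) but the
/// combiner shrank the constant to (and X, 15) because it proved the upper bits
/// of X are zero, then ActualMask is 0x0F, DesiredMask is 0xFF, NeededMask is
/// 0xF0, and the match still succeeds when MaskedValueIsZero(X, 0xF0) holds.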
3803 bool SelectionDAGISel::CheckAndMask(SDOperand LHS, ConstantSDNode *RHS,
3804 int64_t DesiredMaskS) {
3805 uint64_t ActualMask = RHS->getValue();
3806 uint64_t DesiredMask =DesiredMaskS & MVT::getIntVTBitMask(LHS.getValueType());
3808 // If the actual mask exactly matches, success!
3809 if (ActualMask == DesiredMask)
3810 return true;
3812 // If the actual AND mask is allowing unallowed bits, this doesn't match.
3813 if (ActualMask & ~DesiredMask)
3814 return false;
3816 // Otherwise, the DAG Combiner may have proven that the value coming in is
3817 // either already zero or is not demanded. Check for known zero input bits.
3818 uint64_t NeededMask = DesiredMask & ~ActualMask;
3819 if (getTargetLowering().MaskedValueIsZero(LHS, NeededMask))
3820 return true;
3822 // TODO: check to see if missing bits are just not demanded.
3824 // Otherwise, this pattern doesn't match.
3825 return false;
3826 }
3828 /// CheckOrMask - The isel is trying to match something like (or X, 255). If
3829 /// the dag combiner simplified the 255, we still want to match. RHS is the
3830 /// actual value in the DAG on the RHS of an OR, and DesiredMaskS is the value
3831 /// specified in the .td file (e.g. 255).
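/// For example (illustrative): when matching (or X, 255) after the combiner has
/// reduced the constant to (or X, 240), NeededMask is 0x0F, and the match still
/// succeeds only if those low four bits of X are already known to be one.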
3832 bool SelectionDAGISel::CheckOrMask(SDOperand LHS, ConstantSDNode *RHS,
3833 int64_t DesiredMaskS) {
3834 uint64_t ActualMask = RHS->getValue();
3835 uint64_t DesiredMask =DesiredMaskS & MVT::getIntVTBitMask(LHS.getValueType());
3837 // If the actual mask exactly matches, success!
3838 if (ActualMask == DesiredMask)
3839 return true;
3841 // If the actual OR value sets bits outside the desired mask, this doesn't match.
3842 if (ActualMask & ~DesiredMask)
3843 return false;
3845 // Otherwise, the DAG Combiner may have proven that the value coming in is
3846 // either already zero or is not demanded. Check for known zero input bits.
3847 uint64_t NeededMask = DesiredMask & ~ActualMask;
3849 uint64_t KnownZero, KnownOne;
3850 getTargetLowering().ComputeMaskedBits(LHS, NeededMask, KnownZero, KnownOne);
3852 // If all the missing bits in the or are already known to be set, match!
3853 if ((NeededMask & KnownOne) == NeededMask)
3854 return true;
3856 // TODO: check to see if missing bits are just not demanded.
3858 // Otherwise, this pattern doesn't match.
3859 return false;
3860 }
3863 /// SelectInlineAsmMemoryOperands - Calls to this are automatically generated
3864 /// by tblgen. Others should not call it.
3865 void SelectionDAGISel::
3866 SelectInlineAsmMemoryOperands(std::vector<SDOperand> &Ops, SelectionDAG &DAG) {
3867 std::vector<SDOperand> InOps;
3868 std::swap(InOps, Ops);
3870 Ops.push_back(InOps[0]); // input chain.
3871 Ops.push_back(InOps[1]); // input asm string.
3873 unsigned i = 2, e = InOps.size();
3874 if (InOps[e-1].getValueType() == MVT::Flag)
3875 --e; // Don't process a flag operand if it is here.
3877 while (i != e) {
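// Each operand group starts with a flag word: the low 3 bits encode the
// operand kind (4 == MEM) and the remaining bits give the number of SDOperands
// that follow in the group, e.g. a two-value register operand would carry
// (2 << 3) | kind (illustrative of the encoding decoded below).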
3878 unsigned Flags = cast<ConstantSDNode>(InOps[i])->getValue();
3879 if ((Flags & 7) != 4 /*MEM*/) {
3880 // Just skip over this operand, copying the operands verbatim.
3881 Ops.insert(Ops.end(), InOps.begin()+i, InOps.begin()+i+(Flags >> 3) + 1);
3882 i += (Flags >> 3) + 1;
3883 } else {
3884 assert((Flags >> 3) == 1 && "Memory operand with multiple values?");
3885 // Otherwise, this is a memory operand. Ask the target to select it.
3886 std::vector<SDOperand> SelOps;
3887 if (SelectInlineAsmMemoryOperand(InOps[i+1], 'm', SelOps, DAG)) {
3888 std::cerr << "Could not match memory address. Inline asm failure!\n";
3889 exit(1);
3890 }
3892 // Add this to the output node.
3893 Ops.push_back(DAG.getConstant(4/*MEM*/ | (SelOps.size() << 3), MVT::i32));
3894 Ops.insert(Ops.end(), SelOps.begin(), SelOps.end());
3895 i += 2;
3896 }
3897 }
3899 // Add the flag input back if present.
3900 if (e != InOps.size())
3901 Ops.push_back(InOps.back());
3902 }