From eabaed26c3921453d824b71a93557c32132f4fdd Mon Sep 17 00:00:00 2001
From: Dan Gohman
Date: Wed, 7 Jul 2010 16:47:08 +0000
Subject: [PATCH] Give FunctionLoweringInfo an MBB member, avoiding the need
 to pass it around everywhere, and also give it an InsertPt member, to enable
 isel to operate at an arbitrary position within a block, rather than just
 appending to a block.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@107791 91177308-0d34-0410-b5e6-96231b3b80d8
---
 include/llvm/CodeGen/FastISel.h               |  11 +-
 include/llvm/CodeGen/FunctionLoweringInfo.h   |   7 +
 include/llvm/CodeGen/SelectionDAGISel.h       |  13 +-
 lib/CodeGen/SelectionDAG/FastISel.cpp         | 129 ++++++++-------
 lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp | 109 +++++++------
 lib/Target/X86/X86FastISel.cpp                | 152 +++++++++++-------
 utils/TableGen/FastISelEmitter.cpp            |   4 +-
 7 files changed, 240 insertions(+), 185 deletions(-)

diff --git a/include/llvm/CodeGen/FastISel.h b/include/llvm/CodeGen/FastISel.h
index c5c457db0c1..2067ca81325 100644
--- a/include/llvm/CodeGen/FastISel.h
+++ b/include/llvm/CodeGen/FastISel.h
@@ -44,7 +44,6 @@ class TargetRegisterInfo;
 /// lowering, but runs quickly.
 class FastISel {
 protected:
-  MachineBasicBlock *MBB;
   DenseMap<const Value *, unsigned> LocalValueMap;
   FunctionLoweringInfo &FuncInfo;
   MachineRegisterInfo &MRI;
@@ -62,18 +61,10 @@ public:
   /// startNewBlock - Set the current block to which generated machine
   /// instructions will be appended, and clear the local CSE map.
   ///
-  void startNewBlock(MachineBasicBlock *mbb) {
-    setCurrentBlock(mbb);
+  void startNewBlock() {
     LocalValueMap.clear();
   }
 
-  /// setCurrentBlock - Set the current block to which generated machine
-  /// instructions will be appended.
-  ///
-  void setCurrentBlock(MachineBasicBlock *mbb) {
-    MBB = mbb;
-  }
-
   /// getCurDebugLoc() - Return current debug location information.
   DebugLoc getCurDebugLoc() const { return DL; }
 
diff --git a/include/llvm/CodeGen/FunctionLoweringInfo.h b/include/llvm/CodeGen/FunctionLoweringInfo.h
index 011d42617d6..da8a3ff7c6a 100644
--- a/include/llvm/CodeGen/FunctionLoweringInfo.h
+++ b/include/llvm/CodeGen/FunctionLoweringInfo.h
@@ -25,6 +25,7 @@
 #endif
 #include "llvm/CodeGen/ValueTypes.h"
 #include "llvm/CodeGen/ISDOpcodes.h"
+#include "llvm/CodeGen/MachineBasicBlock.h"
 #include "llvm/Support/CallSite.h"
 #include <vector>
 
@@ -80,6 +81,12 @@ public:
   /// function arguments that are inserted after scheduling is completed.
   SmallVector<MachineInstr*, 8> ArgDbgValues;
 
+  /// MBB - The current block.
+  MachineBasicBlock *MBB;
+
+  /// InsertPt - The current insert position inside the current block.
+ MachineBasicBlock::iterator InsertPt; + #ifndef NDEBUG SmallSet CatchInfoLost; SmallSet CatchInfoFound; diff --git a/include/llvm/CodeGen/SelectionDAGISel.h b/include/llvm/CodeGen/SelectionDAGISel.h index 16159947410..01d05ddac11 100644 --- a/include/llvm/CodeGen/SelectionDAGISel.h +++ b/include/llvm/CodeGen/SelectionDAGISel.h @@ -280,15 +280,14 @@ private: SDNode *MorphNode(SDNode *Node, unsigned TargetOpc, SDVTList VTs, const SDValue *Ops, unsigned NumOps, unsigned EmitNodeInfo); - void PrepareEHLandingPad(MachineBasicBlock *BB); + void PrepareEHLandingPad(); void SelectAllBasicBlocks(const Function &Fn); - void FinishBasicBlock(MachineBasicBlock *BB); + void FinishBasicBlock(); - MachineBasicBlock *SelectBasicBlock(MachineBasicBlock *BB, - BasicBlock::const_iterator Begin, - BasicBlock::const_iterator End, - bool &HadTailCall); - MachineBasicBlock *CodeGenAndEmitDAG(MachineBasicBlock *BB); + void SelectBasicBlock(BasicBlock::const_iterator Begin, + BasicBlock::const_iterator End, + bool &HadTailCall); + void CodeGenAndEmitDAG(); void LowerArguments(const BasicBlock *BB); void ComputeLiveOutVRegInfo(); diff --git a/lib/CodeGen/SelectionDAG/FastISel.cpp b/lib/CodeGen/SelectionDAG/FastISel.cpp index a917cdddeea..2ba8315a7e6 100644 --- a/lib/CodeGen/SelectionDAG/FastISel.cpp +++ b/lib/CodeGen/SelectionDAG/FastISel.cpp @@ -169,7 +169,8 @@ unsigned FastISel::materializeRegForValue(const Value *V, MVT VT) { Reg = lookUpRegForValue(Op); } else if (isa(V)) { Reg = createResultReg(TLI.getRegClassFor(VT)); - BuildMI(MBB, DL, TII.get(TargetOpcode::IMPLICIT_DEF), Reg); + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, + TII.get(TargetOpcode::IMPLICIT_DEF), Reg); } // If target-independent code couldn't handle the value, give target-specific @@ -212,7 +213,7 @@ unsigned FastISel::UpdateValueMap(const Value *I, unsigned Reg) { AssignedReg = Reg; else if (Reg != AssignedReg) { const TargetRegisterClass *RegClass = MRI.getRegClass(Reg); - TII.copyRegToReg(*MBB, MBB->end(), AssignedReg, + TII.copyRegToReg(*FuncInfo.MBB, FuncInfo.InsertPt, AssignedReg, Reg, RegClass, RegClass, DL); } return AssignedReg; @@ -434,23 +435,28 @@ bool FastISel::SelectCall(const User *I) { if (!V) { // Currently the optimizer can produce this; insert an undef to // help debugging. Probably the optimizer should not do this. - BuildMI(MBB, DL, II).addReg(0U).addImm(DI->getOffset()). - addMetadata(DI->getVariable()); + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II) + .addReg(0U).addImm(DI->getOffset()) + .addMetadata(DI->getVariable()); } else if (const ConstantInt *CI = dyn_cast(V)) { - BuildMI(MBB, DL, II).addImm(CI->getZExtValue()).addImm(DI->getOffset()). - addMetadata(DI->getVariable()); + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II) + .addImm(CI->getZExtValue()).addImm(DI->getOffset()) + .addMetadata(DI->getVariable()); } else if (const ConstantFP *CF = dyn_cast(V)) { - BuildMI(MBB, DL, II).addFPImm(CF).addImm(DI->getOffset()). - addMetadata(DI->getVariable()); + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II) + .addFPImm(CF).addImm(DI->getOffset()) + .addMetadata(DI->getVariable()); } else if (unsigned Reg = lookUpRegForValue(V)) { - BuildMI(MBB, DL, II).addReg(Reg, RegState::Debug).addImm(DI->getOffset()). 
- addMetadata(DI->getVariable()); + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II) + .addReg(Reg, RegState::Debug).addImm(DI->getOffset()) + .addMetadata(DI->getVariable()); } else { // We can't yet handle anything else here because it would require // generating code, thus altering codegen because of debug info. // Insert an undef so we can see what we dropped. - BuildMI(MBB, DL, II).addReg(0U).addImm(DI->getOffset()). - addMetadata(DI->getVariable()); + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II) + .addReg(0U).addImm(DI->getOffset()) + .addMetadata(DI->getVariable()); } return true; } @@ -459,12 +465,13 @@ bool FastISel::SelectCall(const User *I) { switch (TLI.getOperationAction(ISD::EXCEPTIONADDR, VT)) { default: break; case TargetLowering::Expand: { - assert(MBB->isLandingPad() && "Call to eh.exception not in landing pad!"); + assert(FuncInfo.MBB->isLandingPad() && + "Call to eh.exception not in landing pad!"); unsigned Reg = TLI.getExceptionAddressRegister(); const TargetRegisterClass *RC = TLI.getRegClassFor(VT); unsigned ResultReg = createResultReg(RC); - bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg, - Reg, RC, RC, DL); + bool InsertedCopy = TII.copyRegToReg(*FuncInfo.MBB, FuncInfo.InsertPt, + ResultReg, Reg, RC, RC, DL); assert(InsertedCopy && "Can't copy address registers!"); InsertedCopy = InsertedCopy; UpdateValueMap(I, ResultReg); @@ -478,23 +485,23 @@ bool FastISel::SelectCall(const User *I) { switch (TLI.getOperationAction(ISD::EHSELECTION, VT)) { default: break; case TargetLowering::Expand: { - if (MBB->isLandingPad()) - AddCatchInfo(*cast(I), &FuncInfo.MF->getMMI(), MBB); + if (FuncInfo.MBB->isLandingPad()) + AddCatchInfo(*cast(I), &FuncInfo.MF->getMMI(), FuncInfo.MBB); else { #ifndef NDEBUG FuncInfo.CatchInfoLost.insert(cast(I)); #endif // FIXME: Mark exception selector register as live in. Hack for PR1508. unsigned Reg = TLI.getExceptionSelectorRegister(); - if (Reg) MBB->addLiveIn(Reg); + if (Reg) FuncInfo.MBB->addLiveIn(Reg); } unsigned Reg = TLI.getExceptionSelectorRegister(); EVT SrcVT = TLI.getPointerTy(); const TargetRegisterClass *RC = TLI.getRegClassFor(SrcVT); unsigned ResultReg = createResultReg(RC); - bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg, Reg, - RC, RC, DL); + bool InsertedCopy = TII.copyRegToReg(*FuncInfo.MBB, FuncInfo.InsertPt, + ResultReg, Reg, RC, RC, DL); assert(InsertedCopy && "Can't copy address registers!"); InsertedCopy = InsertedCopy; @@ -613,8 +620,9 @@ bool FastISel::SelectBitCast(const User *I) { TargetRegisterClass* DstClass = TLI.getRegClassFor(DstVT); ResultReg = createResultReg(DstClass); - bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg, - Op0, DstClass, SrcClass, DL); + bool InsertedCopy = TII.copyRegToReg(*FuncInfo.MBB, FuncInfo.InsertPt, + ResultReg, Op0, + DstClass, SrcClass, DL); if (!InsertedCopy) ResultReg = 0; } @@ -662,13 +670,14 @@ FastISel::SelectInstruction(const Instruction *I) { /// the CFG. void FastISel::FastEmitBranch(MachineBasicBlock *MSucc, DebugLoc DL) { - if (MBB->isLayoutSuccessor(MSucc)) { + if (FuncInfo.MBB->isLayoutSuccessor(MSucc)) { // The unconditional fall-through case, which needs no instructions. } else { // The unconditional branch case. - TII.InsertBranch(*MBB, MSucc, NULL, SmallVector(), DL); + TII.InsertBranch(*FuncInfo.MBB, MSucc, NULL, + SmallVector(), DL); } - MBB->addSuccessor(MSucc); + FuncInfo.MBB->addSuccessor(MSucc); } /// SelectFNeg - Emit an FNeg operation. 
@@ -854,8 +863,7 @@ FastISel::SelectOperator(const User *I, unsigned Opcode) { } FastISel::FastISel(FunctionLoweringInfo &funcInfo) - : MBB(0), - FuncInfo(funcInfo), + : FuncInfo(funcInfo), MRI(FuncInfo.MF->getRegInfo()), MFI(*FuncInfo.MF->getFrameInfo()), MCP(*FuncInfo.MF->getConstantPool()), @@ -993,7 +1001,7 @@ unsigned FastISel::FastEmitInst_(unsigned MachineInstOpcode, unsigned ResultReg = createResultReg(RC); const TargetInstrDesc &II = TII.get(MachineInstOpcode); - BuildMI(MBB, DL, II, ResultReg); + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg); return ResultReg; } @@ -1004,11 +1012,14 @@ unsigned FastISel::FastEmitInst_r(unsigned MachineInstOpcode, const TargetInstrDesc &II = TII.get(MachineInstOpcode); if (II.getNumDefs() >= 1) - BuildMI(MBB, DL, II, ResultReg).addReg(Op0, Op0IsKill * RegState::Kill); + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg) + .addReg(Op0, Op0IsKill * RegState::Kill); else { - BuildMI(MBB, DL, II).addReg(Op0, Op0IsKill * RegState::Kill); - bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg, - II.ImplicitDefs[0], RC, RC, DL); + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II) + .addReg(Op0, Op0IsKill * RegState::Kill); + bool InsertedCopy = TII.copyRegToReg(*FuncInfo.MBB, FuncInfo.InsertPt, + ResultReg, II.ImplicitDefs[0], + RC, RC, DL); if (!InsertedCopy) ResultReg = 0; } @@ -1024,15 +1035,16 @@ unsigned FastISel::FastEmitInst_rr(unsigned MachineInstOpcode, const TargetInstrDesc &II = TII.get(MachineInstOpcode); if (II.getNumDefs() >= 1) - BuildMI(MBB, DL, II, ResultReg) + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg) .addReg(Op0, Op0IsKill * RegState::Kill) .addReg(Op1, Op1IsKill * RegState::Kill); else { - BuildMI(MBB, DL, II) + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II) .addReg(Op0, Op0IsKill * RegState::Kill) .addReg(Op1, Op1IsKill * RegState::Kill); - bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg, - II.ImplicitDefs[0], RC, RC, DL); + bool InsertedCopy = TII.copyRegToReg(*FuncInfo.MBB, FuncInfo.InsertPt, + ResultReg, II.ImplicitDefs[0], + RC, RC, DL); if (!InsertedCopy) ResultReg = 0; } @@ -1047,15 +1059,16 @@ unsigned FastISel::FastEmitInst_ri(unsigned MachineInstOpcode, const TargetInstrDesc &II = TII.get(MachineInstOpcode); if (II.getNumDefs() >= 1) - BuildMI(MBB, DL, II, ResultReg) + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg) .addReg(Op0, Op0IsKill * RegState::Kill) .addImm(Imm); else { - BuildMI(MBB, DL, II) + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II) .addReg(Op0, Op0IsKill * RegState::Kill) .addImm(Imm); - bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg, - II.ImplicitDefs[0], RC, RC, DL); + bool InsertedCopy = TII.copyRegToReg(*FuncInfo.MBB, FuncInfo.InsertPt, + ResultReg, II.ImplicitDefs[0], + RC, RC, DL); if (!InsertedCopy) ResultReg = 0; } @@ -1070,15 +1083,16 @@ unsigned FastISel::FastEmitInst_rf(unsigned MachineInstOpcode, const TargetInstrDesc &II = TII.get(MachineInstOpcode); if (II.getNumDefs() >= 1) - BuildMI(MBB, DL, II, ResultReg) + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg) .addReg(Op0, Op0IsKill * RegState::Kill) .addFPImm(FPImm); else { - BuildMI(MBB, DL, II) + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II) .addReg(Op0, Op0IsKill * RegState::Kill) .addFPImm(FPImm); - bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg, - II.ImplicitDefs[0], RC, RC, DL); + bool InsertedCopy = TII.copyRegToReg(*FuncInfo.MBB, FuncInfo.InsertPt, + ResultReg, II.ImplicitDefs[0], + RC, RC, DL); 
if (!InsertedCopy) ResultReg = 0; } @@ -1094,17 +1108,18 @@ unsigned FastISel::FastEmitInst_rri(unsigned MachineInstOpcode, const TargetInstrDesc &II = TII.get(MachineInstOpcode); if (II.getNumDefs() >= 1) - BuildMI(MBB, DL, II, ResultReg) + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg) .addReg(Op0, Op0IsKill * RegState::Kill) .addReg(Op1, Op1IsKill * RegState::Kill) .addImm(Imm); else { - BuildMI(MBB, DL, II) + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II) .addReg(Op0, Op0IsKill * RegState::Kill) .addReg(Op1, Op1IsKill * RegState::Kill) .addImm(Imm); - bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg, - II.ImplicitDefs[0], RC, RC, DL); + bool InsertedCopy = TII.copyRegToReg(*FuncInfo.MBB, FuncInfo.InsertPt, + ResultReg, II.ImplicitDefs[0], + RC, RC, DL); if (!InsertedCopy) ResultReg = 0; } @@ -1118,11 +1133,12 @@ unsigned FastISel::FastEmitInst_i(unsigned MachineInstOpcode, const TargetInstrDesc &II = TII.get(MachineInstOpcode); if (II.getNumDefs() >= 1) - BuildMI(MBB, DL, II, ResultReg).addImm(Imm); + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg).addImm(Imm); else { - BuildMI(MBB, DL, II).addImm(Imm); - bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg, - II.ImplicitDefs[0], RC, RC, DL); + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II).addImm(Imm); + bool InsertedCopy = TII.copyRegToReg(*FuncInfo.MBB, FuncInfo.InsertPt, + ResultReg, II.ImplicitDefs[0], + RC, RC, DL); if (!InsertedCopy) ResultReg = 0; } @@ -1138,15 +1154,16 @@ unsigned FastISel::FastEmitInst_extractsubreg(MVT RetVT, const TargetInstrDesc &II = TII.get(TargetOpcode::EXTRACT_SUBREG); if (II.getNumDefs() >= 1) - BuildMI(MBB, DL, II, ResultReg) + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg) .addReg(Op0, Op0IsKill * RegState::Kill) .addImm(Idx); else { - BuildMI(MBB, DL, II) + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II) .addReg(Op0, Op0IsKill * RegState::Kill) .addImm(Idx); - bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg, - II.ImplicitDefs[0], RC, RC, DL); + bool InsertedCopy = TII.copyRegToReg(*FuncInfo.MBB, FuncInfo.InsertPt, + ResultReg, II.ImplicitDefs[0], + RC, RC, DL); if (!InsertedCopy) ResultReg = 0; } diff --git a/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp b/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp index 5c14bf319e0..c81437965af 100644 --- a/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp +++ b/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp @@ -319,9 +319,8 @@ bool SelectionDAGISel::runOnMachineFunction(MachineFunction &mf) { return true; } -MachineBasicBlock * -SelectionDAGISel::SelectBasicBlock(MachineBasicBlock *BB, - BasicBlock::const_iterator Begin, +void +SelectionDAGISel::SelectBasicBlock(BasicBlock::const_iterator Begin, BasicBlock::const_iterator End, bool &HadTailCall) { // Lower all of the non-terminator instructions. If a call is emitted @@ -336,7 +335,7 @@ SelectionDAGISel::SelectBasicBlock(MachineBasicBlock *BB, SDB->clear(); // Final step, emit the lowered DAG as machine code. 
- return CodeGenAndEmitDAG(BB); + CodeGenAndEmitDAG(); } namespace { @@ -425,7 +424,7 @@ void SelectionDAGISel::ComputeLiveOutVRegInfo() { } while (!Worklist.empty()); } -MachineBasicBlock *SelectionDAGISel::CodeGenAndEmitDAG(MachineBasicBlock *BB) { +void SelectionDAGISel::CodeGenAndEmitDAG() { std::string GroupName; if (TimePassesIsEnabled) GroupName = "Instruction Selection and Scheduling"; @@ -434,7 +433,7 @@ MachineBasicBlock *SelectionDAGISel::CodeGenAndEmitDAG(MachineBasicBlock *BB) { ViewDAGCombine2 || ViewDAGCombineLT || ViewISelDAGs || ViewSchedDAGs || ViewSUnitDAGs) BlockName = MF->getFunction()->getNameStr() + ":" + - BB->getBasicBlock()->getNameStr(); + FuncInfo->MBB->getBasicBlock()->getNameStr(); DEBUG(dbgs() << "Initial selection DAG:\n"; CurDAG->dump()); @@ -541,7 +540,7 @@ MachineBasicBlock *SelectionDAGISel::CodeGenAndEmitDAG(MachineBasicBlock *BB) { { NamedRegionTimer T("Instruction Scheduling", GroupName, TimePassesIsEnabled); - Scheduler->Run(CurDAG, BB, BB->end()); + Scheduler->Run(CurDAG, FuncInfo->MBB, FuncInfo->InsertPt); } if (ViewSUnitDAGs) Scheduler->viewGraph(); @@ -550,7 +549,7 @@ MachineBasicBlock *SelectionDAGISel::CodeGenAndEmitDAG(MachineBasicBlock *BB) { // inserted into. { NamedRegionTimer T("Instruction Creation", GroupName, TimePassesIsEnabled); - BB = Scheduler->EmitSchedule(); + FuncInfo->MBB = Scheduler->EmitSchedule(); } // Free the scheduler state. @@ -562,8 +561,6 @@ MachineBasicBlock *SelectionDAGISel::CodeGenAndEmitDAG(MachineBasicBlock *BB) { // Free the SelectionDAG state, now that we're finished with it. CurDAG->clear(); - - return BB; } void SelectionDAGISel::DoInstructionSelection() { @@ -625,21 +622,22 @@ void SelectionDAGISel::DoInstructionSelection() { /// PrepareEHLandingPad - Emit an EH_LABEL, set up live-in registers, and /// do other setup for EH landing-pad blocks. -void SelectionDAGISel::PrepareEHLandingPad(MachineBasicBlock *BB) { +void SelectionDAGISel::PrepareEHLandingPad() { // Add a label to mark the beginning of the landing pad. Deletion of the // landing pad can thus be detected via the MachineModuleInfo. - MCSymbol *Label = MF->getMMI().addLandingPad(BB); + MCSymbol *Label = MF->getMMI().addLandingPad(FuncInfo->MBB); const TargetInstrDesc &II = TM.getInstrInfo()->get(TargetOpcode::EH_LABEL); - BuildMI(BB, SDB->getCurDebugLoc(), II).addSym(Label); + BuildMI(*FuncInfo->MBB, FuncInfo->InsertPt, SDB->getCurDebugLoc(), II) + .addSym(Label); // Mark exception register as live in. unsigned Reg = TLI.getExceptionAddressRegister(); - if (Reg) BB->addLiveIn(Reg); + if (Reg) FuncInfo->MBB->addLiveIn(Reg); // Mark exception selector register as live in. Reg = TLI.getExceptionSelectorRegister(); - if (Reg) BB->addLiveIn(Reg); + if (Reg) FuncInfo->MBB->addLiveIn(Reg); // FIXME: Hack around an exception handling flaw (PR1508): the personality // function and list of typeids logically belong to the invoke (or, if you @@ -652,7 +650,7 @@ void SelectionDAGISel::PrepareEHLandingPad(MachineBasicBlock *BB) { // in exceptions not being caught because no typeids are associated with // the invoke. This may not be the only way things can go wrong, but it // is the only way we try to work around for the moment. - const BasicBlock *LLVMBB = BB->getBasicBlock(); + const BasicBlock *LLVMBB = FuncInfo->MBB->getBasicBlock(); const BranchInst *Br = dyn_cast(LLVMBB->getTerminator()); if (Br && Br->isUnconditional()) { // Critical edge? 
@@ -676,7 +674,8 @@ void SelectionDAGISel::SelectAllBasicBlocks(const Function &Fn) { // Iterate over all basic blocks in the function. for (Function::const_iterator I = Fn.begin(), E = Fn.end(); I != E; ++I) { const BasicBlock *LLVMBB = &*I; - MachineBasicBlock *BB = FuncInfo->MBBMap[LLVMBB]; + FuncInfo->MBB = FuncInfo->MBBMap[LLVMBB]; + FuncInfo->InsertPt = FuncInfo->MBB->getFirstNonPHI(); BasicBlock::const_iterator const Begin = LLVMBB->getFirstNonPHI(); BasicBlock::const_iterator const End = LLVMBB->end(); @@ -687,8 +686,8 @@ void SelectionDAGISel::SelectAllBasicBlocks(const Function &Fn) { LowerArguments(LLVMBB); // Setup an EH landing-pad block. - if (BB->isLandingPad()) - PrepareEHLandingPad(BB); + if (FuncInfo->MBB->isLandingPad()) + PrepareEHLandingPad(); // Before doing SelectionDAG ISel, see if FastISel has been requested. if (FastIS) { @@ -697,9 +696,9 @@ void SelectionDAGISel::SelectAllBasicBlocks(const Function &Fn) { if (LLVMBB == &Fn.getEntryBlock()) { CurDAG->setRoot(SDB->getControlRoot()); SDB->clear(); - BB = CodeGenAndEmitDAG(BB); + CodeGenAndEmitDAG(); } - FastIS->startNewBlock(BB); + FastIS->startNewBlock(); // Do FastISel on as many instructions as possible. for (; BI != End; ++BI) { #if 0 @@ -729,7 +728,7 @@ void SelectionDAGISel::SelectAllBasicBlocks(const Function &Fn) { } bool HadTailCall = false; - BB = SelectBasicBlock(BB, BI, llvm::next(BI), HadTailCall); + SelectBasicBlock(BI, llvm::next(BI), HadTailCall); // If the call was emitted as a tail call, we're done with the block. if (HadTailCall) { @@ -737,9 +736,6 @@ void SelectionDAGISel::SelectAllBasicBlocks(const Function &Fn) { break; } - // If the instruction was codegen'd with multiple blocks, - // inform the FastISel object where to resume inserting. - FastIS->setCurrentBlock(BB); continue; } @@ -765,10 +761,10 @@ void SelectionDAGISel::SelectAllBasicBlocks(const Function &Fn) { // block. 
if (BI != End) { bool HadTailCall; - BB = SelectBasicBlock(BB, BI, End, HadTailCall); + SelectBasicBlock(BI, End, HadTailCall); } - FinishBasicBlock(BB); + FinishBasicBlock(); FuncInfo->PHINodesToUpdate.clear(); } @@ -776,7 +772,7 @@ void SelectionDAGISel::SelectAllBasicBlocks(const Function &Fn) { } void -SelectionDAGISel::FinishBasicBlock(MachineBasicBlock *BB) { +SelectionDAGISel::FinishBasicBlock() { DEBUG(dbgs() << "Total amount of phi nodes to update: " << FuncInfo->PHINodesToUpdate.size() << "\n"; @@ -794,11 +790,11 @@ SelectionDAGISel::FinishBasicBlock(MachineBasicBlock *BB) { MachineInstr *PHI = FuncInfo->PHINodesToUpdate[i].first; assert(PHI->isPHI() && "This is not a machine PHI node that we are updating!"); - if (!BB->isSuccessor(PHI->getParent())) + if (!FuncInfo->MBB->isSuccessor(PHI->getParent())) continue; PHI->addOperand( MachineOperand::CreateReg(FuncInfo->PHINodesToUpdate[i].second, false)); - PHI->addOperand(MachineOperand::CreateMBB(BB)); + PHI->addOperand(MachineOperand::CreateMBB(FuncInfo->MBB)); } return; } @@ -807,33 +803,35 @@ SelectionDAGISel::FinishBasicBlock(MachineBasicBlock *BB) { // Lower header first, if it wasn't already lowered if (!SDB->BitTestCases[i].Emitted) { // Set the current basic block to the mbb we wish to insert the code into - BB = SDB->BitTestCases[i].Parent; + FuncInfo->MBB = SDB->BitTestCases[i].Parent; + FuncInfo->InsertPt = FuncInfo->MBB->end(); // Emit the code - SDB->visitBitTestHeader(SDB->BitTestCases[i], BB); + SDB->visitBitTestHeader(SDB->BitTestCases[i], FuncInfo->MBB); CurDAG->setRoot(SDB->getRoot()); SDB->clear(); - BB = CodeGenAndEmitDAG(BB); + CodeGenAndEmitDAG(); } for (unsigned j = 0, ej = SDB->BitTestCases[i].Cases.size(); j != ej; ++j) { // Set the current basic block to the mbb we wish to insert the code into - BB = SDB->BitTestCases[i].Cases[j].ThisBB; + FuncInfo->MBB = SDB->BitTestCases[i].Cases[j].ThisBB; + FuncInfo->InsertPt = FuncInfo->MBB->end(); // Emit the code if (j+1 != ej) SDB->visitBitTestCase(SDB->BitTestCases[i].Cases[j+1].ThisBB, SDB->BitTestCases[i].Reg, SDB->BitTestCases[i].Cases[j], - BB); + FuncInfo->MBB); else SDB->visitBitTestCase(SDB->BitTestCases[i].Default, SDB->BitTestCases[i].Reg, SDB->BitTestCases[i].Cases[j], - BB); + FuncInfo->MBB); CurDAG->setRoot(SDB->getRoot()); SDB->clear(); - BB = CodeGenAndEmitDAG(BB); + CodeGenAndEmitDAG(); } // Update PHI Nodes @@ -878,22 +876,24 @@ SelectionDAGISel::FinishBasicBlock(MachineBasicBlock *BB) { // Lower header first, if it wasn't already lowered if (!SDB->JTCases[i].first.Emitted) { // Set the current basic block to the mbb we wish to insert the code into - BB = SDB->JTCases[i].first.HeaderBB; + FuncInfo->MBB = SDB->JTCases[i].first.HeaderBB; + FuncInfo->InsertPt = FuncInfo->MBB->end(); // Emit the code SDB->visitJumpTableHeader(SDB->JTCases[i].second, SDB->JTCases[i].first, - BB); + FuncInfo->MBB); CurDAG->setRoot(SDB->getRoot()); SDB->clear(); - BB = CodeGenAndEmitDAG(BB); + CodeGenAndEmitDAG(); } // Set the current basic block to the mbb we wish to insert the code into - BB = SDB->JTCases[i].second.MBB; + FuncInfo->MBB = SDB->JTCases[i].second.MBB; + FuncInfo->InsertPt = FuncInfo->MBB->end(); // Emit the code SDB->visitJumpTable(SDB->JTCases[i].second); CurDAG->setRoot(SDB->getRoot()); SDB->clear(); - BB = CodeGenAndEmitDAG(BB); + CodeGenAndEmitDAG(); // Update PHI Nodes for (unsigned pi = 0, pe = FuncInfo->PHINodesToUpdate.size(); @@ -911,11 +911,11 @@ SelectionDAGISel::FinishBasicBlock(MachineBasicBlock *BB) { 
(MachineOperand::CreateMBB(SDB->JTCases[i].first.HeaderBB)); } // JT BB. Just iterate over successors here - if (BB->isSuccessor(PHIBB)) { + if (FuncInfo->MBB->isSuccessor(PHIBB)) { PHI->addOperand (MachineOperand::CreateReg(FuncInfo->PHINodesToUpdate[pi].second, false)); - PHI->addOperand(MachineOperand::CreateMBB(BB)); + PHI->addOperand(MachineOperand::CreateMBB(FuncInfo->MBB)); } } } @@ -927,10 +927,10 @@ SelectionDAGISel::FinishBasicBlock(MachineBasicBlock *BB) { MachineInstr *PHI = FuncInfo->PHINodesToUpdate[i].first; assert(PHI->isPHI() && "This is not a machine PHI node that we are updating!"); - if (BB->isSuccessor(PHI->getParent())) { + if (FuncInfo->MBB->isSuccessor(PHI->getParent())) { PHI->addOperand( MachineOperand::CreateReg(FuncInfo->PHINodesToUpdate[i].second, false)); - PHI->addOperand(MachineOperand::CreateMBB(BB)); + PHI->addOperand(MachineOperand::CreateMBB(FuncInfo->MBB)); } } @@ -938,7 +938,8 @@ SelectionDAGISel::FinishBasicBlock(MachineBasicBlock *BB) { // additional DAGs necessary. for (unsigned i = 0, e = SDB->SwitchCases.size(); i != e; ++i) { // Set the current basic block to the mbb we wish to insert the code into - MachineBasicBlock *ThisBB = BB = SDB->SwitchCases[i].ThisBB; + MachineBasicBlock *ThisBB = FuncInfo->MBB = SDB->SwitchCases[i].ThisBB; + FuncInfo->InsertPt = FuncInfo->MBB->end(); // Determine the unique successors. SmallVector Succs; @@ -948,21 +949,23 @@ SelectionDAGISel::FinishBasicBlock(MachineBasicBlock *BB) { // Emit the code. Note that this could result in ThisBB being split, so // we need to check for updates. - SDB->visitSwitchCase(SDB->SwitchCases[i], BB); + SDB->visitSwitchCase(SDB->SwitchCases[i], FuncInfo->MBB); CurDAG->setRoot(SDB->getRoot()); SDB->clear(); - ThisBB = CodeGenAndEmitDAG(BB); + CodeGenAndEmitDAG(); + ThisBB = FuncInfo->MBB; // Handle any PHI nodes in successors of this chunk, as if we were coming // from the original BB before switch expansion. Note that PHI nodes can // occur multiple times in PHINodesToUpdate. We have to be very careful to // handle them the right number of times. for (unsigned i = 0, e = Succs.size(); i != e; ++i) { - BB = Succs[i]; + FuncInfo->MBB = Succs[i]; + FuncInfo->InsertPt = FuncInfo->MBB->end(); // BB may have been removed from the CFG if a branch was constant folded. - if (ThisBB->isSuccessor(BB)) { - for (MachineBasicBlock::iterator Phi = BB->begin(); - Phi != BB->end() && Phi->isPHI(); + if (ThisBB->isSuccessor(FuncInfo->MBB)) { + for (MachineBasicBlock::iterator Phi = FuncInfo->MBB->begin(); + Phi != FuncInfo->MBB->end() && Phi->isPHI(); ++Phi) { // This value for this PHI node is recorded in PHINodesToUpdate. for (unsigned pn = 0; ; ++pn) { diff --git a/lib/Target/X86/X86FastISel.cpp b/lib/Target/X86/X86FastISel.cpp index f60cbcdf9b4..cdcbee6bc16 100644 --- a/lib/Target/X86/X86FastISel.cpp +++ b/lib/Target/X86/X86FastISel.cpp @@ -230,7 +230,8 @@ bool X86FastISel::X86FastEmitLoad(EVT VT, const X86AddressMode &AM, } ResultReg = createResultReg(RC); - addFullAddress(BuildMI(MBB, DL, TII.get(Opc), ResultReg), AM); + addFullAddress(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, + DL, TII.get(Opc), ResultReg), AM); return true; } @@ -249,7 +250,7 @@ X86FastISel::X86FastEmitStore(EVT VT, unsigned Val, case MVT::i1: { // Mask out all but lowest bit. 
unsigned AndResult = createResultReg(X86::GR8RegisterClass); - BuildMI(MBB, DL, + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(X86::AND8ri), AndResult).addReg(Val).addImm(1); Val = AndResult; } @@ -266,7 +267,8 @@ X86FastISel::X86FastEmitStore(EVT VT, unsigned Val, break; } - addFullAddress(BuildMI(MBB, DL, TII.get(Opc)), AM).addReg(Val); + addFullAddress(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, + DL, TII.get(Opc)), AM).addReg(Val); return true; } @@ -294,7 +296,8 @@ bool X86FastISel::X86FastEmitStore(EVT VT, const Value *Val, } if (Opc) { - addFullAddress(BuildMI(MBB, DL, TII.get(Opc)), AM) + addFullAddress(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, + DL, TII.get(Opc)), AM) .addImm(Signed ? (uint64_t) CI->getSExtValue() : CI->getZExtValue()); return true; @@ -333,7 +336,7 @@ bool X86FastISel::X86SelectAddress(const Value *V, X86AddressMode &AM) { // Don't walk into other basic blocks; it's possible we haven't // visited them yet, so the instructions may not yet be assigned // virtual registers. - if (FuncInfo.MBBMap[I->getParent()] != MBB) + if (FuncInfo.MBBMap[I->getParent()] != FuncInfo.MBB) return false; Opcode = I->getOpcode(); @@ -530,7 +533,8 @@ bool X86FastISel::X86SelectAddress(const Value *V, X86AddressMode &AM) { } LoadReg = createResultReg(RC); - addFullAddress(BuildMI(MBB, DL, TII.get(Opc), LoadReg), StubAM); + addFullAddress(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, + DL, TII.get(Opc), LoadReg), StubAM); // Prevent loading GV stub multiple times in same MBB. LocalValueMap[V] = LoadReg; @@ -720,8 +724,9 @@ bool X86FastISel::X86FastEmitCompare(const Value *Op0, const Value *Op1, // CMPri, otherwise use CMPrr. if (const ConstantInt *Op1C = dyn_cast(Op1)) { if (unsigned CompareImmOpc = X86ChooseCmpImmediateOpcode(VT, Op1C)) { - BuildMI(MBB, DL, TII.get(CompareImmOpc)).addReg(Op0Reg) - .addImm(Op1C->getSExtValue()); + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CompareImmOpc)) + .addReg(Op0Reg) + .addImm(Op1C->getSExtValue()); return true; } } @@ -731,7 +736,9 @@ bool X86FastISel::X86FastEmitCompare(const Value *Op0, const Value *Op1, unsigned Op1Reg = getRegForValue(Op1); if (Op1Reg == 0) return false; - BuildMI(MBB, DL, TII.get(CompareOpc)).addReg(Op0Reg).addReg(Op1Reg); + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CompareOpc)) + .addReg(Op0Reg) + .addReg(Op1Reg); return true; } @@ -753,9 +760,10 @@ bool X86FastISel::X86SelectCmp(const Instruction *I) { unsigned EReg = createResultReg(&X86::GR8RegClass); unsigned NPReg = createResultReg(&X86::GR8RegClass); - BuildMI(MBB, DL, TII.get(X86::SETEr), EReg); - BuildMI(MBB, DL, TII.get(X86::SETNPr), NPReg); - BuildMI(MBB, DL, + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(X86::SETEr), EReg); + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, + TII.get(X86::SETNPr), NPReg); + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(X86::AND8rr), ResultReg).addReg(NPReg).addReg(EReg); UpdateValueMap(I, ResultReg); return true; @@ -766,9 +774,13 @@ bool X86FastISel::X86SelectCmp(const Instruction *I) { unsigned NEReg = createResultReg(&X86::GR8RegClass); unsigned PReg = createResultReg(&X86::GR8RegClass); - BuildMI(MBB, DL, TII.get(X86::SETNEr), NEReg); - BuildMI(MBB, DL, TII.get(X86::SETPr), PReg); - BuildMI(MBB, DL, TII.get(X86::OR8rr), ResultReg).addReg(PReg).addReg(NEReg); + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, + TII.get(X86::SETNEr), NEReg); + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, + TII.get(X86::SETPr), PReg); + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, + TII.get(X86::OR8rr), 
ResultReg) + .addReg(PReg).addReg(NEReg); UpdateValueMap(I, ResultReg); return true; } @@ -807,7 +819,7 @@ bool X86FastISel::X86SelectCmp(const Instruction *I) { if (!X86FastEmitCompare(Op0, Op1, VT)) return false; - BuildMI(MBB, DL, TII.get(SetCCOpc), ResultReg); + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(SetCCOpc), ResultReg); UpdateValueMap(I, ResultReg); return true; } @@ -843,7 +855,7 @@ bool X86FastISel::X86SelectBranch(const Instruction *I) { // Try to take advantage of fallthrough opportunities. CmpInst::Predicate Predicate = CI->getPredicate(); - if (MBB->isLayoutSuccessor(TrueMBB)) { + if (FuncInfo.MBB->isLayoutSuccessor(TrueMBB)) { std::swap(TrueMBB, FalseMBB); Predicate = CmpInst::getInversePredicate(Predicate); } @@ -892,16 +904,18 @@ bool X86FastISel::X86SelectBranch(const Instruction *I) { if (!X86FastEmitCompare(Op0, Op1, VT)) return false; - BuildMI(MBB, DL, TII.get(BranchOpc)).addMBB(TrueMBB); + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(BranchOpc)) + .addMBB(TrueMBB); if (Predicate == CmpInst::FCMP_UNE) { // X86 requires a second branch to handle UNE (and OEQ, // which is mapped to UNE above). - BuildMI(MBB, DL, TII.get(X86::JP_4)).addMBB(TrueMBB); + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(X86::JP_4)) + .addMBB(TrueMBB); } FastEmitBranch(FalseMBB, DL); - MBB->addSuccessor(TrueMBB); + FuncInfo.MBB->addSuccessor(TrueMBB); return true; } } else if (ExtractValueInst *EI = @@ -927,7 +941,8 @@ bool X86FastISel::X86SelectBranch(const Instruction *I) { unsigned Reg = getRegForValue(EI); for (MachineBasicBlock::const_reverse_iterator - RI = MBB->rbegin(), RE = MBB->rend(); RI != RE; ++RI) { + RI = FuncInfo.MBB->rbegin(), RE = FuncInfo.MBB->rend(); + RI != RE; ++RI) { const MachineInstr &MI = *RI; if (MI.definesRegister(Reg)) { @@ -952,11 +967,11 @@ bool X86FastISel::X86SelectBranch(const Instruction *I) { unsigned OpCode = SetMI->getOpcode(); if (OpCode == X86::SETOr || OpCode == X86::SETBr) { - BuildMI(MBB, DL, TII.get(OpCode == X86::SETOr ? - X86::JO_4 : X86::JB_4)) + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, + TII.get(OpCode == X86::SETOr ? X86::JO_4 : X86::JB_4)) .addMBB(TrueMBB); FastEmitBranch(FalseMBB, DL); - MBB->addSuccessor(TrueMBB); + FuncInfo.MBB->addSuccessor(TrueMBB); return true; } } @@ -968,10 +983,12 @@ bool X86FastISel::X86SelectBranch(const Instruction *I) { unsigned OpReg = getRegForValue(BI->getCondition()); if (OpReg == 0) return false; - BuildMI(MBB, DL, TII.get(X86::TEST8rr)).addReg(OpReg).addReg(OpReg); - BuildMI(MBB, DL, TII.get(X86::JNE_4)).addMBB(TrueMBB); + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(X86::TEST8rr)) + .addReg(OpReg).addReg(OpReg); + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(X86::JNE_4)) + .addMBB(TrueMBB); FastEmitBranch(FalseMBB, DL); - MBB->addSuccessor(TrueMBB); + FuncInfo.MBB->addSuccessor(TrueMBB); return true; } @@ -1028,7 +1045,7 @@ bool X86FastISel::X86SelectShift(const Instruction *I) { // Fold immediate in shl(x,3). 
if (const ConstantInt *CI = dyn_cast(I->getOperand(1))) { unsigned ResultReg = createResultReg(RC); - BuildMI(MBB, DL, TII.get(OpImm), + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(OpImm), ResultReg).addReg(Op0Reg).addImm(CI->getZExtValue() & 0xff); UpdateValueMap(I, ResultReg); return true; @@ -1036,17 +1053,20 @@ bool X86FastISel::X86SelectShift(const Instruction *I) { unsigned Op1Reg = getRegForValue(I->getOperand(1)); if (Op1Reg == 0) return false; - TII.copyRegToReg(*MBB, MBB->end(), CReg, Op1Reg, RC, RC, DL); + TII.copyRegToReg(*FuncInfo.MBB, FuncInfo.InsertPt, + CReg, Op1Reg, RC, RC, DL); // The shift instruction uses X86::CL. If we defined a super-register // of X86::CL, emit an EXTRACT_SUBREG to precisely describe what // we're doing here. if (CReg != X86::CL) - BuildMI(MBB, DL, TII.get(TargetOpcode::EXTRACT_SUBREG), X86::CL) + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, + TII.get(TargetOpcode::EXTRACT_SUBREG), X86::CL) .addReg(CReg).addImm(X86::sub_8bit); unsigned ResultReg = createResultReg(RC); - BuildMI(MBB, DL, TII.get(OpReg), ResultReg).addReg(Op0Reg); + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(OpReg), ResultReg) + .addReg(Op0Reg); UpdateValueMap(I, ResultReg); return true; } @@ -1078,9 +1098,11 @@ bool X86FastISel::X86SelectSelect(const Instruction *I) { unsigned Op2Reg = getRegForValue(I->getOperand(2)); if (Op2Reg == 0) return false; - BuildMI(MBB, DL, TII.get(X86::TEST8rr)).addReg(Op0Reg).addReg(Op0Reg); + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(X86::TEST8rr)) + .addReg(Op0Reg).addReg(Op0Reg); unsigned ResultReg = createResultReg(RC); - BuildMI(MBB, DL, TII.get(Opc), ResultReg).addReg(Op1Reg).addReg(Op2Reg); + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), ResultReg) + .addReg(Op1Reg).addReg(Op2Reg); UpdateValueMap(I, ResultReg); return true; } @@ -1094,7 +1116,9 @@ bool X86FastISel::X86SelectFPExt(const Instruction *I) { unsigned OpReg = getRegForValue(V); if (OpReg == 0) return false; unsigned ResultReg = createResultReg(X86::FR64RegisterClass); - BuildMI(MBB, DL, TII.get(X86::CVTSS2SDrr), ResultReg).addReg(OpReg); + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, + TII.get(X86::CVTSS2SDrr), ResultReg) + .addReg(OpReg); UpdateValueMap(I, ResultReg); return true; } @@ -1111,7 +1135,9 @@ bool X86FastISel::X86SelectFPTrunc(const Instruction *I) { unsigned OpReg = getRegForValue(V); if (OpReg == 0) return false; unsigned ResultReg = createResultReg(X86::FR32RegisterClass); - BuildMI(MBB, DL, TII.get(X86::CVTSD2SSrr), ResultReg).addReg(OpReg); + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, + TII.get(X86::CVTSD2SSrr), ResultReg) + .addReg(OpReg); UpdateValueMap(I, ResultReg); return true; } @@ -1146,7 +1172,8 @@ bool X86FastISel::X86SelectTrunc(const Instruction *I) { const TargetRegisterClass *CopyRC = (SrcVT == MVT::i16) ? X86::GR16_ABCDRegisterClass : X86::GR32_ABCDRegisterClass; unsigned CopyReg = createResultReg(CopyRC); - BuildMI(MBB, DL, TII.get(CopyOpc), CopyReg).addReg(InputReg); + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CopyOpc), CopyReg) + .addReg(InputReg); // Then issue an extract_subreg. unsigned ResultReg = FastEmitInst_extractsubreg(MVT::i8, @@ -1218,7 +1245,7 @@ bool X86FastISel::X86VisitIntrinsicCall(const IntrinsicInst &I) { return false; unsigned ResultReg = createResultReg(TLI.getRegClassFor(VT)); - BuildMI(MBB, DL, TII.get(OpC), ResultReg). + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(OpC), ResultReg). addImm(CI->isZero() ? 
-1ULL : 0); UpdateValueMap(&I, ResultReg); return true; @@ -1232,12 +1259,12 @@ bool X86FastISel::X86VisitIntrinsicCall(const IntrinsicInst &I) { const TargetInstrDesc &II = TII.get(TargetOpcode::DBG_VALUE); // FIXME may need to add RegState::Debug to any registers produced, // although ESP/EBP should be the only ones at the moment. - addFullAddress(BuildMI(MBB, DL, II), AM).addImm(0). - addMetadata(DI->getVariable()); + addFullAddress(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II), AM). + addImm(0).addMetadata(DI->getVariable()); return true; } case Intrinsic::trap: { - BuildMI(MBB, DL, TII.get(X86::TRAP)); + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(X86::TRAP)); return true; } case Intrinsic::sadd_with_overflow: @@ -1273,7 +1300,8 @@ bool X86FastISel::X86VisitIntrinsicCall(const IntrinsicInst &I) { return false; unsigned ResultReg = createResultReg(TLI.getRegClassFor(VT)); - BuildMI(MBB, DL, TII.get(OpC), ResultReg).addReg(Reg1).addReg(Reg2); + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(OpC), ResultReg) + .addReg(Reg1).addReg(Reg2); unsigned DestReg1 = UpdateValueMap(&I, ResultReg); // If the add with overflow is an intra-block value then we just want to @@ -1291,7 +1319,7 @@ bool X86FastISel::X86VisitIntrinsicCall(const IntrinsicInst &I) { unsigned Opc = X86::SETBr; if (I.getIntrinsicID() == Intrinsic::sadd_with_overflow) Opc = X86::SETOr; - BuildMI(MBB, DL, TII.get(Opc), ResultReg); + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), ResultReg); return true; } } @@ -1418,7 +1446,8 @@ bool X86FastISel::X86SelectCall(const Instruction *I) { // Issue CALLSEQ_START unsigned AdjStackDown = TM.getRegisterInfo()->getCallFrameSetupOpcode(); - BuildMI(MBB, DL, TII.get(AdjStackDown)).addImm(NumBytes); + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(AdjStackDown)) + .addImm(NumBytes); // Process argument: walk the register/memloc assignments, inserting // copies / loads. @@ -1474,8 +1503,8 @@ bool X86FastISel::X86SelectCall(const Instruction *I) { if (VA.isRegLoc()) { TargetRegisterClass* RC = TLI.getRegClassFor(ArgVT); - bool Emitted = TII.copyRegToReg(*MBB, MBB->end(), VA.getLocReg(), - Arg, RC, RC, DL); + bool Emitted = TII.copyRegToReg(*FuncInfo.MBB, FuncInfo.InsertPt, + VA.getLocReg(), Arg, RC, RC, DL); assert(Emitted && "Failed to emit a copy instruction!"); Emitted=Emitted; Emitted = true; RegArgs.push_back(VA.getLocReg()); @@ -1501,8 +1530,8 @@ bool X86FastISel::X86SelectCall(const Instruction *I) { if (Subtarget->isPICStyleGOT()) { TargetRegisterClass *RC = X86::GR32RegisterClass; unsigned Base = getInstrInfo()->getGlobalBaseReg(FuncInfo.MF); - bool Emitted = TII.copyRegToReg(*MBB, MBB->end(), X86::EBX, Base, RC, RC, - DL); + bool Emitted = TII.copyRegToReg(*FuncInfo.MBB, FuncInfo.InsertPt, + X86::EBX, Base, RC, RC, DL); assert(Emitted && "Failed to emit a copy instruction!"); Emitted=Emitted; Emitted = true; } @@ -1512,7 +1541,8 @@ bool X86FastISel::X86SelectCall(const Instruction *I) { if (CalleeOp) { // Register-indirect call. unsigned CallOpc = Subtarget->is64Bit() ? X86::CALL64r : X86::CALL32r; - MIB = BuildMI(MBB, DL, TII.get(CallOpc)).addReg(CalleeOp); + MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CallOpc)) + .addReg(CalleeOp); } else { // Direct call. 
@@ -1541,7 +1571,8 @@ bool X86FastISel::X86SelectCall(const Instruction *I) { } - MIB = BuildMI(MBB, DL, TII.get(CallOpc)).addGlobalAddress(GV, 0, OpFlags); + MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CallOpc)) + .addGlobalAddress(GV, 0, OpFlags); } // Add an implicit use GOT pointer in EBX. @@ -1554,7 +1585,8 @@ bool X86FastISel::X86SelectCall(const Instruction *I) { // Issue CALLSEQ_END unsigned AdjStackUp = TM.getRegisterInfo()->getCallFrameDestroyOpcode(); - BuildMI(MBB, DL, TII.get(AdjStackUp)).addImm(NumBytes).addImm(0); + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(AdjStackUp)) + .addImm(NumBytes).addImm(0); // Now handle call return value (if any). SmallVector UsedRegs; @@ -1581,7 +1613,7 @@ bool X86FastISel::X86SelectCall(const Instruction *I) { } unsigned ResultReg = createResultReg(DstRC); - bool Emitted = TII.copyRegToReg(*MBB, MBB->end(), ResultReg, + bool Emitted = TII.copyRegToReg(*FuncInfo.MBB, FuncInfo.InsertPt, ResultReg, RVLocs[0].getLocReg(), DstRC, SrcRC, DL); assert(Emitted && "Failed to emit a copy instruction!"); Emitted=Emitted; Emitted = true; @@ -1595,18 +1627,21 @@ bool X86FastISel::X86SelectCall(const Instruction *I) { unsigned Opc = ResVT == MVT::f32 ? X86::ST_Fp80m32 : X86::ST_Fp80m64; unsigned MemSize = ResVT.getSizeInBits()/8; int FI = MFI.CreateStackObject(MemSize, MemSize, false); - addFrameReference(BuildMI(MBB, DL, TII.get(Opc)), FI).addReg(ResultReg); + addFrameReference(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, + TII.get(Opc)), FI) + .addReg(ResultReg); DstRC = ResVT == MVT::f32 ? X86::FR32RegisterClass : X86::FR64RegisterClass; Opc = ResVT == MVT::f32 ? X86::MOVSSrm : X86::MOVSDrm; ResultReg = createResultReg(DstRC); - addFrameReference(BuildMI(MBB, DL, TII.get(Opc), ResultReg), FI); + addFrameReference(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, + TII.get(Opc), ResultReg), FI); } if (AndToI1) { // Mask out all but lowest bit for some call which produces an i1. unsigned AndResult = createResultReg(X86::GR8RegisterClass); - BuildMI(MBB, DL, + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(X86::AND8ri), AndResult).addReg(ResultReg).addImm(1); ResultReg = AndResult; } @@ -1729,7 +1764,8 @@ unsigned X86FastISel::TargetMaterializeConstant(const Constant *C) { else Opc = X86::LEA64r; unsigned ResultReg = createResultReg(RC); - addLeaAddress(BuildMI(MBB, DL, TII.get(Opc), ResultReg), AM); + addLeaAddress(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, + TII.get(Opc), ResultReg), AM); return ResultReg; } return 0; @@ -1759,7 +1795,8 @@ unsigned X86FastISel::TargetMaterializeConstant(const Constant *C) { // Create the load from the constant pool. unsigned MCPOffset = MCP.getConstantPoolIndex(C, Align); unsigned ResultReg = createResultReg(RC); - addConstantPoolReference(BuildMI(MBB, DL, TII.get(Opc), ResultReg), + addConstantPoolReference(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, + TII.get(Opc), ResultReg), MCPOffset, PICBase, OpFlag); return ResultReg; @@ -1782,7 +1819,8 @@ unsigned X86FastISel::TargetMaterializeAlloca(const AllocaInst *C) { unsigned Opc = Subtarget->is64Bit() ? 
X86::LEA64r : X86::LEA32r; TargetRegisterClass* RC = TLI.getRegClassFor(TLI.getPointerTy()); unsigned ResultReg = createResultReg(RC); - addLeaAddress(BuildMI(MBB, DL, TII.get(Opc), ResultReg), AM); + addLeaAddress(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, + TII.get(Opc), ResultReg), AM); return ResultReg; } diff --git a/utils/TableGen/FastISelEmitter.cpp b/utils/TableGen/FastISelEmitter.cpp index 7c54f7877d5..843546d355f 100644 --- a/utils/TableGen/FastISelEmitter.cpp +++ b/utils/TableGen/FastISelEmitter.cpp @@ -432,7 +432,7 @@ void FastISelMap::PrintFunctionDefinitions(raw_ostream &OS) { for (unsigned i = 0; i < Memo.PhysRegs->size(); ++i) { if ((*Memo.PhysRegs)[i] != "") - OS << " TII.copyRegToReg(*MBB, MBB->end(), " + OS << " TII.copyRegToReg(*FuncInfo.MBB, FuncInfo.InsertPt, " << (*Memo.PhysRegs)[i] << ", Op" << i << ", " << "TM.getRegisterInfo()->getPhysicalRegisterRegClass(" << (*Memo.PhysRegs)[i] << "), " @@ -526,7 +526,7 @@ void FastISelMap::PrintFunctionDefinitions(raw_ostream &OS) { for (unsigned i = 0; i < Memo.PhysRegs->size(); ++i) { if ((*Memo.PhysRegs)[i] != "") - OS << " TII.copyRegToReg(*MBB, MBB->end(), " + OS << " TII.copyRegToReg(*FuncInfo.MBB, FuncInfo.InsertPt, " << (*Memo.PhysRegs)[i] << ", Op" << i << ", " << "TM.getRegisterInfo()->getPhysicalRegisterRegClass(" << (*Memo.PhysRegs)[i] << "), " -- 2.34.1
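
The sketch below is not part of the patch; it only illustrates the shape of the change for readers skimming the diff. Before this commit, emission helpers took a MachineBasicBlock* and could only append to the end of that block; after it, both the block and the insertion point are read from FunctionLoweringInfo, so the same helper can emit at an arbitrary position. It assumes the LLVM 2.7-era BuildMI overloads used throughout the diff; EmitOld and EmitNew are illustrative names, not functions from the tree.

#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/Target/TargetInstrInfo.h"

using namespace llvm;

// Old style: the current block is threaded through every helper as an
// argument, and new instructions can only be appended at its end.
static void EmitOld(MachineBasicBlock *MBB, const TargetInstrInfo &TII,
                    DebugLoc DL, unsigned Reg) {
  BuildMI(MBB, DL, TII.get(TargetOpcode::IMPLICIT_DEF), Reg);
}

// New style: FunctionLoweringInfo carries both the block and the insertion
// point, so emission can happen in the middle of a block.
static void EmitNew(FunctionLoweringInfo &FuncInfo, const TargetInstrInfo &TII,
                    DebugLoc DL, unsigned Reg) {
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
          TII.get(TargetOpcode::IMPLICIT_DEF), Reg);
}

Holding MBB and InsertPt in FunctionLoweringInfo is also what lets the patch drop FastIS->setCurrentBlock(BB) and the MachineBasicBlock* return values from SelectBasicBlock and CodeGenAndEmitDAG: callers now update FuncInfo->MBB and FuncInfo->InsertPt in place, as the SelectionDAGISel.cpp hunks above show.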