X-Git-Url: http://plrg.eecs.uci.edu/git/?a=blobdiff_plain;f=lib%2FCodeGen%2FInlineSpiller.cpp;h=59a92890f5a686d2945dfa522b6d081e71099131;hb=e2007c9e7e58f66ad9976e89d83b3ea315b5dc93;hp=28235b0f973d36f1689af48991972868f102b11e;hpb=6035518e3bd06cef0515af5a319fbe2cea7df6d4;p=oota-llvm.git

diff --git a/lib/CodeGen/InlineSpiller.cpp b/lib/CodeGen/InlineSpiller.cpp
index 28235b0f973..59a92890f5a 100644
--- a/lib/CodeGen/InlineSpiller.cpp
+++ b/lib/CodeGen/InlineSpiller.cpp
@@ -34,7 +34,6 @@
 #include "llvm/Support/Debug.h"
 #include "llvm/Support/raw_ostream.h"
 #include "llvm/Target/TargetInstrInfo.h"
-#include "llvm/Target/TargetMachine.h"
 
 using namespace llvm;
 
@@ -136,7 +135,7 @@ private:
   // Dead defs generated during spilling.
   SmallVector<MachineInstr*, 8> DeadDefs;
 
-  ~InlineSpiller() {}
+  ~InlineSpiller() override {}
 
 public:
   InlineSpiller(MachineFunctionPass &pass, MachineFunction &mf, VirtRegMap &vrm)
@@ -185,11 +184,16 @@ private:
 }
 
 namespace llvm {
+
+Spiller::~Spiller() { }
+void Spiller::anchor() { }
+
 Spiller *createInlineSpiller(MachineFunctionPass &pass,
                              MachineFunction &mf,
                              VirtRegMap &vrm) {
   return new InlineSpiller(pass, mf, vrm);
 }
+
 }
 
 //===----------------------------------------------------------------------===//
@@ -504,6 +508,7 @@ MachineInstr *InlineSpiller::traceSiblingValue(unsigned UseReg, VNInfo *UseVNI,
   SmallVector<std::pair<unsigned, VNInfo*>, 8> WorkList;
   WorkList.push_back(std::make_pair(UseReg, UseVNI));
 
+  LiveInterval &OrigLI = LIS.getInterval(Original);
   do {
     unsigned Reg;
     VNInfo *VNI;
@@ -517,8 +522,11 @@ MachineInstr *InlineSpiller::traceSiblingValue(unsigned UseReg, VNInfo *UseVNI,
 
     // Trace through PHI-defs created by live range splitting.
     if (VNI->isPHIDef()) {
-      // Stop at original PHIs.  We don't know the value at the predecessors.
-      if (VNI->def == OrigVNI->def) {
+      // Stop at original PHIs.  We don't know the value at the
+      // predecessors. Look up the VNInfo for the current definition
+      // in OrigLI, to properly determine whether or not this phi was
+      // added by splitting.
+      if (VNI->def == OrigLI.getVNInfoAt(VNI->def)->def) {
         DEBUG(dbgs() << "orig phi value\n");
         SVI->second.DefByOrigPHI = true;
         SVI->second.AllDefsAreReloads = false;
@@ -538,7 +546,6 @@ MachineInstr *InlineSpiller::traceSiblingValue(unsigned UseReg, VNInfo *UseVNI,
       // Separate all values dominated by OrigVNI into PHIs and non-PHIs.
       SmallVector<VNInfo*, 8> PHIs, NonPHIs;
       LiveInterval &LI = LIS.getInterval(Reg);
-      LiveInterval &OrigLI = LIS.getInterval(Original);
 
       for (LiveInterval::vni_iterator VI = LI.vni_begin(), VE = LI.vni_end();
            VI != VE; ++VI) {
@@ -569,8 +576,8 @@ MachineInstr *InlineSpiller::traceSiblingValue(unsigned UseReg, VNInfo *UseVNI,
         std::tie(SVI, Inserted) =
           SibValues.insert(std::make_pair(NonPHI, SibValueInfo(Reg, NonPHI)));
         // Add all the PHIs as dependents of NonPHI.
-        for (unsigned pi = 0, pe = PHIs.size(); pi != pe; ++pi)
-          SVI->second.Deps.push_back(PHIs[pi]);
+        SVI->second.Deps.insert(SVI->second.Deps.end(), PHIs.begin(),
+                                PHIs.end());
         // This is the first time we see NonPHI, add it to the worklist.
         if (Inserted)
           WorkList.push_back(std::make_pair(Reg, NonPHI));
@@ -819,7 +826,7 @@ void InlineSpiller::markValueUsed(LiveInterval *LI, VNInfo *VNI) {
   WorkList.push_back(std::make_pair(LI, VNI));
   do {
     std::tie(LI, VNI) = WorkList.pop_back_val();
-    if (!UsedValues.insert(VNI))
+    if (!UsedValues.insert(VNI).second)
      continue;
 
     if (VNI->isPHIDef()) {
@@ -848,6 +855,15 @@ void InlineSpiller::markValueUsed(LiveInterval *LI, VNInfo *VNI) {
 
 /// reMaterializeFor - Attempt to rematerialize before MI instead of reloading.
 bool InlineSpiller::reMaterializeFor(LiveInterval &VirtReg,
                                      MachineBasicBlock::iterator MI) {
+
+  // Analyze instruction
+  SmallVector<std::pair<MachineInstr *, unsigned>, 8> Ops;
+  MIBundleOperands::VirtRegInfo RI =
+    MIBundleOperands(MI).analyzeVirtReg(VirtReg.reg, &Ops);
+
+  if (!RI.Reads)
+    return false;
+
   SlotIndex UseIdx = LIS.getInstructionIndex(MI).getRegSlot(true);
   VNInfo *ParentVNI = VirtReg.getVNInfoAt(UseIdx.getBaseIndex());
@@ -878,9 +894,6 @@ bool InlineSpiller::reMaterializeFor(LiveInterval &VirtReg,
 
   // If the instruction also writes VirtReg.reg, it had better not require the
   // same register for uses and defs.
-  SmallVector<std::pair<MachineInstr *, unsigned>, 8> Ops;
-  MIBundleOperands::VirtRegInfo RI =
-    MIBundleOperands(MI).analyzeVirtReg(VirtReg.reg, &Ops);
   if (RI.Tied) {
     markValueUsed(&VirtReg, ParentVNI);
     DEBUG(dbgs() << "\tcannot remat tied reg: " << UseIdx << '\t' << *MI);
@@ -934,10 +947,15 @@ void InlineSpiller::reMaterializeAll() {
   for (unsigned i = 0, e = RegsToSpill.size(); i != e; ++i) {
     unsigned Reg = RegsToSpill[i];
     LiveInterval &LI = LIS.getInterval(Reg);
-    for (MachineRegisterInfo::use_bundle_nodbg_iterator
-         RI = MRI.use_bundle_nodbg_begin(Reg), E = MRI.use_bundle_nodbg_end();
-         RI != E; ) {
-      MachineInstr *MI = &*(RI++);
+    for (MachineRegisterInfo::reg_bundle_iterator
+           RegI = MRI.reg_bundle_begin(Reg), E = MRI.reg_bundle_end();
+         RegI != E; ) {
+      MachineInstr *MI = &*(RegI++);
+
+      // Debug values are not allowed to affect codegen.
+      if (MI->isDebugValue())
+        continue;
+
       anyRemat |= reMaterializeFor(LI, MI);
     }
   }
@@ -1073,7 +1091,8 @@ foldMemoryOperand(ArrayRef<std::pair<MachineInstr*, unsigned> > Ops,
   bool WasCopy = MI->isCopy();
   unsigned ImpReg = 0;
 
-  bool SpillSubRegs = (MI->getOpcode() == TargetOpcode::PATCHPOINT ||
+  bool SpillSubRegs = (MI->getOpcode() == TargetOpcode::STATEPOINT ||
+                       MI->getOpcode() == TargetOpcode::PATCHPOINT ||
                        MI->getOpcode() == TargetOpcode::STACKMAP);
 
   // TargetInstrInfo::foldMemoryOperand only expects explicit, non-tied
@@ -1123,13 +1142,8 @@ foldMemoryOperand(ArrayRef<std::pair<MachineInstr*, unsigned> > Ops,
       continue;
     // FoldMI does not define this physreg. Remove the LI segment.
     assert(MO->isDead() && "Cannot fold physreg def");
-    for (MCRegUnitIterator Units(Reg, &TRI); Units.isValid(); ++Units) {
-      if (LiveRange *LR = LIS.getCachedRegUnit(*Units)) {
-        SlotIndex Idx = LIS.getInstructionIndex(MI).getRegSlot();
-        if (VNInfo *VNI = LR->getVNInfoAt(Idx))
-          LR->removeValNo(VNI);
-      }
-    }
+    SlotIndex Idx = LIS.getInstructionIndex(MI).getRegSlot();
+    LIS.removePhysRegDefAt(Reg, Idx);
   }
 
   LIS.ReplaceMachineInstrInMaps(MI, FoldMI);
@@ -1213,12 +1227,18 @@ void InlineSpiller::spillAroundUses(unsigned Reg) {
       // Modify DBG_VALUE now that the value is in a spill slot.
       bool IsIndirect = MI->isIndirectDebugValue();
       uint64_t Offset = IsIndirect ?
                         MI->getOperand(1).getImm() : 0;
-      const MDNode *MDPtr = MI->getOperand(2).getMetadata();
+      const MDNode *Var = MI->getDebugVariable();
+      const MDNode *Expr = MI->getDebugExpression();
       DebugLoc DL = MI->getDebugLoc();
       DEBUG(dbgs() << "Modifying debug info due to spill:" << "\t" << *MI);
       MachineBasicBlock *MBB = MI->getParent();
+      assert(cast<DILocalVariable>(Var)->isValidLocationForIntrinsic(DL) &&
+             "Expected inlined-at fields to agree");
       BuildMI(*MBB, MBB->erase(MI), DL, TII.get(TargetOpcode::DBG_VALUE))
-          .addFrameIndex(StackSlot).addImm(Offset).addMetadata(MDPtr);
+          .addFrameIndex(StackSlot)
+          .addImm(Offset)
+          .addMetadata(Var)
+          .addMetadata(Expr);
       continue;
     }
@@ -1358,7 +1378,7 @@ void InlineSpiller::spill(LiveRangeEdit &edit) {
   StackInt = nullptr;
 
   DEBUG(dbgs() << "Inline spilling "
-               << MRI.getRegClass(edit.getReg())->getName()
+               << TRI.getRegClassName(MRI.getRegClass(edit.getReg()))
                << ':' << edit.getParent()
                << "\nFrom original " << PrintReg(Original) << '\n');
   assert(edit.getParent().isSpillable() &&