diff --git a/lib/CodeGen/LiveIntervalAnalysis.cpp b/lib/CodeGen/LiveIntervalAnalysis.cpp
index ca6c04d8f75..6701e5b93fe 100644
--- a/lib/CodeGen/LiveIntervalAnalysis.cpp
+++ b/lib/CodeGen/LiveIntervalAnalysis.cpp
@@ -2,8 +2,8 @@
 //
 //                     The LLVM Compiler Infrastructure
 //
-// This file was developed by the LLVM research group and is distributed under
-// the University of Illinois Open Source License. See LICENSE.TXT for details.
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
 //
 //===----------------------------------------------------------------------===//
 //
@@ -19,12 +19,12 @@
 #include "llvm/CodeGen/LiveIntervalAnalysis.h"
 #include "VirtRegMap.h"
 #include "llvm/Value.h"
-#include "llvm/Analysis/LoopInfo.h"
 #include "llvm/CodeGen/LiveVariables.h"
 #include "llvm/CodeGen/MachineFrameInfo.h"
 #include "llvm/CodeGen/MachineInstr.h"
+#include "llvm/CodeGen/MachineLoopInfo.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
 #include "llvm/CodeGen/Passes.h"
-#include "llvm/CodeGen/SSARegMap.h"
 #include "llvm/Target/MRegisterInfo.h"
 #include "llvm/Target/TargetInstrInfo.h"
 #include "llvm/Target/TargetMachine.h"
@@ -40,11 +40,17 @@ namespace {
   // Hidden options for help debugging.
   cl::opt<bool> DisableReMat("disable-rematerialization",
                              cl::init(false), cl::Hidden);
+
+  cl::opt<bool> SplitAtBB("split-intervals-at-bb",
+                          cl::init(true), cl::Hidden);
+  cl::opt<int> SplitLimit("split-limit",
+                          cl::init(-1), cl::Hidden);
 }
 
 STATISTIC(numIntervals, "Number of original intervals");
 STATISTIC(numIntervalsAfter, "Number of intervals after coalescing");
-STATISTIC(numFolded, "Number of loads/stores folded into instructions");
+STATISTIC(numFolds, "Number of loads/stores folded into instructions");
+STATISTIC(numSplits, "Number of intervals split");
 
 char LiveIntervals::ID = 0;
 namespace {
@@ -54,10 +60,11 @@
 
 void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
   AU.addPreserved<LiveVariables>();
   AU.addRequired<LiveVariables>();
+  AU.addPreservedID(MachineLoopInfoID);
+  AU.addPreservedID(MachineDominatorsID);
   AU.addPreservedID(PHIEliminationID);
   AU.addRequiredID(PHIEliminationID);
   AU.addRequiredID(TwoAddressInstructionPassID);
-  AU.addRequired<LoopInfo>();
   MachineFunctionPass::getAnalysisUsage(AU);
 }
 
@@ -155,291 +162,44 @@ void LiveIntervals::print(std::ostream &O, const Module* ) const {
   }
 }
 
-/// isReMaterializable - Returns true if the definition MI of the specified
-/// val# of the specified interval is re-materializable.
-bool LiveIntervals::isReMaterializable(const LiveInterval &li,
-                                       const VNInfo *ValNo, MachineInstr *MI) {
-  if (DisableReMat)
-    return false;
-
-  if (tii_->isTriviallyReMaterializable(MI))
-    return true;
-
-  int FrameIdx = 0;
-  if (!tii_->isLoadFromStackSlot(MI, FrameIdx) ||
-      !mf_->getFrameInfo()->isFixedObjectIndex(FrameIdx))
-    return false;
-
-  // This is a load from fixed stack slot. It can be rematerialized unless it's
-  // re-defined by a two-address instruction.
-  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
-       i != e; ++i) {
-    const VNInfo *VNI = *i;
-    if (VNI == ValNo)
-      continue;
-    unsigned DefIdx = VNI->def;
-    if (DefIdx == ~1U)
-      continue; // Dead val#.
-    MachineInstr *DefMI = (DefIdx == ~0u)
-      ? NULL : getInstructionFromIndex(DefIdx);
-    if (DefMI && DefMI->isRegReDefinedByTwoAddr(li.reg))
-      return false;
-  }
-  return true;
-}
-
-/// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
-/// slot / to reg or any rematerialized load into the ith operand of the
-/// specified MI. If successful, MI is updated with the newly created MI and
-/// returns true.
-bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI, VirtRegMap &vrm,
-                                         MachineInstr *DefMI,
-                                         unsigned index, unsigned i,
-                                         bool isSS, int slot, unsigned reg) {
-  MachineInstr *fmi = isSS
-    ? mri_->foldMemoryOperand(MI, i, slot)
-    : mri_->foldMemoryOperand(MI, i, DefMI);
-  if (fmi) {
-    // Attempt to fold the memory reference into the instruction. If
-    // we can do this, we don't need to insert spill code.
-    if (lv_)
-      lv_->instructionChanged(MI, fmi);
-    MachineBasicBlock &MBB = *MI->getParent();
-    vrm.virtFolded(reg, MI, i, fmi);
-    mi2iMap_.erase(MI);
-    i2miMap_[index/InstrSlots::NUM] = fmi;
-    mi2iMap_[fmi] = index;
-    MI = MBB.insert(MBB.erase(MI), fmi);
-    ++numFolded;
-    return true;
-  }
-  return false;
-}
-
-std::vector<LiveInterval*> LiveIntervals::
-addIntervalsForSpills(const LiveInterval &li, VirtRegMap &vrm, unsigned reg) {
-  // Since this is called after the analysis is done we don't know if
-  // LiveVariables is available.
-  lv_ = getAnalysisToUpdate<LiveVariables>();
-
-  std::vector<LiveInterval*> added;
-
-  assert(li.weight != HUGE_VALF &&
-         "attempt to spill already spilled interval!");
-
-  DOUT << "\t\t\t\tadding intervals for spills for interval: ";
-  li.print(DOUT, mri_);
-  DOUT << '\n';
-
-  SSARegMap *RegMap = mf_->getSSARegMap();
-  const TargetRegisterClass* rc = RegMap->getRegClass(li.reg);
-
-  unsigned NumValNums = li.getNumValNums();
-  SmallVector<MachineInstr*, 4> ReMatDefs;
-  ReMatDefs.resize(NumValNums, NULL);
-  SmallVector<MachineInstr*, 4> ReMatOrigDefs;
-  ReMatOrigDefs.resize(NumValNums, NULL);
-  SmallVector<int, 4> ReMatIds;
-  ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
-  BitVector ReMatDelete(NumValNums);
-  unsigned slot = VirtRegMap::MAX_STACK_SLOT;
-
-  bool NeedStackSlot = false;
-  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
-       i != e; ++i) {
-    const VNInfo *VNI = *i;
-    unsigned VN = VNI->id;
-    unsigned DefIdx = VNI->def;
-    if (DefIdx == ~1U)
-      continue; // Dead val#.
-    // Is the def for the val# rematerializable?
-    MachineInstr *DefMI = (DefIdx == ~0u)
-      ? NULL : getInstructionFromIndex(DefIdx);
-    if (DefMI && isReMaterializable(li, VNI, DefMI)) {
-      // Remember how to remat the def of this val#.
-      ReMatOrigDefs[VN] = DefMI;
-      // Original def may be modified so we have to make a copy here. vrm must
-      // delete these!
-      ReMatDefs[VN] = DefMI = DefMI->clone();
-      vrm.setVirtIsReMaterialized(reg, DefMI);
-
-      bool CanDelete = true;
-      for (unsigned j = 0, ee = VNI->kills.size(); j != ee; ++j) {
-        unsigned KillIdx = VNI->kills[j];
-        MachineInstr *KillMI = (KillIdx & 1)
-          ? NULL : getInstructionFromIndex(KillIdx);
-        // Kill is a phi node, not all of its uses can be rematerialized.
-        // It must not be deleted.
-        if (!KillMI) {
-          CanDelete = false;
-          // Need a stack slot if there is any live range where uses cannot be
-          // rematerialized.
-          NeedStackSlot = true;
-          break;
-        }
-      }
-
-      if (CanDelete)
-        ReMatDelete.set(VN);
-    } else {
-      // Need a stack slot if there is any live range where uses cannot be
-      // rematerialized.
-      NeedStackSlot = true;
-    }
-  }
-
-  // One stack slot per live interval.
-  if (NeedStackSlot)
-    slot = vrm.assignVirt2StackSlot(reg);
-
+/// conflictsWithPhysRegDef - Returns true if the specified register is
+/// defined during the live range of the specified interval.
+bool LiveIntervals::conflictsWithPhysRegDef(const LiveInterval &li,
+                                            VirtRegMap &vrm, unsigned reg) {
   for (LiveInterval::Ranges::const_iterator
          I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
-    MachineInstr *DefMI = ReMatDefs[I->valno->id];
-    MachineInstr *OrigDefMI = ReMatOrigDefs[I->valno->id];
-    bool DefIsReMat = DefMI != NULL;
-    bool CanDelete = ReMatDelete[I->valno->id];
-    int LdSlot = 0;
-    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(DefMI, LdSlot);
-    bool isLoad = isLoadSS ||
-      (DefIsReMat && (DefMI->getInstrDescriptor()->Flags & M_LOAD_FLAG));
-    unsigned index = getBaseIndex(I->start);
-    unsigned end = getBaseIndex(I->end-1) + InstrSlots::NUM;
-    for (; index != end; index += InstrSlots::NUM) {
+    for (unsigned index = getBaseIndex(I->start),
+           end = getBaseIndex(I->end-1) + InstrSlots::NUM; index != end;
+         index += InstrSlots::NUM) {
       // skip deleted instructions
       while (index != end && !getInstructionFromIndex(index))
         index += InstrSlots::NUM;
       if (index == end) break;
 
       MachineInstr *MI = getInstructionFromIndex(index);
-
-    RestartInstruction:
+      unsigned SrcReg, DstReg;
+      if (tii_->isMoveInstr(*MI, SrcReg, DstReg))
+        if (SrcReg == li.reg || DstReg == li.reg)
+          continue;
       for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
         MachineOperand& mop = MI->getOperand(i);
         if (!mop.isRegister())
           continue;
-        unsigned Reg = mop.getReg();
-        if (Reg == 0 || MRegisterInfo::isPhysicalRegister(Reg))
+        unsigned PhysReg = mop.getReg();
+        if (PhysReg == 0 || PhysReg == li.reg)
           continue;
-        bool isSubReg = RegMap->isSubRegister(Reg);
-        unsigned SubIdx = 0;
-        if (isSubReg) {
-          SubIdx = RegMap->getSubRegisterIndex(Reg);
-          Reg = RegMap->getSuperRegister(Reg);
-        }
-        if (Reg != li.reg)
-          continue;
-
-        bool TryFold = !DefIsReMat;
-        bool FoldSS = true;
-        int FoldSlot = slot;
-        if (DefIsReMat) {
-          // If this is the rematerializable definition MI itself and
-          // all of its uses are rematerialized, simply delete it.
-          if (MI == OrigDefMI && CanDelete) {
-            RemoveMachineInstrFromMaps(MI);
-            MI->eraseFromParent();
-            break;
-          }
-
-          // If def for this use can't be rematerialized, then try folding.
-          TryFold = !OrigDefMI || (OrigDefMI && (MI == OrigDefMI || isLoad));
-          if (isLoad) {
-            // Try fold loads (from stack slot, constant pool, etc.) into uses.
-            FoldSS = isLoadSS;
-            FoldSlot = LdSlot;
-          }
-        }
-
-        // FIXME: fold subreg use
-        if (!isSubReg && TryFold &&
-            tryFoldMemoryOperand(MI, vrm, DefMI, index, i, FoldSS, FoldSlot, Reg))
-          // Folding the load/store can completely change the instruction in
-          // unpredictable ways, rescan it from the beginning.
-          goto RestartInstruction;
-
-        // Create a new virtual register for the spill interval.
-        unsigned NewVReg = RegMap->createVirtualRegister(rc);
-        if (isSubReg)
-          RegMap->setIsSubRegister(NewVReg, NewVReg, SubIdx);
-
-        // Scan all of the operands of this instruction rewriting operands
-        // to use NewVReg instead of li.reg as appropriate.  We do this for
-        // two reasons:
-        //
-        //   1. If the instr reads the same spilled vreg multiple times, we
-        //      want to reuse the NewVReg.
-        //   2. If the instr is a two-addr instruction, we are required to
-        //      keep the src/dst regs pinned.
-        //
-        // Keep track of whether we replace a use and/or def so that we can
-        // create the spill interval with the appropriate range.
-        mop.setReg(NewVReg);
-
-        bool HasUse = mop.isUse();
-        bool HasDef = mop.isDef();
-        for (unsigned j = i+1, e = MI->getNumOperands(); j != e; ++j) {
-          if (MI->getOperand(j).isRegister() &&
-              MI->getOperand(j).getReg() == li.reg) {
-            MI->getOperand(j).setReg(NewVReg);
-            HasUse |= MI->getOperand(j).isUse();
-            HasDef |= MI->getOperand(j).isDef();
-          }
-        }
-
-        vrm.grow();
-        if (DefIsReMat) {
-          vrm.setVirtIsReMaterialized(NewVReg, DefMI/*, CanDelete*/);
-          if (ReMatIds[I->valno->id] == VirtRegMap::MAX_STACK_SLOT) {
-            // Each valnum may have its own remat id.
-            ReMatIds[I->valno->id] = vrm.assignVirtReMatId(NewVReg);
-          } else {
-            vrm.assignVirtReMatId(NewVReg, ReMatIds[I->valno->id]);
-          }
-          if (!CanDelete || (HasUse && HasDef)) {
-            // If this is a two-addr instruction then its use operands are
-            // rematerializable but its def is not. It should be assigned a
-            // stack slot.
-            vrm.assignVirt2StackSlot(NewVReg, slot);
-          }
-        } else {
-          vrm.assignVirt2StackSlot(NewVReg, slot);
+        if (MRegisterInfo::isVirtualRegister(PhysReg)) {
+          if (!vrm.hasPhys(PhysReg))
+            continue;
+          PhysReg = vrm.getPhys(PhysReg);
         }
-
-        // create a new register interval for this spill / remat.
-        LiveInterval &nI = getOrCreateInterval(NewVReg);
-        assert(nI.empty());
-
-        // the spill weight is now infinity as it
-        // cannot be spilled again
-        nI.weight = HUGE_VALF;
-
-        if (HasUse) {
-          LiveRange LR(getLoadIndex(index), getUseIndex(index)+1,
-                       nI.getNextValue(~0U, 0, VNInfoAllocator));
-          DOUT << " +" << LR;
-          nI.addRange(LR);
-        }
-        if (HasDef) {
-          LiveRange LR(getDefIndex(index), getStoreIndex(index),
-                       nI.getNextValue(~0U, 0, VNInfoAllocator));
-          DOUT << " +" << LR;
-          nI.addRange(LR);
-        }
-
-        added.push_back(&nI);
-
-        // update live variables if it is available
-        if (lv_)
-          lv_->addVirtualRegisterKilled(NewVReg, MI);
-
-        DOUT << "\t\t\t\tadded new interval: ";
-        nI.print(DOUT, mri_);
-        DOUT << '\n';
+        if (PhysReg && mri_->regsOverlap(PhysReg, reg))
+          return true;
       }
     }
   }
-  return added;
+  return false;
 }
 
 void LiveIntervals::printRegName(unsigned reg) const {
@@ -604,7 +364,8 @@ void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
     DOUT << " Removing [" << Start << "," << End << "] from: ";
     interval.print(DOUT, mri_); DOUT << "\n";
     interval.removeRange(Start, End);
-    interval.addKill(VNI, Start+1); // odd # means phi node
+    interval.addKill(VNI, Start);
+    VNI->hasPHIKill = true;
     DOUT << " RESULT: "; interval.print(DOUT, mri_);
     // Replace the interval with one of a NEW value number.  Note that this
@@ -634,7 +395,8 @@
       unsigned killIndex = getInstructionIndex(&mbb->back()) + InstrSlots::NUM;
       LiveRange LR(defIndex, killIndex, ValNo);
       interval.addRange(LR);
-      interval.addKill(ValNo, killIndex-1); // odd # means phi node
+      interval.addKill(ValNo, killIndex);
+      ValNo->hasPHIKill = true;
       DOUT << " +" << LR;
     }
   }
@@ -838,3 +600,866 @@ LiveInterval LiveIntervals::createInterval(unsigned reg) {
                        HUGE_VALF : 0.0F;
   return LiveInterval(reg, Weight);
 }
+
+
+//===----------------------------------------------------------------------===//
+// Register allocator hooks.
+//
+
+/// isReMaterializable - Returns true if the definition MI of the specified
+/// val# of the specified interval is re-materializable.
+bool LiveIntervals::isReMaterializable(const LiveInterval &li,
+                                       const VNInfo *ValNo, MachineInstr *MI,
+                                       bool &isLoad) {
+  if (DisableReMat)
+    return false;
+
+  isLoad = false;
+  const TargetInstrDescriptor *TID = MI->getDesc();
+  if ((TID->Flags & M_IMPLICIT_DEF_FLAG) ||
+      tii_->isTriviallyReMaterializable(MI)) {
+    isLoad = TID->isSimpleLoad();
+    return true;
+  }
+
+  int FrameIdx = 0;
+  if (!tii_->isLoadFromStackSlot(MI, FrameIdx) ||
+      !mf_->getFrameInfo()->isFixedObjectIndex(FrameIdx))
+    return false;
+
+  // This is a load from fixed stack slot. It can be rematerialized unless it's
+  // re-defined by a two-address instruction.
+  isLoad = true;
+  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
+       i != e; ++i) {
+    const VNInfo *VNI = *i;
+    if (VNI == ValNo)
+      continue;
+    unsigned DefIdx = VNI->def;
+    if (DefIdx == ~1U)
+      continue; // Dead val#.
+    MachineInstr *DefMI = (DefIdx == ~0u)
+      ? NULL : getInstructionFromIndex(DefIdx);
+    if (DefMI && DefMI->isRegReDefinedByTwoAddr(li.reg)) {
+      isLoad = false;
+      return false;
+    }
+  }
+  return true;
+}
+
+/// isReMaterializable - Returns true if the definition of every val# of the
+/// specified interval is re-materializable.
+bool LiveIntervals::isReMaterializable(const LiveInterval &li, bool &isLoad) {
+  isLoad = false;
+  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
+       i != e; ++i) {
+    const VNInfo *VNI = *i;
+    unsigned DefIdx = VNI->def;
+    if (DefIdx == ~1U)
+      continue; // Dead val#.
+    // Is the def for the val# rematerializable?
+    if (DefIdx == ~0u)
+      return false;
+    MachineInstr *ReMatDefMI = getInstructionFromIndex(DefIdx);
+    bool DefIsLoad = false;
+    if (!ReMatDefMI || !isReMaterializable(li, VNI, ReMatDefMI, DefIsLoad))
+      return false;
+    isLoad |= DefIsLoad;
+  }
+  return true;
+}
+
+/// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
+/// slot / to reg or any rematerialized load into the ith operand of the
+/// specified MI. If successful, MI is updated with the newly created MI and
+/// returns true.
+bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
+                                         VirtRegMap &vrm, MachineInstr *DefMI,
+                                         unsigned InstrIdx,
+                                         SmallVector<unsigned, 2> &Ops,
+                                         bool isSS, int Slot, unsigned Reg) {
+  unsigned MRInfo = 0;
+  const TargetInstrDescriptor *TID = MI->getDesc();
+  // If it is an implicit def instruction, just delete it.
+  if (TID->Flags & M_IMPLICIT_DEF_FLAG) {
+    RemoveMachineInstrFromMaps(MI);
+    vrm.RemoveMachineInstrFromMaps(MI);
+    MI->eraseFromParent();
+    ++numFolds;
+    return true;
+  }
+
+  SmallVector<unsigned, 2> FoldOps;
+  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
+    unsigned OpIdx = Ops[i];
+    // FIXME: fold subreg use.
+    if (MI->getOperand(OpIdx).getSubReg())
+      return false;
+    if (MI->getOperand(OpIdx).isDef())
+      MRInfo |= (unsigned)VirtRegMap::isMod;
+    else {
+      // Filter out two-address use operand(s).
+      if (TID->getOperandConstraint(OpIdx, TOI::TIED_TO) != -1) {
+        MRInfo = VirtRegMap::isModRef;
+        continue;
+      }
+      MRInfo |= (unsigned)VirtRegMap::isRef;
+    }
+    FoldOps.push_back(OpIdx);
+  }
+
+  MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(MI, FoldOps, Slot)
+                           : tii_->foldMemoryOperand(MI, FoldOps, DefMI);
+  if (fmi) {
+    // Attempt to fold the memory reference into the instruction. If
+    // we can do this, we don't need to insert spill code.
+    if (lv_)
+      lv_->instructionChanged(MI, fmi);
+    else
+      LiveVariables::transferKillDeadInfo(MI, fmi, mri_);
+    MachineBasicBlock &MBB = *MI->getParent();
+    if (isSS && !mf_->getFrameInfo()->isFixedObjectIndex(Slot))
+      vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
+    vrm.transferSpillPts(MI, fmi);
+    vrm.transferRestorePts(MI, fmi);
+    mi2iMap_.erase(MI);
+    i2miMap_[InstrIdx / InstrSlots::NUM] = fmi;
+    mi2iMap_[fmi] = InstrIdx;
+    MI = MBB.insert(MBB.erase(MI), fmi);
+    ++numFolds;
+    return true;
+  }
+  return false;
+}
+
+/// canFoldMemoryOperand - Returns true if the specified load / store
+/// folding is possible.
+bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
+                                         SmallVector<unsigned, 2> &Ops) const {
+  SmallVector<unsigned, 2> FoldOps;
+  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
+    unsigned OpIdx = Ops[i];
+    // FIXME: fold subreg use.
+    if (MI->getOperand(OpIdx).getSubReg())
+      return false;
+    FoldOps.push_back(OpIdx);
+  }
+
+  return tii_->canFoldMemoryOperand(MI, FoldOps);
+}
+
+bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
+  SmallPtrSet<MachineBasicBlock*, 4> MBBs;
+  for (LiveInterval::Ranges::const_iterator
+         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
+    std::vector<IdxMBBPair>::const_iterator II =
+      std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), I->start);
+    if (II == Idx2MBBMap.end())
+      continue;
+    if (I->end > II->first)  // crossing a MBB.
+      return false;
+    MBBs.insert(II->second);
+    if (MBBs.size() > 1)
+      return false;
+  }
+  return true;
+}
+
+/// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
+/// for addIntervalsForSpills to rewrite uses / defs for the given live range.
+bool LiveIntervals::
+rewriteInstructionForSpills(const LiveInterval &li, bool TrySplit,
+                 unsigned id, unsigned index, unsigned end, MachineInstr *MI,
+                 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
+                 unsigned Slot, int LdSlot,
+                 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
+                 VirtRegMap &vrm, MachineRegisterInfo &RegInfo,
+                 const TargetRegisterClass* rc,
+                 SmallVector<int, 4> &ReMatIds,
+                 unsigned &NewVReg, bool &HasDef, bool &HasUse,
+                 const MachineLoopInfo *loopInfo,
+                 std::map<unsigned, unsigned> &MBBVRegsMap,
+                 std::vector<LiveInterval*> &NewLIs) {
+  bool CanFold = false;
+ RestartInstruction:
+  for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
+    MachineOperand& mop = MI->getOperand(i);
+    if (!mop.isRegister())
+      continue;
+    unsigned Reg = mop.getReg();
+    unsigned RegI = Reg;
+    if (Reg == 0 || MRegisterInfo::isPhysicalRegister(Reg))
+      continue;
+    if (Reg != li.reg)
+      continue;
+
+    bool TryFold = !DefIsReMat;
+    bool FoldSS = true; // Default behavior unless it's a remat.
+    int FoldSlot = Slot;
+    if (DefIsReMat) {
+      // If this is the rematerializable definition MI itself and
+      // all of its uses are rematerialized, simply delete it.
+      if (MI == ReMatOrigDefMI && CanDelete) {
+        DOUT << "\t\t\t\tErasing re-materializable def: ";
+        DOUT << MI << '\n';
+        RemoveMachineInstrFromMaps(MI);
+        vrm.RemoveMachineInstrFromMaps(MI);
+        MI->eraseFromParent();
+        break;
+      }
+
+      // If def for this use can't be rematerialized, then try folding.
+      // If def is rematerializable and it's a load, also try folding.
+      TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
+      if (isLoad) {
+        // Try fold loads (from stack slot, constant pool, etc.) into uses.
+        FoldSS = isLoadSS;
+        FoldSlot = LdSlot;
+      }
+    }
+
+    // Scan all of the operands of this instruction rewriting operands
+    // to use NewVReg instead of li.reg as appropriate.  We do this for
+    // two reasons:
+    //
+    //   1. If the instr reads the same spilled vreg multiple times, we
+    //      want to reuse the NewVReg.
+    //   2. If the instr is a two-addr instruction, we are required to
+    //      keep the src/dst regs pinned.
+    //
+    // Keep track of whether we replace a use and/or def so that we can
+    // create the spill interval with the appropriate range.
+
+    HasUse = mop.isUse();
+    HasDef = mop.isDef();
+    SmallVector<unsigned, 2> Ops;
+    Ops.push_back(i);
+    for (unsigned j = i+1, e = MI->getNumOperands(); j != e; ++j) {
+      const MachineOperand &MOj = MI->getOperand(j);
+      if (!MOj.isRegister())
+        continue;
+      unsigned RegJ = MOj.getReg();
+      if (RegJ == 0 || MRegisterInfo::isPhysicalRegister(RegJ))
+        continue;
+      if (RegJ == RegI) {
+        Ops.push_back(j);
+        HasUse |= MOj.isUse();
+        HasDef |= MOj.isDef();
+      }
+    }
+
+    if (TryFold) {
+      // Do not fold load / store here if we are splitting. We'll find an
+      // optimal point to insert a load / store later.
+      if (!TrySplit) {
+        if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
+                                 Ops, FoldSS, FoldSlot, Reg)) {
+          // Folding the load/store can completely change the instruction in
+          // unpredictable ways, rescan it from the beginning.
+          HasUse = false;
+          HasDef = false;
+          CanFold = false;
+          goto RestartInstruction;
+        }
+      } else {
+        CanFold = canFoldMemoryOperand(MI, Ops);
+      }
+    } else
+      CanFold = false;
+
+    // Create a new virtual register for the spill interval.
+    bool CreatedNewVReg = false;
+    if (NewVReg == 0) {
+      NewVReg = RegInfo.createVirtualRegister(rc);
+      vrm.grow();
+      CreatedNewVReg = true;
+    }
+    mop.setReg(NewVReg);
+
+    // Reuse NewVReg for other reads.
+    for (unsigned j = 0, e = Ops.size(); j != e; ++j)
+      MI->getOperand(Ops[j]).setReg(NewVReg);
+
+    if (CreatedNewVReg) {
+      if (DefIsReMat) {
+        vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI/*, CanDelete*/);
+        if (ReMatIds[id] == VirtRegMap::MAX_STACK_SLOT) {
+          // Each valnum may have its own remat id.
+          ReMatIds[id] = vrm.assignVirtReMatId(NewVReg);
+        } else {
+          vrm.assignVirtReMatId(NewVReg, ReMatIds[id]);
+        }
+        if (!CanDelete || (HasUse && HasDef)) {
+          // If this is a two-addr instruction then its use operands are
+          // rematerializable but its def is not. It should be assigned a
+          // stack slot.
+          vrm.assignVirt2StackSlot(NewVReg, Slot);
+        }
+      } else {
+        vrm.assignVirt2StackSlot(NewVReg, Slot);
+      }
+    } else if (HasUse && HasDef &&
+               vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
+      // If this interval hasn't been assigned a stack slot (because earlier
+      // def is a deleted remat def), do it now.
+      assert(Slot != VirtRegMap::NO_STACK_SLOT);
+      vrm.assignVirt2StackSlot(NewVReg, Slot);
+    }
+
+    // create a new register interval for this spill / remat.
+    LiveInterval &nI = getOrCreateInterval(NewVReg);
+    if (CreatedNewVReg) {
+      NewLIs.push_back(&nI);
+      MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
+      if (TrySplit)
+        vrm.setIsSplitFromReg(NewVReg, li.reg);
+    }
+
+    if (HasUse) {
+      if (CreatedNewVReg) {
+        LiveRange LR(getLoadIndex(index), getUseIndex(index)+1,
+                     nI.getNextValue(~0U, 0, VNInfoAllocator));
+        DOUT << " +" << LR;
+        nI.addRange(LR);
+      } else {
+        // Extend the split live interval to this def / use.
+        unsigned End = getUseIndex(index)+1;
+        LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
+                     nI.getValNumInfo(nI.getNumValNums()-1));
+        DOUT << " +" << LR;
+        nI.addRange(LR);
+      }
+    }
+    if (HasDef) {
+      LiveRange LR(getDefIndex(index), getStoreIndex(index),
+                   nI.getNextValue(~0U, 0, VNInfoAllocator));
+      DOUT << " +" << LR;
+      nI.addRange(LR);
+    }
+
+    DOUT << "\t\t\t\tAdded new interval: ";
+    nI.print(DOUT, mri_);
+    DOUT << '\n';
+  }
+  return CanFold;
+}
+
+bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
+                                         const VNInfo *VNI,
+                                         MachineBasicBlock *MBB,
+                                         unsigned Idx) const {
+  unsigned End = getMBBEndIdx(MBB);
+  for (unsigned j = 0, ee = VNI->kills.size(); j != ee; ++j) {
+    unsigned KillIdx = VNI->kills[j];
+    if (KillIdx > Idx && KillIdx < End)
+      return true;
+  }
+  return false;
+}
+
+static const VNInfo *findDefinedVNInfo(const LiveInterval &li, unsigned DefIdx) {
+  const VNInfo *VNI = NULL;
+  for (LiveInterval::const_vni_iterator i = li.vni_begin(),
+         e = li.vni_end(); i != e; ++i)
+    if ((*i)->def == DefIdx) {
+      VNI = *i;
+      break;
+    }
+  return VNI;
+}
+
+void LiveIntervals::
+rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
+                    LiveInterval::Ranges::const_iterator &I,
+                    MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
+                    unsigned Slot, int LdSlot,
+                    bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
+                    VirtRegMap &vrm, MachineRegisterInfo &RegInfo,
+                    const TargetRegisterClass* rc,
+                    SmallVector<int, 4> &ReMatIds,
+                    const MachineLoopInfo *loopInfo,
+                    BitVector &SpillMBBs,
+                    std::map<unsigned, std::vector<SRInfo> > &SpillIdxes,
+                    BitVector &RestoreMBBs,
+                    std::map<unsigned, std::vector<SRInfo> > &RestoreIdxes,
+                    std::map<unsigned, unsigned> &MBBVRegsMap,
+                    std::vector<LiveInterval*> &NewLIs) {
+  bool AllCanFold = true;
+  unsigned NewVReg = 0;
+  unsigned index = getBaseIndex(I->start);
+  unsigned end = getBaseIndex(I->end-1) + InstrSlots::NUM;
+  for (; index != end; index += InstrSlots::NUM) {
+    // skip deleted instructions
+    while (index != end && !getInstructionFromIndex(index))
+      index += InstrSlots::NUM;
+    if (index == end) break;
+
+    MachineInstr *MI = getInstructionFromIndex(index);
+    MachineBasicBlock *MBB = MI->getParent();
+    unsigned ThisVReg = 0;
+    if (TrySplit) {
+      std::map<unsigned, unsigned>::const_iterator NVI =
+        MBBVRegsMap.find(MBB->getNumber());
+      if (NVI != MBBVRegsMap.end()) {
+        ThisVReg = NVI->second;
+        // One common case:
+        //   x = use
+        //   ...
+        //   ...
+        //   def = ...
+        //       = use
+        // It's better to start a new interval to avoid artificially
+        // extending the new interval.
+        // FIXME: Too slow? Can we fix it after rewriteInstructionsForSpills?
+        bool MIHasUse = false;
+        bool MIHasDef = false;
+        for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
+          MachineOperand& mop = MI->getOperand(i);
+          if (!mop.isRegister() || mop.getReg() != li.reg)
+            continue;
+          if (mop.isUse())
+            MIHasUse = true;
+          else
+            MIHasDef = true;
+        }
+        if (MIHasDef && !MIHasUse) {
+          MBBVRegsMap.erase(MBB->getNumber());
+          ThisVReg = 0;
+        }
+      }
+    }
+
+    bool IsNew = ThisVReg == 0;
+    if (IsNew) {
+      // This ends the previous live interval. If all of its def / use
+      // can be folded, give it a low spill weight.
+      if (NewVReg && TrySplit && AllCanFold) {
+        LiveInterval &nI = getOrCreateInterval(NewVReg);
+        nI.weight /= 10.0F;
+      }
+      AllCanFold = true;
+    }
+    NewVReg = ThisVReg;
+
+    bool HasDef = false;
+    bool HasUse = false;
+    bool CanFold = rewriteInstructionForSpills(li, TrySplit, I->valno->id,
+                         index, end, MI, ReMatOrigDefMI, ReMatDefMI,
+                         Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
+                         CanDelete, vrm, RegInfo, rc, ReMatIds, NewVReg,
+                         HasDef, HasUse, loopInfo, MBBVRegsMap, NewLIs);
+    if (!HasDef && !HasUse)
+      continue;
+
+    AllCanFold &= CanFold;
+
+    // Update weight of spill interval.
+    LiveInterval &nI = getOrCreateInterval(NewVReg);
+    if (!TrySplit) {
+      // The spill weight is now infinity as it cannot be spilled again.
+      nI.weight = HUGE_VALF;
+      continue;
+    }
+
+    // Keep track of the last def and first use in each MBB.
+    unsigned MBBId = MBB->getNumber();
+    if (HasDef) {
+      if (MI != ReMatOrigDefMI || !CanDelete) {
+        bool HasKill = false;
+        if (!HasUse)
+          HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, getDefIndex(index));
+        else {
+          // If this is a two-address instruction, then this index starts a
+          // new VNInfo.
+          const VNInfo *VNI = findDefinedVNInfo(li, getDefIndex(index));
+          if (VNI)
+            HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, getDefIndex(index));
+        }
+        std::map<unsigned, std::vector<SRInfo> >::iterator SII =
+          SpillIdxes.find(MBBId);
+        if (!HasKill) {
+          if (SII == SpillIdxes.end()) {
+            std::vector<SRInfo> S;
+            S.push_back(SRInfo(index, NewVReg, true));
+            SpillIdxes.insert(std::make_pair(MBBId, S));
+          } else if (SII->second.back().vreg != NewVReg) {
+            SII->second.push_back(SRInfo(index, NewVReg, true));
+          } else if ((int)index > SII->second.back().index) {
+            // If there is an earlier def and this is a two-address
+            // instruction, then it's not possible to fold the store (which
+            // would also fold the load).
+            SRInfo &Info = SII->second.back();
+            Info.index = index;
+            Info.canFold = !HasUse;
+          }
+          SpillMBBs.set(MBBId);
+        } else if (SII != SpillIdxes.end() &&
+                   SII->second.back().vreg == NewVReg &&
+                   (int)index > SII->second.back().index) {
+          // There is an earlier def that's not killed (must be two-address).
+          // The spill is no longer needed.
+          SII->second.pop_back();
+          if (SII->second.empty()) {
+            SpillIdxes.erase(MBBId);
+            SpillMBBs.reset(MBBId);
+          }
+        }
+      }
+    }
+
+    if (HasUse) {
+      std::map<unsigned, std::vector<SRInfo> >::iterator SII =
+        SpillIdxes.find(MBBId);
+      if (SII != SpillIdxes.end() &&
+          SII->second.back().vreg == NewVReg &&
+          (int)index > SII->second.back().index)
+        // Use(s) following the last def, it's not safe to fold the spill.
+        SII->second.back().canFold = false;
+      std::map<unsigned, std::vector<SRInfo> >::iterator RII =
+        RestoreIdxes.find(MBBId);
+      if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
+        // If we are splitting live intervals, only fold if it's the first
+        // use and there isn't another use later in the MBB.
+        RII->second.back().canFold = false;
+      else if (IsNew) {
+        // Only need a reload if there isn't an earlier def / use.
+        if (RII == RestoreIdxes.end()) {
+          std::vector<SRInfo> Infos;
+          Infos.push_back(SRInfo(index, NewVReg, true));
+          RestoreIdxes.insert(std::make_pair(MBBId, Infos));
+        } else {
+          RII->second.push_back(SRInfo(index, NewVReg, true));
+        }
+        RestoreMBBs.set(MBBId);
+      }
+    }
+
+    // Update spill weight.
+    unsigned loopDepth = loopInfo->getLoopDepth(MBB);
+    nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
+  }
+
+  if (NewVReg && TrySplit && AllCanFold) {
+    // If all of its def / use can be folded, give it a low spill weight.
+    LiveInterval &nI = getOrCreateInterval(NewVReg);
+    nI.weight /= 10.0F;
+  }
+}
+
+bool LiveIntervals::alsoFoldARestore(int Id, int index, unsigned vr,
+                        BitVector &RestoreMBBs,
+                        std::map<unsigned, std::vector<SRInfo> > &RestoreIdxes) {
+  if (!RestoreMBBs[Id])
+    return false;
+  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
+  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
+    if (Restores[i].index == index &&
+        Restores[i].vreg == vr &&
+        Restores[i].canFold)
+      return true;
+  return false;
+}
+
+void LiveIntervals::eraseRestoreInfo(int Id, int index, unsigned vr,
+                        BitVector &RestoreMBBs,
+                        std::map<unsigned, std::vector<SRInfo> > &RestoreIdxes) {
+  if (!RestoreMBBs[Id])
+    return;
+  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
+  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
+    if (Restores[i].index == index && Restores[i].vreg == vr)
+      Restores[i].index = -1;
+}
+
+
+std::vector<LiveInterval*> LiveIntervals::
+addIntervalsForSpills(const LiveInterval &li,
+                      const MachineLoopInfo *loopInfo, VirtRegMap &vrm) {
+  // Since this is called after the analysis is done we don't know if
+  // LiveVariables is available.
+  lv_ = getAnalysisToUpdate<LiveVariables>();
+
+  assert(li.weight != HUGE_VALF &&
+         "attempt to spill already spilled interval!");
+
+  DOUT << "\t\t\t\tadding intervals for spills for interval: ";
+  li.print(DOUT, mri_);
+  DOUT << '\n';
+
+  // Each bit specifies whether a spill is required in the MBB.
+  BitVector SpillMBBs(mf_->getNumBlockIDs());
+  std::map<unsigned, std::vector<SRInfo> > SpillIdxes;
+  BitVector RestoreMBBs(mf_->getNumBlockIDs());
+  std::map<unsigned, std::vector<SRInfo> > RestoreIdxes;
+  std::map<unsigned, unsigned> MBBVRegsMap;
+  std::vector<LiveInterval*> NewLIs;
+  MachineRegisterInfo &RegInfo = mf_->getRegInfo();
+  const TargetRegisterClass* rc = RegInfo.getRegClass(li.reg);
+
+  unsigned NumValNums = li.getNumValNums();
+  SmallVector<MachineInstr*, 4> ReMatDefs;
+  ReMatDefs.resize(NumValNums, NULL);
+  SmallVector<MachineInstr*, 4> ReMatOrigDefs;
+  ReMatOrigDefs.resize(NumValNums, NULL);
+  SmallVector<int, 4> ReMatIds;
+  ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
+  BitVector ReMatDelete(NumValNums);
+  unsigned Slot = VirtRegMap::MAX_STACK_SLOT;
+
+  // Spilling a split live interval. It cannot be split any further. Also,
+  // it's guaranteed to be a single val# / range interval.
+  if (vrm.getPreSplitReg(li.reg)) {
+    vrm.setIsSplitFromReg(li.reg, 0);
+    // Unset the split kill marker on the last use.
+    unsigned KillIdx = vrm.getKillPoint(li.reg);
+    if (KillIdx) {
+      MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
+      assert(KillMI && "Last use disappeared?");
+      int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
+      assert(KillOp != -1 && "Last use disappeared?");
+      KillMI->getOperand(KillOp).setIsKill(false);
+    }
+    vrm.removeKillPoint(li.reg);
+    bool DefIsReMat = vrm.isReMaterialized(li.reg);
+    Slot = vrm.getStackSlot(li.reg);
+    assert(Slot != VirtRegMap::MAX_STACK_SLOT);
+    MachineInstr *ReMatDefMI = DefIsReMat ?
+      vrm.getReMaterializedMI(li.reg) : NULL;
+    int LdSlot = 0;
+    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
+    bool isLoad = isLoadSS ||
+      (DefIsReMat && (ReMatDefMI->getDesc()->isSimpleLoad()));
+    bool IsFirstRange = true;
+    for (LiveInterval::Ranges::const_iterator
+           I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
+      // If this is a split live interval with multiple ranges, it means there
+      // are two-address instructions that re-defined the value. Only the
+      // first def can be rematerialized!
+      if (IsFirstRange) {
+        // Note ReMatOrigDefMI has already been deleted.
+        rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
+                             Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
+                             false, vrm, RegInfo, rc, ReMatIds, loopInfo,
+                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
+                             MBBVRegsMap, NewLIs);
+      } else {
+        rewriteInstructionsForSpills(li, false, I, NULL, 0,
+                             Slot, 0, false, false, false,
+                             false, vrm, RegInfo, rc, ReMatIds, loopInfo,
+                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
+                             MBBVRegsMap, NewLIs);
+      }
+      IsFirstRange = false;
+    }
+    return NewLIs;
+  }
+
+  bool TrySplit = SplitAtBB && !intervalIsInOneMBB(li);
+  if (SplitLimit != -1 && (int)numSplits >= SplitLimit)
+    TrySplit = false;
+  if (TrySplit)
+    ++numSplits;
+  bool NeedStackSlot = false;
+  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
+       i != e; ++i) {
+    const VNInfo *VNI = *i;
+    unsigned VN = VNI->id;
+    unsigned DefIdx = VNI->def;
+    if (DefIdx == ~1U)
+      continue; // Dead val#.
+    // Is the def for the val# rematerializable?
+    MachineInstr *ReMatDefMI = (DefIdx == ~0u)
+      ? 0 : getInstructionFromIndex(DefIdx);
+    bool dummy;
+    if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, dummy)) {
+      // Remember how to remat the def of this val#.
+      ReMatOrigDefs[VN] = ReMatDefMI;
+      // Original def may be modified so we have to make a copy here. vrm must
+      // delete these!
+      ReMatDefs[VN] = ReMatDefMI = ReMatDefMI->clone();
+
+      bool CanDelete = true;
+      if (VNI->hasPHIKill) {
+        // A kill is a phi node; not all of its uses can be rematerialized.
+        // It must not be deleted.
+        CanDelete = false;
+        // Need a stack slot if there is any live range where uses cannot be
+        // rematerialized.
+        NeedStackSlot = true;
+      }
+      if (CanDelete)
+        ReMatDelete.set(VN);
+    } else {
+      // Need a stack slot if there is any live range where uses cannot be
+      // rematerialized.
+      NeedStackSlot = true;
+    }
+  }
+
+  // One stack slot per live interval.
+  if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0)
+    Slot = vrm.assignVirt2StackSlot(li.reg);
+
+  // Create new intervals and rewrite defs and uses.
+  for (LiveInterval::Ranges::const_iterator
+         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
+    MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
+    MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
+    bool DefIsReMat = ReMatDefMI != NULL;
+    bool CanDelete = ReMatDelete[I->valno->id];
+    int LdSlot = 0;
+    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
+    bool isLoad = isLoadSS ||
+      (DefIsReMat && ReMatDefMI->getDesc()->isSimpleLoad());
+    rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
+                           Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
+                           CanDelete, vrm, RegInfo, rc, ReMatIds, loopInfo,
+                           SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
+                           MBBVRegsMap, NewLIs);
+  }
+
+  // Insert spills / restores if we are splitting.
+  if (!TrySplit)
+    return NewLIs;
+
+  SmallPtrSet<LiveInterval*, 4> AddedKill;
+  SmallVector<unsigned, 2> Ops;
+  if (NeedStackSlot) {
+    int Id = SpillMBBs.find_first();
+    while (Id != -1) {
+      std::vector<SRInfo> &spills = SpillIdxes[Id];
+      for (unsigned i = 0, e = spills.size(); i != e; ++i) {
+        int index = spills[i].index;
+        unsigned VReg = spills[i].vreg;
+        LiveInterval &nI = getOrCreateInterval(VReg);
+        bool isReMat = vrm.isReMaterialized(VReg);
+        MachineInstr *MI = getInstructionFromIndex(index);
+        bool CanFold = false;
+        bool FoundUse = false;
+        Ops.clear();
+        if (spills[i].canFold) {
+          CanFold = true;
+          for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
+            MachineOperand &MO = MI->getOperand(j);
+            if (!MO.isRegister() || MO.getReg() != VReg)
+              continue;
+
+            Ops.push_back(j);
+            if (MO.isDef())
+              continue;
+            if (isReMat ||
+                (!FoundUse && !alsoFoldARestore(Id, index, VReg,
+                                                RestoreMBBs, RestoreIdxes))) {
+              // MI has two-address uses of the same register. If the use
+              // isn't the first and only use in the BB, then we can't fold
+              // it. FIXME: Move this to rewriteInstructionsForSpills.
+              CanFold = false;
+              break;
+            }
+            FoundUse = true;
+          }
+        }
+        // Fold the store into the def if possible.
+        bool Folded = false;
+        if (CanFold && !Ops.empty()) {
+          if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot, VReg)) {
+            Folded = true;
+            if (FoundUse) {
+              // Also folded uses, do not issue a load.
+              eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
+              nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
+            }
+            nI.removeRange(getDefIndex(index), getStoreIndex(index));
+          }
+        }
+
+        // Else tell the spiller to issue a spill.
+        if (!Folded) {
+          LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
+          bool isKill = LR->end == getStoreIndex(index);
+          vrm.addSpillPoint(VReg, isKill, MI);
+          if (isKill)
+            AddedKill.insert(&nI);
+        }
+      }
+      Id = SpillMBBs.find_next(Id);
+    }
+  }
+
+  int Id = RestoreMBBs.find_first();
+  while (Id != -1) {
+    std::vector<SRInfo> &restores = RestoreIdxes[Id];
+    for (unsigned i = 0, e = restores.size(); i != e; ++i) {
+      int index = restores[i].index;
+      if (index == -1)
+        continue;
+      unsigned VReg = restores[i].vreg;
+      LiveInterval &nI = getOrCreateInterval(VReg);
+      MachineInstr *MI = getInstructionFromIndex(index);
+      bool CanFold = false;
+      Ops.clear();
+      if (restores[i].canFold) {
+        CanFold = true;
+        for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
+          MachineOperand &MO = MI->getOperand(j);
+          if (!MO.isRegister() || MO.getReg() != VReg)
+            continue;
+
+          if (MO.isDef()) {
+            // If this restore were to be folded, it would have been folded
+            // already.
+            CanFold = false;
+            break;
+          }
+          Ops.push_back(j);
+        }
+      }
+
+      // Fold the load into the use if possible.
+      bool Folded = false;
+      if (CanFold && !Ops.empty()) {
+        if (!vrm.isReMaterialized(VReg))
+          Folded = tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true,
+                                        Slot, VReg);
+        else {
+          MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
+          int LdSlot = 0;
+          bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
+          // If the rematerializable def is a load, also try to fold it.
+          if (isLoadSS || ReMatDefMI->getDesc()->isSimpleLoad())
+            Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
+                                          Ops, isLoadSS, LdSlot, VReg);
+        }
+      }
+      // If folding is not possible / failed, then tell the spiller to issue a
+      // load / rematerialization for us.
+      if (Folded)
+        nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
+      else
+        vrm.addRestorePoint(VReg, MI);
+    }
+    Id = RestoreMBBs.find_next(Id);
+  }
+
+  // Finalize intervals: add kills, finalize spill weights, and filter out
+  // dead intervals.
+  std::vector<LiveInterval*> RetNewLIs;
+  for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
+    LiveInterval *LI = NewLIs[i];
+    if (!LI->empty()) {
+      LI->weight /= LI->getSize();
+      if (!AddedKill.count(LI)) {
+        LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
+        unsigned LastUseIdx = getBaseIndex(LR->end);
+        MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
+        int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg);
+        assert(UseIdx != -1);
+        if (LastUse->getDesc()->getOperandConstraint(UseIdx, TOI::TIED_TO) ==
+            -1) {
+          LastUse->getOperand(UseIdx).setIsKill();
+          vrm.addKillPoint(LI->reg, LastUseIdx);
+        }
+      }
+      RetNewLIs.push_back(LI);
+    }
+  }
+
+  return RetNewLIs;
+}
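
Note for readers of this patch: the live-range endpoints built above, such as LiveRange(getLoadIndex(index), getUseIndex(index)+1, ...) for restores and LiveRange(getDefIndex(index), getStoreIndex(index), ...) for spills, together with the "index += InstrSlots::NUM" stride, all rely on the four-slot instruction numbering declared in LiveIntervalAnalysis.h at this revision. The following is a minimal, self-contained C++ sketch of that scheme. It is not part of the diff; the enum values and helper shapes are reconstructed from the header of this era, so treat them as assumptions rather than quoted source.

// Standalone sketch of the instruction-slot numbering this patch assumes.
// Reconstructed from LiveIntervalAnalysis.h of this era; the exact values
// are an assumption, not part of the diff itself.
#include <cassert>

namespace InstrSlots {
  enum {
    LOAD  = 0,  // Slot for reloads feeding the instruction.
    USE   = 1,  // Slot where operand uses are read.
    DEF   = 2,  // Slot where results are defined.
    STORE = 3,  // Slot for spill stores after the instruction.
    NUM   = 4   // Each instruction occupies four consecutive indices.
  };
}

// Round an index down to the first slot of its instruction.
static unsigned getBaseIndex(unsigned index) {
  return index - (index % InstrSlots::NUM);
}
static unsigned getLoadIndex(unsigned index) {
  return getBaseIndex(index) + InstrSlots::LOAD;
}
static unsigned getUseIndex(unsigned index) {
  return getBaseIndex(index) + InstrSlots::USE;
}
static unsigned getDefIndex(unsigned index) {
  return getBaseIndex(index) + InstrSlots::DEF;
}
static unsigned getStoreIndex(unsigned index) {
  return getBaseIndex(index) + InstrSlots::STORE;
}

int main() {
  // The second instruction of a function starts at index 4 (1 * NUM).
  unsigned index = 5;  // Any slot inside instruction #1 maps to base 4.
  assert(getBaseIndex(index) == 4);

  // A restore interval [load, use+1) is live from the reload point through
  // the use slot, matching the LiveRange built for HasUse in the patch.
  assert(getLoadIndex(index) == 4 && getUseIndex(index) + 1 == 6);

  // A spill interval [def, store) is live from the def until the spill
  // store, matching the LiveRange built for HasDef in the patch.
  assert(getDefIndex(index) == 6 && getStoreIndex(index) == 7);
  return 0;
}

Under this numbering, stepping by InstrSlots::NUM visits one instruction per iteration, and the half-open endpoints explain why a restore ends at getUseIndex(index)+1 while a spill ends exactly at getStoreIndex(index).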