//
// The LLVM Compiler Infrastructure
//
-// This file was developed by the LLVM research group and is distributed under
-// the University of Illinois Open Source License. See LICENSE.TXT for details.
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "VirtRegMap.h"
#include "llvm/Value.h"
-#include "llvm/Analysis/LoopInfo.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstr.h"
+#include "llvm/CodeGen/MachineLoopInfo.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
-#include "llvm/CodeGen/SSARegMap.h"
#include "llvm/Target/MRegisterInfo.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
cl::init(false), cl::Hidden);
cl::opt<bool> SplitAtBB("split-intervals-at-bb",
- cl::init(false), cl::Hidden);
+ cl::init(true), cl::Hidden);
cl::opt<int> SplitLimit("split-limit",
cl::init(-1), cl::Hidden);
}
void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
AU.addPreserved<LiveVariables>();
AU.addRequired<LiveVariables>();
+ AU.addPreservedID(MachineLoopInfoID);
+ AU.addPreservedID(MachineDominatorsID);
AU.addPreservedID(PHIEliminationID);
AU.addRequiredID(PHIEliminationID);
AU.addRequiredID(TwoAddressInstructionPassID);
/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
- const VNInfo *ValNo, MachineInstr *MI) {
+ const VNInfo *ValNo, MachineInstr *MI,
+ bool &isLoad) {
if (DisableReMat)
return false;
- if (tii_->isTriviallyReMaterializable(MI))
+ isLoad = false;
+ const TargetInstrDescriptor *TID = MI->getDesc();
+ if ((TID->Flags & M_IMPLICIT_DEF_FLAG) ||
+ tii_->isTriviallyReMaterializable(MI)) {
+ isLoad = TID->isSimpleLoad();
return true;
+ }
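+  // Callers use the isLoad flag to try folding a rematerialized load
+  // directly into its uses instead of emitting a separate reload (see the
+  // restore-folding logic in addIntervalsForSpills).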
int FrameIdx = 0;
if (!tii_->isLoadFromStackSlot(MI, FrameIdx) ||
// This is a load from a fixed stack slot. It can be rematerialized unless
// it's re-defined by a two-address instruction.
+ isLoad = true;
for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
i != e; ++i) {
const VNInfo *VNI = *i;
continue; // Dead val#.
MachineInstr *DefMI = (DefIdx == ~0u)
? NULL : getInstructionFromIndex(DefIdx);
- if (DefMI && DefMI->isRegReDefinedByTwoAddr(li.reg))
+ if (DefMI && DefMI->isRegReDefinedByTwoAddr(li.reg)) {
+ isLoad = false;
+ return false;
+ }
+ }
+ return true;
+}
+
+/// isReMaterializable - Returns true if the definition of every val# of the
+/// specified interval is re-materializable.
+bool LiveIntervals::isReMaterializable(const LiveInterval &li, bool &isLoad) {
+ isLoad = false;
+ for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
+ i != e; ++i) {
+ const VNInfo *VNI = *i;
+ unsigned DefIdx = VNI->def;
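+    // Assumed VNInfo::def sentinel convention in this file: ~1U marks a
+    // dead val#, ~0U a val# with no known defining instruction.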
+ if (DefIdx == ~1U)
+ continue; // Dead val#.
+ // Is the def for the val# rematerializable?
+ if (DefIdx == ~0u)
return false;
+ MachineInstr *ReMatDefMI = getInstructionFromIndex(DefIdx);
+ bool DefIsLoad = false;
+ if (!ReMatDefMI || !isReMaterializable(li, VNI, ReMatDefMI, DefIsLoad))
+ return false;
+ isLoad |= DefIsLoad;
}
return true;
}
/// returns true.
bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
VirtRegMap &vrm, MachineInstr *DefMI,
- unsigned InstrIdx, unsigned OpIdx,
- SmallVector<unsigned, 2> &UseOps,
+ unsigned InstrIdx,
+ SmallVector<unsigned, 2> &Ops,
bool isSS, int Slot, unsigned Reg) {
- // FIXME: fold subreg use
- if (MI->getOperand(OpIdx).getSubReg())
- return false;
-
- MachineInstr *fmi = NULL;
+ unsigned MRInfo = 0;
+ const TargetInstrDescriptor *TID = MI->getDesc();
+ // If it is an implicit def instruction, just delete it.
+ if (TID->Flags & M_IMPLICIT_DEF_FLAG) {
+ RemoveMachineInstrFromMaps(MI);
+ vrm.RemoveMachineInstrFromMaps(MI);
+ MI->eraseFromParent();
+ ++numFolds;
+ return true;
+ }
- if (UseOps.size() < 2)
- fmi = isSS ? mri_->foldMemoryOperand(MI, OpIdx, Slot)
- : mri_->foldMemoryOperand(MI, OpIdx, DefMI);
- else {
- if (OpIdx != UseOps[0])
- // Must be two-address instruction + one more use. Not going to fold.
+ SmallVector<unsigned, 2> FoldOps;
+ for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
+ unsigned OpIdx = Ops[i];
+ // FIXME: fold subreg use.
+ if (MI->getOperand(OpIdx).getSubReg())
return false;
- // It may be possible to fold load when there are multiple uses.
- // e.g. On x86, TEST32rr r, r -> CMP32rm [mem], 0
- fmi = isSS ? mri_->foldMemoryOperand(MI, UseOps, Slot)
- : mri_->foldMemoryOperand(MI, UseOps, DefMI);
+ if (MI->getOperand(OpIdx).isDef())
+ MRInfo |= (unsigned)VirtRegMap::isMod;
+ else {
+ // Filter out two-address use operand(s).
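+      // A tied use is read and then overwritten by the paired def, so the
+      // memory reference must be recorded as both a load and a store.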
+ if (TID->getOperandConstraint(OpIdx, TOI::TIED_TO) != -1) {
+ MRInfo = VirtRegMap::isModRef;
+ continue;
+ }
+ MRInfo |= (unsigned)VirtRegMap::isRef;
+ }
+ FoldOps.push_back(OpIdx);
}
+ MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(MI, FoldOps, Slot)
+ : tii_->foldMemoryOperand(MI, FoldOps, DefMI);
if (fmi) {
// Attempt to fold the memory reference into the instruction. If
// we can do this, we don't need to insert spill code.
LiveVariables::transferKillDeadInfo(MI, fmi, mri_);
MachineBasicBlock &MBB = *MI->getParent();
if (isSS && !mf_->getFrameInfo()->isFixedObjectIndex(Slot))
- vrm.virtFolded(Reg, MI, OpIdx, fmi);
+ vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
vrm.transferSpillPts(MI, fmi);
vrm.transferRestorePts(MI, fmi);
mi2iMap_.erase(MI);
return false;
}
+/// canFoldMemoryOperand - Returns true if the specified load / store
+/// folding is possible.
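+/// This mirrors the operand screening in tryFoldMemoryOperand without
+/// rewriting anything, so the splitting code can query foldability up front.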
+bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
+ SmallVector<unsigned, 2> &Ops) const {
+ SmallVector<unsigned, 2> FoldOps;
+ for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
+ unsigned OpIdx = Ops[i];
+ // FIXME: fold subreg use.
+ if (MI->getOperand(OpIdx).getSubReg())
+ return false;
+ FoldOps.push_back(OpIdx);
+ }
+
+ return tii_->canFoldMemoryOperand(MI, FoldOps);
+}
+
bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
SmallPtrSet<MachineBasicBlock*, 4> MBBs;
for (LiveInterval::Ranges::const_iterator
/// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
/// for addIntervalsForSpills to rewrite uses / defs for the given live range.
-void LiveIntervals::
+bool LiveIntervals::
rewriteInstructionForSpills(const LiveInterval &li, bool TrySplit,
unsigned id, unsigned index, unsigned end, MachineInstr *MI,
MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
unsigned Slot, int LdSlot,
bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
- VirtRegMap &vrm, SSARegMap *RegMap,
+ VirtRegMap &vrm, MachineRegisterInfo &RegInfo,
const TargetRegisterClass* rc,
SmallVector<int, 4> &ReMatIds,
unsigned &NewVReg, bool &HasDef, bool &HasUse,
- const LoopInfo *loopInfo,
+ const MachineLoopInfo *loopInfo,
std::map<unsigned,unsigned> &MBBVRegsMap,
std::vector<LiveInterval*> &NewLIs) {
+ bool CanFold = false;
RestartInstruction:
for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
MachineOperand& mop = MI->getOperand(i);
}
}
- // Do not fold load / store here if we are splitting. We'll find an
- // optimal point to insert a load / store later.
- if (TryFold)
- TryFold = !TrySplit && NewVReg == 0;
-
// Scan all of the operands of this instruction rewriting operands
// to use NewVReg instead of li.reg as appropriate. We do this for
// two reasons:
HasUse = mop.isUse();
HasDef = mop.isDef();
- SmallVector<unsigned, 2> UseOps;
- if (HasUse)
- UseOps.push_back(i);
- std::vector<unsigned> UpdateOps;
+ SmallVector<unsigned, 2> Ops;
+ Ops.push_back(i);
for (unsigned j = i+1, e = MI->getNumOperands(); j != e; ++j) {
- if (!MI->getOperand(j).isRegister())
+ const MachineOperand &MOj = MI->getOperand(j);
+ if (!MOj.isRegister())
continue;
- unsigned RegJ = MI->getOperand(j).getReg();
+ unsigned RegJ = MOj.getReg();
if (RegJ == 0 || MRegisterInfo::isPhysicalRegister(RegJ))
continue;
if (RegJ == RegI) {
- UpdateOps.push_back(j);
- if (MI->getOperand(j).isUse())
- UseOps.push_back(j);
- HasUse |= MI->getOperand(j).isUse();
- HasDef |= MI->getOperand(j).isDef();
+ Ops.push_back(j);
+ HasUse |= MOj.isUse();
+ HasDef |= MOj.isDef();
}
}
- if (TryFold &&
- tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index, i,
- UseOps, FoldSS, FoldSlot, Reg)) {
- // Folding the load/store can completely change the instruction in
- // unpredictable ways, rescan it from the beginning.
- HasUse = false;
- HasDef = false;
- goto RestartInstruction;
- }
+ if (TryFold) {
+ // Do not fold load / store here if we are splitting. We'll find an
+ // optimal point to insert a load / store later.
+ if (!TrySplit) {
+ if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
+ Ops, FoldSS, FoldSlot, Reg)) {
+ // Folding the load/store can completely change the instruction in
+ // unpredictable ways, rescan it from the beginning.
+ HasUse = false;
+ HasDef = false;
+ CanFold = false;
+ goto RestartInstruction;
+ }
+ } else {
+ CanFold = canFoldMemoryOperand(MI, Ops);
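+        // Only record that folding is possible; the fold itself happens
+        // later, at the spill / restore point chosen for the split interval.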
+ }
+ } else
+ CanFold = false;
// Create a new virtual register for the spill interval.
bool CreatedNewVReg = false;
if (NewVReg == 0) {
- NewVReg = RegMap->createVirtualRegister(rc);
+ NewVReg = RegInfo.createVirtualRegister(rc);
vrm.grow();
CreatedNewVReg = true;
}
mop.setReg(NewVReg);
// Reuse NewVReg for other reads.
- for (unsigned j = 0, e = UpdateOps.size(); j != e; ++j)
- MI->getOperand(UpdateOps[j]).setReg(NewVReg);
+ for (unsigned j = 0, e = Ops.size(); j != e; ++j)
+ MI->getOperand(Ops[j]).setReg(NewVReg);
if (CreatedNewVReg) {
if (DefIsReMat) {
nI.print(DOUT, mri_);
DOUT << '\n';
}
+ return CanFold;
}
-
bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
const VNInfo *VNI,
MachineBasicBlock *MBB, unsigned Idx) const {
MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
unsigned Slot, int LdSlot,
bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
- VirtRegMap &vrm, SSARegMap *RegMap,
+ VirtRegMap &vrm, MachineRegisterInfo &RegInfo,
const TargetRegisterClass* rc,
SmallVector<int, 4> &ReMatIds,
- const LoopInfo *loopInfo,
+ const MachineLoopInfo *loopInfo,
BitVector &SpillMBBs,
std::map<unsigned, std::vector<SRInfo> > &SpillIdxes,
BitVector &RestoreMBBs,
std::map<unsigned, std::vector<SRInfo> > &RestoreIdxes,
std::map<unsigned,unsigned> &MBBVRegsMap,
std::vector<LiveInterval*> &NewLIs) {
+ bool AllCanFold = true;
unsigned NewVReg = 0;
unsigned index = getBaseIndex(I->start);
unsigned end = getBaseIndex(I->end-1) + InstrSlots::NUM;
- bool TrySplitMI = TrySplit && vrm.getPreSplitReg(li.reg) == 0;
for (; index != end; index += InstrSlots::NUM) {
// skip deleted instructions
while (index != end && !getInstructionFromIndex(index))
MachineInstr *MI = getInstructionFromIndex(index);
MachineBasicBlock *MBB = MI->getParent();
- NewVReg = 0;
- if (TrySplitMI) {
+ unsigned ThisVReg = 0;
+ if (TrySplit) {
std::map<unsigned,unsigned>::const_iterator NVI =
MBBVRegsMap.find(MBB->getNumber());
if (NVI != MBBVRegsMap.end()) {
- NewVReg = NVI->second;
+ ThisVReg = NVI->second;
// One common case:
// x = use
// ...
}
if (MIHasDef && !MIHasUse) {
MBBVRegsMap.erase(MBB->getNumber());
- NewVReg = 0;
+ ThisVReg = 0;
}
}
}
- bool IsNew = NewVReg == 0;
+
+ bool IsNew = ThisVReg == 0;
+ if (IsNew) {
+      // This ends the previous live interval. If all of its defs / uses
+      // can be folded, give it a low spill weight.
+ if (NewVReg && TrySplit && AllCanFold) {
+ LiveInterval &nI = getOrCreateInterval(NewVReg);
+ nI.weight /= 10.0F;
+ }
+ AllCanFold = true;
+ }
+ NewVReg = ThisVReg;
+
bool HasDef = false;
bool HasUse = false;
- rewriteInstructionForSpills(li, TrySplitMI, I->valno->id, index, end,
- MI, ReMatOrigDefMI, ReMatDefMI, Slot, LdSlot,
- isLoad, isLoadSS, DefIsReMat, CanDelete, vrm,
- RegMap, rc, ReMatIds, NewVReg, HasDef, HasUse,
- loopInfo, MBBVRegsMap, NewLIs);
+ bool CanFold = rewriteInstructionForSpills(li, TrySplit, I->valno->id,
+ index, end, MI, ReMatOrigDefMI, ReMatDefMI,
+ Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
+ CanDelete, vrm, RegInfo, rc, ReMatIds, NewVReg,
+ HasDef, HasUse, loopInfo, MBBVRegsMap, NewLIs);
if (!HasDef && !HasUse)
continue;
+ AllCanFold &= CanFold;
+
// Update weight of spill interval.
LiveInterval &nI = getOrCreateInterval(NewVReg);
- if (!TrySplitMI) {
+ if (!TrySplit) {
// The spill weight is now infinity as it cannot be spilled again.
nI.weight = HUGE_VALF;
continue;
if (VNI)
HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, getDefIndex(index));
}
+ std::map<unsigned, std::vector<SRInfo> >::iterator SII =
+ SpillIdxes.find(MBBId);
if (!HasKill) {
- std::map<unsigned, std::vector<SRInfo> >::iterator SII =
- SpillIdxes.find(MBBId);
if (SII == SpillIdxes.end()) {
std::vector<SRInfo> S;
S.push_back(SRInfo(index, NewVReg, true));
Info.canFold = !HasUse;
}
SpillMBBs.set(MBBId);
+ } else if (SII != SpillIdxes.end() &&
+ SII->second.back().vreg == NewVReg &&
+ (int)index > SII->second.back().index) {
+ // There is an earlier def that's not killed (must be two-address).
+ // The spill is no longer needed.
+ SII->second.pop_back();
+ if (SII->second.empty()) {
+ SpillIdxes.erase(MBBId);
+ SpillMBBs.reset(MBBId);
+ }
}
}
}
}
// Update spill weight.
- unsigned loopDepth = loopInfo->getLoopDepth(MBB->getBasicBlock());
+ unsigned loopDepth = loopInfo->getLoopDepth(MBB);
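+      // Deeper loop nests yield larger weights (getSpillWeight is assumed
+      // to grow roughly as 10^loopDepth), biasing allocation against
+      // spilling hot intervals.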
nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
}
+
+ if (NewVReg && TrySplit && AllCanFold) {
+    // If all of its defs / uses can be folded, give it a low spill weight.
+ LiveInterval &nI = getOrCreateInterval(NewVReg);
+ nI.weight /= 10.0F;
+ }
}
bool LiveIntervals::alsoFoldARestore(int Id, int index, unsigned vr,
std::vector<LiveInterval*> LiveIntervals::
addIntervalsForSpills(const LiveInterval &li,
- const LoopInfo *loopInfo, VirtRegMap &vrm) {
+ const MachineLoopInfo *loopInfo, VirtRegMap &vrm) {
// Since this is called after the analysis is done, we don't know if
// LiveVariables is available.
lv_ = getAnalysisToUpdate<LiveVariables>();
std::map<unsigned, std::vector<SRInfo> > RestoreIdxes;
std::map<unsigned,unsigned> MBBVRegsMap;
std::vector<LiveInterval*> NewLIs;
- SSARegMap *RegMap = mf_->getSSARegMap();
- const TargetRegisterClass* rc = RegMap->getRegClass(li.reg);
+ MachineRegisterInfo &RegInfo = mf_->getRegInfo();
+ const TargetRegisterClass* rc = RegInfo.getRegClass(li.reg);
unsigned NumValNums = li.getNumValNums();
SmallVector<MachineInstr*, 4> ReMatDefs;
// it's also guaranteed to be a single val# / range interval.
if (vrm.getPreSplitReg(li.reg)) {
vrm.setIsSplitFromReg(li.reg, 0);
+ // Unset the split kill marker on the last use.
+ unsigned KillIdx = vrm.getKillPoint(li.reg);
+ if (KillIdx) {
+ MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
+ assert(KillMI && "Last use disappeared?");
+ int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
+ assert(KillOp != -1 && "Last use disappeared?");
+ KillMI->getOperand(KillOp).setIsKill(false);
+ }
+ vrm.removeKillPoint(li.reg);
bool DefIsReMat = vrm.isReMaterialized(li.reg);
Slot = vrm.getStackSlot(li.reg);
assert(Slot != VirtRegMap::MAX_STACK_SLOT);
int LdSlot = 0;
bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
bool isLoad = isLoadSS ||
- (DefIsReMat && (ReMatDefMI->getInstrDescriptor()->Flags & M_LOAD_FLAG));
+ (DefIsReMat && (ReMatDefMI->getDesc()->isSimpleLoad()));
bool IsFirstRange = true;
for (LiveInterval::Ranges::const_iterator
I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
// Note ReMatOrigDefMI has already been deleted.
rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
- false, vrm, RegMap, rc, ReMatIds, loopInfo,
+ false, vrm, RegInfo, rc, ReMatIds, loopInfo,
SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
MBBVRegsMap, NewLIs);
} else {
rewriteInstructionsForSpills(li, false, I, NULL, 0,
Slot, 0, false, false, false,
- false, vrm, RegMap, rc, ReMatIds, loopInfo,
+ false, vrm, RegInfo, rc, ReMatIds, loopInfo,
SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
MBBVRegsMap, NewLIs);
}
// Is the def for the val# rematerializable?
MachineInstr *ReMatDefMI = (DefIdx == ~0u)
? 0 : getInstructionFromIndex(DefIdx);
- if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI)) {
+ bool dummy;
+ if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, dummy)) {
// Remember how to remat the def of this val#.
ReMatOrigDefs[VN] = ReMatDefMI;
// Original def may be modified so we have to make a copy here. vrm must
// delete these!
ReMatDefs[VN] = ReMatDefMI = ReMatDefMI->clone();
- vrm.setVirtIsReMaterialized(li.reg, ReMatDefMI);
bool CanDelete = true;
if (VNI->hasPHIKill) {
int LdSlot = 0;
bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
bool isLoad = isLoadSS ||
- (DefIsReMat && (ReMatDefMI->getInstrDescriptor()->Flags & M_LOAD_FLAG));
+ (DefIsReMat && ReMatDefMI->getDesc()->isSimpleLoad());
rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
- CanDelete, vrm, RegMap, rc, ReMatIds, loopInfo,
+ CanDelete, vrm, RegInfo, rc, ReMatIds, loopInfo,
SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
MBBVRegsMap, NewLIs);
}
if (!TrySplit)
return NewLIs;
- SmallVector<unsigned, 2> UseOps;
+ SmallPtrSet<LiveInterval*, 4> AddedKill;
+ SmallVector<unsigned, 2> Ops;
if (NeedStackSlot) {
int Id = SpillMBBs.find_first();
while (Id != -1) {
for (unsigned i = 0, e = spills.size(); i != e; ++i) {
int index = spills[i].index;
unsigned VReg = spills[i].vreg;
+ LiveInterval &nI = getOrCreateInterval(VReg);
bool isReMat = vrm.isReMaterialized(VReg);
MachineInstr *MI = getInstructionFromIndex(index);
- int OpIdx = -1;
- UseOps.clear();
+ bool CanFold = false;
+ bool FoundUse = false;
+ Ops.clear();
if (spills[i].canFold) {
+ CanFold = true;
for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
MachineOperand &MO = MI->getOperand(j);
if (!MO.isRegister() || MO.getReg() != VReg)
continue;
- if (MO.isDef()) {
- OpIdx = (int)j;
+
+ Ops.push_back(j);
+ if (MO.isDef())
continue;
- }
- // Can't fold if it's two-address code and the use isn't the
- // first and only use.
- if (isReMat ||
- (UseOps.empty() && !alsoFoldARestore(Id, index, VReg,
- RestoreMBBs, RestoreIdxes))) {
- OpIdx = -1;
+ if (isReMat ||
+ (!FoundUse && !alsoFoldARestore(Id, index, VReg,
+ RestoreMBBs, RestoreIdxes))) {
+ // MI has two-address uses of the same register. If the use
+ // isn't the first and only use in the BB, then we can't fold
+ // it. FIXME: Move this to rewriteInstructionsForSpills.
+ CanFold = false;
break;
}
- UseOps.push_back(j);
+ FoundUse = true;
}
}
// Fold the store into the def if possible.
bool Folded = false;
- if (OpIdx != -1) {
- if (tryFoldMemoryOperand(MI, vrm, NULL, index, OpIdx, UseOps,
- true, Slot, VReg)) {
- if (!UseOps.empty())
- // Folded a two-address instruction, do not issue a load.
- eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
+ if (CanFold && !Ops.empty()) {
+        if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops,
+                                 true, Slot, VReg)) {
+          Folded = true;
+          if (FoundUse) {
+ // Also folded uses, do not issue a load.
+ eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
+ nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
+ }
+ nI.removeRange(getDefIndex(index), getStoreIndex(index));
}
}
- // Else tell the spiller to issue a store for us.
- if (!Folded)
- vrm.addSpillPoint(VReg, MI);
+ // Else tell the spiller to issue a spill.
+ if (!Folded) {
+ LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
+ bool isKill = LR->end == getStoreIndex(index);
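+        // If the interval ends at this store, the spill point is also the
+        // kill; remember the interval so the finalization loop below does
+        // not add a second kill marker.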
+ vrm.addSpillPoint(VReg, isKill, MI);
+ if (isKill)
+ AddedKill.insert(&nI);
+ }
}
Id = SpillMBBs.find_next(Id);
}
if (index == -1)
continue;
unsigned VReg = restores[i].vreg;
+ LiveInterval &nI = getOrCreateInterval(VReg);
MachineInstr *MI = getInstructionFromIndex(index);
- int OpIdx = -1;
- UseOps.clear();
+ bool CanFold = false;
+ Ops.clear();
if (restores[i].canFold) {
+ CanFold = true;
for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
MachineOperand &MO = MI->getOperand(j);
if (!MO.isRegister() || MO.getReg() != VReg)
continue;
+
if (MO.isDef()) {
- // Can't fold if it's two-address code and it hasn't already
- // been folded.
- OpIdx = -1;
+ // If this restore were to be folded, it would have been folded
+ // already.
+ CanFold = false;
break;
}
- if (UseOps.empty())
- // Use the first use index.
- OpIdx = (int)j;
- UseOps.push_back(j);
+ Ops.push_back(j);
}
}
// Fold the load into the use if possible.
bool Folded = false;
- if (OpIdx != -1) {
- if (vrm.isReMaterialized(VReg)) {
+ if (CanFold && !Ops.empty()) {
+ if (!vrm.isReMaterialized(VReg))
+        Folded = tryFoldMemoryOperand(MI, vrm, NULL, index, Ops,
+                                      true, Slot, VReg);
+ else {
MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
int LdSlot = 0;
bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
// If the rematerializable def is a load, also try to fold it.
- if (isLoadSS ||
- (ReMatDefMI->getInstrDescriptor()->Flags & M_LOAD_FLAG))
- Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index, OpIdx,
- UseOps, isLoadSS, LdSlot, VReg);
- } else
- Folded = tryFoldMemoryOperand(MI, vrm, NULL, index, OpIdx, UseOps,
- true, Slot, VReg);
+ if (isLoadSS || ReMatDefMI->getDesc()->isSimpleLoad())
+ Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
+ Ops, isLoadSS, LdSlot, VReg);
+ }
}
// If folding is not possible / failed, then tell the spiller to issue a
// load / rematerialization for us.
- if (!Folded)
+ if (Folded)
+ nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
+ else
vrm.addRestorePoint(VReg, MI);
}
Id = RestoreMBBs.find_next(Id);
}
- // Finalize spill weights.
- for (unsigned i = 0, e = NewLIs.size(); i != e; ++i)
- NewLIs[i]->weight /= NewLIs[i]->getSize();
+ // Finalize intervals: add kills, finalize spill weights, and filter out
+ // dead intervals.
+ std::vector<LiveInterval*> RetNewLIs;
+ for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
+ LiveInterval *LI = NewLIs[i];
+ if (!LI->empty()) {
+ LI->weight /= LI->getSize();
+ if (!AddedKill.count(LI)) {
+ LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
+ unsigned LastUseIdx = getBaseIndex(LR->end);
+ MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
+ int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg);
+ assert(UseIdx != -1);
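+      // A last use tied to a def is not a true kill: the instruction
+      // redefines the register, so only untied uses get the kill marker.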
+ if (LastUse->getDesc()->getOperandConstraint(UseIdx, TOI::TIED_TO) ==
+ -1) {
+ LastUse->getOperand(UseIdx).setIsKill();
+ vrm.addKillPoint(LI->reg, LastUseIdx);
+ }
+ }
+ RetNewLIs.push_back(LI);
+ }
+ }
- return NewLIs;
+ return RetNewLIs;
}