//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "pre-alloc-split"
+#include "VirtRegMap.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/LiveStackAnalysis.h"
#include "llvm/CodeGen/MachineDominators.h"
static cl::opt<int> PreSplitLimit("pre-split-limit", cl::init(-1), cl::Hidden);
static cl::opt<int> DeadSplitLimit("dead-split-limit", cl::init(-1), cl::Hidden);
+static cl::opt<int> RestoreFoldLimit("restore-fold-limit", cl::init(-1), cl::Hidden);
STATISTIC(NumSplits, "Number of intervals split");
STATISTIC(NumRemats, "Number of intervals split by rematerialization");
STATISTIC(NumFolds, "Number of intervals split with spill folding");
+STATISTIC(NumRestoreFolds, "Number of intervals split with restore folding");
STATISTIC(NumRenumbers, "Number of intervals renumbered into new registers");
STATISTIC(NumDeadSpills, "Number of dead spills removed");
MachineRegisterInfo *MRI;
LiveIntervals *LIs;
LiveStacks *LSs;
+ VirtRegMap *VRM;
// Barrier - Current barrier being processed.
MachineInstr *Barrier;
AU.addPreservedID(PHIEliminationID);
AU.addRequired<MachineDominatorTree>();
AU.addRequired<MachineLoopInfo>();
+ AU.addRequired<VirtRegMap>();
AU.addPreserved<MachineDominatorTree>();
AU.addPreserved<MachineLoopInfo>();
+ AU.addPreserved<VirtRegMap>();
MachineFunctionPass::getAnalysisUsage(AU);
}
MachineBasicBlock* MBB,
int& SS,
SmallPtrSet<MachineInstr*, 4>& RefsInMBB);
+ MachineInstr* FoldRestore(unsigned vreg,
+ const TargetRegisterClass* RC,
+ MachineInstr* Barrier,
+ MachineBasicBlock* MBB,
+ int SS,
+ SmallPtrSet<MachineInstr*, 4>& RefsInMBB);
void RenumberValno(VNInfo* VN);
void ReconstructLiveInterval(LiveInterval* LI);
bool removeDeadSpills(SmallPtrSet<LiveInterval*, 8>& split);
unsigned &SpillIndex) {
MachineBasicBlock::iterator Pt = MBB->begin();
- // Go top down if RefsInMBB is empty.
- if (RefsInMBB.empty() && !DefMI) {
- MachineBasicBlock::iterator MII = MBB->begin();
- MachineBasicBlock::iterator EndPt = MI;
- do {
- ++MII;
- unsigned Index = LIs->getInstructionIndex(MII);
- unsigned Gap = LIs->findGapBeforeInstr(Index);
-
- // We can't insert the spill between the barrier (a call), and its
- // corresponding call frame setup/teardown.
- if (prior(MII)->getOpcode() == TRI->getCallFrameSetupOpcode()) {
- bool reachedBarrier = false;
- do {
- if (MII == EndPt) {
- reachedBarrier = true;
- break;
- }
- ++MII;
- } while (MII->getOpcode() != TRI->getCallFrameDestroyOpcode());
-
- if (reachedBarrier) break;
- } else if (Gap) {
- Pt = MII;
- SpillIndex = Gap;
- break;
- }
- } while (MII != EndPt);
- } else {
- MachineBasicBlock::iterator MII = MI;
- MachineBasicBlock::iterator EndPt = DefMI
- ? MachineBasicBlock::iterator(DefMI) : MBB->begin();
+ MachineBasicBlock::iterator MII = MI;
+ MachineBasicBlock::iterator EndPt = DefMI
+ ? MachineBasicBlock::iterator(DefMI) : MBB->begin();
- while (MII != EndPt && !RefsInMBB.count(MII)) {
- unsigned Index = LIs->getInstructionIndex(MII);
-
- // We can't insert the spill between the barrier (a call), and its
- // corresponding call frame setup.
- if (prior(MII)->getOpcode() == TRI->getCallFrameSetupOpcode()) {
+ while (MII != EndPt && !RefsInMBB.count(MII) &&
+ MII->getOpcode() != TRI->getCallFrameSetupOpcode())
+ --MII;
+ if (MII == EndPt || RefsInMBB.count(MII)) return Pt;
+
+ while (MII != EndPt && !RefsInMBB.count(MII)) {
+ unsigned Index = LIs->getInstructionIndex(MII);
+
+ // We can't insert the spill between the barrier (a call), and its
+ // corresponding call frame setup.
+ if (MII->getOpcode() == TRI->getCallFrameDestroyOpcode()) {
+ while (MII->getOpcode() != TRI->getCallFrameSetupOpcode()) {
--MII;
- continue;
- } if (MII->getOpcode() == TRI->getCallFrameDestroyOpcode()) {
- bool reachedBarrier = false;
- while (MII->getOpcode() != TRI->getCallFrameSetupOpcode()) {
- --MII;
- if (MII == EndPt) {
- reachedBarrier = true;
- break;
- }
+ if (MII == EndPt) {
+ return Pt;
}
-
- if (reachedBarrier) break;
- else continue;
- } else if (LIs->hasGapBeforeInstr(Index)) {
- Pt = MII;
- SpillIndex = LIs->findGapBeforeInstr(Index, true);
}
- --MII;
+ continue;
+ } else if (LIs->hasGapBeforeInstr(Index)) {
+ Pt = MII;
+ SpillIndex = LIs->findGapBeforeInstr(Index, true);
}
+
+ if (RefsInMBB.count(MII))
+ return Pt;
+
+
+ --MII;
}
return Pt;
// FIXME: Allow spill to be inserted to the beginning of the mbb. Update mbb
// begin index accordingly.
MachineBasicBlock::iterator Pt = MBB->end();
- unsigned EndIdx = LIs->getMBBEndIdx(MBB);
+ MachineBasicBlock::iterator EndPt = MBB->getFirstTerminator();
- // Go bottom up if RefsInMBB is empty and the end of the mbb isn't beyond
- // the last index in the live range.
- if (RefsInMBB.empty() && LastIdx >= EndIdx) {
- MachineBasicBlock::iterator MII = MBB->getFirstTerminator();
- MachineBasicBlock::iterator EndPt = MI;
- --MII;
- do {
- unsigned Index = LIs->getInstructionIndex(MII);
- unsigned Gap = LIs->findGapBeforeInstr(Index);
-
- // We can't insert a restore between the barrier (a call) and its
- // corresponding call frame teardown.
- if (MII->getOpcode() == TRI->getCallFrameDestroyOpcode()) {
- bool reachedBarrier = false;
- while (MII->getOpcode() != TRI->getCallFrameSetupOpcode()) {
- --MII;
- if (MII == EndPt) {
- reachedBarrier = true;
- break;
- }
- }
-
- if (reachedBarrier) break;
- else continue;
- } else if (Gap) {
- Pt = MII;
- RestoreIndex = Gap;
- break;
- }
-
- --MII;
- } while (MII != EndPt);
- } else {
- MachineBasicBlock::iterator MII = MI;
- MII = ++MII;
-
- // FIXME: Limit the number of instructions to examine to reduce
- // compile time?
- while (MII != MBB->getFirstTerminator()) {
- unsigned Index = LIs->getInstructionIndex(MII);
- if (Index > LastIdx)
- break;
- unsigned Gap = LIs->findGapBeforeInstr(Index);
+ // We start at the call, so walk forward until we find the call frame teardown
+ // since we can't insert restores before that. Bail if we encounter a use
+ // during this time.
+ MachineBasicBlock::iterator MII = MI;
+ if (MII == EndPt) return Pt;
+
+ while (MII != EndPt && !RefsInMBB.count(MII) &&
+ MII->getOpcode() != TRI->getCallFrameDestroyOpcode())
+ ++MII;
+ if (MII == EndPt || RefsInMBB.count(MII)) return Pt;
+ ++MII;
+
+ // FIXME: Limit the number of instructions to examine to reduce
+ // compile time?
+ while (MII != EndPt) {
+ unsigned Index = LIs->getInstructionIndex(MII);
+ if (Index > LastIdx)
+ break;
+ unsigned Gap = LIs->findGapBeforeInstr(Index);
- // We can't insert a restore between the barrier (a call) and its
- // corresponding call frame teardown.
- if (MII->getOpcode() == TRI->getCallFrameDestroyOpcode()) {
+ // We can't insert a restore between the barrier (a call) and its
+ // corresponding call frame teardown.
+ if (MII->getOpcode() == TRI->getCallFrameSetupOpcode()) {
+ do {
+ if (MII == EndPt || RefsInMBB.count(MII)) return Pt;
++MII;
- continue;
- } else if (prior(MII)->getOpcode() == TRI->getCallFrameSetupOpcode()) {
- bool reachedBarrier = false;
- do {
- if (MII == MBB->getFirstTerminator() || RefsInMBB.count(MII)) {
- reachedBarrier = true;
- break;
- }
-
- ++MII;
- } while (MII->getOpcode() != TRI->getCallFrameDestroyOpcode());
-
- if (reachedBarrier) break;
- } else if (Gap) {
- Pt = MII;
- RestoreIndex = Gap;
- }
-
- if (RefsInMBB.count(MII))
- break;
- ++MII;
+ } while (MII->getOpcode() != TRI->getCallFrameDestroyOpcode());
+ } else if (Gap) {
+ Pt = MII;
+ RestoreIndex = Gap;
}
+
+ if (RefsInMBB.count(MII))
+ return Pt;
+
+ ++MII;
}
return Pt;
}
// Create live interval for stack slot.
- CurrSLI = &LSs->getOrCreateInterval(SS);
+ CurrSLI = &LSs->getOrCreateInterval(SS, RC);
if (CurrSLI->hasAtLeastOneValue())
CurrSValNo = CurrSLI->getValNumInfo(0);
else
- CurrSValNo = CurrSLI->getNextValue(~0U, 0, LSs->getVNInfoAllocator());
+ CurrSValNo = CurrSLI->getNextValue(0, 0, false, LSs->getVNInfoAllocator());
return SS;
}
if (Phis.count(MBB)) return Phis[MBB];
unsigned StartIndex = LIs->getMBBStartIdx(MBB);
- VNInfo *RetVNI = Phis[MBB] = LI->getNextValue(~0U, /*FIXME*/ 0,
- LIs->getVNInfoAllocator());
+ VNInfo *RetVNI = Phis[MBB] =
+ LI->getNextValue(0, /*FIXME*/ 0, false, LIs->getVNInfoAllocator());
+
if (!IsIntraBlock) LiveOut[MBB] = RetVNI;
// If there are no uses or defs between our starting point and the
IncomingVNs[*PI] = Incoming;
}
- if (MBB->pred_size() == 1 && !RetVNI->hasPHIKill) {
+ if (MBB->pred_size() == 1 && !RetVNI->hasPHIKill()) {
VNInfo* OldVN = RetVNI;
VNInfo* NewVN = IncomingVNs.begin()->second;
VNInfo* MergedVN = LI->MergeValueNumberInto(OldVN, NewVN);
// VNInfo to represent the joined value.
for (DenseMap<MachineBasicBlock*, VNInfo*>::iterator I =
IncomingVNs.begin(), E = IncomingVNs.end(); I != E; ++I) {
- I->second->hasPHIKill = true;
+ I->second->setHasPHIKill(true);
unsigned KillIndex = LIs->getMBBEndIdx(I->first);
if (!LiveInterval::isKill(I->second, KillIndex))
LI->addKill(I->second, KillIndex);
unsigned DefIdx = LIs->getInstructionIndex(&*DI);
DefIdx = LiveIntervals::getDefIndex(DefIdx);
- VNInfo* NewVN = LI->getNextValue(DefIdx, 0, Alloc);
+ assert(DI->getOpcode() != TargetInstrInfo::PHI &&
+ "Following NewVN isPHIDef flag incorrect. Fix me!");
+ VNInfo* NewVN = LI->getNextValue(DefIdx, 0, true, Alloc);
// If the def is a move, set the copy field.
unsigned SrcReg, DstReg, SrcSubIdx, DstSubIdx;
// Bail out if we ever encounter a valno that has a PHI kill. We can't
// renumber these.
- if (OldVN->hasPHIKill) return;
+ if (OldVN->hasPHIKill()) return;
VNsToCopy.push_back(OldVN);
MachineInstr* MI = LIs->getInstructionFromIndex(*KI);
unsigned DefIdx = MI->findRegisterDefOperandIdx(CurrLI->reg);
if (DefIdx == ~0U) continue;
- if (MI->isRegReDefinedByTwoAddr(DefIdx)) {
+ if (MI->isRegTiedToUseOperand(DefIdx)) {
VNInfo* NextVN =
CurrLI->findDefinedVNInfo(LiveIntervals::getDefIndex(*KI));
if (NextVN == OldVN) continue;
VNInfo* OldVN = *OI;
// Copy the valno over
- VNInfo* NewVN = NewLI.getNextValue(OldVN->def, OldVN->copy,
- LIs->getVNInfoAllocator());
- NewLI.copyValNumInfo(NewVN, OldVN);
+ VNInfo* NewVN = NewLI.createValueCopy(OldVN, LIs->getVNInfoAllocator());
NewLI.MergeValueInAsValue(*CurrLI, OldVN, NewVN);
// Remove the valno from the old interval
MO.setReg(NewVReg);
}
+ // Grow the VirtRegMap, since we've created a new vreg.
+ VRM->grow();
+
// The renumbered vreg shares a stack slot with the old register.
if (IntervalSSMap.count(CurrLI->reg))
IntervalSSMap[NewVReg] = IntervalSSMap[CurrLI->reg];
MachineBasicBlock::iterator KillPt = BarrierMBB->end();
unsigned KillIdx = 0;
- if (ValNo->def == ~0U || DefMI->getParent() == BarrierMBB)
+ if (!ValNo->isDefAccurate() || DefMI->getParent() == BarrierMBB)
KillPt = findSpillPoint(BarrierMBB, Barrier, NULL, RefsInMBB, KillIdx);
else
KillPt = findNextEmptySlot(DefMI->getParent(), DefMI, KillIdx);
if (I != IntervalSSMap.end()) {
SS = I->second;
} else {
- SS = MFI->CreateStackObject(RC->getSize(), RC->getAlignment());
-
+ SS = MFI->CreateStackObject(RC->getSize(), RC->getAlignment());
}
MachineInstr* FMI = TII->foldMemoryOperand(*MBB->getParent(),
++NumFolds;
IntervalSSMap[vreg] = SS;
- CurrSLI = &LSs->getOrCreateInterval(SS);
+ CurrSLI = &LSs->getOrCreateInterval(SS, RC);
if (CurrSLI->hasAtLeastOneValue())
CurrSValNo = CurrSLI->getValNumInfo(0);
else
- CurrSValNo = CurrSLI->getNextValue(~0U, 0, LSs->getVNInfoAllocator());
+ CurrSValNo = CurrSLI->getNextValue(0, 0, false, LSs->getVNInfoAllocator());
+ }
+
+ return FMI;
+}
+
+MachineInstr* PreAllocSplitting::FoldRestore(unsigned vreg,
+ const TargetRegisterClass* RC,
+ MachineInstr* Barrier,
+ MachineBasicBlock* MBB,
+ int SS,
+ SmallPtrSet<MachineInstr*, 4>& RefsInMBB) {
+ if ((int)RestoreFoldLimit != -1 && RestoreFoldLimit == (int)NumRestoreFolds)
+ return 0;
+
+  // If RefsInMBB is empty there are no uses in this block to fold the
+  // restore into, so there is nothing to do.
+ if (RefsInMBB.empty())
+ return 0;
+
+ // Can't fold a restore between a call stack setup and teardown.
+ MachineBasicBlock::iterator FoldPt = Barrier;
+
+ // Advance from barrier to call frame teardown.
+ while (FoldPt != MBB->getFirstTerminator() &&
+ FoldPt->getOpcode() != TRI->getCallFrameDestroyOpcode()) {
+ if (RefsInMBB.count(FoldPt))
+ return 0;
+
+ ++FoldPt;
+ }
+
+ if (FoldPt == MBB->getFirstTerminator())
+ return 0;
+ else
+ ++FoldPt;
+
+ // Now find the restore point.
+ while (FoldPt != MBB->getFirstTerminator() && !RefsInMBB.count(FoldPt)) {
+ if (FoldPt->getOpcode() == TRI->getCallFrameSetupOpcode()) {
+ while (FoldPt != MBB->getFirstTerminator() &&
+ FoldPt->getOpcode() != TRI->getCallFrameDestroyOpcode()) {
+ if (RefsInMBB.count(FoldPt))
+ return 0;
+
+ ++FoldPt;
+ }
+
+ if (FoldPt == MBB->getFirstTerminator())
+ return 0;
+ }
+
+ ++FoldPt;
+ }
+
+ if (FoldPt == MBB->getFirstTerminator())
+ return 0;
+
+ int OpIdx = FoldPt->findRegisterUseOperandIdx(vreg, true);
+ if (OpIdx == -1)
+ return 0;
+
+ SmallVector<unsigned, 1> Ops;
+ Ops.push_back(OpIdx);
+
+ if (!TII->canFoldMemoryOperand(FoldPt, Ops))
+ return 0;
+
+ MachineInstr* FMI = TII->foldMemoryOperand(*MBB->getParent(),
+ FoldPt, Ops, SS);
+
+ if (FMI) {
+ LIs->ReplaceMachineInstrInMaps(FoldPt, FMI);
+ FMI = MBB->insert(MBB->erase(FoldPt), FMI);
+ ++NumRestoreFolds;
}
return FMI;
CurrLI->FindLiveRangeContaining(LIs->getUseIndex(BarrierIdx));
VNInfo *ValNo = LR->valno;
- if (ValNo->def == ~1U) {
+ if (ValNo->isUnused()) {
// Defined by a dead def? How can this be?
assert(0 && "Val# is defined by a dead def?");
abort();
}
- MachineInstr *DefMI = (ValNo->def != ~0U)
+ MachineInstr *DefMI = ValNo->isDefAccurate()
? LIs->getInstructionFromIndex(ValNo->def) : NULL;
// If this would create a new join point, do not split.
unsigned SpillIndex = 0;
MachineInstr *SpillMI = NULL;
int SS = -1;
- if (ValNo->def == ~0U) {
- // If it's defined by a phi, we must split just before the barrier.
+ if (!ValNo->isDefAccurate()) {
+ // If we don't know where the def is we must split just before the barrier.
if ((SpillMI = FoldSpill(LI->reg, RC, 0, Barrier,
BarrierMBB, SS, RefsInMBB))) {
SpillIndex = LIs->getInstructionIndex(SpillMI);
Def2SpillMap[ValNo->def] = SpillIndex;
// Add restore.
- TII->loadRegFromStackSlot(*BarrierMBB, RestorePt, CurrLI->reg, SS, RC);
- MachineInstr *LoadMI = prior(RestorePt);
- LIs->InsertMachineInstrInMaps(LoadMI, RestoreIndex);
+ bool FoldedRestore = false;
+ if (MachineInstr* LMI = FoldRestore(CurrLI->reg, RC, Barrier,
+ BarrierMBB, SS, RefsInMBB)) {
+ RestorePt = LMI;
+ RestoreIndex = LIs->getInstructionIndex(RestorePt);
+ FoldedRestore = true;
+ } else {
+ TII->loadRegFromStackSlot(*BarrierMBB, RestorePt, CurrLI->reg, SS, RC);
+ MachineInstr *LoadMI = prior(RestorePt);
+ LIs->InsertMachineInstrInMaps(LoadMI, RestoreIndex);
+ }
// Update spill stack slot live interval.
UpdateSpillSlotInterval(ValNo, LIs->getUseIndex(SpillIndex)+1,
LIs->getDefIndex(RestoreIndex));
ReconstructLiveInterval(CurrLI);
- unsigned RestoreIdx = LIs->getInstructionIndex(prior(RestorePt));
- RestoreIdx = LiveIntervals::getDefIndex(RestoreIdx);
- RenumberValno(CurrLI->findDefinedVNInfo(RestoreIdx));
+
+ if (!FoldedRestore) {
+ unsigned RestoreIdx = LIs->getInstructionIndex(prior(RestorePt));
+ RestoreIdx = LiveIntervals::getDefIndex(RestoreIdx);
+ RenumberValno(CurrLI->findDefinedVNInfo(RestoreIdx));
+ }
++NumSplits;
return true;
// by the current barrier.
SmallVector<LiveInterval*, 8> Intervals;
for (const TargetRegisterClass **RC = RCs; *RC; ++RC) {
- if (TII->IgnoreRegisterClassBarriers(*RC))
+ // FIXME: If it's not safe to move any instruction that defines the barrier
+ // register class, then it means there are some special dependencies which
+ // codegen is not modelling. Ignore these barriers for now.
+ if (!TII->isSafeToMoveRegClassDefs(*RC))
continue;
std::vector<unsigned> &VRs = MRI->getRegClassVirtRegs(*RC);
for (unsigned i = 0, e = VRs.size(); i != e; ++i) {
NonSpills++;
int DefIdx = (*UI)->findRegisterDefOperandIdx(Reg);
- if (DefIdx != -1 && (*UI)->isRegReDefinedByTwoAddr(DefIdx))
+ if (DefIdx != -1 && (*UI)->isRegTiedToUseOperand(DefIdx))
FeedsTwoAddr = true;
}
// We don't currently try to handle definitions with PHI kills, because
// it would involve processing more than one VNInfo at once.
- if (CurrVN->hasPHIKill) continue;
+ if (CurrVN->hasPHIKill()) continue;
// We also don't try to handle the results of PHI joins, since there's
// no defining instruction to analyze.
- unsigned DefIdx = CurrVN->def;
- if (DefIdx == ~0U || DefIdx == ~1U) continue;
+ if (!CurrVN->isDefAccurate() || CurrVN->isUnused()) continue;
// We're only interested in eliminating cruft introduced by the splitter,
// is of the form load-use or load-use-store. First, check that the
// definition is a load, and remember what stack slot we loaded it from.
- MachineInstr* DefMI = LIs->getInstructionFromIndex(DefIdx);
+ MachineInstr* DefMI = LIs->getInstructionFromIndex(CurrVN->def);
int FrameIndex;
if (!TII->isLoadFromStackSlot(DefMI, FrameIndex)) continue;
if (DefMBB == BarrierMBB)
return false;
- if (LR->valno->hasPHIKill)
+ if (LR->valno->hasPHIKill())
return false;
unsigned MBBEnd = LIs->getMBBEndIdx(BarrierMBB);
MRI = &MF.getRegInfo();
LIs = &getAnalysis<LiveIntervals>();
LSs = &getAnalysis<LiveStacks>();
+ VRM = &getAnalysis<VirtRegMap>();
bool MadeChange = false;