#include "llvm/ADT/STLExtras.h"
#include <algorithm>
#include <cmath>
+#include <iostream>
using namespace llvm;
namespace {
EnableJoining("join-liveintervals",
cl::desc("Join compatible live intervals"),
cl::init(true));
-};
+}
/// getAnalysisUsage - Declare the analysis passes this pass requires and
/// preserves.
/// NOTE(review): the body is empty as shown here; this is diff context and
/// the real body may have been elided — confirm against the full file.
void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const
{
}
+/// isZeroLengthInterval - Return true if every live range in li spans no
+/// more than one instruction's worth of index slots (InstrSlots::NUM),
+/// i.e. each use follows its def immediately and the interval has
+/// essentially zero length.
+static bool isZeroLengthInterval(LiveInterval *li) {
+ for (LiveInterval::Ranges::const_iterator
+ i = li->ranges.begin(), e = li->ranges.end(); i != e; ++i)
+ // A range longer than one instruction slot means the value is genuinely
+ // live across instructions, so the interval is not zero-length.
+ if (i->end - i->start > LiveIntervals::InstrSlots::NUM)
+ return false;
+ return true;
+}
+
+
/// runOnMachineFunction - Register allocate the whole function
///
bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
MRegisterInfo::isVirtualRegister(mop.getReg())) {
// replace register with representative register
unsigned reg = rep(mop.getReg());
- mii->SetMachineOperandReg(i, reg);
+ mii->getOperand(i).setReg(reg);
LiveInterval &RegInt = getInterval(reg);
RegInt.weight +=
}
}
+ for (iterator I = begin(), E = end(); I != E; ++I) {
+ LiveInterval &li = I->second;
+ if (MRegisterInfo::isVirtualRegister(li.reg))
+ // If the live interval length is essentially zero, i.e. in every live
+ // range the use follows def immediately, it doesn't make sense to spill
+ // it and hope it will be easier to allocate for this li.
+ if (isZeroLengthInterval(&li))
+ li.weight = float(HUGE_VAL);
+ }
+
DEBUG(dump());
return true;
}
index += InstrSlots::NUM;
if (index == end) break;
- MachineBasicBlock::iterator mi = getInstructionFromIndex(index);
+ MachineInstr *MI = getInstructionFromIndex(index);
// NewRegLiveIn - This instruction might have multiple uses of the spilled
// register. In this case, for the first use, keep track of the new vreg
unsigned NewRegLiveIn = 0;
for_operand:
- for (unsigned i = 0; i != mi->getNumOperands(); ++i) {
- MachineOperand& mop = mi->getOperand(i);
+ for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
+ MachineOperand& mop = MI->getOperand(i);
if (mop.isRegister() && mop.getReg() == li.reg) {
if (NewRegLiveIn && mop.isUse()) {
// We already emitted a reload of this value, reuse it for
// subsequent operands.
- mi->SetMachineOperandReg(i, NewRegLiveIn);
+ MI->getOperand(i).setReg(NewRegLiveIn);
DEBUG(std::cerr << "\t\t\t\treused reload into reg" << NewRegLiveIn
<< " for operand #" << i << '\n');
- } else if (MachineInstr* fmi = mri_->foldMemoryOperand(mi, i, slot)) {
+ } else if (MachineInstr* fmi = mri_->foldMemoryOperand(MI, i, slot)) {
// Attempt to fold the memory reference into the instruction. If we
// can do this, we don't need to insert spill code.
if (lv_)
- lv_->instructionChanged(mi, fmi);
- vrm.virtFolded(li.reg, mi, i, fmi);
- mi2iMap_.erase(mi);
+ lv_->instructionChanged(MI, fmi);
+ MachineBasicBlock &MBB = *MI->getParent();
+ vrm.virtFolded(li.reg, MI, i, fmi);
+ mi2iMap_.erase(MI);
i2miMap_[index/InstrSlots::NUM] = fmi;
mi2iMap_[fmi] = index;
- MachineBasicBlock &MBB = *mi->getParent();
- mi = MBB.insert(MBB.erase(mi), fmi);
+ MI = MBB.insert(MBB.erase(MI), fmi);
++numFolded;
-
// Folding the load/store can completely change the instruction in
// unpredictable ways, rescan it from the beginning.
goto for_operand;
// create a new register for this spill
NewRegLiveIn = mf_->getSSARegMap()->createVirtualRegister(rc);
- mi->SetMachineOperandReg(i, NewRegLiveIn);
+ MI->getOperand(i).setReg(NewRegLiveIn);
vrm.grow();
vrm.assignVirt2StackSlot(NewRegLiveIn, slot);
LiveInterval& nI = getOrCreateInterval(NewRegLiveIn);
// update live variables if it is available
if (lv_)
- lv_->addVirtualRegisterKilled(NewRegLiveIn, mi);
+ lv_->addVirtualRegisterKilled(NewRegLiveIn, MI);
// If this is a live in, reuse it for subsequent live-ins. If it's
// a def, we can't do this.
handleVirtualRegisterDef(MBB, MI, getOrCreateInterval(reg));
else if (allocatableRegs_[reg]) {
unsigned SrcReg = 0, DestReg = 0;
- bool IsMove = tii_->isMoveInstr(*MI, SrcReg, DestReg);
+ if (!tii_->isMoveInstr(*MI, SrcReg, DestReg))
+ SrcReg = DestReg = 0;
handlePhysicalRegisterDef(MBB, MI, getOrCreateInterval(reg),
SrcReg, DestReg);
}
}
+/// IntA is defined as a copy from IntB and we know it only has one value
+/// number. If all of the places that IntA and IntB overlap are defined by
+/// copies from IntB into IntA, we know that these two ranges can really be
+/// merged if we adjust the value numbers. If it is safe, adjust the value
+/// numbers and return true, allowing coalescing to occur.
+///
+/// NOTE(review): the original header comment said "copies from IntA to
+/// IntB"; the checks below accept an overlap only when the defining
+/// instruction's dest is rep(IntA.reg) and its source is rep(IntB.reg),
+/// so the wording here follows the code — confirm the intended direction.
+bool LiveIntervals::
+AdjustIfAllOverlappingRangesAreCopiesFrom(LiveInterval &IntA,
+ LiveInterval &IntB,
+ unsigned CopyIdx) {
+ std::vector<LiveRange*> Ranges;
+ IntA.getOverlapingRanges(IntB, CopyIdx, Ranges);
+
+ // The caller should only invoke this when a simple join was rejected,
+ // which implies at least one overlapping range exists.
+ assert(!Ranges.empty() && "Why didn't we do a simple join of this?");
+
+ // Resolve IntB's register through the coalescing map once, up front.
+ unsigned IntBRep = rep(IntB.reg);
+
+ // Check to see if all of the overlaps (entries in Ranges) are defined by a
+ // copy from IntB into IntA. If not, exit.
+ for (unsigned i = 0, e = Ranges.size(); i != e; ++i) {
+ unsigned Idx = Ranges[i]->start;
+ MachineInstr *MI = getInstructionFromIndex(Idx);
+ unsigned SrcReg, DestReg;
+ if (!tii_->isMoveInstr(*MI, SrcReg, DestReg)) return false;
+
+ // If this copy isn't actually defining this range, it must be a live
+ // range spanning basic blocks or something.
+ if (rep(DestReg) != rep(IntA.reg)) return false;
+
+ // Check to see if this is coming from IntB. If not, bail out.
+ if (rep(SrcReg) != IntBRep) return false;
+ }
+
+ // Okay, we can change this one. Get the IntB value number that IntA is
+ // copied from.
+ // NOTE(review): this queries IntA's live range just before the copy;
+ // presumably that is the value feeding the copy — verify, since the
+ // comment above speaks of an IntB value number.
+ unsigned ActualValNo = IntA.getLiveRangeContaining(CopyIdx-1)->ValId;
+
+ // Change all of the overlapping ranges' value numbers to match the value
+ // IntA is copied from, making the value numbering consistent for the join.
+ for (unsigned i = 0, e = Ranges.size(); i != e; ++i)
+ Ranges[i]->ValId = ActualValNo;
+
+ return true;
+}
+
void LiveIntervals::joinIntervalsInMachineBB(MachineBasicBlock *MBB) {
DEBUG(std::cerr << ((Value*)MBB->getBasicBlock())->getName() << ":\n");
// we only join virtual registers with allocatable
// physical registers since we do not have liveness information
// on not allocatable physical registers
- unsigned regA, regB;
- if (tii_->isMoveInstr(*mi, regA, regB) &&
- (MRegisterInfo::isVirtualRegister(regA) || allocatableRegs_[regA]) &&
- (MRegisterInfo::isVirtualRegister(regB) || allocatableRegs_[regB])) {
+ unsigned SrcReg, DestReg;
+ if (tii_->isMoveInstr(*mi, SrcReg, DestReg) &&
+ (MRegisterInfo::isVirtualRegister(SrcReg) || allocatableRegs_[SrcReg])&&
+ (MRegisterInfo::isVirtualRegister(DestReg)||allocatableRegs_[DestReg])){
// Get representative registers.
- regA = rep(regA);
- regB = rep(regB);
+ SrcReg = rep(SrcReg);
+ DestReg = rep(DestReg);
// If they are already joined we continue.
- if (regA == regB)
+ if (SrcReg == DestReg)
continue;
// If they are both physical registers, we cannot join them.
- if (MRegisterInfo::isPhysicalRegister(regA) &&
- MRegisterInfo::isPhysicalRegister(regB))
+ if (MRegisterInfo::isPhysicalRegister(SrcReg) &&
+ MRegisterInfo::isPhysicalRegister(DestReg))
continue;
// If they are not of the same register class, we cannot join them.
- if (differingRegisterClasses(regA, regB))
+ if (differingRegisterClasses(SrcReg, DestReg))
continue;
- LiveInterval &IntA = getInterval(regA);
- LiveInterval &IntB = getInterval(regB);
- assert(IntA.reg == regA && IntB.reg == regB &&
+ LiveInterval &SrcInt = getInterval(SrcReg);
+ LiveInterval &DestInt = getInterval(DestReg);
+ assert(SrcInt.reg == SrcReg && DestInt.reg == DestReg &&
"Register mapping is horribly broken!");
- DEBUG(std::cerr << "\t\tInspecting " << IntA << " and " << IntB << ": ");
+ DEBUG(std::cerr << "\t\tInspecting " << SrcInt << " and " << DestInt
+ << ": ");
// If two intervals contain a single value and are joined by a copy, it
// does not matter if the intervals overlap, they can always be joined.
- bool TriviallyJoinable =
- IntA.containsOneValue() && IntB.containsOneValue();
+ bool Joinable = SrcInt.containsOneValue() && DestInt.containsOneValue();
unsigned MIDefIdx = getDefIndex(getInstructionIndex(mi));
- if ((TriviallyJoinable || IntB.joinable(IntA, MIDefIdx)) &&
- !overlapsAliases(&IntA, &IntB)) {
- IntB.join(IntA, MIDefIdx);
- DEBUG(std::cerr << "Joined. Result = " << IntB << "\n");
-
- if (!MRegisterInfo::isPhysicalRegister(regA)) {
- r2iMap_.erase(regA);
- r2rMap_[regA] = regB;
+
+ // If the intervals think that this is joinable, do so now.
+ if (!Joinable && DestInt.joinable(SrcInt, MIDefIdx))
+ Joinable = true;
+
+ // If DestInt is actually a copy from SrcInt (which we know) that is used
+ // to define another value of SrcInt, we can change the other range of
+ // SrcInt to be the value of the range that defines DestInt, allowing a
+ // coalesce.
+ if (!Joinable && DestInt.containsOneValue() &&
+ AdjustIfAllOverlappingRangesAreCopiesFrom(SrcInt, DestInt, MIDefIdx))
+ Joinable = true;
+
+ if (!Joinable || overlapsAliases(&SrcInt, &DestInt)) {
+ DEBUG(std::cerr << "Interference!\n");
+ } else {
+ DestInt.join(SrcInt, MIDefIdx);
+ DEBUG(std::cerr << "Joined. Result = " << DestInt << "\n");
+
+ if (!MRegisterInfo::isPhysicalRegister(SrcReg)) {
+ r2iMap_.erase(SrcReg);
+ r2rMap_[SrcReg] = DestReg;
} else {
// Otherwise merge the data structures the other way so we don't lose
// the physreg information.
- r2rMap_[regB] = regA;
- IntB.reg = regA;
- IntA.swap(IntB);
- r2iMap_.erase(regB);
+ r2rMap_[DestReg] = SrcReg;
+ DestInt.reg = SrcReg;
+ SrcInt.swap(DestInt);
+ r2iMap_.erase(DestReg);
}
++numJoins;
- } else {
- DEBUG(std::cerr << "Interference!\n");
}
}
}