#include "VirtRegMap.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/Value.h"
-#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/RegisterCoalescer.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
+#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/ADT/SmallSet.h"
using namespace llvm;
STATISTIC(numJoins , "Number of interval joins performed");
+STATISTIC(numSubJoins , "Number of subclass joins performed");
STATISTIC(numCommutes , "Number of instruction commuting performed");
STATISTIC(numExtends , "Number of copies extended");
+STATISTIC(NumReMats , "Number of instructions re-materialized");
STATISTIC(numPeep , "Number of identity moves eliminated after coalescing");
STATISTIC(numAborts , "Number of times interval joining aborted");
static cl::opt<bool>
NewHeuristic("new-coalescer-heuristic",
- cl::desc("Use new coalescer heuristic"),
- cl::init(false));
+ cl::desc("Use new coalescer heuristic"),
+ cl::init(false), cl::Hidden);
+
+static cl::opt<bool>
+CrossClassJoin("join-subclass-copies",
+ cl::desc("Coalesce copies to sub- register class"),
+ cl::init(false), cl::Hidden);
static RegisterPass<SimpleRegisterCoalescing>
X("simple-register-coalescing", "Simple Register Coalescing");
const PassInfo *const llvm::SimpleRegisterCoalescingID = &X;
void SimpleRegisterCoalescing::getAnalysisUsage(AnalysisUsage &AU) const {
+ AU.addRequired<LiveIntervals>();
AU.addPreserved<LiveIntervals>();
+ AU.addRequired<MachineLoopInfo>();
AU.addPreserved<MachineLoopInfo>();
AU.addPreservedID(MachineDominatorsID);
- AU.addPreservedID(PHIEliminationID);
+ if (StrongPHIElim)
+ AU.addPreservedID(StrongPHIEliminationID);
+ else
+ AU.addPreservedID(PHIEliminationID);
AU.addPreservedID(TwoAddressInstructionPassID);
- AU.addRequired<LiveVariables>();
- AU.addRequired<LiveIntervals>();
- AU.addRequired<MachineLoopInfo>();
MachineFunctionPass::getAnalysisUsage(AU);
}
if (ALR == IntA.end()) // Should never happen!
return false;
VNInfo *AValNo = ALR->valno;
+ // If it's re-defined by an early clobber somewhere in the live range, then
+ // it's not safe to eliminate the copy. FIXME: This is a temporary workaround.
+ // See PR3149:
+ // 172 %ECX<def> = MOV32rr %reg1039<kill>
+ // 180 INLINEASM <es:subl $5,$1
+ // sbbl $3,$0>, 10, %EAX<def>, 14, %ECX<earlyclobber,def>, 9, %EAX<kill>,
+ // 36, <fi#0>, 1, %reg0, 0, 9, %ECX<kill>, 36, <fi#1>, 1, %reg0, 0
+ // 188 %EAX<def> = MOV32rr %EAX<kill>
+ // 196 %ECX<def> = MOV32rr %ECX<kill>
+ // 204 %ECX<def> = MOV32rr %ECX<kill>
+ // 212 %EAX<def> = MOV32rr %EAX<kill>
+ // 220 %EAX<def> = MOV32rr %EAX
+ // 228 %reg1039<def> = MOV32rr %ECX<kill>
+ // The early clobber operand ties ECX input to the ECX def.
+ //
+ // The live interval of ECX is represented as this:
+ // %reg20,inf = [46,47:1)[174,230:0) 0@174-(230) 1@46-(47)
+ // The coalescer has no idea there was a def in the middle of [174,230].
+ if (AValNo->redefByEC)
+ return false;
// If AValNo is defined as a copy from IntB, we can potentially process this.
// Get the instruction that defines this value number.
}
// Okay, merge "B1" into the same value number as "B0".
- if (BValNo != ValLR->valno)
+ if (BValNo != ValLR->valno) {
+ IntB.addKills(ValLR->valno, BValNo->kills);
IntB.MergeValueNumberInto(BValNo, ValLR->valno);
+ }
DOUT << " result = "; IntB.print(DOUT, tri_);
DOUT << "\n";
// If the source instruction was killing the source register before the
// merge, unset the isKill marker given the live range has been extended.
int UIdx = ValLREndInst->findRegisterUseOperandIdx(IntB.reg, true);
- if (UIdx != -1)
+ if (UIdx != -1) {
ValLREndInst->getOperand(UIdx).setIsKill(false);
+ IntB.removeKill(ValLR->valno, FillerStart);
+ }
++numExtends;
return true;
// simply extend BLR if CopyMI doesn't end the range.
DOUT << "\nExtending: "; IntB.print(DOUT, tri_);
- IntB.removeValNo(BValNo);
+ // Remove val#'s defined by copies that will be coalesced away.
for (unsigned i = 0, e = BDeadValNos.size(); i != e; ++i)
IntB.removeValNo(BDeadValNos[i]);
- VNInfo *ValNo = IntB.getNextValue(AValNo->def, 0, li_->getVNInfoAllocator());
+
+ // Extend BValNo by merging in IntA live ranges of AValNo. Val# definition
+ // is updated. Kills are also updated.
+ VNInfo *ValNo = BValNo;
+ ValNo->def = AValNo->def;
+ ValNo->copy = NULL;
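+ // Carry BValNo's existing kills over into BKills, dropping the kill at the
+ // end of BLR, which is superseded by the merged-in ranges below.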
+ for (unsigned j = 0, ee = ValNo->kills.size(); j != ee; ++j) {
+ unsigned Kill = ValNo->kills[j];
+ if (Kill != BLR->end)
+ BKills.push_back(Kill);
+ }
+ ValNo->kills.clear();
for (LiveInterval::iterator AI = IntA.begin(), AE = IntA.end();
AI != AE; ++AI) {
if (AI->valno != AValNo) continue;
return true;
}
+/// ReMaterializeTrivialDef - If the source of a copy is defined by a trivial
+/// computation, replace the copy by rematerializing the definition.
+bool SimpleRegisterCoalescing::ReMaterializeTrivialDef(LiveInterval &SrcInt,
+ unsigned DstReg,
+ MachineInstr *CopyMI) {
+ unsigned CopyIdx = li_->getUseIndex(li_->getInstructionIndex(CopyMI));
+ LiveInterval::iterator SrcLR = SrcInt.FindLiveRangeContaining(CopyIdx);
+ if (SrcLR == SrcInt.end()) // Should never happen!
+ return false;
+ VNInfo *ValNo = SrcLR->valno;
+ // If other defs can reach uses of this def, then it's not safe to perform
+ // the optimization.
+ if (ValNo->def == ~0U || ValNo->def == ~1U || ValNo->hasPHIKill)
+ return false;
+ MachineInstr *DefMI = li_->getInstructionFromIndex(ValNo->def);
+ const TargetInstrDesc &TID = DefMI->getDesc();
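+ // Only rematerialize defs the target considers as cheap as a move;
+ // re-executing anything more expensive would not beat keeping the copy.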
+ if (!TID.isAsCheapAsAMove())
+ return false;
+ bool SawStore = false;
+ if (!DefMI->isSafeToMove(tii_, SawStore))
+ return false;
+
+ unsigned DefIdx = li_->getDefIndex(CopyIdx);
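+ // The destination val# was defined by CopyMI; it is about to be defined by
+ // the rematerialized instruction instead, so clear its copy pointer.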
+ const LiveRange *DLR= li_->getInterval(DstReg).getLiveRangeContaining(DefIdx);
+ DLR->valno->copy = NULL;
+ // Don't forget to update sub-register intervals.
+ if (TargetRegisterInfo::isPhysicalRegister(DstReg)) {
+ for (const unsigned* SR = tri_->getSubRegisters(DstReg); *SR; ++SR) {
+ if (!li_->hasInterval(*SR))
+ continue;
+ DLR = li_->getInterval(*SR).getLiveRangeContaining(DefIdx);
+ if (DLR && DLR->valno->copy == CopyMI)
+ DLR->valno->copy = NULL;
+ }
+ }
+
+ MachineBasicBlock *MBB = CopyMI->getParent();
+ MachineBasicBlock::iterator MII = next(MachineBasicBlock::iterator(CopyMI));
+ CopyMI->removeFromParent();
+ tii_->reMaterialize(*MBB, MII, DstReg, DefMI);
+ MachineInstr *NewMI = prior(MII);
+ // CopyMI may have implicit operands, transfer them over to the newly
+ // rematerialized instruction, and update the val# info of implicit defs.
+ for (unsigned i = CopyMI->getDesc().getNumOperands(),
+ e = CopyMI->getNumOperands(); i != e; ++i) {
+ MachineOperand &MO = CopyMI->getOperand(i);
+ if (MO.isReg() && MO.isImplicit())
+ NewMI->addOperand(MO);
+ if (MO.isDef() && li_->hasInterval(MO.getReg())) {
+ unsigned Reg = MO.getReg();
+ DLR = li_->getInterval(Reg).getLiveRangeContaining(DefIdx);
+ if (DLR && DLR->valno->copy == CopyMI)
+ DLR->valno->copy = NULL;
+ }
+ }
+
+ li_->ReplaceMachineInstrInMaps(CopyMI, NewMI);
+ MBB->getParent()->DeleteMachineInstr(CopyMI);
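+ // Record both instructions: the dead copy so pending work lists skip it,
+ // and the original def so the final pass can erase it if it ends up dead.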
+ ReMatCopies.insert(CopyMI);
+ ReMatDefs.insert(DefMI);
+ ++NumReMats;
+ return true;
+}
+
/// isBackEdgeCopy - Returns true if CopyMI is a back edge copy.
///
bool SimpleRegisterCoalescing::isBackEdgeCopy(MachineInstr *CopyMI,
LI.FindLiveRangeContaining(li_->getDefIndex(DefIdx));
if (DstLR == LI.end())
return false;
- unsigned KillIdx = li_->getInstructionIndex(&MBB->back()) + InstrSlots::NUM;
+ unsigned KillIdx = li_->getMBBEndIdx(MBB) + 1;
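+ // A val# killed exactly once, at the block end, by a PHI indicates the
+ // copy feeds a value around a loop back edge.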
if (DstLR->valno->kills.size() == 1 &&
DstLR->valno->kills[0] == KillIdx && DstLR->valno->hasPHIKill)
return true;
unsigned UseDstReg = DstReg;
if (OldSubIdx)
UseDstReg = tri_->getSubReg(DstReg, OldSubIdx);
+
+ unsigned CopySrcReg, CopyDstReg;
+ if (tii_->isMoveInstr(*UseMI, CopySrcReg, CopyDstReg) &&
+ CopySrcReg != CopyDstReg &&
+ CopySrcReg == SrcReg && CopyDstReg != UseDstReg) {
+ // If the use is a copy and it won't be coalesced away, and its source
+ // is defined by a trivial computation, try to rematerialize it instead.
+ if (ReMaterializeTrivialDef(li_->getInterval(SrcReg), CopyDstReg,UseMI))
+ continue;
+ }
+
O.setReg(UseDstReg);
O.setSubReg(0);
- } else {
- // Sub-register indexes goes from small to large. e.g.
- // RAX: 1 -> AL, 2 -> AX, 3 -> EAX
- // EAX: 1 -> AL, 2 -> AX
- // So RAX's sub-register 2 is AX, RAX's sub-regsiter 3 is EAX, whose
- // sub-register 2 is also AX.
- if (SubIdx && OldSubIdx && SubIdx != OldSubIdx)
- assert(OldSubIdx < SubIdx && "Conflicting sub-register index!");
- else if (SubIdx)
- O.setSubReg(SubIdx);
- // Remove would-be duplicated kill marker.
- if (O.isKill() && UseMI->killsRegister(DstReg))
- O.setIsKill(false);
- O.setReg(DstReg);
+ continue;
+ }
+
+ // Sub-register indexes go from small to large. e.g.
+ // RAX: 1 -> AL, 2 -> AX, 3 -> EAX
+ // EAX: 1 -> AL, 2 -> AX
+ // So RAX's sub-register 2 is AX, RAX's sub-register 3 is EAX, whose
+ // sub-register 2 is also AX.
+ if (SubIdx && OldSubIdx && SubIdx != OldSubIdx)
+ assert(OldSubIdx < SubIdx && "Conflicting sub-register index!");
+ else if (SubIdx)
+ O.setSubReg(SubIdx);
+ // Remove would-be duplicated kill marker.
+ if (O.isKill() && UseMI->killsRegister(DstReg))
+ O.setIsKill(false);
+ O.setReg(DstReg);
+
+ // After updating the operand, check if the machine instruction has
+ // become a copy. If so, update its val# information.
+ const TargetInstrDesc &TID = UseMI->getDesc();
+ unsigned CopySrcReg, CopyDstReg;
+ if (TID.getNumDefs() == 1 && TID.getNumOperands() > 2 &&
+ tii_->isMoveInstr(*UseMI, CopySrcReg, CopyDstReg) &&
+ CopySrcReg != CopyDstReg &&
+ (TargetRegisterInfo::isVirtualRegister(CopyDstReg) ||
+ allocatableRegs_[CopyDstReg])) {
+ LiveInterval &LI = li_->getInterval(CopyDstReg);
+ unsigned DefIdx = li_->getDefIndex(li_->getInstructionIndex(UseMI));
+ const LiveRange *DLR = LI.getLiveRangeContaining(DefIdx);
+ if (DLR->valno->def == DefIdx)
+ DLR->valno->copy = UseMI;
}
}
}
MachineOperand &UseMO = UI.getOperand();
if (UseMO.isKill()) {
MachineInstr *UseMI = UseMO.getParent();
- unsigned SReg, DReg;
- if (!tii_->isMoveInstr(*UseMI, SReg, DReg))
- continue;
unsigned UseIdx = li_->getUseIndex(li_->getInstructionIndex(UseMI));
if (JoinedCopies.count(UseMI))
continue;
const LiveRange *UI = LI.getLiveRangeContaining(UseIdx);
- if (!LI.isKill(UI->valno, UseIdx+1))
+ if (!UI || !LI.isKill(UI->valno, UseIdx+1))
UseMO.setIsKill(false);
}
}
return false;
}
+/// RemoveDeadDef - If a def of a live interval is now determined dead, remove
+/// the val# it defines. If the live interval becomes empty, remove it as well.
+bool SimpleRegisterCoalescing::RemoveDeadDef(LiveInterval &li,
+ MachineInstr *DefMI) {
+ unsigned DefIdx = li_->getDefIndex(li_->getInstructionIndex(DefMI));
+ LiveInterval::iterator MLR = li.FindLiveRangeContaining(DefIdx);
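+ // Bail unless this instruction is the defining def of the val#; otherwise
+ // removing the val# would delete a live range defined elsewhere.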
+ if (DefIdx != MLR->valno->def)
+ return false;
+ li.removeValNo(MLR->valno);
+ return removeIntervalIfEmpty(li, li_, tri_);
+}
+
/// PropagateDeadness - Propagate the dead marker to the instruction which
/// defines the val#.
static void PropagateDeadness(LiveInterval &li, MachineInstr *CopyMI,
if (MBB == SuccMBB)
return true;
MachineBasicBlock *TBB = 0, *FBB = 0;
- std::vector<MachineOperand> Cond;
+ SmallVector<MachineOperand, 4> Cond;
return !tii_->AnalyzeBranch(*MBB, TBB, FBB, Cond) && !TBB && !FBB &&
MBB->isSuccessor(SuccMBB);
}
}
}
+/// getMatchingSuperReg - Return a super-register of the specified register
+/// Reg such that its sub-register with index SubIdx is Reg.
static unsigned getMatchingSuperReg(unsigned Reg, unsigned SubIdx,
const TargetRegisterClass *RC,
const TargetRegisterInfo* TRI) {
return 0;
}
+/// isProfitableToCoalesceToSubRC - Given that the register class of DstReg is
+/// a subset of the register class of SrcReg, return true if it's profitable
+/// to coalesce the two registers.
+bool
+SimpleRegisterCoalescing::isProfitableToCoalesceToSubRC(unsigned SrcReg,
+ unsigned DstReg,
+ MachineBasicBlock *MBB){
+ if (!CrossClassJoin)
+ return false;
+
+ // First, make sure all references (defs and uses) of both registers are in
+ // the same MBB.
+ for (MachineRegisterInfo::reg_iterator RI = mri_->reg_begin(SrcReg),
+ RE = mri_->reg_end(); RI != RE; ++RI) {
+ MachineInstr &MI = *RI;
+ if (MI.getParent() != MBB)
+ return false;
+ }
+ for (MachineRegisterInfo::reg_iterator RI = mri_->reg_begin(DstReg),
+ RE = mri_->reg_end(); RI != RE; ++RI) {
+ MachineInstr &MI = *RI;
+ if (MI.getParent() != MBB)
+ return false;
+ }
+
+ // Then make sure the intervals are *short*.
+ LiveInterval &SrcInt = li_->getInterval(SrcReg);
+ LiveInterval &DstInt = li_->getInterval(DstReg);
+ unsigned SrcSize = li_->getApproximateInstructionCount(SrcInt);
+ unsigned DstSize = li_->getApproximateInstructionCount(DstInt);
+ const TargetRegisterClass *RC = mri_->getRegClass(DstReg);
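+ // Heuristic: tolerate roughly two instructions of combined live interval
+ // per allocatable register in the destination (sub-)class.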
+ unsigned Threshold = allocatableRCRegs_[RC].count() * 2;
+ return (SrcSize + DstSize) <= Threshold;
+}
+
+/// HasIncompatibleSubRegDefUse - If we are trying to coalesce a virtual
+/// register with a physical register, check if any of the virtual register
+/// operands is a sub-register use or def. If so, make sure it won't result
+/// in an illegal extract_subreg or insert_subreg instruction. e.g.
+/// vr1024 = extract_subreg vr1025, 1
+/// ...
+/// vr1024 = mov8rr AH
+/// If vr1024 is coalesced with AH, the extract_subreg is now illegal since
+/// AH does not have a super-reg whose sub-register 1 is AH.
+bool
+SimpleRegisterCoalescing::HasIncompatibleSubRegDefUse(MachineInstr *CopyMI,
+ unsigned VirtReg,
+ unsigned PhysReg) {
+ for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(VirtReg),
+ E = mri_->reg_end(); I != E; ++I) {
+ MachineOperand &O = I.getOperand();
+ MachineInstr *MI = &*I;
+ if (MI == CopyMI || JoinedCopies.count(MI))
+ continue;
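+ // Any sub-register reference of VirtReg must map to a real sub-register
+ // of PhysReg after coalescing.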
+ unsigned SubIdx = O.getSubReg();
+ if (SubIdx && !tri_->getSubReg(PhysReg, SubIdx))
+ return true;
+ if (MI->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG) {
+ SubIdx = MI->getOperand(2).getImm();
+ if (O.isUse() && !tri_->getSubReg(PhysReg, SubIdx))
+ return true;
+ if (O.isDef()) {
+ unsigned SrcReg = MI->getOperand(1).getReg();
+ const TargetRegisterClass *RC =
+ TargetRegisterInfo::isPhysicalRegister(SrcReg)
+ ? tri_->getPhysicalRegisterRegClass(SrcReg)
+ : mri_->getRegClass(SrcReg);
+ if (!getMatchingSuperReg(PhysReg, SubIdx, RC, tri_))
+ return true;
+ }
+ }
+ if (MI->getOpcode() == TargetInstrInfo::INSERT_SUBREG) {
+ SubIdx = MI->getOperand(3).getImm();
+ if (VirtReg == MI->getOperand(0).getReg()) {
+ if (!tri_->getSubReg(PhysReg, SubIdx))
+ return true;
+ } else {
+ unsigned DstReg = MI->getOperand(0).getReg();
+ const TargetRegisterClass *RC =
+ TargetRegisterInfo::isPhysicalRegister(DstReg)
+ ? tri_->getPhysicalRegisterRegClass(DstReg)
+ : mri_->getRegClass(DstReg);
+ if (!getMatchingSuperReg(PhysReg, SubIdx, RC, tri_))
+ return true;
+ }
+ }
+ }
+ return false;
+}
+
/// JoinCopy - Attempt to join intervals corresponding to SrcReg/DstReg,
/// which are the src/dst of the copy instruction CopyMI. This returns true
/// if the copy was successfully coalesced away. If it is not currently
MachineInstr *CopyMI = TheCopy.MI;
Again = false;
- if (JoinedCopies.count(CopyMI))
+ if (JoinedCopies.count(CopyMI) || ReMatCopies.count(CopyMI))
return false; // Already done.
DOUT << li_->getInstructionIndex(CopyMI) << '\t' << *CopyMI;
return false; // Not coalescable.
}
+ // Should be non-null only when coalescing to a sub-register class.
+ const TargetRegisterClass *SubRC = NULL;
+ MachineBasicBlock *CopyMBB = CopyMI->getParent();
unsigned RealDstReg = 0;
unsigned RealSrcReg = 0;
if (isExtSubReg || isInsSubReg) {
mri_->getRegClass(isExtSubReg ? SrcReg : DstReg);
if (isExtSubReg) {
RealDstReg = getMatchingSuperReg(DstReg, SubIdx, RC, tri_);
- assert(RealDstReg && "Invalid extra_subreg instruction!");
+ assert(RealDstReg && "Invalid extract_subreg instruction!");
} else {
RealSrcReg = getMatchingSuperReg(SrcReg, SubIdx, RC, tri_);
- assert(RealSrcReg && "Invalid extra_subreg instruction!");
+ assert(RealSrcReg && "Invalid extract_subreg instruction!");
}
// For this type of EXTRACT_SUBREG, conservatively
unsigned OldSubIdx = isExtSubReg ? CopyMI->getOperand(0).getSubReg()
: CopyMI->getOperand(2).getSubReg();
if (OldSubIdx) {
- if (OldSubIdx == SubIdx && !differingRegisterClasses(SrcReg, DstReg))
+ if (OldSubIdx == SubIdx &&
+ !differingRegisterClasses(SrcReg, DstReg, SubRC))
// r1024<2> = EXTRACT_SUBREG r1025, 2. Then r1024 has already been
// coalesced to a larger register so the subreg indices cancel out.
// Also check if the other larger register is of the same register
if (SubIdx) {
unsigned LargeReg = isExtSubReg ? SrcReg : DstReg;
unsigned SmallReg = isExtSubReg ? DstReg : SrcReg;
- unsigned LargeRegSize =
- li_->getInterval(LargeReg).getSize() / InstrSlots::NUM;
- unsigned SmallRegSize =
- li_->getInterval(SmallReg).getSize() / InstrSlots::NUM;
+ unsigned LargeRegSize =
+ li_->getApproximateInstructionCount(li_->getInterval(LargeReg));
+ unsigned SmallRegSize =
+ li_->getApproximateInstructionCount(li_->getInterval(SmallReg));
const TargetRegisterClass *RC = mri_->getRegClass(SmallReg);
unsigned Threshold = allocatableRCRegs_[RC].count();
// Be conservative. If both sides are virtual registers, do not coalesce
// if this will cause a high use density interval to target a smaller
// set of registers.
if (SmallRegSize > Threshold || LargeRegSize > Threshold) {
- LiveVariables::VarInfo &svi = lv_->getVarInfo(LargeReg);
- LiveVariables::VarInfo &dvi = lv_->getVarInfo(SmallReg);
- if ((float)dvi.NumUses / SmallRegSize <
- (float)svi.NumUses / LargeRegSize) {
+ if ((float)std::distance(mri_->use_begin(SmallReg),
+ mri_->use_end()) / SmallRegSize <
+ (float)std::distance(mri_->use_begin(LargeReg),
+ mri_->use_end()) / LargeRegSize) {
Again = true; // May be possible to coalesce later.
return false;
}
}
}
}
- } else if (differingRegisterClasses(SrcReg, DstReg)) {
+ } else if (differingRegisterClasses(SrcReg, DstReg, SubRC)) {
// FIXME: What if the result of an EXTRACT_SUBREG is then coalesced
// with another? If it's the resulting destination register, then
// the subidx must be propagated to uses (but only those defined
// register, it should be safe because register is assumed to have
// the register class of the super-register.
- // If they are not of the same register class, we cannot join them.
- DOUT << "\tSrc/Dest are different register classes.\n";
- // Allow the coalescer to try again in case either side gets coalesced to
- // a physical register that's compatible with the other side. e.g.
- // r1024 = MOV32to32_ r1025
- // but later r1024 is assigned EAX then r1025 may be coalesced with EAX.
- Again = true; // May be possible to coalesce later.
- return false;
+ if (!SubRC || !isProfitableToCoalesceToSubRC(SrcReg, DstReg, CopyMBB)) {
+ // If they are not of the same register class, we cannot join them.
+ DOUT << "\tSrc/Dest are different register classes.\n";
+ // Allow the coalescer to try again in case either side gets coalesced to
+ // a physical register that's compatible with the other side. e.g.
+ // r1024 = MOV32to32_ r1025
+ // but later r1024 is assigned EAX then r1025 may be coalesced with EAX.
+ Again = true; // May be possible to coalesce later.
+ return false;
+ }
}
+
+ // Will it create illegal extract_subreg / insert_subreg?
+ if (SrcIsPhys && HasIncompatibleSubRegDefUse(CopyMI, DstReg, SrcReg))
+ return false;
+ if (DstIsPhys && HasIncompatibleSubRegDefUse(CopyMI, SrcReg, DstReg))
+ return false;
LiveInterval &SrcInt = li_->getInterval(SrcReg);
LiveInterval &DstInt = li_->getInterval(DstReg);
// If the virtual register live interval is long but it has low use density,
// do not join them, instead mark the physical register as its allocation
// preference.
- unsigned Length = JoinVInt.getSize() / InstrSlots::NUM;
- LiveVariables::VarInfo &vi = lv_->getVarInfo(JoinVReg);
+ unsigned Length = li_->getApproximateInstructionCount(JoinVInt);
if (Length > Threshold &&
- (((float)vi.NumUses / Length) < (1.0 / Threshold))) {
+ (((float)std::distance(mri_->use_begin(JoinVReg),
+ mri_->use_end()) / Length) < (1.0 / Threshold))) {
JoinVInt.preference = JoinPReg;
++numAborts;
DOUT << "\tMay tie down a physical register, abort!\n";
if (!isEmpty && !JoinIntervals(DstInt, SrcInt, Swapped)) {
// Coalescing failed.
+
+ // If the definition of the source is a trivial computation, try
+ // rematerializing it.
+ if (!isExtSubReg && !isInsSubReg &&
+ ReMaterializeTrivialDef(SrcInt, DstInt.reg, CopyMI))
+ return true;
// If we can eliminate the copy without merging the live ranges, do so now.
if (!isExtSubReg && !isInsSubReg &&
for (const unsigned *AS = tri_->getSubRegisters(DstReg); *AS; ++AS)
li_->getOrCreateInterval(*AS).MergeInClobberRanges(*ResSrcInt,
li_->getVNInfoAllocator());
- } else {
- // Merge use info if the destination is a virtual register.
- LiveVariables::VarInfo& dVI = lv_->getVarInfo(DstReg);
- LiveVariables::VarInfo& sVI = lv_->getVarInfo(SrcReg);
- dVI.NumUses += sVI.NumUses;
}
// If this is a EXTRACT_SUBREG, make sure the result of coalescing is the
}
}
+ // Coalescing to a virtual register that is of a sub-register class of the
+ // other. Make sure the resulting register is set to the right register class.
+ if (SubRC) {
+ mri_->setRegClass(DstReg, SubRC);
+ ++numSubJoins;
+ }
+
if (NewHeuristic) {
// Add all copies that define val# in the source interval into the queue.
for (LiveInterval::const_vni_iterator i = ResSrcInt->vni_begin(),
if (CopyMI &&
JoinedCopies.count(CopyMI) == 0 &&
tii_->isMoveInstr(*CopyMI, NewSrcReg, NewDstReg)) {
- unsigned LoopDepth = loopInfo->getLoopDepth(CopyMI->getParent());
+ unsigned LoopDepth = loopInfo->getLoopDepth(CopyMBB);
JoinQueue->push(CopyRec(CopyMI, LoopDepth,
isBackEdgeCopy(CopyMI, DstReg)));
}
if (TargetRegisterInfo::isVirtualRegister(DstReg))
RemoveUnnecessaryKills(DstReg, *ResDstInt);
- // SrcReg is guarateed to be the register whose live interval that is
- // being merged.
- li_->removeInterval(SrcReg);
if (isInsSubReg)
// Avoid:
// r1024 = op
RemoveDeadImpDef(DstReg, *ResDstInt);
UpdateRegDefsUses(SrcReg, DstReg, SubIdx);
+ // SrcReg is guaranteed to be the register whose live interval is being
+ // merged.
+ li_->removeInterval(SrcReg);
+
if (isEmpty) {
// Now the copy is being coalesced away, the val# previously defined
// by the copy is being defined by an IMPLICIT_DEF which defines a zero
}
}
+ // If the resulting interval has a preference that no longer fits because of
+ // subreg coalescing, just clear the preference.
+ if (ResDstInt->preference && (isExtSubReg || isInsSubReg) &&
+ TargetRegisterInfo::isVirtualRegister(ResDstInt->reg)) {
+ const TargetRegisterClass *RC = mri_->getRegClass(ResDstInt->reg);
+ if (!RC->contains(ResDstInt->preference))
+ ResDstInt->preference = 0;
+ }
+
DOUT << "\n\t\tJoined. Result = "; ResDstInt->print(DOUT, tri_);
DOUT << "\n";
// Re-compute it.
MachineInstr *DefMI = li_->getInstructionFromIndex(LR->start);
unsigned SrcReg, DstReg;
- if (tii_->isMoveInstr(*DefMI, SrcReg, DstReg) &&
+ if (DefMI && tii_->isMoveInstr(*DefMI, SrcReg, DstReg) &&
DstReg == li.reg && SrcReg == Reg) {
// Cache computed info.
LR->valno->def = LR->start;
// optimize for it: if there is more than one value, we merge them all into
// the lowest numbered one, then handle the interval as if we were merging
// with one value number.
- VNInfo *LHSValNo;
+ VNInfo *LHSValNo = NULL;
if (EliminatedLHSVals.size() > 1) {
// Loop through all the equal value numbers merging them into the smallest
// one.
JoinQueue = new JoinPriorityQueue<CopyRecSort>(this);
std::vector<CopyRec> TryAgainList;
- if (loopInfo->begin() == loopInfo->end()) {
+ if (loopInfo->empty()) {
// If there are no loops in the function, join intervals in function order.
for (MachineFunction::iterator I = mf_->begin(), E = mf_->end();
I != E; ++I)
}
/// Return true if the two specified registers belong to different register
-/// classes. The registers may be either phys or virt regs.
-bool SimpleRegisterCoalescing::differingRegisterClasses(unsigned RegA,
- unsigned RegB) const {
+/// classes. The registers may be either phys or virt regs. In the
+/// case where both registers are virtual registers, it also returns
+/// RegB's register class by reference in SubRC if it is a subset of
+/// RegA's register class.
+bool
+SimpleRegisterCoalescing::differingRegisterClasses(unsigned RegA, unsigned RegB,
+ const TargetRegisterClass *&SubRC) const {
// Get the register classes for the first reg.
if (TargetRegisterInfo::isPhysicalRegister(RegA)) {
}
// Compare against the regclass for the second reg.
- const TargetRegisterClass *RegClass = mri_->getRegClass(RegA);
- if (TargetRegisterInfo::isVirtualRegister(RegB))
- return RegClass != mri_->getRegClass(RegB);
- else
- return !RegClass->contains(RegB);
+ const TargetRegisterClass *RegClassA = mri_->getRegClass(RegA);
+ if (TargetRegisterInfo::isVirtualRegister(RegB)) {
+ const TargetRegisterClass *RegClassB = mri_->getRegClass(RegB);
+ if (RegClassA == RegClassB)
+ return false;
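+ // Classes differ; report RegB's class via SubRC when it is a sub-class of
+ // RegA's so the caller may still attempt a cross-class join.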
+ SubRC = (RegClassA->hasSubClass(RegClassB)) ? RegClassB : NULL;
+ return true;
+ }
+ return !RegClassA->contains(RegB);
}
/// lastRegisterUse - Returns the last use of the specific register between
if (!(tii_->isMoveInstr(*MI, SrcReg, DstReg) && SrcReg == DstReg))
for (unsigned i = 0, NumOps = MI->getNumOperands(); i != NumOps; ++i) {
MachineOperand &Use = MI->getOperand(i);
- if (Use.isRegister() && Use.isUse() && Use.getReg() &&
+ if (Use.isReg() && Use.isUse() && Use.getReg() &&
tri_->regsOverlap(Use.getReg(), Reg)) {
UseIdx = e;
return &Use;
void SimpleRegisterCoalescing::releaseMemory() {
JoinedCopies.clear();
+ ReMatCopies.clear();
+ ReMatDefs.clear();
}
static bool isZeroLengthInterval(LiveInterval *li) {
CopyMI->setDesc(tii_->get(TargetInstrInfo::IMPLICIT_DEF));
for (int i = CopyMI->getNumOperands() - 1, e = 0; i > e; --i)
CopyMI->RemoveOperand(i);
- bool NoUse = mri_->use_begin(SrcReg) == mri_->use_end();
+ bool NoUse = mri_->use_empty(SrcReg);
if (NoUse) {
for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(SrcReg),
E = mri_->reg_end(); I != E; ) {
tri_ = tm_->getRegisterInfo();
tii_ = tm_->getInstrInfo();
li_ = &getAnalysis<LiveIntervals>();
- lv_ = &getAnalysis<LiveVariables>();
loopInfo = &getAnalysis<MachineLoopInfo>();
DOUT << "********** SIMPLE REGISTER COALESCING **********\n"
// Join (coalesce) intervals if requested.
if (EnableJoining) {
joinIntervals();
- DOUT << "********** INTERVALS POST JOINING **********\n";
- for (LiveIntervals::iterator I = li_->begin(), E = li_->end(); I != E; ++I){
- I->second.print(DOUT, tri_);
- DOUT << "\n";
- }
+ DEBUG({
+ DOUT << "********** INTERVALS POST JOINING **********\n";
+ for (LiveIntervals::iterator I = li_->begin(), E = li_->end(); I != E; ++I){
+ I->second->print(DOUT, tri_);
+ DOUT << "\n";
+ }
+ });
}
// Perform a final pass over the instructions and compute spill weights
// and remove identity moves.
+ SmallVector<unsigned, 4> DeadDefs;
for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end();
mbbi != mbbe; ++mbbi) {
MachineBasicBlock* mbb = mbbi;
continue;
}
+ // Check if this is a rematerialized def instruction that is now dead.
+ if (ReMatDefs.count(MI)) {
+ bool isDead = true;
+ for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
+ const MachineOperand &MO = MI->getOperand(i);
+ if (!MO.isReg())
+ continue;
+ unsigned Reg = MO.getReg();
+ if (TargetRegisterInfo::isVirtualRegister(Reg))
+ DeadDefs.push_back(Reg);
+ if (MO.isDead())
+ continue;
+ if (TargetRegisterInfo::isPhysicalRegister(Reg) ||
+ !mri_->use_empty(Reg)) {
+ isDead = false;
+ break;
+ }
+ }
+ if (isDead) {
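+ // Delete the collected dead val#s first, then erase the instruction.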
+ while (!DeadDefs.empty()) {
+ unsigned DeadDef = DeadDefs.back();
+ DeadDefs.pop_back();
+ RemoveDeadDef(li_->getInterval(DeadDef), MI);
+ }
+ li_->RemoveMachineInstrFromMaps(mii);
+ mii = mbbi->erase(mii);
+ continue;
+ } else
+ DeadDefs.clear();
+ }
+
// If the move will be an identity move, delete it.
- bool isMove = tii_->isMoveInstr(*mii, SrcReg, DstReg);
+ bool isMove = tii_->isMoveInstr(*MI, SrcReg, DstReg);
if (isMove && SrcReg == DstReg) {
if (li_->hasInterval(SrcReg)) {
LiveInterval &RegInt = li_->getInterval(SrcReg);
// If def of this move instruction is dead, remove its live range
// from the destination register's live interval.
- if (mii->registerDefIsDead(DstReg)) {
- if (!ShortenDeadCopySrcLiveRange(RegInt, mii))
- ShortenDeadCopyLiveRange(RegInt, mii);
+ if (MI->registerDefIsDead(DstReg)) {
+ if (!ShortenDeadCopySrcLiveRange(RegInt, MI))
+ ShortenDeadCopyLiveRange(RegInt, MI);
}
}
- li_->RemoveMachineInstrFromMaps(mii);
+ li_->RemoveMachineInstrFromMaps(MI);
mii = mbbi->erase(mii);
++numPeep;
} else if (!isMove || !TurnCopyIntoImpDef(mii, mbb, DstReg, SrcReg)) {
SmallSet<unsigned, 4> UniqueUses;
- for (unsigned i = 0, e = mii->getNumOperands(); i != e; ++i) {
- const MachineOperand &mop = mii->getOperand(i);
- if (mop.isRegister() && mop.getReg() &&
+ for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
+ const MachineOperand &mop = MI->getOperand(i);
+ if (mop.isReg() && mop.getReg() &&
TargetRegisterInfo::isVirtualRegister(mop.getReg())) {
unsigned reg = mop.getReg();
// Multiple uses of reg by the same instruction. It should not
}
for (LiveIntervals::iterator I = li_->begin(), E = li_->end(); I != E; ++I) {
- LiveInterval &LI = I->second;
+ LiveInterval &LI = *I->second;
if (TargetRegisterInfo::isVirtualRegister(LI.reg)) {
// If the live interval length is essentially zero, i.e. in every live
// range the use follows def immediately, it doesn't make sense to spill
LI.weight = HUGE_VALF;
else {
bool isLoad = false;
- if (li_->isReMaterializable(LI, isLoad)) {
+ SmallVector<LiveInterval*, 4> SpillIs;
+ if (li_->isReMaterializable(LI, SpillIs, isLoad)) {
// If all of the definitions of the interval are re-materializable,
// it is a preferred candidate for spilling. If none of the defs are
// loads, then it's potentially very cheap to re-materialize.
// Divide the weight of the interval by its size. This encourages
// spilling of intervals that are large and have few uses, and
// discourages spilling of small intervals with many uses.
- LI.weight /= LI.getSize();
+ LI.weight /= li_->getApproximateInstructionCount(LI) * InstrSlots::NUM;
}
}