#define DEBUG_TYPE "regalloc"
#include "RegisterCoalescer.h"
#include "LiveDebugVariables.h"
-#include "VirtRegMap.h"
-
-#include "llvm/Pass.h"
-#include "llvm/Value.h"
#include "llvm/ADT/OwningPtr.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
-#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/LiveRangeEdit.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstr.h"
-#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
-#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/RegisterClassInfo.h"
+#include "llvm/CodeGen/VirtRegMap.h"
+#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
-#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Target/TargetRegisterInfo.h"
+#include "llvm/Target/TargetSubtargetInfo.h"
+#include "llvm/Value.h"
#include <algorithm>
#include <cmath>
using namespace llvm;
cl::desc("Coalesce copies (default=true)"),
cl::init(true));
+// Temporary flag to test critical edge unsplitting.
+static cl::opt<bool>
+EnableJoinSplits("join-splitedges",
+ cl::desc("Coalesce copies on split edges (default=subtarget)"), cl::Hidden);
+
+// Temporary flag to test global copy optimization.
+static cl::opt<cl::boolOrDefault>
+EnableGlobalCopies("join-globalcopies",
+ cl::desc("Coalesce copies that span blocks (default=subtarget)"),
+ cl::init(cl::BOU_UNSET), cl::Hidden);
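+
+// Note: both are hidden llc options; for testing they can be toggled
+// explicitly, e.g. "llc -join-splitedges -join-globalcopies=false".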
+
static cl::opt<bool>
VerifyCoalescing("verify-coalescing",
cl::desc("Verify machine instrs before and after register coalescing"),
AliasAnalysis *AA;
RegisterClassInfo RegClassInfo;
+ /// \brief True if the coalescer should aggressively coalesce global copies
+ /// in favor of keeping local copies.
+ bool JoinGlobalCopies;
+
+ /// \brief True if the coalescer should aggressively coalesce fall-thru
+ /// blocks exclusively containing copies.
+ bool JoinSplitEdges;
+
/// WorkList - Copy instructions yet to be coalesced.
SmallVector<MachineInstr*, 8> WorkList;
+ SmallVector<MachineInstr*, 8> LocalWorkList;
/// ErasedInstrs - Set of instruction pointers that have been erased, and
/// that may be present in WorkList.
/// LiveRangeEdit callback.
void LRE_WillEraseInstruction(MachineInstr *MI);
+ /// coalesceLocals - coalesce the LocalWorkList.
+ void coalesceLocals();
+
/// joinAllIntervals - join compatible live intervals
void joinAllIntervals();
/// copies that cannot yet be coalesced into WorkList.
void copyCoalesceInMBB(MachineBasicBlock *MBB);
- /// copyCoalesceWorkList - Try to coalesce all copies in WorkList after
- /// position From. Return true if any progress was made.
- bool copyCoalesceWorkList(unsigned From = 0);
+ /// copyCoalesceWorkList - Try to coalesce all copies in CurrList. Return
+ /// true if any progress was made.
+ bool copyCoalesceWorkList(MutableArrayRef<MachineInstr*> CurrList);
/// joinCopy - Attempt to join intervals corresponding to SrcReg/DstReg,
/// which are the src/dst of the copy instruction CopyMI. This returns
MachineInstr *CopyMI);
/// canJoinPhys - Return true if a physreg copy should be joined.
- bool canJoinPhys(CoalescerPair &CP);
+ bool canJoinPhys(const CoalescerPair &CP);
/// updateRegDefsUses - Replace all defs and uses of SrcReg to DstReg and
/// update the subregister number if it is not zero. If DstReg is a
char RegisterCoalescer::ID = 0;
-static unsigned compose(const TargetRegisterInfo &tri, unsigned a, unsigned b) {
- if (!a) return b;
- if (!b) return a;
- return tri.composeSubRegIndices(a, b);
-}
-
static bool isMoveInstr(const TargetRegisterInfo &tri, const MachineInstr *MI,
unsigned &Src, unsigned &Dst,
unsigned &SrcSub, unsigned &DstSub) {
SrcSub = MI->getOperand(1).getSubReg();
} else if (MI->isSubregToReg()) {
Dst = MI->getOperand(0).getReg();
- DstSub = compose(tri, MI->getOperand(0).getSubReg(),
- MI->getOperand(3).getImm());
+ DstSub = tri.composeSubRegIndices(MI->getOperand(0).getSubReg(),
+ MI->getOperand(3).getImm());
Src = MI->getOperand(2).getReg();
SrcSub = MI->getOperand(2).getSubReg();
} else
  return false;
return true;
}
+// Return true if this block should be vacated by the coalescer to eliminate
+// branches. The important cases to handle in the coalescer are critical edges
+// split during phi elimination which contain only copies. Simple blocks that
+// contain non-branches should also be vacated, but this can be handled by an
+// earlier pass similar to early if-conversion.
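+//
+// For example (hypothetical MIR), phi elimination may leave a split critical
+// edge as a block like this, which the coalescer can vacate:
+//
+//   BB#2: Predecessors: BB#0  Successors: BB#3
+//     %vreg5<def> = COPY %vreg2
+//     JMP <BB#3>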
+static bool isSplitEdge(const MachineBasicBlock *MBB) {
+ if (MBB->pred_size() != 1 || MBB->succ_size() != 1)
+ return false;
+
+ for (MachineBasicBlock::const_iterator MII = MBB->begin(), E = MBB->end();
+ MII != E; ++MII) {
+ if (!MII->isCopyLike() && !MII->isUnconditionalBranch())
+ return false;
+ }
+ return true;
+}
+
bool CoalescerPair::setRegisters(const MachineInstr *MI) {
SrcReg = DstReg = 0;
SrcIdx = DstIdx = 0;
if (DstReg != Dst)
return false;
// Registers match, do the subregisters line up?
- return compose(TRI, SrcIdx, SrcSub) == compose(TRI, DstIdx, DstSub);
+ return TRI.composeSubRegIndices(SrcIdx, SrcSub) ==
+ TRI.composeSubRegIndices(DstIdx, DstSub);
}
}
// If AValNo is defined as a copy from IntB, we can potentially process this.
// Get the instruction that defines this value number.
MachineInstr *ACopyMI = LIS->getInstructionFromIndex(AValNo->def);
- if (!CP.isCoalescable(ACopyMI))
+ // Don't allow any partial copies, even if isCoalescable() allows them.
+ if (!CP.isCoalescable(ACopyMI) || !ACopyMI->isFullCopy())
return false;
// Get the LiveRange in IntB that this value number starts with.
// Update LiveDebugVariables.
LDV->renameRegister(SrcReg, DstReg, SubIdx);
+ SmallPtrSet<MachineInstr*, 8> Visited;
for (MachineRegisterInfo::reg_iterator I = MRI->reg_begin(SrcReg);
MachineInstr *UseMI = I.skipInstruction();) {
+ // Each instruction can only be rewritten once because sub-register
+ // composition is not always idempotent. When SrcReg != DstReg, rewriting
+ // the UseMI operands removes them from the SrcReg use-def chain, but when
+ // SrcReg is DstReg we could encounter UseMI twice if it has multiple
+ // operands mentioning the virtual register.
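+    //
+    // For example (hypothetical MIR), when SrcReg == DstReg == %vreg1:
+    //   %vreg1<def> = ADD32rr %vreg1, %vreg1
+    // all three operands are on the same use-def chain, so the use iterator
+    // may return this instruction again after its operands are rewritten.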
+ if (SrcReg == DstReg && !Visited.insert(UseMI))
+ continue;
+
SmallVector<unsigned,8> Ops;
bool Reads, Writes;
tie(Reads, Writes) = UseMI->readsWritesVirtualRegister(SrcReg, &Ops);
}
/// canJoinPhys - Return true if a copy involving a physreg should be joined.
-bool RegisterCoalescer::canJoinPhys(CoalescerPair &CP) {
+bool RegisterCoalescer::canJoinPhys(const CoalescerPair &CP) {
/// Always join simple intervals that are defined by a single copy from a
/// reserved register. This doesn't increase register pressure, so it is
/// always beneficial.
SmallVectorImpl<unsigned> &ShrinkRegs);
/// Get the value assignments suitable for passing to LiveInterval::join.
- const int *getAssignments() const { return &Assignments[0]; }
+ const int *getAssignments() const { return Assignments.data(); }
};
} // end anonymous namespace
for (ConstMIOperands MO(DefMI); MO.isValid(); ++MO) {
if (!MO->isReg() || MO->getReg() != LI.reg || !MO->isDef())
continue;
- L |= TRI->getSubRegIndexLaneMask(compose(*TRI, SubIdx, MO->getSubReg()));
+ L |= TRI->getSubRegIndexLaneMask(
+ TRI->composeSubRegIndices(SubIdx, MO->getSubReg()));
if (MO->readsReg())
Redef = true;
}
if ((V.WriteLanes & OtherV.ValidLanes) == 0)
return CR_Replace;
+ // If the other live range is killed by DefMI and the live ranges are still
+ // overlapping, it must be because we're looking at an early clobber def:
+ //
+ // %dst<def,early-clobber> = ASM %src<kill>
+ //
+ // In this case, it is illegal to merge the two live ranges since the early
+ // clobber def would clobber %src before it was read.
+ if (OtherLRQ.isKill()) {
+ // This case where the def doesn't overlap the kill is handled above.
+ assert(VNI->def.isEarlyClobber() &&
+ "Only early clobber defs can overlap a kill");
+ return CR_Impossible;
+ }
+
// VNI is clobbering live lanes in OtherVNI, but there is still the
// possibility that no instructions actually read the clobbered lanes.
// If we're clobbering all the lanes in OtherVNI, at least one must be read.
continue;
if (!MO->readsReg())
continue;
- if (Lanes &
- TRI->getSubRegIndexLaneMask(compose(*TRI, SubIdx, MO->getSubReg())))
+ if (Lanes & TRI->getSubRegIndexLaneMask(
+ TRI->composeSubRegIndices(SubIdx, MO->getSubReg())))
return true;
}
return false;
}
namespace {
- // DepthMBBCompare - Comparison predicate that sort first based on the loop
- // depth of the basic block (the unsigned), and then on the MBB number.
- struct DepthMBBCompare {
- typedef std::pair<unsigned, MachineBasicBlock*> DepthMBBPair;
- bool operator()(const DepthMBBPair &LHS, const DepthMBBPair &RHS) const {
- // Deeper loops first
- if (LHS.first != RHS.first)
- return LHS.first > RHS.first;
-
- // Prefer blocks that are more connected in the CFG. This takes care of
- // the most difficult copies first while intervals are short.
- unsigned cl = LHS.second->pred_size() + LHS.second->succ_size();
- unsigned cr = RHS.second->pred_size() + RHS.second->succ_size();
- if (cl != cr)
- return cl > cr;
-
- // As a last resort, sort by block number.
- return LHS.second->getNumber() < RHS.second->getNumber();
- }
- };
+// Information concerning MBB coalescing priority.
+struct MBBPriorityInfo {
+ MachineBasicBlock *MBB;
+ unsigned Depth;
+ bool IsSplit;
+
+ MBBPriorityInfo(MachineBasicBlock *mbb, unsigned depth, bool issplit)
+ : MBB(mbb), Depth(depth), IsSplit(issplit) {}
+};
+} // end anonymous namespace
+
+// C-style comparator that sorts first on the loop depth of the basic block,
+// then on split-edge status and CFG connectedness, and finally on the MBB
+// number as a tie-break.
+//
+// EnableGlobalCopies assumes that the primary sort key is loop depth.
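+//
+// The C-style signature lets the blocks be sorted with array_pod_sort, which
+// is qsort-based and avoids instantiating std::sort for this element type.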
+static int compareMBBPriority(const void *L, const void *R) {
+ const MBBPriorityInfo *LHS = static_cast<const MBBPriorityInfo*>(L);
+ const MBBPriorityInfo *RHS = static_cast<const MBBPriorityInfo*>(R);
+ // Deeper loops first
+ if (LHS->Depth != RHS->Depth)
+ return LHS->Depth > RHS->Depth ? -1 : 1;
+
+ // Try to unsplit critical edges next.
+ if (LHS->IsSplit != RHS->IsSplit)
+ return LHS->IsSplit ? -1 : 1;
+
+ // Prefer blocks that are more connected in the CFG. This takes care of
+ // the most difficult copies first while intervals are short.
+ unsigned cl = LHS->MBB->pred_size() + LHS->MBB->succ_size();
+ unsigned cr = RHS->MBB->pred_size() + RHS->MBB->succ_size();
+ if (cl != cr)
+ return cl > cr ? -1 : 1;
+
+ // As a last resort, sort by block number.
+ return LHS->MBB->getNumber() < RHS->MBB->getNumber() ? -1 : 1;
+}
+
+/// \returns true if the given copy uses or defines a local live range.
+static bool isLocalCopy(MachineInstr *Copy, const LiveIntervals *LIS) {
+ if (!Copy->isCopy())
+ return false;
+
+ unsigned SrcReg = Copy->getOperand(1).getReg();
+ unsigned DstReg = Copy->getOperand(0).getReg();
+ if (TargetRegisterInfo::isPhysicalRegister(SrcReg)
+ || TargetRegisterInfo::isPhysicalRegister(DstReg))
+ return false;
+
+ return LIS->intervalIsInOneMBB(LIS->getInterval(SrcReg))
+ || LIS->intervalIsInOneMBB(LIS->getInterval(DstReg));
+}
-// Try joining WorkList copies starting from index From.
-// Null out any successful joins.
+// Try joining copies in the given CurrList. Null out any successful joins.
-bool RegisterCoalescer::copyCoalesceWorkList(unsigned From) {
- assert(From <= WorkList.size() && "Out of range");
+bool RegisterCoalescer::
+copyCoalesceWorkList(MutableArrayRef<MachineInstr*> CurrList) {
bool Progress = false;
- for (unsigned i = From, e = WorkList.size(); i != e; ++i) {
- if (!WorkList[i])
+ for (unsigned i = 0, e = CurrList.size(); i != e; ++i) {
+ if (!CurrList[i])
continue;
// Skip instruction pointers that have already been erased, for example by
// dead code elimination.
- if (ErasedInstrs.erase(WorkList[i])) {
- WorkList[i] = 0;
+ if (ErasedInstrs.erase(CurrList[i])) {
+ CurrList[i] = 0;
continue;
}
bool Again = false;
- bool Success = joinCopy(WorkList[i], Again);
+ bool Success = joinCopy(CurrList[i], Again);
Progress |= Success;
if (Success || !Again)
- WorkList[i] = 0;
+ CurrList[i] = 0;
}
return Progress;
}
// Collect all copy-like instructions in MBB. Don't start coalescing anything
// yet, it might invalidate the iterator.
const unsigned PrevSize = WorkList.size();
- for (MachineBasicBlock::iterator MII = MBB->begin(), E = MBB->end();
- MII != E; ++MII)
- if (MII->isCopyLike())
- WorkList.push_back(MII);
-
+ if (JoinGlobalCopies) {
+ // Coalesce copies bottom-up to coalesce local defs before local uses. They
+ // are not inherently easier to resolve, but slightly preferable until we
+ // have local live range splitting. In particular this is required by
+ // cmp+jmp macro fusion.
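+    // (On x86, for instance, the hardware fuses a CMP or TEST with an
+    // adjacent conditional jump; a copy left between them would defeat
+    // that fusion.)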
+ for (MachineBasicBlock::reverse_iterator
+ MII = MBB->rbegin(), E = MBB->rend(); MII != E; ++MII) {
+ if (!MII->isCopyLike())
+ continue;
+ if (isLocalCopy(&(*MII), LIS))
+ LocalWorkList.push_back(&(*MII));
+ else
+ WorkList.push_back(&(*MII));
+ }
+  } else {
+ for (MachineBasicBlock::iterator MII = MBB->begin(), E = MBB->end();
+ MII != E; ++MII)
+ if (MII->isCopyLike())
+ WorkList.push_back(MII);
+ }
// Try coalescing the collected copies immediately, and remove the nulls.
// This prevents the WorkList from getting too large since most copies are
// joinable on the first attempt.
- if (copyCoalesceWorkList(PrevSize))
+ MutableArrayRef<MachineInstr*>
+ CurrList(WorkList.begin() + PrevSize, WorkList.end());
+ if (copyCoalesceWorkList(CurrList))
WorkList.erase(std::remove(WorkList.begin() + PrevSize, WorkList.end(),
(MachineInstr*)0), WorkList.end());
}
+void RegisterCoalescer::coalesceLocals() {
+ copyCoalesceWorkList(LocalWorkList);
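+  // Local copies that could not be coalesced yet get another chance from
+  // the global WorkList, which joinAllIntervals() drains to a fixed point.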
+ for (unsigned j = 0, je = LocalWorkList.size(); j != je; ++j) {
+ if (LocalWorkList[j])
+ WorkList.push_back(LocalWorkList[j]);
+ }
+ LocalWorkList.clear();
+}
+
void RegisterCoalescer::joinAllIntervals() {
DEBUG(dbgs() << "********** JOINING INTERVALS ***********\n");
- assert(WorkList.empty() && "Old data still around.");
-
- if (Loops->empty()) {
- // If there are no loops in the function, join intervals in function order.
- for (MachineFunction::iterator I = MF->begin(), E = MF->end();
- I != E; ++I)
- copyCoalesceInMBB(I);
- } else {
- // Otherwise, join intervals in inner loops before other intervals.
- // Unfortunately we can't just iterate over loop hierarchy here because
- // there may be more MBB's than BB's. Collect MBB's for sorting.
-
- // Join intervals in the function prolog first. We want to join physical
- // registers with virtual registers before the intervals got too long.
- std::vector<std::pair<unsigned, MachineBasicBlock*> > MBBs;
- for (MachineFunction::iterator I = MF->begin(), E = MF->end();I != E;++I){
- MachineBasicBlock *MBB = I;
- MBBs.push_back(std::make_pair(Loops->getLoopDepth(MBB), I));
+ assert(WorkList.empty() && LocalWorkList.empty() && "Old data still around.");
+
+ std::vector<MBBPriorityInfo> MBBs;
+ MBBs.reserve(MF->size());
+  for (MachineFunction::iterator I = MF->begin(), E = MF->end(); I != E; ++I) {
+ MachineBasicBlock *MBB = I;
+ MBBs.push_back(MBBPriorityInfo(MBB, Loops->getLoopDepth(MBB),
+ JoinSplitEdges && isSplitEdge(MBB)));
+ }
+ array_pod_sort(MBBs.begin(), MBBs.end(), compareMBBPriority);
+
+ // Coalesce intervals in MBB priority order.
+ unsigned CurrDepth = UINT_MAX;
+ for (unsigned i = 0, e = MBBs.size(); i != e; ++i) {
+ // Try coalescing the collected local copies for deeper loops.
+ if (JoinGlobalCopies && MBBs[i].Depth < CurrDepth) {
+ coalesceLocals();
+ CurrDepth = MBBs[i].Depth;
}
-
- // Sort by loop depth.
- std::sort(MBBs.begin(), MBBs.end(), DepthMBBCompare());
-
- // Finally, join intervals in loop nest order.
- for (unsigned i = 0, e = MBBs.size(); i != e; ++i)
- copyCoalesceInMBB(MBBs[i].second);
+ copyCoalesceInMBB(MBBs[i].MBB);
}
+ coalesceLocals();
// Joining intervals can allow other intervals to be joined. Iteratively join
// until we make no progress.
- while (copyCoalesceWorkList())
+ while (copyCoalesceWorkList(WorkList))
/* empty */ ;
}
AA = &getAnalysis<AliasAnalysis>();
Loops = &getAnalysis<MachineLoopInfo>();
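+
+  // Unless overridden on the command line, take the default for global copy
+  // coalescing from the subtarget: targets that enable the MachineScheduler
+  // are assumed to also benefit from coalescing copies that span blocks.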
+ const TargetSubtargetInfo &ST = TM->getSubtarget<TargetSubtargetInfo>();
+ if (EnableGlobalCopies == cl::BOU_UNSET)
+ JoinGlobalCopies = ST.enableMachineScheduler();
+ else
+ JoinGlobalCopies = (EnableGlobalCopies == cl::BOU_TRUE);
+
+ // The MachineScheduler does not currently require JoinSplitEdges. This will
+ // either be enabled unconditionally or replaced by a more general live range
+ // splitting optimization.
+ JoinSplitEdges = EnableJoinSplits;
+
DEBUG(dbgs() << "********** SIMPLE REGISTER COALESCING **********\n"
<< "********** Function: " << MF->getName() << '\n');