X-Git-Url: http://plrg.eecs.uci.edu/git/?a=blobdiff_plain;f=lib%2FCodeGen%2FSimpleRegisterCoalescing.cpp;h=8283965dd1bdbe7c82f93053aeade2c320104409;hb=b0efa94fb63d55c7aa5eb3c7c23d39a763036c24;hp=7bf0fd28f6b2c4f25f9244e3ac1e3db2168d76d3;hpb=04c528a0c86ddf3d6a70681f72e1b2ec07b0b53a;p=oota-llvm.git diff --git a/lib/CodeGen/SimpleRegisterCoalescing.cpp b/lib/CodeGen/SimpleRegisterCoalescing.cpp index 7bf0fd28f6b..8283965dd1b 100644 --- a/lib/CodeGen/SimpleRegisterCoalescing.cpp +++ b/lib/CodeGen/SimpleRegisterCoalescing.cpp @@ -13,8 +13,10 @@ //===----------------------------------------------------------------------===// #define DEBUG_TYPE "regcoalescing" -#include "SimpleRegisterCoalescing.h" +#include "RegisterCoalescer.h" #include "VirtRegMap.h" +#include "LiveDebugVariables.h" +#include "RegisterCoalescer.h" #include "llvm/CodeGen/LiveIntervalAnalysis.h" #include "llvm/Value.h" #include "llvm/Analysis/AliasAnalysis.h" @@ -23,7 +25,6 @@ #include "llvm/CodeGen/MachineLoopInfo.h" #include "llvm/CodeGen/MachineRegisterInfo.h" #include "llvm/CodeGen/Passes.h" -#include "llvm/CodeGen/RegisterCoalescer.h" #include "llvm/Target/TargetInstrInfo.h" #include "llvm/Target/TargetMachine.h" #include "llvm/Target/TargetOptions.h" @@ -46,7 +47,6 @@ STATISTIC(numExtends , "Number of copies extended"); STATISTIC(NumReMats , "Number of instructions re-materialized"); STATISTIC(numPeep , "Number of identity moves eliminated after coalescing"); STATISTIC(numAborts , "Number of times interval joining aborted"); -STATISTIC(numDeadValNo, "Number of valno def marked dead"); char SimpleRegisterCoalescing::ID = 0; static cl::opt @@ -59,31 +59,62 @@ DisableCrossClassJoin("disable-cross-class-join", cl::desc("Avoid coalescing cross register class copies"), cl::init(false), cl::Hidden); -static RegisterPass -X("simple-register-coalescing", "Simple Register Coalescing"); - -// Declare that we implement the RegisterCoalescer interface -static RegisterAnalysisGroup V(X); +static cl::opt +EnablePhysicalJoin("join-physregs", + cl::desc("Join physical register copies"), + cl::init(false), cl::Hidden); -const PassInfo *const llvm::SimpleRegisterCoalescingID = &X; +static cl::opt +VerifyCoalescing("verify-coalescing", + cl::desc("Verify machine instrs before and after register coalescing"), + cl::Hidden); + +INITIALIZE_AG_PASS_BEGIN(SimpleRegisterCoalescing, RegisterCoalescer, + "simple-register-coalescing", "Simple Register Coalescing", + false, false, true) +INITIALIZE_PASS_DEPENDENCY(LiveIntervals) +INITIALIZE_PASS_DEPENDENCY(LiveDebugVariables) +INITIALIZE_PASS_DEPENDENCY(SlotIndexes) +INITIALIZE_PASS_DEPENDENCY(MachineLoopInfo) +INITIALIZE_PASS_DEPENDENCY(StrongPHIElimination) +INITIALIZE_PASS_DEPENDENCY(PHIElimination) +INITIALIZE_PASS_DEPENDENCY(TwoAddressInstructionPass) +INITIALIZE_AG_DEPENDENCY(AliasAnalysis) +INITIALIZE_AG_PASS_END(SimpleRegisterCoalescing, RegisterCoalescer, + "simple-register-coalescing", "Simple Register Coalescing", + false, false, true) + +char &llvm::SimpleRegisterCoalescingID = SimpleRegisterCoalescing::ID; void SimpleRegisterCoalescing::getAnalysisUsage(AnalysisUsage &AU) const { AU.setPreservesCFG(); AU.addRequired(); AU.addRequired(); AU.addPreserved(); + AU.addRequired(); + AU.addPreserved(); AU.addPreserved(); AU.addRequired(); AU.addPreserved(); AU.addPreservedID(MachineDominatorsID); - if (StrongPHIElim) - AU.addPreservedID(StrongPHIEliminationID); - else - AU.addPreservedID(PHIEliminationID); + AU.addPreservedID(StrongPHIEliminationID); + AU.addPreservedID(PHIEliminationID); 
AU.addPreservedID(TwoAddressInstructionPassID); MachineFunctionPass::getAnalysisUsage(AU); } +void SimpleRegisterCoalescing::markAsJoined(MachineInstr *CopyMI) { + /// Joined copies are not deleted immediately, but kept in JoinedCopies. + JoinedCopies.insert(CopyMI); + + /// Mark all register operands of CopyMI as so they won't affect dead + /// code elimination. + for (MachineInstr::mop_iterator I = CopyMI->operands_begin(), + E = CopyMI->operands_end(); I != E; ++I) + if (I->isReg()) + I->setIsUndef(true); +} + /// AdjustCopiesBackFrom - We found a non-trivially-coalescable copy with IntA /// being the source and IntB being the dest, thus this defines a value number /// in IntB. If the source value number (in IntA) is defined by a copy from B, @@ -121,7 +152,7 @@ bool SimpleRegisterCoalescing::AdjustCopiesBackFrom(const CoalescerPair &CP, // Get the location that B is defined at. Two options: either this value has // an unknown definition point or it is defined at CopyIdx. If unknown, we // can't process it. - if (!BValNo->getCopy()) return false; + if (!BValNo->isDefByCopy()) return false; assert(BValNo->def == CopyIdx && "Copy doesn't define the value?"); // AValNo is the value number in A that defines the copy, A3 in the example. @@ -176,15 +207,14 @@ bool SimpleRegisterCoalescing::AdjustCopiesBackFrom(const CoalescerPair &CP, if (ValLR+1 != BLR) return false; // If a live interval is a physical register, conservatively check if any - // of its sub-registers is overlapping the live interval of the virtual - // register. If so, do not coalesce. - if (TargetRegisterInfo::isPhysicalRegister(IntB.reg) && - *tri_->getSubRegisters(IntB.reg)) { - for (const unsigned* SR = tri_->getSubRegisters(IntB.reg); *SR; ++SR) - if (li_->hasInterval(*SR) && IntA.overlaps(li_->getInterval(*SR))) { + // of its aliases is overlapping the live interval of the virtual register. + // If so, do not coalesce. + if (TargetRegisterInfo::isPhysicalRegister(IntB.reg)) { + for (const unsigned *AS = tri_->getAliasSet(IntB.reg); *AS; ++AS) + if (li_->hasInterval(*AS) && IntA.overlaps(li_->getInterval(*AS))) { DEBUG({ - dbgs() << "\t\tInterfere with sub-register "; - li_->getInterval(*SR).print(dbgs(), tri_); + dbgs() << "\t\tInterfere with alias "; + li_->getInterval(*AS).print(dbgs(), tri_); }); return false; } @@ -215,14 +245,19 @@ bool SimpleRegisterCoalescing::AdjustCopiesBackFrom(const CoalescerPair &CP, continue; LiveInterval &SRLI = li_->getInterval(*SR); SRLI.addRange(LiveRange(FillerStart, FillerEnd, - SRLI.getNextValue(FillerStart, 0, true, + SRLI.getNextValue(FillerStart, 0, li_->getVNInfoAllocator()))); } } // Okay, merge "B1" into the same value number as "B0". if (BValNo != ValLR->valno) { + // If B1 is killed by a PHI, then the merged live range must also be killed + // by the same PHI, as B0 and B1 can not overlap. + bool HasPHIKill = BValNo->hasPHIKill(); IntB.MergeValueNumberInto(BValNo, ValLR->valno); + if (HasPHIKill) + ValLR->valno->setHasPHIKill(true); } DEBUG({ dbgs() << " result = "; @@ -241,7 +276,7 @@ bool SimpleRegisterCoalescing::AdjustCopiesBackFrom(const CoalescerPair &CP, // merge, find the last use and trim the live range. That will also add the // isKill marker. 
if (ALR->end == CopyIdx) - TrimLiveIntervalToLastUse(CopyUseIdx, CopyMI->getParent(), IntA, ALR); + li_->shrinkToUses(&IntA); ++numExtends; return true; @@ -263,9 +298,6 @@ bool SimpleRegisterCoalescing::HasOtherReachingDefs(LiveInterval &IntA, for (; BI != IntB.ranges.end() && AI->end >= BI->start; ++BI) { if (BI->valno == BValNo) continue; - // When BValNo is null, we're looking for a dummy clobber-value for a subreg. - if (!BValNo && !BI->valno->isDefAccurate() && !BI->valno->getCopy()) - continue; if (BI->start <= AI->start && BI->end > AI->start) return true; if (BI->start > AI->start && BI->start < AI->end) @@ -275,16 +307,6 @@ bool SimpleRegisterCoalescing::HasOtherReachingDefs(LiveInterval &IntA, return false; } -static void -TransferImplicitOps(MachineInstr *MI, MachineInstr *NewMI) { - for (unsigned i = MI->getDesc().getNumOperands(), e = MI->getNumOperands(); - i != e; ++i) { - MachineOperand &MO = MI->getOperand(i); - if (MO.isReg() && MO.isImplicit()) - NewMI->addOperand(MO); - } -} - /// RemoveCopyByCommutingDef - We found a non-trivially-coalescable copy with /// IntA being the source and IntB being the dest, thus this defines a value /// number in IntB. If the source value number (in IntA) is defined by a @@ -321,8 +343,7 @@ bool SimpleRegisterCoalescing::RemoveCopyByCommutingDef(const CoalescerPair &CP, if (!li_->hasInterval(CP.getDstReg())) return false; - SlotIndex CopyIdx = - li_->getInstructionIndex(CopyMI).getDefIndex(); + SlotIndex CopyIdx = li_->getInstructionIndex(CopyMI).getDefIndex(); LiveInterval &IntA = li_->getInterval(CP.isFlipped() ? CP.getDstReg() : CP.getSrcReg()); @@ -331,27 +352,19 @@ bool SimpleRegisterCoalescing::RemoveCopyByCommutingDef(const CoalescerPair &CP, // BValNo is a value number in B that is defined by a copy from A. 'B3' in // the example above. - LiveInterval::iterator BLR = IntB.FindLiveRangeContaining(CopyIdx); - if (BLR == IntB.end()) return false; - VNInfo *BValNo = BLR->valno; + VNInfo *BValNo = IntB.getVNInfoAt(CopyIdx); + if (!BValNo || !BValNo->isDefByCopy()) + return false; - // Get the location that B is defined at. Two options: either this value has - // an unknown definition point or it is defined at CopyIdx. If unknown, we - // can't process it. - if (!BValNo->getCopy()) return false; assert(BValNo->def == CopyIdx && "Copy doesn't define the value?"); // AValNo is the value number in A that defines the copy, A3 in the example. - LiveInterval::iterator ALR = - IntA.FindLiveRangeContaining(CopyIdx.getUseIndex()); // + VNInfo *AValNo = IntA.getVNInfoAt(CopyIdx.getUseIndex()); + assert(AValNo && "COPY source not live"); - assert(ALR != IntA.end() && "Live range not found!"); - VNInfo *AValNo = ALR->valno; // If other defs can reach uses of this def, then it's not safe to perform - // the optimization. FIXME: Do isPHIDef and isDefAccurate both need to be - // tested? - if (AValNo->isPHIDef() || !AValNo->isDefAccurate() || - AValNo->isUnused() || AValNo->hasPHIKill()) + // the optimization. 
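// The AdjustCopiesBackFrom transformation above, restated as a minimal,
// self-contained sketch.  SimpleRange/SimpleInterval are illustrative
// stand-ins, not LLVM's LiveInterval/VNInfo: when the copy B = A is coalesced
// and A's value was itself defined by an earlier copy from B, the B range
// that died just before the copy is extended across the copy's slot and the
// copy-defined value number is merged away, so the copy becomes a no-op.
#include <cstddef>
#include <cstdio>
#include <vector>

struct SimpleRange { unsigned Start, End, ValNo; };     // half-open [Start, End)

struct SimpleInterval {
  std::vector<SimpleRange> Ranges;                      // sorted, disjoint

  // Extend the range that ends exactly at CopyIdx over the range the copy
  // defines, and merge the copy's value number into the extended one.
  bool extendAcrossCopy(unsigned CopyIdx) {
    for (std::size_t i = 0; i + 1 < Ranges.size(); ++i) {
      if (Ranges[i].End != CopyIdx || Ranges[i + 1].Start != CopyIdx)
        continue;
      unsigned DeadValNo = Ranges[i + 1].ValNo;
      Ranges[i].End = Ranges[i + 1].End;                // fill the gap
      Ranges.erase(Ranges.begin() + i + 1);
      for (SimpleRange &R : Ranges)                     // merge value numbers
        if (R.ValNo == DeadValNo)
          R.ValNo = Ranges[i].ValNo;
      return true;
    }
    return false;
  }
};

int main() {
  // B is live on [0,4) as value #0, then redefined by the coalesced copy at 4.
  SimpleInterval B{{{0, 4, 0}, {4, 10, 1}}};
  if (B.extendAcrossCopy(4))
    std::printf("merged range: [%u,%u) val#%u\n",
                B.Ranges[0].Start, B.Ranges[0].End, B.Ranges[0].ValNo);
  return 0;
}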
+ if (AValNo->isPHIDef() || AValNo->isUnused() || AValNo->hasPHIKill()) return false; MachineInstr *DefMI = li_->getInstructionFromIndex(AValNo->def); if (!DefMI) @@ -386,16 +399,12 @@ bool SimpleRegisterCoalescing::RemoveCopyByCommutingDef(const CoalescerPair &CP, if (HasOtherReachingDefs(IntA, IntB, AValNo, BValNo)) return false; - bool BHasSubRegs = false; - if (TargetRegisterInfo::isPhysicalRegister(IntB.reg)) - BHasSubRegs = *tri_->getSubRegisters(IntB.reg); - - // Abort if the subregisters of IntB.reg have values that are not simply the + // Abort if the aliases of IntB.reg have values that are not simply the // clobbers from the superreg. - if (BHasSubRegs) - for (const unsigned *SR = tri_->getSubRegisters(IntB.reg); *SR; ++SR) - if (li_->hasInterval(*SR) && - HasOtherReachingDefs(IntA, li_->getInterval(*SR), AValNo, 0)) + if (TargetRegisterInfo::isPhysicalRegister(IntB.reg)) + for (const unsigned *AS = tri_->getAliasSet(IntB.reg); *AS; ++AS) + if (li_->hasInterval(*AS) && + HasOtherReachingDefs(IntA, li_->getInterval(*AS), AValNo, 0)) return false; // If some of the uses of IntA.reg is already coalesced away, return false. @@ -412,12 +421,19 @@ bool SimpleRegisterCoalescing::RemoveCopyByCommutingDef(const CoalescerPair &CP, return false; } + DEBUG(dbgs() << "\tRemoveCopyByCommutingDef: " << AValNo->def << '\t' + << *DefMI); + // At this point we have decided that it is legal to do this // transformation. Start by commuting the instruction. MachineBasicBlock *MBB = DefMI->getParent(); MachineInstr *NewMI = tii_->commuteInstruction(DefMI); if (!NewMI) return false; + if (TargetRegisterInfo::isVirtualRegister(IntA.reg) && + TargetRegisterInfo::isVirtualRegister(IntB.reg) && + !mri_->constrainRegClass(IntB.reg, mri_->getRegClass(IntA.reg))) + return false; if (NewMI != DefMI) { li_->ReplaceMachineInstrInMaps(DefMI, NewMI); MBB->insert(DefMI, NewMI); @@ -426,10 +442,6 @@ bool SimpleRegisterCoalescing::RemoveCopyByCommutingDef(const CoalescerPair &CP, unsigned OpIdx = NewMI->findRegisterUseOperandIdx(IntA.reg, false); NewMI->getOperand(OpIdx).setIsKill(); - bool BHasPHIKill = BValNo->hasPHIKill(); - SmallVector BDeadValNos; - std::map BExtend; - // If ALR and BLR overlaps and end of BLR extends beyond end of ALR, e.g. // A = or A, B // ... @@ -438,9 +450,6 @@ bool SimpleRegisterCoalescing::RemoveCopyByCommutingDef(const CoalescerPair &CP, // C = A // ... // = B - bool Extended = BLR->end > ALR->end && ALR->end != ALR->start; - if (Extended) - BExtend[ALR->end] = BLR->end; // Update uses of IntA of the specific Val# with IntB. for (MachineRegisterInfo::use_iterator UI = mri_->use_begin(IntA.reg), @@ -466,50 +475,22 @@ bool SimpleRegisterCoalescing::RemoveCopyByCommutingDef(const CoalescerPair &CP, UseMO.setReg(NewReg); if (UseMI == CopyMI) continue; - if (UseMO.isKill()) { - if (Extended) - UseMO.setIsKill(false); - } if (!UseMI->isCopy()) continue; if (UseMI->getOperand(0).getReg() != IntB.reg || UseMI->getOperand(0).getSubReg()) continue; - - // This copy will become a noop. If it's defining a new val#, - // remove that val# as well. However this live range is being - // extended to the end of the existing live range defined by the copy. + + // This copy will become a noop. If it's defining a new val#, merge it into + // BValNo. 
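// A stand-alone sketch of the HasOtherReachingDefs safety test relied on
// above, again using a simplified stand-in for live ranges: before B's
// copy-defined value is rewritten in terms of A, no *other* value of B may
// overlap the ranges covered by A's value, or a different definition of B
// could reach those uses after the rewrite.
#include <cstdio>
#include <vector>

struct SimpleRange { unsigned Start, End, ValNo; };     // half-open [Start, End)

static bool hasOtherReachingDefs(const std::vector<SimpleRange> &IntA,
                                 const std::vector<SimpleRange> &IntB,
                                 unsigned AValNo, unsigned BValNo) {
  for (const SimpleRange &AR : IntA) {
    if (AR.ValNo != AValNo)
      continue;
    for (const SimpleRange &BR : IntB) {
      if (BR.ValNo == BValNo)
        continue;
      if (BR.Start < AR.End && AR.Start < BR.End)       // half-open overlap
        return true;
    }
  }
  return false;
}

int main() {
  std::vector<SimpleRange> A = {{10, 30, 0}};           // A's value #0
  std::vector<SimpleRange> B = {{0, 10, 0}, {30, 40, 1}};
  std::printf("conflicting def reaches the uses: %s\n",
              hasOtherReachingDefs(A, B, 0, 1) ? "yes" : "no");   // "no"
  return 0;
}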
SlotIndex DefIdx = UseIdx.getDefIndex(); - const LiveRange *DLR = IntB.getLiveRangeContaining(DefIdx); - if (!DLR) + VNInfo *DVNI = IntB.getVNInfoAt(DefIdx); + if (!DVNI) continue; - BHasPHIKill |= DLR->valno->hasPHIKill(); - assert(DLR->valno->def == DefIdx); - BDeadValNos.push_back(DLR->valno); - BExtend[DLR->start] = DLR->end; - JoinedCopies.insert(UseMI); - } - - // We need to insert a new liverange: [ALR.start, LastUse). It may be we can - // simply extend BLR if CopyMI doesn't end the range. - DEBUG({ - dbgs() << "Extending: "; - IntB.print(dbgs(), tri_); - }); - - // Remove val#'s defined by copies that will be coalesced away. - for (unsigned i = 0, e = BDeadValNos.size(); i != e; ++i) { - VNInfo *DeadVNI = BDeadValNos[i]; - if (BHasSubRegs) { - for (const unsigned *SR = tri_->getSubRegisters(IntB.reg); *SR; ++SR) { - if (!li_->hasInterval(*SR)) - continue; - LiveInterval &SRLI = li_->getInterval(*SR); - if (const LiveRange *SRLR = SRLI.getLiveRangeContaining(DeadVNI->def)) - SRLI.removeValNo(SRLR->valno); - } - } - IntB.removeValNo(BDeadValNos[i]); + DEBUG(dbgs() << "\t\tnoop: " << DefIdx << '\t' << *UseMI); + assert(DVNI->def == DefIdx); + BValNo = IntB.MergeValueNumberInto(BValNo, DVNI); + markAsJoined(UseMI); } // Extend BValNo by merging in IntA live ranges of AValNo. Val# definition @@ -520,129 +501,20 @@ bool SimpleRegisterCoalescing::RemoveCopyByCommutingDef(const CoalescerPair &CP, for (LiveInterval::iterator AI = IntA.begin(), AE = IntA.end(); AI != AE; ++AI) { if (AI->valno != AValNo) continue; - SlotIndex End = AI->end; - std::map::iterator - EI = BExtend.find(End); - if (EI != BExtend.end()) - End = EI->second; - IntB.addRange(LiveRange(AI->start, End, ValNo)); + IntB.addRange(LiveRange(AI->start, AI->end, ValNo)); } - ValNo->setHasPHIKill(BHasPHIKill); - - DEBUG({ - dbgs() << " result = "; - IntB.print(dbgs(), tri_); - dbgs() << "\nShortening: "; - IntA.print(dbgs(), tri_); - }); + DEBUG(dbgs() << "\t\textended: " << IntB << '\n'); IntA.removeValNo(AValNo); - - DEBUG({ - dbgs() << " result = "; - IntA.print(dbgs(), tri_); - dbgs() << '\n'; - }); - + DEBUG(dbgs() << "\t\ttrimmed: " << IntA << '\n'); ++numCommutes; return true; } -/// isSameOrFallThroughBB - Return true if MBB == SuccMBB or MBB simply -/// fallthoughs to SuccMBB. -static bool isSameOrFallThroughBB(MachineBasicBlock *MBB, - MachineBasicBlock *SuccMBB, - const TargetInstrInfo *tii_) { - if (MBB == SuccMBB) - return true; - MachineBasicBlock *TBB = 0, *FBB = 0; - SmallVector Cond; - return !tii_->AnalyzeBranch(*MBB, TBB, FBB, Cond) && !TBB && !FBB && - MBB->isSuccessor(SuccMBB); -} - -/// removeRange - Wrapper for LiveInterval::removeRange. This removes a range -/// from a physical register live interval as well as from the live intervals -/// of its sub-registers. -static void removeRange(LiveInterval &li, - SlotIndex Start, SlotIndex End, - LiveIntervals *li_, const TargetRegisterInfo *tri_) { - li.removeRange(Start, End, true); - if (TargetRegisterInfo::isPhysicalRegister(li.reg)) { - for (const unsigned* SR = tri_->getSubRegisters(li.reg); *SR; ++SR) { - if (!li_->hasInterval(*SR)) - continue; - LiveInterval &sli = li_->getInterval(*SR); - SlotIndex RemoveStart = Start; - SlotIndex RemoveEnd = Start; - - while (RemoveEnd != End) { - LiveInterval::iterator LR = sli.FindLiveRangeContaining(RemoveStart); - if (LR == sli.end()) - break; - RemoveEnd = (LR->end < End) ? 
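// A rough sketch of the use-rewriting step above on a toy instruction list
// (ToyInst is illustrative, not MachineInstr): every use of the old register
// is redirected to the new one, and copies that become "X = X" afterwards are
// recorded as joined so they can be erased later instead of being rewritten.
#include <cstddef>
#include <cstdio>
#include <set>
#include <vector>

struct ToyInst {
  bool IsCopy;
  unsigned Dst, Src;                   // only one use operand is modeled
};

static void rewriteUses(std::vector<ToyInst> &Code, unsigned OldReg,
                        unsigned NewReg, std::set<std::size_t> &Joined) {
  for (std::size_t i = 0; i != Code.size(); ++i) {
    ToyInst &I = Code[i];
    if (I.Src == OldReg)
      I.Src = NewReg;
    if (I.IsCopy && I.Dst == I.Src)
      Joined.insert(i);                // became an identity copy
  }
}

int main() {
  // %1 = op %0 ; %2 = COPY %1   -- coalesce %1 into %2.
  std::vector<ToyInst> Code = {{false, 1, 0}, {true, 2, 1}};
  std::set<std::size_t> Joined;
  rewriteUses(Code, /*OldReg=*/1, /*NewReg=*/2, Joined);
  std::printf("copies that became no-ops: %zu\n", Joined.size());   // 1
  return 0;
}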
LR->end : End; - sli.removeRange(RemoveStart, RemoveEnd, true); - RemoveStart = RemoveEnd; - } - } - } -} - -/// TrimLiveIntervalToLastUse - If there is a last use in the same basic block -/// as the copy instruction, trim the live interval to the last use and return -/// true. -bool -SimpleRegisterCoalescing::TrimLiveIntervalToLastUse(SlotIndex CopyIdx, - MachineBasicBlock *CopyMBB, - LiveInterval &li, - const LiveRange *LR) { - SlotIndex MBBStart = li_->getMBBStartIdx(CopyMBB); - SlotIndex LastUseIdx; - MachineOperand *LastUse = - lastRegisterUse(LR->start, CopyIdx.getPrevSlot(), li.reg, LastUseIdx); - if (LastUse) { - MachineInstr *LastUseMI = LastUse->getParent(); - if (!isSameOrFallThroughBB(LastUseMI->getParent(), CopyMBB, tii_)) { - // r1024 = op - // ... - // BB1: - // = r1024 - // - // BB2: - // r1025 = r1024 - if (MBBStart < LR->end) - removeRange(li, MBBStart, LR->end, li_, tri_); - return true; - } - - // There are uses before the copy, just shorten the live range to the end - // of last use. - LastUse->setIsKill(); - removeRange(li, LastUseIdx.getDefIndex(), LR->end, li_, tri_); - if (LastUseMI->isCopy()) { - MachineOperand &DefMO = LastUseMI->getOperand(0); - if (DefMO.getReg() == li.reg && !DefMO.getSubReg()) - DefMO.setIsDead(); - } - return true; - } - - // Is it livein? - if (LR->start <= MBBStart && LR->end > MBBStart) { - if (LR->start == li_->getZeroIndex()) { - assert(TargetRegisterInfo::isPhysicalRegister(li.reg)); - // Live-in to the function but dead. Remove it from entry live-in set. - mf_->begin()->removeLiveIn(li.reg); - } - // FIXME: Shorten intervals in BBs that reaches this BB. - } - - return false; -} - /// ReMaterializeTrivialDef - If the source of a copy is defined by a trivial /// computation, replace the copy by rematerialize the definition. bool SimpleRegisterCoalescing::ReMaterializeTrivialDef(LiveInterval &SrcInt, + bool preserveSrcInt, unsigned DstReg, unsigned DstSubIdx, MachineInstr *CopyMI) { @@ -651,12 +523,12 @@ bool SimpleRegisterCoalescing::ReMaterializeTrivialDef(LiveInterval &SrcInt, assert(SrcLR != SrcInt.end() && "Live range not found!"); VNInfo *ValNo = SrcLR->valno; // If other defs can reach uses of this def, then it's not safe to perform - // the optimization. FIXME: Do isPHIDef and isDefAccurate both need to be - // tested? - if (ValNo->isPHIDef() || !ValNo->isDefAccurate() || - ValNo->isUnused() || ValNo->hasPHIKill()) + // the optimization. + if (ValNo->isPHIDef() || ValNo->isUnused() || ValNo->hasPHIKill()) return false; MachineInstr *DefMI = li_->getInstructionFromIndex(ValNo->def); + if (!DefMI) + return false; assert(DefMI && "Defining instruction disappeared"); const TargetInstrDesc &TID = DefMI->getDesc(); if (!TID.isAsCheapAsAMove()) @@ -680,8 +552,8 @@ bool SimpleRegisterCoalescing::ReMaterializeTrivialDef(LiveInterval &SrcInt, return false; } - // If destination register has a sub-register index on it, make sure it mtches - // the instruction register class. + // If destination register has a sub-register index on it, make sure it + // matches the instruction register class. if (DstSubIdx) { const TargetInstrDesc &TID = DefMI->getDesc(); if (TID.getNumDefs() != 1) @@ -698,30 +570,12 @@ bool SimpleRegisterCoalescing::ReMaterializeTrivialDef(LiveInterval &SrcInt, RemoveCopyFlag(DstReg, CopyMI); - // If copy kills the source register, find the last use and propagate - // kill. 
- bool checkForDeadDef = false; MachineBasicBlock *MBB = CopyMI->getParent(); - if (SrcLR->end == CopyIdx.getDefIndex()) - if (!TrimLiveIntervalToLastUse(CopyIdx, MBB, SrcInt, SrcLR)) { - checkForDeadDef = true; - } - MachineBasicBlock::iterator MII = llvm::next(MachineBasicBlock::iterator(CopyMI)); tii_->reMaterialize(*MBB, MII, DstReg, DstSubIdx, DefMI, *tri_); MachineInstr *NewMI = prior(MII); - if (checkForDeadDef) { - // PR4090 fix: Trim interval failed because there was no use of the - // source interval in this MBB. If the def is in this MBB too then we - // should mark it dead: - if (DefMI->getParent() == MBB) { - DefMI->addRegisterDead(SrcInt.reg, tri_); - SrcLR->end = SrcLR->start.getNextSlot(); - } - } - // CopyMI may have implicit operands, transfer them over to the newly // rematerialized instruction. And update implicit def interval valnos. for (unsigned i = CopyMI->getDesc().getNumOperands(), @@ -733,13 +587,18 @@ bool SimpleRegisterCoalescing::ReMaterializeTrivialDef(LiveInterval &SrcInt, RemoveCopyFlag(MO.getReg(), CopyMI); } - TransferImplicitOps(CopyMI, NewMI); + NewMI->copyImplicitOps(CopyMI); li_->ReplaceMachineInstrInMaps(CopyMI, NewMI); CopyMI->eraseFromParent(); ReMatCopies.insert(CopyMI); ReMatDefs.insert(DefMI); DEBUG(dbgs() << "Remat: " << *NewMI); ++NumReMats; + + // The source interval can become smaller because we removed a use. + if (preserveSrcInt) + li_->shrinkToUses(&SrcInt); + return true; } @@ -755,6 +614,9 @@ SimpleRegisterCoalescing::UpdateRegDefsUses(const CoalescerPair &CP) { unsigned DstReg = CP.getDstReg(); unsigned SubIdx = CP.getSubIdx(); + // Update LiveDebugVariables. + ldv_->renameRegister(SrcReg, DstReg, SubIdx); + for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(SrcReg); MachineInstr *UseMI = I.skipInstruction();) { // A PhysReg copy that won't be coalesced can perhaps be rematerialized @@ -767,7 +629,7 @@ SimpleRegisterCoalescing::UpdateRegDefsUses(const CoalescerPair &CP) { UseMI->getOperand(0).getReg() != SrcReg && UseMI->getOperand(0).getReg() != DstReg && !JoinedCopies.count(UseMI) && - ReMaterializeTrivialDef(li_->getInterval(SrcReg), + ReMaterializeTrivialDef(li_->getInterval(SrcReg), false, UseMI->getOperand(0).getReg(), 0, UseMI)) continue; } @@ -835,26 +697,6 @@ static bool removeIntervalIfEmpty(LiveInterval &li, LiveIntervals *li_, return false; } -/// ShortenDeadCopyLiveRange - Shorten a live range defined by a dead copy. -/// Return true if live interval is removed. -bool SimpleRegisterCoalescing::ShortenDeadCopyLiveRange(LiveInterval &li, - MachineInstr *CopyMI) { - SlotIndex CopyIdx = li_->getInstructionIndex(CopyMI); - LiveInterval::iterator MLR = - li.FindLiveRangeContaining(CopyIdx.getDefIndex()); - if (MLR == li.end()) - return false; // Already removed by ShortenDeadCopySrcLiveRange. - SlotIndex RemoveStart = MLR->start; - SlotIndex RemoveEnd = MLR->end; - SlotIndex DefIdx = CopyIdx.getDefIndex(); - // Remove the liverange that's defined by this. - if (RemoveStart == DefIdx && RemoveEnd == DefIdx.getStoreIndex()) { - removeRange(li, RemoveStart, RemoveEnd, li_, tri_); - return removeIntervalIfEmpty(li, li_, tri_); - } - return false; -} - /// RemoveDeadDef - If a def of a live interval is now determined dead, remove /// the val# it defines. If the live interval becomes empty, remove it as well. 
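// A sketch of the rematerialization path above, on a toy instruction list
// (ToyInst and the "li" opcode are illustrative, not LLVM classes): when the
// value feeding a copy comes from a side-effect-free, as-cheap-as-a-move
// definition, the copy is replaced by a clone of that definition writing the
// destination directly; afterwards the source register has one use fewer,
// which is why the real code may then shrink the source interval.
#include <cstddef>
#include <cstdio>
#include <string>
#include <vector>

struct ToyInst {
  std::string Op;                      // "li" (load immediate) or "copy"
  unsigned Dst, Src;                   // Src is ignored for "li"
  int Imm;
  bool CheapToRemat;
};

static bool rematerializeTrivialDef(std::vector<ToyInst> &Code,
                                    std::size_t CopyIdx) {
  ToyInst &Copy = Code[CopyIdx];
  if (Copy.Op != "copy")
    return false;
  for (const ToyInst &Def : Code) {
    if (&Def == &Copy || Def.Dst != Copy.Src || !Def.CheapToRemat)
      continue;
    // Clone the cheap definition in place of the copy.
    Code[CopyIdx] = {Def.Op, Copy.Dst, 0, Def.Imm, Def.CheapToRemat};
    return true;
  }
  return false;
}

int main() {
  std::vector<ToyInst> Code = {
      {"li", 1, 0, 42, true},          // %1 = 42  (cheap, no side effects)
      {"copy", 2, 1, 0, false},        // %2 = %1
  };
  if (rematerializeTrivialDef(Code, 1))
    std::printf("%%2 = %s %d\n", Code[1].Op.c_str(), Code[1].Imm);   // %2 = li 42
  return 0;
}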
bool SimpleRegisterCoalescing::RemoveDeadDef(LiveInterval &li, @@ -873,7 +715,7 @@ void SimpleRegisterCoalescing::RemoveCopyFlag(unsigned DstReg, if (li_->hasInterval(DstReg)) { LiveInterval &LI = li_->getInterval(DstReg); if (const LiveRange *LR = LI.getLiveRangeContaining(DefIdx)) - if (LR->valno->getCopy() == CopyMI) + if (LR->valno->def == DefIdx) LR->valno->setCopy(0); } if (!TargetRegisterInfo::isPhysicalRegister(DstReg)) @@ -883,90 +725,64 @@ void SimpleRegisterCoalescing::RemoveCopyFlag(unsigned DstReg, continue; LiveInterval &LI = li_->getInterval(*AS); if (const LiveRange *LR = LI.getLiveRangeContaining(DefIdx)) - if (LR->valno->getCopy() == CopyMI) + if (LR->valno->def == DefIdx) LR->valno->setCopy(0); } } -/// PropagateDeadness - Propagate the dead marker to the instruction which -/// defines the val#. -static void PropagateDeadness(LiveInterval &li, MachineInstr *CopyMI, - SlotIndex &LRStart, LiveIntervals *li_, - const TargetRegisterInfo* tri_) { - MachineInstr *DefMI = - li_->getInstructionFromIndex(LRStart.getDefIndex()); - if (DefMI && DefMI != CopyMI) { - int DeadIdx = DefMI->findRegisterDefOperandIdx(li.reg); - if (DeadIdx != -1) - DefMI->getOperand(DeadIdx).setIsDead(); - else - DefMI->addOperand(MachineOperand::CreateReg(li.reg, - /*def*/true, /*implicit*/true, /*kill*/false, /*dead*/true)); - LRStart = LRStart.getNextSlot(); - } -} - -/// ShortenDeadCopySrcLiveRange - Shorten a live range as it's artificially -/// extended by a dead copy. Mark the last use (if any) of the val# as kill as -/// ends the live range there. If there isn't another use, then this live range -/// is dead. Return true if live interval is removed. -bool -SimpleRegisterCoalescing::ShortenDeadCopySrcLiveRange(LiveInterval &li, - MachineInstr *CopyMI) { - SlotIndex CopyIdx = li_->getInstructionIndex(CopyMI); - if (CopyIdx == SlotIndex()) { - // FIXME: special case: function live in. It can be a general case if the - // first instruction index starts at > 0 value. - assert(TargetRegisterInfo::isPhysicalRegister(li.reg)); - // Live-in to the function but dead. Remove it from entry live-in set. - if (mf_->begin()->isLiveIn(li.reg)) - mf_->begin()->removeLiveIn(li.reg); - if (const LiveRange *LR = li.getLiveRangeContaining(CopyIdx)) - removeRange(li, LR->start, LR->end, li_, tri_); - return removeIntervalIfEmpty(li, li_, tri_); - } - - LiveInterval::iterator LR = - li.FindLiveRangeContaining(CopyIdx.getPrevIndex().getStoreIndex()); - if (LR == li.end()) - // Livein but defined by a phi. - return false; +/// shouldJoinPhys - Return true if a copy involving a physreg should be joined. +/// We need to be careful about coalescing a source physical register with a +/// virtual register. Once the coalescing is done, it cannot be broken and these +/// are not spillable! If the destination interval uses are far away, think +/// twice about coalescing them! +bool SimpleRegisterCoalescing::shouldJoinPhys(CoalescerPair &CP) { + bool Allocatable = li_->isAllocatable(CP.getDstReg()); + LiveInterval &JoinVInt = li_->getInterval(CP.getSrcReg()); + + /// Always join simple intervals that are defined by a single copy from a + /// reserved register. This doesn't increase register pressure, so it is + /// always beneficial. + if (!Allocatable && CP.isFlipped() && JoinVInt.containsOneValue()) + return true; - SlotIndex RemoveStart = LR->start; - SlotIndex RemoveEnd = CopyIdx.getStoreIndex(); - if (LR->end > RemoveEnd) - // More uses past this copy? Nothing to do. 
+ if (!EnablePhysicalJoin) { + DEBUG(dbgs() << "\tPhysreg joins disabled.\n"); return false; + } - // If there is a last use in the same bb, we can't remove the live range. - // Shorten the live interval and return. - MachineBasicBlock *CopyMBB = CopyMI->getParent(); - if (TrimLiveIntervalToLastUse(CopyIdx, CopyMBB, li, LR)) - return false; + // Only coalesce to allocatable physreg, we don't want to risk modifying + // reserved registers. + if (!Allocatable) { + DEBUG(dbgs() << "\tRegister is an unallocatable physreg.\n"); + return false; // Not coalescable. + } - // There are other kills of the val#. Nothing to do. - if (!li.isOnlyLROfValNo(LR)) + // Don't join with physregs that have a ridiculous number of live + // ranges. The data structure performance is really bad when that + // happens. + if (li_->hasInterval(CP.getDstReg()) && + li_->getInterval(CP.getDstReg()).ranges.size() > 1000) { + ++numAborts; + DEBUG(dbgs() + << "\tPhysical register live interval too complicated, abort!\n"); return false; - - MachineBasicBlock *StartMBB = li_->getMBBFromIndex(RemoveStart); - if (!isSameOrFallThroughBB(StartMBB, CopyMBB, tii_)) - // If the live range starts in another mbb and the copy mbb is not a fall - // through mbb, then we can only cut the range from the beginning of the - // copy mbb. - RemoveStart = li_->getMBBStartIdx(CopyMBB).getNextIndex().getBaseIndex(); - - if (LR->valno->def == RemoveStart) { - // If the def MI defines the val# and this copy is the only kill of the - // val#, then propagate the dead marker. - PropagateDeadness(li, CopyMI, RemoveStart, li_, tri_); - ++numDeadValNo; } - removeRange(li, RemoveStart, RemoveEnd, li_, tri_); - return removeIntervalIfEmpty(li, li_, tri_); + // FIXME: Why are we skipping this test for partial copies? + // CodeGen/X86/phys_subreg_coalesce-3.ll needs it. + if (!CP.isPartial()) { + const TargetRegisterClass *RC = mri_->getRegClass(CP.getSrcReg()); + unsigned Threshold = RegClassInfo.getNumAllocatableRegs(RC) * 2; + unsigned Length = li_->getApproximateInstructionCount(JoinVInt); + if (Length > Threshold) { + ++numAborts; + DEBUG(dbgs() << "\tMay tie down a physical register, abort!\n"); + return false; + } + } + return true; } - /// isWinToJoinCrossClass - Return true if it's profitable to coalesce /// two virtual registers from different register classes. bool @@ -975,7 +791,7 @@ SimpleRegisterCoalescing::isWinToJoinCrossClass(unsigned SrcReg, const TargetRegisterClass *SrcRC, const TargetRegisterClass *DstRC, const TargetRegisterClass *NewRC) { - unsigned NewRCCount = allocatableRCRegs_[NewRC].count(); + unsigned NewRCCount = RegClassInfo.getNumAllocatableRegs(NewRC); // This heuristics is good enough in practice, but it's obviously not *right*. // 4 is a magic number that works well enough for x86, ARM, etc. It filter // out all but the most restrictive register classes. @@ -989,8 +805,14 @@ SimpleRegisterCoalescing::isWinToJoinCrossClass(unsigned SrcReg, LiveInterval &DstInt = li_->getInterval(DstReg); unsigned SrcSize = li_->getApproximateInstructionCount(SrcInt); unsigned DstSize = li_->getApproximateInstructionCount(DstInt); - if (SrcSize <= NewRCCount && DstSize <= NewRCCount) + + // Coalesce aggressively if the intervals are small compared to the number of + // registers in the new class. The number 4 is fairly arbitrary, chosen to be + // less aggressive than the 8 used for the whole function size. 
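// The physreg-join policy above, restated as a small stand-alone predicate.
// The struct fields are simplified stand-ins for what the pass actually asks
// LiveIntervals and RegisterClassInfo; the thresholds (1000 live ranges,
// 2 x the number of allocatable registers) mirror the ones in the code above.
#include <cstdio>

struct PhysJoinQuery {
  bool DstIsAllocatable;               // destination physreg is allocatable
  bool Flipped;                        // the virtual register is the source
  bool SrcHasSingleValue;              // virtreg defined by a single copy
  bool JoinPhysRegsEnabled;            // the -join-physregs flag
  bool PartialCopy;                    // sub-register copy
  unsigned DstNumRanges;               // live ranges already on the physreg
  unsigned SrcLengthInInsts;           // approximate virtreg interval length
  unsigned NumAllocatableRegs;         // in the class being joined
};

static bool shouldJoinPhys(const PhysJoinQuery &Q) {
  // Copies of a reserved register into a single-valued virtual register do
  // not add register pressure, so they are always worth joining.
  if (!Q.DstIsAllocatable && Q.Flipped && Q.SrcHasSingleValue)
    return true;
  if (!Q.JoinPhysRegsEnabled || !Q.DstIsAllocatable)
    return false;
  // Avoid physregs whose interval data structure is already huge.
  if (Q.DstNumRanges > 1000)
    return false;
  // Avoid tying the physreg down over a long stretch of code.
  if (!Q.PartialCopy && Q.SrcLengthInInsts > 2 * Q.NumAllocatableRegs)
    return false;
  return true;
}

int main() {
  PhysJoinQuery Q = {true, false, false, true, false, 12, 20, 16};
  std::printf("join physreg copy: %s\n", shouldJoinPhys(Q) ? "yes" : "no"); // yes
  return 0;
}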
+ const unsigned ThresSize = 4 * NewRCCount; + if (SrcSize <= ThresSize && DstSize <= ThresSize) return true; + // Estimate *register use density*. If it doubles or more, abort. unsigned SrcUses = std::distance(mri_->use_nodbg_begin(SrcReg), mri_->use_nodbg_end()); @@ -998,13 +820,13 @@ SimpleRegisterCoalescing::isWinToJoinCrossClass(unsigned SrcReg, mri_->use_nodbg_end()); unsigned NewUses = SrcUses + DstUses; unsigned NewSize = SrcSize + DstSize; - if (SrcRC != NewRC && SrcSize > NewRCCount) { - unsigned SrcRCCount = allocatableRCRegs_[SrcRC].count(); + if (SrcRC != NewRC && SrcSize > ThresSize) { + unsigned SrcRCCount = RegClassInfo.getNumAllocatableRegs(SrcRC); if (NewUses*SrcSize*SrcRCCount > 2*SrcUses*NewSize*NewRCCount) return false; } - if (DstRC != NewRC && DstSize > NewRCCount) { - unsigned DstRCCount = allocatableRCRegs_[DstRC].count(); + if (DstRC != NewRC && DstSize > ThresSize) { + unsigned DstRCCount = RegClassInfo.getNumAllocatableRegs(DstRC); if (NewUses*DstSize*DstRCCount > 2*DstUses*NewSize*NewRCCount) return false; } @@ -1017,8 +839,7 @@ SimpleRegisterCoalescing::isWinToJoinCrossClass(unsigned SrcReg, /// if the copy was successfully coalesced away. If it is not currently /// possible to coalesce this interval, but it may be possible if other /// things get coalesced, then it returns true by reference in 'Again'. -bool SimpleRegisterCoalescing::JoinCopy(CopyRec &TheCopy, bool &Again) { - MachineInstr *CopyMI = TheCopy.MI; +bool SimpleRegisterCoalescing::JoinCopy(MachineInstr *CopyMI, bool &Again) { Again = false; if (JoinedCopies.count(CopyMI) || ReMatCopies.count(CopyMI)) @@ -1034,30 +855,30 @@ bool SimpleRegisterCoalescing::JoinCopy(CopyRec &TheCopy, bool &Again) { // If they are already joined we continue. if (CP.getSrcReg() == CP.getDstReg()) { + markAsJoined(CopyMI); DEBUG(dbgs() << "\tCopy already coalesced.\n"); return false; // Not coalescable. } - DEBUG(dbgs() << "\tConsidering merging %reg" << CP.getSrcReg()); + DEBUG(dbgs() << "\tConsidering merging " << PrintReg(CP.getSrcReg(), tri_) + << " with " << PrintReg(CP.getDstReg(), tri_, CP.getSubIdx()) + << "\n"); // Enforce policies. if (CP.isPhys()) { - DEBUG(dbgs() <<" with physreg %" << tri_->getName(CP.getDstReg()) << "\n"); - // Only coalesce to allocatable physreg. - if (!allocatableRegs_[CP.getDstReg()]) { - DEBUG(dbgs() << "\tRegister is an unallocatable physreg.\n"); - return false; // Not coalescable. + if (!shouldJoinPhys(CP)) { + // Before giving up coalescing, if definition of source is defined by + // trivial computation, try rematerializing it. + if (!CP.isFlipped() && + ReMaterializeTrivialDef(li_->getInterval(CP.getSrcReg()), true, + CP.getDstReg(), 0, CopyMI)) + return true; + return false; } } else { - DEBUG({ - dbgs() << " with reg%" << CP.getDstReg(); - if (CP.getSubIdx()) - dbgs() << ":" << tri_->getSubRegIndexName(CP.getSubIdx()); - dbgs() << " to " << CP.getNewRC()->getName() << "\n"; - }); - // Avoid constraining virtual register regclass too much. 
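// The use-density test above in isolation, with one worked set of numbers.
// "Density" here is roughly uses per instruction, scaled down by how many
// allocatable registers the class offers; the cross-class join is rejected
// when the merged interval's density would be more than double the source's.
#include <cstdio>

static bool densityMoreThanDoubles(unsigned OldUses, unsigned OldSize,
                                   unsigned OldRCCount, unsigned NewUses,
                                   unsigned NewSize, unsigned NewRCCount) {
  // Cross-multiplied form of
  //   NewUses / (NewSize * NewRCCount)  >  2 * OldUses / (OldSize * OldRCCount)
  return NewUses * OldSize * OldRCCount > 2 * OldUses * NewSize * NewRCCount;
}

int main() {
  // Source: 10 uses over 40 instructions, 16 allocatable registers.
  // Merged: 14 uses over 50 instructions, but only 8 allocatable registers.
  bool Reject = densityMoreThanDoubles(10, 40, 16, 14, 50, 8);
  std::printf("reject cross-class join: %s\n", Reject ? "yes" : "no");   // yes
  return 0;
}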
if (CP.isCrossClass()) { + DEBUG(dbgs() << "\tCross-class to " << CP.getNewRC()->getName() << ".\n"); if (DisableCrossClassJoin) { DEBUG(dbgs() << "\tCross-class joins disabled.\n"); return false; @@ -1066,8 +887,7 @@ bool SimpleRegisterCoalescing::JoinCopy(CopyRec &TheCopy, bool &Again) { mri_->getRegClass(CP.getSrcReg()), mri_->getRegClass(CP.getDstReg()), CP.getNewRC())) { - DEBUG(dbgs() << "\tAvoid coalescing to constrained register class: " - << CP.getNewRC()->getName() << ".\n"); + DEBUG(dbgs() << "\tAvoid coalescing to constrained register class.\n"); Again = true; // May be possible to coalesce later. return false; } @@ -1079,47 +899,6 @@ bool SimpleRegisterCoalescing::JoinCopy(CopyRec &TheCopy, bool &Again) { CP.flip(); } - // We need to be careful about coalescing a source physical register with a - // virtual register. Once the coalescing is done, it cannot be broken and - // these are not spillable! If the destination interval uses are far away, - // think twice about coalescing them! - // FIXME: Why are we skipping this test for partial copies? - // CodeGen/X86/phys_subreg_coalesce-3.ll needs it. - if (!CP.isPartial() && CP.isPhys()) { - LiveInterval &JoinVInt = li_->getInterval(CP.getSrcReg()); - - // Don't join with physregs that have a ridiculous number of live - // ranges. The data structure performance is really bad when that - // happens. - if (li_->hasInterval(CP.getDstReg()) && - li_->getInterval(CP.getDstReg()).ranges.size() > 1000) { - mri_->setRegAllocationHint(CP.getSrcReg(), 0, CP.getDstReg()); - ++numAborts; - DEBUG(dbgs() - << "\tPhysical register live interval too complicated, abort!\n"); - return false; - } - - const TargetRegisterClass *RC = mri_->getRegClass(CP.getSrcReg()); - unsigned Threshold = allocatableRCRegs_[RC].count() * 2; - unsigned Length = li_->getApproximateInstructionCount(JoinVInt); - if (Length > Threshold && - std::distance(mri_->use_nodbg_begin(CP.getSrcReg()), - mri_->use_nodbg_end()) * Threshold < Length) { - // Before giving up coalescing, if definition of source is defined by - // trivial computation, try rematerializing it. - if (!CP.isFlipped() && - ReMaterializeTrivialDef(JoinVInt, CP.getDstReg(), 0, CopyMI)) - return true; - - mri_->setRegAllocationHint(CP.getSrcReg(), 0, CP.getDstReg()); - ++numAborts; - DEBUG(dbgs() << "\tMay tie down a physical register, abort!\n"); - Again = true; // May be possible to coalesce later. - return false; - } - } - // Okay, attempt to join these two intervals. On failure, this returns false. // Otherwise, if one of the intervals being joined is a physreg, this method // always canonicalizes DstInt to be it. The output "SrcInt" will not have @@ -1130,7 +909,7 @@ bool SimpleRegisterCoalescing::JoinCopy(CopyRec &TheCopy, bool &Again) { // If definition of source is defined by trivial computation, try // rematerializing it. if (!CP.isFlipped() && - ReMaterializeTrivialDef(li_->getInterval(CP.getSrcReg()), + ReMaterializeTrivialDef(li_->getInterval(CP.getSrcReg()), true, CP.getDstReg(), 0, CopyMI)) return true; @@ -1138,7 +917,7 @@ bool SimpleRegisterCoalescing::JoinCopy(CopyRec &TheCopy, bool &Again) { if (!CP.isPartial()) { if (AdjustCopiesBackFrom(CP, CopyMI) || RemoveCopyByCommutingDef(CP, CopyMI)) { - JoinedCopies.insert(CopyMI); + markAsJoined(CopyMI); DEBUG(dbgs() << "\tTrivial!\n"); return true; } @@ -1158,7 +937,7 @@ bool SimpleRegisterCoalescing::JoinCopy(CopyRec &TheCopy, bool &Again) { } // Remember to delete the copy instruction. 
- JoinedCopies.insert(CopyMI); + markAsJoined(CopyMI); UpdateRegDefsUses(CP); @@ -1313,7 +1092,7 @@ bool SimpleRegisterCoalescing::JoinIntervals(CoalescerPair &CP) { for (LiveInterval::vni_iterator i = LHS.vni_begin(), e = LHS.vni_end(); i != e; ++i) { VNInfo *VNI = *i; - if (VNI->isUnused() || VNI->getCopy() == 0) // Src not defined by a copy? + if (VNI->isUnused() || !VNI->isDefByCopy()) // Src not defined by a copy? continue; // Never join with a register that has EarlyClobber redefs. @@ -1337,7 +1116,7 @@ bool SimpleRegisterCoalescing::JoinIntervals(CoalescerPair &CP) { for (LiveInterval::vni_iterator i = RHS.vni_begin(), e = RHS.vni_end(); i != e; ++i) { VNInfo *VNI = *i; - if (VNI->isUnused() || VNI->getCopy() == 0) // Src not defined by a copy? + if (VNI->isUnused() || !VNI->isDefByCopy()) // Src not defined by a copy? continue; // Never join with a register that has EarlyClobber redefs. @@ -1488,12 +1267,12 @@ namespace { } void SimpleRegisterCoalescing::CopyCoalesceInMBB(MachineBasicBlock *MBB, - std::vector &TryAgain) { + std::vector &TryAgain) { DEBUG(dbgs() << MBB->getName() << ":\n"); - std::vector VirtCopies; - std::vector PhysCopies; - std::vector ImpDefCopies; + SmallVector VirtCopies; + SmallVector PhysCopies; + SmallVector ImpDefCopies; for (MachineBasicBlock::iterator MII = MBB->begin(), E = MBB->end(); MII != E;) { MachineInstr *Inst = MII++; @@ -1512,32 +1291,32 @@ void SimpleRegisterCoalescing::CopyCoalesceInMBB(MachineBasicBlock *MBB, bool SrcIsPhys = TargetRegisterInfo::isPhysicalRegister(SrcReg); bool DstIsPhys = TargetRegisterInfo::isPhysicalRegister(DstReg); if (li_->hasInterval(SrcReg) && li_->getInterval(SrcReg).empty()) - ImpDefCopies.push_back(CopyRec(Inst, 0)); + ImpDefCopies.push_back(Inst); else if (SrcIsPhys || DstIsPhys) - PhysCopies.push_back(CopyRec(Inst, 0)); + PhysCopies.push_back(Inst); else - VirtCopies.push_back(CopyRec(Inst, 0)); + VirtCopies.push_back(Inst); } // Try coalescing implicit copies and insert_subreg first, // followed by copies to / from physical registers, then finally copies // from virtual registers to virtual registers. for (unsigned i = 0, e = ImpDefCopies.size(); i != e; ++i) { - CopyRec &TheCopy = ImpDefCopies[i]; + MachineInstr *TheCopy = ImpDefCopies[i]; bool Again = false; if (!JoinCopy(TheCopy, Again)) if (Again) TryAgain.push_back(TheCopy); } for (unsigned i = 0, e = PhysCopies.size(); i != e; ++i) { - CopyRec &TheCopy = PhysCopies[i]; + MachineInstr *TheCopy = PhysCopies[i]; bool Again = false; if (!JoinCopy(TheCopy, Again)) if (Again) TryAgain.push_back(TheCopy); } for (unsigned i = 0, e = VirtCopies.size(); i != e; ++i) { - CopyRec &TheCopy = VirtCopies[i]; + MachineInstr *TheCopy = VirtCopies[i]; bool Again = false; if (!JoinCopy(TheCopy, Again)) if (Again) @@ -1548,7 +1327,7 @@ void SimpleRegisterCoalescing::CopyCoalesceInMBB(MachineBasicBlock *MBB, void SimpleRegisterCoalescing::joinIntervals() { DEBUG(dbgs() << "********** JOINING INTERVALS ***********\n"); - std::vector TryAgainList; + std::vector TryAgainList; if (loopInfo->empty()) { // If there are no loops in the function, join intervals in function order. 
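// A sketch of the per-block scheduling described above: copies are binned so
// that implicit-def copies are tried first, then copies involving a physical
// register, then virtual-to-virtual copies; anything that fails with "Again"
// set is pushed on a try-again list for a later pass.  joinCopy here is an
// illustrative stub, not the real JoinCopy.
#include <cstdio>
#include <vector>

enum class CopyKind { ImpDef, Phys, Virt };

struct ToyCopy { int Id; CopyKind Kind; };

static bool joinCopy(const ToyCopy &C, bool &Again) {
  Again = (C.Id % 2 != 0);             // pretend odd-numbered copies may retry
  return C.Id % 2 == 0;
}

static void coalesceInBlock(const std::vector<ToyCopy> &Block,
                            std::vector<ToyCopy> &TryAgain) {
  std::vector<ToyCopy> Buckets[3];
  for (const ToyCopy &C : Block)
    Buckets[static_cast<int>(C.Kind)].push_back(C);
  for (const std::vector<ToyCopy> &Bucket : Buckets)   // ImpDef, Phys, Virt
    for (const ToyCopy &C : Bucket) {
      bool Again = false;
      if (!joinCopy(C, Again) && Again)
        TryAgain.push_back(C);
    }
}

int main() {
  std::vector<ToyCopy> Block = {{1, CopyKind::Virt}, {2, CopyKind::Phys},
                                {3, CopyKind::ImpDef}};
  std::vector<ToyCopy> TryAgain;
  coalesceInBlock(Block, TryAgain);
  std::printf("copies deferred to the retry list: %zu\n", TryAgain.size()); // 2
  return 0;
}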
for (MachineFunction::iterator I = mf_->begin(), E = mf_->end(); @@ -1582,97 +1361,20 @@ void SimpleRegisterCoalescing::joinIntervals() { ProgressMade = false; for (unsigned i = 0, e = TryAgainList.size(); i != e; ++i) { - CopyRec &TheCopy = TryAgainList[i]; - if (!TheCopy.MI) + MachineInstr *&TheCopy = TryAgainList[i]; + if (!TheCopy) continue; bool Again = false; bool Success = JoinCopy(TheCopy, Again); if (Success || !Again) { - TheCopy.MI = 0; // Mark this one as done. + TheCopy= 0; // Mark this one as done. ProgressMade = true; } } } } -/// Return true if the two specified registers belong to different register -/// classes. The registers may be either phys or virt regs. -bool -SimpleRegisterCoalescing::differingRegisterClasses(unsigned RegA, - unsigned RegB) const { - // Get the register classes for the first reg. - if (TargetRegisterInfo::isPhysicalRegister(RegA)) { - assert(TargetRegisterInfo::isVirtualRegister(RegB) && - "Shouldn't consider two physregs!"); - return !mri_->getRegClass(RegB)->contains(RegA); - } - - // Compare against the regclass for the second reg. - const TargetRegisterClass *RegClassA = mri_->getRegClass(RegA); - if (TargetRegisterInfo::isVirtualRegister(RegB)) { - const TargetRegisterClass *RegClassB = mri_->getRegClass(RegB); - return RegClassA != RegClassB; - } - return !RegClassA->contains(RegB); -} - -/// lastRegisterUse - Returns the last (non-debug) use of the specific register -/// between cycles Start and End or NULL if there are no uses. -MachineOperand * -SimpleRegisterCoalescing::lastRegisterUse(SlotIndex Start, - SlotIndex End, - unsigned Reg, - SlotIndex &UseIdx) const{ - UseIdx = SlotIndex(); - if (TargetRegisterInfo::isVirtualRegister(Reg)) { - MachineOperand *LastUse = NULL; - for (MachineRegisterInfo::use_nodbg_iterator I = mri_->use_nodbg_begin(Reg), - E = mri_->use_nodbg_end(); I != E; ++I) { - MachineOperand &Use = I.getOperand(); - MachineInstr *UseMI = Use.getParent(); - if (UseMI->isIdentityCopy()) - continue; - SlotIndex Idx = li_->getInstructionIndex(UseMI); - // FIXME: Should this be Idx != UseIdx? SlotIndex() will return something - // that compares higher than any other interval. - if (Idx >= Start && Idx < End && Idx >= UseIdx) { - LastUse = &Use; - UseIdx = Idx.getUseIndex(); - } - } - return LastUse; - } - - SlotIndex s = Start; - SlotIndex e = End.getPrevSlot().getBaseIndex(); - while (e >= s) { - // Skip deleted instructions - MachineInstr *MI = li_->getInstructionFromIndex(e); - while (e != SlotIndex() && e.getPrevIndex() >= s && !MI) { - e = e.getPrevIndex(); - MI = li_->getInstructionFromIndex(e); - } - if (e < s || MI == NULL) - return NULL; - - // Ignore identity copies. 
- if (!MI->isIdentityCopy()) - for (unsigned i = 0, NumOps = MI->getNumOperands(); i != NumOps; ++i) { - MachineOperand &Use = MI->getOperand(i); - if (Use.isReg() && Use.isUse() && Use.getReg() && - tri_->regsOverlap(Use.getReg(), Reg)) { - UseIdx = e.getUseIndex(); - return &Use; - } - } - - e = e.getPrevIndex(); - } - - return NULL; -} - void SimpleRegisterCoalescing::releaseMemory() { JoinedCopies.clear(); ReMatCopies.clear(); @@ -1686,6 +1388,7 @@ bool SimpleRegisterCoalescing::runOnMachineFunction(MachineFunction &fn) { tri_ = tm_->getRegisterInfo(); tii_ = tm_->getInstrInfo(); li_ = &getAnalysis(); + ldv_ = &getAnalysis(); AA = &getAnalysis(); loopInfo = &getAnalysis(); @@ -1693,11 +1396,10 @@ bool SimpleRegisterCoalescing::runOnMachineFunction(MachineFunction &fn) { << "********** Function: " << ((Value*)mf_->getFunction())->getName() << '\n'); - allocatableRegs_ = tri_->getAllocatableSet(fn); - for (TargetRegisterInfo::regclass_iterator I = tri_->regclass_begin(), - E = tri_->regclass_end(); I != E; ++I) - allocatableRCRegs_.insert(std::make_pair(*I, - tri_->getAllocatableSet(fn, *I))); + if (VerifyCoalescing) + mf_->verify(this, "Before register coalescing"); + + RegClassInfo.runOnMachineFunction(fn); // Join (coalesce) intervals if requested. if (EnableJoining) { @@ -1726,23 +1428,30 @@ bool SimpleRegisterCoalescing::runOnMachineFunction(MachineFunction &fn) { bool DoDelete = true; assert(MI->isCopyLike() && "Unrecognized copy instruction"); unsigned SrcReg = MI->getOperand(MI->isSubregToReg() ? 2 : 1).getReg(); - if (TargetRegisterInfo::isPhysicalRegister(SrcReg)) + if (TargetRegisterInfo::isPhysicalRegister(SrcReg) && + MI->getNumOperands() > 2) // Do not delete extract_subreg, insert_subreg of physical // registers unless the definition is dead. e.g. // %DO = INSERT_SUBREG %D0, %S0, 1 // or else the scavenger may complain. LowerSubregs will // delete them later. DoDelete = false; - + if (MI->allDefsAreDead()) { - LiveInterval &li = li_->getInterval(SrcReg); - if (!ShortenDeadCopySrcLiveRange(li, MI)) - ShortenDeadCopyLiveRange(li, MI); + if (TargetRegisterInfo::isVirtualRegister(SrcReg) && + li_->hasInterval(SrcReg)) + li_->shrinkToUses(&li_->getInterval(SrcReg)); DoDelete = true; } - if (!DoDelete) + if (!DoDelete) { + // We need the instruction to adjust liveness, so make it a KILL. + if (MI->isSubregToReg()) { + MI->RemoveOperand(3); + MI->RemoveOperand(1); + } + MI->setDesc(tii_->get(TargetOpcode::KILL)); mii = llvm::next(mii); - else { + } else { li_->RemoveMachineInstrFromMaps(MI); mii = mbbi->erase(mii); ++numPeep; @@ -1783,24 +1492,6 @@ bool SimpleRegisterCoalescing::runOnMachineFunction(MachineFunction &fn) { DeadDefs.clear(); } - // If the move will be an identity move delete it - if (MI->isIdentityCopy()) { - unsigned SrcReg = MI->getOperand(1).getReg(); - if (li_->hasInterval(SrcReg)) { - LiveInterval &RegInt = li_->getInterval(SrcReg); - // If def of this move instruction is dead, remove its live range - // from the destination register's live interval. - if (MI->allDefsAreDead()) { - if (!ShortenDeadCopySrcLiveRange(RegInt, MI)) - ShortenDeadCopyLiveRange(RegInt, MI); - } - } - li_->RemoveMachineInstrFromMaps(MI); - mii = mbbi->erase(mii); - ++numPeep; - continue; - } - ++mii; // Check for now unnecessary kill flags. 
@@ -1811,13 +1502,26 @@
         if (!MO.isReg() || !MO.isKill()) continue;
         unsigned reg = MO.getReg();
         if (!reg || !li_->hasInterval(reg)) continue;
-        if (!li_->getInterval(reg).killedAt(DefIdx))
+        if (!li_->getInterval(reg).killedAt(DefIdx)) {
           MO.setIsKill(false);
+          continue;
+        }
+        // When leaving a kill flag on a physreg, check if any subregs should
+        // remain alive.
+        if (!TargetRegisterInfo::isPhysicalRegister(reg))
+          continue;
+        for (const unsigned *SR = tri_->getSubRegisters(reg);
+             unsigned S = *SR; ++SR)
+          if (li_->hasInterval(S) && li_->getInterval(S).liveAt(DefIdx))
+            MI->addRegisterDefined(S, tri_);
       }
     }
   }
 
   DEBUG(dump());
+  DEBUG(ldv_->dump());
+  if (VerifyCoalescing)
+    mf_->verify(this, "After register coalescing");
   return true;
 }
 
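// A final sketch of the kill-flag clean-up in the hunk above: after joining,
// an operand can still carry a kill flag even though the merged live interval
// now extends past that point, so the flag is dropped whenever the interval
// is not actually killed there.  The types and the killedAt query are
// simplified stand-ins for MachineOperand and LiveInterval.
#include <cstdio>
#include <vector>

struct SimpleOperand { unsigned Reg; bool IsKill; };
struct SimpleRange { unsigned Reg, Start, End; };       // live on [Start, End)

static bool killedAt(const std::vector<SimpleRange> &Live, unsigned Reg,
                     unsigned Idx) {
  for (const SimpleRange &R : Live)
    if (R.Reg == Reg && R.Start <= Idx && Idx < R.End)
      return R.End == Idx + 1;         // liveness stops right after this slot
  return false;
}

static void clearStaleKillFlags(std::vector<SimpleOperand> &Ops,
                                const std::vector<SimpleRange> &Live,
                                unsigned Idx) {
  for (SimpleOperand &MO : Ops)
    if (MO.IsKill && !killedAt(Live, MO.Reg, Idx))
      MO.IsKill = false;               // the register lives on; drop the flag
}

int main() {
  std::vector<SimpleRange> Live = {{1, 0, 20}};          // reg 1 live on [0,20)
  std::vector<SimpleOperand> Ops = {{1, true}};          // ...but flagged killed
  clearStaleKillFlags(Ops, Live, /*Idx=*/10);
  std::printf("kill flag after cleanup: %d\n", Ops[0].IsKill ? 1 : 0);   // 0
  return 0;
}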