X-Git-Url: http://plrg.eecs.uci.edu/git/?a=blobdiff_plain;f=lib%2FCodeGen%2FLiveIntervalAnalysis.cpp;h=32c553ff12d0ff6bd3eb7d7e3a004ad487c69f25;hb=cb6404711b7fe6f583480adce8d7e9d5e4b99ae6;hp=fc65f86e2021105bf8f8ac5423df97f833b8d775;hpb=273f7e42994a5bce0614d04d96dbfdf05fd652e5;p=oota-llvm.git

diff --git a/lib/CodeGen/LiveIntervalAnalysis.cpp b/lib/CodeGen/LiveIntervalAnalysis.cpp
index fc65f86e202..32c553ff12d 100644
--- a/lib/CodeGen/LiveIntervalAnalysis.cpp
+++ b/lib/CodeGen/LiveIntervalAnalysis.cpp
@@ -47,7 +47,7 @@ using namespace llvm;
 
 // Hidden options for help debugging.
-static cl::opt DisableReMat("disable-rematerialization", 
+static cl::opt DisableReMat("disable-rematerialization",
                             cl::init(false), cl::Hidden);
 
 STATISTIC(numIntervals , "Number of original intervals");
@@ -55,22 +55,33 @@ STATISTIC(numFolds    , "Number of loads/stores folded into instructions");
 STATISTIC(numSplits   , "Number of intervals split");
 
 char LiveIntervals::ID = 0;
-static RegisterPass X("liveintervals", "Live Interval Analysis");
+INITIALIZE_PASS_BEGIN(LiveIntervals, "liveintervals",
+                "Live Interval Analysis", false, false)
+INITIALIZE_PASS_DEPENDENCY(LiveVariables)
+INITIALIZE_PASS_DEPENDENCY(MachineLoopInfo)
+INITIALIZE_PASS_DEPENDENCY(PHIElimination)
+INITIALIZE_PASS_DEPENDENCY(TwoAddressInstructionPass)
+INITIALIZE_PASS_DEPENDENCY(ProcessImplicitDefs)
+INITIALIZE_PASS_DEPENDENCY(SlotIndexes)
+INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
+INITIALIZE_PASS_END(LiveIntervals, "liveintervals",
+                "Live Interval Analysis", false, false)
 
 void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
   AU.setPreservesCFG();
   AU.addRequired();
   AU.addPreserved();
-  AU.addPreserved();
   AU.addRequired();
-  AU.addPreservedID(MachineLoopInfoID);
+  AU.addPreserved();
+  AU.addRequired();
+  AU.addPreserved();
   AU.addPreservedID(MachineDominatorsID);
-  
+
   if (!StrongPHIElim) {
     AU.addPreservedID(PHIEliminationID);
     AU.addRequiredID(PHIEliminationID);
   }
-  
+
   AU.addRequiredID(TwoAddressInstructionPassID);
   AU.addPreserved();
   AU.addRequired();
@@ -84,7 +95,7 @@ void LiveIntervals::releaseMemory() {
   for (DenseMap::iterator I = r2iMap_.begin(),
        E = r2iMap_.end(); I != E; ++I)
     delete I->second;
-  
+
   r2iMap_.clear();
 
   // Release VNInfo memory regions, VNInfo objects don't need to be dtor'd.
@@ -130,19 +141,7 @@ void LiveIntervals::print(raw_ostream &OS, const Module* ) const {
 
 void LiveIntervals::printInstrs(raw_ostream &OS) const {
   OS << "********** MACHINEINSTRS **********\n";
-
-  for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end();
-       mbbi != mbbe; ++mbbi) {
-    OS << "BB#" << mbbi->getNumber()
-       << ":\t\t# derived from " << mbbi->getName() << "\n";
-    for (MachineBasicBlock::iterator mii = mbbi->begin(),
-           mie = mbbi->end(); mii != mie; ++mii) {
-      if (mii->isDebugValue())
-        OS << "    \t" << *mii;
-      else
-        OS << getInstructionIndex(mii) << '\t' << *mii;
-    }
-  }
+  mf_->print(OS, indexes_);
 }
 
 void LiveIntervals::dumpInstrs() const {
@@ -188,9 +187,9 @@ bool LiveIntervals::conflictsWithPhysReg(const LiveInterval &li,
     const MachineInstr &MI = *I;
 
     // Allow copies to and from li.reg
-    unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
-    if (tii_->isMoveInstr(MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
-      if (SrcReg == li.reg || DstReg == li.reg)
+    if (MI.isCopy())
+      if (MI.getOperand(0).getReg() == li.reg ||
+          MI.getOperand(1).getReg() == li.reg)
         continue;
 
     // Check for operands using reg
@@ -246,15 +245,6 @@ bool LiveIntervals::conflictsWithAliasRef(LiveInterval &li, unsigned Reg,
   return false;
 }
 
-#ifndef NDEBUG
-static void printRegName(unsigned reg, const TargetRegisterInfo* tri_) {
-  if (TargetRegisterInfo::isPhysicalRegister(reg))
-    dbgs() << tri_->getName(reg);
-  else
-    dbgs() << "%reg" << reg;
-}
-#endif
-
 static
 bool MultipleDefsBySameMI(const MachineInstr &MI, unsigned MOIdx) {
   unsigned Reg = MI.getOperand(MOIdx).getReg();
@@ -265,7 +255,7 @@ bool MultipleDefsBySameMI(const MachineInstr &MI, unsigned MOIdx) {
     if (MO.getReg() == Reg && MO.isDef()) {
       assert(MI.getOperand(MOIdx).getSubReg() != MO.getSubReg() &&
              MI.getOperand(MOIdx).getSubReg() &&
-             MO.getSubReg());
+             (MO.getSubReg() || MO.isImplicit()));
       return true;
     }
   }
@@ -274,7 +264,7 @@ bool MultipleDefsBySameMI(const MachineInstr &MI, unsigned MOIdx) {
 
 /// isPartialRedef - Return true if the specified def at the specific index is
 /// partially re-defining the specified live interval. A common case of this is
-/// a definition of the sub-register. 
+/// a definition of the sub-register.
 bool LiveIntervals::isPartialRedef(SlotIndex MIIdx, MachineOperand &MO,
                                    LiveInterval &interval) {
   if (!MO.getSubReg() || MO.isEarlyClobber())
@@ -283,8 +273,8 @@ bool LiveIntervals::isPartialRedef(SlotIndex MIIdx, MachineOperand &MO,
   SlotIndex RedefIndex = MIIdx.getDefIndex();
   const LiveRange *OldLR =
     interval.getLiveRangeContaining(RedefIndex.getUseIndex());
-  if (OldLR->valno->isDefAccurate()) {
-    MachineInstr *DefMI = getInstructionFromIndex(OldLR->valno->def);
+  MachineInstr *DefMI = getInstructionFromIndex(OldLR->valno->def);
+  if (DefMI != 0) {
     return DefMI->findRegisterDefOperandIdx(interval.reg) != -1;
   }
   return false;
@@ -296,10 +286,7 @@ void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
                                              MachineOperand& MO,
                                              unsigned MOIdx,
                                              LiveInterval &interval) {
-  DEBUG({
-      dbgs() << "\t\tregister: ";
-      printRegName(interval.reg, tri_);
-    });
+  DEBUG(dbgs() << "\t\tregister: " << PrintReg(interval.reg, tri_));
 
   // Virtual registers may be defined multiple times (due to phi
   // elimination and 2-addr elimination).  Much of what we do only has to be
@@ -320,20 +307,11 @@ void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
       mi->addRegisterDefined(interval.reg);
 
     MachineInstr *CopyMI = NULL;
-    unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
-    if (mi->isCopyLike() ||
-        tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg)) {
+    if (mi->isCopyLike()) {
       CopyMI = mi;
-
-      // Some of the REG_SEQUENCE lowering in TwoAddressInstrPass creates
-      // implicit defs without really knowing. It shows up as INSERT_SUBREG
-      // using an undefined register.
-      if (mi->isInsertSubreg())
-        mi->getOperand(1).setIsUndef();
     }
 
-    VNInfo *ValNo = interval.getNextValue(defIndex, CopyMI, true,
-                                          VNInfoAllocator);
+    VNInfo *ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);
     assert(ValNo->id == 0 && "First value in interval is not 0?");
 
     // Loop over all of the blocks that the vreg is defined in.  There are
@@ -399,8 +377,9 @@ void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
       // Create interval with one of a NEW value number.  Note that this value
       // number isn't actually defined by an instruction, weird huh? :)
       if (PHIJoin) {
-        ValNo = interval.getNextValue(SlotIndex(Start, true), 0, false,
-                                      VNInfoAllocator);
+        assert(getInstructionFromIndex(Start) == 0 &&
+               "PHI def index points at actual instruction.");
+        ValNo = interval.getNextValue(Start, 0, VNInfoAllocator);
         ValNo->setIsPHIDef(true);
       }
       LiveRange LR(Start, killIdx, ValNo);
@@ -422,8 +401,8 @@ void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
     // def-and-use register operand.
 
     // It may also be partial redef like this:
-    // 80  %reg1041:6 = VSHRNv4i16 %reg1034, 12, pred:14, pred:%reg0 
-    // 120 %reg1041:5 = VSHRNv4i16 %reg1039, 12, pred:14, pred:%reg0 
+    // 80  %reg1041:6 = VSHRNv4i16 %reg1034, 12, pred:14, pred:%reg0
+    // 120 %reg1041:5 = VSHRNv4i16 %reg1039, 12, pred:14, pred:%reg0
     bool PartReDef = isPartialRedef(MIIdx, MO, interval);
     if (PartReDef || mi->isRegTiedToUseOperand(MOIdx)) {
       // If this is a two-address definition, then we have already processed
@@ -446,21 +425,16 @@ void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
 
       // The new value number (#1) is defined by the instruction we claimed
       // defined value #0.
-      VNInfo *ValNo = interval.getNextValue(OldValNo->def, OldValNo->getCopy(),
-                                            false, // update at *
-                                            VNInfoAllocator);
-      ValNo->setFlags(OldValNo->getFlags()); // * <- updating here
+      VNInfo *ValNo = interval.createValueCopy(OldValNo, VNInfoAllocator);
 
       // Value#0 is now defined by the 2-addr instruction.
       OldValNo->def  = RedefIndex;
       OldValNo->setCopy(0);
 
       // A re-def may be a copy. e.g. %reg1030:6 = VMOVD %reg1026, ...
-      unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
-      if (PartReDef && (mi->isCopyLike() ||
-          tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg)))
+      if (PartReDef && mi->isCopyLike())
         OldValNo->setCopy(&*mi);
-      
+
       // Add the new live interval which replaces the range for the input copy.
       LiveRange LR(DefIndex, RedefIndex, ValNo);
       DEBUG(dbgs() << " replace range with " << LR);
@@ -487,12 +461,10 @@ void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
       VNInfo *ValNo;
       MachineInstr *CopyMI = NULL;
-      unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
-      if (mi->isCopyLike() ||
-          tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
+      if (mi->isCopyLike())
         CopyMI = mi;
-      ValNo = interval.getNextValue(defIndex, CopyMI, true, VNInfoAllocator);
-      
+      ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);
+
       SlotIndex killIndex = getMBBEndIdx(mbb);
       LiveRange LR(defIndex, killIndex, ValNo);
       interval.addRange(LR);
@@ -514,10 +486,7 @@ void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
                                               MachineInstr *CopyMI) {
   // A physical register cannot be live across basic block, so its
   // lifetime must end somewhere in its defining basic block.
-  DEBUG({
-      dbgs() << "\t\tregister: ";
-      printRegName(interval.reg, tri_);
-    });
+  DEBUG(dbgs() << "\t\tregister: " << PrintReg(interval.reg, tri_));
 
   SlotIndex baseIndex = MIIdx;
   SlotIndex start = baseIndex.getDefIndex();
@@ -569,10 +538,10 @@ void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
         goto exit;
       }
     }
-    
+
     baseIndex = baseIndex.getNextIndex();
   }
-  
+
   // The only case we should have a dead physreg here without a killing or
   // instruction where we know it's dead is if it is live-in to the function
   // and never used. Another possible case is the implicit use of the
@@ -583,11 +552,11 @@ exit:
   assert(start < end && "did not find end of interval?");
 
   // Already exists? Extend old live interval.
-  LiveInterval::iterator OldLR = interval.FindLiveRangeContaining(start);
-  bool Extend = OldLR != interval.end();
-  VNInfo *ValNo = Extend
-    ? OldLR->valno : interval.getNextValue(start, CopyMI, true, VNInfoAllocator);
-  if (MO.isEarlyClobber() && Extend)
+  VNInfo *ValNo = interval.getVNInfoAt(start);
+  bool Extend = ValNo != 0;
+  if (!Extend)
+    ValNo = interval.getNextValue(start, CopyMI, VNInfoAllocator);
+  if (Extend && MO.isEarlyClobber())
     ValNo->setHasRedefByEC(true);
   LiveRange LR(start, end, ValNo);
   interval.addRange(LR);
@@ -604,9 +573,7 @@ void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
                              getOrCreateInterval(MO.getReg()));
   else if (allocatableRegs_[MO.getReg()]) {
     MachineInstr *CopyMI = NULL;
-    unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
-    if (MI->isCopyLike() ||
-        tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
+    if (MI->isCopyLike())
       CopyMI = MI;
     handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                               getOrCreateInterval(MO.getReg()), CopyMI);
@@ -623,10 +590,7 @@ void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
                                          SlotIndex MIIdx,
                                          LiveInterval &interval, bool isAlias) {
-  DEBUG({
-      dbgs() << "\t\tlivein register: ";
-      printRegName(interval.reg, tri_);
-    });
+  DEBUG(dbgs() << "\t\tlivein register: " << PrintReg(interval.reg, tri_));
 
   // Look for kills, if it reaches a def before it's killed, then it shouldn't
   // be considered a livein.
@@ -684,9 +648,11 @@ void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
     }
   }
 
+  SlotIndex defIdx = getMBBStartIdx(MBB);
+  assert(getInstructionFromIndex(defIdx) == 0 &&
+         "PHI def index points at actual instruction.");
   VNInfo *vni =
-    interval.getNextValue(SlotIndex(getMBBStartIdx(MBB), true),
-                          0, false, VNInfoAllocator);
+    interval.getNextValue(defIdx, 0, VNInfoAllocator);
   vni->setIsPHIDef(true);
   LiveRange LR(start, end, vni);
@@ -698,7 +664,7 @@ void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
 /// registers. for some ordering of the machine instructions [1,N] a
 /// live interval is an interval [i, j) where 1 <= i <= j < N for
 /// which a variable is live
-void LiveIntervals::computeIntervals() { 
+void LiveIntervals::computeIntervals() {
   DEBUG(dbgs() << "********** COMPUTING LIVE INTERVALS **********\n"
                << "********** Function: "
                << ((Value*)mf_->getFunction())->getName() << '\n');
@@ -725,11 +691,11 @@ void LiveIntervals::computeIntervals() {
           handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
                                true);
     }
-    
+
     // Skip over empty initial indices.
     if (getInstructionFromIndex(MIIndex) == 0)
       MIIndex = indexes_->getNextNonNullIndex(MIIndex);
-    
+
     for (MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();
          MI != miEnd; ++MI) {
       DEBUG(dbgs() << MIIndex << "\t" << *MI);
@@ -748,7 +714,7 @@ void LiveIntervals::computeIntervals() {
         else if (MO.isUndef())
           UndefUses.push_back(MO.getReg());
       }
-      
+
       // Move to the next instr slot.
       MIIndex = indexes_->getNextNonNullIndex(MIIndex);
     }
@@ -780,6 +746,28 @@ LiveInterval* LiveIntervals::dupInterval(LiveInterval *li) {
 // Register allocator hooks.
 //
 
+MachineBasicBlock::iterator
+LiveIntervals::getLastSplitPoint(const LiveInterval &li,
+                                 MachineBasicBlock *mbb) {
+  const MachineBasicBlock *lpad = mbb->getLandingPadSuccessor();
+
+  // If li is not live into a landing pad, we can insert spill code before the
+  // first terminator.
+  if (!lpad || !isLiveInToMBB(li, lpad))
+    return mbb->getFirstTerminator();
+
+  // When there is a landing pad, spill code must go before the call instruction
+  // that can throw.
+  MachineBasicBlock::iterator I = mbb->end(), B = mbb->begin();
+  while (I != B) {
+    --I;
+    if (I->getDesc().isCall())
+      return I;
+  }
+  assert(0 && "Block with landing pad successor contains no call instruction");
+  return mbb->getFirstTerminator();
+}
+
 /// getReMatImplicitUse - If the remat definition MI has one (for now, we only
 /// allow one) virtual register operand, then its uses are implicitly using
 /// the register. Returns the virtual register.
@@ -793,7 +781,7 @@ unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
     unsigned Reg = MO.getReg();
     if (Reg == 0 || Reg == li.reg)
       continue;
-    
+
     if (TargetRegisterInfo::isPhysicalRegister(Reg) &&
         !allocatableRegs_[Reg])
       continue;
@@ -812,18 +800,17 @@ unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
 /// which reaches the given instruction also reaches the specified use index.
 bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
                                        SlotIndex UseIdx) const {
-  SlotIndex Index = getInstructionIndex(MI);
-  VNInfo *ValNo = li.FindLiveRangeContaining(Index)->valno;
-  LiveInterval::const_iterator UI = li.FindLiveRangeContaining(UseIdx);
-  return UI != li.end() && UI->valno == ValNo;
+  VNInfo *UValNo = li.getVNInfoAt(UseIdx);
+  return UValNo && UValNo == li.getVNInfoAt(getInstructionIndex(MI));
 }
 
 /// isReMaterializable - Returns true if the definition MI of the specified
 /// val# of the specified interval is re-materializable.
-bool LiveIntervals::isReMaterializable(const LiveInterval &li,
-                                       const VNInfo *ValNo, MachineInstr *MI,
-                                       SmallVectorImpl &SpillIs,
-                                       bool &isLoad) {
+bool
+LiveIntervals::isReMaterializable(const LiveInterval &li,
+                                  const VNInfo *ValNo, MachineInstr *MI,
+                                  const SmallVectorImpl &SpillIs,
+                                  bool &isLoad) {
   if (DisableReMat)
     return false;
@@ -841,7 +828,7 @@ bool LiveIntervals::isReMaterializable(const LiveInterval &li,
          ri != re; ++ri) {
       MachineInstr *UseMI = &*ri;
       SlotIndex UseIdx = getInstructionIndex(UseMI);
-      if (li.FindLiveRangeContaining(UseIdx)->valno != ValNo)
+      if (li.getVNInfoAt(UseIdx) != ValNo)
         continue;
       if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
         return false;
@@ -867,9 +854,10 @@ bool LiveIntervals::isReMaterializable(const LiveInterval &li,
 
 /// isReMaterializable - Returns true if every definition of MI of every
 /// val# of the specified interval is re-materializable.
-bool LiveIntervals::isReMaterializable(const LiveInterval &li,
-                                       SmallVectorImpl &SpillIs,
-                                       bool &isLoad) {
+bool
+LiveIntervals::isReMaterializable(const LiveInterval &li,
+                                  const SmallVectorImpl &SpillIs,
+                                  bool &isLoad) {
   isLoad = false;
   for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
        i != e; ++i) {
@@ -877,9 +865,9 @@ bool LiveIntervals::isReMaterializable(const LiveInterval &li,
     if (VNI->isUnused())
       continue; // Dead val#.
    // Is the def for the val# rematerializable?
-    if (!VNI->isDefAccurate())
-      return false;
     MachineInstr *ReMatDefMI = getInstructionFromIndex(VNI->def);
+    if (!ReMatDefMI)
+      return false;
     bool DefIsLoad = false;
     if (!ReMatDefMI ||
         !isReMaterializable(li, VNI, ReMatDefMI, SpillIs, DefIsLoad))
@@ -917,7 +905,7 @@ static bool FilterFoldedOps(MachineInstr *MI,
   }
   return false;
 }
-                           
+
 /// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
 /// slot / to reg or any rematerialized load into ith operand of specified
@@ -949,22 +937,22 @@ bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
   if (DefMI && (MRInfo & VirtRegMap::isMod))
     return false;
 
-  MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(*mf_, MI, FoldOps, Slot)
-                           : tii_->foldMemoryOperand(*mf_, MI, FoldOps, DefMI);
+  MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(MI, FoldOps, Slot)
+                           : tii_->foldMemoryOperand(MI, FoldOps, DefMI);
   if (fmi) {
     // Remember this instruction uses the spill slot.
     if (isSS) vrm.addSpillSlotUse(Slot, fmi);
 
     // Attempt to fold the memory reference into the instruction. If
     // we can do this, we don't need to insert spill code.
-    MachineBasicBlock &MBB = *MI->getParent();
     if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
       vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
     vrm.transferSpillPts(MI, fmi);
     vrm.transferRestorePts(MI, fmi);
     vrm.transferEmergencySpills(MI, fmi);
     ReplaceMachineInstrInMaps(MI, fmi);
-    MI = MBB.insert(MBB.erase(MI), fmi);
+    MI->eraseFromParent();
+    MI = fmi;
     ++numFolds;
     return true;
   }
@@ -1022,7 +1010,7 @@ void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
     if (!MO.isReg())
       continue;
     unsigned Reg = MO.getReg();
-    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
+    if (!TargetRegisterInfo::isVirtualRegister(Reg))
      continue;
     if (!vrm.isReMaterialized(Reg))
       continue;
@@ -1037,7 +1025,7 @@ void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
 /// for addIntervalsForSpills to rewrite uses / defs for the given live range.
 bool LiveIntervals::
 rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
-                 bool TrySplit, SlotIndex index, SlotIndex end, 
+                 bool TrySplit, SlotIndex index, SlotIndex end,
                  MachineInstr *MI,
                  MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                  unsigned Slot, int LdSlot,
@@ -1056,7 +1044,7 @@ rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
     if (!mop.isReg())
       continue;
     unsigned Reg = mop.getReg();
-    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
+    if (!TargetRegisterInfo::isVirtualRegister(Reg))
       continue;
     if (Reg != li.reg)
       continue;
@@ -1096,7 +1084,7 @@ rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
     // keep the src/dst regs pinned.
     //
     // Keep track of whether we replace a use and/or def so that we can
-    // create the spill interval with the appropriate range. 
+    // create the spill interval with the appropriate range.
     SmallVector Ops;
     tie(HasUse, HasDef) = MI->readsWritesVirtualRegister(Reg, &Ops);
@@ -1152,13 +1140,16 @@ rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
       rewriteImplicitOps(li, MI, NewVReg, vrm);
 
     // Reuse NewVReg for other reads.
+    bool HasEarlyClobber = false;
     for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
       MachineOperand &mopj = MI->getOperand(Ops[j]);
       mopj.setReg(NewVReg);
       if (mopj.isImplicit())
         rewriteImplicitOps(li, MI, NewVReg, vrm);
+      if (mopj.isEarlyClobber())
+        HasEarlyClobber = true;
     }
-            
+
     if (CreatedNewVReg) {
       if (DefIsReMat) {
         vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI);
@@ -1202,7 +1193,7 @@ rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
       if (HasUse) {
         if (CreatedNewVReg) {
           LiveRange LR(index.getLoadIndex(), index.getDefIndex(),
-                       nI.getNextValue(SlotIndex(), 0, false, VNInfoAllocator));
+                       nI.getNextValue(SlotIndex(), 0, VNInfoAllocator));
           DEBUG(dbgs() << " +" << LR);
           nI.addRange(LR);
         } else {
@@ -1215,8 +1206,12 @@ rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
         }
       }
       if (HasDef) {
-        LiveRange LR(index.getDefIndex(), index.getStoreIndex(),
-                     nI.getNextValue(SlotIndex(), 0, false, VNInfoAllocator));
+        // An early clobber starts at the use slot, except for an early clobber
+        // tied to a use operand (yes, that is a thing).
+        LiveRange LR(HasEarlyClobber && !HasUse ?
+                     index.getUseIndex() : index.getDefIndex(),
+                     index.getStoreIndex(),
+                     nI.getNextValue(SlotIndex(), 0, VNInfoAllocator));
         DEBUG(dbgs() << " +" << LR);
         nI.addRange(LR);
       }
@@ -1574,7 +1569,7 @@ LiveIntervals::normalizeSpillWeights(std::vector &NewLIs) {
 
 std::vector LiveIntervals::
 addIntervalsForSpills(const LiveInterval &li,
-                      SmallVectorImpl &SpillIs,
+                      const SmallVectorImpl &SpillIs,
                       const MachineLoopInfo *loopInfo, VirtRegMap &vrm) {
   assert(li.isSpillable() && "attempt to spill already spilled interval!");
@@ -1665,8 +1660,7 @@ addIntervalsForSpills(const LiveInterval &li,
     if (VNI->isUnused())
       continue; // Dead val#.
     // Is the def for the val# rematerializable?
-    MachineInstr *ReMatDefMI = VNI->isDefAccurate()
-      ? getInstructionFromIndex(VNI->def) : 0;
+    MachineInstr *ReMatDefMI = getInstructionFromIndex(VNI->def);
     bool dummy;
     if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, SpillIs, dummy)) {
       // Remember how to remat the def of this val#.
@@ -1698,7 +1692,7 @@ addIntervalsForSpills(const LiveInterval &li,
   if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0) {
     if (vrm.getStackSlot(li.reg) == VirtRegMap::NO_STACK_SLOT)
       Slot = vrm.assignVirt2StackSlot(li.reg);
-    
+
     // This case only occurs when the prealloc splitter has already assigned
     // a stack slot to this vreg.
     else
@@ -1755,7 +1749,7 @@ addIntervalsForSpills(const LiveInterval &li,
           Ops.push_back(j);
         if (MO.isDef())
           continue;
-        if (isReMat || 
+        if (isReMat ||
             (!FoundUse && !alsoFoldARestore(Id, index, VReg,
                                             RestoreMBBs, RestoreIdxes))) {
           // MI has two-address uses of the same register. If the use
@@ -1868,7 +1862,6 @@ addIntervalsForSpills(const LiveInterval &li,
   for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
     LiveInterval *LI = NewLIs[i];
     if (!LI->empty()) {
-      LI->weight /= SlotIndex::NUM * getApproximateInstructionCount(*LI);
       if (!AddedKill.count(LI)) {
         LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
         SlotIndex LastUseIdx = LR->end.getBaseIndex();
@@ -1901,7 +1894,7 @@ bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
 /// getRepresentativeReg - Find the largest super register of the specified
 /// physical register.
 unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
-  // Find the largest super-register that is allocatable. 
+  // Find the largest super-register that is allocatable.
   unsigned BestReg = Reg;
   for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
     unsigned SuperReg = *AS;
@@ -1939,6 +1932,9 @@ bool LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
                                                   unsigned PhysReg, VirtRegMap &vrm) {
   unsigned SpillReg = getRepresentativeReg(PhysReg);
 
+  DEBUG(dbgs() << "spillPhysRegAroundRegDefsUses " << tri_->getName(PhysReg)
+               << " represented by " << tri_->getName(SpillReg) << '\n');
+
   for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
     // If there are registers which alias PhysReg, but which are not a
    // sub-register of the chosen representative super register. Assert
@@ -1950,15 +1946,16 @@ bool LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
   SmallVector PRegs;
   if (hasInterval(SpillReg))
     PRegs.push_back(SpillReg);
-  else {
-    SmallSet Added;
-    for (const unsigned* AS = tri_->getSubRegisters(SpillReg); *AS; ++AS)
-      if (Added.insert(*AS) && hasInterval(*AS)) {
-        PRegs.push_back(*AS);
-        for (const unsigned* ASS = tri_->getSubRegisters(*AS); *ASS; ++ASS)
-          Added.insert(*ASS);
-      }
-  }
+  for (const unsigned *SR = tri_->getSubRegisters(SpillReg); *SR; ++SR)
+    if (hasInterval(*SR))
+      PRegs.push_back(*SR);
+
+  DEBUG({
+    dbgs() << "Trying to spill:";
+    for (unsigned i = 0, e = PRegs.size(); i != e; ++i)
+      dbgs() << ' ' << tri_->getName(PRegs[i]);
+    dbgs() << '\n';
+  });
 
   SmallPtrSet SeenMIs;
   for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
@@ -1969,18 +1966,16 @@ bool LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
       continue;
     SeenMIs.insert(MI);
     SlotIndex Index = getInstructionIndex(MI);
+    bool LiveReg = false;
    for (unsigned i = 0, e = PRegs.size(); i != e; ++i) {
      unsigned PReg = PRegs[i];
       LiveInterval &pli = getInterval(PReg);
       if (!pli.liveAt(Index))
         continue;
-      vrm.addEmergencySpill(PReg, MI);
+      LiveReg = true;
       SlotIndex StartIdx = Index.getLoadIndex();
       SlotIndex EndIdx = Index.getNextIndex().getBaseIndex();
-      if (pli.isInOneLiveRange(StartIdx, EndIdx)) {
-        pli.removeRange(StartIdx, EndIdx);
-        Cut = true;
-      } else {
+      if (!pli.isInOneLiveRange(StartIdx, EndIdx)) {
        std::string msg;
        raw_string_ostream Msg(msg);
        Msg << "Ran out of registers during register allocation!";
@@ -1991,15 +1986,14 @@ bool LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
         }
         report_fatal_error(Msg.str());
       }
-      for (const unsigned* AS = tri_->getSubRegisters(PReg); *AS; ++AS) {
-        if (!hasInterval(*AS))
-          continue;
-        LiveInterval &spli = getInterval(*AS);
-        if (spli.liveAt(Index))
-          spli.removeRange(Index.getLoadIndex(),
-                           Index.getNextIndex().getBaseIndex());
-      }
+      pli.removeRange(StartIdx, EndIdx);
+      LiveReg = true;
     }
+    if (!LiveReg)
+      continue;
+    DEBUG(dbgs() << "Emergency spill around " << Index << '\t' << *MI);
+    vrm.addEmergencySpill(SpillReg, MI);
+    Cut = true;
   }
   return Cut;
 }
@@ -2009,13 +2003,13 @@ LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg,
   LiveInterval& Interval = getOrCreateInterval(reg);
   VNInfo* VN = Interval.getNextValue(
     SlotIndex(getInstructionIndex(startInst).getDefIndex()),
-    startInst, true, getVNInfoAllocator());
+    startInst, getVNInfoAllocator());
   VN->setHasPHIKill(true);
   LiveRange LR(
     SlotIndex(getInstructionIndex(startInst).getDefIndex()),
     getMBBEndIdx(startInst->getParent()), VN);
   Interval.addRange(LR);
-  
+
   return LR;
 }