DEBUG(dbgs() << '\n');
}
-#ifndef NDEBUG
static bool isRegLiveIntoSuccessor(const MachineBasicBlock *MBB, unsigned Reg) {
for (MachineBasicBlock::const_succ_iterator SI = MBB->succ_begin(),
SE = MBB->succ_end();
}
return false;
}
-#endif
void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
MachineBasicBlock::iterator mi,
}
}
-#ifndef NDEBUG
-static bool intervalRangesSane(const LiveInterval& li) {
- if (li.empty()) {
- return true;
- }
-
- SlotIndex lastEnd = li.begin()->start;
- for (LiveInterval::const_iterator lrItr = li.begin(), lrEnd = li.end();
- lrItr != lrEnd; ++lrItr) {
- const LiveRange& lr = *lrItr;
- if (lastEnd > lr.start || lr.start >= lr.end)
- return false;
- lastEnd = lr.end;
- }
-
- return true;
-}
-#endif
-
-template <typename DefSetT>
-static void handleMoveDefs(LiveIntervals& lis, SlotIndex origIdx,
- SlotIndex miIdx, const DefSetT& defs) {
- for (typename DefSetT::const_iterator defItr = defs.begin(),
- defEnd = defs.end();
- defItr != defEnd; ++defItr) {
- unsigned def = *defItr;
- LiveInterval& li = lis.getInterval(def);
- LiveRange* lr = li.getLiveRangeContaining(origIdx.getRegSlot());
- assert(lr != 0 && "No range for def?");
- lr->start = miIdx.getRegSlot();
- lr->valno->def = miIdx.getRegSlot();
- assert(intervalRangesSane(li) && "Broke live interval moving def.");
- }
-}
-
-template <typename DeadDefSetT>
-static void handleMoveDeadDefs(LiveIntervals& lis, SlotIndex origIdx,
- SlotIndex miIdx, const DeadDefSetT& deadDefs) {
- for (typename DeadDefSetT::const_iterator deadDefItr = deadDefs.begin(),
- deadDefEnd = deadDefs.end();
- deadDefItr != deadDefEnd; ++deadDefItr) {
- unsigned deadDef = *deadDefItr;
- LiveInterval& li = lis.getInterval(deadDef);
- LiveRange* lr = li.getLiveRangeContaining(origIdx.getRegSlot());
- assert(lr != 0 && "No range for dead def?");
- assert(lr->start == origIdx.getRegSlot() && "Bad dead range start?");
- assert(lr->end == origIdx.getDeadSlot() && "Bad dead range end?");
- assert(lr->valno->def == origIdx.getRegSlot() && "Bad dead valno def.");
- LiveRange t(*lr);
- t.start = miIdx.getRegSlot();
- t.valno->def = miIdx.getRegSlot();
- t.end = miIdx.getDeadSlot();
- li.removeRange(*lr);
- li.addRange(t);
- assert(intervalRangesSane(li) && "Broke live interval moving dead def.");
- }
-}
-
-template <typename ECSetT>
-static void handleMoveECs(LiveIntervals& lis, SlotIndex origIdx,
- SlotIndex miIdx, const ECSetT& ecs) {
- for (typename ECSetT::const_iterator ecItr = ecs.begin(), ecEnd = ecs.end();
- ecItr != ecEnd; ++ecItr) {
- unsigned ec = *ecItr;
- LiveInterval& li = lis.getInterval(ec);
- LiveRange* lr = li.getLiveRangeContaining(origIdx.getRegSlot(true));
- assert(lr != 0 && "No range for early clobber?");
- assert(lr->start == origIdx.getRegSlot(true) && "Bad EC range start?");
- assert(lr->end == origIdx.getRegSlot() && "Bad EC range end.");
- assert(lr->valno->def == origIdx.getRegSlot(true) && "Bad EC valno def.");
- LiveRange t(*lr);
- t.start = miIdx.getRegSlot(true);
- t.valno->def = miIdx.getRegSlot(true);
- t.end = miIdx.getRegSlot();
- li.removeRange(*lr);
- li.addRange(t);
- assert(intervalRangesSane(li) && "Broke live interval moving EC.");
- }
-}
-
-static void moveKillFlags(unsigned reg, SlotIndex oldIdx, SlotIndex newIdx,
- LiveIntervals& lis,
- const TargetRegisterInfo& tri) {
- MachineInstr* oldKillMI = lis.getInstructionFromIndex(oldIdx);
- MachineInstr* newKillMI = lis.getInstructionFromIndex(newIdx);
- assert(oldKillMI->killsRegister(reg) && "Old 'kill' instr isn't a kill.");
- assert(!newKillMI->killsRegister(reg) && "New kill instr is already a kill.");
- oldKillMI->clearRegisterKills(reg, &tri);
- newKillMI->addRegisterKilled(reg, &tri);
-}
-
-template <typename UseSetT>
-static void handleMoveUses(const MachineBasicBlock *mbb,
- const MachineRegisterInfo& mri,
- const TargetRegisterInfo& tri,
- const BitVector& reservedRegs, LiveIntervals &lis,
- SlotIndex origIdx, SlotIndex miIdx,
- const UseSetT &uses) {
- bool movingUp = miIdx < origIdx;
- for (typename UseSetT::const_iterator usesItr = uses.begin(),
- usesEnd = uses.end();
- usesItr != usesEnd; ++usesItr) {
- unsigned use = *usesItr;
- if (!lis.hasInterval(use))
- continue;
- if (TargetRegisterInfo::isPhysicalRegister(use) && reservedRegs.test(use))
- continue;
- LiveInterval& li = lis.getInterval(use);
- LiveRange* lr = li.getLiveRangeBefore(origIdx.getRegSlot());
- assert(lr != 0 && "No range for use?");
- bool liveThrough = lr->end > origIdx.getRegSlot();
-
- if (movingUp) {
- // If moving up and liveThrough - nothing to do.
- // If not live through we need to extend the range to the last use
- // between the old location and the new one.
- if (!liveThrough) {
- SlotIndex lastUseInRange = miIdx.getRegSlot();
- for (MachineRegisterInfo::use_iterator useI = mri.use_begin(use),
- useE = mri.use_end();
- useI != useE; ++useI) {
- const MachineInstr* mopI = &*useI;
- const MachineOperand& mop = useI.getOperand();
- SlotIndex instSlot = lis.getSlotIndexes()->getInstructionIndex(mopI);
- SlotIndex opSlot = instSlot.getRegSlot(mop.isEarlyClobber());
- if (opSlot > lastUseInRange && opSlot < origIdx)
- lastUseInRange = opSlot;
- }
-
- // If we found a new instr endpoint update the kill flags.
- if (lastUseInRange != miIdx.getRegSlot())
- moveKillFlags(use, miIdx, lastUseInRange, lis, tri);
-
- // Fix up the range end.
- lr->end = lastUseInRange;
- }
- } else {
- // Moving down is easy - the existing live range end tells us where
- // the last kill is.
- if (!liveThrough) {
- // Easy fix - just update the range endpoint.
- lr->end = miIdx.getRegSlot();
- } else {
- bool liveOut = lr->end >= lis.getSlotIndexes()->getMBBEndIdx(mbb);
- if (!liveOut && miIdx.getRegSlot() > lr->end) {
- moveKillFlags(use, lr->end, miIdx, lis, tri);
- lr->end = miIdx.getRegSlot();
- }
- }
- }
- assert(intervalRangesSane(li) && "Broke live interval moving use.");
- }
-}
-
-
-
-void LiveIntervals::handleMove(MachineInstr* mi) {
- SlotIndex origIdx = indexes_->getInstructionIndex(mi);
- indexes_->removeMachineInstrFromMaps(mi);
- SlotIndex miIdx = mi->isInsideBundle() ?
- indexes_->getInstructionIndex(mi->getBundleStart()) :
- indexes_->insertMachineInstrInMaps(mi);
- MachineBasicBlock* mbb = mi->getParent();
- assert(getMBBStartIdx(mbb) <= origIdx && origIdx < getMBBEndIdx(mbb) &&
- "Cannot handle moves across basic block boundaries.");
- assert(!mi->isBundled() && "Can't handle bundled instructions yet.");
-
- // Pick the direction.
- bool movingUp = miIdx < origIdx;
-
- // Collect the operands.
- DenseSet<unsigned> uses, defs, deadDefs, ecs;
- for (MachineInstr::mop_iterator mopItr = mi->operands_begin(),
- mopEnd = mi->operands_end();
- mopItr != mopEnd; ++mopItr) {
- const MachineOperand& mop = *mopItr;
-
- if (!mop.isReg() || mop.getReg() == 0)
- continue;
- unsigned reg = mop.getReg();
-
- if (mop.readsReg() && !ecs.count(reg)) {
- uses.insert(reg);
- }
- if (mop.isDef()) {
- if (mop.isDead()) {
- assert(!defs.count(reg) && "Can't mix defs with dead-defs.");
- deadDefs.insert(reg);
- } else if (mop.isEarlyClobber()) {
- uses.erase(reg);
- ecs.insert(reg);
- } else {
- assert(!deadDefs.count(reg) && "Can't mix defs with dead-defs.");
- defs.insert(reg);
- }
- }
- }
-
- if (movingUp) {
- handleMoveUses(mbb, *mri_, *tri_, reservedRegs_, *this, origIdx, miIdx, uses);
- handleMoveECs(*this, origIdx, miIdx, ecs);
- handleMoveDeadDefs(*this, origIdx, miIdx, deadDefs);
- handleMoveDefs(*this, origIdx, miIdx, defs);
- } else {
- handleMoveDefs(*this, origIdx, miIdx, defs);
- handleMoveDeadDefs(*this, origIdx, miIdx, deadDefs);
- handleMoveECs(*this, origIdx, miIdx, ecs);
- handleMoveUses(mbb, *mri_, *tri_, reservedRegs_, *this, origIdx, miIdx, uses);
- }
-}
-
/// getReMatImplicitUse - If the remat definition MI has one (for now, we only
/// allow one) virtual register operand, then its uses are implicitly using
/// the register. Returns the virtual register.
return Found;
}
}
+
+//===----------------------------------------------------------------------===//
+//                              HMEditor class.
+//===----------------------------------------------------------------------===//
+
+/// HMEditor is a toolkit used by handleMove to trim or extend live intervals.
+/// Each live range touching the moved instruction is classified as entering
+/// (read at the instruction), internal (born and dying within it), or
+/// exiting (defined by it and living on), then each class is moved from
+/// OldIdx to NewIdx appropriately.
+class LiveIntervals::HMEditor {
+private:
+  LiveIntervals& LIS;
+  const MachineRegisterInfo& MRI;
+  const TargetRegisterInfo& TRI;
+  SlotIndex NewIdx; // The instruction's new position.
+
+  typedef std::pair<LiveInterval*, LiveRange*> IntRangePair;
+  typedef DenseSet<IntRangePair> RangeSet;
+
+public:
+  HMEditor(LiveIntervals& LIS, const MachineRegisterInfo& MRI,
+           const TargetRegisterInfo& TRI, SlotIndex NewIdx)
+    : LIS(LIS), MRI(MRI), TRI(TRI), NewIdx(NewIdx) {}
+
+  // Update intervals for all operands of MI from OldIdx to NewIdx.
+  // This assumes that MI used to be at OldIdx, and now resides at
+  // NewIdx.
+  void moveAllOperandsFrom(MachineInstr* MI, SlotIndex OldIdx) {
+    // Collect the operands, bucketed by how their ranges relate to OldIdx.
+    RangeSet Entering, Internal, Exiting;
+    bool hasRegMaskOp = false;
+    collectRanges(MI, Entering, Internal, Exiting, hasRegMaskOp, OldIdx);
+
+    moveAllEnteringFrom(OldIdx, Entering);
+    moveAllInternalFrom(OldIdx, Internal);
+    moveAllExitingFrom(OldIdx, Exiting);
+
+    if (hasRegMaskOp)
+      updateRegMaskSlots(OldIdx);
+
+#ifndef NDEBUG
+    // std::for_each takes the functor by value and returns the copy it
+    // mutated, so the result must be assigned back - otherwise the local
+    // validator never accumulates any state and rangesOk() is vacuously
+    // true.
+    LIValidator validator;
+    validator = std::for_each(Entering.begin(), Entering.end(), validator);
+    validator = std::for_each(Internal.begin(), Internal.end(), validator);
+    validator = std::for_each(Exiting.begin(), Exiting.end(), validator);
+    assert(validator.rangesOk() && "moveOperandsFrom broke liveness.");
+#endif
+
+  }
+
+private:
+
+#ifndef NDEBUG
+  // Functor that checks each visited interval's ranges are sorted,
+  // non-overlapping and non-empty. Offending intervals are collected in
+  // Bogus; rangesOk() reports whether any were found.
+  class LIValidator {
+  private:
+    DenseSet<const LiveInterval*> Checked, Bogus;
+  public:
+    void operator()(const IntRangePair& P) {
+      const LiveInterval* LI = P.first;
+      if (Checked.count(LI))
+        return; // Each interval only needs validating once.
+      Checked.insert(LI);
+      if (LI->empty())
+        return;
+      SlotIndex LastEnd = LI->begin()->start;
+      for (LiveInterval::const_iterator LRI = LI->begin(), LRE = LI->end();
+           LRI != LRE; ++LRI) {
+        const LiveRange& LR = *LRI;
+        if (LastEnd > LR.start || LR.start >= LR.end)
+          Bogus.insert(LI);
+        LastEnd = LR.end;
+      }
+    }
+
+    bool rangesOk() const {
+      return Bogus.empty();
+    }
+  };
+#endif
+
+  // Collect IntRangePairs for all operands of MI that may need fixing.
+  // Treats MI's index as OldIdx (regardless of what it is in SlotIndexes'
+  // maps).
+  void collectRanges(MachineInstr* MI, RangeSet& Entering, RangeSet& Internal,
+                     RangeSet& Exiting, bool& hasRegMaskOp, SlotIndex OldIdx) {
+    hasRegMaskOp = false;
+    for (MachineInstr::mop_iterator MOI = MI->operands_begin(),
+                                    MOE = MI->operands_end();
+         MOI != MOE; ++MOI) {
+      const MachineOperand& MO = *MOI;
+
+      if (MO.isRegMask()) {
+        hasRegMaskOp = true;
+        continue;
+      }
+
+      if (!MO.isReg() || MO.getReg() == 0)
+        continue;
+
+      unsigned Reg = MO.getReg();
+
+      // TODO: Currently we're skipping uses that are reserved or have no
+      // interval, but we're not updating their kills. This should be
+      // fixed.
+      if (!LIS.hasInterval(Reg) ||
+          (TargetRegisterInfo::isPhysicalRegister(Reg) && LIS.isReserved(Reg)))
+        continue;
+
+      LiveInterval* LI = &LIS.getInterval(Reg);
+
+      if (MO.readsReg()) {
+        LiveRange* LR = LI->getLiveRangeContaining(OldIdx);
+        if (LR != 0)
+          Entering.insert(std::make_pair(LI, LR));
+      }
+      if (MO.isDef()) {
+        if (MO.isEarlyClobber()) {
+          // An EC def's range starts at the EC slot; it is internal if it
+          // dies within the instruction, exiting otherwise.
+          LiveRange* LR = LI->getLiveRangeContaining(OldIdx.getRegSlot(true));
+          assert(LR != 0 && "No EC range?");
+          if (LR->end > OldIdx.getDeadSlot())
+            Exiting.insert(std::make_pair(LI, LR));
+          else
+            Internal.insert(std::make_pair(LI, LR));
+        } else if (MO.isDead()) {
+          // A dead def's range lives only within the instruction's slots.
+          LiveRange* LR = LI->getLiveRangeContaining(OldIdx.getRegSlot());
+          assert(LR != 0 && "No dead-def range?");
+          Internal.insert(std::make_pair(LI, LR));
+        } else {
+          LiveRange* LR = LI->getLiveRangeContaining(OldIdx.getDeadSlot());
+          assert(LR && LR->end > OldIdx.getDeadSlot() &&
+                 "Non-dead-def should have live range exiting.");
+          Exiting.insert(std::make_pair(LI, LR));
+        }
+      }
+    }
+  }
+
+  // Transfer the kill flag for 'reg' from the instruction at OldIdx to the
+  // one at newKillIdx. No-op if the old instruction carries no kill flag.
+  void moveKillFlags(unsigned reg, SlotIndex OldIdx, SlotIndex newKillIdx) {
+    MachineInstr* OldKillMI = LIS.getInstructionFromIndex(OldIdx);
+    if (!OldKillMI->killsRegister(reg))
+      return; // Bail out if we don't have kill flags on the old register.
+    MachineInstr* NewKillMI = LIS.getInstructionFromIndex(newKillIdx);
+    assert(!NewKillMI->killsRegister(reg) && "New kill instr is already a kill.");
+    OldKillMI->clearRegisterKills(reg, &TRI);
+    NewKillMI->addRegisterKilled(reg, &TRI);
+  }
+
+  // Rewrite the RegMaskSlots entry for OldIdx to NewIdx, keeping the list
+  // sorted.
+  void updateRegMaskSlots(SlotIndex OldIdx) {
+    SmallVectorImpl<SlotIndex>::iterator RI =
+      std::lower_bound(LIS.RegMaskSlots.begin(), LIS.RegMaskSlots.end(),
+                       OldIdx);
+    assert(*RI == OldIdx && "No RegMask at OldIdx.");
+    *RI = NewIdx;
+    // NOTE(review): prior(RI) and next(RI) are dereferenced unconditionally;
+    // if the moved regmask is the first or last entry this reads outside
+    // the vector in asserts builds - confirm callers guarantee neighbours.
+    assert(*prior(RI) < *RI && *RI < *next(RI) &&
+           "RegSlots out of order. Did you move one call across another?");
+  }
+
+  // Return the last use of reg between NewIdx and OldIdx.
+  SlotIndex findLastUseBefore(unsigned Reg, SlotIndex OldIdx) {
+    SlotIndex LastUse = NewIdx;
+    for (MachineRegisterInfo::use_nodbg_iterator
+           UI = MRI.use_nodbg_begin(Reg),
+           UE = MRI.use_nodbg_end();
+         UI != UE; UI.skipInstruction()) {
+      const MachineInstr* MI = &*UI;
+      SlotIndex InstSlot = LIS.getSlotIndexes()->getInstructionIndex(MI);
+      if (InstSlot > LastUse && InstSlot < OldIdx)
+        LastUse = InstSlot;
+    }
+    return LastUse;
+  }
+
+  // Instruction moved up: trim an entering range that ended at OldIdx back
+  // to the last remaining use before OldIdx.
+  void moveEnteringUpFrom(SlotIndex OldIdx, IntRangePair& P) {
+    LiveInterval* LI = P.first;
+    LiveRange* LR = P.second;
+    bool LiveThrough = LR->end > OldIdx.getRegSlot();
+    if (LiveThrough)
+      return; // Range extends past the instruction - nothing to trim.
+    SlotIndex LastUse = findLastUseBefore(LI->reg, OldIdx);
+    if (LastUse != NewIdx)
+      moveKillFlags(LI->reg, NewIdx, LastUse);
+    LR->end = LastUse.getRegSlot(LR->end.isEarlyClobber());
+  }
+
+  // Instruction moved down: extend an entering range so it still covers the
+  // instruction's new position, moving the kill flag with it.
+  void moveEnteringDownFrom(SlotIndex OldIdx, IntRangePair& P) {
+    LiveInterval* LI = P.first;
+    LiveRange* LR = P.second;
+    if (NewIdx > LR->end) {
+      moveKillFlags(LI->reg, LR->end, NewIdx);
+      LR->end = NewIdx.getRegSlot();
+    }
+  }
+
+  void moveAllEnteringFrom(SlotIndex OldIdx, RangeSet& Entering) {
+    bool GoingUp = NewIdx < OldIdx;
+
+    if (GoingUp) {
+      for (RangeSet::iterator EI = Entering.begin(), EE = Entering.end();
+           EI != EE; ++EI)
+        moveEnteringUpFrom(OldIdx, *EI);
+    } else {
+      for (RangeSet::iterator EI = Entering.begin(), EE = Entering.end();
+           EI != EE; ++EI)
+        moveEnteringDownFrom(OldIdx, *EI);
+    }
+  }
+
+  // Move a range that lives entirely within the instruction (a dead def, or
+  // an early clobber that dies here) to the corresponding slots at NewIdx.
+  void moveInternalFrom(SlotIndex OldIdx, IntRangePair& P) {
+    LiveInterval* LI = P.first;
+    LiveRange* LR = P.second;
+    assert(OldIdx < LR->start && LR->start < OldIdx.getDeadSlot() &&
+           LR->end <= OldIdx.getDeadSlot() &&
+           "Range should be internal to OldIdx.");
+    LiveRange Tmp(*LR);
+    Tmp.start = NewIdx.getRegSlot(LR->start.isEarlyClobber());
+    Tmp.valno->def = Tmp.start;
+    Tmp.end = LR->end.isDead() ? NewIdx.getDeadSlot() : NewIdx.getRegSlot();
+    LI->removeRange(*LR);
+    LI->addRange(Tmp);
+  }
+
+  void moveAllInternalFrom(SlotIndex OldIdx, RangeSet& Internal) {
+    for (RangeSet::iterator II = Internal.begin(), IE = Internal.end();
+         II != IE; ++II)
+      moveInternalFrom(OldIdx, *II);
+  }
+
+  // Move the start of a range that is defined at OldIdx and extends beyond
+  // it; only the def end of the range moves.
+  void moveExitingFrom(SlotIndex OldIdx, IntRangePair& P) {
+    LiveRange* LR = P.second;
+    assert(OldIdx < LR->start && LR->start < OldIdx.getDeadSlot() &&
+           "Range should start in OldIdx.");
+    assert(LR->end > OldIdx.getDeadSlot() && "Range should exit OldIdx.");
+    SlotIndex NewStart = NewIdx.getRegSlot(LR->start.isEarlyClobber());
+    LR->start = NewStart;
+    LR->valno->def = NewStart;
+  }
+
+  void moveAllExitingFrom(SlotIndex OldIdx, RangeSet& Exiting) {
+    for (RangeSet::iterator EI = Exiting.begin(), EE = Exiting.end();
+         EI != EE; ++EI)
+      moveExitingFrom(OldIdx, *EI);
+  }
+
+};
+
+/// handleMove - Update the live intervals of all operands of MI to reflect
+/// its move within the basic block. Reads MI's old index from the
+/// SlotIndexes maps, then re-registers MI at its current position.
+void LiveIntervals::handleMove(MachineInstr* MI) {
+  SlotIndex OldIndex = indexes_->getInstructionIndex(MI);
+  indexes_->removeMachineInstrFromMaps(MI);
+  // An instruction inside a bundle shares the bundle start's index;
+  // otherwise re-insert MI to obtain an index for its new position.
+  SlotIndex NewIndex = MI->isInsideBundle() ?
+    indexes_->getInstructionIndex(MI->getBundleStart()) :
+    indexes_->insertMachineInstrInMaps(MI);
+  assert(getMBBStartIdx(MI->getParent()) <= OldIndex &&
+         OldIndex < getMBBEndIdx(MI->getParent()) &&
+         "Cannot handle moves across basic block boundaries.");
+  assert(!MI->isBundled() && "Can't handle bundled instructions yet.");
+
+  // Delegate the per-operand live range surgery to HMEditor.
+  HMEditor HME(*this, *mri_, *tri_, NewIndex);
+  HME.moveAllOperandsFrom(MI, OldIndex);
+}