1 //===-- RegAllocGreedy.cpp - greedy register allocator --------------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file defines the RAGreedy function pass for register allocation in optimized builds.
13 //===----------------------------------------------------------------------===//
15 #define DEBUG_TYPE "regalloc"
16 #include "AllocationOrder.h"
17 #include "InterferenceCache.h"
18 #include "LiveDebugVariables.h"
19 #include "LiveRangeEdit.h"
20 #include "RegAllocBase.h"
22 #include "SpillPlacement.h"
24 #include "VirtRegMap.h"
25 #include "llvm/ADT/SparseBitVector.h"
26 #include "llvm/ADT/Statistic.h"
27 #include "llvm/Analysis/AliasAnalysis.h"
28 #include "llvm/Function.h"
29 #include "llvm/PassAnalysisSupport.h"
30 #include "llvm/CodeGen/CalcSpillWeights.h"
31 #include "llvm/CodeGen/EdgeBundles.h"
32 #include "llvm/CodeGen/LiveIntervalAnalysis.h"
33 #include "llvm/CodeGen/LiveStackAnalysis.h"
34 #include "llvm/CodeGen/MachineDominators.h"
35 #include "llvm/CodeGen/MachineFunctionPass.h"
36 #include "llvm/CodeGen/MachineLoopInfo.h"
37 #include "llvm/CodeGen/MachineLoopRanges.h"
38 #include "llvm/CodeGen/MachineRegisterInfo.h"
39 #include "llvm/CodeGen/Passes.h"
40 #include "llvm/CodeGen/RegAllocRegistry.h"
41 #include "llvm/CodeGen/RegisterCoalescer.h"
42 #include "llvm/Target/TargetOptions.h"
43 #include "llvm/Support/Debug.h"
44 #include "llvm/Support/ErrorHandling.h"
45 #include "llvm/Support/raw_ostream.h"
46 #include "llvm/Support/Timer.h"
52 STATISTIC(NumGlobalSplits, "Number of split global live ranges");
53 STATISTIC(NumLocalSplits, "Number of split local live ranges");
54 STATISTIC(NumEvicted, "Number of interferences evicted");
56 static RegisterRegAlloc greedyRegAlloc("greedy", "greedy register allocator",
57 createGreedyRegisterAllocator);
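// Registering the factory makes this allocator selectable on the command line
// with -regalloc=greedy.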
60 class RAGreedy : public MachineFunctionPass,
62 private LiveRangeEdit::Delegate {
66 BitVector ReservedRegs;
71 MachineDominatorTree *DomTree;
72 MachineLoopInfo *Loops;
73 MachineLoopRanges *LoopRanges;
75 SpillPlacement *SpillPlacer;
78 std::auto_ptr<Spiller> SpillerInstance;
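// Priority queue of (priority, virtual register) pairs; the entry with the
// highest priority is dequeued first.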
79 std::priority_queue<std::pair<unsigned, unsigned> > Queue;
81 // Live ranges pass through a number of stages as we try to allocate them.
82 // Some of the stages may also create new live ranges:
84 // - Region splitting.
85 // - Per-block splitting.
89 // Ranges produced by one of the stages skip the previous stages when they are
90 // dequeued. This improves performance because we can skip interference checks
91 // that are unlikely to give any results. It also guarantees that the live
92 // range splitting algorithm terminates, something that is otherwise hard to ensure.
95 RS_New, ///< Never seen before.
96 RS_First, ///< First time in the queue.
97 RS_Second, ///< Second time in the queue.
98 RS_Region, ///< Produced by region splitting.
99 RS_Block, ///< Produced by per-block splitting.
100 RS_Local, ///< Produced by local splitting.
101 RS_Spill ///< Produced by spilling.
104 IndexedMap<unsigned char, VirtReg2IndexFunctor> LRStage;
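// LRStage holds the stage of each virtual register as a byte; getStage decodes
// it, and setStage below only promotes registers that are still in RS_New.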
106 LiveRangeStage getStage(const LiveInterval &VirtReg) const {
107 return LiveRangeStage(LRStage[VirtReg.reg]);
110 template<typename Iterator>
111 void setStage(Iterator Begin, Iterator End, LiveRangeStage NewStage) {
112 LRStage.resize(MRI->getNumVirtRegs());
113 for (;Begin != End; ++Begin) {
114 unsigned Reg = (*Begin)->reg;
115 if (LRStage[Reg] == RS_New)
116 LRStage[Reg] = NewStage;
121 std::auto_ptr<SplitAnalysis> SA;
122 std::auto_ptr<SplitEditor> SE;
124 /// Cached per-block interference maps
125 InterferenceCache IntfCache;
127 /// All basic blocks where the current register has uses.
128 SmallVector<SpillPlacement::BlockConstraint, 8> SplitConstraints;
130 /// Live-through blocks that have already been added to SpillPlacer.
131 SparseBitVector<> ActiveThroughBlocks;
133 /// Global live range splitting candidate info.
134 struct GlobalSplitCandidate {
136 BitVector LiveBundles;
139 /// Candidate info for each PhysReg in AllocationOrder.
140 /// This vector never shrinks, but grows to the size of the largest register class.
142 SmallVector<GlobalSplitCandidate, 32> GlobalCand;
144 /// For every instruction in SA->UseSlots, store the previous non-copy instruction.
146 SmallVector<SlotIndex, 8> PrevSlot;
151 /// Return the pass name.
152 virtual const char* getPassName() const {
153 return "Greedy Register Allocator";
156 /// RAGreedy analysis usage.
157 virtual void getAnalysisUsage(AnalysisUsage &AU) const;
158 virtual void releaseMemory();
159 virtual Spiller &spiller() { return *SpillerInstance; }
160 virtual void enqueue(LiveInterval *LI);
161 virtual LiveInterval *dequeue();
162 virtual unsigned selectOrSplit(LiveInterval&,
163 SmallVectorImpl<LiveInterval*>&);
165 /// Perform register allocation.
166 virtual bool runOnMachineFunction(MachineFunction &mf);
171 void LRE_WillEraseInstruction(MachineInstr*);
172 bool LRE_CanEraseVirtReg(unsigned);
173 void LRE_WillShrinkVirtReg(unsigned);
174 void LRE_DidCloneVirtReg(unsigned, unsigned);
176 bool addSplitConstraints(InterferenceCache::Cursor, float&);
177 void addThroughConstraints(InterferenceCache::Cursor, ArrayRef<unsigned>);
178 void growRegion(InterferenceCache::Cursor);
179 float calcGlobalSplitCost(unsigned, const BitVector&);
180 void splitAroundRegion(LiveInterval&, unsigned, const BitVector&,
181 SmallVectorImpl<LiveInterval*>&);
182 void calcGapWeights(unsigned, SmallVectorImpl<float>&);
183 SlotIndex getPrevMappedIndex(const MachineInstr*);
184 void calcPrevSlots();
185 unsigned nextSplitPoint(unsigned);
186 bool canEvictInterference(LiveInterval&, unsigned, float&);
188 unsigned tryEvict(LiveInterval&, AllocationOrder&,
189 SmallVectorImpl<LiveInterval*>&);
190 unsigned tryRegionSplit(LiveInterval&, AllocationOrder&,
191 SmallVectorImpl<LiveInterval*>&);
192 unsigned tryLocalSplit(LiveInterval&, AllocationOrder&,
193 SmallVectorImpl<LiveInterval*>&);
194 unsigned trySplit(LiveInterval&, AllocationOrder&,
195 SmallVectorImpl<LiveInterval*>&);
197 } // end anonymous namespace
199 char RAGreedy::ID = 0;
201 FunctionPass* llvm::createGreedyRegisterAllocator() {
202 return new RAGreedy();
205 RAGreedy::RAGreedy(): MachineFunctionPass(ID), LRStage(RS_New) {
206 initializeLiveDebugVariablesPass(*PassRegistry::getPassRegistry());
207 initializeSlotIndexesPass(*PassRegistry::getPassRegistry());
208 initializeLiveIntervalsPass(*PassRegistry::getPassRegistry());
210 initializeStrongPHIEliminationPass(*PassRegistry::getPassRegistry());
211 initializeRegisterCoalescerAnalysisGroup(*PassRegistry::getPassRegistry());
212 initializeCalculateSpillWeightsPass(*PassRegistry::getPassRegistry());
213 initializeLiveStacksPass(*PassRegistry::getPassRegistry());
214 initializeMachineDominatorTreePass(*PassRegistry::getPassRegistry());
215 initializeMachineLoopInfoPass(*PassRegistry::getPassRegistry());
216 initializeMachineLoopRangesPass(*PassRegistry::getPassRegistry());
217 initializeVirtRegMapPass(*PassRegistry::getPassRegistry());
218 initializeEdgeBundlesPass(*PassRegistry::getPassRegistry());
219 initializeSpillPlacementPass(*PassRegistry::getPassRegistry());
222 void RAGreedy::getAnalysisUsage(AnalysisUsage &AU) const {
223 AU.setPreservesCFG();
224 AU.addRequired<AliasAnalysis>();
225 AU.addPreserved<AliasAnalysis>();
226 AU.addRequired<LiveIntervals>();
227 AU.addRequired<SlotIndexes>();
228 AU.addPreserved<SlotIndexes>();
229 AU.addRequired<LiveDebugVariables>();
230 AU.addPreserved<LiveDebugVariables>();
232 AU.addRequiredID(StrongPHIEliminationID);
233 AU.addRequiredTransitive<RegisterCoalescer>();
234 AU.addRequired<CalculateSpillWeights>();
235 AU.addRequired<LiveStacks>();
236 AU.addPreserved<LiveStacks>();
237 AU.addRequired<MachineDominatorTree>();
238 AU.addPreserved<MachineDominatorTree>();
239 AU.addRequired<MachineLoopInfo>();
240 AU.addPreserved<MachineLoopInfo>();
241 AU.addRequired<MachineLoopRanges>();
242 AU.addPreserved<MachineLoopRanges>();
243 AU.addRequired<VirtRegMap>();
244 AU.addPreserved<VirtRegMap>();
245 AU.addRequired<EdgeBundles>();
246 AU.addRequired<SpillPlacement>();
247 MachineFunctionPass::getAnalysisUsage(AU);
251 //===----------------------------------------------------------------------===//
252 // LiveRangeEdit delegate methods
253 //===----------------------------------------------------------------------===//
255 void RAGreedy::LRE_WillEraseInstruction(MachineInstr *MI) {
256 // LRE itself will remove the instruction from SlotIndexes and the parent basic block.
257 VRM->RemoveMachineInstrFromMaps(MI);
260 bool RAGreedy::LRE_CanEraseVirtReg(unsigned VirtReg) {
261 if (unsigned PhysReg = VRM->getPhys(VirtReg)) {
262 unassign(LIS->getInterval(VirtReg), PhysReg);
265 // Unassigned virtreg is probably in the priority queue.
266 // RegAllocBase will erase it after dequeueing.
270 void RAGreedy::LRE_WillShrinkVirtReg(unsigned VirtReg) {
271 unsigned PhysReg = VRM->getPhys(VirtReg);
275 // Register is assigned, put it back on the queue for reassignment.
276 LiveInterval &LI = LIS->getInterval(VirtReg);
277 unassign(LI, PhysReg);
281 void RAGreedy::LRE_DidCloneVirtReg(unsigned New, unsigned Old) {
282 // LRE may clone a virtual register because dead code elimination causes it to
283 // be split into connected components. Ensure that the new register gets the
284 // same stage as the parent.
286 LRStage[New] = LRStage[Old];
289 void RAGreedy::releaseMemory() {
290 SpillerInstance.reset(0);
292 RegAllocBase::releaseMemory();
295 void RAGreedy::enqueue(LiveInterval *LI) {
296 // Prioritize live ranges by size, assigning larger ranges first.
297 // The queue holds (priority, reg) pairs.
298 const unsigned Size = LI->getSize();
299 const unsigned Reg = LI->reg;
300 assert(TargetRegisterInfo::isVirtualRegister(Reg) &&
301 "Can only enqueue virtual registers");
305 if (LRStage[Reg] == RS_New)
306 LRStage[Reg] = RS_First;
308 if (LRStage[Reg] == RS_Second)
309 // Unsplit ranges that couldn't be allocated immediately are deferred until
310 // everything else has been allocated. Long ranges are allocated last so
311 // they are split against realistic interference.
312 Prio = (1u << 31) - Size;
314 // Everything else is allocated in long->short order. Long ranges that don't
315 // fit should be spilled ASAP so they don't create interference.
316 Prio = (1u << 31) + Size;
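// Deferred (RS_Second) ranges therefore always sort below the rest: a 100-slot
// range gets Prio = 0x80000000 + 100 here, but only 0x80000000 - 100 once
// deferred, so it is popped only after every non-deferred range.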
318 // Boost ranges that have a physical register hint.
319 if (TargetRegisterInfo::isPhysicalRegister(VRM->getRegAllocPref(Reg)))
323 Queue.push(std::make_pair(Prio, Reg));
326 LiveInterval *RAGreedy::dequeue() {
329 LiveInterval *LI = &LIS->getInterval(Queue.top().second);
334 //===----------------------------------------------------------------------===//
335 // Interference eviction
336 //===----------------------------------------------------------------------===//
338 /// canEvictInterference - Return true if all interferences between VirtReg and PhysReg can
339 /// be evicted. Set maxWeight to the maximal spill weight of an interference.
340 bool RAGreedy::canEvictInterference(LiveInterval &VirtReg, unsigned PhysReg,
343 for (const unsigned *AliasI = TRI->getOverlaps(PhysReg); *AliasI; ++AliasI) {
344 LiveIntervalUnion::Query &Q = query(VirtReg, *AliasI);
345 // If there are 10 or more interferences, chances are one is heavier.
346 if (Q.collectInterferingVRegs(10) >= 10)
349 // Check if any interfering live range is heavier than VirtReg.
350 for (unsigned i = 0, e = Q.interferingVRegs().size(); i != e; ++i) {
351 LiveInterval *Intf = Q.interferingVRegs()[i];
352 if (TargetRegisterInfo::isPhysicalRegister(Intf->reg))
354 if (Intf->weight >= VirtReg.weight)
356 Weight = std::max(Weight, Intf->weight);
363 /// tryEvict - Try to evict all interferences for a physreg.
364 /// @param VirtReg Currently unassigned virtual register.
365 /// @param Order Physregs to try.
366 /// @return Physreg to assign VirtReg, or 0.
367 unsigned RAGreedy::tryEvict(LiveInterval &VirtReg,
368 AllocationOrder &Order,
369 SmallVectorImpl<LiveInterval*> &NewVRegs){
370 NamedRegionTimer T("Evict", TimerGroupName, TimePassesIsEnabled);
372 // Keep track of the lightest single interference seen so far.
373 float BestWeight = 0;
374 unsigned BestPhys = 0;
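// Scan the allocation order and remember the physreg whose heaviest evictable
// interference is the lightest; a usable allocation hint is taken immediately.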
377 while (unsigned PhysReg = Order.next()) {
379 if (!canEvictInterference(VirtReg, PhysReg, Weight))
382 // This is an eviction candidate.
383 DEBUG(dbgs() << "max " << PrintReg(PhysReg, TRI) << " interference = "
385 if (BestPhys && Weight >= BestWeight)
391 // Stop if the hint can be used.
392 if (Order.isHint(PhysReg))
399 DEBUG(dbgs() << "evicting " << PrintReg(BestPhys, TRI) << " interference\n");
400 for (const unsigned *AliasI = TRI->getOverlaps(BestPhys); *AliasI; ++AliasI) {
401 LiveIntervalUnion::Query &Q = query(VirtReg, *AliasI);
402 assert(Q.seenAllInterferences() && "Didn't check all interferences.");
403 for (unsigned i = 0, e = Q.interferingVRegs().size(); i != e; ++i) {
404 LiveInterval *Intf = Q.interferingVRegs()[i];
405 unassign(*Intf, VRM->getPhys(Intf->reg));
407 NewVRegs.push_back(Intf);
414 //===----------------------------------------------------------------------===//
415 // Region Splitting
416 //===----------------------------------------------------------------------===//
418 /// addSplitConstraints - Fill out the SplitConstraints vector based on the
419 /// interference pattern in Physreg and its aliases. Add the constraints to
420 /// SpillPlacement and return the static cost of this split in Cost, assuming
421 /// that all preferences in SplitConstraints are met.
422 /// Return false if there are no bundles with positive bias.
423 bool RAGreedy::addSplitConstraints(InterferenceCache::Cursor Intf,
425 ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks();
427 // Reset interference dependent info.
428 SplitConstraints.resize(UseBlocks.size());
429 float StaticCost = 0;
430 for (unsigned i = 0; i != UseBlocks.size(); ++i) {
431 const SplitAnalysis::BlockInfo &BI = UseBlocks[i];
432 SpillPlacement::BlockConstraint &BC = SplitConstraints[i];
434 BC.Number = BI.MBB->getNumber();
435 Intf.moveToBlock(BC.Number);
436 BC.Entry = BI.LiveIn ? SpillPlacement::PrefReg : SpillPlacement::DontCare;
437 BC.Exit = BI.LiveOut ? SpillPlacement::PrefReg : SpillPlacement::DontCare;
439 if (!Intf.hasInterference())
442 // Number of spill code instructions to insert.
445 // Interference for the live-in value.
447 if (Intf.first() <= Indexes->getMBBStartIdx(BC.Number))
448 BC.Entry = SpillPlacement::MustSpill, ++Ins;
449 else if (Intf.first() < BI.FirstUse)
450 BC.Entry = SpillPlacement::PrefSpill, ++Ins;
451 else if (Intf.first() < (BI.LiveThrough ? BI.LastUse : BI.Kill))
455 // Interference for the live-out value.
457 if (Intf.last() >= SA->getLastSplitPoint(BC.Number))
458 BC.Exit = SpillPlacement::MustSpill, ++Ins;
459 else if (Intf.last() > BI.LastUse)
460 BC.Exit = SpillPlacement::PrefSpill, ++Ins;
461 else if (Intf.last() > (BI.LiveThrough ? BI.FirstUse : BI.Def))
465 // Accumulate the total frequency of inserted spill code.
467 StaticCost += Ins * SpillPlacer->getBlockFrequency(BC.Number);
471 // Add constraints for use-blocks. Note that these are the only constraints
472 // that may add a positive bias; it is downhill from here.
473 SpillPlacer->addConstraints(SplitConstraints);
474 return SpillPlacer->scanActiveBundles();
478 /// addThroughConstraints - Add constraints and links to SpillPlacer from the
479 /// live-through blocks in Blocks.
480 void RAGreedy::addThroughConstraints(InterferenceCache::Cursor Intf,
481 ArrayRef<unsigned> Blocks) {
482 const unsigned GroupSize = 8;
483 SpillPlacement::BlockConstraint BCS[GroupSize];
484 unsigned TBS[GroupSize];
485 unsigned B = 0, T = 0;
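// BCS gathers constrained blocks and TBS gathers transparent
// (interference-free) blocks; each array is flushed to SpillPlacer whenever
// GroupSize entries have accumulated, and the remainders are flushed after the
// loop.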
487 for (unsigned i = 0; i != Blocks.size(); ++i) {
488 unsigned Number = Blocks[i];
489 Intf.moveToBlock(Number);
491 if (!Intf.hasInterference()) {
492 assert(T < GroupSize && "Array overflow");
494 if (++T == GroupSize) {
495 SpillPlacer->addLinks(ArrayRef<unsigned>(TBS, T));
501 assert(B < GroupSize && "Array overflow");
502 BCS[B].Number = Number;
504 // Interference for the live-in value.
505 if (Intf.first() <= Indexes->getMBBStartIdx(Number))
506 BCS[B].Entry = SpillPlacement::MustSpill;
508 BCS[B].Entry = SpillPlacement::PrefSpill;
510 // Interference for the live-out value.
511 if (Intf.last() >= SA->getLastSplitPoint(Number))
512 BCS[B].Exit = SpillPlacement::MustSpill;
514 BCS[B].Exit = SpillPlacement::PrefSpill;
516 if (++B == GroupSize) {
517 ArrayRef<SpillPlacement::BlockConstraint> Array(BCS, B);
518 SpillPlacer->addConstraints(Array);
523 ArrayRef<SpillPlacement::BlockConstraint> Array(BCS, B);
524 SpillPlacer->addConstraints(Array);
525 SpillPlacer->addLinks(ArrayRef<unsigned>(TBS, T));
528 void RAGreedy::growRegion(InterferenceCache::Cursor Intf) {
529 // Keep track of through blocks that have already been added to SpillPlacer.
530 SparseBitVector<> Added;
531 SmallVector<unsigned, 16> ThroughBlocks;
533 unsigned Visited = 0;
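// Grow the region iteratively: each round picks up the bundles that just
// turned positive, adds constraints and links for the new through blocks on
// their periphery, and lets SpillPlacer iterate until no new positive bundles
// appear.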
536 ArrayRef<unsigned> NewBundles = SpillPlacer->getRecentPositive();
537 if (NewBundles.empty())
539 // Find new through blocks in the periphery of PrefRegBundles.
540 for (int i = 0, e = NewBundles.size(); i != e; ++i) {
541 unsigned Bundle = NewBundles[i];
542 // Look at all blocks connected to Bundle in the full graph.
543 ArrayRef<unsigned> Blocks = Bundles->getBlocks(Bundle);
544 for (ArrayRef<unsigned>::iterator I = Blocks.begin(), E = Blocks.end();
547 if (!SA->isThroughBlock(Block) || !Added.test_and_set(Block))
549 // This is a new through block. Add it to SpillPlacer later.
550 ThroughBlocks.push_back(Block);
556 // Any new blocks to add?
557 if (!ThroughBlocks.empty()) {
558 addThroughConstraints(Intf, ThroughBlocks);
559 ThroughBlocks.clear();
561 // Perhaps iterating can enable more bundles?
562 SpillPlacer->iterate();
565 // Remember the relevant set of through blocks for splitAroundRegion().
566 ActiveThroughBlocks |= Added;
567 DEBUG(dbgs() << ", v=" << Visited);
570 /// calcGlobalSplitCost - Return the global split cost of following the split
571 /// pattern in LiveBundles. This cost should be added to the local cost of the
572 /// interference pattern in SplitConstraints.
574 float RAGreedy::calcGlobalSplitCost(unsigned PhysReg,
575 const BitVector &LiveBundles) {
576 float GlobalCost = 0;
577 ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks();
578 for (unsigned i = 0; i != UseBlocks.size(); ++i) {
579 const SplitAnalysis::BlockInfo &BI = UseBlocks[i];
580 SpillPlacement::BlockConstraint &BC = SplitConstraints[i];
581 bool RegIn = LiveBundles[Bundles->getBundle(BC.Number, 0)];
582 bool RegOut = LiveBundles[Bundles->getBundle(BC.Number, 1)];
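// getBundle(N, 0) is the bundle of edges entering block N and getBundle(N, 1)
// the bundle of edges leaving it, so RegIn/RegOut say whether the register was
// chosen for the ingoing and outgoing edge bundles.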
586 Ins += RegIn != (BC.Entry == SpillPlacement::PrefReg);
588 Ins += RegOut != (BC.Exit == SpillPlacement::PrefReg);
590 GlobalCost += Ins * SpillPlacer->getBlockFrequency(BC.Number);
593 InterferenceCache::Cursor Intf(IntfCache, PhysReg);
594 for (SparseBitVector<>::iterator I = ActiveThroughBlocks.begin(),
595 E = ActiveThroughBlocks.end(); I != E; ++I) {
596 unsigned Number = *I;
597 bool RegIn = LiveBundles[Bundles->getBundle(Number, 0)];
598 bool RegOut = LiveBundles[Bundles->getBundle(Number, 1)];
599 if (!RegIn && !RegOut)
601 if (RegIn && RegOut) {
602 // We need double spill code if this block has interference.
603 Intf.moveToBlock(Number);
604 if (Intf.hasInterference())
605 GlobalCost += 2*SpillPlacer->getBlockFrequency(Number);
608 // live-in / stack-out or stack-in / live-out.
609 GlobalCost += SpillPlacer->getBlockFrequency(Number);
614 /// splitAroundRegion - Split VirtReg around the region determined by
615 /// LiveBundles. Make an effort to avoid interference from PhysReg.
617 /// The 'register' interval is going to contain as many uses as possible while
618 /// avoiding interference. The 'stack' interval is the complement constructed by
619 /// SplitEditor. It will contain the rest.
621 void RAGreedy::splitAroundRegion(LiveInterval &VirtReg, unsigned PhysReg,
622 const BitVector &LiveBundles,
623 SmallVectorImpl<LiveInterval*> &NewVRegs) {
625 dbgs() << "Splitting around region for " << PrintReg(PhysReg, TRI)
627 for (int i = LiveBundles.find_first(); i>=0; i = LiveBundles.find_next(i))
628 dbgs() << " EB#" << i;
632 InterferenceCache::Cursor Intf(IntfCache, PhysReg);
633 LiveRangeEdit LREdit(VirtReg, NewVRegs, this);
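// The split is built in three passes over the blocks: first the blocks where
// the new register interval should be live-out, then the blocks where it
// should be live-in, and finally the blocks it merely lives through.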
636 // Create the main cross-block interval.
639 // First add all defs that are live out of a block.
640 ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks();
641 for (unsigned i = 0; i != UseBlocks.size(); ++i) {
642 const SplitAnalysis::BlockInfo &BI = UseBlocks[i];
643 bool RegIn = LiveBundles[Bundles->getBundle(BI.MBB->getNumber(), 0)];
644 bool RegOut = LiveBundles[Bundles->getBundle(BI.MBB->getNumber(), 1)];
646 // Should the register be live out?
647 if (!BI.LiveOut || !RegOut)
650 SlotIndex Start, Stop;
651 tie(Start, Stop) = Indexes->getMBBRange(BI.MBB);
652 Intf.moveToBlock(BI.MBB->getNumber());
653 DEBUG(dbgs() << "BB#" << BI.MBB->getNumber() << " -> EB#"
654 << Bundles->getBundle(BI.MBB->getNumber(), 1)
655 << " [" << Start << ';'
656 << SA->getLastSplitPoint(BI.MBB->getNumber()) << '-' << Stop
657 << ") intf [" << Intf.first() << ';' << Intf.last() << ')');
659 // The interference interval should either be invalid or overlap MBB.
660 assert((!Intf.hasInterference() || Intf.first() < Stop)
661 && "Bad interference");
662 assert((!Intf.hasInterference() || Intf.last() > Start)
663 && "Bad interference");
665 // Check interference leaving the block.
666 if (!Intf.hasInterference()) {
667 // Block is interference-free.
668 DEBUG(dbgs() << ", no interference");
669 if (!BI.LiveThrough) {
670 DEBUG(dbgs() << ", not live-through.\n");
671 SE->useIntv(SE->enterIntvBefore(BI.Def), Stop);
675 // Block is live-through, but entry bundle is on the stack.
676 // Reload just before the first use.
677 DEBUG(dbgs() << ", not live-in, enter before first use.\n");
678 SE->useIntv(SE->enterIntvBefore(BI.FirstUse), Stop);
681 DEBUG(dbgs() << ", live-through.\n");
685 // Block has interference.
686 DEBUG(dbgs() << ", interference to " << Intf.last());
688 if (!BI.LiveThrough && Intf.last() <= BI.Def) {
689 // The interference doesn't reach the outgoing segment.
690 DEBUG(dbgs() << " doesn't affect def from " << BI.Def << '\n');
691 SE->useIntv(BI.Def, Stop);
695 SlotIndex LastSplitPoint = SA->getLastSplitPoint(BI.MBB->getNumber());
696 if (Intf.last().getBoundaryIndex() < BI.LastUse) {
697 // There are interference-free uses at the end of the block.
698 // Find the first use that can get the live-out register.
699 SmallVectorImpl<SlotIndex>::const_iterator UI =
700 std::lower_bound(SA->UseSlots.begin(), SA->UseSlots.end(),
701 Intf.last().getBoundaryIndex());
702 assert(UI != SA->UseSlots.end() && "Couldn't find last use");
704 assert(Use <= BI.LastUse && "Couldn't find last use");
705 // Only attempt a split before the last split point.
706 if (Use.getBaseIndex() <= LastSplitPoint) {
707 DEBUG(dbgs() << ", free use at " << Use << ".\n");
708 SlotIndex SegStart = SE->enterIntvBefore(Use);
709 assert(SegStart >= Intf.last() && "Couldn't avoid interference");
710 assert(SegStart < LastSplitPoint && "Impossible split point");
711 SE->useIntv(SegStart, Stop);
716 // Interference is after the last use.
717 DEBUG(dbgs() << " after last use.\n");
718 SlotIndex SegStart = SE->enterIntvAtEnd(*BI.MBB);
719 assert(SegStart >= Intf.last() && "Couldn't avoid interference");
722 // Now all defs leading to live bundles are handled, do everything else.
723 for (unsigned i = 0; i != UseBlocks.size(); ++i) {
724 const SplitAnalysis::BlockInfo &BI = UseBlocks[i];
725 bool RegIn = LiveBundles[Bundles->getBundle(BI.MBB->getNumber(), 0)];
726 bool RegOut = LiveBundles[Bundles->getBundle(BI.MBB->getNumber(), 1)];
728 // Is the register live-in?
729 if (!BI.LiveIn || !RegIn)
732 // We have an incoming register. Check for interference.
733 SlotIndex Start, Stop;
734 tie(Start, Stop) = Indexes->getMBBRange(BI.MBB);
735 Intf.moveToBlock(BI.MBB->getNumber());
736 DEBUG(dbgs() << "EB#" << Bundles->getBundle(BI.MBB->getNumber(), 0)
737 << " -> BB#" << BI.MBB->getNumber() << " [" << Start << ';'
738 << SA->getLastSplitPoint(BI.MBB->getNumber()) << '-' << Stop
741 // Check interference entering the block.
742 if (!Intf.hasInterference()) {
743 // Block is interference-free.
744 DEBUG(dbgs() << ", no interference");
745 if (!BI.LiveThrough) {
746 DEBUG(dbgs() << ", killed in block.\n");
747 SE->useIntv(Start, SE->leaveIntvAfter(BI.Kill));
751 SlotIndex LastSplitPoint = SA->getLastSplitPoint(BI.MBB->getNumber());
752 // Block is live-through, but exit bundle is on the stack.
753 // Spill immediately after the last use.
754 if (BI.LastUse < LastSplitPoint) {
755 DEBUG(dbgs() << ", uses, stack-out.\n");
756 SE->useIntv(Start, SE->leaveIntvAfter(BI.LastUse));
759 // The last use is after the last split point, it is probably an
761 DEBUG(dbgs() << ", uses at " << BI.LastUse << " after split point "
762 << LastSplitPoint << ", stack-out.\n");
763 SlotIndex SegEnd = SE->leaveIntvBefore(LastSplitPoint);
764 SE->useIntv(Start, SegEnd);
765 // Run a double interval from the split to the last use.
766 // This makes it possible to spill the complement without affecting the
768 SE->overlapIntv(SegEnd, BI.LastUse);
771 // Register is live-through.
772 DEBUG(dbgs() << ", uses, live-through.\n");
773 SE->useIntv(Start, Stop);
777 // Block has interference.
778 DEBUG(dbgs() << ", interference from " << Intf.first());
780 if (!BI.LiveThrough && Intf.first() >= BI.Kill) {
781 // The interference doesn't reach the incoming segment.
782 DEBUG(dbgs() << " doesn't affect kill at " << BI.Kill << '\n');
783 SE->useIntv(Start, BI.Kill);
787 if (Intf.first().getBaseIndex() > BI.FirstUse) {
788 // There are interference-free uses at the beginning of the block.
789 // Find the last use that can get the register.
790 SmallVectorImpl<SlotIndex>::const_iterator UI =
791 std::lower_bound(SA->UseSlots.begin(), SA->UseSlots.end(),
792 Intf.first().getBaseIndex());
793 assert(UI != SA->UseSlots.begin() && "Couldn't find first use");
794 SlotIndex Use = (--UI)->getBoundaryIndex();
795 DEBUG(dbgs() << ", free use at " << *UI << ".\n");
796 SlotIndex SegEnd = SE->leaveIntvAfter(Use);
797 assert(SegEnd <= Intf.first() && "Couldn't avoid interference");
798 SE->useIntv(Start, SegEnd);
802 // Interference is before the first use.
803 DEBUG(dbgs() << " before first use.\n");
804 SlotIndex SegEnd = SE->leaveIntvAtTop(*BI.MBB);
805 assert(SegEnd <= Intf.first() && "Couldn't avoid interference");
808 // Handle live-through blocks.
809 for (SparseBitVector<>::iterator I = ActiveThroughBlocks.begin(),
810 E = ActiveThroughBlocks.end(); I != E; ++I) {
811 unsigned Number = *I;
812 bool RegIn = LiveBundles[Bundles->getBundle(Number, 0)];
813 bool RegOut = LiveBundles[Bundles->getBundle(Number, 1)];
814 DEBUG(dbgs() << "Live through BB#" << Number << '\n');
815 if (RegIn && RegOut) {
816 Intf.moveToBlock(Number);
817 if (!Intf.hasInterference()) {
818 SE->useIntv(Indexes->getMBBStartIdx(Number),
819 Indexes->getMBBEndIdx(Number));
823 MachineBasicBlock *MBB = MF->getBlockNumbered(Number);
825 SE->leaveIntvAtTop(*MBB);
827 SE->enterIntvAtEnd(*MBB);
832 // FIXME: Should we be more aggressive about splitting the stack region into
833 // per-block segments? The current approach allows the stack region to
834 // separate into connected components. Some components may be allocatable.
839 MF->verify(this, "After splitting live range around region");
842 unsigned RAGreedy::tryRegionSplit(LiveInterval &VirtReg, AllocationOrder &Order,
843 SmallVectorImpl<LiveInterval*> &NewVRegs) {
844 BitVector LiveBundles, BestBundles;
846 unsigned BestReg = 0;
847 ActiveThroughBlocks.clear();
850 for (unsigned Cand = 0; unsigned PhysReg = Order.next(); ++Cand) {
851 if (GlobalCand.size() <= Cand)
852 GlobalCand.resize(Cand+1);
853 GlobalCand[Cand].PhysReg = PhysReg;
855 SpillPlacer->prepare(LiveBundles);
857 InterferenceCache::Cursor Intf(IntfCache, PhysReg);
858 if (!addSplitConstraints(Intf, Cost)) {
859 DEBUG(dbgs() << PrintReg(PhysReg, TRI) << "\tno positive bundles\n");
862 DEBUG(dbgs() << PrintReg(PhysReg, TRI) << "\tstatic = " << Cost);
863 if (BestReg && Cost >= BestCost) {
864 DEBUG(dbgs() << " worse than " << PrintReg(BestReg, TRI) << '\n');
869 SpillPlacer->finish();
871 // No live bundles, defer to splitSingleBlocks().
872 if (!LiveBundles.any()) {
873 DEBUG(dbgs() << " no bundles.\n");
877 Cost += calcGlobalSplitCost(PhysReg, LiveBundles);
879 dbgs() << ", total = " << Cost << " with bundles";
880 for (int i = LiveBundles.find_first(); i>=0; i = LiveBundles.find_next(i))
881 dbgs() << " EB#" << i;
884 if (!BestReg || Cost < BestCost) {
886 BestCost = 0.98f * Cost; // Prevent rounding effects.
887 BestBundles.swap(LiveBundles);
894 splitAroundRegion(VirtReg, BestReg, BestBundles, NewVRegs);
895 setStage(NewVRegs.begin(), NewVRegs.end(), RS_Region);
900 //===----------------------------------------------------------------------===//
901 // Local Splitting
902 //===----------------------------------------------------------------------===//
905 /// calcGapWeights - Compute the maximum spill weight that needs to be evicted
906 /// in order to use PhysReg between two entries in SA->UseSlots.
908 /// GapWeight[i] represents the gap between UseSlots[i] and UseSlots[i+1].
910 void RAGreedy::calcGapWeights(unsigned PhysReg,
911 SmallVectorImpl<float> &GapWeight) {
912 assert(SA->getUseBlocks().size() == 1 && "Not a local interval");
913 const SplitAnalysis::BlockInfo &BI = SA->getUseBlocks().front();
914 const SmallVectorImpl<SlotIndex> &Uses = SA->UseSlots;
915 const unsigned NumGaps = Uses.size()-1;
917 // Start and end points for the interference check.
918 SlotIndex StartIdx = BI.LiveIn ? BI.FirstUse.getBaseIndex() : BI.FirstUse;
919 SlotIndex StopIdx = BI.LiveOut ? BI.LastUse.getBoundaryIndex() : BI.LastUse;
921 GapWeight.assign(NumGaps, 0.0f);
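// With uses {A, B, C} there are two gaps: GapWeight[0] becomes the heaviest
// interference weight between A and B, and GapWeight[1] between B and C.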
923 // Add interference from each overlapping register.
924 for (const unsigned *AI = TRI->getOverlaps(PhysReg); *AI; ++AI) {
925 if (!query(const_cast<LiveInterval&>(SA->getParent()), *AI)
926 .checkInterference())
929 // We know that VirtReg is a continuous interval from FirstUse to LastUse,
930 // so we don't need InterferenceQuery.
932 // Interference that overlaps an instruction is counted in both gaps
933 // surrounding the instruction. The exception is interference before
934 // StartIdx and after StopIdx.
936 LiveIntervalUnion::SegmentIter IntI = PhysReg2LiveUnion[*AI].find(StartIdx);
937 for (unsigned Gap = 0; IntI.valid() && IntI.start() < StopIdx; ++IntI) {
938 // Skip the gaps before IntI.
939 while (Uses[Gap+1].getBoundaryIndex() < IntI.start())
940 if (++Gap == NumGaps)
945 // Update the gaps covered by IntI.
946 const float weight = IntI.value()->weight;
947 for (; Gap != NumGaps; ++Gap) {
948 GapWeight[Gap] = std::max(GapWeight[Gap], weight);
949 if (Uses[Gap+1].getBaseIndex() >= IntI.stop())
958 /// getPrevMappedIndex - Return the slot index of the last non-copy instruction
959 /// before MI that has a slot index. If MI is the first mapped instruction in
960 /// its block, return the block start index instead.
962 SlotIndex RAGreedy::getPrevMappedIndex(const MachineInstr *MI) {
963 assert(MI && "Missing MachineInstr");
964 const MachineBasicBlock *MBB = MI->getParent();
965 MachineBasicBlock::const_iterator B = MBB->begin(), I = MI;
967 if (!(--I)->isDebugValue() && !I->isCopy())
968 return Indexes->getInstructionIndex(I);
969 return Indexes->getMBBStartIdx(MBB);
972 /// calcPrevSlots - Fill in the PrevSlot array with the index of the previous
973 /// real non-copy instruction for each instruction in SA->UseSlots.
975 void RAGreedy::calcPrevSlots() {
976 const SmallVectorImpl<SlotIndex> &Uses = SA->UseSlots;
978 PrevSlot.reserve(Uses.size());
979 for (unsigned i = 0, e = Uses.size(); i != e; ++i) {
980 const MachineInstr *MI = Indexes->getInstructionFromIndex(Uses[i]);
981 PrevSlot.push_back(getPrevMappedIndex(MI).getDefIndex());
985 /// nextSplitPoint - Find the next index j > i into SA->UseSlots such that it
986 /// may be beneficial to split before UseSlots[j].
988 /// 0 is always a valid split point.
989 unsigned RAGreedy::nextSplitPoint(unsigned i) {
990 const SmallVectorImpl<SlotIndex> &Uses = SA->UseSlots;
991 const unsigned Size = Uses.size();
992 assert(i != Size && "No split points after the end");
993 // Allow split before i when Uses[i] is not adjacent to the previous use.
994 while (++i != Size && PrevSlot[i].getBaseIndex() <= Uses[i-1].getBaseIndex())
999 /// tryLocalSplit - Try to split VirtReg into smaller intervals inside its only basic block.
1002 unsigned RAGreedy::tryLocalSplit(LiveInterval &VirtReg, AllocationOrder &Order,
1003 SmallVectorImpl<LiveInterval*> &NewVRegs) {
1004 assert(SA->getUseBlocks().size() == 1 && "Not a local interval");
1005 const SplitAnalysis::BlockInfo &BI = SA->getUseBlocks().front();
1007 // Note that it is possible to have an interval that is live-in or live-out
1008 // while only covering a single block - A phi-def can use undef values from
1009 // predecessors, and the block could be a single-block loop.
1010 // We don't bother doing anything clever about such a case, we simply assume
1011 // that the interval is continuous from FirstUse to LastUse. We should make
1012 // sure that we don't do anything illegal to such an interval, though.
1014 const SmallVectorImpl<SlotIndex> &Uses = SA->UseSlots;
1015 if (Uses.size() <= 2)
1017 const unsigned NumGaps = Uses.size()-1;
1020 dbgs() << "tryLocalSplit: ";
1021 for (unsigned i = 0, e = Uses.size(); i != e; ++i)
1022 dbgs() << ' ' << SA->UseSlots[i];
1026 // For every use, find the previous mapped non-copy instruction.
1027 // We use this to detect valid split points, and to estimate new interval sizes.
1031 unsigned BestBefore = NumGaps;
1032 unsigned BestAfter = 0;
1035 const float blockFreq = SpillPlacer->getBlockFrequency(BI.MBB->getNumber());
1036 SmallVector<float, 8> GapWeight;
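// For each candidate physreg, compute the interference weight of every gap and
// then search for the best run of gaps [SplitBefore, SplitAfter] to cover: the
// window is shrunk when the estimated weight of the new interval cannot beat
// the interference it would have to evict (MaxGap), extended otherwise, and
// the candidate with the best margin wins.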
1039 while (unsigned PhysReg = Order.next()) {
1040 // Keep track of the largest spill weight that would need to be evicted in
1041 // order to make use of PhysReg between UseSlots[i] and UseSlots[i+1].
1042 calcGapWeights(PhysReg, GapWeight);
1044 // Try to find the best sequence of gaps to close.
1045 // The new spill weight must be larger than any gap interference.
1047 // We will split before Uses[SplitBefore] and after Uses[SplitAfter].
1048 unsigned SplitBefore = 0, SplitAfter = nextSplitPoint(1) - 1;
1050 // MaxGap should always be max(GapWeight[SplitBefore..SplitAfter-1]).
1051 // It is the spill weight that needs to be evicted.
1052 float MaxGap = GapWeight[0];
1053 for (unsigned i = 1; i != SplitAfter; ++i)
1054 MaxGap = std::max(MaxGap, GapWeight[i]);
1057 // Live before/after split?
1058 const bool LiveBefore = SplitBefore != 0 || BI.LiveIn;
1059 const bool LiveAfter = SplitAfter != NumGaps || BI.LiveOut;
1061 DEBUG(dbgs() << PrintReg(PhysReg, TRI) << ' '
1062 << Uses[SplitBefore] << '-' << Uses[SplitAfter]
1063 << " i=" << MaxGap);
1065 // Stop before the interval gets so big we wouldn't be making progress.
1066 if (!LiveBefore && !LiveAfter) {
1067 DEBUG(dbgs() << " all\n");
1070 // Should the interval be extended or shrunk?
1072 if (MaxGap < HUGE_VALF) {
1073 // Estimate the new spill weight.
1075 // Each instruction reads and writes the register, except the first
1076 // instr doesn't read when !FirstLive, and the last instr doesn't write
1079 // We will be inserting copies before and after, so the total number of
1080 // reads and writes is 2 * EstUses.
1082 const unsigned EstUses = 2*(SplitAfter - SplitBefore) +
1083 2*(LiveBefore + LiveAfter);
1085 // Try to guess the size of the new interval. This should be trivial,
1086 // but the slot index of an inserted copy can be a lot smaller than the
1087 // instruction it is inserted before if there are many dead indexes
1090 // We measure the distance from the instruction before SplitBefore to
1091 // get a conservative estimate.
1093 // The final distance can still be different if inserting copies
1094 // triggers a slot index renumbering.
1096 const float EstWeight = normalizeSpillWeight(blockFreq * EstUses,
1097 PrevSlot[SplitBefore].distance(Uses[SplitAfter]));
1098 // Would this split be possible to allocate?
1099 // Never allocate all gaps; we wouldn't be making progress.
1100 float Diff = EstWeight - MaxGap;
1101 DEBUG(dbgs() << " w=" << EstWeight << " d=" << Diff);
1104 if (Diff > BestDiff) {
1105 DEBUG(dbgs() << " (best)");
1107 BestBefore = SplitBefore;
1108 BestAfter = SplitAfter;
1115 SplitBefore = nextSplitPoint(SplitBefore);
1116 if (SplitBefore < SplitAfter) {
1117 DEBUG(dbgs() << " shrink\n");
1118 // Recompute the max when necessary.
1119 if (GapWeight[SplitBefore - 1] >= MaxGap) {
1120 MaxGap = GapWeight[SplitBefore];
1121 for (unsigned i = SplitBefore + 1; i != SplitAfter; ++i)
1122 MaxGap = std::max(MaxGap, GapWeight[i]);
1129 // Try to extend the interval.
1130 if (SplitAfter >= NumGaps) {
1131 DEBUG(dbgs() << " end\n");
1135 DEBUG(dbgs() << " extend\n");
1136 for (unsigned e = nextSplitPoint(SplitAfter + 1) - 1;
1137 SplitAfter != e; ++SplitAfter)
1138 MaxGap = std::max(MaxGap, GapWeight[SplitAfter]);
1143 // Didn't find any candidates?
1144 if (BestBefore == NumGaps)
1147 DEBUG(dbgs() << "Best local split range: " << Uses[BestBefore]
1148 << '-' << Uses[BestAfter] << ", " << BestDiff
1149 << ", " << (BestAfter - BestBefore + 1) << " instrs\n");
1151 LiveRangeEdit LREdit(VirtReg, NewVRegs, this);
1155 SlotIndex SegStart = SE->enterIntvBefore(Uses[BestBefore]);
1156 SlotIndex SegStop = SE->leaveIntvAfter(Uses[BestAfter]);
1157 SE->useIntv(SegStart, SegStop);
1160 setStage(NewVRegs.begin(), NewVRegs.end(), RS_Local);
1166 //===----------------------------------------------------------------------===//
1167 // Live Range Splitting
1168 //===----------------------------------------------------------------------===//
1170 /// trySplit - Try to split VirtReg or one of its interferences, making it assignable.
1172 /// @return Physreg when VirtReg may be assigned and/or new NewVRegs.
1173 unsigned RAGreedy::trySplit(LiveInterval &VirtReg, AllocationOrder &Order,
1174 SmallVectorImpl<LiveInterval*>&NewVRegs) {
1175 // Local intervals are handled separately.
1176 if (LIS->intervalIsInOneMBB(VirtReg)) {
1177 NamedRegionTimer T("Local Splitting", TimerGroupName, TimePassesIsEnabled);
1178 SA->analyze(&VirtReg);
1179 return tryLocalSplit(VirtReg, Order, NewVRegs);
1182 NamedRegionTimer T("Global Splitting", TimerGroupName, TimePassesIsEnabled);
1184 // Don't iterate global splitting.
1185 // Move straight to spilling if this range was produced by a global split.
1186 LiveRangeStage Stage = getStage(VirtReg);
1187 if (Stage >= RS_Block)
1190 SA->analyze(&VirtReg);
1192 // First try to split around a region spanning multiple blocks.
1193 if (Stage < RS_Region) {
1194 unsigned PhysReg = tryRegionSplit(VirtReg, Order, NewVRegs);
1195 if (PhysReg || !NewVRegs.empty())
1199 // Then isolate blocks with multiple uses.
1200 if (Stage < RS_Block) {
1201 SplitAnalysis::BlockPtrSet Blocks;
1202 if (SA->getMultiUseBlocks(Blocks)) {
1203 LiveRangeEdit LREdit(VirtReg, NewVRegs, this);
1205 SE->splitSingleBlocks(Blocks);
1206 setStage(NewVRegs.begin(), NewVRegs.end(), RS_Block);
1208 MF->verify(this, "After splitting live range around basic blocks");
1212 // Don't assign any physregs.
1217 //===----------------------------------------------------------------------===//
1218 // Main Entry Point
1219 //===----------------------------------------------------------------------===//
1221 unsigned RAGreedy::selectOrSplit(LiveInterval &VirtReg,
1222 SmallVectorImpl<LiveInterval*> &NewVRegs) {
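// Overall strategy: first look for a completely free register, then try to
// evict lighter interference; a range seen for the first time is deferred to a
// second round before splitting is attempted, and spilling is the last resort.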
1223 // First try assigning a free register.
1224 AllocationOrder Order(VirtReg.reg, *VRM, ReservedRegs);
1225 while (unsigned PhysReg = Order.next()) {
1226 if (!checkPhysRegInterference(VirtReg, PhysReg))
1230 if (unsigned PhysReg = tryEvict(VirtReg, Order, NewVRegs))
1233 assert(NewVRegs.empty() && "Cannot append to existing NewVRegs");
1235 // The first time we see a live range, don't try to split or spill.
1236 // Wait until the second time, when all smaller ranges have been allocated.
1237 // This gives a better picture of the interference to split around.
1238 LiveRangeStage Stage = getStage(VirtReg);
1239 if (Stage == RS_First) {
1240 LRStage[VirtReg.reg] = RS_Second;
1241 DEBUG(dbgs() << "wait for second round\n");
1242 NewVRegs.push_back(&VirtReg);
1246 assert(Stage < RS_Spill && "Cannot allocate after spilling");
1248 // Try splitting VirtReg or interferences.
1249 unsigned PhysReg = trySplit(VirtReg, Order, NewVRegs);
1250 if (PhysReg || !NewVRegs.empty())
1253 // Finally spill VirtReg itself.
1254 NamedRegionTimer T("Spiller", TimerGroupName, TimePassesIsEnabled);
1255 LiveRangeEdit LRE(VirtReg, NewVRegs, this);
1256 spiller().spill(LRE);
1257 setStage(NewVRegs.begin(), NewVRegs.end(), RS_Spill);
1260 MF->verify(this, "After spilling");
1262 // The live virtual register requesting allocation was spilled, so tell
1263 // the caller not to allocate anything during this round.
1267 bool RAGreedy::runOnMachineFunction(MachineFunction &mf) {
1268 DEBUG(dbgs() << "********** GREEDY REGISTER ALLOCATION **********\n"
1269 << "********** Function: "
1270 << ((Value*)mf.getFunction())->getName() << '\n');
1274 MF->verify(this, "Before greedy register allocator");
1276 RegAllocBase::init(getAnalysis<VirtRegMap>(), getAnalysis<LiveIntervals>());
1277 Indexes = &getAnalysis<SlotIndexes>();
1278 DomTree = &getAnalysis<MachineDominatorTree>();
1279 ReservedRegs = TRI->getReservedRegs(*MF);
1280 SpillerInstance.reset(createInlineSpiller(*this, *MF, *VRM));
1281 Loops = &getAnalysis<MachineLoopInfo>();
1282 LoopRanges = &getAnalysis<MachineLoopRanges>();
1283 Bundles = &getAnalysis<EdgeBundles>();
1284 SpillPlacer = &getAnalysis<SpillPlacement>();
1286 SA.reset(new SplitAnalysis(*VRM, *LIS, *Loops));
1287 SE.reset(new SplitEditor(*SA, *LIS, *VRM, *DomTree));
1289 LRStage.resize(MRI->getNumVirtRegs());
1290 IntfCache.init(MF, &PhysReg2LiveUnion[0], Indexes, TRI);
1294 LIS->addKillFlags();
1298 NamedRegionTimer T("Rewriter", TimerGroupName, TimePassesIsEnabled);
1299 VRM->rewrite(Indexes);
1302 // Write out new DBG_VALUE instructions.
1303 getAnalysis<LiveDebugVariables>().emitDebugValues(VRM);
1305 // The pass output is in VirtRegMap. Release all the transient data.