1 //===-- RegAllocGreedy.cpp - greedy register allocator --------------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file defines the RAGreedy function pass for register allocation in
11 // optimized builds.
13 //===----------------------------------------------------------------------===//
15 #define DEBUG_TYPE "regalloc"
16 #include "AllocationOrder.h"
17 #include "InterferenceCache.h"
18 #include "LiveDebugVariables.h"
19 #include "LiveRangeEdit.h"
20 #include "RegAllocBase.h"
22 #include "SpillPlacement.h"
24 #include "VirtRegMap.h"
25 #include "llvm/ADT/SparseBitVector.h"
26 #include "llvm/ADT/Statistic.h"
27 #include "llvm/Analysis/AliasAnalysis.h"
28 #include "llvm/Function.h"
29 #include "llvm/PassAnalysisSupport.h"
30 #include "llvm/CodeGen/CalcSpillWeights.h"
31 #include "llvm/CodeGen/EdgeBundles.h"
32 #include "llvm/CodeGen/LiveIntervalAnalysis.h"
33 #include "llvm/CodeGen/LiveStackAnalysis.h"
34 #include "llvm/CodeGen/MachineDominators.h"
35 #include "llvm/CodeGen/MachineFunctionPass.h"
36 #include "llvm/CodeGen/MachineLoopInfo.h"
37 #include "llvm/CodeGen/MachineLoopRanges.h"
38 #include "llvm/CodeGen/MachineRegisterInfo.h"
39 #include "llvm/CodeGen/Passes.h"
40 #include "llvm/CodeGen/RegAllocRegistry.h"
41 #include "llvm/CodeGen/RegisterCoalescer.h"
42 #include "llvm/Target/TargetOptions.h"
43 #include "llvm/Support/Debug.h"
44 #include "llvm/Support/ErrorHandling.h"
45 #include "llvm/Support/raw_ostream.h"
46 #include "llvm/Support/Timer.h"
52 STATISTIC(NumGlobalSplits, "Number of split global live ranges");
53 STATISTIC(NumLocalSplits, "Number of split local live ranges");
54 STATISTIC(NumEvicted, "Number of interferences evicted");
56 static RegisterRegAlloc greedyRegAlloc("greedy", "greedy register allocator",
57 createGreedyRegisterAllocator);
60 class RAGreedy : public MachineFunctionPass,
62 private LiveRangeEdit::Delegate {
66 BitVector ReservedRegs;
71 MachineDominatorTree *DomTree;
72 MachineLoopInfo *Loops;
73 MachineLoopRanges *LoopRanges;
75 SpillPlacement *SpillPlacer;
78 std::auto_ptr<Spiller> SpillerInstance;
79 std::priority_queue<std::pair<unsigned, unsigned> > Queue;
81 // Live ranges pass through a number of stages as we try to allocate them.
82 // Some of the stages may also create new live ranges:
84 // - Region splitting.
85 // - Per-block splitting.
86 // - Local splitting.
87 // - Spilling.
89 // Ranges produced by one of the stages skip the previous stages when they are
90 // dequeued. This improves performance because we can skip interference checks
91 // that are unlikely to give any results. It also guarantees that the live
92 // range splitting algorithm terminates, something that is otherwise hard to
93 // ensure.
94 enum LiveRangeStage {
95 RS_New, ///< Never seen before.
96 RS_First, ///< First time in the queue.
97 RS_Second, ///< Second time in the queue.
98 RS_Region, ///< Produced by region splitting.
99 RS_Block, ///< Produced by per-block splitting.
100 RS_Local, ///< Produced by local splitting.
101 RS_Spill ///< Produced by spilling.
102 };
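// A live range's stage only moves forward. For example, a range produced by
// region splitting enters the queue as RS_Region, so trySplit() will not try
// region splitting on it again; it can only be split per block, split locally,
// or spilled. This is what bounds the amount of splitting work per range.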
104 IndexedMap<unsigned char, VirtReg2IndexFunctor> LRStage;
106 LiveRangeStage getStage(const LiveInterval &VirtReg) const {
107 return LiveRangeStage(LRStage[VirtReg.reg]);
110 template<typename Iterator>
111 void setStage(Iterator Begin, Iterator End, LiveRangeStage NewStage) {
112 LRStage.resize(MRI->getNumVirtRegs());
113 for (;Begin != End; ++Begin) {
114 unsigned Reg = (*Begin)->reg;
115 if (LRStage[Reg] == RS_New)
116 LRStage[Reg] = NewStage;
121 std::auto_ptr<SplitAnalysis> SA;
122 std::auto_ptr<SplitEditor> SE;
124 /// Cached per-block interference maps
125 InterferenceCache IntfCache;
127 /// All basic blocks where the current register has uses.
128 SmallVector<SpillPlacement::BlockConstraint, 8> SplitConstraints;
130 /// Live-through blocks that have already been added to SpillPlacer.
131 SparseBitVector<> ActiveThroughBlocks;
133 /// Global live range splitting candidate info.
134 struct GlobalSplitCandidate {
136 BitVector LiveBundles;
139 /// Candidate info for each PhysReg in AllocationOrder.
140 /// This vector never shrinks, but grows to the size of the largest register
141 /// class.
142 SmallVector<GlobalSplitCandidate, 32> GlobalCand;
144 /// For every instruction in SA->UseSlots, store the previous non-copy
145 /// instruction.
146 SmallVector<SlotIndex, 8> PrevSlot;
151 /// Return the pass name.
152 virtual const char* getPassName() const {
153 return "Greedy Register Allocator";
156 /// RAGreedy analysis usage.
157 virtual void getAnalysisUsage(AnalysisUsage &AU) const;
158 virtual void releaseMemory();
159 virtual Spiller &spiller() { return *SpillerInstance; }
160 virtual void enqueue(LiveInterval *LI);
161 virtual LiveInterval *dequeue();
162 virtual unsigned selectOrSplit(LiveInterval&,
163 SmallVectorImpl<LiveInterval*>&);
165 /// Perform register allocation.
166 virtual bool runOnMachineFunction(MachineFunction &mf);
171 void LRE_WillEraseInstruction(MachineInstr*);
172 bool LRE_CanEraseVirtReg(unsigned);
173 void LRE_WillShrinkVirtReg(unsigned);
174 void LRE_DidCloneVirtReg(unsigned, unsigned);
176 bool addSplitConstraints(InterferenceCache::Cursor, float&);
177 void addThroughConstraints(InterferenceCache::Cursor, ArrayRef<unsigned>);
178 void growRegion(InterferenceCache::Cursor);
179 float calcGlobalSplitCost(unsigned, const BitVector&);
180 void splitAroundRegion(LiveInterval&, unsigned, const BitVector&,
181 SmallVectorImpl<LiveInterval*>&);
182 void calcGapWeights(unsigned, SmallVectorImpl<float>&);
183 SlotIndex getPrevMappedIndex(const MachineInstr*);
184 void calcPrevSlots();
185 unsigned nextSplitPoint(unsigned);
186 bool canEvictInterference(LiveInterval&, unsigned, float&);
188 unsigned tryEvict(LiveInterval&, AllocationOrder&,
189 SmallVectorImpl<LiveInterval*>&);
190 unsigned tryRegionSplit(LiveInterval&, AllocationOrder&,
191 SmallVectorImpl<LiveInterval*>&);
192 unsigned tryLocalSplit(LiveInterval&, AllocationOrder&,
193 SmallVectorImpl<LiveInterval*>&);
194 unsigned trySplit(LiveInterval&, AllocationOrder&,
195 SmallVectorImpl<LiveInterval*>&);
197 } // end anonymous namespace
199 char RAGreedy::ID = 0;
201 FunctionPass* llvm::createGreedyRegisterAllocator() {
202 return new RAGreedy();
205 RAGreedy::RAGreedy(): MachineFunctionPass(ID), LRStage(RS_New) {
206 initializeLiveDebugVariablesPass(*PassRegistry::getPassRegistry());
207 initializeSlotIndexesPass(*PassRegistry::getPassRegistry());
208 initializeLiveIntervalsPass(*PassRegistry::getPassRegistry());
210 initializeStrongPHIEliminationPass(*PassRegistry::getPassRegistry());
211 initializeRegisterCoalescerAnalysisGroup(*PassRegistry::getPassRegistry());
212 initializeCalculateSpillWeightsPass(*PassRegistry::getPassRegistry());
213 initializeLiveStacksPass(*PassRegistry::getPassRegistry());
214 initializeMachineDominatorTreePass(*PassRegistry::getPassRegistry());
215 initializeMachineLoopInfoPass(*PassRegistry::getPassRegistry());
216 initializeMachineLoopRangesPass(*PassRegistry::getPassRegistry());
217 initializeVirtRegMapPass(*PassRegistry::getPassRegistry());
218 initializeEdgeBundlesPass(*PassRegistry::getPassRegistry());
219 initializeSpillPlacementPass(*PassRegistry::getPassRegistry());
222 void RAGreedy::getAnalysisUsage(AnalysisUsage &AU) const {
223 AU.setPreservesCFG();
224 AU.addRequired<AliasAnalysis>();
225 AU.addPreserved<AliasAnalysis>();
226 AU.addRequired<LiveIntervals>();
227 AU.addRequired<SlotIndexes>();
228 AU.addPreserved<SlotIndexes>();
229 AU.addRequired<LiveDebugVariables>();
230 AU.addPreserved<LiveDebugVariables>();
232 AU.addRequiredID(StrongPHIEliminationID);
233 AU.addRequiredTransitive<RegisterCoalescer>();
234 AU.addRequired<CalculateSpillWeights>();
235 AU.addRequired<LiveStacks>();
236 AU.addPreserved<LiveStacks>();
237 AU.addRequired<MachineDominatorTree>();
238 AU.addPreserved<MachineDominatorTree>();
239 AU.addRequired<MachineLoopInfo>();
240 AU.addPreserved<MachineLoopInfo>();
241 AU.addRequired<MachineLoopRanges>();
242 AU.addPreserved<MachineLoopRanges>();
243 AU.addRequired<VirtRegMap>();
244 AU.addPreserved<VirtRegMap>();
245 AU.addRequired<EdgeBundles>();
246 AU.addRequired<SpillPlacement>();
247 MachineFunctionPass::getAnalysisUsage(AU);
251 //===----------------------------------------------------------------------===//
252 // LiveRangeEdit delegate methods
253 //===----------------------------------------------------------------------===//
255 void RAGreedy::LRE_WillEraseInstruction(MachineInstr *MI) {
256 // LRE itself will remove from SlotIndexes and parent basic block.
257 VRM->RemoveMachineInstrFromMaps(MI);
260 bool RAGreedy::LRE_CanEraseVirtReg(unsigned VirtReg) {
261 if (unsigned PhysReg = VRM->getPhys(VirtReg)) {
262 unassign(LIS->getInterval(VirtReg), PhysReg);
265 // Unassigned virtreg is probably in the priority queue.
266 // RegAllocBase will erase it after dequeueing.
270 void RAGreedy::LRE_WillShrinkVirtReg(unsigned VirtReg) {
271 unsigned PhysReg = VRM->getPhys(VirtReg);
275 // Register is assigned, put it back on the queue for reassignment.
276 LiveInterval &LI = LIS->getInterval(VirtReg);
277 unassign(LI, PhysReg);
281 void RAGreedy::LRE_DidCloneVirtReg(unsigned New, unsigned Old) {
282 // LRE may clone a virtual register because dead code elimination causes it to
283 // be split into connected components. Ensure that the new register gets the
284 // same stage as the parent.
286 LRStage[New] = LRStage[Old];
289 void RAGreedy::releaseMemory() {
290 SpillerInstance.reset(0);
292 RegAllocBase::releaseMemory();
295 void RAGreedy::enqueue(LiveInterval *LI) {
296 // Prioritize live ranges by size, assigning larger ranges first.
297 // The queue holds (size, reg) pairs.
298 const unsigned Size = LI->getSize();
299 const unsigned Reg = LI->reg;
300 assert(TargetRegisterInfo::isVirtualRegister(Reg) &&
301 "Can only enqueue virtual registers");
305 if (LRStage[Reg] == RS_New)
306 LRStage[Reg] = RS_First;
308 if (LRStage[Reg] == RS_Second)
309 // Unsplit ranges that couldn't be allocated immediately are deferred until
310 // everything else has been allocated. Long ranges are allocated last so
311 // they are split against realistic interference.
312 Prio = (1u << 31) - Size;
314 // Everything else is allocated in long->short order. Long ranges that don't
315 // fit should be spilled ASAP so they don't create interference.
316 Prio = (1u << 31) + Size;
318 // Boost ranges that have a physical register hint.
319 if (TargetRegisterInfo::isPhysicalRegister(VRM->getRegAllocPref(Reg)))
323 Queue.push(std::make_pair(Prio, Reg));
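// The priority queue pops the largest (Prio, Reg) pair first. For example, two
// fresh ranges of sizes 400 and 40 get priorities 2^31+400 and 2^31+40, so the
// larger one is allocated first, while a deferred RS_Second range always sorts
// below 2^31 and is only popped once all fresh ranges have been handled.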
326 LiveInterval *RAGreedy::dequeue() {
329 LiveInterval *LI = &LIS->getInterval(Queue.top().second);
334 //===----------------------------------------------------------------------===//
335 // Interference eviction
336 //===----------------------------------------------------------------------===//
338 /// canEvictInterference - Return true if all interferences between VirtReg
339 /// and PhysReg can be evicted.
340 /// Return false if any interference is heavier than MaxWeight.
341 /// On return, set MaxWeight to the maximal spill weight of an interference.
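/// For example, if VirtReg has spill weight 4.0 and the heaviest virtual
/// register blocking PhysReg or one of its aliases weighs 2.5, eviction is
/// allowed and MaxWeight becomes 2.5; if any blocker is a physical register or
/// weighs 4.0 or more, the candidate is rejected.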
342 bool RAGreedy::canEvictInterference(LiveInterval &VirtReg, unsigned PhysReg,
345 for (const unsigned *AliasI = TRI->getOverlaps(PhysReg); *AliasI; ++AliasI) {
346 LiveIntervalUnion::Query &Q = query(VirtReg, *AliasI);
347 // If there are 10 or more interferences, chances are one of them is heavier.
348 if (Q.collectInterferingVRegs(10, MaxWeight) >= 10)
351 // Check if any interfering live range is heavier than MaxWeight.
352 for (unsigned i = Q.interferingVRegs().size(); i; --i) {
353 LiveInterval *Intf = Q.interferingVRegs()[i - 1];
354 if (TargetRegisterInfo::isPhysicalRegister(Intf->reg))
356 if (Intf->weight >= MaxWeight)
358 Weight = std::max(Weight, Intf->weight);
365 /// tryEvict - Try to evict all interferences for a physreg.
366 /// @param VirtReg Currently unassigned virtual register.
367 /// @param Order Physregs to try.
368 /// @return Physreg to assign VirtReg, or 0.
369 unsigned RAGreedy::tryEvict(LiveInterval &VirtReg,
370 AllocationOrder &Order,
371 SmallVectorImpl<LiveInterval*> &NewVRegs){
372 NamedRegionTimer T("Evict", TimerGroupName, TimePassesIsEnabled);
374 // Keep track of the lightest single interference seen so far.
375 float BestWeight = VirtReg.weight;
376 unsigned BestPhys = 0;
379 while (unsigned PhysReg = Order.next()) {
380 float Weight = BestWeight;
381 if (!canEvictInterference(VirtReg, PhysReg, Weight))
384 // This is an eviction candidate.
385 DEBUG(dbgs() << PrintReg(PhysReg, TRI) << " interference = "
387 if (BestPhys && Weight >= BestWeight)
393 // Stop if the hint can be used.
394 if (Order.isHint(PhysReg))
401 DEBUG(dbgs() << "evicting " << PrintReg(BestPhys, TRI) << " interference\n");
402 for (const unsigned *AliasI = TRI->getOverlaps(BestPhys); *AliasI; ++AliasI) {
403 LiveIntervalUnion::Query &Q = query(VirtReg, *AliasI);
404 assert(Q.seenAllInterferences() && "Didn't check all interferences.");
405 for (unsigned i = 0, e = Q.interferingVRegs().size(); i != e; ++i) {
406 LiveInterval *Intf = Q.interferingVRegs()[i];
407 unassign(*Intf, VRM->getPhys(Intf->reg));
409 NewVRegs.push_back(Intf);
416 //===----------------------------------------------------------------------===//
417 //                               Region Splitting
418 //===----------------------------------------------------------------------===//
420 /// addSplitConstraints - Fill out the SplitConstraints vector based on the
421 /// interference pattern in Physreg and its aliases. Add the constraints to
422 /// SpillPlacement and return the static cost of this split in Cost, assuming
423 /// that all preferences in SplitConstraints are met.
424 /// Return false if there are no bundles with positive bias.
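/// Every spill or reload forced by the interference (counted in Ins below) is
/// weighted by its block's frequency, so e.g. two insertions in a block with
/// frequency 10 add 20 to the static cost.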
425 bool RAGreedy::addSplitConstraints(InterferenceCache::Cursor Intf,
427 ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks();
429 // Reset interference dependent info.
430 SplitConstraints.resize(UseBlocks.size());
431 float StaticCost = 0;
432 for (unsigned i = 0; i != UseBlocks.size(); ++i) {
433 const SplitAnalysis::BlockInfo &BI = UseBlocks[i];
434 SpillPlacement::BlockConstraint &BC = SplitConstraints[i];
436 BC.Number = BI.MBB->getNumber();
437 Intf.moveToBlock(BC.Number);
438 BC.Entry = BI.LiveIn ? SpillPlacement::PrefReg : SpillPlacement::DontCare;
439 BC.Exit = BI.LiveOut ? SpillPlacement::PrefReg : SpillPlacement::DontCare;
441 if (!Intf.hasInterference())
444 // Number of spill code instructions to insert.
447 // Interference for the live-in value.
449 if (Intf.first() <= Indexes->getMBBStartIdx(BC.Number))
450 BC.Entry = SpillPlacement::MustSpill, ++Ins;
451 else if (Intf.first() < BI.FirstUse)
452 BC.Entry = SpillPlacement::PrefSpill, ++Ins;
453 else if (Intf.first() < (BI.LiveThrough ? BI.LastUse : BI.Kill))
457 // Interference for the live-out value.
459 if (Intf.last() >= SA->getLastSplitPoint(BC.Number))
460 BC.Exit = SpillPlacement::MustSpill, ++Ins;
461 else if (Intf.last() > BI.LastUse)
462 BC.Exit = SpillPlacement::PrefSpill, ++Ins;
463 else if (Intf.last() > (BI.LiveThrough ? BI.FirstUse : BI.Def))
467 // Accumulate the total frequency of inserted spill code.
469 StaticCost += Ins * SpillPlacer->getBlockFrequency(BC.Number);
473 // Add constraints for use-blocks. Note that these are the only constraints
474 // that may add a positive bias; it is downhill from here.
475 SpillPlacer->addConstraints(SplitConstraints);
476 return SpillPlacer->scanActiveBundles();
480 /// addThroughConstraints - Add constraints and links to SpillPlacer from the
481 /// live-through blocks in Blocks.
482 void RAGreedy::addThroughConstraints(InterferenceCache::Cursor Intf,
483 ArrayRef<unsigned> Blocks) {
484 const unsigned GroupSize = 8;
485 SpillPlacement::BlockConstraint BCS[GroupSize];
486 unsigned TBS[GroupSize];
487 unsigned B = 0, T = 0;
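// Constraints for interfered blocks and links for interference-free blocks are
// buffered in the fixed-size BCS/TBS arrays and handed to SpillPlacer in
// batches of GroupSize instead of one block at a time.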
489 for (unsigned i = 0; i != Blocks.size(); ++i) {
490 unsigned Number = Blocks[i];
491 Intf.moveToBlock(Number);
493 if (!Intf.hasInterference()) {
494 assert(T < GroupSize && "Array overflow");
496 if (++T == GroupSize) {
497 SpillPlacer->addLinks(ArrayRef<unsigned>(TBS, T));
503 assert(B < GroupSize && "Array overflow");
504 BCS[B].Number = Number;
506 // Interference for the live-in value.
507 if (Intf.first() <= Indexes->getMBBStartIdx(Number))
508 BCS[B].Entry = SpillPlacement::MustSpill;
510 BCS[B].Entry = SpillPlacement::PrefSpill;
512 // Interference for the live-out value.
513 if (Intf.last() >= SA->getLastSplitPoint(Number))
514 BCS[B].Exit = SpillPlacement::MustSpill;
516 BCS[B].Exit = SpillPlacement::PrefSpill;
518 if (++B == GroupSize) {
519 ArrayRef<SpillPlacement::BlockConstraint> Array(BCS, B);
520 SpillPlacer->addConstraints(Array);
525 ArrayRef<SpillPlacement::BlockConstraint> Array(BCS, B);
526 SpillPlacer->addConstraints(Array);
527 SpillPlacer->addLinks(ArrayRef<unsigned>(TBS, T));
530 void RAGreedy::growRegion(InterferenceCache::Cursor Intf) {
531 // Keep track of through blocks that have already been added to SpillPlacer.
532 SparseBitVector<> Added;
533 SmallVector<unsigned, 16> ThroughBlocks;
535 unsigned Visited = 0;
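// The loop below alternates between adding constraints and letting the spill
// placer iterate: each batch of newly discovered through blocks can flip more
// bundles into preferring a register, which in turn exposes more through
// blocks in their periphery. It stops once getRecentPositive() comes back
// empty.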
538 ArrayRef<unsigned> NewBundles = SpillPlacer->getRecentPositive();
539 if (NewBundles.empty())
541 // Find new through blocks in the periphery of PrefRegBundles.
542 for (int i = 0, e = NewBundles.size(); i != e; ++i) {
543 unsigned Bundle = NewBundles[i];
544 // Look at all blocks connected to Bundle in the full graph.
545 ArrayRef<unsigned> Blocks = Bundles->getBlocks(Bundle);
546 for (ArrayRef<unsigned>::iterator I = Blocks.begin(), E = Blocks.end();
549 if (!SA->isThroughBlock(Block) || !Added.test_and_set(Block))
551 // This is a new through block. Add it to SpillPlacer later.
552 ThroughBlocks.push_back(Block);
558 // Any new blocks to add?
559 if (!ThroughBlocks.empty()) {
560 addThroughConstraints(Intf, ThroughBlocks);
561 ThroughBlocks.clear();
563 // Perhaps iterating can enable more bundles?
564 SpillPlacer->iterate();
567 // Remember the relevant set of through blocks for splitAroundRegion().
568 ActiveThroughBlocks |= Added;
569 DEBUG(dbgs() << ", v=" << Visited);
572 /// calcGlobalSplitCost - Return the global split cost of following the split
573 /// pattern in LiveBundles. This cost should be added to the local cost of the
574 /// interference pattern in SplitConstraints.
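/// For the live-through blocks, the cost is one block-frequency unit when the
/// range is in a register on only one side of the block, and two units when it
/// is in a register on both sides but the block has interference.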
576 float RAGreedy::calcGlobalSplitCost(unsigned PhysReg,
577 const BitVector &LiveBundles) {
578 float GlobalCost = 0;
579 ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks();
580 for (unsigned i = 0; i != UseBlocks.size(); ++i) {
581 const SplitAnalysis::BlockInfo &BI = UseBlocks[i];
582 SpillPlacement::BlockConstraint &BC = SplitConstraints[i];
583 bool RegIn = LiveBundles[Bundles->getBundle(BC.Number, 0)];
584 bool RegOut = LiveBundles[Bundles->getBundle(BC.Number, 1)];
588 Ins += RegIn != (BC.Entry == SpillPlacement::PrefReg);
590 Ins += RegOut != (BC.Exit == SpillPlacement::PrefReg);
592 GlobalCost += Ins * SpillPlacer->getBlockFrequency(BC.Number);
595 InterferenceCache::Cursor Intf(IntfCache, PhysReg);
596 for (SparseBitVector<>::iterator I = ActiveThroughBlocks.begin(),
597 E = ActiveThroughBlocks.end(); I != E; ++I) {
598 unsigned Number = *I;
599 bool RegIn = LiveBundles[Bundles->getBundle(Number, 0)];
600 bool RegOut = LiveBundles[Bundles->getBundle(Number, 1)];
601 if (!RegIn && !RegOut)
603 if (RegIn && RegOut) {
604 // We need double spill code if this block has interference.
605 Intf.moveToBlock(Number);
606 if (Intf.hasInterference())
607 GlobalCost += 2*SpillPlacer->getBlockFrequency(Number);
610 // live-in / stack-out or stack-in / live-out.
611 GlobalCost += SpillPlacer->getBlockFrequency(Number);
616 /// splitAroundRegion - Split VirtReg around the region determined by
617 /// LiveBundles. Make an effort to avoid interference from PhysReg.
619 /// The 'register' interval is going to contain as many uses as possible while
620 /// avoiding interference. The 'stack' interval is the complement constructed by
621 /// SplitEditor. It will contain the rest.
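/// In outline, the use blocks are visited twice: first the blocks where the
/// value is live out in a register bundle, then the blocks where it is live
/// in, and finally the live-through blocks are handled.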
623 void RAGreedy::splitAroundRegion(LiveInterval &VirtReg, unsigned PhysReg,
624 const BitVector &LiveBundles,
625 SmallVectorImpl<LiveInterval*> &NewVRegs) {
627 dbgs() << "Splitting around region for " << PrintReg(PhysReg, TRI)
629 for (int i = LiveBundles.find_first(); i>=0; i = LiveBundles.find_next(i))
630 dbgs() << " EB#" << i;
634 InterferenceCache::Cursor Intf(IntfCache, PhysReg);
635 LiveRangeEdit LREdit(VirtReg, NewVRegs, this);
638 // Create the main cross-block interval.
641 // First add all defs that are live out of a block.
642 ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks();
643 for (unsigned i = 0; i != UseBlocks.size(); ++i) {
644 const SplitAnalysis::BlockInfo &BI = UseBlocks[i];
645 bool RegIn = LiveBundles[Bundles->getBundle(BI.MBB->getNumber(), 0)];
646 bool RegOut = LiveBundles[Bundles->getBundle(BI.MBB->getNumber(), 1)];
648 // Should the register be live out?
649 if (!BI.LiveOut || !RegOut)
652 SlotIndex Start, Stop;
653 tie(Start, Stop) = Indexes->getMBBRange(BI.MBB);
654 Intf.moveToBlock(BI.MBB->getNumber());
655 DEBUG(dbgs() << "BB#" << BI.MBB->getNumber() << " -> EB#"
656 << Bundles->getBundle(BI.MBB->getNumber(), 1)
657 << " [" << Start << ';'
658 << SA->getLastSplitPoint(BI.MBB->getNumber()) << '-' << Stop
659 << ") intf [" << Intf.first() << ';' << Intf.last() << ')');
661 // The interference interval should either be invalid or overlap MBB.
662 assert((!Intf.hasInterference() || Intf.first() < Stop)
663 && "Bad interference");
664 assert((!Intf.hasInterference() || Intf.last() > Start)
665 && "Bad interference");
667 // Check interference leaving the block.
668 if (!Intf.hasInterference()) {
669 // Block is interference-free.
670 DEBUG(dbgs() << ", no interference");
671 if (!BI.LiveThrough) {
672 DEBUG(dbgs() << ", not live-through.\n");
673 SE->useIntv(SE->enterIntvBefore(BI.Def), Stop);
677 // Block is live-through, but entry bundle is on the stack.
678 // Reload just before the first use.
679 DEBUG(dbgs() << ", not live-in, enter before first use.\n");
680 SE->useIntv(SE->enterIntvBefore(BI.FirstUse), Stop);
683 DEBUG(dbgs() << ", live-through.\n");
687 // Block has interference.
688 DEBUG(dbgs() << ", interference to " << Intf.last());
690 if (!BI.LiveThrough && Intf.last() <= BI.Def) {
691 // The interference doesn't reach the outgoing segment.
692 DEBUG(dbgs() << " doesn't affect def from " << BI.Def << '\n');
693 SE->useIntv(BI.Def, Stop);
697 SlotIndex LastSplitPoint = SA->getLastSplitPoint(BI.MBB->getNumber());
698 if (Intf.last().getBoundaryIndex() < BI.LastUse) {
699 // There are interference-free uses at the end of the block.
700 // Find the first use that can get the live-out register.
701 SmallVectorImpl<SlotIndex>::const_iterator UI =
702 std::lower_bound(SA->UseSlots.begin(), SA->UseSlots.end(),
703 Intf.last().getBoundaryIndex());
704 assert(UI != SA->UseSlots.end() && "Couldn't find last use");
706 assert(Use <= BI.LastUse && "Couldn't find last use");
707 // Only attempt a split before the last split point.
708 if (Use.getBaseIndex() <= LastSplitPoint) {
709 DEBUG(dbgs() << ", free use at " << Use << ".\n");
710 SlotIndex SegStart = SE->enterIntvBefore(Use);
711 assert(SegStart >= Intf.last() && "Couldn't avoid interference");
712 assert(SegStart < LastSplitPoint && "Impossible split point");
713 SE->useIntv(SegStart, Stop);
718 // Interference is after the last use.
719 DEBUG(dbgs() << " after last use.\n");
720 SlotIndex SegStart = SE->enterIntvAtEnd(*BI.MBB);
721 assert(SegStart >= Intf.last() && "Couldn't avoid interference");
724 // Now all defs leading to live bundles are handled, do everything else.
725 for (unsigned i = 0; i != UseBlocks.size(); ++i) {
726 const SplitAnalysis::BlockInfo &BI = UseBlocks[i];
727 bool RegIn = LiveBundles[Bundles->getBundle(BI.MBB->getNumber(), 0)];
728 bool RegOut = LiveBundles[Bundles->getBundle(BI.MBB->getNumber(), 1)];
730 // Is the register live-in?
731 if (!BI.LiveIn || !RegIn)
734 // We have an incoming register. Check for interference.
735 SlotIndex Start, Stop;
736 tie(Start, Stop) = Indexes->getMBBRange(BI.MBB);
737 Intf.moveToBlock(BI.MBB->getNumber());
738 DEBUG(dbgs() << "EB#" << Bundles->getBundle(BI.MBB->getNumber(), 0)
739 << " -> BB#" << BI.MBB->getNumber() << " [" << Start << ';'
740 << SA->getLastSplitPoint(BI.MBB->getNumber()) << '-' << Stop
743 // Check interference entering the block.
744 if (!Intf.hasInterference()) {
745 // Block is interference-free.
746 DEBUG(dbgs() << ", no interference");
747 if (!BI.LiveThrough) {
748 DEBUG(dbgs() << ", killed in block.\n");
749 SE->useIntv(Start, SE->leaveIntvAfter(BI.Kill));
753 SlotIndex LastSplitPoint = SA->getLastSplitPoint(BI.MBB->getNumber());
754 // Block is live-through, but exit bundle is on the stack.
755 // Spill immediately after the last use.
756 if (BI.LastUse < LastSplitPoint) {
757 DEBUG(dbgs() << ", uses, stack-out.\n");
758 SE->useIntv(Start, SE->leaveIntvAfter(BI.LastUse));
761 // The last use is after the last split point; it is probably an
762 // indirect jump.
763 DEBUG(dbgs() << ", uses at " << BI.LastUse << " after split point "
764 << LastSplitPoint << ", stack-out.\n");
765 SlotIndex SegEnd = SE->leaveIntvBefore(LastSplitPoint);
766 SE->useIntv(Start, SegEnd);
767 // Run a double interval from the split to the last use.
768 // This makes it possible to spill the complement without affecting the
769 // uses after the split point.
770 SE->overlapIntv(SegEnd, BI.LastUse);
773 // Register is live-through.
774 DEBUG(dbgs() << ", uses, live-through.\n");
775 SE->useIntv(Start, Stop);
779 // Block has interference.
780 DEBUG(dbgs() << ", interference from " << Intf.first());
782 if (!BI.LiveThrough && Intf.first() >= BI.Kill) {
783 // The interference doesn't reach the incoming segment.
784 DEBUG(dbgs() << " doesn't affect kill at " << BI.Kill << '\n');
785 SE->useIntv(Start, BI.Kill);
789 if (Intf.first().getBaseIndex() > BI.FirstUse) {
790 // There are interference-free uses at the beginning of the block.
791 // Find the last use that can get the register.
792 SmallVectorImpl<SlotIndex>::const_iterator UI =
793 std::lower_bound(SA->UseSlots.begin(), SA->UseSlots.end(),
794 Intf.first().getBaseIndex());
795 assert(UI != SA->UseSlots.begin() && "Couldn't find first use");
796 SlotIndex Use = (--UI)->getBoundaryIndex();
797 DEBUG(dbgs() << ", free use at " << *UI << ".\n");
798 SlotIndex SegEnd = SE->leaveIntvAfter(Use);
799 assert(SegEnd <= Intf.first() && "Couldn't avoid interference");
800 SE->useIntv(Start, SegEnd);
804 // Interference is before the first use.
805 DEBUG(dbgs() << " before first use.\n");
806 SlotIndex SegEnd = SE->leaveIntvAtTop(*BI.MBB);
807 assert(SegEnd <= Intf.first() && "Couldn't avoid interference");
810 // Handle live-through blocks.
811 for (SparseBitVector<>::iterator I = ActiveThroughBlocks.begin(),
812 E = ActiveThroughBlocks.end(); I != E; ++I) {
813 unsigned Number = *I;
814 bool RegIn = LiveBundles[Bundles->getBundle(Number, 0)];
815 bool RegOut = LiveBundles[Bundles->getBundle(Number, 1)];
816 DEBUG(dbgs() << "Live through BB#" << Number << '\n');
817 if (RegIn && RegOut) {
818 Intf.moveToBlock(Number);
819 if (!Intf.hasInterference()) {
820 SE->useIntv(Indexes->getMBBStartIdx(Number),
821 Indexes->getMBBEndIdx(Number));
825 MachineBasicBlock *MBB = MF->getBlockNumbered(Number);
827 SE->leaveIntvAtTop(*MBB);
829 SE->enterIntvAtEnd(*MBB);
834 // FIXME: Should we be more aggressive about splitting the stack region into
835 // per-block segments? The current approach allows the stack region to
836 // separate into connected components. Some components may be allocatable.
841 MF->verify(this, "After splitting live range around region");
844 unsigned RAGreedy::tryRegionSplit(LiveInterval &VirtReg, AllocationOrder &Order,
845 SmallVectorImpl<LiveInterval*> &NewVRegs) {
846 BitVector LiveBundles, BestBundles;
848 unsigned BestReg = 0;
849 ActiveThroughBlocks.clear();
852 for (unsigned Cand = 0; unsigned PhysReg = Order.next(); ++Cand) {
853 if (GlobalCand.size() <= Cand)
854 GlobalCand.resize(Cand+1);
855 GlobalCand[Cand].PhysReg = PhysReg;
857 SpillPlacer->prepare(LiveBundles);
859 InterferenceCache::Cursor Intf(IntfCache, PhysReg);
860 if (!addSplitConstraints(Intf, Cost)) {
861 DEBUG(dbgs() << PrintReg(PhysReg, TRI) << "\tno positive bundles\n");
864 DEBUG(dbgs() << PrintReg(PhysReg, TRI) << "\tstatic = " << Cost);
865 if (BestReg && Cost >= BestCost) {
866 DEBUG(dbgs() << " worse than " << PrintReg(BestReg, TRI) << '\n');
871 SpillPlacer->finish();
873 // No live bundles, defer to splitSingleBlocks().
874 if (!LiveBundles.any()) {
875 DEBUG(dbgs() << " no bundles.\n");
879 Cost += calcGlobalSplitCost(PhysReg, LiveBundles);
881 dbgs() << ", total = " << Cost << " with bundles";
882 for (int i = LiveBundles.find_first(); i>=0; i = LiveBundles.find_next(i))
883 dbgs() << " EB#" << i;
886 if (!BestReg || Cost < BestCost) {
888 BestCost = 0.98f * Cost; // Prevent rounding effects.
889 BestBundles.swap(LiveBundles);
896 splitAroundRegion(VirtReg, BestReg, BestBundles, NewVRegs);
897 setStage(NewVRegs.begin(), NewVRegs.end(), RS_Region);
902 //===----------------------------------------------------------------------===//
903 //                               Local Splitting
904 //===----------------------------------------------------------------------===//
907 /// calcGapWeights - Compute the maximum spill weight that needs to be evicted
908 /// in order to use PhysReg between two entries in SA->UseSlots.
910 /// GapWeight[i] represents the gap between UseSlots[i] and UseSlots[i+1].
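/// For example, with uses at slots 4, 12 and 20 there are two gaps; an
/// interfering segment that covers slot 12 raises both GapWeight[0] and
/// GapWeight[1] to at least that segment's spill weight.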
912 void RAGreedy::calcGapWeights(unsigned PhysReg,
913 SmallVectorImpl<float> &GapWeight) {
914 assert(SA->getUseBlocks().size() == 1 && "Not a local interval");
915 const SplitAnalysis::BlockInfo &BI = SA->getUseBlocks().front();
916 const SmallVectorImpl<SlotIndex> &Uses = SA->UseSlots;
917 const unsigned NumGaps = Uses.size()-1;
919 // Start and end points for the interference check.
920 SlotIndex StartIdx = BI.LiveIn ? BI.FirstUse.getBaseIndex() : BI.FirstUse;
921 SlotIndex StopIdx = BI.LiveOut ? BI.LastUse.getBoundaryIndex() : BI.LastUse;
923 GapWeight.assign(NumGaps, 0.0f);
925 // Add interference from each overlapping register.
926 for (const unsigned *AI = TRI->getOverlaps(PhysReg); *AI; ++AI) {
927 if (!query(const_cast<LiveInterval&>(SA->getParent()), *AI)
928 .checkInterference())
931 // We know that VirtReg is a continuous interval from FirstUse to LastUse,
932 // so we don't need InterferenceQuery.
934 // Interference that overlaps an instruction is counted in both gaps
935 // surrounding the instruction. The exception is interference before
936 // StartIdx and after StopIdx.
938 LiveIntervalUnion::SegmentIter IntI = PhysReg2LiveUnion[*AI].find(StartIdx);
939 for (unsigned Gap = 0; IntI.valid() && IntI.start() < StopIdx; ++IntI) {
940 // Skip the gaps before IntI.
941 while (Uses[Gap+1].getBoundaryIndex() < IntI.start())
942 if (++Gap == NumGaps)
947 // Update the gaps covered by IntI.
948 const float weight = IntI.value()->weight;
949 for (; Gap != NumGaps; ++Gap) {
950 GapWeight[Gap] = std::max(GapWeight[Gap], weight);
951 if (Uses[Gap+1].getBaseIndex() >= IntI.stop())
960 /// getPrevMappedIndex - Return the slot index of the last non-copy instruction
961 /// before MI that has a slot index. If MI is the first mapped instruction in
962 /// its block, return the block start index instead.
964 SlotIndex RAGreedy::getPrevMappedIndex(const MachineInstr *MI) {
965 assert(MI && "Missing MachineInstr");
966 const MachineBasicBlock *MBB = MI->getParent();
967 MachineBasicBlock::const_iterator B = MBB->begin(), I = MI;
968 while (I != B)
969 if (!(--I)->isDebugValue() && !I->isCopy())
970 return Indexes->getInstructionIndex(I);
971 return Indexes->getMBBStartIdx(MBB);
974 /// calcPrevSlots - Fill in the PrevSlot array with the index of the previous
975 /// real non-copy instruction for each instruction in SA->UseSlots.
977 void RAGreedy::calcPrevSlots() {
978 const SmallVectorImpl<SlotIndex> &Uses = SA->UseSlots;
980 PrevSlot.reserve(Uses.size());
981 for (unsigned i = 0, e = Uses.size(); i != e; ++i) {
982 const MachineInstr *MI = Indexes->getInstructionFromIndex(Uses[i]);
983 PrevSlot.push_back(getPrevMappedIndex(MI).getDefIndex());
987 /// nextSplitPoint - Find the next index into SA->UseSlots > i such that it may
988 /// be beneficial to split before UseSlots[i].
990 /// 0 is always a valid split point
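/// For example, if UseSlots[2] immediately follows UseSlots[1] with no other
/// real instruction between them, nextSplitPoint(1) skips index 2 and keeps
/// scanning until it finds a use with a real gap before it, or hits the end.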
991 unsigned RAGreedy::nextSplitPoint(unsigned i) {
992 const SmallVectorImpl<SlotIndex> &Uses = SA->UseSlots;
993 const unsigned Size = Uses.size();
994 assert(i != Size && "No split points after the end");
995 // Allow split before i when Uses[i] is not adjacent to the previous use.
996 while (++i != Size && PrevSlot[i].getBaseIndex() <= Uses[i-1].getBaseIndex())
1001 /// tryLocalSplit - Try to split VirtReg into smaller intervals inside its only
1002 /// basic block.
1004 unsigned RAGreedy::tryLocalSplit(LiveInterval &VirtReg, AllocationOrder &Order,
1005 SmallVectorImpl<LiveInterval*> &NewVRegs) {
1006 assert(SA->getUseBlocks().size() == 1 && "Not a local interval");
1007 const SplitAnalysis::BlockInfo &BI = SA->getUseBlocks().front();
1009 // Note that it is possible to have an interval that is live-in or live-out
1010 // while only covering a single block - A phi-def can use undef values from
1011 // predecessors, and the block could be a single-block loop.
1012 // We don't bother doing anything clever about such a case, we simply assume
1013 // that the interval is continuous from FirstUse to LastUse. We should make
1014 // sure that we don't do anything illegal to such an interval, though.
1016 const SmallVectorImpl<SlotIndex> &Uses = SA->UseSlots;
1017 if (Uses.size() <= 2)
1019 const unsigned NumGaps = Uses.size()-1;
1022 dbgs() << "tryLocalSplit: ";
1023 for (unsigned i = 0, e = Uses.size(); i != e; ++i)
1024 dbgs() << ' ' << SA->UseSlots[i];
1028 // For every use, find the previous mapped non-copy instruction.
1029 // We use this to detect valid split points, and to estimate new interval
1030 // sizes.
1031 calcPrevSlots();
1033 unsigned BestBefore = NumGaps;
1034 unsigned BestAfter = 0;
1037 const float blockFreq = SpillPlacer->getBlockFrequency(BI.MBB->getNumber());
1038 SmallVector<float, 8> GapWeight;
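// For each candidate PhysReg, the loop below slides a window of uses
// [SplitBefore, SplitAfter] across the block: when the estimated weight of the
// would-be interval beats the interference it would have to evict (MaxGap),
// the window is extended; otherwise it is shrunk from the front. The best
// (EstWeight - MaxGap) seen over all windows and registers decides where to
// split.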
1041 while (unsigned PhysReg = Order.next()) {
1042 // Keep track of the largest spill weight that would need to be evicted in
1043 // order to make use of PhysReg between UseSlots[i] and UseSlots[i+1].
1044 calcGapWeights(PhysReg, GapWeight);
1046 // Try to find the best sequence of gaps to close.
1047 // The new spill weight must be larger than any gap interference.
1049 // We will split before Uses[SplitBefore] and after Uses[SplitAfter].
1050 unsigned SplitBefore = 0, SplitAfter = nextSplitPoint(1) - 1;
1052 // MaxGap should always be max(GapWeight[SplitBefore..SplitAfter-1]).
1053 // It is the spill weight that needs to be evicted.
1054 float MaxGap = GapWeight[0];
1055 for (unsigned i = 1; i != SplitAfter; ++i)
1056 MaxGap = std::max(MaxGap, GapWeight[i]);
1059 // Live before/after split?
1060 const bool LiveBefore = SplitBefore != 0 || BI.LiveIn;
1061 const bool LiveAfter = SplitAfter != NumGaps || BI.LiveOut;
1063 DEBUG(dbgs() << PrintReg(PhysReg, TRI) << ' '
1064 << Uses[SplitBefore] << '-' << Uses[SplitAfter]
1065 << " i=" << MaxGap);
1067 // Stop before the interval gets so big we wouldn't be making progress.
1068 if (!LiveBefore && !LiveAfter) {
1069 DEBUG(dbgs() << " all\n");
1072 // Should the interval be extended or shrunk?
1074 if (MaxGap < HUGE_VALF) {
1075 // Estimate the new spill weight.
1077 // Each instruction reads and writes the register, except the first
1078 // instr doesn't read when !LiveBefore, and the last instr doesn't write
1079 // when !LiveAfter.
1081 // We will be inserting copies before and after, so the total number of
1082 // reads and writes is 2 * EstUses.
1084 const unsigned EstUses = 2*(SplitAfter - SplitBefore) +
1085 2*(LiveBefore + LiveAfter);
1087 // Try to guess the size of the new interval. This should be trivial,
1088 // but the slot index of an inserted copy can be a lot smaller than the
1089 // instruction it is inserted before if there are many dead indexes
1090 // between them.
1092 // We measure the distance from the instruction before SplitBefore to
1093 // get a conservative estimate.
1095 // The final distance can still be different if inserting copies
1096 // triggers a slot index renumbering.
1098 const float EstWeight = normalizeSpillWeight(blockFreq * EstUses,
1099 PrevSlot[SplitBefore].distance(Uses[SplitAfter]));
1100 // Would this split be possible to allocate?
1101 // Never allocate all gaps, we wouldn't be making progress.
1102 float Diff = EstWeight - MaxGap;
1103 DEBUG(dbgs() << " w=" << EstWeight << " d=" << Diff);
1106 if (Diff > BestDiff) {
1107 DEBUG(dbgs() << " (best)");
1109 BestBefore = SplitBefore;
1110 BestAfter = SplitAfter;
1117 SplitBefore = nextSplitPoint(SplitBefore);
1118 if (SplitBefore < SplitAfter) {
1119 DEBUG(dbgs() << " shrink\n");
1120 // Recompute the max when necessary.
1121 if (GapWeight[SplitBefore - 1] >= MaxGap) {
1122 MaxGap = GapWeight[SplitBefore];
1123 for (unsigned i = SplitBefore + 1; i != SplitAfter; ++i)
1124 MaxGap = std::max(MaxGap, GapWeight[i]);
1131 // Try to extend the interval.
1132 if (SplitAfter >= NumGaps) {
1133 DEBUG(dbgs() << " end\n");
1137 DEBUG(dbgs() << " extend\n");
1138 for (unsigned e = nextSplitPoint(SplitAfter + 1) - 1;
1139 SplitAfter != e; ++SplitAfter)
1140 MaxGap = std::max(MaxGap, GapWeight[SplitAfter]);
1145 // Didn't find any candidates?
1146 if (BestBefore == NumGaps)
1149 DEBUG(dbgs() << "Best local split range: " << Uses[BestBefore]
1150 << '-' << Uses[BestAfter] << ", " << BestDiff
1151 << ", " << (BestAfter - BestBefore + 1) << " instrs\n");
1153 LiveRangeEdit LREdit(VirtReg, NewVRegs, this);
1157 SlotIndex SegStart = SE->enterIntvBefore(Uses[BestBefore]);
1158 SlotIndex SegStop = SE->leaveIntvAfter(Uses[BestAfter]);
1159 SE->useIntv(SegStart, SegStop);
1162 setStage(NewVRegs.begin(), NewVRegs.end(), RS_Local);
1168 //===----------------------------------------------------------------------===//
1169 // Live Range Splitting
1170 //===----------------------------------------------------------------------===//
1172 /// trySplit - Try to split VirtReg or one of its interferences, making it
1173 /// assignable.
1174 /// @return Physreg when VirtReg may be assigned and/or new NewVRegs.
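/// Strategies are tried from coarse to fine: an interval confined to a single
/// block goes straight to tryLocalSplit(), a global one is first split around
/// a region and then into single-block pieces, and the stage check keeps a
/// range from repeating the kind of split that produced it.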
1175 unsigned RAGreedy::trySplit(LiveInterval &VirtReg, AllocationOrder &Order,
1176 SmallVectorImpl<LiveInterval*>&NewVRegs) {
1177 // Local intervals are handled separately.
1178 if (LIS->intervalIsInOneMBB(VirtReg)) {
1179 NamedRegionTimer T("Local Splitting", TimerGroupName, TimePassesIsEnabled);
1180 SA->analyze(&VirtReg);
1181 return tryLocalSplit(VirtReg, Order, NewVRegs);
1184 NamedRegionTimer T("Global Splitting", TimerGroupName, TimePassesIsEnabled);
1186 // Don't iterate global splitting.
1187 // Move straight to spilling if this range was produced by a global split.
1188 LiveRangeStage Stage = getStage(VirtReg);
1189 if (Stage >= RS_Block)
1192 SA->analyze(&VirtReg);
1194 // First try to split around a region spanning multiple blocks.
1195 if (Stage < RS_Region) {
1196 unsigned PhysReg = tryRegionSplit(VirtReg, Order, NewVRegs);
1197 if (PhysReg || !NewVRegs.empty())
1201 // Then isolate blocks with multiple uses.
1202 if (Stage < RS_Block) {
1203 SplitAnalysis::BlockPtrSet Blocks;
1204 if (SA->getMultiUseBlocks(Blocks)) {
1205 LiveRangeEdit LREdit(VirtReg, NewVRegs, this);
1207 SE->splitSingleBlocks(Blocks);
1208 setStage(NewVRegs.begin(), NewVRegs.end(), RS_Block);
1210 MF->verify(this, "After splitting live range around basic blocks");
1214 // Don't assign any physregs.
1219 //===----------------------------------------------------------------------===//
1220 //                               Main Entry Point
1221 //===----------------------------------------------------------------------===//
1223 unsigned RAGreedy::selectOrSplit(LiveInterval &VirtReg,
1224 SmallVectorImpl<LiveInterval*> &NewVRegs) {
1225 // First try assigning a free register.
1226 AllocationOrder Order(VirtReg.reg, *VRM, ReservedRegs);
1227 while (unsigned PhysReg = Order.next()) {
1228 if (!checkPhysRegInterference(VirtReg, PhysReg))
1232 if (unsigned PhysReg = tryEvict(VirtReg, Order, NewVRegs))
1235 assert(NewVRegs.empty() && "Cannot append to existing NewVRegs");
1237 // The first time we see a live range, don't try to split or spill.
1238 // Wait until the second time, when all smaller ranges have been allocated.
1239 // This gives a better picture of the interference to split around.
1240 LiveRangeStage Stage = getStage(VirtReg);
1241 if (Stage == RS_First) {
1242 LRStage[VirtReg.reg] = RS_Second;
1243 DEBUG(dbgs() << "wait for second round\n");
1244 NewVRegs.push_back(&VirtReg);
1248 assert(Stage < RS_Spill && "Cannot allocate after spilling");
1250 // Try splitting VirtReg or interferences.
1251 unsigned PhysReg = trySplit(VirtReg, Order, NewVRegs);
1252 if (PhysReg || !NewVRegs.empty())
1255 // Finally spill VirtReg itself.
1256 NamedRegionTimer T("Spiller", TimerGroupName, TimePassesIsEnabled);
1257 LiveRangeEdit LRE(VirtReg, NewVRegs, this);
1258 spiller().spill(LRE);
1259 setStage(NewVRegs.begin(), NewVRegs.end(), RS_Spill);
1262 MF->verify(this, "After spilling");
1264 // The live virtual register requesting allocation was spilled, so tell
1265 // the caller not to allocate anything during this round.
1269 bool RAGreedy::runOnMachineFunction(MachineFunction &mf) {
1270 DEBUG(dbgs() << "********** GREEDY REGISTER ALLOCATION **********\n"
1271 << "********** Function: "
1272 << ((Value*)mf.getFunction())->getName() << '\n');
1276 MF->verify(this, "Before greedy register allocator");
1278 RegAllocBase::init(getAnalysis<VirtRegMap>(), getAnalysis<LiveIntervals>());
1279 Indexes = &getAnalysis<SlotIndexes>();
1280 DomTree = &getAnalysis<MachineDominatorTree>();
1281 ReservedRegs = TRI->getReservedRegs(*MF);
1282 SpillerInstance.reset(createInlineSpiller(*this, *MF, *VRM));
1283 Loops = &getAnalysis<MachineLoopInfo>();
1284 LoopRanges = &getAnalysis<MachineLoopRanges>();
1285 Bundles = &getAnalysis<EdgeBundles>();
1286 SpillPlacer = &getAnalysis<SpillPlacement>();
1288 SA.reset(new SplitAnalysis(*VRM, *LIS, *Loops));
1289 SE.reset(new SplitEditor(*SA, *LIS, *VRM, *DomTree));
1291 LRStage.resize(MRI->getNumVirtRegs());
1292 IntfCache.init(MF, &PhysReg2LiveUnion[0], Indexes, TRI);
1296 LIS->addKillFlags();
1300 NamedRegionTimer T("Rewriter", TimerGroupName, TimePassesIsEnabled);
1301 VRM->rewrite(Indexes);
1304 // Write out new DBG_VALUE instructions.
1305 getAnalysis<LiveDebugVariables>().emitDebugValues(VRM);
1307 // The pass output is in VirtRegMap. Release all the transient data.