1 //===-- RegAllocGreedy.cpp - greedy register allocator --------------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file defines the RAGreedy function pass for register allocation in
// the optimized register allocator. (NOTE: sentence continuation appears to
// have been dropped from this listing — confirm against upstream header.)
13 //===----------------------------------------------------------------------===//
15 #define DEBUG_TYPE "regalloc"
16 #include "AllocationOrder.h"
17 #include "LiveIntervalUnion.h"
18 #include "LiveRangeEdit.h"
19 #include "RegAllocBase.h"
21 #include "SpillPlacement.h"
23 #include "VirtRegMap.h"
24 #include "llvm/ADT/Statistic.h"
25 #include "llvm/Analysis/AliasAnalysis.h"
26 #include "llvm/Function.h"
27 #include "llvm/PassAnalysisSupport.h"
28 #include "llvm/CodeGen/CalcSpillWeights.h"
29 #include "llvm/CodeGen/EdgeBundles.h"
30 #include "llvm/CodeGen/LiveIntervalAnalysis.h"
31 #include "llvm/CodeGen/LiveStackAnalysis.h"
32 #include "llvm/CodeGen/MachineDominators.h"
33 #include "llvm/CodeGen/MachineFunctionPass.h"
34 #include "llvm/CodeGen/MachineLoopInfo.h"
35 #include "llvm/CodeGen/MachineLoopRanges.h"
36 #include "llvm/CodeGen/MachineRegisterInfo.h"
37 #include "llvm/CodeGen/Passes.h"
38 #include "llvm/CodeGen/RegAllocRegistry.h"
39 #include "llvm/CodeGen/RegisterCoalescer.h"
40 #include "llvm/Target/TargetOptions.h"
41 #include "llvm/Support/Debug.h"
42 #include "llvm/Support/ErrorHandling.h"
43 #include "llvm/Support/raw_ostream.h"
44 #include "llvm/Support/Timer.h"
// Pass-wide statistics counters, reported with -stats.
50 STATISTIC(NumGlobalSplits, "Number of split global live ranges");
51 STATISTIC(NumLocalSplits, "Number of split local live ranges");
52 STATISTIC(NumReassigned, "Number of interferences reassigned");
53 STATISTIC(NumEvicted, "Number of interferences evicted");
// Register this allocator with the RegAllocRegistry so it can be selected
// on the command line (-regalloc=greedy).
55 static RegisterRegAlloc greedyRegAlloc("greedy", "greedy register allocator",
56 createGreedyRegisterAllocator);
// RAGreedy - Greedy register allocator pass. Combines the MachineFunctionPass
// plumbing with the RegAllocBase priority-queue driver. Live ranges are
// assigned, reassigned, evicted, split, or spilled in selectOrSplit().
// NOTE(review): this listing has dropped lines inside the class (access
// specifiers, the static ID member, context pointers, etc.) — the visible
// members below are only a subset.
59 class RAGreedy : public MachineFunctionPass, public RegAllocBase {
// Registers reserved by the target; never available for allocation.
62 BitVector ReservedRegs;
// Analyses cached from getAnalysis<> in runOnMachineFunction (presumably —
// the assignment sites are not visible in this listing).
67 MachineDominatorTree *DomTree;
68 MachineLoopInfo *Loops;
69 MachineLoopRanges *LoopRanges;
71 SpillPlacement *SpillPlacer;
// Owned helper objects. NOTE(review): std::auto_ptr is deprecated (removed
// in C++17); period-appropriate here, but a modern tree would use
// std::unique_ptr (LLVM later switched to OwningPtr/unique_ptr).
74 std::auto_ptr<Spiller> SpillerInstance;
75 std::auto_ptr<SplitAnalysis> SA;
// Priority queue of (priority, virtual-register) pairs; see enqueue().
76 std::priority_queue<std::pair<unsigned, unsigned> > Queue;
// Per-vreg count of how many times the range has been queued; used to
// deprioritize "repeat offenders" in enqueue().
77 IndexedMap<unsigned, VirtReg2IndexFunctor> Generation;
81 /// All basic blocks where the current register is live.
82 SmallVector<SpillPlacement::BlockConstraint, 8> SpillConstraints;
84 /// For every instruction in SA->UseSlots, store the previous non-copy
/// instruction's index (filled in by calcPrevSlots()).
86 SmallVector<SlotIndex, 8> PrevSlot;
91 /// Return the pass name.
92 virtual const char* getPassName() const {
93 return "Greedy Register Allocator";
96 /// RAGreedy analysis usage.
97 virtual void getAnalysisUsage(AnalysisUsage &AU) const;
98 virtual void releaseMemory();
99 virtual Spiller &spiller() { return *SpillerInstance; }
100 virtual void enqueue(LiveInterval *LI);
101 virtual LiveInterval *dequeue();
102 virtual unsigned selectOrSplit(LiveInterval&,
103 SmallVectorImpl<LiveInterval*>&);
105 /// Perform register allocation.
106 virtual bool runOnMachineFunction(MachineFunction &mf);
// Private helpers for interference analysis and splitting (bodies below).
111 bool checkUncachedInterference(LiveInterval&, unsigned);
112 LiveInterval *getSingleInterference(LiveInterval&, unsigned);
113 bool reassignVReg(LiveInterval &InterferingVReg, unsigned OldPhysReg);
114 float calcInterferenceWeight(LiveInterval&, unsigned);
115 float calcInterferenceInfo(LiveInterval&, unsigned);
116 float calcGlobalSplitCost(const BitVector&);
117 void splitAroundRegion(LiveInterval&, unsigned, const BitVector&,
118 SmallVectorImpl<LiveInterval*>&);
119 void calcGapWeights(unsigned, SmallVectorImpl<float>&);
120 SlotIndex getPrevMappedIndex(const MachineInstr*);
121 void calcPrevSlots();
122 unsigned nextSplitPoint(unsigned);
123 bool canEvictInterference(LiveInterval&, unsigned, unsigned, float&);
// The try* methods each attempt one allocation strategy and return a
// physreg on success, or 0 to fall through to the next strategy.
125 unsigned tryReassign(LiveInterval&, AllocationOrder&,
126 SmallVectorImpl<LiveInterval*>&);
127 unsigned tryEvict(LiveInterval&, AllocationOrder&,
128 SmallVectorImpl<LiveInterval*>&);
129 unsigned tryRegionSplit(LiveInterval&, AllocationOrder&,
130 SmallVectorImpl<LiveInterval*>&);
131 unsigned tryLocalSplit(LiveInterval&, AllocationOrder&,
132 SmallVectorImpl<LiveInterval*>&);
133 unsigned trySplit(LiveInterval&, AllocationOrder&,
134 SmallVectorImpl<LiveInterval*>&);
135 unsigned trySpillInterferences(LiveInterval&, AllocationOrder&,
136 SmallVectorImpl<LiveInterval*>&);
138 } // end anonymous namespace
// Pass identification; address of ID is the unique pass identifier.
140 char RAGreedy::ID = 0;
// Factory used by the RegisterRegAlloc registration above.
142 FunctionPass* llvm::createGreedyRegisterAllocator() {
143 return new RAGreedy();
// Constructor - eagerly initialize every pass this allocator depends on so
// they are registered before getAnalysisUsage() is queried.
146 RAGreedy::RAGreedy(): MachineFunctionPass(ID) {
147 initializeSlotIndexesPass(*PassRegistry::getPassRegistry());
148 initializeLiveIntervalsPass(*PassRegistry::getPassRegistry());
// NOTE(review): SlotIndexes is initialized a second time here (also on line
// 147 above). Initialization is idempotent so this is harmless, but the
// duplicate call looks unintentional — one of the two was probably meant to
// initialize a different pass. Worth confirming against upstream.
149 initializeSlotIndexesPass(*PassRegistry::getPassRegistry());
150 initializeStrongPHIEliminationPass(*PassRegistry::getPassRegistry());
151 initializeRegisterCoalescerAnalysisGroup(*PassRegistry::getPassRegistry());
152 initializeCalculateSpillWeightsPass(*PassRegistry::getPassRegistry());
153 initializeLiveStacksPass(*PassRegistry::getPassRegistry());
154 initializeMachineDominatorTreePass(*PassRegistry::getPassRegistry());
155 initializeMachineLoopInfoPass(*PassRegistry::getPassRegistry());
156 initializeMachineLoopRangesPass(*PassRegistry::getPassRegistry());
157 initializeVirtRegMapPass(*PassRegistry::getPassRegistry());
158 initializeEdgeBundlesPass(*PassRegistry::getPassRegistry());
159 initializeSpillPlacementPass(*PassRegistry::getPassRegistry());
// Declare required and preserved analyses. Every pass initialized in the
// constructor above is required (or required-transitively) here.
162 void RAGreedy::getAnalysisUsage(AnalysisUsage &AU) const {
// Register allocation never changes the CFG.
163 AU.setPreservesCFG();
164 AU.addRequired<AliasAnalysis>();
165 AU.addPreserved<AliasAnalysis>();
166 AU.addRequired<LiveIntervals>();
167 AU.addRequired<SlotIndexes>();
168 AU.addPreserved<SlotIndexes>();
170 AU.addRequiredID(StrongPHIEliminationID);
// Transitive: the coalescer's results must stay alive while we run.
171 AU.addRequiredTransitive<RegisterCoalescer>();
172 AU.addRequired<CalculateSpillWeights>();
173 AU.addRequired<LiveStacks>();
174 AU.addPreserved<LiveStacks>();
175 AU.addRequired<MachineDominatorTree>();
176 AU.addPreserved<MachineDominatorTree>();
177 AU.addRequired<MachineLoopInfo>();
178 AU.addPreserved<MachineLoopInfo>();
179 AU.addRequired<MachineLoopRanges>();
180 AU.addPreserved<MachineLoopRanges>();
181 AU.addRequired<VirtRegMap>();
182 AU.addPreserved<VirtRegMap>();
183 AU.addRequired<EdgeBundles>();
184 AU.addRequired<SpillPlacement>();
185 MachineFunctionPass::getAnalysisUsage(AU);
// Drop per-function state between runs; the base class releases the rest.
188 void RAGreedy::releaseMemory() {
189 SpillerInstance.reset(0);
191 RegAllocBase::releaseMemory();
// enqueue - Add LI to the priority queue with a priority derived from its
// size, its generation count, and any physreg hint.
// NOTE(review): this listing has dropped lines here — at minimum the
// declaration of Prio and the 'else' introducing the repeat-offender branch.
194 void RAGreedy::enqueue(LiveInterval *LI) {
195 // Prioritize live ranges by size, assigning larger ranges first.
196 // The queue holds (size, reg) pairs.
197 const unsigned Size = LI->getSize();
198 const unsigned Reg = LI->reg;
199 assert(TargetRegisterInfo::isVirtualRegister(Reg) &&
200 "Can only enqueue virtual registers");
201 const unsigned Hint = VRM->getRegAllocPref(Reg);
// Generation counts how many times this vreg has been queued.
204 Generation.grow(Reg);
205 if (++Generation[Reg] == 1)
206 // 1st generation ranges are handled first, long -> short.
207 Prio = (1u << 31) + Size;
209 // Repeat offenders are handled second, short -> long
210 Prio = (1u << 30) - Size;
212 // Boost ranges that have a physical register hint.
213 if (TargetRegisterInfo::isPhysicalRegister(Hint))
216 Queue.push(std::make_pair(Prio, Reg));
// dequeue - Pop the highest-priority live interval from the queue.
// (The empty-queue check and Queue.pop() are not visible in this listing.)
219 LiveInterval *RAGreedy::dequeue() {
222 LiveInterval *LI = &LIS->getInterval(Queue.top().second);
227 //===----------------------------------------------------------------------===//
228 // Register Reassignment
229 //===----------------------------------------------------------------------===//
231 // Check interference without using the cache.
// checkUncachedInterference - Query interference against PhysReg and all of
// its aliases using fresh (uncached) queries. Returns true on interference.
232 bool RAGreedy::checkUncachedInterference(LiveInterval &VirtReg,
// Walk PhysReg plus every overlapping (aliased) physreg; the list is
// terminated by a 0 entry.
234 for (const unsigned *AliasI = TRI->getOverlaps(PhysReg); *AliasI; ++AliasI) {
235 LiveIntervalUnion::Query subQ(&VirtReg, &PhysReg2LiveUnion[*AliasI]);
236 if (subQ.checkInterference())
242 /// getSingleInterference - Return the single interfering virtual register
243 /// assigned to PhysReg. Return 0 if more than one virtual register is
// Returns the single interfering vreg for PhysReg (and aliases), or 0 when
// there is more than one interference. See doc comment above.
245 LiveInterval *RAGreedy::getSingleInterference(LiveInterval &VirtReg,
247 // Check physreg and aliases.
248 LiveInterval *Interference = 0;
249 for (const unsigned *AliasI = TRI->getOverlaps(PhysReg); *AliasI; ++AliasI) {
250 LiveIntervalUnion::Query &Q = query(VirtReg, *AliasI);
251 if (Q.checkInterference()) {
// Ask for at most 2 interferences — enough to detect "more than one".
254 if (Q.collectInterferingVRegs(2) > 1)
256 Interference = Q.interferingVRegs().front();
262 // Attempt to reassign this virtual register to a different physical register.
264 // FIXME: we are not yet caching these "second-level" interferences discovered
265 // in the sub-queries. These interferences can change with each call to
266 // selectOrSplit. However, we could implement a "may-interfere" cache that
267 // could be conservatively dirtied when we reassign or split.
269 // FIXME: This may result in a lot of alias queries. We could summarize alias
270 // live intervals in their parent register's live union, but it's messy.
// reassignVReg - Move InterferingVReg to some other physreg so that
// WantedPhysReg becomes free. Returns true on success.
271 bool RAGreedy::reassignVReg(LiveInterval &InterferingVReg,
272 unsigned WantedPhysReg) {
273 assert(TargetRegisterInfo::isVirtualRegister(InterferingVReg.reg) &&
274 "Can only reassign virtual registers");
// NOTE(review): "assigment" typo lives inside the assert string literal;
// left byte-identical here since it is runtime-visible text.
275 assert(TRI->regsOverlap(WantedPhysReg, VRM->getPhys(InterferingVReg.reg)) &&
276 "inconsistent phys reg assigment");
278 AllocationOrder Order(InterferingVReg.reg, *VRM, ReservedRegs);
279 while (unsigned PhysReg = Order.next()) {
280 // Don't reassign to a WantedPhysReg alias.
281 if (TRI->regsOverlap(PhysReg, WantedPhysReg))
// Skip candidates that themselves interfere.
284 if (checkUncachedInterference(InterferingVReg, PhysReg))
287 // Reassign the interfering virtual reg to this physical reg.
288 unsigned OldAssign = VRM->getPhys(InterferingVReg.reg);
289 DEBUG(dbgs() << "reassigning: " << InterferingVReg << " from " <<
290 TRI->getName(OldAssign) << " to " << TRI->getName(PhysReg) << '\n');
291 unassign(InterferingVReg, OldAssign);
292 assign(InterferingVReg, PhysReg);
299 /// tryReassign - Try to reassign a single interference to a different physreg.
300 /// @param VirtReg Currently unassigned virtual register.
301 /// @param Order Physregs to try.
302 /// @return Physreg to assign VirtReg, or 0.
// See doc comment above: try to free up a physreg by moving its single
// interfering vreg elsewhere.
303 unsigned RAGreedy::tryReassign(LiveInterval &VirtReg, AllocationOrder &Order,
304 SmallVectorImpl<LiveInterval*> &NewVRegs){
// Time spent here is attributed to the "Reassign" timer (-time-passes).
305 NamedRegionTimer T("Reassign", TimerGroupName, TimePassesIsEnabled);
308 while (unsigned PhysReg = Order.next()) {
// Only consider physregs blocked by exactly one vreg.
309 LiveInterval *InterferingVReg = getSingleInterference(VirtReg, PhysReg);
310 if (!InterferingVReg)
// Fixed (physreg) interference cannot be reassigned.
312 if (TargetRegisterInfo::isPhysicalRegister(InterferingVReg->reg))
314 if (reassignVReg(*InterferingVReg, PhysReg))
321 //===----------------------------------------------------------------------===//
322 // Interference eviction
323 //===----------------------------------------------------------------------===//
325 /// canEvict - Return true if all interferences between VirtReg and PhysReg can
326 /// be evicted. Set maxWeight to the maximal spill weight of an interference.
// See doc comment above: decide whether every interference on PhysReg (and
// aliases) is a virtual register longer than Size, and report the max weight.
327 bool RAGreedy::canEvictInterference(LiveInterval &VirtReg, unsigned PhysReg,
328 unsigned Size, float &MaxWeight) {
330 for (const unsigned *AliasI = TRI->getOverlaps(PhysReg); *AliasI; ++AliasI) {
331 LiveIntervalUnion::Query &Q = query(VirtReg, *AliasI);
332 // If there is 10 or more interferences, chances are one is smaller.
333 if (Q.collectInterferingVRegs(10) >= 10)
336 // Check if any interfering live range is shorter than VirtReg.
337 for (unsigned i = 0, e = Q.interferingVRegs().size(); i != e; ++i) {
338 LiveInterval *Intf = Q.interferingVRegs()[i];
// Physreg interference can never be evicted.
339 if (TargetRegisterInfo::isPhysicalRegister(Intf->reg))
// Don't evict something as short as (or shorter than) VirtReg itself.
341 if (Intf->getSize() <= Size)
343 Weight = std::max(Weight, Intf->weight);
350 /// tryEvict - Try to evict all interferences for a physreg.
351 /// @param VirtReg Currently unassigned virtual register.
352 /// @param Order Physregs to try.
353 /// @return Physreg to assign VirtReg, or 0.
// See doc comment above: pick the physreg whose interferences are cheapest
// to evict, evict them all, and return that physreg.
354 unsigned RAGreedy::tryEvict(LiveInterval &VirtReg,
355 AllocationOrder &Order,
356 SmallVectorImpl<LiveInterval*> &NewVRegs){
357 NamedRegionTimer T("Evict", TimerGroupName, TimePassesIsEnabled);
359 // We can only evict interference if all interfering registers are virtual and
360 // longer than VirtReg.
361 const unsigned Size = VirtReg.getSize();
363 // Keep track of the lightest single interference seen so far.
364 float BestWeight = 0;
365 unsigned BestPhys = 0;
368 while (unsigned PhysReg = Order.next()) {
370 if (!canEvictInterference(VirtReg, PhysReg, Size, Weight))
373 // This is an eviction candidate.
374 DEBUG(dbgs() << "max " << PrintReg(PhysReg, TRI) << " interference = "
// Keep only candidates strictly cheaper than the best so far.
376 if (BestPhys && Weight >= BestWeight)
382 // Stop if the hint can be used.
383 if (Order.isHint(PhysReg))
// NOTE(review): "interfererences" typo is inside the assert string literal
// below; left byte-identical since it is runtime-visible text.
390 DEBUG(dbgs() << "evicting " << PrintReg(BestPhys, TRI) << " interference\n");
391 for (const unsigned *AliasI = TRI->getOverlaps(BestPhys); *AliasI; ++AliasI) {
392 LiveIntervalUnion::Query &Q = query(VirtReg, *AliasI);
393 assert(Q.seenAllInterferences() && "Didn't check all interfererences.");
394 for (unsigned i = 0, e = Q.interferingVRegs().size(); i != e; ++i) {
395 LiveInterval *Intf = Q.interferingVRegs()[i];
// Evicted vregs go back on the queue for another allocation attempt.
396 unassign(*Intf, VRM->getPhys(Intf->reg));
398 NewVRegs.push_back(Intf);
405 //===----------------------------------------------------------------------===//
407 //===----------------------------------------------------------------------===//
409 /// calcInterferenceInfo - Compute per-block outgoing and ingoing constraints
410 /// when considering interference from PhysReg. Also compute an optimistic local
411 /// cost of this interference pattern.
413 /// The final cost of a split is the local cost + global cost of preferences
414 /// broken by SpillPlacement.
// See doc comment above: fill SpillConstraints with per-block entry/exit
// preferences given interference from PhysReg, and return the optimistic
// local spill-code cost. NOTE(review): this listing has dropped lines inside
// this function (loop braces, 'continue's, the LocalCost declaration, the
// return) — read alongside the upstream source before editing.
416 float RAGreedy::calcInterferenceInfo(LiveInterval &VirtReg, unsigned PhysReg) {
417 // Reset interference dependent info.
418 SpillConstraints.resize(SA->LiveBlocks.size());
419 for (unsigned i = 0, e = SA->LiveBlocks.size(); i != e; ++i) {
420 SplitAnalysis::BlockInfo &BI = SA->LiveBlocks[i];
421 SpillPlacement::BlockConstraint &BC = SpillConstraints[i];
422 BC.Number = BI.MBB->getNumber();
// Default: prefer a register on entry/exit only where the range is live
// there AND the block has uses; otherwise the bundle solver may choose.
423 BC.Entry = (BI.Uses && BI.LiveIn) ?
424 SpillPlacement::PrefReg : SpillPlacement::DontCare;
425 BC.Exit = (BI.Uses && BI.LiveOut) ?
426 SpillPlacement::PrefReg : SpillPlacement::DontCare;
427 BI.OverlapEntry = BI.OverlapExit = false;
430 // Add interference info from each PhysReg alias.
431 for (const unsigned *AI = TRI->getOverlaps(PhysReg); *AI; ++AI) {
432 if (!query(VirtReg, *AI).checkInterference())
434 LiveIntervalUnion::SegmentIter IntI =
435 PhysReg2LiveUnion[*AI].find(VirtReg.beginIndex());
439 // Determine which blocks have interference live in or after the last split
441 for (unsigned i = 0, e = SA->LiveBlocks.size(); i != e; ++i) {
442 SplitAnalysis::BlockInfo &BI = SA->LiveBlocks[i];
443 SpillPlacement::BlockConstraint &BC = SpillConstraints[i];
444 SlotIndex Start, Stop;
445 tie(Start, Stop) = Indexes->getMBBRange(BI.MBB);
447 // Skip interference-free blocks.
448 if (IntI.start() >= Stop)
451 // Is the interference live-in?
453 IntI.advanceTo(Start);
// Interference live across the block entry forces a spill on entry.
456 if (IntI.start() <= Start)
457 BC.Entry = SpillPlacement::MustSpill;
460 // Is the interference overlapping the last split point?
462 if (IntI.stop() < BI.LastSplitPoint)
463 IntI.advanceTo(BI.LastSplitPoint.getPrevSlot());
466 if (IntI.start() < Stop)
467 BC.Exit = SpillPlacement::MustSpill;
471 // Rewind iterator and check other interferences.
472 IntI.find(VirtReg.beginIndex());
473 for (unsigned i = 0, e = SA->LiveBlocks.size(); i != e; ++i) {
474 SplitAnalysis::BlockInfo &BI = SA->LiveBlocks[i];
475 SpillPlacement::BlockConstraint &BC = SpillConstraints[i];
476 SlotIndex Start, Stop;
477 tie(Start, Stop) = Indexes->getMBBRange(BI.MBB);
479 // Skip interference-free blocks.
480 if (IntI.start() >= Stop)
483 // Handle transparent blocks with interference separately.
484 // Transparent blocks never incur any fixed cost.
485 if (BI.LiveThrough && !BI.Uses) {
486 IntI.advanceTo(Start);
489 if (IntI.start() >= Stop)
// Prefer spilling across a transparent block with interference, but
// never downgrade a MustSpill decided above.
492 if (BC.Entry != SpillPlacement::MustSpill)
493 BC.Entry = SpillPlacement::PrefSpill;
494 if (BC.Exit != SpillPlacement::MustSpill)
495 BC.Exit = SpillPlacement::PrefSpill;
499 // Now we only have blocks with uses left.
500 // Check if the interference overlaps the uses.
501 assert(BI.Uses && "Non-transparent block without any uses");
503 // Check interference on entry.
504 if (BI.LiveIn && BC.Entry != SpillPlacement::MustSpill) {
505 IntI.advanceTo(Start);
508 // Not live in, but before the first use.
509 if (IntI.start() < BI.FirstUse) {
510 BC.Entry = SpillPlacement::PrefSpill;
511 // If the block contains a kill from an earlier split, never split
512 // again in the same block.
513 if (!BI.LiveThrough && !SA->isOriginalEndpoint(BI.Kill))
514 BC.Entry = SpillPlacement::MustSpill;
518 // Does interference overlap the uses in the entry segment
520 if (BI.LiveIn && !BI.OverlapEntry) {
521 IntI.advanceTo(BI.FirstUse);
524 // A live-through interval has no kill.
525 // Check [FirstUse;LastUse) instead.
526 if (IntI.start() < (BI.LiveThrough ? BI.LastUse : BI.Kill))
527 BI.OverlapEntry = true;
530 // Does interference overlap the uses in the exit segment [Def;LastUse)?
531 if (BI.LiveOut && !BI.LiveThrough && !BI.OverlapExit) {
532 IntI.advanceTo(BI.Def);
535 if (IntI.start() < BI.LastUse)
536 BI.OverlapExit = true;
539 // Check interference on exit.
540 if (BI.LiveOut && BC.Exit != SpillPlacement::MustSpill) {
541 // Check interference between LastUse and Stop.
542 if (BC.Exit != SpillPlacement::PrefSpill) {
543 IntI.advanceTo(BI.LastUse);
546 if (IntI.start() < Stop) {
547 BC.Exit = SpillPlacement::PrefSpill;
548 // Avoid splitting twice in the same block.
549 if (!BI.LiveThrough && !SA->isOriginalEndpoint(BI.Def))
550 BC.Exit = SpillPlacement::MustSpill;
557 // Accumulate a local cost of this interference pattern.
559 for (unsigned i = 0, e = SA->LiveBlocks.size(); i != e; ++i) {
560 SplitAnalysis::BlockInfo &BI = SA->LiveBlocks[i];
563 SpillPlacement::BlockConstraint &BC = SpillConstraints[i];
564 unsigned Inserts = 0;
566 // Do we need spill code for the entry segment?
568 Inserts += BI.OverlapEntry || BC.Entry != SpillPlacement::PrefReg;
570 // For the exit segment?
572 Inserts += BI.OverlapExit || BC.Exit != SpillPlacement::PrefReg;
574 // The local cost of spill code in this block is the block frequency times
575 // the number of spill instructions inserted.
577 LocalCost += Inserts * SpillPlacer->getBlockFrequency(BI.MBB);
579 DEBUG(dbgs() << "Local cost of " << PrintReg(PhysReg, TRI) << " = "
580 << LocalCost << '\n');
584 /// calcGlobalSplitCost - Return the global split cost of following the split
585 /// pattern in LiveBundles. This cost should be added to the local cost of the
586 /// interference pattern in SpillConstraints.
// See doc comment above: sum the block-frequency-weighted cost of every
// entry/exit preference in SpillConstraints that LiveBundles breaks.
588 float RAGreedy::calcGlobalSplitCost(const BitVector &LiveBundles) {
589 float GlobalCost = 0;
590 for (unsigned i = 0, e = SpillConstraints.size(); i != e; ++i) {
591 SpillPlacement::BlockConstraint &BC = SpillConstraints[i];
592 unsigned Inserts = 0;
593 // Broken entry preference?
// getBundle(N, 0) = ingoing bundle, getBundle(N, 1) = outgoing bundle.
594 Inserts += LiveBundles[Bundles->getBundle(BC.Number, 0)] !=
595 (BC.Entry == SpillPlacement::PrefReg);
596 // Broken exit preference?
597 Inserts += LiveBundles[Bundles->getBundle(BC.Number, 1)] !=
598 (BC.Exit == SpillPlacement::PrefReg);
601 Inserts * SpillPlacer->getBlockFrequency(SA->LiveBlocks[i].MBB);
603 DEBUG(dbgs() << "Global cost = " << GlobalCost << '\n');
607 /// splitAroundRegion - Split VirtReg around the region determined by
608 /// LiveBundles. Make an effort to avoid interference from PhysReg.
610 /// The 'register' interval is going to contain as many uses as possible while
611 /// avoiding interference. The 'stack' interval is the complement constructed by
612 /// SplitEditor. It will contain the rest.
// See doc comment above: carve VirtReg into a 'register' interval covering
// the LiveBundles region (dodging interference from PhysReg) plus a 'stack'
// complement built by SplitEditor. Structure: (1) compute per-block
// interference ranges, (2) place defs that are live out of a block,
// (3) handle live-in segments, (4) verify at least one new interval can
// take PhysReg. NOTE(review): this listing has dropped many lines
// (continues, else branches, SE.finish(), closing braces) — read alongside
// the upstream source before editing.
614 void RAGreedy::splitAroundRegion(LiveInterval &VirtReg, unsigned PhysReg,
615 const BitVector &LiveBundles,
616 SmallVectorImpl<LiveInterval*> &NewVRegs) {
618 dbgs() << "Splitting around region for " << PrintReg(PhysReg, TRI)
620 for (int i = LiveBundles.find_first(); i>=0; i = LiveBundles.find_next(i))
621 dbgs() << " EB#" << i;
625 // First compute interference ranges in the live blocks.
626 typedef std::pair<SlotIndex, SlotIndex> IndexPair;
627 SmallVector<IndexPair, 8> InterferenceRanges;
628 InterferenceRanges.resize(SA->LiveBlocks.size());
629 for (const unsigned *AI = TRI->getOverlaps(PhysReg); *AI; ++AI) {
630 if (!query(VirtReg, *AI).checkInterference())
632 LiveIntervalUnion::SegmentIter IntI =
633 PhysReg2LiveUnion[*AI].find(VirtReg.beginIndex());
636 for (unsigned i = 0, e = SA->LiveBlocks.size(); i != e; ++i) {
637 const SplitAnalysis::BlockInfo &BI = SA->LiveBlocks[i];
// IP accumulates [first, last) interference in this block across aliases.
638 IndexPair &IP = InterferenceRanges[i];
639 SlotIndex Start, Stop;
640 tie(Start, Stop) = Indexes->getMBBRange(BI.MBB);
641 // Skip interference-free blocks.
642 if (IntI.start() >= Stop)
645 // First interference in block.
647 IntI.advanceTo(Start);
650 if (IntI.start() >= Stop)
652 if (!IP.first.isValid() || IntI.start() < IP.first)
653 IP.first = IntI.start();
656 // Last interference in block.
658 IntI.advanceTo(Stop);
659 if (!IntI.valid() || IntI.start() >= Stop)
661 if (IntI.stop() <= Start)
663 if (!IP.second.isValid() || IntI.stop() > IP.second)
664 IP.second = IntI.stop();
// LREdit tracks the new intervals created by splitting VirtReg.
669 SmallVector<LiveInterval*, 4> SpillRegs;
670 LiveRangeEdit LREdit(VirtReg, NewVRegs, SpillRegs);
671 SplitEditor SE(*SA, *LIS, *VRM, *DomTree, LREdit);
673 // Create the main cross-block interval.
676 // First add all defs that are live out of a block.
677 for (unsigned i = 0, e = SA->LiveBlocks.size(); i != e; ++i) {
678 SplitAnalysis::BlockInfo &BI = SA->LiveBlocks[i];
679 bool RegIn = LiveBundles[Bundles->getBundle(BI.MBB->getNumber(), 0)];
680 bool RegOut = LiveBundles[Bundles->getBundle(BI.MBB->getNumber(), 1)];
682 // Should the register be live out?
683 if (!BI.LiveOut || !RegOut)
686 IndexPair &IP = InterferenceRanges[i];
687 SlotIndex Start, Stop;
688 tie(Start, Stop) = Indexes->getMBBRange(BI.MBB);
690 DEBUG(dbgs() << "BB#" << BI.MBB->getNumber() << " -> EB#"
691 << Bundles->getBundle(BI.MBB->getNumber(), 1)
692 << " intf [" << IP.first << ';' << IP.second << ')');
694 // The interference interval should either be invalid or overlap MBB.
695 assert((!IP.first.isValid() || IP.first < Stop) && "Bad interference");
696 assert((!IP.second.isValid() || IP.second > Start) && "Bad interference");
698 // Check interference leaving the block.
699 if (!IP.second.isValid()) {
700 // Block is interference-free.
701 DEBUG(dbgs() << ", no interference");
703 assert(BI.LiveThrough && "No uses, but not live through block?");
704 // Block is live-through without interference.
705 DEBUG(dbgs() << ", no uses"
706 << (RegIn ? ", live-through.\n" : ", stack in.\n"));
708 SE.enterIntvAtEnd(*BI.MBB);
711 if (!BI.LiveThrough) {
712 DEBUG(dbgs() << ", not live-through.\n");
713 SE.useIntv(SE.enterIntvBefore(BI.Def), Stop);
717 // Block is live-through, but entry bundle is on the stack.
718 // Reload just before the first use.
719 DEBUG(dbgs() << ", not live-in, enter before first use.\n");
720 SE.useIntv(SE.enterIntvBefore(BI.FirstUse), Stop);
723 DEBUG(dbgs() << ", live-through.\n");
727 // Block has interference.
728 DEBUG(dbgs() << ", interference to " << IP.second);
730 if (!BI.LiveThrough && IP.second <= BI.Def) {
731 // The interference doesn't reach the outgoing segment.
732 DEBUG(dbgs() << " doesn't affect def from " << BI.Def << '\n');
733 SE.useIntv(BI.Def, Stop);
739 // No uses in block, avoid interference by reloading as late as possible.
740 DEBUG(dbgs() << ", no uses.\n");
741 SlotIndex SegStart = SE.enterIntvAtEnd(*BI.MBB);
742 assert(SegStart >= IP.second && "Couldn't avoid interference");
746 if (IP.second.getBoundaryIndex() < BI.LastUse) {
747 // There are interference-free uses at the end of the block.
748 // Find the first use that can get the live-out register.
749 SmallVectorImpl<SlotIndex>::const_iterator UI =
750 std::lower_bound(SA->UseSlots.begin(), SA->UseSlots.end(),
751 IP.second.getBoundaryIndex());
752 assert(UI != SA->UseSlots.end() && "Couldn't find last use");
754 assert(Use <= BI.LastUse && "Couldn't find last use");
755 // Only attempt a split before the last split point.
756 if (Use.getBaseIndex() <= BI.LastSplitPoint) {
757 DEBUG(dbgs() << ", free use at " << Use << ".\n");
758 SlotIndex SegStart = SE.enterIntvBefore(Use);
759 assert(SegStart >= IP.second && "Couldn't avoid interference");
760 assert(SegStart < BI.LastSplitPoint && "Impossible split point");
761 SE.useIntv(SegStart, Stop);
766 // Interference is after the last use.
767 DEBUG(dbgs() << " after last use.\n");
768 SlotIndex SegStart = SE.enterIntvAtEnd(*BI.MBB);
769 assert(SegStart >= IP.second && "Couldn't avoid interference");
772 // Now all defs leading to live bundles are handled, do everything else.
773 for (unsigned i = 0, e = SA->LiveBlocks.size(); i != e; ++i) {
774 SplitAnalysis::BlockInfo &BI = SA->LiveBlocks[i];
775 bool RegIn = LiveBundles[Bundles->getBundle(BI.MBB->getNumber(), 0)];
776 bool RegOut = LiveBundles[Bundles->getBundle(BI.MBB->getNumber(), 1)];
778 // Is the register live-in?
779 if (!BI.LiveIn || !RegIn)
782 // We have an incoming register. Check for interference.
783 IndexPair &IP = InterferenceRanges[i];
784 SlotIndex Start, Stop;
785 tie(Start, Stop) = Indexes->getMBBRange(BI.MBB);
787 DEBUG(dbgs() << "EB#" << Bundles->getBundle(BI.MBB->getNumber(), 0)
788 << " -> BB#" << BI.MBB->getNumber());
790 // Check interference entering the block.
791 if (!IP.first.isValid()) {
792 // Block is interference-free.
793 DEBUG(dbgs() << ", no interference");
795 assert(BI.LiveThrough && "No uses, but not live through block?");
796 // Block is live-through without interference.
798 DEBUG(dbgs() << ", no uses, live-through.\n");
799 SE.useIntv(Start, Stop);
801 DEBUG(dbgs() << ", no uses, stack-out.\n");
802 SE.leaveIntvAtTop(*BI.MBB);
806 if (!BI.LiveThrough) {
807 DEBUG(dbgs() << ", killed in block.\n");
808 SE.useIntv(Start, SE.leaveIntvAfter(BI.Kill));
812 // Block is live-through, but exit bundle is on the stack.
813 // Spill immediately after the last use.
814 if (BI.LastUse < BI.LastSplitPoint) {
815 DEBUG(dbgs() << ", uses, stack-out.\n");
816 SE.useIntv(Start, SE.leaveIntvAfter(BI.LastUse));
819 // The last use is after the last split point, it is probably an
821 DEBUG(dbgs() << ", uses at " << BI.LastUse << " after split point "
822 << BI.LastSplitPoint << ", stack-out.\n");
823 SlotIndex SegEnd = SE.leaveIntvBefore(BI.LastSplitPoint);
824 SE.useIntv(Start, SegEnd);
825 // Run a double interval from the split to the last use.
826 // This makes it possible to spill the complement without affecting the
828 SE.overlapIntv(SegEnd, BI.LastUse);
831 // Register is live-through.
832 DEBUG(dbgs() << ", uses, live-through.\n");
833 SE.useIntv(Start, Stop);
837 // Block has interference.
838 DEBUG(dbgs() << ", interference from " << IP.first);
840 if (!BI.LiveThrough && IP.first >= BI.Kill) {
841 // The interference doesn't reach the outgoing segment.
842 DEBUG(dbgs() << " doesn't affect kill at " << BI.Kill << '\n');
843 SE.useIntv(Start, BI.Kill);
848 // No uses in block, avoid interference by spilling as soon as possible.
849 DEBUG(dbgs() << ", no uses.\n");
850 SlotIndex SegEnd = SE.leaveIntvAtTop(*BI.MBB);
851 assert(SegEnd <= IP.first && "Couldn't avoid interference");
854 if (IP.first.getBaseIndex() > BI.FirstUse) {
855 // There are interference-free uses at the beginning of the block.
856 // Find the last use that can get the register.
857 SmallVectorImpl<SlotIndex>::const_iterator UI =
858 std::lower_bound(SA->UseSlots.begin(), SA->UseSlots.end(),
859 IP.first.getBaseIndex());
860 assert(UI != SA->UseSlots.begin() && "Couldn't find first use");
861 SlotIndex Use = (--UI)->getBoundaryIndex();
862 DEBUG(dbgs() << ", free use at " << *UI << ".\n");
863 SlotIndex SegEnd = SE.leaveIntvAfter(Use);
864 assert(SegEnd <= IP.first && "Couldn't avoid interference");
865 SE.useIntv(Start, SegEnd);
869 // Interference is before the first use.
870 DEBUG(dbgs() << " before first use.\n");
871 SlotIndex SegEnd = SE.leaveIntvAtTop(*BI.MBB);
872 assert(SegEnd <= IP.first && "Couldn't avoid interference");
877 // FIXME: Should we be more aggressive about splitting the stack region into
878 // per-block segments? The current approach allows the stack region to
879 // separate into connected components. Some components may be allocatable.
884 MF->verify(this, "After splitting live range around region");
887 // Make sure that at least one of the new intervals can allocate to PhysReg.
888 // That was the whole point of splitting the live range.
890 for (LiveRangeEdit::iterator I = LREdit.begin(), E = LREdit.end(); I != E;
892 if (!checkUncachedInterference(**I, PhysReg)) {
896 assert(found && "No allocatable intervals after pointless splitting");
// tryRegionSplit - For each candidate physreg, compute local + global split
// cost and split around the cheapest region. Returns 0 when no profitable
// region split exists (callers then try other strategies).
901 unsigned RAGreedy::tryRegionSplit(LiveInterval &VirtReg, AllocationOrder &Order,
902 SmallVectorImpl<LiveInterval*> &NewVRegs) {
903 BitVector LiveBundles, BestBundles;
905 unsigned BestReg = 0;
907 while (unsigned PhysReg = Order.next()) {
908 float Cost = calcInterferenceInfo(VirtReg, PhysReg);
// Prune candidates already costlier than the best before solving bundles.
909 if (BestReg && Cost >= BestCost)
// Let the spill placer choose which edge bundles live in a register.
912 SpillPlacer->placeSpills(SpillConstraints, LiveBundles);
913 // No live bundles, defer to splitSingleBlocks().
914 if (!LiveBundles.any())
917 Cost += calcGlobalSplitCost(LiveBundles);
918 if (!BestReg || Cost < BestCost) {
921 BestBundles.swap(LiveBundles);
928 splitAroundRegion(VirtReg, BestReg, BestBundles, NewVRegs);
933 //===----------------------------------------------------------------------===//
935 //===----------------------------------------------------------------------===//
938 /// calcGapWeights - Compute the maximum spill weight that needs to be evicted
939 /// in order to use PhysReg between two entries in SA->UseSlots.
941 /// GapWeight[i] represents the gap between UseSlots[i] and UseSlots[i+1].
// See doc comment above: GapWeight[i] = max spill weight interfering with
// the gap between UseSlots[i] and UseSlots[i+1] for a local interval.
943 void RAGreedy::calcGapWeights(unsigned PhysReg,
944 SmallVectorImpl<float> &GapWeight) {
945 assert(SA->LiveBlocks.size() == 1 && "Not a local interval");
946 const SplitAnalysis::BlockInfo &BI = SA->LiveBlocks.front();
947 const SmallVectorImpl<SlotIndex> &Uses = SA->UseSlots;
948 const unsigned NumGaps = Uses.size()-1;
950 // Start and end points for the interference check.
951 SlotIndex StartIdx = BI.LiveIn ? BI.FirstUse.getBaseIndex() : BI.FirstUse;
952 SlotIndex StopIdx = BI.LiveOut ? BI.LastUse.getBoundaryIndex() : BI.LastUse;
954 GapWeight.assign(NumGaps, 0.0f);
956 // Add interference from each overlapping register.
957 for (const unsigned *AI = TRI->getOverlaps(PhysReg); *AI; ++AI) {
958 if (!query(const_cast<LiveInterval&>(SA->getParent()), *AI)
959 .checkInterference())
962 // We know that VirtReg is a continuous interval from FirstUse to LastUse,
963 // so we don't need InterferenceQuery.
965 // Interference that overlaps an instruction is counted in both gaps
966 // surrounding the instruction. The exception is interference before
967 // StartIdx and after StopIdx.
969 LiveIntervalUnion::SegmentIter IntI = PhysReg2LiveUnion[*AI].find(StartIdx);
970 for (unsigned Gap = 0; IntI.valid() && IntI.start() < StopIdx; ++IntI) {
971 // Skip the gaps before IntI.
972 while (Uses[Gap+1].getBoundaryIndex() < IntI.start())
973 if (++Gap == NumGaps)
978 // Update the gaps covered by IntI.
979 const float weight = IntI.value()->weight;
980 for (; Gap != NumGaps; ++Gap) {
981 GapWeight[Gap] = std::max(GapWeight[Gap], weight);
982 if (Uses[Gap+1].getBaseIndex() >= IntI.stop())
991 /// getPrevMappedIndex - Return the slot index of the last non-copy instruction
992 /// before MI that has a slot index. If MI is the first mapped instruction in
993 /// its block, return the block start index instead.
// See doc comment above: walk backwards from MI past debug values and
// copies to the previous indexed instruction; fall back to the block start.
995 SlotIndex RAGreedy::getPrevMappedIndex(const MachineInstr *MI) {
996 assert(MI && "Missing MachineInstr");
997 const MachineBasicBlock *MBB = MI->getParent();
998 MachineBasicBlock::const_iterator B = MBB->begin(), I = MI;
// (The enclosing 'while (I != B)' loop line is not visible in this listing.)
1000 if (!(--I)->isDebugValue() && !I->isCopy())
1001 return Indexes->getInstructionIndex(I);
1002 return Indexes->getMBBStartIdx(MBB);
1005 /// calcPrevSlots - Fill in the PrevSlot array with the index of the previous
1006 /// real non-copy instruction for each instruction in SA->UseSlots.
///
/// PrevSlot[i] corresponds 1:1 with Uses[i]; nextSplitPoint() and the local
/// split weight estimate read it to decide where splitting is worthwhile.
1008 void RAGreedy::calcPrevSlots() {
1009 const SmallVectorImpl<SlotIndex> &Uses = SA->UseSlots;
// One entry per use slot; reserve up front to avoid reallocation.
1011 PrevSlot.reserve(Uses.size());
1012 for (unsigned i = 0, e = Uses.size(); i != e; ++i) {
// Map the use slot back to its instruction, then record the def index of
// the previous mapped, non-copy instruction (or the block start).
1013 const MachineInstr *MI = Indexes->getInstructionFromIndex(Uses[i]);
1014 PrevSlot.push_back(getPrevMappedIndex(MI).getDefIndex());
1018 /// nextSplitPoint - Find the next index into SA->UseSlots > i such that it may
1019 /// be beneficial to split before UseSlots[i].
1021 /// 0 is always a valid split point
///
/// A use is only a useful split point if it is not immediately adjacent to
/// the previous use — inserting a copy between back-to-back instructions
/// would not free the register for anything.
1022 unsigned RAGreedy::nextSplitPoint(unsigned i) {
1023 const SmallVectorImpl<SlotIndex> &Uses = SA->UseSlots;
1024 const unsigned Size = Uses.size();
1025 assert(i != Size && "No split points after the end");
1026 // Allow split before i when Uses[i] is not adjacent to the previous use.
// Skip uses whose previous real instruction is at or before the preceding
// use slot, i.e. uses packed tightly against their predecessor.
// NOTE(review): the return statement(s) are elided in this view.
1027 while (++i != Size && PrevSlot[i].getBaseIndex() <= Uses[i-1].getBaseIndex())
1032 /// tryLocalSplit - Try to split VirtReg into smaller intervals inside its only
///
/// basic block. Searches, per candidate PhysReg, for the [SplitBefore,
/// SplitAfter] use range whose estimated post-split spill weight most exceeds
/// the interference it must evict (MaxGap), then performs the best split
/// found. Returns 0 (new ranges go in NewVRegs) — callers retry allocation.
1035 unsigned RAGreedy::tryLocalSplit(LiveInterval &VirtReg, AllocationOrder &Order,
1036 SmallVectorImpl<LiveInterval*> &NewVRegs) {
1037 assert(SA->LiveBlocks.size() == 1 && "Not a local interval");
1038 const SplitAnalysis::BlockInfo &BI = SA->LiveBlocks.front();
1040 // Note that it is possible to have an interval that is live-in or live-out
1041 // while only covering a single block - A phi-def can use undef values from
1042 // predecessors, and the block could be a single-block loop.
1043 // We don't bother doing anything clever about such a case, we simply assume
1044 // that the interval is continuous from FirstUse to LastUse. We should make
1045 // sure that we don't do anything illegal to such an interval, though.
1047 const SmallVectorImpl<SlotIndex> &Uses = SA->UseSlots;
// With two or fewer uses there is nothing to split between.
1048 if (Uses.size() <= 2)
1050 const unsigned NumGaps = Uses.size()-1;
1053 dbgs() << "tryLocalSplit: ";
1054 for (unsigned i = 0, e = Uses.size(); i != e; ++i)
1055 dbgs() << ' ' << SA->UseSlots[i];
1059 // For every use, find the previous mapped non-copy instruction.
1060 // We use this to detect valid split points, and to estimate new interval
// BestBefore == NumGaps is the "no candidate found" sentinel checked below.
1064 unsigned BestBefore = NumGaps;
1065 unsigned BestAfter = 0;
1068 const float blockFreq = SpillPlacer->getBlockFrequency(BI.MBB);
1069 SmallVector<float, 8> GapWeight;
// Evaluate every register in allocation order.
1072 while (unsigned PhysReg = Order.next()) {
1073 // Keep track of the largest spill weight that would need to be evicted in
1074 // order to make use of PhysReg between UseSlots[i] and UseSlots[i+1].
1075 calcGapWeights(PhysReg, GapWeight);
1077 // Try to find the best sequence of gaps to close.
1078 // The new spill weight must be larger than any gap interference.
1080 // We will split before Uses[SplitBefore] and after Uses[SplitAfter].
1081 unsigned SplitBefore = 0, SplitAfter = nextSplitPoint(1) - 1;
1083 // MaxGap should always be max(GapWeight[SplitBefore..SplitAfter-1]).
1084 // It is the spill weight that needs to be evicted.
1085 float MaxGap = GapWeight[0];
1086 for (unsigned i = 1; i != SplitAfter; ++i)
1087 MaxGap = std::max(MaxGap, GapWeight[i]);
1090 // Live before/after split?
1091 const bool LiveBefore = SplitBefore != 0 || BI.LiveIn;
1092 const bool LiveAfter = SplitAfter != NumGaps || BI.LiveOut;
1094 DEBUG(dbgs() << PrintReg(PhysReg, TRI) << ' '
1095 << Uses[SplitBefore] << '-' << Uses[SplitAfter]
1096 << " i=" << MaxGap);
1098 // Stop before the interval gets so big we wouldn't be making progress.
1099 if (!LiveBefore && !LiveAfter) {
1100 DEBUG(dbgs() << " all\n");
1103 // Should the interval be extended or shrunk?
// HUGE_VALF marks unspillable interference — never a viable candidate.
1105 if (MaxGap < HUGE_VALF) {
1106 // Estimate the new spill weight.
1108 // Each instruction reads and writes the register, except the first
1109 // instr doesn't read when !FirstLive, and the last instr doesn't write
1112 // We will be inserting copies before and after, so the total number of
1113 // reads and writes is 2 * EstUses.
1115 const unsigned EstUses = 2*(SplitAfter - SplitBefore) +
1116 2*(LiveBefore + LiveAfter);
1118 // Try to guess the size of the new interval. This should be trivial,
1119 // but the slot index of an inserted copy can be a lot smaller than the
1120 // instruction it is inserted before if there are many dead indexes
1123 // We measure the distance from the instruction before SplitBefore to
1124 // get a conservative estimate.
1126 // The final distance can still be different if inserting copies
1127 // triggers a slot index renumbering.
1129 const float EstWeight = normalizeSpillWeight(blockFreq * EstUses,
1130 PrevSlot[SplitBefore].distance(Uses[SplitAfter]));
1131 // Would this split be possible to allocate?
1132 // Never allocate all gaps, we wouldn't be making progress.
// Positive Diff means the split range is lighter than what it evicts.
1133 float Diff = EstWeight - MaxGap;
1134 DEBUG(dbgs() << " w=" << EstWeight << " d=" << Diff);
1137 if (Diff > BestDiff) {
1138 DEBUG(dbgs() << " (best)");
1140 BestBefore = SplitBefore;
1141 BestAfter = SplitAfter;
// Shrink from the left: advance SplitBefore to the next split point.
1148 SplitBefore = nextSplitPoint(SplitBefore);
1149 if (SplitBefore < SplitAfter) {
1150 DEBUG(dbgs() << " shrink\n");
1151 // Recompute the max when necessary.
// Only rescan when the gap we dropped could have been the maximum.
1152 if (GapWeight[SplitBefore - 1] >= MaxGap) {
1153 MaxGap = GapWeight[SplitBefore];
1154 for (unsigned i = SplitBefore + 1; i != SplitAfter; ++i)
1155 MaxGap = std::max(MaxGap, GapWeight[i]);
1162 // Try to extend the interval.
1163 if (SplitAfter >= NumGaps) {
1164 DEBUG(dbgs() << " end\n");
1168 DEBUG(dbgs() << " extend\n");
// Grow to the right, folding the newly covered gap weights into MaxGap.
1169 for (unsigned e = nextSplitPoint(SplitAfter + 1) - 1;
1170 SplitAfter != e; ++SplitAfter)
1171 MaxGap = std::max(MaxGap, GapWeight[SplitAfter]);
1176 // Didn't find any candidates?
1177 if (BestBefore == NumGaps)
1180 DEBUG(dbgs() << "Best local split range: " << Uses[BestBefore]
1181 << '-' << Uses[BestAfter] << ", " << BestDiff
1182 << ", " << (BestAfter - BestBefore + 1) << " instrs\n");
// Materialize the winning split with SplitEditor: the new interval covers
// [enterIntvBefore(Uses[BestBefore]), leaveIntvAfter(Uses[BestAfter])].
1184 SmallVector<LiveInterval*, 4> SpillRegs;
1185 LiveRangeEdit LREdit(VirtReg, NewVRegs, SpillRegs);
1186 SplitEditor SE(*SA, *LIS, *VRM, *DomTree, LREdit);
1189 SlotIndex SegStart = SE.enterIntvBefore(Uses[BestBefore]);
1190 SlotIndex SegStop = SE.leaveIntvAfter(Uses[BestAfter]);
1191 SE.useIntv(SegStart, SegStop);
1199 //===----------------------------------------------------------------------===//
1200 // Live Range Splitting
1201 //===----------------------------------------------------------------------===//
1203 /// trySplit - Try to split VirtReg or one of its interferences, making it
1205 /// @return Physreg when VirtReg may be assigned and/or new NewVRegs.
///
/// Dispatch: single-block intervals go to tryLocalSplit; otherwise try a
/// multi-block region split first, then fall back to isolating blocks with
/// multiple uses into their own intervals.
1206 unsigned RAGreedy::trySplit(LiveInterval &VirtReg, AllocationOrder &Order,
1207 SmallVectorImpl<LiveInterval*>&NewVRegs) {
// Compute use slots, live blocks, etc. for VirtReg before any splitting.
1208 SA->analyze(&VirtReg);
1210 // Local intervals are handled separately.
1211 if (LIS->intervalIsInOneMBB(VirtReg)) {
1212 NamedRegionTimer T("Local Splitting", TimerGroupName, TimePassesIsEnabled);
1213 return tryLocalSplit(VirtReg, Order, NewVRegs);
1216 NamedRegionTimer T("Global Splitting", TimerGroupName, TimePassesIsEnabled);
1218 // First try to split around a region spanning multiple blocks.
1219 unsigned PhysReg = tryRegionSplit(VirtReg, Order, NewVRegs);
// A register or any new ranges means the region split made progress.
1220 if (PhysReg || !NewVRegs.empty())
1223 // Then isolate blocks with multiple uses.
1224 SplitAnalysis::BlockPtrSet Blocks;
1225 if (SA->getMultiUseBlocks(Blocks)) {
1226 SmallVector<LiveInterval*, 4> SpillRegs;
1227 LiveRangeEdit LREdit(VirtReg, NewVRegs, SpillRegs);
1228 SplitEditor(*SA, *LIS, *VRM, *DomTree, LREdit).splitSingleBlocks(Blocks);
1230 MF->verify(this, "After splitting live range around basic blocks");
1233 // Don't assign any physregs.
1238 //===----------------------------------------------------------------------===//
1240 //===----------------------------------------------------------------------===//
1242 /// calcInterferenceWeight - Calculate the combined spill weight of
1243 /// interferences when assigning VirtReg to PhysReg.
///
/// Sums the weights of every virtual register, across all registers
/// overlapping PhysReg, that would have to be displaced. An unspillable
/// interference disqualifies PhysReg (caller compares against HUGE_VALF).
1244 float RAGreedy::calcInterferenceWeight(LiveInterval &VirtReg, unsigned PhysReg){
1246 for (const unsigned *AI = TRI->getOverlaps(PhysReg); *AI; ++AI) {
1247 LiveIntervalUnion::Query &Q = query(VirtReg, *AI);
// Populate the query's interfering-vreg list before inspecting it.
1248 Q.collectInterferingVRegs();
// Any unspillable interference makes this PhysReg unusable.
// NOTE(review): the early return for this case is elided in this view.
1249 if (Q.seenUnspillableVReg())
1251 for (unsigned i = 0, e = Q.interferingVRegs().size(); i != e; ++i)
1252 Sum += Q.interferingVRegs()[i]->weight;
1257 /// trySpillInterferences - Try to spill interfering registers instead of the
1258 /// current one. Only do it if the accumulated spill weight is smaller than the
1259 /// current spill weight.
///
/// Picks the candidate PhysReg with the lowest total interference weight,
/// unassigns and spills every interfering range, and returns that PhysReg
/// for VirtReg to use. Returns 0 when no profitable candidate exists.
1260 unsigned RAGreedy::trySpillInterferences(LiveInterval &VirtReg,
1261 AllocationOrder &Order,
1262 SmallVectorImpl<LiveInterval*> &NewVRegs) {
1263 NamedRegionTimer T("Spill Interference", TimerGroupName, TimePassesIsEnabled);
// BestPhys == 0 doubles as the "nothing found" sentinel.
1264 unsigned BestPhys = 0;
1265 float BestWeight = 0;
1268 while (unsigned PhysReg = Order.next()) {
1269 float Weight = calcInterferenceWeight(VirtReg, PhysReg);
// Reject unspillable interference and anything at least as heavy as
// VirtReg itself — spilling would not be a net win.
1270 if (Weight == HUGE_VALF || Weight >= VirtReg.weight)
1272 if (!BestPhys || Weight < BestWeight)
1273 BestPhys = PhysReg, BestWeight = Weight;
1276 // No candidates found.
1280 // Collect all interfering registers.
1281 SmallVector<LiveInterval*, 8> Spills;
1282 for (const unsigned *AI = TRI->getOverlaps(BestPhys); *AI; ++AI) {
1283 LiveIntervalUnion::Query &Q = query(VirtReg, *AI);
1284 Spills.append(Q.interferingVRegs().begin(), Q.interferingVRegs().end());
// Remove each interfering range from the union before spilling it.
1285 for (unsigned i = 0, e = Q.interferingVRegs().size(); i != e; ++i) {
1286 LiveInterval *VReg = Q.interferingVRegs()[i];
1287 unassign(*VReg, *AI);
1292 DEBUG(dbgs() << "spilling " << Spills.size() << " interferences with weight "
1293 << BestWeight << '\n');
1294 for (unsigned i = 0, e = Spills.size(); i != e; ++i)
1295 spiller().spill(Spills[i], NewVRegs, Spills);
1300 //===----------------------------------------------------------------------===//
1302 //===----------------------------------------------------------------------===//
/// selectOrSplit - Main per-live-range driver, tried in escalating order:
/// a free register, reassigning an interference, evicting, deferring (first
/// generation), splitting, spilling interferences, and finally spilling
/// VirtReg itself. Returns an assigned PhysReg or 0 with new ranges queued
/// in NewVRegs.
1304 unsigned RAGreedy::selectOrSplit(LiveInterval &VirtReg,
1305 SmallVectorImpl<LiveInterval*> &NewVRegs) {
1306 // First try assigning a free register.
1307 AllocationOrder Order(VirtReg.reg, *VRM, ReservedRegs);
1308 while (unsigned PhysReg = Order.next()) {
// No interference at all: take this register.
1309 if (!checkPhysRegInterference(VirtReg, PhysReg))
1313 if (unsigned PhysReg = tryReassign(VirtReg, Order, NewVRegs))
1316 if (unsigned PhysReg = tryEvict(VirtReg, Order, NewVRegs))
1319 assert(NewVRegs.empty() && "Cannot append to existing NewVRegs");
1321 // The first time we see a live range, don't try to split or spill.
1322 // Wait until the second time, when all smaller ranges have been allocated.
1323 // This gives a better picture of the interference to split around.
1324 if (Generation[VirtReg.reg] == 1) {
// Requeue VirtReg untouched; it comes back with Generation > 1.
1325 NewVRegs.push_back(&VirtReg);
1329 // Try splitting VirtReg or interferences.
1330 unsigned PhysReg = trySplit(VirtReg, Order, NewVRegs);
1331 if (PhysReg || !NewVRegs.empty())
1334 // Try to spill another interfering reg with less spill weight.
1335 PhysReg = trySpillInterferences(VirtReg, Order, NewVRegs);
1339 // Finally spill VirtReg itself.
1340 NamedRegionTimer T("Spiller", TimerGroupName, TimePassesIsEnabled);
1341 SmallVector<LiveInterval*, 1> pendingSpills;
1342 spiller().spill(&VirtReg, NewVRegs, pendingSpills);
1344 // The live virtual register requesting allocation was spilled, so tell
1345 // the caller not to allocate anything during this round.
1349 bool RAGreedy::runOnMachineFunction(MachineFunction &mf) {
1350 DEBUG(dbgs() << "********** GREEDY REGISTER ALLOCATION **********\n"
1351 << "********** Function: "
1352 << ((Value*)mf.getFunction())->getName() << '\n');
1356 MF->verify(this, "Before greedy register allocator");
1358 RegAllocBase::init(getAnalysis<VirtRegMap>(), getAnalysis<LiveIntervals>());
1359 Indexes = &getAnalysis<SlotIndexes>();
1360 DomTree = &getAnalysis<MachineDominatorTree>();
1361 ReservedRegs = TRI->getReservedRegs(*MF);
1362 SpillerInstance.reset(createInlineSpiller(*this, *MF, *VRM));
1363 Loops = &getAnalysis<MachineLoopInfo>();
1364 LoopRanges = &getAnalysis<MachineLoopRanges>();
1365 Bundles = &getAnalysis<EdgeBundles>();
1366 SpillPlacer = &getAnalysis<SpillPlacement>();
1368 SA.reset(new SplitAnalysis(*VRM, *LIS, *Loops));
1372 LIS->addKillFlags();
1376 NamedRegionTimer T("Rewriter", TimerGroupName, TimePassesIsEnabled);
1377 VRM->rewrite(Indexes);
1380 // The pass output is in VirtRegMap. Release all the transient data.