//===-- RegAllocGreedy.cpp - greedy register allocator --------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the RAGreedy function pass for register allocation in
// optimized builds.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "regalloc"
#include "AllocationOrder.h"
#include "InterferenceCache.h"
#include "LiveDebugVariables.h"
#include "LiveRangeEdit.h"
#include "RegAllocBase.h"
#include "SpillPlacement.h"
#include "VirtRegMap.h"
#include "RegisterCoalescer.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Function.h"
#include "llvm/PassAnalysisSupport.h"
#include "llvm/CodeGen/CalcSpillWeights.h"
#include "llvm/CodeGen/EdgeBundles.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/LiveStackAnalysis.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/RegAllocRegistry.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Support/Timer.h"

STATISTIC(NumGlobalSplits, "Number of split global live ranges");
STATISTIC(NumLocalSplits,  "Number of split local live ranges");
STATISTIC(NumEvicted,      "Number of interferences evicted");

static RegisterRegAlloc greedyRegAlloc("greedy", "greedy register allocator",
                                       createGreedyRegisterAllocator);

class RAGreedy : public MachineFunctionPass,
                 private LiveRangeEdit::Delegate {
  MachineDominatorTree *DomTree;
  MachineLoopInfo *Loops;
  SpillPlacement *SpillPlacer;
  LiveDebugVariables *DebugVars;

  std::auto_ptr<Spiller> SpillerInstance;
  std::priority_queue<std::pair<unsigned, unsigned> > Queue;

  // Live ranges pass through a number of stages as we try to allocate them.
  // Some of the stages may also create new live ranges:
  //
  // - Region splitting.
  // - Per-block splitting.
  //
  // Ranges produced by one of the stages skip the previous stages when they are
  // dequeued. This improves performance because we can skip interference checks
  // that are unlikely to give any results. It also guarantees that the live
  // range splitting algorithm terminates, something that is otherwise hard to
  // ensure.

    RS_New,    ///< Never seen before.
    RS_First,  ///< First time in the queue.
    RS_Second, ///< Second time in the queue.
    RS_Global, ///< Produced by global splitting.
    RS_Local,  ///< Produced by local splitting.
    RS_Spill   ///< Produced by spilling.

  static const char *const StageName[];
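
  // Illustrative note (not in the original source): a typical progression for
  // a hard-to-allocate virtual register is RS_New -> RS_First (first time it
  // is queued) -> RS_Second (deferred after a failed first-round attempt) ->
  // RS_Global or RS_Local (created by splitting) -> RS_Spill (created by the
  // spiller). Because a range never moves back to an earlier stage, each stage
  // is only tried a bounded number of times, which is what lets the splitting
  // loop terminate.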

  // RegInfo - Keep additional information about each live range.
    LiveRangeStage Stage;

    // Cascade - Eviction loop prevention. See canEvictInterference().

    RegInfo() : Stage(RS_New), Cascade(0) {}

  IndexedMap<RegInfo, VirtReg2IndexFunctor> ExtraRegInfo;

  LiveRangeStage getStage(const LiveInterval &VirtReg) const {
    return ExtraRegInfo[VirtReg.reg].Stage;

  void setStage(const LiveInterval &VirtReg, LiveRangeStage Stage) {
    ExtraRegInfo.resize(MRI->getNumVirtRegs());
    ExtraRegInfo[VirtReg.reg].Stage = Stage;

  template<typename Iterator>
  void setStage(Iterator Begin, Iterator End, LiveRangeStage NewStage) {
    ExtraRegInfo.resize(MRI->getNumVirtRegs());
    for (; Begin != End; ++Begin) {
      unsigned Reg = (*Begin)->reg;
      if (ExtraRegInfo[Reg].Stage == RS_New)
        ExtraRegInfo[Reg].Stage = NewStage;

  /// Cost of evicting interference.
  struct EvictionCost {
    unsigned BrokenHints; ///< Total number of broken hints.
    float MaxWeight;      ///< Maximum spill weight evicted.

    EvictionCost(unsigned B = 0) : BrokenHints(B), MaxWeight(0) {}

    bool operator<(const EvictionCost &O) const {
      if (BrokenHints != O.BrokenHints)
        return BrokenHints < O.BrokenHints;
      return MaxWeight < O.MaxWeight;
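
    // Note (illustrative, not in the original source): this comparison is
    // lexicographic. Breaking fewer register hints always wins, regardless of
    // the spill weights involved; MaxWeight only breaks ties between
    // candidates that break the same number of hints.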

  std::auto_ptr<SplitAnalysis> SA;
  std::auto_ptr<SplitEditor> SE;

  /// Cached per-block interference maps
  InterferenceCache IntfCache;

  /// All basic blocks where the current register has uses.
  SmallVector<SpillPlacement::BlockConstraint, 8> SplitConstraints;

  /// Global live range splitting candidate info.
  struct GlobalSplitCandidate {
    InterferenceCache::Cursor Intf;
    BitVector LiveBundles;
    SmallVector<unsigned, 8> ActiveBlocks;

    void reset(InterferenceCache &Cache, unsigned Reg) {
      Intf.setPhysReg(Cache, Reg);
      ActiveBlocks.clear();

  /// Candidate info for each PhysReg in AllocationOrder.
  /// This vector never shrinks, but grows to the size of the largest register
  /// class.
  SmallVector<GlobalSplitCandidate, 32> GlobalCand;

  /// Return the pass name.
  virtual const char* getPassName() const {
    return "Greedy Register Allocator";

  /// RAGreedy analysis usage.
  virtual void getAnalysisUsage(AnalysisUsage &AU) const;
  virtual void releaseMemory();
  virtual Spiller &spiller() { return *SpillerInstance; }
  virtual void enqueue(LiveInterval *LI);
  virtual LiveInterval *dequeue();
  virtual unsigned selectOrSplit(LiveInterval&,
                                 SmallVectorImpl<LiveInterval*>&);

  /// Perform register allocation.
  virtual bool runOnMachineFunction(MachineFunction &mf);

  void LRE_WillEraseInstruction(MachineInstr*);
  bool LRE_CanEraseVirtReg(unsigned);
  void LRE_WillShrinkVirtReg(unsigned);
  void LRE_DidCloneVirtReg(unsigned, unsigned);

  float calcSpillCost();
  bool addSplitConstraints(InterferenceCache::Cursor, float&);
  void addThroughConstraints(InterferenceCache::Cursor, ArrayRef<unsigned>);
  void growRegion(GlobalSplitCandidate &Cand);
  float calcGlobalSplitCost(GlobalSplitCandidate&);
  void splitAroundRegion(LiveInterval&, GlobalSplitCandidate&,
                         SmallVectorImpl<LiveInterval*>&);
  void calcGapWeights(unsigned, SmallVectorImpl<float>&);
  bool shouldEvict(LiveInterval &A, bool, LiveInterval &B, bool);
  bool canEvictInterference(LiveInterval&, unsigned, bool, EvictionCost&);
  void evictInterference(LiveInterval&, unsigned,
                         SmallVectorImpl<LiveInterval*>&);

  unsigned tryAssign(LiveInterval&, AllocationOrder&,
                     SmallVectorImpl<LiveInterval*>&);
  unsigned tryEvict(LiveInterval&, AllocationOrder&,
                    SmallVectorImpl<LiveInterval*>&, unsigned = ~0u);
  unsigned tryRegionSplit(LiveInterval&, AllocationOrder&,
                          SmallVectorImpl<LiveInterval*>&);
  unsigned tryLocalSplit(LiveInterval&, AllocationOrder&,
                         SmallVectorImpl<LiveInterval*>&);
  unsigned trySplit(LiveInterval&, AllocationOrder&,
                    SmallVectorImpl<LiveInterval*>&);
} // end anonymous namespace

char RAGreedy::ID = 0;

const char *const RAGreedy::StageName[] = {

// Hysteresis to use when comparing floats.
// This helps stabilize decisions based on float comparisons.
const float Hysteresis = 0.98f;
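
// Illustrative example (not part of the original source): with Hysteresis at
// 0.98, a later split candidate must be more than about 2% cheaper than the
// current best before it replaces it, and a local split is only accepted when
// the estimated new spill weight exceeds the interference it must evict by a
// similar margin. This keeps tiny floating-point differences from flipping
// decisions back and forth.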

FunctionPass* llvm::createGreedyRegisterAllocator() {
  return new RAGreedy();

RAGreedy::RAGreedy(): MachineFunctionPass(ID) {
  initializeLiveDebugVariablesPass(*PassRegistry::getPassRegistry());
  initializeSlotIndexesPass(*PassRegistry::getPassRegistry());
  initializeLiveIntervalsPass(*PassRegistry::getPassRegistry());
  initializeSlotIndexesPass(*PassRegistry::getPassRegistry());
  initializeStrongPHIEliminationPass(*PassRegistry::getPassRegistry());
  initializeRegisterCoalescerPass(*PassRegistry::getPassRegistry());
  initializeCalculateSpillWeightsPass(*PassRegistry::getPassRegistry());
  initializeLiveStacksPass(*PassRegistry::getPassRegistry());
  initializeMachineDominatorTreePass(*PassRegistry::getPassRegistry());
  initializeMachineLoopInfoPass(*PassRegistry::getPassRegistry());
  initializeVirtRegMapPass(*PassRegistry::getPassRegistry());
  initializeEdgeBundlesPass(*PassRegistry::getPassRegistry());
  initializeSpillPlacementPass(*PassRegistry::getPassRegistry());

void RAGreedy::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addRequired<AliasAnalysis>();
  AU.addPreserved<AliasAnalysis>();
  AU.addRequired<LiveIntervals>();
  AU.addRequired<SlotIndexes>();
  AU.addPreserved<SlotIndexes>();
  AU.addRequired<LiveDebugVariables>();
  AU.addPreserved<LiveDebugVariables>();
  AU.addRequiredID(StrongPHIEliminationID);
  AU.addRequiredTransitive<RegisterCoalescer>();
  AU.addRequired<CalculateSpillWeights>();
  AU.addRequired<LiveStacks>();
  AU.addPreserved<LiveStacks>();
  AU.addRequired<MachineDominatorTree>();
  AU.addPreserved<MachineDominatorTree>();
  AU.addRequired<MachineLoopInfo>();
  AU.addPreserved<MachineLoopInfo>();
  AU.addRequired<VirtRegMap>();
  AU.addPreserved<VirtRegMap>();
  AU.addRequired<EdgeBundles>();
  AU.addRequired<SpillPlacement>();
  MachineFunctionPass::getAnalysisUsage(AU);

//===----------------------------------------------------------------------===//
//                     LiveRangeEdit delegate methods
//===----------------------------------------------------------------------===//

void RAGreedy::LRE_WillEraseInstruction(MachineInstr *MI) {
  // LRE itself will remove from SlotIndexes and parent basic block.
  VRM->RemoveMachineInstrFromMaps(MI);

bool RAGreedy::LRE_CanEraseVirtReg(unsigned VirtReg) {
  if (unsigned PhysReg = VRM->getPhys(VirtReg)) {
    unassign(LIS->getInterval(VirtReg), PhysReg);
  // Unassigned virtreg is probably in the priority queue.
  // RegAllocBase will erase it after dequeueing.

void RAGreedy::LRE_WillShrinkVirtReg(unsigned VirtReg) {
  unsigned PhysReg = VRM->getPhys(VirtReg);
  // Register is assigned, put it back on the queue for reassignment.
  LiveInterval &LI = LIS->getInterval(VirtReg);
  unassign(LI, PhysReg);

void RAGreedy::LRE_DidCloneVirtReg(unsigned New, unsigned Old) {
  // LRE may clone a virtual register because dead code elimination causes it to
  // be split into connected components. Ensure that the new register gets the
  // same stage as the parent.
  ExtraRegInfo.grow(New);
  ExtraRegInfo[New] = ExtraRegInfo[Old];

void RAGreedy::releaseMemory() {
  SpillerInstance.reset(0);
  ExtraRegInfo.clear();
  RegAllocBase::releaseMemory();

void RAGreedy::enqueue(LiveInterval *LI) {
  // Prioritize live ranges by size, assigning larger ranges first.
  // The queue holds (size, reg) pairs.
  const unsigned Size = LI->getSize();
  const unsigned Reg = LI->reg;
  assert(TargetRegisterInfo::isVirtualRegister(Reg) &&
         "Can only enqueue virtual registers");

  ExtraRegInfo.grow(Reg);
  if (ExtraRegInfo[Reg].Stage == RS_New)
    ExtraRegInfo[Reg].Stage = RS_First;

  if (ExtraRegInfo[Reg].Stage == RS_Second)
    // Unsplit ranges that couldn't be allocated immediately are deferred until
    // everything else has been allocated. Long ranges are allocated last so
    // they are split against realistic interference.
    Prio = (1u << 31) - Size;
    // Everything else is allocated in long->short order. Long ranges that don't
    // fit should be spilled ASAP so they don't create interference.
    Prio = (1u << 31) + Size;

    // Boost ranges that have a physical register hint.
    if (TargetRegisterInfo::isPhysicalRegister(VRM->getRegAllocPref(Reg)))

  Queue.push(std::make_pair(Prio, Reg));
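
  // Illustrative note (not in the original source): with this encoding, fresh
  // ranges (Prio >= 2^31) always pop before deferred second-round ranges
  // (Prio < 2^31). Within the fresh group, longer ranges pop first; within the
  // deferred group, longer ranges pop last, so they are split against the most
  // realistic interference picture.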

LiveInterval *RAGreedy::dequeue() {
  LiveInterval *LI = &LIS->getInterval(Queue.top().second);

//===----------------------------------------------------------------------===//
//===----------------------------------------------------------------------===//

/// tryAssign - Try to assign VirtReg to an available register.
unsigned RAGreedy::tryAssign(LiveInterval &VirtReg,
                             AllocationOrder &Order,
                             SmallVectorImpl<LiveInterval*> &NewVRegs) {
  while ((PhysReg = Order.next()))
    if (!checkPhysRegInterference(VirtReg, PhysReg))
  if (!PhysReg || Order.isHint(PhysReg))

  // PhysReg is available, but there may be a better choice.
  //
  // If we missed a simple hint, try to cheaply evict interference from the
  // preferred register.
  if (unsigned Hint = MRI->getSimpleHint(VirtReg.reg))
    if (Order.isHint(Hint)) {
      DEBUG(dbgs() << "missed hint " << PrintReg(Hint, TRI) << '\n');
      EvictionCost MaxCost(1);
      if (canEvictInterference(VirtReg, Hint, true, MaxCost)) {
        evictInterference(VirtReg, Hint, NewVRegs);

  // Try to evict interference from a cheaper alternative.
  unsigned Cost = TRI->getCostPerUse(PhysReg);

  // Most registers have 0 additional cost.

  DEBUG(dbgs() << PrintReg(PhysReg, TRI) << " is available at cost " << Cost
  unsigned CheapReg = tryEvict(VirtReg, Order, NewVRegs, Cost);
  return CheapReg ? CheapReg : PhysReg;

//===----------------------------------------------------------------------===//
//                         Interference eviction
//===----------------------------------------------------------------------===//

/// shouldEvict - determine if A should evict the assigned live range B. The
/// eviction policy defined by this function together with the allocation order
/// defined by enqueue() decides which registers ultimately end up being split
/// and spilled.
///
/// Cascade numbers are used to prevent infinite loops if this function is a
/// cyclic relation.
///
/// @param A          The live range to be assigned.
/// @param IsHint     True when A is about to be assigned to its preferred
///                   register.
/// @param B          The live range to be evicted.
/// @param BreaksHint True when B is already assigned to its preferred register.
bool RAGreedy::shouldEvict(LiveInterval &A, bool IsHint,
                           LiveInterval &B, bool BreaksHint) {
  bool CanSplit = getStage(B) <= RS_Second;

  // Be fairly aggressive about following hints as long as the evictee can be
  // split.
  if (CanSplit && IsHint && !BreaksHint)

  return A.weight > B.weight;

/// canEvictInterference - Return true if all interferences between VirtReg and
/// PhysReg can be evicted.
///
/// @param VirtReg Live range that is about to be assigned.
/// @param PhysReg Desired register for assignment.
/// @param IsHint  True when PhysReg is VirtReg's preferred register.
/// @param MaxCost Only look for cheaper candidates and update with new cost
///                when returning true.
/// @returns True when interference can be evicted cheaper than MaxCost.
bool RAGreedy::canEvictInterference(LiveInterval &VirtReg, unsigned PhysReg,
                                    bool IsHint, EvictionCost &MaxCost) {
  // Find VirtReg's cascade number. This will be unassigned if VirtReg was never
  // involved in an eviction before. If a cascade number was assigned, deny
  // evicting anything with the same or a newer cascade number. This prevents
  // infinite eviction loops.
  //
  // This works out so a register without a cascade number is allowed to evict
  // anything, and it can be evicted by anything.
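  //
  // Illustrative example (not in the original comment): suppose %A has cascade
  // number 3 and evicts %B; evictInterference() then stamps %B with cascade 3
  // as well. If %B later competes for a register, it may only evict ranges
  // with a strictly smaller (or unassigned) cascade number, so it can never
  // throw %A back out, and the pair cannot ping-pong forever. Only urgent
  // evictions may break this rule, and they pay a steep cost for it below.
  // (%A and %B are hypothetical virtual registers used only for this example.)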

  unsigned Cascade = ExtraRegInfo[VirtReg.reg].Cascade;
    Cascade = NextCascade;

  for (const unsigned *AliasI = TRI->getOverlaps(PhysReg); *AliasI; ++AliasI) {
    LiveIntervalUnion::Query &Q = query(VirtReg, *AliasI);
    // If there are 10 or more interferences, chances are one is heavier.
    if (Q.collectInterferingVRegs(10) >= 10)

    // Check if any interfering live range is heavier than MaxWeight.
    for (unsigned i = Q.interferingVRegs().size(); i; --i) {
      LiveInterval *Intf = Q.interferingVRegs()[i - 1];
      if (TargetRegisterInfo::isPhysicalRegister(Intf->reg))
      // Never evict spill products. They cannot be split or spilled.
      if (getStage(*Intf) == RS_Spill)
      // Once a live range becomes small enough, it is urgent that we find a
      // register for it. This is indicated by an infinite spill weight. These
      // urgent live ranges get to evict almost anything.
      bool Urgent = !VirtReg.isSpillable() && Intf->isSpillable();
      // Only evict older cascades or live ranges without a cascade.
      unsigned IntfCascade = ExtraRegInfo[Intf->reg].Cascade;
      if (Cascade <= IntfCascade) {
        // We permit breaking cascades for urgent evictions. It should be the
        // last resort, though, so make it really expensive.
        Cost.BrokenHints += 10;
      // Would this break a satisfied hint?
      bool BreaksHint = VRM->hasPreferredPhys(Intf->reg);
      // Update eviction cost.
      Cost.BrokenHints += BreaksHint;
      Cost.MaxWeight = std::max(Cost.MaxWeight, Intf->weight);
      // Abort if this would be too expensive.
      if (!(Cost < MaxCost))
      // Finally, apply the eviction policy for non-urgent evictions.
      if (!Urgent && !shouldEvict(VirtReg, IsHint, *Intf, BreaksHint))

/// evictInterference - Evict any interfering registers that prevent VirtReg
/// from being assigned to PhysReg. This assumes that canEvictInterference
/// returned true.
void RAGreedy::evictInterference(LiveInterval &VirtReg, unsigned PhysReg,
                                 SmallVectorImpl<LiveInterval*> &NewVRegs) {
  // Make sure that VirtReg has a cascade number, and assign that cascade
  // number to every evicted register. These live ranges can then only be
  // evicted by a newer cascade, preventing infinite loops.
  unsigned Cascade = ExtraRegInfo[VirtReg.reg].Cascade;
    Cascade = ExtraRegInfo[VirtReg.reg].Cascade = NextCascade++;

  DEBUG(dbgs() << "evicting " << PrintReg(PhysReg, TRI)
               << " interference: Cascade " << Cascade << '\n');
  for (const unsigned *AliasI = TRI->getOverlaps(PhysReg); *AliasI; ++AliasI) {
    LiveIntervalUnion::Query &Q = query(VirtReg, *AliasI);
    assert(Q.seenAllInterferences() && "Didn't check all interferences.");
    for (unsigned i = 0, e = Q.interferingVRegs().size(); i != e; ++i) {
      LiveInterval *Intf = Q.interferingVRegs()[i];
      unassign(*Intf, VRM->getPhys(Intf->reg));
      assert((ExtraRegInfo[Intf->reg].Cascade < Cascade ||
              VirtReg.isSpillable() < Intf->isSpillable()) &&
             "Cannot decrease cascade number, illegal eviction");
      ExtraRegInfo[Intf->reg].Cascade = Cascade;
      NewVRegs.push_back(Intf);

/// tryEvict - Try to evict all interferences for a physreg.
/// @param  VirtReg Currently unassigned virtual register.
/// @param  Order   Physregs to try.
/// @return         Physreg to assign VirtReg, or 0.
unsigned RAGreedy::tryEvict(LiveInterval &VirtReg,
                            AllocationOrder &Order,
                            SmallVectorImpl<LiveInterval*> &NewVRegs,
                            unsigned CostPerUseLimit) {
  NamedRegionTimer T("Evict", TimerGroupName, TimePassesIsEnabled);

  // Keep track of the cheapest interference seen so far.
  EvictionCost BestCost(~0u);
  unsigned BestPhys = 0;

  // When we are just looking for a reduced cost per use, don't break any
  // hints, and only evict smaller spill weights.
  if (CostPerUseLimit < ~0u) {
    BestCost.BrokenHints = 0;
    BestCost.MaxWeight = VirtReg.weight;

  while (unsigned PhysReg = Order.next()) {
    if (TRI->getCostPerUse(PhysReg) >= CostPerUseLimit)
    // The first use of a callee-saved register in a function has cost 1.
    // Don't start using a CSR when the CostPerUseLimit is low.
    if (CostPerUseLimit == 1)
      if (unsigned CSR = RegClassInfo.getLastCalleeSavedAlias(PhysReg))
        if (!MRI->isPhysRegUsed(CSR)) {
          DEBUG(dbgs() << PrintReg(PhysReg, TRI) << " would clobber CSR "
                       << PrintReg(CSR, TRI) << '\n');

    if (!canEvictInterference(VirtReg, PhysReg, false, BestCost))

    // Stop if the hint can be used.
    if (Order.isHint(PhysReg))

  evictInterference(VirtReg, BestPhys, NewVRegs);

//===----------------------------------------------------------------------===//
//===----------------------------------------------------------------------===//

/// addSplitConstraints - Fill out the SplitConstraints vector based on the
/// interference pattern in PhysReg and its aliases. Add the constraints to
/// SpillPlacement and return the static cost of this split in Cost, assuming
/// that all preferences in SplitConstraints are met.
/// Return false if there are no bundles with positive bias.
bool RAGreedy::addSplitConstraints(InterferenceCache::Cursor Intf,
  ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks();

  // Reset interference dependent info.
  SplitConstraints.resize(UseBlocks.size());
  float StaticCost = 0;
  for (unsigned i = 0; i != UseBlocks.size(); ++i) {
    const SplitAnalysis::BlockInfo &BI = UseBlocks[i];
    SpillPlacement::BlockConstraint &BC = SplitConstraints[i];

    BC.Number = BI.MBB->getNumber();
    Intf.moveToBlock(BC.Number);
    BC.Entry = BI.LiveIn ? SpillPlacement::PrefReg : SpillPlacement::DontCare;
    BC.Exit = BI.LiveOut ? SpillPlacement::PrefReg : SpillPlacement::DontCare;

    if (!Intf.hasInterference())

    // Number of spill code instructions to insert.

    // Interference for the live-in value.
    if (Intf.first() <= Indexes->getMBBStartIdx(BC.Number))
      BC.Entry = SpillPlacement::MustSpill, ++Ins;
    else if (Intf.first() < BI.FirstUse)
      BC.Entry = SpillPlacement::PrefSpill, ++Ins;
    else if (Intf.first() < BI.LastUse)

    // Interference for the live-out value.
    if (Intf.last() >= SA->getLastSplitPoint(BC.Number))
      BC.Exit = SpillPlacement::MustSpill, ++Ins;
    else if (Intf.last() > BI.LastUse)
      BC.Exit = SpillPlacement::PrefSpill, ++Ins;
    else if (Intf.last() > BI.FirstUse)

    // Accumulate the total frequency of inserted spill code.
      StaticCost += Ins * SpillPlacer->getBlockFrequency(BC.Number);

  // Add constraints for use-blocks. Note that these are the only constraints
  // that may add a positive bias, it is downhill from here.
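  //
  // (Illustrative note, not part of the original comment: "positive bias"
  // means at least one edge bundle actually wants the register live across it,
  // i.e. some use block contributed a PrefReg entry or exit constraint above.
  // Live-through blocks added later only ever ask for spills, so a candidate
  // with no positive bundles at this point can never become profitable and is
  // rejected before the region is grown.)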
  SpillPlacer->addConstraints(SplitConstraints);
  return SpillPlacer->scanActiveBundles();

/// addThroughConstraints - Add constraints and links to SpillPlacer from the
/// live-through blocks in Blocks.
void RAGreedy::addThroughConstraints(InterferenceCache::Cursor Intf,
                                     ArrayRef<unsigned> Blocks) {
  const unsigned GroupSize = 8;
  SpillPlacement::BlockConstraint BCS[GroupSize];
  unsigned TBS[GroupSize];
  unsigned B = 0, T = 0;

  for (unsigned i = 0; i != Blocks.size(); ++i) {
    unsigned Number = Blocks[i];
    Intf.moveToBlock(Number);

    if (!Intf.hasInterference()) {
      assert(T < GroupSize && "Array overflow");
      if (++T == GroupSize) {
        SpillPlacer->addLinks(makeArrayRef(TBS, T));

    assert(B < GroupSize && "Array overflow");
    BCS[B].Number = Number;

    // Interference for the live-in value.
    if (Intf.first() <= Indexes->getMBBStartIdx(Number))
      BCS[B].Entry = SpillPlacement::MustSpill;
      BCS[B].Entry = SpillPlacement::PrefSpill;

    // Interference for the live-out value.
    if (Intf.last() >= SA->getLastSplitPoint(Number))
      BCS[B].Exit = SpillPlacement::MustSpill;
      BCS[B].Exit = SpillPlacement::PrefSpill;

    if (++B == GroupSize) {
      ArrayRef<SpillPlacement::BlockConstraint> Array(BCS, B);
      SpillPlacer->addConstraints(Array);

  ArrayRef<SpillPlacement::BlockConstraint> Array(BCS, B);
  SpillPlacer->addConstraints(Array);
  SpillPlacer->addLinks(makeArrayRef(TBS, T));

void RAGreedy::growRegion(GlobalSplitCandidate &Cand) {
  // Keep track of through blocks that have not been added to SpillPlacer.
  BitVector Todo = SA->getThroughBlocks();
  SmallVectorImpl<unsigned> &ActiveBlocks = Cand.ActiveBlocks;
  unsigned AddedTo = 0;
  unsigned Visited = 0;

    ArrayRef<unsigned> NewBundles = SpillPlacer->getRecentPositive();
    // Find new through blocks in the periphery of PrefRegBundles.
    for (int i = 0, e = NewBundles.size(); i != e; ++i) {
      unsigned Bundle = NewBundles[i];
      // Look at all blocks connected to Bundle in the full graph.
      ArrayRef<unsigned> Blocks = Bundles->getBlocks(Bundle);
      for (ArrayRef<unsigned>::iterator I = Blocks.begin(), E = Blocks.end();
        if (!Todo.test(Block))
        // This is a new through block. Add it to SpillPlacer later.
        ActiveBlocks.push_back(Block);

    // Any new blocks to add?
    if (ActiveBlocks.size() == AddedTo)

    // Compute through constraints from the interference, or assume that all
    // through blocks prefer spilling when forming compact regions.
    ArrayRef<unsigned> NewBlocks = makeArrayRef(ActiveBlocks).slice(AddedTo);
      addThroughConstraints(Cand.Intf, NewBlocks);
      SpillPlacer->addPrefSpill(NewBlocks);
    AddedTo = ActiveBlocks.size();

    // Perhaps iterating can enable more bundles?
    SpillPlacer->iterate();

  DEBUG(dbgs() << ", v=" << Visited);

/// calcSpillCost - Compute how expensive it would be to split the live range in
/// SA around all use blocks instead of forming bundle regions.
float RAGreedy::calcSpillCost() {
  const LiveInterval &LI = SA->getParent();
  ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks();
  for (unsigned i = 0; i != UseBlocks.size(); ++i) {
    const SplitAnalysis::BlockInfo &BI = UseBlocks[i];
    unsigned Number = BI.MBB->getNumber();
    // We normally only need one spill instruction - a load or a store.
    Cost += SpillPlacer->getBlockFrequency(Number);

    // Unless the value is redefined in the block.
    if (BI.LiveIn && BI.LiveOut) {
      SlotIndex Start, Stop;
      tie(Start, Stop) = Indexes->getMBBRange(Number);
      LiveInterval::const_iterator I = LI.find(Start);
      assert(I != LI.end() && "Expected live-in value");
      // Is there a different live-out value? If so, we need an extra spill
      // instruction.
        Cost += SpillPlacer->getBlockFrequency(Number);

/// calcGlobalSplitCost - Return the global split cost of following the split
/// pattern in LiveBundles. This cost should be added to the local cost of the
/// interference pattern in SplitConstraints.
float RAGreedy::calcGlobalSplitCost(GlobalSplitCandidate &Cand) {
  float GlobalCost = 0;
  const BitVector &LiveBundles = Cand.LiveBundles;
  ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks();
  for (unsigned i = 0; i != UseBlocks.size(); ++i) {
    const SplitAnalysis::BlockInfo &BI = UseBlocks[i];
    SpillPlacement::BlockConstraint &BC = SplitConstraints[i];
    bool RegIn  = LiveBundles[Bundles->getBundle(BC.Number, 0)];
    bool RegOut = LiveBundles[Bundles->getBundle(BC.Number, 1)];
      Ins += RegIn != (BC.Entry == SpillPlacement::PrefReg);
      Ins += RegOut != (BC.Exit == SpillPlacement::PrefReg);
      GlobalCost += Ins * SpillPlacer->getBlockFrequency(BC.Number);

  for (unsigned i = 0, e = Cand.ActiveBlocks.size(); i != e; ++i) {
    unsigned Number = Cand.ActiveBlocks[i];
    bool RegIn  = LiveBundles[Bundles->getBundle(Number, 0)];
    bool RegOut = LiveBundles[Bundles->getBundle(Number, 1)];
    if (!RegIn && !RegOut)
    if (RegIn && RegOut) {
      // We need double spill code if this block has interference.
      Cand.Intf.moveToBlock(Number);
      if (Cand.Intf.hasInterference())
        GlobalCost += 2*SpillPlacer->getBlockFrequency(Number);
    // live-in / stack-out or stack-in live-out.
    GlobalCost += SpillPlacer->getBlockFrequency(Number);

/// splitAroundRegion - Split VirtReg around the region determined by
/// LiveBundles. Make an effort to avoid interference from PhysReg.
///
/// The 'register' interval is going to contain as many uses as possible while
/// avoiding interference. The 'stack' interval is the complement constructed by
/// SplitEditor. It will contain the rest.
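///
/// Illustrative sketch (not part of the original comment): for a value that is
/// heavily used inside a loop but clobbered by a call elsewhere, the bundles
/// typically vote PrefReg around the loop, so the 'register' interval ends up
/// covering the loop blocks while the 'stack' interval covers the blocks
/// around the call, with copies inserted near the boundary between the two.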
void RAGreedy::splitAroundRegion(LiveInterval &VirtReg,
                                 GlobalSplitCandidate &Cand,
                                 SmallVectorImpl<LiveInterval*> &NewVRegs) {
  const BitVector &LiveBundles = Cand.LiveBundles;

    dbgs() << "Splitting around region for " << PrintReg(Cand.PhysReg, TRI)
    for (int i = LiveBundles.find_first(); i>=0; i = LiveBundles.find_next(i))
      dbgs() << " EB#" << i;

  InterferenceCache::Cursor &Intf = Cand.Intf;
  LiveRangeEdit LREdit(VirtReg, NewVRegs, this);

  // Create the main cross-block interval.
  const unsigned MainIntv = SE->openIntv();

  // First handle all the blocks with uses.
  ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks();
  for (unsigned i = 0; i != UseBlocks.size(); ++i) {
    const SplitAnalysis::BlockInfo &BI = UseBlocks[i];
    bool RegIn = BI.LiveIn &&
                 LiveBundles[Bundles->getBundle(BI.MBB->getNumber(), 0)];
    bool RegOut = BI.LiveOut &&
                  LiveBundles[Bundles->getBundle(BI.MBB->getNumber(), 1)];

    // Create separate intervals for isolated blocks with multiple uses.
    if (!RegIn && !RegOut) {
      DEBUG(dbgs() << "BB#" << BI.MBB->getNumber() << " isolated.\n");
      if (!BI.isOneInstr()) {
        SE->splitSingleBlock(BI);
        SE->selectIntv(MainIntv);

    Intf.moveToBlock(BI.MBB->getNumber());
      SE->splitLiveThroughBlock(BI.MBB->getNumber(),
                                MainIntv, Intf.first(),
                                MainIntv, Intf.last());
      SE->splitRegInBlock(BI, MainIntv, Intf.first());
      SE->splitRegOutBlock(BI, MainIntv, Intf.last());

  // Handle live-through blocks.
  for (unsigned i = 0, e = Cand.ActiveBlocks.size(); i != e; ++i) {
    unsigned Number = Cand.ActiveBlocks[i];
    bool RegIn  = LiveBundles[Bundles->getBundle(Number, 0)];
    bool RegOut = LiveBundles[Bundles->getBundle(Number, 1)];
    if (!RegIn && !RegOut)
    Intf.moveToBlock(Number);
    SE->splitLiveThroughBlock(Number, RegIn ? MainIntv : 0, Intf.first(),
                              RegOut ? MainIntv : 0, Intf.last());

  SmallVector<unsigned, 8> IntvMap;
  SE->finish(&IntvMap);
  DebugVars->splitRegister(VirtReg.reg, LREdit.regs());

  ExtraRegInfo.resize(MRI->getNumVirtRegs());
  unsigned OrigBlocks = SA->getNumLiveBlocks();

  // Sort out the new intervals created by splitting. We get four kinds:
  // - Remainder intervals should not be split again.
  // - Candidate intervals can be assigned to Cand.PhysReg.
  // - Block-local splits are candidates for local splitting.
  // - DCE leftovers should go back on the queue.
  for (unsigned i = 0, e = LREdit.size(); i != e; ++i) {
    LiveInterval &Reg = *LREdit.get(i);

    // Ignore old intervals from DCE.
    if (getStage(Reg) != RS_New)

    // Remainder interval. Don't try splitting again, spill if it doesn't
    // allocate.
    if (IntvMap[i] == 0) {
      setStage(Reg, RS_Global);

    // Main interval. Allow repeated splitting as long as the number of live
    // blocks is strictly decreasing.
    if (IntvMap[i] == MainIntv) {
      if (SA->countLiveBlocks(&Reg) >= OrigBlocks) {
        DEBUG(dbgs() << "Main interval covers the same " << OrigBlocks
                     << " blocks as original.\n");
        // Don't allow repeated splitting as a safeguard against looping.
        setStage(Reg, RS_Global);

    // Other intervals are treated as new. This includes local intervals created
    // for blocks with multiple uses, and anything created by DCE.

    MF->verify(this, "After splitting live range around region");

unsigned RAGreedy::tryRegionSplit(LiveInterval &VirtReg, AllocationOrder &Order,
                                  SmallVectorImpl<LiveInterval*> &NewVRegs) {
  float BestCost = Hysteresis * calcSpillCost();
  DEBUG(dbgs() << "Cost of isolating all blocks = " << BestCost << '\n');
  const unsigned NoCand = ~0u;
  unsigned BestCand = NoCand;
  unsigned NumCands = 0;

  while (unsigned PhysReg = Order.next()) {
    // Discard bad candidates before we run out of interference cache cursors.
    // This will only affect register classes with a lot of registers (>32).
    if (NumCands == IntfCache.getMaxCursors()) {
      unsigned WorstCount = ~0u;
      for (unsigned i = 0; i != NumCands; ++i) {
        unsigned Count = GlobalCand[i].LiveBundles.count();
        if (Count < WorstCount)
          Worst = i, WorstCount = Count;
      GlobalCand[Worst] = GlobalCand[NumCands];

    if (GlobalCand.size() <= NumCands)
      GlobalCand.resize(NumCands+1);
    GlobalSplitCandidate &Cand = GlobalCand[NumCands];
    Cand.reset(IntfCache, PhysReg);

    SpillPlacer->prepare(Cand.LiveBundles);
    if (!addSplitConstraints(Cand.Intf, Cost)) {
      DEBUG(dbgs() << PrintReg(PhysReg, TRI) << "\tno positive bundles\n");

    DEBUG(dbgs() << PrintReg(PhysReg, TRI) << "\tstatic = " << Cost);
    if (Cost >= BestCost) {
        if (BestCand == NoCand)
          dbgs() << " worse than no bundles\n";
          dbgs() << " worse than "
                 << PrintReg(GlobalCand[BestCand].PhysReg, TRI) << '\n';

    SpillPlacer->finish();

    // No live bundles, defer to splitSingleBlocks().
    if (!Cand.LiveBundles.any()) {
      DEBUG(dbgs() << " no bundles.\n");

    Cost += calcGlobalSplitCost(Cand);
      dbgs() << ", total = " << Cost << " with bundles";
      for (int i = Cand.LiveBundles.find_first(); i>=0;
           i = Cand.LiveBundles.find_next(i))
        dbgs() << " EB#" << i;

    if (Cost < BestCost) {
      BestCand = NumCands;
      BestCost = Hysteresis * Cost; // Prevent rounding effects.

  if (BestCand == NoCand)

  splitAroundRegion(VirtReg, GlobalCand[BestCand], NewVRegs);

//===----------------------------------------------------------------------===//
//===----------------------------------------------------------------------===//

/// calcGapWeights - Compute the maximum spill weight that needs to be evicted
/// in order to use PhysReg between two entries in SA->UseSlots.
///
/// GapWeight[i] represents the gap between UseSlots[i] and UseSlots[i+1].
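///
/// Illustrative example (not part of the original comment): a local interval
/// with uses at slots A, B, C, D has three gaps: [A,B), [B,C), and [C,D).
/// GapWeight[1] would be the largest spill weight among assigned ranges that
/// interfere with PhysReg somewhere between uses B and C; a local split that
/// covers that gap must be worth more than GapWeight[1] before it is accepted.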
void RAGreedy::calcGapWeights(unsigned PhysReg,
                              SmallVectorImpl<float> &GapWeight) {
  assert(SA->getUseBlocks().size() == 1 && "Not a local interval");
  const SplitAnalysis::BlockInfo &BI = SA->getUseBlocks().front();
  const SmallVectorImpl<SlotIndex> &Uses = SA->UseSlots;
  const unsigned NumGaps = Uses.size()-1;

  // Start and end points for the interference check.
  SlotIndex StartIdx = BI.LiveIn ? BI.FirstUse.getBaseIndex() : BI.FirstUse;
  SlotIndex StopIdx = BI.LiveOut ? BI.LastUse.getBoundaryIndex() : BI.LastUse;

  GapWeight.assign(NumGaps, 0.0f);

  // Add interference from each overlapping register.
  for (const unsigned *AI = TRI->getOverlaps(PhysReg); *AI; ++AI) {
    if (!query(const_cast<LiveInterval&>(SA->getParent()), *AI)
        .checkInterference())

    // We know that VirtReg is a continuous interval from FirstUse to LastUse,
    // so we don't need InterferenceQuery.
    //
    // Interference that overlaps an instruction is counted in both gaps
    // surrounding the instruction. The exception is interference before
    // StartIdx and after StopIdx.
    LiveIntervalUnion::SegmentIter IntI = PhysReg2LiveUnion[*AI].find(StartIdx);
    for (unsigned Gap = 0; IntI.valid() && IntI.start() < StopIdx; ++IntI) {
      // Skip the gaps before IntI.
      while (Uses[Gap+1].getBoundaryIndex() < IntI.start())
        if (++Gap == NumGaps)

      // Update the gaps covered by IntI.
      const float weight = IntI.value()->weight;
      for (; Gap != NumGaps; ++Gap) {
        GapWeight[Gap] = std::max(GapWeight[Gap], weight);
        if (Uses[Gap+1].getBaseIndex() >= IntI.stop())

/// tryLocalSplit - Try to split VirtReg into smaller intervals inside its only
/// basic block.
unsigned RAGreedy::tryLocalSplit(LiveInterval &VirtReg, AllocationOrder &Order,
                                 SmallVectorImpl<LiveInterval*> &NewVRegs) {
  assert(SA->getUseBlocks().size() == 1 && "Not a local interval");
  const SplitAnalysis::BlockInfo &BI = SA->getUseBlocks().front();

  // Note that it is possible to have an interval that is live-in or live-out
  // while only covering a single block - A phi-def can use undef values from
  // predecessors, and the block could be a single-block loop.
  // We don't bother doing anything clever about such a case, we simply assume
  // that the interval is continuous from FirstUse to LastUse. We should make
  // sure that we don't do anything illegal to such an interval, though.
  const SmallVectorImpl<SlotIndex> &Uses = SA->UseSlots;
  if (Uses.size() <= 2)
  const unsigned NumGaps = Uses.size()-1;

    dbgs() << "tryLocalSplit: ";
    for (unsigned i = 0, e = Uses.size(); i != e; ++i)
      dbgs() << ' ' << SA->UseSlots[i];

  // Since we allow local split results to be split again, there is a risk of
  // creating infinite loops. It is tempting to require that the new live
  // ranges have fewer instructions than the original. That would guarantee
  // convergence, but it is too strict. A live range with 3 instructions can be
  // split 2+3 (including the COPY), and we want to allow that.
  //
  // Instead we use these rules:
  //
  // 1. Allow any split for ranges with getStage() < RS_Local. (Except for the
  //    noop split, of course).
  // 2. Require progress be made for ranges with getStage() >= RS_Local. All
  //    the new ranges must have fewer instructions than before the split.
  // 3. New ranges with the same number of instructions are marked RS_Local,
  //    smaller ranges are marked RS_New.
  //
  // These rules allow a 3 -> 2+3 split once, which we need. They also prevent
  // excessive splitting and infinite loops.
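  //
  // Illustrative walk-through (not part of the original comment): a
  // 3-instruction range whose stage is below RS_Local may first be split into
  // a 2-instruction piece (marked RS_New) and a full-size 3-instruction piece
  // (marked RS_Local, since it made no progress). If the RS_Local piece comes
  // back, rule 2 kicks in: any further split must strictly reduce the
  // instruction count, so the same 3 -> 2+3 split cannot repeat forever.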

  bool ProgressRequired = getStage(VirtReg) >= RS_Local;

  // Best split candidate.
  unsigned BestBefore = NumGaps;
  unsigned BestAfter = 0;

  const float blockFreq = SpillPlacer->getBlockFrequency(BI.MBB->getNumber());
  SmallVector<float, 8> GapWeight;

  while (unsigned PhysReg = Order.next()) {
    // Keep track of the largest spill weight that would need to be evicted in
    // order to make use of PhysReg between UseSlots[i] and UseSlots[i+1].
    calcGapWeights(PhysReg, GapWeight);

    // Try to find the best sequence of gaps to close.
    // The new spill weight must be larger than any gap interference.
    //
    // We will split before Uses[SplitBefore] and after Uses[SplitAfter].
    unsigned SplitBefore = 0, SplitAfter = 1;

    // MaxGap should always be max(GapWeight[SplitBefore..SplitAfter-1]).
    // It is the spill weight that needs to be evicted.
    float MaxGap = GapWeight[0];

      // Live before/after split?
      const bool LiveBefore = SplitBefore != 0 || BI.LiveIn;
      const bool LiveAfter = SplitAfter != NumGaps || BI.LiveOut;

      DEBUG(dbgs() << PrintReg(PhysReg, TRI) << ' '
                   << Uses[SplitBefore] << '-' << Uses[SplitAfter]
                   << " i=" << MaxGap);

      // Stop before the interval gets so big we wouldn't be making progress.
      if (!LiveBefore && !LiveAfter) {
        DEBUG(dbgs() << " all\n");

      // Should the interval be extended or shrunk?

      // How many gaps would the new range have?
      unsigned NewGaps = LiveBefore + SplitAfter - SplitBefore + LiveAfter;

      // Legally, without causing looping?
      bool Legal = !ProgressRequired || NewGaps < NumGaps;

      if (Legal && MaxGap < HUGE_VALF) {
        // Estimate the new spill weight. Each instruction reads or writes the
        // register. Conservatively assume there are no read-modify-write
        // instructions.
        //
        // Try to guess the size of the new interval.
        const float EstWeight = normalizeSpillWeight(blockFreq * (NewGaps + 1),
                                 Uses[SplitBefore].distance(Uses[SplitAfter]) +
                                 (LiveBefore + LiveAfter)*SlotIndex::InstrDist);
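
        // Illustrative reading (not in the original source): the estimate
        // scales with use density -- blockFreq times the NewGaps + 1
        // instructions the new range would contain, normalized by the range's
        // approximate size in slots (the distance between the outermost uses
        // kept, plus one instruction of slack for each end that stays live).
        // Short, densely used candidates therefore get a high estimated weight
        // and can justify evicting more interference.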

        // Would this split be possible to allocate?
        // Never allocate all gaps, we wouldn't be making progress.
        DEBUG(dbgs() << " w=" << EstWeight);
        if (EstWeight * Hysteresis >= MaxGap) {
          float Diff = EstWeight - MaxGap;
          if (Diff > BestDiff) {
            DEBUG(dbgs() << " (best)");
            BestDiff = Hysteresis * Diff;
            BestBefore = SplitBefore;
            BestAfter = SplitAfter;

      if (++SplitBefore < SplitAfter) {
        DEBUG(dbgs() << " shrink\n");
        // Recompute the max when necessary.
        if (GapWeight[SplitBefore - 1] >= MaxGap) {
          MaxGap = GapWeight[SplitBefore];
          for (unsigned i = SplitBefore + 1; i != SplitAfter; ++i)
            MaxGap = std::max(MaxGap, GapWeight[i]);

      // Try to extend the interval.
      if (SplitAfter >= NumGaps) {
        DEBUG(dbgs() << " end\n");

      DEBUG(dbgs() << " extend\n");
      MaxGap = std::max(MaxGap, GapWeight[SplitAfter++]);

  // Didn't find any candidates?
  if (BestBefore == NumGaps)

  DEBUG(dbgs() << "Best local split range: " << Uses[BestBefore]
               << '-' << Uses[BestAfter] << ", " << BestDiff
               << ", " << (BestAfter - BestBefore + 1) << " instrs\n");

  LiveRangeEdit LREdit(VirtReg, NewVRegs, this);

  SlotIndex SegStart = SE->enterIntvBefore(Uses[BestBefore]);
  SlotIndex SegStop = SE->leaveIntvAfter(Uses[BestAfter]);
  SE->useIntv(SegStart, SegStop);
  SmallVector<unsigned, 8> IntvMap;
  SE->finish(&IntvMap);
  DebugVars->splitRegister(VirtReg.reg, LREdit.regs());

  // If the new range has the same number of instructions as before, mark it as
  // RS_Local so the next split will be forced to make progress. Otherwise,
  // leave the new intervals as RS_New so they can compete.
  bool LiveBefore = BestBefore != 0 || BI.LiveIn;
  bool LiveAfter = BestAfter != NumGaps || BI.LiveOut;
  unsigned NewGaps = LiveBefore + BestAfter - BestBefore + LiveAfter;
  if (NewGaps >= NumGaps) {
    DEBUG(dbgs() << "Tagging non-progress ranges: ");
    assert(!ProgressRequired && "Didn't make progress when it was required.");
    for (unsigned i = 0, e = IntvMap.size(); i != e; ++i)
      if (IntvMap[i] == 1) {
        setStage(*LREdit.get(i), RS_Local);
        DEBUG(dbgs() << PrintReg(LREdit.get(i)->reg));
    DEBUG(dbgs() << '\n');

//===----------------------------------------------------------------------===//
//                          Live Range Splitting
//===----------------------------------------------------------------------===//

/// trySplit - Try to split VirtReg or one of its interferences, making it
/// assignable.
/// @return PhysReg when VirtReg may be assigned and/or new NewVRegs.
unsigned RAGreedy::trySplit(LiveInterval &VirtReg, AllocationOrder &Order,
                            SmallVectorImpl<LiveInterval*> &NewVRegs) {
  // Local intervals are handled separately.
  if (LIS->intervalIsInOneMBB(VirtReg)) {
    NamedRegionTimer T("Local Splitting", TimerGroupName, TimePassesIsEnabled);
    SA->analyze(&VirtReg);
    return tryLocalSplit(VirtReg, Order, NewVRegs);

  NamedRegionTimer T("Global Splitting", TimerGroupName, TimePassesIsEnabled);

  // Don't iterate global splitting.
  // Move straight to spilling if this range was produced by a global split.
  if (getStage(VirtReg) >= RS_Global)

  SA->analyze(&VirtReg);

  // FIXME: SplitAnalysis may repair broken live ranges coming from the
  // coalescer. That may cause the range to become allocatable which means that
  // tryRegionSplit won't be making progress. This check should be replaced with
  // an assertion when the coalescer is fixed.
  if (SA->didRepairRange()) {
    // VirtReg has changed, so all cached queries are invalid.
    invalidateVirtRegs();
    if (unsigned PhysReg = tryAssign(VirtReg, Order, NewVRegs))

  // First try to split around a region spanning multiple blocks.
  unsigned PhysReg = tryRegionSplit(VirtReg, Order, NewVRegs);
  if (PhysReg || !NewVRegs.empty())

  // Then isolate blocks with multiple uses.
  SplitAnalysis::BlockPtrSet Blocks;
  if (SA->getMultiUseBlocks(Blocks)) {
    LiveRangeEdit LREdit(VirtReg, NewVRegs, this);
    SE->splitSingleBlocks(Blocks);
    setStage(NewVRegs.begin(), NewVRegs.end(), RS_Global);
      MF->verify(this, "After splitting live range around basic blocks");

  // Don't assign any physregs.

//===----------------------------------------------------------------------===//
//===----------------------------------------------------------------------===//
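
// Summary note (illustrative, not in the original source): selectOrSplit below
// works through a fixed ladder for each virtual register: try a free register
// (tryAssign), then try evicting cheaper interference (tryEvict), defer
// first-round ranges so later decisions see realistic interference, then try
// splitting (trySplit), and finally spill. Each call either returns a physreg
// or hands new, smaller virtual registers back to the queue.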

unsigned RAGreedy::selectOrSplit(LiveInterval &VirtReg,
                                 SmallVectorImpl<LiveInterval*> &NewVRegs) {
  // First try assigning a free register.
  AllocationOrder Order(VirtReg.reg, *VRM, RegClassInfo);
  if (unsigned PhysReg = tryAssign(VirtReg, Order, NewVRegs))

  LiveRangeStage Stage = getStage(VirtReg);
  DEBUG(dbgs() << StageName[Stage]
               << " Cascade " << ExtraRegInfo[VirtReg.reg].Cascade << '\n');

  // Try to evict a less worthy live range, but only for ranges from the primary
  // queue. The RS_Second ranges already failed to do this, and they should not
  // get a second chance until they have been split.
  if (Stage != RS_Second)
    if (unsigned PhysReg = tryEvict(VirtReg, Order, NewVRegs))

  assert(NewVRegs.empty() && "Cannot append to existing NewVRegs");

  // The first time we see a live range, don't try to split or spill.
  // Wait until the second time, when all smaller ranges have been allocated.
  // This gives a better picture of the interference to split around.
  if (Stage == RS_First) {
    setStage(VirtReg, RS_Second);
    DEBUG(dbgs() << "wait for second round\n");
    NewVRegs.push_back(&VirtReg);

  // If we couldn't allocate a register from spilling, there is probably some
  // invalid inline assembly. The base class will report it.
  if (Stage >= RS_Spill || !VirtReg.isSpillable())

  // Try splitting VirtReg or interferences.
  unsigned PhysReg = trySplit(VirtReg, Order, NewVRegs);
  if (PhysReg || !NewVRegs.empty())

  // Finally spill VirtReg itself.
  NamedRegionTimer T("Spiller", TimerGroupName, TimePassesIsEnabled);
  LiveRangeEdit LRE(VirtReg, NewVRegs, this);
  spiller().spill(LRE);
  setStage(NewVRegs.begin(), NewVRegs.end(), RS_Spill);

    MF->verify(this, "After spilling");

  // The live virtual register requesting allocation was spilled, so tell
  // the caller not to allocate anything during this round.

bool RAGreedy::runOnMachineFunction(MachineFunction &mf) {
  DEBUG(dbgs() << "********** GREEDY REGISTER ALLOCATION **********\n"
               << "********** Function: "
               << ((Value*)mf.getFunction())->getName() << '\n');

    MF->verify(this, "Before greedy register allocator");

  RegAllocBase::init(getAnalysis<VirtRegMap>(), getAnalysis<LiveIntervals>());
  Indexes = &getAnalysis<SlotIndexes>();
  DomTree = &getAnalysis<MachineDominatorTree>();
  SpillerInstance.reset(createInlineSpiller(*this, *MF, *VRM));
  Loops = &getAnalysis<MachineLoopInfo>();
  Bundles = &getAnalysis<EdgeBundles>();
  SpillPlacer = &getAnalysis<SpillPlacement>();
  DebugVars = &getAnalysis<LiveDebugVariables>();

  SA.reset(new SplitAnalysis(*VRM, *LIS, *Loops));
  SE.reset(new SplitEditor(*SA, *LIS, *VRM, *DomTree));
  ExtraRegInfo.clear();
  ExtraRegInfo.resize(MRI->getNumVirtRegs());

  IntfCache.init(MF, &PhysReg2LiveUnion[0], Indexes, TRI);

  LIS->addKillFlags();

  NamedRegionTimer T("Rewriter", TimerGroupName, TimePassesIsEnabled);
  VRM->rewrite(Indexes);

  // Write out new DBG_VALUE instructions.
  DebugVars->emitDebugValues(VRM);

  // The pass output is in VirtRegMap. Release all the transient data.