//===-- RegAllocGreedy.cpp - greedy register allocator -------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the RAGreedy function pass for register allocation in
// optimized builds.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/Passes.h"
#include "AllocationOrder.h"
#include "InterferenceCache.h"
#include "LiveDebugVariables.h"
#include "RegAllocBase.h"
#include "SpillPlacement.h"
#include "Spiller.h"
#include "SplitKit.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/CalcSpillWeights.h"
#include "llvm/CodeGen/EdgeBundles.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/LiveRangeEdit.h"
#include "llvm/CodeGen/LiveRegMatrix.h"
#include "llvm/CodeGen/LiveStackAnalysis.h"
#include "llvm/CodeGen/MachineBlockFrequencyInfo.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegAllocRegistry.h"
#include "llvm/CodeGen/RegisterClassInfo.h"
#include "llvm/CodeGen/VirtRegMap.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/PassAnalysisSupport.h"
#include "llvm/Support/BranchProbability.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Timer.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetSubtargetInfo.h"
#include <queue>

using namespace llvm;

#define DEBUG_TYPE "regalloc"

STATISTIC(NumGlobalSplits, "Number of split global live ranges");
STATISTIC(NumLocalSplits,  "Number of split local live ranges");
STATISTIC(NumEvicted,      "Number of interferences evicted");

static cl::opt<SplitEditor::ComplementSpillMode>
SplitSpillMode("split-spill-mode", cl::Hidden,
  cl::desc("Spill mode for splitting live ranges"),
  cl::values(clEnumValN(SplitEditor::SM_Partition, "default", "Default"),
             clEnumValN(SplitEditor::SM_Size,  "size",  "Optimize for size"),
             clEnumValN(SplitEditor::SM_Speed, "speed", "Optimize for speed"),
             clEnumValEnd),
  cl::init(SplitEditor::SM_Partition));

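// Example (illustrative, not from the original source): the spill mode can be
// selected on the llc command line, e.g.
//   llc -regalloc=greedy -split-spill-mode=size foo.ll
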
static cl::opt<unsigned>
LastChanceRecoloringMaxDepth("lcr-max-depth", cl::Hidden,
                             cl::desc("Last chance recoloring max depth"),
                             cl::init(5));

static cl::opt<unsigned> LastChanceRecoloringMaxInterference(
    "lcr-max-interf", cl::Hidden,
    cl::desc("Last chance recoloring maximum number of considered"
             " interferences at a time"),
    cl::init(8));

static cl::opt<bool>
ExhaustiveSearch("exhaustive-register-search", cl::NotHidden,
                 cl::desc("Exhaustive Search for registers bypassing the depth "
                          "and interference cutoffs of last chance recoloring"));

static cl::opt<bool> EnableLocalReassignment(
    "enable-local-reassign", cl::Hidden,
    cl::desc("Local reassignment can yield better allocation decisions, but "
             "may be compile time intensive"),
    cl::init(false));

// FIXME: Find a good default for this flag and remove the flag.
static cl::opt<unsigned>
CSRFirstTimeCost("regalloc-csr-first-time-cost",
                 cl::desc("Cost for first time use of callee-saved register."),
                 cl::init(0), cl::Hidden);

static RegisterRegAlloc greedyRegAlloc("greedy", "greedy register allocator",
                                       createGreedyRegisterAllocator);

namespace {
class RAGreedy : public MachineFunctionPass,
                 public RegAllocBase,
                 private LiveRangeEdit::Delegate {
  // Convenient shortcuts.
  typedef std::priority_queue<std::pair<unsigned, unsigned> > PQueue;
  typedef SmallPtrSet<LiveInterval *, 4> SmallLISet;
  typedef SmallSet<unsigned, 16> SmallVirtRegSet;

  // context
  MachineFunction *MF;

  // Shortcuts to some useful interface.
  const TargetInstrInfo *TII;
  const TargetRegisterInfo *TRI;
  RegisterClassInfo RCI;

  // analyses
  SlotIndexes *Indexes;
  MachineBlockFrequencyInfo *MBFI;
  MachineDominatorTree *DomTree;
  MachineLoopInfo *Loops;
  EdgeBundles *Bundles;
  SpillPlacement *SpillPlacer;
  LiveDebugVariables *DebugVars;

  // state
  std::unique_ptr<Spiller> SpillerInstance;
  PQueue Queue;
  unsigned NextCascade;

  // Live ranges pass through a number of stages as we try to allocate them.
  // Some of the stages may also create new live ranges:
  //
  // - Region splitting.
  // - Per-block splitting.
  // - Local splitting.
  //
  // Ranges produced by one of the stages skip the previous stages when they are
  // dequeued. This improves performance because we can skip interference checks
  // that are unlikely to give any results. It also guarantees that the live
  // range splitting algorithm terminates, something that is otherwise hard to
  // ensure.
  enum LiveRangeStage {
    /// Newly created live range that has never been queued.
    RS_New,

    /// Only attempt assignment and eviction. Then requeue as RS_Split.
    RS_Assign,

    /// Attempt live range splitting if assignment is impossible.
    RS_Split,

    /// Attempt more aggressive live range splitting that is guaranteed to make
    /// progress. This is used for split products that may not be making
    /// progress.
    RS_Split2,

    /// Live range will be spilled. No more splitting will be attempted.
    RS_Spill,

    /// There is nothing more we can do to this live range. Abort compilation
    /// if it can't be assigned.
    RS_Done
  };

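  // An illustrative walk through the stages (not a comment from the original
  // source): a freshly created vreg is RS_New and is bumped to RS_Assign when
  // first enqueued. If assignment and eviction both fail, it is requeued as
  // RS_Split and split. A split product that still covers as many blocks as
  // its parent is marked RS_Split2, so only strictly shrinking splits are
  // allowed afterwards; products of per-instruction splitting become
  // RS_Spill, and spill products end up RS_Done.
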
  // Enum CutOffStage to keep track of whether the register allocation failed
  // because of the cutoffs encountered in last chance recoloring.
  // Note: This is used as bitmask. New value should be next power of 2.
  enum CutOffStage {
    // No cutoffs encountered
    CO_None = 0,

    // lcr-max-depth cutoff encountered
    CO_Depth = 1,

    // lcr-max-interf cutoff encountered
    CO_Interf = 2
  };

  uint8_t CutOffInfo;

#ifndef NDEBUG
  static const char *const StageName[];
#endif

  // RegInfo - Keep additional information about each live range.
  struct RegInfo {
    LiveRangeStage Stage;

    // Cascade - Eviction loop prevention. See canEvictInterference().
    unsigned Cascade;

    RegInfo() : Stage(RS_New), Cascade(0) {}
  };

  IndexedMap<RegInfo, VirtReg2IndexFunctor> ExtraRegInfo;

  LiveRangeStage getStage(const LiveInterval &VirtReg) const {
    return ExtraRegInfo[VirtReg.reg].Stage;
  }

  void setStage(const LiveInterval &VirtReg, LiveRangeStage Stage) {
    ExtraRegInfo.resize(MRI->getNumVirtRegs());
    ExtraRegInfo[VirtReg.reg].Stage = Stage;
  }

  template<typename Iterator>
  void setStage(Iterator Begin, Iterator End, LiveRangeStage NewStage) {
    ExtraRegInfo.resize(MRI->getNumVirtRegs());
    for (;Begin != End; ++Begin) {
      unsigned Reg = *Begin;
      if (ExtraRegInfo[Reg].Stage == RS_New)
        ExtraRegInfo[Reg].Stage = NewStage;
    }
  }

  /// Cost of evicting interference.
  struct EvictionCost {
    unsigned BrokenHints; ///< Total number of broken hints.
    float MaxWeight;      ///< Maximum spill weight evicted.

    EvictionCost(): BrokenHints(0), MaxWeight(0) {}

    bool isMax() const { return BrokenHints == ~0u; }

    void setMax() { BrokenHints = ~0u; }

    void setBrokenHints(unsigned NHints) { BrokenHints = NHints; }

    bool operator<(const EvictionCost &O) const {
      return std::tie(BrokenHints, MaxWeight) <
             std::tie(O.BrokenHints, O.MaxWeight);
    }
  };

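  // Illustrative comparison (not from the original source): because
  // BrokenHints is compared before MaxWeight, {BrokenHints=0, MaxWeight=5.0}
  // is cheaper than {BrokenHints=1, MaxWeight=0.5} under the std::tie
  // ordering above; spill weight only breaks ties between equal hint counts.
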
  // splitting state.
  std::unique_ptr<SplitAnalysis> SA;
  std::unique_ptr<SplitEditor> SE;

  /// Cached per-block interference maps
  InterferenceCache IntfCache;

  /// All basic blocks where the current register has uses.
  SmallVector<SpillPlacement::BlockConstraint, 8> SplitConstraints;

  /// Global live range splitting candidate info.
  struct GlobalSplitCandidate {
    // Register intended for assignment, or 0.
    unsigned PhysReg;

    // SplitKit interval index for this candidate.
    unsigned IntvIdx;

    // Interference for PhysReg.
    InterferenceCache::Cursor Intf;

    // Bundles where this candidate should be live.
    BitVector LiveBundles;
    SmallVector<unsigned, 8> ActiveBlocks;

    void reset(InterferenceCache &Cache, unsigned Reg) {
      PhysReg = Reg;
      IntvIdx = 0;
      Intf.setPhysReg(Cache, Reg);
      LiveBundles.clear();
      ActiveBlocks.clear();
    }

    // Set B[i] = C for every live bundle where B[i] was NoCand.
    unsigned getBundles(SmallVectorImpl<unsigned> &B, unsigned C) {
      unsigned Count = 0;
      for (int i = LiveBundles.find_first(); i >= 0;
           i = LiveBundles.find_next(i))
        if (B[i] == NoCand) {
          B[i] = C;
          Count++;
        }
      return Count;
    }
  };

  /// Candidate info for each PhysReg in AllocationOrder.
  /// This vector never shrinks, but grows to the size of the largest register
  /// class.
  SmallVector<GlobalSplitCandidate, 32> GlobalCand;

  enum : unsigned { NoCand = ~0u };

  /// Candidate map. Each edge bundle is assigned to a GlobalCand entry, or to
  /// NoCand which indicates the stack interval.
  SmallVector<unsigned, 32> BundleCand;

  /// Callee-save register cost, calculated once per machine function.
  BlockFrequency CSRCost;

  /// Whether or not to run the local reassignment heuristic. This information
  /// is obtained from the TargetSubtargetInfo.
  bool EnableLocalReassign;

  /// Set of broken hints that may be reconciled later because of eviction.
  SmallSetVector<LiveInterval *, 8> SetOfBrokenHints;

public:
  RAGreedy();

  /// Return the pass name.
  const char* getPassName() const override {
    return "Greedy Register Allocator";
  }

  /// RAGreedy analysis usage.
  void getAnalysisUsage(AnalysisUsage &AU) const override;
  void releaseMemory() override;
  Spiller &spiller() override { return *SpillerInstance; }
  void enqueue(LiveInterval *LI) override;
  LiveInterval *dequeue() override;
  unsigned selectOrSplit(LiveInterval&, SmallVectorImpl<unsigned>&) override;
  void aboutToRemoveInterval(LiveInterval &) override;

  /// Perform register allocation.
  bool runOnMachineFunction(MachineFunction &mf) override;

  static char ID;

private:
  unsigned selectOrSplitImpl(LiveInterval &, SmallVectorImpl<unsigned> &,
                             SmallVirtRegSet &, unsigned = 0);

  bool LRE_CanEraseVirtReg(unsigned) override;
  void LRE_WillShrinkVirtReg(unsigned) override;
  void LRE_DidCloneVirtReg(unsigned, unsigned) override;
  void enqueue(PQueue &CurQueue, LiveInterval *LI);
  LiveInterval *dequeue(PQueue &CurQueue);

  BlockFrequency calcSpillCost();
  bool addSplitConstraints(InterferenceCache::Cursor, BlockFrequency&);
  void addThroughConstraints(InterferenceCache::Cursor, ArrayRef<unsigned>);
  void growRegion(GlobalSplitCandidate &Cand);
  BlockFrequency calcGlobalSplitCost(GlobalSplitCandidate&);
  bool calcCompactRegion(GlobalSplitCandidate&);
  void splitAroundRegion(LiveRangeEdit&, ArrayRef<unsigned>);
  void calcGapWeights(unsigned, SmallVectorImpl<float>&);
  unsigned canReassign(LiveInterval &VirtReg, unsigned PhysReg);
  bool shouldEvict(LiveInterval &A, bool, LiveInterval &B, bool);
  bool canEvictInterference(LiveInterval&, unsigned, bool, EvictionCost&);
  void evictInterference(LiveInterval&, unsigned,
                         SmallVectorImpl<unsigned>&);
  bool mayRecolorAllInterferences(unsigned PhysReg, LiveInterval &VirtReg,
                                  SmallLISet &RecoloringCandidates,
                                  const SmallVirtRegSet &FixedRegisters);

  unsigned tryAssign(LiveInterval&, AllocationOrder&,
                     SmallVectorImpl<unsigned>&);
  unsigned tryEvict(LiveInterval&, AllocationOrder&,
                    SmallVectorImpl<unsigned>&, unsigned = ~0u);
  unsigned tryRegionSplit(LiveInterval&, AllocationOrder&,
                          SmallVectorImpl<unsigned>&);
  /// Calculate cost of region splitting.
  unsigned calculateRegionSplitCost(LiveInterval &VirtReg,
                                    AllocationOrder &Order,
                                    BlockFrequency &BestCost,
                                    unsigned &NumCands, bool IgnoreCSR);
  /// Perform region splitting.
  unsigned doRegionSplit(LiveInterval &VirtReg, unsigned BestCand,
                         bool HasCompact,
                         SmallVectorImpl<unsigned> &NewVRegs);
  /// Check other options before using a callee-saved register for the first
  /// time.
  unsigned tryAssignCSRFirstTime(LiveInterval &VirtReg, AllocationOrder &Order,
                                 unsigned PhysReg, unsigned &CostPerUseLimit,
                                 SmallVectorImpl<unsigned> &NewVRegs);
  void initializeCSRCost();
  unsigned tryBlockSplit(LiveInterval&, AllocationOrder&,
                         SmallVectorImpl<unsigned>&);
  unsigned tryInstructionSplit(LiveInterval&, AllocationOrder&,
                               SmallVectorImpl<unsigned>&);
  unsigned tryLocalSplit(LiveInterval&, AllocationOrder&,
                         SmallVectorImpl<unsigned>&);
  unsigned trySplit(LiveInterval&, AllocationOrder&,
                    SmallVectorImpl<unsigned>&);
  unsigned tryLastChanceRecoloring(LiveInterval &, AllocationOrder &,
                                   SmallVectorImpl<unsigned> &,
                                   SmallVirtRegSet &, unsigned);
  bool tryRecoloringCandidates(PQueue &, SmallVectorImpl<unsigned> &,
                               SmallVirtRegSet &, unsigned);
  void tryHintRecoloring(LiveInterval &);
  void tryHintsRecoloring();

  /// Model the information carried by one end of a copy.
  struct HintInfo {
    /// The frequency of the copy.
    BlockFrequency Freq;
    /// The virtual register or physical register.
    unsigned Reg;
    /// Its currently assigned register.
    /// In case of a physical register Reg == PhysReg.
    unsigned PhysReg;

    HintInfo(BlockFrequency Freq, unsigned Reg, unsigned PhysReg)
        : Freq(Freq), Reg(Reg), PhysReg(PhysReg) {}
  };
  typedef SmallVector<HintInfo, 4> HintsInfo;
  BlockFrequency getBrokenHintFreq(const HintsInfo &, unsigned);
  void collectHintInfo(unsigned, HintsInfo &);

  bool isUnusedCalleeSavedReg(unsigned PhysReg) const;
};

} // end anonymous namespace

char RAGreedy::ID = 0;

#ifndef NDEBUG
const char *const RAGreedy::StageName[] = {
    "RS_New",
    "RS_Assign",
    "RS_Split",
    "RS_Split2",
    "RS_Spill",
    "RS_Done"
};
#endif

// Hysteresis to use when comparing floats.
// This helps stabilize decisions based on float comparisons.
const float Hysteresis = (2007 / 2048.0f); // 0.97998046875

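// A sketch of how the factor is typically applied (assumed usage, shown for
// illustration only): scale the challenger by Hysteresis so it must beat the
// incumbent by roughly 2% before replacing it, e.g.
//   if (NewWeight * Hysteresis > BestWeight) { /* adopt the new choice */ }
// which keeps near-tie float comparisons from flip-flopping between runs.
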
FunctionPass* llvm::createGreedyRegisterAllocator() {
  return new RAGreedy();
}

RAGreedy::RAGreedy(): MachineFunctionPass(ID) {
  initializeLiveDebugVariablesPass(*PassRegistry::getPassRegistry());
  initializeSlotIndexesPass(*PassRegistry::getPassRegistry());
  initializeLiveIntervalsPass(*PassRegistry::getPassRegistry());
  initializeRegisterCoalescerPass(*PassRegistry::getPassRegistry());
  initializeMachineSchedulerPass(*PassRegistry::getPassRegistry());
  initializeLiveStacksPass(*PassRegistry::getPassRegistry());
  initializeMachineDominatorTreePass(*PassRegistry::getPassRegistry());
  initializeMachineLoopInfoPass(*PassRegistry::getPassRegistry());
  initializeVirtRegMapPass(*PassRegistry::getPassRegistry());
  initializeLiveRegMatrixPass(*PassRegistry::getPassRegistry());
  initializeEdgeBundlesPass(*PassRegistry::getPassRegistry());
  initializeSpillPlacementPass(*PassRegistry::getPassRegistry());
}

void RAGreedy::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addRequired<MachineBlockFrequencyInfo>();
  AU.addPreserved<MachineBlockFrequencyInfo>();
  AU.addRequired<AliasAnalysis>();
  AU.addPreserved<AliasAnalysis>();
  AU.addRequired<LiveIntervals>();
  AU.addPreserved<LiveIntervals>();
  AU.addRequired<SlotIndexes>();
  AU.addPreserved<SlotIndexes>();
  AU.addRequired<LiveDebugVariables>();
  AU.addPreserved<LiveDebugVariables>();
  AU.addRequired<LiveStacks>();
  AU.addPreserved<LiveStacks>();
  AU.addRequired<MachineDominatorTree>();
  AU.addPreserved<MachineDominatorTree>();
  AU.addRequired<MachineLoopInfo>();
  AU.addPreserved<MachineLoopInfo>();
  AU.addRequired<VirtRegMap>();
  AU.addPreserved<VirtRegMap>();
  AU.addRequired<LiveRegMatrix>();
  AU.addPreserved<LiveRegMatrix>();
  AU.addRequired<EdgeBundles>();
  AU.addRequired<SpillPlacement>();
  MachineFunctionPass::getAnalysisUsage(AU);
}

//===----------------------------------------------------------------------===//
//                     LiveRangeEdit delegate methods
//===----------------------------------------------------------------------===//

bool RAGreedy::LRE_CanEraseVirtReg(unsigned VirtReg) {
  if (VRM->hasPhys(VirtReg)) {
    LiveInterval &LI = LIS->getInterval(VirtReg);
    Matrix->unassign(LI);
    aboutToRemoveInterval(LI);
    return true;
  }
  // Unassigned virtreg is probably in the priority queue.
  // RegAllocBase will erase it after dequeueing.
  return false;
}

void RAGreedy::LRE_WillShrinkVirtReg(unsigned VirtReg) {
  if (!VRM->hasPhys(VirtReg))
    return;

  // Register is assigned, put it back on the queue for reassignment.
  LiveInterval &LI = LIS->getInterval(VirtReg);
  Matrix->unassign(LI);
  enqueue(&LI);
}

void RAGreedy::LRE_DidCloneVirtReg(unsigned New, unsigned Old) {
  // Cloning a register we haven't even heard about yet? Just ignore it.
  if (!ExtraRegInfo.inBounds(Old))
    return;

  // LRE may clone a virtual register because dead code elimination causes it
  // to be split into connected components. The new components are much smaller
  // than the original, so they should get a new chance at being assigned.
  // The new component starts in the same stage as the parent.
  ExtraRegInfo[Old].Stage = RS_Assign;
  ExtraRegInfo.grow(New);
  ExtraRegInfo[New] = ExtraRegInfo[Old];
}

void RAGreedy::releaseMemory() {
  SpillerInstance.reset();
  ExtraRegInfo.clear();
  GlobalCand.clear();
}

void RAGreedy::enqueue(LiveInterval *LI) { enqueue(Queue, LI); }

void RAGreedy::enqueue(PQueue &CurQueue, LiveInterval *LI) {
  // Prioritize live ranges by size, assigning larger ranges first.
  // The queue holds (size, reg) pairs.
  const unsigned Size = LI->getSize();
  const unsigned Reg = LI->reg;
  assert(TargetRegisterInfo::isVirtualRegister(Reg) &&
         "Can only enqueue virtual registers");
  unsigned Prio;

  ExtraRegInfo.grow(Reg);
  if (ExtraRegInfo[Reg].Stage == RS_New)
    ExtraRegInfo[Reg].Stage = RS_Assign;

  if (ExtraRegInfo[Reg].Stage == RS_Split) {
    // Unsplit ranges that couldn't be allocated immediately are deferred until
    // everything else has been allocated.
    Prio = Size;
  } else {
    // Giant live ranges fall back to the global assignment heuristic, which
    // prevents excessive spilling in pathological cases.
    bool ReverseLocal = TRI->reverseLocalAssignment();
    const TargetRegisterClass &RC = *MRI->getRegClass(Reg);
    bool ForceGlobal = !ReverseLocal &&
      (Size / SlotIndex::InstrDist) > (2 * RC.getNumRegs());

    if (ExtraRegInfo[Reg].Stage == RS_Assign && !ForceGlobal && !LI->empty() &&
        LIS->intervalIsInOneMBB(*LI)) {
      // Allocate original local ranges in linear instruction order. Since they
      // are singly defined, this produces optimal coloring in the absence of
      // global interference and other constraints.
      if (!ReverseLocal)
        Prio = LI->beginIndex().getInstrDistance(Indexes->getLastIndex());
      else {
        // Allocating bottom up may allow many short LRGs to be assigned first
        // to one of the cheap registers. This could be much faster for very
        // large blocks on targets with many physical registers.
        Prio = Indexes->getZeroIndex().getInstrDistance(LI->endIndex());
      }
      Prio |= RC.AllocationPriority << 24;
    } else {
      // Allocate global and split ranges in long->short order. Long ranges that
      // don't fit should be spilled (or split) ASAP so they don't create
      // interference. Mark a bit to prioritize global above local ranges.
      Prio = (1u << 29) + Size;
    }
    // Mark a higher bit to prioritize global and local above RS_Split.
    Prio |= (1u << 31);

    // Boost ranges that have a physical register hint.
    if (VRM->hasKnownPreference(Reg))
      Prio |= (1u << 30);
  }
  // The virtual register number is a tie breaker for same-sized ranges.
  // Give lower vreg numbers higher priority to assign them first.
  CurQueue.push(std::make_pair(Prio, ~Reg));
}

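// Illustrative layout of the priority word built above (derived from the
// code, not a comment in the original source): bit 31 marks any non-RS_Split
// range, bit 30 marks ranges with a known register preference, bit 29 marks
// global ranges, bits 24+ carry the register class AllocationPriority for
// local ranges, and the low bits hold the size or instruction distance.
// Deferred RS_Split ranges use the bare size, so they sort below everything
// else.
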
LiveInterval *RAGreedy::dequeue() { return dequeue(Queue); }

LiveInterval *RAGreedy::dequeue(PQueue &CurQueue) {
  if (CurQueue.empty())
    return nullptr;
  LiveInterval *LI = &LIS->getInterval(~CurQueue.top().second);
  CurQueue.pop();
  return LI;
}

//===----------------------------------------------------------------------===//
//                            Direct Assignment
//===----------------------------------------------------------------------===//

/// tryAssign - Try to assign VirtReg to an available register.
unsigned RAGreedy::tryAssign(LiveInterval &VirtReg,
                             AllocationOrder &Order,
                             SmallVectorImpl<unsigned> &NewVRegs) {
  Order.rewind();
  unsigned PhysReg;
  while ((PhysReg = Order.next()))
    if (!Matrix->checkInterference(VirtReg, PhysReg))
      break;
  if (!PhysReg || Order.isHint())
    return PhysReg;

  // PhysReg is available, but there may be a better choice.

  // If we missed a simple hint, try to cheaply evict interference from the
  // preferred register.
  if (unsigned Hint = MRI->getSimpleHint(VirtReg.reg))
    if (Order.isHint(Hint)) {
      DEBUG(dbgs() << "missed hint " << PrintReg(Hint, TRI) << '\n');
      EvictionCost MaxCost;
      MaxCost.setBrokenHints(1);
      if (canEvictInterference(VirtReg, Hint, true, MaxCost)) {
        evictInterference(VirtReg, Hint, NewVRegs);
        return Hint;
      }
    }

  // Try to evict interference from a cheaper alternative.
  unsigned Cost = TRI->getCostPerUse(PhysReg);

  // Most registers have 0 additional cost.
  if (!Cost)
    return PhysReg;

  DEBUG(dbgs() << PrintReg(PhysReg, TRI) << " is available at cost " << Cost
               << '\n');
  unsigned CheapReg = tryEvict(VirtReg, Order, NewVRegs, Cost);
  return CheapReg ? CheapReg : PhysReg;
}

//===----------------------------------------------------------------------===//
//                         Interference eviction
//===----------------------------------------------------------------------===//

unsigned RAGreedy::canReassign(LiveInterval &VirtReg, unsigned PrevReg) {
  AllocationOrder Order(VirtReg.reg, *VRM, RegClassInfo);
  unsigned PhysReg;
  while ((PhysReg = Order.next())) {
    if (PhysReg == PrevReg)
      continue;

    MCRegUnitIterator Units(PhysReg, TRI);
    for (; Units.isValid(); ++Units) {
      // Instantiate a "subquery", not to be confused with the Queries array.
      LiveIntervalUnion::Query subQ(&VirtReg, &Matrix->getLiveUnions()[*Units]);
      if (subQ.checkInterference())
        break;
    }
    // If no units have interference, break out with the current PhysReg.
    if (!Units.isValid())
      break;
  }
  if (PhysReg)
    DEBUG(dbgs() << "can reassign: " << VirtReg << " from "
          << PrintReg(PrevReg, TRI) << " to " << PrintReg(PhysReg, TRI)
          << '\n');
  return PhysReg;
}

/// shouldEvict - determine if A should evict the assigned live range B. The
/// eviction policy defined by this function together with the allocation order
/// defined by enqueue() decides which registers ultimately end up being split
/// and spilled.
///
/// Cascade numbers are used to prevent infinite loops if this function is a
/// cyclic relation.
///
/// @param A          The live range to be assigned.
/// @param IsHint     True when A is about to be assigned to its preferred
///                   register.
/// @param B          The live range to be evicted.
/// @param BreaksHint True when B is already assigned to its preferred register.
bool RAGreedy::shouldEvict(LiveInterval &A, bool IsHint,
                           LiveInterval &B, bool BreaksHint) {
  bool CanSplit = getStage(B) < RS_Spill;

  // Be fairly aggressive about following hints as long as the evictee can be
  // split.
  if (CanSplit && IsHint && !BreaksHint)
    return true;

  if (A.weight > B.weight) {
    DEBUG(dbgs() << "should evict: " << B << " w= " << B.weight << '\n');
    return true;
  }
  return false;
}

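// Worked example (illustrative): when A is headed for its preferred register
// (IsHint), B is not occupying its own preferred register (!BreaksHint), and
// B can still be split (stage < RS_Spill), A evicts B regardless of spill
// weights. Otherwise A only evicts a strictly lighter B.
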
/// canEvictInterference - Return true if all interferences between VirtReg and
/// PhysReg can be evicted.
///
/// @param VirtReg Live range that is about to be assigned.
/// @param PhysReg Desired register for assignment.
/// @param IsHint  True when PhysReg is VirtReg's preferred register.
/// @param MaxCost Only look for cheaper candidates and update with new cost
///                when returning true.
/// @returns True when interference can be evicted cheaper than MaxCost.
bool RAGreedy::canEvictInterference(LiveInterval &VirtReg, unsigned PhysReg,
                                    bool IsHint, EvictionCost &MaxCost) {
  // It is only possible to evict virtual register interference.
  if (Matrix->checkInterference(VirtReg, PhysReg) > LiveRegMatrix::IK_VirtReg)
    return false;

  bool IsLocal = LIS->intervalIsInOneMBB(VirtReg);

  // Find VirtReg's cascade number. This will be unassigned if VirtReg was
  // never involved in an eviction before. If a cascade number was assigned,
  // deny evicting anything with the same or a newer cascade number. This
  // prevents infinite eviction loops.
  //
  // This works out so a register without a cascade number is allowed to evict
  // anything, and it can be evicted by anything.
  unsigned Cascade = ExtraRegInfo[VirtReg.reg].Cascade;
  if (!Cascade)
    Cascade = NextCascade;

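  // Example of the cascade invariant (illustrative): suppose %a evicts %b;
  // both end up with cascade number N. If %b is later reassigned, it may only
  // evict ranges with a cascade number strictly less than N (urgent evictions
  // excepted), so it can never evict %a back out. Each new evictor draws a
  // fresh, larger number from NextCascade, so eviction chains must terminate.
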
  EvictionCost Cost;
  for (MCRegUnitIterator Units(PhysReg, TRI); Units.isValid(); ++Units) {
    LiveIntervalUnion::Query &Q = Matrix->query(VirtReg, *Units);
    // If there are 10 or more interferences, chances are one is heavier.
    if (Q.collectInterferingVRegs(10) >= 10)
      return false;

    // Check if any interfering live range is heavier than MaxWeight.
    for (unsigned i = Q.interferingVRegs().size(); i; --i) {
      LiveInterval *Intf = Q.interferingVRegs()[i - 1];
      assert(TargetRegisterInfo::isVirtualRegister(Intf->reg) &&
             "Only expecting virtual register interference from query");
      // Never evict spill products. They cannot split or spill.
      if (getStage(*Intf) == RS_Done)
        return false;
      // Once a live range becomes small enough, it is urgent that we find a
      // register for it. This is indicated by an infinite spill weight. These
      // urgent live ranges get to evict almost anything.
      //
      // Also allow urgent evictions of unspillable ranges from a strictly
      // larger allocation order.
      bool Urgent = !VirtReg.isSpillable() &&
        (Intf->isSpillable() ||
         RegClassInfo.getNumAllocatableRegs(MRI->getRegClass(VirtReg.reg)) <
         RegClassInfo.getNumAllocatableRegs(MRI->getRegClass(Intf->reg)));
      // Only evict older cascades or live ranges without a cascade.
      unsigned IntfCascade = ExtraRegInfo[Intf->reg].Cascade;
      if (Cascade <= IntfCascade) {
        if (!Urgent)
          return false;
        // We permit breaking cascades for urgent evictions. It should be the
        // last resort, though, so make it really expensive.
        Cost.BrokenHints += 10;
      }
      // Would this break a satisfied hint?
      bool BreaksHint = VRM->hasPreferredPhys(Intf->reg);
      // Update eviction cost.
      Cost.BrokenHints += BreaksHint;
      Cost.MaxWeight = std::max(Cost.MaxWeight, Intf->weight);
      // Abort if this would be too expensive.
      if (!(Cost < MaxCost))
        return false;
      if (Urgent)
        continue;
      // Apply the eviction policy for non-urgent evictions.
      if (!shouldEvict(VirtReg, IsHint, *Intf, BreaksHint))
        return false;
      // If !MaxCost.isMax(), then we're just looking for a cheap register.
      // Evicting another local live range in this case could lead to
      // suboptimal coloring.
      if (!MaxCost.isMax() && IsLocal && LIS->intervalIsInOneMBB(*Intf) &&
          (!EnableLocalReassign || !canReassign(*Intf, PhysReg))) {
        return false;
      }
    }
  }
  MaxCost = Cost;
  return true;
}

/// evictInterference - Evict any interfering registers that prevent VirtReg
/// from being assigned to PhysReg. This assumes that canEvictInterference
/// returned true.
void RAGreedy::evictInterference(LiveInterval &VirtReg, unsigned PhysReg,
                                 SmallVectorImpl<unsigned> &NewVRegs) {
  // Make sure that VirtReg has a cascade number, and assign that cascade
  // number to every evicted register. These live ranges can then only be
  // evicted by a newer cascade, preventing infinite loops.
  unsigned Cascade = ExtraRegInfo[VirtReg.reg].Cascade;
  if (!Cascade)
    Cascade = ExtraRegInfo[VirtReg.reg].Cascade = NextCascade++;

  DEBUG(dbgs() << "evicting " << PrintReg(PhysReg, TRI)
               << " interference: Cascade " << Cascade << '\n');

  // Collect all interfering virtregs first.
  SmallVector<LiveInterval*, 8> Intfs;
  for (MCRegUnitIterator Units(PhysReg, TRI); Units.isValid(); ++Units) {
    LiveIntervalUnion::Query &Q = Matrix->query(VirtReg, *Units);
    assert(Q.seenAllInterferences() && "Didn't check all interferences.");
    ArrayRef<LiveInterval*> IVR = Q.interferingVRegs();
    Intfs.append(IVR.begin(), IVR.end());
  }

  // Evict them second. This will invalidate the queries.
  for (unsigned i = 0, e = Intfs.size(); i != e; ++i) {
    LiveInterval *Intf = Intfs[i];
    // The same VirtReg may be present in multiple RegUnits. Skip duplicates.
    if (!VRM->hasPhys(Intf->reg))
      continue;
    Matrix->unassign(*Intf);
    assert((ExtraRegInfo[Intf->reg].Cascade < Cascade ||
            VirtReg.isSpillable() < Intf->isSpillable()) &&
           "Cannot decrease cascade number, illegal eviction");
    ExtraRegInfo[Intf->reg].Cascade = Cascade;
    ++NumEvicted;
    NewVRegs.push_back(Intf->reg);
  }
}

/// Returns true if the given \p PhysReg is a callee saved register and has not
/// been used for allocation yet.
bool RAGreedy::isUnusedCalleeSavedReg(unsigned PhysReg) const {
  unsigned CSR = RegClassInfo.getLastCalleeSavedAlias(PhysReg);
  if (CSR == 0)
    return false;

  return !Matrix->isPhysRegUsed(PhysReg);
}

/// tryEvict - Try to evict all interferences for a physreg.
/// @param  VirtReg Currently unassigned virtual register.
/// @param  Order   Physregs to try.
/// @return         Physreg to assign VirtReg, or 0.
unsigned RAGreedy::tryEvict(LiveInterval &VirtReg,
                            AllocationOrder &Order,
                            SmallVectorImpl<unsigned> &NewVRegs,
                            unsigned CostPerUseLimit) {
  NamedRegionTimer T("Evict", TimerGroupName, TimePassesIsEnabled);

  // Keep track of the cheapest interference seen so far.
  EvictionCost BestCost;
  BestCost.setMax();
  unsigned BestPhys = 0;
  unsigned OrderLimit = Order.getOrder().size();

  // When we are just looking for a reduced cost per use, don't break any
  // hints, and only evict smaller spill weights.
  if (CostPerUseLimit < ~0u) {
    BestCost.BrokenHints = 0;
    BestCost.MaxWeight = VirtReg.weight;

    // Check if any registers in RC are below CostPerUseLimit.
    const TargetRegisterClass *RC = MRI->getRegClass(VirtReg.reg);
    unsigned MinCost = RegClassInfo.getMinCost(RC);
    if (MinCost >= CostPerUseLimit) {
      DEBUG(dbgs() << TRI->getRegClassName(RC) << " minimum cost = " << MinCost
                   << ", no cheaper registers to be found.\n");
      return 0;
    }

    // It is normal for register classes to have a long tail of registers with
    // the same cost. We don't need to look at them if they're too expensive.
    if (TRI->getCostPerUse(Order.getOrder().back()) >= CostPerUseLimit) {
      OrderLimit = RegClassInfo.getLastCostChange(RC);
      DEBUG(dbgs() << "Only trying the first " << OrderLimit << " regs.\n");
    }
  }

  Order.rewind();
  while (unsigned PhysReg = Order.next(OrderLimit)) {
    if (TRI->getCostPerUse(PhysReg) >= CostPerUseLimit)
      continue;
    // The first use of a callee-saved register in a function has cost 1.
    // Don't start using a CSR when the CostPerUseLimit is low.
    if (CostPerUseLimit == 1 && isUnusedCalleeSavedReg(PhysReg)) {
      DEBUG(dbgs() << PrintReg(PhysReg, TRI) << " would clobber CSR "
            << PrintReg(RegClassInfo.getLastCalleeSavedAlias(PhysReg), TRI)
            << '\n');
      continue;
    }

    if (!canEvictInterference(VirtReg, PhysReg, false, BestCost))
      continue;

    // Best so far.
    BestPhys = PhysReg;

    // Stop if the hint can be used.
    if (Order.isHint())
      break;
  }

  if (!BestPhys)
    return 0;

  evictInterference(VirtReg, BestPhys, NewVRegs);
  return BestPhys;
}

//===----------------------------------------------------------------------===//
//                              Region Splitting
//===----------------------------------------------------------------------===//

/// addSplitConstraints - Fill out the SplitConstraints vector based on the
/// interference pattern in PhysReg and its aliases. Add the constraints to
/// SpillPlacement and return the static cost of this split in Cost, assuming
/// that all preferences in SplitConstraints are met.
/// Return false if there are no bundles with positive bias.
bool RAGreedy::addSplitConstraints(InterferenceCache::Cursor Intf,
                                   BlockFrequency &Cost) {
  ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks();

  // Reset interference dependent info.
  SplitConstraints.resize(UseBlocks.size());
  BlockFrequency StaticCost = 0;
  for (unsigned i = 0; i != UseBlocks.size(); ++i) {
    const SplitAnalysis::BlockInfo &BI = UseBlocks[i];
    SpillPlacement::BlockConstraint &BC = SplitConstraints[i];

    BC.Number = BI.MBB->getNumber();
    Intf.moveToBlock(BC.Number);
    BC.Entry = BI.LiveIn ? SpillPlacement::PrefReg : SpillPlacement::DontCare;
    BC.Exit = BI.LiveOut ? SpillPlacement::PrefReg : SpillPlacement::DontCare;
    BC.ChangesValue = BI.FirstDef.isValid();

    if (!Intf.hasInterference())
      continue;

    // Number of spill code instructions to insert.
    unsigned Ins = 0;

    // Interference for the live-in value.
    if (BI.LiveIn) {
      if (Intf.first() <= Indexes->getMBBStartIdx(BC.Number))
        BC.Entry = SpillPlacement::MustSpill, ++Ins;
      else if (Intf.first() < BI.FirstInstr)
        BC.Entry = SpillPlacement::PrefSpill, ++Ins;
      else if (Intf.first() < BI.LastInstr)
        ++Ins;
    }

    // Interference for the live-out value.
    if (BI.LiveOut) {
      if (Intf.last() >= SA->getLastSplitPoint(BC.Number))
        BC.Exit = SpillPlacement::MustSpill, ++Ins;
      else if (Intf.last() > BI.LastInstr)
        BC.Exit = SpillPlacement::PrefSpill, ++Ins;
      else if (Intf.last() > BI.FirstInstr)
        ++Ins;
    }

    // Accumulate the total frequency of inserted spill code.
    while (Ins--)
      StaticCost += SpillPlacer->getBlockFrequency(BC.Number);
  }
  Cost = StaticCost;

  // Add constraints for use-blocks. Note that these are the only constraints
  // that may add a positive bias, it is downhill from here.
  SpillPlacer->addConstraints(SplitConstraints);
  return SpillPlacer->scanActiveBundles();
}

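// Illustrative outcome (derived from the code above, not a comment in the
// original source): for a block where the value is live-in and interference
// begins before the first use, the entry constraint becomes PrefSpill and one
// spill instruction is charged at the block's frequency; if the interference
// already covers the block entry, the entry constraint is MustSpill instead.
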
/// addThroughConstraints - Add constraints and links to SpillPlacer from the
/// live-through blocks in Blocks.
void RAGreedy::addThroughConstraints(InterferenceCache::Cursor Intf,
                                     ArrayRef<unsigned> Blocks) {
  const unsigned GroupSize = 8;
  SpillPlacement::BlockConstraint BCS[GroupSize];
  unsigned TBS[GroupSize];
  unsigned B = 0, T = 0;

  for (unsigned i = 0; i != Blocks.size(); ++i) {
    unsigned Number = Blocks[i];
    Intf.moveToBlock(Number);

    if (!Intf.hasInterference()) {
      assert(T < GroupSize && "Array overflow");
      TBS[T] = Number;
      if (++T == GroupSize) {
        SpillPlacer->addLinks(makeArrayRef(TBS, T));
        T = 0;
      }
      continue;
    }

    assert(B < GroupSize && "Array overflow");
    BCS[B].Number = Number;

    // Interference for the live-in value.
    if (Intf.first() <= Indexes->getMBBStartIdx(Number))
      BCS[B].Entry = SpillPlacement::MustSpill;
    else
      BCS[B].Entry = SpillPlacement::PrefSpill;

    // Interference for the live-out value.
    if (Intf.last() >= SA->getLastSplitPoint(Number))
      BCS[B].Exit = SpillPlacement::MustSpill;
    else
      BCS[B].Exit = SpillPlacement::PrefSpill;

    if (++B == GroupSize) {
      SpillPlacer->addConstraints(makeArrayRef(BCS, B));
      B = 0;
    }
  }

  SpillPlacer->addConstraints(makeArrayRef(BCS, B));
  SpillPlacer->addLinks(makeArrayRef(TBS, T));
}

void RAGreedy::growRegion(GlobalSplitCandidate &Cand) {
  // Keep track of through blocks that have not been added to SpillPlacer.
  BitVector Todo = SA->getThroughBlocks();
  SmallVectorImpl<unsigned> &ActiveBlocks = Cand.ActiveBlocks;
  unsigned AddedTo = 0;
#ifndef NDEBUG
  unsigned Visited = 0;
#endif

  for (;;) {
    ArrayRef<unsigned> NewBundles = SpillPlacer->getRecentPositive();
    // Find new through blocks in the periphery of PrefRegBundles.
    for (int i = 0, e = NewBundles.size(); i != e; ++i) {
      unsigned Bundle = NewBundles[i];
      // Look at all blocks connected to Bundle in the full graph.
      ArrayRef<unsigned> Blocks = Bundles->getBlocks(Bundle);
      for (ArrayRef<unsigned>::iterator I = Blocks.begin(), E = Blocks.end();
           I != E; ++I) {
        unsigned Block = *I;
        if (!Todo.test(Block))
          continue;
        Todo.reset(Block);
        // This is a new through block. Add it to SpillPlacer later.
        ActiveBlocks.push_back(Block);
#ifndef NDEBUG
        ++Visited;
#endif
      }
    }
    // Any new blocks to add?
    if (ActiveBlocks.size() == AddedTo)
      break;

    // Compute through constraints from the interference, or assume that all
    // through blocks prefer spilling when forming compact regions.
    auto NewBlocks = makeArrayRef(ActiveBlocks).slice(AddedTo);
    if (Cand.PhysReg)
      addThroughConstraints(Cand.Intf, NewBlocks);
    else
      // Provide a strong negative bias on through blocks to prevent unwanted
      // liveness on loop backedges.
      SpillPlacer->addPrefSpill(NewBlocks, /* Strong= */ true);
    AddedTo = ActiveBlocks.size();

    // Perhaps iterating can enable more bundles?
    SpillPlacer->iterate();
  }
  DEBUG(dbgs() << ", v=" << Visited);
}

/// calcCompactRegion - Compute the set of edge bundles that should be live
/// when splitting the current live range into compact regions. Compact
/// regions can be computed without looking at interference. They are the
/// regions formed by removing all the live-through blocks from the live range.
///
/// Returns false if the current live range is already compact, or if the
/// compact regions would form single block regions anyway.
bool RAGreedy::calcCompactRegion(GlobalSplitCandidate &Cand) {
  // Without any through blocks, the live range is already compact.
  if (!SA->getNumThroughBlocks())
    return false;

  // Compact regions don't correspond to any physreg.
  Cand.reset(IntfCache, 0);

  DEBUG(dbgs() << "Compact region bundles");

  // Use the spill placer to determine the live bundles. GrowRegion pretends
  // that all the through blocks have interference when PhysReg is unset.
  SpillPlacer->prepare(Cand.LiveBundles);

  // The static split cost will be zero since Cand.Intf reports no interference.
  BlockFrequency Cost;
  if (!addSplitConstraints(Cand.Intf, Cost)) {
    DEBUG(dbgs() << ", none.\n");
    return false;
  }

  growRegion(Cand);
  SpillPlacer->finish();

  if (!Cand.LiveBundles.any()) {
    DEBUG(dbgs() << ", none.\n");
    return false;
  }

  DEBUG({
    for (int i = Cand.LiveBundles.find_first(); i>=0;
         i = Cand.LiveBundles.find_next(i))
      dbgs() << " EB#" << i;
    dbgs() << ".\n";
  });
  return true;
}

/// calcSpillCost - Compute how expensive it would be to split the live range in
/// SA around all use blocks instead of forming bundle regions.
BlockFrequency RAGreedy::calcSpillCost() {
  BlockFrequency Cost = 0;
  ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks();
  for (unsigned i = 0; i != UseBlocks.size(); ++i) {
    const SplitAnalysis::BlockInfo &BI = UseBlocks[i];
    unsigned Number = BI.MBB->getNumber();
    // We normally only need one spill instruction - a load or a store.
    Cost += SpillPlacer->getBlockFrequency(Number);

    // Unless the value is redefined in the block.
    if (BI.LiveIn && BI.LiveOut && BI.FirstDef)
      Cost += SpillPlacer->getBlockFrequency(Number);
  }
  return Cost;
}

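// Worked example (illustrative): a live range used in three blocks with
// frequencies {4, 1, 1}, where the hot block also redefines the value while
// live-in and live-out, costs 4 + 4 + 1 + 1 = 10 in this model: one spill
// instruction per use block, plus one extra for the redefinition.
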
/// calcGlobalSplitCost - Return the global split cost of following the split
/// pattern in LiveBundles. This cost should be added to the local cost of the
/// interference pattern in SplitConstraints.
///
BlockFrequency RAGreedy::calcGlobalSplitCost(GlobalSplitCandidate &Cand) {
  BlockFrequency GlobalCost = 0;
  const BitVector &LiveBundles = Cand.LiveBundles;
  ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks();
  for (unsigned i = 0; i != UseBlocks.size(); ++i) {
    const SplitAnalysis::BlockInfo &BI = UseBlocks[i];
    SpillPlacement::BlockConstraint &BC = SplitConstraints[i];
    bool RegIn  = LiveBundles[Bundles->getBundle(BC.Number, 0)];
    bool RegOut = LiveBundles[Bundles->getBundle(BC.Number, 1)];
    unsigned Ins = 0;

    if (BI.LiveIn)
      Ins += RegIn != (BC.Entry == SpillPlacement::PrefReg);
    if (BI.LiveOut)
      Ins += RegOut != (BC.Exit == SpillPlacement::PrefReg);
    while (Ins--)
      GlobalCost += SpillPlacer->getBlockFrequency(BC.Number);
  }

  for (unsigned i = 0, e = Cand.ActiveBlocks.size(); i != e; ++i) {
    unsigned Number = Cand.ActiveBlocks[i];
    bool RegIn  = LiveBundles[Bundles->getBundle(Number, 0)];
    bool RegOut = LiveBundles[Bundles->getBundle(Number, 1)];
    if (!RegIn && !RegOut)
      continue;
    if (RegIn && RegOut) {
      // We need double spill code if this block has interference.
      Cand.Intf.moveToBlock(Number);
      if (Cand.Intf.hasInterference()) {
        GlobalCost += SpillPlacer->getBlockFrequency(Number);
        GlobalCost += SpillPlacer->getBlockFrequency(Number);
      }
      continue;
    }
    // live-in / stack-out or stack-in live-out.
    GlobalCost += SpillPlacer->getBlockFrequency(Number);
  }
  return GlobalCost;
}

/// splitAroundRegion - Split the current live range around the regions
/// determined by BundleCand and GlobalCand.
///
/// Before calling this function, GlobalCand and BundleCand must be initialized
/// so each bundle is assigned to a valid candidate, or NoCand for the
/// stack-bound bundles. The shared SA/SE SplitAnalysis and SplitEditor
/// objects must be initialized for the current live range, and intervals
/// created for the used candidates.
///
/// @param LREdit    The LiveRangeEdit object handling the current split.
/// @param UsedCands List of used GlobalCand entries. Every BundleCand value
///                  must appear in this list.
void RAGreedy::splitAroundRegion(LiveRangeEdit &LREdit,
                                 ArrayRef<unsigned> UsedCands) {
  // These are the intervals created for new global ranges. We may create more
  // intervals for local ranges.
  const unsigned NumGlobalIntvs = LREdit.size();
  DEBUG(dbgs() << "splitAroundRegion with " << NumGlobalIntvs << " globals.\n");
  assert(NumGlobalIntvs && "No global intervals configured");

  // Isolate even single instructions when dealing with a proper sub-class.
  // That guarantees register class inflation for the stack interval because it
  // is all copies.
  unsigned Reg = SA->getParent().reg;
  bool SingleInstrs = RegClassInfo.isProperSubClass(MRI->getRegClass(Reg));

  // First handle all the blocks with uses.
  ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks();
  for (unsigned i = 0; i != UseBlocks.size(); ++i) {
    const SplitAnalysis::BlockInfo &BI = UseBlocks[i];
    unsigned Number = BI.MBB->getNumber();
    unsigned IntvIn = 0, IntvOut = 0;
    SlotIndex IntfIn, IntfOut;
    if (BI.LiveIn) {
      unsigned CandIn = BundleCand[Bundles->getBundle(Number, 0)];
      if (CandIn != NoCand) {
        GlobalSplitCandidate &Cand = GlobalCand[CandIn];
        IntvIn = Cand.IntvIdx;
        Cand.Intf.moveToBlock(Number);
        IntfIn = Cand.Intf.first();
      }
    }
    if (BI.LiveOut) {
      unsigned CandOut = BundleCand[Bundles->getBundle(Number, 1)];
      if (CandOut != NoCand) {
        GlobalSplitCandidate &Cand = GlobalCand[CandOut];
        IntvOut = Cand.IntvIdx;
        Cand.Intf.moveToBlock(Number);
        IntfOut = Cand.Intf.last();
      }
    }

    // Create separate intervals for isolated blocks with multiple uses.
    if (!IntvIn && !IntvOut) {
      DEBUG(dbgs() << "BB#" << BI.MBB->getNumber() << " isolated.\n");
      if (SA->shouldSplitSingleBlock(BI, SingleInstrs))
        SE->splitSingleBlock(BI);
      continue;
    }

    if (IntvIn && IntvOut)
      SE->splitLiveThroughBlock(Number, IntvIn, IntfIn, IntvOut, IntfOut);
    else if (IntvIn)
      SE->splitRegInBlock(BI, IntvIn, IntfIn);
    else
      SE->splitRegOutBlock(BI, IntvOut, IntfOut);
  }

  // Handle live-through blocks. The relevant live-through blocks are stored in
  // the ActiveBlocks list with each candidate. We need to filter out
  // duplicates.
  BitVector Todo = SA->getThroughBlocks();
  for (unsigned c = 0; c != UsedCands.size(); ++c) {
    ArrayRef<unsigned> Blocks = GlobalCand[UsedCands[c]].ActiveBlocks;
    for (unsigned i = 0, e = Blocks.size(); i != e; ++i) {
      unsigned Number = Blocks[i];
      if (!Todo.test(Number))
        continue;
      Todo.reset(Number);

      unsigned IntvIn = 0, IntvOut = 0;
      SlotIndex IntfIn, IntfOut;

      unsigned CandIn = BundleCand[Bundles->getBundle(Number, 0)];
      if (CandIn != NoCand) {
        GlobalSplitCandidate &Cand = GlobalCand[CandIn];
        IntvIn = Cand.IntvIdx;
        Cand.Intf.moveToBlock(Number);
        IntfIn = Cand.Intf.first();
      }

      unsigned CandOut = BundleCand[Bundles->getBundle(Number, 1)];
      if (CandOut != NoCand) {
        GlobalSplitCandidate &Cand = GlobalCand[CandOut];
        IntvOut = Cand.IntvIdx;
        Cand.Intf.moveToBlock(Number);
        IntfOut = Cand.Intf.last();
      }
      if (!IntvIn && !IntvOut)
        continue;
      SE->splitLiveThroughBlock(Number, IntvIn, IntfIn, IntvOut, IntfOut);
    }
  }

  ++NumGlobalSplits;

  SmallVector<unsigned, 8> IntvMap;
  SE->finish(&IntvMap);
  DebugVars->splitRegister(Reg, LREdit.regs(), *LIS);

  ExtraRegInfo.resize(MRI->getNumVirtRegs());
  unsigned OrigBlocks = SA->getNumLiveBlocks();

  // Sort out the new intervals created by splitting. We get four kinds:
  // - Remainder intervals should not be split again.
  // - Candidate intervals can be assigned to Cand.PhysReg.
  // - Block-local splits are candidates for local splitting.
  // - DCE leftovers should go back on the queue.
  for (unsigned i = 0, e = LREdit.size(); i != e; ++i) {
    LiveInterval &Reg = LIS->getInterval(LREdit.get(i));

    // Ignore old intervals from DCE.
    if (getStage(Reg) != RS_New)
      continue;

    // Remainder interval. Don't try splitting again, spill if it doesn't
    // allocate.
    if (IntvMap[i] == 0) {
      setStage(Reg, RS_Spill);
      continue;
    }

    // Global intervals. Allow repeated splitting as long as the number of live
    // blocks is strictly decreasing.
    if (IntvMap[i] < NumGlobalIntvs) {
      if (SA->countLiveBlocks(&Reg) >= OrigBlocks) {
        DEBUG(dbgs() << "Main interval covers the same " << OrigBlocks
                     << " blocks as original.\n");
        // Don't allow repeated splitting as a safe guard against looping.
        setStage(Reg, RS_Split2);
      }
      continue;
    }

    // Other intervals are treated as new. This includes local intervals created
    // for blocks with multiple uses, and anything created by DCE.
  }

  if (VerifyEnabled)
    MF->verify(this, "After splitting live range around region");
}

unsigned RAGreedy::tryRegionSplit(LiveInterval &VirtReg, AllocationOrder &Order,
                                  SmallVectorImpl<unsigned> &NewVRegs) {
  unsigned NumCands = 0;
  BlockFrequency BestCost;

  // Check if we can split this live range around a compact region.
  bool HasCompact = calcCompactRegion(GlobalCand.front());
  if (HasCompact) {
    // Yes, keep GlobalCand[0] as the compact region candidate.
    NumCands = 1;
    BestCost = BlockFrequency::getMaxFrequency();
  } else {
    // No benefit from the compact region, our fallback will be per-block
    // splitting. Make sure we find a solution that is cheaper than spilling.
    BestCost = calcSpillCost();
    DEBUG(dbgs() << "Cost of isolating all blocks = ";
          MBFI->printBlockFreq(dbgs(), BestCost) << '\n');
  }

  unsigned BestCand =
      calculateRegionSplitCost(VirtReg, Order, BestCost, NumCands,
                               false/*IgnoreCSR*/);

  // No solutions found, fall back to single block splitting.
  if (!HasCompact && BestCand == NoCand)
    return 0;

  return doRegionSplit(VirtReg, BestCand, HasCompact, NewVRegs);
}

unsigned RAGreedy::calculateRegionSplitCost(LiveInterval &VirtReg,
                                            AllocationOrder &Order,
                                            BlockFrequency &BestCost,
                                            unsigned &NumCands,
                                            bool IgnoreCSR) {
  unsigned BestCand = NoCand;
  Order.rewind();
  while (unsigned PhysReg = Order.next()) {
    if (IgnoreCSR && isUnusedCalleeSavedReg(PhysReg))
      continue;

    // Discard bad candidates before we run out of interference cache cursors.
    // This will only affect register classes with a lot of registers (>32).
    if (NumCands == IntfCache.getMaxCursors()) {
      unsigned WorstCount = ~0u;
      unsigned Worst = 0;
      for (unsigned i = 0; i != NumCands; ++i) {
        if (i == BestCand || !GlobalCand[i].PhysReg)
          continue;
        unsigned Count = GlobalCand[i].LiveBundles.count();
        if (Count < WorstCount)
          Worst = i, WorstCount = Count;
      }
      --NumCands;
      GlobalCand[Worst] = GlobalCand[NumCands];
      if (BestCand == NumCands)
        BestCand = Worst;
    }

    if (GlobalCand.size() <= NumCands)
      GlobalCand.resize(NumCands+1);
    GlobalSplitCandidate &Cand = GlobalCand[NumCands];
    Cand.reset(IntfCache, PhysReg);

    SpillPlacer->prepare(Cand.LiveBundles);
    BlockFrequency Cost;
    if (!addSplitConstraints(Cand.Intf, Cost)) {
      DEBUG(dbgs() << PrintReg(PhysReg, TRI) << "\tno positive bundles\n");
      continue;
    }
    DEBUG(dbgs() << PrintReg(PhysReg, TRI) << "\tstatic = ";
          MBFI->printBlockFreq(dbgs(), Cost));
    if (Cost >= BestCost) {
      DEBUG({
        if (BestCand == NoCand)
          dbgs() << " worse than no bundles\n";
        else
          dbgs() << " worse than "
                 << PrintReg(GlobalCand[BestCand].PhysReg, TRI) << '\n';
      });
      continue;
    }
    growRegion(Cand);

    SpillPlacer->finish();

    // No live bundles, defer to splitSingleBlocks().
    if (!Cand.LiveBundles.any()) {
      DEBUG(dbgs() << " no bundles.\n");
      continue;
    }

    Cost += calcGlobalSplitCost(Cand);
    DEBUG({
      dbgs() << ", total = "; MBFI->printBlockFreq(dbgs(), Cost)
             << " with bundles";
      for (int i = Cand.LiveBundles.find_first(); i>=0;
           i = Cand.LiveBundles.find_next(i))
        dbgs() << " EB#" << i;
      dbgs() << ".\n";
    });
    if (Cost < BestCost) {
      BestCand = NumCands;
      BestCost = Cost;
    }
    ++NumCands;
  }
  return BestCand;
}

unsigned RAGreedy::doRegionSplit(LiveInterval &VirtReg, unsigned BestCand,
                                 bool HasCompact,
                                 SmallVectorImpl<unsigned> &NewVRegs) {
  SmallVector<unsigned, 8> UsedCands;
  // Prepare split editor.
  LiveRangeEdit LREdit(&VirtReg, NewVRegs, *MF, *LIS, VRM, this);
  SE->reset(LREdit, SplitSpillMode);

  // Assign all edge bundles to the preferred candidate, or NoCand.
  BundleCand.assign(Bundles->getNumBundles(), NoCand);

  // Assign bundles for the best candidate region.
  if (BestCand != NoCand) {
    GlobalSplitCandidate &Cand = GlobalCand[BestCand];
    if (unsigned B = Cand.getBundles(BundleCand, BestCand)) {
      UsedCands.push_back(BestCand);
      Cand.IntvIdx = SE->openIntv();
      DEBUG(dbgs() << "Split for " << PrintReg(Cand.PhysReg, TRI) << " in "
                   << B << " bundles, intv " << Cand.IntvIdx << ".\n");
      (void)B;
    }
  }

  // Assign bundles for the compact region.
  if (HasCompact) {
    GlobalSplitCandidate &Cand = GlobalCand.front();
    assert(!Cand.PhysReg && "Compact region has no physreg");
    if (unsigned B = Cand.getBundles(BundleCand, 0)) {
      UsedCands.push_back(0);
      Cand.IntvIdx = SE->openIntv();
      DEBUG(dbgs() << "Split for compact region in " << B << " bundles, intv "
                   << Cand.IntvIdx << ".\n");
      (void)B;
    }
  }

  splitAroundRegion(LREdit, UsedCands);
  return 0;
}

//===----------------------------------------------------------------------===//
//                            Per-Block Splitting
//===----------------------------------------------------------------------===//

/// tryBlockSplit - Split a global live range around every block with uses. This
/// creates a lot of local live ranges, that will be split by tryLocalSplit if
/// they don't allocate.
unsigned RAGreedy::tryBlockSplit(LiveInterval &VirtReg, AllocationOrder &Order,
                                 SmallVectorImpl<unsigned> &NewVRegs) {
  assert(&SA->getParent() == &VirtReg && "Live range wasn't analyzed");
  unsigned Reg = VirtReg.reg;
  bool SingleInstrs = RegClassInfo.isProperSubClass(MRI->getRegClass(Reg));
  LiveRangeEdit LREdit(&VirtReg, NewVRegs, *MF, *LIS, VRM, this);
  SE->reset(LREdit, SplitSpillMode);
  ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks();
  for (unsigned i = 0; i != UseBlocks.size(); ++i) {
    const SplitAnalysis::BlockInfo &BI = UseBlocks[i];
    if (SA->shouldSplitSingleBlock(BI, SingleInstrs))
      SE->splitSingleBlock(BI);
  }

  // No blocks were split.
  if (LREdit.empty())
    return 0;

  // We did split for some blocks.
  SmallVector<unsigned, 8> IntvMap;
  SE->finish(&IntvMap);

  // Tell LiveDebugVariables about the new ranges.
  DebugVars->splitRegister(Reg, LREdit.regs(), *LIS);

  ExtraRegInfo.resize(MRI->getNumVirtRegs());

  // Sort out the new intervals created by splitting. The remainder interval
  // goes straight to spilling, the new local ranges get to stay RS_New.
  for (unsigned i = 0, e = LREdit.size(); i != e; ++i) {
    LiveInterval &LI = LIS->getInterval(LREdit.get(i));
    if (getStage(LI) == RS_New && IntvMap[i] == 0)
      setStage(LI, RS_Spill);
  }

  if (VerifyEnabled)
    MF->verify(this, "After splitting live range around basic blocks");
  return 0;
}

//===----------------------------------------------------------------------===//
//                         Per-Instruction Splitting
//===----------------------------------------------------------------------===//

/// Get the number of allocatable registers that match the constraints of \p Reg
/// on \p MI and that are also in \p SuperRC.
static unsigned getNumAllocatableRegsForConstraints(
    const MachineInstr *MI, unsigned Reg, const TargetRegisterClass *SuperRC,
    const TargetInstrInfo *TII, const TargetRegisterInfo *TRI,
    const RegisterClassInfo &RCI) {
  assert(SuperRC && "Invalid register class");

  const TargetRegisterClass *ConstrainedRC =
      MI->getRegClassConstraintEffectForVReg(Reg, SuperRC, TII, TRI,
                                             /* ExploreBundle */ true);
  if (!ConstrainedRC)
    return 0;
  return RCI.getNumAllocatableRegs(ConstrainedRC);
}

/// tryInstructionSplit - Split a live range around individual instructions.
/// This is normally not worthwhile since the spiller is doing essentially the
/// same thing. However, when the live range is in a constrained register
/// class, it may help to insert copies such that parts of the live range can
/// be moved to a larger register class.
///
/// This is similar to spilling to a larger register class.
unsigned
RAGreedy::tryInstructionSplit(LiveInterval &VirtReg, AllocationOrder &Order,
                              SmallVectorImpl<unsigned> &NewVRegs) {
  const TargetRegisterClass *CurRC = MRI->getRegClass(VirtReg.reg);
  // There is no point to this if there are no larger sub-classes.
  if (!RegClassInfo.isProperSubClass(CurRC))
    return 0;

  // Always enable split spill mode, since we're effectively spilling to a
  // register.
  LiveRangeEdit LREdit(&VirtReg, NewVRegs, *MF, *LIS, VRM, this);
  SE->reset(LREdit, SplitEditor::SM_Size);

  ArrayRef<SlotIndex> Uses = SA->getUseSlots();
  if (Uses.size() <= 1)
    return 0;

  DEBUG(dbgs() << "Split around " << Uses.size() << " individual instrs.\n");

  const TargetRegisterClass *SuperRC =
      TRI->getLargestLegalSuperClass(CurRC, *MF);
  unsigned SuperRCNumAllocatableRegs = RCI.getNumAllocatableRegs(SuperRC);
  // Split around every non-copy instruction if this split will relax
  // the constraints on the virtual register.
  // Otherwise, splitting just inserts uncoalescable copies that do not help
  // the allocation.
  for (unsigned i = 0; i != Uses.size(); ++i) {
    if (const MachineInstr *MI = Indexes->getInstructionFromIndex(Uses[i]))
      if (MI->isFullCopy() ||
          SuperRCNumAllocatableRegs ==
          getNumAllocatableRegsForConstraints(MI, VirtReg.reg, SuperRC, TII,
                                              TRI, RCI)) {
        DEBUG(dbgs() << "    skip:\t" << Uses[i] << '\t' << *MI);
        continue;
      }
    SE->openIntv();
    SlotIndex SegStart = SE->enterIntvBefore(Uses[i]);
    SlotIndex SegStop = SE->leaveIntvAfter(Uses[i]);
    SE->useIntv(SegStart, SegStop);
  }

  if (LREdit.empty()) {
    DEBUG(dbgs() << "All uses were copies.\n");
    return 0;
  }

  SmallVector<unsigned, 8> IntvMap;
  SE->finish(&IntvMap);
  DebugVars->splitRegister(VirtReg.reg, LREdit.regs(), *LIS);
  ExtraRegInfo.resize(MRI->getNumVirtRegs());

  // Assign all new registers to RS_Spill. This was the last chance.
  setStage(LREdit.begin(), LREdit.end(), RS_Spill);
  return 0;
}

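// Illustrative effect (assumed target details, not from the original source):
// on x86, a range constrained to GR32_ABCD by a single instruction can be
// split so that only a tiny interval around that instruction stays in
// GR32_ABCD, while the surrounding copies inflate to the full GR32 class and
// become much easier to allocate.
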
//===----------------------------------------------------------------------===//
//                               Local Splitting
//===----------------------------------------------------------------------===//

/// calcGapWeights - Compute the maximum spill weight that needs to be evicted
/// in order to use PhysReg between two entries in SA->UseSlots.
///
/// GapWeight[i] represents the gap between UseSlots[i] and UseSlots[i+1].
///
void RAGreedy::calcGapWeights(unsigned PhysReg,
                              SmallVectorImpl<float> &GapWeight) {
  assert(SA->getUseBlocks().size() == 1 && "Not a local interval");
  const SplitAnalysis::BlockInfo &BI = SA->getUseBlocks().front();
  ArrayRef<SlotIndex> Uses = SA->getUseSlots();
  const unsigned NumGaps = Uses.size()-1;

  // Start and end points for the interference check.
  SlotIndex StartIdx =
    BI.LiveIn ? BI.FirstInstr.getBaseIndex() : BI.FirstInstr;
  SlotIndex StopIdx =
    BI.LiveOut ? BI.LastInstr.getBoundaryIndex() : BI.LastInstr;

  GapWeight.assign(NumGaps, 0.0f);

  // Add interference from each overlapping register.
  for (MCRegUnitIterator Units(PhysReg, TRI); Units.isValid(); ++Units) {
    if (!Matrix->query(const_cast<LiveInterval&>(SA->getParent()), *Units)
          .checkInterference())
      continue;

    // We know that VirtReg is a continuous interval from FirstInstr to
    // LastInstr, so we don't need InterferenceQuery.
    //
    // Interference that overlaps an instruction is counted in both gaps
    // surrounding the instruction. The exception is interference before
    // StartIdx and after StopIdx.
    //
    LiveIntervalUnion::SegmentIter IntI =
      Matrix->getLiveUnions()[*Units].find(StartIdx);
    for (unsigned Gap = 0; IntI.valid() && IntI.start() < StopIdx; ++IntI) {
      // Skip the gaps before IntI.
      while (Uses[Gap+1].getBoundaryIndex() < IntI.start())
        if (++Gap == NumGaps)
          break;
      if (Gap == NumGaps)
        break;

      // Update the gaps covered by IntI.
      const float weight = IntI.value()->weight;
      for (; Gap != NumGaps; ++Gap) {
        GapWeight[Gap] = std::max(GapWeight[Gap], weight);
        if (Uses[Gap+1].getBaseIndex() >= IntI.stop())
          break;
      }
      if (Gap == NumGaps)
        break;
    }
  }

  // Add fixed interference.
  for (MCRegUnitIterator Units(PhysReg, TRI); Units.isValid(); ++Units) {
    const LiveRange &LR = LIS->getRegUnit(*Units);
    LiveRange::const_iterator I = LR.find(StartIdx);
    LiveRange::const_iterator E = LR.end();

    // Same loop as above. Mark any overlapped gaps as HUGE_VALF.
    for (unsigned Gap = 0; I != E && I->start < StopIdx; ++I) {
      while (Uses[Gap+1].getBoundaryIndex() < I->start)
        if (++Gap == NumGaps)
          break;
      if (Gap == NumGaps)
        break;

      for (; Gap != NumGaps; ++Gap) {
        GapWeight[Gap] = llvm::huge_valf;
        if (Uses[Gap+1].getBaseIndex() >= I->end)
          break;
      }
      if (Gap == NumGaps)
        break;
    }
  }
}

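// Gap indexing example (illustrative): for Uses = {A, B, C} there are two
// gaps, GapWeight[0] spanning A..B and GapWeight[1] spanning B..C. An
// interfering segment overlapping B contributes its weight to both gaps, and
// fixed regunit interference forces a gap's weight to huge_valf so it is
// never chosen for reuse.
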
/// tryLocalSplit - Try to split VirtReg into smaller intervals inside its only
/// basic block.
unsigned RAGreedy::tryLocalSplit(LiveInterval &VirtReg, AllocationOrder &Order,
                                 SmallVectorImpl<unsigned> &NewVRegs) {
  assert(SA->getUseBlocks().size() == 1 && "Not a local interval");
  const SplitAnalysis::BlockInfo &BI = SA->getUseBlocks().front();

  // Note that it is possible to have an interval that is live-in or live-out
  // while only covering a single block - A phi-def can use undef values from
  // predecessors, and the block could be a single-block loop.
  // We don't bother doing anything clever about such a case, we simply assume
  // that the interval is continuous from FirstInstr to LastInstr. We should
  // make sure that we don't do anything illegal to such an interval, though.

  ArrayRef<SlotIndex> Uses = SA->getUseSlots();
  if (Uses.size() <= 2)
    return 0;
  const unsigned NumGaps = Uses.size()-1;

  DEBUG({
    dbgs() << "tryLocalSplit: ";
    for (unsigned i = 0, e = Uses.size(); i != e; ++i)
      dbgs() << ' ' << Uses[i];
    dbgs() << '\n';
  });
  // If VirtReg is live across any register mask operands, compute a list of
  // gaps with register masks.
  SmallVector<unsigned, 8> RegMaskGaps;
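  // Gaps overlapping a regmask (typically a call) are later forced to
  // huge_valf for any physical register the mask clobbers, so no chosen
  // split range for such a register can cover them.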
  if (Matrix->checkRegMaskInterference(VirtReg)) {
    // Get regmask slots for the whole block.
    ArrayRef<SlotIndex> RMS = LIS->getRegMaskSlotsInBlock(BI.MBB->getNumber());
    DEBUG(dbgs() << RMS.size() << " regmasks in block:");
    // Constrain to VirtReg's live range.
    unsigned ri = std::lower_bound(RMS.begin(), RMS.end(),
                                   Uses.front().getRegSlot()) - RMS.begin();
    unsigned re = RMS.size();
    for (unsigned i = 0; i != NumGaps && ri != re; ++i) {
      // Look for Uses[i] <= RMS <= Uses[i+1].
      assert(!SlotIndex::isEarlierInstr(RMS[ri], Uses[i]));
      if (SlotIndex::isEarlierInstr(Uses[i+1], RMS[ri]))
        continue;
      // Skip a regmask on the same instruction as the last use. It doesn't
      // overlap the live range.
      if (SlotIndex::isSameInstr(Uses[i+1], RMS[ri]) && i+1 == NumGaps)
        break;
      DEBUG(dbgs() << ' ' << RMS[ri] << ':' << Uses[i] << '-' << Uses[i+1]);
      RegMaskGaps.push_back(i);
      // Advance ri to the next gap. A regmask on one of the uses counts in
      // both gaps.
      while (ri != re && SlotIndex::isEarlierInstr(RMS[ri], Uses[i+1]))
        ++ri;
    }
    DEBUG(dbgs() << '\n');
  }
  // Since we allow local split results to be split again, there is a risk of
  // creating infinite loops. It is tempting to require that the new live
  // ranges have fewer instructions than the original. That would guarantee
  // convergence, but it is too strict. A live range with 3 instructions can be
  // split 2+3 (including the COPY), and we want to allow that.
  //
  // Instead we use these rules:
  //
  // 1. Allow any split for ranges with getStage() < RS_Split2. (Except for the
  //    noop split, of course).
  // 2. Require progress be made for ranges with getStage() == RS_Split2. All
  //    the new ranges must have fewer instructions than before the split.
  // 3. New ranges with the same number of instructions are marked RS_Split2,
  //    smaller ranges are marked RS_New.
  //
  // These rules allow a 3 -> 2+3 split once, which we need. They also prevent
  // excessive splitting and infinite loops.
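  //
  // For example, a 4-instruction range at RS_Split2 may be split 2+3: both
  // new ranges have fewer instructions than the original and are marked
  // RS_New. A 3-instruction range at RS_Split2 could not be split 2+3,
  // since the 3-instruction result would make no progress.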
  bool ProgressRequired = getStage(VirtReg) >= RS_Split2;

  // Best split candidate.
  unsigned BestBefore = NumGaps;
  unsigned BestAfter = 0;
  float BestDiff = 0;
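
  // Note: blockFreq below is the use block's frequency scaled by the inverse
  // of the entry block frequency, i.e., the block's execution frequency
  // relative to the function entry.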
  const float blockFreq =
      SpillPlacer->getBlockFrequency(BI.MBB->getNumber()).getFrequency() *
      (1.0f / MBFI->getEntryFreq());
  SmallVector<float, 8> GapWeight;

  Order.rewind();
  while (unsigned PhysReg = Order.next()) {
    // Keep track of the largest spill weight that would need to be evicted in
    // order to make use of PhysReg between UseSlots[i] and UseSlots[i+1].
    calcGapWeights(PhysReg, GapWeight);

    // Remove any gaps with regmask clobbers.
    if (Matrix->checkRegMaskInterference(VirtReg, PhysReg))
      for (unsigned i = 0, e = RegMaskGaps.size(); i != e; ++i)
        GapWeight[RegMaskGaps[i]] = llvm::huge_valf;

    // Try to find the best sequence of gaps to close.
    // The new spill weight must be larger than any gap interference.

    // We will split before Uses[SplitBefore] and after Uses[SplitAfter].
    unsigned SplitBefore = 0, SplitAfter = 1;
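    // The search below slides this window across the gaps: it shrinks the
    // window from the left when the covered interference is too heavy to
    // evict, extends it to the right while the window still looks
    // allocatable, and remembers the best candidate seen.
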
    // MaxGap should always be max(GapWeight[SplitBefore..SplitAfter-1]).
    // It is the spill weight that needs to be evicted.
    float MaxGap = GapWeight[0];

    for (;;) {
      // Live before/after split?
      const bool LiveBefore = SplitBefore != 0 || BI.LiveIn;
      const bool LiveAfter = SplitAfter != NumGaps || BI.LiveOut;

      DEBUG(dbgs() << PrintReg(PhysReg, TRI) << ' '
                   << Uses[SplitBefore] << '-' << Uses[SplitAfter]
                   << " i=" << MaxGap);

      // Stop before the interval gets so big we wouldn't be making progress.
      if (!LiveBefore && !LiveAfter) {
        DEBUG(dbgs() << " all\n");
        break;
      }
      // Should the interval be extended or shrunk?
      bool Shrink = true;

      // How many gaps would the new range have?
      unsigned NewGaps = LiveBefore + SplitAfter - SplitBefore + LiveAfter;
      // Legally, without causing looping?
      bool Legal = !ProgressRequired || NewGaps < NumGaps;

      if (Legal && MaxGap < llvm::huge_valf) {
        // Estimate the new spill weight. Each instruction reads or writes the
        // register. Conservatively assume there are no read-modify-write
        // instructions.
        //
        // Try to guess the size of the new interval.
        const float EstWeight = normalizeSpillWeight(
            blockFreq * (NewGaps + 1),
            Uses[SplitBefore].distance(Uses[SplitAfter]) +
                (LiveBefore + LiveAfter) * SlotIndex::InstrDist,
            1);
        // Would this split be possible to allocate?
        // Never allocate all gaps, we wouldn't be making progress.
        DEBUG(dbgs() << " w=" << EstWeight);
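        // Hysteresis keeps this floating-point comparison slightly
        // conservative so that borderline estimates produce stable decisions
        // from one iteration to the next.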
        if (EstWeight * Hysteresis >= MaxGap) {
          Shrink = false;
          float Diff = EstWeight - MaxGap;
          if (Diff > BestDiff) {
            DEBUG(dbgs() << " (best)");
            BestDiff = Hysteresis * Diff;
            BestBefore = SplitBefore;
            BestAfter = SplitAfter;
          }
        }
      }

      // Try to shrink.
      if (Shrink) {
        if (++SplitBefore < SplitAfter) {
          DEBUG(dbgs() << " shrink\n");
          // Recompute the max when necessary.
          if (GapWeight[SplitBefore - 1] >= MaxGap) {
            MaxGap = GapWeight[SplitBefore];
            for (unsigned i = SplitBefore + 1; i != SplitAfter; ++i)
              MaxGap = std::max(MaxGap, GapWeight[i]);
          }
          continue;
        }
        MaxGap = 0;
      }

      // Try to extend the interval.
      if (SplitAfter >= NumGaps) {
        DEBUG(dbgs() << " end\n");
        break;
      }

      DEBUG(dbgs() << " extend\n");
      MaxGap = std::max(MaxGap, GapWeight[SplitAfter++]);
    }
  }
  // Didn't find any candidates?
  if (BestBefore == NumGaps)
    return 0;

  DEBUG(dbgs() << "Best local split range: " << Uses[BestBefore]
               << '-' << Uses[BestAfter] << ", " << BestDiff
               << ", " << (BestAfter - BestBefore + 1) << " instrs\n");
  LiveRangeEdit LREdit(&VirtReg, NewVRegs, *MF, *LIS, VRM, this);
  SE->reset(LREdit);

  SE->openIntv();
  SlotIndex SegStart = SE->enterIntvBefore(Uses[BestBefore]);
  SlotIndex SegStop = SE->leaveIntvAfter(Uses[BestAfter]);
  SE->useIntv(SegStart, SegStop);
  SmallVector<unsigned, 8> IntvMap;
  SE->finish(&IntvMap);
  DebugVars->splitRegister(VirtReg.reg, LREdit.regs(), *LIS);
  // If the new range has the same number of instructions as before, mark it as
  // RS_Split2 so the next split will be forced to make progress. Otherwise,
  // leave the new intervals as RS_New so they can compete.
  bool LiveBefore = BestBefore != 0 || BI.LiveIn;
  bool LiveAfter = BestAfter != NumGaps || BI.LiveOut;
  unsigned NewGaps = LiveBefore + BestAfter - BestBefore + LiveAfter;
  if (NewGaps >= NumGaps) {
    DEBUG(dbgs() << "Tagging non-progress ranges: ");
    assert(!ProgressRequired && "Didn't make progress when it was required.");
    for (unsigned i = 0, e = IntvMap.size(); i != e; ++i)
      if (IntvMap[i] == 1) {
        setStage(LIS->getInterval(LREdit.get(i)), RS_Split2);
        DEBUG(dbgs() << PrintReg(LREdit.get(i)));
      }
    DEBUG(dbgs() << '\n');
  }
  ++NumLocalSplits;

  return 0;
}
//===----------------------------------------------------------------------===//
//                          Live Range Splitting
//===----------------------------------------------------------------------===//
/// trySplit - Try to split VirtReg or one of its interferences, making it
/// assignable.
/// @return Physreg when VirtReg may be assigned and/or new NewVRegs.
unsigned RAGreedy::trySplit(LiveInterval &VirtReg, AllocationOrder &Order,
                            SmallVectorImpl<unsigned> &NewVRegs) {
  // Ranges must be Split2 or less.
  if (getStage(VirtReg) >= RS_Spill)
    return 0;
  // Local intervals are handled separately.
  if (LIS->intervalIsInOneMBB(VirtReg)) {
    NamedRegionTimer T("Local Splitting", TimerGroupName, TimePassesIsEnabled);
    SA->analyze(&VirtReg);
    unsigned PhysReg = tryLocalSplit(VirtReg, Order, NewVRegs);
    if (PhysReg || !NewVRegs.empty())
      return PhysReg;
    return tryInstructionSplit(VirtReg, Order, NewVRegs);
  }

  NamedRegionTimer T("Global Splitting", TimerGroupName, TimePassesIsEnabled);

  SA->analyze(&VirtReg);
  // FIXME: SplitAnalysis may repair broken live ranges coming from the
  // coalescer. That may cause the range to become allocatable which means that
  // tryRegionSplit won't be making progress. This check should be replaced with
  // an assertion when the coalescer is fixed.
  if (SA->didRepairRange()) {
    // VirtReg has changed, so all cached queries are invalid.
    Matrix->invalidateVirtRegs();
    if (unsigned PhysReg = tryAssign(VirtReg, Order, NewVRegs))
      return PhysReg;
  }
  // First try to split around a region spanning multiple blocks. RS_Split2
  // ranges already made dubious progress with region splitting, so they go
  // straight to single block splitting.
  if (getStage(VirtReg) < RS_Split2) {
    unsigned PhysReg = tryRegionSplit(VirtReg, Order, NewVRegs);
    if (PhysReg || !NewVRegs.empty())
      return PhysReg;
  }

  // Then isolate blocks.
  return tryBlockSplit(VirtReg, Order, NewVRegs);
}
//===----------------------------------------------------------------------===//
//                          Last Chance Recoloring
//===----------------------------------------------------------------------===//
/// mayRecolorAllInterferences - Check if the virtual registers that
/// interfere with \p VirtReg on \p PhysReg (or one of its aliases) may be
/// recolored to free \p PhysReg.
/// When true is returned, \p RecoloringCandidates has been augmented with all
/// the live intervals that need to be recolored in order to free \p PhysReg
/// for \p VirtReg.
/// \p FixedRegisters contains all the virtual registers that cannot be
/// recolored.
bool
RAGreedy::mayRecolorAllInterferences(unsigned PhysReg, LiveInterval &VirtReg,
                                     SmallLISet &RecoloringCandidates,
                                     const SmallVirtRegSet &FixedRegisters) {
  const TargetRegisterClass *CurRC = MRI->getRegClass(VirtReg.reg);

  for (MCRegUnitIterator Units(PhysReg, TRI); Units.isValid(); ++Units) {
    LiveIntervalUnion::Query &Q = Matrix->query(VirtReg, *Units);
    // If there are LastChanceRecoloringMaxInterference or more interferences,
    // chances are one would not be recolorable.
    if (Q.collectInterferingVRegs(LastChanceRecoloringMaxInterference) >=
        LastChanceRecoloringMaxInterference && !ExhaustiveSearch) {
      DEBUG(dbgs() << "Early abort: too many interferences.\n");
      CutOffInfo |= CO_Interf;
      return false;
    }
    for (unsigned i = Q.interferingVRegs().size(); i; --i) {
      LiveInterval *Intf = Q.interferingVRegs()[i - 1];
      // If Intf is done and sits in the same register class as VirtReg, it
      // would not be recolorable as it is in the same state as VirtReg.
      if ((getStage(*Intf) == RS_Done &&
           MRI->getRegClass(Intf->reg) == CurRC) ||
          FixedRegisters.count(Intf->reg)) {
        DEBUG(dbgs() << "Early abort: the interference is not recolorable.\n");
        return false;
      }
      RecoloringCandidates.insert(Intf);
    }
  }
  return true;
}
/// tryLastChanceRecoloring - Try to assign a color to \p VirtReg by recoloring
/// its interferences.
/// Last chance recoloring chooses a color for \p VirtReg and recolors every
/// virtual register that was using it. The recoloring process may recursively
/// use the last chance recoloring. Therefore, when a virtual register has been
/// assigned a color by this mechanism, it is marked as Fixed, i.e., it cannot
/// be last-chance-recolored again during this recoloring "session".
/// E.g.,
/// Let
/// vA can use {R1, R2    }
/// vB can use {    R2, R3}
/// vC can use {R1        }
/// Where vA, vB, and vC cannot be split anymore (they are reloads for
/// instance) and they all interfere.
///
/// vA is assigned R1
/// vB is assigned R2
/// vC tries to evict vA but vA is already done.
/// Regular register allocation fails.
///
/// Last chance recoloring kicks in:
/// vC does as if vA was evicted => vC uses R1.
/// vC is marked as fixed.
/// vA needs to find a color.
/// None are available.
/// vA cannot evict vC: vC is a fixed virtual register now.
/// vA does as if vB was evicted => vA uses R2.
/// vB needs to find a color.
/// R3 is available.
/// Recoloring => vC = R1, vA = R2, vB = R3
///
/// \p Order defines the preferred allocation order for \p VirtReg.
/// \p NewRegs will contain any new virtual registers that have been created
/// (split, spill) during the process and that must be assigned.
/// \p FixedRegisters contains all the virtual registers that cannot be
/// recolored.
/// \p Depth gives the current depth of the last chance recoloring.
/// \return a physical register that can be used for VirtReg or ~0u if none
/// exists.
unsigned RAGreedy::tryLastChanceRecoloring(LiveInterval &VirtReg,
                                           AllocationOrder &Order,
                                           SmallVectorImpl<unsigned> &NewVRegs,
                                           SmallVirtRegSet &FixedRegisters,
                                           unsigned Depth) {
  DEBUG(dbgs() << "Try last chance recoloring for " << VirtReg << '\n');
  // Ranges must be Done.
  assert((getStage(VirtReg) >= RS_Done || !VirtReg.isSpillable()) &&
         "Last chance recoloring should really be last chance");
  // Set the max depth to LastChanceRecoloringMaxDepth.
  // We may want to reconsider that if we end up with a too large search space
  // for targets with hundreds of registers.
  // Indeed, in that case we may want to cut the search space earlier.
  if (Depth >= LastChanceRecoloringMaxDepth && !ExhaustiveSearch) {
    DEBUG(dbgs() << "Abort because max depth has been reached.\n");
    CutOffInfo |= CO_Depth;
    return ~0u;
  }
  // Set of live intervals that will need to be recolored.
  SmallLISet RecoloringCandidates;
  // Record the original mapping from virtual register to physical register
  // in case the recoloring fails.
  DenseMap<unsigned, unsigned> VirtRegToPhysReg;
  // Mark VirtReg as fixed, i.e., it will not be recolored past this point in
  // this recoloring "session".
  FixedRegisters.insert(VirtReg.reg);

  Order.rewind();
  while (unsigned PhysReg = Order.next()) {
    DEBUG(dbgs() << "Try to assign: " << VirtReg << " to "
                 << PrintReg(PhysReg, TRI) << '\n');
    RecoloringCandidates.clear();
    VirtRegToPhysReg.clear();

    // It is only possible to recolor virtual register interference.
    if (Matrix->checkInterference(VirtReg, PhysReg) >
        LiveRegMatrix::IK_VirtReg) {
      DEBUG(dbgs() << "Some interferences are not with virtual registers.\n");
      continue;
    }

    // Early give up on this PhysReg if it is obvious we cannot recolor all
    // the interferences.
    if (!mayRecolorAllInterferences(PhysReg, VirtReg, RecoloringCandidates,
                                    FixedRegisters)) {
      DEBUG(dbgs() << "Some interferences cannot be recolored.\n");
      continue;
    }
    // RecoloringCandidates contains all the virtual registers that interfere
    // with VirtReg on PhysReg (or one of its aliases).
    // Enqueue them for recoloring and perform the actual recoloring.
    PQueue RecoloringQueue;
    for (SmallLISet::iterator It = RecoloringCandidates.begin(),
                              EndIt = RecoloringCandidates.end();
         It != EndIt; ++It) {
      unsigned ItVirtReg = (*It)->reg;
      enqueue(RecoloringQueue, *It);
      assert(VRM->hasPhys(ItVirtReg) &&
             "Interferences are supposed to be with allocated variables");

      // Record the current allocation.
      VirtRegToPhysReg[ItVirtReg] = VRM->getPhys(ItVirtReg);
      // Unassign the interfering live range to make its color available for
      // the recoloring attempt.
      Matrix->unassign(**It);
    }
    // Do as if VirtReg was assigned to PhysReg so that the underlying
    // recoloring has the right information about the interferences and
    // available colors.
    Matrix->assign(VirtReg, PhysReg);

    // Save the current recoloring state.
    // If we cannot recolor all the interferences, we will have to start again
    // at this point for the next physical register.
    SmallVirtRegSet SaveFixedRegisters(FixedRegisters);
    if (tryRecoloringCandidates(RecoloringQueue, NewVRegs, FixedRegisters,
                                Depth)) {
      // Do not mess up with the global assignment process.
      // I.e., VirtReg must be unassigned.
      Matrix->unassign(VirtReg);
      return PhysReg;
    }
    DEBUG(dbgs() << "Fail to assign: " << VirtReg << " to "
                 << PrintReg(PhysReg, TRI) << '\n');

    // The recoloring attempt failed, undo the changes.
    FixedRegisters = SaveFixedRegisters;
    Matrix->unassign(VirtReg);

    for (SmallLISet::iterator It = RecoloringCandidates.begin(),
                              EndIt = RecoloringCandidates.end();
         It != EndIt; ++It) {
      unsigned ItVirtReg = (*It)->reg;
      if (VRM->hasPhys(ItVirtReg))
        Matrix->unassign(**It);
      unsigned ItPhysReg = VirtRegToPhysReg[ItVirtReg];
      Matrix->assign(**It, ItPhysReg);
    }
  }

  // Last chance recoloring did not work either, give up.
  return ~0u;
}
/// tryRecoloringCandidates - Try to assign a new color to every register
/// in \p RecoloringQueue.
/// \p NewRegs will contain any new virtual register created during the
/// recoloring process.
/// \p FixedRegisters[in/out] contains all the registers that have been
/// recolored.
/// \return true if all virtual registers in RecoloringQueue were successfully
/// recolored, false otherwise.
bool RAGreedy::tryRecoloringCandidates(PQueue &RecoloringQueue,
                                       SmallVectorImpl<unsigned> &NewVRegs,
                                       SmallVirtRegSet &FixedRegisters,
                                       unsigned Depth) {
  while (!RecoloringQueue.empty()) {
    LiveInterval *LI = dequeue(RecoloringQueue);
    DEBUG(dbgs() << "Try to recolor: " << *LI << '\n');
    unsigned PhysReg =
        selectOrSplitImpl(*LI, NewVRegs, FixedRegisters, Depth + 1);
    if (PhysReg == ~0u || !PhysReg)
      return false;
    DEBUG(dbgs() << "Recoloring of " << *LI
                 << " succeeded with: " << PrintReg(PhysReg, TRI) << '\n');
    Matrix->assign(*LI, PhysReg);
    FixedRegisters.insert(LI->reg);
  }
  return true;
}
//===----------------------------------------------------------------------===//
//                            Main Entry Point
//===----------------------------------------------------------------------===//
unsigned RAGreedy::selectOrSplit(LiveInterval &VirtReg,
                                 SmallVectorImpl<unsigned> &NewVRegs) {
  CutOffInfo = CO_None;
  LLVMContext &Ctx = MF->getFunction()->getContext();
  SmallVirtRegSet FixedRegisters;
  unsigned Reg = selectOrSplitImpl(VirtReg, NewVRegs, FixedRegisters);
  if (Reg == ~0U && (CutOffInfo != CO_None)) {
    uint8_t CutOffEncountered = CutOffInfo & (CO_Depth | CO_Interf);
    if (CutOffEncountered == CO_Depth)
      Ctx.emitError("register allocation failed: maximum depth for recoloring "
                    "reached. Use -fexhaustive-register-search to skip "
                    "cutoffs");
    else if (CutOffEncountered == CO_Interf)
      Ctx.emitError("register allocation failed: maximum interference for "
                    "recoloring reached. Use -fexhaustive-register-search "
                    "to skip cutoffs");
    else if (CutOffEncountered == (CO_Depth | CO_Interf))
      Ctx.emitError("register allocation failed: maximum interference and "
                    "depth for recoloring reached. Use "
                    "-fexhaustive-register-search to skip cutoffs");
  }

  return Reg;
}
/// Using a CSR for the first time has a cost because it causes push|pop
/// to be added to prologue|epilogue. Splitting a cold section of the live
/// range can have lower cost than using the CSR for the first time;
/// Spilling a live range in the cold path can have lower cost than using
/// the CSR for the first time. Returns the physical register if we decide
/// to use the CSR; otherwise return 0.
unsigned RAGreedy::tryAssignCSRFirstTime(LiveInterval &VirtReg,
                                         AllocationOrder &Order,
                                         unsigned PhysReg,
                                         unsigned &CostPerUseLimit,
                                         SmallVectorImpl<unsigned> &NewVRegs) {
  if (getStage(VirtReg) == RS_Spill && VirtReg.isSpillable()) {
    // We choose spill over using the CSR for the first time if the spill cost
    // is lower than CSRCost.
    SA->analyze(&VirtReg);
    if (calcSpillCost() >= CSRCost)
      return PhysReg;

    // We are going to spill, set CostPerUseLimit to 1 to make sure that
    // we will not use a callee-saved register in tryEvict.
    CostPerUseLimit = 1;
    return 0;
  }
  if (getStage(VirtReg) < RS_Split) {
    // We choose pre-splitting over using the CSR for the first time if
    // the cost of splitting is lower than CSRCost.
    SA->analyze(&VirtReg);
    unsigned NumCands = 0;
    BlockFrequency BestCost = CSRCost; // Don't modify CSRCost.
    unsigned BestCand = calculateRegionSplitCost(VirtReg, Order, BestCost,
                                                 NumCands, true /*IgnoreCSR*/);
    if (BestCand == NoCand)
      // Use the CSR if we can't find a region split below CSRCost.
      return PhysReg;

    // Perform the actual pre-splitting.
    doRegionSplit(VirtReg, BestCand, false /*HasCompact*/, NewVRegs);
    return 0;
  }
  return PhysReg;
}
void RAGreedy::aboutToRemoveInterval(LiveInterval &LI) {
  // Do not keep invalid information around.
  SetOfBrokenHints.remove(&LI);
}
void RAGreedy::initializeCSRCost() {
  // We use the larger one out of the command-line option and the value
  // reported by TRI.
  CSRCost = BlockFrequency(
      std::max((unsigned)CSRFirstTimeCost, TRI->getCSRFirstUseCost()));
  if (!CSRCost.getFrequency())
    return;
  // Raw cost is relative to Entry == 2^14; scale it appropriately.
  uint64_t ActualEntry = MBFI->getEntryFreq();
  if (!ActualEntry) {
    CSRCost = 0;
    return;
  }
  uint64_t FixedEntry = 1 << 14;
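  // The scaling below multiplies CSRCost by ActualEntry/FixedEntry. For
  // example, an actual entry frequency of 2^13 halves the cost, while one of
  // 2^15 doubles it.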
  if (ActualEntry < FixedEntry)
    CSRCost *= BranchProbability(ActualEntry, FixedEntry);
  else if (ActualEntry <= UINT32_MAX)
    // Invert the fraction and divide.
    CSRCost /= BranchProbability(FixedEntry, ActualEntry);
  else
    // Can't use BranchProbability in general, since it takes 32-bit numbers.
    CSRCost = CSRCost.getFrequency() * (ActualEntry / FixedEntry);
}
/// \brief Collect the hint info for \p Reg.
/// The results are stored into \p Out.
/// \p Out is not cleared before being populated.
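/// Each full copy between \p Reg and another register contributes one
/// HintInfo entry holding the copy's block frequency, the other register,
/// and that register's current physical assignment (if any).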
void RAGreedy::collectHintInfo(unsigned Reg, HintsInfo &Out) {
  for (const MachineInstr &Instr : MRI->reg_nodbg_instructions(Reg)) {
    if (!Instr.isFullCopy())
      continue;
    // Look for the other end of the copy.
    unsigned OtherReg = Instr.getOperand(0).getReg();
    if (OtherReg == Reg) {
      OtherReg = Instr.getOperand(1).getReg();
      if (OtherReg == Reg)
        continue;
    }
    // Get the current assignment.
    unsigned OtherPhysReg = TargetRegisterInfo::isPhysicalRegister(OtherReg)
                                ? OtherReg
                                : VRM->getPhys(OtherReg);
    // Push the collected information.
    Out.push_back(HintInfo(MBFI->getBlockFreq(Instr.getParent()), OtherReg,
                           OtherPhysReg));
  }
}
/// \brief Using the given \p List, compute the cost of the broken hints if
/// \p PhysReg was used.
/// \return The cost of \p List for \p PhysReg.
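/// For example, if \p List holds two entries currently mapped to R1 and one
/// mapped to R2, getBrokenHintFreq(List, R1) sums only the frequency of the
/// R2 entry.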
BlockFrequency RAGreedy::getBrokenHintFreq(const HintsInfo &List,
                                           unsigned PhysReg) {
  BlockFrequency Cost = 0;
  for (const HintInfo &Info : List) {
    if (Info.PhysReg != PhysReg)
      Cost += Info.Freq;
  }
  return Cost;
}
/// \brief Using the register assigned to \p VirtReg, try to recolor
/// all the live ranges that are copy-related with \p VirtReg.
/// The recoloring is then propagated to all the live-ranges that have
/// been recolored and so on, until no more copies can be coalesced or
/// it is not profitable.
/// For a given live range, profitability is determined by the sum of the
/// frequencies of the non-identity copies it would introduce with the old
/// and new register.
void RAGreedy::tryHintRecoloring(LiveInterval &VirtReg) {
  // We have a broken hint, check if it is possible to fix it by
  // reusing PhysReg for the copy-related live-ranges. Indeed, we evicted
  // some register and PhysReg may be available for the other live-ranges.
  SmallSet<unsigned, 4> Visited;
  SmallVector<unsigned, 2> RecoloringCandidates;
  HintsInfo Info;
  unsigned Reg = VirtReg.reg;
  unsigned PhysReg = VRM->getPhys(Reg);
  // Start the recoloring algorithm from the input live-interval, then
  // it will propagate to the ones that are copy-related with it.
  Visited.insert(Reg);
  RecoloringCandidates.push_back(Reg);

  DEBUG(dbgs() << "Trying to reconcile hints for: " << PrintReg(Reg, TRI) << '('
               << PrintReg(PhysReg, TRI) << ")\n");
  do {
    Reg = RecoloringCandidates.pop_back_val();

    // We cannot recolor physical registers.
    if (TargetRegisterInfo::isPhysicalRegister(Reg))
      continue;

    assert(VRM->hasPhys(Reg) && "We have an unallocated variable!");
    // Get the live interval mapped with this virtual register to be able
    // to check for the interference with the new color.
    LiveInterval &LI = LIS->getInterval(Reg);
    unsigned CurrPhys = VRM->getPhys(Reg);
    // Check that the new color matches the register class constraints and
    // that it is free for this live range.
    if (CurrPhys != PhysReg && (!MRI->getRegClass(Reg)->contains(PhysReg) ||
                                Matrix->checkInterference(LI, PhysReg)))
      continue;

    DEBUG(dbgs() << PrintReg(Reg, TRI) << '(' << PrintReg(CurrPhys, TRI)
                 << ") is recolorable.\n");
    // Gather the hint info.
    Info.clear();
    collectHintInfo(Reg, Info);
    // Check if recoloring the live-range will increase the cost of the
    // non-identity copies.
    if (CurrPhys != PhysReg) {
      DEBUG(dbgs() << "Checking profitability:\n");
      BlockFrequency OldCopiesCost = getBrokenHintFreq(Info, CurrPhys);
      BlockFrequency NewCopiesCost = getBrokenHintFreq(Info, PhysReg);
      DEBUG(dbgs() << "Old Cost: " << OldCopiesCost.getFrequency()
                   << "\nNew Cost: " << NewCopiesCost.getFrequency() << '\n');
      if (OldCopiesCost < NewCopiesCost) {
        DEBUG(dbgs() << "=> Not profitable.\n");
        continue;
      }
      // At this point, the cost is either cheaper or equal. If it is equal,
      // we consider this profitable because it may expose more recoloring
      // opportunities.
      DEBUG(dbgs() << "=> Profitable.\n");
      // Recolor the live-range.
      Matrix->unassign(LI);
      Matrix->assign(LI, PhysReg);
    }
    // Push all copy-related live-ranges to keep reconciling the broken
    // hints.
    for (const HintInfo &HI : Info) {
      if (Visited.insert(HI.Reg).second)
        RecoloringCandidates.push_back(HI.Reg);
    }
  } while (!RecoloringCandidates.empty());
}
/// \brief Try to recolor broken hints.
/// Broken hints may be repaired by recoloring when an evicted variable
/// freed up a register for a larger live-range.
/// Consider the following example:
/// BB1:
///   a =
///   b =
/// BB2:
///   ...
///   = b
///   = a
/// Let us assume b gets split:
/// BB1:
///   a =
///   b =
/// BB2:
///   c = b
///   ...
///   d = c
///   = d
///   = a
/// Because of how the allocation works, b, c, and d may be assigned different
/// colors. Now, if a gets evicted later:
/// BB1:
///   a =
///   st a, SpillSlot
///   b =
/// BB2:
///   c = b
///   ...
///   d = c
///   = d
///   e = ld SpillSlot
///   = e
/// It is likely that we can assign the same register for b, c, and d,
/// getting rid of 2 copies.
void RAGreedy::tryHintsRecoloring() {
  for (LiveInterval *LI : SetOfBrokenHints) {
    assert(TargetRegisterInfo::isVirtualRegister(LI->reg) &&
           "Recoloring is possible only for virtual registers");
    // Some dead defs may be around (e.g., because of debug uses).
    // Ignore those.
    if (!VRM->hasPhys(LI->reg))
      continue;
    tryHintRecoloring(*LI);
  }
}
unsigned RAGreedy::selectOrSplitImpl(LiveInterval &VirtReg,
                                     SmallVectorImpl<unsigned> &NewVRegs,
                                     SmallVirtRegSet &FixedRegisters,
                                     unsigned Depth) {
  unsigned CostPerUseLimit = ~0u;
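  // ~0u means no cost-per-use limit: tryEvict may consider any physical
  // register. tryAssignCSRFirstTime lowers the limit to 1 when it decides to
  // spill rather than use a callee-saved register for the first time.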
  // First try assigning a free register.
  AllocationOrder Order(VirtReg.reg, *VRM, RegClassInfo);
  if (unsigned PhysReg = tryAssign(VirtReg, Order, NewVRegs)) {
    // When NewVRegs is not empty, we may have made decisions such as evicting
    // a virtual register, go with the earlier decisions and use the physical
    // register.
    if (CSRCost.getFrequency() && isUnusedCalleeSavedReg(PhysReg) &&
        NewVRegs.empty()) {
      unsigned CSRReg = tryAssignCSRFirstTime(VirtReg, Order, PhysReg,
                                              CostPerUseLimit, NewVRegs);
      if (CSRReg || !NewVRegs.empty())
        // Return now if we decide to use a CSR or create new vregs due to
        // pre-splitting.
        return CSRReg;
    } else
      return PhysReg;
  }
  LiveRangeStage Stage = getStage(VirtReg);
  DEBUG(dbgs() << StageName[Stage]
               << " Cascade " << ExtraRegInfo[VirtReg.reg].Cascade << '\n');
  // Try to evict a less worthy live range, but only for ranges from the
  // primary queue. The RS_Split ranges already failed to do this, and they
  // should not get a second chance until they have been split.
  if (Stage != RS_Split)
    if (unsigned PhysReg =
            tryEvict(VirtReg, Order, NewVRegs, CostPerUseLimit)) {
      unsigned Hint = MRI->getSimpleHint(VirtReg.reg);
      // If VirtReg has a hint and that hint is broken, record this virtual
      // register as a recoloring candidate for broken hints. Indeed, since
      // we evicted a variable in its neighborhood it is likely we can at
      // least partially recolor some of the copy-related live-ranges.
      if (Hint && Hint != PhysReg)
        SetOfBrokenHints.insert(&VirtReg);
      return PhysReg;
    }
  assert(NewVRegs.empty() && "Cannot append to existing NewVRegs");

  // The first time we see a live range, don't try to split or spill.
  // Wait until the second time, when all smaller ranges have been allocated.
  // This gives a better picture of the interference to split around.
  if (Stage < RS_Split) {
    setStage(VirtReg, RS_Split);
    DEBUG(dbgs() << "wait for second round\n");
    NewVRegs.push_back(VirtReg.reg);
    return 0;
  }
  // If we couldn't allocate a register from spilling, there is probably some
  // invalid inline assembly. The base class will report it.
  if (Stage >= RS_Done || !VirtReg.isSpillable())
    return tryLastChanceRecoloring(VirtReg, Order, NewVRegs, FixedRegisters,
                                   Depth);
  // Try splitting VirtReg or interferences.
  unsigned PhysReg = trySplit(VirtReg, Order, NewVRegs);
  if (PhysReg || !NewVRegs.empty())
    return PhysReg;
  // Finally spill VirtReg itself.
  NamedRegionTimer T("Spiller", TimerGroupName, TimePassesIsEnabled);
  LiveRangeEdit LRE(&VirtReg, NewVRegs, *MF, *LIS, VRM, this);
  spiller().spill(LRE);
  setStage(NewVRegs.begin(), NewVRegs.end(), RS_Done);

  if (VerifyEnabled)
    MF->verify(this, "After spilling");

  // The live virtual register requesting allocation was spilled, so tell
  // the caller not to allocate anything during this round.
  return 0;
}
bool RAGreedy::runOnMachineFunction(MachineFunction &mf) {
  DEBUG(dbgs() << "********** GREEDY REGISTER ALLOCATION **********\n"
               << "********** Function: " << mf.getName() << '\n');

  MF = &mf;
  TRI = MF->getSubtarget().getRegisterInfo();
  TII = MF->getSubtarget().getInstrInfo();
  RCI.runOnMachineFunction(mf);

  EnableLocalReassign = EnableLocalReassignment ||
                        MF->getSubtarget().enableRALocalReassignment(
                            MF->getTarget().getOptLevel());

  if (VerifyEnabled)
    MF->verify(this, "Before greedy register allocator");
  RegAllocBase::init(getAnalysis<VirtRegMap>(),
                     getAnalysis<LiveIntervals>(),
                     getAnalysis<LiveRegMatrix>());
  Indexes = &getAnalysis<SlotIndexes>();
  MBFI = &getAnalysis<MachineBlockFrequencyInfo>();
  DomTree = &getAnalysis<MachineDominatorTree>();
  SpillerInstance.reset(createInlineSpiller(*this, *MF, *VRM));
  Loops = &getAnalysis<MachineLoopInfo>();
  Bundles = &getAnalysis<EdgeBundles>();
  SpillPlacer = &getAnalysis<SpillPlacement>();
  DebugVars = &getAnalysis<LiveDebugVariables>();

  initializeCSRCost();
  calculateSpillWeightsAndHints(*LIS, mf, *Loops, *MBFI);

  DEBUG(LIS->dump());

  SA.reset(new SplitAnalysis(*VRM, *LIS, *Loops));
  SE.reset(new SplitEditor(*SA, *LIS, *VRM, *DomTree, *MBFI));
  ExtraRegInfo.clear();
  ExtraRegInfo.resize(MRI->getNumVirtRegs());
  IntfCache.init(MF, Matrix->getLiveUnions(), Indexes, LIS, TRI);
  GlobalCand.resize(32); // This will grow as needed.
  SetOfBrokenHints.clear();

  allocatePhysRegs();
  tryHintsRecoloring();

  releaseMemory();
  return true;
}