//===-------- InlineSpiller.cpp - Insert spills and restores inline -------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// The inline spiller modifies the machine function directly instead of
// inserting spills and restores in VirtRegMap.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "regalloc"
#include "Spiller.h"
#include "LiveRangeEdit.h"
#include "VirtRegMap.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/TinyPtrVector.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/LiveStackAnalysis.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

STATISTIC(NumSpilledRanges,   "Number of spilled live ranges");
STATISTIC(NumSnippets,        "Number of snippets included in spills");
STATISTIC(NumSpills,          "Number of spills inserted");
STATISTIC(NumReloads,         "Number of reloads inserted");
STATISTIC(NumFolded,          "Number of folded stack accesses");
STATISTIC(NumFoldedLoads,     "Number of folded loads");
STATISTIC(NumRemats,          "Number of rematerialized defs for spilling");
STATISTIC(NumOmitReloadSpill, "Number of omitted spills after reloads");
STATISTIC(NumHoistLocal,      "Number of locally hoisted spills");
STATISTIC(NumHoistGlobal,     "Number of globally hoisted spills");
STATISTIC(NumRedundantSpills, "Number of redundant spills identified");

namespace {
class InlineSpiller : public Spiller {
  MachineFunctionPass &Pass;
  MachineFunction &MF;
  LiveIntervals &LIS;
  LiveStacks &LSS;
  AliasAnalysis *AA;
  MachineDominatorTree &MDT;
  MachineLoopInfo &Loops;
  VirtRegMap &VRM;
  MachineFrameInfo &MFI;
  MachineRegisterInfo &MRI;
  const TargetInstrInfo &TII;
  const TargetRegisterInfo &TRI;

  // Variables that are valid during spill(), but used by multiple methods.
  LiveRangeEdit *Edit;
  LiveInterval *StackInt;
  int StackSlot;
  unsigned Original;

  // All registers to spill to StackSlot, including the main register.
  SmallVector<unsigned, 8> RegsToSpill;

  // All COPY instructions to/from snippets.
  // They are ignored since both operands refer to the same stack slot.
  SmallPtrSet<MachineInstr*, 8> SnippetCopies;

  // Values that failed to remat at some point.
  SmallPtrSet<VNInfo*, 8> UsedValues;

public:
  // Information about a value that was defined by a copy from a sibling
  // register.
  struct SibValueInfo {
    // True when all reaching defs were reloads: No spill is necessary.
    bool AllDefsAreReloads;

    // True when the value is defined by an original PHI, not a PHI inserted
    // by live range splitting.
    bool DefByOrigPHI;

    // The preferred register to spill.
    unsigned SpillReg;

    // The value of SpillReg that should be spilled.
    VNInfo *SpillVNI;

    // The block where SpillVNI should be spilled. Currently, this must be the
    // block containing SpillVNI->def.
    MachineBasicBlock *SpillMBB;

    // A defining instruction that is not a sibling copy or a reload, or NULL.
    // This can be used as a template for rematerialization.
    MachineInstr *DefMI;

    // List of values that depend on this one. These values are actually the
    // same, but live range splitting has placed them in different registers,
    // or SSA update needed to insert PHI-defs to preserve SSA form. These are
    // copies of the current value and phi-kills. Usually only phi-kills cause
    // more than one dependent value.
    TinyPtrVector<VNInfo*> Deps;

    SibValueInfo(unsigned Reg, VNInfo *VNI)
      : AllDefsAreReloads(true), DefByOrigPHI(false),
        SpillReg(Reg), SpillVNI(VNI), SpillMBB(0), DefMI(0) {}

    // Returns true when a def has been found.
    bool hasDef() const { return DefByOrigPHI || DefMI; }
  };

  // Values in RegsToSpill defined by sibling copies.
  typedef DenseMap<VNInfo*, SibValueInfo> SibValueMap;
  SibValueMap SibValues;

  // Dead defs generated during spilling.
  SmallVector<MachineInstr*, 8> DeadDefs;

public:
  InlineSpiller(MachineFunctionPass &pass,
                MachineFunction &mf,
                VirtRegMap &vrm)
    : Pass(pass),
      MF(mf),
      LIS(pass.getAnalysis<LiveIntervals>()),
      LSS(pass.getAnalysis<LiveStacks>()),
      AA(&pass.getAnalysis<AliasAnalysis>()),
      MDT(pass.getAnalysis<MachineDominatorTree>()),
      Loops(pass.getAnalysis<MachineLoopInfo>()),
      VRM(vrm),
      MFI(*mf.getFrameInfo()),
      MRI(mf.getRegInfo()),
      TII(*mf.getTarget().getInstrInfo()),
      TRI(*mf.getTarget().getRegisterInfo()) {}

  void spill(LiveRangeEdit &);

  bool isSnippet(const LiveInterval &SnipLI);
  void collectRegsToSpill();

  bool isRegToSpill(unsigned Reg) {
    return std::find(RegsToSpill.begin(),
                     RegsToSpill.end(), Reg) != RegsToSpill.end();
  }

  bool isSibling(unsigned Reg);
  MachineInstr *traceSiblingValue(unsigned, VNInfo*, VNInfo*);
  void propagateSiblingValue(SibValueMap::iterator, VNInfo *VNI = 0);
  void analyzeSiblingValues();

  bool hoistSpill(LiveInterval &SpillLI, MachineInstr *CopyMI);
  void eliminateRedundantSpills(LiveInterval &LI, VNInfo *VNI);

  void markValueUsed(LiveInterval*, VNInfo*);
  bool reMaterializeFor(LiveInterval&, MachineBasicBlock::iterator MI);
  void reMaterializeAll();

  bool coalesceStackAccess(MachineInstr *MI, unsigned Reg);
  bool foldMemoryOperand(MachineBasicBlock::iterator MI,
                         const SmallVectorImpl<unsigned> &Ops,
                         MachineInstr *LoadMI = 0);
  void insertReload(LiveInterval &NewLI, SlotIndex,
                    MachineBasicBlock::iterator MI);
  void insertSpill(LiveInterval &NewLI, const LiveInterval &OldLI,
                   SlotIndex, MachineBasicBlock::iterator MI);

  void spillAroundUses(unsigned Reg);
  void spillAll();
};
} // end anonymous namespace

Spiller *llvm::createInlineSpiller(MachineFunctionPass &pass,
                                   MachineFunction &mf,
                                   VirtRegMap &vrm) {
  return new InlineSpiller(pass, mf, vrm);
}

//===----------------------------------------------------------------------===//
//                                  Snippets
//===----------------------------------------------------------------------===//

// When spilling a virtual register, we also spill any snippets it is connected
// to. The snippets are small live ranges that only have a single real use,
// leftovers from live range splitting. Spilling them enables memory operand
// folding or tightens the live range around the single use.
//
// This minimizes register pressure and maximizes the store-to-load distance for
// spill slots, which can be important in tight loops.
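//
// Illustrative sketch (editor's addition, not part of the original comment;
// register and slot names are made up). After splitting, a typical snippet
// looks like
//
//   %snip = COPY %Reg        ; or a fill from the stack slot
//   ...   = USE %snip        ; the single real use
//   %Reg  = COPY %snip       ; or a spill to the stack slot
//
// Spilling %snip together with %Reg lets the single use fold the stack slot
// access directly instead of keeping %snip live across the copies.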

/// isFullCopyOf - If MI is a COPY to or from Reg, return the other register,
/// otherwise return 0.
static unsigned isFullCopyOf(const MachineInstr *MI, unsigned Reg) {
  if (!MI->isFullCopy())
    return 0;
  if (MI->getOperand(0).getReg() == Reg)
    return MI->getOperand(1).getReg();
  if (MI->getOperand(1).getReg() == Reg)
    return MI->getOperand(0).getReg();
  return 0;
}

/// isSnippet - Identify if a live interval is a snippet that should be spilled.
/// It is assumed that SnipLI is a virtual register with the same original as
/// Edit->getReg().
bool InlineSpiller::isSnippet(const LiveInterval &SnipLI) {
  unsigned Reg = Edit->getReg();

  // A snippet is a tiny live range with only a single instruction using it
  // besides copies to/from Reg or spills/fills. We accept:
  //
  //   %snip = COPY %Reg / FILL fi#
  //   %snip = USE %snip
  //   %Reg = COPY %snip / SPILL %snip, fi#
  //
  if (SnipLI.getNumValNums() > 2 || !LIS.intervalIsInOneMBB(SnipLI))
    return false;

  MachineInstr *UseMI = 0;

  // Check that all uses satisfy our criteria.
  for (MachineRegisterInfo::reg_nodbg_iterator
         RI = MRI.reg_nodbg_begin(SnipLI.reg);
       MachineInstr *MI = RI.skipInstruction();) {

    // Allow copies to/from Reg.
    if (isFullCopyOf(MI, Reg))
      continue;

    // Allow stack slot loads.
    int FI;
    if (SnipLI.reg == TII.isLoadFromStackSlot(MI, FI) && FI == StackSlot)
      continue;

    // Allow stack slot stores.
    if (SnipLI.reg == TII.isStoreToStackSlot(MI, FI) && FI == StackSlot)
      continue;

    // Allow a single additional instruction.
    if (UseMI && MI != UseMI)
      return false;
    UseMI = MI;
  }
  return true;
}

/// collectRegsToSpill - Collect live range snippets that only have a single
/// real use.
void InlineSpiller::collectRegsToSpill() {
  unsigned Reg = Edit->getReg();

  // Main register always spills.
  RegsToSpill.assign(1, Reg);
  SnippetCopies.clear();

  // Snippets all have the same original, so there can't be any for an original
  // register.
  if (Original == Reg)
    return;

  for (MachineRegisterInfo::reg_iterator RI = MRI.reg_begin(Reg);
       MachineInstr *MI = RI.skipInstruction();) {
    unsigned SnipReg = isFullCopyOf(MI, Reg);
    if (!isSibling(SnipReg))
      continue;
    LiveInterval &SnipLI = LIS.getInterval(SnipReg);
    if (!isSnippet(SnipLI))
      continue;
    SnippetCopies.insert(MI);
    if (isRegToSpill(SnipReg))
      continue;
    RegsToSpill.push_back(SnipReg);
    DEBUG(dbgs() << "\talso spill snippet " << SnipLI << '\n');
  }
}

//===----------------------------------------------------------------------===//
//                            Sibling Values
//===----------------------------------------------------------------------===//

// After live range splitting, some values to be spilled may be defined by
// copies from sibling registers. We trace the sibling copies back to the
// original value if it still exists. We need it for rematerialization.
//
// Even when the value can't be rematerialized, we still want to determine if
// the value has already been spilled, or we may want to hoist the spill from a
// loop.
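//
// Illustrative sketch (editor's addition; register and slot names are made
// up, not from the original comment):
//
//   %A = LOAD fi#0            ; reload of the original value
//   %B = COPY %A              ; sibling copy created by splitting
//
// When %B is about to be spilled, tracing its value back through the COPY
// shows that the only reaching def is a reload from the stack slot, so the
// value is already on the stack and no new spill of %B is required
// (AllDefsAreReloads).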

bool InlineSpiller::isSibling(unsigned Reg) {
  return TargetRegisterInfo::isVirtualRegister(Reg) &&
         VRM.getOriginal(Reg) == Original;
}

static raw_ostream &operator<<(raw_ostream &OS,
                               const InlineSpiller::SibValueInfo &SVI) {
  OS << "spill " << PrintReg(SVI.SpillReg) << ':'
     << SVI.SpillVNI->id << '@' << SVI.SpillVNI->def;
  OS << " in BB#" << SVI.SpillMBB->getNumber();
  if (SVI.AllDefsAreReloads)
    OS << " all-reloads";
  if (SVI.DefByOrigPHI)
    OS << " orig-phi";
  for (unsigned i = 0, e = SVI.Deps.size(); i != e; ++i)
    OS << ' ' << SVI.Deps[i]->id << '@' << SVI.Deps[i]->def;
  OS << " def: " << *SVI.DefMI;
  return OS;
}

/// propagateSiblingValue - Propagate the value in SVI to dependents if it is
/// known. Otherwise remember the dependency for later.
///
/// @param SVI SibValues entry to propagate.
/// @param VNI Dependent value, or NULL to propagate to all saved dependents.
void InlineSpiller::propagateSiblingValue(SibValueMap::iterator SVI,
                                          VNInfo *VNI) {
  // When VNI is non-NULL, add it to SVI's deps, and only propagate to that.
  TinyPtrVector<VNInfo*> FirstDeps;
  if (VNI) {
    FirstDeps.push_back(VNI);
    SVI->second.Deps.push_back(VNI);
  }

  // Has the value been completely determined yet? If not, defer propagation.
  if (!SVI->second.hasDef())
    return;

  // Work list of values to propagate. It would be nice to use a SetVector
  // here, but then we would be forced to use a SmallSet.
  SmallVector<SibValueMap::iterator, 8> WorkList(1, SVI);
  SmallPtrSet<VNInfo*, 8> WorkSet;

  do {
    SVI = WorkList.pop_back_val();
    WorkSet.erase(SVI->first);
    TinyPtrVector<VNInfo*> *Deps = VNI ? &FirstDeps : &SVI->second.Deps;

    SibValueInfo &SV = SVI->second;
    SV.SpillMBB = LIS.getMBBFromIndex(SV.SpillVNI->def);

    DEBUG(dbgs() << " prop to " << Deps->size() << ": "
                 << SVI->first->id << '@' << SVI->first->def << ":\t" << SV);

    assert(SV.hasDef() && "Propagating undefined value");

    // Should this value be propagated as a preferred spill candidate? We don't
    // propagate values of registers that are about to spill.
    bool PropSpill = !isRegToSpill(SV.SpillReg);
    unsigned SpillDepth = ~0u;

    for (TinyPtrVector<VNInfo*>::iterator DepI = Deps->begin(),
           DepE = Deps->end(); DepI != DepE; ++DepI) {
      SibValueMap::iterator DepSVI = SibValues.find(*DepI);
      assert(DepSVI != SibValues.end() && "Dependent value not in SibValues");
      SibValueInfo &DepSV = DepSVI->second;
      DepSV.SpillMBB = LIS.getMBBFromIndex(DepSV.SpillVNI->def);

      bool Changed = false;

      // Propagate defining instruction.
      if (!DepSV.hasDef()) {
        Changed = true;
        DepSV.DefMI = SV.DefMI;
        DepSV.DefByOrigPHI = SV.DefByOrigPHI;
      }

      // Propagate AllDefsAreReloads. For PHI values, this computes an AND of
      // all the incoming values.
      if (!SV.AllDefsAreReloads && DepSV.AllDefsAreReloads) {
        Changed = true;
        DepSV.AllDefsAreReloads = false;
      }

      // Propagate best spill value.
      if (PropSpill && SV.SpillVNI != DepSV.SpillVNI) {
        if (SV.SpillMBB == DepSV.SpillMBB) {
          // DepSV is in the same block. Hoist when dominated.
          if (SV.SpillVNI->def < DepSV.SpillVNI->def) {
            // This is an alternative def earlier in the same MBB.
            // Hoist the spill as far as possible in SpillMBB. This can ease
            // register pressure:
            //
            //   x = def
            //   y = use x
            //   s = copy x
            //
            // Hoisting the spill of s to immediately after the def removes the
            // interference between x and y:
            //
            //   x = def
            //   spill x
            //   y = use x<kill>
            //   s = copy x
            //
            Changed = true;
            DepSV.SpillReg = SV.SpillReg;
            DepSV.SpillVNI = SV.SpillVNI;
            DepSV.SpillMBB = SV.SpillMBB;
          }
        } else {
          // DepSV is in a different block.
          if (SpillDepth == ~0u)
            SpillDepth = Loops.getLoopDepth(SV.SpillMBB);

          // Also hoist spills to blocks with smaller loop depth, but make sure
          // that the new value dominates. Non-phi dependents are always
          // dominated, phis need checking.
          if ((Loops.getLoopDepth(DepSV.SpillMBB) > SpillDepth) &&
              (!DepSVI->first->isPHIDef() ||
               MDT.dominates(SV.SpillMBB, DepSV.SpillMBB))) {
            Changed = true;
            DepSV.SpillReg = SV.SpillReg;
            DepSV.SpillVNI = SV.SpillVNI;
            DepSV.SpillMBB = SV.SpillMBB;
          }
        }
      }

      if (!Changed)
        continue;

      // Something changed in DepSVI. Propagate to dependents.
      if (WorkSet.insert(DepSVI->first))
        WorkList.push_back(DepSVI);

      DEBUG(dbgs() << " update " << DepSVI->first->id << '@'
                   << DepSVI->first->def << " to:\t" << DepSV);
    }
  } while (!WorkList.empty());
}

/// traceSiblingValue - Trace a value that is about to be spilled back to the
/// real defining instructions by looking through sibling copies. Always stay
/// within the range of OrigVNI so the registers are known to carry the same
/// value.
///
/// Determine if the value is defined by all reloads, so spilling isn't
/// necessary - the value is already in the stack slot.
///
/// Return a defining instruction that may be a candidate for rematerialization.
MachineInstr *InlineSpiller::traceSiblingValue(unsigned UseReg, VNInfo *UseVNI,
                                               VNInfo *OrigVNI) {
  // Check if a cached value already exists.
  SibValueMap::iterator SVI;
  bool Inserted;
  tie(SVI, Inserted) =
    SibValues.insert(std::make_pair(UseVNI, SibValueInfo(UseReg, UseVNI)));
  if (!Inserted) {
    DEBUG(dbgs() << "Cached value " << PrintReg(UseReg) << ':'
                 << UseVNI->id << '@' << UseVNI->def << ' ' << SVI->second);
    return SVI->second.DefMI;
  }

  DEBUG(dbgs() << "Tracing value " << PrintReg(UseReg) << ':'
               << UseVNI->id << '@' << UseVNI->def << '\n');

  // List of (Reg, VNI) that have been inserted into SibValues, but need to be
  // processed.
  SmallVector<std::pair<unsigned, VNInfo*>, 8> WorkList;
  WorkList.push_back(std::make_pair(UseReg, UseVNI));

  do {
    unsigned Reg;
    VNInfo *VNI;
    tie(Reg, VNI) = WorkList.pop_back_val();
    DEBUG(dbgs() << " " << PrintReg(Reg) << ':' << VNI->id << '@' << VNI->def
                 << ":\t");

    // First check if this value has already been computed.
    SVI = SibValues.find(VNI);
    assert(SVI != SibValues.end() && "Missing SibValues entry");

    // Trace through PHI-defs created by live range splitting.
    if (VNI->isPHIDef()) {
      // Stop at original PHIs. We don't know the value at the predecessors.
      if (VNI->def == OrigVNI->def) {
        DEBUG(dbgs() << "orig phi value\n");
        SVI->second.DefByOrigPHI = true;
        SVI->second.AllDefsAreReloads = false;
        propagateSiblingValue(SVI);
        continue;
      }

      // This is a PHI inserted by live range splitting. We could trace the
      // live-out value from predecessor blocks, but that search can be very
      // expensive if there are many predecessors and many more PHIs as
      // generated by tail-dup when it sees an indirectbr. Instead, look at
      // all the non-PHI defs that have the same value as OrigVNI. They must
      // jointly dominate VNI->def. This is not optimal since VNI may actually
      // be jointly dominated by a smaller subset of defs, so there is a chance
      // we will miss an AllDefsAreReloads optimization.

      // Separate all values dominated by OrigVNI into PHIs and non-PHIs.
      SmallVector<VNInfo*, 8> PHIs, NonPHIs;
      LiveInterval &LI = LIS.getInterval(Reg);
      LiveInterval &OrigLI = LIS.getInterval(Original);

      for (LiveInterval::vni_iterator VI = LI.vni_begin(), VE = LI.vni_end();
           VI != VE; ++VI) {
        VNInfo *VNI2 = *VI;
        if (VNI2->isUnused())
          continue;
        if (!OrigLI.containsOneValue() &&
            OrigLI.getVNInfoAt(VNI2->def) != OrigVNI)
          continue;
        if (VNI2->isPHIDef() && VNI2->def != OrigVNI->def)
          PHIs.push_back(VNI2);
        else
          NonPHIs.push_back(VNI2);
      }
      DEBUG(dbgs() << "split phi value, checking " << PHIs.size()
                   << " phi-defs, and " << NonPHIs.size()
                   << " non-phi/orig defs\n");

      // Create entries for all the PHIs. Don't add them to the worklist, we
      // are processing all of them in one go here.
      for (unsigned i = 0, e = PHIs.size(); i != e; ++i)
        SibValues.insert(std::make_pair(PHIs[i], SibValueInfo(Reg, PHIs[i])));

      // Add every PHI as a dependent of all the non-PHIs.
      for (unsigned i = 0, e = NonPHIs.size(); i != e; ++i) {
        VNInfo *NonPHI = NonPHIs[i];
        // Known value? Try an insertion.
        tie(SVI, Inserted) =
          SibValues.insert(std::make_pair(NonPHI, SibValueInfo(Reg, NonPHI)));
        // Add all the PHIs as dependents of NonPHI.
        for (unsigned pi = 0, pe = PHIs.size(); pi != pe; ++pi)
          SVI->second.Deps.push_back(PHIs[pi]);
        // This is the first time we see NonPHI, add it to the worklist.
        if (Inserted)
          WorkList.push_back(std::make_pair(Reg, NonPHI));
        // Propagate to all inserted PHIs, not just VNI.
        propagateSiblingValue(SVI);
      }

      // Next work list item.
      continue;
    }

    MachineInstr *MI = LIS.getInstructionFromIndex(VNI->def);
    assert(MI && "Missing def");

    // Trace through sibling copies.
    if (unsigned SrcReg = isFullCopyOf(MI, Reg)) {
      if (isSibling(SrcReg)) {
        LiveInterval &SrcLI = LIS.getInterval(SrcReg);
        VNInfo *SrcVNI = SrcLI.getVNInfoAt(VNI->def.getUseIndex());
        assert(SrcVNI && "Copy from non-existing value");
        DEBUG(dbgs() << "copy of " << PrintReg(SrcReg) << ':'
                     << SrcVNI->id << '@' << SrcVNI->def << '\n');
        // Known sibling source value? Try an insertion.
        tie(SVI, Inserted) = SibValues.insert(std::make_pair(SrcVNI,
                                              SibValueInfo(SrcReg, SrcVNI)));
        // This is the first time we see Src, add it to the worklist.
        if (Inserted)
          WorkList.push_back(std::make_pair(SrcReg, SrcVNI));
        propagateSiblingValue(SVI, VNI);
        // Next work list item.
        continue;
      }
    }

    // Track reachable reloads.
    SVI->second.DefMI = MI;
    SVI->second.SpillMBB = MI->getParent();
    int FI;
    if (Reg == TII.isLoadFromStackSlot(MI, FI) && FI == StackSlot) {
      DEBUG(dbgs() << "reload\n");
      propagateSiblingValue(SVI);
      // Next work list item.
      continue;
    }

    // Potential remat candidate.
    DEBUG(dbgs() << "def " << *MI);
    SVI->second.AllDefsAreReloads = false;
    propagateSiblingValue(SVI);
  } while (!WorkList.empty());

  // Look up the value we were looking for. We already did this lookup at the
  // top of the function, but SibValues may have been invalidated.
  SVI = SibValues.find(UseVNI);
  assert(SVI != SibValues.end() && "Didn't compute requested info");
  DEBUG(dbgs() << " traced to:\t" << SVI->second);
  return SVI->second.DefMI;
}

/// analyzeSiblingValues - Trace values defined by sibling copies back to
/// something that isn't a sibling copy.
///
/// Keep track of values that may be rematerializable.
void InlineSpiller::analyzeSiblingValues() {
  // No siblings at all?
  if (Edit->getReg() == Original)
    return;

  LiveInterval &OrigLI = LIS.getInterval(Original);
  for (unsigned i = 0, e = RegsToSpill.size(); i != e; ++i) {
    unsigned Reg = RegsToSpill[i];
    LiveInterval &LI = LIS.getInterval(Reg);
    for (LiveInterval::const_vni_iterator VI = LI.vni_begin(),
         VE = LI.vni_end(); VI != VE; ++VI) {
      VNInfo *VNI = *VI;
      MachineInstr *DefMI = 0;
      // Check possible sibling copies.
      if (VNI->isPHIDef() || VNI->getCopy()) {
        VNInfo *OrigVNI = OrigLI.getVNInfoAt(VNI->def);
        assert(OrigVNI && "Def outside original live range");
        if (OrigVNI->def != VNI->def)
          DefMI = traceSiblingValue(Reg, VNI, OrigVNI);
      }
      if (!DefMI && !VNI->isPHIDef())
        DefMI = LIS.getInstructionFromIndex(VNI->def);
      if (DefMI && Edit->checkRematerializable(VNI, DefMI, TII, AA)) {
        DEBUG(dbgs() << "Value " << PrintReg(Reg) << ':' << VNI->id << '@'
                     << VNI->def << " may remat from " << *DefMI);
      }
    }
  }
}

/// hoistSpill - Given a sibling copy that defines a value to be spilled, insert
/// a spill at a better location.
bool InlineSpiller::hoistSpill(LiveInterval &SpillLI, MachineInstr *CopyMI) {
  SlotIndex Idx = LIS.getInstructionIndex(CopyMI);
  VNInfo *VNI = SpillLI.getVNInfoAt(Idx.getDefIndex());
  assert(VNI && VNI->def == Idx.getDefIndex() && "Not defined by copy");
  SibValueMap::iterator I = SibValues.find(VNI);
  if (I == SibValues.end())
    return false;

  const SibValueInfo &SVI = I->second;

  // Let the normal folding code deal with the boring case.
  if (!SVI.AllDefsAreReloads && SVI.SpillVNI == VNI)
    return false;

  // SpillReg may have been deleted by remat and DCE.
  if (!LIS.hasInterval(SVI.SpillReg)) {
    DEBUG(dbgs() << "Stale interval: " << PrintReg(SVI.SpillReg) << '\n');
    return false;
  }

  LiveInterval &SibLI = LIS.getInterval(SVI.SpillReg);
  if (!SibLI.containsValue(SVI.SpillVNI)) {
    DEBUG(dbgs() << "Stale value: " << PrintReg(SVI.SpillReg) << '\n');
    return false;
  }

  // Conservatively extend the stack slot range to the range of the original
  // value. We may be able to do better with stack slot coloring by being more
  // careful here.
  assert(StackInt && "No stack slot assigned yet.");
  LiveInterval &OrigLI = LIS.getInterval(Original);
  VNInfo *OrigVNI = OrigLI.getVNInfoAt(Idx);
  StackInt->MergeValueInAsValue(OrigLI, OrigVNI, StackInt->getValNumInfo(0));
  DEBUG(dbgs() << "\tmerged orig valno " << OrigVNI->id << ": "
               << *StackInt << '\n');

  // Already spilled everywhere.
  if (SVI.AllDefsAreReloads) {
    DEBUG(dbgs() << "\tno spill needed: " << SVI);
    ++NumOmitReloadSpill;
    return true;
  }

  // We are going to spill SVI.SpillVNI immediately after its def, so clear out
  // any later spills of the same value.
  eliminateRedundantSpills(SibLI, SVI.SpillVNI);

  MachineBasicBlock *MBB = LIS.getMBBFromIndex(SVI.SpillVNI->def);
  MachineBasicBlock::iterator MII;
  if (SVI.SpillVNI->isPHIDef())
    MII = MBB->SkipPHIsAndLabels(MBB->begin());
  else {
    MachineInstr *DefMI = LIS.getInstructionFromIndex(SVI.SpillVNI->def);
    assert(DefMI && "Defining instruction disappeared");
    MII = DefMI;
    ++MII;
  }
  // Insert spill without kill flag immediately after def.
  TII.storeRegToStackSlot(*MBB, MII, SVI.SpillReg, false, StackSlot,
                          MRI.getRegClass(SVI.SpillReg), &TRI);
  --MII; // Point to store instruction.
  LIS.InsertMachineInstrInMaps(MII);
  VRM.addSpillSlotUse(StackSlot, MII);
  DEBUG(dbgs() << "\thoisted: " << SVI.SpillVNI->def << '\t' << *MII);

  if (MBB == CopyMI->getParent())
    ++NumHoistLocal;
  else
    ++NumHoistGlobal;
  return true;
}

/// eliminateRedundantSpills - SLI:VNI is known to be on the stack. Remove any
/// redundant spills of this value in SLI.reg and sibling copies.
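///
/// For example (editor's illustration, not from the original comment):
///   SPILL %A, fi#0        ; the spill that makes the value available
///   %B = COPY %A          ; sibling copy of the same value
///   SPILL %B, fi#0        ; redundant: fi#0 already holds this value
/// The second spill is rewritten to a KILL and queued in DeadDefs for removal.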
void InlineSpiller::eliminateRedundantSpills(LiveInterval &SLI, VNInfo *VNI) {
  assert(VNI && "Missing value");
  SmallVector<std::pair<LiveInterval*, VNInfo*>, 8> WorkList;
  WorkList.push_back(std::make_pair(&SLI, VNI));
  assert(StackInt && "No stack slot assigned yet.");

  do {
    LiveInterval *LI;
    tie(LI, VNI) = WorkList.pop_back_val();
    unsigned Reg = LI->reg;
    DEBUG(dbgs() << "Checking redundant spills for "
                 << VNI->id << '@' << VNI->def << " in " << *LI << '\n');

    // Regs to spill are taken care of.
    if (isRegToSpill(Reg))
      continue;

    // Add all of VNI's live range to StackInt.
    StackInt->MergeValueInAsValue(*LI, VNI, StackInt->getValNumInfo(0));
    DEBUG(dbgs() << "Merged to stack int: " << *StackInt << '\n');

    // Find all spills and copies of VNI.
    for (MachineRegisterInfo::use_nodbg_iterator UI = MRI.use_nodbg_begin(Reg);
         MachineInstr *MI = UI.skipInstruction();) {
      if (!MI->isCopy() && !MI->getDesc().mayStore())
        continue;
      SlotIndex Idx = LIS.getInstructionIndex(MI);
      if (LI->getVNInfoAt(Idx) != VNI)
        continue;

      // Follow sibling copies down the dominator tree.
      if (unsigned DstReg = isFullCopyOf(MI, Reg)) {
        if (isSibling(DstReg)) {
          LiveInterval &DstLI = LIS.getInterval(DstReg);
          VNInfo *DstVNI = DstLI.getVNInfoAt(Idx.getDefIndex());
          assert(DstVNI && "Missing defined value");
          assert(DstVNI->def == Idx.getDefIndex() && "Wrong copy def slot");
          WorkList.push_back(std::make_pair(&DstLI, DstVNI));
        }
        continue;
      }

      int FI;
      if (Reg == TII.isStoreToStackSlot(MI, FI) && FI == StackSlot) {
        DEBUG(dbgs() << "Redundant spill " << Idx << '\t' << *MI);
        // eliminateDeadDefs won't normally remove stores, so switch opcode.
        MI->setDesc(TII.get(TargetOpcode::KILL));
        DeadDefs.push_back(MI);
        ++NumRedundantSpills;
      }
    }
  } while (!WorkList.empty());
}

//===----------------------------------------------------------------------===//
//                            Rematerialization
//===----------------------------------------------------------------------===//

/// markValueUsed - Remember that VNI failed to rematerialize, so its defining
/// instruction cannot be eliminated. See through snippet copies.
void InlineSpiller::markValueUsed(LiveInterval *LI, VNInfo *VNI) {
  SmallVector<std::pair<LiveInterval*, VNInfo*>, 8> WorkList;
  WorkList.push_back(std::make_pair(LI, VNI));
  do {
    tie(LI, VNI) = WorkList.pop_back_val();
    if (!UsedValues.insert(VNI))
      continue;

    if (VNI->isPHIDef()) {
      MachineBasicBlock *MBB = LIS.getMBBFromIndex(VNI->def);
      for (MachineBasicBlock::pred_iterator PI = MBB->pred_begin(),
           PE = MBB->pred_end(); PI != PE; ++PI) {
        VNInfo *PVNI = LI->getVNInfoAt(LIS.getMBBEndIdx(*PI).getPrevSlot());
        if (PVNI)
          WorkList.push_back(std::make_pair(LI, PVNI));
      }
      continue;
    }

    // Follow snippet copies.
    MachineInstr *MI = LIS.getInstructionFromIndex(VNI->def);
    if (!SnippetCopies.count(MI))
      continue;
    LiveInterval &SnipLI = LIS.getInterval(MI->getOperand(1).getReg());
    assert(isRegToSpill(SnipLI.reg) && "Unexpected register in copy");
    VNInfo *SnipVNI = SnipLI.getVNInfoAt(VNI->def.getUseIndex());
    assert(SnipVNI && "Snippet undefined before copy");
    WorkList.push_back(std::make_pair(&SnipLI, SnipVNI));
  } while (!WorkList.empty());
}

/// reMaterializeFor - Attempt to rematerialize before MI instead of reloading.
bool InlineSpiller::reMaterializeFor(LiveInterval &VirtReg,
                                     MachineBasicBlock::iterator MI) {
  SlotIndex UseIdx = LIS.getInstructionIndex(MI).getUseIndex();
  VNInfo *ParentVNI = VirtReg.getVNInfoAt(UseIdx.getBaseIndex());

  if (!ParentVNI) {
    DEBUG(dbgs() << "\tadding <undef> flags: ");
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(i);
      if (MO.isReg() && MO.isUse() && MO.getReg() == VirtReg.reg)
        MO.setIsUndef();
    }
    DEBUG(dbgs() << UseIdx << '\t' << *MI);
    return true;
  }

  if (SnippetCopies.count(MI))
    return false;

  // Use an OrigVNI from traceSiblingValue when ParentVNI is a sibling copy.
  LiveRangeEdit::Remat RM(ParentVNI);
  SibValueMap::const_iterator SibI = SibValues.find(ParentVNI);
  if (SibI != SibValues.end())
    RM.OrigMI = SibI->second.DefMI;
  if (!Edit->canRematerializeAt(RM, UseIdx, false, LIS)) {
    markValueUsed(&VirtReg, ParentVNI);
    DEBUG(dbgs() << "\tcannot remat for " << UseIdx << '\t' << *MI);
    return false;
  }

  // If the instruction also writes VirtReg.reg, it had better not require the
  // same register for uses and defs.
  bool Reads, Writes;
  SmallVector<unsigned, 8> Ops;
  tie(Reads, Writes) = MI->readsWritesVirtualRegister(VirtReg.reg, &Ops);
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(Ops[i]);
    if (MO.isUse() ? MI->isRegTiedToDefOperand(Ops[i]) : MO.getSubReg()) {
      markValueUsed(&VirtReg, ParentVNI);
      DEBUG(dbgs() << "\tcannot remat tied reg: " << UseIdx << '\t' << *MI);
      return false;
    }
  }

  // Before rematerializing into a register for a single instruction, try to
  // fold a load into the instruction. That avoids allocating a new register.
  if (RM.OrigMI->getDesc().canFoldAsLoad() &&
      foldMemoryOperand(MI, Ops, RM.OrigMI)) {
    Edit->markRematerialized(RM.ParentVNI);
    return true;
  }

  // Allocate a new register for the remat.
  LiveInterval &NewLI = Edit->createFrom(Original, LIS, VRM);
  NewLI.markNotSpillable();

  // Finally we can rematerialize OrigMI before MI.
  SlotIndex DefIdx = Edit->rematerializeAt(*MI->getParent(), MI, NewLI.reg, RM,
                                           LIS, TII, TRI);
  DEBUG(dbgs() << "\tremat: " << DefIdx << '\t'
               << *LIS.getInstructionFromIndex(DefIdx));

  // Replace operands with the new register.
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(Ops[i]);
    if (MO.isReg() && MO.isUse() && MO.getReg() == VirtReg.reg) {
      MO.setReg(NewLI.reg);
      MO.setIsKill();
    }
  }
  DEBUG(dbgs() << "\t " << UseIdx << '\t' << *MI);

  VNInfo *DefVNI = NewLI.getNextValue(DefIdx, 0, LIS.getVNInfoAllocator());
  NewLI.addRange(LiveRange(DefIdx, UseIdx.getDefIndex(), DefVNI));
  DEBUG(dbgs() << "\tinterval: " << NewLI << '\n');
  return true;
}

/// reMaterializeAll - Try to rematerialize as many uses as possible,
/// and trim the live ranges after.
void InlineSpiller::reMaterializeAll() {
  // analyzeSiblingValues has already tested all relevant defining instructions.
  if (!Edit->anyRematerializable(LIS, TII, AA))
    return;

  // Try to remat before all uses of snippets.
  bool anyRemat = false;
  for (unsigned i = 0, e = RegsToSpill.size(); i != e; ++i) {
    unsigned Reg = RegsToSpill[i];
    LiveInterval &LI = LIS.getInterval(Reg);
    for (MachineRegisterInfo::use_nodbg_iterator
         RI = MRI.use_nodbg_begin(Reg);
         MachineInstr *MI = RI.skipInstruction();)
      anyRemat |= reMaterializeFor(LI, MI);
  }
  if (!anyRemat)
    return;

  // Remove any values that were completely rematted.
  for (unsigned i = 0, e = RegsToSpill.size(); i != e; ++i) {
    unsigned Reg = RegsToSpill[i];
    LiveInterval &LI = LIS.getInterval(Reg);
    for (LiveInterval::vni_iterator I = LI.vni_begin(), E = LI.vni_end();
         I != E; ++I) {
      VNInfo *VNI = *I;
      if (VNI->isUnused() || VNI->isPHIDef() || UsedValues.count(VNI))
        continue;
      MachineInstr *MI = LIS.getInstructionFromIndex(VNI->def);
      MI->addRegisterDead(Reg, &TRI);
      if (!MI->allDefsAreDead())
        continue;
      DEBUG(dbgs() << "All defs dead: " << *MI);
      DeadDefs.push_back(MI);
    }
  }

  // Eliminate dead code after remat. Note that some snippet copies may be
  // deleted too.
  if (DeadDefs.empty())
    return;
  DEBUG(dbgs() << "Remat created " << DeadDefs.size() << " dead defs.\n");
  Edit->eliminateDeadDefs(DeadDefs, LIS, VRM, TII);

  // Get rid of deleted and empty intervals.
  for (unsigned i = RegsToSpill.size(); i != 0; --i) {
    unsigned Reg = RegsToSpill[i-1];
    if (!LIS.hasInterval(Reg)) {
      RegsToSpill.erase(RegsToSpill.begin() + (i - 1));
      continue;
    }
    LiveInterval &LI = LIS.getInterval(Reg);
    if (!LI.empty())
      continue;
    Edit->eraseVirtReg(Reg, LIS);
    RegsToSpill.erase(RegsToSpill.begin() + (i - 1));
  }
  DEBUG(dbgs() << RegsToSpill.size() << " registers to spill after remat.\n");
}

//===----------------------------------------------------------------------===//
//                                Stack Slots
//===----------------------------------------------------------------------===//

/// If MI is a load or store of StackSlot, it can be removed.
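///
/// For example (editor's illustration, not from the original comment): once
/// %Reg is being assigned StackSlot anyway, an instruction like
///   %Reg = LOAD fi#StackSlot
/// merely copies the slot's value to itself, so the access coalesces away and
/// the instruction can simply be deleted.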
bool InlineSpiller::coalesceStackAccess(MachineInstr *MI, unsigned Reg) {
  int FI;
  unsigned InstrReg;
  if (!(InstrReg = TII.isLoadFromStackSlot(MI, FI)) &&
      !(InstrReg = TII.isStoreToStackSlot(MI, FI)))
    return false;

  // We have a stack access. Is it the right register and slot?
  if (InstrReg != Reg || FI != StackSlot)
    return false;

  DEBUG(dbgs() << "Coalescing stack access: " << *MI);
  LIS.RemoveMachineInstrFromMaps(MI);
  MI->eraseFromParent();
  return true;
}

/// foldMemoryOperand - Try folding stack slot references in Ops into MI.
/// @param MI     Instruction using or defining the current register.
/// @param Ops    Operand indices from readsWritesVirtualRegister().
/// @param LoadMI Load instruction to use instead of stack slot when non-null.
/// @return       True on success, and MI will be erased.
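///
/// For example (editor's illustration in x86-style pseudo code, not from the
/// original comment): instead of reloading into a scratch register,
///   %x = LOAD fi#0
///   ... = ADD %x, %y
/// the target may fold the stack slot directly into the user,
///   ... = ADD [fi#0], %y
/// removing both the reload and the temporary register.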
bool InlineSpiller::foldMemoryOperand(MachineBasicBlock::iterator MI,
                                      const SmallVectorImpl<unsigned> &Ops,
                                      MachineInstr *LoadMI) {
  // TargetInstrInfo::foldMemoryOperand only expects explicit, non-tied
  // operands.
  SmallVector<unsigned, 8> FoldOps;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    unsigned Idx = Ops[i];
    MachineOperand &MO = MI->getOperand(Idx);
    if (MO.isImplicit())
      continue;
    // FIXME: Teach targets to deal with subregs.
    if (MO.getSubReg())
      return false;
    // We cannot fold a load instruction into a def.
    if (LoadMI && MO.isDef())
      return false;
    // Tied use operands should not be passed to foldMemoryOperand.
    if (!MI->isRegTiedToDefOperand(Idx))
      FoldOps.push_back(Idx);
  }

  MachineInstr *FoldMI =
    LoadMI ? TII.foldMemoryOperand(MI, FoldOps, LoadMI)
           : TII.foldMemoryOperand(MI, FoldOps, StackSlot);
  if (!FoldMI)
    return false;
  LIS.ReplaceMachineInstrInMaps(MI, FoldMI);
  VRM.addSpillSlotUse(StackSlot, FoldMI);
  MI->eraseFromParent();
  DEBUG(dbgs() << "\tfolded: " << *FoldMI);
  return true;
}

/// insertReload - Insert a reload of NewLI.reg before MI.
void InlineSpiller::insertReload(LiveInterval &NewLI,
                                 SlotIndex Idx,
                                 MachineBasicBlock::iterator MI) {
  MachineBasicBlock &MBB = *MI->getParent();
  TII.loadRegFromStackSlot(MBB, MI, NewLI.reg, StackSlot,
                           MRI.getRegClass(NewLI.reg), &TRI);
  --MI; // Point to load instruction.
  SlotIndex LoadIdx = LIS.InsertMachineInstrInMaps(MI).getDefIndex();
  VRM.addSpillSlotUse(StackSlot, MI);
  DEBUG(dbgs() << "\treload: " << LoadIdx << '\t' << *MI);
  VNInfo *LoadVNI = NewLI.getNextValue(LoadIdx, 0,
                                       LIS.getVNInfoAllocator());
  NewLI.addRange(LiveRange(LoadIdx, Idx, LoadVNI));
}

/// insertSpill - Insert a spill of NewLI.reg after MI.
void InlineSpiller::insertSpill(LiveInterval &NewLI, const LiveInterval &OldLI,
                                SlotIndex Idx, MachineBasicBlock::iterator MI) {
  MachineBasicBlock &MBB = *MI->getParent();
  TII.storeRegToStackSlot(MBB, ++MI, NewLI.reg, true, StackSlot,
                          MRI.getRegClass(NewLI.reg), &TRI);
  --MI; // Point to store instruction.
  SlotIndex StoreIdx = LIS.InsertMachineInstrInMaps(MI).getDefIndex();
  VRM.addSpillSlotUse(StackSlot, MI);
  DEBUG(dbgs() << "\tspilled: " << StoreIdx << '\t' << *MI);
  VNInfo *StoreVNI = NewLI.getNextValue(Idx, 0, LIS.getVNInfoAllocator());
  NewLI.addRange(LiveRange(Idx, StoreIdx, StoreVNI));
}

/// spillAroundUses - Insert spill code around each use of Reg.
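///
/// Roughly (editor's illustration, not from the original comment), a use is
/// rewritten as
///   %new = LOAD fi#StackSlot      ; insertReload
///   ...  = USE %new               ; operand rewritten from %Reg to %new
/// and an instruction that defines the value gets a store added after it:
///   %new = DEF ...
///   SPILL %new, fi#StackSlot      ; insertSpill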
void InlineSpiller::spillAroundUses(unsigned Reg) {
  DEBUG(dbgs() << "spillAroundUses " << PrintReg(Reg) << '\n');
  LiveInterval &OldLI = LIS.getInterval(Reg);

  // Iterate over instructions using Reg.
  for (MachineRegisterInfo::reg_iterator RI = MRI.reg_begin(Reg);
       MachineInstr *MI = RI.skipInstruction();) {

    // Debug values are not allowed to affect codegen.
    if (MI->isDebugValue()) {
      // Modify DBG_VALUE now that the value is in a spill slot.
      uint64_t Offset = MI->getOperand(1).getImm();
      const MDNode *MDPtr = MI->getOperand(2).getMetadata();
      DebugLoc DL = MI->getDebugLoc();
      if (MachineInstr *NewDV = TII.emitFrameIndexDebugValue(MF, StackSlot,
                                                             Offset, MDPtr, DL)) {
        DEBUG(dbgs() << "Modifying debug info due to spill:" << "\t" << *MI);
        MachineBasicBlock *MBB = MI->getParent();
        MBB->insert(MBB->erase(MI), NewDV);
      } else {
        DEBUG(dbgs() << "Removing debug info due to spill:" << "\t" << *MI);
        MI->eraseFromParent();
      }
      continue;
    }

    // Ignore copies to/from snippets. We'll delete them.
    if (SnippetCopies.count(MI))
      continue;

    // Stack slot accesses may coalesce away.
    if (coalesceStackAccess(MI, Reg))
      continue;

    // Analyze instruction.
    bool Reads, Writes;
    SmallVector<unsigned, 8> Ops;
    tie(Reads, Writes) = MI->readsWritesVirtualRegister(Reg, &Ops);

    // Find the slot index where this instruction reads and writes OldLI.
    // This is usually the def slot, except for tied early clobbers.
    SlotIndex Idx = LIS.getInstructionIndex(MI).getDefIndex();
    if (VNInfo *VNI = OldLI.getVNInfoAt(Idx.getUseIndex()))
      if (SlotIndex::isSameInstr(Idx, VNI->def))
        Idx = VNI->def;

    // Check for a sibling copy.
    unsigned SibReg = isFullCopyOf(MI, Reg);
    if (SibReg && isSibling(SibReg)) {
      // This may actually be a copy between snippets.
      if (isRegToSpill(SibReg)) {
        DEBUG(dbgs() << "Found new snippet copy: " << *MI);
        SnippetCopies.insert(MI);
        continue;
      }
      // Hoist the spill of a sib-reg copy.
      if (hoistSpill(OldLI, MI)) {
        // This COPY is now dead, the value is already in the stack slot.
        MI->getOperand(0).setIsDead();
        DeadDefs.push_back(MI);
        continue;
      }
      // This is a reload for a sib-reg copy. Drop spills downstream.
      LiveInterval &SibLI = LIS.getInterval(SibReg);
      eliminateRedundantSpills(SibLI, SibLI.getVNInfoAt(Idx));
      // The COPY will fold to a reload below.
    }

    // Attempt to fold memory ops.
    if (foldMemoryOperand(MI, Ops))
      continue;

    // Allocate interval around instruction.
    // FIXME: Infer regclass from instruction alone.
    LiveInterval &NewLI = Edit->createFrom(Reg, LIS, VRM);
    NewLI.markNotSpillable();

    if (Reads)
      insertReload(NewLI, Idx, MI);

    // Rewrite instruction operands.
    bool hasLiveDef = false;
    for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(Ops[i]);
      MO.setReg(NewLI.reg);
      if (MO.isUse()) {
        if (!MI->isRegTiedToDefOperand(Ops[i]))
          MO.setIsKill();
      } else {
        if (!MO.isDead())
          hasLiveDef = true;
      }
    }
    DEBUG(dbgs() << "\trewrite: " << Idx << '\t' << *MI);

    // FIXME: Use a second vreg if instruction has no tied ops.
    if (Writes && hasLiveDef)
      insertSpill(NewLI, OldLI, Idx, MI);

    DEBUG(dbgs() << "\tinterval: " << NewLI << '\n');
  }
}

/// spillAll - Spill all registers remaining after rematerialization.
void InlineSpiller::spillAll() {
  // Update LiveStacks now that we are committed to spilling.
  if (StackSlot == VirtRegMap::NO_STACK_SLOT) {
    StackSlot = VRM.assignVirt2StackSlot(Original);
    StackInt = &LSS.getOrCreateInterval(StackSlot, MRI.getRegClass(Original));
    StackInt->getNextValue(SlotIndex(), 0, LSS.getVNInfoAllocator());
  } else
    StackInt = &LSS.getInterval(StackSlot);

  if (Original != Edit->getReg())
    VRM.assignVirt2StackSlot(Edit->getReg(), StackSlot);

  assert(StackInt->getNumValNums() == 1 && "Bad stack interval values");
  for (unsigned i = 0, e = RegsToSpill.size(); i != e; ++i)
    StackInt->MergeRangesInAsValue(LIS.getInterval(RegsToSpill[i]),
                                   StackInt->getValNumInfo(0));
  DEBUG(dbgs() << "Merged spilled regs: " << *StackInt << '\n');

  // Spill around uses of all RegsToSpill.
  for (unsigned i = 0, e = RegsToSpill.size(); i != e; ++i)
    spillAroundUses(RegsToSpill[i]);

  // Hoisted spills may cause dead code.
  if (!DeadDefs.empty()) {
    DEBUG(dbgs() << "Eliminating " << DeadDefs.size() << " dead defs\n");
    Edit->eliminateDeadDefs(DeadDefs, LIS, VRM, TII);
  }

  // Finally delete the SnippetCopies.
  for (unsigned i = 0, e = RegsToSpill.size(); i != e; ++i) {
    for (MachineRegisterInfo::reg_iterator RI = MRI.reg_begin(RegsToSpill[i]);
         MachineInstr *MI = RI.skipInstruction();) {
      assert(SnippetCopies.count(MI) && "Remaining use wasn't a snippet copy");
      // FIXME: Do this with a LiveRangeEdit callback.
      VRM.RemoveMachineInstrFromMaps(MI);
      LIS.RemoveMachineInstrFromMaps(MI);
      MI->eraseFromParent();
    }
  }

  // Delete all spilled registers.
  for (unsigned i = 0, e = RegsToSpill.size(); i != e; ++i)
    Edit->eraseVirtReg(RegsToSpill[i], LIS);
}

void InlineSpiller::spill(LiveRangeEdit &edit) {
  Edit = &edit;
  assert(!TargetRegisterInfo::isStackSlot(edit.getReg())
         && "Trying to spill a stack slot.");
  // Share a stack slot among all descendants of Original.
  Original = VRM.getOriginal(edit.getReg());
  StackSlot = VRM.getStackSlot(Original);

  DEBUG(dbgs() << "Inline spilling "
               << MRI.getRegClass(edit.getReg())->getName()
               << ':' << edit.getParent() << "\nFrom original "
               << LIS.getInterval(Original) << '\n');
  assert(edit.getParent().isSpillable() &&
         "Attempting to spill already spilled value.");
  assert(DeadDefs.empty() && "Previous spill didn't remove dead defs");

  collectRegsToSpill();
  analyzeSiblingValues();
  reMaterializeAll();

  // Remat may handle everything.
  if (!RegsToSpill.empty())
    spillAll();

  Edit->calculateRegClassAndHint(MF, LIS, Loops);
}