1 //===-- LiveIntervalAnalysis.cpp - Live Interval Analysis -----------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file implements the LiveInterval analysis pass which is used
11 // by the Linear Scan Register allocator. This pass linearizes the
12 // basic blocks of the function in DFS order and uses the
13 // LiveVariables pass to conservatively compute live intervals for
14 // each virtual and physical register.
16 //===----------------------------------------------------------------------===//
18 #define DEBUG_TYPE "liveintervals"
19 #include "llvm/CodeGen/LiveIntervalAnalysis.h"
20 #include "VirtRegMap.h"
21 #include "llvm/Value.h"
22 #include "llvm/CodeGen/LiveVariables.h"
23 #include "llvm/CodeGen/MachineFrameInfo.h"
24 #include "llvm/CodeGen/MachineInstr.h"
25 #include "llvm/CodeGen/MachineLoopInfo.h"
26 #include "llvm/CodeGen/MachineRegisterInfo.h"
27 #include "llvm/CodeGen/Passes.h"
28 #include "llvm/Target/TargetRegisterInfo.h"
29 #include "llvm/Target/TargetInstrInfo.h"
30 #include "llvm/Target/TargetMachine.h"
31 #include "llvm/Support/CommandLine.h"
32 #include "llvm/Support/Debug.h"
33 #include "llvm/ADT/Statistic.h"
34 #include "llvm/ADT/STLExtras.h"
40 // Hidden options for help debugging.
// Disable rematerialization of trivially-recomputable defs during spilling.
41 cl::opt<bool> DisableReMat("disable-rematerialization",
42 cl::init(false), cl::Hidden);
// Split spilled live intervals at basic-block boundaries (on by default).
44 cl::opt<bool> SplitAtBB("split-intervals-at-bb",
45 cl::init(true), cl::Hidden);
// Cap on the number of interval splits; -1 means unlimited.
46 cl::opt<int> SplitLimit("split-limit",
47 cl::init(-1), cl::Hidden);
// Pass statistics (reported with -stats).
50 STATISTIC(numIntervals, "Number of original intervals");
51 STATISTIC(numIntervalsAfter, "Number of intervals after coalescing");
52 STATISTIC(numFolds    , "Number of loads/stores folded into instructions");
53 STATISTIC(numSplits   , "Number of intervals split");
// Unique address used as this pass's identity.
55 char LiveIntervals::ID = 0;
// Register the pass with the PassManager under -liveintervals.
57 RegisterPass<LiveIntervals> X("liveintervals", "Live Interval Analysis");
// getAnalysisUsage - Declare pass dependencies: live intervals are built
// from LiveVariables after PHI elimination and two-address lowering, and
// the listed analyses are preserved (not invalidated) by this pass.
60 void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
61 AU.addPreserved<LiveVariables>();
62 AU.addRequired<LiveVariables>();
63 AU.addPreservedID(MachineLoopInfoID);
64 AU.addPreservedID(MachineDominatorsID);
65 AU.addPreservedID(PHIEliminationID);
66 AU.addRequiredID(PHIEliminationID);
67 AU.addRequiredID(TwoAddressInstructionPassID);
// Delegate to the base class for common MachineFunctionPass requirements.
68 MachineFunctionPass::getAnalysisUsage(AU);
// releaseMemory - Drop all per-function state between runs.
71 void LiveIntervals::releaseMemory() {
76 // Release VNInfo memory regions after all VNInfo objects are dtor'd.
77 VNInfoAllocator.Reset();
// Free machine instrs that were cloned for rematerialization.
78 for (unsigned i = 0, e = ClonedMIs.size(); i != e; ++i)
82 /// runOnMachineFunction - Number the instructions/blocks of the function
/// and compute live intervals for all virtual (and some physical) registers.
84 bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
// Cache the subsystems used throughout the pass.
// NOTE(review): mf_ is read before any visible assignment; its
// initialization (presumably mf_ = &fn) is on an elided line -- confirm.
86 mri_ = &mf_->getRegInfo();
87 tm_ = &fn.getTarget();
88 tri_ = tm_->getRegisterInfo();
89 tii_ = tm_->getInstrInfo();
90 lv_ = &getAnalysis<LiveVariables>();
91 allocatableRegs_ = tri_->getAllocatableSet(fn);
93 // Number MachineInstrs and MachineBasicBlocks.
94 // Initialize MBB indexes to a sentinel.
95 MBB2IdxMap.resize(mf_->getNumBlockIDs(), std::make_pair(~0U,~0U));
98 for (MachineFunction::iterator MBB = mf_->begin(), E = mf_->end();
100 unsigned StartIdx = MIIndex;
// Assign each instruction a unique index, stepping by InstrSlots::NUM so
// sub-slots (load/use/def/store) fit between consecutive instructions.
102 for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
104 bool inserted = mi2iMap_.insert(std::make_pair(I, MIIndex)).second;
105 assert(inserted && "multiple MachineInstr -> index mappings");
106 i2miMap_.push_back(I);
107 MIIndex += InstrSlots::NUM;
110 // Set the MBB2IdxMap entry for this MBB.
111 MBB2IdxMap[MBB->getNumber()] = std::make_pair(StartIdx, MIIndex - 1);
112 Idx2MBBMap.push_back(std::make_pair(StartIdx, MBB));
// Keep the index->MBB table sorted so lower_bound lookups work.
114 std::sort(Idx2MBBMap.begin(), Idx2MBBMap.end(), Idx2MBBCompare());
118 numIntervals += getNumIntervals();
120 DOUT << "********** INTERVALS **********\n";
121 for (iterator I = begin(), E = end(); I != E; ++I) {
122 I->second.print(DOUT, tri_);
126 numIntervalsAfter += getNumIntervals();
131 /// print - Implement the dump method.
132 void LiveIntervals::print(std::ostream &O, const Module* ) const {
133 O << "********** INTERVALS **********\n";
134 for (const_iterator I = begin(), E = end(); I != E; ++I) {
// NOTE(review): the interval is printed to DOUT (debug stream) while the
// banners go to O -- likely should print to O as well; confirm intent.
135 I->second.print(DOUT, tri_);
139 O << "********** MACHINEINSTRS **********\n";
// Dump every instruction with its numbering, grouped per basic block.
140 for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end();
141 mbbi != mbbe; ++mbbi) {
142 O << ((Value*)mbbi->getBasicBlock())->getName() << ":\n";
143 for (MachineBasicBlock::iterator mii = mbbi->begin(),
144 mie = mbbi->end(); mii != mie; ++mii) {
145 O << getInstructionIndex(mii) << '\t' << *mii;
150 /// conflictsWithPhysRegDef - Returns true if the specified register
151 /// is defined during the duration of the specified interval.
152 bool LiveIntervals::conflictsWithPhysRegDef(const LiveInterval &li,
153 VirtRegMap &vrm, unsigned reg) {
154 for (LiveInterval::Ranges::const_iterator
155 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
// Walk every instruction index covered by this live range.
156 for (unsigned index = getBaseIndex(I->start),
157 end = getBaseIndex(I->end-1) + InstrSlots::NUM; index != end;
158 index += InstrSlots::NUM) {
159 // skip deleted instructions
160 while (index != end && !getInstructionFromIndex(index))
161 index += InstrSlots::NUM;
162 if (index == end) break;
164 MachineInstr *MI = getInstructionFromIndex(index);
// Copies into/out of li.reg itself are not considered conflicts.
165 unsigned SrcReg, DstReg;
166 if (tii_->isMoveInstr(*MI, SrcReg, DstReg))
167 if (SrcReg == li.reg || DstReg == li.reg)
// Examine each register operand; map virtuals through the VirtRegMap to
// their assigned physreg, then test for aliasing with 'reg'.
169 for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
170 MachineOperand& mop = MI->getOperand(i);
171 if (!mop.isRegister())
173 unsigned PhysReg = mop.getReg();
174 if (PhysReg == 0 || PhysReg == li.reg)
176 if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
177 if (!vrm.hasPhys(PhysReg))
179 PhysReg = vrm.getPhys(PhysReg);
181 if (PhysReg && tri_->regsOverlap(PhysReg, reg))
// printRegName - Print 'reg' to cerr: the target name for physical
// registers, "%regN" for virtual ones.
190 void LiveIntervals::printRegName(unsigned reg) const {
191 if (TargetRegisterInfo::isPhysicalRegister(reg))
192 cerr << tri_->getName(reg);
194 cerr << "%reg" << reg;
// handleVirtualRegisterDef - Extend 'interval' for a definition of a virtual
// register by instruction 'mi'. Handles three cases: the first def of the
// vreg, a redef from two-address lowering, and a redef from PHI elimination.
197 void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
198 MachineBasicBlock::iterator mi,
200 LiveInterval &interval) {
201 DOUT << "\t\tregister: "; DEBUG(printRegName(interval.reg));
202 LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);
204 // Virtual registers may be defined multiple times (due to phi
205 // elimination and 2-addr elimination). Much of what we do only has to be
206 // done once for the vreg. We use an empty interval to detect the first
207 // time we see a vreg.
208 if (interval.empty()) {
209 // Get the Idx of the defining instructions.
210 unsigned defIndex = getDefIndex(MIIdx);
// Remember the defining copy (if any) so coalescing can use it later.
212 MachineInstr *CopyMI = NULL;
213 unsigned SrcReg, DstReg;
214 if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
215 tii_->isMoveInstr(*mi, SrcReg, DstReg))
217 ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);
219 assert(ValNo->id == 0 && "First value in interval is not 0?");
221 // Loop over all of the blocks that the vreg is defined in.  There are
222 // two cases we have to handle here.  The most common case is a vreg
223 // whose lifetime is contained within a basic block. In this case there
224 // will be a single kill, in MBB, which comes after the definition.
225 if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
226 // FIXME: what about dead vars?
228 if (vi.Kills[0] != mi)
229 killIdx = getUseIndex(getInstructionIndex(vi.Kills[0]))+1;
231 killIdx = defIndex+1;
233 // If the kill happens after the definition, we have an intra-block
235 if (killIdx > defIndex) {
236 assert(vi.AliveBlocks.none() &&
237 "Shouldn't be alive across any blocks!");
238 LiveRange LR(defIndex, killIdx, ValNo);
239 interval.addRange(LR);
240 DOUT << " +" << LR << "\n";
241 interval.addKill(ValNo, killIdx);
246 // The other case we handle is when a virtual register lives to the end
247 // of the defining block, potentially live across some blocks, then is
248 // live into some number of blocks, but gets killed.  Start by adding a
249 // range that goes from this definition to the end of the defining block.
250 LiveRange NewLR(defIndex,
251 getInstructionIndex(&mbb->back()) + InstrSlots::NUM,
253 DOUT << " +" << NewLR;
254 interval.addRange(NewLR);
256 // Iterate over all of the blocks that the variable is completely
257 // live in, adding [instrIndex(begin), instrIndex(end)+4) to the
259 for (unsigned i = 0, e = vi.AliveBlocks.size(); i != e; ++i) {
260 if (vi.AliveBlocks[i]) {
261 MachineBasicBlock *MBB = mf_->getBlockNumbered(i);
263 LiveRange LR(getMBBStartIdx(i),
264 getInstructionIndex(&MBB->back()) + InstrSlots::NUM,
266 interval.addRange(LR);
272 // Finally, this virtual register is live from the start of any killing
273 // block to the 'use' slot of the killing instruction.
274 for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
275 MachineInstr *Kill = vi.Kills[i];
276 unsigned killIdx = getUseIndex(getInstructionIndex(Kill))+1;
277 LiveRange LR(getMBBStartIdx(Kill->getParent()),
279 interval.addRange(LR);
280 interval.addKill(ValNo, killIdx);
285 // If this is the second time we see a virtual register definition, it
286 // must be due to phi elimination or two addr elimination.  If this is
287 // the result of two address elimination, then the vreg is one of the
288 // def-and-use register operand.
289 if (mi->isRegReDefinedByTwoAddr(interval.reg)) {
290 // If this is a two-address definition, then we have already processed
291 // the live range.  The only problem is that we didn't realize there
292 // are actually two values in the live interval.  Because of this we
293 // need to take the LiveRegion that defines this register and split it
295 assert(interval.containsOneValue());
296 unsigned DefIndex = getDefIndex(interval.getValNumInfo(0)->def);
297 unsigned RedefIndex = getDefIndex(MIIdx);
299 const LiveRange *OldLR = interval.getLiveRangeContaining(RedefIndex-1);
300 VNInfo *OldValNo = OldLR->valno;
302 // Delete the initial value, which should be short and continuous,
303 // because the 2-addr copy must be in the same MBB as the redef.
304 interval.removeRange(DefIndex, RedefIndex);
306 // Two-address vregs should always only be redefined once.  This means
307 // that at this point, there should be exactly one value number in it.
308 assert(interval.containsOneValue() && "Unexpected 2-addr liveint!");
310 // The new value number (#1) is defined by the instruction we claimed
312 VNInfo *ValNo = interval.getNextValue(OldValNo->def, OldValNo->copy,
315 // Value#0 is now defined by the 2-addr instruction.
316 OldValNo->def = RedefIndex;
319 // Add the new live interval which replaces the range for the input copy.
320 LiveRange LR(DefIndex, RedefIndex, ValNo);
321 DOUT << " replace range with " << LR;
322 interval.addRange(LR);
323 interval.addKill(ValNo, RedefIndex);
325 // If this redefinition is dead, we need to add a dummy unit live
326 // range covering the def slot.
327 if (mi->registerDefIsDead(interval.reg, tri_))
328 interval.addRange(LiveRange(RedefIndex, RedefIndex+1, OldValNo));
331 interval.print(DOUT, tri_);
334 // Otherwise, this must be because of phi elimination.  If this is the
335 // first redefinition of the vreg that we have seen, go back and change
336 // the live range in the PHI block to be a different value number.
337 if (interval.containsOneValue()) {
338 assert(vi.Kills.size() == 1 &&
339 "PHI elimination vreg should have one kill, the PHI itself!");
341 // Remove the old range that we now know has an incorrect number.
342 VNInfo *VNI = interval.getValNumInfo(0);
343 MachineInstr *Killer = vi.Kills[0];
344 unsigned Start = getMBBStartIdx(Killer->getParent());
345 unsigned End = getUseIndex(getInstructionIndex(Killer))+1;
346 DOUT << " Removing [" << Start << "," << End << "] from: ";
347 interval.print(DOUT, tri_); DOUT << "\n";
348 interval.removeRange(Start, End);
349 VNI->hasPHIKill = true;
350 DOUT << " RESULT: "; interval.print(DOUT, tri_);
352 // Replace the interval with one of a NEW value number.  Note that this
353 // value number isn't actually defined by an instruction, weird huh? :)
354 LiveRange LR(Start, End, interval.getNextValue(~0, 0, VNInfoAllocator));
355 DOUT << " replace range with " << LR;
356 interval.addRange(LR);
357 interval.addKill(LR.valno, End);
358 DOUT << " RESULT: "; interval.print(DOUT, tri_);
361 // In the case of PHI elimination, each variable definition is only
362 // live until the end of the block.  We've already taken care of the
363 // rest of the live range.
364 unsigned defIndex = getDefIndex(MIIdx);
// As above, record a defining copy instruction if there is one.
367 MachineInstr *CopyMI = NULL;
368 unsigned SrcReg, DstReg;
369 if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
370 tii_->isMoveInstr(*mi, SrcReg, DstReg))
372 ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);
374 unsigned killIndex = getInstructionIndex(&mbb->back()) + InstrSlots::NUM;
375 LiveRange LR(defIndex, killIndex, ValNo);
376 interval.addRange(LR);
377 interval.addKill(ValNo, killIndex);
378 ValNo->hasPHIKill = true;
// handlePhysicalRegisterDef - Extend 'interval' for a def of a physical
// register. Physreg intervals never cross block boundaries, so the range
// ends at a kill/redef in this block, or is a one-slot dead def.
386 void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
387 MachineBasicBlock::iterator mi,
389 LiveInterval &interval,
390 MachineInstr *CopyMI) {
391 // A physical register cannot be live across basic block, so its
392 // lifetime must end somewhere in its defining basic block.
393 DOUT << "\t\tregister: "; DEBUG(printRegName(interval.reg));
395 unsigned baseIndex = MIIdx;
396 unsigned start = getDefIndex(baseIndex);
397 unsigned end = start;
399 // If it is not used after definition, it is considered dead at
400 // the instruction defining it. Hence its interval is:
401 // [defSlot(def), defSlot(def)+1)
402 if (mi->registerDefIsDead(interval.reg, tri_)) {
404 end = getDefIndex(start) + 1;
408 // If it is not dead on definition, it must be killed by a
409 // subsequent instruction. Hence its interval is:
410 // [defSlot(def), useSlot(kill)+1)
411 while (++mi != MBB->end()) {
412 baseIndex += InstrSlots::NUM;
413 if (mi->killsRegister(interval.reg, tri_)) {
415 end = getUseIndex(baseIndex) + 1;
417 } else if (mi->modifiesRegister(interval.reg, tri_)) {
418 // Another instruction redefines the register before it is ever read.
419 // Then the register is essentially dead at the instruction that defines
420 // it. Hence its interval is:
421 // [defSlot(def), defSlot(def)+1)
423 end = getDefIndex(start) + 1;
428 // The only case we should have a dead physreg here without a killing or
429 // instruction where we know it's dead is if it is live-in to the function
431 assert(!CopyMI && "physreg was not killed in defining block!");
432 end = getDefIndex(start) + 1; // It's dead.
435 assert(start < end && "did not find end of interval?");
437 // Already exists? Extend old live interval.
438 LiveInterval::iterator OldLR = interval.FindLiveRangeContaining(start);
439 VNInfo *ValNo = (OldLR != interval.end())
440 ? OldLR->valno : interval.getNextValue(start, CopyMI, VNInfoAllocator);
441 LiveRange LR(start, end, ValNo);
442 interval.addRange(LR);
443 interval.addKill(LR.valno, end);
444 DOUT << " +" << LR << '\n';
// handleRegisterDef - Dispatch a register definition at MI to the virtual-
// or physical-register handler; for allocatable physregs, also process the
// implicitly-defined sub-registers.
447 void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
448 MachineBasicBlock::iterator MI,
451 if (TargetRegisterInfo::isVirtualRegister(reg))
452 handleVirtualRegisterDef(MBB, MI, MIIdx, getOrCreateInterval(reg));
453 else if (allocatableRegs_[reg]) {
// Record a defining copy (move or EXTRACT_SUBREG) for coalescing.
454 MachineInstr *CopyMI = NULL;
455 unsigned SrcReg, DstReg;
456 if (MI->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
457 tii_->isMoveInstr(*MI, SrcReg, DstReg))
459 handlePhysicalRegisterDef(MBB, MI, MIIdx, getOrCreateInterval(reg), CopyMI);
460 // Def of a register also defines its sub-registers.
461 for (const unsigned* AS = tri_->getSubRegisters(reg); *AS; ++AS)
462 // If MI also modifies the sub-register explicitly, avoid processing it
463 // more than once. Do not pass in TRI here so it checks for exact match.
464 if (!MI->modifiesRegister(*AS))
465 handlePhysicalRegisterDef(MBB, MI, MIIdx, getOrCreateInterval(*AS), 0);
// handleLiveInRegister - Build a range for a register that is live into MBB
// (function argument or alias of one), from the block start to its first
// kill or redefinition.
469 void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
471 LiveInterval &interval, bool isAlias) {
472 DOUT << "\t\tlivein register: "; DEBUG(printRegName(interval.reg));
474 // Look for kills, if it reaches a def before it's killed, then it shouldn't
475 // be considered a livein.
476 MachineBasicBlock::iterator mi = MBB->begin();
477 unsigned baseIndex = MIIdx;
478 unsigned start = baseIndex;
479 unsigned end = start;
480 while (mi != MBB->end()) {
481 if (mi->killsRegister(interval.reg, tri_)) {
483 end = getUseIndex(baseIndex) + 1;
485 } else if (mi->modifiesRegister(interval.reg, tri_)) {
486 // Another instruction redefines the register before it is ever read.
487 // Then the register is essentially dead at the instruction that defines
488 // it. Hence its interval is:
489 // [defSlot(def), defSlot(def)+1)
491 end = getDefIndex(start) + 1;
// Advance the running index in lock-step with the iterator.
495 baseIndex += InstrSlots::NUM;
500 // Live-in register might not be used at all.
504 end = getDefIndex(MIIdx) + 1;
506 DOUT << " live through";
// Live-in values have no defining instruction, hence the null copy.
511 LiveRange LR(start, end, interval.getNextValue(start, 0, VNInfoAllocator));
512 interval.addRange(LR);
513 interval.addKill(LR.valno, end);
514 DOUT << " +" << LR << '\n';
517 /// computeIntervals - computes the live intervals for virtual
518 /// registers. for some ordering of the machine instructions [1,N] a
519 /// live interval is an interval [i, j) where 1 <= i <= j < N for
520 /// which a variable is live
521 void LiveIntervals::computeIntervals() {
522 DOUT << "********** COMPUTING LIVE INTERVALS **********\n"
523 << "********** Function: "
524 << ((Value*)mf_->getFunction())->getName() << '\n';
525 // Track the index of the current machine instr.
526 unsigned MIIndex = 0;
527 for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
529 MachineBasicBlock *MBB = MBBI;
530 DOUT << ((Value*)MBB->getBasicBlock())->getName() << ":\n";
532 MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();
534 // Create intervals for live-ins to this BB first.
535 for (MachineBasicBlock::const_livein_iterator LI = MBB->livein_begin(),
536 LE = MBB->livein_end(); LI != LE; ++LI) {
537 handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
538 // Multiple live-ins can alias the same register.
539 for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
540 if (!hasInterval(*AS))
541 handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
545 for (; MI != miEnd; ++MI) {
546 DOUT << MIIndex << "\t" << *MI;
// Iterate operands in reverse so processing a def cannot be confused by
// operands the handlers may append to MI.
549 for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
550 MachineOperand &MO = MI->getOperand(i);
551 // handle register defs - build intervals
552 if (MO.isRegister() && MO.getReg() && MO.isDef())
553 handleRegisterDef(MBB, MI, MIIndex, MO.getReg());
556 MIIndex += InstrSlots::NUM;
// findLiveInMBBs - Collect into MBBs every basic block whose start index
// falls inside live range LR, using binary search on the sorted Idx2MBBMap.
561 bool LiveIntervals::findLiveInMBBs(const LiveRange &LR,
562 SmallVectorImpl<MachineBasicBlock*> &MBBs) const {
563 std::vector<IdxMBBPair>::const_iterator I =
564 std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), LR.start);
// Append blocks until one starts at or beyond the range's end.
567 while (I != Idx2MBBMap.end()) {
568 if (LR.end <= I->first)
570 MBBs.push_back(I->second);
// createInterval - Construct a fresh LiveInterval for 'reg' with a spill
// weight depending on whether it is a physical or virtual register.
578 LiveInterval LiveIntervals::createInterval(unsigned reg) {
579 float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ?
581 return LiveInterval(reg, Weight);
584 /// getVNInfoSourceReg - Helper function that parses the specified VNInfo
585 /// copy field and returns the source register that defines it.
586 unsigned LiveIntervals::getVNInfoSourceReg(const VNInfo *VNI) const {
// EXTRACT_SUBREG's source is operand 1; plain moves are decoded by TII.
590 if (VNI->copy->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG)
591 return VNI->copy->getOperand(1).getReg();
592 unsigned SrcReg, DstReg;
593 if (tii_->isMoveInstr(*VNI->copy, SrcReg, DstReg))
595 assert(0 && "Unrecognized copy instruction!");
599 //===----------------------------------------------------------------------===//
600 // Register allocator hooks.
603 /// getReMatImplicitUse - If the remat definition MI has one (for now, we only
604 /// allow one) virtual register operand, then its uses are implicitly using
605 /// the register. Returns the virtual register.
606 unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
607 MachineInstr *MI) const {
609 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
610 MachineOperand &MO = MI->getOperand(i);
611 if (!MO.isRegister() || !MO.isUse())
// Ignore the interval's own register; only foreign vreg uses matter.
613 unsigned Reg = MO.getReg();
614 if (Reg == 0 || Reg == li.reg)
616 // FIXME: For now, only remat MI with at most one register operand.
618 "Can't rematerialize instruction with multiple register operand!");
625 /// isValNoAvailableAt - Return true if the val# of the specified interval
626 /// which reaches the given instruction also reaches the specified use index.
627 bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
628 unsigned UseIdx) const {
629 unsigned Index = getInstructionIndex(MI);
// NOTE(review): assumes li covers Index (FindLiveRangeContaining is
// dereferenced unchecked) -- callers must guarantee this.
630 VNInfo *ValNo = li.FindLiveRangeContaining(Index)->valno;
631 LiveInterval::const_iterator UI = li.FindLiveRangeContaining(UseIdx);
632 return UI != li.end() && UI->valno == ValNo;
635 /// isReMaterializable - Returns true if the definition MI of the specified
636 /// val# of the specified interval is re-materializable.
637 bool LiveIntervals::isReMaterializable(const LiveInterval &li,
638 const VNInfo *ValNo, MachineInstr *MI,
// Implicit defs are trivially rematerializable.
644 const TargetInstrDesc &TID = MI->getDesc();
645 if (TID.isImplicitDef())
// Loads from immutable (fixed) stack slots can always be re-executed.
649 if (tii_->isLoadFromStackSlot(MI, FrameIdx) &&
650 mf_->getFrameInfo()->isImmutableObjectIndex(FrameIdx))
651 // FIXME: Let target specific isReallyTriviallyReMaterializable determines
652 // this but remember this is not safe to fold into a two-address
654 // This is a load from fixed stack slot. It can be rematerialized.
657 if (tii_->isTriviallyReMaterializable(MI)) {
658 isLoad = TID.isSimpleLoad();
// If the def implicitly uses another vreg, that vreg's value must reach
// every use of this val# for remat to be valid.
660 unsigned ImpUse = getReMatImplicitUse(li, MI);
662 const LiveInterval &ImpLi = getInterval(ImpUse);
663 for (MachineRegisterInfo::use_iterator ri = mri_->use_begin(li.reg),
664 re = mri_->use_end(); ri != re; ++ri) {
665 MachineInstr *UseMI = &*ri;
666 unsigned UseIdx = getInstructionIndex(UseMI);
667 if (li.FindLiveRangeContaining(UseIdx)->valno != ValNo)
669 if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
679 /// isReMaterializable - Returns true if every definition of MI of every
680 /// val# of the specified interval is re-materializable.
681 bool LiveIntervals::isReMaterializable(const LiveInterval &li, bool &isLoad) {
683 for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
685 const VNInfo *VNI = *i;
686 unsigned DefIdx = VNI->def;
688 continue; // Dead val#.
689 // Is the def for the val# rematerializable?
692 MachineInstr *ReMatDefMI = getInstructionFromIndex(DefIdx);
// All defs must be rematerializable; isLoad accumulates whether any is a load.
693 bool DefIsLoad = false;
695 !isReMaterializable(li, VNI, ReMatDefMI, DefIsLoad))
702 /// FilterFoldedOps - Filter out two-address use operands. Return
703 /// true if it finds any issue with the operands that ought to prevent
705 static bool FilterFoldedOps(MachineInstr *MI,
706 SmallVector<unsigned, 2> &Ops,
708 SmallVector<unsigned, 2> &FoldOps) {
709 const TargetInstrDesc &TID = MI->getDesc();
// Classify each candidate operand as mod and/or ref, keeping only the
// foldable ones in FoldOps.
712 for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
713 unsigned OpIdx = Ops[i];
714 MachineOperand &MO = MI->getOperand(OpIdx);
715 // FIXME: fold subreg use.
719 MRInfo |= (unsigned)VirtRegMap::isMod;
721 // Filter out two-address use operand(s).
722 if (!MO.isImplicit() &&
723 TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1) {
// Tied operands are both read and written; don't add them to FoldOps.
724 MRInfo = VirtRegMap::isModRef;
727 MRInfo |= (unsigned)VirtRegMap::isRef;
729 FoldOps.push_back(OpIdx);
735 /// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
736 /// slot / to reg or any rematerialized load into ith operand of specified
737 /// MI. If it is successful, MI is updated with the newly created MI and
739 bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
740 VirtRegMap &vrm, MachineInstr *DefMI,
742 SmallVector<unsigned, 2> &Ops,
743 bool isSS, int Slot, unsigned Reg) {
744 const TargetInstrDesc &TID = MI->getDesc();
745 // If it is an implicit def instruction, just delete it.
746 if (TID.isImplicitDef()) {
747 RemoveMachineInstrFromMaps(MI);
748 vrm.RemoveMachineInstrFromMaps(MI);
749 MI->eraseFromParent();
754 // Filter the list of operand indexes that are to be folded. Abort if
755 // any operand will prevent folding.
757 SmallVector<unsigned, 2> FoldOps;
758 if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
761 // Can't fold a load from fixed stack slot into a two address instruction.
762 if (isSS && DefMI && (MRInfo & VirtRegMap::isMod))
// Ask the target to fold either the stack slot or the remat'ed def.
765 MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(*mf_, MI, FoldOps, Slot)
766 : tii_->foldMemoryOperand(*mf_, MI, FoldOps, DefMI);
768 // Remember this instruction uses the spill slot.
769 if (isSS) vrm.addSpillSlotUse(Slot, fmi);
771 // Attempt to fold the memory reference into the instruction. If
772 // we can do this, we don't need to insert spill code.
774 lv_->instructionChanged(MI, fmi);
// Transfer kill/dead flags and all VirtRegMap bookkeeping to the new MI.
776 fmi->copyKillDeadInfo(MI, tri_);
777 MachineBasicBlock &MBB = *MI->getParent();
778 if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
779 vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
780 vrm.transferSpillPts(MI, fmi);
781 vrm.transferRestorePts(MI, fmi);
// Splice the folded instruction into the index maps in MI's place.
783 i2miMap_[InstrIdx /InstrSlots::NUM] = fmi;
784 mi2iMap_[fmi] = InstrIdx;
785 MI = MBB.insert(MBB.erase(MI), fmi);
792 /// canFoldMemoryOperand - Returns true if the specified load / store
793 /// folding is possible.
794 bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
795 SmallVector<unsigned, 2> &Ops,
796 bool ReMatLoad) const {
797 // Filter the list of operand indexes that are to be folded. Abort if
798 // any operand will prevent folding.
800 SmallVector<unsigned, 2> FoldOps;
801 if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
804 // Can't fold a remat'ed load into a two address instruction.
805 if (ReMatLoad && (MRInfo & VirtRegMap::isMod))
// Final say belongs to the target.
808 return tii_->canFoldMemoryOperand(MI, FoldOps);
// intervalIsInOneMBB - Return true if every range of 'li' is contained
// within a single basic block (i.e. the interval never crosses an MBB
// boundary).
811 bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
812 SmallPtrSet<MachineBasicBlock*, 4> MBBs;
813 for (LiveInterval::Ranges::const_iterator
814 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
// Locate the block containing the range's start via binary search.
815 std::vector<IdxMBBPair>::const_iterator II =
816 std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), I->start);
817 if (II == Idx2MBBMap.end())
819 if (I->end > II->first) // crossing a MBB.
821 MBBs.insert(II->second);
828 /// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of
829 /// interval on to-be re-materialized operands of MI) with new register.
830 void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
831 MachineInstr *MI, unsigned NewVReg,
833 // There is an implicit use. That means one of the other operand is
834 // being remat'ed and the remat'ed instruction has li.reg as an
835 // use operand. Make sure we rewrite that as well.
836 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
837 MachineOperand &MO = MI->getOperand(i);
838 if (!MO.isRegister())
840 unsigned Reg = MO.getReg();
841 if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
843 if (!vrm.isReMaterialized(Reg))
// Patch li.reg -> NewVReg inside the recorded remat instruction.
845 MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
846 MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg);
848 UseMO->setReg(NewVReg);
852 /// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
853 /// for addIntervalsForSpills to rewrite uses / defs for the given live range.
// Rewrites one instruction: tries remat-def deletion and load/store folding
// first, otherwise replaces li.reg with a fresh vreg (NewVReg) and records
// the resulting spill/remat interval in NewLIs.
855 rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
856 bool TrySplit, unsigned index, unsigned end,  MachineInstr *MI,
857 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
858 unsigned Slot, int LdSlot,
859 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
861 const TargetRegisterClass* rc,
862 SmallVector<int, 4> &ReMatIds,
863 const MachineLoopInfo *loopInfo,
864 unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
865 std::map<unsigned,unsigned> &MBBVRegsMap,
866 std::vector<LiveInterval*> &NewLIs) {
867 bool CanFold = false;
869 for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
870 MachineOperand& mop = MI->getOperand(i);
871 if (!mop.isRegister())
873 unsigned Reg = mop.getReg();
875 if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
880 bool TryFold = !DefIsReMat;
881 bool FoldSS = true; // Default behavior unless it's a remat.
884 // If this is the rematerializable definition MI itself and
885 // all of its uses are rematerialized, simply delete it.
886 if (MI == ReMatOrigDefMI && CanDelete) {
887 DOUT << "\t\t\t\tErasing re-materlizable def: ";
889 unsigned ImpUse = getReMatImplicitUse(li, MI);
891 // To be deleted MI has a virtual register operand, update the
892 // spill weight of the register interval.
893 unsigned loopDepth = loopInfo->getLoopDepth(MI->getParent());
894 LiveInterval &ImpLi = getInterval(ImpUse);
896 getSpillWeight(false, true, loopDepth) / ImpLi.getSize();
// Remove the dead def from all maps before erasing it.
898 RemoveMachineInstrFromMaps(MI);
899 vrm.RemoveMachineInstrFromMaps(MI);
900 MI->eraseFromParent();
904 // If def for this use can't be rematerialized, then try folding.
905 // If def is rematerializable and it's a load, also try folding.
906 TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
908 // Try fold loads (from stack slot, constant pool, etc.) into uses.
914 // Scan all of the operands of this instruction rewriting operands
915 // to use NewVReg instead of li.reg as appropriate. We do this for
918 // 1. If the instr reads the same spilled vreg multiple times, we
919 // want to reuse the NewVReg.
920 // 2. If the instr is a two-addr instruction, we are required to
921 // keep the src/dst regs pinned.
923 // Keep track of whether we replace a use and/or def so that we can
924 // create the spill interval with the appropriate range.
926 HasUse = mop.isUse();
927 HasDef = mop.isDef();
928 SmallVector<unsigned, 2> Ops;
// Gather every other operand of MI that also refers to li.reg.
930 for (unsigned j = i+1, e = MI->getNumOperands(); j != e; ++j) {
931 const MachineOperand &MOj = MI->getOperand(j);
932 if (!MOj.isRegister())
934 unsigned RegJ = MOj.getReg();
935 if (RegJ == 0 || TargetRegisterInfo::isPhysicalRegister(RegJ))
939 HasUse |= MOj.isUse();
940 HasDef |= MOj.isDef();
945 // Do not fold load / store here if we are splitting. We'll find an
946 // optimal point to insert a load / store later.
948 if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
949 Ops, FoldSS, FoldSlot, Reg)) {
950 // Folding the load/store can completely change the instruction in
951 // unpredictable ways, rescan it from the beginning.
955 goto RestartInstruction;
958 CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat && isLoad);
963 // Create a new virtual register for the spill interval.
964 bool CreatedNewVReg = false;
966 NewVReg = mri_->createVirtualRegister(rc);
968 CreatedNewVReg = true;
971 if (mop.isImplicit())
972 rewriteImplicitOps(li, MI, NewVReg, vrm);
974 // Reuse NewVReg for other reads.
975 for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
976 MachineOperand &mopj = MI->getOperand(Ops[j]);
977 mopj.setReg(NewVReg);
978 if (mopj.isImplicit())
979 rewriteImplicitOps(li, MI, NewVReg, vrm);
982 if (CreatedNewVReg) {
984 vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI/*, CanDelete*/);
985 if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
986 // Each valnum may have its own remat id.
987 ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
989 vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
991 if (!CanDelete || (HasUse && HasDef)) {
992 // If this is a two-addr instruction then its use operands are
993 // rematerializable but its def is not. It should be assigned a
995 vrm.assignVirt2StackSlot(NewVReg, Slot);
998 vrm.assignVirt2StackSlot(NewVReg, Slot);
1000 } else if (HasUse && HasDef &&
1001 vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
1002 // If this interval hasn't been assigned a stack slot (because earlier
1003 // def is a deleted remat def), do it now.
1004 assert(Slot != VirtRegMap::NO_STACK_SLOT);
1005 vrm.assignVirt2StackSlot(NewVReg, Slot);
1008 // Re-matting an instruction with virtual register use. Add the
1009 // register as an implicit use on the use MI.
1010 if (DefIsReMat && ImpUse)
1011 MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
1013 // create a new register interval for this spill / remat.
1014 LiveInterval &nI = getOrCreateInterval(NewVReg);
1015 if (CreatedNewVReg) {
1016 NewLIs.push_back(&nI);
1017 MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
1019 vrm.setIsSplitFromReg(NewVReg, li.reg);
// Build the ranges of the new interval around this def/use point.
1023 if (CreatedNewVReg) {
1024 LiveRange LR(getLoadIndex(index), getUseIndex(index)+1,
1025 nI.getNextValue(~0U, 0, VNInfoAllocator));
1029 // Extend the split live interval to this def / use.
1030 unsigned End = getUseIndex(index)+1;
1031 LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
1032 nI.getValNumInfo(nI.getNumValNums()-1));
1038 LiveRange LR(getDefIndex(index), getStoreIndex(index),
1039 nI.getNextValue(~0U, 0, VNInfoAllocator));
1044 DOUT << "\t\t\t\tAdded new interval: ";
1045 nI.print(DOUT, tri_);
/// anyKillInMBBAfterIdx - Return true if the value number has a kill located
/// inside MBB at an instruction index strictly after Idx (and before the
/// block's end index).
/// NOTE(review): this extract elides some original lines — the `const VNInfo
/// *VNI` parameter declaration and the return statements are not visible
/// here; confirm against the full file.
1050 bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
1052 MachineBasicBlock *MBB, unsigned Idx) const {
// Instruction indices belonging to MBB are bounded above by its end index.
1053 unsigned End = getMBBEndIdx(MBB);
// Walk the value number's recorded kill indices.
1054 for (unsigned j = 0, ee = VNI->kills.size(); j != ee; ++j) {
1055 unsigned KillIdx = VNI->kills[j];
// A kill strictly between Idx and the block end means the value is still
// used later in this MBB.
1056 if (KillIdx > Idx && KillIdx < End)
/// findDefinedVNInfo - Find the VNInfo of the given live interval whose
/// definition index matches DefIdx, or NULL if none matches.
/// NOTE(review): the loop body's assignment/break and the final return are
/// elided from this extract.
1062 static const VNInfo *findDefinedVNInfo(const LiveInterval &li, unsigned DefIdx) {
1063 const VNInfo *VNI = NULL;
// Linear scan over all value numbers of the interval.
1064 for (LiveInterval::const_vni_iterator i = li.vni_begin(),
1065 e = li.vni_end(); i != e; ++i)
1066 if ((*i)->def == DefIdx) {
1073 /// RewriteInfo - Keep track of machine instrs that will be rewritten
1074 /// during spilling.
// One record per (instruction, operand-kind) occurrence of the spilled
// register: Index is the instruction's numbering index, MI the instruction,
// HasUse/HasDef whether this occurrence reads/writes the register.
// NOTE(review): the field declarations themselves are elided from this
// extract; only the constructor is visible.
1075 struct RewriteInfo {
1080 RewriteInfo(unsigned i, MachineInstr *mi, bool u, bool d)
1081 : Index(i), MI(mi), HasUse(u), HasDef(d) {}
// RewriteInfoCompare - Strict weak ordering of RewriteInfo records by
// instruction index; used to sort RewriteMIs so entries for the same
// instruction become adjacent.
1084 struct RewriteInfoCompare {
1085 bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
1086 return LHS.Index < RHS.Index;
/// rewriteInstructionsForSpills - Rewrite every def / use of li.reg inside
/// the live range [I->start, I->end) for spilling (and, when TrySplit is
/// set, for interval splitting).  Records per-MBB spill points in
/// SpillIdxes/SpillMBBs and reload points in RestoreIdxes/RestoreMBBs, and
/// appends any newly created spill intervals to NewLIs.
/// NOTE(review): this extract elides a number of original lines (several
/// closing braces, else-branches and a few statements); comments below are
/// hedged accordingly.
1090 void LiveIntervals::
1091 rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
1092 LiveInterval::Ranges::const_iterator &I,
1093 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
1094 unsigned Slot, int LdSlot,
1095 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
1097 const TargetRegisterClass* rc,
1098 SmallVector<int, 4> &ReMatIds,
1099 const MachineLoopInfo *loopInfo,
1100 BitVector &SpillMBBs,
1101 std::map<unsigned, std::vector<SRInfo> > &SpillIdxes,
1102 BitVector &RestoreMBBs,
1103 std::map<unsigned, std::vector<SRInfo> > &RestoreIdxes,
1104 std::map<unsigned,unsigned> &MBBVRegsMap,
1105 std::vector<LiveInterval*> &NewLIs) {
1106 bool AllCanFold = true;
1107 unsigned NewVReg = 0;
// Round the range to whole instruction slots so index comparisons below are
// done on instruction granularity.
1108 unsigned start = getBaseIndex(I->start);
1109 unsigned end = getBaseIndex(I->end-1) + InstrSlots::NUM;
1111 // First collect all the def / use in this live range that will be rewritten.
1112 // Make sure they are sorted according instruction index.
1113 std::vector<RewriteInfo> RewriteMIs;
1114 for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
1115 re = mri_->reg_end(); ri != re; ) {
1116 MachineInstr *MI = &(*ri);
1117 MachineOperand &O = ri.getOperand();
1119 unsigned index = getInstructionIndex(MI);
// Only occurrences inside [start, end) belong to this live range.
1120 if (index < start || index >= end)
1122 RewriteMIs.push_back(RewriteInfo(index, MI, O.isUse(), O.isDef()));
1124 std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());
// If the def is rematerializable and reads another virtual register, that
// register must be kept alive as an implicit use at each remat site.
1126 unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
1127 // Now rewrite the defs and uses.
1128 for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
1129 RewriteInfo &rwi = RewriteMIs[i];
1131 unsigned index = rwi.Index;
1132 bool MIHasUse = rwi.HasUse;
1133 bool MIHasDef = rwi.HasDef;
1134 MachineInstr *MI = rwi.MI;
1135 // If MI def and/or use the same register multiple times, then there
1136 // are multiple entries.
1137 unsigned NumUses = MIHasUse;
// Coalesce all adjacent entries for the same instruction into one
// rewrite, counting uses and OR-ing together the def/use flags.
1138 while (i != e && RewriteMIs[i].MI == MI) {
1139 assert(RewriteMIs[i].Index == index);
1140 bool isUse = RewriteMIs[i].HasUse;
1141 if (isUse) ++NumUses;
1143 MIHasDef |= RewriteMIs[i].HasDef;
1146 MachineBasicBlock *MBB = MI->getParent();
1148 if (ImpUse && MI != ReMatDefMI) {
1149 // Re-matting an instruction with virtual register use. Update the
1150 // register interval's spill weight.
1151 unsigned loopDepth = loopInfo->getLoopDepth(MI->getParent());
1152 LiveInterval &ImpLi = getInterval(ImpUse);
// Weight contribution is per-use, normalized by the interval's size.
1154 getSpillWeight(false, true, loopDepth) * NumUses / ImpLi.getSize();
// When splitting, reuse a single new vreg per MBB; look it up here.
1157 unsigned MBBId = MBB->getNumber();
1158 unsigned ThisVReg = 0;
1160 std::map<unsigned,unsigned>::const_iterator NVI = MBBVRegsMap.find(MBBId);
1161 if (NVI != MBBVRegsMap.end()) {
1162 ThisVReg = NVI->second;
1169 // It's better to start a new interval to avoid artifically
1170 // extend the new interval.
1171 if (MIHasDef && !MIHasUse) {
// A def-only instruction starts a fresh value; drop the per-MBB vreg so a
// new one is created for it.
1172 MBBVRegsMap.erase(MBB->getNumber());
1178 bool IsNew = ThisVReg == 0;
1180 // This ends the previous live interval. If all of its def / use
1181 // can be folded, give it a low spill weight.
1182 if (NewVReg && TrySplit && AllCanFold) {
1183 LiveInterval &nI = getOrCreateInterval(NewVReg);
// Rewrite this instruction; HasDef/HasUse report what the instruction
// actually did to li.reg after folding attempts.
1190 bool HasDef = false;
1191 bool HasUse = false;
1192 bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
1193 index, end, MI, ReMatOrigDefMI, ReMatDefMI,
1194 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1195 CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
1196 ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs);
// Nothing left referencing the register (fully folded) — skip bookkeeping.
1197 if (!HasDef && !HasUse)
1200 AllCanFold &= CanFold;
1202 // Update weight of spill interval.
1203 LiveInterval &nI = getOrCreateInterval(NewVReg);
1205 // The spill weight is now infinity as it cannot be spilled again.
1206 nI.weight = HUGE_VALF;
1210 // Keep track of the last def and first use in each MBB.
1212 if (MI != ReMatOrigDefMI || !CanDelete) {
1213 bool HasKill = false;
1215 HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, getDefIndex(index));
1217 // If this is a two-address code, then this index starts a new VNInfo.
1218 const VNInfo *VNI = findDefinedVNInfo(li, getDefIndex(index));
1220 HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, getDefIndex(index));
1222 std::map<unsigned, std::vector<SRInfo> >::iterator SII =
1223 SpillIdxes.find(MBBId);
// Record (or update) the spill point for this MBB.
1225 if (SII == SpillIdxes.end()) {
1226 std::vector<SRInfo> S;
1227 S.push_back(SRInfo(index, NewVReg, true));
1228 SpillIdxes.insert(std::make_pair(MBBId, S));
1229 } else if (SII->second.back().vreg != NewVReg) {
1230 SII->second.push_back(SRInfo(index, NewVReg, true));
1231 } else if ((int)index > SII->second.back().index) {
1232 // If there is an earlier def and this is a two-address
1233 // instruction, then it's not possible to fold the store (which
1234 // would also fold the load).
1235 SRInfo &Info = SII->second.back();
1237 Info.canFold = !HasUse;
1239 SpillMBBs.set(MBBId);
1240 } else if (SII != SpillIdxes.end() &&
1241 SII->second.back().vreg == NewVReg &&
1242 (int)index > SII->second.back().index) {
1243 // There is an earlier def that's not killed (must be two-address).
1244 // The spill is no longer needed.
1245 SII->second.pop_back();
1246 if (SII->second.empty()) {
1247 SpillIdxes.erase(MBBId);
1248 SpillMBBs.reset(MBBId);
// Use-side bookkeeping: a later use makes folding the earlier spill unsafe.
1255 std::map<unsigned, std::vector<SRInfo> >::iterator SII =
1256 SpillIdxes.find(MBBId);
1257 if (SII != SpillIdxes.end() &&
1258 SII->second.back().vreg == NewVReg &&
1259 (int)index > SII->second.back().index)
1260 // Use(s) following the last def, it's not safe to fold the spill.
1261 SII->second.back().canFold = false;
1262 std::map<unsigned, std::vector<SRInfo> >::iterator RII =
1263 RestoreIdxes.find(MBBId);
1264 if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
1265 // If we are splitting live intervals, only fold if it's the first
1266 // use and there isn't another use later in the MBB.
1267 RII->second.back().canFold = false;
1269 // Only need a reload if there isn't an earlier def / use.
1270 if (RII == RestoreIdxes.end()) {
1271 std::vector<SRInfo> Infos;
1272 Infos.push_back(SRInfo(index, NewVReg, true));
1273 RestoreIdxes.insert(std::make_pair(MBBId, Infos));
1275 RII->second.push_back(SRInfo(index, NewVReg, true));
1277 RestoreMBBs.set(MBBId);
1281 // Update spill weight.
1282 unsigned loopDepth = loopInfo->getLoopDepth(MBB);
1283 nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
// The final interval created in the loop above gets the same low-weight
// treatment if every def/use of it could be folded.
1286 if (NewVReg && TrySplit && AllCanFold) {
1287 // If all of its def / use can be folded, give it a low spill weight.
1288 LiveInterval &nI = getOrCreateInterval(NewVReg);
/// alsoFoldARestore - Return true if MBB Id has a restore recorded for
/// virtual register vr at instruction index `index` that is marked foldable,
/// i.e. folding the spill would also fold the restore.
/// NOTE(review): the return statements are elided from this extract.
1293 bool LiveIntervals::alsoFoldARestore(int Id, int index, unsigned vr,
1294 BitVector &RestoreMBBs,
1295 std::map<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
// No restores recorded for this MBB at all.
1296 if (!RestoreMBBs[Id])
1298 std::vector<SRInfo> &Restores = RestoreIdxes[Id];
// Look for a foldable restore of vr at exactly this index.
1299 for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1300 if (Restores[i].index == index &&
1301 Restores[i].vreg == vr &&
1302 Restores[i].canFold)
/// eraseRestoreInfo - Invalidate the recorded restore of vr in MBB Id at
/// instruction index `index` (its index is set to -1 so later passes skip
/// it), typically after the restore was folded away.
1307 void LiveIntervals::eraseRestoreInfo(int Id, int index, unsigned vr,
1308 BitVector &RestoreMBBs,
1309 std::map<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
// Nothing recorded for this MBB.
1310 if (!RestoreMBBs[Id])
1312 std::vector<SRInfo> &Restores = RestoreIdxes[Id];
// NOTE(review): the condition tests `Restores[i].vreg` for non-zero rather
// than comparing it against vr — possibly text lost in extraction; confirm
// against the full file (upstream compares equality with vr).
1313 for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1314 if (Restores[i].index == index && Restores[i].vreg)
1315 Restores[i].index = -1;
/// addIntervalsForSpills - Create new intervals to replace li after it has
/// been selected for spilling: analyzes rematerializability of each val#,
/// assigns a stack slot if needed, rewrites all defs/uses, inserts spill /
/// restore points (when splitting), and returns the new intervals.
/// NOTE(review): this extract elides many original lines (braces,
/// else-branches, a few statements); comments below only describe what is
/// visible here.
1319 std::vector<LiveInterval*> LiveIntervals::
1320 addIntervalsForSpills(const LiveInterval &li,
1321 const MachineLoopInfo *loopInfo, VirtRegMap &vrm) {
1322 // Since this is called after the analysis is done we don't know if
1323 // LiveVariables is available
1324 lv_ = getAnalysisToUpdate<LiveVariables>();
// A weight of HUGE_VALF marks an interval that was already spilled.
1326 assert(li.weight != HUGE_VALF &&
1327 "attempt to spill already spilled interval!");
1329 DOUT << "\t\t\t\tadding intervals for spills for interval: ";
1330 li.print(DOUT, tri_);
1333 // Each bit specify whether it a spill is required in the MBB.
1334 BitVector SpillMBBs(mf_->getNumBlockIDs());
1335 std::map<unsigned, std::vector<SRInfo> > SpillIdxes;
1336 BitVector RestoreMBBs(mf_->getNumBlockIDs());
1337 std::map<unsigned, std::vector<SRInfo> > RestoreIdxes;
1338 std::map<unsigned,unsigned> MBBVRegsMap;
1339 std::vector<LiveInterval*> NewLIs;
1340 const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
// Per-val# rematerialization bookkeeping, indexed by VNInfo id.
1342 unsigned NumValNums = li.getNumValNums();
1343 SmallVector<MachineInstr*, 4> ReMatDefs;
1344 ReMatDefs.resize(NumValNums, NULL);
1345 SmallVector<MachineInstr*, 4> ReMatOrigDefs;
1346 ReMatOrigDefs.resize(NumValNums, NULL);
1347 SmallVector<int, 4> ReMatIds;
1348 ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
1349 BitVector ReMatDelete(NumValNums);
1350 unsigned Slot = VirtRegMap::MAX_STACK_SLOT;
1352 // Spilling a split live interval. It cannot be split any further. Also,
1353 // it's also guaranteed to be a single val# / range interval.
1354 if (vrm.getPreSplitReg(li.reg)) {
1355 vrm.setIsSplitFromReg(li.reg, 0);
1356 // Unset the split kill marker on the last use.
1357 unsigned KillIdx = vrm.getKillPoint(li.reg);
1359 MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
1360 assert(KillMI && "Last use disappeared?");
1361 int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
1362 assert(KillOp != -1 && "Last use disappeared?");
1363 KillMI->getOperand(KillOp).setIsKill(false);
1365 vrm.removeKillPoint(li.reg);
// The split register inherits the original's stack slot / remat info.
1366 bool DefIsReMat = vrm.isReMaterialized(li.reg);
1367 Slot = vrm.getStackSlot(li.reg);
1368 assert(Slot != VirtRegMap::MAX_STACK_SLOT);
1369 MachineInstr *ReMatDefMI = DefIsReMat ?
1370 vrm.getReMaterializedMI(li.reg) : NULL;
1372 bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1373 bool isLoad = isLoadSS ||
1374 (DefIsReMat && (ReMatDefMI->getDesc().isSimpleLoad()));
1375 bool IsFirstRange = true;
1376 for (LiveInterval::Ranges::const_iterator
1377 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1378 // If this is a split live interval with multiple ranges, it means there
1379 // are two-address instructions that re-defined the value. Only the
1380 // first def can be rematerialized!
1382 // Note ReMatOrigDefMI has already been deleted.
1383 rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
1384 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1385 false, vrm, rc, ReMatIds, loopInfo,
1386 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1387 MBBVRegsMap, NewLIs);
// Subsequent ranges: plain spill rewrite, no rematerialization.
1389 rewriteInstructionsForSpills(li, false, I, NULL, 0,
1390 Slot, 0, false, false, false,
1391 false, vrm, rc, ReMatIds, loopInfo,
1392 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1393 MBBVRegsMap, NewLIs);
1395 IsFirstRange = false;
// Non-presplit path: decide whether to try splitting at BB boundaries
// (gated by the -split-intervals-at-bb and -split-limit options).
1400 bool TrySplit = SplitAtBB && !intervalIsInOneMBB(li);
1401 if (SplitLimit != -1 && (int)numSplits >= SplitLimit)
1405 bool NeedStackSlot = false;
// Classify each value number: rematerializable (and deletable) or not.
1406 for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
1408 const VNInfo *VNI = *i;
1409 unsigned VN = VNI->id;
1410 unsigned DefIdx = VNI->def;
1412 continue; // Dead val#.
1413 // Is the def for the val# rematerializable?
1414 MachineInstr *ReMatDefMI = (DefIdx == ~0u)
1415 ? 0 : getInstructionFromIndex(DefIdx);
1417 if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, dummy)) {
1418 // Remember how to remat the def of this val#.
1419 ReMatOrigDefs[VN] = ReMatDefMI;
1420 // Original def may be modified so we have to make a copy here. vrm must
1422 ReMatDefs[VN] = ReMatDefMI = ReMatDefMI->clone();
1424 bool CanDelete = true;
1425 if (VNI->hasPHIKill) {
1426 // A kill is a phi node, not all of its uses can be rematerialized.
1427 // It must not be deleted.
1429 // Need a stack slot if there is any live range where uses cannot be
1431 NeedStackSlot = true;
1434 ReMatDelete.set(VN);
1436 // Need a stack slot if there is any live range where uses cannot be
1438 NeedStackSlot = true;
1442 // One stack slot per live interval.
1443 if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0)
1444 Slot = vrm.assignVirt2StackSlot(li.reg);
1446 // Create new intervals and rewrite defs and uses.
1447 for (LiveInterval::Ranges::const_iterator
1448 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1449 MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
1450 MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
1451 bool DefIsReMat = ReMatDefMI != NULL;
1452 bool CanDelete = ReMatDelete[I->valno->id];
1454 bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1455 bool isLoad = isLoadSS ||
1456 (DefIsReMat && ReMatDefMI->getDesc().isSimpleLoad());
1457 rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
1458 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1459 CanDelete, vrm, rc, ReMatIds, loopInfo,
1460 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1461 MBBVRegsMap, NewLIs);
1464 // Insert spills / restores if we are splitting.
1468 SmallPtrSet<LiveInterval*, 4> AddedKill;
1469 SmallVector<unsigned, 2> Ops;
1470 if (NeedStackSlot) {
// Walk every MBB flagged as needing a spill and try to fold the store
// into the defining instruction before asking the spiller to emit one.
1471 int Id = SpillMBBs.find_first();
1473 std::vector<SRInfo> &spills = SpillIdxes[Id];
1474 for (unsigned i = 0, e = spills.size(); i != e; ++i) {
1475 int index = spills[i].index;
1476 unsigned VReg = spills[i].vreg;
1477 LiveInterval &nI = getOrCreateInterval(VReg);
1478 bool isReMat = vrm.isReMaterialized(VReg);
1479 MachineInstr *MI = getInstructionFromIndex(index);
1480 bool CanFold = false;
1481 bool FoundUse = false;
1483 if (spills[i].canFold) {
// Collect the operand indices of VReg in MI for the folding attempt.
1485 for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
1486 MachineOperand &MO = MI->getOperand(j);
1487 if (!MO.isRegister() || MO.getReg() != VReg)
1494 (!FoundUse && !alsoFoldARestore(Id, index, VReg,
1495 RestoreMBBs, RestoreIdxes))) {
1496 // MI has two-address uses of the same register. If the use
1497 // isn't the first and only use in the BB, then we can't fold
1498 // it. FIXME: Move this to rewriteInstructionsForSpills.
1505 // Fold the store into the def if possible.
1506 bool Folded = false;
1507 if (CanFold && !Ops.empty()) {
1508 if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot,VReg)){
1511 // Also folded uses, do not issue a load.
1512 eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
1513 nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
1515 nI.removeRange(getDefIndex(index), getStoreIndex(index));
1519 // Else tell the spiller to issue a spill.
1521 LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
1522 bool isKill = LR->end == getStoreIndex(index);
1523 vrm.addSpillPoint(VReg, isKill, MI);
1525 AddedKill.insert(&nI);
1528 Id = SpillMBBs.find_next(Id);
// Walk every MBB flagged as needing a restore and try to fold the load
// (or the rematerialized def) into the using instruction.
1532 int Id = RestoreMBBs.find_first();
1534 std::vector<SRInfo> &restores = RestoreIdxes[Id];
1535 for (unsigned i = 0, e = restores.size(); i != e; ++i) {
1536 int index = restores[i].index;
1539 unsigned VReg = restores[i].vreg;
1540 LiveInterval &nI = getOrCreateInterval(VReg);
1541 MachineInstr *MI = getInstructionFromIndex(index);
1542 bool CanFold = false;
1544 if (restores[i].canFold) {
1546 for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
1547 MachineOperand &MO = MI->getOperand(j);
1548 if (!MO.isRegister() || MO.getReg() != VReg)
1552 // If this restore were to be folded, it would have been folded
1561 // Fold the load into the use if possible.
1562 bool Folded = false;
1563 if (CanFold && !Ops.empty()) {
1564 if (!vrm.isReMaterialized(VReg))
1565 Folded = tryFoldMemoryOperand(MI, vrm, NULL,index,Ops,true,Slot,VReg);
// Rematerialized def: try folding the remat instruction itself when it
// is (or loads like) a load.
1567 MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
1569 bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1570 // If the rematerializable def is a load, also try to fold it.
1571 if (isLoadSS || ReMatDefMI->getDesc().isSimpleLoad())
1572 Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
1573 Ops, isLoadSS, LdSlot, VReg);
1574 unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
1576 // Re-matting an instruction with virtual register use. Add the
1577 // register as an implicit use on the use MI and update the register
1578 // interval's spill weight.
1579 unsigned loopDepth = loopInfo->getLoopDepth(MI->getParent());
1580 LiveInterval &ImpLi = getInterval(ImpUse);
1582 getSpillWeight(false, true, loopDepth) / ImpLi.getSize();
1584 MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
1588 // If folding is not possible / failed, then tell the spiller to issue a
1589 // load / rematerialization for us.
1591 nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
1593 vrm.addRestorePoint(VReg, MI);
1595 Id = RestoreMBBs.find_next(Id);
1598 // Finalize intervals: add kills, finalize spill weights, and filter out
1600 std::vector<LiveInterval*> RetNewLIs;
1601 for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
1602 LiveInterval *LI = NewLIs[i];
// Normalize the accumulated weight by interval size.
1604 LI->weight /= LI->getSize();
1605 if (!AddedKill.count(LI)) {
// No spill point marked a kill for this interval — mark the last use
// operand as a kill and record the kill point in the VirtRegMap.
1606 LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
1607 unsigned LastUseIdx = getBaseIndex(LR->end);
1608 MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
1609 int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
1610 assert(UseIdx != -1);
1611 if (LastUse->getOperand(UseIdx).isImplicit() ||
1612 LastUse->getDesc().getOperandConstraint(UseIdx,TOI::TIED_TO) == -1){
1613 LastUse->getOperand(UseIdx).setIsKill();
1614 vrm.addKillPoint(LI->reg, LastUseIdx);
1617 RetNewLIs.push_back(LI);
1624 /// hasAllocatableSuperReg - Return true if the specified physical register has
1625 /// any super register that's allocatable.
// NOTE(review): the return statements are elided from this extract; the
// visible condition also requires the super-register to have an interval.
1626 bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
1627 for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS)
1628 if (allocatableRegs_[*AS] && hasInterval(*AS))
1633 /// getRepresentativeReg - Find the largest super register of the specified
1634 /// physical register.
// NOTE(review): the loop body's update of BestReg and the return are elided
// from this extract.
1635 unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
1636 // Find the largest super-register that is allocatable.
1637 unsigned BestReg = Reg;
1638 for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
1639 unsigned SuperReg = *AS;
// A super-register with an interval but no allocatable super of its own is
// the top of the chain.
1640 if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) {
1648 /// getNumConflictsWithPhysReg - Return the number of uses and defs of the
1649 /// specified interval that conflicts with the specified physical register.
// NOTE(review): the counter increment inside the if is elided from this
// extract.
1650 unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
1651 unsigned PhysReg) const {
1652 unsigned NumConflicts = 0;
// Conflicts are measured against the representative (largest allocatable
// super) register's interval.
1653 const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg));
// Visit every operand occurrence of li.reg in the function.
1654 for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
1655 E = mri_->reg_end(); I != E; ++I) {
1656 MachineOperand &O = I.getOperand();
1657 MachineInstr *MI = O.getParent();
1658 unsigned Index = getInstructionIndex(MI);
// The physical register being live at this occurrence is a conflict.
1659 if (pli.liveAt(Index))
1662 return NumConflicts;
1665 /// spillPhysRegAroundRegDefsUses - Spill the specified physical register
1666 /// around all defs and uses of the specified interval.
1667 void LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
1668 unsigned PhysReg, VirtRegMap &vrm) {
1669 unsigned SpillReg = getRepresentativeReg(PhysReg);
1671 for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
1672 // If there are registers which alias PhysReg, but which are not a
1673 // sub-register of the chosen representative super register. Assert
1674 // since we can't handle it yet.
1675 assert(*AS == SpillReg || !allocatableRegs_[*AS] ||
1676 tri_->isSuperRegister(*AS, SpillReg));
1678 LiveInterval &pli = getInterval(SpillReg);
1679 SmallPtrSet<MachineInstr*, 8> SeenMIs;
1680 for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
1681 E = mri_->reg_end(); I != E; ++I) {
1682 MachineOperand &O = I.getOperand();
1683 MachineInstr *MI = O.getParent();
1684 if (SeenMIs.count(MI))
1687 unsigned Index = getInstructionIndex(MI);
1688 if (pli.liveAt(Index)) {
1689 vrm.addEmergencySpill(SpillReg, MI);
1690 pli.removeRange(getLoadIndex(Index), getStoreIndex(Index)+1);
1691 for (const unsigned* AS = tri_->getSubRegisters(SpillReg); *AS; ++AS) {
1692 if (!hasInterval(*AS))
1694 LiveInterval &spli = getInterval(*AS);
1695 if (spli.liveAt(Index))
1696 spli.removeRange(getLoadIndex(Index), getStoreIndex(Index)+1);