//===-- LiveIntervalAnalysis.cpp - Live Interval Analysis ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the LiveInterval analysis pass which is used
// by the Linear Scan Register allocator. This pass linearizes the
// basic blocks of the function in DFS order and uses the
// LiveVariables pass to conservatively compute live intervals for
// each virtual and physical register.
//
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "liveintervals"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "VirtRegMap.h"
#include "llvm/Value.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include <algorithm>
#include <cmath>
using namespace llvm;
// Hidden options for help debugging.
static cl::opt<bool> DisableReMat("disable-rematerialization",
                                  cl::init(false), cl::Hidden);
static cl::opt<bool> SplitAtBB("split-intervals-at-bb",
                               cl::init(true), cl::Hidden);
static cl::opt<int> SplitLimit("split-limit",
                               cl::init(-1), cl::Hidden);

static cl::opt<bool> EnableAggressiveRemat("aggressive-remat", cl::Hidden);

static cl::opt<bool> EnableFastSpilling("fast-spill",
                                        cl::init(false), cl::Hidden);
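// These are hidden llc flags for debugging (listed by -help-hidden); as an
// illustrative example, "llc -disable-rematerialization foo.bc" turns remat
// off entirely, and "llc -split-limit=N foo.bc" caps how many interval
// splits are attempted.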
STATISTIC(numIntervals, "Number of original intervals");
STATISTIC(numIntervalsAfter, "Number of intervals after coalescing");
STATISTIC(numFolds    , "Number of loads/stores folded into instructions");
STATISTIC(numSplits   , "Number of intervals split");

char LiveIntervals::ID = 0;
static RegisterPass<LiveIntervals> X("liveintervals", "Live Interval Analysis");
void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<AliasAnalysis>();
  AU.addPreserved<AliasAnalysis>();
  AU.addPreserved<LiveVariables>();
  AU.addRequired<LiveVariables>();
  AU.addPreservedID(MachineLoopInfoID);
  AU.addPreservedID(MachineDominatorsID);
  AU.addPreservedID(PHIEliminationID);
  AU.addRequiredID(PHIEliminationID);
  AU.addRequiredID(TwoAddressInstructionPassID);
  MachineFunctionPass::getAnalysisUsage(AU);
}
void LiveIntervals::releaseMemory() {
  // Free the live intervals themselves.
  for (DenseMap<unsigned, LiveInterval*>::iterator I = r2iMap_.begin(),
       E = r2iMap_.end(); I != E; ++I)
    delete I->second;

  MBB2IdxMap.clear();
  Idx2MBBMap.clear();
  mi2iMap_.clear();
  i2miMap_.clear();
  r2iMap_.clear();

  // Release VNInfo memory regions after all VNInfo objects are dtor'd.
  VNInfoAllocator.Reset();
  while (!ClonedMIs.empty()) {
    MachineInstr *MI = ClonedMIs.back();
    ClonedMIs.pop_back();
    mf_->DeleteMachineInstr(MI);
  }
}
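// A note on instruction numbering (a sketch, assuming the InstrSlots layout
// declared in LiveIntervalAnalysis.h: LOAD = 0, USE = 1, DEF = 2, STORE = 3,
// NUM = 4): each MachineInstr is assigned a base index that is a multiple of
// InstrSlots::NUM, and the four sub-slots model the phases of the
// instruction. For example, an instruction at base index 8 owns slots 8
// (reload), 9 (use), 10 (def), and 11 (spill), so getBaseIndex(10) == 8 and
// getDefIndex(8) == 10. The empty slots inserted below leave room for
// instructions (e.g. spill and reload code) added later.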
void LiveIntervals::computeNumbering() {
  Index2MiMap OldI2MI = i2miMap_;
  std::vector<IdxMBBPair> OldI2MBB = Idx2MBBMap;

  Idx2MBBMap.clear();
  MBB2IdxMap.clear();
  mi2iMap_.clear();
  i2miMap_.clear();

  // Number MachineInstrs and MachineBasicBlocks.
  // Initialize MBB indexes to a sentinel.
  MBB2IdxMap.resize(mf_->getNumBlockIDs(), std::make_pair(~0U,~0U));

  unsigned MIIndex = 0;
  for (MachineFunction::iterator MBB = mf_->begin(), E = mf_->end();
       MBB != E; ++MBB) {
    unsigned StartIdx = MIIndex;

    // Insert an empty slot at the beginning of each block.
    MIIndex += InstrSlots::NUM;
    i2miMap_.push_back(0);

    for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
         I != E; ++I) {
      bool inserted = mi2iMap_.insert(std::make_pair(I, MIIndex)).second;
      assert(inserted && "multiple MachineInstr -> index mappings");
      i2miMap_.push_back(I);
      MIIndex += InstrSlots::NUM;

      // Insert an empty slot after every instruction.
      MIIndex += InstrSlots::NUM;
      i2miMap_.push_back(0);
    }

    // Set the MBB2IdxMap entry for this MBB.
    MBB2IdxMap[MBB->getNumber()] = std::make_pair(StartIdx, MIIndex - 1);
    Idx2MBBMap.push_back(std::make_pair(StartIdx, MBB));
  }
  std::sort(Idx2MBBMap.begin(), Idx2MBBMap.end(), Idx2MBBCompare());
  if (!OldI2MI.empty())
    for (iterator OI = begin(), OE = end(); OI != OE; ++OI) {
      for (LiveInterval::iterator LI = OI->second->begin(),
           LE = OI->second->end(); LI != LE; ++LI) {

        // Remap the start index of the live range to the corresponding new
        // number, or our best guess at what it _should_ correspond to if the
        // original instruction has been erased. This is either the following
        // instruction or its predecessor.
        unsigned index = LI->start / InstrSlots::NUM;
        unsigned offset = LI->start % InstrSlots::NUM;
        if (offset == InstrSlots::LOAD) {
          std::vector<IdxMBBPair>::const_iterator I =
                  std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), LI->start);
          // Take the pair containing the index.
          std::vector<IdxMBBPair>::const_iterator J =
                    (I == OldI2MBB.end() && OldI2MBB.size()>0) ? (I-1): I;

          LI->start = getMBBStartIdx(J->second);
        } else {
          LI->start = mi2iMap_[OldI2MI[index]] + offset;
        }

        // Remap the ending index in the same way that we remapped the start,
        // except for the final step where we always map to the immediately
        // following instruction.
        index = (LI->end - 1) / InstrSlots::NUM;
        offset = LI->end % InstrSlots::NUM;
        if (offset == InstrSlots::LOAD) {
          // VReg dies at end of block.
          std::vector<IdxMBBPair>::const_iterator I =
                  std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), LI->end);
          --I;

          LI->end = getMBBEndIdx(I->second) + 1;
        } else {
          unsigned idx = index;
          while (index < OldI2MI.size() && !OldI2MI[index]) ++index;

          if (index != OldI2MI.size())
            LI->end = mi2iMap_[OldI2MI[index]] + (idx == index ? offset : 0);
          else
            LI->end = InstrSlots::NUM * i2miMap_.size();
        }
      }
      for (LiveInterval::vni_iterator VNI = OI->second->vni_begin(),
           VNE = OI->second->vni_end(); VNI != VNE; ++VNI) {
        VNInfo* vni = *VNI;

        // Remap the VNInfo def index, which works the same as the
        // start indices above. VNs with special sentinel defs
        // don't need to be remapped.
        if (vni->def != ~0U && vni->def != ~1U) {
          unsigned index = vni->def / InstrSlots::NUM;
          unsigned offset = vni->def % InstrSlots::NUM;
          if (offset == InstrSlots::LOAD) {
            std::vector<IdxMBBPair>::const_iterator I =
                  std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), vni->def);
            // Take the pair containing the index.
            std::vector<IdxMBBPair>::const_iterator J =
                    (I == OldI2MBB.end() && OldI2MBB.size()>0) ? (I-1): I;

            vni->def = getMBBStartIdx(J->second);
          } else {
            vni->def = mi2iMap_[OldI2MI[index]] + offset;
          }
        }

        // Remap the VNInfo kill indices, which works the same as
        // the end indices above.
        for (size_t i = 0; i < vni->kills.size(); ++i) {
          // PHI kills don't need to be remapped.
          if (!vni->kills[i]) continue;

          unsigned index = (vni->kills[i]-1) / InstrSlots::NUM;
          unsigned offset = vni->kills[i] % InstrSlots::NUM;
          if (offset == InstrSlots::STORE) {
            std::vector<IdxMBBPair>::const_iterator I =
             std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), vni->kills[i]);
            --I;

            vni->kills[i] = getMBBEndIdx(I->second);
          } else {
            unsigned idx = index;
            while (index < OldI2MI.size() && !OldI2MI[index]) ++index;

            if (index != OldI2MI.size())
              vni->kills[i] = mi2iMap_[OldI2MI[index]] +
                              (idx == index ? offset : 0);
            else
              vni->kills[i] = InstrSlots::NUM * i2miMap_.size();
          }
        }
      }
    }
}
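// An illustrative example of the remapping arithmetic above: an old index
// of 42 decomposes into index 42 / InstrSlots::NUM == 10 and offset
// 42 % InstrSlots::NUM == 2 (a def slot), so if instruction 10 survived
// renumbering, its new position is mi2iMap_[OldI2MI[10]] + 2. Indices that
// fall on block boundaries (a LOAD offset for starts, a STORE offset for
// kills) are instead snapped to the new start or end index of their MBB.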
/// runOnMachineFunction - Compute live intervals for the whole function.
///
bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
  mf_ = &fn;
  mri_ = &mf_->getRegInfo();
  tm_ = &fn.getTarget();
  tri_ = tm_->getRegisterInfo();
  tii_ = tm_->getInstrInfo();
  aa_ = &getAnalysis<AliasAnalysis>();
  lv_ = &getAnalysis<LiveVariables>();
  allocatableRegs_ = tri_->getAllocatableSet(fn);

  computeNumbering();
  computeIntervals();

  numIntervals += getNumIntervals();

  DOUT << "********** INTERVALS **********\n";
  for (iterator I = begin(), E = end(); I != E; ++I) {
    I->second->print(DOUT, tri_);
    DOUT << "\n";
  }

  numIntervalsAfter += getNumIntervals();
  DEBUG(dump());
  return true;
}
/// print - Implement the dump method.
void LiveIntervals::print(std::ostream &O, const Module* ) const {
  O << "********** INTERVALS **********\n";
  for (const_iterator I = begin(), E = end(); I != E; ++I) {
    I->second->print(O, tri_);
    O << "\n";
  }

  O << "********** MACHINEINSTRS **********\n";
  for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end();
       mbbi != mbbe; ++mbbi) {
    O << ((Value*)mbbi->getBasicBlock())->getName() << ":\n";
    for (MachineBasicBlock::iterator mii = mbbi->begin(),
           mie = mbbi->end(); mii != mie; ++mii) {
      O << getInstructionIndex(mii) << '\t' << *mii;
    }
  }
}
/// conflictsWithPhysRegDef - Returns true if the specified register
/// is defined during the duration of the specified interval.
bool LiveIntervals::conflictsWithPhysRegDef(const LiveInterval &li,
                                            VirtRegMap &vrm, unsigned reg) {
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    for (unsigned index = getBaseIndex(I->start),
           end = getBaseIndex(I->end-1) + InstrSlots::NUM; index != end;
         index += InstrSlots::NUM) {
      // Skip deleted instructions.
      while (index != end && !getInstructionFromIndex(index))
        index += InstrSlots::NUM;
      if (index == end) break;

      MachineInstr *MI = getInstructionFromIndex(index);
      unsigned SrcReg, DstReg;
      if (tii_->isMoveInstr(*MI, SrcReg, DstReg))
        if (SrcReg == li.reg || DstReg == li.reg)
          continue;
      for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
        MachineOperand& mop = MI->getOperand(i);
        if (!mop.isRegister())
          continue;
        unsigned PhysReg = mop.getReg();
        if (PhysReg == 0 || PhysReg == li.reg)
          continue;
        if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
          if (!vrm.hasPhys(PhysReg))
            continue;
          PhysReg = vrm.getPhys(PhysReg);
        }
        if (PhysReg && tri_->regsOverlap(PhysReg, reg))
          return true;
      }
    }
  }

  return false;
}
void LiveIntervals::printRegName(unsigned reg) const {
  if (TargetRegisterInfo::isPhysicalRegister(reg))
    cerr << tri_->getName(reg);
  else
    cerr << "%reg" << reg;
}
void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
                                             MachineBasicBlock::iterator mi,
                                             unsigned MIIdx, MachineOperand& MO,
                                             unsigned MOIdx,
                                             LiveInterval &interval) {
  DOUT << "\t\tregister: "; DEBUG(printRegName(interval.reg));
  LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);

  if (mi->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
    DOUT << "is an implicit_def\n";
    return;
  }

  // Virtual registers may be defined multiple times (due to phi
  // elimination and 2-addr elimination). Much of what we do only has to be
  // done once for the vreg. We use an empty interval to detect the first
  // time we see a vreg.
  if (interval.empty()) {
    // Get the Idx of the defining instruction.
    unsigned defIndex = getDefIndex(MIIdx);
    VNInfo *ValNo;
    MachineInstr *CopyMI = NULL;
    unsigned SrcReg, DstReg;
    if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
        mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
        tii_->isMoveInstr(*mi, SrcReg, DstReg))
      CopyMI = mi;
    ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);
    assert(ValNo->id == 0 && "First value in interval is not 0?");

    // Loop over all of the blocks that the vreg is defined in. There are
    // two cases we have to handle here. The most common case is a vreg
    // whose lifetime is contained within a basic block. In this case there
    // will be a single kill, in MBB, which comes after the definition.
    if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
      // FIXME: what about dead vars?
      unsigned killIdx;
      if (vi.Kills[0] != mi)
        killIdx = getUseIndex(getInstructionIndex(vi.Kills[0]))+1;
      else
        killIdx = defIndex+1;

      // If the kill happens after the definition, we have an intra-block
      // live range.
      if (killIdx > defIndex) {
        assert(vi.AliveBlocks.none() &&
               "Shouldn't be alive across any blocks!");
        LiveRange LR(defIndex, killIdx, ValNo);
        interval.addRange(LR);
        DOUT << " +" << LR << "\n";
        interval.addKill(ValNo, killIdx);
        return;
      }
    }
    // The other case we handle is when a virtual register lives to the end
    // of the defining block, potentially live across some blocks, then is
    // live into some number of blocks, but gets killed. Start by adding a
    // range that goes from this definition to the end of the defining block.
    LiveRange NewLR(defIndex, getMBBEndIdx(mbb)+1, ValNo);
    DOUT << " +" << NewLR;
    interval.addRange(NewLR);

    // Iterate over all of the blocks that the variable is completely
    // live in, adding [instrIndex(begin), instrIndex(end)+4) to the
    // live interval.
    for (unsigned i = 0, e = vi.AliveBlocks.size(); i != e; ++i) {
      if (vi.AliveBlocks[i]) {
        LiveRange LR(getMBBStartIdx(i),
                     getMBBEndIdx(i)+1,  // MBB ends at -1.
                     ValNo);
        interval.addRange(LR);
        DOUT << " +" << LR;
      }
    }

    // Finally, this virtual register is live from the start of any killing
    // block to the 'use' slot of the killing instruction.
    for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
      MachineInstr *Kill = vi.Kills[i];
      unsigned killIdx = getUseIndex(getInstructionIndex(Kill))+1;
      LiveRange LR(getMBBStartIdx(Kill->getParent()),
                   killIdx, ValNo);
      interval.addRange(LR);
      interval.addKill(ValNo, killIdx);
      DOUT << " +" << LR;
    }

  } else {
    // If this is the second time we see a virtual register definition, it
    // must be due to phi elimination or two addr elimination. If this is
    // the result of two address elimination, then the vreg is one of the
    // def-and-use register operands.
    if (mi->isRegReDefinedByTwoAddr(interval.reg, MOIdx)) {
      // If this is a two-address definition, then we have already processed
      // the live range. The only problem is that we didn't realize there
      // are actually two values in the live interval. Because of this we
      // need to take the live range that defines this register and split it
      // into two values.
      assert(interval.containsOneValue());
      unsigned DefIndex = getDefIndex(interval.getValNumInfo(0)->def);
      unsigned RedefIndex = getDefIndex(MIIdx);

      const LiveRange *OldLR = interval.getLiveRangeContaining(RedefIndex-1);
      VNInfo *OldValNo = OldLR->valno;

      // Delete the initial value, which should be short and continuous,
      // because the 2-addr copy must be in the same MBB as the redef.
      interval.removeRange(DefIndex, RedefIndex);

      // Two-address vregs should always only be redefined once. This means
      // that at this point, there should be exactly one value number in it.
      assert(interval.containsOneValue() && "Unexpected 2-addr liveint!");

      // The new value number (#1) is defined by the instruction we claimed
      // defined value #0.
      VNInfo *ValNo = interval.getNextValue(OldValNo->def, OldValNo->copy,
                                            VNInfoAllocator);

      // Value#0 is now defined by the 2-addr instruction.
      OldValNo->def  = RedefIndex;
      OldValNo->copy = 0;

      // Add the new live interval which replaces the range for the input copy.
      LiveRange LR(DefIndex, RedefIndex, ValNo);
      DOUT << " replace range with " << LR;
      interval.addRange(LR);
      interval.addKill(ValNo, RedefIndex);

      // If this redefinition is dead, we need to add a dummy unit live
      // range covering the def slot.
      if (MO.isDead())
        interval.addRange(LiveRange(RedefIndex, RedefIndex+1, OldValNo));

      DOUT << " RESULT: ";
      interval.print(DOUT, tri_);
    } else {
      // Otherwise, this must be because of phi elimination. If this is the
      // first redefinition of the vreg that we have seen, go back and change
      // the live range in the PHI block to be a different value number.
      if (interval.containsOneValue()) {
        assert(vi.Kills.size() == 1 &&
               "PHI elimination vreg should have one kill, the PHI itself!");

        // Remove the old range that we now know has an incorrect number.
        VNInfo *VNI = interval.getValNumInfo(0);
        MachineInstr *Killer = vi.Kills[0];
        unsigned Start = getMBBStartIdx(Killer->getParent());
        unsigned End = getUseIndex(getInstructionIndex(Killer))+1;
        DOUT << " Removing [" << Start << "," << End << "] from: ";
        interval.print(DOUT, tri_); DOUT << "\n";
        interval.removeRange(Start, End);
        VNI->hasPHIKill = true;
        DOUT << " RESULT: "; interval.print(DOUT, tri_);

        // Replace the interval with one of a NEW value number. Note that this
        // value number isn't actually defined by an instruction, weird huh? :)
        LiveRange LR(Start, End, interval.getNextValue(~0, 0, VNInfoAllocator));
        DOUT << " replace range with " << LR;
        interval.addRange(LR);
        interval.addKill(LR.valno, End);
        DOUT << " RESULT: "; interval.print(DOUT, tri_);
      }

      // In the case of PHI elimination, each variable definition is only
      // live until the end of the block. We've already taken care of the
      // rest of the live range.
      unsigned defIndex = getDefIndex(MIIdx);

      VNInfo *ValNo;
      MachineInstr *CopyMI = NULL;
      unsigned SrcReg, DstReg;
      if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
          mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
          tii_->isMoveInstr(*mi, SrcReg, DstReg))
        CopyMI = mi;
      ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);

      unsigned killIndex = getMBBEndIdx(mbb) + 1;
      LiveRange LR(defIndex, killIndex, ValNo);
      interval.addRange(LR);
      interval.addKill(ValNo, killIndex);
      ValNo->hasPHIKill = true;
      DOUT << " +" << LR;
    }
  }

  DOUT << '\n';
}
void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
                                              MachineBasicBlock::iterator mi,
                                              unsigned MIIdx,
                                              MachineOperand& MO,
                                              LiveInterval &interval,
                                              MachineInstr *CopyMI) {
  // A physical register cannot be live across basic blocks, so its
  // lifetime must end somewhere in its defining basic block.
  DOUT << "\t\tregister: "; DEBUG(printRegName(interval.reg));

  unsigned baseIndex = MIIdx;
  unsigned start = getDefIndex(baseIndex);
  unsigned end = start;

  // If it is not used after definition, it is considered dead at
  // the instruction defining it. Hence its interval is:
  // [defSlot(def), defSlot(def)+1)
  if (MO.isDead()) {
    DOUT << " dead";
    end = getDefIndex(start) + 1;
    goto exit;
  }

  // If it is not dead on definition, it must be killed by a
  // subsequent instruction. Hence its interval is:
  // [defSlot(def), useSlot(kill)+1)
  baseIndex += InstrSlots::NUM;
  while (++mi != MBB->end()) {
    while (baseIndex / InstrSlots::NUM < i2miMap_.size() &&
           getInstructionFromIndex(baseIndex) == 0)
      baseIndex += InstrSlots::NUM;
    if (mi->killsRegister(interval.reg, tri_)) {
      DOUT << " killed";
      end = getUseIndex(baseIndex) + 1;
      goto exit;
    } else if (mi->modifiesRegister(interval.reg, tri_)) {
      // Another instruction redefines the register before it is ever read.
      // Then the register is essentially dead at the instruction that defines
      // it. Hence its interval is:
      // [defSlot(def), defSlot(def)+1)
      DOUT << " dead";
      end = getDefIndex(start) + 1;
      goto exit;
    }

    baseIndex += InstrSlots::NUM;
  }

  // The only case we should have a dead physreg here without a killing or
  // redefining instruction is if it is live-in to the function and never
  // used.
  assert(!CopyMI && "physreg was not killed in defining block!");
  end = getDefIndex(start) + 1;  // It's dead.

exit:
  assert(start < end && "did not find end of interval?");

  // Already exists? Extend old live interval.
  LiveInterval::iterator OldLR = interval.FindLiveRangeContaining(start);
  VNInfo *ValNo = (OldLR != interval.end())
    ? OldLR->valno : interval.getNextValue(start, CopyMI, VNInfoAllocator);
  LiveRange LR(start, end, ValNo);
  interval.addRange(LR);
  interval.addKill(LR.valno, end);
  DOUT << " +" << LR << '\n';
}
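// For example (using the slot scheme sketched earlier): a physreg defined
// at base index 8 and killed by the instruction at base index 16 gets the
// interval [10, 18), i.e. [defSlot(8), useSlot(16)+1); if it is dead on
// definition, it gets the unit interval [10, 11) instead.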
void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
                                      MachineBasicBlock::iterator MI,
                                      unsigned MIIdx, MachineOperand &MO,
                                      unsigned MOIdx) {
  if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
    handleVirtualRegisterDef(MBB, MI, MIIdx, MO, MOIdx,
                             getOrCreateInterval(MO.getReg()));
  else if (allocatableRegs_[MO.getReg()]) {
    MachineInstr *CopyMI = NULL;
    unsigned SrcReg, DstReg;
    if (MI->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
        MI->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
        tii_->isMoveInstr(*MI, SrcReg, DstReg))
      CopyMI = MI;
    handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                              getOrCreateInterval(MO.getReg()), CopyMI);
    // Def of a register also defines its sub-registers.
    for (const unsigned* AS = tri_->getSubRegisters(MO.getReg()); *AS; ++AS)
      // If MI also modifies the sub-register explicitly, avoid processing it
      // more than once. Do not pass in TRI here so it checks for exact match.
      if (!MI->modifiesRegister(*AS))
        handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                                  getOrCreateInterval(*AS), 0);
  }
}
void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
                                         unsigned MIIdx,
                                         LiveInterval &interval, bool isAlias) {
  DOUT << "\t\tlivein register: "; DEBUG(printRegName(interval.reg));

  // Look for kills; if the register reaches a def before it's killed, then
  // it shouldn't be considered live-in.
  MachineBasicBlock::iterator mi = MBB->begin();
  unsigned baseIndex = MIIdx;
  unsigned start = baseIndex;
  unsigned end = start;
  while (mi != MBB->end()) {
    if (mi->killsRegister(interval.reg, tri_)) {
      DOUT << " killed";
      end = getUseIndex(baseIndex) + 1;
      goto exit;
    } else if (mi->modifiesRegister(interval.reg, tri_)) {
      // Another instruction redefines the register before it is ever read.
      // Then the register is essentially dead at the instruction that defines
      // it. Hence its interval is:
      // [defSlot(def), defSlot(def)+1)
      DOUT << " dead";
      end = getDefIndex(start) + 1;
      goto exit;
    }

    baseIndex += InstrSlots::NUM;
    while (baseIndex / InstrSlots::NUM < i2miMap_.size() &&
           getInstructionFromIndex(baseIndex) == 0)
      baseIndex += InstrSlots::NUM;
    ++mi;
  }

exit:
  // Live-in register might not be used at all.
  if (end == start) {
    if (isAlias) {
      DOUT << " dead";
      end = getDefIndex(MIIdx) + 1;
    } else {
      DOUT << " live through";
      end = baseIndex;
    }
  }

  LiveRange LR(start, end, interval.getNextValue(start, 0, VNInfoAllocator));
  interval.addRange(LR);
  interval.addKill(LR.valno, end);
  DOUT << " +" << LR << '\n';
}
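// As a sketch: a register live into the block at index MIIdx and first
// killed by the instruction at base index 12 gets the range
// [MIIdx, useSlot(12)+1) == [MIIdx, 14); one that is never read is treated
// as live through the block (or dead at MIIdx when it is only an alias of
// a live-in).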
/// computeIntervals - Compute the live intervals for virtual registers. For
/// some ordering of the machine instructions [1,N], a live interval is an
/// interval [i, j) where 1 <= i <= j < N for which a variable is live.
void LiveIntervals::computeIntervals() {
  DOUT << "********** COMPUTING LIVE INTERVALS **********\n"
       << "********** Function: "
       << ((Value*)mf_->getFunction())->getName() << '\n';
  // Track the index of the current machine instr.
  unsigned MIIndex = 0;

  // Skip over empty initial indices.
  while (MIIndex / InstrSlots::NUM < i2miMap_.size() &&
         getInstructionFromIndex(MIIndex) == 0)
    MIIndex += InstrSlots::NUM;

  for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
       MBBI != E; ++MBBI) {
    MachineBasicBlock *MBB = MBBI;
    DOUT << ((Value*)MBB->getBasicBlock())->getName() << ":\n";

    MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();

    // Create intervals for live-ins to this BB first.
    for (MachineBasicBlock::const_livein_iterator LI = MBB->livein_begin(),
           LE = MBB->livein_end(); LI != LE; ++LI) {
      handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
      // Multiple live-ins can alias the same register.
      for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
        if (!hasInterval(*AS))
          handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
                               true);
    }

    for (; MI != miEnd; ++MI) {
      DOUT << MIIndex << "\t" << *MI;

      // Handle defs.
      for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
        MachineOperand &MO = MI->getOperand(i);
        // handle register defs - build intervals
        if (MO.isRegister() && MO.getReg() && MO.isDef())
          handleRegisterDef(MBB, MI, MIIndex, MO, i);
      }

      MIIndex += InstrSlots::NUM;

      // Skip over empty indices.
      while (MIIndex / InstrSlots::NUM < i2miMap_.size() &&
             getInstructionFromIndex(MIIndex) == 0)
        MIIndex += InstrSlots::NUM;
    }
  }
}
bool LiveIntervals::findLiveInMBBs(const LiveRange &LR,
                              SmallVectorImpl<MachineBasicBlock*> &MBBs) const {
  std::vector<IdxMBBPair>::const_iterator I =
    std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), LR.start);

  bool ResVal = false;
  while (I != Idx2MBBMap.end()) {
    if (LR.end <= I->first)
      break;
    MBBs.push_back(I->second);
    ResVal = true;
    ++I;
  }
  return ResVal;
}

LiveInterval* LiveIntervals::createInterval(unsigned reg) {
  float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ?
                       HUGE_VALF : 0.0F;
  return new LiveInterval(reg, Weight);
}
/// getVNInfoSourceReg - Helper function that parses the specified VNInfo
/// copy field and returns the source register that defines it.
unsigned LiveIntervals::getVNInfoSourceReg(const VNInfo *VNI) const {
  if (!VNI->copy)
    return 0;

  if (VNI->copy->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG)
    return VNI->copy->getOperand(1).getReg();
  if (VNI->copy->getOpcode() == TargetInstrInfo::INSERT_SUBREG)
    return VNI->copy->getOperand(2).getReg();
  unsigned SrcReg, DstReg;
  if (tii_->isMoveInstr(*VNI->copy, SrcReg, DstReg))
    return SrcReg;
  assert(0 && "Unrecognized copy instruction!");
  return 0;
}
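// For instance (register names here are illustrative): in
// "%v1 = EXTRACT_SUBREG %v2, 3" the source is operand 1 (%v2); in
// "%v1 = INSERT_SUBREG %v1, %v2, 3" it is operand 2 (%v2); for a plain
// move it is whatever SrcReg isMoveInstr reports.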
//===----------------------------------------------------------------------===//
// Register allocator hooks.
//

/// getReMatImplicitUse - If the remat definition MI has one (for now, we only
/// allow one) virtual register operand, then its uses are implicitly using
/// the register. Returns the virtual register.
unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
                                            MachineInstr *MI) const {
  unsigned RegOp = 0;
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isRegister() || !MO.isUse())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || Reg == li.reg)
      continue;
    // FIXME: For now, only remat MI with at most one register operand.
    assert(!RegOp &&
           "Can't rematerialize instruction with multiple register operand!");
    RegOp = MO.getReg();
  }
  return RegOp;
}
/// isValNoAvailableAt - Return true if the val# of the specified interval
/// which reaches the given instruction also reaches the specified use index.
bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
                                       unsigned UseIdx) const {
  unsigned Index = getInstructionIndex(MI);
  VNInfo *ValNo = li.FindLiveRangeContaining(Index)->valno;
  LiveInterval::const_iterator UI = li.FindLiveRangeContaining(UseIdx);
  return UI != li.end() && UI->valno == ValNo;
}
/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       const VNInfo *ValNo, MachineInstr *MI,
                                       bool &isLoad) {
  if (DisableReMat)
    return false;

  isLoad = false;
  if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF)
    return true;

  int FrameIdx = 0;
  if (tii_->isLoadFromStackSlot(MI, FrameIdx) &&
      mf_->getFrameInfo()->isImmutableObjectIndex(FrameIdx))
    // FIXME: Let target specific isReallyTriviallyReMaterializable determine
    // this, but remember this is not safe to fold into a two-address
    // instruction.
    // This is a load from fixed stack slot. It can be rematerialized.
    return true;

  // If the target-specific rules don't identify an instruction as
  // being trivially rematerializable, use some target-independent
  // constraints.
  if (!MI->getDesc().isRematerializable() ||
      !tii_->isTriviallyReMaterializable(MI)) {
    if (!EnableAggressiveRemat)
      return false;

    // If the instruction accesses memory but the memoperands have been lost,
    // we can't analyze it.
    const TargetInstrDesc &TID = MI->getDesc();
    if ((TID.mayLoad() || TID.mayStore()) && MI->memoperands_empty())
      return false;

    // Avoid instructions obviously unsafe for remat.
    if (TID.hasUnmodeledSideEffects() || TID.isNotDuplicable())
      return false;

    // If the instruction accesses memory and the memory could be non-constant,
    // assume the instruction is not rematerializable.
    for (std::list<MachineMemOperand>::const_iterator I = MI->memoperands_begin(),
         E = MI->memoperands_end(); I != E; ++I) {
      const MachineMemOperand &MMO = *I;
      if (MMO.isVolatile() || MMO.isStore())
        return false;
      const Value *V = MMO.getValue();
      if (!V)
        return false;
      if (const PseudoSourceValue *PSV = dyn_cast<PseudoSourceValue>(V)) {
        if (!PSV->isConstant(mf_->getFrameInfo()))
          return false;
      } else if (!aa_->pointsToConstantMemory(V))
        return false;
    }
    // If any of the registers accessed are non-constant, conservatively assume
    // the instruction is not rematerializable.
    unsigned ImpUse = 0;
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      const MachineOperand &MO = MI->getOperand(i);
      if (!MO.isRegister())
        continue;
      unsigned Reg = MO.getReg();
      if (Reg == 0)
        continue;
      if (TargetRegisterInfo::isPhysicalRegister(Reg))
        return false;

      // Only allow one def, and that in the first operand.
      if (MO.isDef() != (i == 0))
        return false;

      // Only allow constant-valued registers.
      bool IsLiveIn = mri_->isLiveIn(Reg);
      MachineRegisterInfo::def_iterator I = mri_->def_begin(Reg),
                                        E = mri_->def_end();

      // For the def, it should be the only def.
      if (MO.isDef() && (next(I) != E || IsLiveIn))
        return false;

      if (MO.isUse()) {
        // Only allow one other register use, as that's all the
        // remat mechanisms support currently.
        if (Reg != li.reg) {
          if (ImpUse == 0)
            ImpUse = Reg;
          else if (Reg != ImpUse)
            return false;
        }

        // For uses, there should be only one associated def.
        if (I != E && (next(I) != E || IsLiveIn))
          return false;
      }
    }
  }
  unsigned ImpUse = getReMatImplicitUse(li, MI);
  if (ImpUse) {
    const LiveInterval &ImpLi = getInterval(ImpUse);
    for (MachineRegisterInfo::use_iterator ri = mri_->use_begin(li.reg),
           re = mri_->use_end(); ri != re; ++ri) {
      MachineInstr *UseMI = &*ri;
      unsigned UseIdx = getInstructionIndex(UseMI);
      if (li.FindLiveRangeContaining(UseIdx)->valno != ValNo)
        continue;
      if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
        return false;
    }
  }
  return true;
}
/// isReMaterializable - Returns true if the definition of every val# of the
/// specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li, bool &isLoad) {
  isLoad = false;
  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
       i != e; ++i) {
    const VNInfo *VNI = *i;
    unsigned DefIdx = VNI->def;
    if (DefIdx == ~1U)
      continue; // Dead val#.
    // Is the def for the val# rematerializable?
    if (DefIdx == ~0u)
      return false;
    MachineInstr *ReMatDefMI = getInstructionFromIndex(DefIdx);
    bool DefIsLoad = false;
    if (!ReMatDefMI ||
        !isReMaterializable(li, VNI, ReMatDefMI, DefIsLoad))
      return false;
    isLoad |= DefIsLoad;
  }
  return true;
}
/// FilterFoldedOps - Filter out two-address use operands. Return
/// true if it finds any issue with the operands that ought to prevent
/// folding.
static bool FilterFoldedOps(MachineInstr *MI,
                            SmallVector<unsigned, 2> &Ops,
                            unsigned &MRInfo,
                            SmallVector<unsigned, 2> &FoldOps) {
  const TargetInstrDesc &TID = MI->getDesc();

  MRInfo = 0;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    unsigned OpIdx = Ops[i];
    MachineOperand &MO = MI->getOperand(OpIdx);
    // FIXME: fold subreg use.
    if (MO.getSubReg())
      return true;
    if (MO.isDef())
      MRInfo |= (unsigned)VirtRegMap::isMod;
    else {
      // Filter out two-address use operand(s).
      if (!MO.isImplicit() &&
          TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1) {
        MRInfo = VirtRegMap::isModRef;
        continue;
      }
      MRInfo |= (unsigned)VirtRegMap::isRef;
    }
    FoldOps.push_back(OpIdx);
  }
  return false;
}
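// For example: an operand list containing only a plain use yields
// MRInfo == isRef; a def yields isMod; and a non-implicit use tied to a def
// (a two-address operand) forces MRInfo to isModRef and is left out of
// FoldOps entirely.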
/// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
/// slot / to reg or any rematerialized load into the ith operand of the
/// specified MI. If it is successful, MI is updated with the newly created
/// MI and returns true.
bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
                                         VirtRegMap &vrm, MachineInstr *DefMI,
                                         unsigned InstrIdx,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool isSS, int Slot, unsigned Reg) {
  // If it is an implicit def instruction, just delete it.
  if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
    RemoveMachineInstrFromMaps(MI);
    vrm.RemoveMachineInstrFromMaps(MI);
    MI->eraseFromParent();
    ++numFolds;
    return true;
  }

  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;

  // The only time it's safe to fold into a two address instruction is when
  // it's folding reload and spill from / into a spill stack slot.
  if (DefMI && (MRInfo & VirtRegMap::isMod))
    return false;

  MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(*mf_, MI, FoldOps, Slot)
                           : tii_->foldMemoryOperand(*mf_, MI, FoldOps, DefMI);
  if (fmi) {
    // Remember this instruction uses the spill slot.
    if (isSS) vrm.addSpillSlotUse(Slot, fmi);

    // Attempt to fold the memory reference into the instruction. If
    // we can do this, we don't need to insert spill code.
    MachineBasicBlock &MBB = *MI->getParent();
    if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
      vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
    vrm.transferSpillPts(MI, fmi);
    vrm.transferRestorePts(MI, fmi);
    vrm.transferEmergencySpills(MI, fmi);
    mi2iMap_.erase(MI);
    i2miMap_[InstrIdx /InstrSlots::NUM] = fmi;
    mi2iMap_[fmi] = InstrIdx;
    MI = MBB.insert(MBB.erase(MI), fmi);
    ++numFolds;
    return true;
  }
  return false;
}
/// canFoldMemoryOperand - Returns true if the specified load / store
/// folding is possible.
bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool ReMat) const {
  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;

  // It's only legal to remat for a use, not a def.
  if (ReMat && (MRInfo & VirtRegMap::isMod))
    return false;

  return tii_->canFoldMemoryOperand(MI, FoldOps);
}
bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
  SmallPtrSet<MachineBasicBlock*, 4> MBBs;
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    std::vector<IdxMBBPair>::const_iterator II =
      std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), I->start);
    if (II == Idx2MBBMap.end())
      continue;
    if (I->end > II->first)  // crossing an MBB.
      return false;
    MBBs.insert(II->second);
    if (MBBs.size() > 1)
      return false;
  }
  return true;
}
/// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of
/// interval on to-be re-materialized operands of MI) with new register.
void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
                                       MachineInstr *MI, unsigned NewVReg,
                                       VirtRegMap &vrm) {
  // There is an implicit use. That means one of the other operands is
  // being remat'ed and the remat'ed instruction has li.reg as a use
  // operand. Make sure we rewrite that as well.
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isRegister())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
      continue;
    if (!vrm.isReMaterialized(Reg))
      continue;
    MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
    MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg);
    if (UseMO)
      UseMO->setReg(NewVReg);
  }
}
/// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
/// for addIntervalsForSpills to rewrite uses / defs for the given live range.
bool LiveIntervals::
rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
                 bool TrySplit, unsigned index, unsigned end, MachineInstr *MI,
                 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                 unsigned Slot, int LdSlot,
                 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                 VirtRegMap &vrm,
                 const TargetRegisterClass* rc,
                 SmallVector<int, 4> &ReMatIds,
                 const MachineLoopInfo *loopInfo,
                 unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
                 DenseMap<unsigned,unsigned> &MBBVRegsMap,
                 std::vector<LiveInterval*> &NewLIs, float &SSWeight) {
  MachineBasicBlock *MBB = MI->getParent();
  unsigned loopDepth = loopInfo->getLoopDepth(MBB);
  bool CanFold = false;
 RestartInstruction:
  for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
    MachineOperand& mop = MI->getOperand(i);
    if (!mop.isRegister())
      continue;
    unsigned Reg = mop.getReg();
    unsigned RegI = Reg;
    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
      continue;
    if (Reg != li.reg)
      continue;

    bool TryFold = !DefIsReMat;
    bool FoldSS = true; // Default behavior unless it's a remat.
    int FoldSlot = Slot;
    if (DefIsReMat) {
      // If this is the rematerializable definition MI itself and
      // all of its uses are rematerialized, simply delete it.
      if (MI == ReMatOrigDefMI && CanDelete) {
        DOUT << "\t\t\t\tErasing re-materializable def: ";
        RemoveMachineInstrFromMaps(MI);
        vrm.RemoveMachineInstrFromMaps(MI);
        MI->eraseFromParent();
        break;
      }

      // If def for this use can't be rematerialized, then try folding.
      // If def is rematerializable and it's a load, also try folding.
      TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
      if (isLoad) {
        // Try fold loads (from stack slot, constant pool, etc.) into uses.
        FoldSS = isLoadSS;
        FoldSlot = LdSlot;
      }
    }
    // Scan all of the operands of this instruction rewriting operands
    // to use NewVReg instead of li.reg as appropriate. We do this for
    // two reasons:
    //
    //   1. If the instr reads the same spilled vreg multiple times, we
    //      want to reuse the NewVReg.
    //   2. If the instr is a two-addr instruction, we are required to
    //      keep the src/dst regs pinned.
    //
    // Keep track of whether we replace a use and/or def so that we can
    // create the spill interval with the appropriate range.

    HasUse = mop.isUse();
    HasDef = mop.isDef();
    SmallVector<unsigned, 2> Ops;
    Ops.push_back(i);
    for (unsigned j = i+1, e = MI->getNumOperands(); j != e; ++j) {
      const MachineOperand &MOj = MI->getOperand(j);
      if (!MOj.isRegister())
        continue;
      unsigned RegJ = MOj.getReg();
      if (RegJ == 0 || TargetRegisterInfo::isPhysicalRegister(RegJ))
        continue;
      if (RegJ == RegI) {
        Ops.push_back(j);
        HasUse |= MOj.isUse();
        HasDef |= MOj.isDef();
      }
    }

    if (HasUse && !li.liveAt(getUseIndex(index)))
      // Must be defined by an implicit def. It should not be spilled. Note,
      // this is for correctness reasons, e.g.
      // 8   %reg1024<def> = IMPLICIT_DEF
      // 12  %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
      // The live range [12, 14) is not part of the r1024 live interval since
      // it's defined by an implicit def. It will not conflict with the live
      // interval of r1025. Now suppose both registers are spilled; you can
      // easily see a situation where both registers are reloaded before
      // the INSERT_SUBREG and both target registers that would overlap.
      continue;

    // Update stack slot spill weight if we are splitting.
    float Weight = getSpillWeight(HasDef, HasUse, loopDepth);
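    // (getSpillWeight scales by loop depth -- in this revision it is
    // approximately (isDef + isUse) * 10^loopDepth; the exact formula lives
    // in LiveIntervalAnalysis.h and is an assumption worth verifying there.)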
    if (!TrySplit)
      SSWeight += Weight;

    if (!TryFold)
      CanFold = false;
    else {
      // Do not fold load / store here if we are splitting. We'll find an
      // optimal point to insert a load / store later.
      if (!TrySplit) {
        if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
                                 Ops, FoldSS, FoldSlot, Reg)) {
          // Folding the load/store can completely change the instruction in
          // unpredictable ways, rescan it from the beginning.
          HasUse = false;
          HasDef = false;
          CanFold = false;
          if (isRemoved(MI)) {
            SSWeight -= Weight;
            break;
          }
          goto RestartInstruction;
        }
      } else {
        // We'll try to fold it later if it's profitable.
        CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat);
      }
    }
    // Create a new virtual register for the spill interval.
    bool CreatedNewVReg = false;
    if (NewVReg == 0) {
      NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      CreatedNewVReg = true;
    }
    mop.setReg(NewVReg);
    if (mop.isImplicit())
      rewriteImplicitOps(li, MI, NewVReg, vrm);

    // Reuse NewVReg for other reads.
    for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
      MachineOperand &mopj = MI->getOperand(Ops[j]);
      mopj.setReg(NewVReg);
      if (mopj.isImplicit())
        rewriteImplicitOps(li, MI, NewVReg, vrm);
    }

    if (CreatedNewVReg) {
      if (DefIsReMat) {
        vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI/*, CanDelete*/);
        if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
          // Each valnum may have its own remat id.
          ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
        } else {
          vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
        }
        if (!CanDelete || (HasUse && HasDef)) {
          // If this is a two-addr instruction then its use operands are
          // rematerializable but its def is not. It should be assigned a
          // stack slot.
          vrm.assignVirt2StackSlot(NewVReg, Slot);
        }
      } else {
        vrm.assignVirt2StackSlot(NewVReg, Slot);
      }
    } else if (HasUse && HasDef &&
               vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
      // If this interval hasn't been assigned a stack slot (because earlier
      // def is a deleted remat def), do it now.
      assert(Slot != VirtRegMap::NO_STACK_SLOT);
      vrm.assignVirt2StackSlot(NewVReg, Slot);
    }

    // Re-matting an instruction with virtual register use. Add the
    // register as an implicit use on the use MI.
    if (DefIsReMat && ImpUse)
      MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));

    // Create a new register interval for this spill / remat.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    if (CreatedNewVReg) {
      NewLIs.push_back(&nI);
      MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
      if (TrySplit)
        vrm.setIsSplitFromReg(NewVReg, li.reg);
    }

    if (HasUse) {
      if (CreatedNewVReg) {
        LiveRange LR(getLoadIndex(index), getUseIndex(index)+1,
                     nI.getNextValue(~0U, 0, VNInfoAllocator));
        DOUT << " +" << LR;
        nI.addRange(LR);
      } else {
        // Extend the split live interval to this def / use.
        unsigned End = getUseIndex(index)+1;
        LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
                     nI.getValNumInfo(nI.getNumValNums()-1));
        DOUT << " +" << LR;
        nI.addRange(LR);
      }
    }
    if (HasDef) {
      LiveRange LR(getDefIndex(index), getStoreIndex(index),
                   nI.getNextValue(~0U, 0, VNInfoAllocator));
      DOUT << " +" << LR;
      nI.addRange(LR);
    }

    DOUT << "\t\t\t\tAdded new interval: ";
    nI.print(DOUT, tri_);
    DOUT << '\n';
  }
  return CanFold;
}
bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
                                         const VNInfo *VNI,
                                         MachineBasicBlock *MBB, unsigned Idx) const {
  unsigned End = getMBBEndIdx(MBB);
  for (unsigned j = 0, ee = VNI->kills.size(); j != ee; ++j) {
    unsigned KillIdx = VNI->kills[j];
    if (KillIdx > Idx && KillIdx < End)
      return true;
  }
  return false;
}
/// RewriteInfo - Keep track of machine instrs that will be rewritten
/// during spilling.
namespace {
  struct RewriteInfo {
    unsigned Index;
    MachineInstr *MI;
    bool HasUse;
    bool HasDef;
    RewriteInfo(unsigned i, MachineInstr *mi, bool u, bool d)
      : Index(i), MI(mi), HasUse(u), HasDef(d) {}
  };

  struct RewriteInfoCompare {
    bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
      return LHS.Index < RHS.Index;
    }
  };
}
void LiveIntervals::
rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
                    LiveInterval::Ranges::const_iterator &I,
                    MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                    unsigned Slot, int LdSlot,
                    bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                    VirtRegMap &vrm,
                    const TargetRegisterClass* rc,
                    SmallVector<int, 4> &ReMatIds,
                    const MachineLoopInfo *loopInfo,
                    BitVector &SpillMBBs,
                    DenseMap<unsigned, std::vector<SRInfo> > &SpillIdxes,
                    BitVector &RestoreMBBs,
                    DenseMap<unsigned, std::vector<SRInfo> > &RestoreIdxes,
                    DenseMap<unsigned,unsigned> &MBBVRegsMap,
                    std::vector<LiveInterval*> &NewLIs, float &SSWeight) {
  bool AllCanFold = true;
  unsigned NewVReg = 0;
  unsigned start = getBaseIndex(I->start);
  unsigned end = getBaseIndex(I->end-1) + InstrSlots::NUM;

  // First collect all the def / use in this live range that will be rewritten.
  // Make sure they are sorted according to instruction index.
  std::vector<RewriteInfo> RewriteMIs;
  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
         re = mri_->reg_end(); ri != re; ) {
    MachineInstr *MI = &*ri;
    MachineOperand &O = ri.getOperand();
    ++ri;
    assert(!O.isImplicit() && "Spilling register that's used as implicit use?");
    unsigned index = getInstructionIndex(MI);
    if (index < start || index >= end)
      continue;
    if (O.isUse() && !li.liveAt(getUseIndex(index)))
      // Must be defined by an implicit def. It should not be spilled. Note,
      // this is for correctness reasons, e.g.
      // 8   %reg1024<def> = IMPLICIT_DEF
      // 12  %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
      // The live range [12, 14) is not part of the r1024 live interval since
      // it's defined by an implicit def. It will not conflict with the live
      // interval of r1025. Now suppose both registers are spilled; you can
      // easily see a situation where both registers are reloaded before
      // the INSERT_SUBREG and both target registers that would overlap.
      continue;
    RewriteMIs.push_back(RewriteInfo(index, MI, O.isUse(), O.isDef()));
  }
  std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());
  unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
  // Now rewrite the defs and uses.
  for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
    RewriteInfo &rwi = RewriteMIs[i];
    ++i;
    unsigned index = rwi.Index;
    bool MIHasUse = rwi.HasUse;
    bool MIHasDef = rwi.HasDef;
    MachineInstr *MI = rwi.MI;
    // If MI def and/or use the same register multiple times, then there
    // are multiple entries.
    unsigned NumUses = MIHasUse;
    while (i != e && RewriteMIs[i].MI == MI) {
      assert(RewriteMIs[i].Index == index);
      bool isUse = RewriteMIs[i].HasUse;
      if (isUse) ++NumUses;
      MIHasUse |= isUse;
      MIHasDef |= RewriteMIs[i].HasDef;
      ++i;
    }
    MachineBasicBlock *MBB = MI->getParent();

    if (ImpUse && MI != ReMatDefMI) {
      // Re-matting an instruction with virtual register use. Update the
      // register interval's spill weight to HUGE_VALF to prevent it from
      // being spilled.
      LiveInterval &ImpLi = getInterval(ImpUse);
      ImpLi.weight = HUGE_VALF;
    }

    unsigned MBBId = MBB->getNumber();
    unsigned ThisVReg = 0;
    if (TrySplit) {
      DenseMap<unsigned,unsigned>::iterator NVI = MBBVRegsMap.find(MBBId);
      if (NVI != MBBVRegsMap.end()) {
        ThisVReg = NVI->second;

        // One common case:
        //   x = use
        //   ...
        //   def = ...
        //       = use
        // It's better to start a new interval to avoid artificially
        // extending the new interval.
        if (MIHasDef && !MIHasUse) {
          MBBVRegsMap.erase(MBB->getNumber());
          ThisVReg = 0;
        }
      }
    }

    bool IsNew = ThisVReg == 0;
    if (IsNew) {
      // This ends the previous live interval. If all of its def / use
      // can be folded, give it a low spill weight.
      if (NewVReg && TrySplit && AllCanFold) {
        LiveInterval &nI = getOrCreateInterval(NewVReg);
        nI.weight /= 10.0F;
      }
      AllCanFold = true;
    }
    NewVReg = ThisVReg;

    bool HasDef = false;
    bool HasUse = false;
    bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
                         index, end, MI, ReMatOrigDefMI, ReMatDefMI,
                         Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                         CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
                         ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs, SSWeight);
    if (!HasDef && !HasUse)
      continue;

    AllCanFold &= CanFold;

    // Update weight of spill interval.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    if (!TrySplit) {
      // The spill weight is now infinity as it cannot be spilled again.
      nI.weight = HUGE_VALF;
      continue;
    }
    // Keep track of the last def and first use in each MBB.
    if (MIHasDef) {
      if (MI != ReMatOrigDefMI || !CanDelete) {
        bool HasKill = false;
        if (!MIHasUse)
          HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, getDefIndex(index));
        else {
          // If this is two-address code, then this index starts a new VNInfo.
          const VNInfo *VNI = li.findDefinedVNInfo(getDefIndex(index));
          if (VNI)
            HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, getDefIndex(index));
        }
        DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
          SpillIdxes.find(MBBId);
        if (!HasKill) {
          if (SII == SpillIdxes.end()) {
            std::vector<SRInfo> S;
            S.push_back(SRInfo(index, NewVReg, true));
            SpillIdxes.insert(std::make_pair(MBBId, S));
          } else if (SII->second.back().vreg != NewVReg) {
            SII->second.push_back(SRInfo(index, NewVReg, true));
          } else if ((int)index > SII->second.back().index) {
            // If there is an earlier def and this is a two-address
            // instruction, then it's not possible to fold the store (which
            // would also fold the load).
            SRInfo &Info = SII->second.back();
            Info.index = index;
            Info.canFold = !HasUse;
          }
          SpillMBBs.set(MBBId);
        } else if (SII != SpillIdxes.end() &&
                   SII->second.back().vreg == NewVReg &&
                   (int)index > SII->second.back().index) {
          // There is an earlier def that's not killed (must be two-address).
          // The spill is no longer needed.
          SII->second.pop_back();
          if (SII->second.empty()) {
            SpillIdxes.erase(MBBId);
            SpillMBBs.reset(MBBId);
          }
        }
      }
    }

    if (MIHasUse) {
      DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
        SpillIdxes.find(MBBId);
      if (SII != SpillIdxes.end() &&
          SII->second.back().vreg == NewVReg &&
          (int)index > SII->second.back().index)
        // Use(s) following the last def, it's not safe to fold the spill.
        SII->second.back().canFold = false;
      DenseMap<unsigned, std::vector<SRInfo> >::iterator RII =
        RestoreIdxes.find(MBBId);
      if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
        // If we are splitting live intervals, only fold if it's the first
        // use and there isn't another use later in the MBB.
        RII->second.back().canFold = false;
      else {
        // Only need a reload if there isn't an earlier def / use.
        if (RII == RestoreIdxes.end()) {
          std::vector<SRInfo> Infos;
          Infos.push_back(SRInfo(index, NewVReg, true));
          RestoreIdxes.insert(std::make_pair(MBBId, Infos));
        } else
          RII->second.push_back(SRInfo(index, NewVReg, true));
      }
      RestoreMBBs.set(MBBId);
    }

    // Update spill weight.
    unsigned loopDepth = loopInfo->getLoopDepth(MBB);
    nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
  }

  if (NewVReg && TrySplit && AllCanFold) {
    // If all of its def / use can be folded, give it a low spill weight.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    nI.weight /= 10.0F;
  }
}
bool LiveIntervals::alsoFoldARestore(int Id, int index, unsigned vr,
                                     BitVector &RestoreMBBs,
                                     DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
  if (!RestoreMBBs[Id])
    return false;
  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
    if (Restores[i].index == index &&
        Restores[i].vreg == vr &&
        Restores[i].canFold)
      return true;
  return false;
}

void LiveIntervals::eraseRestoreInfo(int Id, int index, unsigned vr,
                                     BitVector &RestoreMBBs,
                                     DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
  if (!RestoreMBBs[Id])
    return;
  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
    if (Restores[i].index == index && Restores[i].vreg == vr)
      Restores[i].index = -1;
}
/// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being
/// spilled and create empty intervals for their uses.
void
LiveIntervals::handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm,
                                    const TargetRegisterClass* rc,
                                    std::vector<LiveInterval*> &NewLIs) {
  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
         re = mri_->reg_end(); ri != re; ) {
    MachineOperand &O = ri.getOperand();
    MachineInstr *MI = &*ri;
    ++ri;
    if (O.isDef()) {
      assert(MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF &&
             "Register def was not rewritten?");
      RemoveMachineInstrFromMaps(MI);
      vrm.RemoveMachineInstrFromMaps(MI);
      MI->eraseFromParent();
    } else {
      // This must be a use of an implicit_def so it's not part of the live
      // interval. Create a new empty live interval for it.
      // FIXME: Can we simply erase some of the instructions? e.g. Stores?
      unsigned NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      vrm.setIsImplicitlyDefined(NewVReg);
      NewLIs.push_back(&getOrCreateInterval(NewVReg));
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand &MO = MI->getOperand(i);
        if (MO.isReg() && MO.getReg() == li.reg)
          MO.setReg(NewVReg);
      }
    }
  }
}

namespace {
  struct LISorter {
    bool operator()(LiveInterval* A, LiveInterval* B) {
      return A->beginNumber() < B->beginNumber();
    }
  };
}
std::vector<LiveInterval*> LiveIntervals::
addIntervalsForSpillsFast(const LiveInterval &li,
                          const MachineLoopInfo *loopInfo,
                          VirtRegMap &vrm, float& SSWeight) {
  unsigned slot = vrm.assignVirt2StackSlot(li.reg);

  std::vector<LiveInterval*> added;

  assert(li.weight != HUGE_VALF &&
         "attempt to spill already spilled interval!");

  DOUT << "\t\t\t\tadding intervals for spills for interval: ";
  DEBUG(li.dump());
  DOUT << '\n';

  const TargetRegisterClass* rc = mri_->getRegClass(li.reg);

  SSWeight = 0.0f;

  MachineRegisterInfo::reg_iterator RI = mri_->reg_begin(li.reg);
  while (RI != mri_->reg_end()) {
    MachineInstr* MI = &*RI;

    SmallVector<unsigned, 2> Indices;
    bool HasUse = false;
    bool HasDef = false;

    for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
      MachineOperand& mop = MI->getOperand(i);
      if (!mop.isReg() || mop.getReg() != li.reg) continue;

      HasUse |= MI->getOperand(i).isUse();
      HasDef |= MI->getOperand(i).isDef();

      Indices.push_back(i);
    }

    if (!tryFoldMemoryOperand(MI, vrm, NULL, getInstructionIndex(MI),
                              Indices, true, slot, li.reg)) {
      unsigned NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      vrm.assignVirt2StackSlot(NewVReg, slot);

      // create a new register for this spill
      LiveInterval &nI = getOrCreateInterval(NewVReg);

      // the spill weight is now infinity as it
      // cannot be spilled again
      nI.weight = HUGE_VALF;

      // Rewrite register operands to use the new vreg.
      for (SmallVectorImpl<unsigned>::iterator I = Indices.begin(),
           E = Indices.end(); I != E; ++I) {
        MI->getOperand(*I).setReg(NewVReg);

        if (MI->getOperand(*I).isUse())
          MI->getOperand(*I).setIsKill(true);
      }

      // Fill in the new live interval.
      unsigned index = getInstructionIndex(MI);
      if (HasUse) {
        LiveRange LR(getLoadIndex(index), getUseIndex(index),
                     nI.getNextValue(~0U, 0, getVNInfoAllocator()));
        DOUT << " +" << LR;
        nI.addRange(LR);
        vrm.addRestorePoint(NewVReg, MI);
      }
      if (HasDef) {
        LiveRange LR(getDefIndex(index), getStoreIndex(index),
                     nI.getNextValue(~0U, 0, getVNInfoAllocator()));
        DOUT << " +" << LR;
        nI.addRange(LR);
        vrm.addSpillPoint(NewVReg, true, MI);
      }

      added.push_back(&nI);

      DOUT << "\t\t\t\tadded new interval: ";
      DEBUG(nI.dump());
      DOUT << '\n';

      unsigned loopDepth = loopInfo->getLoopDepth(MI->getParent());
      if (HasUse && HasDef)
        SSWeight += getSpillWeight(true, true, loopDepth);
      else if (HasUse)
        SSWeight += getSpillWeight(false, true, loopDepth);
      else
        SSWeight += getSpillWeight(true, false, loopDepth);
    }

    RI = mri_->reg_begin(li.reg);
  }

  // Clients expect the new intervals to be returned in sorted order.
  std::sort(added.begin(), added.end(), LISorter());

  return added;
}
std::vector<LiveInterval*> LiveIntervals::
addIntervalsForSpills(const LiveInterval &li,
                      const MachineLoopInfo *loopInfo, VirtRegMap &vrm,
                      float &SSWeight) {
  if (EnableFastSpilling)
    return addIntervalsForSpillsFast(li, loopInfo, vrm, SSWeight);

  assert(li.weight != HUGE_VALF &&
         "attempt to spill already spilled interval!");

  DOUT << "\t\t\t\tadding intervals for spills for interval: ";
  li.print(DOUT, tri_);
  DOUT << '\n';
  // Spill slot weight.
  SSWeight = 0.0f;

  // Each bit specifies whether a spill is required in the MBB.
  BitVector SpillMBBs(mf_->getNumBlockIDs());
  DenseMap<unsigned, std::vector<SRInfo> > SpillIdxes;
  BitVector RestoreMBBs(mf_->getNumBlockIDs());
  DenseMap<unsigned, std::vector<SRInfo> > RestoreIdxes;
  DenseMap<unsigned,unsigned> MBBVRegsMap;
  std::vector<LiveInterval*> NewLIs;
  const TargetRegisterClass* rc = mri_->getRegClass(li.reg);

  unsigned NumValNums = li.getNumValNums();
  SmallVector<MachineInstr*, 4> ReMatDefs;
  ReMatDefs.resize(NumValNums, NULL);
  SmallVector<MachineInstr*, 4> ReMatOrigDefs;
  ReMatOrigDefs.resize(NumValNums, NULL);
  SmallVector<int, 4> ReMatIds;
  ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
  BitVector ReMatDelete(NumValNums);
  unsigned Slot = VirtRegMap::MAX_STACK_SLOT;

  // Spilling a split live interval. It cannot be split any further. Also,
  // it's guaranteed to be a single val# / range interval.
  if (vrm.getPreSplitReg(li.reg)) {
    vrm.setIsSplitFromReg(li.reg, 0);
    // Unset the split kill marker on the last use.
    unsigned KillIdx = vrm.getKillPoint(li.reg);
    if (KillIdx) {
      MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
      assert(KillMI && "Last use disappeared?");
      int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
      assert(KillOp != -1 && "Last use disappeared?");
      KillMI->getOperand(KillOp).setIsKill(false);
    }
    vrm.removeKillPoint(li.reg);
    bool DefIsReMat = vrm.isReMaterialized(li.reg);
    Slot = vrm.getStackSlot(li.reg);
    assert(Slot != VirtRegMap::MAX_STACK_SLOT);
    MachineInstr *ReMatDefMI = DefIsReMat ?
      vrm.getReMaterializedMI(li.reg) : NULL;
    int LdSlot = 0;
    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
    bool isLoad = isLoadSS ||
      (DefIsReMat && (ReMatDefMI->getDesc().isSimpleLoad()));
    bool IsFirstRange = true;
    for (LiveInterval::Ranges::const_iterator
           I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
      // If this is a split live interval with multiple ranges, it means there
      // are two-address instructions that re-defined the value. Only the
      // first def can be rematerialized!
      if (IsFirstRange) {
        // Note ReMatOrigDefMI has already been deleted.
        rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
                             Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                             false, vrm, rc, ReMatIds, loopInfo,
                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                             MBBVRegsMap, NewLIs, SSWeight);
      } else {
        rewriteInstructionsForSpills(li, false, I, NULL, 0,
                             Slot, 0, false, false, false,
                             false, vrm, rc, ReMatIds, loopInfo,
                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                             MBBVRegsMap, NewLIs, SSWeight);
      }
      IsFirstRange = false;
    }

    SSWeight = 0.0f; // Already accounted for when split.
    handleSpilledImpDefs(li, vrm, rc, NewLIs);
    return NewLIs;
  }
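
  // The interval was not split before. If it spans multiple basic blocks
  // (and the split limit has not been reached), try splitting it at basic
  // block boundaries before falling back to spilling the whole interval.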
  bool TrySplit = SplitAtBB && !intervalIsInOneMBB(li);
  if (SplitLimit != -1 && (int)numSplits >= SplitLimit)
    TrySplit = false;
  if (TrySplit)
    ++numSplits;

  bool NeedStackSlot = false;
  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
       i != e; ++i) {
    const VNInfo *VNI = *i;
    unsigned VN = VNI->id;
    unsigned DefIdx = VNI->def;
    if (DefIdx == ~1U)
      continue; // Dead val#.
    // Is the def for the val# rematerializable?
    MachineInstr *ReMatDefMI = (DefIdx == ~0u)
      ? 0 : getInstructionFromIndex(DefIdx);
    bool dummy;
    if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, dummy)) {
      // Remember how to remat the def of this val#.
      ReMatOrigDefs[VN] = ReMatDefMI;
      // Original def may be modified so we have to make a copy here.
      MachineInstr *Clone = mf_->CloneMachineInstr(ReMatDefMI);
      ClonedMIs.push_back(Clone);
      ReMatDefs[VN] = Clone;

      bool CanDelete = true;
      if (VNI->hasPHIKill) {
        // The value is killed by a phi node, so not all of its uses can be
        // rematerialized; it must not be deleted.
        CanDelete = false;
        // Need a stack slot if there is any live range where uses cannot be
        // rematerialized.
        NeedStackSlot = true;
      }
      if (CanDelete)
        ReMatDelete.set(VN);
    } else {
      // Need a stack slot if there is any live range where uses cannot be
      // rematerialized.
      NeedStackSlot = true;
    }
  }

  // One stack slot per live interval.
  if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0)
    Slot = vrm.assignVirt2StackSlot(li.reg);

  // Create new intervals and rewrite defs and uses.
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
    MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
    bool DefIsReMat = ReMatDefMI != NULL;
    bool CanDelete = ReMatDelete[I->valno->id];
    int LdSlot = 0;
    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
    bool isLoad = isLoadSS ||
      (DefIsReMat && ReMatDefMI->getDesc().isSimpleLoad());
    rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
                                 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                                 CanDelete, vrm, rc, ReMatIds, loopInfo,
                                 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                                 MBBVRegsMap, NewLIs, SSWeight);
  }

  // Insert spills / restores if we are splitting.
  if (!TrySplit) {
    handleSpilledImpDefs(li, vrm, rc, NewLIs);
    return NewLIs;
  }
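
  // Process each recorded spill point: try to fold the store into the
  // defining instruction; if that fails, tell the spiller to insert an
  // explicit store to the stack slot.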
  SmallPtrSet<LiveInterval*, 4> AddedKill;
  SmallVector<unsigned, 2> Ops;
  if (NeedStackSlot) {
    int Id = SpillMBBs.find_first();
    while (Id != -1) {
      MachineBasicBlock *MBB = mf_->getBlockNumbered(Id);
      unsigned loopDepth = loopInfo->getLoopDepth(MBB);
      std::vector<SRInfo> &spills = SpillIdxes[Id];
      for (unsigned i = 0, e = spills.size(); i != e; ++i) {
        int index = spills[i].index;
        unsigned VReg = spills[i].vreg;
        LiveInterval &nI = getOrCreateInterval(VReg);
        bool isReMat = vrm.isReMaterialized(VReg);
        MachineInstr *MI = getInstructionFromIndex(index);
        bool CanFold = false;
        bool FoundUse = false;
        Ops.clear();
        if (spills[i].canFold) {
          CanFold = true;
          for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
            MachineOperand &MO = MI->getOperand(j);
            if (!MO.isRegister() || MO.getReg() != VReg)
              continue;

            Ops.push_back(j);
            if (MO.isDef())
              continue;
            if (isReMat ||
                (!FoundUse && !alsoFoldARestore(Id, index, VReg,
                                                RestoreMBBs, RestoreIdxes))) {
              // MI has two-address uses of the same register. If the use
              // isn't the first and only use in the BB, then we can't fold
              // it. FIXME: Move this to rewriteInstructionsForSpills.
              CanFold = false;
              break;
            }
            FoundUse = true;
          }
        }
        // Fold the store into the def if possible.
        bool Folded = false;
        if (CanFold && !Ops.empty()) {
          if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot, VReg)) {
            Folded = true;
            if (FoundUse) {
              // Also folded uses, do not issue a load.
              eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
              nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
            }
            nI.removeRange(getDefIndex(index), getStoreIndex(index));
          }
        }

        // Otherwise tell the spiller to issue a spill.
        if (!Folded) {
          LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
          bool isKill = LR->end == getStoreIndex(index);
          if (!MI->registerDefIsDead(nI.reg))
            // No need to spill a dead def.
            vrm.addSpillPoint(VReg, isKill, MI);
          if (isKill)
            AddedKill.insert(&nI);
        }

        // Update spill slot weight.
        if (!isReMat)
          SSWeight += getSpillWeight(true, false, loopDepth);
      }
      Id = SpillMBBs.find_next(Id);
    }
  }
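
  // Process each recorded restore point: try to fold the reload (or the
  // rematerialized def) into the using instruction; otherwise record a
  // restore point for the spiller.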
  int Id = RestoreMBBs.find_first();
  while (Id != -1) {
    MachineBasicBlock *MBB = mf_->getBlockNumbered(Id);
    unsigned loopDepth = loopInfo->getLoopDepth(MBB);
    std::vector<SRInfo> &restores = RestoreIdxes[Id];
    for (unsigned i = 0, e = restores.size(); i != e; ++i) {
      int index = restores[i].index;
      if (index == -1)
        continue;
      unsigned VReg = restores[i].vreg;
      LiveInterval &nI = getOrCreateInterval(VReg);
      bool isReMat = vrm.isReMaterialized(VReg);
      MachineInstr *MI = getInstructionFromIndex(index);
      bool CanFold = false;
      Ops.clear();
      if (restores[i].canFold) {
        CanFold = true;
        for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
          MachineOperand &MO = MI->getOperand(j);
          if (!MO.isRegister() || MO.getReg() != VReg)
            continue;

          if (MO.isDef()) {
            // If this restore were to be folded, it would have been folded
            // already.
            CanFold = false;
            break;
          }
          Ops.push_back(j);
        }
      }

      // Fold the load into the use if possible.
      bool Folded = false;
      if (CanFold && !Ops.empty()) {
        if (!isReMat)
          Folded = tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true,
                                        Slot, VReg);
        else {
          MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
          int LdSlot = 0;
          bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
          // If the rematerializable def is a load, also try to fold it.
          if (isLoadSS || ReMatDefMI->getDesc().isSimpleLoad())
            Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
                                          Ops, isLoadSS, LdSlot, VReg);
          unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
          if (ImpUse) {
            // Re-matting an instruction with virtual register use. Add the
            // register as an implicit use on the use MI and update the
            // register interval's spill weight to HUGE_VALF to prevent it
            // from being spilled.
            LiveInterval &ImpLi = getInterval(ImpUse);
            ImpLi.weight = HUGE_VALF;
            MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
          }
        }
      }
      // If folding is not possible / failed, then tell the spiller to issue
      // a load / rematerialization for us.
      if (Folded)
        nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
      else
        vrm.addRestorePoint(VReg, MI);

      // Update spill slot weight.
      if (!isReMat)
        SSWeight += getSpillWeight(false, true, loopDepth);
    }
    Id = RestoreMBBs.find_next(Id);
  }

  // Finalize intervals: add kills, finalize spill weights, and filter out
  // dead intervals.
  std::vector<LiveInterval*> RetNewLIs;
  for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
    LiveInterval *LI = NewLIs[i];
    if (!LI->empty()) {
      LI->weight /= InstrSlots::NUM * getApproximateInstructionCount(*LI);
      if (!AddedKill.count(LI)) {
        LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
        unsigned LastUseIdx = getBaseIndex(LR->end);
        MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
        int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
        assert(UseIdx != -1);
        if (LastUse->getOperand(UseIdx).isImplicit() ||
            LastUse->getDesc().getOperandConstraint(UseIdx, TOI::TIED_TO) == -1) {
          LastUse->getOperand(UseIdx).setIsKill();
          vrm.addKillPoint(LI->reg, LastUseIdx);
        }
      }
      RetNewLIs.push_back(LI);
    }
  }

  handleSpilledImpDefs(li, vrm, rc, RetNewLIs);
  return RetNewLIs;
}

/// hasAllocatableSuperReg - Return true if the specified physical register
/// has any super register that's allocatable.
bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS)
    if (allocatableRegs_[*AS] && hasInterval(*AS))
      return true;
  return false;
}

/// getRepresentativeReg - Find the largest super register of the specified
/// physical register.
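/// For example, on X86 the representative register for AL would typically
/// be EAX, its largest allocatable super-register (illustrative only; the
/// actual answer depends on the target's register file).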
unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
  // Find the largest super-register that is allocatable.
  unsigned BestReg = Reg;
  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
    unsigned SuperReg = *AS;
    if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) {
      BestReg = SuperReg;
      break;
    }
  }
  return BestReg;
}

/// getNumConflictsWithPhysReg - Return the number of uses and defs of the
/// specified interval that conflict with the specified physical register.
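/// Note that every operand referencing li.reg is visited, so a single
/// instruction with several such operands contributes several conflicts.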
unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
                                                   unsigned PhysReg) const {
  unsigned NumConflicts = 0;
  const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg));
  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
         E = mri_->reg_end(); I != E; ++I) {
    MachineOperand &O = I.getOperand();
    MachineInstr *MI = O.getParent();
    unsigned Index = getInstructionIndex(MI);
    if (pli.liveAt(Index))
      ++NumConflicts;
  }
  return NumConflicts;
}

/// spillPhysRegAroundRegDefsUses - Spill the specified physical register
/// around all defs and uses of the specified interval.
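/// This is used for emergency spills: each conflicting instruction gets the
/// physical register spilled and restored around it (see
/// VirtRegMap::addEmergencySpill).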
void LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
                                            unsigned PhysReg, VirtRegMap &vrm) {
  unsigned SpillReg = getRepresentativeReg(PhysReg);

  for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
    // If there is a register which aliases PhysReg but is not a sub-register
    // of the chosen representative super register, assert, since we can't
    // handle it yet.
    assert(*AS == SpillReg || !allocatableRegs_[*AS] ||
           tri_->isSuperRegister(*AS, SpillReg));

  LiveInterval &pli = getInterval(SpillReg);
  SmallPtrSet<MachineInstr*, 8> SeenMIs;
  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
         E = mri_->reg_end(); I != E; ++I) {
    MachineOperand &O = I.getOperand();
    MachineInstr *MI = O.getParent();
    if (SeenMIs.count(MI))
      continue;
    SeenMIs.insert(MI);
    unsigned Index = getInstructionIndex(MI);
    if (pli.liveAt(Index)) {
      vrm.addEmergencySpill(SpillReg, MI);
      pli.removeRange(getLoadIndex(Index), getStoreIndex(Index)+1);
      for (const unsigned* AS = tri_->getSubRegisters(SpillReg); *AS; ++AS) {
        if (!hasInterval(*AS))
          continue;
        LiveInterval &spli = getInterval(*AS);
        if (spli.liveAt(Index))
          spli.removeRange(getLoadIndex(Index), getStoreIndex(Index)+1);
      }
    }
  }
}
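
/// addLiveRangeToEndOfBlock - Given a register and an instruction defining
/// a new value for it, extend the register's live interval from that
/// instruction to the end of its basic block, and return the new range.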
LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg,
                                                  MachineInstr* startInst) {
  LiveInterval& Interval = getOrCreateInterval(reg);
  VNInfo* VN = Interval.getNextValue(
            getInstructionIndex(startInst) + InstrSlots::DEF,
            startInst, getVNInfoAllocator());
  VN->hasPHIKill = true;
  VN->kills.push_back(getMBBEndIdx(startInst->getParent()));
  LiveRange LR(getInstructionIndex(startInst) + InstrSlots::DEF,
               getMBBEndIdx(startInst->getParent()) + 1, VN);
  Interval.addRange(LR);

  return LR;
}