1 //===-- LiveIntervalAnalysis.cpp - Live Interval Analysis -----------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file implements the LiveInterval analysis pass which is used
11 // by the Linear Scan Register allocator. This pass linearizes the
12 // basic blocks of the function in DFS order and uses the
13 // LiveVariables pass to conservatively compute live intervals for
14 // each virtual and physical register.
16 //===----------------------------------------------------------------------===//
18 #define DEBUG_TYPE "liveintervals"
19 #include "llvm/CodeGen/LiveIntervalAnalysis.h"
20 #include "VirtRegMap.h"
21 #include "llvm/Value.h"
22 #include "llvm/Analysis/AliasAnalysis.h"
23 #include "llvm/CodeGen/LiveVariables.h"
24 #include "llvm/CodeGen/MachineFrameInfo.h"
25 #include "llvm/CodeGen/MachineInstr.h"
26 #include "llvm/CodeGen/MachineInstrBuilder.h"
27 #include "llvm/CodeGen/MachineLoopInfo.h"
28 #include "llvm/CodeGen/MachineRegisterInfo.h"
29 #include "llvm/CodeGen/Passes.h"
30 #include "llvm/CodeGen/PseudoSourceValue.h"
31 #include "llvm/Target/TargetRegisterInfo.h"
32 #include "llvm/Target/TargetInstrInfo.h"
33 #include "llvm/Target/TargetMachine.h"
34 #include "llvm/Target/TargetOptions.h"
35 #include "llvm/Support/CommandLine.h"
36 #include "llvm/Support/Debug.h"
37 #include "llvm/ADT/DepthFirstIterator.h"
38 #include "llvm/ADT/SmallSet.h"
39 #include "llvm/ADT/Statistic.h"
40 #include "llvm/ADT/STLExtras.h"
46 // Hidden options for help debugging.
47 static cl::opt<bool> DisableReMat("disable-rematerialization",
48 cl::init(false), cl::Hidden);
50 static cl::opt<bool> SplitAtBB("split-intervals-at-bb",
51 cl::init(true), cl::Hidden);
52 static cl::opt<int> SplitLimit("split-limit",
53 cl::init(-1), cl::Hidden);
55 static cl::opt<bool> EnableAggressiveRemat("aggressive-remat", cl::Hidden);
57 static cl::opt<bool> EnableFastSpilling("fast-spill",
58 cl::init(false), cl::Hidden);
// Statistics reported under -stats.
60 STATISTIC(numIntervals, "Number of original intervals");
61 STATISTIC(numFolds , "Number of loads/stores folded into instructions");
62 STATISTIC(numSplits , "Number of intervals split");
// Pass identifier and static registration with the pass registry; the
// registration makes -liveintervals available on the opt/llc command line.
64 char LiveIntervals::ID = 0;
65 static RegisterPass<LiveIntervals> X("liveintervals", "Live Interval Analysis");
// Declare the analyses this pass requires and the ones it preserves.
67 void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
68 AU.addRequired<AliasAnalysis>();
69 AU.addPreserved<AliasAnalysis>();
// LiveVariables is both consumed (to build the intervals) and kept valid.
70 AU.addPreserved<LiveVariables>();
71 AU.addRequired<LiveVariables>();
72 AU.addPreservedID(MachineLoopInfoID);
73 AU.addPreservedID(MachineDominatorsID);
// PHI elimination and two-address lowering must run first: intervals are
// computed on non-SSA machine code.
// NOTE(review): interior lines (orig 74-75, 78-79) are elided from this
// listing; confirm surrounding control flow against the full file.
76 AU.addPreservedID(PHIEliminationID);
77 AU.addRequiredID(PHIEliminationID);
80 AU.addRequiredID(TwoAddressInstructionPassID);
81 MachineFunctionPass::getAnalysisUsage(AU);
// Drop all per-function state so the pass can be rerun on another function.
84 void LiveIntervals::releaseMemory() {
85 // Free the live intervals themselves.
86 for (DenseMap<unsigned, LiveInterval*>::iterator I = r2iMap_.begin(),
87 E = r2iMap_.end(); I != E; ++I)
95 // Release VNInfo memory regions after all VNInfo objects are dtor'd.
96 VNInfoAllocator.Reset();
// Return cloned instructions to the MachineFunction's allocator.
97 while (!ClonedMIs.empty()) {
98 MachineInstr *MI = ClonedMIs.back();
100 mf_->DeleteMachineInstr(MI);
104 /// processImplicitDefs - Process IMPLICIT_DEF instructions and make sure
105 /// there is one implicit_def for each use. Add isUndef marker to
106 /// implicit_def defs and their uses.
107 void LiveIntervals::processImplicitDefs() {
108 SmallSet<unsigned, 8> ImpDefRegs;
109 SmallVector<MachineInstr*, 8> ImpDefMIs;
// Walk the CFG depth-first from the entry block, tracking which registers
// are currently "implicitly defined" (ImpDefRegs).
110 MachineBasicBlock *Entry = mf_->begin();
111 SmallPtrSet<MachineBasicBlock*,16> Visited;
112 for (df_ext_iterator<MachineBasicBlock*, SmallPtrSet<MachineBasicBlock*,16> >
113 DFI = df_ext_begin(Entry, Visited), E = df_ext_end(Entry, Visited);
115 MachineBasicBlock *MBB = *DFI;
116 for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
118 MachineInstr *MI = &*I;
// Record each IMPLICIT_DEF and mark its def operand undef.
120 if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
121 unsigned Reg = MI->getOperand(0).getReg();
122 MI->getOperand(0).setIsUndef();
123 ImpDefRegs.insert(Reg);
124 ImpDefMIs.push_back(MI);
// Scan uses: a kill of (or a use tied to a def of) an implicit-def'd
// register ends its implicit-def-ness.
127 for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
128 MachineOperand& MO = MI->getOperand(i);
129 if (!MO.isReg() || !MO.isUse())
131 unsigned Reg = MO.getReg();
134 if (!ImpDefRegs.count(Reg))
137 if (MO.isKill() || MI->isRegTiedToDefOperand(i))
138 ImpDefRegs.erase(Reg);
// Any explicit (re)definition also ends the implicit-def-ness.
141 for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
142 MachineOperand& MO = MI->getOperand(i);
143 if (!MO.isReg() || !MO.isDef())
145 ImpDefRegs.erase(MO.getReg());
149 // Any outstanding liveout implicit_def's?
150 for (unsigned i = 0, e = ImpDefMIs.size(); i != e; ++i) {
151 MachineInstr *MI = ImpDefMIs[i];
152 unsigned Reg = MI->getOperand(0).getReg();
153 if (TargetRegisterInfo::isPhysicalRegister(Reg))
154 // Physical registers are not liveout (yet).
156 if (!ImpDefRegs.count(Reg))
158 bool HasLocalUse = false;
// Rewrite remaining uses of a live-out implicit_def register to use a
// fresh vreg defined by a new IMPLICIT_DEF placed right before the use.
// NOTE(review): several interior lines are elided from this listing
// (orig 163-174, 179-183); confirm the rewrite logic in the full file.
159 for (MachineRegisterInfo::reg_iterator RI = mri_->reg_begin(Reg),
160 RE = mri_->reg_end(); RI != RE; ) {
161 MachineOperand &RMO = RI.getOperand();
162 MachineInstr *RMI = &*RI;
165 // Don't expect another def of the same register.
167 "Register with multiple defs including an implicit_def?");
170 MachineBasicBlock *RMBB = RMI->getParent();
175 const TargetRegisterClass* RC = mri_->getRegClass(Reg);
176 unsigned NewVReg = mri_->createVirtualRegister(RC);
177 BuildMI(*RMBB, RMI, RMI->getDebugLoc(),
178 tii_->get(TargetInstrInfo::IMPLICIT_DEF), NewVReg);
184 MI->eraseFromParent();
/// computeNumbering - Assign fresh instruction indices to every block and
/// instruction, then remap all existing live-range, VNInfo def, and kill
/// indices from the old numbering to the new one.
191 void LiveIntervals::computeNumbering() {
// Keep the old index maps so existing indices can be translated below.
192 Index2MiMap OldI2MI = i2miMap_;
193 std::vector<IdxMBBPair> OldI2MBB = Idx2MBBMap;
202 // Number MachineInstrs and MachineBasicBlocks.
203 // Initialize MBB indexes to a sentinel.
204 MBB2IdxMap.resize(mf_->getNumBlockIDs(), std::make_pair(~0U,~0U));
206 unsigned MIIndex = 0;
207 for (MachineFunction::iterator MBB = mf_->begin(), E = mf_->end();
209 unsigned StartIdx = MIIndex;
211 // Insert an empty slot at the beginning of each block.
212 MIIndex += InstrSlots::NUM;
213 i2miMap_.push_back(0);
215 for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
217 bool inserted = mi2iMap_.insert(std::make_pair(I, MIIndex)).second;
218 assert(inserted && "multiple MachineInstr -> index mappings");
220 i2miMap_.push_back(I);
221 MIIndex += InstrSlots::NUM;
224 // Insert max(1, numdefs) empty slots after every instruction.
225 unsigned Slots = I->getDesc().getNumDefs();
228 MIIndex += InstrSlots::NUM * Slots;
230 i2miMap_.push_back(0);
233 // Set the MBB2IdxMap entry for this MBB.
234 MBB2IdxMap[MBB->getNumber()] = std::make_pair(StartIdx, MIIndex - 1);
235 Idx2MBBMap.push_back(std::make_pair(StartIdx, MBB));
237 std::sort(Idx2MBBMap.begin(), Idx2MBBMap.end(), Idx2MBBCompare());
// If there was a previous numbering, remap every live range.
239 if (!OldI2MI.empty())
240 for (iterator OI = begin(), OE = end(); OI != OE; ++OI) {
241 for (LiveInterval::iterator LI = OI->second->begin(),
242 LE = OI->second->end(); LI != LE; ++LI) {
244 // Remap the start index of the live range to the corresponding new
245 // number, or our best guess at what it _should_ correspond to if the
246 // original instruction has been erased. This is either the following
247 // instruction or its predecessor.
248 unsigned index = LI->start / InstrSlots::NUM;
249 unsigned offset = LI->start % InstrSlots::NUM;
// A LOAD offset marks a block-boundary index rather than an instruction.
250 if (offset == InstrSlots::LOAD) {
251 std::vector<IdxMBBPair>::const_iterator I =
252 std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), LI->start);
253 // Take the pair containing the index
254 std::vector<IdxMBBPair>::const_iterator J =
255 (I == OldI2MBB.end() && OldI2MBB.size()>0) ? (I-1): I;
257 LI->start = getMBBStartIdx(J->second);
259 LI->start = mi2iMap_[OldI2MI[index]] + offset;
262 // Remap the ending index in the same way that we remapped the start,
263 // except for the final step where we always map to the immediately
264 // following instruction.
265 index = (LI->end - 1) / InstrSlots::NUM;
266 offset = LI->end % InstrSlots::NUM;
267 if (offset == InstrSlots::LOAD) {
268 // VReg dies at end of block.
269 std::vector<IdxMBBPair>::const_iterator I =
270 std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), LI->end);
273 LI->end = getMBBEndIdx(I->second) + 1;
// Skip forward over indices whose instructions were erased.
275 unsigned idx = index;
276 while (index < OldI2MI.size() && !OldI2MI[index]) ++index;
278 if (index != OldI2MI.size())
279 LI->end = mi2iMap_[OldI2MI[index]] + (idx == index ? offset : 0);
281 LI->end = InstrSlots::NUM * i2miMap_.size();
285 for (LiveInterval::vni_iterator VNI = OI->second->vni_begin(),
286 VNE = OI->second->vni_end(); VNI != VNE; ++VNI) {
289 // Remap the VNInfo def index, which works the same as the
290 // start indices above. VN's with special sentinel defs
291 // don't need to be remapped.
292 if (vni->isDefAccurate() && !vni->isUnused()) {
293 unsigned index = vni->def / InstrSlots::NUM;
294 unsigned offset = vni->def % InstrSlots::NUM;
295 if (offset == InstrSlots::LOAD) {
296 std::vector<IdxMBBPair>::const_iterator I =
297 std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), vni->def);
298 // Take the pair containing the index
299 std::vector<IdxMBBPair>::const_iterator J =
300 (I == OldI2MBB.end() && OldI2MBB.size()>0) ? (I-1): I;
302 vni->def = getMBBStartIdx(J->second);
304 vni->def = mi2iMap_[OldI2MI[index]] + offset;
308 // Remap the VNInfo kill indices, which works the same as
309 // the end indices above.
310 for (size_t i = 0; i < vni->kills.size(); ++i) {
311 // PHI kills don't need to be remapped.
312 if (!vni->kills[i]) continue;
314 unsigned index = (vni->kills[i]-1) / InstrSlots::NUM;
315 unsigned offset = vni->kills[i] % InstrSlots::NUM;
316 if (offset == InstrSlots::LOAD) {
317 std::vector<IdxMBBPair>::const_iterator I =
318 std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), vni->kills[i]);
321 vni->kills[i] = getMBBEndIdx(I->second);
323 unsigned idx = index;
324 while (index < OldI2MI.size() && !OldI2MI[index]) ++index;
326 if (index != OldI2MI.size())
327 vni->kills[i] = mi2iMap_[OldI2MI[index]] +
328 (idx == index ? offset : 0);
330 vni->kills[i] = InstrSlots::NUM * i2miMap_.size();
/// scaleNumbering - Multiply all instruction/block indices by the given
/// factor, updating every index-bearing structure consistently.
337 void LiveIntervals::scaleNumbering(int factor) {
339 // * scale MBB begin and end points
340 // * scale all ranges.
341 // * Update VNI structures.
342 // * Scale instruction numberings
344 // Scale the MBB indices.
346 for (MachineFunction::iterator MBB = mf_->begin(), MBBE = mf_->end();
347 MBB != MBBE; ++MBB) {
348 std::pair<unsigned, unsigned> &mbbIndices = MBB2IdxMap[MBB->getNumber()];
349 mbbIndices.first = InstrSlots::scale(mbbIndices.first, factor);
350 mbbIndices.second = InstrSlots::scale(mbbIndices.second, factor);
351 Idx2MBBMap.push_back(std::make_pair(mbbIndices.first, MBB));
353 std::sort(Idx2MBBMap.begin(), Idx2MBBMap.end(), Idx2MBBCompare());
355 // Scale the intervals.
356 for (iterator LI = begin(), LE = end(); LI != LE; ++LI) {
357 LI->second->scaleNumbering(factor);
360 // Scale MachineInstrs.
// Rebuild mi2iMap_ with scaled slots, tracking the largest slot so the
// reverse map can be resized to fit.
361 Mi2IndexMap oldmi2iMap = mi2iMap_;
362 unsigned highestSlot = 0;
363 for (Mi2IndexMap::iterator MI = oldmi2iMap.begin(), ME = oldmi2iMap.end();
365 unsigned newSlot = InstrSlots::scale(MI->second, factor);
366 mi2iMap_[MI->first] = newSlot;
367 highestSlot = std::max(highestSlot, newSlot);
// Rebuild the index -> instruction reverse map from the scaled entries.
371 i2miMap_.resize(highestSlot + 1);
372 for (Mi2IndexMap::iterator MI = mi2iMap_.begin(), ME = mi2iMap_.end();
374 i2miMap_[MI->second] = MI->first;
380 /// runOnMachineFunction - Register allocate the whole function
382 bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
// Cache target and analysis handles used throughout the pass.
// NOTE(review): mri_ is read through mf_ before any visible assignment of
// mf_ — presumably `mf_ = &fn;` is on a line elided from this listing
// (orig 383); confirm against the full file.
384 mri_ = &mf_->getRegInfo();
385 tm_ = &fn.getTarget();
386 tri_ = tm_->getRegisterInfo();
387 tii_ = tm_->getInstrInfo();
388 aa_ = &getAnalysis<AliasAnalysis>();
389 lv_ = &getAnalysis<LiveVariables>();
390 allocatableRegs_ = tri_->getAllocatableSet(fn);
// Normalize IMPLICIT_DEFs before numbering/interval construction.
392 processImplicitDefs();
396 numIntervals += getNumIntervals();
402 /// print - Implement the dump method.
403 void LiveIntervals::print(std::ostream &O, const Module* ) const {
// First dump every computed live interval...
404 O << "********** INTERVALS **********\n";
405 for (const_iterator I = begin(), E = end(); I != E; ++I) {
406 I->second->print(O, tri_);
// ...then every instruction with its assigned index, grouped by block.
410 O << "********** MACHINEINSTRS **********\n";
411 for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end();
412 mbbi != mbbe; ++mbbi) {
413 O << ((Value*)mbbi->getBasicBlock())->getName() << ":\n";
414 for (MachineBasicBlock::iterator mii = mbbi->begin(),
415 mie = mbbi->end(); mii != mie; ++mii) {
416 O << getInstructionIndex(mii) << '\t' << *mii;
421 /// conflictsWithPhysRegDef - Returns true if the specified register
422 /// is defined during the duration of the specified interval.
423 bool LiveIntervals::conflictsWithPhysRegDef(const LiveInterval &li,
424 VirtRegMap &vrm, unsigned reg) {
// Walk every instruction index covered by each range of the interval.
425 for (LiveInterval::Ranges::const_iterator
426 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
427 for (unsigned index = getBaseIndex(I->start),
428 end = getBaseIndex(I->end-1) + InstrSlots::NUM; index != end;
429 index += InstrSlots::NUM) {
430 // skip deleted instructions
431 while (index != end && !getInstructionFromIndex(index))
432 index += InstrSlots::NUM;
433 if (index == end) break;
435 MachineInstr *MI = getInstructionFromIndex(index);
// Copies to/from li.reg itself do not count as conflicts.
436 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
437 if (tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
438 if (SrcReg == li.reg || DstReg == li.reg)
// Check each operand; virtual registers are mapped through the
// VirtRegMap to their assigned physical register first.
440 for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
441 MachineOperand& mop = MI->getOperand(i);
444 unsigned PhysReg = mop.getReg();
445 if (PhysReg == 0 || PhysReg == li.reg)
447 if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
448 if (!vrm.hasPhys(PhysReg))
450 PhysReg = vrm.getPhys(PhysReg);
452 if (PhysReg && tri_->regsOverlap(PhysReg, reg))
461 /// conflictsWithPhysRegRef - Similar to conflictsWithPhysRegDef except
462 /// it can check use as well.
463 bool LiveIntervals::conflictsWithPhysRegRef(LiveInterval &li,
464 unsigned Reg, bool CheckUse,
465 SmallPtrSet<MachineInstr*,32> &JoinedCopies) {
// Walk every instruction index covered by each range of the interval.
466 for (LiveInterval::Ranges::const_iterator
467 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
468 for (unsigned index = getBaseIndex(I->start),
469 end = getBaseIndex(I->end-1) + InstrSlots::NUM; index != end;
470 index += InstrSlots::NUM) {
471 // Skip deleted instructions.
472 MachineInstr *MI = 0;
473 while (index != end) {
474 MI = getInstructionFromIndex(index);
477 index += InstrSlots::NUM;
479 if (index == end) break;
// Copies already joined by the coalescer are going away; ignore them.
481 if (JoinedCopies.count(MI))
483 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
484 MachineOperand& MO = MI->getOperand(i);
// Uses only count as conflicts when the caller asked for them.
487 if (MO.isUse() && !CheckUse)
489 unsigned PhysReg = MO.getReg();
490 if (PhysReg == 0 || TargetRegisterInfo::isVirtualRegister(PhysReg))
492 if (tri_->isSubRegister(Reg, PhysReg))
502 void LiveIntervals::printRegName(unsigned reg) const {
503 if (TargetRegisterInfo::isPhysicalRegister(reg))
504 cerr << tri_->getName(reg);
506 cerr << "%reg" << reg;
/// handleVirtualRegisterDef - Extend/create the live interval of a virtual
/// register for the definition at index MIIdx, using LiveVariables kill
/// information. Handles first defs, two-address redefs, and PHI-elimination
/// redefs as three separate cases.
509 void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
510 MachineBasicBlock::iterator mi,
511 unsigned MIIdx, MachineOperand& MO,
513 LiveInterval &interval) {
514 DOUT << "\t\tregister: "; DEBUG(printRegName(interval.reg));
515 LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);
517 if (mi->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
518 DOUT << "is a implicit_def\n";
522 // Virtual registers may be defined multiple times (due to phi
523 // elimination and 2-addr elimination). Much of what we do only has to be
524 // done once for the vreg. We use an empty interval to detect the first
525 // time we see a vreg.
526 if (interval.empty()) {
527 // Get the Idx of the defining instructions.
528 unsigned defIndex = getDefIndex(MIIdx);
529 // Earlyclobbers move back one.
530 if (MO.isEarlyClobber())
531 defIndex = getUseIndex(MIIdx);
// Remember the defining copy (subreg ops count as copies) so the
// coalescer can later identify the copy source for this value number.
533 MachineInstr *CopyMI = NULL;
534 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
535 if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
536 mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
537 mi->getOpcode() == TargetInstrInfo::SUBREG_TO_REG ||
538 tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
540 // Earlyclobbers move back one.
541 ValNo = interval.getNextValue(defIndex, CopyMI, true, VNInfoAllocator);
543 assert(ValNo->id == 0 && "First value in interval is not 0?");
545 // Loop over all of the blocks that the vreg is defined in. There are
546 // two cases we have to handle here. The most common case is a vreg
547 // whose lifetime is contained within a basic block. In this case there
548 // will be a single kill, in MBB, which comes after the definition.
549 if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
550 // FIXME: what about dead vars?
552 if (vi.Kills[0] != mi)
553 killIdx = getUseIndex(getInstructionIndex(vi.Kills[0]))+1;
555 killIdx = defIndex+1;
557 // If the kill happens after the definition, we have an intra-block
559 if (killIdx > defIndex) {
560 assert(vi.AliveBlocks.empty() &&
561 "Shouldn't be alive across any blocks!");
562 LiveRange LR(defIndex, killIdx, ValNo);
563 interval.addRange(LR);
564 DOUT << " +" << LR << "\n";
565 interval.addKill(ValNo, killIdx);
570 // The other case we handle is when a virtual register lives to the end
571 // of the defining block, potentially live across some blocks, then is
572 // live into some number of blocks, but gets killed. Start by adding a
573 // range that goes from this definition to the end of the defining block.
574 LiveRange NewLR(defIndex, getMBBEndIdx(mbb)+1, ValNo);
575 DOUT << " +" << NewLR;
576 interval.addRange(NewLR);
578 // Iterate over all of the blocks that the variable is completely
579 // live in, adding [insrtIndex(begin), instrIndex(end)+4) to the
581 for (SparseBitVector<>::iterator I = vi.AliveBlocks.begin(),
582 E = vi.AliveBlocks.end(); I != E; ++I) {
583 LiveRange LR(getMBBStartIdx(*I),
584 getMBBEndIdx(*I)+1, // MBB ends at -1.
586 interval.addRange(LR);
590 // Finally, this virtual register is live from the start of any killing
591 // block to the 'use' slot of the killing instruction.
592 for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
593 MachineInstr *Kill = vi.Kills[i];
594 unsigned killIdx = getUseIndex(getInstructionIndex(Kill))+1;
595 LiveRange LR(getMBBStartIdx(Kill->getParent()),
597 interval.addRange(LR);
598 interval.addKill(ValNo, killIdx);
603 // If this is the second time we see a virtual register definition, it
604 // must be due to phi elimination or two addr elimination. If this is
605 // the result of two address elimination, then the vreg is one of the
606 // def-and-use register operand.
607 if (mi->isRegTiedToUseOperand(MOIdx)) {
608 // If this is a two-address definition, then we have already processed
609 // the live range. The only problem is that we didn't realize there
610 // are actually two values in the live interval. Because of this we
611 // need to take the LiveRegion that defines this register and split it
613 assert(interval.containsOneValue());
614 unsigned DefIndex = getDefIndex(interval.getValNumInfo(0)->def);
615 unsigned RedefIndex = getDefIndex(MIIdx);
616 if (MO.isEarlyClobber())
617 RedefIndex = getUseIndex(MIIdx);
619 const LiveRange *OldLR = interval.getLiveRangeContaining(RedefIndex-1);
620 VNInfo *OldValNo = OldLR->valno;
622 // Delete the initial value, which should be short and continuous,
623 // because the 2-addr copy must be in the same MBB as the redef.
624 interval.removeRange(DefIndex, RedefIndex);
626 // Two-address vregs should always only be redefined once. This means
627 // that at this point, there should be exactly one value number in it.
628 assert(interval.containsOneValue() && "Unexpected 2-addr liveint!");
630 // The new value number (#1) is defined by the instruction we claimed
632 VNInfo *ValNo = interval.getNextValue(OldValNo->def, OldValNo->copy,
633 false, // update at *
635 ValNo->setFlags(OldValNo->getFlags()); // * <- updating here
637 // Value#0 is now defined by the 2-addr instruction.
638 OldValNo->def = RedefIndex;
// Early-clobber redefs are flagged so interference checks treat the
// overlap at the redef slot as legal.
640 if (MO.isEarlyClobber())
641 OldValNo->setHasRedefByEC(true);
643 // Add the new live interval which replaces the range for the input copy.
644 LiveRange LR(DefIndex, RedefIndex, ValNo);
645 DOUT << " replace range with " << LR;
646 interval.addRange(LR);
647 interval.addKill(ValNo, RedefIndex);
649 // If this redefinition is dead, we need to add a dummy unit live
650 // range covering the def slot.
652 interval.addRange(LiveRange(RedefIndex, RedefIndex+1, OldValNo));
655 interval.print(DOUT, tri_);
658 // Otherwise, this must be because of phi elimination. If this is the
659 // first redefinition of the vreg that we have seen, go back and change
660 // the live range in the PHI block to be a different value number.
661 if (interval.containsOneValue()) {
662 assert(vi.Kills.size() == 1 &&
663 "PHI elimination vreg should have one kill, the PHI itself!");
665 // Remove the old range that we now know has an incorrect number.
666 VNInfo *VNI = interval.getValNumInfo(0);
667 MachineInstr *Killer = vi.Kills[0];
668 unsigned Start = getMBBStartIdx(Killer->getParent());
669 unsigned End = getUseIndex(getInstructionIndex(Killer))+1;
670 DOUT << " Removing [" << Start << "," << End << "] from: ";
671 interval.print(DOUT, tri_); DOUT << "\n";
672 interval.removeRange(Start, End);
673 VNI->setHasPHIKill(true);
674 DOUT << " RESULT: "; interval.print(DOUT, tri_);
676 // Replace the interval with one of a NEW value number. Note that this
677 // value number isn't actually defined by an instruction, weird huh? :)
678 LiveRange LR(Start, End,
679 interval.getNextValue(mbb->getNumber(), 0, false, VNInfoAllocator));
680 LR.valno->setIsPHIDef(true);
681 DOUT << " replace range with " << LR;
682 interval.addRange(LR);
683 interval.addKill(LR.valno, End);
684 DOUT << " RESULT: "; interval.print(DOUT, tri_);
687 // In the case of PHI elimination, each variable definition is only
688 // live until the end of the block. We've already taken care of the
689 // rest of the live range.
690 unsigned defIndex = getDefIndex(MIIdx);
691 if (MO.isEarlyClobber())
692 defIndex = getUseIndex(MIIdx);
695 MachineInstr *CopyMI = NULL;
696 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
697 if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
698 mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
699 mi->getOpcode() == TargetInstrInfo::SUBREG_TO_REG ||
700 tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
702 ValNo = interval.getNextValue(defIndex, CopyMI, true, VNInfoAllocator);
// The PHI-eliminated def is live only to the end of its defining block.
704 unsigned killIndex = getMBBEndIdx(mbb) + 1;
705 LiveRange LR(defIndex, killIndex, ValNo);
706 interval.addRange(LR);
707 interval.addKill(ValNo, killIndex);
708 ValNo->setHasPHIKill(true);
/// handlePhysicalRegisterDef - Build the (block-local) live range for a
/// physical register defined at MIIdx, scanning forward to the kill, the
/// next redefinition, or end of block.
716 void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
717 MachineBasicBlock::iterator mi,
720 LiveInterval &interval,
721 MachineInstr *CopyMI) {
722 // A physical register cannot be live across basic block, so its
723 // lifetime must end somewhere in its defining basic block.
724 DOUT << "\t\tregister: "; DEBUG(printRegName(interval.reg));
726 unsigned baseIndex = MIIdx;
727 unsigned start = getDefIndex(baseIndex);
728 // Earlyclobbers move back one.
729 if (MO.isEarlyClobber())
730 start = getUseIndex(MIIdx);
731 unsigned end = start;
733 // If it is not used after definition, it is considered dead at
734 // the instruction defining it. Hence its interval is:
735 // [defSlot(def), defSlot(def)+1)
742 // If it is not dead on definition, it must be killed by a
743 // subsequent instruction. Hence its interval is:
744 // [defSlot(def), useSlot(kill)+1)
745 baseIndex += InstrSlots::NUM;
746 while (++mi != MBB->end()) {
// Keep baseIndex in sync with mi by skipping empty index slots.
747 while (baseIndex / InstrSlots::NUM < i2miMap_.size() &&
748 getInstructionFromIndex(baseIndex) == 0)
749 baseIndex += InstrSlots::NUM;
750 if (mi->killsRegister(interval.reg, tri_)) {
752 end = getUseIndex(baseIndex) + 1;
755 int DefIdx = mi->findRegisterDefOperandIdx(interval.reg, false, tri_);
757 if (mi->isRegTiedToUseOperand(DefIdx)) {
758 // Two-address instruction.
759 end = getDefIndex(baseIndex);
760 if (mi->getOperand(DefIdx).isEarlyClobber())
761 end = getUseIndex(baseIndex);
763 // Another instruction redefines the register before it is ever read.
764 // Then the register is essentially dead at the instruction that defines
765 // it. Hence its interval is:
766 // [defSlot(def), defSlot(def)+1)
774 baseIndex += InstrSlots::NUM;
777 // The only case we should have a dead physreg here without a killing or
778 // instruction where we know it's dead is if it is live-in to the function
779 // and never used. Another possible case is the implicit use of the
780 // physical register has been deleted by two-address pass.
784 assert(start < end && "did not find end of interval?");
786 // Already exists? Extend old live interval.
787 LiveInterval::iterator OldLR = interval.FindLiveRangeContaining(start);
788 bool Extend = OldLR != interval.end();
789 VNInfo *ValNo = Extend
790 ? OldLR->valno : interval.getNextValue(start, CopyMI, true, VNInfoAllocator);
791 if (MO.isEarlyClobber() && Extend)
792 ValNo->setHasRedefByEC(true);
793 LiveRange LR(start, end, ValNo);
794 interval.addRange(LR);
795 interval.addKill(LR.valno, end);
796 DOUT << " +" << LR << '\n';
/// handleRegisterDef - Dispatch a register definition to the virtual- or
/// physical-register handler; physreg defs also define their sub-registers.
799 void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
800 MachineBasicBlock::iterator MI,
804 if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
805 handleVirtualRegisterDef(MBB, MI, MIIdx, MO, MOIdx,
806 getOrCreateInterval(MO.getReg()));
// Only allocatable physical registers get intervals.
807 else if (allocatableRegs_[MO.getReg()]) {
// Identify defining copies (subreg ops count) so the value number can
// carry its copy source.
808 MachineInstr *CopyMI = NULL;
809 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
810 if (MI->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
811 MI->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
812 MI->getOpcode() == TargetInstrInfo::SUBREG_TO_REG ||
813 tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
815 handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
816 getOrCreateInterval(MO.getReg()), CopyMI);
817 // Def of a register also defines its sub-registers.
818 for (const unsigned* AS = tri_->getSubRegisters(MO.getReg()); *AS; ++AS)
819 // If MI also modifies the sub-register explicitly, avoid processing it
820 // more than once. Do not pass in TRI here so it checks for exact match.
821 if (!MI->modifiesRegister(*AS))
822 handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
823 getOrCreateInterval(*AS), 0);
/// handleLiveInRegister - Build the live range for a register that is live
/// into MBB, extending from the block start to its first kill or redef.
827 void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
829 LiveInterval &interval, bool isAlias) {
830 DOUT << "\t\tlivein register: "; DEBUG(printRegName(interval.reg));
832 // Look for kills, if it reaches a def before it's killed, then it shouldn't
833 // be considered a livein.
834 MachineBasicBlock::iterator mi = MBB->begin();
835 unsigned baseIndex = MIIdx;
836 unsigned start = baseIndex;
// Skip empty index slots at the start of the block.
837 while (baseIndex / InstrSlots::NUM < i2miMap_.size() &&
838 getInstructionFromIndex(baseIndex) == 0)
839 baseIndex += InstrSlots::NUM;
840 unsigned end = baseIndex;
841 bool SeenDefUse = false;
843 while (mi != MBB->end()) {
844 if (mi->killsRegister(interval.reg, tri_)) {
846 end = getUseIndex(baseIndex) + 1;
849 } else if (mi->modifiesRegister(interval.reg, tri_)) {
850 // Another instruction redefines the register before it is ever read.
851 // Then the register is essentially dead at the instruction that defines
852 // it. Hence its interval is:
853 // [defSlot(def), defSlot(def)+1)
855 end = getDefIndex(start) + 1;
860 baseIndex += InstrSlots::NUM;
// Advance past empty index slots between instructions.
862 if (mi != MBB->end()) {
863 while (baseIndex / InstrSlots::NUM < i2miMap_.size() &&
864 getInstructionFromIndex(baseIndex) == 0)
865 baseIndex += InstrSlots::NUM;
869 // Live-in register might not be used at all.
873 end = getDefIndex(MIIdx) + 1;
875 DOUT << " live through";
// Live-in values are PHI-like: defined at the block boundary, not by an
// instruction, so the value number is tagged as a PHI def.
881 interval.getNextValue(MBB->getNumber(), 0, false, VNInfoAllocator);
882 vni->setIsPHIDef(true);
883 LiveRange LR(start, end, vni);
885 interval.addRange(LR);
886 interval.addKill(LR.valno, end);
887 DOUT << " +" << LR << '\n';
890 /// computeIntervals - computes the live intervals for virtual
891 /// registers. for some ordering of the machine instructions [1,N] a
892 /// live interval is an interval [i, j) where 1 <= i <= j < N for
893 /// which a variable is live
894 void LiveIntervals::computeIntervals() {
896 DOUT << "********** COMPUTING LIVE INTERVALS **********\n"
897 << "********** Function: "
898 << ((Value*)mf_->getFunction())->getName() << '\n';
900 for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
902 MachineBasicBlock *MBB = MBBI;
903 // Track the index of the current machine instr.
904 unsigned MIIndex = getMBBStartIdx(MBB);
905 DOUT << ((Value*)MBB->getBasicBlock())->getName() << ":\n";
907 MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();
909 // Create intervals for live-ins to this BB first.
910 for (MachineBasicBlock::const_livein_iterator LI = MBB->livein_begin(),
911 LE = MBB->livein_end(); LI != LE; ++LI) {
912 handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
913 // Multiple live-ins can alias the same register.
914 for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
915 if (!hasInterval(*AS))
916 handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
920 // Skip over empty initial indices.
921 while (MIIndex / InstrSlots::NUM < i2miMap_.size() &&
922 getInstructionFromIndex(MIIndex) == 0)
923 MIIndex += InstrSlots::NUM;
925 for (; MI != miEnd; ++MI) {
926 DOUT << MIIndex << "\t" << *MI;
// Iterate operands in reverse so implicit defs are seen after explicit.
929 for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
930 MachineOperand &MO = MI->getOperand(i);
931 // handle register defs - build intervals
932 if (MO.isReg() && MO.getReg() && MO.isDef()) {
933 handleRegisterDef(MBB, MI, MIIndex, MO, i);
937 // Skip over the empty slots after each instruction.
938 unsigned Slots = MI->getDesc().getNumDefs();
941 MIIndex += InstrSlots::NUM * Slots;
943 // Skip over empty indices.
944 while (MIIndex / InstrSlots::NUM < i2miMap_.size() &&
945 getInstructionFromIndex(MIIndex) == 0)
946 MIIndex += InstrSlots::NUM;
/// findLiveInMBBs - Collect into MBBs the blocks whose start index falls
/// inside [Start, End), using binary search over the sorted Idx2MBBMap.
/// NOTE(review): the loop body's bounds checks and return value are elided
/// from this listing (orig 955-966); confirm against the full file.
951 bool LiveIntervals::findLiveInMBBs(unsigned Start, unsigned End,
952 SmallVectorImpl<MachineBasicBlock*> &MBBs) const {
953 std::vector<IdxMBBPair>::const_iterator I =
954 std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), Start);
957 while (I != Idx2MBBMap.end()) {
960 MBBs.push_back(I->second);
/// findReachableMBBs - Like findLiveInMBBs, but additionally collects the
/// successors of each block found in [Start, End).
967 bool LiveIntervals::findReachableMBBs(unsigned Start, unsigned End,
968 SmallVectorImpl<MachineBasicBlock*> &MBBs) const {
969 std::vector<IdxMBBPair>::const_iterator I =
970 std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), Start);
973 while (I != Idx2MBBMap.end()) {
976 MachineBasicBlock *MBB = I->second;
// Stop once the block extends past the end of the queried range.
977 if (getMBBEndIdx(MBB) > End)
979 for (MachineBasicBlock::succ_iterator SI = MBB->succ_begin(),
980 SE = MBB->succ_end(); SI != SE; ++SI)
988 LiveInterval* LiveIntervals::createInterval(unsigned reg) {
989 float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ? HUGE_VALF : 0.0F;
990 return new LiveInterval(reg, Weight);
993 /// dupInterval - Duplicate a live interval. The caller is responsible for
994 /// managing the allocated memory.
995 LiveInterval* LiveIntervals::dupInterval(LiveInterval *li) {
996 LiveInterval *NewLI = createInterval(li->reg);
997 NewLI->Copy(*li, mri_, getVNInfoAllocator());
1001 /// getVNInfoSourceReg - Helper function that parses the specified VNInfo
1002 /// copy field and returns the source register that defines it.
1003 unsigned LiveIntervals::getVNInfoSourceReg(const VNInfo *VNI) const {
1007 if (VNI->copy->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG) {
1008 // If it's extracting out of a physical register, return the sub-register.
1009 unsigned Reg = VNI->copy->getOperand(1).getReg();
1010 if (TargetRegisterInfo::isPhysicalRegister(Reg))
1011 Reg = tri_->getSubReg(Reg, VNI->copy->getOperand(2).getImm());
// INSERT_SUBREG / SUBREG_TO_REG: the inserted value is operand 2.
1013 } else if (VNI->copy->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
1014 VNI->copy->getOpcode() == TargetInstrInfo::SUBREG_TO_REG)
1015 return VNI->copy->getOperand(2).getReg();
// Otherwise it must be a target move instruction; ask TII for its source.
1017 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
1018 if (tii_->isMoveInstr(*VNI->copy, SrcReg, DstReg, SrcSubReg, DstSubReg))
1020 assert(0 && "Unrecognized copy instruction!");
1024 //===----------------------------------------------------------------------===//
1025 // Register allocator hooks.
1028 /// getReMatImplicitUse - If the remat definition MI has one (for now, we only
1029 /// allow one) virtual register operand, then its uses are implicitly using
1030 /// the register. Returns the virtual register.
1031 unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
1032 MachineInstr *MI) const {
// Scan every operand for a register use other than li.reg itself.
1034 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1035 MachineOperand &MO = MI->getOperand(i);
1036 if (!MO.isReg() || !MO.isUse())
1038 unsigned Reg = MO.getReg();
1039 if (Reg == 0 || Reg == li.reg)
// Uses of non-allocatable physical registers (e.g. reserved registers)
// do not count as the implicit-use operand.
1042 if (TargetRegisterInfo::isPhysicalRegister(Reg) &&
1043 !allocatableRegs_[Reg])
1045 // FIXME: For now, only remat MI with at most one register operand.
// (Tail of an assert whose opening line is elided: a second register
// operand is not supported by the remat machinery.)
1047 "Can't rematerialize instruction with multiple register operand!");
1048 RegOp = MO.getReg();
1056 /// isValNoAvailableAt - Return true if the val# of the specified interval
1057 /// which reaches the given instruction also reaches the specified use index.
1058 bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
1059 unsigned UseIdx) const {
1060 unsigned Index = getInstructionIndex(MI);
// Assumes MI's index is covered by some range of li; otherwise this
// dereferences end(). Callers must guarantee that.
1061 VNInfo *ValNo = li.FindLiveRangeContaining(Index)->valno;
1062 LiveInterval::const_iterator UI = li.FindLiveRangeContaining(UseIdx);
// Available iff UseIdx is inside the interval and sees the same val#.
1063 return UI != li.end() && UI->valno == ValNo;
1066 /// isReMaterializable - Returns true if the definition MI of the specified
1067 /// val# of the specified interval is re-materializable.
/// SpillIs lists intervals about to be spilled; a remat whose implicit-use
/// register is in that list is rejected. NOTE(review): several statements
/// (early returns, loop increments) are in lines elided from this excerpt.
1068 bool LiveIntervals::isReMaterializable(const LiveInterval &li,
1069 const VNInfo *ValNo, MachineInstr *MI,
1070 SmallVectorImpl<LiveInterval*> &SpillIs,
// Trivial case: an IMPLICIT_DEF can always be rematerialized.
1075 if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF)
1079 if (tii_->isLoadFromStackSlot(MI, FrameIdx) &&
1080 mf_->getFrameInfo()->isImmutableObjectIndex(FrameIdx))
1081 // FIXME: Let target specific isReallyTriviallyReMaterializable determine
1082 // this but remember this is not safe to fold into a two-address
1084 // This is a load from fixed stack slot. It can be rematerialized.
1087 // If the target-specific rules don't identify an instruction as
1088 // being trivially rematerializable, use some target-independent
1090 if (!MI->getDesc().isRematerializable() ||
1091 !tii_->isTriviallyReMaterializable(MI)) {
// Without -aggressive-remat, a non-trivially-remattable def is rejected.
1092 if (!EnableAggressiveRemat)
1095 // If the instruction accesses memory but the memoperands have been lost,
1096 // we can't analyze it.
1097 const TargetInstrDesc &TID = MI->getDesc();
1098 if ((TID.mayLoad() || TID.mayStore()) && MI->memoperands_empty())
1101 // Avoid instructions obviously unsafe for remat.
1102 if (TID.hasUnmodeledSideEffects() || TID.isNotDuplicable())
1105 // If the instruction accesses memory and the memory could be non-constant,
1106 // assume the instruction is not rematerializable.
1107 for (std::list<MachineMemOperand>::const_iterator
1108 I = MI->memoperands_begin(), E = MI->memoperands_end(); I != E; ++I){
1109 const MachineMemOperand &MMO = *I;
1110 if (MMO.isVolatile() || MMO.isStore())
1112 const Value *V = MMO.getValue();
// Pseudo source values (constant pool, fixed stack, etc.) are queried
// directly; real IR values go through alias analysis.
1115 if (const PseudoSourceValue *PSV = dyn_cast<PseudoSourceValue>(V)) {
1116 if (!PSV->isConstant(mf_->getFrameInfo()))
1118 } else if (!aa_->pointsToConstantMemory(V))
1122 // If any of the registers accessed are non-constant, conservatively assume
1123 // the instruction is not rematerializable.
1124 unsigned ImpUse = 0;
1125 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1126 const MachineOperand &MO = MI->getOperand(i);
1128 unsigned Reg = MO.getReg();
1131 if (TargetRegisterInfo::isPhysicalRegister(Reg))
1134 // Only allow one def, and that in the first operand.
1135 if (MO.isDef() != (i == 0))
1138 // Only allow constant-valued registers.
1139 bool IsLiveIn = mri_->isLiveIn(Reg);
1140 MachineRegisterInfo::def_iterator I = mri_->def_begin(Reg),
1141 E = mri_->def_end();
1143 // For the def, it should be the only def of that register.
1144 if (MO.isDef() && (next(I) != E || IsLiveIn))
1148 // Only allow one use other register use, as that's all the
1149 // remat mechanisms support currently.
1150 if (Reg != li.reg) {
1153 else if (Reg != ImpUse)
1156 // For the use, there should be only one associated def.
1157 if (I != E && (next(I) != E || IsLiveIn))
// If the def has a (single) virtual register use, the value must be
// available at every use of li.reg that this val# reaches.
1164 unsigned ImpUse = getReMatImplicitUse(li, MI);
1166 const LiveInterval &ImpLi = getInterval(ImpUse);
1167 for (MachineRegisterInfo::use_iterator ri = mri_->use_begin(li.reg),
1168 re = mri_->use_end(); ri != re; ++ri) {
1169 MachineInstr *UseMI = &*ri;
1170 unsigned UseIdx = getInstructionIndex(UseMI);
// Only uses reached by this val# matter.
1171 if (li.FindLiveRangeContaining(UseIdx)->valno != ValNo)
1173 if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
1177 // If a register operand of the re-materialized instruction is going to
1178 // be spilled next, then it's not legal to re-materialize this instruction.
1179 for (unsigned i = 0, e = SpillIs.size(); i != e; ++i)
1180 if (ImpUse == SpillIs[i]->reg)
1186 /// isReMaterializable - Returns true if the definition MI of the specified
1187 /// val# of the specified interval is re-materializable.
/// Convenience overload: no spill-candidate list, isLoad result discarded.
1188 bool LiveIntervals::isReMaterializable(const LiveInterval &li,
1189 const VNInfo *ValNo, MachineInstr *MI) {
1190 SmallVector<LiveInterval*, 4> Dummy1;
1192 return isReMaterializable(li, ValNo, MI, Dummy1, Dummy2);
1195 /// isReMaterializable - Returns true if every definition of MI of every
1196 /// val# of the specified interval is re-materializable.
/// isLoad (out parameter, declaration elided here) accumulates whether any
/// of the rematerializable defs is a load.
1197 bool LiveIntervals::isReMaterializable(const LiveInterval &li,
1198 SmallVectorImpl<LiveInterval*> &SpillIs,
// Check every value number of the interval in turn.
1201 for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
1203 const VNInfo *VNI = *i;
1204 if (VNI->isUnused())
1205 continue; // Dead val#.
1206 // Is the def for the val# rematerializable?
1207 if (!VNI->isDefAccurate())
1209 MachineInstr *ReMatDefMI = getInstructionFromIndex(VNI->def);
1210 bool DefIsLoad = false;
// A single non-rematerializable def makes the whole interval ineligible.
1212 !isReMaterializable(li, VNI, ReMatDefMI, SpillIs, DefIsLoad))
1214 isLoad |= DefIsLoad;
1219 /// FilterFoldedOps - Filter out two-address use operands. Return
1220 /// true if it finds any issue with the operands that ought to prevent
/// folding. Surviving operand indices are copied into FoldOps, and MRInfo
/// (declaration elided here) accumulates VirtRegMap mod/ref flags.
1222 static bool FilterFoldedOps(MachineInstr *MI,
1223 SmallVector<unsigned, 2> &Ops,
1225 SmallVector<unsigned, 2> &FoldOps) {
1227 for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
1228 unsigned OpIdx = Ops[i];
1229 MachineOperand &MO = MI->getOperand(OpIdx);
1230 // FIXME: fold subreg use.
// A def contributes the isMod bit to the accumulated mod/ref info.
1234 MRInfo |= (unsigned)VirtRegMap::isMod;
1236 // Filter out two-address use operand(s).
1237 if (MI->isRegTiedToDefOperand(OpIdx)) {
// A tied use implies both a read and a write of the register.
1238 MRInfo = VirtRegMap::isModRef;
1241 MRInfo |= (unsigned)VirtRegMap::isRef;
// This operand survived filtering; it is a candidate for folding.
1243 FoldOps.push_back(OpIdx);
1249 /// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
1250 /// slot / to reg or any rematerialized load into ith operand of specified
1251 /// MI. If it is successful, MI is updated with the newly created MI and
/// true is returned (tail of this doc comment is elided from the excerpt).
1253 bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
1254 VirtRegMap &vrm, MachineInstr *DefMI,
1256 SmallVector<unsigned, 2> &Ops,
1257 bool isSS, int Slot, unsigned Reg) {
1258 // If it is an implicit def instruction, just delete it.
1259 if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
1260 RemoveMachineInstrFromMaps(MI);
1261 vrm.RemoveMachineInstrFromMaps(MI);
1262 MI->eraseFromParent();
1267 // Filter the list of operand indexes that are to be folded. Abort if
1268 // any operand will prevent folding.
1269 unsigned MRInfo = 0;
1270 SmallVector<unsigned, 2> FoldOps;
1271 if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
1274 // The only time it's safe to fold into a two address instruction is when
1275 // it's folding reload and spill from / into a spill stack slot.
1276 if (DefMI && (MRInfo & VirtRegMap::isMod))
// Fold either the stack slot (spill / reload) or the remat def instruction.
1279 MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(*mf_, MI, FoldOps, Slot)
1280 : tii_->foldMemoryOperand(*mf_, MI, FoldOps, DefMI);
1282 // Remember this instruction uses the spill slot.
1283 if (isSS) vrm.addSpillSlotUse(Slot, fmi);
1285 // Attempt to fold the memory reference into the instruction. If
1286 // we can do this, we don't need to insert spill code.
1287 MachineBasicBlock &MBB = *MI->getParent();
1288 if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
1289 vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
// Transfer any spill / restore / emergency-spill points recorded for the
// old instruction onto the folded replacement.
1290 vrm.transferSpillPts(MI, fmi);
1291 vrm.transferRestorePts(MI, fmi);
1292 vrm.transferEmergencySpills(MI, fmi);
// Keep the index <-> instruction maps in sync and splice in the new MI.
1294 i2miMap_[InstrIdx /InstrSlots::NUM] = fmi;
1295 mi2iMap_[fmi] = InstrIdx;
1296 MI = MBB.insert(MBB.erase(MI), fmi);
1303 /// canFoldMemoryOperand - Returns true if the specified load / store
1304 /// folding is possible.
1305 bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
1306 SmallVector<unsigned, 2> &Ops,
1308 // Filter the list of operand indexes that are to be folded. Abort if
1309 // any operand will prevent folding.
1310 unsigned MRInfo = 0;
1311 SmallVector<unsigned, 2> FoldOps;
1312 if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
1315 // It's only legal to remat for a use, not a def.
1316 if (ReMat && (MRInfo & VirtRegMap::isMod))
// Defer the final answer to the target's folding hook.
1319 return tii_->canFoldMemoryOperand(MI, FoldOps);
/// intervalIsInOneMBB - Return true if every live range of li lies entirely
/// within one and the same basic block.
1322 bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
1323 SmallPtrSet<MachineBasicBlock*, 4> MBBs;
1324 for (LiveInterval::Ranges::const_iterator
1325 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
// Find the block containing the range start via binary search.
1326 std::vector<IdxMBBPair>::const_iterator II =
1327 std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), I->start);
1328 if (II == Idx2MBBMap.end())
1330 if (I->end > II->first) // crossing a MBB.
1332 MBBs.insert(II->second);
// A second distinct block means the interval spans multiple MBBs.
1333 if (MBBs.size() > 1)
1339 /// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of
1340 /// interval on to-be re-materialized operands of MI) with new register.
1341 void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
1342 MachineInstr *MI, unsigned NewVReg,
1344 // There is an implicit use. That means one of the other operand is
1345 // being remat'ed and the remat'ed instruction has li.reg as an
1346 // use operand. Make sure we rewrite that as well.
1347 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1348 MachineOperand &MO = MI->getOperand(i);
1351 unsigned Reg = MO.getReg();
// Only rematerialized virtual registers are of interest here.
1352 if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
1354 if (!vrm.isReMaterialized(Reg))
1356 MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
// Redirect the remat'ed instruction's use of li.reg to the new vreg.
1357 MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg);
1359 UseMO->setReg(NewVReg);
1363 /// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
1364 /// for addIntervalsForSpills to rewrite uses / defs for the given live range.
/// Rewrites one instruction's references to li.reg: it may delete a
/// fully-rematerialized def, fold the spill slot or remat load into the
/// instruction, or rewrite the operands to NewVReg (creating it on demand)
/// and extend/create the new spill interval's ranges. HasDef / HasUse are
/// out parameters describing what was rewritten. Returns whether the
/// rewritten operands could all be folded (CanFold).
/// NOTE(review): many statements of this function are in lines elided from
/// this excerpt; comments below describe only the visible code.
1365 bool LiveIntervals::
1366 rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
1367 bool TrySplit, unsigned index, unsigned end, MachineInstr *MI,
1368 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
1369 unsigned Slot, int LdSlot,
1370 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
1372 const TargetRegisterClass* rc,
1373 SmallVector<int, 4> &ReMatIds,
1374 const MachineLoopInfo *loopInfo,
1375 unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
1376 DenseMap<unsigned,unsigned> &MBBVRegsMap,
1377 std::vector<LiveInterval*> &NewLIs) {
1378 bool CanFold = false;
// Walk the operands looking for references to li.reg.
1380 for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
1381 MachineOperand& mop = MI->getOperand(i);
1384 unsigned Reg = mop.getReg();
1385 unsigned RegI = Reg;
1386 if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
1391 bool TryFold = !DefIsReMat;
1392 bool FoldSS = true; // Default behavior unless it's a remat.
1393 int FoldSlot = Slot;
1395 // If this is the rematerializable definition MI itself and
1396 // all of its uses are rematerialized, simply delete it.
1397 if (MI == ReMatOrigDefMI && CanDelete) {
1398 DOUT << "\t\t\t\tErasing re-materlizable def: ";
1400 RemoveMachineInstrFromMaps(MI);
1401 vrm.RemoveMachineInstrFromMaps(MI);
1402 MI->eraseFromParent();
1406 // If def for this use can't be rematerialized, then try folding.
1407 // If def is rematerializable and it's a load, also try folding.
1408 TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
1410 // Try fold loads (from stack slot, constant pool, etc.) into uses.
1416 // Scan all of the operands of this instruction rewriting operands
1417 // to use NewVReg instead of li.reg as appropriate. We do this for
1420 // 1. If the instr reads the same spilled vreg multiple times, we
1421 // want to reuse the NewVReg.
1422 // 2. If the instr is a two-addr instruction, we are required to
1423 // keep the src/dst regs pinned.
1425 // Keep track of whether we replace a use and/or def so that we can
1426 // create the spill interval with the appropriate range.
1428 HasUse = mop.isUse();
1429 HasDef = mop.isDef();
1430 SmallVector<unsigned, 2> Ops;
// Gather the remaining operands of MI that also reference li.reg so they
// are all rewritten together.
1432 for (unsigned j = i+1, e = MI->getNumOperands(); j != e; ++j) {
1433 const MachineOperand &MOj = MI->getOperand(j);
1436 unsigned RegJ = MOj.getReg();
1437 if (RegJ == 0 || TargetRegisterInfo::isPhysicalRegister(RegJ))
1441 HasUse |= MOj.isUse();
1442 HasDef |= MOj.isDef();
1446 if (HasUse && !li.liveAt(getUseIndex(index)))
1447 // Must be defined by an implicit def. It should not be spilled. Note,
1448 // this is for correctness reason. e.g.
1449 // 8   %reg1024<def> = IMPLICIT_DEF
1450 // 12  %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
1451 // The live range [12, 14) are not part of the r1024 live interval since
1452 // it's defined by an implicit def. It will not conflict with live
1453 // interval of r1025. Now suppose both registers are spilled, you can
1454 // easily see a situation where both registers are reloaded before
1455 // the INSERT_SUBREG and both target registers that would overlap.
1458 // Create a new virtual register for the spill interval.
1459 // Create the new register now so we can map the fold instruction
1460 // to the new register so when it is unfolded we get the correct
1462 bool CreatedNewVReg = false;
1464 NewVReg = mri_->createVirtualRegister(rc);
1466 CreatedNewVReg = true;
1472 // Do not fold load / store here if we are splitting. We'll find an
1473 // optimal point to insert a load / store later.
1475 if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
1476 Ops, FoldSS, FoldSlot, NewVReg)) {
1477 // Folding the load/store can completely change the instruction in
1478 // unpredictable ways, rescan it from the beginning.
1481 // We need to give the new vreg the same stack slot as the
1482 // spilled interval.
1483 vrm.assignVirt2StackSlot(NewVReg, FoldSlot);
// If folding erased MI from the maps, the whole instruction is gone;
// otherwise restart the operand scan on the replacement instruction.
1489 if (isNotInMIMap(MI))
1491 goto RestartInstruction;
1494 // We'll try to fold it later if it's profitable.
1495 CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat);
// Rewrite this operand (and any gathered sibling operands) to NewVReg.
1499 mop.setReg(NewVReg);
1500 if (mop.isImplicit())
1501 rewriteImplicitOps(li, MI, NewVReg, vrm);
1503 // Reuse NewVReg for other reads.
1504 for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
1505 MachineOperand &mopj = MI->getOperand(Ops[j]);
1506 mopj.setReg(NewVReg);
1507 if (mopj.isImplicit())
1508 rewriteImplicitOps(li, MI, NewVReg, vrm);
// Bookkeeping for a freshly created vreg: record remat / stack slot info.
1511 if (CreatedNewVReg) {
1513 vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI/*, CanDelete*/);
1514 if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
1515 // Each valnum may have its own remat id.
1516 ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
1518 vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
1520 if (!CanDelete || (HasUse && HasDef)) {
1521 // If this is a two-addr instruction then its use operands are
1522 // rematerializable but its def is not. It should be assigned a
1524 vrm.assignVirt2StackSlot(NewVReg, Slot);
1527 vrm.assignVirt2StackSlot(NewVReg, Slot);
1529 } else if (HasUse && HasDef &&
1530 vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
1531 // If this interval hasn't been assigned a stack slot (because earlier
1532 // def is a deleted remat def), do it now.
1533 assert(Slot != VirtRegMap::NO_STACK_SLOT);
1534 vrm.assignVirt2StackSlot(NewVReg, Slot);
1537 // Re-matting an instruction with virtual register use. Add the
1538 // register as an implicit use on the use MI.
1539 if (DefIsReMat && ImpUse)
1540 MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
1542 // Create a new register interval for this spill / remat.
1543 LiveInterval &nI = getOrCreateInterval(NewVReg);
1544 if (CreatedNewVReg) {
1545 NewLIs.push_back(&nI);
1546 MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
1548 vrm.setIsSplitFromReg(NewVReg, li.reg);
// Extend / create the ranges of the new interval around this def / use.
1552 if (CreatedNewVReg) {
1553 LiveRange LR(getLoadIndex(index), getUseIndex(index)+1,
1554 nI.getNextValue(0, 0, false, VNInfoAllocator));
1558 // Extend the split live interval to this def / use.
1559 unsigned End = getUseIndex(index)+1;
1560 LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
1561 nI.getValNumInfo(nI.getNumValNums()-1));
// A def gets a range from the def slot to the store slot.
1567 LiveRange LR(getDefIndex(index), getStoreIndex(index),
1568 nI.getNextValue(0, 0, false, VNInfoAllocator));
1573 DOUT << "\t\t\t\tAdded new interval: ";
1574 nI.print(DOUT, tri_);
/// anyKillInMBBAfterIdx - Return true if the value number has a kill
/// recorded inside MBB at an index strictly greater than Idx.
/// (The VNInfo parameter's declaration line is elided from this excerpt.)
1579 bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
1581 MachineBasicBlock *MBB, unsigned Idx) const {
1582 unsigned End = getMBBEndIdx(MBB);
1583 for (unsigned j = 0, ee = VNI->kills.size(); j != ee; ++j) {
1584 unsigned KillIdx = VNI->kills[j];
// A kill counts only if it falls after Idx but still inside this block.
1585 if (KillIdx > Idx && KillIdx < End)
1591 /// RewriteInfo - Keep track of machine instrs that will be rewritten
1592 /// during spilling.
// Fields (declarations elided from this excerpt): Index, MI, HasUse, HasDef.
1594 struct RewriteInfo {
1599 RewriteInfo(unsigned i, MachineInstr *mi, bool u, bool d)
1600 : Index(i), MI(mi), HasUse(u), HasDef(d) {}
// RewriteInfoCompare - Strict weak ordering over RewriteInfo entries by
// instruction index; used to sort the rewrite worklist.
1603 struct RewriteInfoCompare {
1604 bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
1605 return LHS.Index < RHS.Index;
/// rewriteInstructionsForSpills - Rewrite every def / use of li.reg within
/// the live range *I: collect the referencing instructions, sort them by
/// index, rewrite each via rewriteInstructionForSpills, and record per-MBB
/// spill points (SpillMBBs/SpillIdxes) and restore points
/// (RestoreMBBs/RestoreIdxes) for the spiller.
/// NOTE(review): many statements of this function are in lines elided from
/// this excerpt; comments below describe only the visible code.
1610 void LiveIntervals::
1611 rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
1612 LiveInterval::Ranges::const_iterator &I,
1613 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
1614 unsigned Slot, int LdSlot,
1615 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
1617 const TargetRegisterClass* rc,
1618 SmallVector<int, 4> &ReMatIds,
1619 const MachineLoopInfo *loopInfo,
1620 BitVector &SpillMBBs,
1621 DenseMap<unsigned, std::vector<SRInfo> > &SpillIdxes,
1622 BitVector &RestoreMBBs,
1623 DenseMap<unsigned, std::vector<SRInfo> > &RestoreIdxes,
1624 DenseMap<unsigned,unsigned> &MBBVRegsMap,
1625 std::vector<LiveInterval*> &NewLIs) {
1626 bool AllCanFold = true;
1627 unsigned NewVReg = 0;
// Round the range boundaries to whole-instruction slot boundaries.
1628 unsigned start = getBaseIndex(I->start);
1629 unsigned end = getBaseIndex(I->end-1) + InstrSlots::NUM;
1631 // First collect all the def / use in this live range that will be rewritten.
1632 // Make sure they are sorted according to instruction index.
1633 std::vector<RewriteInfo> RewriteMIs;
1634 for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
1635 re = mri_->reg_end(); ri != re; ) {
1636 MachineInstr *MI = &*ri;
1637 MachineOperand &O = ri.getOperand();
1639 assert(!O.isImplicit() && "Spilling register that's used as implicit use?");
1640 unsigned index = getInstructionIndex(MI);
// Skip references outside the live range being rewritten.
1641 if (index < start || index >= end)
1643 if (O.isUse() && !li.liveAt(getUseIndex(index)))
1644 // Must be defined by an implicit def. It should not be spilled. Note,
1645 // this is for correctness reason. e.g.
1646 // 8   %reg1024<def> = IMPLICIT_DEF
1647 // 12  %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
1648 // The live range [12, 14) are not part of the r1024 live interval since
1649 // it's defined by an implicit def. It will not conflict with live
1650 // interval of r1025. Now suppose both registers are spilled, you can
1651 // easily see a situation where both registers are reloaded before
1652 // the INSERT_SUBREG and both target registers that would overlap.
1654 RewriteMIs.push_back(RewriteInfo(index, MI, O.isUse(), O.isDef()));
1656 std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());
// The (single) virtual register the remat def reads, if any.
1658 unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
1659 // Now rewrite the defs and uses.
1660 for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
1661 RewriteInfo &rwi = RewriteMIs[i];
1663 unsigned index = rwi.Index;
1664 bool MIHasUse = rwi.HasUse;
1665 bool MIHasDef = rwi.HasDef;
1666 MachineInstr *MI = rwi.MI;
1667 // If MI def and/or use the same register multiple times, then there
1668 // are multiple entries.
1669 unsigned NumUses = MIHasUse;
// Coalesce all entries referring to the same instruction.
1670 while (i != e && RewriteMIs[i].MI == MI) {
1671 assert(RewriteMIs[i].Index == index);
1672 bool isUse = RewriteMIs[i].HasUse;
1673 if (isUse) ++NumUses;
1675 MIHasDef |= RewriteMIs[i].HasDef;
1678 MachineBasicBlock *MBB = MI->getParent();
1680 if (ImpUse && MI != ReMatDefMI) {
1681 // Re-matting an instruction with virtual register use. Update the
1682 // register interval's spill weight to HUGE_VALF to prevent it from
1684 LiveInterval &ImpLi = getInterval(ImpUse);
1685 ImpLi.weight = HUGE_VALF;
// When splitting, reuse the vreg already created for this MBB if any.
1688 unsigned MBBId = MBB->getNumber();
1689 unsigned ThisVReg = 0;
1691 DenseMap<unsigned,unsigned>::iterator NVI = MBBVRegsMap.find(MBBId);
1692 if (NVI != MBBVRegsMap.end()) {
1693 ThisVReg = NVI->second;
1700 // It's better to start a new interval to avoid artificially
1701 // extend the new interval.
1702 if (MIHasDef && !MIHasUse) {
1703 MBBVRegsMap.erase(MBB->getNumber());
1709 bool IsNew = ThisVReg == 0;
1711 // This ends the previous live interval. If all of its def / use
1712 // can be folded, give it a low spill weight.
1713 if (NewVReg && TrySplit && AllCanFold) {
1714 LiveInterval &nI = getOrCreateInterval(NewVReg);
// Rewrite this instruction; HasDef / HasUse report what was touched.
1721 bool HasDef = false;
1722 bool HasUse = false;
1723 bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
1724 index, end, MI, ReMatOrigDefMI, ReMatDefMI,
1725 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1726 CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
1727 ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs);
1728 if (!HasDef && !HasUse)
1731 AllCanFold &= CanFold;
1733 // Update weight of spill interval.
1734 LiveInterval &nI = getOrCreateInterval(NewVReg);
1736 // The spill weight is now infinity as it cannot be spilled again.
1737 nI.weight = HUGE_VALF;
1741 // Keep track of the last def and first use in each MBB.
1743 if (MI != ReMatOrigDefMI || !CanDelete) {
1744 bool HasKill = false;
1746 HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, getDefIndex(index));
1748 // If this is a two-address code, then this index starts a new VNInfo.
1749 const VNInfo *VNI = li.findDefinedVNInfo(getDefIndex(index));
1751 HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, getDefIndex(index));
1753 DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
1754 SpillIdxes.find(MBBId);
// Record (or update) the spill point for NewVReg in this MBB.
1756 if (SII == SpillIdxes.end()) {
1757 std::vector<SRInfo> S;
1758 S.push_back(SRInfo(index, NewVReg, true));
1759 SpillIdxes.insert(std::make_pair(MBBId, S));
1760 } else if (SII->second.back().vreg != NewVReg) {
1761 SII->second.push_back(SRInfo(index, NewVReg, true));
1762 } else if ((int)index > SII->second.back().index) {
1763 // If there is an earlier def and this is a two-address
1764 // instruction, then it's not possible to fold the store (which
1765 // would also fold the load).
1766 SRInfo &Info = SII->second.back();
1768 Info.canFold = !HasUse;
1770 SpillMBBs.set(MBBId);
1771 } else if (SII != SpillIdxes.end() &&
1772 SII->second.back().vreg == NewVReg &&
1773 (int)index > SII->second.back().index) {
1774 // There is an earlier def that's not killed (must be two-address).
1775 // The spill is no longer needed.
1776 SII->second.pop_back();
1777 if (SII->second.empty()) {
1778 SpillIdxes.erase(MBBId);
1779 SpillMBBs.reset(MBBId);
// Uses: decide whether a reload (restore) is needed and foldable.
1786 DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
1787 SpillIdxes.find(MBBId);
1788 if (SII != SpillIdxes.end() &&
1789 SII->second.back().vreg == NewVReg &&
1790 (int)index > SII->second.back().index)
1791 // Use(s) following the last def, it's not safe to fold the spill.
1792 SII->second.back().canFold = false;
1793 DenseMap<unsigned, std::vector<SRInfo> >::iterator RII =
1794 RestoreIdxes.find(MBBId);
1795 if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
1796 // If we are splitting live intervals, only fold if it's the first
1797 // use and there isn't another use later in the MBB.
1798 RII->second.back().canFold = false;
1800 // Only need a reload if there isn't an earlier def / use.
1801 if (RII == RestoreIdxes.end()) {
1802 std::vector<SRInfo> Infos;
1803 Infos.push_back(SRInfo(index, NewVReg, true));
1804 RestoreIdxes.insert(std::make_pair(MBBId, Infos));
1806 RII->second.push_back(SRInfo(index, NewVReg, true));
1808 RestoreMBBs.set(MBBId);
1812 // Update spill weight.
1813 unsigned loopDepth = loopInfo->getLoopDepth(MBB);
1814 nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
1817 if (NewVReg && TrySplit && AllCanFold) {
1818 // If all of its def / use can be folded, give it a low spill weight.
1819 LiveInterval &nI = getOrCreateInterval(NewVReg);
/// alsoFoldARestore - Return true if basic block Id has a foldable restore
/// of register vr recorded at the given instruction index.
1824 bool LiveIntervals::alsoFoldARestore(int Id, int index, unsigned vr,
1825 BitVector &RestoreMBBs,
1826 DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
// No restores recorded for this block at all.
1827 if (!RestoreMBBs[Id])
1829 std::vector<SRInfo> &Restores = RestoreIdxes[Id];
// Look for a matching, still-foldable restore entry.
1830 for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1831 if (Restores[i].index == index &&
1832 Restores[i].vreg == vr &&
1833 Restores[i].canFold)
1838 void LiveIntervals::eraseRestoreInfo(int Id, int index, unsigned vr,
1839 BitVector &RestoreMBBs,
1840 DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1841 if (!RestoreMBBs[Id])
1843 std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1844 for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1845 if (Restores[i].index == index && Restores[i].vreg)
1846 Restores[i].index = -1;
1849 /// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being
1850 /// spilled and create empty intervals for their uses.
1852 LiveIntervals::handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm,
1853 const TargetRegisterClass* rc,
1854 std::vector<LiveInterval*> &NewLIs) {
// Walk every remaining reference to li.reg.
1855 for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
1856 re = mri_->reg_end(); ri != re; ) {
1857 MachineOperand &O = ri.getOperand();
1858 MachineInstr *MI = &*ri;
// Any remaining def of li.reg must be the IMPLICIT_DEF itself; erase it
// and drop it from the index maps.
1861 assert(MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF &&
1862 "Register def was not rewritten?");
1863 RemoveMachineInstrFromMaps(MI);
1864 vrm.RemoveMachineInstrFromMaps(MI);
1865 MI->eraseFromParent();
1867 // This must be a use of an implicit_def so it's not part of the live
1868 // interval. Create a new empty live interval for it.
1869 // FIXME: Can we simply erase some of the instructions? e.g. Stores?
1870 unsigned NewVReg = mri_->createVirtualRegister(rc);
1872 vrm.setIsImplicitlyDefined(NewVReg);
1873 NewLIs.push_back(&getOrCreateInterval(NewVReg));
// Rewrite every operand of MI that refers to li.reg to the new vreg.
1874 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1875 MachineOperand &MO = MI->getOperand(i);
1876 if (MO.isReg() && MO.getReg() == li.reg) {
/// addIntervalsForSpillsFast - Simple spiller used with -fast-spill: assign
/// one stack slot to li, then for each instruction referencing li.reg try to
/// fold the slot in; otherwise create a fresh vreg with a tiny live range
/// around the instruction plus matching spill / restore points.
/// NOTE(review): several statements are in lines elided from this excerpt.
1885 std::vector<LiveInterval*> LiveIntervals::
1886 addIntervalsForSpillsFast(const LiveInterval &li,
1887 const MachineLoopInfo *loopInfo,
1889 unsigned slot = vrm.assignVirt2StackSlot(li.reg);
1891 std::vector<LiveInterval*> added;
1893 assert(li.weight != HUGE_VALF &&
1894 "attempt to spill already spilled interval!");
1896 DOUT << "\t\t\t\tadding intervals for spills for interval: ";
1900 const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
1902 MachineRegisterInfo::reg_iterator RI = mri_->reg_begin(li.reg);
1903 while (RI != mri_->reg_end()) {
1904 MachineInstr* MI = &*RI;
// Collect all operand indices of MI that reference li.reg.
1906 SmallVector<unsigned, 2> Indices;
1907 bool HasUse = false;
1908 bool HasDef = false;
1910 for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
1911 MachineOperand& mop = MI->getOperand(i);
1912 if (!mop.isReg() || mop.getReg() != li.reg) continue;
1914 HasUse |= MI->getOperand(i).isUse();
1915 HasDef |= MI->getOperand(i).isDef();
1917 Indices.push_back(i);
// Prefer folding the stack slot into MI; fall back to reload/spill code.
1920 if (!tryFoldMemoryOperand(MI, vrm, NULL, getInstructionIndex(MI),
1921 Indices, true, slot, li.reg)) {
1922 unsigned NewVReg = mri_->createVirtualRegister(rc);
1924 vrm.assignVirt2StackSlot(NewVReg, slot);
1926 // create a new register for this spill
1927 LiveInterval &nI = getOrCreateInterval(NewVReg);
1929 // the spill weight is now infinity as it
1930 // cannot be spilled again
1931 nI.weight = HUGE_VALF;
1933 // Rewrite register operands to use the new vreg.
1934 for (SmallVectorImpl<unsigned>::iterator I = Indices.begin(),
1935 E = Indices.end(); I != E; ++I) {
1936 MI->getOperand(*I).setReg(NewVReg);
// The new vreg dies at this instruction; mark uses as kills.
1938 if (MI->getOperand(*I).isUse())
1939 MI->getOperand(*I).setIsKill(true);
1942 // Fill in the new live interval.
1943 unsigned index = getInstructionIndex(MI);
// A use needs a reload before MI: range [load, use) plus a restore point.
1945 LiveRange LR(getLoadIndex(index), getUseIndex(index),
1946 nI.getNextValue(0, 0, false, getVNInfoAllocator()));
1949 vrm.addRestorePoint(NewVReg, MI);
// A def needs a store after MI: range [def, store) plus a spill point.
1952 LiveRange LR(getDefIndex(index), getStoreIndex(index),
1953 nI.getNextValue(0, 0, false, getVNInfoAllocator()));
1956 vrm.addSpillPoint(NewVReg, true, MI);
1959 added.push_back(&nI);
1961 DOUT << "\t\t\t\tadded new interval: ";
// Folding may have deleted instructions; restart the register walk.
1967 RI = mri_->reg_begin(li.reg);
1973 std::vector<LiveInterval*> LiveIntervals::
1974 addIntervalsForSpills(const LiveInterval &li,
1975 SmallVectorImpl<LiveInterval*> &SpillIs,
1976 const MachineLoopInfo *loopInfo, VirtRegMap &vrm) {
1978 if (EnableFastSpilling)
1979 return addIntervalsForSpillsFast(li, loopInfo, vrm);
1981 assert(li.weight != HUGE_VALF &&
1982 "attempt to spill already spilled interval!");
1984 DOUT << "\t\t\t\tadding intervals for spills for interval: ";
1985 li.print(DOUT, tri_);
1988 // Each bit specify whether a spill is required in the MBB.
1989 BitVector SpillMBBs(mf_->getNumBlockIDs());
1990 DenseMap<unsigned, std::vector<SRInfo> > SpillIdxes;
1991 BitVector RestoreMBBs(mf_->getNumBlockIDs());
1992 DenseMap<unsigned, std::vector<SRInfo> > RestoreIdxes;
1993 DenseMap<unsigned,unsigned> MBBVRegsMap;
1994 std::vector<LiveInterval*> NewLIs;
1995 const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
1997 unsigned NumValNums = li.getNumValNums();
1998 SmallVector<MachineInstr*, 4> ReMatDefs;
1999 ReMatDefs.resize(NumValNums, NULL);
2000 SmallVector<MachineInstr*, 4> ReMatOrigDefs;
2001 ReMatOrigDefs.resize(NumValNums, NULL);
2002 SmallVector<int, 4> ReMatIds;
2003 ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
2004 BitVector ReMatDelete(NumValNums);
2005 unsigned Slot = VirtRegMap::MAX_STACK_SLOT;
2007 // Spilling a split live interval. It cannot be split any further. Also,
2008 // it's also guaranteed to be a single val# / range interval.
2009 if (vrm.getPreSplitReg(li.reg)) {
2010 vrm.setIsSplitFromReg(li.reg, 0);
2011 // Unset the split kill marker on the last use.
2012 unsigned KillIdx = vrm.getKillPoint(li.reg);
2014 MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
2015 assert(KillMI && "Last use disappeared?");
2016 int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
2017 assert(KillOp != -1 && "Last use disappeared?");
2018 KillMI->getOperand(KillOp).setIsKill(false);
2020 vrm.removeKillPoint(li.reg);
2021 bool DefIsReMat = vrm.isReMaterialized(li.reg);
2022 Slot = vrm.getStackSlot(li.reg);
2023 assert(Slot != VirtRegMap::MAX_STACK_SLOT);
2024 MachineInstr *ReMatDefMI = DefIsReMat ?
2025 vrm.getReMaterializedMI(li.reg) : NULL;
2027 bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
2028 bool isLoad = isLoadSS ||
2029 (DefIsReMat && (ReMatDefMI->getDesc().canFoldAsLoad()));
2030 bool IsFirstRange = true;
2031 for (LiveInterval::Ranges::const_iterator
2032 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
2033 // If this is a split live interval with multiple ranges, it means there
2034 // are two-address instructions that re-defined the value. Only the
2035 // first def can be rematerialized!
2037 // Note ReMatOrigDefMI has already been deleted.
2038 rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
2039 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
2040 false, vrm, rc, ReMatIds, loopInfo,
2041 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
2042 MBBVRegsMap, NewLIs);
2044 rewriteInstructionsForSpills(li, false, I, NULL, 0,
2045 Slot, 0, false, false, false,
2046 false, vrm, rc, ReMatIds, loopInfo,
2047 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
2048 MBBVRegsMap, NewLIs);
2050 IsFirstRange = false;
2053 handleSpilledImpDefs(li, vrm, rc, NewLIs);
2057 bool TrySplit = SplitAtBB && !intervalIsInOneMBB(li);
2058 if (SplitLimit != -1 && (int)numSplits >= SplitLimit)
2062 bool NeedStackSlot = false;
2063 for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
2065 const VNInfo *VNI = *i;
2066 unsigned VN = VNI->id;
2067 if (VNI->isUnused())
2068 continue; // Dead val#.
2069 // Is the def for the val# rematerializable?
2070 MachineInstr *ReMatDefMI = VNI->isDefAccurate()
2071 ? getInstructionFromIndex(VNI->def) : 0;
2073 if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, SpillIs, dummy)) {
2074 // Remember how to remat the def of this val#.
2075 ReMatOrigDefs[VN] = ReMatDefMI;
2076 // Original def may be modified so we have to make a copy here.
2077 MachineInstr *Clone = mf_->CloneMachineInstr(ReMatDefMI);
2078 ClonedMIs.push_back(Clone);
2079 ReMatDefs[VN] = Clone;
2081 bool CanDelete = true;
2082 if (VNI->hasPHIKill()) {
2083 // A kill is a phi node, not all of its uses can be rematerialized.
2084 // It must not be deleted.
2086 // Need a stack slot if there is any live range where uses cannot be
2088 NeedStackSlot = true;
2091 ReMatDelete.set(VN);
2093 // Need a stack slot if there is any live range where uses cannot be
2095 NeedStackSlot = true;
2099 // One stack slot per live interval.
2100 if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0) {
2101 if (vrm.getStackSlot(li.reg) == VirtRegMap::NO_STACK_SLOT)
2102 Slot = vrm.assignVirt2StackSlot(li.reg);
2104 // This case only occurs when the prealloc splitter has already assigned
2105 // a stack slot to this vreg.
2107 Slot = vrm.getStackSlot(li.reg);
2110 // Create new intervals and rewrite defs and uses.
2111 for (LiveInterval::Ranges::const_iterator
2112 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
2113 MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
2114 MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
2115 bool DefIsReMat = ReMatDefMI != NULL;
2116 bool CanDelete = ReMatDelete[I->valno->id];
2118 bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
2119 bool isLoad = isLoadSS ||
2120 (DefIsReMat && ReMatDefMI->getDesc().canFoldAsLoad());
2121 rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
2122 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
2123 CanDelete, vrm, rc, ReMatIds, loopInfo,
2124 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
2125 MBBVRegsMap, NewLIs);
2128 // Insert spills / restores if we are splitting.
2130 handleSpilledImpDefs(li, vrm, rc, NewLIs);
2134 SmallPtrSet<LiveInterval*, 4> AddedKill;
2135 SmallVector<unsigned, 2> Ops;
2136 if (NeedStackSlot) {
2137 int Id = SpillMBBs.find_first();
2139 std::vector<SRInfo> &spills = SpillIdxes[Id];
2140 for (unsigned i = 0, e = spills.size(); i != e; ++i) {
2141 int index = spills[i].index;
2142 unsigned VReg = spills[i].vreg;
2143 LiveInterval &nI = getOrCreateInterval(VReg);
2144 bool isReMat = vrm.isReMaterialized(VReg);
2145 MachineInstr *MI = getInstructionFromIndex(index);
2146 bool CanFold = false;
2147 bool FoundUse = false;
2149 if (spills[i].canFold) {
2151 for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
2152 MachineOperand &MO = MI->getOperand(j);
2153 if (!MO.isReg() || MO.getReg() != VReg)
2160 (!FoundUse && !alsoFoldARestore(Id, index, VReg,
2161 RestoreMBBs, RestoreIdxes))) {
2162 // MI has two-address uses of the same register. If the use
2163 // isn't the first and only use in the BB, then we can't fold
2164 // it. FIXME: Move this to rewriteInstructionsForSpills.
2171 // Fold the store into the def if possible.
2172 bool Folded = false;
2173 if (CanFold && !Ops.empty()) {
2174 if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot,VReg)){
2177 // Also folded uses, do not issue a load.
2178 eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
2179 nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
2181 nI.removeRange(getDefIndex(index), getStoreIndex(index));
2185 // Otherwise tell the spiller to issue a spill.
2187 LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
2188 bool isKill = LR->end == getStoreIndex(index);
2189 if (!MI->registerDefIsDead(nI.reg))
2190 // No need to spill a dead def.
2191 vrm.addSpillPoint(VReg, isKill, MI);
2193 AddedKill.insert(&nI);
2196 Id = SpillMBBs.find_next(Id);
2200 int Id = RestoreMBBs.find_first();
2202 std::vector<SRInfo> &restores = RestoreIdxes[Id];
2203 for (unsigned i = 0, e = restores.size(); i != e; ++i) {
2204 int index = restores[i].index;
2207 unsigned VReg = restores[i].vreg;
2208 LiveInterval &nI = getOrCreateInterval(VReg);
2209 bool isReMat = vrm.isReMaterialized(VReg);
2210 MachineInstr *MI = getInstructionFromIndex(index);
2211 bool CanFold = false;
2213 if (restores[i].canFold) {
2215 for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
2216 MachineOperand &MO = MI->getOperand(j);
2217 if (!MO.isReg() || MO.getReg() != VReg)
2221 // If this restore were to be folded, it would have been folded
2230 // Fold the load into the use if possible.
2231 bool Folded = false;
2232 if (CanFold && !Ops.empty()) {
2234 Folded = tryFoldMemoryOperand(MI, vrm, NULL,index,Ops,true,Slot,VReg);
2236 MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
2238 bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
2239 // If the rematerializable def is a load, also try to fold it.
2240 if (isLoadSS || ReMatDefMI->getDesc().canFoldAsLoad())
2241 Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
2242 Ops, isLoadSS, LdSlot, VReg);
2244 unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
2246 // Re-matting an instruction with virtual register use. Add the
2247 // register as an implicit use on the use MI and update the register
2248 // interval's spill weight to HUGE_VALF to prevent it from being
2250 LiveInterval &ImpLi = getInterval(ImpUse);
2251 ImpLi.weight = HUGE_VALF;
2252 MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
2257 // If folding is not possible / failed, then tell the spiller to issue a
2258 // load / rematerialization for us.
2260 nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
2262 vrm.addRestorePoint(VReg, MI);
2264 Id = RestoreMBBs.find_next(Id);
2267 // Finalize intervals: add kills, finalize spill weights, and filter out
2269 std::vector<LiveInterval*> RetNewLIs;
2270 for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
2271 LiveInterval *LI = NewLIs[i];
2273 LI->weight /= InstrSlots::NUM * getApproximateInstructionCount(*LI);
2274 if (!AddedKill.count(LI)) {
2275 LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
2276 unsigned LastUseIdx = getBaseIndex(LR->end);
2277 MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
2278 int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
2279 assert(UseIdx != -1);
2280 if (!LastUse->isRegTiedToDefOperand(UseIdx)) {
2281 LastUse->getOperand(UseIdx).setIsKill();
2282 vrm.addKillPoint(LI->reg, LastUseIdx);
2285 RetNewLIs.push_back(LI);
2289 handleSpilledImpDefs(li, vrm, rc, RetNewLIs);
2293 /// hasAllocatableSuperReg - Return true if the specified physical register has
2294 /// any super register that's allocatable.
2295 bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
2296 for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS)
2297 if (allocatableRegs_[*AS] && hasInterval(*AS))
2302 /// getRepresentativeReg - Find the largest super register of the specified
2303 /// physical register.
2304 unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
2305 // Find the largest super-register that is allocatable.
2306 unsigned BestReg = Reg;
2307 for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
2308 unsigned SuperReg = *AS;
2309 if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) {
2317 /// getNumConflictsWithPhysReg - Return the number of uses and defs of the
2318 /// specified interval that conflicts with the specified physical register.
2319 unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
2320 unsigned PhysReg) const {
2321 unsigned NumConflicts = 0;
2322 const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg));
2323 for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
2324 E = mri_->reg_end(); I != E; ++I) {
2325 MachineOperand &O = I.getOperand();
2326 MachineInstr *MI = O.getParent();
2327 unsigned Index = getInstructionIndex(MI);
2328 if (pli.liveAt(Index))
2331 return NumConflicts;
2334 /// spillPhysRegAroundRegDefsUses - Spill the specified physical register
2335 /// around all defs and uses of the specified interval. Return true if it
2336 /// was able to cut its interval.
2337 bool LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
2338 unsigned PhysReg, VirtRegMap &vrm) {
2339 unsigned SpillReg = getRepresentativeReg(PhysReg);
2341 for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
2342 // If there are registers which alias PhysReg, but which are not a
2343 // sub-register of the chosen representative super register. Assert
2344 // since we can't handle it yet.
2345 assert(*AS == SpillReg || !allocatableRegs_[*AS] || !hasInterval(*AS) ||
2346 tri_->isSuperRegister(*AS, SpillReg));
2349 LiveInterval &pli = getInterval(SpillReg);
2350 SmallPtrSet<MachineInstr*, 8> SeenMIs;
2351 for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
2352 E = mri_->reg_end(); I != E; ++I) {
2353 MachineOperand &O = I.getOperand();
2354 MachineInstr *MI = O.getParent();
2355 if (SeenMIs.count(MI))
2358 unsigned Index = getInstructionIndex(MI);
2359 if (pli.liveAt(Index)) {
2360 vrm.addEmergencySpill(SpillReg, MI);
2361 unsigned StartIdx = getLoadIndex(Index);
2362 unsigned EndIdx = getStoreIndex(Index)+1;
2363 if (pli.isInOneLiveRange(StartIdx, EndIdx)) {
2364 pli.removeRange(StartIdx, EndIdx);
2367 cerr << "Ran out of registers during register allocation!\n";
2368 if (MI->getOpcode() == TargetInstrInfo::INLINEASM) {
2369 cerr << "Please check your inline asm statement for invalid "
2370 << "constraints:\n";
2371 MI->print(cerr.stream(), tm_);
2375 for (const unsigned* AS = tri_->getSubRegisters(SpillReg); *AS; ++AS) {
2376 if (!hasInterval(*AS))
2378 LiveInterval &spli = getInterval(*AS);
2379 if (spli.liveAt(Index))
2380 spli.removeRange(getLoadIndex(Index), getStoreIndex(Index)+1);
2387 LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg,
2388 MachineInstr* startInst) {
2389 LiveInterval& Interval = getOrCreateInterval(reg);
2390 VNInfo* VN = Interval.getNextValue(
2391 getInstructionIndex(startInst) + InstrSlots::DEF,
2392 startInst, true, getVNInfoAllocator());
2393 VN->setHasPHIKill(true);
2394 VN->kills.push_back(getMBBEndIdx(startInst->getParent()));
2395 LiveRange LR(getInstructionIndex(startInst) + InstrSlots::DEF,
2396 getMBBEndIdx(startInst->getParent()) + 1, VN);
2397 Interval.addRange(LR);