//===-- LiveIntervalAnalysis.cpp - Live Interval Analysis -----------------===//
// The LLVM Compiler Infrastructure
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//===----------------------------------------------------------------------===//
// This file implements the LiveInterval analysis pass which is used
// by the Linear Scan Register allocator. This pass linearizes the
// basic blocks of the function in DFS order and uses the
// LiveVariables pass to conservatively compute live intervals for
// each virtual and physical register.
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "liveintervals"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "VirtRegMap.h"
#include "llvm/Value.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
// Hidden options for help debugging.
static cl::opt<bool> DisableReMat("disable-rematerialization",
                                  cl::init(false), cl::Hidden);
static cl::opt<bool> EnableAggressiveRemat("aggressive-remat", cl::Hidden);
static cl::opt<bool> EnableFastSpilling("fast-spill",
                                        cl::init(false), cl::Hidden);
static cl::opt<bool> EarlyCoalescing("early-coalescing", cl::init(false));
static cl::opt<int> CoalescingLimit("early-coalescing-limit",
                                    cl::init(-1), cl::Hidden);
STATISTIC(numIntervals , "Number of original intervals");
STATISTIC(numFolds , "Number of loads/stores folded into instructions");
STATISTIC(numSplits , "Number of intervals split");
STATISTIC(numCoalescing, "Number of early coalescing performed");
char LiveIntervals::ID = 0;
static RegisterPass<LiveIntervals> X("liveintervals", "Live Interval Analysis");
void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<AliasAnalysis>();
  AU.addPreserved<AliasAnalysis>();
  AU.addPreserved<LiveVariables>();
  AU.addRequired<LiveVariables>();
  AU.addPreservedID(MachineLoopInfoID);
  AU.addPreservedID(MachineDominatorsID);
  AU.addPreservedID(PHIEliminationID);
  AU.addRequiredID(PHIEliminationID);
  AU.addRequiredID(TwoAddressInstructionPassID);
  MachineFunctionPass::getAnalysisUsage(AU);
void LiveIntervals::releaseMemory() {
  // Free the live intervals themselves.
  for (DenseMap<unsigned, LiveInterval*>::iterator I = r2iMap_.begin(),
         E = r2iMap_.end(); I != E; ++I)
  terminatorGaps.clear();
  phiJoinCopies.clear();
  // Release VNInfo memory regions after all VNInfo objects are dtor'd.
  VNInfoAllocator.Reset();
  while (!CloneMIs.empty()) {
    MachineInstr *MI = CloneMIs.back();
    mf_->DeleteMachineInstr(MI);
static bool CanTurnIntoImplicitDef(MachineInstr *MI, unsigned Reg,
                                   unsigned OpIdx, const TargetInstrInfo *tii_) {
  unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
  if (tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg) &&
  if (OpIdx == 2 && MI->getOpcode() == TargetInstrInfo::SUBREG_TO_REG)
  if (OpIdx == 1 && MI->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG)
/// processImplicitDefs - Process IMPLICIT_DEF instructions and make sure
/// there is one implicit_def for each use. Add isUndef marker to
/// implicit_def defs and their uses.
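///
/// For illustration only (the virtual register numbers below are made up and
/// not taken from any real function): a plain copy whose source comes from an
/// IMPLICIT_DEF can itself be turned into an IMPLICIT_DEF, e.g.
///
///   %reg1024<def> = IMPLICIT_DEF
///   %reg1025<def> = MOV %reg1024        ; use of an implicit_def'd value
///
/// becomes
///
///   %reg1024<def> = IMPLICIT_DEF
///   %reg1025<def> = IMPLICIT_DEF        ; copy rewritten by this pass
///
/// so that every use of an undefined value is itself known to be undefined.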
void LiveIntervals::processImplicitDefs() {
  SmallSet<unsigned, 8> ImpDefRegs;
  SmallVector<MachineInstr*, 8> ImpDefMIs;
  MachineBasicBlock *Entry = mf_->begin();
  SmallPtrSet<MachineBasicBlock*,16> Visited;
  for (df_ext_iterator<MachineBasicBlock*, SmallPtrSet<MachineBasicBlock*,16> >
         DFI = df_ext_begin(Entry, Visited), E = df_ext_end(Entry, Visited);
    MachineBasicBlock *MBB = *DFI;
    for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
      MachineInstr *MI = &*I;
      if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
        unsigned Reg = MI->getOperand(0).getReg();
        ImpDefRegs.insert(Reg);
        if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
          for (const unsigned *SS = tri_->getSubRegisters(Reg); *SS; ++SS)
            ImpDefRegs.insert(*SS);
        ImpDefMIs.push_back(MI);
      if (MI->getOpcode() == TargetInstrInfo::INSERT_SUBREG) {
        MachineOperand &MO = MI->getOperand(2);
        if (ImpDefRegs.count(MO.getReg())) {
          // %reg1032<def> = INSERT_SUBREG %reg1032, undef, 2
          // This is an identity copy, eliminate it now.
          LiveVariables::VarInfo& vi = lv_->getVarInfo(MO.getReg());
          MI->eraseFromParent();
      bool ChangedToImpDef = false;
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand& MO = MI->getOperand(i);
        if (!MO.isReg() || !MO.isUse() || MO.isUndef())
        unsigned Reg = MO.getReg();
        if (!ImpDefRegs.count(Reg))
        // Use is a copy, just turn it into an implicit_def.
        if (CanTurnIntoImplicitDef(MI, Reg, i, tii_)) {
          bool isKill = MO.isKill();
          MI->setDesc(tii_->get(TargetInstrInfo::IMPLICIT_DEF));
          for (int j = MI->getNumOperands() - 1, ee = 0; j > ee; --j)
            MI->RemoveOperand(j);
          ImpDefRegs.erase(Reg);
          LiveVariables::VarInfo& vi = lv_->getVarInfo(Reg);
          ChangedToImpDef = true;
        if (MO.isKill() || MI->isRegTiedToDefOperand(i)) {
          // Make sure other uses of
          for (unsigned j = i+1; j != e; ++j) {
            MachineOperand &MOJ = MI->getOperand(j);
            if (MOJ.isReg() && MOJ.isUse() && MOJ.getReg() == Reg)
          ImpDefRegs.erase(Reg);
      if (ChangedToImpDef) {
        // Backtrack to process this new implicit_def.
        for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
          MachineOperand& MO = MI->getOperand(i);
          if (!MO.isReg() || !MO.isDef())
          ImpDefRegs.erase(MO.getReg());
  // Any outstanding liveout implicit_def's?
  for (unsigned i = 0, e = ImpDefMIs.size(); i != e; ++i) {
    MachineInstr *MI = ImpDefMIs[i];
    unsigned Reg = MI->getOperand(0).getReg();
    if (TargetRegisterInfo::isPhysicalRegister(Reg) ||
        !ImpDefRegs.count(Reg)) {
      // Delete all "local" implicit_def's. That includes those which define
      // physical registers, since they cannot be live out.
      MI->eraseFromParent();
    // If there are multiple defs of the same register and at least one
    // is not an implicit_def, do not insert implicit_def's before the
    for (MachineRegisterInfo::def_iterator DI = mri_->def_begin(Reg),
           DE = mri_->def_end(); DI != DE; ++DI) {
      if (DI->getOpcode() != TargetInstrInfo::IMPLICIT_DEF) {
    // The only implicit_def which we want to keep are those that are live
    MI->eraseFromParent();
    for (MachineRegisterInfo::use_iterator UI = mri_->use_begin(Reg),
           UE = mri_->use_end(); UI != UE; ) {
      MachineOperand &RMO = UI.getOperand();
      MachineInstr *RMI = &*UI;
      MachineBasicBlock *RMBB = RMI->getParent();
      // Turn a copy use into an implicit_def.
      unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
      if (tii_->isMoveInstr(*RMI, SrcReg, DstReg, SrcSubReg, DstSubReg) &&
        RMI->setDesc(tii_->get(TargetInstrInfo::IMPLICIT_DEF));
        for (int j = RMI->getNumOperands() - 1, ee = 0; j > ee; --j)
          RMI->RemoveOperand(j);
      const TargetRegisterClass* RC = mri_->getRegClass(Reg);
      unsigned NewVReg = mri_->createVirtualRegister(RC);
void LiveIntervals::computeNumbering() {
  Index2MiMap OldI2MI = i2miMap_;
  std::vector<IdxMBBPair> OldI2MBB = Idx2MBBMap;
  terminatorGaps.clear();
  phiJoinCopies.clear();
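  // A rough sketch of the numbering scheme built below (indices shown are
  // illustrative only): every MachineInstr gets one MachineInstrIndex, and
  // each index is subdivided into the load, use, def and store slots queried
  // through getLoadIndex()/getUseIndex()/getDefIndex()/getStoreIndex(). Empty
  // indices (i2miMap_ entries of 0) are interleaved as well: one at the start
  // of each block, one gap before the terminators, and max(1, numdefs) after
  // every instruction, so that instructions can be added later without a full
  // renumbering. For example:
  //
  //   index 0:  <empty slot at block start>
  //   index 1:  %reg1024<def> = ...      ; live range starts at defSlot(1)
  //   index 2:  <empty slot after the def>
  //   index 3:  ... = use %reg1024       ; live range ends at useSlot(3)+1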
  // Number MachineInstrs and MachineBasicBlocks.
  // Initialize MBB indexes to a sentinel.
  MBB2IdxMap.resize(mf_->getNumBlockIDs(),
                    std::make_pair(MachineInstrIndex(), MachineInstrIndex()));
  MachineInstrIndex MIIndex;
  for (MachineFunction::iterator MBB = mf_->begin(), E = mf_->end();
    MachineInstrIndex StartIdx = MIIndex;
    // Insert an empty slot at the beginning of each block.
    MIIndex = getNextIndex(MIIndex);
    i2miMap_.push_back(0);
    for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
      if (I == MBB->getFirstTerminator()) {
        // Leave a gap before the terminators; this is where we will point
        MachineInstrIndex tGap(true, MIIndex);
          terminatorGaps.insert(std::make_pair(&*MBB, tGap)).second;
          "Multiple 'first' terminators encountered during numbering.");
        inserted = inserted; // Avoid compiler warning if assertions turned off.
        i2miMap_.push_back(0);
        MIIndex = getNextIndex(MIIndex);
      bool inserted = mi2iMap_.insert(std::make_pair(I, MIIndex)).second;
      assert(inserted && "multiple MachineInstr -> index mappings");
      i2miMap_.push_back(I);
      MIIndex = getNextIndex(MIIndex);
      // Insert max(1, numdefs) empty slots after every instruction.
      unsigned Slots = I->getDesc().getNumDefs();
        MIIndex = getNextIndex(MIIndex);
        i2miMap_.push_back(0);
    if (MBB->getFirstTerminator() == MBB->end()) {
      // Leave a gap before the terminators; this is where we will point
      MachineInstrIndex tGap(true, MIIndex);
        terminatorGaps.insert(std::make_pair(&*MBB, tGap)).second;
        "Multiple 'first' terminators encountered during numbering.");
      inserted = inserted; // Avoid compiler warning if assertions turned off.
      i2miMap_.push_back(0);
      MIIndex = getNextIndex(MIIndex);
    // Set the MBB2IdxMap entry for this MBB.
    MBB2IdxMap[MBB->getNumber()] = std::make_pair(StartIdx, getPrevSlot(MIIndex));
    Idx2MBBMap.push_back(std::make_pair(StartIdx, MBB));
  std::sort(Idx2MBBMap.begin(), Idx2MBBMap.end(), Idx2MBBCompare());
  if (!OldI2MI.empty())
    for (iterator OI = begin(), OE = end(); OI != OE; ++OI) {
      for (LiveInterval::iterator LI = OI->second->begin(),
             LE = OI->second->end(); LI != LE; ++LI) {
        // Remap the start index of the live range to the corresponding new
        // number, or our best guess at what it _should_ correspond to if the
        // original instruction has been erased. This is either the following
        // instruction or its predecessor.
        unsigned index = LI->start.getVecIndex();
        MachineInstrIndex::Slot offset = LI->start.getSlot();
        if (LI->start.isLoad()) {
          std::vector<IdxMBBPair>::const_iterator I =
            std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), LI->start);
          // Take the pair containing the index
          std::vector<IdxMBBPair>::const_iterator J =
            (I == OldI2MBB.end() && OldI2MBB.size()>0) ? (I-1): I;
          LI->start = getMBBStartIdx(J->second);
          LI->start = MachineInstrIndex(
            MachineInstrIndex(mi2iMap_[OldI2MI[index]]),
            (MachineInstrIndex::Slot)offset);
        // Remap the ending index in the same way that we remapped the start,
        // except for the final step where we always map to the immediately
        // following instruction.
        index = (getPrevSlot(LI->end)).getVecIndex();
        offset = LI->end.getSlot();
        if (LI->end.isLoad()) {
          // VReg dies at end of block.
          std::vector<IdxMBBPair>::const_iterator I =
            std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), LI->end);
          LI->end = getNextSlot(getMBBEndIdx(I->second));
          unsigned idx = index;
          while (index < OldI2MI.size() && !OldI2MI[index]) ++index;
          if (index != OldI2MI.size())
            MachineInstrIndex(mi2iMap_[OldI2MI[index]],
                              (idx == index ? offset : MachineInstrIndex::LOAD));
            MachineInstrIndex(MachineInstrIndex::NUM * i2miMap_.size());
      for (LiveInterval::vni_iterator VNI = OI->second->vni_begin(),
             VNE = OI->second->vni_end(); VNI != VNE; ++VNI) {
        // Remap the VNInfo def index, which works the same as the
        // start indices above. VN's with special sentinel defs
        // don't need to be remapped.
        if (vni->isDefAccurate() && !vni->isUnused()) {
          unsigned index = vni->def.getVecIndex();
          MachineInstrIndex::Slot offset = vni->def.getSlot();
          if (vni->def.isLoad()) {
            std::vector<IdxMBBPair>::const_iterator I =
              std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), vni->def);
            // Take the pair containing the index
            std::vector<IdxMBBPair>::const_iterator J =
              (I == OldI2MBB.end() && OldI2MBB.size()>0) ? (I-1): I;
            vni->def = getMBBStartIdx(J->second);
            vni->def = MachineInstrIndex(mi2iMap_[OldI2MI[index]], offset);
        // Remap the VNInfo kill indices, which works the same as
        // the end indices above.
        for (size_t i = 0; i < vni->kills.size(); ++i) {
          unsigned index = getPrevSlot(vni->kills[i]).getVecIndex();
          MachineInstrIndex::Slot offset = vni->kills[i].getSlot();
          if (vni->kills[i].isLoad()) {
            assert(false && "Value killed at a load slot.");
            /*std::vector<IdxMBBPair>::const_iterator I =
              std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), vni->kills[i]);
            vni->kills[i] = getMBBEndIdx(I->second);*/
          if (vni->kills[i].isPHIIndex()) {
            std::vector<IdxMBBPair>::const_iterator I =
              std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), vni->kills[i]);
            vni->kills[i] = terminatorGaps[I->second];
            assert(OldI2MI[index] != 0 &&
                   "Kill refers to instruction not present in index maps.");
            vni->kills[i] = MachineInstrIndex(mi2iMap_[OldI2MI[index]], offset);
            unsigned idx = index;
            while (index < OldI2MI.size() && !OldI2MI[index]) ++index;
            if (index != OldI2MI.size())
              vni->kills[i] = mi2iMap_[OldI2MI[index]] +
                              (idx == index ? offset : 0);
              vni->kills[i] = InstrSlots::NUM * i2miMap_.size();
void LiveIntervals::scaleNumbering(int factor) {
  // * scale MBB begin and end points
  // * scale all ranges.
  // * Update VNI structures.
  // * Scale instruction numberings
  // Scale the MBB indices.
  for (MachineFunction::iterator MBB = mf_->begin(), MBBE = mf_->end();
       MBB != MBBE; ++MBB) {
    std::pair<MachineInstrIndex, MachineInstrIndex> &mbbIndices = MBB2IdxMap[MBB->getNumber()];
    mbbIndices.first = mbbIndices.first.scale(factor);
    mbbIndices.second = mbbIndices.second.scale(factor);
    Idx2MBBMap.push_back(std::make_pair(mbbIndices.first, MBB));
  std::sort(Idx2MBBMap.begin(), Idx2MBBMap.end(), Idx2MBBCompare());
  // Scale terminator gaps.
  for (DenseMap<MachineBasicBlock*, MachineInstrIndex>::iterator
         TGI = terminatorGaps.begin(), TGE = terminatorGaps.end();
    terminatorGaps[TGI->first] = TGI->second.scale(factor);
  // Scale the intervals.
  for (iterator LI = begin(), LE = end(); LI != LE; ++LI) {
    LI->second->scaleNumbering(factor);
  // Scale MachineInstrs.
  Mi2IndexMap oldmi2iMap = mi2iMap_;
  MachineInstrIndex highestSlot;
  for (Mi2IndexMap::iterator MI = oldmi2iMap.begin(), ME = oldmi2iMap.end();
    MachineInstrIndex newSlot = MI->second.scale(factor);
    mi2iMap_[MI->first] = newSlot;
    highestSlot = std::max(highestSlot, newSlot);
  unsigned highestVIndex = highestSlot.getVecIndex();
  i2miMap_.resize(highestVIndex + 1);
  for (Mi2IndexMap::iterator MI = mi2iMap_.begin(), ME = mi2iMap_.end();
    i2miMap_[MI->second.getVecIndex()] = const_cast<MachineInstr *>(MI->first);
/// runOnMachineFunction - Compute the live intervals for the whole function.
bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
  mri_ = &mf_->getRegInfo();
  tm_ = &fn.getTarget();
  tri_ = tm_->getRegisterInfo();
  tii_ = tm_->getInstrInfo();
  aa_ = &getAnalysis<AliasAnalysis>();
  lv_ = &getAnalysis<LiveVariables>();
  allocatableRegs_ = tri_->getAllocatableSet(fn);
  processImplicitDefs();
  performEarlyCoalescing();
  numIntervals += getNumIntervals();
/// print - Implement the dump method.
void LiveIntervals::print(raw_ostream &OS, const Module* ) const {
  OS << "********** INTERVALS **********\n";
  for (const_iterator I = begin(), E = end(); I != E; ++I) {
    I->second->print(OS, tri_);
void LiveIntervals::printInstrs(raw_ostream &OS) const {
  OS << "********** MACHINEINSTRS **********\n";
  for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end();
       mbbi != mbbe; ++mbbi) {
    OS << ((Value*)mbbi->getBasicBlock())->getName() << ":\n";
    for (MachineBasicBlock::iterator mii = mbbi->begin(),
           mie = mbbi->end(); mii != mie; ++mii) {
      OS << getInstructionIndex(mii) << '\t' << *mii;
void LiveIntervals::dumpInstrs() const {
/// conflictsWithPhysRegDef - Returns true if the specified register
/// is defined during the duration of the specified interval.
bool LiveIntervals::conflictsWithPhysRegDef(const LiveInterval &li,
                                            VirtRegMap &vrm, unsigned reg) {
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    for (MachineInstrIndex index = getBaseIndex(I->start),
           end = getNextIndex(getBaseIndex(getPrevSlot(I->end))); index != end;
         index = getNextIndex(index)) {
      // skip deleted instructions
      while (index != end && !getInstructionFromIndex(index))
        index = getNextIndex(index);
      if (index == end) break;
      MachineInstr *MI = getInstructionFromIndex(index);
      unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
      if (tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
        if (SrcReg == li.reg || DstReg == li.reg)
      for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
        MachineOperand& mop = MI->getOperand(i);
        unsigned PhysReg = mop.getReg();
        if (PhysReg == 0 || PhysReg == li.reg)
        if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
          if (!vrm.hasPhys(PhysReg))
          PhysReg = vrm.getPhys(PhysReg);
        if (PhysReg && tri_->regsOverlap(PhysReg, reg))
/// conflictsWithPhysRegRef - Similar to conflictsWithPhysRegDef except
/// it can check uses as well.
bool LiveIntervals::conflictsWithPhysRegRef(LiveInterval &li,
                                            unsigned Reg, bool CheckUse,
                                            SmallPtrSet<MachineInstr*,32> &JoinedCopies) {
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    for (MachineInstrIndex index = getBaseIndex(I->start),
           end = getNextIndex(getBaseIndex(getPrevSlot(I->end))); index != end;
         index = getNextIndex(index)) {
      // Skip deleted instructions.
      MachineInstr *MI = 0;
      while (index != end) {
        MI = getInstructionFromIndex(index);
        index = getNextIndex(index);
      if (index == end) break;
      if (JoinedCopies.count(MI))
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand& MO = MI->getOperand(i);
        if (MO.isUse() && !CheckUse)
        unsigned PhysReg = MO.getReg();
        if (PhysReg == 0 || TargetRegisterInfo::isVirtualRegister(PhysReg))
        if (tri_->isSubRegister(Reg, PhysReg))
static void printRegName(unsigned reg, const TargetRegisterInfo* tri_) {
  if (TargetRegisterInfo::isPhysicalRegister(reg))
    errs() << tri_->getName(reg);
    errs() << "%reg" << reg;
void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
                                             MachineBasicBlock::iterator mi,
                                             MachineInstrIndex MIIdx,
                                             LiveInterval &interval) {
  errs() << "\t\tregister: ";
  printRegName(interval.reg, tri_);
  // Virtual registers may be defined multiple times (due to phi
  // elimination and 2-addr elimination). Much of what we do only has to be
  // done once for the vreg. We use an empty interval to detect the first
  // time we see a vreg.
  LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);
  if (interval.empty()) {
    // Get the Idx of the defining instruction.
    MachineInstrIndex defIndex = getDefIndex(MIIdx);
    // Earlyclobbers move back one, so that they overlap the live range
    if (MO.isEarlyClobber())
      defIndex = getUseIndex(MIIdx);
    MachineInstr *CopyMI = NULL;
    unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
    if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
        mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
        mi->getOpcode() == TargetInstrInfo::SUBREG_TO_REG ||
        tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
    // Earlyclobbers move back one.
    ValNo = interval.getNextValue(defIndex, CopyMI, true, VNInfoAllocator);
    assert(ValNo->id == 0 && "First value in interval is not 0?");
    // Loop over all of the blocks that the vreg is defined in. There are
    // two cases we have to handle here. The most common case is a vreg
    // whose lifetime is contained within a basic block. In this case there
    // will be a single kill, in MBB, which comes after the definition.
    if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
      // FIXME: what about dead vars?
      MachineInstrIndex killIdx;
      if (vi.Kills[0] != mi)
        killIdx = getNextSlot(getUseIndex(getInstructionIndex(vi.Kills[0])));
      else if (MO.isEarlyClobber())
        // Earlyclobbers that die in this instruction move up one extra, to
        // compensate for having the starting point moved back one. This
        // gets them to overlap the live range of other outputs.
        killIdx = getNextSlot(getNextSlot(defIndex));
        killIdx = getNextSlot(defIndex);
      // If the kill happens after the definition, we have an intra-block
      if (killIdx > defIndex) {
        assert(vi.AliveBlocks.empty() &&
               "Shouldn't be alive across any blocks!");
        LiveRange LR(defIndex, killIdx, ValNo);
        interval.addRange(LR);
        DEBUG(errs() << " +" << LR << "\n");
        ValNo->addKill(killIdx);
    // The other case we handle is when a virtual register lives to the end
    // of the defining block, potentially live across some blocks, then is
    // live into some number of blocks, but gets killed. Start by adding a
    // range that goes from this definition to the end of the defining block.
    LiveRange NewLR(defIndex, getNextSlot(getMBBEndIdx(mbb)), ValNo);
    DEBUG(errs() << " +" << NewLR);
    interval.addRange(NewLR);
    // Iterate over all of the blocks that the variable is completely
    // live in, adding [instrIndex(begin), instrIndex(end)+4) to the
    for (SparseBitVector<>::iterator I = vi.AliveBlocks.begin(),
           E = vi.AliveBlocks.end(); I != E; ++I) {
      LiveRange LR(getMBBStartIdx(*I),
                   getNextSlot(getMBBEndIdx(*I)),  // MBB ends at -1.
      interval.addRange(LR);
      DEBUG(errs() << " +" << LR);
    // Finally, this virtual register is live from the start of any killing
    // block to the 'use' slot of the killing instruction.
    for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
      MachineInstr *Kill = vi.Kills[i];
      MachineInstrIndex killIdx =
        getNextSlot(getUseIndex(getInstructionIndex(Kill)));
      LiveRange LR(getMBBStartIdx(Kill->getParent()), killIdx, ValNo);
      interval.addRange(LR);
      ValNo->addKill(killIdx);
      DEBUG(errs() << " +" << LR);
    // If this is the second time we see a virtual register definition, it
    // must be due to phi elimination or two-address elimination. If this is
    // the result of two-address elimination, then the vreg is one of the
    // def-and-use register operands.
    if (mi->isRegTiedToUseOperand(MOIdx)) {
      // If this is a two-address definition, then we have already processed
      // the live range. The only problem is that we didn't realize there
      // are actually two values in the live interval. Because of this we
      // need to take the live range that defines this register and split it
      assert(interval.containsOneValue());
      MachineInstrIndex DefIndex = getDefIndex(interval.getValNumInfo(0)->def);
      MachineInstrIndex RedefIndex = getDefIndex(MIIdx);
      if (MO.isEarlyClobber())
        RedefIndex = getUseIndex(MIIdx);
      const LiveRange *OldLR =
        interval.getLiveRangeContaining(getPrevSlot(RedefIndex));
      VNInfo *OldValNo = OldLR->valno;
      // Delete the initial value, which should be short and continuous,
      // because the 2-addr copy must be in the same MBB as the redef.
      interval.removeRange(DefIndex, RedefIndex);
      // Two-address vregs should always only be redefined once. This means
      // that at this point, there should be exactly one value number in it.
      assert(interval.containsOneValue() && "Unexpected 2-addr liveint!");
      // The new value number (#1) is defined by the instruction we claimed
      VNInfo *ValNo = interval.getNextValue(OldValNo->def, OldValNo->getCopy(),
                                            false, // update at *
      ValNo->setFlags(OldValNo->getFlags()); // * <- updating here
      // Value#0 is now defined by the 2-addr instruction.
      OldValNo->def = RedefIndex;
      OldValNo->setCopy(0);
      if (MO.isEarlyClobber())
        OldValNo->setHasRedefByEC(true);
      // Add the new live interval which replaces the range for the input copy.
      LiveRange LR(DefIndex, RedefIndex, ValNo);
      DEBUG(errs() << " replace range with " << LR);
      interval.addRange(LR);
      ValNo->addKill(RedefIndex);
      // If this redefinition is dead, we need to add a dummy unit live
      // range covering the def slot.
        LiveRange(RedefIndex, MO.isEarlyClobber() ?
                  getNextSlot(getNextSlot(RedefIndex)) :
                  getNextSlot(RedefIndex), OldValNo));
      errs() << " RESULT: ";
      interval.print(errs(), tri_);
    // Otherwise, this must be because of phi elimination. If this is the
    // first redefinition of the vreg that we have seen, go back and change
    // the live range in the PHI block to be a different value number.
    if (interval.containsOneValue()) {
      // Remove the old range that we now know has an incorrect number.
      VNInfo *VNI = interval.getValNumInfo(0);
      MachineInstr *Killer = vi.Kills[0];
      phiJoinCopies.push_back(Killer);
      MachineInstrIndex Start = getMBBStartIdx(Killer->getParent());
      MachineInstrIndex End =
        getNextSlot(getUseIndex(getInstructionIndex(Killer)));
      errs() << " Removing [" << Start << "," << End << "] from: ";
      interval.print(errs(), tri_);
      interval.removeRange(Start, End);
      assert(interval.ranges.size() == 1 &&
             "Newly discovered PHI interval has >1 ranges.");
      MachineBasicBlock *killMBB = getMBBFromIndex(interval.endIndex());
      VNI->addKill(terminatorGaps[killMBB]);
      VNI->setHasPHIKill(true);
      errs() << " RESULT: ";
      interval.print(errs(), tri_);
      // Replace the interval with one of a NEW value number. Note that this
      // value number isn't actually defined by an instruction, weird huh? :)
      LiveRange LR(Start, End,
                   interval.getNextValue(MachineInstrIndex(mbb->getNumber()),
                                         0, false, VNInfoAllocator));
      LR.valno->setIsPHIDef(true);
      DEBUG(errs() << " replace range with " << LR);
      interval.addRange(LR);
      LR.valno->addKill(End);
      errs() << " RESULT: ";
      interval.print(errs(), tri_);
    // In the case of PHI elimination, each variable definition is only
    // live until the end of the block. We've already taken care of the
    // rest of the live range.
    MachineInstrIndex defIndex = getDefIndex(MIIdx);
    if (MO.isEarlyClobber())
      defIndex = getUseIndex(MIIdx);
    MachineInstr *CopyMI = NULL;
    unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
    if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
        mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
        mi->getOpcode() == TargetInstrInfo::SUBREG_TO_REG ||
        tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
    ValNo = interval.getNextValue(defIndex, CopyMI, true, VNInfoAllocator);
    MachineInstrIndex killIndex = getNextSlot(getMBBEndIdx(mbb));
    LiveRange LR(defIndex, killIndex, ValNo);
    interval.addRange(LR);
    ValNo->addKill(terminatorGaps[mbb]);
    ValNo->setHasPHIKill(true);
    DEBUG(errs() << " +" << LR);
  DEBUG(errs() << '\n');
void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
                                              MachineBasicBlock::iterator mi,
                                              MachineInstrIndex MIIdx,
                                              LiveInterval &interval,
                                              MachineInstr *CopyMI) {
  // A physical register cannot be live across basic blocks, so its
  // lifetime must end somewhere in its defining basic block.
  errs() << "\t\tregister: ";
  printRegName(interval.reg, tri_);
  MachineInstrIndex baseIndex = MIIdx;
  MachineInstrIndex start = getDefIndex(baseIndex);
  // Earlyclobbers move back one.
  if (MO.isEarlyClobber())
    start = getUseIndex(MIIdx);
  MachineInstrIndex end = start;
  // If it is not used after definition, it is considered dead at
  // the instruction defining it. Hence its interval is:
  // [defSlot(def), defSlot(def)+1)
  // For earlyclobbers, the defSlot was pushed back one; the extra
  // advance below compensates.
    DEBUG(errs() << " dead");
    if (MO.isEarlyClobber())
      end = getNextSlot(getNextSlot(start));
      end = getNextSlot(start);
  // If it is not dead on definition, it must be killed by a
  // subsequent instruction. Hence its interval is:
  // [defSlot(def), useSlot(kill)+1)
  baseIndex = getNextIndex(baseIndex);
  while (++mi != MBB->end()) {
    while (baseIndex.getVecIndex() < i2miMap_.size() &&
           getInstructionFromIndex(baseIndex) == 0)
      baseIndex = getNextIndex(baseIndex);
    if (mi->killsRegister(interval.reg, tri_)) {
      DEBUG(errs() << " killed");
      end = getNextSlot(getUseIndex(baseIndex));
    int DefIdx = mi->findRegisterDefOperandIdx(interval.reg, false, tri_);
    if (mi->isRegTiedToUseOperand(DefIdx)) {
      // Two-address instruction.
      end = getDefIndex(baseIndex);
      if (mi->getOperand(DefIdx).isEarlyClobber())
        end = getUseIndex(baseIndex);
      // Another instruction redefines the register before it is ever read.
      // Then the register is essentially dead at the instruction that defines
      // it. Hence its interval is:
      // [defSlot(def), defSlot(def)+1)
      DEBUG(errs() << " dead");
      end = getNextSlot(start);
    baseIndex = getNextIndex(baseIndex);
  // The only case where we should have a dead physreg here, without a kill
  // or any other instruction where we know it is dead, is when it is live-in
  // to the function and never used. Another possible case is that the
  // implicit use of the physical register has been deleted by the
  // two-address pass.
  end = getNextSlot(start);
  assert(start < end && "did not find end of interval?");
  // Already exists? Extend old live interval.
  LiveInterval::iterator OldLR = interval.FindLiveRangeContaining(start);
  bool Extend = OldLR != interval.end();
  VNInfo *ValNo = Extend
    ? OldLR->valno : interval.getNextValue(start, CopyMI, true, VNInfoAllocator);
  if (MO.isEarlyClobber() && Extend)
    ValNo->setHasRedefByEC(true);
  LiveRange LR(start, end, ValNo);
  interval.addRange(LR);
  LR.valno->addKill(end);
  DEBUG(errs() << " +" << LR << '\n');
void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
                                      MachineBasicBlock::iterator MI,
                                      MachineInstrIndex MIIdx,
  if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
    handleVirtualRegisterDef(MBB, MI, MIIdx, MO, MOIdx,
                             getOrCreateInterval(MO.getReg()));
  else if (allocatableRegs_[MO.getReg()]) {
    MachineInstr *CopyMI = NULL;
    unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
    if (MI->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
        MI->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
        MI->getOpcode() == TargetInstrInfo::SUBREG_TO_REG ||
        tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
    handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                              getOrCreateInterval(MO.getReg()), CopyMI);
    // Def of a register also defines its sub-registers.
    for (const unsigned* AS = tri_->getSubRegisters(MO.getReg()); *AS; ++AS)
      // If MI also modifies the sub-register explicitly, avoid processing it
      // more than once. Do not pass in TRI here so it checks for exact match.
      if (!MI->modifiesRegister(*AS))
        handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                                  getOrCreateInterval(*AS), 0);
void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
                                         MachineInstrIndex MIIdx,
                                         LiveInterval &interval, bool isAlias) {
  errs() << "\t\tlivein register: ";
  printRegName(interval.reg, tri_);
  // Look for kills. If the register reaches a def before it is killed, it
  // shouldn't be considered a live-in.
  MachineBasicBlock::iterator mi = MBB->begin();
  MachineInstrIndex baseIndex = MIIdx;
  MachineInstrIndex start = baseIndex;
  while (baseIndex.getVecIndex() < i2miMap_.size() &&
         getInstructionFromIndex(baseIndex) == 0)
    baseIndex = getNextIndex(baseIndex);
  MachineInstrIndex end = baseIndex;
  bool SeenDefUse = false;
  while (mi != MBB->end()) {
    if (mi->killsRegister(interval.reg, tri_)) {
      DEBUG(errs() << " killed");
      end = getNextSlot(getUseIndex(baseIndex));
    } else if (mi->modifiesRegister(interval.reg, tri_)) {
      // Another instruction redefines the register before it is ever read.
      // Then the register is essentially dead at the instruction that defines
      // it. Hence its interval is:
      // [defSlot(def), defSlot(def)+1)
      DEBUG(errs() << " dead");
      end = getNextSlot(getDefIndex(start));
    baseIndex = getNextIndex(baseIndex);
    if (mi != MBB->end()) {
      while (baseIndex.getVecIndex() < i2miMap_.size() &&
             getInstructionFromIndex(baseIndex) == 0)
        baseIndex = getNextIndex(baseIndex);
  // Live-in register might not be used at all.
    DEBUG(errs() << " dead");
    end = getNextSlot(getDefIndex(MIIdx));
    DEBUG(errs() << " live through");
    interval.getNextValue(MachineInstrIndex(MBB->getNumber()),
                          0, false, VNInfoAllocator);
  vni->setIsPHIDef(true);
  LiveRange LR(start, end, vni);
  interval.addRange(LR);
  LR.valno->addKill(end);
  DEBUG(errs() << " +" << LR << '\n');
LiveIntervals::isProfitableToCoalesce(LiveInterval &DstInt, LiveInterval &SrcInt,
                                      SmallVector<MachineInstr*,16> &IdentCopies,
                                      SmallVector<MachineInstr*,16> &OtherCopies) {
  bool HaveConflict = false;
  unsigned NumIdent = 0;
  for (MachineRegisterInfo::def_iterator ri = mri_->def_begin(SrcInt.reg),
         re = mri_->def_end(); ri != re; ++ri) {
    MachineOperand &O = ri.getOperand();
    MachineInstr *MI = &*ri;
    unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
    if (!tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
    if (SrcReg != DstInt.reg) {
      OtherCopies.push_back(MI);
      HaveConflict |= DstInt.liveAt(getInstructionIndex(MI));
      IdentCopies.push_back(MI);
    return false; // Let coalescer handle it
  return IdentCopies.size() > OtherCopies.size();
void LiveIntervals::performEarlyCoalescing() {
  if (!EarlyCoalescing)
  /// Perform early coalescing: eliminate copies which feed into phi joins
  /// and whose sources are defined by the phi joins.
  for (unsigned i = 0, e = phiJoinCopies.size(); i != e; ++i) {
    MachineInstr *Join = phiJoinCopies[i];
    if (CoalescingLimit != -1 && (int)numCoalescing == CoalescingLimit)
    unsigned PHISrc, PHIDst, SrcSubReg, DstSubReg;
    bool isMove = tii_->isMoveInstr(*Join, PHISrc, PHIDst, SrcSubReg, DstSubReg);
    assert(isMove && "PHI join instruction must be a move!");
    LiveInterval &DstInt = getInterval(PHIDst);
    LiveInterval &SrcInt = getInterval(PHISrc);
    SmallVector<MachineInstr*, 16> IdentCopies;
    SmallVector<MachineInstr*, 16> OtherCopies;
    if (!isProfitableToCoalesce(DstInt, SrcInt, IdentCopies, OtherCopies))
    DEBUG(errs() << "PHI Join: " << *Join);
    assert(DstInt.containsOneValue() && "PHI join should have just one val#!");
    VNInfo *VNI = DstInt.getValNumInfo(0);
    // Change the non-identity copies to directly target the phi destination.
    for (unsigned i = 0, e = OtherCopies.size(); i != e; ++i) {
      MachineInstr *PHICopy = OtherCopies[i];
      DEBUG(errs() << "Moving: " << *PHICopy);
      MachineInstrIndex MIIndex = getInstructionIndex(PHICopy);
      MachineInstrIndex DefIndex = getDefIndex(MIIndex);
      LiveRange *SLR = SrcInt.getLiveRangeContaining(DefIndex);
      MachineInstrIndex StartIndex = SLR->start;
      MachineInstrIndex EndIndex = SLR->end;
      // Delete the val# defined by the now-identity copy and add the range
      // from the beginning of the MBB to the end of the range.
      SrcInt.removeValNo(SLR->valno);
      DEBUG(errs() << " added range [" << StartIndex << ','
                   << EndIndex << "] to reg" << DstInt.reg << '\n');
      if (DstInt.liveAt(StartIndex))
        DstInt.removeRange(StartIndex, EndIndex);
      VNInfo *NewVNI = DstInt.getNextValue(DefIndex, PHICopy, true,
      NewVNI->setHasPHIKill(true);
      DstInt.addRange(LiveRange(StartIndex, EndIndex, NewVNI));
      for (unsigned j = 0, ee = PHICopy->getNumOperands(); j != ee; ++j) {
        MachineOperand &MO = PHICopy->getOperand(j);
        if (!MO.isReg() || MO.getReg() != PHISrc)
    // Now let's eliminate all the would-be identity copies.
    for (unsigned i = 0, e = IdentCopies.size(); i != e; ++i) {
      MachineInstr *PHICopy = IdentCopies[i];
      DEBUG(errs() << "Coalescing: " << *PHICopy);
      MachineInstrIndex MIIndex = getInstructionIndex(PHICopy);
      MachineInstrIndex DefIndex = getDefIndex(MIIndex);
      LiveRange *SLR = SrcInt.getLiveRangeContaining(DefIndex);
      MachineInstrIndex StartIndex = SLR->start;
      MachineInstrIndex EndIndex = SLR->end;
      // Delete the val# defined by the now-identity copy and add the range
      // from the beginning of the MBB to the end of the range.
      SrcInt.removeValNo(SLR->valno);
      RemoveMachineInstrFromMaps(PHICopy);
      PHICopy->eraseFromParent();
      DEBUG(errs() << " added range [" << StartIndex << ','
                   << EndIndex << "] to reg" << DstInt.reg << '\n');
      DstInt.addRange(LiveRange(StartIndex, EndIndex, VNI));
    // Remove the phi join and update the phi block liveness.
    MachineInstrIndex MIIndex = getInstructionIndex(Join);
    MachineInstrIndex UseIndex = getUseIndex(MIIndex);
    MachineInstrIndex DefIndex = getDefIndex(MIIndex);
    LiveRange *SLR = SrcInt.getLiveRangeContaining(UseIndex);
    LiveRange *DLR = DstInt.getLiveRangeContaining(DefIndex);
    DLR->valno->setCopy(0);
    DLR->valno->setIsDefAccurate(false);
    DstInt.addRange(LiveRange(SLR->start, SLR->end, DLR->valno));
    SrcInt.removeRange(SLR->start, SLR->end);
    assert(SrcInt.empty());
    removeInterval(PHISrc);
    RemoveMachineInstrFromMaps(Join);
    Join->eraseFromParent();
/// computeIntervals - Computes the live intervals for virtual
/// registers. For some ordering of the machine instructions [1,N], a
/// live interval is an interval [i, j) where 1 <= i <= j < N for
/// which a variable is live.
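///
/// A minimal illustration (assumed indices and opcodes, not taken from a real
/// function): given the numbering
///
///   4:  %reg1024<def> = MOV 1
///   8:  %reg1025<def> = ADD %reg1024, %reg1024
///   12: RET %reg1025
///
/// this would build roughly %reg1024 = [defSlot(4), useSlot(8)+1) and
/// %reg1025 = [defSlot(8), useSlot(12)+1), each with a single value number.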
void LiveIntervals::computeIntervals() {
  DEBUG(errs() << "********** COMPUTING LIVE INTERVALS **********\n"
               << "********** Function: "
               << ((Value*)mf_->getFunction())->getName() << '\n');
  SmallVector<unsigned, 8> UndefUses;
  for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
       MBBI != E; ++MBBI) {
    MachineBasicBlock *MBB = MBBI;
    // Track the index of the current machine instr.
    MachineInstrIndex MIIndex = getMBBStartIdx(MBB);
    DEBUG(errs() << ((Value*)MBB->getBasicBlock())->getName() << ":\n");
    MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();
    // Create intervals for live-ins to this BB first.
    for (MachineBasicBlock::const_livein_iterator LI = MBB->livein_begin(),
           LE = MBB->livein_end(); LI != LE; ++LI) {
      handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
      // Multiple live-ins can alias the same register.
      for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
        if (!hasInterval(*AS))
          handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
    // Skip over empty initial indices.
    while (MIIndex.getVecIndex() < i2miMap_.size() &&
           getInstructionFromIndex(MIIndex) == 0)
      MIIndex = getNextIndex(MIIndex);
    for (; MI != miEnd; ++MI) {
      DEBUG(errs() << MIIndex << "\t" << *MI);
      for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
        MachineOperand &MO = MI->getOperand(i);
        if (!MO.isReg() || !MO.getReg())
        // handle register defs - build intervals
          handleRegisterDef(MBB, MI, MIIndex, MO, i);
        else if (MO.isUndef())
          UndefUses.push_back(MO.getReg());
      // Skip over the empty slots after each instruction.
      unsigned Slots = MI->getDesc().getNumDefs();
        MIIndex = getNextIndex(MIIndex);
      // Skip over empty indices.
      while (MIIndex.getVecIndex() < i2miMap_.size() &&
             getInstructionFromIndex(MIIndex) == 0)
        MIIndex = getNextIndex(MIIndex);
  // Create empty intervals for registers defined by implicit_def's (except
  // for those implicit_defs that define values which are live out of their
  for (unsigned i = 0, e = UndefUses.size(); i != e; ++i) {
    unsigned UndefReg = UndefUses[i];
    (void)getOrCreateInterval(UndefReg);
bool LiveIntervals::findLiveInMBBs(
                      MachineInstrIndex Start, MachineInstrIndex End,
                      SmallVectorImpl<MachineBasicBlock*> &MBBs) const {
  std::vector<IdxMBBPair>::const_iterator I =
    std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), Start);
  bool ResVal = false;
  while (I != Idx2MBBMap.end()) {
    if (I->first >= End)
    MBBs.push_back(I->second);
bool LiveIntervals::findReachableMBBs(
                      MachineInstrIndex Start, MachineInstrIndex End,
                      SmallVectorImpl<MachineBasicBlock*> &MBBs) const {
  std::vector<IdxMBBPair>::const_iterator I =
    std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), Start);
  bool ResVal = false;
  while (I != Idx2MBBMap.end()) {
    MachineBasicBlock *MBB = I->second;
    if (getMBBEndIdx(MBB) > End)
    for (MachineBasicBlock::succ_iterator SI = MBB->succ_begin(),
           SE = MBB->succ_end(); SI != SE; ++SI)
      MBBs.push_back(*SI);
LiveInterval* LiveIntervals::createInterval(unsigned reg) {
  float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ? HUGE_VALF : 0.0F;
  return new LiveInterval(reg, Weight);
/// dupInterval - Duplicate a live interval. The caller is responsible for
/// managing the allocated memory.
LiveInterval* LiveIntervals::dupInterval(LiveInterval *li) {
  LiveInterval *NewLI = createInterval(li->reg);
  NewLI->Copy(*li, mri_, getVNInfoAllocator());
/// getVNInfoSourceReg - Helper function that parses the specified VNInfo
/// copy field and returns the source register that defines it.
unsigned LiveIntervals::getVNInfoSourceReg(const VNInfo *VNI) const {
  if (!VNI->getCopy())
  if (VNI->getCopy()->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG) {
    // If it's extracting out of a physical register, return the sub-register.
    unsigned Reg = VNI->getCopy()->getOperand(1).getReg();
    if (TargetRegisterInfo::isPhysicalRegister(Reg))
      Reg = tri_->getSubReg(Reg, VNI->getCopy()->getOperand(2).getImm());
  } else if (VNI->getCopy()->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
             VNI->getCopy()->getOpcode() == TargetInstrInfo::SUBREG_TO_REG)
    return VNI->getCopy()->getOperand(2).getReg();
  unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
  if (tii_->isMoveInstr(*VNI->getCopy(), SrcReg, DstReg, SrcSubReg, DstSubReg))
  llvm_unreachable("Unrecognized copy instruction!");
//===----------------------------------------------------------------------===//
// Register allocator hooks.
/// getReMatImplicitUse - If the remat definition MI has one (for now, we only
/// allow one) virtual register operand, then its uses are implicitly using
/// the register. Returns the virtual register.
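///
/// Illustrative example only (the opcode and registers are hypothetical): if
/// the rematerializable definition is something like
///
///   %reg1026<def> = LEA %reg1025, 8     ; address computation
///
/// then re-materializing %reg1026 at a use point implicitly re-reads
/// %reg1025, so %reg1025 is the register this helper would return.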
unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
                                            MachineInstr *MI) const {
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg() || !MO.isUse())
    unsigned Reg = MO.getReg();
    if (Reg == 0 || Reg == li.reg)
    if (TargetRegisterInfo::isPhysicalRegister(Reg) &&
        !allocatableRegs_[Reg])
    // FIXME: For now, only remat MI with at most one register operand.
1385 "Can't rematerialize instruction with multiple register operand!");
    RegOp = MO.getReg();
/// isValNoAvailableAt - Return true if the val# of the specified interval
/// which reaches the given instruction also reaches the specified use index.
bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
                                       MachineInstrIndex UseIdx) const {
  MachineInstrIndex Index = getInstructionIndex(MI);
  VNInfo *ValNo = li.FindLiveRangeContaining(Index)->valno;
  LiveInterval::const_iterator UI = li.FindLiveRangeContaining(UseIdx);
  return UI != li.end() && UI->valno == ValNo;
/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       const VNInfo *ValNo, MachineInstr *MI,
                                       SmallVectorImpl<LiveInterval*> &SpillIs,
  if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF)
  if (tii_->isLoadFromStackSlot(MI, FrameIdx) &&
      mf_->getFrameInfo()->isImmutableObjectIndex(FrameIdx))
    // FIXME: Let the target-specific isReallyTriviallyReMaterializable determine
    // this, but remember this is not safe to fold into a two-address
    // This is a load from fixed stack slot. It can be rematerialized.
  // If the target-specific rules don't identify an instruction as
  // being trivially rematerializable, use some target-independent
  if (!MI->getDesc().isRematerializable() ||
      !tii_->isTriviallyReMaterializable(MI)) {
    if (!EnableAggressiveRemat)
    // If the instruction accesses memory but the memoperands have been lost,
    // we can't analyze it.
    const TargetInstrDesc &TID = MI->getDesc();
    if ((TID.mayLoad() || TID.mayStore()) && MI->memoperands_empty())
    // Avoid instructions obviously unsafe for remat.
    if (TID.hasUnmodeledSideEffects() || TID.isNotDuplicable())
    // If the instruction accesses memory and the memory could be non-constant,
    // assume the instruction is not rematerializable.
    for (MachineInstr::mmo_iterator I = MI->memoperands_begin(),
           E = MI->memoperands_end(); I != E; ++I){
      const MachineMemOperand *MMO = *I;
      if (MMO->isVolatile() || MMO->isStore())
      const Value *V = MMO->getValue();
      if (const PseudoSourceValue *PSV = dyn_cast<PseudoSourceValue>(V)) {
        if (!PSV->isConstant(mf_->getFrameInfo()))
      } else if (!aa_->pointsToConstantMemory(V))
    // If any of the registers accessed are non-constant, conservatively assume
    // the instruction is not rematerializable.
    unsigned ImpUse = 0;
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      const MachineOperand &MO = MI->getOperand(i);
      unsigned Reg = MO.getReg();
      if (TargetRegisterInfo::isPhysicalRegister(Reg))
      // Only allow one def, and that in the first operand.
      if (MO.isDef() != (i == 0))
      // Only allow constant-valued registers.
      bool IsLiveIn = mri_->isLiveIn(Reg);
      MachineRegisterInfo::def_iterator I = mri_->def_begin(Reg),
                                        E = mri_->def_end();
      // For the def, it should be the only def of that register.
      if (MO.isDef() && (next(I) != E || IsLiveIn))
      // Only allow one other register use, as that's all the
      // remat mechanisms currently support.
      if (Reg != li.reg) {
        else if (Reg != ImpUse)
      // For the use, there should be only one associated def.
      if (I != E && (next(I) != E || IsLiveIn))
  unsigned ImpUse = getReMatImplicitUse(li, MI);
    const LiveInterval &ImpLi = getInterval(ImpUse);
    for (MachineRegisterInfo::use_iterator ri = mri_->use_begin(li.reg),
           re = mri_->use_end(); ri != re; ++ri) {
      MachineInstr *UseMI = &*ri;
      MachineInstrIndex UseIdx = getInstructionIndex(UseMI);
      if (li.FindLiveRangeContaining(UseIdx)->valno != ValNo)
      if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
  // If a register operand of the re-materialized instruction is going to
  // be spilled next, then it's not legal to re-materialize this instruction.
  for (unsigned i = 0, e = SpillIs.size(); i != e; ++i)
    if (ImpUse == SpillIs[i]->reg)
/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       const VNInfo *ValNo, MachineInstr *MI) {
  SmallVector<LiveInterval*, 4> Dummy1;
  return isReMaterializable(li, ValNo, MI, Dummy1, Dummy2);
/// isReMaterializable - Returns true if every definition of MI of every
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       SmallVectorImpl<LiveInterval*> &SpillIs,
  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
    const VNInfo *VNI = *i;
    if (VNI->isUnused())
      continue; // Dead val#.
    // Is the def for the val# rematerializable?
    if (!VNI->isDefAccurate())
    MachineInstr *ReMatDefMI = getInstructionFromIndex(VNI->def);
    bool DefIsLoad = false;
        !isReMaterializable(li, VNI, ReMatDefMI, SpillIs, DefIsLoad))
    isLoad |= DefIsLoad;
/// FilterFoldedOps - Filter out two-address use operands. Return
/// true if it finds any issue with the operands that ought to prevent
static bool FilterFoldedOps(MachineInstr *MI,
                            SmallVector<unsigned, 2> &Ops,
                            SmallVector<unsigned, 2> &FoldOps) {
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    unsigned OpIdx = Ops[i];
    MachineOperand &MO = MI->getOperand(OpIdx);
    // FIXME: fold subreg use.
      MRInfo |= (unsigned)VirtRegMap::isMod;
    // Filter out two-address use operand(s).
    if (MI->isRegTiedToDefOperand(OpIdx)) {
      MRInfo = VirtRegMap::isModRef;
      MRInfo |= (unsigned)VirtRegMap::isRef;
    FoldOps.push_back(OpIdx);
/// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
/// slot / to reg or any rematerialized load into the ith operand of the
/// specified MI. If it is successful, MI is updated with the newly created MI and
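///
/// A rough sketch of the intended transformation (operand indices, registers
/// and the stack slot number are made up): folding a reload of %reg1024 from
/// stack slot <fi#3> into its use
///
///   %reg1024 = LOAD <fi#3>
///   ...      = ADD %reg1030, %reg1024
///
/// would, when the target's foldMemoryOperand supports it, produce a single
/// memory-operand form such as "... = ADD %reg1030, <fi#3>", so the separate
/// reload is no longer needed.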
bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
                                         VirtRegMap &vrm, MachineInstr *DefMI,
                                         MachineInstrIndex InstrIdx,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool isSS, int Slot, unsigned Reg) {
  // If it is an implicit def instruction, just delete it.
  if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
    RemoveMachineInstrFromMaps(MI);
    vrm.RemoveMachineInstrFromMaps(MI);
    MI->eraseFromParent();
  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
  // The only time it's safe to fold into a two-address instruction is when
  // it's folding a reload or spill from / into a spill stack slot.
  if (DefMI && (MRInfo & VirtRegMap::isMod))
  MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(*mf_, MI, FoldOps, Slot)
                           : tii_->foldMemoryOperand(*mf_, MI, FoldOps, DefMI);
  // Remember this instruction uses the spill slot.
  if (isSS) vrm.addSpillSlotUse(Slot, fmi);
  // Attempt to fold the memory reference into the instruction. If
  // we can do this, we don't need to insert spill code.
  MachineBasicBlock &MBB = *MI->getParent();
  if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
    vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
  vrm.transferSpillPts(MI, fmi);
  vrm.transferRestorePts(MI, fmi);
  vrm.transferEmergencySpills(MI, fmi);
  i2miMap_[InstrIdx.getVecIndex()] = fmi;
  mi2iMap_[fmi] = InstrIdx;
  MI = MBB.insert(MBB.erase(MI), fmi);
/// canFoldMemoryOperand - Returns true if the specified load / store
/// folding is possible.
bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
                                         SmallVector<unsigned, 2> &Ops,
  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
  // It's only legal to remat for a use, not a def.
  if (ReMat && (MRInfo & VirtRegMap::isMod))
  return tii_->canFoldMemoryOperand(MI, FoldOps);
bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
  SmallPtrSet<MachineBasicBlock*, 4> MBBs;
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    std::vector<IdxMBBPair>::const_iterator II =
      std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), I->start);
    if (II == Idx2MBBMap.end())
    if (I->end > II->first)  // crossing a MBB.
    MBBs.insert(II->second);
    if (MBBs.size() > 1)
/// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of
/// interval on to-be re-materialized operands of MI) with new register.
void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
                                       MachineInstr *MI, unsigned NewVReg,
  // There is an implicit use. That means one of the other operands is
  // being remat'ed and the remat'ed instruction has li.reg as a
  // use operand. Make sure we rewrite that as well.
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    unsigned Reg = MO.getReg();
    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
    if (!vrm.isReMaterialized(Reg))
    MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
    MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg);
      UseMO->setReg(NewVReg);
/// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
/// for addIntervalsForSpills to rewrite uses / defs for the given live range.
bool LiveIntervals::
rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
                 bool TrySplit, MachineInstrIndex index, MachineInstrIndex end,
                 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                 unsigned Slot, int LdSlot,
                 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                 const TargetRegisterClass* rc,
                 SmallVector<int, 4> &ReMatIds,
                 const MachineLoopInfo *loopInfo,
                 unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
                 DenseMap<unsigned,unsigned> &MBBVRegsMap,
                 std::vector<LiveInterval*> &NewLIs) {
  bool CanFold = false;
  for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
    MachineOperand& mop = MI->getOperand(i);
    unsigned Reg = mop.getReg();
    unsigned RegI = Reg;
    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
    bool TryFold = !DefIsReMat;
    bool FoldSS = true; // Default behavior unless it's a remat.
    int FoldSlot = Slot;
    // If this is the rematerializable definition MI itself and
    // all of its uses are rematerialized, simply delete it.
    if (MI == ReMatOrigDefMI && CanDelete) {
      DEBUG(errs() << "\t\t\t\tErasing re-materializable def: "
      RemoveMachineInstrFromMaps(MI);
      vrm.RemoveMachineInstrFromMaps(MI);
      MI->eraseFromParent();
    // If def for this use can't be rematerialized, then try folding.
    // If def is rematerializable and it's a load, also try folding.
    TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
    // Try fold loads (from stack slot, constant pool, etc.) into uses.
    // Scan all of the operands of this instruction rewriting operands
    // to use NewVReg instead of li.reg as appropriate. We do this for two reasons:
1759 // 1. If the instr reads the same spilled vreg multiple times, we
1760 // want to reuse the NewVReg.
1761 // 2. If the instr is a two-addr instruction, we are required to
1762 // keep the src/dst regs pinned.
1764 // Keep track of whether we replace a use and/or def so that we can
1765 // create the spill interval with the appropriate range.
1767 HasUse = mop.isUse();
1768 HasDef = mop.isDef();
1769 SmallVector<unsigned, 2> Ops;
1771 for (unsigned j = i+1, e = MI->getNumOperands(); j != e; ++j) {
1772 const MachineOperand &MOj = MI->getOperand(j);
1775 unsigned RegJ = MOj.getReg();
1776 if (RegJ == 0 || TargetRegisterInfo::isPhysicalRegister(RegJ))
1780 if (!MOj.isUndef()) {
1781 HasUse |= MOj.isUse();
1782 HasDef |= MOj.isDef();
1787 // Create a new virtual register for the spill interval.
1788 // Create the new register now so we can map the fold instruction
1789 // to the new register so when it is unfolded we get the correct answer.
1791 bool CreatedNewVReg = false;
1793 NewVReg = mri_->createVirtualRegister(rc);
1795 CreatedNewVReg = true;
1801 // Do not fold load / store here if we are splitting. We'll find an
1802 // optimal point to insert a load / store later.
1804 if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
1805 Ops, FoldSS, FoldSlot, NewVReg)) {
1806 // Folding the load/store can completely change the instruction in
1807 // unpredictable ways, rescan it from the beginning.
1810 // We need to give the new vreg the same stack slot as the
1811 // spilled interval.
1812 vrm.assignVirt2StackSlot(NewVReg, FoldSlot);
1818 if (isNotInMIMap(MI))
1820 goto RestartInstruction;
1823 // We'll try to fold it later if it's profitable.
1824 CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat);
1828 mop.setReg(NewVReg);
1829 if (mop.isImplicit())
1830 rewriteImplicitOps(li, MI, NewVReg, vrm);
1832 // Reuse NewVReg for other reads.
1833 for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
1834 MachineOperand &mopj = MI->getOperand(Ops[j]);
1835 mopj.setReg(NewVReg);
1836 if (mopj.isImplicit())
1837 rewriteImplicitOps(li, MI, NewVReg, vrm);
1840 if (CreatedNewVReg) {
1842 vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI);
1843 if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
1844 // Each valnum may have its own remat id.
1845 ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
1847 vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
1849 if (!CanDelete || (HasUse && HasDef)) {
1850 // If this is a two-addr instruction then its use operands are
1851 // rematerializable but its def is not. It should be assigned a stack slot.
1853 vrm.assignVirt2StackSlot(NewVReg, Slot);
1856 vrm.assignVirt2StackSlot(NewVReg, Slot);
1858 } else if (HasUse && HasDef &&
1859 vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
1860 // If this interval hasn't been assigned a stack slot (because earlier
1861 // def is a deleted remat def), do it now.
1862 assert(Slot != VirtRegMap::NO_STACK_SLOT);
1863 vrm.assignVirt2StackSlot(NewVReg, Slot);
1866 // Re-matting an instruction with virtual register use. Add the
1867 // register as an implicit use on the use MI.
1868 if (DefIsReMat && ImpUse)
1869 MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
1871 // Create a new register interval for this spill / remat.
1872 LiveInterval &nI = getOrCreateInterval(NewVReg);
1873 if (CreatedNewVReg) {
1874 NewLIs.push_back(&nI);
1875 MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
1877 vrm.setIsSplitFromReg(NewVReg, li.reg);
1881 if (CreatedNewVReg) {
1882 LiveRange LR(getLoadIndex(index), getNextSlot(getUseIndex(index)),
1883 nI.getNextValue(MachineInstrIndex(), 0, false,
1885 DEBUG(errs() << " +" << LR);
1888 // Extend the split live interval to this def / use.
1889 MachineInstrIndex End = getNextSlot(getUseIndex(index));
1890 LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
1891 nI.getValNumInfo(nI.getNumValNums()-1));
1892 DEBUG(errs() << " +" << LR);
1897 LiveRange LR(getDefIndex(index), getStoreIndex(index),
1898 nI.getNextValue(MachineInstrIndex(), 0, false,
1900 DEBUG(errs() << " +" << LR);
1905 errs() << "\t\t\t\tAdded new interval: ";
1906 nI.print(errs(), tri_);
1912 bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
1914 MachineBasicBlock *MBB,
1915 MachineInstrIndex Idx) const {
1916 MachineInstrIndex End = getMBBEndIdx(MBB);
1917 for (unsigned j = 0, ee = VNI->kills.size(); j != ee; ++j) {
1918 if (VNI->kills[j].isPHIIndex())
1921 MachineInstrIndex KillIdx = VNI->kills[j];
1922 if (KillIdx > Idx && KillIdx < End)
1928 /// RewriteInfo - Keep track of machine instrs that will be rewritten
1929 /// during spilling.
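/// Illustrative contents after sorting (hypothetical instruction indices):
///   [ {Index 20, MI_a, use}, {Index 20, MI_a, def}, {Index 36, MI_b, use} ]
/// Entries belonging to the same instruction end up adjacent, which the
/// rewrite loop below relies on when it merges them per instruction.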
1931 struct RewriteInfo {
1932 MachineInstrIndex Index;
1936 RewriteInfo(MachineInstrIndex i, MachineInstr *mi, bool u, bool d)
1937 : Index(i), MI(mi), HasUse(u), HasDef(d) {}
1940 struct RewriteInfoCompare {
1941 bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
1942 return LHS.Index < RHS.Index;
1947 void LiveIntervals::
1948 rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
1949 LiveInterval::Ranges::const_iterator &I,
1950 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
1951 unsigned Slot, int LdSlot,
1952 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
1954 const TargetRegisterClass* rc,
1955 SmallVector<int, 4> &ReMatIds,
1956 const MachineLoopInfo *loopInfo,
1957 BitVector &SpillMBBs,
1958 DenseMap<unsigned, std::vector<SRInfo> > &SpillIdxes,
1959 BitVector &RestoreMBBs,
1960 DenseMap<unsigned, std::vector<SRInfo> > &RestoreIdxes,
1961 DenseMap<unsigned,unsigned> &MBBVRegsMap,
1962 std::vector<LiveInterval*> &NewLIs) {
1963 bool AllCanFold = true;
1964 unsigned NewVReg = 0;
1965 MachineInstrIndex start = getBaseIndex(I->start);
1966 MachineInstrIndex end = getNextIndex(getBaseIndex(getPrevSlot(I->end)));
1968 // First collect all the defs / uses in this live range that will be rewritten.
1969 // Make sure they are sorted according to instruction index.
1970 std::vector<RewriteInfo> RewriteMIs;
1971 for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
1972 re = mri_->reg_end(); ri != re; ) {
1973 MachineInstr *MI = &*ri;
1974 MachineOperand &O = ri.getOperand();
1976 assert(!O.isImplicit() && "Spilling register that's used as implicit use?");
1977 MachineInstrIndex index = getInstructionIndex(MI);
1978 if (index < start || index >= end)
1982 // Must be defined by an implicit def. It should not be spilled. Note,
1983 // this is for correctness reasons. e.g.
1984 // 8 %reg1024<def> = IMPLICIT_DEF
1985 // 12 %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
1986 // The live range [12, 14) is not part of the r1024 live interval since
1987 // it's defined by an implicit def. It will not conflict with the live
1988 // interval of r1025. Now suppose both registers are spilled; you can
1989 // easily see a situation where both registers are reloaded before
1990 // the INSERT_SUBREG and both end up targeting registers that overlap.
1992 RewriteMIs.push_back(RewriteInfo(index, MI, O.isUse(), O.isDef()));
1994 std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());
1996 unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
1997 // Now rewrite the defs and uses.
1998 for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
1999 RewriteInfo &rwi = RewriteMIs[i];
2001 MachineInstrIndex index = rwi.Index;
2002 bool MIHasUse = rwi.HasUse;
2003 bool MIHasDef = rwi.HasDef;
2004 MachineInstr *MI = rwi.MI;
2005 // If MI defs and/or uses the same register multiple times, then there
2006 // are multiple entries.
2007 unsigned NumUses = MIHasUse;
2008 while (i != e && RewriteMIs[i].MI == MI) {
2009 assert(RewriteMIs[i].Index == index);
2010 bool isUse = RewriteMIs[i].HasUse;
2011 if (isUse) ++NumUses;
2013 MIHasDef |= RewriteMIs[i].HasDef;
2016 MachineBasicBlock *MBB = MI->getParent();
2018 if (ImpUse && MI != ReMatDefMI) {
2019 // Re-matting an instruction with virtual register use. Update the
2020 // register interval's spill weight to HUGE_VALF to prevent it from being spilled.
2022 LiveInterval &ImpLi = getInterval(ImpUse);
2023 ImpLi.weight = HUGE_VALF;
2026 unsigned MBBId = MBB->getNumber();
2027 unsigned ThisVReg = 0;
2029 DenseMap<unsigned,unsigned>::iterator NVI = MBBVRegsMap.find(MBBId);
2030 if (NVI != MBBVRegsMap.end()) {
2031 ThisVReg = NVI->second;
2038 // It's better to start a new interval here to avoid artificially
2039 // extending the previous one.
2040 if (MIHasDef && !MIHasUse) {
2041 MBBVRegsMap.erase(MBB->getNumber());
2047 bool IsNew = ThisVReg == 0;
2049 // This ends the previous live interval. If all of its def / use
2050 // can be folded, give it a low spill weight.
2051 if (NewVReg && TrySplit && AllCanFold) {
2052 LiveInterval &nI = getOrCreateInterval(NewVReg);
2059 bool HasDef = false;
2060 bool HasUse = false;
2061 bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
2062 index, end, MI, ReMatOrigDefMI, ReMatDefMI,
2063 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
2064 CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
2065 ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs);
2066 if (!HasDef && !HasUse)
2069 AllCanFold &= CanFold;
2071 // Update weight of spill interval.
2072 LiveInterval &nI = getOrCreateInterval(NewVReg);
2074 // The spill weight is now infinity as it cannot be spilled again.
2075 nI.weight = HUGE_VALF;
2079 // Keep track of the last def and first use in each MBB.
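// Illustrative layout (hypothetical block numbers and instruction indices):
//   SpillIdxes:   { MBB#3 -> [ SRInfo(index 52, NewVReg, canFold) ] }
//   RestoreIdxes: { MBB#3 -> [ SRInfo(index 36, NewVReg, canFold) ] }
// with the corresponding bits set in SpillMBBs / RestoreMBBs; the code below
// walks these maps to decide where stores and reloads are actually issued.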
2081 if (MI != ReMatOrigDefMI || !CanDelete) {
2082 bool HasKill = false;
2084 HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, getDefIndex(index));
2087 // If this is a two-address instruction, then this index starts a new VNInfo.
2087 const VNInfo *VNI = li.findDefinedVNInfoForRegInt(getDefIndex(index));
2089 HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, getDefIndex(index));
2091 DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
2092 SpillIdxes.find(MBBId);
2094 if (SII == SpillIdxes.end()) {
2095 std::vector<SRInfo> S;
2096 S.push_back(SRInfo(index, NewVReg, true));
2097 SpillIdxes.insert(std::make_pair(MBBId, S));
2098 } else if (SII->second.back().vreg != NewVReg) {
2099 SII->second.push_back(SRInfo(index, NewVReg, true));
2100 } else if (index > SII->second.back().index) {
2101 // If there is an earlier def and this is a two-address
2102 // instruction, then it's not possible to fold the store (which
2103 // would also fold the load).
2104 SRInfo &Info = SII->second.back();
2106 Info.canFold = !HasUse;
2108 SpillMBBs.set(MBBId);
2109 } else if (SII != SpillIdxes.end() &&
2110 SII->second.back().vreg == NewVReg &&
2111 index > SII->second.back().index) {
2112 // There is an earlier def that's not killed (must be two-address).
2113 // The spill is no longer needed.
2114 SII->second.pop_back();
2115 if (SII->second.empty()) {
2116 SpillIdxes.erase(MBBId);
2117 SpillMBBs.reset(MBBId);
2124 DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
2125 SpillIdxes.find(MBBId);
2126 if (SII != SpillIdxes.end() &&
2127 SII->second.back().vreg == NewVReg &&
2128 index > SII->second.back().index)
2129 // Use(s) follow the last def; it's not safe to fold the spill.
2130 SII->second.back().canFold = false;
2131 DenseMap<unsigned, std::vector<SRInfo> >::iterator RII =
2132 RestoreIdxes.find(MBBId);
2133 if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
2134 // If we are splitting live intervals, only fold if it's the first
2135 // use and there isn't another use later in the MBB.
2136 RII->second.back().canFold = false;
2138 // Only need a reload if there isn't an earlier def / use.
2139 if (RII == RestoreIdxes.end()) {
2140 std::vector<SRInfo> Infos;
2141 Infos.push_back(SRInfo(index, NewVReg, true));
2142 RestoreIdxes.insert(std::make_pair(MBBId, Infos));
2144 RII->second.push_back(SRInfo(index, NewVReg, true));
2146 RestoreMBBs.set(MBBId);
2150 // Update spill weight.
2151 unsigned loopDepth = loopInfo->getLoopDepth(MBB);
2152 nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
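// Note that getSpillWeight scales with loop depth, so a def or use inside a
// nested loop makes the interval considerably more expensive to spill than
// one in straight-line code.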
2155 if (NewVReg && TrySplit && AllCanFold) {
2156 // If all of its def / use can be folded, give it a low spill weight.
2157 LiveInterval &nI = getOrCreateInterval(NewVReg);
2162 bool LiveIntervals::alsoFoldARestore(int Id, MachineInstrIndex index,
2163 unsigned vr, BitVector &RestoreMBBs,
2164 DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
2165 if (!RestoreMBBs[Id])
2167 std::vector<SRInfo> &Restores = RestoreIdxes[Id];
2168 for (unsigned i = 0, e = Restores.size(); i != e; ++i)
2169 if (Restores[i].index == index &&
2170 Restores[i].vreg == vr &&
2171 Restores[i].canFold)
2176 void LiveIntervals::eraseRestoreInfo(int Id, MachineInstrIndex index,
2177 unsigned vr, BitVector &RestoreMBBs,
2178 DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
2179 if (!RestoreMBBs[Id])
2181 std::vector<SRInfo> &Restores = RestoreIdxes[Id];
2182 for (unsigned i = 0, e = Restores.size(); i != e; ++i)
2183 if (Restores[i].index == index && Restores[i].vreg)
2184 Restores[i].index = MachineInstrIndex();
2187 /// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being
2188 /// spilled and create empty intervals for their uses.
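/// Illustrative sketch (hypothetical virtual registers):
///   %reg1024<def> = IMPLICIT_DEF      ; erased, nothing needs to be spilled
///   ...           = USE %reg1024      ; %reg1024 replaced by a fresh vreg
///                                     ; that gets an empty live interval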
2190 LiveIntervals::handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm,
2191 const TargetRegisterClass* rc,
2192 std::vector<LiveInterval*> &NewLIs) {
2193 for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
2194 re = mri_->reg_end(); ri != re; ) {
2195 MachineOperand &O = ri.getOperand();
2196 MachineInstr *MI = &*ri;
2199 assert(MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF &&
2200 "Register def was not rewritten?");
2201 RemoveMachineInstrFromMaps(MI);
2202 vrm.RemoveMachineInstrFromMaps(MI);
2203 MI->eraseFromParent();
2205 // This must be a use of an implicit_def so it's not part of the live
2206 // interval. Create a new empty live interval for it.
2207 // FIXME: Can we simply erase some of the instructions? e.g. Stores?
2208 unsigned NewVReg = mri_->createVirtualRegister(rc);
2210 vrm.setIsImplicitlyDefined(NewVReg);
2211 NewLIs.push_back(&getOrCreateInterval(NewVReg));
2212 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
2213 MachineOperand &MO = MI->getOperand(i);
2214 if (MO.isReg() && MO.getReg() == li.reg) {
2223 std::vector<LiveInterval*> LiveIntervals::
2224 addIntervalsForSpillsFast(const LiveInterval &li,
2225 const MachineLoopInfo *loopInfo,
2227 unsigned slot = vrm.assignVirt2StackSlot(li.reg);
2229 std::vector<LiveInterval*> added;
2231 assert(li.weight != HUGE_VALF &&
2232 "attempt to spill already spilled interval!");
2235 errs() << "\t\t\t\tadding intervals for spills for interval: ";
2240 const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
2242 MachineRegisterInfo::reg_iterator RI = mri_->reg_begin(li.reg);
2243 while (RI != mri_->reg_end()) {
2244 MachineInstr* MI = &*RI;
2246 SmallVector<unsigned, 2> Indices;
2247 bool HasUse = false;
2248 bool HasDef = false;
2250 for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
2251 MachineOperand& mop = MI->getOperand(i);
2252 if (!mop.isReg() || mop.getReg() != li.reg) continue;
2254 HasUse |= MI->getOperand(i).isUse();
2255 HasDef |= MI->getOperand(i).isDef();
2257 Indices.push_back(i);
2260 if (!tryFoldMemoryOperand(MI, vrm, NULL, getInstructionIndex(MI),
2261 Indices, true, slot, li.reg)) {
2262 unsigned NewVReg = mri_->createVirtualRegister(rc);
2264 vrm.assignVirt2StackSlot(NewVReg, slot);
2266 // create a new register for this spill
2267 LiveInterval &nI = getOrCreateInterval(NewVReg);
2269 // the spill weight is now infinity as it
2270 // cannot be spilled again
2271 nI.weight = HUGE_VALF;
2273 // Rewrite register operands to use the new vreg.
2274 for (SmallVectorImpl<unsigned>::iterator I = Indices.begin(),
2275 E = Indices.end(); I != E; ++I) {
2276 MI->getOperand(*I).setReg(NewVReg);
2278 if (MI->getOperand(*I).isUse())
2279 MI->getOperand(*I).setIsKill(true);
2282 // Fill in the new live interval.
2283 MachineInstrIndex index = getInstructionIndex(MI);
2285 LiveRange LR(getLoadIndex(index), getUseIndex(index),
2286 nI.getNextValue(MachineInstrIndex(), 0, false,
2287 getVNInfoAllocator()));
2288 DEBUG(errs() << " +" << LR);
2290 vrm.addRestorePoint(NewVReg, MI);
2293 LiveRange LR(getDefIndex(index), getStoreIndex(index),
2294 nI.getNextValue(MachineInstrIndex(), 0, false,
2295 getVNInfoAllocator()));
2296 DEBUG(errs() << " +" << LR);
2298 vrm.addSpillPoint(NewVReg, true, MI);
2301 added.push_back(&nI);
2304 errs() << "\t\t\t\tadded new interval: ";
2311 RI = mri_->reg_begin(li.reg);
2317 std::vector<LiveInterval*> LiveIntervals::
2318 addIntervalsForSpills(const LiveInterval &li,
2319 SmallVectorImpl<LiveInterval*> &SpillIs,
2320 const MachineLoopInfo *loopInfo, VirtRegMap &vrm) {
2322 if (EnableFastSpilling)
2323 return addIntervalsForSpillsFast(li, loopInfo, vrm);
2325 assert(li.weight != HUGE_VALF &&
2326 "attempt to spill already spilled interval!");
2329 errs() << "\t\t\t\tadding intervals for spills for interval: ";
2330 li.print(errs(), tri_);
2334 // Each bit specifies whether a spill is required in the corresponding MBB.
2335 BitVector SpillMBBs(mf_->getNumBlockIDs());
2336 DenseMap<unsigned, std::vector<SRInfo> > SpillIdxes;
2337 BitVector RestoreMBBs(mf_->getNumBlockIDs());
2338 DenseMap<unsigned, std::vector<SRInfo> > RestoreIdxes;
2339 DenseMap<unsigned,unsigned> MBBVRegsMap;
2340 std::vector<LiveInterval*> NewLIs;
2341 const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
2343 unsigned NumValNums = li.getNumValNums();
2344 SmallVector<MachineInstr*, 4> ReMatDefs;
2345 ReMatDefs.resize(NumValNums, NULL);
2346 SmallVector<MachineInstr*, 4> ReMatOrigDefs;
2347 ReMatOrigDefs.resize(NumValNums, NULL);
2348 SmallVector<int, 4> ReMatIds;
2349 ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
2350 BitVector ReMatDelete(NumValNums);
2351 unsigned Slot = VirtRegMap::MAX_STACK_SLOT;
2353 // Spilling a split live interval. It cannot be split any further. Also,
2354 // it's guaranteed to be a single val# / range interval.
2355 if (vrm.getPreSplitReg(li.reg)) {
2356 vrm.setIsSplitFromReg(li.reg, 0);
2357 // Unset the split kill marker on the last use.
2358 MachineInstrIndex KillIdx = vrm.getKillPoint(li.reg);
2359 if (KillIdx != MachineInstrIndex()) {
2360 MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
2361 assert(KillMI && "Last use disappeared?");
2362 int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
2363 assert(KillOp != -1 && "Last use disappeared?");
2364 KillMI->getOperand(KillOp).setIsKill(false);
2366 vrm.removeKillPoint(li.reg);
2367 bool DefIsReMat = vrm.isReMaterialized(li.reg);
2368 Slot = vrm.getStackSlot(li.reg);
2369 assert(Slot != VirtRegMap::MAX_STACK_SLOT);
2370 MachineInstr *ReMatDefMI = DefIsReMat ?
2371 vrm.getReMaterializedMI(li.reg) : NULL;
2373 bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
2374 bool isLoad = isLoadSS ||
2375 (DefIsReMat && (ReMatDefMI->getDesc().canFoldAsLoad()));
2376 bool IsFirstRange = true;
2377 for (LiveInterval::Ranges::const_iterator
2378 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
2379 // If this is a split live interval with multiple ranges, it means there
2380 // are two-address instructions that re-defined the value. Only the
2381 // first def can be rematerialized!
2383 // Note ReMatOrigDefMI has already been deleted.
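// Illustrative sketch (hypothetical virtual registers):
//   %reg1024<def> = MOV32ri 4               ; first range: rematerializable
//   %reg1024<def> = ADD32ri %reg1024, 1     ; later range: two-address redef
// Only the first range's def may be re-materialized; later ranges are
// reloaded from the stack slot instead.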
2384 rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
2385 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
2386 false, vrm, rc, ReMatIds, loopInfo,
2387 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
2388 MBBVRegsMap, NewLIs);
2390 rewriteInstructionsForSpills(li, false, I, NULL, 0,
2391 Slot, 0, false, false, false,
2392 false, vrm, rc, ReMatIds, loopInfo,
2393 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
2394 MBBVRegsMap, NewLIs);
2396 IsFirstRange = false;
2399 handleSpilledImpDefs(li, vrm, rc, NewLIs);
2403 bool TrySplit = !intervalIsInOneMBB(li);
2406 bool NeedStackSlot = false;
2407 for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
2409 const VNInfo *VNI = *i;
2410 unsigned VN = VNI->id;
2411 if (VNI->isUnused())
2412 continue; // Dead val#.
2413 // Is the def for the val# rematerializable?
2414 MachineInstr *ReMatDefMI = VNI->isDefAccurate()
2415 ? getInstructionFromIndex(VNI->def) : 0;
2417 if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, SpillIs, dummy)) {
2418 // Remember how to remat the def of this val#.
2419 ReMatOrigDefs[VN] = ReMatDefMI;
2420 // Original def may be modified so we have to make a copy here.
2421 MachineInstr *Clone = mf_->CloneMachineInstr(ReMatDefMI);
2422 CloneMIs.push_back(Clone);
2423 ReMatDefs[VN] = Clone;
2425 bool CanDelete = true;
2426 if (VNI->hasPHIKill()) {
2427 // A kill is a phi node; not all of its uses can be rematerialized.
2428 // It must not be deleted.
2430 // Need a stack slot if there is any live range where uses cannot be entirely rematerialized.
2432 NeedStackSlot = true;
2435 ReMatDelete.set(VN);
2437 // Need a stack slot if there is any live range where uses cannot be entirely rematerialized.
2439 NeedStackSlot = true;
2443 // One stack slot per live interval.
2444 if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0) {
2445 if (vrm.getStackSlot(li.reg) == VirtRegMap::NO_STACK_SLOT)
2446 Slot = vrm.assignVirt2StackSlot(li.reg);
2448 // This case only occurs when the prealloc splitter has already assigned
2449 // a stack slot to this vreg.
2451 Slot = vrm.getStackSlot(li.reg);
2454 // Create new intervals and rewrite defs and uses.
2455 for (LiveInterval::Ranges::const_iterator
2456 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
2457 MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
2458 MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
2459 bool DefIsReMat = ReMatDefMI != NULL;
2460 bool CanDelete = ReMatDelete[I->valno->id];
2462 bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
2463 bool isLoad = isLoadSS ||
2464 (DefIsReMat && ReMatDefMI->getDesc().canFoldAsLoad());
2465 rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
2466 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
2467 CanDelete, vrm, rc, ReMatIds, loopInfo,
2468 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
2469 MBBVRegsMap, NewLIs);
2472 // Insert spills / restores if we are splitting.
2474 handleSpilledImpDefs(li, vrm, rc, NewLIs);
2478 SmallPtrSet<LiveInterval*, 4> AddedKill;
2479 SmallVector<unsigned, 2> Ops;
2480 if (NeedStackSlot) {
2481 int Id = SpillMBBs.find_first();
2483 std::vector<SRInfo> &spills = SpillIdxes[Id];
2484 for (unsigned i = 0, e = spills.size(); i != e; ++i) {
2485 MachineInstrIndex index = spills[i].index;
2486 unsigned VReg = spills[i].vreg;
2487 LiveInterval &nI = getOrCreateInterval(VReg);
2488 bool isReMat = vrm.isReMaterialized(VReg);
2489 MachineInstr *MI = getInstructionFromIndex(index);
2490 bool CanFold = false;
2491 bool FoundUse = false;
2493 if (spills[i].canFold) {
2495 for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
2496 MachineOperand &MO = MI->getOperand(j);
2497 if (!MO.isReg() || MO.getReg() != VReg)
2504 (!FoundUse && !alsoFoldARestore(Id, index, VReg,
2505 RestoreMBBs, RestoreIdxes))) {
2506 // MI has two-address uses of the same register. If the use
2507 // isn't the first and only use in the BB, then we can't fold
2508 // it. FIXME: Move this to rewriteInstructionsForSpills.
2515 // Fold the store into the def if possible.
2516 bool Folded = false;
2517 if (CanFold && !Ops.empty()) {
2518 if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot,VReg)){
2521 // Also folded uses, do not issue a load.
2522 eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
2523 nI.removeRange(getLoadIndex(index), getNextSlot(getUseIndex(index)));
2525 nI.removeRange(getDefIndex(index), getStoreIndex(index));
2529 // Otherwise tell the spiller to issue a spill.
2531 LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
2532 bool isKill = LR->end == getStoreIndex(index);
2533 if (!MI->registerDefIsDead(nI.reg))
2534 // No need to spill a dead def.
2535 vrm.addSpillPoint(VReg, isKill, MI);
2537 AddedKill.insert(&nI);
2540 Id = SpillMBBs.find_next(Id);
2544 int Id = RestoreMBBs.find_first();
2546 std::vector<SRInfo> &restores = RestoreIdxes[Id];
2547 for (unsigned i = 0, e = restores.size(); i != e; ++i) {
2548 MachineInstrIndex index = restores[i].index;
2549 if (index == MachineInstrIndex())
2551 unsigned VReg = restores[i].vreg;
2552 LiveInterval &nI = getOrCreateInterval(VReg);
2553 bool isReMat = vrm.isReMaterialized(VReg);
2554 MachineInstr *MI = getInstructionFromIndex(index);
2555 bool CanFold = false;
2557 if (restores[i].canFold) {
2559 for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
2560 MachineOperand &MO = MI->getOperand(j);
2561 if (!MO.isReg() || MO.getReg() != VReg)
2565 // If this restore were to be folded, it would have been folded already.
2574 // Fold the load into the use if possible.
2575 bool Folded = false;
2576 if (CanFold && !Ops.empty()) {
2578 Folded = tryFoldMemoryOperand(MI, vrm, NULL,index,Ops,true,Slot,VReg);
2580 MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
2582 bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
2583 // If the rematerializable def is a load, also try to fold it.
2584 if (isLoadSS || ReMatDefMI->getDesc().canFoldAsLoad())
2585 Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
2586 Ops, isLoadSS, LdSlot, VReg);
2588 unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
2590 // Re-matting an instruction with virtual register use. Add the
2591 // register as an implicit use on the use MI and update the register
2592 // interval's spill weight to HUGE_VALF to prevent it from being spilled.
2594 LiveInterval &ImpLi = getInterval(ImpUse);
2595 ImpLi.weight = HUGE_VALF;
2596 MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
2601 // If folding is not possible / failed, then tell the spiller to issue a
2602 // load / rematerialization for us.
2604 nI.removeRange(getLoadIndex(index), getNextSlot(getUseIndex(index)));
2606 vrm.addRestorePoint(VReg, MI);
2608 Id = RestoreMBBs.find_next(Id);
2611 // Finalize intervals: add kills, finalize spill weights, and filter out dead intervals.
2613 std::vector<LiveInterval*> RetNewLIs;
2614 for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
2615 LiveInterval *LI = NewLIs[i];
2617 LI->weight /= InstrSlots::NUM * getApproximateInstructionCount(*LI);
2618 if (!AddedKill.count(LI)) {
2619 LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
2620 MachineInstrIndex LastUseIdx = getBaseIndex(LR->end);
2621 MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
2622 int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
2623 assert(UseIdx != -1);
2624 if (!LastUse->isRegTiedToDefOperand(UseIdx)) {
2625 LastUse->getOperand(UseIdx).setIsKill();
2626 vrm.addKillPoint(LI->reg, LastUseIdx);
2629 RetNewLIs.push_back(LI);
2633 handleSpilledImpDefs(li, vrm, rc, RetNewLIs);
2637 /// hasAllocatableSuperReg - Return true if the specified physical register has
2638 /// any super register that's allocatable.
2639 bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
2640 for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS)
2641 if (allocatableRegs_[*AS] && hasInterval(*AS))
2646 /// getRepresentativeReg - Find the largest super register of the specified
2647 /// physical register.
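/// For example, on 32-bit x86 the representative of AL would typically be
/// EAX, its largest allocatable super-register (assuming EAX has an
/// interval here).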
2648 unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
2649 // Find the largest super-register that is allocatable.
2650 unsigned BestReg = Reg;
2651 for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
2652 unsigned SuperReg = *AS;
2653 if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) {
2661 /// getNumConflictsWithPhysReg - Return the number of uses and defs of the
2662 /// specified interval that conflicts with the specified physical register.
2663 unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
2664 unsigned PhysReg) const {
2665 unsigned NumConflicts = 0;
2666 const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg));
2667 for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
2668 E = mri_->reg_end(); I != E; ++I) {
2669 MachineOperand &O = I.getOperand();
2670 MachineInstr *MI = O.getParent();
2671 MachineInstrIndex Index = getInstructionIndex(MI);
2672 if (pli.liveAt(Index))
2675 return NumConflicts;
2678 /// spillPhysRegAroundRegDefsUses - Spill the specified physical register
2679 /// around all defs and uses of the specified interval. Return true if it
2680 /// was able to cut its interval.
2681 bool LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
2682 unsigned PhysReg, VirtRegMap &vrm) {
2683 unsigned SpillReg = getRepresentativeReg(PhysReg);
2685 for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
2686 // If there are registers which alias PhysReg but which are not a
2687 // sub-register of the chosen representative super register, assert,
2688 // since we can't handle that case yet.
2689 assert(*AS == SpillReg || !allocatableRegs_[*AS] || !hasInterval(*AS) ||
2690 tri_->isSuperRegister(*AS, SpillReg));
2693 LiveInterval &pli = getInterval(SpillReg);
2694 SmallPtrSet<MachineInstr*, 8> SeenMIs;
2695 for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
2696 E = mri_->reg_end(); I != E; ++I) {
2697 MachineOperand &O = I.getOperand();
2698 MachineInstr *MI = O.getParent();
2699 if (SeenMIs.count(MI))
2702 MachineInstrIndex Index = getInstructionIndex(MI);
2703 if (pli.liveAt(Index)) {
2704 vrm.addEmergencySpill(SpillReg, MI);
2705 MachineInstrIndex StartIdx = getLoadIndex(Index);
2706 MachineInstrIndex EndIdx = getNextSlot(getStoreIndex(Index));
2707 if (pli.isInOneLiveRange(StartIdx, EndIdx)) {
2708 pli.removeRange(StartIdx, EndIdx);
2711 std::string msg;
2712 raw_string_ostream Msg(msg);
2713 Msg << "Ran out of registers during register allocation!";
2714 if (MI->getOpcode() == TargetInstrInfo::INLINEASM) {
2715 Msg << "\nPlease check your inline asm statement for invalid "
2716 << "constraints:\n";
2717 MI->print(Msg, tm_);
2719 llvm_report_error(Msg.str());
2721 for (const unsigned* AS = tri_->getSubRegisters(SpillReg); *AS; ++AS) {
2722 if (!hasInterval(*AS))
2724 LiveInterval &spli = getInterval(*AS);
2725 if (spli.liveAt(Index))
2726 spli.removeRange(getLoadIndex(Index), getNextSlot(getStoreIndex(Index)));
2733 LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg,
2734 MachineInstr* startInst) {
2735 LiveInterval& Interval = getOrCreateInterval(reg);
2736 VNInfo* VN = Interval.getNextValue(
2737 MachineInstrIndex(getInstructionIndex(startInst), MachineInstrIndex::DEF),
2738 startInst, true, getVNInfoAllocator());
2739 VN->setHasPHIKill(true);
2740 VN->kills.push_back(terminatorGaps[startInst->getParent()]);
2741 LiveRange LR(
2742 MachineInstrIndex(getInstructionIndex(startInst), MachineInstrIndex::DEF),
2743 getNextSlot(getMBBEndIdx(startInst->getParent())), VN);
2744 Interval.addRange(LR);