1 //===-- LiveIntervalAnalysis.cpp - Live Interval Analysis -----------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file implements the LiveInterval analysis pass which is used
11 // by the Linear Scan Register allocator. This pass linearizes the
12 // basic blocks of the function in DFS order and uses the
13 // LiveVariables pass to conservatively compute live intervals for
14 // each virtual and physical register.
16 //===----------------------------------------------------------------------===//
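//
// A note on numbering (summarising the helpers used throughout this file):
// every instruction is given a small group of index slots -- roughly a load,
// use, def and store position -- which is why helpers such as getUseIndex(),
// getDefIndex() and getStoreIndex() appear below, and why ranges are written
// in forms like [defSlot(def), useSlot(kill)+1).
//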
18 #define DEBUG_TYPE "liveintervals"
19 #include "llvm/CodeGen/LiveIntervalAnalysis.h"
20 #include "VirtRegMap.h"
21 #include "llvm/Value.h"
22 #include "llvm/Analysis/AliasAnalysis.h"
23 #include "llvm/CodeGen/LiveVariables.h"
24 #include "llvm/CodeGen/MachineFrameInfo.h"
25 #include "llvm/CodeGen/MachineInstr.h"
26 #include "llvm/CodeGen/MachineInstrBuilder.h"
27 #include "llvm/CodeGen/MachineLoopInfo.h"
28 #include "llvm/CodeGen/MachineRegisterInfo.h"
29 #include "llvm/CodeGen/Passes.h"
30 #include "llvm/CodeGen/PseudoSourceValue.h"
31 #include "llvm/Target/TargetRegisterInfo.h"
32 #include "llvm/Target/TargetInstrInfo.h"
33 #include "llvm/Target/TargetMachine.h"
34 #include "llvm/Target/TargetOptions.h"
35 #include "llvm/Support/CommandLine.h"
36 #include "llvm/Support/Debug.h"
37 #include "llvm/Support/ErrorHandling.h"
38 #include "llvm/Support/raw_ostream.h"
39 #include "llvm/ADT/DepthFirstIterator.h"
40 #include "llvm/ADT/SmallSet.h"
41 #include "llvm/ADT/Statistic.h"
42 #include "llvm/ADT/STLExtras.h"
48 // Hidden options for help debugging.
49 static cl::opt<bool> DisableReMat("disable-rematerialization",
50 cl::init(false), cl::Hidden);
52 static cl::opt<bool> EnableAggressiveRemat("aggressive-remat", cl::Hidden);
54 static cl::opt<bool> EnableFastSpilling("fast-spill",
55 cl::init(false), cl::Hidden);
57 static cl::opt<bool> EarlyCoalescing("early-coalescing", cl::init(false));
59 static cl::opt<int> CoalescingLimit("early-coalescing-limit",
60 cl::init(-1), cl::Hidden);
62 STATISTIC(numIntervals , "Number of original intervals");
63 STATISTIC(numFolds , "Number of loads/stores folded into instructions");
64 STATISTIC(numSplits , "Number of intervals split");
65 STATISTIC(numCoalescing, "Number of early coalescing performed");
67 char LiveIntervals::ID = 0;
68 static RegisterPass<LiveIntervals> X("liveintervals", "Live Interval Analysis");
70 void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
72 AU.addRequired<AliasAnalysis>();
73 AU.addPreserved<AliasAnalysis>();
74 AU.addPreserved<LiveVariables>();
75 AU.addRequired<LiveVariables>();
76 AU.addPreservedID(MachineLoopInfoID);
77 AU.addPreservedID(MachineDominatorsID);
80 AU.addPreservedID(PHIEliminationID);
81 AU.addRequiredID(PHIEliminationID);
84 AU.addRequiredID(TwoAddressInstructionPassID);
85 MachineFunctionPass::getAnalysisUsage(AU);
88 void LiveIntervals::releaseMemory() {
89 // Free the live intervals themselves.
90 for (DenseMap<unsigned, LiveInterval*>::iterator I = r2iMap_.begin(),
91 E = r2iMap_.end(); I != E; ++I)
99 terminatorGaps.clear();
100 phiJoinCopies.clear();
102 // Release VNInfo memory regions after all VNInfo objects are dtor'd.
103 VNInfoAllocator.Reset();
104 while (!CloneMIs.empty()) {
105 MachineInstr *MI = CloneMIs.back();
107 mf_->DeleteMachineInstr(MI);
111 static bool CanTurnIntoImplicitDef(MachineInstr *MI, unsigned Reg,
112 unsigned OpIdx, const TargetInstrInfo *tii_){
113 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
114 if (tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg) &&
118 if (OpIdx == 2 && MI->getOpcode() == TargetInstrInfo::SUBREG_TO_REG)
120 if (OpIdx == 1 && MI->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG)
125 /// processImplicitDefs - Process IMPLICIT_DEF instructions and make sure
126 /// there is one implicit_def for each use. Add isUndef marker to
127 /// implicit_def defs and their uses.
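// Illustrative example (register numbers made up): given
//   %reg1024<def> = IMPLICIT_DEF
//   %reg1025<def> = ... move of %reg1024 ...
// the move is itself turned into an IMPLICIT_DEF of %reg1025, and remaining
// non-copy uses of %reg1024 are marked <undef>, so each use ends up with its
// own implicit definition.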
128 void LiveIntervals::processImplicitDefs() {
129 SmallSet<unsigned, 8> ImpDefRegs;
130 SmallVector<MachineInstr*, 8> ImpDefMIs;
131 MachineBasicBlock *Entry = mf_->begin();
132 SmallPtrSet<MachineBasicBlock*,16> Visited;
133 for (df_ext_iterator<MachineBasicBlock*, SmallPtrSet<MachineBasicBlock*,16> >
134 DFI = df_ext_begin(Entry, Visited), E = df_ext_end(Entry, Visited);
136 MachineBasicBlock *MBB = *DFI;
137 for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
139 MachineInstr *MI = &*I;
141 if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
142 unsigned Reg = MI->getOperand(0).getReg();
143 ImpDefRegs.insert(Reg);
144 ImpDefMIs.push_back(MI);
148 if (MI->getOpcode() == TargetInstrInfo::INSERT_SUBREG) {
149 MachineOperand &MO = MI->getOperand(2);
150 if (ImpDefRegs.count(MO.getReg())) {
151 // %reg1032<def> = INSERT_SUBREG %reg1032, undef, 2
152 // This is an identity copy, eliminate it now.
154 LiveVariables::VarInfo& vi = lv_->getVarInfo(MO.getReg());
157 MI->eraseFromParent();
162 bool ChangedToImpDef = false;
163 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
164 MachineOperand& MO = MI->getOperand(i);
165 if (!MO.isReg() || !MO.isUse() || MO.isUndef())
167 unsigned Reg = MO.getReg();
170 if (!ImpDefRegs.count(Reg))
172 // Use is a copy, just turn it into an implicit_def.
173 if (CanTurnIntoImplicitDef(MI, Reg, i, tii_)) {
174 bool isKill = MO.isKill();
175 MI->setDesc(tii_->get(TargetInstrInfo::IMPLICIT_DEF));
176 for (int j = MI->getNumOperands() - 1, ee = 0; j > ee; --j)
177 MI->RemoveOperand(j);
179 ImpDefRegs.erase(Reg);
180 LiveVariables::VarInfo& vi = lv_->getVarInfo(Reg);
183 ChangedToImpDef = true;
188 if (MO.isKill() || MI->isRegTiedToDefOperand(i)) {
189 // Make sure any other uses of this register in this instruction are handled as well.
190 for (unsigned j = i+1; j != e; ++j) {
191 MachineOperand &MOJ = MI->getOperand(j);
192 if (MOJ.isReg() && MOJ.isUse() && MOJ.getReg() == Reg)
195 ImpDefRegs.erase(Reg);
199 if (ChangedToImpDef) {
200 // Backtrack to process this new implicit_def.
203 for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
204 MachineOperand& MO = MI->getOperand(i);
205 if (!MO.isReg() || !MO.isDef())
207 ImpDefRegs.erase(MO.getReg());
212 // Any outstanding liveout implicit_def's?
213 for (unsigned i = 0, e = ImpDefMIs.size(); i != e; ++i) {
214 MachineInstr *MI = ImpDefMIs[i];
215 unsigned Reg = MI->getOperand(0).getReg();
216 if (TargetRegisterInfo::isPhysicalRegister(Reg) ||
217 !ImpDefRegs.count(Reg)) {
218 // Delete all "local" implicit_def's. That includes those which define
219 // physical registers since they cannot be liveout.
220 MI->eraseFromParent();
224 // If there are multiple defs of the same register and at least one
225 // is not an implicit_def, do not insert implicit_def's before the uses.
228 for (MachineRegisterInfo::def_iterator DI = mri_->def_begin(Reg),
229 DE = mri_->def_end(); DI != DE; ++DI) {
230 if (DI->getOpcode() != TargetInstrInfo::IMPLICIT_DEF) {
238 // The only implicit_def's we want to keep are those that are live out of their own block.
240 MI->eraseFromParent();
242 for (MachineRegisterInfo::use_iterator UI = mri_->use_begin(Reg),
243 UE = mri_->use_end(); UI != UE; ) {
244 MachineOperand &RMO = UI.getOperand();
245 MachineInstr *RMI = &*UI;
247 MachineBasicBlock *RMBB = RMI->getParent();
251 // Turn a copy use into an implicit_def.
252 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
253 if (tii_->isMoveInstr(*RMI, SrcReg, DstReg, SrcSubReg, DstSubReg) &&
255 RMI->setDesc(tii_->get(TargetInstrInfo::IMPLICIT_DEF));
256 for (int j = RMI->getNumOperands() - 1, ee = 0; j > ee; --j)
257 RMI->RemoveOperand(j);
261 const TargetRegisterClass* RC = mri_->getRegClass(Reg);
262 unsigned NewVReg = mri_->createVirtualRegister(RC);
274 void LiveIntervals::computeNumbering() {
275 Index2MiMap OldI2MI = i2miMap_;
276 std::vector<IdxMBBPair> OldI2MBB = Idx2MBBMap;
282 terminatorGaps.clear();
283 phiJoinCopies.clear();
287 // Number MachineInstrs and MachineBasicBlocks.
288 // Initialize MBB indexes to a sentinel.
289 MBB2IdxMap.resize(mf_->getNumBlockIDs(),
290 std::make_pair(MachineInstrIndex(),MachineInstrIndex()));
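// Note: the numbering scheme deliberately leaves empty slots (indices that
// map to no instruction) around block boundaries and after instructions;
// later phases that insert or fold instructions (see tryFoldMemoryOperand)
// can then reuse an existing index instead of forcing a full renumbering.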
292 MachineInstrIndex MIIndex;
293 for (MachineFunction::iterator MBB = mf_->begin(), E = mf_->end();
295 MachineInstrIndex StartIdx = MIIndex;
297 // Insert an empty slot at the beginning of each block.
298 MIIndex = getNextIndex(MIIndex);
299 i2miMap_.push_back(0);
301 for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
304 if (I == MBB->getFirstTerminator()) {
305 // Leave a gap before terminators; this is where we will point PHI kills.
307 MachineInstrIndex tGap(true, MIIndex);
309 bool inserted = terminatorGaps.insert(std::make_pair(&*MBB, tGap)).second;
311 assert(inserted && "Multiple 'first' terminators encountered during numbering.");
312 inserted = inserted; // Avoid compiler warning if assertions turned off.
313 i2miMap_.push_back(0);
315 MIIndex = getNextIndex(MIIndex);
318 bool inserted = mi2iMap_.insert(std::make_pair(I, MIIndex)).second;
319 assert(inserted && "multiple MachineInstr -> index mappings");
321 i2miMap_.push_back(I);
322 MIIndex = getNextIndex(MIIndex);
325 // Insert max(1, numdefs) empty slots after every instruction.
326 unsigned Slots = I->getDesc().getNumDefs();
330 MIIndex = getNextIndex(MIIndex);
331 i2miMap_.push_back(0);
336 if (MBB->getFirstTerminator() == MBB->end()) {
337 // Leave a gap before terminators; this is where we will point PHI kills.
339 MachineInstrIndex tGap(true, MIIndex);
341 bool inserted = terminatorGaps.insert(std::make_pair(&*MBB, tGap)).second;
343 assert(inserted && "Multiple 'first' terminators encountered during numbering.");
344 inserted = inserted; // Avoid compiler warning if assertions turned off.
345 i2miMap_.push_back(0);
347 MIIndex = getNextIndex(MIIndex);
350 // Set the MBB2IdxMap entry for this MBB.
351 MBB2IdxMap[MBB->getNumber()] = std::make_pair(StartIdx, getPrevSlot(MIIndex));
352 Idx2MBBMap.push_back(std::make_pair(StartIdx, MBB));
355 std::sort(Idx2MBBMap.begin(), Idx2MBBMap.end(), Idx2MBBCompare());
357 if (!OldI2MI.empty())
358 for (iterator OI = begin(), OE = end(); OI != OE; ++OI) {
359 for (LiveInterval::iterator LI = OI->second->begin(),
360 LE = OI->second->end(); LI != LE; ++LI) {
362 // Remap the start index of the live range to the corresponding new
363 // number, or our best guess at what it _should_ correspond to if the
364 // original instruction has been erased. This is either the following
365 // instruction or its predecessor.
366 unsigned index = LI->start.getVecIndex();
367 MachineInstrIndex::Slot offset = LI->start.getSlot();
368 if (LI->start.isLoad()) {
369 std::vector<IdxMBBPair>::const_iterator I =
370 std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), LI->start);
371 // Take the pair containing the index
372 std::vector<IdxMBBPair>::const_iterator J =
373 (I == OldI2MBB.end() && OldI2MBB.size()>0) ? (I-1): I;
375 LI->start = getMBBStartIdx(J->second);
377 LI->start = MachineInstrIndex(
378 MachineInstrIndex(mi2iMap_[OldI2MI[index]]),
379 (MachineInstrIndex::Slot)offset);
382 // Remap the ending index in the same way that we remapped the start,
383 // except for the final step where we always map to the immediately
384 // following instruction.
385 index = (getPrevSlot(LI->end)).getVecIndex();
386 offset = LI->end.getSlot();
387 if (LI->end.isLoad()) {
388 // VReg dies at end of block.
389 std::vector<IdxMBBPair>::const_iterator I =
390 std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), LI->end);
393 LI->end = getNextSlot(getMBBEndIdx(I->second));
395 unsigned idx = index;
396 while (index < OldI2MI.size() && !OldI2MI[index]) ++index;
398 if (index != OldI2MI.size())
400 LI->end = MachineInstrIndex(mi2iMap_[OldI2MI[index]],
401 (idx == index ? offset : MachineInstrIndex::LOAD));
404 else LI->end = MachineInstrIndex(MachineInstrIndex::NUM * i2miMap_.size());
408 for (LiveInterval::vni_iterator VNI = OI->second->vni_begin(),
409 VNE = OI->second->vni_end(); VNI != VNE; ++VNI) {
412 // Remap the VNInfo def index, which works the same as the
413 // start indices above. VN's with special sentinel defs
414 // don't need to be remapped.
415 if (vni->isDefAccurate() && !vni->isUnused()) {
416 unsigned index = vni->def.getVecIndex();
417 MachineInstrIndex::Slot offset = vni->def.getSlot();
418 if (vni->def.isLoad()) {
419 std::vector<IdxMBBPair>::const_iterator I =
420 std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), vni->def);
421 // Take the pair containing the index
422 std::vector<IdxMBBPair>::const_iterator J =
423 (I == OldI2MBB.end() && OldI2MBB.size()>0) ? (I-1): I;
425 vni->def = getMBBStartIdx(J->second);
427 vni->def = MachineInstrIndex(mi2iMap_[OldI2MI[index]], offset);
431 // Remap the VNInfo kill indices, which works the same as
432 // the end indices above.
433 for (size_t i = 0; i < vni->kills.size(); ++i) {
434 unsigned index = getPrevSlot(vni->kills[i]).getVecIndex();
435 MachineInstrIndex::Slot offset = vni->kills[i].getSlot();
437 if (vni->kills[i].isLoad()) {
438 assert(0 && "Value killed at a load slot.");
439 /*std::vector<IdxMBBPair>::const_iterator I =
440 std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), vni->kills[i]);
443 vni->kills[i] = getMBBEndIdx(I->second);*/
445 if (vni->kills[i].isPHIIndex()) {
446 std::vector<IdxMBBPair>::const_iterator I =
447 std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), vni->kills[i]);
449 vni->kills[i] = terminatorGaps[I->second];
451 assert(OldI2MI[index] != 0 &&
452 "Kill refers to instruction not present in index maps.");
453 vni->kills[i] = MachineInstrIndex(mi2iMap_[OldI2MI[index]], offset);
457 unsigned idx = index;
458 while (index < OldI2MI.size() && !OldI2MI[index]) ++index;
460 if (index != OldI2MI.size())
461 vni->kills[i] = mi2iMap_[OldI2MI[index]] +
462 (idx == index ? offset : 0);
464 else vni->kills[i] = InstrSlots::NUM * i2miMap_.size();
472 void LiveIntervals::scaleNumbering(int factor) {
474 // * scale MBB begin and end points
475 // * scale all ranges.
476 // * Update VNI structures.
477 // * Scale instruction numberings
479 // Scale the MBB indices.
481 for (MachineFunction::iterator MBB = mf_->begin(), MBBE = mf_->end();
482 MBB != MBBE; ++MBB) {
483 std::pair<MachineInstrIndex, MachineInstrIndex> &mbbIndices = MBB2IdxMap[MBB->getNumber()];
484 mbbIndices.first = mbbIndices.first.scale(factor);
485 mbbIndices.second = mbbIndices.second.scale(factor);
486 Idx2MBBMap.push_back(std::make_pair(mbbIndices.first, MBB));
488 std::sort(Idx2MBBMap.begin(), Idx2MBBMap.end(), Idx2MBBCompare());
490 // Scale terminator gaps.
491 for (DenseMap<MachineBasicBlock*, MachineInstrIndex>::iterator
492 TGI = terminatorGaps.begin(), TGE = terminatorGaps.end();
494 terminatorGaps[TGI->first] = TGI->second.scale(factor);
497 // Scale the intervals.
498 for (iterator LI = begin(), LE = end(); LI != LE; ++LI) {
499 LI->second->scaleNumbering(factor);
502 // Scale MachineInstrs.
503 Mi2IndexMap oldmi2iMap = mi2iMap_;
504 MachineInstrIndex highestSlot;
505 for (Mi2IndexMap::iterator MI = oldmi2iMap.begin(), ME = oldmi2iMap.end();
507 MachineInstrIndex newSlot = MI->second.scale(factor);
508 mi2iMap_[MI->first] = newSlot;
509 highestSlot = std::max(highestSlot, newSlot);
512 unsigned highestVIndex = highestSlot.getVecIndex();
514 i2miMap_.resize(highestVIndex + 1);
515 for (Mi2IndexMap::iterator MI = mi2iMap_.begin(), ME = mi2iMap_.end();
517 i2miMap_[MI->second.getVecIndex()] = const_cast<MachineInstr *>(MI->first);
523 /// runOnMachineFunction - Compute live intervals for the whole function.
525 bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
526 mf_ = &fn;
527 mri_ = &mf_->getRegInfo();
528 tm_ = &fn.getTarget();
529 tri_ = tm_->getRegisterInfo();
530 tii_ = tm_->getInstrInfo();
531 aa_ = &getAnalysis<AliasAnalysis>();
532 lv_ = &getAnalysis<LiveVariables>();
533 allocatableRegs_ = tri_->getAllocatableSet(fn);
535 processImplicitDefs();
536 computeNumbering();
537 computeIntervals();
538 performEarlyCoalescing();
540 numIntervals += getNumIntervals();
546 /// print - Implement the dump method.
547 void LiveIntervals::print(raw_ostream &OS, const Module* ) const {
548 OS << "********** INTERVALS **********\n";
549 for (const_iterator I = begin(), E = end(); I != E; ++I) {
550 I->second->print(OS, tri_);
557 void LiveIntervals::printInstrs(raw_ostream &OS) const {
558 OS << "********** MACHINEINSTRS **********\n";
560 for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end();
561 mbbi != mbbe; ++mbbi) {
562 OS << ((Value*)mbbi->getBasicBlock())->getName() << ":\n";
563 for (MachineBasicBlock::iterator mii = mbbi->begin(),
564 mie = mbbi->end(); mii != mie; ++mii) {
565 OS << getInstructionIndex(mii) << '\t' << *mii;
570 void LiveIntervals::dumpInstrs() const {
574 /// conflictsWithPhysRegDef - Returns true if the specified register
575 /// is defined during the duration of the specified interval.
576 bool LiveIntervals::conflictsWithPhysRegDef(const LiveInterval &li,
577 VirtRegMap &vrm, unsigned reg) {
578 for (LiveInterval::Ranges::const_iterator
579 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
580 for (MachineInstrIndex index = getBaseIndex(I->start),
581 end = getNextIndex(getBaseIndex(getPrevSlot(I->end))); index != end;
582 index = getNextIndex(index)) {
583 // skip deleted instructions
584 while (index != end && !getInstructionFromIndex(index))
585 index = getNextIndex(index);
586 if (index == end) break;
588 MachineInstr *MI = getInstructionFromIndex(index);
589 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
590 if (tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
591 if (SrcReg == li.reg || DstReg == li.reg)
593 for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
594 MachineOperand& mop = MI->getOperand(i);
597 unsigned PhysReg = mop.getReg();
598 if (PhysReg == 0 || PhysReg == li.reg)
600 if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
601 if (!vrm.hasPhys(PhysReg))
603 PhysReg = vrm.getPhys(PhysReg);
605 if (PhysReg && tri_->regsOverlap(PhysReg, reg))
614 /// conflictsWithPhysRegRef - Similar to conflictsWithPhysRegDef except
615 /// it can check use as well.
616 bool LiveIntervals::conflictsWithPhysRegRef(LiveInterval &li,
617 unsigned Reg, bool CheckUse,
618 SmallPtrSet<MachineInstr*,32> &JoinedCopies) {
619 for (LiveInterval::Ranges::const_iterator
620 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
621 for (MachineInstrIndex index = getBaseIndex(I->start),
622 end = getNextIndex(getBaseIndex(getPrevSlot(I->end))); index != end;
623 index = getNextIndex(index)) {
624 // Skip deleted instructions.
625 MachineInstr *MI = 0;
626 while (index != end) {
627 MI = getInstructionFromIndex(index);
630 index = getNextIndex(index);
632 if (index == end) break;
634 if (JoinedCopies.count(MI))
636 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
637 MachineOperand& MO = MI->getOperand(i);
640 if (MO.isUse() && !CheckUse)
642 unsigned PhysReg = MO.getReg();
643 if (PhysReg == 0 || TargetRegisterInfo::isVirtualRegister(PhysReg))
645 if (tri_->isSubRegister(Reg, PhysReg))
655 static void printRegName(unsigned reg, const TargetRegisterInfo* tri_) {
656 if (TargetRegisterInfo::isPhysicalRegister(reg))
657 errs() << tri_->getName(reg);
659 errs() << "%reg" << reg;
663 void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
664 MachineBasicBlock::iterator mi,
665 MachineInstrIndex MIIdx,
668 LiveInterval &interval) {
670 errs() << "\t\tregister: ";
671 printRegName(interval.reg, tri_);
674 // Virtual registers may be defined multiple times (due to phi
675 // elimination and 2-addr elimination). Much of what we do only has to be
676 // done once for the vreg. We use an empty interval to detect the first
677 // time we see a vreg.
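// For example, after phi elimination a vreg typically has one "real" def
// plus a copy def in each predecessor of the join block; only the first def
// we encounter builds the interval from scratch, the later ones are patched
// in by the else-branches below.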
678 LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);
679 if (interval.empty()) {
680 // Get the Idx of the defining instructions.
681 MachineInstrIndex defIndex = getDefIndex(MIIdx);
682 // Earlyclobbers move back one, so that they overlap the live range of inputs.
684 if (MO.isEarlyClobber())
685 defIndex = getUseIndex(MIIdx);
687 MachineInstr *CopyMI = NULL;
688 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
689 if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
690 mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
691 mi->getOpcode() == TargetInstrInfo::SUBREG_TO_REG ||
692 tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
694 // Earlyclobbers move back one.
695 ValNo = interval.getNextValue(defIndex, CopyMI, true, VNInfoAllocator);
697 assert(ValNo->id == 0 && "First value in interval is not 0?");
699 // Loop over all of the blocks that the vreg is defined in. There are
700 // two cases we have to handle here. The most common case is a vreg
701 // whose lifetime is contained within a basic block. In this case there
702 // will be a single kill, in MBB, which comes after the definition.
703 if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
704 // FIXME: what about dead vars?
705 MachineInstrIndex killIdx;
706 if (vi.Kills[0] != mi)
707 killIdx = getNextSlot(getUseIndex(getInstructionIndex(vi.Kills[0])));
708 else if (MO.isEarlyClobber())
709 // Earlyclobbers that die in this instruction move up one extra, to
710 // compensate for having the starting point moved back one. This
711 // gets them to overlap the live range of other outputs.
712 killIdx = getNextSlot(getNextSlot(defIndex));
714 killIdx = getNextSlot(defIndex);
716 // If the kill happens after the definition, we have an intra-block live range.
718 if (killIdx > defIndex) {
719 assert(vi.AliveBlocks.empty() &&
720 "Shouldn't be alive across any blocks!");
721 LiveRange LR(defIndex, killIdx, ValNo);
722 interval.addRange(LR);
723 DEBUG(errs() << " +" << LR << "\n");
724 ValNo->addKill(killIdx);
729 // The other case we handle is when a virtual register lives to the end
730 // of the defining block, potentially live across some blocks, then is
731 // live into some number of blocks, but gets killed. Start by adding a
732 // range that goes from this definition to the end of the defining block.
733 LiveRange NewLR(defIndex, getNextSlot(getMBBEndIdx(mbb)), ValNo);
734 DEBUG(errs() << " +" << NewLR);
735 interval.addRange(NewLR);
737 // Iterate over all of the blocks that the variable is completely
738 // live in, adding [instrIndex(begin), instrIndex(end)+4) to the live interval.
740 for (SparseBitVector<>::iterator I = vi.AliveBlocks.begin(),
741 E = vi.AliveBlocks.end(); I != E; ++I) {
742 LiveRange LR(getMBBStartIdx(*I),
743 getNextSlot(getMBBEndIdx(*I)), // MBB ends at -1.
745 interval.addRange(LR);
746 DEBUG(errs() << " +" << LR);
749 // Finally, this virtual register is live from the start of any killing
750 // block to the 'use' slot of the killing instruction.
751 for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
752 MachineInstr *Kill = vi.Kills[i];
753 MachineInstrIndex killIdx =
754 getNextSlot(getUseIndex(getInstructionIndex(Kill)));
755 LiveRange LR(getMBBStartIdx(Kill->getParent()), killIdx, ValNo);
756 interval.addRange(LR);
757 ValNo->addKill(killIdx);
758 DEBUG(errs() << " +" << LR);
762 // If this is the second time we see a virtual register definition, it
763 // must be due to phi elimination or two addr elimination. If this is
764 // the result of two address elimination, then the vreg is one of the
765 // def-and-use register operand.
766 if (mi->isRegTiedToUseOperand(MOIdx)) {
767 // If this is a two-address definition, then we have already processed
768 // the live range. The only problem is that we didn't realize there
769 // are actually two values in the live interval. Because of this we
770 // need to take the live range that defines this register and split it into two values.
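// Illustrative case (made-up registers), after two-address rewriting:
//   %reg1026<def> = MOV %reg1025
//   %reg1026<def> = ADD %reg1026<use,tied>, %reg1027
// The single value built for the first def must now be split so the value
// consumed by the ADD and the value it produces get distinct value numbers.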
772 assert(interval.containsOneValue());
773 MachineInstrIndex DefIndex = getDefIndex(interval.getValNumInfo(0)->def);
774 MachineInstrIndex RedefIndex = getDefIndex(MIIdx);
775 if (MO.isEarlyClobber())
776 RedefIndex = getUseIndex(MIIdx);
778 const LiveRange *OldLR =
779 interval.getLiveRangeContaining(getPrevSlot(RedefIndex));
780 VNInfo *OldValNo = OldLR->valno;
782 // Delete the initial value, which should be short and continuous,
783 // because the 2-addr copy must be in the same MBB as the redef.
784 interval.removeRange(DefIndex, RedefIndex);
786 // Two-address vregs should always only be redefined once. This means
787 // that at this point, there should be exactly one value number in it.
788 assert(interval.containsOneValue() && "Unexpected 2-addr liveint!");
790 // The new value number (#1) is defined by the instruction we claimed defined value #0.
792 VNInfo *ValNo = interval.getNextValue(OldValNo->def, OldValNo->getCopy(),
793 false, // update at *
795 ValNo->setFlags(OldValNo->getFlags()); // * <- updating here
797 // Value#0 is now defined by the 2-addr instruction.
798 OldValNo->def = RedefIndex;
799 OldValNo->setCopy(0);
800 if (MO.isEarlyClobber())
801 OldValNo->setHasRedefByEC(true);
803 // Add the new live interval which replaces the range for the input copy.
804 LiveRange LR(DefIndex, RedefIndex, ValNo);
805 DEBUG(errs() << " replace range with " << LR);
806 interval.addRange(LR);
807 ValNo->addKill(RedefIndex);
809 // If this redefinition is dead, we need to add a dummy unit live
810 // range covering the def slot.
813 if (MO.isDead()) interval.addRange(LiveRange(RedefIndex, MO.isEarlyClobber() ?
814 getNextSlot(getNextSlot(RedefIndex)) :
815 getNextSlot(RedefIndex), OldValNo));
818 errs() << " RESULT: ";
819 interval.print(errs(), tri_);
822 // Otherwise, this must be because of phi elimination. If this is the
823 // first redefinition of the vreg that we have seen, go back and change
824 // the live range in the PHI block to be a different value number.
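// Roughly: after phi elimination the joined vreg is defined by a copy in
// each predecessor block; the range that belonged to the join block itself
// is carved out here and given its own, PHI-defined value number.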
825 if (interval.containsOneValue()) {
826 // Remove the old range that we now know has an incorrect number.
827 VNInfo *VNI = interval.getValNumInfo(0);
828 MachineInstr *Killer = vi.Kills[0];
829 phiJoinCopies.push_back(Killer);
830 MachineInstrIndex Start = getMBBStartIdx(Killer->getParent());
831 MachineInstrIndex End =
832 getNextSlot(getUseIndex(getInstructionIndex(Killer)));
834 errs() << " Removing [" << Start << "," << End << "] from: ";
835 interval.print(errs(), tri_);
838 interval.removeRange(Start, End);
839 assert(interval.ranges.size() == 1 &&
840 "Newly discovered PHI interval has >1 ranges.");
841 MachineBasicBlock *killMBB = getMBBFromIndex(interval.endIndex());
842 VNI->addKill(terminatorGaps[killMBB]);
843 VNI->setHasPHIKill(true);
845 errs() << " RESULT: ";
846 interval.print(errs(), tri_);
849 // Replace the interval with one of a NEW value number. Note that this
850 // value number isn't actually defined by an instruction, weird huh? :)
851 LiveRange LR(Start, End,
852 interval.getNextValue(MachineInstrIndex(mbb->getNumber()),
853 0, false, VNInfoAllocator));
854 LR.valno->setIsPHIDef(true);
855 DEBUG(errs() << " replace range with " << LR);
856 interval.addRange(LR);
857 LR.valno->addKill(End);
859 errs() << " RESULT: ";
860 interval.print(errs(), tri_);
864 // In the case of PHI elimination, each variable definition is only
865 // live until the end of the block. We've already taken care of the
866 // rest of the live range.
867 MachineInstrIndex defIndex = getDefIndex(MIIdx);
868 if (MO.isEarlyClobber())
869 defIndex = getUseIndex(MIIdx);
872 MachineInstr *CopyMI = NULL;
873 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
874 if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
875 mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
876 mi->getOpcode() == TargetInstrInfo::SUBREG_TO_REG ||
877 tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
879 ValNo = interval.getNextValue(defIndex, CopyMI, true, VNInfoAllocator);
881 MachineInstrIndex killIndex = getNextSlot(getMBBEndIdx(mbb));
882 LiveRange LR(defIndex, killIndex, ValNo);
883 interval.addRange(LR);
884 ValNo->addKill(terminatorGaps[mbb]);
885 ValNo->setHasPHIKill(true);
886 DEBUG(errs() << " +" << LR);
890 DEBUG(errs() << '\n');
893 void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
894 MachineBasicBlock::iterator mi,
895 MachineInstrIndex MIIdx,
897 LiveInterval &interval,
898 MachineInstr *CopyMI) {
899 // A physical register cannot be live across basic blocks, so its
900 // lifetime must end somewhere in its defining basic block.
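// Consequently the scan below only walks forward within MBB, stopping at the
// first kill or redefinition of the register.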
902 errs() << "\t\tregister: ";
903 printRegName(interval.reg, tri_);
906 MachineInstrIndex baseIndex = MIIdx;
907 MachineInstrIndex start = getDefIndex(baseIndex);
908 // Earlyclobbers move back one.
909 if (MO.isEarlyClobber())
910 start = getUseIndex(MIIdx);
911 MachineInstrIndex end = start;
913 // If it is not used after definition, it is considered dead at
914 // the instruction defining it. Hence its interval is:
915 // [defSlot(def), defSlot(def)+1)
916 // For earlyclobbers, the defSlot was pushed back one; the extra
917 // advance below compensates.
919 DEBUG(errs() << " dead");
920 if (MO.isEarlyClobber())
921 end = getNextSlot(getNextSlot(start));
923 end = getNextSlot(start);
927 // If it is not dead on definition, it must be killed by a
928 // subsequent instruction. Hence its interval is:
929 // [defSlot(def), useSlot(kill)+1)
930 baseIndex = getNextIndex(baseIndex);
931 while (++mi != MBB->end()) {
932 while (baseIndex.getVecIndex() < i2miMap_.size() &&
933 getInstructionFromIndex(baseIndex) == 0)
934 baseIndex = getNextIndex(baseIndex);
935 if (mi->killsRegister(interval.reg, tri_)) {
936 DEBUG(errs() << " killed");
937 end = getNextSlot(getUseIndex(baseIndex));
940 int DefIdx = mi->findRegisterDefOperandIdx(interval.reg, false, tri_);
942 if (mi->isRegTiedToUseOperand(DefIdx)) {
943 // Two-address instruction.
944 end = getDefIndex(baseIndex);
945 if (mi->getOperand(DefIdx).isEarlyClobber())
946 end = getUseIndex(baseIndex);
948 // Another instruction redefines the register before it is ever read.
949 // Then the register is essentially dead at the instruction that defines
950 // it. Hence its interval is:
951 // [defSlot(def), defSlot(def)+1)
952 DEBUG(errs() << " dead");
953 end = getNextSlot(start);
959 baseIndex = getNextIndex(baseIndex);
962 // The only case where we should see a dead physreg here without a killing
963 // instruction telling us it is dead is when it is live-in to the function
964 // and never used. Another possible case is that the implicit use of the
965 // physical register has been deleted by the two-address pass.
966 end = getNextSlot(start);
969 assert(start < end && "did not find end of interval?");
971 // Already exists? Extend old live interval.
972 LiveInterval::iterator OldLR = interval.FindLiveRangeContaining(start);
973 bool Extend = OldLR != interval.end();
974 VNInfo *ValNo = Extend
975 ? OldLR->valno : interval.getNextValue(start, CopyMI, true, VNInfoAllocator);
976 if (MO.isEarlyClobber() && Extend)
977 ValNo->setHasRedefByEC(true);
978 LiveRange LR(start, end, ValNo);
979 interval.addRange(LR);
980 LR.valno->addKill(end);
981 DEBUG(errs() << " +" << LR << '\n');
984 void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
985 MachineBasicBlock::iterator MI,
986 MachineInstrIndex MIIdx,
989 if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
990 handleVirtualRegisterDef(MBB, MI, MIIdx, MO, MOIdx,
991 getOrCreateInterval(MO.getReg()));
992 else if (allocatableRegs_[MO.getReg()]) {
993 MachineInstr *CopyMI = NULL;
994 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
995 if (MI->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
996 MI->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
997 MI->getOpcode() == TargetInstrInfo::SUBREG_TO_REG ||
998 tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
1000 handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
1001 getOrCreateInterval(MO.getReg()), CopyMI);
1002 // Def of a register also defines its sub-registers.
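// For example, on X86 a def of EAX also defines AX, AH and AL (illustrative;
// the exact alias set comes from the target's getSubRegisters() tables).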
1003 for (const unsigned* AS = tri_->getSubRegisters(MO.getReg()); *AS; ++AS)
1004 // If MI also modifies the sub-register explicitly, avoid processing it
1005 // more than once. Do not pass in TRI here so it checks for exact match.
1006 if (!MI->modifiesRegister(*AS))
1007 handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
1008 getOrCreateInterval(*AS), 0);
1012 void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
1013 MachineInstrIndex MIIdx,
1014 LiveInterval &interval, bool isAlias) {
1016 errs() << "\t\tlivein register: ";
1017 printRegName(interval.reg, tri_);
1020 // Look for kills; if the value reaches a def before it's killed, then it
1021 // shouldn't be considered a live-in.
1022 MachineBasicBlock::iterator mi = MBB->begin();
1023 MachineInstrIndex baseIndex = MIIdx;
1024 MachineInstrIndex start = baseIndex;
1025 while (baseIndex.getVecIndex() < i2miMap_.size() &&
1026 getInstructionFromIndex(baseIndex) == 0)
1027 baseIndex = getNextIndex(baseIndex);
1028 MachineInstrIndex end = baseIndex;
1029 bool SeenDefUse = false;
1031 while (mi != MBB->end()) {
1032 if (mi->killsRegister(interval.reg, tri_)) {
1033 DEBUG(errs() << " killed");
1034 end = getNextSlot(getUseIndex(baseIndex));
1037 } else if (mi->modifiesRegister(interval.reg, tri_)) {
1038 // Another instruction redefines the register before it is ever read.
1039 // Then the register is essentially dead at the instruction that defines
1040 // it. Hence its interval is:
1041 // [defSlot(def), defSlot(def)+1)
1042 DEBUG(errs() << " dead");
1043 end = getNextSlot(getDefIndex(start));
1048 baseIndex = getNextIndex(baseIndex);
1050 if (mi != MBB->end()) {
1051 while (baseIndex.getVecIndex() < i2miMap_.size() &&
1052 getInstructionFromIndex(baseIndex) == 0)
1053 baseIndex = getNextIndex(baseIndex);
1057 // Live-in register might not be used at all.
1060 DEBUG(errs() << " dead");
1061 end = getNextSlot(getDefIndex(MIIdx));
1063 DEBUG(errs() << " live through");
1069 interval.getNextValue(MachineInstrIndex(MBB->getNumber()),
1070 0, false, VNInfoAllocator);
1071 vni->setIsPHIDef(true);
1072 LiveRange LR(start, end, vni);
1074 interval.addRange(LR);
1075 LR.valno->addKill(end);
1076 DEBUG(errs() << " +" << LR << '\n');
1080 LiveIntervals::isProfitableToCoalesce(LiveInterval &DstInt, LiveInterval &SrcInt,
1081 SmallVector<MachineInstr*,16> &IdentCopies,
1082 SmallVector<MachineInstr*,16> &OtherCopies) {
1083 bool HaveConflict = false;
1084 unsigned NumIdent = 0;
1085 for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(SrcInt.reg),
1086 re = mri_->reg_end(); ri != re; ++ri) {
1087 MachineOperand &O = ri.getOperand();
1091 MachineInstr *MI = &*ri;
1092 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
1093 if (!tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
1095 if (SrcReg != DstInt.reg) {
1096 OtherCopies.push_back(MI);
1097 HaveConflict |= DstInt.liveAt(getInstructionIndex(MI));
1099 IdentCopies.push_back(MI);
1105 return false; // Let coalescer handle it
1106 return IdentCopies.size() > OtherCopies.size();
1109 void LiveIntervals::performEarlyCoalescing() {
1110 if (!EarlyCoalescing)
1113 /// Perform early coalescing: eliminate copies which feed into phi joins
1114 /// and whose sources are defined by the phi joins.
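// The shape being targeted is roughly (made-up registers):
//   %reg1027<def> = ... move of %reg1026 ...   ; source defined by the join
//   %reg1026<def> = ... move of %reg1027 ...   ; the phi join copy itself
// When enough of the feeding copies are identity copies like this, they are
// merged here rather than being left for the register coalescer.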
1115 for (unsigned i = 0, e = phiJoinCopies.size(); i != e; ++i) {
1116 MachineInstr *Join = phiJoinCopies[i];
1117 if (CoalescingLimit != -1 && (int)numCoalescing == CoalescingLimit)
1120 unsigned PHISrc, PHIDst, SrcSubReg, DstSubReg;
1121 bool isMove= tii_->isMoveInstr(*Join, PHISrc, PHIDst, SrcSubReg, DstSubReg);
1123 assert(isMove && "PHI join instruction must be a move!");
1128 LiveInterval &DstInt = getInterval(PHIDst);
1129 LiveInterval &SrcInt = getInterval(PHISrc);
1130 SmallVector<MachineInstr*, 16> IdentCopies;
1131 SmallVector<MachineInstr*, 16> OtherCopies;
1132 if (!isProfitableToCoalesce(DstInt, SrcInt, IdentCopies, OtherCopies))
1135 DEBUG(errs() << "PHI Join: " << *Join);
1136 assert(DstInt.containsOneValue() && "PHI join should have just one val#!");
1137 VNInfo *VNI = DstInt.getValNumInfo(0);
1139 // Change the non-identity copies to directly target the phi destination.
1140 for (unsigned i = 0, e = OtherCopies.size(); i != e; ++i) {
1141 MachineInstr *PHICopy = OtherCopies[i];
1142 DEBUG(errs() << "Moving: " << *PHICopy);
1144 MachineInstrIndex MIIndex = getInstructionIndex(PHICopy);
1145 MachineInstrIndex DefIndex = getDefIndex(MIIndex);
1146 LiveRange *SLR = SrcInt.getLiveRangeContaining(DefIndex);
1147 MachineInstrIndex StartIndex = SLR->start;
1148 MachineInstrIndex EndIndex = SLR->end;
1150 // Delete val# defined by the now identity copy and add the range from
1151 // beginning of the mbb to the end of the range.
1152 SrcInt.removeValNo(SLR->valno);
1153 DEBUG(errs() << " added range [" << StartIndex << ','
1154 << EndIndex << "] to reg" << DstInt.reg << '\n');
1155 if (DstInt.liveAt(StartIndex))
1156 DstInt.removeRange(StartIndex, EndIndex);
1157 VNInfo *NewVNI = DstInt.getNextValue(DefIndex, PHICopy, true,
1159 NewVNI->setHasPHIKill(true);
1160 DstInt.addRange(LiveRange(StartIndex, EndIndex, NewVNI));
1161 for (unsigned j = 0, ee = PHICopy->getNumOperands(); j != ee; ++j) {
1162 MachineOperand &MO = PHICopy->getOperand(j);
1163 if (!MO.isReg() || MO.getReg() != PHISrc)
1169 // Now let's eliminate all the would-be identity copies.
1170 for (unsigned i = 0, e = IdentCopies.size(); i != e; ++i) {
1171 MachineInstr *PHICopy = IdentCopies[i];
1172 DEBUG(errs() << "Coalescing: " << *PHICopy);
1174 MachineInstrIndex MIIndex = getInstructionIndex(PHICopy);
1175 MachineInstrIndex DefIndex = getDefIndex(MIIndex);
1176 LiveRange *SLR = SrcInt.getLiveRangeContaining(DefIndex);
1177 MachineInstrIndex StartIndex = SLR->start;
1178 MachineInstrIndex EndIndex = SLR->end;
1180 // Delete val# defined by the now identity copy and add the range from
1181 // beginning of the mbb to the end of the range.
1182 SrcInt.removeValNo(SLR->valno);
1183 RemoveMachineInstrFromMaps(PHICopy);
1184 PHICopy->eraseFromParent();
1185 DEBUG(errs() << " added range [" << StartIndex << ','
1186 << EndIndex << "] to reg" << DstInt.reg << '\n');
1187 DstInt.addRange(LiveRange(StartIndex, EndIndex, VNI));
1190 // Remove the phi join and update the phi block liveness.
1191 MachineInstrIndex MIIndex = getInstructionIndex(Join);
1192 MachineInstrIndex UseIndex = getUseIndex(MIIndex);
1193 MachineInstrIndex DefIndex = getDefIndex(MIIndex);
1194 LiveRange *SLR = SrcInt.getLiveRangeContaining(UseIndex);
1195 LiveRange *DLR = DstInt.getLiveRangeContaining(DefIndex);
1196 DLR->valno->setCopy(0);
1197 DLR->valno->setIsDefAccurate(false);
1198 DstInt.addRange(LiveRange(SLR->start, SLR->end, DLR->valno));
1199 SrcInt.removeRange(SLR->start, SLR->end);
1200 assert(SrcInt.empty());
1201 removeInterval(PHISrc);
1202 RemoveMachineInstrFromMaps(Join);
1203 Join->eraseFromParent();
1209 /// computeIntervals - computes the live intervals for virtual
1210 /// registers. For some ordering of the machine instructions [1,N], a
1211 /// live interval is an interval [i, j), where 1 <= i <= j < N, for
1212 /// which a variable is live.
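/// Note that a single LiveInterval may consist of several disjoint [i, j)
/// ranges, each carrying a value number (VNInfo) that records which
/// definition reaches it; handleVirtualRegisterDef above shows how those
/// value numbers are created.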
1213 void LiveIntervals::computeIntervals() {
1214 DEBUG(errs() << "********** COMPUTING LIVE INTERVALS **********\n"
1215 << "********** Function: "
1216 << ((Value*)mf_->getFunction())->getName() << '\n');
1218 SmallVector<unsigned, 8> UndefUses;
1219 for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
1220 MBBI != E; ++MBBI) {
1221 MachineBasicBlock *MBB = MBBI;
1222 // Track the index of the current machine instr.
1223 MachineInstrIndex MIIndex = getMBBStartIdx(MBB);
1224 DEBUG(errs() << ((Value*)MBB->getBasicBlock())->getName() << ":\n");
1226 MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();
1228 // Create intervals for live-ins to this BB first.
1229 for (MachineBasicBlock::const_livein_iterator LI = MBB->livein_begin(),
1230 LE = MBB->livein_end(); LI != LE; ++LI) {
1231 handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
1232 // Multiple live-ins can alias the same register.
1233 for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
1234 if (!hasInterval(*AS))
1235 handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
1239 // Skip over empty initial indices.
1240 while (MIIndex.getVecIndex() < i2miMap_.size() &&
1241 getInstructionFromIndex(MIIndex) == 0)
1242 MIIndex = getNextIndex(MIIndex);
1244 for (; MI != miEnd; ++MI) {
1245 DEBUG(errs() << MIIndex << "\t" << *MI);
1248 for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
1249 MachineOperand &MO = MI->getOperand(i);
1250 if (!MO.isReg() || !MO.getReg())
1253 // handle register defs - build intervals
1255 handleRegisterDef(MBB, MI, MIIndex, MO, i);
1256 else if (MO.isUndef())
1257 UndefUses.push_back(MO.getReg());
1260 // Skip over the empty slots after each instruction.
1261 unsigned Slots = MI->getDesc().getNumDefs();
1266 MIIndex = getNextIndex(MIIndex);
1268 // Skip over empty indices.
1269 while (MIIndex.getVecIndex() < i2miMap_.size() &&
1270 getInstructionFromIndex(MIIndex) == 0)
1271 MIIndex = getNextIndex(MIIndex);
1275 // Create empty intervals for registers defined by implicit_def's (except
1276 // for those implicit_def's that define values which are live out of their blocks).
1278 for (unsigned i = 0, e = UndefUses.size(); i != e; ++i) {
1279 unsigned UndefReg = UndefUses[i];
1280 (void)getOrCreateInterval(UndefReg);
1284 bool LiveIntervals::findLiveInMBBs(
1285 MachineInstrIndex Start, MachineInstrIndex End,
1286 SmallVectorImpl<MachineBasicBlock*> &MBBs) const {
1287 std::vector<IdxMBBPair>::const_iterator I =
1288 std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), Start);
1290 bool ResVal = false;
1291 while (I != Idx2MBBMap.end()) {
1292 if (I->first >= End)
1294 MBBs.push_back(I->second);
1301 bool LiveIntervals::findReachableMBBs(
1302 MachineInstrIndex Start, MachineInstrIndex End,
1303 SmallVectorImpl<MachineBasicBlock*> &MBBs) const {
1304 std::vector<IdxMBBPair>::const_iterator I =
1305 std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), Start);
1307 bool ResVal = false;
1308 while (I != Idx2MBBMap.end()) {
1311 MachineBasicBlock *MBB = I->second;
1312 if (getMBBEndIdx(MBB) > End)
1314 for (MachineBasicBlock::succ_iterator SI = MBB->succ_begin(),
1315 SE = MBB->succ_end(); SI != SE; ++SI)
1316 MBBs.push_back(*SI);
1323 LiveInterval* LiveIntervals::createInterval(unsigned reg) {
1324 float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ? HUGE_VALF : 0.0F;
1325 return new LiveInterval(reg, Weight);
1328 /// dupInterval - Duplicate a live interval. The caller is responsible for
1329 /// managing the allocated memory.
1330 LiveInterval* LiveIntervals::dupInterval(LiveInterval *li) {
1331 LiveInterval *NewLI = createInterval(li->reg);
1332 NewLI->Copy(*li, mri_, getVNInfoAllocator());
1336 /// getVNInfoSourceReg - Helper function that parses the specified VNInfo
1337 /// copy field and returns the source register that defines it.
1338 unsigned LiveIntervals::getVNInfoSourceReg(const VNInfo *VNI) const {
1339 if (!VNI->getCopy())
1342 if (VNI->getCopy()->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG) {
1343 // If it's extracting out of a physical register, return the sub-register.
1344 unsigned Reg = VNI->getCopy()->getOperand(1).getReg();
1345 if (TargetRegisterInfo::isPhysicalRegister(Reg))
1346 Reg = tri_->getSubReg(Reg, VNI->getCopy()->getOperand(2).getImm());
1348 } else if (VNI->getCopy()->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
1349 VNI->getCopy()->getOpcode() == TargetInstrInfo::SUBREG_TO_REG)
1350 return VNI->getCopy()->getOperand(2).getReg();
1352 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
1353 if (tii_->isMoveInstr(*VNI->getCopy(), SrcReg, DstReg, SrcSubReg, DstSubReg))
1355 llvm_unreachable("Unrecognized copy instruction!");
1359 //===----------------------------------------------------------------------===//
1360 // Register allocator hooks.
1363 /// getReMatImplicitUse - If the remat definition MI has one (for now, we only
1364 /// allow one) virtual register operand, then its uses are implicitly using
1365 /// the register. Returns the virtual register.
1366 unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
1367 MachineInstr *MI) const {
1369 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1370 MachineOperand &MO = MI->getOperand(i);
1371 if (!MO.isReg() || !MO.isUse())
1373 unsigned Reg = MO.getReg();
1374 if (Reg == 0 || Reg == li.reg)
1377 if (TargetRegisterInfo::isPhysicalRegister(Reg) &&
1378 !allocatableRegs_[Reg])
1380 // FIXME: For now, only remat MI with at most one register operand.
1382 "Can't rematerialize instruction with multiple register operand!");
1383 RegOp = MO.getReg();
1391 /// isValNoAvailableAt - Return true if the val# of the specified interval
1392 /// which reaches the given instruction also reaches the specified use index.
1393 bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
1394 MachineInstrIndex UseIdx) const {
1395 MachineInstrIndex Index = getInstructionIndex(MI);
1396 VNInfo *ValNo = li.FindLiveRangeContaining(Index)->valno;
1397 LiveInterval::const_iterator UI = li.FindLiveRangeContaining(UseIdx);
1398 return UI != li.end() && UI->valno == ValNo;
1401 /// isReMaterializable - Returns true if the definition MI of the specified
1402 /// val# of the specified interval is re-materializable.
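/// In rough terms the checks below are, in order: trivial cases such as
/// IMPLICIT_DEF and loads from immutable stack slots, then the target's own
/// rematerialization hooks, and only under -aggressive-remat a set of
/// conservative memory and register-operand checks.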
1403 bool LiveIntervals::isReMaterializable(const LiveInterval &li,
1404 const VNInfo *ValNo, MachineInstr *MI,
1405 SmallVectorImpl<LiveInterval*> &SpillIs,
1410 if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF)
1414 if (tii_->isLoadFromStackSlot(MI, FrameIdx) &&
1415 mf_->getFrameInfo()->isImmutableObjectIndex(FrameIdx))
1416 // FIXME: Let the target-specific isReallyTriviallyReMaterializable determine
1417 // this, but remember this is not safe to fold into a two-address instruction.
1419 // This is a load from a fixed stack slot. It can be rematerialized.
1422 // If the target-specific rules don't identify an instruction as
1423 // being trivially rematerializable, use some target-independent rules.
1425 if (!MI->getDesc().isRematerializable() ||
1426 !tii_->isTriviallyReMaterializable(MI)) {
1427 if (!EnableAggressiveRemat)
1430 // If the instruction accesses memory but the memoperands have been lost,
1431 // we can't analyze it.
1432 const TargetInstrDesc &TID = MI->getDesc();
1433 if ((TID.mayLoad() || TID.mayStore()) && MI->memoperands_empty())
1436 // Avoid instructions obviously unsafe for remat.
1437 if (TID.hasUnmodeledSideEffects() || TID.isNotDuplicable())
1440 // If the instruction accesses memory and the memory could be non-constant,
1441 // assume the instruction is not rematerializable.
1442 for (std::list<MachineMemOperand>::const_iterator
1443 I = MI->memoperands_begin(), E = MI->memoperands_end(); I != E; ++I){
1444 const MachineMemOperand &MMO = *I;
1445 if (MMO.isVolatile() || MMO.isStore())
1447 const Value *V = MMO.getValue();
1450 if (const PseudoSourceValue *PSV = dyn_cast<PseudoSourceValue>(V)) {
1451 if (!PSV->isConstant(mf_->getFrameInfo()))
1453 } else if (!aa_->pointsToConstantMemory(V))
1457 // If any of the registers accessed are non-constant, conservatively assume
1458 // the instruction is not rematerializable.
1459 unsigned ImpUse = 0;
1460 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1461 const MachineOperand &MO = MI->getOperand(i);
1463 unsigned Reg = MO.getReg();
1466 if (TargetRegisterInfo::isPhysicalRegister(Reg))
1469 // Only allow one def, and that in the first operand.
1470 if (MO.isDef() != (i == 0))
1473 // Only allow constant-valued registers.
1474 bool IsLiveIn = mri_->isLiveIn(Reg);
1475 MachineRegisterInfo::def_iterator I = mri_->def_begin(Reg),
1476 E = mri_->def_end();
1478 // For the def, it should be the only def of that register.
1479 if (MO.isDef() && (next(I) != E || IsLiveIn))
1483 // Only allow one other register use, as that's all the
1484 // remat mechanisms support currently.
1485 if (Reg != li.reg) {
1488 else if (Reg != ImpUse)
1491 // For the use, there should be only one associated def.
1492 if (I != E && (next(I) != E || IsLiveIn))
1499 unsigned ImpUse = getReMatImplicitUse(li, MI);
1501 const LiveInterval &ImpLi = getInterval(ImpUse);
1502 for (MachineRegisterInfo::use_iterator ri = mri_->use_begin(li.reg),
1503 re = mri_->use_end(); ri != re; ++ri) {
1504 MachineInstr *UseMI = &*ri;
1505 MachineInstrIndex UseIdx = getInstructionIndex(UseMI);
1506 if (li.FindLiveRangeContaining(UseIdx)->valno != ValNo)
1508 if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
1512 // If a register operand of the re-materialized instruction is going to
1513 // be spilled next, then it's not legal to re-materialize this instruction.
1514 for (unsigned i = 0, e = SpillIs.size(); i != e; ++i)
1515 if (ImpUse == SpillIs[i]->reg)
1521 /// isReMaterializable - Returns true if the definition MI of the specified
1522 /// val# of the specified interval is re-materializable.
1523 bool LiveIntervals::isReMaterializable(const LiveInterval &li,
1524 const VNInfo *ValNo, MachineInstr *MI) {
1525 SmallVector<LiveInterval*, 4> Dummy1;
1527 return isReMaterializable(li, ValNo, MI, Dummy1, Dummy2);
1530 /// isReMaterializable - Returns true if the definition MI of every
1531 /// val# of the specified interval is re-materializable.
1532 bool LiveIntervals::isReMaterializable(const LiveInterval &li,
1533 SmallVectorImpl<LiveInterval*> &SpillIs,
1536 for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
1538 const VNInfo *VNI = *i;
1539 if (VNI->isUnused())
1540 continue; // Dead val#.
1541 // Is the def for the val# rematerializable?
1542 if (!VNI->isDefAccurate())
1544 MachineInstr *ReMatDefMI = getInstructionFromIndex(VNI->def);
1545 bool DefIsLoad = false;
1547 !isReMaterializable(li, VNI, ReMatDefMI, SpillIs, DefIsLoad))
1549 isLoad |= DefIsLoad;
1554 /// FilterFoldedOps - Filter out two-address use operands. Return
1555 /// true if it finds any issue with the operands that ought to prevent folding.
1557 static bool FilterFoldedOps(MachineInstr *MI,
1558 SmallVector<unsigned, 2> &Ops,
1560 SmallVector<unsigned, 2> &FoldOps) {
1562 for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
1563 unsigned OpIdx = Ops[i];
1564 MachineOperand &MO = MI->getOperand(OpIdx);
1565 // FIXME: fold subreg use.
1569 MRInfo |= (unsigned)VirtRegMap::isMod;
1571 // Filter out two-address use operand(s).
1572 if (MI->isRegTiedToDefOperand(OpIdx)) {
1573 MRInfo = VirtRegMap::isModRef;
1576 MRInfo |= (unsigned)VirtRegMap::isRef;
1578 FoldOps.push_back(OpIdx);
1584 /// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
1585 /// slot / to reg or any rematerialized load into ith operand of specified
1586 /// MI. If it is successful, MI is updated with the newly created MI and returns true.
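/// Typical uses, as seen in rewriteInstructionForSpills below: folding a
/// reload from a spill slot into the instruction that needs the value, or
/// folding a rematerialized load (DefMI) directly into its use so that no
/// separate spill code has to be emitted.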
1588 bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
1589 VirtRegMap &vrm, MachineInstr *DefMI,
1590 MachineInstrIndex InstrIdx,
1591 SmallVector<unsigned, 2> &Ops,
1592 bool isSS, int Slot, unsigned Reg) {
1593 // If it is an implicit def instruction, just delete it.
1594 if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
1595 RemoveMachineInstrFromMaps(MI);
1596 vrm.RemoveMachineInstrFromMaps(MI);
1597 MI->eraseFromParent();
1602 // Filter the list of operand indexes that are to be folded. Abort if
1603 // any operand will prevent folding.
1604 unsigned MRInfo = 0;
1605 SmallVector<unsigned, 2> FoldOps;
1606 if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
1609 // The only time it's safe to fold into a two address instruction is when
1610 // it's folding reload and spill from / into a spill stack slot.
1611 if (DefMI && (MRInfo & VirtRegMap::isMod))
1614 MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(*mf_, MI, FoldOps, Slot)
1615 : tii_->foldMemoryOperand(*mf_, MI, FoldOps, DefMI);
1617 // Remember this instruction uses the spill slot.
1618 if (isSS) vrm.addSpillSlotUse(Slot, fmi);
1620 // Attempt to fold the memory reference into the instruction. If
1621 // we can do this, we don't need to insert spill code.
1622 MachineBasicBlock &MBB = *MI->getParent();
1623 if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
1624 vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
1625 vrm.transferSpillPts(MI, fmi);
1626 vrm.transferRestorePts(MI, fmi);
1627 vrm.transferEmergencySpills(MI, fmi);
1629 i2miMap_[InstrIdx.getVecIndex()] = fmi;
1630 mi2iMap_[fmi] = InstrIdx;
1631 MI = MBB.insert(MBB.erase(MI), fmi);
1638 /// canFoldMemoryOperand - Returns true if the specified load / store
1639 /// folding is possible.
1640 bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
1641 SmallVector<unsigned, 2> &Ops,
1643 // Filter the list of operand indexes that are to be folded. Abort if
1644 // any operand will prevent folding.
1645 unsigned MRInfo = 0;
1646 SmallVector<unsigned, 2> FoldOps;
1647 if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
1650 // It's only legal to remat for a use, not a def.
1651 if (ReMat && (MRInfo & VirtRegMap::isMod))
1654 return tii_->canFoldMemoryOperand(MI, FoldOps);
1657 bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
1658 SmallPtrSet<MachineBasicBlock*, 4> MBBs;
1659 for (LiveInterval::Ranges::const_iterator
1660 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1661 std::vector<IdxMBBPair>::const_iterator II =
1662 std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), I->start);
1663 if (II == Idx2MBBMap.end())
1665 if (I->end > II->first) // crossing a MBB.
1667 MBBs.insert(II->second);
1668 if (MBBs.size() > 1)
1674 /// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of
1675 /// interval on to-be re-materialized operands of MI) with new register.
1676 void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
1677 MachineInstr *MI, unsigned NewVReg,
1679 // There is an implicit use. That means one of the other operands is
1680 // being remat'ed and the remat'ed instruction has li.reg as a
1681 // use operand. Make sure we rewrite that as well.
1682 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1683 MachineOperand &MO = MI->getOperand(i);
1686 unsigned Reg = MO.getReg();
1687 if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
1689 if (!vrm.isReMaterialized(Reg))
1691 MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
1692 MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg);
1694 UseMO->setReg(NewVReg);
1698 /// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
1699 /// for addIntervalsForSpills to rewrite uses / defs for the given live range.
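/// Broad outline of the per-operand loop below: every operand of MI that
/// refers to li.reg is either erased/folded away, or rewritten to a fresh
/// virtual register for which a small spill or remat interval is created.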
1700 bool LiveIntervals::
1701 rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
1702 bool TrySplit, MachineInstrIndex index, MachineInstrIndex end,
1704 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
1705 unsigned Slot, int LdSlot,
1706 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
1708 const TargetRegisterClass* rc,
1709 SmallVector<int, 4> &ReMatIds,
1710 const MachineLoopInfo *loopInfo,
1711 unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
1712 DenseMap<unsigned,unsigned> &MBBVRegsMap,
1713 std::vector<LiveInterval*> &NewLIs) {
1714 bool CanFold = false;
1716 for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
1717 MachineOperand& mop = MI->getOperand(i);
1720 unsigned Reg = mop.getReg();
1721 unsigned RegI = Reg;
1722 if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
1727 bool TryFold = !DefIsReMat;
1728 bool FoldSS = true; // Default behavior unless it's a remat.
1729 int FoldSlot = Slot;
1731 // If this is the rematerializable definition MI itself and
1732 // all of its uses are rematerialized, simply delete it.
1733 if (MI == ReMatOrigDefMI && CanDelete) {
1734 DEBUG(errs() << "\t\t\t\tErasing re-materializable def: "
1736 RemoveMachineInstrFromMaps(MI);
1737 vrm.RemoveMachineInstrFromMaps(MI);
1738 MI->eraseFromParent();
1742 // If def for this use can't be rematerialized, then try folding.
1743 // If def is rematerializable and it's a load, also try folding.
1744 TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
1746 // Try fold loads (from stack slot, constant pool, etc.) into uses.
1752 // Scan all of the operands of this instruction rewriting operands
1753 // to use NewVReg instead of li.reg as appropriate. We do this for two reasons:
1756 // 1. If the instr reads the same spilled vreg multiple times, we
1757 // want to reuse the NewVReg.
1758 // 2. If the instr is a two-addr instruction, we are required to
1759 // keep the src/dst regs pinned.
1761 // Keep track of whether we replace a use and/or def so that we can
1762 // create the spill interval with the appropriate range.
1764 HasUse = mop.isUse();
1765 HasDef = mop.isDef();
1766 SmallVector<unsigned, 2> Ops;
1768 for (unsigned j = i+1, e = MI->getNumOperands(); j != e; ++j) {
1769 const MachineOperand &MOj = MI->getOperand(j);
1772 unsigned RegJ = MOj.getReg();
1773 if (RegJ == 0 || TargetRegisterInfo::isPhysicalRegister(RegJ))
1777 if (!MOj.isUndef()) {
1778 HasUse |= MOj.isUse();
1779 HasDef |= MOj.isDef();
1784 // Create a new virtual register for the spill interval.
1785 // Create the new register now so we can map the fold instruction
1786 // to the new register so when it is unfolded we get the correct
// value.
1788 bool CreatedNewVReg = false;
if (NewVReg == 0) {
1790 NewVReg = mri_->createVirtualRegister(rc);
vrm.grow();
1792 CreatedNewVReg = true;
1798 // Do not fold load / store here if we are splitting. We'll find an
1799 // optimal point to insert a load / store later.
1801 if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
1802 Ops, FoldSS, FoldSlot, NewVReg)) {
1803 // Folding the load/store can completely change the instruction in
1804 // unpredictable ways, rescan it from the beginning.
1807 // We need to give the new vreg the same stack slot as the
1808 // spilled interval.
1809 vrm.assignVirt2StackSlot(NewVReg, FoldSlot);
1815 if (isNotInMIMap(MI))
break;
1817 goto RestartInstruction;
1820 // We'll try to fold it later if it's profitable.
1821 CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat);
1825 mop.setReg(NewVReg);
1826 if (mop.isImplicit())
1827 rewriteImplicitOps(li, MI, NewVReg, vrm);
1829 // Reuse NewVReg for other reads.
1830 for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
1831 MachineOperand &mopj = MI->getOperand(Ops[j]);
1832 mopj.setReg(NewVReg);
1833 if (mopj.isImplicit())
1834 rewriteImplicitOps(li, MI, NewVReg, vrm);
1837 if (CreatedNewVReg) {
1839 vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI);
1840 if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
1841 // Each valnum may have its own remat id.
1842 ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
1844 vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
1846 if (!CanDelete || (HasUse && HasDef)) {
1847 // If this is a two-addr instruction then its use operands are
1848 // rematerializable but its def is not. It should be assigned a
// stack slot.
1850 vrm.assignVirt2StackSlot(NewVReg, Slot);
1853 vrm.assignVirt2StackSlot(NewVReg, Slot);
1855 } else if (HasUse && HasDef &&
1856 vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
1857 // If this interval hasn't been assigned a stack slot (because earlier
1858 // def is a deleted remat def), do it now.
1859 assert(Slot != VirtRegMap::NO_STACK_SLOT);
1860 vrm.assignVirt2StackSlot(NewVReg, Slot);
1863 // Re-matting an instruction with virtual register use. Add the
1864 // register as an implicit use on the use MI.
1865 if (DefIsReMat && ImpUse)
1866 MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
1868 // Create a new register interval for this spill / remat.
1869 LiveInterval &nI = getOrCreateInterval(NewVReg);
1870 if (CreatedNewVReg) {
1871 NewLIs.push_back(&nI);
1872 MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
1874 vrm.setIsSplitFromReg(NewVReg, li.reg);
1878 if (CreatedNewVReg) {
1879 LiveRange LR(getLoadIndex(index), getNextSlot(getUseIndex(index)),
1880 nI.getNextValue(MachineInstrIndex(), 0, false,
getVNInfoAllocator()));
1882 DEBUG(errs() << " +" << LR);
1885 // Extend the split live interval to this def / use.
1886 MachineInstrIndex End = getNextSlot(getUseIndex(index));
1887 LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
1888 nI.getValNumInfo(nI.getNumValNums()-1));
1889 DEBUG(errs() << " +" << LR);
1894 LiveRange LR(getDefIndex(index), getStoreIndex(index),
1895 nI.getNextValue(MachineInstrIndex(), 0, false,
getVNInfoAllocator()));
1897 DEBUG(errs() << " +" << LR);
1902 errs() << "\t\t\t\tAdded new interval: ";
1903 nI.print(errs(), tri_);
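// Rough sketch of the ranges built above, assuming the usual four index
// slots per instruction (load, use, def, store): a reload feeding a use at
// index i produces the range [load(i), use(i)+1), while a def that must be
// stored back produces [def(i), store(i)), so the new interval spans only
// the instruction itself rather than the original long-lived interval.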
1909 bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
const VNInfo *VNI,
1911 MachineBasicBlock *MBB,
1912 MachineInstrIndex Idx) const {
1913 MachineInstrIndex End = getMBBEndIdx(MBB);
1914 for (unsigned j = 0, ee = VNI->kills.size(); j != ee; ++j) {
1915 if (VNI->kills[j].isPHIIndex())
continue;
1918 MachineInstrIndex KillIdx = VNI->kills[j];
1919 if (KillIdx > Idx && KillIdx < End)
return true;
}
return false;
}
1925 /// RewriteInfo - Keep track of machine instrs that will be rewritten
1926 /// during spilling.
1928 struct RewriteInfo {
1929 MachineInstrIndex Index;
MachineInstr *MI;
bool HasUse;
bool HasDef;
1933 RewriteInfo(MachineInstrIndex i, MachineInstr *mi, bool u, bool d)
1934 : Index(i), MI(mi), HasUse(u), HasDef(d) {}
1937 struct RewriteInfoCompare {
1938 bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
1939 return LHS.Index < RHS.Index;
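// Illustrative note: all operands of one instruction share its index, so
// e.g. the entries (idx 12, use), (idx 12, def), (idx 20, use) sort into
// adjacent per-MachineInstr groups. The rewrite loop below relies on that
// grouping to merge per-operand entries into a single per-instruction
// HasUse / HasDef summary.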
1944 void LiveIntervals::
1945 rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
1946 LiveInterval::Ranges::const_iterator &I,
1947 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
1948 unsigned Slot, int LdSlot,
1949 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
VirtRegMap &vrm,
1951 const TargetRegisterClass* rc,
1952 SmallVector<int, 4> &ReMatIds,
1953 const MachineLoopInfo *loopInfo,
1954 BitVector &SpillMBBs,
1955 DenseMap<unsigned, std::vector<SRInfo> > &SpillIdxes,
1956 BitVector &RestoreMBBs,
1957 DenseMap<unsigned, std::vector<SRInfo> > &RestoreIdxes,
1958 DenseMap<unsigned,unsigned> &MBBVRegsMap,
1959 std::vector<LiveInterval*> &NewLIs) {
1960 bool AllCanFold = true;
1961 unsigned NewVReg = 0;
1962 MachineInstrIndex start = getBaseIndex(I->start);
1963 MachineInstrIndex end = getNextIndex(getBaseIndex(getPrevSlot(I->end)));
1965 // First collect all the def / use in this live range that will be rewritten.
1966 // Make sure they are sorted according to instruction index.
1967 std::vector<RewriteInfo> RewriteMIs;
1968 for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
1969 re = mri_->reg_end(); ri != re; ) {
1970 MachineInstr *MI = &*ri;
1971 MachineOperand &O = ri.getOperand();
++ri;
1973 assert(!O.isImplicit() && "Spilling register that's used as implicit use?");
1974 MachineInstrIndex index = getInstructionIndex(MI);
1975 if (index < start || index >= end)
continue;
if (O.isUndef())
1979 // Must be defined by an implicit def. It should not be spilled. Note,
1980 // this is needed for correctness. e.g.
1981 // 8 %reg1024<def> = IMPLICIT_DEF
1982 // 12 %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
1983 // The live range [12, 14) is not part of the r1024 live interval since
1984 // it's defined by an implicit def. It will not conflict with the live
1985 // interval of r1025. Now suppose both registers are spilled; you can
1986 // easily see a situation where both registers are reloaded before
1987 // the INSERT_SUBREG, and the two target registers would overlap.
continue;
1989 RewriteMIs.push_back(RewriteInfo(index, MI, O.isUse(), O.isDef()));
1991 std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());
1993 unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
1994 // Now rewrite the defs and uses.
1995 for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
1996 RewriteInfo &rwi = RewriteMIs[i];
++i;
1998 MachineInstrIndex index = rwi.Index;
1999 bool MIHasUse = rwi.HasUse;
2000 bool MIHasDef = rwi.HasDef;
2001 MachineInstr *MI = rwi.MI;
2002 // If MI defs and/or uses the same register multiple times, then there
2003 // are multiple entries.
2004 unsigned NumUses = MIHasUse;
2005 while (i != e && RewriteMIs[i].MI == MI) {
2006 assert(RewriteMIs[i].Index == index);
2007 bool isUse = RewriteMIs[i].HasUse;
2008 if (isUse) ++NumUses;
MIHasUse |= isUse;
2010 MIHasDef |= RewriteMIs[i].HasDef;
++i;
}
2013 MachineBasicBlock *MBB = MI->getParent();
2015 if (ImpUse && MI != ReMatDefMI) {
2016 // Re-matting an instruction with virtual register use. Update the
2017 // register interval's spill weight to HUGE_VALF to prevent it from
// being spilled.
2019 LiveInterval &ImpLi = getInterval(ImpUse);
2020 ImpLi.weight = HUGE_VALF;
2023 unsigned MBBId = MBB->getNumber();
2024 unsigned ThisVReg = 0;
2026 DenseMap<unsigned,unsigned>::iterator NVI = MBBVRegsMap.find(MBBId);
2027 if (NVI != MBBVRegsMap.end()) {
2028 ThisVReg = NVI->second;
2035 // It's better to start a new interval to avoid artificially
2036 // extending the new interval.
2037 if (MIHasDef && !MIHasUse) {
2038 MBBVRegsMap.erase(MBB->getNumber());
2044 bool IsNew = ThisVReg == 0;
2046 // This ends the previous live interval. If all of its def / use
2047 // can be folded, give it a low spill weight.
2048 if (NewVReg && TrySplit && AllCanFold) {
2049 LiveInterval &nI = getOrCreateInterval(NewVReg);
2056 bool HasDef = false;
2057 bool HasUse = false;
2058 bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
2059 index, end, MI, ReMatOrigDefMI, ReMatDefMI,
2060 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
2061 CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
2062 ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs);
2063 if (!HasDef && !HasUse)
continue;
2066 AllCanFold &= CanFold;
2068 // Update weight of spill interval.
2069 LiveInterval &nI = getOrCreateInterval(NewVReg);
2071 // The spill weight is now infinity as it cannot be spilled again.
2072 nI.weight = HUGE_VALF;
2076 // Keep track of the last def and first use in each MBB.
2078 if (MI != ReMatOrigDefMI || !CanDelete) {
2079 bool HasKill = false;
2081 HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, getDefIndex(index));
2083 // If this is two-address code, then this index starts a new VNInfo.
2084 const VNInfo *VNI = li.findDefinedVNInfoForRegInt(getDefIndex(index));
2086 HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, getDefIndex(index));
2088 DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
2089 SpillIdxes.find(MBBId);
2091 if (SII == SpillIdxes.end()) {
2092 std::vector<SRInfo> S;
2093 S.push_back(SRInfo(index, NewVReg, true));
2094 SpillIdxes.insert(std::make_pair(MBBId, S));
2095 } else if (SII->second.back().vreg != NewVReg) {
2096 SII->second.push_back(SRInfo(index, NewVReg, true));
2097 } else if (index > SII->second.back().index) {
2098 // If there is an earlier def and this is a two-address
2099 // instruction, then it's not possible to fold the store (which
2100 // would also fold the load).
2101 SRInfo &Info = SII->second.back();
2103 Info.canFold = !HasUse;
2105 SpillMBBs.set(MBBId);
2106 } else if (SII != SpillIdxes.end() &&
2107 SII->second.back().vreg == NewVReg &&
2108 index > SII->second.back().index) {
2109 // There is an earlier def that's not killed (must be two-address).
2110 // The spill is no longer needed.
2111 SII->second.pop_back();
2112 if (SII->second.empty()) {
2113 SpillIdxes.erase(MBBId);
2114 SpillMBBs.reset(MBBId);
2121 DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
2122 SpillIdxes.find(MBBId);
2123 if (SII != SpillIdxes.end() &&
2124 SII->second.back().vreg == NewVReg &&
2125 index > SII->second.back().index)
2126 // Use(s) follow the last def, so it's not safe to fold the spill.
2127 SII->second.back().canFold = false;
2128 DenseMap<unsigned, std::vector<SRInfo> >::iterator RII =
2129 RestoreIdxes.find(MBBId);
2130 if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
2131 // If we are splitting live intervals, only fold if it's the first
2132 // use and there isn't another use later in the MBB.
2133 RII->second.back().canFold = false;
2135 // Only need a reload if there isn't an earlier def / use.
2136 if (RII == RestoreIdxes.end()) {
2137 std::vector<SRInfo> Infos;
2138 Infos.push_back(SRInfo(index, NewVReg, true));
2139 RestoreIdxes.insert(std::make_pair(MBBId, Infos));
2141 RII->second.push_back(SRInfo(index, NewVReg, true));
2143 RestoreMBBs.set(MBBId);
2147 // Update spill weight.
2148 unsigned loopDepth = loopInfo->getLoopDepth(MBB);
2149 nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
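// Rough illustration, assuming getSpillWeight scales as roughly
// (HasDef + HasUse) * 10^loopDepth as elsewhere in this codebase: a
// def-and-use of the new vreg inside a doubly nested loop adds on the order
// of 2 * 100 = 200 to nI.weight, strongly discouraging a re-spill of a hot
// interval.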
2152 if (NewVReg && TrySplit && AllCanFold) {
2153 // If all of its def / use can be folded, give it a low spill weight.
2154 LiveInterval &nI = getOrCreateInterval(NewVReg);
2159 bool LiveIntervals::alsoFoldARestore(int Id, MachineInstrIndex index,
2160 unsigned vr, BitVector &RestoreMBBs,
2161 DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
2162 if (!RestoreMBBs[Id])
return false;
2164 std::vector<SRInfo> &Restores = RestoreIdxes[Id];
2165 for (unsigned i = 0, e = Restores.size(); i != e; ++i)
2166 if (Restores[i].index == index &&
2167 Restores[i].vreg == vr &&
2168 Restores[i].canFold)
return true;
return false;
}
2173 void LiveIntervals::eraseRestoreInfo(int Id, MachineInstrIndex index,
2174 unsigned vr, BitVector &RestoreMBBs,
2175 DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
2176 if (!RestoreMBBs[Id])
return;
2178 std::vector<SRInfo> &Restores = RestoreIdxes[Id];
2179 for (unsigned i = 0, e = Restores.size(); i != e; ++i)
2180 if (Restores[i].index == index && Restores[i].vreg)
2181 Restores[i].index = MachineInstrIndex();
2184 /// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being
2185 /// spilled and create empty intervals for their uses.
2187 LiveIntervals::handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm,
2188 const TargetRegisterClass* rc,
2189 std::vector<LiveInterval*> &NewLIs) {
2190 for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
2191 re = mri_->reg_end(); ri != re; ) {
2192 MachineOperand &O = ri.getOperand();
2193 MachineInstr *MI = &*ri;
2196 assert(MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF &&
2197 "Register def was not rewritten?");
2198 RemoveMachineInstrFromMaps(MI);
2199 vrm.RemoveMachineInstrFromMaps(MI);
2200 MI->eraseFromParent();
2202 // This must be a use of an implicit_def so it's not part of the live
2203 // interval. Create a new empty live interval for it.
2204 // FIXME: Can we simply erase some of the instructions? e.g. Stores?
2205 unsigned NewVReg = mri_->createVirtualRegister(rc);
2207 vrm.setIsImplicitlyDefined(NewVReg);
2208 NewLIs.push_back(&getOrCreateInterval(NewVReg));
2209 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
2210 MachineOperand &MO = MI->getOperand(i);
2211 if (MO.isReg() && MO.getReg() == li.reg) {
2220 std::vector<LiveInterval*> LiveIntervals::
2221 addIntervalsForSpillsFast(const LiveInterval &li,
2222 const MachineLoopInfo *loopInfo,
VirtRegMap &vrm) {
2224 unsigned slot = vrm.assignVirt2StackSlot(li.reg);
2226 std::vector<LiveInterval*> added;
2228 assert(li.weight != HUGE_VALF &&
2229 "attempt to spill already spilled interval!");
2232 errs() << "\t\t\t\tadding intervals for spills for interval: ";
2237 const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
2239 MachineRegisterInfo::reg_iterator RI = mri_->reg_begin(li.reg);
2240 while (RI != mri_->reg_end()) {
2241 MachineInstr* MI = &*RI;
2243 SmallVector<unsigned, 2> Indices;
2244 bool HasUse = false;
2245 bool HasDef = false;
2247 for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
2248 MachineOperand& mop = MI->getOperand(i);
2249 if (!mop.isReg() || mop.getReg() != li.reg) continue;
2251 HasUse |= MI->getOperand(i).isUse();
2252 HasDef |= MI->getOperand(i).isDef();
2254 Indices.push_back(i);
2257 if (!tryFoldMemoryOperand(MI, vrm, NULL, getInstructionIndex(MI),
2258 Indices, true, slot, li.reg)) {
2259 unsigned NewVReg = mri_->createVirtualRegister(rc);
2261 vrm.assignVirt2StackSlot(NewVReg, slot);
2263 // create a new register for this spill
2264 LiveInterval &nI = getOrCreateInterval(NewVReg);
2266 // the spill weight is now infinity as it
2267 // cannot be spilled again
2268 nI.weight = HUGE_VALF;
2270 // Rewrite register operands to use the new vreg.
2271 for (SmallVectorImpl<unsigned>::iterator I = Indices.begin(),
2272 E = Indices.end(); I != E; ++I) {
2273 MI->getOperand(*I).setReg(NewVReg);
2275 if (MI->getOperand(*I).isUse())
2276 MI->getOperand(*I).setIsKill(true);
2279 // Fill in the new live interval.
2280 MachineInstrIndex index = getInstructionIndex(MI);
2282 LiveRange LR(getLoadIndex(index), getUseIndex(index),
2283 nI.getNextValue(MachineInstrIndex(), 0, false,
2284 getVNInfoAllocator()));
2285 DEBUG(errs() << " +" << LR);
2287 vrm.addRestorePoint(NewVReg, MI);
2290 LiveRange LR(getDefIndex(index), getStoreIndex(index),
2291 nI.getNextValue(MachineInstrIndex(), 0, false,
2292 getVNInfoAllocator()));
2293 DEBUG(errs() << " +" << LR);
2295 vrm.addSpillPoint(NewVReg, true, MI);
2298 added.push_back(&nI);
2301 errs() << "\t\t\t\tadded new interval: ";
2308 RI = mri_->reg_begin(li.reg);
2314 std::vector<LiveInterval*> LiveIntervals::
2315 addIntervalsForSpills(const LiveInterval &li,
2316 SmallVectorImpl<LiveInterval*> &SpillIs,
2317 const MachineLoopInfo *loopInfo, VirtRegMap &vrm) {
2319 if (EnableFastSpilling)
2320 return addIntervalsForSpillsFast(li, loopInfo, vrm);
2322 assert(li.weight != HUGE_VALF &&
2323 "attempt to spill already spilled interval!");
2326 errs() << "\t\t\t\tadding intervals for spills for interval: ";
2327 li.print(errs(), tri_);
2331 // Each bit specifies whether a spill is required in the MBB.
2332 BitVector SpillMBBs(mf_->getNumBlockIDs());
2333 DenseMap<unsigned, std::vector<SRInfo> > SpillIdxes;
2334 BitVector RestoreMBBs(mf_->getNumBlockIDs());
2335 DenseMap<unsigned, std::vector<SRInfo> > RestoreIdxes;
2336 DenseMap<unsigned,unsigned> MBBVRegsMap;
2337 std::vector<LiveInterval*> NewLIs;
2338 const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
2340 unsigned NumValNums = li.getNumValNums();
2341 SmallVector<MachineInstr*, 4> ReMatDefs;
2342 ReMatDefs.resize(NumValNums, NULL);
2343 SmallVector<MachineInstr*, 4> ReMatOrigDefs;
2344 ReMatOrigDefs.resize(NumValNums, NULL);
2345 SmallVector<int, 4> ReMatIds;
2346 ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
2347 BitVector ReMatDelete(NumValNums);
2348 unsigned Slot = VirtRegMap::MAX_STACK_SLOT;
2350 // Spilling a split live interval. It cannot be split any further. Also,
2351 // it is guaranteed to be a single val# / range interval.
2352 if (vrm.getPreSplitReg(li.reg)) {
2353 vrm.setIsSplitFromReg(li.reg, 0);
2354 // Unset the split kill marker on the last use.
2355 MachineInstrIndex KillIdx = vrm.getKillPoint(li.reg);
2356 if (KillIdx != MachineInstrIndex()) {
2357 MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
2358 assert(KillMI && "Last use disappeared?");
2359 int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
2360 assert(KillOp != -1 && "Last use disappeared?");
2361 KillMI->getOperand(KillOp).setIsKill(false);
2363 vrm.removeKillPoint(li.reg);
2364 bool DefIsReMat = vrm.isReMaterialized(li.reg);
2365 Slot = vrm.getStackSlot(li.reg);
2366 assert(Slot != VirtRegMap::MAX_STACK_SLOT);
2367 MachineInstr *ReMatDefMI = DefIsReMat ?
2368 vrm.getReMaterializedMI(li.reg) : NULL;
int LdSlot = 0;
2370 bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
2371 bool isLoad = isLoadSS ||
2372 (DefIsReMat && (ReMatDefMI->getDesc().canFoldAsLoad()));
2373 bool IsFirstRange = true;
2374 for (LiveInterval::Ranges::const_iterator
2375 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
2376 // If this is a split live interval with multiple ranges, it means there
2377 // are two-address instructions that re-defined the value. Only the
2378 // first def can be rematerialized!
if (IsFirstRange) {
2380 // Note ReMatOrigDefMI has already been deleted.
2381 rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
2382 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
2383 false, vrm, rc, ReMatIds, loopInfo,
2384 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
2385 MBBVRegsMap, NewLIs);
} else {
2387 rewriteInstructionsForSpills(li, false, I, NULL, 0,
2388 Slot, 0, false, false, false,
2389 false, vrm, rc, ReMatIds, loopInfo,
2390 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
2391 MBBVRegsMap, NewLIs);
}
2393 IsFirstRange = false;
}
2396 handleSpilledImpDefs(li, vrm, rc, NewLIs);
return NewLIs;
}
2400 bool TrySplit = !intervalIsInOneMBB(li);
2403 bool NeedStackSlot = false;
2404 for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
i != e; ++i) {
2406 const VNInfo *VNI = *i;
2407 unsigned VN = VNI->id;
2408 if (VNI->isUnused())
2409 continue; // Dead val#.
2410 // Is the def for the val# rematerializable?
2411 MachineInstr *ReMatDefMI = VNI->isDefAccurate()
2412 ? getInstructionFromIndex(VNI->def) : 0;
bool dummy;
2414 if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, SpillIs, dummy)) {
2415 // Remember how to remat the def of this val#.
2416 ReMatOrigDefs[VN] = ReMatDefMI;
2417 // Original def may be modified so we have to make a copy here.
2418 MachineInstr *Clone = mf_->CloneMachineInstr(ReMatDefMI);
2419 CloneMIs.push_back(Clone);
2420 ReMatDefs[VN] = Clone;
2422 bool CanDelete = true;
2423 if (VNI->hasPHIKill()) {
2424 // A kill is a phi node; not all of its uses can be rematerialized.
2425 // It must not be deleted.
CanDelete = false;
2427 // Need a stack slot if there is any live range where uses cannot be
// rematerialized.
2429 NeedStackSlot = true;
}
if (CanDelete)
2432 ReMatDelete.set(VN);
} else {
2434 // Need a stack slot if there is any live range where uses cannot be
// rematerialized.
2436 NeedStackSlot = true;
}
2440 // One stack slot per live interval.
2441 if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0) {
2442 if (vrm.getStackSlot(li.reg) == VirtRegMap::NO_STACK_SLOT)
2443 Slot = vrm.assignVirt2StackSlot(li.reg);
2445 // This case only occurs when the prealloc splitter has already assigned
2446 // a stack slot to this vreg.
2448 Slot = vrm.getStackSlot(li.reg);
2451 // Create new intervals and rewrite defs and uses.
2452 for (LiveInterval::Ranges::const_iterator
2453 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
2454 MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
2455 MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
2456 bool DefIsReMat = ReMatDefMI != NULL;
2457 bool CanDelete = ReMatDelete[I->valno->id];
int LdSlot = 0;
2459 bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
2460 bool isLoad = isLoadSS ||
2461 (DefIsReMat && ReMatDefMI->getDesc().canFoldAsLoad());
2462 rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
2463 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
2464 CanDelete, vrm, rc, ReMatIds, loopInfo,
2465 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
2466 MBBVRegsMap, NewLIs);
2469 // Insert spills / restores if we are splitting.
if (!TrySplit) {
2471 handleSpilledImpDefs(li, vrm, rc, NewLIs);
return NewLIs;
}
2475 SmallPtrSet<LiveInterval*, 4> AddedKill;
2476 SmallVector<unsigned, 2> Ops;
2477 if (NeedStackSlot) {
2478 int Id = SpillMBBs.find_first();
while (Id != -1) {
2480 std::vector<SRInfo> &spills = SpillIdxes[Id];
2481 for (unsigned i = 0, e = spills.size(); i != e; ++i) {
2482 MachineInstrIndex index = spills[i].index;
2483 unsigned VReg = spills[i].vreg;
2484 LiveInterval &nI = getOrCreateInterval(VReg);
2485 bool isReMat = vrm.isReMaterialized(VReg);
2486 MachineInstr *MI = getInstructionFromIndex(index);
2487 bool CanFold = false;
2488 bool FoundUse = false;
Ops.clear();
2490 if (spills[i].canFold) {
CanFold = true;
2492 for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
2493 MachineOperand &MO = MI->getOperand(j);
2494 if (!MO.isReg() || MO.getReg() != VReg)
2501 (!FoundUse && !alsoFoldARestore(Id, index, VReg,
2502 RestoreMBBs, RestoreIdxes))) {
2503 // MI has two-address uses of the same register. If the use
2504 // isn't the first and only use in the BB, then we can't fold
2505 // it. FIXME: Move this to rewriteInstructionsForSpills.
2512 // Fold the store into the def if possible.
2513 bool Folded = false;
2514 if (CanFold && !Ops.empty()) {
2515 if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot,VReg)){
2518 // Also folded uses, do not issue a load.
2519 eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
2520 nI.removeRange(getLoadIndex(index), getNextSlot(getUseIndex(index)));
2522 nI.removeRange(getDefIndex(index), getStoreIndex(index));
2526 // Otherwise tell the spiller to issue a spill.
2528 LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
2529 bool isKill = LR->end == getStoreIndex(index);
2530 if (!MI->registerDefIsDead(nI.reg))
2531 // No need to spill a dead def.
2532 vrm.addSpillPoint(VReg, isKill, MI);
if (isKill)
2534 AddedKill.insert(&nI);
2537 Id = SpillMBBs.find_next(Id);
2541 int Id = RestoreMBBs.find_first();
while (Id != -1) {
2543 std::vector<SRInfo> &restores = RestoreIdxes[Id];
2544 for (unsigned i = 0, e = restores.size(); i != e; ++i) {
2545 MachineInstrIndex index = restores[i].index;
2546 if (index == MachineInstrIndex())
continue;
2548 unsigned VReg = restores[i].vreg;
2549 LiveInterval &nI = getOrCreateInterval(VReg);
2550 bool isReMat = vrm.isReMaterialized(VReg);
2551 MachineInstr *MI = getInstructionFromIndex(index);
2552 bool CanFold = false;
Ops.clear();
2554 if (restores[i].canFold) {
CanFold = true;
2556 for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
2557 MachineOperand &MO = MI->getOperand(j);
2558 if (!MO.isReg() || MO.getReg() != VReg)
2562 // If this restore were to be folded, it would have been folded
// already.
2571 // Fold the load into the use if possible.
2572 bool Folded = false;
2573 if (CanFold && !Ops.empty()) {
if (!vrm.isReMaterialized(VReg))
2575 Folded = tryFoldMemoryOperand(MI, vrm, NULL,index,Ops,true,Slot,VReg);
else {
2577 MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
int LdSlot = 0;
2579 bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
2580 // If the rematerializable def is a load, also try to fold it.
2581 if (isLoadSS || ReMatDefMI->getDesc().canFoldAsLoad())
2582 Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
2583 Ops, isLoadSS, LdSlot, VReg);
2585 unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
if (ImpUse) {
2587 // Re-matting an instruction with virtual register use. Add the
2588 // register as an implicit use on the use MI and update the register
2589 // interval's spill weight to HUGE_VALF to prevent it from being
// spilled.
2591 LiveInterval &ImpLi = getInterval(ImpUse);
2592 ImpLi.weight = HUGE_VALF;
2593 MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
2598 // If folding is not possible / failed, then tell the spiller to issue a
2599 // load / rematerialization for us.
if (Folded)
2601 nI.removeRange(getLoadIndex(index), getNextSlot(getUseIndex(index)));
else
2603 vrm.addRestorePoint(VReg, MI);
2605 Id = RestoreMBBs.find_next(Id);
2608 // Finalize intervals: add kills, finalize spill weights, and filter out
// dead intervals.
2610 std::vector<LiveInterval*> RetNewLIs;
2611 for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
2612 LiveInterval *LI = NewLIs[i];
if (!LI->empty()) {
2614 LI->weight /= InstrSlots::NUM * getApproximateInstructionCount(*LI);
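// Sketch of the normalization above, assuming InstrSlots::NUM is the number
// of index slots allotted per instruction (four in this scheme): a weight of
// 400 accumulated over an interval spanning roughly 25 instructions becomes
// 400 / (4 * 25) = 4.0, i.e. a per-instruction spill cost, so long intervals
// are not favored merely for covering more code.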
2615 if (!AddedKill.count(LI)) {
2616 LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
2617 MachineInstrIndex LastUseIdx = getBaseIndex(LR->end);
2618 MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
2619 int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
2620 assert(UseIdx != -1);
2621 if (!LastUse->isRegTiedToDefOperand(UseIdx)) {
2622 LastUse->getOperand(UseIdx).setIsKill();
2623 vrm.addKillPoint(LI->reg, LastUseIdx);
2626 RetNewLIs.push_back(LI);
2630 handleSpilledImpDefs(li, vrm, rc, RetNewLIs);
return RetNewLIs;
2634 /// hasAllocatableSuperReg - Return true if the specified physical register has
2635 /// any super register that's allocatable.
2636 bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
2637 for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS)
2638 if (allocatableRegs_[*AS] && hasInterval(*AS))
return true;
return false;
}
2643 /// getRepresentativeReg - Find the largest super register of the specified
2644 /// physical register.
2645 unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
2646 // Find the largest super-register that is allocatable.
2647 unsigned BestReg = Reg;
2648 for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
2649 unsigned SuperReg = *AS;
2650 if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) {
BestReg = SuperReg;
break;
}
}
return BestReg;
}
2658 /// getNumConflictsWithPhysReg - Return the number of uses and defs of the
2659 /// specified interval that conflicts with the specified physical register.
2660 unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
2661 unsigned PhysReg) const {
2662 unsigned NumConflicts = 0;
2663 const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg));
2664 for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
2665 E = mri_->reg_end(); I != E; ++I) {
2666 MachineOperand &O = I.getOperand();
2667 MachineInstr *MI = O.getParent();
2668 MachineInstrIndex Index = getInstructionIndex(MI);
2669 if (pli.liveAt(Index))
++NumConflicts;
}
2672 return NumConflicts;
2675 /// spillPhysRegAroundRegDefsUses - Spill the specified physical register
2676 /// around all defs and uses of the specified interval. Return true if it
2677 /// was able to cut its interval.
2678 bool LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
2679 unsigned PhysReg, VirtRegMap &vrm) {
2680 unsigned SpillReg = getRepresentativeReg(PhysReg);
2682 for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
2683 // There may be registers which alias PhysReg but which are not a
2684 // sub-register of the chosen representative super register. Assert,
2685 // since we can't handle that case yet.
2686 assert(*AS == SpillReg || !allocatableRegs_[*AS] || !hasInterval(*AS) ||
2687 tri_->isSuperRegister(*AS, SpillReg));
2690 LiveInterval &pli = getInterval(SpillReg);
2691 SmallPtrSet<MachineInstr*, 8> SeenMIs;
2692 for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
2693 E = mri_->reg_end(); I != E; ++I) {
2694 MachineOperand &O = I.getOperand();
2695 MachineInstr *MI = O.getParent();
2696 if (SeenMIs.count(MI))
continue;
SeenMIs.insert(MI);
2699 MachineInstrIndex Index = getInstructionIndex(MI);
2700 if (pli.liveAt(Index)) {
2701 vrm.addEmergencySpill(SpillReg, MI);
2702 MachineInstrIndex StartIdx = getLoadIndex(Index);
2703 MachineInstrIndex EndIdx = getNextSlot(getStoreIndex(Index));
2704 if (pli.isInOneLiveRange(StartIdx, EndIdx)) {
2705 pli.removeRange(StartIdx, EndIdx);
} else {
std::string msg;
2709 raw_string_ostream Msg(msg);
2710 Msg << "Ran out of registers during register allocation!";
2711 if (MI->getOpcode() == TargetInstrInfo::INLINEASM) {
2712 Msg << "\nPlease check your inline asm statement for invalid "
2713 << "constraints:\n";
2714 MI->print(Msg, tm_);
2716 llvm_report_error(Msg.str());
2718 for (const unsigned* AS = tri_->getSubRegisters(SpillReg); *AS; ++AS) {
2719 if (!hasInterval(*AS))
continue;
2721 LiveInterval &spli = getInterval(*AS);
2722 if (spli.liveAt(Index))
2723 spli.removeRange(getLoadIndex(Index), getNextSlot(getStoreIndex(Index)));
2730 LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg,
2731 MachineInstr* startInst) {
2732 LiveInterval& Interval = getOrCreateInterval(reg);
2733 VNInfo* VN = Interval.getNextValue(
2734 MachineInstrIndex(getInstructionIndex(startInst), MachineInstrIndex::DEF),
2735 startInst, true, getVNInfoAllocator());
2736 VN->setHasPHIKill(true);
2737 VN->kills.push_back(terminatorGaps[startInst->getParent()]);
LiveRange LR(
2739 MachineInstrIndex(getInstructionIndex(startInst), MachineInstrIndex::DEF),
2740 getNextSlot(getMBBEndIdx(startInst->getParent())), VN);
2741 Interval.addRange(LR);
return LR;
}
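// Minimal usage sketch (hypothetical caller along the lines of PHI
// elimination; all names below are illustrative only):
//   LiveIntervals *LIs = ...;
//   MachineInstr *CopyMI = ...;   // instruction defining IncomingReg here
//   unsigned IncomingReg = ...;
//   LiveRange LR = LIs->addLiveRangeToEndOfBlock(IncomingReg, CopyMI);
//   // LR now runs from CopyMI's def slot to the end of its basic block.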