//===-- LiveIntervalAnalysis.cpp - Live Interval Analysis -----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the LiveInterval analysis pass which is used
// by the Linear Scan Register allocator. This pass linearizes the
// basic blocks of the function in DFS order and uses the
// LiveVariables pass to conservatively compute live intervals for
// each virtual and physical register.
//
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "liveintervals"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "VirtRegMap.h"
#include "llvm/Value.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include <algorithm>
#include <cmath>
using namespace llvm;
// Hidden options for help debugging.
static cl::opt<bool> DisableReMat("disable-rematerialization",
                                  cl::init(false), cl::Hidden);

static cl::opt<bool> SplitAtBB("split-intervals-at-bb",
                               cl::init(true), cl::Hidden);

static cl::opt<int> SplitLimit("split-limit",
                               cl::init(-1), cl::Hidden);

static cl::opt<bool> EnableAggressiveRemat("aggressive-remat", cl::Hidden);

static cl::opt<bool> EnableFastSpilling("fast-spill",
                                        cl::init(false), cl::Hidden);
STATISTIC(numIntervals, "Number of original intervals");
STATISTIC(numFolds    , "Number of loads/stores folded into instructions");
STATISTIC(numSplits   , "Number of intervals split");

char LiveIntervals::ID = 0;
static RegisterPass<LiveIntervals> X("liveintervals", "Live Interval Analysis");
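
// Illustrative sketch (not part of this file): a machine pass that wants
// these results declares the dependency and queries the analysis. The
// getAnalysisUsage/getAnalysis hooks are the standard PassManager API;
// MyRegAlloc and its body are hypothetical client code.
//
//   void MyRegAlloc::getAnalysisUsage(AnalysisUsage &AU) const {
//     AU.addRequired<LiveIntervals>();
//     MachineFunctionPass::getAnalysisUsage(AU);
//   }
//   bool MyRegAlloc::runOnMachineFunction(MachineFunction &MF) {
//     LiveIntervals &LIs = getAnalysis<LiveIntervals>();
//     // ... walk LIs intervals and assign registers ...
//   }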
void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<AliasAnalysis>();
  AU.addPreserved<AliasAnalysis>();
  AU.addPreserved<LiveVariables>();
  AU.addRequired<LiveVariables>();
  AU.addPreservedID(MachineLoopInfoID);
  AU.addPreservedID(MachineDominatorsID);
  AU.addPreservedID(PHIEliminationID);
  AU.addRequiredID(PHIEliminationID);
  AU.addRequiredID(TwoAddressInstructionPassID);
  MachineFunctionPass::getAnalysisUsage(AU);
}
void LiveIntervals::releaseMemory() {
  // Free the live intervals themselves.
  for (DenseMap<unsigned, LiveInterval*>::iterator I = r2iMap_.begin(),
       E = r2iMap_.end(); I != E; ++I)
    delete I->second;

  terminatorGaps.clear();

  // Release VNInfo memory regions after all VNInfo objects are dtor'd.
  VNInfoAllocator.Reset();
  while (!ClonedMIs.empty()) {
    MachineInstr *MI = ClonedMIs.back();
    ClonedMIs.pop_back();
    mf_->DeleteMachineInstr(MI);
  }
}
/// processImplicitDefs - Process IMPLICIT_DEF instructions and make sure
/// there is one implicit_def for each use. Add isUndef markers to
/// implicit_def defs and their uses.
void LiveIntervals::processImplicitDefs() {
  SmallSet<unsigned, 8> ImpDefRegs;
  SmallVector<MachineInstr*, 8> ImpDefMIs;
  MachineBasicBlock *Entry = mf_->begin();
  SmallPtrSet<MachineBasicBlock*,16> Visited;
  for (df_ext_iterator<MachineBasicBlock*, SmallPtrSet<MachineBasicBlock*,16> >
         DFI = df_ext_begin(Entry, Visited), E = df_ext_end(Entry, Visited);
       DFI != E; ++DFI) {
    MachineBasicBlock *MBB = *DFI;
    for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
         I != E; ) {
      MachineInstr *MI = &*I;
      ++I;
      if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
        unsigned Reg = MI->getOperand(0).getReg();
        ImpDefRegs.insert(Reg);
        ImpDefMIs.push_back(MI);
        continue;
      }

      bool ChangedToImpDef = false;
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand& MO = MI->getOperand(i);
        if (!MO.isReg() || !MO.isUse())
          continue;
        unsigned Reg = MO.getReg();
        if (!Reg)
          continue;
        if (!ImpDefRegs.count(Reg))
          continue;
        // Use is a copy, just turn it into an implicit_def.
        unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
        if (tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg) &&
            Reg == SrcReg) {
          bool isKill = MO.isKill();
          MI->setDesc(tii_->get(TargetInstrInfo::IMPLICIT_DEF));
          for (int j = MI->getNumOperands() - 1, ee = 0; j > ee; --j)
            MI->RemoveOperand(j);
          if (isKill)
            ImpDefRegs.erase(Reg);
          ChangedToImpDef = true;
          continue;
        }

        MO.setIsUndef();
        if (MO.isKill() || MI->isRegTiedToDefOperand(i))
          ImpDefRegs.erase(Reg);
      }

      if (ChangedToImpDef) {
        // Backtrack to process this new implicit_def.
        --I;
      } else {
        for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
          MachineOperand& MO = MI->getOperand(i);
          if (!MO.isReg() || !MO.isDef())
            continue;
          ImpDefRegs.erase(MO.getReg());
        }
      }
    }
    // Any outstanding liveout implicit_def's?
    for (unsigned i = 0, e = ImpDefMIs.size(); i != e; ++i) {
      MachineInstr *MI = ImpDefMIs[i];
      unsigned Reg = MI->getOperand(0).getReg();
      if (TargetRegisterInfo::isPhysicalRegister(Reg) ||
          !ImpDefRegs.count(Reg)) {
        // Delete all "local" implicit_def's. That includes those which define
        // physical registers, since they cannot be live-out.
        MI->eraseFromParent();
        continue;
      }

      // If there are multiple defs of the same register and at least one
      // is not an implicit_def, do not insert implicit_def's before the
      // dead implicit_def's.
      bool Skip = false;
      for (MachineRegisterInfo::def_iterator DI = mri_->def_begin(Reg),
           DE = mri_->def_end(); DI != DE; ++DI) {
        if (DI->getOpcode() != TargetInstrInfo::IMPLICIT_DEF) {
          Skip = true;
          break;
        }
      }
      if (Skip)
        continue;

      // The only implicit_defs we want to keep are those that are live
      // out of their block.
      MI->eraseFromParent();

      for (MachineRegisterInfo::use_iterator UI = mri_->use_begin(Reg),
           UE = mri_->use_end(); UI != UE; ) {
        MachineOperand &RMO = UI.getOperand();
        MachineInstr *RMI = &*UI;
        ++UI;
        MachineBasicBlock *RMBB = RMI->getParent();
        if (RMBB == MBB)
          continue;

        // Turn a copy use into an implicit_def.
        unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
        if (tii_->isMoveInstr(*RMI, SrcReg, DstReg, SrcSubReg, DstSubReg) &&
            Reg == SrcReg) {
          RMI->setDesc(tii_->get(TargetInstrInfo::IMPLICIT_DEF));
          for (int j = RMI->getNumOperands() - 1, ee = 0; j > ee; --j)
            RMI->RemoveOperand(j);
          continue;
        }

        const TargetRegisterClass* RC = mri_->getRegClass(Reg);
        unsigned NewVReg = mri_->createVirtualRegister(RC);
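
        // A sketch of the copy rewrite performed above: if %reg1024 is only
        // defined by an implicit_def, a copy out of it reads undefined bits,
        // so the copy itself is turned into an implicit_def of its
        // destination:
        //
        //   %reg1024<def> = IMPLICIT_DEF
        //   %reg1025<def> = MOV %reg1024   ==>  %reg1025<def> = IMPLICIT_DEF
        //
        // (register numbers are made up for illustration).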
void LiveIntervals::computeNumbering() {
  Index2MiMap OldI2MI = i2miMap_;
  std::vector<IdxMBBPair> OldI2MBB = Idx2MBBMap;

  Idx2MBBMap.clear();
  MBB2IdxMap.clear();
  mi2iMap_.clear();
  i2miMap_.clear();
  terminatorGaps.clear();

  // Number MachineInstrs and MachineBasicBlocks.
  // Initialize MBB indexes to a sentinel.
  MBB2IdxMap.resize(mf_->getNumBlockIDs(), std::make_pair(~0U,~0U));

  unsigned MIIndex = 0;
  for (MachineFunction::iterator MBB = mf_->begin(), E = mf_->end();
       MBB != E; ++MBB) {
    unsigned StartIdx = MIIndex;

    // Insert an empty slot at the beginning of each block.
    MIIndex += InstrSlots::NUM;
    i2miMap_.push_back(0);

    for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
         I != E; ++I) {
      if (I == MBB->getFirstTerminator()) {
        // Leave a gap before the terminators; this is where PHI kills of
        // values that are live out of the block will point.
        bool inserted =
          terminatorGaps.insert(std::make_pair(&*MBB, MIIndex)).second;
        assert(inserted &&
               "Multiple 'first' terminators encountered during numbering.");
        inserted = inserted; // Avoid compiler warning if assertions turned off.
        i2miMap_.push_back(0);

        MIIndex += InstrSlots::NUM;
      }

      bool inserted = mi2iMap_.insert(std::make_pair(I, MIIndex)).second;
      assert(inserted && "multiple MachineInstr -> index mappings");
      i2miMap_.push_back(I);
      MIIndex += InstrSlots::NUM;

      // Insert max(1, numdefs) empty slots after every instruction.
      unsigned Slots = I->getDesc().getNumDefs();
      if (Slots == 0)
        Slots = 1;
      MIIndex += InstrSlots::NUM * Slots;
      while (Slots--)
        i2miMap_.push_back(0);
    }

    if (MBB->getFirstTerminator() == MBB->end()) {
      // Leave a gap before the (absent) terminators as well, so every block
      // has a terminator gap for PHI kills to point at.
      bool inserted =
        terminatorGaps.insert(std::make_pair(&*MBB, MIIndex)).second;
      assert(inserted &&
             "Multiple 'first' terminators encountered during numbering.");
      inserted = inserted; // Avoid compiler warning if assertions turned off.
      i2miMap_.push_back(0);

      MIIndex += InstrSlots::NUM;
    }

    // Set the MBB2IdxMap entry for this MBB.
    MBB2IdxMap[MBB->getNumber()] = std::make_pair(StartIdx, MIIndex - 1);
    Idx2MBBMap.push_back(std::make_pair(StartIdx, MBB));
  }

  std::sort(Idx2MBBMap.begin(), Idx2MBBMap.end(), Idx2MBBCompare());
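
  // A worked sketch of the numbering above, assuming the InstrSlots layout
  // from LiveIntervalAnalysis.h (LOAD=0, USE=1, DEF=2, STORE=3, NUM=4): an
  // instruction with base index 8 exposes
  //   getLoadIndex(8)  == 8    (slot where a reload could be inserted)
  //   getUseIndex(8)   == 9    (where its operands are read)
  //   getDefIndex(8)   == 10   (where its results are written)
  //   getStoreIndex(8) == 11   (slot where a spill could be inserted)
  // and the extra empty slots inserted per def leave room for renumbering.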
  if (!OldI2MI.empty())
    for (iterator OI = begin(), OE = end(); OI != OE; ++OI) {
      for (LiveInterval::iterator LI = OI->second->begin(),
           LE = OI->second->end(); LI != LE; ++LI) {

        // Remap the start index of the live range to the corresponding new
        // number, or our best guess at what it _should_ correspond to if the
        // original instruction has been erased. This is either the following
        // instruction or its predecessor.
        unsigned index = LI->start / InstrSlots::NUM;
        unsigned offset = LI->start % InstrSlots::NUM;
        if (offset == InstrSlots::LOAD) {
          std::vector<IdxMBBPair>::const_iterator I =
            std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), LI->start);
          // Take the pair containing the index.
          std::vector<IdxMBBPair>::const_iterator J =
            (I == OldI2MBB.end() && OldI2MBB.size()>0) ? (I-1): I;

          LI->start = getMBBStartIdx(J->second);
        } else {
          LI->start = mi2iMap_[OldI2MI[index]] + offset;
        }

        // Remap the ending index in the same way that we remapped the start,
        // except for the final step where we always map to the immediately
        // following instruction.
        index = (LI->end - 1) / InstrSlots::NUM;
        offset = LI->end % InstrSlots::NUM;
        if (offset == InstrSlots::LOAD) {
          // VReg dies at end of block.
          std::vector<IdxMBBPair>::const_iterator I =
            std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), LI->end);
          --I;

          LI->end = getMBBEndIdx(I->second) + 1;
        } else {
          unsigned idx = index;
          while (index < OldI2MI.size() && !OldI2MI[index]) ++index;

          if (index != OldI2MI.size())
            LI->end = mi2iMap_[OldI2MI[index]] + (idx == index ? offset : 0);
          else
            LI->end = InstrSlots::NUM * i2miMap_.size();
        }
      }
      for (LiveInterval::vni_iterator VNI = OI->second->vni_begin(),
           VNE = OI->second->vni_end(); VNI != VNE; ++VNI) {
        VNInfo* vni = *VNI;

        // Remap the VNInfo def index, which works the same as the
        // start indices above. VN's with special sentinel defs
        // don't need to be remapped.
        if (vni->isDefAccurate() && !vni->isUnused()) {
          unsigned index = vni->def / InstrSlots::NUM;
          unsigned offset = vni->def % InstrSlots::NUM;
          if (offset == InstrSlots::LOAD) {
            std::vector<IdxMBBPair>::const_iterator I =
              std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), vni->def);
            // Take the pair containing the index.
            std::vector<IdxMBBPair>::const_iterator J =
              (I == OldI2MBB.end() && OldI2MBB.size()>0) ? (I-1): I;

            vni->def = getMBBStartIdx(J->second);
          } else {
            vni->def = mi2iMap_[OldI2MI[index]] + offset;
          }
        }

        // Remap the VNInfo kill indices, which works the same as
        // the end indices above.
        for (size_t i = 0; i < vni->kills.size(); ++i) {
          unsigned killIdx = vni->kills[i].killIdx;

          unsigned index = (killIdx - 1) / InstrSlots::NUM;
          unsigned offset = killIdx % InstrSlots::NUM;

          if (offset == InstrSlots::LOAD) {
            assert(0 && "Value killed at a load slot.");
            /*std::vector<IdxMBBPair>::const_iterator I =
             std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), vni->kills[i]);
            --I;

            vni->kills[i] = getMBBEndIdx(I->second);*/
          } else {
            if (vni->kills[i].isPHIKill) {
              std::vector<IdxMBBPair>::const_iterator I =
                std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), index);
              --I;
              vni->kills[i].killIdx = terminatorGaps[I->second];
            } else {
              assert(OldI2MI[index] != 0 &&
                     "Kill refers to instruction not present in index maps.");
              vni->kills[i].killIdx = mi2iMap_[OldI2MI[index]] + offset;
            }

            /*
            unsigned idx = index;
            while (index < OldI2MI.size() && !OldI2MI[index]) ++index;

            if (index != OldI2MI.size())
              vni->kills[i] = mi2iMap_[OldI2MI[index]] +
                              (idx == index ? offset : 0);
            else
              vni->kills[i] = InstrSlots::NUM * i2miMap_.size();
            */
          }
        }
      }
    }
}
void LiveIntervals::scaleNumbering(int factor) {
  // Need to
  //  * scale MBB begin and end points
  //  * scale all ranges.
  //  * Update VNI structures.
  //  * Scale instruction numberings

  // Scale the MBB indices.
  Idx2MBBMap.clear();
  for (MachineFunction::iterator MBB = mf_->begin(), MBBE = mf_->end();
       MBB != MBBE; ++MBB) {
    std::pair<unsigned, unsigned> &mbbIndices = MBB2IdxMap[MBB->getNumber()];
    mbbIndices.first = InstrSlots::scale(mbbIndices.first, factor);
    mbbIndices.second = InstrSlots::scale(mbbIndices.second, factor);
    Idx2MBBMap.push_back(std::make_pair(mbbIndices.first, MBB));
  }
  std::sort(Idx2MBBMap.begin(), Idx2MBBMap.end(), Idx2MBBCompare());

  // Scale terminator gaps.
  for (DenseMap<MachineBasicBlock*, unsigned>::iterator
       TGI = terminatorGaps.begin(), TGE = terminatorGaps.end();
       TGI != TGE; ++TGI) {
    terminatorGaps[TGI->first] = InstrSlots::scale(TGI->second, factor);
  }

  // Scale the intervals.
  for (iterator LI = begin(), LE = end(); LI != LE; ++LI) {
    LI->second->scaleNumbering(factor);
  }

  // Scale MachineInstrs.
  Mi2IndexMap oldmi2iMap = mi2iMap_;
  unsigned highestSlot = 0;
  for (Mi2IndexMap::iterator MI = oldmi2iMap.begin(), ME = oldmi2iMap.end();
       MI != ME; ++MI) {
    unsigned newSlot = InstrSlots::scale(MI->second, factor);
    mi2iMap_[MI->first] = newSlot;
    highestSlot = std::max(highestSlot, newSlot);
  }

  i2miMap_.clear();
  i2miMap_.resize(highestSlot + 1);
  for (Mi2IndexMap::iterator MI = mi2iMap_.begin(), ME = mi2iMap_.end();
       MI != ME; ++MI) {
    i2miMap_[MI->second] = const_cast<MachineInstr *>(MI->first);
  }
}
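
// For example (a sketch): scaling by a factor of 2 maps an instruction at
// index 8 to index 16 and grows i2miMap_ accordingly. InstrSlots::scale
// (assumed from the header) must apply the same mapping to every index so
// that mi2iMap_, i2miMap_, MBB2IdxMap, and the intervals stay consistent.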
/// runOnMachineFunction - Compute live intervals for the whole function.
///
bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
  mf_ = &fn;
  mri_ = &mf_->getRegInfo();
  tm_ = &fn.getTarget();
  tri_ = tm_->getRegisterInfo();
  tii_ = tm_->getInstrInfo();
  aa_ = &getAnalysis<AliasAnalysis>();
  lv_ = &getAnalysis<LiveVariables>();
  allocatableRegs_ = tri_->getAllocatableSet(fn);

  processImplicitDefs();
  computeNumbering();
  computeIntervals();

  numIntervals += getNumIntervals();

  DEBUG(dump());
  return true;
}
/// print - Implement the dump method.
void LiveIntervals::print(std::ostream &O, const Module* ) const {
  O << "********** INTERVALS **********\n";
  for (const_iterator I = begin(), E = end(); I != E; ++I) {
    I->second->print(O, tri_);
    O << "\n";
  }

  O << "********** MACHINEINSTRS **********\n";
  mf_->print(O, IntervalPrefixPrinter(*this));
}
/// conflictsWithPhysRegDef - Returns true if the specified register
/// is defined during the duration of the specified interval.
bool LiveIntervals::conflictsWithPhysRegDef(const LiveInterval &li,
                                            VirtRegMap &vrm, unsigned reg) {
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    for (unsigned index = getBaseIndex(I->start),
           end = getBaseIndex(I->end-1) + InstrSlots::NUM; index != end;
         index += InstrSlots::NUM) {
      // Skip deleted instructions.
      while (index != end && !getInstructionFromIndex(index))
        index += InstrSlots::NUM;
      if (index == end) break;

      MachineInstr *MI = getInstructionFromIndex(index);
      unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
      if (tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
        if (SrcReg == li.reg || DstReg == li.reg)
          continue;
      for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
        MachineOperand& mop = MI->getOperand(i);
        if (!mop.isReg())
          continue;
        unsigned PhysReg = mop.getReg();
        if (PhysReg == 0 || PhysReg == li.reg)
          continue;
        if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
          if (!vrm.hasPhys(PhysReg))
            continue;
          PhysReg = vrm.getPhys(PhysReg);
        }
        if (PhysReg && tri_->regsOverlap(PhysReg, reg))
          return true;
      }
    }
  }

  return false;
}
/// conflictsWithPhysRegRef - Similar to conflictsWithPhysRegDef except
/// it can check uses as well.
bool LiveIntervals::conflictsWithPhysRegRef(LiveInterval &li,
                                            unsigned Reg, bool CheckUse,
                                  SmallPtrSet<MachineInstr*,32> &JoinedCopies) {
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    for (unsigned index = getBaseIndex(I->start),
           end = getBaseIndex(I->end-1) + InstrSlots::NUM; index != end;
         index += InstrSlots::NUM) {
      // Skip deleted instructions.
      MachineInstr *MI = 0;
      while (index != end) {
        MI = getInstructionFromIndex(index);
        if (MI)
          break;
        index += InstrSlots::NUM;
      }
      if (index == end) break;

      if (JoinedCopies.count(MI))
        continue;
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand& MO = MI->getOperand(i);
        if (!MO.isReg())
          continue;
        if (MO.isUse() && !CheckUse)
          continue;
        unsigned PhysReg = MO.getReg();
        if (PhysReg == 0 || TargetRegisterInfo::isVirtualRegister(PhysReg))
          continue;
        if (tri_->isSubRegister(Reg, PhysReg))
          return true;
      }
    }
  }

  return false;
}
void LiveIntervals::printRegName(unsigned reg) const {
  if (TargetRegisterInfo::isPhysicalRegister(reg))
    cerr << tri_->getName(reg);
  else
    cerr << "%reg" << reg;
}
void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
                                             MachineBasicBlock::iterator mi,
                                             unsigned MIIdx, MachineOperand& MO,
                                             unsigned MOIdx,
                                             LiveInterval &interval) {
  DOUT << "\t\tregister: "; DEBUG(printRegName(interval.reg));

  // Virtual registers may be defined multiple times (due to phi
  // elimination and 2-addr elimination). Much of what we do only has to be
  // done once for the vreg. We use an empty interval to detect the first
  // time we see a vreg.
  LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);
  if (interval.empty()) {
    // Get the Idx of the defining instructions.
    unsigned defIndex = getDefIndex(MIIdx);
    // Earlyclobbers move back one.
    if (MO.isEarlyClobber())
      defIndex = getUseIndex(MIIdx);
    VNInfo *ValNo;
    MachineInstr *CopyMI = NULL;
    unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
    if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
        mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
        mi->getOpcode() == TargetInstrInfo::SUBREG_TO_REG ||
        tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
      CopyMI = mi;
    // Earlyclobbers move back one.
    ValNo = interval.getNextValue(defIndex, CopyMI, true, VNInfoAllocator);

    assert(ValNo->id == 0 && "First value in interval is not 0?");

    // Loop over all of the blocks that the vreg is defined in. There are
    // two cases we have to handle here. The most common case is a vreg
    // whose lifetime is contained within a basic block. In this case there
    // will be a single kill, in MBB, which comes after the definition.
    if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
      // FIXME: what about dead vars?
      unsigned killIdx;
      if (vi.Kills[0] != mi)
        killIdx = getUseIndex(getInstructionIndex(vi.Kills[0]))+1;
      else
        killIdx = defIndex+1;

      // If the kill happens after the definition, we have an intra-block
      // live range.
      if (killIdx > defIndex) {
        assert(vi.AliveBlocks.empty() &&
               "Shouldn't be alive across any blocks!");
        LiveRange LR(defIndex, killIdx, ValNo);
        interval.addRange(LR);
        DOUT << " +" << LR << "\n";
        interval.addKill(ValNo, killIdx, false);
        return;
      }
    }
    // The other case we handle is when a virtual register lives to the end
    // of the defining block, potentially live across some blocks, then is
    // live into some number of blocks, but gets killed. Start by adding a
    // range that goes from this definition to the end of the defining block.
    LiveRange NewLR(defIndex, getMBBEndIdx(mbb)+1, ValNo);
    DOUT << " +" << NewLR;
    interval.addRange(NewLR);

    // Iterate over all of the blocks that the variable is completely
    // live in, adding [instrIndex(begin), instrIndex(end)+4) to the
    // live interval.
    for (SparseBitVector<>::iterator I = vi.AliveBlocks.begin(),
         E = vi.AliveBlocks.end(); I != E; ++I) {
      LiveRange LR(getMBBStartIdx(*I),
                   getMBBEndIdx(*I)+1,  // MBB ends at -1.
                   ValNo);
      interval.addRange(LR);
      DOUT << " +" << LR;
    }

    // Finally, this virtual register is live from the start of any killing
    // block to the 'use' slot of the killing instruction.
    for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
      MachineInstr *Kill = vi.Kills[i];
      unsigned killIdx = getUseIndex(getInstructionIndex(Kill))+1;
      LiveRange LR(getMBBStartIdx(Kill->getParent()),
                   killIdx, ValNo);
      interval.addRange(LR);
      interval.addKill(ValNo, killIdx, false);
    }
  } else {
    // If this is the second time we see a virtual register definition, it
    // must be due to phi elimination or two addr elimination. If this is
    // the result of two address elimination, then the vreg is one of the
    // def-and-use register operands.
    if (mi->isRegTiedToUseOperand(MOIdx)) {
      // If this is a two-address definition, then we have already processed
      // the live range. The only problem is that we didn't realize there
      // are actually two values in the live interval. Because of this we
      // need to take the live range that defines this register and split it
      // into two values.
      assert(interval.containsOneValue());
      unsigned DefIndex = getDefIndex(interval.getValNumInfo(0)->def);
      unsigned RedefIndex = getDefIndex(MIIdx);
      if (MO.isEarlyClobber())
        RedefIndex = getUseIndex(MIIdx);

      const LiveRange *OldLR = interval.getLiveRangeContaining(RedefIndex-1);
      VNInfo *OldValNo = OldLR->valno;

      // Delete the initial value, which should be short and continuous,
      // because the 2-addr copy must be in the same MBB as the redef.
      interval.removeRange(DefIndex, RedefIndex);

      // Two-address vregs should always only be redefined once. This means
      // that at this point, there should be exactly one value number in it.
      assert(interval.containsOneValue() && "Unexpected 2-addr liveint!");

      // The new value number (#1) is defined by the instruction we claimed
      // defined value #0.
      VNInfo *ValNo = interval.getNextValue(OldValNo->def, OldValNo->copy,
                                            false, // update at *
                                            VNInfoAllocator);
      ValNo->setFlags(OldValNo->getFlags()); // * <- updating here

      // Value#0 is now defined by the 2-addr instruction.
      OldValNo->def  = RedefIndex;
      OldValNo->copy = 0;
      if (MO.isEarlyClobber())
        OldValNo->setHasRedefByEC(true);

      // Add the new live interval which replaces the range for the input copy.
      LiveRange LR(DefIndex, RedefIndex, ValNo);
      DOUT << " replace range with " << LR;
      interval.addRange(LR);
      interval.addKill(ValNo, RedefIndex, false);

      // If this redefinition is dead, we need to add a dummy unit live
      // range covering the def slot.
      if (MO.isDead())
        interval.addRange(LiveRange(RedefIndex, RedefIndex+1, OldValNo));

      DOUT << " RESULT: ";
      interval.print(DOUT, tri_);
      DOUT << "\n";
    } else {
      // Otherwise, this must be because of phi elimination. If this is the
      // first redefinition of the vreg that we have seen, go back and change
      // the live range in the PHI block to be a different value number.
      if (interval.containsOneValue()) {
        assert(vi.Kills.size() == 1 &&
               "PHI elimination vreg should have one kill, the PHI itself!");

        // Remove the old range that we now know has an incorrect number.
        VNInfo *VNI = interval.getValNumInfo(0);
        MachineInstr *Killer = vi.Kills[0];
        unsigned Start = getMBBStartIdx(Killer->getParent());
        unsigned End = getUseIndex(getInstructionIndex(Killer))+1;
        DOUT << " Removing [" << Start << "," << End << "] from: ";
        interval.print(DOUT, tri_); DOUT << "\n";
        interval.removeRange(Start, End);
        assert(interval.ranges.size() == 1 &&
               "newly discovered PHI interval has >1 ranges.");
        MachineBasicBlock *killMBB = getMBBFromIndex(interval.endNumber());
        interval.addKill(VNI, terminatorGaps[killMBB], true);
        VNI->setHasPHIKill(true);
        DOUT << " RESULT: "; interval.print(DOUT, tri_);

        // Replace the interval with one of a NEW value number. Note that this
        // value number isn't actually defined by an instruction, weird huh? :)
        LiveRange LR(Start, End,
          interval.getNextValue(mbb->getNumber(), 0, false, VNInfoAllocator));
        LR.valno->setIsPHIDef(true);
        DOUT << " replace range with " << LR;
        interval.addRange(LR);
        interval.addKill(LR.valno, End, false);
        DOUT << " RESULT: "; interval.print(DOUT, tri_);
      }

      // In the case of PHI elimination, each variable definition is only
      // live until the end of the block. We've already taken care of the
      // rest of the live range.
      unsigned defIndex = getDefIndex(MIIdx);
      if (MO.isEarlyClobber())
        defIndex = getUseIndex(MIIdx);

      VNInfo *ValNo;
      MachineInstr *CopyMI = NULL;
      unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
      if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
          mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
          mi->getOpcode() == TargetInstrInfo::SUBREG_TO_REG ||
          tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
        CopyMI = mi;
      ValNo = interval.getNextValue(defIndex, CopyMI, true, VNInfoAllocator);

      unsigned killIndex = getMBBEndIdx(mbb) + 1;
      LiveRange LR(defIndex, killIndex, ValNo);
      interval.addRange(LR);
      interval.addKill(ValNo, terminatorGaps[mbb], true);
      ValNo->setHasPHIKill(true);
      DOUT << " +" << LR;
    }
  }

  DOUT << '\n';
}
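
// A sketch of the two-address case handled above: for a pseudo-sequence
//   %t = A
//   %t = add %t, 4    ; redefinition created by two-address elimination
// the single interval for %t is split in two: the new value number covers
// [DefIndex, RedefIndex), and value #0 is moved to start at RedefIndex,
// mirroring the DefIndex/RedefIndex manipulation in the code above.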
void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
                                              MachineBasicBlock::iterator mi,
                                              unsigned MIIdx,
                                              MachineOperand& MO,
                                              LiveInterval &interval,
                                              MachineInstr *CopyMI) {
  // A physical register cannot be live across basic blocks, so its
  // lifetime must end somewhere in its defining basic block.
  DOUT << "\t\tregister: "; DEBUG(printRegName(interval.reg));

  unsigned baseIndex = MIIdx;
  unsigned start = getDefIndex(baseIndex);
  // Earlyclobbers move back one.
  if (MO.isEarlyClobber())
    start = getUseIndex(MIIdx);
  unsigned end = start;

  // If it is not used after definition, it is considered dead at
  // the instruction defining it. Hence its interval is:
  // [defSlot(def), defSlot(def)+1)
  if (MO.isDead()) {
    DOUT << " dead";
    end = start + 1;
    goto exit;
  }

  // If it is not dead on definition, it must be killed by a
  // subsequent instruction. Hence its interval is:
  // [defSlot(def), useSlot(kill)+1)
  baseIndex += InstrSlots::NUM;
  while (++mi != MBB->end()) {
    while (baseIndex / InstrSlots::NUM < i2miMap_.size() &&
           getInstructionFromIndex(baseIndex) == 0)
      baseIndex += InstrSlots::NUM;
    if (mi->killsRegister(interval.reg, tri_)) {
      DOUT << " killed";
      end = getUseIndex(baseIndex) + 1;
      goto exit;
    } else {
      int DefIdx = mi->findRegisterDefOperandIdx(interval.reg, false, tri_);
      if (DefIdx != -1) {
        if (mi->isRegTiedToUseOperand(DefIdx)) {
          // Two-address instruction.
          end = getDefIndex(baseIndex);
          if (mi->getOperand(DefIdx).isEarlyClobber())
            end = getUseIndex(baseIndex);
        } else {
          // Another instruction redefines the register before it is ever read.
          // Then the register is essentially dead at the instruction that
          // defines it. Hence its interval is:
          // [defSlot(def), defSlot(def)+1)
          DOUT << " dead";
          end = start + 1;
        }
        goto exit;
      }
    }

    baseIndex += InstrSlots::NUM;
  }

  // The only case we should have a dead physreg here without a killing
  // instruction or another point where we know it's dead is if it is
  // live-in to the function and never used. Another possible case is that
  // the implicit use of the physical register has been deleted by the
  // two-address pass.
  end = getDefIndex(start) + 1;

exit:
  assert(start < end && "did not find end of interval?");

  // Already exists? Extend old live interval.
  LiveInterval::iterator OldLR = interval.FindLiveRangeContaining(start);
  bool Extend = OldLR != interval.end();
  VNInfo *ValNo = Extend
    ? OldLR->valno : interval.getNextValue(start, CopyMI, true, VNInfoAllocator);
  if (MO.isEarlyClobber() && Extend)
    ValNo->setHasRedefByEC(true);
  LiveRange LR(start, end, ValNo);
  interval.addRange(LR);
  interval.addKill(LR.valno, end, false);
  DOUT << " +" << LR << '\n';
}
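
// Concrete interval shapes produced above (a sketch, using the 4-slot
// scheme): for a def at base index 8 that is never read, the interval is the
// unit range [10, 11) == [defSlot(def), defSlot(def)+1); if the value is
// killed by a use at base index 16, the interval is [10, 18) ==
// [defSlot(def), useSlot(kill)+1).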
void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
                                      MachineBasicBlock::iterator MI,
                                      unsigned MIIdx, MachineOperand& MO,
                                      unsigned MOIdx) {
  if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
    handleVirtualRegisterDef(MBB, MI, MIIdx, MO, MOIdx,
                             getOrCreateInterval(MO.getReg()));
  else if (allocatableRegs_[MO.getReg()]) {
    MachineInstr *CopyMI = NULL;
    unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
    if (MI->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
        MI->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
        MI->getOpcode() == TargetInstrInfo::SUBREG_TO_REG ||
        tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
      CopyMI = MI;
    handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                              getOrCreateInterval(MO.getReg()), CopyMI);
    // Def of a register also defines its sub-registers.
    for (const unsigned* AS = tri_->getSubRegisters(MO.getReg()); *AS; ++AS)
      // If MI also modifies the sub-register explicitly, avoid processing it
      // more than once. Do not pass in TRI here so it checks for exact match.
      if (!MI->modifiesRegister(*AS))
        handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                                  getOrCreateInterval(*AS), 0);
  }
}
void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
                                         unsigned MIIdx,
                                         LiveInterval &interval, bool isAlias) {
  DOUT << "\t\tlivein register: "; DEBUG(printRegName(interval.reg));

  // Look for kills; if the register reaches a def before it's killed, then
  // it shouldn't be considered live-in.
  MachineBasicBlock::iterator mi = MBB->begin();
  unsigned baseIndex = MIIdx;
  unsigned start = baseIndex;
  while (baseIndex / InstrSlots::NUM < i2miMap_.size() &&
         getInstructionFromIndex(baseIndex) == 0)
    baseIndex += InstrSlots::NUM;
  unsigned end = baseIndex;
  bool SeenDefUse = false;

  while (mi != MBB->end()) {
    if (mi->killsRegister(interval.reg, tri_)) {
      DOUT << " killed";
      end = getUseIndex(baseIndex) + 1;
      SeenDefUse = true;
      break;
    } else if (mi->modifiesRegister(interval.reg, tri_)) {
      // Another instruction redefines the register before it is ever read.
      // Then the register is essentially dead at the instruction that defines
      // it. Hence its interval is:
      // [defSlot(def), defSlot(def)+1)
      DOUT << " dead";
      end = getDefIndex(start) + 1;
      SeenDefUse = true;
      break;
    }

    baseIndex += InstrSlots::NUM;
    ++mi;
    if (mi != MBB->end()) {
      while (baseIndex / InstrSlots::NUM < i2miMap_.size() &&
             getInstructionFromIndex(baseIndex) == 0)
        baseIndex += InstrSlots::NUM;
    }
  }

  // Live-in register might not be used at all.
  if (!SeenDefUse) {
    if (isAlias) {
      DOUT << " dead";
      end = getDefIndex(MIIdx) + 1;
    } else {
      DOUT << " live through";
      end = baseIndex;
    }
  }

  VNInfo *vni =
    interval.getNextValue(MBB->getNumber(), 0, false, VNInfoAllocator);
  vni->setIsPHIDef(true);
  LiveRange LR(start, end, vni);

  interval.addRange(LR);
  interval.addKill(LR.valno, end, false);
  DOUT << " +" << LR << '\n';
}
/// computeIntervals - Computes the live intervals for virtual
/// registers. For some ordering of the machine instructions [1,N], a
/// live interval is an interval [i, j) where 1 <= i <= j < N for
/// which a variable is live.
void LiveIntervals::computeIntervals() {
  DOUT << "********** COMPUTING LIVE INTERVALS **********\n"
       << "********** Function: "
       << ((Value*)mf_->getFunction())->getName() << '\n';

  SmallVector<unsigned, 8> UndefUses;
  for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
       MBBI != E; ++MBBI) {
    MachineBasicBlock *MBB = MBBI;
    // Track the index of the current machine instr.
    unsigned MIIndex = getMBBStartIdx(MBB);
    DOUT << ((Value*)MBB->getBasicBlock())->getName() << ":\n";

    MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();

    // Create intervals for live-ins to this BB first.
    for (MachineBasicBlock::const_livein_iterator LI = MBB->livein_begin(),
         LE = MBB->livein_end(); LI != LE; ++LI) {
      handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
      // Multiple live-ins can alias the same register.
      for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
        if (!hasInterval(*AS))
          handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
                               true);
    }

    // Skip over empty initial indices.
    while (MIIndex / InstrSlots::NUM < i2miMap_.size() &&
           getInstructionFromIndex(MIIndex) == 0)
      MIIndex += InstrSlots::NUM;

    for (; MI != miEnd; ++MI) {
      DOUT << MIIndex << "\t" << *MI;

      // Handle defs.
      for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
        MachineOperand &MO = MI->getOperand(i);
        if (!MO.isReg() || !MO.getReg())
          continue;

        // handle register defs - build intervals
        if (MO.isDef())
          handleRegisterDef(MBB, MI, MIIndex, MO, i);
        else if (MO.isUndef())
          UndefUses.push_back(MO.getReg());
      }

      // Skip over the empty slots after each instruction.
      unsigned Slots = MI->getDesc().getNumDefs();
      if (Slots == 0)
        Slots = 1;
      MIIndex += InstrSlots::NUM * Slots;

      // Skip over empty indices.
      while (MIIndex / InstrSlots::NUM < i2miMap_.size() &&
             getInstructionFromIndex(MIIndex) == 0)
        MIIndex += InstrSlots::NUM;
    }
  }

  // Create empty intervals for registers defined by implicit_def's (except
  // for those implicit_def that define values which are live-out of their
  // blocks).
  for (unsigned i = 0, e = UndefUses.size(); i != e; ++i) {
    unsigned UndefReg = UndefUses[i];
    (void)getOrCreateInterval(UndefReg);
  }
}
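
// Worked example (a sketch): in a block whose instructions sit at base
// indices 4, 8 and 12, a vreg defined at 8 and killed at 12 gets the
// interval [getDefIndex(8), getUseIndex(12)+1) == [10, 14), exactly the
// [defIndex, killIdx) range built in handleVirtualRegisterDef above.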
bool LiveIntervals::findLiveInMBBs(unsigned Start, unsigned End,
                              SmallVectorImpl<MachineBasicBlock*> &MBBs) const {
  std::vector<IdxMBBPair>::const_iterator I =
    std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), Start);

  bool ResVal = false;
  while (I != Idx2MBBMap.end()) {
    if (I->first >= End)
      break;
    MBBs.push_back(I->second);
    ResVal = true;
    ++I;
  }
  return ResVal;
}
bool LiveIntervals::findReachableMBBs(unsigned Start, unsigned End,
                              SmallVectorImpl<MachineBasicBlock*> &MBBs) const {
  std::vector<IdxMBBPair>::const_iterator I =
    std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), Start);

  bool ResVal = false;
  while (I != Idx2MBBMap.end()) {
    if (I->first > End)
      break;
    MachineBasicBlock *MBB = I->second;
    if (getMBBEndIdx(MBB) > End)
      break;
    for (MachineBasicBlock::succ_iterator SI = MBB->succ_begin(),
         SE = MBB->succ_end(); SI != SE; ++SI)
      MBBs.push_back(*SI);
    ResVal = true;
    ++I;
  }
  return ResVal;
}
LiveInterval* LiveIntervals::createInterval(unsigned reg) {
  float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ? HUGE_VALF : 0.0F;
  return new LiveInterval(reg, Weight);
}
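
// Physical register intervals start with HUGE_VALF as their weight so the
// spill heuristics treat them as unspillable; virtual registers start at
// weight 0 and accumulate def / use weights later (see
// rewriteInstructionsForSpills below).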
/// dupInterval - Duplicate a live interval. The caller is responsible for
/// managing the allocated memory.
LiveInterval* LiveIntervals::dupInterval(LiveInterval *li) {
  LiveInterval *NewLI = createInterval(li->reg);
  NewLI->Copy(*li, mri_, getVNInfoAllocator());
  return NewLI;
}
/// getVNInfoSourceReg - Helper function that parses the specified VNInfo
/// copy field and returns the source register that defines it.
unsigned LiveIntervals::getVNInfoSourceReg(const VNInfo *VNI) const {
  if (!VNI->copy)
    return 0;

  if (VNI->copy->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG) {
    // If it's extracting out of a physical register, return the sub-register.
    unsigned Reg = VNI->copy->getOperand(1).getReg();
    if (TargetRegisterInfo::isPhysicalRegister(Reg))
      Reg = tri_->getSubReg(Reg, VNI->copy->getOperand(2).getImm());
    return Reg;
  } else if (VNI->copy->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
             VNI->copy->getOpcode() == TargetInstrInfo::SUBREG_TO_REG)
    return VNI->copy->getOperand(2).getReg();

  unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
  if (tii_->isMoveInstr(*VNI->copy, SrcReg, DstReg, SrcSubReg, DstSubReg))
    return SrcReg;
  llvm_unreachable("Unrecognized copy instruction!");
  return 0;
}
//===----------------------------------------------------------------------===//
// Register allocator hooks.
//
/// getReMatImplicitUse - If the remat definition MI has one (for now, we only
/// allow one) virtual register operand, then its uses are implicitly using
/// the register. Returns the virtual register.
unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
                                            MachineInstr *MI) const {
  unsigned RegOp = 0;
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg() || !MO.isUse())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || Reg == li.reg)
      continue;

    if (TargetRegisterInfo::isPhysicalRegister(Reg) &&
        !allocatableRegs_[Reg])
      continue;
    // FIXME: For now, only remat MI with at most one register operand.
    assert(RegOp == 0 &&
           "Can't rematerialize instruction with multiple register operands!");
    RegOp = MO.getReg();
  }
  return RegOp;
}
/// isValNoAvailableAt - Return true if the val# of the specified interval
/// which reaches the given instruction also reaches the specified use index.
bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
                                       unsigned UseIdx) const {
  unsigned Index = getInstructionIndex(MI);
  VNInfo *ValNo = li.FindLiveRangeContaining(Index)->valno;
  LiveInterval::const_iterator UI = li.FindLiveRangeContaining(UseIdx);
  return UI != li.end() && UI->valno == ValNo;
}
/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       const VNInfo *ValNo, MachineInstr *MI,
                                       SmallVectorImpl<LiveInterval*> &SpillIs,
                                       bool &isLoad) {
  if (DisableReMat)
    return false;

  if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF)
    return true;

  int FrameIdx = 0;
  if (tii_->isLoadFromStackSlot(MI, FrameIdx) &&
      mf_->getFrameInfo()->isImmutableObjectIndex(FrameIdx))
    // FIXME: Let target specific isReallyTriviallyReMaterializable determine
    // this, but remember this is not safe to fold into a two-address
    // instruction.
    // This is a load from a fixed stack slot. It can be rematerialized.
    return true;

  // If the target-specific rules don't identify an instruction as
  // being trivially rematerializable, use some target-independent
  // rules.
  if (!MI->getDesc().isRematerializable() ||
      !tii_->isTriviallyReMaterializable(MI)) {
    if (!EnableAggressiveRemat)
      return false;

    // If the instruction accesses memory but the memoperands have been lost,
    // we can't analyze it.
    const TargetInstrDesc &TID = MI->getDesc();
    if ((TID.mayLoad() || TID.mayStore()) && MI->memoperands_empty())
      return false;

    // Avoid instructions obviously unsafe for remat.
    if (TID.hasUnmodeledSideEffects() || TID.isNotDuplicable())
      return false;

    // If the instruction accesses memory and the memory could be non-constant,
    // assume the instruction is not rematerializable.
    for (std::list<MachineMemOperand>::const_iterator
           I = MI->memoperands_begin(), E = MI->memoperands_end(); I != E; ++I){
      const MachineMemOperand &MMO = *I;
      if (MMO.isVolatile() || MMO.isStore())
        return false;
      const Value *V = MMO.getValue();
      if (!V)
        return false;
      if (const PseudoSourceValue *PSV = dyn_cast<PseudoSourceValue>(V)) {
        if (!PSV->isConstant(mf_->getFrameInfo()))
          return false;
      } else if (!aa_->pointsToConstantMemory(V))
        return false;
    }

    // If any of the registers accessed are non-constant, conservatively assume
    // the instruction is not rematerializable.
    unsigned ImpUse = 0;
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      const MachineOperand &MO = MI->getOperand(i);
      if (!MO.isReg())
        continue;
      unsigned Reg = MO.getReg();
      if (Reg == 0)
        continue;
      if (TargetRegisterInfo::isPhysicalRegister(Reg))
        return false;

      // Only allow one def, and that in the first operand.
      if (MO.isDef() != (i == 0))
        return false;

      // Only allow constant-valued registers.
      bool IsLiveIn = mri_->isLiveIn(Reg);
      MachineRegisterInfo::def_iterator I = mri_->def_begin(Reg),
                                        E = mri_->def_end();

      // For the def, it should be the only def of that register.
      if (MO.isDef() && (next(I) != E || IsLiveIn))
        return false;

      // Only allow one other register use, as that's all the
      // remat mechanisms support currently.
      if (Reg != li.reg) {
        if (ImpUse == 0)
          ImpUse = Reg;
        else if (Reg != ImpUse)
          return false;
      }
      // For the use, there should be only one associated def.
      if (I != E && (next(I) != E || IsLiveIn))
        return false;
    }
  }

  unsigned ImpUse = getReMatImplicitUse(li, MI);
  if (ImpUse) {
    const LiveInterval &ImpLi = getInterval(ImpUse);
    for (MachineRegisterInfo::use_iterator ri = mri_->use_begin(li.reg),
           re = mri_->use_end(); ri != re; ++ri) {
      MachineInstr *UseMI = &*ri;
      unsigned UseIdx = getInstructionIndex(UseMI);
      if (li.FindLiveRangeContaining(UseIdx)->valno != ValNo)
        continue;
      if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
        return false;
    }

    // If a register operand of the re-materialized instruction is going to
    // be spilled next, then it's not legal to re-materialize this instruction.
    for (unsigned i = 0, e = SpillIs.size(); i != e; ++i)
      if (ImpUse == SpillIs[i]->reg)
        return false;
  }

  return true;
}
/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       const VNInfo *ValNo, MachineInstr *MI) {
  SmallVector<LiveInterval*, 4> Dummy1;
  bool Dummy2;
  return isReMaterializable(li, ValNo, MI, Dummy1, Dummy2);
}

/// isReMaterializable - Returns true if the definition MI of every val# of
/// the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       SmallVectorImpl<LiveInterval*> &SpillIs,
                                       bool &isLoad) {
  isLoad = false;
  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
       i != e; ++i) {
    const VNInfo *VNI = *i;
    if (VNI->isUnused())
      continue; // Dead val#.
    // Is the def for the val# rematerializable?
    if (!VNI->isDefAccurate())
      return false;
    MachineInstr *ReMatDefMI = getInstructionFromIndex(VNI->def);
    bool DefIsLoad = false;
    if (!ReMatDefMI ||
        !isReMaterializable(li, VNI, ReMatDefMI, SpillIs, DefIsLoad))
      return false;
    isLoad |= DefIsLoad;
  }
  return true;
}
/// FilterFoldedOps - Filter out two-address use operands. Return
/// true if it finds any issue with the operands that ought to prevent
/// folding.
static bool FilterFoldedOps(MachineInstr *MI,
                            SmallVector<unsigned, 2> &Ops,
                            unsigned &MRInfo,
                            SmallVector<unsigned, 2> &FoldOps) {
  MRInfo = 0;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    unsigned OpIdx = Ops[i];
    MachineOperand &MO = MI->getOperand(OpIdx);
    // FIXME: fold subreg use.
    if (MO.getSubReg())
      return true;
    if (MO.isDef())
      MRInfo |= (unsigned)VirtRegMap::isMod;
    else {
      // Filter out two-address use operand(s).
      if (MI->isRegTiedToDefOperand(OpIdx)) {
        MRInfo = VirtRegMap::isModRef;
        continue;
      }
      MRInfo |= (unsigned)VirtRegMap::isRef;
    }
    FoldOps.push_back(OpIdx);
  }
  return false;
}
/// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
/// slot / to reg or any rematerialized load into ith operand of specified
/// MI. If it is successful, MI is updated with the newly created MI and
/// returns true.
bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
                                         VirtRegMap &vrm, MachineInstr *DefMI,
                                         unsigned InstrIdx,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool isSS, int Slot, unsigned Reg) {
  // If it is an implicit def instruction, just delete it.
  if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
    RemoveMachineInstrFromMaps(MI);
    vrm.RemoveMachineInstrFromMaps(MI);
    MI->eraseFromParent();
    return true;
  }

  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;

  // The only time it's safe to fold into a two address instruction is when
  // it's folding reload and spill from / into a spill stack slot.
  if (DefMI && (MRInfo & VirtRegMap::isMod))
    return false;

  MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(*mf_, MI, FoldOps, Slot)
                           : tii_->foldMemoryOperand(*mf_, MI, FoldOps, DefMI);
  if (fmi) {
    // Remember this instruction uses the spill slot.
    if (isSS) vrm.addSpillSlotUse(Slot, fmi);

    // Attempt to fold the memory reference into the instruction. If
    // we can do this, we don't need to insert spill code.
    MachineBasicBlock &MBB = *MI->getParent();
    if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
      vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
    vrm.transferSpillPts(MI, fmi);
    vrm.transferRestorePts(MI, fmi);
    vrm.transferEmergencySpills(MI, fmi);
    mi2iMap_.erase(MI);
    i2miMap_[InstrIdx / InstrSlots::NUM] = fmi;
    mi2iMap_[fmi] = InstrIdx;
    MI = MBB.insert(MBB.erase(MI), fmi);
    ++numFolds;
    return true;
  }
  return false;
}
/// canFoldMemoryOperand - Returns true if the specified load / store
/// folding is possible.
bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool ReMat) const {
  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;

  // It's only legal to remat for a use, not a def.
  if (ReMat && (MRInfo & VirtRegMap::isMod))
    return false;

  return tii_->canFoldMemoryOperand(MI, FoldOps);
}
bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
  SmallPtrSet<MachineBasicBlock*, 4> MBBs;
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    std::vector<IdxMBBPair>::const_iterator II =
      std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), I->start);
    if (II == Idx2MBBMap.end())
      continue;
    if (I->end > II->first)  // crossing a MBB.
      return false;
    MBBs.insert(II->second);
    if (MBBs.size() > 1)
      return false;
  }
  return true;
}
/// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of
/// interval on to-be re-materialized operands of MI) with new register.
void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
                                       MachineInstr *MI, unsigned NewVReg,
                                       VirtRegMap &vrm) {
  // There is an implicit use. That means one of the other operands is
  // being remat'ed and the remat'ed instruction has li.reg as an
  // use operand. Make sure we rewrite that as well.
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
      continue;
    if (!vrm.isReMaterialized(Reg))
      continue;
    MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
    MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg);
    if (UseMO)
      UseMO->setReg(NewVReg);
  }
}
/// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
/// for addIntervalsForSpills to rewrite uses / defs for the given live range.
bool LiveIntervals::
rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
                 bool TrySplit, unsigned index, unsigned end, MachineInstr *MI,
                 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                 unsigned Slot, int LdSlot,
                 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                 VirtRegMap &vrm,
                 const TargetRegisterClass* rc,
                 SmallVector<int, 4> &ReMatIds,
                 const MachineLoopInfo *loopInfo,
                 unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
                 DenseMap<unsigned,unsigned> &MBBVRegsMap,
                 std::vector<LiveInterval*> &NewLIs) {
  bool CanFold = false;
 RestartInstruction:
  for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
    MachineOperand& mop = MI->getOperand(i);
    if (!mop.isReg())
      continue;
    unsigned Reg = mop.getReg();
    unsigned RegI = Reg;
    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
      continue;
    if (Reg != li.reg)
      continue;

    bool TryFold = !DefIsReMat;
    bool FoldSS = true; // Default behavior unless it's a remat.
    int FoldSlot = Slot;
    if (DefIsReMat) {
      // If this is the rematerializable definition MI itself and
      // all of its uses are rematerialized, simply delete it.
      if (MI == ReMatOrigDefMI && CanDelete) {
        DOUT << "\t\t\t\tErasing re-materializable def: ";
        DOUT << MI << '\n';
        RemoveMachineInstrFromMaps(MI);
        vrm.RemoveMachineInstrFromMaps(MI);
        MI->eraseFromParent();
        break;
      }

      // If def for this use can't be rematerialized, then try folding.
      // If def is rematerializable and it's a load, also try folding.
      TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
      if (isLoad) {
        // Try fold loads (from stack slot, constant pool, etc.) into uses.
        FoldSS = isLoadSS;
        FoldSlot = LdSlot;
      }
    }
    // Scan all of the operands of this instruction rewriting operands
    // to use NewVReg instead of li.reg as appropriate. We do this for
    // two reasons:
    //
    //   1. If the instr reads the same spilled vreg multiple times, we
    //      want to reuse the NewVReg.
    //   2. If the instr is a two-addr instruction, we are required to
    //      keep the src/dst regs pinned.
    //
    // Keep track of whether we replace a use and/or def so that we can
    // create the spill interval with the appropriate range.

    HasUse = mop.isUse();
    HasDef = mop.isDef();
    SmallVector<unsigned, 2> Ops;
    Ops.push_back(i);
    for (unsigned j = i+1, e = MI->getNumOperands(); j != e; ++j) {
      const MachineOperand &MOj = MI->getOperand(j);
      if (!MOj.isReg())
        continue;
      unsigned RegJ = MOj.getReg();
      if (RegJ == 0 || TargetRegisterInfo::isPhysicalRegister(RegJ))
        continue;
      if (RegJ == RegI) {
        Ops.push_back(j);
        if (!MOj.isUndef()) {
          HasUse |= MOj.isUse();
          HasDef |= MOj.isDef();
        }
      }
    }

    // Create a new virtual register for the spill interval.
    // Create the new register now so we can map the fold instruction
    // to the new register so when it is unfolded we get the correct
    // answer.
    bool CreatedNewVReg = false;
    if (NewVReg == 0) {
      NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      CreatedNewVReg = true;
    }

    if (!TryFold)
      CanFold = false;
    else {
      // Do not fold load / store here if we are splitting. We'll find an
      // optimal point to insert a load / store later.
      if (!TrySplit) {
        if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
                                 Ops, FoldSS, FoldSlot, NewVReg)) {
          // Folding the load/store can completely change the instruction in
          // unpredictable ways, rescan it from the beginning.

          if (FoldSS) {
            // We need to give the new vreg the same stack slot as the
            // spilled interval.
            vrm.assignVirt2StackSlot(NewVReg, FoldSlot);
          }

          HasUse = false;
          HasDef = false;
          CanFold = false;
          if (isNotInMIMap(MI))
            break;
          goto RestartInstruction;
        }
      } else {
        // We'll try to fold it later if it's profitable.
        CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat);
      }
    }

    mop.setReg(NewVReg);
    if (mop.isImplicit())
      rewriteImplicitOps(li, MI, NewVReg, vrm);

    // Reuse NewVReg for other reads.
    for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
      MachineOperand &mopj = MI->getOperand(Ops[j]);
      mopj.setReg(NewVReg);
      if (mopj.isImplicit())
        rewriteImplicitOps(li, MI, NewVReg, vrm);
    }

    if (CreatedNewVReg) {
      if (DefIsReMat) {
        vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI);
        if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
          // Each valnum may have its own remat id.
          ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
        } else {
          vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
        }
        if (!CanDelete || (HasUse && HasDef)) {
          // If this is a two-addr instruction then its use operands are
          // rematerializable but its def is not. It should be assigned a
          // stack slot.
          vrm.assignVirt2StackSlot(NewVReg, Slot);
        }
      } else {
        vrm.assignVirt2StackSlot(NewVReg, Slot);
      }
    } else if (HasUse && HasDef &&
               vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
      // If this interval hasn't been assigned a stack slot (because earlier
      // def is a deleted remat def), do it now.
      assert(Slot != VirtRegMap::NO_STACK_SLOT);
      vrm.assignVirt2StackSlot(NewVReg, Slot);
    }

    // Re-matting an instruction with virtual register use. Add the
    // register as an implicit use on the use MI.
    if (DefIsReMat && ImpUse)
      MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
    // Create a new register interval for this spill / remat.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    if (CreatedNewVReg) {
      NewLIs.push_back(&nI);
      MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
      if (TrySplit)
        vrm.setIsSplitFromReg(NewVReg, li.reg);
    }

    if (HasUse) {
      if (CreatedNewVReg) {
        LiveRange LR(getLoadIndex(index), getUseIndex(index)+1,
                     nI.getNextValue(0, 0, false, VNInfoAllocator));
        DOUT << " +" << LR;
        nI.addRange(LR);
      } else {
        // Extend the split live interval to this def / use.
        unsigned End = getUseIndex(index)+1;
        LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
                     nI.getValNumInfo(nI.getNumValNums()-1));
        DOUT << " +" << LR;
        nI.addRange(LR);
      }
    }
    if (HasDef) {
      LiveRange LR(getDefIndex(index), getStoreIndex(index),
                   nI.getNextValue(0, 0, false, VNInfoAllocator));
      DOUT << " +" << LR;
      nI.addRange(LR);
    }

    DOUT << "\t\t\t\tAdded new interval: ";
    nI.print(DOUT, tri_);
    DOUT << '\n';
  }
  return CanFold;
}
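
// Range shapes created above (a sketch, using the 4-slot scheme): a rewritten
// use at base index i gets [getLoadIndex(i), getUseIndex(i)+1), e.g. [8, 10)
// for i == 8, so a reload can be placed in the load slot; a rewritten def
// gets [getDefIndex(i), getStoreIndex(i)), e.g. [10, 11), leaving the store
// slot free for a spill.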
bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
                                         const VNInfo *VNI,
                                         MachineBasicBlock *MBB,
                                         unsigned Idx) const {
  unsigned End = getMBBEndIdx(MBB);
  for (unsigned j = 0, ee = VNI->kills.size(); j != ee; ++j) {
    if (VNI->kills[j].isPHIKill)
      continue;

    unsigned KillIdx = VNI->kills[j].killIdx;
    if (KillIdx > Idx && KillIdx < End)
      return true;
  }
  return false;
}
/// RewriteInfo - Keep track of machine instrs that will be rewritten
/// during spilling.
namespace {
  struct RewriteInfo {
    unsigned Index;
    MachineInstr *MI;
    bool HasUse;
    bool HasDef;
    RewriteInfo(unsigned i, MachineInstr *mi, bool u, bool d)
      : Index(i), MI(mi), HasUse(u), HasDef(d) {}
  };

  struct RewriteInfoCompare {
    bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
      return LHS.Index < RHS.Index;
    }
  };
}
void LiveIntervals::
rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
                    LiveInterval::Ranges::const_iterator &I,
                    MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                    unsigned Slot, int LdSlot,
                    bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                    VirtRegMap &vrm,
                    const TargetRegisterClass* rc,
                    SmallVector<int, 4> &ReMatIds,
                    const MachineLoopInfo *loopInfo,
                    BitVector &SpillMBBs,
                    DenseMap<unsigned, std::vector<SRInfo> > &SpillIdxes,
                    BitVector &RestoreMBBs,
                    DenseMap<unsigned, std::vector<SRInfo> > &RestoreIdxes,
                    DenseMap<unsigned,unsigned> &MBBVRegsMap,
                    std::vector<LiveInterval*> &NewLIs) {
  bool AllCanFold = true;
  unsigned NewVReg = 0;
  unsigned start = getBaseIndex(I->start);
  unsigned end = getBaseIndex(I->end-1) + InstrSlots::NUM;

  // First collect all the def / use in this live range that will be rewritten.
  // Make sure they are sorted according to instruction index.
  std::vector<RewriteInfo> RewriteMIs;
  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
         re = mri_->reg_end(); ri != re; ) {
    MachineInstr *MI = &*ri;
    MachineOperand &O = ri.getOperand();
    ++ri;
    assert(!O.isImplicit() && "Spilling register that's used as implicit use?");
    unsigned index = getInstructionIndex(MI);
    if (index < start || index >= end)
      continue;

    if (O.isUndef())
      // Must be defined by an implicit def. It should not be spilled. Note,
      // this is for correctness reasons. e.g.
      // 8   %reg1024<def> = IMPLICIT_DEF
      // 12  %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
      // The live range [12, 14) is not part of the r1024 live interval since
      // it's defined by an implicit def. It will not conflict with the live
      // interval of r1025. Now suppose both registers are spilled; you can
      // easily see a situation where both registers are reloaded before
      // the INSERT_SUBREG and both target registers would overlap.
      continue;
    RewriteMIs.push_back(RewriteInfo(index, MI, O.isUse(), O.isDef()));
  }
  std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());
  unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
  // Now rewrite the defs and uses.
  for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
    RewriteInfo &rwi = RewriteMIs[i];
    ++i;
    unsigned index = rwi.Index;
    bool MIHasUse = rwi.HasUse;
    bool MIHasDef = rwi.HasDef;
    MachineInstr *MI = rwi.MI;
    // If MI def and/or use the same register multiple times, then there
    // are multiple entries.
    unsigned NumUses = MIHasUse;
    while (i != e && RewriteMIs[i].MI == MI) {
      assert(RewriteMIs[i].Index == index);
      bool isUse = RewriteMIs[i].HasUse;
      if (isUse) ++NumUses;
      MIHasUse |= isUse;
      MIHasDef |= RewriteMIs[i].HasDef;
      ++i;
    }
    MachineBasicBlock *MBB = MI->getParent();

    if (ImpUse && MI != ReMatDefMI) {
      // Re-matting an instruction with virtual register use. Update the
      // register interval's spill weight to HUGE_VALF to prevent it from
      // being spilled.
      LiveInterval &ImpLi = getInterval(ImpUse);
      ImpLi.weight = HUGE_VALF;
    }

    unsigned MBBId = MBB->getNumber();
    unsigned ThisVReg = 0;
    if (TrySplit) {
      DenseMap<unsigned,unsigned>::iterator NVI = MBBVRegsMap.find(MBBId);
      if (NVI != MBBVRegsMap.end()) {
        ThisVReg = NVI->second;
        // One common case:
        //   x = use
        //   ...
        //   def = ...
        //       = use
        // It's better to start a new interval to avoid artificially
        // extending the new interval.
        if (MIHasDef && !MIHasUse) {
          MBBVRegsMap.erase(MBB->getNumber());
          ThisVReg = 0;
        }
      }
    }

    bool IsNew = ThisVReg == 0;
    if (IsNew) {
      // This ends the previous live interval. If all of its def / use
      // can be folded, give it a low spill weight.
      if (NewVReg && TrySplit && AllCanFold) {
        LiveInterval &nI = getOrCreateInterval(NewVReg);
        nI.weight /= 10.0F;
      }
      AllCanFold = true;
    }
    NewVReg = ThisVReg;

    bool HasDef = false;
    bool HasUse = false;
    bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
                         index, end, MI, ReMatOrigDefMI, ReMatDefMI,
                         Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                         CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
                         ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs);
    if (!HasDef && !HasUse)
      continue;

    AllCanFold &= CanFold;

    // Update weight of spill interval.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    if (!TrySplit) {
      // The spill weight is now infinity as it cannot be spilled again.
      nI.weight = HUGE_VALF;
      continue;
    }

    // Keep track of the last def and first use in each MBB.
    if (HasDef) {
      if (MI != ReMatOrigDefMI || !CanDelete) {
        bool HasKill = false;
        if (!HasUse)
          HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, getDefIndex(index));
        else {
          // If this is a two-address instruction, then this index starts a
          // new VNInfo.
          const VNInfo *VNI = li.findDefinedVNInfo(getDefIndex(index));
          if (VNI)
            HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, getDefIndex(index));
        }
        DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
          SpillIdxes.find(MBBId);
        if (!HasKill) {
          if (SII == SpillIdxes.end()) {
            std::vector<SRInfo> S;
            S.push_back(SRInfo(index, NewVReg, true));
            SpillIdxes.insert(std::make_pair(MBBId, S));
          } else if (SII->second.back().vreg != NewVReg) {
            SII->second.push_back(SRInfo(index, NewVReg, true));
          } else if ((int)index > SII->second.back().index) {
            // If there is an earlier def and this is a two-address
            // instruction, then it's not possible to fold the store (which
            // would also fold the load).
            SRInfo &Info = SII->second.back();
            Info.index = index;
            Info.canFold = !HasUse;
          }
          SpillMBBs.set(MBBId);
        } else if (SII != SpillIdxes.end() &&
                   SII->second.back().vreg == NewVReg &&
                   (int)index > SII->second.back().index) {
          // There is an earlier def that's not killed (must be two-address).
          // The spill is no longer needed.
          SII->second.pop_back();
          if (SII->second.empty()) {
            SpillIdxes.erase(MBBId);
            SpillMBBs.reset(MBBId);
          }
        }
      }
    }
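
    // At this point SpillIdxes records, per basic block, the last def of
    // each new vreg that is not followed by a kill in the same block; these
    // are the candidate points for storing the value back to its stack
    // slot.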
    if (HasUse) {
      DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
        SpillIdxes.find(MBBId);
      if (SII != SpillIdxes.end() &&
          SII->second.back().vreg == NewVReg &&
          (int)index > SII->second.back().index)
        // There are use(s) following the last def; it's not safe to fold
        // the spill.
        SII->second.back().canFold = false;
      DenseMap<unsigned, std::vector<SRInfo> >::iterator RII =
        RestoreIdxes.find(MBBId);
      if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
        // If we are splitting live intervals, only fold if it's the first
        // use and there isn't another use later in the MBB.
        RII->second.back().canFold = false;
      else if (IsNew) {
        // Only need a reload if there isn't an earlier def / use.
        if (RII == RestoreIdxes.end()) {
          std::vector<SRInfo> Infos;
          Infos.push_back(SRInfo(index, NewVReg, true));
          RestoreIdxes.insert(std::make_pair(MBBId, Infos));
        } else
          RII->second.push_back(SRInfo(index, NewVReg, true));
        RestoreMBBs.set(MBBId);
      }
    }

    // Update spill weight.
    unsigned loopDepth = loopInfo->getLoopDepth(MBB);
    nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
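    // Note: getSpillWeight scales the cost with loop depth (in this
    // revision, roughly (HasDef + HasUse) * 10^loopDepth), so a def or use
    // inside a deeply nested loop makes the new interval far more expensive
    // to spill again.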
  }

  if (NewVReg && TrySplit && AllCanFold) {
    // If all of its defs / uses can be folded, give it a low spill weight.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    nI.weight /= 10.0F;
  }
}

bool LiveIntervals::alsoFoldARestore(int Id, int index, unsigned vr,
                                     BitVector &RestoreMBBs,
                        DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
  if (!RestoreMBBs[Id])
    return false;
  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
    if (Restores[i].index == index &&
        Restores[i].vreg == vr &&
        Restores[i].canFold)
      return true;
  return false;
}
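
/// eraseRestoreInfo - Mark the restore of the given vreg at the given index
/// as dead. An index of -1 is the sentinel the restore emission loop in
/// addIntervalsForSpills checks before issuing a reload.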
void LiveIntervals::eraseRestoreInfo(int Id, int index, unsigned vr,
                                     BitVector &RestoreMBBs,
                        DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
  if (!RestoreMBBs[Id])
    return;
  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
    if (Restores[i].index == index && Restores[i].vreg == vr)
      Restores[i].index = -1;
}

/// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being
/// spilled and create empty intervals for their uses.
void
LiveIntervals::handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm,
                                    const TargetRegisterClass* rc,
                                    std::vector<LiveInterval*> &NewLIs) {
  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
         re = mri_->reg_end(); ri != re; ) {
    MachineOperand &O = ri.getOperand();
    MachineInstr *MI = &*ri;
    ++ri;
    if (O.isDef()) {
      assert(MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF &&
             "Register def was not rewritten?");
      RemoveMachineInstrFromMaps(MI);
      vrm.RemoveMachineInstrFromMaps(MI);
      MI->eraseFromParent();
    } else {
      // This must be a use of an implicit_def so it's not part of the live
      // interval. Create a new empty live interval for it.
      // FIXME: Can we simply erase some of the instructions? e.g. Stores?
      unsigned NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      vrm.setIsImplicitlyDefined(NewVReg);
      NewLIs.push_back(&getOrCreateInterval(NewVReg));
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand &MO = MI->getOperand(i);
        if (MO.isReg() && MO.getReg() == li.reg) {
          MO.setReg(NewVReg);
          MO.setIsUndef();
        }
      }
    }
  }
}

std::vector<LiveInterval*> LiveIntervals::
addIntervalsForSpillsFast(const LiveInterval &li,
                          const MachineLoopInfo *loopInfo,
                          VirtRegMap &vrm) {
  unsigned slot = vrm.assignVirt2StackSlot(li.reg);

  std::vector<LiveInterval*> added;

  assert(li.weight != HUGE_VALF &&
         "attempt to spill already spilled interval!");

  DOUT << "\t\t\t\tadding intervals for spills for interval: ";
  DEBUG(li.dump());
  DOUT << '\n';

  const TargetRegisterClass* rc = mri_->getRegClass(li.reg);

  MachineRegisterInfo::reg_iterator RI = mri_->reg_begin(li.reg);
  while (RI != mri_->reg_end()) {
    MachineInstr* MI = &*RI;

    SmallVector<unsigned, 2> Indices;
    bool HasUse = false;
    bool HasDef = false;

    for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
      MachineOperand& mop = MI->getOperand(i);
      if (!mop.isReg() || mop.getReg() != li.reg) continue;

      HasUse |= MI->getOperand(i).isUse();
      HasDef |= MI->getOperand(i).isDef();

      Indices.push_back(i);
    }

    if (!tryFoldMemoryOperand(MI, vrm, NULL, getInstructionIndex(MI),
                              Indices, true, slot, li.reg)) {
      unsigned NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      vrm.assignVirt2StackSlot(NewVReg, slot);

      // create a new register for this spill
      LiveInterval &nI = getOrCreateInterval(NewVReg);

      // the spill weight is now infinity as it
      // cannot be spilled again
      nI.weight = HUGE_VALF;

      // Rewrite register operands to use the new vreg.
      for (SmallVectorImpl<unsigned>::iterator I = Indices.begin(),
             E = Indices.end(); I != E; ++I) {
        MI->getOperand(*I).setReg(NewVReg);

        if (MI->getOperand(*I).isUse())
          MI->getOperand(*I).setIsKill(true);
      }

      // Fill in the new live interval.
      unsigned index = getInstructionIndex(MI);
      if (HasUse) {
        LiveRange LR(getLoadIndex(index), getUseIndex(index),
                     nI.getNextValue(0, 0, false, getVNInfoAllocator()));
        DOUT << " +" << LR;
        nI.addRange(LR);
        vrm.addRestorePoint(NewVReg, MI);
      }
      if (HasDef) {
        LiveRange LR(getDefIndex(index), getStoreIndex(index),
                     nI.getNextValue(0, 0, false, getVNInfoAllocator()));
        DOUT << " +" << LR;
        nI.addRange(LR);
        vrm.addSpillPoint(NewVReg, true, MI);
      }

      added.push_back(&nI);

      DOUT << "\t\t\t\tadded new interval: ";
      DEBUG(nI.dump());
      DOUT << '\n';
    }

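    // All operands of MI that referenced li.reg were either folded away or
    // rewritten to NewVReg above, so restarting at reg_begin(li.reg)
    // advances the walk to the next instruction still using the original
    // register.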
    RI = mri_->reg_begin(li.reg);
  }

  return added;
}

std::vector<LiveInterval*> LiveIntervals::
addIntervalsForSpills(const LiveInterval &li,
                      SmallVectorImpl<LiveInterval*> &SpillIs,
                      const MachineLoopInfo *loopInfo, VirtRegMap &vrm) {
  if (EnableFastSpilling)
    return addIntervalsForSpillsFast(li, loopInfo, vrm);

  assert(li.weight != HUGE_VALF &&
         "attempt to spill already spilled interval!");

  DOUT << "\t\t\t\tadding intervals for spills for interval: ";
  li.print(DOUT, tri_);
  DOUT << '\n';

  // Each bit specifies whether a spill is required in the MBB.
  BitVector SpillMBBs(mf_->getNumBlockIDs());
  DenseMap<unsigned, std::vector<SRInfo> > SpillIdxes;
  BitVector RestoreMBBs(mf_->getNumBlockIDs());
  DenseMap<unsigned, std::vector<SRInfo> > RestoreIdxes;
  DenseMap<unsigned,unsigned> MBBVRegsMap;
  std::vector<LiveInterval*> NewLIs;
  const TargetRegisterClass* rc = mri_->getRegClass(li.reg);

  unsigned NumValNums = li.getNumValNums();
  SmallVector<MachineInstr*, 4> ReMatDefs;
  ReMatDefs.resize(NumValNums, NULL);
  SmallVector<MachineInstr*, 4> ReMatOrigDefs;
  ReMatOrigDefs.resize(NumValNums, NULL);
  SmallVector<int, 4> ReMatIds;
  ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
  BitVector ReMatDelete(NumValNums);
  unsigned Slot = VirtRegMap::MAX_STACK_SLOT;

  // Spilling a split live interval. It cannot be split any further. Also,
  // it's guaranteed to be a single val# / range interval.
  if (vrm.getPreSplitReg(li.reg)) {
    vrm.setIsSplitFromReg(li.reg, 0);
    // Unset the split kill marker on the last use.
    unsigned KillIdx = vrm.getKillPoint(li.reg);
    if (KillIdx) {
      MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
      assert(KillMI && "Last use disappeared?");
      int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
      assert(KillOp != -1 && "Last use disappeared?");
      KillMI->getOperand(KillOp).setIsKill(false);
    }
    vrm.removeKillPoint(li.reg);
    bool DefIsReMat = vrm.isReMaterialized(li.reg);
    Slot = vrm.getStackSlot(li.reg);
    assert(Slot != VirtRegMap::MAX_STACK_SLOT);
    MachineInstr *ReMatDefMI = DefIsReMat ?
      vrm.getReMaterializedMI(li.reg) : NULL;
    int LdSlot = 0;
    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
    bool isLoad = isLoadSS ||
      (DefIsReMat && (ReMatDefMI->getDesc().canFoldAsLoad()));
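    // isLoadSS means the remat def reloads directly from a stack slot;
    // isLoad additionally covers defs the target marks canFoldAsLoad, i.e.
    // instructions that may later be folded into their users as loads.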
    bool IsFirstRange = true;
    for (LiveInterval::Ranges::const_iterator
           I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
      // If this is a split live interval with multiple ranges, it means
      // there are two-address instructions that re-defined the value. Only
      // the first def can be rematerialized!
      if (IsFirstRange) {
        // Note ReMatOrigDefMI has already been deleted.
        rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
                             Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                             false, vrm, rc, ReMatIds, loopInfo,
                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                             MBBVRegsMap, NewLIs);
      } else {
        rewriteInstructionsForSpills(li, false, I, NULL, 0,
                             Slot, 0, false, false, false,
                             false, vrm, rc, ReMatIds, loopInfo,
                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                             MBBVRegsMap, NewLIs);
      }
      IsFirstRange = false;
    }

    handleSpilledImpDefs(li, vrm, rc, NewLIs);
    return NewLIs;
  }

  bool TrySplit = SplitAtBB && !intervalIsInOneMBB(li);
  if (SplitLimit != -1 && (int)numSplits >= SplitLimit)
    TrySplit = false;
  if (TrySplit)
    ++numSplits;
  bool NeedStackSlot = false;
  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
       i != e; ++i) {
    const VNInfo *VNI = *i;
    unsigned VN = VNI->id;
    if (VNI->isUnused())
      continue; // Dead val#.
    // Is the def for the val# rematerializable?
    MachineInstr *ReMatDefMI = VNI->isDefAccurate()
      ? getInstructionFromIndex(VNI->def) : 0;
    bool dummy;
    if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, SpillIs, dummy)) {
      // Remember how to remat the def of this val#.
      ReMatOrigDefs[VN] = ReMatDefMI;
      // Original def may be modified so we have to make a copy here.
      MachineInstr *Clone = mf_->CloneMachineInstr(ReMatDefMI);
      ClonedMIs.push_back(Clone);
      ReMatDefs[VN] = Clone;

      bool CanDelete = true;
      if (VNI->hasPHIKill()) {
        // A kill is a phi node; not all of its uses can be rematerialized,
        // so it must not be deleted.
        CanDelete = false;
        // Need a stack slot if there is any live range where uses cannot be
        // rematerialized.
        NeedStackSlot = true;
      }
      if (CanDelete)
        ReMatDelete.set(VN);
    } else {
      // Need a stack slot if there is any live range where uses cannot be
      // rematerialized.
      NeedStackSlot = true;
    }
  }
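
  // Every val# is now classified: rematerializable defs have a cloned
  // instruction in ReMatDefs, fully deletable ones are flagged in
  // ReMatDelete, and NeedStackSlot records whether any value must still
  // live in memory.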
  // One stack slot per live interval.
  if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0) {
    if (vrm.getStackSlot(li.reg) == VirtRegMap::NO_STACK_SLOT)
      Slot = vrm.assignVirt2StackSlot(li.reg);
    else
      // This case only occurs when the prealloc splitter has already
      // assigned a stack slot to this vreg.
      Slot = vrm.getStackSlot(li.reg);
  }

  // Create new intervals and rewrite defs and uses.
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
    MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
    bool DefIsReMat = ReMatDefMI != NULL;
    bool CanDelete = ReMatDelete[I->valno->id];
    int LdSlot = 0;
    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
    bool isLoad = isLoadSS ||
      (DefIsReMat && ReMatDefMI->getDesc().canFoldAsLoad());
    rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
                         Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                         CanDelete, vrm, rc, ReMatIds, loopInfo,
                         SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                         MBBVRegsMap, NewLIs);
  }

  // Insert spills / restores if we are splitting.
  if (!TrySplit) {
    handleSpilledImpDefs(li, vrm, rc, NewLIs);
    return NewLIs;
  }

  SmallPtrSet<LiveInterval*, 4> AddedKill;
  SmallVector<unsigned, 2> Ops;
  if (NeedStackSlot) {
    int Id = SpillMBBs.find_first();
    while (Id != -1) {
      std::vector<SRInfo> &spills = SpillIdxes[Id];
      for (unsigned i = 0, e = spills.size(); i != e; ++i) {
        int index = spills[i].index;
        unsigned VReg = spills[i].vreg;
        LiveInterval &nI = getOrCreateInterval(VReg);
        bool isReMat = vrm.isReMaterialized(VReg);
        MachineInstr *MI = getInstructionFromIndex(index);
        bool CanFold = false;
        bool FoundUse = false;
        Ops.clear();
        if (spills[i].canFold) {
          CanFold = true;
          for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
            MachineOperand &MO = MI->getOperand(j);
            if (!MO.isReg() || MO.getReg() != VReg)
              continue;

            Ops.push_back(j);
            if (MO.isDef())
              continue;
            if (isReMat ||
                (!FoundUse && !alsoFoldARestore(Id, index, VReg,
                                                RestoreMBBs, RestoreIdxes))) {
              // MI has two-address uses of the same register. If the use
              // isn't the first and only use in the BB, then we can't fold
              // it. FIXME: Move this to rewriteInstructionsForSpills.
              CanFold = false;
              break;
            }
            FoundUse = true;
          }
        }
        // Fold the store into the def if possible.
        bool Folded = false;
        if (CanFold && !Ops.empty()) {
          if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot, VReg)) {
            Folded = true;
            if (FoundUse) {
              // Also folded uses, do not issue a load.
              eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
              nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
            }
            nI.removeRange(getDefIndex(index), getStoreIndex(index));
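            // With the store folded into MI, the def..store portion of the
            // new interval is removed, so no separate spill is emitted for
            // this point.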
          }
        }
        // Otherwise tell the spiller to issue a spill.
        if (!Folded) {
          LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
          bool isKill = LR->end == getStoreIndex(index);
          if (!MI->registerDefIsDead(nI.reg))
            // No need to spill a dead def.
            vrm.addSpillPoint(VReg, isKill, MI);
          if (isKill)
            AddedKill.insert(&nI);
        }
      }
      Id = SpillMBBs.find_next(Id);
    }
  }

  int Id = RestoreMBBs.find_first();
  while (Id != -1) {
    std::vector<SRInfo> &restores = RestoreIdxes[Id];
    for (unsigned i = 0, e = restores.size(); i != e; ++i) {
      int index = restores[i].index;
      if (index == -1)
        continue;
      unsigned VReg = restores[i].vreg;
      LiveInterval &nI = getOrCreateInterval(VReg);
      bool isReMat = vrm.isReMaterialized(VReg);
      MachineInstr *MI = getInstructionFromIndex(index);
      bool CanFold = false;
      Ops.clear();
      if (restores[i].canFold) {
        CanFold = true;
        for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
          MachineOperand &MO = MI->getOperand(j);
          if (!MO.isReg() || MO.getReg() != VReg)
            continue;

          if (MO.isDef()) {
            // If this restore were to be folded, it would have been folded
            // already.
            CanFold = false;
            break;
          }
          Ops.push_back(j);
        }
      }

      // Fold the load into the use if possible.
      bool Folded = false;
      if (CanFold && !Ops.empty()) {
        if (!isReMat)
          Folded = tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true,
                                        Slot, VReg);
        else {
          MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
          int LdSlot = 0;
          bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
          // If the rematerializable def is a load, also try to fold it.
          if (isLoadSS || ReMatDefMI->getDesc().canFoldAsLoad())
            Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
                                          Ops, isLoadSS, LdSlot, VReg);
          unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
          if (ImpUse) {
            // Re-matting an instruction with virtual register use. Add the
            // register as an implicit use on the use MI and update the
            // register interval's spill weight to HUGE_VALF to prevent it
            // from being spilled.
            LiveInterval &ImpLi = getInterval(ImpUse);
            ImpLi.weight = HUGE_VALF;
            MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
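            // The operand added above is an implicit use (isDef == false,
            // isImp == true): it keeps the remat'ed value live at this
            // point without changing the instruction's explicit operands.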
          }
        }
      }
      // If folding is not possible / failed, then tell the spiller to issue
      // a load / rematerialization for us.
      if (Folded)
        nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
      else
        vrm.addRestorePoint(VReg, MI);
    }
    Id = RestoreMBBs.find_next(Id);
  }

  // Finalize intervals: add kills, finalize spill weights, and filter out
  // dead intervals.
  std::vector<LiveInterval*> RetNewLIs;
  for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
    LiveInterval *LI = NewLIs[i];
    if (!LI->empty()) {
      LI->weight /= InstrSlots::NUM * getApproximateInstructionCount(*LI);
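      // Normalizing by the interval's approximate size (InstrSlots::NUM
      // slots per instruction) turns the accumulated weight into a spill
      // cost per unit of code, so long and short intervals compare fairly.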
      if (!AddedKill.count(LI)) {
        LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
        unsigned LastUseIdx = getBaseIndex(LR->end);
        MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
        int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
        assert(UseIdx != -1);
        if (!LastUse->isRegTiedToDefOperand(UseIdx)) {
          LastUse->getOperand(UseIdx).setIsKill();
          vrm.addKillPoint(LI->reg, LastUseIdx);
        }
      }
      RetNewLIs.push_back(LI);
    }
  }

  handleSpilledImpDefs(li, vrm, rc, RetNewLIs);
  return RetNewLIs;
}

/// hasAllocatableSuperReg - Return true if the specified physical register
/// has any super register that's allocatable.
bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS)
    if (allocatableRegs_[*AS] && hasInterval(*AS))
      return true;
  return false;
}

/// getRepresentativeReg - Find the largest super register of the specified
/// physical register.
unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
  // Find the largest super-register that is allocatable.
  unsigned BestReg = Reg;
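  // A super-register is "largest" here when it has no allocatable
  // super-register of its own; the first such register found wins.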
  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
    unsigned SuperReg = *AS;
    if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) {
      BestReg = SuperReg;
      break;
    }
  }
  return BestReg;
}

/// getNumConflictsWithPhysReg - Return the number of uses and defs of the
/// specified interval that conflict with the specified physical register.
unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
                                                   unsigned PhysReg) const {
  unsigned NumConflicts = 0;
  const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg));
  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
         E = mri_->reg_end(); I != E; ++I) {
    MachineOperand &O = I.getOperand();
    MachineInstr *MI = O.getParent();
    unsigned Index = getInstructionIndex(MI);
    if (pli.liveAt(Index))
      ++NumConflicts;
  }
  return NumConflicts;
}

/// spillPhysRegAroundRegDefsUses - Spill the specified physical register
/// around all defs and uses of the specified interval. Return true if it
/// was able to cut its interval.
bool LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
                                                  unsigned PhysReg,
                                                  VirtRegMap &vrm) {
  unsigned SpillReg = getRepresentativeReg(PhysReg);

  for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
    // If there are registers which alias PhysReg but are not sub-registers
    // of the chosen representative super register, assert since we can't
    // handle them yet.
    assert(*AS == SpillReg || !allocatableRegs_[*AS] || !hasInterval(*AS) ||
           tri_->isSuperRegister(*AS, SpillReg));

  bool Cut = false;
  LiveInterval &pli = getInterval(SpillReg);
  SmallPtrSet<MachineInstr*, 8> SeenMIs;
  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
         E = mri_->reg_end(); I != E; ++I) {
    MachineOperand &O = I.getOperand();
    MachineInstr *MI = O.getParent();
    if (SeenMIs.count(MI))
      continue;
    SeenMIs.insert(MI);
    unsigned Index = getInstructionIndex(MI);
    if (pli.liveAt(Index)) {
      vrm.addEmergencySpill(SpillReg, MI);
      unsigned StartIdx = getLoadIndex(Index);
      unsigned EndIdx = getStoreIndex(Index)+1;
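      // [StartIdx, EndIdx) covers all of MI's slots, from its load slot
      // through its store slot, i.e. the instruction's entire footprint in
      // the slot index space.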
      if (pli.isInOneLiveRange(StartIdx, EndIdx)) {
        pli.removeRange(StartIdx, EndIdx);
        Cut = true;
      } else {
        std::string msg;
        raw_string_ostream Msg(msg);
        Msg << "Ran out of registers during register allocation!";
        if (MI->getOpcode() == TargetInstrInfo::INLINEASM) {
          Msg << "\nPlease check your inline asm statement for invalid "
              << "constraints:\n";
          MI->print(Msg, tm_);
        }
        llvm_report_error(Msg.str());
      }
      for (const unsigned* AS = tri_->getSubRegisters(SpillReg); *AS; ++AS) {
        if (!hasInterval(*AS))
          continue;
        LiveInterval &spli = getInterval(*AS);
        if (spli.liveAt(Index))
          spli.removeRange(getLoadIndex(Index), getStoreIndex(Index)+1);
      }
    }
  }
  return Cut;
}

LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg,
                                                  MachineInstr* startInst) {
  LiveInterval& Interval = getOrCreateInterval(reg);
  VNInfo* VN = Interval.getNextValue(
    getInstructionIndex(startInst) + InstrSlots::DEF,
    startInst, true, getVNInfoAllocator());
  VN->setHasPHIKill(true);
  VN->kills.push_back(
    VNInfo::KillInfo(terminatorGaps[startInst->getParent()], true));
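  // The kill is recorded at the block's terminator gap: the value lives to
  // the end of the block and is consumed only by a PHI in a successor,
  // hence the PHI-kill marking above.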
  LiveRange LR(getInstructionIndex(startInst) + InstrSlots::DEF,
               getMBBEndIdx(startInst->getParent()) + 1, VN);
  Interval.addRange(LR);

  return LR;
}