1 //===-- LiveIntervalAnalysis.cpp - Live Interval Analysis -----------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file implements the LiveInterval analysis pass which is used
11 // by the Linear Scan Register allocator. This pass linearizes the
12 // basic blocks of the function in DFS order and uses the
13 // LiveVariables pass to conservatively compute live intervals for
14 // each virtual and physical register.
16 //===----------------------------------------------------------------------===//
18 #define DEBUG_TYPE "liveintervals"
19 #include "llvm/CodeGen/LiveIntervalAnalysis.h"
20 #include "VirtRegMap.h"
21 #include "llvm/Value.h"
22 #include "llvm/Analysis/AliasAnalysis.h"
23 #include "llvm/CodeGen/LiveVariables.h"
24 #include "llvm/CodeGen/MachineFrameInfo.h"
25 #include "llvm/CodeGen/MachineInstr.h"
26 #include "llvm/CodeGen/MachineInstrBuilder.h"
27 #include "llvm/CodeGen/MachineLoopInfo.h"
28 #include "llvm/CodeGen/MachineRegisterInfo.h"
29 #include "llvm/CodeGen/Passes.h"
30 #include "llvm/CodeGen/PseudoSourceValue.h"
31 #include "llvm/Target/TargetRegisterInfo.h"
32 #include "llvm/Target/TargetInstrInfo.h"
33 #include "llvm/Target/TargetMachine.h"
34 #include "llvm/Target/TargetOptions.h"
35 #include "llvm/Support/CommandLine.h"
36 #include "llvm/Support/Debug.h"
37 #include "llvm/Support/ErrorHandling.h"
38 #include "llvm/Support/raw_ostream.h"
39 #include "llvm/ADT/DepthFirstIterator.h"
40 #include "llvm/ADT/SmallSet.h"
41 #include "llvm/ADT/Statistic.h"
42 #include "llvm/ADT/STLExtras.h"
48 // Hidden options for help debugging.
49 static cl::opt<bool> DisableReMat("disable-rematerialization",
50 cl::init(false), cl::Hidden);
52 static cl::opt<bool> SplitAtBB("split-intervals-at-bb",
53 cl::init(true), cl::Hidden);
54 static cl::opt<int> SplitLimit("split-limit",
55 cl::init(-1), cl::Hidden);
57 static cl::opt<bool> EnableAggressiveRemat("aggressive-remat", cl::Hidden);
59 static cl::opt<bool> EnableFastSpilling("fast-spill",
60 cl::init(false), cl::Hidden);
62 STATISTIC(numIntervals, "Number of original intervals");
63 STATISTIC(numFolds , "Number of loads/stores folded into instructions");
64 STATISTIC(numSplits , "Number of intervals split");
66 char LiveIntervals::ID = 0;
67 static RegisterPass<LiveIntervals> X("liveintervals", "Live Interval Analysis");
69 void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
70 AU.addRequired<AliasAnalysis>();
71 AU.addPreserved<AliasAnalysis>();
72 AU.addPreserved<LiveVariables>();
73 AU.addRequired<LiveVariables>();
74 AU.addPreservedID(MachineLoopInfoID);
75 AU.addPreservedID(MachineDominatorsID);
78 AU.addPreservedID(PHIEliminationID);
79 AU.addRequiredID(PHIEliminationID);
82 AU.addRequiredID(TwoAddressInstructionPassID);
83 MachineFunctionPass::getAnalysisUsage(AU);
86 void LiveIntervals::releaseMemory() {
87 // Free the live intervals themselves.
88 for (DenseMap<unsigned, LiveInterval*>::iterator I = r2iMap_.begin(),
89 E = r2iMap_.end(); I != E; ++I)
97 terminatorGaps.clear();
// Release VNInfo memory regions after all VNInfo objects are dtor'd.
100 VNInfoAllocator.Reset();
101 while (!ClonedMIs.empty()) {
102 MachineInstr *MI = ClonedMIs.back();
103 ClonedMIs.pop_back();
104 mf_->DeleteMachineInstr(MI);
108 /// processImplicitDefs - Process IMPLICIT_DEF instructions and make sure
109 /// there is one implicit_def for each use. Add isUndef marker to
110 /// implicit_def defs and their uses.
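/// For illustration only (the virtual register numbers and the move
/// instruction are hypothetical): if %reg1024 is defined by an IMPLICIT_DEF
/// and its only use is a register-to-register move "%reg1025 = move %reg1024",
/// the move merely copies an undefined value, so it is itself rewritten into
/// "%reg1025 = IMPLICIT_DEF" and %reg1024's def operand is marked isUndef.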
111 void LiveIntervals::processImplicitDefs() {
112 SmallSet<unsigned, 8> ImpDefRegs;
113 SmallVector<MachineInstr*, 8> ImpDefMIs;
114 MachineBasicBlock *Entry = mf_->begin();
115 SmallPtrSet<MachineBasicBlock*,16> Visited;
116 for (df_ext_iterator<MachineBasicBlock*, SmallPtrSet<MachineBasicBlock*,16> >
117 DFI = df_ext_begin(Entry, Visited), E = df_ext_end(Entry, Visited);
119 MachineBasicBlock *MBB = *DFI;
120 for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
122 MachineInstr *MI = &*I;
124 if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
125 unsigned Reg = MI->getOperand(0).getReg();
126 MI->getOperand(0).setIsUndef();
127 ImpDefRegs.insert(Reg);
128 ImpDefMIs.push_back(MI);
132 bool ChangedToImpDef = false;
133 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
134 MachineOperand& MO = MI->getOperand(i);
135 if (!MO.isReg() || !MO.isUse())
137 unsigned Reg = MO.getReg();
140 if (!ImpDefRegs.count(Reg))
142 // Use is a copy, just turn it into an implicit_def.
143 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
144 if (tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg) &&
146 bool isKill = MO.isKill();
147 MI->setDesc(tii_->get(TargetInstrInfo::IMPLICIT_DEF));
148 for (int j = MI->getNumOperands() - 1, ee = 0; j > ee; --j)
149 MI->RemoveOperand(j);
151 ImpDefRegs.erase(Reg);
152 ChangedToImpDef = true;
157 if (MO.isKill() || MI->isRegTiedToDefOperand(i))
158 ImpDefRegs.erase(Reg);
161 if (ChangedToImpDef) {
162 // Backtrack to process this new implicit_def.
165 for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
166 MachineOperand& MO = MI->getOperand(i);
167 if (!MO.isReg() || !MO.isDef())
169 ImpDefRegs.erase(MO.getReg());
174 // Any outstanding liveout implicit_def's?
175 for (unsigned i = 0, e = ImpDefMIs.size(); i != e; ++i) {
176 MachineInstr *MI = ImpDefMIs[i];
177 unsigned Reg = MI->getOperand(0).getReg();
178 if (TargetRegisterInfo::isPhysicalRegister(Reg))
179 // Physical registers are not liveout (yet).
181 if (!ImpDefRegs.count(Reg))
184 // If there are multiple defs of the same register and at least one
// is not an implicit_def, do not insert implicit_def's before the
// uses.
188 for (MachineRegisterInfo::def_iterator DI = mri_->def_begin(Reg),
189 DE = mri_->def_end(); DI != DE; ++DI) {
190 if (DI->getOpcode() != TargetInstrInfo::IMPLICIT_DEF) {
198 for (MachineRegisterInfo::use_iterator UI = mri_->use_begin(Reg),
199 UE = mri_->use_end(); UI != UE; ) {
200 MachineOperand &RMO = UI.getOperand();
201 MachineInstr *RMI = &*UI;
203 MachineBasicBlock *RMBB = RMI->getParent();
206 const TargetRegisterClass* RC = mri_->getRegClass(Reg);
207 unsigned NewVReg = mri_->createVirtualRegister(RC);
208 MachineInstrBuilder MIB =
209 BuildMI(*RMBB, RMI, RMI->getDebugLoc(),
210 tii_->get(TargetInstrInfo::IMPLICIT_DEF), NewVReg);
211 (*MIB).getOperand(0).setIsUndef();
222 void LiveIntervals::computeNumbering() {
223 Index2MiMap OldI2MI = i2miMap_;
224 std::vector<IdxMBBPair> OldI2MBB = Idx2MBBMap;
230 terminatorGaps.clear();
234 // Number MachineInstrs and MachineBasicBlocks.
// Initialize MBB indexes to a sentinel.
236 MBB2IdxMap.resize(mf_->getNumBlockIDs(), std::make_pair(~0U,~0U));
238 unsigned MIIndex = 0;
239 for (MachineFunction::iterator MBB = mf_->begin(), E = mf_->end();
241 unsigned StartIdx = MIIndex;
243 // Insert an empty slot at the beginning of each block.
244 MIIndex += InstrSlots::NUM;
245 i2miMap_.push_back(0);
247 for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
250 if (I == MBB->getFirstTerminator()) {
// Leave a gap before terminators; this is where we will point PHI kills.
254 terminatorGaps.insert(std::make_pair(&*MBB, MIIndex)).second;
256 "Multiple 'first' terminators encountered during numbering.");
257 inserted = inserted; // Avoid compiler warning if assertions turned off.
258 i2miMap_.push_back(0);
260 MIIndex += InstrSlots::NUM;
263 bool inserted = mi2iMap_.insert(std::make_pair(I, MIIndex)).second;
264 assert(inserted && "multiple MachineInstr -> index mappings");
266 i2miMap_.push_back(I);
267 MIIndex += InstrSlots::NUM;
270 // Insert max(1, numdefs) empty slots after every instruction.
271 unsigned Slots = I->getDesc().getNumDefs();
274 MIIndex += InstrSlots::NUM * Slots;
276 i2miMap_.push_back(0);
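// Illustrative sketch of the resulting numbering (this assumes the usual
// InstrSlots layout of NUM == 4 sub-slots per index, in LOAD, USE, DEF,
// STORE order; treat the concrete values as an assumption): an instruction
// numbered 8 has getLoadIndex(8) == 8, getUseIndex(8) == 9,
// getDefIndex(8) == 10 and getStoreIndex(8) == 11. With a single def it is
// followed by one group of empty slots, so the next instruction would be
// numbered 16; with two defs the next instruction would be numbered 20.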
279 if (MBB->getFirstTerminator() == MBB->end()) {
// Leave a gap before terminators; this is where we will point PHI kills.
283 terminatorGaps.insert(std::make_pair(&*MBB, MIIndex)).second;
285 "Multiple 'first' terminators encountered during numbering.");
286 inserted = inserted; // Avoid compiler warning if assertions turned off.
287 i2miMap_.push_back(0);
289 MIIndex += InstrSlots::NUM;
292 // Set the MBB2IdxMap entry for this MBB.
293 MBB2IdxMap[MBB->getNumber()] = std::make_pair(StartIdx, MIIndex - 1);
294 Idx2MBBMap.push_back(std::make_pair(StartIdx, MBB));
297 std::sort(Idx2MBBMap.begin(), Idx2MBBMap.end(), Idx2MBBCompare());
299 if (!OldI2MI.empty())
300 for (iterator OI = begin(), OE = end(); OI != OE; ++OI) {
301 for (LiveInterval::iterator LI = OI->second->begin(),
302 LE = OI->second->end(); LI != LE; ++LI) {
304 // Remap the start index of the live range to the corresponding new
305 // number, or our best guess at what it _should_ correspond to if the
306 // original instruction has been erased. This is either the following
307 // instruction or its predecessor.
308 unsigned index = LI->start / InstrSlots::NUM;
309 unsigned offset = LI->start % InstrSlots::NUM;
310 if (offset == InstrSlots::LOAD) {
311 std::vector<IdxMBBPair>::const_iterator I =
312 std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), LI->start);
313 // Take the pair containing the index
314 std::vector<IdxMBBPair>::const_iterator J =
315 (I == OldI2MBB.end() && OldI2MBB.size()>0) ? (I-1): I;
317 LI->start = getMBBStartIdx(J->second);
319 LI->start = mi2iMap_[OldI2MI[index]] + offset;
322 // Remap the ending index in the same way that we remapped the start,
323 // except for the final step where we always map to the immediately
324 // following instruction.
325 index = (LI->end - 1) / InstrSlots::NUM;
326 offset = LI->end % InstrSlots::NUM;
327 if (offset == InstrSlots::LOAD) {
328 // VReg dies at end of block.
329 std::vector<IdxMBBPair>::const_iterator I =
330 std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), LI->end);
333 LI->end = getMBBEndIdx(I->second) + 1;
335 unsigned idx = index;
336 while (index < OldI2MI.size() && !OldI2MI[index]) ++index;
338 if (index != OldI2MI.size())
339 LI->end = mi2iMap_[OldI2MI[index]] + (idx == index ? offset : 0);
341 LI->end = InstrSlots::NUM * i2miMap_.size();
345 for (LiveInterval::vni_iterator VNI = OI->second->vni_begin(),
346 VNE = OI->second->vni_end(); VNI != VNE; ++VNI) {
349 // Remap the VNInfo def index, which works the same as the
350 // start indices above. VN's with special sentinel defs
351 // don't need to be remapped.
352 if (vni->isDefAccurate() && !vni->isUnused()) {
353 unsigned index = vni->def / InstrSlots::NUM;
354 unsigned offset = vni->def % InstrSlots::NUM;
355 if (offset == InstrSlots::LOAD) {
356 std::vector<IdxMBBPair>::const_iterator I =
357 std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), vni->def);
358 // Take the pair containing the index
359 std::vector<IdxMBBPair>::const_iterator J =
360 (I == OldI2MBB.end() && OldI2MBB.size()>0) ? (I-1): I;
362 vni->def = getMBBStartIdx(J->second);
364 vni->def = mi2iMap_[OldI2MI[index]] + offset;
368 // Remap the VNInfo kill indices, which works the same as
369 // the end indices above.
370 for (size_t i = 0; i < vni->kills.size(); ++i) {
371 unsigned killIdx = vni->kills[i].killIdx;
373 unsigned index = (killIdx - 1) / InstrSlots::NUM;
374 unsigned offset = killIdx % InstrSlots::NUM;
376 if (offset == InstrSlots::LOAD) {
assert(0 && "Value killed at a load slot.");
378 /*std::vector<IdxMBBPair>::const_iterator I =
379 std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), vni->kills[i]);
382 vni->kills[i] = getMBBEndIdx(I->second);*/
384 if (vni->kills[i].isPHIKill) {
385 std::vector<IdxMBBPair>::const_iterator I =
386 std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), index);
388 vni->kills[i].killIdx = terminatorGaps[I->second];
390 assert(OldI2MI[index] != 0 &&
391 "Kill refers to instruction not present in index maps.");
392 vni->kills[i].killIdx = mi2iMap_[OldI2MI[index]] + offset;
396 unsigned idx = index;
397 while (index < OldI2MI.size() && !OldI2MI[index]) ++index;
399 if (index != OldI2MI.size())
400 vni->kills[i] = mi2iMap_[OldI2MI[index]] +
401 (idx == index ? offset : 0);
403 vni->kills[i] = InstrSlots::NUM * i2miMap_.size();
411 void LiveIntervals::scaleNumbering(int factor) {
// Need to:
//  * scale MBB begin and end points,
//  * scale all ranges,
//  * update VNI structures, and
//  * scale instruction numberings.
418 // Scale the MBB indices.
420 for (MachineFunction::iterator MBB = mf_->begin(), MBBE = mf_->end();
421 MBB != MBBE; ++MBB) {
422 std::pair<unsigned, unsigned> &mbbIndices = MBB2IdxMap[MBB->getNumber()];
423 mbbIndices.first = InstrSlots::scale(mbbIndices.first, factor);
424 mbbIndices.second = InstrSlots::scale(mbbIndices.second, factor);
425 Idx2MBBMap.push_back(std::make_pair(mbbIndices.first, MBB));
427 std::sort(Idx2MBBMap.begin(), Idx2MBBMap.end(), Idx2MBBCompare());
429 // Scale terminator gaps.
430 for (DenseMap<MachineBasicBlock*, unsigned>::iterator
431 TGI = terminatorGaps.begin(), TGE = terminatorGaps.end();
433 terminatorGaps[TGI->first] = InstrSlots::scale(TGI->second, factor);
436 // Scale the intervals.
437 for (iterator LI = begin(), LE = end(); LI != LE; ++LI) {
438 LI->second->scaleNumbering(factor);
441 // Scale MachineInstrs.
442 Mi2IndexMap oldmi2iMap = mi2iMap_;
443 unsigned highestSlot = 0;
444 for (Mi2IndexMap::iterator MI = oldmi2iMap.begin(), ME = oldmi2iMap.end();
446 unsigned newSlot = InstrSlots::scale(MI->second, factor);
447 mi2iMap_[MI->first] = newSlot;
448 highestSlot = std::max(highestSlot, newSlot);
452 i2miMap_.resize(highestSlot + 1);
453 for (Mi2IndexMap::iterator MI = mi2iMap_.begin(), ME = mi2iMap_.end();
455 i2miMap_[MI->second] = MI->first;
/// runOnMachineFunction - Compute live intervals for the whole function.
463 bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
465 mri_ = &mf_->getRegInfo();
466 tm_ = &fn.getTarget();
467 tri_ = tm_->getRegisterInfo();
468 tii_ = tm_->getInstrInfo();
469 aa_ = &getAnalysis<AliasAnalysis>();
470 lv_ = &getAnalysis<LiveVariables>();
471 allocatableRegs_ = tri_->getAllocatableSet(fn);
473 processImplicitDefs();
477 numIntervals += getNumIntervals();
483 /// print - Implement the dump method.
484 void LiveIntervals::print(std::ostream &O, const Module* ) const {
485 O << "********** INTERVALS **********\n";
486 for (const_iterator I = begin(), E = end(); I != E; ++I) {
487 I->second->print(O, tri_);
491 O << "********** MACHINEINSTRS **********\n";
492 for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end();
493 mbbi != mbbe; ++mbbi) {
494 O << ((Value*)mbbi->getBasicBlock())->getName() << ":\n";
495 for (MachineBasicBlock::iterator mii = mbbi->begin(),
496 mie = mbbi->end(); mii != mie; ++mii) {
497 O << getInstructionIndex(mii) << '\t' << *mii;
502 /// conflictsWithPhysRegDef - Returns true if the specified register
503 /// is defined during the duration of the specified interval.
504 bool LiveIntervals::conflictsWithPhysRegDef(const LiveInterval &li,
505 VirtRegMap &vrm, unsigned reg) {
506 for (LiveInterval::Ranges::const_iterator
507 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
508 for (unsigned index = getBaseIndex(I->start),
509 end = getBaseIndex(I->end-1) + InstrSlots::NUM; index != end;
510 index += InstrSlots::NUM) {
511 // skip deleted instructions
512 while (index != end && !getInstructionFromIndex(index))
513 index += InstrSlots::NUM;
514 if (index == end) break;
516 MachineInstr *MI = getInstructionFromIndex(index);
517 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
518 if (tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
519 if (SrcReg == li.reg || DstReg == li.reg)
521 for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
522 MachineOperand& mop = MI->getOperand(i);
525 unsigned PhysReg = mop.getReg();
526 if (PhysReg == 0 || PhysReg == li.reg)
528 if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
529 if (!vrm.hasPhys(PhysReg))
531 PhysReg = vrm.getPhys(PhysReg);
533 if (PhysReg && tri_->regsOverlap(PhysReg, reg))
/// conflictsWithPhysRegRef - Similar to conflictsWithPhysRegDef except
/// it can check uses as well.
544 bool LiveIntervals::conflictsWithPhysRegRef(LiveInterval &li,
545 unsigned Reg, bool CheckUse,
546 SmallPtrSet<MachineInstr*,32> &JoinedCopies) {
547 for (LiveInterval::Ranges::const_iterator
548 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
549 for (unsigned index = getBaseIndex(I->start),
550 end = getBaseIndex(I->end-1) + InstrSlots::NUM; index != end;
551 index += InstrSlots::NUM) {
552 // Skip deleted instructions.
553 MachineInstr *MI = 0;
554 while (index != end) {
555 MI = getInstructionFromIndex(index);
558 index += InstrSlots::NUM;
560 if (index == end) break;
562 if (JoinedCopies.count(MI))
564 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
565 MachineOperand& MO = MI->getOperand(i);
568 if (MO.isUse() && !CheckUse)
570 unsigned PhysReg = MO.getReg();
571 if (PhysReg == 0 || TargetRegisterInfo::isVirtualRegister(PhysReg))
573 if (tri_->isSubRegister(Reg, PhysReg))
583 void LiveIntervals::printRegName(unsigned reg) const {
584 if (TargetRegisterInfo::isPhysicalRegister(reg))
585 cerr << tri_->getName(reg);
587 cerr << "%reg" << reg;
590 void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
591 MachineBasicBlock::iterator mi,
592 unsigned MIIdx, MachineOperand& MO,
594 LiveInterval &interval) {
595 DOUT << "\t\tregister: "; DEBUG(printRegName(interval.reg));
596 LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);
598 if (mi->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
DOUT << "is an implicit_def\n";
603 // Virtual registers may be defined multiple times (due to phi
604 // elimination and 2-addr elimination). Much of what we do only has to be
605 // done once for the vreg. We use an empty interval to detect the first
606 // time we see a vreg.
607 if (interval.empty()) {
// Get the Idx of the defining instruction.
609 unsigned defIndex = getDefIndex(MIIdx);
610 // Earlyclobbers move back one.
611 if (MO.isEarlyClobber())
612 defIndex = getUseIndex(MIIdx);
614 MachineInstr *CopyMI = NULL;
615 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
616 if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
617 mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
618 mi->getOpcode() == TargetInstrInfo::SUBREG_TO_REG ||
619 tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
621 // Earlyclobbers move back one.
622 ValNo = interval.getNextValue(defIndex, CopyMI, true, VNInfoAllocator);
624 assert(ValNo->id == 0 && "First value in interval is not 0?");
626 // Loop over all of the blocks that the vreg is defined in. There are
627 // two cases we have to handle here. The most common case is a vreg
628 // whose lifetime is contained within a basic block. In this case there
629 // will be a single kill, in MBB, which comes after the definition.
630 if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
631 // FIXME: what about dead vars?
633 if (vi.Kills[0] != mi)
634 killIdx = getUseIndex(getInstructionIndex(vi.Kills[0]))+1;
636 killIdx = defIndex+1;
// If the kill happens after the definition, we have an intra-block
// live range.
640 if (killIdx > defIndex) {
641 assert(vi.AliveBlocks.empty() &&
642 "Shouldn't be alive across any blocks!");
643 LiveRange LR(defIndex, killIdx, ValNo);
644 interval.addRange(LR);
645 DOUT << " +" << LR << "\n";
646 interval.addKill(ValNo, killIdx, false);
651 // The other case we handle is when a virtual register lives to the end
652 // of the defining block, potentially live across some blocks, then is
653 // live into some number of blocks, but gets killed. Start by adding a
654 // range that goes from this definition to the end of the defining block.
655 LiveRange NewLR(defIndex, getMBBEndIdx(mbb)+1, ValNo);
656 DOUT << " +" << NewLR;
657 interval.addRange(NewLR);
659 // Iterate over all of the blocks that the variable is completely
// live in, adding [instrIndex(begin), instrIndex(end)+4) to the
// live interval.
662 for (SparseBitVector<>::iterator I = vi.AliveBlocks.begin(),
663 E = vi.AliveBlocks.end(); I != E; ++I) {
664 LiveRange LR(getMBBStartIdx(*I),
665 getMBBEndIdx(*I)+1, // MBB ends at -1.
667 interval.addRange(LR);
671 // Finally, this virtual register is live from the start of any killing
672 // block to the 'use' slot of the killing instruction.
673 for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
674 MachineInstr *Kill = vi.Kills[i];
675 unsigned killIdx = getUseIndex(getInstructionIndex(Kill))+1;
676 LiveRange LR(getMBBStartIdx(Kill->getParent()),
678 interval.addRange(LR);
679 interval.addKill(ValNo, killIdx, false);
684 // If this is the second time we see a virtual register definition, it
// must be due to phi elimination or two-address elimination. If this is
// the result of two-address elimination, then the vreg is one of the
// def-and-use register operands.
688 if (mi->isRegTiedToUseOperand(MOIdx)) {
689 // If this is a two-address definition, then we have already processed
690 // the live range. The only problem is that we didn't realize there
691 // are actually two values in the live interval. Because of this we
// need to take the live range that defines this register and split it
// into two values.
694 assert(interval.containsOneValue());
695 unsigned DefIndex = getDefIndex(interval.getValNumInfo(0)->def);
696 unsigned RedefIndex = getDefIndex(MIIdx);
697 if (MO.isEarlyClobber())
698 RedefIndex = getUseIndex(MIIdx);
700 const LiveRange *OldLR = interval.getLiveRangeContaining(RedefIndex-1);
701 VNInfo *OldValNo = OldLR->valno;
703 // Delete the initial value, which should be short and continuous,
704 // because the 2-addr copy must be in the same MBB as the redef.
705 interval.removeRange(DefIndex, RedefIndex);
707 // Two-address vregs should always only be redefined once. This means
708 // that at this point, there should be exactly one value number in it.
709 assert(interval.containsOneValue() && "Unexpected 2-addr liveint!");
// The new value number (#1) is defined by the instruction we claimed
// defined value #0.
713 VNInfo *ValNo = interval.getNextValue(OldValNo->def, OldValNo->copy,
714 false, // update at *
716 ValNo->setFlags(OldValNo->getFlags()); // * <- updating here
718 // Value#0 is now defined by the 2-addr instruction.
719 OldValNo->def = RedefIndex;
721 if (MO.isEarlyClobber())
722 OldValNo->setHasRedefByEC(true);
724 // Add the new live interval which replaces the range for the input copy.
725 LiveRange LR(DefIndex, RedefIndex, ValNo);
726 DOUT << " replace range with " << LR;
727 interval.addRange(LR);
728 interval.addKill(ValNo, RedefIndex, false);
730 // If this redefinition is dead, we need to add a dummy unit live
731 // range covering the def slot.
733 interval.addRange(LiveRange(RedefIndex, RedefIndex+1, OldValNo));
736 interval.print(DOUT, tri_);
739 // Otherwise, this must be because of phi elimination. If this is the
740 // first redefinition of the vreg that we have seen, go back and change
741 // the live range in the PHI block to be a different value number.
742 if (interval.containsOneValue()) {
743 assert(vi.Kills.size() == 1 &&
744 "PHI elimination vreg should have one kill, the PHI itself!");
746 // Remove the old range that we now know has an incorrect number.
747 VNInfo *VNI = interval.getValNumInfo(0);
748 MachineInstr *Killer = vi.Kills[0];
749 unsigned Start = getMBBStartIdx(Killer->getParent());
750 unsigned End = getUseIndex(getInstructionIndex(Killer))+1;
751 DOUT << " Removing [" << Start << "," << End << "] from: ";
752 interval.print(DOUT, tri_); DOUT << "\n";
753 interval.removeRange(Start, End);
754 assert(interval.ranges.size() == 1 &&
755 "newly discovered PHI interval has >1 ranges.");
756 MachineBasicBlock *killMBB = getMBBFromIndex(interval.endNumber());
757 interval.addKill(VNI, terminatorGaps[killMBB], true);
758 VNI->setHasPHIKill(true);
759 DOUT << " RESULT: "; interval.print(DOUT, tri_);
761 // Replace the interval with one of a NEW value number. Note that this
762 // value number isn't actually defined by an instruction, weird huh? :)
763 LiveRange LR(Start, End,
764 interval.getNextValue(mbb->getNumber(), 0, false, VNInfoAllocator));
765 LR.valno->setIsPHIDef(true);
766 DOUT << " replace range with " << LR;
767 interval.addRange(LR);
768 interval.addKill(LR.valno, End, false);
769 DOUT << " RESULT: "; interval.print(DOUT, tri_);
772 // In the case of PHI elimination, each variable definition is only
773 // live until the end of the block. We've already taken care of the
774 // rest of the live range.
775 unsigned defIndex = getDefIndex(MIIdx);
776 if (MO.isEarlyClobber())
777 defIndex = getUseIndex(MIIdx);
780 MachineInstr *CopyMI = NULL;
781 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
782 if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
783 mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
784 mi->getOpcode() == TargetInstrInfo::SUBREG_TO_REG ||
785 tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
787 ValNo = interval.getNextValue(defIndex, CopyMI, true, VNInfoAllocator);
789 unsigned killIndex = getMBBEndIdx(mbb) + 1;
790 LiveRange LR(defIndex, killIndex, ValNo);
791 interval.addRange(LR);
792 interval.addKill(ValNo, terminatorGaps[mbb], true);
793 ValNo->setHasPHIKill(true);
801 void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
802 MachineBasicBlock::iterator mi,
805 LiveInterval &interval,
806 MachineInstr *CopyMI) {
// A physical register cannot be live across basic blocks, so its
808 // lifetime must end somewhere in its defining basic block.
809 DOUT << "\t\tregister: "; DEBUG(printRegName(interval.reg));
811 unsigned baseIndex = MIIdx;
812 unsigned start = getDefIndex(baseIndex);
813 // Earlyclobbers move back one.
814 if (MO.isEarlyClobber())
815 start = getUseIndex(MIIdx);
816 unsigned end = start;
818 // If it is not used after definition, it is considered dead at
819 // the instruction defining it. Hence its interval is:
820 // [defSlot(def), defSlot(def)+1)
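// For example (assuming the DEF sub-slot is base index + 2): a def whose
// instruction has base index 8 and that is never read would get
// defSlot(def) == 10 and the one-slot range [10, 11).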
827 // If it is not dead on definition, it must be killed by a
828 // subsequent instruction. Hence its interval is:
829 // [defSlot(def), useSlot(kill)+1)
830 baseIndex += InstrSlots::NUM;
831 while (++mi != MBB->end()) {
832 while (baseIndex / InstrSlots::NUM < i2miMap_.size() &&
833 getInstructionFromIndex(baseIndex) == 0)
834 baseIndex += InstrSlots::NUM;
835 if (mi->killsRegister(interval.reg, tri_)) {
837 end = getUseIndex(baseIndex) + 1;
840 int DefIdx = mi->findRegisterDefOperandIdx(interval.reg, false, tri_);
842 if (mi->isRegTiedToUseOperand(DefIdx)) {
843 // Two-address instruction.
844 end = getDefIndex(baseIndex);
845 if (mi->getOperand(DefIdx).isEarlyClobber())
846 end = getUseIndex(baseIndex);
848 // Another instruction redefines the register before it is ever read.
849 // Then the register is essentially dead at the instruction that defines
850 // it. Hence its interval is:
851 // [defSlot(def), defSlot(def)+1)
859 baseIndex += InstrSlots::NUM;
// The only case where we should have a dead physreg here, without a kill or
// any other instruction that tells us it is dead, is if it is live-in to the
// function and never used. Another possible case is that the implicit use of
// the physical register has been deleted by the two-address pass.
869 assert(start < end && "did not find end of interval?");
871 // Already exists? Extend old live interval.
872 LiveInterval::iterator OldLR = interval.FindLiveRangeContaining(start);
873 bool Extend = OldLR != interval.end();
874 VNInfo *ValNo = Extend
875 ? OldLR->valno : interval.getNextValue(start, CopyMI, true, VNInfoAllocator);
876 if (MO.isEarlyClobber() && Extend)
877 ValNo->setHasRedefByEC(true);
878 LiveRange LR(start, end, ValNo);
879 interval.addRange(LR);
880 interval.addKill(LR.valno, end, false);
881 DOUT << " +" << LR << '\n';
884 void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
885 MachineBasicBlock::iterator MI,
889 if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
890 handleVirtualRegisterDef(MBB, MI, MIIdx, MO, MOIdx,
891 getOrCreateInterval(MO.getReg()));
892 else if (allocatableRegs_[MO.getReg()]) {
893 MachineInstr *CopyMI = NULL;
894 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
895 if (MI->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
896 MI->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
897 MI->getOpcode() == TargetInstrInfo::SUBREG_TO_REG ||
898 tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
900 handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
901 getOrCreateInterval(MO.getReg()), CopyMI);
902 // Def of a register also defines its sub-registers.
903 for (const unsigned* AS = tri_->getSubRegisters(MO.getReg()); *AS; ++AS)
904 // If MI also modifies the sub-register explicitly, avoid processing it
905 // more than once. Do not pass in TRI here so it checks for exact match.
906 if (!MI->modifiesRegister(*AS))
907 handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
908 getOrCreateInterval(*AS), 0);
912 void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
914 LiveInterval &interval, bool isAlias) {
915 DOUT << "\t\tlivein register: "; DEBUG(printRegName(interval.reg));
// Look for kills; if the register reaches a def before it's killed, then it
// shouldn't be considered a live-in.
919 MachineBasicBlock::iterator mi = MBB->begin();
920 unsigned baseIndex = MIIdx;
921 unsigned start = baseIndex;
922 while (baseIndex / InstrSlots::NUM < i2miMap_.size() &&
923 getInstructionFromIndex(baseIndex) == 0)
924 baseIndex += InstrSlots::NUM;
925 unsigned end = baseIndex;
926 bool SeenDefUse = false;
928 while (mi != MBB->end()) {
929 if (mi->killsRegister(interval.reg, tri_)) {
931 end = getUseIndex(baseIndex) + 1;
934 } else if (mi->modifiesRegister(interval.reg, tri_)) {
935 // Another instruction redefines the register before it is ever read.
936 // Then the register is essentially dead at the instruction that defines
937 // it. Hence its interval is:
938 // [defSlot(def), defSlot(def)+1)
940 end = getDefIndex(start) + 1;
945 baseIndex += InstrSlots::NUM;
947 if (mi != MBB->end()) {
948 while (baseIndex / InstrSlots::NUM < i2miMap_.size() &&
949 getInstructionFromIndex(baseIndex) == 0)
950 baseIndex += InstrSlots::NUM;
954 // Live-in register might not be used at all.
958 end = getDefIndex(MIIdx) + 1;
960 DOUT << " live through";
966 interval.getNextValue(MBB->getNumber(), 0, false, VNInfoAllocator);
967 vni->setIsPHIDef(true);
968 LiveRange LR(start, end, vni);
970 interval.addRange(LR);
971 interval.addKill(LR.valno, end, false);
972 DOUT << " +" << LR << '\n';
/// computeIntervals - Computes the live intervals for virtual
/// registers. For some ordering of the machine instructions [1,N], a
/// live interval is an interval [i, j) where 1 <= i <= j < N for
/// which a variable is live.
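/// For example (using the four-slots-per-instruction numbering sketched
/// earlier; the concrete indices are illustrative only): a virtual register
/// whose def lands at index 18 and whose last use slot is index 41 would be
/// described by the single range [18, 42).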
979 void LiveIntervals::computeIntervals() {
981 DOUT << "********** COMPUTING LIVE INTERVALS **********\n"
982 << "********** Function: "
983 << ((Value*)mf_->getFunction())->getName() << '\n';
985 for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
987 MachineBasicBlock *MBB = MBBI;
988 // Track the index of the current machine instr.
989 unsigned MIIndex = getMBBStartIdx(MBB);
990 DOUT << ((Value*)MBB->getBasicBlock())->getName() << ":\n";
992 MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();
994 // Create intervals for live-ins to this BB first.
995 for (MachineBasicBlock::const_livein_iterator LI = MBB->livein_begin(),
996 LE = MBB->livein_end(); LI != LE; ++LI) {
997 handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
998 // Multiple live-ins can alias the same register.
999 for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
1000 if (!hasInterval(*AS))
1001 handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
1005 // Skip over empty initial indices.
1006 while (MIIndex / InstrSlots::NUM < i2miMap_.size() &&
1007 getInstructionFromIndex(MIIndex) == 0)
1008 MIIndex += InstrSlots::NUM;
1010 for (; MI != miEnd; ++MI) {
1011 DOUT << MIIndex << "\t" << *MI;
1014 for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
1015 MachineOperand &MO = MI->getOperand(i);
1016 // handle register defs - build intervals
1017 if (MO.isReg() && MO.getReg() && MO.isDef()) {
1018 handleRegisterDef(MBB, MI, MIIndex, MO, i);
1022 // Skip over the empty slots after each instruction.
1023 unsigned Slots = MI->getDesc().getNumDefs();
1026 MIIndex += InstrSlots::NUM * Slots;
1028 // Skip over empty indices.
1029 while (MIIndex / InstrSlots::NUM < i2miMap_.size() &&
1030 getInstructionFromIndex(MIIndex) == 0)
1031 MIIndex += InstrSlots::NUM;
1036 bool LiveIntervals::findLiveInMBBs(unsigned Start, unsigned End,
1037 SmallVectorImpl<MachineBasicBlock*> &MBBs) const {
1038 std::vector<IdxMBBPair>::const_iterator I =
1039 std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), Start);
1041 bool ResVal = false;
1042 while (I != Idx2MBBMap.end()) {
1043 if (I->first >= End)
1045 MBBs.push_back(I->second);
1052 bool LiveIntervals::findReachableMBBs(unsigned Start, unsigned End,
1053 SmallVectorImpl<MachineBasicBlock*> &MBBs) const {
1054 std::vector<IdxMBBPair>::const_iterator I =
1055 std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), Start);
1057 bool ResVal = false;
1058 while (I != Idx2MBBMap.end()) {
1061 MachineBasicBlock *MBB = I->second;
1062 if (getMBBEndIdx(MBB) > End)
1064 for (MachineBasicBlock::succ_iterator SI = MBB->succ_begin(),
1065 SE = MBB->succ_end(); SI != SE; ++SI)
1066 MBBs.push_back(*SI);
1073 LiveInterval* LiveIntervals::createInterval(unsigned reg) {
1074 float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ? HUGE_VALF : 0.0F;
1075 return new LiveInterval(reg, Weight);
1078 /// dupInterval - Duplicate a live interval. The caller is responsible for
1079 /// managing the allocated memory.
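/// A minimal usage sketch (hypothetical caller code, not part of this file):
///   LiveInterval *Tmp = LIs->dupInterval(&Orig); // deep copy via Copy()
///   ... use Tmp ...
///   delete Tmp;                                  // caller owns the copy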
1080 LiveInterval* LiveIntervals::dupInterval(LiveInterval *li) {
1081 LiveInterval *NewLI = createInterval(li->reg);
1082 NewLI->Copy(*li, mri_, getVNInfoAllocator());
1086 /// getVNInfoSourceReg - Helper function that parses the specified VNInfo
1087 /// copy field and returns the source register that defines it.
1088 unsigned LiveIntervals::getVNInfoSourceReg(const VNInfo *VNI) const {
1092 if (VNI->copy->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG) {
1093 // If it's extracting out of a physical register, return the sub-register.
1094 unsigned Reg = VNI->copy->getOperand(1).getReg();
1095 if (TargetRegisterInfo::isPhysicalRegister(Reg))
1096 Reg = tri_->getSubReg(Reg, VNI->copy->getOperand(2).getImm());
1098 } else if (VNI->copy->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
1099 VNI->copy->getOpcode() == TargetInstrInfo::SUBREG_TO_REG)
1100 return VNI->copy->getOperand(2).getReg();
1102 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
1103 if (tii_->isMoveInstr(*VNI->copy, SrcReg, DstReg, SrcSubReg, DstSubReg))
1105 assert(0 && "Unrecognized copy instruction!");
1109 //===----------------------------------------------------------------------===//
1110 // Register allocator hooks.
1113 /// getReMatImplicitUse - If the remat definition MI has one (for now, we only
1114 /// allow one) virtual register operand, then its uses are implicitly using
1115 /// the register. Returns the virtual register.
1116 unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
1117 MachineInstr *MI) const {
1119 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1120 MachineOperand &MO = MI->getOperand(i);
1121 if (!MO.isReg() || !MO.isUse())
1123 unsigned Reg = MO.getReg();
1124 if (Reg == 0 || Reg == li.reg)
1127 if (TargetRegisterInfo::isPhysicalRegister(Reg) &&
1128 !allocatableRegs_[Reg])
1130 // FIXME: For now, only remat MI with at most one register operand.
1132 "Can't rematerialize instruction with multiple register operand!");
1133 RegOp = MO.getReg();
1141 /// isValNoAvailableAt - Return true if the val# of the specified interval
1142 /// which reaches the given instruction also reaches the specified use index.
1143 bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
1144 unsigned UseIdx) const {
1145 unsigned Index = getInstructionIndex(MI);
1146 VNInfo *ValNo = li.FindLiveRangeContaining(Index)->valno;
1147 LiveInterval::const_iterator UI = li.FindLiveRangeContaining(UseIdx);
1148 return UI != li.end() && UI->valno == ValNo;
1151 /// isReMaterializable - Returns true if the definition MI of the specified
1152 /// val# of the specified interval is re-materializable.
1153 bool LiveIntervals::isReMaterializable(const LiveInterval &li,
1154 const VNInfo *ValNo, MachineInstr *MI,
1155 SmallVectorImpl<LiveInterval*> &SpillIs,
1160 if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF)
1164 if (tii_->isLoadFromStackSlot(MI, FrameIdx) &&
1165 mf_->getFrameInfo()->isImmutableObjectIndex(FrameIdx))
// FIXME: Let the target-specific isReallyTriviallyReMaterializable determine
// this, but remember this is not safe to fold into a two-address
// instruction.
// This is a load from a fixed stack slot. It can be rematerialized.
1172 // If the target-specific rules don't identify an instruction as
// being trivially rematerializable, use some target-independent
// tests.
1175 if (!MI->getDesc().isRematerializable() ||
1176 !tii_->isTriviallyReMaterializable(MI)) {
1177 if (!EnableAggressiveRemat)
1180 // If the instruction accesses memory but the memoperands have been lost,
1181 // we can't analyze it.
1182 const TargetInstrDesc &TID = MI->getDesc();
1183 if ((TID.mayLoad() || TID.mayStore()) && MI->memoperands_empty())
1186 // Avoid instructions obviously unsafe for remat.
1187 if (TID.hasUnmodeledSideEffects() || TID.isNotDuplicable())
1190 // If the instruction accesses memory and the memory could be non-constant,
1191 // assume the instruction is not rematerializable.
1192 for (std::list<MachineMemOperand>::const_iterator
1193 I = MI->memoperands_begin(), E = MI->memoperands_end(); I != E; ++I){
1194 const MachineMemOperand &MMO = *I;
1195 if (MMO.isVolatile() || MMO.isStore())
1197 const Value *V = MMO.getValue();
1200 if (const PseudoSourceValue *PSV = dyn_cast<PseudoSourceValue>(V)) {
1201 if (!PSV->isConstant(mf_->getFrameInfo()))
1203 } else if (!aa_->pointsToConstantMemory(V))
1207 // If any of the registers accessed are non-constant, conservatively assume
1208 // the instruction is not rematerializable.
1209 unsigned ImpUse = 0;
1210 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1211 const MachineOperand &MO = MI->getOperand(i);
1213 unsigned Reg = MO.getReg();
1216 if (TargetRegisterInfo::isPhysicalRegister(Reg))
1219 // Only allow one def, and that in the first operand.
1220 if (MO.isDef() != (i == 0))
1223 // Only allow constant-valued registers.
1224 bool IsLiveIn = mri_->isLiveIn(Reg);
1225 MachineRegisterInfo::def_iterator I = mri_->def_begin(Reg),
1226 E = mri_->def_end();
1228 // For the def, it should be the only def of that register.
1229 if (MO.isDef() && (next(I) != E || IsLiveIn))
// Only allow one other register use, as that's all the
// remat mechanisms currently support.
1235 if (Reg != li.reg) {
1238 else if (Reg != ImpUse)
1241 // For the use, there should be only one associated def.
1242 if (I != E && (next(I) != E || IsLiveIn))
1249 unsigned ImpUse = getReMatImplicitUse(li, MI);
1251 const LiveInterval &ImpLi = getInterval(ImpUse);
1252 for (MachineRegisterInfo::use_iterator ri = mri_->use_begin(li.reg),
1253 re = mri_->use_end(); ri != re; ++ri) {
1254 MachineInstr *UseMI = &*ri;
1255 unsigned UseIdx = getInstructionIndex(UseMI);
1256 if (li.FindLiveRangeContaining(UseIdx)->valno != ValNo)
1258 if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
1262 // If a register operand of the re-materialized instruction is going to
1263 // be spilled next, then it's not legal to re-materialize this instruction.
1264 for (unsigned i = 0, e = SpillIs.size(); i != e; ++i)
1265 if (ImpUse == SpillIs[i]->reg)
1271 /// isReMaterializable - Returns true if the definition MI of the specified
1272 /// val# of the specified interval is re-materializable.
1273 bool LiveIntervals::isReMaterializable(const LiveInterval &li,
1274 const VNInfo *ValNo, MachineInstr *MI) {
1275 SmallVector<LiveInterval*, 4> Dummy1;
1277 return isReMaterializable(li, ValNo, MI, Dummy1, Dummy2);
/// isReMaterializable - Returns true if the definition MI of every
/// val# of the specified interval is re-materializable.
1282 bool LiveIntervals::isReMaterializable(const LiveInterval &li,
1283 SmallVectorImpl<LiveInterval*> &SpillIs,
1286 for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
1288 const VNInfo *VNI = *i;
1289 if (VNI->isUnused())
1290 continue; // Dead val#.
1291 // Is the def for the val# rematerializable?
1292 if (!VNI->isDefAccurate())
1294 MachineInstr *ReMatDefMI = getInstructionFromIndex(VNI->def);
1295 bool DefIsLoad = false;
1297 !isReMaterializable(li, VNI, ReMatDefMI, SpillIs, DefIsLoad))
1299 isLoad |= DefIsLoad;
1304 /// FilterFoldedOps - Filter out two-address use operands. Return
/// true if it finds any issue with the operands that ought to prevent
/// folding.
1307 static bool FilterFoldedOps(MachineInstr *MI,
1308 SmallVector<unsigned, 2> &Ops,
1310 SmallVector<unsigned, 2> &FoldOps) {
1312 for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
1313 unsigned OpIdx = Ops[i];
1314 MachineOperand &MO = MI->getOperand(OpIdx);
1315 // FIXME: fold subreg use.
1319 MRInfo |= (unsigned)VirtRegMap::isMod;
1321 // Filter out two-address use operand(s).
1322 if (MI->isRegTiedToDefOperand(OpIdx)) {
1323 MRInfo = VirtRegMap::isModRef;
1326 MRInfo |= (unsigned)VirtRegMap::isRef;
1328 FoldOps.push_back(OpIdx);
1334 /// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
/// slot / to reg or any rematerialized load into the ith operand of the
/// specified MI. If it is successful, MI is updated with the newly created
/// MI and this function returns true.
1338 bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
1339 VirtRegMap &vrm, MachineInstr *DefMI,
1341 SmallVector<unsigned, 2> &Ops,
1342 bool isSS, int Slot, unsigned Reg) {
1343 // If it is an implicit def instruction, just delete it.
1344 if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
1345 RemoveMachineInstrFromMaps(MI);
1346 vrm.RemoveMachineInstrFromMaps(MI);
1347 MI->eraseFromParent();
1352 // Filter the list of operand indexes that are to be folded. Abort if
1353 // any operand will prevent folding.
1354 unsigned MRInfo = 0;
1355 SmallVector<unsigned, 2> FoldOps;
1356 if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
1359 // The only time it's safe to fold into a two address instruction is when
1360 // it's folding reload and spill from / into a spill stack slot.
1361 if (DefMI && (MRInfo & VirtRegMap::isMod))
1364 MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(*mf_, MI, FoldOps, Slot)
1365 : tii_->foldMemoryOperand(*mf_, MI, FoldOps, DefMI);
1367 // Remember this instruction uses the spill slot.
1368 if (isSS) vrm.addSpillSlotUse(Slot, fmi);
1370 // Attempt to fold the memory reference into the instruction. If
1371 // we can do this, we don't need to insert spill code.
1372 MachineBasicBlock &MBB = *MI->getParent();
1373 if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
1374 vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
1375 vrm.transferSpillPts(MI, fmi);
1376 vrm.transferRestorePts(MI, fmi);
1377 vrm.transferEmergencySpills(MI, fmi);
1379 i2miMap_[InstrIdx /InstrSlots::NUM] = fmi;
1380 mi2iMap_[fmi] = InstrIdx;
1381 MI = MBB.insert(MBB.erase(MI), fmi);
1388 /// canFoldMemoryOperand - Returns true if the specified load / store
1389 /// folding is possible.
1390 bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
1391 SmallVector<unsigned, 2> &Ops,
1393 // Filter the list of operand indexes that are to be folded. Abort if
1394 // any operand will prevent folding.
1395 unsigned MRInfo = 0;
1396 SmallVector<unsigned, 2> FoldOps;
1397 if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
1400 // It's only legal to remat for a use, not a def.
1401 if (ReMat && (MRInfo & VirtRegMap::isMod))
1404 return tii_->canFoldMemoryOperand(MI, FoldOps);
1407 bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
1408 SmallPtrSet<MachineBasicBlock*, 4> MBBs;
1409 for (LiveInterval::Ranges::const_iterator
1410 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1411 std::vector<IdxMBBPair>::const_iterator II =
1412 std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), I->start);
1413 if (II == Idx2MBBMap.end())
1415 if (I->end > II->first) // crossing a MBB.
1417 MBBs.insert(II->second);
1418 if (MBBs.size() > 1)
1424 /// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of
1425 /// interval on to-be re-materialized operands of MI) with new register.
1426 void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
1427 MachineInstr *MI, unsigned NewVReg,
// There is an implicit use. That means one of the other operands is
// being remat'ed and the remat'ed instruction has li.reg as a
// use operand. Make sure we rewrite that as well.
1432 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1433 MachineOperand &MO = MI->getOperand(i);
1436 unsigned Reg = MO.getReg();
1437 if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
1439 if (!vrm.isReMaterialized(Reg))
1441 MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
1442 MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg);
1444 UseMO->setReg(NewVReg);
1448 /// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
1449 /// for addIntervalsForSpills to rewrite uses / defs for the given live range.
1450 bool LiveIntervals::
1451 rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
1452 bool TrySplit, unsigned index, unsigned end, MachineInstr *MI,
1453 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
1454 unsigned Slot, int LdSlot,
1455 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
1457 const TargetRegisterClass* rc,
1458 SmallVector<int, 4> &ReMatIds,
1459 const MachineLoopInfo *loopInfo,
1460 unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
1461 DenseMap<unsigned,unsigned> &MBBVRegsMap,
1462 std::vector<LiveInterval*> &NewLIs) {
1463 bool CanFold = false;
1465 for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
1466 MachineOperand& mop = MI->getOperand(i);
1469 unsigned Reg = mop.getReg();
1470 unsigned RegI = Reg;
1471 if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
1476 bool TryFold = !DefIsReMat;
1477 bool FoldSS = true; // Default behavior unless it's a remat.
1478 int FoldSlot = Slot;
1480 // If this is the rematerializable definition MI itself and
1481 // all of its uses are rematerialized, simply delete it.
1482 if (MI == ReMatOrigDefMI && CanDelete) {
DOUT << "\t\t\t\tErasing re-materializable def: ";
1485 RemoveMachineInstrFromMaps(MI);
1486 vrm.RemoveMachineInstrFromMaps(MI);
1487 MI->eraseFromParent();
1491 // If def for this use can't be rematerialized, then try folding.
1492 // If def is rematerializable and it's a load, also try folding.
1493 TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
1495 // Try fold loads (from stack slot, constant pool, etc.) into uses.
1501 // Scan all of the operands of this instruction rewriting operands
// to use NewVReg instead of li.reg as appropriate. We do this for
// two reasons:
1505 // 1. If the instr reads the same spilled vreg multiple times, we
1506 // want to reuse the NewVReg.
1507 // 2. If the instr is a two-addr instruction, we are required to
1508 // keep the src/dst regs pinned.
1510 // Keep track of whether we replace a use and/or def so that we can
1511 // create the spill interval with the appropriate range.
1513 HasUse = mop.isUse();
1514 HasDef = mop.isDef();
1515 SmallVector<unsigned, 2> Ops;
1517 for (unsigned j = i+1, e = MI->getNumOperands(); j != e; ++j) {
1518 const MachineOperand &MOj = MI->getOperand(j);
1521 unsigned RegJ = MOj.getReg();
1522 if (RegJ == 0 || TargetRegisterInfo::isPhysicalRegister(RegJ))
1526 HasUse |= MOj.isUse();
1527 HasDef |= MOj.isDef();
1531 if (HasUse && !li.liveAt(getUseIndex(index)))
// Must be defined by an implicit def. It should not be spilled. Note,
// this is for correctness reasons, e.g.
// 8   %reg1024<def> = IMPLICIT_DEF
// 12  %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
// The live range [12, 14) is not part of the r1024 live interval since
// it's defined by an implicit def. It will not conflict with the live
// interval of r1025. Now suppose both registers are spilled; you can
// easily see a situation where both registers are reloaded before
// the INSERT_SUBREG and both target registers would overlap.
1543 // Create a new virtual register for the spill interval.
1544 // Create the new register now so we can map the fold instruction
// to the new register so when it is unfolded we get the correct
// value.
1547 bool CreatedNewVReg = false;
1549 NewVReg = mri_->createVirtualRegister(rc);
1551 CreatedNewVReg = true;
1557 // Do not fold load / store here if we are splitting. We'll find an
1558 // optimal point to insert a load / store later.
1560 if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
1561 Ops, FoldSS, FoldSlot, NewVReg)) {
1562 // Folding the load/store can completely change the instruction in
1563 // unpredictable ways, rescan it from the beginning.
1566 // We need to give the new vreg the same stack slot as the
1567 // spilled interval.
1568 vrm.assignVirt2StackSlot(NewVReg, FoldSlot);
1574 if (isNotInMIMap(MI))
1576 goto RestartInstruction;
1579 // We'll try to fold it later if it's profitable.
1580 CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat);
1584 mop.setReg(NewVReg);
1585 if (mop.isImplicit())
1586 rewriteImplicitOps(li, MI, NewVReg, vrm);
1588 // Reuse NewVReg for other reads.
1589 for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
1590 MachineOperand &mopj = MI->getOperand(Ops[j]);
1591 mopj.setReg(NewVReg);
1592 if (mopj.isImplicit())
1593 rewriteImplicitOps(li, MI, NewVReg, vrm);
1596 if (CreatedNewVReg) {
1598 vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI/*, CanDelete*/);
1599 if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
1600 // Each valnum may have its own remat id.
1601 ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
1603 vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
1605 if (!CanDelete || (HasUse && HasDef)) {
1606 // If this is a two-addr instruction then its use operands are
// rematerializable but its def is not. It should be assigned a
// stack slot.
1609 vrm.assignVirt2StackSlot(NewVReg, Slot);
1612 vrm.assignVirt2StackSlot(NewVReg, Slot);
1614 } else if (HasUse && HasDef &&
1615 vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
1616 // If this interval hasn't been assigned a stack slot (because earlier
1617 // def is a deleted remat def), do it now.
1618 assert(Slot != VirtRegMap::NO_STACK_SLOT);
1619 vrm.assignVirt2StackSlot(NewVReg, Slot);
1622 // Re-matting an instruction with virtual register use. Add the
1623 // register as an implicit use on the use MI.
1624 if (DefIsReMat && ImpUse)
1625 MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
1627 // Create a new register interval for this spill / remat.
1628 LiveInterval &nI = getOrCreateInterval(NewVReg);
1629 if (CreatedNewVReg) {
1630 NewLIs.push_back(&nI);
1631 MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
1633 vrm.setIsSplitFromReg(NewVReg, li.reg);
1637 if (CreatedNewVReg) {
1638 LiveRange LR(getLoadIndex(index), getUseIndex(index)+1,
1639 nI.getNextValue(0, 0, false, VNInfoAllocator));
1643 // Extend the split live interval to this def / use.
1644 unsigned End = getUseIndex(index)+1;
1645 LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
1646 nI.getValNumInfo(nI.getNumValNums()-1));
1652 LiveRange LR(getDefIndex(index), getStoreIndex(index),
1653 nI.getNextValue(0, 0, false, VNInfoAllocator));
1658 DOUT << "\t\t\t\tAdded new interval: ";
1659 nI.print(DOUT, tri_);
1664 bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
1666 MachineBasicBlock *MBB, unsigned Idx) const {
1667 unsigned End = getMBBEndIdx(MBB);
1668 for (unsigned j = 0, ee = VNI->kills.size(); j != ee; ++j) {
1669 if (VNI->kills[j].isPHIKill)
1672 unsigned KillIdx = VNI->kills[j].killIdx;
1673 if (KillIdx > Idx && KillIdx < End)
1679 /// RewriteInfo - Keep track of machine instrs that will be rewritten
1680 /// during spilling.
1682 struct RewriteInfo {
1687 RewriteInfo(unsigned i, MachineInstr *mi, bool u, bool d)
1688 : Index(i), MI(mi), HasUse(u), HasDef(d) {}
1691 struct RewriteInfoCompare {
1692 bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
1693 return LHS.Index < RHS.Index;
1698 void LiveIntervals::
1699 rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
1700 LiveInterval::Ranges::const_iterator &I,
1701 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
1702 unsigned Slot, int LdSlot,
1703 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
1705 const TargetRegisterClass* rc,
1706 SmallVector<int, 4> &ReMatIds,
1707 const MachineLoopInfo *loopInfo,
1708 BitVector &SpillMBBs,
1709 DenseMap<unsigned, std::vector<SRInfo> > &SpillIdxes,
1710 BitVector &RestoreMBBs,
1711 DenseMap<unsigned, std::vector<SRInfo> > &RestoreIdxes,
1712 DenseMap<unsigned,unsigned> &MBBVRegsMap,
1713 std::vector<LiveInterval*> &NewLIs) {
1714 bool AllCanFold = true;
1715 unsigned NewVReg = 0;
1716 unsigned start = getBaseIndex(I->start);
1717 unsigned end = getBaseIndex(I->end-1) + InstrSlots::NUM;
1719 // First collect all the def / use in this live range that will be rewritten.
1720 // Make sure they are sorted according to instruction index.
1721 std::vector<RewriteInfo> RewriteMIs;
1722 for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
1723 re = mri_->reg_end(); ri != re; ) {
1724 MachineInstr *MI = &*ri;
1725 MachineOperand &O = ri.getOperand();
1727 assert(!O.isImplicit() && "Spilling register that's used as implicit use?");
1728 unsigned index = getInstructionIndex(MI);
1729 if (index < start || index >= end)
1731 if (O.isUse() && !li.liveAt(getUseIndex(index)))
// Must be defined by an implicit def. It should not be spilled. Note,
// this is for correctness reasons, e.g.
// 8   %reg1024<def> = IMPLICIT_DEF
// 12  %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
// The live range [12, 14) is not part of the r1024 live interval since
// it's defined by an implicit def. It will not conflict with the live
// interval of r1025. Now suppose both registers are spilled; you can
// easily see a situation where both registers are reloaded before
// the INSERT_SUBREG and both target registers would overlap.
1742 RewriteMIs.push_back(RewriteInfo(index, MI, O.isUse(), O.isDef()));
1744 std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());
1746 unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
1747 // Now rewrite the defs and uses.
1748 for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
1749 RewriteInfo &rwi = RewriteMIs[i];
1751 unsigned index = rwi.Index;
1752 bool MIHasUse = rwi.HasUse;
1753 bool MIHasDef = rwi.HasDef;
1754 MachineInstr *MI = rwi.MI;
// If MI defs and/or uses the same register multiple times, then there
1756 // are multiple entries.
1757 unsigned NumUses = MIHasUse;
1758 while (i != e && RewriteMIs[i].MI == MI) {
1759 assert(RewriteMIs[i].Index == index);
1760 bool isUse = RewriteMIs[i].HasUse;
1761 if (isUse) ++NumUses;
1763 MIHasDef |= RewriteMIs[i].HasDef;
1766 MachineBasicBlock *MBB = MI->getParent();
1768 if (ImpUse && MI != ReMatDefMI) {
1769 // Re-matting an instruction with virtual register use. Update the
// register interval's spill weight to HUGE_VALF to prevent it from
// being spilled.
1772 LiveInterval &ImpLi = getInterval(ImpUse);
1773 ImpLi.weight = HUGE_VALF;
1776 unsigned MBBId = MBB->getNumber();
1777 unsigned ThisVReg = 0;
1779 DenseMap<unsigned,unsigned>::iterator NVI = MBBVRegsMap.find(MBBId);
1780 if (NVI != MBBVRegsMap.end()) {
1781 ThisVReg = NVI->second;
// It's better to start a new interval to avoid artificially
// extending the new interval.
1790 if (MIHasDef && !MIHasUse) {
1791 MBBVRegsMap.erase(MBB->getNumber());
1797 bool IsNew = ThisVReg == 0;
1799 // This ends the previous live interval. If all of its def / use
1800 // can be folded, give it a low spill weight.
1801 if (NewVReg && TrySplit && AllCanFold) {
1802 LiveInterval &nI = getOrCreateInterval(NewVReg);
1809 bool HasDef = false;
1810 bool HasUse = false;
1811 bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
1812 index, end, MI, ReMatOrigDefMI, ReMatDefMI,
1813 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1814 CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
1815 ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs);
1816 if (!HasDef && !HasUse)
1819 AllCanFold &= CanFold;
1821 // Update weight of spill interval.
1822 LiveInterval &nI = getOrCreateInterval(NewVReg);
1824 // The spill weight is now infinity as it cannot be spilled again.
1825 nI.weight = HUGE_VALF;
1829 // Keep track of the last def and first use in each MBB.
1831 if (MI != ReMatOrigDefMI || !CanDelete) {
1832 bool HasKill = false;
1834 HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, getDefIndex(index));
1836 // If this is two-address code, then this index starts a new VNInfo.
1837 const VNInfo *VNI = li.findDefinedVNInfo(getDefIndex(index));
1839 HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, getDefIndex(index));
1841 DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
1842 SpillIdxes.find(MBBId);
1844 if (SII == SpillIdxes.end()) {
1845 std::vector<SRInfo> S;
1846 S.push_back(SRInfo(index, NewVReg, true));
1847 SpillIdxes.insert(std::make_pair(MBBId, S));
1848 } else if (SII->second.back().vreg != NewVReg) {
1849 SII->second.push_back(SRInfo(index, NewVReg, true));
1850 } else if ((int)index > SII->second.back().index) {
1851 // If there is an earlier def and this is a two-address
1852 // instruction, then it's not possible to fold the store (which
1853 // would also fold the load).
1854 SRInfo &Info = SII->second.back();
1856 Info.canFold = !HasUse;
1858 SpillMBBs.set(MBBId);
1859 } else if (SII != SpillIdxes.end() &&
1860 SII->second.back().vreg == NewVReg &&
1861 (int)index > SII->second.back().index) {
1862 // There is an earlier def that's not killed (must be two-address).
1863 // The spill is no longer needed.
1864 SII->second.pop_back();
1865 if (SII->second.empty()) {
1866 SpillIdxes.erase(MBBId);
1867 SpillMBBs.reset(MBBId);
1874 DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
1875 SpillIdxes.find(MBBId);
1876 if (SII != SpillIdxes.end() &&
1877 SII->second.back().vreg == NewVReg &&
1878 (int)index > SII->second.back().index)
1879 // There are use(s) following the last def; it's not safe to fold the spill.
1880 SII->second.back().canFold = false;
1881 DenseMap<unsigned, std::vector<SRInfo> >::iterator RII =
1882 RestoreIdxes.find(MBBId);
1883 if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
1884 // If we are splitting live intervals, only fold if it's the first
1885 // use and there isn't another use later in the MBB.
1886 RII->second.back().canFold = false;
1888 // Only need a reload if there isn't an earlier def / use.
1889 if (RII == RestoreIdxes.end()) {
1890 std::vector<SRInfo> Infos;
1891 Infos.push_back(SRInfo(index, NewVReg, true));
1892 RestoreIdxes.insert(std::make_pair(MBBId, Infos));
1894 RII->second.push_back(SRInfo(index, NewVReg, true));
1896 RestoreMBBs.set(MBBId);
1900 // Update spill weight.
1901 unsigned loopDepth = loopInfo->getLoopDepth(MBB);
1902 nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
1905 if (NewVReg && TrySplit && AllCanFold) {
1906 // If all of its def / use can be folded, give it a low spill weight.
1907 LiveInterval &nI = getOrCreateInterval(NewVReg);
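/// alsoFoldARestore - Return true if block Id records a restore of register
/// vr at the given index that is still marked as foldable.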
1912 bool LiveIntervals::alsoFoldARestore(int Id, int index, unsigned vr,
1913 BitVector &RestoreMBBs,
1914 DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1915 if (!RestoreMBBs[Id])
1917 std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1918 for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1919 if (Restores[i].index == index &&
1920 Restores[i].vreg == vr &&
1921 Restores[i].canFold)
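/// eraseRestoreInfo - Invalidate any restore of register vr recorded at the
/// given index in block Id so that no reload is issued for it.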
1926 void LiveIntervals::eraseRestoreInfo(int Id, int index, unsigned vr,
1927 BitVector &RestoreMBBs,
1928 DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1929 if (!RestoreMBBs[Id])
1931 std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1932 for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1933 if (Restores[i].index == index && Restores[i].vreg == vr)
1934 Restores[i].index = -1;
1937 /// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being
1938 /// spilled and create empty intervals for their uses.
1940 LiveIntervals::handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm,
1941 const TargetRegisterClass* rc,
1942 std::vector<LiveInterval*> &NewLIs) {
1943 for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
1944 re = mri_->reg_end(); ri != re; ) {
1945 MachineOperand &O = ri.getOperand();
1946 MachineInstr *MI = &*ri;
1949 assert(MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF &&
1950 "Register def was not rewritten?");
1951 RemoveMachineInstrFromMaps(MI);
1952 vrm.RemoveMachineInstrFromMaps(MI);
1953 MI->eraseFromParent();
1955 // This must be a use of an implicit_def so it's not part of the live
1956 // interval. Create a new empty live interval for it.
1957 // FIXME: Can we simply erase some of the instructions? e.g. Stores?
1958 unsigned NewVReg = mri_->createVirtualRegister(rc);
1960 vrm.setIsImplicitlyDefined(NewVReg);
1961 NewLIs.push_back(&getOrCreateInterval(NewVReg));
1962 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1963 MachineOperand &MO = MI->getOperand(i);
1964 if (MO.isReg() && MO.getReg() == li.reg) {
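/// addIntervalsForSpillsFast - Simple spilling path: assign a single stack
/// slot to the interval and, for each def / use that cannot be folded into
/// the instruction, create a short new interval with a reload before the use
/// or a store after the def.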
1973 std::vector<LiveInterval*> LiveIntervals::
1974 addIntervalsForSpillsFast(const LiveInterval &li,
1975 const MachineLoopInfo *loopInfo,
1977 unsigned slot = vrm.assignVirt2StackSlot(li.reg);
1979 std::vector<LiveInterval*> added;
1981 assert(li.weight != HUGE_VALF &&
1982 "attempt to spill already spilled interval!");
1984 DOUT << "\t\t\t\tadding intervals for spills for interval: ";
1988 const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
1990 MachineRegisterInfo::reg_iterator RI = mri_->reg_begin(li.reg);
1991 while (RI != mri_->reg_end()) {
1992 MachineInstr* MI = &*RI;
1994 SmallVector<unsigned, 2> Indices;
1995 bool HasUse = false;
1996 bool HasDef = false;
1998 for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
1999 MachineOperand& mop = MI->getOperand(i);
2000 if (!mop.isReg() || mop.getReg() != li.reg) continue;
2002 HasUse |= MI->getOperand(i).isUse();
2003 HasDef |= MI->getOperand(i).isDef();
2005 Indices.push_back(i);
2008 if (!tryFoldMemoryOperand(MI, vrm, NULL, getInstructionIndex(MI),
2009 Indices, true, slot, li.reg)) {
2010 unsigned NewVReg = mri_->createVirtualRegister(rc);
2012 vrm.assignVirt2StackSlot(NewVReg, slot);
2014 // create a new register for this spill
2015 LiveInterval &nI = getOrCreateInterval(NewVReg);
2017 // the spill weight is now infinity as it
2018 // cannot be spilled again
2019 nI.weight = HUGE_VALF;
2021 // Rewrite register operands to use the new vreg.
2022 for (SmallVectorImpl<unsigned>::iterator I = Indices.begin(),
2023 E = Indices.end(); I != E; ++I) {
2024 MI->getOperand(*I).setReg(NewVReg);
2026 if (MI->getOperand(*I).isUse())
2027 MI->getOperand(*I).setIsKill(true);
2030 // Fill in the new live interval.
2031 unsigned index = getInstructionIndex(MI);
2033 LiveRange LR(getLoadIndex(index), getUseIndex(index),
2034 nI.getNextValue(0, 0, false, getVNInfoAllocator()));
2037 vrm.addRestorePoint(NewVReg, MI);
2040 LiveRange LR(getDefIndex(index), getStoreIndex(index),
2041 nI.getNextValue(0, 0, false, getVNInfoAllocator()));
2044 vrm.addSpillPoint(NewVReg, true, MI);
2047 added.push_back(&nI);
2049 DOUT << "\t\t\t\tadded new interval: ";
2055 RI = mri_->reg_begin(li.reg);
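/// addIntervalsForSpills - Create new live intervals for the spilled defs and
/// uses of li, recording spill / restore points in the VirtRegMap and folding
/// or rematerializing where possible. Returns the new intervals.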
2061 std::vector<LiveInterval*> LiveIntervals::
2062 addIntervalsForSpills(const LiveInterval &li,
2063 SmallVectorImpl<LiveInterval*> &SpillIs,
2064 const MachineLoopInfo *loopInfo, VirtRegMap &vrm) {
2066 if (EnableFastSpilling)
2067 return addIntervalsForSpillsFast(li, loopInfo, vrm);
2069 assert(li.weight != HUGE_VALF &&
2070 "attempt to spill already spilled interval!");
2072 DOUT << "\t\t\t\tadding intervals for spills for interval: ";
2073 li.print(DOUT, tri_);
2076 // Each bit specifies whether a spill is required in the MBB.
2077 BitVector SpillMBBs(mf_->getNumBlockIDs());
2078 DenseMap<unsigned, std::vector<SRInfo> > SpillIdxes;
2079 BitVector RestoreMBBs(mf_->getNumBlockIDs());
2080 DenseMap<unsigned, std::vector<SRInfo> > RestoreIdxes;
2081 DenseMap<unsigned,unsigned> MBBVRegsMap;
2082 std::vector<LiveInterval*> NewLIs;
2083 const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
2085 unsigned NumValNums = li.getNumValNums();
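// Per-value-number bookkeeping: ReMatDefs holds a clone usable to
// rematerialize the def, ReMatOrigDefs the original defining instruction, and
// ReMatDelete marks value numbers whose original def may be deleted.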
2086 SmallVector<MachineInstr*, 4> ReMatDefs;
2087 ReMatDefs.resize(NumValNums, NULL);
2088 SmallVector<MachineInstr*, 4> ReMatOrigDefs;
2089 ReMatOrigDefs.resize(NumValNums, NULL);
2090 SmallVector<int, 4> ReMatIds;
2091 ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
2092 BitVector ReMatDelete(NumValNums);
2093 unsigned Slot = VirtRegMap::MAX_STACK_SLOT;
2095 // Spilling a split live interval. It cannot be split any further. Also,
2096 // it's guaranteed to be a single val# / range interval.
2097 if (vrm.getPreSplitReg(li.reg)) {
2098 vrm.setIsSplitFromReg(li.reg, 0);
2099 // Unset the split kill marker on the last use.
2100 unsigned KillIdx = vrm.getKillPoint(li.reg);
2102 MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
2103 assert(KillMI && "Last use disappeared?");
2104 int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
2105 assert(KillOp != -1 && "Last use disappeared?");
2106 KillMI->getOperand(KillOp).setIsKill(false);
2108 vrm.removeKillPoint(li.reg);
2109 bool DefIsReMat = vrm.isReMaterialized(li.reg);
2110 Slot = vrm.getStackSlot(li.reg);
2111 assert(Slot != VirtRegMap::MAX_STACK_SLOT);
2112 MachineInstr *ReMatDefMI = DefIsReMat ?
2113 vrm.getReMaterializedMI(li.reg) : NULL;
2115 bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
2116 bool isLoad = isLoadSS ||
2117 (DefIsReMat && (ReMatDefMI->getDesc().canFoldAsLoad()));
2118 bool IsFirstRange = true;
2119 for (LiveInterval::Ranges::const_iterator
2120 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
2121 // If this is a split live interval with multiple ranges, it means there
2122 // are two-address instructions that re-defined the value. Only the
2123 // first def can be rematerialized!
2125 // Note ReMatOrigDefMI has already been deleted.
2126 rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
2127 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
2128 false, vrm, rc, ReMatIds, loopInfo,
2129 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
2130 MBBVRegsMap, NewLIs);
2132 rewriteInstructionsForSpills(li, false, I, NULL, 0,
2133 Slot, 0, false, false, false,
2134 false, vrm, rc, ReMatIds, loopInfo,
2135 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
2136 MBBVRegsMap, NewLIs);
2138 IsFirstRange = false;
2141 handleSpilledImpDefs(li, vrm, rc, NewLIs);
2145 bool TrySplit = SplitAtBB && !intervalIsInOneMBB(li);
2146 if (SplitLimit != -1 && (int)numSplits >= SplitLimit)
2150 bool NeedStackSlot = false;
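// Determine, for each value number, whether its def can be rematerialized;
// a stack slot is only needed if some def cannot be rematerialized for all of
// its uses.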
2151 for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
2153 const VNInfo *VNI = *i;
2154 unsigned VN = VNI->id;
2155 if (VNI->isUnused())
2156 continue; // Dead val#.
2157 // Is the def for the val# rematerializable?
2158 MachineInstr *ReMatDefMI = VNI->isDefAccurate()
2159 ? getInstructionFromIndex(VNI->def) : 0;
2161 if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, SpillIs, dummy)) {
2162 // Remember how to remat the def of this val#.
2163 ReMatOrigDefs[VN] = ReMatDefMI;
2164 // Original def may be modified so we have to make a copy here.
2165 MachineInstr *Clone = mf_->CloneMachineInstr(ReMatDefMI);
2166 ClonedMIs.push_back(Clone);
2167 ReMatDefs[VN] = Clone;
2169 bool CanDelete = true;
2170 if (VNI->hasPHIKill()) {
2171 // A kill is a phi node, so not all of its uses can be rematerialized.
2172 // It must not be deleted.
2174 // Need a stack slot if there is any live range where uses cannot be rematerialized.
2176 NeedStackSlot = true;
2179 ReMatDelete.set(VN);
2181 // Need a stack slot if there is any live range where uses cannot be rematerialized.
2183 NeedStackSlot = true;
2187 // One stack slot per live interval.
2188 if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0) {
2189 if (vrm.getStackSlot(li.reg) == VirtRegMap::NO_STACK_SLOT)
2190 Slot = vrm.assignVirt2StackSlot(li.reg);
2192 // This case only occurs when the prealloc splitter has already assigned
2193 // a stack slot to this vreg.
2195 Slot = vrm.getStackSlot(li.reg);
2198 // Create new intervals and rewrite defs and uses.
2199 for (LiveInterval::Ranges::const_iterator
2200 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
2201 MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
2202 MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
2203 bool DefIsReMat = ReMatDefMI != NULL;
2204 bool CanDelete = ReMatDelete[I->valno->id];
2206 bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
2207 bool isLoad = isLoadSS ||
2208 (DefIsReMat && ReMatDefMI->getDesc().canFoldAsLoad());
2209 rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
2210 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
2211 CanDelete, vrm, rc, ReMatIds, loopInfo,
2212 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
2213 MBBVRegsMap, NewLIs);
2216 // Insert spills / restores if we are splitting.
2218 handleSpilledImpDefs(li, vrm, rc, NewLIs);
2222 SmallPtrSet<LiveInterval*, 4> AddedKill;
2223 SmallVector<unsigned, 2> Ops;
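// Walk the blocks that need spills. Try to fold the store into the defining
// instruction; otherwise record a spill point so the spiller inserts the
// store after the def.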
2224 if (NeedStackSlot) {
2225 int Id = SpillMBBs.find_first();
2227 std::vector<SRInfo> &spills = SpillIdxes[Id];
2228 for (unsigned i = 0, e = spills.size(); i != e; ++i) {
2229 int index = spills[i].index;
2230 unsigned VReg = spills[i].vreg;
2231 LiveInterval &nI = getOrCreateInterval(VReg);
2232 bool isReMat = vrm.isReMaterialized(VReg);
2233 MachineInstr *MI = getInstructionFromIndex(index);
2234 bool CanFold = false;
2235 bool FoundUse = false;
2237 if (spills[i].canFold) {
2239 for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
2240 MachineOperand &MO = MI->getOperand(j);
2241 if (!MO.isReg() || MO.getReg() != VReg)
2248 (!FoundUse && !alsoFoldARestore(Id, index, VReg,
2249 RestoreMBBs, RestoreIdxes))) {
2250 // MI has two-address uses of the same register. If the use
2251 // isn't the first and only use in the BB, then we can't fold
2252 // it. FIXME: Move this to rewriteInstructionsForSpills.
2259 // Fold the store into the def if possible.
2260 bool Folded = false;
2261 if (CanFold && !Ops.empty()) {
2262 if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot,VReg)){
2265 // The use(s) were folded as well, so do not issue a load.
2266 eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
2267 nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
2269 nI.removeRange(getDefIndex(index), getStoreIndex(index));
2273 // Otherwise tell the spiller to issue a spill.
2275 LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
2276 bool isKill = LR->end == getStoreIndex(index);
2277 if (!MI->registerDefIsDead(nI.reg))
2278 // No need to spill a dead def.
2279 vrm.addSpillPoint(VReg, isKill, MI);
2281 AddedKill.insert(&nI);
2284 Id = SpillMBBs.find_next(Id);
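// Walk the blocks that need reloads. Try to fold the load (or the
// rematerialized def, if it is a load) into the using instruction; otherwise
// record a restore point for the spiller.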
2288 int Id = RestoreMBBs.find_first();
2290 std::vector<SRInfo> &restores = RestoreIdxes[Id];
2291 for (unsigned i = 0, e = restores.size(); i != e; ++i) {
2292 int index = restores[i].index;
2295 unsigned VReg = restores[i].vreg;
2296 LiveInterval &nI = getOrCreateInterval(VReg);
2297 bool isReMat = vrm.isReMaterialized(VReg);
2298 MachineInstr *MI = getInstructionFromIndex(index);
2299 bool CanFold = false;
2301 if (restores[i].canFold) {
2303 for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
2304 MachineOperand &MO = MI->getOperand(j);
2305 if (!MO.isReg() || MO.getReg() != VReg)
2309 // If this restore were to be folded, it would have been folded already.
2318 // Fold the load into the use if possible.
2319 bool Folded = false;
2320 if (CanFold && !Ops.empty()) {
2322 Folded = tryFoldMemoryOperand(MI, vrm, NULL,index,Ops,true,Slot,VReg);
2324 MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
2326 bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
2327 // If the rematerializable def is a load, also try to fold it.
2328 if (isLoadSS || ReMatDefMI->getDesc().canFoldAsLoad())
2329 Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
2330 Ops, isLoadSS, LdSlot, VReg);
2332 unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
2334 // Re-matting an instruction with virtual register use. Add the
2335 // register as an implicit use on the use MI and update the register
2336 // interval's spill weight to HUGE_VALF to prevent it from being spilled.
2338 LiveInterval &ImpLi = getInterval(ImpUse);
2339 ImpLi.weight = HUGE_VALF;
2340 MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
2345 // If folding is not possible / failed, then tell the spiller to issue a
2346 // load / rematerialization for us.
2348 nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
2350 vrm.addRestorePoint(VReg, MI);
2352 Id = RestoreMBBs.find_next(Id);
2355 // Finalize intervals: add kills, finalize spill weights, and filter out dead intervals.
2357 std::vector<LiveInterval*> RetNewLIs;
2358 for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
2359 LiveInterval *LI = NewLIs[i];
2361 LI->weight /= InstrSlots::NUM * getApproximateInstructionCount(*LI);
2362 if (!AddedKill.count(LI)) {
2363 LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
2364 unsigned LastUseIdx = getBaseIndex(LR->end);
2365 MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
2366 int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
2367 assert(UseIdx != -1);
2368 if (!LastUse->isRegTiedToDefOperand(UseIdx)) {
2369 LastUse->getOperand(UseIdx).setIsKill();
2370 vrm.addKillPoint(LI->reg, LastUseIdx);
2373 RetNewLIs.push_back(LI);
2377 handleSpilledImpDefs(li, vrm, rc, RetNewLIs);
2381 /// hasAllocatableSuperReg - Return true if the specified physical register has
2382 /// any super register that's allocatable.
2383 bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
2384 for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS)
2385 if (allocatableRegs_[*AS] && hasInterval(*AS))
2390 /// getRepresentativeReg - Find the largest super register of the specified
2391 /// physical register.
2392 unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
2393 // Find the largest super-register that is allocatable.
2394 unsigned BestReg = Reg;
2395 for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
2396 unsigned SuperReg = *AS;
2397 if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) {
2405 /// getNumConflictsWithPhysReg - Return the number of uses and defs of the
2406 /// specified interval that conflict with the specified physical register.
2407 unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
2408 unsigned PhysReg) const {
2409 unsigned NumConflicts = 0;
2410 const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg));
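// Count each operand of li.reg that occurs at an index where the physical
// register's interval is live.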
2411 for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
2412 E = mri_->reg_end(); I != E; ++I) {
2413 MachineOperand &O = I.getOperand();
2414 MachineInstr *MI = O.getParent();
2415 unsigned Index = getInstructionIndex(MI);
2416 if (pli.liveAt(Index))
2419 return NumConflicts;
2422 /// spillPhysRegAroundRegDefsUses - Spill the specified physical register
2423 /// around all defs and uses of the specified interval. Return true if it
2424 /// was able to cut its interval.
2425 bool LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
2426 unsigned PhysReg, VirtRegMap &vrm) {
2427 unsigned SpillReg = getRepresentativeReg(PhysReg);
2429 for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
2430 // If there are registers which alias PhysReg but which are not a
2431 // sub-register of the chosen representative super register, assert,
2432 // since we can't handle it yet.
2433 assert(*AS == SpillReg || !allocatableRegs_[*AS] || !hasInterval(*AS) ||
2434 tri_->isSuperRegister(*AS, SpillReg));
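// For every instruction that references li.reg while SpillReg is live,
// record an emergency spill and carve the instruction's slots out of
// SpillReg's interval and the intervals of its sub-registers.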
2437 LiveInterval &pli = getInterval(SpillReg);
2438 SmallPtrSet<MachineInstr*, 8> SeenMIs;
2439 for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
2440 E = mri_->reg_end(); I != E; ++I) {
2441 MachineOperand &O = I.getOperand();
2442 MachineInstr *MI = O.getParent();
2443 if (SeenMIs.count(MI))
2446 unsigned Index = getInstructionIndex(MI);
2447 if (pli.liveAt(Index)) {
2448 vrm.addEmergencySpill(SpillReg, MI);
2449 unsigned StartIdx = getLoadIndex(Index);
2450 unsigned EndIdx = getStoreIndex(Index)+1;
2451 if (pli.isInOneLiveRange(StartIdx, EndIdx)) {
2452 pli.removeRange(StartIdx, EndIdx);
2456 raw_string_ostream Msg(msg);
2457 Msg << "Ran out of registers during register allocation!";
2458 if (MI->getOpcode() == TargetInstrInfo::INLINEASM) {
2459 Msg << "\nPlease check your inline asm statement for invalid "
2460 << "constraints:\n";
2461 MI->print(Msg, tm_);
2463 llvm_report_error(Msg.str());
2465 for (const unsigned* AS = tri_->getSubRegisters(SpillReg); *AS; ++AS) {
2466 if (!hasInterval(*AS))
2468 LiveInterval &spli = getInterval(*AS);
2469 if (spli.liveAt(Index))
2470 spli.removeRange(getLoadIndex(Index), getStoreIndex(Index)+1);
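/// addLiveRangeToEndOfBlock - Add a live range for reg that starts at
/// startInst's def slot and extends to the end of its basic block; the value
/// is marked as having a PHI kill at the block's terminator gap.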
2477 LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg,
2478 MachineInstr* startInst) {
2479 LiveInterval& Interval = getOrCreateInterval(reg);
2480 VNInfo* VN = Interval.getNextValue(
2481 getInstructionIndex(startInst) + InstrSlots::DEF,
2482 startInst, true, getVNInfoAllocator());
2483 VN->setHasPHIKill(true);
2484 VN->kills.push_back(
2485 VNInfo::KillInfo(terminatorGaps[startInst->getParent()], true));
2486 LiveRange LR(getInstructionIndex(startInst) + InstrSlots::DEF,
2487 getMBBEndIdx(startInst->getParent()) + 1, VN);
2488 Interval.addRange(LR);