1 //===-- LiveIntervalAnalysis.cpp - Live Interval Analysis -----------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file implements the LiveInterval analysis pass which is used
11 // by the Linear Scan Register allocator. This pass linearizes the
12 // basic blocks of the function in DFS order and uses the
13 // LiveVariables pass to conservatively compute live intervals for
14 // each virtual and physical register.
16 //===----------------------------------------------------------------------===//
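//
// For illustration only (the exact print format may differ), the analysis
// might record for a virtual register an interval such as
//
//   %reg1024 = [18,46:0)
//
// meaning value number 0, defined at index 18, is live up to (but not
// including) index 46.
//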
18 #define DEBUG_TYPE "liveintervals"
19 #include "llvm/CodeGen/LiveIntervalAnalysis.h"
20 #include "VirtRegMap.h"
21 #include "llvm/Value.h"
22 #include "llvm/Analysis/AliasAnalysis.h"
23 #include "llvm/CodeGen/LiveVariables.h"
24 #include "llvm/CodeGen/MachineFrameInfo.h"
25 #include "llvm/CodeGen/MachineInstr.h"
26 #include "llvm/CodeGen/MachineInstrBuilder.h"
27 #include "llvm/CodeGen/MachineLoopInfo.h"
28 #include "llvm/CodeGen/MachineRegisterInfo.h"
29 #include "llvm/CodeGen/Passes.h"
30 #include "llvm/CodeGen/PseudoSourceValue.h"
31 #include "llvm/Target/TargetRegisterInfo.h"
32 #include "llvm/Target/TargetInstrInfo.h"
33 #include "llvm/Target/TargetMachine.h"
34 #include "llvm/Target/TargetOptions.h"
35 #include "llvm/Support/CommandLine.h"
36 #include "llvm/Support/Debug.h"
37 #include "llvm/ADT/DepthFirstIterator.h"
38 #include "llvm/ADT/SmallSet.h"
39 #include "llvm/ADT/Statistic.h"
40 #include "llvm/ADT/STLExtras.h"
// Hidden options to help with debugging.
47 static cl::opt<bool> DisableReMat("disable-rematerialization",
48 cl::init(false), cl::Hidden);
50 static cl::opt<bool> SplitAtBB("split-intervals-at-bb",
51 cl::init(true), cl::Hidden);
52 static cl::opt<int> SplitLimit("split-limit",
53 cl::init(-1), cl::Hidden);
55 static cl::opt<bool> EnableAggressiveRemat("aggressive-remat", cl::Hidden);
57 static cl::opt<bool> EnableFastSpilling("fast-spill",
58 cl::init(false), cl::Hidden);
60 STATISTIC(numIntervals, "Number of original intervals");
61 STATISTIC(numFolds , "Number of loads/stores folded into instructions");
62 STATISTIC(numSplits , "Number of intervals split");
64 char LiveIntervals::ID = 0;
65 static RegisterPass<LiveIntervals> X("liveintervals", "Live Interval Analysis");
67 void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
68 AU.addRequired<AliasAnalysis>();
69 AU.addPreserved<AliasAnalysis>();
70 AU.addPreserved<LiveVariables>();
71 AU.addRequired<LiveVariables>();
72 AU.addPreservedID(MachineLoopInfoID);
73 AU.addPreservedID(MachineDominatorsID);
76 AU.addPreservedID(PHIEliminationID);
77 AU.addRequiredID(PHIEliminationID);
80 AU.addRequiredID(TwoAddressInstructionPassID);
81 MachineFunctionPass::getAnalysisUsage(AU);
84 void LiveIntervals::releaseMemory() {
85 // Free the live intervals themselves.
86 for (DenseMap<unsigned, LiveInterval*>::iterator I = r2iMap_.begin(),
     E = r2iMap_.end(); I != E; ++I)
    delete I->second;
  r2iMap_.clear();
95 terminatorGaps.clear();
  // Release VNInfo memory regions after all VNInfo objects are dtor'd.
98 VNInfoAllocator.Reset();
99 while (!ClonedMIs.empty()) {
100 MachineInstr *MI = ClonedMIs.back();
101 ClonedMIs.pop_back();
102 mf_->DeleteMachineInstr(MI);
106 /// processImplicitDefs - Process IMPLICIT_DEF instructions and make sure
107 /// there is one implicit_def for each use. Add isUndef marker to
108 /// implicit_def defs and their uses.
109 void LiveIntervals::processImplicitDefs() {
110 SmallSet<unsigned, 8> ImpDefRegs;
111 SmallVector<MachineInstr*, 8> ImpDefMIs;
112 MachineBasicBlock *Entry = mf_->begin();
113 SmallPtrSet<MachineBasicBlock*,16> Visited;
114 for (df_ext_iterator<MachineBasicBlock*, SmallPtrSet<MachineBasicBlock*,16> >
115 DFI = df_ext_begin(Entry, Visited), E = df_ext_end(Entry, Visited);
117 MachineBasicBlock *MBB = *DFI;
118 for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
120 MachineInstr *MI = &*I;
122 if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
123 unsigned Reg = MI->getOperand(0).getReg();
124 MI->getOperand(0).setIsUndef();
125 ImpDefRegs.insert(Reg);
126 ImpDefMIs.push_back(MI);
130 bool ChangedToImpDef = false;
131 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
132 MachineOperand& MO = MI->getOperand(i);
133 if (!MO.isReg() || !MO.isUse())
135 unsigned Reg = MO.getReg();
138 if (!ImpDefRegs.count(Reg))
140 // Use is a copy, just turn it into an implicit_def.
141 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
142 if (tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg) &&
144 bool isKill = MO.isKill();
145 MI->setDesc(tii_->get(TargetInstrInfo::IMPLICIT_DEF));
146 for (int j = MI->getNumOperands() - 1, ee = 0; j > ee; --j)
147 MI->RemoveOperand(j);
149 ImpDefRegs.erase(Reg);
150 ChangedToImpDef = true;
155 if (MO.isKill() || MI->isRegTiedToDefOperand(i))
156 ImpDefRegs.erase(Reg);
159 if (ChangedToImpDef) {
160 // Backtrack to process this new implicit_def.
163 for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
164 MachineOperand& MO = MI->getOperand(i);
165 if (!MO.isReg() || !MO.isDef())
167 ImpDefRegs.erase(MO.getReg());
172 // Any outstanding liveout implicit_def's?
173 for (unsigned i = 0, e = ImpDefMIs.size(); i != e; ++i) {
174 MachineInstr *MI = ImpDefMIs[i];
175 unsigned Reg = MI->getOperand(0).getReg();
176 if (TargetRegisterInfo::isPhysicalRegister(Reg))
177 // Physical registers are not liveout (yet).
179 if (!ImpDefRegs.count(Reg))
    // If there are multiple defs of the same register and at least one
    // is not an implicit_def, do not insert implicit_def's before the uses.
186 for (MachineRegisterInfo::def_iterator DI = mri_->def_begin(Reg),
187 DE = mri_->def_end(); DI != DE; ++DI) {
188 if (DI->getOpcode() != TargetInstrInfo::IMPLICIT_DEF) {
196 for (MachineRegisterInfo::use_iterator UI = mri_->use_begin(Reg),
197 UE = mri_->use_end(); UI != UE; ) {
198 MachineOperand &RMO = UI.getOperand();
199 MachineInstr *RMI = &*UI;
201 MachineBasicBlock *RMBB = RMI->getParent();
204 const TargetRegisterClass* RC = mri_->getRegClass(Reg);
205 unsigned NewVReg = mri_->createVirtualRegister(RC);
206 MachineInstrBuilder MIB =
207 BuildMI(*RMBB, RMI, RMI->getDebugLoc(),
208 tii_->get(TargetInstrInfo::IMPLICIT_DEF), NewVReg);
209 (*MIB).getOperand(0).setIsUndef();
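/// Note on the numbering scheme (a sketch; the exact slot constants live in
/// LiveIntervalAnalysis.h): every instruction is allotted InstrSlots::NUM
/// consecutive indices, conceptually a load, use, def and store slot.
/// Assuming NUM == 4, an instruction at base index 8 has its use slot at 9
/// and its def slot at 10, and the next instruction starts no earlier than
/// 12. Each basic block also gets an empty slot at its start and a gap
/// before its first terminator where PHI kills are pointed.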
220 void LiveIntervals::computeNumbering() {
221 Index2MiMap OldI2MI = i2miMap_;
222 std::vector<IdxMBBPair> OldI2MBB = Idx2MBBMap;
228 terminatorGaps.clear();
232 // Number MachineInstrs and MachineBasicBlocks.
  // Initialize MBB indexes to a sentinel.
234 MBB2IdxMap.resize(mf_->getNumBlockIDs(), std::make_pair(~0U,~0U));
236 unsigned MIIndex = 0;
237 for (MachineFunction::iterator MBB = mf_->begin(), E = mf_->end();
239 unsigned StartIdx = MIIndex;
241 // Insert an empty slot at the beginning of each block.
242 MIIndex += InstrSlots::NUM;
243 i2miMap_.push_back(0);
245 for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
248 if (I == MBB->getFirstTerminator()) {
        // Leave a gap before terminators; this is where we will point
        // the PHI kills.
        bool inserted =
          terminatorGaps.insert(std::make_pair(&*MBB, MIIndex)).second;
        assert(inserted &&
               "Multiple 'first' terminators encountered during numbering.");
        i2miMap_.push_back(0);
257 MIIndex += InstrSlots::NUM;
260 bool inserted = mi2iMap_.insert(std::make_pair(I, MIIndex)).second;
261 assert(inserted && "multiple MachineInstr -> index mappings");
263 i2miMap_.push_back(I);
264 MIIndex += InstrSlots::NUM;
      // Insert max(1, numdefs) empty slots after every instruction.
      unsigned Slots = I->getDesc().getNumDefs();
      if (Slots == 0)
        Slots = 1;
      MIIndex += InstrSlots::NUM * Slots;
      while (Slots--)
        i2miMap_.push_back(0);
276 if (MBB->getFirstTerminator() == MBB->end()) {
      // Leave a gap before terminators; this is where we will point
      // the PHI kills.
      bool inserted =
        terminatorGaps.insert(std::make_pair(&*MBB, MIIndex)).second;
      assert(inserted &&
             "Multiple 'first' terminators encountered during numbering.");
283 i2miMap_.push_back(0);
285 MIIndex += InstrSlots::NUM;
288 // Set the MBB2IdxMap entry for this MBB.
289 MBB2IdxMap[MBB->getNumber()] = std::make_pair(StartIdx, MIIndex - 1);
290 Idx2MBBMap.push_back(std::make_pair(StartIdx, MBB));
293 std::sort(Idx2MBBMap.begin(), Idx2MBBMap.end(), Idx2MBBCompare());
295 if (!OldI2MI.empty())
296 for (iterator OI = begin(), OE = end(); OI != OE; ++OI) {
297 for (LiveInterval::iterator LI = OI->second->begin(),
298 LE = OI->second->end(); LI != LE; ++LI) {
300 // Remap the start index of the live range to the corresponding new
301 // number, or our best guess at what it _should_ correspond to if the
302 // original instruction has been erased. This is either the following
303 // instruction or its predecessor.
304 unsigned index = LI->start / InstrSlots::NUM;
305 unsigned offset = LI->start % InstrSlots::NUM;
306 if (offset == InstrSlots::LOAD) {
307 std::vector<IdxMBBPair>::const_iterator I =
308 std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), LI->start);
309 // Take the pair containing the index
310 std::vector<IdxMBBPair>::const_iterator J =
311 (I == OldI2MBB.end() && OldI2MBB.size()>0) ? (I-1): I;
313 LI->start = getMBBStartIdx(J->second);
315 LI->start = mi2iMap_[OldI2MI[index]] + offset;
318 // Remap the ending index in the same way that we remapped the start,
319 // except for the final step where we always map to the immediately
320 // following instruction.
321 index = (LI->end - 1) / InstrSlots::NUM;
322 offset = LI->end % InstrSlots::NUM;
323 if (offset == InstrSlots::LOAD) {
324 // VReg dies at end of block.
325 std::vector<IdxMBBPair>::const_iterator I =
326 std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), LI->end);
329 LI->end = getMBBEndIdx(I->second) + 1;
331 unsigned idx = index;
332 while (index < OldI2MI.size() && !OldI2MI[index]) ++index;
334 if (index != OldI2MI.size())
335 LI->end = mi2iMap_[OldI2MI[index]] + (idx == index ? offset : 0);
337 LI->end = InstrSlots::NUM * i2miMap_.size();
341 for (LiveInterval::vni_iterator VNI = OI->second->vni_begin(),
342 VNE = OI->second->vni_end(); VNI != VNE; ++VNI) {
345 // Remap the VNInfo def index, which works the same as the
346 // start indices above. VN's with special sentinel defs
347 // don't need to be remapped.
348 if (vni->isDefAccurate() && !vni->isUnused()) {
349 unsigned index = vni->def / InstrSlots::NUM;
350 unsigned offset = vni->def % InstrSlots::NUM;
351 if (offset == InstrSlots::LOAD) {
352 std::vector<IdxMBBPair>::const_iterator I =
353 std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), vni->def);
354 // Take the pair containing the index
355 std::vector<IdxMBBPair>::const_iterator J =
356 (I == OldI2MBB.end() && OldI2MBB.size()>0) ? (I-1): I;
358 vni->def = getMBBStartIdx(J->second);
360 vni->def = mi2iMap_[OldI2MI[index]] + offset;
364 // Remap the VNInfo kill indices, which works the same as
365 // the end indices above.
366 for (size_t i = 0; i < vni->kills.size(); ++i) {
367 unsigned killIdx = vni->kills[i].killIdx;
369 unsigned index = (killIdx - 1) / InstrSlots::NUM;
370 unsigned offset = killIdx % InstrSlots::NUM;
372 if (offset == InstrSlots::LOAD) {
373 assert("Value killed at a load slot.");
374 /*std::vector<IdxMBBPair>::const_iterator I =
375 std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), vni->kills[i]);
378 vni->kills[i] = getMBBEndIdx(I->second);*/
380 if (vni->kills[i].isPHIKill) {
381 std::vector<IdxMBBPair>::const_iterator I =
382 std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), index);
384 vni->kills[i].killIdx = terminatorGaps[I->second];
386 assert(OldI2MI[index] != 0 &&
387 "Kill refers to instruction not present in index maps.");
388 vni->kills[i].killIdx = mi2iMap_[OldI2MI[index]] + offset;
392 unsigned idx = index;
393 while (index < OldI2MI.size() && !OldI2MI[index]) ++index;
395 if (index != OldI2MI.size())
              vni->kills[i].killIdx = mi2iMap_[OldI2MI[index]] +
                                      (idx == index ? offset : 0);
            else
              vni->kills[i].killIdx = InstrSlots::NUM * i2miMap_.size();
407 void LiveIntervals::scaleNumbering(int factor) {
409 // * scale MBB begin and end points
410 // * scale all ranges.
411 // * Update VNI structures.
412 // * Scale instruction numberings
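  // A hypothetical use: calling scaleNumbering(2) would double every index,
  // leaving unused slots after each existing instruction into which newly
  // inserted instructions can later be numbered without renumbering the
  // whole function.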
414 // Scale the MBB indices.
416 for (MachineFunction::iterator MBB = mf_->begin(), MBBE = mf_->end();
417 MBB != MBBE; ++MBB) {
418 std::pair<unsigned, unsigned> &mbbIndices = MBB2IdxMap[MBB->getNumber()];
419 mbbIndices.first = InstrSlots::scale(mbbIndices.first, factor);
420 mbbIndices.second = InstrSlots::scale(mbbIndices.second, factor);
421 Idx2MBBMap.push_back(std::make_pair(mbbIndices.first, MBB));
423 std::sort(Idx2MBBMap.begin(), Idx2MBBMap.end(), Idx2MBBCompare());
425 // Scale terminator gaps.
426 for (DenseMap<MachineBasicBlock*, unsigned>::iterator
427 TGI = terminatorGaps.begin(), TGE = terminatorGaps.end();
429 terminatorGaps[TGI->first] = InstrSlots::scale(TGI->second, factor);
432 // Scale the intervals.
433 for (iterator LI = begin(), LE = end(); LI != LE; ++LI) {
434 LI->second->scaleNumbering(factor);
437 // Scale MachineInstrs.
438 Mi2IndexMap oldmi2iMap = mi2iMap_;
439 unsigned highestSlot = 0;
440 for (Mi2IndexMap::iterator MI = oldmi2iMap.begin(), ME = oldmi2iMap.end();
442 unsigned newSlot = InstrSlots::scale(MI->second, factor);
443 mi2iMap_[MI->first] = newSlot;
444 highestSlot = std::max(highestSlot, newSlot);
448 i2miMap_.resize(highestSlot + 1);
449 for (Mi2IndexMap::iterator MI = mi2iMap_.begin(), ME = mi2iMap_.end();
451 i2miMap_[MI->second] = MI->first;
/// runOnMachineFunction - Compute live intervals for the whole function.
459 bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
461 mri_ = &mf_->getRegInfo();
462 tm_ = &fn.getTarget();
463 tri_ = tm_->getRegisterInfo();
464 tii_ = tm_->getInstrInfo();
465 aa_ = &getAnalysis<AliasAnalysis>();
466 lv_ = &getAnalysis<LiveVariables>();
467 allocatableRegs_ = tri_->getAllocatableSet(fn);
469 processImplicitDefs();
473 numIntervals += getNumIntervals();
479 /// print - Implement the dump method.
480 void LiveIntervals::print(std::ostream &O, const Module* ) const {
481 O << "********** INTERVALS **********\n";
482 for (const_iterator I = begin(), E = end(); I != E; ++I) {
483 I->second->print(O, tri_);
487 O << "********** MACHINEINSTRS **********\n";
488 for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end();
489 mbbi != mbbe; ++mbbi) {
490 O << ((Value*)mbbi->getBasicBlock())->getName() << ":\n";
491 for (MachineBasicBlock::iterator mii = mbbi->begin(),
492 mie = mbbi->end(); mii != mie; ++mii) {
493 O << getInstructionIndex(mii) << '\t' << *mii;
498 /// conflictsWithPhysRegDef - Returns true if the specified register
499 /// is defined during the duration of the specified interval.
500 bool LiveIntervals::conflictsWithPhysRegDef(const LiveInterval &li,
501 VirtRegMap &vrm, unsigned reg) {
502 for (LiveInterval::Ranges::const_iterator
503 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
504 for (unsigned index = getBaseIndex(I->start),
505 end = getBaseIndex(I->end-1) + InstrSlots::NUM; index != end;
506 index += InstrSlots::NUM) {
507 // skip deleted instructions
508 while (index != end && !getInstructionFromIndex(index))
509 index += InstrSlots::NUM;
510 if (index == end) break;
512 MachineInstr *MI = getInstructionFromIndex(index);
513 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
514 if (tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
515 if (SrcReg == li.reg || DstReg == li.reg)
517 for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
518 MachineOperand& mop = MI->getOperand(i);
521 unsigned PhysReg = mop.getReg();
522 if (PhysReg == 0 || PhysReg == li.reg)
524 if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
525 if (!vrm.hasPhys(PhysReg))
527 PhysReg = vrm.getPhys(PhysReg);
529 if (PhysReg && tri_->regsOverlap(PhysReg, reg))
/// conflictsWithPhysRegRef - Similar to conflictsWithPhysRegDef except
/// it can check uses as well.
540 bool LiveIntervals::conflictsWithPhysRegRef(LiveInterval &li,
541 unsigned Reg, bool CheckUse,
542 SmallPtrSet<MachineInstr*,32> &JoinedCopies) {
543 for (LiveInterval::Ranges::const_iterator
544 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
545 for (unsigned index = getBaseIndex(I->start),
546 end = getBaseIndex(I->end-1) + InstrSlots::NUM; index != end;
547 index += InstrSlots::NUM) {
548 // Skip deleted instructions.
549 MachineInstr *MI = 0;
550 while (index != end) {
551 MI = getInstructionFromIndex(index);
554 index += InstrSlots::NUM;
556 if (index == end) break;
558 if (JoinedCopies.count(MI))
560 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
561 MachineOperand& MO = MI->getOperand(i);
564 if (MO.isUse() && !CheckUse)
566 unsigned PhysReg = MO.getReg();
567 if (PhysReg == 0 || TargetRegisterInfo::isVirtualRegister(PhysReg))
569 if (tri_->isSubRegister(Reg, PhysReg))
579 void LiveIntervals::printRegName(unsigned reg) const {
580 if (TargetRegisterInfo::isPhysicalRegister(reg))
581 cerr << tri_->getName(reg);
583 cerr << "%reg" << reg;
586 void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
587 MachineBasicBlock::iterator mi,
588 unsigned MIIdx, MachineOperand& MO,
590 LiveInterval &interval) {
591 DOUT << "\t\tregister: "; DEBUG(printRegName(interval.reg));
592 LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);
594 if (mi->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
595 DOUT << "is a implicit_def\n";
599 // Virtual registers may be defined multiple times (due to phi
600 // elimination and 2-addr elimination). Much of what we do only has to be
601 // done once for the vreg. We use an empty interval to detect the first
602 // time we see a vreg.
603 if (interval.empty()) {
604 // Get the Idx of the defining instructions.
605 unsigned defIndex = getDefIndex(MIIdx);
606 // Earlyclobbers move back one.
607 if (MO.isEarlyClobber())
608 defIndex = getUseIndex(MIIdx);
610 MachineInstr *CopyMI = NULL;
611 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
612 if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
613 mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
614 mi->getOpcode() == TargetInstrInfo::SUBREG_TO_REG ||
        tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
      CopyMI = mi;
617 // Earlyclobbers move back one.
618 ValNo = interval.getNextValue(defIndex, CopyMI, true, VNInfoAllocator);
620 assert(ValNo->id == 0 && "First value in interval is not 0?");
622 // Loop over all of the blocks that the vreg is defined in. There are
623 // two cases we have to handle here. The most common case is a vreg
624 // whose lifetime is contained within a basic block. In this case there
625 // will be a single kill, in MBB, which comes after the definition.
626 if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
      // FIXME: what about dead vars?
      unsigned killIdx;
      if (vi.Kills[0] != mi)
        killIdx = getUseIndex(getInstructionIndex(vi.Kills[0]))+1;
      else
        killIdx = defIndex+1;

      // If the kill happens after the definition, we have an intra-block
      // live range.
636 if (killIdx > defIndex) {
637 assert(vi.AliveBlocks.empty() &&
638 "Shouldn't be alive across any blocks!");
639 LiveRange LR(defIndex, killIdx, ValNo);
640 interval.addRange(LR);
641 DOUT << " +" << LR << "\n";
642 interval.addKill(ValNo, killIdx, false);
647 // The other case we handle is when a virtual register lives to the end
648 // of the defining block, potentially live across some blocks, then is
649 // live into some number of blocks, but gets killed. Start by adding a
650 // range that goes from this definition to the end of the defining block.
651 LiveRange NewLR(defIndex, getMBBEndIdx(mbb)+1, ValNo);
652 DOUT << " +" << NewLR;
653 interval.addRange(NewLR);
655 // Iterate over all of the blocks that the variable is completely
    // live in, adding [instrIndex(begin), instrIndex(end)+4) to the
    // live interval.
658 for (SparseBitVector<>::iterator I = vi.AliveBlocks.begin(),
659 E = vi.AliveBlocks.end(); I != E; ++I) {
      LiveRange LR(getMBBStartIdx(*I),
                   getMBBEndIdx(*I)+1,  // MBB ends at -1.
                   ValNo);
663 interval.addRange(LR);
667 // Finally, this virtual register is live from the start of any killing
668 // block to the 'use' slot of the killing instruction.
669 for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
670 MachineInstr *Kill = vi.Kills[i];
671 unsigned killIdx = getUseIndex(getInstructionIndex(Kill))+1;
      LiveRange LR(getMBBStartIdx(Kill->getParent()),
                   killIdx, ValNo);
674 interval.addRange(LR);
675 interval.addKill(ValNo, killIdx, false);
680 // If this is the second time we see a virtual register definition, it
681 // must be due to phi elimination or two addr elimination. If this is
682 // the result of two address elimination, then the vreg is one of the
    // def-and-use register operands.
684 if (mi->isRegTiedToUseOperand(MOIdx)) {
685 // If this is a two-address definition, then we have already processed
686 // the live range. The only problem is that we didn't realize there
687 // are actually two values in the live interval. Because of this we
    // need to take the LiveRange that defines this register and split it
    // into two values.
690 assert(interval.containsOneValue());
691 unsigned DefIndex = getDefIndex(interval.getValNumInfo(0)->def);
692 unsigned RedefIndex = getDefIndex(MIIdx);
693 if (MO.isEarlyClobber())
694 RedefIndex = getUseIndex(MIIdx);
696 const LiveRange *OldLR = interval.getLiveRangeContaining(RedefIndex-1);
697 VNInfo *OldValNo = OldLR->valno;
699 // Delete the initial value, which should be short and continuous,
700 // because the 2-addr copy must be in the same MBB as the redef.
701 interval.removeRange(DefIndex, RedefIndex);
703 // Two-address vregs should always only be redefined once. This means
704 // that at this point, there should be exactly one value number in it.
705 assert(interval.containsOneValue() && "Unexpected 2-addr liveint!");
    // The new value number (#1) is defined by the instruction we claimed
    // defined value #0.
    VNInfo *ValNo = interval.getNextValue(OldValNo->def, OldValNo->copy,
                                          false, // update at *
                                          VNInfoAllocator);
    ValNo->setFlags(OldValNo->getFlags()); // * <- updating here
714 // Value#0 is now defined by the 2-addr instruction.
715 OldValNo->def = RedefIndex;
717 if (MO.isEarlyClobber())
718 OldValNo->setHasRedefByEC(true);
720 // Add the new live interval which replaces the range for the input copy.
721 LiveRange LR(DefIndex, RedefIndex, ValNo);
722 DOUT << " replace range with " << LR;
723 interval.addRange(LR);
724 interval.addKill(ValNo, RedefIndex, false);
    // If this redefinition is dead, we need to add a dummy unit live
    // range covering the def slot.
    if (MO.isDead())
      interval.addRange(LiveRange(RedefIndex, RedefIndex+1, OldValNo));
732 interval.print(DOUT, tri_);
735 // Otherwise, this must be because of phi elimination. If this is the
736 // first redefinition of the vreg that we have seen, go back and change
737 // the live range in the PHI block to be a different value number.
738 if (interval.containsOneValue()) {
739 assert(vi.Kills.size() == 1 &&
740 "PHI elimination vreg should have one kill, the PHI itself!");
742 // Remove the old range that we now know has an incorrect number.
743 VNInfo *VNI = interval.getValNumInfo(0);
744 MachineInstr *Killer = vi.Kills[0];
745 unsigned Start = getMBBStartIdx(Killer->getParent());
746 unsigned End = getUseIndex(getInstructionIndex(Killer))+1;
747 DOUT << " Removing [" << Start << "," << End << "] from: ";
748 interval.print(DOUT, tri_); DOUT << "\n";
749 interval.removeRange(Start, End);
750 assert(interval.ranges.size() == 1 &&
751 "newly discovered PHI interval has >1 ranges.");
752 MachineBasicBlock *killMBB = getMBBFromIndex(interval.endNumber());
753 interval.addKill(VNI, terminatorGaps[killMBB], true);
754 VNI->setHasPHIKill(true);
755 DOUT << " RESULT: "; interval.print(DOUT, tri_);
757 // Replace the interval with one of a NEW value number. Note that this
758 // value number isn't actually defined by an instruction, weird huh? :)
759 LiveRange LR(Start, End,
760 interval.getNextValue(mbb->getNumber(), 0, false, VNInfoAllocator));
761 LR.valno->setIsPHIDef(true);
762 DOUT << " replace range with " << LR;
763 interval.addRange(LR);
764 interval.addKill(LR.valno, End, false);
765 DOUT << " RESULT: "; interval.print(DOUT, tri_);
768 // In the case of PHI elimination, each variable definition is only
769 // live until the end of the block. We've already taken care of the
770 // rest of the live range.
771 unsigned defIndex = getDefIndex(MIIdx);
772 if (MO.isEarlyClobber())
773 defIndex = getUseIndex(MIIdx);
776 MachineInstr *CopyMI = NULL;
777 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
778 if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
779 mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
780 mi->getOpcode() == TargetInstrInfo::SUBREG_TO_REG ||
        tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
      CopyMI = mi;
783 ValNo = interval.getNextValue(defIndex, CopyMI, true, VNInfoAllocator);
785 unsigned killIndex = getMBBEndIdx(mbb) + 1;
786 LiveRange LR(defIndex, killIndex, ValNo);
787 interval.addRange(LR);
788 interval.addKill(ValNo, terminatorGaps[mbb], true);
789 ValNo->setHasPHIKill(true);
797 void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
798 MachineBasicBlock::iterator mi,
801 LiveInterval &interval,
802 MachineInstr *CopyMI) {
  // A physical register cannot be live across basic blocks, so its
  // lifetime must end somewhere in its defining basic block.
805 DOUT << "\t\tregister: "; DEBUG(printRegName(interval.reg));
807 unsigned baseIndex = MIIdx;
808 unsigned start = getDefIndex(baseIndex);
809 // Earlyclobbers move back one.
810 if (MO.isEarlyClobber())
811 start = getUseIndex(MIIdx);
812 unsigned end = start;
814 // If it is not used after definition, it is considered dead at
815 // the instruction defining it. Hence its interval is:
816 // [defSlot(def), defSlot(def)+1)
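  // For example (using the slot layout sketched earlier, with NUM == 4): a
  // def at base index 32 that is never read would get the one-slot range
  // [34, 35), i.e. from its def slot to the slot immediately after it.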
823 // If it is not dead on definition, it must be killed by a
824 // subsequent instruction. Hence its interval is:
825 // [defSlot(def), useSlot(kill)+1)
826 baseIndex += InstrSlots::NUM;
827 while (++mi != MBB->end()) {
828 while (baseIndex / InstrSlots::NUM < i2miMap_.size() &&
829 getInstructionFromIndex(baseIndex) == 0)
830 baseIndex += InstrSlots::NUM;
831 if (mi->killsRegister(interval.reg, tri_)) {
833 end = getUseIndex(baseIndex) + 1;
836 int DefIdx = mi->findRegisterDefOperandIdx(interval.reg, false, tri_);
838 if (mi->isRegTiedToUseOperand(DefIdx)) {
839 // Two-address instruction.
840 end = getDefIndex(baseIndex);
841 if (mi->getOperand(DefIdx).isEarlyClobber())
842 end = getUseIndex(baseIndex);
844 // Another instruction redefines the register before it is ever read.
845 // Then the register is essentially dead at the instruction that defines
846 // it. Hence its interval is:
847 // [defSlot(def), defSlot(def)+1)
855 baseIndex += InstrSlots::NUM;
  // The only case in which we should see a dead physreg here without a
  // killing instruction, or any other point where we know it's dead, is if
  // it is live-in to the function and never used. Another possible case is
  // that the implicit use of the physical register has been deleted by the
  // two-address pass.
865 assert(start < end && "did not find end of interval?");
867 // Already exists? Extend old live interval.
868 LiveInterval::iterator OldLR = interval.FindLiveRangeContaining(start);
869 bool Extend = OldLR != interval.end();
870 VNInfo *ValNo = Extend
871 ? OldLR->valno : interval.getNextValue(start, CopyMI, true, VNInfoAllocator);
872 if (MO.isEarlyClobber() && Extend)
873 ValNo->setHasRedefByEC(true);
874 LiveRange LR(start, end, ValNo);
875 interval.addRange(LR);
876 interval.addKill(LR.valno, end, false);
877 DOUT << " +" << LR << '\n';
880 void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
881 MachineBasicBlock::iterator MI,
885 if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
886 handleVirtualRegisterDef(MBB, MI, MIIdx, MO, MOIdx,
887 getOrCreateInterval(MO.getReg()));
888 else if (allocatableRegs_[MO.getReg()]) {
889 MachineInstr *CopyMI = NULL;
890 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
891 if (MI->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
892 MI->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
893 MI->getOpcode() == TargetInstrInfo::SUBREG_TO_REG ||
        tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
      CopyMI = MI;
896 handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
897 getOrCreateInterval(MO.getReg()), CopyMI);
898 // Def of a register also defines its sub-registers.
899 for (const unsigned* AS = tri_->getSubRegisters(MO.getReg()); *AS; ++AS)
900 // If MI also modifies the sub-register explicitly, avoid processing it
901 // more than once. Do not pass in TRI here so it checks for exact match.
902 if (!MI->modifiesRegister(*AS))
903 handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
904 getOrCreateInterval(*AS), 0);
908 void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
910 LiveInterval &interval, bool isAlias) {
911 DOUT << "\t\tlivein register: "; DEBUG(printRegName(interval.reg));
  // Look for kills; if the register reaches a def before it's killed, then it
  // shouldn't be considered a live-in.
915 MachineBasicBlock::iterator mi = MBB->begin();
916 unsigned baseIndex = MIIdx;
917 unsigned start = baseIndex;
918 while (baseIndex / InstrSlots::NUM < i2miMap_.size() &&
919 getInstructionFromIndex(baseIndex) == 0)
920 baseIndex += InstrSlots::NUM;
921 unsigned end = baseIndex;
922 bool SeenDefUse = false;
924 while (mi != MBB->end()) {
925 if (mi->killsRegister(interval.reg, tri_)) {
927 end = getUseIndex(baseIndex) + 1;
930 } else if (mi->modifiesRegister(interval.reg, tri_)) {
931 // Another instruction redefines the register before it is ever read.
932 // Then the register is essentially dead at the instruction that defines
933 // it. Hence its interval is:
934 // [defSlot(def), defSlot(def)+1)
936 end = getDefIndex(start) + 1;
941 baseIndex += InstrSlots::NUM;
943 if (mi != MBB->end()) {
944 while (baseIndex / InstrSlots::NUM < i2miMap_.size() &&
945 getInstructionFromIndex(baseIndex) == 0)
946 baseIndex += InstrSlots::NUM;
950 // Live-in register might not be used at all.
954 end = getDefIndex(MIIdx) + 1;
956 DOUT << " live through";
962 interval.getNextValue(MBB->getNumber(), 0, false, VNInfoAllocator);
963 vni->setIsPHIDef(true);
964 LiveRange LR(start, end, vni);
966 interval.addRange(LR);
967 interval.addKill(LR.valno, end, false);
968 DOUT << " +" << LR << '\n';
/// computeIntervals - Computes the live intervals for virtual and physical
/// registers. For some ordering of the machine instructions [1,N], a live
/// interval is an interval [i, j) where 1 <= i <= j < N for which a
/// variable is live.
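/// For example (illustrative indices): a vreg defined at instruction index 8
/// and last read at index 24 would get the interval [10, 26), from its def
/// slot to one past the use slot of the killing instruction.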
975 void LiveIntervals::computeIntervals() {
977 DOUT << "********** COMPUTING LIVE INTERVALS **********\n"
978 << "********** Function: "
979 << ((Value*)mf_->getFunction())->getName() << '\n';
981 for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
983 MachineBasicBlock *MBB = MBBI;
984 // Track the index of the current machine instr.
985 unsigned MIIndex = getMBBStartIdx(MBB);
986 DOUT << ((Value*)MBB->getBasicBlock())->getName() << ":\n";
988 MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();
990 // Create intervals for live-ins to this BB first.
991 for (MachineBasicBlock::const_livein_iterator LI = MBB->livein_begin(),
992 LE = MBB->livein_end(); LI != LE; ++LI) {
993 handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
994 // Multiple live-ins can alias the same register.
995 for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
996 if (!hasInterval(*AS))
          handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
                               true);
1001 // Skip over empty initial indices.
1002 while (MIIndex / InstrSlots::NUM < i2miMap_.size() &&
1003 getInstructionFromIndex(MIIndex) == 0)
1004 MIIndex += InstrSlots::NUM;
1006 for (; MI != miEnd; ++MI) {
1007 DOUT << MIIndex << "\t" << *MI;
1010 for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
1011 MachineOperand &MO = MI->getOperand(i);
1012 // handle register defs - build intervals
1013 if (MO.isReg() && MO.getReg() && MO.isDef()) {
1014 handleRegisterDef(MBB, MI, MIIndex, MO, i);
1018 // Skip over the empty slots after each instruction.
1019 unsigned Slots = MI->getDesc().getNumDefs();
1022 MIIndex += InstrSlots::NUM * Slots;
1024 // Skip over empty indices.
1025 while (MIIndex / InstrSlots::NUM < i2miMap_.size() &&
1026 getInstructionFromIndex(MIIndex) == 0)
1027 MIIndex += InstrSlots::NUM;
1032 bool LiveIntervals::findLiveInMBBs(unsigned Start, unsigned End,
1033 SmallVectorImpl<MachineBasicBlock*> &MBBs) const {
1034 std::vector<IdxMBBPair>::const_iterator I =
1035 std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), Start);
1037 bool ResVal = false;
1038 while (I != Idx2MBBMap.end()) {
1039 if (I->first >= End)
1041 MBBs.push_back(I->second);
1048 bool LiveIntervals::findReachableMBBs(unsigned Start, unsigned End,
1049 SmallVectorImpl<MachineBasicBlock*> &MBBs) const {
1050 std::vector<IdxMBBPair>::const_iterator I =
1051 std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), Start);
1053 bool ResVal = false;
1054 while (I != Idx2MBBMap.end()) {
1057 MachineBasicBlock *MBB = I->second;
1058 if (getMBBEndIdx(MBB) > End)
1060 for (MachineBasicBlock::succ_iterator SI = MBB->succ_begin(),
1061 SE = MBB->succ_end(); SI != SE; ++SI)
1062 MBBs.push_back(*SI);
1069 LiveInterval* LiveIntervals::createInterval(unsigned reg) {
1070 float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ? HUGE_VALF : 0.0F;
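  // Note: physical-register intervals are given HUGE_VALF weight so the
  // allocator never considers spilling them.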
1071 return new LiveInterval(reg, Weight);
1074 /// dupInterval - Duplicate a live interval. The caller is responsible for
1075 /// managing the allocated memory.
1076 LiveInterval* LiveIntervals::dupInterval(LiveInterval *li) {
1077 LiveInterval *NewLI = createInterval(li->reg);
1078 NewLI->Copy(*li, mri_, getVNInfoAllocator());
1082 /// getVNInfoSourceReg - Helper function that parses the specified VNInfo
1083 /// copy field and returns the source register that defines it.
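/// For example (illustrative), if VNI->copy is a plain register move
///   %reg1026 = MOV %reg1024
/// this returns %reg1024; for an EXTRACT_SUBREG of a physical register it
/// returns the corresponding sub-register instead.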
1084 unsigned LiveIntervals::getVNInfoSourceReg(const VNInfo *VNI) const {
1088 if (VNI->copy->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG) {
1089 // If it's extracting out of a physical register, return the sub-register.
1090 unsigned Reg = VNI->copy->getOperand(1).getReg();
1091 if (TargetRegisterInfo::isPhysicalRegister(Reg))
1092 Reg = tri_->getSubReg(Reg, VNI->copy->getOperand(2).getImm());
1094 } else if (VNI->copy->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
1095 VNI->copy->getOpcode() == TargetInstrInfo::SUBREG_TO_REG)
1096 return VNI->copy->getOperand(2).getReg();
1098 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
1099 if (tii_->isMoveInstr(*VNI->copy, SrcReg, DstReg, SrcSubReg, DstSubReg))
1101 assert(0 && "Unrecognized copy instruction!");
1105 //===----------------------------------------------------------------------===//
1106 // Register allocator hooks.
1109 /// getReMatImplicitUse - If the remat definition MI has one (for now, we only
1110 /// allow one) virtual register operand, then its uses are implicitly using
1111 /// the register. Returns the virtual register.
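/// For example (illustrative), if the remat candidate is
///   %reg1025 = LEA %reg1024, ...
/// then rematerializing %reg1025 implicitly requires %reg1024 to be live at
/// each point of use, so this returns %reg1024.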
1112 unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
1113 MachineInstr *MI) const {
1115 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1116 MachineOperand &MO = MI->getOperand(i);
1117 if (!MO.isReg() || !MO.isUse())
1119 unsigned Reg = MO.getReg();
1120 if (Reg == 0 || Reg == li.reg)
1123 if (TargetRegisterInfo::isPhysicalRegister(Reg) &&
1124 !allocatableRegs_[Reg])
    // FIXME: For now, only remat MI with at most one register operand.
    assert(RegOp == 0 &&
           "Can't rematerialize instruction with multiple register operands!");
1129 RegOp = MO.getReg();
1137 /// isValNoAvailableAt - Return true if the val# of the specified interval
1138 /// which reaches the given instruction also reaches the specified use index.
1139 bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
1140 unsigned UseIdx) const {
1141 unsigned Index = getInstructionIndex(MI);
1142 VNInfo *ValNo = li.FindLiveRangeContaining(Index)->valno;
1143 LiveInterval::const_iterator UI = li.FindLiveRangeContaining(UseIdx);
1144 return UI != li.end() && UI->valno == ValNo;
1147 /// isReMaterializable - Returns true if the definition MI of the specified
1148 /// val# of the specified interval is re-materializable.
1149 bool LiveIntervals::isReMaterializable(const LiveInterval &li,
1150 const VNInfo *ValNo, MachineInstr *MI,
1151 SmallVectorImpl<LiveInterval*> &SpillIs,
1156 if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF)
1160 if (tii_->isLoadFromStackSlot(MI, FrameIdx) &&
1161 mf_->getFrameInfo()->isImmutableObjectIndex(FrameIdx))
    // FIXME: Let the target-specific isReallyTriviallyReMaterializable
    // determine this, but remember it is not safe to fold into a two-address
    // instruction.
    // This is a load from a fixed stack slot. It can be rematerialized.
    return true;
  // If the target-specific rules don't identify an instruction as
  // being trivially rematerializable, use some target-independent
  // heuristics instead.
1171 if (!MI->getDesc().isRematerializable() ||
1172 !tii_->isTriviallyReMaterializable(MI)) {
1173 if (!EnableAggressiveRemat)
1176 // If the instruction accesses memory but the memoperands have been lost,
1177 // we can't analyze it.
1178 const TargetInstrDesc &TID = MI->getDesc();
1179 if ((TID.mayLoad() || TID.mayStore()) && MI->memoperands_empty())
1182 // Avoid instructions obviously unsafe for remat.
1183 if (TID.hasUnmodeledSideEffects() || TID.isNotDuplicable())
1186 // If the instruction accesses memory and the memory could be non-constant,
1187 // assume the instruction is not rematerializable.
1188 for (std::list<MachineMemOperand>::const_iterator
1189 I = MI->memoperands_begin(), E = MI->memoperands_end(); I != E; ++I){
1190 const MachineMemOperand &MMO = *I;
1191 if (MMO.isVolatile() || MMO.isStore())
1193 const Value *V = MMO.getValue();
1196 if (const PseudoSourceValue *PSV = dyn_cast<PseudoSourceValue>(V)) {
1197 if (!PSV->isConstant(mf_->getFrameInfo()))
1199 } else if (!aa_->pointsToConstantMemory(V))
1203 // If any of the registers accessed are non-constant, conservatively assume
1204 // the instruction is not rematerializable.
1205 unsigned ImpUse = 0;
1206 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1207 const MachineOperand &MO = MI->getOperand(i);
1209 unsigned Reg = MO.getReg();
1212 if (TargetRegisterInfo::isPhysicalRegister(Reg))
1215 // Only allow one def, and that in the first operand.
1216 if (MO.isDef() != (i == 0))
1219 // Only allow constant-valued registers.
1220 bool IsLiveIn = mri_->isLiveIn(Reg);
1221 MachineRegisterInfo::def_iterator I = mri_->def_begin(Reg),
1222 E = mri_->def_end();
1224 // For the def, it should be the only def of that register.
1225 if (MO.isDef() && (next(I) != E || IsLiveIn))
      // Only allow one other register use, as that's all the
      // remat mechanisms currently support.
1231 if (Reg != li.reg) {
1234 else if (Reg != ImpUse)
1237 // For the use, there should be only one associated def.
1238 if (I != E && (next(I) != E || IsLiveIn))
1245 unsigned ImpUse = getReMatImplicitUse(li, MI);
1247 const LiveInterval &ImpLi = getInterval(ImpUse);
1248 for (MachineRegisterInfo::use_iterator ri = mri_->use_begin(li.reg),
1249 re = mri_->use_end(); ri != re; ++ri) {
1250 MachineInstr *UseMI = &*ri;
1251 unsigned UseIdx = getInstructionIndex(UseMI);
1252 if (li.FindLiveRangeContaining(UseIdx)->valno != ValNo)
1254 if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
1258 // If a register operand of the re-materialized instruction is going to
1259 // be spilled next, then it's not legal to re-materialize this instruction.
1260 for (unsigned i = 0, e = SpillIs.size(); i != e; ++i)
1261 if (ImpUse == SpillIs[i]->reg)
1267 /// isReMaterializable - Returns true if the definition MI of the specified
1268 /// val# of the specified interval is re-materializable.
1269 bool LiveIntervals::isReMaterializable(const LiveInterval &li,
1270 const VNInfo *ValNo, MachineInstr *MI) {
1271 SmallVector<LiveInterval*, 4> Dummy1;
1273 return isReMaterializable(li, ValNo, MI, Dummy1, Dummy2);
/// isReMaterializable - Returns true if the definition MI of every
/// val# of the specified interval is re-materializable.
1278 bool LiveIntervals::isReMaterializable(const LiveInterval &li,
1279 SmallVectorImpl<LiveInterval*> &SpillIs,
1282 for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
1284 const VNInfo *VNI = *i;
1285 if (VNI->isUnused())
1286 continue; // Dead val#.
1287 // Is the def for the val# rematerializable?
1288 if (!VNI->isDefAccurate())
1290 MachineInstr *ReMatDefMI = getInstructionFromIndex(VNI->def);
1291 bool DefIsLoad = false;
1293 !isReMaterializable(li, VNI, ReMatDefMI, SpillIs, DefIsLoad))
1295 isLoad |= DefIsLoad;
1300 /// FilterFoldedOps - Filter out two-address use operands. Return
/// true if it finds any issue with the operands that ought to prevent
/// folding.
1303 static bool FilterFoldedOps(MachineInstr *MI,
1304 SmallVector<unsigned, 2> &Ops,
1306 SmallVector<unsigned, 2> &FoldOps) {
1308 for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
1309 unsigned OpIdx = Ops[i];
1310 MachineOperand &MO = MI->getOperand(OpIdx);
1311 // FIXME: fold subreg use.
1315 MRInfo |= (unsigned)VirtRegMap::isMod;
1317 // Filter out two-address use operand(s).
1318 if (MI->isRegTiedToDefOperand(OpIdx)) {
1319 MRInfo = VirtRegMap::isModRef;
1322 MRInfo |= (unsigned)VirtRegMap::isRef;
1324 FoldOps.push_back(OpIdx);
/// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
/// slot / to reg or any rematerialized load into the ith operand of the
/// specified MI. If it is successful, MI is updated with the newly created
/// MI and returns true.
1334 bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
1335 VirtRegMap &vrm, MachineInstr *DefMI,
1337 SmallVector<unsigned, 2> &Ops,
1338 bool isSS, int Slot, unsigned Reg) {
1339 // If it is an implicit def instruction, just delete it.
1340 if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
1341 RemoveMachineInstrFromMaps(MI);
1342 vrm.RemoveMachineInstrFromMaps(MI);
1343 MI->eraseFromParent();
1348 // Filter the list of operand indexes that are to be folded. Abort if
1349 // any operand will prevent folding.
1350 unsigned MRInfo = 0;
1351 SmallVector<unsigned, 2> FoldOps;
1352 if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
1355 // The only time it's safe to fold into a two address instruction is when
1356 // it's folding reload and spill from / into a spill stack slot.
1357 if (DefMI && (MRInfo & VirtRegMap::isMod))
1360 MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(*mf_, MI, FoldOps, Slot)
1361 : tii_->foldMemoryOperand(*mf_, MI, FoldOps, DefMI);
1363 // Remember this instruction uses the spill slot.
1364 if (isSS) vrm.addSpillSlotUse(Slot, fmi);
1366 // Attempt to fold the memory reference into the instruction. If
1367 // we can do this, we don't need to insert spill code.
1368 MachineBasicBlock &MBB = *MI->getParent();
1369 if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
1370 vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
1371 vrm.transferSpillPts(MI, fmi);
1372 vrm.transferRestorePts(MI, fmi);
1373 vrm.transferEmergencySpills(MI, fmi);
1375 i2miMap_[InstrIdx /InstrSlots::NUM] = fmi;
1376 mi2iMap_[fmi] = InstrIdx;
1377 MI = MBB.insert(MBB.erase(MI), fmi);
1384 /// canFoldMemoryOperand - Returns true if the specified load / store
1385 /// folding is possible.
1386 bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
1387 SmallVector<unsigned, 2> &Ops,
1389 // Filter the list of operand indexes that are to be folded. Abort if
1390 // any operand will prevent folding.
1391 unsigned MRInfo = 0;
1392 SmallVector<unsigned, 2> FoldOps;
1393 if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
1396 // It's only legal to remat for a use, not a def.
1397 if (ReMat && (MRInfo & VirtRegMap::isMod))
1400 return tii_->canFoldMemoryOperand(MI, FoldOps);
1403 bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
1404 SmallPtrSet<MachineBasicBlock*, 4> MBBs;
1405 for (LiveInterval::Ranges::const_iterator
1406 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1407 std::vector<IdxMBBPair>::const_iterator II =
1408 std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), I->start);
1409 if (II == Idx2MBBMap.end())
1411 if (I->end > II->first) // crossing a MBB.
1413 MBBs.insert(II->second);
1414 if (MBBs.size() > 1)
1420 /// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of
1421 /// interval on to-be re-materialized operands of MI) with new register.
1422 void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
1423 MachineInstr *MI, unsigned NewVReg,
  // There is an implicit use. That means one of the other operands is
  // being remat'ed and the remat'ed instruction has li.reg as a
  // use operand. Make sure we rewrite that as well.
1428 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1429 MachineOperand &MO = MI->getOperand(i);
1432 unsigned Reg = MO.getReg();
1433 if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
1435 if (!vrm.isReMaterialized(Reg))
1437 MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
1438 MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg);
1440 UseMO->setReg(NewVReg);
1444 /// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
1445 /// for addIntervalsForSpills to rewrite uses / defs for the given live range.
1446 bool LiveIntervals::
1447 rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
1448 bool TrySplit, unsigned index, unsigned end, MachineInstr *MI,
1449 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
1450 unsigned Slot, int LdSlot,
1451 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
1453 const TargetRegisterClass* rc,
1454 SmallVector<int, 4> &ReMatIds,
1455 const MachineLoopInfo *loopInfo,
1456 unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
1457 DenseMap<unsigned,unsigned> &MBBVRegsMap,
1458 std::vector<LiveInterval*> &NewLIs) {
1459 bool CanFold = false;
1461 for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
1462 MachineOperand& mop = MI->getOperand(i);
1465 unsigned Reg = mop.getReg();
1466 unsigned RegI = Reg;
1467 if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
1472 bool TryFold = !DefIsReMat;
1473 bool FoldSS = true; // Default behavior unless it's a remat.
1474 int FoldSlot = Slot;
1476 // If this is the rematerializable definition MI itself and
1477 // all of its uses are rematerialized, simply delete it.
1478 if (MI == ReMatOrigDefMI && CanDelete) {
        DOUT << "\t\t\t\tErasing re-materializable def: ";
1481 RemoveMachineInstrFromMaps(MI);
1482 vrm.RemoveMachineInstrFromMaps(MI);
1483 MI->eraseFromParent();
1487 // If def for this use can't be rematerialized, then try folding.
1488 // If def is rematerializable and it's a load, also try folding.
1489 TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
1491 // Try fold loads (from stack slot, constant pool, etc.) into uses.
1497 // Scan all of the operands of this instruction rewriting operands
    // to use NewVReg instead of li.reg as appropriate. We do this for
    // two reasons:
1501 // 1. If the instr reads the same spilled vreg multiple times, we
1502 // want to reuse the NewVReg.
1503 // 2. If the instr is a two-addr instruction, we are required to
1504 // keep the src/dst regs pinned.
1506 // Keep track of whether we replace a use and/or def so that we can
1507 // create the spill interval with the appropriate range.
1509 HasUse = mop.isUse();
1510 HasDef = mop.isDef();
1511 SmallVector<unsigned, 2> Ops;
1513 for (unsigned j = i+1, e = MI->getNumOperands(); j != e; ++j) {
1514 const MachineOperand &MOj = MI->getOperand(j);
1517 unsigned RegJ = MOj.getReg();
1518 if (RegJ == 0 || TargetRegisterInfo::isPhysicalRegister(RegJ))
1522 HasUse |= MOj.isUse();
1523 HasDef |= MOj.isDef();
1527 if (HasUse && !li.liveAt(getUseIndex(index)))
1528 // Must be defined by an implicit def. It should not be spilled. Note,
      // this is for correctness reasons. e.g.
1530 // 8 %reg1024<def> = IMPLICIT_DEF
1531 // 12 %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
      // The live range [12, 14) is not part of the r1024 live interval since
      // it's defined by an implicit def. It will not conflict with the live
1534 // interval of r1025. Now suppose both registers are spilled, you can
1535 // easily see a situation where both registers are reloaded before
1536 // the INSERT_SUBREG and both target registers that would overlap.
    // Create a new virtual register for the spill interval.
    // Create the new register now so we can map the fold instruction
    // to the new register so when it is unfolded we get the correct
    // answer.
1543 bool CreatedNewVReg = false;
1545 NewVReg = mri_->createVirtualRegister(rc);
1547 CreatedNewVReg = true;
1553 // Do not fold load / store here if we are splitting. We'll find an
1554 // optimal point to insert a load / store later.
1556 if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
1557 Ops, FoldSS, FoldSlot, NewVReg)) {
1558 // Folding the load/store can completely change the instruction in
1559 // unpredictable ways, rescan it from the beginning.
1562 // We need to give the new vreg the same stack slot as the
1563 // spilled interval.
1564 vrm.assignVirt2StackSlot(NewVReg, FoldSlot);
1570 if (isNotInMIMap(MI))
1572 goto RestartInstruction;
1575 // We'll try to fold it later if it's profitable.
1576 CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat);
1580 mop.setReg(NewVReg);
1581 if (mop.isImplicit())
1582 rewriteImplicitOps(li, MI, NewVReg, vrm);
1584 // Reuse NewVReg for other reads.
1585 for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
1586 MachineOperand &mopj = MI->getOperand(Ops[j]);
1587 mopj.setReg(NewVReg);
1588 if (mopj.isImplicit())
1589 rewriteImplicitOps(li, MI, NewVReg, vrm);
1592 if (CreatedNewVReg) {
1594 vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI/*, CanDelete*/);
1595 if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
1596 // Each valnum may have its own remat id.
1597 ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
1599 vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
1601 if (!CanDelete || (HasUse && HasDef)) {
1602 // If this is a two-addr instruction then its use operands are
        // rematerializable but its def is not. It should be assigned a
        // stack slot.
1605 vrm.assignVirt2StackSlot(NewVReg, Slot);
1608 vrm.assignVirt2StackSlot(NewVReg, Slot);
1610 } else if (HasUse && HasDef &&
1611 vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
1612 // If this interval hasn't been assigned a stack slot (because earlier
1613 // def is a deleted remat def), do it now.
1614 assert(Slot != VirtRegMap::NO_STACK_SLOT);
1615 vrm.assignVirt2StackSlot(NewVReg, Slot);
1618 // Re-matting an instruction with virtual register use. Add the
1619 // register as an implicit use on the use MI.
1620 if (DefIsReMat && ImpUse)
1621 MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
1623 // Create a new register interval for this spill / remat.
1624 LiveInterval &nI = getOrCreateInterval(NewVReg);
1625 if (CreatedNewVReg) {
1626 NewLIs.push_back(&nI);
1627 MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
1629 vrm.setIsSplitFromReg(NewVReg, li.reg);
1633 if (CreatedNewVReg) {
1634 LiveRange LR(getLoadIndex(index), getUseIndex(index)+1,
1635 nI.getNextValue(0, 0, false, VNInfoAllocator));
1639 // Extend the split live interval to this def / use.
1640 unsigned End = getUseIndex(index)+1;
1641 LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
1642 nI.getValNumInfo(nI.getNumValNums()-1));
1648 LiveRange LR(getDefIndex(index), getStoreIndex(index),
1649 nI.getNextValue(0, 0, false, VNInfoAllocator));
1654 DOUT << "\t\t\t\tAdded new interval: ";
1655 nI.print(DOUT, tri_);
1660 bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
1662 MachineBasicBlock *MBB, unsigned Idx) const {
1663 unsigned End = getMBBEndIdx(MBB);
1664 for (unsigned j = 0, ee = VNI->kills.size(); j != ee; ++j) {
1665 if (VNI->kills[j].isPHIKill)
1668 unsigned KillIdx = VNI->kills[j].killIdx;
1669 if (KillIdx > Idx && KillIdx < End)
1675 /// RewriteInfo - Keep track of machine instrs that will be rewritten
1676 /// during spilling.
1678 struct RewriteInfo {
1683 RewriteInfo(unsigned i, MachineInstr *mi, bool u, bool d)
1684 : Index(i), MI(mi), HasUse(u), HasDef(d) {}
1687 struct RewriteInfoCompare {
1688 bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
1689 return LHS.Index < RHS.Index;
1694 void LiveIntervals::
1695 rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
1696 LiveInterval::Ranges::const_iterator &I,
1697 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
1698 unsigned Slot, int LdSlot,
1699 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
1701 const TargetRegisterClass* rc,
1702 SmallVector<int, 4> &ReMatIds,
1703 const MachineLoopInfo *loopInfo,
1704 BitVector &SpillMBBs,
1705 DenseMap<unsigned, std::vector<SRInfo> > &SpillIdxes,
1706 BitVector &RestoreMBBs,
1707 DenseMap<unsigned, std::vector<SRInfo> > &RestoreIdxes,
1708 DenseMap<unsigned,unsigned> &MBBVRegsMap,
1709 std::vector<LiveInterval*> &NewLIs) {
1710 bool AllCanFold = true;
1711 unsigned NewVReg = 0;
1712 unsigned start = getBaseIndex(I->start);
1713 unsigned end = getBaseIndex(I->end-1) + InstrSlots::NUM;
1715 // First collect all the def / use in this live range that will be rewritten.
1716 // Make sure they are sorted according to instruction index.
1717 std::vector<RewriteInfo> RewriteMIs;
1718 for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
1719 re = mri_->reg_end(); ri != re; ) {
1720 MachineInstr *MI = &*ri;
1721 MachineOperand &O = ri.getOperand();
1723 assert(!O.isImplicit() && "Spilling register that's used as implicit use?");
1724 unsigned index = getInstructionIndex(MI);
1725 if (index < start || index >= end)
1727 if (O.isUse() && !li.liveAt(getUseIndex(index)))
1728 // Must be defined by an implicit def. It should not be spilled. Note,
      // this is for correctness reasons. e.g.
1730 // 8 %reg1024<def> = IMPLICIT_DEF
1731 // 12 %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
      // The live range [12, 14) is not part of the r1024 live interval since
      // it's defined by an implicit def. It will not conflict with the live
1734 // interval of r1025. Now suppose both registers are spilled, you can
1735 // easily see a situation where both registers are reloaded before
1736 // the INSERT_SUBREG and both target registers that would overlap.
1738 RewriteMIs.push_back(RewriteInfo(index, MI, O.isUse(), O.isDef()));
1740 std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());
1742 unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
1743 // Now rewrite the defs and uses.
1744 for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
1745 RewriteInfo &rwi = RewriteMIs[i];
1747 unsigned index = rwi.Index;
1748 bool MIHasUse = rwi.HasUse;
1749 bool MIHasDef = rwi.HasDef;
1750 MachineInstr *MI = rwi.MI;
    // If MI defs and/or uses the same register multiple times, then there
1752 // are multiple entries.
1753 unsigned NumUses = MIHasUse;
1754 while (i != e && RewriteMIs[i].MI == MI) {
1755 assert(RewriteMIs[i].Index == index);
1756 bool isUse = RewriteMIs[i].HasUse;
1757 if (isUse) ++NumUses;
1759 MIHasDef |= RewriteMIs[i].HasDef;
1762 MachineBasicBlock *MBB = MI->getParent();
1764 if (ImpUse && MI != ReMatDefMI) {
1765 // Re-matting an instruction with virtual register use. Update the
      // register interval's spill weight to HUGE_VALF to prevent it from
      // being spilled.
1768 LiveInterval &ImpLi = getInterval(ImpUse);
1769 ImpLi.weight = HUGE_VALF;
1772 unsigned MBBId = MBB->getNumber();
1773 unsigned ThisVReg = 0;
1775 DenseMap<unsigned,unsigned>::iterator NVI = MBBVRegsMap.find(MBBId);
1776 if (NVI != MBBVRegsMap.end()) {
1777 ThisVReg = NVI->second;
      // It's better to start a new interval to avoid artificially
      // extending the new interval.
1786 if (MIHasDef && !MIHasUse) {
1787 MBBVRegsMap.erase(MBB->getNumber());
1793 bool IsNew = ThisVReg == 0;
1795 // This ends the previous live interval. If all of its def / use
1796 // can be folded, give it a low spill weight.
1797 if (NewVReg && TrySplit && AllCanFold) {
1798 LiveInterval &nI = getOrCreateInterval(NewVReg);
1805 bool HasDef = false;
1806 bool HasUse = false;
1807 bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
1808 index, end, MI, ReMatOrigDefMI, ReMatDefMI,
1809 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1810 CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
1811 ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs);
1812 if (!HasDef && !HasUse)
1815 AllCanFold &= CanFold;
1817 // Update weight of spill interval.
1818 LiveInterval &nI = getOrCreateInterval(NewVReg);
1820 // The spill weight is now infinity as it cannot be spilled again.
1821 nI.weight = HUGE_VALF;
1825 // Keep track of the last def and first use in each MBB.
1827 if (MI != ReMatOrigDefMI || !CanDelete) {
1828 bool HasKill = false;
1830 HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, getDefIndex(index));
1832 // If this is a two-address instruction, then this index starts a new VNInfo.
1833 const VNInfo *VNI = li.findDefinedVNInfo(getDefIndex(index));
1835 HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, getDefIndex(index));
1837 DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
1838 SpillIdxes.find(MBBId);
1840 if (SII == SpillIdxes.end()) {
1841 std::vector<SRInfo> S;
1842 S.push_back(SRInfo(index, NewVReg, true));
1843 SpillIdxes.insert(std::make_pair(MBBId, S));
1844 } else if (SII->second.back().vreg != NewVReg) {
1845 SII->second.push_back(SRInfo(index, NewVReg, true));
1846 } else if ((int)index > SII->second.back().index) {
1847 // If there is an earlier def and this is a two-address
1848 // instruction, then it's not possible to fold the store (which
1849 // would also fold the load).
1850 SRInfo &Info = SII->second.back();
1852 Info.canFold = !HasUse;
1854 SpillMBBs.set(MBBId);
1855 } else if (SII != SpillIdxes.end() &&
1856 SII->second.back().vreg == NewVReg &&
1857 (int)index > SII->second.back().index) {
1858 // There is an earlier def that's not killed (must be two-address).
1859 // The spill is no longer needed.
1860 SII->second.pop_back();
1861 if (SII->second.empty()) {
1862 SpillIdxes.erase(MBBId);
1863 SpillMBBs.reset(MBBId);
1870 DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
1871 SpillIdxes.find(MBBId);
1872 if (SII != SpillIdxes.end() &&
1873 SII->second.back().vreg == NewVReg &&
1874 (int)index > SII->second.back().index)
1875 // Use(s) follow the last def; it's not safe to fold the spill.
1876 SII->second.back().canFold = false;
1877 DenseMap<unsigned, std::vector<SRInfo> >::iterator RII =
1878 RestoreIdxes.find(MBBId);
1879 if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
1880 // If we are splitting live intervals, only fold if it's the first
1881 // use and there isn't another use later in the MBB.
1882 RII->second.back().canFold = false;
1884 // Only need a reload if there isn't an earlier def / use.
1885 if (RII == RestoreIdxes.end()) {
1886 std::vector<SRInfo> Infos;
1887 Infos.push_back(SRInfo(index, NewVReg, true));
1888 RestoreIdxes.insert(std::make_pair(MBBId, Infos));
1890 RII->second.push_back(SRInfo(index, NewVReg, true));
1892 RestoreMBBs.set(MBBId);
1896 // Update spill weight.
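// The added weight depends on whether this point defines and/or uses the
// stack slot and on the loop depth of the containing block, so accesses in
// more deeply nested loops presumably count for more.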
1897 unsigned loopDepth = loopInfo->getLoopDepth(MBB);
1898 nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
1901 if (NewVReg && TrySplit && AllCanFold) {
1902 // If all of its def / use can be folded, give it a low spill weight.
1903 LiveInterval &nI = getOrCreateInterval(NewVReg);
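/// alsoFoldARestore - Return true if the restore recorded for register vr at
/// instruction index 'index' in block Id is still marked as foldable.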
1908 bool LiveIntervals::alsoFoldARestore(int Id, int index, unsigned vr,
1909 BitVector &RestoreMBBs,
1910 DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1911 if (!RestoreMBBs[Id])
1913 std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1914 for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1915 if (Restores[i].index == index &&
1916 Restores[i].vreg == vr &&
1917 Restores[i].canFold)
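/// eraseRestoreInfo - Invalidate the restore recorded for register vr at
/// instruction index 'index' in block Id so that no reload is issued for it.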
1922 void LiveIntervals::eraseRestoreInfo(int Id, int index, unsigned vr,
1923 BitVector &RestoreMBBs,
1924 DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1925 if (!RestoreMBBs[Id])
1927 std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1928 for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1929 if (Restores[i].index == index && Restores[i].vreg == vr)
1930 Restores[i].index = -1;
1933 /// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being
1934 /// spilled and create empty intervals for their uses.
1936 LiveIntervals::handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm,
1937 const TargetRegisterClass* rc,
1938 std::vector<LiveInterval*> &NewLIs) {
1939 for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
1940 re = mri_->reg_end(); ri != re; ) {
1941 MachineOperand &O = ri.getOperand();
1942 MachineInstr *MI = &*ri;
1945 assert(MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF &&
1946 "Register def was not rewritten?");
1947 RemoveMachineInstrFromMaps(MI);
1948 vrm.RemoveMachineInstrFromMaps(MI);
1949 MI->eraseFromParent();
1951 // This must be a use of an implicit_def so it's not part of the live
1952 // interval. Create a new empty live interval for it.
1953 // FIXME: Can we simply erase some of the instructions? e.g. Stores?
1954 unsigned NewVReg = mri_->createVirtualRegister(rc);
1956 vrm.setIsImplicitlyDefined(NewVReg);
1957 NewLIs.push_back(&getOrCreateInterval(NewVReg));
1958 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1959 MachineOperand &MO = MI->getOperand(i);
1960 if (MO.isReg() && MO.getReg() == li.reg) {
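/// addIntervalsForSpillsFast - Simple spill path used when fast spilling is
/// enabled: every instruction that reads or writes li.reg either has the
/// stack slot folded into it or is rewritten to a fresh virtual register that
/// is reloaded before the use and/or stored back after the def.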
1969 std::vector<LiveInterval*> LiveIntervals::
1970 addIntervalsForSpillsFast(const LiveInterval &li,
1971 const MachineLoopInfo *loopInfo,
1973 unsigned slot = vrm.assignVirt2StackSlot(li.reg);
1975 std::vector<LiveInterval*> added;
1977 assert(li.weight != HUGE_VALF &&
1978 "attempt to spill already spilled interval!");
1980 DOUT << "\t\t\t\tadding intervals for spills for interval: ";
1984 const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
1986 MachineRegisterInfo::reg_iterator RI = mri_->reg_begin(li.reg);
1987 while (RI != mri_->reg_end()) {
1988 MachineInstr* MI = &*RI;
1990 SmallVector<unsigned, 2> Indices;
1991 bool HasUse = false;
1992 bool HasDef = false;
1994 for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
1995 MachineOperand& mop = MI->getOperand(i);
1996 if (!mop.isReg() || mop.getReg() != li.reg) continue;
1998 HasUse |= MI->getOperand(i).isUse();
1999 HasDef |= MI->getOperand(i).isDef();
2001 Indices.push_back(i);
2004 if (!tryFoldMemoryOperand(MI, vrm, NULL, getInstructionIndex(MI),
2005 Indices, true, slot, li.reg)) {
2006 unsigned NewVReg = mri_->createVirtualRegister(rc);
2008 vrm.assignVirt2StackSlot(NewVReg, slot);
2010 // create a new register for this spill
2011 LiveInterval &nI = getOrCreateInterval(NewVReg);
2013 // the spill weight is now infinity as it
2014 // cannot be spilled again
2015 nI.weight = HUGE_VALF;
2017 // Rewrite register operands to use the new vreg.
2018 for (SmallVectorImpl<unsigned>::iterator I = Indices.begin(),
2019 E = Indices.end(); I != E; ++I) {
2020 MI->getOperand(*I).setReg(NewVReg);
2022 if (MI->getOperand(*I).isUse())
2023 MI->getOperand(*I).setIsKill(true);
2026 // Fill in the new live interval.
2027 unsigned index = getInstructionIndex(MI);
2029 LiveRange LR(getLoadIndex(index), getUseIndex(index),
2030 nI.getNextValue(0, 0, false, getVNInfoAllocator()));
2033 vrm.addRestorePoint(NewVReg, MI);
2036 LiveRange LR(getDefIndex(index), getStoreIndex(index),
2037 nI.getNextValue(0, 0, false, getVNInfoAllocator()));
2040 vrm.addSpillPoint(NewVReg, true, MI);
2043 added.push_back(&nI);
2045 DOUT << "\t\t\t\tadded new interval: ";
2051 RI = mri_->reg_begin(li.reg);
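/// addIntervalsForSpills - Main spill driver.  Decide per value number whether
/// the def can be re-materialized instead of reloaded, rewrite all defs and
/// uses of li to new intervals, and record per-block spill / restore points
/// (folding stores and loads into instructions where possible) for the
/// spiller to act on.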
2057 std::vector<LiveInterval*> LiveIntervals::
2058 addIntervalsForSpills(const LiveInterval &li,
2059 SmallVectorImpl<LiveInterval*> &SpillIs,
2060 const MachineLoopInfo *loopInfo, VirtRegMap &vrm) {
2062 if (EnableFastSpilling)
2063 return addIntervalsForSpillsFast(li, loopInfo, vrm);
2065 assert(li.weight != HUGE_VALF &&
2066 "attempt to spill already spilled interval!");
2068 DOUT << "\t\t\t\tadding intervals for spills for interval: ";
2069 li.print(DOUT, tri_);
2072 // Each bit specifies whether a spill is required in the MBB.
2073 BitVector SpillMBBs(mf_->getNumBlockIDs());
2074 DenseMap<unsigned, std::vector<SRInfo> > SpillIdxes;
2075 BitVector RestoreMBBs(mf_->getNumBlockIDs());
2076 DenseMap<unsigned, std::vector<SRInfo> > RestoreIdxes;
2077 DenseMap<unsigned,unsigned> MBBVRegsMap;
2078 std::vector<LiveInterval*> NewLIs;
2079 const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
2081 unsigned NumValNums = li.getNumValNums();
2082 SmallVector<MachineInstr*, 4> ReMatDefs;
2083 ReMatDefs.resize(NumValNums, NULL);
2084 SmallVector<MachineInstr*, 4> ReMatOrigDefs;
2085 ReMatOrigDefs.resize(NumValNums, NULL);
2086 SmallVector<int, 4> ReMatIds;
2087 ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
2088 BitVector ReMatDelete(NumValNums);
2089 unsigned Slot = VirtRegMap::MAX_STACK_SLOT;
2091 // Spilling a split live interval. It cannot be split any further. Also,
2092 // it's guaranteed to be a single val# / range interval.
2093 if (vrm.getPreSplitReg(li.reg)) {
2094 vrm.setIsSplitFromReg(li.reg, 0);
2095 // Unset the split kill marker on the last use.
2096 unsigned KillIdx = vrm.getKillPoint(li.reg);
2098 MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
2099 assert(KillMI && "Last use disappeared?");
2100 int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
2101 assert(KillOp != -1 && "Last use disappeared?");
2102 KillMI->getOperand(KillOp).setIsKill(false);
2104 vrm.removeKillPoint(li.reg);
2105 bool DefIsReMat = vrm.isReMaterialized(li.reg);
2106 Slot = vrm.getStackSlot(li.reg);
2107 assert(Slot != VirtRegMap::MAX_STACK_SLOT);
2108 MachineInstr *ReMatDefMI = DefIsReMat ?
2109 vrm.getReMaterializedMI(li.reg) : NULL;
2111 bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
2112 bool isLoad = isLoadSS ||
2113 (DefIsReMat && (ReMatDefMI->getDesc().canFoldAsLoad()));
2114 bool IsFirstRange = true;
2115 for (LiveInterval::Ranges::const_iterator
2116 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
2117 // If this is a split live interval with multiple ranges, it means there
2118 // are two-address instructions that re-defined the value. Only the
2119 // first def can be rematerialized!
2121 // Note ReMatOrigDefMI has already been deleted.
2122 rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
2123 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
2124 false, vrm, rc, ReMatIds, loopInfo,
2125 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
2126 MBBVRegsMap, NewLIs);
2128 rewriteInstructionsForSpills(li, false, I, NULL, 0,
2129 Slot, 0, false, false, false,
2130 false, vrm, rc, ReMatIds, loopInfo,
2131 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
2132 MBBVRegsMap, NewLIs);
2134 IsFirstRange = false;
2137 handleSpilledImpDefs(li, vrm, rc, NewLIs);
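// Decide whether to try splitting the interval at basic-block boundaries:
// only worthwhile when splitting is enabled, the interval is not already
// confined to a single MBB, and the split limit has not been reached.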
2141 bool TrySplit = SplitAtBB && !intervalIsInOneMBB(li);
2142 if (SplitLimit != -1 && (int)numSplits >= SplitLimit)
2146 bool NeedStackSlot = false;
2147 for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
2149 const VNInfo *VNI = *i;
2150 unsigned VN = VNI->id;
2151 if (VNI->isUnused())
2152 continue; // Dead val#.
2153 // Is the def for the val# rematerializable?
2154 MachineInstr *ReMatDefMI = VNI->isDefAccurate()
2155 ? getInstructionFromIndex(VNI->def) : 0;
2157 if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, SpillIs, dummy)) {
2158 // Remember how to remat the def of this val#.
2159 ReMatOrigDefs[VN] = ReMatDefMI;
2160 // Original def may be modified so we have to make a copy here.
2161 MachineInstr *Clone = mf_->CloneMachineInstr(ReMatDefMI);
2162 ClonedMIs.push_back(Clone);
2163 ReMatDefs[VN] = Clone;
2165 bool CanDelete = true;
2166 if (VNI->hasPHIKill()) {
2167 // A kill is a phi node, so not all of its uses can be rematerialized.
2168 // The def must not be deleted.
2170 // Need a stack slot if there is any live range where uses cannot be rematerialized.
2172 NeedStackSlot = true;
2175 ReMatDelete.set(VN);
2177 // Need a stack slot if there is any live range where uses cannot be rematerialized.
2179 NeedStackSlot = true;
2183 // One stack slot per live interval.
2184 if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0) {
2185 if (vrm.getStackSlot(li.reg) == VirtRegMap::NO_STACK_SLOT)
2186 Slot = vrm.assignVirt2StackSlot(li.reg);
2188 // This case only occurs when the prealloc splitter has already assigned
2189 // a stack slot to this vreg.
2191 Slot = vrm.getStackSlot(li.reg);
2194 // Create new intervals and rewrite defs and uses.
2195 for (LiveInterval::Ranges::const_iterator
2196 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
2197 MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
2198 MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
2199 bool DefIsReMat = ReMatDefMI != NULL;
2200 bool CanDelete = ReMatDelete[I->valno->id];
2202 bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
2203 bool isLoad = isLoadSS ||
2204 (DefIsReMat && ReMatDefMI->getDesc().canFoldAsLoad());
2205 rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
2206 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
2207 CanDelete, vrm, rc, ReMatIds, loopInfo,
2208 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
2209 MBBVRegsMap, NewLIs);
2212 // Insert spills / restores if we are splitting.
2214 handleSpilledImpDefs(li, vrm, rc, NewLIs);
2218 SmallPtrSet<LiveInterval*, 4> AddedKill;
2219 SmallVector<unsigned, 2> Ops;
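// Walk the blocks that need a spill.  For each recorded spill point, try to
// fold the store into the defining instruction; otherwise tell the spiller to
// insert an explicit store after the def (unless the def is dead).  E.g.,
// with made-up indices and a made-up opcode:
//   20  %reg1030<def> = ADD %reg1030, %reg1026
// either becomes one instruction with the stack slot folded in, or keeps the
// ADD and gets a store of %reg1030 to the assigned slot right after it.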
2220 if (NeedStackSlot) {
2221 int Id = SpillMBBs.find_first();
2223 std::vector<SRInfo> &spills = SpillIdxes[Id];
2224 for (unsigned i = 0, e = spills.size(); i != e; ++i) {
2225 int index = spills[i].index;
2226 unsigned VReg = spills[i].vreg;
2227 LiveInterval &nI = getOrCreateInterval(VReg);
2228 bool isReMat = vrm.isReMaterialized(VReg);
2229 MachineInstr *MI = getInstructionFromIndex(index);
2230 bool CanFold = false;
2231 bool FoundUse = false;
2233 if (spills[i].canFold) {
2235 for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
2236 MachineOperand &MO = MI->getOperand(j);
2237 if (!MO.isReg() || MO.getReg() != VReg)
2244 (!FoundUse && !alsoFoldARestore(Id, index, VReg,
2245 RestoreMBBs, RestoreIdxes))) {
2246 // MI has two-address uses of the same register. If the use
2247 // isn't the first and only use in the BB, then we can't fold
2248 // it. FIXME: Move this to rewriteInstructionsForSpills.
2255 // Fold the store into the def if possible.
2256 bool Folded = false;
2257 if (CanFold && !Ops.empty()) {
2258 if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot,VReg)){
2261 // Also folded uses; do not issue a load.
2262 eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
2263 nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
2265 nI.removeRange(getDefIndex(index), getStoreIndex(index));
2269 // Otherwise tell the spiller to issue a spill.
2271 LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
2272 bool isKill = LR->end == getStoreIndex(index);
2273 if (!MI->registerDefIsDead(nI.reg))
2274 // No need to spill a dead def.
2275 vrm.addSpillPoint(VReg, isKill, MI);
2277 AddedKill.insert(&nI);
2280 Id = SpillMBBs.find_next(Id);
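// Walk the blocks that need a restore.  Try to fold the reload (or, for a
// re-materializable value defined by a load, the defining load itself) into
// the using instruction; if folding fails, record a restore point so the
// spiller emits a reload or re-materialization before the use.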
2284 int Id = RestoreMBBs.find_first();
2286 std::vector<SRInfo> &restores = RestoreIdxes[Id];
2287 for (unsigned i = 0, e = restores.size(); i != e; ++i) {
2288 int index = restores[i].index;
2291 unsigned VReg = restores[i].vreg;
2292 LiveInterval &nI = getOrCreateInterval(VReg);
2293 bool isReMat = vrm.isReMaterialized(VReg);
2294 MachineInstr *MI = getInstructionFromIndex(index);
2295 bool CanFold = false;
2297 if (restores[i].canFold) {
2299 for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
2300 MachineOperand &MO = MI->getOperand(j);
2301 if (!MO.isReg() || MO.getReg() != VReg)
2305 // If this restore were to be folded, it would have been folded already.
2314 // Fold the load into the use if possible.
2315 bool Folded = false;
2316 if (CanFold && !Ops.empty()) {
2318 Folded = tryFoldMemoryOperand(MI, vrm, NULL,index,Ops,true,Slot,VReg);
2320 MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
2322 bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
2323 // If the rematerializable def is a load, also try to fold it.
2324 if (isLoadSS || ReMatDefMI->getDesc().canFoldAsLoad())
2325 Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
2326 Ops, isLoadSS, LdSlot, VReg);
2328 unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
2330 // Re-matting an instruction with virtual register use. Add the
2331 // register as an implicit use on the use MI and update the register
2332 // interval's spill weight to HUGE_VALF to prevent it from being spilled.
2334 LiveInterval &ImpLi = getInterval(ImpUse);
2335 ImpLi.weight = HUGE_VALF;
2336 MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
2341 // If folding is not possible / failed, then tell the spiller to issue a
2342 // load / rematerialization for us.
2344 nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
2346 vrm.addRestorePoint(VReg, MI);
2348 Id = RestoreMBBs.find_next(Id);
2351 // Finalize intervals: add kills, finalize spill weights, and filter out dead intervals.
2353 std::vector<LiveInterval*> RetNewLIs;
2354 for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
2355 LiveInterval *LI = NewLIs[i];
2357 LI->weight /= InstrSlots::NUM * getApproximateInstructionCount(*LI);
2358 if (!AddedKill.count(LI)) {
2359 LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
2360 unsigned LastUseIdx = getBaseIndex(LR->end);
2361 MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
2362 int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
2363 assert(UseIdx != -1);
2364 if (!LastUse->isRegTiedToDefOperand(UseIdx)) {
2365 LastUse->getOperand(UseIdx).setIsKill();
2366 vrm.addKillPoint(LI->reg, LastUseIdx);
2369 RetNewLIs.push_back(LI);
2373 handleSpilledImpDefs(li, vrm, rc, RetNewLIs);
2377 /// hasAllocatableSuperReg - Return true if the specified physical register has
2378 /// any super register that's allocatable.
2379 bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
2380 for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS)
2381 if (allocatableRegs_[*AS] && hasInterval(*AS))
2386 /// getRepresentativeReg - Find the largest super register of the specified
2387 /// physical register.
2388 unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
2389 // Find the largest super-register that is allocatable.
2390 unsigned BestReg = Reg;
2391 for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
2392 unsigned SuperReg = *AS;
2393 if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) {
2401 /// getNumConflictsWithPhysReg - Return the number of uses and defs of the
2402 /// specified interval that conflicts with the specified physical register.
2403 unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
2404 unsigned PhysReg) const {
2405 unsigned NumConflicts = 0;
2406 const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg));
2407 for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
2408 E = mri_->reg_end(); I != E; ++I) {
2409 MachineOperand &O = I.getOperand();
2410 MachineInstr *MI = O.getParent();
2411 unsigned Index = getInstructionIndex(MI);
2412 if (pli.liveAt(Index))
2415 return NumConflicts;
2418 /// spillPhysRegAroundRegDefsUses - Spill the specified physical register
2419 /// around all defs and uses of the specified interval. Return true if it
2420 /// was able to cut its interval.
2421 bool LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
2422 unsigned PhysReg, VirtRegMap &vrm) {
2423 unsigned SpillReg = getRepresentativeReg(PhysReg);
2425 for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
2426 // If there are registers which alias PhysReg but are not sub-registers
2427 // of the chosen representative super register, assert, since we can't
2428 // handle them yet.
2429 assert(*AS == SpillReg || !allocatableRegs_[*AS] || !hasInterval(*AS) ||
2430 tri_->isSuperRegister(*AS, SpillReg));
2433 LiveInterval &pli = getInterval(SpillReg);
2434 SmallPtrSet<MachineInstr*, 8> SeenMIs;
2435 for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
2436 E = mri_->reg_end(); I != E; ++I) {
2437 MachineOperand &O = I.getOperand();
2438 MachineInstr *MI = O.getParent();
2439 if (SeenMIs.count(MI))
2442 unsigned Index = getInstructionIndex(MI);
2443 if (pli.liveAt(Index)) {
2444 vrm.addEmergencySpill(SpillReg, MI);
2445 unsigned StartIdx = getLoadIndex(Index);
2446 unsigned EndIdx = getStoreIndex(Index)+1;
2447 if (pli.isInOneLiveRange(StartIdx, EndIdx)) {
2448 pli.removeRange(StartIdx, EndIdx);
2451 cerr << "Ran out of registers during register allocation!\n";
2452 if (MI->getOpcode() == TargetInstrInfo::INLINEASM) {
2453 cerr << "Please check your inline asm statement for invalid "
2454 << "constraints:\n";
2455 MI->print(cerr.stream(), tm_);
2459 for (const unsigned* AS = tri_->getSubRegisters(SpillReg); *AS; ++AS) {
2460 if (!hasInterval(*AS))
2462 LiveInterval &spli = getInterval(*AS);
2463 if (spli.liveAt(Index))
2464 spli.removeRange(getLoadIndex(Index), getStoreIndex(Index)+1);
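/// addLiveRangeToEndOfBlock - Create (or extend) the live interval for reg
/// with a range that starts at startInst's def slot and runs to the end of
/// startInst's basic block, and return that range.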
2471 LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg,
2472 MachineInstr* startInst) {
2473 LiveInterval& Interval = getOrCreateInterval(reg);
2474 VNInfo* VN = Interval.getNextValue(
2475 getInstructionIndex(startInst) + InstrSlots::DEF,
2476 startInst, true, getVNInfoAllocator());
2477 VN->setHasPHIKill(true);
2478 VN->kills.push_back(
2479 VNInfo::KillInfo(terminatorGaps[startInst->getParent()], true));
2480 LiveRange LR(getInstructionIndex(startInst) + InstrSlots::DEF,
2481 getMBBEndIdx(startInst->getParent()) + 1, VN);
2482 Interval.addRange(LR);