1 //===-- LiveIntervalAnalysis.cpp - Live Interval Analysis -----------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file implements the LiveInterval analysis pass which is used
11 // by the Linear Scan Register allocator. This pass linearizes the
12 // basic blocks of the function in DFS order and uses the
13 // LiveVariables pass to conservatively compute live intervals for
14 // each virtual and physical register.
16 //===----------------------------------------------------------------------===//
18 #define DEBUG_TYPE "liveintervals"
19 #include "llvm/CodeGen/LiveIntervalAnalysis.h"
20 #include "VirtRegMap.h"
21 #include "llvm/Value.h"
22 #include "llvm/CodeGen/LiveVariables.h"
23 #include "llvm/CodeGen/MachineFrameInfo.h"
24 #include "llvm/CodeGen/MachineInstr.h"
25 #include "llvm/CodeGen/MachineLoopInfo.h"
26 #include "llvm/CodeGen/MachineRegisterInfo.h"
27 #include "llvm/CodeGen/Passes.h"
28 #include "llvm/Target/TargetRegisterInfo.h"
29 #include "llvm/Target/TargetInstrInfo.h"
30 #include "llvm/Target/TargetMachine.h"
31 #include "llvm/Support/CommandLine.h"
32 #include "llvm/Support/Debug.h"
33 #include "llvm/ADT/Statistic.h"
34 #include "llvm/ADT/STLExtras.h"
39 // Hidden options to help with debugging.
40 static cl::opt<bool> DisableReMat("disable-rematerialization",
41 cl::init(false), cl::Hidden);
43 static cl::opt<bool> SplitAtBB("split-intervals-at-bb",
44 cl::init(true), cl::Hidden);
45 static cl::opt<int> SplitLimit("split-limit",
46 cl::init(-1), cl::Hidden);
48 STATISTIC(numIntervals, "Number of original intervals");
49 STATISTIC(numIntervalsAfter, "Number of intervals after coalescing");
50 STATISTIC(numFolds , "Number of loads/stores folded into instructions");
51 STATISTIC(numSplits , "Number of intervals split");
53 char LiveIntervals::ID = 0;
54 static RegisterPass<LiveIntervals> X("liveintervals", "Live Interval Analysis");
// Declare pass dependencies: LiveVariables is both required and preserved;
// PHI elimination and two-address lowering must have run before this pass.
// Loop info, dominator info and PHI-elimination results survive this pass.
56 void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
57 AU.addPreserved<LiveVariables>();
58 AU.addRequired<LiveVariables>();
59 AU.addPreservedID(MachineLoopInfoID);
60 AU.addPreservedID(MachineDominatorsID);
61 AU.addPreservedID(PHIEliminationID);
62 AU.addRequiredID(PHIEliminationID);
63 AU.addRequiredID(TwoAddressInstructionPassID);
64 MachineFunctionPass::getAnalysisUsage(AU);
// Free per-function state: reset the VNInfo bump allocator and delete any
// MachineInstrs that were cloned (ClonedMIs) during this pass's lifetime.
67 void LiveIntervals::releaseMemory() {
// NOTE(review): this excerpt omits original lines 68-72 (presumably the
// clearing of the index maps) — confirm against the full source.
73 // Release VNInfo memory regions after all VNInfo objects are dtor'd.
74 VNInfoAllocator.Reset();
75 while (!ClonedMIs.empty()) {
76 MachineInstr *MI = ClonedMIs.back();
// NOTE(review): a ClonedMIs.pop_back() presumably sits on the omitted
// line 77 — otherwise this loop would not terminate; confirm.
78 mf_->DeleteMachineInstr(MI);
// Renumber every MachineInstr and MachineBasicBlock after instructions have
// been added or removed, then remap all existing live ranges, VNInfo def
// indices and kill indices from the old numbering (OldI2MI) to the new one.
// NOTE(review): this excerpt omits a number of original source lines
// (loop headers, else branches, closing braces); gaps in the embedded
// numbering mark them.
82 void LiveIntervals::computeNumbering() {
83 Index2MiMap OldI2MI = i2miMap_;
90 // Number MachineInstrs and MachineBasicBlocks.
91 // Initialize MBB indexes to a sentinel.
92 MBB2IdxMap.resize(mf_->getNumBlockIDs(), std::make_pair(~0U,~0U));
95 for (MachineFunction::iterator MBB = mf_->begin(), E = mf_->end();
97 unsigned StartIdx = MIIndex;
99 for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
// Each instruction occupies InstrSlots::NUM consecutive index slots so
// sub-positions (load/use/def/store) within one instruction stay distinct.
101 bool inserted = mi2iMap_.insert(std::make_pair(I, MIIndex)).second;
102 assert(inserted && "multiple MachineInstr -> index mappings");
103 i2miMap_.push_back(I);
104 MIIndex += InstrSlots::NUM;
// StartIdx == MIIndex means no instruction was numbered: the block is
// empty, but it still consumes one slot so its index range is non-empty.
107 if (StartIdx == MIIndex) {
109 MIIndex += InstrSlots::NUM;
110 i2miMap_.push_back(0);
112 // Set the MBB2IdxMap entry for this MBB.
113 MBB2IdxMap[MBB->getNumber()] = std::make_pair(StartIdx, MIIndex - 1);
114 Idx2MBBMap.push_back(std::make_pair(StartIdx, MBB));
116 std::sort(Idx2MBBMap.begin(), Idx2MBBMap.end(), Idx2MBBCompare());
// If this is a re-numbering (OldI2MI non-empty), translate every live
// range of every interval into the new numbering.
118 if (!OldI2MI.empty())
119 for (iterator I = begin(), E = end(); I != E; ++I)
120 for (LiveInterval::iterator LI = I->second.begin(), LE = I->second.end();
123 // Remap the start index of the live range to the corresponding new
124 // number, or our best guess at what it _should_ correspond to if the
125 // original instruction has been erased. This is either the following
126 // instruction or its predecessor.
127 unsigned offset = LI->start % InstrSlots::NUM;
128 if (OldI2MI[LI->start / InstrSlots::NUM])
129 LI->start = mi2iMap_[OldI2MI[LI->start / InstrSlots::NUM]] + offset;
// Old instruction was erased: scan for the next surviving instruction
// (the scanning loop header is on omitted lines).
132 MachineInstr* newInstr = 0;
134 newInstr = OldI2MI[LI->start / InstrSlots::NUM + i];
// If the survivor begins its block, snap the start to the block start;
// otherwise anchor relative to its predecessor slot.
138 if (mi2iMap_[newInstr] ==
139 MBB2IdxMap[newInstr->getParent()->getNumber()].first)
140 LI->start = mi2iMap_[newInstr];
142 LI->start = mi2iMap_[newInstr] - InstrSlots::NUM + offset;
145 // Remap the ending index in the same way that we remapped the start,
146 // except for the final step where we always map to the immediately
147 // following instruction.
148 if (LI->end / InstrSlots::NUM < OldI2MI.size()) {
149 offset = LI->end % InstrSlots::NUM;
150 if (OldI2MI[LI->end / InstrSlots::NUM])
151 LI->end = mi2iMap_[OldI2MI[LI->end / InstrSlots::NUM]] + offset;
154 MachineInstr* newInstr = 0;
156 newInstr = OldI2MI[LI->end / InstrSlots::NUM + i];
160 LI->end = mi2iMap_[newInstr];
// End index falls past the old numbering: clamp to one past the last slot.
163 LI->end = i2miMap_.size() * InstrSlots::NUM;
166 // Remap the VNInfo def index, which works the same as the
167 // start indices above.
168 VNInfo* vni = LI->valno;
169 offset = vni->def % InstrSlots::NUM;
170 if (OldI2MI[vni->def / InstrSlots::NUM])
171 vni->def = mi2iMap_[OldI2MI[vni->def / InstrSlots::NUM]] + offset;
174 MachineInstr* newInstr = 0;
176 newInstr = OldI2MI[vni->def / InstrSlots::NUM + i];
180 if (mi2iMap_[newInstr] ==
181 MBB2IdxMap[newInstr->getParent()->getNumber()].first)
182 vni->def = mi2iMap_[newInstr];
184 vni->def = mi2iMap_[newInstr] - InstrSlots::NUM + offset;
187 // Remap the VNInfo kill indices, which works the same as
188 // the end indices above.
189 for (size_t i = 0; i < vni->kills.size(); ++i) {
190 offset = vni->kills[i] % InstrSlots::NUM;
191 if (OldI2MI[vni->kills[i] / InstrSlots::NUM])
192 vni->kills[i] = mi2iMap_[OldI2MI[vni->kills[i] / InstrSlots::NUM]] +
196 MachineInstr* newInstr = 0;
198 newInstr = OldI2MI[vni->kills[i] / InstrSlots::NUM + e];
202 vni->kills[i] = mi2iMap_[newInstr];
208 /// runOnMachineFunction - Compute live intervals for the whole function.
// Caches target/register info, pulls in LiveVariables, computes intervals
// and updates statistics. Returns true (the visible code suggests the pass
// reports modification; the actual return is on an omitted line — confirm).
210 bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
// NOTE(review): mf_ is used below but its assignment (presumably
// `mf_ = &fn;` on omitted line 211) is not visible in this excerpt.
212 mri_ = &mf_->getRegInfo();
213 tm_ = &fn.getTarget();
214 tri_ = tm_->getRegisterInfo();
215 tii_ = tm_->getInstrInfo();
216 lv_ = &getAnalysis<LiveVariables>();
217 allocatableRegs_ = tri_->getAllocatableSet(fn);
// Count intervals before coalescing for the statistics output.
222 numIntervals += getNumIntervals();
224 DOUT << "********** INTERVALS **********\n";
225 for (iterator I = begin(), E = end(); I != E; ++I) {
226 I->second.print(DOUT, tri_);
230 numIntervalsAfter += getNumIntervals();
235 /// print - Implement the dump method.
// Dumps every live interval, then every machine instruction with its
// numbering index, to the given stream (Module argument is unused).
236 void LiveIntervals::print(std::ostream &O, const Module* ) const {
237 O << "********** INTERVALS **********\n";
238 for (const_iterator I = begin(), E = end(); I != E; ++I) {
239 I->second.print(O, tri_);
243 O << "********** MACHINEINSTRS **********\n";
244 for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end();
245 mbbi != mbbe; ++mbbi) {
246 O << ((Value*)mbbi->getBasicBlock())->getName() << ":\n";
247 for (MachineBasicBlock::iterator mii = mbbi->begin(),
248 mie = mbbi->end(); mii != mie; ++mii) {
249 O << getInstructionIndex(mii) << '\t' << *mii;
254 /// conflictsWithPhysRegDef - Returns true if the specified register
255 /// is defined during the duration of the specified interval.
// Walks every instruction index covered by li's ranges, skipping erased
// instructions and coalescable copies of li.reg itself, and checks each
// register operand (mapped through the VirtRegMap for virtuals) for
// overlap with `reg`.
256 bool LiveIntervals::conflictsWithPhysRegDef(const LiveInterval &li,
257 VirtRegMap &vrm, unsigned reg) {
258 for (LiveInterval::Ranges::const_iterator
259 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
260 for (unsigned index = getBaseIndex(I->start),
261 end = getBaseIndex(I->end-1) + InstrSlots::NUM; index != end;
262 index += InstrSlots::NUM) {
263 // skip deleted instructions
264 while (index != end && !getInstructionFromIndex(index))
265 index += InstrSlots::NUM;
266 if (index == end) break;
268 MachineInstr *MI = getInstructionFromIndex(index);
269 unsigned SrcReg, DstReg;
// Copies to/from li.reg are not conflicts (a `continue` presumably sits
// on the omitted line 272 — confirm against the full source).
270 if (tii_->isMoveInstr(*MI, SrcReg, DstReg))
271 if (SrcReg == li.reg || DstReg == li.reg)
273 for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
274 MachineOperand& mop = MI->getOperand(i);
275 if (!mop.isRegister())
277 unsigned PhysReg = mop.getReg();
278 if (PhysReg == 0 || PhysReg == li.reg)
// Map virtual registers to their assigned physical register, if any.
280 if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
281 if (!vrm.hasPhys(PhysReg))
283 PhysReg = vrm.getPhys(PhysReg);
285 if (PhysReg && tri_->regsOverlap(PhysReg, reg))
// Print a register to cerr: physical registers by target name, virtual
// registers as "%regN". (The `else` keyword is on an omitted line.)
294 void LiveIntervals::printRegName(unsigned reg) const {
295 if (TargetRegisterInfo::isPhysicalRegister(reg))
296 cerr << tri_->getName(reg);
298 cerr << "%reg" << reg;
// Build/extend the live interval for a virtual-register def at MIIdx.
// Three cases: first def of the vreg (interval empty), redefinition by a
// two-address instruction, and redefinition produced by PHI elimination.
// NOTE(review): this excerpt omits a number of original source lines
// (else branches, closing braces, some declarations such as killIdx/ValNo);
// gaps in the embedded numbering mark them.
301 void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
302 MachineBasicBlock::iterator mi,
303 unsigned MIIdx, MachineOperand& MO,
305 LiveInterval &interval) {
306 DOUT << "\t\tregister: "; DEBUG(printRegName(interval.reg));
307 LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);
309 if (mi->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
310 DOUT << "is a implicit_def\n";
314 // Virtual registers may be defined multiple times (due to phi
315 // elimination and 2-addr elimination). Much of what we do only has to be
316 // done once for the vreg. We use an empty interval to detect the first
317 // time we see a vreg.
318 if (interval.empty()) {
319 // Get the Idx of the defining instructions.
320 unsigned defIndex = getDefIndex(MIIdx);
// Record the copy instruction (subreg ops or moves) in the new val# so
// coalescing can later identify the copy source.
322 MachineInstr *CopyMI = NULL;
323 unsigned SrcReg, DstReg;
324 if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
325 mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
326 tii_->isMoveInstr(*mi, SrcReg, DstReg))
328 ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);
330 assert(ValNo->id == 0 && "First value in interval is not 0?");
332 // Loop over all of the blocks that the vreg is defined in. There are
333 // two cases we have to handle here. The most common case is a vreg
334 // whose lifetime is contained within a basic block. In this case there
335 // will be a single kill, in MBB, which comes after the definition.
336 if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
337 // FIXME: what about dead vars?
339 if (vi.Kills[0] != mi)
340 killIdx = getUseIndex(getInstructionIndex(vi.Kills[0]))+1;
342 killIdx = defIndex+1;
344 // If the kill happens after the definition, we have an intra-block
346 if (killIdx > defIndex) {
347 assert(vi.AliveBlocks.none() &&
348 "Shouldn't be alive across any blocks!");
349 LiveRange LR(defIndex, killIdx, ValNo);
350 interval.addRange(LR);
351 DOUT << " +" << LR << "\n";
352 interval.addKill(ValNo, killIdx);
357 // The other case we handle is when a virtual register lives to the end
358 // of the defining block, potentially live across some blocks, then is
359 // live into some number of blocks, but gets killed. Start by adding a
360 // range that goes from this definition to the end of the defining block.
361 LiveRange NewLR(defIndex,
362 getInstructionIndex(&mbb->back()) + InstrSlots::NUM,
364 DOUT << " +" << NewLR;
365 interval.addRange(NewLR);
367 // Iterate over all of the blocks that the variable is completely
368 // live in, adding [instrIndex(begin), instrIndex(end)+4) to the
370 for (unsigned i = 0, e = vi.AliveBlocks.size(); i != e; ++i) {
371 if (vi.AliveBlocks[i]) {
372 LiveRange LR(getMBBStartIdx(i),
373 getMBBEndIdx(i)+1, // MBB ends at -1.
375 interval.addRange(LR);
380 // Finally, this virtual register is live from the start of any killing
381 // block to the 'use' slot of the killing instruction.
382 for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
383 MachineInstr *Kill = vi.Kills[i];
384 unsigned killIdx = getUseIndex(getInstructionIndex(Kill))+1;
385 LiveRange LR(getMBBStartIdx(Kill->getParent()),
387 interval.addRange(LR);
388 interval.addKill(ValNo, killIdx);
393 // If this is the second time we see a virtual register definition, it
394 // must be due to phi elimination or two addr elimination. If this is
395 // the result of two address elimination, then the vreg is one of the
396 // def-and-use register operand.
397 if (mi->isRegReDefinedByTwoAddr(interval.reg, MOIdx)) {
398 // If this is a two-address definition, then we have already processed
399 // the live range. The only problem is that we didn't realize there
400 // are actually two values in the live interval. Because of this we
401 // need to take the LiveRegion that defines this register and split it
403 assert(interval.containsOneValue());
404 unsigned DefIndex = getDefIndex(interval.getValNumInfo(0)->def);
405 unsigned RedefIndex = getDefIndex(MIIdx);
407 const LiveRange *OldLR = interval.getLiveRangeContaining(RedefIndex-1);
408 VNInfo *OldValNo = OldLR->valno;
410 // Delete the initial value, which should be short and continuous,
411 // because the 2-addr copy must be in the same MBB as the redef.
412 interval.removeRange(DefIndex, RedefIndex);
414 // Two-address vregs should always only be redefined once. This means
415 // that at this point, there should be exactly one value number in it.
416 assert(interval.containsOneValue() && "Unexpected 2-addr liveint!");
418 // The new value number (#1) is defined by the instruction we claimed
420 VNInfo *ValNo = interval.getNextValue(OldValNo->def, OldValNo->copy,
423 // Value#0 is now defined by the 2-addr instruction.
424 OldValNo->def = RedefIndex;
427 // Add the new live interval which replaces the range for the input copy.
428 LiveRange LR(DefIndex, RedefIndex, ValNo);
429 DOUT << " replace range with " << LR;
430 interval.addRange(LR);
431 interval.addKill(ValNo, RedefIndex);
433 // If this redefinition is dead, we need to add a dummy unit live
434 // range covering the def slot.
436 interval.addRange(LiveRange(RedefIndex, RedefIndex+1, OldValNo));
439 interval.print(DOUT, tri_);
442 // Otherwise, this must be because of phi elimination. If this is the
443 // first redefinition of the vreg that we have seen, go back and change
444 // the live range in the PHI block to be a different value number.
445 if (interval.containsOneValue()) {
446 assert(vi.Kills.size() == 1 &&
447 "PHI elimination vreg should have one kill, the PHI itself!");
449 // Remove the old range that we now know has an incorrect number.
450 VNInfo *VNI = interval.getValNumInfo(0);
451 MachineInstr *Killer = vi.Kills[0];
452 unsigned Start = getMBBStartIdx(Killer->getParent());
453 unsigned End = getUseIndex(getInstructionIndex(Killer))+1;
454 DOUT << " Removing [" << Start << "," << End << "] from: ";
455 interval.print(DOUT, tri_); DOUT << "\n";
456 interval.removeRange(Start, End);
457 VNI->hasPHIKill = true;
458 DOUT << " RESULT: "; interval.print(DOUT, tri_);
460 // Replace the interval with one of a NEW value number. Note that this
461 // value number isn't actually defined by an instruction, weird huh? :)
462 LiveRange LR(Start, End, interval.getNextValue(~0, 0, VNInfoAllocator));
463 DOUT << " replace range with " << LR;
464 interval.addRange(LR);
465 interval.addKill(LR.valno, End);
466 DOUT << " RESULT: "; interval.print(DOUT, tri_);
469 // In the case of PHI elimination, each variable definition is only
470 // live until the end of the block. We've already taken care of the
471 // rest of the live range.
472 unsigned defIndex = getDefIndex(MIIdx);
475 MachineInstr *CopyMI = NULL;
476 unsigned SrcReg, DstReg;
477 if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
478 mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
479 tii_->isMoveInstr(*mi, SrcReg, DstReg))
481 ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);
// PHI-eliminated defs live only to the end of their defining block.
483 unsigned killIndex = getInstructionIndex(&mbb->back()) + InstrSlots::NUM;
484 LiveRange LR(defIndex, killIndex, ValNo);
485 interval.addRange(LR);
486 interval.addKill(ValNo, killIndex);
487 ValNo->hasPHIKill = true;
// Build the live range for a physical-register def at MIIdx. Physregs are
// block-local here, so the scan for the kill never leaves MBB.
495 void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
496 MachineBasicBlock::iterator mi,
499 LiveInterval &interval,
500 MachineInstr *CopyMI) {
501 // A physical register cannot be live across basic blocks, so its
502 // lifetime must end somewhere in its defining basic block.
503 DOUT << "\t\tregister: "; DEBUG(printRegName(interval.reg));
505 unsigned baseIndex = MIIdx;
506 unsigned start = getDefIndex(baseIndex);
507 unsigned end = start;
509 // If it is not used after definition, it is considered dead at
510 // the instruction defining it. Hence its interval is:
511 // [defSlot(def), defSlot(def)+1)
// NOTE(review): the dead-def condition guarding this assignment is on an
// omitted line (512/513) — confirm against the full source.
514 end = getDefIndex(start) + 1;
518 // If it is not dead on definition, it must be killed by a
519 // subsequent instruction. Hence its interval is:
520 // [defSlot(def), useSlot(kill)+1)
521 while (++mi != MBB->end()) {
522 baseIndex += InstrSlots::NUM;
523 if (mi->killsRegister(interval.reg, tri_)) {
525 end = getUseIndex(baseIndex) + 1;
527 } else if (mi->modifiesRegister(interval.reg, tri_)) {
528 // Another instruction redefines the register before it is ever read.
529 // Then the register is essentially dead at the instruction that defines
530 // it. Hence its interval is:
531 // [defSlot(def), defSlot(def)+1)
533 end = getDefIndex(start) + 1;
538 // The only case we should have a dead physreg here without a killing or
539 // instruction where we know it's dead is if it is live-in to the function
541 assert(!CopyMI && "physreg was not killed in defining block!");
542 end = getDefIndex(start) + 1; // It's dead.
545 assert(start < end && "did not find end of interval?");
547 // Already exists? Extend old live interval.
548 LiveInterval::iterator OldLR = interval.FindLiveRangeContaining(start);
549 VNInfo *ValNo = (OldLR != interval.end())
550 ? OldLR->valno : interval.getNextValue(start, CopyMI, VNInfoAllocator);
551 LiveRange LR(start, end, ValNo);
552 interval.addRange(LR);
553 interval.addKill(LR.valno, end);
554 DOUT << " +" << LR << '\n';
// Dispatch a register def to the virtual or physical handler. For an
// allocatable physreg, also create ranges for each of its sub-registers.
557 void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
558 MachineBasicBlock::iterator MI,
562 if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
563 handleVirtualRegisterDef(MBB, MI, MIIdx, MO, MOIdx,
564 getOrCreateInterval(MO.getReg()));
565 else if (allocatableRegs_[MO.getReg()]) {
// Track the copy instruction (subreg ops / moves) so the val# records it.
566 MachineInstr *CopyMI = NULL;
567 unsigned SrcReg, DstReg;
568 if (MI->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
569 MI->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
570 tii_->isMoveInstr(*MI, SrcReg, DstReg))
572 handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
573 getOrCreateInterval(MO.getReg()), CopyMI);
574 // Def of a register also defines its sub-registers.
575 for (const unsigned* AS = tri_->getSubRegisters(MO.getReg()); *AS; ++AS)
576 // If MI also modifies the sub-register explicitly, avoid processing it
577 // more than once. Do not pass in TRI here so it checks for exact match.
578 if (!MI->modifiesRegister(*AS))
579 handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
580 getOrCreateInterval(*AS), 0);
// Build the live range for a register that is live-in to MBB: scan forward
// from the block start for a kill or redefinition; if neither is found the
// register is treated as live through (or dead at entry for aliases —
// the branches handling that are on omitted lines).
584 void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
586 LiveInterval &interval, bool isAlias) {
587 DOUT << "\t\tlivein register: "; DEBUG(printRegName(interval.reg));
589 // Look for kills, if it reaches a def before it's killed, then it shouldn't
590 // be considered a livein.
591 MachineBasicBlock::iterator mi = MBB->begin();
592 unsigned baseIndex = MIIdx;
593 unsigned start = baseIndex;
594 unsigned end = start;
595 while (mi != MBB->end()) {
596 if (mi->killsRegister(interval.reg, tri_)) {
598 end = getUseIndex(baseIndex) + 1;
600 } else if (mi->modifiesRegister(interval.reg, tri_)) {
601 // Another instruction redefines the register before it is ever read.
602 // Then the register is essentially dead at the instruction that defines
603 // it. Hence its interval is:
604 // [defSlot(def), defSlot(def)+1)
606 end = getDefIndex(start) + 1;
610 baseIndex += InstrSlots::NUM;
615 // Live-in register might not be used at all.
619 end = getDefIndex(MIIdx) + 1;
621 DOUT << " live through";
626 LiveRange LR(start, end, interval.getNextValue(start, 0, VNInfoAllocator));
627 interval.addRange(LR);
628 interval.addKill(LR.valno, end);
629 DOUT << " +" << LR << '\n';
632 /// computeIntervals - computes the live intervals for virtual
633 /// registers. for some ordering of the machine instructions [1,N] a
634 /// live interval is an interval [i, j) where 1 <= i <= j < N for
635 /// which a variable is live
636 void LiveIntervals::computeIntervals() {
637 DOUT << "********** COMPUTING LIVE INTERVALS **********\n"
638 << "********** Function: "
639 << ((Value*)mf_->getFunction())->getName() << '\n';
640 // Track the index of the current machine instr.
641 unsigned MIIndex = 0;
642 for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
644 MachineBasicBlock *MBB = MBBI;
645 DOUT << ((Value*)MBB->getBasicBlock())->getName() << ":\n";
647 MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();
649 // Create intervals for live-ins to this BB first.
650 for (MachineBasicBlock::const_livein_iterator LI = MBB->livein_begin(),
651 LE = MBB->livein_end(); LI != LE; ++LI) {
652 handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
653 // Multiple live-ins can alias the same register.
654 for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
655 if (!hasInterval(*AS))
656 handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
660 for (; MI != miEnd; ++MI) {
661 DOUT << MIIndex << "\t" << *MI;
// Walk operands in reverse so defs are handled after earlier operands of
// the same instruction have been seen.
664 for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
665 MachineOperand &MO = MI->getOperand(i);
666 // handle register defs - build intervals
667 if (MO.isRegister() && MO.getReg() && MO.isDef())
668 handleRegisterDef(MBB, MI, MIIndex, MO, i);
671 MIIndex += InstrSlots::NUM;
// Keep the index stream consistent with computeNumbering(): an empty
// block still consumes one slot.
674 if (MBB->begin() == miEnd) MIIndex += InstrSlots::NUM; // Empty MBB
// Collect into MBBs every basic block whose start index falls inside live
// range LR, using binary search over the sorted Idx2MBBMap. The return
// value (presumably whether any block was found) is on omitted lines.
678 bool LiveIntervals::findLiveInMBBs(const LiveRange &LR,
679 SmallVectorImpl<MachineBasicBlock*> &MBBs) const {
680 std::vector<IdxMBBPair>::const_iterator I =
681 std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), LR.start);
684 while (I != Idx2MBBMap.end()) {
685 if (LR.end <= I->first)
687 MBBs.push_back(I->second);
// Construct a fresh LiveInterval for `reg`, seeding its spill weight by
// register class (the ternary's operand values are on an omitted line;
// physical registers conventionally get a huge weight so they never spill).
695 LiveInterval LiveIntervals::createInterval(unsigned reg) {
696 float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ?
698 return LiveInterval(reg, Weight);
701 /// getVNInfoSourceReg - Helper function that parses the specified VNInfo
702 /// copy field and returns the source register that defines it.
703 unsigned LiveIntervals::getVNInfoSourceReg(const VNInfo *VNI) const {
// Subreg instructions carry the source in a fixed operand position:
// operand 1 for EXTRACT_SUBREG, operand 2 for INSERT_SUBREG.
707 if (VNI->copy->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG)
708 return VNI->copy->getOperand(1).getReg();
709 if (VNI->copy->getOpcode() == TargetInstrInfo::INSERT_SUBREG)
710 return VNI->copy->getOperand(2).getReg();
711 unsigned SrcReg, DstReg;
// For a plain move, isMoveInstr fills SrcReg (the return of SrcReg is on
// an omitted line 713).
712 if (tii_->isMoveInstr(*VNI->copy, SrcReg, DstReg))
714 assert(0 && "Unrecognized copy instruction!");
718 //===----------------------------------------------------------------------===//
719 // Register allocator hooks.
722 /// getReMatImplicitUse - If the remat definition MI has one (for now, we only
723 /// allow one) virtual register operand, then its uses are implicitly using
724 /// the register. Returns the virtual register.
725 unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
726 MachineInstr *MI) const {
728 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
729 MachineOperand &MO = MI->getOperand(i);
730 if (!MO.isRegister() || !MO.isUse())
732 unsigned Reg = MO.getReg();
733 if (Reg == 0 || Reg == li.reg)
735 // FIXME: For now, only remat MI with at most one register operand.
// The assert condition itself (checking no second register operand was
// already recorded) is on an omitted line 736.
737 "Can't rematerialize instruction with multiple register operand!");
744 /// isValNoAvailableAt - Return true if the val# of the specified interval
745 /// which reaches the given instruction also reaches the specified use index.
746 bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
747 unsigned UseIdx) const {
748 unsigned Index = getInstructionIndex(MI);
// Assumes MI's index is covered by li (FindLiveRangeContaining is
// dereferenced unchecked here).
749 VNInfo *ValNo = li.FindLiveRangeContaining(Index)->valno;
750 LiveInterval::const_iterator UI = li.FindLiveRangeContaining(UseIdx);
751 return UI != li.end() && UI->valno == ValNo;
754 /// isReMaterializable - Returns true if the definition MI of the specified
755 /// val# of the specified interval is re-materializable.
756 bool LiveIntervals::isReMaterializable(const LiveInterval &li,
757 const VNInfo *ValNo, MachineInstr *MI,
// IMPLICIT_DEF is trivially rematerializable.
763 if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF)
767 if (tii_->isLoadFromStackSlot(MI, FrameIdx) &&
768 mf_->getFrameInfo()->isImmutableObjectIndex(FrameIdx))
769 // FIXME: Let target specific isReallyTriviallyReMaterializable determines
770 // this but remember this is not safe to fold into a two-address
772 // This is a load from fixed stack slot. It can be rematerialized.
775 if (tii_->isTriviallyReMaterializable(MI)) {
776 const TargetInstrDesc &TID = MI->getDesc();
777 isLoad = TID.isSimpleLoad();
// If the remat instruction implicitly uses another vreg, that vreg's val#
// must reach every use point of li.reg, or remat would read a wrong value.
779 unsigned ImpUse = getReMatImplicitUse(li, MI);
781 const LiveInterval &ImpLi = getInterval(ImpUse);
782 for (MachineRegisterInfo::use_iterator ri = mri_->use_begin(li.reg),
783 re = mri_->use_end(); ri != re; ++ri) {
784 MachineInstr *UseMI = &*ri;
785 unsigned UseIdx = getInstructionIndex(UseMI);
786 if (li.FindLiveRangeContaining(UseIdx)->valno != ValNo)
788 if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
798 /// isReMaterializable - Returns true if every definition of MI of every
799 /// val# of the specified interval is re-materializable.
800 bool LiveIntervals::isReMaterializable(const LiveInterval &li, bool &isLoad) {
802 for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
804 const VNInfo *VNI = *i;
805 unsigned DefIdx = VNI->def;
807 continue; // Dead val#.
808 // Is the def for the val# rematerializable?
811 MachineInstr *ReMatDefMI = getInstructionFromIndex(DefIdx);
812 bool DefIsLoad = false;
// Fail the whole interval if any single val#'s def is not remat'able
// (the early-return is on an omitted line).
814 !isReMaterializable(li, VNI, ReMatDefMI, DefIsLoad))
821 /// FilterFoldedOps - Filter out two-address use operands. Return
822 /// true if it finds any issue with the operands that ought to prevent
// (the tail of this doc comment, "folding.", is on an omitted line 823)
824 static bool FilterFoldedOps(MachineInstr *MI,
825 SmallVector<unsigned, 2> &Ops,
827 SmallVector<unsigned, 2> &FoldOps) {
828 const TargetInstrDesc &TID = MI->getDesc();
831 for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
832 unsigned OpIdx = Ops[i];
833 MachineOperand &MO = MI->getOperand(OpIdx);
834 // FIXME: fold subreg use.
// Accumulate whether the folded operands modify and/or read the register;
// MRInfo is reported back to the caller (out-parameter on omitted line).
838 MRInfo |= (unsigned)VirtRegMap::isMod;
840 // Filter out two-address use operand(s).
841 if (!MO.isImplicit() &&
842 TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1) {
843 MRInfo = VirtRegMap::isModRef;
846 MRInfo |= (unsigned)VirtRegMap::isRef;
848 FoldOps.push_back(OpIdx);
854 /// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
855 /// slot / to reg or any rematerialized load into ith operand of specified
856 /// MI. If it is successful, MI is updated with the newly created MI and
// (continuation of this doc comment is on an omitted line 857)
858 bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
859 VirtRegMap &vrm, MachineInstr *DefMI,
861 SmallVector<unsigned, 2> &Ops,
862 bool isSS, int Slot, unsigned Reg) {
863 // If it is an implicit def instruction, just delete it.
864 if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
865 RemoveMachineInstrFromMaps(MI);
866 vrm.RemoveMachineInstrFromMaps(MI);
867 MI->eraseFromParent();
872 // Filter the list of operand indexes that are to be folded. Abort if
873 // any operand will prevent folding.
875 SmallVector<unsigned, 2> FoldOps;
876 if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
879 // The only time it's safe to fold into a two address instruction is when
880 // it's folding reload and spill from / into a spill stack slot.
881 if (DefMI && (MRInfo & VirtRegMap::isMod))
// Ask the target to fold either the stack slot (isSS) or the remat def.
884 MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(*mf_, MI, FoldOps, Slot)
885 : tii_->foldMemoryOperand(*mf_, MI, FoldOps, DefMI);
887 // Remember this instruction uses the spill slot.
888 if (isSS) vrm.addSpillSlotUse(Slot, fmi);
890 // Attempt to fold the memory reference into the instruction. If
891 // we can do this, we don't need to insert spill code.
892 MachineBasicBlock &MBB = *MI->getParent();
893 if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
894 vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
// Migrate all VirtRegMap bookkeeping from the old instruction to the
// folded one, then splice the new instruction in place of the old.
895 vrm.transferSpillPts(MI, fmi);
896 vrm.transferRestorePts(MI, fmi);
897 vrm.transferEmergencySpills(MI, fmi);
899 i2miMap_[InstrIdx /InstrSlots::NUM] = fmi;
900 mi2iMap_[fmi] = InstrIdx;
901 MI = MBB.insert(MBB.erase(MI), fmi);
908 /// canFoldMemoryOperand - Returns true if the specified load / store
909 /// folding is possible.
910 bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
911 SmallVector<unsigned, 2> &Ops,
913 // Filter the list of operand indexes that are to be folded. Abort if
914 // any operand will prevent folding.
916 SmallVector<unsigned, 2> FoldOps;
917 if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
920 // It's only legal to remat for a use, not a def.
921 if (ReMat && (MRInfo & VirtRegMap::isMod))
924 return tii_->canFoldMemoryOperand(MI, FoldOps);
// Return whether all of li's live ranges fall inside a single basic block.
// Each range's start is mapped to its block via binary search; a range
// extending past that block's index bound means the interval crosses MBBs.
927 bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
928 SmallPtrSet<MachineBasicBlock*, 4> MBBs;
929 for (LiveInterval::Ranges::const_iterator
930 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
931 std::vector<IdxMBBPair>::const_iterator II =
932 std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), I->start);
933 if (II == Idx2MBBMap.end())
935 if (I->end > II->first) // crossing a MBB.
937 MBBs.insert(II->second);
944 /// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of
945 /// interval on to-be re-materialized operands of MI) with new register.
946 void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
947 MachineInstr *MI, unsigned NewVReg,
949 // There is an implicit use. That means one of the other operand is
950 // being remat'ed and the remat'ed instruction has li.reg as an
951 // use operand. Make sure we rewrite that as well.
952 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
953 MachineOperand &MO = MI->getOperand(i);
954 if (!MO.isRegister())
956 unsigned Reg = MO.getReg();
// Only rematerialized virtual-register operands are of interest here.
957 if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
959 if (!vrm.isReMaterialized(Reg))
961 MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
962 MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg);
// Redirect the remat'ed instruction's use of li.reg to the new vreg.
964 UseMO->setReg(NewVReg);
968 /// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
969 /// for addIntervalsForSpills to rewrite uses / defs for the given live range.
// NOTE(review): this is an elided listing (the embedded original line numbers
// are non-contiguous). Missing lines include guard `continue`s, the
// `RestartInstruction:` label targeted by the goto below, several `else`
// branches, and the trailing `return CanFold;`. Comments here describe only
// what the visible lines establish.
// Rewrites one instruction's operands that reference the spilled register
// li.reg to use a fresh (or per-MBB reused) virtual register NewVReg, trying
// remat / memory-operand folding along the way. Outputs: NewVReg, HasDef,
// HasUse, NewLIs, SSWeight; returns whether the remaining operands are
// foldable (return statement elided from this listing).
971 rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
972 bool TrySplit, unsigned index, unsigned end, MachineInstr *MI,
973 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
974 unsigned Slot, int LdSlot,
975 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
977 const TargetRegisterClass* rc,
978 SmallVector<int, 4> &ReMatIds,
979 const MachineLoopInfo *loopInfo,
980 unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
981 std::map<unsigned,unsigned> &MBBVRegsMap,
982 std::vector<LiveInterval*> &NewLIs, float &SSWeight) {
983 MachineBasicBlock *MBB = MI->getParent();
984 unsigned loopDepth = loopInfo->getLoopDepth(MBB);
985 bool CanFold = false;
// Walk MI's operands looking for register operands that name li.reg
// (non-register and physical-register operands are skipped; the skip
// statements are elided in this listing).
987 for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
988 MachineOperand& mop = MI->getOperand(i);
989 if (!mop.isRegister())
991 unsigned Reg = mop.getReg();
993 if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
998 bool TryFold = !DefIsReMat;
999 bool FoldSS = true; // Default behavior unless it's a remat.
1000 int FoldSlot = Slot;
1002 // If this is the rematerializable definition MI itself and
1003 // all of its uses are rematerialized, simply delete it.
1004 if (MI == ReMatOrigDefMI && CanDelete) {
1005 DOUT << "\t\t\t\tErasing re-materlizable def: ";
1007 RemoveMachineInstrFromMaps(MI);
1008 vrm.RemoveMachineInstrFromMaps(MI);
1009 MI->eraseFromParent();
1013 // If def for this use can't be rematerialized, then try folding.
1014 // If def is rematerializable and it's a load, also try folding.
1015 TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
1017 // Try fold loads (from stack slot, constant pool, etc.) into uses.
1023 // Scan all of the operands of this instruction rewriting operands
1024 // to use NewVReg instead of li.reg as appropriate. We do this for
1027 // 1. If the instr reads the same spilled vreg multiple times, we
1028 // want to reuse the NewVReg.
1029 // 2. If the instr is a two-addr instruction, we are required to
1030 // keep the src/dst regs pinned.
1032 // Keep track of whether we replace a use and/or def so that we can
1033 // create the spill interval with the appropriate range.
1035 HasUse = mop.isUse();
1036 HasDef = mop.isDef();
1037 SmallVector<unsigned, 2> Ops;
// Collect the indices of any later operands of MI that also reference
// li.reg, so they can all be rewritten to the same NewVReg below.
1039 for (unsigned j = i+1, e = MI->getNumOperands(); j != e; ++j) {
1040 const MachineOperand &MOj = MI->getOperand(j);
1041 if (!MOj.isRegister())
1043 unsigned RegJ = MOj.getReg();
1044 if (RegJ == 0 || TargetRegisterInfo::isPhysicalRegister(RegJ))
1048 HasUse |= MOj.isUse();
1049 HasDef |= MOj.isDef();
1053 if (HasUse && !li.liveAt(getUseIndex(index)))
1054 // Must be defined by an implicit def. It should not be spilled. Note,
1055 // this is for correctness reason. e.g.
1056 // 8 %reg1024<def> = IMPLICIT_DEF
1057 // 12 %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
1058 // The live range [12, 14) is not part of the r1024 live interval since
1059 // it's defined by an implicit def. It will not conflict with the live
1060 // interval of r1025. Now suppose both registers are spilled, you can
1061 // easily see a situation where both registers are reloaded before
1062 // the INSERT_SUBREG and both target registers that would overlap.
1065 // Update stack slot spill weight if we are splitting.
1066 float Weight = getSpillWeight(HasDef, HasUse, loopDepth);
1073 // Do not fold load / store here if we are splitting. We'll find an
1074 // optimal point to insert a load / store later.
1076 if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
1077 Ops, FoldSS, FoldSlot, Reg)) {
1078 // Folding the load/store can completely change the instruction in
1079 // unpredictable ways, rescan it from the beginning.
1083 if (isRemoved(MI)) {
// NOTE(review): the `RestartInstruction:` label this jumps to is on an
// elided line earlier in the operand loop.
1087 goto RestartInstruction;
1090 // We'll try to fold it later if it's profitable.
1091 CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat);
1095 // Create a new virtual register for the spill interval.
1096 bool CreatedNewVReg = false;
1098 NewVReg = mri_->createVirtualRegister(rc);
1100 CreatedNewVReg = true;
// Rewrite this operand (and any gathered sibling operands) to NewVReg.
1102 mop.setReg(NewVReg);
1103 if (mop.isImplicit())
1104 rewriteImplicitOps(li, MI, NewVReg, vrm);
1106 // Reuse NewVReg for other reads.
1107 for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
1108 MachineOperand &mopj = MI->getOperand(Ops[j]);
1109 mopj.setReg(NewVReg);
1110 if (mopj.isImplicit())
1111 rewriteImplicitOps(li, MI, NewVReg, vrm);
// Record remat / stack-slot bookkeeping in the VirtRegMap for the
// freshly created virtual register.
1114 if (CreatedNewVReg) {
1116 vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI/*, CanDelete*/);
1117 if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
1118 // Each valnum may have its own remat id.
1119 ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
1121 vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
1123 if (!CanDelete || (HasUse && HasDef)) {
1124 // If this is a two-addr instruction then its use operands are
1125 // rematerializable but its def is not. It should be assigned a
1127 vrm.assignVirt2StackSlot(NewVReg, Slot);
1130 vrm.assignVirt2StackSlot(NewVReg, Slot);
1132 } else if (HasUse && HasDef &&
1133 vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
1134 // If this interval hasn't been assigned a stack slot (because earlier
1135 // def is a deleted remat def), do it now.
1136 assert(Slot != VirtRegMap::NO_STACK_SLOT);
1137 vrm.assignVirt2StackSlot(NewVReg, Slot);
1140 // Re-matting an instruction with virtual register use. Add the
1141 // register as an implicit use on the use MI.
1142 if (DefIsReMat && ImpUse)
1143 MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
1145 // create a new register interval for this spill / remat.
1146 LiveInterval &nI = getOrCreateInterval(NewVReg);
1147 if (CreatedNewVReg) {
1148 NewLIs.push_back(&nI);
1149 MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
1151 vrm.setIsSplitFromReg(NewVReg, li.reg);
// Build or extend the live ranges of nI to cover this use and/or def.
1155 if (CreatedNewVReg) {
1156 LiveRange LR(getLoadIndex(index), getUseIndex(index)+1,
1157 nI.getNextValue(~0U, 0, VNInfoAllocator));
1161 // Extend the split live interval to this def / use.
1162 unsigned End = getUseIndex(index)+1;
1163 LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
1164 nI.getValNumInfo(nI.getNumValNums()-1));
1170 LiveRange LR(getDefIndex(index), getStoreIndex(index),
1171 nI.getNextValue(~0U, 0, VNInfoAllocator));
1176 DOUT << "\t\t\t\tAdded new interval: ";
1177 nI.print(DOUT, tri_);
// Return true if the value number has a kill inside MBB at an index
// strictly after Idx. NOTE(review): the `const VNInfo *VNI` parameter line
// (original line 1183) is elided from this listing — the body clearly reads
// VNI->kills; the early bail-out and final `return false;` are elided too.
1182 bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
1184 MachineBasicBlock *MBB, unsigned Idx) const {
// Kills past the end of MBB belong to successor blocks and don't count.
1185 unsigned End = getMBBEndIdx(MBB);
1186 for (unsigned j = 0, ee = VNI->kills.size(); j != ee; ++j) {
1187 unsigned KillIdx = VNI->kills[j];
1188 if (KillIdx > Idx && KillIdx < End)
1194 /// RewriteInfo - Keep track of machine instrs that will be rewritten
1195 /// during spilling.
// NOTE(review): the field declarations (Index, MI, HasUse, HasDef — see the
// constructor's initializer list) are on elided lines of this listing.
1197 struct RewriteInfo {
1202 RewriteInfo(unsigned i, MachineInstr *mi, bool u, bool d)
1203 : Index(i), MI(mi), HasUse(u), HasDef(d) {}
// Strict-weak ordering by instruction index; used with std::sort so that
// rewriteInstructionsForSpills visits def/use records in program order.
1206 struct RewriteInfoCompare {
1207 bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
1208 return LHS.Index < RHS.Index;
// Rewrite every def/use of li.reg inside the live range [I->start, I->end):
// collect them in instruction-index order, rewrite each instruction via
// rewriteInstructionForSpills, and record per-MBB spill points (SpillIdxes /
// SpillMBBs) and restore points (RestoreIdxes / RestoreMBBs) for the spiller.
// NOTE(review): this is an elided listing — guard `continue`s, several `else`
// arms, and loop-closing braces are on missing lines; comments describe only
// what the visible lines establish.
1213 void LiveIntervals::
1214 rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
1215 LiveInterval::Ranges::const_iterator &I,
1216 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
1217 unsigned Slot, int LdSlot,
1218 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
1220 const TargetRegisterClass* rc,
1221 SmallVector<int, 4> &ReMatIds,
1222 const MachineLoopInfo *loopInfo,
1223 BitVector &SpillMBBs,
1224 std::map<unsigned, std::vector<SRInfo> > &SpillIdxes,
1225 BitVector &RestoreMBBs,
1226 std::map<unsigned, std::vector<SRInfo> > &RestoreIdxes,
1227 std::map<unsigned,unsigned> &MBBVRegsMap,
1228 std::vector<LiveInterval*> &NewLIs, float &SSWeight) {
1229 bool AllCanFold = true;
1230 unsigned NewVReg = 0;
// Round the range down/up to whole-instruction boundaries.
1231 unsigned start = getBaseIndex(I->start);
1232 unsigned end = getBaseIndex(I->end-1) + InstrSlots::NUM;
1234 // First collect all the def / use in this live range that will be rewritten.
1235 // Make sure they are sorted according to instruction index.
1236 std::vector<RewriteInfo> RewriteMIs;
1237 for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
1238 re = mri_->reg_end(); ri != re; ) {
1239 MachineInstr *MI = &*ri;
1240 MachineOperand &O = ri.getOperand();
1242 assert(!O.isImplicit() && "Spilling register that's used as implicit use?");
1243 unsigned index = getInstructionIndex(MI);
1244 if (index < start || index >= end)
1246 if (O.isUse() && !li.liveAt(getUseIndex(index)))
1247 // Must be defined by an implicit def. It should not be spilled. Note,
1248 // this is for correctness reason. e.g.
1249 // 8 %reg1024<def> = IMPLICIT_DEF
1250 // 12 %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
1251 // The live range [12, 14) is not part of the r1024 live interval since
1252 // it's defined by an implicit def. It will not conflict with the live
1253 // interval of r1025. Now suppose both registers are spilled, you can
1254 // easily see a situation where both registers are reloaded before
1255 // the INSERT_SUBREG and both target registers that would overlap.
1257 RewriteMIs.push_back(RewriteInfo(index, MI, O.isUse(), O.isDef()));
1259 std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());
// If the remat def itself reads another virtual register, remember it so
// each rewritten user gets it as an implicit use.
1261 unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
1262 // Now rewrite the defs and uses.
1263 for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
1264 RewriteInfo &rwi = RewriteMIs[i];
1266 unsigned index = rwi.Index;
1267 bool MIHasUse = rwi.HasUse;
1268 bool MIHasDef = rwi.HasDef;
1269 MachineInstr *MI = rwi.MI;
1270 // If MI defs and/or uses the same register multiple times, then there
1271 // are multiple entries; merge them into one record per instruction.
1272 unsigned NumUses = MIHasUse;
1273 while (i != e && RewriteMIs[i].MI == MI) {
1274 assert(RewriteMIs[i].Index == index);
1275 bool isUse = RewriteMIs[i].HasUse;
1276 if (isUse) ++NumUses;
1278 MIHasDef |= RewriteMIs[i].HasDef;
1281 MachineBasicBlock *MBB = MI->getParent();
1283 if (ImpUse && MI != ReMatDefMI) {
1284 // Re-matting an instruction with virtual register use. Update the
1285 // register interval's spill weight to HUGE_VALF to prevent it from
1287 LiveInterval &ImpLi = getInterval(ImpUse);
1288 ImpLi.weight = HUGE_VALF;
// When splitting, reuse the vreg already created for this MBB (if any).
1291 unsigned MBBId = MBB->getNumber();
1292 unsigned ThisVReg = 0;
1294 std::map<unsigned,unsigned>::const_iterator NVI = MBBVRegsMap.find(MBBId);
1295 if (NVI != MBBVRegsMap.end()) {
1296 ThisVReg = NVI->second;
1303 // It's better to start a new interval to avoid artificially
1304 // extending the new interval.
1305 if (MIHasDef && !MIHasUse) {
1306 MBBVRegsMap.erase(MBB->getNumber());
1312 bool IsNew = ThisVReg == 0;
1314 // This ends the previous live interval. If all of its def / use
1315 // can be folded, give it a low spill weight.
1316 if (NewVReg && TrySplit && AllCanFold) {
1317 LiveInterval &nI = getOrCreateInterval(NewVReg);
1324 bool HasDef = false;
1325 bool HasUse = false;
1326 bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
1327 index, end, MI, ReMatOrigDefMI, ReMatDefMI,
1328 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1329 CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
1330 ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs, SSWeight);
1331 if (!HasDef && !HasUse)
1334 AllCanFold &= CanFold;
1336 // Update weight of spill interval.
1337 LiveInterval &nI = getOrCreateInterval(NewVReg);
1339 // The spill weight is now infinity as it cannot be spilled again.
1340 nI.weight = HUGE_VALF;
1344 // Keep track of the last def and first use in each MBB.
1346 if (MI != ReMatOrigDefMI || !CanDelete) {
1347 bool HasKill = false;
1349 HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, getDefIndex(index));
1351 // If this is a two-address code, then this index starts a new VNInfo.
1352 const VNInfo *VNI = li.findDefinedVNInfo(getDefIndex(index));
1354 HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, getDefIndex(index));
// Record (or merge) a candidate spill point for this MBB.
1356 std::map<unsigned, std::vector<SRInfo> >::iterator SII =
1357 SpillIdxes.find(MBBId);
1359 if (SII == SpillIdxes.end()) {
1360 std::vector<SRInfo> S;
1361 S.push_back(SRInfo(index, NewVReg, true));
1362 SpillIdxes.insert(std::make_pair(MBBId, S));
1363 } else if (SII->second.back().vreg != NewVReg) {
1364 SII->second.push_back(SRInfo(index, NewVReg, true));
1365 } else if ((int)index > SII->second.back().index) {
1366 // If there is an earlier def and this is a two-address
1367 // instruction, then it's not possible to fold the store (which
1368 // would also fold the load).
1369 SRInfo &Info = SII->second.back();
1371 Info.canFold = !HasUse;
1373 SpillMBBs.set(MBBId);
1374 } else if (SII != SpillIdxes.end() &&
1375 SII->second.back().vreg == NewVReg &&
1376 (int)index > SII->second.back().index) {
1377 // There is an earlier def that's not killed (must be two-address).
1378 // The spill is no longer needed.
1379 SII->second.pop_back();
1380 if (SII->second.empty()) {
1381 SpillIdxes.erase(MBBId);
1382 SpillMBBs.reset(MBBId);
// Record (or merge) a candidate restore point for this MBB.
1389 std::map<unsigned, std::vector<SRInfo> >::iterator SII =
1390 SpillIdxes.find(MBBId);
1391 if (SII != SpillIdxes.end() &&
1392 SII->second.back().vreg == NewVReg &&
1393 (int)index > SII->second.back().index)
1394 // Use(s) following the last def, it's not safe to fold the spill.
1395 SII->second.back().canFold = false;
1396 std::map<unsigned, std::vector<SRInfo> >::iterator RII =
1397 RestoreIdxes.find(MBBId);
1398 if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
1399 // If we are splitting live intervals, only fold if it's the first
1400 // use and there isn't another use later in the MBB.
1401 RII->second.back().canFold = false;
1403 // Only need a reload if there isn't an earlier def / use.
1404 if (RII == RestoreIdxes.end()) {
1405 std::vector<SRInfo> Infos;
1406 Infos.push_back(SRInfo(index, NewVReg, true));
1407 RestoreIdxes.insert(std::make_pair(MBBId, Infos));
1409 RII->second.push_back(SRInfo(index, NewVReg, true));
1411 RestoreMBBs.set(MBBId);
1415 // Update spill weight.
1416 unsigned loopDepth = loopInfo->getLoopDepth(MBB);
1417 nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
1420 if (NewVReg && TrySplit && AllCanFold) {
1421 // If all of its def / use can be folded, give it a low spill weight.
1422 LiveInterval &nI = getOrCreateInterval(NewVReg);
1427 bool LiveIntervals::alsoFoldARestore(int Id, int index, unsigned vr,
1428 BitVector &RestoreMBBs,
1429 std::map<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1430 if (!RestoreMBBs[Id])
1432 std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1433 for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1434 if (Restores[i].index == index &&
1435 Restores[i].vreg == vr &&
1436 Restores[i].canFold)
1441 void LiveIntervals::eraseRestoreInfo(int Id, int index, unsigned vr,
1442 BitVector &RestoreMBBs,
1443 std::map<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1444 if (!RestoreMBBs[Id])
1446 std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1447 for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1448 if (Restores[i].index == index && Restores[i].vreg)
1449 Restores[i].index = -1;
1452 /// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being
1453 /// spilled and create empty intervals for their uses.
// NOTE(review): elided listing — the def/use dispatch around the assert, the
// operand rewrite inside the inner loop, and closing braces are on missing
// lines. The return type line is also elided.
1455 LiveIntervals::handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm,
1456 const TargetRegisterClass* rc,
1457 std::vector<LiveInterval*> &NewLIs) {
// Walk all remaining references to li.reg; any def left at this point must
// be the IMPLICIT_DEF itself (everything else was rewritten earlier).
1458 for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
1459 re = mri_->reg_end(); ri != re; ) {
1460 MachineOperand &O = ri.getOperand();
1461 MachineInstr *MI = &*ri;
1464 assert(MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF &&
1465 "Register def was not rewritten?");
1466 RemoveMachineInstrFromMaps(MI);
1467 vrm.RemoveMachineInstrFromMaps(MI);
1468 MI->eraseFromParent();
1470 // This must be a use of an implicit_def so it's not part of the live
1471 // interval. Create a new empty live interval for it.
1472 // FIXME: Can we simply erase some of the instructions? e.g. Stores?
1473 unsigned NewVReg = mri_->createVirtualRegister(rc);
1475 vrm.setIsImplicitlyDefined(NewVReg);
1476 NewLIs.push_back(&getOrCreateInterval(NewVReg));
// Point every operand of this user that still names li.reg at the new vreg
// (the setReg call is on an elided line).
1477 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1478 MachineOperand &MO = MI->getOperand(i);
1479 if (MO.isReg() && MO.getReg() == li.reg)
// Top-level spill driver: given an interval chosen for spilling, compute
// per-val# rematerialization info, rewrite all defs/uses to new intervals,
// then place the actual spill stores and restore loads (folding into
// memory operands where possible). Returns the set of new intervals.
// NOTE(review): elided listing — parameter lines (e.g. the SSWeight
// reference and ClonedMIs/dummy declarations), early returns, else arms,
// and the final `return RetNewLIs;` are on missing lines.
1487 std::vector<LiveInterval*> LiveIntervals::
1488 addIntervalsForSpills(const LiveInterval &li,
1489 const MachineLoopInfo *loopInfo, VirtRegMap &vrm,
1491 assert(li.weight != HUGE_VALF &&
1492 "attempt to spill already spilled interval!");
1494 DOUT << "\t\t\t\tadding intervals for spills for interval: ";
1495 li.print(DOUT, tri_);
1498 // Spill slot weight.
1501 // Each bit specifies whether a spill is required in the MBB.
1502 BitVector SpillMBBs(mf_->getNumBlockIDs());
1503 std::map<unsigned, std::vector<SRInfo> > SpillIdxes;
1504 BitVector RestoreMBBs(mf_->getNumBlockIDs());
1505 std::map<unsigned, std::vector<SRInfo> > RestoreIdxes;
1506 std::map<unsigned,unsigned> MBBVRegsMap;
1507 std::vector<LiveInterval*> NewLIs;
1508 const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
// Per-value-number remat bookkeeping, indexed by val# id.
1510 unsigned NumValNums = li.getNumValNums();
1511 SmallVector<MachineInstr*, 4> ReMatDefs;
1512 ReMatDefs.resize(NumValNums, NULL);
1513 SmallVector<MachineInstr*, 4> ReMatOrigDefs;
1514 ReMatOrigDefs.resize(NumValNums, NULL);
1515 SmallVector<int, 4> ReMatIds;
1516 ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
1517 BitVector ReMatDelete(NumValNums);
1518 unsigned Slot = VirtRegMap::MAX_STACK_SLOT;
1520 // Spilling a split live interval. It cannot be split any further. Also,
1521 // it's also guaranteed to be a single val# / range interval.
1522 if (vrm.getPreSplitReg(li.reg)) {
1523 vrm.setIsSplitFromReg(li.reg, 0);
1524 // Unset the split kill marker on the last use.
1525 unsigned KillIdx = vrm.getKillPoint(li.reg);
1527 MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
1528 assert(KillMI && "Last use disappeared?");
1529 int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
1530 assert(KillOp != -1 && "Last use disappeared?");
1531 KillMI->getOperand(KillOp).setIsKill(false);
1533 vrm.removeKillPoint(li.reg);
1534 bool DefIsReMat = vrm.isReMaterialized(li.reg);
1535 Slot = vrm.getStackSlot(li.reg);
1536 assert(Slot != VirtRegMap::MAX_STACK_SLOT);
1537 MachineInstr *ReMatDefMI = DefIsReMat ?
1538 vrm.getReMaterializedMI(li.reg) : NULL;
1540 bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1541 bool isLoad = isLoadSS ||
1542 (DefIsReMat && (ReMatDefMI->getDesc().isSimpleLoad()));
1543 bool IsFirstRange = true;
1544 for (LiveInterval::Ranges::const_iterator
1545 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1546 // If this is a split live interval with multiple ranges, it means there
1547 // are two-address instructions that re-defined the value. Only the
1548 // first def can be rematerialized!
1550 // Note ReMatOrigDefMI has already been deleted.
1551 rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
1552 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1553 false, vrm, rc, ReMatIds, loopInfo,
1554 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1555 MBBVRegsMap, NewLIs, SSWeight);
1557 rewriteInstructionsForSpills(li, false, I, NULL, 0,
1558 Slot, 0, false, false, false,
1559 false, vrm, rc, ReMatIds, loopInfo,
1560 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1561 MBBVRegsMap, NewLIs, SSWeight);
1563 IsFirstRange = false;
1566 SSWeight = 0.0f; // Already accounted for when split.
1567 handleSpilledImpDefs(li, vrm, rc, NewLIs);
// Non-pre-split path: decide whether to split at basic-block boundaries.
1571 bool TrySplit = SplitAtBB && !intervalIsInOneMBB(li);
1572 if (SplitLimit != -1 && (int)numSplits >= SplitLimit)
// Phase 1: per val#, determine rematerializability and whether a stack
// slot is needed at all.
1576 bool NeedStackSlot = false;
1577 for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
1579 const VNInfo *VNI = *i;
1580 unsigned VN = VNI->id;
1581 unsigned DefIdx = VNI->def;
1583 continue; // Dead val#.
1584 // Is the def for the val# rematerializable?
1585 MachineInstr *ReMatDefMI = (DefIdx == ~0u)
1586 ? 0 : getInstructionFromIndex(DefIdx);
1588 if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, dummy)) {
1589 // Remember how to remat the def of this val#.
1590 ReMatOrigDefs[VN] = ReMatDefMI;
1591 // Original def may be modified so we have to make a copy here.
1592 MachineInstr *Clone = mf_->CloneMachineInstr(ReMatDefMI);
1593 ClonedMIs.push_back(Clone);
1594 ReMatDefs[VN] = Clone;
1596 bool CanDelete = true;
1597 if (VNI->hasPHIKill) {
1598 // A kill is a phi node, not all of its uses can be rematerialized.
1599 // It must not be deleted.
1601 // Need a stack slot if there is any live range where uses cannot be
1603 NeedStackSlot = true;
1606 ReMatDelete.set(VN);
1608 // Need a stack slot if there is any live range where uses cannot be
1610 NeedStackSlot = true;
1614 // One stack slot per live interval.
1615 if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0)
1616 Slot = vrm.assignVirt2StackSlot(li.reg);
1618 // Create new intervals and rewrite defs and uses.
1619 for (LiveInterval::Ranges::const_iterator
1620 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1621 MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
1622 MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
1623 bool DefIsReMat = ReMatDefMI != NULL;
1624 bool CanDelete = ReMatDelete[I->valno->id];
1626 bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1627 bool isLoad = isLoadSS ||
1628 (DefIsReMat && ReMatDefMI->getDesc().isSimpleLoad());
1629 rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
1630 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1631 CanDelete, vrm, rc, ReMatIds, loopInfo,
1632 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1633 MBBVRegsMap, NewLIs, SSWeight);
1636 // Insert spills / restores if we are splitting.
1638 handleSpilledImpDefs(li, vrm, rc, NewLIs);
// Phase 2: place spill stores in every block that needs one, folding the
// store into the defining instruction when legal.
1642 SmallPtrSet<LiveInterval*, 4> AddedKill;
1643 SmallVector<unsigned, 2> Ops;
1644 if (NeedStackSlot) {
1645 int Id = SpillMBBs.find_first();
1647 MachineBasicBlock *MBB = mf_->getBlockNumbered(Id);
1648 unsigned loopDepth = loopInfo->getLoopDepth(MBB);
1649 std::vector<SRInfo> &spills = SpillIdxes[Id];
1650 for (unsigned i = 0, e = spills.size(); i != e; ++i) {
1651 int index = spills[i].index;
1652 unsigned VReg = spills[i].vreg;
1653 LiveInterval &nI = getOrCreateInterval(VReg);
1654 bool isReMat = vrm.isReMaterialized(VReg);
1655 MachineInstr *MI = getInstructionFromIndex(index);
1656 bool CanFold = false;
1657 bool FoundUse = false;
1659 if (spills[i].canFold) {
1661 for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
1662 MachineOperand &MO = MI->getOperand(j);
1663 if (!MO.isRegister() || MO.getReg() != VReg)
1670 (!FoundUse && !alsoFoldARestore(Id, index, VReg,
1671 RestoreMBBs, RestoreIdxes))) {
1672 // MI has two-address uses of the same register. If the use
1673 // isn't the first and only use in the BB, then we can't fold
1674 // it. FIXME: Move this to rewriteInstructionsForSpills.
1681 // Fold the store into the def if possible.
1682 bool Folded = false;
1683 if (CanFold && !Ops.empty()) {
1684 if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot,VReg)){
1687 // Also folded uses, do not issue a load.
1688 eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
1689 nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
1691 nI.removeRange(getDefIndex(index), getStoreIndex(index));
1695 // Otherwise tell the spiller to issue a spill.
1697 LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
1698 bool isKill = LR->end == getStoreIndex(index);
1699 if (!MI->registerDefIsDead(nI.reg))
1700 // No need to spill a dead def.
1701 vrm.addSpillPoint(VReg, isKill, MI);
1703 AddedKill.insert(&nI);
1706 // Update spill slot weight.
1708 SSWeight += getSpillWeight(true, false, loopDepth);
1710 Id = SpillMBBs.find_next(Id);
// Phase 3: place restore loads (or remats) in every block that needs one,
// folding into the using instruction when legal.
1714 int Id = RestoreMBBs.find_first();
1716 MachineBasicBlock *MBB = mf_->getBlockNumbered(Id);
1717 unsigned loopDepth = loopInfo->getLoopDepth(MBB);
1719 std::vector<SRInfo> &restores = RestoreIdxes[Id];
1720 for (unsigned i = 0, e = restores.size(); i != e; ++i) {
1721 int index = restores[i].index;
1724 unsigned VReg = restores[i].vreg;
1725 LiveInterval &nI = getOrCreateInterval(VReg);
1726 bool isReMat = vrm.isReMaterialized(VReg);
1727 MachineInstr *MI = getInstructionFromIndex(index);
1728 bool CanFold = false;
1730 if (restores[i].canFold) {
1732 for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
1733 MachineOperand &MO = MI->getOperand(j);
1734 if (!MO.isRegister() || MO.getReg() != VReg)
1738 // If this restore were to be folded, it would have been folded
1747 // Fold the load into the use if possible.
1748 bool Folded = false;
1749 if (CanFold && !Ops.empty()) {
1751 Folded = tryFoldMemoryOperand(MI, vrm, NULL,index,Ops,true,Slot,VReg);
1753 MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
1755 bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1756 // If the rematerializable def is a load, also try to fold it.
1757 if (isLoadSS || ReMatDefMI->getDesc().isSimpleLoad())
1758 Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
1759 Ops, isLoadSS, LdSlot, VReg);
1760 unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
1762 // Re-matting an instruction with virtual register use. Add the
1763 // register as an implicit use on the use MI and update the register
1764 // interval's spill weight to HUGE_VALF to prevent it from being
1766 LiveInterval &ImpLi = getInterval(ImpUse);
1767 ImpLi.weight = HUGE_VALF;
1768 MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
1772 // If folding is not possible / failed, then tell the spiller to issue a
1773 // load / rematerialization for us.
1775 nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
1777 vrm.addRestorePoint(VReg, MI);
1779 // Update spill slot weight.
1781 SSWeight += getSpillWeight(false, true, loopDepth);
1783 Id = RestoreMBBs.find_next(Id);
1786 // Finalize intervals: add kills, finalize spill weights, and filter out
1788 std::vector<LiveInterval*> RetNewLIs;
1789 for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
1790 LiveInterval *LI = NewLIs[i];
// Normalize weight by interval size so short intervals aren't over-favored.
1792 LI->weight /= LI->getSize();
1793 if (!AddedKill.count(LI)) {
1794 LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
1795 unsigned LastUseIdx = getBaseIndex(LR->end);
1796 MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
1797 int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
1798 assert(UseIdx != -1);
1799 if (LastUse->getOperand(UseIdx).isImplicit() ||
1800 LastUse->getDesc().getOperandConstraint(UseIdx,TOI::TIED_TO) == -1){
1801 LastUse->getOperand(UseIdx).setIsKill();
1802 vrm.addKillPoint(LI->reg, LastUseIdx);
1805 RetNewLIs.push_back(LI);
1809 handleSpilledImpDefs(li, vrm, rc, RetNewLIs);
1813 /// hasAllocatableSuperReg - Return true if the specified physical register has
1814 /// any super register that's allocatable.
1815 bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
1816 for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS)
1817 if (allocatableRegs_[*AS] && hasInterval(*AS))
1822 /// getRepresentativeReg - Find the largest super register of the specified
1823 /// physical register.
// NOTE(review): elided listing — the loop body that updates BestReg on a
// match, and the final `return BestReg;`, are on missing lines. Defaults to
// Reg itself when no suitable super-register is found.
1824 unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
1825 // Find the largest super-register that is allocatable.
1826 unsigned BestReg = Reg;
1827 for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
1828 unsigned SuperReg = *AS;
// A super-register with no allocatable super of its own is maximal;
// it must also be tracked with a live interval to qualify.
1829 if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) {
1837 /// getNumConflictsWithPhysReg - Return the number of uses and defs of the
1838 /// specified interval that conflict with the specified physical register.
// NOTE(review): elided listing — the conflict-counting statement inside the
// loop (incrementing NumConflicts when pli is live) is on a missing line.
1839 unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
1840 unsigned PhysReg) const {
1841 unsigned NumConflicts = 0;
// Compare against the representative (largest allocatable) super-register's
// interval so aliasing sub/super registers are accounted for.
1842 const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg));
1843 for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
1844 E = mri_->reg_end(); I != E; ++I) {
1845 MachineOperand &O = I.getOperand();
1846 MachineInstr *MI = O.getParent();
1847 unsigned Index = getInstructionIndex(MI);
1848 if (pli.liveAt(Index))
1851 return NumConflicts;
1854 /// spillPhysRegAroundRegDefsUses - Spill the specified physical register
1855 /// around all defs and uses of the specified interval.
// NOTE(review): elided listing — the `continue` statements after the SeenMIs
// and hasInterval guards, the SeenMIs.insert call, and closing braces are on
// missing lines.
1856 void LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
1857 unsigned PhysReg, VirtRegMap &vrm) {
// Spill the representative (largest allocatable) super-register so all
// aliasing sub-registers are covered by a single emergency spill.
1858 unsigned SpillReg = getRepresentativeReg(PhysReg);
1860 for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
1861 // If there are registers which alias PhysReg, but which are not a
1862 // sub-register of the chosen representative super register. Assert
1863 // since we can't handle it yet.
1864 assert(*AS == SpillReg || !allocatableRegs_[*AS] ||
1865 tri_->isSuperRegister(*AS, SpillReg));
1867 LiveInterval &pli = getInterval(SpillReg);
// Guard against visiting the same instruction once per operand.
1868 SmallPtrSet<MachineInstr*, 8> SeenMIs;
1869 for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
1870 E = mri_->reg_end(); I != E; ++I) {
1871 MachineOperand &O = I.getOperand();
1872 MachineInstr *MI = O.getParent();
1873 if (SeenMIs.count(MI))
1876 unsigned Index = getInstructionIndex(MI);
1877 if (pli.liveAt(Index)) {
1878 vrm.addEmergencySpill(SpillReg, MI);
// Punch a hole in the phys-reg interval (and its sub-registers') live
// ranges around this instruction so the allocator sees it as free here.
1879 pli.removeRange(getLoadIndex(Index), getStoreIndex(Index)+1);
1880 for (const unsigned* AS = tri_->getSubRegisters(SpillReg); *AS; ++AS) {
1881 if (!hasInterval(*AS))
1883 LiveInterval &spli = getInterval(*AS);
1884 if (spli.liveAt(Index))
1885 spli.removeRange(getLoadIndex(Index), getStoreIndex(Index)+1);
1891 LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg,
1892 MachineInstr* startInst) {
1893 LiveInterval& Interval = getOrCreateInterval(reg);
1894 VNInfo* VN = Interval.getNextValue(
1895 getInstructionIndex(startInst) + InstrSlots::DEF,
1896 startInst, getVNInfoAllocator());
1897 VN->hasPHIKill = true;
1898 VN->kills.push_back(getMBBEndIdx(startInst->getParent()));
1899 LiveRange LR(getInstructionIndex(startInst) + InstrSlots::DEF,
1900 getMBBEndIdx(startInst->getParent()) + 1, VN);
1901 Interval.addRange(LR);