//===-- LiveIntervalAnalysis.cpp - Live Interval Analysis -----------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the LiveInterval analysis pass which is used
// by the Linear Scan Register allocator. This pass linearizes the
// basic blocks of the function in DFS order and uses the
// LiveVariables pass to conservatively compute live intervals for
// each virtual and physical register.
//
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "liveintervals"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "VirtRegMap.h"
#include "llvm/Value.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include <algorithm>
#include <cmath>
using namespace llvm;
// Hidden options for help debugging.
static cl::opt<bool> DisableReMat("disable-rematerialization",
                                  cl::init(false), cl::Hidden);

static cl::opt<bool> SplitAtBB("split-intervals-at-bb",
                               cl::init(true), cl::Hidden);
static cl::opt<int> SplitLimit("split-limit",
                               cl::init(-1), cl::Hidden);

static cl::opt<bool> EnableAggressiveRemat("aggressive-remat", cl::Hidden);

static cl::opt<bool> EnableFastSpilling("fast-spill",
                                        cl::init(false), cl::Hidden);

STATISTIC(numIntervals, "Number of original intervals");
STATISTIC(numFolds    , "Number of loads/stores folded into instructions");
STATISTIC(numSplits   , "Number of intervals split");

char LiveIntervals::ID = 0;
static RegisterPass<LiveIntervals> X("liveintervals", "Live Interval Analysis");
void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<AliasAnalysis>();
  AU.addPreserved<AliasAnalysis>();
  AU.addPreserved<LiveVariables>();
  AU.addRequired<LiveVariables>();
  AU.addPreservedID(MachineLoopInfoID);
  AU.addPreservedID(MachineDominatorsID);
  AU.addPreservedID(PHIEliminationID);
  AU.addRequiredID(PHIEliminationID);
  AU.addRequiredID(TwoAddressInstructionPassID);
  MachineFunctionPass::getAnalysisUsage(AU);
}
void LiveIntervals::releaseMemory() {
  // Free the live intervals themselves.
  for (DenseMap<unsigned, LiveInterval*>::iterator I = r2iMap_.begin(),
       E = r2iMap_.end(); I != E; ++I)
    delete I->second;

  // Release VNInfo memory regions after all VNInfo objects are dtor'd.
  VNInfoAllocator.Reset();
  while (!ClonedMIs.empty()) {
    MachineInstr *MI = ClonedMIs.back();
    ClonedMIs.pop_back();
    mf_->DeleteMachineInstr(MI);
  }
}
101 void LiveIntervals::computeNumbering() {
102 Index2MiMap OldI2MI = i2miMap_;
103 std::vector<IdxMBBPair> OldI2MBB = Idx2MBBMap;
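  // Note on the index scheme (summarized from the InstrSlots layout declared
  // in LiveIntervalAnalysis.h): every instruction is assigned InstrSlots::NUM
  // consecutive index values, subdivided into LOAD, USE, DEF and STORE
  // sub-slots; getLoadIndex / getUseIndex / getDefIndex / getStoreIndex
  // select the corresponding sub-slot of an instruction's base index.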
  // Number MachineInstrs and MachineBasicBlocks.
  // Initialize MBB indexes to a sentinel.
  MBB2IdxMap.resize(mf_->getNumBlockIDs(), std::make_pair(~0U, ~0U));
116 unsigned MIIndex = 0;
117 for (MachineFunction::iterator MBB = mf_->begin(), E = mf_->end();
119 unsigned StartIdx = MIIndex;
121 // Insert an empty slot at the beginning of each block.
122 MIIndex += InstrSlots::NUM;
123 i2miMap_.push_back(0);
125 for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
127 bool inserted = mi2iMap_.insert(std::make_pair(I, MIIndex)).second;
128 assert(inserted && "multiple MachineInstr -> index mappings");
130 i2miMap_.push_back(I);
131 MIIndex += InstrSlots::NUM;
      // Insert max(1, numdefs) empty slots after every instruction.
      unsigned Slots = I->getDesc().getNumDefs();
      if (Slots == 0)
        Slots = 1;
      MIIndex += InstrSlots::NUM * Slots;
      while (Slots--)
        i2miMap_.push_back(0);
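      // For example, assuming InstrSlots::NUM == 4: an instruction whose base
      // index is 8 owns sub-indices 8..11 (load/use/def/store), and a two-def
      // instruction additionally reserves 12..15 before the next instruction
      // is numbered.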
143 // Set the MBB2IdxMap entry for this MBB.
144 MBB2IdxMap[MBB->getNumber()] = std::make_pair(StartIdx, MIIndex - 1);
145 Idx2MBBMap.push_back(std::make_pair(StartIdx, MBB));
147 std::sort(Idx2MBBMap.begin(), Idx2MBBMap.end(), Idx2MBBCompare());
149 if (!OldI2MI.empty())
150 for (iterator OI = begin(), OE = end(); OI != OE; ++OI) {
151 for (LiveInterval::iterator LI = OI->second->begin(),
152 LE = OI->second->end(); LI != LE; ++LI) {
154 // Remap the start index of the live range to the corresponding new
155 // number, or our best guess at what it _should_ correspond to if the
156 // original instruction has been erased. This is either the following
157 // instruction or its predecessor.
158 unsigned index = LI->start / InstrSlots::NUM;
159 unsigned offset = LI->start % InstrSlots::NUM;
160 if (offset == InstrSlots::LOAD) {
161 std::vector<IdxMBBPair>::const_iterator I =
162 std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), LI->start);
163 // Take the pair containing the index
164 std::vector<IdxMBBPair>::const_iterator J =
165 (I == OldI2MBB.end() && OldI2MBB.size()>0) ? (I-1): I;
167 LI->start = getMBBStartIdx(J->second);
169 LI->start = mi2iMap_[OldI2MI[index]] + offset;
172 // Remap the ending index in the same way that we remapped the start,
173 // except for the final step where we always map to the immediately
174 // following instruction.
175 index = (LI->end - 1) / InstrSlots::NUM;
176 offset = LI->end % InstrSlots::NUM;
177 if (offset == InstrSlots::LOAD) {
178 // VReg dies at end of block.
179 std::vector<IdxMBBPair>::const_iterator I =
180 std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), LI->end);
183 LI->end = getMBBEndIdx(I->second) + 1;
185 unsigned idx = index;
186 while (index < OldI2MI.size() && !OldI2MI[index]) ++index;
188 if (index != OldI2MI.size())
189 LI->end = mi2iMap_[OldI2MI[index]] + (idx == index ? offset : 0);
191 LI->end = InstrSlots::NUM * i2miMap_.size();
195 for (LiveInterval::vni_iterator VNI = OI->second->vni_begin(),
196 VNE = OI->second->vni_end(); VNI != VNE; ++VNI) {
199 // Remap the VNInfo def index, which works the same as the
200 // start indices above. VN's with special sentinel defs
201 // don't need to be remapped.
202 if (vni->isDefAccurate() && !vni->isUnused()) {
203 unsigned index = vni->def / InstrSlots::NUM;
204 unsigned offset = vni->def % InstrSlots::NUM;
205 if (offset == InstrSlots::LOAD) {
206 std::vector<IdxMBBPair>::const_iterator I =
207 std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), vni->def);
208 // Take the pair containing the index
209 std::vector<IdxMBBPair>::const_iterator J =
210 (I == OldI2MBB.end() && OldI2MBB.size()>0) ? (I-1): I;
212 vni->def = getMBBStartIdx(J->second);
214 vni->def = mi2iMap_[OldI2MI[index]] + offset;
218 // Remap the VNInfo kill indices, which works the same as
219 // the end indices above.
220 for (size_t i = 0; i < vni->kills.size(); ++i) {
221 // PHI kills don't need to be remapped.
222 if (!vni->kills[i]) continue;
224 unsigned index = (vni->kills[i]-1) / InstrSlots::NUM;
225 unsigned offset = vni->kills[i] % InstrSlots::NUM;
226 if (offset == InstrSlots::LOAD) {
227 std::vector<IdxMBBPair>::const_iterator I =
228 std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), vni->kills[i]);
231 vni->kills[i] = getMBBEndIdx(I->second);
233 unsigned idx = index;
234 while (index < OldI2MI.size() && !OldI2MI[index]) ++index;
236 if (index != OldI2MI.size())
237 vni->kills[i] = mi2iMap_[OldI2MI[index]] +
238 (idx == index ? offset : 0);
240 vni->kills[i] = InstrSlots::NUM * i2miMap_.size();
void LiveIntervals::scaleNumbering(int factor) {
  // Need to:
  //  * scale MBB begin and end points
  //  * scale all ranges
  //  * update VNI structures
  //  * scale instruction numberings
254 // Scale the MBB indices.
256 for (MachineFunction::iterator MBB = mf_->begin(), MBBE = mf_->end();
257 MBB != MBBE; ++MBB) {
258 std::pair<unsigned, unsigned> &mbbIndices = MBB2IdxMap[MBB->getNumber()];
259 mbbIndices.first = InstrSlots::scale(mbbIndices.first, factor);
260 mbbIndices.second = InstrSlots::scale(mbbIndices.second, factor);
    Idx2MBBMap.push_back(std::make_pair(mbbIndices.first, MBB));
  }
  std::sort(Idx2MBBMap.begin(), Idx2MBBMap.end(), Idx2MBBCompare());
265 // Scale the intervals.
  for (iterator LI = begin(), LE = end(); LI != LE; ++LI) {
    LI->second->scaleNumbering(factor);
  }

  // Scale MachineInstrs.
271 Mi2IndexMap oldmi2iMap = mi2iMap_;
272 unsigned highestSlot = 0;
  for (Mi2IndexMap::iterator MI = oldmi2iMap.begin(), ME = oldmi2iMap.end();
       MI != ME; ++MI) {
    unsigned newSlot = InstrSlots::scale(MI->second, factor);
    mi2iMap_[MI->first] = newSlot;
    highestSlot = std::max(highestSlot, newSlot);
  }

  i2miMap_.clear();
  i2miMap_.resize(highestSlot + 1);
  for (Mi2IndexMap::iterator MI = mi2iMap_.begin(), ME = mi2iMap_.end();
       MI != ME; ++MI)
    i2miMap_[MI->second] = MI->first;
}
/// runOnMachineFunction - Compute live intervals for the whole function.
///
bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
  mf_ = &fn;
  mri_ = &mf_->getRegInfo();
  tm_ = &fn.getTarget();
  tri_ = tm_->getRegisterInfo();
  tii_ = tm_->getInstrInfo();
  aa_ = &getAnalysis<AliasAnalysis>();
  lv_ = &getAnalysis<LiveVariables>();
  allocatableRegs_ = tri_->getAllocatableSet(fn);

  computeNumbering();
  computeIntervals();

  numIntervals += getNumIntervals();

  return true;
}
311 /// print - Implement the dump method.
312 void LiveIntervals::print(std::ostream &O, const Module* ) const {
313 O << "********** INTERVALS **********\n";
  for (const_iterator I = begin(), E = end(); I != E; ++I) {
    I->second->print(O, tri_);
    O << "\n";
  }

  O << "********** MACHINEINSTRS **********\n";
320 for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end();
321 mbbi != mbbe; ++mbbi) {
322 O << ((Value*)mbbi->getBasicBlock())->getName() << ":\n";
323 for (MachineBasicBlock::iterator mii = mbbi->begin(),
324 mie = mbbi->end(); mii != mie; ++mii) {
      O << getInstructionIndex(mii) << '\t' << *mii;
    }
  }
}
/// conflictsWithPhysRegDef - Returns true if the specified register
/// is defined during the live range of the specified interval.
332 bool LiveIntervals::conflictsWithPhysRegDef(const LiveInterval &li,
333 VirtRegMap &vrm, unsigned reg) {
334 for (LiveInterval::Ranges::const_iterator
335 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
336 for (unsigned index = getBaseIndex(I->start),
337 end = getBaseIndex(I->end-1) + InstrSlots::NUM; index != end;
338 index += InstrSlots::NUM) {
339 // skip deleted instructions
340 while (index != end && !getInstructionFromIndex(index))
341 index += InstrSlots::NUM;
342 if (index == end) break;
344 MachineInstr *MI = getInstructionFromIndex(index);
345 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
346 if (tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
347 if (SrcReg == li.reg || DstReg == li.reg)
349 for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
350 MachineOperand& mop = MI->getOperand(i);
353 unsigned PhysReg = mop.getReg();
354 if (PhysReg == 0 || PhysReg == li.reg)
356 if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
357 if (!vrm.hasPhys(PhysReg))
359 PhysReg = vrm.getPhys(PhysReg);
361 if (PhysReg && tri_->regsOverlap(PhysReg, reg))
/// conflictsWithPhysRegRef - Similar to conflictsWithPhysRegDef except that
/// it can also check uses.
372 bool LiveIntervals::conflictsWithPhysRegRef(LiveInterval &li,
373 unsigned Reg, bool CheckUse,
374 SmallPtrSet<MachineInstr*,32> &JoinedCopies) {
375 for (LiveInterval::Ranges::const_iterator
376 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
377 for (unsigned index = getBaseIndex(I->start),
378 end = getBaseIndex(I->end-1) + InstrSlots::NUM; index != end;
379 index += InstrSlots::NUM) {
380 // Skip deleted instructions.
381 MachineInstr *MI = 0;
382 while (index != end) {
383 MI = getInstructionFromIndex(index);
386 index += InstrSlots::NUM;
388 if (index == end) break;
390 if (JoinedCopies.count(MI))
392 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
393 MachineOperand& MO = MI->getOperand(i);
396 if (MO.isUse() && !CheckUse)
398 unsigned PhysReg = MO.getReg();
399 if (PhysReg == 0 || TargetRegisterInfo::isVirtualRegister(PhysReg))
401 if (tri_->isSubRegister(Reg, PhysReg))
411 void LiveIntervals::printRegName(unsigned reg) const {
412 if (TargetRegisterInfo::isPhysicalRegister(reg))
413 cerr << tri_->getName(reg);
415 cerr << "%reg" << reg;
418 void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
419 MachineBasicBlock::iterator mi,
420 unsigned MIIdx, MachineOperand& MO,
422 LiveInterval &interval) {
423 DOUT << "\t\tregister: "; DEBUG(printRegName(interval.reg));
424 LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);
426 if (mi->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
    DOUT << "is an implicit_def\n";
431 // Virtual registers may be defined multiple times (due to phi
432 // elimination and 2-addr elimination). Much of what we do only has to be
433 // done once for the vreg. We use an empty interval to detect the first
434 // time we see a vreg.
435 if (interval.empty()) {
436 // Get the Idx of the defining instructions.
437 unsigned defIndex = getDefIndex(MIIdx);
438 // Earlyclobbers move back one.
439 if (MO.isEarlyClobber())
440 defIndex = getUseIndex(MIIdx);
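    // (An early-clobber def is given the use slot so that its live range is
    // seen to overlap the values read by the same instruction.)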
442 MachineInstr *CopyMI = NULL;
443 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
444 if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
445 mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
446 mi->getOpcode() == TargetInstrInfo::SUBREG_TO_REG ||
447 tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
449 // Earlyclobbers move back one.
450 ValNo = interval.getNextValue(defIndex, CopyMI, true, VNInfoAllocator);
452 assert(ValNo->id == 0 && "First value in interval is not 0?");
454 // Loop over all of the blocks that the vreg is defined in. There are
455 // two cases we have to handle here. The most common case is a vreg
456 // whose lifetime is contained within a basic block. In this case there
457 // will be a single kill, in MBB, which comes after the definition.
458 if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
459 // FIXME: what about dead vars?
461 if (vi.Kills[0] != mi)
462 killIdx = getUseIndex(getInstructionIndex(vi.Kills[0]))+1;
464 killIdx = defIndex+1;
466 // If the kill happens after the definition, we have an intra-block
468 if (killIdx > defIndex) {
469 assert(vi.AliveBlocks.empty() &&
470 "Shouldn't be alive across any blocks!");
471 LiveRange LR(defIndex, killIdx, ValNo);
472 interval.addRange(LR);
473 DOUT << " +" << LR << "\n";
474 interval.addKill(ValNo, killIdx);
479 // The other case we handle is when a virtual register lives to the end
480 // of the defining block, potentially live across some blocks, then is
481 // live into some number of blocks, but gets killed. Start by adding a
482 // range that goes from this definition to the end of the defining block.
483 LiveRange NewLR(defIndex, getMBBEndIdx(mbb)+1, ValNo);
484 DOUT << " +" << NewLR;
485 interval.addRange(NewLR);
    // Iterate over all of the blocks that the variable is completely
    // live in, adding [instrIndex(begin), instrIndex(end)+4) to the
    // live interval.
490 for (SparseBitVector<>::iterator I = vi.AliveBlocks.begin(),
491 E = vi.AliveBlocks.end(); I != E; ++I) {
492 LiveRange LR(getMBBStartIdx(*I),
493 getMBBEndIdx(*I)+1, // MBB ends at -1.
495 interval.addRange(LR);
499 // Finally, this virtual register is live from the start of any killing
500 // block to the 'use' slot of the killing instruction.
501 for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
502 MachineInstr *Kill = vi.Kills[i];
503 unsigned killIdx = getUseIndex(getInstructionIndex(Kill))+1;
504 LiveRange LR(getMBBStartIdx(Kill->getParent()),
506 interval.addRange(LR);
507 interval.addKill(ValNo, killIdx);
    // If this is the second time we see a virtual register definition, it
    // must be due to phi elimination or two addr elimination. If this is
    // the result of two address elimination, then the vreg is one of the
    // def-and-use register operands.
516 if (mi->isRegTiedToUseOperand(MOIdx)) {
      // If this is a two-address definition, then we have already processed
      // the live range. The only problem is that we didn't realize there
      // are actually two values in the live interval. Because of this we
      // need to take the live range that defines this register and split it
      // into two values.
522 assert(interval.containsOneValue());
523 unsigned DefIndex = getDefIndex(interval.getValNumInfo(0)->def);
524 unsigned RedefIndex = getDefIndex(MIIdx);
525 if (MO.isEarlyClobber())
526 RedefIndex = getUseIndex(MIIdx);
528 const LiveRange *OldLR = interval.getLiveRangeContaining(RedefIndex-1);
529 VNInfo *OldValNo = OldLR->valno;
531 // Delete the initial value, which should be short and continuous,
532 // because the 2-addr copy must be in the same MBB as the redef.
533 interval.removeRange(DefIndex, RedefIndex);
535 // Two-address vregs should always only be redefined once. This means
536 // that at this point, there should be exactly one value number in it.
537 assert(interval.containsOneValue() && "Unexpected 2-addr liveint!");
      // The new value number (#1) is defined by the instruction we claimed
      // defined value #0.
      VNInfo *ValNo = interval.getNextValue(OldValNo->def, OldValNo->copy,
                                            false, // update at *
                                            VNInfoAllocator);
      ValNo->setFlags(OldValNo->getFlags()); // * <- updating here
546 // Value#0 is now defined by the 2-addr instruction.
547 OldValNo->def = RedefIndex;
549 if (MO.isEarlyClobber())
550 OldValNo->setHasRedefByEC(true);
552 // Add the new live interval which replaces the range for the input copy.
553 LiveRange LR(DefIndex, RedefIndex, ValNo);
554 DOUT << " replace range with " << LR;
555 interval.addRange(LR);
556 interval.addKill(ValNo, RedefIndex);
      // If this redefinition is dead, we need to add a dummy unit live
      // range covering the def slot.
      if (MO.isDead())
        interval.addRange(LiveRange(RedefIndex, RedefIndex+1, OldValNo));
564 interval.print(DOUT, tri_);
567 // Otherwise, this must be because of phi elimination. If this is the
568 // first redefinition of the vreg that we have seen, go back and change
569 // the live range in the PHI block to be a different value number.
570 if (interval.containsOneValue()) {
571 assert(vi.Kills.size() == 1 &&
572 "PHI elimination vreg should have one kill, the PHI itself!");
574 // Remove the old range that we now know has an incorrect number.
575 VNInfo *VNI = interval.getValNumInfo(0);
576 MachineInstr *Killer = vi.Kills[0];
577 unsigned Start = getMBBStartIdx(Killer->getParent());
578 unsigned End = getUseIndex(getInstructionIndex(Killer))+1;
579 DOUT << " Removing [" << Start << "," << End << "] from: ";
580 interval.print(DOUT, tri_); DOUT << "\n";
581 interval.removeRange(Start, End);
582 VNI->setHasPHIKill(true);
583 DOUT << " RESULT: "; interval.print(DOUT, tri_);
585 // Replace the interval with one of a NEW value number. Note that this
586 // value number isn't actually defined by an instruction, weird huh? :)
587 LiveRange LR(Start, End,
588 interval.getNextValue(mbb->getNumber(), 0, false, VNInfoAllocator));
589 LR.valno->setIsPHIDef(true);
590 DOUT << " replace range with " << LR;
591 interval.addRange(LR);
592 interval.addKill(LR.valno, End);
593 DOUT << " RESULT: "; interval.print(DOUT, tri_);
596 // In the case of PHI elimination, each variable definition is only
597 // live until the end of the block. We've already taken care of the
598 // rest of the live range.
599 unsigned defIndex = getDefIndex(MIIdx);
600 if (MO.isEarlyClobber())
601 defIndex = getUseIndex(MIIdx);
604 MachineInstr *CopyMI = NULL;
605 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
606 if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
607 mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
608 mi->getOpcode() == TargetInstrInfo::SUBREG_TO_REG ||
609 tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
611 ValNo = interval.getNextValue(defIndex, CopyMI, true, VNInfoAllocator);
613 unsigned killIndex = getMBBEndIdx(mbb) + 1;
614 LiveRange LR(defIndex, killIndex, ValNo);
615 interval.addRange(LR);
616 interval.addKill(ValNo, killIndex);
617 ValNo->setHasPHIKill(true);
625 void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
626 MachineBasicBlock::iterator mi,
629 LiveInterval &interval,
630 MachineInstr *CopyMI) {
  // A physical register cannot be live across basic blocks, so its
  // lifetime must end somewhere in its defining basic block.
633 DOUT << "\t\tregister: "; DEBUG(printRegName(interval.reg));
635 unsigned baseIndex = MIIdx;
636 unsigned start = getDefIndex(baseIndex);
637 // Earlyclobbers move back one.
638 if (MO.isEarlyClobber())
639 start = getUseIndex(MIIdx);
640 unsigned end = start;
642 // If it is not used after definition, it is considered dead at
643 // the instruction defining it. Hence its interval is:
644 // [defSlot(def), defSlot(def)+1)
651 // If it is not dead on definition, it must be killed by a
652 // subsequent instruction. Hence its interval is:
653 // [defSlot(def), useSlot(kill)+1)
654 baseIndex += InstrSlots::NUM;
655 while (++mi != MBB->end()) {
656 while (baseIndex / InstrSlots::NUM < i2miMap_.size() &&
657 getInstructionFromIndex(baseIndex) == 0)
658 baseIndex += InstrSlots::NUM;
659 if (mi->killsRegister(interval.reg, tri_)) {
661 end = getUseIndex(baseIndex) + 1;
664 int DefIdx = mi->findRegisterDefOperandIdx(interval.reg, false, tri_);
666 if (mi->isRegTiedToUseOperand(DefIdx)) {
667 // Two-address instruction.
668 end = getDefIndex(baseIndex);
669 if (mi->getOperand(DefIdx).isEarlyClobber())
670 end = getUseIndex(baseIndex);
672 // Another instruction redefines the register before it is ever read.
673 // Then the register is essentially dead at the instruction that defines
674 // it. Hence its interval is:
675 // [defSlot(def), defSlot(def)+1)
683 baseIndex += InstrSlots::NUM;
  // The only case we should have a dead physreg here without a killing or
  // any other instruction where we know it's dead is if it is live-in to
  // the function and never used. Another possible case is that the implicit
  // use of the physical register has been deleted by the two-address pass.
693 assert(start < end && "did not find end of interval?");
695 // Already exists? Extend old live interval.
696 LiveInterval::iterator OldLR = interval.FindLiveRangeContaining(start);
697 bool Extend = OldLR != interval.end();
698 VNInfo *ValNo = Extend
699 ? OldLR->valno : interval.getNextValue(start, CopyMI, true, VNInfoAllocator);
700 if (MO.isEarlyClobber() && Extend)
701 ValNo->setHasRedefByEC(true);
702 LiveRange LR(start, end, ValNo);
703 interval.addRange(LR);
704 interval.addKill(LR.valno, end);
705 DOUT << " +" << LR << '\n';
708 void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
709 MachineBasicBlock::iterator MI,
713 if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
714 handleVirtualRegisterDef(MBB, MI, MIIdx, MO, MOIdx,
715 getOrCreateInterval(MO.getReg()));
716 else if (allocatableRegs_[MO.getReg()]) {
717 MachineInstr *CopyMI = NULL;
718 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
719 if (MI->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
720 MI->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
721 MI->getOpcode() == TargetInstrInfo::SUBREG_TO_REG ||
722 tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
724 handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
725 getOrCreateInterval(MO.getReg()), CopyMI);
726 // Def of a register also defines its sub-registers.
727 for (const unsigned* AS = tri_->getSubRegisters(MO.getReg()); *AS; ++AS)
728 // If MI also modifies the sub-register explicitly, avoid processing it
729 // more than once. Do not pass in TRI here so it checks for exact match.
730 if (!MI->modifiesRegister(*AS))
731 handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
732 getOrCreateInterval(*AS), 0);
736 void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
738 LiveInterval &interval, bool isAlias) {
739 DOUT << "\t\tlivein register: "; DEBUG(printRegName(interval.reg));
  // Look for kills; if the value reaches a def before it's killed, then it
  // shouldn't be considered live-in.
743 MachineBasicBlock::iterator mi = MBB->begin();
744 unsigned baseIndex = MIIdx;
745 unsigned start = baseIndex;
746 while (baseIndex / InstrSlots::NUM < i2miMap_.size() &&
747 getInstructionFromIndex(baseIndex) == 0)
748 baseIndex += InstrSlots::NUM;
749 unsigned end = baseIndex;
750 bool SeenDefUse = false;
752 while (mi != MBB->end()) {
753 if (mi->killsRegister(interval.reg, tri_)) {
755 end = getUseIndex(baseIndex) + 1;
758 } else if (mi->modifiesRegister(interval.reg, tri_)) {
759 // Another instruction redefines the register before it is ever read.
760 // Then the register is essentially dead at the instruction that defines
761 // it. Hence its interval is:
762 // [defSlot(def), defSlot(def)+1)
764 end = getDefIndex(start) + 1;
769 baseIndex += InstrSlots::NUM;
771 if (mi != MBB->end()) {
772 while (baseIndex / InstrSlots::NUM < i2miMap_.size() &&
773 getInstructionFromIndex(baseIndex) == 0)
774 baseIndex += InstrSlots::NUM;
778 // Live-in register might not be used at all.
782 end = getDefIndex(MIIdx) + 1;
784 DOUT << " live through";
790 interval.getNextValue(MBB->getNumber(), 0, false, VNInfoAllocator);
791 vni->setIsPHIDef(true);
792 LiveRange LR(start, end, vni);
794 interval.addRange(LR);
795 interval.addKill(LR.valno, end);
796 DOUT << " +" << LR << '\n';
/// computeIntervals - Computes the live intervals for virtual
/// registers. For some ordering of the machine instructions [1,N], a
/// live interval is an interval [i, j) where 1 <= i <= j < N for
/// which a variable is live.
803 void LiveIntervals::computeIntervals() {
805 DOUT << "********** COMPUTING LIVE INTERVALS **********\n"
806 << "********** Function: "
807 << ((Value*)mf_->getFunction())->getName() << '\n';
809 for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
811 MachineBasicBlock *MBB = MBBI;
812 // Track the index of the current machine instr.
813 unsigned MIIndex = getMBBStartIdx(MBB);
814 DOUT << ((Value*)MBB->getBasicBlock())->getName() << ":\n";
816 MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();
818 // Create intervals for live-ins to this BB first.
819 for (MachineBasicBlock::const_livein_iterator LI = MBB->livein_begin(),
820 LE = MBB->livein_end(); LI != LE; ++LI) {
821 handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
822 // Multiple live-ins can alias the same register.
823 for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
824 if (!hasInterval(*AS))
825 handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
829 // Skip over empty initial indices.
830 while (MIIndex / InstrSlots::NUM < i2miMap_.size() &&
831 getInstructionFromIndex(MIIndex) == 0)
832 MIIndex += InstrSlots::NUM;
834 for (; MI != miEnd; ++MI) {
835 DOUT << MIIndex << "\t" << *MI;
838 for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
839 MachineOperand &MO = MI->getOperand(i);
840 // handle register defs - build intervals
841 if (MO.isReg() && MO.getReg() && MO.isDef()) {
842 handleRegisterDef(MBB, MI, MIIndex, MO, i);
846 // Skip over the empty slots after each instruction.
847 unsigned Slots = MI->getDesc().getNumDefs();
850 MIIndex += InstrSlots::NUM * Slots;
852 // Skip over empty indices.
853 while (MIIndex / InstrSlots::NUM < i2miMap_.size() &&
854 getInstructionFromIndex(MIIndex) == 0)
855 MIIndex += InstrSlots::NUM;
860 bool LiveIntervals::findLiveInMBBs(unsigned Start, unsigned End,
861 SmallVectorImpl<MachineBasicBlock*> &MBBs) const {
862 std::vector<IdxMBBPair>::const_iterator I =
863 std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), Start);
866 while (I != Idx2MBBMap.end()) {
869 MBBs.push_back(I->second);
876 bool LiveIntervals::findReachableMBBs(unsigned Start, unsigned End,
877 SmallVectorImpl<MachineBasicBlock*> &MBBs) const {
878 std::vector<IdxMBBPair>::const_iterator I =
879 std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), Start);
882 while (I != Idx2MBBMap.end()) {
885 MachineBasicBlock *MBB = I->second;
886 if (getMBBEndIdx(MBB) > End)
888 for (MachineBasicBlock::succ_iterator SI = MBB->succ_begin(),
889 SE = MBB->succ_end(); SI != SE; ++SI)
897 LiveInterval* LiveIntervals::createInterval(unsigned reg) {
898 float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ? HUGE_VALF : 0.0F;
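  // Physical register intervals get an effectively infinite spill weight so
  // the spiller never tries to spill them; virtual register intervals start
  // at zero and accumulate weight from their uses (see getSpillWeight).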
899 return new LiveInterval(reg, Weight);
902 /// dupInterval - Duplicate a live interval. The caller is responsible for
903 /// managing the allocated memory.
904 LiveInterval* LiveIntervals::dupInterval(LiveInterval *li) {
905 LiveInterval *NewLI = createInterval(li->reg);
906 NewLI->Copy(*li, mri_, getVNInfoAllocator());
910 /// getVNInfoSourceReg - Helper function that parses the specified VNInfo
911 /// copy field and returns the source register that defines it.
912 unsigned LiveIntervals::getVNInfoSourceReg(const VNInfo *VNI) const {
916 if (VNI->copy->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG) {
917 // If it's extracting out of a physical register, return the sub-register.
918 unsigned Reg = VNI->copy->getOperand(1).getReg();
919 if (TargetRegisterInfo::isPhysicalRegister(Reg))
920 Reg = tri_->getSubReg(Reg, VNI->copy->getOperand(2).getImm());
922 } else if (VNI->copy->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
923 VNI->copy->getOpcode() == TargetInstrInfo::SUBREG_TO_REG)
924 return VNI->copy->getOperand(2).getReg();
926 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
927 if (tii_->isMoveInstr(*VNI->copy, SrcReg, DstReg, SrcSubReg, DstSubReg))
929 assert(0 && "Unrecognized copy instruction!");
933 //===----------------------------------------------------------------------===//
934 // Register allocator hooks.
937 /// getReMatImplicitUse - If the remat definition MI has one (for now, we only
938 /// allow one) virtual register operand, then its uses are implicitly using
939 /// the register. Returns the virtual register.
940 unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
941 MachineInstr *MI) const {
943 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
944 MachineOperand &MO = MI->getOperand(i);
945 if (!MO.isReg() || !MO.isUse())
947 unsigned Reg = MO.getReg();
948 if (Reg == 0 || Reg == li.reg)
951 if (TargetRegisterInfo::isPhysicalRegister(Reg) &&
952 !allocatableRegs_[Reg])
954 // FIXME: For now, only remat MI with at most one register operand.
956 "Can't rematerialize instruction with multiple register operand!");
965 /// isValNoAvailableAt - Return true if the val# of the specified interval
966 /// which reaches the given instruction also reaches the specified use index.
967 bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
968 unsigned UseIdx) const {
969 unsigned Index = getInstructionIndex(MI);
970 VNInfo *ValNo = li.FindLiveRangeContaining(Index)->valno;
971 LiveInterval::const_iterator UI = li.FindLiveRangeContaining(UseIdx);
972 return UI != li.end() && UI->valno == ValNo;
975 /// isReMaterializable - Returns true if the definition MI of the specified
976 /// val# of the specified interval is re-materializable.
977 bool LiveIntervals::isReMaterializable(const LiveInterval &li,
978 const VNInfo *ValNo, MachineInstr *MI,
979 SmallVectorImpl<LiveInterval*> &SpillIs,
984 if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF)
988 if (tii_->isLoadFromStackSlot(MI, FrameIdx) &&
989 mf_->getFrameInfo()->isImmutableObjectIndex(FrameIdx))
    // FIXME: Let target-specific isReallyTriviallyReMaterializable determine
    // this, but remember this is not safe to fold into a two-address
    // instruction.
    // This is a load from a fixed stack slot. It can be rematerialized.
    return true;
996 // If the target-specific rules don't identify an instruction as
997 // being trivially rematerializable, use some target-independent
999 if (!MI->getDesc().isRematerializable() ||
1000 !tii_->isTriviallyReMaterializable(MI)) {
1001 if (!EnableAggressiveRemat)
1004 // If the instruction accesses memory but the memoperands have been lost,
1005 // we can't analyze it.
1006 const TargetInstrDesc &TID = MI->getDesc();
1007 if ((TID.mayLoad() || TID.mayStore()) && MI->memoperands_empty())
1010 // Avoid instructions obviously unsafe for remat.
1011 if (TID.hasUnmodeledSideEffects() || TID.isNotDuplicable())
1014 // If the instruction accesses memory and the memory could be non-constant,
1015 // assume the instruction is not rematerializable.
1016 for (std::list<MachineMemOperand>::const_iterator
1017 I = MI->memoperands_begin(), E = MI->memoperands_end(); I != E; ++I){
1018 const MachineMemOperand &MMO = *I;
1019 if (MMO.isVolatile() || MMO.isStore())
1021 const Value *V = MMO.getValue();
1024 if (const PseudoSourceValue *PSV = dyn_cast<PseudoSourceValue>(V)) {
1025 if (!PSV->isConstant(mf_->getFrameInfo()))
1027 } else if (!aa_->pointsToConstantMemory(V))
1031 // If any of the registers accessed are non-constant, conservatively assume
1032 // the instruction is not rematerializable.
1033 unsigned ImpUse = 0;
1034 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1035 const MachineOperand &MO = MI->getOperand(i);
1037 unsigned Reg = MO.getReg();
1040 if (TargetRegisterInfo::isPhysicalRegister(Reg))
1043 // Only allow one def, and that in the first operand.
1044 if (MO.isDef() != (i == 0))
1047 // Only allow constant-valued registers.
1048 bool IsLiveIn = mri_->isLiveIn(Reg);
1049 MachineRegisterInfo::def_iterator I = mri_->def_begin(Reg),
1050 E = mri_->def_end();
1052 // For the def, it should be the only def of that register.
1053 if (MO.isDef() && (next(I) != E || IsLiveIn))
      // Only allow one other register use, as that's all the remat
      // mechanisms currently support.
1059 if (Reg != li.reg) {
1062 else if (Reg != ImpUse)
1065 // For the use, there should be only one associated def.
1066 if (I != E && (next(I) != E || IsLiveIn))
1073 unsigned ImpUse = getReMatImplicitUse(li, MI);
1075 const LiveInterval &ImpLi = getInterval(ImpUse);
1076 for (MachineRegisterInfo::use_iterator ri = mri_->use_begin(li.reg),
1077 re = mri_->use_end(); ri != re; ++ri) {
1078 MachineInstr *UseMI = &*ri;
1079 unsigned UseIdx = getInstructionIndex(UseMI);
1080 if (li.FindLiveRangeContaining(UseIdx)->valno != ValNo)
1082 if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
1086 // If a register operand of the re-materialized instruction is going to
1087 // be spilled next, then it's not legal to re-materialize this instruction.
1088 for (unsigned i = 0, e = SpillIs.size(); i != e; ++i)
1089 if (ImpUse == SpillIs[i]->reg)
1095 /// isReMaterializable - Returns true if the definition MI of the specified
1096 /// val# of the specified interval is re-materializable.
1097 bool LiveIntervals::isReMaterializable(const LiveInterval &li,
1098 const VNInfo *ValNo, MachineInstr *MI) {
1099 SmallVector<LiveInterval*, 4> Dummy1;
1101 return isReMaterializable(li, ValNo, MI, Dummy1, Dummy2);
/// isReMaterializable - Returns true if the definition MI of every val# of
/// the specified interval is re-materializable.
1106 bool LiveIntervals::isReMaterializable(const LiveInterval &li,
1107 SmallVectorImpl<LiveInterval*> &SpillIs,
1110 for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
1112 const VNInfo *VNI = *i;
1113 if (VNI->isUnused())
1114 continue; // Dead val#.
1115 // Is the def for the val# rematerializable?
1116 if (!VNI->isDefAccurate())
1118 MachineInstr *ReMatDefMI = getInstructionFromIndex(VNI->def);
1119 bool DefIsLoad = false;
1121 !isReMaterializable(li, VNI, ReMatDefMI, SpillIs, DefIsLoad))
1123 isLoad |= DefIsLoad;
/// FilterFoldedOps - Filter out two-address use operands. Return
/// true if it finds any issue with the operands that ought to prevent
/// folding.
static bool FilterFoldedOps(MachineInstr *MI,
                            SmallVector<unsigned, 2> &Ops,
                            unsigned &MRInfo,
                            SmallVector<unsigned, 2> &FoldOps) {
1136 for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
1137 unsigned OpIdx = Ops[i];
1138 MachineOperand &MO = MI->getOperand(OpIdx);
1139 // FIXME: fold subreg use.
1143 MRInfo |= (unsigned)VirtRegMap::isMod;
1145 // Filter out two-address use operand(s).
1146 if (MI->isRegTiedToDefOperand(OpIdx)) {
1147 MRInfo = VirtRegMap::isModRef;
1150 MRInfo |= (unsigned)VirtRegMap::isRef;
1152 FoldOps.push_back(OpIdx);
/// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
/// slot / to reg or any rematerialized load into the ith operand of the
/// specified MI. If it is successful, MI is updated with the newly created
/// MI and returns true.
1162 bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
1163 VirtRegMap &vrm, MachineInstr *DefMI,
1165 SmallVector<unsigned, 2> &Ops,
1166 bool isSS, int Slot, unsigned Reg) {
1167 // If it is an implicit def instruction, just delete it.
1168 if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
1169 RemoveMachineInstrFromMaps(MI);
1170 vrm.RemoveMachineInstrFromMaps(MI);
1171 MI->eraseFromParent();
1176 // Filter the list of operand indexes that are to be folded. Abort if
1177 // any operand will prevent folding.
1178 unsigned MRInfo = 0;
1179 SmallVector<unsigned, 2> FoldOps;
1180 if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
  // The only time it's safe to fold into a two-address instruction is when
  // it's folding a reload and spill from / into a spill stack slot.
1185 if (DefMI && (MRInfo & VirtRegMap::isMod))
1188 MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(*mf_, MI, FoldOps, Slot)
1189 : tii_->foldMemoryOperand(*mf_, MI, FoldOps, DefMI);
1191 // Remember this instruction uses the spill slot.
1192 if (isSS) vrm.addSpillSlotUse(Slot, fmi);
1194 // Attempt to fold the memory reference into the instruction. If
1195 // we can do this, we don't need to insert spill code.
1196 MachineBasicBlock &MBB = *MI->getParent();
1197 if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
1198 vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
1199 vrm.transferSpillPts(MI, fmi);
1200 vrm.transferRestorePts(MI, fmi);
1201 vrm.transferEmergencySpills(MI, fmi);
1203 i2miMap_[InstrIdx /InstrSlots::NUM] = fmi;
1204 mi2iMap_[fmi] = InstrIdx;
1205 MI = MBB.insert(MBB.erase(MI), fmi);
1212 /// canFoldMemoryOperand - Returns true if the specified load / store
1213 /// folding is possible.
1214 bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
1215 SmallVector<unsigned, 2> &Ops,
1217 // Filter the list of operand indexes that are to be folded. Abort if
1218 // any operand will prevent folding.
1219 unsigned MRInfo = 0;
1220 SmallVector<unsigned, 2> FoldOps;
1221 if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
1224 // It's only legal to remat for a use, not a def.
1225 if (ReMat && (MRInfo & VirtRegMap::isMod))
1228 return tii_->canFoldMemoryOperand(MI, FoldOps);
1231 bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
1232 SmallPtrSet<MachineBasicBlock*, 4> MBBs;
1233 for (LiveInterval::Ranges::const_iterator
1234 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1235 std::vector<IdxMBBPair>::const_iterator II =
1236 std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), I->start);
1237 if (II == Idx2MBBMap.end())
1239 if (I->end > II->first) // crossing a MBB.
1241 MBBs.insert(II->second);
1242 if (MBBs.size() > 1)
1248 /// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of
1249 /// interval on to-be re-materialized operands of MI) with new register.
1250 void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
1251 MachineInstr *MI, unsigned NewVReg,
  // There is an implicit use. That means one of the other operands is
  // being remat'ed and the remat'ed instruction has li.reg as a
  // use operand. Make sure we rewrite that as well.
1256 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1257 MachineOperand &MO = MI->getOperand(i);
1260 unsigned Reg = MO.getReg();
1261 if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
1263 if (!vrm.isReMaterialized(Reg))
1265 MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
1266 MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg);
1268 UseMO->setReg(NewVReg);
1272 /// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
1273 /// for addIntervalsForSpills to rewrite uses / defs for the given live range.
1274 bool LiveIntervals::
1275 rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
1276 bool TrySplit, unsigned index, unsigned end, MachineInstr *MI,
1277 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
1278 unsigned Slot, int LdSlot,
1279 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
1281 const TargetRegisterClass* rc,
1282 SmallVector<int, 4> &ReMatIds,
1283 const MachineLoopInfo *loopInfo,
1284 unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
1285 DenseMap<unsigned,unsigned> &MBBVRegsMap,
1286 std::vector<LiveInterval*> &NewLIs) {
1287 bool CanFold = false;
1289 for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
1290 MachineOperand& mop = MI->getOperand(i);
1293 unsigned Reg = mop.getReg();
1294 unsigned RegI = Reg;
1295 if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
1300 bool TryFold = !DefIsReMat;
1301 bool FoldSS = true; // Default behavior unless it's a remat.
1302 int FoldSlot = Slot;
1304 // If this is the rematerializable definition MI itself and
1305 // all of its uses are rematerialized, simply delete it.
1306 if (MI == ReMatOrigDefMI && CanDelete) {
        DOUT << "\t\t\t\tErasing re-materializable def: ";
1309 RemoveMachineInstrFromMaps(MI);
1310 vrm.RemoveMachineInstrFromMaps(MI);
1311 MI->eraseFromParent();
1315 // If def for this use can't be rematerialized, then try folding.
1316 // If def is rematerializable and it's a load, also try folding.
1317 TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
1319 // Try fold loads (from stack slot, constant pool, etc.) into uses.
1325 // Scan all of the operands of this instruction rewriting operands
1326 // to use NewVReg instead of li.reg as appropriate. We do this for
1329 // 1. If the instr reads the same spilled vreg multiple times, we
1330 // want to reuse the NewVReg.
1331 // 2. If the instr is a two-addr instruction, we are required to
1332 // keep the src/dst regs pinned.
1334 // Keep track of whether we replace a use and/or def so that we can
1335 // create the spill interval with the appropriate range.
1337 HasUse = mop.isUse();
1338 HasDef = mop.isDef();
1339 SmallVector<unsigned, 2> Ops;
1341 for (unsigned j = i+1, e = MI->getNumOperands(); j != e; ++j) {
1342 const MachineOperand &MOj = MI->getOperand(j);
1345 unsigned RegJ = MOj.getReg();
1346 if (RegJ == 0 || TargetRegisterInfo::isPhysicalRegister(RegJ))
1350 HasUse |= MOj.isUse();
1351 HasDef |= MOj.isDef();
1355 if (HasUse && !li.liveAt(getUseIndex(index)))
      // Must be defined by an implicit def. It should not be spilled. Note,
      // this is for correctness reasons, e.g.
      // 8   %reg1024<def> = IMPLICIT_DEF
      // 12  %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
      // The live range [12, 14) is not part of the r1024 live interval since
      // it's defined by an implicit def. It will not conflict with the live
      // interval of r1025. Now suppose both registers are spilled; you can
      // easily see a situation where both registers are reloaded before
      // the INSERT_SUBREG and both target registers would overlap.
      continue;
1367 // Create a new virtual register for the spill interval.
1368 // Create the new register now so we can map the fold instruction
1369 // to the new register so when it is unfolded we get the correct
1371 bool CreatedNewVReg = false;
1373 NewVReg = mri_->createVirtualRegister(rc);
1375 CreatedNewVReg = true;
1381 // Do not fold load / store here if we are splitting. We'll find an
1382 // optimal point to insert a load / store later.
1384 if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
1385 Ops, FoldSS, FoldSlot, NewVReg)) {
1386 // Folding the load/store can completely change the instruction in
1387 // unpredictable ways, rescan it from the beginning.
1390 // We need to give the new vreg the same stack slot as the
1391 // spilled interval.
1392 vrm.assignVirt2StackSlot(NewVReg, FoldSlot);
1398 if (isNotInMIMap(MI))
1400 goto RestartInstruction;
1403 // We'll try to fold it later if it's profitable.
1404 CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat);
1408 mop.setReg(NewVReg);
1409 if (mop.isImplicit())
1410 rewriteImplicitOps(li, MI, NewVReg, vrm);
1412 // Reuse NewVReg for other reads.
1413 for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
1414 MachineOperand &mopj = MI->getOperand(Ops[j]);
1415 mopj.setReg(NewVReg);
1416 if (mopj.isImplicit())
1417 rewriteImplicitOps(li, MI, NewVReg, vrm);
1420 if (CreatedNewVReg) {
1422 vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI/*, CanDelete*/);
1423 if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
1424 // Each valnum may have its own remat id.
1425 ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
1427 vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
1429 if (!CanDelete || (HasUse && HasDef)) {
          // If this is a two-addr instruction then its use operands are
          // rematerializable but its def is not. It should be assigned a
          // stack slot.
          vrm.assignVirt2StackSlot(NewVReg, Slot);
1436 vrm.assignVirt2StackSlot(NewVReg, Slot);
1438 } else if (HasUse && HasDef &&
1439 vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
1440 // If this interval hasn't been assigned a stack slot (because earlier
1441 // def is a deleted remat def), do it now.
1442 assert(Slot != VirtRegMap::NO_STACK_SLOT);
1443 vrm.assignVirt2StackSlot(NewVReg, Slot);
1446 // Re-matting an instruction with virtual register use. Add the
1447 // register as an implicit use on the use MI.
1448 if (DefIsReMat && ImpUse)
1449 MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
1451 // Create a new register interval for this spill / remat.
1452 LiveInterval &nI = getOrCreateInterval(NewVReg);
1453 if (CreatedNewVReg) {
1454 NewLIs.push_back(&nI);
1455 MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
1457 vrm.setIsSplitFromReg(NewVReg, li.reg);
1461 if (CreatedNewVReg) {
1462 LiveRange LR(getLoadIndex(index), getUseIndex(index)+1,
1463 nI.getNextValue(0, 0, false, VNInfoAllocator));
1467 // Extend the split live interval to this def / use.
1468 unsigned End = getUseIndex(index)+1;
1469 LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
1470 nI.getValNumInfo(nI.getNumValNums()-1));
1476 LiveRange LR(getDefIndex(index), getStoreIndex(index),
1477 nI.getNextValue(0, 0, false, VNInfoAllocator));
1482 DOUT << "\t\t\t\tAdded new interval: ";
1483 nI.print(DOUT, tri_);
1488 bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
1490 MachineBasicBlock *MBB, unsigned Idx) const {
1491 unsigned End = getMBBEndIdx(MBB);
1492 for (unsigned j = 0, ee = VNI->kills.size(); j != ee; ++j) {
1493 unsigned KillIdx = VNI->kills[j];
1494 if (KillIdx > Idx && KillIdx < End)
1500 /// RewriteInfo - Keep track of machine instrs that will be rewritten
1501 /// during spilling.
1503 struct RewriteInfo {
1508 RewriteInfo(unsigned i, MachineInstr *mi, bool u, bool d)
1509 : Index(i), MI(mi), HasUse(u), HasDef(d) {}
1512 struct RewriteInfoCompare {
1513 bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
1514 return LHS.Index < RHS.Index;
1519 void LiveIntervals::
1520 rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
1521 LiveInterval::Ranges::const_iterator &I,
1522 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
1523 unsigned Slot, int LdSlot,
1524 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
1526 const TargetRegisterClass* rc,
1527 SmallVector<int, 4> &ReMatIds,
1528 const MachineLoopInfo *loopInfo,
1529 BitVector &SpillMBBs,
1530 DenseMap<unsigned, std::vector<SRInfo> > &SpillIdxes,
1531 BitVector &RestoreMBBs,
1532 DenseMap<unsigned, std::vector<SRInfo> > &RestoreIdxes,
1533 DenseMap<unsigned,unsigned> &MBBVRegsMap,
1534 std::vector<LiveInterval*> &NewLIs) {
1535 bool AllCanFold = true;
1536 unsigned NewVReg = 0;
1537 unsigned start = getBaseIndex(I->start);
1538 unsigned end = getBaseIndex(I->end-1) + InstrSlots::NUM;
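  // start/end are rounded outward to whole-instruction base indices so that
  // any def or use whose instruction index falls inside this range is
  // collected by the scan below.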
1540 // First collect all the def / use in this live range that will be rewritten.
1541 // Make sure they are sorted according to instruction index.
1542 std::vector<RewriteInfo> RewriteMIs;
1543 for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
1544 re = mri_->reg_end(); ri != re; ) {
1545 MachineInstr *MI = &*ri;
1546 MachineOperand &O = ri.getOperand();
1548 assert(!O.isImplicit() && "Spilling register that's used as implicit use?");
1549 unsigned index = getInstructionIndex(MI);
1550 if (index < start || index >= end)
1552 if (O.isUse() && !li.liveAt(getUseIndex(index)))
      // Must be defined by an implicit def. It should not be spilled. Note,
      // this is for correctness reasons, e.g.
      // 8   %reg1024<def> = IMPLICIT_DEF
      // 12  %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
      // The live range [12, 14) is not part of the r1024 live interval since
      // it's defined by an implicit def. It will not conflict with the live
      // interval of r1025. Now suppose both registers are spilled; you can
      // easily see a situation where both registers are reloaded before
      // the INSERT_SUBREG and both target registers would overlap.
      continue;
1563 RewriteMIs.push_back(RewriteInfo(index, MI, O.isUse(), O.isDef()));
1565 std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());
1567 unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
1568 // Now rewrite the defs and uses.
1569 for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
1570 RewriteInfo &rwi = RewriteMIs[i];
1572 unsigned index = rwi.Index;
1573 bool MIHasUse = rwi.HasUse;
1574 bool MIHasDef = rwi.HasDef;
1575 MachineInstr *MI = rwi.MI;
    // If MI defs and/or uses the same register multiple times, then there
    // are multiple entries.
1578 unsigned NumUses = MIHasUse;
1579 while (i != e && RewriteMIs[i].MI == MI) {
1580 assert(RewriteMIs[i].Index == index);
1581 bool isUse = RewriteMIs[i].HasUse;
1582 if (isUse) ++NumUses;
1584 MIHasDef |= RewriteMIs[i].HasDef;
1587 MachineBasicBlock *MBB = MI->getParent();
1589 if (ImpUse && MI != ReMatDefMI) {
      // Re-matting an instruction with virtual register use. Update the
      // register interval's spill weight to HUGE_VALF to prevent it from
      // being spilled.
      LiveInterval &ImpLi = getInterval(ImpUse);
1594 ImpLi.weight = HUGE_VALF;
1597 unsigned MBBId = MBB->getNumber();
1598 unsigned ThisVReg = 0;
1600 DenseMap<unsigned,unsigned>::iterator NVI = MBBVRegsMap.find(MBBId);
1601 if (NVI != MBBVRegsMap.end()) {
1602 ThisVReg = NVI->second;
        // It's better to start a new interval to avoid artificially
        // extending the new interval.
1611 if (MIHasDef && !MIHasUse) {
1612 MBBVRegsMap.erase(MBB->getNumber());
1618 bool IsNew = ThisVReg == 0;
1620 // This ends the previous live interval. If all of its def / use
1621 // can be folded, give it a low spill weight.
1622 if (NewVReg && TrySplit && AllCanFold) {
1623 LiveInterval &nI = getOrCreateInterval(NewVReg);
1630 bool HasDef = false;
1631 bool HasUse = false;
1632 bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
1633 index, end, MI, ReMatOrigDefMI, ReMatDefMI,
1634 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1635 CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
1636 ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs);
1637 if (!HasDef && !HasUse)
1640 AllCanFold &= CanFold;
1642 // Update weight of spill interval.
1643 LiveInterval &nI = getOrCreateInterval(NewVReg);
1645 // The spill weight is now infinity as it cannot be spilled again.
1646 nI.weight = HUGE_VALF;
1650 // Keep track of the last def and first use in each MBB.
1652 if (MI != ReMatOrigDefMI || !CanDelete) {
1653 bool HasKill = false;
1655 HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, getDefIndex(index));
          // If this is a two-address instruction, then this index starts a
          // new VNInfo.
1658 const VNInfo *VNI = li.findDefinedVNInfo(getDefIndex(index));
1660 HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, getDefIndex(index));
1662 DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
1663 SpillIdxes.find(MBBId);
1665 if (SII == SpillIdxes.end()) {
1666 std::vector<SRInfo> S;
1667 S.push_back(SRInfo(index, NewVReg, true));
1668 SpillIdxes.insert(std::make_pair(MBBId, S));
1669 } else if (SII->second.back().vreg != NewVReg) {
1670 SII->second.push_back(SRInfo(index, NewVReg, true));
1671 } else if ((int)index > SII->second.back().index) {
1672 // If there is an earlier def and this is a two-address
1673 // instruction, then it's not possible to fold the store (which
1674 // would also fold the load).
1675 SRInfo &Info = SII->second.back();
1677 Info.canFold = !HasUse;
1679 SpillMBBs.set(MBBId);
1680 } else if (SII != SpillIdxes.end() &&
1681 SII->second.back().vreg == NewVReg &&
1682 (int)index > SII->second.back().index) {
1683 // There is an earlier def that's not killed (must be two-address).
1684 // The spill is no longer needed.
1685 SII->second.pop_back();
1686 if (SII->second.empty()) {
1687 SpillIdxes.erase(MBBId);
1688 SpillMBBs.reset(MBBId);
1695 DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
1696 SpillIdxes.find(MBBId);
1697 if (SII != SpillIdxes.end() &&
1698 SII->second.back().vreg == NewVReg &&
1699 (int)index > SII->second.back().index)
        // There are use(s) following the last def; it's not safe to fold
        // the spill.
1701 SII->second.back().canFold = false;
1702 DenseMap<unsigned, std::vector<SRInfo> >::iterator RII =
1703 RestoreIdxes.find(MBBId);
1704 if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
1705 // If we are splitting live intervals, only fold if it's the first
1706 // use and there isn't another use later in the MBB.
1707 RII->second.back().canFold = false;
1709 // Only need a reload if there isn't an earlier def / use.
1710 if (RII == RestoreIdxes.end()) {
1711 std::vector<SRInfo> Infos;
1712 Infos.push_back(SRInfo(index, NewVReg, true));
1713 RestoreIdxes.insert(std::make_pair(MBBId, Infos));
1715 RII->second.push_back(SRInfo(index, NewVReg, true));
1717 RestoreMBBs.set(MBBId);
1721 // Update spill weight.
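    // getSpillWeight's contribution grows rapidly with the loop depth of
    // this block, so defs and uses inside deep loops make the new interval
    // much less attractive to spill again.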
1722 unsigned loopDepth = loopInfo->getLoopDepth(MBB);
1723 nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
1726 if (NewVReg && TrySplit && AllCanFold) {
1727 // If all of its def / use can be folded, give it a low spill weight.
1728 LiveInterval &nI = getOrCreateInterval(NewVReg);
1733 bool LiveIntervals::alsoFoldARestore(int Id, int index, unsigned vr,
1734 BitVector &RestoreMBBs,
1735 DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1736 if (!RestoreMBBs[Id])
1738 std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1739 for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1740 if (Restores[i].index == index &&
1741 Restores[i].vreg == vr &&
1742 Restores[i].canFold)
1747 void LiveIntervals::eraseRestoreInfo(int Id, int index, unsigned vr,
1748 BitVector &RestoreMBBs,
1749 DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1750 if (!RestoreMBBs[Id])
1752 std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1753 for (unsigned i = 0, e = Restores.size(); i != e; ++i)
    if (Restores[i].index == index && Restores[i].vreg == vr)
1755 Restores[i].index = -1;
1758 /// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being
1759 /// spilled and create empty intervals for their uses.
1761 LiveIntervals::handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm,
1762 const TargetRegisterClass* rc,
1763 std::vector<LiveInterval*> &NewLIs) {
1764 for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
1765 re = mri_->reg_end(); ri != re; ) {
1766 MachineOperand &O = ri.getOperand();
1767 MachineInstr *MI = &*ri;
1770 assert(MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF &&
1771 "Register def was not rewritten?");
1772 RemoveMachineInstrFromMaps(MI);
1773 vrm.RemoveMachineInstrFromMaps(MI);
1774 MI->eraseFromParent();
      // This must be a use of an implicit_def so it's not part of the live
      // interval. Create a new empty live interval for it.
      // FIXME: Can we simply erase some of the instructions? e.g. Stores?
1779 unsigned NewVReg = mri_->createVirtualRegister(rc);
1781 vrm.setIsImplicitlyDefined(NewVReg);
1782 NewLIs.push_back(&getOrCreateInterval(NewVReg));
1783 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1784 MachineOperand &MO = MI->getOperand(i);
1785 if (MO.isReg() && MO.getReg() == li.reg) {
1789 MO.setIsKill(false);
std::vector<LiveInterval*> LiveIntervals::
addIntervalsForSpillsFast(const LiveInterval &li,
                          const MachineLoopInfo *loopInfo,
                          VirtRegMap &vrm) {
  unsigned slot = vrm.assignVirt2StackSlot(li.reg);

  std::vector<LiveInterval*> added;

  assert(li.weight != HUGE_VALF &&
         "attempt to spill already spilled interval!");

  DOUT << "\t\t\t\tadding intervals for spills for interval: ";

  const TargetRegisterClass* rc = mri_->getRegClass(li.reg);

  MachineRegisterInfo::reg_iterator RI = mri_->reg_begin(li.reg);
  while (RI != mri_->reg_end()) {
    MachineInstr* MI = &*RI;

    SmallVector<unsigned, 2> Indices;
    bool HasUse = false;
    bool HasDef = false;

    for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
      MachineOperand& mop = MI->getOperand(i);
      if (!mop.isReg() || mop.getReg() != li.reg) continue;

      HasUse |= MI->getOperand(i).isUse();
      HasDef |= MI->getOperand(i).isDef();

      Indices.push_back(i);
    }

    if (!tryFoldMemoryOperand(MI, vrm, NULL, getInstructionIndex(MI),
                              Indices, true, slot, li.reg)) {
      unsigned NewVReg = mri_->createVirtualRegister(rc);
      vrm.assignVirt2StackSlot(NewVReg, slot);

      // Create a new register for this spill.
      LiveInterval &nI = getOrCreateInterval(NewVReg);

      // The spill weight is now infinity as it cannot be spilled again.
      nI.weight = HUGE_VALF;

      // Rewrite register operands to use the new vreg.
      for (SmallVectorImpl<unsigned>::iterator I = Indices.begin(),
             E = Indices.end(); I != E; ++I) {
        MI->getOperand(*I).setReg(NewVReg);

        if (MI->getOperand(*I).isUse())
          MI->getOperand(*I).setIsKill(true);
      }

      // Fill in the new live interval.
      unsigned index = getInstructionIndex(MI);
      if (HasUse) {
        LiveRange LR(getLoadIndex(index), getUseIndex(index),
                     nI.getNextValue(0, 0, false, getVNInfoAllocator()));
        nI.addRange(LR);
        vrm.addRestorePoint(NewVReg, MI);
      }
      if (HasDef) {
        LiveRange LR(getDefIndex(index), getStoreIndex(index),
                     nI.getNextValue(0, 0, false, getVNInfoAllocator()));
        nI.addRange(LR);
        vrm.addSpillPoint(NewVReg, true, MI);
      }

      added.push_back(&nI);

      DOUT << "\t\t\t\tadded new interval: ";
    }

    RI = mri_->reg_begin(li.reg);
  }

  return added;
}
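
/// addIntervalsForSpills - Create new intervals for spilled defs / uses of the
/// given interval, rematerializing or splitting where possible, and return the
/// new intervals for the register allocator to process.
///
/// A caller (e.g. a linear scan allocator) would typically consume the result
/// roughly like this (illustrative sketch only; the names are not from this
/// file):
///
///   std::vector<LiveInterval*> Added =
///     lis->addIntervalsForSpills(*CurLI, SpillIs, LoopInfo, *VRM);
///   for (unsigned i = 0, e = Added.size(); i != e; ++i)
///     Unhandled.push(Added[i]);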
std::vector<LiveInterval*> LiveIntervals::
addIntervalsForSpills(const LiveInterval &li,
                      SmallVectorImpl<LiveInterval*> &SpillIs,
                      const MachineLoopInfo *loopInfo, VirtRegMap &vrm) {
  if (EnableFastSpilling)
    return addIntervalsForSpillsFast(li, loopInfo, vrm);

  assert(li.weight != HUGE_VALF &&
         "attempt to spill already spilled interval!");

  DOUT << "\t\t\t\tadding intervals for spills for interval: ";
  li.print(DOUT, tri_);

  // Each bit specifies whether a spill is required in the MBB.
  BitVector SpillMBBs(mf_->getNumBlockIDs());
  DenseMap<unsigned, std::vector<SRInfo> > SpillIdxes;
  BitVector RestoreMBBs(mf_->getNumBlockIDs());
  DenseMap<unsigned, std::vector<SRInfo> > RestoreIdxes;
  DenseMap<unsigned,unsigned> MBBVRegsMap;
  std::vector<LiveInterval*> NewLIs;
  const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
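
  // Per-val# rematerialization bookkeeping: ReMatOrigDefs holds the original
  // defining instruction, ReMatDefs holds a clone of it (the original may be
  // modified or deleted during rewriting), ReMatDelete marks val#s whose
  // original defs may be deleted, and ReMatIds is filled in as remat ids are
  // assigned while rewriting instructions.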
  unsigned NumValNums = li.getNumValNums();
  SmallVector<MachineInstr*, 4> ReMatDefs;
  ReMatDefs.resize(NumValNums, NULL);
  SmallVector<MachineInstr*, 4> ReMatOrigDefs;
  ReMatOrigDefs.resize(NumValNums, NULL);
  SmallVector<int, 4> ReMatIds;
  ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
  BitVector ReMatDelete(NumValNums);
  unsigned Slot = VirtRegMap::MAX_STACK_SLOT;

  // Spilling a split live interval: it cannot be split any further, and it is
  // guaranteed to be a single val# / range interval.
  if (vrm.getPreSplitReg(li.reg)) {
    vrm.setIsSplitFromReg(li.reg, 0);
    // Unset the split kill marker on the last use.
    unsigned KillIdx = vrm.getKillPoint(li.reg);
    if (KillIdx) {
      MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
      assert(KillMI && "Last use disappeared?");
      int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
      assert(KillOp != -1 && "Last use disappeared?");
      KillMI->getOperand(KillOp).setIsKill(false);
    }
    vrm.removeKillPoint(li.reg);

    bool DefIsReMat = vrm.isReMaterialized(li.reg);
    Slot = vrm.getStackSlot(li.reg);
    assert(Slot != VirtRegMap::MAX_STACK_SLOT);
    MachineInstr *ReMatDefMI = DefIsReMat ?
      vrm.getReMaterializedMI(li.reg) : NULL;
    int LdSlot = 0;
    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
    bool isLoad = isLoadSS ||
      (DefIsReMat && (ReMatDefMI->getDesc().canFoldAsLoad()));
    bool IsFirstRange = true;
    for (LiveInterval::Ranges::const_iterator
           I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
      // If this is a split live interval with multiple ranges, it means there
      // are two-address instructions that re-defined the value. Only the
      // first def can be rematerialized!
      if (IsFirstRange) {
        // Note ReMatOrigDefMI has already been deleted.
        rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
                             Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                             false, vrm, rc, ReMatIds, loopInfo,
                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                             MBBVRegsMap, NewLIs);
      } else {
        rewriteInstructionsForSpills(li, false, I, NULL, 0,
                             Slot, 0, false, false, false,
                             false, vrm, rc, ReMatIds, loopInfo,
                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                             MBBVRegsMap, NewLIs);
      }
      IsFirstRange = false;
    }

    handleSpilledImpDefs(li, vrm, rc, NewLIs);
    return NewLIs;
  }

  bool TrySplit = SplitAtBB && !intervalIsInOneMBB(li);
  if (SplitLimit != -1 && (int)numSplits >= SplitLimit)
    TrySplit = false;

  bool NeedStackSlot = false;
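
  // Determine, for each val#, whether its def can be rematerialized. If any
  // val# has uses that cannot all be rematerialized, the interval needs a
  // stack slot.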
  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
       i != e; ++i) {
    const VNInfo *VNI = *i;
    unsigned VN = VNI->id;
    if (VNI->isUnused())
      continue; // Dead val#.
    // Is the def for the val# rematerializable?
    MachineInstr *ReMatDefMI = VNI->isDefAccurate()
      ? getInstructionFromIndex(VNI->def) : 0;
    bool dummy;
    if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, SpillIs, dummy)) {
      // Remember how to remat the def of this val#.
      ReMatOrigDefs[VN] = ReMatDefMI;
      // Original def may be modified so we have to make a copy here.
      MachineInstr *Clone = mf_->CloneMachineInstr(ReMatDefMI);
      ClonedMIs.push_back(Clone);
      ReMatDefs[VN] = Clone;

      bool CanDelete = true;
      if (VNI->hasPHIKill()) {
        // A kill is a phi node; not all of its uses can be rematerialized, so
        // the def must not be deleted.
        CanDelete = false;
        // Need a stack slot if there is any live range where uses cannot be
        // rematerialized.
        NeedStackSlot = true;
      }
      if (CanDelete)
        ReMatDelete.set(VN);
    } else {
      // Need a stack slot if there is any live range where uses cannot be
      // rematerialized.
      NeedStackSlot = true;
    }
  }

  // One stack slot per live interval.
  if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0) {
    if (vrm.getStackSlot(li.reg) == VirtRegMap::NO_STACK_SLOT)
      Slot = vrm.assignVirt2StackSlot(li.reg);
    else
      // This case only occurs when the prealloc splitter has already assigned
      // a stack slot to this vreg.
      Slot = vrm.getStackSlot(li.reg);
  }

  // Create new intervals and rewrite defs and uses.
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
    MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
    bool DefIsReMat = ReMatDefMI != NULL;
    bool CanDelete = ReMatDelete[I->valno->id];
    int LdSlot = 0;
    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
    bool isLoad = isLoadSS ||
      (DefIsReMat && ReMatDefMI->getDesc().canFoldAsLoad());
    rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
                                 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                                 CanDelete, vrm, rc, ReMatIds, loopInfo,
                                 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                                 MBBVRegsMap, NewLIs);
  }

  // Spills / restores only need to be inserted explicitly if we are splitting.
  if (!TrySplit) {
    handleSpilledImpDefs(li, vrm, rc, NewLIs);
    return NewLIs;
  }

  SmallPtrSet<LiveInterval*, 4> AddedKill;
  SmallVector<unsigned, 2> Ops;
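
  // First handle the blocks that need the current value stored back to the
  // stack slot: fold the store into the defining instruction where possible,
  // otherwise record a spill point for the spiller.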
  if (NeedStackSlot) {
    int Id = SpillMBBs.find_first();
    while (Id != -1) {
      std::vector<SRInfo> &spills = SpillIdxes[Id];
      for (unsigned i = 0, e = spills.size(); i != e; ++i) {
        int index = spills[i].index;
        unsigned VReg = spills[i].vreg;
        LiveInterval &nI = getOrCreateInterval(VReg);
        bool isReMat = vrm.isReMaterialized(VReg);
        MachineInstr *MI = getInstructionFromIndex(index);
        bool CanFold = false;
        bool FoundUse = false;
        Ops.clear();
        if (spills[i].canFold) {
          CanFold = true;
          for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
            MachineOperand &MO = MI->getOperand(j);
            if (!MO.isReg() || MO.getReg() != VReg)
              continue;

            Ops.push_back(j);
            if (MO.isDef())
              continue;
            if (isReMat ||
                (!FoundUse && !alsoFoldARestore(Id, index, VReg,
                                                RestoreMBBs, RestoreIdxes))) {
              // MI has two-address uses of the same register. If the use
              // isn't the first and only use in the BB, then we can't fold
              // it. FIXME: Move this to rewriteInstructionsForSpills.
              CanFold = false;
              break;
            }
            FoundUse = true;
          }
        }
        // Fold the store into the def if possible.
        bool Folded = false;
        if (CanFold && !Ops.empty()) {
          if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot, VReg)) {
            Folded = true;
            if (FoundUse) {
              // Also folded uses, do not issue a load.
              eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
              nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
            }
            nI.removeRange(getDefIndex(index), getStoreIndex(index));
          }
        }

        // Otherwise tell the spiller to issue a spill.
        if (!Folded) {
          LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
          bool isKill = LR->end == getStoreIndex(index);
          if (!MI->registerDefIsDead(nI.reg))
            // No need to spill a dead def.
            vrm.addSpillPoint(VReg, isKill, MI);
          if (isKill)
            AddedKill.insert(&nI);
        }
      }
      Id = SpillMBBs.find_next(Id);
    }
  }
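
  // Now handle the blocks that need the value reloaded (or rematerialized):
  // fold the load into the using instruction where possible, otherwise record
  // a restore point for the spiller.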
  int Id = RestoreMBBs.find_first();
  while (Id != -1) {
    std::vector<SRInfo> &restores = RestoreIdxes[Id];
    for (unsigned i = 0, e = restores.size(); i != e; ++i) {
      int index = restores[i].index;
      if (index == -1)
        continue;
      unsigned VReg = restores[i].vreg;
      LiveInterval &nI = getOrCreateInterval(VReg);
      bool isReMat = vrm.isReMaterialized(VReg);
      MachineInstr *MI = getInstructionFromIndex(index);
      bool CanFold = false;
      Ops.clear();
      if (restores[i].canFold) {
        CanFold = true;
        for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
          MachineOperand &MO = MI->getOperand(j);
          if (!MO.isReg() || MO.getReg() != VReg)
            continue;

          if (MO.isDef()) {
            // If this restore were to be folded, it would have been folded
            // already.
            CanFold = false;
            break;
          }
          Ops.push_back(j);
        }
      }

      // Fold the load into the use if possible.
      bool Folded = false;
      if (CanFold && !Ops.empty()) {
        if (!isReMat)
          Folded = tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot, VReg);
        else {
          MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
          int LdSlot = 0;
          bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
          // If the rematerializable def is a load, also try to fold it.
          if (isLoadSS || ReMatDefMI->getDesc().canFoldAsLoad())
            Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
                                          Ops, isLoadSS, LdSlot, VReg);

          unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
          if (ImpUse) {
            // Re-materializing an instruction that has a virtual register use.
            // Add the register as an implicit use on the using MI and update
            // the register interval's spill weight to HUGE_VALF to prevent it
            // from being spilled.
            LiveInterval &ImpLi = getInterval(ImpUse);
            ImpLi.weight = HUGE_VALF;
            MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
          }
        }
      }
      // If folding is not possible / failed, then tell the spiller to issue a
      // load / rematerialization for us.
      if (Folded)
        nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
      else
        vrm.addRestorePoint(VReg, MI);
    }
    Id = RestoreMBBs.find_next(Id);
  }

  // Finalize intervals: add kills, finalize spill weights, and filter out
  // dead intervals.
  std::vector<LiveInterval*> RetNewLIs;
  for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
    LiveInterval *LI = NewLIs[i];
    if (!LI->empty()) {
      LI->weight /= InstrSlots::NUM * getApproximateInstructionCount(*LI);
      if (!AddedKill.count(LI)) {
        LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
        unsigned LastUseIdx = getBaseIndex(LR->end);
        MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
        int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
        assert(UseIdx != -1);
        if (!LastUse->isRegTiedToDefOperand(UseIdx)) {
          LastUse->getOperand(UseIdx).setIsKill();
          vrm.addKillPoint(LI->reg, LastUseIdx);
        }
      }
      RetNewLIs.push_back(LI);
    }
  }

  handleSpilledImpDefs(li, vrm, rc, RetNewLIs);
  return RetNewLIs;
}

/// hasAllocatableSuperReg - Return true if the specified physical register has
/// any super register that's allocatable.
bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS)
    if (allocatableRegs_[*AS] && hasInterval(*AS))
      return true;
  return false;
}

/// getRepresentativeReg - Find the largest super register of the specified
/// physical register.
unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
  // Find the largest super-register that is allocatable.
  unsigned BestReg = Reg;
  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
    unsigned SuperReg = *AS;
    if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) {
      BestReg = SuperReg;
      break;
    }
  }
  return BestReg;
}

/// getNumConflictsWithPhysReg - Return the number of uses and defs of the
/// specified interval that conflict with the specified physical register.
unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
                                                   unsigned PhysReg) const {
  unsigned NumConflicts = 0;
  const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg));
  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
         E = mri_->reg_end(); I != E; ++I) {
    MachineOperand &O = I.getOperand();
    MachineInstr *MI = O.getParent();
    unsigned Index = getInstructionIndex(MI);
    if (pli.liveAt(Index))
      ++NumConflicts;
  }
  return NumConflicts;
}
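
// A register allocator that has run out of registers might, roughly, use
// getNumConflictsWithPhysReg (above) and spillPhysRegAroundRegDefsUses (below)
// together like this (illustrative sketch only; the names and the threshold
// are not from this file):
//
//   if (lis->getNumConflictsWithPhysReg(CurLI, PhysReg) <= Threshold)
//     lis->spillPhysRegAroundRegDefsUses(CurLI, PhysReg, *VRM);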

/// spillPhysRegAroundRegDefsUses - Spill the specified physical register
/// around all defs and uses of the specified interval. Return true if it
/// was able to cut its interval.
bool LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
                                                  unsigned PhysReg,
                                                  VirtRegMap &vrm) {
  unsigned SpillReg = getRepresentativeReg(PhysReg);

  for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
    // If there is a register which aliases PhysReg but is not a sub-register
    // of the chosen representative super register, assert, since we can't
    // handle that case yet.
    assert(*AS == SpillReg || !allocatableRegs_[*AS] || !hasInterval(*AS) ||
           tri_->isSuperRegister(*AS, SpillReg));

  bool Cut = false;
  LiveInterval &pli = getInterval(SpillReg);
  SmallPtrSet<MachineInstr*, 8> SeenMIs;
  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
         E = mri_->reg_end(); I != E; ++I) {
    MachineOperand &O = I.getOperand();
    MachineInstr *MI = O.getParent();
    if (SeenMIs.count(MI))
      continue;
    SeenMIs.insert(MI);
    unsigned Index = getInstructionIndex(MI);
    if (pli.liveAt(Index)) {
      vrm.addEmergencySpill(SpillReg, MI);
      unsigned StartIdx = getLoadIndex(Index);
      unsigned EndIdx = getStoreIndex(Index)+1;
      if (pli.isInOneLiveRange(StartIdx, EndIdx)) {
        pli.removeRange(StartIdx, EndIdx);
        Cut = true;
      } else {
        cerr << "Ran out of registers during register allocation!\n";
        if (MI->getOpcode() == TargetInstrInfo::INLINEASM) {
          cerr << "Please check your inline asm statement for invalid "
               << "constraints:\n";
          MI->print(cerr.stream(), tm_);
        }
        exit(1);
      }
      for (const unsigned* AS = tri_->getSubRegisters(SpillReg); *AS; ++AS) {
        if (!hasInterval(*AS))
          continue;
        LiveInterval &spli = getInterval(*AS);
        if (spli.liveAt(Index))
          spli.removeRange(getLoadIndex(Index), getStoreIndex(Index)+1);
      }
    }
  }
  return Cut;
}
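
/// addLiveRangeToEndOfBlock - Given a register and an instruction that defines
/// it, create a new value for the register's interval and add a range that
/// covers everything from the instruction's def slot to the end of the
/// instruction's basic block. The new value is marked as being killed by a PHI
/// at the end of the block.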
LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg,
                                                  MachineInstr* startInst) {
  LiveInterval& Interval = getOrCreateInterval(reg);
  VNInfo* VN = Interval.getNextValue(
    getInstructionIndex(startInst) + InstrSlots::DEF,
    startInst, true, getVNInfoAllocator());
  VN->setHasPHIKill(true);
  VN->kills.push_back(getMBBEndIdx(startInst->getParent()));
  LiveRange LR(getInstructionIndex(startInst) + InstrSlots::DEF,
               getMBBEndIdx(startInst->getParent()) + 1, VN);
  Interval.addRange(LR);