1 //===-- LiveIntervalAnalysis.cpp - Live Interval Analysis -----------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file implements the LiveInterval analysis pass which is used
11 // by the Linear Scan Register allocator. This pass linearizes the
12 // basic blocks of the function in DFS order and uses the
13 // LiveVariables pass to conservatively compute live intervals for
14 // each virtual and physical register.
16 //===----------------------------------------------------------------------===//
18 #define DEBUG_TYPE "liveintervals"
19 #include "llvm/CodeGen/LiveIntervalAnalysis.h"
20 #include "VirtRegMap.h"
21 #include "llvm/Value.h"
22 #include "llvm/Analysis/AliasAnalysis.h"
23 #include "llvm/CodeGen/LiveVariables.h"
24 #include "llvm/CodeGen/MachineFrameInfo.h"
25 #include "llvm/CodeGen/MachineInstr.h"
26 #include "llvm/CodeGen/MachineLoopInfo.h"
27 #include "llvm/CodeGen/MachineRegisterInfo.h"
28 #include "llvm/CodeGen/Passes.h"
29 #include "llvm/CodeGen/PseudoSourceValue.h"
30 #include "llvm/Target/TargetRegisterInfo.h"
31 #include "llvm/Target/TargetInstrInfo.h"
32 #include "llvm/Target/TargetMachine.h"
33 #include "llvm/Target/TargetOptions.h"
34 #include "llvm/Support/CommandLine.h"
35 #include "llvm/Support/Debug.h"
36 #include "llvm/ADT/Statistic.h"
37 #include "llvm/ADT/STLExtras.h"
42 // Hidden options for help debugging.
// -disable-rematerialization: turn remat off entirely (debug aid).
43 static cl::opt<bool> DisableReMat("disable-rematerialization",
44 cl::init(false), cl::Hidden);
// -split-intervals-at-bb: split live intervals at basic-block boundaries
// (on by default).
46 static cl::opt<bool> SplitAtBB("split-intervals-at-bb",
47 cl::init(true), cl::Hidden);
// -split-limit: presumably a cap on the number of interval splits;
// -1 (the default) means unlimited. TODO confirm at the use site.
48 static cl::opt<int> SplitLimit("split-limit",
49 cl::init(-1), cl::Hidden);
// -aggressive-remat: apply the target-independent remat heuristics in
// isReMaterializable() even when the target does not mark the
// instruction trivially rematerializable.
51 static cl::opt<bool> EnableAggressiveRemat("aggressive-remat", cl::Hidden);
// -fast-spill: enable the fast spilling code path (off by default).
53 static cl::opt<bool> EnableFastSpilling("fast-spill",
54 cl::init(false), cl::Hidden);
// Statistics reported under -stats.
56 STATISTIC(numIntervals, "Number of original intervals");
57 STATISTIC(numFolds , "Number of loads/stores folded into instructions");
58 STATISTIC(numSplits , "Number of intervals split");
// Pass identification and registration; makes the pass available as
// -liveintervals.
60 char LiveIntervals::ID = 0;
61 static RegisterPass<LiveIntervals> X("liveintervals", "Live Interval Analysis");
/// getAnalysisUsage - Declare the analyses this pass requires and the ones
/// it keeps up to date for later passes.
63 void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
64 AU.addRequired<AliasAnalysis>();
65 AU.addPreserved<AliasAnalysis>();
// LiveVariables is both consumed (to build intervals) and preserved.
66 AU.addPreserved<LiveVariables>();
67 AU.addRequired<LiveVariables>();
68 AU.addPreservedID(MachineLoopInfoID);
69 AU.addPreservedID(MachineDominatorsID);
// This pass works on the post-PHI-elimination, two-address form of the
// function, so both lowering passes must have run first.
72 AU.addPreservedID(PHIEliminationID);
73 AU.addRequiredID(PHIEliminationID);
76 AU.addRequiredID(TwoAddressInstructionPassID);
77 MachineFunctionPass::getAnalysisUsage(AU);
/// releaseMemory - Drop all per-function state: the LiveInterval objects,
/// the bump-allocated VNInfo storage, and any MachineInstrs that were
/// cloned for rematerialization.
80 void LiveIntervals::releaseMemory() {
81 // Free the live intervals themselves.
82 for (DenseMap<unsigned, LiveInterval*>::iterator I = r2iMap_.begin(),
83 E = r2iMap_.end(); I != E; ++I)
91 // Release VNInfo memory regions after all VNInfo objects are dtor'd.
92 VNInfoAllocator.Reset();
// Delete instructions cloned during remat; they are owned by this pass,
// not by any basic block.
93 while (!ClonedMIs.empty()) {
94 MachineInstr *MI = ClonedMIs.back();
96 mf_->DeleteMachineInstr(MI);
/// computeNumbering - Assign a slot index (InstrSlots::NUM slots per
/// instruction) to every MachineInstr and basic block, rebuilding the
/// index<->instruction and index->MBB maps. If an old numbering existed,
/// every live range start/end, VNInfo def, and kill index is remapped to
/// the new numbering; indices whose instruction has been erased are mapped
/// to a best-guess neighboring position.
100 void LiveIntervals::computeNumbering() {
// Save the previous maps so existing intervals can be remapped below.
101 Index2MiMap OldI2MI = i2miMap_;
102 std::vector<IdxMBBPair> OldI2MBB = Idx2MBBMap;
111 // Number MachineInstrs and MachineBasicBlocks.
112 // Initialize MBB indexes to a sentinel.
113 MBB2IdxMap.resize(mf_->getNumBlockIDs(), std::make_pair(~0U,~0U));
115 unsigned MIIndex = 0;
116 for (MachineFunction::iterator MBB = mf_->begin(), E = mf_->end();
118 unsigned StartIdx = MIIndex;
120 // Insert an empty slot at the beginning of each block.
121 MIIndex += InstrSlots::NUM;
122 i2miMap_.push_back(0);
124 for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
126 bool inserted = mi2iMap_.insert(std::make_pair(I, MIIndex)).second;
127 assert(inserted && "multiple MachineInstr -> index mappings");
129 i2miMap_.push_back(I);
130 MIIndex += InstrSlots::NUM;
133 // Insert max(1, numdefs) empty slots after every instruction.
134 unsigned Slots = I->getDesc().getNumDefs();
137 MIIndex += InstrSlots::NUM * Slots;
139 i2miMap_.push_back(0);
142 // Set the MBB2IdxMap entry for this MBB.
143 MBB2IdxMap[MBB->getNumber()] = std::make_pair(StartIdx, MIIndex - 1);
144 Idx2MBBMap.push_back(std::make_pair(StartIdx, MBB));
// Keep Idx2MBBMap sorted by start index so lower_bound lookups work.
146 std::sort(Idx2MBBMap.begin(), Idx2MBBMap.end(), Idx2MBBCompare());
// Remap every existing live range to the new numbering.
148 if (!OldI2MI.empty())
149 for (iterator OI = begin(), OE = end(); OI != OE; ++OI) {
150 for (LiveInterval::iterator LI = OI->second->begin(),
151 LE = OI->second->end(); LI != LE; ++LI) {
153 // Remap the start index of the live range to the corresponding new
154 // number, or our best guess at what it _should_ correspond to if the
155 // original instruction has been erased. This is either the following
156 // instruction or its predecessor.
157 unsigned index = LI->start / InstrSlots::NUM;
158 unsigned offset = LI->start % InstrSlots::NUM;
159 if (offset == InstrSlots::LOAD) {
// A LOAD-slot start means the range began at a block boundary; map it
// to the start of the (old) containing block.
160 std::vector<IdxMBBPair>::const_iterator I =
161 std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), LI->start);
162 // Take the pair containing the index
163 std::vector<IdxMBBPair>::const_iterator J =
164 (I == OldI2MBB.end() && OldI2MBB.size()>0) ? (I-1): I;
166 LI->start = getMBBStartIdx(J->second);
168 LI->start = mi2iMap_[OldI2MI[index]] + offset;
171 // Remap the ending index in the same way that we remapped the start,
172 // except for the final step where we always map to the immediately
173 // following instruction.
174 index = (LI->end - 1) / InstrSlots::NUM;
175 offset = LI->end % InstrSlots::NUM;
176 if (offset == InstrSlots::LOAD) {
177 // VReg dies at end of block.
178 std::vector<IdxMBBPair>::const_iterator I =
179 std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), LI->end);
182 LI->end = getMBBEndIdx(I->second) + 1;
// Skip indices whose instruction has been deleted; fall back to the
// next surviving instruction (offset only applies if unchanged).
184 unsigned idx = index;
185 while (index < OldI2MI.size() && !OldI2MI[index]) ++index;
187 if (index != OldI2MI.size())
188 LI->end = mi2iMap_[OldI2MI[index]] + (idx == index ? offset : 0);
190 LI->end = InstrSlots::NUM * i2miMap_.size();
194 for (LiveInterval::vni_iterator VNI = OI->second->vni_begin(),
195 VNE = OI->second->vni_end(); VNI != VNE; ++VNI) {
198 // Remap the VNInfo def index, which works the same as the
199 // start indices above. VN's with special sentinel defs
200 // don't need to be remapped.
201 if (vni->def != ~0U && vni->def != ~1U) {
202 unsigned index = vni->def / InstrSlots::NUM;
203 unsigned offset = vni->def % InstrSlots::NUM;
204 if (offset == InstrSlots::LOAD) {
205 std::vector<IdxMBBPair>::const_iterator I =
206 std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), vni->def);
207 // Take the pair containing the index
208 std::vector<IdxMBBPair>::const_iterator J =
209 (I == OldI2MBB.end() && OldI2MBB.size()>0) ? (I-1): I;
211 vni->def = getMBBStartIdx(J->second);
213 vni->def = mi2iMap_[OldI2MI[index]] + offset;
217 // Remap the VNInfo kill indices, which works the same as
218 // the end indices above.
219 for (size_t i = 0; i < vni->kills.size(); ++i) {
220 // PHI kills don't need to be remapped.
221 if (!vni->kills[i]) continue;
223 unsigned index = (vni->kills[i]-1) / InstrSlots::NUM;
224 unsigned offset = vni->kills[i] % InstrSlots::NUM;
225 if (offset == InstrSlots::LOAD) {
226 std::vector<IdxMBBPair>::const_iterator I =
227 std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), vni->kills[i]);
230 vni->kills[i] = getMBBEndIdx(I->second);
232 unsigned idx = index;
233 while (index < OldI2MI.size() && !OldI2MI[index]) ++index;
235 if (index != OldI2MI.size())
236 vni->kills[i] = mi2iMap_[OldI2MI[index]] +
237 (idx == index ? offset : 0);
239 vni->kills[i] = InstrSlots::NUM * i2miMap_.size();
246 /// runOnMachineFunction - Compute live intervals for the whole function.
/// Caches the target hooks (register/instruction info) and the required
/// analyses, then builds the intervals.
248 bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
250 mri_ = &mf_->getRegInfo();
251 tm_ = &fn.getTarget();
252 tri_ = tm_->getRegisterInfo();
253 tii_ = tm_->getInstrInfo();
254 aa_ = &getAnalysis<AliasAnalysis>();
255 lv_ = &getAnalysis<LiveVariables>();
256 allocatableRegs_ = tri_->getAllocatableSet(fn);
// Update the -stats counter with the number of intervals built.
261 numIntervals += getNumIntervals();
267 /// print - Implement the dump method. Prints every interval followed by
/// the numbered machine instruction stream.
268 void LiveIntervals::print(std::ostream &O, const Module* ) const {
269 O << "********** INTERVALS **********\n";
270 for (const_iterator I = begin(), E = end(); I != E; ++I) {
271 I->second->print(O, tri_);
275 O << "********** MACHINEINSTRS **********\n";
276 for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end();
277 mbbi != mbbe; ++mbbi) {
// Label each block with the name of its originating IR basic block.
278 O << ((Value*)mbbi->getBasicBlock())->getName() << ":\n";
279 for (MachineBasicBlock::iterator mii = mbbi->begin(),
280 mie = mbbi->end(); mii != mie; ++mii) {
281 O << getInstructionIndex(mii) << '\t' << *mii;
286 /// conflictsWithPhysRegDef - Returns true if the specified register
287 /// is defined during the duration of the specified interval.
/// Walks every instruction inside li's ranges and checks whether any of
/// its operands resolve (via vrm for virtuals) to a physreg overlapping
/// 'reg'. Copies involving li.reg itself are treated specially.
288 bool LiveIntervals::conflictsWithPhysRegDef(const LiveInterval &li,
289 VirtRegMap &vrm, unsigned reg) {
290 for (LiveInterval::Ranges::const_iterator
291 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
292 for (unsigned index = getBaseIndex(I->start),
293 end = getBaseIndex(I->end-1) + InstrSlots::NUM; index != end;
294 index += InstrSlots::NUM) {
295 // skip deleted instructions
296 while (index != end && !getInstructionFromIndex(index))
297 index += InstrSlots::NUM;
298 if (index == end) break;
300 MachineInstr *MI = getInstructionFromIndex(index);
301 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
302 if (tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
303 if (SrcReg == li.reg || DstReg == li.reg)
305 for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
306 MachineOperand& mop = MI->getOperand(i);
309 unsigned PhysReg = mop.getReg();
310 if (PhysReg == 0 || PhysReg == li.reg)
// Virtual registers are resolved through the current assignment in
// the VirtRegMap before overlap checking.
312 if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
313 if (!vrm.hasPhys(PhysReg))
315 PhysReg = vrm.getPhys(PhysReg);
317 if (PhysReg && tri_->regsOverlap(PhysReg, reg))
326 /// conflictsWithPhysRegRef - Similar to conflictsWithPhysRegDef except
327 /// it can check use as well.
/// Instructions in JoinedCopies (coalesced copies scheduled for deletion)
/// are ignored.
328 bool LiveIntervals::conflictsWithPhysRegRef(LiveInterval &li,
329 unsigned Reg, bool CheckUse,
330 SmallPtrSet<MachineInstr*,32> &JoinedCopies) {
331 for (LiveInterval::Ranges::const_iterator
332 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
333 for (unsigned index = getBaseIndex(I->start),
334 end = getBaseIndex(I->end-1) + InstrSlots::NUM; index != end;
335 index += InstrSlots::NUM) {
336 // Skip deleted instructions.
337 MachineInstr *MI = 0;
338 while (index != end) {
339 MI = getInstructionFromIndex(index);
342 index += InstrSlots::NUM;
344 if (index == end) break;
346 if (JoinedCopies.count(MI))
348 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
349 MachineOperand& MO = MI->getOperand(i);
// Uses only count when the caller asked for them via CheckUse.
352 if (MO.isUse() && !CheckUse)
354 unsigned PhysReg = MO.getReg();
355 if (PhysReg == 0 || TargetRegisterInfo::isVirtualRegister(PhysReg))
357 if (tri_->isSubRegister(Reg, PhysReg))
/// printRegName - Print a register to cerr: target name for physregs,
/// "%reg<N>" for virtuals.
367 void LiveIntervals::printRegName(unsigned reg) const {
368 if (TargetRegisterInfo::isPhysicalRegister(reg))
369 cerr << tri_->getName(reg);
371 cerr << "%reg" << reg;
/// handleVirtualRegisterDef - Extend 'interval' for a definition of a
/// virtual register by instruction 'mi' (operand MO at slot index MIIdx).
/// The first def of a vreg creates its value number and ranges from the
/// LiveVariables kill information; later defs arise only from two-address
/// rewriting or PHI elimination and are patched in specially.
374 void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
375 MachineBasicBlock::iterator mi,
376 unsigned MIIdx, MachineOperand& MO,
378 LiveInterval &interval) {
379 DOUT << "\t\tregister: "; DEBUG(printRegName(interval.reg));
380 LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);
382 if (mi->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
383 DOUT << "is a implicit_def\n";
387 // Virtual registers may be defined multiple times (due to phi
388 // elimination and 2-addr elimination). Much of what we do only has to be
389 // done once for the vreg. We use an empty interval to detect the first
390 // time we see a vreg.
391 if (interval.empty()) {
392 // Get the Idx of the defining instructions.
393 unsigned defIndex = getDefIndex(MIIdx);
394 // Earlyclobbers move back one.
395 if (MO.isEarlyClobber())
396 defIndex = getUseIndex(MIIdx);
// Record the copy instruction when the def is a copy-like op so that
// coalescing/remat can find the source of this value later.
398 MachineInstr *CopyMI = NULL;
399 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
400 if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
401 mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
402 tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
404 // Earlyclobbers move back one.
405 ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);
407 assert(ValNo->id == 0 && "First value in interval is not 0?");
409 // Loop over all of the blocks that the vreg is defined in. There are
410 // two cases we have to handle here. The most common case is a vreg
411 // whose lifetime is contained within a basic block. In this case there
412 // will be a single kill, in MBB, which comes after the definition.
413 if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
414 // FIXME: what about dead vars?
416 if (vi.Kills[0] != mi)
417 killIdx = getUseIndex(getInstructionIndex(vi.Kills[0]))+1;
419 killIdx = defIndex+1;
421 // If the kill happens after the definition, we have an intra-block
423 if (killIdx > defIndex) {
424 assert(vi.AliveBlocks.none() &&
425 "Shouldn't be alive across any blocks!");
426 LiveRange LR(defIndex, killIdx, ValNo);
427 interval.addRange(LR);
428 DOUT << " +" << LR << "\n";
429 interval.addKill(ValNo, killIdx);
434 // The other case we handle is when a virtual register lives to the end
435 // of the defining block, potentially live across some blocks, then is
436 // live into some number of blocks, but gets killed. Start by adding a
437 // range that goes from this definition to the end of the defining block.
438 LiveRange NewLR(defIndex, getMBBEndIdx(mbb)+1, ValNo);
439 DOUT << " +" << NewLR;
440 interval.addRange(NewLR);
442 // Iterate over all of the blocks that the variable is completely
443 // live in, adding [instrIndex(begin), instrIndex(end)+4) to the
445 for (int i = vi.AliveBlocks.find_first(); i != -1;
446 i = vi.AliveBlocks.find_next(i)) {
447 LiveRange LR(getMBBStartIdx(i),
448 getMBBEndIdx(i)+1, // MBB ends at -1.
450 interval.addRange(LR);
454 // Finally, this virtual register is live from the start of any killing
455 // block to the 'use' slot of the killing instruction.
456 for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
457 MachineInstr *Kill = vi.Kills[i];
458 unsigned killIdx = getUseIndex(getInstructionIndex(Kill))+1;
459 LiveRange LR(getMBBStartIdx(Kill->getParent()),
461 interval.addRange(LR);
462 interval.addKill(ValNo, killIdx);
467 // If this is the second time we see a virtual register definition, it
468 // must be due to phi elimination or two addr elimination. If this is
469 // the result of two address elimination, then the vreg is one of the
470 // def-and-use register operand.
471 if (mi->isRegReDefinedByTwoAddr(MOIdx)) {
472 // If this is a two-address definition, then we have already processed
473 // the live range. The only problem is that we didn't realize there
474 // are actually two values in the live interval. Because of this we
475 // need to take the LiveRegion that defines this register and split it
477 assert(interval.containsOneValue());
478 unsigned DefIndex = getDefIndex(interval.getValNumInfo(0)->def);
479 unsigned RedefIndex = getDefIndex(MIIdx);
480 // It cannot be an early clobber MO.
481 assert(!MO.isEarlyClobber() && "Unexpected early clobber!");
483 const LiveRange *OldLR = interval.getLiveRangeContaining(RedefIndex-1);
484 VNInfo *OldValNo = OldLR->valno;
486 // Delete the initial value, which should be short and continuous,
487 // because the 2-addr copy must be in the same MBB as the redef.
488 interval.removeRange(DefIndex, RedefIndex);
490 // Two-address vregs should always only be redefined once. This means
491 // that at this point, there should be exactly one value number in it.
492 assert(interval.containsOneValue() && "Unexpected 2-addr liveint!");
494 // The new value number (#1) is defined by the instruction we claimed
496 VNInfo *ValNo = interval.getNextValue(OldValNo->def, OldValNo->copy,
499 // Value#0 is now defined by the 2-addr instruction.
500 OldValNo->def = RedefIndex;
503 // Add the new live interval which replaces the range for the input copy.
504 LiveRange LR(DefIndex, RedefIndex, ValNo);
505 DOUT << " replace range with " << LR;
506 interval.addRange(LR);
507 interval.addKill(ValNo, RedefIndex);
509 // If this redefinition is dead, we need to add a dummy unit live
510 // range covering the def slot.
512 interval.addRange(LiveRange(RedefIndex, RedefIndex+1, OldValNo));
515 interval.print(DOUT, tri_);
518 // Otherwise, this must be because of phi elimination. If this is the
519 // first redefinition of the vreg that we have seen, go back and change
520 // the live range in the PHI block to be a different value number.
521 if (interval.containsOneValue()) {
522 assert(vi.Kills.size() == 1 &&
523 "PHI elimination vreg should have one kill, the PHI itself!");
525 // Remove the old range that we now know has an incorrect number.
526 VNInfo *VNI = interval.getValNumInfo(0);
527 MachineInstr *Killer = vi.Kills[0];
528 unsigned Start = getMBBStartIdx(Killer->getParent());
529 unsigned End = getUseIndex(getInstructionIndex(Killer))+1;
530 DOUT << " Removing [" << Start << "," << End << "] from: ";
531 interval.print(DOUT, tri_); DOUT << "\n";
532 interval.removeRange(Start, End);
533 VNI->hasPHIKill = true;
534 DOUT << " RESULT: "; interval.print(DOUT, tri_);
536 // Replace the interval with one of a NEW value number. Note that this
537 // value number isn't actually defined by an instruction, weird huh? :)
538 LiveRange LR(Start, End, interval.getNextValue(~0, 0, VNInfoAllocator));
539 DOUT << " replace range with " << LR;
540 interval.addRange(LR);
541 interval.addKill(LR.valno, End);
542 DOUT << " RESULT: "; interval.print(DOUT, tri_);
545 // In the case of PHI elimination, each variable definition is only
546 // live until the end of the block. We've already taken care of the
547 // rest of the live range.
548 unsigned defIndex = getDefIndex(MIIdx);
549 // It cannot be an early clobber MO.
550 assert(!MO.isEarlyClobber() && "Unexpected early clobber!");
// As above, remember the copy instruction for copy-like defs.
553 MachineInstr *CopyMI = NULL;
554 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
555 if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
556 mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
557 tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
559 ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);
561 unsigned killIndex = getMBBEndIdx(mbb) + 1;
562 LiveRange LR(defIndex, killIndex, ValNo);
563 interval.addRange(LR);
564 interval.addKill(ValNo, killIndex);
565 ValNo->hasPHIKill = true;
/// handlePhysicalRegisterDef - Extend 'interval' for a def of a physical
/// register at MIIdx. The interval necessarily ends within the defining
/// block: at the first kill, at a redefinition, or (if dead) one slot
/// after the def. CopyMI, when non-null, records a copy-like defining
/// instruction for the new value number.
573 void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
574 MachineBasicBlock::iterator mi,
577 LiveInterval &interval,
578 MachineInstr *CopyMI) {
579 // A physical register cannot be live across basic block, so its
580 // lifetime must end somewhere in its defining basic block.
581 DOUT << "\t\tregister: "; DEBUG(printRegName(interval.reg));
583 unsigned baseIndex = MIIdx;
584 unsigned start = getDefIndex(baseIndex);
585 // Earlyclobbers move back one.
586 if (MO.isEarlyClobber())
587 start = getUseIndex(MIIdx);
588 unsigned end = start;
590 // If it is not used after definition, it is considered dead at
591 // the instruction defining it. Hence its interval is:
592 // [defSlot(def), defSlot(def)+1)
599 // If it is not dead on definition, it must be killed by a
600 // subsequent instruction. Hence its interval is:
601 // [defSlot(def), useSlot(kill)+1)
602 baseIndex += InstrSlots::NUM;
603 while (++mi != MBB->end()) {
// Skip index slots belonging to deleted instructions.
604 while (baseIndex / InstrSlots::NUM < i2miMap_.size() &&
605 getInstructionFromIndex(baseIndex) == 0)
606 baseIndex += InstrSlots::NUM;
607 if (mi->killsRegister(interval.reg, tri_)) {
609 end = getUseIndex(baseIndex) + 1;
611 } else if (mi->modifiesRegister(interval.reg, tri_)) {
612 // Another instruction redefines the register before it is ever read.
613 // Then the register is essentially dead at the instruction that defines
614 // it. Hence its interval is:
615 // [defSlot(def), defSlot(def)+1)
621 baseIndex += InstrSlots::NUM;
624 // The only case we should have a dead physreg here without a killing or
625 // instruction where we know it's dead is if it is live-in to the function
627 assert(!CopyMI && "physreg was not killed in defining block!");
631 assert(start < end && "did not find end of interval?");
633 // Already exists? Extend old live interval.
634 LiveInterval::iterator OldLR = interval.FindLiveRangeContaining(start);
635 bool Extend = OldLR != interval.end();
636 VNInfo *ValNo = Extend
637 ? OldLR->valno : interval.getNextValue(start, CopyMI, VNInfoAllocator);
// Remember that an extended value was redefined by an early clobber.
638 if (MO.isEarlyClobber() && Extend)
639 ValNo->redefByEC = true;
640 LiveRange LR(start, end, ValNo);
641 interval.addRange(LR);
642 interval.addKill(LR.valno, end);
643 DOUT << " +" << LR << '\n';
/// handleRegisterDef - Dispatch a register def to the virtual- or
/// physical-register handler. For allocatable physregs, sub-registers are
/// also given intervals since a def of a register defines them too.
646 void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
647 MachineBasicBlock::iterator MI,
651 if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
652 handleVirtualRegisterDef(MBB, MI, MIIdx, MO, MOIdx,
653 getOrCreateInterval(MO.getReg()));
654 else if (allocatableRegs_[MO.getReg()]) {
// Pass the defining copy instruction along for copy-like defs.
655 MachineInstr *CopyMI = NULL;
656 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
657 if (MI->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
658 MI->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
659 tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
661 handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
662 getOrCreateInterval(MO.getReg()), CopyMI);
663 // Def of a register also defines its sub-registers.
664 for (const unsigned* AS = tri_->getSubRegisters(MO.getReg()); *AS; ++AS)
665 // If MI also modifies the sub-register explicitly, avoid processing it
666 // more than once. Do not pass in TRI here so it checks for exact match.
667 if (!MI->modifiesRegister(*AS))
668 handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
669 getOrCreateInterval(*AS), 0);
/// handleLiveInRegister - Create a range for a register that is live into
/// 'MBB' at index MIIdx: scan forward to the first kill or redefinition to
/// find the range's end. The value number gets the ~0U sentinel def since
/// no instruction in this function defines it.
673 void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
675 LiveInterval &interval, bool isAlias) {
676 DOUT << "\t\tlivein register: "; DEBUG(printRegName(interval.reg));
678 // Look for kills, if it reaches a def before it's killed, then it shouldn't
679 // be considered a livein.
680 MachineBasicBlock::iterator mi = MBB->begin();
681 unsigned baseIndex = MIIdx;
682 unsigned start = baseIndex;
// Skip index slots with no instruction before the scan begins.
683 while (baseIndex / InstrSlots::NUM < i2miMap_.size() &&
684 getInstructionFromIndex(baseIndex) == 0)
685 baseIndex += InstrSlots::NUM;
686 unsigned end = baseIndex;
687 bool SeenDefUse = false;
689 while (mi != MBB->end()) {
690 if (mi->killsRegister(interval.reg, tri_)) {
692 end = getUseIndex(baseIndex) + 1;
695 } else if (mi->modifiesRegister(interval.reg, tri_)) {
696 // Another instruction redefines the register before it is ever read.
697 // Then the register is essentially dead at the instruction that defines
698 // it. Hence its interval is:
699 // [defSlot(def), defSlot(def)+1)
701 end = getDefIndex(start) + 1;
706 baseIndex += InstrSlots::NUM;
708 if (mi != MBB->end()) {
709 while (baseIndex / InstrSlots::NUM < i2miMap_.size() &&
710 getInstructionFromIndex(baseIndex) == 0)
711 baseIndex += InstrSlots::NUM;
716 // Live-in register might not be used at all.
720 end = getDefIndex(MIIdx) + 1;
722 DOUT << " live through";
// The ~0U def marks this value as defined outside the function body.
727 LiveRange LR(start, end, interval.getNextValue(~0U, 0, VNInfoAllocator));
728 interval.addRange(LR);
729 interval.addKill(LR.valno, end);
730 DOUT << " +" << LR << '\n';
733 /// computeIntervals - computes the live intervals for virtual
734 /// registers. for some ordering of the machine instructions [1,N] a
735 /// live interval is an interval [i, j) where 1 <= i <= j < N for
736 /// which a variable is live
/// Walks each block: first handles live-in registers (and their
/// sub-registers), then every register def of every instruction.
737 void LiveIntervals::computeIntervals() {
739 DOUT << "********** COMPUTING LIVE INTERVALS **********\n"
740 << "********** Function: "
741 << ((Value*)mf_->getFunction())->getName() << '\n';
743 for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
745 MachineBasicBlock *MBB = MBBI;
746 // Track the index of the current machine instr.
747 unsigned MIIndex = getMBBStartIdx(MBB);
748 DOUT << ((Value*)MBB->getBasicBlock())->getName() << ":\n";
750 MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();
752 // Create intervals for live-ins to this BB first.
753 for (MachineBasicBlock::const_livein_iterator LI = MBB->livein_begin(),
754 LE = MBB->livein_end(); LI != LE; ++LI) {
755 handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
756 // Multiple live-ins can alias the same register.
757 for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
758 if (!hasInterval(*AS))
759 handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
763 // Skip over empty initial indices.
764 while (MIIndex / InstrSlots::NUM < i2miMap_.size() &&
765 getInstructionFromIndex(MIIndex) == 0)
766 MIIndex += InstrSlots::NUM;
768 for (; MI != miEnd; ++MI) {
769 DOUT << MIIndex << "\t" << *MI;
// Walk operands in reverse so defs are handled after any uses of the
// same instruction have been seen.
772 for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
773 MachineOperand &MO = MI->getOperand(i);
774 // handle register defs - build intervals
775 if (MO.isReg() && MO.getReg() && MO.isDef()) {
776 handleRegisterDef(MBB, MI, MIIndex, MO, i);
780 // Skip over the empty slots after each instruction.
781 unsigned Slots = MI->getDesc().getNumDefs();
784 MIIndex += InstrSlots::NUM * Slots;
786 // Skip over empty indices.
787 while (MIIndex / InstrSlots::NUM < i2miMap_.size() &&
788 getInstructionFromIndex(MIIndex) == 0)
789 MIIndex += InstrSlots::NUM;
/// findLiveInMBBs - Collect into MBBs the basic blocks whose start index
/// lies within [Start, End), using a binary search over the sorted
/// Idx2MBBMap.
794 bool LiveIntervals::findLiveInMBBs(unsigned Start, unsigned End,
795 SmallVectorImpl<MachineBasicBlock*> &MBBs) const {
796 std::vector<IdxMBBPair>::const_iterator I =
797 std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), Start);
800 while (I != Idx2MBBMap.end()) {
803 MBBs.push_back(I->second);
/// findReachableMBBs - Like findLiveInMBBs, but also walks the successors
/// of each block found in [Start, End).
810 bool LiveIntervals::findReachableMBBs(unsigned Start, unsigned End,
811 SmallVectorImpl<MachineBasicBlock*> &MBBs) const {
812 std::vector<IdxMBBPair>::const_iterator I =
813 std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), Start);
816 while (I != Idx2MBBMap.end()) {
819 MachineBasicBlock *MBB = I->second;
// Stop once blocks extend beyond the requested end index.
820 if (getMBBEndIdx(MBB) > End)
822 for (MachineBasicBlock::succ_iterator SI = MBB->succ_begin(),
823 SE = MBB->succ_end(); SI != SE; ++SI)
/// createInterval - Allocate a new LiveInterval for 'reg'. Physical
/// registers get HUGE_VALF weight so the allocator never spills them;
/// virtual registers start at weight 0.
831 LiveInterval* LiveIntervals::createInterval(unsigned reg) {
832 float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ? HUGE_VALF : 0.0F;
833 return new LiveInterval(reg, Weight);
836 /// dupInterval - Duplicate a live interval. The caller is responsible for
837 /// managing the allocated memory.
838 LiveInterval* LiveIntervals::dupInterval(LiveInterval *li) {
839 LiveInterval *NewLI = createInterval(li->reg);
// Deep-copy ranges and value numbers using the shared VNInfo allocator.
840 NewLI->Copy(*li, getVNInfoAllocator());
844 /// getVNInfoSourceReg - Helper function that parses the specified VNInfo
845 /// copy field and returns the source register that defines it.
846 unsigned LiveIntervals::getVNInfoSourceReg(const VNInfo *VNI) const {
850 if (VNI->copy->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG) {
851 // If it's extracting out of a physical register, return the sub-register.
852 unsigned Reg = VNI->copy->getOperand(1).getReg();
853 if (TargetRegisterInfo::isPhysicalRegister(Reg))
854 Reg = tri_->getSubReg(Reg, VNI->copy->getOperand(2).getImm());
// INSERT_SUBREG: the inserted value is operand 2.
856 } else if (VNI->copy->getOpcode() == TargetInstrInfo::INSERT_SUBREG)
857 return VNI->copy->getOperand(2).getReg();
// Otherwise the copy must be a target move; ask TargetInstrInfo to
// decompose it into source and destination.
859 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
860 if (tii_->isMoveInstr(*VNI->copy, SrcReg, DstReg, SrcSubReg, DstSubReg))
862 assert(0 && "Unrecognized copy instruction!");
866 //===----------------------------------------------------------------------===//
867 // Register allocator hooks.
870 /// getReMatImplicitUse - If the remat definition MI has one (for now, we only
871 /// allow one) virtual register operand, then its uses are implicitly using
872 /// the register. Returns the virtual register.
873 unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
874 MachineInstr *MI) const {
876 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
877 MachineOperand &MO = MI->getOperand(i);
878 if (!MO.isReg() || !MO.isUse())
// Ignore the register being rematerialized itself.
880 unsigned Reg = MO.getReg();
881 if (Reg == 0 || Reg == li.reg)
883 // FIXME: For now, only remat MI with at most one register operand.
885 "Can't rematerialize instruction with multiple register operand!");
894 /// isValNoAvailableAt - Return true if the val# of the specified interval
895 /// which reaches the given instruction also reaches the specified use index.
896 bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
897 unsigned UseIdx) const {
// Look up the value live at MI, then check the range covering UseIdx
// (if any) carries the same value number.
898 unsigned Index = getInstructionIndex(MI);
899 VNInfo *ValNo = li.FindLiveRangeContaining(Index)->valno;
900 LiveInterval::const_iterator UI = li.FindLiveRangeContaining(UseIdx);
901 return UI != li.end() && UI->valno == ValNo;
904 /// isReMaterializable - Returns true if the definition MI of the specified
905 /// val# of the specified interval is re-materializable.
/// Trivially true for IMPLICIT_DEF and loads from immutable stack slots.
/// Otherwise defers to the target, and under -aggressive-remat applies
/// conservative target-independent checks on memory and register operands.
906 bool LiveIntervals::isReMaterializable(const LiveInterval &li,
907 const VNInfo *ValNo, MachineInstr *MI,
908 SmallVectorImpl<LiveInterval*> &SpillIs,
913 if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF)
917 if (tii_->isLoadFromStackSlot(MI, FrameIdx) &&
918 mf_->getFrameInfo()->isImmutableObjectIndex(FrameIdx))
919 // FIXME: Let target specific isReallyTriviallyReMaterializable determines
920 // this but remember this is not safe to fold into a two-address
922 // This is a load from fixed stack slot. It can be rematerialized.
925 // If the target-specific rules don't identify an instruction as
926 // being trivially rematerializable, use some target-independent
928 if (!MI->getDesc().isRematerializable() ||
929 !tii_->isTriviallyReMaterializable(MI)) {
930 if (!EnableAggressiveRemat)
933 // If the instruction accesses memory but the memoperands have been lost,
934 // we can't analyze it.
935 const TargetInstrDesc &TID = MI->getDesc();
936 if ((TID.mayLoad() || TID.mayStore()) && MI->memoperands_empty())
939 // Avoid instructions obviously unsafe for remat.
940 if (TID.hasUnmodeledSideEffects() || TID.isNotDuplicable())
943 // If the instruction accesses memory and the memory could be non-constant,
944 // assume the instruction is not rematerializable.
945 for (std::list<MachineMemOperand>::const_iterator
946 I = MI->memoperands_begin(), E = MI->memoperands_end(); I != E; ++I){
947 const MachineMemOperand &MMO = *I;
948 if (MMO.isVolatile() || MMO.isStore())
950 const Value *V = MMO.getValue();
// Pseudo source values (stack/constant pool/etc.) know whether they are
// constant; otherwise ask alias analysis.
953 if (const PseudoSourceValue *PSV = dyn_cast<PseudoSourceValue>(V)) {
954 if (!PSV->isConstant(mf_->getFrameInfo()))
956 } else if (!aa_->pointsToConstantMemory(V))
960 // If any of the registers accessed are non-constant, conservatively assume
961 // the instruction is not rematerializable.
963 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
964 const MachineOperand &MO = MI->getOperand(i);
966 unsigned Reg = MO.getReg();
969 if (TargetRegisterInfo::isPhysicalRegister(Reg))
972 // Only allow one def, and that in the first operand.
973 if (MO.isDef() != (i == 0))
976 // Only allow constant-valued registers.
977 bool IsLiveIn = mri_->isLiveIn(Reg);
978 MachineRegisterInfo::def_iterator I = mri_->def_begin(Reg),
981 // For the def, it should be the only def of that register.
982 if (MO.isDef() && (next(I) != E || IsLiveIn))
986 // Only allow one use other register use, as that's all the
987 // remat mechanisms support currently.
991 else if (Reg != ImpUse)
994 // For the use, there should be only one associated def.
995 if (I != E && (next(I) != E || IsLiveIn))
// If the def reads another virtual register, that register's value must
// still be available at every use of this val#.
1002 unsigned ImpUse = getReMatImplicitUse(li, MI);
1004 const LiveInterval &ImpLi = getInterval(ImpUse);
1005 for (MachineRegisterInfo::use_iterator ri = mri_->use_begin(li.reg),
1006 re = mri_->use_end(); ri != re; ++ri) {
1007 MachineInstr *UseMI = &*ri;
1008 unsigned UseIdx = getInstructionIndex(UseMI);
1009 if (li.FindLiveRangeContaining(UseIdx)->valno != ValNo)
1011 if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
1015 // If a register operand of the re-materialized instruction is going to
1016 // be spilled next, then it's not legal to re-materialize this instruction.
1017 for (unsigned i = 0, e = SpillIs.size(); i != e; ++i)
1018 if (ImpUse == SpillIs[i]->reg)
1024 /// isReMaterializable - Returns true if the definition MI of the specified
1025 /// val# of the specified interval is re-materializable.
1026 bool LiveIntervals::isReMaterializable(const LiveInterval &li,
1027 const VNInfo *ValNo, MachineInstr *MI) {
1028 SmallVector<LiveInterval*, 4> Dummy1;
1030 return isReMaterializable(li, ValNo, MI, Dummy1, Dummy2);
1033 /// isReMaterializable - Returns true if every definition of MI of every
1034 /// val# of the specified interval is re-materializable.
1035 bool LiveIntervals::isReMaterializable(const LiveInterval &li,
1036 SmallVectorImpl<LiveInterval*> &SpillIs,
1039 for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
1041 const VNInfo *VNI = *i;
1042 unsigned DefIdx = VNI->def;
1044 continue; // Dead val#.
1045 // Is the def for the val# rematerializable?
1048 MachineInstr *ReMatDefMI = getInstructionFromIndex(DefIdx);
1049 bool DefIsLoad = false;
1051 !isReMaterializable(li, VNI, ReMatDefMI, SpillIs, DefIsLoad))
1053 isLoad |= DefIsLoad;
1058 /// FilterFoldedOps - Filter out two-address use operands. Return
1059 /// true if it finds any issue with the operands that ought to prevent
1061 static bool FilterFoldedOps(MachineInstr *MI,
1062 SmallVector<unsigned, 2> &Ops,
1064 SmallVector<unsigned, 2> &FoldOps) {
1065 const TargetInstrDesc &TID = MI->getDesc();
1068 for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
1069 unsigned OpIdx = Ops[i];
1070 MachineOperand &MO = MI->getOperand(OpIdx);
1071 // FIXME: fold subreg use.
1075 MRInfo |= (unsigned)VirtRegMap::isMod;
1077 // Filter out two-address use operand(s).
1078 if (!MO.isImplicit() &&
1079 TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1) {
1080 MRInfo = VirtRegMap::isModRef;
1083 MRInfo |= (unsigned)VirtRegMap::isRef;
1085 FoldOps.push_back(OpIdx);
1091 /// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
1092 /// slot / to reg or any rematerialized load into ith operand of specified
1093 /// MI. If it is successul, MI is updated with the newly created MI and
1095 bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
1096 VirtRegMap &vrm, MachineInstr *DefMI,
1098 SmallVector<unsigned, 2> &Ops,
1099 bool isSS, int Slot, unsigned Reg) {
1100 // If it is an implicit def instruction, just delete it.
1101 if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
1102 RemoveMachineInstrFromMaps(MI);
1103 vrm.RemoveMachineInstrFromMaps(MI);
1104 MI->eraseFromParent();
1109 // Filter the list of operand indexes that are to be folded. Abort if
1110 // any operand will prevent folding.
1111 unsigned MRInfo = 0;
1112 SmallVector<unsigned, 2> FoldOps;
1113 if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
1116 // The only time it's safe to fold into a two address instruction is when
1117 // it's folding reload and spill from / into a spill stack slot.
1118 if (DefMI && (MRInfo & VirtRegMap::isMod))
1121 MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(*mf_, MI, FoldOps, Slot)
1122 : tii_->foldMemoryOperand(*mf_, MI, FoldOps, DefMI);
1124 // Remember this instruction uses the spill slot.
1125 if (isSS) vrm.addSpillSlotUse(Slot, fmi);
1127 // Attempt to fold the memory reference into the instruction. If
1128 // we can do this, we don't need to insert spill code.
1129 MachineBasicBlock &MBB = *MI->getParent();
1130 if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
1131 vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
1132 vrm.transferSpillPts(MI, fmi);
1133 vrm.transferRestorePts(MI, fmi);
1134 vrm.transferEmergencySpills(MI, fmi);
1136 i2miMap_[InstrIdx /InstrSlots::NUM] = fmi;
1137 mi2iMap_[fmi] = InstrIdx;
1138 MI = MBB.insert(MBB.erase(MI), fmi);
1145 /// canFoldMemoryOperand - Returns true if the specified load / store
1146 /// folding is possible.
1147 bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
1148 SmallVector<unsigned, 2> &Ops,
1150 // Filter the list of operand indexes that are to be folded. Abort if
1151 // any operand will prevent folding.
1152 unsigned MRInfo = 0;
1153 SmallVector<unsigned, 2> FoldOps;
1154 if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
1157 // It's only legal to remat for a use, not a def.
1158 if (ReMat && (MRInfo & VirtRegMap::isMod))
1161 return tii_->canFoldMemoryOperand(MI, FoldOps);
1164 bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
1165 SmallPtrSet<MachineBasicBlock*, 4> MBBs;
1166 for (LiveInterval::Ranges::const_iterator
1167 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1168 std::vector<IdxMBBPair>::const_iterator II =
1169 std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), I->start);
1170 if (II == Idx2MBBMap.end())
1172 if (I->end > II->first) // crossing a MBB.
1174 MBBs.insert(II->second);
1175 if (MBBs.size() > 1)
1181 /// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of
1182 /// interval on to-be re-materialized operands of MI) with new register.
1183 void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
1184 MachineInstr *MI, unsigned NewVReg,
1186 // There is an implicit use. That means one of the other operand is
1187 // being remat'ed and the remat'ed instruction has li.reg as an
1188 // use operand. Make sure we rewrite that as well.
1189 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1190 MachineOperand &MO = MI->getOperand(i);
1193 unsigned Reg = MO.getReg();
1194 if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
1196 if (!vrm.isReMaterialized(Reg))
1198 MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
1199 MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg);
1201 UseMO->setReg(NewVReg);
/// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
/// for addIntervalsForSpills to rewrite uses / defs for the given live range.
///
/// NOTE(review): this block appears to have lost lines in extraction --
/// several `if` statements are missing their controlled statements, the
/// `RestartInstruction:` label targeted by the `goto` below is absent, a
/// `VirtRegMap &vrm` parameter line seems to be missing (the body uses
/// `vrm`), and a number of closing braces / `continue;` lines are gone.
/// Code is left byte-for-byte as found; only comments were added. Restore
/// the missing lines from version control before building -- TODO confirm.
bool LiveIntervals::
rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
                 bool TrySplit, unsigned index, unsigned end, MachineInstr *MI,
                 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                 unsigned Slot, int LdSlot,
                 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                 const TargetRegisterClass* rc,
                 SmallVector<int, 4> &ReMatIds,
                 const MachineLoopInfo *loopInfo,
                 unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
                 DenseMap<unsigned,unsigned> &MBBVRegsMap,
                 std::vector<LiveInterval*> &NewLIs, float &SSWeight) {
  MachineBasicBlock *MBB = MI->getParent();
  // Loop depth feeds getSpillWeight: accesses inside deeper loops cost more.
  unsigned loopDepth = loopInfo->getLoopDepth(MBB);
  bool CanFold = false;
  for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
    MachineOperand& mop = MI->getOperand(i);
    unsigned Reg = mop.getReg();
    unsigned RegI = Reg;
    // Presumably skips non-matching / physical registers here -- the
    // controlled statement for this `if` is missing. TODO confirm.
    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
    bool TryFold = !DefIsReMat;
    bool FoldSS = true; // Default behavior unless it's a remat.
    int FoldSlot = Slot;
    // If this is the rematerializable definition MI itself and
    // all of its uses are rematerialized, simply delete it.
    if (MI == ReMatOrigDefMI && CanDelete) {
      DOUT << "\t\t\t\tErasing re-materlizable def: ";
      RemoveMachineInstrFromMaps(MI);
      vrm.RemoveMachineInstrFromMaps(MI);
      MI->eraseFromParent();
    // If def for this use can't be rematerialized, then try folding.
    // If def is rematerializable and it's a load, also try folding.
    TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
    // Try fold loads (from stack slot, constant pool, etc.) into uses.
    // Scan all of the operands of this instruction rewriting operands
    // to use NewVReg instead of li.reg as appropriate. We do this for
    // 1. If the instr reads the same spilled vreg multiple times, we
    //    want to reuse the NewVReg.
    // 2. If the instr is a two-addr instruction, we are required to
    //    keep the src/dst regs pinned.
    // Keep track of whether we replace a use and/or def so that we can
    // create the spill interval with the appropriate range.
    HasUse = mop.isUse();
    HasDef = mop.isDef();
    SmallVector<unsigned, 2> Ops;
    // Collect all later operands of MI that reference the same register so
    // they are rewritten together.
    for (unsigned j = i+1, e = MI->getNumOperands(); j != e; ++j) {
      const MachineOperand &MOj = MI->getOperand(j);
      unsigned RegJ = MOj.getReg();
      if (RegJ == 0 || TargetRegisterInfo::isPhysicalRegister(RegJ))
      HasUse |= MOj.isUse();
      HasDef |= MOj.isDef();
    if (HasUse && !li.liveAt(getUseIndex(index)))
      // Must be defined by an implicit def. It should not be spilled. Note,
      // this is for correctness reason. e.g.
      // 8   %reg1024<def> = IMPLICIT_DEF
      // 12  %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
      // The live range [12, 14) are not part of the r1024 live interval since
      // it's defined by an implicit def. It will not conflicts with live
      // interval of r1025. Now suppose both registers are spilled, you can
      // easily see a situation where both registers are reloaded before
      // the INSERT_SUBREG and both target registers that would overlap.
    // Update stack slot spill weight if we are splitting.
    float Weight = getSpillWeight(HasDef, HasUse, loopDepth);
    // Create a new virtual register for the spill interval.
    // Create the new register now so we can map the fold instruction
    // to the new register so when it is unfolded we get the correct
    // answer.
    bool CreatedNewVReg = false;
    NewVReg = mri_->createVirtualRegister(rc);
    CreatedNewVReg = true;
    // Do not fold load / store here if we are splitting. We'll find an
    // optimal point to insert a load / store later.
    if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
                             Ops, FoldSS, FoldSlot, NewVReg)) {
      // Folding the load/store can completely change the instruction in
      // unpredictable ways, rescan it from the beginning.
      // We need to give the new vreg the same stack slot as the
      // spilled interval.
      vrm.assignVirt2StackSlot(NewVReg, FoldSlot);
    if (isRemoved(MI)) {
      goto RestartInstruction;
    // We'll try to fold it later if it's profitable.
    CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat);
    mop.setReg(NewVReg);
    if (mop.isImplicit())
      rewriteImplicitOps(li, MI, NewVReg, vrm);
    // Reuse NewVReg for other reads.
    for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
      MachineOperand &mopj = MI->getOperand(Ops[j]);
      mopj.setReg(NewVReg);
      if (mopj.isImplicit())
        rewriteImplicitOps(li, MI, NewVReg, vrm);
    if (CreatedNewVReg) {
      // Record how to rematerialize the value for the new vreg.
      vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI/*, CanDelete*/);
      if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
        // Each valnum may have its own remat id.
        ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
      vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
    if (!CanDelete || (HasUse && HasDef)) {
      // If this is a two-addr instruction then its use operands are
      // rematerializable but its def is not. It should be assigned a
      // stack slot.
      vrm.assignVirt2StackSlot(NewVReg, Slot);
    vrm.assignVirt2StackSlot(NewVReg, Slot);
    } else if (HasUse && HasDef &&
               vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
      // If this interval hasn't been assigned a stack slot (because earlier
      // def is a deleted remat def), do it now.
      assert(Slot != VirtRegMap::NO_STACK_SLOT);
      vrm.assignVirt2StackSlot(NewVReg, Slot);
    // Re-matting an instruction with virtual register use. Add the
    // register as an implicit use on the use MI.
    if (DefIsReMat && ImpUse)
      MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
    // create a new register interval for this spill / remat.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    if (CreatedNewVReg) {
      NewLIs.push_back(&nI);
      MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
      vrm.setIsSplitFromReg(NewVReg, li.reg);
    if (CreatedNewVReg) {
      // Fresh interval: range covers load through the use slot.
      LiveRange LR(getLoadIndex(index), getUseIndex(index)+1,
                   nI.getNextValue(~0U, 0, VNInfoAllocator));
      // Extend the split live interval to this def / use.
      unsigned End = getUseIndex(index)+1;
      LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
                   nI.getValNumInfo(nI.getNumValNums()-1));
    // Def case: range covers the def slot through the store slot.
    LiveRange LR(getDefIndex(index), getStoreIndex(index),
                 nI.getNextValue(~0U, 0, VNInfoAllocator));
    DOUT << "\t\t\t\tAdded new interval: ";
    nI.print(DOUT, tri_);
1430 bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
1432 MachineBasicBlock *MBB, unsigned Idx) const {
1433 unsigned End = getMBBEndIdx(MBB);
1434 for (unsigned j = 0, ee = VNI->kills.size(); j != ee; ++j) {
1435 unsigned KillIdx = VNI->kills[j];
1436 if (KillIdx > Idx && KillIdx < End)
1442 /// RewriteInfo - Keep track of machine instrs that will be rewritten
1443 /// during spilling.
1445 struct RewriteInfo {
1450 RewriteInfo(unsigned i, MachineInstr *mi, bool u, bool d)
1451 : Index(i), MI(mi), HasUse(u), HasDef(d) {}
1454 struct RewriteInfoCompare {
1455 bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
1456 return LHS.Index < RHS.Index;
/// rewriteInstructionsForSpills - Rewrite all defs / uses of the given live
/// range, collecting per-MBB spill and restore points for the spiller.
///
/// NOTE(review): this block appears to have lost lines in extraction --
/// a `VirtRegMap &vrm` parameter line seems to be missing (the body passes
/// `vrm` through to rewriteInstructionForSpills), several `if` statements
/// are missing their controlled statements, and closing braces / `continue;`
/// lines are gone. Code is left byte-for-byte as found; only comments were
/// added. Restore the missing lines from version control -- TODO confirm.
void LiveIntervals::
rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
                    LiveInterval::Ranges::const_iterator &I,
                    MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                    unsigned Slot, int LdSlot,
                    bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                    const TargetRegisterClass* rc,
                    SmallVector<int, 4> &ReMatIds,
                    const MachineLoopInfo *loopInfo,
                    BitVector &SpillMBBs,
                    DenseMap<unsigned, std::vector<SRInfo> > &SpillIdxes,
                    BitVector &RestoreMBBs,
                    DenseMap<unsigned, std::vector<SRInfo> > &RestoreIdxes,
                    DenseMap<unsigned,unsigned> &MBBVRegsMap,
                    std::vector<LiveInterval*> &NewLIs, float &SSWeight) {
  bool AllCanFold = true;
  unsigned NewVReg = 0;
  // Round the live range down/up to whole-instruction boundaries.
  unsigned start = getBaseIndex(I->start);
  unsigned end = getBaseIndex(I->end-1) + InstrSlots::NUM;

  // First collect all the def / use in this live range that will be rewritten.
  // Make sure they are sorted according to instruction index.
  std::vector<RewriteInfo> RewriteMIs;
  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
         re = mri_->reg_end(); ri != re; ) {
    MachineInstr *MI = &*ri;
    MachineOperand &O = ri.getOperand();
    assert(!O.isImplicit() && "Spilling register that's used as implicit use?");
    unsigned index = getInstructionIndex(MI);
    // Skip operands outside this live range; presumably a `continue;` is
    // missing after this `if`. TODO confirm.
    if (index < start || index >= end)
    if (O.isUse() && !li.liveAt(getUseIndex(index)))
      // Must be defined by an implicit def. It should not be spilled. Note,
      // this is for correctness reason. e.g.
      // 8   %reg1024<def> = IMPLICIT_DEF
      // 12  %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
      // The live range [12, 14) are not part of the r1024 live interval since
      // it's defined by an implicit def. It will not conflicts with live
      // interval of r1025. Now suppose both registers are spilled, you can
      // easily see a situation where both registers are reloaded before
      // the INSERT_SUBREG and both target registers that would overlap.
    RewriteMIs.push_back(RewriteInfo(index, MI, O.isUse(), O.isDef()));
  std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());

  // ImpUse is the extra virtual register the remat'ed def reads, if any.
  unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
  // Now rewrite the defs and uses.
  for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
    RewriteInfo &rwi = RewriteMIs[i];
    unsigned index = rwi.Index;
    bool MIHasUse = rwi.HasUse;
    bool MIHasDef = rwi.HasDef;
    MachineInstr *MI = rwi.MI;
    // If MI def and/or use the same register multiple times, then there
    // are multiple entries.
    unsigned NumUses = MIHasUse;
    while (i != e && RewriteMIs[i].MI == MI) {
      assert(RewriteMIs[i].Index == index);
      bool isUse = RewriteMIs[i].HasUse;
      if (isUse) ++NumUses;
      MIHasDef |= RewriteMIs[i].HasDef;
    MachineBasicBlock *MBB = MI->getParent();

    if (ImpUse && MI != ReMatDefMI) {
      // Re-matting an instruction with virtual register use. Update the
      // register interval's spill weight to HUGE_VALF to prevent it from
      // being spilled.
      LiveInterval &ImpLi = getInterval(ImpUse);
      ImpLi.weight = HUGE_VALF;

    unsigned MBBId = MBB->getNumber();
    unsigned ThisVReg = 0;
    // Reuse the vreg already created for this register in this MBB, if any.
    DenseMap<unsigned,unsigned>::iterator NVI = MBBVRegsMap.find(MBBId);
    if (NVI != MBBVRegsMap.end()) {
      ThisVReg = NVI->second;
      // It's better to start a new interval to avoid artifically
      // extend the new interval.
      if (MIHasDef && !MIHasUse) {
        MBBVRegsMap.erase(MBB->getNumber());
    bool IsNew = ThisVReg == 0;
    // This ends the previous live interval. If all of its def / use
    // can be folded, give it a low spill weight.
    if (NewVReg && TrySplit && AllCanFold) {
      LiveInterval &nI = getOrCreateInterval(NewVReg);
    bool HasDef = false;
    bool HasUse = false;
    bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
                         index, end, MI, ReMatOrigDefMI, ReMatDefMI,
                         Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                         CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
                         ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs, SSWeight);
    if (!HasDef && !HasUse)
    AllCanFold &= CanFold;

    // Update weight of spill interval.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    // The spill weight is now infinity as it cannot be spilled again.
    nI.weight = HUGE_VALF;

    // Keep track of the last def and first use in each MBB.
    if (MI != ReMatOrigDefMI || !CanDelete) {
      bool HasKill = false;
      HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, getDefIndex(index));
      // If this is a two-address code, then this index starts a new VNInfo.
      const VNInfo *VNI = li.findDefinedVNInfo(getDefIndex(index));
      HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, getDefIndex(index));
      DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
        SpillIdxes.find(MBBId);
      if (SII == SpillIdxes.end()) {
        std::vector<SRInfo> S;
        S.push_back(SRInfo(index, NewVReg, true));
        SpillIdxes.insert(std::make_pair(MBBId, S));
      } else if (SII->second.back().vreg != NewVReg) {
        SII->second.push_back(SRInfo(index, NewVReg, true));
      } else if ((int)index > SII->second.back().index) {
        // If there is an earlier def and this is a two-address
        // instruction, then it's not possible to fold the store (which
        // would also fold the load).
        SRInfo &Info = SII->second.back();
        Info.canFold = !HasUse;
      SpillMBBs.set(MBBId);
    } else if (SII != SpillIdxes.end() &&
               SII->second.back().vreg == NewVReg &&
               (int)index > SII->second.back().index) {
      // There is an earlier def that's not killed (must be two-address).
      // The spill is no longer needed.
      SII->second.pop_back();
      if (SII->second.empty()) {
        SpillIdxes.erase(MBBId);
        SpillMBBs.reset(MBBId);

    DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
      SpillIdxes.find(MBBId);
    if (SII != SpillIdxes.end() &&
        SII->second.back().vreg == NewVReg &&
        (int)index > SII->second.back().index)
      // Use(s) following the last def, it's not safe to fold the spill.
      SII->second.back().canFold = false;
    DenseMap<unsigned, std::vector<SRInfo> >::iterator RII =
      RestoreIdxes.find(MBBId);
    if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
      // If we are splitting live intervals, only fold if it's the first
      // use and there isn't another use later in the MBB.
      RII->second.back().canFold = false;
    // Only need a reload if there isn't an earlier def / use.
    if (RII == RestoreIdxes.end()) {
      std::vector<SRInfo> Infos;
      Infos.push_back(SRInfo(index, NewVReg, true));
      RestoreIdxes.insert(std::make_pair(MBBId, Infos));
    RII->second.push_back(SRInfo(index, NewVReg, true));
    RestoreMBBs.set(MBBId);

    // Update spill weight.
    unsigned loopDepth = loopInfo->getLoopDepth(MBB);
    nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);

  if (NewVReg && TrySplit && AllCanFold) {
    // If all of its def / use can be folded, give it a low spill weight.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
1675 bool LiveIntervals::alsoFoldARestore(int Id, int index, unsigned vr,
1676 BitVector &RestoreMBBs,
1677 DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1678 if (!RestoreMBBs[Id])
1680 std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1681 for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1682 if (Restores[i].index == index &&
1683 Restores[i].vreg == vr &&
1684 Restores[i].canFold)
1689 void LiveIntervals::eraseRestoreInfo(int Id, int index, unsigned vr,
1690 BitVector &RestoreMBBs,
1691 DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1692 if (!RestoreMBBs[Id])
1694 std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1695 for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1696 if (Restores[i].index == index && Restores[i].vreg)
1697 Restores[i].index = -1;
1700 /// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being
1701 /// spilled and create empty intervals for their uses.
1703 LiveIntervals::handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm,
1704 const TargetRegisterClass* rc,
1705 std::vector<LiveInterval*> &NewLIs) {
1706 for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
1707 re = mri_->reg_end(); ri != re; ) {
1708 MachineOperand &O = ri.getOperand();
1709 MachineInstr *MI = &*ri;
1712 assert(MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF &&
1713 "Register def was not rewritten?");
1714 RemoveMachineInstrFromMaps(MI);
1715 vrm.RemoveMachineInstrFromMaps(MI);
1716 MI->eraseFromParent();
1718 // This must be an use of an implicit_def so it's not part of the live
1719 // interval. Create a new empty live interval for it.
1720 // FIXME: Can we simply erase some of the instructions? e.g. Stores?
1721 unsigned NewVReg = mri_->createVirtualRegister(rc);
1723 vrm.setIsImplicitlyDefined(NewVReg);
1724 NewLIs.push_back(&getOrCreateInterval(NewVReg));
1725 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1726 MachineOperand &MO = MI->getOperand(i);
1727 if (MO.isReg() && MO.getReg() == li.reg)
1736 bool operator()(LiveInterval* A, LiveInterval* B) {
1737 return A->beginNumber() < B->beginNumber();
1742 std::vector<LiveInterval*> LiveIntervals::
1743 addIntervalsForSpillsFast(const LiveInterval &li,
1744 const MachineLoopInfo *loopInfo,
1745 VirtRegMap &vrm, float& SSWeight) {
1746 unsigned slot = vrm.assignVirt2StackSlot(li.reg);
1748 std::vector<LiveInterval*> added;
1750 assert(li.weight != HUGE_VALF &&
1751 "attempt to spill already spilled interval!");
1753 DOUT << "\t\t\t\tadding intervals for spills for interval: ";
1757 const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
1761 MachineRegisterInfo::reg_iterator RI = mri_->reg_begin(li.reg);
1762 while (RI != mri_->reg_end()) {
1763 MachineInstr* MI = &*RI;
1765 SmallVector<unsigned, 2> Indices;
1766 bool HasUse = false;
1767 bool HasDef = false;
1769 for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
1770 MachineOperand& mop = MI->getOperand(i);
1771 if (!mop.isReg() || mop.getReg() != li.reg) continue;
1773 HasUse |= MI->getOperand(i).isUse();
1774 HasDef |= MI->getOperand(i).isDef();
1776 Indices.push_back(i);
1779 if (!tryFoldMemoryOperand(MI, vrm, NULL, getInstructionIndex(MI),
1780 Indices, true, slot, li.reg)) {
1781 unsigned NewVReg = mri_->createVirtualRegister(rc);
1783 vrm.assignVirt2StackSlot(NewVReg, slot);
1785 // create a new register for this spill
1786 LiveInterval &nI = getOrCreateInterval(NewVReg);
1788 // the spill weight is now infinity as it
1789 // cannot be spilled again
1790 nI.weight = HUGE_VALF;
1792 // Rewrite register operands to use the new vreg.
1793 for (SmallVectorImpl<unsigned>::iterator I = Indices.begin(),
1794 E = Indices.end(); I != E; ++I) {
1795 MI->getOperand(*I).setReg(NewVReg);
1797 if (MI->getOperand(*I).isUse())
1798 MI->getOperand(*I).setIsKill(true);
1801 // Fill in the new live interval.
1802 unsigned index = getInstructionIndex(MI);
1804 LiveRange LR(getLoadIndex(index), getUseIndex(index),
1805 nI.getNextValue(~0U, 0, getVNInfoAllocator()));
1808 vrm.addRestorePoint(NewVReg, MI);
1811 LiveRange LR(getDefIndex(index), getStoreIndex(index),
1812 nI.getNextValue(~0U, 0, getVNInfoAllocator()));
1815 vrm.addSpillPoint(NewVReg, true, MI);
1818 added.push_back(&nI);
1820 DOUT << "\t\t\t\tadded new interval: ";
1824 unsigned loopDepth = loopInfo->getLoopDepth(MI->getParent());
1827 SSWeight += getSpillWeight(true, true, loopDepth);
1829 SSWeight += getSpillWeight(false, true, loopDepth);
1831 SSWeight += getSpillWeight(true, false, loopDepth);
1835 RI = mri_->reg_begin(li.reg);
1838 // Clients expect the new intervals to be returned in sorted order.
1839 std::sort(added.begin(), added.end(), LISorter());
1844 std::vector<LiveInterval*> LiveIntervals::
1845 addIntervalsForSpills(const LiveInterval &li,
1846 SmallVectorImpl<LiveInterval*> &SpillIs,
1847 const MachineLoopInfo *loopInfo, VirtRegMap &vrm,
1850 if (EnableFastSpilling)
1851 return addIntervalsForSpillsFast(li, loopInfo, vrm, SSWeight);
1853 assert(li.weight != HUGE_VALF &&
1854 "attempt to spill already spilled interval!");
1856 DOUT << "\t\t\t\tadding intervals for spills for interval: ";
1857 li.print(DOUT, tri_);
1860 // Spill slot weight.
1863 // Each bit specify whether a spill is required in the MBB.
1864 BitVector SpillMBBs(mf_->getNumBlockIDs());
1865 DenseMap<unsigned, std::vector<SRInfo> > SpillIdxes;
1866 BitVector RestoreMBBs(mf_->getNumBlockIDs());
1867 DenseMap<unsigned, std::vector<SRInfo> > RestoreIdxes;
1868 DenseMap<unsigned,unsigned> MBBVRegsMap;
1869 std::vector<LiveInterval*> NewLIs;
1870 const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
1872 unsigned NumValNums = li.getNumValNums();
1873 SmallVector<MachineInstr*, 4> ReMatDefs;
1874 ReMatDefs.resize(NumValNums, NULL);
1875 SmallVector<MachineInstr*, 4> ReMatOrigDefs;
1876 ReMatOrigDefs.resize(NumValNums, NULL);
1877 SmallVector<int, 4> ReMatIds;
1878 ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
1879 BitVector ReMatDelete(NumValNums);
1880 unsigned Slot = VirtRegMap::MAX_STACK_SLOT;
1882 // Spilling a split live interval. It cannot be split any further. Also,
1883 // it's also guaranteed to be a single val# / range interval.
1884 if (vrm.getPreSplitReg(li.reg)) {
1885 vrm.setIsSplitFromReg(li.reg, 0);
1886 // Unset the split kill marker on the last use.
1887 unsigned KillIdx = vrm.getKillPoint(li.reg);
1889 MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
1890 assert(KillMI && "Last use disappeared?");
1891 int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
1892 assert(KillOp != -1 && "Last use disappeared?");
1893 KillMI->getOperand(KillOp).setIsKill(false);
1895 vrm.removeKillPoint(li.reg);
1896 bool DefIsReMat = vrm.isReMaterialized(li.reg);
1897 Slot = vrm.getStackSlot(li.reg);
1898 assert(Slot != VirtRegMap::MAX_STACK_SLOT);
1899 MachineInstr *ReMatDefMI = DefIsReMat ?
1900 vrm.getReMaterializedMI(li.reg) : NULL;
1902 bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1903 bool isLoad = isLoadSS ||
1904 (DefIsReMat && (ReMatDefMI->getDesc().canFoldAsLoad()));
1905 bool IsFirstRange = true;
1906 for (LiveInterval::Ranges::const_iterator
1907 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1908 // If this is a split live interval with multiple ranges, it means there
1909 // are two-address instructions that re-defined the value. Only the
1910 // first def can be rematerialized!
1912 // Note ReMatOrigDefMI has already been deleted.
1913 rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
1914 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1915 false, vrm, rc, ReMatIds, loopInfo,
1916 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1917 MBBVRegsMap, NewLIs, SSWeight);
1919 rewriteInstructionsForSpills(li, false, I, NULL, 0,
1920 Slot, 0, false, false, false,
1921 false, vrm, rc, ReMatIds, loopInfo,
1922 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1923 MBBVRegsMap, NewLIs, SSWeight);
1925 IsFirstRange = false;
1928 SSWeight = 0.0f; // Already accounted for when split.
1929 handleSpilledImpDefs(li, vrm, rc, NewLIs);
1933 bool TrySplit = SplitAtBB && !intervalIsInOneMBB(li);
1934 if (SplitLimit != -1 && (int)numSplits >= SplitLimit)
1938 bool NeedStackSlot = false;
1939 for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
1941 const VNInfo *VNI = *i;
1942 unsigned VN = VNI->id;
1943 unsigned DefIdx = VNI->def;
1945 continue; // Dead val#.
1946 // Is the def for the val# rematerializable?
1947 MachineInstr *ReMatDefMI = (DefIdx == ~0u)
1948 ? 0 : getInstructionFromIndex(DefIdx);
1950 if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, SpillIs, dummy)) {
1951 // Remember how to remat the def of this val#.
1952 ReMatOrigDefs[VN] = ReMatDefMI;
1953 // Original def may be modified so we have to make a copy here.
1954 MachineInstr *Clone = mf_->CloneMachineInstr(ReMatDefMI);
1955 ClonedMIs.push_back(Clone);
1956 ReMatDefs[VN] = Clone;
1958 bool CanDelete = true;
1959 if (VNI->hasPHIKill) {
1960 // A kill is a phi node, not all of its uses can be rematerialized.
1961 // It must not be deleted.
1963 // Need a stack slot if there is any live range where uses cannot be
1965 NeedStackSlot = true;
1968 ReMatDelete.set(VN);
1970 // Need a stack slot if there is any live range where uses cannot be
1972 NeedStackSlot = true;
1976 // One stack slot per live interval.
1977 if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0)
1978 Slot = vrm.assignVirt2StackSlot(li.reg);
1980 // Create new intervals and rewrite defs and uses.
1981 for (LiveInterval::Ranges::const_iterator
1982 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1983 MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
1984 MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
1985 bool DefIsReMat = ReMatDefMI != NULL;
1986 bool CanDelete = ReMatDelete[I->valno->id];
1988 bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1989 bool isLoad = isLoadSS ||
1990 (DefIsReMat && ReMatDefMI->getDesc().canFoldAsLoad());
1991 rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
1992 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1993 CanDelete, vrm, rc, ReMatIds, loopInfo,
1994 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1995 MBBVRegsMap, NewLIs, SSWeight);
1998 // Insert spills / restores if we are splitting.
2000 handleSpilledImpDefs(li, vrm, rc, NewLIs);
2004 SmallPtrSet<LiveInterval*, 4> AddedKill;
2005 SmallVector<unsigned, 2> Ops;
2006 if (NeedStackSlot) {
2007 int Id = SpillMBBs.find_first();
2009 MachineBasicBlock *MBB = mf_->getBlockNumbered(Id);
2010 unsigned loopDepth = loopInfo->getLoopDepth(MBB);
2011 std::vector<SRInfo> &spills = SpillIdxes[Id];
2012 for (unsigned i = 0, e = spills.size(); i != e; ++i) {
2013 int index = spills[i].index;
2014 unsigned VReg = spills[i].vreg;
2015 LiveInterval &nI = getOrCreateInterval(VReg);
2016 bool isReMat = vrm.isReMaterialized(VReg);
2017 MachineInstr *MI = getInstructionFromIndex(index);
2018 bool CanFold = false;
2019 bool FoundUse = false;
2021 if (spills[i].canFold) {
2023 for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
2024 MachineOperand &MO = MI->getOperand(j);
2025 if (!MO.isReg() || MO.getReg() != VReg)
2032 (!FoundUse && !alsoFoldARestore(Id, index, VReg,
2033 RestoreMBBs, RestoreIdxes))) {
2034 // MI has two-address uses of the same register. If the use
2035 // isn't the first and only use in the BB, then we can't fold
2036 // it. FIXME: Move this to rewriteInstructionsForSpills.
2043 // Fold the store into the def if possible.
2044 bool Folded = false;
2045 if (CanFold && !Ops.empty()) {
2046 if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot,VReg)){
2049 // Also folded uses, do not issue a load.
2050 eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
2051 nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
2053 nI.removeRange(getDefIndex(index), getStoreIndex(index));
2057 // Otherwise tell the spiller to issue a spill.
2059 LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
2060 bool isKill = LR->end == getStoreIndex(index);
2061 if (!MI->registerDefIsDead(nI.reg))
2062 // No need to spill a dead def.
2063 vrm.addSpillPoint(VReg, isKill, MI);
2065 AddedKill.insert(&nI);
2068 // Update spill slot weight.
2070 SSWeight += getSpillWeight(true, false, loopDepth);
2072 Id = SpillMBBs.find_next(Id);
2076 int Id = RestoreMBBs.find_first();
2078 MachineBasicBlock *MBB = mf_->getBlockNumbered(Id);
2079 unsigned loopDepth = loopInfo->getLoopDepth(MBB);
2081 std::vector<SRInfo> &restores = RestoreIdxes[Id];
2082 for (unsigned i = 0, e = restores.size(); i != e; ++i) {
2083 int index = restores[i].index;
2086 unsigned VReg = restores[i].vreg;
2087 LiveInterval &nI = getOrCreateInterval(VReg);
2088 bool isReMat = vrm.isReMaterialized(VReg);
2089 MachineInstr *MI = getInstructionFromIndex(index);
2090 bool CanFold = false;
2092 if (restores[i].canFold) {
2094 for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
2095 MachineOperand &MO = MI->getOperand(j);
2096 if (!MO.isReg() || MO.getReg() != VReg)
2100 // If this restore were to be folded, it would have been folded
2109 // Fold the load into the use if possible.
2110 bool Folded = false;
2111 if (CanFold && !Ops.empty()) {
2113 Folded = tryFoldMemoryOperand(MI, vrm, NULL,index,Ops,true,Slot,VReg);
2115 MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
2117 bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
2118 // If the rematerializable def is a load, also try to fold it.
2119 if (isLoadSS || ReMatDefMI->getDesc().canFoldAsLoad())
2120 Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
2121 Ops, isLoadSS, LdSlot, VReg);
2123 unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
2125 // Re-matting an instruction with virtual register use. Add the
2126 // register as an implicit use on the use MI and update the register
2127 // interval's spill weight to HUGE_VALF to prevent it from being
2129 LiveInterval &ImpLi = getInterval(ImpUse);
2130 ImpLi.weight = HUGE_VALF;
2131 MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
2136 // If folding is not possible / failed, then tell the spiller to issue a
2137 // load / rematerialization for us.
2139 nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
2141 vrm.addRestorePoint(VReg, MI);
2143 // Update spill slot weight.
2145 SSWeight += getSpillWeight(false, true, loopDepth);
2147 Id = RestoreMBBs.find_next(Id);
2150 // Finalize intervals: add kills, finalize spill weights, and filter out
2152 std::vector<LiveInterval*> RetNewLIs;
2153 for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
2154 LiveInterval *LI = NewLIs[i];
2156 LI->weight /= InstrSlots::NUM * getApproximateInstructionCount(*LI);
2157 if (!AddedKill.count(LI)) {
2158 LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
2159 unsigned LastUseIdx = getBaseIndex(LR->end);
2160 MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
2161 int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
2162 assert(UseIdx != -1);
2163 if (LastUse->getOperand(UseIdx).isImplicit() ||
2164 LastUse->getDesc().getOperandConstraint(UseIdx,TOI::TIED_TO) == -1){
2165 LastUse->getOperand(UseIdx).setIsKill();
2166 vrm.addKillPoint(LI->reg, LastUseIdx);
2169 RetNewLIs.push_back(LI);
2173 handleSpilledImpDefs(li, vrm, rc, RetNewLIs);
/// hasAllocatableSuperReg - Return true if the specified physical register has
/// any super register that's allocatable.
///
/// NOTE(review): the loop body's early return and the trailing return are
/// elided in this excerpt -- confirm against the full file.
bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
  // Walk Reg's (null-terminated) super-register list; a super register
  // qualifies only if it is allocatable *and* has a live interval tracked
  // by this analysis.
  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS)
    if (allocatableRegs_[*AS] && hasInterval(*AS))
/// getRepresentativeReg - Find the largest super register of the specified
/// physical register.
///
/// NOTE(review): the if-body and the final return are elided in this
/// excerpt -- presumably BestReg is updated and returned; confirm against
/// the full file.
unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
  // Find the largest super-register that is allocatable.
  // Start from Reg itself; it is the answer if no better super reg exists.
  unsigned BestReg = Reg;
  // Super-register list is null-terminated.
  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
    unsigned SuperReg = *AS;
    // A super register that has no allocatable super register of its own is
    // the top of the allocatable chain; it must also itself have a tracked
    // interval to be usable as the representative.
    if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) {
/// getNumConflictsWithPhysReg - Return the number of uses and defs of the
/// specified interval that conflicts with the specified physical register.
unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
                                                   unsigned PhysReg) const {
  unsigned NumConflicts = 0;
  // Measure conflicts against the representative (largest allocatable
  // super) register's interval so aliasing registers are accounted for.
  const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg));
  // Walk every operand (def or use) of li.reg in the function.
  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
       E = mri_->reg_end(); I != E; ++I) {
    MachineOperand &O = I.getOperand();
    MachineInstr *MI = O.getParent();
    unsigned Index = getInstructionIndex(MI);
    // Conflict: the physreg is live at this instruction's index.
    // NOTE(review): the increment of NumConflicts and closing braces are
    // elided in this excerpt -- confirm against the full file.
    if (pli.liveAt(Index))
  return NumConflicts;
/// spillPhysRegAroundRegDefsUses - Spill the specified physical register
/// around all defs and uses of the specified interval.
///
/// NOTE(review): several control-flow lines (continue statements, else
/// branches, closing braces) are elided in this excerpt -- confirm exact
/// flow against the full file.
void LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
                                                  unsigned PhysReg, VirtRegMap &vrm) {
  // Spill the representative (largest allocatable super) register so the
  // emergency spill covers PhysReg and everything reached through it.
  unsigned SpillReg = getRepresentativeReg(PhysReg);
  for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
    // If there are registers which alias PhysReg, but which are not a
    // sub-register of the chosen representative super register. Assert
    // since we can't handle it yet.
    assert(*AS == SpillReg || !allocatableRegs_[*AS] ||
           tri_->isSuperRegister(*AS, SpillReg));
  LiveInterval &pli = getInterval(SpillReg);
  // Visit each instruction referencing li.reg only once, even when it has
  // several operands on the register.
  SmallPtrSet<MachineInstr*, 8> SeenMIs;
  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
       E = mri_->reg_end(); I != E; ++I) {
    MachineOperand &O = I.getOperand();
    MachineInstr *MI = O.getParent();
    // Skip instructions already handled (continue elided in excerpt).
    if (SeenMIs.count(MI))
    unsigned Index = getInstructionIndex(MI);
    if (pli.liveAt(Index)) {
      // The physreg is live across this def/use of li: record an emergency
      // spill point and carve this instruction's slot span out of the
      // physreg's interval.
      vrm.addEmergencySpill(SpillReg, MI);
      unsigned StartIdx = getLoadIndex(Index);
      unsigned EndIdx = getStoreIndex(Index)+1;
      if (pli.isInOneLiveRange(StartIdx, EndIdx))
        pli.removeRange(StartIdx, EndIdx);
      // Fallback path: cannot carve the range out of a single live range --
      // report that register allocation has run out of registers.
      cerr << "Ran out of registers during register allocation!\n";
      // Inline asm with over-constrained operands is a common cause; point
      // the user at the offending statement.
      if (MI->getOpcode() == TargetInstrInfo::INLINEASM) {
        cerr << "Please check your inline asm statement for invalid "
             << "constraints:\n";
        MI->print(cerr.stream(), tm_);
      // Also trim any sub-register intervals of SpillReg that are live at
      // this instruction, keeping them consistent with the spilled super reg.
      for (const unsigned* AS = tri_->getSubRegisters(SpillReg); *AS; ++AS) {
        // Skip sub registers with no tracked interval (continue elided).
        if (!hasInterval(*AS))
        LiveInterval &spli = getInterval(*AS);
        if (spli.liveAt(Index))
          spli.removeRange(getLoadIndex(Index), getStoreIndex(Index)+1);
2267 LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg,
2268 MachineInstr* startInst) {
2269 LiveInterval& Interval = getOrCreateInterval(reg);
2270 VNInfo* VN = Interval.getNextValue(
2271 getInstructionIndex(startInst) + InstrSlots::DEF,
2272 startInst, getVNInfoAllocator());
2273 VN->hasPHIKill = true;
2274 VN->kills.push_back(getMBBEndIdx(startInst->getParent()));
2275 LiveRange LR(getInstructionIndex(startInst) + InstrSlots::DEF,
2276 getMBBEndIdx(startInst->getParent()) + 1, VN);
2277 Interval.addRange(LR);