//===-- LiveIntervalAnalysis.cpp - Live Interval Analysis ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the LiveInterval analysis pass which is used
// by the Linear Scan Register allocator. This pass linearizes the
// basic blocks of the function in DFS order and uses the
// LiveVariables pass to conservatively compute live intervals for
// each virtual and physical register.
//
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "liveintervals"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "VirtRegMap.h"
#include "llvm/Value.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/ProcessImplicitDefs.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include <algorithm>
#include <cmath>
using namespace llvm;
// Hidden options to help with debugging.
static cl::opt<bool> DisableReMat("disable-rematerialization",
                                  cl::init(false), cl::Hidden);

static cl::opt<bool> EnableFastSpilling("fast-spill",
                                        cl::init(false), cl::Hidden);

STATISTIC(numIntervals , "Number of original intervals");
STATISTIC(numFolds     , "Number of loads/stores folded into instructions");
STATISTIC(numSplits    , "Number of intervals split");

char LiveIntervals::ID = 0;
static RegisterPass<LiveIntervals> X("liveintervals", "Live Interval Analysis");
void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addRequired<AliasAnalysis>();
  AU.addPreserved<AliasAnalysis>();
  AU.addPreserved<LiveVariables>();
  AU.addRequired<LiveVariables>();
  AU.addPreservedID(MachineLoopInfoID);
  AU.addPreservedID(MachineDominatorsID);
  AU.addPreservedID(PHIEliminationID);
  AU.addRequiredID(PHIEliminationID);
  AU.addRequiredID(TwoAddressInstructionPassID);
  AU.addPreserved<ProcessImplicitDefs>();
  AU.addRequired<ProcessImplicitDefs>();
  AU.addPreserved<SlotIndexes>();
  AU.addRequiredTransitive<SlotIndexes>();
  MachineFunctionPass::getAnalysisUsage(AU);
}
void LiveIntervals::releaseMemory() {
  // Free the live intervals themselves.
  for (DenseMap<unsigned, LiveInterval*>::iterator I = r2iMap_.begin(),
       E = r2iMap_.end(); I != E; ++I)
    delete I->second;
  r2iMap_.clear();

  // Release VNInfo memory regions after all VNInfo objects are dtor'd.
  VNInfoAllocator.Reset();
  while (!CloneMIs.empty()) {
    MachineInstr *MI = CloneMIs.back();
    CloneMIs.pop_back();
    mf_->DeleteMachineInstr(MI);
  }
}
/// runOnMachineFunction - Compute live intervals for the whole function.
bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
  mf_ = &fn;
  mri_ = &mf_->getRegInfo();
  tm_ = &fn.getTarget();
  tri_ = tm_->getRegisterInfo();
  tii_ = tm_->getInstrInfo();
  aa_ = &getAnalysis<AliasAnalysis>();
  lv_ = &getAnalysis<LiveVariables>();
  indexes_ = &getAnalysis<SlotIndexes>();
  allocatableRegs_ = tri_->getAllocatableSet(fn);

  computeIntervals();

  numIntervals += getNumIntervals();

  DEBUG(dump());
  return true;
}
/// print - Implement the dump method.
void LiveIntervals::print(raw_ostream &OS, const Module* ) const {
  OS << "********** INTERVALS **********\n";
  for (const_iterator I = begin(), E = end(); I != E; ++I) {
    I->second->print(OS, tri_);
    OS << "\n";
  }

  printInstrs(OS);
}

void LiveIntervals::printInstrs(raw_ostream &OS) const {
  OS << "********** MACHINEINSTRS **********\n";

  for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end();
       mbbi != mbbe; ++mbbi) {
    OS << "BB#" << mbbi->getNumber()
       << ":\t\t# derived from " << mbbi->getName() << "\n";
    for (MachineBasicBlock::iterator mii = mbbi->begin(),
           mie = mbbi->end(); mii != mie; ++mii) {
      OS << getInstructionIndex(mii) << '\t' << *mii;
    }
  }
}

void LiveIntervals::dumpInstrs() const {
  printInstrs(errs());
}
bool LiveIntervals::conflictsWithPhysReg(const LiveInterval &li,
                                         VirtRegMap &vrm, unsigned reg) {
  // We don't handle fancy stuff crossing basic block boundaries
  if (li.ranges.size() != 1)
    return true;
  const LiveRange &range = li.ranges.front();
  SlotIndex idx = range.start.getBaseIndex();
  SlotIndex end = range.end.getPrevSlot().getBaseIndex().getNextIndex();
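  // Note: every instruction index carries four consecutive slots (load, use,
  // def, store). getBaseIndex() rounds an index down to its instruction
  // boundary, so the two lines above normalize the half-open range
  // [start, end) to whole instruction indexes before scanning. For example,
  // a range ending at an instruction's use slot yields an 'end' one full
  // instruction past that instruction's base index.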
  // Skip deleted instructions
  MachineInstr *firstMI = getInstructionFromIndex(idx);
  while (!firstMI && idx != end) {
    idx = idx.getNextIndex();
    firstMI = getInstructionFromIndex(idx);
  }
  if (!firstMI)
    return false;

  // Find last instruction in range
  SlotIndex lastIdx = end.getPrevIndex();
  MachineInstr *lastMI = getInstructionFromIndex(lastIdx);
  while (!lastMI && lastIdx != idx) {
    lastIdx = lastIdx.getPrevIndex();
    lastMI = getInstructionFromIndex(lastIdx);
  }
  if (!lastMI)
    return false;

  // Range cannot cross basic block boundaries or terminators
  MachineBasicBlock *MBB = firstMI->getParent();
  if (MBB != lastMI->getParent() || lastMI->getDesc().isTerminator())
    return true;

  MachineBasicBlock::const_iterator E = lastMI;
  ++E;
  for (MachineBasicBlock::const_iterator I = firstMI; I != E; ++I) {
    const MachineInstr &MI = *I;

    // Allow copies to and from li.reg
    unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
    if (tii_->isMoveInstr(MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
      if (SrcReg == li.reg || DstReg == li.reg)
        continue;

    // Check for operands using reg
    for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
      const MachineOperand& mop = MI.getOperand(i);
      if (!mop.isReg())
        continue;
      unsigned PhysReg = mop.getReg();
      if (PhysReg == 0 || PhysReg == li.reg)
        continue;
      if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
        if (!vrm.hasPhys(PhysReg))
          continue;
        PhysReg = vrm.getPhys(PhysReg);
      }
      if (PhysReg && tri_->regsOverlap(PhysReg, reg))
        return true;
    }
  }

  // No conflicts found.
  return false;
}
/// conflictsWithPhysRegRef - Similar to conflictsWithPhysReg except
/// it can check uses as well.
bool LiveIntervals::conflictsWithPhysRegRef(LiveInterval &li,
                                            unsigned Reg, bool CheckUse,
                                  SmallPtrSet<MachineInstr*,32> &JoinedCopies) {
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    for (SlotIndex index = I->start.getBaseIndex(),
           end = I->end.getPrevSlot().getBaseIndex().getNextIndex();
         index != end;
         index = index.getNextIndex()) {
      MachineInstr *MI = getInstructionFromIndex(index);
      if (!MI)
        continue;               // skip deleted instructions

      if (JoinedCopies.count(MI))
        continue;
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand& MO = MI->getOperand(i);
        if (!MO.isReg())
          continue;
        if (MO.isUse() && !CheckUse)
          continue;
        unsigned PhysReg = MO.getReg();
        if (PhysReg == 0 || TargetRegisterInfo::isVirtualRegister(PhysReg))
          continue;
        if (tri_->isSubRegister(Reg, PhysReg))
          return true;
      }
    }
  }

  return false;
}
static void printRegName(unsigned reg, const TargetRegisterInfo* tri_) {
  if (TargetRegisterInfo::isPhysicalRegister(reg))
    errs() << tri_->getName(reg);
  else
    errs() << "%reg" << reg;
}
void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
                                             MachineBasicBlock::iterator mi,
                                             SlotIndex MIIdx,
                                             MachineOperand& MO,
                                             unsigned MOIdx,
                                             LiveInterval &interval) {
  DEBUG({
      errs() << "\t\tregister: ";
      printRegName(interval.reg, tri_);
    });

  // Virtual registers may be defined multiple times (due to phi
  // elimination and 2-addr elimination). Much of what we do only has to be
  // done once for the vreg. We use an empty interval to detect the first
  // time we see a vreg.
  LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);
  if (interval.empty()) {
    // Get the Idx of the defining instructions.
    SlotIndex defIndex = MIIdx.getDefIndex();
    // Earlyclobbers move back one, so that they overlap the live range
    // of inputs.
    if (MO.isEarlyClobber())
      defIndex = MIIdx.getUseIndex();
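    // (Anchoring an earlyclobber def at the use slot makes its live range
    // overlap the ranges of the instruction's inputs, so the allocator can
    // never assign it the same register as an input; inline-asm earlyclobber
    // operands are the typical source of such defs.)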
    VNInfo *ValNo;
    MachineInstr *CopyMI = NULL;
    unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
    if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
        mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
        mi->getOpcode() == TargetInstrInfo::SUBREG_TO_REG ||
        tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
      CopyMI = mi;
    // Earlyclobbers move back one.
    ValNo = interval.getNextValue(defIndex, CopyMI, true, VNInfoAllocator);

    assert(ValNo->id == 0 && "First value in interval is not 0?");

    // Loop over all of the blocks that the vreg is defined in. There are
    // two cases we have to handle here. The most common case is a vreg
    // whose lifetime is contained within a basic block. In this case there
    // will be a single kill, in MBB, which comes after the definition.
    if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
      // FIXME: what about dead vars?
      SlotIndex killIdx;
      if (vi.Kills[0] != mi)
        killIdx = getInstructionIndex(vi.Kills[0]).getDefIndex();
      else
        killIdx = defIndex.getStoreIndex();

      // If the kill happens after the definition, we have an intra-block
      // live range.
      if (killIdx > defIndex) {
        assert(vi.AliveBlocks.empty() &&
               "Shouldn't be alive across any blocks!");
        LiveRange LR(defIndex, killIdx, ValNo);
        interval.addRange(LR);
        DEBUG(errs() << " +" << LR << "\n");
        ValNo->addKill(killIdx);
        return;
      }
    }
    // The other case we handle is when a virtual register lives to the end
    // of the defining block, potentially live across some blocks, then is
    // live into some number of blocks, but gets killed. Start by adding a
    // range that goes from this definition to the end of the defining block.
    LiveRange NewLR(defIndex, getMBBEndIdx(mbb).getNextIndex().getLoadIndex(),
                    ValNo);
    DEBUG(errs() << " +" << NewLR);
    interval.addRange(NewLR);

    // Iterate over all of the blocks that the variable is completely
    // live in, adding [instrIndex(begin), instrIndex(end)+4) to the
    // live interval.
    for (SparseBitVector<>::iterator I = vi.AliveBlocks.begin(),
         E = vi.AliveBlocks.end(); I != E; ++I) {
      LiveRange LR(
          getMBBStartIdx(mf_->getBlockNumbered(*I)),
          getMBBEndIdx(mf_->getBlockNumbered(*I)).getNextIndex().getLoadIndex(),
          ValNo);
      interval.addRange(LR);
      DEBUG(errs() << " +" << LR);
    }

    // Finally, this virtual register is live from the start of any killing
    // block to the 'use' slot of the killing instruction.
    for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
      MachineInstr *Kill = vi.Kills[i];
      SlotIndex killIdx =
        getInstructionIndex(Kill).getDefIndex();
      LiveRange LR(getMBBStartIdx(Kill->getParent()), killIdx, ValNo);
      interval.addRange(LR);
      ValNo->addKill(killIdx);
      DEBUG(errs() << " +" << LR);
    }
  } else {
    // If this is the second time we see a virtual register definition, it
    // must be due to phi elimination or two addr elimination. If this is
    // the result of two address elimination, then the vreg is one of the
    // def-and-use register operands.
    if (mi->isRegTiedToUseOperand(MOIdx)) {
      // If this is a two-address definition, then we have already processed
      // the live range. The only problem is that we didn't realize there
      // are actually two values in the live interval. Because of this we
      // need to take the LiveRegion that defines this register and split it
      // into two values.
      assert(interval.containsOneValue());
      SlotIndex DefIndex = interval.getValNumInfo(0)->def.getDefIndex();
      SlotIndex RedefIndex = MIIdx.getDefIndex();
      if (MO.isEarlyClobber())
        RedefIndex = MIIdx.getUseIndex();

      const LiveRange *OldLR =
        interval.getLiveRangeContaining(RedefIndex.getUseIndex());
      VNInfo *OldValNo = OldLR->valno;

      // Delete the initial value, which should be short and continuous,
      // because the 2-addr copy must be in the same MBB as the redef.
      interval.removeRange(DefIndex, RedefIndex);

      // Two-address vregs should always only be redefined once. This means
      // that at this point, there should be exactly one value number in it.
      assert(interval.containsOneValue() && "Unexpected 2-addr liveint!");

      // The new value number (#1) is defined by the instruction we claimed
      // defined value #0.
      VNInfo *ValNo = interval.getNextValue(OldValNo->def, OldValNo->getCopy(),
                                            false, // update at *
                                            VNInfoAllocator);
      ValNo->setFlags(OldValNo->getFlags()); // * <- updating here

      // Value#0 is now defined by the 2-addr instruction.
      OldValNo->def  = RedefIndex;
      OldValNo->setCopy(0);
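      // Illustrative example (indexes are made up): given
      //   4  %reg1024 = ...
      //   8  %reg1024 = ADD %reg1024, 1    ; two-address redefinition
      // the original single range is split so that value #1 covers [4,8) and
      // value #0, now defined at index 8, covers the rest.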
      // Add the new live interval which replaces the range for the input copy.
      LiveRange LR(DefIndex, RedefIndex, ValNo);
      DEBUG(errs() << " replace range with " << LR);
      interval.addRange(LR);
      ValNo->addKill(RedefIndex);

      // If this redefinition is dead, we need to add a dummy unit live
      // range covering the def slot.
      if (MO.isDead())
        interval.addRange(LiveRange(RedefIndex, RedefIndex.getStoreIndex(),
                                    OldValNo));

      DEBUG({
          errs() << " RESULT: ";
          interval.print(errs(), tri_);
        });
    } else {
      // Otherwise, this must be because of phi elimination. If this is the
      // first redefinition of the vreg that we have seen, go back and change
      // the live range in the PHI block to be a different value number.
      if (interval.containsOneValue()) {
        // Remove the old range that we now know has an incorrect number.
        VNInfo *VNI = interval.getValNumInfo(0);
        MachineInstr *Killer = vi.Kills[0];
        SlotIndex Start = getMBBStartIdx(Killer->getParent());
        SlotIndex End = getInstructionIndex(Killer).getDefIndex();
        DEBUG({
            errs() << " Removing [" << Start << "," << End << "] from: ";
            interval.print(errs(), tri_);
            errs() << "\n";
          });
        interval.removeRange(Start, End);
        assert(interval.ranges.size() == 1 &&
               "Newly discovered PHI interval has >1 ranges.");
        MachineBasicBlock *killMBB = getMBBFromIndex(VNI->def);
        VNI->addKill(indexes_->getTerminatorGap(killMBB));
        VNI->setHasPHIKill(true);
        DEBUG({
            errs() << " RESULT: ";
            interval.print(errs(), tri_);
          });

        // Replace the interval with one of a NEW value number. Note that this
        // value number isn't actually defined by an instruction, weird huh? :)
        LiveRange LR(Start, End,
                     interval.getNextValue(SlotIndex(getMBBStartIdx(Killer->getParent()), true),
                                           0, false, VNInfoAllocator));
        LR.valno->setIsPHIDef(true);
        DEBUG(errs() << " replace range with " << LR);
        interval.addRange(LR);
        LR.valno->addKill(End);
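        // Note: the new value is defined at the start of the killing block
        // and killed at the old kill point; the copies inserted into the
        // predecessors by PHI elimination are what actually produce it, so
        // within this block the value simply appears live-in.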
449 errs() << " RESULT: ";
450 interval.print(errs(), tri_);
454 // In the case of PHI elimination, each variable definition is only
455 // live until the end of the block. We've already taken care of the
456 // rest of the live range.
457 SlotIndex defIndex = MIIdx.getDefIndex();
458 if (MO.isEarlyClobber())
459 defIndex = MIIdx.getUseIndex();
462 MachineInstr *CopyMI = NULL;
463 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
464 if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
465 mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
466 mi->getOpcode() == TargetInstrInfo::SUBREG_TO_REG ||
467 tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
469 ValNo = interval.getNextValue(defIndex, CopyMI, true, VNInfoAllocator);
471 SlotIndex killIndex = getMBBEndIdx(mbb).getNextIndex().getLoadIndex();
472 LiveRange LR(defIndex, killIndex, ValNo);
473 interval.addRange(LR);
474 ValNo->addKill(indexes_->getTerminatorGap(mbb));
475 ValNo->setHasPHIKill(true);
476 DEBUG(errs() << " +" << LR);
480 DEBUG(errs() << '\n');
void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
                                              MachineBasicBlock::iterator mi,
                                              SlotIndex MIIdx,
                                              MachineOperand& MO,
                                              LiveInterval &interval,
                                              MachineInstr *CopyMI) {
  // A physical register cannot be live across basic blocks, so its
  // lifetime must end somewhere in its defining basic block.
  DEBUG({
      errs() << "\t\tregister: ";
      printRegName(interval.reg, tri_);
    });

  SlotIndex baseIndex = MIIdx;
  SlotIndex start = baseIndex.getDefIndex();
  // Earlyclobbers move back one.
  if (MO.isEarlyClobber())
    start = MIIdx.getUseIndex();
  SlotIndex end = start;

  // If it is not used after definition, it is considered dead at
  // the instruction defining it. Hence its interval is:
  // [defSlot(def), defSlot(def)+1)
  // For earlyclobbers, the defSlot was pushed back one; the extra
  // advance below compensates.
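  // For example, a def of a physreg at index Idx with no later use in the
  // block gets the one-slot range [Idx.def, Idx.store).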
509 DEBUG(errs() << " dead");
510 end = start.getStoreIndex();
514 // If it is not dead on definition, it must be killed by a
515 // subsequent instruction. Hence its interval is:
516 // [defSlot(def), useSlot(kill)+1)
517 baseIndex = baseIndex.getNextIndex();
518 while (++mi != MBB->end()) {
520 if (getInstructionFromIndex(baseIndex) == 0)
521 baseIndex = indexes_->getNextNonNullIndex(baseIndex);
523 if (mi->killsRegister(interval.reg, tri_)) {
524 DEBUG(errs() << " killed");
525 end = baseIndex.getDefIndex();
528 int DefIdx = mi->findRegisterDefOperandIdx(interval.reg, false, tri_);
530 if (mi->isRegTiedToUseOperand(DefIdx)) {
531 // Two-address instruction.
532 end = baseIndex.getDefIndex();
534 // Another instruction redefines the register before it is ever read.
535 // Then the register is essentially dead at the instruction that defines
536 // it. Hence its interval is:
537 // [defSlot(def), defSlot(def)+1)
538 DEBUG(errs() << " dead");
539 end = start.getStoreIndex();
545 baseIndex = baseIndex.getNextIndex();
548 // The only case we should have a dead physreg here without a killing or
549 // instruction where we know it's dead is if it is live-in to the function
550 // and never used. Another possible case is the implicit use of the
551 // physical register has been deleted by two-address pass.
552 end = start.getStoreIndex();
555 assert(start < end && "did not find end of interval?");
557 // Already exists? Extend old live interval.
558 LiveInterval::iterator OldLR = interval.FindLiveRangeContaining(start);
559 bool Extend = OldLR != interval.end();
560 VNInfo *ValNo = Extend
561 ? OldLR->valno : interval.getNextValue(start, CopyMI, true, VNInfoAllocator);
562 if (MO.isEarlyClobber() && Extend)
563 ValNo->setHasRedefByEC(true);
564 LiveRange LR(start, end, ValNo);
565 interval.addRange(LR);
566 LR.valno->addKill(end);
567 DEBUG(errs() << " +" << LR << '\n');
void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
                                      MachineBasicBlock::iterator MI,
                                      SlotIndex MIIdx,
                                      MachineOperand &MO,
                                      unsigned MOIdx) {
  if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
    handleVirtualRegisterDef(MBB, MI, MIIdx, MO, MOIdx,
                             getOrCreateInterval(MO.getReg()));
  else if (allocatableRegs_[MO.getReg()]) {
    MachineInstr *CopyMI = NULL;
    unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
    if (MI->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
        MI->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
        MI->getOpcode() == TargetInstrInfo::SUBREG_TO_REG ||
        tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
      CopyMI = MI;
    handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                              getOrCreateInterval(MO.getReg()), CopyMI);
    // Def of a register also defines its sub-registers.
    for (const unsigned* AS = tri_->getSubRegisters(MO.getReg()); *AS; ++AS)
      // If MI also modifies the sub-register explicitly, avoid processing it
      // more than once. Do not pass in TRI here so it checks for exact match.
      if (!MI->modifiesRegister(*AS))
        handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                                  getOrCreateInterval(*AS), 0);
  }
}
void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
                                         SlotIndex MIIdx,
                                         LiveInterval &interval, bool isAlias) {
  DEBUG({
      errs() << "\t\tlivein register: ";
      printRegName(interval.reg, tri_);
    });

  // Look for kills, if it reaches a def before it's killed, then it shouldn't
  // be considered a livein.
  MachineBasicBlock::iterator mi = MBB->begin();
  SlotIndex baseIndex = MIIdx;
  SlotIndex start = baseIndex;
  if (getInstructionFromIndex(baseIndex) == 0)
    baseIndex = indexes_->getNextNonNullIndex(baseIndex);

  SlotIndex end = baseIndex;
  bool SeenDefUse = false;

  while (mi != MBB->end()) {
    if (mi->killsRegister(interval.reg, tri_)) {
      DEBUG(errs() << " killed");
      end = baseIndex.getDefIndex();
      SeenDefUse = true;
      break;
    } else if (mi->modifiesRegister(interval.reg, tri_)) {
      // Another instruction redefines the register before it is ever read.
      // Then the register is essentially dead at the instruction that defines
      // it. Hence its interval is:
      // [defSlot(def), defSlot(def)+1)
      DEBUG(errs() << " dead");
      end = start.getStoreIndex();
      SeenDefUse = true;
      break;
    }

    ++mi;
    if (mi != MBB->end()) {
      baseIndex = indexes_->getNextNonNullIndex(baseIndex);
    }
  }

  // Live-in register might not be used at all.
  if (!SeenDefUse) {
    if (isAlias) {
      DEBUG(errs() << " dead");
      end = MIIdx.getStoreIndex();
    } else {
      DEBUG(errs() << " live through");
      end = getMBBEndIdx(MBB);
    }
  }

  VNInfo *vni =
    interval.getNextValue(SlotIndex(getMBBStartIdx(MBB), true),
                          0, false, VNInfoAllocator);
  vni->setIsPHIDef(true);
  LiveRange LR(start, end, vni);

  interval.addRange(LR);
  LR.valno->addKill(end);
  DEBUG(errs() << " +" << LR << '\n');
}
/// computeIntervals - Computes the live intervals for virtual
/// registers. For some ordering of the machine instructions [1,N], a
/// live interval is an interval [i, j), where 1 <= i <= j < N, during
/// which a variable is live.
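///
/// For example (illustrative indexes only), a virtual register defined at
/// index 4 and last used at index 16 gets the interval [4, 16).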
void LiveIntervals::computeIntervals() {
  DEBUG(errs() << "********** COMPUTING LIVE INTERVALS **********\n"
               << "********** Function: "
               << ((Value*)mf_->getFunction())->getName() << '\n');

  SmallVector<unsigned, 8> UndefUses;
  for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
       MBBI != E; ++MBBI) {
    MachineBasicBlock *MBB = MBBI;
    // Track the index of the current machine instr.
    SlotIndex MIIndex = getMBBStartIdx(MBB);
    DEBUG(errs() << MBB->getName() << ":\n");

    MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();

    // Create intervals for live-ins to this BB first.
    for (MachineBasicBlock::const_livein_iterator LI = MBB->livein_begin(),
           LE = MBB->livein_end(); LI != LE; ++LI) {
      handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
      // Multiple live-ins can alias the same register.
      for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
        if (!hasInterval(*AS))
          handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
                               true);
    }

    // Skip over empty initial indices.
    if (getInstructionFromIndex(MIIndex) == 0)
      MIIndex = indexes_->getNextNonNullIndex(MIIndex);

    for (; MI != miEnd; ++MI) {
      DEBUG(errs() << MIIndex << "\t" << *MI);

      // Handle defs.
      for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
        MachineOperand &MO = MI->getOperand(i);
        if (!MO.isReg() || !MO.getReg())
          continue;

        // handle register defs - build intervals
        if (MO.isDef())
          handleRegisterDef(MBB, MI, MIIndex, MO, i);
        else if (MO.isUndef())
          UndefUses.push_back(MO.getReg());
      }

      // Move to the next instr slot.
      MIIndex = indexes_->getNextNonNullIndex(MIIndex);
    }
  }

  // Create empty intervals for registers defined by implicit_def's (except
  // for those implicit_def that define values which are liveout of their
  // blocks).
  for (unsigned i = 0, e = UndefUses.size(); i != e; ++i) {
    unsigned UndefReg = UndefUses[i];
    (void)getOrCreateInterval(UndefReg);
  }
}
LiveInterval* LiveIntervals::createInterval(unsigned reg) {
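  // Physical register intervals get an effectively infinite weight so the
  // spiller never chooses them; virtual register intervals start at zero
  // and accumulate use-based weight later.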
  float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ? HUGE_VALF : 0.0F;
  return new LiveInterval(reg, Weight);
}

/// dupInterval - Duplicate a live interval. The caller is responsible for
/// managing the allocated memory.
LiveInterval* LiveIntervals::dupInterval(LiveInterval *li) {
  LiveInterval *NewLI = createInterval(li->reg);
  NewLI->Copy(*li, mri_, getVNInfoAllocator());
  return NewLI;
}
/// getVNInfoSourceReg - Helper function that parses the specified VNInfo
/// copy field and returns the source register that defines it.
unsigned LiveIntervals::getVNInfoSourceReg(const VNInfo *VNI) const {
  if (!VNI->getCopy())
    return 0;

  if (VNI->getCopy()->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG) {
    // If it's extracting out of a physical register, return the sub-register.
    unsigned Reg = VNI->getCopy()->getOperand(1).getReg();
    if (TargetRegisterInfo::isPhysicalRegister(Reg))
      Reg = tri_->getSubReg(Reg, VNI->getCopy()->getOperand(2).getImm());
    return Reg;
  } else if (VNI->getCopy()->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
             VNI->getCopy()->getOpcode() == TargetInstrInfo::SUBREG_TO_REG)
    return VNI->getCopy()->getOperand(2).getReg();

  unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
  if (tii_->isMoveInstr(*VNI->getCopy(), SrcReg, DstReg, SrcSubReg, DstSubReg))
    return SrcReg;
  llvm_unreachable("Unrecognized copy instruction!");
  return 0;
}
//===----------------------------------------------------------------------===//
// Register allocator hooks.
//===----------------------------------------------------------------------===//
/// getReMatImplicitUse - If the remat definition MI has one (for now, we only
/// allow one) virtual register operand, then its uses are implicitly using
/// the register. Returns the virtual register.
unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
                                            MachineInstr *MI) const {
  unsigned RegOp = 0;
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg() || !MO.isUse())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || Reg == li.reg)
      continue;

    if (TargetRegisterInfo::isPhysicalRegister(Reg) &&
        !allocatableRegs_[Reg])
      continue;
    // FIXME: For now, only remat MI with at most one register operand.
    assert(!RegOp &&
           "Can't rematerialize instruction with multiple register operand!");
    RegOp = MO.getReg();
  }
  return RegOp;
}

/// isValNoAvailableAt - Return true if the val# of the specified interval
/// which reaches the given instruction also reaches the specified use index.
bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
                                       SlotIndex UseIdx) const {
  SlotIndex Index = getInstructionIndex(MI);
  VNInfo *ValNo = li.FindLiveRangeContaining(Index)->valno;
  LiveInterval::const_iterator UI = li.FindLiveRangeContaining(UseIdx);
  return UI != li.end() && UI->valno == ValNo;
}
/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       const VNInfo *ValNo, MachineInstr *MI,
                                       SmallVectorImpl<LiveInterval*> &SpillIs,
                                       bool &isLoad) {
  if (DisableReMat)
    return false;

  if (!tii_->isTriviallyReMaterializable(MI, aa_))
    return false;

  // Target-specific code can mark an instruction as being rematerializable
  // if it has one virtual reg use, though it had better be something like
  // a PIC base register which is likely to be live everywhere.
  unsigned ImpUse = getReMatImplicitUse(li, MI);
  if (ImpUse) {
    const LiveInterval &ImpLi = getInterval(ImpUse);
    for (MachineRegisterInfo::use_iterator ri = mri_->use_begin(li.reg),
           re = mri_->use_end(); ri != re; ++ri) {
      MachineInstr *UseMI = &*ri;
      SlotIndex UseIdx = getInstructionIndex(UseMI);
      if (li.FindLiveRangeContaining(UseIdx)->valno != ValNo)
        continue;
      if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
        return false;
    }

    // If a register operand of the re-materialized instruction is going to
    // be spilled next, then it's not legal to re-materialize this instruction.
    for (unsigned i = 0, e = SpillIs.size(); i != e; ++i)
      if (ImpUse == SpillIs[i]->reg)
        return false;
  }
  return true;
}

/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       const VNInfo *ValNo, MachineInstr *MI) {
  SmallVector<LiveInterval*, 4> Dummy1;
  bool Dummy2;
  return isReMaterializable(li, ValNo, MI, Dummy1, Dummy2);
}
/// isReMaterializable - Returns true if the definition MI of every val# of
/// the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       SmallVectorImpl<LiveInterval*> &SpillIs,
                                       bool &isLoad) {
  isLoad = false;
  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
       i != e; ++i) {
    const VNInfo *VNI = *i;
    if (VNI->isUnused())
      continue; // Dead val#.
    // Is the def for the val# rematerializable?
    if (!VNI->isDefAccurate())
      return false;
    MachineInstr *ReMatDefMI = getInstructionFromIndex(VNI->def);
    bool DefIsLoad = false;
    if (!ReMatDefMI ||
        !isReMaterializable(li, VNI, ReMatDefMI, SpillIs, DefIsLoad))
      return false;
    isLoad |= DefIsLoad;
  }
  return true;
}
/// FilterFoldedOps - Filter out two-address use operands. Return
/// true if it finds any issue with the operands that ought to prevent
/// folding.
static bool FilterFoldedOps(MachineInstr *MI,
                            SmallVector<unsigned, 2> &Ops,
                            unsigned &MRInfo,
                            SmallVector<unsigned, 2> &FoldOps) {
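  // MRInfo accumulates VirtRegMap mod/ref bits for the operands to be
  // folded: isRef for a read, isMod for a write, isModRef for a tied
  // read-write operand.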
  MRInfo = 0;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    unsigned OpIdx = Ops[i];
    MachineOperand &MO = MI->getOperand(OpIdx);
    // FIXME: fold subreg use.
    if (MO.getSubReg())
      return true;
    if (MO.isDef())
      MRInfo |= (unsigned)VirtRegMap::isMod;
    else {
      // Filter out two-address use operand(s).
      if (MI->isRegTiedToDefOperand(OpIdx)) {
        MRInfo = VirtRegMap::isModRef;
        continue;
      }
      MRInfo |= (unsigned)VirtRegMap::isRef;
    }
    FoldOps.push_back(OpIdx);
  }
  return false;
}
/// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
/// slot / to reg or any rematerialized load into ith operand of specified
/// MI. If it is successful, MI is updated with the newly created MI and
/// returns true.
bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
                                         VirtRegMap &vrm, MachineInstr *DefMI,
                                         SlotIndex InstrIdx,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool isSS, int Slot, unsigned Reg) {
  // If it is an implicit def instruction, just delete it.
  if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
    RemoveMachineInstrFromMaps(MI);
    vrm.RemoveMachineInstrFromMaps(MI);
    MI->eraseFromParent();
    ++numFolds;
    return true;
  }

  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;

  // The only time it's safe to fold into a two address instruction is when
  // it's folding reload and spill from / into a spill stack slot.
  if (DefMI && (MRInfo & VirtRegMap::isMod))
    return false;

  MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(*mf_, MI, FoldOps, Slot)
                           : tii_->foldMemoryOperand(*mf_, MI, FoldOps, DefMI);
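  // On success, foldMemoryOperand returns a brand new instruction with the
  // memory reference folded into it (e.g. on x86 a reload feeding an ADD32rr
  // can become a single ADD32rm; the opcode names are illustrative); the
  // original MI is swapped out below.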
  if (fmi) {
    // Remember this instruction uses the spill slot.
    if (isSS) vrm.addSpillSlotUse(Slot, fmi);

    // Attempt to fold the memory reference into the instruction. If
    // we can do this, we don't need to insert spill code.
    MachineBasicBlock &MBB = *MI->getParent();
    if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
      vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
    vrm.transferSpillPts(MI, fmi);
    vrm.transferRestorePts(MI, fmi);
    vrm.transferEmergencySpills(MI, fmi);
    ReplaceMachineInstrInMaps(MI, fmi);
    MI = MBB.insert(MBB.erase(MI), fmi);
    ++numFolds;
    return true;
  }
  return false;
}
/// canFoldMemoryOperand - Returns true if the specified load / store
/// folding is possible.
bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool ReMat) const {
  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;

  // It's only legal to remat for a use, not a def.
  if (ReMat && (MRInfo & VirtRegMap::isMod))
    return false;

  return tii_->canFoldMemoryOperand(MI, FoldOps);
}
bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
  LiveInterval::Ranges::const_iterator itr = li.ranges.begin();

  MachineBasicBlock *mbb = indexes_->getMBBCoveringRange(itr->start, itr->end);

  if (mbb == 0)
    return false;

  for (++itr; itr != li.ranges.end(); ++itr) {
    MachineBasicBlock *mbb2 =
      indexes_->getMBBCoveringRange(itr->start, itr->end);

    if (mbb2 != mbb)
      return false;
  }

  return true;
}
/// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of
/// interval on to-be re-materialized operands of MI) with new register.
void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
                                       MachineInstr *MI, unsigned NewVReg,
                                       VirtRegMap &vrm) {
  // There is an implicit use. That means one of the other operands is
  // being remat'ed and the remat'ed instruction has li.reg as a
  // use operand. Make sure we rewrite that as well.
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
      continue;
    if (!vrm.isReMaterialized(Reg))
      continue;
    MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
    MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg);
    if (UseMO)
      UseMO->setReg(NewVReg);
  }
}
/// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
/// for addIntervalsForSpills to rewrite uses / defs for the given live range.
bool LiveIntervals::
rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
                            bool TrySplit, SlotIndex index, SlotIndex end,
                            MachineInstr *MI,
                            MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                            unsigned Slot, int LdSlot,
                            bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                            VirtRegMap &vrm,
                            const TargetRegisterClass* rc,
                            SmallVector<int, 4> &ReMatIds,
                            const MachineLoopInfo *loopInfo,
                            unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
                            DenseMap<unsigned,unsigned> &MBBVRegsMap,
                            std::vector<LiveInterval*> &NewLIs) {
  bool CanFold = false;
 RestartInstruction:
  for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
    MachineOperand& mop = MI->getOperand(i);
    if (!mop.isReg())
      continue;
    unsigned Reg = mop.getReg();
    unsigned RegI = Reg;
    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
      continue;
    if (Reg != li.reg)
      continue;

    bool TryFold = !DefIsReMat;
    bool FoldSS = true; // Default behavior unless it's a remat.
    int FoldSlot = Slot;
    if (DefIsReMat) {
      // If this is the rematerializable definition MI itself and
      // all of its uses are rematerialized, simply delete it.
      if (MI == ReMatOrigDefMI && CanDelete) {
        DEBUG(errs() << "\t\t\t\tErasing re-materializable def: "
                     << MI << '\n');
        RemoveMachineInstrFromMaps(MI);
        vrm.RemoveMachineInstrFromMaps(MI);
        MI->eraseFromParent();
        break;
      }

      // If def for this use can't be rematerialized, then try folding.
      // If def is rematerializable and it's a load, also try folding.
      TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
      if (isLoad) {
        // Try fold loads (from stack slot, constant pool, etc.) into uses.
        FoldSS = isLoadSS;
        FoldSlot = LdSlot;
      }
    }
    // Scan all of the operands of this instruction rewriting operands
    // to use NewVReg instead of li.reg as appropriate. We do this for
    // two reasons:
    //
    //   1. If the instr reads the same spilled vreg multiple times, we
    //      want to reuse the NewVReg.
    //   2. If the instr is a two-addr instruction, we are required to
    //      keep the src/dst regs pinned.
    //
    // Keep track of whether we replace a use and/or def so that we can
    // create the spill interval with the appropriate range.

    HasUse = mop.isUse();
    HasDef = mop.isDef();
    SmallVector<unsigned, 2> Ops;
    Ops.push_back(i);
    for (unsigned j = i+1, e = MI->getNumOperands(); j != e; ++j) {
      const MachineOperand &MOj = MI->getOperand(j);
      if (!MOj.isReg())
        continue;
      unsigned RegJ = MOj.getReg();
      if (RegJ == 0 || TargetRegisterInfo::isPhysicalRegister(RegJ))
        continue;
      if (RegJ == RegI) {
        Ops.push_back(j);
        if (!MOj.isUndef()) {
          HasUse |= MOj.isUse();
          HasDef |= MOj.isDef();
        }
      }
    }

    // Create a new virtual register for the spill interval.
    // Create the new register now so we can map the fold instruction
    // to the new register so when it is unfolded we get the correct
    // answer.
    bool CreatedNewVReg = false;
    if (NewVReg == 0) {
      NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      CreatedNewVReg = true;

      // The new virtual register should get the same allocation hints as the
      // old one.
      std::pair<unsigned, unsigned> Hint = mri_->getRegAllocationHint(Reg);
      if (Hint.first || Hint.second)
        mri_->setRegAllocationHint(NewVReg, Hint.first, Hint.second);
    }

    if (!TryFold)
      CanFold = false;
    else {
      // Do not fold load / store here if we are splitting. We'll find an
      // optimal point to insert a load / store later.
      if (!TrySplit) {
        if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
                                 Ops, FoldSS, FoldSlot, NewVReg)) {
          // Folding the load/store can completely change the instruction in
          // unpredictable ways, rescan it from the beginning.

          if (FoldSS) {
            // We need to give the new vreg the same stack slot as the
            // spilled interval.
            vrm.assignVirt2StackSlot(NewVReg, FoldSlot);
          }

          HasUse = false;
          HasDef = false;
          CanFold = false;
          if (isNotInMIMap(MI))
            break;
          goto RestartInstruction;
        }
      } else {
        // We'll try to fold it later if it's profitable.
        CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat);
      }
    }
    mop.setReg(NewVReg);
    if (mop.isImplicit())
      rewriteImplicitOps(li, MI, NewVReg, vrm);

    // Reuse NewVReg for other reads.
    for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
      MachineOperand &mopj = MI->getOperand(Ops[j]);
      mopj.setReg(NewVReg);
      if (mopj.isImplicit())
        rewriteImplicitOps(li, MI, NewVReg, vrm);
    }

    if (CreatedNewVReg) {
      if (DefIsReMat) {
        vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI);
        if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
          // Each valnum may have its own remat id.
          ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
        } else {
          vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
        }
        if (!CanDelete || (HasUse && HasDef)) {
          // If this is a two-addr instruction then its use operands are
          // rematerializable but its def is not. It should be assigned a
          // stack slot.
          vrm.assignVirt2StackSlot(NewVReg, Slot);
        }
      } else {
        vrm.assignVirt2StackSlot(NewVReg, Slot);
      }
    } else if (HasUse && HasDef &&
               vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
      // If this interval hasn't been assigned a stack slot (because earlier
      // def is a deleted remat def), do it now.
      assert(Slot != VirtRegMap::NO_STACK_SLOT);
      vrm.assignVirt2StackSlot(NewVReg, Slot);
    }

    // Re-matting an instruction with virtual register use. Add the
    // register as an implicit use on the use MI.
    if (DefIsReMat && ImpUse)
      MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));

    // Create a new register interval for this spill / remat.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    if (CreatedNewVReg) {
      NewLIs.push_back(&nI);
      MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
      if (TrySplit)
        vrm.setIsSplitFromReg(NewVReg, li.reg);
    }

    if (HasUse) {
      if (CreatedNewVReg) {
        LiveRange LR(index.getLoadIndex(), index.getDefIndex(),
                     nI.getNextValue(SlotIndex(), 0, false, VNInfoAllocator));
        DEBUG(errs() << " +" << LR);
        nI.addRange(LR);
      } else {
        // Extend the split live interval to this def / use.
        SlotIndex End = index.getDefIndex();
        LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
                     nI.getValNumInfo(nI.getNumValNums()-1));
        DEBUG(errs() << " +" << LR);
        nI.addRange(LR);
      }
    }
    if (HasDef) {
      LiveRange LR(index.getDefIndex(), index.getStoreIndex(),
                   nI.getNextValue(SlotIndex(), 0, false, VNInfoAllocator));
      DEBUG(errs() << " +" << LR);
      nI.addRange(LR);
    }

    DEBUG({
        errs() << "\t\t\t\tAdded new interval: ";
        nI.print(errs(), tri_);
        errs() << '\n';
      });
  }
  return CanFold;
}
bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
                                         const VNInfo *VNI,
                                         MachineBasicBlock *MBB,
                                         SlotIndex Idx) const {
  SlotIndex End = getMBBEndIdx(MBB);
  for (unsigned j = 0, ee = VNI->kills.size(); j != ee; ++j) {
    if (VNI->kills[j].isPHI())
      continue;

    SlotIndex KillIdx = VNI->kills[j];
    if (KillIdx > Idx && KillIdx < End)
      return true;
  }
  return false;
}
/// RewriteInfo - Keep track of machine instrs that will be rewritten
/// during spilling.
namespace {
  struct RewriteInfo {
    SlotIndex Index;
    MachineInstr *MI;
    bool HasUse;
    bool HasDef;
    RewriteInfo(SlotIndex i, MachineInstr *mi, bool u, bool d)
      : Index(i), MI(mi), HasUse(u), HasDef(d) {}
  };

  struct RewriteInfoCompare {
    bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
      return LHS.Index < RHS.Index;
    }
  };
}
void LiveIntervals::
rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
                             LiveInterval::Ranges::const_iterator &I,
                             MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                             unsigned Slot, int LdSlot,
                             bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                             VirtRegMap &vrm,
                             const TargetRegisterClass* rc,
                             SmallVector<int, 4> &ReMatIds,
                             const MachineLoopInfo *loopInfo,
                             BitVector &SpillMBBs,
                             DenseMap<unsigned, std::vector<SRInfo> > &SpillIdxes,
                             BitVector &RestoreMBBs,
                             DenseMap<unsigned, std::vector<SRInfo> > &RestoreIdxes,
                             DenseMap<unsigned,unsigned> &MBBVRegsMap,
                             std::vector<LiveInterval*> &NewLIs) {
  bool AllCanFold = true;
  unsigned NewVReg = 0;
  SlotIndex start = I->start.getBaseIndex();
  SlotIndex end = I->end.getPrevSlot().getBaseIndex().getNextIndex();

  // First collect all the def / use in this live range that will be rewritten.
  // Make sure they are sorted according to instruction index.
  std::vector<RewriteInfo> RewriteMIs;
  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
         re = mri_->reg_end(); ri != re; ) {
    MachineInstr *MI = &*ri;
    MachineOperand &O = ri.getOperand();
    ++ri;
    assert(!O.isImplicit() && "Spilling register that's used as implicit use?");
    SlotIndex index = getInstructionIndex(MI);
    if (index < start || index >= end)
      continue;

    if (O.isUndef())
      // Must be defined by an implicit def. It should not be spilled. Note,
      // this is for correctness reason. e.g.
      // 8   %reg1024<def> = IMPLICIT_DEF
      // 12  %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
      // The live range [12, 14) is not part of the r1024 live interval since
      // it's defined by an implicit def. It will not conflict with the live
      // interval of r1025. Now suppose both registers are spilled: you can
      // easily see a situation where both registers are reloaded before
      // the INSERT_SUBREG and both target registers would overlap.
      continue;
    RewriteMIs.push_back(RewriteInfo(index, MI, O.isUse(), O.isDef()));
  }
  std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());
  unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
  // Now rewrite the defs and uses.
  for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
    RewriteInfo &rwi = RewriteMIs[i];
    ++i;
    SlotIndex index = rwi.Index;
    bool MIHasUse = rwi.HasUse;
    bool MIHasDef = rwi.HasDef;
    MachineInstr *MI = rwi.MI;
    // If MI defs and/or uses the same register multiple times, then there
    // are multiple entries.
    unsigned NumUses = MIHasUse;
    while (i != e && RewriteMIs[i].MI == MI) {
      assert(RewriteMIs[i].Index == index);
      bool isUse = RewriteMIs[i].HasUse;
      if (isUse) ++NumUses;
      MIHasUse |= isUse;
      MIHasDef |= RewriteMIs[i].HasDef;
      ++i;
    }
    MachineBasicBlock *MBB = MI->getParent();

    if (ImpUse && MI != ReMatDefMI) {
      // Re-matting an instruction with virtual register use. Update the
      // register interval's spill weight to HUGE_VALF to prevent it from
      // being spilled.
      LiveInterval &ImpLi = getInterval(ImpUse);
      ImpLi.weight = HUGE_VALF;
    }

    unsigned MBBId = MBB->getNumber();
    unsigned ThisVReg = 0;
    if (TrySplit) {
      DenseMap<unsigned,unsigned>::iterator NVI = MBBVRegsMap.find(MBBId);
      if (NVI != MBBVRegsMap.end()) {
        ThisVReg = NVI->second;
        // One common case:
        // x = use
        // ...
        // ...
        // def = ...
        //     = use
        // It's better to start a new interval to avoid artificially
        // extending the new interval.
        if (MIHasDef && !MIHasUse) {
          MBBVRegsMap.erase(MBB->getNumber());
          ThisVReg = 0;
        }
      }
    }

    bool IsNew = ThisVReg == 0;
    if (IsNew) {
      // This ends the previous live interval. If all of its def / use
      // can be folded, give it a low spill weight.
      if (NewVReg && TrySplit && AllCanFold) {
        LiveInterval &nI = getOrCreateInterval(NewVReg);
        nI.weight /= 10.0F;
      }
      AllCanFold = true;
    }
    NewVReg = ThisVReg;

    bool HasDef = false;
    bool HasUse = false;
    bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
                                               index, end, MI, ReMatOrigDefMI, ReMatDefMI,
                                               Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                                               CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
                                               ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs);
    if (!HasDef && !HasUse)
      continue;

    AllCanFold &= CanFold;

    // Update weight of spill interval.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    if (!TrySplit) {
      // The spill weight is now infinity as it cannot be spilled again.
      nI.weight = HUGE_VALF;
      continue;
    }

    // Keep track of the last def and first use in each MBB.
    if (HasDef) {
      if (MI != ReMatOrigDefMI || !CanDelete) {
        bool HasKill = false;
        if (!HasUse)
          HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, index.getDefIndex());
        else {
          // If this is a two-address code, then this index starts a new VNInfo.
          const VNInfo *VNI = li.findDefinedVNInfoForRegInt(index.getDefIndex());
          if (VNI)
            HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, index.getDefIndex());
        }
        DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
          SpillIdxes.find(MBBId);
        if (!HasKill) {
          if (SII == SpillIdxes.end()) {
            std::vector<SRInfo> S;
            S.push_back(SRInfo(index, NewVReg, true));
            SpillIdxes.insert(std::make_pair(MBBId, S));
          } else if (SII->second.back().vreg != NewVReg) {
            SII->second.push_back(SRInfo(index, NewVReg, true));
          } else if (index > SII->second.back().index) {
            // If there is an earlier def and this is a two-address
            // instruction, then it's not possible to fold the store (which
            // would also fold the load).
            SRInfo &Info = SII->second.back();
            Info.index = index;
            Info.canFold = !HasUse;
          }
          SpillMBBs.set(MBBId);
        } else if (SII != SpillIdxes.end() &&
                   SII->second.back().vreg == NewVReg &&
                   index > SII->second.back().index) {
          // There is an earlier def that's not killed (must be two-address).
          // The spill is no longer needed.
          SII->second.pop_back();
          if (SII->second.empty()) {
            SpillIdxes.erase(MBBId);
            SpillMBBs.reset(MBBId);
          }
        }
      }
    }

    if (HasUse) {
      DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
        SpillIdxes.find(MBBId);
      if (SII != SpillIdxes.end() &&
          SII->second.back().vreg == NewVReg &&
          index > SII->second.back().index)
        // Use(s) following the last def, it's not safe to fold the spill.
        SII->second.back().canFold = false;
      DenseMap<unsigned, std::vector<SRInfo> >::iterator RII =
        RestoreIdxes.find(MBBId);
      if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
        // If we are splitting live intervals, only fold if it's the first
        // use and there isn't another use later in the MBB.
        RII->second.back().canFold = false;
      else if (IsNew) {
        // Only need a reload if there isn't an earlier def / use.
        if (RII == RestoreIdxes.end()) {
          std::vector<SRInfo> Infos;
          Infos.push_back(SRInfo(index, NewVReg, true));
          RestoreIdxes.insert(std::make_pair(MBBId, Infos));
        } else {
          RII->second.push_back(SRInfo(index, NewVReg, true));
        }
        RestoreMBBs.set(MBBId);
      }
    }

    // Update spill weight.
    unsigned loopDepth = loopInfo->getLoopDepth(MBB);
    nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
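    // getSpillWeight scales with the loop depth of the block (roughly
    // (HasDef + HasUse) * 10^loopDepth), so defs and uses inside deep loops
    // make this interval much costlier to spill again.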
  }

  if (NewVReg && TrySplit && AllCanFold) {
    // If all of its def / use can be folded, give it a low spill weight.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    nI.weight /= 10.0F;
  }
}

bool LiveIntervals::alsoFoldARestore(int Id, SlotIndex index,
                                     unsigned vr, BitVector &RestoreMBBs,
                                     DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
  if (!RestoreMBBs[Id])
    return false;
  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
    if (Restores[i].index == index &&
        Restores[i].vreg == vr &&
        Restores[i].canFold)
      return true;
  return false;
}
void LiveIntervals::eraseRestoreInfo(int Id, SlotIndex index,
                                     unsigned vr, BitVector &RestoreMBBs,
                                     DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
  if (!RestoreMBBs[Id])
    return;
  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
    if (Restores[i].index == index && Restores[i].vreg == vr)
      Restores[i].index = SlotIndex();
}
/// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being
/// spilled and create empty intervals for their uses.
void
LiveIntervals::handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm,
                                    const TargetRegisterClass* rc,
                                    std::vector<LiveInterval*> &NewLIs) {
  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
         re = mri_->reg_end(); ri != re; ) {
    MachineOperand &O = ri.getOperand();
    MachineInstr *MI = &*ri;
    ++ri;
    if (O.isDef()) {
      assert(MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF &&
             "Register def was not rewritten?");
      RemoveMachineInstrFromMaps(MI);
      vrm.RemoveMachineInstrFromMaps(MI);
      MI->eraseFromParent();
    } else {
      // This must be a use of an implicit_def so it's not part of the live
      // interval. Create a new empty live interval for it.
      // FIXME: Can we simply erase some of the instructions? e.g. Stores?
      unsigned NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      vrm.setIsImplicitlyDefined(NewVReg);
      NewLIs.push_back(&getOrCreateInterval(NewVReg));
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand &MO = MI->getOperand(i);
        if (MO.isReg() && MO.getReg() == li.reg) {
          MO.setReg(NewVReg);
          MO.setIsUndef();
        }
      }
    }
  }
}
std::vector<LiveInterval*> LiveIntervals::
addIntervalsForSpillsFast(const LiveInterval &li,
                          const MachineLoopInfo *loopInfo,
                          VirtRegMap &vrm) {
  unsigned slot = vrm.assignVirt2StackSlot(li.reg);

  std::vector<LiveInterval*> added;

  assert(li.weight != HUGE_VALF &&
         "attempt to spill already spilled interval!");

  DEBUG({
      errs() << "\t\t\t\tadding intervals for spills for interval: ";
      li.dump();
      errs() << '\n';
    });

  const TargetRegisterClass* rc = mri_->getRegClass(li.reg);

  MachineRegisterInfo::reg_iterator RI = mri_->reg_begin(li.reg);
  while (RI != mri_->reg_end()) {
    MachineInstr* MI = &*RI;

    SmallVector<unsigned, 2> Indices;
    bool HasUse = false;
    bool HasDef = false;

    for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
      MachineOperand& mop = MI->getOperand(i);
      if (!mop.isReg() || mop.getReg() != li.reg) continue;

      HasUse |= MI->getOperand(i).isUse();
      HasDef |= MI->getOperand(i).isDef();

      Indices.push_back(i);
    }

    if (!tryFoldMemoryOperand(MI, vrm, NULL, getInstructionIndex(MI),
                              Indices, true, slot, li.reg)) {
      unsigned NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      vrm.assignVirt2StackSlot(NewVReg, slot);

      // create a new register for this spill
      LiveInterval &nI = getOrCreateInterval(NewVReg);

      // the spill weight is now infinity as it
      // cannot be spilled again
      nI.weight = HUGE_VALF;

      // Rewrite register operands to use the new vreg.
      for (SmallVectorImpl<unsigned>::iterator I = Indices.begin(),
           E = Indices.end(); I != E; ++I) {
        MI->getOperand(*I).setReg(NewVReg);

        if (MI->getOperand(*I).isUse())
          MI->getOperand(*I).setIsKill(true);
      }

      // Fill in the new live interval.
      SlotIndex index = getInstructionIndex(MI);
      if (HasUse) {
        LiveRange LR(index.getLoadIndex(), index.getUseIndex(),
                     nI.getNextValue(SlotIndex(), 0, false,
                                     getVNInfoAllocator()));
        DEBUG(errs() << " +" << LR);
        nI.addRange(LR);
        vrm.addRestorePoint(NewVReg, MI);
      }
      if (HasDef) {
        LiveRange LR(index.getDefIndex(), index.getStoreIndex(),
                     nI.getNextValue(SlotIndex(), 0, false,
                                     getVNInfoAllocator()));
        DEBUG(errs() << " +" << LR);
        nI.addRange(LR);
        vrm.addSpillPoint(NewVReg, true, MI);
      }

      added.push_back(&nI);

      DEBUG({
          errs() << "\t\t\t\tadded new interval: ";
          nI.dump();
          errs() << '\n';
        });
    }

    RI = mri_->reg_begin(li.reg);
  }

  return added;
}
std::vector<LiveInterval*> LiveIntervals::
addIntervalsForSpills(const LiveInterval &li,
                      SmallVectorImpl<LiveInterval*> &SpillIs,
                      const MachineLoopInfo *loopInfo, VirtRegMap &vrm) {

  if (EnableFastSpilling)
    return addIntervalsForSpillsFast(li, loopInfo, vrm);

  assert(li.weight != HUGE_VALF &&
         "attempt to spill already spilled interval!");

  DEBUG({
      errs() << "\t\t\t\tadding intervals for spills for interval: ";
      li.print(errs(), tri_);
      errs() << '\n';
    });

  // Each bit specifies whether a spill is required in the MBB.
  BitVector SpillMBBs(mf_->getNumBlockIDs());
  DenseMap<unsigned, std::vector<SRInfo> > SpillIdxes;
  BitVector RestoreMBBs(mf_->getNumBlockIDs());
  DenseMap<unsigned, std::vector<SRInfo> > RestoreIdxes;
  DenseMap<unsigned,unsigned> MBBVRegsMap;
  std::vector<LiveInterval*> NewLIs;
  const TargetRegisterClass* rc = mri_->getRegClass(li.reg);

  unsigned NumValNums = li.getNumValNums();
  SmallVector<MachineInstr*, 4> ReMatDefs;
  ReMatDefs.resize(NumValNums, NULL);
  SmallVector<MachineInstr*, 4> ReMatOrigDefs;
  ReMatOrigDefs.resize(NumValNums, NULL);
  SmallVector<int, 4> ReMatIds;
  ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
  BitVector ReMatDelete(NumValNums);
  unsigned Slot = VirtRegMap::MAX_STACK_SLOT;
  // Spilling a split live interval. It cannot be split any further. Also,
  // it's guaranteed to be a single val# / range interval.
  if (vrm.getPreSplitReg(li.reg)) {
    vrm.setIsSplitFromReg(li.reg, 0);
    // Unset the split kill marker on the last use.
    SlotIndex KillIdx = vrm.getKillPoint(li.reg);
    if (KillIdx != SlotIndex()) {
      MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
      assert(KillMI && "Last use disappeared?");
      int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
      assert(KillOp != -1 && "Last use disappeared?");
      KillMI->getOperand(KillOp).setIsKill(false);
    }
    vrm.removeKillPoint(li.reg);
    bool DefIsReMat = vrm.isReMaterialized(li.reg);
    Slot = vrm.getStackSlot(li.reg);
    assert(Slot != VirtRegMap::MAX_STACK_SLOT);
    MachineInstr *ReMatDefMI = DefIsReMat ?
      vrm.getReMaterializedMI(li.reg) : NULL;
    int LdSlot = 0;
    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
    bool isLoad = isLoadSS ||
      (DefIsReMat && (ReMatDefMI->getDesc().canFoldAsLoad()));
    bool IsFirstRange = true;
    for (LiveInterval::Ranges::const_iterator
           I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
      // If this is a split live interval with multiple ranges, it means there
      // are two-address instructions that re-defined the value. Only the
      // first def can be rematerialized!
      if (IsFirstRange) {
        // Note ReMatOrigDefMI has already been deleted.
        rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
                             Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                             false, vrm, rc, ReMatIds, loopInfo,
                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                             MBBVRegsMap, NewLIs);
      } else {
        rewriteInstructionsForSpills(li, false, I, NULL, 0,
                             Slot, 0, false, false, false,
                             false, vrm, rc, ReMatIds, loopInfo,
                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                             MBBVRegsMap, NewLIs);
      }
      IsFirstRange = false;
    }

    handleSpilledImpDefs(li, vrm, rc, NewLIs);
    return NewLIs;
  }
  bool TrySplit = !intervalIsInOneMBB(li);
  if (TrySplit)
    ++numSplits;
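  // When the interval spans more than one basic block we try splitting it:
  // each block can get its own new vreg where possible, with per-block spill
  // points after the last def (SpillIdxes) and restore points before the
  // first use (RestoreIdxes) recorded as the instructions are rewritten.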
  bool NeedStackSlot = false;
  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
       i != e; ++i) {
    const VNInfo *VNI = *i;
    unsigned VN = VNI->id;
    if (VNI->isUnused())
      continue; // Dead val#.
    // Is the def for the val# rematerializable?
    MachineInstr *ReMatDefMI = VNI->isDefAccurate()
      ? getInstructionFromIndex(VNI->def) : 0;
    bool dummy;
    if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, SpillIs, dummy)) {
      // Remember how to remat the def of this val#.
      ReMatOrigDefs[VN] = ReMatDefMI;
      // Original def may be modified so we have to make a copy here.
      MachineInstr *Clone = mf_->CloneMachineInstr(ReMatDefMI);
      CloneMIs.push_back(Clone);
      ReMatDefs[VN] = Clone;

      bool CanDelete = true;
      if (VNI->hasPHIKill()) {
        // A kill is a phi node, not all of its uses can be rematerialized.
        // It must not be deleted.
        CanDelete = false;
        // Need a stack slot if there is any live range where uses cannot be
        // rematerialized.
        NeedStackSlot = true;
      }
      if (CanDelete)
        ReMatDelete.set(VN);
    } else {
      // Need a stack slot if there is any live range where uses cannot be
      // rematerialized.
      NeedStackSlot = true;
    }
  }

  // One stack slot per live interval.
  if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0) {
    if (vrm.getStackSlot(li.reg) == VirtRegMap::NO_STACK_SLOT)
      Slot = vrm.assignVirt2StackSlot(li.reg);

    // This case only occurs when the prealloc splitter has already assigned
    // a stack slot to this vreg.
    else
      Slot = vrm.getStackSlot(li.reg);
  }
  // Create new intervals and rewrite defs and uses.
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
    MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
    bool DefIsReMat = ReMatDefMI != NULL;
    bool CanDelete = ReMatDelete[I->valno->id];
    int LdSlot = 0;
    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
    bool isLoad = isLoadSS ||
      (DefIsReMat && ReMatDefMI->getDesc().canFoldAsLoad());
    rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
                                 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                                 CanDelete, vrm, rc, ReMatIds, loopInfo,
                                 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                                 MBBVRegsMap, NewLIs);
  }

  // Insert spills / restores if we are splitting.
  if (!TrySplit) {
    handleSpilledImpDefs(li, vrm, rc, NewLIs);
    return NewLIs;
  }
  SmallPtrSet<LiveInterval*, 4> AddedKill;
  SmallVector<unsigned, 2> Ops;
  if (NeedStackSlot) {
    int Id = SpillMBBs.find_first();
    while (Id != -1) {
      std::vector<SRInfo> &spills = SpillIdxes[Id];
      for (unsigned i = 0, e = spills.size(); i != e; ++i) {
        SlotIndex index = spills[i].index;
        unsigned VReg = spills[i].vreg;
        LiveInterval &nI = getOrCreateInterval(VReg);
        bool isReMat = vrm.isReMaterialized(VReg);
        MachineInstr *MI = getInstructionFromIndex(index);
        bool CanFold = false;
        bool FoundUse = false;
        Ops.clear();
        if (spills[i].canFold) {
          CanFold = true;
          for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
            MachineOperand &MO = MI->getOperand(j);
            if (!MO.isReg() || MO.getReg() != VReg)
              continue;

            Ops.push_back(j);
            if (MO.isDef())
              continue;
            if (isReMat ||
                (!FoundUse && !alsoFoldARestore(Id, index, VReg,
                                                RestoreMBBs, RestoreIdxes))) {
              // MI has two-address uses of the same register. If the use
              // isn't the first and only use in the BB, then we can't fold
              // it. FIXME: Move this to rewriteInstructionsForSpills.
              CanFold = false;
              break;
            }
            FoundUse = true;
          }
        }
        // Fold the store into the def if possible.
        bool Folded = false;
        if (CanFold && !Ops.empty()) {
          if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot, VReg)) {
            Folded = true;
            if (FoundUse) {
              // Also folded uses, do not issue a load.
              eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
              nI.removeRange(index.getLoadIndex(), index.getDefIndex());
            }
            nI.removeRange(index.getDefIndex(), index.getStoreIndex());
          }
        }

        // Otherwise tell the spiller to issue a spill.
        if (!Folded) {
          LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
          bool isKill = LR->end == index.getStoreIndex();
          if (!MI->registerDefIsDead(nI.reg))
            // No need to spill a dead def.
            vrm.addSpillPoint(VReg, isKill, MI);
          if (isKill)
            AddedKill.insert(&nI);
        }
      }
      Id = SpillMBBs.find_next(Id);
    }
  }
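  // For instance (hypothetical x86-style code), when the store is folded
  // above, the pair
  //   %reg1024<def> = ADD32rr %reg1024, %reg1025
  //   MOV32mr <fi#0>, %reg1024        ; spill store
  // can be rewritten as the single memory-operand form
  //   ADD32mr <fi#0>, %reg1025
  // so neither the vreg def nor the separate store remains.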
  int Id = RestoreMBBs.find_first();
  while (Id != -1) {
    std::vector<SRInfo> &restores = RestoreIdxes[Id];
    for (unsigned i = 0, e = restores.size(); i != e; ++i) {
      SlotIndex index = restores[i].index;
      if (index == SlotIndex())
        continue;
      unsigned VReg = restores[i].vreg;
      LiveInterval &nI = getOrCreateInterval(VReg);
      bool isReMat = vrm.isReMaterialized(VReg);
      MachineInstr *MI = getInstructionFromIndex(index);
      bool CanFold = false;
      Ops.clear();
      if (restores[i].canFold) {
        CanFold = true;
        for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
          MachineOperand &MO = MI->getOperand(j);
          if (!MO.isReg() || MO.getReg() != VReg)
            continue;

          if (MO.isDef()) {
            // If this restore were to be folded, it would have been folded
            // already.
            CanFold = false;
            break;
          }
          Ops.push_back(j);
        }
      }

      // Fold the load into the use if possible.
      bool Folded = false;
      if (CanFold && !Ops.empty()) {
        if (!isReMat)
          Folded = tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot, VReg);
        else {
          MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
          int LdSlot = 0;
          bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
          // If the rematerializable def is a load, also try to fold it.
          if (isLoadSS || ReMatDefMI->getDesc().canFoldAsLoad())
            Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
                                          Ops, isLoadSS, LdSlot, VReg);
          if (!Folded) {
            unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
            if (ImpUse) {
              // Re-matting an instruction with virtual register use. Add the
              // register as an implicit use on the use MI and update the
              // register interval's spill weight to HUGE_VALF to prevent it
              // from being spilled.
              LiveInterval &ImpLi = getInterval(ImpUse);
              ImpLi.weight = HUGE_VALF;
              MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
            }
          }
        }
      }
      // If folding is not possible / failed, then tell the spiller to issue a
      // load / rematerialization for us.
      if (Folded)
        nI.removeRange(index.getLoadIndex(), index.getDefIndex());
      else
        vrm.addRestorePoint(VReg, MI);
    }
    Id = RestoreMBBs.find_next(Id);
  }
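  // An example of the rematerialization-with-implicit-use case handled above
  // (hypothetical x86 instruction; operands abbreviated):
  //   %reg1026<def> = LEA32r %reg1024, ...
  // Re-matting %reg1026 at a use point requires %reg1024 to still be live in
  // a register there, so %reg1024 is added as an implicit use and its
  // interval is made prohibitively expensive to spill.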
  // Finalize intervals: add kills, finalize spill weights, and filter out
  // dead intervals.
  std::vector<LiveInterval*> RetNewLIs;
  for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
    LiveInterval *LI = NewLIs[i];
    if (!LI->empty()) {
      // Normalize the accumulated weight by the interval's approximate
      // length so it reflects use density rather than raw use count.
      LI->weight /= SlotIndex::NUM * getApproximateInstructionCount(*LI);
      if (!AddedKill.count(LI)) {
        LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
        SlotIndex LastUseIdx = LR->end.getBaseIndex();
        MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
        int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
        assert(UseIdx != -1);
        if (!LastUse->isRegTiedToDefOperand(UseIdx)) {
          LastUse->getOperand(UseIdx).setIsKill();
          vrm.addKillPoint(LI->reg, LastUseIdx);
        }
      }
      RetNewLIs.push_back(LI);
    }
  }

  handleSpilledImpDefs(li, vrm, rc, RetNewLIs);
  return RetNewLIs;
}
/// hasAllocatableSuperReg - Return true if the specified physical register has
/// any super register that's allocatable.
bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS)
    if (allocatableRegs_[*AS] && hasInterval(*AS))
      return true;
  return false;
}
/// getRepresentativeReg - Find the largest super register of the specified
/// physical register.
unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
  // Find the largest super-register that is allocatable.
  unsigned BestReg = Reg;
  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
    unsigned SuperReg = *AS;
    if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) {
      BestReg = SuperReg;
      break;
    }
  }
  return BestReg;
}
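// For example, on 32-bit x86, getRepresentativeReg(X86::AL) would walk the
// super-register chain AL -> AX -> EAX and return EAX, assuming EAX has a
// live interval and no allocatable super-register of its own (illustrative;
// the exact result depends on the target's register hierarchy).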
/// getNumConflictsWithPhysReg - Return the number of uses and defs of the
/// specified interval that conflict with the specified physical register.
unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
                                                   unsigned PhysReg) const {
  unsigned NumConflicts = 0;
  const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg));
  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
         E = mri_->reg_end(); I != E; ++I) {
    MachineOperand &O = I.getOperand();
    MachineInstr *MI = O.getParent();
    SlotIndex Index = getInstructionIndex(MI);
    if (pli.liveAt(Index))
      ++NumConflicts;
  }
  return NumConflicts;
}
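// A register allocator can use this count as a rough interference cost, e.g.
// (hypothetical caller; names are made up):
//   if (lis->getNumConflictsWithPhysReg(CurLI, PhysReg) * Factor < Benefit)
//     lis->spillPhysRegAroundRegDefsUses(CurLI, PhysReg, vrm);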
/// spillPhysRegAroundRegDefsUses - Spill the specified physical register
/// around all defs and uses of the specified interval. Return true if it
/// was able to cut its interval.
bool LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
                                                  unsigned PhysReg,
                                                  VirtRegMap &vrm) {
  unsigned SpillReg = getRepresentativeReg(PhysReg);

  for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
    // If there is a register that aliases PhysReg but is not a sub-register
    // of the chosen representative super register, assert, since we can't
    // handle that case yet.
    assert(*AS == SpillReg || !allocatableRegs_[*AS] || !hasInterval(*AS) ||
           tri_->isSuperRegister(*AS, SpillReg));

  bool Cut = false;
  SmallVector<unsigned, 4> PRegs;
  if (hasInterval(SpillReg))
    PRegs.push_back(SpillReg);
  else {
    SmallSet<unsigned, 4> Added;
    for (const unsigned* AS = tri_->getSubRegisters(SpillReg); *AS; ++AS)
      if (Added.insert(*AS) && hasInterval(*AS)) {
        PRegs.push_back(*AS);
        for (const unsigned* ASS = tri_->getSubRegisters(*AS); *ASS; ++ASS)
          Added.insert(*ASS);
      }
  }

  SmallPtrSet<MachineInstr*, 8> SeenMIs;
  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
         E = mri_->reg_end(); I != E; ++I) {
    MachineOperand &O = I.getOperand();
    MachineInstr *MI = O.getParent();
    if (SeenMIs.count(MI))
      continue;
    SeenMIs.insert(MI);
    SlotIndex Index = getInstructionIndex(MI);
    for (unsigned i = 0, e = PRegs.size(); i != e; ++i) {
      unsigned PReg = PRegs[i];
      LiveInterval &pli = getInterval(PReg);
      if (!pli.liveAt(Index))
        continue;
      vrm.addEmergencySpill(PReg, MI);
      SlotIndex StartIdx = Index.getLoadIndex();
      SlotIndex EndIdx = Index.getNextIndex().getBaseIndex();
      if (pli.isInOneLiveRange(StartIdx, EndIdx)) {
        pli.removeRange(StartIdx, EndIdx);
        Cut = true;
      } else {
        std::string msg;
        raw_string_ostream Msg(msg);
        Msg << "Ran out of registers during register allocation!";
        if (MI->getOpcode() == TargetInstrInfo::INLINEASM) {
          Msg << "\nPlease check your inline asm statement for invalid "
              << "constraints:\n";
          MI->print(Msg, tm_);
        }
        llvm_report_error(Msg.str());
      }
      for (const unsigned* AS = tri_->getSubRegisters(PReg); *AS; ++AS) {
        if (!hasInterval(*AS))
          continue;
        LiveInterval &spli = getInterval(*AS);
        if (spli.liveAt(Index))
          spli.removeRange(Index.getLoadIndex(),
                           Index.getNextIndex().getBaseIndex());
      }
    }
  }
  return Cut;
}
/// addLiveRangeToEndOfBlock - Create a live range for the given register,
/// extending from startInst's def to the end of startInst's basic block.
LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg,
                                                  MachineInstr* startInst) {
  LiveInterval& Interval = getOrCreateInterval(reg);
  VNInfo* VN = Interval.getNextValue(
    SlotIndex(getInstructionIndex(startInst).getDefIndex()),
    startInst, true, getVNInfoAllocator());
  VN->setHasPHIKill(true);
  VN->kills.push_back(indexes_->getTerminatorGap(startInst->getParent()));
  LiveRange LR(
    SlotIndex(getInstructionIndex(startInst).getDefIndex()),
    getMBBEndIdx(startInst->getParent()).getNextIndex().getBaseIndex(), VN);
  Interval.addRange(LR);

  return LR;
}
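// A sketch of a typical use (e.g. from PHI elimination; the variable names
// here are hypothetical): after inserting a copy that must stay live out of
// its block,
//   LiveRange LR = LIs->addLiveRangeToEndOfBlock(IncomingReg, CopyMI);
// keeps IncomingReg live from CopyMI's def to the end of CopyMI's block.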