1 //===-- LiveIntervalAnalysis.cpp - Live Interval Analysis -----------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file implements the LiveInterval analysis pass which is used
11 // by the Linear Scan Register allocator. This pass linearizes the
12 // basic blocks of the function in DFS order and uses the
13 // LiveVariables pass to conservatively compute live intervals for
14 // each virtual and physical register.
16 //===----------------------------------------------------------------------===//
18 #define DEBUG_TYPE "liveintervals"
19 #include "llvm/CodeGen/LiveIntervalAnalysis.h"
20 #include "VirtRegMap.h"
21 #include "llvm/Value.h"
22 #include "llvm/Analysis/AliasAnalysis.h"
23 #include "llvm/CodeGen/LiveVariables.h"
24 #include "llvm/CodeGen/MachineFrameInfo.h"
25 #include "llvm/CodeGen/MachineInstr.h"
26 #include "llvm/CodeGen/MachineInstrBuilder.h"
27 #include "llvm/CodeGen/MachineLoopInfo.h"
28 #include "llvm/CodeGen/MachineMemOperand.h"
29 #include "llvm/CodeGen/MachineRegisterInfo.h"
30 #include "llvm/CodeGen/Passes.h"
31 #include "llvm/CodeGen/ProcessImplicitDefs.h"
32 #include "llvm/Target/TargetRegisterInfo.h"
33 #include "llvm/Target/TargetInstrInfo.h"
34 #include "llvm/Target/TargetMachine.h"
35 #include "llvm/Target/TargetOptions.h"
36 #include "llvm/Support/CommandLine.h"
37 #include "llvm/Support/Debug.h"
38 #include "llvm/Support/ErrorHandling.h"
39 #include "llvm/Support/raw_ostream.h"
40 #include "llvm/ADT/DepthFirstIterator.h"
41 #include "llvm/ADT/SmallSet.h"
42 #include "llvm/ADT/Statistic.h"
43 #include "llvm/ADT/STLExtras.h"
49 // Hidden options for help debugging.
// Command-line switch to turn off rematerialization during spilling,
// useful for isolating remat-related miscompiles.
50 static cl::opt<bool> DisableReMat("disable-rematerialization",
51 cl::init(false), cl::Hidden);
// Pass statistics, reported with -stats.
53 STATISTIC(numIntervals , "Number of original intervals");
54 STATISTIC(numFolds , "Number of loads/stores folded into instructions");
55 STATISTIC(numSplits , "Number of intervals split");
// Pass identification and registration. The INITIALIZE_* macros register
// LiveIntervals with the PassRegistry and declare the analyses it depends
// on (these must mirror getAnalysisUsage below).
57 char LiveIntervals::ID = 0;
58 INITIALIZE_PASS_BEGIN(LiveIntervals, "liveintervals",
59 "Live Interval Analysis", false, false)
60 INITIALIZE_PASS_DEPENDENCY(LiveVariables)
61 INITIALIZE_PASS_DEPENDENCY(MachineLoopInfo)
62 INITIALIZE_PASS_DEPENDENCY(PHIElimination)
63 INITIALIZE_PASS_DEPENDENCY(TwoAddressInstructionPass)
64 INITIALIZE_PASS_DEPENDENCY(ProcessImplicitDefs)
65 INITIALIZE_PASS_DEPENDENCY(SlotIndexes)
66 INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
67 INITIALIZE_PASS_END(LiveIntervals, "liveintervals",
68 "Live Interval Analysis", false, false)
// getAnalysisUsage - Declare which analyses this pass requires and which it
// preserves. SlotIndexes is required transitively because clients of
// LiveIntervals also use the instruction numbering it provides.
// NOTE(review): the listing elides some lines here (e.g. the closing brace);
// the visible body is incomplete.
70 void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
72 AU.addRequired<AliasAnalysis>();
73 AU.addPreserved<AliasAnalysis>();
74 AU.addRequired<LiveVariables>();
75 AU.addPreserved<LiveVariables>();
76 AU.addRequired<MachineLoopInfo>();
77 AU.addPreserved<MachineLoopInfo>();
78 AU.addPreservedID(MachineDominatorsID);
81 AU.addPreservedID(PHIEliminationID);
82 AU.addRequiredID(PHIEliminationID);
85 AU.addRequiredID(TwoAddressInstructionPassID);
86 AU.addPreserved<ProcessImplicitDefs>();
87 AU.addRequired<ProcessImplicitDefs>();
88 AU.addPreserved<SlotIndexes>();
89 AU.addRequiredTransitive<SlotIndexes>();
90 MachineFunctionPass::getAnalysisUsage(AU);
// releaseMemory - Free all per-function state: the LiveInterval objects in
// the register-to-interval map, the VNInfo allocator arena, and any cloned
// machine instructions created during analysis.
// NOTE(review): lines are elided in this listing (the delete of I->second,
// the CloneMIs.pop_back(), and closing braces are not visible).
93 void LiveIntervals::releaseMemory() {
94 // Free the live intervals themselves.
95 for (DenseMap<unsigned, LiveInterval*>::iterator I = r2iMap_.begin(),
96 E = r2iMap_.end(); I != E; ++I)
101 // Release VNInfo memory regions, VNInfo objects don't need to be dtor'd.
102 VNInfoAllocator.Reset();
103 while (!CloneMIs.empty()) {
104 MachineInstr *MI = CloneMIs.back();
106 mf_->DeleteMachineInstr(MI);
110 /// runOnMachineFunction - Register allocate the whole function
// Caches pointers to the target/register/instruction info and the required
// analyses, then computes the live intervals for the function.
// NOTE(review): the assignment of mf_ (presumably `mf_ = &fn;`) is on an
// elided line — mf_ is used on the next visible line; also the
// computeIntervals() call and the return are not visible here. Confirm
// against the full file.
112 bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
114 mri_ = &mf_->getRegInfo();
115 tm_ = &fn.getTarget();
116 tri_ = tm_->getRegisterInfo();
117 tii_ = tm_->getInstrInfo();
118 aa_ = &getAnalysis<AliasAnalysis>();
119 lv_ = &getAnalysis<LiveVariables>();
120 indexes_ = &getAnalysis<SlotIndexes>();
121 allocatableRegs_ = tri_->getAllocatableSet(fn);
// Update the -stats counter with the number of intervals built.
125 numIntervals += getNumIntervals();
131 /// print - Implement the dump method.
// Prints every live interval in the function to OS, one per line.
132 void LiveIntervals::print(raw_ostream &OS, const Module* ) const {
133 OS << "********** INTERVALS **********\n";
134 for (const_iterator I = begin(), E = end(); I != E; ++I) {
135 I->second->print(OS, tri_);
// printInstrs - Dump every machine instruction in the function, prefixed
// with its slot index. DBG_VALUE instructions have no slot index and are
// skipped (the `continue` is on an elided line).
142 void LiveIntervals::printInstrs(raw_ostream &OS) const {
143 OS << "********** MACHINEINSTRS **********\n";
145 for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end();
146 mbbi != mbbe; ++mbbi) {
147 OS << "BB#" << mbbi->getNumber()
148 << ":\t\t# derived from " << mbbi->getName() << "\n";
149 for (MachineBasicBlock::iterator mii = mbbi->begin(),
150 mie = mbbi->end(); mii != mie; ++mii) {
151 if (mii->isDebugValue())
154 OS << getInstructionIndex(mii) << '\t' << *mii;
159 void LiveIntervals::dumpInstrs() const {
// conflictsWithPhysReg - Conservatively determine whether assigning physreg
// `reg` to the single-range virtual interval `li` would conflict with any
// instruction inside that range. Only handles intervals whose live range is
// one contiguous segment within a single basic block; anything fancier
// returns early (the early `return`/`continue` statements sit on elided
// lines in this listing).
163 bool LiveIntervals::conflictsWithPhysReg(const LiveInterval &li,
164 VirtRegMap &vrm, unsigned reg) {
165 // We don't handle fancy stuff crossing basic block boundaries
166 if (li.ranges.size() != 1)
168 const LiveRange &range = li.ranges.front();
169 SlotIndex idx = range.start.getBaseIndex();
170 SlotIndex end = range.end.getPrevSlot().getBaseIndex().getNextIndex();
172 // Skip deleted instructions
173 MachineInstr *firstMI = getInstructionFromIndex(idx);
174 while (!firstMI && idx != end) {
175 idx = idx.getNextIndex();
176 firstMI = getInstructionFromIndex(idx);
181 // Find last instruction in range
182 SlotIndex lastIdx = end.getPrevIndex();
183 MachineInstr *lastMI = getInstructionFromIndex(lastIdx);
184 while (!lastMI && lastIdx != idx) {
185 lastIdx = lastIdx.getPrevIndex();
186 lastMI = getInstructionFromIndex(lastIdx);
191 // Range cannot cross basic block boundaries or terminators
192 MachineBasicBlock *MBB = firstMI->getParent();
193 if (MBB != lastMI->getParent() || lastMI->getDesc().isTerminator())
196 MachineBasicBlock::const_iterator E = lastMI;
198 for (MachineBasicBlock::const_iterator I = firstMI; I != E; ++I) {
199 const MachineInstr &MI = *I;
201 // Allow copies to and from li.reg
// NOTE(review): the guard that MI is actually a copy (checking operand
// count / isCopyLike) is presumably on an elided line above this test.
203 if (MI.getOperand(0).getReg() == li.reg ||
204 MI.getOperand(1).getReg() == li.reg)
207 // Check for operands using reg
208 for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
209 const MachineOperand& mop = MI.getOperand(i);
// Map virtual operands through the VirtRegMap to see which physreg
// they currently occupy, then test for overlap with `reg`.
212 unsigned PhysReg = mop.getReg();
213 if (PhysReg == 0 || PhysReg == li.reg)
215 if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
216 if (!vrm.hasPhys(PhysReg))
218 PhysReg = vrm.getPhys(PhysReg);
220 if (PhysReg && tri_->regsOverlap(PhysReg, reg))
225 // No conflicts found.
// conflictsWithAliasRef - Walk every instruction covered by `li` (skipping
// deleted instructions and coalesced copies in JoinedCopies) and report
// whether any operand references a physical register overlapping `Reg`.
// The `return true` on overlap and the final `return false` are on elided
// lines in this listing.
229 bool LiveIntervals::conflictsWithAliasRef(LiveInterval &li, unsigned Reg,
230 SmallPtrSet<MachineInstr*,32> &JoinedCopies) {
231 for (LiveInterval::Ranges::const_iterator
232 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
233 for (SlotIndex index = I->start.getBaseIndex(),
234 end = I->end.getPrevSlot().getBaseIndex().getNextIndex();
236 index = index.getNextIndex()) {
237 MachineInstr *MI = getInstructionFromIndex(index);
239 continue; // skip deleted instructions
241 if (JoinedCopies.count(MI))
243 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
244 MachineOperand& MO = MI->getOperand(i);
// Only physical-register operands other than Reg itself can alias.
247 unsigned PhysReg = MO.getReg();
248 if (PhysReg == 0 || PhysReg == Reg ||
249 TargetRegisterInfo::isVirtualRegister(PhysReg))
251 if (tri_->regsOverlap(Reg, PhysReg))
// printRegName - Debug helper: print a physical register by its target name,
// or a virtual register as "%reg<N>".
261 static void printRegName(unsigned reg, const TargetRegisterInfo* tri_) {
262 if (TargetRegisterInfo::isPhysicalRegister(reg))
263 dbgs() << tri_->getName(reg);
265 dbgs() << "%reg" << reg;
// MultipleDefsBySameMI - Return true if the same virtual register is defined
// more than once by MI at operand indices after MOIdx (e.g. two subregister
// defs produced by REG_SEQUENCE elimination). The assert checks the two defs
// write distinct subregisters. The `return true`/`return false` statements
// are on elided lines in this listing.
270 bool MultipleDefsBySameMI(const MachineInstr &MI, unsigned MOIdx) {
271 unsigned Reg = MI.getOperand(MOIdx).getReg();
272 for (unsigned i = MOIdx+1, e = MI.getNumOperands(); i < e; ++i) {
273 const MachineOperand &MO = MI.getOperand(i);
276 if (MO.getReg() == Reg && MO.isDef()) {
277 assert(MI.getOperand(MOIdx).getSubReg() != MO.getSubReg() &&
278 MI.getOperand(MOIdx).getSubReg() &&
279 (MO.getSubReg() || MO.isImplicit()));
286 /// isPartialRedef - Return true if the specified def at the specific index is
287 /// partially re-defining the specified live interval. A common case of this is
288 /// a definition of the sub-register.
// A subreg def is a partial redef when the value live just before the def
// was itself created by an instruction that explicitly defined the full
// interval register. Early-clobber subreg defs are excluded up front.
289 bool LiveIntervals::isPartialRedef(SlotIndex MIIdx, MachineOperand &MO,
290 LiveInterval &interval) {
291 if (!MO.getSubReg() || MO.isEarlyClobber())
294 SlotIndex RedefIndex = MIIdx.getDefIndex();
295 const LiveRange *OldLR =
296 interval.getLiveRangeContaining(RedefIndex.getUseIndex());
// NOTE(review): a null-check of OldLR/DefMI presumably sits on the elided
// line between these statements — confirm against the full file.
297 MachineInstr *DefMI = getInstructionFromIndex(OldLR->valno->def);
299 return DefMI->findRegisterDefOperandIdx(interval.reg) != -1;
// handleVirtualRegisterDef - Extend `interval` for a definition of a virtual
// register by instruction `mi` (operand MOIdx) at slot MIIdx. Three cases:
// (1) first def of the vreg: build ranges from LiveVariables kill info,
// (2) redefinition by a two-address instruction or a partial (subreg) redef:
// split the existing value number, (3) phi-join redefinition: the value is
// live only to the end of the block. Several guard lines, returns, and
// closing braces are elided in this listing.
304 void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
305 MachineBasicBlock::iterator mi,
309 LiveInterval &interval) {
311 dbgs() << "\t\tregister: ";
312 printRegName(interval.reg, tri_);
315 // Virtual registers may be defined multiple times (due to phi
316 // elimination and 2-addr elimination). Much of what we do only has to be
317 // done once for the vreg. We use an empty interval to detect the first
318 // time we see a vreg.
319 LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);
320 if (interval.empty()) {
321 // Get the Idx of the defining instructions.
322 SlotIndex defIndex = MIIdx.getDefIndex();
323 // Earlyclobbers move back one, so that they overlap the live range
325 if (MO.isEarlyClobber())
326 defIndex = MIIdx.getUseIndex();
328 // Make sure the first definition is not a partial redefinition. Add an
329 // <imp-def> of the full register.
// NOTE(review): the `if (MO.getSubReg())` guard for this call is on an
// elided line — addRegisterDefined is presumably conditional.
331 mi->addRegisterDefined(interval.reg);
333 MachineInstr *CopyMI = NULL;
334 if (mi->isCopyLike()) {
// (CopyMI assignment elided.)
338 VNInfo *ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);
339 assert(ValNo->id == 0 && "First value in interval is not 0?");
341 // Loop over all of the blocks that the vreg is defined in. There are
342 // two cases we have to handle here. The most common case is a vreg
343 // whose lifetime is contained within a basic block. In this case there
344 // will be a single kill, in MBB, which comes after the definition.
345 if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
346 // FIXME: what about dead vars?
// killIdx declaration is on an elided line.
348 if (vi.Kills[0] != mi)
349 killIdx = getInstructionIndex(vi.Kills[0]).getDefIndex();
351 killIdx = defIndex.getStoreIndex();
353 // If the kill happens after the definition, we have an intra-block
355 if (killIdx > defIndex) {
356 assert(vi.AliveBlocks.empty() &&
357 "Shouldn't be alive across any blocks!");
358 LiveRange LR(defIndex, killIdx, ValNo);
359 interval.addRange(LR);
360 DEBUG(dbgs() << " +" << LR << "\n");
365 // The other case we handle is when a virtual register lives to the end
366 // of the defining block, potentially live across some blocks, then is
367 // live into some number of blocks, but gets killed. Start by adding a
368 // range that goes from this definition to the end of the defining block.
369 LiveRange NewLR(defIndex, getMBBEndIdx(mbb), ValNo);
370 DEBUG(dbgs() << " +" << NewLR);
371 interval.addRange(NewLR);
373 bool PHIJoin = lv_->isPHIJoin(interval.reg);
// NOTE(review): the `if (PHIJoin)` branch header is elided before the
// following phi-join handling.
376 // A phi join register is killed at the end of the MBB and revived as a new
377 // valno in the killing blocks.
378 assert(vi.AliveBlocks.empty() && "Phi join can't pass through blocks");
379 DEBUG(dbgs() << " phi-join");
380 ValNo->setHasPHIKill(true);
382 // Iterate over all of the blocks that the variable is completely
383 // live in, adding [insrtIndex(begin), instrIndex(end)+4) to the
385 for (SparseBitVector<>::iterator I = vi.AliveBlocks.begin(),
386 E = vi.AliveBlocks.end(); I != E; ++I) {
387 MachineBasicBlock *aliveBlock = mf_->getBlockNumbered(*I);
388 LiveRange LR(getMBBStartIdx(aliveBlock), getMBBEndIdx(aliveBlock), ValNo);
389 interval.addRange(LR);
390 DEBUG(dbgs() << " +" << LR);
394 // Finally, this virtual register is live from the start of any killing
395 // block to the 'use' slot of the killing instruction.
396 for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
397 MachineInstr *Kill = vi.Kills[i];
398 SlotIndex Start = getMBBStartIdx(Kill->getParent());
399 SlotIndex killIdx = getInstructionIndex(Kill).getDefIndex();
401 // Create interval with one of a NEW value number. Note that this value
402 // number isn't actually defined by an instruction, weird huh? :)
// The PHI-def value numbers live at block starts, where no real
// instruction can be indexed.
404 assert(getInstructionFromIndex(Start) == 0 &&
405 "PHI def index points at actual instruction.");
406 ValNo = interval.getNextValue(Start, 0, VNInfoAllocator);
407 ValNo->setIsPHIDef(true);
409 LiveRange LR(Start, killIdx, ValNo);
410 interval.addRange(LR);
411 DEBUG(dbgs() << " +" << LR);
415 if (MultipleDefsBySameMI(*mi, MOIdx))
416 // Multiple defs of the same virtual register by the same instruction.
417 // e.g. %reg1031:5<def>, %reg1031:6<def> = VLD1q16 %reg1024<kill>, ...
418 // This is likely due to elimination of REG_SEQUENCE instructions. Return
419 // here since there is nothing to do.
422 // If this is the second time we see a virtual register definition, it
423 // must be due to phi elimination or two addr elimination. If this is
424 // the result of two address elimination, then the vreg is one of the
425 // def-and-use register operand.
427 // It may also be partial redef like this:
428 // 80 %reg1041:6<def> = VSHRNv4i16 %reg1034<kill>, 12, pred:14, pred:%reg0
429 // 120 %reg1041:5<def> = VSHRNv4i16 %reg1039<kill>, 12, pred:14, pred:%reg0
430 bool PartReDef = isPartialRedef(MIIdx, MO, interval);
431 if (PartReDef || mi->isRegTiedToUseOperand(MOIdx)) {
432 // If this is a two-address definition, then we have already processed
433 // the live range. The only problem is that we didn't realize there
434 // are actually two values in the live interval. Because of this we
435 // need to take the LiveRegion that defines this register and split it
437 SlotIndex RedefIndex = MIIdx.getDefIndex();
438 if (MO.isEarlyClobber())
439 RedefIndex = MIIdx.getUseIndex();
441 const LiveRange *OldLR =
442 interval.getLiveRangeContaining(RedefIndex.getUseIndex());
443 VNInfo *OldValNo = OldLR->valno;
444 SlotIndex DefIndex = OldValNo->def.getDefIndex();
446 // Delete the previous value, which should be short and continuous,
447 // because the 2-addr copy must be in the same MBB as the redef.
448 interval.removeRange(DefIndex, RedefIndex);
450 // The new value number (#1) is defined by the instruction we claimed
452 VNInfo *ValNo = interval.createValueCopy(OldValNo, VNInfoAllocator);
454 // Value#0 is now defined by the 2-addr instruction.
455 OldValNo->def = RedefIndex;
456 OldValNo->setCopy(0);
458 // A re-def may be a copy. e.g. %reg1030:6<def> = VMOVD %reg1026, ...
459 if (PartReDef && mi->isCopyLike())
460 OldValNo->setCopy(&*mi);
462 // Add the new live interval which replaces the range for the input copy.
463 LiveRange LR(DefIndex, RedefIndex, ValNo);
464 DEBUG(dbgs() << " replace range with " << LR);
465 interval.addRange(LR);
467 // If this redefinition is dead, we need to add a dummy unit live
468 // range covering the def slot.
// NOTE(review): the `if (MO.isDead())` guard is on an elided line.
470 interval.addRange(LiveRange(RedefIndex, RedefIndex.getStoreIndex(),
474 dbgs() << " RESULT: ";
475 interval.print(dbgs(), tri_);
477 } else if (lv_->isPHIJoin(interval.reg)) {
478 // In the case of PHI elimination, each variable definition is only
479 // live until the end of the block. We've already taken care of the
480 // rest of the live range.
482 SlotIndex defIndex = MIIdx.getDefIndex();
483 if (MO.isEarlyClobber())
484 defIndex = MIIdx.getUseIndex();
487 MachineInstr *CopyMI = NULL;
488 if (mi->isCopyLike())
// (CopyMI assignment and the VNInfo* declaration are elided.)
490 ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);
492 SlotIndex killIndex = getMBBEndIdx(mbb);
493 LiveRange LR(defIndex, killIndex, ValNo);
494 interval.addRange(LR);
495 ValNo->setHasPHIKill(true);
496 DEBUG(dbgs() << " phi-join +" << LR);
498 llvm_unreachable("Multiply defined register");
502 DEBUG(dbgs() << '\n');
// handlePhysicalRegisterDef - Add a live range for a physical register
// defined by `mi` at MIIdx. Physreg intervals never cross block boundaries,
// so the scan walks forward within MBB looking for a kill, a tied redefine,
// or a plain redefine; otherwise the def is treated as dead. Several break
// statements and closing braces are elided in this listing.
505 void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
506 MachineBasicBlock::iterator mi,
509 LiveInterval &interval,
510 MachineInstr *CopyMI) {
511 // A physical register cannot be live across basic block, so its
512 // lifetime must end somewhere in its defining basic block.
514 dbgs() << "\t\tregister: ";
515 printRegName(interval.reg, tri_);
518 SlotIndex baseIndex = MIIdx;
519 SlotIndex start = baseIndex.getDefIndex();
520 // Earlyclobbers move back one.
521 if (MO.isEarlyClobber())
522 start = MIIdx.getUseIndex();
523 SlotIndex end = start;
525 // If it is not used after definition, it is considered dead at
526 // the instruction defining it. Hence its interval is:
527 // [defSlot(def), defSlot(def)+1)
528 // For earlyclobbers, the defSlot was pushed back one; the extra
529 // advance below compensates.
// NOTE(review): the `if (MO.isDead())` guard for this dead-def shortcut is
// on an elided line.
531 DEBUG(dbgs() << " dead");
532 end = start.getStoreIndex();
536 // If it is not dead on definition, it must be killed by a
537 // subsequent instruction. Hence its interval is:
538 // [defSlot(def), useSlot(kill)+1)
539 baseIndex = baseIndex.getNextIndex();
540 while (++mi != MBB->end()) {
// DBG_VALUE instructions carry no slot index; skip them.
542 if (mi->isDebugValue())
544 if (getInstructionFromIndex(baseIndex) == 0)
545 baseIndex = indexes_->getNextNonNullIndex(baseIndex);
547 if (mi->killsRegister(interval.reg, tri_)) {
548 DEBUG(dbgs() << " killed");
549 end = baseIndex.getDefIndex();
552 int DefIdx = mi->findRegisterDefOperandIdx(interval.reg,false,false,tri_);
// (the `if (DefIdx != -1)` guard is elided.)
554 if (mi->isRegTiedToUseOperand(DefIdx)) {
555 // Two-address instruction.
556 end = baseIndex.getDefIndex();
558 // Another instruction redefines the register before it is ever read.
559 // Then the register is essentially dead at the instruction that
560 // defines it. Hence its interval is:
561 // [defSlot(def), defSlot(def)+1)
562 DEBUG(dbgs() << " dead");
563 end = start.getStoreIndex();
569 baseIndex = baseIndex.getNextIndex();
572 // The only case we should have a dead physreg here without a killing or
573 // instruction where we know it's dead is if it is live-in to the function
574 // and never used. Another possible case is the implicit use of the
575 // physical register has been deleted by two-address pass.
576 end = start.getStoreIndex();
// (label "exit:" presumably elided — control reaches here after the scan.)
579 assert(start < end && "did not find end of interval?");
581 // Already exists? Extend old live interval.
582 VNInfo *ValNo = interval.getVNInfoAt(start);
583 bool Extend = ValNo != 0;
// A fresh value number is created only when no value covers `start`.
585 ValNo = interval.getNextValue(start, CopyMI, VNInfoAllocator);
586 if (Extend && MO.isEarlyClobber())
587 ValNo->setHasRedefByEC(true);
588 LiveRange LR(start, end, ValNo);
589 interval.addRange(LR);
590 DEBUG(dbgs() << " +" << LR << '\n');
// handleRegisterDef - Dispatch a register definition to the virtual or
// physical handler. For an allocatable physreg, sub-registers not explicitly
// redefined by MI are also given intervals, since a full-register def
// clobbers them.
593 void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
594 MachineBasicBlock::iterator MI,
// (parameter lines MIIdx/MO/MOIdx are elided in this listing.)
598 if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
599 handleVirtualRegisterDef(MBB, MI, MIIdx, MO, MOIdx,
600 getOrCreateInterval(MO.getReg()));
601 else if (allocatableRegs_[MO.getReg()]) {
602 MachineInstr *CopyMI = NULL;
603 if (MI->isCopyLike())
// (CopyMI assignment elided.)
605 handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
606 getOrCreateInterval(MO.getReg()), CopyMI);
607 // Def of a register also defines its sub-registers.
608 for (const unsigned* AS = tri_->getSubRegisters(MO.getReg()); *AS; ++AS)
609 // If MI also modifies the sub-register explicitly, avoid processing it
610 // more than once. Do not pass in TRI here so it checks for exact match.
611 if (!MI->definesRegister(*AS))
612 handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
613 getOrCreateInterval(*AS), 0);
// handleLiveInRegister - Create a live range for a register that is live-in
// to MBB, starting at MIIdx. Scans forward for the first kill or redefine;
// if the register is neither killed nor redefined it is either dead (when
// never seen, for an alias) or live through the whole block. The resulting
// value number is marked as a PHI-def since it has no defining instruction.
// Break statements and some guards are on elided lines in this listing.
617 void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
619 LiveInterval &interval, bool isAlias) {
621 dbgs() << "\t\tlivein register: ";
622 printRegName(interval.reg, tri_);
625 // Look for kills, if it reaches a def before it's killed, then it shouldn't
626 // be considered a livein.
627 MachineBasicBlock::iterator mi = MBB->begin();
628 MachineBasicBlock::iterator E = MBB->end();
629 // Skip over DBG_VALUE at the start of the MBB.
630 if (mi != E && mi->isDebugValue()) {
631 while (++mi != E && mi->isDebugValue())
634 // MBB is empty except for DBG_VALUE's.
// (early return for the all-debug-value case elided.)
638 SlotIndex baseIndex = MIIdx;
639 SlotIndex start = baseIndex;
640 if (getInstructionFromIndex(baseIndex) == 0)
641 baseIndex = indexes_->getNextNonNullIndex(baseIndex);
643 SlotIndex end = baseIndex;
644 bool SeenDefUse = false;
// (the `while (mi != E)` scan header is elided.)
647 if (mi->killsRegister(interval.reg, tri_)) {
648 DEBUG(dbgs() << " killed");
649 end = baseIndex.getDefIndex();
652 } else if (mi->definesRegister(interval.reg, tri_)) {
653 // Another instruction redefines the register before it is ever read.
654 // Then the register is essentially dead at the instruction that defines
655 // it. Hence its interval is:
656 // [defSlot(def), defSlot(def)+1)
657 DEBUG(dbgs() << " dead");
658 end = start.getStoreIndex();
663 while (++mi != E && mi->isDebugValue())
664 // Skip over DBG_VALUE.
667 baseIndex = indexes_->getNextNonNullIndex(baseIndex);
670 // Live-in register might not be used at all.
// NOTE(review): the `if (!SeenDefUse)` guard and the `isAlias` distinction
// between "dead" and "live through" are on elided lines.
673 DEBUG(dbgs() << " dead");
674 end = MIIdx.getStoreIndex();
676 DEBUG(dbgs() << " live through");
681 SlotIndex defIdx = getMBBStartIdx(MBB);
682 assert(getInstructionFromIndex(defIdx) == 0 &&
683 "PHI def index points at actual instruction.");
// (the `VNInfo *vni =` declaration is elided.)
685 interval.getNextValue(defIdx, 0, VNInfoAllocator);
686 vni->setIsPHIDef(true);
687 LiveRange LR(start, end, vni);
689 interval.addRange(LR);
690 DEBUG(dbgs() << " +" << LR << '\n');
693 /// computeIntervals - computes the live intervals for virtual
694 /// registers. for some ordering of the machine instructions [1,N] a
695 /// live interval is an interval [i, j) where 1 <= i <= j < N for
696 /// which a variable is live
// Per block: first handle live-in registers (and their sub-registers),
// then walk each instruction's operands, building intervals for defs and
// collecting <undef> uses to be given empty intervals afterwards. Loop
// headers, `continue`s and closing braces are partly elided in this listing.
697 void LiveIntervals::computeIntervals() {
698 DEBUG(dbgs() << "********** COMPUTING LIVE INTERVALS **********\n"
699 << "********** Function: "
700 << ((Value*)mf_->getFunction())->getName() << '\n');
702 SmallVector<unsigned, 8> UndefUses;
703 for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
705 MachineBasicBlock *MBB = MBBI;
709 // Track the index of the current machine instr.
710 SlotIndex MIIndex = getMBBStartIdx(MBB);
711 DEBUG(dbgs() << "BB#" << MBB->getNumber()
712 << ":\t\t# derived from " << MBB->getName() << "\n");
714 // Create intervals for live-ins to this BB first.
715 for (MachineBasicBlock::livein_iterator LI = MBB->livein_begin(),
716 LE = MBB->livein_end(); LI != LE; ++LI) {
717 handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
718 // Multiple live-ins can alias the same register.
719 for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
720 if (!hasInterval(*AS))
721 handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
// (trailing `true /*isAlias*/)` argument elided.)
725 // Skip over empty initial indices.
726 if (getInstructionFromIndex(MIIndex) == 0)
727 MIIndex = indexes_->getNextNonNullIndex(MIIndex);
729 for (MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();
731 DEBUG(dbgs() << MIIndex << "\t" << *MI);
732 if (MI->isDebugValue())
// Operands are visited in reverse so that defs are handled before
// earlier-numbered operands of the same instruction.
736 for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
737 MachineOperand &MO = MI->getOperand(i);
738 if (!MO.isReg() || !MO.getReg())
741 // handle register defs - build intervals
// (the `if (MO.isDef())` guard is elided.)
743 handleRegisterDef(MBB, MI, MIIndex, MO, i);
744 else if (MO.isUndef())
745 UndefUses.push_back(MO.getReg());
748 // Move to the next instr slot.
749 MIIndex = indexes_->getNextNonNullIndex(MIIndex);
753 // Create empty intervals for registers defined by implicit_def's (except
754 // for those implicit_def that define values which are liveout of their
756 for (unsigned i = 0, e = UndefUses.size(); i != e; ++i) {
757 unsigned UndefReg = UndefUses[i];
758 (void)getOrCreateInterval(UndefReg);
// createInterval - Allocate a new LiveInterval for `reg`. Physical registers
// get HUGE_VALF spill weight so the allocator never tries to spill them.
762 LiveInterval* LiveIntervals::createInterval(unsigned reg) {
763 float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ? HUGE_VALF : 0.0F;
764 return new LiveInterval(reg, Weight);
767 /// dupInterval - Duplicate a live interval. The caller is responsible for
768 /// managing the allocated memory.
// Copies ranges and value numbers into a freshly created interval using the
// pass's VNInfo allocator. (The `return NewLI;` line is elided.)
769 LiveInterval* LiveIntervals::dupInterval(LiveInterval *li) {
770 LiveInterval *NewLI = createInterval(li->reg);
771 NewLI->Copy(*li, mri_, getVNInfoAllocator());
775 //===----------------------------------------------------------------------===//
776 // Register allocator hooks.
779 /// getReMatImplicitUse - If the remat definition MI has one (for now, we only
780 /// allow one) virtual register operand, then its uses are implicitly using
781 /// the register. Returns the virtual register.
// Scans MI's use operands, skipping the interval's own register and
// unallocatable physregs; asserts if more than one candidate is found.
// (The RegOp accumulator declaration, its assignment, and the return are
// on elided lines in this listing.)
782 unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
783 MachineInstr *MI) const {
785 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
786 MachineOperand &MO = MI->getOperand(i);
787 if (!MO.isReg() || !MO.isUse())
789 unsigned Reg = MO.getReg();
790 if (Reg == 0 || Reg == li.reg)
793 if (TargetRegisterInfo::isPhysicalRegister(Reg) &&
794 !allocatableRegs_[Reg])
796 // FIXME: For now, only remat MI with at most one register operand.
798 "Can't rematerialize instruction with multiple register operand!");
807 /// isValNoAvailableAt - Return true if the val# of the specified interval
808 /// which reaches the given instruction also reaches the specified use index.
// I.e. the value live at MI's slot and the value live at UseIdx are the
// same (non-null) VNInfo.
809 bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
810 SlotIndex UseIdx) const {
811 VNInfo *UValNo = li.getVNInfoAt(UseIdx);
812 return UValNo && UValNo == li.getVNInfoAt(getInstructionIndex(MI));
815 /// isReMaterializable - Returns true if the definition MI of the specified
816 /// val# of the specified interval is re-materializable.
// Full check: the target must consider MI trivially rematerializable, any
// implicit virtual-register use must reach every use of li's val#, and that
// implicit-use register must not itself be about to be spilled. Early
// returns and the final return are on elided lines in this listing.
817 bool LiveIntervals::isReMaterializable(const LiveInterval &li,
818 const VNInfo *ValNo, MachineInstr *MI,
819 SmallVectorImpl<LiveInterval*> &SpillIs,
// (the `bool &isLoad` parameter line is elided.)
824 if (!tii_->isTriviallyReMaterializable(MI, aa_))
827 // Target-specific code can mark an instruction as being rematerializable
828 // if it has one virtual reg use, though it had better be something like
829 // a PIC base register which is likely to be live everywhere.
830 unsigned ImpUse = getReMatImplicitUse(li, MI);
// (the `if (ImpUse)` guard is elided.)
832 const LiveInterval &ImpLi = getInterval(ImpUse);
833 for (MachineRegisterInfo::use_nodbg_iterator
834 ri = mri_->use_nodbg_begin(li.reg), re = mri_->use_nodbg_end();
836 MachineInstr *UseMI = &*ri;
837 SlotIndex UseIdx = getInstructionIndex(UseMI);
838 if (li.getVNInfoAt(UseIdx) != ValNo)
// The implicit-use value must be live at every place the remat'ed
// value would be used, otherwise remat is illegal.
840 if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
844 // If a register operand of the re-materialized instruction is going to
845 // be spilled next, then it's not legal to re-materialize this instruction.
846 for (unsigned i = 0, e = SpillIs.size(); i != e; ++i)
847 if (ImpUse == SpillIs[i]->reg)
853 /// isReMaterializable - Returns true if the definition MI of the specified
854 /// val# of the specified interval is re-materializable.
// Convenience overload: forwards to the full check with empty SpillIs and a
// dummy isLoad flag (the Dummy2 declaration is on an elided line).
855 bool LiveIntervals::isReMaterializable(const LiveInterval &li,
856 const VNInfo *ValNo, MachineInstr *MI) {
857 SmallVector<LiveInterval*, 4> Dummy1;
859 return isReMaterializable(li, ValNo, MI, Dummy1, Dummy2);
862 /// isReMaterializable - Returns true if every definition of MI of every
863 /// val# of the specified interval is re-materializable.
// Whole-interval check: each non-dead value number's defining instruction
// must be rematerializable; the `isLoad` out-parameter accumulates whether
// any remat def is a load. Early returns and the PHI-def/unused-value guards
// are on elided lines in this listing.
864 bool LiveIntervals::isReMaterializable(const LiveInterval &li,
865 SmallVectorImpl<LiveInterval*> &SpillIs,
// (the `bool &isLoad` parameter line and `isLoad = false;` are elided.)
868 for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
870 const VNInfo *VNI = *i;
872 continue; // Dead val#.
873 // Is the def for the val# rematerializable?
874 MachineInstr *ReMatDefMI = getInstructionFromIndex(VNI->def);
877 bool DefIsLoad = false;
879 !isReMaterializable(li, VNI, ReMatDefMI, SpillIs, DefIsLoad))
886 /// FilterFoldedOps - Filter out two-address use operands. Return
887 /// true if it finds any issue with the operands that ought to prevent
// folding (a "true" result aborts the fold). MRInfo accumulates whether the
// folded access is a mod, a ref, or both (VirtRegMap::ModRef flags).
889 static bool FilterFoldedOps(MachineInstr *MI,
890 SmallVector<unsigned, 2> &Ops,
// (the `unsigned &MRInfo` parameter line is elided.)
892 SmallVector<unsigned, 2> &FoldOps) {
894 for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
895 unsigned OpIdx = Ops[i];
896 MachineOperand &MO = MI->getOperand(OpIdx);
897 // FIXME: fold subreg use.
// (subreg bail-out and the `if (MO.isDef())` branch header are elided.)
901 MRInfo |= (unsigned)VirtRegMap::isMod;
903 // Filter out two-address use operand(s).
904 if (MI->isRegTiedToDefOperand(OpIdx)) {
905 MRInfo = VirtRegMap::isModRef;
// (the `continue` that skips adding tied operands is elided.)
908 MRInfo |= (unsigned)VirtRegMap::isRef;
910 FoldOps.push_back(OpIdx);
916 /// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
917 /// slot / to reg or any rematerialized load into ith operand of specified
918 /// MI. If it is successul, MI is updated with the newly created MI and
// returns true (the returns themselves sit on elided lines in this listing).
920 bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
921 VirtRegMap &vrm, MachineInstr *DefMI,
// (the `SlotIndex InstrIdx` parameter line is elided.)
923 SmallVector<unsigned, 2> &Ops,
924 bool isSS, int Slot, unsigned Reg) {
925 // If it is an implicit def instruction, just delete it.
926 if (MI->isImplicitDef()) {
927 RemoveMachineInstrFromMaps(MI);
928 vrm.RemoveMachineInstrFromMaps(MI);
929 MI->eraseFromParent();
// (++numFolds and `return true;` elided.)
934 // Filter the list of operand indexes that are to be folded. Abort if
935 // any operand will prevent folding.
// (the `unsigned MRInfo = 0;` declaration is elided.)
937 SmallVector<unsigned, 2> FoldOps;
938 if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
941 // The only time it's safe to fold into a two address instruction is when
942 // it's folding reload and spill from / into a spill stack slot.
943 if (DefMI && (MRInfo & VirtRegMap::isMod))
// Choose between folding a stack-slot access and folding the remat def.
946 MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(MI, FoldOps, Slot)
947 : tii_->foldMemoryOperand(MI, FoldOps, DefMI);
// (the `if (fmi)` success branch header is elided.)
949 // Remember this instruction uses the spill slot.
950 if (isSS) vrm.addSpillSlotUse(Slot, fmi);
952 // Attempt to fold the memory reference into the instruction. If
953 // we can do this, we don't need to insert spill code.
954 if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
955 vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
// Migrate all VirtRegMap bookkeeping from the old MI to the folded one.
956 vrm.transferSpillPts(MI, fmi);
957 vrm.transferRestorePts(MI, fmi);
958 vrm.transferEmergencySpills(MI, fmi);
959 ReplaceMachineInstrInMaps(MI, fmi);
960 MI->eraseFromParent();
968 /// canFoldMemoryOperand - Returns true if the specified load / store
969 /// folding is possible.
// Non-mutating counterpart of tryFoldMemoryOperand: runs the same operand
// filter, rejects remat into a modified operand, then asks the target.
970 bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
971 SmallVector<unsigned, 2> &Ops,
// (the `bool ReMat` parameter line is elided.)
973 // Filter the list of operand indexes that are to be folded. Abort if
974 // any operand will prevent folding.
// (the `unsigned MRInfo = 0;` declaration is elided.)
976 SmallVector<unsigned, 2> FoldOps;
977 if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
980 // It's only legal to remat for a use, not a def.
981 if (ReMat && (MRInfo & VirtRegMap::isMod))
984 return tii_->canFoldMemoryOperand(MI, FoldOps);
// intervalIsInOneMBB - Return true if every range of `li` is covered by the
// same machine basic block (the comparisons and returns are on elided lines
// in this listing).
987 bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
988 LiveInterval::Ranges::const_iterator itr = li.ranges.begin();
990 MachineBasicBlock *mbb = indexes_->getMBBCoveringRange(itr->start, itr->end);
995 for (++itr; itr != li.ranges.end(); ++itr) {
996 MachineBasicBlock *mbb2 =
997 indexes_->getMBBCoveringRange(itr->start, itr->end);
1006 /// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of
1007 /// interval on to-be re-materialized operands of MI) with new register.
1008 void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
1009 MachineInstr *MI, unsigned NewVReg,
// (the `VirtRegMap &vrm` parameter line is elided.)
1011 // There is an implicit use. That means one of the other operand is
1012 // being remat'ed and the remat'ed instruction has li.reg as an
1013 // use operand. Make sure we rewrite that as well.
1014 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1015 MachineOperand &MO = MI->getOperand(i);
// (the `if (!MO.isReg())` guard is elided.)
1018 unsigned Reg = MO.getReg();
1019 if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
1021 if (!vrm.isReMaterialized(Reg))
// Rewrite li.reg inside the remat'ed instruction to the new vreg.
1023 MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
1024 MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg);
// (the `if (UseMO)` null-check is presumably on the elided line.)
1026 UseMO->setReg(NewVReg);
1030 /// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
1031 /// for addIntervalsForSpills to rewrite uses / defs for the given live range.
///
/// Rewrites one instruction (MI) that references li.reg inside [index, end):
/// replaces the operands with a fresh (or per-MBB reused) virtual register
/// NewVReg, attempts load/store folding or deletes a fully-rematerialized
/// def, records the spill/remat bookkeeping in vrm, and creates/extends the
/// short live interval for NewVReg.  HasDef / HasUse report what was
/// rewritten; the bool result presumably reports whether the remaining
/// references could be folded later (return sites are not all visible here).
1032 bool LiveIntervals::
1033 rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
1034 bool TrySplit, SlotIndex index, SlotIndex end,
1036 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
1037 unsigned Slot, int LdSlot,
1038 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
1040 const TargetRegisterClass* rc,
1041 SmallVector<int, 4> &ReMatIds,
1042 const MachineLoopInfo *loopInfo,
1043 unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
1044 DenseMap<unsigned,unsigned> &MBBVRegsMap,
1045 std::vector<LiveInterval*> &NewLIs) {
1046 bool CanFold = false;
// Scan MI's operands for references to li.reg.  Folding may rewrite MI in
// place, in which case the scan restarts (see the goto below), so the bound
// is re-read each iteration.
1048 for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
1049 MachineOperand& mop = MI->getOperand(i);
1052 unsigned Reg = mop.getReg();
1053 if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
1058 bool TryFold = !DefIsReMat;
1059 bool FoldSS = true; // Default behavior unless it's a remat.
1060 int FoldSlot = Slot;
1062 // If this is the rematerializable definition MI itself and
1063 // all of its uses are rematerialized, simply delete it.
1064 if (MI == ReMatOrigDefMI && CanDelete) {
1065 DEBUG(dbgs() << "\t\t\t\tErasing re-materializable def: "
1067 RemoveMachineInstrFromMaps(MI);
1068 vrm.RemoveMachineInstrFromMaps(MI);
1069 MI->eraseFromParent();
1073 // If def for this use can't be rematerialized, then try folding.
1074 // If def is rematerializable and it's a load, also try folding.
1075 TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
1077 // Try fold loads (from stack slot, constant pool, etc.) into uses.
1083 // Scan all of the operands of this instruction rewriting operands
1084 // to use NewVReg instead of li.reg as appropriate. We do this for
1087 // 1. If the instr reads the same spilled vreg multiple times, we
1088 // want to reuse the NewVReg.
1089 // 2. If the instr is a two-addr instruction, we are required to
1090 // keep the src/dst regs pinned.
1092 // Keep track of whether we replace a use and/or def so that we can
1093 // create the spill interval with the appropriate range.
1094 SmallVector<unsigned, 2> Ops;
1095 tie(HasUse, HasDef) = MI->readsWritesVirtualRegister(Reg, &Ops);
1097 // Create a new virtual register for the spill interval.
1098 // Create the new register now so we can map the fold instruction
1099 // to the new register so when it is unfolded we get the correct
1101 bool CreatedNewVReg = false;
1103 NewVReg = mri_->createVirtualRegister(rc);
1105 CreatedNewVReg = true;
1107 // The new virtual register should get the same allocation hints as the
1109 std::pair<unsigned, unsigned> Hint = mri_->getRegAllocationHint(Reg);
1110 if (Hint.first || Hint.second)
1111 mri_->setRegAllocationHint(NewVReg, Hint.first, Hint.second);
1117 // Do not fold load / store here if we are splitting. We'll find an
1118 // optimal point to insert a load / store later.
1120 if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
1121 Ops, FoldSS, FoldSlot, NewVReg)) {
1122 // Folding the load/store can completely change the instruction in
1123 // unpredictable ways, rescan it from the beginning.
1126 // We need to give the new vreg the same stack slot as the
1127 // spilled interval.
1128 vrm.assignVirt2StackSlot(NewVReg, FoldSlot);
// If folding consumed MI entirely it may no longer be mapped; in that
// case there is nothing further to rewrite in this instruction.
1134 if (isNotInMIMap(MI))
1136 goto RestartInstruction;
1139 // We'll try to fold it later if it's profitable.
1140 CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat);
// Rewrite this operand, plus every other operand index reported by
// readsWritesVirtualRegister, to the replacement register.
1144 mop.setReg(NewVReg);
1145 if (mop.isImplicit())
1146 rewriteImplicitOps(li, MI, NewVReg, vrm);
1148 // Reuse NewVReg for other reads.
1149 for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
1150 MachineOperand &mopj = MI->getOperand(Ops[j]);
1151 mopj.setReg(NewVReg);
1152 if (mopj.isImplicit())
1153 rewriteImplicitOps(li, MI, NewVReg, vrm);
// Record how NewVReg will be reloaded: either by re-materializing the
// def or from a stack slot shared with the original interval.
1156 if (CreatedNewVReg) {
1158 vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI);
1159 if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
1160 // Each valnum may have its own remat id.
1161 ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
1163 vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
1165 if (!CanDelete || (HasUse && HasDef)) {
1166 // If this is a two-addr instruction then its use operands are
1167 // rematerializable but its def is not. It should be assigned a
1169 vrm.assignVirt2StackSlot(NewVReg, Slot);
1172 vrm.assignVirt2StackSlot(NewVReg, Slot);
1174 } else if (HasUse && HasDef &&
1175 vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
1176 // If this interval hasn't been assigned a stack slot (because earlier
1177 // def is a deleted remat def), do it now.
1178 assert(Slot != VirtRegMap::NO_STACK_SLOT);
1179 vrm.assignVirt2StackSlot(NewVReg, Slot);
1182 // Re-matting an instruction with virtual register use. Add the
1183 // register as an implicit use on the use MI.
1184 if (DefIsReMat && ImpUse)
1185 MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
1187 // Create a new register interval for this spill / remat.
1188 LiveInterval &nI = getOrCreateInterval(NewVReg);
1189 if (CreatedNewVReg) {
1190 NewLIs.push_back(&nI);
1191 MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
1193 vrm.setIsSplitFromReg(NewVReg, li.reg);
// Give the replacement register a live range around this instruction:
// a [load, def) range for a reload before a use, and/or a [def, store)
// range for a spill after a def; an existing split interval is instead
// extended up to this def/use.
1197 if (CreatedNewVReg) {
1198 LiveRange LR(index.getLoadIndex(), index.getDefIndex(),
1199 nI.getNextValue(SlotIndex(), 0, VNInfoAllocator));
1200 DEBUG(dbgs() << " +" << LR);
1203 // Extend the split live interval to this def / use.
1204 SlotIndex End = index.getDefIndex();
1205 LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
1206 nI.getValNumInfo(nI.getNumValNums()-1));
1207 DEBUG(dbgs() << " +" << LR);
1212 LiveRange LR(index.getDefIndex(), index.getStoreIndex(),
1213 nI.getNextValue(SlotIndex(), 0, VNInfoAllocator));
1214 DEBUG(dbgs() << " +" << LR);
1219 dbgs() << "\t\t\t\tAdded new interval: ";
1220 nI.print(dbgs(), tri_);
/// anyKillInMBBAfterIdx - Return true if the live interval is killed anywhere
/// in MBB strictly after Idx (the query range runs from the slot after Idx up
/// to the end of the block).
1226 bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
1228 MachineBasicBlock *MBB,
1229 SlotIndex Idx) const {
1230 return li.killedInRange(Idx.getNextSlot(), getMBBEndIdx(MBB));
1233 /// RewriteInfo - Keep track of machine instrs that will be rewritten
1234 /// during spilling.
1236 struct RewriteInfo {
// Pairs the instruction with its slot index so the worklist can be
// sorted into program order before rewriting.
1239 RewriteInfo(SlotIndex i, MachineInstr *mi) : Index(i), MI(mi) {}
/// RewriteInfoCompare - Strict-weak ordering on RewriteInfo by ascending
/// slot index, used with std::sort below.
1242 struct RewriteInfoCompare {
1243 bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
1244 return LHS.Index < RHS.Index;
/// rewriteInstructionsForSpills - Rewrite every def / use of li.reg inside
/// the live range *I.  Collects the instructions in program order, rewrites
/// each through rewriteInstructionForSpills, and records per-MBB spill and
/// restore points (SpillIdxes / RestoreIdxes with their SpillMBBs /
/// RestoreMBBs bitvectors) for the spiller, tracking which of them are
/// still foldable.  DBG_VALUE instructions referencing the register are
/// updated to point at the stack slot, or removed when that is not possible.
1249 void LiveIntervals::
1250 rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
1251 LiveInterval::Ranges::const_iterator &I,
1252 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
1253 unsigned Slot, int LdSlot,
1254 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
1256 const TargetRegisterClass* rc,
1257 SmallVector<int, 4> &ReMatIds,
1258 const MachineLoopInfo *loopInfo,
1259 BitVector &SpillMBBs,
1260 DenseMap<unsigned, std::vector<SRInfo> > &SpillIdxes,
1261 BitVector &RestoreMBBs,
1262 DenseMap<unsigned, std::vector<SRInfo> > &RestoreIdxes,
1263 DenseMap<unsigned,unsigned> &MBBVRegsMap,
1264 std::vector<LiveInterval*> &NewLIs) {
1265 bool AllCanFold = true;
1266 unsigned NewVReg = 0;
// Normalize the range bounds to whole-instruction (base) indexes.
1267 SlotIndex start = I->start.getBaseIndex();
1268 SlotIndex end = I->end.getPrevSlot().getBaseIndex().getNextIndex();
1270 // First collect all the def / use in this live range that will be rewritten.
1271 // Make sure they are sorted according to instruction index.
1272 std::vector<RewriteInfo> RewriteMIs;
1273 for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
1274 re = mri_->reg_end(); ri != re; ) {
1275 MachineInstr *MI = &*ri;
1276 MachineOperand &O = ri.getOperand();
1278 if (MI->isDebugValue()) {
1279 // Modify DBG_VALUE now that the value is in a spill slot.
1280 if (Slot != VirtRegMap::MAX_STACK_SLOT || isLoadSS) {
1281 uint64_t Offset = MI->getOperand(1).getImm();
1282 const MDNode *MDPtr = MI->getOperand(2).getMetadata();
1283 DebugLoc DL = MI->getDebugLoc();
1284 int FI = isLoadSS ? LdSlot : (int)Slot;
1285 if (MachineInstr *NewDV = tii_->emitFrameIndexDebugValue(*mf_, FI,
1286 Offset, MDPtr, DL)) {
1287 DEBUG(dbgs() << "Modifying debug info due to spill:" << "\t" << *MI);
1288 ReplaceMachineInstrInMaps(MI, NewDV);
1289 MachineBasicBlock *MBB = MI->getParent();
1290 MBB->insert(MBB->erase(MI), NewDV);
// The target could not express the variable location as a frame
// index; drop the debug info rather than leave it pointing at a
// spilled vreg.
1295 DEBUG(dbgs() << "Removing debug info due to spill:" << "\t" << *MI);
1296 RemoveMachineInstrFromMaps(MI);
1297 vrm.RemoveMachineInstrFromMaps(MI);
1298 MI->eraseFromParent();
1301 assert(!(O.isImplicit() && O.isUse()) &&
1302 "Spilling register that's used as implicit use?");
1303 SlotIndex index = getInstructionIndex(MI);
1304 if (index < start || index >= end)
1308 // Must be defined by an implicit def. It should not be spilled. Note,
1309 // this is for correctness reason. e.g.
1310 // 8 %reg1024<def> = IMPLICIT_DEF
1311 // 12 %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
1312 // The live range [12, 14) are not part of the r1024 live interval since
1313 // it's defined by an implicit def. It will not conflicts with live
1314 // interval of r1025. Now suppose both registers are spilled, you can
1315 // easily see a situation where both registers are reloaded before
1316 // the INSERT_SUBREG and both target registers that would overlap.
1318 RewriteMIs.push_back(RewriteInfo(index, MI));
1320 std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());
// If the remat'ed def itself reads a virtual register, that register must
// stay in a register; remember it so the intervals below are pinned.
1322 unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
1323 // Now rewrite the defs and uses.
1324 for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
1325 RewriteInfo &rwi = RewriteMIs[i];
1327 SlotIndex index = rwi.Index;
1328 MachineInstr *MI = rwi.MI;
1329 // If MI def and/or use the same register multiple times, then there
1330 // are multiple entries.
1331 while (i != e && RewriteMIs[i].MI == MI) {
1332 assert(RewriteMIs[i].Index == index);
1335 MachineBasicBlock *MBB = MI->getParent();
1337 if (ImpUse && MI != ReMatDefMI) {
1338 // Re-matting an instruction with virtual register use. Prevent interval
1339 // from being spilled.
1340 getInterval(ImpUse).markNotSpillable();
// When splitting, reuse the replacement vreg already chosen for this
// MBB (MBBVRegsMap) so uses in the same block share one interval.
1343 unsigned MBBId = MBB->getNumber();
1344 unsigned ThisVReg = 0;
1346 DenseMap<unsigned,unsigned>::iterator NVI = MBBVRegsMap.find(MBBId);
1347 if (NVI != MBBVRegsMap.end()) {
1348 ThisVReg = NVI->second;
1355 // It's better to start a new interval to avoid artifically
1356 // extend the new interval.
1357 if (MI->readsWritesVirtualRegister(li.reg) ==
1358 std::make_pair(false,true)) {
1359 MBBVRegsMap.erase(MBB->getNumber());
1365 bool IsNew = ThisVReg == 0;
1367 // This ends the previous live interval. If all of its def / use
1368 // can be folded, give it a low spill weight.
1369 if (NewVReg && TrySplit && AllCanFold) {
1370 LiveInterval &nI = getOrCreateInterval(NewVReg);
1377 bool HasDef = false;
1378 bool HasUse = false;
1379 bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
1380 index, end, MI, ReMatOrigDefMI, ReMatDefMI,
1381 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1382 CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
1383 ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs)
1384 if (!HasDef && !HasUse)
1387 AllCanFold &= CanFold;
1389 // Update weight of spill interval.
1390 LiveInterval &nI = getOrCreateInterval(NewVReg);
1392 // The spill weight is now infinity as it cannot be spilled again.
1393 nI.markNotSpillable();
1397 // Keep track of the last def and first use in each MBB.
1399 if (MI != ReMatOrigDefMI || !CanDelete) {
1400 bool HasKill = false;
1402 HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, index.getDefIndex());
1404 // If this is a two-address code, then this index starts a new VNInfo.
1405 const VNInfo *VNI = li.findDefinedVNInfoForRegInt(index.getDefIndex());
1407 HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, index.getDefIndex());
// Record / update the spill point for this MBB.  Only the latest def
// per (MBB, vreg) pair is kept; a later def supersedes an earlier one.
1409 DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
1410 SpillIdxes.find(MBBId);
1412 if (SII == SpillIdxes.end()) {
1413 std::vector<SRInfo> S;
1414 S.push_back(SRInfo(index, NewVReg, true));
1415 SpillIdxes.insert(std::make_pair(MBBId, S));
1416 } else if (SII->second.back().vreg != NewVReg) {
1417 SII->second.push_back(SRInfo(index, NewVReg, true));
1418 } else if (index > SII->second.back().index) {
1419 // If there is an earlier def and this is a two-address
1420 // instruction, then it's not possible to fold the store (which
1421 // would also fold the load).
1422 SRInfo &Info = SII->second.back();
1424 Info.canFold = !HasUse;
1426 SpillMBBs.set(MBBId);
1427 } else if (SII != SpillIdxes.end() &&
1428 SII->second.back().vreg == NewVReg &&
1429 index > SII->second.back().index) {
1430 // There is an earlier def that's not killed (must be two-address).
1431 // The spill is no longer needed.
1432 SII->second.pop_back();
1433 if (SII->second.empty()) {
1434 SpillIdxes.erase(MBBId);
1435 SpillMBBs.reset(MBBId);
// Use handling: an earlier recorded spill or restore in this MBB means
// folding is no longer safe for it; also record the first restore
// (reload) point per MBB.
1442 DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
1443 SpillIdxes.find(MBBId);
1444 if (SII != SpillIdxes.end() &&
1445 SII->second.back().vreg == NewVReg &&
1446 index > SII->second.back().index)
1447 // Use(s) following the last def, it's not safe to fold the spill.
1448 SII->second.back().canFold = false;
1449 DenseMap<unsigned, std::vector<SRInfo> >::iterator RII =
1450 RestoreIdxes.find(MBBId);
1451 if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
1452 // If we are splitting live intervals, only fold if it's the first
1453 // use and there isn't another use later in the MBB.
1454 RII->second.back().canFold = false;
1456 // Only need a reload if there isn't an earlier def / use.
1457 if (RII == RestoreIdxes.end()) {
1458 std::vector<SRInfo> Infos;
1459 Infos.push_back(SRInfo(index, NewVReg, true));
1460 RestoreIdxes.insert(std::make_pair(MBBId, Infos));
1462 RII->second.push_back(SRInfo(index, NewVReg, true));
1464 RestoreMBBs.set(MBBId);
1468 // Update spill weight.
1469 unsigned loopDepth = loopInfo->getLoopDepth(MBB);
1470 nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
1473 if (NewVReg && TrySplit && AllCanFold) {
1474 // If all of its def / use can be folded, give it a low spill weight.
1475 LiveInterval &nI = getOrCreateInterval(NewVReg);
/// alsoFoldARestore - Return true if there is a restore recorded for basic
/// block Id at slot `index` for vreg `vr` that is still marked foldable.
/// Used when folding a spill to check whether the matching reload can be
/// folded into the same instruction.
1480 bool LiveIntervals::alsoFoldARestore(int Id, SlotIndex index,
1481 unsigned vr, BitVector &RestoreMBBs,
1482 DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
// No restores recorded for this block at all.
1483 if (!RestoreMBBs[Id])
1485 std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1486 for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1487 if (Restores[i].index == index &&
1488 Restores[i].vreg == vr &&
1489 Restores[i].canFold)
/// eraseRestoreInfo - Cancel the restore recorded for basic block Id at slot
/// `index`: its index is cleared to the invalid SlotIndex so the later
/// restore-emission loop skips it (a reload is unnecessary once the use was
/// folded together with the spill).
1494 void LiveIntervals::eraseRestoreInfo(int Id, SlotIndex index,
1495 unsigned vr, BitVector &RestoreMBBs,
1496 DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1497 if (!RestoreMBBs[Id])
1499 std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1500 for (unsigned i = 0, e = Restores.size(); i != e; ++i)
// NOTE(review): this condition tests Restores[i].vreg for non-zero
// rather than comparing it against the vr parameter — verify against
// the full file whether `== vr` was intended here.
1501 if (Restores[i].index == index && Restores[i].vreg)
1502 Restores[i].index = SlotIndex();
1505 /// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being
1506 /// spilled and create empty intervals for their uses.
1508 LiveIntervals::handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm,
1509 const TargetRegisterClass* rc,
1510 std::vector<LiveInterval*> &NewLIs) {
// Walk the remaining references to li.reg; after rewriting, only
// IMPLICIT_DEF defs and their (dead) uses should still mention it.
1511 for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
1512 re = mri_->reg_end(); ri != re; ) {
1513 MachineOperand &O = ri.getOperand();
1514 MachineInstr *MI = &*ri;
1516 if (MI->isDebugValue()) {
1517 // Remove debug info for now.
1519 DEBUG(dbgs() << "Removing debug info due to spill:" << "\t" << *MI);
// Any remaining def must be the IMPLICIT_DEF itself; erase it outright.
1523 assert(MI->isImplicitDef() &&
1524 "Register def was not rewritten?");
1525 RemoveMachineInstrFromMaps(MI);
1526 vrm.RemoveMachineInstrFromMaps(MI);
1527 MI->eraseFromParent();
1529 // This must be an use of an implicit_def so it's not part of the live
1530 // interval. Create a new empty live interval for it.
1531 // FIXME: Can we simply erase some of the instructions? e.g. Stores?
1532 unsigned NewVReg = mri_->createVirtualRegister(rc);
1534 vrm.setIsImplicitlyDefined(NewVReg);
1535 NewLIs.push_back(&getOrCreateInterval(NewVReg));
// Redirect every operand of this user that still names li.reg.
1536 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1537 MachineOperand &MO = MI->getOperand(i);
1538 if (MO.isReg() && MO.getReg() == li.reg) {
/// getSpillWeight - Estimate the cost of spilling / reloading around a single
/// instruction: one unit per def and per use, scaled by a tempered
/// exponential of the containing loop's depth.
1548 LiveIntervals::getSpillWeight(bool isDef, bool isUse, unsigned loopDepth) {
1549 // Limit the loop depth ridiculousness.
1550 if (loopDepth > 200)
1553 // The loop depth is used to roughly estimate the number of times the
1554 // instruction is executed. Something like 10^d is simple, but will quickly
1555 // overflow a float. This expression behaves like 10^d for small d, but is
1556 // more tempered for large d. At d=200 we get 6.7e33 which leaves a bit of
1557 // headroom before overflow.
1558 float lc = std::pow(1 + (100.0f / (loopDepth+10)), (float)loopDepth);
// isDef/isUse are bools, so this yields 0, 1 or 2 times the loop factor.
1560 return (isDef + isUse) * lc;
/// normalizeSpillWeights - Apply normalizeSpillWeight to every interval newly
/// created by spilling, so their weights are comparable to other intervals.
1564 LiveIntervals::normalizeSpillWeights(std::vector<LiveInterval*> &NewLIs) {
1565 for (unsigned i = 0, e = NewLIs.size(); i != e; ++i)
1566 normalizeSpillWeight(*NewLIs[i]);
/// addIntervalsForSpills - Top-level spiller driver for the given interval.
/// Decides per value number whether the def can be re-materialized or must
/// live in a stack slot, rewrites all defs/uses via
/// rewriteInstructionsForSpills, then walks the recorded spill / restore
/// points: folds stores/loads into instructions where legal, otherwise asks
/// the VirtRegMap to emit explicit spill and restore points.  Returns the
/// list of new, finalized live intervals created by the spill.
1569 std::vector<LiveInterval*> LiveIntervals::
1570 addIntervalsForSpills(const LiveInterval &li,
1571 SmallVectorImpl<LiveInterval*> &SpillIs,
1572 const MachineLoopInfo *loopInfo, VirtRegMap &vrm) {
1573 assert(li.isSpillable() && "attempt to spill already spilled interval!");
1576 dbgs() << "\t\t\t\tadding intervals for spills for interval: ";
1577 li.print(dbgs(), tri_);
1581 // Each bit specify whether a spill is required in the MBB.
1582 BitVector SpillMBBs(mf_->getNumBlockIDs());
1583 DenseMap<unsigned, std::vector<SRInfo> > SpillIdxes;
1584 BitVector RestoreMBBs(mf_->getNumBlockIDs());
1585 DenseMap<unsigned, std::vector<SRInfo> > RestoreIdxes;
1586 DenseMap<unsigned,unsigned> MBBVRegsMap;
1587 std::vector<LiveInterval*> NewLIs;
1588 const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
// Per-value-number remat bookkeeping, indexed by VNInfo id.
1590 unsigned NumValNums = li.getNumValNums();
1591 SmallVector<MachineInstr*, 4> ReMatDefs;
1592 ReMatDefs.resize(NumValNums, NULL);
1593 SmallVector<MachineInstr*, 4> ReMatOrigDefs;
1594 ReMatOrigDefs.resize(NumValNums, NULL);
1595 SmallVector<int, 4> ReMatIds;
1596 ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
1597 BitVector ReMatDelete(NumValNums);
1598 unsigned Slot = VirtRegMap::MAX_STACK_SLOT;
1600 // Spilling a split live interval. It cannot be split any further. Also,
1601 // it's also guaranteed to be a single val# / range interval.
1602 if (vrm.getPreSplitReg(li.reg)) {
1603 vrm.setIsSplitFromReg(li.reg, 0);
1604 // Unset the split kill marker on the last use.
1605 SlotIndex KillIdx = vrm.getKillPoint(li.reg);
1606 if (KillIdx != SlotIndex()) {
1607 MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
1608 assert(KillMI && "Last use disappeared?");
1609 int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
1610 assert(KillOp != -1 && "Last use disappeared?");
1611 KillMI->getOperand(KillOp).setIsKill(false);
1613 vrm.removeKillPoint(li.reg);
1614 bool DefIsReMat = vrm.isReMaterialized(li.reg);
1615 Slot = vrm.getStackSlot(li.reg);
1616 assert(Slot != VirtRegMap::MAX_STACK_SLOT);
1617 MachineInstr *ReMatDefMI = DefIsReMat ?
1618 vrm.getReMaterializedMI(li.reg) : NULL;
1620 bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1621 bool isLoad = isLoadSS ||
1622 (DefIsReMat && (ReMatDefMI->getDesc().canFoldAsLoad()));
1623 bool IsFirstRange = true;
1624 for (LiveInterval::Ranges::const_iterator
1625 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1626 // If this is a split live interval with multiple ranges, it means there
1627 // are two-address instructions that re-defined the value. Only the
1628 // first def can be rematerialized!
1630 // Note ReMatOrigDefMI has already been deleted.
1631 rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
1632 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1633 false, vrm, rc, ReMatIds, loopInfo,
1634 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1635 MBBVRegsMap, NewLIs);
1637 rewriteInstructionsForSpills(li, false, I, NULL, 0,
1638 Slot, 0, false, false, false,
1639 false, vrm, rc, ReMatIds, loopInfo,
1640 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1641 MBBVRegsMap, NewLIs);
1643 IsFirstRange = false;
1646 handleSpilledImpDefs(li, vrm, rc, NewLIs);
1647 normalizeSpillWeights(NewLIs);
// Non-pre-split path: splitting is only worthwhile when the interval
// spans more than one basic block.
1651 bool TrySplit = !intervalIsInOneMBB(li);
1654 bool NeedStackSlot = false;
1655 for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
1657 const VNInfo *VNI = *i;
1658 unsigned VN = VNI->id;
1659 if (VNI->isUnused())
1660 continue; // Dead val#.
1661 // Is the def for the val# rematerializable?
1662 MachineInstr *ReMatDefMI = getInstructionFromIndex(VNI->def);
1664 if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, SpillIs, dummy)) {
1665 // Remember how to remat the def of this val#.
1666 ReMatOrigDefs[VN] = ReMatDefMI;
1667 // Original def may be modified so we have to make a copy here.
1668 MachineInstr *Clone = mf_->CloneMachineInstr(ReMatDefMI);
1669 CloneMIs.push_back(Clone);
1670 ReMatDefs[VN] = Clone;
1672 bool CanDelete = true;
1673 if (VNI->hasPHIKill()) {
1674 // A kill is a phi node, not all of its uses can be rematerialized.
1675 // It must not be deleted.
1677 // Need a stack slot if there is any live range where uses cannot be
1679 NeedStackSlot = true;
1682 ReMatDelete.set(VN);
1684 // Need a stack slot if there is any live range where uses cannot be
1686 NeedStackSlot = true;
1690 // One stack slot per live interval.
1691 if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0) {
1692 if (vrm.getStackSlot(li.reg) == VirtRegMap::NO_STACK_SLOT)
1693 Slot = vrm.assignVirt2StackSlot(li.reg);
1695 // This case only occurs when the prealloc splitter has already assigned
1696 // a stack slot to this vreg.
1698 Slot = vrm.getStackSlot(li.reg);
1701 // Create new intervals and rewrite defs and uses.
1702 for (LiveInterval::Ranges::const_iterator
1703 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1704 MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
1705 MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
1706 bool DefIsReMat = ReMatDefMI != NULL;
1707 bool CanDelete = ReMatDelete[I->valno->id];
1709 bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1710 bool isLoad = isLoadSS ||
1711 (DefIsReMat && ReMatDefMI->getDesc().canFoldAsLoad());
1712 rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
1713 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1714 CanDelete, vrm, rc, ReMatIds, loopInfo,
1715 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1716 MBBVRegsMap, NewLIs);
1719 // Insert spills / restores if we are splitting.
1721 handleSpilledImpDefs(li, vrm, rc, NewLIs);
1722 normalizeSpillWeights(NewLIs);
// Process recorded spill points: try folding the store into the defining
// instruction; if folded (and the matching restore is foldable too) no
// explicit spill is needed, otherwise tell the VirtRegMap to emit one.
1726 SmallPtrSet<LiveInterval*, 4> AddedKill;
1727 SmallVector<unsigned, 2> Ops;
1728 if (NeedStackSlot) {
1729 int Id = SpillMBBs.find_first();
1731 std::vector<SRInfo> &spills = SpillIdxes[Id];
1732 for (unsigned i = 0, e = spills.size(); i != e; ++i) {
1733 SlotIndex index = spills[i].index;
1734 unsigned VReg = spills[i].vreg;
1735 LiveInterval &nI = getOrCreateInterval(VReg);
1736 bool isReMat = vrm.isReMaterialized(VReg);
1737 MachineInstr *MI = getInstructionFromIndex(index);
1738 bool CanFold = false;
1739 bool FoundUse = false;
1741 if (spills[i].canFold) {
1743 for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
1744 MachineOperand &MO = MI->getOperand(j);
1745 if (!MO.isReg() || MO.getReg() != VReg)
1752 (!FoundUse && !alsoFoldARestore(Id, index, VReg,
1753 RestoreMBBs, RestoreIdxes))) {
1754 // MI has two-address uses of the same register. If the use
1755 // isn't the first and only use in the BB, then we can't fold
1756 // it. FIXME: Move this to rewriteInstructionsForSpills.
1763 // Fold the store into the def if possible.
1764 bool Folded = false;
1765 if (CanFold && !Ops.empty()) {
1766 if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot,VReg)){
1769 // Also folded uses, do not issue a load.
1770 eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
1771 nI.removeRange(index.getLoadIndex(), index.getDefIndex());
1773 nI.removeRange(index.getDefIndex(), index.getStoreIndex());
1777 // Otherwise tell the spiller to issue a spill.
1779 LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
1780 bool isKill = LR->end == index.getStoreIndex();
1781 if (!MI->registerDefIsDead(nI.reg))
1782 // No need to spill a dead def.
1783 vrm.addSpillPoint(VReg, isKill, MI);
1785 AddedKill.insert(&nI);
1788 Id = SpillMBBs.find_next(Id);
// Process recorded restore points: try folding the reload (or the
// rematerialized load) into the using instruction; otherwise emit an
// explicit restore / remat point.
1792 int Id = RestoreMBBs.find_first();
1794 std::vector<SRInfo> &restores = RestoreIdxes[Id];
1795 for (unsigned i = 0, e = restores.size(); i != e; ++i) {
1796 SlotIndex index = restores[i].index;
// An invalid index marks a restore cancelled by eraseRestoreInfo.
1797 if (index == SlotIndex())
1799 unsigned VReg = restores[i].vreg;
1800 LiveInterval &nI = getOrCreateInterval(VReg);
1801 bool isReMat = vrm.isReMaterialized(VReg);
1802 MachineInstr *MI = getInstructionFromIndex(index);
1803 bool CanFold = false;
1805 if (restores[i].canFold) {
1807 for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
1808 MachineOperand &MO = MI->getOperand(j);
1809 if (!MO.isReg() || MO.getReg() != VReg)
1813 // If this restore were to be folded, it would have been folded
1822 // Fold the load into the use if possible.
1823 bool Folded = false;
1824 if (CanFold && !Ops.empty()) {
1826 Folded = tryFoldMemoryOperand(MI, vrm, NULL,index,Ops,true,Slot,VReg);
1828 MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
1830 bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1831 // If the rematerializable def is a load, also try to fold it.
1832 if (isLoadSS || ReMatDefMI->getDesc().canFoldAsLoad())
1833 Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
1834 Ops, isLoadSS, LdSlot, VReg);
1836 unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
1838 // Re-matting an instruction with virtual register use. Add the
1839 // register as an implicit use on the use MI and mark the register
1840 // interval as unspillable.
1841 LiveInterval &ImpLi = getInterval(ImpUse);
1842 ImpLi.markNotSpillable();
1843 MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
1848 // If folding is not possible / failed, then tell the spiller to issue a
1849 // load / rematerialization for us.
1851 nI.removeRange(index.getLoadIndex(), index.getDefIndex());
1853 vrm.addRestorePoint(VReg, MI);
1855 Id = RestoreMBBs.find_next(Id);
1858 // Finalize intervals: add kills, finalize spill weights, and filter out
// Mark the last use of each surviving new interval as a kill (unless the
// interval's spill already acts as the kill, tracked via AddedKill).
1860 std::vector<LiveInterval*> RetNewLIs;
1861 for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
1862 LiveInterval *LI = NewLIs[i];
1864 if (!AddedKill.count(LI)) {
1865 LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
1866 SlotIndex LastUseIdx = LR->end.getBaseIndex();
1867 MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
1868 int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
1869 assert(UseIdx != -1);
1870 if (!LastUse->isRegTiedToDefOperand(UseIdx)) {
1871 LastUse->getOperand(UseIdx).setIsKill();
1872 vrm.addKillPoint(LI->reg, LastUseIdx);
1875 RetNewLIs.push_back(LI);
1879 handleSpilledImpDefs(li, vrm, rc, RetNewLIs);
1880 normalizeSpillWeights(RetNewLIs);
1884 /// hasAllocatableSuperReg - Return true if the specified physical register has
1885 /// any super register that's allocatable.
1886 bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
// A super-register only counts if it is allocatable AND we are tracking a
// live interval for it.
1887 for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS)
1888 if (allocatableRegs_[*AS] && hasInterval(*AS))
1893 /// getRepresentativeReg - Find the largest super register of the specified
1894 /// physical register.
1895 unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
1896 // Find the largest super-register that is allocatable.
1897 unsigned BestReg = Reg;
1898 for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
1899 unsigned SuperReg = *AS;
// Pick a super-register with an interval that itself has no allocatable
// super-register, i.e. the top of the register's covering chain.
1900 if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) {
1908 /// getNumConflictsWithPhysReg - Return the number of uses and defs of the
1909 /// specified interval that conflicts with the specified physical register.
1910 unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
1911 unsigned PhysReg) const {
1912 unsigned NumConflicts = 0;
// Conflicts are counted against the representative (largest covering)
// register's interval, not PhysReg's own interval.
1913 const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg));
1914 for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
1915 E = mri_->reg_end(); I != E; ++I) {
1916 MachineOperand &O = I.getOperand();
1917 MachineInstr *MI = O.getParent();
// DBG_VALUEs are not real uses and must not influence the count.
1918 if (MI->isDebugValue())
1920 SlotIndex Index = getInstructionIndex(MI);
1921 if (pli.liveAt(Index))
1924 return NumConflicts;
1927 /// spillPhysRegAroundRegDefsUses - Spill the specified physical register
1928 /// around all defs and uses of the specified interval. Return true if it
1929 /// was able to cut its interval.
1930 bool LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
1931 unsigned PhysReg, VirtRegMap &vrm) {
// Operate on the representative (largest covering) register so sub-register
// intervals are handled together.
1932 unsigned SpillReg = getRepresentativeReg(PhysReg);
1934 for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
1935 // If there are registers which alias PhysReg, but which are not a
1936 // sub-register of the chosen representative super register. Assert
1937 // since we can't handle it yet.
1938 assert(*AS == SpillReg || !allocatableRegs_[*AS] || !hasInterval(*AS) ||
1939 tri_->isSuperRegister(*AS, SpillReg));
// Collect SpillReg and all of its sub-registers that have intervals; each
// will be emergency-spilled around the conflicting instructions.
1942 SmallVector<unsigned, 4> PRegs;
1943 if (hasInterval(SpillReg))
1944 PRegs.push_back(SpillReg);
1946 SmallSet<unsigned, 4> Added;
1947 for (const unsigned* AS = tri_->getSubRegisters(SpillReg); *AS; ++AS)
1948 if (Added.insert(*AS) && hasInterval(*AS)) {
1949 PRegs.push_back(*AS);
1950 for (const unsigned* ASS = tri_->getSubRegisters(*AS); *ASS; ++ASS)
// Visit each instruction referencing li.reg once (reg_iterator can yield
// the same MI multiple times for multiple operands).
1955 SmallPtrSet<MachineInstr*, 8> SeenMIs;
1956 for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
1957 E = mri_->reg_end(); I != E; ++I) {
1958 MachineOperand &O = I.getOperand();
1959 MachineInstr *MI = O.getParent();
1960 if (MI->isDebugValue() || SeenMIs.count(MI))
1963 SlotIndex Index = getInstructionIndex(MI);
1964 for (unsigned i = 0, e = PRegs.size(); i != e; ++i) {
1965 unsigned PReg = PRegs[i];
1966 LiveInterval &pli = getInterval(PReg);
1967 if (!pli.liveAt(Index))
1969 vrm.addEmergencySpill(PReg, MI);
1970 SlotIndex StartIdx = Index.getLoadIndex();
1971 SlotIndex EndIdx = Index.getNextIndex().getBaseIndex();
1972 if (pli.isInOneLiveRange(StartIdx, EndIdx)) {
1973 pli.removeRange(StartIdx, EndIdx);
// The physreg's live range cannot be cut around this instruction:
// allocation has genuinely run out of registers. Produce a fatal
// error (pointing at inline asm constraints when MI is inline asm,
// the common culprit).
1977 raw_string_ostream Msg(msg);
1978 Msg << "Ran out of registers during register allocation!";
1979 if (MI->isInlineAsm()) {
1980 Msg << "\nPlease check your inline asm statement for invalid "
1981 << "constraints:\n";
1982 MI->print(Msg, tm_);
1984 report_fatal_error(Msg.str());
// Also trim any sub-register intervals that are live here.
1986 for (const unsigned* AS = tri_->getSubRegisters(PReg); *AS; ++AS) {
1987 if (!hasInterval(*AS))
1989 LiveInterval &spli = getInterval(*AS);
1990 if (spli.liveAt(Index))
1991 spli.removeRange(Index.getLoadIndex(),
1992 Index.getNextIndex().getBaseIndex());
1999 LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg,
2000 MachineInstr* startInst) {
2001 LiveInterval& Interval = getOrCreateInterval(reg);
2002 VNInfo* VN = Interval.getNextValue(
2003 SlotIndex(getInstructionIndex(startInst).getDefIndex()),
2004 startInst, getVNInfoAllocator());
2005 VN->setHasPHIKill(true);
2007 SlotIndex(getInstructionIndex(startInst).getDefIndex()),
2008 getMBBEndIdx(startInst->getParent()), VN);
2009 Interval.addRange(LR);