//===-- LiveIntervalAnalysis.cpp - Live Interval Analysis ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the LiveInterval analysis pass which is used
// by the Linear Scan Register allocator. This pass linearizes the
// basic blocks of the function in DFS order and uses the
// LiveVariables pass to conservatively compute live intervals for
// each virtual and physical register.
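//
// For example, with the machine instructions of a function numbered 1 to N,
// a register defined at instruction i and last used at instruction j is
// live over the interval [i, j): closed at the def, open past the last use.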
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "liveintervals"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "VirtRegMap.h"
#include "llvm/Value.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/ProcessImplicitDefs.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include <algorithm>
#include <limits>
#include <cmath>
using namespace llvm;
// Hidden options to help debugging.
static cl::opt<bool> DisableReMat("disable-rematerialization",
                                  cl::init(false), cl::Hidden);
STATISTIC(numIntervals , "Number of original intervals");
STATISTIC(numFolds     , "Number of loads/stores folded into instructions");
STATISTIC(numSplits    , "Number of intervals split");
char LiveIntervals::ID = 0;
INITIALIZE_PASS(LiveIntervals, "liveintervals",
                "Live Interval Analysis", false, false);
void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addRequired<AliasAnalysis>();
  AU.addPreserved<AliasAnalysis>();
  AU.addRequired<LiveVariables>();
  AU.addPreserved<LiveVariables>();
  AU.addRequired<MachineLoopInfo>();
  AU.addPreserved<MachineLoopInfo>();
  AU.addPreservedID(MachineDominatorsID);
  AU.addPreservedID(PHIEliminationID);
  AU.addRequiredID(PHIEliminationID);
  AU.addRequiredID(TwoAddressInstructionPassID);
  AU.addPreserved<ProcessImplicitDefs>();
  AU.addRequired<ProcessImplicitDefs>();
  AU.addPreserved<SlotIndexes>();
  AU.addRequiredTransitive<SlotIndexes>();
  MachineFunctionPass::getAnalysisUsage(AU);
}
void LiveIntervals::releaseMemory() {
  // Free the live intervals themselves.
  for (DenseMap<unsigned, LiveInterval*>::iterator I = r2iMap_.begin(),
       E = r2iMap_.end(); I != E; ++I)
    delete I->second;

  r2iMap_.clear();

  // Release VNInfo memory regions, VNInfo objects don't need to be dtor'd.
  VNInfoAllocator.Reset();
  while (!CloneMIs.empty()) {
    MachineInstr *MI = CloneMIs.back();
    CloneMIs.pop_back();
    mf_->DeleteMachineInstr(MI);
  }
}
/// runOnMachineFunction - Compute live intervals for the whole function.
///
bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
  mf_ = &fn;
  mri_ = &mf_->getRegInfo();
  tm_ = &fn.getTarget();
  tri_ = tm_->getRegisterInfo();
  tii_ = tm_->getInstrInfo();
  aa_ = &getAnalysis<AliasAnalysis>();
  lv_ = &getAnalysis<LiveVariables>();
  indexes_ = &getAnalysis<SlotIndexes>();
  allocatableRegs_ = tri_->getAllocatableSet(fn);

  computeIntervals();

  numIntervals += getNumIntervals();

  DEBUG(dump());
  return true;
}
/// print - Implement the dump method.
void LiveIntervals::print(raw_ostream &OS, const Module* ) const {
  OS << "********** INTERVALS **********\n";
  for (const_iterator I = begin(), E = end(); I != E; ++I) {
    I->second->print(OS, tri_);
    OS << "\n";
  }

  printInstrs(OS);
}
void LiveIntervals::printInstrs(raw_ostream &OS) const {
  OS << "********** MACHINEINSTRS **********\n";

  for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end();
       mbbi != mbbe; ++mbbi) {
    OS << "BB#" << mbbi->getNumber()
       << ":\t\t# derived from " << mbbi->getName() << "\n";
    for (MachineBasicBlock::iterator mii = mbbi->begin(),
           mie = mbbi->end(); mii != mie; ++mii) {
      if (mii->isDebugValue())
        OS << "    \t" << *mii;
      else
        OS << getInstructionIndex(mii) << '\t' << *mii;
    }
  }
}

void LiveIntervals::dumpInstrs() const {
  printInstrs(dbgs());
}
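/// conflictsWithPhysReg - Return true if the specified interval conflicts
/// with uses or defs of the specified physical register. Copies to and from
/// li.reg itself are allowed. Only single-range intervals contained within
/// one basic block can be analyzed; anything else conservatively counts as
/// a conflict.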
bool LiveIntervals::conflictsWithPhysReg(const LiveInterval &li,
                                         VirtRegMap &vrm, unsigned reg) {
  // We don't handle fancy stuff crossing basic block boundaries
  if (li.ranges.size() != 1)
    return true;
  const LiveRange &range = li.ranges.front();
  SlotIndex idx = range.start.getBaseIndex();
  SlotIndex end = range.end.getPrevSlot().getBaseIndex().getNextIndex();

  // Skip deleted instructions
  MachineInstr *firstMI = getInstructionFromIndex(idx);
  while (!firstMI && idx != end) {
    idx = idx.getNextIndex();
    firstMI = getInstructionFromIndex(idx);
  }
  if (!firstMI)
    return false;

  // Find last instruction in range
  SlotIndex lastIdx = end.getPrevIndex();
  MachineInstr *lastMI = getInstructionFromIndex(lastIdx);
  while (!lastMI && lastIdx != idx) {
    lastIdx = lastIdx.getPrevIndex();
    lastMI = getInstructionFromIndex(lastIdx);
  }
  if (!lastMI)
    return false;

  // Range cannot cross basic block boundaries or terminators
  MachineBasicBlock *MBB = firstMI->getParent();
  if (MBB != lastMI->getParent() || lastMI->getDesc().isTerminator())
    return true;

  MachineBasicBlock::const_iterator E = lastMI;
  ++E;
  for (MachineBasicBlock::const_iterator I = firstMI; I != E; ++I) {
    const MachineInstr &MI = *I;

    // Allow copies to and from li.reg
    if (MI.isCopy())
      if (MI.getOperand(0).getReg() == li.reg ||
          MI.getOperand(1).getReg() == li.reg)
        continue;

    // Check for operands using reg
    for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
      const MachineOperand& mop = MI.getOperand(i);
      if (!mop.isReg())
        continue;
      unsigned PhysReg = mop.getReg();
      if (PhysReg == 0 || PhysReg == li.reg)
        continue;
      if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
        if (!vrm.hasPhys(PhysReg))
          continue;
        PhysReg = vrm.getPhys(PhysReg);
      }
      if (PhysReg && tri_->regsOverlap(PhysReg, reg))
        return true;
    }
  }

  // No conflicts found.
  return false;
}
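/// conflictsWithAliasRef - Return true if any instruction covered by this
/// live interval has a physical register operand that aliases Reg.
/// Instructions in JoinedCopies, and deleted instructions, are skipped.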
bool LiveIntervals::conflictsWithAliasRef(LiveInterval &li, unsigned Reg,
                                  SmallPtrSet<MachineInstr*,32> &JoinedCopies) {
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    for (SlotIndex index = I->start.getBaseIndex(),
           end = I->end.getPrevSlot().getBaseIndex().getNextIndex();
         index != end;
         index = index.getNextIndex()) {
      MachineInstr *MI = getInstructionFromIndex(index);
      if (!MI)
        continue;               // skip deleted instructions

      if (JoinedCopies.count(MI))
        continue;
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand& MO = MI->getOperand(i);
        if (!MO.isReg())
          continue;
        unsigned PhysReg = MO.getReg();
        if (PhysReg == 0 || PhysReg == Reg ||
            TargetRegisterInfo::isVirtualRegister(PhysReg))
          continue;
        if (tri_->regsOverlap(Reg, PhysReg))
          return true;
      }
    }
  }

  return false;
}
#ifndef NDEBUG
static void printRegName(unsigned reg, const TargetRegisterInfo* tri_) {
  if (TargetRegisterInfo::isPhysicalRegister(reg))
    dbgs() << tri_->getName(reg);
  else
    dbgs() << "%reg" << reg;
}
#endif
/// MultipleDefsBySameMI - Return true if the instruction defines the same
/// register it defines at operand MOIdx again in a later operand.
static bool MultipleDefsBySameMI(const MachineInstr &MI, unsigned MOIdx) {
  unsigned Reg = MI.getOperand(MOIdx).getReg();
  for (unsigned i = MOIdx+1, e = MI.getNumOperands(); i < e; ++i) {
    const MachineOperand &MO = MI.getOperand(i);
    if (!MO.isReg())
      continue;
    if (MO.getReg() == Reg && MO.isDef()) {
      assert(MI.getOperand(MOIdx).getSubReg() != MO.getSubReg() &&
             MI.getOperand(MOIdx).getSubReg() &&
             (MO.getSubReg() || MO.isImplicit()));
      return true;
    }
  }
  return false;
}
/// isPartialRedef - Return true if the specified def at the specific index is
/// partially re-defining the specified live interval. A common case of this is
/// a definition of the sub-register.
bool LiveIntervals::isPartialRedef(SlotIndex MIIdx, MachineOperand &MO,
                                   LiveInterval &interval) {
  if (!MO.getSubReg() || MO.isEarlyClobber())
    return false;

  SlotIndex RedefIndex = MIIdx.getDefIndex();
  const LiveRange *OldLR =
    interval.getLiveRangeContaining(RedefIndex.getUseIndex());
  MachineInstr *DefMI = getInstructionFromIndex(OldLR->valno->def);
  if (DefMI != 0)
    return DefMI->findRegisterDefOperandIdx(interval.reg) != -1;

  return false;
}
void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
                                             MachineBasicBlock::iterator mi,
                                             SlotIndex MIIdx,
                                             MachineOperand& MO,
                                             unsigned MOIdx,
                                             LiveInterval &interval) {
  DEBUG({
      dbgs() << "\t\tregister: ";
      printRegName(interval.reg, tri_);
    });

  // Virtual registers may be defined multiple times (due to phi
  // elimination and 2-addr elimination). Much of what we do only has to be
  // done once for the vreg. We use an empty interval to detect the first
  // time we see a vreg.
  LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);
  if (interval.empty()) {
    // Get the Idx of the defining instructions.
    SlotIndex defIndex = MIIdx.getDefIndex();
    // Earlyclobbers move back one, so that they overlap the live range
    // of inputs.
    if (MO.isEarlyClobber())
      defIndex = MIIdx.getUseIndex();

    // Make sure the first definition is not a partial redefinition. Add an
    // <imp-def> of the full register.
    if (MO.getSubReg())
      mi->addRegisterDefined(interval.reg);

    MachineInstr *CopyMI = NULL;
    if (mi->isCopyLike()) {
      CopyMI = mi;
    }

    VNInfo *ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);
    assert(ValNo->id == 0 && "First value in interval is not 0?");

    // Loop over all of the blocks that the vreg is defined in. There are
    // two cases we have to handle here. The most common case is a vreg
    // whose lifetime is contained within a basic block. In this case there
    // will be a single kill, in MBB, which comes after the definition.
    if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
      // FIXME: what about dead vars?
      SlotIndex killIdx;
      if (vi.Kills[0] != mi)
        killIdx = getInstructionIndex(vi.Kills[0]).getDefIndex();
      else
        killIdx = defIndex.getStoreIndex();

      // If the kill happens after the definition, we have an intra-block
      // live range.
      if (killIdx > defIndex) {
        assert(vi.AliveBlocks.empty() &&
               "Shouldn't be alive across any blocks!");
        LiveRange LR(defIndex, killIdx, ValNo);
        interval.addRange(LR);
        DEBUG(dbgs() << " +" << LR << "\n");
        return;
      }
    }

    // The other case we handle is when a virtual register lives to the end
    // of the defining block, potentially live across some blocks, then is
    // live into some number of blocks, but gets killed. Start by adding a
    // range that goes from this definition to the end of the defining block.
    LiveRange NewLR(defIndex, getMBBEndIdx(mbb), ValNo);
    DEBUG(dbgs() << " +" << NewLR);
    interval.addRange(NewLR);

    bool PHIJoin = lv_->isPHIJoin(interval.reg);

    if (PHIJoin) {
      // A phi join register is killed at the end of the MBB and revived as a
      // new valno in the killing blocks.
      assert(vi.AliveBlocks.empty() && "Phi join can't pass through blocks");
      DEBUG(dbgs() << " phi-join");
      ValNo->setHasPHIKill(true);
    } else {
      // Iterate over all of the blocks that the variable is completely
      // live in, adding [insrtIndex(begin), instrIndex(end)+4) to the
      // live interval.
      for (SparseBitVector<>::iterator I = vi.AliveBlocks.begin(),
             E = vi.AliveBlocks.end(); I != E; ++I) {
        MachineBasicBlock *aliveBlock = mf_->getBlockNumbered(*I);
        LiveRange LR(getMBBStartIdx(aliveBlock), getMBBEndIdx(aliveBlock),
                     ValNo);
        interval.addRange(LR);
        DEBUG(dbgs() << " +" << LR);
      }
    }

    // Finally, this virtual register is live from the start of any killing
    // block to the 'use' slot of the killing instruction.
    for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
      MachineInstr *Kill = vi.Kills[i];
      SlotIndex Start = getMBBStartIdx(Kill->getParent());
      SlotIndex killIdx = getInstructionIndex(Kill).getDefIndex();

      // Create an interval with a NEW value number. Note that this value
      // number isn't actually defined by an instruction, weird huh? :)
      if (PHIJoin) {
        assert(getInstructionFromIndex(Start) == 0 &&
               "PHI def index points at actual instruction.");
        ValNo = interval.getNextValue(Start, 0, VNInfoAllocator);
        ValNo->setIsPHIDef(true);
      }
      LiveRange LR(Start, killIdx, ValNo);
      interval.addRange(LR);
      DEBUG(dbgs() << " +" << LR);
    }

  } else {
    if (MultipleDefsBySameMI(*mi, MOIdx))
      // Multiple defs of the same virtual register by the same instruction.
      // e.g. %reg1031:5<def>, %reg1031:6<def> = VLD1q16 %reg1024<kill>, ...
      // This is likely due to elimination of REG_SEQUENCE instructions. Return
      // here since there is nothing to do.
      return;

    // If this is the second time we see a virtual register definition, it
    // must be due to phi elimination or two addr elimination. If this is
    // the result of two address elimination, then the vreg is one of the
    // def-and-use register operands.
    //
    // It may also be a partial redef like this:
    // 80  %reg1041:6<def> = VSHRNv4i16 %reg1034<kill>, 12, pred:14, pred:%reg0
    // 120 %reg1041:5<def> = VSHRNv4i16 %reg1039<kill>, 12, pred:14, pred:%reg0
    bool PartReDef = isPartialRedef(MIIdx, MO, interval);
    if (PartReDef || mi->isRegTiedToUseOperand(MOIdx)) {
      // If this is a two-address definition, then we have already processed
      // the live range. The only problem is that we didn't realize there
      // are actually two values in the live interval. Because of this we
      // need to take the LiveRegion that defines this register and split it
      // into two values.
      SlotIndex RedefIndex = MIIdx.getDefIndex();
      if (MO.isEarlyClobber())
        RedefIndex = MIIdx.getUseIndex();

      const LiveRange *OldLR =
        interval.getLiveRangeContaining(RedefIndex.getUseIndex());
      VNInfo *OldValNo = OldLR->valno;
      SlotIndex DefIndex = OldValNo->def.getDefIndex();

      // Delete the previous value, which should be short and continuous,
      // because the 2-addr copy must be in the same MBB as the redef.
      interval.removeRange(DefIndex, RedefIndex);

      // The new value number (#1) is defined by the instruction we claimed
      // defined value #0.
      VNInfo *ValNo = interval.createValueCopy(OldValNo, VNInfoAllocator);

      // Value#0 is now defined by the 2-addr instruction.
      OldValNo->def = RedefIndex;
      OldValNo->setCopy(0);

      // A re-def may be a copy. e.g. %reg1030:6<def> = VMOVD %reg1026, ...
      if (PartReDef && mi->isCopyLike())
        OldValNo->setCopy(&*mi);

      // Add the new live interval which replaces the range for the input copy.
      LiveRange LR(DefIndex, RedefIndex, ValNo);
      DEBUG(dbgs() << " replace range with " << LR);
      interval.addRange(LR);

      // If this redefinition is dead, we need to add a dummy unit live
      // range covering the def slot.
      if (MO.isDead())
        interval.addRange(LiveRange(RedefIndex, RedefIndex.getStoreIndex(),
                                    OldValNo));

      DEBUG({
          dbgs() << " RESULT: ";
          interval.print(dbgs(), tri_);
        });
    } else if (lv_->isPHIJoin(interval.reg)) {
      // In the case of PHI elimination, each variable definition is only
      // live until the end of the block. We've already taken care of the
      // rest of the live range.

      SlotIndex defIndex = MIIdx.getDefIndex();
      if (MO.isEarlyClobber())
        defIndex = MIIdx.getUseIndex();

      VNInfo *ValNo;
      MachineInstr *CopyMI = NULL;
      if (mi->isCopyLike())
        CopyMI = mi;
      ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);

      SlotIndex killIndex = getMBBEndIdx(mbb);
      LiveRange LR(defIndex, killIndex, ValNo);
      interval.addRange(LR);
      ValNo->setHasPHIKill(true);
      DEBUG(dbgs() << " phi-join +" << LR);
    } else {
      llvm_unreachable("Multiply defined register");
    }
  }

  DEBUG(dbgs() << '\n');
}
void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
                                              MachineBasicBlock::iterator mi,
                                              SlotIndex MIIdx,
                                              MachineOperand& MO,
                                              LiveInterval &interval,
                                              MachineInstr *CopyMI) {
  // A physical register cannot be live across basic blocks, so its
  // lifetime must end somewhere in its defining basic block.
  DEBUG({
      dbgs() << "\t\tregister: ";
      printRegName(interval.reg, tri_);
    });

  SlotIndex baseIndex = MIIdx;
  SlotIndex start = baseIndex.getDefIndex();
  // Earlyclobbers move back one.
  if (MO.isEarlyClobber())
    start = MIIdx.getUseIndex();
  SlotIndex end = start;

  // If it is not used after definition, it is considered dead at
  // the instruction defining it. Hence its interval is:
  // [defSlot(def), defSlot(def)+1)
  // For earlyclobbers, the defSlot was pushed back one; the extra
  // advance below compensates.
  if (MO.isDead()) {
    DEBUG(dbgs() << " dead");
    end = start.getStoreIndex();
    goto exit;
  }

  // If it is not dead on definition, it must be killed by a
  // subsequent instruction. Hence its interval is:
  // [defSlot(def), useSlot(kill)+1)
  baseIndex = baseIndex.getNextIndex();
  while (++mi != MBB->end()) {

    if (mi->isDebugValue())
      continue;
    if (getInstructionFromIndex(baseIndex) == 0)
      baseIndex = indexes_->getNextNonNullIndex(baseIndex);

    if (mi->killsRegister(interval.reg, tri_)) {
      DEBUG(dbgs() << " killed");
      end = baseIndex.getDefIndex();
      goto exit;
    } else {
      int DefIdx = mi->findRegisterDefOperandIdx(interval.reg,false,false,tri_);
      if (DefIdx != -1) {
        if (mi->isRegTiedToUseOperand(DefIdx)) {
          // Two-address instruction.
          end = baseIndex.getDefIndex();
        } else {
          // Another instruction redefines the register before it is ever read.
          // Then the register is essentially dead at the instruction that
          // defines it. Hence its interval is:
          // [defSlot(def), defSlot(def)+1)
          DEBUG(dbgs() << " dead");
          end = start.getStoreIndex();
        }
        goto exit;
      }
    }

    baseIndex = baseIndex.getNextIndex();
  }

  // The only case we should have a dead physreg here without a killing
  // instruction is if it is live-in to the function and never used. Another
  // possible case is that the implicit use of the physical register has been
  // deleted by the two-address pass.
  end = start.getStoreIndex();

exit:
  assert(start < end && "did not find end of interval?");

  // Already exists? Extend old live interval.
  LiveInterval::iterator OldLR = interval.FindLiveRangeContaining(start);
  bool Extend = OldLR != interval.end();
  VNInfo *ValNo = Extend
    ? OldLR->valno : interval.getNextValue(start, CopyMI, VNInfoAllocator);
  if (MO.isEarlyClobber() && Extend)
    ValNo->setHasRedefByEC(true);
  LiveRange LR(start, end, ValNo);
  interval.addRange(LR);
  DEBUG(dbgs() << " +" << LR << '\n');
}
void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
                                      MachineBasicBlock::iterator MI,
                                      SlotIndex MIIdx,
                                      MachineOperand& MO,
                                      unsigned MOIdx) {
  if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
    handleVirtualRegisterDef(MBB, MI, MIIdx, MO, MOIdx,
                             getOrCreateInterval(MO.getReg()));
  else if (allocatableRegs_[MO.getReg()]) {
    MachineInstr *CopyMI = NULL;
    if (MI->isCopyLike())
      CopyMI = MI;
    handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                              getOrCreateInterval(MO.getReg()), CopyMI);
    // Def of a register also defines its sub-registers.
    for (const unsigned* AS = tri_->getSubRegisters(MO.getReg()); *AS; ++AS)
      // If MI also modifies the sub-register explicitly, avoid processing it
      // more than once. Do not pass in TRI here so it checks for exact match.
      if (!MI->definesRegister(*AS))
        handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                                  getOrCreateInterval(*AS), 0);
  }
}
void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
                                         SlotIndex MIIdx,
                                         LiveInterval &interval, bool isAlias) {
  DEBUG({
      dbgs() << "\t\tlivein register: ";
      printRegName(interval.reg, tri_);
    });

  // Look for kills, if it reaches a def before it's killed, then it shouldn't
  // be considered a livein.
  MachineBasicBlock::iterator mi = MBB->begin();
  MachineBasicBlock::iterator E = MBB->end();
  // Skip over DBG_VALUE at the start of the MBB.
  if (mi != E && mi->isDebugValue()) {
    while (++mi != E && mi->isDebugValue())
      ;
    if (mi == E)
      // MBB is empty except for DBG_VALUE's.
      return;
  }

  SlotIndex baseIndex = MIIdx;
  SlotIndex start = baseIndex;
  if (getInstructionFromIndex(baseIndex) == 0)
    baseIndex = indexes_->getNextNonNullIndex(baseIndex);

  SlotIndex end = baseIndex;
  bool SeenDefUse = false;

  while (mi != E) {
    if (mi->killsRegister(interval.reg, tri_)) {
      DEBUG(dbgs() << " killed");
      end = baseIndex.getDefIndex();
      SeenDefUse = true;
      break;
    } else if (mi->definesRegister(interval.reg, tri_)) {
      // Another instruction redefines the register before it is ever read.
      // Then the register is essentially dead at the instruction that defines
      // it. Hence its interval is:
      // [defSlot(def), defSlot(def)+1)
      DEBUG(dbgs() << " dead");
      end = start.getStoreIndex();
      SeenDefUse = true;
      break;
    }

    while (++mi != E && mi->isDebugValue())
      // Skip over DBG_VALUE.
      ;
    if (mi != E)
      baseIndex = indexes_->getNextNonNullIndex(baseIndex);
  }

  // Live-in register might not be used at all.
  if (!SeenDefUse) {
    if (isAlias) {
      DEBUG(dbgs() << " dead");
      end = MIIdx.getStoreIndex();
    } else {
      DEBUG(dbgs() << " live through");
      end = getMBBEndIdx(MBB);
    }
  }

  SlotIndex defIdx = getMBBStartIdx(MBB);
  assert(getInstructionFromIndex(defIdx) == 0 &&
         "PHI def index points at actual instruction.");
  VNInfo *vni =
    interval.getNextValue(defIdx, 0, VNInfoAllocator);
  vni->setIsPHIDef(true);
  LiveRange LR(start, end, vni);

  interval.addRange(LR);
  DEBUG(dbgs() << " +" << LR << '\n');
}
/// computeIntervals - Computes the live intervals for virtual
/// registers. For some ordering of the machine instructions [1,N] a
/// live interval is an interval [i, j) where 1 <= i <= j < N for
/// which a variable is live.
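///
/// e.g. a vreg defined at index 4 in BB#0 and killed at index 20 in BB#1
/// gets one range per block, [4, end-of-BB#0) and [start-of-BB#1, 20),
/// joined in a single interval (indices here are illustrative only).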
void LiveIntervals::computeIntervals() {
  DEBUG(dbgs() << "********** COMPUTING LIVE INTERVALS **********\n"
               << "********** Function: "
               << ((Value*)mf_->getFunction())->getName() << '\n');

  SmallVector<unsigned, 8> UndefUses;
  for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
       MBBI != E; ++MBBI) {
    MachineBasicBlock *MBB = MBBI;
    if (MBB->empty())
      continue;

    // Track the index of the current machine instr.
    SlotIndex MIIndex = getMBBStartIdx(MBB);
    DEBUG(dbgs() << "BB#" << MBB->getNumber()
          << ":\t\t# derived from " << MBB->getName() << "\n");

    // Create intervals for live-ins to this BB first.
    for (MachineBasicBlock::livein_iterator LI = MBB->livein_begin(),
           LE = MBB->livein_end(); LI != LE; ++LI) {
      handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
      // Multiple live-ins can alias the same register.
      for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
        if (!hasInterval(*AS))
          handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
                               true);
    }

    // Skip over empty initial indices.
    if (getInstructionFromIndex(MIIndex) == 0)
      MIIndex = indexes_->getNextNonNullIndex(MIIndex);

    for (MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();
         MI != miEnd; ++MI) {
      DEBUG(dbgs() << MIIndex << "\t" << *MI);
      if (MI->isDebugValue())
        continue;

      // Handle defs.
      for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
        MachineOperand &MO = MI->getOperand(i);
        if (!MO.isReg() || !MO.getReg())
          continue;

        // handle register defs - build intervals
        if (MO.isDef())
          handleRegisterDef(MBB, MI, MIIndex, MO, i);
        else if (MO.isUndef())
          UndefUses.push_back(MO.getReg());
      }

      // Move to the next instr slot.
      MIIndex = indexes_->getNextNonNullIndex(MIIndex);
    }
  }

  // Create empty intervals for registers defined by implicit_def's (except
  // for those implicit_def that define values which are liveout of their
  // blocks.
  for (unsigned i = 0, e = UndefUses.size(); i != e; ++i) {
    unsigned UndefReg = UndefUses[i];
    (void)getOrCreateInterval(UndefReg);
  }
}
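/// createInterval - Allocate a new interval for the given register. Physical
/// registers get an infinite spill weight (HUGE_VALF) so they are never
/// chosen for spilling; virtual registers start with a weight of zero.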
LiveInterval* LiveIntervals::createInterval(unsigned reg) {
  float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ? HUGE_VALF : 0.0F;
  return new LiveInterval(reg, Weight);
}
/// dupInterval - Duplicate a live interval. The caller is responsible for
/// managing the allocated memory.
LiveInterval* LiveIntervals::dupInterval(LiveInterval *li) {
  LiveInterval *NewLI = createInterval(li->reg);
  NewLI->Copy(*li, mri_, getVNInfoAllocator());
  return NewLI;
}
//===----------------------------------------------------------------------===//
// Register allocator hooks.
//
/// getReMatImplicitUse - If the remat definition MI has one (for now, we only
/// allow one) virtual register operand, then its uses are implicitly using
/// the register. Returns the virtual register.
unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
                                            MachineInstr *MI) const {
  unsigned RegOp = 0;
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg() || !MO.isUse())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || Reg == li.reg)
      continue;

    if (TargetRegisterInfo::isPhysicalRegister(Reg) &&
        !allocatableRegs_[Reg])
      continue;
    // FIXME: For now, only remat MI with at most one register operand.
    assert(!RegOp &&
           "Can't rematerialize instruction with multiple register operand!");
    RegOp = MO.getReg();
  }
  return RegOp;
}
/// isValNoAvailableAt - Return true if the val# of the specified interval
/// which reaches the given instruction also reaches the specified use index.
bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
                                       SlotIndex UseIdx) const {
  SlotIndex Index = getInstructionIndex(MI);
  VNInfo *ValNo = li.FindLiveRangeContaining(Index)->valno;
  LiveInterval::const_iterator UI = li.FindLiveRangeContaining(UseIdx);
  return UI != li.end() && UI->valno == ValNo;
}
/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       const VNInfo *ValNo, MachineInstr *MI,
                                       SmallVectorImpl<LiveInterval*> &SpillIs,
                                       bool &isLoad) {
  if (DisableReMat)
    return false;

  if (!tii_->isTriviallyReMaterializable(MI, aa_))
    return false;

  // Target-specific code can mark an instruction as being rematerializable
  // if it has one virtual reg use, though it had better be something like
  // a PIC base register which is likely to be live everywhere.
  unsigned ImpUse = getReMatImplicitUse(li, MI);
  if (ImpUse) {
    const LiveInterval &ImpLi = getInterval(ImpUse);
    for (MachineRegisterInfo::use_nodbg_iterator
           ri = mri_->use_nodbg_begin(li.reg), re = mri_->use_nodbg_end();
         ri != re; ++ri) {
      MachineInstr *UseMI = &*ri;
      SlotIndex UseIdx = getInstructionIndex(UseMI);
      if (li.FindLiveRangeContaining(UseIdx)->valno != ValNo)
        continue;
      if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
        return false;
    }

    // If a register operand of the re-materialized instruction is going to
    // be spilled next, then it's not legal to re-materialize this instruction.
    for (unsigned i = 0, e = SpillIs.size(); i != e; ++i)
      if (ImpUse == SpillIs[i]->reg)
        return false;
  }
  return true;
}
/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       const VNInfo *ValNo, MachineInstr *MI) {
  SmallVector<LiveInterval*, 4> Dummy1;
  bool Dummy2;
  return isReMaterializable(li, ValNo, MI, Dummy1, Dummy2);
}
/// isReMaterializable - Returns true if every definition of MI of every
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       SmallVectorImpl<LiveInterval*> &SpillIs,
                                       bool &isLoad) {
  isLoad = false;
  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
       i != e; ++i) {
    const VNInfo *VNI = *i;
    if (VNI->isUnused())
      continue; // Dead val#.
    // Is the def for the val# rematerializable?
    MachineInstr *ReMatDefMI = getInstructionFromIndex(VNI->def);
    bool DefIsLoad = false;
    if (!ReMatDefMI ||
        !isReMaterializable(li, VNI, ReMatDefMI, SpillIs, DefIsLoad))
      return false;
    isLoad |= DefIsLoad;
  }
  return true;
}
/// FilterFoldedOps - Filter out two-address use operands. Return
/// true if it finds any issue with the operands that ought to prevent
/// folding.
static bool FilterFoldedOps(MachineInstr *MI,
                            SmallVector<unsigned, 2> &Ops,
                            unsigned &MRInfo,
                            SmallVector<unsigned, 2> &FoldOps) {
  MRInfo = 0;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    unsigned OpIdx = Ops[i];
    MachineOperand &MO = MI->getOperand(OpIdx);
    // FIXME: fold subreg use.
    if (MO.getSubReg())
      return true;
    if (MO.isDef())
      MRInfo |= (unsigned)VirtRegMap::isMod;
    else {
      // Filter out two-address use operand(s).
      if (MI->isRegTiedToDefOperand(OpIdx)) {
        MRInfo = VirtRegMap::isModRef;
        continue;
      }
      MRInfo |= (unsigned)VirtRegMap::isRef;
    }
    FoldOps.push_back(OpIdx);
  }
  return false;
}
/// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
/// slot / to reg or any rematerialized load into ith operand of specified
/// MI. If it is successful, MI is updated with the newly created MI and
/// returns true.
bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
                                         VirtRegMap &vrm, MachineInstr *DefMI,
                                         SlotIndex InstrIdx,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool isSS, int Slot, unsigned Reg) {
  // If it is an implicit def instruction, just delete it.
  if (MI->isImplicitDef()) {
    RemoveMachineInstrFromMaps(MI);
    vrm.RemoveMachineInstrFromMaps(MI);
    MI->eraseFromParent();
    ++numFolds;
    return true;
  }

  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;

  // The only time it's safe to fold into a two address instruction is when
  // it's folding reload and spill from / into a spill stack slot.
  if (DefMI && (MRInfo & VirtRegMap::isMod))
    return false;

  MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(MI, FoldOps, Slot)
                           : tii_->foldMemoryOperand(MI, FoldOps, DefMI);
  if (fmi) {
    // Remember this instruction uses the spill slot.
    if (isSS) vrm.addSpillSlotUse(Slot, fmi);

    // Attempt to fold the memory reference into the instruction. If
    // we can do this, we don't need to insert spill code.
    if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
      vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
    vrm.transferSpillPts(MI, fmi);
    vrm.transferRestorePts(MI, fmi);
    vrm.transferEmergencySpills(MI, fmi);
    ReplaceMachineInstrInMaps(MI, fmi);
    MI->eraseFromParent();
    MI = fmi;
    ++numFolds;
    return true;
  }
  return false;
}
/// canFoldMemoryOperand - Returns true if the specified load / store
/// folding is possible.
bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool ReMat) const {
  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;

  // It's only legal to remat for a use, not a def.
  if (ReMat && (MRInfo & VirtRegMap::isMod))
    return false;

  return tii_->canFoldMemoryOperand(MI, FoldOps);
}
bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
  LiveInterval::Ranges::const_iterator itr = li.ranges.begin();

  MachineBasicBlock *mbb = indexes_->getMBBCoveringRange(itr->start, itr->end);

  if (mbb == 0)
    return false;

  for (++itr; itr != li.ranges.end(); ++itr) {
    MachineBasicBlock *mbb2 =
      indexes_->getMBBCoveringRange(itr->start, itr->end);

    if (mbb2 != mbb)
      return false;
  }

  return true;
}
/// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of
/// interval on to-be re-materialized operands of MI) with new register.
void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
                                       MachineInstr *MI, unsigned NewVReg,
                                       VirtRegMap &vrm) {
  // There is an implicit use. That means one of the other operands is
  // being remat'ed and the remat'ed instruction has li.reg as a
  // use operand. Make sure we rewrite that as well.
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
      continue;
    if (!vrm.isReMaterialized(Reg))
      continue;
    MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
    MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg);
    if (UseMO)
      UseMO->setReg(NewVReg);
  }
}
/// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
/// for addIntervalsForSpills to rewrite uses / defs for the given live range.
bool LiveIntervals::
rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
                 bool TrySplit, SlotIndex index, SlotIndex end,
                 MachineInstr *MI,
                 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                 unsigned Slot, int LdSlot,
                 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                 VirtRegMap &vrm,
                 const TargetRegisterClass* rc,
                 SmallVector<int, 4> &ReMatIds,
                 const MachineLoopInfo *loopInfo,
                 unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
                 DenseMap<unsigned,unsigned> &MBBVRegsMap,
                 std::vector<LiveInterval*> &NewLIs) {
  bool CanFold = false;
 RestartInstruction:
  for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
    MachineOperand& mop = MI->getOperand(i);
    if (!mop.isReg())
      continue;
    unsigned Reg = mop.getReg();
    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
      continue;
    if (Reg != li.reg)
      continue;

    bool TryFold = !DefIsReMat;
    bool FoldSS = true; // Default behavior unless it's a remat.
    int FoldSlot = Slot;
    if (DefIsReMat) {
      // If this is the rematerializable definition MI itself and
      // all of its uses are rematerialized, simply delete it.
      if (MI == ReMatOrigDefMI && CanDelete) {
        DEBUG(dbgs() << "\t\t\t\tErasing re-materializable def: "
                     << *MI << '\n');
        RemoveMachineInstrFromMaps(MI);
        vrm.RemoveMachineInstrFromMaps(MI);
        MI->eraseFromParent();
        break;
      }

      // If def for this use can't be rematerialized, then try folding.
      // If def is rematerializable and it's a load, also try folding.
      TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
      if (isLoad) {
        // Try fold loads (from stack slot, constant pool, etc.) into uses.
        FoldSS = isLoadSS;
        FoldSlot = LdSlot;
      }
    }

    // Scan all of the operands of this instruction rewriting operands
    // to use NewVReg instead of li.reg as appropriate. We do this for
    // two reasons:
    //
    //   1. If the instr reads the same spilled vreg multiple times, we
    //      want to reuse the NewVReg.
    //
    //   2. If the instr is a two-addr instruction, we are required to
    //      keep the src/dst regs pinned.
    //
    // Keep track of whether we replace a use and/or def so that we can
    // create the spill interval with the appropriate range.
    SmallVector<unsigned, 2> Ops;
    tie(HasUse, HasDef) = MI->readsWritesVirtualRegister(Reg, &Ops);

    // Create a new virtual register for the spill interval.
    // Create the new register now so we can map the fold instruction
    // to the new register so when it is unfolded we get the correct
    // answer.
    bool CreatedNewVReg = false;
    if (NewVReg == 0) {
      NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      CreatedNewVReg = true;

      // The new virtual register should get the same allocation hints as the
      // old one.
      std::pair<unsigned, unsigned> Hint = mri_->getRegAllocationHint(Reg);
      if (Hint.first || Hint.second)
        mri_->setRegAllocationHint(NewVReg, Hint.first, Hint.second);
    }

    if (!TryFold)
      CanFold = false;
    else {
      // Do not fold load / store here if we are splitting. We'll find an
      // optimal point to insert a load / store later.
      if (!TrySplit) {
        if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
                                 Ops, FoldSS, FoldSlot, NewVReg)) {
          // Folding the load/store can completely change the instruction in
          // unpredictable ways, rescan it from the beginning.

          if (FoldSS) {
            // We need to give the new vreg the same stack slot as the
            // spilled interval.
            vrm.assignVirt2StackSlot(NewVReg, FoldSlot);
          }

          HasUse = false;
          HasDef = false;
          CanFold = false;
          if (isNotInMIMap(MI))
            break;
          goto RestartInstruction;
        }
      } else {
        // We'll try to fold it later if it's profitable.
        CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat);
      }
    }

    mop.setReg(NewVReg);
    if (mop.isImplicit())
      rewriteImplicitOps(li, MI, NewVReg, vrm);

    // Reuse NewVReg for other reads.
    for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
      MachineOperand &mopj = MI->getOperand(Ops[j]);
      mopj.setReg(NewVReg);
      if (mopj.isImplicit())
        rewriteImplicitOps(li, MI, NewVReg, vrm);
    }

    if (CreatedNewVReg) {
      if (DefIsReMat) {
        vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI);
        if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
          // Each valnum may have its own remat id.
          ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
        } else {
          vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
        }
        if (!CanDelete || (HasUse && HasDef)) {
          // If this is a two-addr instruction then its use operands are
          // rematerializable but its def is not. It should be assigned a
          // stack slot.
          vrm.assignVirt2StackSlot(NewVReg, Slot);
        }
      } else {
        vrm.assignVirt2StackSlot(NewVReg, Slot);
      }
    } else if (HasUse && HasDef &&
               vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
      // If this interval hasn't been assigned a stack slot (because earlier
      // def is a deleted remat def), do it now.
      assert(Slot != VirtRegMap::NO_STACK_SLOT);
      vrm.assignVirt2StackSlot(NewVReg, Slot);
    }

    // Re-matting an instruction with virtual register use. Add the
    // register as an implicit use on the use MI.
    if (DefIsReMat && ImpUse)
      MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));

    // Create a new register interval for this spill / remat.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    if (CreatedNewVReg) {
      NewLIs.push_back(&nI);
      MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
      if (TrySplit)
        vrm.setIsSplitFromReg(NewVReg, li.reg);
    }

    if (HasUse) {
      if (CreatedNewVReg) {
        LiveRange LR(index.getLoadIndex(), index.getDefIndex(),
                     nI.getNextValue(SlotIndex(), 0, VNInfoAllocator));
        DEBUG(dbgs() << " +" << LR);
        nI.addRange(LR);
      } else {
        // Extend the split live interval to this def / use.
        SlotIndex End = index.getDefIndex();
        LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
                     nI.getValNumInfo(nI.getNumValNums()-1));
        DEBUG(dbgs() << " +" << LR);
        nI.addRange(LR);
      }
    }
    if (HasDef) {
      LiveRange LR(index.getDefIndex(), index.getStoreIndex(),
                   nI.getNextValue(SlotIndex(), 0, VNInfoAllocator));
      DEBUG(dbgs() << " +" << LR);
      nI.addRange(LR);
    }

    DEBUG({
        dbgs() << "\t\t\t\tAdded new interval: ";
        nI.print(dbgs(), tri_);
        dbgs() << '\n';
      });
  }
  return CanFold;
}
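/// anyKillInMBBAfterIdx - Return true if the given live interval is killed
/// anywhere within MBB after the given index.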
bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
                                         const VNInfo *VNI,
                                         MachineBasicBlock *MBB,
                                         SlotIndex Idx) const {
  return li.killedInRange(Idx.getNextSlot(), getMBBEndIdx(MBB));
}
/// RewriteInfo - Keep track of machine instrs that will be rewritten
/// during spilling.
namespace {
  struct RewriteInfo {
    SlotIndex Index;
    MachineInstr *MI;
    RewriteInfo(SlotIndex i, MachineInstr *mi) : Index(i), MI(mi) {}
  };

  struct RewriteInfoCompare {
    bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
      return LHS.Index < RHS.Index;
    }
  };
}
void LiveIntervals::
rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
                    LiveInterval::Ranges::const_iterator &I,
                    MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                    unsigned Slot, int LdSlot,
                    bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                    VirtRegMap &vrm,
                    const TargetRegisterClass* rc,
                    SmallVector<int, 4> &ReMatIds,
                    const MachineLoopInfo *loopInfo,
                    BitVector &SpillMBBs,
                    DenseMap<unsigned, std::vector<SRInfo> > &SpillIdxes,
                    BitVector &RestoreMBBs,
                    DenseMap<unsigned, std::vector<SRInfo> > &RestoreIdxes,
                    DenseMap<unsigned,unsigned> &MBBVRegsMap,
                    std::vector<LiveInterval*> &NewLIs) {
  bool AllCanFold = true;
  unsigned NewVReg = 0;
  SlotIndex start = I->start.getBaseIndex();
  SlotIndex end = I->end.getPrevSlot().getBaseIndex().getNextIndex();

  // First collect all the def / use in this live range that will be rewritten.
  // Make sure they are sorted according to instruction index.
  std::vector<RewriteInfo> RewriteMIs;
  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
         re = mri_->reg_end(); ri != re; ) {
    MachineInstr *MI = &*ri;
    MachineOperand &O = ri.getOperand();
    ++ri;
    if (MI->isDebugValue()) {
      // Modify DBG_VALUE now that the value is in a spill slot.
      if (Slot != VirtRegMap::MAX_STACK_SLOT || isLoadSS) {
        uint64_t Offset = MI->getOperand(1).getImm();
        const MDNode *MDPtr = MI->getOperand(2).getMetadata();
        DebugLoc DL = MI->getDebugLoc();
        int FI = isLoadSS ? LdSlot : (int)Slot;
        if (MachineInstr *NewDV = tii_->emitFrameIndexDebugValue(*mf_, FI,
                                                           Offset, MDPtr, DL)) {
          DEBUG(dbgs() << "Modifying debug info due to spill:" << "\t" << *MI);
          ReplaceMachineInstrInMaps(MI, NewDV);
          MachineBasicBlock *MBB = MI->getParent();
          MBB->insert(MBB->erase(MI), NewDV);
          continue;
        }
      }

      DEBUG(dbgs() << "Removing debug info due to spill:" << "\t" << *MI);
      RemoveMachineInstrFromMaps(MI);
      vrm.RemoveMachineInstrFromMaps(MI);
      MI->eraseFromParent();
      continue;
    }
    assert(!(O.isImplicit() && O.isUse()) &&
           "Spilling register that's used as implicit use?");
    SlotIndex index = getInstructionIndex(MI);
    if (index < start || index >= end)
      continue;

    if (O.isUndef())
      // Must be defined by an implicit def. It should not be spilled. Note,
      // this is for correctness reasons. e.g.
      // 8   %reg1024<def> = IMPLICIT_DEF
      // 12  %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
      // The live range [12, 14) is not part of the r1024 live interval since
      // it's defined by an implicit def. It will not conflict with the live
      // interval of r1025. Now suppose both registers are spilled, you can
      // easily see a situation where both registers are reloaded before
      // the INSERT_SUBREG and the two target registers would overlap.
      continue;
    RewriteMIs.push_back(RewriteInfo(index, MI));
  }
  std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());

  unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
  // Now rewrite the defs and uses.
  for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
    RewriteInfo &rwi = RewriteMIs[i];
    ++i;
    SlotIndex index = rwi.Index;
    MachineInstr *MI = rwi.MI;
    // If MI defs and/or uses the same register multiple times, then there
    // are multiple entries.
    while (i != e && RewriteMIs[i].MI == MI) {
      assert(RewriteMIs[i].Index == index);
      ++i;
    }
    MachineBasicBlock *MBB = MI->getParent();

    if (ImpUse && MI != ReMatDefMI) {
      // Re-matting an instruction with virtual register use. Prevent interval
      // from being spilled.
      getInterval(ImpUse).markNotSpillable();
    }

    unsigned MBBId = MBB->getNumber();
    unsigned ThisVReg = 0;
    if (TrySplit) {
      DenseMap<unsigned,unsigned>::iterator NVI = MBBVRegsMap.find(MBBId);
      if (NVI != MBBVRegsMap.end()) {
        ThisVReg = NVI->second;
        // One common case:
        // x = use
        // ...
        // ...
        // def = ...
        //     = use
        // It's better to start a new interval to avoid artificially
        // extending the new interval.
        if (MI->readsWritesVirtualRegister(li.reg) ==
            std::make_pair(false,true)) {
          MBBVRegsMap.erase(MBB->getNumber());
          ThisVReg = 0;
        }
      }
    }

    bool IsNew = ThisVReg == 0;
    if (IsNew) {
      // This ends the previous live interval. If all of its def / use
      // can be folded, give it a low spill weight.
      if (NewVReg && TrySplit && AllCanFold) {
        LiveInterval &nI = getOrCreateInterval(NewVReg);
        nI.weight /= 10.0F;
      }
      AllCanFold = true;
    }
    NewVReg = ThisVReg;

    bool HasDef = false;
    bool HasUse = false;
    bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
                         index, end, MI, ReMatOrigDefMI, ReMatDefMI,
                         Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                         CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
                         ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs);
    if (!HasDef && !HasUse)
      continue;

    AllCanFold &= CanFold;

    // Update weight of spill interval.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    if (!TrySplit) {
      // The spill weight is now infinity as it cannot be spilled again.
      nI.markNotSpillable();
      continue;
    }

    // Keep track of the last def and first use in each MBB.
    if (HasDef) {
      if (MI != ReMatOrigDefMI || !CanDelete) {
        bool HasKill = false;
        if (!HasUse)
          HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, index.getDefIndex());
        else {
          // If this is a two-address code, then this index starts a new VNInfo.
          const VNInfo *VNI = li.findDefinedVNInfoForRegInt(index.getDefIndex());
          if (VNI)
            HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, index.getDefIndex());
        }
        DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
          SpillIdxes.find(MBBId);
        if (!HasKill) {
          if (SII == SpillIdxes.end()) {
            std::vector<SRInfo> S;
            S.push_back(SRInfo(index, NewVReg, true));
            SpillIdxes.insert(std::make_pair(MBBId, S));
          } else if (SII->second.back().vreg != NewVReg) {
            SII->second.push_back(SRInfo(index, NewVReg, true));
          } else if (index > SII->second.back().index) {
            // If there is an earlier def and this is a two-address
            // instruction, then it's not possible to fold the store (which
            // would also fold the load).
            SRInfo &Info = SII->second.back();
            Info.index = index;
            Info.canFold = !HasUse;
          }
          SpillMBBs.set(MBBId);
        } else if (SII != SpillIdxes.end() &&
                   SII->second.back().vreg == NewVReg &&
                   index > SII->second.back().index) {
          // There is an earlier def that's not killed (must be two-address).
          // The spill is no longer needed.
          SII->second.pop_back();
          if (SII->second.empty()) {
            SpillIdxes.erase(MBBId);
            SpillMBBs.reset(MBBId);
          }
        }
      }
    }

    if (HasUse) {
      DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
        SpillIdxes.find(MBBId);
      if (SII != SpillIdxes.end() &&
          SII->second.back().vreg == NewVReg &&
          index > SII->second.back().index)
        // Use(s) following the last def, it's not safe to fold the spill.
        SII->second.back().canFold = false;
      DenseMap<unsigned, std::vector<SRInfo> >::iterator RII =
        RestoreIdxes.find(MBBId);
      if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
        // If we are splitting live intervals, only fold if it's the first
        // use and there isn't another use later in the MBB.
        RII->second.back().canFold = false;
      else if (IsNew) {
        // Only need a reload if there isn't an earlier def / use.
        if (RII == RestoreIdxes.end()) {
          std::vector<SRInfo> Infos;
          Infos.push_back(SRInfo(index, NewVReg, true));
          RestoreIdxes.insert(std::make_pair(MBBId, Infos));
        } else {
          RII->second.push_back(SRInfo(index, NewVReg, true));
        }
        RestoreMBBs.set(MBBId);
      }
    }

    // Update spill weight.
    unsigned loopDepth = loopInfo->getLoopDepth(MBB);
    nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
  }

  if (NewVReg && TrySplit && AllCanFold) {
    // If all of its def / use can be folded, give it a low spill weight.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    nI.weight /= 10.0F;
  }
}
bool LiveIntervals::alsoFoldARestore(int Id, SlotIndex index,
                                     unsigned vr, BitVector &RestoreMBBs,
                        DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
  if (!RestoreMBBs[Id])
    return false;
  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
    if (Restores[i].index == index &&
        Restores[i].vreg == vr &&
        Restores[i].canFold)
      return true;
  return false;
}

void LiveIntervals::eraseRestoreInfo(int Id, SlotIndex index,
                                     unsigned vr, BitVector &RestoreMBBs,
                        DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
  if (!RestoreMBBs[Id])
    return;
  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
    if (Restores[i].index == index && Restores[i].vreg == vr)
      Restores[i].index = SlotIndex();
}
/// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being
/// spilled and create empty intervals for their uses.
void
LiveIntervals::handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm,
                                    const TargetRegisterClass* rc,
                                    std::vector<LiveInterval*> &NewLIs) {
  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
         re = mri_->reg_end(); ri != re; ) {
    MachineOperand &O = ri.getOperand();
    MachineInstr *MI = &*ri;
    ++ri;
    if (MI->isDebugValue()) {
      // Remove debug info for now.
      O.setReg(0U);
      DEBUG(dbgs() << "Removing debug info due to spill:" << "\t" << *MI);
      continue;
    }
    if (O.isDef()) {
      assert(MI->isImplicitDef() &&
             "Register def was not rewritten?");
      RemoveMachineInstrFromMaps(MI);
      vrm.RemoveMachineInstrFromMaps(MI);
      MI->eraseFromParent();
    } else {
      // This must be a use of an implicit_def so it's not part of the live
      // interval. Create a new empty live interval for it.
      // FIXME: Can we simply erase some of the instructions? e.g. Stores?
      unsigned NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      vrm.setIsImplicitlyDefined(NewVReg);
      NewLIs.push_back(&getOrCreateInterval(NewVReg));
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand &MO = MI->getOperand(i);
        if (MO.isReg() && MO.getReg() == li.reg) {
          MO.setReg(NewVReg);
          MO.setIsUndef();
        }
      }
    }
  }
}
float
LiveIntervals::getSpillWeight(bool isDef, bool isUse, unsigned loopDepth) {
  // Limit the loop depth ridiculousness.
  if (loopDepth > 200)
    loopDepth = 200;

  // The loop depth is used to roughly estimate the number of times the
  // instruction is executed. Something like 10^d is simple, but will quickly
  // overflow a float. This expression behaves like 10^d for small d, but is
  // more tempered for large d. At d=200 we get 6.7e33 which leaves a bit of
  // headroom before overflow.
  float lc = std::pow(1 + (100.0f / (loopDepth+10)), (float)loopDepth);

  return (isDef + isUse) * lc;
}
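// For instance (worked numbers, not from the original source): at loopDepth 0
// the factor is (1 + 100/10)^0 = 1; at depth 1 it is (1 + 100/11)^1 ~= 10.1;
// at depth 3, (1 + 100/13)^3 ~= 657, still close to the 10^d estimate; and at
// the depth-200 clamp it is ~6.7e33, as noted above.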
void
LiveIntervals::normalizeSpillWeights(std::vector<LiveInterval*> &NewLIs) {
  for (unsigned i = 0, e = NewLIs.size(); i != e; ++i)
    normalizeSpillWeight(*NewLIs[i]);
}
std::vector<LiveInterval*> LiveIntervals::
addIntervalsForSpills(const LiveInterval &li,
                      SmallVectorImpl<LiveInterval*> &SpillIs,
                      const MachineLoopInfo *loopInfo, VirtRegMap &vrm) {
  assert(li.isSpillable() && "attempt to spill already spilled interval!");

  DEBUG({
      dbgs() << "\t\t\t\tadding intervals for spills for interval: ";
      li.print(dbgs(), tri_);
      dbgs() << '\n';
    });

  // Each bit specifies whether a spill is required in the MBB.
  BitVector SpillMBBs(mf_->getNumBlockIDs());
  DenseMap<unsigned, std::vector<SRInfo> > SpillIdxes;
  BitVector RestoreMBBs(mf_->getNumBlockIDs());
  DenseMap<unsigned, std::vector<SRInfo> > RestoreIdxes;
  DenseMap<unsigned,unsigned> MBBVRegsMap;
  std::vector<LiveInterval*> NewLIs;
  const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
  unsigned NumValNums = li.getNumValNums();
  SmallVector<MachineInstr*, 4> ReMatDefs;
  ReMatDefs.resize(NumValNums, NULL);
  SmallVector<MachineInstr*, 4> ReMatOrigDefs;
  ReMatOrigDefs.resize(NumValNums, NULL);
  SmallVector<int, 4> ReMatIds;
  ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
  BitVector ReMatDelete(NumValNums);
  unsigned Slot = VirtRegMap::MAX_STACK_SLOT;
  // Spilling a split live interval. It cannot be split any further. Also,
  // it's guaranteed to be a single val# / range interval.
  if (vrm.getPreSplitReg(li.reg)) {
    vrm.setIsSplitFromReg(li.reg, 0);
    // Unset the split kill marker on the last use.
    SlotIndex KillIdx = vrm.getKillPoint(li.reg);
    if (KillIdx != SlotIndex()) {
      MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
      assert(KillMI && "Last use disappeared?");
      int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
      assert(KillOp != -1 && "Last use disappeared?");
      KillMI->getOperand(KillOp).setIsKill(false);
    }
    vrm.removeKillPoint(li.reg);
    bool DefIsReMat = vrm.isReMaterialized(li.reg);
    Slot = vrm.getStackSlot(li.reg);
    assert(Slot != VirtRegMap::MAX_STACK_SLOT);
    MachineInstr *ReMatDefMI = DefIsReMat ?
      vrm.getReMaterializedMI(li.reg) : NULL;
    int LdSlot = 0;
    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
    bool isLoad = isLoadSS ||
      (DefIsReMat && (ReMatDefMI->getDesc().canFoldAsLoad()));
    bool IsFirstRange = true;
    for (LiveInterval::Ranges::const_iterator
           I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
      // If this is a split live interval with multiple ranges, it means there
      // are two-address instructions that re-defined the value. Only the
      // first def can be rematerialized!
      if (IsFirstRange) {
        // Note ReMatOrigDefMI has already been deleted.
        rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
                             Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                             false, vrm, rc, ReMatIds, loopInfo,
                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                             MBBVRegsMap, NewLIs);
      } else {
        rewriteInstructionsForSpills(li, false, I, NULL, 0,
                             Slot, 0, false, false, false,
                             false, vrm, rc, ReMatIds, loopInfo,
                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                             MBBVRegsMap, NewLIs);
      }
      IsFirstRange = false;
    }

    handleSpilledImpDefs(li, vrm, rc, NewLIs);
    normalizeSpillWeights(NewLIs);
    return NewLIs;
  }
  bool TrySplit = !intervalIsInOneMBB(li);
  if (TrySplit)
    ++numSplits;
  bool NeedStackSlot = false;
  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
       i != e; ++i) {
    const VNInfo *VNI = *i;
    unsigned VN = VNI->id;
    if (VNI->isUnused())
      continue; // Dead val#.
    // Is the def for the val# rematerializable?
    MachineInstr *ReMatDefMI = getInstructionFromIndex(VNI->def);
    bool dummy;
    if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, SpillIs, dummy)) {
      // Remember how to remat the def of this val#.
      ReMatOrigDefs[VN] = ReMatDefMI;
      // Original def may be modified so we have to make a copy here.
      MachineInstr *Clone = mf_->CloneMachineInstr(ReMatDefMI);
      CloneMIs.push_back(Clone);
      ReMatDefs[VN] = Clone;

      bool CanDelete = true;
      if (VNI->hasPHIKill()) {
        // A kill is a phi node; not all of its uses can be rematerialized.
        // It must not be deleted.
        CanDelete = false;
        // Need a stack slot if there is any live range where uses cannot be
        // rematerialized.
        NeedStackSlot = true;
      }
      if (CanDelete)
        ReMatDelete.set(VN);
    } else {
      // Need a stack slot if there is any live range where uses cannot be
      // rematerialized.
      NeedStackSlot = true;
    }
  }
  // One stack slot per live interval.
  if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0) {
    if (vrm.getStackSlot(li.reg) == VirtRegMap::NO_STACK_SLOT)
      Slot = vrm.assignVirt2StackSlot(li.reg);

    // This case only occurs when the prealloc splitter has already assigned
    // a stack slot to this vreg.
    else
      Slot = vrm.getStackSlot(li.reg);
  }
  // Create new intervals and rewrite defs and uses.
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
    MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
    bool DefIsReMat = ReMatDefMI != NULL;
    bool CanDelete = ReMatDelete[I->valno->id];
    int LdSlot = 0;
    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
    bool isLoad = isLoadSS ||
      (DefIsReMat && ReMatDefMI->getDesc().canFoldAsLoad());
    rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
                         Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                         CanDelete, vrm, rc, ReMatIds, loopInfo,
                         SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                         MBBVRegsMap, NewLIs);
  }

  // Insert spills / restores if we are splitting.
  if (!TrySplit) {
    handleSpilledImpDefs(li, vrm, rc, NewLIs);
    normalizeSpillWeights(NewLIs);
    return NewLIs;
  }
  SmallPtrSet<LiveInterval*, 4> AddedKill;
  SmallVector<unsigned, 2> Ops;
  if (NeedStackSlot) {
    int Id = SpillMBBs.find_first();
    while (Id != -1) {
      std::vector<SRInfo> &spills = SpillIdxes[Id];
      for (unsigned i = 0, e = spills.size(); i != e; ++i) {
        SlotIndex index = spills[i].index;
        unsigned VReg = spills[i].vreg;
        LiveInterval &nI = getOrCreateInterval(VReg);
        bool isReMat = vrm.isReMaterialized(VReg);
        MachineInstr *MI = getInstructionFromIndex(index);
        bool CanFold = false;
        bool FoundUse = false;
        Ops.clear();
        if (spills[i].canFold) {
          CanFold = true;
          for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
            MachineOperand &MO = MI->getOperand(j);
            if (!MO.isReg() || MO.getReg() != VReg)
              continue;

            Ops.push_back(j);
            if (MO.isDef())
              continue;
            if (isReMat ||
                (!FoundUse && !alsoFoldARestore(Id, index, VReg,
                                                RestoreMBBs, RestoreIdxes))) {
              // MI has two-address uses of the same register. If the use
              // isn't the first and only use in the BB, then we can't fold
              // it. FIXME: Move this to rewriteInstructionsForSpills.
              CanFold = false;
              break;
            }
            FoundUse = true;
          }
        }
        // Fold the store into the def if possible.
        bool Folded = false;
        if (CanFold && !Ops.empty()) {
          if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot,VReg)){
            Folded = true;
            if (FoundUse) {
              // Also folded uses, do not issue a load.
              eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
              nI.removeRange(index.getLoadIndex(), index.getDefIndex());
            }
            nI.removeRange(index.getDefIndex(), index.getStoreIndex());
          }
        }

        // Otherwise tell the spiller to issue a spill.
        if (!Folded) {
          LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
          bool isKill = LR->end == index.getStoreIndex();
          if (!MI->registerDefIsDead(nI.reg))
            // No need to spill a dead def.
            vrm.addSpillPoint(VReg, isKill, MI);
          if (isKill)
            AddedKill.insert(&nI);
        }
      }
      Id = SpillMBBs.find_next(Id);
    }
  }
  int Id = RestoreMBBs.find_first();
  while (Id != -1) {
    std::vector<SRInfo> &restores = RestoreIdxes[Id];
    for (unsigned i = 0, e = restores.size(); i != e; ++i) {
      SlotIndex index = restores[i].index;
      if (index == SlotIndex())
        continue;
      unsigned VReg = restores[i].vreg;
      LiveInterval &nI = getOrCreateInterval(VReg);
      bool isReMat = vrm.isReMaterialized(VReg);
      MachineInstr *MI = getInstructionFromIndex(index);
      bool CanFold = false;
      Ops.clear();
      if (restores[i].canFold) {
        CanFold = true;
        for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
          MachineOperand &MO = MI->getOperand(j);
          if (!MO.isReg() || MO.getReg() != VReg)
            continue;

          if (MO.isDef()) {
            // If this restore were to be folded, it would have been folded
            // already.
            CanFold = false;
            break;
          }
          Ops.push_back(j);
        }
      }

      // Fold the load into the use if possible.
      bool Folded = false;
      if (CanFold && !Ops.empty()) {
        if (!isReMat)
          Folded = tryFoldMemoryOperand(MI, vrm, NULL,index,Ops,true,Slot,VReg);
        else {
          MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
          int LdSlot = 0;
          bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
          // If the rematerializable def is a load, also try to fold it.
          if (isLoadSS || ReMatDefMI->getDesc().canFoldAsLoad())
            Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
                                          Ops, isLoadSS, LdSlot, VReg);
          if (!Folded) {
            unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
            if (ImpUse) {
              // Re-matting an instruction with virtual register use. Add the
              // register as an implicit use on the use MI and mark the register
              // interval as unspillable.
              LiveInterval &ImpLi = getInterval(ImpUse);
              ImpLi.markNotSpillable();
              MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
            }
          }
        }
      }
      // If folding is not possible / failed, then tell the spiller to issue a
      // load / rematerialization for us.
      if (Folded)
        nI.removeRange(index.getLoadIndex(), index.getDefIndex());
      else
        vrm.addRestorePoint(VReg, MI);
    }
    Id = RestoreMBBs.find_next(Id);
  }
  // Finalize intervals: add kills, finalize spill weights, and filter out
  // dead intervals.
  std::vector<LiveInterval*> RetNewLIs;
  for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
    LiveInterval *LI = NewLIs[i];
    if (!LI->empty()) {
      if (!AddedKill.count(LI)) {
        LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
        SlotIndex LastUseIdx = LR->end.getBaseIndex();
        MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
        int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
        assert(UseIdx != -1);
        if (!LastUse->isRegTiedToDefOperand(UseIdx)) {
          LastUse->getOperand(UseIdx).setIsKill();
          vrm.addKillPoint(LI->reg, LastUseIdx);
        }
      }
      RetNewLIs.push_back(LI);
    }
  }

  handleSpilledImpDefs(li, vrm, rc, RetNewLIs);
  normalizeSpillWeights(RetNewLIs);
  return RetNewLIs;
}
/// hasAllocatableSuperReg - Return true if the specified physical register has
/// any super register that's allocatable.
bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS)
    if (allocatableRegs_[*AS] && hasInterval(*AS))
      return true;
  return false;
}

/// getRepresentativeReg - Find the largest super register of the specified
/// physical register.
unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
  // Find the largest super-register that is allocatable.
  unsigned BestReg = Reg;
  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
    unsigned SuperReg = *AS;
    if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) {
      BestReg = SuperReg;
      break;
    }
  }
  return BestReg;
}
/// getNumConflictsWithPhysReg - Return the number of uses and defs of the
/// specified interval that conflict with the specified physical register.
unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
                                                   unsigned PhysReg) const {
  unsigned NumConflicts = 0;
  const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg));
  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
         E = mri_->reg_end(); I != E; ++I) {
    MachineOperand &O = I.getOperand();
    MachineInstr *MI = O.getParent();
    if (MI->isDebugValue())
      continue;
    SlotIndex Index = getInstructionIndex(MI);
    if (pli.liveAt(Index))
      ++NumConflicts;
  }
  return NumConflicts;
}
/// spillPhysRegAroundRegDefsUses - Spill the specified physical register
/// around all defs and uses of the specified interval. Return true if it
/// was able to cut its interval.
bool LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
                                            unsigned PhysReg, VirtRegMap &vrm) {
  unsigned SpillReg = getRepresentativeReg(PhysReg);

  for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
    // If there are registers which alias PhysReg, but which are not a
    // sub-register of the chosen representative super register, assert
    // since we can't handle it yet.
    assert(*AS == SpillReg || !allocatableRegs_[*AS] || !hasInterval(*AS) ||
           tri_->isSuperRegister(*AS, SpillReg));

  bool Cut = false;
  SmallVector<unsigned, 4> PRegs;
  if (hasInterval(SpillReg))
    PRegs.push_back(SpillReg);
  SmallSet<unsigned, 4> Added;
  for (const unsigned* AS = tri_->getSubRegisters(SpillReg); *AS; ++AS)
    if (Added.insert(*AS) && hasInterval(*AS)) {
      PRegs.push_back(*AS);
      for (const unsigned* ASS = tri_->getSubRegisters(*AS); *ASS; ++ASS)
        Added.insert(*ASS);
    }

  SmallPtrSet<MachineInstr*, 8> SeenMIs;
  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
         E = mri_->reg_end(); I != E; ++I) {
    MachineOperand &O = I.getOperand();
    MachineInstr *MI = O.getParent();
    if (MI->isDebugValue() || SeenMIs.count(MI))
      continue;
    SeenMIs.insert(MI);
    SlotIndex Index = getInstructionIndex(MI);
    for (unsigned i = 0, e = PRegs.size(); i != e; ++i) {
      unsigned PReg = PRegs[i];
      LiveInterval &pli = getInterval(PReg);
      if (!pli.liveAt(Index))
        continue;
      vrm.addEmergencySpill(PReg, MI);
      SlotIndex StartIdx = Index.getLoadIndex();
      SlotIndex EndIdx = Index.getNextIndex().getBaseIndex();
      if (pli.isInOneLiveRange(StartIdx, EndIdx)) {
        pli.removeRange(StartIdx, EndIdx);
        Cut = true;
      } else {
        std::string msg;
        raw_string_ostream Msg(msg);
        Msg << "Ran out of registers during register allocation!";
        if (MI->isInlineAsm()) {
          Msg << "\nPlease check your inline asm statement for invalid "
              << "constraints:\n";
          MI->print(Msg, tm_);
        }
        report_fatal_error(Msg.str());
      }
      for (const unsigned* AS = tri_->getSubRegisters(PReg); *AS; ++AS) {
        if (!hasInterval(*AS))
          continue;
        LiveInterval &spli = getInterval(*AS);
        if (spli.liveAt(Index))
          spli.removeRange(Index.getLoadIndex(),
                           Index.getNextIndex().getBaseIndex());
      }
    }
  }
  return Cut;
}
LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg,
                                                  MachineInstr* startInst) {
  LiveInterval& Interval = getOrCreateInterval(reg);
  VNInfo* VN = Interval.getNextValue(
    SlotIndex(getInstructionIndex(startInst).getDefIndex()),
    startInst, getVNInfoAllocator());
  VN->setHasPHIKill(true);
  LiveRange LR(
    SlotIndex(getInstructionIndex(startInst).getDefIndex()),
    getMBBEndIdx(startInst->getParent()), VN);
  Interval.addRange(LR);

  return LR;
}