//===-- LiveIntervalAnalysis.cpp - Live Interval Analysis -----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the LiveInterval analysis pass which is used
// by the Linear Scan Register allocator. This pass linearizes the
// basic blocks of the function in DFS order and uses the
// LiveVariables pass to conservatively compute live intervals for
// each virtual and physical register.
//
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "liveintervals"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "VirtRegMap.h"
#include "llvm/Value.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/ProcessImplicitDefs.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include <algorithm>
#include <cmath>
using namespace llvm;
// Hidden options for help debugging.
static cl::opt<bool> DisableReMat("disable-rematerialization",
                                  cl::init(false), cl::Hidden);

STATISTIC(numIntervals , "Number of original intervals");
STATISTIC(numFolds     , "Number of loads/stores folded into instructions");
STATISTIC(numSplits    , "Number of intervals split");

char LiveIntervals::ID = 0;
INITIALIZE_PASS(LiveIntervals, "liveintervals",
                "Live Interval Analysis", false, false);
void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addRequired<AliasAnalysis>();
  AU.addPreserved<AliasAnalysis>();
  AU.addRequired<LiveVariables>();
  AU.addPreserved<LiveVariables>();
  AU.addRequired<MachineLoopInfo>();
  AU.addPreserved<MachineLoopInfo>();
  AU.addPreservedID(MachineDominatorsID);
  AU.addPreservedID(PHIEliminationID);
  AU.addRequiredID(PHIEliminationID);
  AU.addRequiredID(TwoAddressInstructionPassID);
  AU.addPreserved<ProcessImplicitDefs>();
  AU.addRequired<ProcessImplicitDefs>();
  AU.addPreserved<SlotIndexes>();
  AU.addRequiredTransitive<SlotIndexes>();
  MachineFunctionPass::getAnalysisUsage(AU);
}
void LiveIntervals::releaseMemory() {
  // Free the live intervals themselves.
  for (DenseMap<unsigned, LiveInterval*>::iterator I = r2iMap_.begin(),
       E = r2iMap_.end(); I != E; ++I)
    delete I->second;
  r2iMap_.clear();

  // Release VNInfo memory regions, VNInfo objects don't need to be dtor'd.
  VNInfoAllocator.Reset();
  while (!CloneMIs.empty()) {
    MachineInstr *MI = CloneMIs.back();
    CloneMIs.pop_back();
    mf_->DeleteMachineInstr(MI);
  }
}
/// runOnMachineFunction - Compute live intervals for the whole function.
bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
  mf_ = &fn;
  mri_ = &mf_->getRegInfo();
  tm_ = &fn.getTarget();
  tri_ = tm_->getRegisterInfo();
  tii_ = tm_->getInstrInfo();
  aa_ = &getAnalysis<AliasAnalysis>();
  lv_ = &getAnalysis<LiveVariables>();
  indexes_ = &getAnalysis<SlotIndexes>();
  allocatableRegs_ = tri_->getAllocatableSet(fn);

  computeIntervals();

  numIntervals += getNumIntervals();

  DEBUG(dump());
  return true;
}
/// print - Implement the dump method.
void LiveIntervals::print(raw_ostream &OS, const Module* ) const {
  OS << "********** INTERVALS **********\n";
  for (const_iterator I = begin(), E = end(); I != E; ++I) {
    I->second->print(OS, tri_);
    OS << "\n";
  }

  printInstrs(OS);
}
void LiveIntervals::printInstrs(raw_ostream &OS) const {
  OS << "********** MACHINEINSTRS **********\n";

  for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end();
       mbbi != mbbe; ++mbbi) {
    OS << "BB#" << mbbi->getNumber()
       << ":\t\t# derived from " << mbbi->getName() << "\n";
    for (MachineBasicBlock::iterator mii = mbbi->begin(),
           mie = mbbi->end(); mii != mie; ++mii) {
      if (mii->isDebugValue())
        OS << "    " << *mii;
      else
        OS << getInstructionIndex(mii) << '\t' << *mii;
    }
  }
}

void LiveIntervals::dumpInstrs() const {
  printInstrs(dbgs());
}
bool LiveIntervals::conflictsWithPhysReg(const LiveInterval &li,
                                         VirtRegMap &vrm, unsigned reg) {
  // We don't handle fancy stuff crossing basic block boundaries.
  if (li.ranges.size() != 1)
    return true;
  const LiveRange &range = li.ranges.front();
  SlotIndex idx = range.start.getBaseIndex();
  SlotIndex end = range.end.getPrevSlot().getBaseIndex().getNextIndex();
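  // Descriptive note: each instruction owns a block of slots (load, use,
  // def, store), so rounding the range's half-open end down to a base index
  // and then stepping one whole index forward yields the base index just
  // past the last instruction the range touches. The loops below can then
  // walk [idx, end) one instruction at a time with getNextIndex().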
  // Skip deleted instructions.
  MachineInstr *firstMI = getInstructionFromIndex(idx);
  while (!firstMI && idx != end) {
    idx = idx.getNextIndex();
    firstMI = getInstructionFromIndex(idx);
  }
  if (!firstMI)
    return false;

  // Find last instruction in range.
  SlotIndex lastIdx = end.getPrevIndex();
  MachineInstr *lastMI = getInstructionFromIndex(lastIdx);
  while (!lastMI && lastIdx != idx) {
    lastIdx = lastIdx.getPrevIndex();
    lastMI = getInstructionFromIndex(lastIdx);
  }
  if (!lastMI)
    return false;

  // Range cannot cross basic block boundaries or terminators.
  MachineBasicBlock *MBB = firstMI->getParent();
  if (MBB != lastMI->getParent() || lastMI->getDesc().isTerminator())
    return true;

  MachineBasicBlock::const_iterator E = lastMI;
  ++E;
  for (MachineBasicBlock::const_iterator I = firstMI; I != E; ++I) {
    const MachineInstr &MI = *I;

    // Allow copies to and from li.reg.
    if (MI.isCopy())
      if (MI.getOperand(0).getReg() == li.reg ||
          MI.getOperand(1).getReg() == li.reg)
        continue;

    // Check for operands using reg.
    for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
      const MachineOperand& mop = MI.getOperand(i);
      if (!mop.isReg())
        continue;
      unsigned PhysReg = mop.getReg();
      if (PhysReg == 0 || PhysReg == li.reg)
        continue;
      if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
        if (!vrm.hasPhys(PhysReg))
          continue;
        PhysReg = vrm.getPhys(PhysReg);
      }
      if (PhysReg && tri_->regsOverlap(PhysReg, reg))
        return true;
    }
  }

  // No conflicts found.
  return false;
}
bool LiveIntervals::conflictsWithAliasRef(LiveInterval &li, unsigned Reg,
                                  SmallPtrSet<MachineInstr*,32> &JoinedCopies) {
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    for (SlotIndex index = I->start.getBaseIndex(),
           end = I->end.getPrevSlot().getBaseIndex().getNextIndex();
         index != end;
         index = index.getNextIndex()) {
      MachineInstr *MI = getInstructionFromIndex(index);
      if (!MI)
        continue;               // skip deleted instructions
      if (JoinedCopies.count(MI))
        continue;
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand& MO = MI->getOperand(i);
        if (!MO.isReg())
          continue;
        unsigned PhysReg = MO.getReg();
        if (PhysReg == 0 || PhysReg == Reg ||
            TargetRegisterInfo::isVirtualRegister(PhysReg))
          continue;
        if (tri_->regsOverlap(Reg, PhysReg))
          return true;
      }
    }
  }

  return false;
}
static void printRegName(unsigned reg, const TargetRegisterInfo* tri_) {
  if (TargetRegisterInfo::isPhysicalRegister(reg))
    dbgs() << tri_->getName(reg);
  else
    dbgs() << "%reg" << reg;
}
/// MultipleDefsBySameMI - Returns true if the instruction has more than one
/// def of the specified register past operand index MOIdx.
bool MultipleDefsBySameMI(const MachineInstr &MI, unsigned MOIdx) {
  unsigned Reg = MI.getOperand(MOIdx).getReg();
  for (unsigned i = MOIdx+1, e = MI.getNumOperands(); i < e; ++i) {
    const MachineOperand &MO = MI.getOperand(i);
    if (!MO.isReg())
      continue;
    if (MO.getReg() == Reg && MO.isDef()) {
      assert(MI.getOperand(MOIdx).getSubReg() != MO.getSubReg() &&
             MI.getOperand(MOIdx).getSubReg() &&
             (MO.getSubReg() || MO.isImplicit()));
      return true;
    }
  }
  return false;
}
/// isPartialRedef - Return true if the specified def at the specific index is
/// partially re-defining the specified live interval. A common case of this
/// is a definition of a sub-register.
bool LiveIntervals::isPartialRedef(SlotIndex MIIdx, MachineOperand &MO,
                                   LiveInterval &interval) {
  if (!MO.getSubReg() || MO.isEarlyClobber())
    return false;

  SlotIndex RedefIndex = MIIdx.getDefIndex();
  const LiveRange *OldLR =
    interval.getLiveRangeContaining(RedefIndex.getUseIndex());
  if (OldLR->valno->isDefAccurate()) {
    MachineInstr *DefMI = getInstructionFromIndex(OldLR->valno->def);
    return DefMI->findRegisterDefOperandIdx(interval.reg) != -1;
  }
  return false;
}
void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
                                             MachineBasicBlock::iterator mi,
                                             SlotIndex MIIdx,
                                             MachineOperand& MO,
                                             unsigned MOIdx,
                                             LiveInterval &interval) {
  DEBUG({
      dbgs() << "\t\tregister: ";
      printRegName(interval.reg, tri_);
    });

  // Virtual registers may be defined multiple times (due to phi
  // elimination and 2-addr elimination). Much of what we do only has to be
  // done once for the vreg. We use an empty interval to detect the first
  // time we see a vreg.
  LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);
  if (interval.empty()) {
    // Get the Idx of the defining instructions.
    SlotIndex defIndex = MIIdx.getDefIndex();
    // Earlyclobbers move back one, so that they overlap the live range
    // of inputs.
    if (MO.isEarlyClobber())
      defIndex = MIIdx.getUseIndex();

    // Make sure the first definition is not a partial redefinition. Add an
    // <imp-def> of the full register.
    if (MO.getSubReg())
      mi->addRegisterDefined(interval.reg);

    MachineInstr *CopyMI = NULL;
    if (mi->isCopyLike()) {
      CopyMI = mi;
    }

    VNInfo *ValNo = interval.getNextValue(defIndex, CopyMI, true,
                                          VNInfoAllocator);
    assert(ValNo->id == 0 && "First value in interval is not 0?");
    // Loop over all of the blocks that the vreg is defined in. There are
    // two cases we have to handle here. The most common case is a vreg
    // whose lifetime is contained within a basic block. In this case there
    // will be a single kill, in MBB, which comes after the definition.
    if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
      // FIXME: what about dead vars?
      SlotIndex killIdx;
      if (vi.Kills[0] != mi)
        killIdx = getInstructionIndex(vi.Kills[0]).getDefIndex();
      else
        killIdx = defIndex.getStoreIndex();

      // If the kill happens after the definition, we have an intra-block
      // live range.
      if (killIdx > defIndex) {
        assert(vi.AliveBlocks.empty() &&
               "Shouldn't be alive across any blocks!");
        LiveRange LR(defIndex, killIdx, ValNo);
        interval.addRange(LR);
        DEBUG(dbgs() << " +" << LR << "\n");
        return;
      }
    }
    // The other case we handle is when a virtual register lives to the end
    // of the defining block, potentially live across some blocks, then is
    // live into some number of blocks, but gets killed. Start by adding a
    // range that goes from this definition to the end of the defining block.
    LiveRange NewLR(defIndex, getMBBEndIdx(mbb), ValNo);
    DEBUG(dbgs() << " +" << NewLR);
    interval.addRange(NewLR);

    bool PHIJoin = lv_->isPHIJoin(interval.reg);

    if (PHIJoin) {
      // A phi join register is killed at the end of the MBB and revived as a
      // new valno in the killing blocks.
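      // For illustration: if the phi-join vreg is defined in BB#0 and killed
      // in BB#2, value #0 covers [def, end of BB#0) and a separate PHI-def
      // value covers [start of BB#2, kill) -- see the kills loop below.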
      assert(vi.AliveBlocks.empty() && "Phi join can't pass through blocks");
      DEBUG(dbgs() << " phi-join");
      ValNo->setHasPHIKill(true);
    } else {
      // Iterate over all of the blocks that the variable is completely
      // live in, adding [instrIndex(begin), instrIndex(end)+4) to the
      // live interval.
      for (SparseBitVector<>::iterator I = vi.AliveBlocks.begin(),
             E = vi.AliveBlocks.end(); I != E; ++I) {
        MachineBasicBlock *aliveBlock = mf_->getBlockNumbered(*I);
        LiveRange LR(getMBBStartIdx(aliveBlock), getMBBEndIdx(aliveBlock),
                     ValNo);
        interval.addRange(LR);
        DEBUG(dbgs() << " +" << LR);
      }
    }
    // Finally, this virtual register is live from the start of any killing
    // block to the 'use' slot of the killing instruction.
    for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
      MachineInstr *Kill = vi.Kills[i];
      SlotIndex Start = getMBBStartIdx(Kill->getParent());
      SlotIndex killIdx = getInstructionIndex(Kill).getDefIndex();

      // Create interval with one of a NEW value number. Note that this value
      // number isn't actually defined by an instruction, weird huh? :)
      if (PHIJoin) {
        ValNo = interval.getNextValue(Start, 0, false, VNInfoAllocator);
        ValNo->setIsPHIDef(true);
      }
      LiveRange LR(Start, killIdx, ValNo);
      interval.addRange(LR);
      DEBUG(dbgs() << " +" << LR);
    }
  } else {
    if (MultipleDefsBySameMI(*mi, MOIdx))
      // Multiple defs of the same virtual register by the same instruction.
      // e.g. %reg1031:5<def>, %reg1031:6<def> = VLD1q16 %reg1024<kill>, ...
      // This is likely due to elimination of REG_SEQUENCE instructions. Return
      // here since there is nothing to do.
      return;

    // If this is the second time we see a virtual register definition, it
    // must be due to phi elimination or two addr elimination. If this is
    // the result of two address elimination, then the vreg is one of the
    // def-and-use register operands.
    //
    // It may also be a partial redef like this:
    // 80  %reg1041:6<def> = VSHRNv4i16 %reg1034<kill>, 12, pred:14, pred:%reg0
    // 120 %reg1041:5<def> = VSHRNv4i16 %reg1039<kill>, 12, pred:14, pred:%reg0
    bool PartReDef = isPartialRedef(MIIdx, MO, interval);
    if (PartReDef || mi->isRegTiedToUseOperand(MOIdx)) {
      // If this is a two-address definition, then we have already processed
      // the live range. The only problem is that we didn't realize there
      // are actually two values in the live interval. Because of this we
      // need to take the LiveRegion that defines this register and split it
      // into two values.
      SlotIndex RedefIndex = MIIdx.getDefIndex();
      if (MO.isEarlyClobber())
        RedefIndex = MIIdx.getUseIndex();

      const LiveRange *OldLR =
        interval.getLiveRangeContaining(RedefIndex.getUseIndex());
      VNInfo *OldValNo = OldLR->valno;
      SlotIndex DefIndex = OldValNo->def.getDefIndex();

      // Delete the previous value, which should be short and continuous,
      // because the 2-addr copy must be in the same MBB as the redef.
      interval.removeRange(DefIndex, RedefIndex);

      // The new value number (#1) is defined by the instruction we claimed
      // defined value #0.
      VNInfo *ValNo = interval.getNextValue(OldValNo->def, OldValNo->getCopy(),
                                            false, // update at *
                                            VNInfoAllocator);
      ValNo->setFlags(OldValNo->getFlags()); // * <- updating here

      // Value#0 is now defined by the 2-addr instruction.
      OldValNo->def = RedefIndex;
      OldValNo->setCopy(0);

      // A re-def may be a copy. e.g. %reg1030:6<def> = VMOVD %reg1026, ...
      if (PartReDef && mi->isCopyLike())
        OldValNo->setCopy(&*mi);

      // Add the new live interval which replaces the range for the input copy.
      LiveRange LR(DefIndex, RedefIndex, ValNo);
      DEBUG(dbgs() << " replace range with " << LR);
      interval.addRange(LR);

      // If this redefinition is dead, we need to add a dummy unit live
      // range covering the def slot.
      if (MO.isDead())
        interval.addRange(LiveRange(RedefIndex, RedefIndex.getStoreIndex(),
                                    OldValNo));

      DEBUG({
          dbgs() << " RESULT: ";
          interval.print(dbgs(), tri_);
        });
    } else if (lv_->isPHIJoin(interval.reg)) {
      // In the case of PHI elimination, each variable definition is only
      // live until the end of the block. We've already taken care of the
      // rest of the live range.

      SlotIndex defIndex = MIIdx.getDefIndex();
      if (MO.isEarlyClobber())
        defIndex = MIIdx.getUseIndex();

      VNInfo *ValNo;
      MachineInstr *CopyMI = NULL;
      if (mi->isCopyLike())
        CopyMI = mi;
      ValNo = interval.getNextValue(defIndex, CopyMI, true, VNInfoAllocator);

      SlotIndex killIndex = getMBBEndIdx(mbb);
      LiveRange LR(defIndex, killIndex, ValNo);
      interval.addRange(LR);
      ValNo->setHasPHIKill(true);
      DEBUG(dbgs() << " phi-join +" << LR);
    } else {
      llvm_unreachable("Multiply defined register");
    }
  }

  DEBUG(dbgs() << '\n');
}
void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
                                              MachineBasicBlock::iterator mi,
                                              SlotIndex MIIdx,
                                              MachineOperand& MO,
                                              LiveInterval &interval,
                                              MachineInstr *CopyMI) {
  // A physical register cannot be live across basic blocks, so its
  // lifetime must end somewhere in its defining basic block.
  DEBUG({
      dbgs() << "\t\tregister: ";
      printRegName(interval.reg, tri_);
    });

  SlotIndex baseIndex = MIIdx;
  SlotIndex start = baseIndex.getDefIndex();
  // Earlyclobbers move back one.
  if (MO.isEarlyClobber())
    start = MIIdx.getUseIndex();
  SlotIndex end = start;

  // If it is not used after definition, it is considered dead at
  // the instruction defining it. Hence its interval is:
  // [defSlot(def), defSlot(def)+1)
  // For earlyclobbers, the defSlot was pushed back one; the extra
  // advance below compensates.
  if (MO.isDead()) {
    DEBUG(dbgs() << " dead");
    end = start.getStoreIndex();
    goto exit;
  }
  // If it is not dead on definition, it must be killed by a
  // subsequent instruction. Hence its interval is:
  // [defSlot(def), useSlot(kill)+1)
  baseIndex = baseIndex.getNextIndex();
  while (++mi != MBB->end()) {
    if (mi->isDebugValue())
      continue;
    if (getInstructionFromIndex(baseIndex) == 0)
      baseIndex = indexes_->getNextNonNullIndex(baseIndex);

    if (mi->killsRegister(interval.reg, tri_)) {
      DEBUG(dbgs() << " killed");
      end = baseIndex.getDefIndex();
      goto exit;
    } else {
      int DefIdx = mi->findRegisterDefOperandIdx(interval.reg,false,false,tri_);
      if (DefIdx != -1) {
        if (mi->isRegTiedToUseOperand(DefIdx)) {
          // Two-address instruction.
          end = baseIndex.getDefIndex();
        } else {
          // Another instruction redefines the register before it is ever read.
          // Then the register is essentially dead at the instruction that
          // defines it. Hence its interval is:
          // [defSlot(def), defSlot(def)+1)
          DEBUG(dbgs() << " dead");
          end = start.getStoreIndex();
        }
        goto exit;
      }
    }
    baseIndex = baseIndex.getNextIndex();
  }

  // The only case we should have a dead physreg here without a killing or
  // redefining instruction where we know it's dead is if it is live-in to
  // the function and never used. Another possible case is that the implicit
  // use of the physical register has been deleted by the two-address pass.
  end = start.getStoreIndex();

exit:
  assert(start < end && "did not find end of interval?");

  // Already exists? Extend old live interval.
  LiveInterval::iterator OldLR = interval.FindLiveRangeContaining(start);
  bool Extend = OldLR != interval.end();
  VNInfo *ValNo = Extend
    ? OldLR->valno : interval.getNextValue(start, CopyMI, true, VNInfoAllocator);
  if (MO.isEarlyClobber() && Extend)
    ValNo->setHasRedefByEC(true);
  LiveRange LR(start, end, ValNo);
  interval.addRange(LR);
  DEBUG(dbgs() << " +" << LR << '\n');
}
void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
                                      MachineBasicBlock::iterator MI,
                                      SlotIndex MIIdx,
                                      MachineOperand& MO,
                                      unsigned MOIdx) {
  if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
    handleVirtualRegisterDef(MBB, MI, MIIdx, MO, MOIdx,
                             getOrCreateInterval(MO.getReg()));
  else if (allocatableRegs_[MO.getReg()]) {
    MachineInstr *CopyMI = NULL;
    if (MI->isCopyLike())
      CopyMI = MI;
    handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                              getOrCreateInterval(MO.getReg()), CopyMI);
    // Def of a register also defines its sub-registers.
    for (const unsigned* AS = tri_->getSubRegisters(MO.getReg()); *AS; ++AS)
      // If MI also modifies the sub-register explicitly, avoid processing it
      // more than once. Do not pass in TRI here so it checks for exact match.
      if (!MI->definesRegister(*AS))
        handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                                  getOrCreateInterval(*AS), 0);
  }
}
void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
                                         SlotIndex MIIdx,
                                         LiveInterval &interval, bool isAlias) {
  DEBUG({
      dbgs() << "\t\tlivein register: ";
      printRegName(interval.reg, tri_);
    });

  // Look for kills; if it reaches a def before it's killed, then it shouldn't
  // be considered a livein.
  MachineBasicBlock::iterator mi = MBB->begin();
  MachineBasicBlock::iterator E = MBB->end();
  // Skip over DBG_VALUE at the start of the MBB.
  if (mi != E && mi->isDebugValue()) {
    while (++mi != E && mi->isDebugValue())
      ;
    if (mi == E)
      // MBB is empty except for DBG_VALUE's.
      return;
  }
  SlotIndex baseIndex = MIIdx;
  SlotIndex start = baseIndex;
  if (getInstructionFromIndex(baseIndex) == 0)
    baseIndex = indexes_->getNextNonNullIndex(baseIndex);

  SlotIndex end = baseIndex;
  bool SeenDefUse = false;

  while (mi != E) {
    if (mi->killsRegister(interval.reg, tri_)) {
      DEBUG(dbgs() << " killed");
      end = baseIndex.getDefIndex();
      SeenDefUse = true;
      break;
    } else if (mi->definesRegister(interval.reg, tri_)) {
      // Another instruction redefines the register before it is ever read.
      // Then the register is essentially dead at the instruction that defines
      // it. Hence its interval is:
      // [defSlot(def), defSlot(def)+1)
      DEBUG(dbgs() << " dead");
      end = start.getStoreIndex();
      SeenDefUse = true;
      break;
    }

    while (++mi != E && mi->isDebugValue())
      // Skip over DBG_VALUE.
      ;
    if (mi != E)
      baseIndex = indexes_->getNextNonNullIndex(baseIndex);
  }
  // Live-in register might not be used at all.
  if (!SeenDefUse) {
    if (isAlias) {
      DEBUG(dbgs() << " dead");
      end = MIIdx.getStoreIndex();
    } else {
      DEBUG(dbgs() << " live through");
      end = getMBBEndIdx(MBB);
    }
  }

  VNInfo *vni =
    interval.getNextValue(getMBBStartIdx(MBB), 0, false, VNInfoAllocator);
  vni->setIsPHIDef(true);
  LiveRange LR(start, end, vni);

  interval.addRange(LR);
  DEBUG(dbgs() << " +" << LR << '\n');
}
/// computeIntervals - Compute the live intervals for virtual registers.
/// For some ordering of the machine instructions [1,N], a live interval
/// is an interval [i, j), where 1 <= i <= j < N, for which a variable is
/// live.
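/// For illustration, with slot indexes in the left column:
///    4   %reg1024 = ...
///    ...
///   20   ... = use %reg1024    (last use)
/// the live interval for %reg1024 would be [4, 20).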
void LiveIntervals::computeIntervals() {
  DEBUG(dbgs() << "********** COMPUTING LIVE INTERVALS **********\n"
               << "********** Function: "
               << ((Value*)mf_->getFunction())->getName() << '\n');

  SmallVector<unsigned, 8> UndefUses;
  for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
       MBBI != E; ++MBBI) {
    MachineBasicBlock *MBB = MBBI;
    if (MBB->empty())
      continue;

    // Track the index of the current machine instr.
    SlotIndex MIIndex = getMBBStartIdx(MBB);
    DEBUG(dbgs() << "BB#" << MBB->getNumber()
          << ":\t\t# derived from " << MBB->getName() << "\n");

    // Create intervals for live-ins to this BB first.
    for (MachineBasicBlock::livein_iterator LI = MBB->livein_begin(),
           LE = MBB->livein_end(); LI != LE; ++LI) {
      handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
      // Multiple live-ins can alias the same register.
      for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
        if (!hasInterval(*AS))
          handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
                               true);
    }

    // Skip over empty initial indices.
    if (getInstructionFromIndex(MIIndex) == 0)
      MIIndex = indexes_->getNextNonNullIndex(MIIndex);
    for (MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();
         MI != miEnd; ++MI) {
      DEBUG(dbgs() << MIIndex << "\t" << *MI);
      if (MI->isDebugValue())
        continue;

      // Handle defs.
      for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
        MachineOperand &MO = MI->getOperand(i);
        if (!MO.isReg() || !MO.getReg())
          continue;

        // Handle register defs - build intervals.
        if (MO.isDef())
          handleRegisterDef(MBB, MI, MIIndex, MO, i);
        else if (MO.isUndef())
          UndefUses.push_back(MO.getReg());
      }

      // Move to the next instr slot.
      MIIndex = indexes_->getNextNonNullIndex(MIIndex);
    }
  }

  // Create empty intervals for registers defined by implicit_def's (except
  // for those implicit_defs that define values which are live out of their
  // blocks).
  for (unsigned i = 0, e = UndefUses.size(); i != e; ++i) {
    unsigned UndefReg = UndefUses[i];
    (void)getOrCreateInterval(UndefReg);
  }
}
LiveInterval* LiveIntervals::createInterval(unsigned reg) {
  float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ? HUGE_VALF : 0.0F;
  return new LiveInterval(reg, Weight);
}

/// dupInterval - Duplicate a live interval. The caller is responsible for
/// managing the allocated memory.
LiveInterval* LiveIntervals::dupInterval(LiveInterval *li) {
  LiveInterval *NewLI = createInterval(li->reg);
  NewLI->Copy(*li, mri_, getVNInfoAllocator());
  return NewLI;
}
//===----------------------------------------------------------------------===//
// Register allocator hooks.
//
/// getReMatImplicitUse - If the remat definition MI has one (for now, we only
/// allow one) virtual register operand, then its uses are implicitly using
/// the register. Returns the virtual register.
unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
                                            MachineInstr *MI) const {
  unsigned RegOp = 0;
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg() || !MO.isUse())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || Reg == li.reg)
      continue;

    if (TargetRegisterInfo::isPhysicalRegister(Reg) &&
        !allocatableRegs_[Reg])
      continue;
    // FIXME: For now, only remat MI with at most one register operand.
    assert(!RegOp &&
           "Can't rematerialize instruction with multiple register operand!");
    RegOp = MO.getReg();
  }
  return RegOp;
}
/// isValNoAvailableAt - Return true if the val# of the specified interval
/// which reaches the given instruction also reaches the specified use index.
bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
                                       SlotIndex UseIdx) const {
  SlotIndex Index = getInstructionIndex(MI);
  VNInfo *ValNo = li.FindLiveRangeContaining(Index)->valno;
  LiveInterval::const_iterator UI = li.FindLiveRangeContaining(UseIdx);
  return UI != li.end() && UI->valno == ValNo;
}
/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       const VNInfo *ValNo, MachineInstr *MI,
                                       SmallVectorImpl<LiveInterval*> &SpillIs,
                                       bool &isLoad) {
  if (DisableReMat)
    return false;

  if (!tii_->isTriviallyReMaterializable(MI, aa_))
    return false;

  // Target-specific code can mark an instruction as being rematerializable
  // if it has one virtual reg use, though it had better be something like
  // a PIC base register which is likely to be live everywhere.
  unsigned ImpUse = getReMatImplicitUse(li, MI);
  if (ImpUse) {
    const LiveInterval &ImpLi = getInterval(ImpUse);
    for (MachineRegisterInfo::use_nodbg_iterator
           ri = mri_->use_nodbg_begin(li.reg), re = mri_->use_nodbg_end();
         ri != re; ++ri) {
      MachineInstr *UseMI = &*ri;
      SlotIndex UseIdx = getInstructionIndex(UseMI);
      if (li.FindLiveRangeContaining(UseIdx)->valno != ValNo)
        continue;
      if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
        return false;
    }

    // If a register operand of the re-materialized instruction is going to
    // be spilled next, then it's not legal to re-materialize this instruction.
    for (unsigned i = 0, e = SpillIs.size(); i != e; ++i)
      if (ImpUse == SpillIs[i]->reg)
        return false;
  }
  return true;
}
/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       const VNInfo *ValNo, MachineInstr *MI) {
  SmallVector<LiveInterval*, 4> Dummy1;
  bool Dummy2;
  return isReMaterializable(li, ValNo, MI, Dummy1, Dummy2);
}
/// isReMaterializable - Returns true if every definition of MI of every
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       SmallVectorImpl<LiveInterval*> &SpillIs,
                                       bool &isLoad) {
  isLoad = false;
  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
       i != e; ++i) {
    const VNInfo *VNI = *i;
    if (VNI->isUnused())
      continue; // Dead val#.
    // Is the def for the val# rematerializable?
    if (!VNI->isDefAccurate())
      return false;
    MachineInstr *ReMatDefMI = getInstructionFromIndex(VNI->def);
    bool DefIsLoad = false;
    if (!ReMatDefMI ||
        !isReMaterializable(li, VNI, ReMatDefMI, SpillIs, DefIsLoad))
      return false;
    isLoad |= DefIsLoad;
  }
  return true;
}
/// FilterFoldedOps - Filter out two-address use operands. Return
/// true if it finds any issue with the operands that ought to prevent
/// folding.
static bool FilterFoldedOps(MachineInstr *MI,
                            SmallVector<unsigned, 2> &Ops,
                            unsigned &MRInfo,
                            SmallVector<unsigned, 2> &FoldOps) {
  MRInfo = 0;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    unsigned OpIdx = Ops[i];
    MachineOperand &MO = MI->getOperand(OpIdx);
    // FIXME: fold subreg use.
    if (MO.getSubReg())
      return true;
    if (MO.isDef())
      MRInfo |= (unsigned)VirtRegMap::isMod;
    else {
      // Filter out two-address use operand(s).
      if (MI->isRegTiedToDefOperand(OpIdx)) {
        MRInfo = VirtRegMap::isModRef;
        continue;
      }
      MRInfo |= (unsigned)VirtRegMap::isRef;
    }
    FoldOps.push_back(OpIdx);
  }
  return false;
}
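// For example: folding a plain reload records MRInfo == VirtRegMap::isRef,
// folding a store of a def records isMod, and a use tied to a def is
// recorded as isModRef while the tied use itself is left out of FoldOps.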
/// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
/// slot / to reg or any rematerialized load into ith operand of specified
/// MI. If it is successful, MI is updated with the newly created MI and
/// returns true.
bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
                                         VirtRegMap &vrm, MachineInstr *DefMI,
                                         SlotIndex InstrIdx,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool isSS, int Slot, unsigned Reg) {
  // If it is an implicit def instruction, just delete it.
  if (MI->isImplicitDef()) {
    RemoveMachineInstrFromMaps(MI);
    vrm.RemoveMachineInstrFromMaps(MI);
    MI->eraseFromParent();
    ++numFolds;
    return true;
  }

  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;

  // The only time it's safe to fold into a two address instruction is when
  // it's folding reload and spill from / into a spill stack slot.
  if (DefMI && (MRInfo & VirtRegMap::isMod))
    return false;

  MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(MI, FoldOps, Slot)
                           : tii_->foldMemoryOperand(MI, FoldOps, DefMI);
  if (fmi) {
    // Remember this instruction uses the spill slot.
    if (isSS) vrm.addSpillSlotUse(Slot, fmi);

    // Attempt to fold the memory reference into the instruction. If
    // we can do this, we don't need to insert spill code.
    if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
      vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
    vrm.transferSpillPts(MI, fmi);
    vrm.transferRestorePts(MI, fmi);
    vrm.transferEmergencySpills(MI, fmi);
    ReplaceMachineInstrInMaps(MI, fmi);
    MI->eraseFromParent();
    MI = fmi;
    ++numFolds;
    return true;
  }
  return false;
}
/// canFoldMemoryOperand - Returns true if the specified load / store
/// folding is possible.
bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool ReMat) const {
  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;

  // It's only legal to remat for a use, not a def.
  if (ReMat && (MRInfo & VirtRegMap::isMod))
    return false;

  return tii_->canFoldMemoryOperand(MI, FoldOps);
}
bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
  LiveInterval::Ranges::const_iterator itr = li.ranges.begin();

  MachineBasicBlock *mbb = indexes_->getMBBCoveringRange(itr->start, itr->end);

  if (mbb == 0)
    return false;

  for (++itr; itr != li.ranges.end(); ++itr) {
    MachineBasicBlock *mbb2 =
      indexes_->getMBBCoveringRange(itr->start, itr->end);

    if (mbb2 != mbb)
      return false;
  }

  return true;
}
/// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of
/// interval on to-be re-materialized operands of MI) with the new register.
void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
                                       MachineInstr *MI, unsigned NewVReg,
                                       VirtRegMap &vrm) {
  // There is an implicit use. That means one of the other operands is
  // being remat'ed and the remat'ed instruction has li.reg as a use
  // operand. Make sure we rewrite that as well.
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
      continue;
    if (!vrm.isReMaterialized(Reg))
      continue;
    MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
    MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg);
    if (UseMO)
      UseMO->setReg(NewVReg);
  }
}
/// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
/// for addIntervalsForSpills to rewrite uses / defs for the given live range.
bool LiveIntervals::
rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
                 bool TrySplit, SlotIndex index, SlotIndex end,
                 MachineInstr *MI,
                 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                 unsigned Slot, int LdSlot,
                 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                 VirtRegMap &vrm,
                 const TargetRegisterClass* rc,
                 SmallVector<int, 4> &ReMatIds,
                 const MachineLoopInfo *loopInfo,
                 unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
                 DenseMap<unsigned,unsigned> &MBBVRegsMap,
                 std::vector<LiveInterval*> &NewLIs) {
  bool CanFold = false;
 RestartInstruction:
  for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
    MachineOperand& mop = MI->getOperand(i);
    if (!mop.isReg())
      continue;
    unsigned Reg = mop.getReg();
    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
      continue;
    if (Reg != li.reg)
      continue;

    bool TryFold = !DefIsReMat;
    bool FoldSS = true; // Default behavior unless it's a remat.
    int FoldSlot = Slot;
    if (DefIsReMat) {
      // If this is the rematerializable definition MI itself and
      // all of its uses are rematerialized, simply delete it.
      if (MI == ReMatOrigDefMI && CanDelete) {
        DEBUG(dbgs() << "\t\t\t\tErasing re-materializable def: "
                     << *MI << '\n');
        RemoveMachineInstrFromMaps(MI);
        vrm.RemoveMachineInstrFromMaps(MI);
        MI->eraseFromParent();
        break;
      }

      // If def for this use can't be rematerialized, then try folding.
      // If def is rematerializable and it's a load, also try folding.
      TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
      if (isLoad) {
        // Try fold loads (from stack slot, constant pool, etc.) into uses.
        FoldSS = isLoadSS;
        FoldSlot = LdSlot;
      }
    }
    // Scan all of the operands of this instruction rewriting operands
    // to use NewVReg instead of li.reg as appropriate. We do this for
    // two reasons:
    //
    //   1. If the instr reads the same spilled vreg multiple times, we
    //      want to reuse the NewVReg.
    //   2. If the instr is a two-addr instruction, we are required to
    //      keep the src/dst regs pinned.
    //
    // Keep track of whether we replace a use and/or def so that we can
    // create the spill interval with the appropriate range.
    SmallVector<unsigned, 2> Ops;
    tie(HasUse, HasDef) = MI->readsWritesVirtualRegister(Reg, &Ops);

    // Create a new virtual register for the spill interval.
    // Create the new register now so we can map the fold instruction
    // to the new register so when it is unfolded we get the correct
    // answer.
    bool CreatedNewVReg = false;
    if (NewVReg == 0) {
      NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      CreatedNewVReg = true;

      // The new virtual register should get the same allocation hints as the
      // old one.
      std::pair<unsigned, unsigned> Hint = mri_->getRegAllocationHint(Reg);
      if (Hint.first || Hint.second)
        mri_->setRegAllocationHint(NewVReg, Hint.first, Hint.second);
    }
    if (!TryFold)
      CanFold = false;
    else {
      // Do not fold load / store here if we are splitting. We'll find an
      // optimal point to insert a load / store later.
      if (!TrySplit) {
        if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
                                 Ops, FoldSS, FoldSlot, NewVReg)) {
          // Folding the load/store can completely change the instruction in
          // unpredictable ways, rescan it from the beginning.

          if (FoldSS) {
            // We need to give the new vreg the same stack slot as the
            // spilled interval.
            vrm.assignVirt2StackSlot(NewVReg, FoldSlot);
          }

          HasUse = false;
          HasDef = false;
          CanFold = false;
          if (isNotInMIMap(MI))
            break;
          goto RestartInstruction;
        }
      } else {
        // We'll try to fold it later if it's profitable.
        CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat);
      }
    }
    mop.setReg(NewVReg);
    if (mop.isImplicit())
      rewriteImplicitOps(li, MI, NewVReg, vrm);

    // Reuse NewVReg for other reads.
    for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
      MachineOperand &mopj = MI->getOperand(Ops[j]);
      mopj.setReg(NewVReg);
      if (mopj.isImplicit())
        rewriteImplicitOps(li, MI, NewVReg, vrm);
    }
    if (CreatedNewVReg) {
      if (DefIsReMat) {
        vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI);
        if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
          // Each valnum may have its own remat id.
          ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
        } else {
          vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
        }
        if (!CanDelete || (HasUse && HasDef)) {
          // If this is a two-addr instruction then its use operands are
          // rematerializable but its def is not. It should be assigned a
          // stack slot.
          vrm.assignVirt2StackSlot(NewVReg, Slot);
        }
      } else {
        vrm.assignVirt2StackSlot(NewVReg, Slot);
      }
    } else if (HasUse && HasDef &&
               vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
      // If this interval hasn't been assigned a stack slot (because earlier
      // def is a deleted remat def), do it now.
      assert(Slot != VirtRegMap::NO_STACK_SLOT);
      vrm.assignVirt2StackSlot(NewVReg, Slot);
    }

    // Re-matting an instruction with virtual register use. Add the
    // register as an implicit use on the use MI.
    if (DefIsReMat && ImpUse)
      MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
    // Create a new register interval for this spill / remat.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    if (CreatedNewVReg) {
      NewLIs.push_back(&nI);
      MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
      if (TrySplit)
        vrm.setIsSplitFromReg(NewVReg, li.reg);
    }

    if (HasUse) {
      if (CreatedNewVReg) {
        LiveRange LR(index.getLoadIndex(), index.getDefIndex(),
                     nI.getNextValue(SlotIndex(), 0, false, VNInfoAllocator));
        DEBUG(dbgs() << " +" << LR);
        nI.addRange(LR);
      } else {
        // Extend the split live interval to this def / use.
        SlotIndex End = index.getDefIndex();
        LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
                     nI.getValNumInfo(nI.getNumValNums()-1));
        DEBUG(dbgs() << " +" << LR);
        nI.addRange(LR);
      }
    }
    if (HasDef) {
      LiveRange LR(index.getDefIndex(), index.getStoreIndex(),
                   nI.getNextValue(SlotIndex(), 0, false, VNInfoAllocator));
      DEBUG(dbgs() << " +" << LR);
      nI.addRange(LR);
    }

    DEBUG({
        dbgs() << "\t\t\t\tAdded new interval: ";
        nI.print(dbgs(), tri_);
        dbgs() << '\n';
      });
  }
  return CanFold;
}
/// anyKillInMBBAfterIdx - Returns true if the specified live interval is
/// killed within MBB after the given index.
bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
                                         const VNInfo *VNI,
                                         MachineBasicBlock *MBB,
                                         SlotIndex Idx) const {
  return li.killedInRange(Idx.getNextSlot(), getMBBEndIdx(MBB));
}
/// RewriteInfo - Keep track of machine instrs that will be rewritten
/// during spilling.
namespace {
  struct RewriteInfo {
    SlotIndex Index;
    MachineInstr *MI;
    RewriteInfo(SlotIndex i, MachineInstr *mi) : Index(i), MI(mi) {}
  };
  struct RewriteInfoCompare {
    bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
      return LHS.Index < RHS.Index;
    }
  };
}
void LiveIntervals::
rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
                    LiveInterval::Ranges::const_iterator &I,
                    MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                    unsigned Slot, int LdSlot,
                    bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                    VirtRegMap &vrm,
                    const TargetRegisterClass* rc,
                    SmallVector<int, 4> &ReMatIds,
                    const MachineLoopInfo *loopInfo,
                    BitVector &SpillMBBs,
                    DenseMap<unsigned, std::vector<SRInfo> > &SpillIdxes,
                    BitVector &RestoreMBBs,
                    DenseMap<unsigned, std::vector<SRInfo> > &RestoreIdxes,
                    DenseMap<unsigned,unsigned> &MBBVRegsMap,
                    std::vector<LiveInterval*> &NewLIs) {
  bool AllCanFold = true;
  unsigned NewVReg = 0;
  SlotIndex start = I->start.getBaseIndex();
  SlotIndex end = I->end.getPrevSlot().getBaseIndex().getNextIndex();
  // First collect all the defs / uses in this live range that will be
  // rewritten. Make sure they are sorted according to instruction index.
  std::vector<RewriteInfo> RewriteMIs;
  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
         re = mri_->reg_end(); ri != re; ) {
    MachineInstr *MI = &*ri;
    MachineOperand &O = ri.getOperand();
    ++ri;
    if (MI->isDebugValue()) {
      // Modify DBG_VALUE now that the value is in a spill slot.
      if (Slot != VirtRegMap::MAX_STACK_SLOT || isLoadSS) {
        uint64_t Offset = MI->getOperand(1).getImm();
        const MDNode *MDPtr = MI->getOperand(2).getMetadata();
        DebugLoc DL = MI->getDebugLoc();
        int FI = isLoadSS ? LdSlot : (int)Slot;
        if (MachineInstr *NewDV = tii_->emitFrameIndexDebugValue(*mf_, FI,
                                                           Offset, MDPtr, DL)) {
          DEBUG(dbgs() << "Modifying debug info due to spill:" << "\t" << *MI);
          ReplaceMachineInstrInMaps(MI, NewDV);
          MachineBasicBlock *MBB = MI->getParent();
          MBB->insert(MBB->erase(MI), NewDV);
          continue;
        }
      }

      DEBUG(dbgs() << "Removing debug info due to spill:" << "\t" << *MI);
      RemoveMachineInstrFromMaps(MI);
      vrm.RemoveMachineInstrFromMaps(MI);
      MI->eraseFromParent();
      continue;
    }
    assert(!(O.isImplicit() && O.isUse()) &&
           "Spilling register that's used as implicit use?");
    SlotIndex index = getInstructionIndex(MI);
    if (index < start || index >= end)
      continue;

    if (O.isUndef())
      // Must be defined by an implicit def. It should not be spilled. Note,
      // this is for correctness reasons. e.g.
      // 8   %reg1024<def> = IMPLICIT_DEF
      // 12  %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
      // The live range [12, 14) is not part of the r1024 live interval since
      // it's defined by an implicit def. It will not conflict with the live
      // interval of r1025. Now suppose both registers are spilled; you can
      // easily see a situation where both registers are reloaded before
      // the INSERT_SUBREG and both target registers would overlap.
      continue;
    RewriteMIs.push_back(RewriteInfo(index, MI));
  }
  std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());
  unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
  // Now rewrite the defs and uses.
  for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
    RewriteInfo &rwi = RewriteMIs[i];
    ++i;
    SlotIndex index = rwi.Index;
    MachineInstr *MI = rwi.MI;
    // If MI defs and/or uses the same register multiple times, then there
    // are multiple entries.
    while (i != e && RewriteMIs[i].MI == MI) {
      assert(RewriteMIs[i].Index == index);
      ++i;
    }
    MachineBasicBlock *MBB = MI->getParent();

    if (ImpUse && MI != ReMatDefMI) {
      // Re-matting an instruction with virtual register use. Prevent interval
      // from being spilled.
      getInterval(ImpUse).markNotSpillable();
    }

    unsigned MBBId = MBB->getNumber();
    unsigned ThisVReg = 0;
    if (TrySplit) {
      DenseMap<unsigned,unsigned>::iterator NVI = MBBVRegsMap.find(MBBId);
      if (NVI != MBBVRegsMap.end()) {
        ThisVReg = NVI->second;
        // One common case:
        //   x = use
        //   ...
        //   def = ...
        //       = use
        // It's better to start a new interval to avoid artificially
        // extending the new interval.
        if (MI->readsWritesVirtualRegister(li.reg) ==
            std::make_pair(false,true)) {
          MBBVRegsMap.erase(MBB->getNumber());
          ThisVReg = 0;
        }
      }
    }

    bool IsNew = ThisVReg == 0;
    if (IsNew) {
      // This ends the previous live interval. If all of its defs / uses
      // can be folded, give it a low spill weight.
      if (NewVReg && TrySplit && AllCanFold) {
        LiveInterval &nI = getOrCreateInterval(NewVReg);
        nI.weight /= 10.0F;
      }
      AllCanFold = true;
    }
    NewVReg = ThisVReg;
    bool HasDef = false;
    bool HasUse = false;
    bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
                         index, end, MI, ReMatOrigDefMI, ReMatDefMI,
                         Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                         CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
                         ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs);
    if (!HasDef && !HasUse)
      continue;

    AllCanFold &= CanFold;

    // Update weight of spill interval.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    if (!TrySplit) {
      // The spill weight is now infinity as it cannot be spilled again.
      nI.markNotSpillable();
      continue;
    }
    // Keep track of the last def and first use in each MBB.
    if (HasDef) {
      if (MI != ReMatOrigDefMI || !CanDelete) {
        bool HasKill = false;
        if (!HasUse)
          HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, index.getDefIndex());
        else {
          // If this is two-address code, then this index starts a new VNInfo.
          const VNInfo *VNI = li.findDefinedVNInfoForRegInt(index.getDefIndex());
          if (VNI)
            HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, index.getDefIndex());
        }
        DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
          SpillIdxes.find(MBBId);
        if (!HasKill) {
          if (SII == SpillIdxes.end()) {
            std::vector<SRInfo> S;
            S.push_back(SRInfo(index, NewVReg, true));
            SpillIdxes.insert(std::make_pair(MBBId, S));
          } else if (SII->second.back().vreg != NewVReg) {
            SII->second.push_back(SRInfo(index, NewVReg, true));
          } else if (index > SII->second.back().index) {
            // If there is an earlier def and this is a two-address
            // instruction, then it's not possible to fold the store (which
            // would also fold the load).
            SRInfo &Info = SII->second.back();
            Info.index = index;
            Info.canFold = !HasUse;
          }
          SpillMBBs.set(MBBId);
        } else if (SII != SpillIdxes.end() &&
                   SII->second.back().vreg == NewVReg &&
                   index > SII->second.back().index) {
          // There is an earlier def that's not killed (must be two-address).
          // The spill is no longer needed.
          SII->second.pop_back();
          if (SII->second.empty()) {
            SpillIdxes.erase(MBBId);
            SpillMBBs.reset(MBBId);
          }
        }
      }
    }
    if (HasUse) {
      DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
        SpillIdxes.find(MBBId);
      if (SII != SpillIdxes.end() &&
          SII->second.back().vreg == NewVReg &&
          index > SII->second.back().index)
        // Use(s) following the last def, it's not safe to fold the spill.
        SII->second.back().canFold = false;
      DenseMap<unsigned, std::vector<SRInfo> >::iterator RII =
        RestoreIdxes.find(MBBId);
      if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
        // If we are splitting live intervals, only fold if it's the first
        // use and there isn't another use later in the MBB.
        RII->second.back().canFold = false;
      else if (IsNew) {
        // Only need a reload if there isn't an earlier def / use.
        if (RII == RestoreIdxes.end()) {
          std::vector<SRInfo> Infos;
          Infos.push_back(SRInfo(index, NewVReg, true));
          RestoreIdxes.insert(std::make_pair(MBBId, Infos));
        } else {
          RII->second.push_back(SRInfo(index, NewVReg, true));
        }
        RestoreMBBs.set(MBBId);
      }
    }

    // Update spill weight.
    unsigned loopDepth = loopInfo->getLoopDepth(MBB);
    nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
  }

  if (NewVReg && TrySplit && AllCanFold) {
    // If all of its defs / uses can be folded, give it a low spill weight.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    nI.weight /= 10.0F;
  }
}
bool LiveIntervals::alsoFoldARestore(int Id, SlotIndex index,
                                     unsigned vr, BitVector &RestoreMBBs,
                        DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
  if (!RestoreMBBs[Id])
    return false;
  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
    if (Restores[i].index == index &&
        Restores[i].vreg == vr &&
        Restores[i].canFold)
      return true;
  return false;
}

void LiveIntervals::eraseRestoreInfo(int Id, SlotIndex index,
                                     unsigned vr, BitVector &RestoreMBBs,
                        DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
  if (!RestoreMBBs[Id])
    return;
  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
    if (Restores[i].index == index && Restores[i].vreg == vr)
      Restores[i].index = SlotIndex();
}
/// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being
/// spilled and create empty intervals for their uses.
void
LiveIntervals::handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm,
                                    const TargetRegisterClass* rc,
                                    std::vector<LiveInterval*> &NewLIs) {
  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
         re = mri_->reg_end(); ri != re; ) {
    MachineOperand &O = ri.getOperand();
    MachineInstr *MI = &*ri;
    ++ri;
    if (MI->isDebugValue()) {
      // Remove debug info for now.
      O.setReg(0U);
      DEBUG(dbgs() << "Removing debug info due to spill:" << "\t" << *MI);
      continue;
    }
    if (O.isDef()) {
      assert(MI->isImplicitDef() &&
             "Register def was not rewritten?");
      RemoveMachineInstrFromMaps(MI);
      vrm.RemoveMachineInstrFromMaps(MI);
      MI->eraseFromParent();
    } else {
      // This must be a use of an implicit_def so it's not part of the live
      // interval. Create a new empty live interval for it.
      // FIXME: Can we simply erase some of the instructions? e.g. Stores?
      unsigned NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      vrm.setIsImplicitlyDefined(NewVReg);
      NewLIs.push_back(&getOrCreateInterval(NewVReg));
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand &MO = MI->getOperand(i);
        if (MO.isReg() && MO.getReg() == li.reg) {
          MO.setReg(NewVReg);
          MO.setIsUndef();
        }
      }
    }
  }
}
float
LiveIntervals::getSpillWeight(bool isDef, bool isUse, unsigned loopDepth) {
  // Limit the loop depth ridiculousness.
  if (loopDepth > 200)
    loopDepth = 200;

  // The loop depth is used to roughly estimate the number of times the
  // instruction is executed. Something like 10^d is simple, but will quickly
  // overflow a float. This expression behaves like 10^d for small d, but is
  // more tempered for large d. At d=200 we get 6.7e33 which leaves a bit of
  // headroom before overflow.
  float lc = std::pow(1 + (100.0f / (loopDepth+10)), (float)loopDepth);

  return (isDef + isUse) * lc;
}
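// A quick worked example of the weight curve above: at loopDepth = 0 the
// factor is (1 + 100/10)^0 = 1; at loopDepth = 1 it is (1 + 100/11)^1,
// about 10.1; at loopDepth = 3 it is (1 + 100/13)^3, about 657. So it
// tracks 10^d loosely at small depths while staying finite at depth 200.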
void
LiveIntervals::normalizeSpillWeights(std::vector<LiveInterval*> &NewLIs) {
  for (unsigned i = 0, e = NewLIs.size(); i != e; ++i)
    normalizeSpillWeight(*NewLIs[i]);
}
std::vector<LiveInterval*> LiveIntervals::
addIntervalsForSpills(const LiveInterval &li,
                      SmallVectorImpl<LiveInterval*> &SpillIs,
                      const MachineLoopInfo *loopInfo, VirtRegMap &vrm) {
  assert(li.isSpillable() && "attempt to spill already spilled interval!");

  DEBUG({
      dbgs() << "\t\t\t\tadding intervals for spills for interval: ";
      li.print(dbgs(), tri_);
      dbgs() << '\n';
    });

  // Each bit specifies whether a spill is required in the MBB.
  BitVector SpillMBBs(mf_->getNumBlockIDs());
  DenseMap<unsigned, std::vector<SRInfo> > SpillIdxes;
  BitVector RestoreMBBs(mf_->getNumBlockIDs());
  DenseMap<unsigned, std::vector<SRInfo> > RestoreIdxes;
  DenseMap<unsigned,unsigned> MBBVRegsMap;
  std::vector<LiveInterval*> NewLIs;
  const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
  unsigned NumValNums = li.getNumValNums();
  SmallVector<MachineInstr*, 4> ReMatDefs;
  ReMatDefs.resize(NumValNums, NULL);
  SmallVector<MachineInstr*, 4> ReMatOrigDefs;
  ReMatOrigDefs.resize(NumValNums, NULL);
  SmallVector<int, 4> ReMatIds;
  ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
  BitVector ReMatDelete(NumValNums);
  unsigned Slot = VirtRegMap::MAX_STACK_SLOT;

  // Spilling a split live interval. It cannot be split any further. Also,
  // it's guaranteed to be a single val# / range interval.
  if (vrm.getPreSplitReg(li.reg)) {
    vrm.setIsSplitFromReg(li.reg, 0);
    // Unset the split kill marker on the last use.
    SlotIndex KillIdx = vrm.getKillPoint(li.reg);
    if (KillIdx != SlotIndex()) {
      MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
      assert(KillMI && "Last use disappeared?");
      int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
      assert(KillOp != -1 && "Last use disappeared?");
      KillMI->getOperand(KillOp).setIsKill(false);
    }
    vrm.removeKillPoint(li.reg);
    bool DefIsReMat = vrm.isReMaterialized(li.reg);
    Slot = vrm.getStackSlot(li.reg);
    assert(Slot != VirtRegMap::MAX_STACK_SLOT);
    MachineInstr *ReMatDefMI = DefIsReMat ?
      vrm.getReMaterializedMI(li.reg) : NULL;
    int LdSlot = 0;
    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
    bool isLoad = isLoadSS ||
      (DefIsReMat && (ReMatDefMI->getDesc().canFoldAsLoad()));
    bool IsFirstRange = true;
    for (LiveInterval::Ranges::const_iterator
           I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
      // If this is a split live interval with multiple ranges, it means there
      // are two-address instructions that re-defined the value. Only the
      // first def can be rematerialized!
      if (IsFirstRange) {
        // Note ReMatOrigDefMI has already been deleted.
        rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
                             Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                             false, vrm, rc, ReMatIds, loopInfo,
                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                             MBBVRegsMap, NewLIs);
      } else {
        rewriteInstructionsForSpills(li, false, I, NULL, 0,
                             Slot, 0, false, false, false,
                             false, vrm, rc, ReMatIds, loopInfo,
                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                             MBBVRegsMap, NewLIs);
      }
      IsFirstRange = false;
    }

    handleSpilledImpDefs(li, vrm, rc, NewLIs);
    normalizeSpillWeights(NewLIs);
    return NewLIs;
  }
  bool TrySplit = !intervalIsInOneMBB(li);
  if (TrySplit)
    ++numSplits;
  bool NeedStackSlot = false;
  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
       i != e; ++i) {
    const VNInfo *VNI = *i;
    unsigned VN = VNI->id;
    if (VNI->isUnused())
      continue; // Dead val#.
    // Is the def for the val# rematerializable?
    MachineInstr *ReMatDefMI = VNI->isDefAccurate()
      ? getInstructionFromIndex(VNI->def) : 0;
    bool dummy;
    if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, SpillIs, dummy)) {
      // Remember how to remat the def of this val#.
      ReMatOrigDefs[VN] = ReMatDefMI;
      // Original def may be modified so we have to make a copy here.
      MachineInstr *Clone = mf_->CloneMachineInstr(ReMatDefMI);
      CloneMIs.push_back(Clone);
      ReMatDefs[VN] = Clone;

      bool CanDelete = true;
      if (VNI->hasPHIKill()) {
        // A kill is a phi node, not all of its uses can be rematerialized.
        // It must not be deleted.
        CanDelete = false;
        // Need a stack slot if there is any live range where uses cannot be
        // rematerialized.
        NeedStackSlot = true;
      }
      if (CanDelete)
        ReMatDelete.set(VN);
    } else {
      // Need a stack slot if there is any live range where uses cannot be
      // rematerialized.
      NeedStackSlot = true;
    }
  }
  // One stack slot per live interval.
  if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0) {
    if (vrm.getStackSlot(li.reg) == VirtRegMap::NO_STACK_SLOT)
      Slot = vrm.assignVirt2StackSlot(li.reg);

    // This case only occurs when the prealloc splitter has already assigned
    // a stack slot to this vreg.
    else
      Slot = vrm.getStackSlot(li.reg);
  }
  // Create new intervals and rewrite defs and uses.
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
    MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
    bool DefIsReMat = ReMatDefMI != NULL;
    bool CanDelete = ReMatDelete[I->valno->id];
    int LdSlot = 0;
    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
    bool isLoad = isLoadSS ||
      (DefIsReMat && ReMatDefMI->getDesc().canFoldAsLoad());
    rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
                           Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                           CanDelete, vrm, rc, ReMatIds, loopInfo,
                           SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                           MBBVRegsMap, NewLIs);
  }

  // Insert spills / restores only if we are splitting; if not, we're done.
  if (!TrySplit) {
    handleSpilledImpDefs(li, vrm, rc, NewLIs);
    normalizeSpillWeights(NewLIs);
    return NewLIs;
  }
  SmallPtrSet<LiveInterval*, 4> AddedKill;
  SmallVector<unsigned, 2> Ops;
  if (NeedStackSlot) {
    int Id = SpillMBBs.find_first();
    while (Id != -1) {
      std::vector<SRInfo> &spills = SpillIdxes[Id];
      for (unsigned i = 0, e = spills.size(); i != e; ++i) {
        SlotIndex index = spills[i].index;
        unsigned VReg = spills[i].vreg;
        LiveInterval &nI = getOrCreateInterval(VReg);
        bool isReMat = vrm.isReMaterialized(VReg);
        MachineInstr *MI = getInstructionFromIndex(index);
        bool CanFold = false;
        bool FoundUse = false;
        Ops.clear();
        if (spills[i].canFold) {
          CanFold = true;
          for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
            MachineOperand &MO = MI->getOperand(j);
            if (!MO.isReg() || MO.getReg() != VReg)
              continue;

            Ops.push_back(j);
            if (MO.isDef())
              continue;
            if (isReMat ||
                (!FoundUse && !alsoFoldARestore(Id, index, VReg,
                                                RestoreMBBs, RestoreIdxes))) {
              // MI has two-address uses of the same register. If the use
              // isn't the first and only use in the BB, then we can't fold
              // it. FIXME: Move this to rewriteInstructionsForSpills.
              CanFold = false;
              break;
            }
            FoundUse = true;
          }
        }
        // Fold the store into the def if possible.
        bool Folded = false;
        if (CanFold && !Ops.empty()) {
          if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot,VReg)){
            Folded = true;
            if (FoundUse) {
              // Also folded uses, do not issue a load.
              eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
              nI.removeRange(index.getLoadIndex(), index.getDefIndex());
            }
            nI.removeRange(index.getDefIndex(), index.getStoreIndex());
          }
        }

        // Otherwise tell the spiller to issue a spill.
        if (!Folded) {
          LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
          bool isKill = LR->end == index.getStoreIndex();
          if (!MI->registerDefIsDead(nI.reg))
            // No need to spill a dead def.
            vrm.addSpillPoint(VReg, isKill, MI);
          if (isKill)
            AddedKill.insert(&nI);
        }
      }
      Id = SpillMBBs.find_next(Id);
    }
  }
  int Id = RestoreMBBs.find_first();
  while (Id != -1) {
    std::vector<SRInfo> &restores = RestoreIdxes[Id];
    for (unsigned i = 0, e = restores.size(); i != e; ++i) {
      SlotIndex index = restores[i].index;
      if (index == SlotIndex())
        continue;
      unsigned VReg = restores[i].vreg;
      LiveInterval &nI = getOrCreateInterval(VReg);
      bool isReMat = vrm.isReMaterialized(VReg);
      MachineInstr *MI = getInstructionFromIndex(index);
      bool CanFold = false;
      Ops.clear();
      if (restores[i].canFold) {
        CanFold = true;
        for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
          MachineOperand &MO = MI->getOperand(j);
          if (!MO.isReg() || MO.getReg() != VReg)
            continue;

          if (MO.isDef()) {
            // If this restore were to be folded, it would have been folded
            // already.
            CanFold = false;
            break;
          }
          Ops.push_back(j);
        }
      }

      // Fold the load into the use if possible.
      bool Folded = false;
      if (CanFold && !Ops.empty()) {
        if (!isReMat)
          Folded = tryFoldMemoryOperand(MI, vrm, NULL,index,Ops,true,Slot,VReg);
        else {
          MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
          int LdSlot = 0;
          bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
          // If the rematerializable def is a load, also try to fold it.
          if (isLoadSS || ReMatDefMI->getDesc().canFoldAsLoad())
            Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
                                          Ops, isLoadSS, LdSlot, VReg);

          unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
          if (ImpUse) {
            // Re-matting an instruction with virtual register use. Add the
            // register as an implicit use on the use MI and mark the register
            // interval as unspillable.
            LiveInterval &ImpLi = getInterval(ImpUse);
            ImpLi.markNotSpillable();
            MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
          }
        }
      }
      // If folding is not possible / failed, then tell the spiller to issue a
      // load / rematerialization for us.
      if (Folded)
        nI.removeRange(index.getLoadIndex(), index.getDefIndex());
      else
        vrm.addRestorePoint(VReg, MI);
    }
    Id = RestoreMBBs.find_next(Id);
  }
  // Finalize intervals: add kills, finalize spill weights, and filter out
  // dead intervals.
  std::vector<LiveInterval*> RetNewLIs;
  for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
    LiveInterval *LI = NewLIs[i];
    if (!LI->empty()) {
      if (!AddedKill.count(LI)) {
        LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
        SlotIndex LastUseIdx = LR->end.getBaseIndex();
        MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
        int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
        assert(UseIdx != -1);
        if (!LastUse->isRegTiedToDefOperand(UseIdx)) {
          LastUse->getOperand(UseIdx).setIsKill();
          vrm.addKillPoint(LI->reg, LastUseIdx);
        }
      }
      RetNewLIs.push_back(LI);
    }
  }

  handleSpilledImpDefs(li, vrm, rc, RetNewLIs);
  normalizeSpillWeights(RetNewLIs);
  return RetNewLIs;
}
/// hasAllocatableSuperReg - Return true if the specified physical register
/// has any super register that's allocatable.
bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS)
    if (allocatableRegs_[*AS] && hasInterval(*AS))
      return true;
  return false;
}

/// getRepresentativeReg - Find the largest super register of the specified
/// physical register.
unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
  // Find the largest super-register that is allocatable.
  unsigned BestReg = Reg;
  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
    unsigned SuperReg = *AS;
    if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) {
      BestReg = SuperReg;
      break;
    }
  }
  return BestReg;
}
/// getNumConflictsWithPhysReg - Return the number of uses and defs of the
/// specified interval that conflict with the specified physical register.
unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
                                                   unsigned PhysReg) const {
  unsigned NumConflicts = 0;
  const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg));
  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
         E = mri_->reg_end(); I != E; ++I) {
    MachineOperand &O = I.getOperand();
    MachineInstr *MI = O.getParent();
    if (MI->isDebugValue())
      continue;
    SlotIndex Index = getInstructionIndex(MI);
    if (pli.liveAt(Index))
      ++NumConflicts;
  }
  return NumConflicts;
}
/// spillPhysRegAroundRegDefsUses - Spill the specified physical register
/// around all defs and uses of the specified interval. Return true if it
/// was able to cut its interval.
bool LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
                                            unsigned PhysReg, VirtRegMap &vrm) {
  unsigned SpillReg = getRepresentativeReg(PhysReg);

  for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
    // If there are registers which alias PhysReg but are not a sub-register
    // of the chosen representative super register, we can't handle it yet,
    // so assert.
    assert(*AS == SpillReg || !allocatableRegs_[*AS] || !hasInterval(*AS) ||
           tri_->isSuperRegister(*AS, SpillReg));

  bool Cut = false;
  SmallVector<unsigned, 4> PRegs;
  if (hasInterval(SpillReg))
    PRegs.push_back(SpillReg);
  else {
    SmallSet<unsigned, 4> Added;
    for (const unsigned* AS = tri_->getSubRegisters(SpillReg); *AS; ++AS)
      if (Added.insert(*AS) && hasInterval(*AS)) {
        PRegs.push_back(*AS);
        for (const unsigned* ASS = tri_->getSubRegisters(*AS); *ASS; ++ASS)
          Added.insert(*ASS);
      }
  }
  SmallPtrSet<MachineInstr*, 8> SeenMIs;
  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
         E = mri_->reg_end(); I != E; ++I) {
    MachineOperand &O = I.getOperand();
    MachineInstr *MI = O.getParent();
    if (MI->isDebugValue() || SeenMIs.count(MI))
      continue;
    SeenMIs.insert(MI);
    SlotIndex Index = getInstructionIndex(MI);
    for (unsigned i = 0, e = PRegs.size(); i != e; ++i) {
      unsigned PReg = PRegs[i];
      LiveInterval &pli = getInterval(PReg);
      if (!pli.liveAt(Index))
        continue;
      vrm.addEmergencySpill(PReg, MI);
      SlotIndex StartIdx = Index.getLoadIndex();
      SlotIndex EndIdx = Index.getNextIndex().getBaseIndex();
      if (pli.isInOneLiveRange(StartIdx, EndIdx)) {
        pli.removeRange(StartIdx, EndIdx);
        Cut = true;
      } else {
        std::string msg;
        raw_string_ostream Msg(msg);
        Msg << "Ran out of registers during register allocation!";
        if (MI->isInlineAsm()) {
          Msg << "\nPlease check your inline asm statement for invalid "
              << "constraints:\n";
          MI->print(Msg, tm_);
        }
        report_fatal_error(Msg.str());
      }
      for (const unsigned* AS = tri_->getSubRegisters(PReg); *AS; ++AS) {
        if (!hasInterval(*AS))
          continue;
        LiveInterval &spli = getInterval(*AS);
        if (spli.liveAt(Index))
          spli.removeRange(Index.getLoadIndex(),
                           Index.getNextIndex().getBaseIndex());
      }
    }
  }
  return Cut;
}
LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg,
                                                  MachineInstr* startInst) {
  LiveInterval& Interval = getOrCreateInterval(reg);
  VNInfo* VN = Interval.getNextValue(
    SlotIndex(getInstructionIndex(startInst).getDefIndex()),
    startInst, true, getVNInfoAllocator());
  VN->setHasPHIKill(true);
  LiveRange LR(
    SlotIndex(getInstructionIndex(startInst).getDefIndex()),
    getMBBEndIdx(startInst->getParent()), VN);
  Interval.addRange(LR);

  return LR;
}