1 //===-- LiveIntervalAnalysis.cpp - Live Interval Analysis -----------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file implements the LiveInterval analysis pass which is used
11 // by the Linear Scan Register allocator. This pass linearizes the
12 // basic blocks of the function in DFS order and uses the
13 // LiveVariables pass to conservatively compute live intervals for
14 // each virtual and physical register.
16 //===----------------------------------------------------------------------===//
18 #define DEBUG_TYPE "liveintervals"
19 #include "llvm/CodeGen/LiveIntervalAnalysis.h"
20 #include "VirtRegMap.h"
21 #include "llvm/Value.h"
22 #include "llvm/Analysis/AliasAnalysis.h"
23 #include "llvm/CodeGen/LiveVariables.h"
24 #include "llvm/CodeGen/MachineFrameInfo.h"
25 #include "llvm/CodeGen/MachineInstr.h"
26 #include "llvm/CodeGen/MachineInstrBuilder.h"
27 #include "llvm/CodeGen/MachineLoopInfo.h"
28 #include "llvm/CodeGen/MachineMemOperand.h"
29 #include "llvm/CodeGen/MachineRegisterInfo.h"
30 #include "llvm/CodeGen/Passes.h"
31 #include "llvm/CodeGen/ProcessImplicitDefs.h"
32 #include "llvm/Target/TargetRegisterInfo.h"
33 #include "llvm/Target/TargetInstrInfo.h"
34 #include "llvm/Target/TargetMachine.h"
35 #include "llvm/Target/TargetOptions.h"
36 #include "llvm/Support/CommandLine.h"
37 #include "llvm/Support/Debug.h"
38 #include "llvm/Support/ErrorHandling.h"
39 #include "llvm/Support/raw_ostream.h"
40 #include "llvm/ADT/DepthFirstIterator.h"
41 #include "llvm/ADT/SmallSet.h"
42 #include "llvm/ADT/Statistic.h"
43 #include "llvm/ADT/STLExtras.h"
49 // Hidden options for help debugging.
// -disable-rematerialization: turn off rematerialization during spilling.
50 static cl::opt<bool> DisableReMat("disable-rematerialization",
51 cl::init(false), cl::Hidden);
// -fast-spill: opt into the fast-spilling code path.
// NOTE(review): the use site of this flag is not visible in this excerpt.
53 static cl::opt<bool> EnableFastSpilling("fast-spill",
54 cl::init(false), cl::Hidden);
// Pass statistics, reported under -stats.
56 STATISTIC(numIntervals , "Number of original intervals");
57 STATISTIC(numFolds , "Number of loads/stores folded into instructions");
58 STATISTIC(numSplits , "Number of intervals split");
// Pass ID and static registration with the pass registry.
60 char LiveIntervals::ID = 0;
61 static RegisterPass<LiveIntervals> X("liveintervals", "Live Interval Analysis");
/// getAnalysisUsage - Declare the analyses this pass requires and those it
/// preserves. Analyses listed as both required and preserved (AliasAnalysis,
/// LiveVariables, ProcessImplicitDefs, SlotIndexes) are consumed here but
/// left valid for later passes.
63 void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
65 AU.addRequired<AliasAnalysis>();
66 AU.addPreserved<AliasAnalysis>();
67 AU.addPreserved<LiveVariables>();
68 AU.addRequired<LiveVariables>();
69 AU.addPreservedID(MachineLoopInfoID);
70 AU.addPreservedID(MachineDominatorsID);
73 AU.addPreservedID(PHIEliminationID);
74 AU.addRequiredID(PHIEliminationID);
77 AU.addRequiredID(TwoAddressInstructionPassID);
78 AU.addPreserved<ProcessImplicitDefs>();
79 AU.addRequired<ProcessImplicitDefs>();
80 AU.addPreserved<SlotIndexes>();
// Transitive: SlotIndexes must stay alive as long as this pass' results are.
81 AU.addRequiredTransitive<SlotIndexes>();
82 MachineFunctionPass::getAnalysisUsage(AU);
/// releaseMemory - Drop all per-function state: the LiveInterval objects in
/// r2iMap_, the VNInfo allocator's backing memory, and any cloned
/// instructions left in CloneMIs.
85 void LiveIntervals::releaseMemory() {
86 // Free the live intervals themselves.
87 for (DenseMap<unsigned, LiveInterval*>::iterator I = r2iMap_.begin(),
88 E = r2iMap_.end(); I != E; ++I) {
95 // Release VNInfo memory regions after all VNInfo objects are dtor'd.
96 VNInfoAllocator.Reset();
// Delete any cloned instructions still owned by this analysis.
// NOTE(review): the matching CloneMIs.pop_back() is not visible in this
// excerpt -- confirm it exists, otherwise this loop would not terminate.
97 while (!CloneMIs.empty()) {
98 MachineInstr *MI = CloneMIs.back();
100 mf_->DeleteMachineInstr(MI);
104 /// runOnMachineFunction - Register allocate the whole function
// Caches target/analysis handles for the function, then computes intervals.
106 bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
// NOTE(review): mf_ is read below but its assignment (mf_ = &fn) is not
// visible in this excerpt -- confirm it is set before getRegInfo() runs.
108 mri_ = &mf_->getRegInfo();
109 tm_ = &fn.getTarget();
110 tri_ = tm_->getRegisterInfo();
111 tii_ = tm_->getInstrInfo();
112 aa_ = &getAnalysis<AliasAnalysis>();
113 lv_ = &getAnalysis<LiveVariables>();
114 indexes_ = &getAnalysis<SlotIndexes>();
115 allocatableRegs_ = tri_->getAllocatableSet(fn);
// Account the intervals built for this function in the -stats counter.
119 numIntervals += getNumIntervals();
125 /// print - Implement the dump method.
/// Prints every interval in the register-to-interval map to OS, using tri_
/// to render register names. The Module argument is unused.
126 void LiveIntervals::print(raw_ostream &OS, const Module* ) const {
127 OS << "********** INTERVALS **********\n";
128 for (const_iterator I = begin(), E = end(); I != E; ++I) {
129 I->second->print(OS, tri_);
/// printInstrs - Print every machine instruction in the function to OS,
/// one block at a time, each instruction prefixed with its SlotIndex.
136 void LiveIntervals::printInstrs(raw_ostream &OS) const {
137 OS << "********** MACHINEINSTRS **********\n";
139 for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end();
140 mbbi != mbbe; ++mbbi) {
141 OS << "BB#" << mbbi->getNumber()
142 << ":\t\t# derived from " << mbbi->getName() << "\n";
143 for (MachineBasicBlock::iterator mii = mbbi->begin(),
144 mie = mbbi->end(); mii != mie; ++mii) {
// DBG_VALUEs have no SlotIndex; they get special handling here.
// NOTE(review): the body of this 'if' is not visible in this excerpt.
145 if (mii->isDebugValue())
148 OS << getInstructionIndex(mii) << '\t' << *mii;
/// dumpInstrs - Debugging helper; presumably forwards to printInstrs on the
/// debug stream (body elided in this excerpt -- confirm).
153 void LiveIntervals::dumpInstrs() const {
/// conflictsWithPhysReg - Check whether assigning physical register 'reg'
/// to 'li' would conflict with another use or def of 'reg' inside li's
/// single live range. Copies to/from li.reg itself are tolerated. Only
/// intervals with exactly one range, contained in one basic block, are
/// analyzed; fancier cases bail out early.
157 bool LiveIntervals::conflictsWithPhysReg(const LiveInterval &li,
158 VirtRegMap &vrm, unsigned reg) {
159 // We don't handle fancy stuff crossing basic block boundaries
160 if (li.ranges.size() != 1)
162 const LiveRange &range = li.ranges.front();
163 SlotIndex idx = range.start.getBaseIndex();
164 SlotIndex end = range.end.getPrevSlot().getBaseIndex().getNextIndex();
166 // Skip deleted instructions
167 MachineInstr *firstMI = getInstructionFromIndex(idx);
168 while (!firstMI && idx != end) {
169 idx = idx.getNextIndex();
170 firstMI = getInstructionFromIndex(idx);
175 // Find last instruction in range
176 SlotIndex lastIdx = end.getPrevIndex();
177 MachineInstr *lastMI = getInstructionFromIndex(lastIdx);
178 while (!lastMI && lastIdx != idx) {
179 lastIdx = lastIdx.getPrevIndex();
180 lastMI = getInstructionFromIndex(lastIdx);
185 // Range cannot cross basic block boundaries or terminators
186 MachineBasicBlock *MBB = firstMI->getParent();
187 if (MBB != lastMI->getParent() || lastMI->getDesc().isTerminator())
190 MachineBasicBlock::const_iterator E = lastMI;
192 for (MachineBasicBlock::const_iterator I = firstMI; I != E; ++I) {
193 const MachineInstr &MI = *I;
195 // Allow copies to and from li.reg
196 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
197 if (tii_->isMoveInstr(MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
198 if (SrcReg == li.reg || DstReg == li.reg)
201 // Check for operands using reg
202 for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
203 const MachineOperand& mop = MI.getOperand(i);
206 unsigned PhysReg = mop.getReg();
207 if (PhysReg == 0 || PhysReg == li.reg)
// Virtual operands are checked through their current phys assignment.
209 if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
210 if (!vrm.hasPhys(PhysReg))
212 PhysReg = vrm.getPhys(PhysReg);
214 if (PhysReg && tri_->regsOverlap(PhysReg, reg))
219 // No conflicts found.
223 /// conflictsWithSubPhysRegRef - Similar to conflictsWithPhysRegRef except
224 /// it checks for sub-register reference and it can check use as well.
/// Walks every instruction index covered by li's ranges (skipping deleted
/// instructions and instructions in JoinedCopies) looking for an operand
/// that is a sub-register of Reg; uses are only considered when CheckUse.
225 bool LiveIntervals::conflictsWithSubPhysRegRef(LiveInterval &li,
226 unsigned Reg, bool CheckUse,
227 SmallPtrSet<MachineInstr*,32> &JoinedCopies) {
228 for (LiveInterval::Ranges::const_iterator
229 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
230 for (SlotIndex index = I->start.getBaseIndex(),
231 end = I->end.getPrevSlot().getBaseIndex().getNextIndex();
233 index = index.getNextIndex()) {
234 MachineInstr *MI = getInstructionFromIndex(index);
236 continue; // skip deleted instructions
238 if (JoinedCopies.count(MI))
240 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
241 MachineOperand& MO = MI->getOperand(i);
244 if (MO.isUse() && !CheckUse)
246 unsigned PhysReg = MO.getReg();
247 if (PhysReg == 0 || TargetRegisterInfo::isVirtualRegister(PhysReg))
249 if (tri_->isSubRegister(Reg, PhysReg))
/// printRegName - Debug helper: print a physical register by its target
/// name, or a virtual register as "%reg<N>".
259 static void printRegName(unsigned reg, const TargetRegisterInfo* tri_) {
260 if (TargetRegisterInfo::isPhysicalRegister(reg))
261 dbgs() << tri_->getName(reg);
263 dbgs() << "%reg" << reg;
/// handleVirtualRegisterDef - Extend 'interval' for a def of a virtual
/// register at 'mi' (index MIIdx, operand MO at MOIdx). The first def of a
/// vreg builds the initial ranges from LiveVariables kill info; later defs
/// are either two-address redefs (the existing range is split into two
/// value numbers) or phi-elimination defs (live only to the block end).
267 void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
268 MachineBasicBlock::iterator mi,
272 LiveInterval &interval) {
274 dbgs() << "\t\tregister: ";
275 printRegName(interval.reg, tri_);
278 // Virtual registers may be defined multiple times (due to phi
279 // elimination and 2-addr elimination). Much of what we do only has to be
280 // done once for the vreg. We use an empty interval to detect the first
281 // time we see a vreg.
282 LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);
283 if (interval.empty()) {
284 // Get the Idx of the defining instructions.
285 SlotIndex defIndex = MIIdx.getDefIndex();
286 // Earlyclobbers move back one, so that they overlap the live range
288 if (MO.isEarlyClobber())
289 defIndex = MIIdx.getUseIndex();
// Remember the defining copy (if any) on the value number, so coalescing
// can find the copy source later via getVNInfoSourceReg.
291 MachineInstr *CopyMI = NULL;
292 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
293 if (mi->isExtractSubreg() || mi->isInsertSubreg() || mi->isSubregToReg() ||
294 tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
296 // Earlyclobbers move back one.
297 ValNo = interval.getNextValue(defIndex, CopyMI, true, VNInfoAllocator);
299 assert(ValNo->id == 0 && "First value in interval is not 0?");
301 // Loop over all of the blocks that the vreg is defined in. There are
302 // two cases we have to handle here. The most common case is a vreg
303 // whose lifetime is contained within a basic block. In this case there
304 // will be a single kill, in MBB, which comes after the definition.
305 if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
306 // FIXME: what about dead vars?
308 if (vi.Kills[0] != mi)
309 killIdx = getInstructionIndex(vi.Kills[0]).getDefIndex();
311 killIdx = defIndex.getStoreIndex();
313 // If the kill happens after the definition, we have an intra-block
315 if (killIdx > defIndex) {
316 assert(vi.AliveBlocks.empty() &&
317 "Shouldn't be alive across any blocks!");
318 LiveRange LR(defIndex, killIdx, ValNo);
319 interval.addRange(LR);
320 DEBUG(dbgs() << " +" << LR << "\n");
321 ValNo->addKill(killIdx);
326 // The other case we handle is when a virtual register lives to the end
327 // of the defining block, potentially live across some blocks, then is
328 // live into some number of blocks, but gets killed. Start by adding a
329 // range that goes from this definition to the end of the defining block.
330 LiveRange NewLR(defIndex, getMBBEndIdx(mbb), ValNo);
331 DEBUG(dbgs() << " +" << NewLR);
332 interval.addRange(NewLR);
334 bool PHIJoin = lv_->isPHIJoin(interval.reg);
337 // A phi join register is killed at the end of the MBB and revived as a new
338 // valno in the killing blocks.
339 assert(vi.AliveBlocks.empty() && "Phi join can't pass through blocks");
340 DEBUG(dbgs() << " phi-join");
341 ValNo->addKill(indexes_->getTerminatorGap(mbb));
342 ValNo->setHasPHIKill(true);
344 // Iterate over all of the blocks that the variable is completely
345 // live in, adding [insrtIndex(begin), instrIndex(end)+4) to the
347 for (SparseBitVector<>::iterator I = vi.AliveBlocks.begin(),
348 E = vi.AliveBlocks.end(); I != E; ++I) {
349 MachineBasicBlock *aliveBlock = mf_->getBlockNumbered(*I);
350 LiveRange LR(getMBBStartIdx(aliveBlock), getMBBEndIdx(aliveBlock), ValNo);
351 interval.addRange(LR);
352 DEBUG(dbgs() << " +" << LR);
356 // Finally, this virtual register is live from the start of any killing
357 // block to the 'use' slot of the killing instruction.
358 for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
359 MachineInstr *Kill = vi.Kills[i];
360 SlotIndex Start = getMBBStartIdx(Kill->getParent());
361 SlotIndex killIdx = getInstructionIndex(Kill).getDefIndex();
363 // Create interval with one of a NEW value number. Note that this value
364 // number isn't actually defined by an instruction, weird huh? :)
366 ValNo = interval.getNextValue(SlotIndex(Start, true), 0, false,
368 ValNo->setIsPHIDef(true);
370 LiveRange LR(Start, killIdx, ValNo);
371 interval.addRange(LR);
372 ValNo->addKill(killIdx);
373 DEBUG(dbgs() << " +" << LR);
377 // If this is the second time we see a virtual register definition, it
378 // must be due to phi elimination or two addr elimination. If this is
379 // the result of two address elimination, then the vreg is one of the
380 // def-and-use register operand.
381 if (mi->isRegTiedToUseOperand(MOIdx)) {
382 // If this is a two-address definition, then we have already processed
383 // the live range. The only problem is that we didn't realize there
384 // are actually two values in the live interval. Because of this we
385 // need to take the LiveRegion that defines this register and split it
387 assert(interval.containsOneValue());
388 SlotIndex DefIndex = interval.getValNumInfo(0)->def.getDefIndex();
389 SlotIndex RedefIndex = MIIdx.getDefIndex();
390 if (MO.isEarlyClobber())
391 RedefIndex = MIIdx.getUseIndex();
393 const LiveRange *OldLR =
394 interval.getLiveRangeContaining(RedefIndex.getUseIndex());
395 VNInfo *OldValNo = OldLR->valno;
397 // Delete the initial value, which should be short and continuous,
398 // because the 2-addr copy must be in the same MBB as the redef.
399 interval.removeRange(DefIndex, RedefIndex);
401 // Two-address vregs should always only be redefined once. This means
402 // that at this point, there should be exactly one value number in it.
403 assert(interval.containsOneValue() && "Unexpected 2-addr liveint!");
405 // The new value number (#1) is defined by the instruction we claimed
407 VNInfo *ValNo = interval.getNextValue(OldValNo->def, OldValNo->getCopy(),
408 false, // update at *
410 ValNo->setFlags(OldValNo->getFlags()); // * <- updating here
412 // Value#0 is now defined by the 2-addr instruction.
413 OldValNo->def = RedefIndex;
414 OldValNo->setCopy(0);
416 // Add the new live interval which replaces the range for the input copy.
417 LiveRange LR(DefIndex, RedefIndex, ValNo);
418 DEBUG(dbgs() << " replace range with " << LR);
419 interval.addRange(LR);
420 ValNo->addKill(RedefIndex);
422 // If this redefinition is dead, we need to add a dummy unit live
423 // range covering the def slot.
425 interval.addRange(LiveRange(RedefIndex, RedefIndex.getStoreIndex(),
429 dbgs() << " RESULT: ";
430 interval.print(dbgs(), tri_);
// Non-two-address redef: must be a phi-join register from phi elimination.
433 assert(lv_->isPHIJoin(interval.reg) && "Multiply defined register");
434 // In the case of PHI elimination, each variable definition is only
435 // live until the end of the block. We've already taken care of the
436 // rest of the live range.
438 SlotIndex defIndex = MIIdx.getDefIndex();
439 if (MO.isEarlyClobber())
440 defIndex = MIIdx.getUseIndex();
443 MachineInstr *CopyMI = NULL;
444 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
445 if (mi->isExtractSubreg() || mi->isInsertSubreg() || mi->isSubregToReg()||
446 tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
448 ValNo = interval.getNextValue(defIndex, CopyMI, true, VNInfoAllocator);
450 SlotIndex killIndex = getMBBEndIdx(mbb);
451 LiveRange LR(defIndex, killIndex, ValNo);
452 interval.addRange(LR);
453 ValNo->addKill(indexes_->getTerminatorGap(mbb));
454 ValNo->setHasPHIKill(true);
455 DEBUG(dbgs() << " phi-join +" << LR);
459 DEBUG(dbgs() << '\n');
/// handlePhysicalRegisterDef - Add a live range for a def of a physical
/// register at 'mi'. Since a physreg cannot live across blocks, the range
/// ends at the first kill, redef, or (if unused) at the def slot itself;
/// CopyMI, if non-null, is recorded on a newly created value number.
462 void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
463 MachineBasicBlock::iterator mi,
466 LiveInterval &interval,
467 MachineInstr *CopyMI) {
468 // A physical register cannot be live across basic block, so its
469 // lifetime must end somewhere in its defining basic block.
471 dbgs() << "\t\tregister: ";
472 printRegName(interval.reg, tri_);
475 SlotIndex baseIndex = MIIdx;
476 SlotIndex start = baseIndex.getDefIndex();
477 // Earlyclobbers move back one.
478 if (MO.isEarlyClobber())
479 start = MIIdx.getUseIndex();
480 SlotIndex end = start;
482 // If it is not used after definition, it is considered dead at
483 // the instruction defining it. Hence its interval is:
484 // [defSlot(def), defSlot(def)+1)
485 // For earlyclobbers, the defSlot was pushed back one; the extra
486 // advance below compensates.
488 DEBUG(dbgs() << " dead");
489 end = start.getStoreIndex();
493 // If it is not dead on definition, it must be killed by a
494 // subsequent instruction. Hence its interval is:
495 // [defSlot(def), useSlot(kill)+1)
496 baseIndex = baseIndex.getNextIndex();
// Scan forward through the block for the kill or redef.
497 while (++mi != MBB->end()) {
499 if (mi->isDebugValue())
501 if (getInstructionFromIndex(baseIndex) == 0)
502 baseIndex = indexes_->getNextNonNullIndex(baseIndex);
504 if (mi->killsRegister(interval.reg, tri_)) {
505 DEBUG(dbgs() << " killed");
506 end = baseIndex.getDefIndex();
509 int DefIdx = mi->findRegisterDefOperandIdx(interval.reg, false, tri_);
511 if (mi->isRegTiedToUseOperand(DefIdx)) {
512 // Two-address instruction.
513 end = baseIndex.getDefIndex();
515 // Another instruction redefines the register before it is ever read.
516 // Then the register is essentially dead at the instruction that
517 // defines it. Hence its interval is:
518 // [defSlot(def), defSlot(def)+1)
519 DEBUG(dbgs() << " dead");
520 end = start.getStoreIndex();
526 baseIndex = baseIndex.getNextIndex();
529 // The only case we should have a dead physreg here without a killing or
530 // instruction where we know it's dead is if it is live-in to the function
531 // and never used. Another possible case is the implicit use of the
532 // physical register has been deleted by two-address pass.
533 end = start.getStoreIndex();
536 assert(start < end && "did not find end of interval?");
538 // Already exists? Extend old live interval.
539 LiveInterval::iterator OldLR = interval.FindLiveRangeContaining(start);
540 bool Extend = OldLR != interval.end();
541 VNInfo *ValNo = Extend
542 ? OldLR->valno : interval.getNextValue(start, CopyMI, true, VNInfoAllocator);
// An earlyclobber extending an existing range means that range is
// redefined by an earlyclobber; record that on the value number.
543 if (MO.isEarlyClobber() && Extend)
544 ValNo->setHasRedefByEC(true);
545 LiveRange LR(start, end, ValNo);
546 interval.addRange(LR);
547 LR.valno->addKill(end);
548 DEBUG(dbgs() << " +" << LR << '\n');
/// handleRegisterDef - Dispatch a register def to the virtual- or
/// physical-register handler. For allocatable physregs, also creates
/// ranges for each sub-register unless MI defines that sub-register
/// explicitly itself.
551 void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
552 MachineBasicBlock::iterator MI,
556 if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
557 handleVirtualRegisterDef(MBB, MI, MIIdx, MO, MOIdx,
558 getOrCreateInterval(MO.getReg()));
559 else if (allocatableRegs_[MO.getReg()]) {
// Record the defining copy instruction, if this def is a copy.
560 MachineInstr *CopyMI = NULL;
561 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
562 if (MI->isExtractSubreg() || MI->isInsertSubreg() || MI->isSubregToReg() ||
563 tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
565 handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
566 getOrCreateInterval(MO.getReg()), CopyMI);
567 // Def of a register also defines its sub-registers.
568 for (const unsigned* AS = tri_->getSubRegisters(MO.getReg()); *AS; ++AS)
569 // If MI also modifies the sub-register explicitly, avoid processing it
570 // more than once. Do not pass in TRI here so it checks for exact match.
571 if (!MI->modifiesRegister(*AS))
572 handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
573 getOrCreateInterval(*AS), 0);
/// handleLiveInRegister - Build the live range for a register that is
/// live-in to MBB, starting at MIIdx. The range runs until the first kill
/// or redef in the block; if neither is found the register is either dead
/// on entry or live through the whole block. The value number is marked as
/// a PHI def since it has no visible defining instruction.
577 void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
579 LiveInterval &interval, bool isAlias) {
581 dbgs() << "\t\tlivein register: ";
582 printRegName(interval.reg, tri_);
585 // Look for kills, if it reaches a def before it's killed, then it shouldn't
586 // be considered a livein.
587 MachineBasicBlock::iterator mi = MBB->begin();
588 MachineBasicBlock::iterator E = MBB->end();
589 // Skip over DBG_VALUE at the start of the MBB.
590 if (mi != E && mi->isDebugValue()) {
591 while (++mi != E && mi->isDebugValue())
594 // MBB is empty except for DBG_VALUE's.
598 SlotIndex baseIndex = MIIdx;
599 SlotIndex start = baseIndex;
600 if (getInstructionFromIndex(baseIndex) == 0)
601 baseIndex = indexes_->getNextNonNullIndex(baseIndex);
603 SlotIndex end = baseIndex;
604 bool SeenDefUse = false;
// Scan the block for the first kill or (re)def of the register.
607 if (mi->killsRegister(interval.reg, tri_)) {
608 DEBUG(dbgs() << " killed");
609 end = baseIndex.getDefIndex();
612 } else if (mi->modifiesRegister(interval.reg, tri_)) {
613 // Another instruction redefines the register before it is ever read.
614 // Then the register is essentially dead at the instruction that defines
615 // it. Hence its interval is:
616 // [defSlot(def), defSlot(def)+1)
617 DEBUG(dbgs() << " dead");
618 end = start.getStoreIndex();
623 while (++mi != E && mi->isDebugValue())
624 // Skip over DBG_VALUE.
627 baseIndex = indexes_->getNextNonNullIndex(baseIndex);
630 // Live-in register might not be used at all.
633 DEBUG(dbgs() << " dead");
634 end = MIIdx.getStoreIndex();
636 DEBUG(dbgs() << " live through");
// The live-in value has no defining instruction: mark it as a PHI def.
642 interval.getNextValue(SlotIndex(getMBBStartIdx(MBB), true),
643 0, false, VNInfoAllocator);
644 vni->setIsPHIDef(true);
645 LiveRange LR(start, end, vni);
647 interval.addRange(LR);
648 LR.valno->addKill(end);
649 DEBUG(dbgs() << " +" << LR << '\n');
652 /// computeIntervals - computes the live intervals for virtual
653 /// registers. for some ordering of the machine instructions [1,N] a
654 /// live interval is an interval [i, j) where 1 <= i <= j < N for
655 /// which a variable is live
/// Walks every block: first creates intervals for live-in registers (and
/// their sub-registers), then visits each instruction's register operands,
/// building intervals for defs and collecting undef uses.
656 void LiveIntervals::computeIntervals() {
657 DEBUG(dbgs() << "********** COMPUTING LIVE INTERVALS **********\n"
658 << "********** Function: "
659 << ((Value*)mf_->getFunction())->getName() << '\n');
661 SmallVector<unsigned, 8> UndefUses;
662 for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
664 MachineBasicBlock *MBB = MBBI;
668 // Track the index of the current machine instr.
669 SlotIndex MIIndex = getMBBStartIdx(MBB);
670 DEBUG(dbgs() << MBB->getName() << ":\n");
672 // Create intervals for live-ins to this BB first.
673 for (MachineBasicBlock::const_livein_iterator LI = MBB->livein_begin(),
674 LE = MBB->livein_end(); LI != LE; ++LI) {
675 handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
676 // Multiple live-ins can alias the same register.
677 for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
678 if (!hasInterval(*AS))
679 handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
683 // Skip over empty initial indices.
684 if (getInstructionFromIndex(MIIndex) == 0)
685 MIIndex = indexes_->getNextNonNullIndex(MIIndex);
687 for (MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();
689 DEBUG(dbgs() << MIIndex << "\t" << *MI);
690 if (MI->isDebugValue())
// Walk operands in reverse so later operand removal (if any) is safe.
694 for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
695 MachineOperand &MO = MI->getOperand(i);
696 if (!MO.isReg() || !MO.getReg())
699 // handle register defs - build intervals
701 handleRegisterDef(MBB, MI, MIIndex, MO, i);
702 else if (MO.isUndef())
703 UndefUses.push_back(MO.getReg());
706 // Move to the next instr slot.
707 MIIndex = indexes_->getNextNonNullIndex(MIIndex);
711 // Create empty intervals for registers defined by implicit_def's (except
712 // for those implicit_def that define values which are liveout of their
714 for (unsigned i = 0, e = UndefUses.size(); i != e; ++i) {
715 unsigned UndefReg = UndefUses[i];
716 (void)getOrCreateInterval(UndefReg);
/// createInterval - Allocate a fresh LiveInterval for 'reg'. Physical
/// registers get HUGE_VALF spill weight so they are never chosen to spill;
/// virtual registers start at weight 0.
720 LiveInterval* LiveIntervals::createInterval(unsigned reg) {
721 float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ? HUGE_VALF : 0.0F;
722 return new LiveInterval(reg, Weight);
725 /// dupInterval - Duplicate a live interval. The caller is responsible for
726 /// managing the allocated memory.
/// NOTE(review): the 'return NewLI;' statement is not visible in this
/// excerpt -- confirm the copy is returned.
727 LiveInterval* LiveIntervals::dupInterval(LiveInterval *li) {
728 LiveInterval *NewLI = createInterval(li->reg);
729 NewLI->Copy(*li, mri_, getVNInfoAllocator());
733 /// getVNInfoSourceReg - Helper function that parses the specified VNInfo
734 /// copy field and returns the source register that defines it.
/// Handles EXTRACT_SUBREG (possibly narrowing a physreg to the named
/// sub-register), INSERT_SUBREG/SUBREG_TO_REG (operand 2 is the source),
/// and plain target move instructions.
735 unsigned LiveIntervals::getVNInfoSourceReg(const VNInfo *VNI) const {
739 if (VNI->getCopy()->isExtractSubreg()) {
740 // If it's extracting out of a physical register, return the sub-register.
741 unsigned Reg = VNI->getCopy()->getOperand(1).getReg();
742 if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
743 unsigned SrcSubReg = VNI->getCopy()->getOperand(2).getImm();
744 unsigned DstSubReg = VNI->getCopy()->getOperand(0).getSubReg();
745 if (SrcSubReg == DstSubReg)
746 // %reg1034:3<def> = EXTRACT_SUBREG %EDX, 3
747 // reg1034 can still be coalesced to EDX.
749 assert(DstSubReg == 0);
750 Reg = tri_->getSubReg(Reg, VNI->getCopy()->getOperand(2).getImm());
753 } else if (VNI->getCopy()->isInsertSubreg() ||
754 VNI->getCopy()->isSubregToReg())
755 return VNI->getCopy()->getOperand(2).getReg();
757 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
758 if (tii_->isMoveInstr(*VNI->getCopy(), SrcReg, DstReg, SrcSubReg, DstSubReg))
760 llvm_unreachable("Unrecognized copy instruction!");
764 //===----------------------------------------------------------------------===//
765 // Register allocator hooks.
768 /// getReMatImplicitUse - If the remat definition MI has one (for now, we only
769 /// allow one) virtual register operand, then its uses are implicitly using
770 /// the register. Returns the virtual register.
771 unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
772 MachineInstr *MI) const {
774 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
775 MachineOperand &MO = MI->getOperand(i);
776 if (!MO.isReg() || !MO.isUse())
777 unsigned Reg = MO.getReg();
// Skip the interval's own register; it is not an extra dependency.
// (Transcription note: original numbering resumes below.)
779 if (Reg == 0 || Reg == li.reg)
// Unallocatable physreg uses (e.g. reserved regs) don't count either.
782 if (TargetRegisterInfo::isPhysicalRegister(Reg) &&
783 !allocatableRegs_[Reg])
785 // FIXME: For now, only remat MI with at most one register operand.
787 "Can't rematerialize instruction with multiple register operand!");
796 /// isValNoAvailableAt - Return true if the val# of the specified interval
797 /// which reaches the given instruction also reaches the specified use index.
// NOTE(review): FindLiveRangeContaining(Index) is dereferenced without a
// null check; MI must lie inside some range of li.
798 bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
799 SlotIndex UseIdx) const {
800 SlotIndex Index = getInstructionIndex(MI);
801 VNInfo *ValNo = li.FindLiveRangeContaining(Index)->valno;
802 LiveInterval::const_iterator UI = li.FindLiveRangeContaining(UseIdx);
803 return UI != li.end() && UI->valno == ValNo;
806 /// isReMaterializable - Returns true if the definition MI of the specified
807 /// val# of the specified interval is re-materializable.
/// Also verifies that any implicit virtual-register use of MI is available
/// at every use of li, and that the implicit use is not about to be spilled.
808 bool LiveIntervals::isReMaterializable(const LiveInterval &li,
809 const VNInfo *ValNo, MachineInstr *MI,
810 SmallVectorImpl<LiveInterval*> &SpillIs,
// Delegate the target-independent triviality check first.
815 if (!tii_->isTriviallyReMaterializable(MI, aa_))
818 // Target-specific code can mark an instruction as being rematerializable
819 // if it has one virtual reg use, though it had better be something like
820 // a PIC base register which is likely to be live everywhere.
821 unsigned ImpUse = getReMatImplicitUse(li, MI);
823 const LiveInterval &ImpLi = getInterval(ImpUse);
824 for (MachineRegisterInfo::use_iterator ri = mri_->use_begin(li.reg),
825 re = mri_->use_end(); ri != re; ++ri) {
826 MachineInstr *UseMI = &*ri;
827 SlotIndex UseIdx = getInstructionIndex(UseMI);
// Only uses reached by this val# matter.
828 if (li.FindLiveRangeContaining(UseIdx)->valno != ValNo)
830 if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
834 // If a register operand of the re-materialized instruction is going to
835 // be spilled next, then it's not legal to re-materialize this instruction.
836 for (unsigned i = 0, e = SpillIs.size(); i != e; ++i)
837 if (ImpUse == SpillIs[i]->reg)
843 /// isReMaterializable - Returns true if the definition MI of the specified
844 /// val# of the specified interval is re-materializable.
/// Convenience overload: calls the full overload with empty dummy
/// spill-list/out-parameters.
845 bool LiveIntervals::isReMaterializable(const LiveInterval &li,
846 const VNInfo *ValNo, MachineInstr *MI) {
847 SmallVector<LiveInterval*, 4> Dummy1;
849 return isReMaterializable(li, ValNo, MI, Dummy1, Dummy2);
852 /// isReMaterializable - Returns true if every definition of MI of every
853 /// val# of the specified interval is re-materializable.
/// Dead val#s are skipped; val#s without an accurate def cannot be checked
/// and make the whole interval non-rematerializable.
854 bool LiveIntervals::isReMaterializable(const LiveInterval &li,
855 SmallVectorImpl<LiveInterval*> &SpillIs,
858 for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
860 const VNInfo *VNI = *i;
862 continue; // Dead val#.
863 // Is the def for the val# rematerializable?
864 if (!VNI->isDefAccurate())
866 MachineInstr *ReMatDefMI = getInstructionFromIndex(VNI->def);
867 bool DefIsLoad = false;
869 !isReMaterializable(li, VNI, ReMatDefMI, SpillIs, DefIsLoad))
876 /// FilterFoldedOps - Filter out two-address use operands. Return
877 /// true if it finds any issue with the operands that ought to prevent
/// folding. On success, FoldOps receives the operand indices that may be
/// folded and MRInfo accumulates VirtRegMap::isMod / isRef flags.
879 static bool FilterFoldedOps(MachineInstr *MI,
880 SmallVector<unsigned, 2> &Ops,
882 SmallVector<unsigned, 2> &FoldOps) {
884 for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
885 unsigned OpIdx = Ops[i];
886 MachineOperand &MO = MI->getOperand(OpIdx);
887 // FIXME: fold subreg use.
891 MRInfo |= (unsigned)VirtRegMap::isMod;
893 // Filter out two-address use operand(s).
894 if (MI->isRegTiedToDefOperand(OpIdx)) {
895 MRInfo = VirtRegMap::isModRef;
898 MRInfo |= (unsigned)VirtRegMap::isRef;
900 FoldOps.push_back(OpIdx);
906 /// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
907 /// slot / to reg or any rematerialized load into ith operand of specified
908 /// MI. If it is successul, MI is updated with the newly created MI and
/// all LiveIntervals/VirtRegMap bookkeeping is transferred from the old
/// instruction to the folded one.
910 bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
911 VirtRegMap &vrm, MachineInstr *DefMI,
913 SmallVector<unsigned, 2> &Ops,
914 bool isSS, int Slot, unsigned Reg) {
915 // If it is an implicit def instruction, just delete it.
916 if (MI->isImplicitDef()) {
917 RemoveMachineInstrFromMaps(MI);
918 vrm.RemoveMachineInstrFromMaps(MI);
919 MI->eraseFromParent();
924 // Filter the list of operand indexes that are to be folded. Abort if
925 // any operand will prevent folding.
927 SmallVector<unsigned, 2> FoldOps;
928 if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
931 // The only time it's safe to fold into a two address instruction is when
932 // it's folding reload and spill from / into a spill stack slot.
933 if (DefMI && (MRInfo & VirtRegMap::isMod))
// Fold either from the stack slot (isSS) or from the remat def.
936 MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(*mf_, MI, FoldOps, Slot)
937 : tii_->foldMemoryOperand(*mf_, MI, FoldOps, DefMI);
939 // Remember this instruction uses the spill slot.
940 if (isSS) vrm.addSpillSlotUse(Slot, fmi);
942 // Attempt to fold the memory reference into the instruction. If
943 // we can do this, we don't need to insert spill code.
944 MachineBasicBlock &MBB = *MI->getParent();
945 if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
946 vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
// Migrate spill/restore/emergency-spill points and index maps to fmi.
947 vrm.transferSpillPts(MI, fmi);
948 vrm.transferRestorePts(MI, fmi);
949 vrm.transferEmergencySpills(MI, fmi);
950 ReplaceMachineInstrInMaps(MI, fmi);
951 MI = MBB.insert(MBB.erase(MI), fmi);
958 /// canFoldMemoryOperand - Returns true if the specified load / store
959 /// folding is possible.
/// Mirrors the checks of tryFoldMemoryOperand without performing the fold.
960 bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
961 SmallVector<unsigned, 2> &Ops,
963 // Filter the list of operand indexes that are to be folded. Abort if
964 // any operand will prevent folding.
966 SmallVector<unsigned, 2> FoldOps;
967 if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
970 // It's only legal to remat for a use, not a def.
971 if (ReMat && (MRInfo & VirtRegMap::isMod))
974 return tii_->canFoldMemoryOperand(MI, FoldOps);
/// intervalIsInOneMBB - Return whether every range of li is covered by the
/// same single basic block.
977 bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
978 LiveInterval::Ranges::const_iterator itr = li.ranges.begin();
980 MachineBasicBlock *mbb = indexes_->getMBBCoveringRange(itr->start, itr->end);
// Every subsequent range must be covered by the same block as the first.
985 for (++itr; itr != li.ranges.end(); ++itr) {
986 MachineBasicBlock *mbb2 =
987 indexes_->getMBBCoveringRange(itr->start, itr->end);
996 /// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of
997 /// interval on to-be re-materialized operands of MI) with new register.
998 void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
999 MachineInstr *MI, unsigned NewVReg,
1001 // There is an implicit use. That means one of the other operand is
1002 // being remat'ed and the remat'ed instruction has li.reg as an
1003 // use operand. Make sure we rewrite that as well.
1004 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1005 MachineOperand &MO = MI->getOperand(i);
1008 unsigned Reg = MO.getReg();
// Only rematerialized virtual registers are relevant here.
1009 if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
1011 if (!vrm.isReMaterialized(Reg))
1013 MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
1014 MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg);
1016 UseMO->setReg(NewVReg);
/// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
/// for addIntervalsForSpills to rewrite uses / defs for the given live range.
///
/// Rewrites one instruction's references to the spilled register li.reg:
/// either deletes a fully-rematerialized def, folds the load/store into the
/// instruction, or replaces the operand(s) with a new virtual register NewVReg
/// whose short live interval is created here. HasDef/HasUse are set to reflect
/// how the instruction touches the register; returns whether the remaining
/// references could still be folded later (CanFold).
bool LiveIntervals::
rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
                            bool TrySplit, SlotIndex index, SlotIndex end,
                            MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                            unsigned Slot, int LdSlot,
                            bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                            const TargetRegisterClass* rc,
                            SmallVector<int, 4> &ReMatIds,
                            const MachineLoopInfo *loopInfo,
                            unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
                            DenseMap<unsigned,unsigned> &MBBVRegsMap,
                            std::vector<LiveInterval*> &NewLIs) {
  bool CanFold = false;
  for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
    MachineOperand& mop = MI->getOperand(i);
    unsigned Reg = mop.getReg();
    unsigned RegI = Reg;
    // Only the spilled virtual register is rewritten.
    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
    bool TryFold = !DefIsReMat;
    bool FoldSS = true; // Default behavior unless it's a remat.
    int FoldSlot = Slot;
    // If this is the rematerializable definition MI itself and
    // all of its uses are rematerialized, simply delete it.
    if (MI == ReMatOrigDefMI && CanDelete) {
      DEBUG(dbgs() << "\t\t\t\tErasing re-materializable def: "
      // Remove from both the slot-index map and the VirtRegMap bookkeeping
      // before erasing the instruction.
      RemoveMachineInstrFromMaps(MI);
      vrm.RemoveMachineInstrFromMaps(MI);
      MI->eraseFromParent();
    // If def for this use can't be rematerialized, then try folding.
    // If def is rematerializable and it's a load, also try folding.
    TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
    // Try fold loads (from stack slot, constant pool, etc.) into uses.
    // Scan all of the operands of this instruction rewriting operands
    // to use NewVReg instead of li.reg as appropriate. We do this for
    // 1. If the instr reads the same spilled vreg multiple times, we
    // want to reuse the NewVReg.
    // 2. If the instr is a two-addr instruction, we are required to
    // keep the src/dst regs pinned.
    // Keep track of whether we replace a use and/or def so that we can
    // create the spill interval with the appropriate range.
    HasUse = mop.isUse();
    HasDef = mop.isDef();
    SmallVector<unsigned, 2> Ops;
    // Collect later operands of MI that reference the same register so they
    // can all be rewritten to the one NewVReg.
    for (unsigned j = i+1, e = MI->getNumOperands(); j != e; ++j) {
      const MachineOperand &MOj = MI->getOperand(j);
      unsigned RegJ = MOj.getReg();
      if (RegJ == 0 || TargetRegisterInfo::isPhysicalRegister(RegJ))
      // <undef> operands do not contribute real uses/defs.
      if (!MOj.isUndef()) {
        HasUse |= MOj.isUse();
        HasDef |= MOj.isDef();
    // Create a new virtual register for the spill interval.
    // Create the new register now so we can map the fold instruction
    // to the new register so when it is unfolded we get the correct
    bool CreatedNewVReg = false;
      NewVReg = mri_->createVirtualRegister(rc);
      CreatedNewVReg = true;
      // The new virtual register should get the same allocation hints as the
      std::pair<unsigned, unsigned> Hint = mri_->getRegAllocationHint(Reg);
      if (Hint.first || Hint.second)
        mri_->setRegAllocationHint(NewVReg, Hint.first, Hint.second);
    // Do not fold load / store here if we are splitting. We'll find an
    // optimal point to insert a load / store later.
      if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
                               Ops, FoldSS, FoldSlot, NewVReg)) {
        // Folding the load/store can completely change the instruction in
        // unpredictable ways, rescan it from the beginning.
        // We need to give the new vreg the same stack slot as the
        // spilled interval.
        vrm.assignVirt2StackSlot(NewVReg, FoldSlot);
        if (isNotInMIMap(MI))
        goto RestartInstruction;
      // We'll try to fold it later if it's profitable.
      CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat);
    // Rewrite the current operand, and any implicit remat uses it implies.
    mop.setReg(NewVReg);
    if (mop.isImplicit())
      rewriteImplicitOps(li, MI, NewVReg, vrm);
    // Reuse NewVReg for other reads.
    for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
      MachineOperand &mopj = MI->getOperand(Ops[j]);
      mopj.setReg(NewVReg);
      if (mopj.isImplicit())
        rewriteImplicitOps(li, MI, NewVReg, vrm);
    if (CreatedNewVReg) {
        // Record that NewVReg's value can be recreated by ReMatDefMI and
        // share one remat id per value number.
        vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI);
        if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
          // Each valnum may have its own remat id.
          ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
          vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
        if (!CanDelete || (HasUse && HasDef)) {
          // If this is a two-addr instruction then its use operands are
          // rematerializable but its def is not. It should be assigned a
          vrm.assignVirt2StackSlot(NewVReg, Slot);
        vrm.assignVirt2StackSlot(NewVReg, Slot);
    } else if (HasUse && HasDef &&
               vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
      // If this interval hasn't been assigned a stack slot (because earlier
      // def is a deleted remat def), do it now.
      assert(Slot != VirtRegMap::NO_STACK_SLOT);
      vrm.assignVirt2StackSlot(NewVReg, Slot);
    // Re-matting an instruction with virtual register use. Add the
    // register as an implicit use on the use MI.
    if (DefIsReMat && ImpUse)
      MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
    // Create a new register interval for this spill / remat.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    if (CreatedNewVReg) {
      NewLIs.push_back(&nI);
      MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
        vrm.setIsSplitFromReg(NewVReg, li.reg);
    if (CreatedNewVReg) {
      // Fresh interval: covers from the reload point up to this def.
      LiveRange LR(index.getLoadIndex(), index.getDefIndex(),
                   nI.getNextValue(SlotIndex(), 0, false, VNInfoAllocator));
      DEBUG(dbgs() << " +" << LR);
      // Extend the split live interval to this def / use.
      SlotIndex End = index.getDefIndex();
      LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
                   nI.getValNumInfo(nI.getNumValNums()-1));
      DEBUG(dbgs() << " +" << LR);
    // Def: the interval extends to the point where a spill store could go.
    LiveRange LR(index.getDefIndex(), index.getStoreIndex(),
                 nI.getNextValue(SlotIndex(), 0, false, VNInfoAllocator));
    DEBUG(dbgs() << " +" << LR);
    dbgs() << "\t\t\t\tAdded new interval: ";
    nI.print(dbgs(), tri_);
/// anyKillInMBBAfterIdx - Return true if the value number has a (non-PHI)
/// kill inside MBB at an index strictly after Idx and no later than the
/// block's end index.
bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
                                  MachineBasicBlock *MBB,
                                  SlotIndex Idx) const {
  SlotIndex End = getMBBEndIdx(MBB);
  for (unsigned j = 0, ee = VNI->kills.size(); j != ee; ++j) {
    // PHI kills are block-boundary artifacts, not real kill points.
    if (VNI->kills[j].isPHI())
    SlotIndex KillIdx = VNI->kills[j];
    // Kill must lie in the half-open window (Idx, End].
    if (KillIdx > Idx && KillIdx <= End)
/// RewriteInfo - Keep track of machine instrs that will be rewritten
/// during spilling.
///
/// Each record pairs an instruction with its slot index and whether that
/// particular operand occurrence reads (HasUse) and/or writes (HasDef)
/// the register being spilled.
struct RewriteInfo {
  RewriteInfo(SlotIndex i, MachineInstr *mi, bool u, bool d)
    : Index(i), MI(mi), HasUse(u), HasDef(d) {}
// RewriteInfoCompare - Strict weak ordering on RewriteInfo by slot index,
// used to sort the rewrite list into instruction order.
struct RewriteInfoCompare {
  bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
    return LHS.Index < RHS.Index;
/// rewriteInstructionsForSpills - Rewrite all defs / uses of the spilled
/// register that fall inside the live range *I. Collects all touching
/// instructions sorted by slot index, rewrites each via
/// rewriteInstructionForSpills, and records per-block spill (SpillIdxes /
/// SpillMBBs) and restore (RestoreIdxes / RestoreMBBs) points for the
/// splitting / spilling phases that follow.
void LiveIntervals::
rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
                             LiveInterval::Ranges::const_iterator &I,
                             MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                             unsigned Slot, int LdSlot,
                             bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                             const TargetRegisterClass* rc,
                             SmallVector<int, 4> &ReMatIds,
                             const MachineLoopInfo *loopInfo,
                             BitVector &SpillMBBs,
                             DenseMap<unsigned, std::vector<SRInfo> > &SpillIdxes,
                             BitVector &RestoreMBBs,
                             DenseMap<unsigned, std::vector<SRInfo> > &RestoreIdxes,
                             DenseMap<unsigned,unsigned> &MBBVRegsMap,
                             std::vector<LiveInterval*> &NewLIs) {
  bool AllCanFold = true;
  unsigned NewVReg = 0;
  SlotIndex start = I->start.getBaseIndex();
  // End rounded up to the next base index so comparisons below are on
  // whole-instruction granularity.
  SlotIndex end = I->end.getPrevSlot().getBaseIndex().getNextIndex();
  // First collect all the def / use in this live range that will be rewritten.
  // Make sure they are sorted according to instruction index.
  std::vector<RewriteInfo> RewriteMIs;
  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
         re = mri_->reg_end(); ri != re; ) {
    MachineInstr *MI = &*ri;
    MachineOperand &O = ri.getOperand();
    if (MI->isDebugValue()) {
      // Remove debug info for now.
      DEBUG(dbgs() << "Removing debug info due to spill:" << "\t" << *MI);
    assert(!O.isImplicit() && "Spilling register that's used as implicit use?");
    SlotIndex index = getInstructionIndex(MI);
    // Ignore references outside the [start, end) window of this range.
    if (index < start || index >= end)
    // Must be defined by an implicit def. It should not be spilled. Note,
    // this is for correctness reason. e.g.
    // 8   %reg1024<def> = IMPLICIT_DEF
    // 12  %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
    // The live range [12, 14) are not part of the r1024 live interval since
    // it's defined by an implicit def. It will not conflicts with live
    // interval of r1025. Now suppose both registers are spilled, you can
    // easily see a situation where both registers are reloaded before
    // the INSERT_SUBREG and both target registers that would overlap.
    RewriteMIs.push_back(RewriteInfo(index, MI, O.isUse(), O.isDef()));
  std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());
  unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
  // Now rewrite the defs and uses.
  for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
    RewriteInfo &rwi = RewriteMIs[i];
    SlotIndex index = rwi.Index;
    bool MIHasUse = rwi.HasUse;
    bool MIHasDef = rwi.HasDef;
    MachineInstr *MI = rwi.MI;
    // If MI def and/or use the same register multiple times, then there
    // are multiple entries.
    unsigned NumUses = MIHasUse;
    // Merge all entries for the same instruction into one rewrite.
    while (i != e && RewriteMIs[i].MI == MI) {
      assert(RewriteMIs[i].Index == index);
      bool isUse = RewriteMIs[i].HasUse;
      if (isUse) ++NumUses;
      MIHasDef |= RewriteMIs[i].HasDef;
    MachineBasicBlock *MBB = MI->getParent();
    if (ImpUse && MI != ReMatDefMI) {
      // Re-matting an instruction with virtual register use. Prevent interval
      // from being spilled.
      getInterval(ImpUse).markNotSpillable();
    unsigned MBBId = MBB->getNumber();
    unsigned ThisVReg = 0;
      // When splitting, reuse the vreg previously created for this block.
      DenseMap<unsigned,unsigned>::iterator NVI = MBBVRegsMap.find(MBBId);
      if (NVI != MBBVRegsMap.end()) {
        ThisVReg = NVI->second;
          // It's better to start a new interval to avoid artifically
          // extend the new interval.
          if (MIHasDef && !MIHasUse) {
            MBBVRegsMap.erase(MBB->getNumber());
    bool IsNew = ThisVReg == 0;
      // This ends the previous live interval. If all of its def / use
      // can be folded, give it a low spill weight.
      if (NewVReg && TrySplit && AllCanFold) {
        LiveInterval &nI = getOrCreateInterval(NewVReg);
    bool HasDef = false;
    bool HasUse = false;
    bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
                         index, end, MI, ReMatOrigDefMI, ReMatDefMI,
                         Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                         CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
                         ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs);
    // Nothing was rewritten in this instruction (e.g. def was deleted).
    if (!HasDef && !HasUse)
    AllCanFold &= CanFold;
    // Update weight of spill interval.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
      // The spill weight is now infinity as it cannot be spilled again.
      nI.markNotSpillable();
    // Keep track of the last def and first use in each MBB.
      if (MI != ReMatOrigDefMI || !CanDelete) {
        bool HasKill = false;
          HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, index.getDefIndex());
          // If this is a two-address code, then this index starts a new VNInfo.
          const VNInfo *VNI = li.findDefinedVNInfoForRegInt(index.getDefIndex());
            HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, index.getDefIndex());
        DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
          SpillIdxes.find(MBBId);
          // Record / update the candidate spill point for this block.
          if (SII == SpillIdxes.end()) {
            std::vector<SRInfo> S;
            S.push_back(SRInfo(index, NewVReg, true));
            SpillIdxes.insert(std::make_pair(MBBId, S));
          } else if (SII->second.back().vreg != NewVReg) {
            SII->second.push_back(SRInfo(index, NewVReg, true));
          } else if (index > SII->second.back().index) {
            // If there is an earlier def and this is a two-address
            // instruction, then it's not possible to fold the store (which
            // would also fold the load).
            SRInfo &Info = SII->second.back();
            Info.canFold = !HasUse;
          SpillMBBs.set(MBBId);
        } else if (SII != SpillIdxes.end() &&
                   SII->second.back().vreg == NewVReg &&
                   index > SII->second.back().index) {
          // There is an earlier def that's not killed (must be two-address).
          // The spill is no longer needed.
          SII->second.pop_back();
          if (SII->second.empty()) {
            SpillIdxes.erase(MBBId);
            SpillMBBs.reset(MBBId);
      DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
        SpillIdxes.find(MBBId);
      if (SII != SpillIdxes.end() &&
          SII->second.back().vreg == NewVReg &&
          index > SII->second.back().index)
        // Use(s) following the last def, it's not safe to fold the spill.
        SII->second.back().canFold = false;
      DenseMap<unsigned, std::vector<SRInfo> >::iterator RII =
        RestoreIdxes.find(MBBId);
      if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
        // If we are splitting live intervals, only fold if it's the first
        // use and there isn't another use later in the MBB.
        RII->second.back().canFold = false;
      // Only need a reload if there isn't an earlier def / use.
      if (RII == RestoreIdxes.end()) {
        std::vector<SRInfo> Infos;
        Infos.push_back(SRInfo(index, NewVReg, true));
        RestoreIdxes.insert(std::make_pair(MBBId, Infos));
        RII->second.push_back(SRInfo(index, NewVReg, true));
      RestoreMBBs.set(MBBId);
    // Update spill weight.
    unsigned loopDepth = loopInfo->getLoopDepth(MBB);
    nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
  if (NewVReg && TrySplit && AllCanFold) {
    // If all of its def / use can be folded, give it a low spill weight.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
/// alsoFoldARestore - Return true if block Id contains a restore of register
/// vr at slot index `index` that is still marked as foldable.
bool LiveIntervals::alsoFoldARestore(int Id, SlotIndex index,
                                     unsigned vr, BitVector &RestoreMBBs,
                        DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
  // No restores recorded for this block at all.
  if (!RestoreMBBs[Id])
  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
  // Linear scan for a matching (index, vreg) entry with canFold set.
  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
    if (Restores[i].index == index &&
        Restores[i].vreg == vr &&
        Restores[i].canFold)
/// eraseRestoreInfo - Invalidate restore entries for block Id at slot index
/// `index` by resetting their index to the SlotIndex() sentinel (the restore
/// emission loop skips sentinel entries).
void LiveIntervals::eraseRestoreInfo(int Id, SlotIndex index,
                                     unsigned vr, BitVector &RestoreMBBs,
                        DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
  if (!RestoreMBBs[Id])
  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
    // NOTE(review): `Restores[i].vreg` is a truthiness check, not a
    // comparison against the `vr` parameter — this erases entries for any
    // non-zero vreg at this index. Looks suspicious; confirm intent.
    if (Restores[i].index == index && Restores[i].vreg)
      Restores[i].index = SlotIndex();
/// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being
/// spilled and create empty intervals for their uses.
///
/// Walks all remaining references to li.reg: IMPLICIT_DEF defs are erased
/// outright; each surviving use gets a brand-new vreg with an empty interval
/// (marked implicitly defined in the VirtRegMap) so the allocator can give
/// it any register without a reload.
LiveIntervals::handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm,
                                    const TargetRegisterClass* rc,
                                    std::vector<LiveInterval*> &NewLIs) {
  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
         re = mri_->reg_end(); ri != re; ) {
    MachineOperand &O = ri.getOperand();
    MachineInstr *MI = &*ri;
    // Any def still referencing li.reg at this point must be an
    // IMPLICIT_DEF; everything else was rewritten earlier.
    assert(MI->isImplicitDef() &&
           "Register def was not rewritten?");
    RemoveMachineInstrFromMaps(MI);
    vrm.RemoveMachineInstrFromMaps(MI);
    MI->eraseFromParent();
    // This must be an use of an implicit_def so it's not part of the live
    // interval. Create a new empty live interval for it.
    // FIXME: Can we simply erase some of the instructions? e.g. Stores?
    unsigned NewVReg = mri_->createVirtualRegister(rc);
    vrm.setIsImplicitlyDefined(NewVReg);
    NewLIs.push_back(&getOrCreateInterval(NewVReg));
    // Rewrite every operand of this instruction that references li.reg.
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(i);
      if (MO.isReg() && MO.getReg() == li.reg) {
/// getSpillWeight - Compute the additive spill weight contribution of one
/// def and/or use at the given loop depth.
LiveIntervals::getSpillWeight(bool isDef, bool isUse, unsigned loopDepth) {
  // Limit the loop depth ridiculousness.
  if (loopDepth > 200)
  // The loop depth is used to roughly estimate the number of times the
  // instruction is executed. Something like 10^d is simple, but will quickly
  // overflow a float. This expression behaves like 10^d for small d, but is
  // more tempered for large d. At d=200 we get 6.7e33 which leaves a bit of
  // headroom before overflow.
  float lc = powf(1 + (100.0f / (loopDepth+10)), (float)loopDepth);
  // isDef/isUse are booleans, so this is 0, 1 or 2 times the depth factor.
  return (isDef + isUse) * lc;
/// normalizeSpillWeights - Normalize the spill weight of each newly created
/// interval in NewLIs (delegates per-interval work to normalizeSpillWeight).
LiveIntervals::normalizeSpillWeights(std::vector<LiveInterval*> &NewLIs) {
  for (unsigned i = 0, e = NewLIs.size(); i != e; ++i)
    normalizeSpillWeight(*NewLIs[i]);
/// addIntervalsForSpillsFast - Simple spiller used when -fast-spill is set.
/// Assigns li.reg one stack slot, then for every instruction touching the
/// register either folds the memory access or rewrites the reference to a
/// fresh unspillable vreg with a tiny interval, recording spill / restore
/// points in the VirtRegMap. Returns the newly created intervals.
std::vector<LiveInterval*> LiveIntervals::
addIntervalsForSpillsFast(const LiveInterval &li,
                          const MachineLoopInfo *loopInfo,
  unsigned slot = vrm.assignVirt2StackSlot(li.reg);
  std::vector<LiveInterval*> added;
  assert(li.isSpillable() && "attempt to spill already spilled interval!");
    dbgs() << "\t\t\t\tadding intervals for spills for interval: ";
  const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
  MachineRegisterInfo::reg_iterator RI = mri_->reg_begin(li.reg);
  while (RI != mri_->reg_end()) {
    MachineInstr* MI = &*RI;
    // Gather all operand indexes of MI referring to li.reg, noting whether
    // the instruction reads and/or writes it.
    SmallVector<unsigned, 2> Indices;
    bool HasUse = false;
    bool HasDef = false;
    for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
      MachineOperand& mop = MI->getOperand(i);
      if (!mop.isReg() || mop.getReg() != li.reg) continue;
      HasUse |= MI->getOperand(i).isUse();
      HasDef |= MI->getOperand(i).isDef();
      Indices.push_back(i);
    // Try to fold the stack access into MI; on failure rewrite to a new vreg.
    if (!tryFoldMemoryOperand(MI, vrm, NULL, getInstructionIndex(MI),
                              Indices, true, slot, li.reg)) {
      unsigned NewVReg = mri_->createVirtualRegister(rc);
      vrm.assignVirt2StackSlot(NewVReg, slot);
      // create a new register for this spill
      LiveInterval &nI = getOrCreateInterval(NewVReg);
      nI.markNotSpillable();
      // Rewrite register operands to use the new vreg.
      for (SmallVectorImpl<unsigned>::iterator I = Indices.begin(),
             E = Indices.end(); I != E; ++I) {
        MI->getOperand(*I).setReg(NewVReg);
        if (MI->getOperand(*I).isUse())
          MI->getOperand(*I).setIsKill(true);
      // Fill in the new live interval.
      SlotIndex index = getInstructionIndex(MI);
        // Use: live from the reload point to the use slot; ask the spiller
        // to emit a restore before MI.
        LiveRange LR(index.getLoadIndex(), index.getUseIndex(),
                     nI.getNextValue(SlotIndex(), 0, false,
                                     getVNInfoAllocator()));
        DEBUG(dbgs() << " +" << LR);
        vrm.addRestorePoint(NewVReg, MI);
        // Def: live from the def slot to the store point; ask the spiller
        // to emit a spill after MI.
        LiveRange LR(index.getDefIndex(), index.getStoreIndex(),
                     nI.getNextValue(SlotIndex(), 0, false,
                                     getVNInfoAllocator()));
        DEBUG(dbgs() << " +" << LR);
        vrm.addSpillPoint(NewVReg, true, MI);
      added.push_back(&nI);
        dbgs() << "\t\t\t\tadded new interval: ";
    // RI may have been invalidated by the rewrite; restart the walk.
    RI = mri_->reg_begin(li.reg);
/// addIntervalsForSpills - Main spilling entry point. Creates new live
/// intervals (one short interval per rewritten reference, or per-block when
/// splitting) for the spilled interval li, decides per value number whether
/// its def can be rematerialized instead of reloaded, records and then emits
/// spill / restore points through the VirtRegMap, and returns the new
/// intervals with finalized kill flags and normalized spill weights.
std::vector<LiveInterval*> LiveIntervals::
addIntervalsForSpills(const LiveInterval &li,
                      SmallVectorImpl<LiveInterval*> &SpillIs,
                      const MachineLoopInfo *loopInfo, VirtRegMap &vrm) {
  // Optional fast path selected by the -fast-spill flag.
  if (EnableFastSpilling)
    return addIntervalsForSpillsFast(li, loopInfo, vrm);
  assert(li.isSpillable() && "attempt to spill already spilled interval!");
    dbgs() << "\t\t\t\tadding intervals for spills for interval: ";
    li.print(dbgs(), tri_);
  // Each bit specify whether a spill is required in the MBB.
  BitVector SpillMBBs(mf_->getNumBlockIDs());
  DenseMap<unsigned, std::vector<SRInfo> > SpillIdxes;
  BitVector RestoreMBBs(mf_->getNumBlockIDs());
  DenseMap<unsigned, std::vector<SRInfo> > RestoreIdxes;
  DenseMap<unsigned,unsigned> MBBVRegsMap;
  std::vector<LiveInterval*> NewLIs;
  const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
  // Per-value-number remat bookkeeping, indexed by VNInfo id.
  unsigned NumValNums = li.getNumValNums();
  SmallVector<MachineInstr*, 4> ReMatDefs;
  ReMatDefs.resize(NumValNums, NULL);
  SmallVector<MachineInstr*, 4> ReMatOrigDefs;
  ReMatOrigDefs.resize(NumValNums, NULL);
  SmallVector<int, 4> ReMatIds;
  ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
  BitVector ReMatDelete(NumValNums);
  unsigned Slot = VirtRegMap::MAX_STACK_SLOT;
  // Spilling a split live interval. It cannot be split any further. Also,
  // it's also guaranteed to be a single val# / range interval.
  if (vrm.getPreSplitReg(li.reg)) {
    vrm.setIsSplitFromReg(li.reg, 0);
    // Unset the split kill marker on the last use.
    SlotIndex KillIdx = vrm.getKillPoint(li.reg);
    if (KillIdx != SlotIndex()) {
      MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
      assert(KillMI && "Last use disappeared?");
      int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
      assert(KillOp != -1 && "Last use disappeared?");
      KillMI->getOperand(KillOp).setIsKill(false);
    vrm.removeKillPoint(li.reg);
    bool DefIsReMat = vrm.isReMaterialized(li.reg);
    Slot = vrm.getStackSlot(li.reg);
    assert(Slot != VirtRegMap::MAX_STACK_SLOT);
    MachineInstr *ReMatDefMI = DefIsReMat ?
      vrm.getReMaterializedMI(li.reg) : NULL;
    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
    bool isLoad = isLoadSS ||
      (DefIsReMat && (ReMatDefMI->getDesc().canFoldAsLoad()));
    bool IsFirstRange = true;
    for (LiveInterval::Ranges::const_iterator
           I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
      // If this is a split live interval with multiple ranges, it means there
      // are two-address instructions that re-defined the value. Only the
      // first def can be rematerialized!
        // Note ReMatOrigDefMI has already been deleted.
        rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
                             Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                             false, vrm, rc, ReMatIds, loopInfo,
                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                             MBBVRegsMap, NewLIs);
        rewriteInstructionsForSpills(li, false, I, NULL, 0,
                             Slot, 0, false, false, false,
                             false, vrm, rc, ReMatIds, loopInfo,
                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                             MBBVRegsMap, NewLIs);
      IsFirstRange = false;
    handleSpilledImpDefs(li, vrm, rc, NewLIs);
    normalizeSpillWeights(NewLIs);
  // Splitting is only attempted when the interval spans multiple blocks.
  bool TrySplit = !intervalIsInOneMBB(li);
  bool NeedStackSlot = false;
  // First pass over value numbers: decide which defs can be rematerialized
  // and whether a stack slot will be needed at all.
  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
    const VNInfo *VNI = *i;
    unsigned VN = VNI->id;
    if (VNI->isUnused())
      continue; // Dead val#.
    // Is the def for the val# rematerializable?
    MachineInstr *ReMatDefMI = VNI->isDefAccurate()
      ? getInstructionFromIndex(VNI->def) : 0;
    if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, SpillIs, dummy)) {
      // Remember how to remat the def of this val#.
      ReMatOrigDefs[VN] = ReMatDefMI;
      // Original def may be modified so we have to make a copy here.
      MachineInstr *Clone = mf_->CloneMachineInstr(ReMatDefMI);
      CloneMIs.push_back(Clone);
      ReMatDefs[VN] = Clone;
      bool CanDelete = true;
      if (VNI->hasPHIKill()) {
        // A kill is a phi node, not all of its uses can be rematerialized.
        // It must not be deleted.
        // Need a stack slot if there is any live range where uses cannot be
        NeedStackSlot = true;
        ReMatDelete.set(VN);
      // Need a stack slot if there is any live range where uses cannot be
      NeedStackSlot = true;
  // One stack slot per live interval.
  if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0) {
    if (vrm.getStackSlot(li.reg) == VirtRegMap::NO_STACK_SLOT)
      Slot = vrm.assignVirt2StackSlot(li.reg);
    // This case only occurs when the prealloc splitter has already assigned
    // a stack slot to this vreg.
      Slot = vrm.getStackSlot(li.reg);
  // Create new intervals and rewrite defs and uses.
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
    MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
    bool DefIsReMat = ReMatDefMI != NULL;
    bool CanDelete = ReMatDelete[I->valno->id];
    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
    bool isLoad = isLoadSS ||
      (DefIsReMat && ReMatDefMI->getDesc().canFoldAsLoad());
    rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
                         Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                         CanDelete, vrm, rc, ReMatIds, loopInfo,
                         SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                         MBBVRegsMap, NewLIs);
  // Insert spills / restores if we are splitting.
    handleSpilledImpDefs(li, vrm, rc, NewLIs);
    normalizeSpillWeights(NewLIs);
  SmallPtrSet<LiveInterval*, 4> AddedKill;
  SmallVector<unsigned, 2> Ops;
  if (NeedStackSlot) {
    // Walk every block that was flagged as needing a spill store.
    int Id = SpillMBBs.find_first();
      std::vector<SRInfo> &spills = SpillIdxes[Id];
      for (unsigned i = 0, e = spills.size(); i != e; ++i) {
        SlotIndex index = spills[i].index;
        unsigned VReg = spills[i].vreg;
        LiveInterval &nI = getOrCreateInterval(VReg);
        bool isReMat = vrm.isReMaterialized(VReg);
        MachineInstr *MI = getInstructionFromIndex(index);
        bool CanFold = false;
        bool FoundUse = false;
        if (spills[i].canFold) {
          // Re-collect the operand indexes of MI that reference VReg.
          for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
            MachineOperand &MO = MI->getOperand(j);
            if (!MO.isReg() || MO.getReg() != VReg)
              (!FoundUse && !alsoFoldARestore(Id, index, VReg,
                                              RestoreMBBs, RestoreIdxes))) {
              // MI has two-address uses of the same register. If the use
              // isn't the first and only use in the BB, then we can't fold
              // it. FIXME: Move this to rewriteInstructionsForSpills.
        // Fold the store into the def if possible.
        bool Folded = false;
        if (CanFold && !Ops.empty()) {
          if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot,VReg)){
              // Also folded uses, do not issue a load.
              eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
              nI.removeRange(index.getLoadIndex(), index.getDefIndex());
              nI.removeRange(index.getDefIndex(), index.getStoreIndex());
        // Otherwise tell the spiller to issue a spill.
          LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
          bool isKill = LR->end == index.getStoreIndex();
          if (!MI->registerDefIsDead(nI.reg))
            // No need to spill a dead def.
            vrm.addSpillPoint(VReg, isKill, MI);
            AddedKill.insert(&nI);
      Id = SpillMBBs.find_next(Id);
    // Walk every block that was flagged as needing a restore load.
    int Id = RestoreMBBs.find_first();
      std::vector<SRInfo> &restores = RestoreIdxes[Id];
      for (unsigned i = 0, e = restores.size(); i != e; ++i) {
        SlotIndex index = restores[i].index;
        // Entries invalidated by eraseRestoreInfo carry the sentinel index.
        if (index == SlotIndex())
        unsigned VReg = restores[i].vreg;
        LiveInterval &nI = getOrCreateInterval(VReg);
        bool isReMat = vrm.isReMaterialized(VReg);
        MachineInstr *MI = getInstructionFromIndex(index);
        bool CanFold = false;
        if (restores[i].canFold) {
          for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
            MachineOperand &MO = MI->getOperand(j);
            if (!MO.isReg() || MO.getReg() != VReg)
            // If this restore were to be folded, it would have been folded
        // Fold the load into the use if possible.
        bool Folded = false;
        if (CanFold && !Ops.empty()) {
            Folded = tryFoldMemoryOperand(MI, vrm, NULL,index,Ops,true,Slot,VReg);
            MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
            bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
            // If the rematerializable def is a load, also try to fold it.
            if (isLoadSS || ReMatDefMI->getDesc().canFoldAsLoad())
              Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
                                            Ops, isLoadSS, LdSlot, VReg);
            unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
              // Re-matting an instruction with virtual register use. Add the
              // register as an implicit use on the use MI and mark the register
              // interval as unspillable.
              LiveInterval &ImpLi = getInterval(ImpUse);
              ImpLi.markNotSpillable();
              MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
        // If folding is not possible / failed, then tell the spiller to issue a
        // load / rematerialization for us.
          nI.removeRange(index.getLoadIndex(), index.getDefIndex());
          vrm.addRestorePoint(VReg, MI);
      Id = RestoreMBBs.find_next(Id);
  // Finalize intervals: add kills, finalize spill weights, and filter out
  std::vector<LiveInterval*> RetNewLIs;
  for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
    LiveInterval *LI = NewLIs[i];
      LI->weight /= SlotIndex::NUM * getApproximateInstructionCount(*LI);
      // Unless a spill point already supplied the kill, mark the last use of
      // each new interval as a kill and record it in the VirtRegMap.
      if (!AddedKill.count(LI)) {
        LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
        SlotIndex LastUseIdx = LR->end.getBaseIndex();
        MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
        int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
        assert(UseIdx != -1);
        if (!LastUse->isRegTiedToDefOperand(UseIdx)) {
          LastUse->getOperand(UseIdx).setIsKill();
          vrm.addKillPoint(LI->reg, LastUseIdx);
      RetNewLIs.push_back(LI);
  handleSpilledImpDefs(li, vrm, rc, RetNewLIs);
  normalizeSpillWeights(RetNewLIs);
/// hasAllocatableSuperReg - Return true if the specified physical register has
/// any super register that's allocatable.
///
/// Only super registers that also have a live interval recorded count.
bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS)
    if (allocatableRegs_[*AS] && hasInterval(*AS))
/// getRepresentativeReg - Find the largest super register of the specified
/// physical register.
unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
  // Find the largest super-register that is allocatable.
  unsigned BestReg = Reg;
  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
    unsigned SuperReg = *AS;
    // Candidate: has an interval but no allocatable super register of its
    // own, i.e. it is the top of the allocatable chain.
    if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) {
/// getNumConflictsWithPhysReg - Return the number of uses and defs of the
/// specified interval that conflicts with the specified physical register.
///
/// A reference conflicts when the representative register's live interval is
/// live at that reference's instruction index.
unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
                                                   unsigned PhysReg) const {
  unsigned NumConflicts = 0;
  const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg));
  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
         E = mri_->reg_end(); I != E; ++I) {
    MachineOperand &O = I.getOperand();
    MachineInstr *MI = O.getParent();
    SlotIndex Index = getInstructionIndex(MI);
    if (pli.liveAt(Index))
  return NumConflicts;
/// spillPhysRegAroundRegDefsUses - Spill the specified physical register
/// around all defs and uses of the specified interval. Return true if it
/// was able to cut its interval.
///
/// Operates on the representative (largest allocatable) super register plus
/// its sub-registers, recording emergency spills in the VirtRegMap and
/// trimming the physreg intervals around each conflicting instruction.
bool LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
                                            unsigned PhysReg, VirtRegMap &vrm) {
  unsigned SpillReg = getRepresentativeReg(PhysReg);
  for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
    // If there are registers which alias PhysReg, but which are not a
    // sub-register of the chosen representative super register. Assert
    // since we can't handle it yet.
    assert(*AS == SpillReg || !allocatableRegs_[*AS] || !hasInterval(*AS) ||
           tri_->isSuperRegister(*AS, SpillReg));
  // Gather SpillReg and all of its sub-registers that have intervals.
  SmallVector<unsigned, 4> PRegs;
  if (hasInterval(SpillReg))
    PRegs.push_back(SpillReg);
  SmallSet<unsigned, 4> Added;
  for (const unsigned* AS = tri_->getSubRegisters(SpillReg); *AS; ++AS)
    if (Added.insert(*AS) && hasInterval(*AS)) {
      PRegs.push_back(*AS);
      for (const unsigned* ASS = tri_->getSubRegisters(*AS); *ASS; ++ASS)
  // Visit each instruction referencing li.reg once, cutting the physreg
  // intervals around it where they are live.
  SmallPtrSet<MachineInstr*, 8> SeenMIs;
  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
         E = mri_->reg_end(); I != E; ++I) {
    MachineOperand &O = I.getOperand();
    MachineInstr *MI = O.getParent();
    if (SeenMIs.count(MI))
    SlotIndex Index = getInstructionIndex(MI);
    for (unsigned i = 0, e = PRegs.size(); i != e; ++i) {
      unsigned PReg = PRegs[i];
      LiveInterval &pli = getInterval(PReg);
      if (!pli.liveAt(Index))
      vrm.addEmergencySpill(PReg, MI);
      SlotIndex StartIdx = Index.getLoadIndex();
      SlotIndex EndIdx = Index.getNextIndex().getBaseIndex();
      if (pli.isInOneLiveRange(StartIdx, EndIdx)) {
        pli.removeRange(StartIdx, EndIdx);
        // Cannot cut the interval around this instruction: report the
        // allocation failure (inline asm constraints are a common cause).
        raw_string_ostream Msg(msg);
        Msg << "Ran out of registers during register allocation!";
        if (MI->isInlineAsm()) {
          Msg << "\nPlease check your inline asm statement for invalid "
              << "constraints:\n";
          MI->print(Msg, tm_);
        llvm_report_error(Msg.str());
      // Also trim any live sub-register intervals around the instruction.
      for (const unsigned* AS = tri_->getSubRegisters(PReg); *AS; ++AS) {
        if (!hasInterval(*AS))
        LiveInterval &spli = getInterval(*AS);
        if (spli.liveAt(Index))
          spli.removeRange(Index.getLoadIndex(),
                           Index.getNextIndex().getBaseIndex());
2096 LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg,
2097 MachineInstr* startInst) {
2098 LiveInterval& Interval = getOrCreateInterval(reg);
2099 VNInfo* VN = Interval.getNextValue(
2100 SlotIndex(getInstructionIndex(startInst).getDefIndex()),
2101 startInst, true, getVNInfoAllocator());
2102 VN->setHasPHIKill(true);
2103 VN->kills.push_back(indexes_->getTerminatorGap(startInst->getParent()));
2105 SlotIndex(getInstructionIndex(startInst).getDefIndex()),
2106 getMBBEndIdx(startInst->getParent()), VN);
2107 Interval.addRange(LR);