//===-- LiveIntervalAnalysis.cpp - Live Interval Analysis -----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the LiveInterval analysis pass which is used
// by the Linear Scan Register allocator. This pass linearizes the
// basic blocks of the function in DFS order and uses the
// LiveVariables pass to conservatively compute live intervals for
// each virtual and physical register.
//
//===----------------------------------------------------------------------===//
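//
// For a rough picture of what this pass computes (illustrative numbers, not
// from any particular function): if a virtual register is defined by the
// instruction at slot index 4 and last read at index 10, its live interval
// is the half-open range [4, 10). The SlotIndexes pass provides the numbering.
//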
#define DEBUG_TYPE "liveintervals"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "VirtRegMap.h"
#include "llvm/Value.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/ProcessImplicitDefs.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include <algorithm>
#include <cmath>
using namespace llvm;
// Hidden options for help debugging.
static cl::opt<bool> DisableReMat("disable-rematerialization",
                                  cl::init(false), cl::Hidden);

static cl::opt<bool> EnableFastSpilling("fast-spill",
                                        cl::init(false), cl::Hidden);

STATISTIC(numIntervals , "Number of original intervals");
STATISTIC(numFolds     , "Number of loads/stores folded into instructions");
STATISTIC(numSplits    , "Number of intervals split");

char LiveIntervals::ID = 0;
static RegisterPass<LiveIntervals> X("liveintervals", "Live Interval Analysis");
void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<AliasAnalysis>();
  AU.addPreserved<AliasAnalysis>();
  AU.addPreserved<LiveVariables>();
  AU.addRequired<LiveVariables>();
  AU.addPreservedID(MachineLoopInfoID);
  AU.addPreservedID(MachineDominatorsID);
  AU.addPreservedID(PHIEliminationID);
  AU.addRequiredID(PHIEliminationID);
  AU.addRequiredID(TwoAddressInstructionPassID);
  AU.addPreserved<ProcessImplicitDefs>();
  AU.addRequired<ProcessImplicitDefs>();
  AU.addPreserved<SlotIndexes>();
  AU.addRequiredTransitive<SlotIndexes>();
  MachineFunctionPass::getAnalysisUsage(AU);
}
static void VNInfoDTor(void* Ptr) {
  reinterpret_cast<VNInfo*>(Ptr)->~VNInfo();
}
void LiveIntervals::releaseMemory() {
  // Free the live intervals themselves.
  for (DenseMap<unsigned, LiveInterval*>::iterator I = r2iMap_.begin(),
       E = r2iMap_.end(); I != E; ++I)
    delete I->second;
  r2iMap_.clear();

  // Release VNInfo memory regions after all VNInfo objects are dtor'd.
  VNInfoAllocator.Reset((unsigned)sizeof(VNInfo), alignof<VNInfo>(), VNInfoDTor);
  while (!CloneMIs.empty()) {
    MachineInstr *MI = CloneMIs.back();
    CloneMIs.pop_back();
    mf_->DeleteMachineInstr(MI);
  }
}
/// runOnMachineFunction - Compute live intervals for the whole function.
///
bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
  mf_ = &fn;
  mri_ = &mf_->getRegInfo();
  tm_ = &fn.getTarget();
  tri_ = tm_->getRegisterInfo();
  tii_ = tm_->getInstrInfo();
  aa_ = &getAnalysis<AliasAnalysis>();
  lv_ = &getAnalysis<LiveVariables>();
  indexes_ = &getAnalysis<SlotIndexes>();
  allocatableRegs_ = tri_->getAllocatableSet(fn);

  computeIntervals();

  numIntervals += getNumIntervals();

  return true;
}
/// print - Implement the dump method.
void LiveIntervals::print(raw_ostream &OS, const Module* ) const {
  OS << "********** INTERVALS **********\n";
  for (const_iterator I = begin(), E = end(); I != E; ++I) {
    I->second->print(OS, tri_);
    OS << "\n";
  }

  printInstrs(OS);
}
void LiveIntervals::printInstrs(raw_ostream &OS) const {
  OS << "********** MACHINEINSTRS **********\n";

  for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end();
       mbbi != mbbe; ++mbbi) {
    OS << "BB#" << mbbi->getNumber()
       << ":\t\t# derived from " << mbbi->getName() << "\n";
    for (MachineBasicBlock::iterator mii = mbbi->begin(),
           mie = mbbi->end(); mii != mie; ++mii) {
      if (mii->isDebugValue())
        OS << "    \t" << *mii;
      else
        OS << getInstructionIndex(mii) << '\t' << *mii;
    }
  }
}

void LiveIntervals::dumpInstrs() const {
  printInstrs(dbgs());
}
bool LiveIntervals::conflictsWithPhysReg(const LiveInterval &li,
                                         VirtRegMap &vrm, unsigned reg) {
  // We don't handle fancy stuff crossing basic block boundaries
  if (li.ranges.size() != 1)
    return true;
  const LiveRange &range = li.ranges.front();
  SlotIndex idx = range.start.getBaseIndex();
  SlotIndex end = range.end.getPrevSlot().getBaseIndex().getNextIndex();

  // Skip deleted instructions
  MachineInstr *firstMI = getInstructionFromIndex(idx);
  while (!firstMI && idx != end) {
    idx = idx.getNextIndex();
    firstMI = getInstructionFromIndex(idx);
  }
  if (!firstMI)
    return false;

  // Find last instruction in range
  SlotIndex lastIdx = end.getPrevIndex();
  MachineInstr *lastMI = getInstructionFromIndex(lastIdx);
  while (!lastMI && lastIdx != idx) {
    lastIdx = lastIdx.getPrevIndex();
    lastMI = getInstructionFromIndex(lastIdx);
  }
  if (!lastMI)
    return false;

  // Range cannot cross basic block boundaries or terminators
  MachineBasicBlock *MBB = firstMI->getParent();
  if (MBB != lastMI->getParent() || lastMI->getDesc().isTerminator())
    return true;

  MachineBasicBlock::const_iterator E = lastMI;
  ++E;
  for (MachineBasicBlock::const_iterator I = firstMI; I != E; ++I) {
    const MachineInstr &MI = *I;

    // Allow copies to and from li.reg
    unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
    if (tii_->isMoveInstr(MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
      if (SrcReg == li.reg || DstReg == li.reg)
        continue;

    // Check for operands using reg
    for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
      const MachineOperand& mop = MI.getOperand(i);
      if (!mop.isReg())
        continue;
      unsigned PhysReg = mop.getReg();
      if (PhysReg == 0 || PhysReg == li.reg)
        continue;
      if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
        if (!vrm.hasPhys(PhysReg))
          continue;
        PhysReg = vrm.getPhys(PhysReg);
      }
      if (PhysReg && tri_->regsOverlap(PhysReg, reg))
        return true;
    }
  }

  // No conflicts found.
  return false;
}
/// conflictsWithSubPhysRegRef - Similar to conflictsWithPhysReg except
/// it checks for sub-register references and it can check uses as well.
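/// For example (X86, illustrative): if Reg is %EAX, an operand that
/// references %AX or %AL inside one of li's live ranges counts as a
/// conflicting sub-register reference.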
bool LiveIntervals::conflictsWithSubPhysRegRef(LiveInterval &li,
                                               unsigned Reg, bool CheckUse,
                                  SmallPtrSet<MachineInstr*,32> &JoinedCopies) {
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    for (SlotIndex index = I->start.getBaseIndex(),
           end = I->end.getPrevSlot().getBaseIndex().getNextIndex();
         index != end;
         index = index.getNextIndex()) {
      MachineInstr *MI = getInstructionFromIndex(index);
      if (!MI)
        continue;               // skip deleted instructions

      if (JoinedCopies.count(MI))
        continue;
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand& MO = MI->getOperand(i);
        if (!MO.isReg())
          continue;
        if (MO.isUse() && !CheckUse)
          continue;
        unsigned PhysReg = MO.getReg();
        if (PhysReg == 0 || TargetRegisterInfo::isVirtualRegister(PhysReg))
          continue;
        if (tri_->isSubRegister(Reg, PhysReg))
          return true;
      }
    }
  }

  return false;
}
static void printRegName(unsigned reg, const TargetRegisterInfo* tri_) {
  if (TargetRegisterInfo::isPhysicalRegister(reg))
    dbgs() << tri_->getName(reg);
  else
    dbgs() << "%reg" << reg;
}
void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
                                             MachineBasicBlock::iterator mi,
                                             SlotIndex MIIdx,
                                             MachineOperand& MO,
                                             unsigned MOIdx,
                                             LiveInterval &interval) {
  DEBUG({
      dbgs() << "\t\tregister: ";
      printRegName(interval.reg, tri_);
    });

  // Virtual registers may be defined multiple times (due to phi
  // elimination and 2-addr elimination). Much of what we do only has to be
  // done once for the vreg. We use an empty interval to detect the first
  // time we see a vreg.
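  // For example (illustrative pseudo machine code), phi elimination can
  // leave one def of the same vreg per predecessor:
  //   BB#1: %reg1024<def> = MOV ...
  //   BB#2: %reg1024<def> = MOV ...
  // and two-address rewriting leaves tied def/use operands such as:
  //   %reg1025<def> = ADD32rr %reg1025<kill>, %reg1026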
  LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);
  if (interval.empty()) {
    // Get the Idx of the defining instructions.
    SlotIndex defIndex = MIIdx.getDefIndex();
    // Earlyclobbers move back one, so that they overlap the live range
    // defined by the instruction.
    if (MO.isEarlyClobber())
      defIndex = MIIdx.getUseIndex();

    VNInfo *ValNo;
    MachineInstr *CopyMI = NULL;
    unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
    if (mi->isExtractSubreg() || mi->isInsertSubreg() || mi->isSubregToReg() ||
        tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
      CopyMI = mi;
    ValNo = interval.getNextValue(defIndex, CopyMI, true, VNInfoAllocator);

    assert(ValNo->id == 0 && "First value in interval is not 0?");

    // Loop over all of the blocks that the vreg is defined in. There are
    // two cases we have to handle here. The most common case is a vreg
    // whose lifetime is contained within a basic block. In this case there
    // will be a single kill, in MBB, which comes after the definition.
    if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
      // FIXME: what about dead vars?
      SlotIndex killIdx;
      if (vi.Kills[0] != mi)
        killIdx = getInstructionIndex(vi.Kills[0]).getDefIndex();
      else
        killIdx = defIndex.getStoreIndex();

      // If the kill happens after the definition, we have an intra-block
      // live range.
      if (killIdx > defIndex) {
        assert(vi.AliveBlocks.empty() &&
               "Shouldn't be alive across any blocks!");
        LiveRange LR(defIndex, killIdx, ValNo);
        interval.addRange(LR);
        DEBUG(dbgs() << " +" << LR << "\n");
        ValNo->addKill(killIdx);
        return;
      }
    }
    // The other case we handle is when a virtual register lives to the end
    // of the defining block, potentially live across some blocks, then is
    // live into some number of blocks, but gets killed. Start by adding a
    // range that goes from this definition to the end of the defining block.
    LiveRange NewLR(defIndex, getMBBEndIdx(mbb), ValNo);
    DEBUG(dbgs() << " +" << NewLR);
    interval.addRange(NewLR);

    bool PHIJoin = lv_->isPHIJoin(interval.reg);

    if (PHIJoin) {
      // A phi join register is killed at the end of the MBB and revived as a
      // new valno in the killing blocks.
      assert(vi.AliveBlocks.empty() && "Phi join can't pass through blocks");
      DEBUG(dbgs() << " phi-join");
      ValNo->addKill(indexes_->getTerminatorGap(mbb));
      ValNo->setHasPHIKill(true);
    } else {
      // Iterate over all of the blocks that the variable is completely
      // live in, adding [instrIndex(begin), instrIndex(end)+4) to the
      // live interval.
      for (SparseBitVector<>::iterator I = vi.AliveBlocks.begin(),
             E = vi.AliveBlocks.end(); I != E; ++I) {
        MachineBasicBlock *aliveBlock = mf_->getBlockNumbered(*I);
        LiveRange LR(getMBBStartIdx(aliveBlock), getMBBEndIdx(aliveBlock),
                     ValNo);
        interval.addRange(LR);
        DEBUG(dbgs() << " +" << LR);
      }
    }

    // Finally, this virtual register is live from the start of any killing
    // block to the 'use' slot of the killing instruction.
    for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
      MachineInstr *Kill = vi.Kills[i];
      SlotIndex Start = getMBBStartIdx(Kill->getParent());
      SlotIndex killIdx = getInstructionIndex(Kill).getDefIndex();

      // Create interval with one of a NEW value number. Note that this value
      // number isn't actually defined by an instruction, weird huh? :)
      if (PHIJoin) {
        ValNo = interval.getNextValue(SlotIndex(Start, true), 0, false,
                                      VNInfoAllocator);
        ValNo->setIsPHIDef(true);
      }
      LiveRange LR(Start, killIdx, ValNo);
      interval.addRange(LR);
      ValNo->addKill(killIdx);
      DEBUG(dbgs() << " +" << LR);
    }
  } else {
    // If this is the second time we see a virtual register definition, it
    // must be due to phi elimination or two addr elimination. If this is
    // the result of two address elimination, then the vreg is one of the
    // def-and-use register operands.
    if (mi->isRegTiedToUseOperand(MOIdx)) {
      // If this is a two-address definition, then we have already processed
      // the live range. The only problem is that we didn't realize there
      // are actually two values in the live interval. Because of this we
      // need to take the LiveRange that defines this register and split it
      // into two values.
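      // For example (illustrative indices): given
      //   4    %reg1024<def> = ...
      //   10   %reg1024<def> = ADD %reg1024<kill>, 4
      // value #1 takes over the original range [4, 10) while value #0 is
      // moved to the redefinition at index 10.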
      assert(interval.containsOneValue());
      SlotIndex DefIndex = interval.getValNumInfo(0)->def.getDefIndex();
      SlotIndex RedefIndex = MIIdx.getDefIndex();
      if (MO.isEarlyClobber())
        RedefIndex = MIIdx.getUseIndex();

      const LiveRange *OldLR =
        interval.getLiveRangeContaining(RedefIndex.getUseIndex());
      VNInfo *OldValNo = OldLR->valno;

      // Delete the initial value, which should be short and continuous,
      // because the 2-addr copy must be in the same MBB as the redef.
      interval.removeRange(DefIndex, RedefIndex);

      // Two-address vregs should always only be redefined once. This means
      // that at this point, there should be exactly one value number in it.
      assert(interval.containsOneValue() && "Unexpected 2-addr liveint!");

      // The new value number (#1) is defined by the instruction we claimed
      // defined value #0.
      VNInfo *ValNo = interval.getNextValue(OldValNo->def, OldValNo->getCopy(),
                                            false, // update at *
                                            VNInfoAllocator);
      ValNo->setFlags(OldValNo->getFlags()); // * <- updating here

      // Value#0 is now defined by the 2-addr instruction.
      OldValNo->def  = RedefIndex;
      OldValNo->setCopy(0);

      // Add the new live interval which replaces the range for the input copy.
      LiveRange LR(DefIndex, RedefIndex, ValNo);
      DEBUG(dbgs() << " replace range with " << LR);
      interval.addRange(LR);
      ValNo->addKill(RedefIndex);

      // If this redefinition is dead, we need to add a dummy unit live
      // range covering the def slot.
      if (MO.isDead())
        interval.addRange(LiveRange(RedefIndex, RedefIndex.getStoreIndex(),
                                    OldValNo));

      DEBUG({
          dbgs() << " RESULT: ";
          interval.print(dbgs(), tri_);
        });
    } else {
      assert(lv_->isPHIJoin(interval.reg) && "Multiply defined register");
      // In the case of PHI elimination, each variable definition is only
      // live until the end of the block. We've already taken care of the
      // rest of the live range.

      SlotIndex defIndex = MIIdx.getDefIndex();
      if (MO.isEarlyClobber())
        defIndex = MIIdx.getUseIndex();

      VNInfo *ValNo;
      MachineInstr *CopyMI = NULL;
      unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
      if (mi->isExtractSubreg() || mi->isInsertSubreg() || mi->isSubregToReg()||
          tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
        CopyMI = mi;
      ValNo = interval.getNextValue(defIndex, CopyMI, true, VNInfoAllocator);

      SlotIndex killIndex = getMBBEndIdx(mbb);
      LiveRange LR(defIndex, killIndex, ValNo);
      interval.addRange(LR);
      ValNo->addKill(indexes_->getTerminatorGap(mbb));
      ValNo->setHasPHIKill(true);
      DEBUG(dbgs() << " phi-join +" << LR);
    }
  }

  DEBUG(dbgs() << '\n');
}
void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
                                              MachineBasicBlock::iterator mi,
                                              SlotIndex MIIdx,
                                              MachineOperand& MO,
                                              LiveInterval &interval,
                                              MachineInstr *CopyMI) {
  // A physical register cannot be live across basic blocks, so its
  // lifetime must end somewhere in its defining basic block.
  DEBUG({
      dbgs() << "\t\tregister: ";
      printRegName(interval.reg, tri_);
    });

  SlotIndex baseIndex = MIIdx;
  SlotIndex start = baseIndex.getDefIndex();
  // Earlyclobbers move back one.
  if (MO.isEarlyClobber())
    start = MIIdx.getUseIndex();
  SlotIndex end = start;

  // If it is not used after definition, it is considered dead at
  // the instruction defining it. Hence its interval is:
  // [defSlot(def), defSlot(def)+1)
  // For earlyclobbers, the defSlot was pushed back one; the extra
  // advance below compensates.
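  // For example (illustrative): a def at base index 12 that is never read
  // covers only its own instruction, from the def slot to the store slot of
  // index 12.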
  if (MO.isDead()) {
    DEBUG(dbgs() << " dead");
    end = start.getStoreIndex();
    goto exit;
  }

  // If it is not dead on definition, it must be killed by a
  // subsequent instruction. Hence its interval is:
  // [defSlot(def), useSlot(kill)+1)
  baseIndex = baseIndex.getNextIndex();
  while (++mi != MBB->end()) {

    if (mi->isDebugValue())
      continue;
    if (getInstructionFromIndex(baseIndex) == 0)
      baseIndex = indexes_->getNextNonNullIndex(baseIndex);

    if (mi->killsRegister(interval.reg, tri_)) {
      DEBUG(dbgs() << " killed");
      end = baseIndex.getDefIndex();
      goto exit;
    } else {
      int DefIdx = mi->findRegisterDefOperandIdx(interval.reg, false, tri_);
      if (DefIdx != -1) {
        if (mi->isRegTiedToUseOperand(DefIdx)) {
          // Two-address instruction.
          end = baseIndex.getDefIndex();
        } else {
          // Another instruction redefines the register before it is ever read.
          // Then the register is essentially dead at the instruction that
          // defines it. Hence its interval is:
          // [defSlot(def), defSlot(def)+1)
          DEBUG(dbgs() << " dead");
          end = start.getStoreIndex();
        }
        goto exit;
      }
    }

    baseIndex = baseIndex.getNextIndex();
  }

  // The only case we should have a dead physreg here without a killing
  // instruction, or one where we know it's dead, is if it is live-in to
  // the function and never used. Another possible case is the implicit
  // use of the physical register has been deleted by the two-address pass.
  end = start.getStoreIndex();

exit:
  assert(start < end && "did not find end of interval?");
  // Already exists? Extend old live interval.
  LiveInterval::iterator OldLR = interval.FindLiveRangeContaining(start);
  bool Extend = OldLR != interval.end();
  VNInfo *ValNo = Extend
    ? OldLR->valno : interval.getNextValue(start, CopyMI, true, VNInfoAllocator);
  if (MO.isEarlyClobber() && Extend)
    ValNo->setHasRedefByEC(true);
  LiveRange LR(start, end, ValNo);
  interval.addRange(LR);
  LR.valno->addKill(end);
  DEBUG(dbgs() << " +" << LR << '\n');
}
void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
                                      MachineBasicBlock::iterator MI,
                                      SlotIndex MIIdx,
                                      MachineOperand& MO,
                                      unsigned MOIdx) {
  if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
    handleVirtualRegisterDef(MBB, MI, MIIdx, MO, MOIdx,
                             getOrCreateInterval(MO.getReg()));
  else if (allocatableRegs_[MO.getReg()]) {
    MachineInstr *CopyMI = NULL;
    unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
    if (MI->isExtractSubreg() || MI->isInsertSubreg() || MI->isSubregToReg() ||
        tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
      CopyMI = MI;
    handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                              getOrCreateInterval(MO.getReg()), CopyMI);
    // Def of a register also defines its sub-registers.
    for (const unsigned* AS = tri_->getSubRegisters(MO.getReg()); *AS; ++AS)
      // If MI also modifies the sub-register explicitly, avoid processing it
      // more than once. Do not pass in TRI here so it checks for exact match.
      if (!MI->modifiesRegister(*AS))
        handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                                  getOrCreateInterval(*AS), 0);
  }
}
void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
                                         SlotIndex MIIdx,
                                         LiveInterval &interval, bool isAlias) {
  DEBUG({
      dbgs() << "\t\tlivein register: ";
      printRegName(interval.reg, tri_);
    });

  // Look for kills, if it reaches a def before it's killed, then it shouldn't
  // be considered a livein.
  MachineBasicBlock::iterator mi = MBB->begin();
  MachineBasicBlock::iterator E = MBB->end();
  // Skip over DBG_VALUE at the start of the MBB.
  if (mi != E && mi->isDebugValue()) {
    while (++mi != E && mi->isDebugValue())
      ;
    if (mi == E)
      // MBB is empty except for DBG_VALUE's.
      return;
  }

  SlotIndex baseIndex = MIIdx;
  SlotIndex start = baseIndex;
  if (getInstructionFromIndex(baseIndex) == 0)
    baseIndex = indexes_->getNextNonNullIndex(baseIndex);

  SlotIndex end = baseIndex;
  bool SeenDefUse = false;

  while (mi != E) {
    if (mi->killsRegister(interval.reg, tri_)) {
      DEBUG(dbgs() << " killed");
      end = baseIndex.getDefIndex();
      SeenDefUse = true;
      break;
    } else if (mi->modifiesRegister(interval.reg, tri_)) {
      // Another instruction redefines the register before it is ever read.
      // Then the register is essentially dead at the instruction that defines
      // it. Hence its interval is:
      // [defSlot(def), defSlot(def)+1)
      DEBUG(dbgs() << " dead");
      end = start.getStoreIndex();
      SeenDefUse = true;
      break;
    }

    while (++mi != E && mi->isDebugValue())
      // Skip over DBG_VALUE.
      ;
    if (mi != E)
      baseIndex = indexes_->getNextNonNullIndex(baseIndex);
  }

  // Live-in register might not be used at all.
  if (!SeenDefUse) {
    if (isAlias) {
      DEBUG(dbgs() << " dead");
      end = MIIdx.getStoreIndex();
    } else {
      DEBUG(dbgs() << " live through");
      end = baseIndex;
    }
  }

  VNInfo *vni =
    interval.getNextValue(SlotIndex(getMBBStartIdx(MBB), true),
                          0, false, VNInfoAllocator);
  vni->setIsPHIDef(true);
  LiveRange LR(start, end, vni);

  interval.addRange(LR);
  LR.valno->addKill(end);
  DEBUG(dbgs() << " +" << LR << '\n');
}
/// computeIntervals - Compute the live intervals for virtual
/// registers. For some ordering of the machine instructions [1,N], a
/// live interval is an interval [i, j) where 1 <= i <= j < N for
/// which a variable is live.
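/// For example (illustrative): a virtual register defined at index 4 whose
/// last use is at index 10 gets the interval [4, 10); the precise endpoints
/// are refined below using the def/use/store slots from SlotIndexes.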
void LiveIntervals::computeIntervals() {
  DEBUG(dbgs() << "********** COMPUTING LIVE INTERVALS **********\n"
               << "********** Function: "
               << ((Value*)mf_->getFunction())->getName() << '\n');

  SmallVector<unsigned, 8> UndefUses;
  for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
       MBBI != E; ++MBBI) {
    MachineBasicBlock *MBB = MBBI;
    if (MBB->empty())
      continue;

    // Track the index of the current machine instr.
    SlotIndex MIIndex = getMBBStartIdx(MBB);
    DEBUG(dbgs() << MBB->getName() << ":\n");

    // Create intervals for live-ins to this BB first.
    for (MachineBasicBlock::const_livein_iterator LI = MBB->livein_begin(),
           LE = MBB->livein_end(); LI != LE; ++LI) {
      handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
      // Multiple live-ins can alias the same register.
      for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
        if (!hasInterval(*AS))
          handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
                               true);
    }

    // Skip over empty initial indices.
    if (getInstructionFromIndex(MIIndex) == 0)
      MIIndex = indexes_->getNextNonNullIndex(MIIndex);

    for (MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();
         MI != miEnd; ++MI) {
      DEBUG(dbgs() << MIIndex << "\t" << *MI);
      if (MI->isDebugValue())
        continue;

      // Handle defs.
      for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
        MachineOperand &MO = MI->getOperand(i);
        if (!MO.isReg() || !MO.getReg())
          continue;

        // handle register defs - build intervals
        if (MO.isDef())
          handleRegisterDef(MBB, MI, MIIndex, MO, i);
        else if (MO.isUndef())
          UndefUses.push_back(MO.getReg());
      }

      // Move to the next instr slot.
      MIIndex = indexes_->getNextNonNullIndex(MIIndex);
    }
  }

  // Create empty intervals for registers defined by implicit_def's (except
  // for those implicit_def that define values which are liveout of their
  // blocks).
  for (unsigned i = 0, e = UndefUses.size(); i != e; ++i) {
    unsigned UndefReg = UndefUses[i];
    (void)getOrCreateInterval(UndefReg);
  }
}
LiveInterval* LiveIntervals::createInterval(unsigned reg) {
  float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ? HUGE_VALF : 0.0F;
  return new LiveInterval(reg, Weight);
}

/// dupInterval - Duplicate a live interval. The caller is responsible for
/// managing the allocated memory.
LiveInterval* LiveIntervals::dupInterval(LiveInterval *li) {
  LiveInterval *NewLI = createInterval(li->reg);
  NewLI->Copy(*li, mri_, getVNInfoAllocator());
  return NewLI;
}
/// getVNInfoSourceReg - Helper function that parses the specified VNInfo
/// copy field and returns the source register that defines it.
unsigned LiveIntervals::getVNInfoSourceReg(const VNInfo *VNI) const {
  if (!VNI->getCopy())
    return 0;

  if (VNI->getCopy()->isExtractSubreg()) {
    // If it's extracting out of a physical register, return the sub-register.
    unsigned Reg = VNI->getCopy()->getOperand(1).getReg();
    if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
      unsigned SrcSubReg = VNI->getCopy()->getOperand(2).getImm();
      unsigned DstSubReg = VNI->getCopy()->getOperand(0).getSubReg();
      if (SrcSubReg == DstSubReg)
        // %reg1034:3<def> = EXTRACT_SUBREG %EDX, 3
        // reg1034 can still be coalesced to EDX.
        return Reg;
      assert(DstSubReg == 0);
      Reg = tri_->getSubReg(Reg, VNI->getCopy()->getOperand(2).getImm());
    }
    return Reg;
  } else if (VNI->getCopy()->isInsertSubreg() ||
             VNI->getCopy()->isSubregToReg())
    return VNI->getCopy()->getOperand(2).getReg();

  unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
  if (tii_->isMoveInstr(*VNI->getCopy(), SrcReg, DstReg, SrcSubReg, DstSubReg))
    return SrcReg;
  llvm_unreachable("Unrecognized copy instruction!");
  return 0;
}
//===----------------------------------------------------------------------===//
// Register allocator hooks.
//

/// getReMatImplicitUse - If the remat definition MI has one (for now, we only
/// allow one) virtual register operand, then its uses are implicitly using
/// the register. Returns the virtual register.
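/// For example (hypothetical X86 PIC sequence):
///   %reg1025<def> = MOV32rm %reg1024, 1, %noreg, <ga:@G>, %noreg
/// where %reg1024 holds the PIC base: rematerializing the load anywhere
/// implicitly requires %reg1024 to be live there.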
unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
                                            MachineInstr *MI) const {
  unsigned RegOp = 0;
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg() || !MO.isUse())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || Reg == li.reg)
      continue;

    if (TargetRegisterInfo::isPhysicalRegister(Reg) &&
        !allocatableRegs_[Reg])
      continue;
    // FIXME: For now, only remat MI with at most one register operand.
    assert(!RegOp &&
           "Can't rematerialize instruction with multiple register operands!");
    RegOp = MO.getReg();
  }
  return RegOp;
}
/// isValNoAvailableAt - Return true if the val# of the specified interval
/// which reaches the given instruction also reaches the specified use index.
bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
                                       SlotIndex UseIdx) const {
  SlotIndex Index = getInstructionIndex(MI);
  VNInfo *ValNo = li.FindLiveRangeContaining(Index)->valno;
  LiveInterval::const_iterator UI = li.FindLiveRangeContaining(UseIdx);
  return UI != li.end() && UI->valno == ValNo;
}
/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       const VNInfo *ValNo, MachineInstr *MI,
                                       SmallVectorImpl<LiveInterval*> &SpillIs,
                                       bool &isLoad) {
  if (DisableReMat)
    return false;

  if (!tii_->isTriviallyReMaterializable(MI, aa_))
    return false;

  // Target-specific code can mark an instruction as being rematerializable
  // if it has one virtual reg use, though it had better be something like
  // a PIC base register which is likely to be live everywhere.
  unsigned ImpUse = getReMatImplicitUse(li, MI);
  if (ImpUse) {
    const LiveInterval &ImpLi = getInterval(ImpUse);
    for (MachineRegisterInfo::use_nodbg_iterator
           ri = mri_->use_nodbg_begin(li.reg), re = mri_->use_nodbg_end();
         ri != re; ++ri) {
      MachineInstr *UseMI = &*ri;
      SlotIndex UseIdx = getInstructionIndex(UseMI);
      if (li.FindLiveRangeContaining(UseIdx)->valno != ValNo)
        continue;
      if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
        return false;
    }

    // If a register operand of the re-materialized instruction is going to
    // be spilled next, then it's not legal to re-materialize this instruction.
    for (unsigned i = 0, e = SpillIs.size(); i != e; ++i)
      if (ImpUse == SpillIs[i]->reg)
        return false;
  }
  return true;
}

/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       const VNInfo *ValNo, MachineInstr *MI) {
  SmallVector<LiveInterval*, 4> Dummy1;
  bool Dummy2;
  return isReMaterializable(li, ValNo, MI, Dummy1, Dummy2);
}
/// isReMaterializable - Returns true if every definition of MI of every
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       SmallVectorImpl<LiveInterval*> &SpillIs,
                                       bool &isLoad) {
  isLoad = false;
  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
       i != e; ++i) {
    const VNInfo *VNI = *i;
    if (VNI->isUnused())
      continue; // Dead val#.
    // Is the def for the val# rematerializable?
    if (!VNI->isDefAccurate())
      return false;
    MachineInstr *ReMatDefMI = getInstructionFromIndex(VNI->def);
    bool DefIsLoad = false;
    if (!ReMatDefMI ||
        !isReMaterializable(li, VNI, ReMatDefMI, SpillIs, DefIsLoad))
      return false;
    isLoad |= DefIsLoad;
  }
  return true;
}
/// FilterFoldedOps - Filter out two-address use operands. Return
/// true if it finds any issue with the operands that ought to prevent
/// folding.
static bool FilterFoldedOps(MachineInstr *MI,
                            SmallVector<unsigned, 2> &Ops,
                            unsigned &MRInfo,
                            SmallVector<unsigned, 2> &FoldOps) {
  MRInfo = 0;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    unsigned OpIdx = Ops[i];
    MachineOperand &MO = MI->getOperand(OpIdx);
    // FIXME: fold subreg use.
    if (MO.getSubReg())
      return true;
    if (MO.isDef())
      MRInfo |= (unsigned)VirtRegMap::isMod;
    else {
      // Filter out two-address use operand(s).
      if (MI->isRegTiedToDefOperand(OpIdx)) {
        MRInfo = VirtRegMap::isModRef;
        continue;
      }
      MRInfo |= (unsigned)VirtRegMap::isRef;
    }
    FoldOps.push_back(OpIdx);
  }
  return false;
}
/// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
/// slot / to reg or any rematerialized load into ith operand of specified
/// MI. If it is successful, MI is updated with the newly created MI and
/// returns true.
bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
                                         VirtRegMap &vrm, MachineInstr *DefMI,
                                         SlotIndex InstrIdx,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool isSS, int Slot, unsigned Reg) {
  // If it is an implicit def instruction, just delete it.
  if (MI->isImplicitDef()) {
    RemoveMachineInstrFromMaps(MI);
    vrm.RemoveMachineInstrFromMaps(MI);
    MI->eraseFromParent();
    ++numFolds;
    return true;
  }

  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;

  // The only time it's safe to fold into a two address instruction is when
  // it's folding reload and spill from / into a spill stack slot.
  if (DefMI && (MRInfo & VirtRegMap::isMod))
    return false;

  MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(*mf_, MI, FoldOps, Slot)
                           : tii_->foldMemoryOperand(*mf_, MI, FoldOps, DefMI);
  if (fmi) {
    // Remember this instruction uses the spill slot.
    if (isSS) vrm.addSpillSlotUse(Slot, fmi);

    // Attempt to fold the memory reference into the instruction. If
    // we can do this, we don't need to insert spill code.
    MachineBasicBlock &MBB = *MI->getParent();
    if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
      vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
    vrm.transferSpillPts(MI, fmi);
    vrm.transferRestorePts(MI, fmi);
    vrm.transferEmergencySpills(MI, fmi);
    ReplaceMachineInstrInMaps(MI, fmi);
    MI = MBB.insert(MBB.erase(MI), fmi);
    ++numFolds;
    return true;
  }
  return false;
}
/// canFoldMemoryOperand - Returns true if the specified load / store
/// folding is possible.
bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool ReMat) const {
  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;

  // It's only legal to remat for a use, not a def.
  if (ReMat && (MRInfo & VirtRegMap::isMod))
    return false;

  return tii_->canFoldMemoryOperand(MI, FoldOps);
}
bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
  LiveInterval::Ranges::const_iterator itr = li.ranges.begin();

  MachineBasicBlock *mbb = indexes_->getMBBCoveringRange(itr->start, itr->end);

  if (mbb == 0)
    return false;

  for (++itr; itr != li.ranges.end(); ++itr) {
    MachineBasicBlock *mbb2 =
      indexes_->getMBBCoveringRange(itr->start, itr->end);

    if (mbb2 != mbb)
      return false;
  }

  return true;
}
/// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of
/// interval on to-be re-materialized operands of MI) with new register.
void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
                                       MachineInstr *MI, unsigned NewVReg,
                                       VirtRegMap &vrm) {
  // There is an implicit use. That means one of the other operands is
  // being remat'ed and the remat'ed instruction has li.reg as a
  // use operand. Make sure we rewrite that as well.
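  // For example (illustrative): suppose an operand of MI is marked
  // re-materialized and its recorded def instruction reads li.reg; once MI
  // switches to NewVReg, that recorded def must read NewVReg too, or the
  // eventual remat would reference a stale register.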
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
      continue;
    if (!vrm.isReMaterialized(Reg))
      continue;
    MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
    MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg);
    if (UseMO)
      UseMO->setReg(NewVReg);
  }
}
/// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
/// for addIntervalsForSpills to rewrite uses / defs for the given live range.
bool LiveIntervals::
rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
                 bool TrySplit, SlotIndex index, SlotIndex end,
                 MachineInstr *MI,
                 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                 unsigned Slot, int LdSlot,
                 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                 VirtRegMap &vrm,
                 const TargetRegisterClass* rc,
                 SmallVector<int, 4> &ReMatIds,
                 const MachineLoopInfo *loopInfo,
                 unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
                 DenseMap<unsigned,unsigned> &MBBVRegsMap,
                 std::vector<LiveInterval*> &NewLIs) {
  bool CanFold = false;
 RestartInstruction:
  for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
    MachineOperand& mop = MI->getOperand(i);
    if (!mop.isReg())
      continue;
    unsigned Reg = mop.getReg();
    unsigned RegI = Reg;
    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
      continue;
    if (Reg != li.reg)
      continue;

    bool TryFold = !DefIsReMat;
    bool FoldSS = true; // Default behavior unless it's a remat.
    int FoldSlot = Slot;
    if (DefIsReMat) {
      // If this is the rematerializable definition MI itself and
      // all of its uses are rematerialized, simply delete it.
      if (MI == ReMatOrigDefMI && CanDelete) {
        DEBUG(dbgs() << "\t\t\t\tErasing re-materializable def: "
                     << *MI << '\n');
        RemoveMachineInstrFromMaps(MI);
        vrm.RemoveMachineInstrFromMaps(MI);
        MI->eraseFromParent();
        break;
      }

      // If def for this use can't be rematerialized, then try folding.
      // If def is rematerializable and it's a load, also try folding.
      TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
      if (isLoad) {
        // Try fold loads (from stack slot, constant pool, etc.) into uses.
        FoldSS = isLoadSS;
        FoldSlot = LdSlot;
      }
    }

    // Scan all of the operands of this instruction rewriting operands
    // to use NewVReg instead of li.reg as appropriate. We do this for
    // two reasons:
    //
    //   1. If the instr reads the same spilled vreg multiple times, we
    //      want to reuse the NewVReg.
    //   2. If the instr is a two-addr instruction, we are required to
    //      keep the src/dst regs pinned.
    //
    // Keep track of whether we replace a use and/or def so that we can
    // create the spill interval with the appropriate range.
    HasUse = mop.isUse();
    HasDef = mop.isDef();
    SmallVector<unsigned, 2> Ops;
    Ops.push_back(i);
    for (unsigned j = i+1, e = MI->getNumOperands(); j != e; ++j) {
      const MachineOperand &MOj = MI->getOperand(j);
      if (!MOj.isReg())
        continue;
      unsigned RegJ = MOj.getReg();
      if (RegJ == 0 || TargetRegisterInfo::isPhysicalRegister(RegJ))
        continue;
      if (RegJ == RegI) {
        Ops.push_back(j);
        if (!MOj.isUndef()) {
          HasUse |= MOj.isUse();
          HasDef |= MOj.isDef();
        }
      }
    }

    // Create a new virtual register for the spill interval.
    // Create the new register now so we can map the fold instruction
    // to the new register so when it is unfolded we get the correct
    // spill weight.
    bool CreatedNewVReg = false;
    if (NewVReg == 0) {
      NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      CreatedNewVReg = true;

      // The new virtual register should get the same allocation hints as the
      // old one.
      std::pair<unsigned, unsigned> Hint = mri_->getRegAllocationHint(Reg);
      if (Hint.first || Hint.second)
        mri_->setRegAllocationHint(NewVReg, Hint.first, Hint.second);
    }

    if (!TryFold)
      CanFold = false;
    else {
      // Do not fold load / store here if we are splitting. We'll find an
      // optimal point to insert a load / store later.
      if (!TrySplit) {
        if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
                                 Ops, FoldSS, FoldSlot, NewVReg)) {
          // Folding the load/store can completely change the instruction in
          // unpredictable ways, rescan it from the beginning.

          if (FoldSS) {
            // We need to give the new vreg the same stack slot as the
            // spilled interval.
            vrm.assignVirt2StackSlot(NewVReg, FoldSlot);
          }

          HasUse = false;
          HasDef = false;
          CanFold = false;
          if (isNotInMIMap(MI))
            break;
          goto RestartInstruction;
        }
      } else {
        // We'll try to fold it later if it's profitable.
        CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat);
      }
    }

    mop.setReg(NewVReg);
    if (mop.isImplicit())
      rewriteImplicitOps(li, MI, NewVReg, vrm);

    // Reuse NewVReg for other reads.
    for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
      MachineOperand &mopj = MI->getOperand(Ops[j]);
      mopj.setReg(NewVReg);
      if (mopj.isImplicit())
        rewriteImplicitOps(li, MI, NewVReg, vrm);
    }

    if (CreatedNewVReg) {
      if (DefIsReMat) {
        vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI);
        if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
          // Each valnum may have its own remat id.
          ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
        } else {
          vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
        }
        if (!CanDelete || (HasUse && HasDef)) {
          // If this is a two-addr instruction then its use operands are
          // rematerializable but its def is not. It should be assigned a
          // stack slot.
          vrm.assignVirt2StackSlot(NewVReg, Slot);
        }
      } else {
        vrm.assignVirt2StackSlot(NewVReg, Slot);
      }
    } else if (HasUse && HasDef &&
               vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
      // If this interval hasn't been assigned a stack slot (because earlier
      // def is a deleted remat def), do it now.
      assert(Slot != VirtRegMap::NO_STACK_SLOT);
      vrm.assignVirt2StackSlot(NewVReg, Slot);
    }

    // Re-matting an instruction with virtual register use. Add the
    // register as an implicit use on the use MI.
    if (DefIsReMat && ImpUse)
      MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));

    // Create a new register interval for this spill / remat.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    if (CreatedNewVReg) {
      NewLIs.push_back(&nI);
      MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
      if (TrySplit)
        vrm.setIsSplitFromReg(NewVReg, li.reg);
    }

    if (HasUse) {
      if (CreatedNewVReg) {
        LiveRange LR(index.getLoadIndex(), index.getDefIndex(),
                     nI.getNextValue(SlotIndex(), 0, false, VNInfoAllocator));
        DEBUG(dbgs() << " +" << LR);
        nI.addRange(LR);
      } else {
        // Extend the split live interval to this def / use.
        SlotIndex End = index.getDefIndex();
        LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
                     nI.getValNumInfo(nI.getNumValNums()-1));
        DEBUG(dbgs() << " +" << LR);
        nI.addRange(LR);
      }
    }
    if (HasDef) {
      LiveRange LR(index.getDefIndex(), index.getStoreIndex(),
                   nI.getNextValue(SlotIndex(), 0, false, VNInfoAllocator));
      DEBUG(dbgs() << " +" << LR);
      nI.addRange(LR);
    }

    DEBUG({
        dbgs() << "\t\t\t\tAdded new interval: ";
        nI.print(dbgs(), tri_);
        dbgs() << '\n';
      });
  }
  return CanFold;
}
bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
                                         const VNInfo *VNI,
                                         MachineBasicBlock *MBB,
                                         SlotIndex Idx) const {
  SlotIndex End = getMBBEndIdx(MBB);
  for (unsigned j = 0, ee = VNI->kills.size(); j != ee; ++j) {
    if (VNI->kills[j].isPHI())
      continue;

    SlotIndex KillIdx = VNI->kills[j];
    if (KillIdx > Idx && KillIdx <= End)
      return true;
  }
  return false;
}
/// RewriteInfo - Keep track of machine instrs that will be rewritten
/// during spilling.
namespace {
  struct RewriteInfo {
    SlotIndex Index;
    MachineInstr *MI;
    bool HasUse;
    bool HasDef;
    RewriteInfo(SlotIndex i, MachineInstr *mi, bool u, bool d)
      : Index(i), MI(mi), HasUse(u), HasDef(d) {}
  };

  struct RewriteInfoCompare {
    bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
      return LHS.Index < RHS.Index;
    }
  };
}
void LiveIntervals::
rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
                    LiveInterval::Ranges::const_iterator &I,
                    MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                    unsigned Slot, int LdSlot,
                    bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                    VirtRegMap &vrm,
                    const TargetRegisterClass* rc,
                    SmallVector<int, 4> &ReMatIds,
                    const MachineLoopInfo *loopInfo,
                    BitVector &SpillMBBs,
                    DenseMap<unsigned, std::vector<SRInfo> > &SpillIdxes,
                    BitVector &RestoreMBBs,
                    DenseMap<unsigned, std::vector<SRInfo> > &RestoreIdxes,
                    DenseMap<unsigned,unsigned> &MBBVRegsMap,
                    std::vector<LiveInterval*> &NewLIs) {
  bool AllCanFold = true;
  unsigned NewVReg = 0;
  SlotIndex start = I->start.getBaseIndex();
  SlotIndex end = I->end.getPrevSlot().getBaseIndex().getNextIndex();

  // First collect all the def / use in this live range that will be rewritten.
  // Make sure they are sorted according to instruction index.
  std::vector<RewriteInfo> RewriteMIs;
  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
         re = mri_->reg_end(); ri != re; ) {
    MachineInstr *MI = &*ri;
    MachineOperand &O = ri.getOperand();
    ++ri;
    if (MI->isDebugValue()) {
      // Remove debug info for now.
      O.setReg(0U);
      DEBUG(dbgs() << "Removing debug info due to spill:" << "\t" << *MI);
      continue;
    }
    assert(!O.isImplicit() && "Spilling register that's used as implicit use?");
    SlotIndex index = getInstructionIndex(MI);
    if (index < start || index >= end)
      continue;

    if (O.isUndef())
      // Must be defined by an implicit def. It should not be spilled. Note,
      // this is for correctness reasons. e.g.
      // 8   %reg1024<def> = IMPLICIT_DEF
      // 12  %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
      // The live range [12, 14) is not part of the r1024 live interval since
      // it's defined by an implicit def. It will not conflict with the live
      // interval of r1025. Now suppose both registers are spilled, you can
      // easily see a situation where both registers are reloaded before
      // the INSERT_SUBREG and both target registers that would overlap.
      continue;
    RewriteMIs.push_back(RewriteInfo(index, MI, O.isUse(), O.isDef()));
  }
  std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());

  unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
  // Now rewrite the defs and uses.
  for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
    RewriteInfo &rwi = RewriteMIs[i];
    ++i;
    SlotIndex index = rwi.Index;
    bool MIHasUse = rwi.HasUse;
    bool MIHasDef = rwi.HasDef;
    MachineInstr *MI = rwi.MI;
    // If MI defs and/or uses the same register multiple times, then there
    // are multiple entries.
    unsigned NumUses = MIHasUse;
    while (i != e && RewriteMIs[i].MI == MI) {
      assert(RewriteMIs[i].Index == index);
      bool isUse = RewriteMIs[i].HasUse;
      if (isUse) ++NumUses;
      MIHasUse |= isUse;
      MIHasDef |= RewriteMIs[i].HasDef;
      ++i;
    }
    MachineBasicBlock *MBB = MI->getParent();

    if (ImpUse && MI != ReMatDefMI) {
      // Re-matting an instruction with virtual register use. Prevent interval
      // from being spilled.
      getInterval(ImpUse).markNotSpillable();
    }

    unsigned MBBId = MBB->getNumber();
    unsigned ThisVReg = 0;
    if (TrySplit) {
      DenseMap<unsigned,unsigned>::iterator NVI = MBBVRegsMap.find(MBBId);
      if (NVI != MBBVRegsMap.end()) {
        ThisVReg = NVI->second;
        // One common case:
        //   x = use
        //   ...
        //   def = ...
        //       = use
        // It's better to start a new interval to avoid artificially
        // extending the new interval.
        if (MIHasDef && !MIHasUse) {
          MBBVRegsMap.erase(MBB->getNumber());
          ThisVReg = 0;
        }
      }
    }

    bool IsNew = ThisVReg == 0;
    if (IsNew) {
      // This ends the previous live interval. If all of its def / use
      // can be folded, give it a low spill weight.
      if (NewVReg && TrySplit && AllCanFold) {
        LiveInterval &nI = getOrCreateInterval(NewVReg);
        nI.weight /= 10.0F;
      }
      AllCanFold = true;
    }
    NewVReg = ThisVReg;
    bool HasDef = false;
    bool HasUse = false;
    bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
                         index, end, MI, ReMatOrigDefMI, ReMatDefMI,
                         Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                         CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
                         ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs);
    if (!HasDef && !HasUse)
      continue;

    AllCanFold &= CanFold;

    // Update weight of spill interval.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    if (!TrySplit) {
      // The spill weight is now infinity as it cannot be spilled again.
      nI.markNotSpillable();
      continue;
    }
    // Keep track of the last def and first use in each MBB.
    if (HasDef) {
      if (MI != ReMatOrigDefMI || !CanDelete) {
        bool HasKill = false;
        if (!HasUse)
          HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, index.getDefIndex());
        else {
          // If this is a two-address code, then this index starts a new
          // VNInfo.
          const VNInfo *VNI = li.findDefinedVNInfoForRegInt(index.getDefIndex());
          if (VNI)
            HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, index.getDefIndex());
        }
        DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
          SpillIdxes.find(MBBId);
        if (!HasKill) {
          if (SII == SpillIdxes.end()) {
            std::vector<SRInfo> S;
            S.push_back(SRInfo(index, NewVReg, true));
            SpillIdxes.insert(std::make_pair(MBBId, S));
          } else if (SII->second.back().vreg != NewVReg) {
            SII->second.push_back(SRInfo(index, NewVReg, true));
          } else if (index > SII->second.back().index) {
            // If there is an earlier def and this is a two-address
            // instruction, then it's not possible to fold the store (which
            // would also fold the load).
            SRInfo &Info = SII->second.back();
            Info.index = index;
            Info.canFold = !HasUse;
          }
          SpillMBBs.set(MBBId);
        } else if (SII != SpillIdxes.end() &&
                   SII->second.back().vreg == NewVReg &&
                   index > SII->second.back().index) {
          // There is an earlier def that's not killed (must be two-address).
          // The spill is no longer needed.
          SII->second.pop_back();
          if (SII->second.empty()) {
            SpillIdxes.erase(MBBId);
            SpillMBBs.reset(MBBId);
          }
        }
      }
    }

    if (HasUse) {
      DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
        SpillIdxes.find(MBBId);
      if (SII != SpillIdxes.end() &&
          SII->second.back().vreg == NewVReg &&
          index > SII->second.back().index)
        // Use(s) following the last def, it's not safe to fold the spill.
        SII->second.back().canFold = false;
      DenseMap<unsigned, std::vector<SRInfo> >::iterator RII =
        RestoreIdxes.find(MBBId);
      if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
        // If we are splitting live intervals, only fold if it's the first
        // use and there isn't another use later in the MBB.
        RII->second.back().canFold = false;
      else if (IsNew) {
        // Only need a reload if there isn't an earlier def / use.
        if (RII == RestoreIdxes.end()) {
          std::vector<SRInfo> Infos;
          Infos.push_back(SRInfo(index, NewVReg, true));
          RestoreIdxes.insert(std::make_pair(MBBId, Infos));
        } else
          RII->second.push_back(SRInfo(index, NewVReg, true));
        RestoreMBBs.set(MBBId);
      }
    }

    // Update spill weight.
    unsigned loopDepth = loopInfo->getLoopDepth(MBB);
    nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
  }
  if (NewVReg && TrySplit && AllCanFold) {
    // If all of its def / use can be folded, give it a low spill weight.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    nI.weight /= 10.0F;
  }
}
bool LiveIntervals::alsoFoldARestore(int Id, SlotIndex index,
                                     unsigned vr, BitVector &RestoreMBBs,
                                     DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
  if (!RestoreMBBs[Id])
    return false;
  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
    if (Restores[i].index == index &&
        Restores[i].vreg == vr &&
        Restores[i].canFold)
      return true;
  return false;
}

void LiveIntervals::eraseRestoreInfo(int Id, SlotIndex index,
                                     unsigned vr, BitVector &RestoreMBBs,
                                     DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
  if (!RestoreMBBs[Id])
    return;
  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
    if (Restores[i].index == index && Restores[i].vreg == vr)
      Restores[i].index = SlotIndex();
}
/// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being
/// spilled and create empty intervals for their uses.
void
LiveIntervals::handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm,
                                    const TargetRegisterClass* rc,
                                    std::vector<LiveInterval*> &NewLIs) {
  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
         re = mri_->reg_end(); ri != re; ) {
    MachineOperand &O = ri.getOperand();
    MachineInstr *MI = &*ri;
    ++ri;
    if (MI->isDebugValue()) {
      // Remove debug info for now.
      O.setReg(0U);
      DEBUG(dbgs() << "Removing debug info due to spill:" << "\t" << *MI);
      continue;
    }
    if (O.isDef()) {
      assert(MI->isImplicitDef() &&
             "Register def was not rewritten?");
      RemoveMachineInstrFromMaps(MI);
      vrm.RemoveMachineInstrFromMaps(MI);
      MI->eraseFromParent();
    } else {
      // This must be a use of an implicit_def so it's not part of the live
      // interval. Create a new empty live interval for it.
      // FIXME: Can we simply erase some of the instructions? e.g. Stores?
      unsigned NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      vrm.setIsImplicitlyDefined(NewVReg);
      NewLIs.push_back(&getOrCreateInterval(NewVReg));
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand &MO = MI->getOperand(i);
        if (MO.isReg() && MO.getReg() == li.reg) {
          MO.setReg(NewVReg);
          MO.setIsUndef();
          MO.setIsKill(false);
        }
      }
    }
  }
}
float
LiveIntervals::getSpillWeight(bool isDef, bool isUse, unsigned loopDepth) {
  // Limit the loop depth ridiculousness.
  if (loopDepth > 200)
    loopDepth = 200;

  // The loop depth is used to roughly estimate the number of times the
  // instruction is executed. Something like 10^d is simple, but will quickly
  // overflow a float. This expression behaves like 10^d for small d, but is
  // more tempered for large d. At d=200 we get 6.7e33 which leaves a bit of
  // headroom before overflow.
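  // Sample values (approximate): d=0 -> 1.0, d=1 -> ~10.1, d=2 -> ~87,
  // d=3 -> ~657, d=200 -> ~6.7e33.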
  float lc = powf(1 + (100.0f / (loopDepth+10)), (float)loopDepth);

  return (isDef + isUse) * lc;
}

void
LiveIntervals::normalizeSpillWeights(std::vector<LiveInterval*> &NewLIs) {
  for (unsigned i = 0, e = NewLIs.size(); i != e; ++i)
    normalizeSpillWeight(*NewLIs[i]);
}
std::vector<LiveInterval*> LiveIntervals::
addIntervalsForSpillsFast(const LiveInterval &li,
                          const MachineLoopInfo *loopInfo,
                          VirtRegMap &vrm) {
  unsigned slot = vrm.assignVirt2StackSlot(li.reg);

  std::vector<LiveInterval*> added;

  assert(li.isSpillable() && "attempt to spill already spilled interval!");

  DEBUG({
      dbgs() << "\t\t\t\tadding intervals for spills for interval: ";
      li.dump();
      dbgs() << '\n';
    });

  const TargetRegisterClass* rc = mri_->getRegClass(li.reg);

  MachineRegisterInfo::reg_iterator RI = mri_->reg_begin(li.reg);
  while (RI != mri_->reg_end()) {
    MachineInstr* MI = &*RI;

    SmallVector<unsigned, 2> Indices;
    bool HasUse = false;
    bool HasDef = false;

    for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
      MachineOperand& mop = MI->getOperand(i);
      if (!mop.isReg() || mop.getReg() != li.reg) continue;

      HasUse |= MI->getOperand(i).isUse();
      HasDef |= MI->getOperand(i).isDef();

      Indices.push_back(i);
    }

    if (!tryFoldMemoryOperand(MI, vrm, NULL, getInstructionIndex(MI),
                              Indices, true, slot, li.reg)) {
      unsigned NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      vrm.assignVirt2StackSlot(NewVReg, slot);

      // create a new register for this spill
      LiveInterval &nI = getOrCreateInterval(NewVReg);
      nI.markNotSpillable();

      // Rewrite register operands to use the new vreg.
      for (SmallVectorImpl<unsigned>::iterator I = Indices.begin(),
             E = Indices.end(); I != E; ++I) {
        MI->getOperand(*I).setReg(NewVReg);

        if (MI->getOperand(*I).isUse())
          MI->getOperand(*I).setIsKill(true);
      }

      // Fill in the new live interval.
      SlotIndex index = getInstructionIndex(MI);
      if (HasUse) {
        LiveRange LR(index.getLoadIndex(), index.getUseIndex(),
                     nI.getNextValue(SlotIndex(), 0, false,
                                     getVNInfoAllocator()));
        DEBUG(dbgs() << " +" << LR);
        nI.addRange(LR);
        vrm.addRestorePoint(NewVReg, MI);
      }
      if (HasDef) {
        LiveRange LR(index.getDefIndex(), index.getStoreIndex(),
                     nI.getNextValue(SlotIndex(), 0, false,
                                     getVNInfoAllocator()));
        DEBUG(dbgs() << " +" << LR);
        nI.addRange(LR);
        vrm.addSpillPoint(NewVReg, true, MI);
      }

      added.push_back(&nI);

      DEBUG({
          dbgs() << "\t\t\t\tadded new interval: ";
          nI.dump();
          dbgs() << '\n';
        });
    }

    RI = mri_->reg_begin(li.reg);
  }

  return added;
}
std::vector<LiveInterval*> LiveIntervals::
addIntervalsForSpills(const LiveInterval &li,
                      SmallVectorImpl<LiveInterval*> &SpillIs,
                      const MachineLoopInfo *loopInfo, VirtRegMap &vrm) {
  if (EnableFastSpilling)
    return addIntervalsForSpillsFast(li, loopInfo, vrm);

  assert(li.isSpillable() && "attempt to spill already spilled interval!");

  DEBUG({
      dbgs() << "\t\t\t\tadding intervals for spills for interval: ";
      li.print(dbgs(), tri_);
      dbgs() << '\n';
    });

  // Each bit specifies whether a spill is required in the MBB.
  BitVector SpillMBBs(mf_->getNumBlockIDs());
  DenseMap<unsigned, std::vector<SRInfo> > SpillIdxes;
  BitVector RestoreMBBs(mf_->getNumBlockIDs());
  DenseMap<unsigned, std::vector<SRInfo> > RestoreIdxes;
  DenseMap<unsigned,unsigned> MBBVRegsMap;
  std::vector<LiveInterval*> NewLIs;
  const TargetRegisterClass* rc = mri_->getRegClass(li.reg);

  unsigned NumValNums = li.getNumValNums();
  SmallVector<MachineInstr*, 4> ReMatDefs;
  ReMatDefs.resize(NumValNums, NULL);
  SmallVector<MachineInstr*, 4> ReMatOrigDefs;
  ReMatOrigDefs.resize(NumValNums, NULL);
  SmallVector<int, 4> ReMatIds;
  ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
  BitVector ReMatDelete(NumValNums);
  unsigned Slot = VirtRegMap::MAX_STACK_SLOT;
  // Spilling a split live interval. It cannot be split any further. Also,
  // it's guaranteed to be a single val# / range interval.
  if (vrm.getPreSplitReg(li.reg)) {
    vrm.setIsSplitFromReg(li.reg, 0);
    // Unset the split kill marker on the last use.
    SlotIndex KillIdx = vrm.getKillPoint(li.reg);
    if (KillIdx != SlotIndex()) {
      MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
      assert(KillMI && "Last use disappeared?");
      int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
      assert(KillOp != -1 && "Last use disappeared?");
      KillMI->getOperand(KillOp).setIsKill(false);
    }
    vrm.removeKillPoint(li.reg);
    bool DefIsReMat = vrm.isReMaterialized(li.reg);
    Slot = vrm.getStackSlot(li.reg);
    assert(Slot != VirtRegMap::MAX_STACK_SLOT);
    MachineInstr *ReMatDefMI = DefIsReMat ?
      vrm.getReMaterializedMI(li.reg) : NULL;
    int LdSlot = 0;
    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
    bool isLoad = isLoadSS ||
      (DefIsReMat && (ReMatDefMI->getDesc().canFoldAsLoad()));
    bool IsFirstRange = true;
    for (LiveInterval::Ranges::const_iterator
           I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
      // If this is a split live interval with multiple ranges, it means there
      // are two-address instructions that re-defined the value. Only the
      // first def can be rematerialized!
      if (IsFirstRange) {
        // Note ReMatOrigDefMI has already been deleted.
        rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
                             Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                             false, vrm, rc, ReMatIds, loopInfo,
                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                             MBBVRegsMap, NewLIs);
      } else {
        rewriteInstructionsForSpills(li, false, I, NULL, 0,
                             Slot, 0, false, false, false,
                             false, vrm, rc, ReMatIds, loopInfo,
                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                             MBBVRegsMap, NewLIs);
      }
      IsFirstRange = false;
    }

    handleSpilledImpDefs(li, vrm, rc, NewLIs);
    normalizeSpillWeights(NewLIs);
    return NewLIs;
  }
  bool TrySplit = !intervalIsInOneMBB(li);
  if (TrySplit)
    ++numSplits;
  bool NeedStackSlot = false;
  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
       i != e; ++i) {
    const VNInfo *VNI = *i;
    unsigned VN = VNI->id;
    if (VNI->isUnused())
      continue; // Dead val#.
    // Is the def for the val# rematerializable?
    MachineInstr *ReMatDefMI = VNI->isDefAccurate()
      ? getInstructionFromIndex(VNI->def) : 0;
    bool dummy;
    if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, SpillIs, dummy)) {
      // Remember how to remat the def of this val#.
      ReMatOrigDefs[VN] = ReMatDefMI;
      // Original def may be modified so we have to make a copy here.
      MachineInstr *Clone = mf_->CloneMachineInstr(ReMatDefMI);
      CloneMIs.push_back(Clone);
      ReMatDefs[VN] = Clone;

      bool CanDelete = true;
      if (VNI->hasPHIKill()) {
        // A kill is a phi node, not all of its uses can be rematerialized.
        // It must not be deleted.
        CanDelete = false;
        // Need a stack slot if there is any live range where uses cannot be
        // rematerialized.
        NeedStackSlot = true;
      }
      if (CanDelete)
        ReMatDelete.set(VN);
    } else {
      // Need a stack slot if there is any live range where uses cannot be
      // rematerialized.
      NeedStackSlot = true;
    }
  }

  // One stack slot per live interval.
  if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0) {
    if (vrm.getStackSlot(li.reg) == VirtRegMap::NO_STACK_SLOT)
      Slot = vrm.assignVirt2StackSlot(li.reg);

    // This case only occurs when the prealloc splitter has already assigned
    // a stack slot to this vreg.
    else
      Slot = vrm.getStackSlot(li.reg);
  }

  // Create new intervals and rewrite defs and uses.
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
    MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
    bool DefIsReMat = ReMatDefMI != NULL;
    bool CanDelete = ReMatDelete[I->valno->id];
    int LdSlot = 0;
    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
    bool isLoad = isLoadSS ||
      (DefIsReMat && ReMatDefMI->getDesc().canFoldAsLoad());
    rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
                                 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                                 CanDelete, vrm, rc, ReMatIds, loopInfo,
                                 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                                 MBBVRegsMap, NewLIs);
  }

  // If we are not splitting, we are done; otherwise insert spills / restores.
  if (!TrySplit) {
    handleSpilledImpDefs(li, vrm, rc, NewLIs);
    normalizeSpillWeights(NewLIs);
    return NewLIs;
  }
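
  // When splitting, spill and restore points were recorded per block by
  // rewriteInstructionsForSpills. Each SlotIndex addresses four sub-slots
  // per instruction (load, use, def, store); the getLoadIndex / getDefIndex /
  // getStoreIndex calls below select between them. First try to fold each
  // store into the defining instruction before asking the spiller to emit
  // an explicit spill.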
  SmallPtrSet<LiveInterval*, 4> AddedKill;
  SmallVector<unsigned, 2> Ops;
  if (NeedStackSlot) {
    int Id = SpillMBBs.find_first();
    while (Id != -1) {
      std::vector<SRInfo> &spills = SpillIdxes[Id];
      for (unsigned i = 0, e = spills.size(); i != e; ++i) {
        SlotIndex index = spills[i].index;
        unsigned VReg = spills[i].vreg;
        LiveInterval &nI = getOrCreateInterval(VReg);
        bool isReMat = vrm.isReMaterialized(VReg);
        MachineInstr *MI = getInstructionFromIndex(index);
        bool CanFold = false;
        bool FoundUse = false;
        Ops.clear();
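        // Gather the operand indices on MI that refer to VReg and decide
        // whether the store can be folded into MI; rematerialized defs, and
        // two-address uses that cannot also fold their restore, cannot be.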
        if (spills[i].canFold) {
          CanFold = true;
          for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
            MachineOperand &MO = MI->getOperand(j);
            if (!MO.isReg() || MO.getReg() != VReg)
              continue;

            Ops.push_back(j);
            if (MO.isDef())
              continue;
            if (isReMat ||
                (!FoundUse && !alsoFoldARestore(Id, index, VReg,
                                                RestoreMBBs, RestoreIdxes))) {
              // MI has two-address uses of the same register. If the use
              // isn't the first and only use in the BB, then we can't fold
              // it. FIXME: Move this to rewriteInstructionsForSpills.
              CanFold = false;
              break;
            }
            FoundUse = true;
          }
        }
        // Fold the store into the def if possible.
        bool Folded = false;
        if (CanFold && !Ops.empty()) {
          if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot, VReg)) {
            Folded = true;
            if (FoundUse) {
              // Also folded uses, do not issue a load.
              eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
              nI.removeRange(index.getLoadIndex(), index.getDefIndex());
            }
            nI.removeRange(index.getDefIndex(), index.getStoreIndex());
          }
        }

        // Otherwise tell the spiller to issue a spill.
        if (!Folded) {
          LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
          bool isKill = LR->end == index.getStoreIndex();
          if (!MI->registerDefIsDead(nI.reg))
            // No need to spill a dead def.
            vrm.addSpillPoint(VReg, isKill, MI);
          if (isKill)
            AddedKill.insert(&nI);
        }
      }
      Id = SpillMBBs.find_next(Id);
    }
  }
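
  // Now place the restore points (entries invalidated by eraseRestoreInfo
  // carry a null index and are skipped), folding the reload, or the
  // rematerialized def when it is itself a load, into the using instruction
  // whenever possible.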
  int Id = RestoreMBBs.find_first();
  while (Id != -1) {
    std::vector<SRInfo> &restores = RestoreIdxes[Id];
    for (unsigned i = 0, e = restores.size(); i != e; ++i) {
      SlotIndex index = restores[i].index;
      if (index == SlotIndex())
        continue;
      unsigned VReg = restores[i].vreg;
      LiveInterval &nI = getOrCreateInterval(VReg);
      bool isReMat = vrm.isReMaterialized(VReg);
      MachineInstr *MI = getInstructionFromIndex(index);
      bool CanFold = false;
      Ops.clear();
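      // Gather the use operand indices on MI that refer to VReg; seeing a
      // def here means any fold opportunity was already taken.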
      if (restores[i].canFold) {
        CanFold = true;
        for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
          MachineOperand &MO = MI->getOperand(j);
          if (!MO.isReg() || MO.getReg() != VReg)
            continue;

          if (MO.isDef()) {
            // If this restore were to be folded, it would have been folded
            // already.
            CanFold = false;
            break;
          }
          Ops.push_back(j);
        }
      }

      // Fold the load into the use if possible.
      bool Folded = false;
      if (CanFold && !Ops.empty()) {
        if (!isReMat)
          Folded = tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true,
                                        Slot, VReg);
        else {
          MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
          int LdSlot = 0;
          bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
          // If the rematerializable def is a load, also try to fold it.
          if (isLoadSS || ReMatDefMI->getDesc().canFoldAsLoad())
            Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
                                          Ops, isLoadSS, LdSlot, VReg);

          unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
          if (ImpUse) {
            // Re-matting an instruction with virtual register use. Add the
            // register as an implicit use on the use MI and mark the register
            // interval as unspillable.
            LiveInterval &ImpLi = getInterval(ImpUse);
            ImpLi.markNotSpillable();
            MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
          }
        }
      }
      // If folding is not possible / failed, then tell the spiller to issue a
      // load / rematerialization for us.
      if (Folded)
        nI.removeRange(index.getLoadIndex(), index.getDefIndex());
      else
        vrm.addRestorePoint(VReg, MI);
    }
    Id = RestoreMBBs.find_next(Id);
  }

  // Finalize intervals: add kills, finalize spill weights, and filter out
  // dead intervals.
  std::vector<LiveInterval*> RetNewLIs;
  for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
    LiveInterval *LI = NewLIs[i];
    if (!LI->empty()) {
      LI->weight /= SlotIndex::NUM * getApproximateInstructionCount(*LI);
      if (!AddedKill.count(LI)) {
        LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
        SlotIndex LastUseIdx = LR->end.getBaseIndex();
        MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
        int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
        assert(UseIdx != -1);
        if (!LastUse->isRegTiedToDefOperand(UseIdx)) {
          LastUse->getOperand(UseIdx).setIsKill();
          vrm.addKillPoint(LI->reg, LastUseIdx);
        }
      }
      RetNewLIs.push_back(LI);
    }
  }

  handleSpilledImpDefs(li, vrm, rc, RetNewLIs);
  normalizeSpillWeights(RetNewLIs);
  return RetNewLIs;
}

/// hasAllocatableSuperReg - Return true if the specified physical register
/// has any super register that's allocatable.
bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS)
    if (allocatableRegs_[*AS] && hasInterval(*AS))
      return true;
  return false;
}

/// getRepresentativeReg - Find the largest super register of the specified
/// physical register.
unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
  // Find the largest super-register that is allocatable.
  unsigned BestReg = Reg;
  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
    unsigned SuperReg = *AS;
    if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) {
      BestReg = SuperReg;
      break;
    }
  }
  return BestReg;
}
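
// For example, on an X86-like target this would typically map AX up to EAX
// or RAX: the widest super-register that has a live interval and no
// allocatable super-register of its own. The exact answer depends on the
// target's register hierarchy.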

/// getNumConflictsWithPhysReg - Return the number of uses and defs of the
/// specified interval that conflict with the specified physical register.
unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
                                                   unsigned PhysReg) const {
  unsigned NumConflicts = 0;
  const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg));
  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
         E = mri_->reg_end(); I != E; ++I) {
    MachineOperand &O = I.getOperand();
    MachineInstr *MI = O.getParent();
    if (MI->isDebugValue())
      continue;
    SlotIndex Index = getInstructionIndex(MI);
    if (pli.liveAt(Index))
      ++NumConflicts;
  }
  return NumConflicts;
}

/// spillPhysRegAroundRegDefsUses - Spill the specified physical register
/// around all defs and uses of the specified interval. Return true if it
/// was able to cut its interval.
bool LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
                                                  unsigned PhysReg,
                                                  VirtRegMap &vrm) {
  unsigned SpillReg = getRepresentativeReg(PhysReg);

  for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
    // If there is a register which aliases PhysReg but is not a sub-register
    // of the chosen representative super-register, assert, since we can't
    // handle that case yet.
    assert(*AS == SpillReg || !allocatableRegs_[*AS] || !hasInterval(*AS) ||
           tri_->isSuperRegister(*AS, SpillReg));
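
  // Collect the physical registers whose intervals have to be cut around
  // li: the representative register itself when it has an interval,
  // otherwise its live sub-registers.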
  bool Cut = false;
  SmallVector<unsigned, 4> PRegs;
  if (hasInterval(SpillReg))
    PRegs.push_back(SpillReg);
  else {
    SmallSet<unsigned, 4> Added;
    for (const unsigned* AS = tri_->getSubRegisters(SpillReg); *AS; ++AS)
      if (Added.insert(*AS) && hasInterval(*AS)) {
        PRegs.push_back(*AS);
        for (const unsigned* ASS = tri_->getSubRegisters(*AS); *ASS; ++ASS)
          Added.insert(*ASS);
      }
  }

  SmallPtrSet<MachineInstr*, 8> SeenMIs;
  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
         E = mri_->reg_end(); I != E; ++I) {
    MachineOperand &O = I.getOperand();
    MachineInstr *MI = O.getParent();
    if (MI->isDebugValue() || SeenMIs.count(MI))
      continue;
    SeenMIs.insert(MI);
    SlotIndex Index = getInstructionIndex(MI);
    for (unsigned i = 0, e = PRegs.size(); i != e; ++i) {
      unsigned PReg = PRegs[i];
      LiveInterval &pli = getInterval(PReg);
      if (!pli.liveAt(Index))
        continue;
      vrm.addEmergencySpill(PReg, MI);
      SlotIndex StartIdx = Index.getLoadIndex();
      SlotIndex EndIdx = Index.getNextIndex().getBaseIndex();
      if (pli.isInOneLiveRange(StartIdx, EndIdx)) {
        pli.removeRange(StartIdx, EndIdx);
        Cut = true;
      } else {
        std::string msg;
        raw_string_ostream Msg(msg);
        Msg << "Ran out of registers during register allocation!";
        if (MI->isInlineAsm()) {
          Msg << "\nPlease check your inline asm statement for invalid "
              << "constraints:\n";
          MI->print(Msg, tm_);
        }
        llvm_report_error(Msg.str());
      }
      for (const unsigned* AS = tri_->getSubRegisters(PReg); *AS; ++AS) {
        if (!hasInterval(*AS))
          continue;
        LiveInterval &spli = getInterval(*AS);
        if (spli.liveAt(Index))
          spli.removeRange(Index.getLoadIndex(),
                           Index.getNextIndex().getBaseIndex());
      }
    }
  }
  return Cut;
}
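
/// addLiveRangeToEndOfBlock - Given a register and an instruction, adds a
/// live range from that instruction to the end of its MBB.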
LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg,
                                                  MachineInstr* startInst) {
  LiveInterval& Interval = getOrCreateInterval(reg);
  VNInfo* VN = Interval.getNextValue(
    SlotIndex(getInstructionIndex(startInst).getDefIndex()),
    startInst, true, getVNInfoAllocator());
  VN->setHasPHIKill(true);
  VN->kills.push_back(indexes_->getTerminatorGap(startInst->getParent()));
  LiveRange LR(
     SlotIndex(getInstructionIndex(startInst).getDefIndex()),
     getMBBEndIdx(startInst->getParent()), VN);
  Interval.addRange(LR);

  return LR;
}