1 //===-- LiveIntervalAnalysis.cpp - Live Interval Analysis -----------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file implements the LiveInterval analysis pass which is used
11 // by the Linear Scan Register allocator. This pass linearizes the
12 // basic blocks of the function in DFS order and uses the
13 // LiveVariables pass to conservatively compute live intervals for
14 // each virtual and physical register.
16 //===----------------------------------------------------------------------===//
18 #define DEBUG_TYPE "liveintervals"
19 #include "llvm/CodeGen/LiveIntervalAnalysis.h"
20 #include "VirtRegMap.h"
21 #include "llvm/Value.h"
22 #include "llvm/Analysis/AliasAnalysis.h"
23 #include "llvm/CodeGen/LiveVariables.h"
24 #include "llvm/CodeGen/MachineFrameInfo.h"
25 #include "llvm/CodeGen/MachineInstr.h"
26 #include "llvm/CodeGen/MachineInstrBuilder.h"
27 #include "llvm/CodeGen/MachineLoopInfo.h"
28 #include "llvm/CodeGen/MachineMemOperand.h"
29 #include "llvm/CodeGen/MachineRegisterInfo.h"
30 #include "llvm/CodeGen/Passes.h"
31 #include "llvm/CodeGen/ProcessImplicitDefs.h"
32 #include "llvm/Target/TargetRegisterInfo.h"
33 #include "llvm/Target/TargetInstrInfo.h"
34 #include "llvm/Target/TargetMachine.h"
35 #include "llvm/Target/TargetOptions.h"
36 #include "llvm/Support/CommandLine.h"
37 #include "llvm/Support/Debug.h"
38 #include "llvm/Support/ErrorHandling.h"
39 #include "llvm/Support/raw_ostream.h"
40 #include "llvm/ADT/DepthFirstIterator.h"
41 #include "llvm/ADT/SmallSet.h"
42 #include "llvm/ADT/Statistic.h"
43 #include "llvm/ADT/STLExtras.h"
// Hidden options for help debugging.

// Debugging aid: when set, spilled values are never rematerialized at their
// use points (always reloaded from the stack slot instead).
// NOTE(review): semantics inferred from the name -- confirm at use sites,
// which are not visible in this chunk.
static cl::opt<bool> DisableReMat("disable-rematerialization",
                                  cl::init(false), cl::Hidden);

// Debugging aid: presumably selects a faster, less thorough spilling code
// path. NOTE(review): confirm at the option's use site (not in this chunk).
static cl::opt<bool> EnableFastSpilling("fast-spill",
                                        cl::init(false), cl::Hidden);

// Pass statistics, reported with -stats.
STATISTIC(numIntervals , "Number of original intervals");
STATISTIC(numFolds    , "Number of loads/stores folded into instructions");
STATISTIC(numSplits   , "Number of intervals split");

// Pass identification and registration under the name "liveintervals".
char LiveIntervals::ID = 0;
static RegisterPass<LiveIntervals> X("liveintervals", "Live Interval Analysis");
63 void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
65 AU.addRequired<AliasAnalysis>();
66 AU.addPreserved<AliasAnalysis>();
67 AU.addPreserved<LiveVariables>();
68 AU.addRequired<LiveVariables>();
69 AU.addPreservedID(MachineLoopInfoID);
70 AU.addPreservedID(MachineDominatorsID);
73 AU.addPreservedID(PHIEliminationID);
74 AU.addRequiredID(PHIEliminationID);
77 AU.addRequiredID(TwoAddressInstructionPassID);
78 AU.addPreserved<ProcessImplicitDefs>();
79 AU.addRequired<ProcessImplicitDefs>();
80 AU.addPreserved<SlotIndexes>();
81 AU.addRequiredTransitive<SlotIndexes>();
82 MachineFunctionPass::getAnalysisUsage(AU);
85 void LiveIntervals::releaseMemory() {
86 // Free the live intervals themselves.
87 for (DenseMap<unsigned, LiveInterval*>::iterator I = r2iMap_.begin(),
88 E = r2iMap_.end(); I != E; ++I)
93 // Release VNInfo memroy regions after all VNInfo objects are dtor'd.
94 VNInfoAllocator.Reset();
95 while (!CloneMIs.empty()) {
96 MachineInstr *MI = CloneMIs.back();
98 mf_->DeleteMachineInstr(MI);
/// runOnMachineFunction - Register allocate the whole function
///
/// Caches pointers to the target hooks and sibling analyses used throughout
/// this file, then computes live intervals. NOTE(review): this chunk appears
/// truncated -- the computeIntervals() call and the return are not visible.
bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
  // NOTE(review): mf_ is read here but the assignment 'mf_ = &fn;' is not
  // visible in this chunk -- confirm it is set before this use.
  mri_ = &mf_->getRegInfo();
  tm_ = &fn.getTarget();
  tri_ = tm_->getRegisterInfo();
  tii_ = tm_->getInstrInfo();
  aa_ = &getAnalysis<AliasAnalysis>();
  lv_ = &getAnalysis<LiveVariables>();
  indexes_ = &getAnalysis<SlotIndexes>();
  // Cache the set of registers the target allows the allocator to use.
  allocatableRegs_ = tri_->getAllocatableSet(fn);
  // Update the -stats interval count.
  numIntervals += getNumIntervals();
/// print - Implement the dump method.
/// Writes every computed live interval to OS, one per line. The Module*
/// parameter is unused (required by the Pass::print interface).
void LiveIntervals::print(raw_ostream &OS, const Module* ) const {
  OS << "********** INTERVALS **********\n";
  for (const_iterator I = begin(), E = end(); I != E; ++I) {
    I->second->print(OS, tri_);
/// printInstrs - Print every machine instruction of the current function to
/// OS, grouped by basic block and prefixed with its slot index.
void LiveIntervals::printInstrs(raw_ostream &OS) const {
  OS << "********** MACHINEINSTRS **********\n";

  for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end();
       mbbi != mbbe; ++mbbi) {
    OS << "BB#" << mbbi->getNumber()
       << ":\t\t# derived from " << mbbi->getName() << "\n";
    for (MachineBasicBlock::iterator mii = mbbi->begin(),
           mie = mbbi->end(); mii != mie; ++mii) {
      // DBG_VALUE instructions have no slot index; print a sentinel key.
      if (mii->isDebugValue())
        OS << SlotIndex::getEmptyKey() << '\t' << *mii;
      // NOTE(review): the 'else' introducing the next line appears to be
      // missing from this chunk -- as written, both lines would print.
        OS << getInstructionIndex(mii) << '\t' << *mii;
151 void LiveIntervals::dumpInstrs() const {
/// conflictsWithPhysReg - Return true if assigning physical register 'reg'
/// to li would conflict with an instruction inside li's (single) live range.
/// NOTE(review): this chunk appears truncated -- several return statements,
/// loop-closing braces, and an '++E' before the scan loop are not visible.
/// The visible lines are documented as-shown.
bool LiveIntervals::conflictsWithPhysReg(const LiveInterval &li,
                                         VirtRegMap &vrm, unsigned reg) {
  // We don't handle fancy stuff crossing basic block boundaries
  if (li.ranges.size() != 1)
  // Compute the [base, base) instruction-index window covered by the range.
  const LiveRange &range = li.ranges.front();
  SlotIndex idx = range.start.getBaseIndex();
  SlotIndex end = range.end.getPrevSlot().getBaseIndex().getNextIndex();

  // Skip deleted instructions
  MachineInstr *firstMI = getInstructionFromIndex(idx);
  while (!firstMI && idx != end) {
    idx = idx.getNextIndex();
    firstMI = getInstructionFromIndex(idx);

  // Find last instruction in range
  SlotIndex lastIdx = end.getPrevIndex();
  MachineInstr *lastMI = getInstructionFromIndex(lastIdx);
  while (!lastMI && lastIdx != idx) {
    lastIdx = lastIdx.getPrevIndex();
    lastMI = getInstructionFromIndex(lastIdx);

  // Range cannot cross basic block boundaries or terminators
  MachineBasicBlock *MBB = firstMI->getParent();
  if (MBB != lastMI->getParent() || lastMI->getDesc().isTerminator())

  // Scan every instruction in [firstMI, lastMI] for a reference to 'reg'.
  MachineBasicBlock::const_iterator E = lastMI;
  for (MachineBasicBlock::const_iterator I = firstMI; I != E; ++I) {
    const MachineInstr &MI = *I;

    // Allow copies to and from li.reg
    unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
    if (tii_->isMoveInstr(MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
      if (SrcReg == li.reg || DstReg == li.reg)

    // Check for operands using reg
    for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
      const MachineOperand& mop = MI.getOperand(i);
      unsigned PhysReg = mop.getReg();
      if (PhysReg == 0 || PhysReg == li.reg)
      // Virtual operands conflict through their current phys assignment.
      if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
        if (!vrm.hasPhys(PhysReg))
        PhysReg = vrm.getPhys(PhysReg);
      if (PhysReg && tri_->regsOverlap(PhysReg, reg))

  // No conflicts found.
/// conflictsWithPhysRegRef - Similar to conflictsWithPhysReg except
/// it can check use as well.
/// Walks every instruction index covered by li's ranges looking for an
/// operand referring to 'Reg' (or a sub-register of it). Instructions in
/// JoinedCopies are ignored. NOTE(review): this chunk appears truncated --
/// loop conditions, guard bodies, and returns are partially missing.
bool LiveIntervals::conflictsWithPhysRegRef(LiveInterval &li,
                                            unsigned Reg, bool CheckUse,
                                  SmallPtrSet<MachineInstr*,32> &JoinedCopies) {
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    // Visit every base instruction index inside this live range.
    for (SlotIndex index = I->start.getBaseIndex(),
           end = I->end.getPrevSlot().getBaseIndex().getNextIndex();
         index = index.getNextIndex()) {
      MachineInstr *MI = getInstructionFromIndex(index);
        continue;               // skip deleted instructions
      // Coalesced copies are about to disappear; don't count them.
      if (JoinedCopies.count(MI))
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand& MO = MI->getOperand(i);
        // Only defs matter unless the caller asked to check uses too.
        if (MO.isUse() && !CheckUse)
        unsigned PhysReg = MO.getReg();
        if (PhysReg == 0 || TargetRegisterInfo::isVirtualRegister(PhysReg))
        if (tri_->isSubRegister(Reg, PhysReg))
257 static void printRegName(unsigned reg, const TargetRegisterInfo* tri_) {
258 if (TargetRegisterInfo::isPhysicalRegister(reg))
259 dbgs() << tri_->getName(reg);
261 dbgs() << "%reg" << reg;
/// handleVirtualRegisterDef - Extend 'interval' for a definition of a
/// virtual register. Handles three cases: the first def of the vreg, a
/// redefinition from two-address lowering, and a redefinition from PHI
/// elimination. NOTE(review): this chunk appears truncated -- the parameter
/// list (MIIdx/MO/MOIdx used below), several guard bodies, DEBUG wrappers,
/// and closing braces are not visible. Visible lines documented as-shown.
void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
                                             MachineBasicBlock::iterator mi,
                                             LiveInterval &interval) {
  dbgs() << "\t\tregister: ";
  printRegName(interval.reg, tri_);

  // Virtual registers may be defined multiple times (due to phi
  // elimination and 2-addr elimination). Much of what we do only has to be
  // done once for the vreg. We use an empty interval to detect the first
  // time we see a vreg.
  LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);
  if (interval.empty()) {
    // Get the Idx of the defining instructions.
    SlotIndex defIndex = MIIdx.getDefIndex();
    // Earlyclobbers move back one, so that they overlap the live range
    if (MO.isEarlyClobber())
      defIndex = MIIdx.getUseIndex();
    // Remember the defining instruction when it is a copy-like instruction
    // so the coalescer can later trace the copy source.
    MachineInstr *CopyMI = NULL;
    unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
    if (mi->isExtractSubreg() || mi->isInsertSubreg() || mi->isSubregToReg() ||
        tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
    // Earlyclobbers move back one.
    ValNo = interval.getNextValue(defIndex, CopyMI, true, VNInfoAllocator);
    assert(ValNo->id == 0 && "First value in interval is not 0?");

    // Loop over all of the blocks that the vreg is defined in. There are
    // two cases we have to handle here. The most common case is a vreg
    // whose lifetime is contained within a basic block. In this case there
    // will be a single kill, in MBB, which comes after the definition.
    if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
      // FIXME: what about dead vars?
      if (vi.Kills[0] != mi)
        killIdx = getInstructionIndex(vi.Kills[0]).getDefIndex();
        killIdx = defIndex.getStoreIndex();

      // If the kill happens after the definition, we have an intra-block
      if (killIdx > defIndex) {
        assert(vi.AliveBlocks.empty() &&
               "Shouldn't be alive across any blocks!");
        LiveRange LR(defIndex, killIdx, ValNo);
        interval.addRange(LR);
        DEBUG(dbgs() << " +" << LR << "\n");
        ValNo->addKill(killIdx);

    // The other case we handle is when a virtual register lives to the end
    // of the defining block, potentially live across some blocks, then is
    // live into some number of blocks, but gets killed. Start by adding a
    // range that goes from this definition to the end of the defining block.
    LiveRange NewLR(defIndex, getMBBEndIdx(mbb), ValNo);
    DEBUG(dbgs() << " +" << NewLR);
    interval.addRange(NewLR);

    // Iterate over all of the blocks that the variable is completely
    // live in, adding [insrtIndex(begin), instrIndex(end)+4) to the
    for (SparseBitVector<>::iterator I = vi.AliveBlocks.begin(),
         E = vi.AliveBlocks.end(); I != E; ++I) {
      MachineBasicBlock *aliveBlock = mf_->getBlockNumbered(*I);
      LiveRange LR(getMBBStartIdx(aliveBlock), getMBBEndIdx(aliveBlock), ValNo);
      interval.addRange(LR);
      DEBUG(dbgs() << " +" << LR);

    // Finally, this virtual register is live from the start of any killing
    // block to the 'use' slot of the killing instruction.
    for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
      MachineInstr *Kill = vi.Kills[i];
      getInstructionIndex(Kill).getDefIndex();
      LiveRange LR(getMBBStartIdx(Kill->getParent()), killIdx, ValNo);
      interval.addRange(LR);
      ValNo->addKill(killIdx);
      DEBUG(dbgs() << " +" << LR);

    // If this is the second time we see a virtual register definition, it
    // must be due to phi elimination or two addr elimination. If this is
    // the result of two address elimination, then the vreg is one of the
    // def-and-use register operand.
    if (mi->isRegTiedToUseOperand(MOIdx)) {
      // If this is a two-address definition, then we have already processed
      // the live range. The only problem is that we didn't realize there
      // are actually two values in the live interval. Because of this we
      // need to take the LiveRegion that defines this register and split it
      assert(interval.containsOneValue());
      SlotIndex DefIndex = interval.getValNumInfo(0)->def.getDefIndex();
      SlotIndex RedefIndex = MIIdx.getDefIndex();
      if (MO.isEarlyClobber())
        RedefIndex = MIIdx.getUseIndex();

      const LiveRange *OldLR =
        interval.getLiveRangeContaining(RedefIndex.getUseIndex());
      VNInfo *OldValNo = OldLR->valno;

      // Delete the initial value, which should be short and continuous,
      // because the 2-addr copy must be in the same MBB as the redef.
      interval.removeRange(DefIndex, RedefIndex);

      // Two-address vregs should always only be redefined once. This means
      // that at this point, there should be exactly one value number in it.
      assert(interval.containsOneValue() && "Unexpected 2-addr liveint!");

      // The new value number (#1) is defined by the instruction we claimed
      VNInfo *ValNo = interval.getNextValue(OldValNo->def, OldValNo->getCopy(),
                                            false, // update at *
      ValNo->setFlags(OldValNo->getFlags()); // * <- updating here

      // Value#0 is now defined by the 2-addr instruction.
      OldValNo->def = RedefIndex;
      OldValNo->setCopy(0);

      // Add the new live interval which replaces the range for the input copy.
      LiveRange LR(DefIndex, RedefIndex, ValNo);
      DEBUG(dbgs() << " replace range with " << LR);
      interval.addRange(LR);
      ValNo->addKill(RedefIndex);

      // If this redefinition is dead, we need to add a dummy unit live
      // range covering the def slot.
        interval.addRange(LiveRange(RedefIndex, RedefIndex.getStoreIndex(),

        dbgs() << " RESULT: ";
        interval.print(dbgs(), tri_);

      // Otherwise, this must be because of phi elimination. If this is the
      // first redefinition of the vreg that we have seen, go back and change
      // the live range in the PHI block to be a different value number.
      if (interval.containsOneValue()) {

        VNInfo *VNI = interval.getValNumInfo(0);
        // Phi elimination may have reused the register for multiple identical
        // phi nodes. There will be a kill per phi. Remove the old ranges that
        // we now know have an incorrect number.
        for (unsigned ki=0, ke=vi.Kills.size(); ki != ke; ++ki) {
          MachineInstr *Killer = vi.Kills[ki];
          SlotIndex Start = getMBBStartIdx(Killer->getParent());
          SlotIndex End = getInstructionIndex(Killer).getDefIndex();
          dbgs() << "\n\t\trenaming [" << Start << "," << End << "] in: ";
          interval.print(dbgs(), tri_);
          interval.removeRange(Start, End);

          // Replace the interval with one of a NEW value number. Note that
          // this value number isn't actually defined by an instruction, weird
          LiveRange LR(Start, End,
                       interval.getNextValue(SlotIndex(Start, true),
                                             0, false, VNInfoAllocator));
          LR.valno->setIsPHIDef(true);
          interval.addRange(LR);
          LR.valno->addKill(End);

        // The original value is killed at the end of its defining block.
        MachineBasicBlock *killMBB = getMBBFromIndex(VNI->def);
        VNI->addKill(indexes_->getTerminatorGap(killMBB));
        VNI->setHasPHIKill(true);
        dbgs() << " RESULT: ";
        interval.print(dbgs(), tri_);

      // In the case of PHI elimination, each variable definition is only
      // live until the end of the block. We've already taken care of the
      // rest of the live range.
      SlotIndex defIndex = MIIdx.getDefIndex();
      if (MO.isEarlyClobber())
        defIndex = MIIdx.getUseIndex();

      // Track copy-like defining instructions for the coalescer, as above.
      MachineInstr *CopyMI = NULL;
      unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
      if (mi->isExtractSubreg() || mi->isInsertSubreg() || mi->isSubregToReg()||
          tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
      ValNo = interval.getNextValue(defIndex, CopyMI, true, VNInfoAllocator);

      // The def lives to the end of its block and is killed by the PHI copy.
      SlotIndex killIndex = getMBBEndIdx(mbb);
      LiveRange LR(defIndex, killIndex, ValNo);
      interval.addRange(LR);
      ValNo->addKill(indexes_->getTerminatorGap(mbb));
      ValNo->setHasPHIKill(true);
      DEBUG(dbgs() << " +" << LR);

  DEBUG(dbgs() << '\n');
/// handlePhysicalRegisterDef - Extend 'interval' for a definition of a
/// physical register. The range runs from this def to the first kill or
/// redefinition within the same block (physregs never live across blocks
/// here). NOTE(review): this chunk appears truncated -- the MIIdx/MO
/// parameters used below, several guard bodies, 'goto exit'-style jumps,
/// and closing braces are not visible. Visible lines documented as-shown.
void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
                                              MachineBasicBlock::iterator mi,
                                              LiveInterval &interval,
                                              MachineInstr *CopyMI) {
  // A physical register cannot be live across basic block, so its
  // lifetime must end somewhere in its defining basic block.
  dbgs() << "\t\tregister: ";
  printRegName(interval.reg, tri_);

  SlotIndex baseIndex = MIIdx;
  SlotIndex start = baseIndex.getDefIndex();
  // Earlyclobbers move back one.
  if (MO.isEarlyClobber())
    start = MIIdx.getUseIndex();
  SlotIndex end = start;

  // If it is not used after definition, it is considered dead at
  // the instruction defining it. Hence its interval is:
  // [defSlot(def), defSlot(def)+1)
  // For earlyclobbers, the defSlot was pushed back one; the extra
  // advance below compensates.
  DEBUG(dbgs() << " dead");
  end = start.getStoreIndex();

  // If it is not dead on definition, it must be killed by a
  // subsequent instruction. Hence its interval is:
  // [defSlot(def), useSlot(kill)+1)
  baseIndex = baseIndex.getNextIndex();
  while (++mi != MBB->end()) {
    // DBG_VALUE instructions occupy no slot; skip them.
    if (mi->isDebugValue())
    if (getInstructionFromIndex(baseIndex) == 0)
      baseIndex = indexes_->getNextNonNullIndex(baseIndex);
    if (mi->killsRegister(interval.reg, tri_)) {
      DEBUG(dbgs() << " killed");
      end = baseIndex.getDefIndex();
    // A redefinition ends the range as well.
    int DefIdx = mi->findRegisterDefOperandIdx(interval.reg, false, tri_);
      if (mi->isRegTiedToUseOperand(DefIdx)) {
        // Two-address instruction.
        end = baseIndex.getDefIndex();
        // Another instruction redefines the register before it is ever read.
        // Then the register is essentially dead at the instruction that
        // defines it. Hence its interval is:
        // [defSlot(def), defSlot(def)+1)
        DEBUG(dbgs() << " dead");
        end = start.getStoreIndex();

    baseIndex = baseIndex.getNextIndex();

  // The only case we should have a dead physreg here without a killing or
  // instruction where we know it's dead is if it is live-in to the function
  // and never used. Another possible case is the implicit use of the
  // physical register has been deleted by two-address pass.
  end = start.getStoreIndex();

  assert(start < end && "did not find end of interval?");

  // Already exists? Extend old live interval.
  LiveInterval::iterator OldLR = interval.FindLiveRangeContaining(start);
  bool Extend = OldLR != interval.end();
  VNInfo *ValNo = Extend
    ? OldLR->valno : interval.getNextValue(start, CopyMI, true, VNInfoAllocator);
  if (MO.isEarlyClobber() && Extend)
    ValNo->setHasRedefByEC(true);
  LiveRange LR(start, end, ValNo);
  interval.addRange(LR);
  LR.valno->addKill(end);
  DEBUG(dbgs() << " +" << LR << '\n');
/// handleRegisterDef - Dispatch a register definition to the virtual- or
/// physical-register handler. For an allocatable physreg, also extend the
/// intervals of its sub-registers. NOTE(review): this chunk appears
/// truncated -- the MIIdx/MO/MOIdx parameters used below and the copy-MI
/// assignment under the if are not visible.
void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
                                      MachineBasicBlock::iterator MI,
  if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
    handleVirtualRegisterDef(MBB, MI, MIIdx, MO, MOIdx,
                             getOrCreateInterval(MO.getReg()));
  else if (allocatableRegs_[MO.getReg()]) {
    // Remember copy-like defining instructions for the coalescer.
    MachineInstr *CopyMI = NULL;
    unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
    if (MI->isExtractSubreg() || MI->isInsertSubreg() || MI->isSubregToReg() ||
        tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
    handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                              getOrCreateInterval(MO.getReg()), CopyMI);
    // Def of a register also defines its sub-registers.
    for (const unsigned* AS = tri_->getSubRegisters(MO.getReg()); *AS; ++AS)
      // If MI also modifies the sub-register explicitly, avoid processing it
      // more than once. Do not pass in TRI here so it checks for exact match.
      if (!MI->modifiesRegister(*AS))
        handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                                  getOrCreateInterval(*AS), 0);
/// handleLiveInRegister - Build the live range for a register that is
/// live-in to MBB: it starts at the block entry and runs to the first kill
/// or redefinition (or through the whole block if neither is found).
/// NOTE(review): this chunk appears truncated -- the MIIdx parameter used
/// below, the main scan loop header, the 'VNInfo *vni =' declaration, and
/// several closing braces are not visible. Visible lines documented
/// as-shown.
void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
                                         LiveInterval &interval, bool isAlias) {
  dbgs() << "\t\tlivein register: ";
  printRegName(interval.reg, tri_);

  // Look for kills, if it reaches a def before it's killed, then it shouldn't
  // be considered a livein.
  MachineBasicBlock::iterator mi = MBB->begin();
  SlotIndex baseIndex = MIIdx;
  SlotIndex start = baseIndex;
  if (getInstructionFromIndex(baseIndex) == 0)
    baseIndex = indexes_->getNextNonNullIndex(baseIndex);

  SlotIndex end = baseIndex;
  bool SeenDefUse = false;

  MachineBasicBlock::iterator E = MBB->end();
    // DBG_VALUE instructions occupy no slot; step past runs of them.
    if (mi->isDebugValue()) {
      if (mi != E && !mi->isDebugValue()) {
        baseIndex = indexes_->getNextNonNullIndex(baseIndex);
    if (mi->killsRegister(interval.reg, tri_)) {
      DEBUG(dbgs() << " killed");
      end = baseIndex.getDefIndex();
    } else if (mi->modifiesRegister(interval.reg, tri_)) {
      // Another instruction redefines the register before it is ever read.
      // Then the register is essentially dead at the instruction that defines
      // it. Hence its interval is:
      // [defSlot(def), defSlot(def)+1)
      DEBUG(dbgs() << " dead");
      end = start.getStoreIndex();

    if (mi != E && !mi->isDebugValue()) {
      baseIndex = indexes_->getNextNonNullIndex(baseIndex);

  // Live-in register might not be used at all.
    DEBUG(dbgs() << " dead");
    end = MIIdx.getStoreIndex();
    DEBUG(dbgs() << " live through");

  // The live-in value is a PHI-like def at the block start.
    interval.getNextValue(SlotIndex(getMBBStartIdx(MBB), true),
                          0, false, VNInfoAllocator);
  vni->setIsPHIDef(true);
  LiveRange LR(start, end, vni);

  interval.addRange(LR);
  LR.valno->addKill(end);
  DEBUG(dbgs() << " +" << LR << '\n');
/// computeIntervals - computes the live intervals for virtual
/// registers. for some ordering of the machine instructions [1,N] a
/// live interval is an interval [i, j) where 1 <= i <= j < N for
/// which a variable is live
///
/// Walks every block in order: first builds ranges for the block's live-in
/// registers, then visits each instruction's operands, building intervals
/// for defs and collecting undef uses. NOTE(review): this chunk appears
/// truncated -- loop increments, an isUndef()/isDef check, and closing
/// braces are not visible. Visible lines documented as-shown.
void LiveIntervals::computeIntervals() {
  DEBUG(dbgs() << "********** COMPUTING LIVE INTERVALS **********\n"
               << "********** Function: "
               << ((Value*)mf_->getFunction())->getName() << '\n');

  SmallVector<unsigned, 8> UndefUses;
  for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
    MachineBasicBlock *MBB = MBBI;
    // Track the index of the current machine instr.
    SlotIndex MIIndex = getMBBStartIdx(MBB);
    DEBUG(dbgs() << MBB->getName() << ":\n");

    // Create intervals for live-ins to this BB first.
    for (MachineBasicBlock::const_livein_iterator LI = MBB->livein_begin(),
         LE = MBB->livein_end(); LI != LE; ++LI) {
      handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
      // Multiple live-ins can alias the same register.
      for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
        if (!hasInterval(*AS))
          handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),

    // Skip over empty initial indices.
    if (getInstructionFromIndex(MIIndex) == 0)
      MIIndex = indexes_->getNextNonNullIndex(MIIndex);

    for (MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();
      DEBUG(dbgs() << MIIndex << "\t" << *MI);
      // DBG_VALUE instructions don't define or kill anything here.
      if (MI->isDebugValue())

      // Visit operands in reverse so defs are seen before their subregs.
      for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
        MachineOperand &MO = MI->getOperand(i);
        if (!MO.isReg() || !MO.getReg())

        // handle register defs - build intervals
          handleRegisterDef(MBB, MI, MIIndex, MO, i);
        else if (MO.isUndef())
          UndefUses.push_back(MO.getReg());

      // Move to the next instr slot.
      MIIndex = indexes_->getNextNonNullIndex(MIIndex);

  // Create empty intervals for registers defined by implicit_def's (except
  // for those implicit_def that define values which are liveout of their
  for (unsigned i = 0, e = UndefUses.size(); i != e; ++i) {
    unsigned UndefReg = UndefUses[i];
    (void)getOrCreateInterval(UndefReg);
733 LiveInterval* LiveIntervals::createInterval(unsigned reg) {
734 float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ? HUGE_VALF : 0.0F;
735 return new LiveInterval(reg, Weight);
738 /// dupInterval - Duplicate a live interval. The caller is responsible for
739 /// managing the allocated memory.
740 LiveInterval* LiveIntervals::dupInterval(LiveInterval *li) {
741 LiveInterval *NewLI = createInterval(li->reg);
742 NewLI->Copy(*li, mri_, getVNInfoAllocator());
/// getVNInfoSourceReg - Helper function that parses the specified VNInfo
/// copy field and returns the source register that defines it.
/// Handles EXTRACT_SUBREG, INSERT_SUBREG/SUBREG_TO_REG, and plain moves.
/// NOTE(review): this chunk appears truncated -- a null-copy guard, several
/// return statements, and closing braces are not visible.
unsigned LiveIntervals::getVNInfoSourceReg(const VNInfo *VNI) const {
  if (VNI->getCopy()->isExtractSubreg()) {
    // If it's extracting out of a physical register, return the sub-register.
    unsigned Reg = VNI->getCopy()->getOperand(1).getReg();
    if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
      unsigned SrcSubReg = VNI->getCopy()->getOperand(2).getImm();
      unsigned DstSubReg = VNI->getCopy()->getOperand(0).getSubReg();
      if (SrcSubReg == DstSubReg)
        // %reg1034:3<def> = EXTRACT_SUBREG %EDX, 3
        // reg1034 can still be coalesced to EDX.
      assert(DstSubReg == 0);
      Reg = tri_->getSubReg(Reg, VNI->getCopy()->getOperand(2).getImm());
  } else if (VNI->getCopy()->isInsertSubreg() ||
             VNI->getCopy()->isSubregToReg())
    // For insert-style copies the inserted value is operand 2.
    return VNI->getCopy()->getOperand(2).getReg();

  unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
  if (tii_->isMoveInstr(*VNI->getCopy(), SrcReg, DstReg, SrcSubReg, DstSubReg))
  llvm_unreachable("Unrecognized copy instruction!");
777 //===----------------------------------------------------------------------===//
778 // Register allocator hooks.
/// getReMatImplicitUse - If the remat definition MI has one (for now, we only
/// allow one) virtual register operand, then its uses are implicitly using
/// the register. Returns the virtual register.
/// NOTE(review): this chunk appears truncated -- the guard bodies
/// (continue/return), the assert's first line, and the final return are not
/// visible.
unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
                                            MachineInstr *MI) const {
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg() || !MO.isUse())
    unsigned Reg = MO.getReg();
    // Skip the interval's own register and unallocatable physregs.
    if (Reg == 0 || Reg == li.reg)
    if (TargetRegisterInfo::isPhysicalRegister(Reg) &&
        !allocatableRegs_[Reg])
    // FIXME: For now, only remat MI with at most one register operand.
      "Can't rematerialize instruction with multiple register operand!");
809 /// isValNoAvailableAt - Return true if the val# of the specified interval
810 /// which reaches the given instruction also reaches the specified use index.
811 bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
812 SlotIndex UseIdx) const {
813 SlotIndex Index = getInstructionIndex(MI);
814 VNInfo *ValNo = li.FindLiveRangeContaining(Index)->valno;
815 LiveInterval::const_iterator UI = li.FindLiveRangeContaining(UseIdx);
816 return UI != li.end() && UI->valno == ValNo;
/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
/// NOTE(review): this chunk appears truncated -- the trailing parameters
/// (isLoad out-flag), several guard bodies/returns, and closing braces are
/// not visible. Visible lines documented as-shown.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       const VNInfo *ValNo, MachineInstr *MI,
                                       SmallVectorImpl<LiveInterval*> &SpillIs,
  // The target decides whether the instruction is trivially rematable.
  if (!tii_->isTriviallyReMaterializable(MI, aa_))

  // Target-specific code can mark an instruction as being rematerializable
  // if it has one virtual reg use, though it had better be something like
  // a PIC base register which is likely to be live everywhere.
  unsigned ImpUse = getReMatImplicitUse(li, MI);
    const LiveInterval &ImpLi = getInterval(ImpUse);
    // The implicit-use value must reach every use of li.reg fed by ValNo.
    for (MachineRegisterInfo::use_iterator ri = mri_->use_begin(li.reg),
         re = mri_->use_end(); ri != re; ++ri) {
      MachineInstr *UseMI = &*ri;
      SlotIndex UseIdx = getInstructionIndex(UseMI);
      if (li.FindLiveRangeContaining(UseIdx)->valno != ValNo)
      if (!isValNoAvailableAt(ImpLi, MI, UseIdx))

  // If a register operand of the re-materialized instruction is going to
  // be spilled next, then it's not legal to re-materialize this instruction.
  for (unsigned i = 0, e = SpillIs.size(); i != e; ++i)
    if (ImpUse == SpillIs[i]->reg)
856 /// isReMaterializable - Returns true if the definition MI of the specified
857 /// val# of the specified interval is re-materializable.
858 bool LiveIntervals::isReMaterializable(const LiveInterval &li,
859 const VNInfo *ValNo, MachineInstr *MI) {
860 SmallVector<LiveInterval*, 4> Dummy1;
862 return isReMaterializable(li, ValNo, MI, Dummy1, Dummy2);
/// isReMaterializable - Returns true if every definition of MI of every
/// val# of the specified interval is re-materializable.
/// NOTE(review): this chunk appears truncated -- the isLoad parameter, the
/// loop increment, guard bodies/returns, and closing braces are not
/// visible. Visible lines documented as-shown.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       SmallVectorImpl<LiveInterval*> &SpillIs,
  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
    const VNInfo *VNI = *i;
      continue;               // Dead val#.
    // Is the def for the val# rematerializable?
    if (!VNI->isDefAccurate())
    MachineInstr *ReMatDefMI = getInstructionFromIndex(VNI->def);
    bool DefIsLoad = false;
        !isReMaterializable(li, VNI, ReMatDefMI, SpillIs, DefIsLoad))
/// FilterFoldedOps - Filter out two-address use operands. Return
/// true if it finds any issue with the operands that ought to prevent
/// folding. The surviving operand indexes are appended to FoldOps and the
/// mod/ref summary is accumulated into MRInfo.
/// NOTE(review): this chunk appears truncated -- the MRInfo parameter used
/// below, the subreg/def checks, 'continue', and the returns are not
/// visible. Visible lines documented as-shown.
static bool FilterFoldedOps(MachineInstr *MI,
                            SmallVector<unsigned, 2> &Ops,
                            SmallVector<unsigned, 2> &FoldOps) {
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    unsigned OpIdx = Ops[i];
    MachineOperand &MO = MI->getOperand(OpIdx);
    // FIXME: fold subreg use.
      // A def contributes a 'mod' to the summary.
      MRInfo |= (unsigned)VirtRegMap::isMod;
      // Filter out two-address use operand(s).
      if (MI->isRegTiedToDefOperand(OpIdx)) {
        MRInfo = VirtRegMap::isModRef;
      // A plain use contributes a 'ref'.
      MRInfo |= (unsigned)VirtRegMap::isRef;
    FoldOps.push_back(OpIdx);
/// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
/// slot / to reg or any rematerialized load into ith operand of specified
/// MI. If it is successful, MI is updated with the newly created MI and
/// the maps are transferred to the folded instruction.
/// NOTE(review): this chunk appears truncated -- the MRInfo declaration,
/// several returns, and the 'if (fmi)' wrapper around the success path are
/// not visible. Visible lines documented as-shown.
bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
                                         VirtRegMap &vrm, MachineInstr *DefMI,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool isSS, int Slot, unsigned Reg) {
  // If it is an implicit def instruction, just delete it.
  if (MI->isImplicitDef()) {
    RemoveMachineInstrFromMaps(MI);
    vrm.RemoveMachineInstrFromMaps(MI);
    MI->eraseFromParent();

  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))

  // The only time it's safe to fold into a two address instruction is when
  // it's folding reload and spill from / into a spill stack slot.
  if (DefMI && (MRInfo & VirtRegMap::isMod))

  // Ask the target to fold either the stack slot or the remat def.
  MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(*mf_, MI, FoldOps, Slot)
                           : tii_->foldMemoryOperand(*mf_, MI, FoldOps, DefMI);
    // Remember this instruction uses the spill slot.
    if (isSS) vrm.addSpillSlotUse(Slot, fmi);

    // Attempt to fold the memory reference into the instruction. If
    // we can do this, we don't need to insert spill code.
    MachineBasicBlock &MBB = *MI->getParent();
    if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
      vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
    // Migrate all VirtRegMap bookkeeping from MI to the folded instruction.
    vrm.transferSpillPts(MI, fmi);
    vrm.transferRestorePts(MI, fmi);
    vrm.transferEmergencySpills(MI, fmi);
    ReplaceMachineInstrInMaps(MI, fmi);
    // Splice the folded instruction in place of MI and hand it back.
    MI = MBB.insert(MBB.erase(MI), fmi);
/// canFoldMemoryOperand - Returns true if the specified load / store
/// folding is possible.
/// NOTE(review): this chunk appears truncated -- the trailing parameters
/// (MRInfo out-param and the ReMat flag used below) and the early returns
/// are not visible. Visible lines documented as-shown.
bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
                                         SmallVector<unsigned, 2> &Ops,
  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))

  // It's only legal to remat for a use, not a def.
  if (ReMat && (MRInfo & VirtRegMap::isMod))

  // Finally let the target decide whether these operands can be folded.
  return tii_->canFoldMemoryOperand(MI, FoldOps);
/// intervalIsInOneMBB - Return true if every range of li is covered by a
/// single basic block. NOTE(review): this chunk appears truncated -- the
/// null/mismatch checks and the returns are not visible. Visible lines
/// documented as-shown.
bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
  LiveInterval::Ranges::const_iterator itr = li.ranges.begin();
  // Block covering the first range; all others must match it.
  MachineBasicBlock *mbb = indexes_->getMBBCoveringRange(itr->start, itr->end);
  for (++itr; itr != li.ranges.end(); ++itr) {
    MachineBasicBlock *mbb2 =
      indexes_->getMBBCoveringRange(itr->start, itr->end);
/// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of
/// interval on to-be re-materialized operands of MI) with new register.
/// NOTE(review): this chunk appears truncated -- the vrm parameter used
/// below, operand-kind guards with 'continue', a null check on UseMO, and
/// closing braces are not visible. Visible lines documented as-shown.
void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
                                       MachineInstr *MI, unsigned NewVReg,
  // There is an implicit use. That means one of the other operands is
  // being remat'ed and the remat'ed instruction has li.reg as a
  // use operand. Make sure we rewrite that as well.
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    unsigned Reg = MO.getReg();
    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
    if (!vrm.isReMaterialized(Reg))
    // Patch the cached remat clone so it uses the new vreg too.
    MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
    MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg);
      UseMO->setReg(NewVReg);
1033 /// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
1034 /// for addIntervalsForSpills to rewrite uses / defs for the given live range.
// Rewrites the operands of a single instruction that reference the spilled
// interval: either deletes a fully-rematerialized def, folds the memory
// access into the instruction, or replaces li.reg with a fresh vreg (NewVReg)
// and records the resulting short live range. Returns whether all rewritten
// operands could be folded. Several parameters (MI, vrm) appear on lines
// elided from this view.
1035 bool LiveIntervals::
1036 rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
1037 bool TrySplit, SlotIndex index, SlotIndex end,
1039 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
1040 unsigned Slot, int LdSlot,
1041 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
1043 const TargetRegisterClass* rc,
1044 SmallVector<int, 4> &ReMatIds,
1045 const MachineLoopInfo *loopInfo,
1046 unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
1047 DenseMap<unsigned,unsigned> &MBBVRegsMap,
1048 std::vector<LiveInterval*> &NewLIs) {
1049 bool CanFold = false;
// Scan every operand; operands are restarted from scratch after a successful
// fold (see the goto RestartInstruction below).
1051 for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
1052 MachineOperand& mop = MI->getOperand(i);
1055 unsigned Reg = mop.getReg();
1056 unsigned RegI = Reg;
1057 if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
1062 bool TryFold = !DefIsReMat;
1063 bool FoldSS = true; // Default behavior unless it's a remat.
1064 int FoldSlot = Slot;
1066 // If this is the rematerializable definition MI itself and
1067 // all of its uses are rematerialized, simply delete it.
1068 if (MI == ReMatOrigDefMI && CanDelete) {
1069 DEBUG(dbgs() << "\t\t\t\tErasing re-materializable def: "
// Remove from both the slot-index maps and the VirtRegMap before erasing.
1071 RemoveMachineInstrFromMaps(MI);
1072 vrm.RemoveMachineInstrFromMaps(MI);
1073 MI->eraseFromParent();
1077 // If def for this use can't be rematerialized, then try folding.
1078 // If def is rematerializable and it's a load, also try folding.
1079 TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
1081 // Try fold loads (from stack slot, constant pool, etc.) into uses.
1087 // Scan all of the operands of this instruction rewriting operands
1088 // to use NewVReg instead of li.reg as appropriate. We do this for
1091 // 1. If the instr reads the same spilled vreg multiple times, we
1092 // want to reuse the NewVReg.
1093 // 2. If the instr is a two-addr instruction, we are required to
1094 // keep the src/dst regs pinned.
1096 // Keep track of whether we replace a use and/or def so that we can
1097 // create the spill interval with the appropriate range.
1099 HasUse = mop.isUse();
1100 HasDef = mop.isDef();
1101 SmallVector<unsigned, 2> Ops;
// Gather all later operands of MI that reference the same register so they
// can share NewVReg (and so folding considers every occurrence at once).
1103 for (unsigned j = i+1, e = MI->getNumOperands(); j != e; ++j) {
1104 const MachineOperand &MOj = MI->getOperand(j);
1107 unsigned RegJ = MOj.getReg();
1108 if (RegJ == 0 || TargetRegisterInfo::isPhysicalRegister(RegJ))
// Undef operands don't contribute to the use/def footprint of the range.
1112 if (!MOj.isUndef()) {
1113 HasUse |= MOj.isUse();
1114 HasDef |= MOj.isDef();
1119 // Create a new virtual register for the spill interval.
1120 // Create the new register now so we can map the fold instruction
1121 // to the new register so when it is unfolded we get the correct
1123 bool CreatedNewVReg = false;
1125 NewVReg = mri_->createVirtualRegister(rc);
1127 CreatedNewVReg = true;
1129 // The new virtual register should get the same allocation hints as the
1131 std::pair<unsigned, unsigned> Hint = mri_->getRegAllocationHint(Reg);
1132 if (Hint.first || Hint.second)
1133 mri_->setRegAllocationHint(NewVReg, Hint.first, Hint.second);
1139 // Do not fold load / store here if we are splitting. We'll find an
1140 // optimal point to insert a load / store later.
1142 if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
1143 Ops, FoldSS, FoldSlot, NewVReg)) {
1144 // Folding the load/store can completely change the instruction in
1145 // unpredictable ways, rescan it from the beginning.
1148 // We need to give the new vreg the same stack slot as the
1149 // spilled interval.
1150 vrm.assignVirt2StackSlot(NewVReg, FoldSlot);
// If folding removed MI from the index maps entirely, the instruction is
// gone and there is nothing further to rewrite (early-exit path elided).
1156 if (isNotInMIMap(MI))
1158 goto RestartInstruction;
1161 // We'll try to fold it later if it's profitable.
1162 CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat);
// Rewrite this operand (and, below, all of its later occurrences) to the
// new virtual register.
1166 mop.setReg(NewVReg);
1167 if (mop.isImplicit())
1168 rewriteImplicitOps(li, MI, NewVReg, vrm);
1170 // Reuse NewVReg for other reads.
1171 for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
1172 MachineOperand &mopj = MI->getOperand(Ops[j]);
1173 mopj.setReg(NewVReg);
1174 if (mopj.isImplicit())
1175 rewriteImplicitOps(li, MI, NewVReg, vrm);
1178 if (CreatedNewVReg) {
1180 vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI);
1181 if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
1182 // Each valnum may have its own remat id.
1183 ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
// Reuse the remat id already assigned to this value number (else branch
// of the check above; the surrounding braces are elided here).
1185 vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
1187 if (!CanDelete || (HasUse && HasDef)) {
1188 // If this is a two-addr instruction then its use operands are
1189 // rematerializable but its def is not. It should be assigned a
1191 vrm.assignVirt2StackSlot(NewVReg, Slot);
1194 vrm.assignVirt2StackSlot(NewVReg, Slot);
1196 } else if (HasUse && HasDef &&
1197 vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
1198 // If this interval hasn't been assigned a stack slot (because earlier
1199 // def is a deleted remat def), do it now.
1200 assert(Slot != VirtRegMap::NO_STACK_SLOT);
1201 vrm.assignVirt2StackSlot(NewVReg, Slot);
1204 // Re-matting an instruction with virtual register use. Add the
1205 // register as an implicit use on the use MI.
1206 if (DefIsReMat && ImpUse)
1207 MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
1209 // Create a new register interval for this spill / remat.
1210 LiveInterval &nI = getOrCreateInterval(NewVReg);
1211 if (CreatedNewVReg) {
1212 NewLIs.push_back(&nI);
1213 MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
1215 vrm.setIsSplitFromReg(NewVReg, li.reg);
// Add the live range(s) for the new interval: a load->def range for a use,
// and a def->store range for a def (guarding conditions are elided).
1219 if (CreatedNewVReg) {
1220 LiveRange LR(index.getLoadIndex(), index.getDefIndex(),
1221 nI.getNextValue(SlotIndex(), 0, false, VNInfoAllocator));
1222 DEBUG(dbgs() << " +" << LR);
1225 // Extend the split live interval to this def / use.
1226 SlotIndex End = index.getDefIndex();
1227 LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
1228 nI.getValNumInfo(nI.getNumValNums()-1));
1229 DEBUG(dbgs() << " +" << LR);
1234 LiveRange LR(index.getDefIndex(), index.getStoreIndex(),
1235 nI.getNextValue(SlotIndex(), 0, false, VNInfoAllocator));
1236 DEBUG(dbgs() << " +" << LR);
1241 dbgs() << "\t\t\t\tAdded new interval: ";
1242 nI.print(dbgs(), tri_);
// anyKillInMBBAfterIdx - Return true if value number VNI has a kill inside
// MBB that occurs after Idx. PHI kills are skipped since they do not
// correspond to real instructions.
1248 bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
1250 MachineBasicBlock *MBB,
1251 SlotIndex Idx) const {
1252 SlotIndex End = getMBBEndIdx(MBB);
1253 for (unsigned j = 0, ee = VNI->kills.size(); j != ee; ++j) {
1254 if (VNI->kills[j].isPHI())
1257 SlotIndex KillIdx = VNI->kills[j];
// A kill strictly after Idx but no later than the block's end counts.
1258 if (KillIdx > Idx && KillIdx <= End)
1264 /// RewriteInfo - Keep track of machine instrs that will be rewritten
1265 /// during spilling.
1267 struct RewriteInfo {
// Member declarations (Index, MI, HasUse, HasDef) are on lines elided from
// this view; the constructor below initializes all four fields.
1272 RewriteInfo(SlotIndex i, MachineInstr *mi, bool u, bool d)
1273 : Index(i), MI(mi), HasUse(u), HasDef(d) {}
// RewriteInfoCompare - Strict weak ordering by instruction index; used to
// sort the RewriteInfo worklist before rewriting defs and uses.
1276 struct RewriteInfoCompare {
1277 bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
1278 return LHS.Index < RHS.Index;
// rewriteInstructionsForSpills - Rewrite all defs / uses of li.reg within the
// live range [I->start, I->end): collects the affected instructions sorted by
// slot index, rewrites each via rewriteInstructionForSpills, and records
// per-MBB spill and restore points in SpillIdxes / RestoreIdxes (with
// SpillMBBs / RestoreMBBs as the set of blocks touched). Several parameters
// (isReMat, vrm, MI) appear on lines elided from this view.
1283 void LiveIntervals::
1284 rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
1285 LiveInterval::Ranges::const_iterator &I,
1286 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
1287 unsigned Slot, int LdSlot,
1288 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
1290 const TargetRegisterClass* rc,
1291 SmallVector<int, 4> &ReMatIds,
1292 const MachineLoopInfo *loopInfo,
1293 BitVector &SpillMBBs,
1294 DenseMap<unsigned, std::vector<SRInfo> > &SpillIdxes,
1295 BitVector &RestoreMBBs,
1296 DenseMap<unsigned, std::vector<SRInfo> > &RestoreIdxes,
1297 DenseMap<unsigned,unsigned> &MBBVRegsMap,
1298 std::vector<LiveInterval*> &NewLIs) {
1299 bool AllCanFold = true;
1300 unsigned NewVReg = 0;
1301 SlotIndex start = I->start.getBaseIndex();
// Round the (exclusive) range end up to the next base index so the
// half-open comparison below covers the last instruction in the range.
1302 SlotIndex end = I->end.getPrevSlot().getBaseIndex().getNextIndex();
1304 // First collect all the def / use in this live range that will be rewritten.
1305 // Make sure they are sorted according to instruction index.
1306 std::vector<RewriteInfo> RewriteMIs;
1307 for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
1308 re = mri_->reg_end(); ri != re; ) {
1309 MachineInstr *MI = &*ri;
1310 MachineOperand &O = ri.getOperand();
1312 if (MI->isDebugValue()) {
1313 // Remove debug info for now.
1315 DEBUG(dbgs() << "Removing debug info due to spill:" << "\t" << *MI);
1318 assert(!O.isImplicit() && "Spilling register that's used as implicit use?");
1319 SlotIndex index = getInstructionIndex(MI);
1320 if (index < start || index >= end)
1324 // Must be defined by an implicit def. It should not be spilled. Note,
1325 // this is for correctness reason. e.g.
1326 // 8 %reg1024<def> = IMPLICIT_DEF
1327 // 12 %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
1328 // The live range [12, 14) are not part of the r1024 live interval since
1329 // it's defined by an implicit def. It will not conflicts with live
1330 // interval of r1025. Now suppose both registers are spilled, you can
1331 // easily see a situation where both registers are reloaded before
1332 // the INSERT_SUBREG and both target registers that would overlap.
1334 RewriteMIs.push_back(RewriteInfo(index, MI, O.isUse(), O.isDef()));
1336 std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());
// If the def is rematerializable and reads another virtual register, that
// register must be kept as an implicit use on every remat site.
1338 unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
1339 // Now rewrite the defs and uses.
1340 for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
1341 RewriteInfo &rwi = RewriteMIs[i];
1343 SlotIndex index = rwi.Index;
1344 bool MIHasUse = rwi.HasUse;
1345 bool MIHasDef = rwi.HasDef;
1346 MachineInstr *MI = rwi.MI;
1347 // If MI def and/or use the same register multiple times, then there
1348 // are multiple entries.
1349 unsigned NumUses = MIHasUse;
// Coalesce all worklist entries for the same instruction into one pass.
1350 while (i != e && RewriteMIs[i].MI == MI) {
1351 assert(RewriteMIs[i].Index == index);
1352 bool isUse = RewriteMIs[i].HasUse;
1353 if (isUse) ++NumUses;
1355 MIHasDef |= RewriteMIs[i].HasDef;
1358 MachineBasicBlock *MBB = MI->getParent();
1360 if (ImpUse && MI != ReMatDefMI) {
1361 // Re-matting an instruction with virtual register use. Update the
1362 // register interval's spill weight to HUGE_VALF to prevent it from
1364 LiveInterval &ImpLi = getInterval(ImpUse);
1365 ImpLi.weight = HUGE_VALF;
1368 unsigned MBBId = MBB->getNumber();
1369 unsigned ThisVReg = 0;
// Reuse the vreg already created for this MBB (if splitting), so all defs /
// uses within a block share one new interval.
1371 DenseMap<unsigned,unsigned>::iterator NVI = MBBVRegsMap.find(MBBId);
1372 if (NVI != MBBVRegsMap.end()) {
1373 ThisVReg = NVI->second;
1380 // It's better to start a new interval to avoid artifically
1381 // extend the new interval.
1382 if (MIHasDef && !MIHasUse) {
1383 MBBVRegsMap.erase(MBB->getNumber());
1389 bool IsNew = ThisVReg == 0;
1391 // This ends the previous live interval. If all of its def / use
1392 // can be folded, give it a low spill weight.
1393 if (NewVReg && TrySplit && AllCanFold) {
1394 LiveInterval &nI = getOrCreateInterval(NewVReg);
1401 bool HasDef = false;
1402 bool HasUse = false;
1403 bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
1404 index, end, MI, ReMatOrigDefMI, ReMatDefMI,
1405 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1406 CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
1407 ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs)
1408 if (!HasDef && !HasUse)
1411 AllCanFold &= CanFold;
1413 // Update weight of spill interval.
1414 LiveInterval &nI = getOrCreateInterval(NewVReg);
1416 // The spill weight is now infinity as it cannot be spilled again.
1417 nI.weight = HUGE_VALF;
1421 // Keep track of the last def and first use in each MBB.
1423 if (MI != ReMatOrigDefMI || !CanDelete) {
1424 bool HasKill = false;
1426 HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, index.getDefIndex());
1428 // If this is a two-address code, then this index starts a new VNInfo.
1429 const VNInfo *VNI = li.findDefinedVNInfoForRegInt(index.getDefIndex());
1431 HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, index.getDefIndex());
1433 DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
1434 SpillIdxes.find(MBBId);
// Record this def as a potential spill point for the block; keep at most
// one SRInfo per (MBB, vreg) pair, updating canFold when a later def of the
// same vreg appears in the same block.
1436 if (SII == SpillIdxes.end()) {
1437 std::vector<SRInfo> S;
1438 S.push_back(SRInfo(index, NewVReg, true));
1439 SpillIdxes.insert(std::make_pair(MBBId, S));
1440 } else if (SII->second.back().vreg != NewVReg) {
1441 SII->second.push_back(SRInfo(index, NewVReg, true));
1442 } else if (index > SII->second.back().index) {
1443 // If there is an earlier def and this is a two-address
1444 // instruction, then it's not possible to fold the store (which
1445 // would also fold the load).
1446 SRInfo &Info = SII->second.back();
1448 Info.canFold = !HasUse;
1450 SpillMBBs.set(MBBId);
1451 } else if (SII != SpillIdxes.end() &&
1452 SII->second.back().vreg == NewVReg &&
1453 index > SII->second.back().index) {
1454 // There is an earlier def that's not killed (must be two-address).
1455 // The spill is no longer needed.
1456 SII->second.pop_back();
1457 if (SII->second.empty()) {
1458 SpillIdxes.erase(MBBId);
1459 SpillMBBs.reset(MBBId);
// Use handling: decide whether an earlier recorded spill / restore in this
// block is still foldable, and record a restore point if none exists yet.
1466 DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
1467 SpillIdxes.find(MBBId);
1468 if (SII != SpillIdxes.end() &&
1469 SII->second.back().vreg == NewVReg &&
1470 index > SII->second.back().index)
1471 // Use(s) following the last def, it's not safe to fold the spill.
1472 SII->second.back().canFold = false;
1473 DenseMap<unsigned, std::vector<SRInfo> >::iterator RII =
1474 RestoreIdxes.find(MBBId);
1475 if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
1476 // If we are splitting live intervals, only fold if it's the first
1477 // use and there isn't another use later in the MBB.
1478 RII->second.back().canFold = false;
1480 // Only need a reload if there isn't an earlier def / use.
1481 if (RII == RestoreIdxes.end()) {
1482 std::vector<SRInfo> Infos;
1483 Infos.push_back(SRInfo(index, NewVReg, true));
1484 RestoreIdxes.insert(std::make_pair(MBBId, Infos));
1486 RII->second.push_back(SRInfo(index, NewVReg, true));
1488 RestoreMBBs.set(MBBId);
1492 // Update spill weight.
1493 unsigned loopDepth = loopInfo->getLoopDepth(MBB);
1494 nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
1497 if (NewVReg && TrySplit && AllCanFold) {
1498 // If all of its def / use can be folded, give it a low spill weight.
1499 LiveInterval &nI = getOrCreateInterval(NewVReg);
// alsoFoldARestore - Return true if block Id has a recorded restore of vreg
// vr at slot index 'index' that is still marked foldable. (Return
// statements for both outcomes are on lines elided from this view.)
1504 bool LiveIntervals::alsoFoldARestore(int Id, SlotIndex index,
1505 unsigned vr, BitVector &RestoreMBBs,
1506 DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
// No restores recorded for this block at all.
1507 if (!RestoreMBBs[Id])
1509 std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1510 for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1511 if (Restores[i].index == index &&
1512 Restores[i].vreg == vr &&
1513 Restores[i].canFold)
// eraseRestoreInfo - Invalidate the restore recorded for block Id at slot
// 'index' by clearing its index to a default SlotIndex (the entry is kept
// but becomes a no-op when restores are later issued).
1518 void LiveIntervals::eraseRestoreInfo(int Id, SlotIndex index,
1519 unsigned vr, BitVector &RestoreMBBs,
1520 DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1521 if (!RestoreMBBs[Id])
1523 std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1524 for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1525 if (Restores[i].index == index && Restores[i].vreg)
1526 Restores[i].index = SlotIndex();
1529 /// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being
1530 /// spilled and create empty intervals for their uses.
1532 LiveIntervals::handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm,
1533 const TargetRegisterClass* rc,
1534 std::vector<LiveInterval*> &NewLIs) {
1535 for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
1536 re = mri_->reg_end(); ri != re; ) {
1537 MachineOperand &O = ri.getOperand();
1538 MachineInstr *MI = &*ri;
// Defs of li.reg remaining here must be IMPLICIT_DEFs; delete them from
// the maps and from the function.
1541 assert(MI->isImplicitDef() &&
1542 "Register def was not rewritten?");
1543 RemoveMachineInstrFromMaps(MI);
1544 vrm.RemoveMachineInstrFromMaps(MI);
1545 MI->eraseFromParent();
1547 // This must be an use of an implicit_def so it's not part of the live
1548 // interval. Create a new empty live interval for it.
1549 // FIXME: Can we simply erase some of the instructions? e.g. Stores?
1550 unsigned NewVReg = mri_->createVirtualRegister(rc);
1552 vrm.setIsImplicitlyDefined(NewVReg);
1553 NewLIs.push_back(&getOrCreateInterval(NewVReg));
// Rewrite every operand of this user that still names li.reg.
1554 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1555 MachineOperand &MO = MI->getOperand(i);
1556 if (MO.isReg() && MO.getReg() == li.reg) {
// normalizeSpillWeights - Apply normalizeSpillWeight to each newly created
// interval. (The return-type line of the signature is elided in this view.)
1566 LiveIntervals::normalizeSpillWeights(std::vector<LiveInterval*> &NewLIs) {
1567 for (unsigned i = 0, e = NewLIs.size(); i != e; ++i)
1568 normalizeSpillWeight(*NewLIs[i]);
// addIntervalsForSpillsFast - Simple spilling path (enabled by -fast-spill):
// assign one stack slot, then for every instruction touching li.reg either
// fold the memory access or rewrite the operands to a fresh one-instruction
// interval with a restore before uses and a spill after defs. Returns the
// list of newly created intervals. (The VirtRegMap parameter is on a line
// elided from this view.)
1571 std::vector<LiveInterval*> LiveIntervals::
1572 addIntervalsForSpillsFast(const LiveInterval &li,
1573 const MachineLoopInfo *loopInfo,
1575 unsigned slot = vrm.assignVirt2StackSlot(li.reg);
1577 std::vector<LiveInterval*> added;
1579 assert(li.weight != HUGE_VALF &&
1580 "attempt to spill already spilled interval!");
1583 dbgs() << "\t\t\t\tadding intervals for spills for interval: ";
1588 const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
// The iterator is re-seeded after each rewrite (see bottom of the loop)
// because rewriting operands invalidates the use-list walk.
1590 MachineRegisterInfo::reg_iterator RI = mri_->reg_begin(li.reg);
1591 while (RI != mri_->reg_end()) {
1592 MachineInstr* MI = &*RI;
1594 SmallVector<unsigned, 2> Indices;
1595 bool HasUse = false;
1596 bool HasDef = false;
// Collect every operand index of MI that references the spilled register.
1598 for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
1599 MachineOperand& mop = MI->getOperand(i);
1600 if (!mop.isReg() || mop.getReg() != li.reg) continue;
1602 HasUse |= MI->getOperand(i).isUse();
1603 HasDef |= MI->getOperand(i).isDef();
1605 Indices.push_back(i);
// Prefer folding the stack slot directly into MI; only if that fails do we
// introduce a new vreg plus reload / spill points.
1608 if (!tryFoldMemoryOperand(MI, vrm, NULL, getInstructionIndex(MI),
1609 Indices, true, slot, li.reg)) {
1610 unsigned NewVReg = mri_->createVirtualRegister(rc);
1612 vrm.assignVirt2StackSlot(NewVReg, slot);
1614 // create a new register for this spill
1615 LiveInterval &nI = getOrCreateInterval(NewVReg);
1617 // the spill weight is now infinity as it
1618 // cannot be spilled again
1619 nI.weight = HUGE_VALF;
1621 // Rewrite register operands to use the new vreg.
1622 for (SmallVectorImpl<unsigned>::iterator I = Indices.begin(),
1623 E = Indices.end(); I != E; ++I) {
1624 MI->getOperand(*I).setReg(NewVReg);
// The new interval lives for exactly this instruction, so every use is
// trivially a kill.
1626 if (MI->getOperand(*I).isUse())
1627 MI->getOperand(*I).setIsKill(true);
1630 // Fill in the new live interval.
1631 SlotIndex index = getInstructionIndex(MI);
1633 LiveRange LR(index.getLoadIndex(), index.getUseIndex(),
1634 nI.getNextValue(SlotIndex(), 0, false,
1635 getVNInfoAllocator()));
1636 DEBUG(dbgs() << " +" << LR);
1638 vrm.addRestorePoint(NewVReg, MI);
1641 LiveRange LR(index.getDefIndex(), index.getStoreIndex(),
1642 nI.getNextValue(SlotIndex(), 0, false,
1643 getVNInfoAllocator()));
1644 DEBUG(dbgs() << " +" << LR);
1646 vrm.addSpillPoint(NewVReg, true, MI);
1649 added.push_back(&nI);
1652 dbgs() << "\t\t\t\tadded new interval: ";
1659 RI = mri_->reg_begin(li.reg);
// addIntervalsForSpills - Main spilling entry point: decides per value
// number whether the def can be rematerialized, assigns a stack slot when
// one is needed, rewrites every range via rewriteInstructionsForSpills, and
// finally materializes the recorded spill / restore points (folding them
// into neighboring instructions where legal). Returns the new intervals
// created for the spill.
1665 std::vector<LiveInterval*> LiveIntervals::
1666 addIntervalsForSpills(const LiveInterval &li,
1667 SmallVectorImpl<LiveInterval*> &SpillIs,
1668 const MachineLoopInfo *loopInfo, VirtRegMap &vrm) {
1670 if (EnableFastSpilling)
1671 return addIntervalsForSpillsFast(li, loopInfo, vrm);
1673 assert(li.weight != HUGE_VALF &&
1674 "attempt to spill already spilled interval!");
1677 dbgs() << "\t\t\t\tadding intervals for spills for interval: ";
1678 li.print(dbgs(), tri_);
1682 // Each bit specify whether a spill is required in the MBB.
1683 BitVector SpillMBBs(mf_->getNumBlockIDs());
1684 DenseMap<unsigned, std::vector<SRInfo> > SpillIdxes;
1685 BitVector RestoreMBBs(mf_->getNumBlockIDs());
1686 DenseMap<unsigned, std::vector<SRInfo> > RestoreIdxes;
1687 DenseMap<unsigned,unsigned> MBBVRegsMap;
1688 std::vector<LiveInterval*> NewLIs;
1689 const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
// Per-value-number remat bookkeeping: the clone to emit, the original def,
// the assigned remat id, and whether the original def can be deleted.
1691 unsigned NumValNums = li.getNumValNums();
1692 SmallVector<MachineInstr*, 4> ReMatDefs;
1693 ReMatDefs.resize(NumValNums, NULL);
1694 SmallVector<MachineInstr*, 4> ReMatOrigDefs;
1695 ReMatOrigDefs.resize(NumValNums, NULL);
1696 SmallVector<int, 4> ReMatIds;
1697 ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
1698 BitVector ReMatDelete(NumValNums);
1699 unsigned Slot = VirtRegMap::MAX_STACK_SLOT;
1701 // Spilling a split live interval. It cannot be split any further. Also,
1702 // it's also guaranteed to be a single val# / range interval.
1703 if (vrm.getPreSplitReg(li.reg)) {
1704 vrm.setIsSplitFromReg(li.reg, 0);
1705 // Unset the split kill marker on the last use.
1706 SlotIndex KillIdx = vrm.getKillPoint(li.reg);
1707 if (KillIdx != SlotIndex()) {
1708 MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
1709 assert(KillMI && "Last use disappeared?");
1710 int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
1711 assert(KillOp != -1 && "Last use disappeared?");
1712 KillMI->getOperand(KillOp).setIsKill(false);
1714 vrm.removeKillPoint(li.reg);
1715 bool DefIsReMat = vrm.isReMaterialized(li.reg);
1716 Slot = vrm.getStackSlot(li.reg);
1717 assert(Slot != VirtRegMap::MAX_STACK_SLOT);
1718 MachineInstr *ReMatDefMI = DefIsReMat ?
1719 vrm.getReMaterializedMI(li.reg) : NULL;
1721 bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1722 bool isLoad = isLoadSS ||
1723 (DefIsReMat && (ReMatDefMI->getDesc().canFoldAsLoad()));
1724 bool IsFirstRange = true;
1725 for (LiveInterval::Ranges::const_iterator
1726 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1727 // If this is a split live interval with multiple ranges, it means there
1728 // are two-address instructions that re-defined the value. Only the
1729 // first def can be rematerialized!
1731 // Note ReMatOrigDefMI has already been deleted.
1732 rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
1733 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1734 false, vrm, rc, ReMatIds, loopInfo,
1735 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1736 MBBVRegsMap, NewLIs);
// Non-first ranges: plain spill rewrite with no remat (else branch; the
// surrounding braces are elided in this view).
1738 rewriteInstructionsForSpills(li, false, I, NULL, 0,
1739 Slot, 0, false, false, false,
1740 false, vrm, rc, ReMatIds, loopInfo,
1741 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1742 MBBVRegsMap, NewLIs);
1744 IsFirstRange = false;
1747 handleSpilledImpDefs(li, vrm, rc, NewLIs);
1748 normalizeSpillWeights(NewLIs);
// Splitting is only attempted for intervals spanning multiple blocks.
1752 bool TrySplit = !intervalIsInOneMBB(li);
1755 bool NeedStackSlot = false;
1756 for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
1758 const VNInfo *VNI = *i;
1759 unsigned VN = VNI->id;
1760 if (VNI->isUnused())
1761 continue; // Dead val#.
1762 // Is the def for the val# rematerializable?
1763 MachineInstr *ReMatDefMI = VNI->isDefAccurate()
1764 ? getInstructionFromIndex(VNI->def) : 0;
1766 if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, SpillIs, dummy)) {
1767 // Remember how to remat the def of this val#.
1768 ReMatOrigDefs[VN] = ReMatDefMI;
1769 // Original def may be modified so we have to make a copy here.
1770 MachineInstr *Clone = mf_->CloneMachineInstr(ReMatDefMI);
1771 CloneMIs.push_back(Clone);
1772 ReMatDefs[VN] = Clone;
1774 bool CanDelete = true;
1775 if (VNI->hasPHIKill()) {
1776 // A kill is a phi node, not all of its uses can be rematerialized.
1777 // It must not be deleted.
1779 // Need a stack slot if there is any live range where uses cannot be
1781 NeedStackSlot = true;
1784 ReMatDelete.set(VN);
1786 // Need a stack slot if there is any live range where uses cannot be
1788 NeedStackSlot = true;
1792 // One stack slot per live interval.
1793 if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0) {
1794 if (vrm.getStackSlot(li.reg) == VirtRegMap::NO_STACK_SLOT)
1795 Slot = vrm.assignVirt2StackSlot(li.reg);
1797 // This case only occurs when the prealloc splitter has already assigned
1798 // a stack slot to this vreg.
1800 Slot = vrm.getStackSlot(li.reg);
1803 // Create new intervals and rewrite defs and uses.
1804 for (LiveInterval::Ranges::const_iterator
1805 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1806 MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
1807 MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
1808 bool DefIsReMat = ReMatDefMI != NULL;
1809 bool CanDelete = ReMatDelete[I->valno->id];
1811 bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1812 bool isLoad = isLoadSS ||
1813 (DefIsReMat && ReMatDefMI->getDesc().canFoldAsLoad());
1814 rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
1815 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1816 CanDelete, vrm, rc, ReMatIds, loopInfo,
1817 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1818 MBBVRegsMap, NewLIs);
1821 // Insert spills / restores if we are splitting.
1823 handleSpilledImpDefs(li, vrm, rc, NewLIs);
1824 normalizeSpillWeights(NewLIs);
1828 SmallPtrSet<LiveInterval*, 4> AddedKill;
1829 SmallVector<unsigned, 2> Ops;
// Emit the recorded spill points, folding the store into the def where
// possible.
1830 if (NeedStackSlot) {
1831 int Id = SpillMBBs.find_first();
1833 std::vector<SRInfo> &spills = SpillIdxes[Id];
1834 for (unsigned i = 0, e = spills.size(); i != e; ++i) {
1835 SlotIndex index = spills[i].index;
1836 unsigned VReg = spills[i].vreg;
1837 LiveInterval &nI = getOrCreateInterval(VReg);
1838 bool isReMat = vrm.isReMaterialized(VReg);
1839 MachineInstr *MI = getInstructionFromIndex(index);
1840 bool CanFold = false;
1841 bool FoundUse = false;
1843 if (spills[i].canFold) {
1845 for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
1846 MachineOperand &MO = MI->getOperand(j);
1847 if (!MO.isReg() || MO.getReg() != VReg)
1854 (!FoundUse && !alsoFoldARestore(Id, index, VReg,
1855 RestoreMBBs, RestoreIdxes))) {
1856 // MI has two-address uses of the same register. If the use
1857 // isn't the first and only use in the BB, then we can't fold
1858 // it. FIXME: Move this to rewriteInstructionsForSpills.
1865 // Fold the store into the def if possible.
1866 bool Folded = false;
1867 if (CanFold && !Ops.empty()) {
1868 if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot,VReg)){
1871 // Also folded uses, do not issue a load.
1872 eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
1873 nI.removeRange(index.getLoadIndex(), index.getDefIndex());
1875 nI.removeRange(index.getDefIndex(), index.getStoreIndex());
1879 // Otherwise tell the spiller to issue a spill.
1881 LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
1882 bool isKill = LR->end == index.getStoreIndex();
1883 if (!MI->registerDefIsDead(nI.reg))
1884 // No need to spill a dead def.
1885 vrm.addSpillPoint(VReg, isKill, MI);
1887 AddedKill.insert(&nI);
1890 Id = SpillMBBs.find_next(Id);
// Emit the recorded restore points, folding the load into the use where
// possible (entries invalidated by eraseRestoreInfo are skipped).
1894 int Id = RestoreMBBs.find_first();
1896 std::vector<SRInfo> &restores = RestoreIdxes[Id];
1897 for (unsigned i = 0, e = restores.size(); i != e; ++i) {
1898 SlotIndex index = restores[i].index;
1899 if (index == SlotIndex())
1901 unsigned VReg = restores[i].vreg;
1902 LiveInterval &nI = getOrCreateInterval(VReg);
1903 bool isReMat = vrm.isReMaterialized(VReg);
1904 MachineInstr *MI = getInstructionFromIndex(index);
1905 bool CanFold = false;
1907 if (restores[i].canFold) {
1909 for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
1910 MachineOperand &MO = MI->getOperand(j);
1911 if (!MO.isReg() || MO.getReg() != VReg)
1915 // If this restore were to be folded, it would have been folded
1924 // Fold the load into the use if possible.
1925 bool Folded = false;
1926 if (CanFold && !Ops.empty()) {
1928 Folded = tryFoldMemoryOperand(MI, vrm, NULL,index,Ops,true,Slot,VReg);
// Rematerialized def path: try folding the remat instruction itself when
// it is (or can act as) a load.
1930 MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
1932 bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1933 // If the rematerializable def is a load, also try to fold it.
1934 if (isLoadSS || ReMatDefMI->getDesc().canFoldAsLoad())
1935 Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
1936 Ops, isLoadSS, LdSlot, VReg);
1938 unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
1940 // Re-matting an instruction with virtual register use. Add the
1941 // register as an implicit use on the use MI and update the register
1942 // interval's spill weight to HUGE_VALF to prevent it from being
1944 LiveInterval &ImpLi = getInterval(ImpUse);
1945 ImpLi.weight = HUGE_VALF;
1946 MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
1951 // If folding is not possible / failed, then tell the spiller to issue a
1952 // load / rematerialization for us.
1954 nI.removeRange(index.getLoadIndex(), index.getDefIndex());
1956 vrm.addRestorePoint(VReg, MI);
1958 Id = RestoreMBBs.find_next(Id);
1961 // Finalize intervals: add kills, finalize spill weights, and filter out
1963 std::vector<LiveInterval*> RetNewLIs;
1964 for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
1965 LiveInterval *LI = NewLIs[i];
1967 LI->weight /= SlotIndex::NUM * getApproximateInstructionCount(*LI);
1968 if (!AddedKill.count(LI)) {
// Mark the last use of each new interval as a kill (unless it is tied to
// a def operand) and remember the kill point in the VirtRegMap.
1969 LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
1970 SlotIndex LastUseIdx = LR->end.getBaseIndex();
1971 MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
1972 int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
1973 assert(UseIdx != -1);
1974 if (!LastUse->isRegTiedToDefOperand(UseIdx)) {
1975 LastUse->getOperand(UseIdx).setIsKill();
1976 vrm.addKillPoint(LI->reg, LastUseIdx);
1979 RetNewLIs.push_back(LI);
1983 handleSpilledImpDefs(li, vrm, rc, RetNewLIs);
1984 normalizeSpillWeights(RetNewLIs);
1988 /// hasAllocatableSuperReg - Return true if the specified physical register has
1989 /// any super register that's allocatable.
1990 bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
// A super-register qualifies only if it is allocatable AND has a computed
// live interval. (The final "return false;" is elided in this view.)
1991 for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS)
1992 if (allocatableRegs_[*AS] && hasInterval(*AS))
1997 /// getRepresentativeReg - Find the largest super register of the specified
1998 /// physical register.
1999 unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
2000 // Find the largest super-register that is allocatable.
2001 unsigned BestReg = Reg;
2002 for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
2003 unsigned SuperReg = *AS;
// Select a super-register with an interval that itself has no allocatable
// super-register, i.e. the top of the register hierarchy for Reg.
2004 if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) {
2012 /// getNumConflictsWithPhysReg - Return the number of uses and defs of the
2013 /// specified interval that conflicts with the specified physical register.
2014 unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
2015 unsigned PhysReg) const {
2016 unsigned NumConflicts = 0;
// Conflicts are counted against the representative (largest allocatable)
// super-register's interval, not PhysReg's own.
2017 const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg));
2018 for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
2019 E = mri_->reg_end(); I != E; ++I) {
2020 MachineOperand &O = I.getOperand();
2021 MachineInstr *MI = O.getParent();
2022 SlotIndex Index = getInstructionIndex(MI);
// Each operand occurrence at an index where pli is live is one conflict
// (the increment statement is on a line elided from this view).
2023 if (pli.liveAt(Index))
2026 return NumConflicts;
2029 /// spillPhysRegAroundRegDefsUses - Spill the specified physical register
2030 /// around all defs and uses of the specified interval. Return true if it
2031 /// was able to cut its interval.
2032 bool LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
2033 unsigned PhysReg, VirtRegMap &vrm) {
// Spill the representative super-register so that all aliasing registers
// are covered by the emergency spill.
2034 unsigned SpillReg = getRepresentativeReg(PhysReg);
2036 for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
2037 // If there are registers which alias PhysReg, but which are not a
2038 // sub-register of the chosen representative super register. Assert
2039 // since we can't handle it yet.
2040 assert(*AS == SpillReg || !allocatableRegs_[*AS] || !hasInterval(*AS) ||
2041 tri_->isSuperRegister(*AS, SpillReg));
// Collect SpillReg and every sub-register that has an interval; these are
// the physreg intervals to cut around each def / use of li.
2044 SmallVector<unsigned, 4> PRegs;
2045 if (hasInterval(SpillReg))
2046 PRegs.push_back(SpillReg);
2048 SmallSet<unsigned, 4> Added;
2049 for (const unsigned* AS = tri_->getSubRegisters(SpillReg); *AS; ++AS)
2050 if (Added.insert(*AS) && hasInterval(*AS)) {
2051 PRegs.push_back(*AS);
2052 for (const unsigned* ASS = tri_->getSubRegisters(*AS); *ASS; ++ASS)
// Visit each instruction touching li.reg once, even if it has several
// operands referencing the register.
2057 SmallPtrSet<MachineInstr*, 8> SeenMIs;
2058 for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
2059 E = mri_->reg_end(); I != E; ++I) {
2060 MachineOperand &O = I.getOperand();
2061 MachineInstr *MI = O.getParent();
2062 if (SeenMIs.count(MI))
2065 SlotIndex Index = getInstructionIndex(MI);
2066 for (unsigned i = 0, e = PRegs.size(); i != e; ++i) {
2067 unsigned PReg = PRegs[i];
2068 LiveInterval &pli = getInterval(PReg);
2069 if (!pli.liveAt(Index))
2071 vrm.addEmergencySpill(PReg, MI);
2072 SlotIndex StartIdx = Index.getLoadIndex();
2073 SlotIndex EndIdx = Index.getNextIndex().getBaseIndex();
2074 if (pli.isInOneLiveRange(StartIdx, EndIdx)) {
2075 pli.removeRange(StartIdx, EndIdx);
// Unable to cut the physreg interval around this instruction: report a
// fatal out-of-registers condition (the else branch is partially elided).
2079 raw_string_ostream Msg(msg);
2080 Msg << "Ran out of registers during register allocation!";
2081 if (MI->isInlineAsm()) {
2082 Msg << "\nPlease check your inline asm statement for invalid "
2083 << "constraints:\n";
2084 MI->print(Msg, tm_);
2086 llvm_report_error(Msg.str());
// Also cut the intervals of PReg's sub-registers that are live here.
2088 for (const unsigned* AS = tri_->getSubRegisters(PReg); *AS; ++AS) {
2089 if (!hasInterval(*AS))
2091 LiveInterval &spli = getInterval(*AS);
2092 if (spli.liveAt(Index))
2093 spli.removeRange(Index.getLoadIndex(),
2094 Index.getNextIndex().getBaseIndex());
2101 LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg,
2102 MachineInstr* startInst) {
2103 LiveInterval& Interval = getOrCreateInterval(reg);
2104 VNInfo* VN = Interval.getNextValue(
2105 SlotIndex(getInstructionIndex(startInst).getDefIndex()),
2106 startInst, true, getVNInfoAllocator());
2107 VN->setHasPHIKill(true);
2108 VN->kills.push_back(indexes_->getTerminatorGap(startInst->getParent()));
2110 SlotIndex(getInstructionIndex(startInst).getDefIndex()),
2111 getMBBEndIdx(startInst->getParent()), VN);
2112 Interval.addRange(LR);