1 //===-- LiveIntervalAnalysis.cpp - Live Interval Analysis -----------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file implements the LiveInterval analysis pass which is used
11 // by the Linear Scan Register allocator. This pass linearizes the
12 // basic blocks of the function in DFS order and uses the
13 // LiveVariables pass to conservatively compute live intervals for
14 // each virtual and physical register.
16 //===----------------------------------------------------------------------===//
18 #define DEBUG_TYPE "liveintervals"
19 #include "llvm/CodeGen/LiveIntervalAnalysis.h"
20 #include "VirtRegMap.h"
21 #include "llvm/Value.h"
22 #include "llvm/Analysis/AliasAnalysis.h"
23 #include "llvm/CodeGen/LiveVariables.h"
24 #include "llvm/CodeGen/MachineFrameInfo.h"
25 #include "llvm/CodeGen/MachineInstr.h"
26 #include "llvm/CodeGen/MachineInstrBuilder.h"
27 #include "llvm/CodeGen/MachineLoopInfo.h"
28 #include "llvm/CodeGen/MachineMemOperand.h"
29 #include "llvm/CodeGen/MachineRegisterInfo.h"
30 #include "llvm/CodeGen/Passes.h"
31 #include "llvm/CodeGen/ProcessImplicitDefs.h"
32 #include "llvm/Target/TargetRegisterInfo.h"
33 #include "llvm/Target/TargetInstrInfo.h"
34 #include "llvm/Target/TargetMachine.h"
35 #include "llvm/Target/TargetOptions.h"
36 #include "llvm/Support/CommandLine.h"
37 #include "llvm/Support/Debug.h"
38 #include "llvm/Support/ErrorHandling.h"
39 #include "llvm/Support/raw_ostream.h"
40 #include "llvm/ADT/DepthFirstIterator.h"
41 #include "llvm/ADT/SmallSet.h"
42 #include "llvm/ADT/Statistic.h"
43 #include "llvm/ADT/STLExtras.h"
49 // Hidden options for help debugging.
// NOTE(review): presumably gates rematerialization during spilling — confirm
// against the code that reads this flag.
50 static cl::opt<bool> DisableReMat("disable-rematerialization",
51 cl::init(false), cl::Hidden);
// NOTE(review): presumably selects a faster, less thorough spilling path.
53 static cl::opt<bool> EnableFastSpilling("fast-spill",
54 cl::init(false), cl::Hidden);
// Pass statistics, reported with -stats.
56 STATISTIC(numIntervals , "Number of original intervals");
57 STATISTIC(numFolds    , "Number of loads/stores folded into instructions");
58 STATISTIC(numSplits   , "Number of intervals split");
// Pass registration: the unique pass identifier and its command-line name.
60 char LiveIntervals::ID = 0;
61 static RegisterPass<LiveIntervals> X("liveintervals", "Live Interval Analysis");
/// getAnalysisUsage - Declare the analyses and passes this pass requires,
/// and those it preserves, so the pass manager can schedule it correctly.
63 void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
65 AU.addRequired<AliasAnalysis>();
66 AU.addPreserved<AliasAnalysis>();
67 AU.addPreserved<LiveVariables>();
68 AU.addRequired<LiveVariables>();
69 AU.addPreservedID(MachineLoopInfoID);
70 AU.addPreservedID(MachineDominatorsID);
73 AU.addPreservedID(PHIEliminationID);
74 AU.addRequiredID(PHIEliminationID);
77 AU.addRequiredID(TwoAddressInstructionPassID);
78 AU.addPreserved<ProcessImplicitDefs>();
79 AU.addRequired<ProcessImplicitDefs>();
80 AU.addPreserved<SlotIndexes>();
81 AU.addRequiredTransitive<SlotIndexes>();
// Chain to the base class to pick up common MachineFunctionPass usage.
82 MachineFunctionPass::getAnalysisUsage(AU);
/// releaseMemory - Free all analysis data when the pass results are
/// invalidated.
85 void LiveIntervals::releaseMemory() {
86 // Free the live intervals themselves.
// NOTE(review): the loop body that deletes each LiveInterval* is not visible
// here — confirm every mapped interval is actually deleted.
87 for (DenseMap<unsigned, LiveInterval*>::iterator I = r2iMap_.begin(),
88 E = r2iMap_.end(); I != E; ++I)
93 // Release VNInfo memory regions after all VNInfo objects are dtor'd.
94 VNInfoAllocator.Reset();
// Delete any cloned machine instructions still owned by this pass.
95 while (!CloneMIs.empty()) {
96 MachineInstr *MI = CloneMIs.back();
98 mf_->DeleteMachineInstr(MI);
102 /// runOnMachineFunction - Compute live intervals for the given function.
104 bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
// Cache pointers to the target description and required analyses.
// NOTE(review): mf_ is dereferenced before a visible "mf_ = &fn;" — confirm
// it is assigned earlier in this function.
106 mri_ = &mf_->getRegInfo();
107 tm_ = &fn.getTarget();
108 tri_ = tm_->getRegisterInfo();
109 tii_ = tm_->getInstrInfo();
110 aa_ = &getAnalysis<AliasAnalysis>();
111 lv_ = &getAnalysis<LiveVariables>();
112 indexes_ = &getAnalysis<SlotIndexes>();
113 allocatableRegs_ = tri_->getAllocatableSet(fn);
// Record the number of computed intervals for -stats.
117 numIntervals += getNumIntervals();
123 /// print - Implement the dump method.
124 void LiveIntervals::print(raw_ostream &OS, const Module* ) const {
125 OS << "********** INTERVALS **********\n";
// Print every computed live interval.
126 for (const_iterator I = begin(), E = end(); I != E; ++I) {
127 I->second->print(OS, tri_);
/// printInstrs - Print each machine instruction with its slot index, grouped
/// by basic block.
134 void LiveIntervals::printInstrs(raw_ostream &OS) const {
135 OS << "********** MACHINEINSTRS **********\n";
137 for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end();
138 mbbi != mbbe; ++mbbi) {
139 OS << "BB#" << mbbi->getNumber()
140 << ":\t\t# derived from " << mbbi->getName() << "\n";
141 for (MachineBasicBlock::iterator mii = mbbi->begin(),
142 mie = mbbi->end(); mii != mie; ++mii) {
// DEBUG_VALUE instructions have no slot index; print a sentinel instead.
143 if (mii->getOpcode()==TargetInstrInfo::DEBUG_VALUE)
144 OS << SlotIndex::getEmptyKey() << '\t' << *mii;
146 OS << getInstructionIndex(mii) << '\t' << *mii;
/// dumpInstrs - Debug helper. NOTE(review): presumably forwards to
/// printInstrs(dbgs()) — body not visible here, confirm.
151 void LiveIntervals::dumpInstrs() const {
/// conflictsWithPhysReg - Return true if assigning the physical register
/// 'reg' would conflict with an operand of some instruction inside li's
/// (single) live range. Copies to/from li.reg itself are permitted.
155 bool LiveIntervals::conflictsWithPhysReg(const LiveInterval &li,
156 VirtRegMap &vrm, unsigned reg) {
157 // We don't handle fancy stuff crossing basic block boundaries
158 if (li.ranges.size() != 1)
160 const LiveRange &range = li.ranges.front();
161 SlotIndex idx = range.start.getBaseIndex();
162 SlotIndex end = range.end.getPrevSlot().getBaseIndex().getNextIndex();
164 // Skip deleted instructions
165 MachineInstr *firstMI = getInstructionFromIndex(idx);
166 while (!firstMI && idx != end) {
167 idx = idx.getNextIndex();
168 firstMI = getInstructionFromIndex(idx);
173 // Find last instruction in range
174 SlotIndex lastIdx = end.getPrevIndex();
175 MachineInstr *lastMI = getInstructionFromIndex(lastIdx);
176 while (!lastMI && lastIdx != idx) {
177 lastIdx = lastIdx.getPrevIndex();
178 lastMI = getInstructionFromIndex(lastIdx);
183 // Range cannot cross basic block boundaries or terminators
184 MachineBasicBlock *MBB = firstMI->getParent();
185 if (MBB != lastMI->getParent() || lastMI->getDesc().isTerminator())
188 MachineBasicBlock::const_iterator E = lastMI;
190 for (MachineBasicBlock::const_iterator I = firstMI; I != E; ++I) {
191 const MachineInstr &MI = *I;
193 // Allow copies to and from li.reg
194 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
195 if (tii_->isMoveInstr(MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
196 if (SrcReg == li.reg || DstReg == li.reg)
199 // Check for operands using reg
200 for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
201 const MachineOperand& mop = MI.getOperand(i);
204 unsigned PhysReg = mop.getReg();
205 if (PhysReg == 0 || PhysReg == li.reg)
// A virtual register operand conflicts through its current phys mapping.
207 if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
208 if (!vrm.hasPhys(PhysReg))
210 PhysReg = vrm.getPhys(PhysReg);
// Conflict when the operand's physreg overlaps (aliases) the candidate.
212 if (PhysReg && tri_->regsOverlap(PhysReg, reg))
217 // No conflicts found.
221 /// conflictsWithPhysRegRef - Similar to conflictsWithPhysReg except
222 /// it can check use as well.
223 bool LiveIntervals::conflictsWithPhysRegRef(LiveInterval &li,
224 unsigned Reg, bool CheckUse,
225 SmallPtrSet<MachineInstr*,32> &JoinedCopies) {
226 for (LiveInterval::Ranges::const_iterator
227 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
// Walk every base slot index covered by this live range.
228 for (SlotIndex index = I->start.getBaseIndex(),
229 end = I->end.getPrevSlot().getBaseIndex().getNextIndex();
231 index = index.getNextIndex()) {
232 MachineInstr *MI = getInstructionFromIndex(index);
234 continue; // skip deleted instructions
// Skip copies the coalescer has already joined.
236 if (JoinedCopies.count(MI))
238 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
239 MachineOperand& MO = MI->getOperand(i);
// Only consider use operands when the caller requested it.
242 if (MO.isUse() && !CheckUse)
244 unsigned PhysReg = MO.getReg();
245 if (PhysReg == 0 || TargetRegisterInfo::isVirtualRegister(PhysReg))
// Conflict if the operand references a sub-register of Reg.
247 if (tri_->isSubRegister(Reg, PhysReg))
// printRegName - Print a register: by target name if physical, as
// "%reg<N>" if virtual.
257 static void printRegName(unsigned reg, const TargetRegisterInfo* tri_) {
258 if (TargetRegisterInfo::isPhysicalRegister(reg))
259 dbgs() << tri_->getName(reg);
261 dbgs() << "%reg" << reg;
/// handleVirtualRegisterDef - Extend the given virtual register's live
/// interval to account for the definition at MIIdx. Handles first defs,
/// two-address redefinitions, and PHI-elimination redefinitions.
265 void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
266 MachineBasicBlock::iterator mi,
270 LiveInterval &interval) {
272 dbgs() << "\t\tregister: ";
273 printRegName(interval.reg, tri_);
276 // Virtual registers may be defined multiple times (due to phi
277 // elimination and 2-addr elimination). Much of what we do only has to be
278 // done once for the vreg. We use an empty interval to detect the first
279 // time we see a vreg.
280 LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);
281 if (interval.empty()) {
282 // Get the Idx of the defining instructions.
283 SlotIndex defIndex = MIIdx.getDefIndex();
284 // Earlyclobbers move back one, so that they overlap the live range
286 if (MO.isEarlyClobber())
287 defIndex = MIIdx.getUseIndex();
// Record the defining instruction if it is a copy-like instruction, so
// the coalescer can later identify the copy source.
289 MachineInstr *CopyMI = NULL;
290 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
291 if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
292 mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
293 mi->getOpcode() == TargetInstrInfo::SUBREG_TO_REG ||
294 tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
296 // Earlyclobbers move back one.
297 ValNo = interval.getNextValue(defIndex, CopyMI, true, VNInfoAllocator);
299 assert(ValNo->id == 0 && "First value in interval is not 0?");
301 // Loop over all of the blocks that the vreg is defined in. There are
302 // two cases we have to handle here. The most common case is a vreg
303 // whose lifetime is contained within a basic block. In this case there
304 // will be a single kill, in MBB, which comes after the definition.
305 if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
306 // FIXME: what about dead vars?
308 if (vi.Kills[0] != mi)
309 killIdx = getInstructionIndex(vi.Kills[0]).getDefIndex();
311 killIdx = defIndex.getStoreIndex();
313 // If the kill happens after the definition, we have an intra-block
315 if (killIdx > defIndex) {
316 assert(vi.AliveBlocks.empty() &&
317 "Shouldn't be alive across any blocks!");
318 LiveRange LR(defIndex, killIdx, ValNo);
319 interval.addRange(LR);
320 DEBUG(dbgs() << " +" << LR << "\n");
321 ValNo->addKill(killIdx);
326 // The other case we handle is when a virtual register lives to the end
327 // of the defining block, potentially live across some blocks, then is
328 // live into some number of blocks, but gets killed. Start by adding a
329 // range that goes from this definition to the end of the defining block.
330 LiveRange NewLR(defIndex, getMBBEndIdx(mbb), ValNo);
331 DEBUG(dbgs() << " +" << NewLR);
332 interval.addRange(NewLR);
334 // Iterate over all of the blocks that the variable is completely
335 // live in, adding [insrtIndex(begin), instrIndex(end)+4) to the
337 for (SparseBitVector<>::iterator I = vi.AliveBlocks.begin(),
338 E = vi.AliveBlocks.end(); I != E; ++I) {
339 MachineBasicBlock *aliveBlock = mf_->getBlockNumbered(*I);
340 LiveRange LR(getMBBStartIdx(aliveBlock), getMBBEndIdx(aliveBlock), ValNo);
341 interval.addRange(LR);
342 DEBUG(dbgs() << " +" << LR);
345 // Finally, this virtual register is live from the start of any killing
346 // block to the 'use' slot of the killing instruction.
347 for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
348 MachineInstr *Kill = vi.Kills[i];
350 getInstructionIndex(Kill).getDefIndex();
351 LiveRange LR(getMBBStartIdx(Kill->getParent()), killIdx, ValNo);
352 interval.addRange(LR);
353 ValNo->addKill(killIdx);
354 DEBUG(dbgs() << " +" << LR);
358 // If this is the second time we see a virtual register definition, it
359 // must be due to phi elimination or two addr elimination. If this is
360 // the result of two address elimination, then the vreg is one of the
361 // def-and-use register operand.
362 if (mi->isRegTiedToUseOperand(MOIdx)) {
363 // If this is a two-address definition, then we have already processed
364 // the live range. The only problem is that we didn't realize there
365 // are actually two values in the live interval. Because of this we
366 // need to take the LiveRegion that defines this register and split it
368 assert(interval.containsOneValue());
369 SlotIndex DefIndex = interval.getValNumInfo(0)->def.getDefIndex();
370 SlotIndex RedefIndex = MIIdx.getDefIndex();
371 if (MO.isEarlyClobber())
372 RedefIndex = MIIdx.getUseIndex();
374 const LiveRange *OldLR =
375 interval.getLiveRangeContaining(RedefIndex.getUseIndex());
376 VNInfo *OldValNo = OldLR->valno;
378 // Delete the initial value, which should be short and continuous,
379 // because the 2-addr copy must be in the same MBB as the redef.
380 interval.removeRange(DefIndex, RedefIndex);
382 // Two-address vregs should always only be redefined once. This means
383 // that at this point, there should be exactly one value number in it.
384 assert(interval.containsOneValue() && "Unexpected 2-addr liveint!");
386 // The new value number (#1) is defined by the instruction we claimed
388 VNInfo *ValNo = interval.getNextValue(OldValNo->def, OldValNo->getCopy(),
389 false, // update at *
391 ValNo->setFlags(OldValNo->getFlags()); // * <- updating here
393 // Value#0 is now defined by the 2-addr instruction.
394 OldValNo->def = RedefIndex;
395 OldValNo->setCopy(0);
397 // Add the new live interval which replaces the range for the input copy.
398 LiveRange LR(DefIndex, RedefIndex, ValNo);
399 DEBUG(dbgs() << " replace range with " << LR);
400 interval.addRange(LR);
401 ValNo->addKill(RedefIndex);
403 // If this redefinition is dead, we need to add a dummy unit live
404 // range covering the def slot.
406 interval.addRange(LiveRange(RedefIndex, RedefIndex.getStoreIndex(),
410 dbgs() << " RESULT: ";
411 interval.print(dbgs(), tri_);
414 // Otherwise, this must be because of phi elimination. If this is the
415 // first redefinition of the vreg that we have seen, go back and change
416 // the live range in the PHI block to be a different value number.
417 if (interval.containsOneValue()) {
419 VNInfo *VNI = interval.getValNumInfo(0);
420 // Phi elimination may have reused the register for multiple identical
421 // phi nodes. There will be a kill per phi. Remove the old ranges that
422 // we now know have an incorrect number.
423 for (unsigned ki=0, ke=vi.Kills.size(); ki != ke; ++ki) {
424 MachineInstr *Killer = vi.Kills[ki];
425 SlotIndex Start = getMBBStartIdx(Killer->getParent());
426 SlotIndex End = getInstructionIndex(Killer).getDefIndex();
428 dbgs() << "\n\t\trenaming [" << Start << "," << End << "] in: ";
429 interval.print(dbgs(), tri_);
431 interval.removeRange(Start, End);
433 // Replace the interval with one of a NEW value number. Note that
434 // this value number isn't actually defined by an instruction, weird
436 LiveRange LR(Start, End,
437 interval.getNextValue(SlotIndex(Start, true),
438 0, false, VNInfoAllocator));
439 LR.valno->setIsPHIDef(true);
440 interval.addRange(LR);
441 LR.valno->addKill(End);
// The original value is killed at the end of its defining block.
444 MachineBasicBlock *killMBB = getMBBFromIndex(VNI->def);
445 VNI->addKill(indexes_->getTerminatorGap(killMBB));
446 VNI->setHasPHIKill(true);
448 dbgs() << " RESULT: ";
449 interval.print(dbgs(), tri_);
453 // In the case of PHI elimination, each variable definition is only
454 // live until the end of the block. We've already taken care of the
455 // rest of the live range.
456 SlotIndex defIndex = MIIdx.getDefIndex();
457 if (MO.isEarlyClobber())
458 defIndex = MIIdx.getUseIndex();
// Track copy-like defining instructions for the coalescer, as above.
461 MachineInstr *CopyMI = NULL;
462 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
463 if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
464 mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
465 mi->getOpcode() == TargetInstrInfo::SUBREG_TO_REG ||
466 tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
468 ValNo = interval.getNextValue(defIndex, CopyMI, true, VNInfoAllocator);
470 SlotIndex killIndex = getMBBEndIdx(mbb);
471 LiveRange LR(defIndex, killIndex, ValNo);
472 interval.addRange(LR);
473 ValNo->addKill(indexes_->getTerminatorGap(mbb));
474 ValNo->setHasPHIKill(true);
475 DEBUG(dbgs() << " +" << LR);
479 DEBUG(dbgs() << '\n');
/// handlePhysicalRegisterDef - Add a live range for the physical register
/// defined at MIIdx. The range ends within the defining block, at the kill,
/// at a redefinition, or immediately if the def is dead.
482 void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
483 MachineBasicBlock::iterator mi,
486 LiveInterval &interval,
487 MachineInstr *CopyMI) {
488 // A physical register cannot be live across basic block, so its
489 // lifetime must end somewhere in its defining basic block.
491 dbgs() << "\t\tregister: ";
492 printRegName(interval.reg, tri_);
495 SlotIndex baseIndex = MIIdx;
496 SlotIndex start = baseIndex.getDefIndex();
497 // Earlyclobbers move back one.
498 if (MO.isEarlyClobber())
499 start = MIIdx.getUseIndex();
500 SlotIndex end = start;
502 // If it is not used after definition, it is considered dead at
503 // the instruction defining it. Hence its interval is:
504 // [defSlot(def), defSlot(def)+1)
505 // For earlyclobbers, the defSlot was pushed back one; the extra
506 // advance below compensates.
508 DEBUG(dbgs() << " dead");
509 end = start.getStoreIndex();
513 // If it is not dead on definition, it must be killed by a
514 // subsequent instruction. Hence its interval is:
515 // [defSlot(def), useSlot(kill)+1)
516 baseIndex = baseIndex.getNextIndex();
// Scan forward in the block for the kill or a redefinition.
517 while (++mi != MBB->end()) {
519 if (getInstructionFromIndex(baseIndex) == 0)
520 baseIndex = indexes_->getNextNonNullIndex(baseIndex);
522 if (mi->killsRegister(interval.reg, tri_)) {
523 DEBUG(dbgs() << " killed");
524 end = baseIndex.getDefIndex();
527 int DefIdx = mi->findRegisterDefOperandIdx(interval.reg, false, tri_);
529 if (mi->isRegTiedToUseOperand(DefIdx)) {
530 // Two-address instruction.
531 end = baseIndex.getDefIndex();
533 // Another instruction redefines the register before it is ever read.
534 // Then the register is essentially dead at the instruction that defines
535 // it. Hence its interval is:
536 // [defSlot(def), defSlot(def)+1)
537 DEBUG(dbgs() << " dead");
538 end = start.getStoreIndex();
544 baseIndex = baseIndex.getNextIndex();
547 // The only case we should have a dead physreg here without a killing or
548 // instruction where we know it's dead is if it is live-in to the function
549 // and never used. Another possible case is the implicit use of the
550 // physical register has been deleted by two-address pass.
551 end = start.getStoreIndex();
554 assert(start < end && "did not find end of interval?");
556 // Already exists? Extend old live interval.
557 LiveInterval::iterator OldLR = interval.FindLiveRangeContaining(start);
558 bool Extend = OldLR != interval.end();
559 VNInfo *ValNo = Extend
560 ? OldLR->valno : interval.getNextValue(start, CopyMI, true, VNInfoAllocator);
// Remember that an earlyclobber redefined an existing value.
561 if (MO.isEarlyClobber() && Extend)
562 ValNo->setHasRedefByEC(true);
563 LiveRange LR(start, end, ValNo);
564 interval.addRange(LR);
565 LR.valno->addKill(end);
566 DEBUG(dbgs() << " +" << LR << '\n');
/// handleRegisterDef - Dispatch a register definition to the virtual or
/// physical handler; a physical def also covers its sub-registers.
569 void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
570 MachineBasicBlock::iterator MI,
574 if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
575 handleVirtualRegisterDef(MBB, MI, MIIdx, MO, MOIdx,
576 getOrCreateInterval(MO.getReg()));
// Only allocatable physical registers get live intervals.
577 else if (allocatableRegs_[MO.getReg()]) {
// Track copy-like defining instructions for coalescing.
578 MachineInstr *CopyMI = NULL;
579 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
580 if (MI->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
581 MI->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
582 MI->getOpcode() == TargetInstrInfo::SUBREG_TO_REG ||
583 tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
585 handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
586 getOrCreateInterval(MO.getReg()), CopyMI);
587 // Def of a register also defines its sub-registers.
588 for (const unsigned* AS = tri_->getSubRegisters(MO.getReg()); *AS; ++AS)
589 // If MI also modifies the sub-register explicitly, avoid processing it
590 // more than once. Do not pass in TRI here so it checks for exact match.
591 if (!MI->modifiesRegister(*AS))
592 handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
593 getOrCreateInterval(*AS), 0);
/// handleLiveInRegister - Create a live range for a register that is
/// live-in to MBB, ending at its first kill/redef (or the end of the block
/// if it lives through). The value number is marked as a PHI def.
597 void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
599 LiveInterval &interval, bool isAlias) {
601 dbgs() << "\t\tlivein register: ";
602 printRegName(interval.reg, tri_);
605 // Look for kills, if it reaches a def before it's killed, then it shouldn't
606 // be considered a livein.
607 MachineBasicBlock::iterator mi = MBB->begin();
608 SlotIndex baseIndex = MIIdx;
609 SlotIndex start = baseIndex;
610 if (getInstructionFromIndex(baseIndex) == 0)
611 baseIndex = indexes_->getNextNonNullIndex(baseIndex);
613 SlotIndex end = baseIndex;
614 bool SeenDefUse = false;
616 while (mi != MBB->end()) {
617 if (mi->killsRegister(interval.reg, tri_)) {
618 DEBUG(dbgs() << " killed");
619 end = baseIndex.getDefIndex();
622 } else if (mi->modifiesRegister(interval.reg, tri_)) {
623 // Another instruction redefines the register before it is ever read.
624 // Then the register is essentially dead at the instruction that defines
625 // it. Hence its interval is:
626 // [defSlot(def), defSlot(def)+1)
627 DEBUG(dbgs() << " dead");
628 end = start.getStoreIndex();
634 if (mi != MBB->end()) {
635 baseIndex = indexes_->getNextNonNullIndex(baseIndex);
639 // Live-in register might not be used at all.
642 DEBUG(dbgs() << " dead");
643 end = MIIdx.getStoreIndex();
645 DEBUG(dbgs() << " live through");
// The live-in value has no defining instruction; mark it as a PHI def
// starting at the block entry.
651 interval.getNextValue(SlotIndex(getMBBStartIdx(MBB), true),
652 0, false, VNInfoAllocator);
653 vni->setIsPHIDef(true);
654 LiveRange LR(start, end, vni);
656 interval.addRange(LR);
657 LR.valno->addKill(end);
658 DEBUG(dbgs() << " +" << LR << '\n');
661 /// computeIntervals - computes the live intervals for virtual
662 /// registers. for some ordering of the machine instructions [1,N] a
663 /// live interval is an interval [i, j) where 1 <= i <= j < N for
664 /// which a variable is live
665 void LiveIntervals::computeIntervals() {
666 DEBUG(dbgs() << "********** COMPUTING LIVE INTERVALS **********\n"
667 << "********** Function: "
668 << ((Value*)mf_->getFunction())->getName() << '\n');
// Registers only referenced by <undef> uses; handled after the main walk.
670 SmallVector<unsigned, 8> UndefUses;
671 for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
673 MachineBasicBlock *MBB = MBBI;
677 // Track the index of the current machine instr.
678 SlotIndex MIIndex = getMBBStartIdx(MBB);
679 DEBUG(dbgs() << MBB->getName() << ":\n");
681 // Create intervals for live-ins to this BB first.
682 for (MachineBasicBlock::const_livein_iterator LI = MBB->livein_begin(),
683 LE = MBB->livein_end(); LI != LE; ++LI) {
684 handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
685 // Multiple live-ins can alias the same register.
686 for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
687 if (!hasInterval(*AS))
688 handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
692 // Skip over empty initial indices.
693 if (getInstructionFromIndex(MIIndex) == 0)
694 MIIndex = indexes_->getNextNonNullIndex(MIIndex);
696 for (MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();
698 DEBUG(dbgs() << MIIndex << "\t" << *MI);
// DEBUG_VALUE instructions are not indexed and define nothing.
699 if (MI->getOpcode()==TargetInstrInfo::DEBUG_VALUE)
703 for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
704 MachineOperand &MO = MI->getOperand(i);
705 if (!MO.isReg() || !MO.getReg())
708 // handle register defs - build intervals
710 handleRegisterDef(MBB, MI, MIIndex, MO, i);
711 else if (MO.isUndef())
712 UndefUses.push_back(MO.getReg());
715 // Move to the next instr slot.
716 MIIndex = indexes_->getNextNonNullIndex(MIIndex);
720 // Create empty intervals for registers defined by implicit_def's (except
721 // for those implicit_def that define values which are liveout of their
723 for (unsigned i = 0, e = UndefUses.size(); i != e; ++i) {
724 unsigned UndefReg = UndefUses[i];
725 (void)getOrCreateInterval(UndefReg);
/// createInterval - Allocate a new LiveInterval for the given register.
/// Physical registers get an effectively infinite spill weight (HUGE_VALF)
/// so the allocator never chooses to spill them.
729 LiveInterval* LiveIntervals::createInterval(unsigned reg) {
730 float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ? HUGE_VALF : 0.0F;
731 return new LiveInterval(reg, Weight);
734 /// dupInterval - Duplicate a live interval. The caller is responsible for
735 /// managing the allocated memory.
736 LiveInterval* LiveIntervals::dupInterval(LiveInterval *li) {
737 LiveInterval *NewLI = createInterval(li->reg);
// Deep-copy ranges and value numbers into the fresh interval.
738 NewLI->Copy(*li, mri_, getVNInfoAllocator());
742 /// getVNInfoSourceReg - Helper function that parses the specified VNInfo
743 /// copy field and returns the source register that defines it.
744 unsigned LiveIntervals::getVNInfoSourceReg(const VNInfo *VNI) const {
748 if (VNI->getCopy()->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG) {
749 // If it's extracting out of a physical register, return the sub-register.
750 unsigned Reg = VNI->getCopy()->getOperand(1).getReg();
751 if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
752 unsigned SrcSubReg = VNI->getCopy()->getOperand(2).getImm();
753 unsigned DstSubReg = VNI->getCopy()->getOperand(0).getSubReg();
754 if (SrcSubReg == DstSubReg)
755 // %reg1034:3<def> = EXTRACT_SUBREG %EDX, 3
756 // reg1034 can still be coalesced to EDX.
758 assert(DstSubReg == 0);
// Narrow the physreg to the extracted sub-register.
759 Reg = tri_->getSubReg(Reg, VNI->getCopy()->getOperand(2).getImm());
// INSERT_SUBREG / SUBREG_TO_REG: the inserted value is operand 2.
762 } else if (VNI->getCopy()->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
763 VNI->getCopy()->getOpcode() == TargetInstrInfo::SUBREG_TO_REG)
764 return VNI->getCopy()->getOperand(2).getReg();
766 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
767 if (tii_->isMoveInstr(*VNI->getCopy(), SrcReg, DstReg, SrcSubReg, DstSubReg))
769 llvm_unreachable("Unrecognized copy instruction!");
773 //===----------------------------------------------------------------------===//
774 // Register allocator hooks.
777 /// getReMatImplicitUse - If the remat definition MI has one (for now, we only
778 /// allow one) virtual register operand, then its uses are implicitly using
779 /// the register. Returns the virtual register.
780 unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
781 MachineInstr *MI) const {
783 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
784 MachineOperand &MO = MI->getOperand(i);
785 if (!MO.isReg() || !MO.isUse())
// Skip the interval's own register and null operands.
787 unsigned Reg = MO.getReg();
788 if (Reg == 0 || Reg == li.reg)
// Reserved (unallocatable) physical registers do not count.
791 if (TargetRegisterInfo::isPhysicalRegister(Reg) &&
792 !allocatableRegs_[Reg])
794 // FIXME: For now, only remat MI with at most one register operand.
796 "Can't rematerialize instruction with multiple register operand!");
805 /// isValNoAvailableAt - Return true if the val# of the specified interval
806 /// which reaches the given instruction also reaches the specified use index.
807 bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
808 SlotIndex UseIdx) const {
809 SlotIndex Index = getInstructionIndex(MI);
// NOTE(review): FindLiveRangeContaining's result is dereferenced without a
// null/end check — callers apparently guarantee MI lies inside li.
810 VNInfo *ValNo = li.FindLiveRangeContaining(Index)->valno;
811 LiveInterval::const_iterator UI = li.FindLiveRangeContaining(UseIdx);
812 return UI != li.end() && UI->valno == ValNo;
815 /// isReMaterializable - Returns true if the definition MI of the specified
816 /// val# of the specified interval is re-materializable.
817 bool LiveIntervals::isReMaterializable(const LiveInterval &li,
818 const VNInfo *ValNo, MachineInstr *MI,
819 SmallVectorImpl<LiveInterval*> &SpillIs,
824 if (!tii_->isTriviallyReMaterializable(MI, aa_))
827 // Target-specific code can mark an instruction as being rematerializable
828 // if it has one virtual reg use, though it had better be something like
829 // a PIC base register which is likely to be live everywhere.
830 unsigned ImpUse = getReMatImplicitUse(li, MI);
// The implicit-use register's value must be available at every use of li
// that this val# reaches, otherwise remat would read the wrong value.
832 const LiveInterval &ImpLi = getInterval(ImpUse);
833 for (MachineRegisterInfo::use_iterator ri = mri_->use_begin(li.reg),
834 re = mri_->use_end(); ri != re; ++ri) {
835 MachineInstr *UseMI = &*ri;
836 SlotIndex UseIdx = getInstructionIndex(UseMI);
837 if (li.FindLiveRangeContaining(UseIdx)->valno != ValNo)
839 if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
843 // If a register operand of the re-materialized instruction is going to
844 // be spilled next, then it's not legal to re-materialize this instruction.
845 for (unsigned i = 0, e = SpillIs.size(); i != e; ++i)
846 if (ImpUse == SpillIs[i]->reg)
852 /// isReMaterializable - Returns true if the definition MI of the specified
853 /// val# of the specified interval is re-materializable.
854 bool LiveIntervals::isReMaterializable(const LiveInterval &li,
855 const VNInfo *ValNo, MachineInstr *MI) {
// Convenience overload: no pending spills, load-ness result discarded.
856 SmallVector<LiveInterval*, 4> Dummy1;
858 return isReMaterializable(li, ValNo, MI, Dummy1, Dummy2);
861 /// isReMaterializable - Returns true if every definition of MI of every
862 /// val# of the specified interval is re-materializable.
863 bool LiveIntervals::isReMaterializable(const LiveInterval &li,
864 SmallVectorImpl<LiveInterval*> &SpillIs,
867 for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
869 const VNInfo *VNI = *i;
871 continue; // Dead val#.
872 // Is the def for the val# rematerializable?
873 if (!VNI->isDefAccurate())
875 MachineInstr *ReMatDefMI = getInstructionFromIndex(VNI->def);
876 bool DefIsLoad = false;
878 !isReMaterializable(li, VNI, ReMatDefMI, SpillIs, DefIsLoad))
885 /// FilterFoldedOps - Filter out two-address use operands. Return
886 /// true if it finds any issue with the operands that ought to prevent
/// folding. MRInfo accumulates whether the folded operands modify and/or
/// reference the register (VirtRegMap::isMod / isRef).
888 static bool FilterFoldedOps(MachineInstr *MI,
889 SmallVector<unsigned, 2> &Ops,
891 SmallVector<unsigned, 2> &FoldOps) {
893 for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
894 unsigned OpIdx = Ops[i];
895 MachineOperand &MO = MI->getOperand(OpIdx);
896 // FIXME: fold subreg use.
900 MRInfo |= (unsigned)VirtRegMap::isMod;
902 // Filter out two-address use operand(s).
903 if (MI->isRegTiedToDefOperand(OpIdx)) {
904 MRInfo = VirtRegMap::isModRef;
907 MRInfo |= (unsigned)VirtRegMap::isRef;
909 FoldOps.push_back(OpIdx);
915 /// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
916 /// slot / to reg or any rematerialized load into ith operand of specified
917 /// MI. If it is successful, MI is updated with the newly created MI and
919 bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
920 VirtRegMap &vrm, MachineInstr *DefMI,
922 SmallVector<unsigned, 2> &Ops,
923 bool isSS, int Slot, unsigned Reg) {
924 // If it is an implicit def instruction, just delete it.
925 if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
926 RemoveMachineInstrFromMaps(MI);
927 vrm.RemoveMachineInstrFromMaps(MI);
928 MI->eraseFromParent();
933 // Filter the list of operand indexes that are to be folded. Abort if
934 // any operand will prevent folding.
936 SmallVector<unsigned, 2> FoldOps;
937 if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
940 // The only time it's safe to fold into a two address instruction is when
941 // it's folding reload and spill from / into a spill stack slot.
942 if (DefMI && (MRInfo & VirtRegMap::isMod))
// Try the target hook: fold either the stack slot or the remat def.
945 MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(*mf_, MI, FoldOps, Slot)
946 : tii_->foldMemoryOperand(*mf_, MI, FoldOps, DefMI);
948 // Remember this instruction uses the spill slot.
949 if (isSS) vrm.addSpillSlotUse(Slot, fmi);
951 // Attempt to fold the memory reference into the instruction. If
952 // we can do this, we don't need to insert spill code.
953 MachineBasicBlock &MBB = *MI->getParent();
954 if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
955 vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
// Transfer bookkeeping from the old instruction to the folded one.
956 vrm.transferSpillPts(MI, fmi);
957 vrm.transferRestorePts(MI, fmi);
958 vrm.transferEmergencySpills(MI, fmi);
959 ReplaceMachineInstrInMaps(MI, fmi);
960 MI = MBB.insert(MBB.erase(MI), fmi);
967 /// canFoldMemoryOperand - Returns true if the specified load / store
968 /// folding is possible.
969 bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
970 SmallVector<unsigned, 2> &Ops,
972 // Filter the list of operand indexes that are to be folded. Abort if
973 // any operand will prevent folding.
975 SmallVector<unsigned, 2> FoldOps;
976 if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
979 // It's only legal to remat for a use, not a def.
980 if (ReMat && (MRInfo & VirtRegMap::isMod))
// Defer the final answer to the target hook.
983 return tii_->canFoldMemoryOperand(MI, FoldOps);
/// intervalIsInOneMBB - Return true if every range of the given interval
/// lies within a single basic block.
986 bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
987 LiveInterval::Ranges::const_iterator itr = li.ranges.begin();
// Find the block covering the first range, then require all later ranges
// to map to the same block.
989 MachineBasicBlock *mbb = indexes_->getMBBCoveringRange(itr->start, itr->end);
994 for (++itr; itr != li.ranges.end(); ++itr) {
995 MachineBasicBlock *mbb2 =
996 indexes_->getMBBCoveringRange(itr->start, itr->end);
1005 /// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of
1006 /// interval on to-be re-materialized operands of MI) with new register.
1007 void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
1008 MachineInstr *MI, unsigned NewVReg,
// There is an implicit use. That means one of the other operands is
// being remat'ed and the remat'ed instruction has li.reg as a
// use operand. Make sure we rewrite that as well.
1013 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1014 MachineOperand &MO = MI->getOperand(i);
1017 unsigned Reg = MO.getReg();
// Skip non-register / physical-register operands; only virtual registers
// can have been rematerialized.
1018 if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
1020 if (!vrm.isReMaterialized(Reg))
// Find li.reg's use inside the remat clone and redirect it to NewVReg so
// the clone stays consistent with the rewritten instruction.
1022 MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
1023 MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg);
1025 UseMO->setReg(NewVReg);
1029 /// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
1030 /// for addIntervalsForSpills to rewrite uses / defs for the given live range.
// Rewrites one instruction's operands of li.reg to a (possibly new) virtual
// register, attempting load/store folding and rematerialization along the
// way. Outputs: NewVReg (register used), HasDef/HasUse (what was rewritten),
// NewLIs (new spill intervals). Returns whether all rewritten operands could
// be folded (CanFold) — NOTE(review): the return statement is elided in this
// excerpt; confirm against the full source.
1031 bool LiveIntervals::
1032 rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
1033 bool TrySplit, SlotIndex index, SlotIndex end,
1035 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
1036 unsigned Slot, int LdSlot,
1037 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
1039 const TargetRegisterClass* rc,
1040 SmallVector<int, 4> &ReMatIds,
1041 const MachineLoopInfo *loopInfo,
1042 unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
1043 DenseMap<unsigned,unsigned> &MBBVRegsMap,
1044 std::vector<LiveInterval*> &NewLIs) {
1045 bool CanFold = false;
// Scan every operand looking for references to the spilled register.
1047 for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
1048 MachineOperand& mop = MI->getOperand(i);
1051 unsigned Reg = mop.getReg();
1052 unsigned RegI = Reg;
1053 if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
1058 bool TryFold = !DefIsReMat;
1059 bool FoldSS = true; // Default behavior unless it's a remat.
1060 int FoldSlot = Slot;
1062 // If this is the rematerializable definition MI itself and
1063 // all of its uses are rematerialized, simply delete it.
1064 if (MI == ReMatOrigDefMI && CanDelete) {
1065 DEBUG(dbgs() << "\t\t\t\tErasing re-materlizable def: "
1067 RemoveMachineInstrFromMaps(MI);
1068 vrm.RemoveMachineInstrFromMaps(MI);
1069 MI->eraseFromParent();
1073 // If def for this use can't be rematerialized, then try folding.
1074 // If def is rematerializable and it's a load, also try folding.
1075 TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
1077 // Try fold loads (from stack slot, constant pool, etc.) into uses.
1083 // Scan all of the operands of this instruction rewriting operands
1084 // to use NewVReg instead of li.reg as appropriate. We do this for
1087 // 1. If the instr reads the same spilled vreg multiple times, we
1088 // want to reuse the NewVReg.
1089 // 2. If the instr is a two-addr instruction, we are required to
1090 // keep the src/dst regs pinned.
1092 // Keep track of whether we replace a use and/or def so that we can
1093 // create the spill interval with the appropriate range.
1095 HasUse = mop.isUse();
1096 HasDef = mop.isDef();
1097 SmallVector<unsigned, 2> Ops;
// Gather all later operands that reference the same register so they can
// all be rewritten to the same NewVReg in one pass.
1099 for (unsigned j = i+1, e = MI->getNumOperands(); j != e; ++j) {
1100 const MachineOperand &MOj = MI->getOperand(j);
1103 unsigned RegJ = MOj.getReg();
1104 if (RegJ == 0 || TargetRegisterInfo::isPhysicalRegister(RegJ))
1108 if (!MOj.isUndef()) {
1109 HasUse |= MOj.isUse();
1110 HasDef |= MOj.isDef();
1115 // Create a new virtual register for the spill interval.
1116 // Create the new register now so we can map the fold instruction
1117 // to the new register so when it is unfolded we get the correct
1119 bool CreatedNewVReg = false;
1121 NewVReg = mri_->createVirtualRegister(rc);
1123 CreatedNewVReg = true;
1125 // The new virtual register should get the same allocation hints as the
// original register so the allocator can still honor them.
1127 std::pair<unsigned, unsigned> Hint = mri_->getRegAllocationHint(Reg);
1128 if (Hint.first || Hint.second)
1129 mri_->setRegAllocationHint(NewVReg, Hint.first, Hint.second);
1135 // Do not fold load / store here if we are splitting. We'll find an
1136 // optimal point to insert a load / store later.
1138 if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
1139 Ops, FoldSS, FoldSlot, NewVReg)) {
1140 // Folding the load/store can completely change the instruction in
1141 // unpredictable ways, rescan it from the beginning.
1144 // We need to give the new vreg the same stack slot as the
1145 // spilled interval.
1146 vrm.assignVirt2StackSlot(NewVReg, FoldSlot);
1152 if (isNotInMIMap(MI))
1154 goto RestartInstruction;
1157 // We'll try to fold it later if it's profitable.
1158 CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat);
// Rewrite this operand (and any implicit copies) to the new register.
1162 mop.setReg(NewVReg);
1163 if (mop.isImplicit())
1164 rewriteImplicitOps(li, MI, NewVReg, vrm);
1166 // Reuse NewVReg for other reads.
1167 for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
1168 MachineOperand &mopj = MI->getOperand(Ops[j]);
1169 mopj.setReg(NewVReg);
1170 if (mopj.isImplicit())
1171 rewriteImplicitOps(li, MI, NewVReg, vrm);
// Record remat / stack-slot bookkeeping for a freshly created register.
1174 if (CreatedNewVReg) {
1176 vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI);
1177 if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
1178 // Each valnum may have its own remat id.
1179 ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
1181 vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
1183 if (!CanDelete || (HasUse && HasDef)) {
1184 // If this is a two-addr instruction then its use operands are
1185 // rematerializable but its def is not. It should be assigned a
// stack slot so the def can be spilled.
1187 vrm.assignVirt2StackSlot(NewVReg, Slot);
1190 vrm.assignVirt2StackSlot(NewVReg, Slot);
1192 } else if (HasUse && HasDef &&
1193 vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
1194 // If this interval hasn't been assigned a stack slot (because earlier
1195 // def is a deleted remat def), do it now.
1196 assert(Slot != VirtRegMap::NO_STACK_SLOT);
1197 vrm.assignVirt2StackSlot(NewVReg, Slot);
1200 // Re-matting an instruction with virtual register use. Add the
1201 // register as an implicit use on the use MI.
1202 if (DefIsReMat && ImpUse)
1203 MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
1205 // Create a new register interval for this spill / remat.
1206 LiveInterval &nI = getOrCreateInterval(NewVReg);
1207 if (CreatedNewVReg) {
1208 NewLIs.push_back(&nI);
1209 MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
1211 vrm.setIsSplitFromReg(NewVReg, li.reg);
// Extend / create the live range of the new interval around this
// instruction: load..def for a fresh use, def..store for a def.
1215 if (CreatedNewVReg) {
1216 LiveRange LR(index.getLoadIndex(), index.getDefIndex(),
1217 nI.getNextValue(SlotIndex(), 0, false, VNInfoAllocator));
1218 DEBUG(dbgs() << " +" << LR);
1221 // Extend the split live interval to this def / use.
1222 SlotIndex End = index.getDefIndex();
1223 LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
1224 nI.getValNumInfo(nI.getNumValNums()-1));
1225 DEBUG(dbgs() << " +" << LR);
1230 LiveRange LR(index.getDefIndex(), index.getStoreIndex(),
1231 nI.getNextValue(SlotIndex(), 0, false, VNInfoAllocator));
1232 DEBUG(dbgs() << " +" << LR);
1237 dbgs() << "\t\t\t\tAdded new interval: ";
1238 nI.print(dbgs(), tri_);
// anyKillInMBBAfterIdx - Return true if VNI has a (non-PHI) kill inside MBB
// that occurs strictly after Idx and no later than the block's end index.
1244 bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
1246 MachineBasicBlock *MBB,
1247 SlotIndex Idx) const {
1248 SlotIndex End = getMBBEndIdx(MBB);
1249 for (unsigned j = 0, ee = VNI->kills.size(); j != ee; ++j) {
// PHI kills are block-boundary artifacts, not real instructions; skip them.
1250 if (VNI->kills[j].isPHI())
1253 SlotIndex KillIdx = VNI->kills[j];
1254 if (KillIdx > Idx && KillIdx <= End)
1260 /// RewriteInfo - Keep track of machine instrs that will be rewritten
1261 /// during spilling.
// Pairs an instruction (MI) with its slot index and whether the spilled
// register appears as a use and/or def in that instruction.
1263 struct RewriteInfo {
1268 RewriteInfo(SlotIndex i, MachineInstr *mi, bool u, bool d)
1269 : Index(i), MI(mi), HasUse(u), HasDef(d) {}
// Strict-weak ordering by slot index, used to sort RewriteInfo records into
// instruction order before rewriting.
1272 struct RewriteInfoCompare {
1273 bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
1274 return LHS.Index < RHS.Index;
// rewriteInstructionsForSpills - Rewrite all defs / uses of li.reg inside the
// live range *I. Collects the affected instructions, rewrites each via
// rewriteInstructionForSpills, and records per-block spill points
// (SpillMBBs/SpillIdxes) and restore points (RestoreMBBs/RestoreIdxes) for
// the spiller, updating spill weights along the way.
1279 void LiveIntervals::
1280 rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
1281 LiveInterval::Ranges::const_iterator &I,
1282 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
1283 unsigned Slot, int LdSlot,
1284 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
1286 const TargetRegisterClass* rc,
1287 SmallVector<int, 4> &ReMatIds,
1288 const MachineLoopInfo *loopInfo,
1289 BitVector &SpillMBBs,
1290 DenseMap<unsigned, std::vector<SRInfo> > &SpillIdxes,
1291 BitVector &RestoreMBBs,
1292 DenseMap<unsigned, std::vector<SRInfo> > &RestoreIdxes,
1293 DenseMap<unsigned,unsigned> &MBBVRegsMap,
1294 std::vector<LiveInterval*> &NewLIs) {
1295 bool AllCanFold = true;
1296 unsigned NewVReg = 0;
1297 SlotIndex start = I->start.getBaseIndex();
1298 SlotIndex end = I->end.getPrevSlot().getBaseIndex().getNextIndex();
1300 // First collect all the def / use in this live range that will be rewritten.
1301 // Make sure they are sorted according to instruction index.
1302 std::vector<RewriteInfo> RewriteMIs;
1303 for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
1304 re = mri_->reg_end(); ri != re; ) {
1305 MachineInstr *MI = &*ri;
1306 MachineOperand &O = ri.getOperand();
1308 assert(!O.isImplicit() && "Spilling register that's used as implicit use?");
1309 SlotIndex index = getInstructionIndex(MI);
// Only instructions inside [start, end) belong to this live range.
1310 if (index < start || index >= end)
1314 // Must be defined by an implicit def. It should not be spilled. Note,
1315 // this is for correctness reason. e.g.
1316 // 8 %reg1024<def> = IMPLICIT_DEF
1317 // 12 %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
1318 // The live range [12, 14) are not part of the r1024 live interval since
1319 // it's defined by an implicit def. It will not conflict with live
1320 // interval of r1025. Now suppose both registers are spilled, you can
1321 // easily see a situation where both registers are reloaded before
1322 // the INSERT_SUBREG and both target registers that would overlap.
1324 RewriteMIs.push_back(RewriteInfo(index, MI, O.isUse(), O.isDef()));
1326 std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());
1328 unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
1329 // Now rewrite the defs and uses.
1330 for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
1331 RewriteInfo &rwi = RewriteMIs[i];
1333 SlotIndex index = rwi.Index;
1334 bool MIHasUse = rwi.HasUse;
1335 bool MIHasDef = rwi.HasDef;
1336 MachineInstr *MI = rwi.MI;
1337 // If MI def and/or use the same register multiple times, then there
1338 // are multiple entries.
// Coalesce all entries for the same instruction into one rewrite.
1339 unsigned NumUses = MIHasUse;
1340 while (i != e && RewriteMIs[i].MI == MI) {
1341 assert(RewriteMIs[i].Index == index);
1342 bool isUse = RewriteMIs[i].HasUse;
1343 if (isUse) ++NumUses;
1345 MIHasDef |= RewriteMIs[i].HasDef;
1348 MachineBasicBlock *MBB = MI->getParent();
1350 if (ImpUse && MI != ReMatDefMI) {
1351 // Re-matting an instruction with virtual register use. Update the
1352 // register interval's spill weight to HUGE_VALF to prevent it from
// being spilled itself.
1354 LiveInterval &ImpLi = getInterval(ImpUse);
1355 ImpLi.weight = HUGE_VALF;
// When splitting, reuse the per-block vreg recorded in MBBVRegsMap so all
// rewrites within one block share the same new register.
1358 unsigned MBBId = MBB->getNumber();
1359 unsigned ThisVReg = 0;
1361 DenseMap<unsigned,unsigned>::iterator NVI = MBBVRegsMap.find(MBBId);
1362 if (NVI != MBBVRegsMap.end()) {
1363 ThisVReg = NVI->second;
1370 // It's better to start a new interval to avoid artificially
1371 // extending the new interval.
1372 if (MIHasDef && !MIHasUse) {
1373 MBBVRegsMap.erase(MBB->getNumber());
1379 bool IsNew = ThisVReg == 0;
1381 // This ends the previous live interval. If all of its def / use
1382 // can be folded, give it a low spill weight.
1383 if (NewVReg && TrySplit && AllCanFold) {
1384 LiveInterval &nI = getOrCreateInterval(NewVReg);
// Perform the actual per-instruction rewrite.
1391 bool HasDef = false;
1392 bool HasUse = false;
1393 bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
1394 index, end, MI, ReMatOrigDefMI, ReMatDefMI,
1395 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1396 CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
1397 ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs);
1398 if (!HasDef && !HasUse)
1401 AllCanFold &= CanFold;
1403 // Update weight of spill interval.
1404 LiveInterval &nI = getOrCreateInterval(NewVReg);
1406 // The spill weight is now infinity as it cannot be spilled again.
1407 nI.weight = HUGE_VALF;
1411 // Keep track of the last def and first use in each MBB.
1413 if (MI != ReMatOrigDefMI || !CanDelete) {
1414 bool HasKill = false;
1416 HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, index.getDefIndex());
1418 // If this is a two-address code, then this index starts a new VNInfo.
1419 const VNInfo *VNI = li.findDefinedVNInfoForRegInt(index.getDefIndex());
1421 HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, index.getDefIndex());
// Record (or merge) a spill point for this block.
1423 DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
1424 SpillIdxes.find(MBBId);
1426 if (SII == SpillIdxes.end()) {
1427 std::vector<SRInfo> S;
1428 S.push_back(SRInfo(index, NewVReg, true));
1429 SpillIdxes.insert(std::make_pair(MBBId, S));
1430 } else if (SII->second.back().vreg != NewVReg) {
1431 SII->second.push_back(SRInfo(index, NewVReg, true));
1432 } else if (index > SII->second.back().index) {
1433 // If there is an earlier def and this is a two-address
1434 // instruction, then it's not possible to fold the store (which
1435 // would also fold the load).
1436 SRInfo &Info = SII->second.back();
1438 Info.canFold = !HasUse;
1440 SpillMBBs.set(MBBId);
1441 } else if (SII != SpillIdxes.end() &&
1442 SII->second.back().vreg == NewVReg &&
1443 index > SII->second.back().index) {
1444 // There is an earlier def that's not killed (must be two-address).
1445 // The spill is no longer needed.
1446 SII->second.pop_back();
1447 if (SII->second.empty()) {
1448 SpillIdxes.erase(MBBId);
1449 SpillMBBs.reset(MBBId);
// Record (or invalidate) a restore point for this block.
1456 DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
1457 SpillIdxes.find(MBBId);
1458 if (SII != SpillIdxes.end() &&
1459 SII->second.back().vreg == NewVReg &&
1460 index > SII->second.back().index)
1461 // Use(s) following the last def, it's not safe to fold the spill.
1462 SII->second.back().canFold = false;
1463 DenseMap<unsigned, std::vector<SRInfo> >::iterator RII =
1464 RestoreIdxes.find(MBBId);
1465 if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
1466 // If we are splitting live intervals, only fold if it's the first
1467 // use and there isn't another use later in the MBB.
1468 RII->second.back().canFold = false;
1470 // Only need a reload if there isn't an earlier def / use.
1471 if (RII == RestoreIdxes.end()) {
1472 std::vector<SRInfo> Infos;
1473 Infos.push_back(SRInfo(index, NewVReg, true));
1474 RestoreIdxes.insert(std::make_pair(MBBId, Infos));
1476 RII->second.push_back(SRInfo(index, NewVReg, true));
1478 RestoreMBBs.set(MBBId);
1482 // Update spill weight.
1483 unsigned loopDepth = loopInfo->getLoopDepth(MBB);
1484 nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
1487 if (NewVReg && TrySplit && AllCanFold) {
1488 // If all of its def / use can be folded, give it a low spill weight.
1489 LiveInterval &nI = getOrCreateInterval(NewVReg);
// alsoFoldARestore - Return true if block Id has a pending restore of 'vr'
// at 'index' that is still marked foldable (so a store fold can also fold
// the corresponding reload).
1494 bool LiveIntervals::alsoFoldARestore(int Id, SlotIndex index,
1495 unsigned vr, BitVector &RestoreMBBs,
1496 DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
// No restores recorded for this block at all.
1497 if (!RestoreMBBs[Id])
1499 std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1500 for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1501 if (Restores[i].index == index &&
1502 Restores[i].vreg == vr &&
1503 Restores[i].canFold)
// eraseRestoreInfo - Invalidate the restore recorded for block Id at 'index'
// by resetting its index to the invalid SlotIndex (the restore was folded
// away and no reload should be issued).
1508 void LiveIntervals::eraseRestoreInfo(int Id, SlotIndex index,
1509 unsigned vr, BitVector &RestoreMBBs,
1510 DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1511 if (!RestoreMBBs[Id])
1513 std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1514 for (unsigned i = 0, e = Restores.size(); i != e; ++i)
// An invalid (default) SlotIndex marks this entry as erased.
1515 if (Restores[i].index == index && Restores[i].vreg)
1516 Restores[i].index = SlotIndex();
1519 /// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being
1520 /// spilled and create empty intervals for their uses.
1522 LiveIntervals::handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm,
1523 const TargetRegisterClass* rc,
1524 std::vector<LiveInterval*> &NewLIs) {
// Walk every remaining reference to li.reg (defs should already have been
// rewritten; any def left here must be the IMPLICIT_DEF itself).
1525 for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
1526 re = mri_->reg_end(); ri != re; ) {
1527 MachineOperand &O = ri.getOperand();
1528 MachineInstr *MI = &*ri;
// The IMPLICIT_DEF contributes no value; just delete it.
1531 assert(MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF &&
1532 "Register def was not rewritten?");
1533 RemoveMachineInstrFromMaps(MI);
1534 vrm.RemoveMachineInstrFromMaps(MI);
1535 MI->eraseFromParent();
1537 // This must be a use of an implicit_def so it's not part of the live
1538 // interval. Create a new empty live interval for it.
1539 // FIXME: Can we simply erase some of the instructions? e.g. Stores?
1540 unsigned NewVReg = mri_->createVirtualRegister(rc);
1542 vrm.setIsImplicitlyDefined(NewVReg);
1543 NewLIs.push_back(&getOrCreateInterval(NewVReg));
// Redirect every operand of this user that still names li.reg.
1544 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1545 MachineOperand &MO = MI->getOperand(i);
1546 if (MO.isReg() && MO.getReg() == li.reg) {
// addIntervalsForSpillsFast - Simplified spiller path (enabled by the
// -fast-spill flag): assign one stack slot, then for each instruction that
// touches li.reg either fold the memory access or create a fresh short-lived
// vreg with reload/spill points around the instruction. Returns the list of
// new intervals created.
1555 std::vector<LiveInterval*> LiveIntervals::
1556 addIntervalsForSpillsFast(const LiveInterval &li,
1557 const MachineLoopInfo *loopInfo,
1559 unsigned slot = vrm.assignVirt2StackSlot(li.reg);
1561 std::vector<LiveInterval*> added;
1563 assert(li.weight != HUGE_VALF &&
1564 "attempt to spill already spilled interval!");
1567 dbgs() << "\t\t\t\tadding intervals for spills for interval: ";
1572 const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
1574 MachineRegisterInfo::reg_iterator RI = mri_->reg_begin(li.reg);
1575 while (RI != mri_->reg_end()) {
1576 MachineInstr* MI = &*RI;
// Collect every operand index of MI that references li.reg, noting
// whether the instruction reads and/or writes the register.
1578 SmallVector<unsigned, 2> Indices;
1579 bool HasUse = false;
1580 bool HasDef = false;
1582 for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
1583 MachineOperand& mop = MI->getOperand(i);
1584 if (!mop.isReg() || mop.getReg() != li.reg) continue;
1586 HasUse |= MI->getOperand(i).isUse();
1587 HasDef |= MI->getOperand(i).isDef();
1589 Indices.push_back(i);
// Prefer folding the stack access directly into MI; only fall back to an
// explicit reload/spill with a new vreg when folding fails.
1592 if (!tryFoldMemoryOperand(MI, vrm, NULL, getInstructionIndex(MI),
1593 Indices, true, slot, li.reg)) {
1594 unsigned NewVReg = mri_->createVirtualRegister(rc);
1596 vrm.assignVirt2StackSlot(NewVReg, slot);
1598 // create a new register for this spill
1599 LiveInterval &nI = getOrCreateInterval(NewVReg);
1601 // the spill weight is now infinity as it
1602 // cannot be spilled again
1603 nI.weight = HUGE_VALF;
1605 // Rewrite register operands to use the new vreg.
1606 for (SmallVectorImpl<unsigned>::iterator I = Indices.begin(),
1607 E = Indices.end(); I != E; ++I) {
1608 MI->getOperand(*I).setReg(NewVReg);
// The new vreg dies at this instruction; mark uses as kills.
1610 if (MI->getOperand(*I).isUse())
1611 MI->getOperand(*I).setIsKill(true);
1614 // Fill in the new live interval.
1615 SlotIndex index = getInstructionIndex(MI);
// Use range: load..use, with a reload before the instruction.
1617 LiveRange LR(index.getLoadIndex(), index.getUseIndex(),
1618 nI.getNextValue(SlotIndex(), 0, false,
1619 getVNInfoAllocator()));
1620 DEBUG(dbgs() << " +" << LR);
1622 vrm.addRestorePoint(NewVReg, MI);
// Def range: def..store, with a spill after the instruction.
1625 LiveRange LR(index.getDefIndex(), index.getStoreIndex(),
1626 nI.getNextValue(SlotIndex(), 0, false,
1627 getVNInfoAllocator()));
1628 DEBUG(dbgs() << " +" << LR);
1630 vrm.addSpillPoint(NewVReg, true, MI);
1633 added.push_back(&nI);
1636 dbgs() << "\t\t\t\tadded new interval: ";
// Rewriting may invalidate the iterator; restart from the first use.
1643 RI = mri_->reg_begin(li.reg);
// addIntervalsForSpills - Main spilling entry point. Decides between
// rematerialization and stack-slot spilling per value number, rewrites all
// defs/uses (splitting across blocks when profitable), then inserts the
// final spill/restore points and finalizes kills and spill weights on the
// new intervals. Returns the newly created intervals.
1649 std::vector<LiveInterval*> LiveIntervals::
1650 addIntervalsForSpills(const LiveInterval &li,
1651 SmallVectorImpl<LiveInterval*> &SpillIs,
1652 const MachineLoopInfo *loopInfo, VirtRegMap &vrm) {
1654 if (EnableFastSpilling)
1655 return addIntervalsForSpillsFast(li, loopInfo, vrm);
1657 assert(li.weight != HUGE_VALF &&
1658 "attempt to spill already spilled interval!");
1661 dbgs() << "\t\t\t\tadding intervals for spills for interval: ";
1662 li.print(dbgs(), tri_);
1666 // Each bit specify whether a spill is required in the MBB.
1667 BitVector SpillMBBs(mf_->getNumBlockIDs());
1668 DenseMap<unsigned, std::vector<SRInfo> > SpillIdxes;
1669 BitVector RestoreMBBs(mf_->getNumBlockIDs());
1670 DenseMap<unsigned, std::vector<SRInfo> > RestoreIdxes;
1671 DenseMap<unsigned,unsigned> MBBVRegsMap;
1672 std::vector<LiveInterval*> NewLIs;
1673 const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
// Per-value-number rematerialization state.
1675 unsigned NumValNums = li.getNumValNums();
1676 SmallVector<MachineInstr*, 4> ReMatDefs;
1677 ReMatDefs.resize(NumValNums, NULL);
1678 SmallVector<MachineInstr*, 4> ReMatOrigDefs;
1679 ReMatOrigDefs.resize(NumValNums, NULL);
1680 SmallVector<int, 4> ReMatIds;
1681 ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
1682 BitVector ReMatDelete(NumValNums);
1683 unsigned Slot = VirtRegMap::MAX_STACK_SLOT;
1685 // Spilling a split live interval. It cannot be split any further. Also,
1686 // it's also guaranteed to be a single val# / range interval.
1687 if (vrm.getPreSplitReg(li.reg)) {
1688 vrm.setIsSplitFromReg(li.reg, 0);
1689 // Unset the split kill marker on the last use.
1690 SlotIndex KillIdx = vrm.getKillPoint(li.reg);
1691 if (KillIdx != SlotIndex()) {
1692 MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
1693 assert(KillMI && "Last use disappeared?");
1694 int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
1695 assert(KillOp != -1 && "Last use disappeared?");
1696 KillMI->getOperand(KillOp).setIsKill(false);
1698 vrm.removeKillPoint(li.reg);
1699 bool DefIsReMat = vrm.isReMaterialized(li.reg);
1700 Slot = vrm.getStackSlot(li.reg);
1701 assert(Slot != VirtRegMap::MAX_STACK_SLOT);
1702 MachineInstr *ReMatDefMI = DefIsReMat ?
1703 vrm.getReMaterializedMI(li.reg) : NULL;
1705 bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1706 bool isLoad = isLoadSS ||
1707 (DefIsReMat && (ReMatDefMI->getDesc().canFoldAsLoad()));
1708 bool IsFirstRange = true;
1709 for (LiveInterval::Ranges::const_iterator
1710 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1711 // If this is a split live interval with multiple ranges, it means there
1712 // are two-address instructions that re-defined the value. Only the
1713 // first def can be rematerialized!
1715 // Note ReMatOrigDefMI has already been deleted.
1716 rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
1717 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1718 false, vrm, rc, ReMatIds, loopInfo,
1719 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1720 MBBVRegsMap, NewLIs);
1722 rewriteInstructionsForSpills(li, false, I, NULL, 0,
1723 Slot, 0, false, false, false,
1724 false, vrm, rc, ReMatIds, loopInfo,
1725 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1726 MBBVRegsMap, NewLIs);
1728 IsFirstRange = false;
1731 handleSpilledImpDefs(li, vrm, rc, NewLIs);
// Splitting is only worthwhile if the interval spans multiple blocks.
1735 bool TrySplit = !intervalIsInOneMBB(li);
1738 bool NeedStackSlot = false;
// Decide, per value number, whether its def can be rematerialized and
// whether a stack slot is needed at all.
1739 for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
1741 const VNInfo *VNI = *i;
1742 unsigned VN = VNI->id;
1743 if (VNI->isUnused())
1744 continue; // Dead val#.
1745 // Is the def for the val# rematerializable?
1746 MachineInstr *ReMatDefMI = VNI->isDefAccurate()
1747 ? getInstructionFromIndex(VNI->def) : 0;
1749 if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, SpillIs, dummy)) {
1750 // Remember how to remat the def of this val#.
1751 ReMatOrigDefs[VN] = ReMatDefMI;
1752 // Original def may be modified so we have to make a copy here.
1753 MachineInstr *Clone = mf_->CloneMachineInstr(ReMatDefMI);
1754 CloneMIs.push_back(Clone);
1755 ReMatDefs[VN] = Clone;
1757 bool CanDelete = true;
1758 if (VNI->hasPHIKill()) {
1759 // A kill is a phi node, not all of its uses can be rematerialized.
1760 // It must not be deleted.
1762 // Need a stack slot if there is any live range where uses cannot be
// rematerialized.
1764 NeedStackSlot = true;
1767 ReMatDelete.set(VN);
1769 // Need a stack slot if there is any live range where uses cannot be
// rematerialized.
1771 NeedStackSlot = true;
1775 // One stack slot per live interval.
1776 if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0) {
1777 if (vrm.getStackSlot(li.reg) == VirtRegMap::NO_STACK_SLOT)
1778 Slot = vrm.assignVirt2StackSlot(li.reg);
1780 // This case only occurs when the prealloc splitter has already assigned
1781 // a stack slot to this vreg.
1783 Slot = vrm.getStackSlot(li.reg);
1786 // Create new intervals and rewrite defs and uses.
1787 for (LiveInterval::Ranges::const_iterator
1788 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1789 MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
1790 MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
1791 bool DefIsReMat = ReMatDefMI != NULL;
1792 bool CanDelete = ReMatDelete[I->valno->id];
1794 bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1795 bool isLoad = isLoadSS ||
1796 (DefIsReMat && ReMatDefMI->getDesc().canFoldAsLoad());
1797 rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
1798 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1799 CanDelete, vrm, rc, ReMatIds, loopInfo,
1800 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1801 MBBVRegsMap, NewLIs);
1804 // Insert spills / restores if we are splitting.
1806 handleSpilledImpDefs(li, vrm, rc, NewLIs);
// Emit the recorded spill points, folding the store into the defining
// instruction where possible.
1810 SmallPtrSet<LiveInterval*, 4> AddedKill;
1811 SmallVector<unsigned, 2> Ops;
1812 if (NeedStackSlot) {
1813 int Id = SpillMBBs.find_first();
1815 std::vector<SRInfo> &spills = SpillIdxes[Id];
1816 for (unsigned i = 0, e = spills.size(); i != e; ++i) {
1817 SlotIndex index = spills[i].index;
1818 unsigned VReg = spills[i].vreg;
1819 LiveInterval &nI = getOrCreateInterval(VReg);
1820 bool isReMat = vrm.isReMaterialized(VReg);
1821 MachineInstr *MI = getInstructionFromIndex(index);
1822 bool CanFold = false;
1823 bool FoundUse = false;
1825 if (spills[i].canFold) {
1827 for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
1828 MachineOperand &MO = MI->getOperand(j);
1829 if (!MO.isReg() || MO.getReg() != VReg)
1836 (!FoundUse && !alsoFoldARestore(Id, index, VReg,
1837 RestoreMBBs, RestoreIdxes))) {
1838 // MI has two-address uses of the same register. If the use
1839 // isn't the first and only use in the BB, then we can't fold
1840 // it. FIXME: Move this to rewriteInstructionsForSpills.
1847 // Fold the store into the def if possible.
1848 bool Folded = false;
1849 if (CanFold && !Ops.empty()) {
1850 if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot,VReg)){
1853 // Also folded uses, do not issue a load.
1854 eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
1855 nI.removeRange(index.getLoadIndex(), index.getDefIndex());
1857 nI.removeRange(index.getDefIndex(), index.getStoreIndex());
1861 // Otherwise tell the spiller to issue a spill.
1863 LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
1864 bool isKill = LR->end == index.getStoreIndex();
1865 if (!MI->registerDefIsDead(nI.reg))
1866 // No need to spill a dead def.
1867 vrm.addSpillPoint(VReg, isKill, MI);
1869 AddedKill.insert(&nI);
1872 Id = SpillMBBs.find_next(Id);
// Emit the recorded restore points, folding the reload (or the remat'ed
// load) into the using instruction where possible.
1876 int Id = RestoreMBBs.find_first();
1878 std::vector<SRInfo> &restores = RestoreIdxes[Id];
1879 for (unsigned i = 0, e = restores.size(); i != e; ++i) {
1880 SlotIndex index = restores[i].index;
// An invalid index marks a restore erased by eraseRestoreInfo.
1881 if (index == SlotIndex())
1883 unsigned VReg = restores[i].vreg;
1884 LiveInterval &nI = getOrCreateInterval(VReg);
1885 bool isReMat = vrm.isReMaterialized(VReg);
1886 MachineInstr *MI = getInstructionFromIndex(index);
1887 bool CanFold = false;
1889 if (restores[i].canFold) {
1891 for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
1892 MachineOperand &MO = MI->getOperand(j);
1893 if (!MO.isReg() || MO.getReg() != VReg)
1897 // If this restore were to be folded, it would have been folded
// already.
1906 // Fold the load into the use if possible.
1907 bool Folded = false;
1908 if (CanFold && !Ops.empty()) {
1910 Folded = tryFoldMemoryOperand(MI, vrm, NULL,index,Ops,true,Slot,VReg);
1912 MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
1914 bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1915 // If the rematerializable def is a load, also try to fold it.
1916 if (isLoadSS || ReMatDefMI->getDesc().canFoldAsLoad())
1917 Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
1918 Ops, isLoadSS, LdSlot, VReg);
1920 unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
1922 // Re-matting an instruction with virtual register use. Add the
1923 // register as an implicit use on the use MI and update the register
1924 // interval's spill weight to HUGE_VALF to prevent it from being
// spilled.
1926 LiveInterval &ImpLi = getInterval(ImpUse);
1927 ImpLi.weight = HUGE_VALF;
1928 MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
1933 // If folding is not possible / failed, then tell the spiller to issue a
1934 // load / rematerialization for us.
1936 nI.removeRange(index.getLoadIndex(), index.getDefIndex());
1938 vrm.addRestorePoint(VReg, MI);
1940 Id = RestoreMBBs.find_next(Id);
1943 // Finalize intervals: add kills, finalize spill weights, and filter out
// dead intervals.
1945 std::vector<LiveInterval*> RetNewLIs;
1946 for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
1947 LiveInterval *LI = NewLIs[i];
1949 LI->weight /= SlotIndex::NUM * getApproximateInstructionCount(*LI);
1950 if (!AddedKill.count(LI)) {
// Mark the last use of each new interval as a kill.
1951 LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
1952 SlotIndex LastUseIdx = LR->end.getBaseIndex();
1953 MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
1954 int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
1955 assert(UseIdx != -1);
1956 if (!LastUse->isRegTiedToDefOperand(UseIdx)) {
1957 LastUse->getOperand(UseIdx).setIsKill();
1958 vrm.addKillPoint(LI->reg, LastUseIdx);
1961 RetNewLIs.push_back(LI);
1965 handleSpilledImpDefs(li, vrm, rc, RetNewLIs);
1969 /// hasAllocatableSuperReg - Return true if the specified physical register
1970 /// has any super register that's allocatable.
1971 bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
// A super-register only counts if it is both allocatable and has a
// computed live interval.
1972 for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS)
1973 if (allocatableRegs_[*AS] && hasInterval(*AS))
1978 /// getRepresentativeReg - Find the largest super register of the specified
1979 /// physical register.
1980 unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
1981 // Find the largest super-register that is allocatable.
// Start with Reg itself and walk super-registers, keeping the widest one
// that has an interval but no allocatable super-register above it.
1982 unsigned BestReg = Reg;
1983 for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
1984 unsigned SuperReg = *AS;
1985 if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) {
1993 /// getNumConflictsWithPhysReg - Return the number of uses and defs of the
1994 /// specified interval that conflicts with the specified physical register.
1995 unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
1996 unsigned PhysReg) const {
1997 unsigned NumConflicts = 0;
// Compare against the representative (widest) register's interval so
// aliasing sub-registers are accounted for.
1998 const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg));
1999 for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
2000 E = mri_->reg_end(); I != E; ++I) {
2001 MachineOperand &O = I.getOperand();
2002 MachineInstr *MI = O.getParent();
2003 SlotIndex Index = getInstructionIndex(MI);
// Every reference of li.reg at a point where the phys-reg interval is
// live counts as one conflict.
2004 if (pli.liveAt(Index))
2007 return NumConflicts;
2010 /// spillPhysRegAroundRegDefsUses - Spill the specified physical register
2011 /// around all defs and uses of the specified interval. Return true if it
2012 /// was able to cut its interval.
2013 bool LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
2014 unsigned PhysReg, VirtRegMap &vrm) {
2015 unsigned SpillReg = getRepresentativeReg(PhysReg);
2017 for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
2018 // If there are registers which alias PhysReg, but which are not a
2019 // sub-register of the chosen representative super register. Assert
2020 // since we can't handle it yet.
2021 assert(*AS == SpillReg || !allocatableRegs_[*AS] || !hasInterval(*AS) ||
2022 tri_->isSuperRegister(*AS, SpillReg));
// Collect SpillReg and all of its sub-registers that have intervals; these
// are the physical intervals we must cut around li's defs and uses.
2025 SmallVector<unsigned, 4> PRegs;
2026 if (hasInterval(SpillReg))
2027 PRegs.push_back(SpillReg);
2029 SmallSet<unsigned, 4> Added;
2030 for (const unsigned* AS = tri_->getSubRegisters(SpillReg); *AS; ++AS)
2031 if (Added.insert(*AS) && hasInterval(*AS)) {
2032 PRegs.push_back(*AS);
2033 for (const unsigned* ASS = tri_->getSubRegisters(*AS); *ASS; ++ASS)
// Visit each instruction that references li.reg exactly once.
2038 SmallPtrSet<MachineInstr*, 8> SeenMIs;
2039 for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
2040 E = mri_->reg_end(); I != E; ++I) {
2041 MachineOperand &O = I.getOperand();
2042 MachineInstr *MI = O.getParent();
2043 if (SeenMIs.count(MI))
2046 SlotIndex Index = getInstructionIndex(MI);
2047 for (unsigned i = 0, e = PRegs.size(); i != e; ++i) {
2048 unsigned PReg = PRegs[i];
2049 LiveInterval &pli = getInterval(PReg);
2050 if (!pli.liveAt(Index))
// Record an emergency spill of PReg around MI and carve this
// instruction's slots out of the physical interval.
2052 vrm.addEmergencySpill(PReg, MI);
2053 SlotIndex StartIdx = Index.getLoadIndex();
2054 SlotIndex EndIdx = Index.getNextIndex().getBaseIndex();
2055 if (pli.isInOneLiveRange(StartIdx, EndIdx)) {
2056 pli.removeRange(StartIdx, EndIdx);
// Cannot cut the interval cleanly: register allocation has failed.
2060 raw_string_ostream Msg(msg);
2061 Msg << "Ran out of registers during register allocation!";
2062 if (MI->getOpcode() == TargetInstrInfo::INLINEASM) {
2063 Msg << "\nPlease check your inline asm statement for invalid "
2064 << "constraints:\n";
2065 MI->print(Msg, tm_);
2067 llvm_report_error(Msg.str());
// Also trim any live sub-register intervals at this point.
2069 for (const unsigned* AS = tri_->getSubRegisters(PReg); *AS; ++AS) {
2070 if (!hasInterval(*AS))
2072 LiveInterval &spli = getInterval(*AS);
2073 if (spli.liveAt(Index))
2074 spli.removeRange(Index.getLoadIndex(),
2075 Index.getNextIndex().getBaseIndex());
/// addLiveRangeToEndOfBlock - Create (or extend) the interval for \p reg with
/// a range running from \p startInst's def slot to the end of its basic
/// block, defining a new value number that is killed at the block terminator
/// and marked as having a PHI kill.
2082 LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg,
2083 MachineInstr* startInst) {
2084 LiveInterval& Interval = getOrCreateInterval(reg);
// New value number defined at startInst's def slot.
2085 VNInfo* VN = Interval.getNextValue(
2086 SlotIndex(getInstructionIndex(startInst).getDefIndex()),
2087 startInst, true, getVNInfoAllocator());
2088 VN->setHasPHIKill(true);
// The value is killed in the terminator gap at the end of the block.
2089 VN->kills.push_back(indexes_->getTerminatorGap(startInst->getParent()));
// NOTE(review): the line declaring the LiveRange (e.g. "LiveRange LR(")
// that these constructor arguments belong to, and the trailing return of
// LR / closing brace, are missing from this copy; verify against the
// upstream LLVM source.
2091 SlotIndex(getInstructionIndex(startInst).getDefIndex()),
2092 getMBBEndIdx(startInst->getParent()), VN);
2093 Interval.addRange(LR);