1 //===-- LiveIntervalAnalysis.cpp - Live Interval Analysis -----------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file implements the LiveInterval analysis pass which is used
11 // by the Linear Scan Register allocator. This pass linearizes the
12 // basic blocks of the function in DFS order and uses the
13 // LiveVariables pass to conservatively compute live intervals for
14 // each virtual and physical register.
16 //===----------------------------------------------------------------------===//
18 #define DEBUG_TYPE "liveintervals"
19 #include "llvm/CodeGen/LiveIntervalAnalysis.h"
20 #include "VirtRegMap.h"
21 #include "llvm/Value.h"
22 #include "llvm/Analysis/AliasAnalysis.h"
23 #include "llvm/CodeGen/LiveVariables.h"
24 #include "llvm/CodeGen/MachineFrameInfo.h"
25 #include "llvm/CodeGen/MachineInstr.h"
26 #include "llvm/CodeGen/MachineInstrBuilder.h"
27 #include "llvm/CodeGen/MachineLoopInfo.h"
28 #include "llvm/CodeGen/MachineMemOperand.h"
29 #include "llvm/CodeGen/MachineRegisterInfo.h"
30 #include "llvm/CodeGen/Passes.h"
31 #include "llvm/CodeGen/ProcessImplicitDefs.h"
32 #include "llvm/Target/TargetRegisterInfo.h"
33 #include "llvm/Target/TargetInstrInfo.h"
34 #include "llvm/Target/TargetMachine.h"
35 #include "llvm/Target/TargetOptions.h"
36 #include "llvm/Support/CommandLine.h"
37 #include "llvm/Support/Debug.h"
38 #include "llvm/Support/ErrorHandling.h"
39 #include "llvm/Support/raw_ostream.h"
40 #include "llvm/ADT/DepthFirstIterator.h"
41 #include "llvm/ADT/SmallSet.h"
42 #include "llvm/ADT/Statistic.h"
43 #include "llvm/ADT/STLExtras.h"
49 // Hidden options for help debugging.
50 static cl::opt<bool> DisableReMat("disable-rematerialization",
51 cl::init(false), cl::Hidden);
53 static cl::opt<bool> EnableFastSpilling("fast-spill",
54 cl::init(false), cl::Hidden);
56 STATISTIC(numIntervals , "Number of original intervals");
57 STATISTIC(numFolds , "Number of loads/stores folded into instructions");
58 STATISTIC(numSplits , "Number of intervals split");
60 char LiveIntervals::ID = 0;
61 static RegisterPass<LiveIntervals> X("liveintervals", "Live Interval Analysis");
63 void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
65 AU.addRequired<AliasAnalysis>();
66 AU.addPreserved<AliasAnalysis>();
67 AU.addPreserved<LiveVariables>();
68 AU.addRequired<LiveVariables>();
69 AU.addPreservedID(MachineLoopInfoID);
70 AU.addPreservedID(MachineDominatorsID);
73 AU.addPreservedID(PHIEliminationID);
74 AU.addRequiredID(PHIEliminationID);
77 AU.addRequiredID(TwoAddressInstructionPassID);
78 AU.addPreserved<ProcessImplicitDefs>();
79 AU.addRequired<ProcessImplicitDefs>();
80 AU.addPreserved<SlotIndexes>();
81 AU.addRequiredTransitive<SlotIndexes>();
82 MachineFunctionPass::getAnalysisUsage(AU);
85 void LiveIntervals::releaseMemory() {
86 // Free the live intervals themselves.
87 for (DenseMap<unsigned, LiveInterval*>::iterator I = r2iMap_.begin(),
88 E = r2iMap_.end(); I != E; ++I)
93 // Release VNInfo memory regions after all VNInfo objects are dtor'd.
94 VNInfoAllocator.Reset();
95 while (!CloneMIs.empty()) {
96 MachineInstr *MI = CloneMIs.back();
98 mf_->DeleteMachineInstr(MI);
102 /// runOnMachineFunction - Compute live intervals for the whole function.
104 bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
106 mri_ = &mf_->getRegInfo();
107 tm_ = &fn.getTarget();
108 tri_ = tm_->getRegisterInfo();
109 tii_ = tm_->getInstrInfo();
110 aa_ = &getAnalysis<AliasAnalysis>();
111 lv_ = &getAnalysis<LiveVariables>();
112 indexes_ = &getAnalysis<SlotIndexes>();
113 allocatableRegs_ = tri_->getAllocatableSet(fn);
117 numIntervals += getNumIntervals();
123 /// print - Implement the dump method.
124 void LiveIntervals::print(raw_ostream &OS, const Module* ) const {
125 OS << "********** INTERVALS **********\n";
126 for (const_iterator I = begin(), E = end(); I != E; ++I) {
127 I->second->print(OS, tri_);
134 void LiveIntervals::printInstrs(raw_ostream &OS) const {
135 OS << "********** MACHINEINSTRS **********\n";
137 for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end();
138 mbbi != mbbe; ++mbbi) {
139 OS << "BB#" << mbbi->getNumber()
140 << ":\t\t# derived from " << mbbi->getName() << "\n";
141 for (MachineBasicBlock::iterator mii = mbbi->begin(),
142 mie = mbbi->end(); mii != mie; ++mii) {
143 if (mii->isDebugValue())
144 OS << SlotIndex::getEmptyKey() << '\t' << *mii;
146 OS << getInstructionIndex(mii) << '\t' << *mii;
151 void LiveIntervals::dumpInstrs() const {
155 bool LiveIntervals::conflictsWithPhysReg(const LiveInterval &li,
156 VirtRegMap &vrm, unsigned reg) {
157 // We don't handle fancy stuff crossing basic block boundaries
158 if (li.ranges.size() != 1)
160 const LiveRange &range = li.ranges.front();
161 SlotIndex idx = range.start.getBaseIndex();
162 SlotIndex end = range.end.getPrevSlot().getBaseIndex().getNextIndex();
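// Round both ends to whole instruction (base) indices so the scans below can
// step through the range one instruction index at a time.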
164 // Skip deleted instructions
165 MachineInstr *firstMI = getInstructionFromIndex(idx);
166 while (!firstMI && idx != end) {
167 idx = idx.getNextIndex();
168 firstMI = getInstructionFromIndex(idx);
173 // Find last instruction in range
174 SlotIndex lastIdx = end.getPrevIndex();
175 MachineInstr *lastMI = getInstructionFromIndex(lastIdx);
176 while (!lastMI && lastIdx != idx) {
177 lastIdx = lastIdx.getPrevIndex();
178 lastMI = getInstructionFromIndex(lastIdx);
183 // Range cannot cross basic block boundaries or terminators
184 MachineBasicBlock *MBB = firstMI->getParent();
185 if (MBB != lastMI->getParent() || lastMI->getDesc().isTerminator())
188 MachineBasicBlock::const_iterator E = lastMI;
190 for (MachineBasicBlock::const_iterator I = firstMI; I != E; ++I) {
191 const MachineInstr &MI = *I;
193 // Allow copies to and from li.reg
194 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
195 if (tii_->isMoveInstr(MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
196 if (SrcReg == li.reg || DstReg == li.reg)
199 // Check for operands using reg
200 for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
201 const MachineOperand& mop = MI.getOperand(i);
204 unsigned PhysReg = mop.getReg();
205 if (PhysReg == 0 || PhysReg == li.reg)
207 if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
208 if (!vrm.hasPhys(PhysReg))
210 PhysReg = vrm.getPhys(PhysReg);
212 if (PhysReg && tri_->regsOverlap(PhysReg, reg))
217 // No conflicts found.
221 /// conflictsWithPhysRegRef - Similar to conflictsWithPhysReg except
222 /// it checks uses as well.
223 bool LiveIntervals::conflictsWithPhysRegRef(LiveInterval &li,
224 unsigned Reg, bool CheckUse,
225 SmallPtrSet<MachineInstr*,32> &JoinedCopies) {
226 for (LiveInterval::Ranges::const_iterator
227 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
228 for (SlotIndex index = I->start.getBaseIndex(),
229 end = I->end.getPrevSlot().getBaseIndex().getNextIndex();
231 index = index.getNextIndex()) {
232 MachineInstr *MI = getInstructionFromIndex(index);
234 continue; // skip deleted instructions
236 if (JoinedCopies.count(MI))
238 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
239 MachineOperand& MO = MI->getOperand(i);
242 if (MO.isUse() && !CheckUse)
244 unsigned PhysReg = MO.getReg();
245 if (PhysReg == 0 || TargetRegisterInfo::isVirtualRegister(PhysReg))
247 if (tri_->isSubRegister(Reg, PhysReg))
257 static void printRegName(unsigned reg, const TargetRegisterInfo* tri_) {
258 if (TargetRegisterInfo::isPhysicalRegister(reg))
259 dbgs() << tri_->getName(reg);
261 dbgs() << "%reg" << reg;
265 void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
266 MachineBasicBlock::iterator mi,
270 LiveInterval &interval) {
272 dbgs() << "\t\tregister: ";
273 printRegName(interval.reg, tri_);
276 // Virtual registers may be defined multiple times (due to phi
277 // elimination and 2-addr elimination). Much of what we do only has to be
278 // done once for the vreg. We use an empty interval to detect the first
279 // time we see a vreg.
280 LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);
281 if (interval.empty()) {
282 // Get the Idx of the defining instructions.
283 SlotIndex defIndex = MIIdx.getDefIndex();
284 // Earlyclobbers move back one, so that they overlap the live range of inputs.
286 if (MO.isEarlyClobber())
287 defIndex = MIIdx.getUseIndex();
289 MachineInstr *CopyMI = NULL;
290 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
291 if (mi->isExtractSubreg() || mi->isInsertSubreg() || mi->isSubregToReg() ||
292 tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
294 // Earlyclobbers move back one.
295 ValNo = interval.getNextValue(defIndex, CopyMI, true, VNInfoAllocator);
297 assert(ValNo->id == 0 && "First value in interval is not 0?");
299 // Loop over all of the blocks that the vreg is defined in. There are
300 // two cases we have to handle here. The most common case is a vreg
301 // whose lifetime is contained within a basic block. In this case there
302 // will be a single kill, in MBB, which comes after the definition.
303 if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
304 // FIXME: what about dead vars?
306 if (vi.Kills[0] != mi)
307 killIdx = getInstructionIndex(vi.Kills[0]).getDefIndex();
309 killIdx = defIndex.getStoreIndex();
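// (When vi.Kills[0] == mi, the defining instruction is its own kill: the value
// is dead on definition and killIdx is the slot just past the def, so the
// range added below covers only the def slot.)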
311 // If the kill happens after the definition, we have an intra-block
313 if (killIdx > defIndex) {
314 assert(vi.AliveBlocks.empty() &&
315 "Shouldn't be alive across any blocks!");
316 LiveRange LR(defIndex, killIdx, ValNo);
317 interval.addRange(LR);
318 DEBUG(dbgs() << " +" << LR << "\n");
319 ValNo->addKill(killIdx);
324 // The other case we handle is when a virtual register lives to the end
325 // of the defining block, potentially live across some blocks, then is
326 // live into some number of blocks, but gets killed. Start by adding a
327 // range that goes from this definition to the end of the defining block.
328 LiveRange NewLR(defIndex, getMBBEndIdx(mbb), ValNo);
329 DEBUG(dbgs() << " +" << NewLR);
330 interval.addRange(NewLR);
332 // Iterate over all of the blocks that the variable is completely
333 // live in, adding [instrIndex(begin), instrIndex(end)+4) to the live interval.
335 for (SparseBitVector<>::iterator I = vi.AliveBlocks.begin(),
336 E = vi.AliveBlocks.end(); I != E; ++I) {
337 MachineBasicBlock *aliveBlock = mf_->getBlockNumbered(*I);
338 LiveRange LR(getMBBStartIdx(aliveBlock), getMBBEndIdx(aliveBlock), ValNo);
339 interval.addRange(LR);
340 DEBUG(dbgs() << " +" << LR);
343 // Finally, this virtual register is live from the start of any killing
344 // block to the 'use' slot of the killing instruction.
345 for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
346 MachineInstr *Kill = vi.Kills[i];
348 getInstructionIndex(Kill).getDefIndex();
349 LiveRange LR(getMBBStartIdx(Kill->getParent()), killIdx, ValNo);
350 interval.addRange(LR);
351 ValNo->addKill(killIdx);
352 DEBUG(dbgs() << " +" << LR);
356 // If this is the second time we see a virtual register definition, it
357 // must be due to phi elimination or two addr elimination. If this is
358 // the result of two address elimination, then the vreg is one of the
359 // def-and-use register operands.
360 if (mi->isRegTiedToUseOperand(MOIdx)) {
361 // If this is a two-address definition, then we have already processed
362 // the live range. The only problem is that we didn't realize there
363 // are actually two values in the live interval. Because of this we
364 // need to take the live range that defines this register and split it into two values.
366 assert(interval.containsOneValue());
367 SlotIndex DefIndex = interval.getValNumInfo(0)->def.getDefIndex();
368 SlotIndex RedefIndex = MIIdx.getDefIndex();
369 if (MO.isEarlyClobber())
370 RedefIndex = MIIdx.getUseIndex();
372 const LiveRange *OldLR =
373 interval.getLiveRangeContaining(RedefIndex.getUseIndex());
374 VNInfo *OldValNo = OldLR->valno;
376 // Delete the initial value, which should be short and continuous,
377 // because the 2-addr copy must be in the same MBB as the redef.
378 interval.removeRange(DefIndex, RedefIndex);
380 // Two-address vregs should always only be redefined once. This means
381 // that at this point, there should be exactly one value number in it.
382 assert(interval.containsOneValue() && "Unexpected 2-addr liveint!");
384 // The new value number (#1) is defined by the instruction we claimed defined value #0.
386 VNInfo *ValNo = interval.getNextValue(OldValNo->def, OldValNo->getCopy(),
387 false, // update at *
388 VNInfoAllocator);
389 ValNo->setFlags(OldValNo->getFlags()); // * <- updating here
391 // Value#0 is now defined by the 2-addr instruction.
392 OldValNo->def = RedefIndex;
393 OldValNo->setCopy(0);
395 // Add the new live interval which replaces the range for the input copy.
396 LiveRange LR(DefIndex, RedefIndex, ValNo);
397 DEBUG(dbgs() << " replace range with " << LR);
398 interval.addRange(LR);
399 ValNo->addKill(RedefIndex);
401 // If this redefinition is dead, we need to add a dummy unit live
402 // range covering the def slot.
403 if (MO.isDead())
404 interval.addRange(LiveRange(RedefIndex, RedefIndex.getStoreIndex(),
405 OldValNo));
408 dbgs() << " RESULT: ";
409 interval.print(dbgs(), tri_);
412 // Otherwise, this must be because of phi elimination. If this is the
413 // first redefinition of the vreg that we have seen, go back and change
414 // the live range in the PHI block to be a different value number.
415 if (interval.containsOneValue()) {
417 VNInfo *VNI = interval.getValNumInfo(0);
418 // Phi elimination may have reused the register for multiple identical
419 // phi nodes. There will be a kill per phi. Remove the old ranges that
420 // we now know have an incorrect number.
421 for (unsigned ki=0, ke=vi.Kills.size(); ki != ke; ++ki) {
422 MachineInstr *Killer = vi.Kills[ki];
423 SlotIndex Start = getMBBStartIdx(Killer->getParent());
424 SlotIndex End = getInstructionIndex(Killer).getDefIndex();
426 dbgs() << "\n\t\trenaming [" << Start << "," << End << "] in: ";
427 interval.print(dbgs(), tri_);
429 interval.removeRange(Start, End);
431 // Replace the interval with one of a NEW value number. Note that
432 // this value number isn't actually defined by an instruction.
434 LiveRange LR(Start, End,
435 interval.getNextValue(SlotIndex(Start, true),
436 0, false, VNInfoAllocator));
437 LR.valno->setIsPHIDef(true);
438 interval.addRange(LR);
439 LR.valno->addKill(End);
442 MachineBasicBlock *killMBB = getMBBFromIndex(VNI->def);
443 VNI->addKill(indexes_->getTerminatorGap(killMBB));
444 VNI->setHasPHIKill(true);
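// The original value is live out through the eliminated PHI, which is why its
// kill was recorded in the terminator gap above rather than at a real
// instruction.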
446 dbgs() << " RESULT: ";
447 interval.print(dbgs(), tri_);
451 // In the case of PHI elimination, each variable definition is only
452 // live until the end of the block. We've already taken care of the
453 // rest of the live range.
454 SlotIndex defIndex = MIIdx.getDefIndex();
455 if (MO.isEarlyClobber())
456 defIndex = MIIdx.getUseIndex();
459 MachineInstr *CopyMI = NULL;
460 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
461 if (mi->isExtractSubreg() || mi->isInsertSubreg() || mi->isSubregToReg()||
462 tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
464 ValNo = interval.getNextValue(defIndex, CopyMI, true, VNInfoAllocator);
466 SlotIndex killIndex = getMBBEndIdx(mbb);
467 LiveRange LR(defIndex, killIndex, ValNo);
468 interval.addRange(LR);
469 ValNo->addKill(indexes_->getTerminatorGap(mbb));
470 ValNo->setHasPHIKill(true);
471 DEBUG(dbgs() << " +" << LR);
475 DEBUG(dbgs() << '\n');
478 void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
479 MachineBasicBlock::iterator mi,
482 LiveInterval &interval,
483 MachineInstr *CopyMI) {
484 // A physical register cannot be live across basic blocks, so its
485 // lifetime must end somewhere in its defining basic block.
487 dbgs() << "\t\tregister: ";
488 printRegName(interval.reg, tri_);
491 SlotIndex baseIndex = MIIdx;
492 SlotIndex start = baseIndex.getDefIndex();
493 // Earlyclobbers move back one.
494 if (MO.isEarlyClobber())
495 start = MIIdx.getUseIndex();
496 SlotIndex end = start;
498 // If it is not used after definition, it is considered dead at
499 // the instruction defining it. Hence its interval is:
500 // [defSlot(def), defSlot(def)+1)
501 // For earlyclobbers, the defSlot was pushed back one; the extra
502 // advance below compensates.
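// (getStoreIndex() is the slot just past the def slot, so start/end below form
// exactly the one-slot range [defSlot(def), defSlot(def)+1) described above.)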
504 DEBUG(dbgs() << " dead");
505 end = start.getStoreIndex();
509 // If it is not dead on definition, it must be killed by a
510 // subsequent instruction. Hence its interval is:
511 // [defSlot(def), useSlot(kill)+1)
512 baseIndex = baseIndex.getNextIndex();
513 while (++mi != MBB->end()) {
515 if (mi->isDebugValue())
517 if (getInstructionFromIndex(baseIndex) == 0)
518 baseIndex = indexes_->getNextNonNullIndex(baseIndex);
520 if (mi->killsRegister(interval.reg, tri_)) {
521 DEBUG(dbgs() << " killed");
522 end = baseIndex.getDefIndex();
525 int DefIdx = mi->findRegisterDefOperandIdx(interval.reg, false, tri_);
527 if (mi->isRegTiedToUseOperand(DefIdx)) {
528 // Two-address instruction.
529 end = baseIndex.getDefIndex();
531 // Another instruction redefines the register before it is ever read.
532 // Then the register is essentially dead at the instruction that
533 // defines it. Hence its interval is:
534 // [defSlot(def), defSlot(def)+1)
535 DEBUG(dbgs() << " dead");
536 end = start.getStoreIndex();
542 baseIndex = baseIndex.getNextIndex();
545 // The only case we should have a dead physreg here, without a killing
546 // instruction or an instruction where we know it's dead, is if it is
547 // live-in to the function and never used. Another possible case is that the
548 // implicit use of the physical register has been deleted by the two-address pass.
549 end = start.getStoreIndex();
552 assert(start < end && "did not find end of interval?");
554 // Already exists? Extend old live interval.
555 LiveInterval::iterator OldLR = interval.FindLiveRangeContaining(start);
556 bool Extend = OldLR != interval.end();
557 VNInfo *ValNo = Extend
558 ? OldLR->valno : interval.getNextValue(start, CopyMI, true, VNInfoAllocator);
559 if (MO.isEarlyClobber() && Extend)
560 ValNo->setHasRedefByEC(true);
561 LiveRange LR(start, end, ValNo);
562 interval.addRange(LR);
563 LR.valno->addKill(end);
564 DEBUG(dbgs() << " +" << LR << '\n');
567 void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
568 MachineBasicBlock::iterator MI,
572 if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
573 handleVirtualRegisterDef(MBB, MI, MIIdx, MO, MOIdx,
574 getOrCreateInterval(MO.getReg()));
575 else if (allocatableRegs_[MO.getReg()]) {
576 MachineInstr *CopyMI = NULL;
577 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
578 if (MI->isExtractSubreg() || MI->isInsertSubreg() || MI->isSubregToReg() ||
579 tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
581 handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
582 getOrCreateInterval(MO.getReg()), CopyMI);
583 // Def of a register also defines its sub-registers.
584 for (const unsigned* AS = tri_->getSubRegisters(MO.getReg()); *AS; ++AS)
585 // If MI also modifies the sub-register explicitly, avoid processing it
586 // more than once. Do not pass in TRI here so it checks for exact match.
587 if (!MI->modifiesRegister(*AS))
588 handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
589 getOrCreateInterval(*AS), 0);
593 void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
595 LiveInterval &interval, bool isAlias) {
597 dbgs() << "\t\tlivein register: ";
598 printRegName(interval.reg, tri_);
601 // Look for kills; if the register reaches a def before it is killed, then it
602 // shouldn't be considered a live-in.
603 MachineBasicBlock::iterator mi = MBB->begin();
604 SlotIndex baseIndex = MIIdx;
605 SlotIndex start = baseIndex;
606 if (getInstructionFromIndex(baseIndex) == 0)
607 baseIndex = indexes_->getNextNonNullIndex(baseIndex);
609 SlotIndex end = baseIndex;
610 bool SeenDefUse = false;
612 MachineBasicBlock::iterator E = MBB->end();
614 if (mi->isDebugValue()) {
618 if (mi->killsRegister(interval.reg, tri_)) {
619 DEBUG(dbgs() << " killed");
620 end = baseIndex.getDefIndex();
623 } else if (mi->modifiesRegister(interval.reg, tri_)) {
624 // Another instruction redefines the register before it is ever read.
625 // Then the register is essentially dead at the instruction that defines
626 // it. Hence its interval is:
627 // [defSlot(def), defSlot(def)+1)
628 DEBUG(dbgs() << " dead");
629 end = start.getStoreIndex();
635 if (mi != E && !mi->isDebugValue()) {
636 baseIndex = indexes_->getNextNonNullIndex(baseIndex);
640 // Live-in register might not be used at all.
643 DEBUG(dbgs() << " dead");
644 end = MIIdx.getStoreIndex();
646 DEBUG(dbgs() << " live through");
651 VNInfo *vni =
652 interval.getNextValue(SlotIndex(getMBBStartIdx(MBB), true),
653 0, false, VNInfoAllocator);
654 vni->setIsPHIDef(true);
655 LiveRange LR(start, end, vni);
657 interval.addRange(LR);
658 LR.valno->addKill(end);
659 DEBUG(dbgs() << " +" << LR << '\n');
662 /// computeIntervals - computes the live intervals for virtual
663 /// registers. For some ordering of the machine instructions [1,N], a
664 /// live interval is an interval [i, j) where 1 <= i <= j < N for
665 /// which a variable is live.
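/// For example (hypothetical numbering): a vreg defined by the instruction at
/// index 4 and last read by the instruction at index 12 of the same block gets
/// the half-open interval [4, 12) at the slot granularity chosen by the
/// handle*RegisterDef helpers above.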
666 void LiveIntervals::computeIntervals() {
667 DEBUG(dbgs() << "********** COMPUTING LIVE INTERVALS **********\n"
668 << "********** Function: "
669 << ((Value*)mf_->getFunction())->getName() << '\n');
671 SmallVector<unsigned, 8> UndefUses;
672 for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
674 MachineBasicBlock *MBB = MBBI;
678 // Track the index of the current machine instr.
679 SlotIndex MIIndex = getMBBStartIdx(MBB);
680 DEBUG(dbgs() << MBB->getName() << ":\n");
682 // Create intervals for live-ins to this BB first.
683 for (MachineBasicBlock::const_livein_iterator LI = MBB->livein_begin(),
684 LE = MBB->livein_end(); LI != LE; ++LI) {
685 handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
686 // Multiple live-ins can alias the same register.
687 for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
688 if (!hasInterval(*AS))
689 handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
693 // Skip over empty initial indices.
694 if (getInstructionFromIndex(MIIndex) == 0)
695 MIIndex = indexes_->getNextNonNullIndex(MIIndex);
697 for (MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();
699 DEBUG(dbgs() << MIIndex << "\t" << *MI);
700 if (MI->isDebugValue())
704 for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
705 MachineOperand &MO = MI->getOperand(i);
706 if (!MO.isReg() || !MO.getReg())
709 // handle register defs - build intervals
711 handleRegisterDef(MBB, MI, MIIndex, MO, i);
712 else if (MO.isUndef())
713 UndefUses.push_back(MO.getReg());
716 // Move to the next instr slot.
717 MIIndex = indexes_->getNextNonNullIndex(MIIndex);
721 // Create empty intervals for registers defined by implicit_def's (except
722 // for those implicit_defs that define values which are live-out of their blocks).
724 for (unsigned i = 0, e = UndefUses.size(); i != e; ++i) {
725 unsigned UndefReg = UndefUses[i];
726 (void)getOrCreateInterval(UndefReg);
730 LiveInterval* LiveIntervals::createInterval(unsigned reg) {
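// Physical register intervals get an effectively infinite spill weight so they
// are never chosen for spilling; virtual registers start at weight 0 and
// accumulate spill weight as their uses are processed.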
731 float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ? HUGE_VALF : 0.0F;
732 return new LiveInterval(reg, Weight);
735 /// dupInterval - Duplicate a live interval. The caller is responsible for
736 /// managing the allocated memory.
737 LiveInterval* LiveIntervals::dupInterval(LiveInterval *li) {
738 LiveInterval *NewLI = createInterval(li->reg);
739 NewLI->Copy(*li, mri_, getVNInfoAllocator());
743 /// getVNInfoSourceReg - Helper function that parses the specified VNInfo
744 /// copy field and returns the source register that defines it.
745 unsigned LiveIntervals::getVNInfoSourceReg(const VNInfo *VNI) const {
749 if (VNI->getCopy()->isExtractSubreg()) {
750 // If it's extracting out of a physical register, return the sub-register.
751 unsigned Reg = VNI->getCopy()->getOperand(1).getReg();
752 if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
753 unsigned SrcSubReg = VNI->getCopy()->getOperand(2).getImm();
754 unsigned DstSubReg = VNI->getCopy()->getOperand(0).getSubReg();
755 if (SrcSubReg == DstSubReg)
756 // %reg1034:3<def> = EXTRACT_SUBREG %EDX, 3
757 // reg1034 can still be coalesced to EDX.
759 assert(DstSubReg == 0);
760 Reg = tri_->getSubReg(Reg, VNI->getCopy()->getOperand(2).getImm());
763 } else if (VNI->getCopy()->isInsertSubreg() ||
764 VNI->getCopy()->isSubregToReg())
765 return VNI->getCopy()->getOperand(2).getReg();
767 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
768 if (tii_->isMoveInstr(*VNI->getCopy(), SrcReg, DstReg, SrcSubReg, DstSubReg))
770 llvm_unreachable("Unrecognized copy instruction!");
774 //===----------------------------------------------------------------------===//
775 // Register allocator hooks.
778 /// getReMatImplicitUse - If the remat definition MI has one (for now, we only
779 /// allow one) virtual register operand, then its uses are implicitly using
780 /// the register. Returns the virtual register.
781 unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
782 MachineInstr *MI) const {
784 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
785 MachineOperand &MO = MI->getOperand(i);
786 if (!MO.isReg() || !MO.isUse())
788 unsigned Reg = MO.getReg();
789 if (Reg == 0 || Reg == li.reg)
792 if (TargetRegisterInfo::isPhysicalRegister(Reg) &&
793 !allocatableRegs_[Reg])
795 // FIXME: For now, only remat MI with at most one register operand.
797 "Can't rematerialize instruction with multiple register operand!");
806 /// isValNoAvailableAt - Return true if the val# of the specified interval
807 /// which reaches the given instruction also reaches the specified use index.
808 bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
809 SlotIndex UseIdx) const {
810 SlotIndex Index = getInstructionIndex(MI);
811 VNInfo *ValNo = li.FindLiveRangeContaining(Index)->valno;
812 LiveInterval::const_iterator UI = li.FindLiveRangeContaining(UseIdx);
813 return UI != li.end() && UI->valno == ValNo;
816 /// isReMaterializable - Returns true if the definition MI of the specified
817 /// val# of the specified interval is re-materializable.
818 bool LiveIntervals::isReMaterializable(const LiveInterval &li,
819 const VNInfo *ValNo, MachineInstr *MI,
820 SmallVectorImpl<LiveInterval*> &SpillIs,
825 if (!tii_->isTriviallyReMaterializable(MI, aa_))
828 // Target-specific code can mark an instruction as being rematerializable
829 // if it has one virtual reg use, though it had better be something like
830 // a PIC base register which is likely to be live everywhere.
831 unsigned ImpUse = getReMatImplicitUse(li, MI);
833 const LiveInterval &ImpLi = getInterval(ImpUse);
834 for (MachineRegisterInfo::use_iterator ri = mri_->use_begin(li.reg),
835 re = mri_->use_end(); ri != re; ++ri) {
836 MachineInstr *UseMI = &*ri;
837 SlotIndex UseIdx = getInstructionIndex(UseMI);
838 if (li.FindLiveRangeContaining(UseIdx)->valno != ValNo)
840 if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
844 // If a register operand of the re-materialized instruction is going to
845 // be spilled next, then it's not legal to re-materialize this instruction.
846 for (unsigned i = 0, e = SpillIs.size(); i != e; ++i)
847 if (ImpUse == SpillIs[i]->reg)
853 /// isReMaterializable - Returns true if the definition MI of the specified
854 /// val# of the specified interval is re-materializable.
855 bool LiveIntervals::isReMaterializable(const LiveInterval &li,
856 const VNInfo *ValNo, MachineInstr *MI) {
857 SmallVector<LiveInterval*, 4> Dummy1;
859 return isReMaterializable(li, ValNo, MI, Dummy1, Dummy2);
862 /// isReMaterializable - Returns true if the definition MI of every
863 /// val# of the specified interval is re-materializable.
864 bool LiveIntervals::isReMaterializable(const LiveInterval &li,
865 SmallVectorImpl<LiveInterval*> &SpillIs,
868 for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
870 const VNInfo *VNI = *i;
872 continue; // Dead val#.
873 // Is the def for the val# rematerializable?
874 if (!VNI->isDefAccurate())
876 MachineInstr *ReMatDefMI = getInstructionFromIndex(VNI->def);
877 bool DefIsLoad = false;
879 !isReMaterializable(li, VNI, ReMatDefMI, SpillIs, DefIsLoad))
886 /// FilterFoldedOps - Filter out two-address use operands. Return
887 /// true if it finds any issue with the operands that ought to prevent folding.
889 static bool FilterFoldedOps(MachineInstr *MI,
890 SmallVector<unsigned, 2> &Ops,
892 SmallVector<unsigned, 2> &FoldOps) {
894 for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
895 unsigned OpIdx = Ops[i];
896 MachineOperand &MO = MI->getOperand(OpIdx);
897 // FIXME: fold subreg use.
901 MRInfo |= (unsigned)VirtRegMap::isMod;
903 // Filter out two-address use operand(s).
904 if (MI->isRegTiedToDefOperand(OpIdx)) {
905 MRInfo = VirtRegMap::isModRef;
908 MRInfo |= (unsigned)VirtRegMap::isRef;
910 FoldOps.push_back(OpIdx);
916 /// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
917 /// slot / to reg or any rematerialized load into ith operand of specified
918 /// MI. If it is successful, MI is updated with the newly created MI and returns true.
920 bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
921 VirtRegMap &vrm, MachineInstr *DefMI,
923 SmallVector<unsigned, 2> &Ops,
924 bool isSS, int Slot, unsigned Reg) {
925 // If it is an implicit def instruction, just delete it.
926 if (MI->isImplicitDef()) {
927 RemoveMachineInstrFromMaps(MI);
928 vrm.RemoveMachineInstrFromMaps(MI);
929 MI->eraseFromParent();
934 // Filter the list of operand indexes that are to be folded. Abort if
935 // any operand will prevent folding.
937 SmallVector<unsigned, 2> FoldOps;
938 if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
941 // The only time it's safe to fold into a two address instruction is when
942 // it's folding reload and spill from / into a spill stack slot.
943 if (DefMI && (MRInfo & VirtRegMap::isMod))
946 MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(*mf_, MI, FoldOps, Slot)
947 : tii_->foldMemoryOperand(*mf_, MI, FoldOps, DefMI);
949 // Remember this instruction uses the spill slot.
950 if (isSS) vrm.addSpillSlotUse(Slot, fmi);
952 // Attempt to fold the memory reference into the instruction. If
953 // we can do this, we don't need to insert spill code.
954 MachineBasicBlock &MBB = *MI->getParent();
955 if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
956 vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
957 vrm.transferSpillPts(MI, fmi);
958 vrm.transferRestorePts(MI, fmi);
959 vrm.transferEmergencySpills(MI, fmi);
960 ReplaceMachineInstrInMaps(MI, fmi);
961 MI = MBB.insert(MBB.erase(MI), fmi);
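// The folded instruction takes MI's place in the block; MI is passed by
// reference, so the caller continues with the new instruction.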
968 /// canFoldMemoryOperand - Returns true if the specified load / store
969 /// folding is possible.
970 bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
971 SmallVector<unsigned, 2> &Ops,
973 // Filter the list of operand indexes that are to be folded. Abort if
974 // any operand will prevent folding.
976 SmallVector<unsigned, 2> FoldOps;
977 if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
980 // It's only legal to remat for a use, not a def.
981 if (ReMat && (MRInfo & VirtRegMap::isMod))
984 return tii_->canFoldMemoryOperand(MI, FoldOps);
987 bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
988 LiveInterval::Ranges::const_iterator itr = li.ranges.begin();
990 MachineBasicBlock *mbb = indexes_->getMBBCoveringRange(itr->start, itr->end);
995 for (++itr; itr != li.ranges.end(); ++itr) {
996 MachineBasicBlock *mbb2 =
997 indexes_->getMBBCoveringRange(itr->start, itr->end);
1006 /// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of
1007 /// interval on to-be re-materialized operands of MI) with the new register.
1008 void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
1009 MachineInstr *MI, unsigned NewVReg,
1011 // There is an implicit use. That means one of the other operands is
1012 // being remat'ed and the remat'ed instruction has li.reg as a
1013 // use operand. Make sure we rewrite that as well.
1014 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1015 MachineOperand &MO = MI->getOperand(i);
1018 unsigned Reg = MO.getReg();
1019 if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
1021 if (!vrm.isReMaterialized(Reg))
1023 MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
1024 MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg);
1026 UseMO->setReg(NewVReg);
1030 /// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
1031 /// for addIntervalsForSpills to rewrite uses / defs for the given live range.
1032 bool LiveIntervals::
1033 rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
1034 bool TrySplit, SlotIndex index, SlotIndex end,
1036 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
1037 unsigned Slot, int LdSlot,
1038 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
1040 const TargetRegisterClass* rc,
1041 SmallVector<int, 4> &ReMatIds,
1042 const MachineLoopInfo *loopInfo,
1043 unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
1044 DenseMap<unsigned,unsigned> &MBBVRegsMap,
1045 std::vector<LiveInterval*> &NewLIs) {
1046 bool CanFold = false;
1048 for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
1049 MachineOperand& mop = MI->getOperand(i);
1052 unsigned Reg = mop.getReg();
1053 unsigned RegI = Reg;
1054 if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
1059 bool TryFold = !DefIsReMat;
1060 bool FoldSS = true; // Default behavior unless it's a remat.
1061 int FoldSlot = Slot;
1063 // If this is the rematerializable definition MI itself and
1064 // all of its uses are rematerialized, simply delete it.
1065 if (MI == ReMatOrigDefMI && CanDelete) {
1066 DEBUG(dbgs() << "\t\t\t\tErasing re-materializable def: "
1068 RemoveMachineInstrFromMaps(MI);
1069 vrm.RemoveMachineInstrFromMaps(MI);
1070 MI->eraseFromParent();
1074 // If def for this use can't be rematerialized, then try folding.
1075 // If def is rematerializable and it's a load, also try folding.
1076 TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
1078 // Try to fold loads (from stack slot, constant pool, etc.) into uses.
1084 // Scan all of the operands of this instruction rewriting operands
1085 // to use NewVReg instead of li.reg as appropriate. We do this for two reasons:
1088 // 1. If the instr reads the same spilled vreg multiple times, we
1089 // want to reuse the NewVReg.
1090 // 2. If the instr is a two-addr instruction, we are required to
1091 // keep the src/dst regs pinned.
1093 // Keep track of whether we replace a use and/or def so that we can
1094 // create the spill interval with the appropriate range.
1096 HasUse = mop.isUse();
1097 HasDef = mop.isDef();
1098 SmallVector<unsigned, 2> Ops;
1100 for (unsigned j = i+1, e = MI->getNumOperands(); j != e; ++j) {
1101 const MachineOperand &MOj = MI->getOperand(j);
1104 unsigned RegJ = MOj.getReg();
1105 if (RegJ == 0 || TargetRegisterInfo::isPhysicalRegister(RegJ))
1109 if (!MOj.isUndef()) {
1110 HasUse |= MOj.isUse();
1111 HasDef |= MOj.isDef();
1116 // Create a new virtual register for the spill interval.
1117 // Create the new register now so we can map the fold instruction
1118 // to the new register so when it is unfolded we get the correct answer.
1120 bool CreatedNewVReg = false;
1122 NewVReg = mri_->createVirtualRegister(rc);
1124 CreatedNewVReg = true;
1126 // The new virtual register should get the same allocation hints as the old register.
1128 std::pair<unsigned, unsigned> Hint = mri_->getRegAllocationHint(Reg);
1129 if (Hint.first || Hint.second)
1130 mri_->setRegAllocationHint(NewVReg, Hint.first, Hint.second);
1136 // Do not fold load / store here if we are splitting. We'll find an
1137 // optimal point to insert a load / store later.
1139 if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
1140 Ops, FoldSS, FoldSlot, NewVReg)) {
1141 // Folding the load/store can completely change the instruction in
1142 // unpredictable ways, rescan it from the beginning.
1145 // We need to give the new vreg the same stack slot as the
1146 // spilled interval.
1147 vrm.assignVirt2StackSlot(NewVReg, FoldSlot);
1153 if (isNotInMIMap(MI))
1155 goto RestartInstruction;
1158 // We'll try to fold it later if it's profitable.
1159 CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat);
1163 mop.setReg(NewVReg);
1164 if (mop.isImplicit())
1165 rewriteImplicitOps(li, MI, NewVReg, vrm);
1167 // Reuse NewVReg for other reads.
1168 for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
1169 MachineOperand &mopj = MI->getOperand(Ops[j]);
1170 mopj.setReg(NewVReg);
1171 if (mopj.isImplicit())
1172 rewriteImplicitOps(li, MI, NewVReg, vrm);
1175 if (CreatedNewVReg) {
1177 vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI);
1178 if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
1179 // Each valnum may have its own remat id.
1180 ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
1182 vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
1184 if (!CanDelete || (HasUse && HasDef)) {
1185 // If this is a two-addr instruction then its use operands are
1186 // rematerializable but its def is not. It should be assigned a stack slot.
1188 vrm.assignVirt2StackSlot(NewVReg, Slot);
1191 vrm.assignVirt2StackSlot(NewVReg, Slot);
1193 } else if (HasUse && HasDef &&
1194 vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
1195 // If this interval hasn't been assigned a stack slot (because earlier
1196 // def is a deleted remat def), do it now.
1197 assert(Slot != VirtRegMap::NO_STACK_SLOT);
1198 vrm.assignVirt2StackSlot(NewVReg, Slot);
1201 // Re-matting an instruction with virtual register use. Add the
1202 // register as an implicit use on the use MI.
1203 if (DefIsReMat && ImpUse)
1204 MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
1206 // Create a new register interval for this spill / remat.
1207 LiveInterval &nI = getOrCreateInterval(NewVReg);
1208 if (CreatedNewVReg) {
1209 NewLIs.push_back(&nI);
1210 MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
1212 vrm.setIsSplitFromReg(NewVReg, li.reg);
1216 if (CreatedNewVReg) {
1217 LiveRange LR(index.getLoadIndex(), index.getDefIndex(),
1218 nI.getNextValue(SlotIndex(), 0, false, VNInfoAllocator));
1219 DEBUG(dbgs() << " +" << LR);
1222 // Extend the split live interval to this def / use.
1223 SlotIndex End = index.getDefIndex();
1224 LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
1225 nI.getValNumInfo(nI.getNumValNums()-1));
1226 DEBUG(dbgs() << " +" << LR);
1231 LiveRange LR(index.getDefIndex(), index.getStoreIndex(),
1232 nI.getNextValue(SlotIndex(), 0, false, VNInfoAllocator));
1233 DEBUG(dbgs() << " +" << LR);
1238 dbgs() << "\t\t\t\tAdded new interval: ";
1239 nI.print(dbgs(), tri_);
1245 bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
1247 MachineBasicBlock *MBB,
1248 SlotIndex Idx) const {
1249 SlotIndex End = getMBBEndIdx(MBB);
1250 for (unsigned j = 0, ee = VNI->kills.size(); j != ee; ++j) {
1251 if (VNI->kills[j].isPHI())
1254 SlotIndex KillIdx = VNI->kills[j];
1255 if (KillIdx > Idx && KillIdx <= End)
1261 /// RewriteInfo - Keep track of machine instrs that will be rewritten
1262 /// during spilling.
1264 struct RewriteInfo {
1269 RewriteInfo(SlotIndex i, MachineInstr *mi, bool u, bool d)
1270 : Index(i), MI(mi), HasUse(u), HasDef(d) {}
1273 struct RewriteInfoCompare {
1274 bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
1275 return LHS.Index < RHS.Index;
1280 void LiveIntervals::
1281 rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
1282 LiveInterval::Ranges::const_iterator &I,
1283 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
1284 unsigned Slot, int LdSlot,
1285 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
1287 const TargetRegisterClass* rc,
1288 SmallVector<int, 4> &ReMatIds,
1289 const MachineLoopInfo *loopInfo,
1290 BitVector &SpillMBBs,
1291 DenseMap<unsigned, std::vector<SRInfo> > &SpillIdxes,
1292 BitVector &RestoreMBBs,
1293 DenseMap<unsigned, std::vector<SRInfo> > &RestoreIdxes,
1294 DenseMap<unsigned,unsigned> &MBBVRegsMap,
1295 std::vector<LiveInterval*> &NewLIs) {
1296 bool AllCanFold = true;
1297 unsigned NewVReg = 0;
1298 SlotIndex start = I->start.getBaseIndex();
1299 SlotIndex end = I->end.getPrevSlot().getBaseIndex().getNextIndex();
1301 // First collect all the defs / uses in this live range that will be rewritten.
1302 // Make sure they are sorted according to instruction index.
1303 std::vector<RewriteInfo> RewriteMIs;
1304 for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
1305 re = mri_->reg_end(); ri != re; ) {
1306 MachineInstr *MI = &*ri;
1307 MachineOperand &O = ri.getOperand();
1309 if (MI->isDebugValue()) {
1310 // Remove debug info for now.
1312 DEBUG(dbgs() << "Removing debug info due to spill:" << "\t" << *MI);
1315 assert(!O.isImplicit() && "Spilling register that's used as implicit use?");
1316 SlotIndex index = getInstructionIndex(MI);
1317 if (index < start || index >= end)
1321 // Must be defined by an implicit def. It should not be spilled. Note,
1322 // this is for correctness reasons. e.g.
1323 // 8   %reg1024<def> = IMPLICIT_DEF
1324 // 12  %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
1325 // The live range [12, 14) is not part of the r1024 live interval since
1326 // it's defined by an implicit def. It will not conflict with the live
1327 // interval of r1025. Now suppose both registers are spilled; you can
1328 // easily see a situation where both registers are reloaded before
1329 // the INSERT_SUBREG and both target registers would overlap.
1331 RewriteMIs.push_back(RewriteInfo(index, MI, O.isUse(), O.isDef()));
1333 std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());
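// Processing the rewrites in instruction order lets the per-MBB spill /
// restore bookkeeping below see each block's defs and uses in program order.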
1335 unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
1336 // Now rewrite the defs and uses.
1337 for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
1338 RewriteInfo &rwi = RewriteMIs[i];
1340 SlotIndex index = rwi.Index;
1341 bool MIHasUse = rwi.HasUse;
1342 bool MIHasDef = rwi.HasDef;
1343 MachineInstr *MI = rwi.MI;
1344 // If MI defs and/or uses the same register multiple times, then there
1345 // are multiple entries.
1346 unsigned NumUses = MIHasUse;
1347 while (i != e && RewriteMIs[i].MI == MI) {
1348 assert(RewriteMIs[i].Index == index);
1349 bool isUse = RewriteMIs[i].HasUse;
1350 if (isUse) ++NumUses;
1352 MIHasDef |= RewriteMIs[i].HasDef;
1355 MachineBasicBlock *MBB = MI->getParent();
1357 if (ImpUse && MI != ReMatDefMI) {
1358 // Re-matting an instruction with virtual register use. Update the
1359 // register interval's spill weight to HUGE_VALF to prevent it from being spilled.
1361 LiveInterval &ImpLi = getInterval(ImpUse);
1362 ImpLi.weight = HUGE_VALF;
1365 unsigned MBBId = MBB->getNumber();
1366 unsigned ThisVReg = 0;
1368 DenseMap<unsigned,unsigned>::iterator NVI = MBBVRegsMap.find(MBBId);
1369 if (NVI != MBBVRegsMap.end()) {
1370 ThisVReg = NVI->second;
1377 // It's better to start a new interval to avoid artificially
1378 // extending the new interval.
1379 if (MIHasDef && !MIHasUse) {
1380 MBBVRegsMap.erase(MBB->getNumber());
1386 bool IsNew = ThisVReg == 0;
1388 // This ends the previous live interval. If all of its def / use
1389 // can be folded, give it a low spill weight.
1390 if (NewVReg && TrySplit && AllCanFold) {
1391 LiveInterval &nI = getOrCreateInterval(NewVReg);
1398 bool HasDef = false;
1399 bool HasUse = false;
1400 bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
1401 index, end, MI, ReMatOrigDefMI, ReMatDefMI,
1402 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1403 CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
1404 ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs);
1405 if (!HasDef && !HasUse)
1408 AllCanFold &= CanFold;
1410 // Update weight of spill interval.
1411 LiveInterval &nI = getOrCreateInterval(NewVReg);
1413 // The spill weight is now infinity as it cannot be spilled again.
1414 nI.weight = HUGE_VALF;
1418 // Keep track of the last def and first use in each MBB.
1420 if (MI != ReMatOrigDefMI || !CanDelete) {
1421 bool HasKill = false;
1423 HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, index.getDefIndex());
1425 // If this is two-address code, then this index starts a new VNInfo.
1426 const VNInfo *VNI = li.findDefinedVNInfoForRegInt(index.getDefIndex());
1428 HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, index.getDefIndex());
1430 DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
1431 SpillIdxes.find(MBBId);
1433 if (SII == SpillIdxes.end()) {
1434 std::vector<SRInfo> S;
1435 S.push_back(SRInfo(index, NewVReg, true));
1436 SpillIdxes.insert(std::make_pair(MBBId, S));
1437 } else if (SII->second.back().vreg != NewVReg) {
1438 SII->second.push_back(SRInfo(index, NewVReg, true));
1439 } else if (index > SII->second.back().index) {
1440 // If there is an earlier def and this is a two-address
1441 // instruction, then it's not possible to fold the store (which
1442 // would also fold the load).
1443 SRInfo &Info = SII->second.back();
1445 Info.canFold = !HasUse;
1447 SpillMBBs.set(MBBId);
1448 } else if (SII != SpillIdxes.end() &&
1449 SII->second.back().vreg == NewVReg &&
1450 index > SII->second.back().index) {
1451 // There is an earlier def that's not killed (must be two-address).
1452 // The spill is no longer needed.
1453 SII->second.pop_back();
1454 if (SII->second.empty()) {
1455 SpillIdxes.erase(MBBId);
1456 SpillMBBs.reset(MBBId);
1463 DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
1464 SpillIdxes.find(MBBId);
1465 if (SII != SpillIdxes.end() &&
1466 SII->second.back().vreg == NewVReg &&
1467 index > SII->second.back().index)
1468 // Use(s) follow the last def; it's not safe to fold the spill.
1469 SII->second.back().canFold = false;
1470 DenseMap<unsigned, std::vector<SRInfo> >::iterator RII =
1471 RestoreIdxes.find(MBBId);
1472 if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
1473 // If we are splitting live intervals, only fold if it's the first
1474 // use and there isn't another use later in the MBB.
1475 RII->second.back().canFold = false;
1477 // Only need a reload if there isn't an earlier def / use.
1478 if (RII == RestoreIdxes.end()) {
1479 std::vector<SRInfo> Infos;
1480 Infos.push_back(SRInfo(index, NewVReg, true));
1481 RestoreIdxes.insert(std::make_pair(MBBId, Infos));
1483 RII->second.push_back(SRInfo(index, NewVReg, true));
1485 RestoreMBBs.set(MBBId);
1489 // Update spill weight.
1490 unsigned loopDepth = loopInfo->getLoopDepth(MBB);
1491 nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
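// getSpillWeight scales with the loop depth of the containing block, so defs
// and uses inside deeper loops make this spill interval costlier to spill again.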
1494 if (NewVReg && TrySplit && AllCanFold) {
1495 // If all of its def / use can be folded, give it a low spill weight.
1496 LiveInterval &nI = getOrCreateInterval(NewVReg);
1501 bool LiveIntervals::alsoFoldARestore(int Id, SlotIndex index,
1502 unsigned vr, BitVector &RestoreMBBs,
1503 DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1504 if (!RestoreMBBs[Id])
1506 std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1507 for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1508 if (Restores[i].index == index &&
1509 Restores[i].vreg == vr &&
1510 Restores[i].canFold)
1515 void LiveIntervals::eraseRestoreInfo(int Id, SlotIndex index,
1516 unsigned vr, BitVector &RestoreMBBs,
1517 DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1518 if (!RestoreMBBs[Id])
1520 std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1521 for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1522 if (Restores[i].index == index && Restores[i].vreg == vr)
1523 Restores[i].index = SlotIndex();
1526 /// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being
1527 /// spilled and create empty intervals for their uses.
1529 LiveIntervals::handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm,
1530 const TargetRegisterClass* rc,
1531 std::vector<LiveInterval*> &NewLIs) {
1532 for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
1533 re = mri_->reg_end(); ri != re; ) {
1534 MachineOperand &O = ri.getOperand();
1535 MachineInstr *MI = &*ri;
1538 assert(MI->isImplicitDef() &&
1539 "Register def was not rewritten?");
1540 RemoveMachineInstrFromMaps(MI);
1541 vrm.RemoveMachineInstrFromMaps(MI);
1542 MI->eraseFromParent();
1544 // This must be a use of an implicit_def so it's not part of the live
1545 // interval. Create a new empty live interval for it.
1546 // FIXME: Can we simply erase some of the instructions? e.g. Stores?
1547 unsigned NewVReg = mri_->createVirtualRegister(rc);
1549 vrm.setIsImplicitlyDefined(NewVReg);
1550 NewLIs.push_back(&getOrCreateInterval(NewVReg));
1551 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1552 MachineOperand &MO = MI->getOperand(i);
1553 if (MO.isReg() && MO.getReg() == li.reg) {
1562 std::vector<LiveInterval*> LiveIntervals::
1563 addIntervalsForSpillsFast(const LiveInterval &li,
1564 const MachineLoopInfo *loopInfo,
1566 unsigned slot = vrm.assignVirt2StackSlot(li.reg);
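// Fast spilling: rather than splitting, give each instruction touching li.reg
// its own fresh vreg with a tiny interval backed by a single stack slot, and
// record a restore before each use / a spill after each def below.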
1568 std::vector<LiveInterval*> added;
1570 assert(li.weight != HUGE_VALF &&
1571 "attempt to spill already spilled interval!");
1574 dbgs() << "\t\t\t\tadding intervals for spills for interval: ";
1579 const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
1581 MachineRegisterInfo::reg_iterator RI = mri_->reg_begin(li.reg);
1582 while (RI != mri_->reg_end()) {
1583 MachineInstr* MI = &*RI;
1585 SmallVector<unsigned, 2> Indices;
1586 bool HasUse = false;
1587 bool HasDef = false;
1589 for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
1590 MachineOperand& mop = MI->getOperand(i);
1591 if (!mop.isReg() || mop.getReg() != li.reg) continue;
1593 HasUse |= MI->getOperand(i).isUse();
1594 HasDef |= MI->getOperand(i).isDef();
1596 Indices.push_back(i);
1599 if (!tryFoldMemoryOperand(MI, vrm, NULL, getInstructionIndex(MI),
1600 Indices, true, slot, li.reg)) {
1601 unsigned NewVReg = mri_->createVirtualRegister(rc);
1603 vrm.assignVirt2StackSlot(NewVReg, slot);
1605 // create a new register for this spill
1606 LiveInterval &nI = getOrCreateInterval(NewVReg);
1608 // the spill weight is now infinity as it
1609 // cannot be spilled again
1610 nI.weight = HUGE_VALF;
1612 // Rewrite register operands to use the new vreg.
1613 for (SmallVectorImpl<unsigned>::iterator I = Indices.begin(),
1614 E = Indices.end(); I != E; ++I) {
1615 MI->getOperand(*I).setReg(NewVReg);
1617 if (MI->getOperand(*I).isUse())
1618 MI->getOperand(*I).setIsKill(true);
1621 // Fill in the new live interval.
1622 SlotIndex index = getInstructionIndex(MI);
1624 LiveRange LR(index.getLoadIndex(), index.getUseIndex(),
1625 nI.getNextValue(SlotIndex(), 0, false,
1626 getVNInfoAllocator()));
1627 DEBUG(dbgs() << " +" << LR);
1629 vrm.addRestorePoint(NewVReg, MI);
1632 LiveRange LR(index.getDefIndex(), index.getStoreIndex(),
1633 nI.getNextValue(SlotIndex(), 0, false,
1634 getVNInfoAllocator()));
1635 DEBUG(dbgs() << " +" << LR);
1637 vrm.addSpillPoint(NewVReg, true, MI);
1640 added.push_back(&nI);
1643 dbgs() << "\t\t\t\tadded new interval: ";
1650 RI = mri_->reg_begin(li.reg);
1656 std::vector<LiveInterval*> LiveIntervals::
1657 addIntervalsForSpills(const LiveInterval &li,
1658 SmallVectorImpl<LiveInterval*> &SpillIs,
1659 const MachineLoopInfo *loopInfo, VirtRegMap &vrm) {
1661 if (EnableFastSpilling)
1662 return addIntervalsForSpillsFast(li, loopInfo, vrm);
1664 assert(li.weight != HUGE_VALF &&
1665 "attempt to spill already spilled interval!");
1668 dbgs() << "\t\t\t\tadding intervals for spills for interval: ";
1669 li.print(dbgs(), tri_);
1673 // Each bit specifies whether a spill is required in the MBB.
1674 BitVector SpillMBBs(mf_->getNumBlockIDs());
1675 DenseMap<unsigned, std::vector<SRInfo> > SpillIdxes;
1676 BitVector RestoreMBBs(mf_->getNumBlockIDs());
1677 DenseMap<unsigned, std::vector<SRInfo> > RestoreIdxes;
1678 DenseMap<unsigned,unsigned> MBBVRegsMap;
1679 std::vector<LiveInterval*> NewLIs;
1680 const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
1682 unsigned NumValNums = li.getNumValNums();
1683 SmallVector<MachineInstr*, 4> ReMatDefs;
1684 ReMatDefs.resize(NumValNums, NULL);
1685 SmallVector<MachineInstr*, 4> ReMatOrigDefs;
1686 ReMatOrigDefs.resize(NumValNums, NULL);
1687 SmallVector<int, 4> ReMatIds;
1688 ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
1689 BitVector ReMatDelete(NumValNums);
1690 unsigned Slot = VirtRegMap::MAX_STACK_SLOT;
1692 // Spilling a split live interval. It cannot be split any further. Also,
1693 // it's guaranteed to be a single val# / range interval.
1694 if (vrm.getPreSplitReg(li.reg)) {
1695 vrm.setIsSplitFromReg(li.reg, 0);
1696 // Unset the split kill marker on the last use.
1697 SlotIndex KillIdx = vrm.getKillPoint(li.reg);
1698 if (KillIdx != SlotIndex()) {
1699 MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
1700 assert(KillMI && "Last use disappeared?");
1701 int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
1702 assert(KillOp != -1 && "Last use disappeared?");
1703 KillMI->getOperand(KillOp).setIsKill(false);
1705 vrm.removeKillPoint(li.reg);
1706 bool DefIsReMat = vrm.isReMaterialized(li.reg);
1707 Slot = vrm.getStackSlot(li.reg);
1708 assert(Slot != VirtRegMap::MAX_STACK_SLOT);
1709 MachineInstr *ReMatDefMI = DefIsReMat ?
1710 vrm.getReMaterializedMI(li.reg) : NULL;
1712 bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1713 bool isLoad = isLoadSS ||
1714 (DefIsReMat && (ReMatDefMI->getDesc().canFoldAsLoad()));
1715 bool IsFirstRange = true;
1716 for (LiveInterval::Ranges::const_iterator
1717 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1718 // If this is a split live interval with multiple ranges, it means there
1719 // are two-address instructions that re-defined the value. Only the
1720 // first def can be rematerialized!
1722 // Note ReMatOrigDefMI has already been deleted.
1723 rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
1724 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1725 false, vrm, rc, ReMatIds, loopInfo,
1726 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1727 MBBVRegsMap, NewLIs);
1729 rewriteInstructionsForSpills(li, false, I, NULL, 0,
1730 Slot, 0, false, false, false,
1731 false, vrm, rc, ReMatIds, loopInfo,
1732 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1733 MBBVRegsMap, NewLIs);
1735 IsFirstRange = false;
1738 handleSpilledImpDefs(li, vrm, rc, NewLIs);
1742 bool TrySplit = !intervalIsInOneMBB(li);
1745 bool NeedStackSlot = false;
1746 for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
1748 const VNInfo *VNI = *i;
1749 unsigned VN = VNI->id;
1750 if (VNI->isUnused())
1751 continue; // Dead val#.
1752 // Is the def for the val# rematerializable?
1753 MachineInstr *ReMatDefMI = VNI->isDefAccurate()
1754 ? getInstructionFromIndex(VNI->def) : 0;
1756 if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, SpillIs, dummy)) {
1757 // Remember how to remat the def of this val#.
1758 ReMatOrigDefs[VN] = ReMatDefMI;
1759 // Original def may be modified so we have to make a copy here.
1760 MachineInstr *Clone = mf_->CloneMachineInstr(ReMatDefMI);
1761 CloneMIs.push_back(Clone);
1762 ReMatDefs[VN] = Clone;
1764 bool CanDelete = true;
1765 if (VNI->hasPHIKill()) {
1766 // A kill is a phi node, not all of its uses can be rematerialized.
1767 // It must not be deleted.
1769 // Need a stack slot if there is any live range where uses cannot be rematerialized.
1771 NeedStackSlot = true;
1774 ReMatDelete.set(VN);
1776 // Need a stack slot if there is any live range where uses cannot be rematerialized.
1778 NeedStackSlot = true;
1782 // One stack slot per live interval.
1783 if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0) {
1784 if (vrm.getStackSlot(li.reg) == VirtRegMap::NO_STACK_SLOT)
1785 Slot = vrm.assignVirt2StackSlot(li.reg);
1787 // This case only occurs when the prealloc splitter has already assigned
1788 // a stack slot to this vreg.
1790 Slot = vrm.getStackSlot(li.reg);
1793 // Create new intervals and rewrite defs and uses.
1794 for (LiveInterval::Ranges::const_iterator
1795 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1796 MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
1797 MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
1798 bool DefIsReMat = ReMatDefMI != NULL;
1799 bool CanDelete = ReMatDelete[I->valno->id];
1801 bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1802 bool isLoad = isLoadSS ||
1803 (DefIsReMat && ReMatDefMI->getDesc().canFoldAsLoad());
1804 rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
1805 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1806 CanDelete, vrm, rc, ReMatIds, loopInfo,
1807 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1808 MBBVRegsMap, NewLIs);
1811 // Insert spills / restores if we are splitting.
1813 handleSpilledImpDefs(li, vrm, rc, NewLIs);
  SmallPtrSet<LiveInterval*, 4> AddedKill;
  SmallVector<unsigned, 2> Ops;
  if (NeedStackSlot) {
    int Id = SpillMBBs.find_first();
    while (Id != -1) {
      std::vector<SRInfo> &spills = SpillIdxes[Id];
      for (unsigned i = 0, e = spills.size(); i != e; ++i) {
        SlotIndex index = spills[i].index;
        unsigned VReg = spills[i].vreg;
        LiveInterval &nI = getOrCreateInterval(VReg);
        bool isReMat = vrm.isReMaterialized(VReg);
        MachineInstr *MI = getInstructionFromIndex(index);
        bool CanFold = false;
        bool FoundUse = false;
        Ops.clear();
        if (spills[i].canFold) {
          CanFold = true;
          for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
            MachineOperand &MO = MI->getOperand(j);
            if (!MO.isReg() || MO.getReg() != VReg)
              continue;

            Ops.push_back(j);
            if (MO.isDef())
              continue;
            if (isReMat ||
                (!FoundUse && !alsoFoldARestore(Id, index, VReg,
                                                RestoreMBBs, RestoreIdxes))) {
              // MI has two-address uses of the same register. If the use
              // isn't the first and only use in the BB, then we can't fold
              // it. FIXME: Move this to rewriteInstructionsForSpills.
              CanFold = false;
              break;
            }
            FoundUse = true;
          }
        }

        // Fold the store into the def if possible.
        bool Folded = false;
        if (CanFold && !Ops.empty()) {
          if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot, VReg)) {
            Folded = true;
            if (FoundUse) {
              // Also folded uses, do not issue a load.
              eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
              nI.removeRange(index.getLoadIndex(), index.getDefIndex());
            }
            nI.removeRange(index.getDefIndex(), index.getStoreIndex());
          }
        }

        // Otherwise tell the spiller to issue a spill.
        if (!Folded) {
          LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
          bool isKill = LR->end == index.getStoreIndex();
          if (!MI->registerDefIsDead(nI.reg))
            // No need to spill a dead def.
            vrm.addSpillPoint(VReg, isKill, MI);
          if (isKill)
            AddedKill.insert(&nI);
        }
      }
      Id = SpillMBBs.find_next(Id);
    }
  }

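  // Illustrative example (editor's addition, not from the original source;
  // opcodes are hypothetical X86 choices): "folding the store into the def"
  // rewrites the defining instruction to write the stack slot directly, so no
  // separate spill instruction is needed. For a two-address def such as
  //
  //     %reg1024 = ADD32ri %reg1024, 1      ; value lives in stack slot <fi#3>
  //     MOV32mr <fi#3>, %reg1024            ; explicit store
  //
  // tryFoldMemoryOperand may let the target produce
  //
  //     ADD32mi <fi#3>, 1                   ; reload, add, and store folded
  //
  // in which case the corresponding pieces of the new interval are trimmed
  // above and no spill point is recorded.
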
  int Id = RestoreMBBs.find_first();
  while (Id != -1) {
    std::vector<SRInfo> &restores = RestoreIdxes[Id];
    for (unsigned i = 0, e = restores.size(); i != e; ++i) {
      SlotIndex index = restores[i].index;
      if (index == SlotIndex())
        continue;
      unsigned VReg = restores[i].vreg;
      LiveInterval &nI = getOrCreateInterval(VReg);
      bool isReMat = vrm.isReMaterialized(VReg);
      MachineInstr *MI = getInstructionFromIndex(index);
      bool CanFold = false;
      Ops.clear();
      if (restores[i].canFold) {
        CanFold = true;
        for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
          MachineOperand &MO = MI->getOperand(j);
          if (!MO.isReg() || MO.getReg() != VReg)
            continue;

          if (MO.isDef()) {
            // If this restore were to be folded, it would have been folded
            // already.
            CanFold = false;
            break;
          }
          Ops.push_back(j);
        }
      }

      // Fold the load into the use if possible.
      bool Folded = false;
      if (CanFold && !Ops.empty()) {
        if (!isReMat)
          Folded = tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot, VReg);
        else {
          MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
          int LdSlot = 0;
          bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
          // If the rematerializable def is a load, also try to fold it.
          if (isLoadSS || ReMatDefMI->getDesc().canFoldAsLoad())
            Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
                                          Ops, isLoadSS, LdSlot, VReg);
          if (!Folded) {
            unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
            if (ImpUse) {
              // Re-matting an instruction with a virtual register use. Add the
              // register as an implicit use on the use MI and update the
              // register interval's spill weight to HUGE_VALF to prevent it
              // from being spilled.
              LiveInterval &ImpLi = getInterval(ImpUse);
              ImpLi.weight = HUGE_VALF;
              MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
            }
          }
        }
      }
      // If folding is not possible / failed, then tell the spiller to issue a
      // load / rematerialization for us.
      if (Folded)
        nI.removeRange(index.getLoadIndex(), index.getDefIndex());
      else
        vrm.addRestorePoint(VReg, MI);
    }
    Id = RestoreMBBs.find_next(Id);
  }

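  // Illustrative example (editor's addition, not from the original source;
  // opcodes are hypothetical X86 choices): "folding the load into the use"
  // rewrites the user to read the stack slot directly instead of emitting an
  // explicit reload, e.g.
  //
  //     %reg1025 = MOV32rm <fi#3>           ; explicit reload
  //     %reg1026 = ADD32rr %reg1026, %reg1025
  //
  // may become
  //
  //     %reg1026 = ADD32rm %reg1026, <fi#3>
  //
  // For a rematerializable value whose def is itself a load, the remat def is
  // folded the same way instead of the stack-slot load.
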
  // Finalize intervals: add kills, finalize spill weights, and filter out
  // dead intervals.
  std::vector<LiveInterval*> RetNewLIs;
  for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
    LiveInterval *LI = NewLIs[i];
    if (!LI->empty()) {
      LI->weight /= SlotIndex::NUM * getApproximateInstructionCount(*LI);
      if (!AddedKill.count(LI)) {
        LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
        SlotIndex LastUseIdx = LR->end.getBaseIndex();
        MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
        int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
        assert(UseIdx != -1);
        if (!LastUse->isRegTiedToDefOperand(UseIdx)) {
          LastUse->getOperand(UseIdx).setIsKill();
          vrm.addKillPoint(LI->reg, LastUseIdx);
        }
      }
      RetNewLIs.push_back(LI);
    }
  }

  handleSpilledImpDefs(li, vrm, rc, RetNewLIs);
  return RetNewLIs;
}

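// Illustrative usage sketch (editor's addition; the caller shown here is
// hypothetical, but a linear scan style allocator drives this interface in a
// similar fashion): once an interval is picked for spilling, the allocator
// asks LiveIntervals for the replacement intervals and queues them for
// allocation.
//
//   SmallVector<LiveInterval*, 8> SpillIs;    // other intervals being spilled
//   std::vector<LiveInterval*> Added =
//     lis->addIntervalsForSpills(*cur, SpillIs, loopInfo, *vrm);
//   for (unsigned i = 0, e = Added.size(); i != e; ++i)
//     unhandled.push(Added[i]);               // allocate the short new ranges
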
/// hasAllocatableSuperReg - Return true if the specified physical register
/// has any super register that's allocatable.
bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS)
    if (allocatableRegs_[*AS] && hasInterval(*AS))
      return true;
  return false;
}

/// getRepresentativeReg - Find the largest super register of the specified
/// physical register.
unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
  // Find the largest super-register that is allocatable.
  unsigned BestReg = Reg;
  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
    unsigned SuperReg = *AS;
    if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) {
      BestReg = SuperReg;
      break;
    }
  }
  return BestReg;
}

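// Worked example (editor's addition; register names assume an X86 target and
// are for illustration only): for Reg == X86::AX the super-register chain is
// EAX, then RAX in 64-bit mode. getRepresentativeReg(AX) walks that chain and
// returns the widest super-register that has a live interval but no
// allocatable super-register of its own (typically EAX on x86-32), so that
// conflict counting and emergency spilling below operate on the full-width
// register that actually carries the value.
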
/// getNumConflictsWithPhysReg - Return the number of uses and defs of the
/// specified interval that conflict with the specified physical register.
unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
                                                   unsigned PhysReg) const {
  unsigned NumConflicts = 0;
  const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg));
  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
         E = mri_->reg_end(); I != E; ++I) {
    MachineOperand &O = I.getOperand();
    MachineInstr *MI = O.getParent();
    SlotIndex Index = getInstructionIndex(MI);
    if (pli.liveAt(Index))
      ++NumConflicts;
  }
  return NumConflicts;
}

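// Illustrative usage sketch (editor's addition; the helper below is
// hypothetical): a caller deciding which physical register to free up can use
// this count to pick the candidate that collides with the fewest uses/defs of
// the virtual register being allocated.
//
//   static unsigned pickCheapestPhysReg(const LiveIntervals &LIs,
//                                       const LiveInterval &VirtLI,
//                                       const SmallVectorImpl<unsigned> &Cands) {
//     unsigned Best = Cands[0], BestConflicts = ~0u;
//     for (unsigned i = 0, e = Cands.size(); i != e; ++i) {
//       unsigned C = LIs.getNumConflictsWithPhysReg(VirtLI, Cands[i]);
//       if (C < BestConflicts) { BestConflicts = C; Best = Cands[i]; }
//     }
//     return Best;
//   }
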
/// spillPhysRegAroundRegDefsUses - Spill the specified physical register
/// around all defs and uses of the specified interval. Return true if it
/// was able to cut its interval.
bool LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
                                                  unsigned PhysReg,
                                                  VirtRegMap &vrm) {
  unsigned SpillReg = getRepresentativeReg(PhysReg);

  for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
    // If there are registers which alias PhysReg but are not sub-registers of
    // the chosen representative super register, assert, since we can't handle
    // that case yet.
    assert(*AS == SpillReg || !allocatableRegs_[*AS] || !hasInterval(*AS) ||
           tri_->isSuperRegister(*AS, SpillReg));

  bool Cut = false;
  SmallVector<unsigned, 4> PRegs;
  if (hasInterval(SpillReg))
    PRegs.push_back(SpillReg);
  else {
    SmallSet<unsigned, 4> Added;
    for (const unsigned* AS = tri_->getSubRegisters(SpillReg); *AS; ++AS)
      if (Added.insert(*AS) && hasInterval(*AS)) {
        PRegs.push_back(*AS);
        for (const unsigned* ASS = tri_->getSubRegisters(*AS); *ASS; ++ASS)
          Added.insert(*ASS);
      }
  }

  SmallPtrSet<MachineInstr*, 8> SeenMIs;
  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
         E = mri_->reg_end(); I != E; ++I) {
    MachineOperand &O = I.getOperand();
    MachineInstr *MI = O.getParent();
    if (SeenMIs.count(MI))
      continue;
    SeenMIs.insert(MI);
    SlotIndex Index = getInstructionIndex(MI);
    for (unsigned i = 0, e = PRegs.size(); i != e; ++i) {
      unsigned PReg = PRegs[i];
      LiveInterval &pli = getInterval(PReg);
      if (!pli.liveAt(Index))
        continue;
      vrm.addEmergencySpill(PReg, MI);
      SlotIndex StartIdx = Index.getLoadIndex();
      SlotIndex EndIdx = Index.getNextIndex().getBaseIndex();
      if (pli.isInOneLiveRange(StartIdx, EndIdx)) {
        pli.removeRange(StartIdx, EndIdx);
        Cut = true;
      } else {
        std::string msg;
        raw_string_ostream Msg(msg);
        Msg << "Ran out of registers during register allocation!";
        if (MI->isInlineAsm()) {
          Msg << "\nPlease check your inline asm statement for invalid "
              << "constraints:\n";
          MI->print(Msg, tm_);
        }
        llvm_report_error(Msg.str());
      }
      for (const unsigned* AS = tri_->getSubRegisters(PReg); *AS; ++AS) {
        if (!hasInterval(*AS))
          continue;
        LiveInterval &spli = getInterval(*AS);
        if (spli.liveAt(Index))
          spli.removeRange(Index.getLoadIndex(),
                           Index.getNextIndex().getBaseIndex());
      }
    }
  }
  return Cut;
}

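// Illustrative usage sketch (editor's addition; the surrounding caller and the
// assignVirtToPhys helper are hypothetical): an allocator that must hand
// PhysReg to a virtual register while PhysReg is still live can first check
// how many conflicts that would cause, then cut the physical interval around
// the conflicting points.
//
//   if (lis->getNumConflictsWithPhysReg(VirtLI, PhysReg) < Threshold &&
//       lis->spillPhysRegAroundRegDefsUses(VirtLI, PhysReg, *vrm))
//     assignVirtToPhys(VirtLI.reg, PhysReg);   // hypothetical helper
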
LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg,
                                                  MachineInstr* startInst) {
  LiveInterval& Interval = getOrCreateInterval(reg);
  VNInfo* VN = Interval.getNextValue(
    SlotIndex(getInstructionIndex(startInst).getDefIndex()),
    startInst, true, getVNInfoAllocator());
  VN->setHasPHIKill(true);
  VN->kills.push_back(indexes_->getTerminatorGap(startInst->getParent()));
  LiveRange LR(
    SlotIndex(getInstructionIndex(startInst).getDefIndex()),
    getMBBEndIdx(startInst->getParent()), VN);
  Interval.addRange(LR);