1 //===-- LiveIntervalAnalysis.cpp - Live Interval Analysis -----------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file implements the LiveInterval analysis pass which is used
11 // by the Linear Scan Register allocator. This pass linearizes the
12 // basic blocks of the function in DFS order and uses the
13 // LiveVariables pass to conservatively compute live intervals for
14 // each virtual and physical register.
16 //===----------------------------------------------------------------------===//
18 #define DEBUG_TYPE "liveintervals"
19 #include "llvm/CodeGen/LiveIntervalAnalysis.h"
20 #include "VirtRegMap.h"
21 #include "llvm/Value.h"
22 #include "llvm/Analysis/AliasAnalysis.h"
23 #include "llvm/CodeGen/LiveVariables.h"
24 #include "llvm/CodeGen/MachineFrameInfo.h"
25 #include "llvm/CodeGen/MachineInstr.h"
26 #include "llvm/CodeGen/MachineInstrBuilder.h"
27 #include "llvm/CodeGen/MachineLoopInfo.h"
28 #include "llvm/CodeGen/MachineMemOperand.h"
29 #include "llvm/CodeGen/MachineRegisterInfo.h"
30 #include "llvm/CodeGen/Passes.h"
31 #include "llvm/CodeGen/ProcessImplicitDefs.h"
32 #include "llvm/Target/TargetRegisterInfo.h"
33 #include "llvm/Target/TargetInstrInfo.h"
34 #include "llvm/Target/TargetMachine.h"
35 #include "llvm/Target/TargetOptions.h"
36 #include "llvm/Support/CommandLine.h"
37 #include "llvm/Support/Debug.h"
38 #include "llvm/Support/ErrorHandling.h"
39 #include "llvm/Support/raw_ostream.h"
40 #include "llvm/ADT/DepthFirstIterator.h"
41 #include "llvm/ADT/SmallSet.h"
42 #include "llvm/ADT/Statistic.h"
43 #include "llvm/ADT/STLExtras.h"
49 // Hidden options for help debugging.
50 static cl::opt<bool> DisableReMat("disable-rematerialization",
51 cl::init(false), cl::Hidden);
53 static cl::opt<bool> EnableFastSpilling("fast-spill",
54 cl::init(false), cl::Hidden);
56 STATISTIC(numIntervals , "Number of original intervals");
57 STATISTIC(numFolds , "Number of loads/stores folded into instructions");
58 STATISTIC(numSplits , "Number of intervals split");
60 char LiveIntervals::ID = 0;
61 static RegisterPass<LiveIntervals> X("liveintervals", "Live Interval Analysis");
63 void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
65 AU.addRequired<AliasAnalysis>();
66 AU.addPreserved<AliasAnalysis>();
67 AU.addPreserved<LiveVariables>();
68 AU.addRequired<LiveVariables>();
69 AU.addPreservedID(MachineLoopInfoID);
70 AU.addPreservedID(MachineDominatorsID);
73 AU.addPreservedID(PHIEliminationID);
74 AU.addRequiredID(PHIEliminationID);
77 AU.addRequiredID(TwoAddressInstructionPassID);
78 AU.addPreserved<ProcessImplicitDefs>();
79 AU.addRequired<ProcessImplicitDefs>();
80 AU.addPreserved<SlotIndexes>();
81 AU.addRequiredTransitive<SlotIndexes>();
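// Note: SlotIndexes is required transitively so that it is kept alive for as
// long as anything that uses LiveIntervals may still query instruction indexes.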
82 MachineFunctionPass::getAnalysisUsage(AU);
85 void LiveIntervals::releaseMemory() {
86 // Free the live intervals themselves.
87 for (DenseMap<unsigned, LiveInterval*>::iterator I = r2iMap_.begin(),
88 E = r2iMap_.end(); I != E; ++I)
93 // Release VNInfo memory regions after all VNInfo objects are dtor'd.
94 VNInfoAllocator.Reset();
95 while (!CloneMIs.empty()) {
96 MachineInstr *MI = CloneMIs.back();
98 mf_->DeleteMachineInstr(MI);
102 /// runOnMachineFunction - Compute live intervals for the whole function.
104 bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
106 mri_ = &mf_->getRegInfo();
107 tm_ = &fn.getTarget();
108 tri_ = tm_->getRegisterInfo();
109 tii_ = tm_->getInstrInfo();
110 aa_ = &getAnalysis<AliasAnalysis>();
111 lv_ = &getAnalysis<LiveVariables>();
112 indexes_ = &getAnalysis<SlotIndexes>();
113 allocatableRegs_ = tri_->getAllocatableSet(fn);
117 numIntervals += getNumIntervals();
123 /// print - Implement the dump method.
124 void LiveIntervals::print(raw_ostream &OS, const Module* ) const {
125 OS << "********** INTERVALS **********\n";
126 for (const_iterator I = begin(), E = end(); I != E; ++I) {
127 I->second->print(OS, tri_);
134 void LiveIntervals::printInstrs(raw_ostream &OS) const {
135 OS << "********** MACHINEINSTRS **********\n";
137 for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end();
138 mbbi != mbbe; ++mbbi) {
139 OS << "BB#" << mbbi->getNumber()
140 << ":\t\t# derived from " << mbbi->getName() << "\n";
141 for (MachineBasicBlock::iterator mii = mbbi->begin(),
142 mie = mbbi->end(); mii != mie; ++mii) {
143 if (mii->isDebugValue())
144 OS << SlotIndex::getEmptyKey() << '\t' << *mii;
146 OS << getInstructionIndex(mii) << '\t' << *mii;
151 void LiveIntervals::dumpInstrs() const {
155 bool LiveIntervals::conflictsWithPhysReg(const LiveInterval &li,
156 VirtRegMap &vrm, unsigned reg) {
157 // We don't handle fancy stuff crossing basic block boundaries
158 if (li.ranges.size() != 1)
160 const LiveRange &range = li.ranges.front();
161 SlotIndex idx = range.start.getBaseIndex();
162 SlotIndex end = range.end.getPrevSlot().getBaseIndex().getNextIndex();
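// The two conversions above round the half-open range to whole-instruction
// (base) indexes, so the scan below walks complete instructions from the
// first to the last one covered by the range.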
164 // Skip deleted instructions
165 MachineInstr *firstMI = getInstructionFromIndex(idx);
166 while (!firstMI && idx != end) {
167 idx = idx.getNextIndex();
168 firstMI = getInstructionFromIndex(idx);
173 // Find last instruction in range
174 SlotIndex lastIdx = end.getPrevIndex();
175 MachineInstr *lastMI = getInstructionFromIndex(lastIdx);
176 while (!lastMI && lastIdx != idx) {
177 lastIdx = lastIdx.getPrevIndex();
178 lastMI = getInstructionFromIndex(lastIdx);
183 // Range cannot cross basic block boundaries or terminators
184 MachineBasicBlock *MBB = firstMI->getParent();
185 if (MBB != lastMI->getParent() || lastMI->getDesc().isTerminator())
188 MachineBasicBlock::const_iterator E = lastMI;
190 for (MachineBasicBlock::const_iterator I = firstMI; I != E; ++I) {
191 const MachineInstr &MI = *I;
193 // Allow copies to and from li.reg
194 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
195 if (tii_->isMoveInstr(MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
196 if (SrcReg == li.reg || DstReg == li.reg)
199 // Check for operands using reg
200 for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
201 const MachineOperand& mop = MI.getOperand(i);
204 unsigned PhysReg = mop.getReg();
205 if (PhysReg == 0 || PhysReg == li.reg)
207 if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
208 if (!vrm.hasPhys(PhysReg))
210 PhysReg = vrm.getPhys(PhysReg);
212 if (PhysReg && tri_->regsOverlap(PhysReg, reg))
217 // No conflicts found.
221 /// conflictsWithPhysRegRef - Similar to conflictsWithPhysReg except that
222 /// it can check uses as well.
223 bool LiveIntervals::conflictsWithPhysRegRef(LiveInterval &li,
224 unsigned Reg, bool CheckUse,
225 SmallPtrSet<MachineInstr*,32> &JoinedCopies) {
226 for (LiveInterval::Ranges::const_iterator
227 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
228 for (SlotIndex index = I->start.getBaseIndex(),
229 end = I->end.getPrevSlot().getBaseIndex().getNextIndex();
231 index = index.getNextIndex()) {
232 MachineInstr *MI = getInstructionFromIndex(index);
234 continue; // skip deleted instructions
236 if (JoinedCopies.count(MI))
238 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
239 MachineOperand& MO = MI->getOperand(i);
242 if (MO.isUse() && !CheckUse)
244 unsigned PhysReg = MO.getReg();
245 if (PhysReg == 0 || TargetRegisterInfo::isVirtualRegister(PhysReg))
247 if (tri_->isSubRegister(Reg, PhysReg))
257 static void printRegName(unsigned reg, const TargetRegisterInfo* tri_) {
258 if (TargetRegisterInfo::isPhysicalRegister(reg))
259 dbgs() << tri_->getName(reg);
261 dbgs() << "%reg" << reg;
265 void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
266 MachineBasicBlock::iterator mi,
270 LiveInterval &interval) {
272 dbgs() << "\t\tregister: ";
273 printRegName(interval.reg, tri_);
276 // Virtual registers may be defined multiple times (due to phi
277 // elimination and 2-addr elimination). Much of what we do only has to be
278 // done once for the vreg. We use an empty interval to detect the first
279 // time we see a vreg.
280 LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);
281 if (interval.empty()) {
282 // Get the Idx of the defining instruction.
283 SlotIndex defIndex = MIIdx.getDefIndex();
284 // Earlyclobbers move back one, so that they overlap the live ranges of the instruction's uses.
286 if (MO.isEarlyClobber())
287 defIndex = MIIdx.getUseIndex();
289 MachineInstr *CopyMI = NULL;
290 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
291 if (mi->isExtractSubreg() || mi->isInsertSubreg() || mi->isSubregToReg() ||
292 tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
294 // Earlyclobbers move back one.
295 ValNo = interval.getNextValue(defIndex, CopyMI, true, VNInfoAllocator);
297 assert(ValNo->id == 0 && "First value in interval is not 0?");
299 // Loop over all of the blocks that the vreg is defined in. There are
300 // two cases we have to handle here. The most common case is a vreg
301 // whose lifetime is contained within a basic block. In this case there
302 // will be a single kill, in MBB, which comes after the definition.
303 if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
304 // FIXME: what about dead vars?
306 if (vi.Kills[0] != mi)
307 killIdx = getInstructionIndex(vi.Kills[0]).getDefIndex();
309 killIdx = defIndex.getStoreIndex();
311 // If the kill happens after the definition, we have an intra-block live interval.
313 if (killIdx > defIndex) {
314 assert(vi.AliveBlocks.empty() &&
315 "Shouldn't be alive across any blocks!");
316 LiveRange LR(defIndex, killIdx, ValNo);
317 interval.addRange(LR);
318 DEBUG(dbgs() << " +" << LR << "\n");
319 ValNo->addKill(killIdx);
324 // The other case we handle is when a virtual register lives to the end
325 // of the defining block, potentially live across some blocks, then is
326 // live into some number of blocks, but gets killed. Start by adding a
327 // range that goes from this definition to the end of the defining block.
328 LiveRange NewLR(defIndex, getMBBEndIdx(mbb), ValNo);
329 DEBUG(dbgs() << " +" << NewLR);
330 interval.addRange(NewLR);
332 // Iterate over all of the blocks that the variable is completely
333 // live in, adding [instrIndex(begin), instrIndex(end)+4) to the live interval.
335 for (SparseBitVector<>::iterator I = vi.AliveBlocks.begin(),
336 E = vi.AliveBlocks.end(); I != E; ++I) {
337 MachineBasicBlock *aliveBlock = mf_->getBlockNumbered(*I);
338 LiveRange LR(getMBBStartIdx(aliveBlock), getMBBEndIdx(aliveBlock), ValNo);
339 interval.addRange(LR);
340 DEBUG(dbgs() << " +" << LR);
343 // Finally, this virtual register is live from the start of any killing
344 // block to the 'use' slot of the killing instruction.
345 for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
346 MachineInstr *Kill = vi.Kills[i];
348 getInstructionIndex(Kill).getDefIndex();
349 LiveRange LR(getMBBStartIdx(Kill->getParent()), killIdx, ValNo);
350 interval.addRange(LR);
351 ValNo->addKill(killIdx);
352 DEBUG(dbgs() << " +" << LR);
356 // If this is the second time we see a virtual register definition, it
357 // must be due to phi elimination or two addr elimination. If this is
358 // the result of two address elimination, then the vreg is one of the
359 // def-and-use register operands.
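//
// A hypothetical example (register names and slot numbers illustrative only):
//   4  %reg1024 = MOV %reg1025          ; original def, value #0
//   8  %reg1024 = ADD %reg1024, 1       ; tied (two-address) redefinition
// The code below gives the range [4,8) a fresh value number defined by the
// copy at 4, and re-anchors the original value #0 at the redefinition at 8.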
360 if (mi->isRegTiedToUseOperand(MOIdx)) {
361 // If this is a two-address definition, then we have already processed
362 // the live range. The only problem is that we didn't realize there
363 // are actually two values in the live interval. Because of this we
364 // need to take the LiveRegion that defines this register and split it into two values.
366 assert(interval.containsOneValue());
367 SlotIndex DefIndex = interval.getValNumInfo(0)->def.getDefIndex();
368 SlotIndex RedefIndex = MIIdx.getDefIndex();
369 if (MO.isEarlyClobber())
370 RedefIndex = MIIdx.getUseIndex();
372 const LiveRange *OldLR =
373 interval.getLiveRangeContaining(RedefIndex.getUseIndex());
374 VNInfo *OldValNo = OldLR->valno;
376 // Delete the initial value, which should be short and continuous,
377 // because the 2-addr copy must be in the same MBB as the redef.
378 interval.removeRange(DefIndex, RedefIndex);
380 // Two-address vregs should always only be redefined once. This means
381 // that at this point, there should be exactly one value number in it.
382 assert(interval.containsOneValue() && "Unexpected 2-addr liveint!");
384 // The new value number (#1) is defined by the instruction we claimed defined value #0.
386 VNInfo *ValNo = interval.getNextValue(OldValNo->def, OldValNo->getCopy(),
387 false, // update at *
389 ValNo->setFlags(OldValNo->getFlags()); // * <- updating here
391 // Value#0 is now defined by the 2-addr instruction.
392 OldValNo->def = RedefIndex;
393 OldValNo->setCopy(0);
395 // Add the new live interval which replaces the range for the input copy.
396 LiveRange LR(DefIndex, RedefIndex, ValNo);
397 DEBUG(dbgs() << " replace range with " << LR);
398 interval.addRange(LR);
399 ValNo->addKill(RedefIndex);
401 // If this redefinition is dead, we need to add a dummy unit live
402 // range covering the def slot.
404 interval.addRange(LiveRange(RedefIndex, RedefIndex.getStoreIndex(),
408 dbgs() << " RESULT: ";
409 interval.print(dbgs(), tri_);
412 // Otherwise, this must be because of phi elimination. If this is the
413 // first redefinition of the vreg that we have seen, go back and change
414 // the live range in the PHI block to be a different value number.
415 if (interval.containsOneValue()) {
417 VNInfo *VNI = interval.getValNumInfo(0);
418 // Phi elimination may have reused the register for multiple identical
419 // phi nodes. There will be a kill per phi. Remove the old ranges that
420 // we now know have an incorrect number.
421 for (unsigned ki=0, ke=vi.Kills.size(); ki != ke; ++ki) {
422 MachineInstr *Killer = vi.Kills[ki];
423 SlotIndex Start = getMBBStartIdx(Killer->getParent());
424 SlotIndex End = getInstructionIndex(Killer).getDefIndex();
426 dbgs() << "\n\t\trenaming [" << Start << "," << End << "] in: ";
427 interval.print(dbgs(), tri_);
429 interval.removeRange(Start, End);
431 // Replace the interval with one of a NEW value number. Note that
432 // this value number isn't actually defined by an instruction: it is a PHI def.
434 LiveRange LR(Start, End,
435 interval.getNextValue(SlotIndex(Start, true),
436 0, false, VNInfoAllocator));
437 LR.valno->setIsPHIDef(true);
438 interval.addRange(LR);
439 LR.valno->addKill(End);
442 MachineBasicBlock *killMBB = getMBBFromIndex(VNI->def);
443 VNI->addKill(indexes_->getTerminatorGap(killMBB));
444 VNI->setHasPHIKill(true);
446 dbgs() << " RESULT: ";
447 interval.print(dbgs(), tri_);
451 // In the case of PHI elimination, each variable definition is only
452 // live until the end of the block. We've already taken care of the
453 // rest of the live range.
454 SlotIndex defIndex = MIIdx.getDefIndex();
455 if (MO.isEarlyClobber())
456 defIndex = MIIdx.getUseIndex();
459 MachineInstr *CopyMI = NULL;
460 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
461 if (mi->isExtractSubreg() || mi->isInsertSubreg() || mi->isSubregToReg()||
462 tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
464 ValNo = interval.getNextValue(defIndex, CopyMI, true, VNInfoAllocator);
466 SlotIndex killIndex = getMBBEndIdx(mbb);
467 LiveRange LR(defIndex, killIndex, ValNo);
468 interval.addRange(LR);
469 ValNo->addKill(indexes_->getTerminatorGap(mbb));
470 ValNo->setHasPHIKill(true);
471 DEBUG(dbgs() << " +" << LR);
475 DEBUG(dbgs() << '\n');
478 void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
479 MachineBasicBlock::iterator mi,
482 LiveInterval &interval,
483 MachineInstr *CopyMI) {
484 // A physical register cannot be live across basic blocks, so its
485 // lifetime must end somewhere in its defining basic block.
487 dbgs() << "\t\tregister: ";
488 printRegName(interval.reg, tri_);
491 SlotIndex baseIndex = MIIdx;
492 SlotIndex start = baseIndex.getDefIndex();
493 // Earlyclobbers move back one.
494 if (MO.isEarlyClobber())
495 start = MIIdx.getUseIndex();
496 SlotIndex end = start;
498 // If it is not used after definition, it is considered dead at
499 // the instruction defining it. Hence its interval is:
500 // [defSlot(def), defSlot(def)+1)
501 // For earlyclobbers, the defSlot was pushed back one; the extra
502 // advance below compensates.
504 DEBUG(dbgs() << " dead");
505 end = start.getStoreIndex();
509 // If it is not dead on definition, it must be killed by a
510 // subsequent instruction. Hence its interval is:
511 // [defSlot(def), useSlot(kill)+1)
512 baseIndex = baseIndex.getNextIndex();
513 while (++mi != MBB->end()) {
515 if (getInstructionFromIndex(baseIndex) == 0)
516 baseIndex = indexes_->getNextNonNullIndex(baseIndex);
518 if (mi->killsRegister(interval.reg, tri_)) {
519 DEBUG(dbgs() << " killed");
520 end = baseIndex.getDefIndex();
523 int DefIdx = mi->findRegisterDefOperandIdx(interval.reg, false, tri_);
525 if (mi->isRegTiedToUseOperand(DefIdx)) {
526 // Two-address instruction.
527 end = baseIndex.getDefIndex();
529 // Another instruction redefines the register before it is ever read.
530 // Then the register is essentially dead at the instruction that defines
531 // it. Hence its interval is:
532 // [defSlot(def), defSlot(def)+1)
533 DEBUG(dbgs() << " dead");
534 end = start.getStoreIndex();
540 baseIndex = baseIndex.getNextIndex();
543 // The only case we should have a dead physreg here without a killing
544 // instruction, or an instruction where we know it's dead, is if it is
545 // live-in to the function and never used. Another possible case is that the
546 // implicit use of the physical register has been deleted by the two-address pass.
547 end = start.getStoreIndex();
550 assert(start < end && "did not find end of interval?");
552 // Already exists? Extend old live interval.
553 LiveInterval::iterator OldLR = interval.FindLiveRangeContaining(start);
554 bool Extend = OldLR != interval.end();
555 VNInfo *ValNo = Extend
556 ? OldLR->valno : interval.getNextValue(start, CopyMI, true, VNInfoAllocator);
557 if (MO.isEarlyClobber() && Extend)
558 ValNo->setHasRedefByEC(true);
559 LiveRange LR(start, end, ValNo);
560 interval.addRange(LR);
561 LR.valno->addKill(end);
562 DEBUG(dbgs() << " +" << LR << '\n');
565 void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
566 MachineBasicBlock::iterator MI,
570 if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
571 handleVirtualRegisterDef(MBB, MI, MIIdx, MO, MOIdx,
572 getOrCreateInterval(MO.getReg()));
573 else if (allocatableRegs_[MO.getReg()]) {
574 MachineInstr *CopyMI = NULL;
575 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
576 if (MI->isExtractSubreg() || MI->isInsertSubreg() || MI->isSubregToReg() ||
577 tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
579 handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
580 getOrCreateInterval(MO.getReg()), CopyMI);
581 // Def of a register also defines its sub-registers.
582 for (const unsigned* AS = tri_->getSubRegisters(MO.getReg()); *AS; ++AS)
583 // If MI also modifies the sub-register explicitly, avoid processing it
584 // more than once. Do not pass in TRI here so it checks for exact match.
585 if (!MI->modifiesRegister(*AS))
586 handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
587 getOrCreateInterval(*AS), 0);
591 void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
593 LiveInterval &interval, bool isAlias) {
595 dbgs() << "\t\tlivein register: ";
596 printRegName(interval.reg, tri_);
599 // Look for kills; if the value reaches a def before it's killed, then it
600 // shouldn't be considered a live-in.
601 MachineBasicBlock::iterator mi = MBB->begin();
602 SlotIndex baseIndex = MIIdx;
603 SlotIndex start = baseIndex;
604 if (getInstructionFromIndex(baseIndex) == 0)
605 baseIndex = indexes_->getNextNonNullIndex(baseIndex);
607 SlotIndex end = baseIndex;
608 bool SeenDefUse = false;
610 while (mi != MBB->end()) {
611 if (mi->killsRegister(interval.reg, tri_)) {
612 DEBUG(dbgs() << " killed");
613 end = baseIndex.getDefIndex();
616 } else if (mi->modifiesRegister(interval.reg, tri_)) {
617 // Another instruction redefines the register before it is ever read.
618 // Then the register is essentially dead at the instruction that defines
619 // it. Hence its interval is:
620 // [defSlot(def), defSlot(def)+1)
621 DEBUG(dbgs() << " dead");
622 end = start.getStoreIndex();
628 if (mi != MBB->end()) {
629 baseIndex = indexes_->getNextNonNullIndex(baseIndex);
633 // Live-in register might not be used at all.
636 DEBUG(dbgs() << " dead");
637 end = MIIdx.getStoreIndex();
639 DEBUG(dbgs() << " live through");
645 interval.getNextValue(SlotIndex(getMBBStartIdx(MBB), true),
646 0, false, VNInfoAllocator);
647 vni->setIsPHIDef(true);
648 LiveRange LR(start, end, vni);
650 interval.addRange(LR);
651 LR.valno->addKill(end);
652 DEBUG(dbgs() << " +" << LR << '\n');
655 /// computeIntervals - computes the live intervals for virtual
656 /// registers. For some ordering of the machine instructions [1,N], a
657 /// live interval is an interval [i, j) where 1 <= i <= j < N for
658 /// which a variable is live.
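/// For example (indexes illustrative only): a virtual register defined at
/// instruction index 4 and last used at index 12 gets the half-open live
/// interval [4, 12+1).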
659 void LiveIntervals::computeIntervals() {
660 DEBUG(dbgs() << "********** COMPUTING LIVE INTERVALS **********\n"
661 << "********** Function: "
662 << ((Value*)mf_->getFunction())->getName() << '\n');
664 SmallVector<unsigned, 8> UndefUses;
665 for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
667 MachineBasicBlock *MBB = MBBI;
671 // Track the index of the current machine instr.
672 SlotIndex MIIndex = getMBBStartIdx(MBB);
673 DEBUG(dbgs() << MBB->getName() << ":\n");
675 // Create intervals for live-ins to this BB first.
676 for (MachineBasicBlock::const_livein_iterator LI = MBB->livein_begin(),
677 LE = MBB->livein_end(); LI != LE; ++LI) {
678 handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
679 // Multiple live-ins can alias the same register.
680 for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
681 if (!hasInterval(*AS))
682 handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
686 // Skip over empty initial indices.
687 if (getInstructionFromIndex(MIIndex) == 0)
688 MIIndex = indexes_->getNextNonNullIndex(MIIndex);
690 for (MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();
692 DEBUG(dbgs() << MIIndex << "\t" << *MI);
693 if (MI->isDebugValue())
697 for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
698 MachineOperand &MO = MI->getOperand(i);
699 if (!MO.isReg() || !MO.getReg())
702 // handle register defs - build intervals
704 handleRegisterDef(MBB, MI, MIIndex, MO, i);
705 else if (MO.isUndef())
706 UndefUses.push_back(MO.getReg());
709 // Move to the next instr slot.
710 MIIndex = indexes_->getNextNonNullIndex(MIIndex);
714 // Create empty intervals for registers defined by implicit_def's (except
715 // for those implicit_defs that define values which are live out of their blocks).
717 for (unsigned i = 0, e = UndefUses.size(); i != e; ++i) {
718 unsigned UndefReg = UndefUses[i];
719 (void)getOrCreateInterval(UndefReg);
723 LiveInterval* LiveIntervals::createInterval(unsigned reg) {
724 float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ? HUGE_VALF : 0.0F;
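// Physical register intervals get an infinite spill weight so the spiller
// never tries to spill them; virtual register intervals start at weight 0
// and accumulate weight from their uses later.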
725 return new LiveInterval(reg, Weight);
728 /// dupInterval - Duplicate a live interval. The caller is responsible for
729 /// managing the allocated memory.
730 LiveInterval* LiveIntervals::dupInterval(LiveInterval *li) {
731 LiveInterval *NewLI = createInterval(li->reg);
732 NewLI->Copy(*li, mri_, getVNInfoAllocator());
736 /// getVNInfoSourceReg - Helper function that parses the specified VNInfo
737 /// copy field and returns the source register that defines it.
738 unsigned LiveIntervals::getVNInfoSourceReg(const VNInfo *VNI) const {
742 if (VNI->getCopy()->isExtractSubreg()) {
743 // If it's extracting out of a physical register, return the sub-register.
744 unsigned Reg = VNI->getCopy()->getOperand(1).getReg();
745 if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
746 unsigned SrcSubReg = VNI->getCopy()->getOperand(2).getImm();
747 unsigned DstSubReg = VNI->getCopy()->getOperand(0).getSubReg();
748 if (SrcSubReg == DstSubReg)
749 // %reg1034:3<def> = EXTRACT_SUBREG %EDX, 3
750 // reg1034 can still be coalesced to EDX.
752 assert(DstSubReg == 0);
753 Reg = tri_->getSubReg(Reg, VNI->getCopy()->getOperand(2).getImm());
756 } else if (VNI->getCopy()->isInsertSubreg() ||
757 VNI->getCopy()->isSubregToReg())
758 return VNI->getCopy()->getOperand(2).getReg();
760 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
761 if (tii_->isMoveInstr(*VNI->getCopy(), SrcReg, DstReg, SrcSubReg, DstSubReg))
763 llvm_unreachable("Unrecognized copy instruction!");
767 //===----------------------------------------------------------------------===//
768 // Register allocator hooks.
771 /// getReMatImplicitUse - If the remat definition MI has one (for now, we only
772 /// allow one) virtual register operand, then its uses are implicitly using
773 /// the register. Returns the virtual register.
774 unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
775 MachineInstr *MI) const {
777 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
778 MachineOperand &MO = MI->getOperand(i);
779 if (!MO.isReg() || !MO.isUse())
781 unsigned Reg = MO.getReg();
782 if (Reg == 0 || Reg == li.reg)
785 if (TargetRegisterInfo::isPhysicalRegister(Reg) &&
786 !allocatableRegs_[Reg])
788 // FIXME: For now, only remat MI with at most one register operand.
790 "Can't rematerialize instruction with multiple register operand!");
799 /// isValNoAvailableAt - Return true if the val# of the specified interval
800 /// which reaches the given instruction also reaches the specified use index.
801 bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
802 SlotIndex UseIdx) const {
803 SlotIndex Index = getInstructionIndex(MI);
804 VNInfo *ValNo = li.FindLiveRangeContaining(Index)->valno;
805 LiveInterval::const_iterator UI = li.FindLiveRangeContaining(UseIdx);
806 return UI != li.end() && UI->valno == ValNo;
809 /// isReMaterializable - Returns true if the definition MI of the specified
810 /// val# of the specified interval is re-materializable.
811 bool LiveIntervals::isReMaterializable(const LiveInterval &li,
812 const VNInfo *ValNo, MachineInstr *MI,
813 SmallVectorImpl<LiveInterval*> &SpillIs,
818 if (!tii_->isTriviallyReMaterializable(MI, aa_))
821 // Target-specific code can mark an instruction as being rematerializable
822 // if it has one virtual reg use, though it had better be something like
823 // a PIC base register which is likely to be live everywhere.
824 unsigned ImpUse = getReMatImplicitUse(li, MI);
826 const LiveInterval &ImpLi = getInterval(ImpUse);
827 for (MachineRegisterInfo::use_iterator ri = mri_->use_begin(li.reg),
828 re = mri_->use_end(); ri != re; ++ri) {
829 MachineInstr *UseMI = &*ri;
830 SlotIndex UseIdx = getInstructionIndex(UseMI);
831 if (li.FindLiveRangeContaining(UseIdx)->valno != ValNo)
833 if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
837 // If a register operand of the re-materialized instruction is going to
838 // be spilled next, then it's not legal to re-materialize this instruction.
839 for (unsigned i = 0, e = SpillIs.size(); i != e; ++i)
840 if (ImpUse == SpillIs[i]->reg)
846 /// isReMaterializable - Returns true if the definition MI of the specified
847 /// val# of the specified interval is re-materializable.
848 bool LiveIntervals::isReMaterializable(const LiveInterval &li,
849 const VNInfo *ValNo, MachineInstr *MI) {
850 SmallVector<LiveInterval*, 4> Dummy1;
852 return isReMaterializable(li, ValNo, MI, Dummy1, Dummy2);
855 /// isReMaterializable - Returns true if the defining MI of every
856 /// val# of the specified interval is re-materializable.
857 bool LiveIntervals::isReMaterializable(const LiveInterval &li,
858 SmallVectorImpl<LiveInterval*> &SpillIs,
861 for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
863 const VNInfo *VNI = *i;
865 continue; // Dead val#.
866 // Is the def for the val# rematerializable?
867 if (!VNI->isDefAccurate())
869 MachineInstr *ReMatDefMI = getInstructionFromIndex(VNI->def);
870 bool DefIsLoad = false;
872 !isReMaterializable(li, VNI, ReMatDefMI, SpillIs, DefIsLoad))
879 /// FilterFoldedOps - Filter out two-address use operands. Return
880 /// true if it finds any issue with the operands that ought to prevent folding.
882 static bool FilterFoldedOps(MachineInstr *MI,
883 SmallVector<unsigned, 2> &Ops,
885 SmallVector<unsigned, 2> &FoldOps) {
887 for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
888 unsigned OpIdx = Ops[i];
889 MachineOperand &MO = MI->getOperand(OpIdx);
890 // FIXME: fold subreg use.
894 MRInfo |= (unsigned)VirtRegMap::isMod;
896 // Filter out two-address use operand(s).
897 if (MI->isRegTiedToDefOperand(OpIdx)) {
898 MRInfo = VirtRegMap::isModRef;
901 MRInfo |= (unsigned)VirtRegMap::isRef;
903 FoldOps.push_back(OpIdx);
909 /// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
910 /// slot / to reg or any rematerialized load into ith operand of specified
911 /// MI. If it is successful, MI is updated with the newly created MI and true is returned.
913 bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
914 VirtRegMap &vrm, MachineInstr *DefMI,
916 SmallVector<unsigned, 2> &Ops,
917 bool isSS, int Slot, unsigned Reg) {
918 // If it is an implicit def instruction, just delete it.
919 if (MI->isImplicitDef()) {
920 RemoveMachineInstrFromMaps(MI);
921 vrm.RemoveMachineInstrFromMaps(MI);
922 MI->eraseFromParent();
927 // Filter the list of operand indexes that are to be folded. Abort if
928 // any operand will prevent folding.
930 SmallVector<unsigned, 2> FoldOps;
931 if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
934 // The only time it's safe to fold into a two address instruction is when
935 // it's folding reload and spill from / into a spill stack slot.
936 if (DefMI && (MRInfo & VirtRegMap::isMod))
939 MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(*mf_, MI, FoldOps, Slot)
940 : tii_->foldMemoryOperand(*mf_, MI, FoldOps, DefMI);
942 // Remember this instruction uses the spill slot.
943 if (isSS) vrm.addSpillSlotUse(Slot, fmi);
945 // Attempt to fold the memory reference into the instruction. If
946 // we can do this, we don't need to insert spill code.
947 MachineBasicBlock &MBB = *MI->getParent();
948 if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
949 vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
950 vrm.transferSpillPts(MI, fmi);
951 vrm.transferRestorePts(MI, fmi);
952 vrm.transferEmergencySpills(MI, fmi);
953 ReplaceMachineInstrInMaps(MI, fmi);
954 MI = MBB.insert(MBB.erase(MI), fmi);
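// Replace the old instruction with the folded one at the same position;
// MI is passed by reference, so the caller sees the new instruction.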
961 /// canFoldMemoryOperand - Returns true if the specified load / store
962 /// folding is possible.
963 bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
964 SmallVector<unsigned, 2> &Ops,
966 // Filter the list of operand indexes that are to be folded. Abort if
967 // any operand will prevent folding.
969 SmallVector<unsigned, 2> FoldOps;
970 if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
973 // It's only legal to remat for a use, not a def.
974 if (ReMat && (MRInfo & VirtRegMap::isMod))
977 return tii_->canFoldMemoryOperand(MI, FoldOps);
980 bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
981 LiveInterval::Ranges::const_iterator itr = li.ranges.begin();
983 MachineBasicBlock *mbb = indexes_->getMBBCoveringRange(itr->start, itr->end);
988 for (++itr; itr != li.ranges.end(); ++itr) {
989 MachineBasicBlock *mbb2 =
990 indexes_->getMBBCoveringRange(itr->start, itr->end);
999 /// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of
1000 /// interval on to-be re-materialized operands of MI) with new register.
1001 void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
1002 MachineInstr *MI, unsigned NewVReg,
1004 // There is an implicit use. That means one of the other operands is
1005 // being remat'ed and the remat'ed instruction has li.reg as a
1006 // use operand. Make sure we rewrite that as well.
1007 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1008 MachineOperand &MO = MI->getOperand(i);
1011 unsigned Reg = MO.getReg();
1012 if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
1014 if (!vrm.isReMaterialized(Reg))
1016 MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
1017 MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg);
1019 UseMO->setReg(NewVReg);
1023 /// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
1024 /// for addIntervalsForSpills to rewrite uses / defs for the given live range.
1025 bool LiveIntervals::
1026 rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
1027 bool TrySplit, SlotIndex index, SlotIndex end,
1029 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
1030 unsigned Slot, int LdSlot,
1031 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
1033 const TargetRegisterClass* rc,
1034 SmallVector<int, 4> &ReMatIds,
1035 const MachineLoopInfo *loopInfo,
1036 unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
1037 DenseMap<unsigned,unsigned> &MBBVRegsMap,
1038 std::vector<LiveInterval*> &NewLIs) {
1039 bool CanFold = false;
1041 for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
1042 MachineOperand& mop = MI->getOperand(i);
1045 unsigned Reg = mop.getReg();
1046 unsigned RegI = Reg;
1047 if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
1052 bool TryFold = !DefIsReMat;
1053 bool FoldSS = true; // Default behavior unless it's a remat.
1054 int FoldSlot = Slot;
1056 // If this is the rematerializable definition MI itself and
1057 // all of its uses are rematerialized, simply delete it.
1058 if (MI == ReMatOrigDefMI && CanDelete) {
1059 DEBUG(dbgs() << "\t\t\t\tErasing re-materializable def: "
1061 RemoveMachineInstrFromMaps(MI);
1062 vrm.RemoveMachineInstrFromMaps(MI);
1063 MI->eraseFromParent();
1067 // If def for this use can't be rematerialized, then try folding.
1068 // If def is rematerializable and it's a load, also try folding.
1069 TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
1071 // Try to fold loads (from stack slot, constant pool, etc.) into uses.
1077 // Scan all of the operands of this instruction rewriting operands
1078 // to use NewVReg instead of li.reg as appropriate. We do this for two reasons:
1081 // 1. If the instr reads the same spilled vreg multiple times, we
1082 // want to reuse the NewVReg.
1083 // 2. If the instr is a two-addr instruction, we are required to
1084 // keep the src/dst regs pinned.
1086 // Keep track of whether we replace a use and/or def so that we can
1087 // create the spill interval with the appropriate range.
1089 HasUse = mop.isUse();
1090 HasDef = mop.isDef();
1091 SmallVector<unsigned, 2> Ops;
1093 for (unsigned j = i+1, e = MI->getNumOperands(); j != e; ++j) {
1094 const MachineOperand &MOj = MI->getOperand(j);
1097 unsigned RegJ = MOj.getReg();
1098 if (RegJ == 0 || TargetRegisterInfo::isPhysicalRegister(RegJ))
1102 if (!MOj.isUndef()) {
1103 HasUse |= MOj.isUse();
1104 HasDef |= MOj.isDef();
1109 // Create a new virtual register for the spill interval.
1110 // Create the new register now so we can map the fold instruction
1111 // to the new register so when it is unfolded we get the correct register.
1113 bool CreatedNewVReg = false;
1115 NewVReg = mri_->createVirtualRegister(rc);
1117 CreatedNewVReg = true;
1119 // The new virtual register should get the same allocation hints as the old register.
1121 std::pair<unsigned, unsigned> Hint = mri_->getRegAllocationHint(Reg);
1122 if (Hint.first || Hint.second)
1123 mri_->setRegAllocationHint(NewVReg, Hint.first, Hint.second);
1129 // Do not fold load / store here if we are splitting. We'll find an
1130 // optimal point to insert a load / store later.
1132 if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
1133 Ops, FoldSS, FoldSlot, NewVReg)) {
1134 // Folding the load/store can completely change the instruction in
1135 // unpredictable ways, rescan it from the beginning.
1138 // We need to give the new vreg the same stack slot as the
1139 // spilled interval.
1140 vrm.assignVirt2StackSlot(NewVReg, FoldSlot);
1146 if (isNotInMIMap(MI))
1148 goto RestartInstruction;
1151 // We'll try to fold it later if it's profitable.
1152 CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat);
1156 mop.setReg(NewVReg);
1157 if (mop.isImplicit())
1158 rewriteImplicitOps(li, MI, NewVReg, vrm);
1160 // Reuse NewVReg for other reads.
1161 for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
1162 MachineOperand &mopj = MI->getOperand(Ops[j]);
1163 mopj.setReg(NewVReg);
1164 if (mopj.isImplicit())
1165 rewriteImplicitOps(li, MI, NewVReg, vrm);
1168 if (CreatedNewVReg) {
1170 vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI);
1171 if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
1172 // Each valnum may have its own remat id.
1173 ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
1175 vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
1177 if (!CanDelete || (HasUse && HasDef)) {
1178 // If this is a two-addr instruction then its use operands are
1179 // rematerializable but its def is not. It should be assigned a stack slot.
1181 vrm.assignVirt2StackSlot(NewVReg, Slot);
1184 vrm.assignVirt2StackSlot(NewVReg, Slot);
1186 } else if (HasUse && HasDef &&
1187 vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
1188 // If this interval hasn't been assigned a stack slot (because earlier
1189 // def is a deleted remat def), do it now.
1190 assert(Slot != VirtRegMap::NO_STACK_SLOT);
1191 vrm.assignVirt2StackSlot(NewVReg, Slot);
1194 // Re-matting an instruction with virtual register use. Add the
1195 // register as an implicit use on the use MI.
1196 if (DefIsReMat && ImpUse)
1197 MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
1199 // Create a new register interval for this spill / remat.
1200 LiveInterval &nI = getOrCreateInterval(NewVReg);
1201 if (CreatedNewVReg) {
1202 NewLIs.push_back(&nI);
1203 MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
1205 vrm.setIsSplitFromReg(NewVReg, li.reg);
1209 if (CreatedNewVReg) {
1210 LiveRange LR(index.getLoadIndex(), index.getDefIndex(),
1211 nI.getNextValue(SlotIndex(), 0, false, VNInfoAllocator));
1212 DEBUG(dbgs() << " +" << LR);
1215 // Extend the split live interval to this def / use.
1216 SlotIndex End = index.getDefIndex();
1217 LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
1218 nI.getValNumInfo(nI.getNumValNums()-1));
1219 DEBUG(dbgs() << " +" << LR);
1224 LiveRange LR(index.getDefIndex(), index.getStoreIndex(),
1225 nI.getNextValue(SlotIndex(), 0, false, VNInfoAllocator));
1226 DEBUG(dbgs() << " +" << LR);
1231 dbgs() << "\t\t\t\tAdded new interval: ";
1232 nI.print(dbgs(), tri_);
1238 bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
1240 MachineBasicBlock *MBB,
1241 SlotIndex Idx) const {
1242 SlotIndex End = getMBBEndIdx(MBB);
1243 for (unsigned j = 0, ee = VNI->kills.size(); j != ee; ++j) {
1244 if (VNI->kills[j].isPHI())
1247 SlotIndex KillIdx = VNI->kills[j];
1248 if (KillIdx > Idx && KillIdx <= End)
1254 /// RewriteInfo - Keep track of machine instrs that will be rewritten
1255 /// during spilling.
1257 struct RewriteInfo {
1262 RewriteInfo(SlotIndex i, MachineInstr *mi, bool u, bool d)
1263 : Index(i), MI(mi), HasUse(u), HasDef(d) {}
1266 struct RewriteInfoCompare {
1267 bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
1268 return LHS.Index < RHS.Index;
1273 void LiveIntervals::
1274 rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
1275 LiveInterval::Ranges::const_iterator &I,
1276 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
1277 unsigned Slot, int LdSlot,
1278 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
1280 const TargetRegisterClass* rc,
1281 SmallVector<int, 4> &ReMatIds,
1282 const MachineLoopInfo *loopInfo,
1283 BitVector &SpillMBBs,
1284 DenseMap<unsigned, std::vector<SRInfo> > &SpillIdxes,
1285 BitVector &RestoreMBBs,
1286 DenseMap<unsigned, std::vector<SRInfo> > &RestoreIdxes,
1287 DenseMap<unsigned,unsigned> &MBBVRegsMap,
1288 std::vector<LiveInterval*> &NewLIs) {
1289 bool AllCanFold = true;
1290 unsigned NewVReg = 0;
1291 SlotIndex start = I->start.getBaseIndex();
1292 SlotIndex end = I->end.getPrevSlot().getBaseIndex().getNextIndex();
1294 // First collect all the defs / uses in this live range that will be rewritten.
1295 // Make sure they are sorted according to instruction index.
1296 std::vector<RewriteInfo> RewriteMIs;
1297 for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
1298 re = mri_->reg_end(); ri != re; ) {
1299 MachineInstr *MI = &*ri;
1300 MachineOperand &O = ri.getOperand();
1302 assert(!O.isImplicit() && "Spilling register that's used as implicit use?");
1303 SlotIndex index = getInstructionIndex(MI);
1304 if (index < start || index >= end)
1308 // Must be defined by an implicit def. It should not be spilled. Note,
1309 // this is for correctness reasons. e.g.
1310 // 8 %reg1024<def> = IMPLICIT_DEF
1311 // 12 %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
1312 // The live range [12, 14) is not part of the r1024 live interval since
1313 // it's defined by an implicit def. It will not conflict with the live
1314 // interval of r1025. Now suppose both registers are spilled; you can
1315 // easily see a situation where both registers are reloaded before
1316 // the INSERT_SUBREG and the two target registers would overlap.
1318 RewriteMIs.push_back(RewriteInfo(index, MI, O.isUse(), O.isDef()));
1320 std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());
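// Sorting by SlotIndex groups all entries for the same instruction next to
// each other; the loop below relies on this when it merges the per-operand
// entries of a single MI.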
1322 unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
1323 // Now rewrite the defs and uses.
1324 for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
1325 RewriteInfo &rwi = RewriteMIs[i];
1327 SlotIndex index = rwi.Index;
1328 bool MIHasUse = rwi.HasUse;
1329 bool MIHasDef = rwi.HasDef;
1330 MachineInstr *MI = rwi.MI;
1331 // If MI defs and/or uses the same register multiple times, then there
1332 // are multiple entries.
1333 unsigned NumUses = MIHasUse;
1334 while (i != e && RewriteMIs[i].MI == MI) {
1335 assert(RewriteMIs[i].Index == index);
1336 bool isUse = RewriteMIs[i].HasUse;
1337 if (isUse) ++NumUses;
1339 MIHasDef |= RewriteMIs[i].HasDef;
1342 MachineBasicBlock *MBB = MI->getParent();
1344 if (ImpUse && MI != ReMatDefMI) {
1345 // Re-matting an instruction with virtual register use. Update the
1346 // register interval's spill weight to HUGE_VALF to prevent it from being spilled.
1348 LiveInterval &ImpLi = getInterval(ImpUse);
1349 ImpLi.weight = HUGE_VALF;
1352 unsigned MBBId = MBB->getNumber();
1353 unsigned ThisVReg = 0;
1355 DenseMap<unsigned,unsigned>::iterator NVI = MBBVRegsMap.find(MBBId);
1356 if (NVI != MBBVRegsMap.end()) {
1357 ThisVReg = NVI->second;
1364 // It's better to start a new interval to avoid artificially
1365 // extending the new interval.
1366 if (MIHasDef && !MIHasUse) {
1367 MBBVRegsMap.erase(MBB->getNumber());
1373 bool IsNew = ThisVReg == 0;
1375 // This ends the previous live interval. If all of its defs / uses
1376 // can be folded, give it a low spill weight.
1377 if (NewVReg && TrySplit && AllCanFold) {
1378 LiveInterval &nI = getOrCreateInterval(NewVReg);
1385 bool HasDef = false;
1386 bool HasUse = false;
1387 bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
1388 index, end, MI, ReMatOrigDefMI, ReMatDefMI,
1389 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1390 CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
1391 ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs);
1392 if (!HasDef && !HasUse)
1395 AllCanFold &= CanFold;
1397 // Update weight of spill interval.
1398 LiveInterval &nI = getOrCreateInterval(NewVReg);
1400 // The spill weight is now infinity as it cannot be spilled again.
1401 nI.weight = HUGE_VALF;
1405 // Keep track of the last def and first use in each MBB.
1407 if (MI != ReMatOrigDefMI || !CanDelete) {
1408 bool HasKill = false;
1410 HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, index.getDefIndex());
1412 // If this is two-address code, then this index starts a new VNInfo.
1413 const VNInfo *VNI = li.findDefinedVNInfoForRegInt(index.getDefIndex());
1415 HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, index.getDefIndex());
1417 DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
1418 SpillIdxes.find(MBBId);
1420 if (SII == SpillIdxes.end()) {
1421 std::vector<SRInfo> S;
1422 S.push_back(SRInfo(index, NewVReg, true));
1423 SpillIdxes.insert(std::make_pair(MBBId, S));
1424 } else if (SII->second.back().vreg != NewVReg) {
1425 SII->second.push_back(SRInfo(index, NewVReg, true));
1426 } else if (index > SII->second.back().index) {
1427 // If there is an earlier def and this is a two-address
1428 // instruction, then it's not possible to fold the store (which
1429 // would also fold the load).
1430 SRInfo &Info = SII->second.back();
1432 Info.canFold = !HasUse;
1434 SpillMBBs.set(MBBId);
1435 } else if (SII != SpillIdxes.end() &&
1436 SII->second.back().vreg == NewVReg &&
1437 index > SII->second.back().index) {
1438 // There is an earlier def that's not killed (must be two-address).
1439 // The spill is no longer needed.
1440 SII->second.pop_back();
1441 if (SII->second.empty()) {
1442 SpillIdxes.erase(MBBId);
1443 SpillMBBs.reset(MBBId);
1450 DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
1451 SpillIdxes.find(MBBId);
1452 if (SII != SpillIdxes.end() &&
1453 SII->second.back().vreg == NewVReg &&
1454 index > SII->second.back().index)
1455 // There are use(s) following the last def, so it's not safe to fold the spill.
1456 SII->second.back().canFold = false;
1457 DenseMap<unsigned, std::vector<SRInfo> >::iterator RII =
1458 RestoreIdxes.find(MBBId);
1459 if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
1460 // If we are splitting live intervals, only fold if it's the first
1461 // use and there isn't another use later in the MBB.
1462 RII->second.back().canFold = false;
1464 // Only need a reload if there isn't an earlier def / use.
1465 if (RII == RestoreIdxes.end()) {
1466 std::vector<SRInfo> Infos;
1467 Infos.push_back(SRInfo(index, NewVReg, true));
1468 RestoreIdxes.insert(std::make_pair(MBBId, Infos));
1470 RII->second.push_back(SRInfo(index, NewVReg, true));
1472 RestoreMBBs.set(MBBId);
1476 // Update spill weight.
1477 unsigned loopDepth = loopInfo->getLoopDepth(MBB);
1478 nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
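// getSpillWeight is expected to scale the def/use count by the loop depth of
// this block, so spill code inside deep loops makes the new interval
// correspondingly more expensive to spill again.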
1481 if (NewVReg && TrySplit && AllCanFold) {
1482 // If all of its defs / uses can be folded, give it a low spill weight.
1483 LiveInterval &nI = getOrCreateInterval(NewVReg);
1488 bool LiveIntervals::alsoFoldARestore(int Id, SlotIndex index,
1489 unsigned vr, BitVector &RestoreMBBs,
1490 DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1491 if (!RestoreMBBs[Id])
1493 std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1494 for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1495 if (Restores[i].index == index &&
1496 Restores[i].vreg == vr &&
1497 Restores[i].canFold)
1502 void LiveIntervals::eraseRestoreInfo(int Id, SlotIndex index,
1503 unsigned vr, BitVector &RestoreMBBs,
1504 DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1505 if (!RestoreMBBs[Id])
1507 std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1508 for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1509 if (Restores[i].index == index && Restores[i].vreg)
1510 Restores[i].index = SlotIndex();
1513 /// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being
1514 /// spilled and create empty intervals for their uses.
1516 LiveIntervals::handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm,
1517 const TargetRegisterClass* rc,
1518 std::vector<LiveInterval*> &NewLIs) {
1519 for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
1520 re = mri_->reg_end(); ri != re; ) {
1521 MachineOperand &O = ri.getOperand();
1522 MachineInstr *MI = &*ri;
1525 assert(MI->isImplicitDef() &&
1526 "Register def was not rewritten?");
1527 RemoveMachineInstrFromMaps(MI);
1528 vrm.RemoveMachineInstrFromMaps(MI);
1529 MI->eraseFromParent();
1531 // This must be a use of an implicit_def so it's not part of the live
1532 // interval. Create a new empty live interval for it.
1533 // FIXME: Can we simply erase some of the instructions? e.g. Stores?
1534 unsigned NewVReg = mri_->createVirtualRegister(rc);
1536 vrm.setIsImplicitlyDefined(NewVReg);
1537 NewLIs.push_back(&getOrCreateInterval(NewVReg));
1538 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1539 MachineOperand &MO = MI->getOperand(i);
1540 if (MO.isReg() && MO.getReg() == li.reg) {
1549 std::vector<LiveInterval*> LiveIntervals::
1550 addIntervalsForSpillsFast(const LiveInterval &li,
1551 const MachineLoopInfo *loopInfo,
1553 unsigned slot = vrm.assignVirt2StackSlot(li.reg);
1555 std::vector<LiveInterval*> added;
1557 assert(li.weight != HUGE_VALF &&
1558 "attempt to spill already spilled interval!");
1561 dbgs() << "\t\t\t\tadding intervals for spills for interval: ";
1566 const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
1568 MachineRegisterInfo::reg_iterator RI = mri_->reg_begin(li.reg);
1569 while (RI != mri_->reg_end()) {
1570 MachineInstr* MI = &*RI;
1572 SmallVector<unsigned, 2> Indices;
1573 bool HasUse = false;
1574 bool HasDef = false;
1576 for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
1577 MachineOperand& mop = MI->getOperand(i);
1578 if (!mop.isReg() || mop.getReg() != li.reg) continue;
1580 HasUse |= MI->getOperand(i).isUse();
1581 HasDef |= MI->getOperand(i).isDef();
1583 Indices.push_back(i);
1586 if (!tryFoldMemoryOperand(MI, vrm, NULL, getInstructionIndex(MI),
1587 Indices, true, slot, li.reg)) {
1588 unsigned NewVReg = mri_->createVirtualRegister(rc);
1590 vrm.assignVirt2StackSlot(NewVReg, slot);
1592 // create a new register for this spill
1593 LiveInterval &nI = getOrCreateInterval(NewVReg);
1595 // the spill weight is now infinity as it
1596 // cannot be spilled again
1597 nI.weight = HUGE_VALF;
1599 // Rewrite register operands to use the new vreg.
1600 for (SmallVectorImpl<unsigned>::iterator I = Indices.begin(),
1601 E = Indices.end(); I != E; ++I) {
1602 MI->getOperand(*I).setReg(NewVReg);
1604 if (MI->getOperand(*I).isUse())
1605 MI->getOperand(*I).setIsKill(true);
1608 // Fill in the new live interval.
1609 SlotIndex index = getInstructionIndex(MI);
1611 LiveRange LR(index.getLoadIndex(), index.getUseIndex(),
1612 nI.getNextValue(SlotIndex(), 0, false,
1613 getVNInfoAllocator()));
1614 DEBUG(dbgs() << " +" << LR);
1616 vrm.addRestorePoint(NewVReg, MI);
1619 LiveRange LR(index.getDefIndex(), index.getStoreIndex(),
1620 nI.getNextValue(SlotIndex(), 0, false,
1621 getVNInfoAllocator()));
1622 DEBUG(dbgs() << " +" << LR);
1624 vrm.addSpillPoint(NewVReg, true, MI);
1627 added.push_back(&nI);
1630 dbgs() << "\t\t\t\tadded new interval: ";
1637 RI = mri_->reg_begin(li.reg);
1643 std::vector<LiveInterval*> LiveIntervals::
1644 addIntervalsForSpills(const LiveInterval &li,
1645 SmallVectorImpl<LiveInterval*> &SpillIs,
1646 const MachineLoopInfo *loopInfo, VirtRegMap &vrm) {
1648 if (EnableFastSpilling)
1649 return addIntervalsForSpillsFast(li, loopInfo, vrm);
1651 assert(li.weight != HUGE_VALF &&
1652 "attempt to spill already spilled interval!");
1655 dbgs() << "\t\t\t\tadding intervals for spills for interval: ";
1656 li.print(dbgs(), tri_);
1660 // Each bit specifies whether a spill is required in the MBB.
1661 BitVector SpillMBBs(mf_->getNumBlockIDs());
1662 DenseMap<unsigned, std::vector<SRInfo> > SpillIdxes;
1663 BitVector RestoreMBBs(mf_->getNumBlockIDs());
1664 DenseMap<unsigned, std::vector<SRInfo> > RestoreIdxes;
1665 DenseMap<unsigned,unsigned> MBBVRegsMap;
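// Per-block bookkeeping: SpillIdxes / RestoreIdxes record, for each basic
// block id, the indexes where a store to / reload from the stack slot may be
// needed; SpillMBBs / RestoreMBBs flag which blocks have entries, and
// MBBVRegsMap remembers the new vreg last used for this interval in a block.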
1666 std::vector<LiveInterval*> NewLIs;
1667 const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
1669 unsigned NumValNums = li.getNumValNums();
1670 SmallVector<MachineInstr*, 4> ReMatDefs;
1671 ReMatDefs.resize(NumValNums, NULL);
1672 SmallVector<MachineInstr*, 4> ReMatOrigDefs;
1673 ReMatOrigDefs.resize(NumValNums, NULL);
1674 SmallVector<int, 4> ReMatIds;
1675 ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
1676 BitVector ReMatDelete(NumValNums);
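// Per-val# remat bookkeeping: ReMatOrigDefs holds the original defining
// instruction, ReMatDefs a clone of it (the original may be modified or
// deleted), ReMatIds the remat id handed out by the VirtRegMap, and
// ReMatDelete marks values whose def can be removed entirely.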
1677 unsigned Slot = VirtRegMap::MAX_STACK_SLOT;
1679 // Spilling a split live interval. It cannot be split any further. Also,
1680 // it's guaranteed to be a single val# / range interval.
1681 if (vrm.getPreSplitReg(li.reg)) {
1682 vrm.setIsSplitFromReg(li.reg, 0);
1683 // Unset the split kill marker on the last use.
1684 SlotIndex KillIdx = vrm.getKillPoint(li.reg);
1685 if (KillIdx != SlotIndex()) {
1686 MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
1687 assert(KillMI && "Last use disappeared?");
1688 int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
1689 assert(KillOp != -1 && "Last use disappeared?");
1690 KillMI->getOperand(KillOp).setIsKill(false);
1692 vrm.removeKillPoint(li.reg);
1693 bool DefIsReMat = vrm.isReMaterialized(li.reg);
1694 Slot = vrm.getStackSlot(li.reg);
1695 assert(Slot != VirtRegMap::MAX_STACK_SLOT);
1696 MachineInstr *ReMatDefMI = DefIsReMat ?
1697 vrm.getReMaterializedMI(li.reg) : NULL;
1699 bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1700 bool isLoad = isLoadSS ||
1701 (DefIsReMat && (ReMatDefMI->getDesc().canFoldAsLoad()));
1702 bool IsFirstRange = true;
1703 for (LiveInterval::Ranges::const_iterator
1704 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1705 // If this is a split live interval with multiple ranges, it means there
1706 // are two-address instructions that re-defined the value. Only the
1707 // first def can be rematerialized!
1709 // Note ReMatOrigDefMI has already been deleted.
1710 rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
1711 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1712 false, vrm, rc, ReMatIds, loopInfo,
1713 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1714 MBBVRegsMap, NewLIs);
1716 rewriteInstructionsForSpills(li, false, I, NULL, 0,
1717 Slot, 0, false, false, false,
1718 false, vrm, rc, ReMatIds, loopInfo,
1719 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1720 MBBVRegsMap, NewLIs);
1722 IsFirstRange = false;
1725 handleSpilledImpDefs(li, vrm, rc, NewLIs);
1729 bool TrySplit = !intervalIsInOneMBB(li);
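// Only consider splitting when the interval spans more than one basic block;
// an interval local to a single block is simply spilled.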
1732 bool NeedStackSlot = false;
1733 for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
1735 const VNInfo *VNI = *i;
1736 unsigned VN = VNI->id;
1737 if (VNI->isUnused())
1738 continue; // Dead val#.
1739 // Is the def for the val# rematerializable?
1740 MachineInstr *ReMatDefMI = VNI->isDefAccurate()
1741 ? getInstructionFromIndex(VNI->def) : 0;
1743 if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, SpillIs, dummy)) {
1744 // Remember how to remat the def of this val#.
1745 ReMatOrigDefs[VN] = ReMatDefMI;
1746 // Original def may be modified so we have to make a copy here.
1747 MachineInstr *Clone = mf_->CloneMachineInstr(ReMatDefMI);
1748 CloneMIs.push_back(Clone);
1749 ReMatDefs[VN] = Clone;
1751 bool CanDelete = true;
1752 if (VNI->hasPHIKill()) {
1753 // A kill is a phi node, so not all of its uses can be rematerialized.
1754 // It must not be deleted.
1756 // Need a stack slot if there is any live range where uses cannot be rematerialized.
1758 NeedStackSlot = true;
1761 ReMatDelete.set(VN);
1763 // Need a stack slot if there is any live range where uses cannot be rematerialized.
1765 NeedStackSlot = true;
1769 // One stack slot per live interval.
1770 if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0) {
1771 if (vrm.getStackSlot(li.reg) == VirtRegMap::NO_STACK_SLOT)
1772 Slot = vrm.assignVirt2StackSlot(li.reg);
1774 // This case only occurs when the prealloc splitter has already assigned
1775 // a stack slot to this vreg.
1777 Slot = vrm.getStackSlot(li.reg);
1780 // Create new intervals and rewrite defs and uses.
1781 for (LiveInterval::Ranges::const_iterator
1782 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1783 MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
1784 MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
1785 bool DefIsReMat = ReMatDefMI != NULL;
1786 bool CanDelete = ReMatDelete[I->valno->id];
1788 bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1789 bool isLoad = isLoadSS ||
1790 (DefIsReMat && ReMatDefMI->getDesc().canFoldAsLoad());
1791 rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
1792 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1793 CanDelete, vrm, rc, ReMatIds, loopInfo,
1794 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1795 MBBVRegsMap, NewLIs);
1798 // Insert spills / restores if we are splitting.
1800 handleSpilledImpDefs(li, vrm, rc, NewLIs);
1804 SmallPtrSet<LiveInterval*, 4> AddedKill;
1805 SmallVector<unsigned, 2> Ops;
1806 if (NeedStackSlot) {
1807 int Id = SpillMBBs.find_first();
1809 std::vector<SRInfo> &spills = SpillIdxes[Id];
1810 for (unsigned i = 0, e = spills.size(); i != e; ++i) {
1811 SlotIndex index = spills[i].index;
1812 unsigned VReg = spills[i].vreg;
1813 LiveInterval &nI = getOrCreateInterval(VReg);
1814 bool isReMat = vrm.isReMaterialized(VReg);
1815 MachineInstr *MI = getInstructionFromIndex(index);
1816 bool CanFold = false;
1817 bool FoundUse = false;
1818 Ops.clear();
1819 if (spills[i].canFold) {
1820 CanFold = true;
1821 for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
1822 MachineOperand &MO = MI->getOperand(j);
1823 if (!MO.isReg() || MO.getReg() != VReg)
1824 continue;
1826 Ops.push_back(j);
1827 if (MO.isDef())
1828 continue;
1829 if (MO.isImplicit() ||
1830 (!FoundUse && !alsoFoldARestore(Id, index, VReg,
1831 RestoreMBBs, RestoreIdxes))) {
1832 // MI has two-address uses of the same register. If the use
1833 // isn't the first and only use in the BB, then we can't fold
1834 // it. FIXME: Move this to rewriteInstructionsForSpills.
1835 CanFold = false;
1836 break;
1837 }
1838 FoundUse = true;
1839 }
1840 }
1841 // Fold the store into the def if possible.
1842 bool Folded = false;
1843 if (CanFold && !Ops.empty()) {
1844 if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot, VReg)) {
1845 Folded = true;
1846 if (FoundUse) {
1847 // Also folded uses, do not issue a load.
1848 eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
1849 nI.removeRange(index.getLoadIndex(), index.getDefIndex());
1850 }
1851 nI.removeRange(index.getDefIndex(), index.getStoreIndex());
1852 }
1853 }
1855 // Otherwise tell the spiller to issue a spill.
1856 if (!Folded) {
1857 LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
1858 bool isKill = LR->end == index.getStoreIndex();
1859 if (!MI->registerDefIsDead(nI.reg))
1860 // No need to spill a dead def.
1861 vrm.addSpillPoint(VReg, isKill, MI);
1862 if (isKill)
1863 AddedKill.insert(&nI);
1864 }
1866 Id = SpillMBBs.find_next(Id);
1867 }
1868 }
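// A rough picture of the slot bookkeeping used above, assuming the usual four
// sub-slots per instruction index (load, use, def, store):
//
//   SlotIndex idx = getInstructionIndex(MI);
//   // idx.getLoadIndex() < idx.getUseIndex() < idx.getDefIndex()
//   //                    < idx.getStoreIndex()
//
// When the store is folded into the def, the [def, store) piece of the new
// interval is removed because no separate store instruction remains; if the
// uses were folded as well, the [load, def) piece goes too and the pending
// restore is erased. Otherwise addSpillPoint asks the spiller to emit an
// explicit store after MI.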
1870 int Id = RestoreMBBs.find_first();
1871 while (Id != -1) {
1872 std::vector<SRInfo> &restores = RestoreIdxes[Id];
1873 for (unsigned i = 0, e = restores.size(); i != e; ++i) {
1874 SlotIndex index = restores[i].index;
1875 if (index == SlotIndex())
1876 continue;
1877 unsigned VReg = restores[i].vreg;
1878 LiveInterval &nI = getOrCreateInterval(VReg);
1879 bool isReMat = vrm.isReMaterialized(VReg);
1880 MachineInstr *MI = getInstructionFromIndex(index);
1881 bool CanFold = false;
1882 Ops.clear();
1883 if (restores[i].canFold) {
1884 CanFold = true;
1885 for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
1886 MachineOperand &MO = MI->getOperand(j);
1887 if (!MO.isReg() || MO.getReg() != VReg)
1888 continue;
1890 if (MO.isDef()) {
1891 // If this restore were to be folded, it would have been folded
1892 // already.
1893 CanFold = false;
1894 break;
1895 }
1896 Ops.push_back(j);
1897 }
1898 }
1900 // Fold the load into the use if possible.
1901 bool Folded = false;
1902 if (CanFold && !Ops.empty()) {
1903 if (!isReMat)
1904 Folded = tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot, VReg);
1905 else {
1906 MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
1907 int LdSlot = 0;
1908 bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1909 // If the rematerializable def is a load, also try to fold it.
1910 if (isLoadSS || ReMatDefMI->getDesc().canFoldAsLoad())
1911 Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
1912 Ops, isLoadSS, LdSlot, VReg);
1914 unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
1915 if (ImpUse) {
1916 // Re-matting an instruction with virtual register use. Add the
1917 // register as an implicit use on the use MI and update the register
1918 // interval's spill weight to HUGE_VALF to prevent it from being
1919 // spilled.
1920 LiveInterval &ImpLi = getInterval(ImpUse);
1921 ImpLi.weight = HUGE_VALF;
1922 MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
1923 }
1924 }
1925 }
1927 // If folding is not possible / failed, then tell the spiller to issue a
1928 // load / rematerialization for us.
1929 if (Folded)
1930 nI.removeRange(index.getLoadIndex(), index.getDefIndex());
1931 else
1932 vrm.addRestorePoint(VReg, MI);
1933 }
1934 Id = RestoreMBBs.find_next(Id);
1935 }
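// Symmetrically, each restore candidate above was either folded -- the reload
// (or the rematerialized defining load) becomes part of the using instruction
// and the [load, def) piece of the new interval disappears -- or handed to
// the spiller via addRestorePoint, which re-issues the load or the
// rematerialized def just before MI.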
1937 // Finalize intervals: add kills, finalize spill weights, and filter out
1938 // dead intervals.
1939 std::vector<LiveInterval*> RetNewLIs;
1940 for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
1941 LiveInterval *LI = NewLIs[i];
1942 if (!LI->empty()) {
1943 LI->weight /= SlotIndex::NUM * getApproximateInstructionCount(*LI);
1944 if (!AddedKill.count(LI)) {
1945 LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
1946 SlotIndex LastUseIdx = LR->end.getBaseIndex();
1947 MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
1948 int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
1949 assert(UseIdx != -1);
1950 if (!LastUse->isRegTiedToDefOperand(UseIdx)) {
1951 LastUse->getOperand(UseIdx).setIsKill();
1952 vrm.addKillPoint(LI->reg, LastUseIdx);
1953 }
1954 }
1955 RetNewLIs.push_back(LI);
1956 }
1957 }
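// The weight adjustment above turns the use weight accumulated by
// rewriteInstructionsForSpills into a per-slot density: SlotIndex::NUM is the
// number of sub-slots per instruction (four here: load, use, def, store), so
// an interval with, say, an accumulated weight of 8.0 spanning roughly 10
// instructions ends up with 8.0 / (4 * 10) = 0.2. Marking the last use of
// each surviving interval as a kill keeps the spiller and the allocator in
// agreement about where the value dies.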
1959 handleSpilledImpDefs(li, vrm, rc, RetNewLIs);
1960 return RetNewLIs;
1961 }
1963 /// hasAllocatableSuperReg - Return true if the specified physical register has
1964 /// any super register that's allocatable.
1965 bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
1966 for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS)
1967 if (allocatableRegs_[*AS] && hasInterval(*AS))
1968 return true;
1969 return false;
1970 }
1972 /// getRepresentativeReg - Find the largest super register of the specified
1973 /// physical register.
1974 unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
1975 // Find the largest super-register that is allocatable.
1976 unsigned BestReg = Reg;
1977 for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
1978 unsigned SuperReg = *AS;
1979 if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) {
1980 BestReg = SuperReg;
1981 break;
1982 }
1983 }
1984 return BestReg;
1985 }
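// Illustration only (register names borrowed from X86 as an assumption, not
// something this code depends on): starting from AX, the walk above would
// settle on EAX or RAX -- the widest super-register that has no allocatable
// super-register of its own and that has a live interval -- so the conflict
// and emergency-spill code below always reasons about the register covering
// every alias.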
1987 /// getNumConflictsWithPhysReg - Return the number of uses and defs of the
1988 /// specified interval that conflicts with the specified physical register.
1989 unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
1990 unsigned PhysReg) const {
1991 unsigned NumConflicts = 0;
1992 const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg));
1993 for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
1994 E = mri_->reg_end(); I != E; ++I) {
1995 MachineOperand &O = I.getOperand();
1996 MachineInstr *MI = O.getParent();
1997 SlotIndex Index = getInstructionIndex(MI);
1998 if (pli.liveAt(Index))
1999 ++NumConflicts;
2000 }
2001 return NumConflicts;
2002 }
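// Every use or def of li.reg whose instruction falls inside the
// representative physical register's live interval counts as one conflict; a
// caller (for instance, an allocator deciding which physical register is
// cheapest to spill around) can compare candidates by this count.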
2004 /// spillPhysRegAroundRegDefsUses - Spill the specified physical register
2005 /// around all defs and uses of the specified interval. Return true if it
2006 /// was able to cut its interval.
2007 bool LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
2008 unsigned PhysReg, VirtRegMap &vrm) {
2009 unsigned SpillReg = getRepresentativeReg(PhysReg);
2011 for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
2012 // If there is a register that aliases PhysReg but is not a sub-register
2013 // of the chosen representative super register, assert: we cannot handle
2014 // that situation yet.
2015 assert(*AS == SpillReg || !allocatableRegs_[*AS] || !hasInterval(*AS) ||
2016 tri_->isSuperRegister(*AS, SpillReg));
2019 SmallVector<unsigned, 4> PRegs;
2020 if (hasInterval(SpillReg))
2021 PRegs.push_back(SpillReg);
2022 else {
2023 SmallSet<unsigned, 4> Added;
2024 for (const unsigned* AS = tri_->getSubRegisters(SpillReg); *AS; ++AS)
2025 if (Added.insert(*AS) && hasInterval(*AS)) {
2026 PRegs.push_back(*AS);
2027 for (const unsigned* ASS = tri_->getSubRegisters(*AS); *ASS; ++ASS)
2028 Added.insert(*ASS);
2029 }
2030 }
2032 SmallPtrSet<MachineInstr*, 8> SeenMIs;
2033 for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
2034 E = mri_->reg_end(); I != E; ++I) {
2035 MachineOperand &O = I.getOperand();
2036 MachineInstr *MI = O.getParent();
2037 if (SeenMIs.count(MI))
2038 continue;
2039 SeenMIs.insert(MI);
2040 SlotIndex Index = getInstructionIndex(MI);
2041 for (unsigned i = 0, e = PRegs.size(); i != e; ++i) {
2042 unsigned PReg = PRegs[i];
2043 LiveInterval &pli = getInterval(PReg);
2044 if (!pli.liveAt(Index))
2045 continue;
2046 vrm.addEmergencySpill(PReg, MI);
2047 SlotIndex StartIdx = Index.getLoadIndex();
2048 SlotIndex EndIdx = Index.getNextIndex().getBaseIndex();
2049 if (pli.isInOneLiveRange(StartIdx, EndIdx)) {
2050 pli.removeRange(StartIdx, EndIdx);
2052 } else {
2053 std::string msg;
2054 raw_string_ostream Msg(msg);
2055 Msg << "Ran out of registers during register allocation!";
2056 if (MI->isInlineAsm()) {
2057 Msg << "\nPlease check your inline asm statement for invalid "
2058 << "constraints:\n";
2059 MI->print(Msg, tm_);
2060 }
2061 llvm_report_error(Msg.str());
2062 }
2063 for (const unsigned* AS = tri_->getSubRegisters(PReg); *AS; ++AS) {
2064 if (!hasInterval(*AS))
2065 continue;
2066 LiveInterval &spli = getInterval(*AS);
2067 if (spli.liveAt(Index))
2068 spli.removeRange(Index.getLoadIndex(),
2069 Index.getNextIndex().getBaseIndex());
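// In short: wherever li.reg is used while one of the registers in PRegs is
// live, that physical register is spilled around the instruction and the
// slots it occupied there -- from Index.getLoadIndex() up to the base index
// of the next instruction -- are carved out of its interval and out of any
// live sub-register interval, freeing it for li at that point. If the range
// cannot be removed cleanly, register allocation fails with the "ran out of
// registers" diagnostic above.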
2076 LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg,
2077 MachineInstr* startInst) {
2078 LiveInterval& Interval = getOrCreateInterval(reg);
2079 VNInfo* VN = Interval.getNextValue(
2080 SlotIndex(getInstructionIndex(startInst).getDefIndex()),
2081 startInst, true, getVNInfoAllocator());
2082 VN->setHasPHIKill(true);
2083 VN->kills.push_back(indexes_->getTerminatorGap(startInst->getParent()));
2084 LiveRange LR(
2085 SlotIndex(getInstructionIndex(startInst).getDefIndex()),
2086 getMBBEndIdx(startInst->getParent()), VN);
2087 Interval.addRange(LR);